gpu: Refactor image handling

Introduce a new GskGpuImageDescriptors object that tracks descriptors
for a set of images that can be managed by the GPU.
Then have each GskGpuShaderOp just reference the descriptors object it is
using, so that the code can set things up properly.

To reference an image, the ops now just reference their descriptor -
which is the uint32 we've been sending to the shaders since forever.
Benjamin Otte 2023-10-03 21:04:21 +02:00
parent f518d780ed
commit 53821da4d6
29 changed files with 632 additions and 332 deletions
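For orientation, here is a rough sketch of the call pattern this commit introduces, pieced together from the diffs below; it is illustrative only, and assumes frame, image, clip, rect, offset and tex_rect are already set up by the caller:

    GskGpuDescriptors *desc;
    guint32 descriptor;

    /* Each backend frame creates its own descriptors object
     * (a GskGLDescriptors or GskVulkanDescriptors). */
    desc = gsk_gpu_frame_create_descriptors (frame);

    /* Adding an image yields the uint32 descriptor that gets written into
     * the shader instance data; the same image/sampler pair is deduplicated. */
    if (!gsk_gpu_descriptors_add_image (desc, image, GSK_GPU_SAMPLER_DEFAULT, &descriptor))
      {
        /* The backend ran out of slots (the GL backend caps this at 16
         * texture units); the node processor starts a fresh descriptors
         * object in that case. */
      }

    /* Shader ops now take the descriptors object plus the descriptor
     * instead of an image/sampler pair. */
    gsk_gpu_texture_op (frame, clip, desc, descriptor, &rect, &offset, &tex_rect);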

View File

@ -0,0 +1,82 @@
#include "config.h"
#include "gskgldescriptorsprivate.h"
#include "gskglimageprivate.h"
struct _GskGLDescriptors
{
GskGpuDescriptors parent_instance;
GskGLDevice *device;
};
G_DEFINE_TYPE (GskGLDescriptors, gsk_gl_descriptors, GSK_TYPE_GPU_DESCRIPTORS)
static void
gsk_gl_descriptors_finalize (GObject *object)
{
GskGLDescriptors *self = GSK_GL_DESCRIPTORS (object);
g_object_unref (self->device);
G_OBJECT_CLASS (gsk_gl_descriptors_parent_class)->finalize (object);
}
static gboolean
gsk_gl_descriptors_add_image (GskGpuDescriptors *desc,
GskGpuImage *image,
GskGpuSampler sampler,
guint32 *out_descriptor)
{
gsize used_texture_units;
used_texture_units = gsk_gpu_descriptors_get_size (desc);
if (used_texture_units >= 16)
return FALSE;
*out_descriptor = used_texture_units;
return TRUE;
}
static void
gsk_gl_descriptors_class_init (GskGLDescriptorsClass *klass)
{
GskGpuDescriptorsClass *descriptors_class = GSK_GPU_DESCRIPTORS_CLASS (klass);
GObjectClass *object_class = G_OBJECT_CLASS (klass);
object_class->finalize = gsk_gl_descriptors_finalize;
descriptors_class->add_image = gsk_gl_descriptors_add_image;
}
static void
gsk_gl_descriptors_init (GskGLDescriptors *self)
{
}
GskGpuDescriptors *
gsk_gl_descriptors_new (GskGLDevice *device)
{
GskGLDescriptors *self;
self = g_object_new (GSK_TYPE_GL_DESCRIPTORS, NULL);
self->device = g_object_ref (device);
return GSK_GPU_DESCRIPTORS (self);
}
void
gsk_gl_descriptors_use (GskGLDescriptors *self)
{
GskGpuDescriptors *desc = GSK_GPU_DESCRIPTORS (self);
gsize i;
for (i = 0; i < gsk_gpu_descriptors_get_size (desc); i++)
{
glActiveTexture (GL_TEXTURE0 + i);
gsk_gl_image_bind_texture (GSK_GL_IMAGE (gsk_gpu_descriptors_get_image (desc, i)));
glBindSampler (i, gsk_gl_device_get_sampler_id (self->device, gsk_gpu_descriptors_get_sampler (desc, i)));
}
}

View File

@ -0,0 +1,18 @@
#pragma once
#include "gskgpudescriptorsprivate.h"
#include "gskgldeviceprivate.h"
G_BEGIN_DECLS
#define GSK_TYPE_GL_DESCRIPTORS (gsk_gl_descriptors_get_type ())
G_DECLARE_FINAL_TYPE (GskGLDescriptors, gsk_gl_descriptors, GSK, GL_DESCRIPTORS, GskGpuDescriptors)
GskGpuDescriptors * gsk_gl_descriptors_new (GskGLDevice *device);
void gsk_gl_descriptors_use (GskGLDescriptors *self);
G_END_DECLS

View File

@ -6,6 +6,7 @@
#include "gskgpuopprivate.h"
#include "gskgpushaderopprivate.h"
#include "gskglbufferprivate.h"
#include "gskgldescriptorsprivate.h"
#include "gskgldeviceprivate.h"
struct _GskGLFrame
@ -49,18 +50,10 @@ gsk_gl_frame_cleanup (GskGpuFrame *frame)
GSK_GPU_FRAME_CLASS (gsk_gl_frame_parent_class)->cleanup (frame);
}
static guint32
gsk_gl_frame_get_image_descriptor (GskGpuFrame *frame,
GskGpuImage *image,
GskGpuSampler sampler)
static GskGpuDescriptors *
gsk_gl_frame_create_descriptors (GskGpuFrame *frame)
{
GskGLFrame *self = GSK_GL_FRAME (frame);
guint32 slot;
slot = self->next_texture_slot;
self->next_texture_slot = (self->next_texture_slot + 1) % 16;
return slot;
return GSK_GPU_DESCRIPTORS (gsk_gl_descriptors_new (GSK_GL_DEVICE (gsk_gpu_frame_get_device (frame))));
}
static GskGpuBuffer *
@ -140,7 +133,7 @@ gsk_gl_frame_class_init (GskGLFrameClass *klass)
gpu_frame_class->is_busy = gsk_gl_frame_is_busy;
gpu_frame_class->setup = gsk_gl_frame_setup;
gpu_frame_class->cleanup = gsk_gl_frame_cleanup;
gpu_frame_class->get_image_descriptor = gsk_gl_frame_get_image_descriptor;
gpu_frame_class->create_descriptors = gsk_gl_frame_create_descriptors;
gpu_frame_class->create_vertex_buffer = gsk_gl_frame_create_vertex_buffer;
gpu_frame_class->create_storage_buffer = gsk_gl_frame_create_storage_buffer;
gpu_frame_class->submit = gsk_gl_frame_submit;

View File

@ -13,25 +13,14 @@ typedef struct _GskGpuBlurOp GskGpuBlurOp;
struct _GskGpuBlurOp
{
GskGpuShaderOp op;
GskGpuShaderImage image;
};
static void
gsk_gpu_blur_op_finish (GskGpuOp *op)
{
GskGpuBlurOp *self = (GskGpuBlurOp *) op;
g_object_unref (self->image.image);
}
static void
gsk_gpu_blur_op_print (GskGpuOp *op,
GskGpuFrame *frame,
GString *string,
guint indent)
{
GskGpuBlurOp *self = (GskGpuBlurOp *) op;
GskGpuShaderOp *shader = (GskGpuShaderOp *) op;
GskGpuBlurInstance *instance;
@ -39,26 +28,15 @@ gsk_gpu_blur_op_print (GskGpuOp *op,
gsk_gpu_print_op (string, indent, "blur");
gsk_gpu_print_rect (string, instance->rect);
gsk_gpu_print_image (string, self->image.image);
gsk_gpu_print_image_descriptor (string, shader->desc, instance->tex_id);
gsk_gpu_print_newline (string);
}
static const GskGpuShaderImage *
gsk_gpu_blur_op_get_images (GskGpuShaderOp *op,
gsize *n_images)
{
GskGpuBlurOp *self = (GskGpuBlurOp *) op;
*n_images = 1;
return &self->image;
}
static const GskGpuShaderOpClass GSK_GPU_BLUR_OP_CLASS = {
{
GSK_GPU_OP_SIZE (GskGpuBlurOp),
GSK_GPU_STAGE_SHADER,
gsk_gpu_blur_op_finish,
gsk_gpu_shader_op_finish,
gsk_gpu_blur_op_print,
#ifdef GDK_RENDERING_VULKAN
gsk_gpu_shader_op_vk_command,
@ -70,15 +48,14 @@ static const GskGpuShaderOpClass GSK_GPU_BLUR_OP_CLASS = {
#ifdef GDK_RENDERING_VULKAN
&gsk_gpu_blur_info,
#endif
gsk_gpu_blur_op_get_images,
gsk_gpu_blur_setup_vao
};
void
gsk_gpu_blur_op (GskGpuFrame *frame,
GskGpuShaderClip clip,
GskGpuImage *image,
GskGpuSampler sampler,
GskGpuDescriptors *desc,
guint32 descriptor,
const graphene_rect_t *rect,
const graphene_point_t *offset,
const graphene_rect_t *tex_rect,
@ -86,19 +63,16 @@ gsk_gpu_blur_op (GskGpuFrame *frame,
const GdkRGBA *blur_color)
{
GskGpuBlurInstance *instance;
GskGpuBlurOp *self;
self = (GskGpuBlurOp *) gsk_gpu_shader_op_alloc (frame,
&GSK_GPU_BLUR_OP_CLASS,
clip,
&instance);
gsk_gpu_shader_op_alloc (frame,
&GSK_GPU_BLUR_OP_CLASS,
clip,
desc,
&instance);
gsk_gpu_rect_to_float (rect, offset, instance->rect);
gsk_gpu_rect_to_float (tex_rect, offset, instance->tex_rect);
graphene_vec2_to_float (blur_direction, instance->blur_direction);
gsk_gpu_rgba_to_float (blur_color, instance->blur_color);
self->image.image = g_object_ref (image);
self->image.sampler = sampler;
self->image.descriptor = gsk_gpu_frame_get_image_descriptor (frame, image, sampler);
instance->tex_id = self->image.descriptor;
instance->tex_id = descriptor;
}

View File

@ -8,8 +8,8 @@ G_BEGIN_DECLS
void gsk_gpu_blur_op (GskGpuFrame *frame,
GskGpuShaderClip clip,
GskGpuImage *image,
GskGpuSampler sampler,
GskGpuDescriptors *desc,
guint32 descriptor,
const graphene_rect_t *rect,
const graphene_point_t *offset,
const graphene_rect_t *tex_rect,

View File

@ -16,11 +16,6 @@ struct _GskGpuBorderOp
GskGpuShaderOp op;
};
static void
gsk_gpu_border_op_finish (GskGpuOp *op)
{
}
static gboolean
color_equal (const float *color1,
const float *color2)
@ -90,7 +85,7 @@ static const GskGpuShaderOpClass GSK_GPU_BORDER_OP_CLASS = {
{
GSK_GPU_OP_SIZE (GskGpuBorderOp),
GSK_GPU_STAGE_SHADER,
gsk_gpu_border_op_finish,
gsk_gpu_shader_op_finish,
gsk_gpu_border_op_print,
#ifdef GDK_RENDERING_VULKAN
gsk_gpu_border_op_vk_command,
@ -102,7 +97,6 @@ static const GskGpuShaderOpClass GSK_GPU_BORDER_OP_CLASS = {
#ifdef GDK_RENDERING_VULKAN
&gsk_gpu_border_info,
#endif
gsk_gpu_shader_op_no_images,
gsk_gpu_border_setup_vao
};
@ -121,6 +115,7 @@ gsk_gpu_border_op (GskGpuFrame *frame,
gsk_gpu_shader_op_alloc (frame,
&GSK_GPU_BORDER_OP_CLASS,
clip,
NULL,
&instance);
gsk_rounded_rect_to_float (outline, offset, instance->outline);

View File

@ -13,25 +13,14 @@ typedef struct _GskGpuColorizeOp GskGpuColorizeOp;
struct _GskGpuColorizeOp
{
GskGpuShaderOp op;
GskGpuShaderImage image;
};
static void
gsk_gpu_colorize_op_finish (GskGpuOp *op)
{
GskGpuColorizeOp *self = (GskGpuColorizeOp *) op;
g_object_unref (self->image.image);
}
static void
gsk_gpu_colorize_op_print (GskGpuOp *op,
GskGpuFrame *frame,
GString *string,
guint indent)
{
GskGpuColorizeOp *self = (GskGpuColorizeOp *) op;
GskGpuShaderOp *shader = (GskGpuShaderOp *) op;
GskGpuColorizeInstance *instance;
@ -39,27 +28,16 @@ gsk_gpu_colorize_op_print (GskGpuOp *op,
gsk_gpu_print_op (string, indent, "colorize");
gsk_gpu_print_rect (string, instance->rect);
gsk_gpu_print_image (string, self->image.image);
gsk_gpu_print_image_descriptor (string, shader->desc, instance->tex_id);
gsk_gpu_print_rgba (string, instance->color);
gsk_gpu_print_newline (string);
}
static const GskGpuShaderImage *
gsk_gpu_colorize_op_get_images (GskGpuShaderOp *op,
gsize *n_images)
{
GskGpuColorizeOp *self = (GskGpuColorizeOp *) op;
*n_images = 1;
return &self->image;
}
static const GskGpuShaderOpClass GSK_GPU_COLORIZE_OP_CLASS = {
{
GSK_GPU_OP_SIZE (GskGpuColorizeOp),
GSK_GPU_STAGE_SHADER,
gsk_gpu_colorize_op_finish,
gsk_gpu_shader_op_finish,
gsk_gpu_colorize_op_print,
#ifdef GDK_RENDERING_VULKAN
gsk_gpu_shader_op_vk_command,
@ -71,32 +49,29 @@ static const GskGpuShaderOpClass GSK_GPU_COLORIZE_OP_CLASS = {
#ifdef GDK_RENDERING_VULKAN
&gsk_gpu_colorize_info,
#endif
gsk_gpu_colorize_op_get_images,
gsk_gpu_colorize_setup_vao
};
void
gsk_gpu_colorize_op (GskGpuFrame *frame,
GskGpuShaderClip clip,
GskGpuImage *image,
GskGpuDescriptors *descriptors,
guint32 descriptor,
const graphene_rect_t *rect,
const graphene_point_t *offset,
const graphene_rect_t *tex_rect,
const GdkRGBA *color)
{
GskGpuColorizeInstance *instance;
GskGpuColorizeOp *self;
self = (GskGpuColorizeOp *) gsk_gpu_shader_op_alloc (frame,
&GSK_GPU_COLORIZE_OP_CLASS,
clip,
&instance);
gsk_gpu_shader_op_alloc (frame,
&GSK_GPU_COLORIZE_OP_CLASS,
clip,
descriptors,
&instance);
gsk_gpu_rect_to_float (rect, offset, instance->rect);
gsk_gpu_rect_to_float (tex_rect, offset, instance->tex_rect);
self->image.image = g_object_ref (image);
self->image.sampler = GSK_GPU_SAMPLER_DEFAULT;
self->image.descriptor = gsk_gpu_frame_get_image_descriptor (frame, image, GSK_GPU_SAMPLER_DEFAULT);
instance->tex_id = self->image.descriptor;
instance->tex_id = descriptor;
gsk_gpu_rgba_to_float (color, instance->color);
}

View File

@ -8,7 +8,8 @@ G_BEGIN_DECLS
void gsk_gpu_colorize_op (GskGpuFrame *frame,
GskGpuShaderClip clip,
GskGpuImage *image,
GskGpuDescriptors *desc,
guint32 descriptor,
const graphene_rect_t *rect,
const graphene_point_t *offset,
const graphene_rect_t *tex_rect,

View File

@ -16,11 +16,6 @@ struct _GskGpuColorOp
GskGpuShaderOp op;
};
static void
gsk_gpu_color_op_finish (GskGpuOp *op)
{
}
static void
gsk_gpu_color_op_print (GskGpuOp *op,
GskGpuFrame *frame,
@ -42,7 +37,7 @@ static const GskGpuShaderOpClass GSK_GPU_COLOR_OP_CLASS = {
{
GSK_GPU_OP_SIZE (GskGpuColorOp),
GSK_GPU_STAGE_SHADER,
gsk_gpu_color_op_finish,
gsk_gpu_shader_op_finish,
gsk_gpu_color_op_print,
#ifdef GDK_RENDERING_VULKAN
gsk_gpu_shader_op_vk_command,
@ -54,7 +49,6 @@ static const GskGpuShaderOpClass GSK_GPU_COLOR_OP_CLASS = {
#ifdef GDK_RENDERING_VULKAN
&gsk_gpu_color_info,
#endif
gsk_gpu_shader_op_no_images,
gsk_gpu_color_setup_vao
};
@ -70,6 +64,7 @@ gsk_gpu_color_op (GskGpuFrame *frame,
gsk_gpu_shader_op_alloc (frame,
&GSK_GPU_COLOR_OP_CLASS,
clip,
NULL,
&instance);
gsk_gpu_rect_to_float (rect, offset, instance->rect);

gsk/gpu/gskgpudescriptors.c
View File

@ -0,0 +1,158 @@
#include "config.h"
#include "gskgpudescriptorsprivate.h"
typedef struct _GskGpuImageEntry GskGpuImageEntry;
struct _GskGpuImageEntry
{
GskGpuImage *image;
GskGpuSampler sampler;
guint32 descriptor;
};
static void
gsk_gpu_image_entry_clear (gpointer data)
{
GskGpuImageEntry *entry = data;
g_object_unref (entry->image);
}
#define GDK_ARRAY_NAME gsk_gpu_image_entries
#define GDK_ARRAY_TYPE_NAME GskGpuImageEntries
#define GDK_ARRAY_ELEMENT_TYPE GskGpuImageEntry
#define GDK_ARRAY_FREE_FUNC gsk_gpu_image_entry_clear
#define GDK_ARRAY_BY_VALUE 1
#define GDK_ARRAY_PREALLOC 16
#define GDK_ARRAY_NO_MEMSET 1
#include "gdk/gdkarrayimpl.c"
typedef struct _GskGpuDescriptorsPrivate GskGpuDescriptorsPrivate;
struct _GskGpuDescriptorsPrivate
{
GskGpuImageEntries images;
};
G_DEFINE_TYPE_WITH_PRIVATE (GskGpuDescriptors, gsk_gpu_descriptors, G_TYPE_OBJECT)
static void
gsk_gpu_descriptors_finalize (GObject *object)
{
GskGpuDescriptors *self = GSK_GPU_DESCRIPTORS (object);
GskGpuDescriptorsPrivate *priv = gsk_gpu_descriptors_get_instance_private (self);
gsk_gpu_image_entries_clear (&priv->images);
G_OBJECT_CLASS (gsk_gpu_descriptors_parent_class)->finalize (object);
}
static void
gsk_gpu_descriptors_class_init (GskGpuDescriptorsClass *klass)
{
GObjectClass *object_class = G_OBJECT_CLASS (klass);
object_class->finalize = gsk_gpu_descriptors_finalize;
}
static void
gsk_gpu_descriptors_init (GskGpuDescriptors *self)
{
GskGpuDescriptorsPrivate *priv = gsk_gpu_descriptors_get_instance_private (self);
gsk_gpu_image_entries_init (&priv->images);
}
gsize
gsk_gpu_descriptors_get_size (GskGpuDescriptors *self)
{
GskGpuDescriptorsPrivate *priv = gsk_gpu_descriptors_get_instance_private (self);
return gsk_gpu_image_entries_get_size (&priv->images);
}
void
gsk_gpu_descriptors_set_size (GskGpuDescriptors *self,
gsize new_size)
{
GskGpuDescriptorsPrivate *priv = gsk_gpu_descriptors_get_instance_private (self);
g_assert (new_size <= gsk_gpu_image_entries_get_size (&priv->images));
gsk_gpu_image_entries_set_size (&priv->images, new_size);
}
GskGpuImage *
gsk_gpu_descriptors_get_image (GskGpuDescriptors *self,
gsize id)
{
GskGpuDescriptorsPrivate *priv = gsk_gpu_descriptors_get_instance_private (self);
const GskGpuImageEntry *entry = gsk_gpu_image_entries_get (&priv->images, id);
return entry->image;
}
GskGpuSampler
gsk_gpu_descriptors_get_sampler (GskGpuDescriptors *self,
gsize id)
{
GskGpuDescriptorsPrivate *priv = gsk_gpu_descriptors_get_instance_private (self);
const GskGpuImageEntry *entry = gsk_gpu_image_entries_get (&priv->images, id);
return entry->sampler;
}
gsize
gsk_gpu_descriptors_find_image (GskGpuDescriptors *self,
guint32 descriptor)
{
GskGpuDescriptorsPrivate *priv = gsk_gpu_descriptors_get_instance_private (self);
gsize i;
for (i = 0; i < gsk_gpu_image_entries_get_size (&priv->images); i++)
{
const GskGpuImageEntry *entry = gsk_gpu_image_entries_get (&priv->images, i);
if (entry->descriptor == descriptor)
return i;
}
g_return_val_if_reached ((gsize) -1);
}
gboolean
gsk_gpu_descriptors_add_image (GskGpuDescriptors *self,
GskGpuImage *image,
GskGpuSampler sampler,
guint32 *out_descriptor)
{
GskGpuDescriptorsPrivate *priv = gsk_gpu_descriptors_get_instance_private (self);
gsize i;
guint32 descriptor;
for (i = 0; i < gsk_gpu_image_entries_get_size (&priv->images); i++)
{
const GskGpuImageEntry *entry = gsk_gpu_image_entries_get (&priv->images, i);
if (entry->image == image && entry->sampler == sampler)
{
*out_descriptor = entry->descriptor;
return TRUE;
}
}
if (!GSK_GPU_DESCRIPTORS_GET_CLASS (self)->add_image (self, image, sampler, &descriptor))
return FALSE;
gsk_gpu_image_entries_append (&priv->images,
&(GskGpuImageEntry) {
.image = g_object_ref (image),
.sampler = sampler,
.descriptor = descriptor
});
*out_descriptor = descriptor;
return TRUE;
}

View File

@ -0,0 +1,51 @@
#pragma once
#include "gskgputypesprivate.h"
G_BEGIN_DECLS
#define GSK_TYPE_GPU_DESCRIPTORS (gsk_gpu_descriptors_get_type ())
#define GSK_GPU_DESCRIPTORS(o) (G_TYPE_CHECK_INSTANCE_CAST ((o), GSK_TYPE_GPU_DESCRIPTORS, GskGpuDescriptors))
#define GSK_GPU_DESCRIPTORS_CLASS(k) (G_TYPE_CHECK_CLASS_CAST ((k), GSK_TYPE_GPU_DESCRIPTORS, GskGpuDescriptorsClass))
#define GSK_IS_GPU_DESCRIPTORS(o) (G_TYPE_CHECK_INSTANCE_TYPE ((o), GSK_TYPE_GPU_DESCRIPTORS))
#define GSK_IS_GPU_DESCRIPTORS_CLASS(k) (G_TYPE_CHECK_CLASS_TYPE ((k), GSK_TYPE_GPU_DESCRIPTORS))
#define GSK_GPU_DESCRIPTORS_GET_CLASS(o) (G_TYPE_INSTANCE_GET_CLASS ((o), GSK_TYPE_GPU_DESCRIPTORS, GskGpuDescriptorsClass))
typedef struct _GskGpuDescriptorsClass GskGpuDescriptorsClass;
struct _GskGpuDescriptors
{
GObject parent_instance;
};
struct _GskGpuDescriptorsClass
{
GObjectClass parent_class;
gboolean (* add_image) (GskGpuDescriptors *self,
GskGpuImage *image,
GskGpuSampler sampler,
guint32 *out_id);
};
GType gsk_gpu_descriptors_get_type (void) G_GNUC_CONST;
gsize gsk_gpu_descriptors_get_size (GskGpuDescriptors *self);
void gsk_gpu_descriptors_set_size (GskGpuDescriptors *self,
gsize new_size);
GskGpuImage * gsk_gpu_descriptors_get_image (GskGpuDescriptors *self,
gsize id);
GskGpuSampler gsk_gpu_descriptors_get_sampler (GskGpuDescriptors *self,
gsize id);
gsize gsk_gpu_descriptors_find_image (GskGpuDescriptors *self,
guint32 descriptor);
gboolean gsk_gpu_descriptors_add_image (GskGpuDescriptors *self,
GskGpuImage *image,
GskGpuSampler sampler,
guint32 *out_descriptor);
G_DEFINE_AUTOPTR_CLEANUP_FUNC(GskGpuDescriptors, g_object_unref)
G_END_DECLS

View File

@ -344,6 +344,12 @@ gsk_gpu_frame_alloc_op (GskGpuFrame *self,
return gsk_gpu_ops_index (&priv->ops, pos);
}
GskGpuDescriptors *
gsk_gpu_frame_create_descriptors (GskGpuFrame *self)
{
return GSK_GPU_FRAME_GET_CLASS (self)->create_descriptors (self);
}
static GskGpuBuffer *
gsk_gpu_frame_create_vertex_buffer (GskGpuFrame *self,
gsize size)
@ -423,14 +429,6 @@ gsk_gpu_frame_ensure_storage_buffer (GskGpuFrame *self)
priv->storage_buffer_data = gsk_gpu_buffer_map (priv->storage_buffer);
}
guint32
gsk_gpu_frame_get_image_descriptor (GskGpuFrame *self,
GskGpuImage *image,
GskGpuSampler sampler)
{
return GSK_GPU_FRAME_GET_CLASS (self)->get_image_descriptor (self, image, sampler);
}
static void
gsk_gpu_frame_buffer_memory_ensure_size (GskGpuBufferWriter *writer,
gsize size)

View File

@ -27,9 +27,7 @@ struct _GskGpuFrameClass
gboolean (* is_busy) (GskGpuFrame *self);
void (* setup) (GskGpuFrame *self);
void (* cleanup) (GskGpuFrame *self);
guint32 (* get_image_descriptor) (GskGpuFrame *self,
GskGpuImage *image,
GskGpuSampler sampler);
GskGpuDescriptors * (* create_descriptors) (GskGpuFrame *self);
GskGpuBuffer * (* create_vertex_buffer) (GskGpuFrame *self,
gsize size);
GskGpuBuffer * (* create_storage_buffer) (GskGpuFrame *self,
@ -56,13 +54,11 @@ gboolean gsk_gpu_frame_should_optimize (GskGpuF
gpointer gsk_gpu_frame_alloc_op (GskGpuFrame *self,
gsize size);
GskGpuDescriptors * gsk_gpu_frame_create_descriptors (GskGpuFrame *self);
gsize gsk_gpu_frame_reserve_vertex_data (GskGpuFrame *self,
gsize size);
guchar * gsk_gpu_frame_get_vertex_data (GskGpuFrame *self,
gsize offset);
guint32 gsk_gpu_frame_get_image_descriptor (GskGpuFrame *self,
GskGpuImage *image,
GskGpuSampler sampler);
void gsk_gpu_frame_write_buffer_memory (GskGpuFrame *self,
GskGpuBufferWriter *writer);

View File

@ -8,6 +8,7 @@
#include "gskgpuclipprivate.h"
#include "gskgpucolorizeopprivate.h"
#include "gskgpucoloropprivate.h"
#include "gskgpudescriptorsprivate.h"
#include "gskgpudeviceprivate.h"
#include "gskgpuframeprivate.h"
#include "gskgpuglobalsopprivate.h"
@ -70,21 +71,6 @@
* never uses it, other than to allow the vertex shaders to emit its vertices.
*/
static void
gsk_gpu_shader_image_clear (gpointer data)
{
GskGpuShaderImage *image = data;
g_object_unref (image->image);
}
#define GDK_ARRAY_NAME gsk_gpu_pattern_images
#define GDK_ARRAY_TYPE_NAME GskGpuPatternImages
#define GDK_ARRAY_ELEMENT_TYPE GskGpuShaderImage
#define GDK_ARRAY_FREE_FUNC gsk_gpu_shader_image_clear
#define GDK_ARRAY_PREALLOC 8
#define GDK_ARRAY_BY_VALUE 1
#include "gdk/gdkarrayimpl.c"
typedef struct _GskGpuNodeProcessor GskGpuNodeProcessor;
typedef struct _GskGpuPatternWriter GskGpuPatternWriter;
@ -97,28 +83,29 @@ typedef enum {
struct _GskGpuNodeProcessor
{
GskGpuFrame *frame;
cairo_rectangle_int_t scissor;
graphene_point_t offset;
graphene_matrix_t projection;
graphene_vec2_t scale;
GskTransform *modelview;
GskGpuClip clip;
GskGpuFrame *frame;
GskGpuDescriptors *desc;
cairo_rectangle_int_t scissor;
graphene_point_t offset;
graphene_matrix_t projection;
graphene_vec2_t scale;
GskTransform *modelview;
GskGpuClip clip;
GskGpuGlobals pending_globals;
GskGpuGlobals pending_globals;
};
struct _GskGpuPatternWriter
{
GskGpuFrame *frame;
GskGpuFrame *frame;
GskGpuDescriptors *desc;
graphene_rect_t bounds;
graphene_point_t offset;
graphene_vec2_t scale;
guint stack;
graphene_rect_t bounds;
graphene_point_t offset;
graphene_vec2_t scale;
guint stack;
GskGpuBufferWriter writer;
GskGpuPatternImages images;
GskGpuBufferWriter writer;
};
static void gsk_gpu_node_processor_add_node (GskGpuNodeProcessor *self,
@ -130,16 +117,22 @@ static void
gsk_gpu_node_processor_finish (GskGpuNodeProcessor *self)
{
g_clear_pointer (&self->modelview, gsk_transform_unref);
g_clear_object (&self->desc);
}
static void
gsk_gpu_node_processor_init (GskGpuNodeProcessor *self,
GskGpuFrame *frame,
GskGpuDescriptors *desc,
GskGpuImage *target,
const cairo_rectangle_int_t *clip,
const graphene_rect_t *viewport)
{
self->frame = frame;
if (desc)
self->desc = g_object_ref (desc);
else
self->desc = NULL;
self->scissor = *clip;
gsk_gpu_clip_init_empty (&self->clip, &GRAPHENE_RECT_INIT (0, 0, viewport->size.width, viewport->size.height));
@ -196,6 +189,32 @@ gsk_gpu_node_processor_sync_globals (GskGpuNodeProcessor *self,
gsk_gpu_node_processor_emit_scissor_op (self);
}
static guint32
gsk_gpu_node_processor_add_image (GskGpuNodeProcessor *self,
GskGpuImage *image,
GskGpuSampler sampler)
{
guint32 descriptor;
if (self->desc != NULL)
{
if (gsk_gpu_descriptors_add_image (self->desc, image, sampler, &descriptor))
return descriptor;
g_object_unref (self->desc);
}
self->desc = gsk_gpu_frame_create_descriptors (self->frame);
if (!gsk_gpu_descriptors_add_image (self->desc, image, sampler, &descriptor))
{
g_assert_not_reached ();
return 0;
}
return descriptor;
}
static void
rect_round_to_pixels (const graphene_rect_t *src,
const graphene_vec2_t *pixel_scale,
@ -228,6 +247,7 @@ gsk_gpu_node_processor_process (GskGpuFrame *frame,
gsk_gpu_node_processor_init (&self,
frame,
NULL,
target,
clip,
viewport);
@ -245,6 +265,7 @@ gsk_gpu_pattern_writer_init (GskGpuPatternWriter *self,
const graphene_rect_t *bounds)
{
self->frame = frame;
self->desc = NULL;
self->bounds = GRAPHENE_RECT_INIT (bounds->origin.x + offset->x,
bounds->origin.y + offset->y,
bounds->size.width,
@ -254,7 +275,6 @@ gsk_gpu_pattern_writer_init (GskGpuPatternWriter *self,
self->stack = 0;
gsk_gpu_frame_write_buffer_memory (frame, &self->writer);
gsk_gpu_pattern_images_init (&self->images);
}
static gboolean
@ -278,7 +298,7 @@ static void
gsk_gpu_pattern_writer_finish (GskGpuPatternWriter *self)
{
g_assert (self->stack == 0);
gsk_gpu_pattern_images_clear (&self->images);
g_clear_object (&self->desc);
}
static void
@ -294,24 +314,14 @@ gsk_gpu_pattern_writer_commit_op (GskGpuPatternWriter *self,
GskGpuShaderClip clip)
{
guint32 pattern_id;
GskGpuShaderImage *images;
gsize n_images;
pattern_id = gsk_gpu_buffer_writer_commit (&self->writer) / sizeof (float);
n_images = gsk_gpu_pattern_images_get_size (&self->images);
images = gsk_gpu_pattern_images_steal (&self->images);
{
for (gsize i = 0; i < n_images; i++)
g_assert (images[i].image);
}
gsk_gpu_uber_op (self->frame,
clip,
&self->bounds,
&self->offset,
images,
n_images,
self->desc,
pattern_id);
gsk_gpu_pattern_writer_finish (self);
@ -323,18 +333,10 @@ gsk_gpu_pattern_writer_add_image (GskGpuPatternWriter *self,
GskGpuSampler sampler,
guint32 *out_descriptor)
{
if (gsk_gpu_pattern_images_get_size (&self->images) >= 16)
return FALSE;
if (self->desc == NULL)
self->desc = gsk_gpu_frame_create_descriptors (self->frame);
*out_descriptor = gsk_gpu_frame_get_image_descriptor (self->frame, image, sampler);
gsk_gpu_pattern_images_append (&self->images,
&(GskGpuShaderImage) {
image,
sampler,
*out_descriptor
});
return TRUE;
return gsk_gpu_descriptors_add_image (self->desc, image, sampler, out_descriptor);
}
static void
@ -566,11 +568,14 @@ gsk_gpu_node_processor_blur_op (GskGpuNodeProcessor *self,
const graphene_point_t *shadow_offset,
float blur_radius,
const GdkRGBA *blur_color_or_null,
GskGpuImage *source,
GskGpuDescriptors *source_desc,
guint32 source_descriptor,
GdkMemoryDepth source_depth,
const graphene_rect_t *source_rect)
{
GskGpuNodeProcessor other;
GskGpuImage *intermediate;
guint32 intermediate_descriptor;
graphene_vec2_t direction;
graphene_rect_t clip_rect, intermediate_rect;
graphene_point_t real_offset;
@ -589,11 +594,12 @@ gsk_gpu_node_processor_blur_op (GskGpuNodeProcessor *self,
height = ceil (graphene_vec2_get_y (&self->scale) * intermediate_rect.size.height);
intermediate = gsk_gpu_device_create_offscreen_image (gsk_gpu_frame_get_device (self->frame),
gdk_memory_format_get_depth (gsk_gpu_image_get_format (source)),
source_depth,
width, height);
gsk_gpu_node_processor_init (&other,
self->frame,
source_desc,
intermediate,
&(cairo_rectangle_int_t) { 0, 0, width, height },
&intermediate_rect);
@ -608,8 +614,8 @@ gsk_gpu_node_processor_blur_op (GskGpuNodeProcessor *self,
graphene_vec2_init (&direction, blur_radius, 0.0f);
gsk_gpu_blur_op (other.frame,
gsk_gpu_clip_get_shader_clip (&other.clip, &other.offset, &intermediate_rect),
source,
GSK_GPU_SAMPLER_TRANSPARENT,
source_desc,
source_descriptor,
&intermediate_rect,
&other.offset,
source_rect,
@ -625,10 +631,11 @@ gsk_gpu_node_processor_blur_op (GskGpuNodeProcessor *self,
real_offset = GRAPHENE_POINT_INIT (self->offset.x + shadow_offset->x,
self->offset.y + shadow_offset->y);
graphene_vec2_init (&direction, 0.0f, blur_radius);
intermediate_descriptor = gsk_gpu_node_processor_add_image (self, intermediate, GSK_GPU_SAMPLER_TRANSPARENT);
gsk_gpu_blur_op (self->frame,
gsk_gpu_clip_get_shader_clip (&self->clip, &real_offset, rect),
intermediate,
GSK_GPU_SAMPLER_TRANSPARENT,
self->desc,
intermediate_descriptor,
rect,
&real_offset,
&intermediate_rect,
@ -644,6 +651,7 @@ gsk_gpu_node_processor_add_fallback_node (GskGpuNodeProcessor *self,
{
GskGpuImage *image;
graphene_rect_t clipped_bounds;
guint32 descriptor;
if (!gsk_gpu_node_processor_clip_node_bounds (self, node, &clipped_bounds))
return;
@ -654,11 +662,12 @@ gsk_gpu_node_processor_add_fallback_node (GskGpuNodeProcessor *self,
node,
&self->scale,
&clipped_bounds);
descriptor = gsk_gpu_node_processor_add_image (self, image, GSK_GPU_SAMPLER_DEFAULT);
gsk_gpu_texture_op (self->frame,
gsk_gpu_clip_get_shader_clip (&self->clip, &self->offset, &clipped_bounds),
image,
GSK_GPU_SAMPLER_DEFAULT,
self->desc,
descriptor,
&node->bounds,
&self->offset,
&clipped_bounds);
@ -1083,6 +1092,7 @@ gsk_gpu_node_processor_add_texture_node (GskGpuNodeProcessor *self,
GskGpuImage *image;
GdkTexture *texture;
gint64 timestamp;
guint32 descriptor;
device = gsk_gpu_frame_get_device (self->frame);
texture = gsk_texture_node_get_texture (node);
@ -1095,11 +1105,12 @@ gsk_gpu_node_processor_add_texture_node (GskGpuNodeProcessor *self,
gsk_gpu_device_cache_texture_image (device, texture, timestamp, image);
image = g_object_ref (image);
}
descriptor = gsk_gpu_node_processor_add_image (self, image, GSK_GPU_SAMPLER_DEFAULT);
gsk_gpu_texture_op (self->frame,
gsk_gpu_clip_get_shader_clip (&self->clip, &self->offset, &node->bounds),
image,
GSK_GPU_SAMPLER_DEFAULT,
self->desc,
descriptor,
&node->bounds,
&self->offset,
&node->bounds);
@ -1129,12 +1140,17 @@ gsk_gpu_node_processor_create_texture_pattern (GskGpuPatternWriter *self,
}
if (!gsk_gpu_pattern_writer_add_image (self, image, GSK_GPU_SAMPLER_DEFAULT, &descriptor))
return FALSE;
{
g_object_unref (image);
return FALSE;
}
gsk_gpu_buffer_writer_append_uint (&self->writer, GSK_GPU_PATTERN_TEXTURE);
gsk_gpu_buffer_writer_append_uint (&self->writer, descriptor);
gsk_gpu_buffer_writer_append_rect (&self->writer, &node->bounds, &self->offset);
g_object_unref (image);
return TRUE;
}
@ -1269,6 +1285,7 @@ gsk_gpu_node_processor_add_blur_node (GskGpuNodeProcessor *self,
GskGpuImage *image;
graphene_rect_t tex_rect, clip_rect;
float blur_radius, clip_radius;
guint32 descriptor;
child = gsk_blur_node_get_child (node);
blur_radius = gsk_blur_node_get_radius (node);
@ -1286,13 +1303,16 @@ gsk_gpu_node_processor_add_blur_node (GskGpuNodeProcessor *self,
&self->scale,
child,
&tex_rect);
descriptor = gsk_gpu_node_processor_add_image (self, image, GSK_GPU_SAMPLER_TRANSPARENT);
gsk_gpu_node_processor_blur_op (self,
&node->bounds,
graphene_point_zero (),
blur_radius,
NULL,
image,
self->desc,
descriptor,
gdk_memory_format_get_depth (gsk_gpu_image_get_format (image)),
&tex_rect);
g_object_unref (image);
@ -1306,6 +1326,8 @@ gsk_gpu_node_processor_add_shadow_node (GskGpuNodeProcessor *self,
graphene_rect_t clip_bounds, tex_rect;
GskRenderNode *child;
gsize i, n_shadows;
GskGpuDescriptors *desc;
guint32 descriptor;
n_shadows = gsk_shadow_node_get_n_shadows (node);
child = gsk_shadow_node_get_child (node);
@ -1321,6 +1343,8 @@ gsk_gpu_node_processor_add_shadow_node (GskGpuNodeProcessor *self,
&self->scale,
child,
&tex_rect);
descriptor = gsk_gpu_node_processor_add_image (self, image, GSK_GPU_SAMPLER_TRANSPARENT);
desc = self->desc;
for (i = 0; i < n_shadows; i++)
{
@ -1331,7 +1355,8 @@ gsk_gpu_node_processor_add_shadow_node (GskGpuNodeProcessor *self,
self->offset.y + shadow->dy);
gsk_gpu_colorize_op (self->frame,
gsk_gpu_clip_get_shader_clip (&self->clip, &shadow_offset, &child->bounds),
image,
desc,
descriptor,
&child->bounds,
&shadow_offset,
&tex_rect,
@ -1347,15 +1372,18 @@ gsk_gpu_node_processor_add_shadow_node (GskGpuNodeProcessor *self,
&GRAPHENE_POINT_INIT (shadow->dx, shadow->dy),
shadow->radius,
&shadow->color,
image,
desc,
descriptor,
gdk_memory_format_get_depth (gsk_gpu_image_get_format (image)),
&tex_rect);
}
}
descriptor = gsk_gpu_node_processor_add_image (self, image, GSK_GPU_SAMPLER_DEFAULT);
gsk_gpu_texture_op (self->frame,
gsk_gpu_clip_get_shader_clip (&self->clip, &self->offset, &child->bounds),
image,
GSK_GPU_SAMPLER_DEFAULT,
self->desc,
descriptor,
&child->bounds,
&self->offset,
&tex_rect);
@ -1492,6 +1520,7 @@ gsk_gpu_node_processor_add_glyph_node (GskGpuNodeProcessor *self,
GskGpuImage *image;
graphene_rect_t glyph_bounds, glyph_tex_rect;
graphene_point_t glyph_offset;
guint32 descriptor;
image = gsk_gpu_device_lookup_glyph_image (device,
self->frame,
@ -1506,18 +1535,20 @@ gsk_gpu_node_processor_add_glyph_node (GskGpuNodeProcessor *self,
graphene_rect_scale (&GRAPHENE_RECT_INIT(0, 0, glyph_bounds.size.width, glyph_bounds.size.height), inv_scale, inv_scale, &glyph_bounds);
glyph_offset = GRAPHENE_POINT_INIT (offset.x - glyph_offset.x * inv_scale + (float) glyphs[i].geometry.x_offset / PANGO_SCALE,
offset.y - glyph_offset.y * inv_scale + (float) glyphs[i].geometry.y_offset / PANGO_SCALE);
descriptor = gsk_gpu_node_processor_add_image (self, image, GSK_GPU_SAMPLER_DEFAULT);
if (gsk_text_node_has_color_glyphs (node))
gsk_gpu_texture_op (self->frame,
gsk_gpu_clip_get_shader_clip (&self->clip, &glyph_offset, &glyph_bounds),
image,
GSK_GPU_SAMPLER_DEFAULT,
self->desc,
descriptor,
&glyph_bounds,
&glyph_offset,
&glyph_tex_rect);
else
gsk_gpu_colorize_op (self->frame,
gsk_gpu_clip_get_shader_clip (&self->clip, &glyph_offset, &glyph_bounds),
image,
self->desc,
descriptor,
&glyph_bounds,
&glyph_offset,
&glyph_tex_rect,
@ -1577,7 +1608,7 @@ gsk_gpu_node_processor_create_glyph_pattern (GskGpuPatternWriter *self,
if (image != last_image)
{
if (!gsk_gpu_pattern_writer_add_image (self, g_object_ref (image), GSK_GPU_SAMPLER_DEFAULT, &tex_id))
if (!gsk_gpu_pattern_writer_add_image (self, image, GSK_GPU_SAMPLER_DEFAULT, &tex_id))
return FALSE;
last_image = image;
@ -1942,12 +1973,12 @@ gsk_gpu_node_processor_create_node_pattern (GskGpuPatternWriter *self,
if (nodes_vtable[node_type].create_pattern != NULL)
{
gsize size_before = gsk_gpu_buffer_writer_get_size (&self->writer);
gsize images_before = gsk_gpu_pattern_images_get_size (&self->images);
gsize images_before = self->desc ? gsk_gpu_descriptors_get_size (self->desc) : 0;
if (nodes_vtable[node_type].create_pattern (self, node))
return TRUE;
gsk_gpu_buffer_writer_rewind (&self->writer, size_before);
g_assert (gsk_gpu_pattern_images_get_size (&self->images) >= images_before);
gsk_gpu_pattern_images_set_size (&self->images, images_before);
if (self->desc)
gsk_gpu_descriptors_set_size (self->desc, images_before);
}
tmp_data = gsk_gpu_buffer_writer_backup (&self->writer, &tmp_size);
@ -1977,12 +2008,17 @@ gsk_gpu_node_processor_create_node_pattern (GskGpuPatternWriter *self,
}
if (!gsk_gpu_pattern_writer_add_image (self, image, GSK_GPU_SAMPLER_DEFAULT, &tex_id))
return FALSE;
{
g_object_unref (image);
return FALSE;
}
gsk_gpu_buffer_writer_append_uint (&self->writer, GSK_GPU_PATTERN_TEXTURE);
gsk_gpu_buffer_writer_append_uint (&self->writer, tex_id);
gsk_gpu_buffer_writer_append_rect (&self->writer, &bounds, &self->offset);
g_object_unref (image);
return TRUE;
}

View File

@ -2,6 +2,7 @@
#include "gskgpuprintprivate.h"
#include "gskgpudescriptorsprivate.h"
#include "gskgpuimageprivate.h"
void
@ -102,3 +103,13 @@ gsk_gpu_print_image (GString *string,
gsk_gpu_image_get_width (image),
gsk_gpu_image_get_height (image));
}
void
gsk_gpu_print_image_descriptor (GString *string,
GskGpuDescriptors *desc,
guint32 descriptor)
{
gsize id = gsk_gpu_descriptors_find_image (desc, descriptor);
gsk_gpu_print_image (string, gsk_gpu_descriptors_get_image (desc, id));
}

View File

@ -25,3 +25,6 @@ void gsk_gpu_print_rgba (GString
const float rgba[4]);
void gsk_gpu_print_image (GString *string,
GskGpuImage *image);
void gsk_gpu_print_image_descriptor (GString *string,
GskGpuDescriptors *desc,
guint32 descriptor);

View File

@ -11,6 +11,7 @@
#include "gskrendernodeprivate.h"
#ifdef GDK_RENDERING_VULKAN
#include "gskvulkanimageprivate.h"
#include "gskvulkandescriptorsprivate.h"
#endif
typedef struct _GskGpuRenderPassOp GskGpuRenderPassOp;
@ -67,8 +68,7 @@ gsk_gpu_render_pass_op_do_barriers (GskGpuRenderPassOp *self,
{
GskGpuShaderOp *shader;
GskGpuOp *op;
const GskGpuShaderImage *images;
gsize i, n_images;
GskGpuDescriptors *desc = NULL;
for (op = ((GskGpuOp *) self)->next;
op->op_class->stage != GSK_GPU_STAGE_END_PASS;
@ -79,15 +79,11 @@ gsk_gpu_render_pass_op_do_barriers (GskGpuRenderPassOp *self,
shader = (GskGpuShaderOp *) op;
images = gsk_gpu_shader_op_get_images (shader, &n_images);
for (i = 0; i < n_images; i++)
{
gsk_vulkan_image_transition (GSK_VULKAN_IMAGE (images[i].image),
command_buffer,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
VK_ACCESS_SHADER_READ_BIT);
}
if (shader->desc == NULL || shader->desc == desc)
continue;
desc = shader->desc;
gsk_vulkan_descriptors_transition (GSK_VULKAN_DESCRIPTORS (desc), command_buffer);
}
}

View File

@ -3,10 +3,12 @@
#include "gskgpushaderopprivate.h"
#include "gskgpuframeprivate.h"
#include "gskgldescriptorsprivate.h"
#include "gskgldeviceprivate.h"
#include "gskglframeprivate.h"
#include "gskglimageprivate.h"
#ifdef GDK_RENDERING_VULKAN
#include "gskvulkandescriptorsprivate.h"
#include "gskvulkandeviceprivate.h"
#endif
@ -16,11 +18,12 @@
*/
#define MAX_MERGE_OPS (10 * 1000)
const GskGpuShaderImage *
gsk_gpu_shader_op_get_images (GskGpuShaderOp *op,
gsize *n_images)
void
gsk_gpu_shader_op_finish (GskGpuOp *op)
{
return ((GskGpuShaderOpClass *) ((GskGpuOp *) op)->op_class)->get_images (op, n_images);
GskGpuShaderOp *self = (GskGpuShaderOp *) op;
g_clear_object (&self->desc);
}
#ifdef GDK_RENDERING_VULKAN
@ -87,24 +90,13 @@ gsk_gpu_shader_op_gl_command_n (GskGpuOp *op,
{
GskGpuShaderOp *self = (GskGpuShaderOp *) op;
GskGpuShaderOpClass *shader_op_class = (GskGpuShaderOpClass *) op->op_class;
const GskGpuShaderImage *images;
GskGLDevice *device;
gsize i, n_images;
device = GSK_GL_DEVICE (gsk_gpu_frame_get_device (frame));
gsk_gl_frame_use_program (GSK_GL_FRAME (frame),
shader_op_class,
self->clip);
images = gsk_gpu_shader_op_get_images (self, &n_images);
for (i = 0; i < n_images; i++)
{
glActiveTexture (GL_TEXTURE0 + images[i].descriptor);
gsk_gl_image_bind_texture (GSK_GL_IMAGE (images[i].image));
glBindSampler (images[i].descriptor,
gsk_gl_device_get_sampler_id (device, images[i].sampler));
}
if (self->desc)
gsk_gl_descriptors_use (GSK_GL_DESCRIPTORS (self->desc));
if (gsk_gpu_frame_should_optimize (frame, GSK_GPU_OPTIMIZE_GL_BASE_INSTANCE))
{
@ -139,6 +131,7 @@ GskGpuShaderOp *
gsk_gpu_shader_op_alloc (GskGpuFrame *frame,
const GskGpuShaderOpClass *op_class,
GskGpuShaderClip clip,
GskGpuDescriptors *desc,
gpointer out_vertex_data)
{
GskGpuShaderOp *self;
@ -146,6 +139,10 @@ gsk_gpu_shader_op_alloc (GskGpuFrame *frame,
self = (GskGpuShaderOp *) gsk_gpu_op_alloc (frame, &op_class->parent_class);
self->clip = clip;
if (desc)
self->desc = g_object_ref (desc);
else
self->desc = NULL;
self->vertex_offset = gsk_gpu_frame_reserve_vertex_data (frame, op_class->vertex_size);
*((gpointer *) out_vertex_data) = gsk_gpu_frame_get_vertex_data (frame, self->vertex_offset);
@ -153,12 +150,3 @@ gsk_gpu_shader_op_alloc (GskGpuFrame *frame,
return self;
}
const GskGpuShaderImage *
gsk_gpu_shader_op_no_images (GskGpuShaderOp *op,
gsize *n_images)
{
*n_images = 0;
return NULL;
}

View File

@ -6,19 +6,11 @@
G_BEGIN_DECLS
typedef struct _GskGpuShaderImage GskGpuShaderImage;
struct _GskGpuShaderImage
{
GskGpuImage *image;
GskGpuSampler sampler;
guint32 descriptor;
};
struct _GskGpuShaderOp
{
GskGpuOp parent_op;
GskGpuDescriptors *desc;
GskGpuShaderClip clip;
gsize vertex_offset;
};
@ -32,16 +24,17 @@ struct _GskGpuShaderOpClass
#ifdef GDK_RENDERING_VULKAN
const VkPipelineVertexInputStateCreateInfo *vertex_input_state;
#endif
const GskGpuShaderImage * (* get_images) (GskGpuShaderOp *op,
gsize *n_images);
void (* setup_vao) (gsize offset);
};
GskGpuShaderOp * gsk_gpu_shader_op_alloc (GskGpuFrame *frame,
const GskGpuShaderOpClass *op_class,
GskGpuShaderClip clip,
GskGpuDescriptors *desc,
gpointer out_vertex_data);
void gsk_gpu_shader_op_finish (GskGpuOp *op);
#ifdef GDK_RENDERING_VULKAN
GskGpuOp * gsk_gpu_shader_op_vk_command_n (GskGpuOp *op,
GskGpuFrame *frame,
@ -63,12 +56,6 @@ GskGpuOp * gsk_gpu_shader_op_gl_command (GskGpuO
GskGpuFrame *frame,
gsize flip_y);
const GskGpuShaderImage *
gsk_gpu_shader_op_get_images (GskGpuShaderOp *op,
gsize *n_images);
const GskGpuShaderImage *
gsk_gpu_shader_op_no_images (GskGpuShaderOp *op,
gsize *n_images);
static inline void
gsk_gpu_rgba_to_float (const GdkRGBA *rgba,
float values[4])

View File

@ -13,25 +13,14 @@ typedef struct _GskGpuTextureOp GskGpuTextureOp;
struct _GskGpuTextureOp
{
GskGpuShaderOp op;
GskGpuShaderImage image;
};
static void
gsk_gpu_texture_op_finish (GskGpuOp *op)
{
GskGpuTextureOp *self = (GskGpuTextureOp *) op;
g_object_unref (self->image.image);
}
static void
gsk_gpu_texture_op_print (GskGpuOp *op,
GskGpuFrame *frame,
GString *string,
guint indent)
{
GskGpuTextureOp *self = (GskGpuTextureOp *) op;
GskGpuShaderOp *shader = (GskGpuShaderOp *) op;
GskGpuTextureInstance *instance;
@ -39,26 +28,15 @@ gsk_gpu_texture_op_print (GskGpuOp *op,
gsk_gpu_print_op (string, indent, "texture");
gsk_gpu_print_rect (string, instance->rect);
gsk_gpu_print_image (string, self->image.image);
gsk_gpu_print_image_descriptor (string, shader->desc, instance->tex_id);
gsk_gpu_print_newline (string);
}
static const GskGpuShaderImage *
gsk_gpu_texture_op_get_images (GskGpuShaderOp *op,
gsize *n_images)
{
GskGpuTextureOp *self = (GskGpuTextureOp *) op;
*n_images = 1;
return &self->image;
}
static const GskGpuShaderOpClass GSK_GPU_TEXTURE_OP_CLASS = {
{
GSK_GPU_OP_SIZE (GskGpuTextureOp),
GSK_GPU_STAGE_SHADER,
gsk_gpu_texture_op_finish,
gsk_gpu_shader_op_finish,
gsk_gpu_texture_op_print,
#ifdef GDK_RENDERING_VULKAN
gsk_gpu_shader_op_vk_command,
@ -70,31 +48,27 @@ static const GskGpuShaderOpClass GSK_GPU_TEXTURE_OP_CLASS = {
#ifdef GDK_RENDERING_VULKAN
&gsk_gpu_texture_info,
#endif
gsk_gpu_texture_op_get_images,
gsk_gpu_texture_setup_vao
};
void
gsk_gpu_texture_op (GskGpuFrame *frame,
GskGpuShaderClip clip,
GskGpuImage *image,
GskGpuSampler sampler,
GskGpuDescriptors *desc,
guint32 descriptor,
const graphene_rect_t *rect,
const graphene_point_t *offset,
const graphene_rect_t *tex_rect)
{
GskGpuTextureInstance *instance;
GskGpuTextureOp *self;
self = (GskGpuTextureOp *) gsk_gpu_shader_op_alloc (frame,
&GSK_GPU_TEXTURE_OP_CLASS,
clip,
&instance);
gsk_gpu_shader_op_alloc (frame,
&GSK_GPU_TEXTURE_OP_CLASS,
clip,
desc,
&instance);
gsk_gpu_rect_to_float (rect, offset, instance->rect);
gsk_gpu_rect_to_float (tex_rect, offset, instance->tex_rect);
self->image.image = g_object_ref (image);
self->image.sampler = sampler;
self->image.descriptor = gsk_gpu_frame_get_image_descriptor (frame, image, sampler);
instance->tex_id = self->image.descriptor;
instance->tex_id = descriptor;
}

View File

@ -8,8 +8,8 @@ G_BEGIN_DECLS
void gsk_gpu_texture_op (GskGpuFrame *frame,
GskGpuShaderClip clip,
GskGpuImage *image,
GskGpuSampler sampler,
GskGpuDescriptors *desc,
guint32 descriptor,
const graphene_rect_t *rect,
const graphene_point_t *offset,
const graphene_rect_t *tex_rect);

View File

@ -7,6 +7,7 @@
#define GSK_GPU_PATTERN_STACK_SIZE 16
typedef struct _GskGpuBuffer GskGpuBuffer;
typedef struct _GskGpuDescriptors GskGpuDescriptors;
typedef struct _GskGpuDevice GskGpuDevice;
typedef struct _GskGpuFrame GskGpuFrame;
typedef struct _GskGpuImage GskGpuImage;

View File

@ -14,23 +14,8 @@ typedef struct _GskGpuUberOp GskGpuUberOp;
struct _GskGpuUberOp
{
GskGpuShaderOp op;
GskGpuShaderImage *images;
gsize n_images;
};
static void
gsk_gpu_uber_op_finish (GskGpuOp *op)
{
GskGpuUberOp *self = (GskGpuUberOp *) op;
gsize i;
for (i = 0; i < self->n_images; i++)
g_object_unref (self->images[i].image);
g_free (self->images);
}
static void
gsk_gpu_uber_op_print (GskGpuOp *op,
GskGpuFrame *frame,
@ -47,22 +32,11 @@ gsk_gpu_uber_op_print (GskGpuOp *op,
gsk_gpu_print_newline (string);
}
static const GskGpuShaderImage *
gsk_gpu_uber_op_get_images (GskGpuShaderOp *op,
gsize *n_images)
{
GskGpuUberOp *self = (GskGpuUberOp *) op;
*n_images = self->n_images;
return self->images;
}
static const GskGpuShaderOpClass GSK_GPU_UBER_OP_CLASS = {
{
GSK_GPU_OP_SIZE (GskGpuUberOp),
GSK_GPU_STAGE_SHADER,
gsk_gpu_uber_op_finish,
gsk_gpu_shader_op_finish,
gsk_gpu_uber_op_print,
#ifdef GDK_RENDERING_VULKAN
gsk_gpu_shader_op_vk_command,
@ -74,7 +48,6 @@ static const GskGpuShaderOpClass GSK_GPU_UBER_OP_CLASS = {
#ifdef GDK_RENDERING_VULKAN
&gsk_gpu_uber_info,
#endif
gsk_gpu_uber_op_get_images,
gsk_gpu_uber_setup_vao
};
@ -83,20 +56,16 @@ gsk_gpu_uber_op (GskGpuFrame *frame,
GskGpuShaderClip clip,
const graphene_rect_t *rect,
const graphene_point_t *offset,
GskGpuShaderImage *images,
gsize n_images,
GskGpuDescriptors *desc,
guint32 pattern_id)
{
GskGpuUberOp *self;
GskGpuUberInstance *instance;
self = (GskGpuUberOp *) gsk_gpu_shader_op_alloc (frame,
&GSK_GPU_UBER_OP_CLASS,
clip,
&instance);
self->images = images;
self->n_images = n_images;
gsk_gpu_shader_op_alloc (frame,
&GSK_GPU_UBER_OP_CLASS,
clip,
desc,
&instance);
gsk_gpu_rect_to_float (rect, offset, instance->rect);
instance->pattern_id = pattern_id;

View File

@ -10,8 +10,7 @@ void gsk_gpu_uber_op (GskGpuF
GskGpuShaderClip clip,
const graphene_rect_t *rect,
const graphene_point_t *offset,
GskGpuShaderImage *images,
gsize n_images,
GskGpuDescriptors *desc,
guint32 pattern_id);

View File

@ -0,0 +1,69 @@
#include "config.h"
#include "gskvulkandescriptorsprivate.h"
#include "gskvulkanframeprivate.h"
#include "gskvulkanimageprivate.h"
struct _GskVulkanDescriptors
{
GskGpuDescriptors parent_instance;
GskVulkanFrame *frame; /* no reference, the frame owns us */
};
G_DEFINE_TYPE (GskVulkanDescriptors, gsk_vulkan_descriptors, GSK_TYPE_GPU_DESCRIPTORS)
static gboolean
gsk_vulkan_descriptors_add_image (GskGpuDescriptors *desc,
GskGpuImage *image,
GskGpuSampler sampler,
guint32 *out_descriptor)
{
GskVulkanDescriptors *self = GSK_VULKAN_DESCRIPTORS (desc);
*out_descriptor = gsk_vulkan_frame_add_image (self->frame, image, sampler);
return TRUE;
}
static void
gsk_vulkan_descriptors_class_init (GskVulkanDescriptorsClass *klass)
{
GskGpuDescriptorsClass *descriptors_class = GSK_GPU_DESCRIPTORS_CLASS (klass);
descriptors_class->add_image = gsk_vulkan_descriptors_add_image;
}
static void
gsk_vulkan_descriptors_init (GskVulkanDescriptors *self)
{
}
GskGpuDescriptors *
gsk_vulkan_descriptors_new (GskVulkanFrame *frame)
{
GskVulkanDescriptors *self;
self = g_object_new (GSK_TYPE_VULKAN_DESCRIPTORS, NULL);
self->frame = frame;
return GSK_GPU_DESCRIPTORS (self);
}
void
gsk_vulkan_descriptors_transition (GskVulkanDescriptors *self,
VkCommandBuffer command_buffer)
{
GskGpuDescriptors *desc = GSK_GPU_DESCRIPTORS (self);
gsize i;
for (i = 0; i < gsk_gpu_descriptors_get_size (desc); i++)
{
gsk_vulkan_image_transition (GSK_VULKAN_IMAGE (gsk_gpu_descriptors_get_image (desc, i)),
command_buffer,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
VK_ACCESS_SHADER_READ_BIT);
}
}

View File

@ -0,0 +1,19 @@
#pragma once
#include "gskgpudescriptorsprivate.h"
#include "gskvulkanframeprivate.h"
G_BEGIN_DECLS
#define GSK_TYPE_VULKAN_DESCRIPTORS (gsk_vulkan_descriptors_get_type ())
G_DECLARE_FINAL_TYPE (GskVulkanDescriptors, gsk_vulkan_descriptors, GSK, VULKAN_DESCRIPTORS, GskGpuDescriptors)
GskGpuDescriptors * gsk_vulkan_descriptors_new (GskVulkanFrame *frame);
void gsk_vulkan_descriptors_transition (GskVulkanDescriptors *self,
VkCommandBuffer command_buffer);
G_END_DECLS

View File

@ -4,6 +4,7 @@
#include "gskgpuopprivate.h"
#include "gskvulkanbufferprivate.h"
#include "gskvulkandescriptorsprivate.h"
#include "gskvulkandeviceprivate.h"
#include "gskvulkanimageprivate.h"
@ -136,16 +137,15 @@ gsk_vulkan_frame_cleanup (GskGpuFrame *frame)
GSK_GPU_FRAME_CLASS (gsk_vulkan_frame_parent_class)->cleanup (frame);
}
static guint32
gsk_vulkan_frame_get_image_descriptor (GskGpuFrame *frame,
GskGpuImage *image,
GskGpuSampler sampler)
guint32
gsk_vulkan_frame_add_image (GskVulkanFrame *self,
GskGpuImage *image,
GskGpuSampler sampler)
{
GskVulkanFrame *self = GSK_VULKAN_FRAME (frame);
GskVulkanDevice *device;
guint32 result;
device = GSK_VULKAN_DEVICE (gsk_gpu_frame_get_device (frame));
device = GSK_VULKAN_DEVICE (gsk_gpu_frame_get_device (GSK_GPU_FRAME (self)));
result = gsk_descriptor_image_infos_get_size (&self->descriptor_images);
g_assert (result < gsk_vulkan_device_get_max_descriptors (device));
@ -157,7 +157,6 @@ gsk_vulkan_frame_get_image_descriptor (GskGpuFrame *frame,
.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
});
return result;
}
@ -251,6 +250,12 @@ gsk_vulkan_frame_prepare_descriptor_sets (GskVulkanFrame *self)
NULL);
}
static GskGpuDescriptors *
gsk_vulkan_frame_create_descriptors (GskGpuFrame *frame)
{
return GSK_GPU_DESCRIPTORS (gsk_vulkan_descriptors_new (GSK_VULKAN_FRAME (frame)));
}
static GskGpuBuffer *
gsk_vulkan_frame_create_vertex_buffer (GskGpuFrame *frame,
gsize size)
@ -351,7 +356,7 @@ gsk_vulkan_frame_class_init (GskVulkanFrameClass *klass)
gpu_frame_class->is_busy = gsk_vulkan_frame_is_busy;
gpu_frame_class->setup = gsk_vulkan_frame_setup;
gpu_frame_class->cleanup = gsk_vulkan_frame_cleanup;
gpu_frame_class->get_image_descriptor = gsk_vulkan_frame_get_image_descriptor;
gpu_frame_class->create_descriptors = gsk_vulkan_frame_create_descriptors;
gpu_frame_class->create_vertex_buffer = gsk_vulkan_frame_create_vertex_buffer;
gpu_frame_class->create_storage_buffer = gsk_vulkan_frame_create_storage_buffer;
gpu_frame_class->submit = gsk_vulkan_frame_submit;

View File

@ -10,4 +10,8 @@ G_DECLARE_FINAL_TYPE (GskVulkanFrame, gsk_vulkan_frame, GSK, VULKAN_FRAME, GskGp
VkFence gsk_vulkan_frame_get_vk_fence (GskVulkanFrame *self) G_GNUC_PURE;
guint32 gsk_vulkan_frame_add_image (GskVulkanFrame *self,
GskGpuImage *image,
GskGpuSampler sampler);
G_END_DECLS

View File

@ -69,6 +69,7 @@ gsk_private_sources = files([
'gl/fp16.c',
'gpu/gskglbuffer.c',
'gpu/gskgldevice.c',
'gpu/gskgldescriptors.c',
'gpu/gskglframe.c',
'gpu/gskglimage.c',
'gpu/gskgpublitop.c',
@ -80,6 +81,7 @@ gsk_private_sources = files([
'gpu/gskgpuclip.c',
'gpu/gskgpucolorizeop.c',
'gpu/gskgpucolorop.c',
'gpu/gskgpudescriptors.c',
'gpu/gskgpudownloadop.c',
'gpu/gskgpudevice.c',
'gpu/gskgpuframe.c',
@ -149,6 +151,7 @@ gsk_private_vulkan_shader_headers = []
if have_vulkan
gsk_private_sources += files([
'gpu/gskvulkanbuffer.c',
'gpu/gskvulkandescriptors.c',
'gpu/gskvulkandevice.c',
'gpu/gskvulkanframe.c',
'gpu/gskvulkanimage.c',