gpu: Remove descriptors

They are no longer a thing with the new way we manage textures.
This commit is contained in:
Benjamin Otte 2024-07-20 11:01:07 +02:00
parent 7b76170f46
commit 1b2156493b
51 changed files with 25 additions and 1250 deletions

View File

@ -1,117 +0,0 @@
#include "config.h"

#include "gskgldescriptorsprivate.h"

#include "gskglimageprivate.h"

/* GL implementation of GskGpuDescriptors: maps descriptor values
 * onto GL texture units of the given device. */
struct _GskGLDescriptors
{
  GskGpuDescriptors parent_instance;

  GskGLDevice *device;  /* owned; ref taken in gsk_gl_descriptors_new() */
  guint n_external;     /* count of GSK_GPU_IMAGE_EXTERNAL images added */
};

G_DEFINE_TYPE (GskGLDescriptors, gsk_gl_descriptors, GSK_TYPE_GPU_DESCRIPTORS)
static void
gsk_gl_descriptors_finalize (GObject *object)
{
  GskGLDescriptors *self = GSK_GL_DESCRIPTORS (object);

  /* Release the device ref taken in gsk_gl_descriptors_new(). */
  g_clear_object (&self->device);

  G_OBJECT_CLASS (gsk_gl_descriptors_parent_class)->finalize (object);
}
/* GskGpuDescriptorsClass.add_image vfunc.
 *
 * Allocates a descriptor for @image if enough of the 16 GL texture
 * units remain. Every image occupies one unit; external images need
 * 2 extra units on top of that (3 total), which is why the budget is
 * n_images + 2 * n_external.
 *
 * Descriptor encoding: low bit set = external image, value >> 1 is the
 * index among external images; low bit clear = regular image, value >> 1
 * is the index among non-external images.
 */
static gboolean
gsk_gl_descriptors_add_image (GskGpuDescriptors *desc,
                              GskGpuImage       *image,
                              GskGpuSampler      sampler,
                              guint32           *out_descriptor)
{
  GskGLDescriptors *self = GSK_GL_DESCRIPTORS (desc);
  gsize used_texture_units;

  used_texture_units = gsk_gpu_descriptors_get_n_images (desc) + 2 * self->n_external;

  if (gsk_gpu_image_get_flags (image) & GSK_GPU_IMAGE_EXTERNAL)
    {
      /* An external image needs 3 units; refuse when they no longer fit. */
      if (16 - used_texture_units < 3)
        return FALSE;

      *out_descriptor = (self->n_external << 1) | 1;
      self->n_external++;
      return TRUE;
    }
  else
    {
      if (used_texture_units >= 16)
        return FALSE;

      /* Index among non-external images only, tagged as even. */
      *out_descriptor = (gsk_gpu_descriptors_get_n_images (desc) - self->n_external) << 1;
      return TRUE;
    }
}
static void
gsk_gl_descriptors_class_init (GskGLDescriptorsClass *klass)
{
  GObjectClass *object_class = G_OBJECT_CLASS (klass);
  GskGpuDescriptorsClass *descriptors_class = GSK_GPU_DESCRIPTORS_CLASS (klass);

  /* Wire up the vfuncs implemented in this file. */
  object_class->finalize = gsk_gl_descriptors_finalize;

  descriptors_class->add_image = gsk_gl_descriptors_add_image;
}
static void
gsk_gl_descriptors_init (GskGLDescriptors *self)
{
  /* Nothing to do: instance fields start out zero-initialized. */
}
/* Creates a new, empty descriptor set for @device.
 * Takes a reference on @device, released at finalize. */
GskGpuDescriptors *
gsk_gl_descriptors_new (GskGLDevice *device)
{
  GskGLDescriptors *descriptors = g_object_new (GSK_TYPE_GL_DESCRIPTORS, NULL);

  descriptors->device = g_object_ref (device);

  return GSK_GPU_DESCRIPTORS (descriptors);
}
/* Returns how many external images have been added to @self. */
guint
gsk_gl_descriptors_get_n_external (GskGLDescriptors *self)
{
  return self->n_external;
}
/* Binds all tracked images to GL texture units.
 *
 * Layout: regular images are bound to units [0, n_textures) in the
 * order they appear among the non-external images; external images
 * are bound starting at unit n_textures, spaced 3 units apart.
 * Sampler objects are only bound for regular (non-external) images.
 */
void
gsk_gl_descriptors_use (GskGLDescriptors *self)
{
  GskGpuDescriptors *desc = GSK_GPU_DESCRIPTORS (self);
  gsize i, ext, n_textures;

  /* First unit used by external images; 3 units each, counted from 16. */
  n_textures = 16 - 3 * self->n_external;
  ext = 0;

  for (i = 0; i < gsk_gpu_descriptors_get_n_images (desc); i++)
    {
      GskGLImage *image = GSK_GL_IMAGE (gsk_gpu_descriptors_get_image (desc, i));

      if (gsk_gpu_image_get_flags (GSK_GPU_IMAGE (image)) & GSK_GPU_IMAGE_EXTERNAL)
        {
          /* The ext-th external image goes to unit n_textures + 3 * ext. */
          glActiveTexture (GL_TEXTURE0 + n_textures + 3 * ext);
          gsk_gl_image_bind_texture (image);
          ext++;
        }
      else
        {
          /* i - ext == index among non-external images seen so far. */
          glActiveTexture (GL_TEXTURE0 + i - ext);
          gsk_gl_image_bind_texture (image);
          glBindSampler (i - ext, gsk_gl_device_get_sampler_id (self->device, gsk_gpu_descriptors_get_sampler (desc, i)));
        }
    }
}

View File

@ -1,19 +0,0 @@
#pragma once

#include "gskgpudescriptorsprivate.h"
#include "gskgldeviceprivate.h"

G_BEGIN_DECLS

#define GSK_TYPE_GL_DESCRIPTORS (gsk_gl_descriptors_get_type ())

/* GL backend subclass of GskGpuDescriptors. */
G_DECLARE_FINAL_TYPE (GskGLDescriptors, gsk_gl_descriptors, GSK, GL_DESCRIPTORS, GskGpuDescriptors)

/* Creates a descriptor set for @device (takes a ref on it). */
GskGpuDescriptors * gsk_gl_descriptors_new (GskGLDevice *device);

/* Number of external images added so far. */
guint gsk_gl_descriptors_get_n_external (GskGLDescriptors *self);

/* Binds all tracked images to GL texture units. */
void gsk_gl_descriptors_use (GskGLDescriptors *self);

G_END_DECLS

View File

@ -38,7 +38,6 @@ struct _GLProgramKey
GskGpuShaderFlags flags;
GskGpuColorStates color_states;
guint32 variation;
guint n_external_textures;
};
G_DEFINE_TYPE (GskGLDevice, gsk_gl_device, GSK_TYPE_GPU_DEVICE)
@ -64,8 +63,7 @@ gl_program_key_equal (gconstpointer a,
return keya->op_class == keyb->op_class &&
keya->flags == keyb->flags &&
keya->color_states == keyb->color_states &&
keya->variation == keyb->variation &&
keya->n_external_textures == keyb->n_external_textures;
keya->variation == keyb->variation;
}
static GskGpuImage *
@ -383,7 +381,6 @@ gsk_gl_device_load_shader (GskGLDevice *self,
GskGpuShaderFlags flags,
GskGpuColorStates color_states,
guint32 variation,
guint n_external_textures,
GError **error)
{
GString *preamble;
@ -391,7 +388,6 @@ gsk_gl_device_load_shader (GskGLDevice *self,
GBytes *bytes;
GLuint shader_id;
g_assert ((n_external_textures > 0) == gsk_gpu_shader_flags_has_external_textures (flags));
preamble = g_string_new (NULL);
g_string_append (preamble, self->version_string);
@ -404,16 +400,12 @@ gsk_gl_device_load_shader (GskGLDevice *self,
g_string_append (preamble, "#extension GL_OES_EGL_image_external : require\n");
}
g_string_append (preamble, "#define GSK_GLES 1\n");
g_assert (3 * n_external_textures <= 16);
}
else
{
g_assert (!gsk_gpu_shader_flags_has_external_textures (flags));
}
g_string_append_printf (preamble, "#define N_TEXTURES %u\n", 16 - 3 * n_external_textures);
g_string_append_printf (preamble, "#define N_EXTERNAL_TEXTURES %u\n", n_external_textures);
switch (shader_type)
{
case GL_VERTEX_SHADER:
@ -471,18 +463,17 @@ gsk_gl_device_load_program (GskGLDevice *self,
GskGpuShaderFlags flags,
GskGpuColorStates color_states,
guint32 variation,
guint n_external_textures,
GError **error)
{
G_GNUC_UNUSED gint64 begin_time = GDK_PROFILER_CURRENT_TIME;
GLuint vertex_shader_id, fragment_shader_id, program_id;
GLint link_status;
vertex_shader_id = gsk_gl_device_load_shader (self, op_class->shader_name, GL_VERTEX_SHADER, flags, color_states, variation, n_external_textures, error);
vertex_shader_id = gsk_gl_device_load_shader (self, op_class->shader_name, GL_VERTEX_SHADER, flags, color_states, variation, error);
if (vertex_shader_id == 0)
return 0;
fragment_shader_id = gsk_gl_device_load_shader (self, op_class->shader_name, GL_FRAGMENT_SHADER, flags, color_states, variation, n_external_textures, error);
fragment_shader_id = gsk_gl_device_load_shader (self, op_class->shader_name, GL_FRAGMENT_SHADER, flags, color_states, variation, error);
if (fragment_shader_id == 0)
return 0;
@ -542,8 +533,7 @@ gsk_gl_device_use_program (GskGLDevice *self,
const GskGpuShaderOpClass *op_class,
GskGpuShaderFlags flags,
GskGpuColorStates color_states,
guint32 variation,
guint n_external_textures)
guint32 variation)
{
GError *error = NULL;
GLuint program_id;
@ -552,7 +542,6 @@ gsk_gl_device_use_program (GskGLDevice *self,
.flags = flags,
.color_states = color_states,
.variation = variation,
.n_external_textures = n_external_textures
};
program_id = GPOINTER_TO_UINT (g_hash_table_lookup (self->gl_programs, &key));
@ -562,7 +551,7 @@ gsk_gl_device_use_program (GskGLDevice *self,
return;
}
program_id = gsk_gl_device_load_program (self, op_class, flags, color_states, variation, n_external_textures, &error);
program_id = gsk_gl_device_load_program (self, op_class, flags, color_states, variation, &error);
if (program_id == 0)
{
g_critical ("Failed to load shader program: %s", error->message);

View File

@ -15,8 +15,7 @@ void gsk_gl_device_use_program (GskGLDe
const GskGpuShaderOpClass *op_class,
GskGpuShaderFlags flags,
GskGpuColorStates color_states,
guint32 variation,
guint n_external_textures);
guint32 variation);
GLuint gsk_gl_device_get_sampler_id (GskGLDevice *self,
GskGpuSampler sampler);

View File

@ -6,7 +6,6 @@
#include "gskgpuopprivate.h"
#include "gskgpushaderopprivate.h"
#include "gskglbufferprivate.h"
#include "gskgldescriptorsprivate.h"
#include "gskgldeviceprivate.h"
#include "gskglimageprivate.h"
@ -125,12 +124,6 @@ gsk_gl_frame_upload_texture (GskGpuFrame *frame,
return GSK_GPU_FRAME_CLASS (gsk_gl_frame_parent_class)->upload_texture (frame, with_mipmap, texture);
}
/* GskGpuFrameClass.create_descriptors vfunc:
 * creates a GL descriptor set bound to the frame's device. */
static GskGpuDescriptors *
gsk_gl_frame_create_descriptors (GskGpuFrame *frame)
{
  return GSK_GPU_DESCRIPTORS (gsk_gl_descriptors_new (GSK_GL_DEVICE (gsk_gpu_frame_get_device (frame))));
}
static GskGpuBuffer *
gsk_gl_frame_create_vertex_buffer (GskGpuFrame *frame,
gsize size)
@ -227,7 +220,6 @@ gsk_gl_frame_class_init (GskGLFrameClass *klass)
gpu_frame_class->wait = gsk_gl_frame_wait;
gpu_frame_class->cleanup = gsk_gl_frame_cleanup;
gpu_frame_class->upload_texture = gsk_gl_frame_upload_texture;
gpu_frame_class->create_descriptors = gsk_gl_frame_create_descriptors;
gpu_frame_class->create_vertex_buffer = gsk_gl_frame_create_vertex_buffer;
gpu_frame_class->create_storage_buffer = gsk_gl_frame_create_storage_buffer;
gpu_frame_class->write_texture_vertex_data = gsk_gl_frame_write_texture_vertex_data;
@ -253,8 +245,7 @@ gsk_gl_frame_use_program (GskGLFrame *self,
const GskGpuShaderOpClass *op_class,
GskGpuShaderFlags flags,
GskGpuColorStates color_states,
guint32 variation,
guint n_external_textures)
guint32 variation)
{
GLuint vao;
@ -262,8 +253,7 @@ gsk_gl_frame_use_program (GskGLFrame *self,
op_class,
flags,
color_states,
variation,
n_external_textures);
variation);
vao = GPOINTER_TO_UINT (g_hash_table_lookup (self->vaos, op_class));
if (vao)

View File

@ -12,8 +12,7 @@ void gsk_gl_frame_use_program (GskGLFr
const GskGpuShaderOpClass *op_class,
GskGpuShaderFlags flags,
GskGpuColorStates color_states,
guint32 variation,
guint n_external_textures);
guint32 variation);
void gsk_gl_frame_bind_globals (GskGLFrame *self);

View File

@ -55,7 +55,6 @@ static const GskGpuShaderOpClass GSK_GPU_BLEND_MODE_OP_CLASS = {
void
gsk_gpu_blend_mode_op (GskGpuFrame *frame,
GskGpuShaderClip clip,
GskGpuDescriptors *desc,
const graphene_rect_t *rect,
const graphene_point_t *offset,
float opacity,
@ -70,7 +69,6 @@ gsk_gpu_blend_mode_op (GskGpuFrame *frame,
gsk_gpu_color_states_create_equal (TRUE, TRUE),
blend_mode,
clip,
desc,
(GskGpuImage *[2]) { bottom->image, top->image },
(GskGpuSampler[2]) { bottom->sampler, top->sampler },
&instance);

View File

@ -8,7 +8,6 @@ G_BEGIN_DECLS
void gsk_gpu_blend_mode_op (GskGpuFrame *frame,
GskGpuShaderClip clip,
GskGpuDescriptors *desc,
const graphene_rect_t *rect,
const graphene_point_t *offset,
float opacity,

View File

@ -58,7 +58,6 @@ gsk_gpu_blur_op_full (GskGpuFrame *frame,
GskGpuShaderClip clip,
GskGpuColorStates color_states,
guint32 variation,
GskGpuDescriptors *desc,
const graphene_point_t *offset,
const GskGpuShaderImage *image,
const graphene_vec2_t *blur_direction,
@ -71,7 +70,6 @@ gsk_gpu_blur_op_full (GskGpuFrame *frame,
color_states,
variation,
clip,
desc,
(GskGpuImage *[1]) { image->image },
(GskGpuSampler[1]) { image->sampler },
&instance);
@ -86,7 +84,6 @@ void
gsk_gpu_blur_op (GskGpuFrame *frame,
GskGpuShaderClip clip,
GskGpuColorStates color_states,
GskGpuDescriptors *desc,
const graphene_point_t *offset,
const GskGpuShaderImage *image,
const graphene_vec2_t *blur_direction)
@ -95,7 +92,6 @@ gsk_gpu_blur_op (GskGpuFrame *frame,
clip,
color_states,
0,
desc,
offset,
image,
blur_direction,
@ -106,7 +102,6 @@ void
gsk_gpu_blur_shadow_op (GskGpuFrame *frame,
GskGpuShaderClip clip,
GskGpuColorStates color_states,
GskGpuDescriptors *desc,
const graphene_point_t *offset,
const GskGpuShaderImage *image,
const graphene_vec2_t *blur_direction,
@ -116,7 +111,6 @@ gsk_gpu_blur_shadow_op (GskGpuFrame *frame,
clip,
color_states,
VARIATION_COLORIZE,
desc,
offset,
image,
blur_direction,

View File

@ -9,7 +9,6 @@ G_BEGIN_DECLS
void gsk_gpu_blur_op (GskGpuFrame *frame,
GskGpuShaderClip clip,
GskGpuColorStates color_states,
GskGpuDescriptors *desc,
const graphene_point_t *offset,
const GskGpuShaderImage *image,
const graphene_vec2_t *blur_direction);
@ -17,7 +16,6 @@ void gsk_gpu_blur_op (GskGpuF
void gsk_gpu_blur_shadow_op (GskGpuFrame *frame,
GskGpuShaderClip clip,
GskGpuColorStates color_states,
GskGpuDescriptors *desc,
const graphene_point_t *offset,
const GskGpuShaderImage *image,
const graphene_vec2_t *blur_direction,

View File

@ -114,7 +114,6 @@ gsk_gpu_border_op (GskGpuFrame *frame,
clip,
NULL,
NULL,
NULL,
&instance);
gsk_rounded_rect_to_float (outline, offset, instance->outline);

View File

@ -98,7 +98,6 @@ gsk_gpu_box_shadow_op (GskGpuFrame *frame,
clip,
NULL,
NULL,
NULL,
&instance);
gsk_gpu_rect_to_float (bounds, offset, instance->bounds);

View File

@ -53,7 +53,6 @@ void
gsk_gpu_colorize_op (GskGpuFrame *frame,
GskGpuShaderClip clip,
GskGpuColorStates color_states,
GskGpuDescriptors *descriptors,
const graphene_point_t *offset,
const GskGpuShaderImage *image,
const float color[4])
@ -65,7 +64,6 @@ gsk_gpu_colorize_op (GskGpuFrame *frame,
color_states,
0,
clip,
descriptors,
(GskGpuImage *[1]) { image->image },
(GskGpuSampler[1]) { image->sampler },
&instance);

View File

@ -9,7 +9,6 @@ G_BEGIN_DECLS
void gsk_gpu_colorize_op (GskGpuFrame *frame,
GskGpuShaderClip clip,
GskGpuColorStates color_states,
GskGpuDescriptors *desc,
const graphene_point_t *offset,
const GskGpuShaderImage *image,
const float color[4]);

View File

@ -52,7 +52,6 @@ void
gsk_gpu_color_matrix_op (GskGpuFrame *frame,
GskGpuShaderClip clip,
GskGpuColorStates color_states,
GskGpuDescriptors *desc,
const graphene_point_t *offset,
const GskGpuShaderImage *image,
const graphene_matrix_t *color_matrix,
@ -65,7 +64,6 @@ gsk_gpu_color_matrix_op (GskGpuFrame *frame,
color_states,
0,
clip,
desc,
(GskGpuImage *[1]) { image->image },
(GskGpuSampler[1]) { image->sampler },
&instance);

View File

@ -9,7 +9,6 @@ G_BEGIN_DECLS
void gsk_gpu_color_matrix_op (GskGpuFrame *frame,
GskGpuShaderClip clip,
GskGpuColorStates color_states,
GskGpuDescriptors *desc,
const graphene_point_t *offset,
const GskGpuShaderImage *image,
const graphene_matrix_t *color_matrix,

View File

@ -67,7 +67,6 @@ gsk_gpu_color_op (GskGpuFrame *frame,
clip,
NULL,
NULL,
NULL,
&instance);
gsk_gpu_rect_to_float (rect, offset, instance->rect);

View File

@ -74,7 +74,6 @@ gsk_gpu_conic_gradient_op (GskGpuFrame *frame,
clip,
NULL,
NULL,
NULL,
&instance);
gsk_gpu_rect_to_float (rect, offset, instance->rect);

View File

@ -59,7 +59,6 @@ gsk_gpu_convert_op (GskGpuFrame *frame,
GskGpuShaderClip clip,
GskGpuColorStates color_states,
float opacity,
GskGpuDescriptors *desc,
gboolean straight_alpha,
const graphene_point_t *offset,
const GskGpuShaderImage *image)
@ -72,7 +71,6 @@ gsk_gpu_convert_op (GskGpuFrame *frame,
(opacity < 1.0 ? VARIATION_OPACITY : 0) |
(straight_alpha ? VARIATION_STRAIGHT_ALPHA : 0),
clip,
desc,
(GskGpuImage *[1]) { image->image },
(GskGpuSampler[1]) { image->sampler },
&instance);

View File

@ -10,7 +10,6 @@ void gsk_gpu_convert_op (GskGpuF
GskGpuShaderClip clip,
GskGpuColorStates color_states,
float opacity,
GskGpuDescriptors *desc,
gboolean straight_alpha,
const graphene_point_t *offset,
const GskGpuShaderImage *image);

View File

@ -54,7 +54,6 @@ void
gsk_gpu_cross_fade_op (GskGpuFrame *frame,
GskGpuShaderClip clip,
GskGpuColorStates color_states,
GskGpuDescriptors *desc,
const graphene_rect_t *rect,
const graphene_point_t *offset,
float opacity,
@ -69,7 +68,6 @@ gsk_gpu_cross_fade_op (GskGpuFrame *frame,
color_states,
0,
clip,
desc,
(GskGpuImage *[2]) { start->image, end->image },
(GskGpuSampler[2]) { start->sampler, end->sampler },
&instance);

View File

@ -9,7 +9,6 @@ G_BEGIN_DECLS
void gsk_gpu_cross_fade_op (GskGpuFrame *frame,
GskGpuShaderClip clip,
GskGpuColorStates color_states,
GskGpuDescriptors *desc,
const graphene_rect_t *rect,
const graphene_point_t *offset,
float opacity,

View File

@ -1,159 +0,0 @@
#include "config.h"

#include "gskgpudescriptorsprivate.h"

typedef struct _GskGpuImageEntry GskGpuImageEntry;
typedef struct _GskGpuBufferEntry GskGpuBufferEntry;

/* One tracked image: the (image, sampler) pair plus the
 * backend-specific descriptor value allocated for it. */
struct _GskGpuImageEntry
{
  GskGpuImage *image;    /* owned: ref held while the entry exists */
  GskGpuSampler sampler;
  guint32 descriptor;
};

/* Free func for the entries array: drops the ref on the image. */
static void
gsk_gpu_image_entry_clear (gpointer data)
{
  GskGpuImageEntry *entry = data;

  g_object_unref (entry->image);
}

/* Instantiate the inline array type GskGpuImageEntries:
 * by-value elements, 16 preallocated, cleared entries run the
 * free func above. */
#define GDK_ARRAY_NAME gsk_gpu_image_entries
#define GDK_ARRAY_TYPE_NAME GskGpuImageEntries
#define GDK_ARRAY_ELEMENT_TYPE GskGpuImageEntry
#define GDK_ARRAY_FREE_FUNC gsk_gpu_image_entry_clear
#define GDK_ARRAY_BY_VALUE 1
#define GDK_ARRAY_PREALLOC 16
#define GDK_ARRAY_NO_MEMSET 1
#include "gdk/gdkarrayimpl.c"

typedef struct _GskGpuDescriptorsPrivate GskGpuDescriptorsPrivate;

struct _GskGpuDescriptorsPrivate
{
  GskGpuImageEntries images;  /* all tracked (image, sampler, descriptor) entries */
};

G_DEFINE_TYPE_WITH_PRIVATE (GskGpuDescriptors, gsk_gpu_descriptors, G_TYPE_OBJECT)
static void
gsk_gpu_descriptors_finalize (GObject *object)
{
  GskGpuDescriptors *self = GSK_GPU_DESCRIPTORS (object);
  GskGpuDescriptorsPrivate *priv = gsk_gpu_descriptors_get_instance_private (self);

  /* Drops the ref held on every tracked image via the array's free func. */
  gsk_gpu_image_entries_clear (&priv->images);

  G_OBJECT_CLASS (gsk_gpu_descriptors_parent_class)->finalize (object);
}
static void
gsk_gpu_descriptors_class_init (GskGpuDescriptorsClass *klass)
{
  GObjectClass *gobject_class = G_OBJECT_CLASS (klass);

  /* Only finalize needs overriding; add_image stays for subclasses. */
  gobject_class->finalize = gsk_gpu_descriptors_finalize;
}
static void
gsk_gpu_descriptors_init (GskGpuDescriptors *self)
{
  GskGpuDescriptorsPrivate *priv = gsk_gpu_descriptors_get_instance_private (self);

  /* Start with an empty image list. */
  gsk_gpu_image_entries_init (&priv->images);
}
/* Returns the number of images currently tracked by @self. */
gsize
gsk_gpu_descriptors_get_n_images (GskGpuDescriptors *self)
{
  GskGpuDescriptorsPrivate *priv = gsk_gpu_descriptors_get_instance_private (self);

  return gsk_gpu_image_entries_get_size (&priv->images);
}
/* Truncates the tracked image list to @n_images entries.
 * Only shrinking is allowed (asserted below).
 * NOTE(review): dropped entries presumably release their image refs
 * via the array's free func — confirm in gdk/gdkarrayimpl.c. */
void
gsk_gpu_descriptors_set_size (GskGpuDescriptors *self,
                              gsize              n_images)
{
  GskGpuDescriptorsPrivate *priv = gsk_gpu_descriptors_get_instance_private (self);

  g_assert (n_images <= gsk_gpu_image_entries_get_size (&priv->images));

  gsk_gpu_image_entries_set_size (&priv->images, n_images);
}
/* Returns the image tracked at index @id (no new reference). */
GskGpuImage *
gsk_gpu_descriptors_get_image (GskGpuDescriptors *self,
                               gsize              id)
{
  GskGpuDescriptorsPrivate *priv = gsk_gpu_descriptors_get_instance_private (self);

  return gsk_gpu_image_entries_get (&priv->images, id)->image;
}
/* Returns the sampler tracked at index @id. */
GskGpuSampler
gsk_gpu_descriptors_get_sampler (GskGpuDescriptors *self,
                                 gsize              id)
{
  GskGpuDescriptorsPrivate *priv = gsk_gpu_descriptors_get_instance_private (self);

  return gsk_gpu_image_entries_get (&priv->images, id)->sampler;
}
/* Returns the index of the entry whose descriptor equals @descriptor.
 * The descriptor must exist; hitting the end is a programmer error. */
gsize
gsk_gpu_descriptors_find_image (GskGpuDescriptors *self,
                                guint32            descriptor)
{
  GskGpuDescriptorsPrivate *priv = gsk_gpu_descriptors_get_instance_private (self);
  gsize i, n;

  n = gsk_gpu_image_entries_get_size (&priv->images);
  for (i = 0; i < n; i++)
    {
      if (gsk_gpu_image_entries_get (&priv->images, i)->descriptor == descriptor)
        return i;
    }

  g_return_val_if_reached ((gsize) -1);
}
/* Looks up or allocates a descriptor for the (@image, @sampler) pair.
 *
 * If the pair is already tracked, its existing descriptor is reused.
 * Otherwise the subclass's add_image vfunc allocates a backend-specific
 * descriptor; on success the pair is appended, taking a ref on @image.
 *
 * Returns TRUE and sets @out_descriptor on success, FALSE if the
 * subclass's vfunc declined (e.g. out of slots).
 */
gboolean
gsk_gpu_descriptors_add_image (GskGpuDescriptors *self,
                               GskGpuImage       *image,
                               GskGpuSampler      sampler,
                               guint32           *out_descriptor)
{
  GskGpuDescriptorsPrivate *priv = gsk_gpu_descriptors_get_instance_private (self);
  gsize i;
  guint32 descriptor;

  /* Deduplicate: reuse an existing descriptor for an identical pair. */
  for (i = 0; i < gsk_gpu_image_entries_get_size (&priv->images); i++)
    {
      const GskGpuImageEntry *entry = gsk_gpu_image_entries_get (&priv->images, i);

      if (entry->image == image && entry->sampler == sampler)
        {
          *out_descriptor = entry->descriptor;
          return TRUE;
        }
    }

  /* Let the backend allocate the actual descriptor value. */
  if (!GSK_GPU_DESCRIPTORS_GET_CLASS (self)->add_image (self, image, sampler, &descriptor))
    return FALSE;

  gsk_gpu_image_entries_append (&priv->images,
                                &(GskGpuImageEntry) {
                                  .image = g_object_ref (image),
                                  .sampler = sampler,
                                  .descriptor = descriptor
                                });

  *out_descriptor = descriptor;

  return TRUE;
}

View File

@ -1,51 +0,0 @@
#pragma once

#include "gskgputypesprivate.h"

G_BEGIN_DECLS

/* GskGpuDescriptors: base class tracking the (image, sampler) pairs
 * used by a frame and the descriptor values allocated for them.
 * Backends subclass it and implement add_image. */

#define GSK_TYPE_GPU_DESCRIPTORS (gsk_gpu_descriptors_get_type ())
#define GSK_GPU_DESCRIPTORS(o) (G_TYPE_CHECK_INSTANCE_CAST ((o), GSK_TYPE_GPU_DESCRIPTORS, GskGpuDescriptors))
#define GSK_GPU_DESCRIPTORS_CLASS(k) (G_TYPE_CHECK_CLASS_CAST ((k), GSK_TYPE_GPU_DESCRIPTORS, GskGpuDescriptorsClass))
#define GSK_IS_GPU_DESCRIPTORS(o) (G_TYPE_CHECK_INSTANCE_TYPE ((o), GSK_TYPE_GPU_DESCRIPTORS))
#define GSK_IS_GPU_DESCRIPTORS_CLASS(k) (G_TYPE_CHECK_CLASS_TYPE ((k), GSK_TYPE_GPU_DESCRIPTORS))
#define GSK_GPU_DESCRIPTORS_GET_CLASS(o) (G_TYPE_INSTANCE_GET_CLASS ((o), GSK_TYPE_GPU_DESCRIPTORS, GskGpuDescriptorsClass))

typedef struct _GskGpuDescriptorsClass GskGpuDescriptorsClass;

struct _GskGpuDescriptors
{
  GObject parent_instance;
};

struct _GskGpuDescriptorsClass
{
  GObjectClass parent_class;

  /* Allocate a backend descriptor for (image, sampler);
   * return FALSE when no more room is available. */
  gboolean (* add_image) (GskGpuDescriptors *self,
                          GskGpuImage *image,
                          GskGpuSampler sampler,
                          guint32 *out_id);
};

GType gsk_gpu_descriptors_get_type (void) G_GNUC_CONST;

gsize gsk_gpu_descriptors_get_n_images (GskGpuDescriptors *self);
/* Only shrinking is allowed. */
void gsk_gpu_descriptors_set_size (GskGpuDescriptors *self,
                                   gsize n_images);
GskGpuImage * gsk_gpu_descriptors_get_image (GskGpuDescriptors *self,
                                             gsize id);
GskGpuSampler gsk_gpu_descriptors_get_sampler (GskGpuDescriptors *self,
                                               gsize id);
/* Index of the entry with the given descriptor value; must exist. */
gsize gsk_gpu_descriptors_find_image (GskGpuDescriptors *self,
                                      guint32 descriptor);
/* Reuses or allocates a descriptor; FALSE when the backend is full. */
gboolean gsk_gpu_descriptors_add_image (GskGpuDescriptors *self,
                                        GskGpuImage *image,
                                        GskGpuSampler sampler,
                                        guint32 *out_descriptor);

G_DEFINE_AUTOPTR_CLEANUP_FUNC(GskGpuDescriptors, g_object_unref)

G_END_DECLS

View File

@ -473,12 +473,6 @@ gsk_gpu_frame_upload_texture (GskGpuFrame *self,
return image;
}
/* Creates a new backend-specific descriptor set for this frame
 * by dispatching to the subclass's create_descriptors vfunc. */
GskGpuDescriptors *
gsk_gpu_frame_create_descriptors (GskGpuFrame *self)
{
  return GSK_GPU_FRAME_GET_CLASS (self)->create_descriptors (self);
}
static GskGpuBuffer *
gsk_gpu_frame_create_vertex_buffer (GskGpuFrame *self,
gsize size)

View File

@ -36,7 +36,6 @@ struct _GskGpuFrameClass
GskGpuImage * (* upload_texture) (GskGpuFrame *self,
gboolean with_mipmap,
GdkTexture *texture);
GskGpuDescriptors * (* create_descriptors) (GskGpuFrame *self);
GskGpuBuffer * (* create_vertex_buffer) (GskGpuFrame *self,
gsize size);
GskGpuBuffer * (* create_storage_buffer) (GskGpuFrame *self,
@ -82,7 +81,6 @@ gpointer gsk_gpu_frame_alloc_op (GskGpuF
GskGpuImage * gsk_gpu_frame_upload_texture (GskGpuFrame *self,
gboolean with_mipmap,
GdkTexture *texture);
GskGpuDescriptors * gsk_gpu_frame_create_descriptors (GskGpuFrame *self);
gsize gsk_gpu_frame_reserve_vertex_data (GskGpuFrame *self,
gsize size);
guchar * gsk_gpu_frame_get_vertex_data (GskGpuFrame *self,

View File

@ -9,7 +9,6 @@
#ifdef GDK_RENDERING_VULKAN
#include "gskvulkandeviceprivate.h"
#include "gskvulkanframeprivate.h"
#include "gskvulkandescriptorsprivate.h"
#endif
typedef struct _GskGpuGlobalsOp GskGpuGlobalsOp;
@ -51,8 +50,8 @@ gsk_gpu_globals_op_vk_command (GskGpuOp *op,
GskGpuGlobalsOp *self = (GskGpuGlobalsOp *) op;
vkCmdPushConstants (state->vk_command_buffer,
gsk_vulkan_device_get_vk_pipeline_layout (GSK_VULKAN_DEVICE (gsk_gpu_frame_get_device (frame)),
gsk_vulkan_descriptors_get_pipeline_layout (state->desc)),
NULL, //gsk_vulkan_device_get_vk_pipeline_layout (GSK_VULKAN_DEVICE (gsk_gpu_frame_get_device (frame)),
// gsk_vulkan_descriptors_get_pipeline_layout (state->desc)),
VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT,
0,
sizeof (self->instance),

View File

@ -79,7 +79,6 @@ gsk_gpu_linear_gradient_op (GskGpuFrame *frame,
clip,
NULL,
NULL,
NULL,
&instance);
gsk_gpu_rect_to_float (rect, offset, instance->rect);

View File

@ -54,7 +54,6 @@ static const GskGpuShaderOpClass GSK_GPU_MASK_OP_CLASS = {
void
gsk_gpu_mask_op (GskGpuFrame *frame,
GskGpuShaderClip clip,
GskGpuDescriptors *desc,
const graphene_rect_t *rect,
const graphene_point_t *offset,
float opacity,
@ -69,7 +68,6 @@ gsk_gpu_mask_op (GskGpuFrame *frame,
gsk_gpu_color_states_create_equal (TRUE, TRUE),
mask_mode,
clip,
desc,
(GskGpuImage *[2]) { source->image, mask->image },
(GskGpuSampler[2]) { source->sampler, mask->sampler },
&instance);

View File

@ -8,7 +8,6 @@ G_BEGIN_DECLS
void gsk_gpu_mask_op (GskGpuFrame *frame,
GskGpuShaderClip clip,
GskGpuDescriptors *desc,
const graphene_rect_t *rect,
const graphene_point_t *offset,
float opacity,

View File

@ -159,7 +159,7 @@ gsk_gpu_mipmap_op_gl_command (GskGpuOp *op,
glActiveTexture (GL_TEXTURE0);
gsk_gl_image_bind_texture (GSK_GL_IMAGE (self->image));
/* need to reset the images again */
state->desc = NULL;
state->current_images[0] = NULL;
glGenerateMipmap (GL_TEXTURE_2D);

View File

@ -17,7 +17,6 @@
#include "gskgpuconicgradientopprivate.h"
#include "gskgpuconvertopprivate.h"
#include "gskgpucrossfadeopprivate.h"
#include "gskgpudescriptorsprivate.h"
#include "gskgpudeviceprivate.h"
#include "gskgpuframeprivate.h"
#include "gskgpuglobalsopprivate.h"
@ -112,7 +111,6 @@ struct _GskGpuNodeProcessor
{
GskGpuFrame *frame;
GdkColorState *ccs;
GskGpuDescriptors *desc;
cairo_rectangle_int_t scissor;
GskGpuBlend blend;
graphene_point_t offset;
@ -143,7 +141,6 @@ static void
gsk_gpu_node_processor_finish (GskGpuNodeProcessor *self)
{
g_clear_pointer (&self->modelview, gsk_transform_unref);
g_clear_object (&self->desc);
}
static void
@ -161,7 +158,6 @@ gsk_gpu_node_processor_init (GskGpuNodeProcessor *self,
self->frame = frame;
self->ccs = ccs;
self->desc = NULL;
self->scissor = *clip;
self->blend = GSK_GPU_BLEND_OVER;
@ -271,42 +267,6 @@ gsk_gpu_node_processor_color_states_for_rgba (GskGpuNodeProcessor *self)
FALSE);
}
/* Puts @image into a fresh descriptor set on @self and returns its
 * descriptor. Any previously held descriptor set is replaced. */
static guint32
gsk_gpu_node_processor_add_image (GskGpuNodeProcessor *self,
                                  GskGpuImage         *image,
                                  GskGpuSampler        sampler)
{
  guint32 descriptor;

  /* Replace the previous descriptor set with a brand-new one. */
  g_clear_object (&self->desc);
  self->desc = gsk_gpu_frame_create_descriptors (self->frame);

  if (!gsk_gpu_descriptors_add_image (self->desc, image, sampler, &descriptor))
    {
      /* A fresh set is expected to accept a single image; failing
       * here would be a bug in the backend. */
      g_assert_not_reached ();
      return 0;
    }

  return descriptor;
}
/* Puts @image1 and @image2 into a fresh descriptor set on @self and
 * writes their two descriptors into @out_descriptors[0] / [1].
 * Any previously held descriptor set is replaced. */
static void
gsk_gpu_node_processor_add_two_images (GskGpuNodeProcessor *self,
                                       GskGpuImage         *image1,
                                       GskGpuSampler        sampler1,
                                       GskGpuImage         *image2,
                                       GskGpuSampler        sampler2,
                                       guint32             *out_descriptors)
{
  /* Replace the previous descriptor set with a brand-new one. */
  g_clear_object (&self->desc);
  self->desc = gsk_gpu_frame_create_descriptors (self->frame);

  /* A fresh set is expected to have room for two images; failing
   * here would be a bug in the backend. */
  if (!gsk_gpu_descriptors_add_image (self->desc, image1, sampler1, &out_descriptors[0]) ||
      !gsk_gpu_descriptors_add_image (self->desc, image2, sampler2, &out_descriptors[1]))
    {
      g_assert_not_reached ();
    }
}
static void
rect_round_to_pixels (const graphene_rect_t *src,
const graphene_vec2_t *pixel_scale,
@ -520,12 +480,10 @@ gsk_gpu_node_processor_image_op (GskGpuNodeProcessor *self,
const graphene_rect_t *rect,
const graphene_rect_t *tex_rect)
{
guint32 descriptor;
gboolean straight_alpha;
g_assert (self->pending_globals == 0);
descriptor = gsk_gpu_node_processor_add_image (self, image, sampler);
straight_alpha = gsk_gpu_image_get_flags (image) & GSK_GPU_IMAGE_STRAIGHT_ALPHA;
if (straight_alpha ||
@ -538,13 +496,11 @@ gsk_gpu_node_processor_image_op (GskGpuNodeProcessor *self,
image_color_state,
TRUE),
self->opacity,
self->desc,
straight_alpha,
&self->offset,
&(GskGpuShaderImage) {
image,
sampler,
descriptor,
rect,
tex_rect
});
@ -553,12 +509,10 @@ gsk_gpu_node_processor_image_op (GskGpuNodeProcessor *self,
{
gsk_gpu_texture_op (self->frame,
gsk_gpu_clip_get_shader_clip (&self->clip, &self->offset, rect),
self->desc,
&self->offset,
&(GskGpuShaderImage) {
image,
sampler,
descriptor,
rect,
tex_rect
});
@ -756,15 +710,12 @@ gsk_gpu_node_processor_blur_op (GskGpuNodeProcessor *self,
const graphene_point_t *shadow_offset,
float blur_radius,
const GdkRGBA *shadow_color,
GskGpuDescriptors *source_desc,
GskGpuImage *source_image,
guint32 source_descriptor,
GdkMemoryDepth source_depth,
const graphene_rect_t *source_rect)
{
GskGpuNodeProcessor other;
GskGpuImage *intermediate;
guint32 intermediate_descriptor;
graphene_vec2_t direction;
graphene_rect_t clip_rect, intermediate_rect;
graphene_point_t real_offset;
@ -795,12 +746,10 @@ gsk_gpu_node_processor_blur_op (GskGpuNodeProcessor *self,
gsk_gpu_blur_op (other.frame,
gsk_gpu_clip_get_shader_clip (&other.clip, &other.offset, &intermediate_rect),
gsk_gpu_node_processor_color_states_self (&other),
source_desc,
&other.offset,
&(GskGpuShaderImage) {
source_image,
GSK_GPU_SAMPLER_TRANSPARENT,
source_descriptor,
&intermediate_rect,
source_rect
},
@ -811,18 +760,15 @@ gsk_gpu_node_processor_blur_op (GskGpuNodeProcessor *self,
real_offset = GRAPHENE_POINT_INIT (self->offset.x + shadow_offset->x,
self->offset.y + shadow_offset->y);
graphene_vec2_init (&direction, 0.0f, blur_radius);
intermediate_descriptor = gsk_gpu_node_processor_add_image (self, intermediate, GSK_GPU_SAMPLER_TRANSPARENT);
if (shadow_color)
{
gsk_gpu_blur_shadow_op (self->frame,
gsk_gpu_clip_get_shader_clip (&self->clip, &real_offset, rect),
gsk_gpu_node_processor_color_states_for_rgba (self),
self->desc,
&real_offset,
&(GskGpuShaderImage) {
intermediate,
GSK_GPU_SAMPLER_TRANSPARENT,
intermediate_descriptor,
rect,
&intermediate_rect,
},
@ -834,12 +780,10 @@ gsk_gpu_node_processor_blur_op (GskGpuNodeProcessor *self,
gsk_gpu_blur_op (self->frame,
gsk_gpu_clip_get_shader_clip (&self->clip, &real_offset, rect),
gsk_gpu_node_processor_color_states_self (self),
self->desc,
&real_offset,
&(GskGpuShaderImage) {
intermediate,
GSK_GPU_SAMPLER_TRANSPARENT,
intermediate_descriptor,
rect,
&intermediate_rect,
},
@ -1046,7 +990,6 @@ gsk_gpu_node_processor_add_rounded_clip_node_with_mask (GskGpuNodeProcessor *sel
GskGpuNodeProcessor other;
graphene_rect_t clip_bounds, child_rect;
GskGpuImage *child_image, *mask_image;
guint32 descriptors[2];
if (!gsk_gpu_node_processor_clip_node_bounds (self, node, &clip_bounds))
return;
@ -1075,17 +1018,9 @@ gsk_gpu_node_processor_add_rounded_clip_node_with_mask (GskGpuNodeProcessor *sel
(float[4]) { 1, 1, 1, 1 });
gsk_gpu_node_processor_finish_draw (&other, mask_image);
gsk_gpu_node_processor_add_two_images (self,
child_image,
GSK_GPU_SAMPLER_DEFAULT,
mask_image,
GSK_GPU_SAMPLER_DEFAULT,
descriptors);
gsk_gpu_node_processor_sync_globals (self, 0);
gsk_gpu_mask_op (self->frame,
gsk_gpu_clip_get_shader_clip (&self->clip, &self->offset, &clip_bounds),
self->desc,
&clip_bounds,
&self->offset,
self->opacity,
@ -1093,14 +1028,12 @@ gsk_gpu_node_processor_add_rounded_clip_node_with_mask (GskGpuNodeProcessor *sel
&(GskGpuShaderImage) {
child_image,
GSK_GPU_SAMPLER_DEFAULT,
descriptors[0],
NULL,
&child_rect,
},
&(GskGpuShaderImage) {
mask_image,
GSK_GPU_SAMPLER_DEFAULT,
descriptors[1],
NULL,
&clip_bounds,
});
@ -2021,7 +1954,6 @@ gsk_gpu_node_processor_add_texture_scale_node (GskGpuNodeProcessor *self,
GdkTexture *texture;
GdkColorState *image_cs;
GskScalingFilter scaling_filter;
guint32 descriptor;
gboolean need_mipmap, need_offscreen;
texture = gsk_texture_scale_node_get_texture (node);
@ -2073,15 +2005,12 @@ gsk_gpu_node_processor_add_texture_scale_node (GskGpuNodeProcessor *self,
node);
g_object_unref (image);
}
descriptor = gsk_gpu_node_processor_add_image (self, offscreen, GSK_GPU_SAMPLER_DEFAULT);
gsk_gpu_texture_op (self->frame,
gsk_gpu_clip_get_shader_clip (&self->clip, &self->offset, &node->bounds),
self->desc,
&self->offset,
&(GskGpuShaderImage) {
offscreen,
GSK_GPU_SAMPLER_DEFAULT,
descriptor,
&node->bounds,
&clip_bounds
});
@ -2104,16 +2033,12 @@ gsk_gpu_node_processor_add_texture_scale_node (GskGpuNodeProcessor *self,
if (need_mipmap && !(gsk_gpu_image_get_flags (image) & GSK_GPU_IMAGE_MIPMAP))
gsk_gpu_mipmap_op (self->frame, image);
descriptor = gsk_gpu_node_processor_add_image (self, image, gsk_gpu_sampler_for_scaling_filter (scaling_filter));
gsk_gpu_texture_op (self->frame,
gsk_gpu_clip_get_shader_clip (&self->clip, &self->offset, &node->bounds),
self->desc,
&self->offset,
&(GskGpuShaderImage) {
image,
gsk_gpu_sampler_for_scaling_filter (scaling_filter),
descriptor,
&node->bounds,
&node->bounds,
});
@ -2269,7 +2194,6 @@ gsk_gpu_node_processor_add_gradient_node (GskGpuNodeProcessor *self,
graphene_rect_t bounds;
gsize i, j;
GskGpuImage *image;
guint32 descriptor;
if (n_stops < 8)
{
@ -2343,16 +2267,12 @@ gsk_gpu_node_processor_add_gradient_node (GskGpuNodeProcessor *self,
gsk_gpu_node_processor_finish_draw (&other, image);
descriptor = gsk_gpu_node_processor_add_image (self, image, GSK_GPU_SAMPLER_DEFAULT);
gsk_gpu_texture_op (self->frame,
gsk_gpu_clip_get_shader_clip (&self->clip, &self->offset, &bounds),
self->desc,
&self->offset,
&(GskGpuShaderImage) {
image,
GSK_GPU_SAMPLER_DEFAULT,
descriptor,
&node->bounds,
&bounds
});
@ -2459,7 +2379,6 @@ gsk_gpu_node_processor_add_blur_node (GskGpuNodeProcessor *self,
GskGpuImage *image;
graphene_rect_t tex_rect, clip_rect;
float blur_radius, clip_radius;
guint32 descriptor;
child = gsk_blur_node_get_child (node);
blur_radius = gsk_blur_node_get_radius (node);
@ -2479,16 +2398,12 @@ gsk_gpu_node_processor_add_blur_node (GskGpuNodeProcessor *self,
if (image == NULL)
return;
descriptor = gsk_gpu_node_processor_add_image (self, image, GSK_GPU_SAMPLER_TRANSPARENT);
gsk_gpu_node_processor_blur_op (self,
&node->bounds,
graphene_point_zero (),
blur_radius,
NULL,
self->desc,
image,
descriptor,
gdk_memory_format_get_depth (gsk_gpu_image_get_format (image),
gsk_gpu_image_get_flags (image) & GSK_GPU_IMAGE_SRGB),
&tex_rect);
@ -2504,8 +2419,6 @@ gsk_gpu_node_processor_add_shadow_node (GskGpuNodeProcessor *self,
graphene_rect_t clip_bounds, tex_rect;
GskRenderNode *child;
gsize i, n_shadows;
GskGpuDescriptors *desc;
guint32 descriptor;
n_shadows = gsk_shadow_node_get_n_shadows (node);
child = gsk_shadow_node_get_child (node);
@ -2523,9 +2436,6 @@ gsk_gpu_node_processor_add_shadow_node (GskGpuNodeProcessor *self,
if (image == NULL)
return;
descriptor = gsk_gpu_node_processor_add_image (self, image, GSK_GPU_SAMPLER_TRANSPARENT);
desc = self->desc;
for (i = 0; i < n_shadows; i++)
{
const GskShadow *shadow = gsk_shadow_node_get_shadow (node, i);
@ -2536,12 +2446,10 @@ gsk_gpu_node_processor_add_shadow_node (GskGpuNodeProcessor *self,
gsk_gpu_colorize_op (self->frame,
gsk_gpu_clip_get_shader_clip (&self->clip, &shadow_offset, &child->bounds),
gsk_gpu_node_processor_color_states_for_rgba (self),
desc,
&shadow_offset,
&(GskGpuShaderImage) {
image,
GSK_GPU_SAMPLER_TRANSPARENT,
descriptor,
&child->bounds,
&tex_rect,
},
@ -2557,24 +2465,19 @@ gsk_gpu_node_processor_add_shadow_node (GskGpuNodeProcessor *self,
&GRAPHENE_POINT_INIT (shadow->dx, shadow->dy),
shadow->radius,
&shadow->color,
desc,
image,
descriptor,
gdk_memory_format_get_depth (gsk_gpu_image_get_format (image),
gsk_gpu_image_get_flags (image) & GSK_GPU_IMAGE_SRGB),
&tex_rect);
}
}
descriptor = gsk_gpu_node_processor_add_image (self, image, GSK_GPU_SAMPLER_DEFAULT);
gsk_gpu_texture_op (self->frame,
gsk_gpu_clip_get_shader_clip (&self->clip, &self->offset, &child->bounds),
self->desc,
&self->offset,
&(GskGpuShaderImage) {
image,
GSK_GPU_SAMPLER_DEFAULT,
descriptor,
&child->bounds,
&tex_rect,
});
@ -2603,7 +2506,6 @@ gsk_gpu_node_processor_add_blend_node (GskGpuNodeProcessor *self,
GskRenderNode *bottom_child, *top_child;
graphene_rect_t bottom_rect, top_rect;
GskGpuImage *bottom_image, *top_image;
guint32 descriptors[2];
bottom_child = gsk_blend_node_get_bottom_child (node);
top_child = gsk_blend_node_get_top_child (node);
@ -2631,16 +2533,8 @@ gsk_gpu_node_processor_add_blend_node (GskGpuNodeProcessor *self,
top_rect = *graphene_rect_zero ();
}
gsk_gpu_node_processor_add_two_images (self,
bottom_image,
GSK_GPU_SAMPLER_DEFAULT,
top_image,
GSK_GPU_SAMPLER_DEFAULT,
descriptors);
gsk_gpu_blend_mode_op (self->frame,
gsk_gpu_clip_get_shader_clip (&self->clip, &self->offset, &node->bounds),
self->desc,
&node->bounds,
&self->offset,
self->opacity,
@ -2648,14 +2542,12 @@ gsk_gpu_node_processor_add_blend_node (GskGpuNodeProcessor *self,
&(GskGpuShaderImage) {
bottom_image,
GSK_GPU_SAMPLER_DEFAULT,
descriptors[0],
NULL,
&bottom_rect
},
&(GskGpuShaderImage) {
top_image,
GSK_GPU_SAMPLER_DEFAULT,
descriptors[1],
NULL,
&top_rect
});
@ -2671,7 +2563,6 @@ gsk_gpu_node_processor_add_cross_fade_node (GskGpuNodeProcessor *self,
GskRenderNode *start_child, *end_child;
graphene_rect_t start_rect, end_rect;
GskGpuImage *start_image, *end_image;
guint32 descriptors[2];
float progress, old_opacity;
start_child = gsk_cross_fade_node_get_start_child (node);
@ -2730,17 +2621,9 @@ gsk_gpu_node_processor_add_cross_fade_node (GskGpuNodeProcessor *self,
return;
}
gsk_gpu_node_processor_add_two_images (self,
start_image,
GSK_GPU_SAMPLER_DEFAULT,
end_image,
GSK_GPU_SAMPLER_DEFAULT,
descriptors);
gsk_gpu_cross_fade_op (self->frame,
gsk_gpu_clip_get_shader_clip (&self->clip, &self->offset, &node->bounds),
gsk_gpu_node_processor_color_states_for_rgba (self),
self->desc,
&node->bounds,
&self->offset,
self->opacity,
@ -2748,14 +2631,12 @@ gsk_gpu_node_processor_add_cross_fade_node (GskGpuNodeProcessor *self,
&(GskGpuShaderImage) {
start_image,
GSK_GPU_SAMPLER_DEFAULT,
descriptors[0],
NULL,
&start_rect
},
&(GskGpuShaderImage) {
end_image,
GSK_GPU_SAMPLER_DEFAULT,
descriptors[1],
NULL,
&end_rect
});
@ -2795,16 +2676,13 @@ gsk_gpu_node_processor_add_mask_node (GskGpuNodeProcessor *self,
mask_mode == GSK_MASK_MODE_ALPHA)
{
const GdkRGBA *rgba = gsk_color_node_get_color (source_child);
guint32 descriptor = gsk_gpu_node_processor_add_image (self, mask_image, GSK_GPU_SAMPLER_DEFAULT);
gsk_gpu_colorize_op (self->frame,
gsk_gpu_clip_get_shader_clip (&self->clip, &self->offset, &node->bounds),
gsk_gpu_node_processor_color_states_for_rgba (self),
self->desc,
&self->offset,
&(GskGpuShaderImage) {
mask_image,
GSK_GPU_SAMPLER_DEFAULT,
descriptor,
&node->bounds,
&mask_rect,
},
@ -2814,7 +2692,6 @@ gsk_gpu_node_processor_add_mask_node (GskGpuNodeProcessor *self,
{
GskGpuImage *source_image;
graphene_rect_t source_rect;
guint32 descriptors[2];
source_image = gsk_gpu_node_processor_get_node_as_image (self,
&bounds,
@ -2825,16 +2702,9 @@ gsk_gpu_node_processor_add_mask_node (GskGpuNodeProcessor *self,
g_object_unref (mask_image);
return;
}
gsk_gpu_node_processor_add_two_images (self,
source_image,
GSK_GPU_SAMPLER_DEFAULT,
mask_image,
GSK_GPU_SAMPLER_DEFAULT,
descriptors);
gsk_gpu_mask_op (self->frame,
gsk_gpu_clip_get_shader_clip (&self->clip, &self->offset, &node->bounds),
self->desc,
&node->bounds,
&self->offset,
self->opacity,
@ -2842,14 +2712,12 @@ gsk_gpu_node_processor_add_mask_node (GskGpuNodeProcessor *self,
&(GskGpuShaderImage) {
source_image,
GSK_GPU_SAMPLER_DEFAULT,
descriptors[0],
NULL,
&source_rect,
},
&(GskGpuShaderImage) {
mask_image,
GSK_GPU_SAMPLER_DEFAULT,
descriptors[1],
NULL,
&mask_rect,
});
@ -2875,7 +2743,6 @@ gsk_gpu_node_processor_add_glyph_node (GskGpuNodeProcessor *self,
float inv_align_scale_x, inv_align_scale_y;
unsigned int flags_mask;
GskGpuImage *last_image;
guint32 descriptor;
const float inv_pango_scale = 1.f / PANGO_SCALE;
if (self->opacity < 1.0 &&
@ -2914,7 +2781,6 @@ gsk_gpu_node_processor_add_glyph_node (GskGpuNodeProcessor *self,
inv_align_scale_y = 1 / align_scale_y;
last_image = NULL;
descriptor = 0;
for (i = 0; i < num_glyphs; i++)
{
GskGpuImage *image;
@ -2952,20 +2818,15 @@ gsk_gpu_node_processor_add_glyph_node (GskGpuNodeProcessor *self,
glyph_origin.y - glyph_offset.y / scale);
if (image != last_image)
{
descriptor = gsk_gpu_node_processor_add_image (self, image, GSK_GPU_SAMPLER_DEFAULT);
last_image = image;
}
last_image = image;
if (glyphs[i].attr.is_color)
gsk_gpu_texture_op (self->frame,
gsk_gpu_clip_get_shader_clip (&self->clip, &glyph_offset, &glyph_bounds),
self->desc,
&glyph_origin,
&(GskGpuShaderImage) {
image,
GSK_GPU_SAMPLER_DEFAULT,
descriptor,
&glyph_bounds,
&glyph_tex_rect
});
@ -2973,12 +2834,10 @@ gsk_gpu_node_processor_add_glyph_node (GskGpuNodeProcessor *self,
gsk_gpu_colorize_op (self->frame,
gsk_gpu_clip_get_shader_clip (&self->clip, &glyph_offset, &glyph_bounds),
gsk_gpu_node_processor_color_states_for_rgba (self),
self->desc,
&glyph_origin,
&(GskGpuShaderImage) {
image,
GSK_GPU_SAMPLER_DEFAULT,
descriptor,
&glyph_bounds,
&glyph_tex_rect
},
@ -2993,7 +2852,6 @@ gsk_gpu_node_processor_add_color_matrix_node (GskGpuNodeProcessor *self,
GskRenderNode *node)
{
GskGpuImage *image;
guint32 descriptor;
GskRenderNode *child;
graphene_matrix_t opacity_matrix;
const graphene_matrix_t *color_matrix;
@ -3022,17 +2880,13 @@ gsk_gpu_node_processor_add_color_matrix_node (GskGpuNodeProcessor *self,
if (image == NULL)
return;
descriptor = gsk_gpu_node_processor_add_image (self, image, GSK_GPU_SAMPLER_DEFAULT);
gsk_gpu_color_matrix_op (self->frame,
gsk_gpu_clip_get_shader_clip (&self->clip, &self->offset, &node->bounds),
gsk_gpu_node_processor_color_states_explicit (self, self->ccs, FALSE),
self->desc,
&self->offset,
&(GskGpuShaderImage) {
image,
GSK_GPU_SAMPLER_DEFAULT,
descriptor,
&node->bounds,
&tex_rect,
},
@ -3052,7 +2906,6 @@ gsk_gpu_node_processor_repeat_tile (GskGpuNodeProcessor *self,
{
GskGpuImage *image;
graphene_rect_t clipped_child_bounds, offset_rect;
guint32 descriptor;
gsk_rect_init_offset (&offset_rect,
rect,
@ -3074,16 +2927,12 @@ gsk_gpu_node_processor_repeat_tile (GskGpuNodeProcessor *self,
g_return_if_fail (image);
descriptor = gsk_gpu_node_processor_add_image (self, image, GSK_GPU_SAMPLER_REPEAT);
gsk_gpu_texture_op (self->frame,
gsk_gpu_clip_get_shader_clip (&self->clip, &self->offset, rect),
self->desc,
&self->offset,
&(GskGpuShaderImage) {
image,
GSK_GPU_SAMPLER_REPEAT,
descriptor,
rect,
&GRAPHENE_RECT_INIT (
clipped_child_bounds.origin.x + x * child_bounds->size.width,
@ -3261,7 +3110,6 @@ gsk_gpu_node_processor_add_fill_node (GskGpuNodeProcessor *self,
{
graphene_rect_t clip_bounds, source_rect;
GskGpuImage *mask_image, *source_image;
guint32 descriptors[2];
GskRenderNode *child;
if (!gsk_gpu_node_processor_clip_node_bounds (self, node, &clip_bounds))
@ -3301,16 +3149,8 @@ gsk_gpu_node_processor_add_fill_node (GskGpuNodeProcessor *self,
if (source_image == NULL)
return;
gsk_gpu_node_processor_add_two_images (self,
source_image,
GSK_GPU_SAMPLER_DEFAULT,
mask_image,
GSK_GPU_SAMPLER_DEFAULT,
descriptors);
gsk_gpu_mask_op (self->frame,
gsk_gpu_clip_get_shader_clip (&self->clip, &self->offset, &clip_bounds),
self->desc,
&clip_bounds,
&self->offset,
self->opacity,
@ -3318,14 +3158,12 @@ gsk_gpu_node_processor_add_fill_node (GskGpuNodeProcessor *self,
&(GskGpuShaderImage) {
source_image,
GSK_GPU_SAMPLER_DEFAULT,
descriptors[0],
NULL,
&source_rect,
},
&(GskGpuShaderImage) {
mask_image,
GSK_GPU_SAMPLER_DEFAULT,
descriptors[1],
NULL,
&clip_bounds,
});
@ -3369,7 +3207,6 @@ gsk_gpu_node_processor_add_stroke_node (GskGpuNodeProcessor *self,
{
graphene_rect_t clip_bounds, source_rect;
GskGpuImage *mask_image, *source_image;
guint32 descriptors[2];
GskRenderNode *child;
if (!gsk_gpu_node_processor_clip_node_bounds (self, node, &clip_bounds))
@ -3409,16 +3246,8 @@ gsk_gpu_node_processor_add_stroke_node (GskGpuNodeProcessor *self,
if (source_image == NULL)
return;
gsk_gpu_node_processor_add_two_images (self,
source_image,
GSK_GPU_SAMPLER_DEFAULT,
mask_image,
GSK_GPU_SAMPLER_DEFAULT,
descriptors);
gsk_gpu_mask_op (self->frame,
gsk_gpu_clip_get_shader_clip (&self->clip, &self->offset, &clip_bounds),
self->desc,
&clip_bounds,
&self->offset,
self->opacity,
@ -3426,14 +3255,12 @@ gsk_gpu_node_processor_add_stroke_node (GskGpuNodeProcessor *self,
&(GskGpuShaderImage) {
source_image,
GSK_GPU_SAMPLER_DEFAULT,
descriptors[0],
NULL,
&source_rect,
},
&(GskGpuShaderImage) {
mask_image,
GSK_GPU_SAMPLER_DEFAULT,
descriptors[1],
NULL,
&clip_bounds,
});
@ -4056,7 +3883,6 @@ gsk_gpu_node_processor_process (GskGpuFrame *frame,
{
GskGpuImage *image;
graphene_rect_t clip_bounds, tex_rect;
guint32 descriptor;
/* Can't use gsk_gpu_node_processor_get_node_as_image () because of colorspaces */
if (gsk_gpu_node_processor_clip_node_bounds (&self, node, &clip_bounds))
@ -4084,18 +3910,15 @@ gsk_gpu_node_processor_process (GskGpuFrame *frame,
self.pending_globals |= GSK_GPU_GLOBAL_BLEND;
gsk_gpu_node_processor_sync_globals (&self, 0);
descriptor = gsk_gpu_node_processor_add_image (&self, image, GSK_GPU_SAMPLER_DEFAULT);
gsk_gpu_convert_op (self.frame,
gsk_gpu_clip_get_shader_clip (&self.clip, &self.offset, &node->bounds),
gsk_gpu_node_processor_color_states_explicit (&self, ccs, TRUE),
self.opacity,
self.desc,
FALSE,
&self.offset,
&(GskGpuShaderImage) {
image,
GSK_GPU_SAMPLER_DEFAULT,
descriptor,
&node->bounds,
&tex_rect
});

View File

@ -28,11 +28,9 @@ struct _GskGLCommandState
GskGpuShaderFlags flags;
GskGpuColorStates color_states;
guint32 variation;
gsize n_external;
} current_program;
GskGpuImage *current_images[2];
GskGpuSampler current_samplers[2];
GskGLDescriptors *desc;
};
#ifdef GDK_RENDERING_VULKAN
@ -43,7 +41,6 @@ struct _GskVulkanCommandState
VkCommandBuffer vk_command_buffer;
GskGpuBlend blend;
GskVulkanDescriptors *desc;
GskVulkanSemaphores *semaphores;
};
#endif

View File

@ -3,7 +3,6 @@
#include "gskgpuprintprivate.h"
#include "gskgpucolorstatesprivate.h"
#include "gskgpudescriptorsprivate.h"
#include "gskgpuimageprivate.h"
#include "gskgpushaderflagsprivate.h"
@ -167,12 +166,3 @@ gsk_gpu_print_image (GString *string,
gdk_memory_format_get_name (gsk_gpu_image_get_format (image)));
}
void
gsk_gpu_print_image_descriptor (GString *string,
GskGpuDescriptors *desc,
guint32 descriptor)
{
gsize id = gsk_gpu_descriptors_find_image (desc, descriptor);
gsk_gpu_print_image (string, gsk_gpu_descriptors_get_image (desc, id));
}

View File

@ -34,6 +34,3 @@ void gsk_gpu_print_rgba (GString
const float rgba[4]);
void gsk_gpu_print_image (GString *string,
GskGpuImage *image);
void gsk_gpu_print_image_descriptor (GString *string,
GskGpuDescriptors *desc,
guint32 descriptor);

View File

@ -81,7 +81,6 @@ gsk_gpu_radial_gradient_op (GskGpuFrame *frame,
clip,
NULL,
NULL,
NULL,
&instance);
gsk_gpu_rect_to_float (rect, offset, instance->rect);

View File

@ -11,7 +11,6 @@
#include "gskrendernodeprivate.h"
#ifdef GDK_RENDERING_VULKAN
#include "gskvulkanimageprivate.h"
#include "gskvulkandescriptorsprivate.h"
#endif
typedef struct _GskGpuRenderPassOp GskGpuRenderPassOp;
@ -72,6 +71,7 @@ static void
gsk_gpu_render_pass_op_do_barriers (GskGpuRenderPassOp *self,
GskVulkanCommandState *state)
{
#if 0
GskGpuShaderOp *shader;
GskGpuOp *op;
GskGpuDescriptors *desc = NULL;
@ -99,6 +99,7 @@ gsk_gpu_render_pass_op_do_barriers (GskGpuRenderPassOp *self,
if (desc == NULL)
gsk_vulkan_descriptors_transition (state->desc, state->semaphores, state->vk_command_buffer);
#endif
}
static GskGpuOp *

View File

@ -66,7 +66,6 @@ gsk_gpu_rounded_color_op (GskGpuFrame *frame,
clip,
NULL,
NULL,
NULL,
&instance);
gsk_rounded_rect_to_float (outline, offset, instance->outline);

View File

@ -5,12 +5,10 @@
#include "gskgpuframeprivate.h"
#include "gskgpuprintprivate.h"
#include "gskgpushaderflagsprivate.h"
#include "gskgldescriptorsprivate.h"
#include "gskgldeviceprivate.h"
#include "gskglframeprivate.h"
#include "gskglimageprivate.h"
#ifdef GDK_RENDERING_VULKAN
#include "gskvulkandescriptorsprivate.h"
#include "gskvulkandeviceprivate.h"
#endif
@ -27,7 +25,6 @@ gsk_gpu_shader_op_finish (GskGpuOp *op)
{
GskGpuShaderOp *self = (GskGpuShaderOp *) op;
g_clear_object (&self->desc);
g_clear_object (&self->images[0]);
g_clear_object (&self->images[1]);
}
@ -70,9 +67,9 @@ gsk_gpu_shader_op_vk_command_n (GskGpuOp *op,
GskVulkanCommandState *state,
gsize instance_scale)
{
#if 0
GskGpuShaderOp *self = (GskGpuShaderOp *) op;
GskGpuShaderOpClass *shader_op_class = (GskGpuShaderOpClass *) op->op_class;
GskVulkanDescriptors *desc;
GskGpuOp *next;
gsize i, n_ops, max_ops_per_draw;
@ -126,6 +123,8 @@ gsk_gpu_shader_op_vk_command_n (GskGpuOp *op,
}
return next;
#endif
return NULL;
}
GskGpuOp *
@ -145,33 +144,23 @@ gsk_gpu_shader_op_gl_command_n (GskGpuOp *op,
{
GskGpuShaderOp *self = (GskGpuShaderOp *) op;
GskGpuShaderOpClass *shader_op_class = (GskGpuShaderOpClass *) op->op_class;
GskGLDescriptors *desc;
GskGpuOp *next;
gsize i, n_ops, n_external, max_ops_per_draw;
desc = GSK_GL_DESCRIPTORS (self->desc);
if (desc)
n_external = gsk_gl_descriptors_get_n_external (desc);
else
n_external = 0;
gsize i, n_ops, max_ops_per_draw;
if (state->current_program.op_class != op->op_class ||
state->current_program.color_states != self->color_states ||
state->current_program.variation != self->variation ||
state->current_program.flags != self->flags ||
state->current_program.n_external != n_external)
state->current_program.flags != self->flags)
{
state->current_program.op_class = op->op_class;
state->current_program.flags = self->flags;
state->current_program.color_states = self->color_states;
state->current_program.variation = self->variation;
state->current_program.n_external = n_external;
gsk_gl_frame_use_program (GSK_GL_FRAME (frame),
shader_op_class,
self->flags,
self->color_states,
self->variation,
n_external);
self->variation);
}
for (i = 0; i < shader_op_class->n_textures; i++)
@ -201,7 +190,6 @@ gsk_gpu_shader_op_gl_command_n (GskGpuOp *op,
GskGpuShaderOp *next_shader = (GskGpuShaderOp *) next;
if (next->op_class != op->op_class ||
next_shader->desc != self->desc ||
next_shader->flags != self->flags ||
next_shader->color_states != self->color_states ||
next_shader->variation != self->variation ||
@ -252,7 +240,6 @@ gsk_gpu_shader_op_alloc (GskGpuFrame *frame,
GskGpuColorStates color_states,
guint32 variation,
GskGpuShaderClip clip,
GskGpuDescriptors *desc,
GskGpuImage **images,
GskGpuSampler *samplers,
gpointer out_vertex_data)
@ -300,10 +287,6 @@ gsk_gpu_shader_op_alloc (GskGpuFrame *frame,
self->color_states = color_states;
self->variation = variation;
self->vertex_offset = vertex_offset;
if (desc)
self->desc = g_object_ref (desc);
else
self->desc = NULL;
self->n_ops = 1;
for (i = 0; i < op_class->n_textures; i++)
{

View File

@ -13,7 +13,6 @@ struct _GskGpuShaderImage
{
GskGpuImage *image; /* image to draw */
GskGpuSampler sampler; /* sampler to use for image */
guint32 descriptor; /* FIXME: preallocated descriptor for image + sampler */
const graphene_rect_t *coverage; /* the clip area for the image or NULL for unclipped */
const graphene_rect_t *bounds; /* bounds for the image */
};
@ -22,7 +21,6 @@ struct _GskGpuShaderOp
{
GskGpuOp parent_op;
GskGpuDescriptors *desc;
GskGpuImage *images[2];
GskGpuSampler samplers[2];
GskGpuShaderFlags flags;
@ -54,7 +52,6 @@ void gsk_gpu_shader_op_alloc (GskGpuF
GskGpuColorStates color_states,
guint32 variation,
GskGpuShaderClip clip,
GskGpuDescriptors *desc,
GskGpuImage **images,
GskGpuSampler *samplers,
gpointer out_vertex_data);

View File

@ -51,7 +51,6 @@ static const GskGpuShaderOpClass GSK_GPU_TEXTURE_OP_CLASS = {
void
gsk_gpu_texture_op (GskGpuFrame *frame,
GskGpuShaderClip clip,
GskGpuDescriptors *desc,
const graphene_point_t *offset,
const GskGpuShaderImage *image)
{
@ -62,7 +61,6 @@ gsk_gpu_texture_op (GskGpuFrame *frame,
gsk_gpu_color_states_create_equal (TRUE, TRUE),
0,
clip,
desc,
(GskGpuImage *[1]) { image->image },
(GskGpuSampler[1]) { image->sampler },
&instance);

View File

@ -8,7 +8,6 @@ G_BEGIN_DECLS
void gsk_gpu_texture_op (GskGpuFrame *frame,
GskGpuShaderClip clip,
GskGpuDescriptors *desc,
const graphene_point_t *offset,
const GskGpuShaderImage *image);

View File

@ -5,11 +5,9 @@
#include "gdk/gdkmemoryformatprivate.h"
typedef struct _GskGLDescriptors GskGLDescriptors;
typedef struct _GskGpuBuffer GskGpuBuffer;
typedef struct _GskGpuCache GskGpuCache;
typedef guint32 GskGpuColorStates;
typedef struct _GskGpuDescriptors GskGpuDescriptors;
typedef struct _GskGpuDevice GskGpuDevice;
typedef struct _GskGpuFrame GskGpuFrame;
typedef struct _GskGpuImage GskGpuImage;
@ -19,7 +17,6 @@ typedef guint32 GskGpuShaderFlags;
typedef struct _GskGpuShaderImage GskGpuShaderImage;
typedef struct _GskGpuShaderOp GskGpuShaderOp;
typedef struct _GskGpuShaderOpClass GskGpuShaderOpClass;
typedef struct _GskVulkanDescriptors GskVulkanDescriptors;
typedef struct _GskVulkanSemaphores GskVulkanSemaphores;
typedef enum {

View File

@ -1,51 +0,0 @@
#include "config.h"
#include "gskvulkandescriptorsprivate.h"
#include "gskvulkanframeprivate.h"
#include "gskvulkanimageprivate.h"
G_DEFINE_TYPE (GskVulkanDescriptors, gsk_vulkan_descriptors, GSK_TYPE_GPU_DESCRIPTORS)
static void
gsk_vulkan_descriptors_class_init (GskVulkanDescriptorsClass *klass)
{
}
static void
gsk_vulkan_descriptors_init (GskVulkanDescriptors *self)
{
}
GskVulkanPipelineLayout *
gsk_vulkan_descriptors_get_pipeline_layout (GskVulkanDescriptors *self)
{
return GSK_VULKAN_DESCRIPTORS_GET_CLASS (self)->get_pipeline_layout (self);
}
void
gsk_vulkan_descriptors_transition (GskVulkanDescriptors *self,
GskVulkanSemaphores *semaphores,
VkCommandBuffer vk_command_buffer)
{
GskGpuDescriptors *desc = GSK_GPU_DESCRIPTORS (self);
gsize i;
for (i = 0; i < gsk_gpu_descriptors_get_n_images (desc); i++)
{
gsk_vulkan_image_transition (GSK_VULKAN_IMAGE (gsk_gpu_descriptors_get_image (desc, i)),
semaphores,
vk_command_buffer,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
VK_ACCESS_SHADER_READ_BIT);
}
}
void
gsk_vulkan_descriptors_bind (GskVulkanDescriptors *self,
GskVulkanDescriptors *previous,
VkCommandBuffer vk_command_buffer)
{
GSK_VULKAN_DESCRIPTORS_GET_CLASS (self)->bind (self, previous, vk_command_buffer);
}

View File

@ -1,47 +0,0 @@
#pragma once
#include "gskgpudescriptorsprivate.h"
#include "gskvulkandeviceprivate.h"
G_BEGIN_DECLS
#define GSK_TYPE_VULKAN_DESCRIPTORS (gsk_vulkan_descriptors_get_type ())
#define GSK_VULKAN_DESCRIPTORS(o) (G_TYPE_CHECK_INSTANCE_CAST ((o), GSK_TYPE_VULKAN_DESCRIPTORS, GskVulkanDescriptors))
#define GSK_VULKAN_DESCRIPTORS_CLASS(k) (G_TYPE_CHECK_CLASS_CAST ((k), GSK_TYPE_VULKAN_DESCRIPTORS, GskVulkanDescriptorsClass))
#define GSK_IS_VULKAN_DESCRIPTORS(o) (G_TYPE_CHECK_INSTANCE_TYPE ((o), GSK_TYPE_VULKAN_DESCRIPTORS))
#define GSK_IS_VULKAN_DESCRIPTORS_CLASS(k) (G_TYPE_CHECK_CLASS_TYPE ((k), GSK_TYPE_VULKAN_DESCRIPTORS))
#define GSK_VULKAN_DESCRIPTORS_GET_CLASS(o) (G_TYPE_INSTANCE_GET_CLASS ((o), GSK_TYPE_VULKAN_DESCRIPTORS, GskVulkanDescriptorsClass))
typedef struct _GskVulkanDescriptorsClass GskVulkanDescriptorsClass;
struct _GskVulkanDescriptors
{
GskGpuDescriptors parent_instance;
};
struct _GskVulkanDescriptorsClass
{
GskGpuDescriptorsClass parent_class;
GskVulkanPipelineLayout * (* get_pipeline_layout) (GskVulkanDescriptors *self);
void (* bind) (GskVulkanDescriptors *self,
GskVulkanDescriptors *previous,
VkCommandBuffer vk_command_buffer);
};
GType gsk_vulkan_descriptors_get_type (void) G_GNUC_CONST;
GskVulkanPipelineLayout * gsk_vulkan_descriptors_get_pipeline_layout (GskVulkanDescriptors *self);
void gsk_vulkan_descriptors_transition (GskVulkanDescriptors *self,
GskVulkanSemaphores *semaphores,
VkCommandBuffer vk_command_buffer);
void gsk_vulkan_descriptors_bind (GskVulkanDescriptors *self,
GskVulkanDescriptors *previous,
VkCommandBuffer vk_command_buffer);
G_DEFINE_AUTOPTR_CLEANUP_FUNC(GskVulkanDescriptors, g_object_unref)
G_END_DECLS

View File

@ -4,23 +4,13 @@
#include "gskgpuopprivate.h"
#include "gskvulkanbufferprivate.h"
#include "gskvulkandescriptorsprivate.h"
#include "gskvulkandeviceprivate.h"
#include "gskvulkanimageprivate.h"
#include "gskvulkanrealdescriptorsprivate.h"
#include "gskvulkansubdescriptorsprivate.h"
#include "gdk/gdkdmabuftextureprivate.h"
#include "gdk/gdkglcontextprivate.h"
#include "gdk/gdkgltextureprivate.h"
#define GDK_ARRAY_NAME gsk_descriptors
#define GDK_ARRAY_TYPE_NAME GskDescriptors
#define GDK_ARRAY_ELEMENT_TYPE GskVulkanRealDescriptors *
#define GDK_ARRAY_FREE_FUNC g_object_unref
#define GDK_ARRAY_NO_MEMSET 1
#include "gdk/gdkarrayimpl.c"
#define GDK_ARRAY_NAME gsk_semaphores
#define GDK_ARRAY_TYPE_NAME GskSemaphores
#define GDK_ARRAY_ELEMENT_TYPE VkSemaphore
@ -51,8 +41,6 @@ struct _GskVulkanFrame
VkCommandBuffer vk_command_buffer;
VkDescriptorPool vk_descriptor_pool;
GskDescriptors descriptors;
gsize pool_n_sets;
gsize pool_n_images;
gsize pool_n_buffers;
@ -158,8 +146,6 @@ gsk_vulkan_frame_cleanup (GskGpuFrame *frame)
0);
}
gsk_descriptors_set_size (&self->descriptors, 0);
GSK_GPU_FRAME_CLASS (gsk_vulkan_frame_parent_class)->cleanup (frame);
}
@ -240,6 +226,7 @@ gsk_vulkan_frame_upload_texture (GskGpuFrame *frame,
static void
gsk_vulkan_frame_prepare_descriptors (GskVulkanFrame *self)
{
#if 0
GskVulkanDevice *device;
VkDevice vk_device;
gsize i, n_images, n_sets;
@ -302,43 +289,7 @@ gsk_vulkan_frame_prepare_descriptors (GskVulkanFrame *self)
gsk_vulkan_real_descriptors_update_sets (desc, self->vk_descriptor_pool);
}
}
static GskGpuDescriptors *
gsk_vulkan_frame_create_descriptors (GskGpuFrame *frame)
{
GskVulkanFrame *self = GSK_VULKAN_FRAME (frame);
if (gsk_vulkan_device_has_feature (GSK_VULKAN_DEVICE (gsk_gpu_frame_get_device (frame)), GDK_VULKAN_FEATURE_DESCRIPTOR_INDEXING))
{
GskVulkanRealDescriptors *parent;
if (gsk_descriptors_get_size (&self->descriptors) > 0)
{
parent = gsk_descriptors_get (&self->descriptors, gsk_descriptors_get_size (&self->descriptors) - 1);
if (gsk_vulkan_real_descriptors_is_full (parent))
parent = NULL;
}
else
parent = NULL;
if (parent == NULL)
{
parent = gsk_vulkan_real_descriptors_new (self);
gsk_descriptors_append (&self->descriptors, parent);
}
return GSK_GPU_DESCRIPTORS (gsk_vulkan_sub_descriptors_new (GSK_VULKAN_DESCRIPTORS (parent)));
}
else
{
GskVulkanRealDescriptors *desc;
desc = gsk_vulkan_real_descriptors_new (self);
gsk_descriptors_append (&self->descriptors, desc);
return GSK_GPU_DESCRIPTORS (g_object_ref (desc));
}
#endif
}
static GskGpuBuffer *
@ -374,8 +325,10 @@ gsk_vulkan_frame_submit (GskGpuFrame *frame,
GskVulkanSemaphores semaphores;
GskVulkanCommandState state;
#if 0
if (gsk_descriptors_get_size (&self->descriptors) == 0)
gsk_descriptors_append (&self->descriptors, gsk_vulkan_real_descriptors_new (self));
#endif
gsk_vulkan_frame_prepare_descriptors (self);
@ -409,13 +362,8 @@ gsk_vulkan_frame_submit (GskGpuFrame *frame,
state.vk_render_pass = VK_NULL_HANDLE;
state.vk_format = VK_FORMAT_UNDEFINED;
state.blend = GSK_GPU_BLEND_OVER; /* should we have a BLEND_NONE? */
state.desc = GSK_VULKAN_DESCRIPTORS (gsk_descriptors_get (&self->descriptors, 0));
state.semaphores = &semaphores;
gsk_vulkan_descriptors_bind (GSK_VULKAN_DESCRIPTORS (gsk_descriptors_get (&self->descriptors, 0)),
NULL,
state.vk_command_buffer);
while (op)
{
op = gsk_gpu_op_vk_command (op, frame, &state);
@ -460,7 +408,6 @@ gsk_vulkan_frame_finalize (GObject *object)
self->vk_descriptor_pool,
NULL);
}
gsk_descriptors_clear (&self->descriptors);
vkFreeCommandBuffers (vk_device,
vk_command_pool,
@ -487,7 +434,6 @@ gsk_vulkan_frame_class_init (GskVulkanFrameClass *klass)
gpu_frame_class->cleanup = gsk_vulkan_frame_cleanup;
gpu_frame_class->begin = gsk_vulkan_frame_begin;
gpu_frame_class->upload_texture = gsk_vulkan_frame_upload_texture;
gpu_frame_class->create_descriptors = gsk_vulkan_frame_create_descriptors;
gpu_frame_class->create_vertex_buffer = gsk_vulkan_frame_create_vertex_buffer;
gpu_frame_class->create_storage_buffer = gsk_vulkan_frame_create_storage_buffer;
gpu_frame_class->write_texture_vertex_data = gsk_vulkan_frame_write_texture_vertex_data;
@ -499,8 +445,6 @@ gsk_vulkan_frame_class_init (GskVulkanFrameClass *klass)
static void
gsk_vulkan_frame_init (GskVulkanFrame *self)
{
gsk_descriptors_init (&self->descriptors);
self->pool_n_sets = 4;
self->pool_n_images = 8;
self->pool_n_buffers = 8;

View File

@ -1,308 +0,0 @@
#include "config.h"
#include "gskvulkanrealdescriptorsprivate.h"
#include "gskgpucacheprivate.h"
#include "gskvulkanframeprivate.h"
#include "gskvulkanimageprivate.h"
#define GDK_ARRAY_NAME gsk_descriptor_image_infos
#define GDK_ARRAY_TYPE_NAME GskDescriptorImageInfos
#define GDK_ARRAY_ELEMENT_TYPE VkDescriptorImageInfo
#define GDK_ARRAY_BY_VALUE 1
#define GDK_ARRAY_PREALLOC 128
#define GDK_ARRAY_NO_MEMSET 1
#include "gdk/gdkarrayimpl.c"
#define GDK_ARRAY_NAME gsk_samplers
#define GDK_ARRAY_TYPE_NAME GskSamplers
#define GDK_ARRAY_ELEMENT_TYPE VkSampler
#define GDK_ARRAY_PREALLOC 32
#define GDK_ARRAY_NO_MEMSET 1
#include "gdk/gdkarrayimpl.c"
struct _GskVulkanRealDescriptors
{
GskVulkanDescriptors parent_instance;
GskVulkanFrame *frame; /* no reference, the frame owns us */
GskVulkanPipelineLayout *pipeline_layout;
GskSamplers immutable_samplers;
GskDescriptorImageInfos descriptor_immutable_images;
GskDescriptorImageInfos descriptor_images;
VkDescriptorSet descriptor_sets[GSK_VULKAN_N_DESCRIPTOR_SETS];
};
G_DEFINE_TYPE (GskVulkanRealDescriptors, gsk_vulkan_real_descriptors, GSK_TYPE_VULKAN_DESCRIPTORS)
static GskVulkanPipelineLayout *
gsk_vulkan_real_descriptors_get_pipeline_layout (GskVulkanDescriptors *desc)
{
GskVulkanRealDescriptors *self = GSK_VULKAN_REAL_DESCRIPTORS (desc);
return self->pipeline_layout;
}
static void
gsk_vulkan_real_descriptors_bind (GskVulkanDescriptors *desc,
GskVulkanDescriptors *previous,
VkCommandBuffer vk_command_buffer)
{
GskVulkanRealDescriptors *self = GSK_VULKAN_REAL_DESCRIPTORS (desc);
if (desc == previous)
return;
vkCmdBindDescriptorSets (vk_command_buffer,
VK_PIPELINE_BIND_POINT_GRAPHICS,
gsk_vulkan_device_get_vk_pipeline_layout (GSK_VULKAN_DEVICE (gsk_gpu_frame_get_device (GSK_GPU_FRAME (self->frame))),
self->pipeline_layout),
0,
G_N_ELEMENTS (self->descriptor_sets),
self->descriptor_sets,
0,
NULL);
}
static gboolean
gsk_vulkan_real_descriptors_add_image (GskGpuDescriptors *desc,
GskGpuImage *image,
GskGpuSampler sampler,
guint32 *out_descriptor)
{
GskVulkanRealDescriptors *self = GSK_VULKAN_REAL_DESCRIPTORS (desc);
GskVulkanImage *vulkan_image = GSK_VULKAN_IMAGE (image);
GskVulkanDevice *device = GSK_VULKAN_DEVICE (gsk_gpu_frame_get_device (GSK_GPU_FRAME (self->frame)));
VkSampler vk_sampler;
guint32 result;
vk_sampler = gsk_vulkan_image_get_vk_sampler (vulkan_image);
if (vk_sampler)
{
if (gsk_descriptor_image_infos_get_size (&self->descriptor_immutable_images) >=
gsk_vulkan_device_get_max_immutable_samplers (device))
return FALSE;
if ((1 + gsk_descriptor_image_infos_get_size (&self->descriptor_immutable_images)) * 3 +
gsk_descriptor_image_infos_get_size (&self->descriptor_images) >
gsk_vulkan_device_get_max_samplers (device))
return FALSE;
result = gsk_descriptor_image_infos_get_size (&self->descriptor_immutable_images) << 1 | 1;
gsk_samplers_append (&self->immutable_samplers, vk_sampler);
gsk_descriptor_image_infos_append (&self->descriptor_immutable_images,
&(VkDescriptorImageInfo) {
.imageView = gsk_vulkan_image_get_vk_image_view (vulkan_image),
.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
});
}
else
{
if (MAX (1, gsk_descriptor_image_infos_get_size (&self->descriptor_immutable_images) * 3) +
gsk_descriptor_image_infos_get_size (&self->descriptor_images) >=
gsk_vulkan_device_get_max_samplers (device))
return FALSE;
result = gsk_descriptor_image_infos_get_size (&self->descriptor_images) << 1;
gsk_descriptor_image_infos_append (&self->descriptor_images,
&(VkDescriptorImageInfo) {
.sampler = gsk_vulkan_device_get_vk_sampler (device, sampler),
.imageView = gsk_vulkan_image_get_vk_image_view (vulkan_image),
.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
});
}
*out_descriptor = result;
return TRUE;
}
static void
gsk_vulkan_real_descriptors_finalize (GObject *object)
{
GskVulkanRealDescriptors *self = GSK_VULKAN_REAL_DESCRIPTORS (object);
gsk_samplers_clear (&self->immutable_samplers);
gsk_descriptor_image_infos_clear (&self->descriptor_immutable_images);
gsk_descriptor_image_infos_clear (&self->descriptor_images);
gsk_vulkan_device_release_pipeline_layout (GSK_VULKAN_DEVICE (gsk_gpu_frame_get_device (GSK_GPU_FRAME (self->frame))),
self->pipeline_layout);
G_OBJECT_CLASS (gsk_vulkan_real_descriptors_parent_class)->finalize (object);
}
static void
gsk_vulkan_real_descriptors_class_init (GskVulkanRealDescriptorsClass *klass)
{
GskVulkanDescriptorsClass *vulkan_descriptors_class = GSK_VULKAN_DESCRIPTORS_CLASS (klass);
GskGpuDescriptorsClass *descriptors_class = GSK_GPU_DESCRIPTORS_CLASS (klass);
GObjectClass *object_class = G_OBJECT_CLASS (klass);
object_class->finalize = gsk_vulkan_real_descriptors_finalize;
descriptors_class->add_image = gsk_vulkan_real_descriptors_add_image;
vulkan_descriptors_class->get_pipeline_layout = gsk_vulkan_real_descriptors_get_pipeline_layout;
vulkan_descriptors_class->bind = gsk_vulkan_real_descriptors_bind;
}
static void
gsk_vulkan_real_descriptors_init (GskVulkanRealDescriptors *self)
{
gsk_samplers_init (&self->immutable_samplers);
gsk_descriptor_image_infos_init (&self->descriptor_immutable_images);
gsk_descriptor_image_infos_init (&self->descriptor_images);
}
GskVulkanRealDescriptors *
gsk_vulkan_real_descriptors_new (GskVulkanFrame *frame)
{
GskVulkanRealDescriptors *self;
self = g_object_new (GSK_TYPE_VULKAN_REAL_DESCRIPTORS, NULL);
self->frame = frame;
return self;
}
gboolean
gsk_vulkan_real_descriptors_is_full (GskVulkanRealDescriptors *self)
{
GskVulkanDevice *device = GSK_VULKAN_DEVICE (gsk_gpu_frame_get_device (GSK_GPU_FRAME (self->frame)));
return gsk_descriptor_image_infos_get_size (&self->descriptor_immutable_images) >= gsk_vulkan_device_get_max_immutable_samplers (device) ||
gsk_descriptor_image_infos_get_size (&self->descriptor_images) +
MAX (1, gsk_descriptor_image_infos_get_size (&self->descriptor_immutable_images) * 3) >=
gsk_vulkan_device_get_max_samplers (device);
}
/* gsk_vulkan_real_descriptors_fill_sets:
 * @self: the descriptors
 *
 * Pads the descriptor arrays so that every slot the pipeline layout
 * declares is backed by a valid image descriptor. This is only necessary
 * when the device lacks the descriptor-indexing feature (whose
 * partially-bound descriptors allow unused slots to stay unwritten).
 */
static void
gsk_vulkan_real_descriptors_fill_sets (GskVulkanRealDescriptors *self)
{
gsize n_immutable_samplers, n_samplers, n_buffers;
GskVulkanDevice *device;
device = GSK_VULKAN_DEVICE (gsk_gpu_frame_get_device (GSK_GPU_FRAME (self->frame)));
/* With descriptor indexing, partially filled sets are fine: nothing to do. */
if (gsk_vulkan_device_has_feature (device, GDK_VULKAN_FEATURE_DESCRIPTOR_INDEXING))
return;
/* If descriptor indexing isn't supported, all descriptors in the shaders
 * must be properly setup. And that means we need to have
 * descriptors for all of them.
 */
gsk_vulkan_device_get_pipeline_sizes (device,
self->pipeline_layout,
&n_immutable_samplers,
&n_samplers,
&n_buffers);
if (gsk_descriptor_image_infos_get_size (&self->descriptor_images) == 0)
{
/* We have no image, find any random image and attach it */
guint32 ignored;
if (!gsk_gpu_descriptors_add_image (GSK_GPU_DESCRIPTORS (self),
gsk_gpu_cache_get_atlas_image (gsk_gpu_device_get_cache (GSK_GPU_DEVICE (device))),
GSK_GPU_SAMPLER_DEFAULT,
&ignored))
{
g_assert_not_reached ();
}
}
/* Duplicate descriptor_images[0] (guaranteed to exist by the block above)
 * into every unfilled immutable-sampler slot. */
while (MAX (1, n_immutable_samplers) > gsk_descriptor_image_infos_get_size (&self->descriptor_immutable_images))
{
gsk_descriptor_image_infos_append (&self->descriptor_immutable_images, gsk_descriptor_image_infos_get (&self->descriptor_images, 0));
}
/* ... and into every unfilled regular sampler slot.
 * NOTE(review): `n_samplers - MAX (1, 3 * n_immutable_samplers)` mirrors
 * the 3x weighting in gsk_vulkan_real_descriptors_is_full() — keep in sync. */
while (n_samplers - MAX (1, 3 * n_immutable_samplers) > gsk_descriptor_image_infos_get_size (&self->descriptor_images))
{
gsk_descriptor_image_infos_append (&self->descriptor_images, gsk_descriptor_image_infos_get (&self->descriptor_images, 0));
}
}
/* gsk_vulkan_real_descriptors_prepare:
 * @self: the descriptors
 * @n_images: (out): the number of image descriptors that the descriptor
 *   pool must provide room for
 *
 * Acquires a pipeline layout matching the collected immutable samplers and
 * images, pads the descriptor arrays for devices without descriptor
 * indexing (see fill_sets), and reports the resulting image count.
 * Must run before gsk_vulkan_real_descriptors_update_sets().
 */
void
gsk_vulkan_real_descriptors_prepare (GskVulkanRealDescriptors *self,
gsize *n_images)
{
/* The final argument (a buffer count) is hardcoded to 0 here.
 * NOTE(review): presumably buffers are handled elsewhere now — confirm. */
self->pipeline_layout = gsk_vulkan_device_acquire_pipeline_layout (GSK_VULKAN_DEVICE (gsk_gpu_frame_get_device (GSK_GPU_FRAME (self->frame))),
gsk_samplers_get_data (&self->immutable_samplers),
gsk_samplers_get_size (&self->immutable_samplers),
gsk_descriptor_image_infos_get_size (&self->descriptor_images),
0);
/* Padding may append more images, so count only after filling. */
gsk_vulkan_real_descriptors_fill_sets (self);
/* At least 1 immutable slot is always reserved, matching fill_sets(). */
*n_images = MAX (1, gsk_descriptor_image_infos_get_size (&self->descriptor_immutable_images)) +
gsk_descriptor_image_infos_get_size (&self->descriptor_images);
}
/* gsk_vulkan_real_descriptors_update_sets:
 * @self: the descriptors
 * @vk_descriptor_pool: pool to allocate the Vulkan descriptor sets from
 *
 * Allocates the Vulkan descriptor sets from @vk_descriptor_pool and writes
 * the collected image infos into them. Requires that
 * gsk_vulkan_real_descriptors_prepare() has run (it sets pipeline_layout
 * and finalizes the image arrays).
 */
void
gsk_vulkan_real_descriptors_update_sets (GskVulkanRealDescriptors *self,
VkDescriptorPool vk_descriptor_pool)
{
/* At most one write per image binding (immutable + regular); the +1 leaves
 * headroom. NOTE(review): only 2 entries are ever filled below — confirm
 * whether the extra slot is still needed. */
VkWriteDescriptorSet write_descriptor_sets[GSK_VULKAN_N_DESCRIPTOR_SETS + 1];
gsize n_descriptor_sets;
VkDevice vk_device;
gboolean descriptor_indexing;
GskVulkanDevice *device;
device = GSK_VULKAN_DEVICE (gsk_gpu_frame_get_device (GSK_GPU_FRAME (self->frame)));
descriptor_indexing = gsk_vulkan_device_has_feature (device, GDK_VULKAN_FEATURE_DESCRIPTOR_INDEXING);
vk_device = gsk_vulkan_device_get_vk_device (device);
/* Allocate one set per layout (image set + buffer set). With descriptor
 * indexing, the variable-count pNext chain sizes the runtime image array. */
GSK_VK_CHECK (vkAllocateDescriptorSets, vk_device,
&(VkDescriptorSetAllocateInfo) {
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
.descriptorPool = vk_descriptor_pool,
.descriptorSetCount = GSK_VULKAN_N_DESCRIPTOR_SETS,
.pSetLayouts = (VkDescriptorSetLayout[GSK_VULKAN_N_DESCRIPTOR_SETS]) {
gsk_vulkan_device_get_vk_image_set_layout (device, self->pipeline_layout),
gsk_vulkan_device_get_vk_buffer_set_layout (device, self->pipeline_layout),
},
.pNext = !descriptor_indexing ? NULL : &(VkDescriptorSetVariableDescriptorCountAllocateInfo) {
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO,
.descriptorSetCount = GSK_VULKAN_N_DESCRIPTOR_SETS,
.pDescriptorCounts = (uint32_t[GSK_VULKAN_N_DESCRIPTOR_SETS]) {
gsk_descriptor_image_infos_get_size (&self->descriptor_images),
0,
}
}
},
self->descriptor_sets);
/* Collect only non-empty writes: Vulkan forbids descriptorCount == 0. */
n_descriptor_sets = 0;
if (gsk_descriptor_image_infos_get_size (&self->descriptor_immutable_images) > 0)
{
/* Binding 0 of the image set: the immutable-sampler images. */
write_descriptor_sets[n_descriptor_sets++] = (VkWriteDescriptorSet) {
.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
.dstSet = self->descriptor_sets[GSK_VULKAN_IMAGE_SET_LAYOUT],
.dstBinding = 0,
.dstArrayElement = 0,
.descriptorCount = gsk_descriptor_image_infos_get_size (&self->descriptor_immutable_images),
.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.pImageInfo = gsk_descriptor_image_infos_get_data (&self->descriptor_immutable_images)
};
}
if (gsk_descriptor_image_infos_get_size (&self->descriptor_images) > 0)
{
/* Binding 1 of the image set: the regular sampled images. */
write_descriptor_sets[n_descriptor_sets++] = (VkWriteDescriptorSet) {
.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
.dstSet = self->descriptor_sets[GSK_VULKAN_IMAGE_SET_LAYOUT],
.dstBinding = 1,
.dstArrayElement = 0,
.descriptorCount = gsk_descriptor_image_infos_get_size (&self->descriptor_images),
.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.pImageInfo = gsk_descriptor_image_infos_get_data (&self->descriptor_images)
};
}
vkUpdateDescriptorSets (vk_device,
n_descriptor_sets,
write_descriptor_sets,
0, NULL);
}

View File

@ -1,22 +0,0 @@
#pragma once
#include "gskvulkandescriptorsprivate.h"
#include "gskvulkanframeprivate.h"
G_BEGIN_DECLS
#define GSK_TYPE_VULKAN_REAL_DESCRIPTORS (gsk_vulkan_real_descriptors_get_type ())
G_DECLARE_FINAL_TYPE (GskVulkanRealDescriptors, gsk_vulkan_real_descriptors, GSK, VULKAN_REAL_DESCRIPTORS, GskVulkanDescriptors)
/* Creates descriptors tied to @frame (the frame is not reffed). */
GskVulkanRealDescriptors * gsk_vulkan_real_descriptors_new (GskVulkanFrame *frame);
/* TRUE when no more images fit within the device's sampler limits. */
gboolean gsk_vulkan_real_descriptors_is_full (GskVulkanRealDescriptors *self);
/* Acquires the pipeline layout and returns the image count in @n_images;
 * call before _update_sets(). */
void gsk_vulkan_real_descriptors_prepare (GskVulkanRealDescriptors *self,
gsize *n_images);
/* Allocates the Vulkan descriptor sets from the pool and writes the
 * collected image descriptors into them. */
void gsk_vulkan_real_descriptors_update_sets (GskVulkanRealDescriptors *self,
VkDescriptorPool vk_descriptor_pool);
G_END_DECLS

View File

@ -1,93 +0,0 @@
#include "config.h"
#include "gskvulkansubdescriptorsprivate.h"
/* GskVulkanSubDescriptors is a thin proxy: it owns no descriptor storage of
 * its own and forwards add_image, pipeline-layout lookup and binding to a
 * parent GskVulkanDescriptors (see the vfuncs below). */
struct _GskVulkanSubDescriptors
{
GskVulkanDescriptors parent_instance;
/* The delegate; reffed in _new(), released in finalize(). */
GskVulkanDescriptors *parent;
};
G_DEFINE_TYPE (GskVulkanSubDescriptors, gsk_vulkan_sub_descriptors, GSK_TYPE_VULKAN_DESCRIPTORS)
static GskVulkanPipelineLayout *
gsk_vulkan_sub_descriptors_get_pipeline_layout (GskVulkanDescriptors *desc)
{
  GskVulkanSubDescriptors *self = GSK_VULKAN_SUB_DESCRIPTORS (desc);
  GskVulkanPipelineLayout *layout;

  /* Sub-descriptors carry no layout themselves; ask the parent. */
  layout = gsk_vulkan_descriptors_get_pipeline_layout (self->parent);

  return layout;
}
static void
gsk_vulkan_sub_descriptors_bind (GskVulkanDescriptors *desc,
                                 GskVulkanDescriptors *previous,
                                 VkCommandBuffer       vk_command_buffer)
{
  GskVulkanDescriptors *target = GSK_VULKAN_SUB_DESCRIPTORS (desc)->parent;

  /* Unwrap a previously bound sub-descriptors to its real parent, so we
   * can tell whether our own parent is effectively bound already. */
  if (GSK_IS_VULKAN_SUB_DESCRIPTORS (previous))
    previous = GSK_VULKAN_SUB_DESCRIPTORS (previous)->parent;

  /* Skip the rebind when nothing would change. */
  if (target != previous)
    gsk_vulkan_descriptors_bind (target, previous, vk_command_buffer);
}
static gboolean
gsk_vulkan_sub_descriptors_add_image (GskGpuDescriptors *desc,
                                      GskGpuImage       *image,
                                      GskGpuSampler      sampler,
                                      guint32           *out_descriptor)
{
  GskVulkanSubDescriptors *self = GSK_VULKAN_SUB_DESCRIPTORS (desc);
  GskGpuDescriptors *target = GSK_GPU_DESCRIPTORS (self->parent);

  /* Images are stored in the parent's arrays; just delegate. */
  return gsk_gpu_descriptors_add_image (target, image, sampler, out_descriptor);
}
static void
gsk_vulkan_sub_descriptors_finalize (GObject *object)
{
GskVulkanSubDescriptors *self = GSK_VULKAN_SUB_DESCRIPTORS (object);
/* Release the reference taken in gsk_vulkan_sub_descriptors_new(). */
g_object_unref (self->parent);
/* Chain up to the parent class last, as is standard for finalize. */
G_OBJECT_CLASS (gsk_vulkan_sub_descriptors_parent_class)->finalize (object);
}
static void
gsk_vulkan_sub_descriptors_class_init (GskVulkanSubDescriptorsClass *klass)
{
  GObjectClass *gobject_class = G_OBJECT_CLASS (klass);
  GskGpuDescriptorsClass *gpu_class = GSK_GPU_DESCRIPTORS_CLASS (klass);
  GskVulkanDescriptorsClass *vulkan_class = GSK_VULKAN_DESCRIPTORS_CLASS (klass);

  /* Destructor releases the ref on the parent descriptors. */
  gobject_class->finalize = gsk_vulkan_sub_descriptors_finalize;

  /* All descriptor operations forward to the parent (see each vfunc). */
  gpu_class->add_image = gsk_vulkan_sub_descriptors_add_image;
  vulkan_class->get_pipeline_layout = gsk_vulkan_sub_descriptors_get_pipeline_layout;
  vulkan_class->bind = gsk_vulkan_sub_descriptors_bind;
}
static void
gsk_vulkan_sub_descriptors_init (GskVulkanSubDescriptors *self)
{
/* Nothing to set up: ->parent is assigned in gsk_vulkan_sub_descriptors_new(). */
}
GskVulkanSubDescriptors *
gsk_vulkan_sub_descriptors_new (GskVulkanDescriptors *parent)
{
  GskVulkanSubDescriptors *result;

  result = g_object_new (GSK_TYPE_VULKAN_SUB_DESCRIPTORS, NULL);

  /* Keep the delegate alive for as long as we forward to it;
   * released in finalize(). */
  result->parent = g_object_ref (parent);

  return result;
}

View File

@ -1,14 +0,0 @@
#pragma once
#include "gskvulkandescriptorsprivate.h"
G_BEGIN_DECLS
#define GSK_TYPE_VULKAN_SUB_DESCRIPTORS (gsk_vulkan_sub_descriptors_get_type ())
G_DECLARE_FINAL_TYPE (GskVulkanSubDescriptors, gsk_vulkan_sub_descriptors, GSK, VULKAN_SUB_DESCRIPTORS, GskVulkanDescriptors)
/* Creates a proxy that forwards all descriptor operations to @parent
 * (takes a reference on @parent). */
GskVulkanSubDescriptors * gsk_vulkan_sub_descriptors_new (GskVulkanDescriptors *parent);
G_END_DECLS

View File

@ -71,7 +71,6 @@ gsk_private_sources = files([
'gl/fp16.c',
'gpu/gskglbuffer.c',
'gpu/gskgldevice.c',
'gpu/gskgldescriptors.c',
'gpu/gskglframe.c',
'gpu/gskglimage.c',
'gpu/gskgpublendop.c',
@ -90,7 +89,6 @@ gsk_private_sources = files([
'gpu/gskgpuconicgradientop.c',
'gpu/gskgpuconvertop.c',
'gpu/gskgpucrossfadeop.c',
'gpu/gskgpudescriptors.c',
'gpu/gskgpudownloadop.c',
'gpu/gskgpudevice.c',
'gpu/gskgpuframe.c',
@ -161,13 +159,10 @@ gsk_private_vulkan_shader_headers = []
if have_vulkan
gsk_private_sources += files([
'gpu/gskvulkanbuffer.c',
'gpu/gskvulkandescriptors.c',
'gpu/gskvulkandevice.c',
'gpu/gskvulkanframe.c',
'gpu/gskvulkanimage.c',
'gpu/gskvulkanmemory.c',
'gpu/gskvulkanrealdescriptors.c',
'gpu/gskvulkansubdescriptors.c',
])
endif # have_vulkan