gpu: Add ability to run shaders

This heaves over an initial chunk of code from the Vulkan renderer to
execute shaders.

The only shader that exists for now is a shader that draws a single
texture.
We use that to replace the blit op we were doing before.
Benjamin Otte 2023-08-21 02:18:37 +02:00
parent bd114ab1a8
commit 1a85d569e3
48 changed files with 3980 additions and 193 deletions
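
To make the shape of the change concrete: where the fallback path previously recorded a blit of the cairo-rendered image into the target, it now records a texture-shader op. A minimal sketch, assembled from the node-processor hunk further down (the exact signature of gsk_gpu_upload_cairo_op() is an assumption based on the partial call visible in that hunk):

/* Sketch of the new fallback path; not verbatim from the commit. */
image = gsk_gpu_upload_cairo_op (self->frame, &self->scale, &node->bounds);
gsk_gpu_texture_op (self->frame,
                    gsk_gpu_clip_get_shader_clip (&self->clip, &self->offset, &node->bounds),
                    image,
                    GSK_GPU_SAMPLER_DEFAULT,
                    &node->bounds,
                    &self->offset,
                    &node->bounds);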


@@ -23,20 +23,24 @@ def replace_if_changed(new, old):
gl_source_shaders = []
ngl_source_shaders = []
vulkan_compiled_shaders = []
gpu_vulkan_compiled_shaders = []
vulkan_shaders = []
for f in sys.argv[2:]:
    if f.endswith('.glsl'):
-       if f.startswith('ngl'):
+       if f.find('gsk/gpu') > -1:
-           ngl_source_shaders.append(f);
+           ngl_source_shaders.append(f)
        else:
            gl_source_shaders.append(f)
    elif f.endswith('.spv'):
-       vulkan_compiled_shaders.append(f)
+       if f.find('gsk/gpu') > -1:
+           gpu_vulkan_compiled_shaders.append(f)
+       else:
+           vulkan_compiled_shaders.append(f)
    elif f.endswith('.frag') or f.endswith('.vert'):
        vulkan_shaders.append(f)
    else:
-       sys.exit(-1) # FIXME: error message
+       raise Exception(f"No idea what XML to generate for {f}")
xml = '''<?xml version='1.0' encoding='UTF-8'?>
<gresources>
@@ -50,7 +54,7 @@ for f in gl_source_shaders:
xml += '\n'
for f in ngl_source_shaders:
-   xml += ' <file alias=\'ngl/{0}\'>ngl/resources/{0}</file>\n'.format(os.path.basename(f))
+   xml += ' <file alias=\'shaders/gl/{0}\'>gpu/shaders/{0}</file>\n'.format(os.path.basename(f))
xml += '\n'
@@ -59,6 +63,11 @@ for f in vulkan_compiled_shaders:
xml += '\n'
for f in gpu_vulkan_compiled_shaders:
    xml += ' <file alias=\'shaders/vulkan/{0}\'>gpu/shaders/{0}</file>\n'.format(os.path.basename(f))
xml += '\n'
for f in vulkan_shaders:
    xml += ' <file alias=\'vulkan/{0}\'>vulkan/resources/{0}</file>\n'.format(os.path.basename(f))

gsk/gpu/gskglbuffer.c Normal file (92 lines)

@@ -0,0 +1,92 @@
#include "config.h"
#include "gskglbufferprivate.h"
struct _GskGLBuffer
{
GskGpuBuffer parent_instance;
GLenum target;
GLuint buffer_id;
GLenum access;
};
G_DEFINE_TYPE (GskGLBuffer, gsk_gl_buffer, GSK_TYPE_GPU_BUFFER)
static void
gsk_gl_buffer_finalize (GObject *object)
{
GskGLBuffer *self = GSK_GL_BUFFER (object);
glDeleteBuffers (1, &self->buffer_id);
G_OBJECT_CLASS (gsk_gl_buffer_parent_class)->finalize (object);
}
static guchar *
gsk_gl_buffer_map (GskGpuBuffer *buffer)
{
GskGLBuffer *self = GSK_GL_BUFFER (buffer);
gsk_gl_buffer_bind (self);
return glMapBuffer (self->target, self->access);
}
static void
gsk_gl_buffer_unmap (GskGpuBuffer *buffer)
{
GskGLBuffer *self = GSK_GL_BUFFER (buffer);
gsk_gl_buffer_bind (self);
if (!glUnmapBuffer (self->target))
{
g_warning ("glUnmapBuffer failed");
}
}
static void
gsk_gl_buffer_class_init (GskGLBufferClass *klass)
{
GskGpuBufferClass *buffer_class = GSK_GPU_BUFFER_CLASS (klass);
GObjectClass *gobject_class = G_OBJECT_CLASS (klass);
buffer_class->map = gsk_gl_buffer_map;
buffer_class->unmap = gsk_gl_buffer_unmap;
gobject_class->finalize = gsk_gl_buffer_finalize;
}
static void
gsk_gl_buffer_init (GskGLBuffer *self)
{
}
GskGpuBuffer *
gsk_gl_buffer_new (GLenum target,
gsize size,
GLenum access)
{
GskGLBuffer *self;
self = g_object_new (GSK_TYPE_GL_BUFFER, NULL);
gsk_gpu_buffer_setup (GSK_GPU_BUFFER (self), size);
self->target = target;
self->access = access;
glGenBuffers (1, &self->buffer_id);
glBindBuffer (target, self->buffer_id);
glBufferData (target, size, NULL, GL_STATIC_DRAW);
return GSK_GPU_BUFFER (self);
}
void
gsk_gl_buffer_bind (GskGLBuffer *self)
{
glBindBuffer (self->target, self->buffer_id);
}
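
The class above is a thin GObject wrapper around a GL buffer object with map/unmap semantics. A minimal usage sketch, assuming a caller with a current GL context ('vertices' is a hypothetical array, not from this commit):

/* Sketch only: create a write-only vertex buffer, fill it, rebind it. */
GskGpuBuffer *buffer = gsk_gl_buffer_new (GL_ARRAY_BUFFER, sizeof (vertices), GL_WRITE_ONLY);
guchar *data = gsk_gpu_buffer_map (buffer);
memcpy (data, vertices, sizeof (vertices));
gsk_gpu_buffer_unmap (buffer);
gsk_gl_buffer_bind (GSK_GL_BUFFER (buffer));
g_object_unref (buffer);  /* finalize deletes the GL buffer id */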

gsk/gpu/gskglbufferprivate.h Normal file (20 lines)

@@ -0,0 +1,20 @@
#pragma once
#include "gskgpubufferprivate.h"
#include "gskgldeviceprivate.h"
G_BEGIN_DECLS
#define GSK_TYPE_GL_BUFFER (gsk_gl_buffer_get_type ())
G_DECLARE_FINAL_TYPE (GskGLBuffer, gsk_gl_buffer, GSK, GL_BUFFER, GskGpuBuffer)
GskGpuBuffer * gsk_gl_buffer_new (GLenum target,
gsize size,
GLenum access);
void gsk_gl_buffer_bind (GskGLBuffer *self);
G_END_DECLS

gsk/gpu/gskgldevice.c

@@ -1,6 +1,10 @@
#include "config.h"
#include "gskgldeviceprivate.h"
#include "gskdebugprivate.h"
#include "gskgpushaderopprivate.h"
#include "gskglbufferprivate.h"
#include "gskglimageprivate.h"
#include "gdk/gdkdisplayprivate.h"
@@ -11,6 +15,12 @@
struct _GskGLDevice
{
GskGpuDevice parent_instance;
GHashTable *gl_programs;
const char *version_string;
GdkGLAPI api;
guint sampler_ids[GSK_GPU_SAMPLER_N_SAMPLERS];
};
struct _GskGLDeviceClass
@@ -18,8 +28,36 @@ struct _GskGLDeviceClass
GskGpuDeviceClass parent_class;
};
typedef struct _GLProgramKey GLProgramKey;
struct _GLProgramKey
{
const GskGpuShaderOpClass *op_class;
GskGpuShaderClip clip;
};
G_DEFINE_TYPE (GskGLDevice, gsk_gl_device, GSK_TYPE_GPU_DEVICE)
static guint
gl_program_key_hash (gconstpointer data)
{
const GLProgramKey *key = data;
return GPOINTER_TO_UINT (key->op_class) ^
key->clip;
}
static gboolean
gl_program_key_equal (gconstpointer a,
gconstpointer b)
{
const GLProgramKey *keya = a;
const GLProgramKey *keyb = b;
return keya->op_class == keyb->op_class &&
keya->clip == keyb->clip;
}
static GskGpuImage *
gsk_gl_device_create_offscreen_image (GskGpuDevice *device,
GdkMemoryDepth depth,
@@ -56,6 +94,11 @@ gsk_gl_device_finalize (GObject *object)
g_object_steal_data (G_OBJECT (gsk_gpu_device_get_display (device)), "-gsk-gl-device");
gdk_gl_context_make_current (gdk_display_get_gl_context (gsk_gpu_device_get_display (device)));
g_hash_table_unref (self->gl_programs);
glDeleteSamplers (G_N_ELEMENTS (self->sampler_ids), self->sampler_ids);
G_OBJECT_CLASS (gsk_gl_device_parent_class)->finalize (object);
}
@@ -71,9 +114,49 @@ gsk_gl_device_class_init (GskGLDeviceClass *klass)
object_class->finalize = gsk_gl_device_finalize;
}
static void
free_gl_program (gpointer program)
{
glDeleteProgram (GPOINTER_TO_UINT (program));
}
static void
gsk_gl_device_init (GskGLDevice *self)
{
self->gl_programs = g_hash_table_new_full (gl_program_key_hash, gl_program_key_equal, g_free, free_gl_program);
}
static void
gsk_gl_device_setup_samplers (GskGLDevice *self)
{
struct {
GLuint filter;
GLuint wrap;
} sampler_flags[GSK_GPU_SAMPLER_N_SAMPLERS] = {
[GSK_GPU_SAMPLER_DEFAULT] = {
GL_LINEAR,
GL_CLAMP_TO_EDGE,
},
[GSK_GPU_SAMPLER_REPEAT] = {
GL_LINEAR,
GL_REPEAT,
},
[GSK_GPU_SAMPLER_NEAREST] = {
GL_NEAREST,
GL_CLAMP_TO_EDGE,
}
};
guint i;
glGenSamplers (G_N_ELEMENTS (self->sampler_ids), self->sampler_ids);
for (i = 0; i < G_N_ELEMENTS (self->sampler_ids); i++)
{
glSamplerParameteri (self->sampler_ids[i], GL_TEXTURE_MIN_FILTER, sampler_flags[i].filter);
glSamplerParameteri (self->sampler_ids[i], GL_TEXTURE_MAG_FILTER, sampler_flags[i].filter);
glSamplerParameteri (self->sampler_ids[i], GL_TEXTURE_WRAP_S, sampler_flags[i].wrap);
glSamplerParameteri (self->sampler_ids[i], GL_TEXTURE_WRAP_T, sampler_flags[i].wrap);
}
}
GskGpuDevice *
@@ -104,11 +187,313 @@ gsk_gl_device_get_for_display (GdkDisplay *display,
gsk_gpu_device_setup (GSK_GPU_DEVICE (self), display);
context = gdk_display_get_gl_context (display);
gdk_gl_context_make_current (context);
self->version_string = gdk_gl_context_get_glsl_version_string (context);
self->api = gdk_gl_context_get_api (context);
gsk_gl_device_setup_samplers (self);
g_object_set_data (G_OBJECT (display), "-gsk-gl-device", self);
return GSK_GPU_DEVICE (self);
}
static char *
prepend_line_numbers (char *code)
{
GString *s;
char *p;
int line;
s = g_string_new ("");
p = code;
line = 1;
while (*p)
{
char *end = strchr (p, '\n');
if (end)
end = end + 1; /* Include newline */
else
end = p + strlen (p);
g_string_append_printf (s, "%3d| ", line++);
g_string_append_len (s, p, end - p);
p = end;
}
g_free (code);
return g_string_free (s, FALSE);
}
static gboolean
gsk_gl_device_check_shader_error (int shader_id,
GError **error)
{
GLint status;
GLint log_len;
GLint code_len;
char *log;
char *code;
glGetShaderiv (shader_id, GL_COMPILE_STATUS, &status);
if G_LIKELY (status == GL_TRUE)
return TRUE;
glGetShaderiv (shader_id, GL_INFO_LOG_LENGTH, &log_len);
log = g_malloc0 (log_len + 1);
glGetShaderInfoLog (shader_id, log_len, NULL, log);
glGetShaderiv (shader_id, GL_SHADER_SOURCE_LENGTH, &code_len);
code = g_malloc0 (code_len + 1);
glGetShaderSource (shader_id, code_len, NULL, code);
code = prepend_line_numbers (code);
g_set_error (error,
GDK_GL_ERROR,
GDK_GL_ERROR_COMPILATION_FAILED,
"Compilation failure in shader.\n"
"Source Code:\n"
"%s\n"
"\n"
"Error Message:\n"
"%s\n"
"\n",
code,
log);
g_free (code);
g_free (log);
return FALSE;
}
static void
print_shader_info (const char *prefix,
GLuint shader_id,
const char *name)
{
if (GSK_DEBUG_CHECK (SHADERS))
{
int code_len;
glGetShaderiv (shader_id, GL_SHADER_SOURCE_LENGTH, &code_len);
if (code_len > 0)
{
char *code;
code = g_malloc0 (code_len + 1);
glGetShaderSource (shader_id, code_len, NULL, code);
code = prepend_line_numbers (code);
g_message ("%s %d, %s:\n%s",
prefix, shader_id,
name ? name : "unnamed",
code);
g_free (code);
}
}
}
static GLuint
gsk_gl_device_load_shader (GskGLDevice *self,
const char *program_name,
GLenum shader_type,
GskGpuShaderClip clip,
GError **error)
{
GString *preamble;
char *resource_name;
GBytes *bytes;
GLuint shader_id;
preamble = g_string_new (NULL);
g_string_append (preamble, self->version_string);
g_string_append (preamble, "\n");
if (self->api == GDK_GL_API_GLES)
g_string_append (preamble, "#define GSK_GLES 1\n");
switch (shader_type)
{
case GL_VERTEX_SHADER:
g_string_append (preamble, "#define GSK_VERTEX_SHADER 1\n");
break;
case GL_FRAGMENT_SHADER:
g_string_append (preamble, "#define GSK_FRAGMENT_SHADER 1\n");
break;
default:
g_assert_not_reached ();
return 0;
}
switch (clip)
{
case GSK_GPU_SHADER_CLIP_NONE:
g_string_append (preamble, "#define GSK_SHADER_CLIP GSK_GPU_SHADER_CLIP_NONE\n");
break;
case GSK_GPU_SHADER_CLIP_RECT:
g_string_append (preamble, "#define GSK_SHADER_CLIP GSK_GPU_SHADER_CLIP_RECT\n");
break;
case GSK_GPU_SHADER_CLIP_ROUNDED:
g_string_append (preamble, "#define GSK_SHADER_CLIP GSK_GPU_SHADER_CLIP_ROUNDED\n");
break;
default:
g_assert_not_reached ();
break;
}
resource_name = g_strconcat ("/org/gtk/libgsk/shaders/gl/", program_name, ".glsl", NULL);
bytes = g_resources_lookup_data (resource_name, 0, error);
g_free (resource_name);
if (bytes == NULL)
return 0;
shader_id = glCreateShader (shader_type);
glShaderSource (shader_id,
2,
(const char *[]) {
preamble->str,
g_bytes_get_data (bytes, NULL),
},
NULL);
g_bytes_unref (bytes);
g_string_free (preamble, TRUE);
glCompileShader (shader_id);
print_shader_info (shader_type == GL_FRAGMENT_SHADER ? "fragment" : "vertex", shader_id, program_name);
if (!gsk_gl_device_check_shader_error (shader_id, error))
{
glDeleteShader (shader_id);
return 0;
}
return shader_id;
}
static GLuint
gsk_gl_device_load_program (GskGLDevice *self,
const char *program_name,
GskGpuShaderClip clip,
GError **error)
{
GLuint vertex_shader_id, fragment_shader_id, program_id;
GLint link_status;
vertex_shader_id = gsk_gl_device_load_shader (self, program_name, GL_VERTEX_SHADER, clip, error);
if (vertex_shader_id == 0)
return 0;
fragment_shader_id = gsk_gl_device_load_shader (self, program_name, GL_FRAGMENT_SHADER, clip, error);
if (fragment_shader_id == 0)
return 0;
program_id = glCreateProgram ();
glAttachShader (program_id, vertex_shader_id);
glAttachShader (program_id, fragment_shader_id);
glLinkProgram (program_id);
glGetProgramiv (program_id, GL_LINK_STATUS, &link_status);
glDetachShader (program_id, vertex_shader_id);
glDeleteShader (vertex_shader_id);
glDetachShader (program_id, fragment_shader_id);
glDeleteShader (fragment_shader_id);
if (link_status == GL_FALSE)
{
char *buffer = NULL;
int log_len = 0;
glGetProgramiv (program_id, GL_INFO_LOG_LENGTH, &log_len);
if (log_len > 0)
{
/* log_len includes NULL */
buffer = g_malloc0 (log_len);
glGetProgramInfoLog (program_id, log_len, NULL, buffer);
}
g_set_error (error,
GDK_GL_ERROR,
GDK_GL_ERROR_LINK_FAILED,
"Linking failure in shader: %s",
buffer ? buffer : "");
g_free (buffer);
glDeleteProgram (program_id);
return 0;
}
return program_id;
}
void
gsk_gl_device_use_program (GskGLDevice *self,
const GskGpuShaderOpClass *op_class,
GskGpuShaderClip clip)
{
GError *error = NULL;
GLuint program_id;
GLProgramKey key = {
.op_class = op_class,
.clip = clip,
};
guint i;
program_id = GPOINTER_TO_UINT (g_hash_table_lookup (self->gl_programs, &key));
if (program_id)
{
glUseProgram (program_id);
return;
}
program_id = gsk_gl_device_load_program (self, op_class->shader_name, clip, &error);
if (program_id == 0)
{
g_critical ("Failed to load shader program: %s", error->message);
g_clear_error (&error);
return;
}
g_hash_table_insert (self->gl_programs, g_memdup (&key, sizeof (GLProgramKey)), GUINT_TO_POINTER (program_id));
glUseProgram (program_id);
for (i = 0; i < 16; i++)
{
char *name = g_strdup_printf ("textures[%u]", i);
glUniform1i (glGetUniformLocation (program_id, name), i);
g_free (name);
}
}
GLuint
gsk_gl_device_get_sampler_id (GskGLDevice *self,
GskGpuSampler sampler)
{
g_return_val_if_fail (sampler < G_N_ELEMENTS (self->sampler_ids), 0);
return self->sampler_ids[sampler];
}
void
gsk_gl_device_find_gl_format (GskGLDevice *self,
GdkMemoryFormat format,
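
gsk_gl_device_use_program() above is the heart of this file: programs are compiled lazily per (op class, clip mode) pair and cached in gl_programs, so only the first use of a combination pays for compile and link. A hedged usage sketch; 'device' and 'op_class' stand in for values a shader op would already hold, and no op class symbol is shown in this commit:

/* Sketch: bind the program for a shader op before emitting draw calls.
 * The first call compiles shaders/gl/<shader_name>.glsl with a preamble
 * defining GSK_VERTEX_SHADER/GSK_FRAGMENT_SHADER and GSK_SHADER_CLIP;
 * later calls are a hash-table hit plus glUseProgram(). */
gsk_gl_device_use_program (device,
                           op_class,   /* a GskGpuShaderOpClass with a shader_name */
                           GSK_GPU_SHADER_CLIP_NONE);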

gsk/gpu/gskgldeviceprivate.h

@@ -11,6 +11,12 @@ G_DECLARE_FINAL_TYPE (GskGLDevice, gsk_gl_device, GSK, GL_DEVICE, GskGpuDevice)
GskGpuDevice * gsk_gl_device_get_for_display (GdkDisplay *display,
GError **error);
void gsk_gl_device_use_program (GskGLDevice *self,
const GskGpuShaderOpClass *op_class,
GskGpuShaderClip clip);
GLuint gsk_gl_device_get_sampler_id (GskGLDevice *self,
GskGpuSampler sampler);
void gsk_gl_device_find_gl_format (GskGLDevice *self,
GdkMemoryFormat format,

gsk/gpu/gskglframe.c

@@ -2,14 +2,19 @@
#include "gskglframeprivate.h"
#include "gskgpuglobalsopprivate.h"
#include "gskgpuopprivate.h"
#include "gskglbufferprivate.h"
-#include "gdk/gdkdisplayprivate.h"
-#include "gdk/gdkglcontextprivate.h"
+#include "gskgldeviceprivate.h"
struct _GskGLFrame
{
GskGpuFrame parent_instance;
GLuint globals_buffer_id;
guint next_texture_slot;
GHashTable *vaos;
};
struct _GskGLFrameClass
@@ -26,26 +31,131 @@
}
static void
-gsk_gl_frame_submit (GskGpuFrame *frame,
-GskGpuOp *op)
+gsk_gl_frame_setup (GskGpuFrame *frame)
{
GskGLFrame *self = GSK_GL_FRAME (frame);
glGenBuffers (1, &self->globals_buffer_id);
}
static void
gsk_gl_frame_cleanup (GskGpuFrame *frame)
{
GskGLFrame *self = GSK_GL_FRAME (frame);
self->next_texture_slot = 0;
GSK_GPU_FRAME_CLASS (gsk_gl_frame_parent_class)->cleanup (frame);
}
static guint32
gsk_gl_frame_get_image_descriptor (GskGpuFrame *frame,
GskGpuImage *image,
GskGpuSampler sampler)
{
GskGLFrame *self = GSK_GL_FRAME (frame);
guint32 slot;
slot = self->next_texture_slot;
self->next_texture_slot = (self->next_texture_slot + 1) % 16;
return slot;
}
static GskGpuBuffer *
gsk_gl_frame_create_vertex_buffer (GskGpuFrame *frame,
gsize size)
{
return gsk_gl_buffer_new (GL_ARRAY_BUFFER, size, GL_WRITE_ONLY);
}
static void
gsk_gl_frame_submit (GskGpuFrame *frame,
GskGpuBuffer *vertex_buffer,
GskGpuOp *op)
{
GskGLFrame *self = GSK_GL_FRAME (frame);
glEnable (GL_DEPTH_TEST);
glDepthFunc (GL_LEQUAL);
/* Pre-multiplied alpha */
glEnable (GL_BLEND);
glBlendFunc (GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
glBlendEquation (GL_FUNC_ADD);
gsk_gl_buffer_bind (GSK_GL_BUFFER (vertex_buffer));
glBindBufferBase (GL_UNIFORM_BUFFER, 0, self->globals_buffer_id);
glBufferData (GL_UNIFORM_BUFFER,
sizeof (GskGpuGlobalsInstance),
NULL,
GL_DYNAMIC_DRAW);
while (op)
{
op = gsk_gpu_op_gl_command (op, frame);
}
}
static void
gsk_gl_frame_finalize (GObject *object)
{
GskGLFrame *self = GSK_GL_FRAME (object);
g_hash_table_unref (self->vaos);
glDeleteBuffers (1, &self->globals_buffer_id);
G_OBJECT_CLASS (gsk_gl_frame_parent_class)->finalize (object);
}
static void
gsk_gl_frame_class_init (GskGLFrameClass *klass)
{
GskGpuFrameClass *gpu_frame_class = GSK_GPU_FRAME_CLASS (klass);
GObjectClass *object_class = G_OBJECT_CLASS (klass);
gpu_frame_class->is_busy = gsk_gl_frame_is_busy;
gpu_frame_class->setup = gsk_gl_frame_setup;
gpu_frame_class->cleanup = gsk_gl_frame_cleanup;
gpu_frame_class->get_image_descriptor = gsk_gl_frame_get_image_descriptor;
gpu_frame_class->create_vertex_buffer = gsk_gl_frame_create_vertex_buffer;
gpu_frame_class->submit = gsk_gl_frame_submit;
object_class->finalize = gsk_gl_frame_finalize;
}
static void
free_vao (gpointer vao)
{
glDeleteVertexArrays (1, (GLuint[1]) { GPOINTER_TO_UINT (vao) });
}
static void
gsk_gl_frame_init (GskGLFrame *self)
{
self->vaos = g_hash_table_new_full (g_direct_hash, g_direct_equal, NULL, free_vao);
}
void
gsk_gl_frame_use_program (GskGLFrame *self,
const GskGpuShaderOpClass *op_class,
GskGpuShaderClip clip)
{
GLuint vao;
gsk_gl_device_use_program (GSK_GL_DEVICE (gsk_gpu_frame_get_device (GSK_GPU_FRAME (self))),
op_class,
clip);
vao = GPOINTER_TO_UINT (g_hash_table_lookup (self->vaos, op_class));
if (vao)
{
glBindVertexArray(vao);
return;
}
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
g_hash_table_insert (self->vaos, (gpointer) op_class, GUINT_TO_POINTER (vao));
}
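
Two per-frame caches come together in this file: one vertex array object per shader op class, and a round-robin of 16 texture slots from get_image_descriptor() matching the textures[0..15] uniforms the device seeds in use_program(). A sketch of how a GL shader op might combine them; the local variables are hypothetical glue, only the frame/device calls come from this commit:

/* Sketch: binding program, VAO and a texture slot for one draw. */
guint32 slot = gsk_gpu_frame_get_image_descriptor (frame, image, GSK_GPU_SAMPLER_DEFAULT);
gsk_gl_frame_use_program (GSK_GL_FRAME (frame), op_class, clip);
glActiveTexture (GL_TEXTURE0 + slot);
/* ...bind the image's GL texture here... */
glBindSampler (slot, gsk_gl_device_get_sampler_id (device, GSK_GPU_SAMPLER_DEFAULT));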

gsk/gpu/gskglframeprivate.h

@@ -8,5 +8,8 @@ G_BEGIN_DECLS
G_DECLARE_FINAL_TYPE (GskGLFrame, gsk_gl_frame, GSK, GL_FRAME, GskGpuFrame)
void gsk_gl_frame_use_program (GskGLFrame *self,
const GskGpuShaderOpClass *op_class,
GskGpuShaderClip clip);
G_END_DECLS

gsk/gpu/gskgpublitop.c

@@ -31,9 +31,10 @@ gsk_gpu_blit_op_finish (GskGpuOp *op)
}
static void
gsk_gpu_blit_op_print (GskGpuOp *op,
GskGpuFrame *frame,
GString *string,
guint indent)
{
GskGpuBlitOp *self = (GskGpuBlitOp *) op;
@@ -43,16 +44,11 @@ gsk_gpu_blit_op_print (GskGpuOp *op,
}
#ifdef GDK_RENDERING_VULKAN
-static void
-gsk_gpu_blit_op_vk_reserve_descriptor_sets (GskGpuOp *op,
-GskGpuFrame *frame)
-{
-}
static GskGpuOp *
gsk_gpu_blit_op_vk_command (GskGpuOp *op,
GskGpuFrame *frame,
VkRenderPass render_pass,
VkFormat format,
VkCommandBuffer command_buffer)
{
GskGpuBlitOp *self = (GskGpuBlitOp *) op;
@@ -195,7 +191,6 @@ static const GskGpuOpClass GSK_GPU_BLIT_OP_CLASS = {
gsk_gpu_blit_op_finish,
gsk_gpu_blit_op_print,
#ifdef GDK_RENDERING_VULKAN
-gsk_gpu_blit_op_vk_reserve_descriptor_sets,
gsk_gpu_blit_op_vk_command,
#endif
gsk_gpu_blit_op_gl_command

gsk/gpu/gskgpubuffer.c Normal file (52 lines)

@@ -0,0 +1,52 @@
#include "config.h"
#include "gskgpubufferprivate.h"
typedef struct _GskGpuBufferPrivate GskGpuBufferPrivate;
struct _GskGpuBufferPrivate
{
gsize size;
};
G_DEFINE_TYPE_WITH_PRIVATE (GskGpuBuffer, gsk_gpu_buffer, G_TYPE_OBJECT)
static void
gsk_gpu_buffer_class_init (GskGpuBufferClass *klass)
{
}
static void
gsk_gpu_buffer_init (GskGpuBuffer *self)
{
}
void
gsk_gpu_buffer_setup (GskGpuBuffer *self,
gsize size)
{
GskGpuBufferPrivate *priv = gsk_gpu_buffer_get_instance_private (self);
priv->size = size;
}
gsize
gsk_gpu_buffer_get_size (GskGpuBuffer *self)
{
GskGpuBufferPrivate *priv = gsk_gpu_buffer_get_instance_private (self);
return priv->size;
}
guchar *
gsk_gpu_buffer_map (GskGpuBuffer *self)
{
return GSK_GPU_BUFFER_GET_CLASS (self)->map (self);
}
void
gsk_gpu_buffer_unmap (GskGpuBuffer *self)
{
GSK_GPU_BUFFER_GET_CLASS (self)->unmap (self);
}

gsk/gpu/gskgpubufferprivate.h Normal file (42 lines)

@@ -0,0 +1,42 @@
#pragma once
#include "gskgputypesprivate.h"
G_BEGIN_DECLS
#define GSK_TYPE_GPU_BUFFER (gsk_gpu_buffer_get_type ())
#define GSK_GPU_BUFFER(o) (G_TYPE_CHECK_INSTANCE_CAST ((o), GSK_TYPE_GPU_BUFFER, GskGpuBuffer))
#define GSK_GPU_BUFFER_CLASS(k) (G_TYPE_CHECK_CLASS_CAST ((k), GSK_TYPE_GPU_BUFFER, GskGpuBufferClass))
#define GSK_IS_GPU_BUFFER(o) (G_TYPE_CHECK_INSTANCE_TYPE ((o), GSK_TYPE_GPU_BUFFER))
#define GSK_IS_GPU_BUFFER_CLASS(k) (G_TYPE_CHECK_CLASS_TYPE ((k), GSK_TYPE_GPU_BUFFER))
#define GSK_GPU_BUFFER_GET_CLASS(o) (G_TYPE_INSTANCE_GET_CLASS ((o), GSK_TYPE_GPU_BUFFER, GskGpuBufferClass))
typedef struct _GskGpuBufferClass GskGpuBufferClass;
struct _GskGpuBuffer
{
GObject parent_instance;
};
struct _GskGpuBufferClass
{
GObjectClass parent_class;
guchar * (* map) (GskGpuBuffer *self);
void (* unmap) (GskGpuBuffer *self);
};
GType gsk_gpu_buffer_get_type (void) G_GNUC_CONST;
void gsk_gpu_buffer_setup (GskGpuBuffer *self,
gsize size);
gsize gsk_gpu_buffer_get_size (GskGpuBuffer *self);
guchar * gsk_gpu_buffer_map (GskGpuBuffer *self);
void gsk_gpu_buffer_unmap (GskGpuBuffer *self);
G_DEFINE_AUTOPTR_CLEANUP_FUNC(GskGpuBuffer, g_object_unref)
G_END_DECLS

gsk/gpu/gskgpuclip.c Normal file (293 lines)

@@ -0,0 +1,293 @@
#include "config.h"
#include "gskgpuclipprivate.h"
#include "gskrectprivate.h"
#include "gskroundedrectprivate.h"
#include "gsktransform.h"
void
gsk_gpu_clip_init_empty (GskGpuClip *clip,
const graphene_rect_t *rect)
{
clip->type = GSK_GPU_CLIP_NONE;
gsk_rounded_rect_init_from_rect (&clip->rect, rect, 0);
}
void
gsk_gpu_clip_init_rect (GskGpuClip *clip,
const graphene_rect_t *rect)
{
clip->type = GSK_GPU_CLIP_RECT;
gsk_rounded_rect_init_from_rect (&clip->rect, rect, 0);
}
void
gsk_gpu_clip_init_copy (GskGpuClip *self,
const GskGpuClip *src)
{
self->type = src->type;
gsk_rounded_rect_init_copy (&self->rect, &src->rect);
}
static gboolean
gsk_gpu_clip_init_after_intersection (GskGpuClip *self,
GskRoundedRectIntersection res)
{
if (res == GSK_INTERSECTION_NOT_REPRESENTABLE)
return FALSE;
if (res == GSK_INTERSECTION_EMPTY)
self->type = GSK_GPU_CLIP_ALL_CLIPPED;
else if (gsk_rounded_rect_is_rectilinear (&self->rect))
self->type = GSK_GPU_CLIP_RECT;
else
self->type = GSK_GPU_CLIP_ROUNDED;
return TRUE;
}
gboolean
gsk_gpu_clip_intersect_rect (GskGpuClip *dest,
const GskGpuClip *src,
const graphene_rect_t *rect)
{
GskRoundedRectIntersection res;
if (gsk_rect_contains_rect (rect, &src->rect.bounds))
{
gsk_gpu_clip_init_copy (dest, src);
return TRUE;
}
if (!gsk_rect_intersects (rect, &src->rect.bounds))
{
dest->type = GSK_GPU_CLIP_ALL_CLIPPED;
return TRUE;
}
switch (src->type)
{
case GSK_GPU_CLIP_ALL_CLIPPED:
dest->type = GSK_GPU_CLIP_ALL_CLIPPED;
break;
case GSK_GPU_CLIP_NONE:
gsk_gpu_clip_init_copy (dest, src);
if (gsk_rect_intersection (&dest->rect.bounds, rect, &dest->rect.bounds))
dest->type = GSK_GPU_CLIP_RECT;
else
dest->type = GSK_GPU_CLIP_ALL_CLIPPED;
break;
case GSK_GPU_CLIP_RECT:
gsk_gpu_clip_init_copy (dest, src);
if (!gsk_rect_intersection (&dest->rect.bounds, rect, &dest->rect.bounds))
dest->type = GSK_GPU_CLIP_ALL_CLIPPED;
break;
case GSK_GPU_CLIP_ROUNDED:
res = gsk_rounded_rect_intersect_with_rect (&src->rect, rect, &dest->rect);
if (!gsk_gpu_clip_init_after_intersection (dest, res))
return FALSE;
break;
default:
g_assert_not_reached ();
return FALSE;
}
return TRUE;
}
gboolean
gsk_gpu_clip_intersect_rounded_rect (GskGpuClip *dest,
const GskGpuClip *src,
const GskRoundedRect *rounded)
{
GskRoundedRectIntersection res;
if (gsk_rounded_rect_contains_rect (rounded, &src->rect.bounds))
{
gsk_gpu_clip_init_copy (dest, src);
return TRUE;
}
if (!gsk_rect_intersects (&rounded->bounds, &src->rect.bounds))
{
dest->type = GSK_GPU_CLIP_ALL_CLIPPED;
return TRUE;
}
switch (src->type)
{
case GSK_GPU_CLIP_ALL_CLIPPED:
dest->type = GSK_GPU_CLIP_ALL_CLIPPED;
break;
case GSK_GPU_CLIP_NONE:
case GSK_GPU_CLIP_RECT:
res = gsk_rounded_rect_intersect_with_rect (rounded, &src->rect.bounds, &dest->rect);
if (!gsk_gpu_clip_init_after_intersection (dest, res))
return FALSE;
break;
case GSK_GPU_CLIP_ROUNDED:
res = gsk_rounded_rect_intersection (&src->rect, rounded, &dest->rect);
if (!gsk_gpu_clip_init_after_intersection (dest, res))
return FALSE;
break;
default:
g_assert_not_reached ();
return FALSE;
}
return TRUE;
}
void
gsk_gpu_clip_scale (GskGpuClip *dest,
const GskGpuClip *src,
float scale_x,
float scale_y)
{
dest->type = src->type;
gsk_rounded_rect_scale_affine (&dest->rect,
&src->rect,
1.0f / scale_x, 1.0f / scale_y,
0, 0);
}
gboolean
gsk_gpu_clip_transform (GskGpuClip *dest,
const GskGpuClip *src,
GskTransform *transform,
const graphene_rect_t *viewport)
{
switch (src->type)
{
default:
g_assert_not_reached();
return FALSE;
case GSK_GPU_CLIP_ALL_CLIPPED:
gsk_gpu_clip_init_copy (dest, src);
return TRUE;
case GSK_GPU_CLIP_NONE:
case GSK_GPU_CLIP_RECT:
case GSK_GPU_CLIP_ROUNDED:
switch (gsk_transform_get_category (transform))
{
case GSK_TRANSFORM_CATEGORY_IDENTITY:
gsk_gpu_clip_init_copy (dest, src);
return TRUE;
case GSK_TRANSFORM_CATEGORY_2D_TRANSLATE:
{
float dx, dy;
gsk_transform_to_translate (transform, &dx, &dy);
gsk_gpu_clip_init_copy (dest, src);
dest->rect.bounds.origin.x -= dx;
dest->rect.bounds.origin.y -= dy;
}
return TRUE;
case GSK_TRANSFORM_CATEGORY_2D_AFFINE:
{
float dx, dy, scale_x, scale_y;
gsk_transform_to_affine (transform, &scale_x, &scale_y, &dx, &dy);
scale_x = 1. / scale_x;
scale_y = 1. / scale_y;
gsk_gpu_clip_init_copy (dest, src);
dest->rect.bounds.origin.x = (dest->rect.bounds.origin.x - dx) * scale_x;
dest->rect.bounds.origin.y = (dest->rect.bounds.origin.y - dy) * scale_y;
dest->rect.bounds.size.width *= scale_x;
dest->rect.bounds.size.height *= scale_y;
if (src->type == GSK_GPU_CLIP_ROUNDED)
{
dest->rect.corner[0].width *= scale_x;
dest->rect.corner[0].height *= scale_y;
dest->rect.corner[1].width *= scale_x;
dest->rect.corner[1].height *= scale_y;
dest->rect.corner[2].width *= scale_x;
dest->rect.corner[2].height *= scale_y;
dest->rect.corner[3].width *= scale_x;
dest->rect.corner[3].height *= scale_y;
}
}
return TRUE;
case GSK_TRANSFORM_CATEGORY_UNKNOWN:
case GSK_TRANSFORM_CATEGORY_ANY:
case GSK_TRANSFORM_CATEGORY_3D:
case GSK_TRANSFORM_CATEGORY_2D:
default:
return FALSE;
}
}
}
gboolean
gsk_gpu_clip_may_intersect_rect (const GskGpuClip *self,
const graphene_point_t *offset,
const graphene_rect_t *rect)
{
graphene_rect_t r = *rect;
r.origin.x += offset->x;
r.origin.y += offset->y;
switch (self->type)
{
default:
g_assert_not_reached();
case GSK_GPU_CLIP_ALL_CLIPPED:
return FALSE;
case GSK_GPU_CLIP_NONE:
case GSK_GPU_CLIP_RECT:
case GSK_GPU_CLIP_ROUNDED:
return gsk_rect_intersects (&self->rect.bounds, &r);
}
}
gboolean
gsk_gpu_clip_contains_rect (const GskGpuClip *self,
const graphene_point_t *offset,
const graphene_rect_t *rect)
{
graphene_rect_t r = *rect;
r.origin.x += offset->x;
r.origin.y += offset->y;
switch (self->type)
{
default:
g_assert_not_reached();
case GSK_GPU_CLIP_ALL_CLIPPED:
return FALSE;
case GSK_GPU_CLIP_NONE:
case GSK_GPU_CLIP_RECT:
return gsk_rect_contains_rect (&self->rect.bounds, &r);
case GSK_GPU_CLIP_ROUNDED:
return gsk_rounded_rect_contains_rect (&self->rect, &r);
}
}
GskGpuShaderClip
gsk_gpu_clip_get_shader_clip (const GskGpuClip *self,
const graphene_point_t *offset,
const graphene_rect_t *rect)
{
if (self->type == GSK_GPU_CLIP_NONE ||
gsk_gpu_clip_contains_rect (self, offset, rect))
return GSK_GPU_SHADER_CLIP_NONE;
else if (self->type == GSK_GPU_CLIP_RECT)
return GSK_GPU_SHADER_CLIP_RECT;
else
return GSK_GPU_SHADER_CLIP_ROUNDED;
}
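
gsk_gpu_clip_get_shader_clip() is what selects the shader variant: nodes fully inside the clip compile against GSK_GPU_SHADER_CLIP_NONE, rectangular clipping gets the cheap RECT variant, everything else the ROUNDED one. A small worked sketch using only the API above:

/* Sketch: 100x100 framebuffer, then clipped to (10,10,50,50). */
GskGpuClip clip, clipped;
gsk_gpu_clip_init_empty (&clip, &GRAPHENE_RECT_INIT (0, 0, 100, 100));
if (gsk_gpu_clip_intersect_rect (&clipped, &clip, &GRAPHENE_RECT_INIT (10, 10, 50, 50)))
  {
    /* (20,20,10,10) lies fully inside -> GSK_GPU_SHADER_CLIP_NONE;
     * (40,40,40,40) crosses the clip edge -> GSK_GPU_SHADER_CLIP_RECT */
    GskGpuShaderClip shader_clip =
      gsk_gpu_clip_get_shader_clip (&clipped, graphene_point_zero (), &GRAPHENE_RECT_INIT (20, 20, 10, 10));
  }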

gsk/gpu/gskgpuclipprivate.h Normal file (68 lines)

@@ -0,0 +1,68 @@
#pragma once
#include "gskgputypesprivate.h"
#include <gdk/gdk.h>
#include <graphene.h>
#include <gsk/gskroundedrect.h>
G_BEGIN_DECLS
typedef enum {
/* The whole area is clipped, no drawing is necessary.
* This can't be handled by return values because for return
* values we return if clips could even be computed.
*/
GSK_GPU_CLIP_ALL_CLIPPED,
/* No clipping is necessary, but the clip rect is set
* to the actual bounds of the underlying framebuffer
*/
GSK_GPU_CLIP_NONE,
/* The clip is a rectangular area */
GSK_GPU_CLIP_RECT,
/* The clip is a rounded rectangle */
GSK_GPU_CLIP_ROUNDED
} GskGpuClipComplexity;
typedef struct _GskGpuClip GskGpuClip;
struct _GskGpuClip
{
GskGpuClipComplexity type;
GskRoundedRect rect;
};
void gsk_gpu_clip_init_empty (GskGpuClip *clip,
const graphene_rect_t *rect);
void gsk_gpu_clip_init_copy (GskGpuClip *self,
const GskGpuClip *src);
void gsk_gpu_clip_init_rect (GskGpuClip *clip,
const graphene_rect_t *rect);
gboolean gsk_gpu_clip_intersect_rect (GskGpuClip *dest,
const GskGpuClip *src,
const graphene_rect_t *rect) G_GNUC_WARN_UNUSED_RESULT;
gboolean gsk_gpu_clip_intersect_rounded_rect (GskGpuClip *dest,
const GskGpuClip *src,
const GskRoundedRect *rounded) G_GNUC_WARN_UNUSED_RESULT;
void gsk_gpu_clip_scale (GskGpuClip *dest,
const GskGpuClip *src,
float scale_x,
float scale_y);
gboolean gsk_gpu_clip_transform (GskGpuClip *dest,
const GskGpuClip *src,
GskTransform *transform,
const graphene_rect_t *viewport) G_GNUC_WARN_UNUSED_RESULT;
gboolean gsk_gpu_clip_contains_rect (const GskGpuClip *self,
const graphene_point_t *offset,
const graphene_rect_t *rect) G_GNUC_WARN_UNUSED_RESULT;
gboolean gsk_gpu_clip_may_intersect_rect (const GskGpuClip *self,
const graphene_point_t *offset,
const graphene_rect_t *rect) G_GNUC_WARN_UNUSED_RESULT;
GskGpuShaderClip gsk_gpu_clip_get_shader_clip (const GskGpuClip *self,
const graphene_point_t *offset,
const graphene_rect_t *rect);
G_END_DECLS

gsk/gpu/gskgpudownloadop.c

@@ -27,9 +27,7 @@ struct _GskGpuDownloadOp
gpointer user_data;
GdkTexture *texture;
-#ifdef GDK_RENDERING_VULKAN
-GskVulkanBuffer *buffer;
-#endif
+GskGpuBuffer *buffer;
};
static void
@@ -44,15 +42,14 @@ gsk_gpu_download_op_finish (GskGpuOp *op)
g_object_unref (self->texture);
g_object_unref (self->image);
-#ifdef GDK_RENDERING_VULKAN
-g_clear_pointer (&self->buffer, gsk_vulkan_buffer_free);
-#endif
+g_clear_object (&self->buffer);
}
static void
gsk_gpu_download_op_print (GskGpuOp *op,
GskGpuFrame *frame,
GString *string,
guint indent)
{
GskGpuDownloadOp *self = (GskGpuDownloadOp *) op;
@@ -62,12 +59,6 @@ gsk_gpu_download_op_print (GskGpuOp *op,
}
#ifdef GDK_RENDERING_VULKAN
-static void
-gsk_gpu_download_op_vk_reserve_descriptor_sets (GskGpuOp *op,
-GskGpuFrame *frame)
-{
-}
static void
gsk_gpu_download_op_vk_create (GskGpuDownloadOp *self)
{
@@ -76,7 +67,7 @@ gsk_gpu_download_op_vk_create (GskGpuDownloadOp *self)
gsize width, height, stride;
GdkMemoryFormat format;
-data = gsk_vulkan_buffer_get_data (self->buffer);
+data = gsk_gpu_buffer_map (self->buffer);
width = gsk_gpu_image_get_width (self->image);
height = gsk_gpu_image_get_height (self->image);
format = gsk_gpu_image_get_format (self->image);
@@ -88,12 +79,14 @@ gsk_gpu_download_op_vk_create (GskGpuDownloadOp *self)
bytes,
stride);
g_bytes_unref (bytes);
gsk_gpu_buffer_unmap (self->buffer);
}
static GskGpuOp *
gsk_gpu_download_op_vk_command (GskGpuOp *op,
GskGpuFrame *frame,
VkRenderPass render_pass,
VkFormat format,
VkCommandBuffer command_buffer)
{
GskGpuDownloadOp *self = (GskGpuDownloadOp *) op;
@@ -102,9 +95,8 @@ gsk_gpu_download_op_vk_command (GskGpuOp *op,
width = gsk_gpu_image_get_width (self->image);
height = gsk_gpu_image_get_height (self->image);
stride = width * gdk_memory_format_bytes_per_pixel (gsk_gpu_image_get_format (self->image));
-self->buffer = gsk_vulkan_buffer_new_map (GSK_VULKAN_DEVICE (gsk_gpu_frame_get_device (frame)),
-height * stride,
-GSK_VULKAN_READ);
+self->buffer = gsk_vulkan_buffer_new_read (GSK_VULKAN_DEVICE (gsk_gpu_frame_get_device (frame)),
+height * stride);
gsk_vulkan_image_transition (GSK_VULKAN_IMAGE (self->image),
command_buffer,
@@ -115,7 +107,7 @@ gsk_gpu_download_op_vk_command (GskGpuOp *op,
vkCmdCopyImageToBuffer (command_buffer,
gsk_vulkan_image_get_vk_image (GSK_VULKAN_IMAGE (self->image)),
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
-gsk_vulkan_buffer_get_buffer (self->buffer),
+gsk_vulkan_buffer_get_vk_buffer (GSK_VULKAN_BUFFER (self->buffer)),
1,
(VkBufferImageCopy[1]) {
{
@@ -152,7 +144,7 @@ gsk_gpu_download_op_vk_command (GskGpuOp *op,
.dstAccessMask = VK_ACCESS_HOST_READ_BIT,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
-.buffer = gsk_vulkan_buffer_get_buffer (self->buffer),
+.buffer = gsk_vulkan_buffer_get_vk_buffer (GSK_VULKAN_BUFFER (self->buffer)),
.offset = 0,
.size = VK_WHOLE_SIZE,
},
@@ -228,7 +220,6 @@ static const GskGpuOpClass GSK_GPU_DOWNLOAD_OP_CLASS = {
gsk_gpu_download_op_finish,
gsk_gpu_download_op_print,
#ifdef GDK_RENDERING_VULKAN
-gsk_gpu_download_op_vk_reserve_descriptor_sets,
gsk_gpu_download_op_vk_command,
#endif
gsk_gpu_download_op_gl_command

gsk/gpu/gskgpuframe.c

@@ -2,16 +2,20 @@
#include "gskgpuframeprivate.h"
#include "gskgpubufferprivate.h"
#include "gskgpudeviceprivate.h"
#include "gskgpudownloadopprivate.h"
#include "gskgpuimageprivate.h"
#include "gskgpunodeprocessorprivate.h"
#include "gskgpuopprivate.h"
#include "gskgpurendererprivate.h"
#include "gskgpurenderpassopprivate.h"
#include "gskdebugprivate.h"
#include "gskrendererprivate.h"
#define DEFAULT_VERTEX_BUFFER_SIZE 128 * 1024
#define GDK_ARRAY_NAME gsk_gpu_ops
#define GDK_ARRAY_TYPE_NAME GskGpuOps
#define GDK_ARRAY_ELEMENT_TYPE guchar
@@ -27,6 +31,10 @@ struct _GskGpuFramePrivate
GskGpuOps ops;
GskGpuOp *first_op;
GskGpuBuffer *vertex_buffer;
guchar *vertex_buffer_data;
gsize vertex_buffer_used;
};
G_DEFINE_TYPE_WITH_PRIVATE (GskGpuFrame, gsk_gpu_frame, G_TYPE_OBJECT)
@@ -76,6 +84,8 @@ gsk_gpu_frame_finalize (GObject *object)
gsk_gpu_ops_clear (&priv->ops);
g_clear_object (&priv->vertex_buffer);
g_object_unref (priv->device);
G_OBJECT_CLASS (gsk_gpu_frame_parent_class)->finalize (object);
@@ -148,7 +158,7 @@ gsk_gpu_frame_verbose_print (GskGpuFrame *self,
{
if (op->op_class->stage == GSK_GPU_STAGE_END_PASS)
indent--;
-gsk_gpu_op_print (op, string, indent);
+gsk_gpu_op_print (op, self, string, indent);
if (op->op_class->stage == GSK_GPU_STAGE_BEGIN_PASS)
indent++;
}
@@ -295,6 +305,72 @@ gsk_gpu_frame_alloc_op (GskGpuFrame *self,
return gsk_gpu_ops_index (&priv->ops, pos);
}
static GskGpuBuffer *
gsk_gpu_frame_create_vertex_buffer (GskGpuFrame *self,
gsize size)
{
return GSK_GPU_FRAME_GET_CLASS (self)->create_vertex_buffer (self, size);
}
static inline gsize
round_up (gsize number, gsize divisor)
{
return (number + divisor - 1) / divisor * divisor;
}
gsize
gsk_gpu_frame_reserve_vertex_data (GskGpuFrame *self,
gsize size)
{
GskGpuFramePrivate *priv = gsk_gpu_frame_get_instance_private (self);
gsize size_needed;
if (priv->vertex_buffer == NULL)
priv->vertex_buffer = gsk_gpu_frame_create_vertex_buffer (self, DEFAULT_VERTEX_BUFFER_SIZE);
size_needed = round_up (priv->vertex_buffer_used, size) + size;
if (gsk_gpu_buffer_get_size (priv->vertex_buffer) < size_needed)
{
gsize old_size = gsk_gpu_buffer_get_size (priv->vertex_buffer);
GskGpuBuffer *new_buffer = gsk_gpu_frame_create_vertex_buffer (self, old_size * 2);
guchar *new_data = gsk_gpu_buffer_map (new_buffer);
if (priv->vertex_buffer_data)
{
memcpy (new_data, priv->vertex_buffer_data, old_size);
gsk_gpu_buffer_unmap (priv->vertex_buffer);
}
g_object_unref (priv->vertex_buffer);
priv->vertex_buffer = new_buffer;
priv->vertex_buffer_data = new_data;
}
priv->vertex_buffer_used = size_needed;
return size_needed - size;
}
guchar *
gsk_gpu_frame_get_vertex_data (GskGpuFrame *self,
gsize offset)
{
GskGpuFramePrivate *priv = gsk_gpu_frame_get_instance_private (self);
if (priv->vertex_buffer_data == NULL)
priv->vertex_buffer_data = gsk_gpu_buffer_map (priv->vertex_buffer);
return priv->vertex_buffer_data + offset;
}
guint32
gsk_gpu_frame_get_image_descriptor (GskGpuFrame *self,
GskGpuImage *image,
GskGpuSampler sampler)
{
return GSK_GPU_FRAME_GET_CLASS (self)->get_image_descriptor (self, image, sampler);
}
gboolean
gsk_gpu_frame_is_busy (GskGpuFrame *self)
{
@@ -333,13 +409,10 @@ gsk_gpu_frame_record (GskGpuFrame *self,
};
}
-#if 0
gsk_gpu_render_pass_begin_op (self,
target,
&extents,
-VK_IMAGE_LAYOUT_UNDEFINED,
-VK_IMAGE_LAYOUT_PRESENT_SRC_KHR);
+GSK_RENDER_PASS_PRESENT);
-#endif
gsk_gpu_node_processor_process (self,
target,
@@ -347,11 +420,9 @@ gsk_gpu_frame_record (GskGpuFrame *self,
node,
viewport);
-#if 0
gsk_gpu_render_pass_end_op (self,
target,
-VK_IMAGE_LAYOUT_PRESENT_SRC_KHR);
+GSK_RENDER_PASS_PRESENT);
-#endif
if (texture)
gsk_gpu_download_op (self, target, copy_texture, texture);
@@ -367,7 +438,16 @@ gsk_gpu_frame_submit (GskGpuFrame *self)
gsk_gpu_frame_sort_ops (self);
gsk_gpu_frame_verbose_print (self, "after sort");
-GSK_GPU_FRAME_GET_CLASS (self)->submit (self, priv->first_op);
+if (priv->vertex_buffer)
+{
+gsk_gpu_buffer_unmap (priv->vertex_buffer);
+priv->vertex_buffer_data = NULL;
+priv->vertex_buffer_used = 0;
+}
+GSK_GPU_FRAME_GET_CLASS (self)->submit (self,
+priv->vertex_buffer,
+priv->first_op);
}
void
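
The vertex-buffer management above deserves a worked example: reserve_vertex_data() aligns the running offset to the requested size, doubles the buffer when it runs out (copying already-written data into the new mapping), and submit() unmaps once for the whole frame. With the 128 KiB default:

/* Worked example of the reservation arithmetic (numbers only):
 * vertex_buffer_used = 1000, an op reserves size = 48
 * round_up (1000, 48) = ((1000 + 47) / 48) * 48 = 1008
 * size_needed = 1008 + 48 = 1056 <= 131072, so no growth needed
 * returned offset = 1008; the op later writes its instance data at
 * gsk_gpu_frame_get_vertex_data (frame, 1008). */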

gsk/gpu/gskgpuframeprivate.h

@@ -26,7 +26,13 @@ struct _GskGpuFrameClass
gboolean (* is_busy) (GskGpuFrame *self);
void (* setup) (GskGpuFrame *self);
void (* cleanup) (GskGpuFrame *self);
guint32 (* get_image_descriptor) (GskGpuFrame *self,
GskGpuImage *image,
GskGpuSampler sampler);
GskGpuBuffer * (* create_vertex_buffer) (GskGpuFrame *self,
gsize size);
void (* submit) (GskGpuFrame *self,
GskGpuBuffer *vertex_buffer,
GskGpuOp *op);
};
@@ -37,11 +43,18 @@ void gsk_gpu_frame_setup (GskGpuF
GskGpuRenderer *renderer,
GskGpuDevice *device);
-GdkDrawContext * gsk_gpu_frame_get_context (GskGpuFrame *self);
+GdkDrawContext * gsk_gpu_frame_get_context (GskGpuFrame *self) G_GNUC_PURE;
-GskGpuDevice * gsk_gpu_frame_get_device (GskGpuFrame *self);
+GskGpuDevice * gsk_gpu_frame_get_device (GskGpuFrame *self) G_GNUC_PURE;
gpointer gsk_gpu_frame_alloc_op (GskGpuFrame *self,
gsize size);
gsize gsk_gpu_frame_reserve_vertex_data (GskGpuFrame *self,
gsize size);
guchar * gsk_gpu_frame_get_vertex_data (GskGpuFrame *self,
gsize offset);
guint32 gsk_gpu_frame_get_image_descriptor (GskGpuFrame *self,
GskGpuImage *image,
GskGpuSampler sampler);
gboolean gsk_gpu_frame_is_busy (GskGpuFrame *self);

gsk/gpu/gskgpuglobalsop.c Normal file (97 lines)

@@ -0,0 +1,97 @@
#include "config.h"
#include "gskgpuglobalsopprivate.h"
#include "gskgpuframeprivate.h"
#include "gskgpuprintprivate.h"
#include "gskroundedrectprivate.h"
#ifdef GDK_RENDERING_VULKAN
#include "gskvulkandeviceprivate.h"
#endif
typedef struct _GskGpuGlobalsOp GskGpuGlobalsOp;
struct _GskGpuGlobalsOp
{
GskGpuOp op;
GskGpuGlobalsInstance instance;
};
static void
gsk_gpu_globals_op_finish (GskGpuOp *op)
{
}
static void
gsk_gpu_globals_op_print (GskGpuOp *op,
GskGpuFrame *frame,
GString *string,
guint indent)
{
gsk_gpu_print_op (string, indent, "globals");
gsk_gpu_print_newline (string);
}
#ifdef GDK_RENDERING_VULKAN
static GskGpuOp *
gsk_gpu_globals_op_vk_command (GskGpuOp *op,
GskGpuFrame *frame,
VkRenderPass render_pass,
VkFormat format,
VkCommandBuffer command_buffer)
{
GskGpuGlobalsOp *self = (GskGpuGlobalsOp *) op;
vkCmdPushConstants (command_buffer,
gsk_vulkan_device_get_vk_pipeline_layout (GSK_VULKAN_DEVICE (gsk_gpu_frame_get_device (frame))),
VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT,
0,
sizeof (self->instance),
&self->instance);
return op->next;
}
#endif
static GskGpuOp *
gsk_gpu_globals_op_gl_command (GskGpuOp *op,
GskGpuFrame *frame)
{
GskGpuGlobalsOp *self = (GskGpuGlobalsOp *) op;
/* the GskGLFrame makes sure the uniform buffer points to the globals */
/* FIXME: Does it matter if we glBufferData() or glBufferSubData() here? */
glBufferSubData (GL_UNIFORM_BUFFER,
0,
sizeof (self->instance),
&self->instance);
return op->next;
}
static const GskGpuOpClass GSK_GPU_GLOBALS_OP_CLASS = {
GSK_GPU_OP_SIZE (GskGpuGlobalsOp),
GSK_GPU_STAGE_COMMAND,
gsk_gpu_globals_op_finish,
gsk_gpu_globals_op_print,
#ifdef GDK_RENDERING_VULKAN
gsk_gpu_globals_op_vk_command,
#endif
gsk_gpu_globals_op_gl_command
};
void
gsk_gpu_globals_op (GskGpuFrame *frame,
const graphene_vec2_t *scale,
const graphene_matrix_t *mvp,
const GskRoundedRect *clip)
{
GskGpuGlobalsOp *self;
self = (GskGpuGlobalsOp *) gsk_gpu_op_alloc (frame, &GSK_GPU_GLOBALS_OP_CLASS);
graphene_matrix_to_float (mvp, self->instance.mvp);
gsk_rounded_rect_to_float (clip, graphene_point_zero (), self->instance.clip);
graphene_vec2_to_float (scale, self->instance.scale);
}

gsk/gpu/gskgpuglobalsopprivate.h Normal file (26 lines)

@@ -0,0 +1,26 @@
#pragma once
#include "gskgpuopprivate.h"
#include <gsk/gskroundedrect.h>
#include <graphene.h>
G_BEGIN_DECLS
typedef struct _GskGpuGlobalsInstance GskGpuGlobalsInstance;
struct _GskGpuGlobalsInstance
{
float mvp[16];
float clip[12];
float scale[2];
};
void gsk_gpu_globals_op (GskGpuFrame *frame,
const graphene_vec2_t *scale,
const graphene_matrix_t *mvp,
const GskRoundedRect *clip);
G_END_DECLS

gsk/gpu/gskgpunodeprocessor.c

@@ -2,36 +2,50 @@
#include "gskgpunodeprocessorprivate.h"
-#include "gskgpublitopprivate.h"
+#include "gskgpuclipprivate.h"
#include "gskgpuframeprivate.h"
#include "gskgpuglobalsopprivate.h"
#include "gskgpuimageprivate.h"
#include "gskgputextureopprivate.h"
#include "gskgpuuploadopprivate.h"
#include "gskdebugprivate.h"
#include "gskrendernodeprivate.h"
#include "gskroundedrectprivate.h"
#include "gsktransformprivate.h"
#define ORTHO_NEAR_PLANE -10000
#define ORTHO_FAR_PLANE 10000
typedef struct _GskGpuNodeProcessor GskGpuNodeProcessor;
typedef enum {
GSK_GPU_GLOBAL_MATRIX = (1 << 0),
GSK_GPU_GLOBAL_SCALE = (1 << 1),
GSK_GPU_GLOBAL_CLIP = (1 << 2)
} GskGpuGlobals;
struct _GskGpuNodeProcessor
{
GskGpuFrame *frame;
cairo_rectangle_int_t scissor;
graphene_point_t offset;
graphene_matrix_t projection;
graphene_vec2_t scale;
GskTransform *modelview;
-graphene_matrix_t projection;
-/* GskGpuClip clip; */
+GskGpuClip clip;
GskGpuGlobals pending_globals;
};
static void
gsk_gpu_node_processor_add_node (GskGpuNodeProcessor *self,
-GskGpuImage *target,
GskRenderNode *node);
static void
gsk_gpu_node_processor_finish (GskGpuNodeProcessor *self)
{
g_clear_pointer (&self->modelview, gsk_transform_unref);
}
static void
@@ -45,7 +59,7 @@ gsk_gpu_node_processor_init (GskGpuNodeProcessor *self,
self->frame = frame;
self->scissor = *clip;
-//gsk_vulkan_clip_init_empty (&state.clip, &GRAPHENE_RECT_INIT (0, 0, viewport->size.width, viewport->size.height));
+gsk_gpu_clip_init_empty (&self->clip, &GRAPHENE_RECT_INIT (0, 0, viewport->size.width, viewport->size.height));
self->modelview = NULL;
graphene_matrix_init_ortho (&self->projection,
@@ -57,7 +71,28 @@
height / viewport->size.height);
self->offset = GRAPHENE_POINT_INIT (-viewport->origin.x,
-viewport->origin.y);
self->pending_globals = GSK_GPU_GLOBAL_MATRIX | GSK_GPU_GLOBAL_SCALE | GSK_GPU_GLOBAL_CLIP;
}
static void
gsk_gpu_node_processor_emit_globals_op (GskGpuNodeProcessor *self)
{
graphene_matrix_t mvp;
if (self->modelview)
{
gsk_transform_to_matrix (self->modelview, &mvp);
graphene_matrix_multiply (&mvp, &self->projection, &mvp);
}
else
graphene_matrix_init_from_matrix (&mvp, &self->projection);
gsk_gpu_globals_op (self->frame,
&self->scale,
&mvp,
&self->clip.rect);
self->pending_globals &= ~(GSK_GPU_GLOBAL_MATRIX | GSK_GPU_GLOBAL_SCALE | GSK_GPU_GLOBAL_CLIP);
} }
void
@@ -76,15 +111,14 @@ gsk_gpu_node_processor_process (GskGpuFrame *frame,
clip,
viewport);
-gsk_gpu_node_processor_add_node (&self, target, node);
+gsk_gpu_node_processor_add_node (&self, node);
gsk_gpu_node_processor_finish (&self);
}
static void
-gsk_gpu_node_processor_add_node (GskGpuNodeProcessor *self,
-GskGpuImage *target,
-GskRenderNode *node)
+gsk_gpu_node_processor_add_fallback_node (GskGpuNodeProcessor *self,
+GskRenderNode *node)
{
GskGpuImage *image;
@@ -93,18 +127,182 @@ gsk_gpu_node_processor_add_fallback_node (GskGpuNodeProcessor *self,
&self->scale,
&node->bounds);
-gsk_gpu_blit_op (self->frame,
-image,
-target,
-&(cairo_rectangle_int_t) {
-0, 0,
-gsk_gpu_image_get_width (image),
-gsk_gpu_image_get_height (image)
-},
-&(cairo_rectangle_int_t) {
-0, 0,
-gsk_gpu_image_get_width (image),
-gsk_gpu_image_get_height (image)
-},
-GSK_GPU_BLIT_LINEAR);
+gsk_gpu_texture_op (self->frame,
+gsk_gpu_clip_get_shader_clip (&self->clip, &self->offset, &node->bounds),
+image,
+GSK_GPU_SAMPLER_DEFAULT,
+&node->bounds,
+&self->offset,
+&node->bounds);
}
static const struct
{
GskGpuGlobals ignored_globals;
void (* process_node) (GskGpuNodeProcessor *self,
GskRenderNode *node);
} nodes_vtable[] = {
[GSK_NOT_A_RENDER_NODE] = {
0,
NULL,
},
[GSK_CONTAINER_NODE] = {
0,
NULL,
},
[GSK_CAIRO_NODE] = {
0,
NULL,
},
[GSK_COLOR_NODE] = {
0,
NULL,
},
[GSK_LINEAR_GRADIENT_NODE] = {
0,
NULL,
},
[GSK_REPEATING_LINEAR_GRADIENT_NODE] = {
0,
NULL,
},
[GSK_RADIAL_GRADIENT_NODE] = {
0,
NULL,
},
[GSK_REPEATING_RADIAL_GRADIENT_NODE] = {
0,
NULL,
},
[GSK_CONIC_GRADIENT_NODE] = {
0,
NULL,
},
[GSK_BORDER_NODE] = {
0,
NULL,
},
[GSK_TEXTURE_NODE] = {
0,
NULL,
},
[GSK_INSET_SHADOW_NODE] = {
0,
NULL,
},
[GSK_OUTSET_SHADOW_NODE] = {
0,
NULL,
},
[GSK_TRANSFORM_NODE] = {
0,
NULL,
},
[GSK_OPACITY_NODE] = {
0,
NULL,
},
[GSK_COLOR_MATRIX_NODE] = {
0,
NULL,
},
[GSK_REPEAT_NODE] = {
0,
NULL,
},
[GSK_CLIP_NODE] = {
0,
NULL,
},
[GSK_ROUNDED_CLIP_NODE] = {
0,
NULL,
},
[GSK_SHADOW_NODE] = {
0,
NULL,
},
[GSK_BLEND_NODE] = {
0,
NULL,
},
[GSK_CROSS_FADE_NODE] = {
0,
NULL,
},
[GSK_TEXT_NODE] = {
0,
NULL,
},
[GSK_BLUR_NODE] = {
0,
NULL,
},
[GSK_DEBUG_NODE] = {
0,
NULL,
},
[GSK_GL_SHADER_NODE] = {
0,
NULL,
},
[GSK_TEXTURE_SCALE_NODE] = {
0,
NULL,
},
[GSK_MASK_NODE] = {
0,
NULL,
},
[GSK_FILL_NODE] = {
0,
NULL,
},
[GSK_STROKE_NODE] = {
0,
NULL,
},
[GSK_SUBSURFACE_NODE] = {
0,
NULL,
},
};
static void
gsk_gpu_node_processor_add_node (GskGpuNodeProcessor *self,
GskRenderNode *node)
{
GskRenderNodeType node_type;
GskGpuGlobals required_globals;
/* This catches the corner cases of empty nodes, so after this check
* there's guaranteed to be at least 1 pixel that needs to be drawn */
if (node->bounds.size.width == 0 || node->bounds.size.height == 0)
return;
if (!gsk_gpu_clip_may_intersect_rect (&self->clip, &self->offset, &node->bounds))
return;
node_type = gsk_render_node_get_node_type (node);
if (node_type >= G_N_ELEMENTS (nodes_vtable))
{
g_critical ("unkonwn node type %u for %s", node_type, g_type_name_from_instance ((GTypeInstance *) node));
gsk_gpu_node_processor_add_fallback_node (self, node);
return;
}
required_globals = self->pending_globals & ~nodes_vtable[node_type].ignored_globals;
if (required_globals & (GSK_GPU_GLOBAL_MATRIX | GSK_GPU_GLOBAL_SCALE | GSK_GPU_GLOBAL_CLIP))
gsk_gpu_node_processor_emit_globals_op (self);
g_assert ((self->pending_globals & ~nodes_vtable[node_type].ignored_globals) == 0);
if (nodes_vtable[node_type].process_node)
{
nodes_vtable[node_type].process_node (self, node);
}
else
{
GSK_DEBUG (FALLBACK, "Unsupported node '%s'",
g_type_name_from_instance ((GTypeInstance *) node));
gsk_gpu_node_processor_add_fallback_node (self, node);
}
}
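
Every process_node slot in nodes_vtable is still NULL at this point, so every node type takes the fallback path; the table exists so follow-up commits can light up node types one by one. A hypothetical filled entry would look like this (the handler name is invented, not from this commit):

[GSK_TEXTURE_NODE] = {
0,                                        /* ignored_globals */
gsk_gpu_node_processor_add_texture_node,  /* hypothetical handler */
},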

gsk/gpu/gskgpuop.c

@@ -23,28 +23,23 @@ gsk_gpu_op_finish (GskGpuOp *op)
}
void
gsk_gpu_op_print (GskGpuOp *op,
GskGpuFrame *frame,
GString *string,
guint indent)
{
-op->op_class->print (op, string, indent);
+op->op_class->print (op, frame, string, indent);
}
#ifdef GDK_RENDERING_VULKAN
-void
-gsk_gpu_op_vk_reserve_descriptor_sets (GskGpuOp *op,
-GskGpuFrame *frame)
-{
-op->op_class->vk_reserve_descriptor_sets (op, frame);
-}
GskGpuOp *
gsk_gpu_op_vk_command (GskGpuOp *op,
GskGpuFrame *frame,
VkRenderPass render_pass,
VkFormat format,
VkCommandBuffer command_buffer)
{
-return op->op_class->vk_command (op, frame, render_pass, command_buffer);
+return op->op_class->vk_command (op, frame, render_pass, format, command_buffer);
}
#endif

gsk/gpu/gskgpuopprivate.h

@@ -32,15 +32,15 @@ struct _GskGpuOpClass
void (* finish) (GskGpuOp *op);
void (* print) (GskGpuOp *op,
GskGpuFrame *frame,
GString *string,
guint indent);
#ifdef GDK_RENDERING_VULKAN
-void (* vk_reserve_descriptor_sets) (GskGpuOp *op,
-GskGpuFrame *frame);
GskGpuOp * (* vk_command) (GskGpuOp *op,
GskGpuFrame *frame,
VkRenderPass render_pass,
VkFormat format,
VkCommandBuffer command_buffer);
#endif
GskGpuOp * (* gl_command) (GskGpuOp *op,
@@ -55,15 +55,15 @@ GskGpuOp * gsk_gpu_op_alloc (GskGpuF
void gsk_gpu_op_finish (GskGpuOp *op);
void gsk_gpu_op_print (GskGpuOp *op,
GskGpuFrame *frame,
GString *string,
guint indent);
#ifdef GDK_RENDERING_VULKAN
-void gsk_gpu_op_vk_reserve_descriptor_sets (GskGpuOp *op,
-GskGpuFrame *frame);
GskGpuOp * gsk_gpu_op_vk_command (GskGpuOp *op,
GskGpuFrame *frame,
VkRenderPass render_pass,
VkFormat format,
VkCommandBuffer command_buffer);
#endif
GskGpuOp * gsk_gpu_op_gl_command (GskGpuOp *op,

gsk/gpu/gskgpurenderpassop.c Normal file

@@ -0,0 +1,327 @@
#include "config.h"
#include "gskgpurenderpassopprivate.h"
#include "gskglimageprivate.h"
#include "gskgpudeviceprivate.h"
#include "gskgpuframeprivate.h"
#include "gskgpunodeprocessorprivate.h"
#include "gskgpuprintprivate.h"
#include "gskgpushaderopprivate.h"
#include "gskrendernodeprivate.h"
#ifdef GDK_RENDERING_VULKAN
#include "gskvulkanimageprivate.h"
#endif
typedef struct _GskGpuRenderPassOp GskGpuRenderPassOp;
struct _GskGpuRenderPassOp
{
GskGpuOp op;
GskGpuImage *target;
cairo_rectangle_int_t area;
GskRenderPassType pass_type;
};
static void
gsk_gpu_render_pass_op_finish (GskGpuOp *op)
{
GskGpuRenderPassOp *self = (GskGpuRenderPassOp *) op;
g_object_unref (self->target);
}
static void
gsk_gpu_render_pass_op_print (GskGpuOp *op,
GskGpuFrame *frame,
GString *string,
guint indent)
{
GskGpuRenderPassOp *self = (GskGpuRenderPassOp *) op;
gsk_gpu_print_op (string, indent, "begin-render-pass");
gsk_gpu_print_image (string, self->target);
gsk_gpu_print_newline (string);
}
#ifdef GDK_RENDERING_VULKAN
static VkImageLayout
gsk_gpu_render_pass_type_to_vk_image_layout (GskRenderPassType type)
{
switch (type)
{
default:
g_assert_not_reached ();
G_GNUC_FALLTHROUGH;
case GSK_RENDER_PASS_PRESENT:
return VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
case GSK_RENDER_PASS_OFFSCREEN:
return VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
}
}
static void
gsk_gpu_render_pass_op_do_barriers (GskGpuRenderPassOp *self,
VkCommandBuffer command_buffer)
{
GskGpuShaderOp *shader;
GskGpuOp *op;
gsize i;
for (op = ((GskGpuOp *) self)->next;
op->op_class->stage != GSK_GPU_STAGE_END_PASS;
op = op->next)
{
if (op->op_class->stage != GSK_GPU_STAGE_SHADER)
continue;
shader = (GskGpuShaderOp *) op;
for (i = 0; i < shader->n_images; i++)
{
gsk_vulkan_image_transition (GSK_VULKAN_IMAGE (shader->images[i].image),
command_buffer,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
VK_ACCESS_SHADER_READ_BIT);
}
}
}
static GskGpuOp *
gsk_gpu_render_pass_op_vk_command (GskGpuOp *op,
GskGpuFrame *frame,
VkRenderPass render_pass,
VkFormat format,
VkCommandBuffer command_buffer)
{
GskGpuRenderPassOp *self = (GskGpuRenderPassOp *) op;
VkRenderPass vk_render_pass;
VkFormat vk_format;
/* nesting frame passes not allowed */
g_assert (render_pass == VK_NULL_HANDLE);
gsk_gpu_render_pass_op_do_barriers (self, command_buffer);
vk_format = gsk_vulkan_image_get_vk_format (GSK_VULKAN_IMAGE (self->target));
vk_render_pass = gsk_vulkan_device_get_vk_render_pass (GSK_VULKAN_DEVICE (gsk_gpu_frame_get_device (frame)),
vk_format,
gsk_vulkan_image_get_vk_image_layout (GSK_VULKAN_IMAGE (self->target)),
gsk_gpu_render_pass_type_to_vk_image_layout (self->pass_type));
vkCmdSetViewport (command_buffer,
0,
1,
&(VkViewport) {
.x = 0,
.y = 0,
.width = gsk_gpu_image_get_width (self->target),
.height = gsk_gpu_image_get_height (self->target),
.minDepth = 0,
.maxDepth = 1
});
vkCmdBeginRenderPass (command_buffer,
&(VkRenderPassBeginInfo) {
.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
.renderPass = vk_render_pass,
.framebuffer = gsk_vulkan_image_get_vk_framebuffer (GSK_VULKAN_IMAGE (self->target),
vk_render_pass),
.renderArea = {
{ self->area.x, self->area.y },
{ self->area.width, self->area.height }
},
.clearValueCount = 1,
.pClearValues = (VkClearValue [1]) {
{ .color = { .float32 = { 0.f, 0.f, 0.f, 0.f } } }
}
},
VK_SUBPASS_CONTENTS_INLINE);
op = op->next;
while (op->op_class->stage != GSK_GPU_STAGE_END_PASS)
{
op = gsk_gpu_op_vk_command (op, frame, vk_render_pass, vk_format, command_buffer);
}
op = gsk_gpu_op_vk_command (op, frame, vk_render_pass, vk_format, command_buffer);
return op;
}
#endif
static GskGpuOp *
gsk_gpu_render_pass_op_gl_command (GskGpuOp *op,
GskGpuFrame *frame)
{
GskGpuRenderPassOp *self = (GskGpuRenderPassOp *) op;
gsk_gl_image_bind_framebuffer (GSK_GL_IMAGE (self->target));
glViewport (0, 0,
gsk_gpu_image_get_width (self->target),
gsk_gpu_image_get_height (self->target));
glClearColor (0, 0, 0, 0);
glClear (GL_COLOR_BUFFER_BIT);
op = op->next;
while (op->op_class->stage != GSK_GPU_STAGE_END_PASS)
{
op = gsk_gpu_op_gl_command (op, frame);
}
op = gsk_gpu_op_gl_command (op, frame);
return op;
}
static const GskGpuOpClass GSK_GPU_RENDER_PASS_OP_CLASS = {
GSK_GPU_OP_SIZE (GskGpuRenderPassOp),
GSK_GPU_STAGE_BEGIN_PASS,
gsk_gpu_render_pass_op_finish,
gsk_gpu_render_pass_op_print,
#ifdef GDK_RENDERING_VULKAN
gsk_gpu_render_pass_op_vk_command,
#endif
gsk_gpu_render_pass_op_gl_command
};
typedef struct _GskGpuFramePassEndOp GskGpuFramePassEndOp;
struct _GskGpuFramePassEndOp
{
GskGpuOp op;
GskGpuImage *target;
GskRenderPassType pass_type;
};
static void
gsk_gpu_render_pass_end_op_finish (GskGpuOp *op)
{
GskGpuFramePassEndOp *self = (GskGpuFramePassEndOp *) op;
g_object_unref (self->target);
}
static void
gsk_gpu_render_pass_end_op_print (GskGpuOp *op,
GskGpuFrame *frame,
GString *string,
guint indent)
{
GskGpuFramePassEndOp *self = (GskGpuFramePassEndOp *) op;
gsk_gpu_print_op (string, indent, "end-render-pass");
gsk_gpu_print_image (string, self->target);
gsk_gpu_print_newline (string);
}
#ifdef GDK_RENDERING_VULKAN
static GskGpuOp *
gsk_gpu_render_pass_end_op_vk_command (GskGpuOp *op,
GskGpuFrame *frame,
VkRenderPass render_pass,
VkFormat format,
VkCommandBuffer command_buffer)
{
GskGpuFramePassEndOp *self = (GskGpuFramePassEndOp *) op;
vkCmdEndRenderPass (command_buffer);
gsk_vulkan_image_set_vk_image_layout (GSK_VULKAN_IMAGE (self->target),
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
gsk_gpu_render_pass_type_to_vk_image_layout (self->pass_type),
VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT);
return op->next;
}
#endif
static GskGpuOp *
gsk_gpu_render_pass_end_op_gl_command (GskGpuOp *op,
GskGpuFrame *frame)
{
/* nothing to do here */
return op->next;
}
static const GskGpuOpClass GSK_GPU_RENDER_PASS_END_OP_CLASS = {
GSK_GPU_OP_SIZE (GskGpuFramePassEndOp),
GSK_GPU_STAGE_END_PASS,
gsk_gpu_render_pass_end_op_finish,
gsk_gpu_render_pass_end_op_print,
#ifdef GDK_RENDERING_VULKAN
gsk_gpu_render_pass_end_op_vk_command,
#endif
gsk_gpu_render_pass_end_op_gl_command
};
void
gsk_gpu_render_pass_begin_op (GskGpuFrame *frame,
GskGpuImage *image,
const cairo_rectangle_int_t *area,
GskRenderPassType pass_type)
{
GskGpuRenderPassOp *self;
self = (GskGpuRenderPassOp *) gsk_gpu_op_alloc (frame, &GSK_GPU_RENDER_PASS_OP_CLASS);
self->target = g_object_ref (image);
self->area = *area;
self->pass_type = pass_type;
}
void
gsk_gpu_render_pass_end_op (GskGpuFrame *frame,
GskGpuImage *image,
GskRenderPassType pass_type)
{
GskGpuFramePassEndOp *self;
self = (GskGpuFramePassEndOp *) gsk_gpu_op_alloc (frame, &GSK_GPU_RENDER_PASS_END_OP_CLASS);
self->target = g_object_ref (image);
self->pass_type = pass_type;
}
GskGpuImage *
gsk_gpu_render_pass_op_offscreen (GskGpuFrame *frame,
const graphene_vec2_t *scale,
const graphene_rect_t *viewport,
GskRenderNode *node)
{
GskGpuImage *image;
int width, height;
width = ceil (graphene_vec2_get_x (scale) * viewport->size.width);
height = ceil (graphene_vec2_get_y (scale) * viewport->size.height);
image = gsk_gpu_device_create_offscreen_image (gsk_gpu_frame_get_device (frame),
gsk_render_node_get_preferred_depth (node),
width, height);
gsk_gpu_render_pass_begin_op (frame,
image,
&(cairo_rectangle_int_t) { 0, 0, width, height },
GSK_RENDER_PASS_OFFSCREEN);
gsk_gpu_node_processor_process (frame,
image,
&(cairo_rectangle_int_t) { 0, 0, width, height },
node,
viewport);
gsk_gpu_render_pass_end_op (frame,
image,
GSK_RENDER_PASS_OFFSCREEN);
g_object_unref (image);
return image;
}
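The control flow in this file relies on one convention: every command vfunc returns the next op to run, so a begin-pass op can consume its whole pass, including the matching end op. A minimal standalone C sketch of that convention, using stand-in types rather than the GskGpuOp machinery:

#include <stdio.h>

typedef enum { STAGE_BEGIN_PASS, STAGE_SHADER, STAGE_END_PASS } Stage;

typedef struct Op Op;
struct Op
{
  Stage stage;
  const char *name;
  Op *next;
};

static Op *
run_op (Op *op)
{
  if (op->stage == STAGE_BEGIN_PASS)
    {
      printf ("begin %s\n", op->name);
      /* consume everything up to and including the matching end op */
      op = op->next;
      while (op->stage != STAGE_END_PASS)
        op = run_op (op);
      return run_op (op);
    }

  printf ("run %s\n", op->name);
  return op->next;
}

int
main (void)
{
  Op end = { STAGE_END_PASS, "end-render-pass", NULL };
  Op texture = { STAGE_SHADER, "texture", &end };
  Op begin = { STAGE_BEGIN_PASS, "begin-render-pass", &texture };
  Op *op = &begin;

  while (op)
    op = run_op (op);

  return 0;
}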
gsk/gpu/gskgpurenderpassopprivate.h Normal file
@@ -0,0 +1,32 @@
#pragma once
#include "gskgputypesprivate.h"
#include "gsktypes.h"
#include <graphene.h>
G_BEGIN_DECLS
/* We only need this for the final VkImageLayout, but don't tell anyone */
typedef enum
{
GSK_RENDER_PASS_OFFSCREEN,
GSK_RENDER_PASS_PRESENT
} GskRenderPassType;
void gsk_gpu_render_pass_begin_op (GskGpuFrame *frame,
GskGpuImage *image,
const cairo_rectangle_int_t *area,
GskRenderPassType pass_type);
void gsk_gpu_render_pass_end_op (GskGpuFrame *frame,
GskGpuImage *image,
GskRenderPassType pass_type);
GskGpuImage * gsk_gpu_render_pass_op_offscreen (GskGpuFrame *frame,
const graphene_vec2_t *scale,
const graphene_rect_t *viewport,
GskRenderNode *node);
G_END_DECLS
gsk/gpu/gskgpushaderop.c Normal file
@@ -0,0 +1,151 @@
#include "config.h"
#include "gskgpushaderopprivate.h"
#include "gskgpuframeprivate.h"
#include "gskgldeviceprivate.h"
#include "gskglframeprivate.h"
#include "gskglimageprivate.h"
#ifdef GDK_RENDERING_VULKAN
#include "gskvulkandeviceprivate.h"
#endif
void
gsk_gpu_shader_op_finish (GskGpuOp *op)
{
GskGpuShaderOp *self = (GskGpuShaderOp *) op;
gsize i;
for (i = 0; i < self->n_images; i++)
g_object_unref (self->images[i].image);
}
#ifdef GDK_RENDERING_VULKAN
GskGpuOp *
gsk_gpu_shader_op_vk_command_n (GskGpuOp *op,
GskGpuFrame *frame,
VkRenderPass render_pass,
VkFormat format,
VkCommandBuffer command_buffer,
gsize instance_scale)
{
GskGpuShaderOp *self = (GskGpuShaderOp *) op;
GskGpuShaderOpClass *shader_op_class = (GskGpuShaderOpClass *) op->op_class;
GskGpuOp *next;
gsize i;
i = 1;
for (next = op->next; next && i < 10 * 1000; next = next->next)
{
GskGpuShaderOp *next_shader = (GskGpuShaderOp *) next;
if (next->op_class != op->op_class ||
next_shader->vertex_offset != self->vertex_offset + i * shader_op_class->vertex_size)
break;
i++;
}
vkCmdBindPipeline (command_buffer,
VK_PIPELINE_BIND_POINT_GRAPHICS,
gsk_vulkan_device_get_vk_pipeline (GSK_VULKAN_DEVICE (gsk_gpu_frame_get_device (frame)),
shader_op_class,
self->clip,
format,
render_pass));
vkCmdDraw (command_buffer,
6 * instance_scale, i,
0, self->vertex_offset / shader_op_class->vertex_size);
return next;
}
GskGpuOp *
gsk_gpu_shader_op_vk_command (GskGpuOp *op,
GskGpuFrame *frame,
VkRenderPass render_pass,
VkFormat format,
VkCommandBuffer command_buffer)
{
return gsk_gpu_shader_op_vk_command_n (op, frame, render_pass, format, command_buffer, 1);
}
#endif
GskGpuOp *
gsk_gpu_shader_op_gl_command_n (GskGpuOp *op,
GskGpuFrame *frame,
gsize instance_scale)
{
GskGpuShaderOp *self = (GskGpuShaderOp *) op;
GskGpuShaderOpClass *shader_op_class = (GskGpuShaderOpClass *) op->op_class;
GskGLDevice *device;
gsize i;
device = GSK_GL_DEVICE (gsk_gpu_frame_get_device (frame));
gsk_gl_frame_use_program (GSK_GL_FRAME (frame),
shader_op_class,
self->clip);
for (i = 0; i < self->n_images; i++)
{
glActiveTexture (GL_TEXTURE0 + self->images[i].descriptor);
gsk_gl_image_bind_texture (GSK_GL_IMAGE (self->images[i].image));
glBindSampler (self->images[i].descriptor,
gsk_gl_device_get_sampler_id (device, self->images[i].sampler));
}
shader_op_class->setup_vao (self->vertex_offset);
glDrawArraysInstanced (GL_TRIANGLES,
0,
6 * instance_scale,
1);
return op->next;
}
GskGpuOp *
gsk_gpu_shader_op_gl_command (GskGpuOp *op,
GskGpuFrame *frame)
{
return gsk_gpu_shader_op_gl_command_n (op, frame, 1);
}
GskGpuShaderOp *
gsk_gpu_shader_op_alloc (GskGpuFrame *frame,
const GskGpuShaderOpClass *op_class,
GskGpuShaderClip clip,
gpointer out_vertex_data)
{
GskGpuShaderOp *self;
self = (GskGpuShaderOp *) gsk_gpu_op_alloc (frame, &op_class->parent_class);
self->clip = clip;
self->vertex_offset = gsk_gpu_frame_reserve_vertex_data (frame, op_class->vertex_size);
*((gpointer *) out_vertex_data) = gsk_gpu_frame_get_vertex_data (frame, self->vertex_offset);
return self;
}
guint32
gsk_gpu_shader_op_use_image (GskGpuShaderOp *self,
GskGpuFrame *frame,
GskGpuImage *image,
GskGpuSampler sampler)
{
gsize id;
g_assert (self->n_images < G_N_ELEMENTS (self->images));
id = self->n_images;
self->images[id].image = g_object_ref (image);
self->images[id].sampler = sampler;
self->images[id].descriptor = gsk_gpu_frame_get_image_descriptor (frame, image, sampler);
self->n_images++;
return self->images[id].descriptor;
}
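gsk_gpu_shader_op_vk_command_n () folds consecutive ops of the same class whose vertex data sits contiguously in the frame's vertex buffer into a single instanced draw. A standalone sketch of that merging logic, with stand-in types instead of the real op structs:

#include <stddef.h>
#include <stdio.h>

typedef struct
{
  const void *op_class;   /* only ops of the same class are merged */
  size_t vertex_offset;   /* byte offset into the frame's vertex buffer */
  size_t vertex_size;     /* bytes of instance data per op */
} ShaderOp;

/* returns how many consecutive ops were folded into one draw call */
static size_t
merge_draw (const ShaderOp *ops,
            size_t          n_ops)
{
  size_t i = 1;

  while (i < n_ops &&
         ops[i].op_class == ops[0].op_class &&
         ops[i].vertex_offset == ops[0].vertex_offset + i * ops[0].vertex_size)
    i++;

  printf ("draw %zu instances, first instance %zu\n",
          i, ops[0].vertex_offset / ops[0].vertex_size);

  return i;
}

int
main (void)
{
  static const char texture_class;
  ShaderOp ops[] = {
    { &texture_class,  0, 32 },
    { &texture_class, 32, 32 },
    { &texture_class, 64, 32 },
  };
  size_t consumed = 0;

  while (consumed < 3)
    consumed += merge_draw (ops + consumed, 3 - consumed);

  return 0;
}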
gsk/gpu/gskgpushaderopprivate.h Normal file
@@ -0,0 +1,67 @@
#pragma once
#include "gskgpuopprivate.h"
#include "gskgputypesprivate.h"
G_BEGIN_DECLS
struct _GskGpuShaderOp
{
GskGpuOp parent_op;
GskGpuShaderClip clip;
gsize vertex_offset;
struct {
GskGpuImage *image;
GskGpuSampler sampler;
guint32 descriptor;
} images[2];
gsize n_images;
};
struct _GskGpuShaderOpClass
{
GskGpuOpClass parent_class;
const char * shader_name;
gsize vertex_size;
#ifdef GDK_RENDERING_VULKAN
const VkPipelineVertexInputStateCreateInfo *vertex_input_state;
#endif
void (* setup_vao) (gsize offset);
};
GskGpuShaderOp * gsk_gpu_shader_op_alloc (GskGpuFrame *frame,
const GskGpuShaderOpClass *op_class,
GskGpuShaderClip clip,
gpointer out_vertex_data);
void gsk_gpu_shader_op_finish (GskGpuOp *op);
guint32 gsk_gpu_shader_op_use_image (GskGpuShaderOp *self,
GskGpuFrame *frame,
GskGpuImage *image,
GskGpuSampler sampler);
#ifdef GDK_RENDERING_VULKAN
GskGpuOp * gsk_gpu_shader_op_vk_command_n (GskGpuOp *op,
GskGpuFrame *frame,
VkRenderPass render_pass,
VkFormat format,
VkCommandBuffer command_buffer,
gsize instance_scale);
GskGpuOp * gsk_gpu_shader_op_vk_command (GskGpuOp *op,
GskGpuFrame *frame,
VkRenderPass render_pass,
VkFormat format,
VkCommandBuffer command_buffer);
#endif
GskGpuOp * gsk_gpu_shader_op_gl_command_n (GskGpuOp *op,
GskGpuFrame *frame,
gsize instance_scale);
GskGpuOp * gsk_gpu_shader_op_gl_command (GskGpuOp *op,
GskGpuFrame *frame);
G_END_DECLS
gsk/gpu/gskgputextureop.c Normal file
@@ -0,0 +1,74 @@
#include "config.h"
#include "gskgputextureopprivate.h"
#include "gskgpuframeprivate.h"
#include "gskgpuprintprivate.h"
#include "gskrectprivate.h"
#include "gpu/shaders/gskgputextureinstance.h"
typedef struct _GskGpuTextureOp GskGpuTextureOp;
struct _GskGpuTextureOp
{
GskGpuShaderOp op;
};
static void
gsk_gpu_texture_op_print (GskGpuOp *op,
GskGpuFrame *frame,
GString *string,
guint indent)
{
GskGpuShaderOp *shader = (GskGpuShaderOp *) op;
GskGpuTextureInstance *instance;
instance = (GskGpuTextureInstance *) gsk_gpu_frame_get_vertex_data (frame, shader->vertex_offset);
gsk_gpu_print_op (string, indent, "texture");
gsk_gpu_print_rect (string, instance->rect);
gsk_gpu_print_image (string, shader->images[0].image);
gsk_gpu_print_newline (string);
}
static const GskGpuShaderOpClass GSK_GPU_TEXTURE_OP_CLASS = {
{
GSK_GPU_OP_SIZE (GskGpuTextureOp),
GSK_GPU_STAGE_SHADER,
gsk_gpu_shader_op_finish,
gsk_gpu_texture_op_print,
#ifdef GDK_RENDERING_VULKAN
gsk_gpu_shader_op_vk_command,
#endif
gsk_gpu_shader_op_gl_command
},
"gskgputexture",
sizeof (GskGpuTextureInstance),
#ifdef GDK_RENDERING_VULKAN
&gsk_gpu_texture_info,
#endif
gsk_gpu_texture_setup_vao
};
void
gsk_gpu_texture_op (GskGpuFrame *frame,
GskGpuShaderClip clip,
GskGpuImage *image,
GskGpuSampler sampler,
const graphene_rect_t *rect,
const graphene_point_t *offset,
const graphene_rect_t *tex_rect)
{
GskGpuTextureInstance *instance;
GskGpuTextureOp *self;
self = (GskGpuTextureOp *) gsk_gpu_shader_op_alloc (frame,
&GSK_GPU_TEXTURE_OP_CLASS,
clip,
&instance);
gsk_gpu_rect_to_float (rect, offset, instance->rect);
gsk_gpu_rect_to_float (tex_rect, offset, instance->tex_rect);
instance->tex_id = gsk_gpu_shader_op_use_image ((GskGpuShaderOp *) self, frame, image, sampler);
}
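gsk_gpu_texture_op () is this op's entire public surface. A hypothetical call site (not part of the commit), assuming a valid frame and image; passing the node bounds as both rect and tex_rect maps the full texture onto the quad:

/* illustrative helper, not from the commit */
static void
draw_whole_texture (GskGpuFrame           *frame,
                    GskGpuImage           *image,
                    const graphene_rect_t *bounds)
{
  gsk_gpu_texture_op (frame,
                      GSK_GPU_SHADER_CLIP_NONE,
                      image,
                      GSK_GPU_SAMPLER_DEFAULT,
                      bounds,
                      graphene_point_zero (),
                      bounds);
}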
gsk/gpu/gskgputextureopprivate.h Normal file
@@ -0,0 +1,19 @@
#pragma once
#include "gskgpushaderopprivate.h"
#include <graphene.h>
G_BEGIN_DECLS
void gsk_gpu_texture_op (GskGpuFrame *frame,
GskGpuShaderClip clip,
GskGpuImage *image,
GskGpuSampler sampler,
const graphene_rect_t *rect,
const graphene_point_t *offset,
const graphene_rect_t *tex_rect);
G_END_DECLS
gsk/gpu/gskgputypesprivate.h
@@ -4,9 +4,26 @@
#include "gdk/gdkmemoryformatprivate.h" #include "gdk/gdkmemoryformatprivate.h"
typedef struct _GskGpuBuffer GskGpuBuffer;
typedef struct _GskGpuDevice GskGpuDevice; typedef struct _GskGpuDevice GskGpuDevice;
typedef struct _GskGpuFrame GskGpuFrame; typedef struct _GskGpuFrame GskGpuFrame;
typedef struct _GskGpuImage GskGpuImage; typedef struct _GskGpuImage GskGpuImage;
typedef struct _GskGpuOp GskGpuOp; typedef struct _GskGpuOp GskGpuOp;
typedef struct _GskGpuOpClass GskGpuOpClass; typedef struct _GskGpuOpClass GskGpuOpClass;
typedef struct _GskGpuShaderOp GskGpuShaderOp;
typedef struct _GskGpuShaderOpClass GskGpuShaderOpClass;
typedef enum {
GSK_GPU_SAMPLER_DEFAULT,
GSK_GPU_SAMPLER_REPEAT,
GSK_GPU_SAMPLER_NEAREST,
/* add more */
GSK_GPU_SAMPLER_N_SAMPLERS
} GskGpuSampler;
typedef enum {
GSK_GPU_SHADER_CLIP_NONE,
GSK_GPU_SHADER_CLIP_RECT,
GSK_GPU_SHADER_CLIP_ROUNDED
} GskGpuShaderClip;
gsk/gpu/gskgpuuploadop.c
@@ -92,12 +92,6 @@ gsk_gpu_upload_op_gl_command (GskGpuOp *op,
}
#ifdef GDK_RENDERING_VULKAN
-static void
-gsk_gpu_upload_op_vk_reserve_descriptor_sets (GskGpuOp *op,
-GskGpuFrame *frame)
-{
-}
static GskGpuOp *
gsk_gpu_upload_op_vk_command_with_area (GskGpuOp *op,
GskGpuFrame *frame,
@@ -105,19 +99,20 @@ gsk_gpu_upload_op_vk_command_with_area (GskGpuOp *op,
GskVulkanImage *image,
const cairo_rectangle_int_t *area,
void (* draw_func) (GskGpuOp *, guchar *, gsize),
-GskVulkanBuffer **buffer)
GskGpuBuffer **buffer)
{
gsize stride;
guchar *data;
stride = area->width * gdk_memory_format_bytes_per_pixel (gsk_gpu_image_get_format (GSK_GPU_IMAGE (image)));
-*buffer = gsk_vulkan_buffer_new_map (GSK_VULKAN_DEVICE (gsk_gpu_frame_get_device (frame)),
-area->height * stride,
-GSK_VULKAN_WRITE);
-data = gsk_vulkan_buffer_get_data (*buffer);
*buffer = gsk_vulkan_buffer_new_write (GSK_VULKAN_DEVICE (gsk_gpu_frame_get_device (frame)),
area->height * stride);
data = gsk_gpu_buffer_map (*buffer);
draw_func (op, data, stride);
gsk_gpu_buffer_unmap (*buffer);
vkCmdPipelineBarrier (command_buffer,
VK_PIPELINE_STAGE_HOST_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT,
@@ -129,7 +124,7 @@ gsk_gpu_upload_op_vk_command_with_area (GskGpuOp *op,
.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
-.buffer = gsk_vulkan_buffer_get_buffer (*buffer),
.buffer = gsk_vulkan_buffer_get_vk_buffer (GSK_VULKAN_BUFFER (*buffer)),
.offset = 0,
.size = VK_WHOLE_SIZE,
},
@@ -141,7 +136,7 @@ gsk_gpu_upload_op_vk_command_with_area (GskGpuOp *op,
VK_ACCESS_TRANSFER_WRITE_BIT);
vkCmdCopyBufferToImage (command_buffer,
-gsk_vulkan_buffer_get_buffer (*buffer),
gsk_vulkan_buffer_get_vk_buffer (GSK_VULKAN_BUFFER (*buffer)),
gsk_vulkan_image_get_vk_image (image),
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1,
@@ -178,7 +173,7 @@ gsk_gpu_upload_op_vk_command (GskGpuOp *op,
VkCommandBuffer command_buffer,
GskVulkanImage *image,
void (* draw_func) (GskGpuOp *, guchar *, gsize),
-GskVulkanBuffer **buffer)
GskGpuBuffer **buffer)
{
gsize stride;
guchar *data;
@@ -218,9 +213,7 @@ struct _GskGpuUploadCairoOp
GskRenderNode *node;
graphene_rect_t viewport;
-#ifdef GDK_RENDERING_VULKAN
-GskVulkanBuffer *buffer;
-#endif
GskGpuBuffer *buffer;
};
static void
@@ -230,15 +223,14 @@ gsk_gpu_upload_cairo_op_finish (GskGpuOp *op)
g_object_unref (self->image);
gsk_render_node_unref (self->node);
-#ifdef GDK_RENDERING_VULKAN
-g_clear_pointer (&self->buffer, gsk_vulkan_buffer_free);
-#endif
g_clear_object (&self->buffer);
}
static void
gsk_gpu_upload_cairo_op_print (GskGpuOp *op,
GskGpuFrame *frame,
GString *string,
guint indent)
{
GskGpuUploadCairoOp *self = (GskGpuUploadCairoOp *) op;
@@ -283,10 +275,11 @@ gsk_gpu_upload_cairo_op_draw (GskGpuOp *op,
#ifdef GDK_RENDERING_VULKAN
static GskGpuOp *
gsk_gpu_upload_cairo_op_vk_command (GskGpuOp *op,
GskGpuFrame *frame,
VkRenderPass render_pass,
VkFormat format,
VkCommandBuffer command_buffer)
{
GskGpuUploadCairoOp *self = (GskGpuUploadCairoOp *) op;
@@ -317,7 +310,6 @@ static const GskGpuOpClass GSK_GPU_UPLOAD_CAIRO_OP_CLASS = {
gsk_gpu_upload_cairo_op_finish,
gsk_gpu_upload_cairo_op_print,
#ifdef GDK_RENDERING_VULKAN
-gsk_gpu_upload_op_vk_reserve_descriptor_sets,
gsk_gpu_upload_cairo_op_vk_command,
#endif
gsk_gpu_upload_cairo_op_gl_command
gsk/gpu/gskvulkanbuffer.c
@@ -7,6 +7,8 @@
struct _GskVulkanBuffer
{
GskGpuBuffer parent_instance;
GskVulkanDevice *device;
VkBuffer vk_buffer;
@@ -15,7 +17,56 @@ struct _GskVulkanBuffer
GskVulkanAllocation allocation;
};
-static GskVulkanBuffer *
G_DEFINE_TYPE (GskVulkanBuffer, gsk_vulkan_buffer, GSK_TYPE_GPU_BUFFER)
static void
gsk_vulkan_buffer_finalize (GObject *object)
{
GskVulkanBuffer *self = GSK_VULKAN_BUFFER (object);
vkDestroyBuffer (gsk_vulkan_device_get_vk_device (self->device),
self->vk_buffer,
NULL);
gsk_vulkan_free (self->allocator, &self->allocation);
gsk_vulkan_allocator_unref (self->allocator);
g_object_unref (self->device);
G_OBJECT_CLASS (gsk_vulkan_buffer_parent_class)->finalize (object);
}
static guchar *
gsk_vulkan_buffer_map (GskGpuBuffer *buffer)
{
GskVulkanBuffer *self = GSK_VULKAN_BUFFER (buffer);
return self->allocation.map;
}
static void
gsk_vulkan_buffer_unmap (GskGpuBuffer *buffer)
{
}
static void
gsk_vulkan_buffer_class_init (GskVulkanBufferClass *klass)
{
GskGpuBufferClass *buffer_class = GSK_GPU_BUFFER_CLASS (klass);
GObjectClass *gobject_class = G_OBJECT_CLASS (klass);
buffer_class->map = gsk_vulkan_buffer_map;
buffer_class->unmap = gsk_vulkan_buffer_unmap;
gobject_class->finalize = gsk_vulkan_buffer_finalize;
}
static void
gsk_vulkan_buffer_init (GskVulkanBuffer *self)
{
}
static GskGpuBuffer *
gsk_vulkan_buffer_new_internal (GskVulkanDevice *device,
gsize size,
VkBufferUsageFlags usage)
@@ -23,7 +74,7 @@ gsk_vulkan_buffer_new_internal (GskVulkanDevice *device,
VkMemoryRequirements requirements;
GskVulkanBuffer *self;
-self = g_new0 (GskVulkanBuffer, 1);
self = g_object_new (GSK_TYPE_VULKAN_BUFFER, NULL);
self->device = g_object_ref (device);
@@ -51,70 +102,49 @@ gsk_vulkan_buffer_new_internal (GskVulkanDevice *device,
requirements.alignment,
&self->allocation);
gsk_gpu_buffer_setup (GSK_GPU_BUFFER (self), self->allocation.size);
GSK_VK_CHECK (vkBindBufferMemory, gsk_vulkan_device_get_vk_device (device),
self->vk_buffer,
self->allocation.vk_memory,
self->allocation.offset);
-return self;
return GSK_GPU_BUFFER (self);
}
-GskVulkanBuffer *
-gsk_vulkan_buffer_new (GskVulkanDevice *device,
GskGpuBuffer *
gsk_vulkan_buffer_new_vertex (GskVulkanDevice *device,
gsize size)
{
return gsk_vulkan_buffer_new_internal (device, size,
VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT
| VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);
}
-GskVulkanBuffer *
GskGpuBuffer *
gsk_vulkan_buffer_new_storage (GskVulkanDevice *device,
gsize size)
{
return gsk_vulkan_buffer_new_internal (device, size, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT);
}
-GskVulkanBuffer *
-gsk_vulkan_buffer_new_map (GskVulkanDevice *device,
-gsize size,
-GskVulkanMapMode mode)
-{
-return gsk_vulkan_buffer_new_internal (device,
-size,
-(mode & GSK_VULKAN_READ ? VK_BUFFER_USAGE_TRANSFER_DST_BIT : 0) |
-(mode & GSK_VULKAN_WRITE ? VK_BUFFER_USAGE_TRANSFER_SRC_BIT : 0));
-}
GskGpuBuffer *
gsk_vulkan_buffer_new_write (GskVulkanDevice *device,
gsize size)
{
return gsk_vulkan_buffer_new_internal (device, size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
}
-void
-gsk_vulkan_buffer_free (GskVulkanBuffer *self)
-{
-vkDestroyBuffer (gsk_vulkan_device_get_vk_device (self->device),
-self->vk_buffer,
-NULL);
-gsk_vulkan_free (self->allocator, &self->allocation);
-g_object_unref (self->device);
-g_free (self);
-}
GskGpuBuffer *
gsk_vulkan_buffer_new_read (GskVulkanDevice *device,
gsize size)
{
return gsk_vulkan_buffer_new_internal (device, size, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
}
VkBuffer
-gsk_vulkan_buffer_get_buffer (GskVulkanBuffer *self)
gsk_vulkan_buffer_get_vk_buffer (GskVulkanBuffer *self)
{
return self->vk_buffer;
}
-gsize
-gsk_vulkan_buffer_get_size (GskVulkanBuffer *self)
-{
-return self->allocation.size;
-}
-guchar *
-gsk_vulkan_buffer_get_data (GskVulkanBuffer *self)
-{
-return self->allocation.map;
-}
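With the map/unmap vfuncs routed through GskGpuBuffer, CPU-side staging writes are backend-neutral. An illustrative fragment (hypothetical call site, not from the commit; `device` is assumed to be a valid GskVulkanDevice):

GskGpuBuffer *buffer;
guchar *data;

buffer = gsk_vulkan_buffer_new_write (device, 4096);
data = gsk_gpu_buffer_map (buffer);
memset (data, 0, 4096);   /* a real caller would write pixel data here */
gsk_gpu_buffer_unmap (buffer);
g_object_unref (buffer);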
gsk/gpu/gskvulkanbufferprivate.h
@@ -1,30 +1,25 @@
#pragma once
#include "gskgpubufferprivate.h"
#include "gskvulkandeviceprivate.h"
G_BEGIN_DECLS
-typedef struct _GskVulkanBuffer GskVulkanBuffer;
-typedef enum
-{
-GSK_VULKAN_READ = (1 << 0),
-GSK_VULKAN_WRITE = (1 << 1),
-GSK_VULKAN_READWRITE = GSK_VULKAN_READ | GSK_VULKAN_WRITE
-} GskVulkanMapMode;
#define GSK_TYPE_VULKAN_BUFFER (gsk_vulkan_buffer_get_type ())
G_DECLARE_FINAL_TYPE (GskVulkanBuffer, gsk_vulkan_buffer, GSK, VULKAN_BUFFER, GskGpuBuffer)
-GskVulkanBuffer * gsk_vulkan_buffer_new (GskVulkanDevice *device,
-gsize size);
-GskVulkanBuffer * gsk_vulkan_buffer_new_map (GskVulkanDevice *device,
-gsize size,
-GskVulkanMapMode mode);
-void gsk_vulkan_buffer_free (GskVulkanBuffer *buffer);
GskGpuBuffer * gsk_vulkan_buffer_new_vertex (GskVulkanDevice *device,
gsize size);
GskGpuBuffer * gsk_vulkan_buffer_new_storage (GskVulkanDevice *device,
gsize size);
GskGpuBuffer * gsk_vulkan_buffer_new_write (GskVulkanDevice *device,
gsize size);
GskGpuBuffer * gsk_vulkan_buffer_new_read (GskVulkanDevice *device,
gsize size);
-VkBuffer gsk_vulkan_buffer_get_buffer (GskVulkanBuffer *self);
VkBuffer gsk_vulkan_buffer_get_vk_buffer (GskVulkanBuffer *self);
-gsize gsk_vulkan_buffer_get_size (GskVulkanBuffer *self);
-guchar * gsk_vulkan_buffer_get_data (GskVulkanBuffer *self);
G_END_DECLS
gsk/gpu/gskvulkandevice.c
@@ -2,18 +2,29 @@
#include "gskvulkandeviceprivate.h" #include "gskvulkandeviceprivate.h"
#include "gskgpuglobalsopprivate.h"
#include "gskgpushaderopprivate.h"
#include "gskvulkanbufferprivate.h"
#include "gskvulkanimageprivate.h" #include "gskvulkanimageprivate.h"
#include "gdk/gdkdisplayprivate.h" #include "gdk/gdkdisplayprivate.h"
#include "gdk/gdkvulkancontextprivate.h" #include "gdk/gdkvulkancontextprivate.h"
#define DESCRIPTOR_POOL_MAXITEMS 50000
struct _GskVulkanDevice struct _GskVulkanDevice
{ {
GskGpuDevice parent_instance; GskGpuDevice parent_instance;
GskVulkanAllocator *allocators[VK_MAX_MEMORY_TYPES]; GskVulkanAllocator *allocators[VK_MAX_MEMORY_TYPES];
GHashTable *pipeline_cache;
GHashTable *render_pass_cache;
VkDescriptorSetLayout vk_descriptor_set_layouts[GSK_VULKAN_N_DESCRIPTOR_SETS];
VkPipelineLayout vk_pipeline_layout;
VkCommandPool vk_command_pool; VkCommandPool vk_command_pool;
VkSampler vk_samplers[GSK_GPU_SAMPLER_N_SAMPLERS];
}; };
struct _GskVulkanDeviceClass struct _GskVulkanDeviceClass
@ -23,6 +34,67 @@ struct _GskVulkanDeviceClass
G_DEFINE_TYPE (GskVulkanDevice, gsk_vulkan_device, GSK_TYPE_GPU_DEVICE) G_DEFINE_TYPE (GskVulkanDevice, gsk_vulkan_device, GSK_TYPE_GPU_DEVICE)
typedef struct _PipelineCacheKey PipelineCacheKey;
typedef struct _RenderPassCacheKey RenderPassCacheKey;
struct _PipelineCacheKey
{
const GskGpuShaderOpClass *op_class;
GskGpuShaderClip clip;
VkFormat format;
};
struct _RenderPassCacheKey
{
VkFormat format;
VkImageLayout from_layout;
VkImageLayout to_layout;
};
static guint
pipeline_cache_key_hash (gconstpointer data)
{
const PipelineCacheKey *key = data;
return GPOINTER_TO_UINT (key->op_class) ^
key->clip ^
(key->format << 2);
}
static gboolean
pipeline_cache_key_equal (gconstpointer a,
gconstpointer b)
{
const PipelineCacheKey *keya = a;
const PipelineCacheKey *keyb = b;
return keya->op_class == keyb->op_class &&
keya->clip == keyb->clip &&
keya->format == keyb->format;
}
static guint
render_pass_cache_key_hash (gconstpointer data)
{
const RenderPassCacheKey *key = data;
return (key->from_layout << 20) ^
(key->to_layout << 16) ^
(key->format);
}
static gboolean
render_pass_cache_key_equal (gconstpointer a,
gconstpointer b)
{
const RenderPassCacheKey *keya = a;
const RenderPassCacheKey *keyb = b;
return keya->from_layout == keyb->from_layout &&
keya->to_layout == keyb->to_layout &&
keya->format == keyb->format;
}
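Both caches follow the same GLib pattern: look up with a stack-allocated key, and on a miss insert a g_memdup'ed copy for the table to own, as the commit does. A minimal self-contained sketch with a simplified key (compile against glib-2.0):

#include <glib.h>

typedef struct
{
  int format;
  int from_layout;
  int to_layout;
} Key;

static guint
key_hash (gconstpointer data)
{
  const Key *key = data;

  return (key->from_layout << 20) ^ (key->to_layout << 16) ^ key->format;
}

static gboolean
key_equal (gconstpointer a,
           gconstpointer b)
{
  const Key *keya = a;
  const Key *keyb = b;

  return keya->format == keyb->format &&
         keya->from_layout == keyb->from_layout &&
         keya->to_layout == keyb->to_layout;
}

int
main (void)
{
  /* here the table owns the duplicated keys via the destroy notify */
  GHashTable *cache = g_hash_table_new_full (key_hash, key_equal, g_free, NULL);
  Key key = { 44, 1, 2 };
  gpointer value;

  value = g_hash_table_lookup (cache, &key);
  if (value == NULL)
    {
      value = GINT_TO_POINTER (1); /* stand-in for a created VkRenderPass */
      g_hash_table_insert (cache, g_memdup (&key, sizeof (Key)), value);
    }

  g_print ("cache size: %u\n", g_hash_table_size (cache));
  g_hash_table_unref (cache);

  return 0;
}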
static GskGpuImage *
gsk_vulkan_device_create_offscreen_image (GskGpuDevice *device,
GdkMemoryDepth depth,
@@ -57,16 +129,49 @@ gsk_vulkan_device_finalize (GObject *object)
GskVulkanDevice *self = GSK_VULKAN_DEVICE (object);
GskGpuDevice *device = GSK_GPU_DEVICE (self);
GdkDisplay *display;
GHashTableIter iter;
gpointer key, value;
gsize i;
g_object_steal_data (G_OBJECT (gsk_gpu_device_get_display (device)), "-gsk-vulkan-device");
display = gsk_gpu_device_get_display (device);
g_hash_table_iter_init (&iter, self->pipeline_cache);
while (g_hash_table_iter_next (&iter, &key, &value))
{
g_free (key);
vkDestroyPipeline (display->vk_device, value, NULL);
}
g_hash_table_unref (self->pipeline_cache);
g_hash_table_iter_init (&iter, self->render_pass_cache);
while (g_hash_table_iter_next (&iter, &key, &value))
{
g_free (key);
vkDestroyRenderPass (display->vk_device, value, NULL);
}
g_hash_table_unref (self->render_pass_cache);
for (i = 0; i < G_N_ELEMENTS (self->vk_samplers); i++)
{
vkDestroySampler (display->vk_device,
self->vk_samplers[i],
NULL);
}
vkDestroyPipelineLayout (display->vk_device,
self->vk_pipeline_layout,
NULL);
vkDestroyCommandPool (display->vk_device,
self->vk_command_pool,
NULL);
for (i = 0; i < GSK_VULKAN_N_DESCRIPTOR_SETS; i++)
vkDestroyDescriptorSetLayout (display->vk_device,
self->vk_descriptor_set_layouts[i],
NULL);
for (i = 0; i < VK_MAX_MEMORY_TYPES; i++)
g_clear_pointer (&self->allocators[i], gsk_vulkan_allocator_unref);
@@ -90,6 +195,8 @@ gsk_vulkan_device_class_init (GskVulkanDeviceClass *klass)
static void
gsk_vulkan_device_init (GskVulkanDevice *self)
{
self->pipeline_cache = g_hash_table_new (pipeline_cache_key_hash, pipeline_cache_key_equal);
self->render_pass_cache = g_hash_table_new (render_pass_cache_key_hash, render_pass_cache_key_equal);
}
static void
@@ -99,6 +206,75 @@ gsk_vulkan_device_setup (GskVulkanDevice *self)
display = gsk_gpu_device_get_display (GSK_GPU_DEVICE (self));
GSK_VK_CHECK (vkCreateDescriptorSetLayout, display->vk_device,
&(VkDescriptorSetLayoutCreateInfo) {
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
.bindingCount = 1,
.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT,
.pBindings = (VkDescriptorSetLayoutBinding[1]) {
{
.binding = 0,
.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.descriptorCount = DESCRIPTOR_POOL_MAXITEMS,
.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT
}
},
.pNext = &(VkDescriptorSetLayoutBindingFlagsCreateInfo) {
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO,
.bindingCount = 1,
.pBindingFlags = (VkDescriptorBindingFlags[1]) {
VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT
| VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT
| VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT,
},
}
},
NULL,
&self->vk_descriptor_set_layouts[GSK_VULKAN_IMAGE_SET_LAYOUT]);
GSK_VK_CHECK (vkCreateDescriptorSetLayout, display->vk_device,
&(VkDescriptorSetLayoutCreateInfo) {
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
.bindingCount = 1,
.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT,
.pBindings = (VkDescriptorSetLayoutBinding[1]) {
{
.binding = 0,
.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
.descriptorCount = DESCRIPTOR_POOL_MAXITEMS,
.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT
},
},
.pNext = &(VkDescriptorSetLayoutBindingFlagsCreateInfo) {
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO,
.bindingCount = 1,
.pBindingFlags = (VkDescriptorBindingFlags[1]) {
VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT
| VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT
| VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT,
},
}
},
NULL,
&self->vk_descriptor_set_layouts[GSK_VULKAN_BUFFER_SET_LAYOUT]);
GSK_VK_CHECK (vkCreatePipelineLayout, display->vk_device,
&(VkPipelineLayoutCreateInfo) {
.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
.setLayoutCount = G_N_ELEMENTS (self->vk_descriptor_set_layouts),
.pSetLayouts = self->vk_descriptor_set_layouts,
.pushConstantRangeCount = 1,
.pPushConstantRanges = (VkPushConstantRange[1]) {
{
.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT,
.offset = 0,
.size = sizeof (GskGpuGlobalsInstance)
}
}
},
NULL,
&self->vk_pipeline_layout);
GSK_VK_CHECK (vkCreateCommandPool, display->vk_device,
&(const VkCommandPoolCreateInfo) {
.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
@@ -107,6 +283,51 @@ gsk_vulkan_device_setup (GskVulkanDevice *self)
},
NULL,
&self->vk_command_pool);
GSK_VK_CHECK (vkCreateSampler, display->vk_device,
&(VkSamplerCreateInfo) {
.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
.magFilter = VK_FILTER_LINEAR,
.minFilter = VK_FILTER_LINEAR,
.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
.addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT,
.borderColor = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK,
.unnormalizedCoordinates = VK_FALSE,
.maxAnisotropy = 1.0,
},
NULL,
&self->vk_samplers[GSK_GPU_SAMPLER_DEFAULT]);
GSK_VK_CHECK (vkCreateSampler, display->vk_device,
&(VkSamplerCreateInfo) {
.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
.magFilter = VK_FILTER_LINEAR,
.minFilter = VK_FILTER_LINEAR,
.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT,
.addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT,
.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT,
.borderColor = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK,
.unnormalizedCoordinates = VK_FALSE,
.maxAnisotropy = 1.0,
},
NULL,
&self->vk_samplers[GSK_GPU_SAMPLER_REPEAT]);
GSK_VK_CHECK (vkCreateSampler, display->vk_device,
&(VkSamplerCreateInfo) {
.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
.magFilter = VK_FILTER_NEAREST,
.minFilter = VK_FILTER_NEAREST,
.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
.addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT,
.borderColor = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK,
.unnormalizedCoordinates = VK_FALSE,
.maxAnisotropy = 1.0,
},
NULL,
&self->vk_samplers[GSK_GPU_SAMPLER_NEAREST]);
}
GskGpuDevice *
@@ -132,6 +353,12 @@ gsk_vulkan_device_get_for_display (GdkDisplay *display,
return GSK_GPU_DEVICE (self);
}
gsize
gsk_vulkan_device_get_max_descriptors (GskVulkanDevice *self)
{
return DESCRIPTOR_POOL_MAXITEMS;
}
VkDevice
gsk_vulkan_device_get_vk_device (GskVulkanDevice *self)
{
@@ -150,12 +377,256 @@ gsk_vulkan_device_get_vk_queue (GskVulkanDevice *self)
return gsk_gpu_device_get_display (GSK_GPU_DEVICE (self))->vk_queue;
}
VkDescriptorSetLayout
gsk_vulkan_device_get_vk_image_set_layout (GskVulkanDevice *self)
{
return self->vk_descriptor_set_layouts[GSK_VULKAN_IMAGE_SET_LAYOUT];
}
VkDescriptorSetLayout
gsk_vulkan_device_get_vk_buffer_set_layout (GskVulkanDevice *self)
{
return self->vk_descriptor_set_layouts[GSK_VULKAN_BUFFER_SET_LAYOUT];
}
VkPipelineLayout
gsk_vulkan_device_get_vk_pipeline_layout (GskVulkanDevice *self)
{
return self->vk_pipeline_layout;
}
VkCommandPool VkCommandPool
gsk_vulkan_device_get_vk_command_pool (GskVulkanDevice *self) gsk_vulkan_device_get_vk_command_pool (GskVulkanDevice *self)
{ {
return self->vk_command_pool; return self->vk_command_pool;
} }
VkSampler
gsk_vulkan_device_get_vk_sampler (GskVulkanDevice *self,
GskGpuSampler sampler)
{
return self->vk_samplers[sampler];
}
VkRenderPass
gsk_vulkan_device_get_vk_render_pass (GskVulkanDevice *self,
VkFormat format,
VkImageLayout from_layout,
VkImageLayout to_layout)
{
RenderPassCacheKey cache_key;
VkRenderPass render_pass;
GdkDisplay *display;
cache_key = (RenderPassCacheKey) {
.format = format,
.from_layout = from_layout,
.to_layout = to_layout,
};
render_pass = g_hash_table_lookup (self->render_pass_cache, &cache_key);
if (render_pass)
return render_pass;
display = gsk_gpu_device_get_display (GSK_GPU_DEVICE (self));
GSK_VK_CHECK (vkCreateRenderPass, display->vk_device,
&(VkRenderPassCreateInfo) {
.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
.attachmentCount = 1,
.pAttachments = (VkAttachmentDescription[]) {
{
.format = format,
.samples = VK_SAMPLE_COUNT_1_BIT,
.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR,
.storeOp = VK_ATTACHMENT_STORE_OP_STORE,
.initialLayout = from_layout,
.finalLayout = to_layout
}
},
.subpassCount = 1,
.pSubpasses = (VkSubpassDescription []) {
{
.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
.inputAttachmentCount = 0,
.colorAttachmentCount = 1,
.pColorAttachments = (VkAttachmentReference []) {
{
.attachment = 0,
.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
}
},
.pResolveAttachments = (VkAttachmentReference []) {
{
.attachment = VK_ATTACHMENT_UNUSED,
.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
}
},
.pDepthStencilAttachment = NULL,
}
},
.dependencyCount = 0,
},
NULL,
&render_pass);
g_hash_table_insert (self->render_pass_cache, g_memdup (&cache_key, sizeof (RenderPassCacheKey)), render_pass);
return render_pass;
}
typedef struct _GskVulkanShaderSpecialization GskVulkanShaderSpecialization;
struct _GskVulkanShaderSpecialization
{
guint32 clip;
};
VkPipeline
gsk_vulkan_device_get_vk_pipeline (GskVulkanDevice *self,
const GskGpuShaderOpClass *op_class,
GskGpuShaderClip clip,
VkFormat format,
VkRenderPass render_pass)
{
PipelineCacheKey cache_key;
VkPipeline pipeline;
GdkDisplay *display;
char *vertex_shader_name, *fragment_shader_name;
cache_key = (PipelineCacheKey) {
.op_class = op_class,
.clip = clip,
.format = format,
};
pipeline = g_hash_table_lookup (self->pipeline_cache, &cache_key);
if (pipeline)
return pipeline;
display = gsk_gpu_device_get_display (GSK_GPU_DEVICE (self));
vertex_shader_name = g_strconcat ("/org/gtk/libgsk/shaders/vulkan/", op_class->shader_name, ".vert.spv", NULL);
fragment_shader_name = g_strconcat ("/org/gtk/libgsk/shaders/vulkan/", op_class->shader_name, ".frag.spv", NULL);
GSK_VK_CHECK (vkCreateGraphicsPipelines, display->vk_device,
display->vk_pipeline_cache,
1,
&(VkGraphicsPipelineCreateInfo) {
.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
.stageCount = 2,
.pStages = (VkPipelineShaderStageCreateInfo[2]) {
{
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
.stage = VK_SHADER_STAGE_VERTEX_BIT,
.module = gdk_display_get_vk_shader_module (display, vertex_shader_name),
.pName = "main",
.pSpecializationInfo = &(VkSpecializationInfo) {
.mapEntryCount = 1,
.pMapEntries = (VkSpecializationMapEntry[1]) {
{
.constantID = 0,
.offset = G_STRUCT_OFFSET (GskVulkanShaderSpecialization, clip),
.size = sizeof (guint32),
},
},
.dataSize = sizeof (GskVulkanShaderSpecialization),
.pData = &(GskVulkanShaderSpecialization) {
.clip = clip,
},
},
},
{
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
.stage = VK_SHADER_STAGE_FRAGMENT_BIT,
.module = gdk_display_get_vk_shader_module (display, fragment_shader_name),
.pName = "main",
.pSpecializationInfo = &(VkSpecializationInfo) {
.mapEntryCount = 1,
.pMapEntries = (VkSpecializationMapEntry[1]) {
{
.constantID = 0,
.offset = G_STRUCT_OFFSET (GskVulkanShaderSpecialization, clip),
.size = sizeof (guint32),
}
},
.dataSize = sizeof (GskVulkanShaderSpecialization),
.pData = &(GskVulkanShaderSpecialization) {
.clip = clip,
},
},
},
},
.pVertexInputState = op_class->vertex_input_state,
.pInputAssemblyState = &(VkPipelineInputAssemblyStateCreateInfo) {
.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,
.primitiveRestartEnable = VK_FALSE,
},
.pTessellationState = NULL,
.pViewportState = &(VkPipelineViewportStateCreateInfo) {
.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
.viewportCount = 1,
.scissorCount = 1
},
.pRasterizationState = &(VkPipelineRasterizationStateCreateInfo) {
.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
.depthClampEnable = VK_FALSE,
.rasterizerDiscardEnable = VK_FALSE,
.polygonMode = VK_POLYGON_MODE_FILL,
.cullMode = VK_CULL_MODE_NONE,
.frontFace = VK_FRONT_FACE_CLOCKWISE,
.lineWidth = 1.0f,
},
.pMultisampleState = &(VkPipelineMultisampleStateCreateInfo) {
.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
.rasterizationSamples = 1,
},
.pDepthStencilState = &(VkPipelineDepthStencilStateCreateInfo) {
.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO
},
.pColorBlendState = &(VkPipelineColorBlendStateCreateInfo) {
.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
.attachmentCount = 1,
.pAttachments = (VkPipelineColorBlendAttachmentState []) {
{
.blendEnable = VK_TRUE,
.colorBlendOp = VK_BLEND_OP_ADD,
.srcColorBlendFactor = VK_BLEND_FACTOR_ONE,
.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA,
.alphaBlendOp = VK_BLEND_OP_ADD,
.srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE,
.dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA,
.colorWriteMask = VK_COLOR_COMPONENT_A_BIT
| VK_COLOR_COMPONENT_R_BIT
| VK_COLOR_COMPONENT_G_BIT
| VK_COLOR_COMPONENT_B_BIT
},
}
},
.pDynamicState = &(VkPipelineDynamicStateCreateInfo) {
.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
.dynamicStateCount = 2,
.pDynamicStates = (VkDynamicState[2]) {
VK_DYNAMIC_STATE_VIEWPORT,
VK_DYNAMIC_STATE_SCISSOR
},
},
.layout = self->vk_pipeline_layout,
.renderPass = render_pass,
.subpass = 0,
.basePipelineHandle = VK_NULL_HANDLE,
.basePipelineIndex = -1,
},
NULL,
&pipeline);
g_free (fragment_shader_name);
g_free (vertex_shader_name);
g_hash_table_insert (self->pipeline_cache, g_memdup (&cache_key, sizeof (PipelineCacheKey)), pipeline);
//gdk_vulkan_context_pipeline_cache_updated (self->vulkan);
return pipeline;
}
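The color-blend attachment state in this pipeline, VK_BLEND_FACTOR_ONE with VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA for both color and alpha, is the premultiplied-alpha "over" operator. A standalone sketch of the arithmetic it configures:

#include <stdio.h>

typedef struct { float r, g, b, a; } Color; /* premultiplied alpha */

/* dst = src + (1 - src.a) * dst, per channel */
static Color
blend_over (Color src, Color dst)
{
  float f = 1.0f - src.a;

  return (Color) { src.r + f * dst.r,
                   src.g + f * dst.g,
                   src.b + f * dst.b,
                   src.a + f * dst.a };
}

int
main (void)
{
  Color red_half = { 0.5f, 0.f, 0.f, 0.5f };  /* 50% opaque red, premultiplied */
  Color white = { 1.f, 1.f, 1.f, 1.f };
  Color out = blend_over (red_half, white);

  printf ("%.2f %.2f %.2f %.2f\n", out.r, out.g, out.b, out.a);
  return 0;
}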
static GskVulkanAllocator *
gsk_vulkan_device_get_allocator (GskVulkanDevice *self,
gsize index,
gsk/gpu/gskvulkandeviceprivate.h
@@ -3,12 +3,21 @@
#include "gskgpudeviceprivate.h" #include "gskgpudeviceprivate.h"
#include "gskdebugprivate.h" #include "gskdebugprivate.h"
#include "gskgpuclipprivate.h"
#include "gskvulkanmemoryprivate.h" #include "gskvulkanmemoryprivate.h"
#include <gdk/gdkvulkancontext.h> #include <gdk/gdkvulkancontext.h>
G_BEGIN_DECLS G_BEGIN_DECLS
/* also used by gskvulkanframe.c */
enum {
GSK_VULKAN_IMAGE_SET_LAYOUT,
GSK_VULKAN_BUFFER_SET_LAYOUT,
GSK_VULKAN_N_DESCRIPTOR_SETS
};
#define GSK_TYPE_VULKAN_DEVICE (gsk_vulkan_device_get_type ()) #define GSK_TYPE_VULKAN_DEVICE (gsk_vulkan_device_get_type ())
G_DECLARE_FINAL_TYPE(GskVulkanDevice, gsk_vulkan_device, GSK, VULKAN_DEVICE, GskGpuDevice) G_DECLARE_FINAL_TYPE(GskVulkanDevice, gsk_vulkan_device, GSK, VULKAN_DEVICE, GskGpuDevice)
@@ -16,10 +25,27 @@ G_DECLARE_FINAL_TYPE(GskVulkanDevice, gsk_vulkan_device, GSK, VULKAN_DEVICE, Gsk
GskGpuDevice * gsk_vulkan_device_get_for_display (GdkDisplay *display,
GError **error);
gsize gsk_vulkan_device_get_max_descriptors (GskVulkanDevice *self) G_GNUC_PURE;
-VkDevice gsk_vulkan_device_get_vk_device (GskVulkanDevice *self);
-VkPhysicalDevice gsk_vulkan_device_get_vk_physical_device (GskVulkanDevice *self);
-VkQueue gsk_vulkan_device_get_vk_queue (GskVulkanDevice *self);
-VkCommandPool gsk_vulkan_device_get_vk_command_pool (GskVulkanDevice *self);
VkDevice gsk_vulkan_device_get_vk_device (GskVulkanDevice *self) G_GNUC_PURE;
VkPhysicalDevice gsk_vulkan_device_get_vk_physical_device (GskVulkanDevice *self) G_GNUC_PURE;
VkQueue gsk_vulkan_device_get_vk_queue (GskVulkanDevice *self) G_GNUC_PURE;
VkDescriptorSetLayout gsk_vulkan_device_get_vk_image_set_layout (GskVulkanDevice *self) G_GNUC_PURE;
VkDescriptorSetLayout gsk_vulkan_device_get_vk_buffer_set_layout (GskVulkanDevice *self) G_GNUC_PURE;
VkPipelineLayout gsk_vulkan_device_get_vk_pipeline_layout (GskVulkanDevice *self) G_GNUC_PURE;
VkCommandPool gsk_vulkan_device_get_vk_command_pool (GskVulkanDevice *self) G_GNUC_PURE;
VkSampler gsk_vulkan_device_get_vk_sampler (GskVulkanDevice *self,
GskGpuSampler sampler) G_GNUC_PURE;
VkRenderPass gsk_vulkan_device_get_vk_render_pass (GskVulkanDevice *self,
VkFormat format,
VkImageLayout from_layout,
VkImageLayout to_layout);
VkPipeline gsk_vulkan_device_get_vk_pipeline (GskVulkanDevice *self,
const GskGpuShaderOpClass *op_class,
GskGpuShaderClip clip,
VkFormat format,
VkRenderPass render_pass);
GskVulkanAllocator * gsk_vulkan_device_find_allocator (GskVulkanDevice *self,
uint32_t allowed_types,
gsk/gpu/gskvulkanframe.c
@@ -3,16 +3,38 @@
#include "gskvulkanframeprivate.h" #include "gskvulkanframeprivate.h"
#include "gskgpuopprivate.h" #include "gskgpuopprivate.h"
#include "gskvulkanbufferprivate.h"
#include "gskvulkandeviceprivate.h" #include "gskvulkandeviceprivate.h"
#include "gskvulkanimageprivate.h"
#include "gdk/gdkdisplayprivate.h" #include "gdk/gdkdisplayprivate.h"
#define GDK_ARRAY_NAME gsk_descriptor_image_infos
#define GDK_ARRAY_TYPE_NAME GskDescriptorImageInfos
#define GDK_ARRAY_ELEMENT_TYPE VkDescriptorImageInfo
#define GDK_ARRAY_BY_VALUE 1
#define GDK_ARRAY_PREALLOC 128
#define GDK_ARRAY_NO_MEMSET 1
#include "gdk/gdkarrayimpl.c"
#define GDK_ARRAY_NAME gsk_descriptor_buffer_infos
#define GDK_ARRAY_TYPE_NAME GskDescriptorBufferInfos
#define GDK_ARRAY_ELEMENT_TYPE VkDescriptorBufferInfo
#define GDK_ARRAY_BY_VALUE 1
#define GDK_ARRAY_PREALLOC 32
#define GDK_ARRAY_NO_MEMSET 1
#include "gdk/gdkarrayimpl.c"
struct _GskVulkanFrame struct _GskVulkanFrame
{ {
GskGpuFrame parent_instance; GskGpuFrame parent_instance;
GskDescriptorImageInfos descriptor_images;
GskDescriptorBufferInfos descriptor_buffers;
VkFence vk_fence; VkFence vk_fence;
VkCommandBuffer vk_command_buffer; VkCommandBuffer vk_command_buffer;
VkDescriptorPool vk_descriptor_pool;
}; };
struct _GskVulkanFrameClass struct _GskVulkanFrameClass
@ -61,35 +83,168 @@ gsk_vulkan_frame_setup (GskGpuFrame *frame)
}, },
NULL, NULL,
&self->vk_fence); &self->vk_fence);
GSK_VK_CHECK (vkCreateDescriptorPool, vk_device,
&(VkDescriptorPoolCreateInfo) {
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
.flags = VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT,
.maxSets = GSK_VULKAN_N_DESCRIPTOR_SETS,
.poolSizeCount = GSK_VULKAN_N_DESCRIPTOR_SETS,
.pPoolSizes = (VkDescriptorPoolSize[GSK_VULKAN_N_DESCRIPTOR_SETS]) {
{
.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.descriptorCount = gsk_vulkan_device_get_max_descriptors (device),
},
{
.type = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
.descriptorCount = gsk_vulkan_device_get_max_descriptors (device),
}
}
},
NULL,
&self->vk_descriptor_pool);
} }
static void static void
gsk_vulkan_frame_cleanup (GskGpuFrame *frame) gsk_vulkan_frame_cleanup (GskGpuFrame *frame)
{ {
GskVulkanFrame *self = GSK_VULKAN_FRAME (frame); GskVulkanFrame *self = GSK_VULKAN_FRAME (frame);
VkDevice device; VkDevice vk_device;
device = gsk_vulkan_device_get_vk_device (GSK_VULKAN_DEVICE (gsk_gpu_frame_get_device (frame))); vk_device = gsk_vulkan_device_get_vk_device (GSK_VULKAN_DEVICE (gsk_gpu_frame_get_device (frame)));
GSK_VK_CHECK (vkWaitForFences, device, GSK_VK_CHECK (vkWaitForFences, vk_device,
1, 1,
&self->vk_fence, &self->vk_fence,
VK_TRUE, VK_TRUE,
INT64_MAX); INT64_MAX);
GSK_VK_CHECK (vkResetFences, device, GSK_VK_CHECK (vkResetFences, vk_device,
1, 1,
&self->vk_fence); &self->vk_fence);
GSK_VK_CHECK (vkResetCommandBuffer, self->vk_command_buffer, GSK_VK_CHECK (vkResetCommandBuffer, self->vk_command_buffer,
0); 0);
GSK_VK_CHECK (vkResetDescriptorPool, vk_device,
self->vk_descriptor_pool,
0);
gsk_descriptor_image_infos_set_size (&self->descriptor_images, 0);
gsk_descriptor_buffer_infos_set_size (&self->descriptor_buffers, 0);
GSK_GPU_FRAME_CLASS (gsk_vulkan_frame_parent_class)->cleanup (frame); GSK_GPU_FRAME_CLASS (gsk_vulkan_frame_parent_class)->cleanup (frame);
} }
static guint32
gsk_vulkan_frame_get_image_descriptor (GskGpuFrame *frame,
GskGpuImage *image,
GskGpuSampler sampler)
{
GskVulkanFrame *self = GSK_VULKAN_FRAME (frame);
GskVulkanDevice *device;
guint32 result;
device = GSK_VULKAN_DEVICE (gsk_gpu_frame_get_device (frame));
result = gsk_descriptor_image_infos_get_size (&self->descriptor_images);
g_assert (result < gsk_vulkan_device_get_max_descriptors (device));
gsk_descriptor_image_infos_append (&self->descriptor_images,
&(VkDescriptorImageInfo) {
.sampler = gsk_vulkan_device_get_vk_sampler (device, sampler),
.imageView = gsk_vulkan_image_get_vk_image_view (GSK_VULKAN_IMAGE (image)),
.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
});
return result;
}
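This is the descriptor-indexing half of the design: every image used during a frame is appended to one VkDescriptorImageInfo array, and the returned index is what ends up in the vertex data as tex_id, to be resolved in the shader as textures[nonuniformEXT (id)]. A standalone sketch of the scheme with stand-in types:

#include <stdio.h>

#define MAX_DESCRIPTORS 50000 /* mirrors DESCRIPTOR_POOL_MAXITEMS */

typedef struct
{
  const void *image;
  int sampler;
} ImageDescriptor;

static ImageDescriptor descriptors[MAX_DESCRIPTORS];
static unsigned int n_descriptors;

static unsigned int
get_image_descriptor (const void *image,
                      int         sampler)
{
  unsigned int result = n_descriptors;

  /* append; the index is the only per-draw state a shader needs */
  descriptors[n_descriptors++] = (ImageDescriptor) { image, sampler };

  return result;
}

int
main (void)
{
  int texture_a, texture_b;

  printf ("id: %u\n", get_image_descriptor (&texture_a, 0));
  printf ("id: %u\n", get_image_descriptor (&texture_b, 1));

  return 0;
}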
static void
-gsk_vulkan_frame_submit (GskGpuFrame *frame,
-GskGpuOp *op)
gsk_vulkan_frame_prepare_descriptor_sets (GskVulkanFrame *self)
{
GskVulkanDevice *device;
VkDevice vk_device;
VkWriteDescriptorSet write_descriptor_sets[GSK_VULKAN_N_DESCRIPTOR_SETS];
gsize n_descriptor_sets;
VkDescriptorSet descriptor_sets[GSK_VULKAN_N_DESCRIPTOR_SETS];
device = GSK_VULKAN_DEVICE (gsk_gpu_frame_get_device (GSK_GPU_FRAME (self)));
vk_device = gsk_vulkan_device_get_vk_device (device);
GSK_VK_CHECK (vkAllocateDescriptorSets, vk_device,
&(VkDescriptorSetAllocateInfo) {
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
.descriptorPool = self->vk_descriptor_pool,
.descriptorSetCount = GSK_VULKAN_N_DESCRIPTOR_SETS,
.pSetLayouts = (VkDescriptorSetLayout[GSK_VULKAN_N_DESCRIPTOR_SETS]) {
gsk_vulkan_device_get_vk_image_set_layout (device),
gsk_vulkan_device_get_vk_buffer_set_layout (device),
},
.pNext = &(VkDescriptorSetVariableDescriptorCountAllocateInfo) {
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO,
.descriptorSetCount = GSK_VULKAN_N_DESCRIPTOR_SETS,
.pDescriptorCounts = (uint32_t[GSK_VULKAN_N_DESCRIPTOR_SETS]) {
MAX (1, gsk_descriptor_image_infos_get_size (&self->descriptor_images)),
MAX (1, gsk_descriptor_buffer_infos_get_size (&self->descriptor_buffers))
}
}
},
descriptor_sets);
n_descriptor_sets = 0;
if (gsk_descriptor_image_infos_get_size (&self->descriptor_images) > 0)
{
write_descriptor_sets[n_descriptor_sets++] = (VkWriteDescriptorSet) {
.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
.dstSet = descriptor_sets[GSK_VULKAN_IMAGE_SET_LAYOUT],
.dstBinding = 0,
.dstArrayElement = 0,
.descriptorCount = gsk_descriptor_image_infos_get_size (&self->descriptor_images),
.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.pImageInfo = gsk_descriptor_image_infos_get_data (&self->descriptor_images)
};
}
if (gsk_descriptor_buffer_infos_get_size (&self->descriptor_buffers) > 0)
{
write_descriptor_sets[n_descriptor_sets++] = (VkWriteDescriptorSet) {
.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
.dstSet = descriptor_sets[GSK_VULKAN_BUFFER_SET_LAYOUT],
.dstBinding = 0,
.dstArrayElement = 0,
.descriptorCount = gsk_descriptor_buffer_infos_get_size (&self->descriptor_buffers),
.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
.pBufferInfo = gsk_descriptor_buffer_infos_get_data (&self->descriptor_buffers)
};
}
vkUpdateDescriptorSets (vk_device,
n_descriptor_sets,
write_descriptor_sets,
0, NULL);
vkCmdBindDescriptorSets (self->vk_command_buffer,
VK_PIPELINE_BIND_POINT_GRAPHICS,
gsk_vulkan_device_get_vk_pipeline_layout (device),
0,
GSK_VULKAN_N_DESCRIPTOR_SETS,
descriptor_sets,
0,
NULL);
}
static GskGpuBuffer *
gsk_vulkan_frame_create_vertex_buffer (GskGpuFrame *frame,
gsize size)
{
return gsk_vulkan_buffer_new_vertex (GSK_VULKAN_DEVICE (gsk_gpu_frame_get_device (frame)), size);
}
static void
gsk_vulkan_frame_submit (GskGpuFrame *frame,
GskGpuBuffer *vertex_buffer,
GskGpuOp *op)
{
GskVulkanFrame *self = GSK_VULKAN_FRAME (frame);
@@ -99,9 +254,20 @@ gsk_vulkan_frame_submit (GskGpuFrame *frame,
.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
});
gsk_vulkan_frame_prepare_descriptor_sets (self);
if (vertex_buffer)
vkCmdBindVertexBuffers (self->vk_command_buffer,
0,
1,
(VkBuffer[1]) {
gsk_vulkan_buffer_get_vk_buffer (GSK_VULKAN_BUFFER (vertex_buffer))
},
(VkDeviceSize[1]) { 0 });
while (op)
{
-op = gsk_gpu_op_vk_command (op, frame, VK_NULL_HANDLE, self->vk_command_buffer);
op = gsk_gpu_op_vk_command (op, frame, VK_NULL_HANDLE, VK_FORMAT_UNDEFINED, self->vk_command_buffer);
}
GSK_VK_CHECK (vkEndCommandBuffer, self->vk_command_buffer);
@@ -128,6 +294,12 @@ gsk_vulkan_frame_finalize (GObject *object)
vk_device = gsk_vulkan_device_get_vk_device (device);
vk_command_pool = gsk_vulkan_device_get_vk_command_pool (device);
vkDestroyDescriptorPool (vk_device,
self->vk_descriptor_pool,
NULL);
gsk_descriptor_image_infos_clear (&self->descriptor_images);
gsk_descriptor_buffer_infos_clear (&self->descriptor_buffers);
vkFreeCommandBuffers (vk_device,
vk_command_pool,
1, &self->vk_command_buffer);
@@ -147,6 +319,8 @@ gsk_vulkan_frame_class_init (GskVulkanFrameClass *klass)
gpu_frame_class->is_busy = gsk_vulkan_frame_is_busy;
gpu_frame_class->setup = gsk_vulkan_frame_setup;
gpu_frame_class->cleanup = gsk_vulkan_frame_cleanup;
gpu_frame_class->get_image_descriptor = gsk_vulkan_frame_get_image_descriptor;
gpu_frame_class->create_vertex_buffer = gsk_vulkan_frame_create_vertex_buffer;
gpu_frame_class->submit = gsk_vulkan_frame_submit;
object_class->finalize = gsk_vulkan_frame_finalize;
@@ -155,6 +329,8 @@ gsk_vulkan_frame_class_init (GskVulkanFrameClass *klass)
static void
gsk_vulkan_frame_init (GskVulkanFrame *self)
{
gsk_descriptor_image_infos_init (&self->descriptor_images);
gsk_descriptor_buffer_infos_init (&self->descriptor_buffers);
}
VkFence
gsk/gpu/gskvulkanimage.c
@@ -805,6 +805,7 @@
{
vkDestroyImage (device, self->vk_image, NULL);
gsk_vulkan_free (self->allocator, &self->allocation);
gsk_vulkan_allocator_unref (self->allocator);
}
gdk_display_unref_vulkan (self->display);
@@ -825,8 +826,8 @@ gsk_vulkan_image_init (GskVulkanImage *self)
}
VkFramebuffer
-gsk_vulkan_image_get_framebuffer (GskVulkanImage *self,
gsk_vulkan_image_get_vk_framebuffer (GskVulkanImage *self,
VkRenderPass render_pass)
{
if (self->vk_framebuffer)
return self->vk_framebuffer;
@@ -862,7 +863,7 @@ gsk_vulkan_image_get_vk_image (GskVulkanImage *self)
}
VkImageView
-gsk_vulkan_image_get_image_view (GskVulkanImage *self)
gsk_vulkan_image_get_vk_image_view (GskVulkanImage *self)
{
return self->vk_image_view;
}
gsk/gpu/gskvulkanimageprivate.h
@@ -55,9 +55,9 @@ void gsk_vulkan_image_transition (GskVulk
VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_ACCESS_SHADER_READ_BIT)
VkImage gsk_vulkan_image_get_vk_image (GskVulkanImage *self);
-VkImageView gsk_vulkan_image_get_image_view (GskVulkanImage *self);
VkImageView gsk_vulkan_image_get_vk_image_view (GskVulkanImage *self);
VkFormat gsk_vulkan_image_get_vk_format (GskVulkanImage *self);
-VkFramebuffer gsk_vulkan_image_get_framebuffer (GskVulkanImage *self,
VkFramebuffer gsk_vulkan_image_get_vk_framebuffer (GskVulkanImage *self,
VkRenderPass pass);
G_END_DECLS
gsk/gpu/shaders/common-gl.glsl Normal file
@@ -0,0 +1,49 @@
precision highp float;
#if defined(GSK_GLES) && __VERSION__ < 310
layout(std140)
#else
layout(std140, binding = 0)
#endif
uniform PushConstants
{
mat4 mvp;
vec4 clip_bounds;
vec4 clip_widths;
vec4 clip_heights;
vec2 scale;
} push;
#define GSK_VERTEX_INDEX gl_VertexID
#ifdef GSK_VERTEX_SHADER
#define IN(_loc) in
#define PASS(_loc) out
#define PASS_FLAT(_loc) flat out
#endif
#ifdef GSK_FRAGMENT_SHADER
#define PASS(_loc) in
#define PASS_FLAT(_loc) flat in
uniform sampler2D textures[16];
#define gsk_get_texture(id) textures[id]
#ifdef GSK_GLES
void
gsk_set_output_color (vec4 color)
{
gl_FragColor = color;
}
#else
layout(location = 0) out vec4 out_color;
void
gsk_set_output_color (vec4 color)
{
out_color = color;
}
#endif
#endif
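Since this GL variant switches on GSK_VERTEX_SHADER / GSK_FRAGMENT_SHADER (and GSK_GLES) but never defines them, the renderer presumably compiles the one source twice with different prologs. A minimal sketch of that pattern, assuming a made-up helper name and an illustrative #version line; only the GL calls themselves are standard API:

#include <epoxy/gl.h>

/* Sketch only: compile one GLSL source as a given stage by prepending
 * a prolog string; compile_stage and the version line are assumptions. */
static GLuint
compile_stage (GLenum stage, const char *defines, const char *source)
{
  const char *strings[3] = { "#version 150\n", defines, source };
  GLuint shader = glCreateShader (stage);

  /* NULL lengths: every string is NUL-terminated */
  glShaderSource (shader, 3, strings, NULL);
  glCompileShader (shader);
  return shader;
}

/* Usage: the same file becomes both halves of the program:
 *   compile_stage (GL_VERTEX_SHADER,   "#define GSK_VERTEX_SHADER 1\n",   src);
 *   compile_stage (GL_FRAGMENT_SHADER, "#define GSK_FRAGMENT_SHADER 1\n", src);
 */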
View File
@@ -0,0 +1,47 @@
#extension GL_EXT_nonuniform_qualifier : enable
#include "enums.glsl"
layout(push_constant) uniform PushConstants {
mat4 mvp;
vec4 clip_bounds;
vec4 clip_widths;
vec4 clip_heights;
vec2 scale;
} push;
layout(constant_id=0) const uint GSK_SHADER_CLIP = GSK_GPU_SHADER_CLIP_NONE;
#define GSK_VERTEX_INDEX gl_VertexIndex
#ifdef GSK_VERTEX_SHADER
#define IN(_loc) layout(location = _loc) in
#define PASS(_loc) layout(location = _loc) out
#define PASS_FLAT(_loc) layout(location = _loc) flat out
#endif
#ifdef GSK_FRAGMENT_SHADER
#define PASS(_loc) layout(location = _loc) in
#define PASS_FLAT(_loc) layout(location = _loc) flat in
layout(set = 0, binding = 0) uniform sampler2D textures[50000];
layout(set = 1, binding = 0) readonly buffer FloatBuffers {
float floats[];
} buffers[50000];
layout(location = 0) out vec4 out_color;
#define gsk_get_texture(id) textures[nonuniformEXT (id)]
#if 0
#define get_buffer(id) buffers[nonuniformEXT (id)]
#define get_float(id) get_buffer(0).floats[nonuniformEXT (id)]
#endif
void
gsk_set_output_color (vec4 color)
{
out_color = color;
}
#endif
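The constant_id = 0 declaration above means the clip mode is baked in at pipeline-creation time rather than branched on per fragment. As a rough illustration (not code from this commit; make_clip_specialization is a hypothetical helper, while VkSpecializationMapEntry and VkSpecializationInfo are real Vulkan API), a renderer could feed a GSK_GPU_SHADER_CLIP_* value into that slot like so:

#include <vulkan/vulkan.h>
#include <stdint.h>

/* Hypothetical sketch: wire a clip variant into constant_id = 0.
 * The caller must keep *clip_variant alive until the pipeline is built. */
static VkSpecializationInfo
make_clip_specialization (const uint32_t *clip_variant)
{
  static const VkSpecializationMapEntry entry = {
    .constantID = 0,           /* matches layout(constant_id=0) above */
    .offset     = 0,
    .size       = sizeof (uint32_t),
  };

  return (VkSpecializationInfo) {
    .mapEntryCount = 1,
    .pMapEntries   = &entry,
    .dataSize      = sizeof (uint32_t),
    .pData         = clip_variant, /* e.g. GSK_GPU_SHADER_CLIP_ROUNDED */
  };
}

The result would be pointed at from VkPipelineShaderStageCreateInfo::pSpecializationInfo, so each clip variant compiles to a pipeline with the dead branches eliminated.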
139
gsk/gpu/shaders/common.glsl Normal file
View File
@@ -0,0 +1,139 @@
#ifndef _COMMON_
#define _COMMON_
void main_clip_none (void);
void main_clip_rect (void);
void main_clip_rounded (void);
#include "enums.glsl"
#ifdef VULKAN
#include "common-vulkan.glsl"
#else
#include "common-gl.glsl"
#endif
#include "rect.glsl"
#include "roundedrect.glsl"
Rect
rect_clip (Rect r)
{
if (GSK_SHADER_CLIP == GSK_GPU_SHADER_CLIP_NONE)
return r;
else
return rect_intersect (r, rect_from_gsk (push.clip_bounds));
}
#ifdef GSK_VERTEX_SHADER
const vec2 offsets[6] = vec2[6](vec2(0.0, 0.0),
vec2(1.0, 0.0),
vec2(0.0, 1.0),
vec2(0.0, 1.0),
vec2(1.0, 0.0),
vec2(1.0, 1.0));
void
gsk_set_position (vec2 pos)
{
gl_Position = push.mvp * vec4 (pos, 0.0, 1.0);
}
vec2
rect_get_position (Rect rect)
{
Rect r = rect_round_larger (rect_clip (rect));
vec2 pos = mix (r.bounds.xy, r.bounds.zw, offsets[GSK_VERTEX_INDEX]);
return pos;
}
vec2
scale_tex_coord (vec2 in_pos,
Rect in_rect,
vec4 tex_rect)
{
return tex_rect.xy + (in_pos - in_rect.bounds.xy) / rect_size (in_rect) * tex_rect.zw;
}
void run (out vec2 pos);
void
main (void)
{
vec2 pos;
run (pos);
gsk_set_position (pos);
}
#endif /* GSK_VERTEX_SHADER */
#ifdef GSK_FRAGMENT_SHADER
void run (out vec4 color,
out vec2 pos);
void
main_clip_none (void)
{
vec4 color;
vec2 pos;
run (color, pos);
gsk_set_output_color (color);
}
void
main_clip_rect (void)
{
vec4 color;
vec2 pos;
run (color, pos);
Rect clip = rect_from_gsk (push.clip_bounds);
float coverage = rect_coverage (clip, pos);
color *= coverage;
gsk_set_output_color (color);
}
void
main_clip_rounded (void)
{
vec4 color;
vec2 pos;
run (color, pos);
RoundedRect clip = RoundedRect (vec4 (push.clip_bounds.xy,
                                      push.clip_bounds.xy + push.clip_bounds.zw),
                                push.clip_widths,
                                push.clip_heights);
clip = rounded_rect_scale (clip, push.scale);
float coverage = rounded_rect_coverage (clip, pos);
color *= coverage;
gsk_set_output_color (color);
}
void
main (void)
{
if (GSK_SHADER_CLIP == GSK_GPU_SHADER_CLIP_NONE)
main_clip_none ();
else if (GSK_SHADER_CLIP == GSK_GPU_SHADER_CLIP_RECT)
main_clip_rect ();
else if (GSK_SHADER_CLIP == GSK_GPU_SHADER_CLIP_ROUNDED)
main_clip_rounded ();
}
#endif /* GSK_FRAGMENT_SHADER */
#endif
View File
@@ -0,0 +1,38 @@
#ifndef _ELLIPSE_
#define _ELLIPSE_
struct Ellipse
{
vec2 center;
vec2 radius;
};
float
ellipse_distance (Ellipse r, vec2 p)
{
vec2 e = r.radius;
p = p - r.center;
if (e.x == e.y)
return length (p) - e.x;
/* from https://www.shadertoy.com/view/tt3yz7 */
vec2 pAbs = abs(p);
vec2 ei = 1.0 / e;
vec2 e2 = e*e;
vec2 ve = ei * vec2(e2.x - e2.y, e2.y - e2.x);
vec2 t = vec2(0.70710678118654752, 0.70710678118654752);
for (int i = 0; i < 3; i++) {
vec2 v = ve*t*t*t;
vec2 u = normalize(pAbs - v) * length(t * e - v);
vec2 w = ei * (v + u);
t = normalize(clamp(w, 0.0, 1.0));
}
vec2 nearestAbs = t * e;
float dist = length(pAbs - nearestAbs);
return dot(pAbs, pAbs) < dot(nearestAbs, nearestAbs) ? -dist : dist;
}
#endif
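As a worked note on ellipse_distance: the circular case is the exact signed distance d(p) = \lVert p - c \rVert - r, but a general ellipse has no closed-form distance, so the loop runs three fixed-point refinements of the nearest boundary point q = t \odot e (with t kept on the unit quarter-circle) and signs the result by whether the point lies inside:

d(p) \;\approx\; \operatorname{sign}\!\bigl(\lVert p_{\mathrm{abs}}\rVert^{2} - \lVert q\rVert^{2}\bigr)\,\bigl\lVert p_{\mathrm{abs}} - q \bigr\rVert, \qquad q = t \odot e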
View File
@@ -0,0 +1,8 @@
#ifndef _ENUMS_
#define _ENUMS_
#define GSK_GPU_SHADER_CLIP_NONE 0u
#define GSK_GPU_SHADER_CLIP_RECT 1u
#define GSK_GPU_SHADER_CLIP_ROUNDED 2u
#endif
View File
@@ -0,0 +1,291 @@
#!/usr/bin/env python3
import sys
import re
import os
name = os.path.splitext(os.path.splitext(os.path.basename(sys.argv[1]))[0])[0][6:]
var_name = "gsk_gpu_" + name.replace('-', '_')
struct_name = "GskGpu" + name.title().replace('-', '') + "Instance"
with open(sys.argv[1]) as f:
lines = f.readlines()
matches = []
for line in lines:
match = re.search(r"^IN\(([0-9]+)\) ([a-z0-9]+) ([a-zA-Z0-9_]+);$", line)
if not match:
if re.search(r"layout.*\sin\s.*", line):
raise Exception("Failed to parse file")
continue
if not match.group(3).startswith('in'):
raise Exception("Variable doesn't start with 'in'")
matches.append({'name': ''.join('_' + char.lower() if char.isupper() else char for char in match.group(3))[3:],
'location': int(match.group(1)),
'type': match.group(2)})
print(f'''/* This file is auto-generated; any change will not be preserved */
#pragma once
typedef struct _{struct_name} {struct_name};
struct _{struct_name} {{''')
expected = 0
for match in matches:
if expected != int(match['location']):
raise Exception(f"Should be layout location {expected} but is {match['location']}") # noqa
if match['type'] == 'float':
print(f" float {match['name']};")
expected += 1
elif match['type'] == 'int':
print(f" gint32 {match['name']};")
expected += 1
elif match['type'] == 'uint':
print(f" guint32 {match['name']};")
expected += 1
elif match['type'] == 'uvec2':
print(f" guint32 {match['name']}[2];")
expected += 1
elif match['type'] == 'vec2':
print(f" float {match['name']}[2];")
expected += 1
elif match['type'] == 'vec4':
print(f" float {match['name']}[4];")
expected += 1
elif match['type'] == 'mat3x4':
print(f" float {match['name']}[12];")
expected += 3
elif match['type'] == 'mat4':
print(f" float {match['name']}[16];")
expected += 4
else:
raise Exception(f"Don't know what a {match['type']} is")
print(f'''}};
''')
print(f'''static inline void
{var_name}_setup_vao (gsize offset)
{{''')
for match in matches:
if match['type'] == 'float':
print(f''' glEnableVertexAttribArray ({match['location']});
glVertexAttribDivisor ({match['location']}, 1);
glVertexAttribPointer ({match['location']},
1,
GL_FLOAT,
GL_FALSE,
sizeof ({struct_name}),
GSIZE_TO_POINTER (offset + G_STRUCT_OFFSET({struct_name}, {match['name']})));''')
elif match['type'] == 'uint':
print(f''' glEnableVertexAttribArray ({match['location']});
glVertexAttribDivisor ({match['location']}, 1);
glVertexAttribIPointer ({match['location']},
1,
GL_UNSIGNED_INT,
sizeof ({struct_name}),
GSIZE_TO_POINTER (offset + G_STRUCT_OFFSET({struct_name}, {match['name']})));''')
elif match['type'] == 'uvec2':
print(f''' glEnableVertexAttribArray ({match['location']});
glVertexAttribDivisor ({match['location']}, 1);
glVertexAttribIPointer ({match['location']},
2,
GL_UNSIGNED_INT,
sizeof ({struct_name}),
GSIZE_TO_POINTER (offset + G_STRUCT_OFFSET({struct_name}, {match['name']})));''')
elif match['type'] == 'vec2':
print(f''' glEnableVertexAttribArray ({match['location']});
glVertexAttribDivisor ({match['location']}, 1);
glVertexAttribPointer ({match['location']},
2,
GL_FLOAT,
GL_FALSE,
sizeof ({struct_name}),
GSIZE_TO_POINTER (offset + G_STRUCT_OFFSET({struct_name}, {match['name']})));''')
elif match['type'] == 'vec4':
print(f''' glEnableVertexAttribArray ({match['location']});
glVertexAttribDivisor ({match['location']}, 1);
glVertexAttribPointer ({match['location']},
4,
GL_FLOAT,
GL_FALSE,
sizeof ({struct_name}),
GSIZE_TO_POINTER (offset + G_STRUCT_OFFSET({struct_name}, {match['name']})));''')
elif match['type'] == 'mat3x4':
print(f''' glEnableVertexAttribArray ({match['location']});
glVertexAttribDivisor ({match['location']}, 1);
glVertexAttribPointer ({match['location']},
4,
GL_FLOAT,
GL_FALSE,
sizeof ({struct_name}),
GSIZE_TO_POINTER (offset + G_STRUCT_OFFSET({struct_name}, {match['name']})));
glEnableVertexAttribArray ({int(match['location'] + 1)});
glVertexAttribDivisor ({int(match['location'] + 1)}, 1);
glVertexAttribPointer ({int(match['location']) + 1},
4,
GL_FLOAT,
GL_FALSE,
sizeof ({struct_name}),
GSIZE_TO_POINTER (offset + G_STRUCT_OFFSET({struct_name}, {match['name']}) + sizeof (float) * 4));
glEnableVertexAttribArray ({int(match['location'] + 2)});
glVertexAttribDivisor ({int(match['location'] + 2)}, 1);
glVertexAttribPointer ({int(match['location']) + 2},
4,
GL_FLOAT,
GL_FALSE,
sizeof ({struct_name}),
GSIZE_TO_POINTER (offset + G_STRUCT_OFFSET({struct_name}, {match['name']}) + sizeof (float) * 8));''')
elif match['type'] == 'mat4':
print(f''' glEnableVertexAttribArray ({match['location']});
glVertexAttribDivisor ({match['location']}, 1);
glVertexAttribPointer ({match['location']},
4,
GL_FLOAT,
GL_FALSE,
sizeof ({struct_name}),
GSIZE_TO_POINTER (offset + G_STRUCT_OFFSET({struct_name}, {match['name']})));
glEnableVertexAttribArray ({int(match['location'] + 1)});
glVertexAttribDivisor ({int(match['location'] + 1)}, 1);
glVertexAttribPointer ({int(match['location']) + 1},
4,
GL_FLOAT,
GL_FALSE,
sizeof ({struct_name}),
GSIZE_TO_POINTER (offset + G_STRUCT_OFFSET({struct_name}, {match['name']}) + sizeof (float) * 4));
glEnableVertexAttribArray ({int(match['location'] + 2)});
glVertexAttribDivisor ({int(match['location'] + 2)}, 1);
glVertexAttribPointer ({int(match['location']) + 2},
4,
GL_FLOAT,
GL_FALSE,
sizeof ({struct_name}),
GSIZE_TO_POINTER (offset + G_STRUCT_OFFSET({struct_name}, {match['name']}) + sizeof (float) * 8));
glEnableVertexAttribArray ({int(match['location'] + 3)});
glVertexAttribDivisor ({int(match['location'] + 3)}, 1);
glVertexAttribPointer ({int(match['location']) + 3},
4,
GL_FLOAT,
GL_FALSE,
sizeof ({struct_name}),
GSIZE_TO_POINTER (offset + G_STRUCT_OFFSET({struct_name}, {match['name']}) + sizeof (float) * 12));''')
else:
raise Exception(f"Don't know what a {match['type']} is")
print(f'''}}
''')
print(f'''#ifdef GDK_RENDERING_VULKAN
static const VkPipelineVertexInputStateCreateInfo {var_name}_info = {{
.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
.vertexBindingDescriptionCount = 1,
.pVertexBindingDescriptions = (VkVertexInputBindingDescription[1]) {{
{{
.binding = 0,
.stride = sizeof ({struct_name}),
.inputRate = VK_VERTEX_INPUT_RATE_INSTANCE
}}
}},
.vertexAttributeDescriptionCount = {expected},
.pVertexAttributeDescriptions = (VkVertexInputAttributeDescription[{expected}]) {{''')
for match in matches:
if match['type'] == 'float':
print(f''' {{
.location = {match['location']},
.binding = 0,
.format = VK_FORMAT_R32_SFLOAT,
.offset = G_STRUCT_OFFSET({struct_name}, {match['name']}),
}},''')
elif match['type'] == 'int':
print(f''' {{
.location = {match['location']},
.binding = 0,
.format = VK_FORMAT_R32_SINT,
.offset = G_STRUCT_OFFSET({struct_name}, {match['name']}),
}},''')
elif match['type'] == 'uint':
print(f''' {{
.location = {match['location']},
.binding = 0,
.format = VK_FORMAT_R32_UINT,
.offset = G_STRUCT_OFFSET({struct_name}, {match['name']}),
}},''')
elif match['type'] == 'uvec2':
print(f''' {{
.location = {match['location']},
.binding = 0,
.format = VK_FORMAT_R32G32_UINT,
.offset = G_STRUCT_OFFSET({struct_name}, {match['name']}),
}},''')
elif match['type'] == 'vec2':
print(f''' {{
.location = {match['location']},
.binding = 0,
.format = VK_FORMAT_R32G32_SFLOAT,
.offset = G_STRUCT_OFFSET({struct_name}, {match['name']}),
}},''')
elif match['type'] == 'vec4':
print(f''' {{
.location = {match['location']},
.binding = 0,
.format = VK_FORMAT_R32G32B32A32_SFLOAT,
.offset = G_STRUCT_OFFSET({struct_name}, {match['name']}),
}},''')
elif match['type'] == 'mat3x4':
print(f''' {{
.location = {match['location']},
.binding = 0,
.format = VK_FORMAT_R32G32B32A32_SFLOAT,
.offset = G_STRUCT_OFFSET({struct_name}, {match['name']}),
}},
{{
.location = {int(match['location']) + 1},
.binding = 0,
.format = VK_FORMAT_R32G32B32A32_SFLOAT,
.offset = G_STRUCT_OFFSET({struct_name}, {match['name']}) + sizeof (float) * 4,
}},
{{
.location = {int(match['location']) + 2},
.binding = 0,
.format = VK_FORMAT_R32G32B32A32_SFLOAT,
.offset = G_STRUCT_OFFSET({struct_name}, {match['name']}) + sizeof (float) * 8,
}},''')
elif match['type'] == 'mat4':
print(f''' {{
.location = {match['location']},
.binding = 0,
.format = VK_FORMAT_R32G32B32A32_SFLOAT,
.offset = G_STRUCT_OFFSET({struct_name}, {match['name']}),
}},
{{
.location = {int(match['location']) + 1},
.binding = 0,
.format = VK_FORMAT_R32G32B32A32_SFLOAT,
.offset = G_STRUCT_OFFSET({struct_name}, {match['name']}) + sizeof (float) * 4,
}},
{{
.location = {int(match['location']) + 2},
.binding = 0,
.format = VK_FORMAT_R32G32B32A32_SFLOAT,
.offset = G_STRUCT_OFFSET({struct_name}, {match['name']}) + sizeof (float) * 8,
}},
{{
.location = {int(match['location']) + 3},
.binding = 0,
.format = VK_FORMAT_R32G32B32A32_SFLOAT,
.offset = G_STRUCT_OFFSET({struct_name}, {match['name']}) + sizeof (float) * 12,
}},''')
else:
raise Exception(f"Don't know what a {match['type']} is")
print(f''' }},
}};
#endif
''')
View File
@@ -0,0 +1,44 @@
#include "common.glsl"
PASS(0) vec2 _pos;
PASS_FLAT(1) Rect _rect;
PASS(2) vec2 _tex_coord;
PASS_FLAT(3) uint _tex_id;
#ifdef GSK_VERTEX_SHADER
IN(0) vec4 in_rect;
IN(1) vec4 in_tex_rect;
IN(2) uint in_tex_id;
void
run (out vec2 pos)
{
Rect r = rect_from_gsk (in_rect);
pos = rect_get_position (r);
_pos = pos;
_rect = r;
_tex_coord = rect_get_coord (rect_from_gsk (in_tex_rect), pos);
_tex_id = in_tex_id;
}
#endif
#ifdef GSK_FRAGMENT_SHADER
void
run (out vec4 color,
out vec2 position)
{
color = texture (gsk_get_texture (_tex_id), _tex_coord) *
rect_coverage (_rect, _pos);
position = _pos;
}
#endif
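To make generate-header.py above concrete: applying its own rules to this file's IN declarations ('texture' derived from the file name, the 'in_' prefix stripped from each variable), the emitted gskgputextureinstance.h should begin along these lines (illustrative, not copied from the build; the comments are annotations, not generator output):

/* This file is auto-generated; any change will not be preserved */
#pragma once

typedef struct _GskGpuTextureInstance GskGpuTextureInstance;

struct _GskGpuTextureInstance {
  float rect[4];       /* from IN(0) vec4 in_rect */
  float tex_rect[4];   /* from IN(1) vec4 in_tex_rect */
  guint32 tex_id;      /* from IN(2) uint in_tex_id */
};

It is followed by the matching gsk_gpu_texture_setup_vao() and, under GDK_RENDERING_VULKAN, the gsk_gpu_texture_info vertex-input description.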
View File
@@ -0,0 +1,82 @@
gsk_private_gpu_include_shaders = files([
'common.glsl',
'common-gl.glsl',
'common-vulkan.glsl',
'ellipse.glsl',
'enums.glsl',
'rect.glsl',
'roundedrect.glsl',
])
gsk_private_gpu_shaders = files([
'gskgputexture.glsl',
])
gsk_private_gpu_shader_headers = []
gsk_private_gpu_gl_shaders = []
gsk_private_gpu_vulkan_vertex_shaders = []
gsk_private_gpu_vulkan_fragment_shaders = []
generate_header = find_program('generate-header.py')
process_includes = find_program('process-glsl-includes.py')
foreach shader: gsk_private_gpu_shaders
instance = fs.name (fs.replace_suffix (shader, '')) + 'instance.h'
shader_header = custom_target(instance,
output: instance,
input: shader,
command: [
generate_header,
'@INPUT@',
],
capture: true)
gsk_private_gpu_shader_headers += shader_header
gl_shader_name = fs.name (shader)
gl_shader = custom_target (gl_shader_name,
output: gl_shader_name,
input: shader,
depend_files: gsk_private_gpu_include_shaders,
command: [
process_includes,
'@INPUT@',
],
capture: true)
gsk_private_gpu_gl_shaders += gl_shader
if (have_vulkan)
vert_spv = fs.name (fs.replace_suffix (shader, '')) + '.vert.spv'
frag_spv = fs.name (fs.replace_suffix (shader, '')) + '.frag.spv'
vert_target = custom_target(vert_spv,
input: shader,
output: vert_spv,
depend_files: gsk_private_gpu_include_shaders,
command: [
glslc,
'-std=450',
'--target-env=vulkan1.2',
'-fshader-stage=vertex',
'-DGSK_VERTEX_SHADER=1',
'-O',
'@INPUT@',
'-o', '@OUTPUT@'
])
frag_target = custom_target(frag_spv,
input: shader,
output: frag_spv,
depend_files: gsk_private_gpu_include_shaders,
command: [
glslc,
'-std=450',
'--target-env=vulkan1.2',
'-fshader-stage=fragment',
'-DGSK_FRAGMENT_SHADER=1',
'-O',
'@INPUT@',
'-o', '@OUTPUT@'
])
gsk_private_gpu_vulkan_vertex_shaders += vert_target
gsk_private_gpu_vulkan_fragment_shaders += frag_target
endif
endforeach
View File
@@ -0,0 +1,25 @@
#!/usr/bin/env python3
import sys
import re
import os
loaded_files = []
def load (path):
if (path in loaded_files):
return
loaded_files.append (path)
with open(path) as f:
lines = f.readlines()
for line in lines:
match = re.search (r"^#include \"(.*)\"$", line)
if (match):
load (os.path.join (os.path.dirname(path), match.group(1)))
else:
print (line, end="")
load (sys.argv[1])
92
gsk/gpu/shaders/rect.glsl Normal file
View File
@@ -0,0 +1,92 @@
#ifndef _RECT_
#define _RECT_
struct Rect
{
/* x,y and z,w are the two corner points of this rect;
   note that it does not store width or height */
vec4 bounds;
};
Rect
rect_new_size (vec4 coords)
{
return Rect (coords + vec4 (0.0, 0.0, coords.xy));
}
Rect
rect_from_gsk (vec4 coords)
{
Rect result = rect_new_size (coords);
result.bounds *= push.scale.xyxy;
return result;
}
float
rect_distance (Rect r, vec2 p)
{
vec4 distance = (r.bounds - p.xyxy) * vec4(1.0, 1.0, -1.0, -1.0);
vec2 max2 = max (distance.xy, distance.zw);
return length (max (max2, 0.0)) + min (max(max2.x, max2.y), 0.0);
}
vec2
rect_size (Rect r)
{
return r.bounds.zw - r.bounds.xy;
}
Rect
rect_round_larger (Rect r)
{
return Rect (vec4 (floor(r.bounds.xy), ceil (r.bounds.zw)));
}
Rect
rect_round_larger_smaller (Rect r)
{
return Rect (mix (floor(r.bounds), ceil (r.bounds), bvec4(0, 1, 1, 0)));
}
Rect
rect_round_smaller_larger (Rect r)
{
return Rect (mix (floor(r.bounds), ceil (r.bounds), bvec4(1, 0, 0, 1)));
}
Rect
rect_intersect (Rect a, Rect b)
{
vec4 result = vec4(max(a.bounds.xy, b.bounds.xy), min(a.bounds.zw, b.bounds.zw));
if (any (greaterThanEqual (result.xy, result.zw)))
return Rect (vec4(0.0));
return Rect(result);
}
Rect
rect_union (Rect a, Rect b)
{
return Rect (vec4 (min (a.bounds.xy, b.bounds.xy), max (a.bounds.zw, b.bounds.zw)));
}
vec2
rect_get_coord (Rect r, vec2 pt)
{
return (pt - r.bounds.xy) / rect_size (r);
}
#ifdef GSK_FRAGMENT_SHADER
float
rect_coverage (Rect r, vec2 p)
{
vec2 dFdp = abs(fwidth (p));
Rect prect = Rect(vec4(p - 0.5 * dFdp, p + 0.5 * dFdp));
Rect coverect = rect_intersect (r, prect);
vec2 coverage = rect_size(coverect) / dFdp;
return coverage.x * coverage.y;
}
#endif
#endif
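rect_coverage above computes antialiased coverage by area rather than by distance: the pixel footprint around p is an axis-aligned box of size f = |fwidth(p)|, and the returned value is the fraction of that box the rect overlaps:

\mathrm{coverage}(R, p) \;=\; \frac{\lvert R \cap P \rvert}{\lvert P \rvert}, \qquad P = \left[p - \tfrac{f}{2},\; p + \tfrac{f}{2}\right]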
View File
@@ -0,0 +1,90 @@
#ifndef _ROUNDED_RECT_
#define _ROUNDED_RECT_
#include "ellipse.glsl"
#include "rect.glsl"
struct RoundedRect
{
vec4 bounds;
vec4 corner_widths;
vec4 corner_heights;
};
RoundedRect
rounded_rect_from_gsk (mat3x4 gsk_rounded_rect)
{
return RoundedRect ((gsk_rounded_rect[0].xyxy + vec4 (0.0, 0.0, gsk_rounded_rect[0].zw)) * push.scale.xyxy,
gsk_rounded_rect[1] * push.scale.xxxx,
gsk_rounded_rect[2] * push.scale.yyyy);
}
float
rounded_rect_distance (RoundedRect r, vec2 p)
{
Rect bounds = Rect(vec4(r.bounds));
float bounds_distance = rect_distance (bounds, p);
Ellipse tl = Ellipse (r.bounds.xy + vec2( r.corner_widths.x, r.corner_heights.x),
vec2(r.corner_widths.x, r.corner_heights.x));
Ellipse tr = Ellipse (r.bounds.zy + vec2(-r.corner_widths.y, r.corner_heights.y),
vec2(r.corner_widths.y, r.corner_heights.y));
Ellipse br = Ellipse (r.bounds.zw + vec2(-r.corner_widths.z, -r.corner_heights.z),
vec2(r.corner_widths.z, r.corner_heights.z));
Ellipse bl = Ellipse (r.bounds.xw + vec2( r.corner_widths.w, -r.corner_heights.w),
vec2(r.corner_widths.w, r.corner_heights.w));
vec4 distances = vec4(ellipse_distance (tl, p),
ellipse_distance (tr, p),
ellipse_distance (br, p),
ellipse_distance (bl, p));
bvec4 is_out = bvec4(p.x < tl.center.x && p.y < tl.center.y,
p.x > tr.center.x && p.y < tr.center.y,
p.x > br.center.x && p.y > br.center.y,
p.x < bl.center.x && p.y > bl.center.y);
distances = mix (vec4(bounds_distance), distances, is_out);
vec2 max2 = max (distances.xy, distances.zw);
return max (max2.x, max2.y);
}
RoundedRect
rounded_rect_scale (RoundedRect r, vec2 scale)
{
r.bounds *= scale.xyxy;
r.corner_widths *= scale.xxxx;
r.corner_heights *= scale.yyyy;
return r;
}
RoundedRect
rounded_rect_shrink (RoundedRect r, vec4 amount)
{
vec4 new_bounds = r.bounds + vec4(1.0,1.0,-1.0,-1.0) * amount.wxyz;
vec4 new_widths = max (r.corner_widths - sign (r.corner_widths) * amount.wyyw, 0.0);
vec4 new_heights = max (r.corner_heights - sign (r.corner_heights) * amount.xxzz, 0.0);
new_widths = min (new_widths, new_bounds.z - new_bounds.x);
new_heights = min (new_heights, new_bounds.w - new_bounds.y);
return RoundedRect (new_bounds, new_widths, new_heights);
}
#ifdef GSK_FRAGMENT_SHADER
float
rounded_rect_coverage (RoundedRect r, vec2 p)
{
vec2 fw = abs (fwidth (p));
float distance_scale = max (fw.x, fw.y);
float distance = rounded_rect_distance (r, p) / distance_scale;
float coverage = 0.5 - distance;
return clamp (coverage, 0.0, 1.0);
}
#endif
#endif
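rounded_rect_coverage instead goes through the signed distance, rescaled to pixel units and pushed through a half-pixel ramp:

\mathrm{coverage}(r, p) \;=\; \operatorname{clamp}\!\left(\tfrac{1}{2} - \frac{d(r, p)}{\max(f_x, f_y)},\; 0,\; 1\right)

so it evaluates to 1 well inside the rounded rect, 0 well outside, and ramps linearly across roughly one pixel at the boundary.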
View File
@@ -94,3 +94,15 @@ gsk_rect_equal (const graphene_rect_t *r1,
          r1->size.width == r2->size.width &&
          r1->size.height == r2->size.height;
 }
+
+static inline void
+gsk_gpu_rect_to_float (const graphene_rect_t  *rect,
+                       const graphene_point_t *offset,
+                       float                   values[4])
+{
+  values[0] = rect->origin.x + offset->x;
+  values[1] = rect->origin.y + offset->y;
+  values[2] = rect->size.width;
+  values[3] = rect->size.height;
+}
View File
@@ -67,18 +67,25 @@ gsk_private_sources = files([
   'gl/gskglprofiler.c',
   'gl/stb_rect_pack.c',
   'gl/fp16.c',
+  'gpu/gskglbuffer.c',
   'gpu/gskgldevice.c',
   'gpu/gskglframe.c',
   'gpu/gskglimage.c',
   'gpu/gskgpublitop.c',
+  'gpu/gskgpubuffer.c',
+  'gpu/gskgpuclip.c',
   'gpu/gskgpudownloadop.c',
   'gpu/gskgpudevice.c',
   'gpu/gskgpuframe.c',
+  'gpu/gskgpuglobalsop.c',
   'gpu/gskgpuimage.c',
   'gpu/gskgpunodeprocessor.c',
   'gpu/gskgpuop.c',
   'gpu/gskgpuprint.c',
   'gpu/gskgpurenderer.c',
+  'gpu/gskgpurenderpassop.c',
+  'gpu/gskgpushaderop.c',
+  'gpu/gskgputextureop.c',
   'gpu/gskgpuuploadop.c',
   'gpu/gsknglrenderer.c',
 ])
@@ -142,6 +149,8 @@ if have_vulkan
   ])
 endif # have_vulkan

+subdir('gpu/shaders')
+
 if get_variable('broadway_enabled')
   gsk_public_sources += files([
     'broadway/gskbroadwayrenderer.c',
@@ -155,7 +164,10 @@ gsk_resources_xml = custom_target(output: 'gsk.resources.xml',
     '@OUTPUT@',
     gsk_private_gl_shaders,
     gsk_private_vulkan_compiled_shaders,
-    gsk_private_vulkan_shaders
+    gsk_private_vulkan_shaders,
+    gsk_private_gpu_gl_shaders,
+    gsk_private_gpu_vulkan_vertex_shaders,
+    gsk_private_gpu_vulkan_fragment_shaders,
   ],
 )
@@ -171,7 +183,11 @@ gskenum_h = gsk_enums[1]
 gskresources = gnome.compile_resources('gskresources',
   gsk_resources_xml,
-  dependencies: gsk_private_vulkan_compiled_shaders_deps,
+  dependencies: [
+    gsk_private_vulkan_compiled_shaders_deps,
+    gsk_private_gpu_vulkan_fragment_shaders,
+    gsk_private_gpu_vulkan_vertex_shaders,
+  ],
   source_dir: [meson.current_build_dir(), meson.current_source_dir()],
   c_name: '_gsk',
   extra_args: [ '--manual-register', ],
@@ -205,6 +221,7 @@ libgsk = static_library('gsk',
     gsk_enums,
     gskresources,
     gsk_private_vulkan_shader_headers,
+    gsk_private_gpu_shader_headers,
   ],
   dependencies: gsk_deps,
   include_directories: [ confinc, ],