gpu: Sort passes correctly

In a very particular situation, it could happen that our renderpass
reordering did not work out.
Consider this nesting of renderpasses (indentation indicates subpasses):

pass A
  subpass of A
pass B
  subpass of B

Our reordering code would reorder this as:

subpass of B
subpass of A
pass A
pass B

Which doesn't sound too bad, the subpasses happen before the passes
after all.

However, a subpass might be a pass that converts the image for a texture
stored in the texture cache and then updates the cached image.
If "subpass of A" is such a pass *and* if "subpass of B" then renders
with exactly this texture, then "subpass of B" will use the result of
"subpass of A" as a source — but with the reordering above, "subpass of B"
runs before "subpass of A" has produced that result.

The fix is to ensure that subpasses stay ordered, too.

The new order moves subpasses right before their parent pass, so the
order of the example now looks like:

subpass of A
pass A
subpass of B
pass B

The place where this happened most commonly was when drawing thumbnail
images in Nautilus, the GTK filechooser or Fractal.
Those images are usually PNG files, which are straight alpha. They are then
drawn with a drop shadow, which requires an offscreen for drawing as
well as those images as premultiplied sources, so lots of subpasses happen.
If there is then a redraw with a somewhat tricky subregion, the
region-slicing code could end up generating 2 passes that each draw
half of the thumbnail image - the first pass drawing the top half and the
second pass drawing the bottom half.
And due to the bug the bottom half would then be drawn from the
offscreen before the actual contents of the offscreen would be drawn,
leading to a corrupt bottom part of the image.

Test included.

Fixes: #6318
This commit is contained in:
Benjamin Otte 2024-03-16 23:30:22 +01:00
parent a23a7d4149
commit aff34e8d1b
4 changed files with 77 additions and 25 deletions

View File

@ -251,27 +251,35 @@ gsk_gpu_frame_sort_render_pass (GskGpuFrame *self,
SortData *sort_data)
{
SortData subpasses = { { NULL, NULL }, { NULL, NULL } };
SortData pass = { { NULL, NULL }, { NULL, NULL } };
if (op->op_class->stage == GSK_GPU_STAGE_BEGIN_PASS)
{
pass.command.first = op;
pass.command.last = op;
op = op->next;
}
while (op)
{
switch (op->op_class->stage)
{
case GSK_GPU_STAGE_UPLOAD:
if (sort_data->upload.first == NULL)
sort_data->upload.first = op;
if (pass.upload.first == NULL)
pass.upload.first = op;
else
sort_data->upload.last->next = op;
sort_data->upload.last = op;
pass.upload.last->next = op;
pass.upload.last = op;
op = op->next;
break;
case GSK_GPU_STAGE_COMMAND:
case GSK_GPU_STAGE_SHADER:
if (sort_data->command.first == NULL)
sort_data->command.first = op;
if (pass.command.first == NULL)
pass.command.first = op;
else
sort_data->command.last->next = op;
sort_data->command.last = op;
pass.command.last->next = op;
pass.command.last = op;
op = op->next;
break;
@ -285,19 +293,13 @@ gsk_gpu_frame_sort_render_pass (GskGpuFrame *self,
break;
case GSK_GPU_STAGE_BEGIN_PASS:
if (subpasses.command.first == NULL)
subpasses.command.first = op;
else
subpasses.command.last->next = op;
subpasses.command.last = op;
/* append subpass to existing subpasses */
op = gsk_gpu_frame_sort_render_pass (self, op->next, &subpasses);
op = gsk_gpu_frame_sort_render_pass (self, op, &subpasses);
break;
case GSK_GPU_STAGE_END_PASS:
sort_data->command.last->next = op;
sort_data->command.last = op;
pass.command.last->next = op;
pass.command.last = op;
op = op->next;
goto out;
@ -308,22 +310,38 @@ gsk_gpu_frame_sort_render_pass (GskGpuFrame *self,
}
out:
/* prepend subpasses to the current pass */
/* append to the sort data, first the subpasses, then the current pass */
if (subpasses.upload.first)
{
if (sort_data->upload.first != NULL)
subpasses.upload.last->next = sort_data->upload.first;
sort_data->upload.last->next = subpasses.upload.first;
else
sort_data->upload.last = subpasses.upload.last;
sort_data->upload.first = subpasses.upload.first;
sort_data->upload.first = subpasses.upload.first;
sort_data->upload.last = subpasses.upload.last;
}
if (pass.upload.first)
{
if (sort_data->upload.first != NULL)
sort_data->upload.last->next = pass.upload.first;
else
sort_data->upload.first = pass.upload.first;
sort_data->upload.last = pass.upload.last;
}
if (subpasses.command.first)
{
if (sort_data->command.first != NULL)
subpasses.command.last->next = sort_data->command.first;
sort_data->command.last->next = subpasses.command.first;
else
sort_data->command.last = subpasses.command.last;
sort_data->command.first = subpasses.command.first;
sort_data->command.first = subpasses.command.first;
sort_data->command.last = subpasses.command.last;
}
if (pass.command.first)
{
if (sort_data->command.first != NULL)
sort_data->command.last->next = pass.command.first;
else
sort_data->command.first = pass.command.first;
sort_data->command.last = pass.command.last;
}
return op;
@ -334,8 +352,13 @@ gsk_gpu_frame_sort_ops (GskGpuFrame *self)
{
GskGpuFramePrivate *priv = gsk_gpu_frame_get_instance_private (self);
SortData sort_data = { { NULL, }, };
GskGpuOp *op;
gsk_gpu_frame_sort_render_pass (self, priv->first_op, &sort_data);
op = priv->first_op;
while (op)
{
op = gsk_gpu_frame_sort_render_pass (self, op, &sort_data);
}
if (sort_data.upload.first)
{

View File

@ -0,0 +1,28 @@
clip {
clip: 0 0 12 8;
child: color-matrix "node1" {
matrix: matrix3d(1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 255);
child: shadow "node2" {
shadows: rgb(255,0,0) 0 0 10;
child: texture "node3" {
bounds: 0 0 20 20;
texture: "texture1" url("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABkAAAAZCAYAAADE6YVjAAAAOUlEQVRIiWNk+M/wn4HGgInWFoxa\
QjJgwSrKyMBItolYEtLwCa5RS0YtGbVk1JKhaAnjaB0/Mi0BALtiBi9IMFUcAAAAAElFTkSuQmCC\
\
");
}
}
}
}
clip {
clip: 0 8 8 12;
child: "node1";
}
clip {
clip: 8 12 12 8;
child: "node1";
}
clip {
clip: 12 0 8 12;
child: "node1";
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 118 B

View File

@ -136,6 +136,7 @@ compare_render_tests = [
'repeat-scaling',
'repeat-texture',
'repeating-gradient-scaled',
'reuse-of-texture-nested-in-offscreens',
'rounded-clip-with-huge-bounds-nogl',
'scale-textures-negative-ngl',
'scale-up-down',