webrender/
prepare.rs

1/* This Source Code Form is subject to the terms of the Mozilla Public
2 * License, v. 2.0. If a copy of the MPL was not distributed with this
3 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
4
5//! # Prepare pass
6//!
//! The prepare pass walks the visible pictures and their primitive lists,
//! requesting the resources each visible primitive needs before rendering:
//! GPU cache data, cached render tasks (e.g. borders, line decorations)
//! and clip masks.
8
9use api::{ColorF, DebugFlags, PropertyBinding};
10use api::{BoxShadowClipMode, BorderStyle, ClipMode};
11use api::units::*;
12use euclid::Scale;
13use smallvec::SmallVec;
14use crate::composite::CompositorSurfaceKind;
15use crate::command_buffer::{CommandBufferIndex, PrimitiveCommand};
16use crate::image_tiling::{self, Repetition};
17use crate::border::{get_max_scale_for_border, build_border_instances};
18use crate::clip::{ClipStore, ClipNodeRange};
19use crate::pattern::Pattern;
20use crate::spatial_tree::{SpatialNodeIndex, SpatialTree};
21use crate::clip::{ClipDataStore, ClipNodeFlags, ClipChainInstance, ClipItemKind};
22use crate::frame_builder::{FrameBuildingContext, FrameBuildingState, PictureContext, PictureState};
23use crate::gpu_cache::{GpuCacheHandle, GpuDataRequest};
24use crate::gpu_types::BrushFlags;
25use crate::internal_types::{FastHashMap, PlaneSplitAnchor, Filter};
26use crate::picture::{ClusterFlags, PictureCompositeMode, PicturePrimitive, SliceId};
27use crate::picture::{PrimitiveList, PrimitiveCluster, SurfaceIndex, TileCacheInstance, SubpixelMode, Picture3DContext};
28use crate::prim_store::line_dec::MAX_LINE_DECORATION_RESOLUTION;
29use crate::prim_store::*;
30use crate::quad;
31use crate::prim_store::gradient::GradientGpuBlockBuilder;
32use crate::render_backend::DataStores;
33use crate::render_task_graph::RenderTaskId;
34use crate::render_task_cache::RenderTaskCacheKeyKind;
35use crate::render_task_cache::{RenderTaskCacheKey, to_cache_size, RenderTaskParent};
36use crate::render_task::{EmptyTask, MaskSubPass, RenderTask, RenderTaskKind, SubPass};
37use crate::segment::SegmentBuilder;
38use crate::util::{clamp_to_scale_factor, pack_as_float, ScaleOffset};
39use crate::visibility::{compute_conservative_visible_rect, PrimitiveVisibility, VisibilityState};
40
41
42const MAX_MASK_SIZE: i32 = 4096;
43
44const MIN_BRUSH_SPLIT_AREA: f32 = 128.0 * 128.0;
45
/// The entry point of the prepare pass.
///
/// Takes the render context from the picture at `pic_index`, prepares all of
/// its visible primitives (which recurses into any child pictures), then
/// restores the context. Returns `false` if the picture has no context to
/// take (it won't be rendered this frame), `true` otherwise.
pub fn prepare_picture(
    pic_index: PictureIndex,
    store: &mut PrimitiveStore,
    surface_index: Option<SurfaceIndex>,
    subpixel_mode: SubpixelMode,
    frame_context: &FrameBuildingContext,
    frame_state: &mut FrameBuildingState,
    data_stores: &mut DataStores,
    scratch: &mut PrimitiveScratchBuffer,
    tile_caches: &mut FastHashMap<SliceId, Box<TileCacheInstance>>,
    prim_instances: &mut Vec<PrimitiveInstance>,
) -> bool {
    // Skip if this picture was already prepared this frame; the visited
    // flag guards against duplicate work.
    if frame_state.visited_pictures[pic_index.0] {
        return true;
    }

    frame_state.visited_pictures[pic_index.0] = true;

    let pic = &mut store.pictures[pic_index.0];
    // take_context moves the prim list (and related state) out of the
    // picture, so that `store` can be mutably borrowed again while the
    // primitives are prepared below.
    let Some((pic_context, mut pic_state, mut prim_list)) = pic.take_context(
        pic_index,
        surface_index,
        subpixel_mode,
        frame_state,
        frame_context,
        data_stores,
        scratch,
        tile_caches,
    ) else {
        return false;
    };

    prepare_primitives(
        store,
        &mut prim_list,
        &pic_context,
        &mut pic_state,
        frame_context,
        frame_state,
        data_stores,
        scratch,
        tile_caches,
        prim_instances,
    );

    // Restore the dependencies (borrow check dance)
    store.pictures[pic_context.pic_index.0].restore_context(
        pic_context.pic_index,
        prim_list,
        pic_context,
        prim_instances,
        frame_context,
        frame_state,
    );

    true
}
104
105fn prepare_primitives(
106    store: &mut PrimitiveStore,
107    prim_list: &mut PrimitiveList,
108    pic_context: &PictureContext,
109    pic_state: &mut PictureState,
110    frame_context: &FrameBuildingContext,
111    frame_state: &mut FrameBuildingState,
112    data_stores: &mut DataStores,
113    scratch: &mut PrimitiveScratchBuffer,
114    tile_caches: &mut FastHashMap<SliceId, Box<TileCacheInstance>>,
115    prim_instances: &mut Vec<PrimitiveInstance>,
116) {
117    profile_scope!("prepare_primitives");
118    let mut cmd_buffer_targets = Vec::new();
119
120    for cluster in &mut prim_list.clusters {
121        if !cluster.flags.contains(ClusterFlags::IS_VISIBLE) {
122            continue;
123        }
124        profile_scope!("cluster");
125        pic_state.map_local_to_pic.set_target_spatial_node(
126            cluster.spatial_node_index,
127            frame_context.spatial_tree,
128        );
129
130        for prim_instance_index in cluster.prim_range() {
131            if frame_state.surface_builder.get_cmd_buffer_targets_for_prim(
132                &prim_instances[prim_instance_index].vis,
133                &mut cmd_buffer_targets,
134            ) {
135                let plane_split_anchor = PlaneSplitAnchor::new(
136                    cluster.spatial_node_index,
137                    PrimitiveInstanceIndex(prim_instance_index as u32),
138                );
139
140                prepare_prim_for_render(
141                    store,
142                    prim_instance_index,
143                    cluster,
144                    pic_context,
145                    pic_state,
146                    frame_context,
147                    frame_state,
148                    plane_split_anchor,
149                    data_stores,
150                    scratch,
151                    tile_caches,
152                    prim_instances,
153                    &cmd_buffer_targets,
154                );
155
156                frame_state.num_visible_primitives += 1;
157                continue;
158            }
159
160            // TODO(gw): Technically no need to clear visibility here, since from this point it
161            //           only matters if it got added to a command buffer. Kept here for now to
162            //           make debugging simpler, but perhaps we can remove / tidy this up.
163            prim_instances[prim_instance_index].clear_visibility();
164        }
165    }
166}
167
168fn can_use_clip_chain_for_quad_path(
169    clip_chain: &ClipChainInstance,
170    clip_store: &ClipStore,
171    data_stores: &DataStores,
172) -> bool {
173    if !clip_chain.needs_mask {
174        return true;
175    }
176
177    for i in 0 .. clip_chain.clips_range.count {
178        let clip_instance = clip_store.get_instance_from_range(&clip_chain.clips_range, i);
179        let clip_node = &data_stores.clip[clip_instance.handle];
180
181        match clip_node.item.kind {
182            ClipItemKind::RoundedRectangle { .. } | ClipItemKind::Rectangle { .. } => {}
183            ClipItemKind::BoxShadow { .. } => {
184                // legacy path for box-shadows for now (move them to a separate primitive next)
185                return false;
186            }
187            ClipItemKind::Image { .. } => {
188                panic!("bug: image-masks not expected on rect/quads");
189            }
190        }
191    }
192
193    true
194}
195
/// Prepare a single primitive instance for rendering.
///
/// Picture primitives are handled by recursing into `prepare_picture` first,
/// so that the actual rect of this primitive is known before it is prepared.
/// For non-passthrough primitives, this decides whether the new quad
/// rendering path or the legacy path (which goes through `update_clip_task`
/// for segmenting and mask generation) is used, then hands off to
/// `prepare_interned_prim_for_render`.
fn prepare_prim_for_render(
    store: &mut PrimitiveStore,
    prim_instance_index: usize,
    cluster: &mut PrimitiveCluster,
    pic_context: &PictureContext,
    pic_state: &mut PictureState,
    frame_context: &FrameBuildingContext,
    frame_state: &mut FrameBuildingState,
    plane_split_anchor: PlaneSplitAnchor,
    data_stores: &mut DataStores,
    scratch: &mut PrimitiveScratchBuffer,
    tile_caches: &mut FastHashMap<SliceId, Box<TileCacheInstance>>,
    prim_instances: &mut Vec<PrimitiveInstance>,
    targets: &[CommandBufferIndex],
) {
    profile_scope!("prepare_prim_for_render");

    // If we have dependencies, we need to prepare them first, in order
    // to know the actual rect of this primitive.
    // For example, scrolling may affect the location of an item in
    // local space, which may force us to render this item on a larger
    // picture target, if being composited.
    let mut is_passthrough = false;
    if let PrimitiveInstanceKind::Picture { pic_index, .. } = prim_instances[prim_instance_index].kind {
        if !prepare_picture(
            pic_index,
            store,
            Some(pic_context.surface_index),
            pic_context.subpixel_mode,
            frame_context,
            frame_state,
            data_stores,
            scratch,
            tile_caches,
            prim_instances
        ) {
            // The child picture won't be rendered; nothing to prepare.
            return;
        }

        // A picture with no composite mode draws its content directly into
        // the parent surface (passthrough).
        is_passthrough = store
            .pictures[pic_index.0]
            .composite_mode
            .is_none();
    }

    // Re-borrow the instance now that the recursive preparation above
    // (which also needed `prim_instances`) is done.
    let prim_instance = &mut prim_instances[prim_instance_index];

    if !is_passthrough {
        fn may_need_repetition(stretch_size: LayoutSize, prim_rect: LayoutRect) -> bool {
            stretch_size.width < prim_rect.width() ||
                stretch_size.height < prim_rect.height()
        }
        // Bug 1887841: At the moment the quad shader does not support repetitions.
        // Bug 1888349: Some primitives have brush segments that aren't handled by
        // the quad infrastructure yet.
        let disable_quad_path = match &prim_instance.kind {
            PrimitiveInstanceKind::Rectangle { .. } => false,
            PrimitiveInstanceKind::LinearGradient { data_handle, .. } => {
                let prim_data = &data_stores.linear_grad[*data_handle];
                !prim_data.brush_segments.is_empty() ||
                    may_need_repetition(prim_data.stretch_size, prim_data.common.prim_rect)
                    || !frame_context.fb_config.precise_linear_gradients
            }
            PrimitiveInstanceKind::RadialGradient { data_handle, .. } => {
                let prim_data = &data_stores.radial_grad[*data_handle];
                !prim_data.brush_segments.is_empty() ||
                    may_need_repetition(prim_data.stretch_size, prim_data.common.prim_rect)
            }
            // TODO(bug 1899546) Enable quad conic gradients with SWGL.
            PrimitiveInstanceKind::ConicGradient { data_handle, .. } if !frame_context.fb_config.is_software => {
                let prim_data = &data_stores.conic_grad[*data_handle];
                !prim_data.brush_segments.is_empty() ||
                    may_need_repetition(prim_data.stretch_size, prim_data.common.prim_rect)
            }
            _ => true,
        };

        // In this initial patch, we only support non-masked primitives through the new
        // quad rendering path. Follow up patches will extend this to support masks, and
        // then use by other primitives. In the new quad rendering path, we'll still want
        // to skip the entry point to `update_clip_task` as that does old-style segmenting
        // and mask generation.
        let should_update_clip_task = match &mut prim_instance.kind {
            PrimitiveInstanceKind::Rectangle { use_legacy_path, .. }
            | PrimitiveInstanceKind::RadialGradient { use_legacy_path, .. }
            | PrimitiveInstanceKind::ConicGradient { use_legacy_path, .. }
            | PrimitiveInstanceKind::LinearGradient { use_legacy_path, .. }
            => {
                // Record the chosen path on the instance so that the
                // batching code later in the frame agrees with it.
                *use_legacy_path = disable_quad_path || !can_use_clip_chain_for_quad_path(
                    &prim_instance.vis.clip_chain,
                    frame_state.clip_store,
                    data_stores,
                );

                *use_legacy_path
            }
            PrimitiveInstanceKind::BoxShadow { .. } |
            PrimitiveInstanceKind::Picture { .. } => false,
            _ => true,
        };

        if should_update_clip_task {
            let prim_rect = data_stores.get_local_prim_rect(
                prim_instance,
                &store.pictures,
                frame_state.surfaces,
            );

            if !update_clip_task(
                prim_instance,
                &prim_rect.min,
                cluster.spatial_node_index,
                pic_context.raster_spatial_node_index,
                pic_context.visibility_spatial_node_index,
                pic_context,
                pic_state,
                frame_context,
                frame_state,
                store,
                data_stores,
                scratch,
            ) {
                // Primitive is completely clipped out; skip it.
                return;
            }
        }
    }

    prepare_interned_prim_for_render(
        store,
        PrimitiveInstanceIndex(prim_instance_index as u32),
        prim_instance,
        cluster,
        plane_split_anchor,
        pic_context,
        pic_state,
        frame_context,
        frame_state,
        data_stores,
        scratch,
        targets,
    )
}
338
339/// Prepare an interned primitive for rendering, by requesting
340/// resources, render tasks etc. This is equivalent to the
341/// prepare_prim_for_render_inner call for old style primitives.
342fn prepare_interned_prim_for_render(
343    store: &mut PrimitiveStore,
344    prim_instance_index: PrimitiveInstanceIndex,
345    prim_instance: &mut PrimitiveInstance,
346    cluster: &mut PrimitiveCluster,
347    plane_split_anchor: PlaneSplitAnchor,
348    pic_context: &PictureContext,
349    pic_state: &mut PictureState,
350    frame_context: &FrameBuildingContext,
351    frame_state: &mut FrameBuildingState,
352    data_stores: &mut DataStores,
353    scratch: &mut PrimitiveScratchBuffer,
354    targets: &[CommandBufferIndex],
355) {
356    let prim_spatial_node_index = cluster.spatial_node_index;
357    let device_pixel_scale = frame_state.surfaces[pic_context.surface_index.0].device_pixel_scale;
358
359    match &mut prim_instance.kind {
360        PrimitiveInstanceKind::BoxShadow { data_handle } => {
361            let prim_data = &mut data_stores.box_shadow[*data_handle];
362
363            quad::prepare_quad(
364                prim_data,
365                &prim_data.kind.outer_shadow_rect,
366                prim_instance_index,
367                prim_spatial_node_index,
368                &prim_instance.vis.clip_chain,
369                device_pixel_scale,
370                frame_context,
371                pic_context,
372                targets,
373                &data_stores.clip,
374                frame_state,
375                pic_state,
376                scratch,
377            );
378
379            return;
380        }
381        PrimitiveInstanceKind::LineDecoration { data_handle, ref mut render_task, .. } => {
382            profile_scope!("LineDecoration");
383            let prim_data = &mut data_stores.line_decoration[*data_handle];
384            let common_data = &mut prim_data.common;
385            let line_dec_data = &mut prim_data.kind;
386
            // Update the template this instance references, which may refresh the GPU
388            // cache with any shared template data.
389            line_dec_data.update(common_data, frame_state);
390
391            // Work out the device pixel size to be used to cache this line decoration.
392
393            // If we have a cache key, it's a wavy / dashed / dotted line. Otherwise, it's
394            // a simple solid line.
395            if let Some(cache_key) = line_dec_data.cache_key.as_ref() {
396                // TODO(gw): These scale factors don't do a great job if the world transform
397                //           contains perspective
398                let scale = frame_context
399                    .spatial_tree
400                    .get_world_transform(prim_spatial_node_index)
401                    .scale_factors();
402
403                // Scale factors are normalized to a power of 2 to reduce the number of
404                // resolution changes.
405                // For frames with a changing scale transform round scale factors up to
406                // nearest power-of-2 boundary so that we don't keep having to redraw
407                // the content as it scales up and down. Rounding up to nearest
408                // power-of-2 boundary ensures we never scale up, only down --- avoiding
409                // jaggies. It also ensures we never scale down by more than a factor of
410                // 2, avoiding bad downscaling quality.
411                let scale_width = clamp_to_scale_factor(scale.0, false);
412                let scale_height = clamp_to_scale_factor(scale.1, false);
413                // Pick the maximum dimension as scale
414                let world_scale = LayoutToWorldScale::new(scale_width.max(scale_height));
415
416                let scale_factor = world_scale * Scale::new(1.0);
417                let task_size_f = (LayoutSize::from_au(cache_key.size) * scale_factor).ceil();
418                let mut task_size = if task_size_f.width > MAX_LINE_DECORATION_RESOLUTION as f32 ||
419                   task_size_f.height > MAX_LINE_DECORATION_RESOLUTION as f32 {
420                     let max_extent = task_size_f.width.max(task_size_f.height);
421                     let task_scale_factor = Scale::new(MAX_LINE_DECORATION_RESOLUTION as f32 / max_extent);
422                     let task_size = (LayoutSize::from_au(cache_key.size) * scale_factor * task_scale_factor)
423                                    .ceil().to_i32();
424                    task_size
425                } else {
426                    task_size_f.to_i32()
427                };
428
429                // It's plausible, due to float accuracy issues that the line decoration may be considered
430                // visible even if the scale factors are ~0. However, the render task allocation below requires
431                // that the size of the task is > 0. To work around this, ensure that the task size is at least
432                // 1x1 pixels
433                task_size.width = task_size.width.max(1);
434                task_size.height = task_size.height.max(1);
435
436                // Request a pre-rendered image task.
437                // TODO(gw): This match is a bit untidy, but it should disappear completely
438                //           once the prepare_prims and batching are unified. When that
439                //           happens, we can use the cache handle immediately, and not need
440                //           to temporarily store it in the primitive instance.
441                *render_task = Some(frame_state.resource_cache.request_render_task(
442                    Some(RenderTaskCacheKey {
443                        size: task_size,
444                        kind: RenderTaskCacheKeyKind::LineDecoration(cache_key.clone()),
445                    }),
446                    false,
447                    RenderTaskParent::Surface,
448                    frame_state.gpu_cache,
449                    &mut frame_state.frame_gpu_data.f32,
450                    frame_state.rg_builder,
451                    &mut frame_state.surface_builder,
452                    &mut |rg_builder, _, _| {
453                        rg_builder.add().init(RenderTask::new_dynamic(
454                            task_size,
455                            RenderTaskKind::new_line_decoration(
456                                cache_key.style,
457                                cache_key.orientation,
458                                cache_key.wavy_line_thickness.to_f32_px(),
459                                LayoutSize::from_au(cache_key.size),
460                            ),
461                        ))
462                    }
463                ));
464            }
465        }
466        PrimitiveInstanceKind::TextRun { run_index, data_handle, .. } => {
467            profile_scope!("TextRun");
468            let prim_data = &mut data_stores.text_run[*data_handle];
469            let run = &mut store.text_runs[*run_index];
470
471            prim_data.common.may_need_repetition = false;
472
473            // The glyph transform has to match `glyph_transform` in "ps_text_run" shader.
474            // It's relative to the rasterizing space of a glyph.
475            let transform = frame_context.spatial_tree
476                .get_relative_transform(
477                    prim_spatial_node_index,
478                    pic_context.raster_spatial_node_index,
479                )
480                .into_fast_transform();
481            let prim_offset = prim_data.common.prim_rect.min.to_vector() - run.reference_frame_relative_offset;
482
483            let surface = &frame_state.surfaces[pic_context.surface_index.0];
484
485            // If subpixel AA is disabled due to the backing surface the glyphs
486            // are being drawn onto, disable it (unless we are using the
487            // specifial subpixel mode that estimates background color).
488            let allow_subpixel = match prim_instance.vis.state {
489                VisibilityState::Culled |
490                VisibilityState::Unset |
491                VisibilityState::PassThrough => {
492                    panic!("bug: invalid visibility state");
493                }
494                VisibilityState::Visible { sub_slice_index, .. } => {
495                    // For now, we only allow subpixel AA on primary sub-slices. In future we
496                    // may support other sub-slices if we find content that does this.
497                    if sub_slice_index.is_primary() {
498                        match pic_context.subpixel_mode {
499                            SubpixelMode::Allow => true,
500                            SubpixelMode::Deny => false,
501                            SubpixelMode::Conditional { allowed_rect, prohibited_rect } => {
502                                // Conditional mode allows subpixel AA to be enabled for this
503                                // text run, so long as it's inside the allowed rect.
504                                allowed_rect.contains_box(&prim_instance.vis.clip_chain.pic_coverage_rect) &&
505                                !prohibited_rect.intersects(&prim_instance.vis.clip_chain.pic_coverage_rect)
506                            }
507                        }
508                    } else {
509                        false
510                    }
511                }
512            };
513
514            run.request_resources(
515                prim_offset,
516                &prim_data.font,
517                &prim_data.glyphs,
518                &transform.to_transform().with_destination::<_>(),
519                surface,
520                prim_spatial_node_index,
521                allow_subpixel,
522                frame_context.fb_config.low_quality_pinch_zoom,
523                frame_state.resource_cache,
524                frame_state.gpu_cache,
525                frame_context.spatial_tree,
526                scratch,
527            );
528
            // Update the template this instance references, which may refresh the GPU
530            // cache with any shared template data.
531            prim_data.update(frame_state);
532        }
533        PrimitiveInstanceKind::Clear { data_handle, .. } => {
534            profile_scope!("Clear");
535            let prim_data = &mut data_stores.prim[*data_handle];
536
537            prim_data.common.may_need_repetition = false;
538
            // Update the template this instance references, which may refresh the GPU
540            // cache with any shared template data.
541            prim_data.update(frame_state, frame_context.scene_properties);
542        }
543        PrimitiveInstanceKind::NormalBorder { data_handle, ref mut render_task_ids, .. } => {
544            profile_scope!("NormalBorder");
545            let prim_data = &mut data_stores.normal_border[*data_handle];
546            let common_data = &mut prim_data.common;
547            let border_data = &mut prim_data.kind;
548
549            common_data.may_need_repetition =
550                matches!(border_data.border.top.style, BorderStyle::Dotted | BorderStyle::Dashed) ||
551                matches!(border_data.border.right.style, BorderStyle::Dotted | BorderStyle::Dashed) ||
552                matches!(border_data.border.bottom.style, BorderStyle::Dotted | BorderStyle::Dashed) ||
553                matches!(border_data.border.left.style, BorderStyle::Dotted | BorderStyle::Dashed);
554
555
556            // Update the template this instance references, which may refresh the GPU
557            // cache with any shared template data.
558            border_data.update(common_data, frame_state);
559
560            // TODO(gw): For now, the scale factors to rasterize borders at are
561            //           based on the true world transform of the primitive. When
562            //           raster roots with local scale are supported in future,
563            //           that will need to be accounted for here.
564            let scale = frame_context
565                .spatial_tree
566                .get_world_transform(prim_spatial_node_index)
567                .scale_factors();
568
569            // Scale factors are normalized to a power of 2 to reduce the number of
570            // resolution changes.
571            // For frames with a changing scale transform round scale factors up to
572            // nearest power-of-2 boundary so that we don't keep having to redraw
573            // the content as it scales up and down. Rounding up to nearest
574            // power-of-2 boundary ensures we never scale up, only down --- avoiding
575            // jaggies. It also ensures we never scale down by more than a factor of
576            // 2, avoiding bad downscaling quality.
577            let scale_width = clamp_to_scale_factor(scale.0, false);
578            let scale_height = clamp_to_scale_factor(scale.1, false);
579            // Pick the maximum dimension as scale
580            let world_scale = LayoutToWorldScale::new(scale_width.max(scale_height));
581            let mut scale = world_scale * device_pixel_scale;
582            let max_scale = get_max_scale_for_border(border_data);
583            scale.0 = scale.0.min(max_scale.0);
584
585            // For each edge and corner, request the render task by content key
586            // from the render task cache. This ensures that the render task for
587            // this segment will be available for batching later in the frame.
588            let mut handles: SmallVec<[RenderTaskId; 8]> = SmallVec::new();
589
590            for segment in &border_data.border_segments {
591                // Update the cache key device size based on requested scale.
592                let cache_size = to_cache_size(segment.local_task_size, &mut scale);
593                let cache_key = RenderTaskCacheKey {
594                    kind: RenderTaskCacheKeyKind::BorderSegment(segment.cache_key.clone()),
595                    size: cache_size,
596                };
597
598                handles.push(frame_state.resource_cache.request_render_task(
599                    Some(cache_key),
600                    false,          // TODO(gw): We don't calculate opacity for borders yet!
601                    RenderTaskParent::Surface,
602                    frame_state.gpu_cache,
603                    &mut frame_state.frame_gpu_data.f32,
604                    frame_state.rg_builder,
605                    &mut frame_state.surface_builder,
606                    &mut |rg_builder, _, _| {
607                        rg_builder.add().init(RenderTask::new_dynamic(
608                            cache_size,
609                            RenderTaskKind::new_border_segment(
610                                build_border_instances(
611                                    &segment.cache_key,
612                                    cache_size,
613                                    &border_data.border,
614                                    scale,
615                                )
616                            ),
617                        ))
618                    }
619                ));
620            }
621
622            *render_task_ids = scratch
623                .border_cache_handles
624                .extend(handles);
625        }
626        PrimitiveInstanceKind::ImageBorder { data_handle, .. } => {
627            profile_scope!("ImageBorder");
628            let prim_data = &mut data_stores.image_border[*data_handle];
629
630            // TODO: get access to the ninepatch and to check whether we need support
631            // for repetitions in the shader.
632
633            // Update the template this instance references, which may refresh the GPU
634            // cache with any shared template data.
635            prim_data.kind.update(
636                &mut prim_data.common,
637                frame_state
638            );
639        }
640        PrimitiveInstanceKind::Rectangle { data_handle, segment_instance_index, color_binding_index, use_legacy_path, .. } => {
641            profile_scope!("Rectangle");
642
643            if *use_legacy_path {
644                let prim_data = &mut data_stores.prim[*data_handle];
645                prim_data.common.may_need_repetition = false;
646
647                // TODO(gw): Legacy rect rendering path - remove once we support masks on quad prims
648                if *color_binding_index != ColorBindingIndex::INVALID {
649                    match store.color_bindings[*color_binding_index] {
650                        PropertyBinding::Binding(..) => {
651                            // We explicitly invalidate the gpu cache
652                            // if the color is animating.
653                            let gpu_cache_handle =
654                                if *segment_instance_index == SegmentInstanceIndex::INVALID {
655                                    None
656                                } else if *segment_instance_index == SegmentInstanceIndex::UNUSED {
657                                    Some(&prim_data.common.gpu_cache_handle)
658                                } else {
659                                    Some(&scratch.segment_instances[*segment_instance_index].gpu_cache_handle)
660                                };
661                            if let Some(gpu_cache_handle) = gpu_cache_handle {
662                                frame_state.gpu_cache.invalidate(gpu_cache_handle);
663                            }
664                        }
665                        PropertyBinding::Value(..) => {},
666                    }
667                }
668
                // Update the template this instance references, which may refresh the GPU
670                // cache with any shared template data.
671                prim_data.update(
672                    frame_state,
673                    frame_context.scene_properties,
674                );
675
676                write_segment(
677                    *segment_instance_index,
678                    frame_state,
679                    &mut scratch.segments,
680                    &mut scratch.segment_instances,
681                    |request| {
682                        prim_data.kind.write_prim_gpu_blocks(
683                            request,
684                            frame_context.scene_properties,
685                        );
686                    }
687                );
688            } else {
689                let prim_data = &data_stores.prim[*data_handle];
690
691                quad::prepare_quad(
692                    prim_data,
693                    &prim_data.common.prim_rect,
694                    prim_instance_index,
695                    prim_spatial_node_index,
696                    &prim_instance.vis.clip_chain,
697                    device_pixel_scale,
698                    frame_context,
699                    pic_context,
700                    targets,
701                    &data_stores.clip,
702                    frame_state,
703                    pic_state,
704                    scratch,
705                );
706
707                return;
708            }
709        }
710        PrimitiveInstanceKind::YuvImage { data_handle, segment_instance_index, compositor_surface_kind, .. } => {
711            profile_scope!("YuvImage");
712            let prim_data = &mut data_stores.yuv_image[*data_handle];
713            let common_data = &mut prim_data.common;
714            let yuv_image_data = &mut prim_data.kind;
715
716            common_data.may_need_repetition = false;
717
            // Update the template this instance references, which may refresh the GPU
719            // cache with any shared template data.
720            yuv_image_data.update(
721                common_data,
722                compositor_surface_kind.is_composited(),
723                frame_state,
724            );
725
726            write_segment(
727                *segment_instance_index,
728                frame_state,
729                &mut scratch.segments,
730                &mut scratch.segment_instances,
731                |request| {
732                    yuv_image_data.write_prim_gpu_blocks(request);
733                }
734            );
735        }
736        PrimitiveInstanceKind::Image { data_handle, image_instance_index, .. } => {
737            profile_scope!("Image");
738
739            let prim_data = &mut data_stores.image[*data_handle];
740            let common_data = &mut prim_data.common;
741            let image_data = &mut prim_data.kind;
742            let image_instance = &mut store.images[*image_instance_index];
743
744            // Update the template this instance references, which may refresh the GPU
745            // cache with any shared template data.
746            image_data.update(
747                common_data,
748                image_instance,
749                prim_spatial_node_index,
750                frame_state,
751                frame_context,
752                &mut prim_instance.vis,
753            );
754
755            write_segment(
756                image_instance.segment_instance_index,
757                frame_state,
758                &mut scratch.segments,
759                &mut scratch.segment_instances,
760                |request| {
761                    image_data.write_prim_gpu_blocks(&image_instance.adjustment, request);
762                },
763            );
764        }
765        PrimitiveInstanceKind::LinearGradient { data_handle, ref mut visible_tiles_range, use_legacy_path: cached, .. } => {
766            profile_scope!("LinearGradient");
767            let prim_data = &mut data_stores.linear_grad[*data_handle];
768
769            if !*cached {
770                quad::prepare_quad(
771                    prim_data,
772                    &prim_data.common.prim_rect,
773                    prim_instance_index,
774                    prim_spatial_node_index,
775                    &prim_instance.vis.clip_chain,
776                    device_pixel_scale,
777                    frame_context,
778                    pic_context,
779                    targets,
780                    &data_stores.clip,
781                    frame_state,
782                    pic_state,
783                    scratch,
784                );
785
786                return;
787            }
788
            // Update the template this instance references, which may refresh the GPU
790            // cache with any shared template data.
791            prim_data.update(frame_state);
792
793            if prim_data.stretch_size.width >= prim_data.common.prim_rect.width() &&
794                prim_data.stretch_size.height >= prim_data.common.prim_rect.height() {
795
796                prim_data.common.may_need_repetition = false;
797            }
798
799            if prim_data.tile_spacing != LayoutSize::zero() {
800                // We are performing the decomposition on the CPU here, no need to
801                // have it in the shader.
802                prim_data.common.may_need_repetition = false;
803
804                *visible_tiles_range = decompose_repeated_gradient(
805                    &prim_instance.vis,
806                    &prim_data.common.prim_rect,
807                    prim_spatial_node_index,
808                    &prim_data.stretch_size,
809                    &prim_data.tile_spacing,
810                    frame_state,
811                    &mut scratch.gradient_tiles,
812                    &frame_context.spatial_tree,
813                    Some(&mut |_, mut request| {
814                        request.push([
815                            prim_data.start_point.x,
816                            prim_data.start_point.y,
817                            prim_data.end_point.x,
818                            prim_data.end_point.y,
819                        ]);
820                        request.push([
821                            pack_as_float(prim_data.extend_mode as u32),
822                            prim_data.stretch_size.width,
823                            prim_data.stretch_size.height,
824                            0.0,
825                        ]);
826                    }),
827                );
828
829                if visible_tiles_range.is_empty() {
830                    prim_instance.clear_visibility();
831                }
832            }
833
834            let stops_address = GradientGpuBlockBuilder::build(
835                prim_data.reverse_stops,
836                &mut frame_state.frame_gpu_data.f32,
837                &prim_data.stops,
838            );
839
840            // TODO(gw): Consider whether it's worth doing segment building
841            //           for gradient primitives.
842            frame_state.push_prim(
843                &PrimitiveCommand::instance(prim_instance_index, stops_address),
844                prim_spatial_node_index,
845                targets,
846            );
847            return;
848        }
849        PrimitiveInstanceKind::CachedLinearGradient { data_handle, ref mut visible_tiles_range, .. } => {
850            profile_scope!("CachedLinearGradient");
851            let prim_data = &mut data_stores.linear_grad[*data_handle];
852            prim_data.common.may_need_repetition = prim_data.stretch_size.width < prim_data.common.prim_rect.width()
853                || prim_data.stretch_size.height < prim_data.common.prim_rect.height();
854
855            // Update the template this instance references, which may refresh the GPU
856            // cache with any shared template data.
857            prim_data.update(frame_state);
858
859            if prim_data.tile_spacing != LayoutSize::zero() {
860                prim_data.common.may_need_repetition = false;
861
862                *visible_tiles_range = decompose_repeated_gradient(
863                    &prim_instance.vis,
864                    &prim_data.common.prim_rect,
865                    prim_spatial_node_index,
866                    &prim_data.stretch_size,
867                    &prim_data.tile_spacing,
868                    frame_state,
869                    &mut scratch.gradient_tiles,
870                    &frame_context.spatial_tree,
871                    None,
872                );
873
874                if visible_tiles_range.is_empty() {
875                    prim_instance.clear_visibility();
876                }
877            }
878        }
879        PrimitiveInstanceKind::RadialGradient { data_handle, ref mut visible_tiles_range, use_legacy_path: cached, .. } => {
880            profile_scope!("RadialGradient");
881            let prim_data = &mut data_stores.radial_grad[*data_handle];
882
883            if !*cached {
884                quad::prepare_quad(
885                    prim_data,
886                    &prim_data.common.prim_rect,
887                    prim_instance_index,
888                    prim_spatial_node_index,
889                    &prim_instance.vis.clip_chain,
890                    device_pixel_scale,
891                    frame_context,
892                    pic_context,
893                    targets,
894                    &data_stores.clip,
895                    frame_state,
896                    pic_state,
897                    scratch,
898                );
899
900                return;
901            }
902
903            prim_data.common.may_need_repetition = prim_data.stretch_size.width < prim_data.common.prim_rect.width()
904            || prim_data.stretch_size.height < prim_data.common.prim_rect.height();
905
            // Update the template this instance references, which may refresh the GPU
907            // cache with any shared template data.
908            prim_data.update(frame_state);
909
910            if prim_data.tile_spacing != LayoutSize::zero() {
911                prim_data.common.may_need_repetition = false;
912
913                *visible_tiles_range = decompose_repeated_gradient(
914                    &prim_instance.vis,
915                    &prim_data.common.prim_rect,
916                    prim_spatial_node_index,
917                    &prim_data.stretch_size,
918                    &prim_data.tile_spacing,
919                    frame_state,
920                    &mut scratch.gradient_tiles,
921                    &frame_context.spatial_tree,
922                    None,
923                );
924
925                if visible_tiles_range.is_empty() {
926                    prim_instance.clear_visibility();
927                }
928            }
929        }
930        PrimitiveInstanceKind::ConicGradient { data_handle, ref mut visible_tiles_range, use_legacy_path: cached, .. } => {
931            profile_scope!("ConicGradient");
932            let prim_data = &mut data_stores.conic_grad[*data_handle];
933
934            if !*cached {
935                quad::prepare_quad(
936                    prim_data,
937                    &prim_data.common.prim_rect,
938                    prim_instance_index,
939                    prim_spatial_node_index,
940                    &prim_instance.vis.clip_chain,
941                    device_pixel_scale,
942                    frame_context,
943                    pic_context,
944                    targets,
945                    &data_stores.clip,
946                    frame_state,
947                    pic_state,
948                    scratch,
949                );
950
951                return;
952            }
953
954            prim_data.common.may_need_repetition = prim_data.stretch_size.width < prim_data.common.prim_rect.width()
955                || prim_data.stretch_size.height < prim_data.common.prim_rect.height();
956
            // Update the template this instance references, which may refresh the GPU
958            // cache with any shared template data.
959            prim_data.update(frame_state);
960
961            if prim_data.tile_spacing != LayoutSize::zero() {
962                prim_data.common.may_need_repetition = false;
963
964                *visible_tiles_range = decompose_repeated_gradient(
965                    &prim_instance.vis,
966                    &prim_data.common.prim_rect,
967                    prim_spatial_node_index,
968                    &prim_data.stretch_size,
969                    &prim_data.tile_spacing,
970                    frame_state,
971                    &mut scratch.gradient_tiles,
972                    &frame_context.spatial_tree,
973                    None,
974                );
975
976                if visible_tiles_range.is_empty() {
977                    prim_instance.clear_visibility();
978                }
979            }
980
981            // TODO(gw): Consider whether it's worth doing segment building
982            //           for gradient primitives.
983        }
984        PrimitiveInstanceKind::Picture { pic_index, .. } => {
985            profile_scope!("Picture");
986            let pic = &mut store.pictures[pic_index.0];
987
988            if prim_instance.vis.clip_chain.needs_mask {
989                // TODO(gw): Much of the code in this branch could be moved in to a common
990                //           function as we move more primitives to the new clip-mask paths.
991
992                // We are going to split the clip mask tasks in to a list to be rendered
993                // on the source picture, and those to be rendered in to a mask for
994                // compositing the picture in to the target.
995                let mut source_masks = Vec::new();
996                let mut target_masks = Vec::new();
997
998                // For some composite modes, we force target mask due to limitations. That
                // might result in artifacts for these modes (which are already an existing
1000                // problem) but we can handle these cases as follow ups.
1001                let force_target_mask = match pic.composite_mode {
1002                    // We can't currently render over top of these filters as their size
1003                    // may have changed due to downscaling. We could handle this separate
1004                    // case as a follow up.
1005                    Some(PictureCompositeMode::Filter(Filter::Blur { .. })) |
1006                    Some(PictureCompositeMode::Filter(Filter::DropShadows { .. })) |
1007                    Some(PictureCompositeMode::SVGFEGraph( .. )) => {
1008                        true
1009                    }
1010                    _ => {
1011                        false
1012                    }
1013                };
1014
1015                // Work out which clips get drawn in to the source / target mask
1016                for i in 0 .. prim_instance.vis.clip_chain.clips_range.count {
1017                    let clip_instance = frame_state.clip_store.get_instance_from_range(&prim_instance.vis.clip_chain.clips_range, i);
1018
1019                    if !force_target_mask && clip_instance.flags.contains(ClipNodeFlags::SAME_COORD_SYSTEM) {
1020                        source_masks.push(i);
1021                    } else {
1022                        target_masks.push(i);
1023                    }
1024                }
1025
1026                let pic_surface_index = pic.raster_config.as_ref().unwrap().surface_index;
1027                let prim_local_rect = frame_state
1028                    .surfaces[pic_surface_index.0]
1029                    .clipped_local_rect
1030                    .cast_unit();
1031
1032                let pattern = Pattern::color(ColorF::WHITE);
1033
1034                let prim_address_f = quad::write_prim_blocks(
1035                    &mut frame_state.frame_gpu_data.f32,
1036                    prim_local_rect,
1037                    prim_instance.vis.clip_chain.local_clip_rect,
1038                    pattern.base_color,
1039                    pattern.texture_input.task_id,
1040                    &[],
1041                    ScaleOffset::identity(),
1042                );
1043
1044                // Handle masks on the source. This is the common case, and occurs for:
1045                // (a) Any masks in the same coord space as the surface
1046                // (b) All masks if the surface and parent are axis-aligned
1047                if !source_masks.is_empty() {
1048                    let first_clip_node_index = frame_state.clip_store.clip_node_instances.len() as u32;
1049                    let parent_task_id = pic.primary_render_task_id.expect("bug: no composite mode");
1050
1051                    // Construct a new clip node range, also add image-mask dependencies as needed
1052                    for instance in source_masks {
1053                        let clip_instance = frame_state.clip_store.get_instance_from_range(&prim_instance.vis.clip_chain.clips_range, instance);
1054
1055                        for tile in frame_state.clip_store.visible_mask_tiles(clip_instance) {
1056                            frame_state.rg_builder.add_dependency(
1057                                parent_task_id,
1058                                tile.task_id,
1059                            );
1060                        }
1061
1062                        frame_state.clip_store.clip_node_instances.push(clip_instance.clone());
1063                    }
1064
1065                    let clip_node_range = ClipNodeRange {
1066                        first: first_clip_node_index,
1067                        count: frame_state.clip_store.clip_node_instances.len() as u32 - first_clip_node_index,
1068                    };
1069
1070                    let masks = MaskSubPass {
1071                        clip_node_range,
1072                        prim_spatial_node_index,
1073                        prim_address_f,
1074                    };
1075
1076                    // Add the mask as a sub-pass of the picture
1077                    let pic_task_id = pic.primary_render_task_id.expect("uh oh");
1078                    let pic_task = frame_state.rg_builder.get_task_mut(pic_task_id);
1079                    pic_task.add_sub_pass(SubPass::Masks {
1080                        masks,
1081                    });
1082                }
1083
1084                // Handle masks on the target. This is the rare case, and occurs for:
1085                // Masks in parent space when non-axis-aligned to source space
1086                if !target_masks.is_empty() {
1087                    let surface = &frame_state.surfaces[pic_context.surface_index.0];
1088                    let coverage_rect = prim_instance.vis.clip_chain.pic_coverage_rect;
1089
1090                    let device_pixel_scale = surface.device_pixel_scale;
1091                    let raster_spatial_node_index = surface.raster_spatial_node_index;
1092
1093                    let Some(clipped_surface_rect) = surface.get_surface_rect(
1094                        &coverage_rect,
1095                        frame_context.spatial_tree,
1096                    ) else {
1097                        return;
1098                    };
1099
                    // Draw a normal screen-space mask to an alpha target that
1101                    // can be sampled when compositing this picture.
1102                    let empty_task = EmptyTask {
1103                        content_origin: clipped_surface_rect.min.to_f32(),
1104                        device_pixel_scale,
1105                        raster_spatial_node_index,
1106                    };
1107
1108                    let task_size = clipped_surface_rect.size();
1109
1110                    let clip_task_id = frame_state.rg_builder.add().init(RenderTask::new_dynamic(
1111                        task_size,
1112                        RenderTaskKind::Empty(empty_task),
1113                    ));
1114
1115                    // Construct a new clip node range, also add image-mask dependencies as needed
1116                    let first_clip_node_index = frame_state.clip_store.clip_node_instances.len() as u32;
1117                    for instance in target_masks {
1118                        let clip_instance = frame_state.clip_store.get_instance_from_range(&prim_instance.vis.clip_chain.clips_range, instance);
1119
1120                        for tile in frame_state.clip_store.visible_mask_tiles(clip_instance) {
1121                            frame_state.rg_builder.add_dependency(
1122                                clip_task_id,
1123                                tile.task_id,
1124                            );
1125                        }
1126
1127                        frame_state.clip_store.clip_node_instances.push(clip_instance.clone());
1128                    }
1129
1130                    let clip_node_range = ClipNodeRange {
1131                        first: first_clip_node_index,
1132                        count: frame_state.clip_store.clip_node_instances.len() as u32 - first_clip_node_index,
1133                    };
1134
1135                    let masks = MaskSubPass {
1136                        clip_node_range,
1137                        prim_spatial_node_index,
1138                        prim_address_f,
1139                    };
1140
1141                    let clip_task = frame_state.rg_builder.get_task_mut(clip_task_id);
1142                    clip_task.add_sub_pass(SubPass::Masks {
1143                        masks,
1144                    });
1145
1146                    let clip_task_index = ClipTaskIndex(scratch.clip_mask_instances.len() as _);
1147                    scratch.clip_mask_instances.push(ClipMaskKind::Mask(clip_task_id));
1148                    prim_instance.vis.clip_task_index = clip_task_index;
1149                    frame_state.surface_builder.add_child_render_task(
1150                        clip_task_id,
1151                        frame_state.rg_builder,
1152                    );
1153                }
1154            }
1155
1156            if pic.prepare_for_render(
1157                frame_state,
1158                data_stores,
1159            ) {
1160                if let Picture3DContext::In { root_data: None, plane_splitter_index, .. } = pic.context_3d {
1161                    let dirty_rect = frame_state.current_dirty_region().combined;
1162                    let visibility_node = frame_state.current_dirty_region().visibility_spatial_node;
1163                    let splitter = &mut frame_state.plane_splitters[plane_splitter_index.0];
1164                    let surface_index = pic.raster_config.as_ref().unwrap().surface_index;
1165                    let surface = &frame_state.surfaces[surface_index.0];
1166                    let local_prim_rect = surface.clipped_local_rect.cast_unit();
1167
1168                    PicturePrimitive::add_split_plane(
1169                        splitter,
1170                        frame_context.spatial_tree,
1171                        prim_spatial_node_index,
1172                        visibility_node,
1173                        local_prim_rect,
1174                        &prim_instance.vis.clip_chain.local_clip_rect,
1175                        dirty_rect,
1176                        plane_split_anchor,
1177                    );
1178                }
1179            } else {
1180                prim_instance.clear_visibility();
1181            }
1182        }
1183        PrimitiveInstanceKind::BackdropCapture { .. } => {
1184            // Register the owner picture of this backdrop primitive as the
1185            // target for resolve of the sub-graph
1186            frame_state.surface_builder.register_resolve_source();
1187
1188            if frame_context.debug_flags.contains(DebugFlags::HIGHLIGHT_BACKDROP_FILTERS) {
1189                if let Some(world_rect) = pic_state.map_pic_to_vis.map(&prim_instance.vis.clip_chain.pic_coverage_rect) {
1190                    scratch.push_debug_rect(
1191                        world_rect.cast_unit(),
1192                        2,
1193                        crate::debug_colors::MAGENTA,
1194                        ColorF::TRANSPARENT,
1195                    );
1196                }
1197            }
1198        }
1199        PrimitiveInstanceKind::BackdropRender { pic_index, .. } => {
1200            match frame_state.surface_builder.sub_graph_output_map.get(pic_index).cloned() {
1201                Some(sub_graph_output_id) => {
1202                    frame_state.surface_builder.add_child_render_task(
1203                        sub_graph_output_id,
1204                        frame_state.rg_builder,
1205                    );
1206                }
1207                None => {
1208                    // Backdrop capture was found not visible, didn't produce a sub-graph
1209                    // so we can just skip drawing
1210                    prim_instance.clear_visibility();
1211                }
1212            }
1213        }
1214    }
1215
1216    match prim_instance.vis.state {
1217        VisibilityState::Unset => {
1218            panic!("bug: invalid vis state");
1219        }
1220        VisibilityState::Visible { .. } => {
1221            frame_state.push_prim(
1222                &PrimitiveCommand::simple(prim_instance_index),
1223                prim_spatial_node_index,
1224                targets,
1225            );
1226        }
1227        VisibilityState::PassThrough | VisibilityState::Culled => {}
1228    }
1229}
1230
1231
1232fn write_segment<F>(
1233    segment_instance_index: SegmentInstanceIndex,
1234    frame_state: &mut FrameBuildingState,
1235    segments: &mut SegmentStorage,
1236    segment_instances: &mut SegmentInstanceStorage,
1237    f: F,
1238) where F: Fn(&mut GpuDataRequest) {
1239    debug_assert_ne!(segment_instance_index, SegmentInstanceIndex::INVALID);
1240    if segment_instance_index != SegmentInstanceIndex::UNUSED {
1241        let segment_instance = &mut segment_instances[segment_instance_index];
1242
1243        if let Some(mut request) = frame_state.gpu_cache.request(&mut segment_instance.gpu_cache_handle) {
1244            let segments = &segments[segment_instance.segments_range];
1245
1246            f(&mut request);
1247
1248            for segment in segments {
1249                request.write_segment(
1250                    segment.local_rect,
1251                    [0.0; 4],
1252                );
1253            }
1254        }
1255    }
1256}
1257
1258fn decompose_repeated_gradient(
1259    prim_vis: &PrimitiveVisibility,
1260    prim_local_rect: &LayoutRect,
1261    prim_spatial_node_index: SpatialNodeIndex,
1262    stretch_size: &LayoutSize,
1263    tile_spacing: &LayoutSize,
1264    frame_state: &mut FrameBuildingState,
1265    gradient_tiles: &mut GradientTileStorage,
1266    spatial_tree: &SpatialTree,
1267    mut callback: Option<&mut dyn FnMut(&LayoutRect, GpuDataRequest)>,
1268) -> GradientTileRange {
1269    let tile_range = gradient_tiles.open_range();
1270
1271    // Tighten the clip rect because decomposing the repeated image can
1272    // produce primitives that are partially covering the original image
1273    // rect and we want to clip these extra parts out.
1274    if let Some(tight_clip_rect) = prim_vis
1275        .clip_chain
1276        .local_clip_rect
1277        .intersection(prim_local_rect) {
1278
1279        let visible_rect = compute_conservative_visible_rect(
1280            &prim_vis.clip_chain,
1281            frame_state.current_dirty_region().combined,
1282            frame_state.current_dirty_region().visibility_spatial_node,
1283            prim_spatial_node_index,
1284            spatial_tree,
1285        );
1286        let stride = *stretch_size + *tile_spacing;
1287
1288        let repetitions = image_tiling::repetitions(prim_local_rect, &visible_rect, stride);
1289        gradient_tiles.reserve(repetitions.num_repetitions());
1290        for Repetition { origin, .. } in repetitions {
1291            let mut handle = GpuCacheHandle::new();
1292            let rect = LayoutRect::from_origin_and_size(
1293                origin,
1294                *stretch_size,
1295            );
1296
1297            if let Some(callback) = &mut callback {
1298                if let Some(request) = frame_state.gpu_cache.request(&mut handle) {
1299                    callback(&rect, request);
1300                }
1301            }
1302
1303            gradient_tiles.push(VisibleGradientTile {
1304                local_rect: rect,
1305                local_clip_rect: tight_clip_rect,
1306                handle
1307            });
1308        }
1309    }
1310
1311    // At this point if we don't have tiles to show it means we could probably
1312    // have done a better a job at culling during an earlier stage.
1313    gradient_tiles.close_range(tile_range)
1314}
1315
1316
1317fn update_clip_task_for_brush(
1318    instance: &PrimitiveInstance,
1319    prim_origin: &LayoutPoint,
1320    prim_spatial_node_index: SpatialNodeIndex,
1321    root_spatial_node_index: SpatialNodeIndex,
1322    visibility_spatial_node_index: SpatialNodeIndex,
1323    pic_context: &PictureContext,
1324    pic_state: &mut PictureState,
1325    frame_context: &FrameBuildingContext,
1326    frame_state: &mut FrameBuildingState,
1327    prim_store: &PrimitiveStore,
1328    data_stores: &mut DataStores,
1329    segments_store: &mut SegmentStorage,
1330    segment_instances_store: &mut SegmentInstanceStorage,
1331    clip_mask_instances: &mut Vec<ClipMaskKind>,
1332    device_pixel_scale: DevicePixelScale,
1333) -> Option<ClipTaskIndex> {
1334    let segments = match instance.kind {
1335        PrimitiveInstanceKind::BoxShadow { .. } => {
1336            unreachable!("BUG: box-shadows should not hit legacy brush clip path");
1337        }
1338        PrimitiveInstanceKind::Picture { .. } |
1339        PrimitiveInstanceKind::TextRun { .. } |
1340        PrimitiveInstanceKind::Clear { .. } |
1341        PrimitiveInstanceKind::LineDecoration { .. } |
1342        PrimitiveInstanceKind::BackdropCapture { .. } |
1343        PrimitiveInstanceKind::BackdropRender { .. } => {
1344            return None;
1345        }
1346        PrimitiveInstanceKind::Image { image_instance_index, .. } => {
1347            let segment_instance_index = prim_store
1348                .images[image_instance_index]
1349                .segment_instance_index;
1350
1351            if segment_instance_index == SegmentInstanceIndex::UNUSED {
1352                return None;
1353            }
1354
1355            let segment_instance = &segment_instances_store[segment_instance_index];
1356
1357            &segments_store[segment_instance.segments_range]
1358        }
1359        PrimitiveInstanceKind::YuvImage { segment_instance_index, .. } => {
1360            debug_assert!(segment_instance_index != SegmentInstanceIndex::INVALID);
1361
1362            if segment_instance_index == SegmentInstanceIndex::UNUSED {
1363                return None;
1364            }
1365
1366            let segment_instance = &segment_instances_store[segment_instance_index];
1367
1368            &segments_store[segment_instance.segments_range]
1369        }
1370        PrimitiveInstanceKind::Rectangle { use_legacy_path, segment_instance_index, .. } => {
1371            assert!(use_legacy_path);
1372            debug_assert!(segment_instance_index != SegmentInstanceIndex::INVALID);
1373
1374            if segment_instance_index == SegmentInstanceIndex::UNUSED {
1375                return None;
1376            }
1377
1378            let segment_instance = &segment_instances_store[segment_instance_index];
1379
1380            &segments_store[segment_instance.segments_range]
1381        }
1382        PrimitiveInstanceKind::ImageBorder { data_handle, .. } => {
1383            let border_data = &data_stores.image_border[data_handle].kind;
1384
1385            // TODO: This is quite messy - once we remove legacy primitives we
1386            //       can change this to be a tuple match on (instance, template)
1387            border_data.brush_segments.as_slice()
1388        }
1389        PrimitiveInstanceKind::NormalBorder { data_handle, .. } => {
1390            let border_data = &data_stores.normal_border[data_handle].kind;
1391
1392            // TODO: This is quite messy - once we remove legacy primitives we
1393            //       can change this to be a tuple match on (instance, template)
1394            border_data.brush_segments.as_slice()
1395        }
1396        PrimitiveInstanceKind::LinearGradient { data_handle, .. }
1397        | PrimitiveInstanceKind::CachedLinearGradient { data_handle, .. } => {
1398            let prim_data = &data_stores.linear_grad[data_handle];
1399
1400            // TODO: This is quite messy - once we remove legacy primitives we
1401            //       can change this to be a tuple match on (instance, template)
1402            if prim_data.brush_segments.is_empty() {
1403                return None;
1404            }
1405
1406            prim_data.brush_segments.as_slice()
1407        }
1408        PrimitiveInstanceKind::RadialGradient { data_handle, .. } => {
1409            let prim_data = &data_stores.radial_grad[data_handle];
1410
1411            // TODO: This is quite messy - once we remove legacy primitives we
1412            //       can change this to be a tuple match on (instance, template)
1413            if prim_data.brush_segments.is_empty() {
1414                return None;
1415            }
1416
1417            prim_data.brush_segments.as_slice()
1418        }
1419        PrimitiveInstanceKind::ConicGradient { data_handle, .. } => {
1420            let prim_data = &data_stores.conic_grad[data_handle];
1421
1422            // TODO: This is quite messy - once we remove legacy primitives we
1423            //       can change this to be a tuple match on (instance, template)
1424            if prim_data.brush_segments.is_empty() {
1425                return None;
1426            }
1427
1428            prim_data.brush_segments.as_slice()
1429        }
1430    };
1431
1432    // If there are no segments, early out to avoid setting a valid
1433    // clip task instance location below.
1434    if segments.is_empty() {
1435        return None;
1436    }
1437
1438    // Set where in the clip mask instances array the clip mask info
1439    // can be found for this primitive. Each segment will push the
1440    // clip mask information for itself in update_clip_task below.
1441    let clip_task_index = ClipTaskIndex(clip_mask_instances.len() as _);
1442
1443    // If we only built 1 segment, there is no point in re-running
1444    // the clip chain builder. Instead, just use the clip chain
1445    // instance that was built for the main primitive. This is a
1446    // significant optimization for the common case.
1447    if segments.len() == 1 {
1448        let clip_mask_kind = update_brush_segment_clip_task(
1449            &segments[0],
1450            Some(&instance.vis.clip_chain),
1451            root_spatial_node_index,
1452            pic_context.surface_index,
1453            frame_context,
1454            frame_state,
1455            &mut data_stores.clip,
1456            device_pixel_scale,
1457        );
1458        clip_mask_instances.push(clip_mask_kind);
1459    } else {
1460        let dirty_rect = frame_state.current_dirty_region().combined;
1461
1462        for segment in segments {
1463            // Build a clip chain for the smaller segment rect. This will
1464            // often manage to eliminate most/all clips, and sometimes
1465            // clip the segment completely.
1466            frame_state.clip_store.set_active_clips_from_clip_chain(
1467                &instance.vis.clip_chain,
1468                prim_spatial_node_index,
1469                visibility_spatial_node_index,
1470                &frame_context.spatial_tree,
1471                &data_stores.clip,
1472            );
1473
1474            let segment_clip_chain = frame_state
1475                .clip_store
1476                .build_clip_chain_instance(
1477                    segment.local_rect.translate(prim_origin.to_vector()),
1478                    &pic_state.map_local_to_pic,
1479                    &pic_state.map_pic_to_vis,
1480                    &frame_context.spatial_tree,
1481                    frame_state.gpu_cache,
1482                    frame_state.resource_cache,
1483                    device_pixel_scale,
1484                    &dirty_rect,
1485                    &mut data_stores.clip,
1486                    frame_state.rg_builder,
1487                    false,
1488                );
1489
1490            let clip_mask_kind = update_brush_segment_clip_task(
1491                &segment,
1492                segment_clip_chain.as_ref(),
1493                root_spatial_node_index,
1494                pic_context.surface_index,
1495                frame_context,
1496                frame_state,
1497                &mut data_stores.clip,
1498                device_pixel_scale,
1499            );
1500            clip_mask_instances.push(clip_mask_kind);
1501        }
1502    }
1503
1504    Some(clip_task_index)
1505}
1506
1507pub fn update_clip_task(
1508    instance: &mut PrimitiveInstance,
1509    prim_origin: &LayoutPoint,
1510    prim_spatial_node_index: SpatialNodeIndex,
1511    root_spatial_node_index: SpatialNodeIndex,
1512    visibility_spatial_node_index: SpatialNodeIndex,
1513    pic_context: &PictureContext,
1514    pic_state: &mut PictureState,
1515    frame_context: &FrameBuildingContext,
1516    frame_state: &mut FrameBuildingState,
1517    prim_store: &mut PrimitiveStore,
1518    data_stores: &mut DataStores,
1519    scratch: &mut PrimitiveScratchBuffer,
1520) -> bool {
1521    let device_pixel_scale = frame_state.surfaces[pic_context.surface_index.0].device_pixel_scale;
1522
1523    build_segments_if_needed(
1524        instance,
1525        frame_state,
1526        prim_store,
1527        data_stores,
1528        &mut scratch.segments,
1529        &mut scratch.segment_instances,
1530    );
1531
1532    // First try to  render this primitive's mask using optimized brush rendering.
1533    instance.vis.clip_task_index = if let Some(clip_task_index) = update_clip_task_for_brush(
1534        instance,
1535        prim_origin,
1536        prim_spatial_node_index,
1537        root_spatial_node_index,
1538        visibility_spatial_node_index,
1539        pic_context,
1540        pic_state,
1541        frame_context,
1542        frame_state,
1543        prim_store,
1544        data_stores,
1545        &mut scratch.segments,
1546        &mut scratch.segment_instances,
1547        &mut scratch.clip_mask_instances,
1548        device_pixel_scale,
1549    ) {
1550        clip_task_index
1551    } else if instance.vis.clip_chain.needs_mask {
1552        // Get a minimal device space rect, clipped to the screen that we
1553        // need to allocate for the clip mask, as well as interpolated
1554        // snap offsets.
1555        let unadjusted_device_rect = match frame_state.surfaces[pic_context.surface_index.0].get_surface_rect(
1556            &instance.vis.clip_chain.pic_coverage_rect,
1557            frame_context.spatial_tree,
1558        ) {
1559            Some(rect) => rect,
1560            None => return false,
1561        };
1562
1563        let (device_rect, device_pixel_scale) = adjust_mask_scale_for_max_size(
1564            unadjusted_device_rect,
1565            device_pixel_scale,
1566        );
1567
1568        if device_rect.size().to_i32().is_empty() {
1569            log::warn!("Bad adjusted clip task size {:?} (was {:?})", device_rect.size(), unadjusted_device_rect.size());
1570            return false;
1571        }
1572
1573        let clip_task_id = RenderTaskKind::new_mask(
1574            device_rect,
1575            instance.vis.clip_chain.clips_range,
1576            root_spatial_node_index,
1577            frame_state.clip_store,
1578            frame_state.gpu_cache,
1579            &mut frame_state.frame_gpu_data.f32,
1580            frame_state.resource_cache,
1581            frame_state.rg_builder,
1582            &mut data_stores.clip,
1583            device_pixel_scale,
1584            frame_context.fb_config,
1585            &mut frame_state.surface_builder,
1586        );
1587        // Set the global clip mask instance for this primitive.
1588        let clip_task_index = ClipTaskIndex(scratch.clip_mask_instances.len() as _);
1589        scratch.clip_mask_instances.push(ClipMaskKind::Mask(clip_task_id));
1590        instance.vis.clip_task_index = clip_task_index;
1591        frame_state.surface_builder.add_child_render_task(
1592            clip_task_id,
1593            frame_state.rg_builder,
1594        );
1595        clip_task_index
1596    } else {
1597        ClipTaskIndex::INVALID
1598    };
1599
1600    true
1601}
1602
1603/// Write out to the clip mask instances array the correct clip mask
1604/// config for this segment.
1605pub fn update_brush_segment_clip_task(
1606    segment: &BrushSegment,
1607    clip_chain: Option<&ClipChainInstance>,
1608    root_spatial_node_index: SpatialNodeIndex,
1609    surface_index: SurfaceIndex,
1610    frame_context: &FrameBuildingContext,
1611    frame_state: &mut FrameBuildingState,
1612    clip_data_store: &mut ClipDataStore,
1613    device_pixel_scale: DevicePixelScale,
1614) -> ClipMaskKind {
1615    let clip_chain = match clip_chain {
1616        Some(chain) => chain,
1617        None => return ClipMaskKind::Clipped,
1618    };
1619    if !clip_chain.needs_mask ||
1620       (!segment.may_need_clip_mask && !clip_chain.has_non_local_clips) {
1621        return ClipMaskKind::None;
1622    }
1623
1624    let unadjusted_device_rect = match frame_state.surfaces[surface_index.0].get_surface_rect(
1625        &clip_chain.pic_coverage_rect,
1626        frame_context.spatial_tree,
1627    ) {
1628        Some(rect) => rect,
1629        None => return ClipMaskKind::Clipped,
1630    };
1631
1632    let (device_rect, device_pixel_scale) = adjust_mask_scale_for_max_size(unadjusted_device_rect, device_pixel_scale);
1633
1634    if device_rect.size().to_i32().is_empty() {
1635        log::warn!("Bad adjusted mask size {:?} (was {:?})", device_rect.size(), unadjusted_device_rect.size());
1636        return ClipMaskKind::Clipped;
1637    }
1638
1639    let clip_task_id = RenderTaskKind::new_mask(
1640        device_rect,
1641        clip_chain.clips_range,
1642        root_spatial_node_index,
1643        frame_state.clip_store,
1644        frame_state.gpu_cache,
1645        &mut frame_state.frame_gpu_data.f32,
1646        frame_state.resource_cache,
1647        frame_state.rg_builder,
1648        clip_data_store,
1649        device_pixel_scale,
1650        frame_context.fb_config,
1651        &mut frame_state.surface_builder,
1652    );
1653
1654    frame_state.surface_builder.add_child_render_task(
1655        clip_task_id,
1656        frame_state.rg_builder,
1657    );
1658    ClipMaskKind::Mask(clip_task_id)
1659}
1660
1661
1662fn write_brush_segment_description(
1663    prim_local_rect: LayoutRect,
1664    prim_local_clip_rect: LayoutRect,
1665    clip_chain: &ClipChainInstance,
1666    segment_builder: &mut SegmentBuilder,
1667    clip_store: &ClipStore,
1668    data_stores: &DataStores,
1669) -> bool {
1670    // If the brush is small, we want to skip building segments
1671    // and just draw it as a single primitive with clip mask.
1672    if prim_local_rect.area() < MIN_BRUSH_SPLIT_AREA {
1673        return false;
1674    }
1675
1676    // NOTE: The local clip rect passed to the segment builder must be the unmodified
1677    //       local clip rect from the clip leaf, not the local_clip_rect from the
1678    //       clip-chain instance. The clip-chain instance may have been reduced by
1679    //       clips that are in the same coordinate system, but not the same spatial
1680    //       node as the primitive. This can result in the clip for the segment building
1681    //       being affected by scrolling clips, which we can't handle (since the segments
1682    //       are not invalidated during frame building after being built).
1683    segment_builder.initialize(
1684        prim_local_rect,
1685        None,
1686        prim_local_clip_rect,
1687    );
1688
1689    // Segment the primitive on all the local-space clip sources that we can.
1690    for i in 0 .. clip_chain.clips_range.count {
1691        let clip_instance = clip_store
1692            .get_instance_from_range(&clip_chain.clips_range, i);
1693        let clip_node = &data_stores.clip[clip_instance.handle];
1694
1695        // If this clip item is positioned by another positioning node, its relative position
1696        // could change during scrolling. This means that we would need to resegment. Instead
1697        // of doing that, only segment with clips that have the same positioning node.
1698        // TODO(mrobinson, #2858): It may make sense to include these nodes, resegmenting only
1699        // when necessary while scrolling.
1700        if !clip_instance.flags.contains(ClipNodeFlags::SAME_SPATIAL_NODE) {
1701            continue;
1702        }
1703
1704        let (local_clip_rect, radius, mode) = match clip_node.item.kind {
1705            ClipItemKind::RoundedRectangle { rect, radius, mode } => {
1706                (rect, Some(radius), mode)
1707            }
1708            ClipItemKind::Rectangle { rect, mode } => {
1709                (rect, None, mode)
1710            }
1711            ClipItemKind::BoxShadow { ref source } => {
1712                // For inset box shadows, we can clip out any
1713                // pixels that are inside the shadow region
1714                // and are beyond the inner rect, as they can't
1715                // be affected by the blur radius.
1716                let inner_clip_mode = match source.clip_mode {
1717                    BoxShadowClipMode::Outset => None,
1718                    BoxShadowClipMode::Inset => Some(ClipMode::ClipOut),
1719                };
1720
1721                // Push a region into the segment builder where the
1722                // box-shadow can have an effect on the result. This
1723                // ensures clip-mask tasks get allocated for these
1724                // pixel regions, even if no other clips affect them.
1725                segment_builder.push_mask_region(
1726                    source.prim_shadow_rect,
1727                    source.prim_shadow_rect.inflate(
1728                        -0.5 * source.original_alloc_size.width,
1729                        -0.5 * source.original_alloc_size.height,
1730                    ),
1731                    inner_clip_mode,
1732                );
1733
1734                continue;
1735            }
1736            ClipItemKind::Image { .. } => {
1737                panic!("bug: masks not supported on old segment path");
1738            }
1739        };
1740
1741        segment_builder.push_clip_rect(local_clip_rect, radius, mode);
1742    }
1743
1744    true
1745}
1746
/// Lazily build and cache brush segments for a primitive instance the first
/// time it is prepared (its segment index is still INVALID). Primitives that
/// don't support segmentation get their index set to UNUSED and are skipped
/// on later calls.
fn build_segments_if_needed(
    instance: &mut PrimitiveInstance,
    frame_state: &mut FrameBuildingState,
    prim_store: &mut PrimitiveStore,
    data_stores: &DataStores,
    segments_store: &mut SegmentStorage,
    segment_instances_store: &mut SegmentInstanceStorage,
) {
    let prim_clip_chain = &instance.vis.clip_chain;

    // Usually, the primitive rect can be found from information
    // in the instance and primitive template.
    let prim_local_rect = data_stores.get_local_prim_rect(
        instance,
        &prim_store.pictures,
        frame_state.surfaces,
    );

    // Resolve a mutable reference to where this kind of primitive stores its
    // segment instance index; bail out for kinds that don't use segments.
    let segment_instance_index = match instance.kind {
        PrimitiveInstanceKind::Rectangle { use_legacy_path, ref mut segment_instance_index, .. } => {
            assert!(use_legacy_path);
            segment_instance_index
        }
        PrimitiveInstanceKind::YuvImage { ref mut segment_instance_index, compositor_surface_kind, .. } => {
            // Only use segments for YUV images if not drawing as a compositor surface
            if !compositor_surface_kind.supports_segments() {
                *segment_instance_index = SegmentInstanceIndex::UNUSED;
                return;
            }

            segment_instance_index
        }
        PrimitiveInstanceKind::Image { data_handle, image_instance_index, compositor_surface_kind, .. } => {
            let image_data = &data_stores.image[data_handle].kind;
            let image_instance = &mut prim_store.images[image_instance_index];

            //Note: tiled images don't support automatic segmentation,
            // they strictly produce one segment per visible tile instead.
            if !compositor_surface_kind.supports_segments() ||
                frame_state.resource_cache
                    .get_image_properties(image_data.key)
                    .and_then(|properties| properties.tiling)
                    .is_some()
            {
                image_instance.segment_instance_index = SegmentInstanceIndex::UNUSED;
                return;
            }
            &mut image_instance.segment_instance_index
        }
        PrimitiveInstanceKind::Picture { .. } |
        PrimitiveInstanceKind::TextRun { .. } |
        PrimitiveInstanceKind::NormalBorder { .. } |
        PrimitiveInstanceKind::ImageBorder { .. } |
        PrimitiveInstanceKind::Clear { .. } |
        PrimitiveInstanceKind::LinearGradient { .. } |
        PrimitiveInstanceKind::CachedLinearGradient { .. } |
        PrimitiveInstanceKind::RadialGradient { .. } |
        PrimitiveInstanceKind::ConicGradient { .. } |
        PrimitiveInstanceKind::LineDecoration { .. } |
        PrimitiveInstanceKind::BackdropCapture { .. } |
        PrimitiveInstanceKind::BackdropRender { .. } => {
            // These primitives don't support / need segments.
            return;
        }
        PrimitiveInstanceKind::BoxShadow { .. } => {
            unreachable!("BUG: box-shadows should not hit legacy brush clip path");
        }
    };

    // INVALID means segments have never been built for this instance.
    if *segment_instance_index == SegmentInstanceIndex::INVALID {
        let mut segments: SmallVec<[BrushSegment; 8]> = SmallVec::new();
        let clip_leaf = frame_state.clip_tree.get_leaf(instance.clip_leaf_id);

        if write_brush_segment_description(
            prim_local_rect,
            clip_leaf.local_clip_rect,
            prim_clip_chain,
            &mut frame_state.segment_builder,
            frame_state.clip_store,
            data_stores,
        ) {
            // Collect segments in primitive-local space (rects are produced
            // relative to the primitive origin, hence the translate).
            frame_state.segment_builder.build(|segment| {
                segments.push(
                    BrushSegment::new(
                        segment.rect.translate(-prim_local_rect.min.to_vector()),
                        segment.has_mask,
                        segment.edge_flags,
                        [0.0; 4],
                        BrushFlags::PERSPECTIVE_INTERPOLATION,
                    ),
                );
            });
        }

        // If only a single segment is produced, there is no benefit to writing
        // a segment instance array. Instead, just use the main primitive rect
        // written into the GPU cache.
        // TODO(gw): This is (sortof) a bandaid - due to a limitation in the current
        //           brush encoding, we can only support a total of up to 2^16 segments.
        //           This should be (more than) enough for any real world case, so for
        //           now we can handle this by skipping cases where we were generating
        //           segments where there is no benefit. The long term / robust fix
        //           for this is to move the segment building to be done as a more
        //           limited nine-patch system during scene building, removing arbitrary
        //           segmentation during frame-building (see bug #1617491).
        if segments.len() <= 1 {
            *segment_instance_index = SegmentInstanceIndex::UNUSED;
        } else {
            let segments_range = segments_store.extend(segments);

            let instance = SegmentedInstance {
                segments_range,
                gpu_cache_handle: GpuCacheHandle::new(),
            };

            *segment_instance_index = segment_instances_store.push(instance);
        };
    }
}
1866
1867// Ensures that the size of mask render tasks are within MAX_MASK_SIZE.
1868fn adjust_mask_scale_for_max_size(device_rect: DeviceIntRect, device_pixel_scale: DevicePixelScale) -> (DeviceIntRect, DevicePixelScale) {
1869    if device_rect.width() > MAX_MASK_SIZE || device_rect.height() > MAX_MASK_SIZE {
1870        // round_out will grow by 1 integer pixel if origin is on a
1871        // fractional position, so keep that margin for error with -1:
1872        let device_rect_f = device_rect.to_f32();
1873        let scale = (MAX_MASK_SIZE - 1) as f32 /
1874            f32::max(device_rect_f.width(), device_rect_f.height());
1875        let new_device_pixel_scale = device_pixel_scale * Scale::new(scale);
1876        let new_device_rect = (device_rect_f * Scale::new(scale))
1877            .round_out()
1878            .to_i32();
1879        (new_device_rect, new_device_pixel_scale)
1880    } else {
1881        (device_rect, device_pixel_scale)
1882    }
1883}
1884
1885impl CompositorSurfaceKind {
1886    /// Returns true if the compositor surface strategy supports segment rendering
1887    fn supports_segments(&self) -> bool {
1888        match self {
1889            CompositorSurfaceKind::Underlay | CompositorSurfaceKind::Overlay => false,
1890            CompositorSurfaceKind::Blit => true,
1891        }
1892    }
1893}