webrender/renderer/mod.rs

/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

//! The high-level module responsible for interfacing with the GPU.
//!
//! Much of WebRender's design is driven by separating work into different
//! threads. To avoid the complexities of multi-threaded GPU access, we restrict
//! all communication with the GPU to one thread, the render thread. But since
//! issuing GPU commands is often a bottleneck, we move everything else (i.e.
//! the computation of what commands to issue) to another thread, the
//! RenderBackend thread. The RenderBackend, in turn, may delegate work to other
//! threads (like the SceneBuilder threads or Rayon workers), but the
//! Render-vs-RenderBackend distinction is the most important.
//!
//! The consumer is responsible for initializing the render thread before
//! calling into WebRender, which means that this module also serves as the
//! initial entry point into WebRender, and is responsible for spawning the
//! various other threads discussed above. That said, WebRender initialization
//! returns both the `Renderer` instance as well as a channel for communicating
//! directly with the `RenderBackend`. Aside from a few high-level operations
//! like 'render now', most of the interesting commands from the consumer go over
//! that channel and operate on the `RenderBackend`.
//!
//! ## Space conversion guidelines
//! At this stage, we should be operating with `DevicePixel` and `FramebufferPixel` only.
//! "Framebuffer" space represents the final destination of our rendering,
//! and it happens to be Y-flipped on OpenGL. The conversion is done as follows:
//!   - for rasterized primitives, the orthographic projection transforms
//!     the content rectangle to -1 to 1
//!   - the viewport transformation is set up to map the whole range to
//!     the framebuffer rectangle provided by the document view, stored in `DrawTarget`
//!   - all the direct framebuffer operations, like blitting, reading pixels, and setting
//!     up the scissor, accept already-transformed coordinates, which we can get by
//!     calling `DrawTarget::to_framebuffer_rect`
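//!
//! A minimal sketch of that last step (illustrative only: it assumes a
//! `draw_target: DrawTarget`, a `device: &mut Device` and a `rect: DeviceIntRect`
//! in scope, and the exact `Device` method names may differ):
//!
//! ```ignore
//! // Direct framebuffer operations take framebuffer-space (possibly Y-flipped)
//! // coordinates, so convert from device space first.
//! let fb_rect = draw_target.to_framebuffer_rect(rect);
//! device.set_scissor_rect(fb_rect);
//! ```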

use api::{ClipMode, ColorF, ColorU, MixBlendMode};
use api::{DocumentId, Epoch, ExternalImageHandler, RenderReasons};
#[cfg(feature = "replay")]
use api::ExternalImageId;
use api::{ExternalImageSource, ExternalImageType, ImageFormat, PremultipliedColorF};
use api::{PipelineId, ImageRendering, Checkpoint, NotificationRequest, ImageBufferKind};
#[cfg(feature = "replay")]
use api::ExternalImage;
use api::FramePublishId;
use api::precise_time_ns;
use api::units::*;
use api::channel::{Sender, Receiver};
pub use api::DebugFlags;
use core::time::Duration;

use crate::pattern::PatternKind;
use crate::render_api::{DebugCommand, ApiMsg, MemoryReport};
use crate::batch::{AlphaBatchContainer, BatchKind, BatchFeatures, BatchTextures, BrushBatchKind, ClipBatchList};
use crate::batch::ClipMaskInstanceList;
#[cfg(any(feature = "capture", feature = "replay"))]
use crate::capture::{CaptureConfig, ExternalCaptureImage, PlainExternalImage};
use crate::composite::{CompositeState, CompositeTileSurface, CompositorInputLayer, CompositorSurfaceTransform, ResolvedExternalSurface};
use crate::composite::{CompositorKind, Compositor, NativeTileId, CompositeFeatures, CompositeSurfaceFormat, ResolvedExternalSurfaceColorData};
use crate::composite::{CompositorConfig, NativeSurfaceOperationDetails, NativeSurfaceId, NativeSurfaceOperation, ClipRadius};
use crate::composite::TileKind;
use crate::segment::SegmentBuilder;
use crate::{debug_colors, CompositorInputConfig, CompositorSurfaceUsage};
use crate::device::{DepthFunction, Device, DrawTarget, ExternalTexture, GpuFrameId, UploadPBOPool};
use crate::device::{ReadTarget, ShaderError, Texture, TextureFilter, TextureFlags, TextureSlot, Texel};
use crate::device::query::{GpuSampler, GpuTimer};
#[cfg(feature = "capture")]
use crate::device::FBOId;
use crate::debug_item::DebugItem;
use crate::frame_builder::Frame;
use glyph_rasterizer::GlyphFormat;
use crate::gpu_cache::{GpuCacheUpdate, GpuCacheUpdateList};
use crate::gpu_cache::{GpuCacheDebugChunk, GpuCacheDebugCmd};
use crate::gpu_types::{ScalingInstance, SvgFilterInstance, SVGFEFilterInstance, CopyInstance, PrimitiveInstanceData};
use crate::gpu_types::{BlurInstance, ClearInstance, CompositeInstance};
use crate::internal_types::{TextureSource, TextureSourceExternal, TextureCacheCategory, FrameId, FrameVec};
#[cfg(any(feature = "capture", feature = "replay"))]
use crate::internal_types::DebugOutput;
use crate::internal_types::{CacheTextureId, FastHashMap, FastHashSet, RenderedDocument, ResultMsg};
use crate::internal_types::{TextureCacheAllocInfo, TextureCacheAllocationKind, TextureUpdateList};
use crate::internal_types::{RenderTargetInfo, Swizzle, DeferredResolveIndex};
use crate::picture::ResolvedSurfaceTexture;
use crate::prim_store::DeferredResolve;
use crate::profiler::{self, GpuProfileTag, TransactionProfile};
use crate::profiler::{Profiler, add_event_marker, add_text_marker, thread_is_being_profiled};
use crate::device::query::GpuProfiler;
use crate::render_target::ResolveOp;
use crate::render_task_graph::RenderTaskGraph;
use crate::render_task::{RenderTask, RenderTaskKind, ReadbackTask};
use crate::screen_capture::AsyncScreenshotGrabber;
use crate::render_target::{RenderTarget, PictureCacheTarget, PictureCacheTargetKind};
use crate::render_target::{RenderTargetKind, BlitJob};
use crate::telemetry::Telemetry;
use crate::tile_cache::PictureCacheDebugInfo;
use crate::util::drain_filter;
use crate::rectangle_occlusion as occlusion;
use upload::{upload_to_texture_cache, UploadTexturePool};
use init::*;

use euclid::{rect, Transform3D, Scale, default};
use gleam::gl;
use malloc_size_of::MallocSizeOfOps;

#[cfg(feature = "replay")]
use std::sync::Arc;

use std::{
    cell::RefCell,
    collections::VecDeque,
    f32,
    ffi::c_void,
    mem,
    num::NonZeroUsize,
    path::PathBuf,
    rc::Rc,
};
#[cfg(any(feature = "capture", feature = "replay"))]
use std::collections::hash_map::Entry;

mod debug;
mod gpu_buffer;
mod gpu_cache;
mod shade;
mod vertex;
mod upload;
pub(crate) mod init;

pub use debug::DebugRenderer;
pub use shade::{PendingShadersToPrecache, Shaders, SharedShaders};
pub use vertex::{desc, VertexArrayKind, MAX_VERTEX_TEXTURE_WIDTH};
pub use gpu_buffer::{GpuBuffer, GpuBufferF, GpuBufferBuilderF, GpuBufferI, GpuBufferBuilderI, GpuBufferAddress, GpuBufferBuilder};

/// The size of the array of each type of vertex data texture that
/// is round-robin-ed each frame during bind_frame_data. Doing this
/// helps avoid driver stalls while updating the texture in some
/// drivers. These textures are typically very small (e.g. < 16 kB),
/// so this is not a huge waste of memory. Despite that, this is a
/// short-term solution - we want to find a better way to provide
/// this frame data, which will likely involve some combination of
/// UBO/SSBO usage. Although this only affects some platforms, it's
/// enabled on all platforms to reduce testing differences between
/// platforms.
pub const VERTEX_DATA_TEXTURE_COUNT: usize = 3;
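
// A minimal sketch of the round-robin selection described above (illustrative;
// `vertex_data_textures` and `current_vertex_data_textures` are the `Renderer`
// fields defined later in this module):
//
//     self.current_vertex_data_textures =
//         (self.current_vertex_data_textures + 1) % VERTEX_DATA_TEXTURE_COUNT;
//     let textures = &mut self.vertex_data_textures[self.current_vertex_data_textures];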

/// Number of GPU blocks per UV rectangle provided for an image.
pub const BLOCKS_PER_UV_RECT: usize = 2;

const GPU_TAG_BRUSH_OPACITY: GpuProfileTag = GpuProfileTag {
    label: "B_Opacity",
    color: debug_colors::DARKMAGENTA,
};
const GPU_TAG_BRUSH_LINEAR_GRADIENT: GpuProfileTag = GpuProfileTag {
    label: "B_LinearGradient",
    color: debug_colors::POWDERBLUE,
};
const GPU_TAG_BRUSH_YUV_IMAGE: GpuProfileTag = GpuProfileTag {
    label: "B_YuvImage",
    color: debug_colors::DARKGREEN,
};
const GPU_TAG_BRUSH_MIXBLEND: GpuProfileTag = GpuProfileTag {
    label: "B_MixBlend",
    color: debug_colors::MAGENTA,
};
const GPU_TAG_BRUSH_BLEND: GpuProfileTag = GpuProfileTag {
    label: "B_Blend",
    color: debug_colors::ORANGE,
};
const GPU_TAG_BRUSH_IMAGE: GpuProfileTag = GpuProfileTag {
    label: "B_Image",
    color: debug_colors::SPRINGGREEN,
};
const GPU_TAG_BRUSH_SOLID: GpuProfileTag = GpuProfileTag {
    label: "B_Solid",
    color: debug_colors::RED,
};
const GPU_TAG_CACHE_CLIP: GpuProfileTag = GpuProfileTag {
    label: "C_Clip",
    color: debug_colors::PURPLE,
};
const GPU_TAG_CACHE_BORDER: GpuProfileTag = GpuProfileTag {
    label: "C_Border",
    color: debug_colors::CORNSILK,
};
const GPU_TAG_CACHE_LINE_DECORATION: GpuProfileTag = GpuProfileTag {
    label: "C_LineDecoration",
    color: debug_colors::YELLOWGREEN,
};
const GPU_TAG_CACHE_FAST_LINEAR_GRADIENT: GpuProfileTag = GpuProfileTag {
    label: "C_FastLinearGradient",
    color: debug_colors::BROWN,
};
const GPU_TAG_CACHE_LINEAR_GRADIENT: GpuProfileTag = GpuProfileTag {
    label: "C_LinearGradient",
    color: debug_colors::BROWN,
};
const GPU_TAG_RADIAL_GRADIENT: GpuProfileTag = GpuProfileTag {
    label: "C_RadialGradient",
    color: debug_colors::BROWN,
};
const GPU_TAG_CONIC_GRADIENT: GpuProfileTag = GpuProfileTag {
    label: "C_ConicGradient",
    color: debug_colors::BROWN,
};
const GPU_TAG_SETUP_TARGET: GpuProfileTag = GpuProfileTag {
    label: "target init",
    color: debug_colors::SLATEGREY,
};
const GPU_TAG_SETUP_DATA: GpuProfileTag = GpuProfileTag {
    label: "data init",
    color: debug_colors::LIGHTGREY,
};
const GPU_TAG_PRIM_SPLIT_COMPOSITE: GpuProfileTag = GpuProfileTag {
    label: "SplitComposite",
    color: debug_colors::DARKBLUE,
};
const GPU_TAG_PRIM_TEXT_RUN: GpuProfileTag = GpuProfileTag {
    label: "TextRun",
    color: debug_colors::BLUE,
};
const GPU_TAG_PRIMITIVE: GpuProfileTag = GpuProfileTag {
    label: "Primitive",
    color: debug_colors::RED,
};
const GPU_TAG_INDIRECT_PRIM: GpuProfileTag = GpuProfileTag {
    label: "Primitive (indirect)",
    color: debug_colors::YELLOWGREEN,
};
const GPU_TAG_INDIRECT_MASK: GpuProfileTag = GpuProfileTag {
    label: "Mask (indirect)",
    color: debug_colors::IVORY,
};
const GPU_TAG_BLUR: GpuProfileTag = GpuProfileTag {
    label: "Blur",
    color: debug_colors::VIOLET,
};
const GPU_TAG_BLIT: GpuProfileTag = GpuProfileTag {
    label: "Blit",
    color: debug_colors::LIME,
};
const GPU_TAG_SCALE: GpuProfileTag = GpuProfileTag {
    label: "Scale",
    color: debug_colors::GHOSTWHITE,
};
const GPU_SAMPLER_TAG_ALPHA: GpuProfileTag = GpuProfileTag {
    label: "Alpha targets",
    color: debug_colors::BLACK,
};
const GPU_SAMPLER_TAG_OPAQUE: GpuProfileTag = GpuProfileTag {
    label: "Opaque pass",
    color: debug_colors::BLACK,
};
const GPU_SAMPLER_TAG_TRANSPARENT: GpuProfileTag = GpuProfileTag {
    label: "Transparent pass",
    color: debug_colors::BLACK,
};
const GPU_TAG_SVG_FILTER: GpuProfileTag = GpuProfileTag {
    label: "SvgFilter",
    color: debug_colors::LEMONCHIFFON,
};
const GPU_TAG_SVG_FILTER_NODES: GpuProfileTag = GpuProfileTag {
    label: "SvgFilterNodes",
    color: debug_colors::LEMONCHIFFON,
};
const GPU_TAG_COMPOSITE: GpuProfileTag = GpuProfileTag {
    label: "Composite",
    color: debug_colors::TOMATO,
};

// Key used when adding compositing tiles to the occlusion tracker.
// An entire tile may have a mask, but we may segment it into masked
// and non-masked regions, so we need to track which of the occlusion
// tracker outputs need a mask.
#[derive(Debug, Copy, Clone)]
struct OcclusionItemKey {
    tile_index: usize,
    needs_mask: bool,
}

// Defines the content that we will draw to a given swapchain / layer, calculated
// after occlusion culling.
struct SwapChainLayer {
    occlusion: occlusion::FrontToBackBuilder<OcclusionItemKey>,
    clear_tiles: Vec<occlusion::Item<OcclusionItemKey>>,
}

/// The clear color used for the texture cache when the debug display is enabled.
/// We use a shade of blue so that we can still identify completely blue items in
/// the texture cache.
pub const TEXTURE_CACHE_DBG_CLEAR_COLOR: [f32; 4] = [0.0, 0.0, 0.8, 1.0];

impl BatchKind {
    fn sampler_tag(&self) -> GpuProfileTag {
        match *self {
            BatchKind::SplitComposite => GPU_TAG_PRIM_SPLIT_COMPOSITE,
            BatchKind::Brush(kind) => {
                match kind {
                    BrushBatchKind::Solid => GPU_TAG_BRUSH_SOLID,
                    BrushBatchKind::Image(..) => GPU_TAG_BRUSH_IMAGE,
                    BrushBatchKind::Blend => GPU_TAG_BRUSH_BLEND,
                    BrushBatchKind::MixBlend { .. } => GPU_TAG_BRUSH_MIXBLEND,
                    BrushBatchKind::YuvImage(..) => GPU_TAG_BRUSH_YUV_IMAGE,
                    BrushBatchKind::LinearGradient => GPU_TAG_BRUSH_LINEAR_GRADIENT,
                    BrushBatchKind::Opacity => GPU_TAG_BRUSH_OPACITY,
                }
            }
            BatchKind::TextRun(_) => GPU_TAG_PRIM_TEXT_RUN,
            BatchKind::Quad(PatternKind::ColorOrTexture) => GPU_TAG_PRIMITIVE,
            BatchKind::Quad(PatternKind::RadialGradient) => GPU_TAG_RADIAL_GRADIENT,
            BatchKind::Quad(PatternKind::ConicGradient) => GPU_TAG_CONIC_GRADIENT,
            BatchKind::Quad(PatternKind::Mask) => GPU_TAG_INDIRECT_MASK,
        }
    }
}

fn flag_changed(before: DebugFlags, after: DebugFlags, select: DebugFlags) -> Option<bool> {
    if before & select != after & select {
        Some(after.contains(select))
    } else {
        None
    }
}
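
// Illustrative use of `flag_changed` (a sketch with hypothetical `old_flags`
// and `new_flags` locals; not part of the build):
//
//     if let Some(enabled) = flag_changed(old_flags, new_flags, DebugFlags::PROFILER_DBG) {
//         // The profiler overlay was just toggled to `enabled`.
//     }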

#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub enum ShaderColorMode {
    Alpha = 0,
    SubpixelDualSource = 1,
    BitmapShadow = 2,
    ColorBitmap = 3,
    Image = 4,
    MultiplyDualSource = 5,
}

impl From<GlyphFormat> for ShaderColorMode {
    fn from(format: GlyphFormat) -> ShaderColorMode {
        match format {
            GlyphFormat::Alpha |
            GlyphFormat::TransformedAlpha |
            GlyphFormat::Bitmap => ShaderColorMode::Alpha,
            GlyphFormat::Subpixel | GlyphFormat::TransformedSubpixel => {
                panic!("Subpixel glyph formats must be handled separately.");
            }
            GlyphFormat::ColorBitmap => ShaderColorMode::ColorBitmap,
        }
    }
}

/// Enumeration of the texture samplers used across the various WebRender shaders.
///
/// Each variant corresponds to a uniform declared in shader source. We only bind
/// the variants we need for a given shader, so not every variant is bound for every
/// batch.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub(crate) enum TextureSampler {
    Color0,
    Color1,
    Color2,
    GpuCache,
    TransformPalette,
    RenderTasks,
    Dither,
    PrimitiveHeadersF,
    PrimitiveHeadersI,
    ClipMask,
    GpuBufferF,
    GpuBufferI,
}
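
// Illustrative GLSL counterparts for a few of the samplers above (names
// assumed from WebRender's shader conventions; the shader sources are the
// authoritative declarations):
//
//     uniform sampler2D sColor0;           // TextureSampler::Color0
//     uniform sampler2D sGpuCache;         // TextureSampler::GpuCache
//     uniform sampler2D sTransformPalette; // TextureSampler::TransformPalette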

impl TextureSampler {
    pub(crate) fn color(n: usize) -> TextureSampler {
        match n {
            0 => TextureSampler::Color0,
            1 => TextureSampler::Color1,
            2 => TextureSampler::Color2,
            _ => {
                panic!("There are only 3 color samplers.");
            }
        }
    }
}

impl Into<TextureSlot> for TextureSampler {
    fn into(self) -> TextureSlot {
        match self {
            TextureSampler::Color0 => TextureSlot(0),
            TextureSampler::Color1 => TextureSlot(1),
            TextureSampler::Color2 => TextureSlot(2),
            TextureSampler::GpuCache => TextureSlot(3),
            TextureSampler::TransformPalette => TextureSlot(4),
            TextureSampler::RenderTasks => TextureSlot(5),
            TextureSampler::Dither => TextureSlot(6),
            TextureSampler::PrimitiveHeadersF => TextureSlot(7),
            TextureSampler::PrimitiveHeadersI => TextureSlot(8),
            TextureSampler::ClipMask => TextureSlot(9),
            TextureSampler::GpuBufferF => TextureSlot(10),
            TextureSampler::GpuBufferI => TextureSlot(11),
        }
    }
}

#[derive(Clone, Debug, PartialEq)]
pub enum GraphicsApi {
    OpenGL,
}

#[derive(Clone, Debug)]
pub struct GraphicsApiInfo {
    pub kind: GraphicsApi,
    pub renderer: String,
    pub version: String,
}

#[derive(Debug)]
pub struct GpuProfile {
    pub frame_id: GpuFrameId,
    pub paint_time_ns: u64,
}

impl GpuProfile {
    fn new(frame_id: GpuFrameId, timers: &[GpuTimer]) -> GpuProfile {
        let mut paint_time_ns = 0;
        for timer in timers {
            paint_time_ns += timer.time_ns;
        }
        GpuProfile {
            frame_id,
            paint_time_ns,
        }
    }
}

#[derive(Debug)]
pub struct CpuProfile {
    pub frame_id: GpuFrameId,
    pub backend_time_ns: u64,
    pub composite_time_ns: u64,
    pub draw_calls: usize,
}

impl CpuProfile {
    fn new(
        frame_id: GpuFrameId,
        backend_time_ns: u64,
        composite_time_ns: u64,
        draw_calls: usize,
    ) -> CpuProfile {
        CpuProfile {
            frame_id,
            backend_time_ns,
            composite_time_ns,
            draw_calls,
        }
    }
}

/// The selected partial present mode for a given frame.
#[derive(Debug, Copy, Clone)]
enum PartialPresentMode {
    /// The device supports fewer dirty rects than the number of dirty rects
    /// that WR produced. In this case, the WR dirty rects are union'ed into
    /// a single dirty rect that is provided to the caller.
    Single {
        dirty_rect: DeviceRect,
    },
}

struct CacheTexture {
    texture: Texture,
    category: TextureCacheCategory,
}

/// Helper struct for resolving device Textures for use during rendering passes.
///
/// Manages the mapping between the at-a-distance texture handles used by the
/// `RenderBackend` (which does not directly interface with the GPU) and actual
/// device texture handles.
struct TextureResolver {
    /// A map to resolve texture cache IDs to native textures.
    texture_cache_map: FastHashMap<CacheTextureId, CacheTexture>,

    /// Map of external image IDs to native textures.
    external_images: FastHashMap<DeferredResolveIndex, ExternalTexture>,

    /// A special 1x1 dummy texture used for shaders that expect to work with
    /// the output of the previous pass but are actually running in the first
    /// pass.
    dummy_cache_texture: Texture,
}

impl TextureResolver {
    fn new(device: &mut Device) -> TextureResolver {
        let dummy_cache_texture = device
            .create_texture(
                ImageBufferKind::Texture2D,
                ImageFormat::RGBA8,
                1,
                1,
                TextureFilter::Linear,
                None,
            );
        device.upload_texture_immediate(
            &dummy_cache_texture,
            &[0xff, 0xff, 0xff, 0xff],
        );

        TextureResolver {
            texture_cache_map: FastHashMap::default(),
            external_images: FastHashMap::default(),
            dummy_cache_texture,
        }
    }

    fn deinit(self, device: &mut Device) {
        device.delete_texture(self.dummy_cache_texture);

        for (_id, item) in self.texture_cache_map {
            device.delete_texture(item.texture);
        }
    }

    fn begin_frame(&mut self) {
    }

    fn end_pass(
        &mut self,
        device: &mut Device,
        textures_to_invalidate: &[CacheTextureId],
    ) {
        // For any texture that is no longer needed, immediately
        // invalidate it so that tiled GPUs don't need to resolve it
        // back to memory.
        for texture_id in textures_to_invalidate {
            let render_target = &self.texture_cache_map[texture_id].texture;
            device.invalidate_render_target(render_target);
        }
    }

    // Bind a source texture to the device.
    fn bind(&self, texture_id: &TextureSource, sampler: TextureSampler, device: &mut Device) -> Swizzle {
        match *texture_id {
            TextureSource::Invalid => {
                Swizzle::default()
            }
            TextureSource::Dummy => {
                let swizzle = Swizzle::default();
                device.bind_texture(sampler, &self.dummy_cache_texture, swizzle);
                swizzle
            }
            TextureSource::External(TextureSourceExternal { ref index, .. }) => {
                let texture = self.external_images
                    .get(index)
                    .expect("BUG: External image should be resolved by now");
                device.bind_external_texture(sampler, texture);
                Swizzle::default()
            }
            TextureSource::TextureCache(index, swizzle) => {
                let texture = &self.texture_cache_map[&index].texture;
                device.bind_texture(sampler, texture, swizzle);
                swizzle
            }
        }
    }

    // Get the real (OpenGL) texture ID for a given source texture.
    // For a texture cache texture, the IDs are stored in a vector
    // map for fast access.
    fn resolve(&self, texture_id: &TextureSource) -> Option<(&Texture, Swizzle)> {
        match *texture_id {
            TextureSource::Invalid => None,
            TextureSource::Dummy => {
                Some((&self.dummy_cache_texture, Swizzle::default()))
            }
            TextureSource::External(..) => {
                panic!("BUG: External textures cannot be resolved, they can only be bound.");
            }
            TextureSource::TextureCache(index, swizzle) => {
                Some((&self.texture_cache_map[&index].texture, swizzle))
            }
        }
    }

    // Retrieve the deferred / resolved UV rect if the source is an external
    // texture; otherwise return the supplied default UV rect.
    fn get_uv_rect(
        &self,
        source: &TextureSource,
        default_value: TexelRect,
    ) -> TexelRect {
        match source {
            TextureSource::External(TextureSourceExternal { ref index, .. }) => {
                let texture = self.external_images
                    .get(index)
                    .expect("BUG: External image should be resolved by now");
                texture.get_uv_rect()
            }
            _ => {
                default_value
            }
        }
    }

    /// Returns the size of the texture in pixels.
    fn get_texture_size(&self, texture: &TextureSource) -> DeviceIntSize {
        match *texture {
            TextureSource::Invalid => DeviceIntSize::zero(),
            TextureSource::TextureCache(id, _) => {
                self.texture_cache_map[&id].texture.get_dimensions()
            },
            TextureSource::External(TextureSourceExternal { index, .. }) => {
                // If UV coords are normalized then this value will be incorrect. However, the
                // texture size is currently only used to set the uTextureSize uniform, so that
                // shaders without access to textureSize() can normalize unnormalized UVs, which
                // means this is not a problem.
                let uv_rect = self.external_images[&index].get_uv_rect();
                (uv_rect.uv1 - uv_rect.uv0).abs().to_size().to_i32()
            },
            TextureSource::Dummy => DeviceIntSize::new(1, 1),
        }
    }

    fn report_memory(&self) -> MemoryReport {
        let mut report = MemoryReport::default();

        // We're reporting GPU memory rather than heap-allocations, so we don't
        // use size_of_op.
        for item in self.texture_cache_map.values() {
            let counter = match item.category {
                TextureCacheCategory::Atlas => &mut report.atlas_textures,
                TextureCacheCategory::Standalone => &mut report.standalone_textures,
                TextureCacheCategory::PictureTile => &mut report.picture_tile_textures,
                TextureCacheCategory::RenderTarget => &mut report.render_target_textures,
            };
            *counter += item.texture.size_in_bytes();
        }

        report
    }

    fn update_profile(&self, profile: &mut TransactionProfile) {
        let mut external_image_bytes = 0;
        for img in self.external_images.values() {
            let uv_rect = img.get_uv_rect();
            // If UV coords are normalized then this value will be incorrect. This is unfortunate
            // but doesn't impact end users at all.
            let size = (uv_rect.uv1 - uv_rect.uv0).abs().to_size().to_i32();

            // Assume 4 bytes per pixel, which is true most of the time but
            // not always.
            let bpp = 4;
            external_image_bytes += size.area() as usize * bpp;
        }

        profile.set(profiler::EXTERNAL_IMAGE_BYTES, profiler::bytes_to_mb(external_image_bytes));
    }

    fn get_cache_texture_mut(&mut self, id: &CacheTextureId) -> &mut Texture {
        &mut self.texture_cache_map
            .get_mut(id)
            .expect("bug: texture not allocated")
            .texture
    }
}

#[derive(Debug, Copy, Clone, PartialEq)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub enum BlendMode {
    None,
    Alpha,
    PremultipliedAlpha,
    PremultipliedDestOut,
    SubpixelDualSource,
    Advanced(MixBlendMode),
    MultiplyDualSource,
    Screen,
    Exclusion,
    PlusLighter,
}

impl BlendMode {
    /// Decides whether a given mix-blend-mode can be implemented in terms of
    /// simple blending, dual-source blending, advanced blending, or not at
    /// all, based on the available capabilities.
    pub fn from_mix_blend_mode(
        mode: MixBlendMode,
        advanced_blend: bool,
        coherent: bool,
        dual_source: bool,
    ) -> Option<BlendMode> {
        // If we emulate a mix-blend-mode via simple or dual-source blending,
        // care must be taken to output alpha As + Ad*(1-As) regardless of what
        // the RGB output is, to comply with the mix-blend-mode spec.
        Some(match mode {
            // If we have coherent advanced blend, just use that.
            _ if advanced_blend && coherent => BlendMode::Advanced(mode),
            // Screen can be implemented as Cs + Cd - Cs*Cd => Cs + Cd*(1-Cs)
            MixBlendMode::Screen => BlendMode::Screen,
            // Exclusion can be implemented as Cs + Cd - 2*Cs*Cd => Cs*(1-Cd) + Cd*(1-Cs)
            MixBlendMode::Exclusion => BlendMode::Exclusion,
            // PlusLighter is basically a clamped add.
            MixBlendMode::PlusLighter => BlendMode::PlusLighter,
            // Multiply can be implemented as Cs*Cd + Cs*(1-Ad) + Cd*(1-As) => Cs*(1-Ad) + Cd*(1 - SRC1=(As-Cs))
            MixBlendMode::Multiply if dual_source => BlendMode::MultiplyDualSource,
            // Otherwise, use advanced blend without coherency if available.
            _ if advanced_blend => BlendMode::Advanced(mode),
            // If advanced blend is not available, then we have to use brush_mix_blend.
            _ => return None,
        })
    }
}
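
// A minimal sketch of the capability-based selection above (illustrative):
// with only dual-source blending available, Multiply gets a dedicated blend
// mode, while e.g. ColorDodge has no simple equivalent and falls back to
// brush_mix_blend (None).
//
//     assert_eq!(
//         BlendMode::from_mix_blend_mode(MixBlendMode::Multiply, false, false, true),
//         Some(BlendMode::MultiplyDualSource),
//     );
//     assert_eq!(
//         BlendMode::from_mix_blend_mode(MixBlendMode::ColorDodge, false, false, true),
//         None,
//     );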

/// Information about the state of the debugging / profiler overlay in native compositing mode.
struct DebugOverlayState {
    /// True if any of the current debug flags will result in drawing a debug overlay.
    is_enabled: bool,

    /// The current size of the debug overlay surface. None implies that the
    /// debug surface isn't currently allocated.
    current_size: Option<DeviceIntSize>,

    layer_index: usize,
}

impl DebugOverlayState {
    fn new() -> Self {
        DebugOverlayState {
            is_enabled: false,
            current_size: None,
            layer_index: 0,
        }
    }
}

/// Tracks buffer damage rects over a series of frames.
#[derive(Debug, Default)]
pub(crate) struct BufferDamageTracker {
    damage_rects: [DeviceRect; 4],
    current_offset: usize,
}

impl BufferDamageTracker {
    /// Sets the damage rect for the current frame. Should only be called *after*
    /// `get_damage_rect()` has been called to get the current backbuffer's damage rect.
    fn push_dirty_rect(&mut self, rect: &DeviceRect) {
        self.damage_rects[self.current_offset] = rect.clone();
        self.current_offset = match self.current_offset {
            0 => self.damage_rects.len() - 1,
            n => n - 1,
        }
    }

    /// Gets the damage rect for the current backbuffer, given the backbuffer's age
    /// (the number of frames since it was previously the backbuffer).
    /// Returns an empty rect if the buffer is fully valid, and None if the entire
    /// buffer is invalid.
    fn get_damage_rect(&self, buffer_age: usize) -> Option<DeviceRect> {
        match buffer_age {
            // 0 means this is a new buffer, so is completely invalid.
            0 => None,
            // 1 means this backbuffer was also the previous frame's backbuffer
            // (so must have been copied to the frontbuffer). It is therefore entirely valid.
            1 => Some(DeviceRect::zero()),
            // We must calculate the union of the damage rects since this buffer was previously
            // the backbuffer.
            n if n <= self.damage_rects.len() + 1 => {
                Some(
                    self.damage_rects.iter()
                        .cycle()
                        .skip(self.current_offset + 1)
                        .take(n - 1)
                        .fold(DeviceRect::zero(), |acc, r| acc.union(r))
                )
            }
            // The backbuffer is older than the number of frames for which we
            // track damage, so we treat it as entirely invalid.
            _ => None,
        }
    }
}
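
// A minimal sketch of the tracker's behavior (illustrative; `rect_a` and
// `rect_b` stand for the damage rects of the two previous frames):
//
//     let mut tracker = BufferDamageTracker::default();
//     tracker.push_dirty_rect(&rect_a); // frame N-2
//     tracker.push_dirty_rect(&rect_b); // frame N-1
//     // A backbuffer of age 3 missed both frames, so it must repaint the union
//     // of their damage, while an age-1 backbuffer is already fully valid.
//     assert_eq!(tracker.get_damage_rect(3), Some(rect_a.union(&rect_b)));
//     assert_eq!(tracker.get_damage_rect(1), Some(DeviceRect::zero()));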

/// The renderer is responsible for submitting to the GPU the work prepared by the
/// RenderBackend.
///
/// We have a separate `Renderer` instance for each instance of WebRender (generally
/// one per OS window), and all instances share the same thread.
pub struct Renderer {
    result_rx: Receiver<ResultMsg>,
    api_tx: Sender<ApiMsg>,
    pub device: Device,
    pending_texture_updates: Vec<TextureUpdateList>,
    /// True if there are any TextureCacheUpdates pending.
    pending_texture_cache_updates: bool,
    pending_native_surface_updates: Vec<NativeSurfaceOperation>,
    pending_gpu_cache_updates: Vec<GpuCacheUpdateList>,
    pending_gpu_cache_clear: bool,
    pending_shader_updates: Vec<PathBuf>,
    active_documents: FastHashMap<DocumentId, RenderedDocument>,

    shaders: Rc<RefCell<Shaders>>,

    max_recorded_profiles: usize,

    clear_color: ColorF,
    enable_clear_scissor: bool,
    enable_advanced_blend_barriers: bool,
    clear_caches_with_quads: bool,
    clear_alpha_targets_with_quads: bool,

    debug: debug::LazyInitializedDebugRenderer,
    debug_flags: DebugFlags,
    profile: TransactionProfile,
    frame_counter: u64,
    resource_upload_time: f64,
    gpu_cache_upload_time: f64,
    profiler: Profiler,

    last_time: u64,

    pub gpu_profiler: GpuProfiler,
    vaos: vertex::RendererVAOs,

    gpu_cache_texture: gpu_cache::GpuCacheTexture,
    vertex_data_textures: Vec<vertex::VertexDataTextures>,
    current_vertex_data_textures: usize,

    /// When the GPU cache debugger is enabled, we keep track of the live blocks
    /// in the GPU cache so that we can use them for the debug display. This
    /// member stores those live blocks, indexed by row.
    gpu_cache_debug_chunks: Vec<Vec<GpuCacheDebugChunk>>,

    gpu_cache_frame_id: FrameId,
    gpu_cache_overflow: bool,

    pipeline_info: PipelineInfo,

    // Manages and resolves source texture IDs to real texture IDs.
    texture_resolver: TextureResolver,

    texture_upload_pbo_pool: UploadPBOPool,
    staging_texture_pool: UploadTexturePool,

    dither_matrix_texture: Option<Texture>,

    /// Optional trait object that allows the client
    /// application to provide external buffers for image data.
    external_image_handler: Option<Box<dyn ExternalImageHandler>>,

    /// Optional function pointers for measuring memory used by a given
    /// heap-allocated pointer.
    size_of_ops: Option<MallocSizeOfOps>,

    pub renderer_errors: Vec<RendererError>,

    pub(in crate) async_frame_recorder: Option<AsyncScreenshotGrabber>,
    pub(in crate) async_screenshots: Option<AsyncScreenshotGrabber>,

    /// List of profile results from previous frames. Can be retrieved
    /// via get_frame_profiles().
    cpu_profiles: VecDeque<CpuProfile>,
    gpu_profiles: VecDeque<GpuProfile>,

    /// Notification requests to be fulfilled after rendering.
    notifications: Vec<NotificationRequest>,

    device_size: Option<DeviceIntSize>,

    /// A lazily created texture for the zoom debugging widget.
    zoom_debug_texture: Option<Texture>,

    /// The current mouse position. This is used for debugging
    /// functionality only, such as the debug zoom widget.
    cursor_position: DeviceIntPoint,

    /// Guards to check if we might be rendering a frame with expired texture
    /// cache entries.
    shared_texture_cache_cleared: bool,

    /// The set of documents which we've seen a publish for since last render.
    documents_seen: FastHashSet<DocumentId>,

    #[cfg(feature = "capture")]
    read_fbo: FBOId,
    #[cfg(feature = "replay")]
    owned_external_images: FastHashMap<(ExternalImageId, u8), ExternalTexture>,

    /// The compositing config, affecting how WR composites into the final scene.
    compositor_config: CompositorConfig,
    current_compositor_kind: CompositorKind,

    /// Maintains a set of allocated native composite surfaces. This allows any
    /// currently allocated surfaces to be cleaned up as soon as deinit() is
    /// called (the normal bookkeeping for native surfaces exists in the
    /// render backend thread).
    allocated_native_surfaces: FastHashSet<NativeSurfaceId>,

    /// If true, partial present state has been reset and everything needs to
    /// be drawn on the next render.
    force_redraw: bool,

    /// State related to the debug / profiling overlays.
    debug_overlay_state: DebugOverlayState,

    /// Tracks the dirty rectangles from previous frames. Used on platforms
    /// that require keeping the front buffer fully correct when doing
    /// partial present (e.g. unix desktop with EGL_EXT_buffer_age).
    buffer_damage_tracker: BufferDamageTracker,

    max_primitive_instance_count: usize,
    enable_instancing: bool,

    /// Count consecutive OOM frames to detect if we are stuck in a loop,
    /// unable to render.
    consecutive_oom_frames: u32,

    /// update() defers processing of a ResultMsg if the frame_publish_id of a
    /// ResultMsg::PublishDocument exceeds target_frame_publish_id.
    target_frame_publish_id: Option<FramePublishId>,

    /// Holds the next ResultMsg that will be handled by update().
    pending_result_msg: Option<ResultMsg>,
}

#[derive(Debug)]
pub enum RendererError {
    Shader(ShaderError),
    Thread(std::io::Error),
    MaxTextureSize,
    SoftwareRasterizer,
    OutOfMemory,
}

impl From<ShaderError> for RendererError {
    fn from(err: ShaderError) -> Self {
        RendererError::Shader(err)
    }
}

impl From<std::io::Error> for RendererError {
    fn from(err: std::io::Error) -> Self {
        RendererError::Thread(err)
    }
}

impl Renderer {
    pub fn device_size(&self) -> Option<DeviceIntSize> {
        self.device_size
    }

    /// Update the current position of the debug cursor.
    pub fn set_cursor_position(
        &mut self,
        position: DeviceIntPoint,
    ) {
        self.cursor_position = position;
    }

    pub fn get_max_texture_size(&self) -> i32 {
        self.device.max_texture_size()
    }

    pub fn get_graphics_api_info(&self) -> GraphicsApiInfo {
        GraphicsApiInfo {
            kind: GraphicsApi::OpenGL,
            version: self.device.gl().get_string(gl::VERSION),
            renderer: self.device.gl().get_string(gl::RENDERER),
        }
    }

    pub fn preferred_color_format(&self) -> ImageFormat {
        self.device.preferred_color_formats().external
    }

    pub fn required_texture_stride_alignment(&self, format: ImageFormat) -> usize {
        self.device.required_pbo_stride().num_bytes(format).get()
    }

    pub fn set_clear_color(&mut self, color: ColorF) {
        self.clear_color = color;
    }

    pub fn flush_pipeline_info(&mut self) -> PipelineInfo {
        mem::replace(&mut self.pipeline_info, PipelineInfo::default())
    }

    /// Returns the Epoch of the current frame in a pipeline.
    pub fn current_epoch(&self, document_id: DocumentId, pipeline_id: PipelineId) -> Option<Epoch> {
        self.pipeline_info.epochs.get(&(pipeline_id, document_id)).cloned()
    }

    fn get_next_result_msg(&mut self) -> Option<ResultMsg> {
        if self.pending_result_msg.is_none() {
            if let Ok(msg) = self.result_rx.try_recv() {
                self.pending_result_msg = Some(msg);
            }
        }

        match (&self.pending_result_msg, &self.target_frame_publish_id) {
            (Some(ResultMsg::PublishDocument(frame_publish_id, _, _, _)), Some(target_id)) => {
                if frame_publish_id > target_id {
                    return None;
                }
            }
            _ => {}
        }

        self.pending_result_msg.take()
    }

    /// Processes the result queue.
    ///
    /// Should be called before `render()`, as texture cache updates are done here.
    pub fn update(&mut self) {
        profile_scope!("update");

        // Pull any pending results and process them.
        while let Some(msg) = self.get_next_result_msg() {
            match msg {
                ResultMsg::PublishPipelineInfo(mut pipeline_info) => {
                    for ((pipeline_id, document_id), epoch) in pipeline_info.epochs {
                        self.pipeline_info.epochs.insert((pipeline_id, document_id), epoch);
                    }
                    self.pipeline_info.removed_pipelines.extend(pipeline_info.removed_pipelines.drain(..));
                }
                ResultMsg::PublishDocument(
                    _,
                    document_id,
                    mut doc,
                    resource_update_list,
                ) => {
                    // Add a new document to the active set.

                    // If the document we are replacing must be drawn (in order to
                    // update the texture cache), issue a render just to
                    // off-screen targets, i.e. pass None to render_impl. We do this
                    // because a) we don't need to render to the main framebuffer
                    // so it is cheaper not to, and b) doing so without a
                    // subsequent present would break partial present.
                    let prev_frame_memory = if let Some(mut prev_doc) = self.active_documents.remove(&document_id) {
                        doc.profile.merge(&mut prev_doc.profile);

                        if prev_doc.frame.must_be_drawn() {
                            prev_doc.render_reasons |= RenderReasons::TEXTURE_CACHE_FLUSH;
                            self.render_impl(
                                document_id,
                                &mut prev_doc,
                                None,
                                0,
                            ).ok();
                        }

                        Some(prev_doc.frame.allocator_memory)
                    } else {
                        None
                    };

                    if let Some(memory) = prev_frame_memory {
                        // We just dropped the frame a few lines above. There should be no
                        // live allocations left in the frame's memory.
                        memory.assert_memory_reusable();
                    }

                    self.active_documents.insert(document_id, doc);

                    // IMPORTANT: The pending texture cache updates must be applied
                    //            *after* the previous frame has been rendered above
                    //            (if necessary for a texture cache update). For
                    //            an example of why this is required:
                    //            1) Previous frame contains a render task that
                    //               targets Texture X.
                    //            2) New frame contains a texture cache update which
                    //               frees Texture X.
                    //            3) bad stuff happens.

                    //TODO: associate `document_id` with target window
                    self.pending_texture_cache_updates |= !resource_update_list.texture_updates.updates.is_empty();
                    self.pending_texture_updates.push(resource_update_list.texture_updates);
                    self.pending_native_surface_updates.extend(resource_update_list.native_surface_updates);
                    self.documents_seen.insert(document_id);
                }
                ResultMsg::UpdateGpuCache(mut list) => {
                    if list.clear {
                        self.pending_gpu_cache_clear = true;
                        self.gpu_cache_debug_chunks = Vec::new();
                    }
                    for cmd in mem::replace(&mut list.debug_commands, Vec::new()) {
                        match cmd {
                            GpuCacheDebugCmd::Alloc(chunk) => {
                                let row = chunk.address.v as usize;
                                if row >= self.gpu_cache_debug_chunks.len() {
                                    self.gpu_cache_debug_chunks.resize(row + 1, Vec::new());
                                }
                                self.gpu_cache_debug_chunks[row].push(chunk);
                            },
                            GpuCacheDebugCmd::Free(address) => {
                                let chunks = &mut self.gpu_cache_debug_chunks[address.v as usize];
                                let pos = chunks.iter()
                                    .position(|x| x.address == address).unwrap();
                                chunks.remove(pos);
                            },
                        }
                    }
                    self.pending_gpu_cache_updates.push(list);
                }
                ResultMsg::UpdateResources {
                    resource_updates,
                    memory_pressure,
                } => {
                    if memory_pressure {
                        // If a memory pressure event arrives _after_ a new scene has
                        // been published that writes persistent targets (i.e. cached
                        // render tasks to the texture cache, or picture cache tiles)
                        // but _before_ the next update/render loop, those targets
                        // will not be updated due to the active_documents list being
                        // cleared at the end of this message. To work around that,
                        // if any of the existing documents have not rendered yet, and
                        // have picture/texture cache targets, force a render so that
                        // those targets are updated.
                        let active_documents = mem::replace(
                            &mut self.active_documents,
                            FastHashMap::default(),
                        );
                        for (doc_id, mut doc) in active_documents {
                            if doc.frame.must_be_drawn() {
                                // As this render will not be presented, we must pass None to
                                // render_impl. This avoids interfering with partial present
                                // logic, as well as being more efficient.
                                self.render_impl(
                                    doc_id,
                                    &mut doc,
                                    None,
                                    0,
                                ).ok();
                            }
                        }
                    }

                    self.pending_texture_cache_updates |= !resource_updates.texture_updates.updates.is_empty();
                    self.pending_texture_updates.push(resource_updates.texture_updates);
                    self.pending_native_surface_updates.extend(resource_updates.native_surface_updates);
                    self.device.begin_frame();

                    self.update_texture_cache();
                    self.update_native_surfaces();

                    // Flush the render target pool on memory pressure.
                    //
                    // This needs to be separate from the block below because
                    // the device module asserts if we delete textures while
                    // not in a frame.
                    if memory_pressure {
                        self.texture_upload_pbo_pool.on_memory_pressure(&mut self.device);
                        self.staging_texture_pool.delete_textures(&mut self.device);
                    }

                    self.device.end_frame();
                }
                ResultMsg::AppendNotificationRequests(mut notifications) => {
                    // We need to know specifically if there are any pending
                    // TextureCacheUpdate updates in any of the entries in
                    // pending_texture_updates. They may simply be nops, which do not
                    // need to prevent issuing the notification, and if so, may not
                    // cause a timely frame render to occur to wake up any listeners.
                    if !self.pending_texture_cache_updates {
                        drain_filter(
                            &mut notifications,
                            |n| { n.when() == Checkpoint::FrameTexturesUpdated },
                            |n| { n.notify(); },
                        );
                    }
                    self.notifications.append(&mut notifications);
                }
                ResultMsg::ForceRedraw => {
                    self.force_redraw = true;
                }
                ResultMsg::RefreshShader(path) => {
                    self.pending_shader_updates.push(path);
                }
                ResultMsg::SetParameter(ref param) => {
                    self.device.set_parameter(param);
                    self.profiler.set_parameter(param);
                }
                ResultMsg::DebugOutput(output) => match output {
                    #[cfg(feature = "capture")]
                    DebugOutput::SaveCapture(config, deferred) => {
                        self.save_capture(config, deferred);
                    }
                    #[cfg(feature = "replay")]
                    DebugOutput::LoadCapture(config, plain_externals) => {
                        self.active_documents.clear();
                        self.load_capture(config, plain_externals);
                    }
                },
                ResultMsg::DebugCommand(command) => {
                    self.handle_debug_command(command);
                }
            }
        }
    }

    /// update() defers processing of a ResultMsg if the frame_publish_id of a
    /// ResultMsg::PublishDocument exceeds target_frame_publish_id.
    pub fn set_target_frame_publish_id(&mut self, publish_id: FramePublishId) {
        self.target_frame_publish_id = Some(publish_id);
    }
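
    // Illustrative use of the publish pinning above (a sketch with a
    // hypothetical `renderer` and `publish_id`; not part of the build):
    //
    //     renderer.set_target_frame_publish_id(publish_id);
    //     renderer.update(); // newer PublishDocument results stay queued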

    fn handle_debug_command(&mut self, command: DebugCommand) {
        match command {
            DebugCommand::SetPictureTileSize(_) |
            DebugCommand::SetMaximumSurfaceSize(_) => {
                panic!("Should be handled by render backend");
            }
            DebugCommand::SaveCapture(..) |
            DebugCommand::LoadCapture(..) |
            DebugCommand::StartCaptureSequence(..) |
            DebugCommand::StopCaptureSequence => {
                panic!("Capture commands are not welcome here! Did you build with 'capture' feature?")
            }
            DebugCommand::ClearCaches(_)
            | DebugCommand::SimulateLongSceneBuild(_)
            | DebugCommand::EnableNativeCompositor(_)
            | DebugCommand::SetBatchingLookback(_) => {}
            DebugCommand::InvalidateGpuCache => {
                self.gpu_cache_texture.invalidate();
            }
            DebugCommand::SetFlags(flags) => {
                self.set_debug_flags(flags);
            }
        }
    }

    /// Set a callback for handling external images.
    pub fn set_external_image_handler(&mut self, handler: Box<dyn ExternalImageHandler>) {
        self.external_image_handler = Some(handler);
    }

    /// Retrieve (and clear) the current list of recorded frame profiles.
    pub fn get_frame_profiles(&mut self) -> (Vec<CpuProfile>, Vec<GpuProfile>) {
        let cpu_profiles = self.cpu_profiles.drain(..).collect();
        let gpu_profiles = self.gpu_profiles.drain(..).collect();
        (cpu_profiles, gpu_profiles)
    }

    /// Reset the current partial present state. This forces the entire framebuffer
    /// to be refreshed next time `render` is called.
    pub fn force_redraw(&mut self) {
        self.force_redraw = true;
    }

    /// Renders the current frame.
    ///
    /// A Frame is supplied by calling [`generate_frame()`][webrender_api::Transaction::generate_frame].
    /// `buffer_age` is the age of the current backbuffer. It is only relevant if partial
    /// present is active; otherwise 0 should be passed here.
    pub fn render(
        &mut self,
        device_size: DeviceIntSize,
        buffer_age: usize,
    ) -> Result<RenderResults, Vec<RendererError>> {
        self.device_size = Some(device_size);

        // TODO(gw): We want to make the active document that is
        //           being rendered configurable via the public
        //           API in future. For now, just select the last
        //           added document as the active one to render
        //           (Gecko only ever creates a single document
        //           per renderer right now).
        let doc_id = self.active_documents.keys().last().cloned();

        let result = match doc_id {
            Some(doc_id) => {
                // Remove the doc from the map to appease the borrow checker
                let mut doc = self.active_documents
                    .remove(&doc_id)
                    .unwrap();

                let size = if !device_size.is_empty() {
                    Some(device_size)
                } else {
                    None
                };

                let result = self.render_impl(
                    doc_id,
                    &mut doc,
                    size,
                    buffer_age,
                );

                self.active_documents.insert(doc_id, doc);

                result
            }
            None => {
                self.last_time = precise_time_ns();
                Ok(RenderResults::default())
            }
        };

        drain_filter(
            &mut self.notifications,
            |n| { n.when() == Checkpoint::FrameRendered },
            |n| { n.notify(); },
        );

        let mut oom = false;
        if let Err(ref errors) = result {
            for error in errors {
                if matches!(error, &RendererError::OutOfMemory) {
                    oom = true;
                    break;
                }
            }
        }

        if oom {
            let _ = self.api_tx.send(ApiMsg::MemoryPressure);
            // Ensure we don't get stuck in a loop.
            self.consecutive_oom_frames += 1;
            assert!(self.consecutive_oom_frames < 5, "Renderer out of memory");
        } else {
            self.consecutive_oom_frames = 0;
        }

        // This is the end of the rendering pipeline. If some notifications are still there,
        // just clear them and they will automatically fire the Checkpoint::TransactionDropped
        // event. Otherwise they would just pile up in this vector forever.
        self.notifications.clear();

        tracy_frame_marker!();

        result
    }
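
    // An illustrative per-frame driver loop (a sketch with hypothetical
    // `renderer`, `device_size` and `buffer_age`; buffer swapping and error
    // handling are elided):
    //
    //     renderer.update(); // apply texture cache updates and pending results
    //     match renderer.render(device_size, buffer_age) {
    //         Ok(results) => { /* present the frame, inspect `results` */ }
    //         Err(errors) => { /* report shader / OOM errors */ }
    //     }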
1334
1335    /// Update the state of any debug / profiler overlays. This is currently only needed
1336    /// when running with the native compositor enabled.
1337    fn update_debug_overlay(
1338        &mut self,
1339        framebuffer_size: DeviceIntSize,
1340        has_debug_items: bool,
1341    ) {
1342        // If any of the following debug flags are set, something will be drawn on the debug overlay.
1343        self.debug_overlay_state.is_enabled = has_debug_items || self.debug_flags.intersects(
1344            DebugFlags::PROFILER_DBG |
1345            DebugFlags::RENDER_TARGET_DBG |
1346            DebugFlags::TEXTURE_CACHE_DBG |
1347            DebugFlags::EPOCHS |
1348            DebugFlags::GPU_CACHE_DBG |
1349            DebugFlags::PICTURE_CACHING_DBG |
1350            DebugFlags::PRIMITIVE_DBG |
1351            DebugFlags::ZOOM_DBG |
1352            DebugFlags::WINDOW_VISIBILITY_DBG
1353        );
1354
1355        // Update the debug overlay surface, if we are running in native compositor mode.
1356        if let CompositorKind::Native { .. } = self.current_compositor_kind {
1357            let compositor = self.compositor_config.compositor().unwrap();
1358
1359            // If there is a current surface, destroy it if we don't need it for this frame, or if
1360            // the size has changed.
1361            if let Some(current_size) = self.debug_overlay_state.current_size {
1362                if !self.debug_overlay_state.is_enabled || current_size != framebuffer_size {
1363                    compositor.destroy_surface(&mut self.device, NativeSurfaceId::DEBUG_OVERLAY);
1364                    self.debug_overlay_state.current_size = None;
1365                }
1366            }
1367
1368            // Allocate a new surface, if we need it and there isn't one.
1369            if self.debug_overlay_state.is_enabled && self.debug_overlay_state.current_size.is_none() {
1370                compositor.create_surface(
1371                    &mut self.device,
1372                    NativeSurfaceId::DEBUG_OVERLAY,
1373                    DeviceIntPoint::zero(),
1374                    framebuffer_size,
1375                    false,
1376                );
1377                compositor.create_tile(
1378                    &mut self.device,
1379                    NativeTileId::DEBUG_OVERLAY,
1380                );
1381                self.debug_overlay_state.current_size = Some(framebuffer_size);
1382            }
1383        }
1384    }
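
    // A minimal sketch of the surface lifecycle decision made in
    // `update_debug_overlay` above: destroy the cached overlay surface when
    // it is no longer wanted or its size has changed, then (re)allocate when
    // it is wanted and absent. `(i32, i32)` stands in for `DeviceIntSize`.
    fn _overlay_surface_sketch(current: &mut Option<(i32, i32)>, enabled: bool, size: (i32, i32)) {
        // Destroy if present but unwanted or stale.
        if let Some(cur) = *current {
            if !enabled || cur != size {
                // compositor.destroy_surface(..) would be issued here.
                *current = None;
            }
        }
        // Allocate if wanted and absent.
        if enabled && current.is_none() {
            // compositor.create_surface(..) would be issued here.
            *current = Some(size);
        }
    }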
1385
1386    /// Bind a draw target for the debug / profiler overlays, if required.
1387    fn bind_debug_overlay(&mut self, device_size: DeviceIntSize) -> Option<DrawTarget> {
1388        // Debug overlay setup is only required in native compositing mode
1389        if self.debug_overlay_state.is_enabled {
1390            match self.current_compositor_kind {
1391                CompositorKind::Native { .. } => {
1392                    let compositor = self.compositor_config.compositor().unwrap();
1393                    let surface_size = self.debug_overlay_state.current_size.unwrap();
1394
1395                    // Ensure old surface is invalidated before binding
1396                    compositor.invalidate_tile(
1397                        &mut self.device,
1398                        NativeTileId::DEBUG_OVERLAY,
1399                        DeviceIntRect::from_size(surface_size),
1400                    );
1401                    // Bind the native surface
1402                    let surface_info = compositor.bind(
1403                        &mut self.device,
1404                        NativeTileId::DEBUG_OVERLAY,
1405                        DeviceIntRect::from_size(surface_size),
1406                        DeviceIntRect::from_size(surface_size),
1407                    );
1408
1409                    // Wrap the bound surface in a DrawTarget and make it the current draw target
1410                    let draw_target = DrawTarget::NativeSurface {
1411                        offset: surface_info.origin,
1412                        external_fbo_id: surface_info.fbo_id,
1413                        dimensions: surface_size,
1414                    };
1415                    self.device.bind_draw_target(draw_target);
1416
1417                    // When native compositing, clear the debug overlay each frame.
1418                    self.device.clear_target(
1419                        Some([0.0, 0.0, 0.0, 0.0]),
1420                        None, // debug renderer does not use depth
1421                        None,
1422                    );
1423
1424                    Some(draw_target)
1425                }
1426                CompositorKind::Layer { .. } => {
1427                    let compositor = self.compositor_config.layer_compositor().unwrap();
1428                    compositor.bind_layer(self.debug_overlay_state.layer_index);
1429
1430                    self.device.clear_target(
1431                        Some([0.0, 0.0, 0.0, 0.0]),
1432                        None, // debug renderer does not use depth
1433                        None,
1434                    );
1435
1436                    Some(DrawTarget::new_default(device_size, self.device.surface_origin_is_top_left()))
1437                }
1438                CompositorKind::Draw { .. } => {
1439                    // If we're not using the native compositor, then the default
1440                    // frame buffer is already bound. Create a DrawTarget for it and
1441                    // return it.
1442                    Some(DrawTarget::new_default(device_size, self.device.surface_origin_is_top_left()))
1443                }
1444            }
1445        } else {
1446            None
1447        }
1448    }
1449
1450    /// Unbind the draw target for debug / profiler overlays, if required.
1451    fn unbind_debug_overlay(&mut self) {
1452        // Debug overlay setup is only required in native compositing mode
1453        if self.debug_overlay_state.is_enabled {
1454            match self.current_compositor_kind {
1455                CompositorKind::Native { .. } => {
1456                    let compositor = self.compositor_config.compositor().unwrap();
1457                    // Unbind the draw target and add it to the visual tree to be composited
1458                    compositor.unbind(&mut self.device);
1459
1460                    let clip_rect = DeviceIntRect::from_size(
1461                        self.debug_overlay_state.current_size.unwrap(),
1462                    );
1463
1464                    compositor.add_surface(
1465                        &mut self.device,
1466                        NativeSurfaceId::DEBUG_OVERLAY,
1467                        CompositorSurfaceTransform::identity(),
1468                        clip_rect,
1469                        ImageRendering::Auto,
1470                        clip_rect,
1471                        ClipRadius::EMPTY,
1472                    );
1473                }
1474                CompositorKind::Draw { .. } => {}
1475                CompositorKind::Layer { .. } => {
1476                    let compositor = self.compositor_config.layer_compositor().unwrap();
1477                    compositor.present_layer(self.debug_overlay_state.layer_index);
1478                }
1479            }
1480        }
1481    }
1482
1483    // If device_size is None, don't render to the main frame buffer. This is useful for
1484    // updating texture cache render tasks while avoiding a full frame render. If the
1485    // render is not going to be presented, then this must be set to None, as performing a
1486    // composite without a present will confuse partial present.
1487    fn render_impl(
1488        &mut self,
1489        doc_id: DocumentId,
1490        active_doc: &mut RenderedDocument,
1491        mut device_size: Option<DeviceIntSize>,
1492        buffer_age: usize,
1493    ) -> Result<RenderResults, Vec<RendererError>> {
1494        profile_scope!("render");
1495        let mut results = RenderResults::default();
1496        self.profile.end_time_if_started(profiler::FRAME_SEND_TIME);
1497        self.profile.start_time(profiler::RENDERER_TIME);
1498
1499        self.staging_texture_pool.begin_frame();
1500
1501        let compositor_kind = active_doc.frame.composite_state.compositor_kind;
1502        // Handle a change in the compositor kind requested by the new frame.
1503        if self.current_compositor_kind != compositor_kind {
1504            let enable = match (self.current_compositor_kind, compositor_kind) {
1505                (CompositorKind::Native { .. }, CompositorKind::Draw { .. }) => {
1506                    if self.debug_overlay_state.current_size.is_some() {
1507                        self.compositor_config
1508                            .compositor()
1509                            .unwrap()
1510                            .destroy_surface(&mut self.device, NativeSurfaceId::DEBUG_OVERLAY);
1511                        self.debug_overlay_state.current_size = None;
1512                    }
1513                    false
1514                }
1515                (CompositorKind::Draw { .. }, CompositorKind::Native { .. }) => {
1516                    true
1517                }
1518                (current_compositor_kind, active_doc_compositor_kind) => {
1519                    warn!("Compositor mismatch, assuming this is Wrench running. Current {:?}, active {:?}",
1520                        current_compositor_kind, active_doc_compositor_kind);
1521                    false
1522                }
1523            };
1524
1525            if let Some(config) = self.compositor_config.compositor() {
1526                config.enable_native_compositor(&mut self.device, enable);
1527            }
1528            self.current_compositor_kind = compositor_kind;
1529        }
1530
1531        // The texture resolver scope should be outside of any rendering, including
1532        // debug rendering. This ensures that when we return render targets to the
1533        // pool via glInvalidateFramebuffer, we don't do any debug rendering after
1534        // that point. Otherwise, the bind / invalidate / bind logic trips up the
1535        // render pass logic in tiled / mobile GPUs, resulting in an extra copy /
1536        // resolve step when the debug overlay is enabled.
1537        self.texture_resolver.begin_frame();
1538
1539        if let Some(device_size) = device_size {
1540            self.update_gpu_profile(device_size);
1541        }
1542
1543        let cpu_frame_id = {
1544            let _gm = self.gpu_profiler.start_marker("begin frame");
1545            let frame_id = self.device.begin_frame();
1546            self.gpu_profiler.begin_frame(frame_id);
1547
1548            self.device.disable_scissor();
1549            self.device.disable_depth();
1550            self.set_blend(false, FramebufferKind::Main);
1551            //self.update_shaders();
1552
1553            self.update_texture_cache();
1554            self.update_native_surfaces();
1555
1556            frame_id
1557        };
1558
1559        if !active_doc.frame.present {
1560            // Setting device_size to None ensures that compositing and presenting
1561            // the frame are skipped in the rest of this module.
1562            device_size = None;
1563        }
1564
1565        if let Some(device_size) = device_size {
1566            // Inform the client that we are starting a composition transaction if native
1567            // compositing is enabled. This needs to be done early in the frame, so that
1568            // we can create debug overlays after drawing the main surfaces.
1569            if let CompositorKind::Native { .. } = self.current_compositor_kind {
1570                let compositor = self.compositor_config.compositor().unwrap();
1571                compositor.begin_frame(&mut self.device);
1572            }
1573
1574            // Update the state of the debug overlay surface, ensuring that
1575            // the compositor mode has a suitable surface to draw to, if required.
1576            self.update_debug_overlay(device_size, !active_doc.frame.debug_items.is_empty());
1577        }
1578
1579        let frame = &mut active_doc.frame;
1580        let profile = &mut active_doc.profile;
1581        assert!(self.current_compositor_kind == frame.composite_state.compositor_kind);
1582
1583        if self.shared_texture_cache_cleared {
1584            assert!(self.documents_seen.contains(&doc_id),
1585                    "Cleared texture cache without sending new document frame.");
1586        }
1587
1588        match self.prepare_gpu_cache(&frame.deferred_resolves) {
1589            Ok(..) => {
1590                assert!(frame.gpu_cache_frame_id <= self.gpu_cache_frame_id,
1591                    "Received frame depends on a later GPU cache epoch ({:?}) than one we received last via `UpdateGpuCache` ({:?})",
1592                    frame.gpu_cache_frame_id, self.gpu_cache_frame_id);
1593
1594                {
1595                    profile_scope!("gl.flush");
1596                    self.device.gl().flush();  // early start on gpu cache updates
1597                }
1598
1599                self.draw_frame(
1600                    frame,
1601                    device_size,
1602                    buffer_age,
1603                    &mut results,
1604                );
1605
1606                // TODO(nical): do this automatically by selecting counters in the wr profiler
1607                // Profile marker for the number of invalidated picture cache tiles
1608                if thread_is_being_profiled() {
1609                    let duration = Duration::new(0, 0);
1610                    if let Some(n) = self.profile.get(profiler::RENDERED_PICTURE_TILES) {
1611                        let message = (n as usize).to_string();
1612                        add_text_marker("NumPictureCacheInvalidated", &message, duration);
1613                    }
1614                }
1615
1616                if device_size.is_some() {
1617                    self.draw_frame_debug_items(&frame.debug_items);
1618                }
1619
1620                self.profile.merge(profile);
1621            }
1622            Err(e) => {
1623                self.renderer_errors.push(e);
1624            }
1625        }
1626
1627        self.unlock_external_images(&frame.deferred_resolves);
1628
1629        let _gm = self.gpu_profiler.start_marker("end frame");
1630        self.gpu_profiler.end_frame();
1631
1632        let t = self.profile.end_time(profiler::RENDERER_TIME);
1633        self.profile.end_time_if_started(profiler::TOTAL_FRAME_CPU_TIME);
1634
1635        let current_time = precise_time_ns();
1636        if device_size.is_some() {
1637            let time = profiler::ns_to_ms(current_time - self.last_time);
1638            self.profile.set(profiler::FRAME_TIME, time);
1639        }
1640
1641        let debug_overlay = device_size.and_then(|device_size| {
1642            // Bind a surface to draw the debug / profiler information to.
1643            self.bind_debug_overlay(device_size).map(|draw_target| {
1644                self.draw_render_target_debug(&draw_target);
1645                self.draw_texture_cache_debug(&draw_target);
1646                self.draw_gpu_cache_debug(device_size);
1647                self.draw_zoom_debug(device_size);
1648                self.draw_epoch_debug();
1649                self.draw_window_visibility_debug();
1650                draw_target
1651            })
1652        });
1653
1654        Telemetry::record_renderer_time(Duration::from_micros((t * 1000.00) as u64));
1655        if self.profile.get(profiler::SHADER_BUILD_TIME).is_none() {
1656            Telemetry::record_renderer_time_no_sc(Duration::from_micros((t * 1000.00) as u64));
1657        }
1658
1659        if self.max_recorded_profiles > 0 {
1660            while self.cpu_profiles.len() >= self.max_recorded_profiles {
1661                self.cpu_profiles.pop_front();
1662            }
1663            let cpu_profile = CpuProfile::new(
1664                cpu_frame_id,
1665                (self.profile.get_or(profiler::FRAME_BUILDING_TIME, 0.0) * 1000000.0) as u64,
1666                (self.profile.get_or(profiler::RENDERER_TIME, 0.0) * 1000000.0) as u64,
1667                self.profile.get_or(profiler::DRAW_CALLS, 0.0) as usize,
1668            );
1669            self.cpu_profiles.push_back(cpu_profile);
1670        }
1671
1672        if thread_is_being_profiled() {
1673            let duration = Duration::new(0, 0);
1674            let message = (self.profile.get_or(profiler::DRAW_CALLS, 0.0) as usize).to_string();
1675            add_text_marker("NumDrawCalls", &message, duration);
1676        }
1677
1678        let report = self.texture_resolver.report_memory();
1679        self.profile.set(profiler::RENDER_TARGET_MEM, profiler::bytes_to_mb(report.render_target_textures));
1680        self.profile.set(profiler::PICTURE_TILES_MEM, profiler::bytes_to_mb(report.picture_tile_textures));
1681        self.profile.set(profiler::ATLAS_TEXTURES_MEM, profiler::bytes_to_mb(report.atlas_textures));
1682        self.profile.set(profiler::STANDALONE_TEXTURES_MEM, profiler::bytes_to_mb(report.standalone_textures));
1683
1684        self.profile.set(profiler::DEPTH_TARGETS_MEM, profiler::bytes_to_mb(self.device.depth_targets_memory()));
1685
1686        self.profile.set(profiler::TEXTURES_CREATED, self.device.textures_created);
1687        self.profile.set(profiler::TEXTURES_DELETED, self.device.textures_deleted);
1688
1689        results.stats.texture_upload_mb = self.profile.get_or(profiler::TEXTURE_UPLOADS_MEM, 0.0);
1690        self.frame_counter += 1;
1691        results.stats.resource_upload_time = self.resource_upload_time;
1692        self.resource_upload_time = 0.0;
1693        results.stats.gpu_cache_upload_time = self.gpu_cache_upload_time;
1694        self.gpu_cache_upload_time = 0.0;
1695
1696        if let Some(stats) = active_doc.frame_stats.take() {
1697            // Copy the full frame stats to RendererStats
1698            results.stats.merge(&stats);
1699
1700            self.profiler.update_frame_stats(stats);
1701        }
1702
1703        // Turn the render reasons bitflags into something we can see in the profiler.
1704        // For now this is just a binary yes/no for each bit, which means that when looking
1705        // at "Render reasons" in the profiler HUD the average view indicates the proportion
1706        // of frames that had the bit set over a half second window whereas max shows whether
1707        // the bit has been set at least once during that time window.
1708        // We could implement better ways to visualize this information.
1709        let add_markers = thread_is_being_profiled();
1710        for i in 0..RenderReasons::NUM_BITS {
1711            let counter = profiler::RENDER_REASON_FIRST + i as usize;
1712            let mut val = 0.0;
1713            let reason_bit = RenderReasons::from_bits_truncate(1 << i);
1714            if active_doc.render_reasons.contains(reason_bit) {
1715                val = 1.0;
1716                if add_markers {
1717                    let event_str = format!("Render reason {:?}", reason_bit);
1718                    add_event_marker(&event_str);
1719                }
1720            }
1721            self.profile.set(counter, val);
1722        }
1723        active_doc.render_reasons = RenderReasons::empty();
1724
1725
1726        self.texture_resolver.update_profile(&mut self.profile);
1727
1728        // Note: this clears the values in self.profile.
1729        self.profiler.set_counters(&mut self.profile);
1730
1731        // Note: profile counters must be set before this or they will count for next frame.
1732        self.profiler.update();
1733
1734        if self.debug_flags.intersects(DebugFlags::PROFILER_DBG | DebugFlags::PROFILER_CAPTURE) {
1735            if let Some(device_size) = device_size {
1736                //TODO: take the device-pixel ratio into account here?
1737                if let Some(debug_renderer) = self.debug.get_mut(&mut self.device) {
1738                    self.profiler.draw_profile(
1739                        self.frame_counter,
1740                        debug_renderer,
1741                        device_size,
1742                    );
1743                }
1744            }
1745        }
1746
1747        if self.debug_flags.contains(DebugFlags::ECHO_DRIVER_MESSAGES) {
1748            self.device.echo_driver_messages();
1749        }
1750
1751        if let Some(debug_renderer) = self.debug.try_get_mut() {
1752            let small_screen = self.debug_flags.contains(DebugFlags::SMALL_SCREEN);
1753            let scale = if small_screen { 1.6 } else { 1.0 };
1754            // TODO(gw): Tidy this up so that compositor config integrates better
1755            //           with the (non-compositor) surface y-flip options.
1756            let surface_origin_is_top_left = match self.current_compositor_kind {
1757                CompositorKind::Native { .. } => true,
1758                CompositorKind::Draw { .. } | CompositorKind::Layer { .. } => self.device.surface_origin_is_top_left(),
1759            };
1760            // If there is a debug overlay, render it. Otherwise, just clear
1761            // the debug renderer.
1762            debug_renderer.render(
1763                &mut self.device,
1764                debug_overlay.and(device_size),
1765                scale,
1766                surface_origin_is_top_left,
1767            );
1768        }
1769
1770        self.staging_texture_pool.end_frame(&mut self.device);
1771        self.texture_upload_pbo_pool.end_frame(&mut self.device);
1772        self.device.end_frame();
1773
1774        if debug_overlay.is_some() {
1775            self.last_time = current_time;
1776
1777            // Unbind the target for the debug overlay. No debug or profiler drawing
1778            // can occur after this point.
1779            self.unbind_debug_overlay();
1780        }
1781
1782        if device_size.is_some() {
1783            // Inform the client that we have finished this composition transaction if native
1784            // compositing is enabled. This must be called after any debug / profiling compositor
1785            // surfaces have been drawn and added to the visual tree.
1786            match self.current_compositor_kind {
1787                CompositorKind::Layer { .. } => {
1788                    let compositor = self.compositor_config.layer_compositor().unwrap();
1789                    compositor.end_frame();
1790                }
1791                CompositorKind::Native { .. } => {
1792                    profile_scope!("compositor.end_frame");
1793                    let compositor = self.compositor_config.compositor().unwrap();
1794                    compositor.end_frame(&mut self.device);
1795                }
1796                CompositorKind::Draw { .. } => {}
1797            }
1798        }
1799
1800        self.documents_seen.clear();
1801        self.shared_texture_cache_cleared = false;
1802
1803        self.check_gl_errors();
1804
1805        if self.renderer_errors.is_empty() {
1806            Ok(results)
1807        } else {
1808            Err(mem::replace(&mut self.renderer_errors, Vec::new()))
1809        }
1810    }
1811
1812    fn update_gpu_profile(&mut self, device_size: DeviceIntSize) {
1813        let _gm = self.gpu_profiler.start_marker("build samples");
1814        // Block CPU waiting for last frame's GPU profiles to arrive.
1815        // In general this shouldn't block unless heavily GPU limited.
1816        let (gpu_frame_id, timers, samplers) = self.gpu_profiler.build_samples();
1817
1818        if self.max_recorded_profiles > 0 {
1819            while self.gpu_profiles.len() >= self.max_recorded_profiles {
1820                self.gpu_profiles.pop_front();
1821            }
1822
1823            self.gpu_profiles.push_back(GpuProfile::new(gpu_frame_id, &timers));
1824        }
1825
1826        self.profiler.set_gpu_time_queries(timers);
1827
1828        if !samplers.is_empty() {
1829            let screen_fraction = 1.0 / device_size.to_f32().area();
1830
1831            fn accumulate_sampler_value(description: &str, samplers: &[GpuSampler]) -> f32 {
1832                let mut accum = 0.0;
1833                for sampler in samplers {
1834                    if sampler.tag.label != description {
1835                        continue;
1836                    }
1837
1838                    accum += sampler.count as f32;
1839                }
1840
1841                accum
1842            }
1843
1844            let alpha_targets = accumulate_sampler_value("Alpha targets", &samplers) * screen_fraction;
1845            let transparent_pass = accumulate_sampler_value("Transparent pass", &samplers) * screen_fraction;
1846            let opaque_pass = accumulate_sampler_value("Opaque pass", &samplers) * screen_fraction;
1847            self.profile.set(profiler::ALPHA_TARGETS_SAMPLERS, alpha_targets);
1848            self.profile.set(profiler::TRANSPARENT_PASS_SAMPLERS, transparent_pass);
1849            self.profile.set(profiler::OPAQUE_PASS_SAMPLERS, opaque_pass);
1850            self.profile.set(profiler::TOTAL_SAMPLERS, alpha_targets + transparent_pass + opaque_pass);
1851        }
1852    }
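
    // A minimal sketch of the sampler normalization above: GPU sample counts
    // are summed per label, then scaled by `1.0 / area` (the reciprocal of
    // the framebuffer pixel count) so the profiler counters read as "screens
    // worth of pixels" rather than raw sample counts. `(&str, u64)` pairs
    // stand in for the real `GpuSampler` entries.
    fn _normalized_sampler_sketch(samples: &[(&str, u64)], label: &str, area: f32) -> f32 {
        let total: u64 = samples
            .iter()
            .filter(|(tag, _)| *tag == label)
            .map(|(_, count)| *count)
            .sum();
        total as f32 / area
    }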
1853
1854    fn update_texture_cache(&mut self) {
1855        profile_scope!("update_texture_cache");
1856
1857        let _gm = self.gpu_profiler.start_marker("texture cache update");
1858        let mut pending_texture_updates = mem::replace(&mut self.pending_texture_updates, vec![]);
1859        self.pending_texture_cache_updates = false;
1860
1861        self.profile.start_time(profiler::TEXTURE_CACHE_UPDATE_TIME);
1862
1863        let mut create_cache_texture_time = 0;
1864        let mut delete_cache_texture_time = 0;
1865
1866        for update_list in pending_texture_updates.drain(..) {
1867            // Handle copies from one texture to another.
1868            for ((src_tex, dst_tex), copies) in &update_list.copies {
1869
1870                let dest_texture = &self.texture_resolver.texture_cache_map[&dst_tex].texture;
1871                let dst_texture_size = dest_texture.get_dimensions().to_f32();
1872
1873                let mut copy_instances = Vec::new();
1874                for copy in copies {
1875                    copy_instances.push(CopyInstance {
1876                        src_rect: copy.src_rect.to_f32(),
1877                        dst_rect: copy.dst_rect.to_f32(),
1878                        dst_texture_size,
1879                    });
1880                }
1881
1882                let draw_target = DrawTarget::from_texture(dest_texture, false);
1883                self.device.bind_draw_target(draw_target);
1884
1885                self.shaders
1886                    .borrow_mut()
1887                    .ps_copy()
1888                    .bind(
1889                        &mut self.device,
1890                        &Transform3D::identity(),
1891                        None,
1892                        &mut self.renderer_errors,
1893                        &mut self.profile,
1894                    );
1895
1896                self.draw_instanced_batch(
1897                    &copy_instances,
1898                    VertexArrayKind::Copy,
1899                    &BatchTextures::composite_rgb(
1900                        TextureSource::TextureCache(*src_tex, Swizzle::default())
1901                    ),
1902                    &mut RendererStats::default(),
1903                );
1904            }
1905
1906            // Find any textures that will need to be deleted in this group of allocations.
1907            let mut pending_deletes = Vec::new();
1908            for allocation in &update_list.allocations {
1909                let old = self.texture_resolver.texture_cache_map.remove(&allocation.id);
1910                match allocation.kind {
1911                    TextureCacheAllocationKind::Alloc(_) => {
1912                        assert!(old.is_none(), "Renderer and backend disagree!");
1913                    }
1914                    TextureCacheAllocationKind::Reset(_) |
1915                    TextureCacheAllocationKind::Free => {
1916                        assert!(old.is_some(), "Renderer and backend disagree!");
1917                    }
1918                }
1919                if let Some(old) = old {
1920
1921                    // Regenerate the cache allocation info so we can search through deletes for reuse.
1922                    let size = old.texture.get_dimensions();
1923                    let info = TextureCacheAllocInfo {
1924                        width: size.width,
1925                        height: size.height,
1926                        format: old.texture.get_format(),
1927                        filter: old.texture.get_filter(),
1928                        target: old.texture.get_target(),
1929                        is_shared_cache: old.texture.flags().contains(TextureFlags::IS_SHARED_TEXTURE_CACHE),
1930                        has_depth: old.texture.supports_depth(),
1931                        category: old.category,
1932                    };
1933                    pending_deletes.push((old.texture, info));
1934                }
1935            }
1936            // Look for any alloc or reset that has matching alloc info and save it from being deleted.
1937            let mut reused_textures = VecDeque::with_capacity(pending_deletes.len());
1938            for allocation in &update_list.allocations {
1939                match allocation.kind {
1940                    TextureCacheAllocationKind::Alloc(ref info) |
1941                    TextureCacheAllocationKind::Reset(ref info) => {
1942                        reused_textures.push_back(
1943                            pending_deletes.iter()
1944                                .position(|(_, old_info)| *old_info == *info)
1945                                .map(|index| pending_deletes.swap_remove(index).0)
1946                        );
1947                    }
1948                    TextureCacheAllocationKind::Free => {}
1949                }
1950            }
1951
1952            // Now that we've saved as many deletions for reuse as we can, actually delete whatever is left.
1953            if !pending_deletes.is_empty() {
1954                let delete_texture_start = precise_time_ns();
1955                for (texture, _) in pending_deletes {
1956                    add_event_marker("TextureCacheFree");
1957                    self.device.delete_texture(texture);
1958                }
1959                delete_cache_texture_time += precise_time_ns() - delete_texture_start;
1960            }
1961
1962            for allocation in update_list.allocations {
1963                match allocation.kind {
1964                    TextureCacheAllocationKind::Alloc(_) => add_event_marker("TextureCacheAlloc"),
1965                    TextureCacheAllocationKind::Reset(_) => add_event_marker("TextureCacheReset"),
1966                    TextureCacheAllocationKind::Free => {}
1967                };
1968                match allocation.kind {
1969                    TextureCacheAllocationKind::Alloc(ref info) |
1970                    TextureCacheAllocationKind::Reset(ref info) => {
1971                        let create_cache_texture_start = precise_time_ns();
1972                        // Create a new native texture, as requested by the texture cache.
1973                        // If we managed to reuse a deleted texture, then prefer that instead.
1974                        //
1975                        // Ensure no PBO is bound when creating the texture storage,
1976                        // or GL will attempt to read data from there.
1977                        let mut texture = reused_textures.pop_front().unwrap_or(None).unwrap_or_else(|| {
1978                            self.device.create_texture(
1979                                info.target,
1980                                info.format,
1981                                info.width,
1982                                info.height,
1983                                info.filter,
1984                                // This needs to be a render target because some render
1985                                // tasks get rendered into the texture cache.
1986                                Some(RenderTargetInfo { has_depth: info.has_depth }),
1987                            )
1988                        });
1989
1990                        if info.is_shared_cache {
1991                            texture.flags_mut()
1992                                .insert(TextureFlags::IS_SHARED_TEXTURE_CACHE);
1993
1994                            // On Mali-Gxx devices we use batched texture uploads as it performs much better.
1995                            // However, due to another driver bug we must ensure the textures are fully cleared,
1996                            // otherwise we get visual artefacts when blitting to the texture cache.
1997                            if self.device.use_batched_texture_uploads() &&
1998                                !self.device.get_capabilities().supports_render_target_partial_update
1999                            {
2000                                self.clear_texture(&texture, [0.0; 4]);
2001                            }
2002
2003                            // Textures in the cache generally don't need to be cleared,
2004                            // but we do so if the debug display is active to make it
2005                            // easier to identify unallocated regions.
2006                            if self.debug_flags.contains(DebugFlags::TEXTURE_CACHE_DBG) {
2007                                self.clear_texture(&texture, TEXTURE_CACHE_DBG_CLEAR_COLOR);
2008                            }
2009                        }
2010
2011                        create_cache_texture_time += precise_time_ns() - create_cache_texture_start;
2012
2013                        self.texture_resolver.texture_cache_map.insert(allocation.id, CacheTexture {
2014                            texture,
2015                            category: info.category,
2016                        });
2017                    }
2018                    TextureCacheAllocationKind::Free => {}
2019                };
2020            }
2021
2022            upload_to_texture_cache(self, update_list.updates);
2023
2024            self.check_gl_errors();
2025        }
2026
2027        if create_cache_texture_time > 0 {
2028            self.profile.set(
2029                profiler::CREATE_CACHE_TEXTURE_TIME,
2030                profiler::ns_to_ms(create_cache_texture_time)
2031            );
2032        }
2033        if delete_cache_texture_time > 0 {
2034            self.profile.set(
2035                profiler::DELETE_CACHE_TEXTURE_TIME,
2036                profiler::ns_to_ms(delete_cache_texture_time)
2037            )
2038        }
2039
2040        let t = self.profile.end_time(profiler::TEXTURE_CACHE_UPDATE_TIME);
2041        self.resource_upload_time += t;
2042        Telemetry::record_texture_cache_update_time(Duration::from_micros((t * 1000.00) as u64));
2043
2044        drain_filter(
2045            &mut self.notifications,
2046            |n| { n.when() == Checkpoint::FrameTexturesUpdated },
2047            |n| { n.notify(); },
2048        );
2049    }
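
    // A minimal sketch (with `String` standing in for the GPU texture and
    // `u32` for `TextureCacheAllocInfo`) of the reuse strategy used in
    // `update_texture_cache` above: textures queued for deletion are matched
    // against incoming allocations by their allocation info, and matches are
    // handed back out instead of being deleted; the leftovers really get
    // deleted.
    fn _texture_reuse_sketch(
        mut pending_deletes: Vec<(String, u32)>,
        allocations: &[u32],
    ) -> (std::collections::VecDeque<Option<String>>, Vec<(String, u32)>) {
        let mut reused = std::collections::VecDeque::with_capacity(pending_deletes.len());
        for info in allocations {
            // `swap_remove` keeps each match O(1); ordering doesn't matter
            // because whatever survives is deleted anyway.
            reused.push_back(
                pending_deletes
                    .iter()
                    .position(|(_, old_info)| old_info == info)
                    .map(|index| pending_deletes.swap_remove(index).0),
            );
        }
        (reused, pending_deletes)
    }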
2050
2051    fn check_gl_errors(&mut self) {
2052        let err = self.device.gl().get_error();
2053        if err == gl::OUT_OF_MEMORY {
2054            self.renderer_errors.push(RendererError::OutOfMemory);
2055        }
2056
2057        // TODO: consider checking for and reporting other GL error codes here.
2058    }
2059
2060    fn bind_textures(&mut self, textures: &BatchTextures) {
2061        for i in 0 .. 3 {
2062            self.texture_resolver.bind(
2063                &textures.input.colors[i],
2064                TextureSampler::color(i),
2065                &mut self.device,
2066            );
2067        }
2068
2069        self.texture_resolver.bind(
2070            &textures.clip_mask,
2071            TextureSampler::ClipMask,
2072            &mut self.device,
2073        );
2074
2075        // TODO: this probably isn't the best place for this.
2076        if let Some(ref texture) = self.dither_matrix_texture {
2077            self.device.bind_texture(TextureSampler::Dither, texture, Swizzle::default());
2078        }
2079    }
2080
2081    fn draw_instanced_batch<T: Clone>(
2082        &mut self,
2083        data: &[T],
2084        vertex_array_kind: VertexArrayKind,
2085        textures: &BatchTextures,
2086        stats: &mut RendererStats,
2087    ) {
2088        self.bind_textures(textures);
2089
2090        // If we end up with an empty draw call here, that means we have
2091        // probably introduced unnecessary batch breaks during frame
2092        // building - so we should be catching this earlier and removing
2093        // the batch.
2094        debug_assert!(!data.is_empty());
2095
2096        let vao = &self.vaos[vertex_array_kind];
2097        self.device.bind_vao(vao);
2098
2099        let chunk_size = if self.debug_flags.contains(DebugFlags::DISABLE_BATCHING) {
2100            1
2101        } else if vertex_array_kind == VertexArrayKind::Primitive {
2102            self.max_primitive_instance_count
2103        } else {
2104            data.len()
2105        };
2106
2107        for chunk in data.chunks(chunk_size) {
2108            if self.enable_instancing {
2109                self.device
2110                    .update_vao_instances(vao, chunk, ONE_TIME_USAGE_HINT, None);
2111                self.device
2112                    .draw_indexed_triangles_instanced_u16(6, chunk.len() as i32);
2113            } else {
2114                self.device
2115                    .update_vao_instances(vao, chunk, ONE_TIME_USAGE_HINT, NonZeroUsize::new(4));
2116                self.device
2117                    .draw_indexed_triangles(6 * chunk.len() as i32);
2118            }
2119            self.profile.inc(profiler::DRAW_CALLS);
2120            stats.total_draw_calls += 1;
2121        }
2122
2123        self.profile.add(profiler::VERTICES, 6 * data.len());
2124    }
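
    // A minimal sketch of the chunking in `draw_instanced_batch` above: a
    // chunk size of 1 is used when batching is disabled for debugging,
    // otherwise instances are split into `max_chunk`-sized chunks
    // (`max_chunk` standing in for either `max_primitive_instance_count` or
    // the full batch length), and every chunk costs one draw call. Returns
    // how many draw calls `len` instances would issue.
    fn _chunked_draw_call_count(len: usize, disable_batching: bool, max_chunk: usize) -> usize {
        if len == 0 {
            return 0;
        }
        let chunk_size = if disable_batching { 1 } else { max_chunk.max(1) };
        // Equivalent to `data.chunks(chunk_size).count()`.
        (len + chunk_size - 1) / chunk_size
    }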
2125
2126    fn handle_readback_composite(
2127        &mut self,
2128        draw_target: DrawTarget,
2129        uses_scissor: bool,
2130        backdrop: &RenderTask,
2131        readback: &RenderTask,
2132    ) {
2133        // Extract the rectangle, in the backdrop surface's device space, that
2134        // we need to read from.
2135        let readback_origin = match readback.kind {
2136            RenderTaskKind::Readback(ReadbackTask { readback_origin: Some(o), .. }) => o,
2137            RenderTaskKind::Readback(ReadbackTask { readback_origin: None, .. }) => {
2138                // If this is a dummy readback, just early out. We know that the
2139                // clear of the target will ensure the task rect is already zero alpha,
2140                // so it won't affect the rendering output.
2141                return;
2142            }
2143            _ => unreachable!(),
2144        };
2145
2146        if uses_scissor {
2147            self.device.disable_scissor();
2148        }
2149
2150        let texture_source = TextureSource::TextureCache(
2151            readback.get_target_texture(),
2152            Swizzle::default(),
2153        );
2154        let (cache_texture, _) = self.texture_resolver
2155            .resolve(&texture_source).expect("bug: no source texture");
2156
2157        // Before submitting the composite batch, do the
2158        // framebuffer readbacks that are needed for each
2159        // composite operation in this batch.
2160        let readback_rect = readback.get_target_rect();
2161        let backdrop_rect = backdrop.get_target_rect();
2162        let (backdrop_screen_origin, _) = match backdrop.kind {
2163            RenderTaskKind::Picture(ref task_info) => (task_info.content_origin, task_info.device_pixel_scale),
2164            _ => panic!("bug: composite on non-picture?"),
2165        };
2166
2167        // Bind the FBO to blit the backdrop to.
2168        // Called per-instance in case the FBO changes. The device will skip
2169        // the GL call if the requested target is already bound.
2170        let cache_draw_target = DrawTarget::from_texture(
2171            cache_texture,
2172            false,
2173        );
2174
2175        // Get the rect that we ideally want, in the space of the parent surface
2176        let wanted_rect = DeviceRect::from_origin_and_size(
2177            readback_origin,
2178            readback_rect.size().to_f32(),
2179        );
2180
2181        // Get the rect that is available on the parent surface. It may be smaller
2182        // than desired because this is a picture cache tile covering only part of
2183        // the wanted rect and/or because the parent surface was clipped.
2184        let avail_rect = DeviceRect::from_origin_and_size(
2185            backdrop_screen_origin,
2186            backdrop_rect.size().to_f32(),
2187        );
2188
2189        if let Some(int_rect) = wanted_rect.intersection(&avail_rect) {
2190            // If there is a valid intersection, work out the correct origins and
2191            // sizes of the copy rects, and do the blit.
2192            let copy_size = int_rect.size().to_i32();
2193
2194            let src_origin = backdrop_rect.min.to_f32() +
2195                int_rect.min.to_vector() -
2196                backdrop_screen_origin.to_vector();
2197
2198            let src = DeviceIntRect::from_origin_and_size(
2199                src_origin.to_i32(),
2200                copy_size,
2201            );
2202
2203            let dest_origin = readback_rect.min.to_f32() +
2204                int_rect.min.to_vector() -
2205                readback_origin.to_vector();
2206
2207            let dest = DeviceIntRect::from_origin_and_size(
2208                dest_origin.to_i32(),
2209                copy_size,
2210            );
2211
2212            // Should always be drawing to picture cache tiles or an off-screen surface!
2213            debug_assert!(!draw_target.is_default());
2214            let device_to_framebuffer = Scale::new(1i32);
2215
2216            self.device.blit_render_target(
2217                draw_target.into(),
2218                src * device_to_framebuffer,
2219                cache_draw_target,
2220                dest * device_to_framebuffer,
2221                TextureFilter::Linear,
2222            );
2223        }
2224
2225        // Restore draw target to current pass render target, and reset
2226        // the read target.
2227        self.device.bind_draw_target(draw_target);
2228        self.device.reset_read_target();
2229
2230        if uses_scissor {
2231            self.device.enable_scissor();
2232        }
2233    }
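
    // A minimal 1-D sketch of the copy computed in `handle_readback_composite`
    // above: intersect the interval we want with the interval the backdrop
    // actually covers, then express the intersection in each task's local
    // coordinates by rebasing onto that task's target-rect origin.
    // `(origin, size)` pairs of `i32` stand in for the device rects.
    fn _readback_copy_sketch(
        wanted: (i32, i32),
        avail: (i32, i32),
        src_task_min: i32,
        dst_task_min: i32,
    ) -> Option<(i32, i32, i32)> {
        let min = wanted.0.max(avail.0);
        let max = (wanted.0 + wanted.1).min(avail.0 + avail.1);
        if min >= max {
            // No overlap; there is nothing to blit.
            return None;
        }
        // (src origin, dest origin, size) of the blit.
        Some((
            src_task_min + (min - avail.0),
            dst_task_min + (min - wanted.0),
            max - min,
        ))
    }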
2234
2235    fn handle_resolves(
2236        &mut self,
2237        resolve_ops: &[ResolveOp],
2238        render_tasks: &RenderTaskGraph,
2239        draw_target: DrawTarget,
2240    ) {
2241        if resolve_ops.is_empty() {
2242            return;
2243        }
2244
2245        let _timer = self.gpu_profiler.start_timer(GPU_TAG_BLIT);
2246
2247        for resolve_op in resolve_ops {
2248            self.handle_resolve(
2249                resolve_op,
2250                render_tasks,
2251                draw_target,
2252            );
2253        }
2254
2255        self.device.reset_read_target();
2256    }
2257
2258    fn handle_prims(
2259        &mut self,
2260        draw_target: &DrawTarget,
2261        prim_instances: &[FastHashMap<TextureSource, FrameVec<PrimitiveInstanceData>>],
2262        prim_instances_with_scissor: &FastHashMap<(DeviceIntRect, PatternKind), FastHashMap<TextureSource, FrameVec<PrimitiveInstanceData>>>,
2263        projection: &default::Transform3D<f32>,
2264        stats: &mut RendererStats,
2265    ) {
2266        self.device.disable_depth_write();
2267
2268        {
2269            let _timer = self.gpu_profiler.start_timer(GPU_TAG_INDIRECT_PRIM);
2270
2271            self.set_blend(false, FramebufferKind::Other);
2272
2273            for (pattern_idx, prim_instances_map) in prim_instances.iter().enumerate() {
2274                if prim_instances_map.is_empty() {
2275                    continue;
2276                }
2277                let pattern = PatternKind::from_u32(pattern_idx as u32);
2278
2279                self.shaders.borrow_mut().get_quad_shader(pattern).bind(
2280                    &mut self.device,
2281                    projection,
2282                    None,
2283                    &mut self.renderer_errors,
2284                    &mut self.profile,
2285                );
2286
2287                for (texture_source, prim_instances) in prim_instances_map {
2288                    let texture_bindings = BatchTextures::composite_rgb(*texture_source);
2289
2290                    self.draw_instanced_batch(
2291                        prim_instances,
2292                        VertexArrayKind::Primitive,
2293                        &texture_bindings,
2294                        stats,
2295                    );
2296                }
2297            }
2298
2299            if !prim_instances_with_scissor.is_empty() {
2300                self.set_blend(true, FramebufferKind::Other);
2301                self.device.set_blend_mode_premultiplied_alpha();
2302                self.device.enable_scissor();
2303
2304                let mut prev_pattern = None;
2305
2306                for ((scissor_rect, pattern), prim_instances_map) in prim_instances_with_scissor {
2307                    if prev_pattern != Some(*pattern) {
2308                        prev_pattern = Some(*pattern);
2309                        self.shaders.borrow_mut().get_quad_shader(*pattern).bind(
2310                            &mut self.device,
2311                            projection,
2312                            None,
2313                            &mut self.renderer_errors,
2314                            &mut self.profile,
2315                        );
2316                    }
2317
2318                    self.device.set_scissor_rect(draw_target.to_framebuffer_rect(*scissor_rect));
2319
2320                    for (texture_source, prim_instances) in prim_instances_map {
2321                        let texture_bindings = BatchTextures::composite_rgb(*texture_source);
2322
2323                        self.draw_instanced_batch(
2324                            prim_instances,
2325                            VertexArrayKind::Primitive,
2326                            &texture_bindings,
2327                            stats,
2328                        );
2329                    }
2330                }
2331
2332                self.device.disable_scissor();
2333            }
2334        }
2335    }
2336
2337    fn handle_clips(
2338        &mut self,
2339        draw_target: &DrawTarget,
2340        masks: &ClipMaskInstanceList,
2341        projection: &default::Transform3D<f32>,
2342        stats: &mut RendererStats,
2343    ) {
2344        self.device.disable_depth_write();
2345
2346        {
2347            let _timer = self.gpu_profiler.start_timer(GPU_TAG_INDIRECT_MASK);
2348
2349            self.set_blend(true, FramebufferKind::Other);
2350            self.set_blend_mode_multiply(FramebufferKind::Other);
2351
2352            if !masks.mask_instances_fast.is_empty() {
2353                self.shaders.borrow_mut().ps_mask_fast().bind(
2354                    &mut self.device,
2355                    projection,
2356                    None,
2357                    &mut self.renderer_errors,
2358                    &mut self.profile,
2359                );
2360
2361                self.draw_instanced_batch(
2362                    &masks.mask_instances_fast,
2363                    VertexArrayKind::Mask,
2364                    &BatchTextures::empty(),
2365                    stats,
2366                );
2367            }
2368
2369            if !masks.mask_instances_fast_with_scissor.is_empty() {
2370                self.shaders.borrow_mut().ps_mask_fast().bind(
2371                    &mut self.device,
2372                    projection,
2373                    None,
2374                    &mut self.renderer_errors,
2375                    &mut self.profile,
2376                );
2377
2378                self.device.enable_scissor();
2379
2380                for (scissor_rect, instances) in &masks.mask_instances_fast_with_scissor {
2381                    self.device.set_scissor_rect(draw_target.to_framebuffer_rect(*scissor_rect));
2382
2383                    self.draw_instanced_batch(
2384                        instances,
2385                        VertexArrayKind::Mask,
2386                        &BatchTextures::empty(),
2387                        stats,
2388                    );
2389                }
2390
2391                self.device.disable_scissor();
2392            }
2393
2394            if !masks.image_mask_instances.is_empty() {
2395                self.shaders.borrow_mut().ps_quad_textured().bind(
2396                    &mut self.device,
2397                    projection,
2398                    None,
2399                    &mut self.renderer_errors,
2400                    &mut self.profile,
2401                );
2402
2403                for (texture, prim_instances) in &masks.image_mask_instances {
2404                    self.draw_instanced_batch(
2405                        prim_instances,
2406                        VertexArrayKind::Primitive,
2407                        &BatchTextures::composite_rgb(*texture),
2408                        stats,
2409                    );
2410                }
2411            }
2412
2413            if !masks.image_mask_instances_with_scissor.is_empty() {
2414                self.device.enable_scissor();
2415
2416                self.shaders.borrow_mut().ps_quad_textured().bind(
2417                    &mut self.device,
2418                    projection,
2419                    None,
2420                    &mut self.renderer_errors,
2421                    &mut self.profile,
2422                );
2423
2424                for ((scissor_rect, texture), prim_instances) in &masks.image_mask_instances_with_scissor {
2425                    self.device.set_scissor_rect(draw_target.to_framebuffer_rect(*scissor_rect));
2426
2427                    self.draw_instanced_batch(
2428                        prim_instances,
2429                        VertexArrayKind::Primitive,
2430                        &BatchTextures::composite_rgb(*texture),
2431                        stats,
2432                    );
2433                }
2434
2435                self.device.disable_scissor();
2436            }
2437
2438            if !masks.mask_instances_slow.is_empty() {
2439                self.shaders.borrow_mut().ps_mask().bind(
2440                    &mut self.device,
2441                    projection,
2442                    None,
2443                    &mut self.renderer_errors,
2444                    &mut self.profile,
2445                );
2446
2447                self.draw_instanced_batch(
2448                    &masks.mask_instances_slow,
2449                    VertexArrayKind::Mask,
2450                    &BatchTextures::empty(),
2451                    stats,
2452                );
2453            }
2454
2455            if !masks.mask_instances_slow_with_scissor.is_empty() {
2456                self.shaders.borrow_mut().ps_mask().bind(
2457                    &mut self.device,
2458                    projection,
2459                    None,
2460                    &mut self.renderer_errors,
2461                    &mut self.profile,
2462                );
2463
2464                self.device.enable_scissor();
2465
2466                for (scissor_rect, instances) in &masks.mask_instances_slow_with_scissor {
2467                    self.device.set_scissor_rect(draw_target.to_framebuffer_rect(*scissor_rect));
2468
2469                    self.draw_instanced_batch(
2470                        instances,
2471                        VertexArrayKind::Mask,
2472                        &BatchTextures::empty(),
2473                        stats,
2474                    );
2475                }
2476
2477                self.device.disable_scissor();
2478            }
2479        }
2480    }
2481
2482    fn handle_blits(
2483        &mut self,
2484        blits: &[BlitJob],
2485        render_tasks: &RenderTaskGraph,
2486        draw_target: DrawTarget,
2487    ) {
2488        if blits.is_empty() {
2489            return;
2490        }
2491
2492        let _timer = self.gpu_profiler.start_timer(GPU_TAG_BLIT);
2493
2494        // TODO(gw): For now, we don't bother batching these by source texture.
2495        //           If it ever shows up as an issue, we can easily batch them.
2496        for blit in blits {
2497            let (source, source_rect) = {
2498                // A blit from the child render task into this target.
2499                // TODO(gw): Support R8 format here once we start
2500                //           creating mips for alpha masks.
2501                let task = &render_tasks[blit.source];
2502                let source_rect = blit.source_rect.translate(task.get_target_rect().min.to_vector());
2503                let source_texture = task.get_texture_source();
2504
2505                (source_texture, source_rect)
2506            };
2507
2508            let (texture, swizzle) = self.texture_resolver
2509                .resolve(&source)
2510                .expect("BUG: invalid source texture");
2511
2512            if swizzle != Swizzle::default() {
2513                error!("Swizzle {:?} can't be handled by a blit", swizzle);
2514            }
2515
2516            let read_target = DrawTarget::from_texture(
2517                texture,
2518                false,
2519            );
2520
2521            self.device.blit_render_target(
2522                read_target.into(),
2523                read_target.to_framebuffer_rect(source_rect),
2524                draw_target,
2525                draw_target.to_framebuffer_rect(blit.target_rect),
2526                TextureFilter::Linear,
2527            );
2528        }
2529    }
2530
2531    fn handle_scaling(
2532        &mut self,
2533        scalings: &FastHashMap<TextureSource, FrameVec<ScalingInstance>>,
2534        projection: &default::Transform3D<f32>,
2535        stats: &mut RendererStats,
2536    ) {
2537        if scalings.is_empty() {
2538            return
2539        }
2540
2541        let _timer = self.gpu_profiler.start_timer(GPU_TAG_SCALE);
2542        for (source, instances) in scalings {
2543            let buffer_kind = source.image_buffer_kind();
2544
2545            // When the source texture is an external texture, the UV rect is not known
2546            // when the external surface descriptor is created, because external textures
2547            // are not resolved until the lock() callback is invoked at the start of the
2548            // frame render. We must therefore override the source rects now.
2549            let uv_override_instances;
2550            let instances = match source {
2551                TextureSource::External(..) => {
2552                    uv_override_instances = instances.iter().map(|instance| {
2553                        let mut new_instance = instance.clone();
2554                        let texel_rect: TexelRect = self.texture_resolver.get_uv_rect(
2555                            &source,
2556                            instance.source_rect.cast().into()
2557                        ).into();
2558                        new_instance.source_rect = DeviceRect::new(texel_rect.uv0, texel_rect.uv1);
2559                        new_instance
2560                    }).collect::<Vec<_>>();
2561                    uv_override_instances.as_slice()
2562                }
2563                _ => instances.as_slice()
2564            };
2565
2566            self.shaders
2567                .borrow_mut()
2568                .get_scale_shader(buffer_kind)
2569                .bind(
2570                    &mut self.device,
2571                    &projection,
2572                    Some(self.texture_resolver.get_texture_size(source).to_f32()),
2573                    &mut self.renderer_errors,
2574                    &mut self.profile,
2575                );
2576
2577            self.draw_instanced_batch(
2578                instances,
2579                VertexArrayKind::Scale,
2580                &BatchTextures::composite_rgb(*source),
2581                stats,
2582            );
2583        }
2584    }
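
    // A minimal sketch of the lifetime idiom used in `handle_scaling` above:
    // a binding is declared before the branch so that a vector built in one
    // arm outlives the branch, letting both arms be viewed through a single
    // slice binding. Names here are illustrative only.
    fn _deferred_override_sketch(rewrite: bool, instances: &[u32]) -> u32 {
        let overridden;
        let instances: &[u32] = if rewrite {
            // Build the rewritten copy only when needed; it lives as long as
            // `overridden`, i.e. past the end of this branch.
            overridden = instances.iter().map(|i| i + 1).collect::<Vec<u32>>();
            overridden.as_slice()
        } else {
            instances
        };
        instances.iter().sum()
    }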
2585
2586    fn handle_svg_filters(
2587        &mut self,
2588        textures: &BatchTextures,
2589        svg_filters: &[SvgFilterInstance],
2590        projection: &default::Transform3D<f32>,
2591        stats: &mut RendererStats,
2592    ) {
2593        if svg_filters.is_empty() {
2594            return;
2595        }
2596
2597        let _timer = self.gpu_profiler.start_timer(GPU_TAG_SVG_FILTER);
2598
2599        self.shaders.borrow_mut().cs_svg_filter().bind(
2600            &mut self.device,
2601            &projection,
2602            None,
2603            &mut self.renderer_errors,
2604            &mut self.profile,
2605        );
2606
2607        self.draw_instanced_batch(
2608            &svg_filters,
2609            VertexArrayKind::SvgFilter,
2610            textures,
2611            stats,
2612        );
2613    }
2614
2615    fn handle_svg_nodes(
2616        &mut self,
2617        textures: &BatchTextures,
2618        svg_filters: &[SVGFEFilterInstance],
2619        projection: &default::Transform3D<f32>,
2620        stats: &mut RendererStats,
2621    ) {
2622        if svg_filters.is_empty() {
2623            return;
2624        }
2625
2626        let _timer = self.gpu_profiler.start_timer(GPU_TAG_SVG_FILTER_NODES);
2627
2628        self.shaders.borrow_mut().cs_svg_filter_node().bind(
2629            &mut self.device,
2630            &projection,
2631            None,
2632            &mut self.renderer_errors,
2633            &mut self.profile,
2634        );
2635
2636        self.draw_instanced_batch(
2637            &svg_filters,
2638            VertexArrayKind::SvgFilterNode,
2639            textures,
2640            stats,
2641        );
2642    }
2643
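    /// Resolve a picture by blitting the overlapping regions of one or more
    /// source picture tasks into the destination picture task.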
2644    fn handle_resolve(
2645        &mut self,
2646        resolve_op: &ResolveOp,
2647        render_tasks: &RenderTaskGraph,
2648        draw_target: DrawTarget,
2649    ) {
2650        for src_task_id in &resolve_op.src_task_ids {
2651            let src_task = &render_tasks[*src_task_id];
2652            let src_info = match src_task.kind {
2653                RenderTaskKind::Picture(ref info) => info,
2654                _ => panic!("bug: not a picture"),
2655            };
2656            let src_task_rect = src_task.get_target_rect().to_f32();
2657
2658            let dest_task = &render_tasks[resolve_op.dest_task_id];
2659            let dest_info = match dest_task.kind {
2660                RenderTaskKind::Picture(ref info) => info,
2661                _ => panic!("bug: not a picture"),
2662            };
2663            let dest_task_rect = dest_task.get_target_rect().to_f32();
2664
2665            // Get the rect that we ideally want, in space of the parent surface
2666            let wanted_rect = DeviceRect::from_origin_and_size(
2667                dest_info.content_origin,
2668                dest_task_rect.size().to_f32(),
2669            ).cast_unit() * dest_info.device_pixel_scale.inverse();
2670
2671            // Get the rect that is available on the parent surface. It may be smaller
2672            // than desired because this is a picture cache tile covering only part of
2673            // the wanted rect and/or because the parent surface was clipped.
2674            let avail_rect = DeviceRect::from_origin_and_size(
2675                src_info.content_origin,
2676                src_task_rect.size().to_f32(),
2677            ).cast_unit() * src_info.device_pixel_scale.inverse();
2678
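            // Both rects above were divided by their device pixel scale, so they
            // are in the same scale-independent space and can be intersected even
            // if the two tasks were rasterized at different scales.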
2679            if let Some(device_int_rect) = wanted_rect.intersection(&avail_rect) {
2680                let src_int_rect = (device_int_rect * src_info.device_pixel_scale).cast_unit();
2681                let dest_int_rect = (device_int_rect * dest_info.device_pixel_scale).cast_unit();
2682
2683                // If there is a valid intersection, work out the correct origins and
2684                // sizes of the copy rects, and do the blit.
2685
2686                let src_origin = src_task_rect.min.to_f32() +
2687                    src_int_rect.min.to_vector() -
2688                    src_info.content_origin.to_vector();
2689
2690                let src = DeviceIntRect::from_origin_and_size(
2691                    src_origin.to_i32(),
2692                    src_int_rect.size().round().to_i32(),
2693                );
2694
2695                let dest_origin = dest_task_rect.min.to_f32() +
2696                    dest_int_rect.min.to_vector() -
2697                    dest_info.content_origin.to_vector();
2698
2699                let dest = DeviceIntRect::from_origin_and_size(
2700                    dest_origin.to_i32(),
2701                    dest_int_rect.size().round().to_i32(),
2702                );
2703
2704                let texture_source = TextureSource::TextureCache(
2705                    src_task.get_target_texture(),
2706                    Swizzle::default(),
2707                );
2708                let (cache_texture, _) = self.texture_resolver
2709                    .resolve(&texture_source).expect("bug: no source texture");
2710
2711                let read_target = ReadTarget::from_texture(cache_texture);
2712
2713                // We should always be drawing to picture cache tiles or an off-screen surface!
2714                debug_assert!(!draw_target.is_default());
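                // Since we're not drawing to the default framebuffer (see the
                // assert above), device and framebuffer pixels coincide, so the
                // conversion is an identity scale.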
2715                let device_to_framebuffer = Scale::new(1i32);
2716
2717                self.device.blit_render_target(
2718                    read_target,
2719                    src * device_to_framebuffer,
2720                    draw_target,
2721                    dest * device_to_framebuffer,
2722                    TextureFilter::Linear,
2723                );
2724            }
2725        }
2726    }
2727
2728    fn draw_picture_cache_target(
2729        &mut self,
2730        target: &PictureCacheTarget,
2731        draw_target: DrawTarget,
2732        projection: &default::Transform3D<f32>,
2733        render_tasks: &RenderTaskGraph,
2734        stats: &mut RendererStats,
2735    ) {
2736        profile_scope!("draw_picture_cache_target");
2737
2738        self.profile.inc(profiler::RENDERED_PICTURE_TILES);
2739        let _gm = self.gpu_profiler.start_marker("picture cache target");
2740        let framebuffer_kind = FramebufferKind::Other;
2741
2742        {
2743            let _timer = self.gpu_profiler.start_timer(GPU_TAG_SETUP_TARGET);
2744            self.device.bind_draw_target(draw_target);
2745
2746            if self.device.get_capabilities().supports_qcom_tiled_rendering {
2747                self.device.gl().start_tiling_qcom(
2748                    target.dirty_rect.min.x.max(0) as _,
2749                    target.dirty_rect.min.y.max(0) as _,
2750                    target.dirty_rect.width() as _,
2751                    target.dirty_rect.height() as _,
2752                    0,
2753                );
2754            }
2755
2756            self.device.enable_depth_write();
2757            self.set_blend(false, framebuffer_kind);
2758
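            // Restrict the clear to the dirty rect where the device supports
            // partial render target updates; otherwise clear the whole target.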
2759            let clear_color = target.clear_color.map(|c| c.to_array());
2760            let scissor_rect = if self.device.get_capabilities().supports_render_target_partial_update
2761                && (target.dirty_rect != target.valid_rect
2762                    || self.device.get_capabilities().prefers_clear_scissor)
2763            {
2764                Some(target.dirty_rect)
2765            } else {
2766                None
2767            };
2768            match scissor_rect {
2769                // If updating only a dirty rect within a picture cache target, the
2770                // clear must also be scissored to that dirty region.
2771                Some(r) if self.clear_caches_with_quads => {
2772                    self.device.enable_depth(DepthFunction::Always);
2773                    // Save the draw call count so that our reftests don't get confused...
2774                    let old_draw_call_count = stats.total_draw_calls;
2775                    if clear_color.is_none() {
2776                        self.device.disable_color_write();
2777                    }
2778                    let instance = ClearInstance {
2779                        rect: [
2780                            r.min.x as f32, r.min.y as f32,
2781                            r.max.x as f32, r.max.y as f32,
2782                        ],
2783                        color: clear_color.unwrap_or([0.0; 4]),
2784                    };
2785                    self.shaders.borrow_mut().ps_clear().bind(
2786                        &mut self.device,
2787                        &projection,
2788                        None,
2789                        &mut self.renderer_errors,
2790                        &mut self.profile,
2791                    );
2792                    self.draw_instanced_batch(
2793                        &[instance],
2794                        VertexArrayKind::Clear,
2795                        &BatchTextures::empty(),
2796                        stats,
2797                    );
2798                    if clear_color.is_none() {
2799                        self.device.enable_color_write();
2800                    }
2801                    stats.total_draw_calls = old_draw_call_count;
2802                    self.device.disable_depth();
2803                }
2804                other => {
2805                    let scissor_rect = other.map(|rect| {
2806                        draw_target.build_scissor_rect(Some(rect))
2807                    });
2808                    self.device.clear_target(clear_color, Some(1.0), scissor_rect);
2809                }
2810            };
2811            self.device.disable_depth_write();
2812        }
2813
2814        match target.kind {
2815            PictureCacheTargetKind::Draw { ref alpha_batch_container } => {
2816                self.draw_alpha_batch_container(
2817                    alpha_batch_container,
2818                    draw_target,
2819                    framebuffer_kind,
2820                    projection,
2821                    render_tasks,
2822                    stats,
2823                );
2824            }
2825            PictureCacheTargetKind::Blit { task_id, sub_rect_offset } => {
2826                let src_task = &render_tasks[task_id];
2827                let (texture, _swizzle) = self.texture_resolver
2828                    .resolve(&src_task.get_texture_source())
2829                    .expect("BUG: invalid source texture");
2830
2831                let src_task_rect = src_task.get_target_rect();
2832
2833                let p0 = src_task_rect.min + sub_rect_offset;
2834                let p1 = p0 + target.dirty_rect.size();
2835                let src_rect = DeviceIntRect::new(p0, p1);
2836
2837                // TODO(gw): In future, it'd be tidier to have the draw target offset
2838                //           for DC surfaces handled by `blit_render_target`. However,
2839                //           for now they are only ever written to here.
2840                let target_rect = target
2841                    .dirty_rect
2842                    .translate(draw_target.offset().to_vector())
2843                    .cast_unit();
2844
2845                self.device.blit_render_target(
2846                    ReadTarget::from_texture(texture),
2847                    src_rect.cast_unit(),
2848                    draw_target,
2849                    target_rect,
2850                    TextureFilter::Nearest,
2851                );
2852            }
2853        }
2854
2855        self.device.invalidate_depth_target();
2856        if self.device.get_capabilities().supports_qcom_tiled_rendering {
2857            self.device.gl().end_tiling_qcom(gl::COLOR_BUFFER_BIT0_QCOM);
2858        }
2859    }
2860
2861    /// Draw an alpha batch container into a given draw target. This is used
2862    /// by both color and picture cache target kinds.
2863    fn draw_alpha_batch_container(
2864        &mut self,
2865        alpha_batch_container: &AlphaBatchContainer,
2866        draw_target: DrawTarget,
2867        framebuffer_kind: FramebufferKind,
2868        projection: &default::Transform3D<f32>,
2869        render_tasks: &RenderTaskGraph,
2870        stats: &mut RendererStats,
2871    ) {
2872        let uses_scissor = alpha_batch_container.task_scissor_rect.is_some();
2873
2874        if uses_scissor {
2875            self.device.enable_scissor();
2876            let scissor_rect = draw_target.build_scissor_rect(
2877                alpha_batch_container.task_scissor_rect,
2878            );
2879            self.device.set_scissor_rect(scissor_rect);
2880        }
2881
2882        if !alpha_batch_container.opaque_batches.is_empty()
2883            && !self.debug_flags.contains(DebugFlags::DISABLE_OPAQUE_PASS) {
2884            let _gl = self.gpu_profiler.start_marker("opaque batches");
2885            let opaque_sampler = self.gpu_profiler.start_sampler(GPU_SAMPLER_TAG_OPAQUE);
2886            self.set_blend(false, framebuffer_kind);
2887            // Note: depth equality is needed for split planes
2888            self.device.enable_depth(DepthFunction::LessEqual);
2889            self.device.enable_depth_write();
2890
2891            // Draw opaque batches front-to-back for maximum
2892            // z-buffer efficiency!
2893            for batch in alpha_batch_container
2894                .opaque_batches
2895                .iter()
2896                .rev()
2897                {
2898                    if should_skip_batch(&batch.key.kind, self.debug_flags) {
2899                        continue;
2900                    }
2901
2902                    self.shaders.borrow_mut()
2903                        .get(&batch.key, batch.features, self.debug_flags, &self.device)
2904                        .bind(
2905                            &mut self.device, projection, None,
2906                            &mut self.renderer_errors,
2907                            &mut self.profile,
2908                        );
2909
2910                    let _timer = self.gpu_profiler.start_timer(batch.key.kind.sampler_tag());
2911                    self.draw_instanced_batch(
2912                        &batch.instances,
2913                        VertexArrayKind::Primitive,
2914                        &batch.key.textures,
2915                        stats
2916                    );
2917                }
2918
2919            self.device.disable_depth_write();
2920            self.gpu_profiler.finish_sampler(opaque_sampler);
2921        } else {
2922            self.device.disable_depth();
2923        }
2924
2925        if !alpha_batch_container.alpha_batches.is_empty()
2926            && !self.debug_flags.contains(DebugFlags::DISABLE_ALPHA_PASS) {
2927            let _gl = self.gpu_profiler.start_marker("alpha batches");
2928            let transparent_sampler = self.gpu_profiler.start_sampler(GPU_SAMPLER_TAG_TRANSPARENT);
2929            self.set_blend(true, framebuffer_kind);
2930
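            // Track the last blend mode that was set, so that redundant GL
            // blend state changes can be skipped below.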
2931            let mut prev_blend_mode = BlendMode::None;
2932            let shaders_rc = self.shaders.clone();
2933
2934            for batch in &alpha_batch_container.alpha_batches {
2935                if should_skip_batch(&batch.key.kind, self.debug_flags) {
2936                    continue;
2937                }
2938
2939                let mut shaders = shaders_rc.borrow_mut();
2940                let shader = shaders.get(
2941                    &batch.key,
2942                    batch.features | BatchFeatures::ALPHA_PASS,
2943                    self.debug_flags,
2944                    &self.device,
2945                );
2946
2947                if batch.key.blend_mode != prev_blend_mode {
2948                    match batch.key.blend_mode {
2949                        _ if self.debug_flags.contains(DebugFlags::SHOW_OVERDRAW) &&
2950                            framebuffer_kind == FramebufferKind::Main => {
2951                            self.device.set_blend_mode_show_overdraw();
2952                        }
2953                        BlendMode::None => {
2954                            unreachable!("bug: opaque blend in alpha pass");
2955                        }
2956                        BlendMode::Alpha => {
2957                            self.device.set_blend_mode_alpha();
2958                        }
2959                        BlendMode::PremultipliedAlpha => {
2960                            self.device.set_blend_mode_premultiplied_alpha();
2961                        }
2962                        BlendMode::PremultipliedDestOut => {
2963                            self.device.set_blend_mode_premultiplied_dest_out();
2964                        }
2965                        BlendMode::SubpixelDualSource => {
2966                            self.device.set_blend_mode_subpixel_dual_source();
2967                        }
2968                        BlendMode::Advanced(mode) => {
2969                            if self.enable_advanced_blend_barriers {
2970                                self.device.gl().blend_barrier_khr();
2971                            }
2972                            self.device.set_blend_mode_advanced(mode);
2973                        }
2974                        BlendMode::MultiplyDualSource => {
2975                            self.device.set_blend_mode_multiply_dual_source();
2976                        }
2977                        BlendMode::Screen => {
2978                            self.device.set_blend_mode_screen();
2979                        }
2980                        BlendMode::Exclusion => {
2981                            self.device.set_blend_mode_exclusion();
2982                        }
2983                        BlendMode::PlusLighter => {
2984                            self.device.set_blend_mode_plus_lighter();
2985                        }
2986                    }
2987                    prev_blend_mode = batch.key.blend_mode;
2988                }
2989
2990                // Handle special case readback for composites.
2991                if let BatchKind::Brush(BrushBatchKind::MixBlend { task_id, backdrop_id }) = batch.key.kind {
2992                    // Mix-blend composites can't be grouped together because
2993                    // they may overlap and affect each other.
2994                    debug_assert_eq!(batch.instances.len(), 1);
2995                    self.handle_readback_composite(
2996                        draw_target,
2997                        uses_scissor,
2998                        &render_tasks[task_id],
2999                        &render_tasks[backdrop_id],
3000                    );
3001                }
3002
3003                let _timer = self.gpu_profiler.start_timer(batch.key.kind.sampler_tag());
3004                shader.bind(
3005                    &mut self.device,
3006                    projection,
3007                    None,
3008                    &mut self.renderer_errors,
3009                    &mut self.profile,
3010                );
3011
3012                self.draw_instanced_batch(
3013                    &batch.instances,
3014                    VertexArrayKind::Primitive,
3015                    &batch.key.textures,
3016                    stats
3017                );
3018            }
3019
3020            self.set_blend(false, framebuffer_kind);
3021            self.gpu_profiler.finish_sampler(transparent_sampler);
3022        }
3023
3024        self.device.disable_depth();
3025        if uses_scissor {
3026            self.device.disable_scissor();
3027        }
3028    }
3029
3030    /// Rasterize any external compositor surfaces that require updating
3031    fn update_external_native_surfaces(
3032        &mut self,
3033        external_surfaces: &[ResolvedExternalSurface],
3034        results: &mut RenderResults,
3035    ) {
3036        if external_surfaces.is_empty() {
3037            return;
3038        }
3039
3040        let opaque_sampler = self.gpu_profiler.start_sampler(GPU_SAMPLER_TAG_OPAQUE);
3041
3042        self.device.disable_depth();
3043        self.set_blend(false, FramebufferKind::Main);
3044
3045        for surface in external_surfaces {
3046            // See if this surface needs to be updated
3047            let (native_surface_id, surface_size) = match surface.update_params {
3048                Some(params) => params,
3049                None => continue,
3050            };
3051
3052            // When updating an external surface, the entire surface rect is used
3053            // for all of the draw, dirty, valid and clip rect parameters.
3054            let surface_rect = surface_size.into();
3055
3056            // Bind the native compositor surface to update
3057            let surface_info = self.compositor_config
3058                .compositor()
3059                .unwrap()
3060                .bind(
3061                    &mut self.device,
3062                    NativeTileId {
3063                        surface_id: native_surface_id,
3064                        x: 0,
3065                        y: 0,
3066                    },
3067                    surface_rect,
3068                    surface_rect,
3069                );
3070
3071            // Bind the native surface to current FBO target
3072            let draw_target = DrawTarget::NativeSurface {
3073                offset: surface_info.origin,
3074                external_fbo_id: surface_info.fbo_id,
3075                dimensions: surface_size,
3076            };
3077            self.device.bind_draw_target(draw_target);
3078
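            // Set up an orthographic projection that maps the surface's pixel
            // rectangle onto the bound native surface.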
3079            let projection = Transform3D::ortho(
3080                0.0,
3081                surface_size.width as f32,
3082                0.0,
3083                surface_size.height as f32,
3084                self.device.ortho_near_plane(),
3085                self.device.ortho_far_plane(),
3086            );
3087
3088            let (textures, instance) = match surface.color_data {
3089                ResolvedExternalSurfaceColorData::Yuv {
3090                        ref planes, color_space, format, channel_bit_depth, .. } => {
3091
3092                    let textures = BatchTextures::composite_yuv(
3093                        planes[0].texture,
3094                        planes[1].texture,
3095                        planes[2].texture,
3096                    );
3097
3098                    // When the texture is an external texture, the UV rect is not known when
3099                    // the external surface descriptor is created, because external textures
3100                    // are not resolved until the lock() callback is invoked at the start of
3101                    // the frame render. To handle this, query the texture resolver for the
3102                    // UV rect if it's an external texture, otherwise use the default UV rect.
3103                    let uv_rects = [
3104                        self.texture_resolver.get_uv_rect(&textures.input.colors[0], planes[0].uv_rect),
3105                        self.texture_resolver.get_uv_rect(&textures.input.colors[1], planes[1].uv_rect),
3106                        self.texture_resolver.get_uv_rect(&textures.input.colors[2], planes[2].uv_rect),
3107                    ];
3108
3109                    let instance = CompositeInstance::new_yuv(
3110                        surface_rect.to_f32(),
3111                        surface_rect.to_f32(),
3112                        // z-id is not relevant when updating a native compositor surface.
3113                        // TODO(gw): Support compositor surfaces without z-buffer, for memory / perf win here.
3114                        color_space,
3115                        format,
3116                        channel_bit_depth,
3117                        uv_rects,
3118                        (false, false),
3119                        None,
3120                    );
3121
3122                    // Bind an appropriate YUV shader for the texture format kind
3123                    self.shaders
3124                        .borrow_mut()
3125                        .get_composite_shader(
3126                            CompositeSurfaceFormat::Yuv,
3127                            surface.image_buffer_kind,
3128                            instance.get_yuv_features(),
3129                        ).bind(
3130                            &mut self.device,
3131                            &projection,
3132                            None,
3133                            &mut self.renderer_errors,
3134                            &mut self.profile,
3135                        );
3136
3137                    (textures, instance)
3138                },
3139                ResolvedExternalSurfaceColorData::Rgb { ref plane, .. } => {
3140                    let textures = BatchTextures::composite_rgb(plane.texture);
3141                    let uv_rect = self.texture_resolver.get_uv_rect(&textures.input.colors[0], plane.uv_rect);
3142                    let instance = CompositeInstance::new_rgb(
3143                        surface_rect.to_f32(),
3144                        surface_rect.to_f32(),
3145                        PremultipliedColorF::WHITE,
3146                        uv_rect,
3147                        plane.texture.uses_normalized_uvs(),
3148                        (false, false),
3149                        None,
3150                    );
3151                    let features = instance.get_rgb_features();
3152
3153                    self.shaders
3154                        .borrow_mut()
3155                        .get_composite_shader(
3156                            CompositeSurfaceFormat::Rgba,
3157                            surface.image_buffer_kind,
3158                            features,
3159                        ).bind(
3160                            &mut self.device,
3161                            &projection,
3162                            None,
3163                            &mut self.renderer_errors,
3164                            &mut self.profile,
3165                        );
3166
3167                    (textures, instance)
3168                },
3169            };
3170
3171            self.draw_instanced_batch(
3172                &[instance],
3173                VertexArrayKind::Composite,
3174                &textures,
3175                &mut results.stats,
3176            );
3177
3178            self.compositor_config
3179                .compositor()
3180                .unwrap()
3181                .unbind(&mut self.device);
3182        }
3183
3184        self.gpu_profiler.finish_sampler(opaque_sampler);
3185    }
3186
3187    /// Draw a list of tiles to the framebuffer
3188    fn draw_tile_list<'a, I: Iterator<Item = &'a occlusion::Item<OcclusionItemKey>>>(
3189        &mut self,
3190        tiles_iter: I,
3191        composite_state: &CompositeState,
3192        external_surfaces: &[ResolvedExternalSurface],
3193        projection: &default::Transform3D<f32>,
3194        stats: &mut RendererStats,
3195    ) {
3196        let mut current_shader_params = (
3197            CompositeSurfaceFormat::Rgba,
3198            ImageBufferKind::Texture2D,
3199            CompositeFeatures::empty(),
3200            None,
3201        );
3202        let mut current_textures = BatchTextures::empty();
3203        let mut instances = Vec::new();
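        // Instances are accumulated here and flushed as a single draw call
        // whenever the shader parameters or textures change (see the loop below).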
3204
3205        self.shaders
3206            .borrow_mut()
3207            .get_composite_shader(
3208                current_shader_params.0,
3209                current_shader_params.1,
3210                current_shader_params.2,
3211            ).bind(
3212                &mut self.device,
3213                projection,
3214                None,
3215                &mut self.renderer_errors,
3216                &mut self.profile,
3217            );
3218
3219        for item in tiles_iter {
3220            let tile = &composite_state.tiles[item.key.tile_index];
3221
3222            let clip_rect = item.rectangle;
3223            let tile_rect = composite_state.get_device_rect(&tile.local_rect, tile.transform_index);
3224            let transform = composite_state.get_device_transform(tile.transform_index);
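            // A negative scale on either axis means the compositor transform
            // mirrors the tile on that axis; the composite instances below take
            // this flag into account.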
3225            let flip = (transform.scale.x < 0.0, transform.scale.y < 0.0);
3226
3227            let clip = if item.key.needs_mask {
3228                tile.clip_index.map(|index| {
3229                    composite_state.get_compositor_clip(index)
3230                })
3231            } else {
3232                None
3233            };
3234
3235            // Work out the draw params based on the tile surface
3236            let (instance, textures, shader_params) = match tile.surface {
3237                CompositeTileSurface::Color { color } => {
3238                    let dummy = TextureSource::Dummy;
3239                    let image_buffer_kind = dummy.image_buffer_kind();
3240                    let instance = CompositeInstance::new(
3241                        tile_rect,
3242                        clip_rect,
3243                        color.premultiplied(),
3244                        flip,
3245                        clip,
3246                    );
3247                    let features = instance.get_rgb_features();
3248                    (
3249                        instance,
3250                        BatchTextures::composite_rgb(dummy),
3251                        (CompositeSurfaceFormat::Rgba, image_buffer_kind, features, None),
3252                    )
3253                }
3254                CompositeTileSurface::Texture { surface: ResolvedSurfaceTexture::TextureCache { texture } } => {
3255                    let instance = CompositeInstance::new(
3256                        tile_rect,
3257                        clip_rect,
3258                        PremultipliedColorF::WHITE,
3259                        flip,
3260                        clip,
3261                    );
3262                    let features = instance.get_rgb_features();
3263                    (
3264                        instance,
3265                        BatchTextures::composite_rgb(texture),
3266                        (
3267                            CompositeSurfaceFormat::Rgba,
3268                            ImageBufferKind::Texture2D,
3269                            features,
3270                            None,
3271                        ),
3272                    )
3273                }
3274                CompositeTileSurface::ExternalSurface { external_surface_index } => {
3275                    let surface = &external_surfaces[external_surface_index.0];
3276
3277                    match surface.color_data {
3278                        ResolvedExternalSurfaceColorData::Yuv { ref planes, color_space, format, channel_bit_depth, .. } => {
3279                            let textures = BatchTextures::composite_yuv(
3280                                planes[0].texture,
3281                                planes[1].texture,
3282                                planes[2].texture,
3283                            );
3284
3285                            // When the texture is an external texture, the UV rect is not known when
3286                            // the external surface descriptor is created, because external textures
3287                            // are not resolved until the lock() callback is invoked at the start of
3288                            // the frame render. To handle this, query the texture resolver for the
3289                            // UV rect if it's an external texture, otherwise use the default UV rect.
3290                            let uv_rects = [
3291                                self.texture_resolver.get_uv_rect(&textures.input.colors[0], planes[0].uv_rect),
3292                                self.texture_resolver.get_uv_rect(&textures.input.colors[1], planes[1].uv_rect),
3293                                self.texture_resolver.get_uv_rect(&textures.input.colors[2], planes[2].uv_rect),
3294                            ];
3295
3296                            let instance = CompositeInstance::new_yuv(
3297                                tile_rect,
3298                                clip_rect,
3299                                color_space,
3300                                format,
3301                                channel_bit_depth,
3302                                uv_rects,
3303                                flip,
3304                                clip,
3305                            );
3306                            let features = instance.get_yuv_features();
3307
3308                            (
3309                                instance,
3310                                textures,
3311                                (
3312                                    CompositeSurfaceFormat::Yuv,
3313                                    surface.image_buffer_kind,
3314                                    features,
3315                                    None
3316                                ),
3317                            )
3318                        },
3319                        ResolvedExternalSurfaceColorData::Rgb { ref plane, .. } => {
3320                            let uv_rect = self.texture_resolver.get_uv_rect(&plane.texture, plane.uv_rect);
3321                            let instance = CompositeInstance::new_rgb(
3322                                tile_rect,
3323                                clip_rect,
3324                                PremultipliedColorF::WHITE,
3325                                uv_rect,
3326                                plane.texture.uses_normalized_uvs(),
3327                                flip,
3328                                clip,
3329                            );
3330                            let features = instance.get_rgb_features();
3331                            (
3332                                instance,
3333                                BatchTextures::composite_rgb(plane.texture),
3334                                (
3335                                    CompositeSurfaceFormat::Rgba,
3336                                    surface.image_buffer_kind,
3337                                    features,
3338                                    Some(self.texture_resolver.get_texture_size(&plane.texture).to_f32()),
3339                                ),
3340                            )
3341                        },
3342                    }
3343                }
3344                CompositeTileSurface::Clear => {
3345                    let dummy = TextureSource::Dummy;
3346                    let image_buffer_kind = dummy.image_buffer_kind();
3347                    let instance = CompositeInstance::new(
3348                        tile_rect,
3349                        clip_rect,
3350                        PremultipliedColorF::BLACK,
3351                        flip,
3352                        clip,
3353                    );
3354                    let features = instance.get_rgb_features();
3355                    (
3356                        instance,
3357                        BatchTextures::composite_rgb(dummy),
3358                        (CompositeSurfaceFormat::Rgba, image_buffer_kind, features, None),
3359                    )
3360                }
3361                CompositeTileSurface::Texture { surface: ResolvedSurfaceTexture::Native { .. } } => {
3362                    unreachable!("bug: found native surface in simple composite path");
3363                }
3364            };
3365
3366            // Flush batch if shader params or textures changed
3367            let flush_batch = !current_textures.is_compatible_with(&textures) ||
3368                shader_params != current_shader_params;
3369
3370            if flush_batch {
3371                if !instances.is_empty() {
3372                    self.draw_instanced_batch(
3373                        &instances,
3374                        VertexArrayKind::Composite,
3375                        &current_textures,
3376                        stats,
3377                    );
3378                    instances.clear();
3379                }
3380            }
3381
3382            if shader_params != current_shader_params {
3383                self.shaders
3384                    .borrow_mut()
3385                    .get_composite_shader(shader_params.0, shader_params.1, shader_params.2)
3386                    .bind(
3387                        &mut self.device,
3388                        projection,
3389                        shader_params.3,
3390                        &mut self.renderer_errors,
3391                        &mut self.profile,
3392                    );
3393
3394                current_shader_params = shader_params;
3395            }
3396
3397            current_textures = textures;
3398
3399            // Add instance to current batch
3400            instances.push(instance);
3401        }
3402
3403        // Flush the last batch
3404        if !instances.is_empty() {
3405            self.draw_instanced_batch(
3406                &instances,
3407                VertexArrayKind::Composite,
3408                &current_textures,
3409                stats,
3410            );
3411        }
3412    }
3413
3414    // Composite tiles in a swapchain. When using LayerCompositor, we may
3415    // split the compositing into multiple swapchains.
3416    fn composite_pass(
3417        &mut self,
3418        composite_state: &CompositeState,
3419        draw_target: DrawTarget,
3420        clear_color: ColorF,
3421        projection: &default::Transform3D<f32>,
3422        results: &mut RenderResults,
3423        partial_present_mode: Option<PartialPresentMode>,
3424        layer: &SwapChainLayer,
3425    ) {
3426        self.device.bind_draw_target(draw_target);
3427        self.device.disable_depth_write();
3428        self.device.disable_depth();
3429
3430        // If using KHR_partial_update, call eglSetDamageRegion.
3431        // This must be called exactly once per frame, and prior to any rendering to the main
3432        // framebuffer. Additionally, on Mali-G77 we encountered rendering issues when calling
3433        // this earlier in the frame, during offscreen render passes. So call it now, immediately
3434        // before rendering to the main framebuffer. See bug 1685276 for details.
3435        if let Some(partial_present) = self.compositor_config.partial_present() {
3436            if let Some(PartialPresentMode::Single { dirty_rect }) = partial_present_mode {
3437                partial_present.set_buffer_damage_region(&[dirty_rect.to_i32()]);
3438            }
3439        }
3440
3441        // Clear the framebuffer
3442        let clear_color = Some(clear_color.to_array());
3443
3444        match partial_present_mode {
3445            Some(PartialPresentMode::Single { dirty_rect }) => {
3446                // There is no need to clear if the dirty rect is occluded. Additionally,
3447                // on Mali-G77 we have observed artefacts when calling glClear (even with
3448                // the empty scissor rect set) after calling eglSetDamageRegion with an
3449                // empty damage region. So avoid clearing in that case. See bug 1709548.
3450                if !dirty_rect.is_empty() && layer.occlusion.test(&dirty_rect) {
3451                    // We have a single dirty rect, so clear only that
3452                    self.device.clear_target(clear_color,
3453                                             None,
3454                                             Some(draw_target.to_framebuffer_rect(dirty_rect.to_i32())));
3455                }
3456            }
3457            None => {
3458                // Partial present is disabled, so clear the entire framebuffer
3459                self.device.clear_target(clear_color,
3460                                         None,
3461                                         None);
3462            }
3463        }
3464
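        // Tiles are drawn in three groups: opaque tiles front-to-back with
        // blending disabled, clear tiles with a premultiplied dest-out blend,
        // and finally alpha tiles back-to-front with premultiplied alpha.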
3465        // Draw opaque tiles
3466        let opaque_items = layer.occlusion.opaque_items();
3467        if !opaque_items.is_empty() {
3468            let opaque_sampler = self.gpu_profiler.start_sampler(GPU_SAMPLER_TAG_OPAQUE);
3469            self.set_blend(false, FramebufferKind::Main);
3470            self.draw_tile_list(
3471                opaque_items.iter(),
3472                &composite_state,
3473                &composite_state.external_surfaces,
3474                projection,
3475                &mut results.stats,
3476            );
3477            self.gpu_profiler.finish_sampler(opaque_sampler);
3478        }
3479
3480        // Draw clear tiles
3481        if !layer.clear_tiles.is_empty() {
3482            let transparent_sampler = self.gpu_profiler.start_sampler(GPU_SAMPLER_TAG_TRANSPARENT);
3483            self.set_blend(true, FramebufferKind::Main);
3484            self.device.set_blend_mode_premultiplied_dest_out();
3485            self.draw_tile_list(
3486                layer.clear_tiles.iter(),
3487                &composite_state,
3488                &composite_state.external_surfaces,
3489                projection,
3490                &mut results.stats,
3491            );
3492            self.gpu_profiler.finish_sampler(transparent_sampler);
3493        }
3494
3495        // Draw alpha tiles
3496        let alpha_items = layer.occlusion.alpha_items();
3497        if !alpha_items.is_empty() {
3498            let transparent_sampler = self.gpu_profiler.start_sampler(GPU_SAMPLER_TAG_TRANSPARENT);
3499            self.set_blend(true, FramebufferKind::Main);
3500            self.set_blend_mode_premultiplied_alpha(FramebufferKind::Main);
3501            self.draw_tile_list(
3502                alpha_items.iter().rev(),
3503                &composite_state,
3504                &composite_state.external_surfaces,
3505                projection,
3506                &mut results.stats,
3507            );
3508            self.gpu_profiler.finish_sampler(transparent_sampler);
3509        }
3510    }
3511
3512    /// Composite picture cache tiles into the framebuffer. This is currently
3513    /// the only way that picture cache tiles get drawn. In future, the tiles
3514    /// will often be handed to the OS compositor, and this method will be
3515    /// rarely used.
3516    fn composite_simple(
3517        &mut self,
3518        composite_state: &CompositeState,
3519        fb_draw_target: DrawTarget,
3520        projection: &default::Transform3D<f32>,
3521        results: &mut RenderResults,
3522        partial_present_mode: Option<PartialPresentMode>,
3523        device_size: DeviceIntSize,
3524    ) {
3525        let _gm = self.gpu_profiler.start_marker("framebuffer");
3526        let _timer = self.gpu_profiler.start_timer(GPU_TAG_COMPOSITE);
3527
3528        // We are only interested in tiles backed with actual cached pixels so we don't
3529        // count clear tiles here.
3530        let num_tiles = composite_state.tiles
3531            .iter()
3532            .filter(|tile| tile.kind != TileKind::Clear).count();
3533        self.profile.set(profiler::PICTURE_TILES, num_tiles);
3534
3535        let window_is_opaque = match self.compositor_config.layer_compositor() {
3536            Some(ref compositor) => {
3537                let props = compositor.get_window_properties();
3538                props.is_opaque
3539            }
3540            None => true,
3541        };
3542
3543        let mut input_layers: Vec<CompositorInputLayer> = Vec::new();
3544        let mut swapchain_layers = Vec::new();
3545        let cap = composite_state.tiles.len();
3546        let mut segment_builder = SegmentBuilder::new();
3547
3548        // NOTE: Tiles here are being iterated in front-to-back order by
3549        //       z-id, due to the sort in composite_state.end_frame()
3550        for (idx, tile) in composite_state.tiles.iter().enumerate() {
3551            let device_tile_box = composite_state.get_device_rect(
3552                &tile.local_rect,
3553                tile.transform_index
3554            );
3555
3556            // Determine a clip rect to apply to this tile, depending on what
3557            // the partial present mode is.
3558            let partial_clip_rect = match partial_present_mode {
3559                Some(PartialPresentMode::Single { dirty_rect }) => dirty_rect,
3560                None => device_tile_box,
3561            };
3562
3563            // Simple compositor needs the valid rect in device space to match clip rect
3564            let device_valid_rect = composite_state
3565                .get_device_rect(&tile.local_valid_rect, tile.transform_index);
3566
3567            let rect = device_tile_box
3568                .intersection_unchecked(&tile.device_clip_rect)
3569                .intersection_unchecked(&partial_clip_rect)
3570                .intersection_unchecked(&device_valid_rect);
3571
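            // Skip tiles that have nothing visible once all clips are applied.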
3572            if rect.is_empty() {
3573                continue;
3574            }
3575
3576            // Determine if the tile is an external surface or content
3577            let usage = match tile.surface {
3578                CompositeTileSurface::Texture { .. } |
3579                CompositeTileSurface::Color { .. } |
3580                CompositeTileSurface::Clear => {
3581                    CompositorSurfaceUsage::Content
3582                }
3583                CompositeTileSurface::ExternalSurface { external_surface_index } => {
3584                    match self.current_compositor_kind {
3585                        CompositorKind::Native { .. } | CompositorKind::Draw { .. } => {
3586                            CompositorSurfaceUsage::Content
3587                        }
3588                        CompositorKind::Layer { .. } => {
3589                            let surface = &composite_state.external_surfaces[external_surface_index.0];
3590
3591                            // TODO(gwc): For now, we only select a hardware overlay swapchain if we
3592                            // have an external image, but it may make sense to do so for compositor
3593                            // surfaces without one in future.
3594                            match surface.external_image_id {
3595                                Some(external_image_id) => {
3596                                    let image_key = match surface.color_data {
3597                                        ResolvedExternalSurfaceColorData::Rgb { image_dependency, .. } => image_dependency.key,
3598                                        ResolvedExternalSurfaceColorData::Yuv { image_dependencies, .. } => image_dependencies[0].key,
3599                                    };
3600
3601                                    CompositorSurfaceUsage::External {
3602                                        image_key,
3603                                        external_image_id,
3604                                        transform_index: tile.transform_index,
3605                                    }
3606                                }
3607                                None => {
3608                                    CompositorSurfaceUsage::Content
3609                                }
3610                            }
3611                        }
3612                    }
3613                }
3614            };
3615
3616            // Determine whether we need a new layer, and if so, what kind
3617            let new_layer_kind = match input_layers.last() {
3618                Some(curr_layer) => {
3619                    match (curr_layer.usage, usage) {
3620                        // Content -> content, composite into the same layer
3621                        (CompositorSurfaceUsage::Content, CompositorSurfaceUsage::Content) => None,
3622                        (CompositorSurfaceUsage::External { .. }, CompositorSurfaceUsage::Content) => Some(usage),
3623
3624                        // A switch of layer type, or video -> video, needs a new swapchain
3625                        (CompositorSurfaceUsage::Content, CompositorSurfaceUsage::External { .. }) |
3626                        (CompositorSurfaceUsage::External { .. }, CompositorSurfaceUsage::External { .. }) => {
3627                            // Only create a new layer if we're using LayerCompositor
3628                            match self.compositor_config {
3629                                CompositorConfig::Draw { .. } | CompositorConfig::Native { .. } => None,
3630                                CompositorConfig::Layer { .. } => {
3631                                    Some(usage)
3632                                }
3633                            }
3634                        }
3635
3636                        // Should not encounter debug layers here
3637                        (CompositorSurfaceUsage::DebugOverlay, _) | (_, CompositorSurfaceUsage::DebugOverlay) => {
3638                            unreachable!();
3639                        }
3640                    }
3641                }
3642                None => {
3643                    // No layers yet, so we need a new one
3644                    Some(usage)
3645                }
3646            };
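            // For example, with LayerCompositor a front-to-back tile sequence of
            // [content, content, video, content] produces three layers:
            // [content], [video] and [content].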
3647
3648            if let Some(new_layer_kind) = new_layer_kind {
3649                let (offset, clip_rect, is_opaque) = match usage {
3650                    CompositorSurfaceUsage::Content => {
3651                        (
3652                            DeviceIntPoint::zero(),
3653                            device_size.into(),
3654                            false,      // Assume not opaque, we'll calculate this later
3655                        )
3656                    }
3657                    CompositorSurfaceUsage::External { .. } => {
3658                        let rect = composite_state.get_device_rect(
3659                            &tile.local_rect,
3660                            tile.transform_index
3661                        );
3662
3663                        let clip_rect = tile.device_clip_rect.to_i32();
3664                        let is_opaque = tile.kind != TileKind::Alpha;
3665
3666                        (rect.min.to_i32(), clip_rect, is_opaque)
3667                    }
3668                    CompositorSurfaceUsage::DebugOverlay => unreachable!(),
3669                };
3670
3671                input_layers.push(CompositorInputLayer {
3672                    usage: new_layer_kind,
3673                    is_opaque,
3674                    offset,
3675                    clip_rect,
3676                });
3677
3678                swapchain_layers.push(SwapChainLayer {
3679                    clear_tiles: Vec::new(),
3680                    occlusion: occlusion::FrontToBackBuilder::with_capacity(cap, cap),
3681                });
3682            }
3683
3684            // For normal tiles, add to occlusion tracker. For clear tiles, add directly
3685            // to the swapchain tile list
3686            let layer = swapchain_layers.last_mut().unwrap();
3687
3688            // Clear tiles overwrite whatever is under them, so they are treated as opaque.
3689            match tile.kind {
3690                TileKind::Opaque | TileKind::Alpha => {
3691                    let is_opaque = tile.kind != TileKind::Alpha;
3692
3693                    match tile.clip_index {
3694                        Some(clip_index) => {
3695                            let clip = composite_state.get_compositor_clip(clip_index);
3696
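                            // Split the tile into segments around the rounded-rect
                            // clip. Only segments that cross a rounded corner need a
                            // mask; segments without a mask can still be registered
                            // as opaque occluders if the tile itself is opaque.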
3697                            // TODO(gw): Make segment builder generic on unit to avoid casts below.
3698                            segment_builder.initialize(
3699                                rect.cast_unit(),
3700                                None,
3701                                rect.cast_unit(),
3702                            );
3703                            segment_builder.push_clip_rect(
3704                                clip.rect.cast_unit(),
3705                                Some(clip.radius),
3706                                ClipMode::Clip,
3707                            );
3708                            segment_builder.build(|segment| {
3709                                let key = OcclusionItemKey { tile_index: idx, needs_mask: segment.has_mask };
3710
3711                                layer.occlusion.add(
3712                                    &segment.rect.cast_unit(),
3713                                    is_opaque && !segment.has_mask,
3714                                    key,
3715                                );
3716                            });
3717                        }
3718                        None => {
3719                            layer.occlusion.add(&rect, is_opaque, OcclusionItemKey {
3720                                tile_index: idx,
3721                                needs_mask: false,
3722                            });
3723                        }
3724                    }
3725                }
3726                TileKind::Clear => {
3727                    // Clear tiles are specific to how we render the window buttons on
3728                    // Windows 8. They clobber what's under them so they can be treated as opaque,
3729                    // but require a different blend state so they will be rendered after the opaque
3730                    // tiles and before transparent ones.
3731                    layer.clear_tiles.push(occlusion::Item { rectangle: rect, key: OcclusionItemKey { tile_index: idx, needs_mask: false } });
3732                }
3733            }
3734        }
3735
3736        // Reverse the layers - we're now working in back-to-front order from here onwards
3737        assert_eq!(swapchain_layers.len(), input_layers.len());
3738        input_layers.reverse();
3739        swapchain_layers.reverse();
3740
3741        if window_is_opaque {
3742            match input_layers.first_mut() {
3743                Some(_layer) => {
3744                    // If the window is opaque, and the first layer is a content layer
3745                    // then mark that as opaque.
3746                    // TODO(gw): This causes flickering in some cases when changing
3747                    //           layer count. We need to find out why so we can enable
3748                    //           selecting an opaque swapchain where possible.
3749                    // if let CompositorSurfaceUsage::Content = layer.usage {
3750                    //     layer.is_opaque = true;
3751                    // }
3752                }
3753                None => {
3754                    // If no tiles were present, and we expect an opaque window,
3755                    // add an empty layer to force a composite that clears the screen,
3756                    // to match existing semantics.
3757                    input_layers.push(CompositorInputLayer {
3758                        usage: CompositorSurfaceUsage::Content,
3759                        is_opaque: true,
3760                        offset: DeviceIntPoint::zero(),
3761                        clip_rect: device_size.into(),
3762                    });
3763
3764                    swapchain_layers.push(SwapChainLayer {
3765                        clear_tiles: Vec::new(),
3766                        occlusion: occlusion::FrontToBackBuilder::with_capacity(cap, cap),
3767                    });
3768                }
3769            }
3770        }
3771
3772        // Add a debug overlay request if enabled
3773        if self.debug_overlay_state.is_enabled {
3774            self.debug_overlay_state.layer_index = input_layers.len();
3775
3776            input_layers.push(CompositorInputLayer {
3777                usage: CompositorSurfaceUsage::DebugOverlay,
3778                is_opaque: false,
3779                offset: DeviceIntPoint::zero(),
3780                clip_rect: device_size.into(),
3781            });
3782
3783            swapchain_layers.push(SwapChainLayer {
3784                clear_tiles: Vec::new(),
3785                occlusion: occlusion::FrontToBackBuilder::with_capacity(cap, cap),
3786            });
3787        }
3788
3789        // Start compositing if using OS compositor
3790        if let Some(ref mut compositor) = self.compositor_config.layer_compositor() {
3791            let input = CompositorInputConfig {
3792                layers: &input_layers,
3793            };
3794            compositor.begin_frame(&input);
3795        }
3796
3797        for (layer_index, (layer, swapchain_layer)) in input_layers.iter().zip(swapchain_layers.iter()).enumerate() {
3798            self.device.reset_state();
3799
3800            // Skip compositing external images or debug layers here
3801            match layer.usage {
3802                CompositorSurfaceUsage::Content => {}
3803                CompositorSurfaceUsage::External { .. } | CompositorSurfaceUsage::DebugOverlay => {
3804                    continue;
3805                }
3806            }
3807
3808            let clear_color = if layer_index == 0 {
3809                self.clear_color
3810            } else {
3811                ColorF::TRANSPARENT
3812            };
3813
3814            let draw_target = match self.compositor_config {
3815                CompositorConfig::Layer { ref mut compositor } => {
3816                    compositor.bind_layer(layer_index);
3817
3818                    DrawTarget::NativeSurface {
3819                        offset: -layer.offset,
3820                        external_fbo_id: 0,
3821                        dimensions: fb_draw_target.dimensions(),
3822                    }
3823                }
3824                // Native can be hit when switching compositors (it is disabled when using Layer)
3825                CompositorConfig::Draw { .. } | CompositorConfig::Native { .. } => {
3826                    fb_draw_target
3827                }
3828            };
3829
3830            // TODO(gwc): When supporting external attached swapchains, need to skip the composite pass here
3831
3832            // Draw this layer's composite pass into its swap chain
3833            self.composite_pass(
3834                composite_state,
3835                draw_target,
3836                clear_color,
3837                projection,
3838                results,
3839                partial_present_mode,
3840                swapchain_layer,
3841            );
3842
3843            if let Some(ref mut compositor) = self.compositor_config.layer_compositor() {
3844                compositor.present_layer(layer_index);
3845            }
3846        }
3847
3848        // Frame-end notification: register each layer's surface with the layer compositor
3849        if let Some(ref mut compositor) = self.compositor_config.layer_compositor() {
3850            for (layer_index, layer) in input_layers.iter().enumerate() {
3851                // External surfaces need a transform applied, while content and
3852                // debug-overlay surfaces are always at identity
3853                let transform = match layer.usage {
3854                    CompositorSurfaceUsage::Content => CompositorSurfaceTransform::identity(),
3855                    CompositorSurfaceUsage::External { transform_index, .. } => composite_state.get_compositor_transform(transform_index),
3856                    CompositorSurfaceUsage::DebugOverlay => CompositorSurfaceTransform::identity(),
3857                };
3858
3859                compositor.add_surface(
3860                    layer_index,
3861                    transform,
3862                    layer.clip_rect,
3863                    ImageRendering::Auto,
3864                );
3865            }
3866        }
3867    }
3868
3869    fn clear_render_target(
3870        &mut self,
3871        target: &RenderTarget,
3872        draw_target: DrawTarget,
3873        framebuffer_kind: FramebufferKind,
3874        projection: &default::Transform3D<f32>,
3875        stats: &mut RendererStats,
3876    ) {
3877        let needs_depth = target.needs_depth();
3878
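        // Clear depth to 1.0 (the far plane) when the target uses a depth
        // buffer, so primitives drawn with the usual less-or-equal test pass.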
3879        let clear_depth = if needs_depth {
3880            Some(1.0)
3881        } else {
3882            None
3883        };
3884
3885        let _timer = self.gpu_profiler.start_timer(GPU_TAG_SETUP_TARGET);
3886
3887        self.device.disable_depth();
3888        self.set_blend(false, framebuffer_kind);
3889
3890        let is_alpha = target.target_kind == RenderTargetKind::Alpha;
3891        let require_precise_clear = target.cached;
3892
3893        // On some Mali-T devices we have observed crashes in subsequent draw calls
3894        // immediately after clearing the alpha render target regions with glClear().
3895        // Using the shader to clear the regions avoids the crash. See bug 1638593.
3896        let clear_with_quads = (target.cached && self.clear_caches_with_quads)
3897            || (is_alpha && self.clear_alpha_targets_with_quads);
3898
3899        let favor_partial_updates = self.device.get_capabilities().supports_render_target_partial_update
3900            && self.enable_clear_scissor;
3901
3902        // On some Adreno 4xx devices, render tasks drawn to alpha targets have no
3903        // effect unless the target is fully cleared prior to rendering. See bug 1714227.
3904        let full_clears_on_adreno = is_alpha && self.device.get_capabilities().requires_alpha_target_full_clear;
3905        let require_full_clear = !require_precise_clear
3906            && (full_clears_on_adreno || !favor_partial_updates);
3907
3908        let clear_color = target
3909            .clear_color
3910            .map(|color| color.to_array());
3911
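        // Three clearing strategies follow: quad draws (the Mali workaround
        // above), precise per-rect clears for cached targets, or a single
        // glClear, optionally scissored to the used region.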
3912        let mut cleared_depth = false;
3913        if clear_with_quads {
3914            // Will be handled last. Only specific rects will be cleared.
3915        } else if require_precise_clear {
3916            // Only clear specific rects
3917            for (rect, color) in &target.clears {
3918                self.device.clear_target(
3919                    Some(color.to_array()),
3920                    None,
3921                    Some(draw_target.to_framebuffer_rect(*rect)),
3922                );
3923            }
3924        } else {
3925            // At this point we know we don't require precise clears for correctness.
3926            // We may still attempt to restrict the clear rect as an optimization on
3927            // some configurations.
3928            let clear_rect = if require_full_clear {
3929                None
3930            } else {
3931                match draw_target {
3932                    DrawTarget::Default { rect, total_size, .. } => {
3933                        if rect.min == FramebufferIntPoint::zero() && rect.size() == total_size {
3934                            // Whole screen is covered, no need for scissor
3935                            None
3936                        } else {
3937                            Some(rect)
3938                        }
3939                    }
3940                    DrawTarget::Texture { .. } => {
3941                        // TODO(gw): Applying a scissor rect and minimal clear here
3942                        // is a very large performance win on the Intel and nVidia
3943                        // GPUs that I have tested with. It's possible it may be a
3944                        // performance penalty on other GPU types - we should test this
3945                        // and consider different code paths.
3946                        //
3947                        // Note: The above measurements were taken when render
3948                        // target slices were minimum 2048x2048. Now that we size
3949                        // them adaptively, this may be less of a win (except perhaps
3950                        // on a mostly-unused last slice of a large texture array).
3951                        target.used_rect.map(|rect| draw_target.to_framebuffer_rect(rect))
3952                    }
3953                    // Full clear.
3954                    _ => None,
3955                }
3956            };
3957
3958            self.device.clear_target(
3959                clear_color,
3960                clear_depth,
3961                clear_rect,
3962            );
3963            cleared_depth = true;
3964        }
3965
3966        // Make sure to clear the depth buffer if it is used.
3967        if needs_depth && !cleared_depth {
3968            // TODO: We could also clear the depth buffer via ps_clear. This
3969            // is done by picture cache targets in some cases.
3970            self.device.clear_target(None, clear_depth, None);
3971        }
3972
3973        // Finally, if we decided to clear with quads or if we need to clear
3974        // some areas with specific colors that don't match the global clear
3975        // color, clear more areas using a draw call.
3976
3977        let mut clear_instances = Vec::with_capacity(target.clears.len());
3978        for (rect, color) in &target.clears {
3979            if clear_with_quads || (!require_precise_clear && target.clear_color != Some(*color)) {
3980                let rect = rect.to_f32();
3981                clear_instances.push(ClearInstance {
3982                    rect: [
3983                        rect.min.x, rect.min.y,
3984                        rect.max.x, rect.max.y,
3985                    ],
3986                    color: color.to_array(),
3987                })
3988            }
3989        }
3990
3991        if !clear_instances.is_empty() {
3992            self.shaders.borrow_mut().ps_clear().bind(
3993                &mut self.device,
3994                &projection,
3995                None,
3996                &mut self.renderer_errors,
3997                &mut self.profile,
3998            );
3999            self.draw_instanced_batch(
4000                &clear_instances,
4001                VertexArrayKind::Clear,
4002                &BatchTextures::empty(),
4003                stats,
4004            );
4005        }
4006    }
4007
4008    fn draw_render_target(
4009        &mut self,
4010        texture_id: CacheTextureId,
4011        target: &RenderTarget,
4012        render_tasks: &RenderTaskGraph,
4013        stats: &mut RendererStats,
4014    ) {
4015        let needs_depth = target.needs_depth();
4016
4017        let texture = self.texture_resolver.get_cache_texture_mut(&texture_id);
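        // Attach a depth buffer only when this target actually needs one.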
4018        if needs_depth {
4019            self.device.reuse_render_target::<u8>(
4020                texture,
4021                RenderTargetInfo { has_depth: needs_depth },
4022            );
4023        }
4024
4025        let draw_target = DrawTarget::from_texture(
4026            texture,
4027            needs_depth,
4028        );
4029
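        // An orthographic projection mapping the target's pixel space to clip
        // space: x in [0, width] and y in [0, height] both map to [-1, 1].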
4030        let projection = Transform3D::ortho(
4031            0.0,
4032            draw_target.dimensions().width as f32,
4033            0.0,
4034            draw_target.dimensions().height as f32,
4035            self.device.ortho_near_plane(),
4036            self.device.ortho_far_plane(),
4037        );
4038
4039        profile_scope!("draw_render_target");
4040        let _gm = self.gpu_profiler.start_marker("render target");
4041
4042        let counter = match target.target_kind {
4043            RenderTargetKind::Color => profiler::COLOR_PASSES,
4044            RenderTargetKind::Alpha => profiler::ALPHA_PASSES,
4045        };
4046        self.profile.inc(counter);
4047
4048        let sampler_query = match target.target_kind {
4049            RenderTargetKind::Color => None,
4050            RenderTargetKind::Alpha => Some(self.gpu_profiler.start_sampler(GPU_SAMPLER_TAG_ALPHA)),
4051        };
4052
4053        // sanity check for the depth buffer
4054        if let DrawTarget::Texture { with_depth, .. } = draw_target {
4055            assert!(with_depth >= target.needs_depth());
4056        }
4057
4058        let framebuffer_kind = if draw_target.is_default() {
4059            FramebufferKind::Main
4060        } else {
4061            FramebufferKind::Other
4062        };
4063
4064        self.device.bind_draw_target(draw_target);
4065
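        // On GPUs exposing QCOM_tiled_rendering, constrain tiled rendering to
        // the used rect. The preserve mask keeps the existing color contents
        // only when no clear color is set.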
4066        if self.device.get_capabilities().supports_qcom_tiled_rendering {
4067            let preserve_mask = match target.clear_color {
4068                Some(_) => 0,
4069                None => gl::COLOR_BUFFER_BIT0_QCOM,
4070            };
4071            if let Some(used_rect) = target.used_rect {
4072                self.device.gl().start_tiling_qcom(
4073                    used_rect.min.x.max(0) as _,
4074                    used_rect.min.y.max(0) as _,
4075                    used_rect.width() as _,
4076                    used_rect.height() as _,
4077                    preserve_mask,
4078                );
4079            }
4080        }
4081
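        // Depth writes must be enabled here for the depth clear performed in
        // clear_render_target to take effect.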
4082        if needs_depth {
4083            self.device.enable_depth_write();
4084        } else {
4085            self.device.disable_depth_write();
4086        }
4087
4088        self.clear_render_target(
4089            target,
4090            draw_target,
4091            framebuffer_kind,
4092            &projection,
4093            stats,
4094        );
4095
4096        if needs_depth {
4097            self.device.disable_depth_write();
4098        }
4099
4100        // Handle any resolves from parent pictures to this target
4101        self.handle_resolves(
4102            &target.resolve_ops,
4103            render_tasks,
4104            draw_target,
4105        );
4106
4107        // Handle any blits from the texture cache to this target.
4108        self.handle_blits(
4109            &target.blits,
4110            render_tasks,
4111            draw_target,
4112        );
4113
4114        // Draw any borders for this target.
4115        if !target.border_segments_solid.is_empty() ||
4116           !target.border_segments_complex.is_empty()
4117        {
4118            let _timer = self.gpu_profiler.start_timer(GPU_TAG_CACHE_BORDER);
4119
4120            self.set_blend(true, FramebufferKind::Other);
4121            self.set_blend_mode_premultiplied_alpha(FramebufferKind::Other);
4122
4123            if !target.border_segments_solid.is_empty() {
4124                self.shaders.borrow_mut().cs_border_solid().bind(
4125                    &mut self.device,
4126                    &projection,
4127                    None,
4128                    &mut self.renderer_errors,
4129                    &mut self.profile,
4130                );
4131
4132                self.draw_instanced_batch(
4133                    &target.border_segments_solid,
4134                    VertexArrayKind::Border,
4135                    &BatchTextures::empty(),
4136                    stats,
4137                );
4138            }
4139
4140            if !target.border_segments_complex.is_empty() {
4141                self.shaders.borrow_mut().cs_border_segment().bind(
4142                    &mut self.device,
4143                    &projection,
4144                    None,
4145                    &mut self.renderer_errors,
4146                    &mut self.profile,
4147                );
4148
4149                self.draw_instanced_batch(
4150                    &target.border_segments_complex,
4151                    VertexArrayKind::Border,
4152                    &BatchTextures::empty(),
4153                    stats,
4154                );
4155            }
4156
4157            self.set_blend(false, FramebufferKind::Other);
4158        }
4159
4160        // Draw any line decorations for this target.
4161        if !target.line_decorations.is_empty() {
4162            let _timer = self.gpu_profiler.start_timer(GPU_TAG_CACHE_LINE_DECORATION);
4163
4164            self.set_blend(true, FramebufferKind::Other);
4165            self.set_blend_mode_premultiplied_alpha(FramebufferKind::Other);
4166
4167            self.shaders.borrow_mut().cs_line_decoration().bind(
4168                &mut self.device,
4169                &projection,
4170                None,
4171                &mut self.renderer_errors,
4172                &mut self.profile,
4173            );
4174
4175            self.draw_instanced_batch(
4176                &target.line_decorations,
4177                VertexArrayKind::LineDecoration,
4178                &BatchTextures::empty(),
4179                stats,
4180            );
4181
4182            self.set_blend(false, FramebufferKind::Other);
4183        }
4184
4185        // Draw any fast path linear gradients for this target.
4186        if !target.fast_linear_gradients.is_empty() {
4187            let _timer = self.gpu_profiler.start_timer(GPU_TAG_CACHE_FAST_LINEAR_GRADIENT);
4188
4189            self.set_blend(false, FramebufferKind::Other);
4190
4191            self.shaders.borrow_mut().cs_fast_linear_gradient().bind(
4192                &mut self.device,
4193                &projection,
4194                None,
4195                &mut self.renderer_errors,
4196                &mut self.profile,
4197            );
4198
4199            self.draw_instanced_batch(
4200                &target.fast_linear_gradients,
4201                VertexArrayKind::FastLinearGradient,
4202                &BatchTextures::empty(),
4203                stats,
4204            );
4205        }
4206
4207        // Draw any linear gradients for this target.
4208        if !target.linear_gradients.is_empty() {
4209            let _timer = self.gpu_profiler.start_timer(GPU_TAG_CACHE_LINEAR_GRADIENT);
4210
4211            self.set_blend(false, FramebufferKind::Other);
4212
4213            self.shaders.borrow_mut().cs_linear_gradient().bind(
4214                &mut self.device,
4215                &projection,
4216                None,
4217                &mut self.renderer_errors,
4218                &mut self.profile,
4219            );
4220
4221            if let Some(ref texture) = self.dither_matrix_texture {
4222                self.device.bind_texture(TextureSampler::Dither, texture, Swizzle::default());
4223            }
4224
4225            self.draw_instanced_batch(
4226                &target.linear_gradients,
4227                VertexArrayKind::LinearGradient,
4228                &BatchTextures::empty(),
4229                stats,
4230            );
4231        }
4232
4233        // Draw any radial gradients for this target.
4234        if !target.radial_gradients.is_empty() {
4235            let _timer = self.gpu_profiler.start_timer(GPU_TAG_RADIAL_GRADIENT);
4236
4237            self.set_blend(false, FramebufferKind::Other);
4238
4239            self.shaders.borrow_mut().cs_radial_gradient().bind(
4240                &mut self.device,
4241                &projection,
4242                None,
4243                &mut self.renderer_errors,
4244                &mut self.profile,
4245            );
4246
4247            if let Some(ref texture) = self.dither_matrix_texture {
4248                self.device.bind_texture(TextureSampler::Dither, texture, Swizzle::default());
4249            }
4250
4251            self.draw_instanced_batch(
4252                &target.radial_gradients,
4253                VertexArrayKind::RadialGradient,
4254                &BatchTextures::empty(),
4255                stats,
4256            );
4257        }
4258
4259        // Draw any conic gradients for this target.
4260        if !target.conic_gradients.is_empty() {
4261            let _timer = self.gpu_profiler.start_timer(GPU_TAG_CONIC_GRADIENT);
4262
4263            self.set_blend(false, FramebufferKind::Other);
4264
4265            self.shaders.borrow_mut().cs_conic_gradient().bind(
4266                &mut self.device,
4267                &projection,
4268                None,
4269                &mut self.renderer_errors,
4270                &mut self.profile,
4271            );
4272
4273            if let Some(ref texture) = self.dither_matrix_texture {
4274                self.device.bind_texture(TextureSampler::Dither, texture, Swizzle::default());
4275            }
4276
4277            self.draw_instanced_batch(
4278                &target.conic_gradients,
4279                VertexArrayKind::ConicGradient,
4280                &BatchTextures::empty(),
4281                stats,
4282            );
4283        }
4284
4285        // Draw any blurs for this target.
4286        // Blurs are rendered as a standard 2-pass
4287        // separable implementation.
4288        // TODO(gw): In the future, consider having
4289        //           fast path blur shaders for common
4290        //           blur radii with fixed weights.
4291        if !target.vertical_blurs.is_empty() || !target.horizontal_blurs.is_empty() {
4292            let _timer = self.gpu_profiler.start_timer(GPU_TAG_BLUR);
4293
4294            self.set_blend(false, framebuffer_kind);
4295            self.shaders.borrow_mut().cs_blur_rgba8()
4296                .bind(&mut self.device, &projection, None, &mut self.renderer_errors, &mut self.profile);
4297
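            // A single shader bind covers both axes; the blur direction is
            // carried in each BlurInstance.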
4298            if !target.vertical_blurs.is_empty() {
4299                self.draw_blurs(
4300                    &target.vertical_blurs,
4301                    stats,
4302                );
4303            }
4304
4305            if !target.horizontal_blurs.is_empty() {
4306                self.draw_blurs(
4307                    &target.horizontal_blurs,
4308                    stats,
4309                );
4310            }
4311        }
4312
4313        self.handle_scaling(
4314            &target.scalings,
4315            &projection,
4316            stats,
4317        );
4318
4319        for (ref textures, ref filters) in &target.svg_filters {
4320            self.handle_svg_filters(
4321                textures,
4322                filters,
4323                &projection,
4324                stats,
4325            );
4326        }
4327
4328        for (ref textures, ref filters) in &target.svg_nodes {
4329            self.handle_svg_nodes(textures, filters, &projection, stats);
4330        }
4331
4332        for alpha_batch_container in &target.alpha_batch_containers {
4333            self.draw_alpha_batch_container(
4334                alpha_batch_container,
4335                draw_target,
4336                framebuffer_kind,
4337                &projection,
4338                render_tasks,
4339                stats,
4340            );
4341        }
4342
4343        self.handle_prims(
4344            &draw_target,
4345            &target.prim_instances,
4346            &target.prim_instances_with_scissor,
4347            &projection,
4348            stats,
4349        );
4350
4351        // Draw the clip items into the tiled alpha mask.
4352        {
4353            let _timer = self.gpu_profiler.start_timer(GPU_TAG_CACHE_CLIP);
4354
4355            // TODO(gw): Consider grouping multiple clip masks per shader
4356            //           invocation here to reduce memory bandwidth further?
4357
4358            if !target.clip_batcher.primary_clips.is_empty() {
4359                // Draw the primary clip mask - since this is the first mask
4360                // for the task, we can disable blending, knowing that it will
4361                // overwrite every pixel in the mask area.
4362                self.set_blend(false, FramebufferKind::Other);
4363                self.draw_clip_batch_list(
4364                    &target.clip_batcher.primary_clips,
4365                    &projection,
4366                    stats,
4367                );
4368            }
4369
4370            if !target.clip_batcher.secondary_clips.is_empty() {
4371                // Switch to multiplicative blending for secondary masks,
4372                // accumulating the additional clips into the mask.
4373                self.set_blend(true, FramebufferKind::Other);
4374                self.set_blend_mode_multiply(FramebufferKind::Other);
4375                self.draw_clip_batch_list(
4376                    &target.clip_batcher.secondary_clips,
4377                    &projection,
4378                    stats,
4379                );
4380            }
4381
4382            self.handle_clips(
4383                &draw_target,
4384                &target.clip_masks,
4385                &projection,
4386                stats,
4387            );
4388        }
4389
4390        if needs_depth {
4391            self.device.invalidate_depth_target();
4392        }
4393        if self.device.get_capabilities().supports_qcom_tiled_rendering {
4394            self.device.gl().end_tiling_qcom(gl::COLOR_BUFFER_BIT0_QCOM);
4395        }
4396
4397        if let Some(sampler) = sampler_query {
4398            self.gpu_profiler.finish_sampler(sampler);
4399        }
4400    }
4401
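    /// Draws the blur instances for one axis, issuing one instanced draw call
    /// per source texture.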
4402    fn draw_blurs(
4403        &mut self,
4404        blurs: &FastHashMap<TextureSource, FrameVec<BlurInstance>>,
4405        stats: &mut RendererStats,
4406    ) {
4407        for (texture, blurs) in blurs {
4408            let textures = BatchTextures::composite_rgb(
4409                *texture,
4410            );
4411
4412            self.draw_instanced_batch(
4413                blurs,
4414                VertexArrayKind::Blur,
4415                &textures,
4416                stats,
4417            );
4418        }
4419    }
4420
4421    /// Draw all the instances in a clip batcher list to the current target.
4422    fn draw_clip_batch_list(
4423        &mut self,
4424        list: &ClipBatchList,
4425        projection: &default::Transform3D<f32>,
4426        stats: &mut RendererStats,
4427    ) {
4428        if self.debug_flags.contains(DebugFlags::DISABLE_CLIP_MASKS) {
4429            return;
4430        }
4431
4432        // Draw rounded-rectangle clips via the general-purpose (slow) shader
4433        if !list.slow_rectangles.is_empty() {
4434            let _gm2 = self.gpu_profiler.start_marker("slow clip rectangles");
4435            self.shaders.borrow_mut().cs_clip_rectangle_slow().bind(
4436                &mut self.device,
4437                projection,
4438                None,
4439                &mut self.renderer_errors,
4440                &mut self.profile,
4441            );
4442            self.draw_instanced_batch(
4443                &list.slow_rectangles,
4444                VertexArrayKind::ClipRect,
4445                &BatchTextures::empty(),
4446                stats,
4447            );
4448        }
4449        if !list.fast_rectangles.is_empty() {
4450            let _gm2 = self.gpu_profiler.start_marker("fast clip rectangles");
4451            self.shaders.borrow_mut().cs_clip_rectangle_fast().bind(
4452                &mut self.device,
4453                projection,
4454                None,
4455                &mut self.renderer_errors,
4456                &mut self.profile,
4457            );
4458            self.draw_instanced_batch(
4459                &list.fast_rectangles,
4460                VertexArrayKind::ClipRect,
4461                &BatchTextures::empty(),
4462                stats,
4463            );
4464        }
4465
4466        // Draw box-shadow clips, batched per mask texture
4467        for (mask_texture_id, items) in list.box_shadows.iter() {
4468            let _gm2 = self.gpu_profiler.start_marker("box-shadows");
4469            let textures = BatchTextures::composite_rgb(*mask_texture_id);
4470            self.shaders.borrow_mut().cs_clip_box_shadow()
4471                .bind(&mut self.device, projection, None, &mut self.renderer_errors, &mut self.profile);
4472            self.draw_instanced_batch(
4473                items,
4474                VertexArrayKind::ClipBoxShadow,
4475                &textures,
4476                stats,
4477            );
4478        }
4479    }
4480
4481    fn update_deferred_resolves(&mut self, deferred_resolves: &[DeferredResolve]) -> Option<GpuCacheUpdateList> {
4482        // The first thing we do is run through any pending deferred
4483        // resolves, and use a callback to get the UV rect for this
4484        // custom item. Then we patch the resource_rects structure
4485        // here before it's uploaded to the GPU.
4486        if deferred_resolves.is_empty() {
4487            return None;
4488        }
4489
4490        let handler = self.external_image_handler
4491            .as_mut()
4492            .expect("Found external image, but no handler set!");
4493
4494        let mut list = GpuCacheUpdateList {
4495            frame_id: FrameId::INVALID,
4496            clear: false,
4497            height: self.gpu_cache_texture.get_height(),
4498            blocks: Vec::new(),
4499            updates: Vec::new(),
4500            debug_commands: Vec::new(),
4501        };
4502
4503        for (i, deferred_resolve) in deferred_resolves.iter().enumerate() {
4504            self.gpu_profiler.place_marker("deferred resolve");
4505            let props = &deferred_resolve.image_properties;
4506            let ext_image = props
4507                .external_image
4508                .expect("BUG: Deferred resolves must be external images!");
4509            // Provide rendering information for NativeTexture external images.
4510            let image = handler.lock(ext_image.id, ext_image.channel_index);
4511            let texture_target = match ext_image.image_type {
4512                ExternalImageType::TextureHandle(target) => target,
4513                ExternalImageType::Buffer => {
4514                    panic!("not a suitable image type in update_deferred_resolves()");
4515                }
4516            };
4517
4518            // In order to produce the handle, the external image handler may call into
4519            // the GL context and change some state.
4520            self.device.reset_state();
4521
4522            let texture = match image.source {
4523                ExternalImageSource::NativeTexture(texture_id) => {
4524                    ExternalTexture::new(
4525                        texture_id,
4526                        texture_target,
4527                        image.uv,
4528                        deferred_resolve.rendering,
4529                    )
4530                }
4531                ExternalImageSource::Invalid => {
4532                    warn!("Invalid ext-image");
4533                    debug!(
4534                        "For ext_id:{:?}, channel:{}.",
4535                        ext_image.id,
4536                        ext_image.channel_index
4537                    );
4538                    // Just use 0 as the gl handle for this failed case.
4539                    ExternalTexture::new(
4540                        0,
4541                        texture_target,
4542                        image.uv,
4543                        deferred_resolve.rendering,
4544                    )
4545                }
4546                ExternalImageSource::RawData(_) => {
4547                    panic!("Raw external data is not expected for deferred resolves!");
4548                }
4549            };
4550
4551            self.texture_resolver
4552                .external_images
4553                .insert(DeferredResolveIndex(i as u32), texture);
4554
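            // Each deferred resolve occupies BLOCKS_PER_UV_RECT blocks in the
            // GPU cache: the UV bounds followed by a zeroed padding block.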
4555            list.updates.push(GpuCacheUpdate::Copy {
4556                block_index: list.blocks.len(),
4557                block_count: BLOCKS_PER_UV_RECT,
4558                address: deferred_resolve.address,
4559            });
4560            list.blocks.push(image.uv.into());
4561            list.blocks.push([0f32; 4].into());
4562        }
4563
4564        Some(list)
4565    }
4566
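    /// Unlocks any external images that were locked by `update_deferred_resolves`
    /// while building this frame.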
4567    fn unlock_external_images(
4568        &mut self,
4569        deferred_resolves: &[DeferredResolve],
4570    ) {
4571        if !self.texture_resolver.external_images.is_empty() {
4572            let handler = self.external_image_handler
4573                .as_mut()
4574                .expect("Found external image, but no handler set!");
4575
4576            for (index, _) in self.texture_resolver.external_images.drain() {
4577                let props = &deferred_resolves[index.0 as usize].image_properties;
4578                let ext_image = props
4579                    .external_image
4580                    .expect("BUG: Deferred resolves must be external images!");
4581                handler.unlock(ext_image.id, ext_image.channel_index);
4582            }
4583        }
4584    }
4585
4586    /// Update the dirty rects based on current compositing mode and config
4587    // TODO(gw): This can be tidied up significantly once the Draw compositor
4588    //           is implemented in terms of the compositor trait.
4589    fn calculate_dirty_rects(
4590        &mut self,
4591        buffer_age: usize,
4592        composite_state: &CompositeState,
4593        draw_target_dimensions: DeviceIntSize,
4594        results: &mut RenderResults,
4595    ) -> Option<PartialPresentMode> {
4596        let mut partial_present_mode = None;
4597
4598        let (max_partial_present_rects, draw_previous_partial_present_regions) = match self.current_compositor_kind {
4599            CompositorKind::Native { .. } => {
4600                // Assume that we can return a single dirty rect for the native
4601                // compositor for now, and that there is no buffer-age functionality.
4602                // These params can be exposed by the compositor capabilities struct
4603                // as the Draw compositor is ported to use it.
4604                (1, false)
4605            }
4606            CompositorKind::Draw { draw_previous_partial_present_regions, max_partial_present_rects } => {
4607                (max_partial_present_rects, draw_previous_partial_present_regions)
4608            }
4609            CompositorKind::Layer { .. } => {
4610                (0, false)
4611            }
4612        };
4613
4614        if max_partial_present_rects > 0 {
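            // Determine the damage accumulated by previous frames still present
            // in this buffer. If the tracker has no record for this buffer age,
            // we conservatively treat the whole buffer as damaged.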
4615            let prev_frames_damage_rect = if self.compositor_config.partial_present().is_some() {
4616                self.buffer_damage_tracker
4617                    .get_damage_rect(buffer_age)
4618                    .or_else(|| Some(DeviceRect::from_size(draw_target_dimensions.to_f32())))
4619            } else {
4620                None
4621            };
4622
4623            let can_use_partial_present =
4624                composite_state.dirty_rects_are_valid &&
4625                !self.force_redraw &&
4626                !(prev_frames_damage_rect.is_none() && draw_previous_partial_present_regions) &&
4627                !self.debug_overlay_state.is_enabled;
4628
4629            if can_use_partial_present {
4630                let mut combined_dirty_rect = DeviceRect::zero();
4631                let fb_rect = DeviceRect::from_size(draw_target_dimensions.to_f32());
4632
4633                // Union the dirty rects WR produced into a single rect, since we
4634                // only report one dirty rect to the device.
4635                for tile in &composite_state.tiles {
4636                    if tile.kind == TileKind::Clear {
4637                        continue;
4638                    }
4639                    let dirty_rect = composite_state.get_device_rect(
4640                        &tile.local_dirty_rect,
4641                        tile.transform_index,
4642                    );
4643
4644                    // In pathological cases where a tile is extremely zoomed, it
4645                    // may end up with device coords outside the range of an i32,
4646                    // so clamp it to the frame buffer rect here, before it gets
4647                    // cast to an i32 rect below.
4648                    if let Some(dirty_rect) = dirty_rect.intersection(&fb_rect) {
4649                        combined_dirty_rect = combined_dirty_rect.union(&dirty_rect);
4650                    }
4651                }
4652
4653                let combined_dirty_rect = combined_dirty_rect.round();
4654                let combined_dirty_rect_i32 = combined_dirty_rect.to_i32();
4655                // Return this frame's dirty region. If nothing has changed, don't return any dirty
4656                // rects at all (the client can use this as a signal to skip present completely).
4657                if !combined_dirty_rect.is_empty() {
4658                    results.dirty_rects.push(combined_dirty_rect_i32);
4659                }
4660
4661                // Track this frame's dirty region, for calculating subsequent frames' damage.
4662                if draw_previous_partial_present_regions {
4663                    self.buffer_damage_tracker.push_dirty_rect(&combined_dirty_rect);
4664                }
4665
4666                // If the implementation requires manually keeping the buffer consistent,
4667                // then we must combine this frame's dirty region with that of previous frames
4668                // to determine the total_dirty_rect. This is used to determine what region we
4669                // render to, and is what we send to the compositor as the buffer damage region
4670                // (e.g. for KHR_partial_update).
4671                let total_dirty_rect = if draw_previous_partial_present_regions {
4672                    combined_dirty_rect.union(&prev_frames_damage_rect.unwrap())
4673                } else {
4674                    combined_dirty_rect
4675                };
4676
4677                partial_present_mode = Some(PartialPresentMode::Single {
4678                    dirty_rect: total_dirty_rect,
4679                });
4680            } else {
4681                // If we don't have a valid partial present scenario, return a single
4682                // dirty rect to the client that covers the entire framebuffer.
4683                let fb_rect = DeviceIntRect::from_size(
4684                    draw_target_dimensions,
4685                );
4686                results.dirty_rects.push(fb_rect);
4687
4688                if draw_previous_partial_present_regions {
4689                    self.buffer_damage_tracker.push_dirty_rect(&fb_rect.to_f32());
4690                }
4691            }
4692
4693            self.force_redraw = false;
4694        }
4695
4696        partial_present_mode
4697    }
4698
4699    fn bind_frame_data(&mut self, frame: &mut Frame) {
4700        profile_scope!("bind_frame_data");
4701
4702        let _timer = self.gpu_profiler.start_timer(GPU_TAG_SETUP_DATA);
4703
4704        self.vertex_data_textures[self.current_vertex_data_textures].update(
4705            &mut self.device,
4706            &mut self.texture_upload_pbo_pool,
4707            frame,
4708        );
4709        self.current_vertex_data_textures =
4710            (self.current_vertex_data_textures + 1) % VERTEX_DATA_TEXTURE_COUNT;
4711    }
4712
4713    fn update_native_surfaces(&mut self) {
4714        profile_scope!("update_native_surfaces");
4715
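        // Drain the queued surface operations and apply them to the native OS
        // compositor. Only the Native compositor consumes this queue.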
4716        match self.compositor_config {
4717            CompositorConfig::Native { ref mut compositor, .. } => {
4718                for op in self.pending_native_surface_updates.drain(..) {
4719                    match op.details {
4720                        NativeSurfaceOperationDetails::CreateSurface { id, virtual_offset, tile_size, is_opaque } => {
4721                            let _inserted = self.allocated_native_surfaces.insert(id);
4722                            debug_assert!(_inserted, "bug: creating existing surface");
4723                            compositor.create_surface(
4724                                    &mut self.device,
4725                                    id,
4726                                    virtual_offset,
4727                                    tile_size,
4728                                    is_opaque,
4729                            );
4730                        }
4731                        NativeSurfaceOperationDetails::CreateExternalSurface { id, is_opaque } => {
4732                            let _inserted = self.allocated_native_surfaces.insert(id);
4733                            debug_assert!(_inserted, "bug: creating existing surface");
4734                            compositor.create_external_surface(
4735                                &mut self.device,
4736                                id,
4737                                is_opaque,
4738                            );
4739                        }
4740                        NativeSurfaceOperationDetails::CreateBackdropSurface { id, color } => {
4741                            let _inserted = self.allocated_native_surfaces.insert(id);
4742                            debug_assert!(_inserted, "bug: creating existing surface");
4743                            compositor.create_backdrop_surface(
4744                                &mut self.device,
4745                                id,
4746                                color,
4747                            );
4748                        }
4749                        NativeSurfaceOperationDetails::DestroySurface { id } => {
4750                            let _existed = self.allocated_native_surfaces.remove(&id);
4751                            debug_assert!(_existed, "bug: removing unknown surface");
4752                            compositor.destroy_surface(&mut self.device, id);
4753                        }
4754                        NativeSurfaceOperationDetails::CreateTile { id } => {
4755                            compositor.create_tile(&mut self.device, id);
4756                        }
4757                        NativeSurfaceOperationDetails::DestroyTile { id } => {
4758                            compositor.destroy_tile(&mut self.device, id);
4759                        }
4760                        NativeSurfaceOperationDetails::AttachExternalImage { id, external_image } => {
4761                            compositor.attach_external_image(&mut self.device, id, external_image);
4762                        }
4763                    }
4764                }
4765            }
4766            CompositorConfig::Draw { .. } | CompositorConfig::Layer { .. } => {
4767                // Ensure nothing is queued in the simple composite modes, since the
4768                // pending list is never drained there and would otherwise leak memory
4769                debug_assert!(self.pending_native_surface_updates.is_empty());
4770            }
4771        }
4772    }
4773
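    /// Creates a 2D texture holding `buffer`, binds it to `sampler`, and uploads
    /// the data immediately. Returns `None` for an empty buffer; the caller
    /// deletes the texture at the end of the frame (see `draw_frame`).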
4774    fn create_gpu_buffer_texture<T: Texel>(
4775        &mut self,
4776        buffer: &GpuBuffer<T>,
4777        sampler: TextureSampler,
4778    ) -> Option<Texture> {
4779        if buffer.is_empty() {
4780            None
4781        } else {
4782            let gpu_buffer_texture = self.device.create_texture(
4783                ImageBufferKind::Texture2D,
4784                buffer.format,
4785                buffer.size.width,
4786                buffer.size.height,
4787                TextureFilter::Nearest,
4788                None,
4789            );
4790
4791            self.device.bind_texture(
4792                sampler,
4793                &gpu_buffer_texture,
4794                Swizzle::default(),
4795            );
4796
4797            self.device.upload_texture_immediate(
4798                &gpu_buffer_texture,
4799                &buffer.data,
4800            );
4801
4802            Some(gpu_buffer_texture)
4803        }
4804    }
4805
4806    fn draw_frame(
4807        &mut self,
4808        frame: &mut Frame,
4809        device_size: Option<DeviceIntSize>,
4810        buffer_age: usize,
4811        results: &mut RenderResults,
4812    ) {
4813        profile_scope!("draw_frame");
4814
4815        // These markers seem to crash a lot on Android, see bug 1559834
4816        #[cfg(not(target_os = "android"))]
4817        let _gm = self.gpu_profiler.start_marker("draw frame");
4818
4819        if frame.passes.is_empty() {
4820            frame.has_been_rendered = true;
4821            return;
4822        }
4823
4824        self.device.disable_depth_write();
4825        self.set_blend(false, FramebufferKind::Other);
4826        self.device.disable_stencil();
4827
4828        self.bind_frame_data(frame);
4829
4830        // Upload experimental GPU buffer texture if there is any data present
4831        // TODO: Recycle these textures, upload via PBO or best approach for platform
4832        let gpu_buffer_texture_f = self.create_gpu_buffer_texture(
4833            &frame.gpu_buffer_f,
4834            TextureSampler::GpuBufferF,
4835        );
4836        let gpu_buffer_texture_i = self.create_gpu_buffer_texture(
4837            &frame.gpu_buffer_i,
4838            TextureSampler::GpuBufferI,
4839        );
4840
4841        let bytes_to_mb = 1.0 / 1000000.0;
4842        let gpu_buffer_bytes_f = gpu_buffer_texture_f
4843            .as_ref()
4844            .map(|tex| tex.size_in_bytes())
4845            .unwrap_or(0);
4846        let gpu_buffer_bytes_i = gpu_buffer_texture_i
4847            .as_ref()
4848            .map(|tex| tex.size_in_bytes())
4849            .unwrap_or(0);
4850        let gpu_buffer_mb = (gpu_buffer_bytes_f + gpu_buffer_bytes_i) as f32 * bytes_to_mb;
4851        self.profile.set(profiler::GPU_BUFFER_MEM, gpu_buffer_mb);
4852
4853        let gpu_cache_bytes = self.gpu_cache_texture.gpu_size_in_bytes();
4854        let gpu_cache_mb = gpu_cache_bytes as f32 * bytes_to_mb;
4855        self.profile.set(profiler::GPU_CACHE_MEM, gpu_cache_mb);
4856
4857        // Determine the present mode and dirty rects, if device_size
4858        // is Some(..). If it's None, no composite will occur and only
4859        // picture cache and texture cache targets will be updated.
4860        // TODO(gw): Split Frame so that it's clearer when a composite
4861        //           is occurring.
4862        let present_mode = device_size.and_then(|device_size| {
4863            self.calculate_dirty_rects(
4864                buffer_age,
4865                &frame.composite_state,
4866                device_size,
4867                results,
4868            )
4869        });
4870
4871        // If we have a native OS compositor, then make use of that interface to
4872        // specify how to composite each of the picture cache surfaces. First, we
4873        // need to find each tile that may be bound and updated later in the frame
4874        // and invalidate it so that the native render compositor knows that these
4875        // tiles can't be composited early. Next, after all such tiles have been
4876        // invalidated, then we queue surfaces for native composition by the render
4877        // compositor before we actually update the tiles. This allows the render
4878        // compositor to start early composition while the tiles are updating.
4879        if let CompositorKind::Native { .. } = self.current_compositor_kind {
4880            let compositor = self.compositor_config.compositor().unwrap();
4881            // Invalidate any native surface tiles that might be updated by passes.
4882            if !frame.has_been_rendered {
4883                for tile in &frame.composite_state.tiles {
4884                    if tile.kind == TileKind::Clear {
4885                        continue;
4886                    }
4887                    if !tile.local_dirty_rect.is_empty() {
4888                        if let CompositeTileSurface::Texture { surface: ResolvedSurfaceTexture::Native { id, .. } } = tile.surface {
4889                            let valid_rect = frame.composite_state.get_surface_rect(
4890                                &tile.local_valid_rect,
4891                                &tile.local_rect,
4892                                tile.transform_index,
4893                            ).to_i32();
4894
4895                            compositor.invalidate_tile(&mut self.device, id, valid_rect);
4896                        }
4897                    }
4898                }
4899            }
4900            // Ensure any external surfaces that might be used during early composition
4901            // are invalidated first so that the native compositor can properly schedule
4902            // composition to happen only when the external surface is updated.
4903            // See update_external_native_surfaces for more details.
4904            for surface in &frame.composite_state.external_surfaces {
4905                if let Some((native_surface_id, size)) = surface.update_params {
4906                    let surface_rect = size.into();
4907                    compositor.invalidate_tile(&mut self.device, NativeTileId { surface_id: native_surface_id, x: 0, y: 0 }, surface_rect);
4908                }
4909            }
4910            // Finally queue native surfaces for early composition, if applicable. By now,
4911            // we have already invalidated any tiles that such surfaces may depend upon, so
4912            // the native render compositor can keep track of when to actually schedule
4913            // composition as surfaces are updated.
4914            if device_size.is_some() {
4915                frame.composite_state.composite_native(
4916                    self.clear_color,
4917                    &results.dirty_rects,
4918                    &mut self.device,
4919                    &mut **compositor,
4920                );
4921            }
4922        }
4923
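        // Execute the passes in graph order; later passes may sample targets
        // rendered by earlier ones.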
4924        for (_pass_index, pass) in frame.passes.iter_mut().enumerate() {
4925            #[cfg(not(target_os = "android"))]
4926            let _gm = self.gpu_profiler.start_marker(&format!("pass {}", _pass_index));
4927
4928            profile_scope!("offscreen target");
4929
4930            // If this frame has already been drawn, then any texture
4931            // cache targets have already been updated and can be
4932            // skipped this time.
4933            if !frame.has_been_rendered {
4934                for (&texture_id, target) in &pass.texture_cache {
4935                    self.draw_render_target(
4936                        texture_id,
4937                        target,
4938                        &frame.render_tasks,
4939                        &mut results.stats,
4940                    );
4941                }
4942
4943                if !pass.picture_cache.is_empty() {
4944                    self.profile.inc(profiler::COLOR_PASSES);
4945                }
4946
4947                // Draw picture caching tiles for this pass.
4948                for picture_target in &pass.picture_cache {
4949                    results.stats.color_target_count += 1;
4950
4951                    let draw_target = match picture_target.surface {
4952                        ResolvedSurfaceTexture::TextureCache { ref texture } => {
4953                            let (texture, _) = self.texture_resolver
4954                                .resolve(texture)
4955                                .expect("bug");
4956
4957                            DrawTarget::from_texture(
4958                                texture,
4959                                true,
4960                            )
4961                        }
4962                        ResolvedSurfaceTexture::Native { id, size } => {
4963                            let surface_info = match self.current_compositor_kind {
4964                                CompositorKind::Native { .. } => {
4965                                    let compositor = self.compositor_config.compositor().unwrap();
4966                                    compositor.bind(
4967                                        &mut self.device,
4968                                        id,
4969                                        picture_target.dirty_rect,
4970                                        picture_target.valid_rect,
4971                                    )
4972                                }
4973                                CompositorKind::Draw { .. } | CompositorKind::Layer { .. } => {
4974                                    unreachable!();
4975                                }
4976                            };
4977
4978                            DrawTarget::NativeSurface {
4979                                offset: surface_info.origin,
4980                                external_fbo_id: surface_info.fbo_id,
4981                                dimensions: size,
4982                            }
4983                        }
4984                    };
4985
4986                    let projection = Transform3D::ortho(
4987                        0.0,
4988                        draw_target.dimensions().width as f32,
4989                        0.0,
4990                        draw_target.dimensions().height as f32,
4991                        self.device.ortho_near_plane(),
4992                        self.device.ortho_far_plane(),
4993                    );
4994
4995                    self.draw_picture_cache_target(
4996                        picture_target,
4997                        draw_target,
4998                        &projection,
4999                        &frame.render_tasks,
5000                        &mut results.stats,
5001                    );
5002
5003                    // Native OS surfaces must be unbound once drawing to them is complete
5004                    if let ResolvedSurfaceTexture::Native { .. } = picture_target.surface {
5005                        match self.current_compositor_kind {
5006                            CompositorKind::Native { .. } => {
5007                                let compositor = self.compositor_config.compositor().unwrap();
5008                                compositor.unbind(&mut self.device);
5009                            }
5010                            CompositorKind::Draw { .. } | CompositorKind::Layer { .. } => {
5011                                unreachable!();
5012                            }
5013                        }
5014                    }
5015                }
5016            }
5017
5018            for target in &pass.alpha.targets {
5019                results.stats.alpha_target_count += 1;
5020                self.draw_render_target(
5021                    target.texture_id(),
5022                    target,
5023                    &frame.render_tasks,
5024                    &mut results.stats,
5025                );
5026            }
5027
5028            for target in &pass.color.targets {
5029                results.stats.color_target_count += 1;
5030                self.draw_render_target(
5031                    target.texture_id(),
5032                    target,
5033                    &frame.render_tasks,
5034                    &mut results.stats,
5035                );
5036            }
5037
5038            // Only end the pass here and invalidate previous textures for
5039            // off-screen targets. Deferring return of the inputs to the
5040            // frame buffer until the implicit end_pass in end_frame allows
5041            // debug draw overlays to be added without triggering a copy
5042            // resolve stage in mobile / tiled GPUs.
5043            self.texture_resolver.end_pass(
5044                &mut self.device,
5045                &pass.textures_to_invalidate,
5046            );
5047            {
5048                profile_scope!("gl.flush");
5049                self.device.gl().flush();
5050            }
5051        }
5052
5053        self.composite_frame(
5054            frame,
5055            device_size,
5056            results,
5057            present_mode,
5058        );
5059
5060        if let Some(gpu_buffer_texture_f) = gpu_buffer_texture_f {
5061            self.device.delete_texture(gpu_buffer_texture_f);
5062        }
5063        if let Some(gpu_buffer_texture_i) = gpu_buffer_texture_i {
5064            self.device.delete_texture(gpu_buffer_texture_i);
5065        }
5066
5067        frame.has_been_rendered = true;
5068    }
5069
5070    fn composite_frame(
5071        &mut self,
5072        frame: &mut Frame,
5073        device_size: Option<DeviceIntSize>,
5074        results: &mut RenderResults,
5075        present_mode: Option<PartialPresentMode>,
5076    ) {
5077        profile_scope!("main target");
5078
5079        if let Some(device_size) = device_size {
5080            results.stats.color_target_count += 1;
5081            results.picture_cache_debug = mem::replace(
5082                &mut frame.composite_state.picture_cache_debug,
5083                PictureCacheDebugInfo::new(),
5084            );
5085
5086            let size = frame.device_rect.size().to_f32();
5087            let surface_origin_is_top_left = self.device.surface_origin_is_top_left();
5088            let (bottom, top) = if surface_origin_is_top_left {
5089                (0.0, size.height)
5090            } else {
5091                (size.height, 0.0)
5092            };
5093
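            // Flip the projection's y axis when the surface origin is the GL
            // default (bottom-left) so the frame is presented upright.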
5094            let projection = Transform3D::ortho(
5095                0.0,
5096                size.width,
5097                bottom,
5098                top,
5099                self.device.ortho_near_plane(),
5100                self.device.ortho_far_plane(),
5101            );
5102
5103            let fb_scale = Scale::<_, _, FramebufferPixel>::new(1i32);
5104            let mut fb_rect = frame.device_rect * fb_scale;
5105
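            // Convert the device rect to the framebuffer's bottom-left origin
            // when needed, e.g. min.y = 10 with height 20 in a 100px-tall
            // buffer becomes min.y = 100 - 30 = 70.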
5106            if !surface_origin_is_top_left {
5107                let h = fb_rect.height();
5108                fb_rect.min.y = device_size.height - fb_rect.max.y;
5109                fb_rect.max.y = fb_rect.min.y + h;
5110            }
5111
5112            let draw_target = DrawTarget::Default {
5113                rect: fb_rect,
5114                total_size: device_size * fb_scale,
5115                surface_origin_is_top_left,
5116            };
5117
5118            // If we have a native OS compositor, then make use of that interface
5119            // to specify how to composite each of the picture cache surfaces.
5120            match self.current_compositor_kind {
5121                CompositorKind::Native { .. } => {
5122                    // We have already queued surfaces for early native composition by this point.
5123                    // All that is left is to finally update any external native surfaces that were
5124                    // invalidated so that composition can complete.
5125                    self.update_external_native_surfaces(
5126                        &frame.composite_state.external_surfaces,
5127                        results,
5128                    );
5129                }
5130                CompositorKind::Draw { .. } | CompositorKind::Layer { .. } => {
5131                    self.composite_simple(
5132                        &frame.composite_state,
5133                        draw_target,
5134                        &projection,
5135                        results,
5136                        present_mode,
5137                        device_size,
5138                    );
5139                }
5140            }
5141        } else {
5142            // Rendering a frame without presenting it will confuse the partial
5143            // present logic, so force a full present for the next frame.
5144            self.force_redraw();
5145        }
5146    }
5147
5148    pub fn debug_renderer(&mut self) -> Option<&mut DebugRenderer> {
5149        self.debug.get_mut(&mut self.device)
5150    }
5151
5152    pub fn get_debug_flags(&self) -> DebugFlags {
5153        self.debug_flags
5154    }
5155
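    /// Sets the debug flags, enabling or disabling the GPU profiler's timer
    /// and sampler queries when the corresponding flags change.
    ///
    /// A minimal usage sketch, assuming an already-initialized `renderer`:
    ///
    /// ```ignore
    /// let mut flags = renderer.get_debug_flags();
    /// flags.insert(DebugFlags::GPU_TIME_QUERIES);
    /// renderer.set_debug_flags(flags);
    /// ```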
5156    pub fn set_debug_flags(&mut self, flags: DebugFlags) {
5157        if let Some(enabled) = flag_changed(self.debug_flags, flags, DebugFlags::GPU_TIME_QUERIES) {
5158            if enabled {
5159                self.gpu_profiler.enable_timers();
5160            } else {
5161                self.gpu_profiler.disable_timers();
5162            }
5163        }
5164        if let Some(enabled) = flag_changed(self.debug_flags, flags, DebugFlags::GPU_SAMPLE_QUERIES) {
5165            if enabled {
5166                self.gpu_profiler.enable_samplers();
5167            } else {
5168                self.gpu_profiler.disable_samplers();
5169            }
5170        }
5171
5172        self.debug_flags = flags;
5173    }
5174
5175    pub fn set_profiler_ui(&mut self, ui_str: &str) {
5176        self.profiler.set_ui(ui_str);
5177    }
5178
5179    fn draw_frame_debug_items(&mut self, items: &[DebugItem]) {
5180        if items.is_empty() {
5181            return;
5182        }
5183
5184        let debug_renderer = match self.debug.get_mut(&mut self.device) {
5185            Some(render) => render,
5186            None => return,
5187        };
5188
5189        for item in items {
5190            match item {
5191                DebugItem::Rect { rect, outer_color, inner_color, thickness } => {
5192                    if inner_color.a > 0.001 {
5193                        let rect = rect.inflate(-thickness as f32, -thickness as f32);
5194                        debug_renderer.add_quad(
5195                            rect.min.x,
5196                            rect.min.y,
5197                            rect.max.x,
5198                            rect.max.y,
5199                            (*inner_color).into(),
5200                            (*inner_color).into(),
5201                        );
5202                    }
5203
5204                    if outer_color.a > 0.001 {
5205                        debug_renderer.add_rect(
5206                            &rect.to_i32(),
5207                            *thickness,
5208                            (*outer_color).into(),
5209                        );
5210                    }
5211                }
5212                DebugItem::Text { ref msg, position, color } => {
5213                    debug_renderer.add_text(
5214                        position.x,
5215                        position.y,
5216                        msg,
5217                        (*color).into(),
5218                        None,
5219                    );
5220                }
5221            }
5222        }
5223    }
5224
5225    fn draw_render_target_debug(&mut self, draw_target: &DrawTarget) {
5226        if !self.debug_flags.contains(DebugFlags::RENDER_TARGET_DBG) {
5227            return;
5228        }
5229
5230        let debug_renderer = match self.debug.get_mut(&mut self.device) {
5231            Some(render) => render,
5232            None => return,
5233        };
5234
5235        let textures = self.texture_resolver
5236            .texture_cache_map
5237            .values()
5238            .filter(|item| item.category == TextureCacheCategory::RenderTarget)
5239            .map(|item| &item.texture)
5240            .collect::<Vec<&Texture>>();
5241
5242        Self::do_debug_blit(
5243            &mut self.device,
5244            debug_renderer,
5245            textures,
5246            draw_target,
5247            0,
5248            &|_| [0.0, 1.0, 0.0, 1.0], // Use green for all RTs.
5249        );
5250    }
5251
5252    fn draw_zoom_debug(
5253        &mut self,
5254        device_size: DeviceIntSize,
5255    ) {
5256        if !self.debug_flags.contains(DebugFlags::ZOOM_DBG) {
5257            return;
5258        }
5259
5260        let debug_renderer = match self.debug.get_mut(&mut self.device) {
5261            Some(render) => render,
5262            None => return,
5263        };
5264
5265        let source_size = DeviceIntSize::new(64, 64);
5266        let target_size = DeviceIntSize::new(1024, 1024);
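        // The 64x64 source region is magnified to 1024x1024, a fixed 16x zoom.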
5267
5268        let source_origin = DeviceIntPoint::new(
5269            (self.cursor_position.x - source_size.width / 2)
5270                .min(device_size.width - source_size.width)
5271                .max(0),
5272            (self.cursor_position.y - source_size.height / 2)
5273                .min(device_size.height - source_size.height)
5274                .max(0),
5275        );
5276
5277        let source_rect = DeviceIntRect::from_origin_and_size(
5278            source_origin,
5279            source_size,
5280        );
5281
5282        let target_rect = DeviceIntRect::from_origin_and_size(
5283            DeviceIntPoint::new(
5284                device_size.width - target_size.width - 64,
5285                device_size.height - target_size.height - 64,
5286            ),
5287            target_size,
5288        );
5289
5290        let texture_rect = FramebufferIntRect::from_size(
5291            source_rect.size().cast_unit(),
5292        );
5293
5294        debug_renderer.add_rect(
5295            &target_rect.inflate(1, 1),
5296            1,
5297            debug_colors::RED.into(),
5298        );
5299
5300        if self.zoom_debug_texture.is_none() {
5301            let texture = self.device.create_texture(
5302                ImageBufferKind::Texture2D,
5303                ImageFormat::BGRA8,
5304                source_rect.width(),
5305                source_rect.height(),
5306                TextureFilter::Nearest,
5307                Some(RenderTargetInfo { has_depth: false }),
5308            );
5309
5310            self.zoom_debug_texture = Some(texture);
5311        }
5312
5313        // Copy the framebuffer into the zoom texture.
5314        let read_target = DrawTarget::new_default(device_size, self.device.surface_origin_is_top_left());
5315        self.device.blit_render_target(
5316            read_target.into(),
5317            read_target.to_framebuffer_rect(source_rect),
5318            DrawTarget::from_texture(
5319                self.zoom_debug_texture.as_ref().unwrap(),
5320                false,
5321            ),
5322            texture_rect,
5323            TextureFilter::Nearest,
5324        );
5325
5326        // Draw the zoom texture back to the framebuffer
5327        self.device.blit_render_target(
5328            ReadTarget::from_texture(
5329                self.zoom_debug_texture.as_ref().unwrap(),
5330            ),
5331            texture_rect,
5332            read_target,
5333            read_target.to_framebuffer_rect(target_rect),
5334            TextureFilter::Nearest,
5335        );
5336    }
5337
5338    fn draw_texture_cache_debug(&mut self, draw_target: &DrawTarget) {
5339        if !self.debug_flags.contains(DebugFlags::TEXTURE_CACHE_DBG) {
5340            return;
5341        }
5342
5343        let debug_renderer = match self.debug.get_mut(&mut self.device) {
5344            Some(render) => render,
5345            None => return,
5346        };
5347
5348        let textures = self.texture_resolver
5349            .texture_cache_map
5350            .values()
5351            .filter(|item| item.category == TextureCacheCategory::Atlas)
5352            .map(|item| &item.texture)
5353            .collect::<Vec<&Texture>>();
5354
5355        fn select_color(texture: &Texture) -> [f32; 4] {
5356            if texture.flags().contains(TextureFlags::IS_SHARED_TEXTURE_CACHE) {
5357                [1.0, 0.5, 0.0, 1.0] // Orange for shared.
5358            } else {
5359                [1.0, 0.0, 1.0, 1.0] // Fuchsia for standalone.
5360            }
5361        }
5362
5363        Self::do_debug_blit(
5364            &mut self.device,
5365            debug_renderer,
5366            textures,
5367            draw_target,
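            // 544 = size (512) + spacing (16) + tag_height (14 + 2 * 1): one full
            // debug row, so this row stacks above the render-target row.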
5368            if self.debug_flags.contains(DebugFlags::RENDER_TARGET_DBG) { 544 } else { 0 },
5369            &select_color,
5370        );
5371    }
5372
5373    fn do_debug_blit(
5374        device: &mut Device,
5375        debug_renderer: &mut DebugRenderer,
5376        mut textures: Vec<&Texture>,
5377        draw_target: &DrawTarget,
5378        bottom: i32,
5379        select_color: &dyn Fn(&Texture) -> [f32; 4],
5380    ) {
5381        let mut spacing = 16;
5382        let mut size = 512;
5383
5384        let device_size = draw_target.dimensions();
5385        let fb_width = device_size.width;
5386        let fb_height = device_size.height;
5387        let surface_origin_is_top_left = draw_target.surface_origin_is_top_left();
5388
5389        let num_textures = textures.len() as i32;
5390
5391        if num_textures * (size + spacing) > fb_width {
5392            let factor = fb_width as f32 / (num_textures * (size + spacing)) as f32;
5393            size = (size as f32 * factor) as i32;
5394            spacing = (spacing as f32 * factor) as i32;
5395        }
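        // For example, five textures at the default 512 px size on a 1920 px
        // wide target would need 5 * (512 + 16) = 2640 px, so both size and
        // spacing are scaled by 1920 / 2640 (about 0.73) to fit one row.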
5396
5397        let text_height = 14; // Visually approximated.
5398        let text_margin = 1;
5399        let tag_height = text_height + text_margin * 2;
5400        let tag_y = fb_height - (bottom + spacing + tag_height);
5401        let image_y = tag_y - size;
5402
5403        // Sort the display by size (in bytes), so that left-to-right is
5404        // largest-to-smallest.
5405        //
5406        // Note that the vec here is in increasing order, because the elements
5407        // get drawn right-to-left.
5408        textures.sort_by_key(|t| t.size_in_bytes());
5409
5411        for (i, texture) in textures.iter().enumerate() {
5412            let dimensions = texture.get_dimensions();
5413            let src_rect = FramebufferIntRect::from_size(
5414                FramebufferIntSize::new(dimensions.width as i32, dimensions.height as i32),
5415            );
5416
5417            let x = fb_width - (spacing + size) * (i as i32 + 1);
5418
5419            // If we have more targets than can fit in one row on screen, just bail out early.
5420            if x < 0 {
5421                return;
5422            }
5423
5424            // Draw the info tag.
5425            let tag_rect = rect(x, tag_y, size, tag_height).to_box2d();
5426            let tag_color = select_color(texture);
5427            device.clear_target(
5428                Some(tag_color),
5429                None,
5430                Some(draw_target.to_framebuffer_rect(tag_rect)),
5431            );
5432
5433            // Draw the dimensions onto the tag.
5434            let dim = texture.get_dimensions();
5435            let text_rect = tag_rect.inflate(-text_margin, -text_margin);
5436            debug_renderer.add_text(
5437                text_rect.min.x as f32,
5438                text_rect.max.y as f32, // Top-relative.
5439                &format!("{}x{}", dim.width, dim.height),
5440                ColorU::new(0, 0, 0, 255),
5441                Some(tag_rect.to_f32()),
5442            );
5443
5444            // Blit the contents of the texture.
5445            let dest_rect = draw_target.to_framebuffer_rect(rect(x, image_y, size, size).to_box2d());
5446            let read_target = ReadTarget::from_texture(texture);
5447
5448            if surface_origin_is_top_left {
5449                device.blit_render_target(
5450                    read_target,
5451                    src_rect,
5452                    *draw_target,
5453                    dest_rect,
5454                    TextureFilter::Linear,
5455                );
5456            } else {
5457                // Invert y.
5458                device.blit_render_target_invert_y(
5459                    read_target,
5460                    src_rect,
5461                    *draw_target,
5462                    dest_rect,
5463                );
5464            }
5466        }
5467    }
5468
5469    fn draw_epoch_debug(&mut self) {
5470        if !self.debug_flags.contains(DebugFlags::EPOCHS) {
5471            return;
5472        }
5473
5474        let debug_renderer = match self.debug.get_mut(&mut self.device) {
5475            Some(render) => render,
5476            None => return,
5477        };
5478
5479        let dy = debug_renderer.line_height();
5480        let x0: f32 = 30.0;
5481        let y0: f32 = 30.0;
5482        let mut y = y0;
5483        let mut text_width = 0.0;
5484        for ((pipeline, document_id), epoch) in &self.pipeline_info.epochs {
5485            y += dy;
5486            let w = debug_renderer.add_text(
5487                x0, y,
5488                &format!("({:?}, {:?}): {:?}", pipeline, document_id, epoch),
5489                ColorU::new(255, 255, 0, 255),
5490                None,
5491            ).size.width;
5492            text_width = f32::max(text_width, w);
5493        }
5494
5495        let margin = 10.0;
5496        debug_renderer.add_quad(
5497            x0 - margin,
5498            y0 - margin,
5499            x0 + text_width + margin,
5500            y + margin,
5501            ColorU::new(25, 25, 25, 200),
5502            ColorU::new(51, 51, 51, 200),
5503        );
5504    }
5505
5506    fn draw_window_visibility_debug(&mut self) {
5507        if !self.debug_flags.contains(DebugFlags::WINDOW_VISIBILITY_DBG) {
5508            return;
5509        }
5510
5511        let debug_renderer = match self.debug.get_mut(&mut self.device) {
5512            Some(render) => render,
5513            None => return,
5514        };
5515
5516        let x: f32 = 30.0;
5517        let y: f32 = 40.0;
5518
5519        if let CompositorConfig::Native { ref mut compositor, .. } = self.compositor_config {
5520            let visibility = compositor.get_window_visibility(&mut self.device);
5521            let color = if visibility.is_fully_occluded {
5522                ColorU::new(255, 0, 0, 255)
5524            } else {
5525                ColorU::new(0, 0, 255, 255)
5526            };
5527
5528            debug_renderer.add_text(
5529                x, y,
5530                &format!("{:?}", visibility),
5531                color,
5532                None,
5533            );
5534        }
5537    }
5538
5539    fn draw_gpu_cache_debug(&mut self, device_size: DeviceIntSize) {
5540        if !self.debug_flags.contains(DebugFlags::GPU_CACHE_DBG) {
5541            return;
5542        }
5543
5544        let debug_renderer = match self.debug.get_mut(&mut self.device) {
5545            Some(render) => render,
5546            None => return,
5547        };
5548
5549        let (x_off, y_off) = (30f32, 30f32);
5550        let height = self.gpu_cache_texture.get_height()
5551            .min(device_size.height - (y_off as i32) * 2) as usize;
5552        debug_renderer.add_quad(
5553            x_off,
5554            y_off,
5555            x_off + MAX_VERTEX_TEXTURE_WIDTH as f32,
5556            y_off + height as f32,
5557            ColorU::new(80, 80, 80, 80),
5558            ColorU::new(80, 80, 80, 80),
5559        );
5560
5561        let upper = self.gpu_cache_debug_chunks.len().min(height);
5562        for chunk in self.gpu_cache_debug_chunks[0..upper].iter().flatten() {
5563            let color = ColorU::new(250, 0, 0, 200);
5564            debug_renderer.add_quad(
5565                x_off + chunk.address.u as f32,
5566                y_off + chunk.address.v as f32,
5567                x_off + chunk.address.u as f32 + chunk.size as f32,
5568                y_off + chunk.address.v as f32 + 1.0,
5569                color,
5570                color,
5571            );
5572        }
5573    }
5574
5575    /// Pass-through to `Device::read_pixels_into`, used by Gecko's WR bindings.
5576    pub fn read_pixels_into(&mut self, rect: FramebufferIntRect, format: ImageFormat, output: &mut [u8]) {
5577        self.device.read_pixels_into(rect, format, output);
5578    }
5579
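    /// Reads back `rect` from the current read target as tightly packed RGBA8.
    ///
    /// A usage sketch (the rect size is illustrative):
    ///
    /// ```ignore
    /// let rect = FramebufferIntRect::from_size(FramebufferIntSize::new(256, 256));
    /// let pixels = renderer.read_pixels_rgba8(rect);
    /// assert_eq!(pixels.len(), 256 * 256 * 4);
    /// ```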
5580    pub fn read_pixels_rgba8(&mut self, rect: FramebufferIntRect) -> Vec<u8> {
5581        let mut pixels = vec![0; (rect.area() * 4) as usize];
5582        self.device.read_pixels_into(rect, ImageFormat::RGBA8, &mut pixels);
5583        pixels
5584    }
5585
5586    /// De-initializes the Renderer safely, assuming the GL context is still alive and active.
5587    pub fn deinit(mut self) {
5588        // Note: this is a fake frame, only needed because texture deletion is required to happen inside a frame
5589        self.device.begin_frame();
5590        // If we are using a native compositor, ensure that any remaining native
5591        // surfaces are freed.
5592        if let CompositorConfig::Native { mut compositor, .. } = self.compositor_config {
5593            for id in self.allocated_native_surfaces.drain() {
5594                compositor.destroy_surface(&mut self.device, id);
5595            }
5596            // Destroy the debug overlay surface, if currently allocated.
5597            if self.debug_overlay_state.current_size.is_some() {
5598                compositor.destroy_surface(&mut self.device, NativeSurfaceId::DEBUG_OVERLAY);
5599            }
5600            compositor.deinit(&mut self.device);
5601        }
5602        self.gpu_cache_texture.deinit(&mut self.device);
5603        if let Some(dither_matrix_texture) = self.dither_matrix_texture {
5604            self.device.delete_texture(dither_matrix_texture);
5605        }
5606        if let Some(zoom_debug_texture) = self.zoom_debug_texture {
5607            self.device.delete_texture(zoom_debug_texture);
5608        }
5609        for textures in self.vertex_data_textures.drain(..) {
5610            textures.deinit(&mut self.device);
5611        }
5612        self.texture_upload_pbo_pool.deinit(&mut self.device);
5613        self.staging_texture_pool.delete_textures(&mut self.device);
5614        self.texture_resolver.deinit(&mut self.device);
5615        self.vaos.deinit(&mut self.device);
5616        self.debug.deinit(&mut self.device);
5617
5618        if let Ok(shaders) = Rc::try_unwrap(self.shaders) {
5619            shaders.into_inner().deinit(&mut self.device);
5620        }
5621
5622        if let Some(async_screenshots) = self.async_screenshots.take() {
5623            async_screenshots.deinit(&mut self.device);
5624        }
5625
5626        if let Some(async_frame_recorder) = self.async_frame_recorder.take() {
5627            async_frame_recorder.deinit(&mut self.device);
5628        }
5629
5630        #[cfg(feature = "capture")]
5631        self.device.delete_fbo(self.read_fbo);
5632        #[cfg(feature = "replay")]
5633        for (_, ext) in self.owned_external_images {
5634            self.device.delete_external_texture(ext);
5635        }
5636        self.device.end_frame();
5637    }
5638
5639    /// Collects a memory report.
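    ///
    /// The `swgl` argument is the software-GL context to measure. A call
    /// sketch, assuming SWGL is not in use (so a null pointer is passed):
    ///
    /// ```ignore
    /// let report: MemoryReport = renderer.report_memory(std::ptr::null_mut());
    /// ```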
5640    pub fn report_memory(&self, swgl: *mut c_void) -> MemoryReport {
5641        let mut report = MemoryReport::default();
5642
5643        // GPU cache CPU memory.
5644        self.gpu_cache_texture.report_memory_to(&mut report, self.size_of_ops.as_ref().unwrap());
5645
5646        self.staging_texture_pool.report_memory_to(&mut report, self.size_of_ops.as_ref().unwrap());
5647
5648        // Render task CPU memory.
5649        for (_id, doc) in &self.active_documents {
5650            let frame_alloc_stats = doc.frame.allocator_memory.get_stats();
5651            report.frame_allocator += frame_alloc_stats.reserved_bytes;
5652            report.render_tasks += doc.frame.render_tasks.report_memory();
5653        }
5654
5655        // Vertex data GPU memory.
5656        for textures in &self.vertex_data_textures {
5657            report.vertex_data_textures += textures.size_in_bytes();
5658        }
5659
5660        // Texture cache and render target GPU memory.
5661        report += self.texture_resolver.report_memory();
5662
5663        // Texture upload PBO memory.
5664        report += self.texture_upload_pbo_pool.report_memory();
5665
5666        // Textures held internally within the device layer.
5667        report += self.device.report_memory(self.size_of_ops.as_ref().unwrap(), swgl);
5668
5669        report
5670    }
5671
5672    // Sets the blend mode. Blend is unconditionally set if the "show overdraw" debugging mode is
5673    // enabled.
5674    fn set_blend(&mut self, mut blend: bool, framebuffer_kind: FramebufferKind) {
5675        if framebuffer_kind == FramebufferKind::Main &&
5676                self.debug_flags.contains(DebugFlags::SHOW_OVERDRAW) {
5677            blend = true
5678        }
5679        self.device.set_blend(blend)
5680    }
5681
5682    fn set_blend_mode_multiply(&mut self, framebuffer_kind: FramebufferKind) {
5683        if framebuffer_kind == FramebufferKind::Main &&
5684                self.debug_flags.contains(DebugFlags::SHOW_OVERDRAW) {
5685            self.device.set_blend_mode_show_overdraw();
5686        } else {
5687            self.device.set_blend_mode_multiply();
5688        }
5689    }
5690
5691    fn set_blend_mode_premultiplied_alpha(&mut self, framebuffer_kind: FramebufferKind) {
5692        if framebuffer_kind == FramebufferKind::Main &&
5693                self.debug_flags.contains(DebugFlags::SHOW_OVERDRAW) {
5694            self.device.set_blend_mode_show_overdraw();
5695        } else {
5696            self.device.set_blend_mode_premultiplied_alpha();
5697        }
5698    }
5699
5700    /// Clears the texture with a given color.
5701    fn clear_texture(&mut self, texture: &Texture, color: [f32; 4]) {
5702        self.device.bind_draw_target(DrawTarget::from_texture(
5703            &texture,
5704            false,
5705        ));
5706        self.device.clear_target(Some(color), None, None);
5707    }
5708}
5709
5710bitflags! {
5711    /// Flags that control how shaders are pre-cached, if at all.
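    ///
    /// For example, a consumer that wants shaders ready before the first
    /// frame can request `ShaderPrecacheFlags::FULL_COMPILE` at startup,
    /// while `ASYNC_COMPILE` only kicks compilation off in the background.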
5712    #[derive(Default, Debug, Copy, PartialEq, Eq, Clone, PartialOrd, Ord, Hash)]
5713    pub struct ShaderPrecacheFlags: u32 {
5714        /// Needed for const initialization
5715        const EMPTY                 = 0;
5716
5717        /// Only start async compile
5718        const ASYNC_COMPILE         = 1 << 2;
5719
5720        /// Do a full compile/link during startup
5721        const FULL_COMPILE          = 1 << 3;
5722    }
5723}
5724
5725/// The cumulative times spent in each painting phase to generate this frame.
5726#[derive(Debug, Default)]
5727pub struct FullFrameStats {
5728    pub full_display_list: bool,
5729    pub gecko_display_list_time: f64,
5730    pub wr_display_list_time: f64,
5731    pub scene_build_time: f64,
5732    pub frame_build_time: f64,
5733}
5734
5735impl FullFrameStats {
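    /// Combines two frames' stats: the phase times accumulate, and
    /// `full_display_list` is true if it was true for either frame.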
5736    pub fn merge(&self, other: &FullFrameStats) -> Self {
5737        Self {
5738            full_display_list: self.full_display_list || other.full_display_list,
5739            gecko_display_list_time: self.gecko_display_list_time + other.gecko_display_list_time,
5740            wr_display_list_time: self.wr_display_list_time + other.wr_display_list_time,
5741            scene_build_time: self.scene_build_time + other.scene_build_time,
5742            frame_build_time: self.frame_build_time + other.frame_build_time
5743        }
5744    }
5745
5746    pub fn total(&self) -> f64 {
5747        self.gecko_display_list_time + self.wr_display_list_time + self.scene_build_time + self.frame_build_time
5748    }
5749}
5750
5751/// Some basic statistics about the rendered scene, used in Gecko, as
5752/// well as in wrench reftests to ensure that tests are batching and/or
5753/// allocating on render targets as we expect them to.
5754#[repr(C)]
5755#[derive(Debug, Default)]
5756pub struct RendererStats {
5757    pub total_draw_calls: usize,
5758    pub alpha_target_count: usize,
5759    pub color_target_count: usize,
5760    pub texture_upload_mb: f64,
5761    pub resource_upload_time: f64,
5762    pub gpu_cache_upload_time: f64,
5763    pub gecko_display_list_time: f64,
5764    pub wr_display_list_time: f64,
5765    pub scene_build_time: f64,
5766    pub frame_build_time: f64,
5767    pub full_display_list: bool,
5768    pub full_paint: bool,
5769}
5770
5771impl RendererStats {
5772    pub fn merge(&mut self, stats: &FullFrameStats) {
5773        self.gecko_display_list_time = stats.gecko_display_list_time;
5774        self.wr_display_list_time = stats.wr_display_list_time;
5775        self.scene_build_time = stats.scene_build_time;
5776        self.frame_build_time = stats.frame_build_time;
5777        self.full_display_list = stats.full_display_list;
5778        self.full_paint = true;
5779    }
5780}
5781
5782/// Return type from render(), which contains some repr(C) statistics as well as
5783/// some non-repr(C) data.
5784#[derive(Debug, Default)]
5785pub struct RenderResults {
5786    /// Statistics about the frame that was rendered.
5787    pub stats: RendererStats,
5788
5789    /// A list of the device dirty rects that were updated
5790    /// this frame.
5791    /// TODO(gw): This is an initial interface, likely to change in future.
5792    /// TODO(gw): The dirty rects here are currently only useful when scrolling
5793    ///           is not occurring. They are still correct in the case of
5794    ///           scrolling, but will be very large (until we expose proper
5795    ///           OS compositor support where the dirty rects apply to a
5796    ///           specific picture cache slice / OS compositor surface).
5797    pub dirty_rects: Vec<DeviceIntRect>,
5798
5799    /// Information about the state of picture cache tiles. This is only
5800    /// allocated and stored if config.testing is true (e.g. when running wrench).
5801    pub picture_cache_debug: PictureCacheDebugInfo,
5802}
5803
5804#[cfg(any(feature = "capture", feature = "replay"))]
5805#[cfg_attr(feature = "capture", derive(Serialize))]
5806#[cfg_attr(feature = "replay", derive(Deserialize))]
5807struct PlainTexture {
5808    data: String,
5809    size: DeviceIntSize,
5810    format: ImageFormat,
5811    filter: TextureFilter,
5812    has_depth: bool,
5813    category: Option<TextureCacheCategory>,
5814}
5815
5816
5817#[cfg(any(feature = "capture", feature = "replay"))]
5818#[cfg_attr(feature = "capture", derive(Serialize))]
5819#[cfg_attr(feature = "replay", derive(Deserialize))]
5820struct PlainRenderer {
5821    device_size: Option<DeviceIntSize>,
5822    gpu_cache: PlainTexture,
5823    gpu_cache_frame_id: FrameId,
5824    textures: FastHashMap<CacheTextureId, PlainTexture>,
5825}
5826
5827#[cfg(any(feature = "capture", feature = "replay"))]
5828#[cfg_attr(feature = "capture", derive(Serialize))]
5829#[cfg_attr(feature = "replay", derive(Deserialize))]
5830struct PlainExternalResources {
5831    images: Vec<ExternalCaptureImage>
5832}
5833
5834#[cfg(feature = "replay")]
5835enum CapturedExternalImageData {
5836    NativeTexture(gl::GLuint),
5837    Buffer(Arc<Vec<u8>>),
5838}
5839
5840#[cfg(feature = "replay")]
5841struct DummyExternalImageHandler {
5842    data: FastHashMap<(ExternalImageId, u8), (CapturedExternalImageData, TexelRect)>,
5843}
5844
5845#[cfg(feature = "replay")]
5846impl ExternalImageHandler for DummyExternalImageHandler {
5847    fn lock(&mut self, key: ExternalImageId, channel_index: u8) -> ExternalImage {
5848        let (ref captured_data, ref uv) = self.data[&(key, channel_index)];
5849        ExternalImage {
5850            uv: *uv,
5851            source: match *captured_data {
5852                CapturedExternalImageData::NativeTexture(tid) => ExternalImageSource::NativeTexture(tid),
5853                CapturedExternalImageData::Buffer(ref arc) => ExternalImageSource::RawData(&*arc),
5854            }
5855        }
5856    }
5857    fn unlock(&mut self, _key: ExternalImageId, _channel_index: u8) {}
5858}
5859
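/// Pipeline epochs, keyed by document, along with the pipelines that have
/// been removed since the last frame.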
5860#[derive(Default)]
5861pub struct PipelineInfo {
5862    pub epochs: FastHashMap<(PipelineId, DocumentId), Epoch>,
5863    pub removed_pipelines: Vec<(PipelineId, DocumentId)>,
5864}
5865
5866impl Renderer {
5867    #[cfg(feature = "capture")]
5868    fn save_texture(
5869        texture: &Texture, category: Option<TextureCacheCategory>, name: &str, root: &PathBuf, device: &mut Device
5870    ) -> PlainTexture {
5871        use std::fs;
5872        use std::io::Write;
5873
5874        let short_path = format!("textures/{}.raw", name);
5875
5876        let bytes_per_pixel = texture.get_format().bytes_per_pixel();
5877        let read_format = texture.get_format();
5878        let rect_size = texture.get_dimensions();
5879
5880        let mut file = fs::File::create(root.join(&short_path))
5881            .expect(&format!("Unable to create {}", short_path));
5882        let bytes_per_texture = (rect_size.width * rect_size.height * bytes_per_pixel) as usize;
5883        let mut data = vec![0; bytes_per_texture];
5884
5885        //TODO: instead of reading from an FBO with `read_pixels*`, we could
5886        // read from textures directly with `get_tex_image*`.
5887
5888        let rect = device_size_as_framebuffer_size(rect_size).into();
5889
5890        device.attach_read_texture(texture);
5891        #[cfg(feature = "png")]
5892        {
5893            let mut png_data;
5894            let (data_ref, format) = match texture.get_format() {
5895                ImageFormat::RGBAF32 => {
5896                    png_data = vec![0; (rect_size.width * rect_size.height * 4) as usize];
5897                    device.read_pixels_into(rect, ImageFormat::RGBA8, &mut png_data);
5898                    (&png_data, ImageFormat::RGBA8)
5899                }
5900                fm => (&data, fm),
5901            };
5902            CaptureConfig::save_png(
5903                root.join(format!("textures/{}-{}.png", name, 0)),
5904                rect_size, format,
5905                None,
5906                data_ref,
5907            );
5908        }
5909        device.read_pixels_into(rect, read_format, &mut data);
5910        file.write_all(&data)
5911            .unwrap();
5912
5913        PlainTexture {
5914            data: short_path,
5915            size: rect_size,
5916            format: texture.get_format(),
5917            filter: texture.get_filter(),
5918            has_depth: texture.supports_depth(),
5919            category,
5920        }
5921    }
5922
5923    #[cfg(feature = "replay")]
5924    fn load_texture(
5925        target: ImageBufferKind,
5926        plain: &PlainTexture,
5927        rt_info: Option<RenderTargetInfo>,
5928        root: &PathBuf,
5929        device: &mut Device
5930    ) -> (Texture, Vec<u8>)
5931    {
5932        use std::fs::File;
5933        use std::io::Read;
5934
5935        let mut texels = Vec::new();
5936        File::open(root.join(&plain.data))
5937            .expect(&format!("Unable to open texture at {}", plain.data))
5938            .read_to_end(&mut texels)
5939            .unwrap();
5940
5941        let texture = device.create_texture(
5942            target,
5943            plain.format,
5944            plain.size.width,
5945            plain.size.height,
5946            plain.filter,
5947            rt_info,
5948        );
5949        device.upload_texture_immediate(&texture, &texels);
5950
5951        (texture, texels)
5952    }
5953
5954    #[cfg(feature = "capture")]
5955    fn save_capture(
5956        &mut self,
5957        config: CaptureConfig,
5958        deferred_images: Vec<ExternalCaptureImage>,
5959    ) {
5960        use std::fs;
5961        use std::io::Write;
5962        use api::ExternalImageData;
5963        use crate::render_api::CaptureBits;
5964
5965        let root = config.resource_root();
5966
5967        self.device.begin_frame();
5968        let _gm = self.gpu_profiler.start_marker("read GPU data");
5969        self.device.bind_read_target_impl(self.read_fbo, DeviceIntPoint::zero());
5970
5971        if config.bits.contains(CaptureBits::EXTERNAL_RESOURCES) && !deferred_images.is_empty() {
5972            info!("saving external images");
5973            let mut arc_map = FastHashMap::<*const u8, String>::default();
5974            let mut tex_map = FastHashMap::<u32, String>::default();
5975            let handler = self.external_image_handler
5976                .as_mut()
5977                .expect("Unable to lock the external image handler!");
5978            for def in &deferred_images {
5979                info!("\t{}", def.short_path);
5980                let ExternalImageData { id, channel_index, image_type, .. } = def.external;
5981                // The image rendering parameter is irrelevant because no filtering happens during capturing.
5982                let ext_image = handler.lock(id, channel_index);
5983                let (data, short_path) = match ext_image.source {
5984                    ExternalImageSource::RawData(data) => {
5985                        let arc_id = arc_map.len() + 1;
5986                        match arc_map.entry(data.as_ptr()) {
5987                            Entry::Occupied(e) => {
5988                                (None, e.get().clone())
5989                            }
5990                            Entry::Vacant(e) => {
5991                                let short_path = format!("externals/d{}.raw", arc_id);
5992                                (Some(data.to_vec()), e.insert(short_path).clone())
5993                            }
5994                        }
5995                    }
5996                    ExternalImageSource::NativeTexture(gl_id) => {
5997                        let tex_id = tex_map.len() + 1;
5998                        match tex_map.entry(gl_id) {
5999                            Entry::Occupied(e) => {
6000                                (None, e.get().clone())
6001                            }
6002                            Entry::Vacant(e) => {
6003                                let target = match image_type {
6004                                    ExternalImageType::TextureHandle(target) => target,
6005                                    ExternalImageType::Buffer => unreachable!(),
6006                                };
6007                                info!("\t\tnative texture of target {:?}", target);
6008                                self.device.attach_read_texture_external(gl_id, target);
6009                                let data = self.device.read_pixels(&def.descriptor);
6010                                let short_path = format!("externals/t{}.raw", tex_id);
6011                                (Some(data), e.insert(short_path).clone())
6012                            }
6013                        }
6014                    }
6015                    ExternalImageSource::Invalid => {
6016                        info!("\t\tinvalid source!");
6017                        (None, String::new())
6018                    }
6019                };
6020                if let Some(bytes) = data {
6021                    fs::File::create(root.join(&short_path))
6022                        .expect(&format!("Unable to create {}", short_path))
6023                        .write_all(&bytes)
6024                        .unwrap();
6025                    #[cfg(feature = "png")]
6026                    CaptureConfig::save_png(
6027                        root.join(&short_path).with_extension("png"),
6028                        def.descriptor.size,
6029                        def.descriptor.format,
6030                        def.descriptor.stride,
6031                        &bytes,
6032                    );
6033                }
6034                let plain = PlainExternalImage {
6035                    data: short_path,
6036                    external: def.external,
6037                    uv: ext_image.uv,
6038                };
6039                config.serialize_for_resource(&plain, &def.short_path);
6040            }
6041            for def in &deferred_images {
6042                handler.unlock(def.external.id, def.external.channel_index);
6043            }
6044            let plain_external = PlainExternalResources {
6045                images: deferred_images,
6046            };
6047            config.serialize_for_resource(&plain_external, "external_resources");
6048        }
6049
6050        if config.bits.contains(CaptureBits::FRAME) {
6051            let path_textures = root.join("textures");
6052            if !path_textures.is_dir() {
6053                fs::create_dir(&path_textures).unwrap();
6054            }
6055
6056            info!("saving GPU cache");
6057            self.update_gpu_cache(); // flush pending updates
6058            let mut plain_self = PlainRenderer {
6059                device_size: self.device_size,
6060                gpu_cache: Self::save_texture(
6061                    self.gpu_cache_texture.get_texture(),
6062                    None, "gpu", &root, &mut self.device,
6063                ),
6064                gpu_cache_frame_id: self.gpu_cache_frame_id,
6065                textures: FastHashMap::default(),
6066            };
6067
6068            info!("saving cached textures");
6069            for (id, item) in &self.texture_resolver.texture_cache_map {
6070                let file_name = format!("cache-{}", plain_self.textures.len() + 1);
6071                info!("\t{}", file_name);
6072                let plain = Self::save_texture(&item.texture, Some(item.category), &file_name, &root, &mut self.device);
6073                plain_self.textures.insert(*id, plain);
6074            }
6075
6076            config.serialize_for_resource(&plain_self, "renderer");
6077        }
6078
6079        self.device.reset_read_target();
6080        self.device.end_frame();
6081
6082        let mut stats_file = fs::File::create(config.root.join("profiler-stats.txt"))
6083            .expect("Unable to create profiler-stats.txt");
6084        if self.debug_flags.intersects(DebugFlags::PROFILER_DBG | DebugFlags::PROFILER_CAPTURE) {
6085            self.profiler.dump_stats(&mut stats_file).unwrap();
6086        } else {
6087            writeln!(stats_file, "Turn on PROFILER_DBG or PROFILER_CAPTURE to get stats here!").unwrap();
6088        }
6089
6090        info!("done.");
6091    }
6092
6093    #[cfg(feature = "replay")]
6094    fn load_capture(
6095        &mut self,
6096        config: CaptureConfig,
6097        plain_externals: Vec<PlainExternalImage>,
6098    ) {
6099        use std::{fs::File, io::Read};
6100
6101        info!("loading external buffer-backed images");
6102        assert!(self.texture_resolver.external_images.is_empty());
6103        let mut raw_map = FastHashMap::<String, Arc<Vec<u8>>>::default();
6104        let mut image_handler = DummyExternalImageHandler {
6105            data: FastHashMap::default(),
6106        };
6107
6108        let root = config.resource_root();
6109
6110        // Note: this is a `SCENE`-level population of the external image handler.
6111        // It puts both external buffers and textures into the map, but the
6112        // latter are overwritten later in this function if we are at the
6113        // `FRAME` level.
6114        for plain_ext in plain_externals {
6115            let data = match raw_map.entry(plain_ext.data) {
6116                Entry::Occupied(e) => e.get().clone(),
6117                Entry::Vacant(e) => {
6118                    let mut buffer = Vec::new();
6119                    File::open(root.join(e.key()))
6120                        .expect(&format!("Unable to open {}", e.key()))
6121                        .read_to_end(&mut buffer)
6122                        .unwrap();
6123                    e.insert(Arc::new(buffer)).clone()
6124                }
6125            };
6126            let ext = plain_ext.external;
6127            let value = (CapturedExternalImageData::Buffer(data), plain_ext.uv);
6128            image_handler.data.insert((ext.id, ext.channel_index), value);
6129        }
6130
6131        if let Some(external_resources) = config.deserialize_for_resource::<PlainExternalResources, _>("external_resources") {
6132            info!("loading external texture-backed images");
6133            let mut native_map = FastHashMap::<String, gl::GLuint>::default();
6134            for ExternalCaptureImage { short_path, external, descriptor } in external_resources.images {
6135                let target = match external.image_type {
6136                    ExternalImageType::TextureHandle(target) => target,
6137                    ExternalImageType::Buffer => continue,
6138                };
6139                let plain_ext = config.deserialize_for_resource::<PlainExternalImage, _>(&short_path)
6140                    .expect(&format!("Unable to read {}.ron", short_path));
6141                let key = (external.id, external.channel_index);
6142
6143                let tid = match native_map.entry(plain_ext.data) {
6144                    Entry::Occupied(e) => e.get().clone(),
6145                    Entry::Vacant(e) => {
6146                        let plain_tex = PlainTexture {
6147                            data: e.key().clone(),
6148                            size: descriptor.size,
6149                            format: descriptor.format,
6150                            filter: TextureFilter::Linear,
6151                            has_depth: false,
6152                            category: None,
6153                        };
6154                        let t = Self::load_texture(
6155                            target,
6156                            &plain_tex,
6157                            None,
6158                            &root,
6159                            &mut self.device
6160                        );
6161                        let extex = t.0.into_external();
6162                        self.owned_external_images.insert(key, extex.clone());
6163                        e.insert(extex.internal_id()).clone()
6164                    }
6165                };
6166
6167                let value = (CapturedExternalImageData::NativeTexture(tid), plain_ext.uv);
6168                image_handler.data.insert(key, value);
6169            }
6170        }
6171
6172        self.device.begin_frame();
6173        self.gpu_cache_texture.remove_texture(&mut self.device);
6174
6175        if let Some(renderer) = config.deserialize_for_resource::<PlainRenderer, _>("renderer") {
6176            info!("loading cached textures");
6177            self.device_size = renderer.device_size;
6178
6179            for (_id, item) in self.texture_resolver.texture_cache_map.drain() {
6180                self.device.delete_texture(item.texture);
6181            }
6182            for (id, texture) in renderer.textures {
6183                info!("\t{}", texture.data);
6184                let target = ImageBufferKind::Texture2D;
6185                let t = Self::load_texture(
6186                    target,
6187                    &texture,
6188                    Some(RenderTargetInfo { has_depth: texture.has_depth }),
6189                    &root,
6190                    &mut self.device
6191                );
6192                self.texture_resolver.texture_cache_map.insert(id, CacheTexture {
6193                    texture: t.0,
6194                    category: texture.category.unwrap_or(TextureCacheCategory::Standalone),
6195                });
6196            }
6197
6198            info!("loading gpu cache");
6199            let (t, gpu_cache_data) = Self::load_texture(
6200                ImageBufferKind::Texture2D,
6201                &renderer.gpu_cache,
6202                Some(RenderTargetInfo { has_depth: false }),
6203                &root,
6204                &mut self.device,
6205            );
6206            self.gpu_cache_texture.load_from_data(t, gpu_cache_data);
6207            self.gpu_cache_frame_id = renderer.gpu_cache_frame_id;
6208        } else {
6209            info!("loading cached textures");
6211            for (_id, item) in self.texture_resolver.texture_cache_map.drain() {
6212                self.device.delete_texture(item.texture);
6213            }
6214        }
6215        self.device.end_frame();
6216
6217        self.external_image_handler = Some(Box::new(image_handler) as Box<_>);
6218        info!("done.");
6219    }
6220}
6221
6222#[derive(Clone, Copy, PartialEq)]
6223enum FramebufferKind {
6224    Main,
6225    Other,
6226}
6227
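/// Returns true if the given batch kind is disabled by the current debug
/// flags, e.g. text runs under `DISABLE_TEXT_PRIMS` or linear gradients
/// under `DISABLE_GRADIENT_PRIMS`.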
6228fn should_skip_batch(kind: &BatchKind, flags: DebugFlags) -> bool {
6229    match kind {
6230        BatchKind::TextRun(_) => {
6231            flags.contains(DebugFlags::DISABLE_TEXT_PRIMS)
6232        }
6233        BatchKind::Brush(BrushBatchKind::LinearGradient) => {
6234            flags.contains(DebugFlags::DISABLE_GRADIENT_PRIMS)
6235        }
6236        _ => false,
6237    }
6238}
6239
6240impl CompositeState {
6241    /// Use the client provided native compositor interface to add all picture
6242    /// cache tiles to the OS compositor
6243    fn composite_native(
6244        &self,
6245        clear_color: ColorF,
6246        dirty_rects: &[DeviceIntRect],
6247        device: &mut Device,
6248        compositor: &mut dyn Compositor,
6249    ) {
6250        // Add each surface to the visual tree. z-order is implicit based on
6251        // order added. Offset and clip rect apply to all tiles within this
6252        // surface.
6253        for surface in &self.descriptor.surfaces {
6254            compositor.add_surface(
6255                device,
6256                surface.surface_id.expect("bug: no native surface allocated"),
6257                surface.transform,
6258                surface.clip_rect.to_i32(),
6259                surface.image_rendering,
6260                surface.rounded_clip_rect.to_i32(),
6261                surface.rounded_clip_radii,
6262            );
6263        }
6264        compositor.start_compositing(device, clear_color, dirty_rects, &[]);
6265    }
6266}
6267
#[cfg(test)]
6268mod tests {
6269    #[test]
6270    fn test_buffer_damage_tracker() {
6271        use super::BufferDamageTracker;
6272        use api::units::{DevicePoint, DeviceRect, DeviceSize};
6273
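        // `get_damage_rect(age)` returns the union of the dirty rects pushed
        // since a buffer of that age was last presented: `None` for an unknown
        // age of 0, and an empty rect for a just-presented buffer (age 1), as
        // the assertions below exercise.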
6274        let mut tracker = BufferDamageTracker::default();
6275        assert_eq!(tracker.get_damage_rect(0), None);
6276        assert_eq!(tracker.get_damage_rect(1), Some(DeviceRect::zero()));
6277        assert_eq!(tracker.get_damage_rect(2), Some(DeviceRect::zero()));
6278        assert_eq!(tracker.get_damage_rect(3), Some(DeviceRect::zero()));
6279
6280        let damage1 = DeviceRect::from_origin_and_size(DevicePoint::new(10.0, 10.0), DeviceSize::new(10.0, 10.0));
6281        let damage2 = DeviceRect::from_origin_and_size(DevicePoint::new(20.0, 20.0), DeviceSize::new(10.0, 10.0));
6282        let combined = damage1.union(&damage2);
6283
6284        tracker.push_dirty_rect(&damage1);
6285        assert_eq!(tracker.get_damage_rect(0), None);
6286        assert_eq!(tracker.get_damage_rect(1), Some(DeviceRect::zero()));
6287        assert_eq!(tracker.get_damage_rect(2), Some(damage1));
6288        assert_eq!(tracker.get_damage_rect(3), Some(damage1));
6289
6290        tracker.push_dirty_rect(&damage2);
6291        assert_eq!(tracker.get_damage_rect(0), None);
6292        assert_eq!(tracker.get_damage_rect(1), Some(DeviceRect::zero()));
6293        assert_eq!(tracker.get_damage_rect(2), Some(damage2));
6294        assert_eq!(tracker.get_damage_rect(3), Some(combined));
6295    }
6296}