use api::units::*;
use api::ImageFormat;
use crate::gpu_cache::{GpuCache, GpuCacheAddress};
use crate::internal_types::{TextureSource, CacheTextureId, FastHashMap, FastHashSet, FrameId};
use crate::internal_types::size_of_frame_vec;
use crate::render_task::{StaticRenderTaskSurface, RenderTaskLocation, RenderTask};
use crate::render_target::RenderTargetKind;
use crate::render_task::{RenderTaskData, RenderTaskKind};
use crate::resource_cache::ResourceCache;
use crate::texture_pack::GuillotineAllocator;
use crate::prim_store::DeferredResolve;
use crate::image_source::{resolve_image, resolve_cached_render_task};
use smallvec::SmallVec;
use topological_sort::TopologicalSort;

use crate::render_target::{RenderTargetList, PictureCacheTarget, RenderTarget};
use crate::util::{Allocation, VecHelper};
use std::{usize, f32};

use crate::internal_types::{FrameVec, FrameMemory};

#[cfg(test)]
use crate::frame_allocator::FrameAllocator;

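/// Dimension mask used to round non-shared dynamic render target sizes up to
/// the next multiple of 256 pixels.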
const TEXTURE_DIMENSION_MASK: i32 = 0xFF;

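/// A handle to a slot reserved in the frame's task list by
/// `RenderTaskGraphBuilder::add()`; call `init()` to write the task and get
/// back its `RenderTaskId`.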
pub struct RenderTaskAllocation<'a> {
    pub alloc: Allocation<'a, RenderTask>,
}

impl<'l> RenderTaskAllocation<'l> {
    #[inline(always)]
    pub fn init(self, value: RenderTask) -> RenderTaskId {
        RenderTaskId {
            index: self.alloc.init(value) as u32,
        }
    }
}

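/// Identifies a render task by its index in the frame's task list.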
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
#[derive(MallocSizeOf)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct RenderTaskId {
    pub index: u32,
}

impl RenderTaskId {
    pub const INVALID: RenderTaskId = RenderTaskId {
        index: u32::MAX,
    };
}

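/// Index of a pass within the frame, also used for the `render_on` /
/// `free_after` bookkeeping on individual tasks.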
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
#[derive(Debug, Copy, Clone, Hash, Eq, PartialEq, PartialOrd, Ord)]
pub struct PassId(usize);

impl PassId {
    pub const MIN: PassId = PassId(0);
    pub const MAX: PassId = PassId(!0 - 1);
    pub const INVALID: PassId = PassId(!0 - 2);
}

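/// A dynamic render target that tasks are packed into while the graph is
/// built. Tasks may only share a surface when the target kind, sharing mode
/// and free-after pass all match (see `alloc_rect`).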
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
struct Surface {
    kind: RenderTargetKind,
    allocator: GuillotineAllocator,
    is_shared: bool,
    free_after: PassId,
}

impl Surface {
    fn alloc_rect(
        &mut self,
        size: DeviceIntSize,
        kind: RenderTargetKind,
        is_shared: bool,
        free_after: PassId,
    ) -> Option<DeviceIntPoint> {
        if self.kind == kind && self.is_shared == is_shared && self.free_after == free_after {
            self.allocator
                .allocate(&size)
                .map(|(_slice, origin)| origin)
        } else {
            None
        }
    }
}

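/// The surface a sub-pass renders to: either a dynamic texture taken from the
/// render target pool for this frame, or a persistent surface that outlives
/// the frame.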
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
#[derive(Debug)]
pub enum SubPassSurface {
    Dynamic {
        texture_id: CacheTextureId,
        target_kind: RenderTargetKind,
        used_rect: DeviceIntRect,
    },
    Persistent {
        surface: StaticRenderTaskSurface,
    },
}

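/// A set of tasks within a pass that render to the same surface.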
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct SubPass {
    pub surface: SubPassSurface,
    pub task_ids: FrameVec<RenderTaskId>,
}

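/// A pass of the frame graph: the tasks rendered on it, grouped into
/// sub-passes by target surface, plus the textures that are no longer needed
/// at this point and should be invalidated.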
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct Pass {
    pub task_ids: FrameVec<RenderTaskId>,
    pub sub_passes: FrameVec<SubPass>,
    pub textures_to_invalidate: FrameVec<CacheTextureId>,
}

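/// The built task graph for a frame: the flattened task list, the passes
/// derived from it, and the packed per-task data (`RenderTaskData`) generated
/// from it.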
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct RenderTaskGraph {
    pub tasks: FrameVec<RenderTask>,

    pub passes: FrameVec<Pass>,

    frame_id: FrameId,

    pub task_data: FrameVec<RenderTaskData>,

    #[cfg(test)]
    surface_count: usize,

    #[cfg(test)]
    unique_surfaces: FastHashSet<CacheTextureId>,
}

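/// Accumulates the render tasks and dependencies for a frame and turns them
/// into a `RenderTaskGraph` in `end_frame()`.
///
/// A rough usage sketch, mirroring the tests at the bottom of this file (not a
/// compiled doctest, since it relies on crate-internal helpers and test-only
/// constructors):
///
/// ```ignore
/// let mut builder = RenderTaskGraphBuilder::new();
/// builder.begin_frame(frame_id);
///
/// // Reserve slots and initialize tasks.
/// let root = builder.add().init(root_task);
/// let child = builder.add().init(child_task);
///
/// // `child` is an input of `root`, so it must be rendered first.
/// builder.add_dependency(root, child);
///
/// // Assign passes, allocate surfaces, and produce the frame's graph.
/// let graph = builder.end_frame(
///     &mut resource_cache,
///     &mut gpu_cache,
///     &mut deferred_resolves,
///     2048,
///     &frame_memory,
/// );
/// ```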
pub struct RenderTaskGraphBuilder {
    tasks: Vec<RenderTask>,

    roots: FastHashSet<RenderTaskId>,

    frame_id: FrameId,

    textures_to_free: FastHashSet<CacheTextureId>,

    active_surfaces: FastHashMap<CacheTextureId, Surface>,
}

impl RenderTaskGraphBuilder {
    pub fn new() -> Self {
        RenderTaskGraphBuilder {
            tasks: Vec::new(),
            roots: FastHashSet::default(),
            frame_id: FrameId::INVALID,
            textures_to_free: FastHashSet::default(),
            active_surfaces: FastHashMap::default(),
        }
    }

    pub fn frame_id(&self) -> FrameId {
        self.frame_id
    }

    pub fn begin_frame(&mut self, frame_id: FrameId) {
        self.frame_id = frame_id;
        self.roots.clear();
    }

    pub fn get_task(
        &self,
        task_id: RenderTaskId,
    ) -> &RenderTask {
        &self.tasks[task_id.index as usize]
    }

    pub fn get_task_mut(
        &mut self,
        task_id: RenderTaskId,
    ) -> &mut RenderTask {
        &mut self.tasks[task_id.index as usize]
    }

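    /// Reserves a slot for a new task and returns an allocation that must be
    /// initialized with `init()`. The task is considered a root of the graph
    /// until something registers a dependency on it.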
    pub fn add(&mut self) -> RenderTaskAllocation {
        self.roots.insert(
            RenderTaskId { index: self.tasks.len() as u32 }
        );

        RenderTaskAllocation {
            alloc: self.tasks.alloc(),
        }
    }

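    /// Registers `input` as a child of `task_id`: the output of `input` is
    /// read while rendering `task_id`, so it must be rendered first. `input`
    /// is no longer a root of the graph.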
    pub fn add_dependency(
        &mut self,
        task_id: RenderTaskId,
        input: RenderTaskId,
    ) {
        self.tasks[task_id.index as usize].children.push(input);

        self.roots.remove(&input);
    }

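    /// Consumes the accumulated tasks and dependencies and builds the frame's
    /// `RenderTaskGraph`: tasks are grouped into passes, dynamic surfaces are
    /// allocated from the render target pool, and the per-task GPU data is
    /// written.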
    pub fn end_frame(
        &mut self,
        resource_cache: &mut ResourceCache,
        gpu_cache: &mut GpuCache,
        deferred_resolves: &mut FrameVec<DeferredResolve>,
        max_shared_surface_size: i32,
        memory: &FrameMemory,
    ) -> RenderTaskGraph {
        let task_count = self.tasks.len();

        let mut tasks = memory.new_vec_with_capacity(task_count);
        for task in self.tasks.drain(..) {
            tasks.push(task)
        }

        let mut graph = RenderTaskGraph {
            tasks,
            passes: memory.new_vec(),
            task_data: memory.new_vec_with_capacity(task_count),
            frame_id: self.frame_id,
            #[cfg(test)]
            surface_count: 0,
            #[cfg(test)]
            unique_surfaces: FastHashSet::default(),
        };

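        // Group the tasks into passes with a topological sort over the
        // parent/child edges: each batch returned by pop_all() becomes one
        // pass, and the pass index is recorded on each task as `render_on`.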
        let mut pass_count = 0;
        let mut passes = memory.new_vec();
        let mut task_sorter = TopologicalSort::<RenderTaskId>::new();

        for (parent_id, task) in graph.tasks.iter().enumerate() {
            let parent_id = RenderTaskId { index: parent_id as u32 };

            for child_id in &task.children {
                task_sorter.add_dependency(
                    parent_id,
                    *child_id,
                );
            }
        }

        loop {
            let tasks = task_sorter.pop_all();

            if tasks.is_empty() {
                assert!(task_sorter.is_empty());
                break;
            } else {
                for task_id in &tasks {
                    graph.tasks[task_id.index as usize].render_on = PassId(pass_count);
                }

                passes.push(tasks);
                pass_count += 1;
            }
        }

        pass_count = pass_count.max(1);

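        // Record on each task the pass after which its output is no longer
        // needed (`free_after`), based on the passes its parents render on.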
        for pass in passes {
            for task_id in pass {
                assign_free_pass(
                    task_id,
                    &mut graph,
                );
            }
        }

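        // Create the pass list and assign each rendering task to the pass it
        // renders on.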
        for _ in 0 .. pass_count {
            graph.passes.push(Pass {
                task_ids: memory.new_vec(),
                sub_passes: memory.new_vec(),
                textures_to_invalidate: memory.new_vec(),
            });
        }

        for (index, task) in graph.tasks.iter().enumerate() {
            if task.kind.is_a_rendering_operation() {
                let id = RenderTaskId { index: index as u32 };
                graph.passes[task.render_on.0].task_ids.push(id);
            }
        }

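        // Walk the passes and allocate a surface for every task that needs
        // one: unallocated (dynamic) tasks are packed into pooled render
        // target textures, shared between tasks where possible, while static
        // and cached tasks reference surfaces that already exist.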
        assert!(self.active_surfaces.is_empty());

        for (pass_id, pass) in graph.passes.iter_mut().enumerate().rev() {
            assert!(self.textures_to_free.is_empty());

            for task_id in &pass.task_ids {
                let task_location = graph.tasks[task_id.index as usize].location.clone();

                match task_location {
                    RenderTaskLocation::Unallocated { size } => {
                        let task = &mut graph.tasks[task_id.index as usize];

                        let mut location = None;
                        let kind = task.kind.target_kind();

                        let can_use_shared_surface =
                            task.kind.can_use_shared_surface() &&
                            task.free_after != PassId::INVALID;

                        if can_use_shared_surface {
                            for sub_pass in &mut pass.sub_passes {
                                if let SubPassSurface::Dynamic { texture_id, ref mut used_rect, .. } = sub_pass.surface {
                                    let surface = self.active_surfaces.get_mut(&texture_id).unwrap();
                                    if let Some(p) = surface.alloc_rect(size, kind, true, task.free_after) {
                                        location = Some((texture_id, p));
                                        *used_rect = used_rect.union(&DeviceIntRect::from_origin_and_size(p, size));
                                        sub_pass.task_ids.push(*task_id);
                                        break;
                                    }
                                }
                            }
                        }

                        if location.is_none() {
                            let can_use_shared_surface = can_use_shared_surface &&
                                size.width <= max_shared_surface_size &&
                                size.height <= max_shared_surface_size;

                            let surface_size = if can_use_shared_surface {
                                DeviceIntSize::new(
                                    max_shared_surface_size,
                                    max_shared_surface_size,
                                )
                            } else {
                                DeviceIntSize::new(
                                    (size.width + TEXTURE_DIMENSION_MASK) & !TEXTURE_DIMENSION_MASK,
                                    (size.height + TEXTURE_DIMENSION_MASK) & !TEXTURE_DIMENSION_MASK,
                                )
                            };

                            if surface_size.is_empty() {
                                let task_name = graph.tasks[task_id.index as usize].kind.as_str();
                                panic!("{} render task has invalid size {:?}", task_name, surface_size);
                            }

                            let format = match kind {
                                RenderTargetKind::Color => ImageFormat::RGBA8,
                                RenderTargetKind::Alpha => ImageFormat::R8,
                            };

                            let texture_id = resource_cache.get_or_create_render_target_from_pool(
                                surface_size,
                                format,
                            );

                            let mut surface = Surface {
                                kind,
                                allocator: GuillotineAllocator::new(Some(surface_size)),
                                is_shared: can_use_shared_surface,
                                free_after: task.free_after,
                            };

                            let p = surface.alloc_rect(
                                size,
                                kind,
                                can_use_shared_surface,
                                task.free_after,
                            ).expect("bug: alloc must succeed!");

                            location = Some((texture_id, p));

                            let _prev_surface = self.active_surfaces.insert(texture_id, surface);
                            assert!(_prev_surface.is_none());

                            #[cfg(test)]
                            {
                                graph.surface_count += 1;
                                graph.unique_surfaces.insert(texture_id);
                            }

                            let mut task_ids = memory.new_vec();
                            task_ids.push(*task_id);

                            pass.sub_passes.push(SubPass {
                                surface: SubPassSurface::Dynamic {
                                    texture_id,
                                    target_kind: kind,
                                    used_rect: DeviceIntRect::from_origin_and_size(p, size),
                                },
                                task_ids,
                            });
                        }

                        assert!(location.is_some());
                        task.location = RenderTaskLocation::Dynamic {
                            texture_id: location.unwrap().0,
                            rect: DeviceIntRect::from_origin_and_size(location.unwrap().1, size),
                        };
                    }
                    RenderTaskLocation::Existing { parent_task_id, size: existing_size, .. } => {
                        let parent_task_location = graph.tasks[parent_task_id.index as usize].location.clone();

                        match parent_task_location {
                            RenderTaskLocation::Unallocated { .. } |
                            RenderTaskLocation::CacheRequest { .. } |
                            RenderTaskLocation::Existing { .. } => {
                                panic!("bug: reference to existing task must be allocated by now");
                            }
                            RenderTaskLocation::Dynamic { texture_id, rect, .. } => {
                                assert_eq!(existing_size, rect.size());

                                let kind = graph.tasks[parent_task_id.index as usize].kind.target_kind();
                                let mut task_ids = memory.new_vec();
                                task_ids.push(*task_id);
                                pass.sub_passes.push(SubPass {
                                    surface: SubPassSurface::Dynamic {
                                        texture_id,
                                        target_kind: kind,
                                        used_rect: rect,
                                    },
                                    task_ids,
                                });

                                let task = &mut graph.tasks[task_id.index as usize];
                                task.location = parent_task_location;
                            }
                            RenderTaskLocation::Static { .. } => {
                                unreachable!("bug: not possible since we don't dup static locations");
                            }
                        }
                    }
                    RenderTaskLocation::Static { ref surface, .. } => {
                        let mut task_ids = memory.new_vec();
                        task_ids.push(*task_id);
                        pass.sub_passes.push(SubPass {
                            surface: SubPassSurface::Persistent {
                                surface: surface.clone(),
                            },
                            task_ids,
                        });
                    }
                    RenderTaskLocation::CacheRequest { .. } => {
                    }
                    RenderTaskLocation::Dynamic { .. } => {
                        panic!("bug: encountered an already allocated task");
                    }
                }

                let task = &graph.tasks[task_id.index as usize];
                for child_id in &task.children {
                    let child_task = &graph.tasks[child_id.index as usize];
                    match child_task.location {
                        RenderTaskLocation::Unallocated { .. } |
                        RenderTaskLocation::Existing { .. } => panic!("bug: must be allocated"),
                        RenderTaskLocation::Dynamic { texture_id, .. } => {
                            if child_task.free_after == PassId(pass_id) {
                                self.textures_to_free.insert(texture_id);
                            }
                        }
                        RenderTaskLocation::Static { .. } => {}
                        RenderTaskLocation::CacheRequest { .. } => {}
                    }
                }
            }

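            // Textures whose last consumer has now been processed can go back
            // to the render target pool; record them on the pass so the
            // renderer can invalidate them.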
            for texture_id in self.textures_to_free.drain() {
                resource_cache.return_render_target_to_pool(texture_id);
                self.active_surfaces.remove(&texture_id).unwrap();
                pass.textures_to_invalidate.push(texture_id);
            }
        }

        if !self.active_surfaces.is_empty() {
            graph.print();
            assert!(self.active_surfaces.is_empty());
        }

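        // Final fix-up over all tasks: resolve cached render tasks and image
        // requests to their texture cache entries, then write the per-task
        // GPU blocks and the packed task data.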
        for task in &mut graph.tasks {
            let cache_item = if let Some(ref cache_handle) = task.cache_handle {
                Some(resolve_cached_render_task(
                    cache_handle,
                    resource_cache,
                ))
            } else if let RenderTaskKind::Image(request) = &task.kind {
                Some(resolve_image(
                    *request,
                    resource_cache,
                    gpu_cache,
                    deferred_resolves,
                ))
            } else {
                None
            };

            if let Some(cache_item) = cache_item {
                task.uv_rect_handle = cache_item.uv_rect_handle;
                if let RenderTaskLocation::CacheRequest { .. } = &task.location {
                    let source = cache_item.texture_id;
                    task.location = RenderTaskLocation::Static {
                        surface: StaticRenderTaskSurface::ReadOnly { source },
                        rect: cache_item.uv_rect,
                    };
                }
            }

            let target_rect = task.get_target_rect();

            task.write_gpu_blocks(
                target_rect,
                gpu_cache,
            );

            graph.task_data.push(
                task.kind.write_task_data(target_rect)
            );
        }

        graph
    }
}

impl RenderTaskGraph {
    #[allow(dead_code)]
    pub fn print(
        &self,
    ) {
        print!("-- RenderTaskGraph --\n");

        for (i, task) in self.tasks.iter().enumerate() {
            print!("Task {} [{}]: render_on={} free_after={} children={:?} target_size={:?}\n",
                i,
                task.kind.as_str(),
                task.render_on.0,
                task.free_after.0,
                task.children,
                task.get_target_size(),
            );
        }

        for (p, pass) in self.passes.iter().enumerate() {
            print!("Pass {}:\n", p);

            for (s, sub_pass) in pass.sub_passes.iter().enumerate() {
                print!("\tSubPass {}: {:?}\n",
                    s,
                    sub_pass.surface,
                );

                for task_id in &sub_pass.task_ids {
                    print!("\t\tTask {:?}\n", task_id.index);
                }
            }
        }
    }

    pub fn resolve_texture(
        &self,
        task_id: impl Into<Option<RenderTaskId>>,
    ) -> Option<TextureSource> {
        let task_id = task_id.into()?;
        let task = &self[task_id];

        match task.get_texture_source() {
            TextureSource::Invalid => None,
            source => Some(source),
        }
    }

    pub fn resolve_location(
        &self,
        task_id: impl Into<Option<RenderTaskId>>,
        gpu_cache: &GpuCache,
    ) -> Option<(GpuCacheAddress, TextureSource)> {
        self.resolve_impl(task_id.into()?, gpu_cache)
    }

    fn resolve_impl(
        &self,
        task_id: RenderTaskId,
        gpu_cache: &GpuCache,
    ) -> Option<(GpuCacheAddress, TextureSource)> {
        let task = &self[task_id];
        let texture_source = task.get_texture_source();

        if let TextureSource::Invalid = texture_source {
            return None;
        }

        let uv_address = task.get_texture_address(gpu_cache);

        Some((uv_address, texture_source))
    }

    pub fn report_memory(&self) -> usize {
        let mut mem = size_of_frame_vec(&self.tasks)
            + size_of_frame_vec(&self.task_data)
            + size_of_frame_vec(&self.passes);

        for pass in &self.passes {
            mem += size_of_frame_vec(&pass.task_ids)
                + size_of_frame_vec(&pass.sub_passes)
                + size_of_frame_vec(&pass.textures_to_invalidate);
            for sub_pass in &pass.sub_passes {
                mem += size_of_frame_vec(&sub_pass.task_ids);
            }
        }

        mem
    }

    #[cfg(test)]
    pub fn new_for_testing() -> Self {
        let allocator = FrameAllocator::fallback();
        RenderTaskGraph {
            tasks: allocator.clone().new_vec(),
            passes: allocator.clone().new_vec(),
            frame_id: FrameId::INVALID,
            task_data: allocator.clone().new_vec(),
            surface_count: 0,
            unique_surfaces: FastHashSet::default(),
        }
    }

    #[cfg(test)]
    pub fn surface_counts(&self) -> (usize, usize) {
        (self.surface_count, self.unique_surfaces.len())
    }

    #[cfg(debug_assertions)]
    pub fn frame_id(&self) -> FrameId {
        self.frame_id
    }
}

impl std::ops::Index<RenderTaskId> for RenderTaskGraph {
    type Output = RenderTask;
    fn index(&self, id: RenderTaskId) -> &RenderTask {
        &self.tasks[id.index as usize]
    }
}

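/// Updates `free_after` on the children of the given task: a child's surface
/// must stay alive at least until the pass its parent renders on. Tasks that
/// are referenced through an `Existing` location have their `free_after`
/// forced to `PassId::INVALID` so their surface is never returned early.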
fn assign_free_pass(
    id: RenderTaskId,
    graph: &mut RenderTaskGraph,
) {
    let task = &mut graph.tasks[id.index as usize];
    let render_on = task.render_on;

    let mut child_task_ids: SmallVec<[RenderTaskId; 8]> = SmallVec::new();
    child_task_ids.extend_from_slice(&task.children);

    for child_id in child_task_ids {
        let child_location = graph.tasks[child_id.index as usize].location.clone();

        match child_location {
            RenderTaskLocation::CacheRequest { .. } => {}
            RenderTaskLocation::Static { .. } => {}
            RenderTaskLocation::Dynamic { .. } => {
                panic!("bug: should not be allocated yet");
            }
            RenderTaskLocation::Unallocated { .. } => {
                let child_task = &mut graph.tasks[child_id.index as usize];

                if child_task.free_after != PassId::INVALID {
                    child_task.free_after = child_task.free_after.min(render_on);
                }
            }
            RenderTaskLocation::Existing { parent_task_id, .. } => {
                let parent_task = &mut graph.tasks[parent_task_id.index as usize];
                parent_task.free_after = PassId::INVALID;

                let child_task = &mut graph.tasks[child_id.index as usize];

                if child_task.free_after != PassId::INVALID {
                    child_task.free_after = child_task.free_after.min(render_on);
                }
            }
        }
    }
}

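/// A render pass in the form consumed by the renderer: the color and alpha
/// target lists, texture cache and picture cache targets, and the textures to
/// invalidate for this pass.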
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct RenderPass {
    pub alpha: RenderTargetList,
    pub color: RenderTargetList,
    pub texture_cache: FastHashMap<CacheTextureId, RenderTarget>,
    pub picture_cache: FrameVec<PictureCacheTarget>,
    pub textures_to_invalidate: FrameVec<CacheTextureId>,
}

impl RenderPass {
    pub fn new(src: &Pass, memory: &mut FrameMemory) -> Self {
        RenderPass {
            color: RenderTargetList::new(memory.allocator()),
            alpha: RenderTargetList::new(memory.allocator()),
            texture_cache: FastHashMap::default(),
            picture_cache: memory.allocator().new_vec(),
            textures_to_invalidate: src.textures_to_invalidate.clone(),
        }
    }
}

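/// Dumps the render task graph as an SVG diagram, one column of nodes per
/// pass with bezier links for task dependencies. Only built with the
/// `capture` feature.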
#[cfg(feature = "capture")]
pub fn dump_render_tasks_as_svg(
    render_tasks: &RenderTaskGraph,
    output: &mut dyn std::io::Write,
) -> std::io::Result<()> {
    use svg_fmt::*;

    let node_width = 80.0;
    let node_height = 30.0;
    let vertical_spacing = 8.0;
    let horizontal_spacing = 20.0;
    let margin = 10.0;
    let text_size = 10.0;

    let mut pass_rects = Vec::new();
    let mut nodes = vec![None; render_tasks.tasks.len()];

    let mut x = margin;
    let mut max_y: f32 = 0.0;

    #[derive(Clone)]
    struct Node {
        rect: Rectangle,
        label: Text,
        size: Text,
    }

    for pass in render_tasks.passes.iter().rev() {
        let mut layout = VerticalLayout::new(x, margin, node_width);

        for task_id in &pass.task_ids {
            let task_index = task_id.index as usize;
            let task = &render_tasks.tasks[task_index];

            let rect = layout.push_rectangle(node_height);

            let tx = rect.x + rect.w / 2.0;
            let ty = rect.y + 10.0;

            let label = text(tx, ty, format!("{}", task.kind.as_str()));
            let size = text(tx, ty + 12.0, format!("{:?}", task.location.size()));

            nodes[task_index] = Some(Node { rect, label, size });

            layout.advance(vertical_spacing);
        }

        pass_rects.push(layout.total_rectangle());

        x += node_width + horizontal_spacing;
        max_y = max_y.max(layout.y + margin);
    }

    let mut links = Vec::new();
    for node_index in 0..nodes.len() {
        if nodes[node_index].is_none() {
            continue;
        }

        let task = &render_tasks.tasks[node_index];
        for dep in &task.children {
            let dep_index = dep.index as usize;

            if let (&Some(ref node), &Some(ref dep_node)) = (&nodes[node_index], &nodes[dep_index]) {
                links.push((
                    dep_node.rect.x + dep_node.rect.w,
                    dep_node.rect.y + dep_node.rect.h / 2.0,
                    node.rect.x,
                    node.rect.y + node.rect.h / 2.0,
                ));
            }
        }
    }

    let svg_w = x + margin;
    let svg_h = max_y + margin;
    writeln!(output, "{}", BeginSvg { w: svg_w, h: svg_h })?;

    writeln!(output,
        " {}",
        rectangle(0.0, 0.0, svg_w, svg_h)
            .inflate(1.0, 1.0)
            .fill(rgb(50, 50, 50))
    )?;

    for rect in pass_rects {
        writeln!(output,
            " {}",
            rect.inflate(3.0, 3.0)
                .border_radius(4.0)
                .opacity(0.4)
                .fill(black())
        )?;
    }

    for (x1, y1, x2, y2) in links {
        dump_task_dependency_link(output, x1, y1, x2, y2);
    }

    for node in &nodes {
        if let Some(node) = node {
            writeln!(output,
                " {}",
                node.rect
                    .clone()
                    .fill(black())
                    .border_radius(3.0)
                    .opacity(0.5)
                    .offset(0.0, 2.0)
            )?;
            writeln!(output,
                " {}",
                node.rect
                    .clone()
                    .fill(rgb(200, 200, 200))
                    .border_radius(3.0)
                    .opacity(0.8)
            )?;

            writeln!(output,
                " {}",
                node.label
                    .clone()
                    .size(text_size)
                    .align(Align::Center)
                    .color(rgb(50, 50, 50))
            )?;
            writeln!(output,
                " {}",
                node.size
                    .clone()
                    .size(text_size * 0.7)
                    .align(Align::Center)
                    .color(rgb(50, 50, 50))
            )?;
        }
    }

    writeln!(output, "{}", EndSvg)
}

#[allow(dead_code)]
fn dump_task_dependency_link(
    output: &mut dyn std::io::Write,
    x1: f32, y1: f32,
    x2: f32, y2: f32,
) {
    use svg_fmt::*;

    let simple_path = (y1 - y2).abs() > 1.0 || (x2 - x1) < 45.0;

    let mid_x = (x1 + x2) / 2.0;
    if simple_path {
        write!(output, " {}",
            path().move_to(x1, y1)
                .cubic_bezier_to(mid_x, y1, mid_x, y2, x2, y2)
                .fill(Fill::None)
                .stroke(Stroke::Color(rgb(100, 100, 100), 3.0))
        ).unwrap();
    } else {
        let ctrl1_x = (mid_x + x1) / 2.0;
        let ctrl2_x = (mid_x + x2) / 2.0;
        let ctrl_y = y1 - 25.0;
        write!(output, " {}",
            path().move_to(x1, y1)
                .cubic_bezier_to(ctrl1_x, y1, ctrl1_x, ctrl_y, mid_x, ctrl_y)
                .cubic_bezier_to(ctrl2_x, ctrl_y, ctrl2_x, y2, x2, y2)
                .fill(Fill::None)
                .stroke(Stroke::Color(rgb(100, 100, 100), 3.0))
        ).unwrap();
    }
}

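// Test helpers: build small graphs out of picture cache root targets and
// dynamic tasks, then check the resulting pass count and the number, sizes
// and formats of the render target surfaces pulled from the pool.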
#[cfg(test)]
fn pc_target(
    surface_id: u64,
    tile_x: i32,
    tile_y: i32,
) -> RenderTaskLocation {
    use crate::{
        composite::{NativeSurfaceId, NativeTileId},
        picture::ResolvedSurfaceTexture,
    };

    let width = 512;
    let height = 512;

    RenderTaskLocation::Static {
        surface: StaticRenderTaskSurface::PictureCache {
            surface: ResolvedSurfaceTexture::Native {
                id: NativeTileId {
                    surface_id: NativeSurfaceId(surface_id),
                    x: tile_x,
                    y: tile_y,
                },
                size: DeviceIntSize::new(width, height),
            },
        },
        rect: DeviceIntSize::new(width, height).into(),
    }
}

#[cfg(test)]
impl RenderTaskGraphBuilder {
    fn test_expect(
        mut self,
        pass_count: usize,
        total_surface_count: usize,
        unique_surfaces: &[(i32, i32, ImageFormat)],
    ) {
        use crate::internal_types::FrameStamp;
        use api::{DocumentId, IdNamespace};

        let mut rc = ResourceCache::new_for_testing();
        let mut gc = GpuCache::new();

        let mut frame_stamp = FrameStamp::first(DocumentId::new(IdNamespace(1), 1));
        frame_stamp.advance();
        gc.prepare_for_frames();
        gc.begin_frame(frame_stamp);

        let frame_memory = FrameMemory::fallback();
        let g = self.end_frame(&mut rc, &mut gc, &mut frame_memory.new_vec(), 2048, &frame_memory);
        g.print();

        assert_eq!(g.passes.len(), pass_count);
        assert_eq!(g.surface_counts(), (total_surface_count, unique_surfaces.len()));

        rc.validate_surfaces(unique_surfaces);
    }
}

#[cfg(test)]
fn task_location(location: RenderTaskLocation) -> RenderTask {
    RenderTask::new_test(
        location,
        RenderTargetKind::Color,
    )
}

#[cfg(test)]
fn task_dynamic(size: i32) -> RenderTask {
    RenderTask::new_test(
        RenderTaskLocation::Unallocated { size: DeviceIntSize::new(size, size) },
        RenderTargetKind::Color,
    )
}

#[test]
fn fg_test_1() {
    let mut gb = RenderTaskGraphBuilder::new();

    let root_target = pc_target(0, 0, 0);

    let root = gb.add().init(task_location(root_target.clone()));

    let readback = gb.add().init(task_dynamic(100));
    gb.add_dependency(readback, root);

    let mix_blend_content = gb.add().init(task_dynamic(50));

    let content = gb.add().init(task_location(root_target));
    gb.add_dependency(content, readback);
    gb.add_dependency(content, mix_blend_content);

    gb.test_expect(3, 1, &[
        (2048, 2048, ImageFormat::RGBA8),
    ]);
}

#[test]
fn fg_test_3() {
    let mut gb = RenderTaskGraphBuilder::new();

    let pc_root = gb.add().init(task_location(pc_target(0, 0, 0)));

    let child_pic_0 = gb.add().init(task_dynamic(128));
    let child_pic_1 = gb.add().init(task_dynamic(3000));

    gb.add_dependency(pc_root, child_pic_0);
    gb.add_dependency(pc_root, child_pic_1);

    gb.test_expect(2, 2, &[
        (2048, 2048, ImageFormat::RGBA8),
        (3072, 3072, ImageFormat::RGBA8),
    ]);
}

#[test]
fn fg_test_4() {
    let mut gb = RenderTaskGraphBuilder::new();

    let pc_root = gb.add().init(task_location(pc_target(0, 0, 0)));

    let child_pic_0 = gb.add().init(task_dynamic(128));
    let child_pic_1 = gb.add().init(task_dynamic(128));
    let child_pic_2 = gb.add().init(task_dynamic(128));

    gb.add_dependency(pc_root, child_pic_0);
    gb.add_dependency(child_pic_0, child_pic_1);
    gb.add_dependency(child_pic_1, child_pic_2);

    gb.test_expect(4, 3, &[
        (2048, 2048, ImageFormat::RGBA8),
        (2048, 2048, ImageFormat::RGBA8),
    ]);
}

#[test]
fn fg_test_5() {
    let mut gb = RenderTaskGraphBuilder::new();

    let pc_root = gb.add().init(task_location(pc_target(0, 0, 0)));

    let child_pic_0 = gb.add().init(task_dynamic(128));
    let child_pic_1 = gb.add().init(task_dynamic(64));
    let child_pic_2 = gb.add().init(task_dynamic(32));
    let child_pic_3 = gb.add().init(task_dynamic(16));

    gb.add_dependency(pc_root, child_pic_0);
    gb.add_dependency(child_pic_0, child_pic_1);
    gb.add_dependency(child_pic_1, child_pic_2);
    gb.add_dependency(child_pic_2, child_pic_3);
    gb.add_dependency(pc_root, child_pic_3);

    gb.test_expect(5, 4, &[
        (2048, 2048, ImageFormat::RGBA8),
        (2048, 2048, ImageFormat::RGBA8),
        (2048, 2048, ImageFormat::RGBA8),
    ]);
}

#[test]
fn fg_test_6() {
    let mut gb = RenderTaskGraphBuilder::new();

    let pc_root_1 = gb.add().init(task_location(pc_target(0, 0, 0)));
    let pc_root_2 = gb.add().init(task_location(pc_target(0, 1, 0)));

    let child_pic = gb.add().init(task_dynamic(128));

    gb.add_dependency(pc_root_1, child_pic);
    gb.add_dependency(pc_root_2, child_pic);

    gb.test_expect(2, 1, &[
        (2048, 2048, ImageFormat::RGBA8),
    ]);
}

#[test]
fn fg_test_7() {
    let mut gb = RenderTaskGraphBuilder::new();

    let pc_root = gb.add().init(task_location(pc_target(0, 0, 0)));

    let child0 = gb.add().init(task_dynamic(16));
    let child1 = gb.add().init(task_dynamic(16));

    let child2 = gb.add().init(task_dynamic(16));
    let child3 = gb.add().init(task_dynamic(16));

    gb.add_dependency(pc_root, child0);
    gb.add_dependency(child0, child1);
    gb.add_dependency(pc_root, child1);

    gb.add_dependency(pc_root, child2);
    gb.add_dependency(child2, child3);

    gb.test_expect(3, 3, &[
        (2048, 2048, ImageFormat::RGBA8),
        (2048, 2048, ImageFormat::RGBA8),
        (2048, 2048, ImageFormat::RGBA8),
    ]);
}