wgpu_hal/gles/device.rs

use alloc::{
    borrow::ToOwned, format, string::String, string::ToString as _, sync::Arc, vec, vec::Vec,
};
use core::{cmp::max, convert::TryInto, num::NonZeroU32, ptr, sync::atomic::Ordering};

use arrayvec::ArrayVec;
use glow::HasContext;
use naga::FastHashMap;

use super::{conv, lock, MaybeMutex, PrivateCapabilities};
use crate::auxil::map_naga_stage;
use crate::TlasInstance;

type ShaderStage<'a> = (
    naga::ShaderStage,
    &'a crate::ProgrammableStage<'a, super::ShaderModule>,
);
type NameBindingMap = FastHashMap<String, (super::BindingRegister, u8)>;

struct CompilationContext<'a> {
    layout: &'a super::PipelineLayout,
    sampler_map: &'a mut super::SamplerBindMap,
    name_binding_map: &'a mut NameBindingMap,
    push_constant_items: &'a mut Vec<naga::back::glsl::PushConstantItem>,
    multiview: Option<NonZeroU32>,
    clip_distance_count: &'a mut u32,
}

impl CompilationContext<'_> {
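    /// Consumes the naga reflection data for one compiled stage, recording for each
    /// GLSL-visible name the binding register and linear slot it maps to (and, for
    /// textures, the associated sampler slot), so resources can be rebound after linking.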
    fn consume_reflection(
        self,
        gl: &glow::Context,
        module: &naga::Module,
        ep_info: &naga::valid::FunctionInfo,
        reflection_info: naga::back::glsl::ReflectionInfo,
        naga_stage: naga::ShaderStage,
        program: glow::Program,
    ) {
        for (handle, var) in module.global_variables.iter() {
            if ep_info[handle].is_empty() {
                continue;
            }
            let register = match var.space {
                naga::AddressSpace::Uniform => super::BindingRegister::UniformBuffers,
                naga::AddressSpace::Storage { .. } => super::BindingRegister::StorageBuffers,
                _ => continue,
            };

            let br = var.binding.as_ref().unwrap();
            let slot = self.layout.get_slot(br);

            let name = match reflection_info.uniforms.get(&handle) {
                Some(name) => name.clone(),
                None => continue,
            };
            log::trace!(
                "Rebind buffer: {:?} -> {}, register={:?}, slot={}",
                var.name.as_ref(),
                &name,
                register,
                slot
            );
            self.name_binding_map.insert(name, (register, slot));
        }

        for (name, mapping) in reflection_info.texture_mapping {
            let var = &module.global_variables[mapping.texture];
            let register = match module.types[var.ty].inner {
                naga::TypeInner::Image {
                    class: naga::ImageClass::Storage { .. },
                    ..
                } => super::BindingRegister::Images,
                _ => super::BindingRegister::Textures,
            };

            let tex_br = var.binding.as_ref().unwrap();
            let texture_linear_index = self.layout.get_slot(tex_br);

            self.name_binding_map
                .insert(name, (register, texture_linear_index));
            if let Some(sampler_handle) = mapping.sampler {
                let sam_br = module.global_variables[sampler_handle]
                    .binding
                    .as_ref()
                    .unwrap();
                let sampler_linear_index = self.layout.get_slot(sam_br);
                self.sampler_map[texture_linear_index as usize] = Some(sampler_linear_index);
            }
        }

        for (name, location) in reflection_info.varying {
            match naga_stage {
                naga::ShaderStage::Vertex => {
                    assert_eq!(location.index, 0);
                    unsafe { gl.bind_attrib_location(program, location.location, &name) }
                }
                naga::ShaderStage::Fragment => {
                    assert_eq!(location.index, 0);
                    unsafe { gl.bind_frag_data_location(program, location.location, &name) }
                }
                naga::ShaderStage::Compute => {}
                naga::ShaderStage::Task | naga::ShaderStage::Mesh => unreachable!(),
            }
        }

        *self.push_constant_items = reflection_info.push_constant_items;

        if naga_stage == naga::ShaderStage::Vertex {
            *self.clip_distance_count = reflection_info.clip_distance_count;
        }
    }
}

impl super::Device {
    /// # Safety
    ///
    /// - `name` must be created respecting `desc`
    /// - `name` must be a texture
    /// - If `drop_callback` is [`None`], wgpu-hal will take ownership of the texture. If
    ///   `drop_callback` is [`Some`], the texture must be valid until the callback is called.
    #[cfg(any(native, Emscripten))]
    pub unsafe fn texture_from_raw(
        &self,
        name: NonZeroU32,
        desc: &crate::TextureDescriptor,
        drop_callback: Option<crate::DropCallback>,
    ) -> super::Texture {
        super::Texture {
            inner: super::TextureInner::Texture {
                raw: glow::NativeTexture(name),
                target: super::Texture::get_info_from_desc(desc),
            },
            drop_guard: crate::DropGuard::from_option(drop_callback),
            mip_level_count: desc.mip_level_count,
            array_layer_count: desc.array_layer_count(),
            format: desc.format,
            format_desc: self.shared.describe_texture_format(desc.format),
            copy_size: desc.copy_extent(),
        }
    }

    /// # Safety
    ///
    /// - `name` must be created respecting `desc`
    /// - `name` must be a renderbuffer
    /// - If `drop_callback` is [`None`], wgpu-hal will take ownership of the renderbuffer. If
    ///   `drop_callback` is [`Some`], the renderbuffer must be valid until the callback is called.
    #[cfg(any(native, Emscripten))]
    pub unsafe fn texture_from_raw_renderbuffer(
        &self,
        name: NonZeroU32,
        desc: &crate::TextureDescriptor,
        drop_callback: Option<crate::DropCallback>,
    ) -> super::Texture {
        super::Texture {
            inner: super::TextureInner::Renderbuffer {
                raw: glow::NativeRenderbuffer(name),
            },
            drop_guard: crate::DropGuard::from_option(drop_callback),
            mip_level_count: desc.mip_level_count,
            array_layer_count: desc.array_layer_count(),
            format: desc.format,
            format_desc: self.shared.describe_texture_format(desc.format),
            copy_size: desc.copy_extent(),
        }
    }

    unsafe fn compile_shader(
        gl: &glow::Context,
        shader: &str,
        naga_stage: naga::ShaderStage,
        #[cfg_attr(target_arch = "wasm32", allow(unused))] label: Option<&str>,
    ) -> Result<glow::Shader, crate::PipelineError> {
        let target = match naga_stage {
            naga::ShaderStage::Vertex => glow::VERTEX_SHADER,
            naga::ShaderStage::Fragment => glow::FRAGMENT_SHADER,
            naga::ShaderStage::Compute => glow::COMPUTE_SHADER,
            naga::ShaderStage::Task | naga::ShaderStage::Mesh => unreachable!(),
        };

        let raw = unsafe { gl.create_shader(target) }.unwrap();
        #[cfg(native)]
        if gl.supports_debug() {
            let name = raw.0.get();
            unsafe { gl.object_label(glow::SHADER, name, label) };
        }

        unsafe { gl.shader_source(raw, shader) };
        unsafe { gl.compile_shader(raw) };

        log::debug!("\tCompiled shader {:?}", raw);

        let compiled_ok = unsafe { gl.get_shader_compile_status(raw) };
        let msg = unsafe { gl.get_shader_info_log(raw) };
        if compiled_ok {
            if !msg.is_empty() {
                log::warn!("\tCompile: {}", msg);
            }
            Ok(raw)
        } else {
            log::error!("\tShader compilation failed: {}", msg);
            unsafe { gl.delete_shader(raw) };
            Err(crate::PipelineError::Linkage(
                map_naga_stage(naga_stage),
                msg,
            ))
        }
    }

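    /// Runs the naga GLSL backend for one stage: applies pipeline-overridable
    /// constants, writes GLSL with the chosen bounds-check policies, feeds the
    /// reflection data into `context`, and compiles the generated source.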
    fn create_shader(
        gl: &glow::Context,
        naga_stage: naga::ShaderStage,
        stage: &crate::ProgrammableStage<super::ShaderModule>,
        context: CompilationContext,
        program: glow::Program,
    ) -> Result<glow::Shader, crate::PipelineError> {
        use naga::back::glsl;
        let pipeline_options = glsl::PipelineOptions {
            shader_stage: naga_stage,
            entry_point: stage.entry_point.to_owned(),
            multiview: context.multiview,
        };

        let (module, info) = naga::back::pipeline_constants::process_overrides(
            &stage.module.naga.module,
            &stage.module.naga.info,
            Some((naga_stage, stage.entry_point)),
            stage.constants,
        )
        .map_err(|e| {
            let msg = format!("{e}");
            crate::PipelineError::PipelineConstants(map_naga_stage(naga_stage), msg)
        })?;

        let entry_point_index = module
            .entry_points
            .iter()
            .position(|ep| ep.name.as_str() == stage.entry_point)
            .ok_or(crate::PipelineError::EntryPoint(naga_stage))?;

        use naga::proc::BoundsCheckPolicy;
        // The image bounds checks require the TEXTURE_LEVELS feature available in GL core 4.3+.
        let version = gl.version();
        let image_check = if !version.is_embedded && (version.major, version.minor) >= (4, 3) {
            BoundsCheckPolicy::ReadZeroSkipWrite
        } else {
            BoundsCheckPolicy::Unchecked
        };

        // Other bounds checks are either provided by GLSL or not implemented yet.
        let policies = naga::proc::BoundsCheckPolicies {
            index: BoundsCheckPolicy::Unchecked,
            buffer: BoundsCheckPolicy::Unchecked,
            image_load: image_check,
            binding_array: BoundsCheckPolicy::Unchecked,
        };

        let mut output = String::new();
        let needs_temp_options = stage.zero_initialize_workgroup_memory
            != context.layout.naga_options.zero_initialize_workgroup_memory;
        let mut temp_options;
        let naga_options = if needs_temp_options {
            // Cloning `naga_options` could be expensive, so we only clone it when
            // the zero-initialize-workgroup-memory flag actually differs.
            temp_options = context.layout.naga_options.clone();
            temp_options.zero_initialize_workgroup_memory = stage.zero_initialize_workgroup_memory;
            &temp_options
        } else {
            &context.layout.naga_options
        };
        let mut writer = glsl::Writer::new(
            &mut output,
            &module,
            &info,
            naga_options,
            &pipeline_options,
            policies,
        )
        .map_err(|e| {
            let msg = format!("{e}");
            crate::PipelineError::Linkage(map_naga_stage(naga_stage), msg)
        })?;

        let reflection_info = writer.write().map_err(|e| {
            let msg = format!("{e}");
            crate::PipelineError::Linkage(map_naga_stage(naga_stage), msg)
        })?;

        log::debug!("Naga generated shader:\n{}", output);

        context.consume_reflection(
            gl,
            &module,
            info.get_entry_point(entry_point_index),
            reflection_info,
            naga_stage,
            program,
        );

        unsafe { Self::compile_shader(gl, &output, naga_stage, stage.module.label.as_deref()) }
    }

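    /// Looks up (or creates) a linked program in the shared `program_cache`. The
    /// cache key is the set of shader stages plus the layout's binding-to-slot
    /// tables, so pipelines with identical stages and bindings share one GL program.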
    unsafe fn create_pipeline<'a>(
        &self,
        gl: &glow::Context,
        shaders: ArrayVec<ShaderStage<'a>, { crate::MAX_CONCURRENT_SHADER_STAGES }>,
        layout: &super::PipelineLayout,
        #[cfg_attr(target_arch = "wasm32", allow(unused))] label: Option<&str>,
        multiview: Option<NonZeroU32>,
    ) -> Result<Arc<super::PipelineInner>, crate::PipelineError> {
        let mut program_stages = ArrayVec::new();
        let mut group_to_binding_to_slot = Vec::with_capacity(layout.group_infos.len());
        for group in &*layout.group_infos {
            group_to_binding_to_slot.push(group.binding_to_slot.clone());
        }
        for &(naga_stage, stage) in &shaders {
            program_stages.push(super::ProgramStage {
                naga_stage: naga_stage.to_owned(),
                shader_id: stage.module.id,
                entry_point: stage.entry_point.to_owned(),
                zero_initialize_workgroup_memory: stage.zero_initialize_workgroup_memory,
            });
        }
        let mut guard = self
            .shared
            .program_cache
            .try_lock()
            .expect("Couldn't acquire program_cache lock");
        // This guard ensures that we can't accidentally destroy a program while we're
        // about to reuse it: the only place that destroys a pipeline also locks `program_cache`.
        let program = guard
            .entry(super::ProgramCacheKey {
                stages: program_stages,
                group_to_binding_to_slot: group_to_binding_to_slot.into_boxed_slice(),
            })
            .or_insert_with(|| unsafe {
                Self::create_program(
                    gl,
                    shaders,
                    layout,
                    label,
                    multiview,
                    self.shared.shading_language_version,
                    self.shared.private_caps,
                )
            })
            .to_owned()?;
        drop(guard);

        Ok(program)
    }

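    /// Compiles all stages, links them into a fresh GL program, and performs the
    /// post-link fixups (binding remaps, push-constant and uniform lookups) that
    /// the cached `PipelineInner` is built from.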
    unsafe fn create_program<'a>(
        gl: &glow::Context,
        shaders: ArrayVec<ShaderStage<'a>, { crate::MAX_CONCURRENT_SHADER_STAGES }>,
        layout: &super::PipelineLayout,
        #[cfg_attr(target_arch = "wasm32", allow(unused))] label: Option<&str>,
        multiview: Option<NonZeroU32>,
        glsl_version: naga::back::glsl::Version,
        private_caps: PrivateCapabilities,
    ) -> Result<Arc<super::PipelineInner>, crate::PipelineError> {
        let glsl_version = match glsl_version {
            naga::back::glsl::Version::Embedded { version, .. } => format!("{version} es"),
            naga::back::glsl::Version::Desktop(version) => format!("{version}"),
        };
        let program = unsafe { gl.create_program() }.unwrap();
        #[cfg(native)]
        if let Some(label) = label {
            if private_caps.contains(PrivateCapabilities::DEBUG_FNS) {
                let name = program.0.get();
                unsafe { gl.object_label(glow::PROGRAM, name, Some(label)) };
            }
        }

        let mut name_binding_map = NameBindingMap::default();
        let mut push_constant_items = ArrayVec::<_, { crate::MAX_CONCURRENT_SHADER_STAGES }>::new();
        let mut sampler_map = [None; super::MAX_TEXTURE_SLOTS];
        let mut has_stages = wgt::ShaderStages::empty();
        let mut shaders_to_delete = ArrayVec::<_, { crate::MAX_CONCURRENT_SHADER_STAGES }>::new();
        let mut clip_distance_count = 0;

        for &(naga_stage, stage) in &shaders {
            has_stages |= map_naga_stage(naga_stage);
            let pc_item = {
                push_constant_items.push(Vec::new());
                push_constant_items.last_mut().unwrap()
            };
            let context = CompilationContext {
                layout,
                sampler_map: &mut sampler_map,
                name_binding_map: &mut name_binding_map,
                push_constant_items: pc_item,
                multiview,
                clip_distance_count: &mut clip_distance_count,
            };

            let shader = Self::create_shader(gl, naga_stage, stage, context, program)?;
            shaders_to_delete.push(shader);
        }

        // Create an empty fragment shader if only a vertex shader is present.
        if has_stages == wgt::ShaderStages::VERTEX {
            let shader_src = format!("#version {glsl_version}\n void main(void) {{}}");
            log::info!("Only vertex shader is present. Creating an empty fragment shader");
            let shader = unsafe {
                Self::compile_shader(
                    gl,
                    &shader_src,
                    naga::ShaderStage::Fragment,
                    Some("(wgpu internal) dummy fragment shader"),
                )
            }?;
            shaders_to_delete.push(shader);
        }

        for &shader in shaders_to_delete.iter() {
            unsafe { gl.attach_shader(program, shader) };
        }
        unsafe { gl.link_program(program) };

        for shader in shaders_to_delete {
            unsafe { gl.delete_shader(shader) };
        }

        log::debug!("\tLinked program {:?}", program);

        let linked_ok = unsafe { gl.get_program_link_status(program) };
        let msg = unsafe { gl.get_program_info_log(program) };
        if !linked_ok {
            return Err(crate::PipelineError::Linkage(has_stages, msg));
        }
        if !msg.is_empty() {
            log::warn!("\tLink: {}", msg);
        }

        if !private_caps.contains(PrivateCapabilities::SHADER_BINDING_LAYOUT) {
            // This remapping is only needed if we aren't able to put the binding layout
            // in the shader. We can't remap storage buffers this way.
            unsafe { gl.use_program(Some(program)) };
            for (ref name, (register, slot)) in name_binding_map {
                log::trace!("Get binding {:?} from program {:?}", name, program);
                match register {
                    super::BindingRegister::UniformBuffers => {
                        let index = unsafe { gl.get_uniform_block_index(program, name) }.unwrap();
                        log::trace!("\tBinding slot {slot} to block index {index}");
                        unsafe { gl.uniform_block_binding(program, index, slot as _) };
                    }
                    super::BindingRegister::StorageBuffers => {
                        let index =
                            unsafe { gl.get_shader_storage_block_index(program, name) }.unwrap();
                        log::error!(
                            "Unable to re-map shader storage block {} to {}",
                            name,
                            index
                        );
                        return Err(crate::DeviceError::Lost.into());
                    }
                    super::BindingRegister::Textures | super::BindingRegister::Images => {
                        let location = unsafe { gl.get_uniform_location(program, name) };
                        unsafe { gl.uniform_1_i32(location.as_ref(), slot as _) };
                    }
                }
            }
        }

        let mut uniforms = ArrayVec::new();

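        // GLSL has no native push constants; the naga backend lowers them to plain
        // uniforms, so we look up a uniform location for every push-constant item
        // and record it for later `uniform_*` uploads.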
        for (stage_idx, stage_items) in push_constant_items.into_iter().enumerate() {
            for item in stage_items {
                let naga_module = &shaders[stage_idx].1.module.naga.module;
                let type_inner = &naga_module.types[item.ty].inner;

                let location = unsafe { gl.get_uniform_location(program, &item.access_path) };

                log::trace!(
                    "push constant item: name={}, ty={:?}, offset={}, location={:?}",
                    item.access_path,
                    type_inner,
                    item.offset,
                    location,
                );

                if let Some(location) = location {
                    uniforms.push(super::PushConstantDesc {
                        location,
                        offset: item.offset,
                        size_bytes: type_inner.size(naga_module.to_ctx()),
                        ty: type_inner.clone(),
                    });
                }
            }
        }

        let first_instance_location = if has_stages.contains(wgt::ShaderStages::VERTEX) {
            // If this returns None (the uniform isn't active), that's fine; we just won't set it.
            unsafe { gl.get_uniform_location(program, naga::back::glsl::FIRST_INSTANCE_BINDING) }
        } else {
            None
        };

        Ok(Arc::new(super::PipelineInner {
            program,
            sampler_map,
            first_instance_location,
            push_constant_descs: uniforms,
            clip_distance_count,
        }))
    }
}

impl crate::Device for super::Device {
    type A = super::Api;

    unsafe fn create_buffer(
        &self,
        desc: &crate::BufferDescriptor,
    ) -> Result<super::Buffer, crate::DeviceError> {
        let target = if desc.usage.contains(wgt::BufferUses::INDEX) {
            glow::ELEMENT_ARRAY_BUFFER
        } else {
            glow::ARRAY_BUFFER
        };

        let emulate_map = self
            .shared
            .workarounds
            .contains(super::Workarounds::EMULATE_BUFFER_MAP)
            || !self
                .shared
                .private_caps
                .contains(PrivateCapabilities::BUFFER_ALLOCATION);

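        // If mapping has to be emulated, keep MAP_WRITE buffers entirely in CPU
        // memory (`raw: None`); the host-side allocation in `data` stands in for
        // a GL buffer object when commands reference this buffer.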
        if emulate_map && desc.usage.intersects(wgt::BufferUses::MAP_WRITE) {
            return Ok(super::Buffer {
                raw: None,
                target,
                size: desc.size,
                map_flags: 0,
                data: Some(Arc::new(MaybeMutex::new(vec![0; desc.size as usize]))),
                offset_of_current_mapping: Arc::new(MaybeMutex::new(0)),
            });
        }

        let gl = &self.shared.context.lock();

        let is_host_visible = desc
            .usage
            .intersects(wgt::BufferUses::MAP_READ | wgt::BufferUses::MAP_WRITE);
        let is_coherent = desc
            .memory_flags
            .contains(crate::MemoryFlags::PREFER_COHERENT);

        let mut map_flags = 0;
        if desc.usage.contains(wgt::BufferUses::MAP_READ) {
            map_flags |= glow::MAP_READ_BIT;
        }
        if desc.usage.contains(wgt::BufferUses::MAP_WRITE) {
            map_flags |= glow::MAP_WRITE_BIT;
        }

        let raw = Some(unsafe { gl.create_buffer() }.map_err(|_| crate::DeviceError::OutOfMemory)?);
        unsafe { gl.bind_buffer(target, raw) };
        let raw_size = desc
            .size
            .try_into()
            .map_err(|_| crate::DeviceError::OutOfMemory)?;

        if self
            .shared
            .private_caps
            .contains(PrivateCapabilities::BUFFER_ALLOCATION)
        {
            if is_host_visible {
                map_flags |= glow::MAP_PERSISTENT_BIT;
                if is_coherent {
                    map_flags |= glow::MAP_COHERENT_BIT;
                }
            }
            // TODO: may also be required for other calls involving `buffer_sub_data_u8_slice` (e.g. copy buffer to buffer and clear buffer)
            if desc.usage.intersects(wgt::BufferUses::QUERY_RESOLVE) {
                map_flags |= glow::DYNAMIC_STORAGE_BIT;
            }
            unsafe { gl.buffer_storage(target, raw_size, None, map_flags) };
        } else {
            assert!(!is_coherent);
            let usage = if is_host_visible {
                if desc.usage.contains(wgt::BufferUses::MAP_READ) {
                    glow::STREAM_READ
                } else {
                    glow::DYNAMIC_DRAW
                }
            } else {
                // Even if the usage doesn't contain SRC_READ, we update it internally at least once.
                // Some vendors take usage very literally, and STATIC_DRAW would freeze us with an empty buffer:
                // https://github.com/gfx-rs/wgpu/issues/3371
                glow::DYNAMIC_DRAW
            };
            unsafe { gl.buffer_data_size(target, raw_size, usage) };
        }

        unsafe { gl.bind_buffer(target, None) };

        if !is_coherent && desc.usage.contains(wgt::BufferUses::MAP_WRITE) {
            map_flags |= glow::MAP_FLUSH_EXPLICIT_BIT;
        }
        //TODO: do we need `glow::MAP_UNSYNCHRONIZED_BIT`?

        #[cfg(native)]
        if let Some(label) = desc.label {
            if self
                .shared
                .private_caps
                .contains(PrivateCapabilities::DEBUG_FNS)
            {
                let name = raw.map_or(0, |buf| buf.0.get());
                unsafe { gl.object_label(glow::BUFFER, name, Some(label)) };
            }
        }

        let data = if emulate_map && desc.usage.contains(wgt::BufferUses::MAP_READ) {
            Some(Arc::new(MaybeMutex::new(vec![0; desc.size as usize])))
        } else {
            None
        };

        self.counters.buffers.add(1);

        Ok(super::Buffer {
            raw,
            target,
            size: desc.size,
            map_flags,
            data,
            offset_of_current_mapping: Arc::new(MaybeMutex::new(0)),
        })
    }

    unsafe fn destroy_buffer(&self, buffer: super::Buffer) {
        if let Some(raw) = buffer.raw {
            let gl = &self.shared.context.lock();
            unsafe { gl.delete_buffer(raw) };
        }

        self.counters.buffers.sub(1);
    }

    unsafe fn add_raw_buffer(&self, _buffer: &super::Buffer) {
        self.counters.buffers.add(1);
    }

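    // Mapping either returns a pointer into the host-side shadow allocation
    // (refreshed via `get_buffer_sub_data` for emulated MAP_READ buffers) or a
    // real `map_buffer_range` pointer, remembering the mapping offset so that
    // later flushes can be expressed relative to it.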
    unsafe fn map_buffer(
        &self,
        buffer: &super::Buffer,
        range: crate::MemoryRange,
    ) -> Result<crate::BufferMapping, crate::DeviceError> {
        let is_coherent = buffer.map_flags & glow::MAP_COHERENT_BIT != 0;
        let ptr = match buffer.raw {
            None => {
                let mut vec = lock(buffer.data.as_ref().unwrap());
                let slice = &mut vec.as_mut_slice()[range.start as usize..range.end as usize];
                slice.as_mut_ptr()
            }
            Some(raw) => {
                let gl = &self.shared.context.lock();
                unsafe { gl.bind_buffer(buffer.target, Some(raw)) };
                let ptr = if let Some(ref map_read_allocation) = buffer.data {
                    let mut guard = lock(map_read_allocation);
                    let slice = guard.as_mut_slice();
                    unsafe { self.shared.get_buffer_sub_data(gl, buffer.target, 0, slice) };
                    slice.as_mut_ptr()
                } else {
                    *lock(&buffer.offset_of_current_mapping) = range.start;
                    unsafe {
                        gl.map_buffer_range(
                            buffer.target,
                            range.start as i32,
                            (range.end - range.start) as i32,
                            buffer.map_flags,
                        )
                    }
                };
                unsafe { gl.bind_buffer(buffer.target, None) };
                ptr
            }
        };
        Ok(crate::BufferMapping {
            ptr: ptr::NonNull::new(ptr).ok_or(crate::DeviceError::Lost)?,
            is_coherent,
        })
    }
    unsafe fn unmap_buffer(&self, buffer: &super::Buffer) {
        if let Some(raw) = buffer.raw {
            if buffer.data.is_none() {
                let gl = &self.shared.context.lock();
                unsafe { gl.bind_buffer(buffer.target, Some(raw)) };
                unsafe { gl.unmap_buffer(buffer.target) };
                unsafe { gl.bind_buffer(buffer.target, None) };
                *lock(&buffer.offset_of_current_mapping) = 0;
            }
        }
    }
    unsafe fn flush_mapped_ranges<I>(&self, buffer: &super::Buffer, ranges: I)
    where
        I: Iterator<Item = crate::MemoryRange>,
    {
        if let Some(raw) = buffer.raw {
            if buffer.data.is_none() {
                let gl = &self.shared.context.lock();
                unsafe { gl.bind_buffer(buffer.target, Some(raw)) };
                for range in ranges {
                    let offset_of_current_mapping = *lock(&buffer.offset_of_current_mapping);
                    unsafe {
                        gl.flush_mapped_buffer_range(
                            buffer.target,
                            (range.start - offset_of_current_mapping) as i32,
                            (range.end - range.start) as i32,
                        )
                    };
                }
            }
        }
    }
    unsafe fn invalidate_mapped_ranges<I>(&self, _buffer: &super::Buffer, _ranges: I) {
        //TODO: do we need to do anything?
    }

    unsafe fn create_texture(
        &self,
        desc: &crate::TextureDescriptor,
    ) -> Result<super::Texture, crate::DeviceError> {
        let gl = &self.shared.context.lock();

        let render_usage = wgt::TextureUses::COLOR_TARGET
            | wgt::TextureUses::DEPTH_STENCIL_WRITE
            | wgt::TextureUses::DEPTH_STENCIL_READ;
        let format_desc = self.shared.describe_texture_format(desc.format);

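        // Textures that are only ever used as render targets (and are plain 2D,
        // single-layer) can be backed by a renderbuffer, which some drivers handle
        // more cheaply; everything else gets a full GL texture.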
        let inner = if render_usage.contains(desc.usage)
            && desc.dimension == wgt::TextureDimension::D2
            && desc.size.depth_or_array_layers == 1
        {
            let raw = unsafe { gl.create_renderbuffer().unwrap() };
            unsafe { gl.bind_renderbuffer(glow::RENDERBUFFER, Some(raw)) };
            if desc.sample_count > 1 {
                unsafe {
                    gl.renderbuffer_storage_multisample(
                        glow::RENDERBUFFER,
                        desc.sample_count as i32,
                        format_desc.internal,
                        desc.size.width as i32,
                        desc.size.height as i32,
                    )
                };
            } else {
                unsafe {
                    gl.renderbuffer_storage(
                        glow::RENDERBUFFER,
                        format_desc.internal,
                        desc.size.width as i32,
                        desc.size.height as i32,
                    )
                };
            }

            #[cfg(native)]
            if let Some(label) = desc.label {
                if self
                    .shared
                    .private_caps
                    .contains(PrivateCapabilities::DEBUG_FNS)
                {
                    let name = raw.0.get();
                    unsafe { gl.object_label(glow::RENDERBUFFER, name, Some(label)) };
                }
            }

            unsafe { gl.bind_renderbuffer(glow::RENDERBUFFER, None) };
            super::TextureInner::Renderbuffer { raw }
        } else {
            let raw = unsafe { gl.create_texture().unwrap() };
            let target = super::Texture::get_info_from_desc(desc);

            unsafe { gl.bind_texture(target, Some(raw)) };
            //Note: this has to be done before defining the storage!
            match desc.format.sample_type(None, Some(self.shared.features)) {
                Some(
                    wgt::TextureSampleType::Float { filterable: false }
                    | wgt::TextureSampleType::Uint
                    | wgt::TextureSampleType::Sint,
                ) => {
                    // reset default filtering mode
                    unsafe {
                        gl.tex_parameter_i32(target, glow::TEXTURE_MIN_FILTER, glow::NEAREST as i32)
                    };
                    unsafe {
                        gl.tex_parameter_i32(target, glow::TEXTURE_MAG_FILTER, glow::NEAREST as i32)
                    };
                }
                _ => {}
            }

            if conv::is_layered_target(target) {
                unsafe {
                    if self
                        .shared
                        .private_caps
                        .contains(PrivateCapabilities::TEXTURE_STORAGE)
                    {
                        gl.tex_storage_3d(
                            target,
                            desc.mip_level_count as i32,
                            format_desc.internal,
                            desc.size.width as i32,
                            desc.size.height as i32,
                            desc.size.depth_or_array_layers as i32,
                        )
                    } else if target == glow::TEXTURE_3D {
                        let mut width = desc.size.width;
                        let mut height = desc.size.height;
                        let mut depth = desc.size.depth_or_array_layers;
                        for i in 0..desc.mip_level_count {
                            gl.tex_image_3d(
                                target,
                                i as i32,
                                format_desc.internal as i32,
                                width as i32,
                                height as i32,
                                depth as i32,
                                0,
                                format_desc.external,
                                format_desc.data_type,
                                glow::PixelUnpackData::Slice(None),
                            );
                            width = max(1, width / 2);
                            height = max(1, height / 2);
                            depth = max(1, depth / 2);
                        }
                    } else {
                        let mut width = desc.size.width;
                        let mut height = desc.size.height;
                        for i in 0..desc.mip_level_count {
                            gl.tex_image_3d(
                                target,
                                i as i32,
                                format_desc.internal as i32,
                                width as i32,
                                height as i32,
                                desc.size.depth_or_array_layers as i32,
                                0,
                                format_desc.external,
                                format_desc.data_type,
                                glow::PixelUnpackData::Slice(None),
                            );
                            width = max(1, width / 2);
                            height = max(1, height / 2);
                        }
                    }
                };
            } else if desc.sample_count > 1 {
                unsafe {
                    gl.tex_storage_2d_multisample(
                        target,
                        desc.sample_count as i32,
                        format_desc.internal,
                        desc.size.width as i32,
                        desc.size.height as i32,
                        true,
                    )
                };
            } else {
                unsafe {
                    if self
                        .shared
                        .private_caps
                        .contains(PrivateCapabilities::TEXTURE_STORAGE)
                    {
                        gl.tex_storage_2d(
                            target,
                            desc.mip_level_count as i32,
                            format_desc.internal,
                            desc.size.width as i32,
                            desc.size.height as i32,
                        )
                    } else if target == glow::TEXTURE_CUBE_MAP {
                        let mut width = desc.size.width;
                        let mut height = desc.size.height;
                        for i in 0..desc.mip_level_count {
                            for face in [
                                glow::TEXTURE_CUBE_MAP_POSITIVE_X,
                                glow::TEXTURE_CUBE_MAP_NEGATIVE_X,
                                glow::TEXTURE_CUBE_MAP_POSITIVE_Y,
                                glow::TEXTURE_CUBE_MAP_NEGATIVE_Y,
                                glow::TEXTURE_CUBE_MAP_POSITIVE_Z,
                                glow::TEXTURE_CUBE_MAP_NEGATIVE_Z,
                            ] {
                                gl.tex_image_2d(
                                    face,
                                    i as i32,
                                    format_desc.internal as i32,
                                    width as i32,
                                    height as i32,
                                    0,
                                    format_desc.external,
                                    format_desc.data_type,
                                    glow::PixelUnpackData::Slice(None),
                                );
                            }
                            width = max(1, width / 2);
                            height = max(1, height / 2);
                        }
                    } else {
                        let mut width = desc.size.width;
                        let mut height = desc.size.height;
                        for i in 0..desc.mip_level_count {
                            gl.tex_image_2d(
                                target,
                                i as i32,
                                format_desc.internal as i32,
                                width as i32,
                                height as i32,
                                0,
                                format_desc.external,
                                format_desc.data_type,
                                glow::PixelUnpackData::Slice(None),
                            );
                            width = max(1, width / 2);
                            height = max(1, height / 2);
                        }
                    }
                };
            }

            #[cfg(native)]
            if let Some(label) = desc.label {
                if self
                    .shared
                    .private_caps
                    .contains(PrivateCapabilities::DEBUG_FNS)
                {
                    let name = raw.0.get();
                    unsafe { gl.object_label(glow::TEXTURE, name, Some(label)) };
                }
            }

            unsafe { gl.bind_texture(target, None) };
            super::TextureInner::Texture { raw, target }
        };

        self.counters.textures.add(1);

        Ok(super::Texture {
            inner,
            drop_guard: None,
            mip_level_count: desc.mip_level_count,
            array_layer_count: desc.array_layer_count(),
            format: desc.format,
            format_desc,
            copy_size: desc.copy_extent(),
        })
    }

    unsafe fn destroy_texture(&self, texture: super::Texture) {
        if texture.drop_guard.is_none() {
            let gl = &self.shared.context.lock();
            match texture.inner {
                super::TextureInner::Renderbuffer { raw, .. } => {
                    unsafe { gl.delete_renderbuffer(raw) };
                }
                super::TextureInner::DefaultRenderbuffer => {}
                super::TextureInner::Texture { raw, .. } => {
                    unsafe { gl.delete_texture(raw) };
                }
                #[cfg(webgl)]
                super::TextureInner::ExternalFramebuffer { .. } => {}
                #[cfg(native)]
                super::TextureInner::ExternalNativeFramebuffer { .. } => {}
            }
        }

        // For clarity, we explicitly drop the drop guard. This has no real semantic effect,
        // since the end of the scope would drop it anyway: this function takes ownership of the texture.
        drop(texture.drop_guard);

        self.counters.textures.sub(1);
    }

    unsafe fn add_raw_texture(&self, _texture: &super::Texture) {
        self.counters.textures.add(1);
    }

    unsafe fn create_texture_view(
        &self,
        texture: &super::Texture,
        desc: &crate::TextureViewDescriptor,
    ) -> Result<super::TextureView, crate::DeviceError> {
        self.counters.texture_views.add(1);
        Ok(super::TextureView {
            //TODO: use `conv::map_view_dimension(desc.dimension)`?
            inner: texture.inner.clone(),
            aspects: crate::FormatAspects::new(texture.format, desc.range.aspect),
            mip_levels: desc.range.mip_range(texture.mip_level_count),
            array_layers: desc.range.layer_range(texture.array_layer_count),
            format: texture.format,
        })
    }

    unsafe fn destroy_texture_view(&self, _view: super::TextureView) {
        self.counters.texture_views.sub(1);
    }

    unsafe fn create_sampler(
        &self,
        desc: &crate::SamplerDescriptor,
    ) -> Result<super::Sampler, crate::DeviceError> {
        let gl = &self.shared.context.lock();

        let raw = unsafe { gl.create_sampler().unwrap() };

        let (min, mag) =
            conv::map_filter_modes(desc.min_filter, desc.mag_filter, desc.mipmap_filter);

        unsafe { gl.sampler_parameter_i32(raw, glow::TEXTURE_MIN_FILTER, min as i32) };
        unsafe { gl.sampler_parameter_i32(raw, glow::TEXTURE_MAG_FILTER, mag as i32) };

        unsafe {
            gl.sampler_parameter_i32(
                raw,
                glow::TEXTURE_WRAP_S,
                conv::map_address_mode(desc.address_modes[0]) as i32,
            )
        };
        unsafe {
            gl.sampler_parameter_i32(
                raw,
                glow::TEXTURE_WRAP_T,
                conv::map_address_mode(desc.address_modes[1]) as i32,
            )
        };
        unsafe {
            gl.sampler_parameter_i32(
                raw,
                glow::TEXTURE_WRAP_R,
                conv::map_address_mode(desc.address_modes[2]) as i32,
            )
        };

        if let Some(border_color) = desc.border_color {
            let border = match border_color {
                wgt::SamplerBorderColor::TransparentBlack | wgt::SamplerBorderColor::Zero => {
                    [0.0; 4]
                }
                wgt::SamplerBorderColor::OpaqueBlack => [0.0, 0.0, 0.0, 1.0],
                wgt::SamplerBorderColor::OpaqueWhite => [1.0; 4],
            };
            unsafe { gl.sampler_parameter_f32_slice(raw, glow::TEXTURE_BORDER_COLOR, &border) };
        }

        unsafe { gl.sampler_parameter_f32(raw, glow::TEXTURE_MIN_LOD, desc.lod_clamp.start) };
        unsafe { gl.sampler_parameter_f32(raw, glow::TEXTURE_MAX_LOD, desc.lod_clamp.end) };

        // If clamp is not 1, we know anisotropy is supported up to 16x
        if desc.anisotropy_clamp != 1 {
            unsafe {
                gl.sampler_parameter_i32(
                    raw,
                    glow::TEXTURE_MAX_ANISOTROPY,
                    desc.anisotropy_clamp as i32,
                )
            };
        }

        //set_param_float(glow::TEXTURE_LOD_BIAS, info.lod_bias.0);

        if let Some(compare) = desc.compare {
            unsafe {
                gl.sampler_parameter_i32(
                    raw,
                    glow::TEXTURE_COMPARE_MODE,
                    glow::COMPARE_REF_TO_TEXTURE as i32,
                )
            };
            unsafe {
                gl.sampler_parameter_i32(
                    raw,
                    glow::TEXTURE_COMPARE_FUNC,
                    conv::map_compare_func(compare) as i32,
                )
            };
        }

        #[cfg(native)]
        if let Some(label) = desc.label {
            if self
                .shared
                .private_caps
                .contains(PrivateCapabilities::DEBUG_FNS)
            {
                let name = raw.0.get();
                unsafe { gl.object_label(glow::SAMPLER, name, Some(label)) };
            }
        }

        self.counters.samplers.add(1);

        Ok(super::Sampler { raw })
    }

    unsafe fn destroy_sampler(&self, sampler: super::Sampler) {
        let gl = &self.shared.context.lock();
        unsafe { gl.delete_sampler(sampler.raw) };
        self.counters.samplers.sub(1);
    }

    unsafe fn create_command_encoder(
        &self,
        _desc: &crate::CommandEncoderDescriptor<super::Queue>,
    ) -> Result<super::CommandEncoder, crate::DeviceError> {
        self.counters.command_encoders.add(1);

        Ok(super::CommandEncoder {
            cmd_buffer: super::CommandBuffer::default(),
            state: Default::default(),
            private_caps: self.shared.private_caps,
            counters: Arc::clone(&self.counters),
        })
    }

    unsafe fn create_bind_group_layout(
        &self,
        desc: &crate::BindGroupLayoutDescriptor,
    ) -> Result<super::BindGroupLayout, crate::DeviceError> {
        self.counters.bind_group_layouts.add(1);
        Ok(super::BindGroupLayout {
            entries: Arc::from(desc.entries),
        })
    }

    unsafe fn destroy_bind_group_layout(&self, _bg_layout: super::BindGroupLayout) {
        self.counters.bind_group_layouts.sub(1);
    }

    unsafe fn create_pipeline_layout(
        &self,
        desc: &crate::PipelineLayoutDescriptor<super::BindGroupLayout>,
    ) -> Result<super::PipelineLayout, crate::DeviceError> {
        use naga::back::glsl;

        let mut group_infos = Vec::with_capacity(desc.bind_group_layouts.len());
        let mut num_samplers = 0u8;
        let mut num_textures = 0u8;
        let mut num_images = 0u8;
        let mut num_uniform_buffers = 0u8;
        let mut num_storage_buffers = 0u8;

        let mut writer_flags = glsl::WriterFlags::ADJUST_COORDINATE_SPACE;
        writer_flags.set(
            glsl::WriterFlags::TEXTURE_SHADOW_LOD,
            self.shared
                .private_caps
                .contains(PrivateCapabilities::SHADER_TEXTURE_SHADOW_LOD),
        );
        writer_flags.set(
            glsl::WriterFlags::DRAW_PARAMETERS,
            self.shared
                .private_caps
                .contains(PrivateCapabilities::FULLY_FEATURED_INSTANCING),
        );
        // We always force point size to be written and it will be ignored by the driver if it's not a point list primitive.
        // https://github.com/gfx-rs/wgpu/pull/3440/files#r1095726950
        writer_flags.set(glsl::WriterFlags::FORCE_POINT_SIZE, true);
        let mut binding_map = glsl::BindingMap::default();

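        // Assign each binding a flat slot per register class (samplers, textures,
        // images, uniform and storage buffers), counting across all bind groups;
        // these linear indices are what the generated GLSL and `get_slot` agree on.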
        for (group_index, bg_layout) in desc.bind_group_layouts.iter().enumerate() {
            // Create a vector large enough to hold all the bindings, filled with `!0`.
            let mut binding_to_slot = vec![
                !0;
                bg_layout
                    .entries
                    .iter()
                    .map(|b| b.binding)
                    .max()
                    .map_or(0, |idx| idx as usize + 1)
            ]
            .into_boxed_slice();

            for entry in bg_layout.entries.iter() {
                let counter = match entry.ty {
                    wgt::BindingType::Sampler { .. } => &mut num_samplers,
                    wgt::BindingType::Texture { .. } => &mut num_textures,
                    wgt::BindingType::StorageTexture { .. } => &mut num_images,
                    wgt::BindingType::Buffer {
                        ty: wgt::BufferBindingType::Uniform,
                        ..
                    } => &mut num_uniform_buffers,
                    wgt::BindingType::Buffer {
                        ty: wgt::BufferBindingType::Storage { .. },
                        ..
                    } => &mut num_storage_buffers,
                    wgt::BindingType::AccelerationStructure { .. } => unimplemented!(),
                    wgt::BindingType::ExternalTexture => unimplemented!(),
                };

                binding_to_slot[entry.binding as usize] = *counter;
                let br = naga::ResourceBinding {
                    group: group_index as u32,
                    binding: entry.binding,
                };
                binding_map.insert(br, *counter);
                *counter += entry.count.map_or(1, |c| c.get() as u8);
            }

            group_infos.push(super::BindGroupLayoutInfo {
                entries: Arc::clone(&bg_layout.entries),
                binding_to_slot,
            });
        }

        self.counters.pipeline_layouts.add(1);

        Ok(super::PipelineLayout {
            group_infos: group_infos.into_boxed_slice(),
            naga_options: glsl::Options {
                version: self.shared.shading_language_version,
                writer_flags,
                binding_map,
                zero_initialize_workgroup_memory: true,
            },
        })
    }

    unsafe fn destroy_pipeline_layout(&self, _pipeline_layout: super::PipelineLayout) {
        self.counters.pipeline_layouts.sub(1);
    }

    unsafe fn create_bind_group(
        &self,
        desc: &crate::BindGroupDescriptor<
            super::BindGroupLayout,
            super::Buffer,
            super::Sampler,
            super::TextureView,
            super::AccelerationStructure,
        >,
    ) -> Result<super::BindGroup, crate::DeviceError> {
        let mut contents = Vec::new();

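        // Pair every bind group entry with its layout entry (matched by binding
        // slot) and resolve it to a ready-to-bind `RawBinding` up front, so no
        // lookups are needed when the group is bound at encode time.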
        let layout_and_entry_iter = desc.entries.iter().map(|entry| {
            let layout = desc
                .layout
                .entries
                .iter()
                .find(|layout_entry| layout_entry.binding == entry.binding)
                .expect("internal error: no layout entry found with binding slot");
            (entry, layout)
        });
        for (entry, layout) in layout_and_entry_iter {
            let binding = match layout.ty {
                wgt::BindingType::Buffer { .. } => {
                    let bb = &desc.buffers[entry.resource_index as usize];
                    super::RawBinding::Buffer {
                        raw: bb.buffer.raw.unwrap(),
                        offset: bb.offset as i32,
                        size: match bb.size {
                            Some(s) => s.get() as i32,
                            None => (bb.buffer.size - bb.offset) as i32,
                        },
                    }
                }
                wgt::BindingType::Sampler { .. } => {
                    let sampler = desc.samplers[entry.resource_index as usize];
                    super::RawBinding::Sampler(sampler.raw)
                }
                wgt::BindingType::Texture { view_dimension, .. } => {
                    let view = desc.textures[entry.resource_index as usize].view;
                    if view.array_layers.start != 0 {
                        log::error!("Unable to create a sampled texture binding for non-zero array layer.\n{}",
                            "This is an implementation problem of the wgpu-hal/gles backend.")
1284                    }
1285                    let (raw, target) = view.inner.as_native();
1286
1287                    super::Texture::log_failing_target_heuristics(view_dimension, target);
1288
1289                    super::RawBinding::Texture {
1290                        raw,
1291                        target,
1292                        aspects: view.aspects,
1293                        mip_levels: view.mip_levels.clone(),
1294                    }
1295                }
1296                wgt::BindingType::StorageTexture {
1297                    access,
1298                    format,
1299                    view_dimension,
1300                } => {
1301                    let view = desc.textures[entry.resource_index as usize].view;
1302                    let format_desc = self.shared.describe_texture_format(format);
1303                    let (raw, _target) = view.inner.as_native();
1304                    super::RawBinding::Image(super::ImageBinding {
1305                        raw,
1306                        mip_level: view.mip_levels.start,
1307                        array_layer: match view_dimension {
1308                            wgt::TextureViewDimension::D2Array
1309                            | wgt::TextureViewDimension::CubeArray => None,
1310                            _ => Some(view.array_layers.start),
1311                        },
1312                        access: conv::map_storage_access(access),
1313                        format: format_desc.internal,
1314                    })
1315                }
1316                wgt::BindingType::AccelerationStructure { .. } => unimplemented!(),
1317                wgt::BindingType::ExternalTexture => unimplemented!(),
1318            };
1319            contents.push(binding);
1320        }
1321
1322        self.counters.bind_groups.add(1);
1323
1324        Ok(super::BindGroup {
1325            contents: contents.into_boxed_slice(),
1326        })
1327    }
1328
1329    unsafe fn destroy_bind_group(&self, _group: super::BindGroup) {
1330        self.counters.bind_groups.sub(1);
1331    }
1332
1333    unsafe fn create_shader_module(
1334        &self,
1335        desc: &crate::ShaderModuleDescriptor,
1336        shader: crate::ShaderInput,
1337    ) -> Result<super::ShaderModule, crate::ShaderError> {
1338        self.counters.shader_modules.add(1);
1339
        Ok(super::ShaderModule {
            naga: match shader {
                crate::ShaderInput::SpirV(_) => {
                    panic!("`Features::SPIRV_SHADER_PASSTHROUGH` is not enabled")
                }
                crate::ShaderInput::Msl { .. } => {
                    panic!("`Features::MSL_SHADER_PASSTHROUGH` is not enabled")
                }
                crate::ShaderInput::Naga(naga) => naga,
                crate::ShaderInput::Dxil { .. } | crate::ShaderInput::Hlsl { .. } => {
                    panic!("`Features::HLSL_DXIL_SHADER_PASSTHROUGH` is not enabled")
                }
            },
            label: desc.label.map(|str| str.to_string()),
            id: self.shared.next_shader_id.fetch_add(1, Ordering::Relaxed),
        })
    }

    unsafe fn destroy_shader_module(&self, _module: super::ShaderModule) {
        self.counters.shader_modules.sub(1);
    }

    unsafe fn create_render_pipeline(
        &self,
        desc: &crate::RenderPipelineDescriptor<
            super::PipelineLayout,
            super::ShaderModule,
            super::PipelineCache,
        >,
    ) -> Result<super::RenderPipeline, crate::PipelineError> {
        let gl = &self.shared.context.lock();
        let mut shaders = ArrayVec::new();
        shaders.push((naga::ShaderStage::Vertex, &desc.vertex_stage));
        if let Some(ref fs) = desc.fragment_stage {
            shaders.push((naga::ShaderStage::Fragment, fs));
        }
        let inner =
            unsafe { self.create_pipeline(gl, shaders, desc.layout, desc.label, desc.multiview) }?;

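        // Flatten the per-buffer vertex layouts into two parallel arrays: one
        // stride/step descriptor per vertex buffer and one descriptor per
        // attribute, each pointing back at its buffer by index. GL has no
        // pipeline-level vertex state object, so these are stored for the
        // command encoder to apply later.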
        let (vertex_buffers, vertex_attributes) = {
            let mut buffers = Vec::new();
            let mut attributes = Vec::new();
            for (index, vb_layout) in desc.vertex_buffers.iter().enumerate() {
                buffers.push(super::VertexBufferDesc {
                    step: vb_layout.step_mode,
                    stride: vb_layout.array_stride as u32,
                });
                for vat in vb_layout.attributes.iter() {
                    let format_desc = conv::describe_vertex_format(vat.format);
                    attributes.push(super::AttributeDesc {
                        location: vat.shader_location,
                        offset: vat.offset as u32,
                        buffer_index: index as u32,
                        format_desc,
                    });
                }
            }
            (buffers.into_boxed_slice(), attributes.into_boxed_slice())
        };

        let color_targets = {
            let mut targets = Vec::new();
            for ct in desc.color_targets.iter().filter_map(|at| at.as_ref()) {
                targets.push(super::ColorTargetDesc {
                    mask: ct.write_mask,
                    blend: ct.blend.as_ref().map(conv::map_blend),
                });
            }
            // Note: if any of the target states differ and the
            // `INDEPENDENT_BLEND` flag is not exposed, this pipeline will not
            // bind correctly.
            targets.into_boxed_slice()
        };

        self.counters.render_pipelines.add(1);

        Ok(super::RenderPipeline {
            inner,
            primitive: desc.primitive,
            vertex_buffers,
            vertex_attributes,
            color_targets,
            depth: desc.depth_stencil.as_ref().map(|ds| super::DepthState {
                function: conv::map_compare_func(ds.depth_compare),
                mask: ds.depth_write_enabled,
            }),
            depth_bias: desc
                .depth_stencil
                .as_ref()
                .map(|ds| ds.bias)
                .unwrap_or_default(),
            stencil: desc
                .depth_stencil
                .as_ref()
                .map(|ds| conv::map_stencil(&ds.stencil)),
            alpha_to_coverage_enabled: desc.multisample.alpha_to_coverage_enabled,
        })
    }
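
    // Mesh shading is not exposed as a feature by this backend, so core
    // should never reach this entry point; hence `unreachable!` rather than
    // `unimplemented!`.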
    unsafe fn create_mesh_pipeline(
        &self,
        _desc: &crate::MeshPipelineDescriptor<
            <Self::A as crate::Api>::PipelineLayout,
            <Self::A as crate::Api>::ShaderModule,
            <Self::A as crate::Api>::PipelineCache,
        >,
    ) -> Result<<Self::A as crate::Api>::RenderPipeline, crate::PipelineError> {
        unreachable!()
    }

    unsafe fn destroy_render_pipeline(&self, pipeline: super::RenderPipeline) {
        // If the pipeline only has 2 strong references remaining, they're
        // `pipeline` and `program_cache`. This is safe to assume as long as:
        // - `RenderPipeline` can't be cloned
        // - The only place that we can get a new reference is during
        //   `program_cache.lock()`
        if Arc::strong_count(&pipeline.inner) == 2 {
            let gl = &self.shared.context.lock();
            let mut program_cache = self.shared.program_cache.lock();
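            // Evict this pipeline's program from the cache; `Err(_) => false`
            // also drops any cached failed compilations as a side effect.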
            program_cache.retain(|_, v| match *v {
                Ok(ref p) => p.program != pipeline.inner.program,
                Err(_) => false,
            });
            unsafe { gl.delete_program(pipeline.inner.program) };
        }

        self.counters.render_pipelines.sub(1);
    }

    unsafe fn create_compute_pipeline(
        &self,
        desc: &crate::ComputePipelineDescriptor<
            super::PipelineLayout,
            super::ShaderModule,
            super::PipelineCache,
        >,
    ) -> Result<super::ComputePipeline, crate::PipelineError> {
        let gl = &self.shared.context.lock();
        let mut shaders = ArrayVec::new();
        shaders.push((naga::ShaderStage::Compute, &desc.stage));
        let inner = unsafe { self.create_pipeline(gl, shaders, desc.layout, desc.label, None) }?;

        self.counters.compute_pipelines.add(1);

        Ok(super::ComputePipeline { inner })
    }

    unsafe fn destroy_compute_pipeline(&self, pipeline: super::ComputePipeline) {
        // If the pipeline only has 2 strong references remaining, they're
        // `pipeline` and `program_cache`. This is safe to assume as long as:
        // - `ComputePipeline` can't be cloned
        // - The only place that we can get a new reference is during
        //   `program_cache.lock()`
        if Arc::strong_count(&pipeline.inner) == 2 {
            let gl = &self.shared.context.lock();
            let mut program_cache = self.shared.program_cache.lock();
            program_cache.retain(|_, v| match *v {
                Ok(ref p) => p.program != pipeline.inner.program,
                Err(_) => false,
            });
            unsafe { gl.delete_program(pipeline.inner.program) };
        }

        self.counters.compute_pipelines.sub(1);
    }

    unsafe fn create_pipeline_cache(
        &self,
        _: &crate::PipelineCacheDescriptor<'_>,
    ) -> Result<super::PipelineCache, crate::PipelineCacheError> {
        // Even though the cache doesn't do anything, we still return something
        // here as the least bad option.
        Ok(super::PipelineCache)
    }

    unsafe fn destroy_pipeline_cache(&self, _: super::PipelineCache) {}

    #[cfg_attr(target_arch = "wasm32", allow(unused))]
    unsafe fn create_query_set(
        &self,
        desc: &wgt::QuerySetDescriptor<crate::Label>,
    ) -> Result<super::QuerySet, crate::DeviceError> {
        let gl = &self.shared.context.lock();

        let mut queries = Vec::with_capacity(desc.count as usize);
        for _ in 0..desc.count {
            let query =
                unsafe { gl.create_query() }.map_err(|_| crate::DeviceError::OutOfMemory)?;

            // We aren't really able to, in general, label queries.
            //
            // We could take a timestamp here to "initialize" the query,
            // but that's a bit of a hack, and we don't want to insert
            // random timestamps into the command stream if we don't have to.

            queries.push(query);
        }

        self.counters.query_sets.add(1);

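        // Map WebGPU query types onto GL query targets. The conservative
        // occlusion variant may over-report samples but is cheaper, which is
        // presumably an acceptable trade-off for a boolean occlusion result.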
        Ok(super::QuerySet {
            queries: queries.into_boxed_slice(),
            target: match desc.ty {
                wgt::QueryType::Occlusion => glow::ANY_SAMPLES_PASSED_CONSERVATIVE,
                wgt::QueryType::Timestamp => glow::TIMESTAMP,
                _ => unimplemented!(),
            },
        })
    }

    unsafe fn destroy_query_set(&self, set: super::QuerySet) {
        let gl = &self.shared.context.lock();
        for &query in set.queries.iter() {
            unsafe { gl.delete_query(query) };
        }
        self.counters.query_sets.sub(1);
    }

    unsafe fn create_fence(&self) -> Result<super::Fence, crate::DeviceError> {
        self.counters.fences.add(1);
        Ok(super::Fence::new(&self.shared.options))
    }

    unsafe fn destroy_fence(&self, fence: super::Fence) {
        let gl = &self.shared.context.lock();
        fence.destroy(gl);
        self.counters.fences.sub(1);
    }

    unsafe fn get_fence_value(
        &self,
        fence: &super::Fence,
    ) -> Result<crate::FenceValue, crate::DeviceError> {
        #[cfg_attr(target_arch = "wasm32", allow(clippy::needless_borrow))]
        Ok(fence.get_latest(&self.shared.context.lock()))
    }

    unsafe fn wait(
        &self,
        fence: &super::Fence,
        wait_value: crate::FenceValue,
        timeout_ms: u32,
    ) -> Result<bool, crate::DeviceError> {
        if fence.satisfied(wait_value) {
            return Ok(true);
        }

        let gl = &self.shared.context.lock();
        // MAX_CLIENT_WAIT_TIMEOUT_WEBGL is:
        // - 1s in Gecko https://searchfox.org/mozilla-central/rev/754074e05178e017ef6c3d8e30428ffa8f1b794d/dom/canvas/WebGLTypes.h#1386
        // - 0 in WebKit https://github.com/WebKit/WebKit/blob/4ef90d4672ca50267c0971b85db403d9684508ea/Source/WebCore/html/canvas/WebGL2RenderingContext.cpp#L110
        // - 0 in Chromium https://source.chromium.org/chromium/chromium/src/+/main:third_party/blink/renderer/modules/webgl/webgl2_rendering_context_base.cc;l=112;drc=a3cb0ac4c71ec04abfeaed199e5d63230eca2551
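        // On WebGL/Emscripten the client-wait timeout is therefore forced to
        // zero, making this a non-blocking poll. Elsewhere, convert
        // milliseconds to nanoseconds, clamped to `u32::MAX` ns (about 4.3 s).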
        let timeout_ns = if cfg!(any(webgl, Emscripten)) {
            0
        } else {
            (timeout_ms as u64 * 1_000_000).min(!0u32 as u64)
        };
        fence.wait(gl, wait_value, timeout_ns)
    }

    unsafe fn start_graphics_debugger_capture(&self) -> bool {
        #[cfg(all(native, feature = "renderdoc"))]
        return unsafe {
            self.render_doc
                .start_frame_capture(self.shared.context.raw_context(), ptr::null_mut())
        };
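        // Without RenderDoc support compiled in, report that no capture
        // started.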
        #[allow(unreachable_code)]
        false
    }

    unsafe fn stop_graphics_debugger_capture(&self) {
        #[cfg(all(native, feature = "renderdoc"))]
        unsafe {
            self.render_doc
                .end_frame_capture(ptr::null_mut(), ptr::null_mut())
        }
    }

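    // Ray-tracing acceleration structures have no GLES equivalent; the
    // corresponding features are never advertised, so these entry points are
    // unimplemented (destroy is a harmless no-op).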
    unsafe fn create_acceleration_structure(
        &self,
        _desc: &crate::AccelerationStructureDescriptor,
    ) -> Result<super::AccelerationStructure, crate::DeviceError> {
        unimplemented!()
    }

    unsafe fn get_acceleration_structure_build_sizes<'a>(
        &self,
        _desc: &crate::GetAccelerationStructureBuildSizesDescriptor<'a, super::Buffer>,
    ) -> crate::AccelerationStructureBuildSizes {
        unimplemented!()
    }

    unsafe fn get_acceleration_structure_device_address(
        &self,
        _acceleration_structure: &super::AccelerationStructure,
    ) -> wgt::BufferAddress {
        unimplemented!()
    }

    unsafe fn destroy_acceleration_structure(
        &self,
        _acceleration_structure: super::AccelerationStructure,
    ) {
    }

    fn tlas_instance_to_bytes(&self, _instance: TlasInstance) -> Vec<u8> {
        unimplemented!()
    }

    fn get_internal_counters(&self) -> wgt::HalCounters {
        self.counters.as_ref().clone()
    }

    fn check_if_oom(&self) -> Result<(), crate::DeviceError> {
        Ok(())
    }
}

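// SAFETY: assumed sound because all access to the GL context goes through
// `self.shared.context.lock()`; `cfg(send_sync)` restricts these impls to
// configurations where the underlying context types can cross threads.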
#[cfg(send_sync)]
unsafe impl Sync for super::Device {}
#[cfg(send_sync)]
unsafe impl Send for super::Device {}