naga/back/glsl/mod.rs
1/*!
2Backend for [GLSL][glsl] (OpenGL Shading Language).
3
The main structure is [`Writer`], which maintains internal state that is used
to output a [`Module`](crate::Module) as GLSL.
6
7# Supported versions
8### Core
- 140
- 150
- 330
- 400
- 410
- 420
- 430
- 440
- 450
- 460

### ES
- 300
- 310
- 320
19
20[glsl]: https://www.khronos.org/registry/OpenGL/index_gl.php
21*/
22
// GLSL is mostly a superset of C, but it also removes some parts of it. This is a list of the
// aspects relevant to this backend.
25//
// The most notable change is the introduction of the version preprocessor directive, which must
// always be the first line of a GLSL file and is written as
// `#version number profile`
// where `number` is the version itself (e.g. 300) and `profile` is the shader profile. We only
// support "core" and "es": the former is used in desktop applications, the latter in embedded
// contexts, mobile devices and browsers. Each one has its own versions (at the time of writing,
// the latest version for "core" is 460 and for "es" is 320).
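//
// For example, the directives emitted for the two profiles look like this (illustrative; the
// exact number depends on `Options::version`):
//
//     #version 450 core
//     #version 310 es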
33//
// Another important preprocessor addition is the extension directive, which is written as
// `#extension name: behaviour`
// Extensions provide increased features in a plugin fashion, but they aren't required to be
// supported (hence why they are called extensions). That's why `behaviour` is needed: it specifies
// whether the extension is strictly required or should only be enabled if needed. In our case,
// when we use extensions we always set `behaviour` to `require`.
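//
// For example, a directive this backend can emit might look like this (illustrative):
//
//     #extension GL_EXT_texture_shadow_lod : require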
40//
// The only thing that GLSL removes that makes a difference for this backend is pointers.
42//
// Additions that are relevant for the backend are the `discard` keyword, the introduction of
// vector, matrix, sampler and image types, and functions that provide common shader operations.
45
46pub use features::Features;
47
48use crate::{
49 back::{self, Baked},
50 proc::{self, ExpressionKindTracker, NameKey},
51 valid, Handle, ShaderStage, TypeInner,
52};
53use features::FeaturesManager;
54use std::{
55 cmp::Ordering,
56 fmt::{self, Error as FmtError, Write},
57 mem,
58};
59use thiserror::Error;
60
/// Contains the features-related code and the feature-querying method
62mod features;
/// Contains a constant, `RESERVED_KEYWORDS`, with a slice of all the reserved keywords
64mod keywords;
65
66/// List of supported `core` GLSL versions.
67pub const SUPPORTED_CORE_VERSIONS: &[u16] = &[140, 150, 330, 400, 410, 420, 430, 440, 450, 460];
68/// List of supported `es` GLSL versions.
69pub const SUPPORTED_ES_VERSIONS: &[u16] = &[300, 310, 320];
70
71/// The suffix of the variable that will hold the calculated clamped level
72/// of detail for bounds checking in `ImageLoad`
73const CLAMPED_LOD_SUFFIX: &str = "_clamped_lod";
74
75pub(crate) const MODF_FUNCTION: &str = "naga_modf";
76pub(crate) const FREXP_FUNCTION: &str = "naga_frexp";
77
78// Must match code in glsl_built_in
79pub const FIRST_INSTANCE_BINDING: &str = "naga_vs_first_instance";
80
81/// Mapping between resources and bindings.
82pub type BindingMap = std::collections::BTreeMap<crate::ResourceBinding, u8>;
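// An illustrative (non-authoritative) sketch of filling the map, assuming a resource declared
// at group 0, binding 1 should end up at GL binding 3:
//
//     let mut map = BindingMap::new();
//     map.insert(crate::ResourceBinding { group: 0, binding: 1 }, 3);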
83
84impl crate::AtomicFunction {
85 const fn to_glsl(self) -> &'static str {
86 match self {
87 Self::Add | Self::Subtract => "Add",
88 Self::And => "And",
89 Self::InclusiveOr => "Or",
90 Self::ExclusiveOr => "Xor",
91 Self::Min => "Min",
92 Self::Max => "Max",
93 Self::Exchange { compare: None } => "Exchange",
94 Self::Exchange { compare: Some(_) } => "", //TODO
95 }
96 }
97}
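// The suffix returned above is presumably combined with an `atomic` prefix when the call is
// written out, producing standard GLSL built-ins such as (illustrative):
//
//     atomicAdd(x, 1u);   atomicXor(x, mask);   atomicExchange(x, v);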
98
99impl crate::AddressSpace {
100 const fn is_buffer(&self) -> bool {
101 match *self {
102 crate::AddressSpace::Uniform | crate::AddressSpace::Storage { .. } => true,
103 _ => false,
104 }
105 }
106
107 /// Whether a variable with this address space can be initialized
108 const fn initializable(&self) -> bool {
109 match *self {
110 crate::AddressSpace::Function | crate::AddressSpace::Private => true,
111 crate::AddressSpace::WorkGroup
112 | crate::AddressSpace::Uniform
113 | crate::AddressSpace::Storage { .. }
114 | crate::AddressSpace::Handle
115 | crate::AddressSpace::PushConstant => false,
116 }
117 }
118}
119
120/// A GLSL version.
121#[derive(Debug, Copy, Clone, PartialEq)]
122#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
123#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))]
124pub enum Version {
125 /// `core` GLSL.
126 Desktop(u16),
127 /// `es` GLSL.
128 Embedded { version: u16, is_webgl: bool },
129}
130
131impl Version {
132 /// Create a new gles version
133 pub const fn new_gles(version: u16) -> Self {
134 Self::Embedded {
135 version,
136 is_webgl: false,
137 }
138 }
139
    /// Returns true if `self` is `Version::Embedded` (i.e. is an ES version)
141 const fn is_es(&self) -> bool {
142 match *self {
143 Version::Desktop(_) => false,
144 Version::Embedded { .. } => true,
145 }
146 }
147
148 /// Returns true if targeting WebGL
149 const fn is_webgl(&self) -> bool {
150 match *self {
151 Version::Desktop(_) => false,
152 Version::Embedded { is_webgl, .. } => is_webgl,
153 }
154 }
155
    /// Checks the list of currently supported versions and returns true if it contains the
    /// specified version.
    ///
    /// # Notes
    /// Since an invalid version number will never be added to the supported version list,
    /// this also checks for version validity.
162 fn is_supported(&self) -> bool {
163 match *self {
164 Version::Desktop(v) => SUPPORTED_CORE_VERSIONS.contains(&v),
165 Version::Embedded { version: v, .. } => SUPPORTED_ES_VERSIONS.contains(&v),
166 }
167 }
168
169 fn supports_io_locations(&self) -> bool {
170 *self >= Version::Desktop(330) || *self >= Version::new_gles(300)
171 }
172
173 /// Checks if the version supports all of the explicit layouts:
174 /// - `location=` qualifiers for bindings
175 /// - `binding=` qualifiers for resources
176 ///
177 /// Note: `location=` for vertex inputs and fragment outputs is supported
178 /// unconditionally for GLES 300.
179 fn supports_explicit_locations(&self) -> bool {
180 *self >= Version::Desktop(420) || *self >= Version::new_gles(310)
181 }
182
183 fn supports_early_depth_test(&self) -> bool {
184 *self >= Version::Desktop(130) || *self >= Version::new_gles(310)
185 }
186
187 fn supports_std430_layout(&self) -> bool {
188 *self >= Version::Desktop(430) || *self >= Version::new_gles(310)
189 }
190
191 fn supports_fma_function(&self) -> bool {
192 *self >= Version::Desktop(400) || *self >= Version::new_gles(320)
193 }
194
195 fn supports_integer_functions(&self) -> bool {
196 *self >= Version::Desktop(400) || *self >= Version::new_gles(310)
197 }
198
199 fn supports_frexp_function(&self) -> bool {
200 *self >= Version::Desktop(400) || *self >= Version::new_gles(310)
201 }
202
203 fn supports_derivative_control(&self) -> bool {
204 *self >= Version::Desktop(450)
205 }
206}
207
208impl PartialOrd for Version {
209 fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
210 match (*self, *other) {
211 (Version::Desktop(x), Version::Desktop(y)) => Some(x.cmp(&y)),
212 (Version::Embedded { version: x, .. }, Version::Embedded { version: y, .. }) => {
213 Some(x.cmp(&y))
214 }
215 _ => None,
216 }
217 }
218}
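// Versions are only ordered within the same profile; comparing a desktop version with an ES
// version yields no ordering. An illustrative sketch:
//
//     assert!(Version::Desktop(430) >= Version::Desktop(330));
//     assert_eq!(Version::Desktop(430).partial_cmp(&Version::new_gles(310)), None);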
219
220impl fmt::Display for Version {
221 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
222 match *self {
223 Version::Desktop(v) => write!(f, "{v} core"),
224 Version::Embedded { version: v, .. } => write!(f, "{v} es"),
225 }
226 }
227}
228
229bitflags::bitflags! {
230 /// Configuration flags for the [`Writer`].
231 #[cfg_attr(feature = "serialize", derive(serde::Serialize))]
232 #[cfg_attr(feature = "deserialize", derive(serde::Deserialize))]
233 #[derive(Clone, Copy, Debug, Eq, PartialEq)]
234 pub struct WriterFlags: u32 {
235 /// Flip output Y and extend Z from (0, 1) to (-1, 1).
236 const ADJUST_COORDINATE_SPACE = 0x1;
237 /// Supports GL_EXT_texture_shadow_lod on the host, which provides
238 /// additional functions on shadows and arrays of shadows.
239 const TEXTURE_SHADOW_LOD = 0x2;
240 /// Supports ARB_shader_draw_parameters on the host, which provides
241 /// support for `gl_BaseInstanceARB`, `gl_BaseVertexARB`, `gl_DrawIDARB`, and `gl_DrawID`.
242 const DRAW_PARAMETERS = 0x4;
        /// Include unused global variables, constants, and functions. By default the output will exclude
        /// global variables that are not used in the specified entry point (including indirect use),
        /// all constant declarations, and functions that use excluded global variables.
246 const INCLUDE_UNUSED_ITEMS = 0x10;
247 /// Emit `PointSize` output builtin to vertex shaders, which is
248 /// required for drawing with `PointList` topology.
249 ///
        /// From the [GLSL ES 3.20 specification](https://registry.khronos.org/OpenGL/specs/es/3.2/GLSL_ES_Specification_3.20.html#built-in-language-variables):
        ///
        /// > The variable gl_PointSize is intended for a shader to write the size of the point to be rasterized. It is measured in pixels.
        /// > If gl_PointSize is not written to, its value is undefined in subsequent pipe stages.
253 const FORCE_POINT_SIZE = 0x20;
254 }
255}
256
257/// Configuration used in the [`Writer`].
258#[derive(Debug, Clone)]
259#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
260#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))]
261pub struct Options {
262 /// The GLSL version to be used.
263 pub version: Version,
264 /// Configuration flags for the [`Writer`].
265 pub writer_flags: WriterFlags,
    /// Map of resource bindings to binding locations.
267 pub binding_map: BindingMap,
268 /// Should workgroup variables be zero initialized (by polyfilling)?
269 pub zero_initialize_workgroup_memory: bool,
270}
271
272impl Default for Options {
273 fn default() -> Self {
274 Options {
275 version: Version::new_gles(310),
276 writer_flags: WriterFlags::ADJUST_COORDINATE_SPACE,
277 binding_map: BindingMap::default(),
278 zero_initialize_workgroup_memory: true,
279 }
280 }
281}
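// An illustrative (non-authoritative) example of overriding the defaults to target desktop
// GL 4.3 without flipping the coordinate space:
//
//     let options = Options {
//         version: Version::Desktop(430),
//         writer_flags: WriterFlags::empty(),
//         ..Options::default()
//     };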
282
283/// A subset of options meant to be changed per pipeline.
284#[derive(Debug, Clone)]
285#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
286#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))]
287pub struct PipelineOptions {
288 /// The stage of the entry point.
289 pub shader_stage: ShaderStage,
290 /// The name of the entry point.
291 ///
    /// If no matching entry point is found while creating a [`Writer`], an error will be returned.
293 pub entry_point: String,
294 /// How many views to render to, if doing multiview rendering.
295 pub multiview: Option<std::num::NonZeroU32>,
296}
297
298#[derive(Debug)]
299pub struct VaryingLocation {
300 /// The location of the global.
301 /// This corresponds to `layout(location = ..)` in GLSL.
302 pub location: u32,
303 /// The index which can be used for dual source blending.
304 /// This corresponds to `layout(index = ..)` in GLSL.
305 pub index: u32,
306}
307
308/// Reflection info for texture mappings and uniforms.
309#[derive(Debug)]
310pub struct ReflectionInfo {
311 /// Mapping between texture names and variables/samplers.
312 pub texture_mapping: crate::FastHashMap<String, TextureMapping>,
313 /// Mapping between uniform variables and names.
314 pub uniforms: crate::FastHashMap<Handle<crate::GlobalVariable>, String>,
315 /// Mapping between names and attribute locations.
316 pub varying: crate::FastHashMap<String, VaryingLocation>,
317 /// List of push constant items in the shader.
318 pub push_constant_items: Vec<PushConstantItem>,
319}
320
321/// Mapping between a texture and its sampler, if it exists.
322///
323/// GLSL pre-Vulkan has no concept of separate textures and samplers. Instead, everything is a
324/// `gsamplerN` where `g` is the scalar type and `N` is the dimension. But naga uses separate textures
325/// and samplers in the IR, so the backend produces a [`FastHashMap`](crate::FastHashMap) with the texture name
326/// as a key and a [`TextureMapping`] as a value. This way, the user knows where to bind.
327///
328/// [`Storage`](crate::ImageClass::Storage) images produce `gimageN` and don't have an associated sampler,
329/// so the [`sampler`](Self::sampler) field will be [`None`].
330#[derive(Debug, Clone)]
331pub struct TextureMapping {
332 /// Handle to the image global variable.
333 pub texture: Handle<crate::GlobalVariable>,
334 /// Handle to the associated sampler global variable, if it exists.
335 pub sampler: Option<Handle<crate::GlobalVariable>>,
336}
337
338/// All information to bind a single uniform value to the shader.
339///
340/// Push constants are emulated using traditional uniforms in OpenGL.
341///
342/// These are composed of a set of primitives (scalar, vector, matrix) that
343/// are given names. Because they are not backed by the concept of a buffer,
344/// we must do the work of calculating the offset of each primitive in the
345/// push constant block.
346#[derive(Debug, Clone)]
347pub struct PushConstantItem {
348 /// GL uniform name for the item. This name is the same as if you were
349 /// to access it directly from a GLSL shader.
350 ///
    /// With the following example, the following names will be generated,
    /// one name per GLSL uniform.
353 ///
    /// ```glsl
    /// struct InnerStruct {
    ///     float value;
    /// };
    ///
    /// struct PushConstants {
    ///     InnerStruct inner;
    ///     vec4 array[2];
    /// };
    ///
    /// uniform PushConstants _push_constant_binding_cs;
    /// ```
366 ///
367 /// ```text
368 /// - _push_constant_binding_cs.inner.value
369 /// - _push_constant_binding_cs.array[0]
370 /// - _push_constant_binding_cs.array[1]
371 /// ```
372 ///
373 pub access_path: String,
374 /// Type of the uniform. This will only ever be a scalar, vector, or matrix.
375 pub ty: Handle<crate::Type>,
376 /// The offset in the push constant memory block this uniform maps to.
377 ///
378 /// The size of the uniform can be derived from the type.
379 pub offset: u32,
380}
381
/// Helper structure that generates unique numbers
383#[derive(Default)]
384struct IdGenerator(u32);
385
386impl IdGenerator {
387 /// Generates a number that's guaranteed to be unique for this `IdGenerator`
388 fn generate(&mut self) -> u32 {
389 // It's just an increasing number but it does the job
390 let ret = self.0;
391 self.0 += 1;
392 ret
393 }
394}
395
396/// Assorted options needed for generating varyings.
397#[derive(Clone, Copy)]
398struct VaryingOptions {
399 output: bool,
400 targeting_webgl: bool,
401 draw_parameters: bool,
402}
403
404impl VaryingOptions {
405 const fn from_writer_options(options: &Options, output: bool) -> Self {
406 Self {
407 output,
408 targeting_webgl: options.version.is_webgl(),
409 draw_parameters: options.writer_flags.contains(WriterFlags::DRAW_PARAMETERS),
410 }
411 }
412}
413
414/// Helper wrapper used to get a name for a varying
415///
/// Varyings have different naming schemes depending on their binding:
/// - Varyings with builtin bindings get their name from [`glsl_built_in`].
418/// - Varyings with location bindings are named `_S_location_X` where `S` is a
419/// prefix identifying which pipeline stage the varying connects, and `X` is
420/// the location.
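///
/// For example, a location-0 varying passed from the vertex stage to the fragment stage is
/// named `_vs2fs_location0` (see the stage prefixes in the `Display` impl below).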
421struct VaryingName<'a> {
422 binding: &'a crate::Binding,
423 stage: ShaderStage,
424 options: VaryingOptions,
425}
426impl fmt::Display for VaryingName<'_> {
427 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
428 match *self.binding {
429 crate::Binding::Location {
430 second_blend_source: true,
431 ..
432 } => {
433 write!(f, "_fs2p_location1",)
434 }
435 crate::Binding::Location { location, .. } => {
436 let prefix = match (self.stage, self.options.output) {
437 (ShaderStage::Compute, _) => unreachable!(),
438 // pipeline to vertex
439 (ShaderStage::Vertex, false) => "p2vs",
440 // vertex to fragment
441 (ShaderStage::Vertex, true) | (ShaderStage::Fragment, false) => "vs2fs",
442 // fragment to pipeline
443 (ShaderStage::Fragment, true) => "fs2p",
444 };
445 write!(f, "_{prefix}_location{location}",)
446 }
447 crate::Binding::BuiltIn(built_in) => {
448 write!(f, "{}", glsl_built_in(built_in, self.options))
449 }
450 }
451 }
452}
453
454impl ShaderStage {
455 const fn to_str(self) -> &'static str {
456 match self {
457 ShaderStage::Compute => "cs",
458 ShaderStage::Fragment => "fs",
459 ShaderStage::Vertex => "vs",
460 }
461 }
462}
463
464/// Shorthand result used internally by the backend
465type BackendResult<T = ()> = Result<T, Error>;
466
467/// A GLSL compilation error.
468#[derive(Debug, Error)]
469pub enum Error {
    /// An error occurred while writing to the output.
471 #[error("Format error")]
472 FmtError(#[from] FmtError),
473 /// The specified [`Version`] doesn't have all required [`Features`].
474 ///
475 /// Contains the missing [`Features`].
476 #[error("The selected version doesn't support {0:?}")]
477 MissingFeatures(Features),
478 /// [`AddressSpace::PushConstant`](crate::AddressSpace::PushConstant) was used more than
479 /// once in the entry point, which isn't supported.
480 #[error("Multiple push constants aren't supported")]
481 MultiplePushConstants,
482 /// The specified [`Version`] isn't supported.
483 #[error("The specified version isn't supported")]
484 VersionNotSupported,
485 /// The entry point couldn't be found.
486 #[error("The requested entry point couldn't be found")]
487 EntryPointNotFound,
488 /// A call was made to an unsupported external.
489 #[error("A call was made to an unsupported external: {0}")]
490 UnsupportedExternal(String),
491 /// A scalar with an unsupported width was requested.
492 #[error("A scalar with an unsupported width was requested: {0:?}")]
493 UnsupportedScalar(crate::Scalar),
    /// An image was used with multiple samplers, which isn't supported.
    #[error("An image was used with multiple samplers")]
496 ImageMultipleSamplers,
497 #[error("{0}")]
498 Custom(String),
499 #[error("overrides should not be present at this stage")]
500 Override,
501 /// [`crate::Sampling::First`] is unsupported.
502 #[error("`{:?}` sampling is unsupported", crate::Sampling::First)]
503 FirstSamplingNotSupported,
504}
505
506/// Binary operation with a different logic on the GLSL side.
507enum BinaryOperation {
508 /// Vector comparison should use the function like `greaterThan()`, etc.
509 VectorCompare,
    /// Vector component-wise operation; used to polyfill unsupported ops like `|` and `&` for `bvecN`s
511 VectorComponentWise,
512 /// GLSL `%` is SPIR-V `OpUMod/OpSMod` and `mod()` is `OpFMod`, but [`BinaryOperator::Modulo`](crate::BinaryOperator::Modulo) is `OpFRem`.
513 Modulo,
514 /// Any plain operation. No additional logic required.
515 Other,
516}
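// For instance, vector comparisons are written with the dedicated GLSL functions rather than
// operators (illustrative):
//
//     bvec3 mask = lessThan(a, b);   // instead of `a < b`
//     bvec3 same = equal(a, b);      // instead of a component-wise `a == b`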
517
518/// Writer responsible for all code generation.
519pub struct Writer<'a, W> {
520 // Inputs
521 /// The module being written.
522 module: &'a crate::Module,
523 /// The module analysis.
524 info: &'a valid::ModuleInfo,
525 /// The output writer.
526 out: W,
527 /// User defined configuration to be used.
528 options: &'a Options,
    /// The bounds-checking policies to be used
530 policies: proc::BoundsCheckPolicies,
531
532 // Internal State
533 /// Features manager used to store all the needed features and write them.
534 features: FeaturesManager,
535 namer: proc::Namer,
536 /// A map with all the names needed for writing the module
537 /// (generated by a [`Namer`](crate::proc::Namer)).
538 names: crate::FastHashMap<NameKey, String>,
539 /// A map with the names of global variables needed for reflections.
540 reflection_names_globals: crate::FastHashMap<Handle<crate::GlobalVariable>, String>,
541 /// The selected entry point.
542 entry_point: &'a crate::EntryPoint,
543 /// The index of the selected entry point.
544 entry_point_idx: proc::EntryPointIndex,
545 /// A generator for unique block numbers.
546 block_id: IdGenerator,
547 /// Set of expressions that have associated temporary variables.
548 named_expressions: crate::NamedExpressions,
549 /// Set of expressions that need to be baked to avoid unnecessary repetition in output
550 need_bake_expressions: back::NeedBakeExpressions,
551 /// Information about nesting of loops and switches.
552 ///
553 /// Used for forwarding continue statements in switches that have been
554 /// transformed to `do {} while(false);` loops.
555 continue_ctx: back::continue_forward::ContinueCtx,
556 /// How many views to render to, if doing multiview rendering.
557 multiview: Option<std::num::NonZeroU32>,
558 /// Mapping of varying variables to their location. Needed for reflections.
559 varying: crate::FastHashMap<String, VaryingLocation>,
560}
561
562impl<'a, W: Write> Writer<'a, W> {
563 /// Creates a new [`Writer`] instance.
564 ///
565 /// # Errors
    /// - If the version specified is invalid or not supported.
567 /// - If the entry point couldn't be found in the module.
568 /// - If the version specified doesn't support some used features.
569 pub fn new(
570 out: W,
571 module: &'a crate::Module,
572 info: &'a valid::ModuleInfo,
573 options: &'a Options,
574 pipeline_options: &'a PipelineOptions,
575 policies: proc::BoundsCheckPolicies,
576 ) -> Result<Self, Error> {
577 if !module.overrides.is_empty() {
578 return Err(Error::Override);
579 }
580
581 // Check if the requested version is supported
582 if !options.version.is_supported() {
583 log::error!("Version {}", options.version);
584 return Err(Error::VersionNotSupported);
585 }
586
587 // Try to find the entry point and corresponding index
588 let ep_idx = module
589 .entry_points
590 .iter()
591 .position(|ep| {
592 pipeline_options.shader_stage == ep.stage && pipeline_options.entry_point == ep.name
593 })
594 .ok_or(Error::EntryPointNotFound)?;
595
596 // Generate a map with names required to write the module
597 let mut names = crate::FastHashMap::default();
598 let mut namer = proc::Namer::default();
599 namer.reset(
600 module,
601 keywords::RESERVED_KEYWORDS,
602 &[],
603 &[],
604 &[
605 "gl_", // all GL built-in variables
606 "_group", // all normal bindings
607 "_push_constant_binding_", // all push constant bindings
608 ],
609 &mut names,
610 );
611
612 // Build the instance
613 let mut this = Self {
614 module,
615 info,
616 out,
617 options,
618 policies,
619
620 namer,
621 features: FeaturesManager::new(),
622 names,
623 reflection_names_globals: crate::FastHashMap::default(),
624 entry_point: &module.entry_points[ep_idx],
625 entry_point_idx: ep_idx as u16,
626 multiview: pipeline_options.multiview,
627 block_id: IdGenerator::default(),
628 named_expressions: Default::default(),
629 need_bake_expressions: Default::default(),
630 continue_ctx: back::continue_forward::ContinueCtx::default(),
631 varying: Default::default(),
632 };
633
634 // Find all features required to print this module
635 this.collect_required_features()?;
636
637 Ok(this)
638 }
639
    /// Writes the [`Module`](crate::Module) as GLSL to the output
641 ///
642 /// # Notes
643 /// If an error occurs while writing, the output might have been written partially
644 ///
645 /// # Panics
646 /// Might panic if the module is invalid
647 pub fn write(&mut self) -> Result<ReflectionInfo, Error> {
648 // We use `writeln!(self.out)` throughout the write to add newlines
649 // to make the output more readable
650
651 let es = self.options.version.is_es();
652
        // Write the version (it must be the first thing, or the output isn't valid GLSL)
654 writeln!(self.out, "#version {}", self.options.version)?;
655 // Write all the needed extensions
656 //
        // This used to be the last thing written, since that let us search for features while
        // writing the module and save some loops, but some older versions (420 or less) require
        // the extensions to appear before being used, even though extensions are part of the
        // preprocessor, not the processor ¯\_(ツ)_/¯
661 self.features.write(self.options, &mut self.out)?;
662
663 // glsl es requires a precision to be specified for floats and ints
664 // TODO: Should this be user configurable?
665 if es {
666 writeln!(self.out)?;
667 writeln!(self.out, "precision highp float;")?;
668 writeln!(self.out, "precision highp int;")?;
669 writeln!(self.out)?;
670 }
671
672 if self.entry_point.stage == ShaderStage::Compute {
673 let workgroup_size = self.entry_point.workgroup_size;
674 writeln!(
675 self.out,
676 "layout(local_size_x = {}, local_size_y = {}, local_size_z = {}) in;",
677 workgroup_size[0], workgroup_size[1], workgroup_size[2]
678 )?;
679 writeln!(self.out)?;
680 }
681
682 if self.entry_point.stage == ShaderStage::Vertex
683 && !self
684 .options
685 .writer_flags
686 .contains(WriterFlags::DRAW_PARAMETERS)
687 && self.features.contains(Features::INSTANCE_INDEX)
688 {
689 writeln!(self.out, "uniform uint {FIRST_INSTANCE_BINDING};")?;
690 writeln!(self.out)?;
691 }
692
693 // Enable early depth tests if needed
694 if let Some(depth_test) = self.entry_point.early_depth_test {
695 // If early depth test is supported for this version of GLSL
696 if self.options.version.supports_early_depth_test() {
697 writeln!(self.out, "layout(early_fragment_tests) in;")?;
698
699 if let Some(conservative) = depth_test.conservative {
700 use crate::ConservativeDepth as Cd;
701
702 let depth = match conservative {
703 Cd::GreaterEqual => "greater",
704 Cd::LessEqual => "less",
705 Cd::Unchanged => "unchanged",
706 };
707 writeln!(self.out, "layout (depth_{depth}) out float gl_FragDepth;")?;
708 }
709 writeln!(self.out)?;
710 } else {
711 log::warn!(
712 "Early depth testing is not supported for this version of GLSL: {}",
713 self.options.version
714 );
715 }
716 }
717
718 if self.entry_point.stage == ShaderStage::Vertex && self.options.version.is_webgl() {
719 if let Some(multiview) = self.multiview.as_ref() {
720 writeln!(self.out, "layout(num_views = {multiview}) in;")?;
721 writeln!(self.out)?;
722 }
723 }
724
725 // Write struct types.
726 //
        // These are always ordered because the IR is structured in a way that
        // you can't make a struct without adding all of its members first.
729 for (handle, ty) in self.module.types.iter() {
730 if let TypeInner::Struct { ref members, .. } = ty.inner {
731 // Structures ending with runtime-sized arrays can only be
732 // rendered as shader storage blocks in GLSL, not stand-alone
733 // struct types.
734 if !self.module.types[members.last().unwrap().ty]
735 .inner
736 .is_dynamically_sized(&self.module.types)
737 {
738 let name = &self.names[&NameKey::Type(handle)];
739 write!(self.out, "struct {name} ")?;
740 self.write_struct_body(handle, members)?;
741 writeln!(self.out, ";")?;
742 }
743 }
744 }
745
746 // Write functions to create special types.
747 for (type_key, struct_ty) in self.module.special_types.predeclared_types.iter() {
748 match type_key {
749 &crate::PredeclaredType::ModfResult { size, width }
750 | &crate::PredeclaredType::FrexpResult { size, width } => {
751 let arg_type_name_owner;
752 let arg_type_name = if let Some(size) = size {
753 arg_type_name_owner =
754 format!("{}vec{}", if width == 8 { "d" } else { "" }, size as u8);
755 &arg_type_name_owner
756 } else if width == 8 {
757 "double"
758 } else {
759 "float"
760 };
761
762 let other_type_name_owner;
763 let (defined_func_name, called_func_name, other_type_name) =
764 if matches!(type_key, &crate::PredeclaredType::ModfResult { .. }) {
765 (MODF_FUNCTION, "modf", arg_type_name)
766 } else {
767 let other_type_name = if let Some(size) = size {
768 other_type_name_owner = format!("ivec{}", size as u8);
769 &other_type_name_owner
770 } else {
771 "int"
772 };
773 (FREXP_FUNCTION, "frexp", other_type_name)
774 };
775
776 let struct_name = &self.names[&NameKey::Type(*struct_ty)];
777
778 writeln!(self.out)?;
779 if !self.options.version.supports_frexp_function()
780 && matches!(type_key, &crate::PredeclaredType::FrexpResult { .. })
781 {
782 writeln!(
783 self.out,
784 "{struct_name} {defined_func_name}({arg_type_name} arg) {{
785 {other_type_name} other = arg == {arg_type_name}(0) ? {other_type_name}(0) : {other_type_name}({arg_type_name}(1) + log2(arg));
786 {arg_type_name} fract = arg * exp2({arg_type_name}(-other));
787 return {struct_name}(fract, other);
788}}",
789 )?;
790 } else {
791 writeln!(
792 self.out,
793 "{struct_name} {defined_func_name}({arg_type_name} arg) {{
794 {other_type_name} other;
795 {arg_type_name} fract = {called_func_name}(arg, other);
796 return {struct_name}(fract, other);
797}}",
798 )?;
799 }
800 }
801 &crate::PredeclaredType::AtomicCompareExchangeWeakResult { .. } => {}
802 }
803 }
804
805 // Write all named constants
806 let mut constants = self
807 .module
808 .constants
809 .iter()
810 .filter(|&(_, c)| c.name.is_some())
811 .peekable();
812 while let Some((handle, _)) = constants.next() {
813 self.write_global_constant(handle)?;
814 // Add extra newline for readability on last iteration
815 if constants.peek().is_none() {
816 writeln!(self.out)?;
817 }
818 }
819
820 let ep_info = self.info.get_entry_point(self.entry_point_idx as usize);
821
822 // Write the globals
823 //
824 // Unless explicitly disabled with WriterFlags::INCLUDE_UNUSED_ITEMS,
        // we filter out all globals that aren't used by the selected entry point, as they might
        // interfere with each other (e.g. two globals with the same location but different
        // classes)
828 let include_unused = self
829 .options
830 .writer_flags
831 .contains(WriterFlags::INCLUDE_UNUSED_ITEMS);
832 for (handle, global) in self.module.global_variables.iter() {
833 let is_unused = ep_info[handle].is_empty();
834 if !include_unused && is_unused {
835 continue;
836 }
837
838 match self.module.types[global.ty].inner {
839 // We treat images separately because they might require
840 // writing the storage format
841 TypeInner::Image {
842 mut dim,
843 arrayed,
844 class,
845 } => {
846 // Gather the storage format if needed
847 let storage_format_access = match self.module.types[global.ty].inner {
848 TypeInner::Image {
849 class: crate::ImageClass::Storage { format, access },
850 ..
851 } => Some((format, access)),
852 _ => None,
853 };
854
855 if dim == crate::ImageDimension::D1 && es {
856 dim = crate::ImageDimension::D2
857 }
858
                    // Gather the binding location if needed
860 let layout_binding = if self.options.version.supports_explicit_locations() {
861 let br = global.binding.as_ref().unwrap();
862 self.options.binding_map.get(br).cloned()
863 } else {
864 None
865 };
866
867 // Write all the layout qualifiers
868 if layout_binding.is_some() || storage_format_access.is_some() {
869 write!(self.out, "layout(")?;
870 if let Some(binding) = layout_binding {
871 write!(self.out, "binding = {binding}")?;
872 }
873 if let Some((format, _)) = storage_format_access {
874 let format_str = glsl_storage_format(format)?;
875 let separator = match layout_binding {
876 Some(_) => ",",
877 None => "",
878 };
879 write!(self.out, "{separator}{format_str}")?;
880 }
881 write!(self.out, ") ")?;
882 }
883
884 if let Some((_, access)) = storage_format_access {
885 self.write_storage_access(access)?;
886 }
887
888 // All images in glsl are `uniform`
889 // The trailing space is important
890 write!(self.out, "uniform ")?;
891
892 // write the type
893 //
                    // This is why we need the leading space: `write_image_type` doesn't add
                    // any spaces at the beginning or end
896 self.write_image_type(dim, arrayed, class)?;
897
898 // Finally write the name and end the global with a `;`
899 // The leading space is important
900 let global_name = self.get_global_name(handle, global);
901 writeln!(self.out, " {global_name};")?;
902 writeln!(self.out)?;
903
904 self.reflection_names_globals.insert(handle, global_name);
905 }
                // GLSL has no concept of separate samplers, so we just ignore them
907 TypeInner::Sampler { .. } => continue,
908 // All other globals are written by `write_global`
909 _ => {
910 self.write_global(handle, global)?;
911 // Add a newline (only for readability)
912 writeln!(self.out)?;
913 }
914 }
915 }
916
917 for arg in self.entry_point.function.arguments.iter() {
918 self.write_varying(arg.binding.as_ref(), arg.ty, false)?;
919 }
920 if let Some(ref result) = self.entry_point.function.result {
921 self.write_varying(result.binding.as_ref(), result.ty, true)?;
922 }
923 writeln!(self.out)?;
924
925 // Write all regular functions
926 for (handle, function) in self.module.functions.iter() {
927 // Check that the function doesn't use globals that aren't supported
928 // by the current entry point
929 if !include_unused && !ep_info.dominates_global_use(&self.info[handle]) {
930 continue;
931 }
932
933 let fun_info = &self.info[handle];
934
            // Skip functions that are not compatible with this entry point's stage.
936 //
937 // When validation is enabled, it rejects modules whose entry points try to call
938 // incompatible functions, so if we got this far, then any functions incompatible
939 // with our selected entry point must not be used.
940 //
941 // When validation is disabled, `fun_info.available_stages` is always just
942 // `ShaderStages::all()`, so this will write all functions in the module, and
943 // the downstream GLSL compiler will catch any problems.
944 if !fun_info.available_stages.contains(ep_info.available_stages) {
945 continue;
946 }
947
948 // Write the function
949 self.write_function(back::FunctionType::Function(handle), function, fun_info)?;
950
951 writeln!(self.out)?;
952 }
953
954 self.write_function(
955 back::FunctionType::EntryPoint(self.entry_point_idx),
956 &self.entry_point.function,
957 ep_info,
958 )?;
959
960 // Add newline at the end of file
961 writeln!(self.out)?;
962
963 // Collect all reflection info and return it to the user
964 self.collect_reflection_info()
965 }
966
967 fn write_array_size(
968 &mut self,
969 base: Handle<crate::Type>,
970 size: crate::ArraySize,
971 ) -> BackendResult {
972 write!(self.out, "[")?;
973
974 // Write the array size
975 // Writes nothing if `ArraySize::Dynamic`
976 match size {
977 crate::ArraySize::Constant(size) => {
978 write!(self.out, "{size}")?;
979 }
980 crate::ArraySize::Dynamic => (),
981 }
982
983 write!(self.out, "]")?;
984
985 if let TypeInner::Array {
986 base: next_base,
987 size: next_size,
988 ..
989 } = self.module.types[base].inner
990 {
991 self.write_array_size(next_base, next_size)?;
992 }
993
994 Ok(())
995 }
996
997 /// Helper method used to write value types
998 ///
999 /// # Notes
1000 /// Adds no trailing or leading whitespace
1001 fn write_value_type(&mut self, inner: &TypeInner) -> BackendResult {
1002 match *inner {
            // Scalars are simple: we just get the full name from `glsl_scalar`
1004 TypeInner::Scalar(scalar)
1005 | TypeInner::Atomic(scalar)
1006 | TypeInner::ValuePointer {
1007 size: None,
1008 scalar,
1009 space: _,
1010 } => write!(self.out, "{}", glsl_scalar(scalar)?.full)?,
1011 // Vectors are just `gvecN` where `g` is the scalar prefix and `N` is the vector size
1012 TypeInner::Vector { size, scalar }
1013 | TypeInner::ValuePointer {
1014 size: Some(size),
1015 scalar,
1016 space: _,
1017 } => write!(self.out, "{}vec{}", glsl_scalar(scalar)?.prefix, size as u8)?,
1018 // Matrices are written with `gmatMxN` where `g` is the scalar prefix (only floats and
1019 // doubles are allowed), `M` is the columns count and `N` is the rows count
1020 //
1021 // glsl supports a matrix shorthand `gmatN` where `N` = `M` but it doesn't justify the
1022 // extra branch to write matrices this way
1023 TypeInner::Matrix {
1024 columns,
1025 rows,
1026 scalar,
1027 } => write!(
1028 self.out,
1029 "{}mat{}x{}",
1030 glsl_scalar(scalar)?.prefix,
1031 columns as u8,
1032 rows as u8
1033 )?,
1034 // GLSL arrays are written as `type name[size]`
1035 // Here we only write the size of the array i.e. `[size]`
1036 // Base `type` and `name` should be written outside
1037 TypeInner::Array { base, size, .. } => self.write_array_size(base, size)?,
            // Write all variants instead of `_` so that if new variants are added a
            // non-exhaustiveness error is raised at compile time
1040 TypeInner::Pointer { .. }
1041 | TypeInner::Struct { .. }
1042 | TypeInner::Image { .. }
1043 | TypeInner::Sampler { .. }
1044 | TypeInner::AccelerationStructure
1045 | TypeInner::RayQuery
1046 | TypeInner::BindingArray { .. } => {
1047 return Err(Error::Custom(format!("Unable to write type {inner:?}")))
1048 }
1049 }
1050
1051 Ok(())
1052 }
1053
    /// Helper method used to write non-image/sampler types
1055 ///
1056 /// # Notes
1057 /// Adds no trailing or leading whitespace
1058 fn write_type(&mut self, ty: Handle<crate::Type>) -> BackendResult {
1059 match self.module.types[ty].inner {
1060 // glsl has no pointer types so just write types as normal and loads are skipped
1061 TypeInner::Pointer { base, .. } => self.write_type(base),
1062 // glsl structs are written as just the struct name
1063 TypeInner::Struct { .. } => {
1064 // Get the struct name
1065 let name = &self.names[&NameKey::Type(ty)];
1066 write!(self.out, "{name}")?;
1067 Ok(())
1068 }
1069 // glsl array has the size separated from the base type
1070 TypeInner::Array { base, .. } => self.write_type(base),
1071 ref other => self.write_value_type(other),
1072 }
1073 }
1074
    /// Helper method to write an image type
1076 ///
1077 /// # Notes
1078 /// Adds no leading or trailing whitespace
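    ///
    /// For example (illustrative), a multisampled 2D sampled float image becomes `sampler2DMS`,
    /// and a non-multisampled depth cube array becomes `samplerCubeArrayShadow` (with a
    /// `highp` precision prefix when targeting ES).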
1079 fn write_image_type(
1080 &mut self,
1081 dim: crate::ImageDimension,
1082 arrayed: bool,
1083 class: crate::ImageClass,
1084 ) -> BackendResult {
        // glsl images consist of four parts: the scalar prefix, the image "type", the dimensions,
        // and the modifiers
        //
        // There are two image types
1089 // - sampler - for sampled images
1090 // - image - for storage images
1091 //
1092 // There are three possible modifiers that can be used together and must be written in
1093 // this order to be valid
1094 // - MS - used if it's a multisampled image
1095 // - Array - used if it's an image array
1096 // - Shadow - used if it's a depth image
1097 use crate::ImageClass as Ic;
1098 use crate::Scalar as S;
1099 let float = S {
1100 kind: crate::ScalarKind::Float,
1101 width: 4,
1102 };
1103 let (base, scalar, ms, comparison) = match class {
1104 Ic::Sampled { kind, multi: true } => ("sampler", S { kind, width: 4 }, "MS", ""),
1105 Ic::Sampled { kind, multi: false } => ("sampler", S { kind, width: 4 }, "", ""),
1106 Ic::Depth { multi: true } => ("sampler", float, "MS", ""),
1107 Ic::Depth { multi: false } => ("sampler", float, "", "Shadow"),
1108 Ic::Storage { format, .. } => ("image", format.into(), "", ""),
1109 };
1110
1111 let precision = if self.options.version.is_es() {
1112 "highp "
1113 } else {
1114 ""
1115 };
1116
1117 write!(
1118 self.out,
1119 "{}{}{}{}{}{}{}",
1120 precision,
1121 glsl_scalar(scalar)?.prefix,
1122 base,
1123 glsl_dimension(dim),
1124 ms,
1125 if arrayed { "Array" } else { "" },
1126 comparison
1127 )?;
1128
1129 Ok(())
1130 }
1131
    /// Helper method used to write non-image/sampler globals
1133 ///
1134 /// # Notes
1135 /// Adds a newline
1136 ///
1137 /// # Panics
1138 /// If the global has type sampler
1139 fn write_global(
1140 &mut self,
1141 handle: Handle<crate::GlobalVariable>,
1142 global: &crate::GlobalVariable,
1143 ) -> BackendResult {
1144 if self.options.version.supports_explicit_locations() {
1145 if let Some(ref br) = global.binding {
1146 match self.options.binding_map.get(br) {
1147 Some(binding) => {
1148 let layout = match global.space {
1149 crate::AddressSpace::Storage { .. } => {
1150 if self.options.version.supports_std430_layout() {
1151 "std430, "
1152 } else {
1153 "std140, "
1154 }
1155 }
1156 crate::AddressSpace::Uniform => "std140, ",
1157 _ => "",
1158 };
1159 write!(self.out, "layout({layout}binding = {binding}) ")?
1160 }
1161 None => {
1162 log::debug!("unassigned binding for {:?}", global.name);
1163 if let crate::AddressSpace::Storage { .. } = global.space {
1164 if self.options.version.supports_std430_layout() {
1165 write!(self.out, "layout(std430) ")?
1166 }
1167 }
1168 }
1169 }
1170 }
1171 }
1172
1173 if let crate::AddressSpace::Storage { access } = global.space {
1174 self.write_storage_access(access)?;
1175 }
1176
1177 if let Some(storage_qualifier) = glsl_storage_qualifier(global.space) {
1178 write!(self.out, "{storage_qualifier} ")?;
1179 }
1180
1181 match global.space {
1182 crate::AddressSpace::Private => {
1183 self.write_simple_global(handle, global)?;
1184 }
1185 crate::AddressSpace::WorkGroup => {
1186 self.write_simple_global(handle, global)?;
1187 }
1188 crate::AddressSpace::PushConstant => {
1189 self.write_simple_global(handle, global)?;
1190 }
1191 crate::AddressSpace::Uniform => {
1192 self.write_interface_block(handle, global)?;
1193 }
1194 crate::AddressSpace::Storage { .. } => {
1195 self.write_interface_block(handle, global)?;
1196 }
1197 // A global variable in the `Function` address space is a
1198 // contradiction in terms.
1199 crate::AddressSpace::Function => unreachable!(),
1200 // Textures and samplers are handled directly in `Writer::write`.
1201 crate::AddressSpace::Handle => unreachable!(),
1202 }
1203
1204 Ok(())
1205 }
1206
1207 fn write_simple_global(
1208 &mut self,
1209 handle: Handle<crate::GlobalVariable>,
1210 global: &crate::GlobalVariable,
1211 ) -> BackendResult {
1212 self.write_type(global.ty)?;
1213 write!(self.out, " ")?;
1214 self.write_global_name(handle, global)?;
1215
1216 if let TypeInner::Array { base, size, .. } = self.module.types[global.ty].inner {
1217 self.write_array_size(base, size)?;
1218 }
1219
1220 if global.space.initializable() && is_value_init_supported(self.module, global.ty) {
1221 write!(self.out, " = ")?;
1222 if let Some(init) = global.init {
1223 self.write_const_expr(init)?;
1224 } else {
1225 self.write_zero_init_value(global.ty)?;
1226 }
1227 }
1228
1229 writeln!(self.out, ";")?;
1230
1231 if let crate::AddressSpace::PushConstant = global.space {
1232 let global_name = self.get_global_name(handle, global);
1233 self.reflection_names_globals.insert(handle, global_name);
1234 }
1235
1236 Ok(())
1237 }
1238
1239 /// Write an interface block for a single Naga global.
1240 ///
1241 /// Write `block_name { members }`. Since `block_name` must be unique
    /// between blocks and structs, we add `_block_ID` where `ID` is an
    /// `IdGenerator`-generated number. Write `members` in the same way we write
1244 /// a struct's members.
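    ///
    /// For a uniform global of struct type `Globals` at group 0, binding 0 in the fragment
    /// stage, the resulting declaration (including the qualifiers written by the caller,
    /// `write_global`) might look roughly like this (illustrative):
    ///
    /// ```glsl
    /// layout(std140, binding = 0) uniform Globals_block_0Fragment { Globals _group_0_binding_0_fs; };
    /// ```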
1245 fn write_interface_block(
1246 &mut self,
1247 handle: Handle<crate::GlobalVariable>,
1248 global: &crate::GlobalVariable,
1249 ) -> BackendResult {
        // Write the block name; it's just the struct name appended with `_block_ID`
1251 let ty_name = &self.names[&NameKey::Type(global.ty)];
1252 let block_name = format!(
1253 "{}_block_{}{:?}",
1254 // avoid double underscores as they are reserved in GLSL
1255 ty_name.trim_end_matches('_'),
1256 self.block_id.generate(),
1257 self.entry_point.stage,
1258 );
1259 write!(self.out, "{block_name} ")?;
1260 self.reflection_names_globals.insert(handle, block_name);
1261
1262 match self.module.types[global.ty].inner {
1263 TypeInner::Struct { ref members, .. }
1264 if self.module.types[members.last().unwrap().ty]
1265 .inner
1266 .is_dynamically_sized(&self.module.types) =>
1267 {
1268 // Structs with dynamically sized arrays must have their
1269 // members lifted up as members of the interface block. GLSL
1270 // can't write such struct types anyway.
1271 self.write_struct_body(global.ty, members)?;
1272 write!(self.out, " ")?;
1273 self.write_global_name(handle, global)?;
1274 }
1275 _ => {
1276 // A global of any other type is written as the sole member
1277 // of the interface block. Since the interface block is
1278 // anonymous, this becomes visible in the global scope.
1279 write!(self.out, "{{ ")?;
1280 self.write_type(global.ty)?;
1281 write!(self.out, " ")?;
1282 self.write_global_name(handle, global)?;
1283 if let TypeInner::Array { base, size, .. } = self.module.types[global.ty].inner {
1284 self.write_array_size(base, size)?;
1285 }
1286 write!(self.out, "; }}")?;
1287 }
1288 }
1289
1290 writeln!(self.out, ";")?;
1291
1292 Ok(())
1293 }
1294
1295 /// Helper method used to find which expressions of a given function require baking
1296 ///
1297 /// # Notes
1298 /// Clears `need_bake_expressions` set before adding to it
1299 fn update_expressions_to_bake(&mut self, func: &crate::Function, info: &valid::FunctionInfo) {
1300 use crate::Expression;
1301 self.need_bake_expressions.clear();
1302 for (fun_handle, expr) in func.expressions.iter() {
1303 let expr_info = &info[fun_handle];
1304 let min_ref_count = func.expressions[fun_handle].bake_ref_count();
1305 if min_ref_count <= expr_info.ref_count {
1306 self.need_bake_expressions.insert(fun_handle);
1307 }
1308
1309 let inner = expr_info.ty.inner_with(&self.module.types);
1310
1311 if let Expression::Math {
1312 fun,
1313 arg,
1314 arg1,
1315 arg2,
1316 ..
1317 } = *expr
1318 {
1319 match fun {
1320 crate::MathFunction::Dot => {
1321 // if the expression is a Dot product with integer arguments,
                        // then the arguments need baking as well
1323 if let TypeInner::Scalar(crate::Scalar {
1324 kind: crate::ScalarKind::Sint | crate::ScalarKind::Uint,
1325 ..
1326 }) = *inner
1327 {
1328 self.need_bake_expressions.insert(arg);
1329 self.need_bake_expressions.insert(arg1.unwrap());
1330 }
1331 }
1332 crate::MathFunction::Pack4xI8
1333 | crate::MathFunction::Pack4xU8
1334 | crate::MathFunction::Unpack4xI8
1335 | crate::MathFunction::Unpack4xU8 => {
1336 self.need_bake_expressions.insert(arg);
1337 }
1338 crate::MathFunction::ExtractBits => {
1339 // Only argument 1 is re-used.
1340 self.need_bake_expressions.insert(arg1.unwrap());
1341 }
1342 crate::MathFunction::InsertBits => {
1343 // Only argument 2 is re-used.
1344 self.need_bake_expressions.insert(arg2.unwrap());
1345 }
1346 crate::MathFunction::CountLeadingZeros => {
1347 if let Some(crate::ScalarKind::Sint) = inner.scalar_kind() {
1348 self.need_bake_expressions.insert(arg);
1349 }
1350 }
1351 _ => {}
1352 }
1353 }
1354 }
1355 }
1356
1357 /// Helper method used to get a name for a global
1358 ///
1359 /// Globals have different naming schemes depending on their binding:
1360 /// - Globals without bindings use the name from the [`Namer`](crate::proc::Namer)
1361 /// - Globals with resource binding are named `_group_X_binding_Y` where `X`
1362 /// is the group and `Y` is the binding
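    ///
    /// For example (illustrative), a global at group 1, binding 2 used from a fragment entry
    /// point is named `_group_1_binding_2_fs`.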
1363 fn get_global_name(
1364 &self,
1365 handle: Handle<crate::GlobalVariable>,
1366 global: &crate::GlobalVariable,
1367 ) -> String {
1368 match (&global.binding, global.space) {
1369 (&Some(ref br), _) => {
1370 format!(
1371 "_group_{}_binding_{}_{}",
1372 br.group,
1373 br.binding,
1374 self.entry_point.stage.to_str()
1375 )
1376 }
1377 (&None, crate::AddressSpace::PushConstant) => {
1378 format!("_push_constant_binding_{}", self.entry_point.stage.to_str())
1379 }
1380 (&None, _) => self.names[&NameKey::GlobalVariable(handle)].clone(),
1381 }
1382 }
1383
1384 /// Helper method used to write a name for a global without additional heap allocation
1385 fn write_global_name(
1386 &mut self,
1387 handle: Handle<crate::GlobalVariable>,
1388 global: &crate::GlobalVariable,
1389 ) -> BackendResult {
1390 match (&global.binding, global.space) {
1391 (&Some(ref br), _) => write!(
1392 self.out,
1393 "_group_{}_binding_{}_{}",
1394 br.group,
1395 br.binding,
1396 self.entry_point.stage.to_str()
1397 )?,
1398 (&None, crate::AddressSpace::PushConstant) => write!(
1399 self.out,
1400 "_push_constant_binding_{}",
1401 self.entry_point.stage.to_str()
1402 )?,
1403 (&None, _) => write!(
1404 self.out,
1405 "{}",
1406 &self.names[&NameKey::GlobalVariable(handle)]
1407 )?,
1408 }
1409
1410 Ok(())
1411 }
1412
1413 /// Write a GLSL global that will carry a Naga entry point's argument or return value.
1414 ///
1415 /// A Naga entry point's arguments and return value are rendered in GLSL as
1416 /// variables at global scope with the `in` and `out` storage qualifiers.
1417 /// The code we generate for `main` loads from all the `in` globals into
1418 /// appropriately named locals. Before it returns, `main` assigns the
1419 /// components of its return value into all the `out` globals.
1420 ///
1421 /// This function writes a declaration for one such GLSL global,
1422 /// representing a value passed into or returned from [`self.entry_point`]
1423 /// that has a [`Location`] binding. The global's name is generated based on
1424 /// the location index and the shader stages being connected; see
1425 /// [`VaryingName`]. This means we don't need to know the names of
1426 /// arguments, just their types and bindings.
1427 ///
1428 /// Emit nothing for entry point arguments or return values with [`BuiltIn`]
1429 /// bindings; `main` will read from or assign to the appropriate GLSL
1430 /// special variable; these are pre-declared. As an exception, we do declare
1431 /// `gl_Position` or `gl_FragCoord` with the `invariant` qualifier if
1432 /// needed.
1433 ///
1434 /// Use `output` together with [`self.entry_point.stage`] to determine which
1435 /// shader stages are being connected, and choose the `in` or `out` storage
1436 /// qualifier.
1437 ///
1438 /// [`self.entry_point`]: Writer::entry_point
1439 /// [`self.entry_point.stage`]: crate::EntryPoint::stage
1440 /// [`Location`]: crate::Binding::Location
1441 /// [`BuiltIn`]: crate::Binding::BuiltIn
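    ///
    /// An illustrative sketch of the declaration emitted for a location-1 fragment input with
    /// perspective (smooth) interpolation and centroid sampling, on a version that supports
    /// explicit locations:
    ///
    /// ```glsl
    /// layout(location = 1) smooth centroid in vec3 _vs2fs_location1;
    /// ```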
1442 fn write_varying(
1443 &mut self,
1444 binding: Option<&crate::Binding>,
1445 ty: Handle<crate::Type>,
1446 output: bool,
1447 ) -> Result<(), Error> {
1448 // For a struct, emit a separate global for each member with a binding.
1449 if let TypeInner::Struct { ref members, .. } = self.module.types[ty].inner {
1450 for member in members {
1451 self.write_varying(member.binding.as_ref(), member.ty, output)?;
1452 }
1453 return Ok(());
1454 }
1455
1456 let binding = match binding {
1457 None => return Ok(()),
1458 Some(binding) => binding,
1459 };
1460
1461 let (location, interpolation, sampling, second_blend_source) = match *binding {
1462 crate::Binding::Location {
1463 location,
1464 interpolation,
1465 sampling,
1466 second_blend_source,
1467 } => (location, interpolation, sampling, second_blend_source),
1468 crate::Binding::BuiltIn(built_in) => {
1469 if let crate::BuiltIn::Position { invariant: true } = built_in {
1470 match (self.options.version, self.entry_point.stage) {
1471 (
1472 Version::Embedded {
1473 version: 300,
1474 is_webgl: true,
1475 },
1476 ShaderStage::Fragment,
1477 ) => {
1478 // `invariant gl_FragCoord` is not allowed in WebGL2 and possibly
1479 // OpenGL ES in general (waiting on confirmation).
1480 //
1481 // See https://github.com/KhronosGroup/WebGL/issues/3518
1482 }
1483 _ => {
1484 writeln!(
1485 self.out,
1486 "invariant {};",
1487 glsl_built_in(
1488 built_in,
1489 VaryingOptions::from_writer_options(self.options, output)
1490 )
1491 )?;
1492 }
1493 }
1494 }
1495 return Ok(());
1496 }
1497 };
1498
1499 // Write the interpolation modifier if needed
1500 //
1501 // We ignore all interpolation and auxiliary modifiers that aren't used in fragment
1502 // shaders' input globals or vertex shaders' output globals.
1503 let emit_interpolation_and_auxiliary = match self.entry_point.stage {
1504 ShaderStage::Vertex => output,
1505 ShaderStage::Fragment => !output,
1506 ShaderStage::Compute => false,
1507 };
1508
1509 // Write the I/O locations, if allowed
1510 let io_location = if self.options.version.supports_explicit_locations()
1511 || !emit_interpolation_and_auxiliary
1512 {
1513 if self.options.version.supports_io_locations() {
1514 if second_blend_source {
1515 write!(self.out, "layout(location = {location}, index = 1) ")?;
1516 } else {
1517 write!(self.out, "layout(location = {location}) ")?;
1518 }
1519 None
1520 } else {
1521 Some(VaryingLocation {
1522 location,
1523 index: second_blend_source as u32,
1524 })
1525 }
1526 } else {
1527 None
1528 };
1529
1530 // Write the interpolation qualifier.
1531 if let Some(interp) = interpolation {
1532 if emit_interpolation_and_auxiliary {
1533 write!(self.out, "{} ", glsl_interpolation(interp))?;
1534 }
1535 }
1536
1537 // Write the sampling auxiliary qualifier.
1538 //
1539 // Before GLSL 4.2, the `centroid` and `sample` qualifiers were required to appear
1540 // immediately before the `in` / `out` qualifier, so we'll just follow that rule
1541 // here, regardless of the version.
1542 if let Some(sampling) = sampling {
1543 if emit_interpolation_and_auxiliary {
1544 if let Some(qualifier) = glsl_sampling(sampling)? {
1545 write!(self.out, "{qualifier} ")?;
1546 }
1547 }
1548 }
1549
1550 // Write the input/output qualifier.
1551 write!(self.out, "{} ", if output { "out" } else { "in" })?;
1552
1553 // Write the type
1554 // `write_type` adds no leading or trailing spaces
1555 self.write_type(ty)?;
1556
1557 // Finally write the global name and end the global with a `;` and a newline
1558 // Leading space is important
1559 let vname = VaryingName {
1560 binding: &crate::Binding::Location {
1561 location,
1562 interpolation: None,
1563 sampling: None,
1564 second_blend_source,
1565 },
1566 stage: self.entry_point.stage,
1567 options: VaryingOptions::from_writer_options(self.options, output),
1568 };
1569 writeln!(self.out, " {vname};")?;
1570
1571 if let Some(location) = io_location {
1572 self.varying.insert(vname.to_string(), location);
1573 }
1574
1575 Ok(())
1576 }
1577
1578 /// Helper method used to write functions (both entry points and regular functions)
1579 ///
1580 /// # Notes
1581 /// Adds a newline
1582 fn write_function(
1583 &mut self,
1584 ty: back::FunctionType,
1585 func: &crate::Function,
1586 info: &valid::FunctionInfo,
1587 ) -> BackendResult {
1588 // Create a function context for the function being written
1589 let ctx = back::FunctionCtx {
1590 ty,
1591 info,
1592 expressions: &func.expressions,
1593 named_expressions: &func.named_expressions,
1594 expr_kind_tracker: ExpressionKindTracker::from_arena(&func.expressions),
1595 };
1596
1597 self.named_expressions.clear();
1598 self.update_expressions_to_bake(func, info);
1599
1600 // Write the function header
1601 //
1602 // glsl headers are the same as in c:
1603 // `ret_type name(args)`
1604 // `ret_type` is the return type
1605 // `name` is the function name
1606 // `args` is a comma separated list of `type name`
1607 // | - `type` is the argument type
1608 // | - `name` is the argument name
1609
1610 // Start by writing the return type if any otherwise write void
1611 // This is the only place where `void` is a valid type
1612 // (though it's more a keyword than a type)
1613 if let back::FunctionType::EntryPoint(_) = ctx.ty {
1614 write!(self.out, "void")?;
1615 } else if let Some(ref result) = func.result {
1616 self.write_type(result.ty)?;
1617 if let TypeInner::Array { base, size, .. } = self.module.types[result.ty].inner {
1618 self.write_array_size(base, size)?
1619 }
1620 } else {
1621 write!(self.out, "void")?;
1622 }
1623
1624 // Write the function name and open parentheses for the argument list
1625 let function_name = match ctx.ty {
1626 back::FunctionType::Function(handle) => &self.names[&NameKey::Function(handle)],
1627 back::FunctionType::EntryPoint(_) => "main",
1628 };
1629 write!(self.out, " {function_name}(")?;
1630
1631 // Write the comma separated argument list
1632 //
1633 // We need access to `Self` here so we use the reference passed to the closure as an
1634 // argument instead of capturing as that would cause a borrow checker error
1635 let arguments = match ctx.ty {
1636 back::FunctionType::EntryPoint(_) => &[][..],
1637 back::FunctionType::Function(_) => &func.arguments,
1638 };
1639 let arguments: Vec<_> = arguments
1640 .iter()
1641 .enumerate()
1642 .filter(|&(_, arg)| match self.module.types[arg.ty].inner {
1643 TypeInner::Sampler { .. } => false,
1644 _ => true,
1645 })
1646 .collect();
1647 self.write_slice(&arguments, |this, _, &(i, arg)| {
1648 // Write the argument type
1649 match this.module.types[arg.ty].inner {
1650 // We treat images separately because they might require
1651 // writing the storage format
1652 TypeInner::Image {
1653 dim,
1654 arrayed,
1655 class,
1656 } => {
1657 // Write the storage format if needed
1658 if let TypeInner::Image {
1659 class: crate::ImageClass::Storage { format, .. },
1660 ..
1661 } = this.module.types[arg.ty].inner
1662 {
1663 write!(this.out, "layout({}) ", glsl_storage_format(format)?)?;
1664 }
1665
1666 // write the type
1667 //
                    // This is why we need the leading space: `write_image_type` doesn't add
                    // any spaces at the beginning or end
1670 this.write_image_type(dim, arrayed, class)?;
1671 }
1672 TypeInner::Pointer { base, .. } => {
1673 // write parameter qualifiers
1674 write!(this.out, "inout ")?;
1675 this.write_type(base)?;
1676 }
1677 // All other types are written by `write_type`
1678 _ => {
1679 this.write_type(arg.ty)?;
1680 }
1681 }
1682
1683 // Write the argument name
1684 // The leading space is important
1685 write!(this.out, " {}", &this.names[&ctx.argument_key(i as u32)])?;
1686
1687 // Write array size
1688 match this.module.types[arg.ty].inner {
1689 TypeInner::Array { base, size, .. } => {
1690 this.write_array_size(base, size)?;
1691 }
1692 TypeInner::Pointer { base, .. } => {
1693 if let TypeInner::Array { base, size, .. } = this.module.types[base].inner {
1694 this.write_array_size(base, size)?;
1695 }
1696 }
1697 _ => {}
1698 }
1699
1700 Ok(())
1701 })?;
1702
1703 // Close the parentheses and open braces to start the function body
1704 writeln!(self.out, ") {{")?;
1705
1706 if self.options.zero_initialize_workgroup_memory
1707 && ctx.ty.is_compute_entry_point(self.module)
1708 {
1709 self.write_workgroup_variables_initialization(&ctx)?;
1710 }
1711
1712        // Compose the function arguments from globals, in the case of an entry point.
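        // As a sketch (all identifiers below are made up; the real names come from
        // `VaryingName` and the name keys), a struct argument is rebuilt from its
        // per-member varyings, while any other argument is assigned from a single varying:
        // ```
        // VertexInput arg0 = VertexInput(_location0, _location1);
        // ```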
1713 if let back::FunctionType::EntryPoint(ep_index) = ctx.ty {
1714 let stage = self.module.entry_points[ep_index as usize].stage;
1715 for (index, arg) in func.arguments.iter().enumerate() {
1716 write!(self.out, "{}", back::INDENT)?;
1717 self.write_type(arg.ty)?;
1718 let name = &self.names[&NameKey::EntryPointArgument(ep_index, index as u32)];
1719 write!(self.out, " {name}")?;
1720 write!(self.out, " = ")?;
1721 match self.module.types[arg.ty].inner {
1722 TypeInner::Struct { ref members, .. } => {
1723 self.write_type(arg.ty)?;
1724 write!(self.out, "(")?;
1725 for (index, member) in members.iter().enumerate() {
1726 let varying_name = VaryingName {
1727 binding: member.binding.as_ref().unwrap(),
1728 stage,
1729 options: VaryingOptions::from_writer_options(self.options, false),
1730 };
1731 if index != 0 {
1732 write!(self.out, ", ")?;
1733 }
1734 write!(self.out, "{varying_name}")?;
1735 }
1736 writeln!(self.out, ");")?;
1737 }
1738 _ => {
1739 let varying_name = VaryingName {
1740 binding: arg.binding.as_ref().unwrap(),
1741 stage,
1742 options: VaryingOptions::from_writer_options(self.options, false),
1743 };
1744 writeln!(self.out, "{varying_name};")?;
1745 }
1746 }
1747 }
1748 }
1749
1750 // Write all function locals
1751        // Locals are `type name (= init)?;` where the init part (including the `=`) is optional
1752 //
1753 // Always adds a newline
1754 for (handle, local) in func.local_variables.iter() {
1755 // Write indentation (only for readability) and the type
1756 // `write_type` adds no trailing space
1757 write!(self.out, "{}", back::INDENT)?;
1758 self.write_type(local.ty)?;
1759
1760 // Write the local name
1761 // The leading space is important
1762 write!(self.out, " {}", self.names[&ctx.name_key(handle)])?;
1763 // Write size for array type
1764 if let TypeInner::Array { base, size, .. } = self.module.types[local.ty].inner {
1765 self.write_array_size(base, size)?;
1766 }
1767 // Write the local initializer if needed
1768 if let Some(init) = local.init {
1769            // Put the equals sign only if there's an initializer
1770 // The leading and trailing spaces aren't needed but help with readability
1771 write!(self.out, " = ")?;
1772
1773                // Write the initializer expression
1774                // `write_expr` adds no trailing or leading space/newline
1775 self.write_expr(init, &ctx)?;
1776 } else if is_value_init_supported(self.module, local.ty) {
1777 write!(self.out, " = ")?;
1778 self.write_zero_init_value(local.ty)?;
1779 }
1780
1781 // Finish the local with `;` and add a newline (only for readability)
1782 writeln!(self.out, ";")?
1783 }
1784
1785 // Write the function body (statement list)
1786 for sta in func.body.iter() {
1787 // Write a statement, the indentation should always be 1 when writing the function body
1788 // `write_stmt` adds a newline
1789 self.write_stmt(sta, &ctx, back::Level(1))?;
1790 }
1791
1792 // Close braces and add a newline
1793 writeln!(self.out, "}}")?;
1794
1795 Ok(())
1796 }
1797
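    /// Helper that zero-initializes `workgroup` variables at the start of a
    /// compute entry point.
    ///
    /// A minimal sketch of the emitted GLSL, assuming a single shared `float`
    /// named `shared_acc` (the name is illustrative); a workgroup barrier
    /// written by `write_barrier` follows the block:
    ///
    /// ```glsl
    /// if (gl_LocalInvocationID == uvec3(0u)) {
    ///     shared_acc = 0.0;
    /// }
    /// ```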
1798 fn write_workgroup_variables_initialization(
1799 &mut self,
1800 ctx: &back::FunctionCtx,
1801 ) -> BackendResult {
1802 let mut vars = self
1803 .module
1804 .global_variables
1805 .iter()
1806 .filter(|&(handle, var)| {
1807 !ctx.info[handle].is_empty() && var.space == crate::AddressSpace::WorkGroup
1808 })
1809 .peekable();
1810
1811 if vars.peek().is_some() {
1812 let level = back::Level(1);
1813
1814 writeln!(self.out, "{level}if (gl_LocalInvocationID == uvec3(0u)) {{")?;
1815
1816 for (handle, var) in vars {
1817 let name = &self.names[&NameKey::GlobalVariable(handle)];
1818 write!(self.out, "{}{} = ", level.next(), name)?;
1819 self.write_zero_init_value(var.ty)?;
1820 writeln!(self.out, ";")?;
1821 }
1822
1823 writeln!(self.out, "{level}}}")?;
1824 self.write_barrier(crate::Barrier::WORK_GROUP, level)?;
1825 }
1826
1827 Ok(())
1828 }
1829
1830 /// Write a list of comma separated `T` values using a writer function `F`.
1831 ///
1832    /// The writer function `F` receives a mutable reference to `self` so that, if needed, it can
1833    /// write output without borrow checker issues (capturing `self` in a closure would cause them).
1834    /// The second argument is the 0-based index of the element in the list, and the last argument
1835    /// is a reference to the element `T` being written.
1836 ///
1837 /// # Notes
1838 /// - Adds no newlines or leading/trailing whitespace
1839 /// - The last element won't have a trailing `,`
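    ///
    /// For example (illustrative only), a call such as
    /// `self.write_slice(&args, |this, _, arg| this.write_expr(*arg, ctx))`
    /// writes the arguments out as `arg0, arg1, arg2`.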
1840 fn write_slice<T, F: FnMut(&mut Self, u32, &T) -> BackendResult>(
1841 &mut self,
1842 data: &[T],
1843 mut f: F,
1844 ) -> BackendResult {
1845 // Loop through `data` invoking `f` for each element
1846 for (index, item) in data.iter().enumerate() {
1847 if index != 0 {
1848 write!(self.out, ", ")?;
1849 }
1850 f(self, index as u32, item)?;
1851 }
1852
1853 Ok(())
1854 }
1855
1856 /// Helper method used to write global constants
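    ///
    /// A scalar constant, for instance, comes out as `const float foo = 1.0;`
    /// (`foo` stands in for the resolved constant name).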
1857 fn write_global_constant(&mut self, handle: Handle<crate::Constant>) -> BackendResult {
1858 write!(self.out, "const ")?;
1859 let constant = &self.module.constants[handle];
1860 self.write_type(constant.ty)?;
1861 let name = &self.names[&NameKey::Constant(handle)];
1862 write!(self.out, " {name}")?;
1863 if let TypeInner::Array { base, size, .. } = self.module.types[constant.ty].inner {
1864 self.write_array_size(base, size)?;
1865 }
1866 write!(self.out, " = ")?;
1867 self.write_const_expr(constant.init)?;
1868 writeln!(self.out, ";")?;
1869 Ok(())
1870 }
1871
1872 /// Helper method used to output a dot product as an arithmetic expression
1873 ///
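    /// For a three-component integer dot product, for example, the emitted
    /// expression looks roughly like:
    ///
    /// ```glsl
    /// ( + a.x * b.x + a.y * b.y + a.z * b.z)
    /// ```
    ///
    /// where `a` and `b` stand in for the two argument expressions.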
1874 fn write_dot_product(
1875 &mut self,
1876 arg: Handle<crate::Expression>,
1877 arg1: Handle<crate::Expression>,
1878 size: usize,
1879 ctx: &back::FunctionCtx,
1880 ) -> BackendResult {
1881 // Write parentheses around the dot product expression to prevent operators
1882 // with different precedences from applying earlier.
1883 write!(self.out, "(")?;
1884
1885 // Cycle through all the components of the vector
1886 for index in 0..size {
1887 let component = back::COMPONENTS[index];
1888 // Write the addition to the previous product
1889 // This will print an extra '+' at the beginning but that is fine in glsl
1890 write!(self.out, " + ")?;
1891 // Write the first vector expression, this expression is marked to be
1892 // cached so unless it can't be cached (for example, it's a Constant)
1893 // it shouldn't produce large expressions.
1894 self.write_expr(arg, ctx)?;
1895 // Access the current component on the first vector
1896 write!(self.out, ".{component} * ")?;
1897 // Write the second vector expression, this expression is marked to be
1898 // cached so unless it can't be cached (for example, it's a Constant)
1899 // it shouldn't produce large expressions.
1900 self.write_expr(arg1, ctx)?;
1901 // Access the current component on the second vector
1902 write!(self.out, ".{component}")?;
1903 }
1904
1905 write!(self.out, ")")?;
1906 Ok(())
1907 }
1908
1909 /// Helper method used to write structs
1910 ///
1911 /// # Notes
1912 /// Ends in a newline
1913 fn write_struct_body(
1914 &mut self,
1915 handle: Handle<crate::Type>,
1916 members: &[crate::StructMember],
1917 ) -> BackendResult {
1918 // glsl structs are written as in C
1919        // `struct name { members };`
1920 // | `struct` is a keyword
1921 // | `name` is the struct name
1922 // | `members` is a semicolon separated list of `type name`
1923 // | `type` is the member type
1924 // | `name` is the member name
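        //
        // For example (the type and member names are made up), this method emits
        // the body of:
        // ```
        // struct PointLight {
        //     vec3 position;
        //     float intensity[4];
        // };
        // ```
        // (the `struct PointLight` prefix and trailing `;` are written by the caller)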
1925 writeln!(self.out, "{{")?;
1926
1927 for (idx, member) in members.iter().enumerate() {
1928 // The indentation is only for readability
1929 write!(self.out, "{}", back::INDENT)?;
1930
1931 match self.module.types[member.ty].inner {
1932 TypeInner::Array {
1933 base,
1934 size,
1935 stride: _,
1936 } => {
1937 self.write_type(base)?;
1938 write!(
1939 self.out,
1940 " {}",
1941 &self.names[&NameKey::StructMember(handle, idx as u32)]
1942 )?;
1943 // Write [size]
1944 self.write_array_size(base, size)?;
1945 // Newline is important
1946 writeln!(self.out, ";")?;
1947 }
1948 _ => {
1949 // Write the member type
1950 // Adds no trailing space
1951 self.write_type(member.ty)?;
1952
1953 // Write the member name and put a semicolon
1954 // The leading space is important
1955 // All members must have a semicolon even the last one
1956 writeln!(
1957 self.out,
1958 " {};",
1959 &self.names[&NameKey::StructMember(handle, idx as u32)]
1960 )?;
1961 }
1962 }
1963 }
1964
1965 write!(self.out, "}}")?;
1966 Ok(())
1967 }
1968
1969 /// Helper method used to write statements
1970 ///
1971 /// # Notes
1972 /// Always adds a newline
1973 fn write_stmt(
1974 &mut self,
1975 sta: &crate::Statement,
1976 ctx: &back::FunctionCtx,
1977 level: back::Level,
1978 ) -> BackendResult {
1979 use crate::Statement;
1980
1981 match *sta {
1982 // This is where we can generate intermediate constants for some expression types.
1983 Statement::Emit(ref range) => {
1984 for handle in range.clone() {
1985 let ptr_class = ctx.resolve_type(handle, &self.module.types).pointer_space();
1986 let expr_name = if ptr_class.is_some() {
1987 // GLSL can't save a pointer-valued expression in a variable,
1988 // but we shouldn't ever need to: they should never be named expressions,
1989 // and none of the expression types flagged by bake_ref_count can be pointer-valued.
1990 None
1991 } else if let Some(name) = ctx.named_expressions.get(&handle) {
1992                        // The frontend provides names for all named expressions up front, but we
1993                        // write them out step by step, so we need to recache them here.
1994                        // Otherwise, we could accidentally write a variable name instead of the full expression.
1995                        // We also use sanitized names, which keeps the backend from emitting variables named after reserved keywords.
1996 Some(self.namer.call(name))
1997 } else if self.need_bake_expressions.contains(&handle) {
1998 Some(Baked(handle).to_string())
1999 } else {
2000 None
2001 };
2002
2003                    // If we are going to write an `ImageLoad` next, the target image
2004                    // is sampled, and we are using the `Restrict` policy for bounds
2005                    // checking images, we need to write a local holding the clamped lod.
2006 if let crate::Expression::ImageLoad {
2007 image,
2008 level: Some(level_expr),
2009 ..
2010 } = ctx.expressions[handle]
2011 {
2012 if let TypeInner::Image {
2013 class: crate::ImageClass::Sampled { .. },
2014 ..
2015 } = *ctx.resolve_type(image, &self.module.types)
2016 {
2017 if let proc::BoundsCheckPolicy::Restrict = self.policies.image_load {
2018 write!(self.out, "{level}")?;
2019 self.write_clamped_lod(ctx, handle, image, level_expr)?
2020 }
2021 }
2022 }
2023
2024 if let Some(name) = expr_name {
2025 write!(self.out, "{level}")?;
2026 self.write_named_expr(handle, name, handle, ctx)?;
2027 }
2028 }
2029 }
2030            // Blocks are simple: we just need to write the block statements between braces
2031 // We could also just print the statements but this is more readable and maps more
2032 // closely to the IR
2033 Statement::Block(ref block) => {
2034 write!(self.out, "{level}")?;
2035 writeln!(self.out, "{{")?;
2036 for sta in block.iter() {
2037 // Increase the indentation to help with readability
2038 self.write_stmt(sta, ctx, level.next())?
2039 }
2040 writeln!(self.out, "{level}}}")?
2041 }
2042 // Ifs are written as in C:
2043 // ```
2044 // if(condition) {
2045 // accept
2046 // } else {
2047 // reject
2048 // }
2049 // ```
2050 Statement::If {
2051 condition,
2052 ref accept,
2053 ref reject,
2054 } => {
2055 write!(self.out, "{level}")?;
2056 write!(self.out, "if (")?;
2057 self.write_expr(condition, ctx)?;
2058 writeln!(self.out, ") {{")?;
2059
2060 for sta in accept {
2061 // Increase indentation to help with readability
2062 self.write_stmt(sta, ctx, level.next())?;
2063 }
2064
2065 // If there are no statements in the reject block we skip writing it
2066 // This is only for readability
2067 if !reject.is_empty() {
2068 writeln!(self.out, "{level}}} else {{")?;
2069
2070 for sta in reject {
2071 // Increase indentation to help with readability
2072 self.write_stmt(sta, ctx, level.next())?;
2073 }
2074 }
2075
2076 writeln!(self.out, "{level}}}")?
2077 }
2078            // Switches are written as in C:
2079 // ```
2080 // switch (selector) {
2081 // // Fallthrough
2082 // case label:
2083 // block
2084 // // Non fallthrough
2085 // case label:
2086 // block
2087 // break;
2088 // default:
2089 // block
2090 // }
2091 // ```
2092            // Where the `default` case appears isn't important, but we put it last
2093            // so that we don't need to print a `break` for it
2094 Statement::Switch {
2095 selector,
2096 ref cases,
2097 } => {
2098 let l2 = level.next();
2099 // Some GLSL consumers may not handle switches with a single
2100 // body correctly: See wgpu#4514. Write such switch statements
2101 // as a `do {} while(false);` loop instead.
2102 //
2103 // Since doing so may inadvertently capture `continue`
2104 // statements in the switch body, we must apply continue
2105 // forwarding. See the `naga::back::continue_forward` module
2106 // docs for details.
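                //
                // A rough sketch of the rewrite for a single-bodied switch (the gate
                // variable only appears when a `continue` inside the body needs to be
                // forwarded, and its name comes from the namer):
                // ```
                // bool should_continue = false;
                // do {
                //     // case body, with `continue` rewritten as
                //     // `should_continue = true; break;`
                // } while(false);
                // if (should_continue) {
                //     continue;
                // }
                // ```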
2107 let one_body = cases
2108 .iter()
2109 .rev()
2110 .skip(1)
2111 .all(|case| case.fall_through && case.body.is_empty());
2112 if one_body {
2113 // Unlike HLSL, in GLSL `continue_ctx` only needs to know
2114 // about [`Switch`] statements that are being rendered as
2115 // `do-while` loops.
2116 if let Some(variable) = self.continue_ctx.enter_switch(&mut self.namer) {
2117 writeln!(self.out, "{level}bool {variable} = false;",)?;
2118 };
2119 writeln!(self.out, "{level}do {{")?;
2120                    // Note: Expressions have no side effects, so we don't need to emit the selector expression.
2121
2122 // Body
2123 if let Some(case) = cases.last() {
2124 for sta in case.body.iter() {
2125 self.write_stmt(sta, ctx, l2)?;
2126 }
2127 }
2128 // End do-while
2129 writeln!(self.out, "{level}}} while(false);")?;
2130
2131 // Handle any forwarded continue statements.
2132 use back::continue_forward::ExitControlFlow;
2133 let op = match self.continue_ctx.exit_switch() {
2134 ExitControlFlow::None => None,
2135 ExitControlFlow::Continue { variable } => Some(("continue", variable)),
2136 ExitControlFlow::Break { variable } => Some(("break", variable)),
2137 };
2138 if let Some((control_flow, variable)) = op {
2139 writeln!(self.out, "{level}if ({variable}) {{")?;
2140 writeln!(self.out, "{l2}{control_flow};")?;
2141 writeln!(self.out, "{level}}}")?;
2142 }
2143 } else {
2144 // Start the switch
2145 write!(self.out, "{level}")?;
2146 write!(self.out, "switch(")?;
2147 self.write_expr(selector, ctx)?;
2148 writeln!(self.out, ") {{")?;
2149
2150 // Write all cases
2151 for case in cases {
2152 match case.value {
2153 crate::SwitchValue::I32(value) => {
2154 write!(self.out, "{l2}case {value}:")?
2155 }
2156 crate::SwitchValue::U32(value) => {
2157 write!(self.out, "{l2}case {value}u:")?
2158 }
2159 crate::SwitchValue::Default => write!(self.out, "{l2}default:")?,
2160 }
2161
2162 let write_block_braces = !(case.fall_through && case.body.is_empty());
2163 if write_block_braces {
2164 writeln!(self.out, " {{")?;
2165 } else {
2166 writeln!(self.out)?;
2167 }
2168
2169 for sta in case.body.iter() {
2170 self.write_stmt(sta, ctx, l2.next())?;
2171 }
2172
2173 if !case.fall_through
2174 && case.body.last().map_or(true, |s| !s.is_terminator())
2175 {
2176 writeln!(self.out, "{}break;", l2.next())?;
2177 }
2178
2179 if write_block_braces {
2180 writeln!(self.out, "{l2}}}")?;
2181 }
2182 }
2183
2184 writeln!(self.out, "{level}}}")?
2185 }
2186 }
2187            // Loops in naga IR are based on wgsl loops. glsl can emulate the behaviour with a `while(true)`
2188            // loop whose body starts with the continuing block, gated so it is skipped on the first iteration:
2189 // ```
2190 // bool loop_init = true;
2191 // while(true) {
2192 // if (!loop_init) { <continuing> }
2193 // loop_init = false;
2194 // <body>
2195 // }
2196 // ```
2197 Statement::Loop {
2198 ref body,
2199 ref continuing,
2200 break_if,
2201 } => {
2202 self.continue_ctx.enter_loop();
2203 if !continuing.is_empty() || break_if.is_some() {
2204 let gate_name = self.namer.call("loop_init");
2205 writeln!(self.out, "{level}bool {gate_name} = true;")?;
2206 writeln!(self.out, "{level}while(true) {{")?;
2207 let l2 = level.next();
2208 let l3 = l2.next();
2209 writeln!(self.out, "{l2}if (!{gate_name}) {{")?;
2210 for sta in continuing {
2211 self.write_stmt(sta, ctx, l3)?;
2212 }
2213 if let Some(condition) = break_if {
2214 write!(self.out, "{l3}if (")?;
2215 self.write_expr(condition, ctx)?;
2216 writeln!(self.out, ") {{")?;
2217 writeln!(self.out, "{}break;", l3.next())?;
2218 writeln!(self.out, "{l3}}}")?;
2219 }
2220 writeln!(self.out, "{l2}}}")?;
2221 writeln!(self.out, "{}{} = false;", level.next(), gate_name)?;
2222 } else {
2223 writeln!(self.out, "{level}while(true) {{")?;
2224 }
2225 for sta in body {
2226 self.write_stmt(sta, ctx, level.next())?;
2227 }
2228 writeln!(self.out, "{level}}}")?;
2229 self.continue_ctx.exit_loop();
2230 }
2231            // Break, continue and return are written as in C
2232 // `break;`
2233 Statement::Break => {
2234 write!(self.out, "{level}")?;
2235 writeln!(self.out, "break;")?
2236 }
2237 // `continue;`
2238 Statement::Continue => {
2239 // Sometimes we must render a `Continue` statement as a `break`.
2240 // See the docs for the `back::continue_forward` module.
2241 if let Some(variable) = self.continue_ctx.continue_encountered() {
2242 writeln!(self.out, "{level}{variable} = true;",)?;
2243 writeln!(self.out, "{level}break;")?
2244 } else {
2245 writeln!(self.out, "{level}continue;")?
2246 }
2247 }
2248 // `return expr;`, `expr` is optional
2249 Statement::Return { value } => {
2250 write!(self.out, "{level}")?;
2251 match ctx.ty {
2252 back::FunctionType::Function(_) => {
2253 write!(self.out, "return")?;
2254 // Write the expression to be returned if needed
2255 if let Some(expr) = value {
2256 write!(self.out, " ")?;
2257 self.write_expr(expr, ctx)?;
2258 }
2259 writeln!(self.out, ";")?;
2260 }
2261 back::FunctionType::EntryPoint(ep_index) => {
2262 let mut has_point_size = false;
2263 let ep = &self.module.entry_points[ep_index as usize];
2264 if let Some(ref result) = ep.function.result {
2265 let value = value.unwrap();
2266 match self.module.types[result.ty].inner {
2267 TypeInner::Struct { ref members, .. } => {
2268 let temp_struct_name = match ctx.expressions[value] {
2269 crate::Expression::Compose { .. } => {
2270 let return_struct = "_tmp_return";
2271 write!(
2272 self.out,
2273 "{} {} = ",
2274 &self.names[&NameKey::Type(result.ty)],
2275 return_struct
2276 )?;
2277 self.write_expr(value, ctx)?;
2278 writeln!(self.out, ";")?;
2279 write!(self.out, "{level}")?;
2280 Some(return_struct)
2281 }
2282 _ => None,
2283 };
2284
2285 for (index, member) in members.iter().enumerate() {
2286 if let Some(crate::Binding::BuiltIn(
2287 crate::BuiltIn::PointSize,
2288 )) = member.binding
2289 {
2290 has_point_size = true;
2291 }
2292
2293 let varying_name = VaryingName {
2294 binding: member.binding.as_ref().unwrap(),
2295 stage: ep.stage,
2296 options: VaryingOptions::from_writer_options(
2297 self.options,
2298 true,
2299 ),
2300 };
2301 write!(self.out, "{varying_name} = ")?;
2302
2303 if let Some(struct_name) = temp_struct_name {
2304 write!(self.out, "{struct_name}")?;
2305 } else {
2306 self.write_expr(value, ctx)?;
2307 }
2308
2309 // Write field name
2310 writeln!(
2311 self.out,
2312 ".{};",
2313 &self.names
2314 [&NameKey::StructMember(result.ty, index as u32)]
2315 )?;
2316 write!(self.out, "{level}")?;
2317 }
2318 }
2319 _ => {
2320 let name = VaryingName {
2321 binding: result.binding.as_ref().unwrap(),
2322 stage: ep.stage,
2323 options: VaryingOptions::from_writer_options(
2324 self.options,
2325 true,
2326 ),
2327 };
2328 write!(self.out, "{name} = ")?;
2329 self.write_expr(value, ctx)?;
2330 writeln!(self.out, ";")?;
2331 write!(self.out, "{level}")?;
2332 }
2333 }
2334 }
2335
2336 let is_vertex_stage = self.module.entry_points[ep_index as usize].stage
2337 == ShaderStage::Vertex;
2338 if is_vertex_stage
2339 && self
2340 .options
2341 .writer_flags
2342 .contains(WriterFlags::ADJUST_COORDINATE_SPACE)
2343 {
2344 writeln!(
2345 self.out,
2346 "gl_Position.yz = vec2(-gl_Position.y, gl_Position.z * 2.0 - gl_Position.w);",
2347 )?;
2348 write!(self.out, "{level}")?;
2349 }
2350
2351 if is_vertex_stage
2352 && self
2353 .options
2354 .writer_flags
2355 .contains(WriterFlags::FORCE_POINT_SIZE)
2356 && !has_point_size
2357 {
2358 writeln!(self.out, "gl_PointSize = 1.0;")?;
2359 write!(self.out, "{level}")?;
2360 }
2361 writeln!(self.out, "return;")?;
2362 }
2363 }
2364 }
2365            // This is one of the places where glsl adds to the syntax of C: the discard
2366            // keyword, which ceases all further processing in a fragment shader. It's called OpKill
2367            // in spir-v, which is why the IR statement is named `Statement::Kill`
2368 Statement::Kill => writeln!(self.out, "{level}discard;")?,
2369 Statement::Barrier(flags) => {
2370 self.write_barrier(flags, level)?;
2371 }
2372 // Stores in glsl are just variable assignments written as `pointer = value;`
2373 Statement::Store { pointer, value } => {
2374 write!(self.out, "{level}")?;
2375 self.write_expr(pointer, ctx)?;
2376 write!(self.out, " = ")?;
2377 self.write_expr(value, ctx)?;
2378 writeln!(self.out, ";")?
2379 }
2380 Statement::WorkGroupUniformLoad { pointer, result } => {
2381 // GLSL doesn't have pointers, which means that this backend needs to ensure that
2382 // the actual "loading" is happening between the two barriers.
2383 // This is done in `Emit` by never emitting a variable name for pointer variables
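                //
                // The emitted pattern is therefore roughly (identifiers are illustrative,
                // and `write_barrier` may emit extra memory barrier calls):
                // ```
                // barrier();
                // float _e3 = shared_value;
                // barrier();
                // ```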
2384 self.write_barrier(crate::Barrier::WORK_GROUP, level)?;
2385
2386 let result_name = Baked(result).to_string();
2387 write!(self.out, "{level}")?;
2388 // Expressions cannot have side effects, so just writing the expression here is fine.
2389 self.write_named_expr(pointer, result_name, result, ctx)?;
2390
2391 self.write_barrier(crate::Barrier::WORK_GROUP, level)?;
2392 }
2393 // Stores a value into an image.
2394 Statement::ImageStore {
2395 image,
2396 coordinate,
2397 array_index,
2398 value,
2399 } => {
2400 write!(self.out, "{level}")?;
2401 self.write_image_store(ctx, image, coordinate, array_index, value)?
2402 }
2403            // A `Call` is written `name(arguments)` where `arguments` is a comma separated list of expressions
2404 Statement::Call {
2405 function,
2406 ref arguments,
2407 result,
2408 } => {
2409 write!(self.out, "{level}")?;
2410 if let Some(expr) = result {
2411 let name = Baked(expr).to_string();
2412 let result = self.module.functions[function].result.as_ref().unwrap();
2413 self.write_type(result.ty)?;
2414 write!(self.out, " {name}")?;
2415 if let TypeInner::Array { base, size, .. } = self.module.types[result.ty].inner
2416 {
2417 self.write_array_size(base, size)?
2418 }
2419 write!(self.out, " = ")?;
2420 self.named_expressions.insert(expr, name);
2421 }
2422 write!(self.out, "{}(", &self.names[&NameKey::Function(function)])?;
2423 let arguments: Vec<_> = arguments
2424 .iter()
2425 .enumerate()
2426 .filter_map(|(i, arg)| {
2427 let arg_ty = self.module.functions[function].arguments[i].ty;
2428 match self.module.types[arg_ty].inner {
2429 TypeInner::Sampler { .. } => None,
2430 _ => Some(*arg),
2431 }
2432 })
2433 .collect();
2434 self.write_slice(&arguments, |this, _, arg| this.write_expr(*arg, ctx))?;
2435 writeln!(self.out, ");")?
2436 }
2437 Statement::Atomic {
2438 pointer,
2439 ref fun,
2440 value,
2441 result,
2442 } => {
2443 write!(self.out, "{level}")?;
2444 if let Some(result) = result {
2445 let res_name = Baked(result).to_string();
2446 let res_ty = ctx.resolve_type(result, &self.module.types);
2447 self.write_value_type(res_ty)?;
2448 write!(self.out, " {res_name} = ")?;
2449 self.named_expressions.insert(result, res_name);
2450 }
2451
2452 let fun_str = fun.to_glsl();
2453 write!(self.out, "atomic{fun_str}(")?;
2454 self.write_expr(pointer, ctx)?;
2455 write!(self.out, ", ")?;
2456 // handle the special cases
2457 match *fun {
2458 crate::AtomicFunction::Subtract => {
2459                        // we just wrote `atomicAdd` (GLSL has no `atomicSub`), so negate the argument
2460 write!(self.out, "-")?;
2461 }
2462 crate::AtomicFunction::Exchange { compare: Some(_) } => {
2463 return Err(Error::Custom(
2464 "atomic CompareExchange is not implemented".to_string(),
2465 ));
2466 }
2467 _ => {}
2468 }
2469 self.write_expr(value, ctx)?;
2470 writeln!(self.out, ");")?;
2471 }
2472 Statement::RayQuery { .. } => unreachable!(),
2473 Statement::SubgroupBallot { result, predicate } => {
2474 write!(self.out, "{level}")?;
2475 let res_name = Baked(result).to_string();
2476 let res_ty = ctx.info[result].ty.inner_with(&self.module.types);
2477 self.write_value_type(res_ty)?;
2478 write!(self.out, " {res_name} = ")?;
2479 self.named_expressions.insert(result, res_name);
2480
2481 write!(self.out, "subgroupBallot(")?;
2482 match predicate {
2483 Some(predicate) => self.write_expr(predicate, ctx)?,
2484 None => write!(self.out, "true")?,
2485 }
2486 writeln!(self.out, ");")?;
2487 }
2488 Statement::SubgroupCollectiveOperation {
2489 op,
2490 collective_op,
2491 argument,
2492 result,
2493 } => {
2494 write!(self.out, "{level}")?;
2495 let res_name = Baked(result).to_string();
2496 let res_ty = ctx.info[result].ty.inner_with(&self.module.types);
2497 self.write_value_type(res_ty)?;
2498 write!(self.out, " {res_name} = ")?;
2499 self.named_expressions.insert(result, res_name);
2500
2501 match (collective_op, op) {
2502 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::All) => {
2503 write!(self.out, "subgroupAll(")?
2504 }
2505 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Any) => {
2506 write!(self.out, "subgroupAny(")?
2507 }
2508 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Add) => {
2509 write!(self.out, "subgroupAdd(")?
2510 }
2511 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Mul) => {
2512 write!(self.out, "subgroupMul(")?
2513 }
2514 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Max) => {
2515 write!(self.out, "subgroupMax(")?
2516 }
2517 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Min) => {
2518 write!(self.out, "subgroupMin(")?
2519 }
2520 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::And) => {
2521 write!(self.out, "subgroupAnd(")?
2522 }
2523 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Or) => {
2524 write!(self.out, "subgroupOr(")?
2525 }
2526 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Xor) => {
2527 write!(self.out, "subgroupXor(")?
2528 }
2529 (crate::CollectiveOperation::ExclusiveScan, crate::SubgroupOperation::Add) => {
2530 write!(self.out, "subgroupExclusiveAdd(")?
2531 }
2532 (crate::CollectiveOperation::ExclusiveScan, crate::SubgroupOperation::Mul) => {
2533 write!(self.out, "subgroupExclusiveMul(")?
2534 }
2535 (crate::CollectiveOperation::InclusiveScan, crate::SubgroupOperation::Add) => {
2536 write!(self.out, "subgroupInclusiveAdd(")?
2537 }
2538 (crate::CollectiveOperation::InclusiveScan, crate::SubgroupOperation::Mul) => {
2539 write!(self.out, "subgroupInclusiveMul(")?
2540 }
2541 _ => unimplemented!(),
2542 }
2543 self.write_expr(argument, ctx)?;
2544 writeln!(self.out, ");")?;
2545 }
2546 Statement::SubgroupGather {
2547 mode,
2548 argument,
2549 result,
2550 } => {
2551 write!(self.out, "{level}")?;
2552 let res_name = Baked(result).to_string();
2553 let res_ty = ctx.info[result].ty.inner_with(&self.module.types);
2554 self.write_value_type(res_ty)?;
2555 write!(self.out, " {res_name} = ")?;
2556 self.named_expressions.insert(result, res_name);
2557
2558 match mode {
2559 crate::GatherMode::BroadcastFirst => {
2560 write!(self.out, "subgroupBroadcastFirst(")?;
2561 }
2562 crate::GatherMode::Broadcast(_) => {
2563 write!(self.out, "subgroupBroadcast(")?;
2564 }
2565 crate::GatherMode::Shuffle(_) => {
2566 write!(self.out, "subgroupShuffle(")?;
2567 }
2568 crate::GatherMode::ShuffleDown(_) => {
2569 write!(self.out, "subgroupShuffleDown(")?;
2570 }
2571 crate::GatherMode::ShuffleUp(_) => {
2572 write!(self.out, "subgroupShuffleUp(")?;
2573 }
2574 crate::GatherMode::ShuffleXor(_) => {
2575 write!(self.out, "subgroupShuffleXor(")?;
2576 }
2577 }
2578 self.write_expr(argument, ctx)?;
2579 match mode {
2580 crate::GatherMode::BroadcastFirst => {}
2581 crate::GatherMode::Broadcast(index)
2582 | crate::GatherMode::Shuffle(index)
2583 | crate::GatherMode::ShuffleDown(index)
2584 | crate::GatherMode::ShuffleUp(index)
2585 | crate::GatherMode::ShuffleXor(index) => {
2586 write!(self.out, ", ")?;
2587 self.write_expr(index, ctx)?;
2588 }
2589 }
2590 writeln!(self.out, ");")?;
2591 }
2592 }
2593
2594 Ok(())
2595 }
2596
2597 /// Write a const expression.
2598 ///
2599 /// Write `expr`, a handle to an [`Expression`] in the current [`Module`]'s
2600    /// constant expression arena, as a GLSL expression.
2601 ///
2602 /// # Notes
2603 /// Adds no newlines or leading/trailing whitespace
2604 ///
2605 /// [`Expression`]: crate::Expression
2606 /// [`Module`]: crate::Module
2607 fn write_const_expr(&mut self, expr: Handle<crate::Expression>) -> BackendResult {
2608 self.write_possibly_const_expr(
2609 expr,
2610 &self.module.global_expressions,
2611 |expr| &self.info[expr],
2612 |writer, expr| writer.write_const_expr(expr),
2613 )
2614 }
2615
2616 /// Write [`Expression`] variants that can occur in both runtime and const expressions.
2617 ///
2618 /// Write `expr`, a handle to an [`Expression`] in the arena `expressions`,
2619    /// as a GLSL expression. This must be one of the [`Expression`] variants
2620 /// that is allowed to occur in constant expressions.
2621 ///
2622 /// Use `write_expression` to write subexpressions.
2623 ///
2624 /// This is the common code for `write_expr`, which handles arbitrary
2625 /// runtime expressions, and `write_const_expr`, which only handles
2626 /// const-expressions. Each of those callers passes itself (essentially) as
2627 /// the `write_expression` callback, so that subexpressions are restricted
2628 /// to the appropriate variants.
2629 ///
2630 /// # Notes
2631 /// Adds no newlines or leading/trailing whitespace
2632 ///
2633 /// [`Expression`]: crate::Expression
2634 fn write_possibly_const_expr<'w, I, E>(
2635 &'w mut self,
2636 expr: Handle<crate::Expression>,
2637 expressions: &crate::Arena<crate::Expression>,
2638 info: I,
2639 write_expression: E,
2640 ) -> BackendResult
2641 where
2642 I: Fn(Handle<crate::Expression>) -> &'w proc::TypeResolution,
2643 E: Fn(&mut Self, Handle<crate::Expression>) -> BackendResult,
2644 {
2645 use crate::Expression;
2646
2647 match expressions[expr] {
2648 Expression::Literal(literal) => {
2649 match literal {
2650 // Floats are written using `Debug` instead of `Display` because it always appends the
2651                    // decimal part even when it's zero, which is needed for a valid glsl float constant
2652 crate::Literal::F64(value) => write!(self.out, "{value:?}LF")?,
2653 crate::Literal::F32(value) => write!(self.out, "{value:?}")?,
2654 // Unsigned integers need a `u` at the end
2655 //
2656                    // While `core` doesn't necessarily need it, it's allowed, and since `es` needs it we
2657                    // always write it; the extra branch wouldn't have any benefit in readability
2658 crate::Literal::U32(value) => write!(self.out, "{value}u")?,
2659 crate::Literal::I32(value) => write!(self.out, "{value}")?,
2660 crate::Literal::Bool(value) => write!(self.out, "{value}")?,
2661 crate::Literal::I64(_) => {
2662 return Err(Error::Custom("GLSL has no 64-bit integer type".into()));
2663 }
2664 crate::Literal::U64(_) => {
2665 return Err(Error::Custom("GLSL has no 64-bit integer type".into()));
2666 }
2667 crate::Literal::AbstractInt(_) | crate::Literal::AbstractFloat(_) => {
2668 return Err(Error::Custom(
2669 "Abstract types should not appear in IR presented to backends".into(),
2670 ));
2671 }
2672 }
2673 }
2674 Expression::Constant(handle) => {
2675 let constant = &self.module.constants[handle];
2676 if constant.name.is_some() {
2677 write!(self.out, "{}", self.names[&NameKey::Constant(handle)])?;
2678 } else {
2679 self.write_const_expr(constant.init)?;
2680 }
2681 }
2682 Expression::ZeroValue(ty) => {
2683 self.write_zero_init_value(ty)?;
2684 }
2685 Expression::Compose { ty, ref components } => {
2686 self.write_type(ty)?;
2687
2688 if let TypeInner::Array { base, size, .. } = self.module.types[ty].inner {
2689 self.write_array_size(base, size)?;
2690 }
2691
2692 write!(self.out, "(")?;
2693 for (index, component) in components.iter().enumerate() {
2694 if index != 0 {
2695 write!(self.out, ", ")?;
2696 }
2697 write_expression(self, *component)?;
2698 }
2699 write!(self.out, ")")?
2700 }
2701            // `Splat` needs to actually write down a vector; it's not always inferred in GLSL.
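            // e.g. splatting a float `x` into a three-component vector is written `vec3(x)`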
2702 Expression::Splat { size: _, value } => {
2703 let resolved = info(expr).inner_with(&self.module.types);
2704 self.write_value_type(resolved)?;
2705 write!(self.out, "(")?;
2706 write_expression(self, value)?;
2707 write!(self.out, ")")?
2708 }
2709 _ => unreachable!(),
2710 }
2711
2712 Ok(())
2713 }
2714
2715 /// Helper method to write expressions
2716 ///
2717 /// # Notes
2718 /// Doesn't add any newlines or leading/trailing spaces
2719 fn write_expr(
2720 &mut self,
2721 expr: Handle<crate::Expression>,
2722 ctx: &back::FunctionCtx,
2723 ) -> BackendResult {
2724 use crate::Expression;
2725
2726 if let Some(name) = self.named_expressions.get(&expr) {
2727 write!(self.out, "{name}")?;
2728 return Ok(());
2729 }
2730
2731 match ctx.expressions[expr] {
2732 Expression::Literal(_)
2733 | Expression::Constant(_)
2734 | Expression::ZeroValue(_)
2735 | Expression::Compose { .. }
2736 | Expression::Splat { .. } => {
2737 self.write_possibly_const_expr(
2738 expr,
2739 ctx.expressions,
2740 |expr| &ctx.info[expr].ty,
2741 |writer, expr| writer.write_expr(expr, ctx),
2742 )?;
2743 }
2744 Expression::Override(_) => return Err(Error::Override),
2745 // `Access` is applied to arrays, vectors and matrices and is written as indexing
2746 Expression::Access { base, index } => {
2747 self.write_expr(base, ctx)?;
2748 write!(self.out, "[")?;
2749 self.write_expr(index, ctx)?;
2750 write!(self.out, "]")?
2751 }
2752 // `AccessIndex` is the same as `Access` except that the index is a constant and it can
2753            // be applied to structs; in that case we need to find the name of the field at that
2754 // index and write `base.field_name`
2755 Expression::AccessIndex { base, index } => {
2756 self.write_expr(base, ctx)?;
2757
2758 let base_ty_res = &ctx.info[base].ty;
2759 let mut resolved = base_ty_res.inner_with(&self.module.types);
2760 let base_ty_handle = match *resolved {
2761 TypeInner::Pointer { base, space: _ } => {
2762 resolved = &self.module.types[base].inner;
2763 Some(base)
2764 }
2765 _ => base_ty_res.handle(),
2766 };
2767
2768 match *resolved {
2769 TypeInner::Vector { .. } => {
2770 // Write vector access as a swizzle
2771 write!(self.out, ".{}", back::COMPONENTS[index as usize])?
2772 }
2773 TypeInner::Matrix { .. }
2774 | TypeInner::Array { .. }
2775 | TypeInner::ValuePointer { .. } => write!(self.out, "[{index}]")?,
2776 TypeInner::Struct { .. } => {
2777                        // This will never panic when the type is a `Struct`; that isn't true
2778                        // for other types, so the unwrap is only safe inside this match arm
2779 let ty = base_ty_handle.unwrap();
2780
2781 write!(
2782 self.out,
2783 ".{}",
2784 &self.names[&NameKey::StructMember(ty, index)]
2785 )?
2786 }
2787 ref other => return Err(Error::Custom(format!("Cannot index {other:?}"))),
2788 }
2789 }
2790 // `Swizzle` adds a few letters behind the dot.
2791 Expression::Swizzle {
2792 size,
2793 vector,
2794 pattern,
2795 } => {
2796 self.write_expr(vector, ctx)?;
2797 write!(self.out, ".")?;
2798 for &sc in pattern[..size as usize].iter() {
2799 self.out.write_char(back::COMPONENTS[sc as usize])?;
2800 }
2801 }
2802 // Function arguments are written as the argument name
2803 Expression::FunctionArgument(pos) => {
2804 write!(self.out, "{}", &self.names[&ctx.argument_key(pos)])?
2805 }
2806 // Global variables need some special work for their name but
2807 // `get_global_name` does the work for us
2808 Expression::GlobalVariable(handle) => {
2809 let global = &self.module.global_variables[handle];
2810 self.write_global_name(handle, global)?
2811 }
2812            // A local is written as its name
2813 Expression::LocalVariable(handle) => {
2814 write!(self.out, "{}", self.names[&ctx.name_key(handle)])?
2815 }
2816 // glsl has no pointers so there's no load operation, just write the pointer expression
2817 Expression::Load { pointer } => self.write_expr(pointer, ctx)?,
2818 // `ImageSample` is a bit complicated compared to the rest of the IR.
2819 //
2820            // First there are three variations depending on whether the sample level is explicitly set,
2821            // automatic, or a bias:
2822 // `texture(image, coordinate)` - Automatic sample level
2823 // `texture(image, coordinate, bias)` - Bias sample level
2824 // `textureLod(image, coordinate, level)` - Zero or Exact sample level
2825 //
2826 // Furthermore if `depth_ref` is some we need to append it to the coordinate vector
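            //
            // For instance (identifiers are illustrative), sampling a depth texture
            // array with a comparison reference at the automatic level comes out as:
            // ```
            // texture(shadow_map, vec4(coords, layer, depth_ref))
            // ```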
2827 Expression::ImageSample {
2828 image,
2829 sampler: _, //TODO?
2830 gather,
2831 coordinate,
2832 array_index,
2833 offset,
2834 level,
2835 depth_ref,
2836 } => {
2837 let (dim, class, arrayed) = match *ctx.resolve_type(image, &self.module.types) {
2838 TypeInner::Image {
2839 dim,
2840 class,
2841 arrayed,
2842 ..
2843 } => (dim, class, arrayed),
2844 _ => unreachable!(),
2845 };
2846 let mut err = None;
2847 if dim == crate::ImageDimension::Cube {
2848 if offset.is_some() {
2849 err = Some("gsamplerCube[Array][Shadow] doesn't support texture sampling with offsets");
2850 }
2851 if arrayed
2852 && matches!(class, crate::ImageClass::Depth { .. })
2853 && matches!(level, crate::SampleLevel::Gradient { .. })
2854 {
2855                        err = Some("samplerCubeArrayShadow doesn't support textureGrad");
2856 }
2857 }
2858 if gather.is_some() && level != crate::SampleLevel::Zero {
2859 err = Some("textureGather doesn't support LOD parameters");
2860 }
2861 if let Some(err) = err {
2862 return Err(Error::Custom(String::from(err)));
2863 }
2864
2865 // `textureLod[Offset]` on `sampler2DArrayShadow` and `samplerCubeShadow` does not exist in GLSL,
2866 // unless `GL_EXT_texture_shadow_lod` is present.
2867 // But if the target LOD is zero, we can emulate that by using `textureGrad[Offset]` with a constant gradient of 0.
2868 let workaround_lod_with_grad = ((dim == crate::ImageDimension::Cube && !arrayed)
2869 || (dim == crate::ImageDimension::D2 && arrayed))
2870 && level == crate::SampleLevel::Zero
2871 && matches!(class, crate::ImageClass::Depth { .. })
2872 && !self.features.contains(Features::TEXTURE_SHADOW_LOD);
2873
2874 // Write the function to be used depending on the sample level
2875 let fun_name = match level {
2876 crate::SampleLevel::Zero if gather.is_some() => "textureGather",
2877 crate::SampleLevel::Zero if workaround_lod_with_grad => "textureGrad",
2878 crate::SampleLevel::Auto | crate::SampleLevel::Bias(_) => "texture",
2879 crate::SampleLevel::Zero | crate::SampleLevel::Exact(_) => "textureLod",
2880 crate::SampleLevel::Gradient { .. } => "textureGrad",
2881 };
2882 let offset_name = match offset {
2883 Some(_) => "Offset",
2884 None => "",
2885 };
2886
2887 write!(self.out, "{fun_name}{offset_name}(")?;
2888
2889 // Write the image that will be used
2890 self.write_expr(image, ctx)?;
2891 // The space here isn't required but it helps with readability
2892 write!(self.out, ", ")?;
2893
2894                // We need to get the coordinate vector size to later build a vector that's `size + 1`
2895                // if `depth_ref` is some; if it isn't a vector or scalar we panic, as that's not a valid expression
2896 let mut coord_dim = match *ctx.resolve_type(coordinate, &self.module.types) {
2897 TypeInner::Vector { size, .. } => size as u8,
2898 TypeInner::Scalar { .. } => 1,
2899 _ => unreachable!(),
2900 };
2901
2902 if array_index.is_some() {
2903 coord_dim += 1;
2904 }
2905 let merge_depth_ref = depth_ref.is_some() && gather.is_none() && coord_dim < 4;
2906 if merge_depth_ref {
2907 coord_dim += 1;
2908 }
2909
2910 let tex_1d_hack = dim == crate::ImageDimension::D1 && self.options.version.is_es();
2911 let is_vec = tex_1d_hack || coord_dim != 1;
2912 // Compose a new texture coordinates vector
2913 if is_vec {
2914 write!(self.out, "vec{}(", coord_dim + tex_1d_hack as u8)?;
2915 }
2916 self.write_expr(coordinate, ctx)?;
2917 if tex_1d_hack {
2918 write!(self.out, ", 0.0")?;
2919 }
2920 if let Some(expr) = array_index {
2921 write!(self.out, ", ")?;
2922 self.write_expr(expr, ctx)?;
2923 }
2924 if merge_depth_ref {
2925 write!(self.out, ", ")?;
2926 self.write_expr(depth_ref.unwrap(), ctx)?;
2927 }
2928 if is_vec {
2929 write!(self.out, ")")?;
2930 }
2931
2932 if let (Some(expr), false) = (depth_ref, merge_depth_ref) {
2933 write!(self.out, ", ")?;
2934 self.write_expr(expr, ctx)?;
2935 }
2936
2937 match level {
2938 // Auto needs no more arguments
2939 crate::SampleLevel::Auto => (),
2940 // Zero needs level set to 0
2941 crate::SampleLevel::Zero => {
2942 if workaround_lod_with_grad {
2943 let vec_dim = match dim {
2944 crate::ImageDimension::Cube => 3,
2945 _ => 2,
2946 };
2947 write!(self.out, ", vec{vec_dim}(0.0), vec{vec_dim}(0.0)")?;
2948 } else if gather.is_none() {
2949 write!(self.out, ", 0.0")?;
2950 }
2951 }
2952 // Exact and bias require another argument
2953 crate::SampleLevel::Exact(expr) => {
2954 write!(self.out, ", ")?;
2955 self.write_expr(expr, ctx)?;
2956 }
2957 crate::SampleLevel::Bias(_) => {
2958 // This needs to be done after the offset writing
2959 }
2960 crate::SampleLevel::Gradient { x, y } => {
2961 // If we are using sampler2D to replace sampler1D, we also
2962 // need to make sure to use vec2 gradients
2963 if tex_1d_hack {
2964 write!(self.out, ", vec2(")?;
2965 self.write_expr(x, ctx)?;
2966 write!(self.out, ", 0.0)")?;
2967 write!(self.out, ", vec2(")?;
2968 self.write_expr(y, ctx)?;
2969 write!(self.out, ", 0.0)")?;
2970 } else {
2971 write!(self.out, ", ")?;
2972 self.write_expr(x, ctx)?;
2973 write!(self.out, ", ")?;
2974 self.write_expr(y, ctx)?;
2975 }
2976 }
2977 }
2978
2979 if let Some(constant) = offset {
2980 write!(self.out, ", ")?;
2981 if tex_1d_hack {
2982 write!(self.out, "ivec2(")?;
2983 }
2984 self.write_const_expr(constant)?;
2985 if tex_1d_hack {
2986 write!(self.out, ", 0)")?;
2987 }
2988 }
2989
2990 // Bias is always the last argument
2991 if let crate::SampleLevel::Bias(expr) = level {
2992 write!(self.out, ", ")?;
2993 self.write_expr(expr, ctx)?;
2994 }
2995
2996 if let (Some(component), None) = (gather, depth_ref) {
2997 write!(self.out, ", {}", component as usize)?;
2998 }
2999
3000 // End the function
3001 write!(self.out, ")")?
3002 }
3003 Expression::ImageLoad {
3004 image,
3005 coordinate,
3006 array_index,
3007 sample,
3008 level,
3009 } => self.write_image_load(expr, ctx, image, coordinate, array_index, sample, level)?,
3010            // Query translates into one of the following:
3011 // - textureSize/imageSize
3012 // - textureQueryLevels
3013 // - textureSamples/imageSamples
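            //
            // e.g. a size query on a sampled 2D texture comes out roughly as
            // `uvec2(textureSize(tex, 0).xy)` (the identifier is illustrative)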
3014 Expression::ImageQuery { image, query } => {
3015 use crate::ImageClass;
3016
3017 // This will only panic if the module is invalid
3018 let (dim, class) = match *ctx.resolve_type(image, &self.module.types) {
3019 TypeInner::Image {
3020 dim,
3021 arrayed: _,
3022 class,
3023 } => (dim, class),
3024 _ => unreachable!(),
3025 };
3026 let components = match dim {
3027 crate::ImageDimension::D1 => 1,
3028 crate::ImageDimension::D2 => 2,
3029 crate::ImageDimension::D3 => 3,
3030 crate::ImageDimension::Cube => 2,
3031 };
3032
3033 if let crate::ImageQuery::Size { .. } = query {
3034 match components {
3035 1 => write!(self.out, "uint(")?,
3036 _ => write!(self.out, "uvec{components}(")?,
3037 }
3038 } else {
3039 write!(self.out, "uint(")?;
3040 }
3041
3042 match query {
3043 crate::ImageQuery::Size { level } => {
3044 match class {
3045 ImageClass::Sampled { multi, .. } | ImageClass::Depth { multi } => {
3046 write!(self.out, "textureSize(")?;
3047 self.write_expr(image, ctx)?;
3048 if let Some(expr) = level {
3049 let cast_to_int = matches!(
3050 *ctx.resolve_type(expr, &self.module.types),
3051 TypeInner::Scalar(crate::Scalar {
3052 kind: crate::ScalarKind::Uint,
3053 ..
3054 })
3055 );
3056
3057 write!(self.out, ", ")?;
3058
3059 if cast_to_int {
3060 write!(self.out, "int(")?;
3061 }
3062
3063 self.write_expr(expr, ctx)?;
3064
3065 if cast_to_int {
3066 write!(self.out, ")")?;
3067 }
3068 } else if !multi {
3069                                // All textureSize calls require an lod argument
3070 // except for multisampled samplers
3071 write!(self.out, ", 0")?;
3072 }
3073 }
3074 ImageClass::Storage { .. } => {
3075 write!(self.out, "imageSize(")?;
3076 self.write_expr(image, ctx)?;
3077 }
3078 }
3079 write!(self.out, ")")?;
3080 if components != 1 || self.options.version.is_es() {
3081 write!(self.out, ".{}", &"xyz"[..components])?;
3082 }
3083 }
3084 crate::ImageQuery::NumLevels => {
3085 write!(self.out, "textureQueryLevels(",)?;
3086 self.write_expr(image, ctx)?;
3087 write!(self.out, ")",)?;
3088 }
3089 crate::ImageQuery::NumLayers => {
3090 let fun_name = match class {
3091 ImageClass::Sampled { .. } | ImageClass::Depth { .. } => "textureSize",
3092 ImageClass::Storage { .. } => "imageSize",
3093 };
3094 write!(self.out, "{fun_name}(")?;
3095 self.write_expr(image, ctx)?;
3096                        // All textureSize calls require an lod argument
3097 // except for multisampled samplers
3098 if class.is_multisampled() {
3099 write!(self.out, ", 0")?;
3100 }
3101 write!(self.out, ")")?;
3102 if components != 1 || self.options.version.is_es() {
3103 write!(self.out, ".{}", back::COMPONENTS[components])?;
3104 }
3105 }
3106 crate::ImageQuery::NumSamples => {
3107 let fun_name = match class {
3108 ImageClass::Sampled { .. } | ImageClass::Depth { .. } => {
3109 "textureSamples"
3110 }
3111 ImageClass::Storage { .. } => "imageSamples",
3112 };
3113 write!(self.out, "{fun_name}(")?;
3114 self.write_expr(image, ctx)?;
3115 write!(self.out, ")",)?;
3116 }
3117 }
3118
3119 write!(self.out, ")")?;
3120 }
3121 Expression::Unary { op, expr } => {
3122 let operator_or_fn = match op {
3123 crate::UnaryOperator::Negate => "-",
3124 crate::UnaryOperator::LogicalNot => {
3125 match *ctx.resolve_type(expr, &self.module.types) {
3126 TypeInner::Vector { .. } => "not",
3127 _ => "!",
3128 }
3129 }
3130 crate::UnaryOperator::BitwiseNot => "~",
3131 };
3132 write!(self.out, "{operator_or_fn}(")?;
3133
3134 self.write_expr(expr, ctx)?;
3135
3136 write!(self.out, ")")?
3137 }
3138            // For `Binary` we just write `left op right`, except when dealing with
3139 // comparison operations on vectors as they are implemented with
3140 // builtin functions.
3141 // Once again we wrap everything in parentheses to avoid precedence issues
3142 Expression::Binary {
3143 mut op,
3144 left,
3145 right,
3146 } => {
3147                // `function` tells us how the binary operation must be written: as a builtin
3148                // call, a component-wise expansion, an explicit modulo formula, or plain `left op right`
3149 use crate::{BinaryOperator as Bo, ScalarKind as Sk, TypeInner as Ti};
3150
3151 let left_inner = ctx.resolve_type(left, &self.module.types);
3152 let right_inner = ctx.resolve_type(right, &self.module.types);
3153
3154 let function = match (left_inner, right_inner) {
3155 (&Ti::Vector { scalar, .. }, &Ti::Vector { .. }) => match op {
3156 Bo::Less
3157 | Bo::LessEqual
3158 | Bo::Greater
3159 | Bo::GreaterEqual
3160 | Bo::Equal
3161 | Bo::NotEqual => BinaryOperation::VectorCompare,
3162 Bo::Modulo if scalar.kind == Sk::Float => BinaryOperation::Modulo,
3163 Bo::And if scalar.kind == Sk::Bool => {
3164 op = crate::BinaryOperator::LogicalAnd;
3165 BinaryOperation::VectorComponentWise
3166 }
3167 Bo::InclusiveOr if scalar.kind == Sk::Bool => {
3168 op = crate::BinaryOperator::LogicalOr;
3169 BinaryOperation::VectorComponentWise
3170 }
3171 _ => BinaryOperation::Other,
3172 },
3173 _ => match (left_inner.scalar_kind(), right_inner.scalar_kind()) {
3174 (Some(Sk::Float), _) | (_, Some(Sk::Float)) => match op {
3175 Bo::Modulo => BinaryOperation::Modulo,
3176 _ => BinaryOperation::Other,
3177 },
3178 (Some(Sk::Bool), Some(Sk::Bool)) => match op {
3179 Bo::InclusiveOr => {
3180 op = crate::BinaryOperator::LogicalOr;
3181 BinaryOperation::Other
3182 }
3183 Bo::And => {
3184 op = crate::BinaryOperator::LogicalAnd;
3185 BinaryOperation::Other
3186 }
3187 _ => BinaryOperation::Other,
3188 },
3189 _ => BinaryOperation::Other,
3190 },
3191 };
3192
3193 match function {
3194 BinaryOperation::VectorCompare => {
3195 let op_str = match op {
3196 Bo::Less => "lessThan(",
3197 Bo::LessEqual => "lessThanEqual(",
3198 Bo::Greater => "greaterThan(",
3199 Bo::GreaterEqual => "greaterThanEqual(",
3200 Bo::Equal => "equal(",
3201 Bo::NotEqual => "notEqual(",
3202 _ => unreachable!(),
3203 };
3204 write!(self.out, "{op_str}")?;
3205 self.write_expr(left, ctx)?;
3206 write!(self.out, ", ")?;
3207 self.write_expr(right, ctx)?;
3208 write!(self.out, ")")?;
3209 }
3210 BinaryOperation::VectorComponentWise => {
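                        // Written out one component at a time, e.g. a logical AND on two
                        // bvec2 operands (call them a and b) becomes:
                        // ```
                        // bvec2(a.x && b.x, a.y && b.y)
                        // ```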
3211 self.write_value_type(left_inner)?;
3212 write!(self.out, "(")?;
3213
3214 let size = match *left_inner {
3215 Ti::Vector { size, .. } => size,
3216 _ => unreachable!(),
3217 };
3218
3219 for i in 0..size as usize {
3220 if i != 0 {
3221 write!(self.out, ", ")?;
3222 }
3223
3224 self.write_expr(left, ctx)?;
3225 write!(self.out, ".{}", back::COMPONENTS[i])?;
3226
3227 write!(self.out, " {} ", back::binary_operation_str(op))?;
3228
3229 self.write_expr(right, ctx)?;
3230 write!(self.out, ".{}", back::COMPONENTS[i])?;
3231 }
3232
3233 write!(self.out, ")")?;
3234 }
3235 // TODO: handle undefined behavior of BinaryOperator::Modulo
3236 //
3237 // sint:
3238 // if right == 0 return 0
3239 // if left == min(type_of(left)) && right == -1 return 0
3240 // if sign(left) == -1 || sign(right) == -1 return result as defined by WGSL
3241 //
3242 // uint:
3243 // if right == 0 return 0
3244 //
3245 // float:
3246 // if right == 0 return ? see https://github.com/gpuweb/gpuweb/issues/2798
3247 BinaryOperation::Modulo => {
3248 write!(self.out, "(")?;
3249
3250 // write `e1 - e2 * trunc(e1 / e2)`
3251 self.write_expr(left, ctx)?;
3252 write!(self.out, " - ")?;
3253 self.write_expr(right, ctx)?;
3254 write!(self.out, " * ")?;
3255 write!(self.out, "trunc(")?;
3256 self.write_expr(left, ctx)?;
3257 write!(self.out, " / ")?;
3258 self.write_expr(right, ctx)?;
3259 write!(self.out, ")")?;
3260
3261 write!(self.out, ")")?;
3262 }
3263 BinaryOperation::Other => {
3264 write!(self.out, "(")?;
3265
3266 self.write_expr(left, ctx)?;
3267 write!(self.out, " {} ", back::binary_operation_str(op))?;
3268 self.write_expr(right, ctx)?;
3269
3270 write!(self.out, ")")?;
3271 }
3272 }
3273 }
3274 // `Select` is written as `condition ? accept : reject`
3275 // We wrap everything in parentheses to avoid precedence issues
3276 Expression::Select {
3277 condition,
3278 accept,
3279 reject,
3280 } => {
3281 let cond_ty = ctx.resolve_type(condition, &self.module.types);
3282 let vec_select = if let TypeInner::Vector { .. } = *cond_ty {
3283 true
3284 } else {
3285 false
3286 };
3287
3288 // TODO: Boolean mix on desktop required GL_EXT_shader_integer_mix
3289 if vec_select {
3290                    // Glsl defines that for mix with a boolean condition the first argument
3291                    // is picked if the condition is false and the second if the condition is true
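                    // e.g. `mix(reject_vec, accept_vec, condition_bvec)` versus the scalar
                    // form `(condition ? accept : reject)` below (names are illustrative)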
3292 write!(self.out, "mix(")?;
3293 self.write_expr(reject, ctx)?;
3294 write!(self.out, ", ")?;
3295 self.write_expr(accept, ctx)?;
3296 write!(self.out, ", ")?;
3297 self.write_expr(condition, ctx)?;
3298 } else {
3299 write!(self.out, "(")?;
3300 self.write_expr(condition, ctx)?;
3301 write!(self.out, " ? ")?;
3302 self.write_expr(accept, ctx)?;
3303 write!(self.out, " : ")?;
3304 self.write_expr(reject, ctx)?;
3305 }
3306
3307 write!(self.out, ")")?
3308 }
3309 // `Derivative` is a function call to a glsl provided function
3310 Expression::Derivative { axis, ctrl, expr } => {
3311 use crate::{DerivativeAxis as Axis, DerivativeControl as Ctrl};
3312 let fun_name = if self.options.version.supports_derivative_control() {
3313 match (axis, ctrl) {
3314 (Axis::X, Ctrl::Coarse) => "dFdxCoarse",
3315 (Axis::X, Ctrl::Fine) => "dFdxFine",
3316 (Axis::X, Ctrl::None) => "dFdx",
3317 (Axis::Y, Ctrl::Coarse) => "dFdyCoarse",
3318 (Axis::Y, Ctrl::Fine) => "dFdyFine",
3319 (Axis::Y, Ctrl::None) => "dFdy",
3320 (Axis::Width, Ctrl::Coarse) => "fwidthCoarse",
3321 (Axis::Width, Ctrl::Fine) => "fwidthFine",
3322 (Axis::Width, Ctrl::None) => "fwidth",
3323 }
3324 } else {
3325 match axis {
3326 Axis::X => "dFdx",
3327 Axis::Y => "dFdy",
3328 Axis::Width => "fwidth",
3329 }
3330 };
3331 write!(self.out, "{fun_name}(")?;
3332 self.write_expr(expr, ctx)?;
3333 write!(self.out, ")")?
3334 }
3335 // `Relational` is a normal function call to some glsl provided functions
3336 Expression::Relational { fun, argument } => {
3337 use crate::RelationalFunction as Rf;
3338
3339 let fun_name = match fun {
3340 Rf::IsInf => "isinf",
3341 Rf::IsNan => "isnan",
3342 Rf::All => "all",
3343 Rf::Any => "any",
3344 };
3345 write!(self.out, "{fun_name}(")?;
3346
3347 self.write_expr(argument, ctx)?;
3348
3349 write!(self.out, ")")?
3350 }
3351 Expression::Math {
3352 fun,
3353 arg,
3354 arg1,
3355 arg2,
3356 arg3,
3357 } => {
3358 use crate::MathFunction as Mf;
3359
3360 let fun_name = match fun {
3361 // comparison
3362 Mf::Abs => "abs",
3363 Mf::Min => "min",
3364 Mf::Max => "max",
3365 Mf::Clamp => {
3366 let scalar_kind = ctx
3367 .resolve_type(arg, &self.module.types)
3368 .scalar_kind()
3369 .unwrap();
3370 match scalar_kind {
3371 crate::ScalarKind::Float => "clamp",
3372 // Clamp is undefined if min > max. In practice this means it can use a median-of-three
3373 // instruction to determine the value. This is fine according to the WGSL spec for float
3374 // clamp, but integer clamp _must_ use min-max. As such we write out min/max.
3375 _ => {
3376 write!(self.out, "min(max(")?;
3377 self.write_expr(arg, ctx)?;
3378 write!(self.out, ", ")?;
3379 self.write_expr(arg1.unwrap(), ctx)?;
3380 write!(self.out, "), ")?;
3381 self.write_expr(arg2.unwrap(), ctx)?;
3382 write!(self.out, ")")?;
3383
3384 return Ok(());
3385 }
3386 }
3387 }
3388 Mf::Saturate => {
3389 write!(self.out, "clamp(")?;
3390
3391 self.write_expr(arg, ctx)?;
3392
3393 match *ctx.resolve_type(arg, &self.module.types) {
3394 TypeInner::Vector { size, .. } => write!(
3395 self.out,
3396 ", vec{}(0.0), vec{0}(1.0)",
3397 back::vector_size_str(size)
3398 )?,
3399 _ => write!(self.out, ", 0.0, 1.0")?,
3400 }
3401
3402 write!(self.out, ")")?;
3403
3404 return Ok(());
3405 }
3406 // trigonometry
3407 Mf::Cos => "cos",
3408 Mf::Cosh => "cosh",
3409 Mf::Sin => "sin",
3410 Mf::Sinh => "sinh",
3411 Mf::Tan => "tan",
3412 Mf::Tanh => "tanh",
3413 Mf::Acos => "acos",
3414 Mf::Asin => "asin",
3415 Mf::Atan => "atan",
3416 Mf::Asinh => "asinh",
3417 Mf::Acosh => "acosh",
3418 Mf::Atanh => "atanh",
3419 Mf::Radians => "radians",
3420 Mf::Degrees => "degrees",
3421                    // glsl doesn't have an atan2 function,
3422                    // so use the two-argument variation of the atan function
3423 Mf::Atan2 => "atan",
3424 // decomposition
3425 Mf::Ceil => "ceil",
3426 Mf::Floor => "floor",
3427 Mf::Round => "roundEven",
3428 Mf::Fract => "fract",
3429 Mf::Trunc => "trunc",
3430 Mf::Modf => MODF_FUNCTION,
3431 Mf::Frexp => FREXP_FUNCTION,
3432 Mf::Ldexp => "ldexp",
3433 // exponent
3434 Mf::Exp => "exp",
3435 Mf::Exp2 => "exp2",
3436 Mf::Log => "log",
3437 Mf::Log2 => "log2",
3438 Mf::Pow => "pow",
3439 // geometry
3440 Mf::Dot => match *ctx.resolve_type(arg, &self.module.types) {
3441 TypeInner::Vector {
3442 scalar:
3443 crate::Scalar {
3444 kind: crate::ScalarKind::Float,
3445 ..
3446 },
3447 ..
3448 } => "dot",
3449 TypeInner::Vector { size, .. } => {
3450 return self.write_dot_product(arg, arg1.unwrap(), size as usize, ctx)
3451 }
3452 _ => unreachable!(
3453 "Correct TypeInner for dot product should be already validated"
3454 ),
3455 },
3456 Mf::Outer => "outerProduct",
3457 Mf::Cross => "cross",
3458 Mf::Distance => "distance",
3459 Mf::Length => "length",
3460 Mf::Normalize => "normalize",
3461 Mf::FaceForward => "faceforward",
3462 Mf::Reflect => "reflect",
3463 Mf::Refract => "refract",
3464 // computational
3465 Mf::Sign => "sign",
3466 Mf::Fma => {
3467 if self.options.version.supports_fma_function() {
3468 // Use the fma function when available
3469 "fma"
3470 } else {
3471 // No fma support. Transform the function call into an arithmetic expression
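// e.g. `fma(a, b, c)` is emitted as `(a * b + c)` on versions
// without the `fma` builtin.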
3472 write!(self.out, "(")?;
3473
3474 self.write_expr(arg, ctx)?;
3475 write!(self.out, " * ")?;
3476
3477 let arg1 =
3478 arg1.ok_or_else(|| Error::Custom("Missing fma arg1".to_owned()))?;
3479 self.write_expr(arg1, ctx)?;
3480 write!(self.out, " + ")?;
3481
3482 let arg2 =
3483 arg2.ok_or_else(|| Error::Custom("Missing fma arg2".to_owned()))?;
3484 self.write_expr(arg2, ctx)?;
3485 write!(self.out, ")")?;
3486
3487 return Ok(());
3488 }
3489 }
3490 Mf::Mix => "mix",
3491 Mf::Step => "step",
3492 Mf::SmoothStep => "smoothstep",
3493 Mf::Sqrt => "sqrt",
3494 Mf::InverseSqrt => "inversesqrt",
3495 Mf::Inverse => "inverse",
3496 Mf::Transpose => "transpose",
3497 Mf::Determinant => "determinant",
3498 // bits
3499 Mf::CountTrailingZeros => {
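// GLSL has no `countTrailingZeros` builtin; we emit `findLSB`
// clamped to the bit width, e.g. `min(uint(findLSB(e)), 32u)` for a
// `uint` scalar (with an extra `int(...)`/`ivec(...)` cast for
// signed arguments, as written below).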
3500 match *ctx.resolve_type(arg, &self.module.types) {
3501 TypeInner::Vector { size, scalar, .. } => {
3502 let s = back::vector_size_str(size);
3503 if let crate::ScalarKind::Uint = scalar.kind {
3504 write!(self.out, "min(uvec{s}(findLSB(")?;
3505 self.write_expr(arg, ctx)?;
3506 write!(self.out, ")), uvec{s}(32u))")?;
3507 } else {
3508 write!(self.out, "ivec{s}(min(uvec{s}(findLSB(")?;
3509 self.write_expr(arg, ctx)?;
3510 write!(self.out, ")), uvec{s}(32u)))")?;
3511 }
3512 }
3513 TypeInner::Scalar(scalar) => {
3514 if let crate::ScalarKind::Uint = scalar.kind {
3515 write!(self.out, "min(uint(findLSB(")?;
3516 self.write_expr(arg, ctx)?;
3517 write!(self.out, ")), 32u)")?;
3518 } else {
3519 write!(self.out, "int(min(uint(findLSB(")?;
3520 self.write_expr(arg, ctx)?;
3521 write!(self.out, ")), 32u))")?;
3522 }
3523 }
3524 _ => unreachable!(),
3525 };
3526 return Ok(());
3527 }
3528 Mf::CountLeadingZeros => {
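// GLSL has no `countLeadingZeros` builtin. When integer functions
// are available we emit `31 - findMSB(e)` (e.g. `uint(31 - findMSB(e))`
// for a `uint` scalar); otherwise we fall back to a floating point
// approximation based on `floor(log2(float(e) + 0.5))`, as written below.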
3529 if self.options.version.supports_integer_functions() {
3530 match *ctx.resolve_type(arg, &self.module.types) {
3531 TypeInner::Vector { size, scalar } => {
3532 let s = back::vector_size_str(size);
3533
3534 if let crate::ScalarKind::Uint = scalar.kind {
3535 write!(self.out, "uvec{s}(ivec{s}(31) - findMSB(")?;
3536 self.write_expr(arg, ctx)?;
3537 write!(self.out, "))")?;
3538 } else {
3539 write!(self.out, "mix(ivec{s}(31) - findMSB(")?;
3540 self.write_expr(arg, ctx)?;
3541 write!(self.out, "), ivec{s}(0), lessThan(")?;
3542 self.write_expr(arg, ctx)?;
3543 write!(self.out, ", ivec{s}(0)))")?;
3544 }
3545 }
3546 TypeInner::Scalar(scalar) => {
3547 if let crate::ScalarKind::Uint = scalar.kind {
3548 write!(self.out, "uint(31 - findMSB(")?;
3549 } else {
3550 write!(self.out, "(")?;
3551 self.write_expr(arg, ctx)?;
3552 write!(self.out, " < 0 ? 0 : 31 - findMSB(")?;
3553 }
3554
3555 self.write_expr(arg, ctx)?;
3556 write!(self.out, "))")?;
3557 }
3558 _ => unreachable!(),
3559 };
3560 } else {
3561 match *ctx.resolve_type(arg, &self.module.types) {
3562 TypeInner::Vector { size, scalar } => {
3563 let s = back::vector_size_str(size);
3564
3565 if let crate::ScalarKind::Uint = scalar.kind {
3566 write!(self.out, "uvec{s}(")?;
3567 write!(self.out, "vec{s}(31.0) - floor(log2(vec{s}(")?;
3568 self.write_expr(arg, ctx)?;
3569 write!(self.out, ") + 0.5)))")?;
3570 } else {
3571 write!(self.out, "ivec{s}(")?;
3572 write!(self.out, "mix(vec{s}(31.0) - floor(log2(vec{s}(")?;
3573 self.write_expr(arg, ctx)?;
3574 write!(self.out, ") + 0.5)), ")?;
3575 write!(self.out, "vec{s}(0.0), lessThan(")?;
3576 self.write_expr(arg, ctx)?;
3577 write!(self.out, ", ivec{s}(0u))))")?;
3578 }
3579 }
3580 TypeInner::Scalar(scalar) => {
3581 if let crate::ScalarKind::Uint = scalar.kind {
3582 write!(self.out, "uint(31.0 - floor(log2(float(")?;
3583 self.write_expr(arg, ctx)?;
3584 write!(self.out, ") + 0.5)))")?;
3585 } else {
3586 write!(self.out, "(")?;
3587 self.write_expr(arg, ctx)?;
3588 write!(self.out, " < 0 ? 0 : int(")?;
3589 write!(self.out, "31.0 - floor(log2(float(")?;
3590 self.write_expr(arg, ctx)?;
3591 write!(self.out, ") + 0.5))))")?;
3592 }
3593 }
3594 _ => unreachable!(),
3595 };
3596 }
3597
3598 return Ok(());
3599 }
3600 Mf::CountOneBits => "bitCount",
3601 Mf::ReverseBits => "bitfieldReverse",
3602 Mf::ExtractBits => {
3603 // The behavior of ExtractBits is undefined when offset + count > bit_width. We need
3604 // to sanitize the offset and count first. If we don't do this, AMD and Intel chips
3605 // will return out-of-spec values if the extracted range is not within the bit width.
3606 //
3607 // This encodes the exact formula specified by the wgsl spec, without temporary values:
3608 // https://gpuweb.github.io/gpuweb/wgsl/#extractBits-unsigned-builtin
3609 //
3610 // w = sizeof(x) * 8
3611 // o = min(offset, w)
3612 // c = min(count, w - o)
3613 //
3614 // bitfieldExtract(x, o, c)
3615 //
3616 // extract_bits(e, min(offset, w), min(count, w - min(offset, w))))
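//
// For a 32-bit scalar this emits (roughly):
// `bitfieldExtract(e, int(min(offset, 32u)), int(min(count, 32u - min(offset, 32u))))`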
3617 let scalar_bits = ctx
3618 .resolve_type(arg, &self.module.types)
3619 .scalar_width()
3620 .unwrap()
3621 * 8;
3622
3623 write!(self.out, "bitfieldExtract(")?;
3624 self.write_expr(arg, ctx)?;
3625 write!(self.out, ", int(min(")?;
3626 self.write_expr(arg1.unwrap(), ctx)?;
3627 write!(self.out, ", {scalar_bits}u)), int(min(",)?;
3628 self.write_expr(arg2.unwrap(), ctx)?;
3629 write!(self.out, ", {scalar_bits}u - min(")?;
3630 self.write_expr(arg1.unwrap(), ctx)?;
3631 write!(self.out, ", {scalar_bits}u))))")?;
3632
3633 return Ok(());
3634 }
3635 Mf::InsertBits => {
3636 // InsertBits has the same considerations as ExtractBits above
3637 let scalar_bits = ctx
3638 .resolve_type(arg, &self.module.types)
3639 .scalar_width()
3640 .unwrap()
3641 * 8;
3642
3643 write!(self.out, "bitfieldInsert(")?;
3644 self.write_expr(arg, ctx)?;
3645 write!(self.out, ", ")?;
3646 self.write_expr(arg1.unwrap(), ctx)?;
3647 write!(self.out, ", int(min(")?;
3648 self.write_expr(arg2.unwrap(), ctx)?;
3649 write!(self.out, ", {scalar_bits}u)), int(min(",)?;
3650 self.write_expr(arg3.unwrap(), ctx)?;
3651 write!(self.out, ", {scalar_bits}u - min(")?;
3652 self.write_expr(arg2.unwrap(), ctx)?;
3653 write!(self.out, ", {scalar_bits}u))))")?;
3654
3655 return Ok(());
3656 }
3657 Mf::FirstTrailingBit => "findLSB",
3658 Mf::FirstLeadingBit => "findMSB",
3659 // data packing
3660 Mf::Pack4x8snorm => "packSnorm4x8",
3661 Mf::Pack4x8unorm => "packUnorm4x8",
3662 Mf::Pack2x16snorm => "packSnorm2x16",
3663 Mf::Pack2x16unorm => "packUnorm2x16",
3664 Mf::Pack2x16float => "packHalf2x16",
3665 fun @ (Mf::Pack4xI8 | Mf::Pack4xU8) => {
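// There is no packing builtin for 8-bit components, so the pack is
// emitted as masks and shifts over the four components, e.g.
// `(e[0] & 0xFFu) | ((e[1] & 0xFFu) << 8) | ((e[2] & 0xFFu) << 16) | ((e[3] & 0xFFu) << 24)`
// for `pack4xU8`, with the whole expression additionally wrapped in
// `uint(...)` for the signed variant.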
3666 let was_signed = match fun {
3667 Mf::Pack4xI8 => true,
3668 Mf::Pack4xU8 => false,
3669 _ => unreachable!(),
3670 };
3671 let const_suffix = if was_signed { "" } else { "u" };
3672 if was_signed {
3673 write!(self.out, "uint(")?;
3674 }
3675 write!(self.out, "(")?;
3676 self.write_expr(arg, ctx)?;
3677 write!(self.out, "[0] & 0xFF{const_suffix}) | ((")?;
3678 self.write_expr(arg, ctx)?;
3679 write!(self.out, "[1] & 0xFF{const_suffix}) << 8) | ((")?;
3680 self.write_expr(arg, ctx)?;
3681 write!(self.out, "[2] & 0xFF{const_suffix}) << 16) | ((")?;
3682 self.write_expr(arg, ctx)?;
3683 write!(self.out, "[3] & 0xFF{const_suffix}) << 24)")?;
3684 if was_signed {
3685 write!(self.out, ")")?;
3686 }
3687
3688 return Ok(());
3689 }
3690 // data unpacking
3691 Mf::Unpack4x8snorm => "unpackSnorm4x8",
3692 Mf::Unpack4x8unorm => "unpackUnorm4x8",
3693 Mf::Unpack2x16snorm => "unpackSnorm2x16",
3694 Mf::Unpack2x16unorm => "unpackUnorm2x16",
3695 Mf::Unpack2x16float => "unpackHalf2x16",
3696 fun @ (Mf::Unpack4xI8 | Mf::Unpack4xU8) => {
3697 let sign_prefix = match fun {
3698 Mf::Unpack4xI8 => 'i',
3699 Mf::Unpack4xU8 => 'u',
3700 _ => unreachable!(),
3701 };
3702 write!(self.out, "{sign_prefix}vec4(")?;
3703 for i in 0..4 {
3704 write!(self.out, "bitfieldExtract(")?;
3705 // Since bitfieldExtract only sign extends if the value is signed, this
3706 // cast is needed
3707 match fun {
3708 Mf::Unpack4xI8 => {
3709 write!(self.out, "int(")?;
3710 self.write_expr(arg, ctx)?;
3711 write!(self.out, ")")?;
3712 }
3713 Mf::Unpack4xU8 => self.write_expr(arg, ctx)?,
3714 _ => unreachable!(),
3715 };
3716 write!(self.out, ", {}, 8)", i * 8)?;
3717 if i != 3 {
3718 write!(self.out, ", ")?;
3719 }
3720 }
3721 write!(self.out, ")")?;
3722
3723 return Ok(());
3724 }
3725 };
3726
3727 let extract_bits = fun == Mf::ExtractBits;
3728 let insert_bits = fun == Mf::InsertBits;
3729
3730 // Some GLSL functions always return signed integers (like findMSB),
3731 // so they need to be cast back to uint when the argument is also a uint.
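// e.g. `findMSB` applied to a `uint` argument is wrapped as
// `uint(findMSB(e))`, and to a `uvec3` argument as `uvec3(findMSB(e))`.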
3732 let ret_might_need_int_to_uint = matches!(
3733 fun,
3734 Mf::FirstTrailingBit | Mf::FirstLeadingBit | Mf::CountOneBits | Mf::Abs
3735 );
3736
3737 // Some GLSL functions only accept signed integers (like abs),
3738 // so they need their argument cast from uint to int.
3739 let arg_might_need_uint_to_int = matches!(fun, Mf::Abs);
3740
3741 // Check if the argument is an unsigned integer and return the vector size
3742 // in case it's a vector
3743 let maybe_uint_size = match *ctx.resolve_type(arg, &self.module.types) {
3744 TypeInner::Scalar(crate::Scalar {
3745 kind: crate::ScalarKind::Uint,
3746 ..
3747 }) => Some(None),
3748 TypeInner::Vector {
3749 scalar:
3750 crate::Scalar {
3751 kind: crate::ScalarKind::Uint,
3752 ..
3753 },
3754 size,
3755 } => Some(Some(size)),
3756 _ => None,
3757 };
3758
3759 // Cast to uint if the function needs it
3760 if ret_might_need_int_to_uint {
3761 if let Some(maybe_size) = maybe_uint_size {
3762 match maybe_size {
3763 Some(size) => write!(self.out, "uvec{}(", size as u8)?,
3764 None => write!(self.out, "uint(")?,
3765 }
3766 }
3767 }
3768
3769 write!(self.out, "{fun_name}(")?;
3770
3771 // Cast to int if the function needs it
3772 if arg_might_need_uint_to_int {
3773 if let Some(maybe_size) = maybe_uint_size {
3774 match maybe_size {
3775 Some(size) => write!(self.out, "ivec{}(", size as u8)?,
3776 None => write!(self.out, "int(")?,
3777 }
3778 }
3779 }
3780
3781 self.write_expr(arg, ctx)?;
3782
3783 // Close the cast from uint to int
3784 if arg_might_need_uint_to_int && maybe_uint_size.is_some() {
3785 write!(self.out, ")")?
3786 }
3787
3788 if let Some(arg) = arg1 {
3789 write!(self.out, ", ")?;
3790 if extract_bits {
3791 write!(self.out, "int(")?;
3792 self.write_expr(arg, ctx)?;
3793 write!(self.out, ")")?;
3794 } else {
3795 self.write_expr(arg, ctx)?;
3796 }
3797 }
3798 if let Some(arg) = arg2 {
3799 write!(self.out, ", ")?;
3800 if extract_bits || insert_bits {
3801 write!(self.out, "int(")?;
3802 self.write_expr(arg, ctx)?;
3803 write!(self.out, ")")?;
3804 } else {
3805 self.write_expr(arg, ctx)?;
3806 }
3807 }
3808 if let Some(arg) = arg3 {
3809 write!(self.out, ", ")?;
3810 if insert_bits {
3811 write!(self.out, "int(")?;
3812 self.write_expr(arg, ctx)?;
3813 write!(self.out, ")")?;
3814 } else {
3815 self.write_expr(arg, ctx)?;
3816 }
3817 }
3818 write!(self.out, ")")?;
3819
3820 // Close the cast from int to uint
3821 if ret_might_need_int_to_uint && maybe_uint_size.is_some() {
3822 write!(self.out, ")")?
3823 }
3824 }
3825 // `As` is always a call.
3826 // If `convert` is `Some` the function name is the target type
3827 // Else the function name is one of the GLSL-provided bitcast functions
3828 Expression::As {
3829 expr,
3830 kind: target_kind,
3831 convert,
3832 } => {
3833 let inner = ctx.resolve_type(expr, &self.module.types);
3834 match convert {
3835 Some(width) => {
3836 // this is similar to `write_type`, but with the target kind
3837 let scalar = glsl_scalar(crate::Scalar {
3838 kind: target_kind,
3839 width,
3840 })?;
3841 match *inner {
3842 TypeInner::Matrix { columns, rows, .. } => write!(
3843 self.out,
3844 "{}mat{}x{}",
3845 scalar.prefix, columns as u8, rows as u8
3846 )?,
3847 TypeInner::Vector { size, .. } => {
3848 write!(self.out, "{}vec{}", scalar.prefix, size as u8)?
3849 }
3850 _ => write!(self.out, "{}", scalar.full)?,
3851 }
3852
3853 write!(self.out, "(")?;
3854 self.write_expr(expr, ctx)?;
3855 write!(self.out, ")")?
3856 }
3857 None => {
3858 use crate::ScalarKind as Sk;
3859
3860 let target_vector_type = match *inner {
3861 TypeInner::Vector { size, scalar } => Some(TypeInner::Vector {
3862 size,
3863 scalar: crate::Scalar {
3864 kind: target_kind,
3865 width: scalar.width,
3866 },
3867 }),
3868 _ => None,
3869 };
3870
3871 let source_kind = inner.scalar_kind().unwrap();
3872
3873 match (source_kind, target_kind, target_vector_type) {
3874 // No conversion needed
3875 (Sk::Sint, Sk::Sint, _)
3876 | (Sk::Uint, Sk::Uint, _)
3877 | (Sk::Float, Sk::Float, _)
3878 | (Sk::Bool, Sk::Bool, _) => {
3879 self.write_expr(expr, ctx)?;
3880 return Ok(());
3881 }
3882
3883 // Cast to/from floats
3884 (Sk::Float, Sk::Sint, _) => write!(self.out, "floatBitsToInt")?,
3885 (Sk::Float, Sk::Uint, _) => write!(self.out, "floatBitsToUint")?,
3886 (Sk::Sint, Sk::Float, _) => write!(self.out, "intBitsToFloat")?,
3887 (Sk::Uint, Sk::Float, _) => write!(self.out, "uintBitsToFloat")?,
3888
3889 // Cast between vector types
3890 (_, _, Some(vector)) => {
3891 self.write_value_type(&vector)?;
3892 }
3893
3894 // There is no way to bitcast between Uint/Sint in glsl. Use constructor conversion
3895 (Sk::Uint | Sk::Bool, Sk::Sint, None) => write!(self.out, "int")?,
3896 (Sk::Sint | Sk::Bool, Sk::Uint, None) => write!(self.out, "uint")?,
3897 (Sk::Bool, Sk::Float, None) => write!(self.out, "float")?,
3898 (Sk::Sint | Sk::Uint | Sk::Float, Sk::Bool, None) => {
3899 write!(self.out, "bool")?
3900 }
3901
3902 (Sk::AbstractInt | Sk::AbstractFloat, _, _)
3903 | (_, Sk::AbstractInt | Sk::AbstractFloat, _) => unreachable!(),
3904 };
3905
3906 write!(self.out, "(")?;
3907 self.write_expr(expr, ctx)?;
3908 write!(self.out, ")")?;
3909 }
3910 }
3911 }
3912 // These expressions never show up in `Emit`.
3913 Expression::CallResult(_)
3914 | Expression::AtomicResult { .. }
3915 | Expression::RayQueryProceedResult
3916 | Expression::WorkGroupUniformLoadResult { .. }
3917 | Expression::SubgroupOperationResult { .. }
3918 | Expression::SubgroupBallotResult => unreachable!(),
3919 // `ArrayLength` is written as `expr.length()` and we convert it to a uint
3920 Expression::ArrayLength(expr) => {
3921 write!(self.out, "uint(")?;
3922 self.write_expr(expr, ctx)?;
3923 write!(self.out, ".length())")?
3924 }
3925 // not supported yet
3926 Expression::RayQueryGetIntersection { .. } => unreachable!(),
3927 }
3928
3929 Ok(())
3930 }
3931
3932 /// Helper function to write the local holding the clamped lod
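///
/// A minimal sketch of the emitted statement (the local name shown is illustrative):
/// `int _e12_clamped_lod = clamp(level, 0, textureQueryLevels(image) - 1);`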
3933 fn write_clamped_lod(
3934 &mut self,
3935 ctx: &back::FunctionCtx,
3936 expr: Handle<crate::Expression>,
3937 image: Handle<crate::Expression>,
3938 level_expr: Handle<crate::Expression>,
3939 ) -> Result<(), Error> {
3940 // Define our local and start a call to `clamp`
3941 write!(
3942 self.out,
3943 "int {}{} = clamp(",
3944 Baked(expr),
3945 CLAMPED_LOD_SUFFIX
3946 )?;
3947 // Write the lod that will be clamped
3948 self.write_expr(level_expr, ctx)?;
3949 // Set the min value to 0 and start a call to `textureQueryLevels` to get
3950 // the maximum value
3951 write!(self.out, ", 0, textureQueryLevels(")?;
3952 // Write the target image as an argument to `textureQueryLevels`
3953 self.write_expr(image, ctx)?;
3954 // Close the call to `textureQueryLevels`, subtract 1 from it since
3955 // the lod argument is 0-based, close the `clamp` call and end the
3956 // local declaration statement.
3957 writeln!(self.out, ") - 1);")?;
3958
3959 Ok(())
3960 }
3961
3962 // Helper method used to retrieve how many elements the coordinate vector
3963 // for image operations needs.
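// e.g. a 2D arrayed image needs 3 components, while a 1D non-arrayed
// image under the ES 1D-to-2D workaround needs 2.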
3964 fn get_coordinate_vector_size(&self, dim: crate::ImageDimension, arrayed: bool) -> u8 {
3965 // OpenGL ES doesn't have 1D images, so we need to work around it
3966 let tex_1d_hack = dim == crate::ImageDimension::D1 && self.options.version.is_es();
3967 // Get how many components the coordinate vector needs for the dimensions only
3968 let tex_coord_size = match dim {
3969 crate::ImageDimension::D1 => 1,
3970 crate::ImageDimension::D2 => 2,
3971 crate::ImageDimension::D3 => 3,
3972 crate::ImageDimension::Cube => 2,
3973 };
3974 // Calculate the true size of the coordinate vector by adding 1 for arrayed images
3975 // and another 1 if we need to workaround 1D images by making them 2D
3976 tex_coord_size + tex_1d_hack as u8 + arrayed as u8
3977 }
3978
3979 /// Helper method to write the coordinate vector for image operations
3980 fn write_texture_coord(
3981 &mut self,
3982 ctx: &back::FunctionCtx,
3983 vector_size: u8,
3984 coordinate: Handle<crate::Expression>,
3985 array_index: Option<Handle<crate::Expression>>,
3986 // Emulate 1D images as 2D for profiles that don't support it (glsl es)
3987 tex_1d_hack: bool,
3988 ) -> Result<(), Error> {
3989 match array_index {
3990 // If the image needs an array index we need to add it to the end of our
3991 // coordinate vector; to do so we will use the `ivec(ivec, scalar)`
3992 // constructor notation (NOTE: the inner `ivec` can also be a scalar, this
3993 // is important for 1D arrayed images).
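// e.g. `ivec3(coord, layer)` for a 2D arrayed image, or
// `ivec3(coord, 0, layer)` when the 1D-to-2D workaround is active.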
3994 Some(layer_expr) => {
3995 write!(self.out, "ivec{vector_size}(")?;
3996 self.write_expr(coordinate, ctx)?;
3997 write!(self.out, ", ")?;
3998 // If we are replacing sampler1D with sampler2D we also need
3999 // to add another zero to the coordinates vector for the y component
4000 if tex_1d_hack {
4001 write!(self.out, "0, ")?;
4002 }
4003 self.write_expr(layer_expr, ctx)?;
4004 write!(self.out, ")")?;
4005 }
4006 // Otherwise write just the expression (and the 1D hack if needed)
4007 None => {
4008 let uvec_size = match *ctx.resolve_type(coordinate, &self.module.types) {
4009 TypeInner::Scalar(crate::Scalar {
4010 kind: crate::ScalarKind::Uint,
4011 ..
4012 }) => Some(None),
4013 TypeInner::Vector {
4014 size,
4015 scalar:
4016 crate::Scalar {
4017 kind: crate::ScalarKind::Uint,
4018 ..
4019 },
4020 } => Some(Some(size as u32)),
4021 _ => None,
4022 };
4023 if tex_1d_hack {
4024 write!(self.out, "ivec2(")?;
4025 } else if uvec_size.is_some() {
4026 match uvec_size {
4027 Some(None) => write!(self.out, "int(")?,
4028 Some(Some(size)) => write!(self.out, "ivec{size}(")?,
4029 _ => {}
4030 }
4031 }
4032 self.write_expr(coordinate, ctx)?;
4033 if tex_1d_hack {
4034 write!(self.out, ", 0)")?;
4035 } else if uvec_size.is_some() {
4036 write!(self.out, ")")?;
4037 }
4038 }
4039 }
4040
4041 Ok(())
4042 }
4043
4044 /// Helper method to write the `ImageStore` statement
4045 fn write_image_store(
4046 &mut self,
4047 ctx: &back::FunctionCtx,
4048 image: Handle<crate::Expression>,
4049 coordinate: Handle<crate::Expression>,
4050 array_index: Option<Handle<crate::Expression>>,
4051 value: Handle<crate::Expression>,
4052 ) -> Result<(), Error> {
4053 use crate::ImageDimension as IDim;
4054
4055 // NOTE: OpenGL requires that `imageStore`s have no effects when the texel is invalid
4056 // so we don't need to generate bounds checks (OpenGL 4.2 Core §3.9.20)
4057
4058 // This will only panic if the module is invalid
4059 let dim = match *ctx.resolve_type(image, &self.module.types) {
4060 TypeInner::Image { dim, .. } => dim,
4061 _ => unreachable!(),
4062 };
4063
4064 // Begin our call to `imageStore`
4065 write!(self.out, "imageStore(")?;
4066 self.write_expr(image, ctx)?;
4067 // Separate the image argument from the coordinates
4068 write!(self.out, ", ")?;
4069
4070 // OpenGL ES doesn't have 1D images, so we need to work around it
4071 let tex_1d_hack = dim == IDim::D1 && self.options.version.is_es();
4072 // Write the coordinate vector
4073 self.write_texture_coord(
4074 ctx,
4075 // Get the size of the coordinate vector
4076 self.get_coordinate_vector_size(dim, array_index.is_some()),
4077 coordinate,
4078 array_index,
4079 tex_1d_hack,
4080 )?;
4081
4082 // Separate the coordinate from the value to write and write the expression
4083 // of the value to write.
4084 write!(self.out, ", ")?;
4085 self.write_expr(value, ctx)?;
4086 // End the call to `imageStore` and the statement.
4087 writeln!(self.out, ");")?;
4088
4089 Ok(())
4090 }
4091
4092 /// Helper method for writing an `ImageLoad` expression.
4093 #[allow(clippy::too_many_arguments)]
4094 fn write_image_load(
4095 &mut self,
4096 handle: Handle<crate::Expression>,
4097 ctx: &back::FunctionCtx,
4098 image: Handle<crate::Expression>,
4099 coordinate: Handle<crate::Expression>,
4100 array_index: Option<Handle<crate::Expression>>,
4101 sample: Option<Handle<crate::Expression>>,
4102 level: Option<Handle<crate::Expression>>,
4103 ) -> Result<(), Error> {
4104 use crate::ImageDimension as IDim;
4105
4106 // `ImageLoad` is a bit complicated.
4107 // There are two functions: one for sampled
4108 // images and another for storage images; the former uses `texelFetch` and the
4109 // latter uses `imageLoad`.
4110 //
4111 // Furthermore we have `level` which is always `Some` for sampled images
4112 // and `None` for storage images, so we end up with two functions:
4113 // - `texelFetch(image, coordinate, level)` for sampled images
4114 // - `imageLoad(image, coordinate)` for storage images
4115 //
4116 // Finally we also have to consider bounds checking. For storage images
4117 // this is easy since OpenGL requires that invalid texels always return
4118 // 0; for sampled images we need to either verify that all arguments are
4119 // in bounds (`ReadZeroSkipWrite`) or make them a valid texel (`Restrict`).
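//
// A minimal sketch of what the `ReadZeroSkipWrite` path produces for a
// sampled, mipmapped 2D image (names are illustrative):
// `(lod < textureQueryLevels(img) && all(lessThan(coord, textureSize(img, lod))) ? texelFetch(img, coord, lod) : vec4(0.0))`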
4120
4121 // This will only panic if the module is invalid
4122 let (dim, class) = match *ctx.resolve_type(image, &self.module.types) {
4123 TypeInner::Image {
4124 dim,
4125 arrayed: _,
4126 class,
4127 } => (dim, class),
4128 _ => unreachable!(),
4129 };
4130
4131 // Get the name of the function to be used for the load operation
4132 // and the policy to be used with it.
4133 let (fun_name, policy) = match class {
4134 // Sampled images inherit the policy from the user passed policies
4135 crate::ImageClass::Sampled { .. } => ("texelFetch", self.policies.image_load),
4136 crate::ImageClass::Storage { .. } => {
4137 // OpenGL ES 3.1 mentions in Chapter "8.22 Texture Image Loads and Stores" that:
4138 // "Invalid image loads will return a vector where the value of R, G, and B components
4139 // is 0 and the value of the A component is undefined."
4140 //
4141 // OpenGL 4.2 Core mentions in Chapter "3.9.20 Texture Image Loads and Stores" that:
4142 // "Invalid image loads will return zero."
4143 //
4144 // So, we only inject bounds checks for ES
4145 let policy = if self.options.version.is_es() {
4146 self.policies.image_load
4147 } else {
4148 proc::BoundsCheckPolicy::Unchecked
4149 };
4150 ("imageLoad", policy)
4151 }
4152 // TODO: Is there even a function for this?
4153 crate::ImageClass::Depth { multi: _ } => {
4154 return Err(Error::Custom(
4155 "WGSL `textureLoad` from depth textures is not supported in GLSL".to_string(),
4156 ))
4157 }
4158 };
4159
4160 // OpenGL ES doesn't have 1D images, so we need to work around it
4161 let tex_1d_hack = dim == IDim::D1 && self.options.version.is_es();
4162 // Get the size of the coordinate vector
4163 let vector_size = self.get_coordinate_vector_size(dim, array_index.is_some());
4164
4165 if let proc::BoundsCheckPolicy::ReadZeroSkipWrite = policy {
4166 // To write the bounds checks for `ReadZeroSkipWrite` we will use a
4167 // ternary operator since we are in the middle of an expression and
4168 // need to return a value.
4169 //
4170 // NOTE: GLSL short-circuits logical expressions, so once a
4171 // condition has been tested the checks that follow can rely on
4172 // it holding.
4173
4174 // Write parentheses around the ternary operator to prevent problems with
4175 // expressions emitted before or after it having more precedence
4176 write!(self.out, "(",)?;
4177
4178 // The lod check needs to precede the size check since we need
4179 // to use the lod to get the size of the image at that level.
4180 if let Some(level_expr) = level {
4181 self.write_expr(level_expr, ctx)?;
4182 write!(self.out, " < textureQueryLevels(",)?;
4183 self.write_expr(image, ctx)?;
4184 // Chain the next check
4185 write!(self.out, ") && ")?;
4186 }
4187
4188 // Check that the sample argument doesn't exceed the number of samples
4189 if let Some(sample_expr) = sample {
4190 self.write_expr(sample_expr, ctx)?;
4191 write!(self.out, " < textureSamples(",)?;
4192 self.write_expr(image, ctx)?;
4193 // Chain the next check
4194 write!(self.out, ") && ")?;
4195 }
4196
4197 // We now need to write the size checks for the coordinates and array index.
4198 // If the image is 1D, non-arrayed, and no 1D-to-2D hack was needed, we are
4199 // comparing scalars, so the less-than operator suffices. Otherwise we are
4200 // comparing two vectors, so we need the `lessThan` function; since it
4201 // returns a vector of booleans (one per comparison) we need to fold it into
4202 // a single scalar boolean. Because we want all comparisons to pass, we use
4203 // the `all` function, which only returns `true` if all the elements of the
4204 // boolean vector are also `true`.
4205 //
4206 // So we'll end with one of the following forms
4207 // - `coord < textureSize(image, lod)` for 1D images
4208 // - `all(lessThan(coord, textureSize(image, lod)))` for normal images
4209 // - `all(lessThan(ivec(coord, array_index), textureSize(image, lod)))`
4210 // for arrayed images
4211 // - `all(lessThan(coord, textureSize(image)))` for multi sampled images
4212
4213 if vector_size != 1 {
4214 write!(self.out, "all(lessThan(")?;
4215 }
4216
4217 // Write the coordinate vector
4218 self.write_texture_coord(ctx, vector_size, coordinate, array_index, tex_1d_hack)?;
4219
4220 if vector_size != 1 {
4221 // If we used the `lessThan` function we need to separate the
4222 // coordinates from the image size.
4223 write!(self.out, ", ")?;
4224 } else {
4225 // If we didn't use it (ie. 1D images) we perform the comparison
4226 // using the less than operator.
4227 write!(self.out, " < ")?;
4228 }
4229
4230 // Call `textureSize` to get our image size
4231 write!(self.out, "textureSize(")?;
4232 self.write_expr(image, ctx)?;
4233 // `textureSize` uses the lod as a second argument for mipmapped images
4234 if let Some(level_expr) = level {
4235 // Separate the image from the lod
4236 write!(self.out, ", ")?;
4237 self.write_expr(level_expr, ctx)?;
4238 }
4239 // Close the `textureSize` call
4240 write!(self.out, ")")?;
4241
4242 if vector_size != 1 {
4243 // Close the `all` and `lessThan` calls
4244 write!(self.out, "))")?;
4245 }
4246
4247 // Finally end the condition part of the ternary operator
4248 write!(self.out, " ? ")?;
4249 }
4250
4251 // Begin the call to the function used to load the texel
4252 write!(self.out, "{fun_name}(")?;
4253 self.write_expr(image, ctx)?;
4254 write!(self.out, ", ")?;
4255
4256 // If we are using `Restrict` bounds checking we need to pass valid texel
4257 // coordinates, to do so we use the `clamp` function to get a value between
4258 // 0 and the image size - 1 (indexing begins at 0)
4259 if let proc::BoundsCheckPolicy::Restrict = policy {
4260 write!(self.out, "clamp(")?;
4261 }
4262
4263 // Write the coordinate vector
4264 self.write_texture_coord(ctx, vector_size, coordinate, array_index, tex_1d_hack)?;
4265
4266 // If we are using `Restrict` bounds checking we need to write the rest of the
4267 // clamp we initiated before writing the coordinates.
4268 if let proc::BoundsCheckPolicy::Restrict = policy {
4269 // Write the min value 0
4270 if vector_size == 1 {
4271 write!(self.out, ", 0")?;
4272 } else {
4273 write!(self.out, ", ivec{vector_size}(0)")?;
4274 }
4275 // Start the `textureSize` call to use as the max value.
4276 write!(self.out, ", textureSize(")?;
4277 self.write_expr(image, ctx)?;
4278 // If the image is mipmapped we need to add the lod argument to the
4279 // `textureSize` call, but this needs to be the clamped lod, this should
4280 // have been generated earlier and put in a local.
4281 if class.is_mipmapped() {
4282 write!(self.out, ", {}{}", Baked(handle), CLAMPED_LOD_SUFFIX)?;
4283 }
4284 // Close the `textureSize` call
4285 write!(self.out, ")")?;
4286
4287 // Subtract 1 from the `textureSize` call since the coordinates are zero based.
4288 if vector_size == 1 {
4289 write!(self.out, " - 1")?;
4290 } else {
4291 write!(self.out, " - ivec{vector_size}(1)")?;
4292 }
4293
4294 // Close the `clamp` call
4295 write!(self.out, ")")?;
4296
4297 // Add the clamped lod (if present) as the second argument to the
4298 // image load function.
4299 if level.is_some() {
4300 write!(self.out, ", {}{}", Baked(handle), CLAMPED_LOD_SUFFIX)?;
4301 }
4302
4303 // If a sample argument is needed we need to clamp it between 0 and
4304 // the number of samples the image has.
4305 if let Some(sample_expr) = sample {
4306 write!(self.out, ", clamp(")?;
4307 self.write_expr(sample_expr, ctx)?;
4308 // Set the min value to 0 and start the call to `textureSamples`
4309 write!(self.out, ", 0, textureSamples(")?;
4310 self.write_expr(image, ctx)?;
4311 // Close the `textureSamples` call, subtract 1 from it since the sample
4312 // argument is zero based, and close the `clamp` call
4313 writeln!(self.out, ") - 1)")?;
4314 }
4315 } else if let Some(sample_or_level) = sample.or(level) {
4316 // If no bounds checking is needed just add the sample or level argument
4317 // after the coordinates
4318 write!(self.out, ", ")?;
4319 self.write_expr(sample_or_level, ctx)?;
4320 }
4321
4322 // Close the image load function.
4323 write!(self.out, ")")?;
4324
4325 // If we were using the `ReadZeroSkipWrite` policy we need to end the first branch
4326 // (which is taken if the condition is `true`) with a colon (`:`) and write the
4327 // second branch which is just a 0 value.
4328 if let proc::BoundsCheckPolicy::ReadZeroSkipWrite = policy {
4329 // Get the kind of the output value.
4330 let kind = match class {
4331 // Only sampled images can reach here since storage images
4332 // don't need bounds checks and depth images aren't implemented
4333 crate::ImageClass::Sampled { kind, .. } => kind,
4334 _ => unreachable!(),
4335 };
4336
4337 // End the first branch
4338 write!(self.out, " : ")?;
4339 // Write the 0 value
4340 write!(
4341 self.out,
4342 "{}vec4(",
4343 glsl_scalar(crate::Scalar { kind, width: 4 })?.prefix,
4344 )?;
4345 self.write_zero_init_scalar(kind)?;
4346 // Close the zero value constructor
4347 write!(self.out, ")")?;
4348 // Close the parentheses surrounding our ternary
4349 write!(self.out, ")")?;
4350 }
4351
4352 Ok(())
4353 }
4354
4355 fn write_named_expr(
4356 &mut self,
4357 handle: Handle<crate::Expression>,
4358 name: String,
4359 // The expression which is being named.
4360 // Generally, this is the same as handle, except in WorkGroupUniformLoad
4361 named: Handle<crate::Expression>,
4362 ctx: &back::FunctionCtx,
4363 ) -> BackendResult {
4364 match ctx.info[named].ty {
4365 proc::TypeResolution::Handle(ty_handle) => match self.module.types[ty_handle].inner {
4366 TypeInner::Struct { .. } => {
4367 let ty_name = &self.names[&NameKey::Type(ty_handle)];
4368 write!(self.out, "{ty_name}")?;
4369 }
4370 _ => {
4371 self.write_type(ty_handle)?;
4372 }
4373 },
4374 proc::TypeResolution::Value(ref inner) => {
4375 self.write_value_type(inner)?;
4376 }
4377 }
4378
4379 let resolved = ctx.resolve_type(named, &self.module.types);
4380
4381 write!(self.out, " {name}")?;
4382 if let TypeInner::Array { base, size, .. } = *resolved {
4383 self.write_array_size(base, size)?;
4384 }
4385 write!(self.out, " = ")?;
4386 self.write_expr(handle, ctx)?;
4387 writeln!(self.out, ";")?;
4388 self.named_expressions.insert(named, name);
4389
4390 Ok(())
4391 }
4392
4393 /// Helper function that writes a string with the default zero initialization for supported types
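///
/// e.g. this produces `0u` for a uint scalar, `vec3(0.0)` for a float
/// vector, and constructor calls such as `Foo(0, 0.0)` for structs
/// (the struct name and members here are illustrative).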
4394 fn write_zero_init_value(&mut self, ty: Handle<crate::Type>) -> BackendResult {
4395 let inner = &self.module.types[ty].inner;
4396 match *inner {
4397 TypeInner::Scalar(scalar) | TypeInner::Atomic(scalar) => {
4398 self.write_zero_init_scalar(scalar.kind)?;
4399 }
4400 TypeInner::Vector { scalar, .. } => {
4401 self.write_value_type(inner)?;
4402 write!(self.out, "(")?;
4403 self.write_zero_init_scalar(scalar.kind)?;
4404 write!(self.out, ")")?;
4405 }
4406 TypeInner::Matrix { .. } => {
4407 self.write_value_type(inner)?;
4408 write!(self.out, "(")?;
4409 self.write_zero_init_scalar(crate::ScalarKind::Float)?;
4410 write!(self.out, ")")?;
4411 }
4412 TypeInner::Array { base, size, .. } => {
4413 let count = match size
4414 .to_indexable_length(self.module)
4415 .expect("Bad array size")
4416 {
4417 proc::IndexableLength::Known(count) => count,
4418 proc::IndexableLength::Dynamic => return Ok(()),
4419 };
4420 self.write_type(base)?;
4421 self.write_array_size(base, size)?;
4422 write!(self.out, "(")?;
4423 for _ in 1..count {
4424 self.write_zero_init_value(base)?;
4425 write!(self.out, ", ")?;
4426 }
4427 // write last parameter without comma and space
4428 self.write_zero_init_value(base)?;
4429 write!(self.out, ")")?;
4430 }
4431 TypeInner::Struct { ref members, .. } => {
4432 let name = &self.names[&NameKey::Type(ty)];
4433 write!(self.out, "{name}(")?;
4434 for (index, member) in members.iter().enumerate() {
4435 if index != 0 {
4436 write!(self.out, ", ")?;
4437 }
4438 self.write_zero_init_value(member.ty)?;
4439 }
4440 write!(self.out, ")")?;
4441 }
4442 _ => unreachable!(),
4443 }
4444
4445 Ok(())
4446 }
4447
4448 /// Helper function that writes the zero-initialization string for a scalar
4449 fn write_zero_init_scalar(&mut self, kind: crate::ScalarKind) -> BackendResult {
4450 match kind {
4451 crate::ScalarKind::Bool => write!(self.out, "false")?,
4452 crate::ScalarKind::Uint => write!(self.out, "0u")?,
4453 crate::ScalarKind::Float => write!(self.out, "0.0")?,
4454 crate::ScalarKind::Sint => write!(self.out, "0")?,
4455 crate::ScalarKind::AbstractInt | crate::ScalarKind::AbstractFloat => {
4456 return Err(Error::Custom(
4457 "Abstract types should not appear in IR presented to backends".to_string(),
4458 ))
4459 }
4460 }
4461
4462 Ok(())
4463 }
4464
4465 /// Issue a memory barrier. Please note that to ensure visibility,
4466 /// OpenGL always requires a call to the `barrier()` function after a `memoryBarrier*()`
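///
/// e.g. for `Barrier::WORK_GROUP` this writes `memoryBarrierShared();`
/// followed by `barrier();` at the given indentation level.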
4467 fn write_barrier(&mut self, flags: crate::Barrier, level: back::Level) -> BackendResult {
4468 if flags.contains(crate::Barrier::STORAGE) {
4469 writeln!(self.out, "{level}memoryBarrierBuffer();")?;
4470 }
4471 if flags.contains(crate::Barrier::WORK_GROUP) {
4472 writeln!(self.out, "{level}memoryBarrierShared();")?;
4473 }
4474 if flags.contains(crate::Barrier::SUB_GROUP) {
4475 writeln!(self.out, "{level}subgroupMemoryBarrier();")?;
4476 }
4477 writeln!(self.out, "{level}barrier();")?;
4478 Ok(())
4479 }
4480
4481 /// Helper function that writes the glsl storage access qualifiers for a [`StorageAccess`](crate::StorageAccess)
4482 ///
4483 /// glsl allows adding both `readonly` and `writeonly`, but then the resource
4484 /// can only be used to query information about itself, which isn't what we
4485 /// want here, so when the storage access is both `LOAD` and `STORE` we add no modifiers
4486 fn write_storage_access(&mut self, storage_access: crate::StorageAccess) -> BackendResult {
4487 if !storage_access.contains(crate::StorageAccess::STORE) {
4488 write!(self.out, "readonly ")?;
4489 }
4490 if !storage_access.contains(crate::StorageAccess::LOAD) {
4491 write!(self.out, "writeonly ")?;
4492 }
4493 Ok(())
4494 }
4495
4496 /// Helper method used to produce the reflection info that's returned to the user
4497 fn collect_reflection_info(&mut self) -> Result<ReflectionInfo, Error> {
4498 use std::collections::hash_map::Entry;
4499 let info = self.info.get_entry_point(self.entry_point_idx as usize);
4500 let mut texture_mapping = crate::FastHashMap::default();
4501 let mut uniforms = crate::FastHashMap::default();
4502
4503 for sampling in info.sampling_set.iter() {
4504 let tex_name = self.reflection_names_globals[&sampling.image].clone();
4505
4506 match texture_mapping.entry(tex_name) {
4507 Entry::Vacant(v) => {
4508 v.insert(TextureMapping {
4509 texture: sampling.image,
4510 sampler: Some(sampling.sampler),
4511 });
4512 }
4513 Entry::Occupied(e) => {
4514 if e.get().sampler != Some(sampling.sampler) {
4515 log::error!("Conflicting samplers for {}", e.key());
4516 return Err(Error::ImageMultipleSamplers);
4517 }
4518 }
4519 }
4520 }
4521
4522 let mut push_constant_info = None;
4523 for (handle, var) in self.module.global_variables.iter() {
4524 if info[handle].is_empty() {
4525 continue;
4526 }
4527 match self.module.types[var.ty].inner {
4528 TypeInner::Image { .. } => {
4529 let tex_name = self.reflection_names_globals[&handle].clone();
4530 match texture_mapping.entry(tex_name) {
4531 Entry::Vacant(v) => {
4532 v.insert(TextureMapping {
4533 texture: handle,
4534 sampler: None,
4535 });
4536 }
4537 Entry::Occupied(_) => {
4538 // already used with a sampler, do nothing
4539 }
4540 }
4541 }
4542 _ => match var.space {
4543 crate::AddressSpace::Uniform | crate::AddressSpace::Storage { .. } => {
4544 let name = self.reflection_names_globals[&handle].clone();
4545 uniforms.insert(handle, name);
4546 }
4547 crate::AddressSpace::PushConstant => {
4548 let name = self.reflection_names_globals[&handle].clone();
4549 push_constant_info = Some((name, var.ty));
4550 }
4551 _ => (),
4552 },
4553 }
4554 }
4555
4556 let mut push_constant_segments = Vec::new();
4557 let mut push_constant_items = vec![];
4558
4559 if let Some((name, ty)) = push_constant_info {
4560 // We don't have a layouter available to us, so we need to create one.
4561 //
4562 // This is potentially a bit wasteful, but the set of types in the program
4563 // shouldn't be too large.
4564 let mut layouter = proc::Layouter::default();
4565 layouter.update(self.module.to_ctx()).unwrap();
4566
4567 // We start with the name of the binding itself.
4568 push_constant_segments.push(name);
4569
4570 // We then recursively collect all the uniform fields of the push constant.
4571 self.collect_push_constant_items(
4572 ty,
4573 &mut push_constant_segments,
4574 &layouter,
4575 &mut 0,
4576 &mut push_constant_items,
4577 );
4578 }
4579
4580 Ok(ReflectionInfo {
4581 texture_mapping,
4582 uniforms,
4583 varying: mem::take(&mut self.varying),
4584 push_constant_items,
4585 })
4586 }
4587
4588 fn collect_push_constant_items(
4589 &mut self,
4590 ty: Handle<crate::Type>,
4591 segments: &mut Vec<String>,
4592 layouter: &proc::Layouter,
4593 offset: &mut u32,
4594 items: &mut Vec<PushConstantItem>,
4595 ) {
4596 // At this point in the recursion, `segments` contains the path
4597 // needed to access `ty` from the root.
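//
// e.g. after recursing through a struct member and an array element the
// joined path might look like `pc_data.lights[2].color` (names here are
// purely illustrative), recorded together with its byte offset.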
4598
4599 let layout = &layouter[ty];
4600 *offset = layout.alignment.round_up(*offset);
4601 match self.module.types[ty].inner {
4602 // All these types map directly to GL uniforms.
4603 TypeInner::Scalar { .. } | TypeInner::Vector { .. } | TypeInner::Matrix { .. } => {
4604 // Build the full name, by combining all current segments.
4605 let name: String = segments.iter().map(String::as_str).collect();
4606 items.push(PushConstantItem {
4607 access_path: name,
4608 offset: *offset,
4609 ty,
4610 });
4611 *offset += layout.size;
4612 }
4613 // Arrays are recursed into.
4614 TypeInner::Array { base, size, .. } => {
4615 let crate::ArraySize::Constant(count) = size else {
4616 unreachable!("Cannot have dynamic arrays in push constants");
4617 };
4618
4619 for i in 0..count.get() {
4620 // Add the array accessor and recurse.
4621 segments.push(format!("[{i}]"));
4622 self.collect_push_constant_items(base, segments, layouter, offset, items);
4623 segments.pop();
4624 }
4625
4626 // Ensure the stride is kept by rounding up to the alignment.
4627 *offset = layout.alignment.round_up(*offset)
4628 }
4629 TypeInner::Struct { ref members, .. } => {
4630 for (index, member) in members.iter().enumerate() {
4631 // Add struct accessor and recurse.
4632 segments.push(format!(
4633 ".{}",
4634 self.names[&NameKey::StructMember(ty, index as u32)]
4635 ));
4636 self.collect_push_constant_items(member.ty, segments, layouter, offset, items);
4637 segments.pop();
4638 }
4639
4640 // Ensure ending padding is kept by rounding up to the alignment.
4641 *offset = layout.alignment.round_up(*offset)
4642 }
4643 _ => unreachable!(),
4644 }
4645 }
4646}
4647
4648/// Structure returned by [`glsl_scalar`]
4649///
4650/// It contains both a prefix used in other types and the full type name
4651struct ScalarString<'a> {
4652 /// The prefix used to compose other types
4653 prefix: &'a str,
4654 /// The name of the scalar type
4655 full: &'a str,
4656}
4657
4658/// Helper function that returns scalar related strings
4659///
4660/// Check [`ScalarString`] for the information provided
4661///
4662/// # Errors
4663 /// Returns an error if given a [`Float`](crate::ScalarKind::Float) with a width that isn't 4 or 8
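///
/// e.g. a 4-byte `Uint` yields `prefix: "u"` / `full: "uint"`, and an
/// 8-byte `Float` yields `prefix: "d"` / `full: "double"`.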
4664const fn glsl_scalar(scalar: crate::Scalar) -> Result<ScalarString<'static>, Error> {
4665 use crate::ScalarKind as Sk;
4666
4667 Ok(match scalar.kind {
4668 Sk::Sint => ScalarString {
4669 prefix: "i",
4670 full: "int",
4671 },
4672 Sk::Uint => ScalarString {
4673 prefix: "u",
4674 full: "uint",
4675 },
4676 Sk::Float => match scalar.width {
4677 4 => ScalarString {
4678 prefix: "",
4679 full: "float",
4680 },
4681 8 => ScalarString {
4682 prefix: "d",
4683 full: "double",
4684 },
4685 _ => return Err(Error::UnsupportedScalar(scalar)),
4686 },
4687 Sk::Bool => ScalarString {
4688 prefix: "b",
4689 full: "bool",
4690 },
4691 Sk::AbstractInt | Sk::AbstractFloat => {
4692 return Err(Error::UnsupportedScalar(scalar));
4693 }
4694 })
4695}
4696
4697/// Helper function that returns the glsl variable name for a builtin
4698const fn glsl_built_in(built_in: crate::BuiltIn, options: VaryingOptions) -> &'static str {
4699 use crate::BuiltIn as Bi;
4700
4701 match built_in {
4702 Bi::Position { .. } => {
4703 if options.output {
4704 "gl_Position"
4705 } else {
4706 "gl_FragCoord"
4707 }
4708 }
4709 Bi::ViewIndex if options.targeting_webgl => "int(gl_ViewID_OVR)",
4710 Bi::ViewIndex => "gl_ViewIndex",
4711 // vertex
4712 Bi::BaseInstance => "uint(gl_BaseInstance)",
4713 Bi::BaseVertex => "uint(gl_BaseVertex)",
4714 Bi::ClipDistance => "gl_ClipDistance",
4715 Bi::CullDistance => "gl_CullDistance",
4716 Bi::InstanceIndex => {
4717 if options.draw_parameters {
4718 "(uint(gl_InstanceID) + uint(gl_BaseInstanceARB))"
4719 } else {
4720 // Must match FIRST_INSTANCE_BINDING
4721 "(uint(gl_InstanceID) + naga_vs_first_instance)"
4722 }
4723 }
4724 Bi::PointSize => "gl_PointSize",
4725 Bi::VertexIndex => "uint(gl_VertexID)",
4726 Bi::DrawID => "gl_DrawID",
4727 // fragment
4728 Bi::FragDepth => "gl_FragDepth",
4729 Bi::PointCoord => "gl_PointCoord",
4730 Bi::FrontFacing => "gl_FrontFacing",
4731 Bi::PrimitiveIndex => "uint(gl_PrimitiveID)",
4732 Bi::SampleIndex => "gl_SampleID",
4733 Bi::SampleMask => {
4734 if options.output {
4735 "gl_SampleMask"
4736 } else {
4737 "gl_SampleMaskIn"
4738 }
4739 }
4740 // compute
4741 Bi::GlobalInvocationId => "gl_GlobalInvocationID",
4742 Bi::LocalInvocationId => "gl_LocalInvocationID",
4743 Bi::LocalInvocationIndex => "gl_LocalInvocationIndex",
4744 Bi::WorkGroupId => "gl_WorkGroupID",
4745 Bi::WorkGroupSize => "gl_WorkGroupSize",
4746 Bi::NumWorkGroups => "gl_NumWorkGroups",
4747 // subgroup
4748 Bi::NumSubgroups => "gl_NumSubgroups",
4749 Bi::SubgroupId => "gl_SubgroupID",
4750 Bi::SubgroupSize => "gl_SubgroupSize",
4751 Bi::SubgroupInvocationId => "gl_SubgroupInvocationID",
4752 }
4753}
4754
4755/// Helper function that returns the string corresponding to the address space
4756const fn glsl_storage_qualifier(space: crate::AddressSpace) -> Option<&'static str> {
4757 use crate::AddressSpace as As;
4758
4759 match space {
4760 As::Function => None,
4761 As::Private => None,
4762 As::Storage { .. } => Some("buffer"),
4763 As::Uniform => Some("uniform"),
4764 As::Handle => Some("uniform"),
4765 As::WorkGroup => Some("shared"),
4766 As::PushConstant => Some("uniform"),
4767 }
4768}
4769
4770/// Helper function that returns the string corresponding to the glsl interpolation qualifier
4771const fn glsl_interpolation(interpolation: crate::Interpolation) -> &'static str {
4772 use crate::Interpolation as I;
4773
4774 match interpolation {
4775 I::Perspective => "smooth",
4776 I::Linear => "noperspective",
4777 I::Flat => "flat",
4778 }
4779}
4780
4781/// Return the GLSL auxiliary qualifier for the given sampling value.
4782const fn glsl_sampling(sampling: crate::Sampling) -> BackendResult<Option<&'static str>> {
4783 use crate::Sampling as S;
4784
4785 Ok(match sampling {
4786 S::First => return Err(Error::FirstSamplingNotSupported),
4787 S::Center | S::Either => None,
4788 S::Centroid => Some("centroid"),
4789 S::Sample => Some("sample"),
4790 })
4791}
4792
4793/// Helper function that returns the glsl dimension string of [`ImageDimension`](crate::ImageDimension)
4794const fn glsl_dimension(dim: crate::ImageDimension) -> &'static str {
4795 use crate::ImageDimension as IDim;
4796
4797 match dim {
4798 IDim::D1 => "1D",
4799 IDim::D2 => "2D",
4800 IDim::D3 => "3D",
4801 IDim::Cube => "Cube",
4802 }
4803}
4804
4805/// Helper function that returns the glsl storage format string of [`StorageFormat`](crate::StorageFormat)
4806fn glsl_storage_format(format: crate::StorageFormat) -> Result<&'static str, Error> {
4807 use crate::StorageFormat as Sf;
4808
4809 Ok(match format {
4810 Sf::R8Unorm => "r8",
4811 Sf::R8Snorm => "r8_snorm",
4812 Sf::R8Uint => "r8ui",
4813 Sf::R8Sint => "r8i",
4814 Sf::R16Uint => "r16ui",
4815 Sf::R16Sint => "r16i",
4816 Sf::R16Float => "r16f",
4817 Sf::Rg8Unorm => "rg8",
4818 Sf::Rg8Snorm => "rg8_snorm",
4819 Sf::Rg8Uint => "rg8ui",
4820 Sf::Rg8Sint => "rg8i",
4821 Sf::R32Uint => "r32ui",
4822 Sf::R32Sint => "r32i",
4823 Sf::R32Float => "r32f",
4824 Sf::Rg16Uint => "rg16ui",
4825 Sf::Rg16Sint => "rg16i",
4826 Sf::Rg16Float => "rg16f",
4827 Sf::Rgba8Unorm => "rgba8",
4828 Sf::Rgba8Snorm => "rgba8_snorm",
4829 Sf::Rgba8Uint => "rgba8ui",
4830 Sf::Rgba8Sint => "rgba8i",
4831 Sf::Rgb10a2Uint => "rgb10_a2ui",
4832 Sf::Rgb10a2Unorm => "rgb10_a2",
4833 Sf::Rg11b10Ufloat => "r11f_g11f_b10f",
4834 Sf::Rg32Uint => "rg32ui",
4835 Sf::Rg32Sint => "rg32i",
4836 Sf::Rg32Float => "rg32f",
4837 Sf::Rgba16Uint => "rgba16ui",
4838 Sf::Rgba16Sint => "rgba16i",
4839 Sf::Rgba16Float => "rgba16f",
4840 Sf::Rgba32Uint => "rgba32ui",
4841 Sf::Rgba32Sint => "rgba32i",
4842 Sf::Rgba32Float => "rgba32f",
4843 Sf::R16Unorm => "r16",
4844 Sf::R16Snorm => "r16_snorm",
4845 Sf::Rg16Unorm => "rg16",
4846 Sf::Rg16Snorm => "rg16_snorm",
4847 Sf::Rgba16Unorm => "rgba16",
4848 Sf::Rgba16Snorm => "rgba16_snorm",
4849
4850 Sf::Bgra8Unorm => {
4851 return Err(Error::Custom(
4852 "Support format BGRA8 is not implemented".into(),
4853 ))
4854 }
4855 })
4856}
4857
4858fn is_value_init_supported(module: &crate::Module, ty: Handle<crate::Type>) -> bool {
4859 match module.types[ty].inner {
4860 TypeInner::Scalar { .. } | TypeInner::Vector { .. } | TypeInner::Matrix { .. } => true,
4861 TypeInner::Array { base, size, .. } => {
4862 size != crate::ArraySize::Dynamic && is_value_init_supported(module, base)
4863 }
4864 TypeInner::Struct { ref members, .. } => members
4865 .iter()
4866 .all(|member| is_value_init_supported(module, member.ty)),
4867 _ => false,
4868 }
4869}