naga/back/glsl/mod.rs
1/*!
2Backend for [GLSL][glsl] (OpenGL Shading Language).
3
The main structure is [`Writer`], which maintains internal state that is used
5to output a [`Module`](crate::Module) into glsl
6
7# Supported versions
8### Core
9- 330
10- 400
11- 410
12- 420
13- 430
14- 450
15
16### ES
17- 300
18- 310
19
20[glsl]: https://www.khronos.org/registry/OpenGL/index_gl.php
21*/
22
// GLSL is mostly a superset of C, but it also removes some parts of it. This is a list of relevant
24// aspects for this backend.
25//
26// The most notable change is the introduction of the version preprocessor directive that must
27// always be the first line of a glsl file and is written as
28// `#version number profile`
29// `number` is the version itself (i.e. 300) and `profile` is the
30// shader profile we only support "core" and "es", the former is used in desktop applications and
31// the later is used in embedded contexts, mobile devices and browsers. Each one as it's own
32// versions (at the time of writing this the latest version for "core" is 460 and for "es" is 320)
33//
34// Other important preprocessor addition is the extension directive which is written as
35// `#extension name: behaviour`
36// Extensions provide increased features in a plugin fashion but they aren't required to be
// supported (hence why they are called extensions); that's why `behaviour` is used: it specifies
38// whether the extension is strictly required or if it should only be enabled if needed. In our case
39// when we use extensions we set behaviour to `require` always.
40//
41// The only thing that glsl removes that makes a difference are pointers.
42//
43// Additions that are relevant for the backend are the discard keyword, the introduction of
44// vector, matrices, samplers, image types and functions that provide common shader operations
45
46pub use features::Features;
47
48use crate::{
49 back::{self, Baked},
50 proc::{self, ExpressionKindTracker, NameKey},
51 valid, Handle, ShaderStage, TypeInner,
52};
53use features::FeaturesManager;
54use std::{
55 cmp::Ordering,
56 fmt::{self, Error as FmtError, Write},
57 mem,
58};
59use thiserror::Error;
60
61/// Contains the features related code and the features querying method
62mod features;
63/// Contains a constant with a slice of all the reserved keywords RESERVED_KEYWORDS
64mod keywords;
65
/// List of supported `core` GLSL versions.
pub const SUPPORTED_CORE_VERSIONS: &[u16] = &[140, 150, 330, 400, 410, 420, 430, 440, 450, 460];
/// List of supported `es` GLSL versions.
pub const SUPPORTED_ES_VERSIONS: &[u16] = &[300, 310, 320];

/// The suffix of the variable that will hold the calculated clamped level
/// of detail for bounds checking in `ImageLoad`
const CLAMPED_LOD_SUFFIX: &str = "_clamped_lod";

/// Name of the generated GLSL polyfill replacing `modf`.
pub(crate) const MODF_FUNCTION: &str = "naga_modf";
/// Name of the generated GLSL polyfill replacing `frexp`.
pub(crate) const FREXP_FUNCTION: &str = "naga_frexp";

/// Name of the uniform carrying the first instance index, emitted for vertex
/// shaders when the `DRAW_PARAMETERS` writer flag is not enabled.
// Must match code in glsl_built_in
pub const FIRST_INSTANCE_BINDING: &str = "naga_vs_first_instance";

/// Mapping between resources and bindings.
pub type BindingMap = std::collections::BTreeMap<crate::ResourceBinding, u8>;
83
84impl crate::AtomicFunction {
85 const fn to_glsl(self) -> &'static str {
86 match self {
87 Self::Add | Self::Subtract => "Add",
88 Self::And => "And",
89 Self::InclusiveOr => "Or",
90 Self::ExclusiveOr => "Xor",
91 Self::Min => "Min",
92 Self::Max => "Max",
93 Self::Exchange { compare: None } => "Exchange",
94 Self::Exchange { compare: Some(_) } => "", //TODO
95 }
96 }
97}
98
99impl crate::AddressSpace {
100 const fn is_buffer(&self) -> bool {
101 match *self {
102 crate::AddressSpace::Uniform | crate::AddressSpace::Storage { .. } => true,
103 _ => false,
104 }
105 }
106
107 /// Whether a variable with this address space can be initialized
108 const fn initializable(&self) -> bool {
109 match *self {
110 crate::AddressSpace::Function | crate::AddressSpace::Private => true,
111 crate::AddressSpace::WorkGroup
112 | crate::AddressSpace::Uniform
113 | crate::AddressSpace::Storage { .. }
114 | crate::AddressSpace::Handle
115 | crate::AddressSpace::PushConstant => false,
116 }
117 }
118}
119
/// A GLSL version.
#[derive(Debug, Copy, Clone, PartialEq)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))]
pub enum Version {
    /// `core` GLSL (desktop); the payload is the version number, e.g. `330`.
    Desktop(u16),
    /// `es` GLSL; `version` is e.g. `300`, `is_webgl` distinguishes WebGL targets.
    Embedded { version: u16, is_webgl: bool },
}
130
131impl Version {
132 /// Create a new gles version
133 pub const fn new_gles(version: u16) -> Self {
134 Self::Embedded {
135 version,
136 is_webgl: false,
137 }
138 }
139
140 /// Returns true if self is `Version::Embedded` (i.e. is a es version)
141 const fn is_es(&self) -> bool {
142 match *self {
143 Version::Desktop(_) => false,
144 Version::Embedded { .. } => true,
145 }
146 }
147
148 /// Returns true if targeting WebGL
149 const fn is_webgl(&self) -> bool {
150 match *self {
151 Version::Desktop(_) => false,
152 Version::Embedded { is_webgl, .. } => is_webgl,
153 }
154 }
155
156 /// Checks the list of currently supported versions and returns true if it contains the
157 /// specified version
158 ///
159 /// # Notes
160 /// As an invalid version number will never be added to the supported version list
161 /// so this also checks for version validity
162 fn is_supported(&self) -> bool {
163 match *self {
164 Version::Desktop(v) => SUPPORTED_CORE_VERSIONS.contains(&v),
165 Version::Embedded { version: v, .. } => SUPPORTED_ES_VERSIONS.contains(&v),
166 }
167 }
168
169 fn supports_io_locations(&self) -> bool {
170 *self >= Version::Desktop(330) || *self >= Version::new_gles(300)
171 }
172
173 /// Checks if the version supports all of the explicit layouts:
174 /// - `location=` qualifiers for bindings
175 /// - `binding=` qualifiers for resources
176 ///
177 /// Note: `location=` for vertex inputs and fragment outputs is supported
178 /// unconditionally for GLES 300.
179 fn supports_explicit_locations(&self) -> bool {
180 *self >= Version::Desktop(420) || *self >= Version::new_gles(310)
181 }
182
183 fn supports_early_depth_test(&self) -> bool {
184 *self >= Version::Desktop(130) || *self >= Version::new_gles(310)
185 }
186
187 fn supports_std430_layout(&self) -> bool {
188 *self >= Version::Desktop(430) || *self >= Version::new_gles(310)
189 }
190
191 fn supports_fma_function(&self) -> bool {
192 *self >= Version::Desktop(400) || *self >= Version::new_gles(320)
193 }
194
195 fn supports_integer_functions(&self) -> bool {
196 *self >= Version::Desktop(400) || *self >= Version::new_gles(310)
197 }
198
199 fn supports_frexp_function(&self) -> bool {
200 *self >= Version::Desktop(400) || *self >= Version::new_gles(310)
201 }
202
203 fn supports_derivative_control(&self) -> bool {
204 *self >= Version::Desktop(450)
205 }
206}
207
208impl PartialOrd for Version {
209 fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
210 match (*self, *other) {
211 (Version::Desktop(x), Version::Desktop(y)) => Some(x.cmp(&y)),
212 (Version::Embedded { version: x, .. }, Version::Embedded { version: y, .. }) => {
213 Some(x.cmp(&y))
214 }
215 _ => None,
216 }
217 }
218}
219
220impl fmt::Display for Version {
221 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
222 match *self {
223 Version::Desktop(v) => write!(f, "{v} core"),
224 Version::Embedded { version: v, .. } => write!(f, "{v} es"),
225 }
226 }
227}
228
bitflags::bitflags! {
    /// Configuration flags for the [`Writer`].
    #[cfg_attr(feature = "serialize", derive(serde::Serialize))]
    #[cfg_attr(feature = "deserialize", derive(serde::Deserialize))]
    #[derive(Clone, Copy, Debug, Eq, PartialEq)]
    pub struct WriterFlags: u32 {
        /// Flip output Y and extend Z from (0, 1) to (-1, 1).
        const ADJUST_COORDINATE_SPACE = 0x1;
        /// Supports GL_EXT_texture_shadow_lod on the host, which provides
        /// additional functions on shadows and arrays of shadows.
        const TEXTURE_SHADOW_LOD = 0x2;
        /// Supports ARB_shader_draw_parameters on the host, which provides
        /// support for `gl_BaseInstanceARB`, `gl_BaseVertexARB`, `gl_DrawIDARB`, and `gl_DrawID`.
        const DRAW_PARAMETERS = 0x4;
        // NOTE(review): 0x8 is skipped — presumably a removed flag; the gap is
        // kept so serialized flag values stay stable. Confirm against history.
        /// Include unused global variables, constants and functions. By default the output will exclude
        /// global variables that are not used in the specified entrypoint (including indirect use),
        /// all constant declarations, and functions that use excluded global variables.
        const INCLUDE_UNUSED_ITEMS = 0x10;
        /// Emit `PointSize` output builtin to vertex shaders, which is
        /// required for drawing with `PointList` topology.
        ///
        /// https://registry.khronos.org/OpenGL/specs/es/3.2/GLSL_ES_Specification_3.20.html#built-in-language-variables
        /// The variable gl_PointSize is intended for a shader to write the size of the point to be rasterized. It is measured in pixels.
        /// If gl_PointSize is not written to, its value is undefined in subsequent pipe stages.
        const FORCE_POINT_SIZE = 0x20;
    }
}
256
/// Configuration used in the [`Writer`].
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))]
#[cfg_attr(feature = "deserialize", serde(default))]
pub struct Options {
    /// The GLSL version to be used.
    pub version: Version,
    /// Configuration flags for the [`Writer`].
    pub writer_flags: WriterFlags,
    /// Map of resource associations to binding locations.
    pub binding_map: BindingMap,
    /// Should workgroup variables be zero initialized (by polyfilling)?
    pub zero_initialize_workgroup_memory: bool,
}
272
273impl Default for Options {
274 fn default() -> Self {
275 Options {
276 version: Version::new_gles(310),
277 writer_flags: WriterFlags::ADJUST_COORDINATE_SPACE,
278 binding_map: BindingMap::default(),
279 zero_initialize_workgroup_memory: true,
280 }
281 }
282}
283
/// A subset of options meant to be changed per pipeline.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))]
pub struct PipelineOptions {
    /// The stage of the entry point.
    pub shader_stage: ShaderStage,
    /// The name of the entry point.
    ///
    /// If no entry point that matches is found while creating a [`Writer`], an error will be returned.
    pub entry_point: String,
    /// How many views to render to, if doing multiview rendering.
    pub multiview: Option<std::num::NonZeroU32>,
}
298
/// Location (and blend index) assigned to a varying, for reflection.
#[derive(Debug)]
pub struct VaryingLocation {
    /// The location of the global.
    /// This corresponds to `layout(location = ..)` in GLSL.
    pub location: u32,
    /// The index which can be used for dual source blending.
    /// This corresponds to `layout(index = ..)` in GLSL.
    pub index: u32,
}
308
/// Reflection info for texture mappings and uniforms.
///
/// Returned by [`Writer::write`] so callers can bind resources by name.
#[derive(Debug)]
pub struct ReflectionInfo {
    /// Mapping between texture names and variables/samplers.
    pub texture_mapping: crate::FastHashMap<String, TextureMapping>,
    /// Mapping between uniform variables and names.
    pub uniforms: crate::FastHashMap<Handle<crate::GlobalVariable>, String>,
    /// Mapping between names and attribute locations.
    pub varying: crate::FastHashMap<String, VaryingLocation>,
    /// List of push constant items in the shader.
    pub push_constant_items: Vec<PushConstantItem>,
}
321
/// Mapping between a texture and its sampler, if it exists.
///
/// GLSL pre-Vulkan has no concept of separate textures and samplers. Instead, everything is a
/// `gsamplerN` where `g` is the scalar type and `N` is the dimension. But naga uses separate textures
/// and samplers in the IR, so the backend produces a [`FastHashMap`](crate::FastHashMap) with the texture name
/// as a key and a [`TextureMapping`] as a value. This way, the user knows where to bind.
///
/// [`Storage`](crate::ImageClass::Storage) images produce `gimageN` and don't have an associated sampler,
/// so the [`sampler`](Self::sampler) field will be [`None`].
#[derive(Debug, Clone)]
pub struct TextureMapping {
    /// Handle to the image global variable.
    pub texture: Handle<crate::GlobalVariable>,
    /// Handle to the associated sampler global variable, if it exists.
    pub sampler: Option<Handle<crate::GlobalVariable>>,
}
338
/// All information to bind a single uniform value to the shader.
///
/// Push constants are emulated using traditional uniforms in OpenGL.
///
/// These are composed of a set of primitives (scalar, vector, matrix) that
/// are given names. Because they are not backed by the concept of a buffer,
/// we must do the work of calculating the offset of each primitive in the
/// push constant block.
#[derive(Debug, Clone)]
pub struct PushConstantItem {
    /// GL uniform name for the item. This name is the same as if you were
    /// to access it directly from a GLSL shader.
    ///
    /// With the following example, the following names will be generated,
    /// one name per GLSL uniform.
    ///
    /// ```glsl
    /// struct InnerStruct {
    ///     value: f32,
    /// }
    ///
    /// struct PushConstant {
    ///     InnerStruct inner;
    ///     vec4 array[2];
    /// }
    ///
    /// uniform PushConstant _push_constant_binding_cs;
    /// ```
    ///
    /// ```text
    /// - _push_constant_binding_cs.inner.value
    /// - _push_constant_binding_cs.array[0]
    /// - _push_constant_binding_cs.array[1]
    /// ```
    ///
    pub access_path: String,
    /// Type of the uniform. This will only ever be a scalar, vector, or matrix.
    pub ty: Handle<crate::Type>,
    /// The offset in the push constant memory block this uniform maps to.
    ///
    /// The size of the uniform can be derived from the type.
    pub offset: u32,
}
382
/// Helper structure that hands out sequential, unique `u32` ids.
#[derive(Default)]
struct IdGenerator(u32);

impl IdGenerator {
    /// Returns a number guaranteed to be unique for this `IdGenerator`.
    fn generate(&mut self) -> u32 {
        // A simple post-incremented counter is all that's needed.
        let id = self.0;
        self.0 = id + 1;
        id
    }
}
396
/// Assorted options needed for generating varyings.
#[derive(Clone, Copy)]
struct VaryingOptions {
    /// Whether the varying is an output (`true`) or an input (`false`).
    output: bool,
    /// Whether the target is WebGL (see [`Version::is_webgl`]).
    targeting_webgl: bool,
    /// Whether [`WriterFlags::DRAW_PARAMETERS`] is enabled.
    draw_parameters: bool,
}
404
405impl VaryingOptions {
406 const fn from_writer_options(options: &Options, output: bool) -> Self {
407 Self {
408 output,
409 targeting_webgl: options.version.is_webgl(),
410 draw_parameters: options.writer_flags.contains(WriterFlags::DRAW_PARAMETERS),
411 }
412 }
413}
414
/// Helper wrapper used to get a name for a varying
///
/// Varyings have different naming schemes depending on their binding:
/// - Varyings with builtin bindings get their name from [`glsl_built_in`].
/// - Varyings with location bindings are named `_S_location_X` where `S` is a
///   prefix identifying which pipeline stage the varying connects, and `X` is
///   the location.
struct VaryingName<'a> {
    /// The binding (builtin or location) being named.
    binding: &'a crate::Binding,
    /// The stage whose interface this varying belongs to.
    stage: ShaderStage,
    /// Direction and target info used to pick the name scheme.
    options: VaryingOptions,
}
427impl fmt::Display for VaryingName<'_> {
428 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
429 match *self.binding {
430 crate::Binding::Location {
431 second_blend_source: true,
432 ..
433 } => {
434 write!(f, "_fs2p_location1",)
435 }
436 crate::Binding::Location { location, .. } => {
437 let prefix = match (self.stage, self.options.output) {
438 (ShaderStage::Compute, _) => unreachable!(),
439 // pipeline to vertex
440 (ShaderStage::Vertex, false) => "p2vs",
441 // vertex to fragment
442 (ShaderStage::Vertex, true) | (ShaderStage::Fragment, false) => "vs2fs",
443 // fragment to pipeline
444 (ShaderStage::Fragment, true) => "fs2p",
445 };
446 write!(f, "_{prefix}_location{location}",)
447 }
448 crate::Binding::BuiltIn(built_in) => {
449 write!(f, "{}", glsl_built_in(built_in, self.options))
450 }
451 }
452 }
453}
454
455impl ShaderStage {
456 const fn to_str(self) -> &'static str {
457 match self {
458 ShaderStage::Compute => "cs",
459 ShaderStage::Fragment => "fs",
460 ShaderStage::Vertex => "vs",
461 }
462 }
463}
464
/// Shorthand result used internally by the backend; `T` defaults to `()`
/// because most writer helpers only report success or failure.
type BackendResult<T = ()> = Result<T, Error>;
467
/// A GLSL compilation error.
#[derive(Debug, Error)]
pub enum Error {
    /// An error occurred while writing to the output.
    #[error("Format error")]
    FmtError(#[from] FmtError),
    /// The specified [`Version`] doesn't have all required [`Features`].
    ///
    /// Contains the missing [`Features`].
    #[error("The selected version doesn't support {0:?}")]
    MissingFeatures(Features),
    /// [`AddressSpace::PushConstant`](crate::AddressSpace::PushConstant) was used more than
    /// once in the entry point, which isn't supported.
    #[error("Multiple push constants aren't supported")]
    MultiplePushConstants,
    /// The specified [`Version`] isn't supported.
    #[error("The specified version isn't supported")]
    VersionNotSupported,
    /// The entry point couldn't be found.
    #[error("The requested entry point couldn't be found")]
    EntryPointNotFound,
    /// A call was made to an unsupported external.
    #[error("A call was made to an unsupported external: {0}")]
    UnsupportedExternal(String),
    /// A scalar with an unsupported width was requested.
    #[error("A scalar with an unsupported width was requested: {0:?}")]
    UnsupportedScalar(crate::Scalar),
    /// An image was used with multiple samplers, which isn't supported.
    #[error("A image was used with multiple samplers")]
    ImageMultipleSamplers,
    /// A miscellaneous error described by its message.
    #[error("{0}")]
    Custom(String),
    /// The module still contains unevaluated override-expressions; these must
    /// be resolved before the backend runs (checked in [`Writer::new`]).
    #[error("overrides should not be present at this stage")]
    Override,
    /// [`crate::Sampling::First`] is unsupported.
    #[error("`{:?}` sampling is unsupported", crate::Sampling::First)]
    FirstSamplingNotSupported,
}
506
/// Binary operation with a different logic on the GLSL side.
///
/// Classifies how a naga binary operator must be rendered in GLSL.
enum BinaryOperation {
    /// Vector comparison should use the function like `greaterThan()`, etc.
    VectorCompare,
    /// Vector component wise operation; used to polyfill unsupported ops like `|` and `&` for `bvecN`'s
    VectorComponentWise,
    /// GLSL `%` is SPIR-V `OpUMod/OpSMod` and `mod()` is `OpFMod`, but [`BinaryOperator::Modulo`](crate::BinaryOperator::Modulo) is `OpFRem`.
    Modulo,
    /// Any plain operation. No additional logic required.
    Other,
}
518
/// Writer responsible for all code generation.
pub struct Writer<'a, W> {
    // Inputs
    /// The module being written.
    module: &'a crate::Module,
    /// The module analysis.
    info: &'a valid::ModuleInfo,
    /// The output writer.
    out: W,
    /// User defined configuration to be used.
    options: &'a Options,
    /// The bound checking policies to be used
    policies: proc::BoundsCheckPolicies,

    // Internal State
    /// Features manager used to store all the needed features and write them.
    features: FeaturesManager,
    /// Name generator, seeded with the GLSL reserved words and the reserved
    /// prefixes in [`Writer::new`].
    namer: proc::Namer,
    /// A map with all the names needed for writing the module
    /// (generated by a [`Namer`](crate::proc::Namer)).
    names: crate::FastHashMap<NameKey, String>,
    /// A map with the names of global variables needed for reflections.
    reflection_names_globals: crate::FastHashMap<Handle<crate::GlobalVariable>, String>,
    /// The selected entry point.
    entry_point: &'a crate::EntryPoint,
    /// The index of the selected entry point.
    entry_point_idx: proc::EntryPointIndex,
    /// A generator for unique block numbers.
    block_id: IdGenerator,
    /// Set of expressions that have associated temporary variables.
    named_expressions: crate::NamedExpressions,
    /// Set of expressions that need to be baked to avoid unnecessary repetition in output
    need_bake_expressions: back::NeedBakeExpressions,
    /// Information about nesting of loops and switches.
    ///
    /// Used for forwarding continue statements in switches that have been
    /// transformed to `do {} while(false);` loops.
    continue_ctx: back::continue_forward::ContinueCtx,
    /// How many views to render to, if doing multiview rendering.
    multiview: Option<std::num::NonZeroU32>,
    /// Mapping of varying variables to their location. Needed for reflections.
    varying: crate::FastHashMap<String, VaryingLocation>,
}
562
563impl<'a, W: Write> Writer<'a, W> {
564 /// Creates a new [`Writer`] instance.
565 ///
566 /// # Errors
567 /// - If the version specified is invalid or supported.
568 /// - If the entry point couldn't be found in the module.
569 /// - If the version specified doesn't support some used features.
570 pub fn new(
571 out: W,
572 module: &'a crate::Module,
573 info: &'a valid::ModuleInfo,
574 options: &'a Options,
575 pipeline_options: &'a PipelineOptions,
576 policies: proc::BoundsCheckPolicies,
577 ) -> Result<Self, Error> {
578 if !module.overrides.is_empty() {
579 return Err(Error::Override);
580 }
581
582 // Check if the requested version is supported
583 if !options.version.is_supported() {
584 log::error!("Version {}", options.version);
585 return Err(Error::VersionNotSupported);
586 }
587
588 // Try to find the entry point and corresponding index
589 let ep_idx = module
590 .entry_points
591 .iter()
592 .position(|ep| {
593 pipeline_options.shader_stage == ep.stage && pipeline_options.entry_point == ep.name
594 })
595 .ok_or(Error::EntryPointNotFound)?;
596
597 // Generate a map with names required to write the module
598 let mut names = crate::FastHashMap::default();
599 let mut namer = proc::Namer::default();
600 namer.reset(
601 module,
602 keywords::RESERVED_KEYWORDS,
603 &[],
604 &[],
605 &[
606 "gl_", // all GL built-in variables
607 "_group", // all normal bindings
608 "_push_constant_binding_", // all push constant bindings
609 ],
610 &mut names,
611 );
612
613 // Build the instance
614 let mut this = Self {
615 module,
616 info,
617 out,
618 options,
619 policies,
620
621 namer,
622 features: FeaturesManager::new(),
623 names,
624 reflection_names_globals: crate::FastHashMap::default(),
625 entry_point: &module.entry_points[ep_idx],
626 entry_point_idx: ep_idx as u16,
627 multiview: pipeline_options.multiview,
628 block_id: IdGenerator::default(),
629 named_expressions: Default::default(),
630 need_bake_expressions: Default::default(),
631 continue_ctx: back::continue_forward::ContinueCtx::default(),
632 varying: Default::default(),
633 };
634
635 // Find all features required to print this module
636 this.collect_required_features()?;
637
638 Ok(this)
639 }
640
641 /// Writes the [`Module`](crate::Module) as glsl to the output
642 ///
643 /// # Notes
644 /// If an error occurs while writing, the output might have been written partially
645 ///
646 /// # Panics
647 /// Might panic if the module is invalid
    pub fn write(&mut self) -> Result<ReflectionInfo, Error> {
        // We use `writeln!(self.out)` throughout the write to add newlines
        // to make the output more readable

        let es = self.options.version.is_es();

        // Write the version (It must be the first thing or it isn't a valid glsl output)
        writeln!(self.out, "#version {}", self.options.version)?;
        // Write all the needed extensions
        //
        // This used to be the last thing being written as it allowed to search for features while
        // writing the module saving some loops but some older versions (420 or less) required the
        // extensions to appear before being used, even though extensions are part of the
        // preprocessor not the processor ¯\_(ツ)_/¯
        self.features.write(self.options, &mut self.out)?;

        // glsl es requires a precision to be specified for floats and ints
        // TODO: Should this be user configurable?
        if es {
            writeln!(self.out)?;
            writeln!(self.out, "precision highp float;")?;
            writeln!(self.out, "precision highp int;")?;
            writeln!(self.out)?;
        }

        // Compute shaders must declare their workgroup size up front.
        if self.entry_point.stage == ShaderStage::Compute {
            let workgroup_size = self.entry_point.workgroup_size;
            writeln!(
                self.out,
                "layout(local_size_x = {}, local_size_y = {}, local_size_z = {}) in;",
                workgroup_size[0], workgroup_size[1], workgroup_size[2]
            )?;
            writeln!(self.out)?;
        }

        // Without draw-parameters support, the first instance index must be
        // fed to the vertex shader through a plain uniform.
        if self.entry_point.stage == ShaderStage::Vertex
            && !self
                .options
                .writer_flags
                .contains(WriterFlags::DRAW_PARAMETERS)
            && self.features.contains(Features::INSTANCE_INDEX)
        {
            writeln!(self.out, "uniform uint {FIRST_INSTANCE_BINDING};")?;
            writeln!(self.out)?;
        }

        // Enable early depth tests if needed
        if let Some(depth_test) = self.entry_point.early_depth_test {
            // If early depth test is supported for this version of GLSL
            if self.options.version.supports_early_depth_test() {
                writeln!(self.out, "layout(early_fragment_tests) in;")?;

                if let Some(conservative) = depth_test.conservative {
                    use crate::ConservativeDepth as Cd;

                    let depth = match conservative {
                        Cd::GreaterEqual => "greater",
                        Cd::LessEqual => "less",
                        Cd::Unchanged => "unchanged",
                    };
                    writeln!(self.out, "layout (depth_{depth}) out float gl_FragDepth;")?;
                }
                writeln!(self.out)?;
            } else {
                // Not fatal: the test is simply dropped on older versions.
                log::warn!(
                    "Early depth testing is not supported for this version of GLSL: {}",
                    self.options.version
                );
            }
        }

        // Multiview (OVR_multiview2) needs the view count declared up front.
        if self.entry_point.stage == ShaderStage::Vertex && self.options.version.is_webgl() {
            if let Some(multiview) = self.multiview.as_ref() {
                writeln!(self.out, "layout(num_views = {multiview}) in;")?;
                writeln!(self.out)?;
            }
        }

        // Write struct types.
        //
        // These are always ordered because the IR is structured in a way that
        // you can't make a struct without adding all of its members first.
        for (handle, ty) in self.module.types.iter() {
            if let TypeInner::Struct { ref members, .. } = ty.inner {
                // Structures ending with runtime-sized arrays can only be
                // rendered as shader storage blocks in GLSL, not stand-alone
                // struct types.
                if !self.module.types[members.last().unwrap().ty]
                    .inner
                    .is_dynamically_sized(&self.module.types)
                {
                    let name = &self.names[&NameKey::Type(handle)];
                    write!(self.out, "struct {name} ")?;
                    self.write_struct_body(handle, members)?;
                    writeln!(self.out, ";")?;
                }
            }
        }

        // Write functions to create special types.
        for (type_key, struct_ty) in self.module.special_types.predeclared_types.iter() {
            match type_key {
                &crate::PredeclaredType::ModfResult { size, scalar }
                | &crate::PredeclaredType::FrexpResult { size, scalar } => {
                    // Pick the GLSL argument type: (d)vecN for vectors,
                    // double/float for scalars.
                    let arg_type_name_owner;
                    let arg_type_name = if let Some(size) = size {
                        arg_type_name_owner = format!(
                            "{}vec{}",
                            if scalar.width == 8 { "d" } else { "" },
                            size as u8
                        );
                        &arg_type_name_owner
                    } else if scalar.width == 8 {
                        "double"
                    } else {
                        "float"
                    };

                    // `modf` returns the whole part with the argument's type,
                    // `frexp` returns an integer exponent instead.
                    let other_type_name_owner;
                    let (defined_func_name, called_func_name, other_type_name) =
                        if matches!(type_key, &crate::PredeclaredType::ModfResult { .. }) {
                            (MODF_FUNCTION, "modf", arg_type_name)
                        } else {
                            let other_type_name = if let Some(size) = size {
                                other_type_name_owner = format!("ivec{}", size as u8);
                                &other_type_name_owner
                            } else {
                                "int"
                            };
                            (FREXP_FUNCTION, "frexp", other_type_name)
                        };

                    let struct_name = &self.names[&NameKey::Type(*struct_ty)];

                    writeln!(self.out)?;
                    // If the version lacks a native `frexp`, emit a polyfill
                    // built from `log2`/`exp2`; otherwise wrap the builtin.
                    if !self.options.version.supports_frexp_function()
                        && matches!(type_key, &crate::PredeclaredType::FrexpResult { .. })
                    {
                        writeln!(
                            self.out,
                            "{struct_name} {defined_func_name}({arg_type_name} arg) {{
    {other_type_name} other = arg == {arg_type_name}(0) ? {other_type_name}(0) : {other_type_name}({arg_type_name}(1) + log2(arg));
    {arg_type_name} fract = arg * exp2({arg_type_name}(-other));
    return {struct_name}(fract, other);
}}",
                        )?;
                    } else {
                        writeln!(
                            self.out,
                            "{struct_name} {defined_func_name}({arg_type_name} arg) {{
    {other_type_name} other;
    {arg_type_name} fract = {called_func_name}(arg, other);
    return {struct_name}(fract, other);
}}",
                        )?;
                    }
                }
                &crate::PredeclaredType::AtomicCompareExchangeWeakResult { .. } => {}
            }
        }

        // Write all named constants
        let mut constants = self
            .module
            .constants
            .iter()
            .filter(|&(_, c)| c.name.is_some())
            .peekable();
        while let Some((handle, _)) = constants.next() {
            self.write_global_constant(handle)?;
            // Add extra newline for readability on last iteration
            if constants.peek().is_none() {
                writeln!(self.out)?;
            }
        }

        // Analysis info for the entry point we are emitting; used below to
        // decide which globals and functions are actually reachable.
        let ep_info = self.info.get_entry_point(self.entry_point_idx as usize);

        // Write the globals
        //
        // Unless explicitly disabled with WriterFlags::INCLUDE_UNUSED_ITEMS,
        // we filter all globals that aren't used by the selected entry point as they might
        // interfere with each other (i.e. two globals with the same location but with
        // different classes)
        let include_unused = self
            .options
            .writer_flags
            .contains(WriterFlags::INCLUDE_UNUSED_ITEMS);
        for (handle, global) in self.module.global_variables.iter() {
            let is_unused = ep_info[handle].is_empty();
            if !include_unused && is_unused {
                continue;
            }

            match self.module.types[global.ty].inner {
                // We treat images separately because they might require
                // writing the storage format
                TypeInner::Image {
                    mut dim,
                    arrayed,
                    class,
                } => {
                    // Gather the storage format if needed
                    let storage_format_access = match self.module.types[global.ty].inner {
                        TypeInner::Image {
                            class: crate::ImageClass::Storage { format, access },
                            ..
                        } => Some((format, access)),
                        _ => None,
                    };

                    // GLSL ES has no 1D images; promote them to 2D.
                    if dim == crate::ImageDimension::D1 && es {
                        dim = crate::ImageDimension::D2
                    }

                    // Gather the location if needed
                    let layout_binding = if self.options.version.supports_explicit_locations() {
                        let br = global.binding.as_ref().unwrap();
                        self.options.binding_map.get(br).cloned()
                    } else {
                        None
                    };

                    // Write all the layout qualifiers
                    if layout_binding.is_some() || storage_format_access.is_some() {
                        write!(self.out, "layout(")?;
                        if let Some(binding) = layout_binding {
                            write!(self.out, "binding = {binding}")?;
                        }
                        if let Some((format, _)) = storage_format_access {
                            let format_str = glsl_storage_format(format)?;
                            let separator = match layout_binding {
                                Some(_) => ",",
                                None => "",
                            };
                            write!(self.out, "{separator}{format_str}")?;
                        }
                        write!(self.out, ") ")?;
                    }

                    if let Some((_, access)) = storage_format_access {
                        self.write_storage_access(access)?;
                    }

                    // All images in glsl are `uniform`
                    // The trailing space is important
                    write!(self.out, "uniform ")?;

                    // write the type
                    //
                    // This is why we need the leading space because `write_image_type` doesn't add
                    // any spaces at the beginning or end
                    self.write_image_type(dim, arrayed, class)?;

                    // Finally write the name and end the global with a `;`
                    // The leading space is important
                    let global_name = self.get_global_name(handle, global);
                    writeln!(self.out, " {global_name};")?;
                    writeln!(self.out)?;

                    self.reflection_names_globals.insert(handle, global_name);
                }
                // glsl has no concept of samplers so we just ignore it
                TypeInner::Sampler { .. } => continue,
                // All other globals are written by `write_global`
                _ => {
                    self.write_global(handle, global)?;
                    // Add a newline (only for readability)
                    writeln!(self.out)?;
                }
            }
        }

        // Declare the entry point's inputs and outputs as varyings.
        for arg in self.entry_point.function.arguments.iter() {
            self.write_varying(arg.binding.as_ref(), arg.ty, false)?;
        }
        if let Some(ref result) = self.entry_point.function.result {
            self.write_varying(result.binding.as_ref(), result.ty, true)?;
        }
        writeln!(self.out)?;

        // Write all regular functions
        for (handle, function) in self.module.functions.iter() {
            // Check that the function doesn't use globals that aren't supported
            // by the current entry point
            if !include_unused && !ep_info.dominates_global_use(&self.info[handle]) {
                continue;
            }

            let fun_info = &self.info[handle];

            // Skip functions that are not compatible with this entry point's stage.
            //
            // When validation is enabled, it rejects modules whose entry points try to call
            // incompatible functions, so if we got this far, then any functions incompatible
            // with our selected entry point must not be used.
            //
            // When validation is disabled, `fun_info.available_stages` is always just
            // `ShaderStages::all()`, so this will write all functions in the module, and
            // the downstream GLSL compiler will catch any problems.
            if !fun_info.available_stages.contains(ep_info.available_stages) {
                continue;
            }

            // Write the function
            self.write_function(back::FunctionType::Function(handle), function, fun_info)?;

            writeln!(self.out)?;
        }

        // The entry point itself is written last, after everything it calls.
        self.write_function(
            back::FunctionType::EntryPoint(self.entry_point_idx),
            &self.entry_point.function,
            ep_info,
        )?;

        // Add newline at the end of file
        writeln!(self.out)?;

        // Collect all reflection info and return it to the user
        self.collect_reflection_info()
    }
970
971 fn write_array_size(
972 &mut self,
973 base: Handle<crate::Type>,
974 size: crate::ArraySize,
975 ) -> BackendResult {
976 write!(self.out, "[")?;
977
978 // Write the array size
979 // Writes nothing if `ArraySize::Dynamic`
980 match size {
981 crate::ArraySize::Constant(size) => {
982 write!(self.out, "{size}")?;
983 }
984 crate::ArraySize::Pending(_) => unreachable!(),
985 crate::ArraySize::Dynamic => (),
986 }
987
988 write!(self.out, "]")?;
989
990 if let TypeInner::Array {
991 base: next_base,
992 size: next_size,
993 ..
994 } = self.module.types[base].inner
995 {
996 self.write_array_size(next_base, next_size)?;
997 }
998
999 Ok(())
1000 }
1001
    /// Helper method used to write value types
    ///
    /// # Notes
    /// Adds no trailing or leading whitespace
    fn write_value_type(&mut self, inner: &TypeInner) -> BackendResult {
        match *inner {
            // Scalars (and atomics / scalar value pointers, which have no distinct
            // GLSL spelling) use the full type name from `glsl_scalar`
            TypeInner::Scalar(scalar)
            | TypeInner::Atomic(scalar)
            | TypeInner::ValuePointer {
                size: None,
                scalar,
                space: _,
            } => write!(self.out, "{}", glsl_scalar(scalar)?.full)?,
            // Vectors are just `gvecN` where `g` is the scalar prefix and `N` is the
            // vector size; vector value pointers are spelled the same way
            TypeInner::Vector { size, scalar }
            | TypeInner::ValuePointer {
                size: Some(size),
                scalar,
                space: _,
            } => write!(self.out, "{}vec{}", glsl_scalar(scalar)?.prefix, size as u8)?,
            // Matrices are written with `gmatMxN` where `g` is the scalar prefix (only floats and
            // doubles are allowed), `M` is the column count and `N` is the row count
            //
            // glsl supports a square matrix shorthand `gmatN` where `N` = `M`, but it doesn't
            // justify the extra branch needed to write matrices that way
            TypeInner::Matrix {
                columns,
                rows,
                scalar,
            } => write!(
                self.out,
                "{}mat{}x{}",
                glsl_scalar(scalar)?.prefix,
                columns as u8,
                rows as u8
            )?,
            // GLSL arrays are written as `type name[size]`
            // Here we only write the size of the array, i.e. `[size]`;
            // the base `type` and the `name` must be written by the caller
            TypeInner::Array { base, size, .. } => self.write_array_size(base, size)?,
            // List every remaining variant explicitly instead of using `_`, so that
            // adding a new variant produces a compile-time non-exhaustiveness error here
            TypeInner::Pointer { .. }
            | TypeInner::Struct { .. }
            | TypeInner::Image { .. }
            | TypeInner::Sampler { .. }
            | TypeInner::AccelerationStructure
            | TypeInner::RayQuery
            | TypeInner::BindingArray { .. } => {
                return Err(Error::Custom(format!("Unable to write type {inner:?}")))
            }
        }

        Ok(())
    }
1058
1059 /// Helper method used to write non image/sampler types
1060 ///
1061 /// # Notes
1062 /// Adds no trailing or leading whitespace
1063 fn write_type(&mut self, ty: Handle<crate::Type>) -> BackendResult {
1064 match self.module.types[ty].inner {
1065 // glsl has no pointer types so just write types as normal and loads are skipped
1066 TypeInner::Pointer { base, .. } => self.write_type(base),
1067 // glsl structs are written as just the struct name
1068 TypeInner::Struct { .. } => {
1069 // Get the struct name
1070 let name = &self.names[&NameKey::Type(ty)];
1071 write!(self.out, "{name}")?;
1072 Ok(())
1073 }
1074 // glsl array has the size separated from the base type
1075 TypeInner::Array { base, .. } => self.write_type(base),
1076 ref other => self.write_value_type(other),
1077 }
1078 }
1079
1080 /// Helper method to write a image type
1081 ///
1082 /// # Notes
1083 /// Adds no leading or trailing whitespace
1084 fn write_image_type(
1085 &mut self,
1086 dim: crate::ImageDimension,
1087 arrayed: bool,
1088 class: crate::ImageClass,
1089 ) -> BackendResult {
1090 // glsl images consist of four parts the scalar prefix, the image "type", the dimensions
1091 // and modifiers
1092 //
1093 // There exists two image types
1094 // - sampler - for sampled images
1095 // - image - for storage images
1096 //
1097 // There are three possible modifiers that can be used together and must be written in
1098 // this order to be valid
1099 // - MS - used if it's a multisampled image
1100 // - Array - used if it's an image array
1101 // - Shadow - used if it's a depth image
1102 use crate::ImageClass as Ic;
1103 use crate::Scalar as S;
1104 let float = S {
1105 kind: crate::ScalarKind::Float,
1106 width: 4,
1107 };
1108 let (base, scalar, ms, comparison) = match class {
1109 Ic::Sampled { kind, multi: true } => ("sampler", S { kind, width: 4 }, "MS", ""),
1110 Ic::Sampled { kind, multi: false } => ("sampler", S { kind, width: 4 }, "", ""),
1111 Ic::Depth { multi: true } => ("sampler", float, "MS", ""),
1112 Ic::Depth { multi: false } => ("sampler", float, "", "Shadow"),
1113 Ic::Storage { format, .. } => ("image", format.into(), "", ""),
1114 };
1115
1116 let precision = if self.options.version.is_es() {
1117 "highp "
1118 } else {
1119 ""
1120 };
1121
1122 write!(
1123 self.out,
1124 "{}{}{}{}{}{}{}",
1125 precision,
1126 glsl_scalar(scalar)?.prefix,
1127 base,
1128 glsl_dimension(dim),
1129 ms,
1130 if arrayed { "Array" } else { "" },
1131 comparison
1132 )?;
1133
1134 Ok(())
1135 }
1136
1137 /// Helper method used to write non images/sampler globals
1138 ///
1139 /// # Notes
1140 /// Adds a newline
1141 ///
1142 /// # Panics
1143 /// If the global has type sampler
1144 fn write_global(
1145 &mut self,
1146 handle: Handle<crate::GlobalVariable>,
1147 global: &crate::GlobalVariable,
1148 ) -> BackendResult {
1149 if self.options.version.supports_explicit_locations() {
1150 if let Some(ref br) = global.binding {
1151 match self.options.binding_map.get(br) {
1152 Some(binding) => {
1153 let layout = match global.space {
1154 crate::AddressSpace::Storage { .. } => {
1155 if self.options.version.supports_std430_layout() {
1156 "std430, "
1157 } else {
1158 "std140, "
1159 }
1160 }
1161 crate::AddressSpace::Uniform => "std140, ",
1162 _ => "",
1163 };
1164 write!(self.out, "layout({layout}binding = {binding}) ")?
1165 }
1166 None => {
1167 log::debug!("unassigned binding for {:?}", global.name);
1168 if let crate::AddressSpace::Storage { .. } = global.space {
1169 if self.options.version.supports_std430_layout() {
1170 write!(self.out, "layout(std430) ")?
1171 }
1172 }
1173 }
1174 }
1175 }
1176 }
1177
1178 if let crate::AddressSpace::Storage { access } = global.space {
1179 self.write_storage_access(access)?;
1180 }
1181
1182 if let Some(storage_qualifier) = glsl_storage_qualifier(global.space) {
1183 write!(self.out, "{storage_qualifier} ")?;
1184 }
1185
1186 match global.space {
1187 crate::AddressSpace::Private => {
1188 self.write_simple_global(handle, global)?;
1189 }
1190 crate::AddressSpace::WorkGroup => {
1191 self.write_simple_global(handle, global)?;
1192 }
1193 crate::AddressSpace::PushConstant => {
1194 self.write_simple_global(handle, global)?;
1195 }
1196 crate::AddressSpace::Uniform => {
1197 self.write_interface_block(handle, global)?;
1198 }
1199 crate::AddressSpace::Storage { .. } => {
1200 self.write_interface_block(handle, global)?;
1201 }
1202 // A global variable in the `Function` address space is a
1203 // contradiction in terms.
1204 crate::AddressSpace::Function => unreachable!(),
1205 // Textures and samplers are handled directly in `Writer::write`.
1206 crate::AddressSpace::Handle => unreachable!(),
1207 }
1208
1209 Ok(())
1210 }
1211
1212 fn write_simple_global(
1213 &mut self,
1214 handle: Handle<crate::GlobalVariable>,
1215 global: &crate::GlobalVariable,
1216 ) -> BackendResult {
1217 self.write_type(global.ty)?;
1218 write!(self.out, " ")?;
1219 self.write_global_name(handle, global)?;
1220
1221 if let TypeInner::Array { base, size, .. } = self.module.types[global.ty].inner {
1222 self.write_array_size(base, size)?;
1223 }
1224
1225 if global.space.initializable() && is_value_init_supported(self.module, global.ty) {
1226 write!(self.out, " = ")?;
1227 if let Some(init) = global.init {
1228 self.write_const_expr(init)?;
1229 } else {
1230 self.write_zero_init_value(global.ty)?;
1231 }
1232 }
1233
1234 writeln!(self.out, ";")?;
1235
1236 if let crate::AddressSpace::PushConstant = global.space {
1237 let global_name = self.get_global_name(handle, global);
1238 self.reflection_names_globals.insert(handle, global_name);
1239 }
1240
1241 Ok(())
1242 }
1243
    /// Write an interface block for a single Naga global.
    ///
    /// Write `block_name { members }`. Since `block_name` must be unique
    /// between blocks and structs, we add `_block_ID` where `ID` is a
    /// `IdGenerator` generated number. Write `members` in the same way we write
    /// a struct's members.
    fn write_interface_block(
        &mut self,
        handle: Handle<crate::GlobalVariable>,
        global: &crate::GlobalVariable,
    ) -> BackendResult {
        // Write the block name, it's just the struct name appended with `_block_ID`
        // plus the entry point stage, so the same resource gets a distinct block
        // name in each stage it is linked into.
        let ty_name = &self.names[&NameKey::Type(global.ty)];
        let block_name = format!(
            "{}_block_{}{:?}",
            // avoid double underscores as they are reserved in GLSL
            ty_name.trim_end_matches('_'),
            self.block_id.generate(),
            self.entry_point.stage,
        );
        write!(self.out, "{block_name} ")?;
        // The block name is what reflection consumers use to look this global up.
        self.reflection_names_globals.insert(handle, block_name);

        match self.module.types[global.ty].inner {
            // NOTE(review): `members.last().unwrap()` assumes a struct reaching
            // this point always has at least one member — presumably guaranteed
            // by validation; confirm against the validator.
            TypeInner::Struct { ref members, .. }
                if self.module.types[members.last().unwrap().ty]
                    .inner
                    .is_dynamically_sized(&self.module.types) =>
            {
                // Structs with dynamically sized arrays must have their
                // members lifted up as members of the interface block. GLSL
                // can't write such struct types anyway.
                self.write_struct_body(global.ty, members)?;
                write!(self.out, " ")?;
                self.write_global_name(handle, global)?;
            }
            _ => {
                // A global of any other type is written as the sole member
                // of the interface block. Since the interface block is
                // anonymous, this becomes visible in the global scope.
                write!(self.out, "{{ ")?;
                self.write_type(global.ty)?;
                write!(self.out, " ")?;
                self.write_global_name(handle, global)?;
                // Arrays still need their `[size]` suffix after the name.
                if let TypeInner::Array { base, size, .. } = self.module.types[global.ty].inner {
                    self.write_array_size(base, size)?;
                }
                write!(self.out, "; }}")?;
            }
        }

        writeln!(self.out, ";")?;

        Ok(())
    }
1299
1300 /// Helper method used to find which expressions of a given function require baking
1301 ///
1302 /// # Notes
1303 /// Clears `need_bake_expressions` set before adding to it
1304 fn update_expressions_to_bake(&mut self, func: &crate::Function, info: &valid::FunctionInfo) {
1305 use crate::Expression;
1306 self.need_bake_expressions.clear();
1307 for (fun_handle, expr) in func.expressions.iter() {
1308 let expr_info = &info[fun_handle];
1309 let min_ref_count = func.expressions[fun_handle].bake_ref_count();
1310 if min_ref_count <= expr_info.ref_count {
1311 self.need_bake_expressions.insert(fun_handle);
1312 }
1313
1314 let inner = expr_info.ty.inner_with(&self.module.types);
1315
1316 if let Expression::Math {
1317 fun,
1318 arg,
1319 arg1,
1320 arg2,
1321 ..
1322 } = *expr
1323 {
1324 match fun {
1325 crate::MathFunction::Dot => {
1326 // if the expression is a Dot product with integer arguments,
1327 // then the args needs baking as well
1328 if let TypeInner::Scalar(crate::Scalar {
1329 kind: crate::ScalarKind::Sint | crate::ScalarKind::Uint,
1330 ..
1331 }) = *inner
1332 {
1333 self.need_bake_expressions.insert(arg);
1334 self.need_bake_expressions.insert(arg1.unwrap());
1335 }
1336 }
1337 crate::MathFunction::Pack4xI8
1338 | crate::MathFunction::Pack4xU8
1339 | crate::MathFunction::Unpack4xI8
1340 | crate::MathFunction::Unpack4xU8
1341 | crate::MathFunction::QuantizeToF16 => {
1342 self.need_bake_expressions.insert(arg);
1343 }
1344 crate::MathFunction::ExtractBits => {
1345 // Only argument 1 is re-used.
1346 self.need_bake_expressions.insert(arg1.unwrap());
1347 }
1348 crate::MathFunction::InsertBits => {
1349 // Only argument 2 is re-used.
1350 self.need_bake_expressions.insert(arg2.unwrap());
1351 }
1352 crate::MathFunction::CountLeadingZeros => {
1353 if let Some(crate::ScalarKind::Sint) = inner.scalar_kind() {
1354 self.need_bake_expressions.insert(arg);
1355 }
1356 }
1357 _ => {}
1358 }
1359 }
1360 }
1361 }
1362
1363 /// Helper method used to get a name for a global
1364 ///
1365 /// Globals have different naming schemes depending on their binding:
1366 /// - Globals without bindings use the name from the [`Namer`](crate::proc::Namer)
1367 /// - Globals with resource binding are named `_group_X_binding_Y` where `X`
1368 /// is the group and `Y` is the binding
1369 fn get_global_name(
1370 &self,
1371 handle: Handle<crate::GlobalVariable>,
1372 global: &crate::GlobalVariable,
1373 ) -> String {
1374 match (&global.binding, global.space) {
1375 (&Some(ref br), _) => {
1376 format!(
1377 "_group_{}_binding_{}_{}",
1378 br.group,
1379 br.binding,
1380 self.entry_point.stage.to_str()
1381 )
1382 }
1383 (&None, crate::AddressSpace::PushConstant) => {
1384 format!("_push_constant_binding_{}", self.entry_point.stage.to_str())
1385 }
1386 (&None, _) => self.names[&NameKey::GlobalVariable(handle)].clone(),
1387 }
1388 }
1389
1390 /// Helper method used to write a name for a global without additional heap allocation
1391 fn write_global_name(
1392 &mut self,
1393 handle: Handle<crate::GlobalVariable>,
1394 global: &crate::GlobalVariable,
1395 ) -> BackendResult {
1396 match (&global.binding, global.space) {
1397 (&Some(ref br), _) => write!(
1398 self.out,
1399 "_group_{}_binding_{}_{}",
1400 br.group,
1401 br.binding,
1402 self.entry_point.stage.to_str()
1403 )?,
1404 (&None, crate::AddressSpace::PushConstant) => write!(
1405 self.out,
1406 "_push_constant_binding_{}",
1407 self.entry_point.stage.to_str()
1408 )?,
1409 (&None, _) => write!(
1410 self.out,
1411 "{}",
1412 &self.names[&NameKey::GlobalVariable(handle)]
1413 )?,
1414 }
1415
1416 Ok(())
1417 }
1418
    /// Write a GLSL global that will carry a Naga entry point's argument or return value.
    ///
    /// A Naga entry point's arguments and return value are rendered in GLSL as
    /// variables at global scope with the `in` and `out` storage qualifiers.
    /// The code we generate for `main` loads from all the `in` globals into
    /// appropriately named locals. Before it returns, `main` assigns the
    /// components of its return value into all the `out` globals.
    ///
    /// This function writes a declaration for one such GLSL global,
    /// representing a value passed into or returned from [`self.entry_point`]
    /// that has a [`Location`] binding. The global's name is generated based on
    /// the location index and the shader stages being connected; see
    /// [`VaryingName`]. This means we don't need to know the names of
    /// arguments, just their types and bindings.
    ///
    /// Emit nothing for entry point arguments or return values with [`BuiltIn`]
    /// bindings; `main` will read from or assign to the appropriate GLSL
    /// special variable; these are pre-declared. As an exception, we do declare
    /// `gl_Position` or `gl_FragCoord` with the `invariant` qualifier if
    /// needed.
    ///
    /// Use `output` together with [`self.entry_point.stage`] to determine which
    /// shader stages are being connected, and choose the `in` or `out` storage
    /// qualifier.
    ///
    /// [`self.entry_point`]: Writer::entry_point
    /// [`self.entry_point.stage`]: crate::EntryPoint::stage
    /// [`Location`]: crate::Binding::Location
    /// [`BuiltIn`]: crate::Binding::BuiltIn
    fn write_varying(
        &mut self,
        binding: Option<&crate::Binding>,
        ty: Handle<crate::Type>,
        output: bool,
    ) -> Result<(), Error> {
        // For a struct, emit a separate global for each member with a binding.
        if let TypeInner::Struct { ref members, .. } = self.module.types[ty].inner {
            for member in members {
                self.write_varying(member.binding.as_ref(), member.ty, output)?;
            }
            return Ok(());
        }

        // A value with no binding at all produces no varying declaration.
        let binding = match binding {
            None => return Ok(()),
            Some(binding) => binding,
        };

        // Only `Location` bindings get a user-declared global. Built-ins map to
        // predeclared GLSL variables, except for the `invariant` case below.
        let (location, interpolation, sampling, second_blend_source) = match *binding {
            crate::Binding::Location {
                location,
                interpolation,
                sampling,
                second_blend_source,
            } => (location, interpolation, sampling, second_blend_source),
            crate::Binding::BuiltIn(built_in) => {
                if let crate::BuiltIn::Position { invariant: true } = built_in {
                    match (self.options.version, self.entry_point.stage) {
                        (
                            Version::Embedded {
                                version: 300,
                                is_webgl: true,
                            },
                            ShaderStage::Fragment,
                        ) => {
                            // `invariant gl_FragCoord` is not allowed in WebGL2 and possibly
                            // OpenGL ES in general (waiting on confirmation).
                            //
                            // See https://github.com/KhronosGroup/WebGL/issues/3518
                        }
                        _ => {
                            writeln!(
                                self.out,
                                "invariant {};",
                                glsl_built_in(
                                    built_in,
                                    VaryingOptions::from_writer_options(self.options, output)
                                )
                            )?;
                        }
                    }
                }
                return Ok(());
            }
        };

        // Write the interpolation modifier if needed
        //
        // We ignore all interpolation and auxiliary modifiers that aren't used in fragment
        // shaders' input globals or vertex shaders' output globals.
        let emit_interpolation_and_auxiliary = match self.entry_point.stage {
            ShaderStage::Vertex => output,
            ShaderStage::Fragment => !output,
            ShaderStage::Compute => false,
        };

        // Write the I/O locations, if allowed
        let io_location = if self.options.version.supports_explicit_locations()
            || !emit_interpolation_and_auxiliary
        {
            if self.options.version.supports_io_locations() {
                if second_blend_source {
                    write!(self.out, "layout(location = {location}, index = 1) ")?;
                } else {
                    write!(self.out, "layout(location = {location}) ")?;
                }
                None
            } else {
                // The version can't spell the location in the source, so record
                // it for the reflection info instead (inserted below).
                Some(VaryingLocation {
                    location,
                    index: second_blend_source as u32,
                })
            }
        } else {
            None
        };

        // Write the interpolation qualifier.
        if let Some(interp) = interpolation {
            if emit_interpolation_and_auxiliary {
                write!(self.out, "{} ", glsl_interpolation(interp))?;
            }
        }

        // Write the sampling auxiliary qualifier.
        //
        // Before GLSL 4.2, the `centroid` and `sample` qualifiers were required to appear
        // immediately before the `in` / `out` qualifier, so we'll just follow that rule
        // here, regardless of the version.
        if let Some(sampling) = sampling {
            if emit_interpolation_and_auxiliary {
                if let Some(qualifier) = glsl_sampling(sampling)? {
                    write!(self.out, "{qualifier} ")?;
                }
            }
        }

        // Write the input/output qualifier.
        write!(self.out, "{} ", if output { "out" } else { "in" })?;

        // Write the type
        // `write_type` adds no leading or trailing spaces
        self.write_type(ty)?;

        // Finally write the global name and end the global with a `;` and a newline
        // Leading space is important
        let vname = VaryingName {
            binding: &crate::Binding::Location {
                location,
                interpolation: None,
                sampling: None,
                second_blend_source,
            },
            stage: self.entry_point.stage,
            options: VaryingOptions::from_writer_options(self.options, output),
        };
        writeln!(self.out, " {vname};")?;

        // Expose the location through the reflection info when it couldn't be
        // written in the source (see `io_location` above).
        if let Some(location) = io_location {
            self.varying.insert(vname.to_string(), location);
        }

        Ok(())
    }
1583
    /// Helper method used to write functions (both entry points and regular functions)
    ///
    /// # Notes
    /// Adds a newline
    fn write_function(
        &mut self,
        ty: back::FunctionType,
        func: &crate::Function,
        info: &valid::FunctionInfo,
    ) -> BackendResult {
        // Create a function context for the function being written
        let ctx = back::FunctionCtx {
            ty,
            info,
            expressions: &func.expressions,
            named_expressions: &func.named_expressions,
            expr_kind_tracker: ExpressionKindTracker::from_arena(&func.expressions),
        };

        // Reset per-function state: forget the previous function's named
        // expressions and recompute which expressions need baking.
        self.named_expressions.clear();
        self.update_expressions_to_bake(func, info);

        // Write the function header
        //
        // glsl headers are the same as in c:
        // `ret_type name(args)`
        // `ret_type` is the return type
        // `name` is the function name
        // `args` is a comma separated list of `type name`
        // | - `type` is the argument type
        // | - `name` is the argument name

        // Start by writing the return type if any otherwise write void
        // This is the only place where `void` is a valid type
        // (though it's more a keyword than a type)
        if let back::FunctionType::EntryPoint(_) = ctx.ty {
            // Entry points are always `void main()`; their inputs and outputs
            // travel through global varyings instead of parameters.
            write!(self.out, "void")?;
        } else if let Some(ref result) = func.result {
            self.write_type(result.ty)?;
            if let TypeInner::Array { base, size, .. } = self.module.types[result.ty].inner {
                self.write_array_size(base, size)?
            }
        } else {
            write!(self.out, "void")?;
        }

        // Write the function name and open parentheses for the argument list
        let function_name = match ctx.ty {
            back::FunctionType::Function(handle) => &self.names[&NameKey::Function(handle)],
            back::FunctionType::EntryPoint(_) => "main",
        };
        write!(self.out, " {function_name}(")?;

        // Write the comma separated argument list
        //
        // We need access to `Self` here so we use the reference passed to the closure as an
        // argument instead of capturing as that would cause a borrow checker error
        let arguments = match ctx.ty {
            back::FunctionType::EntryPoint(_) => &[][..],
            back::FunctionType::Function(_) => &func.arguments,
        };
        // Sampler arguments are dropped: GLSL has no standalone sampler values.
        let arguments: Vec<_> = arguments
            .iter()
            .enumerate()
            .filter(|&(_, arg)| match self.module.types[arg.ty].inner {
                TypeInner::Sampler { .. } => false,
                _ => true,
            })
            .collect();
        self.write_slice(&arguments, |this, _, &(i, arg)| {
            // Write the argument type
            match this.module.types[arg.ty].inner {
                // We treat images separately because they might require
                // writing the storage format
                TypeInner::Image {
                    dim,
                    arrayed,
                    class,
                } => {
                    // Write the storage format if needed
                    if let TypeInner::Image {
                        class: crate::ImageClass::Storage { format, .. },
                        ..
                    } = this.module.types[arg.ty].inner
                    {
                        write!(this.out, "layout({}) ", glsl_storage_format(format)?)?;
                    }

                    // Write the image type; `write_image_type` adds no spaces at
                    // the beginning or end, the separating space is written below
                    this.write_image_type(dim, arrayed, class)?;
                }
                TypeInner::Pointer { base, .. } => {
                    // GLSL has no pointers, so by-reference parameters become `inout`
                    write!(this.out, "inout ")?;
                    this.write_type(base)?;
                }
                // All other types are written by `write_type`
                _ => {
                    this.write_type(arg.ty)?;
                }
            }

            // Write the argument name
            // The leading space is important
            write!(this.out, " {}", &this.names[&ctx.argument_key(i as u32)])?;

            // Write array size
            match this.module.types[arg.ty].inner {
                TypeInner::Array { base, size, .. } => {
                    this.write_array_size(base, size)?;
                }
                TypeInner::Pointer { base, .. } => {
                    if let TypeInner::Array { base, size, .. } = this.module.types[base].inner {
                        this.write_array_size(base, size)?;
                    }
                }
                _ => {}
            }

            Ok(())
        })?;

        // Close the parentheses and open braces to start the function body
        writeln!(self.out, ") {{")?;

        // Compute entry points may need their workgroup memory zeroed first.
        if self.options.zero_initialize_workgroup_memory
            && ctx.ty.is_compute_entry_point(self.module)
        {
            self.write_workgroup_variables_initialization(&ctx)?;
        }

        // Compose the function arguments from globals, in case of an entry point.
        if let back::FunctionType::EntryPoint(ep_index) = ctx.ty {
            let stage = self.module.entry_points[ep_index as usize].stage;
            for (index, arg) in func.arguments.iter().enumerate() {
                write!(self.out, "{}", back::INDENT)?;
                self.write_type(arg.ty)?;
                let name = &self.names[&NameKey::EntryPointArgument(ep_index, index as u32)];
                write!(self.out, " {name}")?;
                write!(self.out, " = ")?;
                match self.module.types[arg.ty].inner {
                    // Struct arguments are rebuilt member-by-member from their varyings.
                    TypeInner::Struct { ref members, .. } => {
                        self.write_type(arg.ty)?;
                        write!(self.out, "(")?;
                        for (index, member) in members.iter().enumerate() {
                            let varying_name = VaryingName {
                                binding: member.binding.as_ref().unwrap(),
                                stage,
                                options: VaryingOptions::from_writer_options(self.options, false),
                            };
                            if index != 0 {
                                write!(self.out, ", ")?;
                            }
                            write!(self.out, "{varying_name}")?;
                        }
                        writeln!(self.out, ");")?;
                    }
                    // Everything else is copied straight from its varying.
                    _ => {
                        let varying_name = VaryingName {
                            binding: arg.binding.as_ref().unwrap(),
                            stage,
                            options: VaryingOptions::from_writer_options(self.options, false),
                        };
                        writeln!(self.out, "{varying_name};")?;
                    }
                }
            }
        }

        // Write all function locals
        // Locals are `type name (= init)?;` where the init part (including the =) is optional
        //
        // Always adds a newline
        for (handle, local) in func.local_variables.iter() {
            // Write indentation (only for readability) and the type
            // `write_type` adds no trailing space
            write!(self.out, "{}", back::INDENT)?;
            self.write_type(local.ty)?;

            // Write the local name
            // The leading space is important
            write!(self.out, " {}", self.names[&ctx.name_key(handle)])?;
            // Write size for array type
            if let TypeInner::Array { base, size, .. } = self.module.types[local.ty].inner {
                self.write_array_size(base, size)?;
            }
            // Write the local initializer if needed
            if let Some(init) = local.init {
                // Only write the `=` if there's an initializer
                // The leading and trailing spaces aren't needed but help with readability
                write!(self.out, " = ")?;

                // Write the initializer expression
                // `write_expr` adds no trailing or leading space/newline
                self.write_expr(init, &ctx)?;
            } else if is_value_init_supported(self.module, local.ty) {
                // Zero-initialize when the type supports it, so reads before
                // the first write are well-defined
                write!(self.out, " = ")?;
                self.write_zero_init_value(local.ty)?;
            }

            // Finish the local with `;` and add a newline (only for readability)
            writeln!(self.out, ";")?
        }

        // Write the function body (statement list)
        for sta in func.body.iter() {
            // Write a statement, the indentation should always be 1 when writing the function body
            // `write_stmt` adds a newline
            self.write_stmt(sta, &ctx, back::Level(1))?;
        }

        // Close braces and add a newline
        writeln!(self.out, "}}")?;

        Ok(())
    }
1803
1804 fn write_workgroup_variables_initialization(
1805 &mut self,
1806 ctx: &back::FunctionCtx,
1807 ) -> BackendResult {
1808 let mut vars = self
1809 .module
1810 .global_variables
1811 .iter()
1812 .filter(|&(handle, var)| {
1813 !ctx.info[handle].is_empty() && var.space == crate::AddressSpace::WorkGroup
1814 })
1815 .peekable();
1816
1817 if vars.peek().is_some() {
1818 let level = back::Level(1);
1819
1820 writeln!(self.out, "{level}if (gl_LocalInvocationID == uvec3(0u)) {{")?;
1821
1822 for (handle, var) in vars {
1823 let name = &self.names[&NameKey::GlobalVariable(handle)];
1824 write!(self.out, "{}{} = ", level.next(), name)?;
1825 self.write_zero_init_value(var.ty)?;
1826 writeln!(self.out, ";")?;
1827 }
1828
1829 writeln!(self.out, "{level}}}")?;
1830 self.write_barrier(crate::Barrier::WORK_GROUP, level)?;
1831 }
1832
1833 Ok(())
1834 }
1835
1836 /// Write a list of comma separated `T` values using a writer function `F`.
1837 ///
1838 /// The writer function `F` receives a mutable reference to `self` that if needed won't cause
1839 /// borrow checker issues (using for example a closure with `self` will cause issues), the
1840 /// second argument is the 0 based index of the element on the list, and the last element is
1841 /// a reference to the element `T` being written
1842 ///
1843 /// # Notes
1844 /// - Adds no newlines or leading/trailing whitespace
1845 /// - The last element won't have a trailing `,`
1846 fn write_slice<T, F: FnMut(&mut Self, u32, &T) -> BackendResult>(
1847 &mut self,
1848 data: &[T],
1849 mut f: F,
1850 ) -> BackendResult {
1851 // Loop through `data` invoking `f` for each element
1852 for (index, item) in data.iter().enumerate() {
1853 if index != 0 {
1854 write!(self.out, ", ")?;
1855 }
1856 f(self, index as u32, item)?;
1857 }
1858
1859 Ok(())
1860 }
1861
1862 /// Helper method used to write global constants
1863 fn write_global_constant(&mut self, handle: Handle<crate::Constant>) -> BackendResult {
1864 write!(self.out, "const ")?;
1865 let constant = &self.module.constants[handle];
1866 self.write_type(constant.ty)?;
1867 let name = &self.names[&NameKey::Constant(handle)];
1868 write!(self.out, " {name}")?;
1869 if let TypeInner::Array { base, size, .. } = self.module.types[constant.ty].inner {
1870 self.write_array_size(base, size)?;
1871 }
1872 write!(self.out, " = ")?;
1873 self.write_const_expr(constant.init)?;
1874 writeln!(self.out, ";")?;
1875 Ok(())
1876 }
1877
1878 /// Helper method used to output a dot product as an arithmetic expression
1879 ///
1880 fn write_dot_product(
1881 &mut self,
1882 arg: Handle<crate::Expression>,
1883 arg1: Handle<crate::Expression>,
1884 size: usize,
1885 ctx: &back::FunctionCtx,
1886 ) -> BackendResult {
1887 // Write parentheses around the dot product expression to prevent operators
1888 // with different precedences from applying earlier.
1889 write!(self.out, "(")?;
1890
1891 // Cycle through all the components of the vector
1892 for index in 0..size {
1893 let component = back::COMPONENTS[index];
1894 // Write the addition to the previous product
1895 // This will print an extra '+' at the beginning but that is fine in glsl
1896 write!(self.out, " + ")?;
1897 // Write the first vector expression, this expression is marked to be
1898 // cached so unless it can't be cached (for example, it's a Constant)
1899 // it shouldn't produce large expressions.
1900 self.write_expr(arg, ctx)?;
1901 // Access the current component on the first vector
1902 write!(self.out, ".{component} * ")?;
1903 // Write the second vector expression, this expression is marked to be
1904 // cached so unless it can't be cached (for example, it's a Constant)
1905 // it shouldn't produce large expressions.
1906 self.write_expr(arg1, ctx)?;
1907 // Access the current component on the second vector
1908 write!(self.out, ".{component}")?;
1909 }
1910
1911 write!(self.out, ")")?;
1912 Ok(())
1913 }
1914
1915 /// Helper method used to write structs
1916 ///
1917 /// # Notes
1918 /// Ends in a newline
1919 fn write_struct_body(
1920 &mut self,
1921 handle: Handle<crate::Type>,
1922 members: &[crate::StructMember],
1923 ) -> BackendResult {
1924 // glsl structs are written as in C
1925 // `struct name() { members };`
1926 // | `struct` is a keyword
1927 // | `name` is the struct name
1928 // | `members` is a semicolon separated list of `type name`
1929 // | `type` is the member type
1930 // | `name` is the member name
1931 writeln!(self.out, "{{")?;
1932
1933 for (idx, member) in members.iter().enumerate() {
1934 // The indentation is only for readability
1935 write!(self.out, "{}", back::INDENT)?;
1936
1937 match self.module.types[member.ty].inner {
1938 TypeInner::Array {
1939 base,
1940 size,
1941 stride: _,
1942 } => {
1943 self.write_type(base)?;
1944 write!(
1945 self.out,
1946 " {}",
1947 &self.names[&NameKey::StructMember(handle, idx as u32)]
1948 )?;
1949 // Write [size]
1950 self.write_array_size(base, size)?;
1951 // Newline is important
1952 writeln!(self.out, ";")?;
1953 }
1954 _ => {
1955 // Write the member type
1956 // Adds no trailing space
1957 self.write_type(member.ty)?;
1958
1959 // Write the member name and put a semicolon
1960 // The leading space is important
1961 // All members must have a semicolon even the last one
1962 writeln!(
1963 self.out,
1964 " {};",
1965 &self.names[&NameKey::StructMember(handle, idx as u32)]
1966 )?;
1967 }
1968 }
1969 }
1970
1971 write!(self.out, "}}")?;
1972 Ok(())
1973 }
1974
    /// Helper method used to write statements
    ///
    /// `sta` is the statement to write, `ctx` describes the function being
    /// written, and `level` is the indentation level for the emitted line(s).
    ///
    /// # Notes
    /// Always adds a newline
    fn write_stmt(
        &mut self,
        sta: &crate::Statement,
        ctx: &back::FunctionCtx,
        level: back::Level,
    ) -> BackendResult {
        use crate::Statement;

        match *sta {
            // This is where we can generate intermediate constants for some expression types.
            Statement::Emit(ref range) => {
                for handle in range.clone() {
                    let ptr_class = ctx.resolve_type(handle, &self.module.types).pointer_space();
                    let expr_name = if ptr_class.is_some() {
                        // GLSL can't save a pointer-valued expression in a variable,
                        // but we shouldn't ever need to: they should never be named expressions,
                        // and none of the expression types flagged by bake_ref_count can be pointer-valued.
                        None
                    } else if let Some(name) = ctx.named_expressions.get(&handle) {
                        // The frontend provides names for all named expressions up front,
                        // but we write them out step by step, so they must be re-registered
                        // with the namer here. Otherwise we could accidentally write a
                        // variable name instead of the full expression. Re-registering also
                        // produces sanitized names, which prevents generating a variable
                        // whose name collides with a reserved keyword.
                        Some(self.namer.call(name))
                    } else if self.need_bake_expressions.contains(&handle) {
                        Some(Baked(handle).to_string())
                    } else {
                        None
                    };

                    // If we are going to write an `ImageLoad` next and the target image
                    // is sampled and we are using the `Restrict` policy for bounds
                    // checking images we need to write a local holding the clamped lod.
                    if let crate::Expression::ImageLoad {
                        image,
                        level: Some(level_expr),
                        ..
                    } = ctx.expressions[handle]
                    {
                        if let TypeInner::Image {
                            class: crate::ImageClass::Sampled { .. },
                            ..
                        } = *ctx.resolve_type(image, &self.module.types)
                        {
                            if let proc::BoundsCheckPolicy::Restrict = self.policies.image_load {
                                write!(self.out, "{level}")?;
                                self.write_clamped_lod(ctx, handle, image, level_expr)?
                            }
                        }
                    }

                    if let Some(name) = expr_name {
                        write!(self.out, "{level}")?;
                        self.write_named_expr(handle, name, handle, ctx)?;
                    }
                }
            }
            // Blocks are simple we just need to write the block statements between braces
            // We could also just print the statements but this is more readable and maps more
            // closely to the IR
            Statement::Block(ref block) => {
                write!(self.out, "{level}")?;
                writeln!(self.out, "{{")?;
                for sta in block.iter() {
                    // Increase the indentation to help with readability
                    self.write_stmt(sta, ctx, level.next())?
                }
                writeln!(self.out, "{level}}}")?
            }
            // Ifs are written as in C:
            // ```
            // if(condition) {
            //  accept
            // } else {
            //  reject
            // }
            // ```
            Statement::If {
                condition,
                ref accept,
                ref reject,
            } => {
                write!(self.out, "{level}")?;
                write!(self.out, "if (")?;
                self.write_expr(condition, ctx)?;
                writeln!(self.out, ") {{")?;

                for sta in accept {
                    // Increase indentation to help with readability
                    self.write_stmt(sta, ctx, level.next())?;
                }

                // If there are no statements in the reject block we skip writing it
                // This is only for readability
                if !reject.is_empty() {
                    writeln!(self.out, "{level}}} else {{")?;

                    for sta in reject {
                        // Increase indentation to help with readability
                        self.write_stmt(sta, ctx, level.next())?;
                    }
                }

                writeln!(self.out, "{level}}}")?
            }
            // Switches are written as in C:
            // ```
            // switch (selector) {
            //      // Fallthrough
            //      case label:
            //          block
            //      // Non fallthrough
            //      case label:
            //          block
            //          break;
            //      default:
            //          block
            //  }
            //  ```
            //  Where the `default` case happens isn't important but we put it last
            //  so that we don't need to print a `break` for it
            Statement::Switch {
                selector,
                ref cases,
            } => {
                let l2 = level.next();
                // Some GLSL consumers may not handle switches with a single
                // body correctly: See wgpu#4514. Write such switch statements
                // as a `do {} while(false);` loop instead.
                //
                // Since doing so may inadvertently capture `continue`
                // statements in the switch body, we must apply continue
                // forwarding. See the `naga::back::continue_forward` module
                // docs for details.
                let one_body = cases
                    .iter()
                    .rev()
                    .skip(1)
                    .all(|case| case.fall_through && case.body.is_empty());
                if one_body {
                    // Unlike HLSL, in GLSL `continue_ctx` only needs to know
                    // about [`Switch`] statements that are being rendered as
                    // `do-while` loops.
                    if let Some(variable) = self.continue_ctx.enter_switch(&mut self.namer) {
                        writeln!(self.out, "{level}bool {variable} = false;",)?;
                    };
                    writeln!(self.out, "{level}do {{")?;
                    // Note: Expressions have no side-effects so we don't need to emit selector expression.

                    // Body
                    if let Some(case) = cases.last() {
                        for sta in case.body.iter() {
                            self.write_stmt(sta, ctx, l2)?;
                        }
                    }
                    // End do-while
                    writeln!(self.out, "{level}}} while(false);")?;

                    // Handle any forwarded continue statements.
                    use back::continue_forward::ExitControlFlow;
                    let op = match self.continue_ctx.exit_switch() {
                        ExitControlFlow::None => None,
                        ExitControlFlow::Continue { variable } => Some(("continue", variable)),
                        ExitControlFlow::Break { variable } => Some(("break", variable)),
                    };
                    if let Some((control_flow, variable)) = op {
                        writeln!(self.out, "{level}if ({variable}) {{")?;
                        writeln!(self.out, "{l2}{control_flow};")?;
                        writeln!(self.out, "{level}}}")?;
                    }
                } else {
                    // Start the switch
                    write!(self.out, "{level}")?;
                    write!(self.out, "switch(")?;
                    self.write_expr(selector, ctx)?;
                    writeln!(self.out, ") {{")?;

                    // Write all cases
                    for case in cases {
                        match case.value {
                            crate::SwitchValue::I32(value) => {
                                write!(self.out, "{l2}case {value}:")?
                            }
                            crate::SwitchValue::U32(value) => {
                                write!(self.out, "{l2}case {value}u:")?
                            }
                            crate::SwitchValue::Default => write!(self.out, "{l2}default:")?,
                        }

                        let write_block_braces = !(case.fall_through && case.body.is_empty());
                        if write_block_braces {
                            writeln!(self.out, " {{")?;
                        } else {
                            writeln!(self.out)?;
                        }

                        for sta in case.body.iter() {
                            self.write_stmt(sta, ctx, l2.next())?;
                        }

                        if !case.fall_through
                            && case.body.last().map_or(true, |s| !s.is_terminator())
                        {
                            writeln!(self.out, "{}break;", l2.next())?;
                        }

                        if write_block_braces {
                            writeln!(self.out, "{l2}}}")?;
                        }
                    }

                    writeln!(self.out, "{level}}}")?
                }
            }
            // Loops in naga IR are based on wgsl loops, glsl can emulate the behaviour by using a
            // while true loop and appending the continuing block to the body resulting in:
            // ```
            // bool loop_init = true;
            // while(true) {
            //  if (!loop_init) { <continuing> }
            //  loop_init = false;
            //  <body>
            // }
            // ```
            Statement::Loop {
                ref body,
                ref continuing,
                break_if,
            } => {
                self.continue_ctx.enter_loop();
                if !continuing.is_empty() || break_if.is_some() {
                    let gate_name = self.namer.call("loop_init");
                    writeln!(self.out, "{level}bool {gate_name} = true;")?;
                    writeln!(self.out, "{level}while(true) {{")?;
                    let l2 = level.next();
                    let l3 = l2.next();
                    writeln!(self.out, "{l2}if (!{gate_name}) {{")?;
                    for sta in continuing {
                        self.write_stmt(sta, ctx, l3)?;
                    }
                    // `break_if` ends the loop from inside the continuing block.
                    if let Some(condition) = break_if {
                        write!(self.out, "{l3}if (")?;
                        self.write_expr(condition, ctx)?;
                        writeln!(self.out, ") {{")?;
                        writeln!(self.out, "{}break;", l3.next())?;
                        writeln!(self.out, "{l3}}}")?;
                    }
                    writeln!(self.out, "{l2}}}")?;
                    writeln!(self.out, "{}{} = false;", level.next(), gate_name)?;
                } else {
                    writeln!(self.out, "{level}while(true) {{")?;
                }
                for sta in body {
                    self.write_stmt(sta, ctx, level.next())?;
                }
                writeln!(self.out, "{level}}}")?;
                self.continue_ctx.exit_loop();
            }
            // Break, continue and return are written as in C
            // `break;`
            Statement::Break => {
                write!(self.out, "{level}")?;
                writeln!(self.out, "break;")?
            }
            // `continue;`
            Statement::Continue => {
                // Sometimes we must render a `Continue` statement as a `break`.
                // See the docs for the `back::continue_forward` module.
                if let Some(variable) = self.continue_ctx.continue_encountered() {
                    writeln!(self.out, "{level}{variable} = true;",)?;
                    writeln!(self.out, "{level}break;")?
                } else {
                    writeln!(self.out, "{level}continue;")?
                }
            }
            // `return expr;`, `expr` is optional
            Statement::Return { value } => {
                write!(self.out, "{level}")?;
                match ctx.ty {
                    back::FunctionType::Function(_) => {
                        write!(self.out, "return")?;
                        // Write the expression to be returned if needed
                        if let Some(expr) = value {
                            write!(self.out, " ")?;
                            self.write_expr(expr, ctx)?;
                        }
                        writeln!(self.out, ";")?;
                    }
                    back::FunctionType::EntryPoint(ep_index) => {
                        // Entry points don't `return` a value: each member of
                        // the result is assigned to its output varying instead.
                        let mut has_point_size = false;
                        let ep = &self.module.entry_points[ep_index as usize];
                        if let Some(ref result) = ep.function.result {
                            let value = value.unwrap();
                            match self.module.types[result.ty].inner {
                                TypeInner::Struct { ref members, .. } => {
                                    // A `Compose` result is first stored in a temporary
                                    // struct so each member can be read back out of it;
                                    // other expressions can be re-emitted per member.
                                    let temp_struct_name = match ctx.expressions[value] {
                                        crate::Expression::Compose { .. } => {
                                            let return_struct = "_tmp_return";
                                            write!(
                                                self.out,
                                                "{} {} = ",
                                                &self.names[&NameKey::Type(result.ty)],
                                                return_struct
                                            )?;
                                            self.write_expr(value, ctx)?;
                                            writeln!(self.out, ";")?;
                                            write!(self.out, "{level}")?;
                                            Some(return_struct)
                                        }
                                        _ => None,
                                    };

                                    for (index, member) in members.iter().enumerate() {
                                        if let Some(crate::Binding::BuiltIn(
                                            crate::BuiltIn::PointSize,
                                        )) = member.binding
                                        {
                                            has_point_size = true;
                                        }

                                        let varying_name = VaryingName {
                                            binding: member.binding.as_ref().unwrap(),
                                            stage: ep.stage,
                                            options: VaryingOptions::from_writer_options(
                                                self.options,
                                                true,
                                            ),
                                        };
                                        write!(self.out, "{varying_name} = ")?;

                                        if let Some(struct_name) = temp_struct_name {
                                            write!(self.out, "{struct_name}")?;
                                        } else {
                                            self.write_expr(value, ctx)?;
                                        }

                                        // Write field name
                                        writeln!(
                                            self.out,
                                            ".{};",
                                            &self.names
                                                [&NameKey::StructMember(result.ty, index as u32)]
                                        )?;
                                        write!(self.out, "{level}")?;
                                    }
                                }
                                _ => {
                                    // Non-struct results map to a single output varying.
                                    let name = VaryingName {
                                        binding: result.binding.as_ref().unwrap(),
                                        stage: ep.stage,
                                        options: VaryingOptions::from_writer_options(
                                            self.options,
                                            true,
                                        ),
                                    };
                                    write!(self.out, "{name} = ")?;
                                    self.write_expr(value, ctx)?;
                                    writeln!(self.out, ";")?;
                                    write!(self.out, "{level}")?;
                                }
                            }
                        }

                        let is_vertex_stage = self.module.entry_points[ep_index as usize].stage
                            == ShaderStage::Vertex;
                        // Convert the clip space used by the IR to OpenGL's
                        // conventions when the user asked for it.
                        if is_vertex_stage
                            && self
                                .options
                                .writer_flags
                                .contains(WriterFlags::ADJUST_COORDINATE_SPACE)
                        {
                            writeln!(
                                self.out,
                                "gl_Position.yz = vec2(-gl_Position.y, gl_Position.z * 2.0 - gl_Position.w);",
                            )?;
                            write!(self.out, "{level}")?;
                        }

                        if is_vertex_stage
                            && self
                                .options
                                .writer_flags
                                .contains(WriterFlags::FORCE_POINT_SIZE)
                            && !has_point_size
                        {
                            writeln!(self.out, "gl_PointSize = 1.0;")?;
                            write!(self.out, "{level}")?;
                        }
                        writeln!(self.out, "return;")?;
                    }
                }
            }
            // This is one of the places where glsl adds to the syntax of C, in this case the
            // discard keyword which ceases all further processing in a fragment shader, it's
            // called OpKill in spir-v and that's why it's called `Statement::Kill`
            Statement::Kill => writeln!(self.out, "{level}discard;")?,
            Statement::Barrier(flags) => {
                self.write_barrier(flags, level)?;
            }
            // Stores in glsl are just variable assignments written as `pointer = value;`
            Statement::Store { pointer, value } => {
                write!(self.out, "{level}")?;
                self.write_expr(pointer, ctx)?;
                write!(self.out, " = ")?;
                self.write_expr(value, ctx)?;
                writeln!(self.out, ";")?
            }
            Statement::WorkGroupUniformLoad { pointer, result } => {
                // GLSL doesn't have pointers, which means that this backend needs to ensure that
                // the actual "loading" is happening between the two barriers.
                // This is done in `Emit` by never emitting a variable name for pointer variables
                self.write_barrier(crate::Barrier::WORK_GROUP, level)?;

                let result_name = Baked(result).to_string();
                write!(self.out, "{level}")?;
                // Expressions cannot have side effects, so just writing the expression here is fine.
                self.write_named_expr(pointer, result_name, result, ctx)?;

                self.write_barrier(crate::Barrier::WORK_GROUP, level)?;
            }
            // Stores a value into an image.
            Statement::ImageStore {
                image,
                coordinate,
                array_index,
                value,
            } => {
                write!(self.out, "{level}")?;
                self.write_image_store(ctx, image, coordinate, array_index, value)?
            }
            // A `Call` is written `name(arguments)` where `arguments` is a comma separated expressions list
            Statement::Call {
                function,
                ref arguments,
                result,
            } => {
                write!(self.out, "{level}")?;
                if let Some(expr) = result {
                    let name = Baked(expr).to_string();
                    let result = self.module.functions[function].result.as_ref().unwrap();
                    self.write_type(result.ty)?;
                    write!(self.out, " {name}")?;
                    if let TypeInner::Array { base, size, .. } = self.module.types[result.ty].inner
                    {
                        self.write_array_size(base, size)?
                    }
                    write!(self.out, " = ")?;
                    self.named_expressions.insert(expr, name);
                }
                write!(self.out, "{}(", &self.names[&NameKey::Function(function)])?;
                // Samplers are combined with their textures in GLSL, so
                // sampler-typed arguments are dropped from the call.
                let arguments: Vec<_> = arguments
                    .iter()
                    .enumerate()
                    .filter_map(|(i, arg)| {
                        let arg_ty = self.module.functions[function].arguments[i].ty;
                        match self.module.types[arg_ty].inner {
                            TypeInner::Sampler { .. } => None,
                            _ => Some(*arg),
                        }
                    })
                    .collect();
                self.write_slice(&arguments, |this, _, arg| this.write_expr(*arg, ctx))?;
                writeln!(self.out, ");")?
            }
            Statement::Atomic {
                pointer,
                ref fun,
                value,
                result,
            } => {
                write!(self.out, "{level}")?;
                if let Some(result) = result {
                    let res_name = Baked(result).to_string();
                    let res_ty = ctx.resolve_type(result, &self.module.types);
                    self.write_value_type(res_ty)?;
                    write!(self.out, " {res_name} = ")?;
                    self.named_expressions.insert(result, res_name);
                }

                let fun_str = fun.to_glsl();
                write!(self.out, "atomic{fun_str}(")?;
                self.write_expr(pointer, ctx)?;
                write!(self.out, ", ")?;
                // handle the special cases
                match *fun {
                    crate::AtomicFunction::Subtract => {
                        // `atomicAdd` was just written above, so negate the
                        // argument to turn the addition into a subtraction
                        write!(self.out, "-")?;
                    }
                    crate::AtomicFunction::Exchange { compare: Some(_) } => {
                        return Err(Error::Custom(
                            "atomic CompareExchange is not implemented".to_string(),
                        ));
                    }
                    _ => {}
                }
                self.write_expr(value, ctx)?;
                writeln!(self.out, ");")?;
            }
            // Performs an atomic operation on a texel of an image.
            Statement::ImageAtomic {
                image,
                coordinate,
                array_index,
                fun,
                value,
            } => {
                write!(self.out, "{level}")?;
                self.write_image_atomic(ctx, image, coordinate, array_index, fun, value)?
            }
            // Ray queries are not supported by the GLSL backend, and the
            // validator should have rejected them before we get here.
            Statement::RayQuery { .. } => unreachable!(),
            Statement::SubgroupBallot { result, predicate } => {
                write!(self.out, "{level}")?;
                let res_name = Baked(result).to_string();
                let res_ty = ctx.info[result].ty.inner_with(&self.module.types);
                self.write_value_type(res_ty)?;
                write!(self.out, " {res_name} = ")?;
                self.named_expressions.insert(result, res_name);

                // A missing predicate ballots over the whole subgroup.
                write!(self.out, "subgroupBallot(")?;
                match predicate {
                    Some(predicate) => self.write_expr(predicate, ctx)?,
                    None => write!(self.out, "true")?,
                }
                writeln!(self.out, ");")?;
            }
            Statement::SubgroupCollectiveOperation {
                op,
                collective_op,
                argument,
                result,
            } => {
                write!(self.out, "{level}")?;
                let res_name = Baked(result).to_string();
                let res_ty = ctx.info[result].ty.inner_with(&self.module.types);
                self.write_value_type(res_ty)?;
                write!(self.out, " {res_name} = ")?;
                self.named_expressions.insert(result, res_name);

                // Map the (collective op, subgroup op) pair to the matching
                // GL_KHR_shader_subgroup builtin.
                match (collective_op, op) {
                    (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::All) => {
                        write!(self.out, "subgroupAll(")?
                    }
                    (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Any) => {
                        write!(self.out, "subgroupAny(")?
                    }
                    (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Add) => {
                        write!(self.out, "subgroupAdd(")?
                    }
                    (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Mul) => {
                        write!(self.out, "subgroupMul(")?
                    }
                    (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Max) => {
                        write!(self.out, "subgroupMax(")?
                    }
                    (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Min) => {
                        write!(self.out, "subgroupMin(")?
                    }
                    (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::And) => {
                        write!(self.out, "subgroupAnd(")?
                    }
                    (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Or) => {
                        write!(self.out, "subgroupOr(")?
                    }
                    (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Xor) => {
                        write!(self.out, "subgroupXor(")?
                    }
                    (crate::CollectiveOperation::ExclusiveScan, crate::SubgroupOperation::Add) => {
                        write!(self.out, "subgroupExclusiveAdd(")?
                    }
                    (crate::CollectiveOperation::ExclusiveScan, crate::SubgroupOperation::Mul) => {
                        write!(self.out, "subgroupExclusiveMul(")?
                    }
                    (crate::CollectiveOperation::InclusiveScan, crate::SubgroupOperation::Add) => {
                        write!(self.out, "subgroupInclusiveAdd(")?
                    }
                    (crate::CollectiveOperation::InclusiveScan, crate::SubgroupOperation::Mul) => {
                        write!(self.out, "subgroupInclusiveMul(")?
                    }
                    _ => unimplemented!(),
                }
                self.write_expr(argument, ctx)?;
                writeln!(self.out, ");")?;
            }
            Statement::SubgroupGather {
                mode,
                argument,
                result,
            } => {
                write!(self.out, "{level}")?;
                let res_name = Baked(result).to_string();
                let res_ty = ctx.info[result].ty.inner_with(&self.module.types);
                self.write_value_type(res_ty)?;
                write!(self.out, " {res_name} = ")?;
                self.named_expressions.insert(result, res_name);

                match mode {
                    crate::GatherMode::BroadcastFirst => {
                        write!(self.out, "subgroupBroadcastFirst(")?;
                    }
                    crate::GatherMode::Broadcast(_) => {
                        write!(self.out, "subgroupBroadcast(")?;
                    }
                    crate::GatherMode::Shuffle(_) => {
                        write!(self.out, "subgroupShuffle(")?;
                    }
                    crate::GatherMode::ShuffleDown(_) => {
                        write!(self.out, "subgroupShuffleDown(")?;
                    }
                    crate::GatherMode::ShuffleUp(_) => {
                        write!(self.out, "subgroupShuffleUp(")?;
                    }
                    crate::GatherMode::ShuffleXor(_) => {
                        write!(self.out, "subgroupShuffleXor(")?;
                    }
                }
                self.write_expr(argument, ctx)?;
                // All modes except `BroadcastFirst` take the lane index/mask
                // as a second argument.
                match mode {
                    crate::GatherMode::BroadcastFirst => {}
                    crate::GatherMode::Broadcast(index)
                    | crate::GatherMode::Shuffle(index)
                    | crate::GatherMode::ShuffleDown(index)
                    | crate::GatherMode::ShuffleUp(index)
                    | crate::GatherMode::ShuffleXor(index) => {
                        write!(self.out, ", ")?;
                        self.write_expr(index, ctx)?;
                    }
                }
                writeln!(self.out, ");")?;
            }
        }

        Ok(())
    }
2613
2614 /// Write a const expression.
2615 ///
2616 /// Write `expr`, a handle to an [`Expression`] in the current [`Module`]'s
2617 /// constant expression arena, as GLSL expression.
2618 ///
2619 /// # Notes
2620 /// Adds no newlines or leading/trailing whitespace
2621 ///
2622 /// [`Expression`]: crate::Expression
2623 /// [`Module`]: crate::Module
2624 fn write_const_expr(&mut self, expr: Handle<crate::Expression>) -> BackendResult {
2625 self.write_possibly_const_expr(
2626 expr,
2627 &self.module.global_expressions,
2628 |expr| &self.info[expr],
2629 |writer, expr| writer.write_const_expr(expr),
2630 )
2631 }
2632
2633 /// Write [`Expression`] variants that can occur in both runtime and const expressions.
2634 ///
2635 /// Write `expr`, a handle to an [`Expression`] in the arena `expressions`,
2636 /// as as GLSL expression. This must be one of the [`Expression`] variants
2637 /// that is allowed to occur in constant expressions.
2638 ///
2639 /// Use `write_expression` to write subexpressions.
2640 ///
2641 /// This is the common code for `write_expr`, which handles arbitrary
2642 /// runtime expressions, and `write_const_expr`, which only handles
2643 /// const-expressions. Each of those callers passes itself (essentially) as
2644 /// the `write_expression` callback, so that subexpressions are restricted
2645 /// to the appropriate variants.
2646 ///
2647 /// # Notes
2648 /// Adds no newlines or leading/trailing whitespace
2649 ///
2650 /// [`Expression`]: crate::Expression
2651 fn write_possibly_const_expr<'w, I, E>(
2652 &'w mut self,
2653 expr: Handle<crate::Expression>,
2654 expressions: &crate::Arena<crate::Expression>,
2655 info: I,
2656 write_expression: E,
2657 ) -> BackendResult
2658 where
2659 I: Fn(Handle<crate::Expression>) -> &'w proc::TypeResolution,
2660 E: Fn(&mut Self, Handle<crate::Expression>) -> BackendResult,
2661 {
2662 use crate::Expression;
2663
2664 match expressions[expr] {
2665 Expression::Literal(literal) => {
2666 match literal {
2667 // Floats are written using `Debug` instead of `Display` because it always appends the
2668 // decimal part even it's zero which is needed for a valid glsl float constant
2669 crate::Literal::F64(value) => write!(self.out, "{value:?}LF")?,
2670 crate::Literal::F32(value) => write!(self.out, "{value:?}")?,
2671 // Unsigned integers need a `u` at the end
2672 //
2673 // While `core` doesn't necessarily need it, it's allowed and since `es` needs it we
2674 // always write it as the extra branch wouldn't have any benefit in readability
2675 crate::Literal::U32(value) => write!(self.out, "{value}u")?,
2676 crate::Literal::I32(value) => write!(self.out, "{value}")?,
2677 crate::Literal::Bool(value) => write!(self.out, "{value}")?,
2678 crate::Literal::I64(_) => {
2679 return Err(Error::Custom("GLSL has no 64-bit integer type".into()));
2680 }
2681 crate::Literal::U64(_) => {
2682 return Err(Error::Custom("GLSL has no 64-bit integer type".into()));
2683 }
2684 crate::Literal::AbstractInt(_) | crate::Literal::AbstractFloat(_) => {
2685 return Err(Error::Custom(
2686 "Abstract types should not appear in IR presented to backends".into(),
2687 ));
2688 }
2689 }
2690 }
2691 Expression::Constant(handle) => {
2692 let constant = &self.module.constants[handle];
2693 if constant.name.is_some() {
2694 write!(self.out, "{}", self.names[&NameKey::Constant(handle)])?;
2695 } else {
2696 self.write_const_expr(constant.init)?;
2697 }
2698 }
2699 Expression::ZeroValue(ty) => {
2700 self.write_zero_init_value(ty)?;
2701 }
2702 Expression::Compose { ty, ref components } => {
2703 self.write_type(ty)?;
2704
2705 if let TypeInner::Array { base, size, .. } = self.module.types[ty].inner {
2706 self.write_array_size(base, size)?;
2707 }
2708
2709 write!(self.out, "(")?;
2710 for (index, component) in components.iter().enumerate() {
2711 if index != 0 {
2712 write!(self.out, ", ")?;
2713 }
2714 write_expression(self, *component)?;
2715 }
2716 write!(self.out, ")")?
2717 }
2718 // `Splat` needs to actually write down a vector, it's not always inferred in GLSL.
2719 Expression::Splat { size: _, value } => {
2720 let resolved = info(expr).inner_with(&self.module.types);
2721 self.write_value_type(resolved)?;
2722 write!(self.out, "(")?;
2723 write_expression(self, value)?;
2724 write!(self.out, ")")?
2725 }
2726 _ => unreachable!(),
2727 }
2728
2729 Ok(())
2730 }
2731
2732 /// Helper method to write expressions
2733 ///
2734 /// # Notes
2735 /// Doesn't add any newlines or leading/trailing spaces
2736 fn write_expr(
2737 &mut self,
2738 expr: Handle<crate::Expression>,
2739 ctx: &back::FunctionCtx,
2740 ) -> BackendResult {
2741 use crate::Expression;
2742
2743 if let Some(name) = self.named_expressions.get(&expr) {
2744 write!(self.out, "{name}")?;
2745 return Ok(());
2746 }
2747
2748 match ctx.expressions[expr] {
2749 Expression::Literal(_)
2750 | Expression::Constant(_)
2751 | Expression::ZeroValue(_)
2752 | Expression::Compose { .. }
2753 | Expression::Splat { .. } => {
2754 self.write_possibly_const_expr(
2755 expr,
2756 ctx.expressions,
2757 |expr| &ctx.info[expr].ty,
2758 |writer, expr| writer.write_expr(expr, ctx),
2759 )?;
2760 }
2761 Expression::Override(_) => return Err(Error::Override),
2762 // `Access` is applied to arrays, vectors and matrices and is written as indexing
2763 Expression::Access { base, index } => {
2764 self.write_expr(base, ctx)?;
2765 write!(self.out, "[")?;
2766 self.write_expr(index, ctx)?;
2767 write!(self.out, "]")?
2768 }
2769 // `AccessIndex` is the same as `Access` except that the index is a constant and it can
2770 // be applied to structs, in this case we need to find the name of the field at that
2771 // index and write `base.field_name`
2772 Expression::AccessIndex { base, index } => {
2773 self.write_expr(base, ctx)?;
2774
2775 let base_ty_res = &ctx.info[base].ty;
2776 let mut resolved = base_ty_res.inner_with(&self.module.types);
2777 let base_ty_handle = match *resolved {
2778 TypeInner::Pointer { base, space: _ } => {
2779 resolved = &self.module.types[base].inner;
2780 Some(base)
2781 }
2782 _ => base_ty_res.handle(),
2783 };
2784
2785 match *resolved {
2786 TypeInner::Vector { .. } => {
2787 // Write vector access as a swizzle
2788 write!(self.out, ".{}", back::COMPONENTS[index as usize])?
2789 }
2790 TypeInner::Matrix { .. }
2791 | TypeInner::Array { .. }
2792 | TypeInner::ValuePointer { .. } => write!(self.out, "[{index}]")?,
2793 TypeInner::Struct { .. } => {
2794 // This will never panic in case the type is a `Struct`, this is not true
2795 // for other types so we can only check while inside this match arm
2796 let ty = base_ty_handle.unwrap();
2797
2798 write!(
2799 self.out,
2800 ".{}",
2801 &self.names[&NameKey::StructMember(ty, index)]
2802 )?
2803 }
2804 ref other => return Err(Error::Custom(format!("Cannot index {other:?}"))),
2805 }
2806 }
2807 // `Swizzle` adds a few letters behind the dot.
2808 Expression::Swizzle {
2809 size,
2810 vector,
2811 pattern,
2812 } => {
2813 self.write_expr(vector, ctx)?;
2814 write!(self.out, ".")?;
2815 for &sc in pattern[..size as usize].iter() {
2816 self.out.write_char(back::COMPONENTS[sc as usize])?;
2817 }
2818 }
2819 // Function arguments are written as the argument name
2820 Expression::FunctionArgument(pos) => {
2821 write!(self.out, "{}", &self.names[&ctx.argument_key(pos)])?
2822 }
2823 // Global variables need some special work for their name but
2824 // `get_global_name` does the work for us
2825 Expression::GlobalVariable(handle) => {
2826 let global = &self.module.global_variables[handle];
2827 self.write_global_name(handle, global)?
2828 }
2829 // A local is written as it's name
2830 Expression::LocalVariable(handle) => {
2831 write!(self.out, "{}", self.names[&ctx.name_key(handle)])?
2832 }
2833 // glsl has no pointers so there's no load operation, just write the pointer expression
2834 Expression::Load { pointer } => self.write_expr(pointer, ctx)?,
            // `ImageSample` is a bit complicated compared to the rest of the IR.
            //
            // First there are three variations depending on whether the sample level is
            // explicitly set, automatic, or biased:
            // `texture(image, coordinate)` - Automatic sample level
            // `texture(image, coordinate, bias)` - Bias sample level
            // `textureLod(image, coordinate, level)` - Zero or Exact sample level
            //
            // Furthermore if `depth_ref` is `Some` we need to append it to the coordinate vector
            Expression::ImageSample {
                image,
                sampler: _, //TODO?
                gather,
                coordinate,
                array_index,
                offset,
                level,
                depth_ref,
            } => {
                let (dim, class, arrayed) = match *ctx.resolve_type(image, &self.module.types) {
                    TypeInner::Image {
                        dim,
                        class,
                        arrayed,
                        ..
                    } => (dim, class, arrayed),
                    _ => unreachable!(),
                };
                // Reject sampling configurations that have no glsl equivalent
                let mut err = None;
                if dim == crate::ImageDimension::Cube {
                    if offset.is_some() {
                        err = Some("gsamplerCube[Array][Shadow] doesn't support texture sampling with offsets");
                    }
                    if arrayed
                        && matches!(class, crate::ImageClass::Depth { .. })
                        && matches!(level, crate::SampleLevel::Gradient { .. })
                    {
                        err = Some("samplerCubeArrayShadow don't support textureGrad");
                    }
                }
                if gather.is_some() && level != crate::SampleLevel::Zero {
                    err = Some("textureGather doesn't support LOD parameters");
                }
                if let Some(err) = err {
                    return Err(Error::Custom(String::from(err)));
                }

                // `textureLod[Offset]` on `sampler2DArrayShadow` and `samplerCubeShadow` does not exist in GLSL,
                // unless `GL_EXT_texture_shadow_lod` is present.
                // But if the target LOD is zero, we can emulate that by using `textureGrad[Offset]` with a constant gradient of 0.
                let workaround_lod_with_grad = ((dim == crate::ImageDimension::Cube && !arrayed)
                    || (dim == crate::ImageDimension::D2 && arrayed))
                    && level == crate::SampleLevel::Zero
                    && matches!(class, crate::ImageClass::Depth { .. })
                    && !self.features.contains(Features::TEXTURE_SHADOW_LOD);

                // Write the function to be used depending on the sample level
                let fun_name = match level {
                    crate::SampleLevel::Zero if gather.is_some() => "textureGather",
                    crate::SampleLevel::Zero if workaround_lod_with_grad => "textureGrad",
                    crate::SampleLevel::Auto | crate::SampleLevel::Bias(_) => "texture",
                    crate::SampleLevel::Zero | crate::SampleLevel::Exact(_) => "textureLod",
                    crate::SampleLevel::Gradient { .. } => "textureGrad",
                };
                // Constant offsets use the `Offset` variant of the builtin
                let offset_name = match offset {
                    Some(_) => "Offset",
                    None => "",
                };

                write!(self.out, "{fun_name}{offset_name}(")?;

                // Write the image that will be used
                self.write_expr(image, ctx)?;
                // The space here isn't required but it helps with readability
                write!(self.out, ", ")?;

                // We need to get the coordinates vector size to later build a vector that's `size + 1`
                // if `depth_ref` is some, if it isn't a vector we panic as that's not a valid expression
                let mut coord_dim = match *ctx.resolve_type(coordinate, &self.module.types) {
                    TypeInner::Vector { size, .. } => size as u8,
                    TypeInner::Scalar { .. } => 1,
                    _ => unreachable!(),
                };

                // The array layer is passed as an extra coordinate component
                if array_index.is_some() {
                    coord_dim += 1;
                }
                // The depth reference is folded into the coordinate vector when there's
                // still room for it; `textureGather` always takes it separately
                let merge_depth_ref = depth_ref.is_some() && gather.is_none() && coord_dim < 4;
                if merge_depth_ref {
                    coord_dim += 1;
                }

                // ES has no 1D textures, so 1D images are emulated with 2D ones;
                // a dummy second coordinate (and gradient component) is appended below
                let tex_1d_hack = dim == crate::ImageDimension::D1 && self.options.version.is_es();
                let is_vec = tex_1d_hack || coord_dim != 1;
                // Compose a new texture coordinates vector
                if is_vec {
                    write!(self.out, "vec{}(", coord_dim + tex_1d_hack as u8)?;
                }
                self.write_expr(coordinate, ctx)?;
                if tex_1d_hack {
                    write!(self.out, ", 0.0")?;
                }
                if let Some(expr) = array_index {
                    write!(self.out, ", ")?;
                    self.write_expr(expr, ctx)?;
                }
                if merge_depth_ref {
                    write!(self.out, ", ")?;
                    self.write_expr(depth_ref.unwrap(), ctx)?;
                }
                if is_vec {
                    write!(self.out, ")")?;
                }

                // A depth reference that didn't fit into the coordinate vector
                // is written as a separate argument instead
                if let (Some(expr), false) = (depth_ref, merge_depth_ref) {
                    write!(self.out, ", ")?;
                    self.write_expr(expr, ctx)?;
                }

                match level {
                    // Auto needs no more arguments
                    crate::SampleLevel::Auto => (),
                    // Zero needs level set to 0
                    crate::SampleLevel::Zero => {
                        if workaround_lod_with_grad {
                            // Constant zero gradients emulate LOD zero (see above)
                            let vec_dim = match dim {
                                crate::ImageDimension::Cube => 3,
                                _ => 2,
                            };
                            write!(self.out, ", vec{vec_dim}(0.0), vec{vec_dim}(0.0)")?;
                        } else if gather.is_none() {
                            write!(self.out, ", 0.0")?;
                        }
                    }
                    // Exact and bias require another argument
                    crate::SampleLevel::Exact(expr) => {
                        write!(self.out, ", ")?;
                        self.write_expr(expr, ctx)?;
                    }
                    crate::SampleLevel::Bias(_) => {
                        // This needs to be done after the offset writing
                    }
                    crate::SampleLevel::Gradient { x, y } => {
                        // If we are using sampler2D to replace sampler1D, we also
                        // need to make sure to use vec2 gradients
                        if tex_1d_hack {
                            write!(self.out, ", vec2(")?;
                            self.write_expr(x, ctx)?;
                            write!(self.out, ", 0.0)")?;
                            write!(self.out, ", vec2(")?;
                            self.write_expr(y, ctx)?;
                            write!(self.out, ", 0.0)")?;
                        } else {
                            write!(self.out, ", ")?;
                            self.write_expr(x, ctx)?;
                            write!(self.out, ", ")?;
                            self.write_expr(y, ctx)?;
                        }
                    }
                }

                if let Some(constant) = offset {
                    write!(self.out, ", ")?;
                    if tex_1d_hack {
                        // Pad the offset with a dummy component for the fake 2nd dimension
                        write!(self.out, "ivec2(")?;
                    }
                    self.write_const_expr(constant)?;
                    if tex_1d_hack {
                        write!(self.out, ", 0)")?;
                    }
                }

                // Bias is always the last argument
                if let crate::SampleLevel::Bias(expr) = level {
                    write!(self.out, ", ")?;
                    self.write_expr(expr, ctx)?;
                }

                // `textureGather` takes the component to gather as its last
                // argument; depth gathers don't take one
                if let (Some(component), None) = (gather, depth_ref) {
                    write!(self.out, ", {}", component as usize)?;
                }

                // End the function
                write!(self.out, ")")?
            }
            // `ImageLoad` is involved enough to warrant a dedicated helper,
            // see `write_image_load`
            Expression::ImageLoad {
                image,
                coordinate,
                array_index,
                sample,
                level,
            } => self.write_image_load(expr, ctx, image, coordinate, array_index, sample, level)?,
            // Query translates into one of the:
            // - textureSize/imageSize
            // - textureQueryLevels
            // - textureSamples/imageSamples
            Expression::ImageQuery { image, query } => {
                use crate::ImageClass;

                // This will only panic if the module is invalid
                let (dim, class) = match *ctx.resolve_type(image, &self.module.types) {
                    TypeInner::Image {
                        dim,
                        arrayed: _,
                        class,
                    } => (dim, class),
                    _ => unreachable!(),
                };
                // Number of size components the query result should keep
                // (a cube size is reported as 2D: width and height)
                let components = match dim {
                    crate::ImageDimension::D1 => 1,
                    crate::ImageDimension::D2 => 2,
                    crate::ImageDimension::D3 => 3,
                    crate::ImageDimension::Cube => 2,
                };

                // glsl's query builtins return signed results while the IR expects
                // unsigned ones, so wrap the whole query in a uint()/uvecN() conversion
                if let crate::ImageQuery::Size { .. } = query {
                    match components {
                        1 => write!(self.out, "uint(")?,
                        _ => write!(self.out, "uvec{components}(")?,
                    }
                } else {
                    write!(self.out, "uint(")?;
                }

                match query {
                    crate::ImageQuery::Size { level } => {
                        match class {
                            ImageClass::Sampled { multi, .. } | ImageClass::Depth { multi } => {
                                write!(self.out, "textureSize(")?;
                                self.write_expr(image, ctx)?;
                                if let Some(expr) = level {
                                    // `textureSize` takes a signed lod; cast a uint level
                                    let cast_to_int = matches!(
                                        *ctx.resolve_type(expr, &self.module.types),
                                        TypeInner::Scalar(crate::Scalar {
                                            kind: crate::ScalarKind::Uint,
                                            ..
                                        })
                                    );

                                    write!(self.out, ", ")?;

                                    if cast_to_int {
                                        write!(self.out, "int(")?;
                                    }

                                    self.write_expr(expr, ctx)?;

                                    if cast_to_int {
                                        write!(self.out, ")")?;
                                    }
                                } else if !multi {
                                    // All textureSize calls requires an lod argument
                                    // except for multisampled samplers
                                    write!(self.out, ", 0")?;
                                }
                            }
                            ImageClass::Storage { .. } => {
                                write!(self.out, "imageSize(")?;
                                self.write_expr(image, ctx)?;
                            }
                        }
                        write!(self.out, ")")?;
                        // Keep only the size components, dropping extras such as the
                        // array layer; on ES even a 1-component size needs the swizzle
                        // (1D images are emulated as 2D there)
                        if components != 1 || self.options.version.is_es() {
                            write!(self.out, ".{}", &"xyz"[..components])?;
                        }
                    }
                    crate::ImageQuery::NumLevels => {
                        write!(self.out, "textureQueryLevels(",)?;
                        self.write_expr(image, ctx)?;
                        write!(self.out, ")",)?;
                    }
                    crate::ImageQuery::NumLayers => {
                        let fun_name = match class {
                            ImageClass::Sampled { .. } | ImageClass::Depth { .. } => "textureSize",
                            ImageClass::Storage { .. } => "imageSize",
                        };
                        write!(self.out, "{fun_name}(")?;
                        self.write_expr(image, ctx)?;
                        // All textureSize calls requires an lod argument
                        // except for multisampled samplers
                        if !class.is_multisampled() {
                            write!(self.out, ", 0")?;
                        }
                        write!(self.out, ")")?;
                        // The layer count sits right after the size components, so
                        // pick the single component at index `components`
                        if components != 1 || self.options.version.is_es() {
                            write!(self.out, ".{}", back::COMPONENTS[components])?;
                        }
                    }
                    crate::ImageQuery::NumSamples => {
                        let fun_name = match class {
                            ImageClass::Sampled { .. } | ImageClass::Depth { .. } => {
                                "textureSamples"
                            }
                            ImageClass::Storage { .. } => "imageSamples",
                        };
                        write!(self.out, "{fun_name}(")?;
                        self.write_expr(image, ctx)?;
                        write!(self.out, ")",)?;
                    }
                }

                // Close the uint()/uvecN() conversion opened above
                write!(self.out, ")")?;
            }
            // `Unary` is written as `op(expr)` where `op` is either a prefix
            // operator or, for boolean vectors, the `not` builtin function
            Expression::Unary { op, expr } => {
                let operator_or_fn = match op {
                    crate::UnaryOperator::Negate => "-",
                    crate::UnaryOperator::LogicalNot => {
                        // In glsl `!` only applies to scalar booleans; boolean
                        // vectors are negated component-wise with `not`
                        match *ctx.resolve_type(expr, &self.module.types) {
                            TypeInner::Vector { .. } => "not",
                            _ => "!",
                        }
                    }
                    crate::UnaryOperator::BitwiseNot => "~",
                };
                // Parenthesize the operand to avoid precedence issues
                write!(self.out, "{operator_or_fn}(")?;

                self.write_expr(expr, ctx)?;

                write!(self.out, ")")?
            }
            // `Binary` we just write `left op right`, except when dealing with
            // comparison operations on vectors as they are implemented with
            // builtin functions.
            // Once again we wrap everything in parentheses to avoid precedence issues
            Expression::Binary {
                mut op,
                left,
                right,
            } => {
                use crate::{BinaryOperator as Bo, ScalarKind as Sk, TypeInner as Ti};

                let left_inner = ctx.resolve_type(left, &self.module.types);
                let right_inner = ctx.resolve_type(right, &self.module.types);

                // Decide how the operation has to be written: as a plain infix
                // operator, a builtin relational function, a component-wise
                // expansion, or the trunc-based modulo expansion. `op` itself may
                // be rewritten (e.g. bitwise and/or on booleans become logical).
                let function = match (left_inner, right_inner) {
                    (&Ti::Vector { scalar, .. }, &Ti::Vector { .. }) => match op {
                        // Comparisons on vectors yield a bvec and must go through
                        // the relational builtins (`lessThan`, `equal`, ...)
                        Bo::Less
                        | Bo::LessEqual
                        | Bo::Greater
                        | Bo::GreaterEqual
                        | Bo::Equal
                        | Bo::NotEqual => BinaryOperation::VectorCompare,
                        Bo::Modulo if scalar.kind == Sk::Float => BinaryOperation::Modulo,
                        // glsl has no `&&`/`||` on bvecs, so expand component-wise
                        Bo::And if scalar.kind == Sk::Bool => {
                            op = crate::BinaryOperator::LogicalAnd;
                            BinaryOperation::VectorComponentWise
                        }
                        Bo::InclusiveOr if scalar.kind == Sk::Bool => {
                            op = crate::BinaryOperator::LogicalOr;
                            BinaryOperation::VectorComponentWise
                        }
                        _ => BinaryOperation::Other,
                    },
                    _ => match (left_inner.scalar_kind(), right_inner.scalar_kind()) {
                        // Float `%` follows WGSL's truncated semantics, not glsl's `mod`
                        (Some(Sk::Float), _) | (_, Some(Sk::Float)) => match op {
                            Bo::Modulo => BinaryOperation::Modulo,
                            _ => BinaryOperation::Other,
                        },
                        // Scalar booleans can use the logical operators directly
                        (Some(Sk::Bool), Some(Sk::Bool)) => match op {
                            Bo::InclusiveOr => {
                                op = crate::BinaryOperator::LogicalOr;
                                BinaryOperation::Other
                            }
                            Bo::And => {
                                op = crate::BinaryOperator::LogicalAnd;
                                BinaryOperation::Other
                            }
                            _ => BinaryOperation::Other,
                        },
                        _ => BinaryOperation::Other,
                    },
                };

                match function {
                    BinaryOperation::VectorCompare => {
                        let op_str = match op {
                            Bo::Less => "lessThan(",
                            Bo::LessEqual => "lessThanEqual(",
                            Bo::Greater => "greaterThan(",
                            Bo::GreaterEqual => "greaterThanEqual(",
                            Bo::Equal => "equal(",
                            Bo::NotEqual => "notEqual(",
                            _ => unreachable!(),
                        };
                        write!(self.out, "{op_str}")?;
                        self.write_expr(left, ctx)?;
                        write!(self.out, ", ")?;
                        self.write_expr(right, ctx)?;
                        write!(self.out, ")")?;
                    }
                    // Written as `bvecN(left.x op right.x, ..., left.w op right.w)`
                    BinaryOperation::VectorComponentWise => {
                        self.write_value_type(left_inner)?;
                        write!(self.out, "(")?;

                        let size = match *left_inner {
                            Ti::Vector { size, .. } => size,
                            _ => unreachable!(),
                        };

                        for i in 0..size as usize {
                            if i != 0 {
                                write!(self.out, ", ")?;
                            }

                            self.write_expr(left, ctx)?;
                            write!(self.out, ".{}", back::COMPONENTS[i])?;

                            write!(self.out, " {} ", back::binary_operation_str(op))?;

                            self.write_expr(right, ctx)?;
                            write!(self.out, ".{}", back::COMPONENTS[i])?;
                        }

                        write!(self.out, ")")?;
                    }
                    // TODO: handle undefined behavior of BinaryOperator::Modulo
                    //
                    // sint:
                    // if right == 0 return 0
                    // if left == min(type_of(left)) && right == -1 return 0
                    // if sign(left) == -1 || sign(right) == -1 return result as defined by WGSL
                    //
                    // uint:
                    // if right == 0 return 0
                    //
                    // float:
                    // if right == 0 return ? see https://github.com/gpuweb/gpuweb/issues/2798
                    BinaryOperation::Modulo => {
                        write!(self.out, "(")?;

                        // write `e1 - e2 * trunc(e1 / e2)`
                        self.write_expr(left, ctx)?;
                        write!(self.out, " - ")?;
                        self.write_expr(right, ctx)?;
                        write!(self.out, " * ")?;
                        write!(self.out, "trunc(")?;
                        self.write_expr(left, ctx)?;
                        write!(self.out, " / ")?;
                        self.write_expr(right, ctx)?;
                        write!(self.out, ")")?;

                        write!(self.out, ")")?;
                    }
                    BinaryOperation::Other => {
                        write!(self.out, "(")?;

                        self.write_expr(left, ctx)?;
                        write!(self.out, " {} ", back::binary_operation_str(op))?;
                        self.write_expr(right, ctx)?;

                        write!(self.out, ")")?;
                    }
                }
            }
            // `Select` is written as `condition ? accept : reject`
            // We wrap everything in parentheses to avoid precedence issues
            Expression::Select {
                condition,
                accept,
                reject,
            } => {
                let cond_ty = ctx.resolve_type(condition, &self.module.types);
                // A vector condition can't use the ternary operator (it takes a
                // scalar bool), so it's lowered to a component-wise `mix` instead
                let vec_select = if let TypeInner::Vector { .. } = *cond_ty {
                    true
                } else {
                    false
                };

                // TODO: Boolean mix on desktop required GL_EXT_shader_integer_mix
                if vec_select {
                    // Glsl defines that for mix when the condition is a boolean the first element
                    // is picked if condition is false and the second if condition is true
                    write!(self.out, "mix(")?;
                    self.write_expr(reject, ctx)?;
                    write!(self.out, ", ")?;
                    self.write_expr(accept, ctx)?;
                    write!(self.out, ", ")?;
                    self.write_expr(condition, ctx)?;
                } else {
                    write!(self.out, "(")?;
                    self.write_expr(condition, ctx)?;
                    write!(self.out, " ? ")?;
                    self.write_expr(accept, ctx)?;
                    write!(self.out, " : ")?;
                    self.write_expr(reject, ctx)?;
                }

                // Both branches above leave exactly one parenthesis open
                write!(self.out, ")")?
            }
            // `Derivative` is a function call to a glsl provided function
            Expression::Derivative { axis, ctrl, expr } => {
                use crate::{DerivativeAxis as Axis, DerivativeControl as Ctrl};
                // The `Coarse`/`Fine` builtin variants need derivative-control
                // support; otherwise fall back to the uncontrolled form,
                // silently dropping the `ctrl` hint
                let fun_name = if self.options.version.supports_derivative_control() {
                    match (axis, ctrl) {
                        (Axis::X, Ctrl::Coarse) => "dFdxCoarse",
                        (Axis::X, Ctrl::Fine) => "dFdxFine",
                        (Axis::X, Ctrl::None) => "dFdx",
                        (Axis::Y, Ctrl::Coarse) => "dFdyCoarse",
                        (Axis::Y, Ctrl::Fine) => "dFdyFine",
                        (Axis::Y, Ctrl::None) => "dFdy",
                        (Axis::Width, Ctrl::Coarse) => "fwidthCoarse",
                        (Axis::Width, Ctrl::Fine) => "fwidthFine",
                        (Axis::Width, Ctrl::None) => "fwidth",
                    }
                } else {
                    match axis {
                        Axis::X => "dFdx",
                        Axis::Y => "dFdy",
                        Axis::Width => "fwidth",
                    }
                };
                write!(self.out, "{fun_name}(")?;
                self.write_expr(expr, ctx)?;
                write!(self.out, ")")?
            }
            // `Relational` is a normal function call to some glsl provided functions
            Expression::Relational { fun, argument } => {
                use crate::RelationalFunction as Rf;

                // All four relational functions map one-to-one onto glsl builtins
                let fun_name = match fun {
                    Rf::IsInf => "isinf",
                    Rf::IsNan => "isnan",
                    Rf::All => "all",
                    Rf::Any => "any",
                };
                write!(self.out, "{fun_name}(")?;

                self.write_expr(argument, ctx)?;

                write!(self.out, ")")?
            }
3368 Expression::Math {
3369 fun,
3370 arg,
3371 arg1,
3372 arg2,
3373 arg3,
3374 } => {
3375 use crate::MathFunction as Mf;
3376
3377 let fun_name = match fun {
3378 // comparison
3379 Mf::Abs => "abs",
3380 Mf::Min => "min",
3381 Mf::Max => "max",
3382 Mf::Clamp => {
3383 let scalar_kind = ctx
3384 .resolve_type(arg, &self.module.types)
3385 .scalar_kind()
3386 .unwrap();
3387 match scalar_kind {
3388 crate::ScalarKind::Float => "clamp",
3389 // Clamp is undefined if min > max. In practice this means it can use a median-of-three
3390 // instruction to determine the value. This is fine according to the WGSL spec for float
3391 // clamp, but integer clamp _must_ use min-max. As such we write out min/max.
3392 _ => {
3393 write!(self.out, "min(max(")?;
3394 self.write_expr(arg, ctx)?;
3395 write!(self.out, ", ")?;
3396 self.write_expr(arg1.unwrap(), ctx)?;
3397 write!(self.out, "), ")?;
3398 self.write_expr(arg2.unwrap(), ctx)?;
3399 write!(self.out, ")")?;
3400
3401 return Ok(());
3402 }
3403 }
3404 }
3405 Mf::Saturate => {
3406 write!(self.out, "clamp(")?;
3407
3408 self.write_expr(arg, ctx)?;
3409
3410 match *ctx.resolve_type(arg, &self.module.types) {
3411 TypeInner::Vector { size, .. } => write!(
3412 self.out,
3413 ", vec{}(0.0), vec{0}(1.0)",
3414 back::vector_size_str(size)
3415 )?,
3416 _ => write!(self.out, ", 0.0, 1.0")?,
3417 }
3418
3419 write!(self.out, ")")?;
3420
3421 return Ok(());
3422 }
3423 // trigonometry
3424 Mf::Cos => "cos",
3425 Mf::Cosh => "cosh",
3426 Mf::Sin => "sin",
3427 Mf::Sinh => "sinh",
3428 Mf::Tan => "tan",
3429 Mf::Tanh => "tanh",
3430 Mf::Acos => "acos",
3431 Mf::Asin => "asin",
3432 Mf::Atan => "atan",
3433 Mf::Asinh => "asinh",
3434 Mf::Acosh => "acosh",
3435 Mf::Atanh => "atanh",
3436 Mf::Radians => "radians",
3437 Mf::Degrees => "degrees",
3438 // glsl doesn't have atan2 function
3439 // use two-argument variation of the atan function
3440 Mf::Atan2 => "atan",
3441 // decomposition
3442 Mf::Ceil => "ceil",
3443 Mf::Floor => "floor",
3444 Mf::Round => "roundEven",
3445 Mf::Fract => "fract",
3446 Mf::Trunc => "trunc",
3447 Mf::Modf => MODF_FUNCTION,
3448 Mf::Frexp => FREXP_FUNCTION,
3449 Mf::Ldexp => "ldexp",
3450 // exponent
3451 Mf::Exp => "exp",
3452 Mf::Exp2 => "exp2",
3453 Mf::Log => "log",
3454 Mf::Log2 => "log2",
3455 Mf::Pow => "pow",
3456 // geometry
3457 Mf::Dot => match *ctx.resolve_type(arg, &self.module.types) {
3458 TypeInner::Vector {
3459 scalar:
3460 crate::Scalar {
3461 kind: crate::ScalarKind::Float,
3462 ..
3463 },
3464 ..
3465 } => "dot",
3466 TypeInner::Vector { size, .. } => {
3467 return self.write_dot_product(arg, arg1.unwrap(), size as usize, ctx)
3468 }
3469 _ => unreachable!(
3470 "Correct TypeInner for dot product should be already validated"
3471 ),
3472 },
3473 Mf::Outer => "outerProduct",
3474 Mf::Cross => "cross",
3475 Mf::Distance => "distance",
3476 Mf::Length => "length",
3477 Mf::Normalize => "normalize",
3478 Mf::FaceForward => "faceforward",
3479 Mf::Reflect => "reflect",
3480 Mf::Refract => "refract",
3481 // computational
3482 Mf::Sign => "sign",
3483 Mf::Fma => {
3484 if self.options.version.supports_fma_function() {
3485 // Use the fma function when available
3486 "fma"
3487 } else {
3488 // No fma support. Transform the function call into an arithmetic expression
3489 write!(self.out, "(")?;
3490
3491 self.write_expr(arg, ctx)?;
3492 write!(self.out, " * ")?;
3493
3494 let arg1 =
3495 arg1.ok_or_else(|| Error::Custom("Missing fma arg1".to_owned()))?;
3496 self.write_expr(arg1, ctx)?;
3497 write!(self.out, " + ")?;
3498
3499 let arg2 =
3500 arg2.ok_or_else(|| Error::Custom("Missing fma arg2".to_owned()))?;
3501 self.write_expr(arg2, ctx)?;
3502 write!(self.out, ")")?;
3503
3504 return Ok(());
3505 }
3506 }
3507 Mf::Mix => "mix",
3508 Mf::Step => "step",
3509 Mf::SmoothStep => "smoothstep",
3510 Mf::Sqrt => "sqrt",
3511 Mf::InverseSqrt => "inversesqrt",
3512 Mf::Inverse => "inverse",
3513 Mf::Transpose => "transpose",
3514 Mf::Determinant => "determinant",
3515 Mf::QuantizeToF16 => match *ctx.resolve_type(arg, &self.module.types) {
3516 TypeInner::Scalar { .. } => {
3517 write!(self.out, "unpackHalf2x16(packHalf2x16(vec2(")?;
3518 self.write_expr(arg, ctx)?;
3519 write!(self.out, "))).x")?;
3520 return Ok(());
3521 }
3522 TypeInner::Vector {
3523 size: crate::VectorSize::Bi,
3524 ..
3525 } => {
3526 write!(self.out, "unpackHalf2x16(packHalf2x16(")?;
3527 self.write_expr(arg, ctx)?;
3528 write!(self.out, "))")?;
3529 return Ok(());
3530 }
3531 TypeInner::Vector {
3532 size: crate::VectorSize::Tri,
3533 ..
3534 } => {
3535 write!(self.out, "vec3(unpackHalf2x16(packHalf2x16(")?;
3536 self.write_expr(arg, ctx)?;
3537 write!(self.out, ".xy)), unpackHalf2x16(packHalf2x16(")?;
3538 self.write_expr(arg, ctx)?;
3539 write!(self.out, ".zz)).x)")?;
3540 return Ok(());
3541 }
3542 TypeInner::Vector {
3543 size: crate::VectorSize::Quad,
3544 ..
3545 } => {
3546 write!(self.out, "vec4(unpackHalf2x16(packHalf2x16(")?;
3547 self.write_expr(arg, ctx)?;
3548 write!(self.out, ".xy)), unpackHalf2x16(packHalf2x16(")?;
3549 self.write_expr(arg, ctx)?;
3550 write!(self.out, ".zw)))")?;
3551 return Ok(());
3552 }
3553 _ => unreachable!(
3554 "Correct TypeInner for QuantizeToF16 should be already validated"
3555 ),
3556 },
3557 // bits
3558 Mf::CountTrailingZeros => {
3559 match *ctx.resolve_type(arg, &self.module.types) {
3560 TypeInner::Vector { size, scalar, .. } => {
3561 let s = back::vector_size_str(size);
3562 if let crate::ScalarKind::Uint = scalar.kind {
3563 write!(self.out, "min(uvec{s}(findLSB(")?;
3564 self.write_expr(arg, ctx)?;
3565 write!(self.out, ")), uvec{s}(32u))")?;
3566 } else {
3567 write!(self.out, "ivec{s}(min(uvec{s}(findLSB(")?;
3568 self.write_expr(arg, ctx)?;
3569 write!(self.out, ")), uvec{s}(32u)))")?;
3570 }
3571 }
3572 TypeInner::Scalar(scalar) => {
3573 if let crate::ScalarKind::Uint = scalar.kind {
3574 write!(self.out, "min(uint(findLSB(")?;
3575 self.write_expr(arg, ctx)?;
3576 write!(self.out, ")), 32u)")?;
3577 } else {
3578 write!(self.out, "int(min(uint(findLSB(")?;
3579 self.write_expr(arg, ctx)?;
3580 write!(self.out, ")), 32u))")?;
3581 }
3582 }
3583 _ => unreachable!(),
3584 };
3585 return Ok(());
3586 }
3587 Mf::CountLeadingZeros => {
3588 if self.options.version.supports_integer_functions() {
3589 match *ctx.resolve_type(arg, &self.module.types) {
3590 TypeInner::Vector { size, scalar } => {
3591 let s = back::vector_size_str(size);
3592
3593 if let crate::ScalarKind::Uint = scalar.kind {
3594 write!(self.out, "uvec{s}(ivec{s}(31) - findMSB(")?;
3595 self.write_expr(arg, ctx)?;
3596 write!(self.out, "))")?;
3597 } else {
3598 write!(self.out, "mix(ivec{s}(31) - findMSB(")?;
3599 self.write_expr(arg, ctx)?;
3600 write!(self.out, "), ivec{s}(0), lessThan(")?;
3601 self.write_expr(arg, ctx)?;
3602 write!(self.out, ", ivec{s}(0)))")?;
3603 }
3604 }
3605 TypeInner::Scalar(scalar) => {
3606 if let crate::ScalarKind::Uint = scalar.kind {
3607 write!(self.out, "uint(31 - findMSB(")?;
3608 } else {
3609 write!(self.out, "(")?;
3610 self.write_expr(arg, ctx)?;
3611 write!(self.out, " < 0 ? 0 : 31 - findMSB(")?;
3612 }
3613
3614 self.write_expr(arg, ctx)?;
3615 write!(self.out, "))")?;
3616 }
3617 _ => unreachable!(),
3618 };
3619 } else {
3620 match *ctx.resolve_type(arg, &self.module.types) {
3621 TypeInner::Vector { size, scalar } => {
3622 let s = back::vector_size_str(size);
3623
3624 if let crate::ScalarKind::Uint = scalar.kind {
3625 write!(self.out, "uvec{s}(")?;
3626 write!(self.out, "vec{s}(31.0) - floor(log2(vec{s}(")?;
3627 self.write_expr(arg, ctx)?;
3628 write!(self.out, ") + 0.5)))")?;
3629 } else {
3630 write!(self.out, "ivec{s}(")?;
3631 write!(self.out, "mix(vec{s}(31.0) - floor(log2(vec{s}(")?;
3632 self.write_expr(arg, ctx)?;
3633 write!(self.out, ") + 0.5)), ")?;
3634 write!(self.out, "vec{s}(0.0), lessThan(")?;
3635 self.write_expr(arg, ctx)?;
3636 write!(self.out, ", ivec{s}(0u))))")?;
3637 }
3638 }
3639 TypeInner::Scalar(scalar) => {
3640 if let crate::ScalarKind::Uint = scalar.kind {
3641 write!(self.out, "uint(31.0 - floor(log2(float(")?;
3642 self.write_expr(arg, ctx)?;
3643 write!(self.out, ") + 0.5)))")?;
3644 } else {
3645 write!(self.out, "(")?;
3646 self.write_expr(arg, ctx)?;
3647 write!(self.out, " < 0 ? 0 : int(")?;
3648 write!(self.out, "31.0 - floor(log2(float(")?;
3649 self.write_expr(arg, ctx)?;
3650 write!(self.out, ") + 0.5))))")?;
3651 }
3652 }
3653 _ => unreachable!(),
3654 };
3655 }
3656
3657 return Ok(());
3658 }
3659 Mf::CountOneBits => "bitCount",
3660 Mf::ReverseBits => "bitfieldReverse",
3661 Mf::ExtractBits => {
3662 // The behavior of ExtractBits is undefined when offset + count > bit_width. We need
3663 // to first sanitize the offset and count first. If we don't do this, AMD and Intel chips
3664 // will return out-of-spec values if the extracted range is not within the bit width.
3665 //
3666 // This encodes the exact formula specified by the wgsl spec, without temporary values:
3667 // https://gpuweb.github.io/gpuweb/wgsl/#extractBits-unsigned-builtin
3668 //
3669 // w = sizeof(x) * 8
3670 // o = min(offset, w)
3671 // c = min(count, w - o)
3672 //
3673 // bitfieldExtract(x, o, c)
3674 //
3675 // extract_bits(e, min(offset, w), min(count, w - min(offset, w))))
3676 let scalar_bits = ctx
3677 .resolve_type(arg, &self.module.types)
3678 .scalar_width()
3679 .unwrap()
3680 * 8;
3681
3682 write!(self.out, "bitfieldExtract(")?;
3683 self.write_expr(arg, ctx)?;
3684 write!(self.out, ", int(min(")?;
3685 self.write_expr(arg1.unwrap(), ctx)?;
3686 write!(self.out, ", {scalar_bits}u)), int(min(",)?;
3687 self.write_expr(arg2.unwrap(), ctx)?;
3688 write!(self.out, ", {scalar_bits}u - min(")?;
3689 self.write_expr(arg1.unwrap(), ctx)?;
3690 write!(self.out, ", {scalar_bits}u))))")?;
3691
3692 return Ok(());
3693 }
3694 Mf::InsertBits => {
3695 // InsertBits has the same considerations as ExtractBits above
3696 let scalar_bits = ctx
3697 .resolve_type(arg, &self.module.types)
3698 .scalar_width()
3699 .unwrap()
3700 * 8;
3701
3702 write!(self.out, "bitfieldInsert(")?;
3703 self.write_expr(arg, ctx)?;
3704 write!(self.out, ", ")?;
3705 self.write_expr(arg1.unwrap(), ctx)?;
3706 write!(self.out, ", int(min(")?;
3707 self.write_expr(arg2.unwrap(), ctx)?;
3708 write!(self.out, ", {scalar_bits}u)), int(min(",)?;
3709 self.write_expr(arg3.unwrap(), ctx)?;
3710 write!(self.out, ", {scalar_bits}u - min(")?;
3711 self.write_expr(arg2.unwrap(), ctx)?;
3712 write!(self.out, ", {scalar_bits}u))))")?;
3713
3714 return Ok(());
3715 }
3716 Mf::FirstTrailingBit => "findLSB",
3717 Mf::FirstLeadingBit => "findMSB",
3718 // data packing
3719 Mf::Pack4x8snorm => "packSnorm4x8",
3720 Mf::Pack4x8unorm => "packUnorm4x8",
3721 Mf::Pack2x16snorm => "packSnorm2x16",
3722 Mf::Pack2x16unorm => "packUnorm2x16",
3723 Mf::Pack2x16float => "packHalf2x16",
3724 fun @ (Mf::Pack4xI8 | Mf::Pack4xU8) => {
3725 let was_signed = match fun {
3726 Mf::Pack4xI8 => true,
3727 Mf::Pack4xU8 => false,
3728 _ => unreachable!(),
3729 };
3730 let const_suffix = if was_signed { "" } else { "u" };
3731 if was_signed {
3732 write!(self.out, "uint(")?;
3733 }
3734 write!(self.out, "(")?;
3735 self.write_expr(arg, ctx)?;
3736 write!(self.out, "[0] & 0xFF{const_suffix}) | ((")?;
3737 self.write_expr(arg, ctx)?;
3738 write!(self.out, "[1] & 0xFF{const_suffix}) << 8) | ((")?;
3739 self.write_expr(arg, ctx)?;
3740 write!(self.out, "[2] & 0xFF{const_suffix}) << 16) | ((")?;
3741 self.write_expr(arg, ctx)?;
3742 write!(self.out, "[3] & 0xFF{const_suffix}) << 24)")?;
3743 if was_signed {
3744 write!(self.out, ")")?;
3745 }
3746
3747 return Ok(());
3748 }
3749 // data unpacking
3750 Mf::Unpack4x8snorm => "unpackSnorm4x8",
3751 Mf::Unpack4x8unorm => "unpackUnorm4x8",
3752 Mf::Unpack2x16snorm => "unpackSnorm2x16",
3753 Mf::Unpack2x16unorm => "unpackUnorm2x16",
3754 Mf::Unpack2x16float => "unpackHalf2x16",
3755 fun @ (Mf::Unpack4xI8 | Mf::Unpack4xU8) => {
3756 let sign_prefix = match fun {
3757 Mf::Unpack4xI8 => 'i',
3758 Mf::Unpack4xU8 => 'u',
3759 _ => unreachable!(),
3760 };
3761 write!(self.out, "{sign_prefix}vec4(")?;
3762 for i in 0..4 {
3763 write!(self.out, "bitfieldExtract(")?;
3764 // Since bitfieldExtract only sign extends if the value is signed, this
3765 // cast is needed
3766 match fun {
3767 Mf::Unpack4xI8 => {
3768 write!(self.out, "int(")?;
3769 self.write_expr(arg, ctx)?;
3770 write!(self.out, ")")?;
3771 }
3772 Mf::Unpack4xU8 => self.write_expr(arg, ctx)?,
3773 _ => unreachable!(),
3774 };
3775 write!(self.out, ", {}, 8)", i * 8)?;
3776 if i != 3 {
3777 write!(self.out, ", ")?;
3778 }
3779 }
3780 write!(self.out, ")")?;
3781
3782 return Ok(());
3783 }
3784 };
3785
3786 let extract_bits = fun == Mf::ExtractBits;
3787 let insert_bits = fun == Mf::InsertBits;
3788
3789 // Some GLSL functions always return signed integers (like findMSB),
3790 // so they need to be cast to uint if the argument is also an uint.
3791 let ret_might_need_int_to_uint = matches!(
3792 fun,
3793 Mf::FirstTrailingBit | Mf::FirstLeadingBit | Mf::CountOneBits | Mf::Abs
3794 );
3795
3796 // Some GLSL functions only accept signed integers (like abs),
3797 // so they need their argument cast from uint to int.
3798 let arg_might_need_uint_to_int = matches!(fun, Mf::Abs);
3799
3800 // Check if the argument is an unsigned integer and return the vector size
3801 // in case it's a vector
3802 let maybe_uint_size = match *ctx.resolve_type(arg, &self.module.types) {
3803 TypeInner::Scalar(crate::Scalar {
3804 kind: crate::ScalarKind::Uint,
3805 ..
3806 }) => Some(None),
3807 TypeInner::Vector {
3808 scalar:
3809 crate::Scalar {
3810 kind: crate::ScalarKind::Uint,
3811 ..
3812 },
3813 size,
3814 } => Some(Some(size)),
3815 _ => None,
3816 };
3817
3818 // Cast to uint if the function needs it
3819 if ret_might_need_int_to_uint {
3820 if let Some(maybe_size) = maybe_uint_size {
3821 match maybe_size {
3822 Some(size) => write!(self.out, "uvec{}(", size as u8)?,
3823 None => write!(self.out, "uint(")?,
3824 }
3825 }
3826 }
3827
3828 write!(self.out, "{fun_name}(")?;
3829
3830 // Cast to int if the function needs it
3831 if arg_might_need_uint_to_int {
3832 if let Some(maybe_size) = maybe_uint_size {
3833 match maybe_size {
3834 Some(size) => write!(self.out, "ivec{}(", size as u8)?,
3835 None => write!(self.out, "int(")?,
3836 }
3837 }
3838 }
3839
3840 self.write_expr(arg, ctx)?;
3841
3842 // Close the cast from uint to int
3843 if arg_might_need_uint_to_int && maybe_uint_size.is_some() {
3844 write!(self.out, ")")?
3845 }
3846
3847 if let Some(arg) = arg1 {
3848 write!(self.out, ", ")?;
3849 if extract_bits {
3850 write!(self.out, "int(")?;
3851 self.write_expr(arg, ctx)?;
3852 write!(self.out, ")")?;
3853 } else {
3854 self.write_expr(arg, ctx)?;
3855 }
3856 }
3857 if let Some(arg) = arg2 {
3858 write!(self.out, ", ")?;
3859 if extract_bits || insert_bits {
3860 write!(self.out, "int(")?;
3861 self.write_expr(arg, ctx)?;
3862 write!(self.out, ")")?;
3863 } else {
3864 self.write_expr(arg, ctx)?;
3865 }
3866 }
3867 if let Some(arg) = arg3 {
3868 write!(self.out, ", ")?;
3869 if insert_bits {
3870 write!(self.out, "int(")?;
3871 self.write_expr(arg, ctx)?;
3872 write!(self.out, ")")?;
3873 } else {
3874 self.write_expr(arg, ctx)?;
3875 }
3876 }
3877 write!(self.out, ")")?;
3878
3879 // Close the cast from int to uint
3880 if ret_might_need_int_to_uint && maybe_uint_size.is_some() {
3881 write!(self.out, ")")?
3882 }
3883 }
3884 // `As` is always a call.
3885 // If `convert` is true the function name is the type
3886 // Else the function name is one of the glsl provided bitcast functions
3887 Expression::As {
3888 expr,
3889 kind: target_kind,
3890 convert,
3891 } => {
3892 let inner = ctx.resolve_type(expr, &self.module.types);
3893 match convert {
3894 Some(width) => {
3895 // this is similar to `write_type`, but with the target kind
3896 let scalar = glsl_scalar(crate::Scalar {
3897 kind: target_kind,
3898 width,
3899 })?;
3900 match *inner {
3901 TypeInner::Matrix { columns, rows, .. } => write!(
3902 self.out,
3903 "{}mat{}x{}",
3904 scalar.prefix, columns as u8, rows as u8
3905 )?,
3906 TypeInner::Vector { size, .. } => {
3907 write!(self.out, "{}vec{}", scalar.prefix, size as u8)?
3908 }
3909 _ => write!(self.out, "{}", scalar.full)?,
3910 }
3911
3912 write!(self.out, "(")?;
3913 self.write_expr(expr, ctx)?;
3914 write!(self.out, ")")?
3915 }
3916 None => {
3917 use crate::ScalarKind as Sk;
3918
3919 let target_vector_type = match *inner {
3920 TypeInner::Vector { size, scalar } => Some(TypeInner::Vector {
3921 size,
3922 scalar: crate::Scalar {
3923 kind: target_kind,
3924 width: scalar.width,
3925 },
3926 }),
3927 _ => None,
3928 };
3929
3930 let source_kind = inner.scalar_kind().unwrap();
3931
3932 match (source_kind, target_kind, target_vector_type) {
3933 // No conversion needed
3934 (Sk::Sint, Sk::Sint, _)
3935 | (Sk::Uint, Sk::Uint, _)
3936 | (Sk::Float, Sk::Float, _)
3937 | (Sk::Bool, Sk::Bool, _) => {
3938 self.write_expr(expr, ctx)?;
3939 return Ok(());
3940 }
3941
3942 // Cast to/from floats
3943 (Sk::Float, Sk::Sint, _) => write!(self.out, "floatBitsToInt")?,
3944 (Sk::Float, Sk::Uint, _) => write!(self.out, "floatBitsToUint")?,
3945 (Sk::Sint, Sk::Float, _) => write!(self.out, "intBitsToFloat")?,
3946 (Sk::Uint, Sk::Float, _) => write!(self.out, "uintBitsToFloat")?,
3947
3948 // Cast between vector types
3949 (_, _, Some(vector)) => {
3950 self.write_value_type(&vector)?;
3951 }
3952
3953 // There is no way to bitcast between Uint/Sint in glsl. Use constructor conversion
3954 (Sk::Uint | Sk::Bool, Sk::Sint, None) => write!(self.out, "int")?,
3955 (Sk::Sint | Sk::Bool, Sk::Uint, None) => write!(self.out, "uint")?,
3956 (Sk::Bool, Sk::Float, None) => write!(self.out, "float")?,
3957 (Sk::Sint | Sk::Uint | Sk::Float, Sk::Bool, None) => {
3958 write!(self.out, "bool")?
3959 }
3960
3961 (Sk::AbstractInt | Sk::AbstractFloat, _, _)
3962 | (_, Sk::AbstractInt | Sk::AbstractFloat, _) => unreachable!(),
3963 };
3964
3965 write!(self.out, "(")?;
3966 self.write_expr(expr, ctx)?;
3967 write!(self.out, ")")?;
3968 }
3969 }
3970 }
3971 // These expressions never show up in `Emit`.
3972 Expression::CallResult(_)
3973 | Expression::AtomicResult { .. }
3974 | Expression::RayQueryProceedResult
3975 | Expression::WorkGroupUniformLoadResult { .. }
3976 | Expression::SubgroupOperationResult { .. }
3977 | Expression::SubgroupBallotResult => unreachable!(),
3978 // `ArrayLength` is written as `expr.length()` and we convert it to a uint
3979 Expression::ArrayLength(expr) => {
3980 write!(self.out, "uint(")?;
3981 self.write_expr(expr, ctx)?;
3982 write!(self.out, ".length())")?
3983 }
3984 // not supported yet
3985 Expression::RayQueryGetIntersection { .. } => unreachable!(),
3986 }
3987
3988 Ok(())
3989 }
3990
3991 /// Helper function to write the local holding the clamped lod
3992 fn write_clamped_lod(
3993 &mut self,
3994 ctx: &back::FunctionCtx,
3995 expr: Handle<crate::Expression>,
3996 image: Handle<crate::Expression>,
3997 level_expr: Handle<crate::Expression>,
3998 ) -> Result<(), Error> {
3999 // Define our local and start a call to `clamp`
4000 write!(
4001 self.out,
4002 "int {}{} = clamp(",
4003 Baked(expr),
4004 CLAMPED_LOD_SUFFIX
4005 )?;
4006 // Write the lod that will be clamped
4007 self.write_expr(level_expr, ctx)?;
4008 // Set the min value to 0 and start a call to `textureQueryLevels` to get
4009 // the maximum value
4010 write!(self.out, ", 0, textureQueryLevels(")?;
4011 // Write the target image as an argument to `textureQueryLevels`
4012 self.write_expr(image, ctx)?;
4013 // Close the call to `textureQueryLevels` subtract 1 from it since
4014 // the lod argument is 0 based, close the `clamp` call and end the
4015 // local declaration statement.
4016 writeln!(self.out, ") - 1);")?;
4017
4018 Ok(())
4019 }
4020
4021 // Helper method used to retrieve how many elements a coordinate vector
4022 // for the images operations need.
4023 fn get_coordinate_vector_size(&self, dim: crate::ImageDimension, arrayed: bool) -> u8 {
4024 // openGL es doesn't have 1D images so we need workaround it
4025 let tex_1d_hack = dim == crate::ImageDimension::D1 && self.options.version.is_es();
4026 // Get how many components the coordinate vector needs for the dimensions only
4027 let tex_coord_size = match dim {
4028 crate::ImageDimension::D1 => 1,
4029 crate::ImageDimension::D2 => 2,
4030 crate::ImageDimension::D3 => 3,
4031 crate::ImageDimension::Cube => 2,
4032 };
4033 // Calculate the true size of the coordinate vector by adding 1 for arrayed images
4034 // and another 1 if we need to workaround 1D images by making them 2D
4035 tex_coord_size + tex_1d_hack as u8 + arrayed as u8
4036 }
4037
4038 /// Helper method to write the coordinate vector for image operations
4039 fn write_texture_coord(
4040 &mut self,
4041 ctx: &back::FunctionCtx,
4042 vector_size: u8,
4043 coordinate: Handle<crate::Expression>,
4044 array_index: Option<Handle<crate::Expression>>,
4045 // Emulate 1D images as 2D for profiles that don't support it (glsl es)
4046 tex_1d_hack: bool,
4047 ) -> Result<(), Error> {
4048 match array_index {
4049 // If the image needs an array indice we need to add it to the end of our
4050 // coordinate vector, to do so we will use the `ivec(ivec, scalar)`
4051 // constructor notation (NOTE: the inner `ivec` can also be a scalar, this
4052 // is important for 1D arrayed images).
4053 Some(layer_expr) => {
4054 write!(self.out, "ivec{vector_size}(")?;
4055 self.write_expr(coordinate, ctx)?;
4056 write!(self.out, ", ")?;
4057 // If we are replacing sampler1D with sampler2D we also need
4058 // to add another zero to the coordinates vector for the y component
4059 if tex_1d_hack {
4060 write!(self.out, "0, ")?;
4061 }
4062 self.write_expr(layer_expr, ctx)?;
4063 write!(self.out, ")")?;
4064 }
4065 // Otherwise write just the expression (and the 1D hack if needed)
4066 None => {
4067 let uvec_size = match *ctx.resolve_type(coordinate, &self.module.types) {
4068 TypeInner::Scalar(crate::Scalar {
4069 kind: crate::ScalarKind::Uint,
4070 ..
4071 }) => Some(None),
4072 TypeInner::Vector {
4073 size,
4074 scalar:
4075 crate::Scalar {
4076 kind: crate::ScalarKind::Uint,
4077 ..
4078 },
4079 } => Some(Some(size as u32)),
4080 _ => None,
4081 };
4082 if tex_1d_hack {
4083 write!(self.out, "ivec2(")?;
4084 } else if uvec_size.is_some() {
4085 match uvec_size {
4086 Some(None) => write!(self.out, "int(")?,
4087 Some(Some(size)) => write!(self.out, "ivec{size}(")?,
4088 _ => {}
4089 }
4090 }
4091 self.write_expr(coordinate, ctx)?;
4092 if tex_1d_hack {
4093 write!(self.out, ", 0)")?;
4094 } else if uvec_size.is_some() {
4095 write!(self.out, ")")?;
4096 }
4097 }
4098 }
4099
4100 Ok(())
4101 }
4102
4103 /// Helper method to write the `ImageStore` statement
4104 fn write_image_store(
4105 &mut self,
4106 ctx: &back::FunctionCtx,
4107 image: Handle<crate::Expression>,
4108 coordinate: Handle<crate::Expression>,
4109 array_index: Option<Handle<crate::Expression>>,
4110 value: Handle<crate::Expression>,
4111 ) -> Result<(), Error> {
4112 use crate::ImageDimension as IDim;
4113
4114 // NOTE: openGL requires that `imageStore`s have no effects when the texel is invalid
4115 // so we don't need to generate bounds checks (OpenGL 4.2 Core §3.9.20)
4116
4117 // This will only panic if the module is invalid
4118 let dim = match *ctx.resolve_type(image, &self.module.types) {
4119 TypeInner::Image { dim, .. } => dim,
4120 _ => unreachable!(),
4121 };
4122
4123 // Begin our call to `imageStore`
4124 write!(self.out, "imageStore(")?;
4125 self.write_expr(image, ctx)?;
4126 // Separate the image argument from the coordinates
4127 write!(self.out, ", ")?;
4128
4129 // openGL es doesn't have 1D images so we need workaround it
4130 let tex_1d_hack = dim == IDim::D1 && self.options.version.is_es();
4131 // Write the coordinate vector
4132 self.write_texture_coord(
4133 ctx,
4134 // Get the size of the coordinate vector
4135 self.get_coordinate_vector_size(dim, array_index.is_some()),
4136 coordinate,
4137 array_index,
4138 tex_1d_hack,
4139 )?;
4140
4141 // Separate the coordinate from the value to write and write the expression
4142 // of the value to write.
4143 write!(self.out, ", ")?;
4144 self.write_expr(value, ctx)?;
4145 // End the call to `imageStore` and the statement.
4146 writeln!(self.out, ");")?;
4147
4148 Ok(())
4149 }
4150
4151 /// Helper method to write the `ImageAtomic` statement
4152 fn write_image_atomic(
4153 &mut self,
4154 ctx: &back::FunctionCtx,
4155 image: Handle<crate::Expression>,
4156 coordinate: Handle<crate::Expression>,
4157 array_index: Option<Handle<crate::Expression>>,
4158 fun: crate::AtomicFunction,
4159 value: Handle<crate::Expression>,
4160 ) -> Result<(), Error> {
4161 use crate::ImageDimension as IDim;
4162
4163 // NOTE: openGL requires that `imageAtomic`s have no effects when the texel is invalid
4164 // so we don't need to generate bounds checks (OpenGL 4.2 Core §3.9.20)
4165
4166 // This will only panic if the module is invalid
4167 let dim = match *ctx.resolve_type(image, &self.module.types) {
4168 TypeInner::Image { dim, .. } => dim,
4169 _ => unreachable!(),
4170 };
4171
4172 // Begin our call to `imageAtomic`
4173 let fun_str = fun.to_glsl();
4174 write!(self.out, "imageAtomic{fun_str}(")?;
4175 self.write_expr(image, ctx)?;
4176 // Separate the image argument from the coordinates
4177 write!(self.out, ", ")?;
4178
4179 // openGL es doesn't have 1D images so we need workaround it
4180 let tex_1d_hack = dim == IDim::D1 && self.options.version.is_es();
4181 // Write the coordinate vector
4182 self.write_texture_coord(
4183 ctx,
4184 // Get the size of the coordinate vector
4185 self.get_coordinate_vector_size(dim, false),
4186 coordinate,
4187 array_index,
4188 tex_1d_hack,
4189 )?;
4190
4191 // Separate the coordinate from the value to write and write the expression
4192 // of the value to write.
4193 write!(self.out, ", ")?;
4194 self.write_expr(value, ctx)?;
4195 // End the call to `imageAtomic` and the statement.
4196 writeln!(self.out, ");")?;
4197
4198 Ok(())
4199 }
4200
    /// Helper method for writing an `ImageLoad` expression.
    ///
    /// Emits a `texelFetch` call for sampled images or an `imageLoad` call for
    /// storage images, wrapped in whatever bounds-checking code the selected
    /// image-load policy requires. `handle` is the handle of the `ImageLoad`
    /// expression itself; under the `Restrict` policy it names the clamped-lod
    /// local written earlier by `write_clamped_lod`.
    #[allow(clippy::too_many_arguments)]
    fn write_image_load(
        &mut self,
        handle: Handle<crate::Expression>,
        ctx: &back::FunctionCtx,
        image: Handle<crate::Expression>,
        coordinate: Handle<crate::Expression>,
        array_index: Option<Handle<crate::Expression>>,
        sample: Option<Handle<crate::Expression>>,
        level: Option<Handle<crate::Expression>>,
    ) -> Result<(), Error> {
        use crate::ImageDimension as IDim;

        // `ImageLoad` is a bit complicated.
        // There are two functions one for sampled
        // images another for storage images, the former uses `texelFetch` and the
        // latter uses `imageLoad`.
        //
        // Furthermore we have `level` which is always `Some` for sampled images
        // and `None` for storage images, so we end up with two functions:
        // - `texelFetch(image, coordinate, level)` for sampled images
        // - `imageLoad(image, coordinate)` for storage images
        //
        // Finally we also have to consider bounds checking, for storage images
        // this is easy since openGL requires that invalid texels always return
        // 0, for sampled images we need to either verify that all arguments are
        // in bounds (`ReadZeroSkipWrite`) or make them a valid texel (`Restrict`).

        // This will only panic if the module is invalid
        let (dim, class) = match *ctx.resolve_type(image, &self.module.types) {
            TypeInner::Image {
                dim,
                arrayed: _,
                class,
            } => (dim, class),
            _ => unreachable!(),
        };

        // Get the name of the function to be used for the load operation
        // and the policy to be used with it.
        let (fun_name, policy) = match class {
            // Sampled images inherit the policy from the user passed policies
            crate::ImageClass::Sampled { .. } => ("texelFetch", self.policies.image_load),
            crate::ImageClass::Storage { .. } => {
                // OpenGL ES 3.1 mentions in Chapter "8.22 Texture Image Loads and Stores" that:
                // "Invalid image loads will return a vector where the value of R, G, and B components
                // is 0 and the value of the A component is undefined."
                //
                // OpenGL 4.2 Core mentions in Chapter "3.9.20 Texture Image Loads and Stores" that:
                // "Invalid image loads will return zero."
                //
                // So, we only inject bounds checks for ES
                let policy = if self.options.version.is_es() {
                    self.policies.image_load
                } else {
                    proc::BoundsCheckPolicy::Unchecked
                };
                ("imageLoad", policy)
            }
            // TODO: Is there even a function for this?
            crate::ImageClass::Depth { multi: _ } => {
                return Err(Error::Custom(
                    "WGSL `textureLoad` from depth textures is not supported in GLSL".to_string(),
                ))
            }
        };

        // openGL es doesn't have 1D images so we need workaround it
        let tex_1d_hack = dim == IDim::D1 && self.options.version.is_es();
        // Get the size of the coordinate vector
        let vector_size = self.get_coordinate_vector_size(dim, array_index.is_some());

        if let proc::BoundsCheckPolicy::ReadZeroSkipWrite = policy {
            // To write the bounds checks for `ReadZeroSkipWrite` we will use a
            // ternary operator since we are in the middle of an expression and
            // need to return a value.
            //
            // NOTE: glsl does short circuit when evaluating logical
            // expressions so we can be sure that after we test a
            // condition it will be true for the next ones

            // Write parentheses around the ternary operator to prevent problems with
            // expressions emitted before or after it having more precedence
            write!(self.out, "(",)?;

            // The lod check needs to precede the size check since we need
            // to use the lod to get the size of the image at that level.
            if let Some(level_expr) = level {
                self.write_expr(level_expr, ctx)?;
                write!(self.out, " < textureQueryLevels(",)?;
                self.write_expr(image, ctx)?;
                // Chain the next check
                write!(self.out, ") && ")?;
            }

            // Check that the sample arguments doesn't exceed the number of samples
            if let Some(sample_expr) = sample {
                self.write_expr(sample_expr, ctx)?;
                write!(self.out, " < textureSamples(",)?;
                self.write_expr(image, ctx)?;
                // Chain the next check
                write!(self.out, ") && ")?;
            }

            // We now need to write the size checks for the coordinates and array index
            // first we write the comparison function in case the image is 1D non arrayed
            // (and no 1D to 2D hack was needed) we are comparing scalars so the less than
            // operator will suffice, but otherwise we'll be comparing two vectors so we'll
            // need to use the `lessThan` function but it returns a vector of booleans (one
            // for each comparison) so we need to fold it all in one scalar boolean, since
            // we want all comparisons to pass we use the `all` function which will only
            // return `true` if all the elements of the boolean vector are also `true`.
            //
            // So we'll end with one of the following forms
            // - `coord < textureSize(image, lod)` for 1D images
            // - `all(lessThan(coord, textureSize(image, lod)))` for normal images
            // - `all(lessThan(ivec(coord, array_index), textureSize(image, lod)))`
            //   for arrayed images
            // - `all(lessThan(coord, textureSize(image)))` for multi sampled images

            if vector_size != 1 {
                write!(self.out, "all(lessThan(")?;
            }

            // Write the coordinate vector
            self.write_texture_coord(ctx, vector_size, coordinate, array_index, tex_1d_hack)?;

            if vector_size != 1 {
                // If we used the `lessThan` function we need to separate the
                // coordinates from the image size.
                write!(self.out, ", ")?;
            } else {
                // If we didn't use it (ie. 1D images) we perform the comparison
                // using the less than operator.
                write!(self.out, " < ")?;
            }

            // Call `textureSize` to get our image size
            write!(self.out, "textureSize(")?;
            self.write_expr(image, ctx)?;
            // `textureSize` uses the lod as a second argument for mipmapped images
            if let Some(level_expr) = level {
                // Separate the image from the lod
                write!(self.out, ", ")?;
                self.write_expr(level_expr, ctx)?;
            }
            // Close the `textureSize` call
            write!(self.out, ")")?;

            if vector_size != 1 {
                // Close the `all` and `lessThan` calls
                write!(self.out, "))")?;
            }

            // Finally end the condition part of the ternary operator
            write!(self.out, " ? ")?;
        }

        // Begin the call to the function used to load the texel
        write!(self.out, "{fun_name}(")?;
        self.write_expr(image, ctx)?;
        write!(self.out, ", ")?;

        // If we are using `Restrict` bounds checking we need to pass valid texel
        // coordinates, to do so we use the `clamp` function to get a value between
        // 0 and the image size - 1 (indexing begins at 0)
        if let proc::BoundsCheckPolicy::Restrict = policy {
            write!(self.out, "clamp(")?;
        }

        // Write the coordinate vector
        self.write_texture_coord(ctx, vector_size, coordinate, array_index, tex_1d_hack)?;

        // If we are using `Restrict` bounds checking we need to write the rest of the
        // clamp we initiated before writing the coordinates.
        if let proc::BoundsCheckPolicy::Restrict = policy {
            // Write the min value 0
            if vector_size == 1 {
                write!(self.out, ", 0")?;
            } else {
                write!(self.out, ", ivec{vector_size}(0)")?;
            }
            // Start the `textureSize` call to use as the max value.
            write!(self.out, ", textureSize(")?;
            self.write_expr(image, ctx)?;
            // If the image is mipmapped we need to add the lod argument to the
            // `textureSize` call, but this needs to be the clamped lod, this should
            // have been generated earlier and put in a local.
            if class.is_mipmapped() {
                write!(self.out, ", {}{}", Baked(handle), CLAMPED_LOD_SUFFIX)?;
            }
            // Close the `textureSize` call
            write!(self.out, ")")?;

            // Subtract 1 from the `textureSize` call since the coordinates are zero based.
            if vector_size == 1 {
                write!(self.out, " - 1")?;
            } else {
                write!(self.out, " - ivec{vector_size}(1)")?;
            }

            // Close the `clamp` call
            write!(self.out, ")")?;

            // Add the clamped lod (if present) as the second argument to the
            // image load function.
            if level.is_some() {
                write!(self.out, ", {}{}", Baked(handle), CLAMPED_LOD_SUFFIX)?;
            }

            // If a sample argument is needed we need to clamp it between 0 and
            // the number of samples the image has.
            if let Some(sample_expr) = sample {
                write!(self.out, ", clamp(")?;
                self.write_expr(sample_expr, ctx)?;
                // Set the min value to 0 and start the call to `textureSamples`
                write!(self.out, ", 0, textureSamples(")?;
                self.write_expr(image, ctx)?;
                // Close the `textureSamples` call, subtract 1 from it since the sample
                // argument is zero based, and close the `clamp` call
                writeln!(self.out, ") - 1)")?;
            }
        } else if let Some(sample_or_level) = sample.or(level) {
            // If no bounds checking is need just add the sample or level argument
            // after the coordinates
            write!(self.out, ", ")?;
            self.write_expr(sample_or_level, ctx)?;
        }

        // Close the image load function.
        write!(self.out, ")")?;

        // If we were using the `ReadZeroSkipWrite` policy we need to end the first branch
        // (which is taken if the condition is `true`) with a colon (`:`) and write the
        // second branch which is just a 0 value.
        if let proc::BoundsCheckPolicy::ReadZeroSkipWrite = policy {
            // Get the kind of the output value.
            let kind = match class {
                // Only sampled images can reach here since storage images
                // don't need bounds checks and depth images aren't implemented
                crate::ImageClass::Sampled { kind, .. } => kind,
                _ => unreachable!(),
            };

            // End the first branch
            write!(self.out, " : ")?;
            // Write the 0 value
            write!(
                self.out,
                "{}vec4(",
                glsl_scalar(crate::Scalar { kind, width: 4 })?.prefix,
            )?;
            self.write_zero_init_scalar(kind)?;
            // Close the zero value constructor
            write!(self.out, ")")?;
            // Close the parentheses surrounding our ternary
            write!(self.out, ")")?;
        }

        Ok(())
    }
4463
4464 fn write_named_expr(
4465 &mut self,
4466 handle: Handle<crate::Expression>,
4467 name: String,
4468 // The expression which is being named.
4469 // Generally, this is the same as handle, except in WorkGroupUniformLoad
4470 named: Handle<crate::Expression>,
4471 ctx: &back::FunctionCtx,
4472 ) -> BackendResult {
4473 match ctx.info[named].ty {
4474 proc::TypeResolution::Handle(ty_handle) => match self.module.types[ty_handle].inner {
4475 TypeInner::Struct { .. } => {
4476 let ty_name = &self.names[&NameKey::Type(ty_handle)];
4477 write!(self.out, "{ty_name}")?;
4478 }
4479 _ => {
4480 self.write_type(ty_handle)?;
4481 }
4482 },
4483 proc::TypeResolution::Value(ref inner) => {
4484 self.write_value_type(inner)?;
4485 }
4486 }
4487
4488 let resolved = ctx.resolve_type(named, &self.module.types);
4489
4490 write!(self.out, " {name}")?;
4491 if let TypeInner::Array { base, size, .. } = *resolved {
4492 self.write_array_size(base, size)?;
4493 }
4494 write!(self.out, " = ")?;
4495 self.write_expr(handle, ctx)?;
4496 writeln!(self.out, ";")?;
4497 self.named_expressions.insert(named, name);
4498
4499 Ok(())
4500 }
4501
4502 /// Helper function that write string with default zero initialization for supported types
4503 fn write_zero_init_value(&mut self, ty: Handle<crate::Type>) -> BackendResult {
4504 let inner = &self.module.types[ty].inner;
4505 match *inner {
4506 TypeInner::Scalar(scalar) | TypeInner::Atomic(scalar) => {
4507 self.write_zero_init_scalar(scalar.kind)?;
4508 }
4509 TypeInner::Vector { scalar, .. } => {
4510 self.write_value_type(inner)?;
4511 write!(self.out, "(")?;
4512 self.write_zero_init_scalar(scalar.kind)?;
4513 write!(self.out, ")")?;
4514 }
4515 TypeInner::Matrix { .. } => {
4516 self.write_value_type(inner)?;
4517 write!(self.out, "(")?;
4518 self.write_zero_init_scalar(crate::ScalarKind::Float)?;
4519 write!(self.out, ")")?;
4520 }
4521 TypeInner::Array { base, size, .. } => {
4522 let count = match size
4523 .to_indexable_length(self.module)
4524 .expect("Bad array size")
4525 {
4526 proc::IndexableLength::Known(count) => count,
4527 proc::IndexableLength::Pending => unreachable!(),
4528 proc::IndexableLength::Dynamic => return Ok(()),
4529 };
4530 self.write_type(base)?;
4531 self.write_array_size(base, size)?;
4532 write!(self.out, "(")?;
4533 for _ in 1..count {
4534 self.write_zero_init_value(base)?;
4535 write!(self.out, ", ")?;
4536 }
4537 // write last parameter without comma and space
4538 self.write_zero_init_value(base)?;
4539 write!(self.out, ")")?;
4540 }
4541 TypeInner::Struct { ref members, .. } => {
4542 let name = &self.names[&NameKey::Type(ty)];
4543 write!(self.out, "{name}(")?;
4544 for (index, member) in members.iter().enumerate() {
4545 if index != 0 {
4546 write!(self.out, ", ")?;
4547 }
4548 self.write_zero_init_value(member.ty)?;
4549 }
4550 write!(self.out, ")")?;
4551 }
4552 _ => unreachable!(),
4553 }
4554
4555 Ok(())
4556 }
4557
4558 /// Helper function that write string with zero initialization for scalar
4559 fn write_zero_init_scalar(&mut self, kind: crate::ScalarKind) -> BackendResult {
4560 match kind {
4561 crate::ScalarKind::Bool => write!(self.out, "false")?,
4562 crate::ScalarKind::Uint => write!(self.out, "0u")?,
4563 crate::ScalarKind::Float => write!(self.out, "0.0")?,
4564 crate::ScalarKind::Sint => write!(self.out, "0")?,
4565 crate::ScalarKind::AbstractInt | crate::ScalarKind::AbstractFloat => {
4566 return Err(Error::Custom(
4567 "Abstract types should not appear in IR presented to backends".to_string(),
4568 ))
4569 }
4570 }
4571
4572 Ok(())
4573 }
4574
4575 /// Issue a memory barrier. Please note that to ensure visibility,
4576 /// OpenGL always requires a call to the `barrier()` function after a `memoryBarrier*()`
4577 fn write_barrier(&mut self, flags: crate::Barrier, level: back::Level) -> BackendResult {
4578 if flags.contains(crate::Barrier::STORAGE) {
4579 writeln!(self.out, "{level}memoryBarrierBuffer();")?;
4580 }
4581 if flags.contains(crate::Barrier::WORK_GROUP) {
4582 writeln!(self.out, "{level}memoryBarrierShared();")?;
4583 }
4584 if flags.contains(crate::Barrier::SUB_GROUP) {
4585 writeln!(self.out, "{level}subgroupMemoryBarrier();")?;
4586 }
4587 writeln!(self.out, "{level}barrier();")?;
4588 Ok(())
4589 }
4590
4591 /// Helper function that return the glsl storage access string of [`StorageAccess`](crate::StorageAccess)
4592 ///
4593 /// glsl allows adding both `readonly` and `writeonly` but this means that
4594 /// they can only be used to query information about the resource which isn't what
4595 /// we want here so when storage access is both `LOAD` and `STORE` add no modifiers
4596 fn write_storage_access(&mut self, storage_access: crate::StorageAccess) -> BackendResult {
4597 if storage_access.contains(crate::StorageAccess::ATOMIC) {
4598 return Ok(());
4599 }
4600 if !storage_access.contains(crate::StorageAccess::STORE) {
4601 write!(self.out, "readonly ")?;
4602 }
4603 if !storage_access.contains(crate::StorageAccess::LOAD) {
4604 write!(self.out, "writeonly ")?;
4605 }
4606 Ok(())
4607 }
4608
    /// Helper method used to produce the reflection info that's returned to the user
    ///
    /// Gathers, for the entry point being written: the texture/sampler pairs it
    /// uses, the names of its uniform/storage globals, the varyings collected
    /// during writing, and the flattened list of push-constant items.
    fn collect_reflection_info(&mut self) -> Result<ReflectionInfo, Error> {
        use std::collections::hash_map::Entry;
        // Only the analysis info of the entry point being written matters here.
        let info = self.info.get_entry_point(self.entry_point_idx as usize);
        let mut texture_mapping = crate::FastHashMap::default();
        let mut uniforms = crate::FastHashMap::default();

        // First pass: record every (texture, sampler) pair the entry point samples with.
        for sampling in info.sampling_set.iter() {
            let tex_name = self.reflection_names_globals[&sampling.image].clone();

            match texture_mapping.entry(tex_name) {
                Entry::Vacant(v) => {
                    v.insert(TextureMapping {
                        texture: sampling.image,
                        sampler: Some(sampling.sampler),
                    });
                }
                Entry::Occupied(e) => {
                    // A texture sampled with two different samplers can't be
                    // expressed in this mapping; report it as an error.
                    if e.get().sampler != Some(sampling.sampler) {
                        log::error!("Conflicting samplers for {}", e.key());
                        return Err(Error::ImageMultipleSamplers);
                    }
                }
            }
        }

        let mut push_constant_info = None;
        // Second pass: classify the remaining globals used by the entry point.
        for (handle, var) in self.module.global_variables.iter() {
            // Skip globals the entry point never touches.
            if info[handle].is_empty() {
                continue;
            }
            match self.module.types[var.ty].inner {
                TypeInner::Image { .. } => {
                    let tex_name = self.reflection_names_globals[&handle].clone();
                    match texture_mapping.entry(tex_name) {
                        Entry::Vacant(v) => {
                            // Texture used without a sampler; record it with
                            // `sampler: None`.
                            v.insert(TextureMapping {
                                texture: handle,
                                sampler: None,
                            });
                        }
                        Entry::Occupied(_) => {
                            // already used with a sampler, do nothing
                        }
                    }
                }
                _ => match var.space {
                    crate::AddressSpace::Uniform | crate::AddressSpace::Storage { .. } => {
                        let name = self.reflection_names_globals[&handle].clone();
                        uniforms.insert(handle, name);
                    }
                    crate::AddressSpace::PushConstant => {
                        // Remember the push-constant binding for the flattening
                        // step below; at most one is expected per module here.
                        let name = self.reflection_names_globals[&handle].clone();
                        push_constant_info = Some((name, var.ty));
                    }
                    _ => (),
                },
            }
        }

        let mut push_constant_segments = Vec::new();
        let mut push_constant_items = vec![];

        if let Some((name, ty)) = push_constant_info {
            // We don't have a layouter available to us, so we need to create one.
            //
            // This is potentially a bit wasteful, but the set of types in the program
            // shouldn't be too large.
            let mut layouter = proc::Layouter::default();
            layouter.update(self.module.to_ctx()).unwrap();

            // We start with the name of the binding itself.
            push_constant_segments.push(name);

            // We then recursively collect all the uniform fields of the push constant.
            self.collect_push_constant_items(
                ty,
                &mut push_constant_segments,
                &layouter,
                &mut 0,
                &mut push_constant_items,
            );
        }

        Ok(ReflectionInfo {
            texture_mapping,
            uniforms,
            // The varyings were accumulated while writing; hand them off.
            varying: mem::take(&mut self.varying),
            push_constant_items,
        })
    }
4700
    /// Recursively walks the type of a push-constant binding and appends one
    /// [`PushConstantItem`] per leaf uniform (scalar, vector, or matrix) to
    /// `items`.
    ///
    /// `segments` holds the access-path pieces (binding name, `[i]`, `.field`)
    /// accumulated so far; `offset` is the running byte offset according to
    /// `layouter` and is advanced as leaves are visited.
    fn collect_push_constant_items(
        &mut self,
        ty: Handle<crate::Type>,
        segments: &mut Vec<String>,
        layouter: &proc::Layouter,
        offset: &mut u32,
        items: &mut Vec<PushConstantItem>,
    ) {
        // At this point in the recursion, `segments` contains the path
        // needed to access `ty` from the root.

        // Align the running offset before placing this type.
        let layout = &layouter[ty];
        *offset = layout.alignment.round_up(*offset);
        match self.module.types[ty].inner {
            // All these types map directly to GL uniforms.
            TypeInner::Scalar { .. } | TypeInner::Vector { .. } | TypeInner::Matrix { .. } => {
                // Build the full name, by combining all current segments.
                let name: String = segments.iter().map(String::as_str).collect();
                items.push(PushConstantItem {
                    access_path: name,
                    offset: *offset,
                    ty,
                });
                *offset += layout.size;
            }
            // Arrays are recursed into.
            TypeInner::Array { base, size, .. } => {
                let crate::ArraySize::Constant(count) = size else {
                    unreachable!("Cannot have dynamic arrays in push constants");
                };

                for i in 0..count.get() {
                    // Add the array accessor and recurse.
                    segments.push(format!("[{i}]"));
                    self.collect_push_constant_items(base, segments, layouter, offset, items);
                    segments.pop();
                }

                // Ensure the stride is kept by rounding up to the alignment.
                *offset = layout.alignment.round_up(*offset)
            }
            TypeInner::Struct { ref members, .. } => {
                for (index, member) in members.iter().enumerate() {
                    // Add struct accessor and recurse.
                    segments.push(format!(
                        ".{}",
                        self.names[&NameKey::StructMember(ty, index as u32)]
                    ));
                    self.collect_push_constant_items(member.ty, segments, layouter, offset, items);
                    segments.pop();
                }

                // Ensure ending padding is kept by rounding up to the alignment.
                *offset = layout.alignment.round_up(*offset)
            }
            // Other types are not valid as push-constant contents here.
            _ => unreachable!(),
        }
    }
4759}
4760
/// Structure returned by [`glsl_scalar`]
///
/// It contains both a prefix used in other types and the full type name
struct ScalarString<'a> {
    /// The prefix used to compose other types (e.g. `i`, as in `ivec4`)
    prefix: &'a str,
    /// The name of the scalar type itself (e.g. `int`)
    full: &'a str,
}
4770
4771/// Helper function that returns scalar related strings
4772///
4773/// Check [`ScalarString`] for the information provided
4774///
4775/// # Errors
4776/// If a [`Float`](crate::ScalarKind::Float) with an width that isn't 4 or 8
4777const fn glsl_scalar(scalar: crate::Scalar) -> Result<ScalarString<'static>, Error> {
4778 use crate::ScalarKind as Sk;
4779
4780 Ok(match scalar.kind {
4781 Sk::Sint => ScalarString {
4782 prefix: "i",
4783 full: "int",
4784 },
4785 Sk::Uint => ScalarString {
4786 prefix: "u",
4787 full: "uint",
4788 },
4789 Sk::Float => match scalar.width {
4790 4 => ScalarString {
4791 prefix: "",
4792 full: "float",
4793 },
4794 8 => ScalarString {
4795 prefix: "d",
4796 full: "double",
4797 },
4798 _ => return Err(Error::UnsupportedScalar(scalar)),
4799 },
4800 Sk::Bool => ScalarString {
4801 prefix: "b",
4802 full: "bool",
4803 },
4804 Sk::AbstractInt | Sk::AbstractFloat => {
4805 return Err(Error::UnsupportedScalar(scalar));
4806 }
4807 })
4808}
4809
4810/// Helper function that returns the glsl variable name for a builtin
4811const fn glsl_built_in(built_in: crate::BuiltIn, options: VaryingOptions) -> &'static str {
4812 use crate::BuiltIn as Bi;
4813
4814 match built_in {
4815 Bi::Position { .. } => {
4816 if options.output {
4817 "gl_Position"
4818 } else {
4819 "gl_FragCoord"
4820 }
4821 }
4822 Bi::ViewIndex if options.targeting_webgl => "int(gl_ViewID_OVR)",
4823 Bi::ViewIndex => "gl_ViewIndex",
4824 // vertex
4825 Bi::BaseInstance => "uint(gl_BaseInstance)",
4826 Bi::BaseVertex => "uint(gl_BaseVertex)",
4827 Bi::ClipDistance => "gl_ClipDistance",
4828 Bi::CullDistance => "gl_CullDistance",
4829 Bi::InstanceIndex => {
4830 if options.draw_parameters {
4831 "(uint(gl_InstanceID) + uint(gl_BaseInstanceARB))"
4832 } else {
4833 // Must match FIRST_INSTANCE_BINDING
4834 "(uint(gl_InstanceID) + naga_vs_first_instance)"
4835 }
4836 }
4837 Bi::PointSize => "gl_PointSize",
4838 Bi::VertexIndex => "uint(gl_VertexID)",
4839 Bi::DrawID => "gl_DrawID",
4840 // fragment
4841 Bi::FragDepth => "gl_FragDepth",
4842 Bi::PointCoord => "gl_PointCoord",
4843 Bi::FrontFacing => "gl_FrontFacing",
4844 Bi::PrimitiveIndex => "uint(gl_PrimitiveID)",
4845 Bi::SampleIndex => "gl_SampleID",
4846 Bi::SampleMask => {
4847 if options.output {
4848 "gl_SampleMask"
4849 } else {
4850 "gl_SampleMaskIn"
4851 }
4852 }
4853 // compute
4854 Bi::GlobalInvocationId => "gl_GlobalInvocationID",
4855 Bi::LocalInvocationId => "gl_LocalInvocationID",
4856 Bi::LocalInvocationIndex => "gl_LocalInvocationIndex",
4857 Bi::WorkGroupId => "gl_WorkGroupID",
4858 Bi::WorkGroupSize => "gl_WorkGroupSize",
4859 Bi::NumWorkGroups => "gl_NumWorkGroups",
4860 // subgroup
4861 Bi::NumSubgroups => "gl_NumSubgroups",
4862 Bi::SubgroupId => "gl_SubgroupID",
4863 Bi::SubgroupSize => "gl_SubgroupSize",
4864 Bi::SubgroupInvocationId => "gl_SubgroupInvocationID",
4865 }
4866}
4867
4868/// Helper function that returns the string corresponding to the address space
4869const fn glsl_storage_qualifier(space: crate::AddressSpace) -> Option<&'static str> {
4870 use crate::AddressSpace as As;
4871
4872 match space {
4873 As::Function => None,
4874 As::Private => None,
4875 As::Storage { .. } => Some("buffer"),
4876 As::Uniform => Some("uniform"),
4877 As::Handle => Some("uniform"),
4878 As::WorkGroup => Some("shared"),
4879 As::PushConstant => Some("uniform"),
4880 }
4881}
4882
4883/// Helper function that returns the string corresponding to the glsl interpolation qualifier
4884const fn glsl_interpolation(interpolation: crate::Interpolation) -> &'static str {
4885 use crate::Interpolation as I;
4886
4887 match interpolation {
4888 I::Perspective => "smooth",
4889 I::Linear => "noperspective",
4890 I::Flat => "flat",
4891 }
4892}
4893
4894/// Return the GLSL auxiliary qualifier for the given sampling value.
4895const fn glsl_sampling(sampling: crate::Sampling) -> BackendResult<Option<&'static str>> {
4896 use crate::Sampling as S;
4897
4898 Ok(match sampling {
4899 S::First => return Err(Error::FirstSamplingNotSupported),
4900 S::Center | S::Either => None,
4901 S::Centroid => Some("centroid"),
4902 S::Sample => Some("sample"),
4903 })
4904}
4905
4906/// Helper function that returns the glsl dimension string of [`ImageDimension`](crate::ImageDimension)
4907const fn glsl_dimension(dim: crate::ImageDimension) -> &'static str {
4908 use crate::ImageDimension as IDim;
4909
4910 match dim {
4911 IDim::D1 => "1D",
4912 IDim::D2 => "2D",
4913 IDim::D3 => "3D",
4914 IDim::Cube => "Cube",
4915 }
4916}
4917
/// Helper function that returns the glsl storage format string of [`StorageFormat`](crate::StorageFormat)
///
/// The returned string is the GLSL image format qualifier spelling, suitable
/// for use inside a `layout(...)` declaration.
///
/// # Errors
/// Returns an error for [`Bgra8Unorm`](crate::StorageFormat::Bgra8Unorm),
/// which this backend does not implement.
fn glsl_storage_format(format: crate::StorageFormat) -> Result<&'static str, Error> {
    use crate::StorageFormat as Sf;

    Ok(match format {
        // 8-bit per channel formats
        Sf::R8Unorm => "r8",
        Sf::R8Snorm => "r8_snorm",
        Sf::R8Uint => "r8ui",
        Sf::R8Sint => "r8i",
        // 16-bit per channel formats
        Sf::R16Uint => "r16ui",
        Sf::R16Sint => "r16i",
        Sf::R16Float => "r16f",
        Sf::Rg8Unorm => "rg8",
        Sf::Rg8Snorm => "rg8_snorm",
        Sf::Rg8Uint => "rg8ui",
        Sf::Rg8Sint => "rg8i",
        // 32-bit per channel formats
        Sf::R32Uint => "r32ui",
        Sf::R32Sint => "r32i",
        Sf::R32Float => "r32f",
        Sf::Rg16Uint => "rg16ui",
        Sf::Rg16Sint => "rg16i",
        Sf::Rg16Float => "rg16f",
        Sf::Rgba8Unorm => "rgba8",
        Sf::Rgba8Snorm => "rgba8_snorm",
        Sf::Rgba8Uint => "rgba8ui",
        Sf::Rgba8Sint => "rgba8i",
        // packed formats
        Sf::Rgb10a2Uint => "rgb10_a2ui",
        Sf::Rgb10a2Unorm => "rgb10_a2",
        Sf::Rg11b10Ufloat => "r11f_g11f_b10f",
        // 64-bit and wide formats
        Sf::R64Uint => "r64ui",
        Sf::Rg32Uint => "rg32ui",
        Sf::Rg32Sint => "rg32i",
        Sf::Rg32Float => "rg32f",
        Sf::Rgba16Uint => "rgba16ui",
        Sf::Rgba16Sint => "rgba16i",
        Sf::Rgba16Float => "rgba16f",
        Sf::Rgba32Uint => "rgba32ui",
        Sf::Rgba32Sint => "rgba32i",
        Sf::Rgba32Float => "rgba32f",
        // normalized 16-bit formats
        Sf::R16Unorm => "r16",
        Sf::R16Snorm => "r16_snorm",
        Sf::Rg16Unorm => "rg16",
        Sf::Rg16Snorm => "rg16_snorm",
        Sf::Rgba16Unorm => "rgba16",
        Sf::Rgba16Snorm => "rgba16_snorm",

        // GLSL has no BGRA image format qualifier.
        Sf::Bgra8Unorm => {
            return Err(Error::Custom(
                "Support format BGRA8 is not implemented".into(),
            ))
        }
    })
}
4971
4972fn is_value_init_supported(module: &crate::Module, ty: Handle<crate::Type>) -> bool {
4973 match module.types[ty].inner {
4974 TypeInner::Scalar { .. } | TypeInner::Vector { .. } | TypeInner::Matrix { .. } => true,
4975 TypeInner::Array { base, size, .. } => {
4976 size != crate::ArraySize::Dynamic && is_value_init_supported(module, base)
4977 }
4978 TypeInner::Struct { ref members, .. } => members
4979 .iter()
4980 .all(|member| is_value_init_supported(module, member.ty)),
4981 _ => false,
4982 }
4983}