use crate::{
    render_resource::*,
    renderer::{RenderAdapter, RenderDevice, WgpuWrapper},
    Extract,
};
use alloc::{borrow::Cow, sync::Arc};
use bevy_asset::{AssetEvent, AssetId, Assets, Handle};
use bevy_ecs::{
    message::MessageReader,
    resource::Resource,
    system::{Res, ResMut},
};
use bevy_platform::collections::{HashMap, HashSet};
use bevy_shader::{
    CachedPipelineId, PipelineCacheError, Shader, ShaderCache, ShaderCacheSource, ShaderDefVal,
    ValidateShader,
};
use bevy_tasks::Task;
use bevy_utils::default;
use core::{future::Future, hash::Hash, mem};
use std::sync::{Mutex, PoisonError};
use tracing::error;
use wgpu::{PipelineCompilationOptions, VertexBufferLayout as RawVertexBufferLayout};

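/// The descriptor used to create a cached [`Pipeline`], either for rendering or for compute.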
#[derive(Debug)]
pub enum PipelineDescriptor {
    RenderPipelineDescriptor(Box<RenderPipelineDescriptor>),
    ComputePipelineDescriptor(Box<ComputePipelineDescriptor>),
}

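/// A pipeline created by a [`PipelineCache`], wrapping either a render or a compute pipeline.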
#[derive(Debug)]
pub enum Pipeline {
    RenderPipeline(RenderPipeline),
    ComputePipeline(ComputePipeline),
}

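/// Index of a cached render pipeline in a [`PipelineCache`].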
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, PartialOrd, Ord)]
pub struct CachedRenderPipelineId(CachedPipelineId);

impl CachedRenderPipelineId {
    pub const INVALID: Self = CachedRenderPipelineId(usize::MAX);

    #[inline]
    pub fn id(&self) -> usize {
        self.0
    }
}

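/// Index of a cached compute pipeline in a [`PipelineCache`].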
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
pub struct CachedComputePipelineId(CachedPipelineId);

impl CachedComputePipelineId {
    pub const INVALID: Self = CachedComputePipelineId(usize::MAX);

    #[inline]
    pub fn id(&self) -> usize {
        self.0
    }
}

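/// A pipeline tracked by a [`PipelineCache`]: the descriptor it was created from together with
/// its current compilation state.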
pub struct CachedPipeline {
    pub descriptor: PipelineDescriptor,
    pub state: CachedPipelineState,
}

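/// State of a cached pipeline inserted into a [`PipelineCache`].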
#[cfg_attr(
    not(target_arch = "wasm32"),
    expect(
        clippy::large_enum_variant,
        reason = "See https://github.com/bevyengine/bevy/issues/19220"
    )
)]
#[derive(Debug)]
pub enum CachedPipelineState {
    Queued,
    Creating(Task<Result<Pipeline, PipelineCacheError>>),
    Ok(Pipeline),
    Err(PipelineCacheError),
}

impl CachedPipelineState {
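    /// Convenience method to "unwrap" a pipeline state into its underlying [`Pipeline`].
    ///
    /// # Panics
    ///
    /// Panics if the pipeline is still queued, still being created, or failed to compile.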
    pub fn unwrap(&self) -> &Pipeline {
        match self {
            CachedPipelineState::Ok(pipeline) => pipeline,
            CachedPipelineState::Queued => {
                panic!("Pipeline has not been compiled yet. It is still in the 'Queued' state.")
            }
            CachedPipelineState::Creating(..) => {
                panic!("Pipeline has not been compiled yet. It is still in the 'Creating' state.")
            }
            CachedPipelineState::Err(err) => panic!("{}", err),
        }
    }
}

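/// Key for a [`LayoutCache`] entry: the ids of the bind group layouts and the push constant
/// ranges used to create a pipeline layout.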
type LayoutCacheKey = (Vec<BindGroupLayoutId>, Vec<PushConstantRange>);
#[derive(Default)]
struct LayoutCache {
    layouts: HashMap<LayoutCacheKey, Arc<WgpuWrapper<PipelineLayout>>>,
}

impl LayoutCache {
    fn get(
        &mut self,
        render_device: &RenderDevice,
        bind_group_layouts: &[BindGroupLayout],
        push_constant_ranges: Vec<PushConstantRange>,
    ) -> Arc<WgpuWrapper<PipelineLayout>> {
        let bind_group_ids = bind_group_layouts.iter().map(BindGroupLayout::id).collect();
        self.layouts
            .entry((bind_group_ids, push_constant_ranges))
            .or_insert_with_key(|(_, push_constant_ranges)| {
                let bind_group_layouts = bind_group_layouts
                    .iter()
                    .map(BindGroupLayout::value)
                    .collect::<Vec<_>>();
                Arc::new(WgpuWrapper::new(render_device.create_pipeline_layout(
                    &PipelineLayoutDescriptor {
                        bind_group_layouts: &bind_group_layouts,
                        push_constant_ranges,
                        ..default()
                    },
                )))
            })
            .clone()
    }
}

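/// Compiles a preprocessed shader source into a [`ShaderModule`], surfacing any validation error
/// reported by `wgpu` as a [`PipelineCacheError::CreateShaderModule`].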
#[expect(
    clippy::result_large_err,
    reason = "See https://github.com/bevyengine/bevy/issues/19220"
)]
fn load_module(
    render_device: &RenderDevice,
    shader_source: ShaderCacheSource,
    validate_shader: &ValidateShader,
) -> Result<WgpuWrapper<ShaderModule>, PipelineCacheError> {
    let shader_source = match shader_source {
        #[cfg(feature = "shader_format_spirv")]
        ShaderCacheSource::SpirV(data) => wgpu::util::make_spirv(data),
        #[cfg(not(feature = "shader_format_spirv"))]
        ShaderCacheSource::SpirV(_) => {
            unimplemented!("Enable feature \"shader_format_spirv\" to use SPIR-V shaders")
        }
        ShaderCacheSource::Wgsl(src) => ShaderSource::Wgsl(Cow::Owned(src)),
        #[cfg(not(feature = "decoupled_naga"))]
        ShaderCacheSource::Naga(src) => ShaderSource::Naga(Cow::Owned(src)),
    };
    let module_descriptor = ShaderModuleDescriptor {
        label: None,
        source: shader_source,
    };

    render_device
        .wgpu_device()
        .push_error_scope(wgpu::ErrorFilter::Validation);

    let shader_module = WgpuWrapper::new(match validate_shader {
        ValidateShader::Enabled => {
            render_device.create_and_validate_shader_module(module_descriptor)
        }
        // SAFETY: the caller has explicitly opted out of shader validation.
        ValidateShader::Disabled => unsafe {
            render_device.create_shader_module(module_descriptor)
        },
    });

    let error = render_device.wgpu_device().pop_error_scope();

    // `now_or_never` only catches errors that the backend reports synchronously; where the error
    // scope resolves asynchronously (for example on WebGPU), a validation failure is not
    // observed here.
    if let Some(Some(wgpu::Error::Validation { description, .. })) =
        bevy_tasks::futures::now_or_never(error)
    {
        return Err(PipelineCacheError::CreateShaderModule(description));
    }

    Ok(shader_module)
}

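/// Cache of bind group layouts, keyed by the descriptor used to create them, so that identical
/// layouts are only created once on the device.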
#[derive(Default)]
struct BindGroupLayoutCache {
    bgls: HashMap<BindGroupLayoutDescriptor, BindGroupLayout>,
}

impl BindGroupLayoutCache {
    fn get(
        &mut self,
        render_device: &RenderDevice,
        descriptor: BindGroupLayoutDescriptor,
    ) -> BindGroupLayout {
        self.bgls
            .entry(descriptor)
            .or_insert_with_key(|descriptor| {
                render_device
                    .create_bind_group_layout(descriptor.label.as_ref(), &descriptor.entries)
            })
            .clone()
    }
}

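/// Cache for render and compute pipelines.
///
/// The cache deduplicates the creation of pipeline layouts, bind group layouts and shader
/// modules, and tracks the compilation state of every pipeline queued into it. Pipelines are
/// queued with [`queue_render_pipeline()`](PipelineCache::queue_render_pipeline) or
/// [`queue_compute_pipeline()`](PipelineCache::queue_compute_pipeline) and are created, possibly
/// asynchronously, when [`process_queue()`](PipelineCache::process_queue) runs.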
#[derive(Resource)]
pub struct PipelineCache {
    layout_cache: Arc<Mutex<LayoutCache>>,
    bindgroup_layout_cache: Arc<Mutex<BindGroupLayoutCache>>,
    shader_cache: Arc<Mutex<ShaderCache<WgpuWrapper<ShaderModule>, RenderDevice>>>,
    device: RenderDevice,
    pipelines: Vec<CachedPipeline>,
    waiting_pipelines: HashSet<CachedPipelineId>,
    new_pipelines: Mutex<Vec<CachedPipeline>>,
    global_shader_defs: Vec<ShaderDefVal>,
    synchronous_pipeline_compilation: bool,
}

impl PipelineCache {
    pub fn pipelines(&self) -> impl Iterator<Item = &CachedPipeline> {
        self.pipelines.iter()
    }

    pub fn waiting_pipelines(&self) -> impl Iterator<Item = CachedPipelineId> + '_ {
        self.waiting_pipelines.iter().copied()
    }

    pub fn new(
        device: RenderDevice,
        render_adapter: RenderAdapter,
        synchronous_pipeline_compilation: bool,
    ) -> Self {
        let mut global_shader_defs = Vec::new();
        #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))]
        {
            global_shader_defs.push("NO_ARRAY_TEXTURES_SUPPORT".into());
            global_shader_defs.push("NO_CUBE_ARRAY_TEXTURES_SUPPORT".into());
            global_shader_defs.push("SIXTEEN_BYTE_ALIGNMENT".into());
        }

        if cfg!(target_abi = "sim") {
            global_shader_defs.push("NO_CUBE_ARRAY_TEXTURES_SUPPORT".into());
        }

        global_shader_defs.push(ShaderDefVal::UInt(
            String::from("AVAILABLE_STORAGE_BUFFER_BINDINGS"),
            device.limits().max_storage_buffers_per_shader_stage,
        ));

        Self {
            shader_cache: Arc::new(Mutex::new(ShaderCache::new(
                device.features(),
                render_adapter.get_downlevel_capabilities().flags,
                load_module,
            ))),
            device,
            layout_cache: default(),
            bindgroup_layout_cache: default(),
            waiting_pipelines: default(),
            new_pipelines: default(),
            pipelines: default(),
            global_shader_defs,
            synchronous_pipeline_compilation,
        }
    }

    #[inline]
    pub fn get_render_pipeline_state(&self, id: CachedRenderPipelineId) -> &CachedPipelineState {
        self.pipelines
            .get(id.0)
            .map_or(&CachedPipelineState::Queued, |pipeline| &pipeline.state)
    }

    #[inline]
    pub fn get_compute_pipeline_state(&self, id: CachedComputePipelineId) -> &CachedPipelineState {
        self.pipelines
            .get(id.0)
            .map_or(&CachedPipelineState::Queued, |pipeline| &pipeline.state)
    }

    #[inline]
    pub fn get_render_pipeline_descriptor(
        &self,
        id: CachedRenderPipelineId,
    ) -> &RenderPipelineDescriptor {
        match &self.pipelines[id.0].descriptor {
            PipelineDescriptor::RenderPipelineDescriptor(descriptor) => descriptor,
            PipelineDescriptor::ComputePipelineDescriptor(_) => unreachable!(),
        }
    }

    #[inline]
    pub fn get_compute_pipeline_descriptor(
        &self,
        id: CachedComputePipelineId,
    ) -> &ComputePipelineDescriptor {
        match &self.pipelines[id.0].descriptor {
            PipelineDescriptor::RenderPipelineDescriptor(_) => unreachable!(),
            PipelineDescriptor::ComputePipelineDescriptor(descriptor) => descriptor,
        }
    }

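    /// Try to retrieve a render pipeline from the cache with the given id.
    ///
    /// Returns `None` if the pipeline has not finished compiling yet or failed to compile.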
    #[inline]
    pub fn get_render_pipeline(&self, id: CachedRenderPipelineId) -> Option<&RenderPipeline> {
        if let CachedPipelineState::Ok(Pipeline::RenderPipeline(pipeline)) =
            &self.pipelines.get(id.0)?.state
        {
            Some(pipeline)
        } else {
            None
        }
    }

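    /// Wait for a render pipeline to finish compiling, blocking the current thread if necessary.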
    #[inline]
    pub fn block_on_render_pipeline(&mut self, id: CachedRenderPipelineId) {
        if self.pipelines.len() <= id.0 {
            self.process_queue();
        }

        let state = &mut self.pipelines[id.0].state;
        if let CachedPipelineState::Creating(task) = state {
            *state = match bevy_tasks::block_on(task) {
                Ok(p) => CachedPipelineState::Ok(p),
                Err(e) => CachedPipelineState::Err(e),
            };
        }
    }

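    /// Try to retrieve a compute pipeline from the cache with the given id.
    ///
    /// Returns `None` if the pipeline has not finished compiling yet or failed to compile.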
    #[inline]
    pub fn get_compute_pipeline(&self, id: CachedComputePipelineId) -> Option<&ComputePipeline> {
        if let CachedPipelineState::Ok(Pipeline::ComputePipeline(pipeline)) =
            &self.pipelines.get(id.0)?.state
        {
            Some(pipeline)
        } else {
            None
        }
    }

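    /// Insert a render pipeline into the cache and queue its creation.
    ///
    /// The returned [`CachedRenderPipelineId`] can be used to look the pipeline up later with
    /// [`get_render_pipeline()`](PipelineCache::get_render_pipeline), once
    /// [`process_queue()`](PipelineCache::process_queue) has compiled it.
    ///
    /// A minimal usage sketch; the `pipeline_cache` reference and the `descriptor` value are
    /// assumed to come from the surrounding system:
    ///
    /// ```ignore
    /// // Queue the pipeline. Compilation is asynchronous unless the cache was created with
    /// // synchronous pipeline compilation enabled.
    /// let pipeline_id = pipeline_cache.queue_render_pipeline(descriptor);
    ///
    /// // Later (for example in a render node), the pipeline may or may not be ready yet:
    /// if let Some(pipeline) = pipeline_cache.get_render_pipeline(pipeline_id) {
    ///     // record draw calls with `pipeline`
    /// }
    /// ```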
    pub fn queue_render_pipeline(
        &self,
        descriptor: RenderPipelineDescriptor,
    ) -> CachedRenderPipelineId {
        let mut new_pipelines = self
            .new_pipelines
            .lock()
            .unwrap_or_else(PoisonError::into_inner);
        let id = CachedRenderPipelineId(self.pipelines.len() + new_pipelines.len());
        new_pipelines.push(CachedPipeline {
            descriptor: PipelineDescriptor::RenderPipelineDescriptor(Box::new(descriptor)),
            state: CachedPipelineState::Queued,
        });
        id
    }

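    /// Insert a compute pipeline into the cache and queue its creation.
    ///
    /// The returned [`CachedComputePipelineId`] can be used to look the pipeline up later with
    /// [`get_compute_pipeline()`](PipelineCache::get_compute_pipeline), once
    /// [`process_queue()`](PipelineCache::process_queue) has compiled it.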
    pub fn queue_compute_pipeline(
        &self,
        descriptor: ComputePipelineDescriptor,
    ) -> CachedComputePipelineId {
        let mut new_pipelines = self
            .new_pipelines
            .lock()
            .unwrap_or_else(PoisonError::into_inner);
        let id = CachedComputePipelineId(self.pipelines.len() + new_pipelines.len());
        new_pipelines.push(CachedPipeline {
            descriptor: PipelineDescriptor::ComputePipelineDescriptor(Box::new(descriptor)),
            state: CachedPipelineState::Queued,
        });
        id
    }

    pub fn get_bind_group_layout(
        &self,
        bind_group_layout_descriptor: &BindGroupLayoutDescriptor,
    ) -> BindGroupLayout {
        self.bindgroup_layout_cache
            .lock()
            .unwrap()
            .get(&self.device, bind_group_layout_descriptor.clone())
    }

    fn set_shader(&mut self, id: AssetId<Shader>, shader: Shader) {
        let mut shader_cache = self.shader_cache.lock().unwrap();
        let pipelines_to_queue = shader_cache.set_shader(id, shader);
        for cached_pipeline in pipelines_to_queue {
            self.pipelines[cached_pipeline].state = CachedPipelineState::Queued;
            self.waiting_pipelines.insert(cached_pipeline);
        }
    }

    fn remove_shader(&mut self, shader: AssetId<Shader>) {
        let mut shader_cache = self.shader_cache.lock().unwrap();
        let pipelines_to_queue = shader_cache.remove(shader);
        for cached_pipeline in pipelines_to_queue {
            self.pipelines[cached_pipeline].state = CachedPipelineState::Queued;
            self.waiting_pipelines.insert(cached_pipeline);
        }
    }

    fn start_create_render_pipeline(
        &mut self,
        id: CachedPipelineId,
        descriptor: RenderPipelineDescriptor,
    ) -> CachedPipelineState {
        let device = self.device.clone();
        let shader_cache = self.shader_cache.clone();
        let layout_cache = self.layout_cache.clone();
        let mut bindgroup_layout_cache = self.bindgroup_layout_cache.lock().unwrap();
        let bind_group_layout = descriptor
            .layout
            .iter()
            .map(|bind_group_layout_descriptor| {
                bindgroup_layout_cache.get(&self.device, bind_group_layout_descriptor.clone())
            })
            .collect::<Vec<_>>();

        create_pipeline_task(
            async move {
                let mut shader_cache = shader_cache.lock().unwrap();
                let mut layout_cache = layout_cache.lock().unwrap();

                let vertex_module = match shader_cache.get(
                    &device,
                    id,
                    descriptor.vertex.shader.id(),
                    &descriptor.vertex.shader_defs,
                ) {
                    Ok(module) => module,
                    Err(err) => return Err(err),
                };

                let fragment_module = match &descriptor.fragment {
                    Some(fragment) => {
                        match shader_cache.get(
                            &device,
                            id,
                            fragment.shader.id(),
                            &fragment.shader_defs,
                        ) {
                            Ok(module) => Some(module),
                            Err(err) => return Err(err),
                        }
                    }
                    None => None,
                };

                let layout =
                    if descriptor.layout.is_empty() && descriptor.push_constant_ranges.is_empty() {
                        None
                    } else {
                        Some(layout_cache.get(
                            &device,
                            &bind_group_layout,
                            descriptor.push_constant_ranges.to_vec(),
                        ))
                    };

                drop((shader_cache, layout_cache));

                let vertex_buffer_layouts = descriptor
                    .vertex
                    .buffers
                    .iter()
                    .map(|layout| RawVertexBufferLayout {
                        array_stride: layout.array_stride,
                        attributes: &layout.attributes,
                        step_mode: layout.step_mode,
                    })
                    .collect::<Vec<_>>();

                let fragment_data = descriptor.fragment.as_ref().map(|fragment| {
                    (
                        fragment_module.unwrap(),
                        fragment.entry_point.as_deref(),
                        fragment.targets.as_slice(),
                    )
                });

                let compilation_options = PipelineCompilationOptions {
                    constants: &[],
                    zero_initialize_workgroup_memory: descriptor.zero_initialize_workgroup_memory,
                };

                let descriptor = RawRenderPipelineDescriptor {
                    multiview: None,
                    depth_stencil: descriptor.depth_stencil.clone(),
                    label: descriptor.label.as_deref(),
                    layout: layout.as_ref().map(|layout| -> &PipelineLayout { layout }),
                    multisample: descriptor.multisample,
                    primitive: descriptor.primitive,
                    vertex: RawVertexState {
                        buffers: &vertex_buffer_layouts,
                        entry_point: descriptor.vertex.entry_point.as_deref(),
                        module: &vertex_module,
                        compilation_options: compilation_options.clone(),
                    },
                    fragment: fragment_data
                        .as_ref()
                        .map(|(module, entry_point, targets)| RawFragmentState {
                            entry_point: entry_point.as_deref(),
                            module,
                            targets,
                            compilation_options,
                        }),
                    cache: None,
                };

                Ok(Pipeline::RenderPipeline(
                    device.create_render_pipeline(&descriptor),
                ))
            },
            self.synchronous_pipeline_compilation,
        )
    }

    fn start_create_compute_pipeline(
        &mut self,
        id: CachedPipelineId,
        descriptor: ComputePipelineDescriptor,
    ) -> CachedPipelineState {
        let device = self.device.clone();
        let shader_cache = self.shader_cache.clone();
        let layout_cache = self.layout_cache.clone();
        let mut bindgroup_layout_cache = self.bindgroup_layout_cache.lock().unwrap();
        let bind_group_layout = descriptor
            .layout
            .iter()
            .map(|bind_group_layout_descriptor| {
                bindgroup_layout_cache.get(&self.device, bind_group_layout_descriptor.clone())
            })
            .collect::<Vec<_>>();

        create_pipeline_task(
            async move {
                let mut shader_cache = shader_cache.lock().unwrap();
                let mut layout_cache = layout_cache.lock().unwrap();

                let compute_module = match shader_cache.get(
                    &device,
                    id,
                    descriptor.shader.id(),
                    &descriptor.shader_defs,
                ) {
                    Ok(module) => module,
                    Err(err) => return Err(err),
                };

                let layout =
                    if descriptor.layout.is_empty() && descriptor.push_constant_ranges.is_empty() {
                        None
                    } else {
                        Some(layout_cache.get(
                            &device,
                            &bind_group_layout,
                            descriptor.push_constant_ranges.to_vec(),
                        ))
                    };

                drop((shader_cache, layout_cache));

                let descriptor = RawComputePipelineDescriptor {
                    label: descriptor.label.as_deref(),
                    layout: layout.as_ref().map(|layout| -> &PipelineLayout { layout }),
                    module: &compute_module,
                    entry_point: descriptor.entry_point.as_deref(),
                    compilation_options: PipelineCompilationOptions {
                        constants: &[],
                        zero_initialize_workgroup_memory: descriptor
                            .zero_initialize_workgroup_memory,
                    },
                    cache: None,
                };

                Ok(Pipeline::ComputePipeline(
                    device.create_compute_pipeline(&descriptor),
                ))
            },
            self.synchronous_pipeline_compilation,
        )
    }

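    /// Process the pipeline queue: start creating all newly queued pipelines and poll pipelines
    /// that are still being compiled asynchronously.
    ///
    /// This is driven by [`PipelineCache::process_pipeline_queue_system`], but can also be called
    /// manually to flush the queue.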
    pub fn process_queue(&mut self) {
        let mut waiting_pipelines = mem::take(&mut self.waiting_pipelines);
        let mut pipelines = mem::take(&mut self.pipelines);

        {
            let mut new_pipelines = self
                .new_pipelines
                .lock()
                .unwrap_or_else(PoisonError::into_inner);
            for new_pipeline in new_pipelines.drain(..) {
                let id = pipelines.len();
                pipelines.push(new_pipeline);
                waiting_pipelines.insert(id);
            }
        }

        for id in waiting_pipelines {
            self.process_pipeline(&mut pipelines[id], id);
        }

        self.pipelines = pipelines;
    }

    fn process_pipeline(&mut self, cached_pipeline: &mut CachedPipeline, id: usize) {
        match &mut cached_pipeline.state {
            CachedPipelineState::Queued => {
                cached_pipeline.state = match &cached_pipeline.descriptor {
                    PipelineDescriptor::RenderPipelineDescriptor(descriptor) => {
                        self.start_create_render_pipeline(id, *descriptor.clone())
                    }
                    PipelineDescriptor::ComputePipelineDescriptor(descriptor) => {
                        self.start_create_compute_pipeline(id, *descriptor.clone())
                    }
                };
            }

            CachedPipelineState::Creating(task) => match bevy_tasks::futures::check_ready(task) {
                Some(Ok(pipeline)) => {
                    cached_pipeline.state = CachedPipelineState::Ok(pipeline);
                    return;
                }
                Some(Err(err)) => cached_pipeline.state = CachedPipelineState::Err(err),
                _ => (),
            },

            CachedPipelineState::Err(err) => match err {
                PipelineCacheError::ShaderNotLoaded(_)
                | PipelineCacheError::ShaderImportNotYetAvailable => {
                    cached_pipeline.state = CachedPipelineState::Queued;
                }

                PipelineCacheError::ProcessShaderError(err) => {
                    let error_detail =
                        err.emit_to_string(&self.shader_cache.lock().unwrap().composer);
                    if std::env::var("VERBOSE_SHADER_ERROR")
                        .is_ok_and(|v| !(v.is_empty() || v == "0" || v == "false"))
                    {
                        error!("{}", pipeline_error_context(cached_pipeline));
                    }
                    error!("failed to process shader error:\n{}", error_detail);
                    return;
                }
                PipelineCacheError::CreateShaderModule(description) => {
                    error!("failed to create shader module: {}", description);
                    return;
                }
            },

            CachedPipelineState::Ok(_) => return,
        }

        // Try again the next time the queue is processed.
        self.waiting_pipelines.insert(id);
    }

    pub(crate) fn process_pipeline_queue_system(mut cache: ResMut<Self>) {
        cache.process_queue();
    }

    pub(crate) fn extract_shaders(
        mut cache: ResMut<Self>,
        shaders: Extract<Res<Assets<Shader>>>,
        mut events: Extract<MessageReader<AssetEvent<Shader>>>,
    ) {
        for event in events.read() {
            #[expect(
                clippy::match_same_arms,
                reason = "LoadedWithDependencies is marked as a TODO, so it's likely this will no longer lint soon."
            )]
            match event {
                AssetEvent::Added { id } | AssetEvent::Modified { id } => {
                    if let Some(shader) = shaders.get(*id) {
                        let mut shader = shader.clone();
                        shader.shader_defs.extend(cache.global_shader_defs.clone());

                        cache.set_shader(*id, shader);
                    }
                }
                AssetEvent::Removed { id } => cache.remove_shader(*id),
                AssetEvent::Unused { .. } => {}
                AssetEvent::LoadedWithDependencies { .. } => {
                    // TODO: handle this event.
                }
            }
        }
    }
}

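/// Builds a short human-readable description (shader path, entry point, and enabled shader defs)
/// of the pipeline whose shader failed to compile, used by the `VERBOSE_SHADER_ERROR` logging path.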
fn pipeline_error_context(cached_pipeline: &CachedPipeline) -> String {
    fn format(
        shader: &Handle<Shader>,
        entry: &Option<Cow<'static, str>>,
        shader_defs: &[ShaderDefVal],
    ) -> String {
        let source = match shader.path() {
            Some(path) => path.path().to_string_lossy().to_string(),
            None => String::new(),
        };
        let entry = match entry {
            Some(entry) => entry.to_string(),
            None => String::new(),
        };
        let shader_defs = shader_defs
            .iter()
            .flat_map(|def| match def {
                ShaderDefVal::Bool(k, v) if *v => Some(k.to_string()),
                ShaderDefVal::Int(k, v) => Some(format!("{k} = {v}")),
                ShaderDefVal::UInt(k, v) => Some(format!("{k} = {v}")),
                _ => None,
            })
            .collect::<Vec<_>>()
            .join(", ");
        format!("{source}:{entry}\nshader defs: {shader_defs}")
    }
    match &cached_pipeline.descriptor {
        PipelineDescriptor::RenderPipelineDescriptor(desc) => {
            let vert = &desc.vertex;
            let vert_str = format(&vert.shader, &vert.entry_point, &vert.shader_defs);
            let Some(frag) = desc.fragment.as_ref() else {
                return vert_str;
            };
            let frag_str = format(&frag.shader, &frag.entry_point, &frag.shader_defs);
            format!("vertex {vert_str}\nfragment {frag_str}")
        }
        PipelineDescriptor::ComputePipelineDescriptor(desc) => {
            format(&desc.shader, &desc.entry_point, &desc.shader_defs)
        }
    }
}

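/// Creates the pipeline, either by spawning the future on the async compute task pool or, when
/// synchronous compilation is requested, by blocking on it immediately. The fallback variant
/// below always blocks on platforms without async pipeline creation (wasm, macOS, or when the
/// `multi_threaded` feature is disabled).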
#[cfg(all(
    not(target_arch = "wasm32"),
    not(target_os = "macos"),
    feature = "multi_threaded"
))]
fn create_pipeline_task(
    task: impl Future<Output = Result<Pipeline, PipelineCacheError>> + Send + 'static,
    sync: bool,
) -> CachedPipelineState {
    if !sync {
        return CachedPipelineState::Creating(bevy_tasks::AsyncComputeTaskPool::get().spawn(task));
    }

    match bevy_tasks::block_on(task) {
        Ok(pipeline) => CachedPipelineState::Ok(pipeline),
        Err(err) => CachedPipelineState::Err(err),
    }
}

#[cfg(any(
    target_arch = "wasm32",
    target_os = "macos",
    not(feature = "multi_threaded")
))]
fn create_pipeline_task(
    task: impl Future<Output = Result<Pipeline, PipelineCacheError>> + Send + 'static,
    _sync: bool,
) -> CachedPipelineState {
    match bevy_tasks::block_on(task) {
        Ok(pipeline) => CachedPipelineState::Ok(pipeline),
        Err(err) => CachedPipelineState::Err(err),
    }
}