use crate::{
    render_resource::*,
    renderer::{RenderAdapter, RenderDevice, WgpuWrapper},
    Extract,
};
use alloc::{borrow::Cow, sync::Arc};
use bevy_asset::{AssetEvent, AssetId, Assets, Handle};
use bevy_ecs::{
    message::MessageReader,
    resource::Resource,
    system::{Res, ResMut},
};
use bevy_platform::collections::{HashMap, HashSet};
use bevy_shader::{
    CachedPipelineId, PipelineCacheError, Shader, ShaderCache, ShaderCacheSource, ShaderDefVal,
    ValidateShader,
};
use bevy_tasks::Task;
use bevy_utils::default;
use core::{future::Future, hash::Hash, mem};
use std::sync::{Mutex, PoisonError};
use tracing::error;
use wgpu::{PipelineCompilationOptions, VertexBufferLayout as RawVertexBufferLayout};

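/// A descriptor for either a render or a compute pipeline, allowing both kinds to be stored in
/// the same [`PipelineCache`].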
#[derive(Debug)]
pub enum PipelineDescriptor {
    RenderPipelineDescriptor(Box<RenderPipelineDescriptor>),
    ComputePipelineDescriptor(Box<ComputePipelineDescriptor>),
}

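/// A GPU pipeline created by the [`PipelineCache`], wrapping either a render or a compute
/// pipeline.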
#[derive(Debug)]
pub enum Pipeline {
    RenderPipeline(RenderPipeline),
    ComputePipeline(ComputePipeline),
}

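/// Index of a cached render pipeline inside a [`PipelineCache`].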
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, PartialOrd, Ord)]
pub struct CachedRenderPipelineId(CachedPipelineId);

impl CachedRenderPipelineId {
    pub const INVALID: Self = CachedRenderPipelineId(usize::MAX);

    #[inline]
    pub fn id(&self) -> usize {
        self.0
    }
}

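/// Index of a cached compute pipeline inside a [`PipelineCache`].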
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
pub struct CachedComputePipelineId(CachedPipelineId);

impl CachedComputePipelineId {
    pub const INVALID: Self = CachedComputePipelineId(usize::MAX);

    #[inline]
    pub fn id(&self) -> usize {
        self.0
    }
}

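/// A pipeline descriptor stored in the [`PipelineCache`], together with the current state of its
/// compilation.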
pub struct CachedPipeline {
    pub descriptor: PipelineDescriptor,
    pub state: CachedPipelineState,
}

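/// The compilation state of a cached pipeline: queued, being created asynchronously, ready, or
/// failed with an error.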
#[cfg_attr(
    not(target_arch = "wasm32"),
    expect(
        clippy::large_enum_variant,
        reason = "See https://github.com/bevyengine/bevy/issues/19220"
    )
)]
#[derive(Debug)]
pub enum CachedPipelineState {
    Queued,
    Creating(Task<Result<Pipeline, PipelineCacheError>>),
    Ok(Pipeline),
    Err(PipelineCacheError),
}

impl CachedPipelineState {
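    /// Convenience method to "unwrap" the state into the compiled [`Pipeline`].
    ///
    /// # Panics
    ///
    /// Panics if the pipeline is still queued or being created, or if its creation failed.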
    pub fn unwrap(&self) -> &Pipeline {
        match self {
            CachedPipelineState::Ok(pipeline) => pipeline,
            CachedPipelineState::Queued => {
                panic!("Pipeline has not been compiled yet. It is still in the 'Queued' state.")
            }
            CachedPipelineState::Creating(..) => {
                panic!("Pipeline has not been compiled yet. It is still in the 'Creating' state.")
            }
            CachedPipelineState::Err(err) => panic!("{}", err),
        }
    }
}

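/// Key used to deduplicate pipeline layouts: the ids of the bind group layouts plus the push
/// constant ranges that make up the layout.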
type LayoutCacheKey = (Vec<BindGroupLayoutId>, Vec<PushConstantRange>);
#[derive(Default)]
struct LayoutCache {
    layouts: HashMap<LayoutCacheKey, Arc<WgpuWrapper<PipelineLayout>>>,
}

impl LayoutCache {
    fn get(
        &mut self,
        render_device: &RenderDevice,
        bind_group_layouts: &[BindGroupLayout],
        push_constant_ranges: Vec<PushConstantRange>,
    ) -> Arc<WgpuWrapper<PipelineLayout>> {
        let bind_group_ids = bind_group_layouts.iter().map(BindGroupLayout::id).collect();
        self.layouts
            .entry((bind_group_ids, push_constant_ranges))
            .or_insert_with_key(|(_, push_constant_ranges)| {
                let bind_group_layouts = bind_group_layouts
                    .iter()
                    .map(BindGroupLayout::value)
                    .collect::<Vec<_>>();
                Arc::new(WgpuWrapper::new(render_device.create_pipeline_layout(
                    &PipelineLayoutDescriptor {
                        bind_group_layouts: &bind_group_layouts,
                        push_constant_ranges,
                        ..default()
                    },
                )))
            })
            .clone()
    }
}

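/// Compiles a [`ShaderCacheSource`] into a wgpu [`ShaderModule`], reporting any validation error
/// raised by the device as a [`PipelineCacheError::CreateShaderModule`].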
#[expect(
    clippy::result_large_err,
    reason = "See https://github.com/bevyengine/bevy/issues/19220"
)]
fn load_module(
    render_device: &RenderDevice,
    shader_source: ShaderCacheSource,
    validate_shader: &ValidateShader,
) -> Result<WgpuWrapper<ShaderModule>, PipelineCacheError> {
    let shader_source = match shader_source {
        #[cfg(feature = "shader_format_spirv")]
        ShaderCacheSource::SpirV(data) => wgpu::util::make_spirv(data),
        #[cfg(not(feature = "shader_format_spirv"))]
        ShaderCacheSource::SpirV(_) => {
            unimplemented!("Enable feature \"shader_format_spirv\" to use SPIR-V shaders")
        }
        ShaderCacheSource::Wgsl(src) => ShaderSource::Wgsl(Cow::Owned(src)),
        #[cfg(not(feature = "decoupled_naga"))]
        ShaderCacheSource::Naga(src) => ShaderSource::Naga(Cow::Owned(src)),
    };
    let module_descriptor = ShaderModuleDescriptor {
        label: None,
        source: shader_source,
    };

    render_device
        .wgpu_device()
        .push_error_scope(wgpu::ErrorFilter::Validation);

    let shader_module = WgpuWrapper::new(match validate_shader {
        ValidateShader::Enabled => {
            render_device.create_and_validate_shader_module(module_descriptor)
        }
        ValidateShader::Disabled => unsafe {
            render_device.create_shader_module(module_descriptor)
        },
    });

    let error = render_device.wgpu_device().pop_error_scope();

    if let Some(Some(wgpu::Error::Validation { description, .. })) =
        bevy_tasks::futures::now_or_never(error)
    {
        return Err(PipelineCacheError::CreateShaderModule(description));
    }

    Ok(shader_module)
}

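/// Cache for render and compute pipelines.
///
/// Pipelines are queued with [`PipelineCache::queue_render_pipeline`] or
/// [`PipelineCache::queue_compute_pipeline`], compiled (possibly asynchronously) when
/// [`PipelineCache::process_queue`] runs, and can then be retrieved by their cached id once
/// ready.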
#[derive(Resource)]
pub struct PipelineCache {
    layout_cache: Arc<Mutex<LayoutCache>>,
    shader_cache: Arc<Mutex<ShaderCache<WgpuWrapper<ShaderModule>, RenderDevice>>>,
    device: RenderDevice,
    pipelines: Vec<CachedPipeline>,
    waiting_pipelines: HashSet<CachedPipelineId>,
    new_pipelines: Mutex<Vec<CachedPipeline>>,
    global_shader_defs: Vec<ShaderDefVal>,
    synchronous_pipeline_compilation: bool,
}

impl PipelineCache {
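    /// Returns an iterator over all pipelines in the cache, in whatever state they are in.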
    pub fn pipelines(&self) -> impl Iterator<Item = &CachedPipeline> {
        self.pipelines.iter()
    }

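    /// Returns the ids of all pipelines that are still waiting to be processed.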
    pub fn waiting_pipelines(&self) -> impl Iterator<Item = CachedPipelineId> + '_ {
        self.waiting_pipelines.iter().copied()
    }

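    /// Creates a new pipeline cache associated with the given render device and adapter.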
    pub fn new(
        device: RenderDevice,
        render_adapter: RenderAdapter,
        synchronous_pipeline_compilation: bool,
    ) -> Self {
        let mut global_shader_defs = Vec::new();
        #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))]
        {
            global_shader_defs.push("NO_ARRAY_TEXTURES_SUPPORT".into());
            global_shader_defs.push("NO_CUBE_ARRAY_TEXTURES_SUPPORT".into());
            global_shader_defs.push("SIXTEEN_BYTE_ALIGNMENT".into());
        }

        if cfg!(target_abi = "sim") {
            global_shader_defs.push("NO_CUBE_ARRAY_TEXTURES_SUPPORT".into());
        }

        global_shader_defs.push(ShaderDefVal::UInt(
            String::from("AVAILABLE_STORAGE_BUFFER_BINDINGS"),
            device.limits().max_storage_buffers_per_shader_stage,
        ));

        Self {
            shader_cache: Arc::new(Mutex::new(ShaderCache::new(
                device.features(),
                render_adapter.get_downlevel_capabilities().flags,
                load_module,
            ))),
            device,
            layout_cache: default(),
            waiting_pipelines: default(),
            new_pipelines: default(),
            pipelines: default(),
            global_shader_defs,
            synchronous_pipeline_compilation,
        }
    }

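    /// Gets the current state of the cached render pipeline with the given id.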
    #[inline]
    pub fn get_render_pipeline_state(&self, id: CachedRenderPipelineId) -> &CachedPipelineState {
        self.pipelines
            .get(id.0)
            .map_or(&CachedPipelineState::Queued, |pipeline| &pipeline.state)
    }

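    /// Gets the current state of the cached compute pipeline with the given id.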
    #[inline]
    pub fn get_compute_pipeline_state(&self, id: CachedComputePipelineId) -> &CachedPipelineState {
        self.pipelines
            .get(id.0)
            .map_or(&CachedPipelineState::Queued, |pipeline| &pipeline.state)
    }

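    /// Returns the descriptor the cached render pipeline was queued with.
    ///
    /// # Panics
    ///
    /// Panics if `id` does not refer to a queued render pipeline.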
    #[inline]
    pub fn get_render_pipeline_descriptor(
        &self,
        id: CachedRenderPipelineId,
    ) -> &RenderPipelineDescriptor {
        match &self.pipelines[id.0].descriptor {
            PipelineDescriptor::RenderPipelineDescriptor(descriptor) => descriptor,
            PipelineDescriptor::ComputePipelineDescriptor(_) => unreachable!(),
        }
    }

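    /// Returns the descriptor the cached compute pipeline was queued with.
    ///
    /// # Panics
    ///
    /// Panics if `id` does not refer to a queued compute pipeline.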
    #[inline]
    pub fn get_compute_pipeline_descriptor(
        &self,
        id: CachedComputePipelineId,
    ) -> &ComputePipelineDescriptor {
        match &self.pipelines[id.0].descriptor {
            PipelineDescriptor::RenderPipelineDescriptor(_) => unreachable!(),
            PipelineDescriptor::ComputePipelineDescriptor(descriptor) => descriptor,
        }
    }

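    /// Tries to retrieve a compiled render pipeline, returning `None` if it is not ready yet or
    /// if its creation failed.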
    #[inline]
    pub fn get_render_pipeline(&self, id: CachedRenderPipelineId) -> Option<&RenderPipeline> {
        if let CachedPipelineState::Ok(Pipeline::RenderPipeline(pipeline)) =
            &self.pipelines.get(id.0)?.state
        {
            Some(pipeline)
        } else {
            None
        }
    }

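    /// Blocks until the render pipeline with the given id has finished compiling, if it is
    /// currently in the [`CachedPipelineState::Creating`] state.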
    #[inline]
    pub fn block_on_render_pipeline(&mut self, id: CachedRenderPipelineId) {
        if self.pipelines.len() <= id.0 {
            self.process_queue();
        }

        let state = &mut self.pipelines[id.0].state;
        if let CachedPipelineState::Creating(task) = state {
            *state = match bevy_tasks::block_on(task) {
                Ok(p) => CachedPipelineState::Ok(p),
                Err(e) => CachedPipelineState::Err(e),
            };
        }
    }

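    /// Tries to retrieve a compiled compute pipeline, returning `None` if it is not ready yet or
    /// if its creation failed.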
    #[inline]
    pub fn get_compute_pipeline(&self, id: CachedComputePipelineId) -> Option<&ComputePipeline> {
        if let CachedPipelineState::Ok(Pipeline::ComputePipeline(pipeline)) =
            &self.pipelines.get(id.0)?.state
        {
            Some(pipeline)
        } else {
            None
        }
    }

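    /// Queues a render pipeline for creation and returns an id that can later be used to look it
    /// up with [`get_render_pipeline`](PipelineCache::get_render_pipeline).
    ///
    /// Creation does not start until the next call to
    /// [`process_queue`](PipelineCache::process_queue).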
    pub fn queue_render_pipeline(
        &self,
        descriptor: RenderPipelineDescriptor,
    ) -> CachedRenderPipelineId {
        let mut new_pipelines = self
            .new_pipelines
            .lock()
            .unwrap_or_else(PoisonError::into_inner);
        let id = CachedRenderPipelineId(self.pipelines.len() + new_pipelines.len());
        new_pipelines.push(CachedPipeline {
            descriptor: PipelineDescriptor::RenderPipelineDescriptor(Box::new(descriptor)),
            state: CachedPipelineState::Queued,
        });
        id
    }

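    /// Queues a compute pipeline for creation and returns an id that can later be used to look it
    /// up with [`get_compute_pipeline`](PipelineCache::get_compute_pipeline).
    ///
    /// Creation does not start until the next call to
    /// [`process_queue`](PipelineCache::process_queue).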
    pub fn queue_compute_pipeline(
        &self,
        descriptor: ComputePipelineDescriptor,
    ) -> CachedComputePipelineId {
        let mut new_pipelines = self
            .new_pipelines
            .lock()
            .unwrap_or_else(PoisonError::into_inner);
        let id = CachedComputePipelineId(self.pipelines.len() + new_pipelines.len());
        new_pipelines.push(CachedPipeline {
            descriptor: PipelineDescriptor::ComputePipelineDescriptor(Box::new(descriptor)),
            state: CachedPipelineState::Queued,
        });
        id
    }

    fn set_shader(&mut self, id: AssetId<Shader>, shader: Shader) {
        let mut shader_cache = self.shader_cache.lock().unwrap();
        let pipelines_to_queue = shader_cache.set_shader(id, shader);
        for cached_pipeline in pipelines_to_queue {
            self.pipelines[cached_pipeline].state = CachedPipelineState::Queued;
            self.waiting_pipelines.insert(cached_pipeline);
        }
    }

    fn remove_shader(&mut self, shader: AssetId<Shader>) {
        let mut shader_cache = self.shader_cache.lock().unwrap();
        let pipelines_to_queue = shader_cache.remove(shader);
        for cached_pipeline in pipelines_to_queue {
            self.pipelines[cached_pipeline].state = CachedPipelineState::Queued;
            self.waiting_pipelines.insert(cached_pipeline);
        }
    }

    fn start_create_render_pipeline(
        &mut self,
        id: CachedPipelineId,
        descriptor: RenderPipelineDescriptor,
    ) -> CachedPipelineState {
        let device = self.device.clone();
        let shader_cache = self.shader_cache.clone();
        let layout_cache = self.layout_cache.clone();

        create_pipeline_task(
            async move {
                let mut shader_cache = shader_cache.lock().unwrap();
                let mut layout_cache = layout_cache.lock().unwrap();

                let vertex_module = match shader_cache.get(
                    &device,
                    id,
                    descriptor.vertex.shader.id(),
                    &descriptor.vertex.shader_defs,
                ) {
                    Ok(module) => module,
                    Err(err) => return Err(err),
                };

                let fragment_module = match &descriptor.fragment {
                    Some(fragment) => {
                        match shader_cache.get(
                            &device,
                            id,
                            fragment.shader.id(),
                            &fragment.shader_defs,
                        ) {
                            Ok(module) => Some(module),
                            Err(err) => return Err(err),
                        }
                    }
                    None => None,
                };

                let layout =
                    if descriptor.layout.is_empty() && descriptor.push_constant_ranges.is_empty() {
                        None
                    } else {
                        Some(layout_cache.get(
                            &device,
                            &descriptor.layout,
                            descriptor.push_constant_ranges.to_vec(),
                        ))
                    };

                drop((shader_cache, layout_cache));

                let vertex_buffer_layouts = descriptor
                    .vertex
                    .buffers
                    .iter()
                    .map(|layout| RawVertexBufferLayout {
                        array_stride: layout.array_stride,
                        attributes: &layout.attributes,
                        step_mode: layout.step_mode,
                    })
                    .collect::<Vec<_>>();

                let fragment_data = descriptor.fragment.as_ref().map(|fragment| {
                    (
                        fragment_module.unwrap(),
                        fragment.entry_point.as_deref(),
                        fragment.targets.as_slice(),
                    )
                });

                let compilation_options = PipelineCompilationOptions {
                    constants: &[],
                    zero_initialize_workgroup_memory: descriptor.zero_initialize_workgroup_memory,
                };

                let descriptor = RawRenderPipelineDescriptor {
                    multiview: None,
                    depth_stencil: descriptor.depth_stencil.clone(),
                    label: descriptor.label.as_deref(),
                    layout: layout.as_ref().map(|layout| -> &PipelineLayout { layout }),
                    multisample: descriptor.multisample,
                    primitive: descriptor.primitive,
                    vertex: RawVertexState {
                        buffers: &vertex_buffer_layouts,
                        entry_point: descriptor.vertex.entry_point.as_deref(),
                        module: &vertex_module,
                        compilation_options: compilation_options.clone(),
                    },
                    fragment: fragment_data
                        .as_ref()
                        .map(|(module, entry_point, targets)| RawFragmentState {
                            entry_point: entry_point.as_deref(),
                            module,
                            targets,
                            compilation_options,
                        }),
                    cache: None,
                };

                Ok(Pipeline::RenderPipeline(
                    device.create_render_pipeline(&descriptor),
                ))
            },
            self.synchronous_pipeline_compilation,
        )
    }

    fn start_create_compute_pipeline(
        &mut self,
        id: CachedPipelineId,
        descriptor: ComputePipelineDescriptor,
    ) -> CachedPipelineState {
        let device = self.device.clone();
        let shader_cache = self.shader_cache.clone();
        let layout_cache = self.layout_cache.clone();

        create_pipeline_task(
            async move {
                let mut shader_cache = shader_cache.lock().unwrap();
                let mut layout_cache = layout_cache.lock().unwrap();

                let compute_module = match shader_cache.get(
                    &device,
                    id,
                    descriptor.shader.id(),
                    &descriptor.shader_defs,
                ) {
                    Ok(module) => module,
                    Err(err) => return Err(err),
                };

                let layout =
                    if descriptor.layout.is_empty() && descriptor.push_constant_ranges.is_empty() {
                        None
                    } else {
                        Some(layout_cache.get(
                            &device,
                            &descriptor.layout,
                            descriptor.push_constant_ranges.to_vec(),
                        ))
                    };

                drop((shader_cache, layout_cache));

                let descriptor = RawComputePipelineDescriptor {
                    label: descriptor.label.as_deref(),
                    layout: layout.as_ref().map(|layout| -> &PipelineLayout { layout }),
                    module: &compute_module,
                    entry_point: descriptor.entry_point.as_deref(),
                    compilation_options: PipelineCompilationOptions {
                        constants: &[],
                        zero_initialize_workgroup_memory: descriptor
                            .zero_initialize_workgroup_memory,
                    },
                    cache: None,
                };

                Ok(Pipeline::ComputePipeline(
                    device.create_compute_pipeline(&descriptor),
                ))
            },
            self.synchronous_pipeline_compilation,
        )
    }

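    /// Processes the pipeline queue: newly queued pipelines start compiling, and pipelines whose
    /// asynchronous creation has finished are moved into their final `Ok` or `Err` state.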
    pub fn process_queue(&mut self) {
        let mut waiting_pipelines = mem::take(&mut self.waiting_pipelines);
        let mut pipelines = mem::take(&mut self.pipelines);

        {
            let mut new_pipelines = self
                .new_pipelines
                .lock()
                .unwrap_or_else(PoisonError::into_inner);
            for new_pipeline in new_pipelines.drain(..) {
                let id = pipelines.len();
                pipelines.push(new_pipeline);
                waiting_pipelines.insert(id);
            }
        }

        for id in waiting_pipelines {
            self.process_pipeline(&mut pipelines[id], id);
        }

        self.pipelines = pipelines;
    }

    fn process_pipeline(&mut self, cached_pipeline: &mut CachedPipeline, id: usize) {
        match &mut cached_pipeline.state {
            CachedPipelineState::Queued => {
                cached_pipeline.state = match &cached_pipeline.descriptor {
                    PipelineDescriptor::RenderPipelineDescriptor(descriptor) => {
                        self.start_create_render_pipeline(id, *descriptor.clone())
                    }
                    PipelineDescriptor::ComputePipelineDescriptor(descriptor) => {
                        self.start_create_compute_pipeline(id, *descriptor.clone())
                    }
                };
            }

            CachedPipelineState::Creating(task) => match bevy_tasks::futures::check_ready(task) {
                Some(Ok(pipeline)) => {
                    cached_pipeline.state = CachedPipelineState::Ok(pipeline);
                    return;
                }
                Some(Err(err)) => cached_pipeline.state = CachedPipelineState::Err(err),
                _ => (),
            },

            CachedPipelineState::Err(err) => match err {
                PipelineCacheError::ShaderNotLoaded(_)
                | PipelineCacheError::ShaderImportNotYetAvailable => {
                    cached_pipeline.state = CachedPipelineState::Queued;
                }

                PipelineCacheError::ProcessShaderError(err) => {
                    let error_detail =
                        err.emit_to_string(&self.shader_cache.lock().unwrap().composer);
                    if std::env::var("VERBOSE_SHADER_ERROR")
                        .is_ok_and(|v| !(v.is_empty() || v == "0" || v == "false"))
                    {
                        error!("{}", pipeline_error_context(cached_pipeline));
                    }
                    error!("failed to process shader:\n{}", error_detail);
                    return;
                }
                PipelineCacheError::CreateShaderModule(description) => {
                    error!("failed to create shader module: {}", description);
                    return;
                }
            },

            CachedPipelineState::Ok(_) => return,
        }

        self.waiting_pipelines.insert(id);
    }

    pub(crate) fn process_pipeline_queue_system(mut cache: ResMut<Self>) {
        cache.process_queue();
    }

    pub(crate) fn extract_shaders(
        mut cache: ResMut<Self>,
        shaders: Extract<Res<Assets<Shader>>>,
        mut events: Extract<MessageReader<AssetEvent<Shader>>>,
    ) {
        for event in events.read() {
            #[expect(
                clippy::match_same_arms,
                reason = "LoadedWithDependencies is marked as a TODO, so it's likely this will no longer lint soon."
            )]
            match event {
                AssetEvent::Added { id } | AssetEvent::Modified { id } => {
                    if let Some(shader) = shaders.get(*id) {
                        let mut shader = shader.clone();
                        shader.shader_defs.extend(cache.global_shader_defs.clone());

                        cache.set_shader(*id, shader);
                    }
                }
                AssetEvent::Removed { id } => cache.remove_shader(*id),
                AssetEvent::Unused { .. } => {}
                AssetEvent::LoadedWithDependencies { .. } => {}
            }
        }
    }
}

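/// Builds a human-readable summary of the shaders, entry points, and shader defs used by a
/// pipeline, logged alongside shader errors when the `VERBOSE_SHADER_ERROR` environment variable
/// is set.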
fn pipeline_error_context(cached_pipeline: &CachedPipeline) -> String {
    fn format(
        shader: &Handle<Shader>,
        entry: &Option<Cow<'static, str>>,
        shader_defs: &[ShaderDefVal],
    ) -> String {
        let source = match shader.path() {
            Some(path) => path.path().to_string_lossy().to_string(),
            None => String::new(),
        };
        let entry = match entry {
            Some(entry) => entry.to_string(),
            None => String::new(),
        };
        let shader_defs = shader_defs
            .iter()
            .flat_map(|def| match def {
                ShaderDefVal::Bool(k, v) if *v => Some(k.to_string()),
                ShaderDefVal::Int(k, v) => Some(format!("{k} = {v}")),
                ShaderDefVal::UInt(k, v) => Some(format!("{k} = {v}")),
                _ => None,
            })
            .collect::<Vec<_>>()
            .join(", ");
        format!("{source}:{entry}\nshader defs: {shader_defs}")
    }
    match &cached_pipeline.descriptor {
        PipelineDescriptor::RenderPipelineDescriptor(desc) => {
            let vert = &desc.vertex;
            let vert_str = format(&vert.shader, &vert.entry_point, &vert.shader_defs);
            let Some(frag) = desc.fragment.as_ref() else {
                return vert_str;
            };
            let frag_str = format(&frag.shader, &frag.entry_point, &frag.shader_defs);
            format!("vertex {vert_str}\nfragment {frag_str}")
        }
        PipelineDescriptor::ComputePipelineDescriptor(desc) => {
            format(&desc.shader, &desc.entry_point, &desc.shader_defs)
        }
    }
}

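/// Runs a pipeline creation future, either by spawning it on the async compute task pool or by
/// blocking on it, depending on the target platform, enabled features, and the `sync` flag.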
#[cfg(all(
    not(target_arch = "wasm32"),
    not(target_os = "macos"),
    feature = "multi_threaded"
))]
fn create_pipeline_task(
    task: impl Future<Output = Result<Pipeline, PipelineCacheError>> + Send + 'static,
    sync: bool,
) -> CachedPipelineState {
    if !sync {
        return CachedPipelineState::Creating(bevy_tasks::AsyncComputeTaskPool::get().spawn(task));
    }

    match bevy_tasks::block_on(task) {
        Ok(pipeline) => CachedPipelineState::Ok(pipeline),
        Err(err) => CachedPipelineState::Err(err),
    }
}

#[cfg(any(
    target_arch = "wasm32",
    target_os = "macos",
    not(feature = "multi_threaded")
))]
fn create_pipeline_task(
    task: impl Future<Output = Result<Pipeline, PipelineCacheError>> + Send + 'static,
    _sync: bool,
) -> CachedPipelineState {
    match bevy_tasks::block_on(task) {
        Ok(pipeline) => CachedPipelineState::Ok(pipeline),
        Err(err) => CachedPipelineState::Err(err),
    }
}