// bevy_render/renderer/render_device.rs
use super::RenderQueue;
2use crate::render_resource::{
3 BindGroup, BindGroupLayout, Buffer, ComputePipeline, RawRenderPipelineDescriptor,
4 RenderPipeline, Sampler, Texture,
5};
6use crate::WgpuWrapper;
7use alloc::sync::Arc;
8use bevy_ecs::system::Resource;
9use wgpu::{
10 util::DeviceExt, BindGroupDescriptor, BindGroupEntry, BindGroupLayoutDescriptor,
11 BindGroupLayoutEntry, BufferAsyncError, BufferBindingType, MaintainResult,
12};
13
/// Handle to the GPU device used by the renderer to create rendering and
/// compute resources (buffers, textures, samplers, bind groups, pipelines).
///
/// Cloning is cheap: clones share the same underlying [`wgpu::Device`] via [`Arc`].
#[derive(Resource, Clone)]
pub struct RenderDevice {
    // Shared handle to the raw wgpu device. `WgpuWrapper` is a project-local
    // newtype (see `crate::WgpuWrapper`) — presumably for Send/Sync handling
    // on certain targets; confirm against its definition.
    device: Arc<WgpuWrapper<wgpu::Device>>,
}
19
20impl From<wgpu::Device> for RenderDevice {
21 fn from(device: wgpu::Device) -> Self {
22 Self::new(Arc::new(WgpuWrapper::new(device)))
23 }
24}
25
impl RenderDevice {
    /// Creates a [`RenderDevice`] from an already-wrapped, shareable wgpu device handle.
    pub fn new(device: Arc<WgpuWrapper<wgpu::Device>>) -> Self {
        Self { device }
    }

    /// Returns the [`wgpu::Features`] supported by this device.
    #[inline]
    pub fn features(&self) -> wgpu::Features {
        self.device.features()
    }

    /// Returns the [`wgpu::Limits`] of this device.
    #[inline]
    pub fn limits(&self) -> wgpu::Limits {
        self.device.limits()
    }

    /// Creates a [`wgpu::ShaderModule`] from the given descriptor.
    ///
    /// With the `spirv_shader_passthrough` cargo feature enabled AND the device
    /// reporting [`wgpu::Features::SPIRV_SHADER_PASSTHROUGH`], SPIR-V sources are
    /// handed to the backend directly (unvalidated); all other sources — and all
    /// sources when the feature is off — go through the regular, validated path.
    #[inline]
    pub fn create_shader_module(&self, desc: wgpu::ShaderModuleDescriptor) -> wgpu::ShaderModule {
        // NOTE: exactly one of the two expressions below is compiled in, and it
        // is the tail expression of the function for that configuration. The
        // `match` moves `desc`, so the second call only compiles when the
        // `match` is cfg'd out — keep this shape intact.
        #[cfg(feature = "spirv_shader_passthrough")]
        match &desc.source {
            wgpu::ShaderSource::SpirV(source)
                if self
                    .features()
                    .contains(wgpu::Features::SPIRV_SHADER_PASSTHROUGH) =>
            {
                // SAFETY: the match guard above confirmed the device reports
                // SPIRV_SHADER_PASSTHROUGH support. The SPIR-V itself is passed
                // to the driver unvalidated, which is inherent to this wgpu
                // API: malformed input may misbehave in the backend.
                unsafe {
                    self.device
                        .create_shader_module_spirv(&wgpu::ShaderModuleDescriptorSpirV {
                            label: desc.label,
                            source: source.clone(),
                        })
                }
            }
            _ => self.device.create_shader_module(desc),
        }

        #[cfg(not(feature = "spirv_shader_passthrough"))]
        self.device.create_shader_module(desc)
    }

    /// Polls the underlying device, driving resource cleanup and map callbacks.
    /// `maintain` selects the wait behavior — see [`wgpu::Device::poll`].
    #[inline]
    pub fn poll(&self, maintain: wgpu::Maintain) -> MaintainResult {
        self.device.poll(maintain)
    }

    /// Creates an empty [`wgpu::CommandEncoder`].
    #[inline]
    pub fn create_command_encoder(
        &self,
        desc: &wgpu::CommandEncoderDescriptor,
    ) -> wgpu::CommandEncoder {
        self.device.create_command_encoder(desc)
    }

    /// Creates an empty [`wgpu::RenderBundleEncoder`].
    #[inline]
    pub fn create_render_bundle_encoder(
        &self,
        desc: &wgpu::RenderBundleEncoderDescriptor,
    ) -> wgpu::RenderBundleEncoder {
        self.device.create_render_bundle_encoder(desc)
    }

    /// Creates a new [`BindGroup`] from the given label, layout and entries.
    #[inline]
    pub fn create_bind_group<'a>(
        &self,
        label: impl Into<wgpu::Label<'a>>,
        layout: &'a BindGroupLayout,
        entries: &'a [BindGroupEntry<'a>],
    ) -> BindGroup {
        let wgpu_bind_group = self.device.create_bind_group(&BindGroupDescriptor {
            label: label.into(),
            layout,
            entries,
        });
        BindGroup::from(wgpu_bind_group)
    }

    /// Creates a new [`BindGroupLayout`] from the given label and entries.
    #[inline]
    pub fn create_bind_group_layout<'a>(
        &self,
        label: impl Into<wgpu::Label<'a>>,
        entries: &'a [BindGroupLayoutEntry],
    ) -> BindGroupLayout {
        BindGroupLayout::from(
            self.device
                .create_bind_group_layout(&BindGroupLayoutDescriptor {
                    label: label.into(),
                    entries,
                }),
        )
    }

    /// Creates a [`wgpu::PipelineLayout`] from the given descriptor.
    #[inline]
    pub fn create_pipeline_layout(
        &self,
        desc: &wgpu::PipelineLayoutDescriptor,
    ) -> wgpu::PipelineLayout {
        self.device.create_pipeline_layout(desc)
    }

    /// Creates a [`RenderPipeline`] from the given raw descriptor.
    #[inline]
    pub fn create_render_pipeline(&self, desc: &RawRenderPipelineDescriptor) -> RenderPipeline {
        let wgpu_render_pipeline = self.device.create_render_pipeline(desc);
        RenderPipeline::from(wgpu_render_pipeline)
    }

    /// Creates a [`ComputePipeline`] from the given descriptor.
    #[inline]
    pub fn create_compute_pipeline(
        &self,
        desc: &wgpu::ComputePipelineDescriptor,
    ) -> ComputePipeline {
        let wgpu_compute_pipeline = self.device.create_compute_pipeline(desc);
        ComputePipeline::from(wgpu_compute_pipeline)
    }

    /// Creates an (uninitialized) [`Buffer`] from the given descriptor.
    pub fn create_buffer(&self, desc: &wgpu::BufferDescriptor) -> Buffer {
        let wgpu_buffer = self.device.create_buffer(desc);
        Buffer::from(wgpu_buffer)
    }

    /// Creates a [`Buffer`] initialized with the contents of the init descriptor.
    pub fn create_buffer_with_data(&self, desc: &wgpu::util::BufferInitDescriptor) -> Buffer {
        let wgpu_buffer = self.device.create_buffer_init(desc);
        Buffer::from(wgpu_buffer)
    }

    /// Creates a [`Texture`] and schedules `data` to be uploaded to it via
    /// `render_queue`. `order` selects how layers/mips are laid out in `data`
    /// (see [`wgpu::util::TextureDataOrder`]).
    pub fn create_texture_with_data(
        &self,
        render_queue: &RenderQueue,
        desc: &wgpu::TextureDescriptor,
        order: wgpu::util::TextureDataOrder,
        data: &[u8],
    ) -> Texture {
        let wgpu_texture =
            self.device
                .create_texture_with_data(render_queue.as_ref(), desc, order, data);
        Texture::from(wgpu_texture)
    }

    /// Creates an (uninitialized) [`Texture`] from the given descriptor.
    pub fn create_texture(&self, desc: &wgpu::TextureDescriptor) -> Texture {
        let wgpu_texture = self.device.create_texture(desc);
        Texture::from(wgpu_texture)
    }

    /// Creates a [`Sampler`] from the given descriptor.
    pub fn create_sampler(&self, desc: &wgpu::SamplerDescriptor) -> Sampler {
        let wgpu_sampler = self.device.create_sampler(desc);
        Sampler::from(wgpu_sampler)
    }

    /// Initializes (or reconfigures) `surface` for presentation with this device.
    pub fn configure_surface(&self, surface: &wgpu::Surface, config: &wgpu::SurfaceConfiguration) {
        surface.configure(&self.device, config);
    }

    /// Returns a reference to the raw underlying [`wgpu::Device`].
    pub fn wgpu_device(&self) -> &wgpu::Device {
        &self.device
    }

    /// Requests an asynchronous mapping of the given buffer slice; `callback`
    /// receives the result once the map request resolves (typically driven by
    /// [`Self::poll`] — see `wgpu::BufferSlice::map_async` for the exact
    /// delivery semantics).
    pub fn map_buffer(
        &self,
        buffer: &wgpu::BufferSlice,
        map_mode: wgpu::MapMode,
        callback: impl FnOnce(Result<(), BufferAsyncError>) + Send + 'static,
    ) {
        buffer.map_async(map_mode, callback);
    }

    /// Rounds `row_bytes` up to the nearest multiple of
    /// [`wgpu::COPY_BYTES_PER_ROW_ALIGNMENT`], as required for buffer↔texture
    /// row copies. A multiple (including 0) is returned unchanged.
    pub const fn align_copy_bytes_per_row(row_bytes: usize) -> usize {
        let align = wgpu::COPY_BYTES_PER_ROW_ALIGNMENT as usize;

        // Classic round-up-by-division trick; `row_bytes + align - 1` cannot
        // overflow for any realistic row size.
        let over_aligned = row_bytes + align - 1;

        (over_aligned / align) * align
    }

    /// Returns read-only `Storage` binding if the device supports at least
    /// `buffers_per_shader_stage` storage buffers per shader stage, otherwise
    /// falls back to `Uniform`.
    pub fn get_supported_read_only_binding_type(
        &self,
        buffers_per_shader_stage: u32,
    ) -> BufferBindingType {
        if self.limits().max_storage_buffers_per_shader_stage >= buffers_per_shader_stage {
            BufferBindingType::Storage { read_only: true }
        } else {
            BufferBindingType::Uniform
        }
    }
}
257
#[cfg(test)]
mod tests {
    use super::*;

    /// `align_copy_bytes_per_row` must round up to the next multiple of
    /// `wgpu::COPY_BYTES_PER_ROW_ALIGNMENT`, leaving exact multiples
    /// (including 0) unchanged.
    #[test]
    fn align_copy_bytes_per_row() {
        let align = wgpu::COPY_BYTES_PER_ROW_ALIGNMENT as usize;

        let cases = [(0, 0), (1, align), (align + 1, align * 2), (align, align)];
        for (input, expected) in cases {
            assert_eq!(RenderDevice::align_copy_bytes_per_row(input), expected);
        }
    }
}