// bevy_render/render_resource/buffer.rs
use crate::define_atomic_id;
use crate::renderer::WgpuWrapper;
use alloc::sync::Arc;
use core::ops::{Bound, Deref, RangeBounds};

6define_atomic_id!(BufferId);
7
8#[derive(Clone, Debug)]
9pub struct Buffer {
10 id: BufferId,
11 value: Arc<WgpuWrapper<wgpu::Buffer>>,
12 size: wgpu::BufferAddress,
13}
14
15impl Buffer {
16 #[inline]
17 pub fn id(&self) -> BufferId {
18 self.id
19 }
20
21 pub fn slice(&self, bounds: impl RangeBounds<wgpu::BufferAddress>) -> BufferSlice {
22 let offset = match bounds.start_bound() {
24 Bound::Included(&bound) => bound,
25 Bound::Excluded(&bound) => bound + 1,
26 Bound::Unbounded => 0,
27 };
28 let size = match bounds.end_bound() {
29 Bound::Included(&bound) => bound + 1,
30 Bound::Excluded(&bound) => bound,
31 Bound::Unbounded => self.size,
32 } - offset;
33 BufferSlice {
34 id: self.id,
35 offset,
36 size,
37 value: self.value.slice(bounds),
38 }
39 }
40
41 #[inline]
42 pub fn unmap(&self) {
43 self.value.unmap();
44 }
45}
46
47impl From<wgpu::Buffer> for Buffer {
48 fn from(value: wgpu::Buffer) -> Self {
49 Buffer {
50 id: BufferId::new(),
51 size: value.size(),
52 value: Arc::new(WgpuWrapper::new(value)),
53 }
54 }
55}
56
57impl Deref for Buffer {
58 type Target = wgpu::Buffer;
59
60 #[inline]
61 fn deref(&self) -> &Self::Target {
62 &self.value
63 }
64}
65
66#[derive(Clone, Debug)]
67pub struct BufferSlice<'a> {
68 id: BufferId,
69 offset: wgpu::BufferAddress,
70 value: wgpu::BufferSlice<'a>,
71 size: wgpu::BufferAddress,
72}
73
74impl<'a> BufferSlice<'a> {
75 #[inline]
76 pub fn id(&self) -> BufferId {
77 self.id
78 }
79
80 #[inline]
81 pub fn offset(&self) -> wgpu::BufferAddress {
82 self.offset
83 }
84
85 #[inline]
86 pub fn size(&self) -> wgpu::BufferAddress {
87 self.size
88 }
89}
90
91impl<'a> Deref for BufferSlice<'a> {
92 type Target = wgpu::BufferSlice<'a>;
93
94 #[inline]
95 fn deref(&self) -> &Self::Target {
96 &self.value
97 }
98}