use {
    crate::{align_down, align_up, error::MapError},
    alloc::sync::Arc,
    core::{
        convert::TryFrom as _,
        ptr::{copy_nonoverlapping, NonNull},
    },
    gpu_alloc_types::{MappedMemoryRange, MemoryDevice, MemoryPropertyFlags},
};

#[derive(Debug)]
struct Relevant;

impl Drop for Relevant {
    fn drop(&mut self) {
        report_error_on_drop!("Memory block wasn't deallocated");
    }
}

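/// Memory block allocated by `GpuAllocator`.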
#[derive(Debug)]
pub struct MemoryBlock<M> {
    memory_type: u32,
    props: MemoryPropertyFlags,
    offset: u64,
    size: u64,
    atom_mask: u64,
    mapped: bool,
    flavor: MemoryBlockFlavor<M>,
    relevant: Relevant,
}

impl<M> MemoryBlock<M> {
    pub(crate) fn new(
        memory_type: u32,
        props: MemoryPropertyFlags,
        offset: u64,
        size: u64,
        atom_mask: u64,
        flavor: MemoryBlockFlavor<M>,
    ) -> Self {
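        // `map` computes pointer offsets no larger than `atom_mask`,
        // so check once here that it fits `isize`.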
        isize::try_from(atom_mask).expect("`atom_mask` is too large");
        MemoryBlock {
            memory_type,
            props,
            offset,
            size,
            atom_mask,
            flavor,
            mapped: false,
            relevant: Relevant,
        }
    }

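    /// Consumes the block, defusing the drop-time leak report, and
    /// returns its flavor so the allocator can reclaim the memory.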
    pub(crate) fn deallocate(self) -> MemoryBlockFlavor<M> {
        core::mem::forget(self.relevant);
        self.flavor
    }
}

unsafe impl<M> Sync for MemoryBlock<M> where M: Sync {}
unsafe impl<M> Send for MemoryBlock<M> where M: Send {}

#[derive(Debug)]
pub(crate) enum MemoryBlockFlavor<M> {
    Dedicated {
        memory: M,
    },
    Buddy {
        chunk: usize,
        index: usize,
        ptr: Option<NonNull<u8>>,
        memory: Arc<M>,
    },
    FreeList {
        chunk: u64,
        ptr: Option<NonNull<u8>>,
        memory: Arc<M>,
    },
}

impl<M> MemoryBlock<M> {
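    /// Returns reference to parent memory object.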
    #[inline(always)]
    pub fn memory(&self) -> &M {
        match &self.flavor {
            MemoryBlockFlavor::Dedicated { memory } => memory,
            MemoryBlockFlavor::Buddy { memory, .. } => memory,
            MemoryBlockFlavor::FreeList { memory, .. } => memory,
        }
    }

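    /// Returns offset in bytes from start of memory object.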
    #[inline(always)]
    pub fn offset(&self) -> u64 {
        self.offset
    }

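    /// Returns size of this memory block.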
    #[inline(always)]
    pub fn size(&self) -> u64 {
        self.size
    }

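    /// Returns memory property flags for parent memory object.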
    #[inline(always)]
    pub fn props(&self) -> MemoryPropertyFlags {
        self.props
    }

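    /// Returns index of type of parent memory object.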
    #[inline(always)]
    pub fn memory_type(&self) -> u32 {
        self.memory_type
    }

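    /// Transiently maps the block's memory and returns a host pointer
    /// to the range starting `offset` bytes into the block.
    ///
    /// Returns [`MapError::AlreadyMapped`] if the block is currently mapped
    /// and [`MapError::NonHostVisible`] if the memory cannot be mapped.
    ///
    /// # Safety
    ///
    /// `device` must be the same device this block was allocated from,
    /// and the returned pointer must not be used after [`Self::unmap`].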
    #[inline(always)]
    pub unsafe fn map(
        &mut self,
        device: &impl MemoryDevice<M>,
        offset: u64,
        size: usize,
    ) -> Result<NonNull<u8>, MapError> {
        let size_u64 = u64::try_from(size).expect("`size` doesn't fit device address space");
        assert!(offset < self.size, "`offset` is out of memory block bounds");
        assert!(
            size_u64 <= self.size - offset,
            "`offset + size` is out of memory block bounds"
        );

        let ptr = match &mut self.flavor {
            MemoryBlockFlavor::Dedicated { memory } => {
                let end = align_up(offset + size_u64, self.atom_mask)
                    .expect("mapping end doesn't fit device address space");
                let aligned_offset = align_down(offset, self.atom_mask);

                if !acquire_mapping(&mut self.mapped) {
                    return Err(MapError::AlreadyMapped);
                }
                let result =
                    device.map_memory(memory, self.offset + aligned_offset, end - aligned_offset);

                match result {
                    Ok(ptr) => {
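                        // `offset - aligned_offset` is at most `atom_mask`,
                        // which `new` checked fits `isize`.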
                        let ptr_offset = (offset - aligned_offset) as isize;
                        ptr.as_ptr().offset(ptr_offset)
                    }
                    Err(err) => {
                        release_mapping(&mut self.mapped);
                        return Err(err.into());
                    }
                }
            }
            MemoryBlockFlavor::FreeList { ptr: Some(ptr), .. }
            | MemoryBlockFlavor::Buddy { ptr: Some(ptr), .. } => {
                if !acquire_mapping(&mut self.mapped) {
                    return Err(MapError::AlreadyMapped);
                }
                let offset_isize = isize::try_from(offset)
                    .expect("Buddy and linear block should fit host address space");
                ptr.as_ptr().offset(offset_isize)
            }
            _ => return Err(MapError::NonHostVisible),
        };

        Ok(NonNull::new_unchecked(ptr))
    }

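    /// Unmaps the block's memory. Returns `true` if the block was mapped,
    /// `false` if there was nothing to unmap.
    ///
    /// # Safety
    ///
    /// `device` must be the same device this block was mapped with.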
    #[inline(always)]
    pub unsafe fn unmap(&mut self, device: &impl MemoryDevice<M>) -> bool {
        if !release_mapping(&mut self.mapped) {
            return false;
        }
        match &mut self.flavor {
            MemoryBlockFlavor::Dedicated { memory } => {
                device.unmap_memory(memory);
            }
            MemoryBlockFlavor::Buddy { .. } => {}
            MemoryBlockFlavor::FreeList { .. } => {}
        }
        true
    }

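    /// Transiently maps the block, writes `data` starting at `offset` bytes
    /// into the block, flushes the range if the memory is not
    /// `HOST_COHERENT`, and unmaps again.
    ///
    /// # Safety
    ///
    /// `device` must be the same device this block was allocated from.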
    #[inline(always)]
    pub unsafe fn write_bytes(
        &mut self,
        device: &impl MemoryDevice<M>,
        offset: u64,
        data: &[u8],
    ) -> Result<(), MapError> {
        let size = data.len();
        let ptr = self.map(device, offset, size)?;

        copy_nonoverlapping(data.as_ptr(), ptr.as_ptr(), size);
        let result = if !self.coherent() {
            let aligned_offset = align_down(offset, self.atom_mask);
            let end = align_up(offset + data.len() as u64, self.atom_mask).unwrap();

            device.flush_memory_ranges(&[MappedMemoryRange {
                memory: self.memory(),
                offset: self.offset + aligned_offset,
                size: end - aligned_offset,
            }])
        } else {
            Ok(())
        };

        self.unmap(device);
        result.map_err(Into::into)
    }

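    /// Transiently maps the block, invalidates the range if the memory is
    /// not `HOST_COHERENT`, reads `data.len()` bytes starting at `offset`
    /// bytes into the block, and unmaps again.
    ///
    /// # Safety
    ///
    /// `device` must be the same device this block was allocated from.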
    #[inline(always)]
    pub unsafe fn read_bytes(
        &mut self,
        device: &impl MemoryDevice<M>,
        offset: u64,
        data: &mut [u8],
    ) -> Result<(), MapError> {
        #[cfg(feature = "tracing")]
        {
            if !self.cached() {
                tracing::warn!("Reading from non-cached memory may be slow. Consider allocating HOST_CACHED memory block for host reads.");
            }
        }

        let size = data.len();
        let ptr = self.map(device, offset, size)?;
        let result = if !self.coherent() {
            let aligned_offset = align_down(offset, self.atom_mask);
            let end = align_up(offset + data.len() as u64, self.atom_mask).unwrap();

            device.invalidate_memory_ranges(&[MappedMemoryRange {
                memory: self.memory(),
                offset: self.offset + aligned_offset,
                size: end - aligned_offset,
            }])
        } else {
            Ok(())
        };
        if result.is_ok() {
            copy_nonoverlapping(ptr.as_ptr(), data.as_mut_ptr(), size);
        }

        self.unmap(device);
        result.map_err(Into::into)
    }

    fn coherent(&self) -> bool {
        self.props.contains(MemoryPropertyFlags::HOST_COHERENT)
    }

    #[cfg(feature = "tracing")]
    fn cached(&self) -> bool {
        self.props.contains(MemoryPropertyFlags::HOST_CACHED)
    }
}

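/// Flips `mapped` to `true`; returns `false` if the block was already mapped.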
fn acquire_mapping(mapped: &mut bool) -> bool {
    if *mapped {
        false
    } else {
        *mapped = true;
        true
    }
}

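/// Flips `mapped` back to `false`; returns `false` if the block wasn't mapped.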
fn release_mapping(mapped: &mut bool) -> bool {
    if *mapped {
        *mapped = false;
        true
    } else {
        false
    }
}