// smithay_client_toolkit/shm/multi.rs

use std::borrow::Borrow;
67use std::io;
68use std::os::unix::io::OwnedFd;
69
70use std::sync::{
71 atomic::{AtomicBool, Ordering},
72 Arc,
73};
74use wayland_client::{
75 protocol::{wl_buffer, wl_shm},
76 Proxy,
77};
78
79use crate::globals::ProvidesBoundGlobal;
80
81use super::raw::RawPool;
82use super::CreatePoolError;
83
/// Errors returned by [`MultiPool`] slot operations.
#[derive(Debug, thiserror::Error)]
pub enum PoolError {
    /// The keyed slot's buffer has not been released by the compositor yet.
    #[error("buffer is currently used")]
    InUse,
    /// The request would collide with a neighbouring buffer's reserved space.
    #[error("buffer is overlapping another")]
    Overlap,
    /// No slot with the given key (or index) exists in the pool.
    #[error("buffer could not be found")]
    NotFound,
}
93
/// A shared-memory pool that carves multiple keyed buffers out of a single
/// backing `wl_shm` mapping.
///
/// Slots are laid out back-to-back inside `inner` and tracked in
/// `buffer_list`; each slot is identified by a caller-chosen key of type `K`.
#[derive(Debug)]
pub struct MultiPool<K> {
    // Bookkeeping for every buffer carved out of the pool, kept in offset order.
    buffer_list: Vec<BufferSlot<K>>,
    // The raw wl_shm pool providing the backing memory mapping.
    pub(crate) inner: RawPool,
}
101
102#[derive(Debug, thiserror::Error)]
103pub struct BufferSlot<K> {
104 free: Arc<AtomicBool>,
105 size: usize,
106 used: usize,
107 offset: usize,
108 buffer: Option<wl_buffer::WlBuffer>,
109 key: K,
110}
111
112impl<K> Drop for BufferSlot<K> {
113 fn drop(&mut self) {
114 self.destroy().ok();
115 }
116}
117
118impl<K> BufferSlot<K> {
119 pub fn destroy(&self) -> Result<(), PoolError> {
120 self.buffer.as_ref().ok_or(PoolError::NotFound).and_then(|buffer| {
121 self.free.load(Ordering::Relaxed).then(|| buffer.destroy()).ok_or(PoolError::InUse)
122 })
123 }
124}
125
126impl<K> MultiPool<K> {
127 pub fn new(shm: &impl ProvidesBoundGlobal<wl_shm::WlShm, 1>) -> Result<Self, CreatePoolError> {
128 Ok(Self { inner: RawPool::new(4096, shm)?, buffer_list: Vec::new() })
129 }
130
    /// Resizes the backing memory mapping to `size` bytes.
    ///
    /// Delegates to [`RawPool::resize`]; existing slot bookkeeping is left
    /// untouched. NOTE(review): wl_shm pools can only grow — presumably the
    /// raw pool enforces this; confirm against `RawPool`.
    pub fn resize(&mut self, size: usize) -> io::Result<()> {
        self.inner.resize(size)
    }
138
139 pub fn remove<Q>(&mut self, key: &Q) -> Option<BufferSlot<K>>
141 where
142 Q: PartialEq,
143 K: std::borrow::Borrow<Q>,
144 {
145 self.buffer_list
146 .iter()
147 .enumerate()
148 .find(|(_, slot)| slot.key.borrow().eq(key))
149 .map(|(i, _)| i)
150 .map(|i| self.buffer_list.remove(i))
151 }
152
153 pub fn insert<Q>(
163 &mut self,
164 width: i32,
165 stride: i32,
166 height: i32,
167 key: &Q,
168 format: wl_shm::Format,
169 ) -> Result<usize, PoolError>
170 where
171 K: Borrow<Q>,
172 Q: PartialEq + ToOwned<Owned = K>,
173 {
174 let mut offset = 0;
175 let mut found_key = false;
176 let size = (stride * height) as usize;
177 let mut index = Err(PoolError::NotFound);
178
179 for (i, buf_slot) in self.buffer_list.iter_mut().enumerate() {
180 if buf_slot.key.borrow().eq(key) {
181 found_key = true;
182 if buf_slot.free.load(Ordering::Relaxed) {
183 if size != buf_slot.used {
185 if let Some(buffer) = buf_slot.buffer.take() {
186 buffer.destroy();
187 }
188 }
189 buf_slot.size = buf_slot.size.max(size + size / 20);
193 index = Ok(i);
194 } else {
195 index = Err(PoolError::InUse);
196 }
197 } else if offset > buf_slot.offset {
199 if buf_slot.free.load(Ordering::Relaxed) {
201 if offset != buf_slot.offset {
202 if let Some(buffer) = buf_slot.buffer.take() {
203 buffer.destroy();
204 }
205 }
206 buf_slot.offset = offset;
207 } else {
208 index = Err(PoolError::InUse);
210 }
211 } else if found_key {
212 break;
213 }
214 let size = (buf_slot.size + 63) & !63;
215 offset += size;
216 }
217
218 if !found_key {
219 if let Err(err) = index {
220 return self
221 .dyn_resize(offset, width, stride, height, key.to_owned(), format)
222 .map(|_| self.buffer_list.len() - 1)
223 .ok_or(err);
224 }
225 }
226
227 index
228 }
229
230 pub fn get<Q>(
240 &mut self,
241 width: i32,
242 stride: i32,
243 height: i32,
244 key: &Q,
245 format: wl_shm::Format,
246 ) -> Option<(usize, &wl_buffer::WlBuffer, &mut [u8])>
247 where
248 Q: PartialEq,
249 K: std::borrow::Borrow<Q>,
250 {
251 let len = self.inner.len();
252 let size = (stride * height) as usize;
253 let buf_slot =
254 self.buffer_list.iter_mut().find(|buf_slot| buf_slot.key.borrow().eq(key))?;
255
256 if buf_slot.size >= size {
257 return None;
258 }
259
260 buf_slot.used = size;
261 let offset = buf_slot.offset;
262 if buf_slot.buffer.is_none() {
263 if offset + size > len {
264 self.inner.resize(offset + size + size / 20).ok()?;
265 }
266 let free = Arc::new(AtomicBool::new(true));
267 let data = BufferObjectData { free: free.clone() };
268 let buffer = self.inner.create_buffer_raw(
269 offset as i32,
270 width,
271 height,
272 stride,
273 format,
274 Arc::new(data),
275 );
276 buf_slot.free = free;
277 buf_slot.buffer = Some(buffer);
278 }
279 let buf = buf_slot.buffer.as_ref()?;
280 buf_slot.free.store(false, Ordering::Relaxed);
281 Some((offset, buf, &mut self.inner.mmap()[offset..][..size]))
282 }
283
284 pub fn create_buffer<Q>(
297 &mut self,
298 width: i32,
299 stride: i32,
300 height: i32,
301 key: &Q,
302 format: wl_shm::Format,
303 ) -> Result<(usize, &wl_buffer::WlBuffer, &mut [u8]), PoolError>
304 where
305 K: Borrow<Q>,
306 Q: PartialEq + ToOwned<Owned = K>,
307 {
308 let index = self.insert(width, stride, height, key, format)?;
309 self.get_at(index, width, stride, height, format)
310 }
311
    /// Materialises the buffer for the slot at `index`: lazily creates its
    /// `wl_buffer`, marks the slot busy, and returns the slot offset, the
    /// buffer, and a mutable view of the slot's bytes.
    ///
    /// # Errors
    ///
    /// [`PoolError::NotFound`] for an out-of-range index;
    /// [`PoolError::Overlap`] when the request exceeds the reserved slot size
    /// or the pool could not be grown.
    fn get_at(
        &mut self,
        index: usize,
        width: i32,
        stride: i32,
        height: i32,
        format: wl_shm::Format,
    ) -> Result<(usize, &wl_buffer::WlBuffer, &mut [u8]), PoolError> {
        let len = self.inner.len();
        let size = (stride * height) as usize;
        let buf_slot = self.buffer_list.get_mut(index).ok_or(PoolError::NotFound)?;

        // A request larger than the reserved slot would spill into the
        // neighbouring slot's bytes.
        if size > buf_slot.size {
            return Err(PoolError::Overlap);
        }

        buf_slot.used = size;
        let offset = buf_slot.offset;
        if buf_slot.buffer.is_none() {
            // Grow the backing mapping (with ~5% headroom) if the slot ends
            // past the current pool length.
            if offset + size > len {
                self.inner.resize(offset + size + size / 20).map_err(|_| PoolError::Overlap)?;
            }
            let free = Arc::new(AtomicBool::new(true));
            let data = BufferObjectData { free: free.clone() };
            let buffer = self.inner.create_buffer_raw(
                offset as i32,
                width,
                height,
                stride,
                format,
                Arc::new(data),
            );
            buf_slot.free = free;
            buf_slot.buffer = Some(buffer);
        }
        // Busy until the compositor sends wl_buffer.release.
        buf_slot.free.store(false, Ordering::Relaxed);
        let buf = buf_slot.buffer.as_ref().unwrap();
        Ok((offset, buf, &mut self.inner.mmap()[offset..][..size]))
    }
352
353 fn offset(&self, mut offset: i32, stride: i32, height: i32) -> (usize, usize) {
355 let size = stride * height;
357 offset += offset / 20;
359 offset = (offset + 63) & !63;
360 (offset as usize, size as usize)
361 }
362
    /// Appends a brand-new slot for `key` at the (padded, aligned) `offset`,
    /// growing the backing pool if required and creating the `wl_buffer`
    /// immediately.
    ///
    /// Returns `None` when the pool could not be resized.
    #[allow(clippy::too_many_arguments)]
    fn dyn_resize(
        &mut self,
        offset: usize,
        width: i32,
        stride: i32,
        height: i32,
        key: K,
        format: wl_shm::Format,
    ) -> Option<()> {
        let (offset, size) = self.offset(offset as i32, stride, height);
        // Grow with ~5% headroom so the next append is less likely to resize.
        if self.inner.len() < offset + size {
            self.resize(offset + size + size / 20).ok()?;
        }
        // The release handler will flip `free` back to true when the
        // compositor is done with the buffer.
        let free = Arc::new(AtomicBool::new(true));
        let data = BufferObjectData { free: free.clone() };
        let buffer = self.inner.create_buffer_raw(
            offset as i32,
            width,
            height,
            stride,
            format,
            Arc::new(data),
        );
        self.buffer_list.push(BufferSlot {
            offset,
            used: 0,
            free,
            buffer: Some(buffer),
            size,
            key,
        });
        Some(())
    }
398}
399
/// Per-buffer object data: shares the `free` flag with the owning
/// [`BufferSlot`] so the compositor's release event can mark the slot
/// reusable.
struct BufferObjectData {
    free: Arc<AtomicBool>,
}
403
impl wayland_client::backend::ObjectData for BufferObjectData {
    fn event(
        self: Arc<Self>,
        _backend: &wayland_backend::client::Backend,
        msg: wayland_backend::protocol::Message<wayland_backend::client::ObjectId, OwnedFd>,
    ) -> Option<Arc<dyn wayland_backend::client::ObjectData>> {
        // Sanity-check that this really is an event on a wl_buffer.
        debug_assert!(wayland_client::backend::protocol::same_interface(
            msg.sender_id.interface(),
            wl_buffer::WlBuffer::interface()
        ));
        // wl_buffer has a single event (opcode 0): release.
        debug_assert!(msg.opcode == 0);
        // The compositor released the buffer; its slot may be reused.
        self.free.store(true, Ordering::Relaxed);
        None
    }

    fn destroyed(&self, _: wayland_backend::client::ObjectId) {}
}