rkyv/ser/serializers/core.rs

use crate::{
    ser::{ScratchSpace, Serializer},
    Fallible,
};
use core::{
    alloc::Layout,
    fmt,
    ops::DerefMut,
    ptr::{copy_nonoverlapping, NonNull},
};

/// The error type returned by a [`BufferSerializer`].
#[derive(Debug)]
pub enum BufferSerializerError {
    /// Writing has overflowed the internal buffer.
    Overflow {
        /// The position of the serializer
        pos: usize,
        /// The number of bytes needed
        bytes_needed: usize,
        /// The total length of the archive
        archive_len: usize,
    },
}

impl fmt::Display for BufferSerializerError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::Overflow {
                pos,
                bytes_needed,
                archive_len,
            } => write!(
                f,
                "writing has overflowed the serializer buffer: pos {}, needed {}, total length {}",
                pos, bytes_needed, archive_len
            ),
        }
    }
}

#[cfg(feature = "std")]
const _: () = {
    use std::error::Error;

    impl Error for BufferSerializerError {}
};

/// Wraps a byte buffer and equips it with [`Serializer`].
///
/// Common uses include archiving in `#![no_std]` environments and archiving small objects without
/// allocating.
///
/// # Examples
/// ```
/// use rkyv::{
///     archived_value,
///     ser::{Serializer, serializers::BufferSerializer},
///     AlignedBytes,
///     AlignedVec,
///     Archive,
///     Archived,
///     Serialize,
/// };
///
/// #[derive(Archive, Serialize)]
/// enum Event {
///     Spawn,
///     Speak(String),
///     Die,
/// }
///
/// let mut serializer = BufferSerializer::new(AlignedBytes([0u8; 256]));
/// let pos = serializer.serialize_value(&Event::Speak("Help me!".to_string()))
///     .expect("failed to archive event");
/// let buf = serializer.into_inner();
/// let archived = unsafe { archived_value::<Event>(buf.as_ref(), pos) };
/// if let Archived::<Event>::Speak(message) = archived {
///     assert_eq!(message.as_str(), "Help me!");
/// } else {
///     panic!("archived event was of the wrong type");
/// }
/// ```
#[derive(Debug)]
pub struct BufferSerializer<T> {
    inner: T,
    pos: usize,
}

impl<T> BufferSerializer<T> {
    /// Creates a new archive buffer from a byte buffer.
    #[inline]
    pub fn new(inner: T) -> Self {
        Self::with_pos(inner, 0)
    }

    /// Creates a new archive buffer from a byte buffer. The serializer will start writing at the
    /// given position, but the buffer must still contain the entire archive, including the bytes
    /// before that position; otherwise the alignment of archived types may be incorrect.
    #[inline]
    pub fn with_pos(inner: T, pos: usize) -> Self {
        Self { inner, pos }
    }

    /// Consumes the serializer and returns the underlying type.
    #[inline]
    pub fn into_inner(self) -> T {
        self.inner
    }
}

impl<T: Default> Default for BufferSerializer<T> {
    #[inline]
    fn default() -> Self {
        Self::new(T::default())
    }
}

impl<T> Fallible for BufferSerializer<T> {
    type Error = BufferSerializerError;
}

impl<T: AsMut<[u8]>> Serializer for BufferSerializer<T> {
    #[inline]
    fn pos(&self) -> usize {
        self.pos
    }

    fn write(&mut self, bytes: &[u8]) -> Result<(), Self::Error> {
        let end_pos = self.pos + bytes.len();
        let archive_len = self.inner.as_mut().len();
        if end_pos > archive_len {
            Err(BufferSerializerError::Overflow {
                pos: self.pos,
                bytes_needed: bytes.len(),
                archive_len,
            })
        } else {
            // The write fits: copy the bytes into the buffer and advance the write position.
            unsafe {
                copy_nonoverlapping(
                    bytes.as_ptr(),
                    self.inner.as_mut().as_mut_ptr().add(self.pos),
                    bytes.len(),
                );
            }
            self.pos = end_pos;
            Ok(())
        }
    }
}

/// Errors that can occur when using a fixed-size allocator.
///
/// Pairing a fixed-size allocator with a fallback allocator can help prevent running out of scratch
/// space unexpectedly.
#[derive(Debug)]
pub enum FixedSizeScratchError {
    /// The allocator ran out of scratch space.
    OutOfScratch(Layout),
    /// Scratch space was not popped in reverse order.
    NotPoppedInReverseOrder {
        /// The current position of the start of free memory
        pos: usize,
        /// The next position according to the erroneous pop
        next_pos: usize,
        /// The size of the memory according to the erroneous pop
        next_size: usize,
    },
    /// The given allocation did not belong to the scratch allocator.
    UnownedAllocation,
}

impl fmt::Display for FixedSizeScratchError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::OutOfScratch(layout) => write!(
                f,
                "out of scratch: requested scratch space with size {} and align {}",
                layout.size(),
                layout.align()
            ),
            Self::NotPoppedInReverseOrder {
                pos,
                next_pos,
                next_size,
            } => write!(
                f,
                "scratch space was not popped in reverse order: pos {}, next pos {}, next size {}",
                pos, next_pos, next_size
            ),
            Self::UnownedAllocation => write!(f, "unowned allocation"),
        }
    }
}

#[cfg(feature = "std")]
impl std::error::Error for FixedSizeScratchError {}

/// Scratch space that allocates within a buffer.
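///
/// # Examples
/// A minimal sketch of pushing and popping scratch space by hand; serializers normally drive this
/// through the [`ScratchSpace`] trait. `AlignedBytes` is assumed to be available from the crate
/// root, as in the [`BufferSerializer`] example above.
/// ```
/// // `BufferScratch` is assumed to be re-exported alongside `BufferSerializer`.
/// use rkyv::{
///     ser::{ScratchSpace, serializers::BufferScratch},
///     AlignedBytes,
/// };
/// use core::alloc::Layout;
///
/// let mut scratch = BufferScratch::new(AlignedBytes([0u8; 256]));
/// let layout = Layout::new::<[u64; 4]>();
/// unsafe {
///     // Push an aligned block of scratch memory...
///     let block = scratch.push_scratch(layout).expect("ran out of scratch space");
///     // ...and pop it again. Allocations must be popped in reverse order.
///     scratch.pop_scratch(block.cast(), layout).expect("allocations popped out of order");
/// }
/// ```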
#[derive(Debug)]
pub struct BufferScratch<T> {
    buffer: T,
    pos: usize,
    // TODO: Compute this pointer eagerly in a future version of rkyv.
    ptr: Option<NonNull<[u8]>>,
}

unsafe impl<T> Send for BufferScratch<T> where T: Send {}
unsafe impl<T> Sync for BufferScratch<T> where T: Sync {}

impl<T> BufferScratch<T> {
    /// Creates a new buffer scratch allocator.
    pub fn new(buffer: T) -> Self {
        Self {
            buffer,
            pos: 0,
            ptr: None,
        }
    }

    /// Resets the scratch space to its initial state.
    pub fn clear(&mut self) {
        self.pos = 0;
    }

    /// Consumes the buffer scratch allocator, returning the underlying buffer.
    pub fn into_inner(self) -> T {
        self.buffer
    }
}

impl<T: Default> Default for BufferScratch<T> {
    fn default() -> Self {
        Self::new(T::default())
    }
}

impl<T> Fallible for BufferScratch<T> {
    type Error = FixedSizeScratchError;
}

impl<T: DerefMut<Target = U>, U: AsMut<[u8]>> ScratchSpace for BufferScratch<T> {
    #[inline]
    unsafe fn push_scratch(&mut self, layout: Layout) -> Result<NonNull<[u8]>, Self::Error> {
        // Lazily capture a pointer to the backing buffer on first use.
        if self.ptr.is_none() {
            self.ptr = Some(NonNull::from(self.buffer.as_mut()));
        }
        let bytes = self.ptr.unwrap().as_ptr();

        // Compute the padding needed to align the current position to the requested alignment.
        let start = bytes.cast::<u8>().add(self.pos);
        let pad = match (start as usize) & (layout.align() - 1) {
            0 => 0,
            x => layout.align() - x,
        };
        // If the padded request fits in the remaining buffer, hand out the next `layout.size()`
        // bytes.
        if pad + layout.size() <= ptr_meta::metadata(bytes) - self.pos {
            self.pos += pad;
            let result_slice = ptr_meta::from_raw_parts_mut(
                bytes.cast::<u8>().add(self.pos).cast(),
                layout.size(),
            );
            let result = NonNull::new_unchecked(result_slice);
            self.pos += layout.size();
            Ok(result)
        } else {
            Err(FixedSizeScratchError::OutOfScratch(layout))
        }
    }

    #[inline]
    unsafe fn pop_scratch(&mut self, ptr: NonNull<u8>, layout: Layout) -> Result<(), Self::Error> {
        let bytes = self.ptr.unwrap().as_ptr();

        let ptr = ptr.as_ptr();
        // The popped allocation must lie within the backing buffer...
        if ptr >= bytes.cast::<u8>() && ptr < bytes.cast::<u8>().add(ptr_meta::metadata(bytes)) {
            let next_pos = ptr.offset_from(bytes.cast::<u8>()) as usize;
            // ...and must end at or before the current free position, which holds when allocations
            // are popped in reverse order.
            if next_pos + layout.size() <= self.pos {
                self.pos = next_pos;
                Ok(())
            } else {
                Err(FixedSizeScratchError::NotPoppedInReverseOrder {
                    pos: self.pos,
                    next_pos,
                    next_size: layout.size(),
                })
            }
        } else {
            Err(FixedSizeScratchError::UnownedAllocation)
        }
    }
}

/// Allocates scratch space with a main and backup scratch.
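///
/// # Examples
/// A sketch of backing a small fixed-size buffer with a larger one. Both allocators here are
/// [`BufferScratch`] purely for illustration; in practice the fallback is usually a heap-backed
/// allocator.
/// ```
/// // Both scratch types are assumed to be re-exported alongside `BufferSerializer`.
/// use rkyv::{
///     ser::{ScratchSpace, serializers::{BufferScratch, FallbackScratch}},
///     AlignedBytes,
/// };
/// use core::alloc::Layout;
///
/// let mut scratch = FallbackScratch::new(
///     BufferScratch::new(AlignedBytes([0u8; 16])),
///     BufferScratch::new(AlignedBytes([0u8; 1024])),
/// );
/// // This request is too large for the 16-byte main buffer, so it falls through to the fallback.
/// let layout = Layout::new::<[u64; 8]>();
/// unsafe {
///     let block = scratch.push_scratch(layout).expect("both scratch buffers were exhausted");
///     scratch.pop_scratch(block.cast(), layout).expect("allocations popped out of order");
/// }
/// ```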
#[derive(Debug)]
pub struct FallbackScratch<M, F> {
    main: M,
    fallback: F,
}

impl<M, F> FallbackScratch<M, F> {
    /// Creates fallback scratch from a main and backup scratch.
    pub fn new(main: M, fallback: F) -> Self {
        Self { main, fallback }
    }
}

impl<M: Default, F: Default> Default for FallbackScratch<M, F> {
    fn default() -> Self {
        Self {
            main: M::default(),
            fallback: F::default(),
        }
    }
}

impl<M, F: Fallible> Fallible for FallbackScratch<M, F> {
    type Error = F::Error;
}

impl<M: ScratchSpace, F: ScratchSpace> ScratchSpace for FallbackScratch<M, F> {
    #[inline]
    unsafe fn push_scratch(&mut self, layout: Layout) -> Result<NonNull<[u8]>, Self::Error> {
        self.main
            .push_scratch(layout)
            .or_else(|_| self.fallback.push_scratch(layout))
    }

    #[inline]
    unsafe fn pop_scratch(&mut self, ptr: NonNull<u8>, layout: Layout) -> Result<(), Self::Error> {
        self.main
            .pop_scratch(ptr, layout)
            .or_else(|_| self.fallback.pop_scratch(ptr, layout))
    }
}

/// A passthrough scratch space allocator that tracks scratch space usage.
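///
/// # Examples
/// A sketch of wrapping a fixed-size scratch space and inspecting its peak usage afterwards;
/// `AlignedBytes` and the re-exported scratch types are assumed, as in the examples above.
/// ```
/// use rkyv::{
///     ser::{ScratchSpace, serializers::{BufferScratch, ScratchTracker}},
///     AlignedBytes,
/// };
/// use core::alloc::Layout;
///
/// let mut scratch = ScratchTracker::new(BufferScratch::new(AlignedBytes([0u8; 1024])));
/// // Request one 32-byte, 8-aligned block and immediately return it.
/// let layout = Layout::from_size_align(32, 8).unwrap();
/// unsafe {
///     let block = scratch.push_scratch(layout).expect("ran out of scratch space");
///     scratch.pop_scratch(block.cast(), layout).expect("allocations popped out of order");
/// }
/// assert_eq!(scratch.max_bytes_allocated(), 32);
/// assert_eq!(scratch.max_allocations(), 1);
/// assert_eq!(scratch.max_alignment(), 8);
/// ```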
#[derive(Debug)]
pub struct ScratchTracker<T> {
    inner: T,
    bytes_allocated: usize,
    allocations: usize,
    max_bytes_allocated: usize,
    max_allocations: usize,
    max_alignment: usize,
}

impl<T> ScratchTracker<T> {
    /// Creates a new scratch tracker from the given inner scratch space.
    pub fn new(inner: T) -> Self {
        Self {
            inner,
            bytes_allocated: 0,
            allocations: 0,
            max_bytes_allocated: 0,
            max_allocations: 0,
            max_alignment: 1,
        }
    }

    /// Returns the maximum number of bytes that were concurrently allocated during serialization.
    pub fn max_bytes_allocated(&self) -> usize {
        self.max_bytes_allocated
    }

    /// Returns the maximum number of concurrent allocations during serialization.
    pub fn max_allocations(&self) -> usize {
        self.max_allocations
    }

    /// Returns the maximum alignment of scratch space requested during serialization.
    pub fn max_alignment(&self) -> usize {
        self.max_alignment
    }

    /// Returns the minimum buffer size required to serialize the same data.
    ///
    /// This calculation takes into account packing efficiency for slab-allocated scratch space. It
    /// is not exact, and has an error bound of `max_allocations * (max_alignment - 1)` bytes, since
    /// each allocation may need up to `max_alignment - 1` bytes of padding to meet its alignment.
    /// This error should be suitably small for most use cases.
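    ///
    /// # Examples
    /// Purely illustrative numbers (not taken from a real serialization run): with
    /// `max_bytes_allocated = 1024`, `max_allocations = 3`, and `max_alignment = 8`, this returns
    /// `1024 + 3 * (8 - 1) = 1045` bytes.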
    pub fn min_buffer_size(&self) -> usize {
        self.max_bytes_allocated + self.min_buffer_size_max_error()
    }

    /// Returns the maximum error term for the minimum buffer size calculation.
    pub fn min_buffer_size_max_error(&self) -> usize {
        self.max_allocations * (self.max_alignment - 1)
    }
}

impl<T: Fallible> Fallible for ScratchTracker<T> {
    type Error = T::Error;
}

impl<T: ScratchSpace> ScratchSpace for ScratchTracker<T> {
    #[inline]
    unsafe fn push_scratch(&mut self, layout: Layout) -> Result<NonNull<[u8]>, Self::Error> {
        let result = self.inner.push_scratch(layout)?;

        self.bytes_allocated += layout.size();
        self.allocations += 1;
        self.max_bytes_allocated = usize::max(self.bytes_allocated, self.max_bytes_allocated);
        self.max_allocations = usize::max(self.allocations, self.max_allocations);
        self.max_alignment = usize::max(self.max_alignment, layout.align());

        Ok(result)
    }

    #[inline]
    unsafe fn pop_scratch(&mut self, ptr: NonNull<u8>, layout: Layout) -> Result<(), Self::Error> {
        self.inner.pop_scratch(ptr, layout)?;

        self.bytes_allocated -= layout.size();
        self.allocations -= 1;

        Ok(())
    }
}

impl<T> From<T> for ScratchTracker<T> {
    fn from(inner: T) -> Self {
        Self::new(inner)
    }
}