#![deny(clippy::all, clippy::pedantic)]

use std::alloc::{alloc, dealloc, handle_alloc_error, Layout};
use std::any::{Any, TypeId};
use std::cmp::max;
use std::mem::MaybeUninit;
use std::ptr::NonNull;

use crate::util::MaybeUninitByteSlice;

mod util;

pub struct OwnedAnyPtr
{
    ptr: *mut dyn Any,
    drop_in_place: unsafe fn(NonNull<MaybeUninit<u8>>),
}

impl OwnedAnyPtr
{
    pub fn new<Value: Any>(value: Value) -> Self
    {
        Self::from_boxed(Box::new(value))
    }

    pub fn from_boxed<Value: Any>(boxed_value: Box<Value>) -> Self
    {
        Self {
            ptr: Box::into_raw(boxed_value),
            drop_in_place: |ptr| unsafe {
                std::ptr::drop_in_place(ptr.cast::<Value>().as_ptr());
            },
        }
    }

    pub fn as_ptr(&self) -> *const dyn Any
    {
        self.ptr
    }

    pub fn size(&self) -> usize
    {
        size_of_val(unsafe { &*self.ptr })
    }

    pub fn alignment(&self) -> usize
    {
        align_of_val(unsafe { &*self.ptr })
    }

    pub fn id(&self) -> TypeId
    {
        unsafe { &*self.ptr }.type_id()
    }
}

impl Drop for OwnedAnyPtr
{
    fn drop(&mut self)
    {
        if self.size() == 0 {
            return;
        }

        // Only the box's allocation is freed here. The pointed-to value is
        // intentionally not dropped: its bytes are moved into a `MultiVec` by
        // `write_item`, which takes over responsibility for dropping it.
        unsafe {
            dealloc(
                self.ptr.cast::<u8>(),
                Layout::from_size_align(self.size(), self.alignment()).unwrap(),
            );
        }
    }
}

/// A list of items. This data structure stores a separate array for every field of an
/// item, reducing memory usage if the item type contains padding and improving memory
/// cache usage if only certain fields are needed when iterating.
///
/// Inspired by Zig's `MultiArrayList`.
///
/// Note: All of the field arrays are stored in the same allocation.
///
/// For example, if you have three of the following struct:
/// ```
/// struct Person
/// {
///     first_name: String,
///     age: u8,
/// }
/// ```
///
/// It would be stored like this in memory:
/// ```text
/// first_name, first_name, first_name,
/// age, age, age,
/// ```
#[derive(Debug)]
pub struct MultiVec
{
    ptr: NonNull<MaybeUninit<u8>>,
    field_arr_byte_offsets: Vec<usize>,
    field_metadata: Vec<FieldMetadata>,
    length: usize,
    capacity: usize,
    layout: Option<Layout>,
}
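// Usage sketch (illustrative, not a doc-test from the original): the `Person`
// example above maps to one `OwnedAnyPtr` per field on every push, and each
// field array is read back by its positional index. Field order and values
// here are assumptions made for illustration.
//
//     let mut people = MultiVec::new();
//
//     people.push([
//         OwnedAnyPtr::new(String::from("Alice")),
//         OwnedAnyPtr::new(30u8),
//     ]);
//
//     assert_eq!(people.get_field_slice(0).as_slice::<String>(), ["Alice"]);
//     assert_eq!(people.get_field_slice(1).as_slice::<u8>(), [30]);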
impl MultiVec
{
    fn get_min_non_zero_cap(fields: impl AsRef<[OwnedAnyPtr]>) -> usize
    {
        let total_size = fields
            .as_ref()
            .iter()
            .fold(0usize, |acc, field| acc + field.size());

        // The following is borrowed from std's RawVec implementation:
        // Skip to:
        // - 8 if the element size is 1, because any heap allocator is likely to round
        //   up a request of less than 8 bytes to at least 8 bytes.
        // - 4 if elements are moderate-sized (<= 1 KiB).
        // - 1 otherwise, to avoid wasting too much space for very short Vecs.
        if total_size == 1 {
            8
        } else if total_size <= 1024 {
            4
        } else {
            1
        }
    }

    /// Returns a new `MultiVec`. This function does not allocate any memory.
    #[must_use]
    pub const fn new() -> Self
    {
        Self {
            ptr: NonNull::dangling(),
            field_arr_byte_offsets: Vec::new(),
            field_metadata: Vec::new(),
            length: 0,
            capacity: 0,
            layout: None,
        }
    }

    ///// Returns a new `MultiVec` with a capacity for `capacity` items. This function
    ///// will allocate memory.
    //#[must_use]
    //pub fn with_capacity(capacity: usize) -> Self
    //{
    //    let mut this = Self {
    //        _pd: PhantomData,
    //        ptr: NonNull::dangling(),
    //        field_arr_byte_offsets: Vec::new(),
    //        length: 0,
    //        capacity: 0,
    //        layout: None,
    //    };
    //
    //    this.do_first_alloc(capacity);
    //
    //    this
    //}

    /// Pushes an item to the `MultiVec`.
    ///
    /// ## Note on performance
    /// Pushing can be pretty slow. Since all of the field arrays are stored in the
    /// same allocation, when pushing and the `MultiVec` needs to grow, every field
    /// array except the first has to be moved to a new location so that the arrays
    /// do not overlap.
    pub fn push(
        &mut self,
        fields: impl AsRef<[OwnedAnyPtr]> + IntoIterator<Item = OwnedAnyPtr>,
    )
    {
        if self.capacity != 0 {
            assert_eq!(fields.as_ref().len(), self.field_arr_byte_offsets.len());

            if self.capacity == self.length {
                self.grow_amortized(1, &fields);
            }

            self.write_item(self.length, fields);

            self.length += 1;

            return;
        }

        self.field_metadata = fields
            .as_ref()
            .iter()
            .map(|field| FieldMetadata {
                size: field.size(),
                type_id: field.id(),
                drop_in_place: field.drop_in_place,
            })
            .collect();

        self.do_first_alloc(1, &fields);

        self.write_item(0, fields);

        self.length = 1;
    }

    ///// Returns a field of the item with the given index.
    /////
    ///// This function is equivalent to doing `.get_all().get(index)`
    //#[must_use]
    //pub fn get<FieldSel>(
    //    &self,
    //    index: usize,
    //) -> Option<&<FieldSel as ItemFieldSelection>::Field>
    //where
    //    FieldSel: ItemFieldSelection,
    //{
    //    if index >= self.length {
    //        return None;
    //    }
    //
    //    let field_metadata = FieldSel::metadata();
    //
    //    let field_arr_byte_offset = self.field_arr_byte_offsets[FieldSel::INDEX];
    //
    //    let field_arr_ptr = unsafe { self.ptr.byte_add(field_arr_byte_offset) };
    //
    //    let field_ptr = unsafe { field_arr_ptr.add(field_metadata.size * index) };
    //
    //    Some(unsafe { field_ptr.cast().as_ref() })
    //}

    /// Returns a slice containing the specified field of all items.
    #[must_use]
    pub fn get_field_slice(&self, field_index: usize) -> FieldSlice<'_>
    {
        let field_arr_byte_offset = self.field_arr_byte_offsets[field_index];
        let field_metadata = &self.field_metadata[field_index];

        let field_arr_ptr = unsafe { self.ptr.byte_add(field_arr_byte_offset) };

        let bytes = unsafe {
            std::slice::from_raw_parts(
                field_arr_ptr.as_ptr().cast(),
                self.len() * field_metadata.size,
            )
        };

        FieldSlice { bytes, len: self.len(), field_metadata }
    }

    /// Returns a mutable slice containing the specified field of all items.
    #[must_use]
    pub fn get_field_slice_mut(&mut self, field_index: usize) -> FieldSliceMut<'_>
    {
        let field_arr_byte_offset = self.field_arr_byte_offsets[field_index];
        let field_metadata = &self.field_metadata[field_index];

        let field_arr_ptr = unsafe { self.ptr.byte_add(field_arr_byte_offset) };

        let bytes = unsafe {
            std::slice::from_raw_parts_mut(
                field_arr_ptr.as_ptr().cast(),
                self.len() * field_metadata.size,
            )
        };

        FieldSliceMut { bytes, len: self.len(), field_metadata }
    }

    /// Returns the number of items stored in this `MultiVec`.
    #[must_use]
    pub fn len(&self) -> usize
    {
        self.length
    }

    /// Returns how many fields each item in this `MultiVec` has.
    pub fn field_cnt(&self) -> usize
    {
        self.field_arr_byte_offsets.len()
    }

    /// Returns how many items this `MultiVec` has capacity for.
    #[must_use]
    pub fn capacity(&self) -> usize
    {
        self.capacity
    }

    /// Returns whether this `MultiVec` is empty.
    #[must_use]
    pub fn is_empty(&self) -> bool
    {
        self.length == 0
    }
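    // Growth sketch (concrete sizes are illustrative): with fields (u32, u16)
    // and capacity 2, the field arrays live at byte offsets 0 and 8; after
    // growing to capacity 4 they live at offsets 0 and 16. So unlike a plain
    // `Vec`, growing cannot simply reallocate in place: every field array
    // after the first must also be copied to its new offset, which is what
    // the copy loop in `grow_amortized` below does.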
    fn grow_amortized(&mut self, additional: usize, fields: impl AsRef<[OwnedAnyPtr]>)
    {
        let required_cap = self.capacity.checked_add(additional).unwrap();

        // This guarantees exponential growth. The doubling cannot overflow
        // because `cap <= isize::MAX` and the type of `cap` is `usize`.
        let new_capacity = max(self.capacity * 2, required_cap);
        let new_capacity = max(Self::get_min_non_zero_cap(&fields), new_capacity);

        let (new_layout, new_field_arr_byte_offsets) =
            Self::create_layout(new_capacity, &fields);

        let Some(new_ptr) = NonNull::new(if new_layout.size() == 0 {
            std::ptr::dangling_mut()
        } else {
            unsafe { alloc(new_layout) }
        }) else {
            handle_alloc_error(new_layout);
        };

        for field_index in 0..self.field_cnt() {
            let field_slice = self.get_field_slice(field_index);

            let new_byte_offset = new_field_arr_byte_offsets[field_index];

            unsafe {
                std::ptr::copy_nonoverlapping(
                    field_slice.bytes.as_ptr(),
                    new_ptr
                        .byte_add(new_byte_offset)
                        .cast::<MaybeUninit<u8>>()
                        .as_ptr(),
                    field_slice.bytes.len(),
                );
            }
        }

        unsafe {
            self.dealloc();
        }

        self.ptr = new_ptr.cast::<MaybeUninit<u8>>();
        self.layout = Some(new_layout);
        self.capacity = new_capacity;
        self.field_arr_byte_offsets = new_field_arr_byte_offsets;
    }

    fn do_first_alloc(&mut self, capacity: usize, fields: impl AsRef<[OwnedAnyPtr]>)
    {
        let (layout, field_arr_byte_offsets) = Self::create_layout(capacity, fields);

        let Some(ptr) = NonNull::new(if layout.size() == 0 {
            std::ptr::dangling_mut()
        } else {
            unsafe { alloc(layout) }
        }) else {
            handle_alloc_error(layout);
        };

        self.ptr = ptr.cast::<MaybeUninit<u8>>();
        self.capacity = capacity;
        self.field_arr_byte_offsets = field_arr_byte_offsets;
        self.layout = Some(layout);
    }

    fn create_layout(
        length: usize,
        fields: impl AsRef<[OwnedAnyPtr]>,
    ) -> (Layout, Vec<usize>)
    {
        let mut field_iter = fields.as_ref().iter();

        let first_field = field_iter.next().unwrap();

        let mut layout =
            array_layout(first_field.size(), first_field.alignment(), length).unwrap();

        let mut field_arr_byte_offsets = Vec::with_capacity(fields.as_ref().len());

        field_arr_byte_offsets.push(0);

        for field in field_iter {
            let (new_layout, array_byte_offset) = layout
                .extend(array_layout(field.size(), field.alignment(), length).unwrap())
                .unwrap();

            layout = new_layout;

            field_arr_byte_offsets.push(array_byte_offset);
        }

        (layout.pad_to_align(), field_arr_byte_offsets)
    }

    fn write_item(&mut self, index: usize, fields: impl IntoIterator<Item = OwnedAnyPtr>)
    {
        for (field_index, item_field) in fields.into_iter().enumerate() {
            let field_size = item_field.size();

            let field_arr_byte_offset = self.field_arr_byte_offsets[field_index];

            let field_arr_ptr = unsafe { self.ptr.byte_add(field_arr_byte_offset) };

            let field_dst_ptr = unsafe { field_arr_ptr.add(field_size * index) };

            let item_field_ptr = item_field.as_ptr().cast::<u8>();

            unsafe {
                std::ptr::copy_nonoverlapping(
                    item_field_ptr,
                    field_dst_ptr.as_ptr().cast::<u8>(),
                    field_size,
                );
            }
        }
    }

    unsafe fn dealloc(&mut self)
    {
        let Some(layout) = self.layout else {
            return;
        };

        if layout.size() == 0 {
            return;
        }

        unsafe {
            std::alloc::dealloc(self.ptr.as_ptr().cast::<u8>(), layout);
        }
    }
}

//impl<ItemT> FromIterator<ItemT> for MultiVec<ItemT>
//where
//    ItemT: Item,
//{
//    fn from_iter<ItemIter: IntoIterator<Item = ItemT>>(iter: ItemIter) -> Self
//    {
//        let iter = iter.into_iter();
//
//        let initial_capacity =
//            max(Self::MIN_NON_ZERO_CAP, iter.size_hint().0.saturating_add(1));
//
//        let mut this = Self::with_capacity(initial_capacity);
//
//        for item in iter {
//            if this.capacity == this.length {
//                this.grow_amortized(1);
//            }
//
//            this.write_item(this.length, item);
//
//            this.length += 1;
//        }
//
//        this
//    }
//}

impl Default for MultiVec
{
    fn default() -> Self
    {
        Self::new()
    }
}
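// Drop sketch (field types are illustrative): for two items with fields
// `(String, u8)`, the `drop` below walks field array 0 first, running the
// stored `drop_in_place` on each `String`, then field array 1 for each `u8`,
// and only then frees the single backing allocation. Items are therefore
// dropped field-by-field, not item-by-item.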
impl Drop for MultiVec
{
    fn drop(&mut self)
    {
        assert_eq!(self.field_metadata.len(), self.field_arr_byte_offsets.len());

        for field_index in 0..self.field_arr_byte_offsets.len() {
            for field in self.get_field_slice_mut(field_index).iter_mut() {
                let field_ptr = field.bytes.as_mut_ptr();

                unsafe {
                    (field.field_metadata.drop_in_place)(
                        NonNull::new(field_ptr).unwrap(),
                    );
                }
            }
        }

        unsafe {
            self.dealloc();
        }
    }
}

pub struct FieldSlice<'mv>
{
    bytes: &'mv [MaybeUninit<u8>],
    len: usize,
    field_metadata: &'mv FieldMetadata,
}

impl FieldSlice<'_>
{
    pub fn as_slice<Item: 'static>(&self) -> &[Item]
    {
        assert_eq!(TypeId::of::<Item>(), self.field_metadata.type_id);

        unsafe { self.bytes.cast::<Item>() }
    }

    pub fn iter(&self) -> FieldSliceIter<'_>
    {
        FieldSliceIter {
            bytes: self.bytes,
            index: 0,
            len: self.len,
            field_metadata: self.field_metadata,
        }
    }
}

pub struct FieldSliceIter<'mv>
{
    bytes: &'mv [MaybeUninit<u8>],
    index: usize,
    len: usize,
    field_metadata: &'mv FieldMetadata,
}

impl<'mv> Iterator for FieldSliceIter<'mv>
{
    type Item = Field<'mv>;

    fn next(&mut self) -> Option<Self::Item>
    {
        if self.index >= self.len {
            return None;
        }

        let start_off = self.index * self.field_metadata.size;

        let field_bytes = self
            .bytes
            .get(start_off..start_off + self.field_metadata.size)?;

        self.index += 1;

        Some(Field {
            bytes: field_bytes,
            field_metadata: self.field_metadata,
        })
    }
}

pub struct Field<'mv>
{
    bytes: &'mv [MaybeUninit<u8>],
    field_metadata: &'mv FieldMetadata,
}

impl Field<'_>
{
    pub fn cast<T: 'static>(&self) -> &T
    {
        assert_eq!(TypeId::of::<T>(), self.field_metadata.type_id);

        unsafe { &*self.bytes.as_ptr().cast::<T>() }
    }
}

pub struct FieldSliceMut<'mv>
{
    bytes: &'mv mut [MaybeUninit<u8>],
    len: usize,
    field_metadata: &'mv FieldMetadata,
}

impl FieldSliceMut<'_>
{
    pub fn get_item_mut(&mut self, item_index: usize) -> Option<FieldMut<'_>>
    {
        if item_index >= self.len {
            return None;
        }

        let start_off = item_index * self.field_metadata.size;

        let field_bytes = self
            .bytes
            .get_mut(start_off..start_off + self.field_metadata.size)?;

        Some(FieldMut {
            bytes: field_bytes,
            field_metadata: self.field_metadata,
        })
    }

    pub fn iter_mut(&mut self) -> FieldSliceIterMut<'_>
    {
        FieldSliceIterMut {
            bytes: &mut *self.bytes,
            index: 0,
            len: self.len,
            field_metadata: self.field_metadata,
        }
    }
}

pub struct FieldSliceIterMut<'mv>
{
    bytes: &'mv mut [MaybeUninit<u8>],
    index: usize,
    len: usize,
    field_metadata: &'mv FieldMetadata,
}

impl<'mv> Iterator for FieldSliceIterMut<'mv>
{
    type Item = FieldMut<'mv>;

    fn next(&mut self) -> Option<Self::Item>
    {
        if self.index >= self.len {
            return None;
        }

        let start_off = self.index * self.field_metadata.size;

        let field_bytes_a = self
            .bytes
            .get_mut(start_off..start_off + self.field_metadata.size)?;

        // SAFETY: every item's byte range is disjoint, so handing out a `'mv`
        // reference to this range cannot alias any other item the iterator
        // produces.
        let field_bytes = unsafe {
            std::slice::from_raw_parts_mut(
                field_bytes_a.as_mut_ptr(),
                self.field_metadata.size,
            )
        };

        self.index += 1;

        Some(FieldMut {
            bytes: field_bytes,
            field_metadata: self.field_metadata,
        })
    }
}

pub struct FieldMut<'mv>
{
    bytes: &'mv mut [MaybeUninit<u8>],
    field_metadata: &'mv FieldMetadata,
}

impl FieldMut<'_>
{
    pub fn cast_mut<T: 'static>(&mut self) -> &mut T
    {
        assert_eq!(TypeId::of::<T>(), self.field_metadata.type_id);

        unsafe { &mut *self.bytes.as_mut_ptr().cast::<T>() }
    }
}

#[derive(Debug)]
struct FieldMetadata
{
    size: usize,
    type_id: TypeId,
    drop_in_place: unsafe fn(NonNull<MaybeUninit<u8>>),
}
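// Worked example for `array_layout` below (numbers are illustrative):
// element_size = 4, align = 4, n = 3 yields `Layout { size: 12, align: 4 }`.
// The overflow guard rejects any
// `n > (isize::MAX as usize - (align - 1)) / element_size`, i.e. the largest
// array whose size, rounded up to `align`, still fits in an `isize`.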
#[inline]
const fn array_layout(
    element_size: usize,
    align: usize,
    n: usize,
) -> Result<Layout, CoolLayoutError>
{
    // We need to check two things about the size:
    // - That the total size won't overflow a `usize`, and
    // - That the total size still fits in an `isize`.
    // By using division we can check them both with a single threshold.
    // (In std this check folds away because the element size and alignment
    // are constants there; here they are runtime values.)
    if element_size != 0 && n > max_size_for_align(align) / element_size {
        return Err(CoolLayoutError);
    }

    // SAFETY: We just checked that we won't overflow `usize` when we multiply.
    let array_size = unsafe { element_size.unchecked_mul(n) };

    // SAFETY: We just checked above that `array_size` will not exceed
    // `isize::MAX` even when rounded up to the alignment. The alignment comes
    // from `align_of_val`, so it is guaranteed to be a power of two.
    unsafe { Ok(Layout::from_size_align_unchecked(array_size, align)) }
}

#[allow(clippy::inline_always)]
#[inline(always)]
const fn max_size_for_align(align: usize) -> usize
{
    // (power-of-two implies align != 0.)

    // Rounded up size is:
    //   size_rounded_up = (size + align - 1) & !(align - 1);
    //
    // We know from above that align != 0. If adding (align - 1)
    // does not overflow, then rounding up will be fine.
    //
    // Conversely, &-masking with !(align - 1) will subtract off
    // only low-order-bits. Thus if overflow occurs with the sum,
    // the &-mask cannot subtract enough to undo that overflow.
    //
    // Above implies that checking for summation overflow is both
    // necessary and sufficient.
    isize::MAX as usize - (align - 1)
}

#[derive(Debug)]
struct CoolLayoutError;

#[cfg(test)]
mod tests
{
    use std::any::TypeId;
    use std::mem::offset_of;
    use std::ptr::NonNull;
    use std::sync::atomic::{AtomicUsize, Ordering};

    use crate::{FieldMetadata, MultiVec, OwnedAnyPtr};

    macro_rules! multi_vec_with_data {
        (
            data = &mut $data: ident,
            {
                $($field_name: ident: $field_type: ty = $field_values: expr,)*
            },
            length = $length: literal
        ) => {{
            #[repr(C)]
            struct Data
            {
                $($field_name: [$field_type; $length],)*
            }

            $data = Data {
                $($field_name: $field_values.map(|val| val.into()),)*
            };

            let mut multi_vec = MultiVec::new();

            multi_vec.ptr = NonNull::from(&mut $data).cast();

            std::mem::forget($data);

            multi_vec.field_arr_byte_offsets =
                vec![$(offset_of!(Data, $field_name),)*];

            multi_vec.field_metadata = vec![$(
                FieldMetadata {
                    size: size_of::<$field_type>(),
                    type_id: TypeId::of::<$field_type>(),
                    drop_in_place: |ptr| unsafe {
                        std::ptr::drop_in_place(ptr.cast::<$field_type>().as_ptr());
                    },
                },
            )*];

            multi_vec.length = $length;
            multi_vec.capacity = multi_vec.length;

            multi_vec
        }};
    }

    #[test]
    fn single_push_works()
    {
        let mut multi_vec = MultiVec::new();

        multi_vec.push([OwnedAnyPtr::new(123), OwnedAnyPtr::new(654)]);

        assert_eq!(multi_vec.capacity, 1);
        assert_eq!(multi_vec.length, 1);

        assert_eq!(multi_vec.field_arr_byte_offsets, [0, size_of::<i32>()]);

        assert_eq!(
            unsafe {
                std::slice::from_raw_parts::<i32>(multi_vec.ptr.as_ptr().cast(), 1)
            },
            [123]
        );

        assert_eq!(
            unsafe {
                std::slice::from_raw_parts::<i32>(
                    multi_vec.ptr.as_ptr().byte_add(size_of::<i32>()).cast(),
                    1,
                )
            },
            [654]
        );
    }
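    // A sketch added alongside the original tests: verifies that pushed values
    // can be read back through the public `get_field_slice` API instead of raw
    // pointer reads. Field types and values are illustrative assumptions.
    #[test]
    fn pushed_fields_are_readable_through_field_slices()
    {
        let mut multi_vec = MultiVec::new();

        multi_vec.push([OwnedAnyPtr::new(10u32), OwnedAnyPtr::new(20u16)]);
        multi_vec.push([OwnedAnyPtr::new(11u32), OwnedAnyPtr::new(21u16)]);

        assert_eq!(multi_vec.get_field_slice(0).as_slice::<u32>(), [10, 11]);
        assert_eq!(multi_vec.get_field_slice(1).as_slice::<u16>(), [20, 21]);
    }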
    #[test]
    fn multiple_pushes_works()
    {
        let mut multi_vec = MultiVec::new();

        multi_vec.push([OwnedAnyPtr::new(u32::MAX / 2), OwnedAnyPtr::new::<u16>(654)]);
        multi_vec.push([OwnedAnyPtr::new(765u32), OwnedAnyPtr::new::<u16>(u16::MAX / 3)]);
        multi_vec.push([OwnedAnyPtr::new(u32::MAX / 5), OwnedAnyPtr::new::<u16>(337)]);

        assert_eq!(multi_vec.capacity, 4);
        assert_eq!(multi_vec.length, 3);

        assert_eq!(multi_vec.field_arr_byte_offsets, [0, size_of::<u32>() * 4]);

        assert_eq!(
            unsafe {
                std::slice::from_raw_parts::<u32>(multi_vec.ptr.as_ptr().cast(), 3)
            },
            [u32::MAX / 2, 765, u32::MAX / 5]
        );

        assert_eq!(
            unsafe {
                std::slice::from_raw_parts::<u16>(
                    multi_vec.ptr.as_ptr().byte_add(size_of::<u32>() * 4).cast(),
                    3,
                )
            },
            [654, u16::MAX / 3, 337]
        );
    }

    #[test]
    fn push_with_all_unsized_fields_works()
    {
        struct UnsizedThing;

        let mut multi_vec = MultiVec::new();

        multi_vec.push([OwnedAnyPtr::new(()), OwnedAnyPtr::new(UnsizedThing)]);
        multi_vec.push([OwnedAnyPtr::new(()), OwnedAnyPtr::new(UnsizedThing)]);

        assert_eq!(multi_vec.field_arr_byte_offsets, [0, 0]);
    }

    #[test]
    fn push_with_some_unsized_fields_works()
    {
        struct UnsizedThing;

        #[derive(Debug, PartialEq, Eq)]
        struct CustomerName
        {
            name: &'static str,
        }

        #[derive(Debug, PartialEq, Eq)]
        struct CustomerAge
        {
            age: u8,
        }

        let mut multi_vec = MultiVec::new();

        multi_vec.push([
            OwnedAnyPtr::new(CustomerName { name: "Bob Vance" }),
            OwnedAnyPtr::new(UnsizedThing),
            OwnedAnyPtr::new(CustomerAge { age: 54 }),
        ]);

        multi_vec.push([
            OwnedAnyPtr::new(CustomerName { name: "Andy Bernard" }),
            OwnedAnyPtr::new(UnsizedThing),
            OwnedAnyPtr::new(CustomerAge { age: 40 }),
        ]);

        assert_eq!(multi_vec.capacity, 4);
        assert_eq!(multi_vec.length, 2);

        assert_eq!(
            multi_vec.field_arr_byte_offsets,
            [
                0,
                size_of::<CustomerName>() * multi_vec.capacity,
                size_of::<CustomerName>() * multi_vec.capacity,
            ]
        );

        assert_eq!(
            unsafe {
                std::slice::from_raw_parts::<CustomerName>(
                    multi_vec.ptr.as_ptr().cast(),
                    2,
                )
            },
            [
                CustomerName { name: "Bob Vance" },
                CustomerName { name: "Andy Bernard" }
            ]
        );

        assert_eq!(
            unsafe {
                std::slice::from_raw_parts::<CustomerAge>(
                    multi_vec
                        .ptr
                        .as_ptr()
                        .byte_add(multi_vec.field_arr_byte_offsets[2])
                        .cast(),
                    2,
                )
            },
            [CustomerAge { age: 54 }, CustomerAge { age: 40 }]
        );
    }
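    // A sketch added alongside the original tests: mutates one field value in
    // place through `get_field_slice_mut` + `cast_mut`. Field types and values
    // are illustrative assumptions.
    #[test]
    fn field_values_can_be_mutated_in_place()
    {
        let mut multi_vec = MultiVec::new();

        multi_vec.push([OwnedAnyPtr::new(1u32), OwnedAnyPtr::new(2u16)]);

        {
            let mut field_slice = multi_vec.get_field_slice_mut(1);
            let mut item = field_slice.get_item_mut(0).unwrap();

            *item.cast_mut::<u16>() = 99;
        }

        assert_eq!(multi_vec.get_field_slice(1).as_slice::<u16>(), [99]);
    }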
    //#[test]
    //fn multiple_pushes_in_preallocated_works()
    //{
    //    let mut multi_vec = MultiVec::<Foo>::with_capacity(2);
    //
    //    multi_vec.push(Foo { num_a: 83710000, num_b: 654 });
    //    multi_vec.push(Foo { num_a: 765, num_b: u16::MAX / 7 });
    //
    //    assert_eq!(multi_vec.capacity, 2);
    //    assert_eq!(multi_vec.length, 2);
    //
    //    assert_eq!(multi_vec.field_arr_byte_offsets, [0, size_of::<u32>() * 2]);
    //
    //    assert_eq!(
    //        unsafe {
    //            std::slice::from_raw_parts::<u32>(multi_vec.ptr.as_ptr().cast(), 2)
    //        },
    //        [83710000, 765]
    //    );
    //
    //    assert_eq!(
    //        unsafe {
    //            std::slice::from_raw_parts::<u16>(
    //                multi_vec.ptr.as_ptr().byte_add(size_of::<u32>() * 2).cast(),
    //                2,
    //            )
    //        },
    //        [654, u16::MAX / 7]
    //    );
    //}

    //#[test]
    //fn get_works()
    //{
    //    let mut multi_vec = MultiVec::<Foo>::new();
    //
    //    #[repr(packed)]
    //    #[allow(dead_code)]
    //    struct Data
    //    {
    //        num_a: [u32; 3],
    //        num_b: [u16; 3],
    //    }
    //
    //    let data = Data {
    //        num_a: [u32::MAX - 3000, 901, 5560000],
    //        num_b: [20210, 7120, 1010],
    //    };
    //
    //    multi_vec.ptr = NonNull::from(&data).cast();
    //    multi_vec.field_arr_byte_offsets = vec![0, size_of::<u32>() * 3];
    //    multi_vec.length = 3;
    //    multi_vec.capacity = 3;
    //
    //    assert_eq!(multi_vec.get::<NumA>(0).copied(), Some(u32::MAX - 3000));
    //    assert_eq!(multi_vec.get::<NumB>(0).copied(), Some(20210));
    //
    //    assert_eq!(multi_vec.get::<NumA>(1).copied(), Some(901));
    //    assert_eq!(multi_vec.get::<NumB>(1).copied(), Some(7120));
    //
    //    assert_eq!(multi_vec.get::<NumA>(2).copied(), Some(5560000));
    //    assert_eq!(multi_vec.get::<NumB>(2).copied(), Some(1010));
    //}

    //#[test]
    //fn from_iter_works()
    //{
    //    let multi_vec = MultiVec::<Foo>::from_iter([
    //        Foo { num_a: 456456, num_b: 9090 },
    //        Foo { num_a: 79541, num_b: 2233 },
    //        Foo { num_a: 1761919, num_b: u16::MAX - 75 },
    //        Foo { num_a: u32::MAX / 9, num_b: 8182 },
    //    ]);
    //
    //    assert_eq!(multi_vec.length, 4);
    //    assert_eq!(multi_vec.capacity, 5);
    //
    //    assert_eq!(multi_vec.field_arr_byte_offsets, [0, size_of::<u32>() * 5]);
    //
    //    assert_eq!(
    //        unsafe {
    //            std::slice::from_raw_parts::<u32>(multi_vec.ptr.as_ptr().cast(), 4)
    //        },
    //        [456456, 79541, 1761919, u32::MAX / 9]
    //    );
    //
    //    assert_eq!(
    //        unsafe {
    //            std::slice::from_raw_parts::<u16>(
    //                multi_vec.ptr.as_ptr().byte_add(size_of::<u32>() * 5).cast(),
    //                4,
    //            )
    //        },
    //        [9090, 2233, u16::MAX - 75, 8182]
    //    );
    //}

    #[test]
    fn get_field_slice_works_when_two_fields()
    {
        let mut data;

        let multi_vec = multi_vec_with_data!(
            data = &mut data,
            {
                _a: u32 = [u32::MAX - 3000, 901, 5560000],
                _b: u16 = [20210u16, 7120, 1010],
            },
            length = 3
        );

        assert_eq!(
            multi_vec.get_field_slice(0).as_slice::<u32>(),
            [u32::MAX - 3000, 901, 5560000]
        );

        assert_eq!(
            multi_vec.get_field_slice(1).as_slice::<u16>(),
            [20210, 7120, 1010]
        );
    }

    #[test]
    fn get_field_slice_works_when_three_fields()
    {
        let mut data;

        let multi_vec = multi_vec_with_data!(
            data = &mut data,
            {
                _a: u32 = [123u32, 888, 1910, 11144, 770077],
                _b: String = ["No,", "I", "am", "your", "father"],
                _c: u8 = [120, 88, 54, 3, 7],
            },
            length = 5
        );

        assert_eq!(
            multi_vec.get_field_slice(0).as_slice::<u32>(),
            [123, 888, 1910, 11144, 770077]
        );

        assert_eq!(
            multi_vec.get_field_slice(1).as_slice::<String>(),
            ["No,", "I", "am", "your", "father"]
        );

        assert_eq!(
            multi_vec.get_field_slice(2).as_slice::<u8>(),
            [120, 88, 54, 3, 7]
        );
    }

    #[test]
    fn fields_are_dropped()
    {
        static THING_DROPPED_CNT: AtomicUsize = AtomicUsize::new(0);

        struct Thing
        {
            _num: u32,
        }

        impl Drop for Thing
        {
            fn drop(&mut self)
            {
                THING_DROPPED_CNT.fetch_add(1, Ordering::Relaxed);
            }
        }

        let mut data;

        let multi_vec = multi_vec_with_data!(
            data = &mut data,
            {
                _a: Thing = [
                    Thing { _num: 567 },
                    Thing { _num: 890 },
                    Thing { _num: 345 }
                ],
            },
            length = 3
        );

        drop(multi_vec);

        assert_eq!(THING_DROPPED_CNT.load(Ordering::Relaxed), 3);
    }

    #[test]
    fn zero_sized_fields_are_dropped()
    {
        static THING_DROPPED_CNT: AtomicUsize = AtomicUsize::new(0);

        struct Thing;

        impl Drop for Thing
        {
            fn drop(&mut self)
            {
                THING_DROPPED_CNT.fetch_add(1, Ordering::Relaxed);
            }
        }

        let mut data;

        let multi_vec = multi_vec_with_data!(
            data = &mut data,
            {
                _a: Thing = [const { Thing }; 3],
            },
            length = 3
        );

        drop(multi_vec);

        assert_eq!(THING_DROPPED_CNT.load(Ordering::Relaxed), 3);
    }
}
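// A sketch added as a separate test module: sums one field array by iterating
// `FieldSlice` and downcasting each `Field` with `cast`. Types and values are
// illustrative assumptions.
#[cfg(test)]
mod field_iteration_sketch
{
    use crate::{MultiVec, OwnedAnyPtr};

    #[test]
    fn field_slice_iter_visits_every_item()
    {
        let mut multi_vec = MultiVec::new();

        multi_vec.push([OwnedAnyPtr::new(1u64)]);
        multi_vec.push([OwnedAnyPtr::new(2u64)]);

        let sum: u64 = multi_vec
            .get_field_slice(0)
            .iter()
            .map(|field| *field.cast::<u64>())
            .sum();

        assert_eq!(sum, 3);
    }
}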