author     HampusM <hampus@hampusmat.com>    2024-08-26 20:06:36 +0200
committer  HampusM <hampus@hampusmat.com>    2024-08-26 20:06:36 +0200
commit     d41734115eee6af64d894b46f5a5e628ed127477 (patch)
tree       109f74c97f515e7d629b889922b50eda04fd4950 /src
parent     0f51ae2df6b8101297fce8540cdc51166c34482b (diff)
feat: add amortization
Diffstat (limited to 'src')
-rw-r--r--  src/lib.rs  |  45
1 file changed, 37 insertions, 8 deletions
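The change below replaces the old grow-by-exactly-one reallocation on every push with amortized growth borrowed from std's RawVec: when a push runs out of capacity, the new capacity becomes the larger of twice the current capacity and the exact capacity required, and it never drops below a size-dependent minimum. The sketch below only illustrates that policy; `amortized_capacity` and `min_non_zero_cap` are hypothetical stand-ins for the logic this commit adds as `grow_amortized` and `MIN_NON_ZERO_CAP`, not part of the crate.

    use std::cmp::max;

    // Hypothetical stand-in for the capacity policy added in this commit.
    // `min_non_zero_cap` plays the role of MIN_NON_ZERO_CAP (8, 4 or 1,
    // depending on the element size).
    fn amortized_capacity(current: usize, additional: usize, min_non_zero_cap: usize) -> usize {
        // Exact capacity needed to hold the extra elements.
        let required = current.checked_add(additional).unwrap();

        // Doubling guarantees exponential growth and cannot overflow while
        // the capacity stays at or below isize::MAX.
        max(min_non_zero_cap, max(current * 2, required))
    }

    fn main() {
        // Capacity progression for a moderate-sized element (minimum 4),
        // starting from the first allocation of capacity 1: 1 -> 4 -> 8 -> 16 -> ...
        assert_eq!(amortized_capacity(1, 1, 4), 4);
        assert_eq!(amortized_capacity(4, 1, 4), 8);
        assert_eq!(amortized_capacity(8, 1, 4), 16);
    }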
diff --git a/src/lib.rs b/src/lib.rs
index 098d726..424b366 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,4 +1,5 @@
use std::alloc::{alloc, handle_alloc_error, realloc, Layout};
+use std::cmp::max;
use std::marker::PhantomData;
use std::mem::{forget, needs_drop};
use std::ptr::NonNull;
@@ -34,6 +35,7 @@ where
ptr: NonNull<u8>,
field_arr_byte_offsets: Vec<usize>,
length: usize,
+ capacity: usize,
layout: Option<Layout>,
}
@@ -41,6 +43,20 @@ impl<ItemT> MultiVec<ItemT>
where
ItemT: Item,
{
+ // The following is borrowed from std's RawVec implementation.
+ // Skip the minimum non-zero capacity to:
+ // - 8 if the element size is 1, because any heap allocator is likely to round up a
+ // request of less than 8 bytes to at least 8 bytes.
+ // - 4 if elements are moderate-sized (<= 1 KiB).
+ // - 1 otherwise, to avoid wasting too much space for very short Vecs.
+ const MIN_NON_ZERO_CAP: usize = if size_of::<ItemT>() == 1 {
+ 8
+ } else if size_of::<ItemT>() <= 1024 {
+ 4
+ } else {
+ 1
+ };
+
pub const fn new() -> Self
{
Self {
@@ -48,22 +64,30 @@ where
ptr: NonNull::dangling(),
field_arr_byte_offsets: Vec::new(),
length: 0,
+ capacity: 0,
layout: None,
}
}
pub fn push(&mut self, item: ItemT)
{
- if self.length != 0 {
- self.reallocate_for_more(1);
+ if self.capacity != 0 {
+ if self.capacity == self.length {
+ self.grow_amortized(1);
+ }
- self.write_item(self.length - 1, item);
+ self.write_item(self.length, item);
+
+ self.length += 1;
+
return;
}
self.do_first_alloc();
self.write_item(0, item);
+
+ self.length = 1;
}
pub fn get<FieldSel>(
@@ -91,13 +115,18 @@ where
unsafe { field_ptr.cast().as_ref() }
}
- fn reallocate_for_more(&mut self, item_cnt_inc: usize)
+ fn grow_amortized(&mut self, additional: usize)
{
- let new_length = self.length + item_cnt_inc;
+ let required_cap = self.capacity.checked_add(additional).unwrap();
+
+ // This guarantees exponential growth. The doubling cannot overflow
+ // because `self.capacity <= isize::MAX` and the type of `self.capacity` is `usize`.
+ let new_capacity = max(self.capacity * 2, required_cap);
+ let new_capacity = max(Self::MIN_NON_ZERO_CAP, new_capacity);
let layout = &self.layout.unwrap();
- let (new_layout, new_field_arr_byte_offsets) = Self::create_layout(new_length);
+ let (new_layout, new_field_arr_byte_offsets) = Self::create_layout(new_capacity);
let Some(new_ptr) = NonNull::new(unsafe {
realloc(self.ptr.as_ptr(), *layout, new_layout.size())
@@ -121,14 +150,14 @@ where
std::ptr::copy(
old_field_arr_ptr.as_ptr(),
new_field_arr_ptr.as_ptr(),
- field_metadata.size * self.length,
+ field_metadata.size * self.capacity,
);
}
}
self.ptr = new_ptr;
self.layout = Some(new_layout);
- self.length = new_length;
+ self.capacity = new_capacity;
self.field_arr_byte_offsets = new_field_arr_byte_offsets;
}
@@ -141,7 +170,7 @@ where
};
self.ptr = ptr;
- self.length = 1;
+ self.capacity = 1;
self.field_arr_byte_offsets = field_arr_byte_offsets;
self.layout = Some(layout);
}
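The payoff of the extra capacity bookkeeping: the old push reallocated and shifted every field array on every single insertion, so n pushes cost O(n^2) copies in total, while doubling keeps the total number of element slots moved by reallocations within roughly 2n, making push amortized O(1). The simulation below illustrates that bound under the same growth policy, assuming a moderate-sized element (minimum capacity 4); it is not code from this crate.

    fn main() {
        let min_non_zero_cap = 4usize;
        let (mut length, mut capacity) = (0usize, 0usize);
        let mut moved = 0usize; // element slots copied by reallocations so far

        for pushes in 1..=10_000usize {
            if capacity == 0 {
                capacity = 1; // first allocation, nothing to move
            } else if length == capacity {
                // Growing relocates the `capacity` existing slots
                // (once per field array in MultiVec).
                moved += capacity;
                capacity = min_non_zero_cap.max((capacity * 2).max(capacity + 1));
            }
            length += 1;

            // Total relocation work stays within a constant factor of the
            // number of pushes, hence amortized O(1) per push.
            assert!(moved <= 2 * pushes);
        }

        println!("pushes: {length}, capacity: {capacity}, slots moved: {moved}");
    }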