
Commit 9f27958

mem: Fix some bugs in growable queue
1 parent 234784f · commit 9f27958

2 files changed (+92 / -12 lines)


Diff for: kernel/src/mem/array.rs (+22)

@@ -170,6 +170,28 @@ impl<T: Clone + Copy, const N: usize> Vec<T, N> {
         Ok(())
     }

+    pub fn reserve_total_capacity(&mut self, total_capacity: usize) -> Result<(), KernelError> {
+        // Check if we already have enough space
+        if self.capacity() >= total_capacity {
+            return Ok(());
+        }
+
+        // If we don't have enough space, we need to grow the extra storage.
+        let new_out_of_line_cap = total_capacity - N;
+        let mut new_extra = Box::new_slice_uninit(new_out_of_line_cap)?;
+
+        // Check that the new extra storage has the requested length.
+        BUG_ON!(new_extra.len() != new_out_of_line_cap);
+
+        let curr_out_of_line_size = self.extra.len();
+        // Copy the old extra storage into the new one.
+        new_extra[..curr_out_of_line_size].copy_from_slice(&self.extra);
+
+        // Replace the old extra storage with the new one. The old one will be dropped.
+        self.extra = new_extra;
+        Ok(())
+    }
+
     /// Create a new Vec with the given length and value.
     ///
     /// `length` - The length of the Vec.

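As a quick illustration of the new method's "total capacity" semantics, here is a minimal usage sketch. It is not part of the commit: the `Vec::new()` constructor, the element type, and the function name are assumed for illustration, while `Vec`, `KernelError`, and `BUG_ON!` are the kernel items used above.

    // Illustrative sketch only, not from the commit.
    fn reserve_example() -> Result<(), KernelError> {
        // Assumed constructor; the Vec starts with N = 4 elements of inline storage.
        let mut v: Vec<u32, 4> = Vec::new();

        // Grow the backing storage so that capacity() is at least 16 in total.
        v.reserve_total_capacity(16)?;
        BUG_ON!(v.capacity() < 16);

        // Asking for no more than the current capacity is an early no-op
        // (the `capacity() >= total_capacity` check returns Ok(())).
        v.reserve_total_capacity(8)?;
        BUG_ON!(v.capacity() < 16);
        Ok(())
    }
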
Diff for: kernel/src/mem/queue.rs (+70 / -12)

@@ -34,7 +34,7 @@ impl<T: Clone + Copy, const N: usize> Queue<T, N> {
         if self.data.len() != self.data.capacity() {
             self.data.push(value)?;
         } else {
-            self.insert((self.front + self.len) % self.data.capacity(), value)?;
+            self.insert(self.len - 1, value)?;
         }

         self.len += 1;

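Context for the `push_back` change: `insert` (next hunk) takes a logical queue index and maps it to a physical slot itself via `(self.front + index) % capacity`, so the old call, which passed an already-wrapped physical index, applied the mapping twice. As an illustrative example with made-up numbers: for capacity 10, `front = 7`, `len = 5`, the old code called `insert((7 + 5) % 10) = insert(2)`, which `insert` then mapped to physical slot `(7 + 2) % 10 = 9`, overwriting an element in the middle of the queue instead of writing past its back.
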
@@ -66,8 +66,9 @@ impl<T: Clone + Copy, const N: usize> Queue<T, N> {
         if index >= self.len() {
             return Err(KernelError::InvalidAddress);
         }
+        let real_idx = (self.front + index) % self.data.capacity();
         self.data
-            .at_mut((self.front + index) % self.data.capacity())
+            .at_mut(real_idx)
             .map(|insertion_point| *insertion_point = value)
             .ok_or(KernelError::InvalidAddress)
     }

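The change above is a pure readability refactor: the physical index computation is hoisted into `real_idx` before the call chain, with no change in behavior.
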
@@ -102,7 +103,7 @@ impl<T: Clone + Copy, const N: usize> Queue<T, N> {
     }

     pub fn grow_capacity(&mut self, new_size: usize) -> Result<(), KernelError> {
-        if new_size < self.data.capacity() {
+        if new_size <= self.data.capacity() {
             return Ok(());
         }
         // if the queue wraps

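A note on the boundary change: with the old strict `<` comparison, `grow_capacity(n)` with `n` equal to the current capacity fell through to the un-wrapping and reservation work below even though no growth was needed; with `<=`, requesting the current capacity (or less) returns early as a no-op.
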
@@ -111,8 +112,11 @@ impl<T: Clone + Copy, const N: usize> Queue<T, N> {
             // When the queue wraps around the end, the wrapping would not happen anymore with the new size

             // we could do some complicated in-place swapping here instead of using a potentially expensive temporary storage
-            let mut swap_helper = Box::new_slice_uninit(self.data.capacity() - self.front)?;
+            let non_wrapping_queue_start_len = self.data.capacity() - self.front;
+            let mut swap_helper = Box::new_slice_uninit(non_wrapping_queue_start_len)?;
+            BUG_ON!(swap_helper.len() != non_wrapping_queue_start_len);

+            // we take the start of the queue (which is located at the end of the current memory region) and copy it to temp storage
             for i in 0..swap_helper.len() {
                 // Returning an error here should never happen if the queue is in a consistent state prior. If not, no guarantees about contents are made.
                 swap_helper[i].write(

@@ -122,21 +126,23 @@ impl<T: Clone + Copy, const N: usize> Queue<T, N> {
                         .ok_or(KernelError::InvalidAddress)?,
                 );
             }
+            // One past the logically last element of the queue
             let end = (self.front + self.len) % self.data.capacity();
+            // now move the logical end of the queue further back to make space for the logical start
             for i in 0..end {
-                BUG_ON!(i + swap_helper.len() >= self.data.capacity());
-                self.data.swap(i, i + swap_helper.len());
+                BUG_ON!(i + non_wrapping_queue_start_len >= self.data.capacity());
+                self.data.swap(i, i + non_wrapping_queue_start_len);
             }
             // now copy the data back from the temp helper
-            for i in 0..swap_helper.len() {
-                // Safety: values copied into our helper are part of the active queue, must therefore be initedF
+            for i in 0..non_wrapping_queue_start_len {
+                // Safety: values copied into our helper are part of the active queue, must therefore be inited
                 self.data
                     .at_mut(i)
                     .map(|el| *el = unsafe { swap_helper[i].assume_init() });
             }
             self.front = 0;
         }
-        self.data.reserve(new_size - self.data.capacity())
+        self.data.reserve_total_capacity(new_size)
     }
 }

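The un-wrapping step above is easier to follow in isolation. Below is a small standalone sketch of the same idea against a plain slice; it is an illustration of the technique rather than the commit's code (it uses `std` for the temporary buffer and shifts the wrapped tail with a reverse-order copy instead of the swap loop above):

    /// Make a wrapped ring buffer contiguous again and return the new `front` (always 0).
    /// Illustrative only; the kernel code works on its own `Vec` and `MaybeUninit` storage.
    fn unwrap_in_place<T: Copy>(buf: &mut [T], front: usize, len: usize) -> usize {
        let cap = buf.len();
        if front + len <= cap {
            return front; // the queue does not wrap, nothing to do
        }
        let head_len = cap - front;          // logical start, stored at the end of the buffer
        let tail_len = (front + len) % cap;  // logical end, wrapped around to the start
        // 1. stash the logical start (the role swap_helper plays above)
        let head: Vec<T> = buf[front..].to_vec();
        // 2. shift the wrapped tail back to make room; reverse order keeps overlapping ranges safe
        for i in (0..tail_len).rev() {
            buf[i + head_len] = buf[i];
        }
        // 3. copy the stashed start back to the beginning of the buffer
        buf[..head_len].copy_from_slice(&head);
        0
    }

    // Example: capacity 6, front = 4, the queue holds [4, 5, 6, 7] in slots 4, 5, 0, 1.
    fn unwrap_example() {
        let mut buf = [6, 7, 0, 0, 4, 5];
        let front = unwrap_in_place(&mut buf, 4, 4);
        assert_eq!(front, 0);
        assert_eq!(&buf[..4], &[4, 5, 6, 7]);
    }

The commit reaches the same final layout with the `swap` loop plus a copy back out of the `MaybeUninit` helper, and only then grows the underlying storage.
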
@@ -145,16 +151,68 @@ impl<T: Clone + Copy, const N: usize> Queue<T, N> {
 #[cfg(test)]
 mod tests {
     use super::*;
+    use crate::mem::GLOBAL_ALLOCATOR;
+    use core::ops::Range;
+
+    fn alloc_range(length: usize) -> Range<usize> {
+        let alloc_range = std::alloc::Layout::from_size_align(length, align_of::<u128>()).unwrap();
+        let ptr = unsafe { std::alloc::alloc(alloc_range) };
+        ptr as usize..ptr as usize + length
+    }
+
+    fn setup_memory(mem_size: usize) {
+        unsafe {
+            GLOBAL_ALLOCATOR
+                .lock()
+                .add_range(alloc_range(mem_size))
+                .unwrap()
+        };
+    }

     #[test]
     fn growing_retains_queue_state_without_wrapping() {
+        setup_memory(1000);
         let mut queue = Queue::<usize, 10>::new();
         for i in 0..10 {
-            queue.push_back(i).unwrap();
+            assert_eq!(queue.push_back(i), Ok(()));
         }
-        queue.grow_capacity(20).unwrap();
+
+        assert_eq!(queue.grow_capacity(20), Ok(()));
         for i in 0..10 {
-            assert_eq!(queue.pop_front().unwrap(), i);
+            assert_eq!(queue.pop_front(), Some(i));
+        }
+    }
+
+    #[test]
+    fn growing_retains_queue_state_with_wrapping() {
+        setup_memory(1000);
+        let mut queue = Queue::<usize, 10>::new();
+        for i in 0..10 {
+            queue.push_back(i).unwrap();
+        }
+        // sanity check that queue really is full
+        assert_eq!(queue.push_back(1), Err(KernelError::OutOfMemory));
+        assert_eq!(queue.len(), 10);
+
+        // pop and subsequently push more elements to make queue wrap
+        for i in 0..5 {
+            assert_eq!(queue.pop_front(), Some(i));
+        }
+
+        assert_eq!(*queue.front().unwrap(), 5);
+        assert_eq!(*queue.back().unwrap(), 9);
+        assert_eq!(queue.len(), 5);
+
+        for i in 10..15 {
+            assert_eq!(queue.push_back(i), Ok(()));
+        }
+
+        assert_eq!(queue.len(), 10);
+        assert_eq!(*queue.front().unwrap(), 5);
+        assert_eq!(*queue.back().unwrap(), 14);
+        assert_eq!(queue.grow_capacity(20), Ok(()));
+        for i in 5..15 {
+            assert_eq!(queue.pop_front(), Some(i));
         }
     }
 }
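A note on the new test scaffolding: the helpers seed the kernel's `GLOBAL_ALLOCATOR` with a block obtained from `std::alloc`, which suggests these tests run hosted with `std` available; 1000 bytes appears to be comfortably enough for the 20-element `usize` queues the tests grow into.
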
