@@ -34,7 +34,7 @@ impl<T: Clone + Copy, const N: usize> Queue<T, N> {
         if self.data.len() != self.data.capacity() {
             self.data.push(value)?;
         } else {
-            self.insert((self.front + self.len) % self.data.capacity(), value)?;
+            self.insert(self.len - 1, value)?;
         }

         self.len += 1;
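
The removed line passed an already-mapped physical slot to `insert`. Judging by the lookup in the next hunk, which computes `(self.front + index) % self.data.capacity()` itself, `insert` expects a logical queue index, so pre-mapping applied the front offset twice. A minimal arithmetic sketch of that double mapping, with hypothetical values front = 3 and capacity = 8:

    let (front, cap) = (3usize, 8usize);
    // logical index 3 maps to its physical slot once:
    assert_eq!((front + 3) % cap, 6);
    // feeding the physical slot 6 back through the same mapping
    // lands on an unrelated slot:
    assert_eq!((front + 6) % cap, 1);
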
@@ -66,8 +66,9 @@ impl<T: Clone + Copy, const N: usize> Queue<T, N> {
         if index >= self.len() {
             return Err(KernelError::InvalidAddress);
         }
+        let real_idx = (self.front + index) % self.data.capacity();
         self.data
-            .at_mut((self.front + index) % self.data.capacity())
+            .at_mut(real_idx)
             .map(|insertion_point| *insertion_point = value)
             .ok_or(KernelError::InvalidAddress)
     }
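
Naming the computed slot `real_idx` keeps the `at_mut` chain readable and makes the wrap-around explicit. A one-line sketch of the case the modulo handles, with hypothetical values front = 7 and capacity = 10:

    // logical index 4 runs past the buffer end and wraps to slot 1
    assert_eq!((7usize + 4) % 10, 1);
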
@@ -102,7 +103,7 @@ impl<T: Clone + Copy, const N: usize> Queue<T, N> {
     }

     pub fn grow_capacity(&mut self, new_size: usize) -> Result<(), KernelError> {
-        if new_size < self.data.capacity() {
+        if new_size <= self.data.capacity() {
             return Ok(());
         }
         // if the queue wraps
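
The relaxed guard also handles the boundary case: asking for the current capacity is a no-op that should succeed without entering the relayout code below. A sketch in the style of the tests further down, assuming a fresh `Queue::<usize, 10>` reports capacity 10:

    setup_memory(1000);
    let mut queue = Queue::<usize, 10>::new();
    // new_size == current capacity: early Ok(()) with no relayout or reserve
    assert_eq!(queue.grow_capacity(10), Ok(()));
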
@@ -111,8 +112,11 @@ impl<T: Clone + Copy, const N: usize> Queue<T, N> {
             // When the queue wraps around the end, the wrapping would no longer happen with the new size

             // we could do some complicated in-place swapping here instead of using a potentially expensive temporary storage
-            let mut swap_helper = Box::new_slice_uninit(self.data.capacity() - self.front)?;
+            let non_wrapping_queue_start_len = self.data.capacity() - self.front;
+            let mut swap_helper = Box::new_slice_uninit(non_wrapping_queue_start_len)?;
+            BUG_ON!(swap_helper.len() != non_wrapping_queue_start_len);

+            // we take the start of the queue (which is located at the end of the current memory region) and copy it to temp storage
            for i in 0..swap_helper.len() {
                // Returning an error here should never happen if the queue was in a consistent state beforehand. If it was not, no guarantees about the contents are made.
                swap_helper[i].write(
@@ -122,21 +126,23 @@ impl<T: Clone + Copy, const N: usize> Queue<T, N> {
                         .ok_or(KernelError::InvalidAddress)?,
                 );
             }
+            // One past the logically last element of the queue
             let end = (self.front + self.len) % self.data.capacity();
+            // now move the logical end of the queue further back to make space for the logical start
             for i in 0..end {
-                BUG_ON!(i + swap_helper.len() >= self.data.capacity());
-                self.data.swap(i, i + swap_helper.len());
+                BUG_ON!(i + non_wrapping_queue_start_len >= self.data.capacity());
+                self.data.swap(i, i + non_wrapping_queue_start_len);
             }
             // now copy the data back from the temp helper
-            for i in 0..swap_helper.len() {
-                // Safety: values copied into our helper are part of the active queue, must therefore be initedF
+            for i in 0..non_wrapping_queue_start_len {
+                // Safety: values copied into our helper are part of the active queue, must therefore be inited
                 self.data
                     .at_mut(i)
                     .map(|el| *el = unsafe { swap_helper[i].assume_init() });
             }
             self.front = 0;
         }
-        self.data.reserve(new_size - self.data.capacity())
+        self.data.reserve_total_capacity(new_size)
     }
 }

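The hunk above linearizes a wrapped queue before growing it: the logical start of the queue (stored physically at the end of the buffer) is stashed in temporary storage, the wrapped tail is shifted right to make room, the stash is copied back to index 0, and `front` is reset. It also switches from reserving a size delta to requesting an absolute target via `reserve_total_capacity(new_size)`, which is harder to get wrong than `reserve(new_size - capacity)`. A self-contained sketch of the linearization idea in plain std Rust, with `copy_within` standing in for the kernel's manual swap loop and none of its fallible types:

    /// Rearranges a wrapped ring buffer so its contents start at index 0.
    /// Returns the new front index (always 0 once linearized).
    fn linearize<T: Copy>(buf: &mut [T], front: usize, len: usize) -> usize {
        let cap = buf.len();
        if front + len <= cap {
            return front; // the queue does not wrap; nothing to do
        }
        let start_len = cap - front;               // logical start, stored at the buffer end
        let wrapped = (front + len) % cap;         // logical tail, wrapped to the buffer start
        let stash: Vec<T> = buf[front..].to_vec(); // temp storage for the start
        buf.copy_within(0..wrapped, start_len);    // shift the tail right (memmove-style)
        buf[..start_len].copy_from_slice(&stash);  // the start goes back to index 0
        0
    }

    // capacity 8, front = 6, len = 5: logical queue [a, b, c, d, e],
    // physical layout [c, d, e, _, _, _, a, b] before linearizing
    let mut buf = ['c', 'd', 'e', '_', '_', '_', 'a', 'b'];
    assert_eq!(linearize(&mut buf, 6, 5), 0);
    assert_eq!(&buf[..5], &['a', 'b', 'c', 'd', 'e']);
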
@@ -145,16 +151,68 @@ impl<T: Clone + Copy, const N: usize> Queue<T, N> {
 #[cfg(test)]
 mod tests {
     use super::*;
+    use crate::mem::GLOBAL_ALLOCATOR;
+    use core::ops::Range;
+
+    fn alloc_range(length: usize) -> Range<usize> {
+        let alloc_range = std::alloc::Layout::from_size_align(length, align_of::<u128>()).unwrap();
+        let ptr = unsafe { std::alloc::alloc(alloc_range) };
+        ptr as usize..ptr as usize + length
+    }
+
+    fn setup_memory(mem_size: usize) {
+        unsafe {
+            GLOBAL_ALLOCATOR
+                .lock()
+                .add_range(alloc_range(mem_size))
+                .unwrap()
+        };
+    }

     #[test]
     fn growing_retains_queue_state_without_wrapping() {
+        setup_memory(1000);
         let mut queue = Queue::<usize, 10>::new();
         for i in 0..10 {
-            queue.push_back(i).unwrap();
+            assert_eq!(queue.push_back(i), Ok(()));
         }
-        queue.grow_capacity(20).unwrap();
+
+        assert_eq!(queue.grow_capacity(20), Ok(()));
         for i in 0..10 {
-            assert_eq!(queue.pop_front().unwrap(), i);
+            assert_eq!(queue.pop_front(), Some(i));
+        }
+    }
+
+    #[test]
+    fn growing_retains_queue_state_with_wrapping() {
+        setup_memory(1000);
+        let mut queue = Queue::<usize, 10>::new();
+        for i in 0..10 {
+            queue.push_back(i).unwrap();
+        }
+        // sanity check that the queue really is full
+        assert_eq!(queue.push_back(1), Err(KernelError::OutOfMemory));
+        assert_eq!(queue.len(), 10);
+
+        // pop and subsequently push more elements to make the queue wrap
+        for i in 0..5 {
+            assert_eq!(queue.pop_front(), Some(i));
+        }
+
+        assert_eq!(*queue.front().unwrap(), 5);
+        assert_eq!(*queue.back().unwrap(), 9);
+        assert_eq!(queue.len(), 5);
+
+        for i in 10..15 {
+            assert_eq!(queue.push_back(i), Ok(()));
+        }
+
+        assert_eq!(queue.len(), 10);
+        assert_eq!(*queue.front().unwrap(), 5);
+        assert_eq!(*queue.back().unwrap(), 14);
+        assert_eq!(queue.grow_capacity(20), Ok(()));
+        for i in 5..15 {
+            assert_eq!(queue.pop_front(), Some(i));
         }
     }
 }
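
For reference, the wrapping test drives `grow_capacity` through exactly the relayout path above. Assuming each pop advances `front` by one (consistent with `grow_capacity` resetting it to 0), the state just before `grow_capacity(20)` is capacity = 10, front = 5, len = 10:

    let (cap, front, len) = (10usize, 5usize, 10usize);
    let non_wrapping_queue_start_len = cap - front; // 5 elements at the buffer's end
    let end = (front + len) % cap;                  // 5 wrapped elements at its start
    assert_eq!((non_wrapping_queue_start_len, end), (5, 5));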