|
1 | | -use core::alloc::{AllocError, Allocator, Layout, LayoutError}; |
| 1 | +use core::alloc::{AllocError, Allocator, Layout}; |
2 | 2 | use core::ptr; |
3 | 3 | use core::ptr::NonNull; |
4 | 4 |
|
@@ -252,73 +252,74 @@ unsafe impl<S: PageSize> FrameAllocator<S> for LockedPhysicalMemoryAllocator<'_> |
252 | 252 | } |
253 | 253 | } |
254 | 254 |
|
255 | | -/// Error type used in `allocate_zeroed_buffer`. |
256 | | -#[derive(Debug, Clone)] |
257 | | -pub(crate) enum AllocZeroedBufferError { |
258 | | - LayoutError(LayoutError), |
259 | | - AllocError(AllocError), |
260 | | -} |
261 | | - |
/// Physically contiguous buffer of memory. Allocates by page, so it can
/// allocate more memory than requested. Useful for e.g. Direct Memory Access
/// (DMA) like with VirtIO buffers.
///
/// NOTE: This type implements `Drop` and will free the allocated memory when
/// it goes out of scope.
#[derive(Debug)]
pub(crate) struct PhysicalBuffer {
    // Index of the first physical page backing this buffer, in page units
    // (not bytes); converted to a byte address by multiplying with
    // `PhysicalMemoryAllocator::PAGE_SIZE`.
    start_page: usize,
    // Number of contiguous pages owned by this buffer; the full range
    // `start_page..start_page + num_pages` is returned to the allocator on Drop.
    num_pages: usize,
}
269 | 266 |
|
270 | 267 | impl PhysicalBuffer { |
271 | | - pub(crate) fn allocate( |
272 | | - len_bytes: usize, |
273 | | - alignment: usize, |
274 | | - ) -> Result<Self, AllocZeroedBufferError> { |
275 | | - let layout = Layout::from_size_align(len_bytes, alignment) |
276 | | - .map_err(AllocZeroedBufferError::LayoutError)?; |
277 | | - let ptr = KERNEL_PHYSICAL_ALLOCATOR |
278 | | - .allocate_zeroed(layout) |
279 | | - .map_err(AllocZeroedBufferError::AllocError)?; |
280 | | - Ok(Self { ptr }) |
| 268 | + // Don't need to expose this b/c allocate_zeroed is safer. |
| 269 | + fn allocate(min_bytes: usize) -> Result<Self, AllocError> { |
| 270 | + let num_pages = min_bytes.div_ceil(PhysicalMemoryAllocator::PAGE_SIZE); |
| 271 | + let start_page = KERNEL_PHYSICAL_ALLOCATOR.with_lock(|allocator| { |
| 272 | + allocator |
| 273 | + .allocator |
| 274 | + .allocate_contiguous(num_pages) |
| 275 | + .ok_or(AllocError) |
| 276 | + })?; |
| 277 | + Ok(Self { |
| 278 | + start_page, |
| 279 | + num_pages, |
| 280 | + }) |
281 | 281 | } |
282 | 282 |
|
283 | | - pub(crate) fn allocate_value<T>(val: T) -> Result<Self, AllocZeroedBufferError> { |
284 | | - let layout = Layout::new::<T>(); |
285 | | - let ptr = KERNEL_PHYSICAL_ALLOCATOR |
286 | | - .allocate_zeroed(layout) |
287 | | - .map_err(AllocZeroedBufferError::AllocError)?; |
| 283 | + pub(crate) fn allocate_zeroed(min_bytes: usize) -> Result<Self, AllocError> { |
| 284 | + let buffer = Self::allocate(min_bytes)?; |
| 285 | + let ptr = buffer.address().as_u64() as *mut u8; |
288 | 286 | unsafe { |
289 | | - ptr::write_volatile(ptr.as_ptr().cast::<T>(), val); |
| 287 | + ptr::write_bytes(ptr, 0, buffer.len_bytes()); |
290 | 288 | } |
291 | | - Ok(Self { ptr }) |
| 289 | + Ok(buffer) |
292 | 290 | } |
293 | 291 |
|
294 | | - /// Consumes the buffer and returns the underlying physical address and |
295 | | - /// length in bytes. NOTE: It is up to the caller to free this memory, |
296 | | - /// ideally by constructing a new buffer with |
297 | | - /// `PhysicalBuffer::from_raw_parts` and letting that `Drop`. |
298 | | - pub(crate) fn into_raw_parts(self) -> (PhysAddr, usize) { |
299 | | - let buf = core::mem::ManuallyDrop::new(self); |
300 | | - let addr = PhysAddr::new(buf.ptr.addr().get() as u64); |
301 | | - let len_bytes = buf.ptr.len(); |
302 | | - (addr, len_bytes) |
| 292 | + pub(crate) fn address(&self) -> PhysAddr { |
| 293 | + PhysAddr::new(self.start_page as u64 * PhysicalMemoryAllocator::PAGE_SIZE as u64) |
303 | 294 | } |
304 | 295 |
|
305 | | - pub(crate) unsafe fn from_raw_parts(addr: PhysAddr, len_bytes: usize) -> Self { |
306 | | - let ptr = unsafe { nonnull_ptr_slice_from_addr_len(addr.as_u64() as usize, len_bytes) }; |
307 | | - Self { ptr } |
| 296 | + pub(crate) fn len_bytes(&self) -> usize { |
| 297 | + self.num_pages * PhysicalMemoryAllocator::PAGE_SIZE |
| 298 | + } |
| 299 | + |
| 300 | + pub(crate) unsafe fn write_offset<T>(&mut self, offset: usize, val: T) { |
| 301 | + let buffer_len = self.len_bytes(); |
| 302 | + assert!( |
| 303 | + offset < self.len_bytes(), |
| 304 | + "tried to write value at offset {offset} but buffer only has {buffer_len} bytes" |
| 305 | + ); |
| 306 | + let ptr = (self.address().as_u64() + offset as u64) as *mut T; |
| 307 | + ptr::write_volatile(ptr, val); |
| 308 | + } |
| 309 | + |
| 310 | + // TODO: Don't allow leaking. We are only doing this temporarily. |
| 311 | + pub(crate) fn leak(self) -> PhysAddr { |
| 312 | + let buf = core::mem::ManuallyDrop::new(self); |
| 313 | + buf.address() |
308 | 314 | } |
309 | 315 | } |
310 | 316 |
|
311 | 317 | impl Drop for PhysicalBuffer { |
312 | 318 | fn drop(&mut self) { |
313 | | - // TODO: Is this correct? DRY with the `deallocate`, and perhaps add |
314 | | - // some types to ensure that we are converting to pages correctly. Also |
315 | | - // ensure that we do indeed "own" the entire page we are de-allocating. |
316 | | - let layout = unsafe { |
317 | | - Layout::from_size_align_unchecked(self.ptr.len(), PhysicalMemoryAllocator::PAGE_SIZE) |
318 | | - }; |
319 | | - let u8_ptr = self.ptr.cast::<u8>(); |
320 | | - unsafe { |
321 | | - KERNEL_PHYSICAL_ALLOCATOR.deallocate(u8_ptr, layout); |
322 | | - }; |
| 319 | + KERNEL_PHYSICAL_ALLOCATOR.with_lock(|allocator| { |
| 320 | + allocator |
| 321 | + .allocator |
| 322 | + .free_contiguous(self.start_page, self.num_pages); |
| 323 | + }); |
323 | 324 | } |
324 | 325 | } |
0 commit comments