 
 #![allow(clippy::needless_range_loop)]
 
+use core::{
+    alloc::{AllocError, Allocator, Layout},
+    ptr::NonNull,
+};
+
 const OOM_MSG: &str = "requires more memory space to initialize BuddyAlloc";
 const LEAF_ALIGN_ERROR_MSG: &str = "leaf size must be aligned to 16 bytes";
 /// Required to align to 16 bytes, since Node takes 16 bytes on a 64-bit machine.
@@ -299,11 +304,56 @@ impl BuddyAlloc {
         self.unavailable = end_addr - base_addr;
     }
 
-    pub fn malloc(&mut self, nbytes: usize) -> *mut u8 {
+    /// Number of bytes still available for allocation.
+    pub fn available_bytes(&self) -> usize {
+        self.end_addr - self.unavailable - self.base_addr
+    }
+
+    fn entry(&self, i: usize) -> &Entry {
+        debug_assert!(i < self.entries_size, "index out of range");
+        unsafe { self.entries.add(i).as_ref().expect("entry") }
+    }
+
+    /// Find the order `k` at which the block containing `p` was allocated.
+    fn find_k_for_p(&self, p: *const u8) -> usize {
+        for k in 0..(self.entries_size - 1) {
+            if bit_isset(self.entry(k + 1).split, self.block_index(k + 1, p)) {
+                debug_assert!(bit_isset(self.entry(k).alloc, self.block_index(k, p)));
+                return k;
+            }
+        }
+        0
+    }
+
+    /// Block index of `p` at order `k`.
+    fn block_index(&self, k: usize, p: *const u8) -> usize {
+        if (p as usize) < self.base_addr {
+            // TODO handle this outside
+            panic!("pointer is below the allocator's base address");
+        }
+        let n = p as usize - self.base_addr;
+        // equal to: n / block_size_2base(k, self.leaf2base);
+        let index = (n >> k) >> self.leaf2base;
+        debug_assert!(index < nblock(k, self.entries_size));
+        index
+    }
+
+    /// Block address of index `i` at order `k`.
+    fn block_addr(&self, k: usize, i: usize) -> usize {
+        // equal to: i * block_size_2base(k, self.leaf2base);
+        let n = (i << k) << self.leaf2base;
+        self.base_addr + n
+    }
+}
+
+unsafe impl Allocator for BuddyAlloc {
+    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+        let nbytes = layout.size();
+        // TODO: alignment!
         let fk = first_up_k(nbytes, 1 << self.leaf2base);
         let mut k = match (fk..self.entries_size).find(|&k| !Node::is_empty(self.entry(k).free)) {
             Some(k) => k,
-            None => return core::ptr::null_mut(),
+            None => return Err(AllocError),
         };
         let p: *mut u8 = Node::pop(self.entry(k).free) as *mut u8;
         bit_set(self.entry(k).alloc, self.block_index(k, p));
@@ -321,10 +371,15 @@ impl BuddyAlloc {
             p as usize,
             "misalignment"
         );
-        p
+
+        Ok(NonNull::slice_from_raw_parts(
+            unsafe { NonNull::new_unchecked(p) },
+            layout.size(),
+        ))
     }
 
-    pub fn free(&mut self, mut p: *mut u8) {
+    unsafe fn deallocate(&self, ptr: NonNull<u8>, _layout: Layout) {
+        let mut p = ptr.as_ptr();
         let mut k = self.find_k_for_p(p);
         while k < (self.entries_size - 1) {
             let block_index = self.block_index(k, p);
@@ -355,45 +410,4 @@ impl BuddyAlloc {
         debug_assert!(!bit_isset(self.entry(k).alloc, self.block_index(k, p)));
         Node::push(self.entry(k).free, p);
     }
-
-    /// available bytes
-    pub fn available_bytes(&self) -> usize {
-        self.end_addr - self.unavailable - self.base_addr
-    }
-
-    fn entry(&self, i: usize) -> &Entry {
-        debug_assert!(i < self.entries_size, "index out of range");
-        unsafe { self.entries.add(i).as_ref().expect("entry") }
-    }
-
-    /// find k for p
-    fn find_k_for_p(&self, p: *const u8) -> usize {
-        for k in 0..(self.entries_size - 1) {
-            if bit_isset(self.entry(k + 1).split, self.block_index(k + 1, p)) {
-                debug_assert!(bit_isset(self.entry(k).alloc, self.block_index(k, p)));
-                return k;
-            }
-        }
-        0
-    }
-
-    /// block index of p under k
-    fn block_index(&self, k: usize, p: *const u8) -> usize {
-        if (p as usize) < self.base_addr {
-            // TODO handle this outside
-            panic!("out of memory");
-        }
-        let n = p as usize - self.base_addr;
-        // equal to: n / block_size_2base(k, self.leaf2base);
-        let index = (n >> k) >> self.leaf2base;
-        debug_assert!(index < nblock(k, self.entries_size));
-        index
-    }
-
-    /// block addr of index under k
-    fn block_addr(&self, k: usize, i: usize) -> usize {
-        // equal to: i * block_size_2base(k, self.leaf2base);
-        let n = (i << k) << self.leaf2base;
-        self.base_addr + n
-    }
 }
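
A note on the moved helpers: `block_index` and `block_addr` replace division and multiplication by the order-`k` block size with shifts, since that size is always `leaf_size << k` and `leaf2base` is `log2(leaf_size)`. A minimal standalone sketch checking that equivalence (all names below are local to the sketch, not part of the crate):

```rust
/// Standalone check of the shift arithmetic used by `block_index`/`block_addr`.
/// Assumes a 16-byte leaf, i.e. leaf2base = log2(16) = 4.
fn main() {
    let leaf2base: usize = 4;
    let leaf_size: usize = 1 << leaf2base;
    for k in 0..8 {
        let block_size = leaf_size << k; // size of one order-k block
        for i in 0..64usize {
            // `block_addr` form: (i << k) << leaf2base == i * block_size
            let offset = (i << k) << leaf2base;
            assert_eq!(offset, i * block_size);
            // `block_index` form: (n >> k) >> leaf2base == n / block_size
            assert_eq!((offset >> k) >> leaf2base, i);
        }
    }
    println!("shift forms match multiply/divide");
}
```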
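With `malloc`/`free` replaced by the `Allocator` trait, callers go through `Layout` and get a `Result<NonNull<[u8]>, AllocError>` instead of a possibly-null `*mut u8`. A hedged usage sketch: it needs nightly Rust (`allocator_api`), and since this commit does not show how a `BuddyAlloc` is constructed, the demo runs against `std::alloc::Global`, which also implements `Allocator`:

```rust
#![feature(allocator_api)]

use std::alloc::{Allocator, Layout};

// Generic over any `Allocator`, so a `&BuddyAlloc` could be passed here once
// one is constructed (construction is not shown in this commit).
fn demo<A: Allocator>(alloc: &A) {
    let layout = Layout::from_size_align(64, 16).expect("layout");
    // `allocate` replaces the old `malloc`: failure is an explicit `AllocError`
    // rather than a null pointer.
    let block = alloc.allocate(layout).expect("allocation failed");
    // ... use `block`, a NonNull<[u8]> at least `layout.size()` bytes long ...
    // `deallocate` replaces the old `free`; the caller passes the same layout.
    unsafe { alloc.deallocate(block.cast(), layout) };
}

fn main() {
    demo(&std::alloc::Global);
}
```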