 extern crate alloc;
 extern crate rustc_data_structures;

-use rustc_data_structures::sync::MTLock;
+use rustc_data_structures::defer_deallocs::DeferDeallocs;
+use rustc_data_structures::sync::{MTLock, WorkerLocal};

 use std::cell::{Cell, RefCell};
 use std::cmp;
@@ -44,7 +45,6 @@ use std::marker::{PhantomData, Send};
 use std::mem;
 use std::ptr;
 use std::slice;
-
 use alloc::raw_vec::RawVec;

 /// An arena that can hold objects of only one type.
@@ -132,30 +132,54 @@ impl<T> TypedArena<T> {
     /// Allocates an object in the `TypedArena`, returning a reference to it.
     #[inline]
     pub fn alloc(&self, object: T) -> &mut T {
-        if self.ptr == self.end {
-            self.grow(1)
-        }
+        // Zero sized path
+        if mem::size_of::<T>() == 0 {
+            if self.ptr == self.end {
+                self.grow(1)
+            }

-        unsafe {
-            if mem::size_of::<T>() == 0 {
+            unsafe {
                 self.ptr
                     .set(intrinsics::arith_offset(self.ptr.get() as *mut u8, 1)
                         as *mut T);
                 let ptr = mem::align_of::<T>() as *mut T;
                 // Don't drop the object. This `write` is equivalent to `forget`.
                 ptr::write(ptr, object);
-                &mut *ptr
+                return &mut *ptr;
+            }
+        }
+
+        let ptr = self.ptr.get();
+
+        unsafe {
+            if std::intrinsics::unlikely(ptr == self.end.get()) {
+                self.grow_and_alloc(object)
             } else {
-                let ptr = self.ptr.get();
-                // Advance the pointer.
-                self.ptr.set(self.ptr.get().offset(1));
-                // Write into uninitialized memory.
-                ptr::write(ptr, object);
-                &mut *ptr
+                self.alloc_unchecked(ptr, object)
             }
         }
     }

+    #[inline(always)]
+    unsafe fn alloc_unchecked(&self, ptr: *mut T, object: T) -> &mut T {
+        // Advance the pointer.
+        self.ptr.set(ptr.offset(1));
+        // Write into uninitialized memory.
+        ptr::write(ptr, object);
+        &mut *ptr
+    }
+
+    #[inline(never)]
+    #[cold]
+    fn grow_and_alloc(&self, object: T) -> &mut T {
+        // We move the object in this function so if it has a destructor
+        // the fast path need not have an unwind handler to destroy it
+        self.grow(1);
+        unsafe {
+            self.alloc_unchecked(self.ptr.get(), object)
+        }
+    }
+
     /// Allocates a slice of objects that are copied into the `TypedArena`, returning a mutable
     /// reference to it. Will panic if passed a zero-sized types.
     ///
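Editor's note: the hunk above splits `alloc` into a branch-only fast path and an outlined `#[cold]` slow path, moving `object` by value into `grow_and_alloc` so the fast path needs no unwind handler to drop it. A minimal, stable-Rust sketch of that shape (illustrative only: it uses `&mut self`, `Vec` chunks and indices instead of `Cell`, raw pointers, and the unstable `std::intrinsics::unlikely`):

```rust
struct ToyArena<T> {
    // Each chunk is only ever pushed into up to its capacity,
    // so elements never move once allocated.
    chunks: Vec<Vec<T>>,
}

impl<T> ToyArena<T> {
    fn new() -> Self {
        ToyArena { chunks: vec![Vec::with_capacity(8)] }
    }

    #[inline]
    fn alloc(&mut self, object: T) -> &mut T {
        let full = {
            let last = self.chunks.last().unwrap();
            last.len() == last.capacity()
        };
        if full {
            // Cold path: `object` is moved into the outlined call, so the hot
            // path has no destructor of its own to run if growth panics.
            return self.grow_and_alloc(object);
        }
        let last = self.chunks.last_mut().unwrap();
        last.push(object); // cannot reallocate: spare capacity was just checked
        last.last_mut().unwrap()
    }

    #[inline(never)]
    #[cold]
    fn grow_and_alloc(&mut self, object: T) -> &mut T {
        // Double the chunk size, then allocate into the fresh chunk.
        let cap = self.chunks.last().map_or(8, |c| (c.capacity() * 2).max(8));
        let mut chunk = Vec::with_capacity(cap);
        chunk.push(object);
        self.chunks.push(chunk);
        self.chunks.last_mut().unwrap().last_mut().unwrap()
    }
}

fn main() {
    let mut arena = ToyArena::new();
    for i in 0..100 {
        let slot = arena.alloc(i);
        assert_eq!(*slot, i);
    }
}
```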
@@ -174,7 +198,7 @@ impl<T> TypedArena<T> {
         let available_capacity_bytes = self.end.get() as usize - self.ptr.get() as usize;
         let at_least_bytes = slice.len() * mem::size_of::<T>();
         if available_capacity_bytes < at_least_bytes {
-            self.grow(slice.len());
+            self.grow_slice(slice.len());
         }

         unsafe {
@@ -186,9 +210,14 @@ impl<T> TypedArena<T> {
         }
     }

-    /// Grows the arena.
     #[inline(never)]
     #[cold]
+    fn grow_slice(&self, n: usize) {
+        self.grow(n)
+    }
+
+    /// Grows the arena.
+    #[inline(always)]
     fn grow(&self, n: usize) {
         unsafe {
             let mut chunks = self.chunks.borrow_mut();
@@ -283,6 +312,22 @@ unsafe impl<#[may_dangle] T> Drop for TypedArena<T> {

 unsafe impl<T: Send> Send for TypedArena<T> {}

+type BackingType = usize;
+const BLOCK_SIZE: usize = std::mem::size_of::<BackingType>();
+
+#[inline(always)]
+fn required_backing_types(bytes: usize) -> usize {
+    assert!(BLOCK_SIZE.is_power_of_two());
+    // FIXME: This addition could overflow
+    (bytes + BLOCK_SIZE - 1) / BLOCK_SIZE
+}
+
+#[inline(always)]
+fn align(val: usize, align: usize) -> usize {
+    assert!(align.is_power_of_two());
+    (val + align - 1) & !(align - 1)
+}
+
 pub struct DroplessArena {
     /// A pointer to the next object to be allocated.
     ptr: Cell<*mut u8>,
@@ -292,7 +337,42 @@ pub struct DroplessArena {
     end: Cell<*mut u8>,

     /// A vector of arena chunks.
-    chunks: RefCell<Vec<TypedArenaChunk<u8>>>,
+    chunks: RefCell<Vec<TypedArenaChunk<BackingType>>>,
+}
+
+#[no_mangle]
+pub fn tatest1(a: &TypedArena<usize>) -> &usize {
+    a.alloc(64usize)
+}
+
+#[no_mangle]
+pub fn atest1(a: &DroplessArena) -> &usize {
+    a.alloc(64usize)
+}
+
+#[no_mangle]
+pub fn atest2(a: &SyncDroplessArena, b: Box<usize>) -> &Box<usize> {
+    a.promote(b)
+}
+
+#[no_mangle]
+pub fn atest6(a: &SyncDroplessArena, b: usize) -> &usize {
+    a.promote(b)
+}
+
+#[no_mangle]
+pub fn atest3(a: &DroplessArena) {
+    a.align(8);
+}
+
+#[no_mangle]
+pub fn atest4(a: &DroplessArena) {
+    a.align(16);
+}
+
+#[no_mangle]
+pub fn atest5(a: &DroplessArena) {
+    a.align(4);
 }

 unsafe impl Send for DroplessArena {}
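Editor's note: `required_backing_types` and `align` introduced above are both round-up helpers over a power-of-two block, and the arena's bookkeeping relies on them agreeing. A small standalone check of the arithmetic (the helpers here are local copies with the block size passed in explicitly, so the names do not collide with the module-level ones):

```rust
// How many usize-sized blocks are needed to hold `bytes` bytes.
fn required_backing_types(bytes: usize, block: usize) -> usize {
    (bytes + block - 1) / block
}

// Round `val` up to the next multiple of the power-of-two `align`.
fn align_up(val: usize, align: usize) -> usize {
    (val + align - 1) & !(align - 1)
}

fn main() {
    assert_eq!(required_backing_types(0, 8), 0);
    assert_eq!(required_backing_types(1, 8), 1);
    assert_eq!(required_backing_types(8, 8), 1);
    assert_eq!(required_backing_types(9, 8), 2);
    assert_eq!(align_up(13, 8), 16);
    assert_eq!(align_up(16, 8), 16);
    // Both express "round up to the next block boundary": for inputs small
    // enough that the addition does not overflow (see the FIXME above),
    // align_up(n, 8) == required_backing_types(n, 8) * 8.
    for n in 0..1000 {
        assert_eq!(align_up(n, 8), required_backing_types(n, 8) * 8);
    }
}
```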
@@ -310,7 +390,7 @@ impl Default for DroplessArena {

 impl DroplessArena {
     pub fn in_arena<T: ?Sized>(&self, ptr: *const T) -> bool {
-        let ptr = ptr as *const u8 as *mut u8;
+        let ptr = ptr as *const u8 as *mut BackingType;
         for chunk in &*self.chunks.borrow() {
             if chunk.start() <= ptr && ptr < chunk.end() {
                 return true;
@@ -322,62 +402,93 @@ impl DroplessArena {

     #[inline]
     fn align(&self, align: usize) {
+        // FIXME: The addition of `align` could overflow, in which case final_address
+        // will be 0. Do we have any guarantee that our chunk won't end up as the final
+        // bytes in our memory space?
         let final_address = ((self.ptr.get() as usize) + align - 1) & !(align - 1);
         self.ptr.set(final_address as *mut u8);
-        assert!(self.ptr <= self.end);
+
+        // Aligning to the block size cannot go outside our current chunk, just to its end
+        if align > BLOCK_SIZE {
+            // For larger alignments we have to check that we didn't go out of bounds
+            assert!(self.ptr <= self.end);
+        }
     }

-    #[inline(never)]
-    #[cold]
     fn grow(&self, needed_bytes: usize) {
         unsafe {
+            let needed_vals = required_backing_types(needed_bytes);
             let mut chunks = self.chunks.borrow_mut();
             let (chunk, mut new_capacity);
             if let Some(last_chunk) = chunks.last_mut() {
                 let used_bytes = self.ptr.get() as usize - last_chunk.start() as usize;
+                let used_vals = required_backing_types(used_bytes);
                 if last_chunk
                     .storage
-                    .reserve_in_place(used_bytes, needed_bytes)
+                    .reserve_in_place(used_vals, needed_vals)
                 {
-                    self.end.set(last_chunk.end());
+                    self.end.set(last_chunk.end() as *mut u8);
                     return;
                 } else {
                     new_capacity = last_chunk.storage.cap();
                     loop {
                         new_capacity = new_capacity.checked_mul(2).unwrap();
-                        if new_capacity >= used_bytes + needed_bytes {
+                        if new_capacity >= used_vals + needed_vals {
                             break;
                         }
                     }
                 }
             } else {
-                new_capacity = cmp::max(needed_bytes, PAGE);
+                new_capacity = cmp::max(needed_vals, required_backing_types(PAGE));
             }
-            chunk = TypedArenaChunk::<u8>::new(new_capacity);
-            self.ptr.set(chunk.start());
-            self.end.set(chunk.end());
+            chunk = TypedArenaChunk::<BackingType>::new(new_capacity);
+            self.ptr.set(chunk.start() as *mut u8);
+            self.end.set(chunk.end() as *mut u8);
             chunks.push(chunk);
         }
     }

+    #[inline(never)]
+    #[cold]
+    fn grow_and_alloc_raw(&self, bytes: usize) -> &mut [u8] {
+        self.grow(bytes);
+        unsafe {
+            self.alloc_raw_unchecked(self.ptr.get(), bytes)
+        }
+    }
+
+    #[inline(always)]
+    unsafe fn alloc_raw_unchecked(&self, start: *mut u8, bytes: usize) -> &mut [u8] {
+        // Tell LLVM that `start` is aligned to BLOCK_SIZE
+        std::intrinsics::assume(start as usize == align(start as usize, BLOCK_SIZE));
+
+        // Set the pointer past ourselves and align it
+        let end = start.offset(bytes as isize) as usize;
+        let end = align(end, BLOCK_SIZE) as *mut u8;
+        self.ptr.set(end);
+
+        // Return the result
+        slice::from_raw_parts_mut(start, bytes)
+    }
+
     #[inline]
     pub fn alloc_raw(&self, bytes: usize, align: usize) -> &mut [u8] {
+        // FIXME: Always align to 8 bytes here? Or usize alignment
         unsafe {
             assert!(bytes != 0);
+            assert!(align <= BLOCK_SIZE);
+            assert!(std::mem::align_of::<BackingType>() == std::mem::size_of::<BackingType>());
+            // FIXME: Check that `bytes` fits in an isize

-            self.align(align);
-
-            let future_end = intrinsics::arith_offset(self.ptr.get(), bytes as isize);
-            if (future_end as *mut u8) >= self.end.get() {
-                self.grow(bytes);
-            }
-
+            // FIXME: arith_offset could overflow here.
+            // Find some way to guarantee this doesn't happen for small fixed size types
             let ptr = self.ptr.get();
-            // Set the pointer past ourselves
-            self.ptr.set(
-                intrinsics::arith_offset(self.ptr.get(), bytes as isize) as *mut u8,
-            );
-            slice::from_raw_parts_mut(ptr, bytes)
+            let future_end = intrinsics::arith_offset(ptr, bytes as isize);
+            if std::intrinsics::unlikely((future_end as *mut u8) >= self.end.get()) {
+                self.grow_and_alloc_raw(bytes)
+            } else {
+                self.alloc_raw_unchecked(ptr, bytes)
+            }
         }
     }
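Editor's note: because chunks are now backed by `BackingType = usize` and `alloc_raw_unchecked` rounds the bump pointer up to `BLOCK_SIZE` after every allocation, each allocation already starts on a block boundary; that is why `alloc_raw` can assert `align <= BLOCK_SIZE` instead of re-aligning on every call. A sketch of that bookkeeping with plain integers standing in for pointers (names are ad hoc, and the out-of-space check is simplified relative to the real grow path):

```rust
const BLOCK_SIZE: usize = std::mem::size_of::<usize>();

fn align_up(val: usize, align: usize) -> usize {
    (val + align - 1) & !(align - 1)
}

struct BumpState {
    ptr: usize, // current bump "pointer", always a multiple of BLOCK_SIZE
    end: usize,
}

impl BumpState {
    // Mirrors alloc_raw_unchecked: hand out `bytes`, then re-align the pointer
    // so the *next* allocation also starts on a BLOCK_SIZE boundary.
    fn alloc_raw(&mut self, bytes: usize) -> Option<usize> {
        debug_assert_eq!(self.ptr % BLOCK_SIZE, 0);
        if self.ptr + bytes > self.end {
            return None; // the real arena calls grow_and_alloc_raw here
        }
        let start = self.ptr;
        self.ptr = align_up(start + bytes, BLOCK_SIZE);
        Some(start)
    }
}

fn main() {
    let mut b = BumpState { ptr: 0, end: 64 };
    assert_eq!(b.alloc_raw(3), Some(0));  // 3 bytes, pointer rounds up to 8
    assert_eq!(b.alloc_raw(8), Some(8));  // already aligned
    assert_eq!(b.alloc_raw(1), Some(16)); // every start is 8-aligned on 64-bit
    assert_eq!(b.alloc_raw(64), None);    // would not fit: grow path
}
```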
@@ -452,12 +563,39 @@ impl<T> SyncTypedArena<T> {
     }
 }

-#[derive(Default)]
+struct DropType {
+    drop_fn: unsafe fn(*mut u8),
+    obj: *mut u8,
+}
+
+unsafe fn drop_for_type<T>(to_drop: *mut u8) {
+    std::ptr::drop_in_place(to_drop as *mut T)
+}
+
+impl Drop for DropType {
+    fn drop(&mut self) {
+        unsafe {
+            (self.drop_fn)(self.obj)
+        }
+    }
+}
+
 pub struct SyncDroplessArena {
+    // Ordered so `deferred` gets dropped before the arena
+    // since its destructor can reference memory in the arena
+    deferred: WorkerLocal<TypedArena<DropType>>,
     lock: MTLock<DroplessArena>,
 }

 impl SyncDroplessArena {
+    #[inline]
+    pub fn new() -> Self {
+        SyncDroplessArena {
+            lock: Default::default(),
+            deferred: WorkerLocal::new(|_| Default::default()),
+        }
+    }
+
     #[inline(always)]
     pub fn in_arena<T: ?Sized>(&self, ptr: *const T) -> bool {
         self.lock.lock().in_arena(ptr)
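Editor's note: `DropType` pairs a pointer with a monomorphized drop function whose type has been erased to `unsafe fn(*mut u8)`, so one homogeneous `TypedArena<DropType>` can run destructors for values of many different types. Declaring `deferred` before `lock` matters because struct fields drop in declaration order, so those deferred destructors run while the arena memory is still alive. A standalone sketch of the erasure trick itself:

```rust
use std::mem::ManuallyDrop;
use std::ptr;

struct DropEntry {
    drop_fn: unsafe fn(*mut u8),
    obj: *mut u8,
}

// Monomorphized per T, then stored behind a type-erased fn pointer.
unsafe fn drop_for_type<T>(to_drop: *mut u8) {
    ptr::drop_in_place(to_drop as *mut T)
}

fn main() {
    // Keep the value alive but opt out of its automatic drop,
    // the way an arena slot would.
    let mut slot = ManuallyDrop::new(String::from("hello"));
    let entry = DropEntry {
        drop_fn: drop_for_type::<String>,
        obj: &mut *slot as *mut String as *mut u8,
    };
    // Later (for the arena: when `deferred` is dropped) the recorded
    // destructor runs exactly once through the erased pointer.
    unsafe { (entry.drop_fn)(entry.obj) };
}
```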
@@ -483,6 +621,28 @@ impl SyncDroplessArena {
         // Extend the lifetime of the result since it's limited to the lock guard
         unsafe { &mut *(self.lock.lock().alloc_slice(slice) as *mut [T]) }
     }
+
+    #[inline]
+    pub fn promote<T: DeferDeallocs>(&self, object: T) -> &T {
+        let mem = self.alloc_raw(mem::size_of::<T>(), mem::align_of::<T>()) as *mut _ as *mut T;
+        let result = unsafe {
+            // Write into uninitialized memory.
+            ptr::write(mem, object);
+            &mut *mem
+        };
+        // Record the destructor after doing the allocation as that may panic
+        // and would cause `object`'s destructor to run twice if it was recorded before
+        self.deferred.alloc(DropType {
+            drop_fn: drop_for_type::<T>,
+            obj: result as *mut T as *mut u8,
+        });
+        result
+    }
+
+    #[inline(always)]
+    pub fn promote_vec<T: DeferDeallocs>(&self, vec: Vec<T>) -> &[T] {
+        &self.promote(vec)[..]
+    }
 }

 #[cfg(test)]
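Editor's note: a hypothetical caller of the new promotion API, assuming a `DeferDeallocs` impl for the promoted type is available on this branch (the function and its names are illustrative, not part of the patch). The value is moved into the arena, the returned reference borrows from the arena, and the recorded `DropType` runs the value's destructor when the arena is dropped:

```rust
// Assumes this crate's SyncDroplessArena and a DeferDeallocs impl for
// Vec<String> provided by the branch this patch builds on.
fn intern_names<'a>(arena: &'a SyncDroplessArena, names: Vec<String>) -> &'a [String] {
    // promote_vec moves the Vec into the arena and hands back a slice that
    // lives as long as the arena; the Vec's destructor is deferred, not lost.
    arena.promote_vec(names)
}
```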