diff --git a/src/libarena/lib.rs b/src/libarena/lib.rs
index 1739b2236e89d..0a8aca865941b 100644
--- a/src/libarena/lib.rs
+++ b/src/libarena/lib.rs
@@ -25,7 +25,7 @@ extern crate alloc;
 
 use rustc_data_structures::cold_path;
-use rustc_data_structures::sync::MTLock;
+use rustc_data_structures::sync::{SharedWorkerLocal, WorkerLocal, Lock};
 use smallvec::SmallVec;
 
 use std::cell::{Cell, RefCell};
@@ -123,11 +123,6 @@ impl<T> Default for TypedArena<T> {
 }
 
 impl<T> TypedArena<T> {
-    pub fn in_arena(&self, ptr: *const T) -> bool {
-        let ptr = ptr as *const T as *mut T;
-
-        self.chunks.borrow().iter().any(|chunk| chunk.start() <= ptr && ptr < chunk.end())
-    }
     /// Allocates an object in the `TypedArena`, returning a reference to it.
     #[inline]
     pub fn alloc(&self, object: T) -> &mut T {
@@ -378,12 +373,6 @@ impl Default for DroplessArena {
 }
 
 impl DroplessArena {
-    pub fn in_arena<T: ?Sized>(&self, ptr: *const T) -> bool {
-        let ptr = ptr as *const u8 as *mut u8;
-
-        self.chunks.borrow().iter().any(|chunk| chunk.start() <= ptr && ptr < chunk.end())
-    }
-
     #[inline]
     fn align(&self, align: usize) {
         let final_address = ((self.ptr.get() as usize) + align - 1) & !(align - 1);
@@ -555,64 +544,164 @@ impl DroplessArena {
     }
 }
 
-#[derive(Default)]
-// FIXME(@Zoxc): this type is entirely unused in rustc
-pub struct SyncTypedArena<T> {
-    lock: MTLock<TypedArena<T>>,
+struct CurrentChunk<T> {
+    /// A pointer to the next object to be allocated.
+    ptr: Cell<*mut T>,
+
+    /// A pointer to the end of the allocated area. When this pointer is
+    /// reached, a new chunk is allocated.
+    end: Cell<*mut T>,
 }
 
-impl<T> SyncTypedArena<T> {
-    #[inline(always)]
-    pub fn alloc(&self, object: T) -> &mut T {
-        // Extend the lifetime of the result since it's limited to the lock guard
-        unsafe { &mut *(self.lock.lock().alloc(object) as *mut T) }
+impl<T> Default for CurrentChunk<T> {
+    #[inline]
+    fn default() -> Self {
+        CurrentChunk {
+            // We set both `ptr` and `end` to 0 so that the first call to
+            // alloc() will trigger a grow().
+            ptr: Cell::new(0 as *mut T),
+            end: Cell::new(0 as *mut T),
+        }
     }
+}
 
-    #[inline(always)]
-    pub fn alloc_slice(&self, slice: &[T]) -> &mut [T]
-    where
-        T: Copy,
-    {
-        // Extend the lifetime of the result since it's limited to the lock guard
-        unsafe { &mut *(self.lock.lock().alloc_slice(slice) as *mut [T]) }
+impl<T> CurrentChunk<T> {
+    #[inline]
+    fn align(&self, align: usize) {
+        let final_address = ((self.ptr.get() as usize) + align - 1) & !(align - 1);
+        self.ptr.set(final_address as *mut T);
+        assert!(self.ptr <= self.end);
     }
 
+    /// Grows the arena.
     #[inline(always)]
-    pub fn clear(&mut self) {
-        self.lock.get_mut().clear();
+    fn grow(&self, n: usize, chunks: &mut Vec<TypedArenaChunk<T>>) {
+        unsafe {
+            let (chunk, mut new_capacity);
+            if let Some(last_chunk) = chunks.last_mut() {
+                let used_bytes = self.ptr.get() as usize - last_chunk.start() as usize;
+                let currently_used_cap = used_bytes / mem::size_of::<T>();
+                last_chunk.entries = currently_used_cap;
+                if last_chunk.storage.reserve_in_place(currently_used_cap, n) {
+                    self.end.set(last_chunk.end());
+                    return;
+                } else {
+                    new_capacity = last_chunk.storage.cap();
+                    loop {
+                        new_capacity = new_capacity.checked_mul(2).unwrap();
+                        if new_capacity >= currently_used_cap + n {
+                            break;
+                        }
+                    }
+                }
+            } else {
+                let elem_size = cmp::max(1, mem::size_of::<T>());
+                new_capacity = cmp::max(n, PAGE / elem_size);
+            }
+            chunk = TypedArenaChunk::<T>::new(new_capacity);
+            self.ptr.set(chunk.start());
+            self.end.set(chunk.end());
+            chunks.push(chunk);
+        }
     }
 }
 
-#[derive(Default)]
 pub struct SyncDroplessArena {
-    lock: MTLock<DroplessArena>,
+    /// Pointers to the current chunk
+    current: WorkerLocal<CurrentChunk<u8>>,
+
+    /// A vector of arena chunks.
+    chunks: Lock<SharedWorkerLocal<Vec<TypedArenaChunk<u8>>>>,
+}
+
+impl Default for SyncDroplessArena {
+    #[inline]
+    fn default() -> SyncDroplessArena {
+        SyncDroplessArena {
+            current: WorkerLocal::new(|_| CurrentChunk::default()),
+            chunks: Default::default(),
+        }
+    }
 }
 
 impl SyncDroplessArena {
-    #[inline(always)]
     pub fn in_arena<T: ?Sized>(&self, ptr: *const T) -> bool {
-        self.lock.lock().in_arena(ptr)
+        let ptr = ptr as *const u8 as *mut u8;
+
+        self.chunks.lock().iter().any(|chunks| chunks.iter().any(|chunk| {
+            chunk.start() <= ptr && ptr < chunk.end()
+        }))
     }
 
-    #[inline(always)]
+    #[inline(never)]
+    #[cold]
+    fn grow(&self, needed_bytes: usize) {
+        self.current.grow(needed_bytes, &mut **self.chunks.lock());
+    }
+
+    #[inline]
     pub fn alloc_raw(&self, bytes: usize, align: usize) -> &mut [u8] {
-        // Extend the lifetime of the result since it's limited to the lock guard
-        unsafe { &mut *(self.lock.lock().alloc_raw(bytes, align) as *mut [u8]) }
+        unsafe {
+            assert!(bytes != 0);
+
+            let current = &*self.current;
+
+            current.align(align);
+
+            let future_end = intrinsics::arith_offset(current.ptr.get(), bytes as isize);
+            if (future_end as *mut u8) >= current.end.get() {
+                self.grow(bytes);
+            }
+
+            let ptr = current.ptr.get();
+            // Set the pointer past ourselves
+            current.ptr.set(
+                intrinsics::arith_offset(current.ptr.get(), bytes as isize) as *mut u8,
+            );
+            slice::from_raw_parts_mut(ptr, bytes)
+        }
     }
 
-    #[inline(always)]
+    #[inline]
     pub fn alloc<T>(&self, object: T) -> &mut T {
-        // Extend the lifetime of the result since it's limited to the lock guard
-        unsafe { &mut *(self.lock.lock().alloc(object) as *mut T) }
+        assert!(!mem::needs_drop::<T>());
+
+        let mem = self.alloc_raw(
+            mem::size_of::<T>(),
+            mem::align_of::<T>()) as *mut _ as *mut T;
+
+        unsafe {
+            // Write into uninitialized memory.
+            ptr::write(mem, object);
+            &mut *mem
+        }
    }
 
-    #[inline(always)]
+    /// Allocates a slice of objects that are copied into the `SyncDroplessArena`, returning a
+    /// mutable reference to it. Will panic if passed a zero-sized type.
+    ///
+    /// Panics:
+    ///
+    ///  - Zero-sized types
+    ///  - Zero-length slices
+    #[inline]
     pub fn alloc_slice<T>(&self, slice: &[T]) -> &mut [T]
     where
         T: Copy,
     {
-        // Extend the lifetime of the result since it's limited to the lock guard
-        unsafe { &mut *(self.lock.lock().alloc_slice(slice) as *mut [T]) }
+        assert!(!mem::needs_drop::<T>());
+        assert!(mem::size_of::<T>() != 0);
+        assert!(!slice.is_empty());
+
+        let mem = self.alloc_raw(
+            slice.len() * mem::size_of::<T>(),
+            mem::align_of::<T>()) as *mut _ as *mut T;
+
+        unsafe {
+            let arena_slice = slice::from_raw_parts_mut(mem, slice.len());
+            arena_slice.copy_from_slice(slice);
+            arena_slice
+        }
     }
 }
diff --git a/src/librustc_data_structures/sync.rs b/src/librustc_data_structures/sync.rs
index 73247c1469efd..d33880ef96a16 100644
--- a/src/librustc_data_structures/sync.rs
+++ b/src/librustc_data_structures/sync.rs
@@ -218,6 +218,45 @@ cfg_if! {
             }
         }
 
+        #[derive(Debug, Default)]
+        pub struct SharedWorkerLocal<T>(T);
+
+        impl<T> SharedWorkerLocal<T> {
+            /// Creates a new worker local where the `initial` closure computes the
+            /// value this worker local should take for each thread in the thread pool.
+            #[inline]
+            pub fn new<F: FnMut(usize) -> T>(mut f: F) -> SharedWorkerLocal<T> {
+                SharedWorkerLocal(f(0))
+            }
+
+            #[inline]
+            pub fn iter(&self) -> impl Iterator<Item = &T> {
+                Some(&self.0).into_iter()
+            }
+
+            /// Returns the worker-local value for each thread
+            #[inline]
+            pub fn into_inner(self) -> Vec<T> {
+                vec![self.0]
+            }
+        }
+
+        impl<T> Deref for SharedWorkerLocal<T> {
+            type Target = T;
+
+            #[inline(always)]
+            fn deref(&self) -> &T {
+                &self.0
+            }
+        }
+
+        impl<T> DerefMut for SharedWorkerLocal<T> {
+            #[inline(always)]
+            fn deref_mut(&mut self) -> &mut T {
+                &mut self.0
+            }
+        }
+
         pub type MTRef<'a, T> = &'a mut T;
 
         #[derive(Debug, Default)]
@@ -337,6 +376,54 @@ cfg_if! {
         }
 
         pub use rayon_core::WorkerLocal;
+        pub use rayon_core::Registry;
+        use rayon_core::current_thread_index;
+
+        #[derive(Debug)]
+        pub struct SharedWorkerLocal<T>(Vec<T>);
+
+        impl<T> SharedWorkerLocal<T> {
+            /// Creates a new worker local where the `initial` closure computes the
+            /// value this worker local should take for each thread in the thread pool.
+            #[inline]
+            pub fn new<F: FnMut(usize) -> T>(mut f: F) -> SharedWorkerLocal<T> {
+                SharedWorkerLocal((0..Registry::current_num_threads()).map(|i| f(i)).collect())
+            }
+
+            #[inline]
+            pub fn iter(&self) -> impl Iterator<Item = &T> {
+                self.0.iter()
+            }
+
+            /// Returns the worker-local value for each thread
+            #[inline]
+            pub fn into_inner(self) -> Vec<T> {
+                self.0
+            }
+        }
+
+        impl<T: Default> Default for SharedWorkerLocal<T> {
+            #[inline]
+            fn default() -> Self {
+                SharedWorkerLocal::new(|_| Default::default())
+            }
+        }
+
+        impl<T> Deref for SharedWorkerLocal<T> {
+            type Target = T;
+
+            #[inline(always)]
+            fn deref(&self) -> &T {
+                &self.0[current_thread_index().unwrap()]
+            }
+        }
+
+        impl<T> DerefMut for SharedWorkerLocal<T> {
+            #[inline(always)]
+            fn deref_mut(&mut self) -> &mut T {
+                &mut self.0[current_thread_index().unwrap()]
+            }
+        }
 
         pub use rayon::iter::ParallelIterator;
         use rayon::iter::IntoParallelIterator;
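A note on the shape of the change above: each worker thread owns its own `CurrentChunk`, so the common case of `SyncDroplessArena::alloc_raw` is a plain bump of a thread-local pointer, and the `chunks` lock is only taken on the cold `grow` path. The following stand-alone, single-threaded sketch of that bump-allocation fast path uses only stable Rust and hypothetical names (`BumpArena`, a fixed 4096-byte minimum chunk); it is a simplified model, not the code from this patch:

use std::cell::{Cell, RefCell};

// Hypothetical stand-alone bump allocator; not the patched rustc code.
struct BumpArena {
    ptr: Cell<*mut u8>,            // next free byte in the current chunk
    end: Cell<*mut u8>,            // one past the last usable byte
    chunks: RefCell<Vec<Vec<u8>>>, // owns the chunk storage
}

impl BumpArena {
    fn new() -> BumpArena {
        BumpArena {
            // Start "full" so the first allocation takes the grow path.
            ptr: Cell::new(std::ptr::null_mut()),
            end: Cell::new(std::ptr::null_mut()),
            chunks: RefCell::new(Vec::new()),
        }
    }

    // Cold path: allocate a fresh chunk big enough for `needed` bytes.
    #[cold]
    fn grow(&self, needed: usize) {
        let cap = needed.max(4096);
        let mut chunk = vec![0u8; cap];
        let start = chunk.as_mut_ptr();
        self.ptr.set(start);
        self.end.set(unsafe { start.add(cap) });
        // Keeping the Vec alive keeps its heap buffer (and our pointers) valid.
        self.chunks.borrow_mut().push(chunk);
    }

    // Fast path: align the bump pointer and advance it; no locking involved.
    fn alloc_raw(&self, bytes: usize, align: usize) -> *mut u8 {
        assert!(bytes != 0 && align.is_power_of_two());
        let aligned = ((self.ptr.get() as usize) + align - 1) & !(align - 1);
        if aligned + bytes > self.end.get() as usize {
            self.grow(bytes + align);
            return self.alloc_raw(bytes, align);
        }
        self.ptr.set((aligned + bytes) as *mut u8);
        aligned as *mut u8
    }
}

fn main() {
    let arena = BumpArena::new();
    let a = arena.alloc_raw(16, 8);
    let b = arena.alloc_raw(1, 1);
    assert!(!a.is_null() && !b.is_null() && a != b);
    println!("bump-allocated at {:p} and {:p}", a, b);
}

The design point mirrored here is that the hot path only touches `ptr` and `end`, while refilling the chunk stays out of line (`#[inline(never)] #[cold]` in the patch) so it does not bloat callers.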
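On the `SharedWorkerLocal` type added in sync.rs: judging from the patch, its point is that, besides deref-ing to the current worker's slot, any thread can iterate over every worker's slot via `iter()`, which is what `in_arena` relies on above to scan all workers' chunk lists. Below is a toy, std-only model of that shape, with hypothetical names (`ToyWorkerLocal`) and an explicitly passed worker index standing in for `current_thread_index()`; it is an illustration, not the rayon-backed implementation in the patch:

use std::sync::Mutex;

// One slot per worker: a worker normally touches only its own slot, but any
// thread may iterate over all slots (e.g. to answer "does any worker's chunk
// list contain this pointer?", as `in_arena` does above).
struct ToyWorkerLocal<T> {
    slots: Vec<Mutex<T>>,
}

impl<T: Default> ToyWorkerLocal<T> {
    fn new(workers: usize) -> Self {
        ToyWorkerLocal {
            slots: (0..workers).map(|_| Mutex::new(T::default())).collect(),
        }
    }

    // The patched SharedWorkerLocal derefs to the current rayon worker's slot;
    // in this toy the caller passes its worker index explicitly.
    fn with_mine<R>(&self, worker: usize, f: impl FnOnce(&mut T) -> R) -> R {
        f(&mut *self.slots[worker].lock().unwrap())
    }

    // Counterpart of `SharedWorkerLocal::iter()`: visit every worker's value.
    fn any(&self, mut pred: impl FnMut(&T) -> bool) -> bool {
        self.slots.iter().any(|slot| pred(&*slot.lock().unwrap()))
    }
}

fn main() {
    let locals: ToyWorkerLocal<Vec<usize>> = ToyWorkerLocal::new(4);
    locals.with_mine(0, |v| v.push(10));
    locals.with_mine(3, |v| v.push(42));
    // A query issued from any thread sees every worker's data.
    assert!(locals.any(|v| v.contains(&42)));
    println!("found 42 in some worker's slot");
}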