[WIP] Tweak query code for performance #56509

Closed
wants to merge 30 commits
Changes from all commits
1 change: 1 addition & 0 deletions Cargo.lock
@@ -1975,6 +1975,7 @@ dependencies = [
"log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)",
"polonius-engine 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-hash 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-rayon 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-rayon-core 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc_apfloat 0.0.0",
1 change: 1 addition & 0 deletions src/librustc/Cargo.toml
@@ -20,6 +20,7 @@ log = { version = "0.4", features = ["release_max_level_info", "std"] }
polonius-engine = "0.5.0"
rustc-rayon = "0.1.1"
rustc-rayon-core = "0.1.1"
rustc-hash = "1.0.1"
rustc_apfloat = { path = "../librustc_apfloat" }
rustc_target = { path = "../librustc_target" }
rustc_data_structures = { path = "../librustc_data_structures" }
35 changes: 23 additions & 12 deletions src/librustc/dep_graph/dep_node.rs
@@ -162,7 +162,9 @@ macro_rules! define_dep_nodes {
}
}

#[inline]
// FIXME: Make `is_anon`, `is_input`, `is_eval_always` and `has_params` properties
// of queries
#[inline(always)]
pub fn is_anon(&self) -> bool {
match *self {
$(
@@ -171,16 +173,20 @@ macro_rules! define_dep_nodes {
}
}

#[inline]
pub fn is_input(&self) -> bool {
#[inline(always)]
pub fn is_input_inlined(&self) -> bool {
match *self {
$(
DepKind :: $variant => { contains_input_attr!($($attr),*) }
)*
}
}

#[inline]
pub fn is_input(&self) -> bool {
self.is_input_inlined()
}

#[inline(always)]
pub fn is_eval_always(&self) -> bool {
match *self {
$(
@@ -190,8 +196,8 @@ macro_rules! define_dep_nodes {
}

#[allow(unreachable_code)]
#[inline]
pub fn has_params(&self) -> bool {
#[inline(always)]
pub fn has_params_inlined(&self) -> bool {
match *self {
$(
DepKind :: $variant => {
@@ -212,6 +218,10 @@ macro_rules! define_dep_nodes {
)*
}
}

pub fn has_params(&self) -> bool {
self.has_params_inlined()
}
}

pub enum DepConstructor<$tcx> {
@@ -230,7 +240,8 @@ macro_rules! define_dep_nodes {

impl DepNode {
#[allow(unreachable_code, non_snake_case)]
pub fn new<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
#[inline(always)]
pub fn new_inlined<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
dep: DepConstructor<'gcx>)
-> DepNode
where 'gcx: 'a + 'tcx,
@@ -299,7 +310,7 @@ macro_rules! define_dep_nodes {
/// Construct a DepNode from the given DepKind and DefPathHash. This
/// method will assert that the given DepKind actually requires a
/// single DefId/DefPathHash parameter.
#[inline]
#[inline(always)]
pub fn from_def_path_hash(kind: DepKind,
def_path_hash: DefPathHash)
-> DepNode {
@@ -313,9 +324,9 @@ macro_rules! define_dep_nodes {
/// Create a new, parameterless DepNode. This method will assert
/// that the DepNode corresponding to the given DepKind actually
/// does not require any parameters.
#[inline]
#[inline(always)]
pub fn new_no_params(kind: DepKind) -> DepNode {
assert!(!kind.has_params());
assert!(!kind.has_params_inlined());
DepNode {
kind,
hash: Fingerprint::ZERO,
@@ -418,14 +429,14 @@ impl fmt::Debug for DepNode {


impl DefPathHash {
#[inline]
#[inline(always)]
pub fn to_dep_node(self, kind: DepKind) -> DepNode {
DepNode::from_def_path_hash(kind, self)
}
}

impl DefId {
#[inline]
#[inline(always)]
pub fn to_dep_node(self, tcx: TyCtxt<'_, '_, '_>, kind: DepKind) -> DepNode {
DepNode::from_def_path_hash(kind, tcx.def_path_hash(self))
}
469 changes: 292 additions & 177 deletions src/librustc/dep_graph/graph.rs

Large diffs are not rendered by default.

32 changes: 16 additions & 16 deletions src/librustc/hir/map/collector.rs
@@ -83,20 +83,20 @@ impl<'a, 'hir> NodeCollector<'a, 'hir> {
body_ids: _,
} = *krate;

root_mod_sig_dep_index = dep_graph.input_task(
root_mod_sig_dep_index = dep_graph.input_dep_index(
root_mod_def_path_hash.to_dep_node(DepKind::Hir),
&hcx,
HirItemLike { item_like: (module, attrs, span), hash_bodies: false },
).1;
root_mod_full_dep_index = dep_graph.input_task(
&HirItemLike { item_like: (module, attrs, span), hash_bodies: false },
);
root_mod_full_dep_index = dep_graph.input_dep_index(
root_mod_def_path_hash.to_dep_node(DepKind::HirBody),
&hcx,
HirItemLike { item_like: (module, attrs, span), hash_bodies: true },
).1;
&HirItemLike { item_like: (module, attrs, span), hash_bodies: true },
);
}

{
dep_graph.input_task(
dep_graph.input_dep_index(
DepNode::new_no_params(DepKind::AllLocalTraitImpls),
&hcx,
&krate.trait_impls,
@@ -169,11 +169,11 @@ impl<'a, 'hir> NodeCollector<'a, 'hir> {

source_file_names.sort_unstable();

let (_, crate_dep_node_index) = self
let crate_dep_node_index = self
.dep_graph
.input_task(DepNode::new_no_params(DepKind::Krate),
.input_dep_index(DepNode::new_no_params(DepKind::Krate),
&self.hcx,
(((node_hashes, upstream_crates), source_file_names),
&(((node_hashes, upstream_crates), source_file_names),
(commandline_args_hash,
crate_disambiguator.to_fingerprint())));

@@ -261,17 +261,17 @@ impl<'a, 'hir> NodeCollector<'a, 'hir> {

let def_path_hash = self.definitions.def_path_hash(dep_node_owner);

self.current_signature_dep_index = self.dep_graph.input_task(
self.current_signature_dep_index = self.dep_graph.input_dep_index(
def_path_hash.to_dep_node(DepKind::Hir),
&self.hcx,
HirItemLike { item_like, hash_bodies: false },
).1;
&HirItemLike { item_like, hash_bodies: false },
);

self.current_full_dep_index = self.dep_graph.input_task(
self.current_full_dep_index = self.dep_graph.input_dep_index(
def_path_hash.to_dep_node(DepKind::HirBody),
&self.hcx,
HirItemLike { item_like, hash_bodies: true },
).1;
&HirItemLike { item_like, hash_bodies: true },
);

self.hir_body_nodes.push((def_path_hash, self.current_full_dep_index));

4 changes: 4 additions & 0 deletions src/librustc/hir/map/mod.rs
@@ -159,6 +159,10 @@ impl Forest {
self.dep_graph.read(DepNode::new_no_params(DepKind::Krate));
&self.krate
}

pub fn untracked_krate<'hir>(&'hir self) -> &'hir Crate {
&self.krate
}
}

/// Represents a mapping from Node IDs to AST elements and their parent
1 change: 1 addition & 0 deletions src/librustc/ich/hcx.rs
@@ -86,6 +86,7 @@ impl<'a> StableHashingContext<'a> {
// The `krate` here is only used for mapping BodyIds to Bodies.
// Don't use it for anything else or you'll run the risk of
// leaking data out of the tracking system.
#[inline]
pub fn new(sess: &'a Session,
krate: &'a hir::Crate,
definitions: &'a Definitions,
6 changes: 6 additions & 0 deletions src/librustc/lib.rs
@@ -59,7 +59,9 @@
#![feature(slice_patterns)]
#![feature(slice_sort_by_cached_key)]
#![feature(specialization)]
#![feature(stmt_expr_attributes)]
#![feature(unboxed_closures)]
#![feature(thread_local)]
#![feature(trace_macros)]
#![feature(trusted_len)]
#![feature(vec_remove_item)]
@@ -69,12 +71,16 @@
#![feature(in_band_lifetimes)]
#![feature(crate_visibility_modifier)]
#![feature(transpose_result)]
#![feature(arbitrary_self_types)]
#![feature(hash_raw_entry)]
#![feature(maybe_uninit)]

#![recursion_limit="512"]

#![warn(elided_lifetimes_in_paths)]

extern crate arena;
extern crate rustc_hash;
#[macro_use] extern crate bitflags;
extern crate core;
extern crate fmt_macros;
65 changes: 46 additions & 19 deletions src/librustc/session/mod.rs
@@ -26,7 +26,10 @@ use util::common::{duration_to_secs_str, ErrorReported};
use util::common::ProfileQueriesMsg;

use rustc_data_structures::base_n;
use rustc_data_structures::sync::{self, Lrc, Lock, LockCell, OneThread, Once, RwLock};
use rustc_data_structures::sync::{
self, Lrc, Lock, OneThread, Once, RwLock, AtomicU64, AtomicUsize, AtomicBool, Ordering,
Ordering::SeqCst,
};

use errors::{self, DiagnosticBuilder, DiagnosticId, Applicability};
use errors::emitter::{Emitter, EmitterWriter};
@@ -51,7 +54,6 @@ use std::io::Write;
use std::path::{Path, PathBuf};
use std::time::Duration;
use std::sync::mpsc;
use std::sync::atomic::{AtomicUsize, Ordering};

mod code_stats;
pub mod config;
@@ -128,6 +130,9 @@ pub struct Session {
/// Used by -Z profile-queries in util::common
pub profile_channel: Lock<Option<mpsc::Sender<ProfileQueriesMsg>>>,

/// Used by -Z self-profile
pub self_profiling_active: bool,

/// Used by -Z self-profile
pub self_profiling: Lock<SelfProfiler>,

@@ -142,15 +147,15 @@ pub struct Session {
/// If -zfuel=crate=n is specified, Some(crate).
optimization_fuel_crate: Option<String>,
/// If -zfuel=crate=n is specified, initially set to n. Otherwise 0.
optimization_fuel_limit: LockCell<u64>,
optimization_fuel_limit: AtomicU64,
/// We're rejecting all further optimizations.
out_of_fuel: LockCell<bool>,
out_of_fuel: AtomicBool,

// The next two are public because the driver needs to read them.
/// If -zprint-fuel=crate, Some(crate).
pub print_fuel_crate: Option<String>,
/// Always set to zero and incremented so that we can print fuel expended by a crate.
pub print_fuel: LockCell<u64>,
pub print_fuel: AtomicU64,

/// Loaded up early on in the initialization of this `Session` to avoid
/// false positives about a job server in our environment.
@@ -825,10 +830,17 @@ impl Session {
}
}

#[inline(never)]
#[cold]
fn profiler_active<F: FnOnce(&mut SelfProfiler) -> ()>(&self, f: F) {
let mut profiler = self.self_profiling.borrow_mut();
f(&mut profiler);
}

#[inline(always)]
pub fn profiler<F: FnOnce(&mut SelfProfiler) -> ()>(&self, f: F) {
if self.opts.debugging_opts.self_profile || self.opts.debugging_opts.profile_json {
let mut profiler = self.self_profiling.borrow_mut();
f(&mut profiler);
if unlikely!(self.self_profiling_active) {
self.profiler_active(f)
}
}

@@ -859,32 +871,43 @@ impl Session {
self.perf_stats.normalize_projection_ty.load(Ordering::Relaxed));
}

/// We want to know if we're allowed to do an optimization for crate foo from -z fuel=foo=n.
/// This expends fuel if applicable, and records fuel if applicable.
pub fn consider_optimizing<T: Fn() -> String>(&self, crate_name: &str, msg: T) -> bool {
#[inline(never)]
#[cold]
pub fn consider_optimizing_cold<T: Fn() -> String>(&self, crate_name: &str, msg: T) -> bool {
let mut ret = true;
if let Some(ref c) = self.optimization_fuel_crate {
if c == crate_name {
assert_eq!(self.query_threads(), 1);
let fuel = self.optimization_fuel_limit.get();
let fuel = self.optimization_fuel_limit.load(SeqCst);
ret = fuel != 0;
if fuel == 0 && !self.out_of_fuel.get() {
if fuel == 0 && !self.out_of_fuel.load(SeqCst) {
eprintln!("optimization-fuel-exhausted: {}", msg());
self.out_of_fuel.set(true);
self.out_of_fuel.store(true, SeqCst);
} else if fuel > 0 {
self.optimization_fuel_limit.set(fuel - 1);
self.optimization_fuel_limit.store(fuel - 1, SeqCst);
}
}
}
if let Some(ref c) = self.print_fuel_crate {
if c == crate_name {
assert_eq!(self.query_threads(), 1);
self.print_fuel.set(self.print_fuel.get() + 1);
self.print_fuel.store(self.print_fuel.load(SeqCst) + 1, SeqCst);
}
}
ret
}

/// We want to know if we're allowed to do an optimization for crate foo from -z fuel=foo=n.
/// This expends fuel if applicable, and records fuel if applicable.
#[inline(always)]
pub fn consider_optimizing<T: Fn() -> String>(&self, crate_name: &str, msg: T) -> bool {
if likely!(self.optimization_fuel_crate.is_none() && self.print_fuel_crate.is_none()) {
true
} else {
self.consider_optimizing_cold(crate_name, msg)
}
}
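The rewrite above splits fuel handling into an always-inlined fast path and a `#[cold]`, `#[inline(never)]` slow path, so the common no-fuel configuration costs only a couple of flag checks. A minimal sketch of the same shape, with hypothetical field names and a plain boolean test standing in for the `likely!` branch hint:

struct Fuel {
    optimization_fuel_crate: Option<String>,
}

impl Fuel {
    // Rare path: only reached when -Z fuel was actually passed on the command line.
    #[inline(never)]
    #[cold]
    fn consider_optimizing_cold(&self, crate_name: &str) -> bool {
        // The real bookkeeping (loading, decrementing and storing fuel) would go here.
        self.optimization_fuel_crate.as_ref().map(|c| c != crate_name).unwrap_or(true)
    }

    // Hot path: a single cheap test in the common configuration.
    #[inline(always)]
    fn consider_optimizing(&self, crate_name: &str) -> bool {
        if self.optimization_fuel_crate.is_none() {
            true
        } else {
            self.consider_optimizing_cold(crate_name)
        }
    }
}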

/// Returns the number of query threads that should be used for this
/// compilation
pub fn query_threads_from_opts(opts: &config::Options) -> usize {
@@ -1121,9 +1144,9 @@ pub fn build_session_(

let optimization_fuel_crate = sopts.debugging_opts.fuel.as_ref().map(|i| i.0.clone());
let optimization_fuel_limit =
LockCell::new(sopts.debugging_opts.fuel.as_ref().map(|i| i.1).unwrap_or(0));
AtomicU64::new(sopts.debugging_opts.fuel.as_ref().map(|i| i.1).unwrap_or(0));
let print_fuel_crate = sopts.debugging_opts.print_fuel.clone();
let print_fuel = LockCell::new(0);
let print_fuel = AtomicU64::new(0);

let working_dir = env::current_dir().unwrap_or_else(|e|
p_s.span_diagnostic
@@ -1138,6 +1161,9 @@ pub fn build_session_(
CguReuseTracker::new_disabled()
};

let self_profiling_active = sopts.debugging_opts.self_profile ||
sopts.debugging_opts.profile_json;

let sess = Session {
target: target_cfg,
host,
@@ -1168,6 +1194,7 @@ pub fn build_session_(
imported_macro_spans: OneThread::new(RefCell::new(FxHashMap::default())),
incr_comp_session: OneThread::new(RefCell::new(IncrCompSession::NotInitialized)),
cgu_reuse_tracker,
self_profiling_active,
self_profiling: Lock::new(SelfProfiler::new()),
profile_channel: Lock::new(None),
perf_stats: PerfStats {
@@ -1182,7 +1209,7 @@ pub fn build_session_(
optimization_fuel_limit,
print_fuel_crate,
print_fuel,
out_of_fuel: LockCell::new(false),
out_of_fuel: AtomicBool::new(false),
// Note that this is unsafe because it may misinterpret file descriptors
// on Unix as jobserver file descriptors. We hopefully execute this near
// the beginning of the process though to ensure we don't get false
54 changes: 43 additions & 11 deletions src/librustc/ty/context.rs
@@ -1331,8 +1331,9 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
self.cstore.crate_data_as_rc_any(cnum)
}

#[inline(always)]
pub fn create_stable_hashing_context(self) -> StableHashingContext<'a> {
let krate = self.dep_graph.with_ignore(|| self.gcx.hir.krate());
let krate = self.gcx.hir.forest.untracked_krate();

StableHashingContext::new(self.sess,
krate,
@@ -1349,7 +1350,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
// We cannot use the query versions of crates() and crate_hash(), since
// those would need the DepNodes that we are allocating here.
for cnum in self.cstore.crates_untracked() {
let dep_node = DepNode::new(self, DepConstructor::CrateMetadata(cnum));
let dep_node = DepNode::new_inlined(self, DepConstructor::CrateMetadata(cnum));
let crate_hash = self.cstore.crate_hash_untracked(cnum);
self.dep_graph.with_task(dep_node,
self,
@@ -1622,7 +1623,8 @@ impl<'gcx: 'tcx, 'tcx> GlobalCtxt<'gcx> {
ty::tls::with_related_context(tcx.global_tcx(), |icx| {
let new_icx = ty::tls::ImplicitCtxt {
tcx,
query: icx.query.clone(),
query: icx.query,
diagnostics: icx.diagnostics,
layout_depth: icx.layout_depth,
task: icx.task,
};
@@ -1871,7 +1873,7 @@ pub mod tls {
use ty::query;
use errors::{Diagnostic, TRACK_DIAGNOSTICS};
use rustc_data_structures::OnDrop;
use rustc_data_structures::sync::{self, Lrc, Lock};
use rustc_data_structures::sync::{self, Lock, LrcRef};
use dep_graph::OpenTask;

#[cfg(not(parallel_queries))]
@@ -1893,7 +1895,11 @@ pub mod tls {

/// The current query job, if any. This is updated by start_job in
/// ty::query::plumbing when executing a query
pub query: Option<Lrc<query::QueryJob<'gcx>>>,
pub query: Option<LrcRef<'a, query::QueryJob<'gcx>>>,

/// Where to store diagnostics for the current query job, if any.
/// This is updated by start_job in ty::query::plumbing when executing a query
pub diagnostics: Option<&'a Lock<Option<Box<Vec<Diagnostic>>>>>,

/// Used to prevent layout from recursing too deeply.
pub layout_depth: usize,
@@ -1907,36 +1913,49 @@ pub mod tls {
/// to `value` during the call to `f`. It is restored to its previous value after.
/// This is used to set the pointer to the new ImplicitCtxt.
#[cfg(parallel_queries)]
#[inline]
fn set_tlv<F: FnOnce() -> R, R>(value: usize, f: F) -> R {
rayon_core::tlv::with(value, f)
}

/// Gets Rayon's thread local variable which is preserved for Rayon jobs.
/// This is used to get the pointer to the current ImplicitCtxt.
#[cfg(parallel_queries)]
#[inline]
fn get_tlv() -> usize {
rayon_core::tlv::get()
}

/// A thread local variable which stores a pointer to the current ImplicitCtxt
#[cfg(not(parallel_queries))]
thread_local!(static TLV: Cell<usize> = Cell::new(0));
// Accessing a `thread_local` static from another crate is buggy, so we go
// through two accessors, `set_raw_tlv` and `get_tlv`, which are deliberately
// not marked `#[inline]` to prevent that cross-crate access from being inlined.
#[thread_local]
static TLV: Cell<usize> = Cell::new(0);

/// This is used to set the pointer to the current ImplicitCtxt.
#[cfg(not(parallel_queries))]
fn set_raw_tlv(value: usize) {
TLV.set(value)
}

/// Sets TLV to `value` during the call to `f`.
/// It is restored to its previous value after.
/// This is used to set the pointer to the new ImplicitCtxt.
#[cfg(not(parallel_queries))]
#[inline]
fn set_tlv<F: FnOnce() -> R, R>(value: usize, f: F) -> R {
let old = get_tlv();
let _reset = OnDrop(move || TLV.with(|tlv| tlv.set(old)));
TLV.with(|tlv| tlv.set(value));
let _reset = OnDrop(move || set_raw_tlv(old));
set_raw_tlv(value);
f()
}

/// This is used to get the pointer to the current ImplicitCtxt.
#[cfg(not(parallel_queries))]
fn get_tlv() -> usize {
TLV.with(|tlv| tlv.get())
TLV.get()
}
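The non-parallel accessors above switch from the `thread_local!` macro to a raw `#[thread_local]` static with non-inlined getters and setters. For reference, a small stand-alone sketch of the same save-and-restore discipline, written against the stable `thread_local!` macro rather than the unstable attribute:

use std::cell::Cell;

thread_local!(static TLV: Cell<usize> = Cell::new(0));

fn get_tlv() -> usize {
    TLV.with(|tlv| tlv.get())
}

// Sets TLV to `value` for the duration of `f`, restoring the previous value
// afterwards even if `f` panics.
fn set_tlv<F: FnOnce() -> R, R>(value: usize, f: F) -> R {
    struct Reset(usize);
    impl Drop for Reset {
        fn drop(&mut self) {
            TLV.with(|tlv| tlv.set(self.0));
        }
    }
    let _reset = Reset(get_tlv());
    TLV.with(|tlv| tlv.set(value));
    f()
}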

/// This is a callback from libsyntax as it cannot access the implicit state
@@ -1957,8 +1976,12 @@ pub mod tls {
fn track_diagnostic(diagnostic: &Diagnostic) {
with_context_opt(|icx| {
if let Some(icx) = icx {
if let Some(ref query) = icx.query {
query.diagnostics.lock().push(diagnostic.clone());
if let Some(ref diagnostics) = icx.diagnostics {
let mut diagnostics = diagnostics.lock();
if diagnostics.is_none() {
*diagnostics = Some(Box::new(Vec::new()));
}
diagnostics.as_mut().unwrap().push(diagnostic.clone());
}
}
})
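With the diagnostics moved out of `QueryJob`, the buffer is now an `Option<Box<Vec<Diagnostic>>>` that is only allocated the first time a diagnostic is actually recorded, which is what the `is_none()` check above implements. A minimal sketch of that lazy-allocation shape, using `RefCell` in place of the rustc-internal `Lock`:

use std::cell::RefCell;

fn record(slot: &RefCell<Option<Box<Vec<String>>>>, diagnostic: String) {
    let mut slot = slot.borrow_mut();
    // Allocate the buffer lazily; most queries never emit a diagnostic.
    if slot.is_none() {
        *slot = Some(Box::new(Vec::new()));
    }
    slot.as_mut().unwrap().push(diagnostic);
}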
@@ -1990,6 +2013,7 @@ pub mod tls {
}

/// Sets `context` as the new current ImplicitCtxt for the duration of the function `f`
#[inline]
pub fn enter_context<'a, 'gcx: 'tcx, 'tcx, F, R>(context: &ImplicitCtxt<'a, 'gcx, 'tcx>,
f: F) -> R
where F: FnOnce(&ImplicitCtxt<'a, 'gcx, 'tcx>) -> R
@@ -2023,6 +2047,7 @@ pub mod tls {
let icx = ImplicitCtxt {
tcx,
query: None,
diagnostics: None,
layout_depth: 0,
task: &OpenTask::Ignore,
};
@@ -2051,6 +2076,7 @@ pub mod tls {
};
let icx = ImplicitCtxt {
query: None,
diagnostics: None,
tcx,
layout_depth: 0,
task: &OpenTask::Ignore,
@@ -2059,6 +2085,7 @@ pub mod tls {
}

/// Allows access to the current ImplicitCtxt in a closure if one is available
#[inline]
pub fn with_context_opt<F, R>(f: F) -> R
where F: for<'a, 'gcx, 'tcx> FnOnce(Option<&ImplicitCtxt<'a, 'gcx, 'tcx>>) -> R
{
@@ -2076,6 +2103,7 @@ pub mod tls {

/// Allows access to the current ImplicitCtxt.
/// Panics if there is no ImplicitCtxt available
#[inline]
pub fn with_context<F, R>(f: F) -> R
where F: for<'a, 'gcx, 'tcx> FnOnce(&ImplicitCtxt<'a, 'gcx, 'tcx>) -> R
{
@@ -2087,6 +2115,7 @@ pub mod tls {
/// with the same 'gcx lifetime as the TyCtxt passed in.
/// This will panic if you pass it a TyCtxt which has a different global interner from
/// the current ImplicitCtxt's tcx field.
#[inline]
pub fn with_related_context<'a, 'gcx, 'tcx1, F, R>(tcx: TyCtxt<'a, 'gcx, 'tcx1>, f: F) -> R
where F: for<'b, 'tcx2> FnOnce(&ImplicitCtxt<'b, 'gcx, 'tcx2>) -> R
{
@@ -2105,6 +2134,7 @@ pub mod tls {
/// is given an ImplicitCtxt with the same 'tcx and 'gcx lifetimes as the TyCtxt passed in.
/// This will panic if you pass it a TyCtxt which has a different global interner or
/// a different local interner from the current ImplicitCtxt's tcx field.
#[inline]
pub fn with_fully_related_context<'a, 'gcx, 'tcx, F, R>(tcx: TyCtxt<'a, 'gcx, 'tcx>, f: F) -> R
where F: for<'b> FnOnce(&ImplicitCtxt<'b, 'gcx, 'tcx>) -> R
{
@@ -2122,6 +2152,7 @@ pub mod tls {

/// Allows access to the TyCtxt in the current ImplicitCtxt.
/// Panics if there is no ImplicitCtxt available
#[inline]
pub fn with<F, R>(f: F) -> R
where F: for<'a, 'gcx, 'tcx> FnOnce(TyCtxt<'a, 'gcx, 'tcx>) -> R
{
@@ -2130,6 +2161,7 @@ pub mod tls {

/// Allows access to the TyCtxt in the current ImplicitCtxt.
/// The closure is passed None if there is no ImplicitCtxt available
#[inline]
pub fn with_opt<F, R>(f: F) -> R
where F: for<'a, 'gcx, 'tcx> FnOnce(Option<TyCtxt<'a, 'gcx, 'tcx>>) -> R
{
3 changes: 2 additions & 1 deletion src/librustc/ty/query/config.rs
@@ -52,7 +52,8 @@ pub(super) trait QueryAccessors<'tcx>: QueryConfig<'tcx> {
fn to_dep_node(tcx: TyCtxt<'_, 'tcx, '_>, key: &Self::Key) -> DepNode;

// Don't use this method to compute query results, instead use the methods on TyCtxt
fn compute(tcx: TyCtxt<'_, 'tcx, '_>, key: Self::Key) -> Self::Value;
#[inline(always)]
fn compute(tcx: TyCtxt<'_, 'tcx, 'tcx>, key: Self::Key) -> Self::Value;

fn handle_cycle_error(tcx: TyCtxt<'_, 'tcx, '_>) -> Self::Value;
}
91 changes: 46 additions & 45 deletions src/librustc/ty/query/job.rs
@@ -12,14 +12,18 @@

use std::mem;
use rustc_data_structures::fx::FxHashSet;
use rustc_data_structures::sync::{Lock, LockGuard, Lrc, Weak};
use rustc_data_structures::sync::{Lock, LockGuard, Lrc, LrcRef, Weak};
use rustc_data_structures::OnDrop;
use syntax_pos::Span;
use ty::tls;
use ty::query::Query;
use ty::query::plumbing::CycleError;
#[cfg(not(parallel_queries))]
use ty::query::{
plumbing::TryGetJob,
config::QueryDescription,
};
use ty::context::TyCtxt;
use errors::Diagnostic;
use std::process;
use std::{fmt, ptr};

@@ -35,15 +39,6 @@ use {
rustc_data_structures::stable_hasher::{StableHasherResult, StableHasher, HashStable},
};

/// Indicates the state of a query for a given key in a query map
pub(super) enum QueryResult<'tcx> {
/// An already executing query. The query job can be used to await for its completion
Started(Lrc<QueryJob<'tcx>>),

/// The query panicked. Queries trying to wait on this will raise a fatal error / silently panic
Poisoned,
}

/// A span and a query key
#[derive(Clone, Debug)]
pub struct QueryInfo<'tcx> {
@@ -59,9 +54,6 @@ pub struct QueryJob<'tcx> {
/// The parent query job which created this job and is implicitly waiting on it.
pub parent: Option<Lrc<QueryJob<'tcx>>>,

/// Diagnostic messages which are emitted while the query executes
pub diagnostics: Lock<Vec<Diagnostic>>,

/// The latch which is used to wait on this job
#[cfg(parallel_queries)]
latch: QueryLatch<'tcx>,
@@ -71,7 +63,6 @@ impl<'tcx> QueryJob<'tcx> {
/// Creates a new query job
pub fn new(info: QueryInfo<'tcx>, parent: Option<Lrc<QueryJob<'tcx>>>) -> Self {
QueryJob {
diagnostics: Lock::new(Vec::new()),
info,
parent,
#[cfg(parallel_queries)]
@@ -83,46 +74,56 @@ impl<'tcx> QueryJob<'tcx> {
///
/// For single-threaded rustc there are no concurrent jobs running, so if we are waiting for
/// any query, that means there is a query cycle; thus this always results in a cycle error.
pub(super) fn await<'lcx>(
#[cfg(not(parallel_queries))]
#[inline(never)]
#[cold]
pub(super) fn await<'lcx, 'a, D: QueryDescription<'tcx>>(
&self,
tcx: TyCtxt<'_, 'tcx, 'lcx>,
span: Span,
) -> Result<(), CycleError<'tcx>> {
#[cfg(not(parallel_queries))]
{
self.find_cycle_in_stack(tcx, span)
}
) -> TryGetJob<'a, 'tcx, D> {
TryGetJob::JobCompleted(Err(Box::new(self.find_cycle_in_stack(tcx, span))))
}

#[cfg(parallel_queries)]
{
tls::with_related_context(tcx, move |icx| {
let mut waiter = Lrc::new(QueryWaiter {
query: icx.query.clone(),
span,
cycle: Lock::new(None),
condvar: Condvar::new(),
});
self.latch.await(&waiter);
// FIXME: Get rid of this lock. We have ownership of the QueryWaiter
// although another thread may still have a Lrc reference so we cannot
// use Lrc::get_mut
let mut cycle = waiter.cycle.lock();
match cycle.take() {
None => Ok(()),
Some(cycle) => Err(cycle)
}
})
}
/// Waits for the query job to complete.
///
/// For single-threaded rustc there are no concurrent jobs running, so if we are waiting for
/// any query, that means there is a query cycle; thus this always results in a cycle error.
#[cfg(parallel_queries)]
pub(super) fn await<'lcx>(
&self,
tcx: TyCtxt<'_, 'tcx, 'lcx>,
span: Span,
) -> Result<(), Box<CycleError<'tcx>>> {
tls::with_related_context(tcx, move |icx| {
let mut waiter = Lrc::new(QueryWaiter {
query: icx.query.map(|q| LrcRef::into(q)),
span,
cycle: Lock::new(None),
condvar: Condvar::new(),
});
self.latch.await(&waiter);
// FIXME: Get rid of this lock. We have ownership of the QueryWaiter
// although another thread may still have a Lrc reference so we cannot
// use Lrc::get_mut
let mut cycle = waiter.cycle.lock();
match cycle.take() {
None => Ok(()),
Some(cycle) => Err(Box::new(cycle))
}
})
}

#[cfg(not(parallel_queries))]
fn find_cycle_in_stack<'lcx>(
&self,
tcx: TyCtxt<'_, 'tcx, 'lcx>,
span: Span,
) -> Result<(), CycleError<'tcx>> {
) -> CycleError<'tcx> {
// Get the current executing query (waiter) and find the waitee amongst its parents
let mut current_job = tls::with_related_context(tcx, |icx| icx.query.clone());
let mut current_job = tls::with_related_context(tcx, |icx| {
icx.query.map(|q| LrcRef::into(q))
});
let mut cycle = Vec::new();

while let Some(job) = current_job {
@@ -140,10 +141,10 @@ impl<'tcx> QueryJob<'tcx> {
let usage = job.parent.as_ref().map(|parent| {
(job.info.span, parent.info.query.clone())
});
return Err(CycleError { usage, cycle });
return CycleError { usage, cycle };
}

current_job = job.parent.clone();
current_job = job.parent.as_ref().map(|parent| parent.clone());
}

panic!("did not find a cycle")
12 changes: 6 additions & 6 deletions src/librustc/ty/query/mod.rs
@@ -705,22 +705,22 @@ impl<'a, 'tcx, 'lcx> TyCtxt<'a, 'tcx, 'lcx> {
self,
span: Span,
key: DefId,
) -> Result<&'tcx [Ty<'tcx>], DiagnosticBuilder<'a>> {
self.try_get_query::<queries::adt_sized_constraint<'_>>(span, key)
) -> Result<&'tcx [Ty<'tcx>], Box<DiagnosticBuilder<'a>>> {
self.global_tcx().try_get_query::<queries::adt_sized_constraint<'_>>(span, key)
}
pub fn try_needs_drop_raw(
self,
span: Span,
key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<bool, DiagnosticBuilder<'a>> {
self.try_get_query::<queries::needs_drop_raw<'_>>(span, key)
) -> Result<bool, Box<DiagnosticBuilder<'a>>> {
self.global_tcx().try_get_query::<queries::needs_drop_raw<'_>>(span, key)
}
pub fn try_optimized_mir(
self,
span: Span,
key: DefId,
) -> Result<&'tcx mir::Mir<'tcx>, DiagnosticBuilder<'a>> {
self.try_get_query::<queries::optimized_mir<'_>>(span, key)
) -> Result<&'tcx mir::Mir<'tcx>, Box<DiagnosticBuilder<'a>>> {
self.global_tcx().try_get_query::<queries::optimized_mir<'_>>(span, key)
}
}

12 changes: 8 additions & 4 deletions src/librustc/ty/query/on_disk_cache.rs
@@ -351,11 +351,13 @@ impl<'sess> OnDiskCache<'sess> {
/// Store a diagnostic emitted during the current compilation session.
/// Anything stored like this will be available via `load_diagnostics` in
/// the next compilation session.
#[inline(never)]
#[cold]
pub fn store_diagnostics(&self,
dep_node_index: DepNodeIndex,
diagnostics: Vec<Diagnostic>) {
diagnostics: Box<Vec<Diagnostic>>) {
let mut current_diagnostics = self.current_diagnostics.borrow_mut();
let prev = current_diagnostics.insert(dep_node_index, diagnostics);
let prev = current_diagnostics.insert(dep_node_index, *diagnostics);
debug_assert!(prev.is_none());
}

@@ -377,13 +379,15 @@ impl<'sess> OnDiskCache<'sess> {
/// Since many anonymous queries can share the same `DepNode`, we aggregate
/// them -- as opposed to regular queries where we assume that there is a
/// 1:1 relationship between query-key and `DepNode`.
#[inline(never)]
#[cold]
pub fn store_diagnostics_for_anon_node(&self,
dep_node_index: DepNodeIndex,
mut diagnostics: Vec<Diagnostic>) {
mut diagnostics: Box<Vec<Diagnostic>>) {
let mut current_diagnostics = self.current_diagnostics.borrow_mut();

let x = current_diagnostics.entry(dep_node_index).or_insert_with(|| {
mem::replace(&mut diagnostics, Vec::new())
mem::replace(&mut *diagnostics, Vec::new())
});

x.extend(diagnostics.into_iter());
476 changes: 298 additions & 178 deletions src/librustc/ty/query/plumbing.rs

Large diffs are not rendered by default.

47 changes: 47 additions & 0 deletions src/librustc_data_structures/by_move.rs
@@ -0,0 +1,47 @@

use std::mem::ManuallyDrop;
use std::mem::MaybeUninit;
use std::ops::{Deref, DerefMut};

pub type MoveSlot<'a, T> = Move<'a, MaybeUninit<T>>;

pub struct Move<'a, T>(&'a mut ManuallyDrop<T>);

impl<'a, T> Move<'a, MaybeUninit<T>> {
pub fn uninit(ptr: &'a mut ManuallyDrop<MaybeUninit<T>>) -> Self {
Move(ptr)
}

// Assumes that MaybeUninit is #[repr(transparent)]
pub fn init(&mut self, value: T) -> Move<'a, T> {
*self.0 = ManuallyDrop::new(MaybeUninit::new(value));
Move(unsafe { &mut *(self.0 as *mut _ as *mut ManuallyDrop<T>) })
}
}

#[macro_export]
#[allow_internal_unstable]
macro_rules! uninit_slot {
() => (&mut std::mem::ManuallyDrop::new(std::mem::MaybeUninit::uninitialized()))
}

impl<'a, T> Deref for Move<'a, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&*self.0
}
}

impl<'a, T> DerefMut for Move<'a, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut *self.0
}
}

impl<'a, T> Drop for Move<'a, T> {
fn drop(&mut self) {
unsafe {
ManuallyDrop::drop(&mut self.0)
}
}
}
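A rough sketch of how the `Move`/`MoveSlot` API above appears intended to be used: the caller reserves uninitialized space, wraps it with `Move::uninit`, and `init` moves a value into the slot, returning a `Move<T>` that owns it and drops it in place. The snippet assumes the module is reachable as `rustc_data_structures::by_move` and uses the current `MaybeUninit::uninit()` constructor rather than the older `uninitialized()` name that `uninit_slot!` calls:

use std::mem::{ManuallyDrop, MaybeUninit};
use rustc_data_structures::by_move::{Move, MoveSlot};

fn example() {
    // Reserve uninitialized stack space for the value...
    let slot = &mut ManuallyDrop::new(MaybeUninit::<String>::uninit());
    // ...wrap it as a single-use slot...
    let mut place: MoveSlot<'_, String> = Move::uninit(slot);
    // ...and move a value in; `value` now owns the String and drops it in place.
    let value = place.init(String::from("query result"));
    assert_eq!(*value, "query result");
}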
1 change: 1 addition & 0 deletions src/librustc_data_structures/fingerprint.rs
@@ -86,6 +86,7 @@ impl ::std::fmt::Display for Fingerprint {
}

impl stable_hasher::StableHasherResult for Fingerprint {
#[inline]
fn finish(hasher: stable_hasher::StableHasher<Self>) -> Self {
let (_0, _1) = hasher.finalize();
Fingerprint(_0, _1)
28 changes: 28 additions & 0 deletions src/librustc_data_structures/lib.rs
@@ -30,6 +30,10 @@
#![feature(allow_internal_unstable)]
#![feature(vec_resize_with)]
#![feature(hash_raw_entry)]
#![feature(stmt_expr_attributes)]
#![feature(core_intrinsics)]
#![feature(integer_atomics)]
#![feature(maybe_uninit)]

#![cfg_attr(unix, feature(libc))]
#![cfg_attr(test, feature(test))]
@@ -58,6 +62,26 @@ extern crate rustc_cratesio_shim;

pub use rustc_serialize::hex::ToHex;

#[macro_export]
macro_rules! likely {
($e:expr) => {
#[allow(unused_unsafe)]
{
unsafe { std::intrinsics::likely($e) }
}
}
}

#[macro_export]
macro_rules! unlikely {
($e:expr) => {
#[allow(unused_unsafe)]
{
unsafe { std::intrinsics::unlikely($e) }
}
}
}

pub mod macros;
pub mod svh;
pub mod base_n;
@@ -80,6 +104,8 @@ pub mod sorted_map;
pub mod sync;
pub mod tiny_list;
pub mod thin_vec;
pub mod vec;
pub mod by_move;
pub mod transitive_relation;
pub use ena::unify;
pub mod vec_linked_list;
@@ -91,12 +117,14 @@ pub struct OnDrop<F: Fn()>(pub F);
impl<F: Fn()> OnDrop<F> {
/// Forgets the function which prevents it from running.
/// Ensure that the function owns no memory, otherwise it will be leaked.
#[inline]
pub fn disable(self) {
std::mem::forget(self);
}
}

impl<F: Fn()> Drop for OnDrop<F> {
#[inline]
fn drop(&mut self) {
(self.0)();
}
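The `likely!` and `unlikely!` macros added above wrap `std::intrinsics::likely` and `std::intrinsics::unlikely` so branch hints read as plain expressions at the call site (hence the new `core_intrinsics` and `stmt_expr_attributes` feature gates). A rough usage sketch with a hypothetical cache lookup:

fn lookup(cache_hit: bool) -> u32 {
    if likely!(cache_hit) {
        // Hot path: kept small so it can be laid out as the fall-through branch.
        1
    } else {
        cold_miss()
    }
}

#[inline(never)]
#[cold]
fn cold_miss() -> u32 {
    0
}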
248 changes: 99 additions & 149 deletions src/librustc_data_structures/sync.rs
@@ -20,10 +20,6 @@
//! It internally uses `parking_lot::RwLock` if cfg!(parallel_queries) is true,
//! `RefCell` otherwise.
//!
//! `LockCell` is a thread safe version of `Cell`, with `set` and `get` operations.
//! It can never deadlock. It uses `Cell` when
//! cfg!(parallel_queries) is false, otherwise it is a `Lock`.
//!
//! `MTLock` is a mutex which disappears if cfg!(parallel_queries) is false.
//!
//! `MTRef` is an immutable reference if cfg!(parallel_queries), and a mutable reference otherwise.
@@ -33,12 +29,9 @@
use std::collections::HashMap;
use std::hash::{Hash, BuildHasher};
use std::cmp::Ordering;
use std::marker::PhantomData;
use std::fmt::Debug;
use std::fmt::Formatter;
use std::fmt;
use std::ops::{Deref, DerefMut};
use std::mem::ManuallyDrop;
use owning_ref::{Erased, OwningRef};

pub fn serial_join<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
@@ -64,6 +57,9 @@ pub fn serial_scope<F, R>(f: F) -> R
f(&SerialScope)
}

pub use std::sync::atomic::Ordering::SeqCst;
pub use std::sync::atomic::Ordering;

cfg_if! {
if #[cfg(not(parallel_queries))] {
pub auto trait Send {}
@@ -79,6 +75,62 @@ cfg_if! {
}
}

use std::ops::Add;

#[derive(Debug)]
pub struct Atomic<T: Copy>(Cell<T>);

impl<T: Copy> Atomic<T> {
pub fn new(v: T) -> Self {
Atomic(Cell::new(v))
}
}

impl<T: Copy + PartialEq> Atomic<T> {
pub fn into_inner(self) -> T {
self.0.into_inner()
}

pub fn load(&self, _: Ordering) -> T {
self.0.get()
}

pub fn store(&self, val: T, _: Ordering) {
self.0.set(val)
}

pub fn swap(&self, val: T, _: Ordering) -> T {
self.0.replace(val)
}

pub fn compare_exchange(&self,
current: T,
new: T,
_: Ordering,
_: Ordering)
-> Result<T, T> {
let read = self.0.get();
if read == current {
self.0.set(new);
Ok(read)
} else {
Err(read)
}
}
}

impl<T: Add<Output=T> + Copy> Atomic<T> {
pub fn fetch_add(&self, val: T, _: Ordering) -> T {
let old = self.0.get();
self.0.set(old + val);
old
}
}

pub type AtomicUsize = Atomic<usize>;
pub type AtomicBool = Atomic<bool>;
pub type AtomicU64 = Atomic<u64>;
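In the non-parallel build these aliases resolve to the `Cell`-based `Atomic<T>` above and the `Ordering` argument is simply ignored; under cfg(parallel_queries) the real `std::sync::atomic` types are re-exported instead. Call sites can therefore be written once against the atomic API, as in this small sketch (shown against the std types):

use std::sync::atomic::{AtomicU64, Ordering::SeqCst};

// Spend one unit of fuel if any is left; compiles unchanged whether AtomicU64
// is the real atomic or the single-threaded Cell-based shim.
fn spend_fuel(fuel: &AtomicU64) -> bool {
    let remaining = fuel.load(SeqCst);
    if remaining > 0 {
        fuel.store(remaining - 1, SeqCst);
        true
    } else {
        false
    }
}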

pub use self::serial_join as join;
pub use self::serial_scope as scope;

@@ -170,47 +222,6 @@ cfg_if! {
MTLock(self.0.clone())
}
}

pub struct LockCell<T>(Cell<T>);

impl<T> LockCell<T> {
#[inline(always)]
pub fn new(inner: T) -> Self {
LockCell(Cell::new(inner))
}

#[inline(always)]
pub fn into_inner(self) -> T {
self.0.into_inner()
}

#[inline(always)]
pub fn set(&self, new_inner: T) {
self.0.set(new_inner);
}

#[inline(always)]
pub fn get(&self) -> T where T: Copy {
self.0.get()
}

#[inline(always)]
pub fn set_mut(&mut self, new_inner: T) {
self.0.set(new_inner);
}

#[inline(always)]
pub fn get_mut(&mut self) -> T where T: Copy {
self.0.get()
}
}

impl<T> LockCell<Option<T>> {
#[inline(always)]
pub fn take(&self) -> Option<T> {
unsafe { (*self.0.as_ptr()).take() }
}
}
} else {
pub use std::marker::Send as Send;
pub use std::marker::Sync as Sync;
@@ -223,6 +234,8 @@ cfg_if! {
pub use parking_lot::MutexGuard as LockGuard;
pub use parking_lot::MappedMutexGuard as MappedLockGuard;

pub use std::sync::atomic::{AtomicBool, AtomicUsize, AtomicU64};

pub use std::sync::Arc as Lrc;
pub use std::sync::Weak as Weak;

@@ -288,47 +301,6 @@ cfg_if! {
v.erase_send_sync_owner()
}}
}

pub struct LockCell<T>(Lock<T>);

impl<T> LockCell<T> {
#[inline(always)]
pub fn new(inner: T) -> Self {
LockCell(Lock::new(inner))
}

#[inline(always)]
pub fn into_inner(self) -> T {
self.0.into_inner()
}

#[inline(always)]
pub fn set(&self, new_inner: T) {
*self.0.lock() = new_inner;
}

#[inline(always)]
pub fn get(&self) -> T where T: Copy {
*self.0.lock()
}

#[inline(always)]
pub fn set_mut(&mut self, new_inner: T) {
*self.0.get_mut() = new_inner;
}

#[inline(always)]
pub fn get_mut(&mut self) -> T where T: Copy {
*self.0.get_mut()
}
}

impl<T> LockCell<Option<T>> {
#[inline(always)]
pub fn take(&self) -> Option<T> {
self.0.lock().take()
}
}
}
}

@@ -476,65 +448,6 @@ impl<T> Once<T> {
}
}

impl<T: Copy + Debug> Debug for LockCell<T> {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
f.debug_struct("LockCell")
.field("value", &self.get())
.finish()
}
}

impl<T:Default> Default for LockCell<T> {
/// Creates a `LockCell<T>`, with the `Default` value for T.
#[inline]
fn default() -> LockCell<T> {
LockCell::new(Default::default())
}
}

impl<T:PartialEq + Copy> PartialEq for LockCell<T> {
#[inline]
fn eq(&self, other: &LockCell<T>) -> bool {
self.get() == other.get()
}
}

impl<T:Eq + Copy> Eq for LockCell<T> {}

impl<T:PartialOrd + Copy> PartialOrd for LockCell<T> {
#[inline]
fn partial_cmp(&self, other: &LockCell<T>) -> Option<Ordering> {
self.get().partial_cmp(&other.get())
}

#[inline]
fn lt(&self, other: &LockCell<T>) -> bool {
self.get() < other.get()
}

#[inline]
fn le(&self, other: &LockCell<T>) -> bool {
self.get() <= other.get()
}

#[inline]
fn gt(&self, other: &LockCell<T>) -> bool {
self.get() > other.get()
}

#[inline]
fn ge(&self, other: &LockCell<T>) -> bool {
self.get() >= other.get()
}
}

impl<T:Ord + Copy> Ord for LockCell<T> {
#[inline]
fn cmp(&self, other: &LockCell<T>) -> Ordering {
self.get().cmp(&other.get())
}
}

#[derive(Debug)]
pub struct Lock<T>(InnerLock<T>);

@@ -757,3 +670,40 @@ impl<T> DerefMut for OneThread<T> {
&mut self.inner
}
}

pub struct LrcRef<'a, T: ?Sized>(&'a T);

impl<'a, T: 'a + ?Sized> Clone for LrcRef<'a, T> {
fn clone(&self) -> Self {
LrcRef(self.0)
}
}
impl<'a, T: 'a + ?Sized> Copy for LrcRef<'a, T> {}

impl<'a, T: 'a + ?Sized> LrcRef<'a, T> {
#[inline]
pub fn new(lrc: &Lrc<T>) -> LrcRef<'_, T> {
LrcRef(&*lrc)
}

#[inline]
pub fn into(self) -> Lrc<T> {
unsafe {
// We know that we have a reference to an Lrc here.
// Pretend to take ownership of the Lrc with from_raw
// and then clone a new one.
// We use ManuallyDrop to ensure the reference count
// isn't decreased
let lrc = ManuallyDrop::new(Lrc::from_raw(self.0));
(*lrc).clone()
}
}
}

impl<'a, T: ?Sized> Deref for LrcRef<'a, T> {
type Target = T;

fn deref(&self) -> &T {
self.0
}
}
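A rough usage sketch of `LrcRef`: the hot path passes around a borrowed, `Copy` handle and only pays for a reference-count increment when an owned `Lrc` is actually needed. The sketch assumes the parallel configuration, where `Lrc` is `std::sync::Arc`:

use std::sync::Arc;
use rustc_data_structures::sync::LrcRef;

fn use_it(data: &Arc<Vec<u32>>) {
    // Borrow without touching the reference count...
    let borrowed = LrcRef::new(data);
    // ...and only bump it if ownership is actually required.
    let owned: Arc<Vec<u32>> = LrcRef::into(borrowed);
    assert_eq!(owned.len(), data.len());
}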
48 changes: 48 additions & 0 deletions src/librustc_data_structures/vec.rs
@@ -0,0 +1,48 @@
use smallvec::{SmallVec, Array};
use std::ptr;

pub trait SmallVecExt<A: Array> {
fn push_light(&mut self, v: A::Item);
}

#[inline(never)]
#[cold]
fn push_light_cold<A: Array>(vec: &mut SmallVec<A>, v: A::Item) {
if likely!(vec.spilled()) {
let len = vec.len();
if likely!(len < vec.capacity()) {
unsafe {
ptr::write(vec.as_mut_ptr().offset(len as isize), v);
vec.set_len(len + 1);
}
} else {
vec.push(v)
}
} else {
unsafe {
std::intrinsics::assume(vec.capacity() == A::size());
}
vec.push(v)
}
}

impl<A: Array> SmallVecExt<A> for SmallVec<A> {
#[inline(always)]
fn push_light(&mut self, v: A::Item) {
let (free, len) = if !self.spilled() {
let len = self.len();
(len < A::size(), len)
} else {
(false, 0)
};
if likely!(free) {
unsafe {
ptr::write(self.as_mut_ptr().offset(len as isize), v);
std::intrinsics::assume(!self.spilled());
self.set_len(self.len() + 1);
}
} else {
push_light_cold(self, v);
}
}
}
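A rough usage sketch of the `push_light` extension above, assuming the trait is imported from the new `rustc_data_structures::vec` module: it behaves like `SmallVec::push`, but keeps the not-yet-spilled case as short as possible and defers everything else to the `#[cold]` helper:

use smallvec::SmallVec;
use rustc_data_structures::vec::SmallVecExt;

fn record_read(reads: &mut SmallVec<[u32; 8]>, dep_node_index: u32) {
    // Hot path of dep-graph read tracking: usually appends within inline capacity.
    reads.push_light(dep_node_index);
}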
5 changes: 3 additions & 2 deletions src/librustc_driver/lib.rs
@@ -27,6 +27,7 @@
#![feature(set_stdio)]
#![feature(rustc_stack_internals)]
#![feature(no_debug)]
#![feature(integer_atomics)]

#![recursion_limit="256"]

@@ -78,7 +79,7 @@ use pretty::{PpMode, UserIdentifiedItem};
use rustc_resolve as resolve;
use rustc_save_analysis as save;
use rustc_save_analysis::DumpHandler;
use rustc_data_structures::sync::{self, Lrc};
use rustc_data_structures::sync::{self, Lrc, Ordering::SeqCst};
use rustc_data_structures::OnDrop;
use rustc::session::{self, config, Session, build_session, CompileResult};
use rustc::session::CompileIncomplete;
@@ -954,7 +955,7 @@ impl<'a> CompilerCalls<'a> for RustcDefaultCalls {
let sess = state.session;
eprintln!("Fuel used by {}: {}",
sess.print_fuel_crate.as_ref().unwrap(),
sess.print_fuel.get());
sess.print_fuel.load(SeqCst));
}
}
control
14 changes: 6 additions & 8 deletions src/librustc_errors/lib.rs
@@ -36,15 +36,13 @@ use self::Level::*;

use emitter::{Emitter, EmitterWriter};

use rustc_data_structures::sync::{self, Lrc, Lock, LockCell};
use rustc_data_structures::sync::{self, Lrc, Lock, AtomicUsize, AtomicBool, SeqCst};
use rustc_data_structures::fx::FxHashSet;
use rustc_data_structures::stable_hasher::StableHasher;

use std::borrow::Cow;
use std::cell::Cell;
use std::{error, fmt};
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::SeqCst;
use std::panic;

use termcolor::{ColorSpec, Color};
@@ -281,7 +279,7 @@ pub struct Handler {

err_count: AtomicUsize,
emitter: Lock<Box<dyn Emitter + sync::Send>>,
continue_after_error: LockCell<bool>,
continue_after_error: AtomicBool,
delayed_span_bugs: Lock<Vec<Diagnostic>>,

// This set contains the `DiagnosticId` of all emitted diagnostics to avoid
@@ -380,7 +378,7 @@ impl Handler {
flags,
err_count: AtomicUsize::new(0),
emitter: Lock::new(e),
continue_after_error: LockCell::new(true),
continue_after_error: AtomicBool::new(true),
delayed_span_bugs: Lock::new(Vec::new()),
taught_diagnostics: Default::default(),
emitted_diagnostic_codes: Default::default(),
@@ -389,7 +387,7 @@ impl Handler {
}

pub fn set_continue_after_error(&self, continue_after_error: bool) {
self.continue_after_error.set(continue_after_error);
self.continue_after_error.store(continue_after_error, SeqCst);
}

/// Resets the diagnostic error count as well as the cached emitted diagnostics.
@@ -668,7 +666,7 @@ impl Handler {
let mut db = DiagnosticBuilder::new(self, lvl, msg);
db.set_span(msp.clone());
db.emit();
if !self.continue_after_error.get() {
if !self.continue_after_error.load(SeqCst) {
self.abort_if_errors();
}
}
@@ -679,7 +677,7 @@ impl Handler {
let mut db = DiagnosticBuilder::new_with_code(self, lvl, Some(code), msg);
db.set_span(msp.clone());
db.emit();
if !self.continue_after_error.get() {
if !self.continue_after_error.load(SeqCst) {
self.abort_if_errors();
}
}
2 changes: 1 addition & 1 deletion src/librustc_mir/monomorphize/partitioning.rs
@@ -212,7 +212,7 @@ pub trait CodegenUnitExt<'tcx> {
}

fn codegen_dep_node(&self, tcx: TyCtxt<'_, 'tcx, 'tcx>) -> DepNode {
DepNode::new(tcx, DepConstructor::CompileCodegenUnit(self.name().clone()))
DepNode::new_inlined(tcx, DepConstructor::CompileCodegenUnit(self.name().clone()))
}
}

1 change: 1 addition & 0 deletions src/libserialize/opaque.rs
@@ -172,6 +172,7 @@ pub struct Decoder<'a> {
}

impl<'a> Decoder<'a> {
#[inline]
pub fn new(data: &'a [u8], position: usize) -> Decoder<'a> {
Decoder {
data,
3 changes: 3 additions & 0 deletions src/libstd/collections/hash/table.rs
@@ -740,6 +740,7 @@ impl<K, V> RawTable<K, V> {
}
}

#[inline]
fn new_internal(
capacity: usize,
fallibility: Fallibility,
@@ -755,12 +756,14 @@ impl<K, V> RawTable<K, V> {

/// Tries to create a new raw table from a given capacity. If it cannot allocate,
/// it returns with AllocErr.
#[inline]
pub fn try_new(capacity: usize) -> Result<RawTable<K, V>, CollectionAllocErr> {
Self::new_internal(capacity, Fallible)
}

/// Creates a new raw table from a given capacity. All buckets are
/// initially empty.
#[inline]
pub fn new(capacity: usize) -> RawTable<K, V> {
match Self::new_internal(capacity, Infallible) {
Err(CollectionAllocErr::CapacityOverflow) => panic!("capacity overflow"),
1 change: 1 addition & 0 deletions src/libsyntax/parse/mod.rs
@@ -81,6 +81,7 @@ impl ParseSess {
}
}

#[inline]
pub fn source_map(&self) -> &SourceMap {
&self.source_map
}