diff --git a/.travis.yml b/.travis.yml
index d315546930330..ca28aabc41fde 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,6 +1,8 @@
 language: shell
 sudo: required
 dist: trusty
+# FIXME(#44398) shouldn't need to be here
+group: deprecated-2017Q3
 services:
   - docker
 
@@ -124,10 +126,11 @@ matrix:
 
 env:
   global:
-    - SCCACHE_BUCKET=rust-lang-ci-sccache
-    - AWS_ACCESS_KEY_ID=AKIAIMX7VLAS3PZAVLUQ
+    - SCCACHE_BUCKET=rust-lang-ci-sccache2
+    - SCCACHE_REGION=us-west-1
+    - AWS_ACCESS_KEY_ID=AKIAJAMV3QAMMA6AXHFQ
     # AWS_SECRET_ACCESS_KEY=...
-    - secure: "Pixhh0hXDqGCdOyLtGFjli3J2AtDWIpyb2btIrLe956nCBDRutRoMm6rv5DI9sFZN07Mms7VzNNvhc9wCW1y63JAm414d2Co7Ob8kWMZlz9l9t7ACHuktUiis8yr+S4Quq1Vqd6pqi7pf2J++UxC8R/uLeqVrubzr6+X7AbmEFE="
+    - secure: "j96XxTVOSUf4s4r4htIxn/fvIa5DWbMgLqWl7r8z2QfgUwscmkMXAwXuFNc7s7bGTpV/+CgDiMFFM6BAFLGKutytIF6oA02s9b+usQYnM0th7YQ2AIgm9GtMTJCJp4AoyfFmh8F2faUICBZlfVLUJ34udHEe35vOklix+0k4WDo="
 
 before_install:
   # If we are building a pull request, do the build if $ALLOW_PR == 1
@@ -159,7 +162,7 @@ install:
     else
       case "$TRAVIS_OS_NAME" in
         linux)
-          travis_retry curl -fo $HOME/stamp https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-03-17-stamp-x86_64-unknown-linux-musl &&
+          travis_retry curl -fo $HOME/stamp https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror/2017-03-17-stamp-x86_64-unknown-linux-musl &&
             chmod +x $HOME/stamp &&
             export PATH=$PATH:$HOME
           ;;
@@ -168,9 +171,9 @@ install:
             travis_retry brew update &&
             travis_retry brew install xz;
           fi &&
-          travis_retry curl -fo /usr/local/bin/sccache https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-05-12-sccache-x86_64-apple-darwin &&
+          travis_retry curl -fo /usr/local/bin/sccache https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror/2017-05-12-sccache-x86_64-apple-darwin &&
             chmod +x /usr/local/bin/sccache &&
-          travis_retry curl -fo /usr/local/bin/stamp https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-03-17-stamp-x86_64-apple-darwin &&
+          travis_retry curl -fo /usr/local/bin/stamp https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror/2017-03-17-stamp-x86_64-apple-darwin &&
             chmod +x /usr/local/bin/stamp
           ;;
       esac
@@ -254,29 +257,29 @@ before_deploy:
 
 deploy:
   - provider: s3
-    bucket: rust-lang-ci
+    bucket: rust-lang-ci2
     skip_cleanup: true
     local_dir: deploy
     upload_dir: rustc-builds
     acl: public_read
-    region: us-east-1
-    access_key_id: AKIAIPQVNYF2T3DTYIWQ
+    region: us-west-1
+    access_key_id: AKIAJVBODR3IA4O72THQ
     secret_access_key:
-      secure: "FBqDqOTeIPMu6v/WYPf4CFSlh9rLRZGKVtpLa5KkyuOhXRTrnEzBduEtS8/FMIxdQImvurhSvxWvqRybMOi4qoVfjMqqpHAI7uBbidbrvAcJoHNsx6BgUNVCIoH6a0UsAjTUtm6/YPIpzbHoLZXPL0GrHPMk6Mu04qVSmcYNWn4="
+      secure: "kUGd3t7JcVWFESgIlzvsM8viZgCA9Encs3creW0xLJaLSeI1iVjlJK4h/2/nO6y224AFrh/GUfsNr4/4AlxPuYb8OU5oC5Lv+Ff2JiRDYtuNpyQSKAQp+bRYytWMtrmhja91h118Mbm90cUfcLPwkdiINgJNTXhPKg5Cqu3VYn0="
     on:
       branch: auto
       condition: $DEPLOY = 1
 
   - provider: s3
-    bucket: rust-lang-ci
+    bucket: rust-lang-ci2
     skip_cleanup: true
     local_dir: deploy
     upload_dir: rustc-builds-try
     acl: public_read
-    region: us-east-1
-    access_key_id: AKIAIPQVNYF2T3DTYIWQ
+    region: us-west-1
+    access_key_id: AKIAJVBODR3IA4O72THQ
     secret_access_key:
-      secure: "FBqDqOTeIPMu6v/WYPf4CFSlh9rLRZGKVtpLa5KkyuOhXRTrnEzBduEtS8/FMIxdQImvurhSvxWvqRybMOi4qoVfjMqqpHAI7uBbidbrvAcJoHNsx6BgUNVCIoH6a0UsAjTUtm6/YPIpzbHoLZXPL0GrHPMk6Mu04qVSmcYNWn4="
+      secure: "kUGd3t7JcVWFESgIlzvsM8viZgCA9Encs3creW0xLJaLSeI1iVjlJK4h/2/nO6y224AFrh/GUfsNr4/4AlxPuYb8OU5oC5Lv+Ff2JiRDYtuNpyQSKAQp+bRYytWMtrmhja91h118Mbm90cUfcLPwkdiINgJNTXhPKg5Cqu3VYn0="
     on:
       branch: try
       condition: $DEPLOY = 1 && $ALLOW_TRY = 1
@@ -284,15 +287,15 @@ deploy:
   # this is the same as the above deployment provider except that it uploads to
   # a slightly different directory and has a different trigger
   - provider: s3
-    bucket: rust-lang-ci
+    bucket: rust-lang-ci2
     skip_cleanup: true
     local_dir: deploy
     upload_dir: rustc-builds-alt
     acl: public_read
-    region: us-east-1
-    access_key_id: AKIAIPQVNYF2T3DTYIWQ
+    region: us-west-1
+    access_key_id: AKIAJVBODR3IA4O72THQ
     secret_access_key:
-      secure: "FBqDqOTeIPMu6v/WYPf4CFSlh9rLRZGKVtpLa5KkyuOhXRTrnEzBduEtS8/FMIxdQImvurhSvxWvqRybMOi4qoVfjMqqpHAI7uBbidbrvAcJoHNsx6BgUNVCIoH6a0UsAjTUtm6/YPIpzbHoLZXPL0GrHPMk6Mu04qVSmcYNWn4="
+      secure: "kUGd3t7JcVWFESgIlzvsM8viZgCA9Encs3creW0xLJaLSeI1iVjlJK4h/2/nO6y224AFrh/GUfsNr4/4AlxPuYb8OU5oC5Lv+Ff2JiRDYtuNpyQSKAQp+bRYytWMtrmhja91h118Mbm90cUfcLPwkdiINgJNTXhPKg5Cqu3VYn0="
     on:
       branch: auto
       condition: $DEPLOY_ALT = 1
diff --git a/appveyor.yml b/appveyor.yml
index f548d6694c80f..599d1b40ceb1e 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -1,8 +1,9 @@
 environment:
-  SCCACHE_BUCKET: rust-lang-ci-sccache
-  AWS_ACCESS_KEY_ID: AKIAIMX7VLAS3PZAVLUQ
+  SCCACHE_BUCKET: rust-lang-ci-sccache2
+  SCCACHE_REGION: us-west-1
+  AWS_ACCESS_KEY_ID: AKIAJAMV3QAMMA6AXHFQ
   AWS_SECRET_ACCESS_KEY:
-    secure: 1UkmbiDd15tWtYbMm5O2Uqm0b0Ur8v1MoSlydxl4ojcroPeerRMlUges0l57py8c
+    secure: 7Y+JiquYedOAgnUU26uL0DPzrxmTtR+qIwG6rNKSuWDffqU3vVZxbGXim9QpTO80
   SCCACHE_DIGEST: f808afabb4a4eb1d7112bcb3fa6be03b61e93412890c88e177c667eb37f46353d7ec294e559b16f9f4b5e894f2185fe7670a0df15fd064889ecbd80f0c34166c
 
   # By default schannel checks revocation of certificates unlike some other SSL
@@ -40,13 +41,13 @@ environment:
   - MSYS_BITS: 32
     RUST_CONFIGURE_ARGS: --build=i686-pc-windows-gnu
     SCRIPT: python x.py test
-    MINGW_URL: https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror
+    MINGW_URL: https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror
     MINGW_ARCHIVE: i686-6.3.0-release-posix-dwarf-rt_v5-rev2.7z
     MINGW_DIR: mingw32
   - MSYS_BITS: 64
     SCRIPT: python x.py test
     RUST_CONFIGURE_ARGS: --build=x86_64-pc-windows-gnu
-    MINGW_URL: https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror
+    MINGW_URL: https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror
     MINGW_ARCHIVE: x86_64-6.3.0-release-posix-seh-rt_v5-rev2.7z
     MINGW_DIR: mingw64
 
@@ -67,14 +68,14 @@ environment:
   - MSYS_BITS: 32
     RUST_CONFIGURE_ARGS: --build=i686-pc-windows-gnu --enable-extended
     SCRIPT: python x.py dist
-    MINGW_URL: https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror
+    MINGW_URL: https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror
     MINGW_ARCHIVE: i686-6.3.0-release-posix-dwarf-rt_v5-rev2.7z
     MINGW_DIR: mingw32
     DEPLOY: 1
   - MSYS_BITS: 64
     SCRIPT: python x.py dist
     RUST_CONFIGURE_ARGS: --build=x86_64-pc-windows-gnu --enable-extended
-    MINGW_URL: https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror
+    MINGW_URL: https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror
     MINGW_ARCHIVE: x86_64-6.3.0-release-posix-seh-rt_v5-rev2.7z
     MINGW_DIR: mingw64
     DEPLOY: 1
@@ -132,25 +133,25 @@ install:
   - set PATH=C:\Python27;%PATH%
 
   # Download and install sccache
-  - appveyor-retry appveyor DownloadFile https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-05-12-sccache-x86_64-pc-windows-msvc
+  - appveyor-retry appveyor DownloadFile https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror/2017-05-12-sccache-x86_64-pc-windows-msvc
   - mv 2017-05-12-sccache-x86_64-pc-windows-msvc sccache.exe
   - set PATH=%PATH%;%CD%
 
   # Download and install ninja
   #
   # Note that this is originally from the github releases patch of Ninja
-  - appveyor-retry appveyor DownloadFile https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-03-15-ninja-win.zip
+  - appveyor-retry appveyor DownloadFile https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror/2017-03-15-ninja-win.zip
   - 7z x 2017-03-15-ninja-win.zip
   - set RUST_CONFIGURE_ARGS=%RUST_CONFIGURE_ARGS% --enable-ninja
   # - set PATH=%PATH%;%CD% -- this already happens above for sccache
 
   # Install InnoSetup to get `iscc` used to produce installers
-  - appveyor-retry appveyor DownloadFile https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-08-22-is.exe
+  - appveyor-retry appveyor DownloadFile https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror/2017-08-22-is.exe
   - 2017-08-22-is.exe /VERYSILENT /SUPPRESSMSGBOXES /NORESTART /SP-
   - set PATH="C:\Program Files (x86)\Inno Setup 5";%PATH%
 
   # Help debug some handle issues on AppVeyor
-  - appveyor-retry appveyor DownloadFile https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-05-15-Handle.zip
+  - appveyor-retry appveyor DownloadFile https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror/2017-05-15-Handle.zip
   - mkdir handle
   - 7z x -ohandle 2017-05-15-Handle.zip
   - set PATH=%PATH%;%CD%\handle
@@ -185,12 +186,12 @@ before_deploy:
 deploy:
   - provider: S3
     skip_cleanup: true
-    access_key_id: AKIAIPQVNYF2T3DTYIWQ
+    access_key_id: AKIAJVBODR3IA4O72THQ
     secret_access_key:
-      secure: +11jsUNFTQ9dq5Ad1i2+PeUJaXluFJ0zIJAXESE1dFT3Kdjku4/eDdgyjgsB6GnV
-    bucket: rust-lang-ci
+      secure: tQWIE+DJHjXaV4np/3YeETkEmXngtIuIgAO/LYKQaUshGLgN8cBCFGG3cHx5lKLt
+    bucket: rust-lang-ci2
     set_public: true
-    region: us-east-1
+    region: us-west-1
     artifact: /.*/
     folder: rustc-builds
     on:
@@ -202,12 +203,12 @@ deploy:
   # different upload directory and a slightly different trigger
   - provider: S3
     skip_cleanup: true
-    access_key_id: AKIAIPQVNYF2T3DTYIWQ
+    access_key_id: AKIAJVBODR3IA4O72THQ
     secret_access_key:
-      secure: +11jsUNFTQ9dq5Ad1i2+PeUJaXluFJ0zIJAXESE1dFT3Kdjku4/eDdgyjgsB6GnV
-    bucket: rust-lang-ci
+      secure: tQWIE+DJHjXaV4np/3YeETkEmXngtIuIgAO/LYKQaUshGLgN8cBCFGG3cHx5lKLt
+    bucket: rust-lang-ci2
     set_public: true
-    region: us-east-1
+    region: us-west-1
     artifact: /.*/
     folder: rustc-builds-alt
     on:
diff --git a/src/Cargo.lock b/src/Cargo.lock
index 123c884585c19..19f3042c70a92 100644
--- a/src/Cargo.lock
+++ b/src/Cargo.lock
@@ -173,12 +173,6 @@ dependencies = [
  "filetime 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
-[[package]]
-name = "cargo"
-version = "0.22.0"
-source = "git+https://github.com/rust-lang/cargo#bcf3997b1fa177afc5b6c632a6fbbf6cc75df427"
-replace = "cargo 0.22.0"
-
 [[package]]
 name = "cargo"
 version = "0.22.0"
@@ -1168,9 +1162,9 @@ version = "0.1.0"
 
 [[package]]
 name = "rls"
-version = "0.1.0"
+version = "0.121.0"
 dependencies = [
- "cargo 0.22.0 (git+https://github.com/rust-lang/cargo)",
+ "cargo 0.22.0",
  "env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "jsonrpc-core 7.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "languageserver-types 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1179,15 +1173,14 @@ dependencies = [
  "racer 2.0.10 (registry+https://github.com/rust-lang/crates.io-index)",
  "rls-analysis 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "rls-data 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rls-rustc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "rls-vfs 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustfmt-nightly 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_derive 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_json 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "toml 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "url 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "url_serde 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -1213,6 +1206,11 @@ dependencies = [
  "serde_derive 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
+[[package]]
+name = "rls-rustc"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
 [[package]]
 name = "rls-span"
 version = "0.4.0"
@@ -2198,7 +2196,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 "checksum bitflags 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1370e9fc2a6ae53aea8b7a5110edbd08836ed87c88736dfabccade1c2b44bff4"
 "checksum bitflags 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4efd02e230a02e18f92fc2735f44597385ed02ad8f831e7c1c1156ee5e1ab3a5"
 "checksum bufstream 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "f2f382711e76b9de6c744cc00d0497baba02fb00a787f088c879f01d09468e32"
-"checksum cargo 0.22.0 (git+https://github.com/rust-lang/cargo)" = "<none>"
 "checksum cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "d4c819a1287eb618df47cc647173c5c4c66ba19d888a6e50d605672aed3140de"
 "checksum clap 2.26.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2267a8fdd4dce6956ba6649e130f62fb279026e5e84b92aa939ac8f85ce3f9f0"
 "checksum cmake 0.1.24 (registry+https://github.com/rust-lang/crates.io-index)" = "b8ebbb35d3dc9cd09497168f33de1acb79b265d350ab0ac34133b98f8509af1f"
@@ -2283,6 +2280,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 "checksum regex-syntax 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ad890a5eef7953f55427c50575c680c42841653abd2b028b68cd223d157f62db"
 "checksum rls-analysis 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "d2cb40c0371765897ae428b5706bb17135705ad4f6d1b8b6afbaabcf8c9b5cff"
 "checksum rls-data 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "11d339f1888e33e74d8032de0f83c40b2bdaaaf04a8cfc03b32186c3481fb534"
+"checksum rls-rustc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5fa757c9d547d460427ceff01875f9cac5f5acd8fc6543946e9b0335ba29d537"
 "checksum rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5d7c7046dc6a92f2ae02ed302746db4382e75131b9ce20ce967259f6b5867a6a"
 "checksum rls-vfs 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "ffd34691a510938bb67fe0444fb363103c73ffb31c121d1e16bc92d8945ea8ff"
 "checksum rustc-demangle 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "aee45432acc62f7b9a108cc054142dac51f979e69e71ddce7d6fc7adf29e817e"
diff --git a/src/Cargo.toml b/src/Cargo.toml
index ffec3eb075500..8754d5b2b6d05 100644
--- a/src/Cargo.toml
+++ b/src/Cargo.toml
@@ -55,6 +55,3 @@ debug-assertions = false
 [profile.test]
 debug = false
 debug-assertions = false
-
-[replace]
-"https://github.com/rust-lang/cargo#0.22.0" = { path = "tools/cargo" }
diff --git a/src/bootstrap/channel.rs b/src/bootstrap/channel.rs
index 9c1ae83d38281..0c22cc2f6c439 100644
--- a/src/bootstrap/channel.rs
+++ b/src/bootstrap/channel.rs
@@ -29,7 +29,7 @@ pub const CFG_RELEASE_NUM: &str = "1.21.0";
 // An optional number to put after the label, e.g. '.2' -> '-beta.2'
 // Be sure to make this starts with a dot to conform to semver pre-release
 // versions (section 9)
-pub const CFG_PRERELEASE_VERSION: &str = ".1";
+pub const CFG_PRERELEASE_VERSION: &str = ".3";
 
 pub struct GitInfo {
     inner: Option<Info>,
diff --git a/src/bootstrap/dist.rs b/src/bootstrap/dist.rs
index bfcfb5f9a37f8..65a59d78d7c5f 100644
--- a/src/bootstrap/dist.rs
+++ b/src/bootstrap/dist.rs
@@ -1081,8 +1081,14 @@ impl Step for Rls {
            .arg("--output-dir").arg(&distdir(build))
            .arg("--non-installed-overlay").arg(&overlay)
            .arg(format!("--package-name={}-{}", name, target))
-           .arg("--component-name=rls")
            .arg("--legacy-manifest-dirs=rustlib,cargo");
+
+        if build.config.channel == "nightly" {
+            cmd.arg("--component-name=rls");
+        } else {
+            cmd.arg("--component-name=rls-preview");
+        }
+
         build.run(&mut cmd);
         distdir(build).join(format!("{}-{}.tar.gz", name, target))
     }
@@ -1279,9 +1285,12 @@ impl Step for Extended {
             cp_r(&work.join(&format!("{}-{}", pkgname(build, "rust-std"), target))
                         .join(format!("rust-std-{}", target)),
                     &exe.join("rust-std"));
-            cp_r(&work.join(&format!("{}-{}", pkgname(build, "rls"), target))
-                        .join("rls"),
-                    &exe.join("rls"));
+            let rls_path = if build.config.channel == "nightly" {
+                work.join(&format!("{}-{}", pkgname(build, "rls"), target)).join("rls")
+            } else {
+                work.join(&format!("{}-{}", pkgname(build, "rls"), target)).join("rls-preview")
+            };
+            cp_r(&rls_path, &exe.join("rls"));
             cp_r(&work.join(&format!("{}-{}", pkgname(build, "rust-analysis"), target))
                         .join(format!("rust-analysis-{}", target)),
                     &exe.join("rust-analysis"));
diff --git a/src/bootstrap/install.rs b/src/bootstrap/install.rs
index 89690e444d1f6..608924c9c28d1 100644
--- a/src/bootstrap/install.rs
+++ b/src/bootstrap/install.rs
@@ -200,7 +200,7 @@ install!((self, builder, _config),
         builder.ensure(dist::Src);
         install_src(builder, self.stage);
     }, ONLY_BUILD;
-    Rustc, "src/librustc", _config.extended, only_hosts: true, {
+    Rustc, "src/librustc", true, only_hosts: true, {
         builder.ensure(dist::Rustc {
             compiler: builder.compiler(self.stage, self.target),
         });
diff --git a/src/bootstrap/lib.rs b/src/bootstrap/lib.rs
index 55358f2ffcb73..84a9e56b644c8 100644
--- a/src/bootstrap/lib.rs
+++ b/src/bootstrap/lib.rs
@@ -123,7 +123,6 @@ extern crate build_helper;
 extern crate serde_derive;
 #[macro_use]
 extern crate lazy_static;
-extern crate serde;
 extern crate serde_json;
 extern crate cmake;
 extern crate filetime;
diff --git a/src/bootstrap/native.rs b/src/bootstrap/native.rs
index 0a307e72bf61d..8429b64513dcf 100644
--- a/src/bootstrap/native.rs
+++ b/src/bootstrap/native.rs
@@ -349,7 +349,7 @@ impl Step for Openssl {
         if !tarball.exists() {
             let tmp = tarball.with_extension("tmp");
             // originally from https://www.openssl.org/source/...
-            let url = format!("https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/{}",
+            let url = format!("https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror/{}",
                               name);
             let mut ok = false;
             for _ in 0..3 {
diff --git a/src/bootstrap/tool.rs b/src/bootstrap/tool.rs
index d798e8de3dffa..e759f1a3e6f85 100644
--- a/src/bootstrap/tool.rs
+++ b/src/bootstrap/tool.rs
@@ -198,7 +198,7 @@ tool!(
     Linkchecker, "src/tools/linkchecker", "linkchecker", Mode::Libstd;
     CargoTest, "src/tools/cargotest", "cargotest", Mode::Libstd;
     Compiletest, "src/tools/compiletest", "compiletest", Mode::Libtest;
-    BuildManifest, "src/tools/build-manifest", "build-manifest", Mode::Librustc;
+    BuildManifest, "src/tools/build-manifest", "build-manifest", Mode::Libstd;
     RemoteTestClient, "src/tools/remote-test-client", "remote-test-client", Mode::Libstd;
     RustInstaller, "src/tools/rust-installer", "rust-installer", Mode::Libstd;
 );
diff --git a/src/ci/docker/cross/install-mips-musl.sh b/src/ci/docker/cross/install-mips-musl.sh
index 416bb75155e74..eeb4aacbbb74c 100755
--- a/src/ci/docker/cross/install-mips-musl.sh
+++ b/src/ci/docker/cross/install-mips-musl.sh
@@ -15,7 +15,7 @@ mkdir /usr/local/mips-linux-musl
 # originally from
 # https://downloads.openwrt.org/snapshots/trunk/ar71xx/generic/
 # OpenWrt-Toolchain-ar71xx-generic_gcc-5.3.0_musl-1.1.16.Linux-x86_64.tar.bz2
-URL="https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror"
+URL="https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror"
 FILE="OpenWrt-Toolchain-ar71xx-generic_gcc-5.3.0_musl-1.1.16.Linux-x86_64.tar.bz2"
 curl -L "$URL/$FILE" | tar xjf - -C /usr/local/mips-linux-musl --strip-components=2
 
diff --git a/src/ci/docker/cross/install-mipsel-musl.sh b/src/ci/docker/cross/install-mipsel-musl.sh
index 9744b242fb919..74b6a10e77a67 100755
--- a/src/ci/docker/cross/install-mipsel-musl.sh
+++ b/src/ci/docker/cross/install-mipsel-musl.sh
@@ -15,7 +15,7 @@ mkdir /usr/local/mipsel-linux-musl
 # Note that this originally came from:
 # https://downloads.openwrt.org/snapshots/trunk/malta/generic/
 # OpenWrt-Toolchain-malta-le_gcc-5.3.0_musl-1.1.15.Linux-x86_64.tar.bz2
-URL="https://s3.amazonaws.com/rust-lang-ci/libc"
+URL="https://s3-us-west-1.amazonaws.com/rust-lang-ci2/libc"
 FILE="OpenWrt-Toolchain-malta-le_gcc-5.3.0_musl-1.1.15.Linux-x86_64.tar.bz2"
 curl -L "$URL/$FILE" | tar xjf - -C /usr/local/mipsel-linux-musl --strip-components=2
 
diff --git a/src/ci/docker/dist-i686-linux/build-openssl.sh b/src/ci/docker/dist-i686-linux/build-openssl.sh
index 27cd064f901a0..34da0ed631093 100755
--- a/src/ci/docker/dist-i686-linux/build-openssl.sh
+++ b/src/ci/docker/dist-i686-linux/build-openssl.sh
@@ -13,7 +13,7 @@ set -ex
 source shared.sh
 
 VERSION=1.0.2k
-URL=https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/openssl-$VERSION.tar.gz
+URL=https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror/openssl-$VERSION.tar.gz
 
 curl $URL | tar xzf -
 
diff --git a/src/ci/docker/dist-x86_64-linux/build-openssl.sh b/src/ci/docker/dist-x86_64-linux/build-openssl.sh
index 27cd064f901a0..34da0ed631093 100755
--- a/src/ci/docker/dist-x86_64-linux/build-openssl.sh
+++ b/src/ci/docker/dist-x86_64-linux/build-openssl.sh
@@ -13,7 +13,7 @@ set -ex
 source shared.sh
 
 VERSION=1.0.2k
-URL=https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/openssl-$VERSION.tar.gz
+URL=https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror/openssl-$VERSION.tar.gz
 
 curl $URL | tar xzf -
 
diff --git a/src/ci/docker/dist-x86_64-netbsd/build-netbsd-toolchain.sh b/src/ci/docker/dist-x86_64-netbsd/build-netbsd-toolchain.sh
index ea335a249736c..54100b49cb9f5 100755
--- a/src/ci/docker/dist-x86_64-netbsd/build-netbsd-toolchain.sh
+++ b/src/ci/docker/dist-x86_64-netbsd/build-netbsd-toolchain.sh
@@ -35,7 +35,7 @@ cd netbsd
 
 mkdir -p /x-tools/x86_64-unknown-netbsd/sysroot
 
-URL=https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror
+URL=https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror
 
 # Originally from ftp://ftp.netbsd.org/pub/NetBSD/NetBSD-$BSD/source/sets/*.tgz
 curl $URL/2017-03-17-netbsd-src.tgz | tar xzf -
diff --git a/src/ci/docker/run.sh b/src/ci/docker/run.sh
index 5eba81ff60a22..7087033e117a2 100755
--- a/src/ci/docker/run.sh
+++ b/src/ci/docker/run.sh
@@ -57,9 +57,10 @@ mkdir -p $objdir/tmp
 
 args=
 if [ "$SCCACHE_BUCKET" != "" ]; then
-    args="$args --env SCCACHE_BUCKET=$SCCACHE_BUCKET"
-    args="$args --env AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID"
-    args="$args --env AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY"
+    args="$args --env SCCACHE_BUCKET"
+    args="$args --env SCCACHE_REGION"
+    args="$args --env AWS_ACCESS_KEY_ID"
+    args="$args --env AWS_SECRET_ACCESS_KEY"
     args="$args --env SCCACHE_ERROR_LOG=/tmp/sccache/sccache.log"
     args="$args --volume $objdir/tmp:/tmp/sccache"
 else
@@ -82,10 +83,10 @@ exec docker \
   --env SRC=/checkout \
   $args \
   --env CARGO_HOME=/cargo \
-  --env DEPLOY=$DEPLOY \
-  --env DEPLOY_ALT=$DEPLOY_ALT \
+  --env DEPLOY \
+  --env DEPLOY_ALT \
   --env LOCAL_USER_ID=`id -u` \
-  --env TRAVIS=${TRAVIS-false} \
+  --env TRAVIS \
   --env TRAVIS_BRANCH \
   --volume "$HOME/.cargo:/cargo" \
   --volume "$HOME/rustsrc:$HOME/rustsrc" \
diff --git a/src/ci/docker/scripts/sccache.sh b/src/ci/docker/scripts/sccache.sh
index 98b0ed712c02a..ce2d45563f7b5 100644
--- a/src/ci/docker/scripts/sccache.sh
+++ b/src/ci/docker/scripts/sccache.sh
@@ -8,9 +8,11 @@
 # option. This file may not be copied, modified, or distributed
 # except according to those terms.
 
+# ignore-tidy-linelength
+
 set -ex
 
 curl -fo /usr/local/bin/sccache \
-  https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-05-12-sccache-x86_64-unknown-linux-musl
+  https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror/2017-05-12-sccache-x86_64-unknown-linux-musl
 
 chmod +x /usr/local/bin/sccache
diff --git a/src/ci/run.sh b/src/ci/run.sh
index 39fb4e4407812..2a44011cecaf2 100755
--- a/src/ci/run.sh
+++ b/src/ci/run.sh
@@ -43,7 +43,7 @@ fi
 # FIXME: need a scheme for changing this `nightly` value to `beta` and `stable`
 #        either automatically or manually.
 if [ "$DEPLOY$DEPLOY_ALT" != "" ]; then
-  RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --release-channel=nightly"
+  RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --release-channel=beta"
   RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --enable-llvm-static-stdcpp"
 
   if [ "$NO_LLVM_ASSERTIONS" = "1" ]; then
diff --git a/src/doc/unstable-book/src/library-features/compiler-fences.md b/src/doc/unstable-book/src/library-features/compiler-fences.md
deleted file mode 100644
index b1e36ab13d5ae..0000000000000
--- a/src/doc/unstable-book/src/library-features/compiler-fences.md
+++ /dev/null
@@ -1,106 +0,0 @@
-# `compiler_fences`
-
-The tracking issue for this feature is: [#41091]
-
-[#41091]: https://github.com/rust-lang/rust/issues/41091
-
-------------------------
-
-The `compiler_fences` feature exposes the `compiler_fence` function
-in `std::sync::atomic`. This function is conceptually similar to C++'s
-`atomic_signal_fence`, which can currently only be accessed in nightly
-Rust using the `atomic_singlethreadfence_*` instrinsic functions in
-`core`, or through the mostly equivalent literal assembly:
-
-```rust
-#![feature(asm)]
-unsafe { asm!("" ::: "memory" : "volatile") };
-```
-
-A `compiler_fence` restricts the kinds of memory re-ordering the
-compiler is allowed to do. Specifically, depending on the given ordering
-semantics, the compiler may be disallowed from moving reads or writes
-from before or after the call to the other side of the call to
-`compiler_fence`. Note that it does **not** prevent the *hardware*
-from doing such re-ordering. This is not a problem in a single-threaded,
-execution context, but when other threads may modify memory at the same
-time, stronger synchronization primitives are required.
-
-## Examples
-
-`compiler_fence` is generally only useful for preventing a thread from
-racing *with itself*. That is, if a given thread is executing one piece
-of code, and is then interrupted, and starts executing code elsewhere
-(while still in the same thread, and conceptually still on the same
-core). In traditional programs, this can only occur when a signal
-handler is registered. In more low-level code, such situations can also
-arise when handling interrupts, when implementing green threads with
-pre-emption, etc.
-
-To give a straightforward example of when a `compiler_fence` is
-necessary, consider the following example:
-
-```rust
-# use std::sync::atomic::{AtomicBool, AtomicUsize};
-# use std::sync::atomic::{ATOMIC_BOOL_INIT, ATOMIC_USIZE_INIT};
-# use std::sync::atomic::Ordering;
-static IMPORTANT_VARIABLE: AtomicUsize = ATOMIC_USIZE_INIT;
-static IS_READY: AtomicBool = ATOMIC_BOOL_INIT;
-
-fn main() {
-    IMPORTANT_VARIABLE.store(42, Ordering::Relaxed);
-    IS_READY.store(true, Ordering::Relaxed);
-}
-
-fn signal_handler() {
-    if IS_READY.load(Ordering::Relaxed) {
-        assert_eq!(IMPORTANT_VARIABLE.load(Ordering::Relaxed), 42);
-    }
-}
-```
-
-The way it is currently written, the `assert_eq!` is *not* guaranteed to
-succeed, despite everything happening in a single thread. To see why,
-remember that the compiler is free to swap the stores to
-`IMPORTANT_VARIABLE` and `IS_READ` since they are both
-`Ordering::Relaxed`. If it does, and the signal handler is invoked right
-after `IS_READY` is updated, then the signal handler will see
-`IS_READY=1`, but `IMPORTANT_VARIABLE=0`.
-
-Using a `compiler_fence`, we can remedy this situation:
-
-```rust
-#![feature(compiler_fences)]
-# use std::sync::atomic::{AtomicBool, AtomicUsize};
-# use std::sync::atomic::{ATOMIC_BOOL_INIT, ATOMIC_USIZE_INIT};
-# use std::sync::atomic::Ordering;
-use std::sync::atomic::compiler_fence;
-
-static IMPORTANT_VARIABLE: AtomicUsize = ATOMIC_USIZE_INIT;
-static IS_READY: AtomicBool = ATOMIC_BOOL_INIT;
-
-fn main() {
-    IMPORTANT_VARIABLE.store(42, Ordering::Relaxed);
-    // prevent earlier writes from being moved beyond this point
-    compiler_fence(Ordering::Release);
-    IS_READY.store(true, Ordering::Relaxed);
-}
-
-fn signal_handler() {
-    if IS_READY.load(Ordering::Relaxed) {
-        assert_eq!(IMPORTANT_VARIABLE.load(Ordering::Relaxed), 42);
-    }
-}
-```
-
-A deeper discussion of compiler barriers with various re-ordering
-semantics (such as `Ordering::SeqCst`) is beyond the scope of this text.
-Curious readers are encouraged to read the Linux kernel's discussion of
-[memory barriers][1], the C++ references on [`std::memory_order`][2] and
-[`atomic_signal_fence`][3], and [this StackOverflow answer][4] for
-further details.
-
-[1]: https://www.kernel.org/doc/Documentation/memory-barriers.txt
-[2]: http://en.cppreference.com/w/cpp/atomic/memory_order
-[3]: http://www.cplusplus.com/reference/atomic/atomic_signal_fence/
-[4]: http://stackoverflow.com/a/18454971/472927
diff --git a/src/doc/unstable-book/src/library-features/iterator-for-each.md b/src/doc/unstable-book/src/library-features/iterator-for-each.md
deleted file mode 100644
index ebeb5f6a1de51..0000000000000
--- a/src/doc/unstable-book/src/library-features/iterator-for-each.md
+++ /dev/null
@@ -1,17 +0,0 @@
-# `iterator_for_each`
-
-The tracking issue for this feature is: [#42986]
-
-[#42986]: https://github.com/rust-lang/rust/issues/42986
-
-------------------------
-
-To call a closure on each element of an iterator, you can use `for_each`:
-
-```rust
-#![feature(iterator_for_each)]
-
-fn main() {
-    (0..10).for_each(|i| println!("{}", i));
-}
-```
diff --git a/src/doc/unstable-book/src/library-features/splice.md b/src/doc/unstable-book/src/library-features/splice.md
index ca7f78a8f79e5..3d33f02076802 100644
--- a/src/doc/unstable-book/src/library-features/splice.md
+++ b/src/doc/unstable-book/src/library-features/splice.md
@@ -1,14 +1,13 @@
 # `splice`
 
-The tracking issue for this feature is: [#32310]
+The tracking issue for this feature is: [#44643]
 
-[#32310]: https://github.com/rust-lang/rust/issues/32310
+[#44643]: https://github.com/rust-lang/rust/issues/44643
 
 ------------------------
 
-The `splice()` method on `Vec` and `String` allows you to replace a range
-of values in a vector or string with another range of values, and returns
-the replaced values.
+The `splice()` method on `String` allows you to replace a range
+of values in a string with another range of values.
 
 A simple example:
 
@@ -21,4 +20,4 @@ let beta_offset = s.find('β').unwrap_or(s.len());
 let t: String = s.splice(..beta_offset, "Α is capital alpha; ").collect();
 assert_eq!(t, "α is alpha, ");
 assert_eq!(s, "Α is capital alpha; β is beta");
-```
\ No newline at end of file
+```
diff --git a/src/liballoc/allocator.rs b/src/liballoc/allocator.rs
index f14f27023249e..5a9cd82b9d119 100644
--- a/src/liballoc/allocator.rs
+++ b/src/liballoc/allocator.rs
@@ -464,6 +464,29 @@ impl fmt::Display for CannotReallocInPlace {
 ///  * if a layout `k` fits a memory block (denoted by `ptr`)
 ///    currently allocated via an allocator `a`, then it is legal to
 ///    use that layout to deallocate it, i.e. `a.dealloc(ptr, k);`.
+///
+/// # Unsafety
+///
+/// The `Alloc` trait is an `unsafe` trait for a number of reasons, and
+/// implementors must ensure that they adhere to these contracts:
+///
+/// * Pointers returned from allocation functions must point to valid memory and
+///   retain their validity until at least the instance of `Alloc` is dropped
+///   itself.
+///
+/// * It's undefined behavior if global allocators unwind.  This restriction may
+///   be lifted in the future, but currently a panic from any of these
+///   functions may lead to memory unsafety. Note that as of the time of this
+///   writing allocators *not* intending to be global allocators can still panic
+///   in their implementation without violating memory safety.
+///
+/// * `Layout` queries and calculations in general must be correct. Callers of
+///   this trait are allowed to rely on the contracts defined on each method,
+///   and implementors must ensure such contracts remain true.
+///
+/// Note that this list may get tweaked over time as clarifications are made in
+/// the future. Additionally global allocators may gain unique requirements for
+/// how to safely implement one in the future as well.
 pub unsafe trait Alloc {
 
     // (Note: existing allocators have unspecified but well-defined
diff --git a/src/liballoc/heap.rs b/src/liballoc/heap.rs
index 820f2d958d9a8..b2bd9d7d8fafa 100644
--- a/src/liballoc/heap.rs
+++ b/src/liballoc/heap.rs
@@ -27,24 +27,32 @@ pub mod __core {
 
 extern "Rust" {
     #[allocator]
+    #[rustc_allocator_nounwind]
     fn __rust_alloc(size: usize, align: usize, err: *mut u8) -> *mut u8;
     #[cold]
+    #[rustc_allocator_nounwind]
     fn __rust_oom(err: *const u8) -> !;
+    #[rustc_allocator_nounwind]
     fn __rust_dealloc(ptr: *mut u8, size: usize, align: usize);
+    #[rustc_allocator_nounwind]
     fn __rust_usable_size(layout: *const u8,
                           min: *mut usize,
                           max: *mut usize);
+    #[rustc_allocator_nounwind]
     fn __rust_realloc(ptr: *mut u8,
                       old_size: usize,
                       old_align: usize,
                       new_size: usize,
                       new_align: usize,
                       err: *mut u8) -> *mut u8;
+    #[rustc_allocator_nounwind]
     fn __rust_alloc_zeroed(size: usize, align: usize, err: *mut u8) -> *mut u8;
+    #[rustc_allocator_nounwind]
     fn __rust_alloc_excess(size: usize,
                            align: usize,
                            excess: *mut usize,
                            err: *mut u8) -> *mut u8;
+    #[rustc_allocator_nounwind]
     fn __rust_realloc_excess(ptr: *mut u8,
                              old_size: usize,
                              old_align: usize,
@@ -52,11 +60,13 @@ extern "Rust" {
                              new_align: usize,
                              excess: *mut usize,
                              err: *mut u8) -> *mut u8;
+    #[rustc_allocator_nounwind]
     fn __rust_grow_in_place(ptr: *mut u8,
                             old_size: usize,
                             old_align: usize,
                             new_size: usize,
                             new_align: usize) -> u8;
+    #[rustc_allocator_nounwind]
     fn __rust_shrink_in_place(ptr: *mut u8,
                               old_size: usize,
                               old_align: usize,
diff --git a/src/liballoc/lib.rs b/src/liballoc/lib.rs
index 2d41ed648102b..8b0dd4e56c664 100644
--- a/src/liballoc/lib.rs
+++ b/src/liballoc/lib.rs
@@ -105,6 +105,7 @@
 #![feature(pattern)]
 #![feature(placement_in_syntax)]
 #![feature(placement_new_protocol)]
+#![feature(rustc_attrs)]
 #![feature(shared)]
 #![feature(slice_get_slice)]
 #![feature(slice_patterns)]
diff --git a/src/liballoc/string.rs b/src/liballoc/string.rs
index b1919c7c968c9..db8b4f1f23178 100644
--- a/src/liballoc/string.rs
+++ b/src/liballoc/string.rs
@@ -1420,7 +1420,7 @@ impl String {
     /// assert_eq!(t, "α is alpha, ");
     /// assert_eq!(s, "Α is capital alpha; β is beta");
     /// ```
-    #[unstable(feature = "splice", reason = "recently added", issue = "32310")]
+    #[unstable(feature = "splice", reason = "recently added", issue = "44643")]
     pub fn splice<'a, 'b, R>(&'a mut self, range: R, replace_with: &'b str) -> Splice<'a, 'b>
         where R: RangeArgument<usize>
     {
@@ -2250,7 +2250,7 @@ impl<'a> FusedIterator for Drain<'a> {}
 /// [`splice()`]: struct.String.html#method.splice
 /// [`String`]: struct.String.html
 #[derive(Debug)]
-#[unstable(feature = "splice", reason = "recently added", issue = "32310")]
+#[unstable(feature = "splice", reason = "recently added", issue = "44643")]
 pub struct Splice<'a, 'b> {
     /// Will be used as &'a mut String in the destructor
     string: *mut String,
@@ -2263,12 +2263,12 @@ pub struct Splice<'a, 'b> {
     replace_with: &'b str,
 }
 
-#[unstable(feature = "splice", reason = "recently added", issue = "32310")]
+#[unstable(feature = "splice", reason = "recently added", issue = "44643")]
 unsafe impl<'a, 'b> Sync for Splice<'a, 'b> {}
-#[unstable(feature = "splice", reason = "recently added", issue = "32310")]
+#[unstable(feature = "splice", reason = "recently added", issue = "44643")]
 unsafe impl<'a, 'b> Send for Splice<'a, 'b> {}
 
-#[unstable(feature = "splice", reason = "recently added", issue = "32310")]
+#[unstable(feature = "splice", reason = "recently added", issue = "44643")]
 impl<'a, 'b> Drop for Splice<'a, 'b> {
     fn drop(&mut self) {
         unsafe {
@@ -2278,7 +2278,7 @@ impl<'a, 'b> Drop for Splice<'a, 'b> {
     }
 }
 
-#[unstable(feature = "splice", reason = "recently added", issue = "32310")]
+#[unstable(feature = "splice", reason = "recently added", issue = "44643")]
 impl<'a, 'b> Iterator for Splice<'a, 'b> {
     type Item = char;
 
@@ -2292,7 +2292,7 @@ impl<'a, 'b> Iterator for Splice<'a, 'b> {
     }
 }
 
-#[unstable(feature = "splice", reason = "recently added", issue = "32310")]
+#[unstable(feature = "splice", reason = "recently added", issue = "44643")]
 impl<'a, 'b> DoubleEndedIterator for Splice<'a, 'b> {
     #[inline]
     fn next_back(&mut self) -> Option<char> {
diff --git a/src/liballoc/vec.rs b/src/liballoc/vec.rs
index 8141851b8c9af..3bfd78e6c06d0 100644
--- a/src/liballoc/vec.rs
+++ b/src/liballoc/vec.rs
@@ -1942,7 +1942,6 @@ impl<T> Vec<T> {
     /// # Examples
     ///
     /// ```
-    /// #![feature(splice)]
     /// let mut v = vec![1, 2, 3];
     /// let new = [7, 8];
     /// let u: Vec<_> = v.splice(..2, new.iter().cloned()).collect();
@@ -1950,7 +1949,7 @@ impl<T> Vec<T> {
     /// assert_eq!(u, &[1, 2]);
     /// ```
     #[inline]
-    #[unstable(feature = "splice", reason = "recently added", issue = "32310")]
+    #[stable(feature = "vec_splice", since = "1.21.0")]
     pub fn splice<R, I>(&mut self, range: R, replace_with: I) -> Splice<I::IntoIter>
         where R: RangeArgument<usize>, I: IntoIterator<Item=T>
     {
@@ -2550,13 +2549,13 @@ impl<'a, T> InPlace<T> for PlaceBack<'a, T> {
 /// [`splice()`]: struct.Vec.html#method.splice
 /// [`Vec`]: struct.Vec.html
 #[derive(Debug)]
-#[unstable(feature = "splice", reason = "recently added", issue = "32310")]
+#[stable(feature = "vec_splice", since = "1.21.0")]
 pub struct Splice<'a, I: Iterator + 'a> {
     drain: Drain<'a, I::Item>,
     replace_with: I,
 }
 
-#[unstable(feature = "splice", reason = "recently added", issue = "32310")]
+#[stable(feature = "vec_splice", since = "1.21.0")]
 impl<'a, I: Iterator> Iterator for Splice<'a, I> {
     type Item = I::Item;
 
@@ -2569,18 +2568,18 @@ impl<'a, I: Iterator> Iterator for Splice<'a, I> {
     }
 }
 
-#[unstable(feature = "splice", reason = "recently added", issue = "32310")]
+#[stable(feature = "vec_splice", since = "1.21.0")]
 impl<'a, I: Iterator> DoubleEndedIterator for Splice<'a, I> {
     fn next_back(&mut self) -> Option<Self::Item> {
         self.drain.next_back()
     }
 }
 
-#[unstable(feature = "splice", reason = "recently added", issue = "32310")]
+#[stable(feature = "vec_splice", since = "1.21.0")]
 impl<'a, I: Iterator> ExactSizeIterator for Splice<'a, I> {}
 
 
-#[unstable(feature = "splice", reason = "recently added", issue = "32310")]
+#[stable(feature = "vec_splice", since = "1.21.0")]
 impl<'a, I: Iterator> Drop for Splice<'a, I> {
     fn drop(&mut self) {
         // exhaust drain first
diff --git a/src/libarena/lib.rs b/src/libarena/lib.rs
index 96fcc81e8e6ed..2be7b1bc2e17c 100644
--- a/src/libarena/lib.rs
+++ b/src/libarena/lib.rs
@@ -28,7 +28,6 @@
 #![feature(core_intrinsics)]
 #![feature(dropck_eyepatch)]
 #![feature(generic_param_attrs)]
-#![feature(needs_drop)]
 #![cfg_attr(test, feature(test))]
 
 #![allow(deprecated)]
diff --git a/src/libcore/cmp.rs b/src/libcore/cmp.rs
index ec6525485f7a1..e012cbd76ff91 100644
--- a/src/libcore/cmp.rs
+++ b/src/libcore/cmp.rs
@@ -453,12 +453,10 @@ pub trait Ord: Eq + PartialOrd<Self> {
     /// # Examples
     ///
     /// ```
-    /// #![feature(ord_max_min)]
-    ///
     /// assert_eq!(2, 1.max(2));
     /// assert_eq!(2, 2.max(2));
     /// ```
-    #[unstable(feature = "ord_max_min", issue = "25663")]
+    #[stable(feature = "ord_max_min", since = "1.21.0")]
     fn max(self, other: Self) -> Self
     where Self: Sized {
         if other >= self { other } else { self }
@@ -471,12 +469,10 @@ pub trait Ord: Eq + PartialOrd<Self> {
     /// # Examples
     ///
     /// ```
-    /// #![feature(ord_max_min)]
-    ///
     /// assert_eq!(1, 1.min(2));
     /// assert_eq!(2, 2.min(2));
     /// ```
-    #[unstable(feature = "ord_max_min", issue = "25663")]
+    #[stable(feature = "ord_max_min", since = "1.21.0")]
     fn min(self, other: Self) -> Self
     where Self: Sized {
         if self <= other { self } else { other }
diff --git a/src/libcore/iter/iterator.rs b/src/libcore/iter/iterator.rs
index 7c009114afefb..ceb2a3f1d56b5 100644
--- a/src/libcore/iter/iterator.rs
+++ b/src/libcore/iter/iterator.rs
@@ -498,8 +498,6 @@ pub trait Iterator {
     /// Basic usage:
     ///
     /// ```
-    /// #![feature(iterator_for_each)]
-    ///
     /// use std::sync::mpsc::channel;
     ///
     /// let (tx, rx) = channel();
@@ -514,15 +512,13 @@ pub trait Iterator {
     /// might be preferable to keep a functional style with longer iterators:
     ///
     /// ```
-    /// #![feature(iterator_for_each)]
-    ///
     /// (0..5).flat_map(|x| x * 100 .. x * 110)
     ///       .enumerate()
     ///       .filter(|&(i, x)| (i + x) % 3 == 0)
     ///       .for_each(|(i, x)| println!("{}:{}", i, x));
     /// ```
     #[inline]
-    #[unstable(feature = "iterator_for_each", issue = "42986")]
+    #[stable(feature = "iterator_for_each", since = "1.21.0")]
     fn for_each<F>(self, mut f: F) where
         Self: Sized, F: FnMut(Self::Item),
     {
diff --git a/src/libcore/mem.rs b/src/libcore/mem.rs
index 4b866cab1eae2..bd08bd1a8fc63 100644
--- a/src/libcore/mem.rs
+++ b/src/libcore/mem.rs
@@ -372,7 +372,6 @@ pub fn align_of_val<T: ?Sized>(val: &T) -> usize {
 /// Here's an example of how a collection might make use of needs_drop:
 ///
 /// ```
-/// #![feature(needs_drop)]
 /// use std::{mem, ptr};
 ///
 /// pub struct MyCollection<T> {
@@ -399,7 +398,7 @@ pub fn align_of_val<T: ?Sized>(val: &T) -> usize {
 /// }
 /// ```
 #[inline]
-#[unstable(feature = "needs_drop", issue = "41890")]
+#[stable(feature = "needs_drop", since = "1.21.0")]
 pub fn needs_drop<T>() -> bool {
     unsafe { intrinsics::needs_drop::<T>() }
 }
@@ -754,39 +753,39 @@ pub unsafe fn transmute_copy<T, U>(src: &T) -> U {
 /// Opaque type representing the discriminant of an enum.
 ///
 /// See the `discriminant` function in this module for more information.
-#[unstable(feature = "discriminant_value", reason = "recently added, follows RFC", issue = "24263")]
+#[stable(feature = "discriminant_value", since = "1.21.0")]
 pub struct Discriminant<T>(u64, PhantomData<*const T>);
 
 // N.B. These trait implementations cannot be derived because we don't want any bounds on T.
 
-#[unstable(feature = "discriminant_value", reason = "recently added, follows RFC", issue = "24263")]
+#[stable(feature = "discriminant_value", since = "1.21.0")]
 impl<T> Copy for Discriminant<T> {}
 
-#[unstable(feature = "discriminant_value", reason = "recently added, follows RFC", issue = "24263")]
+#[stable(feature = "discriminant_value", since = "1.21.0")]
 impl<T> clone::Clone for Discriminant<T> {
     fn clone(&self) -> Self {
         *self
     }
 }
 
-#[unstable(feature = "discriminant_value", reason = "recently added, follows RFC", issue = "24263")]
+#[stable(feature = "discriminant_value", since = "1.21.0")]
 impl<T> cmp::PartialEq for Discriminant<T> {
     fn eq(&self, rhs: &Self) -> bool {
         self.0 == rhs.0
     }
 }
 
-#[unstable(feature = "discriminant_value", reason = "recently added, follows RFC", issue = "24263")]
+#[stable(feature = "discriminant_value", since = "1.21.0")]
 impl<T> cmp::Eq for Discriminant<T> {}
 
-#[unstable(feature = "discriminant_value", reason = "recently added, follows RFC", issue = "24263")]
+#[stable(feature = "discriminant_value", since = "1.21.0")]
 impl<T> hash::Hash for Discriminant<T> {
     fn hash<H: hash::Hasher>(&self, state: &mut H) {
         self.0.hash(state);
     }
 }
 
-#[unstable(feature = "discriminant_value", reason = "recently added, follows RFC", issue = "24263")]
+#[stable(feature = "discriminant_value", since = "1.21.0")]
 impl<T> fmt::Debug for Discriminant<T> {
     fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
         fmt.debug_tuple("Discriminant")
@@ -811,7 +810,6 @@ impl<T> fmt::Debug for Discriminant<T> {
 /// the actual data:
 ///
 /// ```
-/// #![feature(discriminant_value)]
 /// use std::mem;
 ///
 /// enum Foo { A(&'static str), B(i32), C(i32) }
@@ -820,7 +818,7 @@ impl<T> fmt::Debug for Discriminant<T> {
 /// assert!(mem::discriminant(&Foo::B(1))     == mem::discriminant(&Foo::B(2)));
 /// assert!(mem::discriminant(&Foo::B(3))     != mem::discriminant(&Foo::C(3)));
 /// ```
-#[unstable(feature = "discriminant_value", reason = "recently added, follows RFC", issue = "24263")]
+#[stable(feature = "discriminant_value", since = "1.21.0")]
 pub fn discriminant<T>(v: &T) -> Discriminant<T> {
     unsafe {
         Discriminant(intrinsics::discriminant_value(v), PhantomData)
diff --git a/src/libcore/sync/atomic.rs b/src/libcore/sync/atomic.rs
index 510e01db0e965..450afbe3fbe8d 100644
--- a/src/libcore/sync/atomic.rs
+++ b/src/libcore/sync/atomic.rs
@@ -1666,10 +1666,14 @@ pub fn fence(order: Ordering) {
 
 /// A compiler memory fence.
 ///
-/// `compiler_fence` does not emit any machine code, but prevents the compiler from re-ordering
-/// memory operations across this point. Which reorderings are disallowed is dictated by the given
-/// [`Ordering`]. Note that `compiler_fence` does *not* introduce inter-thread memory
-/// synchronization; for that, a [`fence`] is needed.
+/// `compiler_fence` does not emit any machine code, but restricts the kinds
+/// of memory re-ordering the compiler is allowed to do. Specifically, depending on
+/// the given [`Ordering`] semantics, the compiler may be disallowed from moving reads
+/// or writes from before or after the call to the other side of the call to
+/// `compiler_fence`. Note that it does **not** prevent the *hardware*
+/// from doing such re-ordering. This is not a problem in a single-threaded
+/// execution context, but when other threads may modify memory at the same
+/// time, stronger synchronization primitives such as [`fence`] are required.
 ///
 /// The re-ordering prevented by the different ordering semantics are:
 ///
@@ -1678,10 +1682,54 @@ pub fn fence(order: Ordering) {
 ///  - with [`Acquire`], subsequent reads and writes cannot be moved ahead of preceding reads.
 ///  - with [`AcqRel`], both of the above rules are enforced.
 ///
+/// `compiler_fence` is generally only useful for preventing a thread from
+/// racing *with itself*. That is, if a given thread is executing one piece
+/// of code, and is then interrupted, and starts executing code elsewhere
+/// (while still in the same thread, and conceptually still on the same
+/// core). In traditional programs, this can only occur when a signal
+/// handler is registered. In more low-level code, such situations can also
+/// arise when handling interrupts, when implementing green threads with
+/// pre-emption, etc. Curious readers are encouraged to read the Linux kernel's
+/// discussion of [memory barriers].
+///
 /// # Panics
 ///
 /// Panics if `order` is [`Relaxed`].
 ///
+/// # Examples
+///
+/// Without `compiler_fence`, the `assert_eq!` in the following code
+/// is *not* guaranteed to succeed, despite everything happening in a single thread.
+/// To see why, remember that the compiler is free to swap the stores to
+/// `IMPORTANT_VARIABLE` and `IS_READY` since they are both
+/// `Ordering::Relaxed`. If it does, and the signal handler is invoked right
+/// after `IS_READY` is updated, then the signal handler will see
+/// `IS_READY=1`, but `IMPORTANT_VARIABLE=0`.
+/// Using a `compiler_fence` remedies this situation.
+///
+/// ```
+/// use std::sync::atomic::{AtomicBool, AtomicUsize};
+/// use std::sync::atomic::{ATOMIC_BOOL_INIT, ATOMIC_USIZE_INIT};
+/// use std::sync::atomic::Ordering;
+/// use std::sync::atomic::compiler_fence;
+///
+/// static IMPORTANT_VARIABLE: AtomicUsize = ATOMIC_USIZE_INIT;
+/// static IS_READY: AtomicBool = ATOMIC_BOOL_INIT;
+///
+/// fn main() {
+///     IMPORTANT_VARIABLE.store(42, Ordering::Relaxed);
+///     // prevent earlier writes from being moved beyond this point
+///     compiler_fence(Ordering::Release);
+///     IS_READY.store(true, Ordering::Relaxed);
+/// }
+///
+/// fn signal_handler() {
+///     if IS_READY.load(Ordering::Relaxed) {
+///         assert_eq!(IMPORTANT_VARIABLE.load(Ordering::Relaxed), 42);
+///     }
+/// }
+/// ```
+///
 /// [`fence`]: fn.fence.html
 /// [`Ordering`]: enum.Ordering.html
 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
@@ -1689,8 +1737,9 @@ pub fn fence(order: Ordering) {
 /// [`Release`]: enum.Ordering.html#variant.Release
 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
+/// [memory barriers]: https://www.kernel.org/doc/Documentation/memory-barriers.txt
 #[inline]
-#[unstable(feature = "compiler_fences", issue = "41091")]
+#[stable(feature = "compiler_fences", since = "1.21.0")]
 pub fn compiler_fence(order: Ordering) {
     unsafe {
         match order {
diff --git a/src/libcore/tests/lib.rs b/src/libcore/tests/lib.rs
index ab2022b1824ca..1ba9d78f9de89 100644
--- a/src/libcore/tests/lib.rs
+++ b/src/libcore/tests/lib.rs
@@ -27,7 +27,6 @@
 #![feature(inclusive_range_syntax)]
 #![feature(iter_rfind)]
 #![feature(nonzero)]
-#![feature(ord_max_min)]
 #![feature(rand)]
 #![feature(raw)]
 #![feature(refcell_replace_swap)]
diff --git a/src/liblibc b/src/liblibc
index 2a5b50b7f7f53..d64716407e3ee 160000
--- a/src/liblibc
+++ b/src/liblibc
@@ -1 +1 @@
-Subproject commit 2a5b50b7f7f539a0fd201331d6c1e0534aa332f5
+Subproject commit d64716407e3ee430fce7a008cc7d19a3072dca6c
diff --git a/src/librustc/infer/freshen.rs b/src/librustc/infer/freshen.rs
index 41858088f7e70..0fd4327cc6036 100644
--- a/src/librustc/infer/freshen.rs
+++ b/src/librustc/infer/freshen.rs
@@ -19,10 +19,21 @@
 //! fact an unbound type variable, we want the match to be regarded as ambiguous, because depending
 //! on what type that type variable is ultimately assigned, the match may or may not succeed.
 //!
+//! To handle closures, freshened types also have to contain the signature and kind of any
+//! closure in the local inference context, as otherwise the cache key might be invalidated.
+//! The way this is done is somewhat hacky - the closure signature is appended to the substs,
+//! as well as the closure kind "encoded" as a type. Also, special handling is needed when
+//! the closure signature contains a reference to the original closure.
+//!
 //! Note that you should be careful not to allow the output of freshening to leak to the user in
 //! error messages or in any other form. Freshening is only really useful as an internal detail.
 //!
-//! __An important detail concerning regions.__ The freshener also replaces *all* regions with
+//! Because of the manipulation required to handle closures, doing arbitrary operations on
+//! freshened types is not recommended. However, in addition to doing equality/hash
+//! comparisons (for caching), it is possible to do a `ty::_match` operation between
+//! 2 freshened types - this works even with the closure encoding.
+//!
+//! __An important detail concerning regions.__ The freshener also replaces *all* free regions with
 //! 'erased. The reason behind this is that, in general, we do not take region relationships into
 //! account when making type-overloaded decisions. This is important because of the design of the
 //! region inferencer, which is not based on unification but rather on accumulating and then
@@ -33,6 +44,8 @@
 use ty::{self, Ty, TyCtxt, TypeFoldable};
 use ty::fold::TypeFolder;
 use util::nodemap::FxHashMap;
+use hir::def_id::DefId;
+
 use std::collections::hash_map::Entry;
 
 use super::InferCtxt;
@@ -42,6 +55,7 @@ pub struct TypeFreshener<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
     infcx: &'a InferCtxt<'a, 'gcx, 'tcx>,
     freshen_count: u32,
     freshen_map: FxHashMap<ty::InferTy, Ty<'tcx>>,
+    closure_set: Vec<DefId>,
 }
 
 impl<'a, 'gcx, 'tcx> TypeFreshener<'a, 'gcx, 'tcx> {
@@ -51,6 +65,7 @@ impl<'a, 'gcx, 'tcx> TypeFreshener<'a, 'gcx, 'tcx> {
             infcx,
             freshen_count: 0,
             freshen_map: FxHashMap(),
+            closure_set: vec![],
         }
     }
 
@@ -76,6 +91,16 @@ impl<'a, 'gcx, 'tcx> TypeFreshener<'a, 'gcx, 'tcx> {
             }
         }
     }
+
+    fn next_fresh<F>(&mut self,
+                     freshener: F)
+                     -> Ty<'tcx>
+        where F: FnOnce(u32) -> ty::InferTy,
+    {
+        let index = self.freshen_count;
+        self.freshen_count += 1;
+        self.infcx.tcx.mk_infer(freshener(index))
+    }
 }
 
 impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for TypeFreshener<'a, 'gcx, 'tcx> {
@@ -105,7 +130,8 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for TypeFreshener<'a, 'gcx, 'tcx> {
     }
 
     fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
-        if !t.needs_infer() && !t.has_erasable_regions() {
+        if !t.needs_infer() && !t.has_erasable_regions() &&
+            !(t.has_closure_types() && self.infcx.in_progress_tables.is_some()) {
             return t;
         }
 
@@ -150,6 +176,82 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for TypeFreshener<'a, 'gcx, 'tcx> {
                 t
             }
 
+            ty::TyClosure(def_id, substs) => {
+                let closure_in_progress = self.infcx.in_progress_tables.map_or(false, |tables| {
+                    tcx.hir.as_local_node_id(def_id).map_or(false, |closure_id| {
+                        tables.borrow().local_id_root ==
+                            Some(DefId::local(tcx.hir.node_to_hir_id(closure_id).owner))
+                    })
+                });
+
+                if !closure_in_progress {
+                    // If this closure belongs to another infcx, its kind etc. were
+                    // fully inferred and its signature/kind are exactly what's listed
+                    // in its infcx. So we don't need to add the markers for them.
+                    return t.super_fold_with(self);
+                }
+
+                // We are encoding a closure in progress. Because we want our freshening
+                // key to contain all inference information needed to make sense of our
+                // value, we need to encode the closure signature and kind. The way
+                // we do that is to add them as 2 variables to the closure substs,
+                // basically because it's there (and nobody cares about adding extra stuff
+                // to substs).
+                //
+                // This means the "freshened" closure substs ends up looking like
+                //     fresh_substs = [PARENT_SUBSTS* ; UPVARS* ; SIG_MARKER ; KIND_MARKER]
+
+                let closure_sig_marker = if self.closure_set.contains(&def_id) {
+                    // We found the closure def-id within its own signature. Just
+                    // leave a new freshened type - any matching operations would
+                    // have found and compared the exterior closure already to
+                    // get here.
+                    //
+                    // In that case, we already know what the signature would
+                    // be - the parent closure on the stack already contains a
+                    // "copy" of the signature, so there is no reason to encode
+                    // it again for injectivity. Just use a fresh type variable
+                    // to make everything comparable.
+                    //
+                    // For example (closure kinds omitted for clarity)
+                    //     t=[closure FOO sig=[closure BAR sig=[closure FOO ..]]]
+                    // Would get encoded to
+                    //     t=[closure FOO sig=[closure BAR sig=[closure FOO sig=$0]]]
+                    //
+                    // and we can decode by having
+                    //     $0=[closure BAR {sig doesn't exist in decode}]
+                    // and get
+                    //     t=[closure FOO]
+                    //     sig[FOO] = [closure BAR]
+                    //     sig[BAR] = [closure FOO]
+                    self.next_fresh(ty::FreshTy)
+                } else {
+                    self.closure_set.push(def_id);
+                    let closure_sig = self.infcx.fn_sig(def_id);
+                    let closure_sig_marker = tcx.mk_fn_ptr(closure_sig.fold_with(self));
+                    self.closure_set.pop();
+                    closure_sig_marker
+                };
+
+                // HACK: use a "random" integer type to mark the kind. Because different
+                // closure kinds shouldn't get unified during selection, the "subtyping"
+                // relationship (where any kind is better than no kind) shouldn't
+                // matter here, just that the types are different.
+                let closure_kind = self.infcx.closure_kind(def_id);
+                let closure_kind_marker = match closure_kind {
+                    None => tcx.types.i8,
+                    Some(ty::ClosureKind::Fn) => tcx.types.i16,
+                    Some(ty::ClosureKind::FnMut) => tcx.types.i32,
+                    Some(ty::ClosureKind::FnOnce) => tcx.types.i64,
+                };
+
+                let params = tcx.mk_substs(
+                    substs.substs.iter().map(|k| k.fold_with(self)).chain(
+                        [closure_sig_marker, closure_kind_marker].iter().cloned().map(From::from)
+                    ));
+                tcx.mk_closure(def_id, params)
+            }
+
             ty::TyBool |
             ty::TyChar |
             ty::TyInt(..) |
@@ -165,7 +267,6 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for TypeFreshener<'a, 'gcx, 'tcx> {
             ty::TyFnDef(..) |
             ty::TyFnPtr(_) |
             ty::TyDynamic(..) |
-            ty::TyClosure(..) |
             ty::TyNever |
             ty::TyTuple(..) |
             ty::TyProjection(..) |
diff --git a/src/librustc/infer/mod.rs b/src/librustc/infer/mod.rs
index 6c9b9d853f403..56b5f6c45eb15 100644
--- a/src/librustc/infer/mod.rs
+++ b/src/librustc/infer/mod.rs
@@ -1160,6 +1160,18 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> {
         value.fold_with(&mut r)
     }
 
+    /// Returns true if `T` contains unresolved type variables. In the
+    /// process of visiting `T`, this will resolve (where possible)
+    /// type variables in `T`, but it never constructs the final,
+    /// resolved type, so it's more efficient than
+    /// `resolve_type_vars_if_possible()`.
+    pub fn any_unresolved_type_vars<T>(&self, value: &T) -> bool
+        where T: TypeFoldable<'tcx>
+    {
+        let mut r = resolve::UnresolvedTypeFinder::new(self);
+        value.visit_with(&mut r)
+    }
+
     pub fn resolve_type_and_region_vars_if_possible<T>(&self, value: &T) -> T
         where T: TypeFoldable<'tcx>
     {
diff --git a/src/librustc/infer/resolve.rs b/src/librustc/infer/resolve.rs
index 639a330dc6e67..10899e42afb81 100644
--- a/src/librustc/infer/resolve.rs
+++ b/src/librustc/infer/resolve.rs
@@ -10,7 +10,7 @@
 
 use super::{InferCtxt, FixupError, FixupResult};
 use ty::{self, Ty, TyCtxt, TypeFoldable};
-use ty::fold::TypeFolder;
+use ty::fold::{TypeFolder, TypeVisitor};
 
 ///////////////////////////////////////////////////////////////////////////
 // OPPORTUNISTIC TYPE RESOLVER
@@ -80,6 +80,43 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for OpportunisticTypeAndRegionResolv
     }
 }
 
+///////////////////////////////////////////////////////////////////////////
+// UNRESOLVED TYPE FINDER
+
+/// The unresolved type **finder** walks your type and searches for
+/// type variables that don't yet have a value. They get pushed into a
+/// vector. It does not construct the fully resolved type (which might
+/// involve some hashing and so forth).
+pub struct UnresolvedTypeFinder<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+    infcx: &'a InferCtxt<'a, 'gcx, 'tcx>,
+}
+
+impl<'a, 'gcx, 'tcx> UnresolvedTypeFinder<'a, 'gcx, 'tcx> {
+    pub fn new(infcx: &'a InferCtxt<'a, 'gcx, 'tcx>) -> Self {
+        UnresolvedTypeFinder { infcx }
+    }
+}
+
+impl<'a, 'gcx, 'tcx> TypeVisitor<'tcx> for UnresolvedTypeFinder<'a, 'gcx, 'tcx> {
+    fn visit_ty(&mut self, t: Ty<'tcx>) -> bool {
+        let t = self.infcx.shallow_resolve(t);
+        if t.has_infer_types() {
+            if let ty::TyInfer(_) = t.sty {
+                // Since we called `shallow_resolve` above, this must
+                // be an (as yet...) unresolved inference variable.
+                true
+            } else {
+                // Otherwise, visit its contents.
+                t.super_visit_with(self)
+            }
+        } else {
+            // Micro-optimize: no inference types at all. Can't have unresolved type
+            // variables, no need to visit the contents.
+            false
+        }
+    }
+}
+
 ///////////////////////////////////////////////////////////////////////////
 // FULL TYPE RESOLUTION
 
diff --git a/src/librustc/lib.rs b/src/librustc/lib.rs
index 152b2e2aa5ebc..82f01c36fee7f 100644
--- a/src/librustc/lib.rs
+++ b/src/librustc/lib.rs
@@ -24,7 +24,6 @@
 #![feature(conservative_impl_trait)]
 #![feature(const_fn)]
 #![feature(core_intrinsics)]
-#![feature(discriminant_value)]
 #![feature(i128_type)]
 #![cfg_attr(windows, feature(libc))]
 #![feature(never_type)]
@@ -34,7 +33,6 @@
 #![feature(slice_patterns)]
 #![feature(specialization)]
 #![feature(unboxed_closures)]
-#![feature(discriminant_value)]
 #![feature(trace_macros)]
 #![feature(test)]
 
diff --git a/src/librustc/lint/builtin.rs b/src/librustc/lint/builtin.rs
index 811bf9776101d..cbe642a9a76a6 100644
--- a/src/librustc/lint/builtin.rs
+++ b/src/librustc/lint/builtin.rs
@@ -30,7 +30,7 @@ declare_lint! {
 
 declare_lint! {
     pub UNUSED_EXTERN_CRATES,
-    Warn,
+    Allow,
     "extern crates that are never used"
 }
 
diff --git a/src/librustc/session/mod.rs b/src/librustc/session/mod.rs
index 823a637c7e0d4..7ff9d202c1808 100644
--- a/src/librustc/session/mod.rs
+++ b/src/librustc/session/mod.rs
@@ -642,12 +642,16 @@ pub fn build_session_with_codemap(sopts: config::Options,
     // FIXME: This is not general enough to make the warning lint completely override
     // normal diagnostic warnings, since the warning lint can also be denied and changed
     // later via the source code.
-    let can_print_warnings = sopts.lint_opts
+    let warnings_allow = sopts.lint_opts
         .iter()
         .filter(|&&(ref key, _)| *key == "warnings")
-        .map(|&(_, ref level)| *level != lint::Allow)
+        .map(|&(_, ref level)| *level == lint::Allow)
         .last()
-        .unwrap_or(true);
+        .unwrap_or(false);
+    let cap_lints_allow = sopts.lint_cap.map_or(false, |cap| cap == lint::Allow);
+
+    let can_print_warnings = !(warnings_allow || cap_lints_allow);
+
     let treat_err_as_bug = sopts.debugging_opts.treat_err_as_bug;
 
     let emitter: Box<Emitter> = match (sopts.error_format, emitter_dest) {
diff --git a/src/librustc/traits/fulfill.rs b/src/librustc/traits/fulfill.rs
index 78e47693caaf1..fbc393cbd96f2 100644
--- a/src/librustc/traits/fulfill.rs
+++ b/src/librustc/traits/fulfill.rs
@@ -251,6 +251,9 @@ impl<'a, 'gcx, 'tcx> FulfillmentContext<'tcx> {
             });
             debug!("select: outcome={:?}", outcome);
 
+            // FIXME: if we kept the original cache key, we could mark projection
+            // obligations as complete for the projection cache here.
+
             errors.extend(
                 outcome.errors.into_iter()
                               .map(|e| to_fulfillment_error(e)));
diff --git a/src/librustc/traits/project.rs b/src/librustc/traits/project.rs
index e70258007e463..bd511f1acb173 100644
--- a/src/librustc/traits/project.rs
+++ b/src/librustc/traits/project.rs
@@ -24,7 +24,7 @@ use super::VtableImplData;
 use super::util;
 
 use hir::def_id::DefId;
-use infer::InferOk;
+use infer::{InferCtxt, InferOk};
 use infer::type_variable::TypeVariableOrigin;
 use rustc_data_structures::snapshot_map::{Snapshot, SnapshotMap};
 use syntax::ast;
@@ -121,11 +121,13 @@ struct ProjectionTyCandidateSet<'tcx> {
 ///
 ///     for<...> <T as Trait>::U == V
 ///
-/// If successful, this may result in additional obligations.
+/// If successful, this may result in additional obligations. Also returns
+/// the projection cache key used to track these additional obligations.
 pub fn poly_project_and_unify_type<'cx, 'gcx, 'tcx>(
     selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>,
     obligation: &PolyProjectionObligation<'tcx>)
-    -> Result<Option<Vec<PredicateObligation<'tcx>>>, MismatchedProjectionTypes<'tcx>>
+    -> Result<Option<Vec<PredicateObligation<'tcx>>>,
+              MismatchedProjectionTypes<'tcx>>
 {
     debug!("poly_project_and_unify_type(obligation={:?})",
            obligation);
@@ -161,7 +163,8 @@ pub fn poly_project_and_unify_type<'cx, 'gcx, 'tcx>(
 fn project_and_unify_type<'cx, 'gcx, 'tcx>(
     selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>,
     obligation: &ProjectionObligation<'tcx>)
-    -> Result<Option<Vec<PredicateObligation<'tcx>>>, MismatchedProjectionTypes<'tcx>>
+    -> Result<Option<Vec<PredicateObligation<'tcx>>>,
+              MismatchedProjectionTypes<'tcx>>
 {
     debug!("project_and_unify_type(obligation={:?})",
            obligation);
@@ -396,6 +399,7 @@ fn opt_normalize_projection_type<'a, 'b, 'gcx, 'tcx>(
     let infcx = selcx.infcx();
 
     let projection_ty = infcx.resolve_type_vars_if_possible(&projection_ty);
+    let cache_key = ProjectionCacheKey { ty: projection_ty };
 
     debug!("opt_normalize_projection_type(\
            projection_ty={:?}, \
@@ -411,7 +415,8 @@ fn opt_normalize_projection_type<'a, 'b, 'gcx, 'tcx>(
     // bounds. It might be the case that we want two distinct caches,
     // or else another kind of cache entry.
 
-    match infcx.projection_cache.borrow_mut().try_start(projection_ty) {
+    let cache_result = infcx.projection_cache.borrow_mut().try_start(cache_key);
+    match cache_result {
         Ok(()) => { }
         Err(ProjectionCacheEntry::Ambiguous) => {
             // If we found ambiguity the last time, that generally
@@ -461,7 +466,7 @@ fn opt_normalize_projection_type<'a, 'b, 'gcx, 'tcx>(
                                                     projection_ty);
             selcx.infcx().report_overflow_error(&obligation, false);
         }
-        Err(ProjectionCacheEntry::NormalizedTy(ty)) => {
+        Err(ProjectionCacheEntry::NormalizedTy(mut ty)) => {
             // If we find the value in the cache, then return it along
             // with the obligations that went along with it. Note
             // that, when using a fulfillment context, these
@@ -474,6 +479,21 @@ fn opt_normalize_projection_type<'a, 'b, 'gcx, 'tcx>(
             debug!("opt_normalize_projection_type: \
                     found normalized ty `{:?}`",
                    ty);
+
+            // Once we have inferred everything we need to know, we
+            // can ignore the `obligations` from that point on.
+            if !infcx.any_unresolved_type_vars(&ty.value) {
+                infcx.projection_cache.borrow_mut().complete(cache_key);
+                ty.obligations = vec![];
+            }
+
+            push_paranoid_cache_value_obligation(infcx,
+                                                 param_env,
+                                                 projection_ty,
+                                                 cause,
+                                                 depth,
+                                                 &mut ty);
+
             return Some(ty);
         }
         Err(ProjectionCacheEntry::Error) => {
@@ -522,7 +542,10 @@ fn opt_normalize_projection_type<'a, 'b, 'gcx, 'tcx>(
                     obligations,
                 }
             };
-            infcx.projection_cache.borrow_mut().complete(projection_ty, &result);
+
+            let cache_value = prune_cache_value_obligations(infcx, &result);
+            infcx.projection_cache.borrow_mut().insert_ty(cache_key, cache_value);
+
             Some(result)
         }
         Ok(ProjectedTy::NoProgress(projected_ty)) => {
@@ -533,14 +556,14 @@ fn opt_normalize_projection_type<'a, 'b, 'gcx, 'tcx>(
                 value: projected_ty,
                 obligations: vec![]
             };
-            infcx.projection_cache.borrow_mut().complete(projection_ty, &result);
+            infcx.projection_cache.borrow_mut().insert_ty(cache_key, result.clone());
             Some(result)
         }
         Err(ProjectionTyError::TooManyCandidates) => {
             debug!("opt_normalize_projection_type: \
                     too many candidates");
             infcx.projection_cache.borrow_mut()
-                                  .ambiguous(projection_ty);
+                                  .ambiguous(cache_key);
             None
         }
         Err(ProjectionTyError::TraitSelectionError(_)) => {
@@ -551,12 +574,88 @@ fn opt_normalize_projection_type<'a, 'b, 'gcx, 'tcx>(
             // reported later
 
             infcx.projection_cache.borrow_mut()
-                                  .error(projection_ty);
+                                  .error(cache_key);
             Some(normalize_to_error(selcx, param_env, projection_ty, cause, depth))
         }
     }
 }
 
+/// If there are unresolved type variables, then we need to include
+/// any subobligations that bind them, at least until those type
+/// variables are fully resolved.
+fn prune_cache_value_obligations<'a, 'gcx, 'tcx>(infcx: &'a InferCtxt<'a, 'gcx, 'tcx>,
+                                                 result: &NormalizedTy<'tcx>)
+                                                 -> NormalizedTy<'tcx> {
+    if !infcx.any_unresolved_type_vars(&result.value) {
+        return NormalizedTy { value: result.value, obligations: vec![] };
+    }
+
+    let mut obligations: Vec<_> =
+        result.obligations
+              .iter()
+              .filter(|obligation| match obligation.predicate {
+                  // We found a `T: Foo<X = U>` predicate, let's check
+                  // if `U` references any unresolved type
+                  // variables. In principle, we only care if this
+                  // projection can help resolve any of the type
+                  // variables found in `result.value` -- but we just
+                  // check for any type variables here, for fear of
+                  // indirect obligations (e.g., we project to `?0`,
+                  // but we have `T: Foo<X = ?1>` and `?1: Bar<X =
+                  // ?0>`).
+                  ty::Predicate::Projection(ref data) =>
+                      !infcx.any_unresolved_type_vars(&data.ty()),
+
+                  // We are only interested in `T: Foo<X = U>` predicates, where
+                  // `U` references one of `unresolved_type_vars`. =)
+                  _ => false,
+              })
+              .cloned()
+              .collect();
+
+    obligations.shrink_to_fit();
+
+    NormalizedTy { value: result.value, obligations }
+}
+
+/// Whenever we give back a cache result for a projection like `<T as
+/// Trait>::Item ==> X`, we *always* include the obligation to prove
+/// that `T: Trait` (we may also include some other obligations). This
+/// may or may not be necessary -- in principle, all the obligations
+/// that must be proven to show that `T: Trait` were also returned
+/// when the cache was first populated. But there are some vague concerns,
+/// and so we take the precautionary measure of including `T: Trait` in
+/// the result:
+///
+/// Concern #1. The current setup is fragile. Perhaps someone could
+/// have failed to prove the concerns from when the cache was
+/// populated, but also not have used a snapshot, in which case the
+/// cache could remain populated even though `T: Trait` has not been
+/// shown. In this case, the "other code" is at fault -- when you
+/// project something, you are supposed to either have a snapshot or
+/// else prove all the resulting obligations -- but it's still easy to
+/// get wrong.
+///
+/// Concern #2. Even within the snapshot, if those original
+/// obligations are not yet proven, then we are able to do projections
+/// that may yet turn out to be wrong.  This *may* lead to some sort
+/// of trouble, though we don't have a concrete example of how that
+/// can occur yet.  But it seems risky at best.
+fn push_paranoid_cache_value_obligation<'a, 'gcx, 'tcx>(infcx: &'a InferCtxt<'a, 'gcx, 'tcx>,
+                                                        param_env: ty::ParamEnv<'tcx>,
+                                                        projection_ty: ty::ProjectionTy<'tcx>,
+                                                        cause: ObligationCause<'tcx>,
+                                                        depth: usize,
+                                                        result: &mut NormalizedTy<'tcx>)
+{
+    let trait_ref = projection_ty.trait_ref(infcx.tcx).to_poly_trait_ref();
+    let trait_obligation = Obligation { cause,
+                                        recursion_depth: depth,
+                                        param_env,
+                                        predicate: trait_ref.to_predicate() };
+    result.obligations.push(trait_obligation);
+}
+
 /// If we are projecting `<T as Trait>::Item`, but `T: Trait` does not
 /// hold. In various error cases, we cannot generate a valid
 /// normalized projection. Therefore, we create an inference variable
@@ -1323,8 +1422,62 @@ fn assoc_ty_def<'cx, 'gcx, 'tcx>(
 
 // # Cache
 
+/// The projection cache. Unlike the standard caches, this can
+/// include infcx-dependent type variables - therefore, we have to roll
+/// the cache back each time we roll a snapshot back, to avoid assumptions
+/// on yet-unresolved inference variables. Types with skolemized regions
+/// also have to be removed when the respective snapshot ends.
+///
+/// Because of that, projection cache entries can be "stranded" and left
+/// inaccessible when type variables inside the key are resolved. We make no
+/// attempt to recover or remove "stranded" entries, but rather let them be
+/// (for the lifetime of the infcx).
+///
+/// Entries in the projection cache might contain inference variables
+/// that will be resolved by obligations on the projection cache entry - e.g.
+/// when a type parameter in the associated type is constrained through
+/// an "RFC 447" projection on the impl.
+///
+/// When working with a fulfillment context, the derived obligations of each
+/// projection cache entry will be registered on the fulfillcx, so any users
+/// that can wait for a fulfillcx fixed point need not care about this. However,
+/// users that don't wait for a fixed point (e.g. trait evaluation) have to
+/// resolve the obligations themselves to make sure the projected result is
+/// ok and avoid issues like #43132.
+///
+/// If that is done, after evaluating the obligations, it is a good idea to
+/// call `ProjectionCache::complete` to make sure the obligations won't be
+/// re-evaluated and avoid an exponential worst-case.
+///
+/// FIXME: we probably also want some sort of cross-infcx cache here to
+/// reduce the amount of duplication. Let's see what we get with the Chalk
+/// reforms.
 pub struct ProjectionCache<'tcx> {
-    map: SnapshotMap<ty::ProjectionTy<'tcx>, ProjectionCacheEntry<'tcx>>,
+    map: SnapshotMap<ProjectionCacheKey<'tcx>, ProjectionCacheEntry<'tcx>>,
+}
+
+#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
+pub struct ProjectionCacheKey<'tcx> {
+    ty: ty::ProjectionTy<'tcx>
+}
+
+impl<'cx, 'gcx, 'tcx> ProjectionCacheKey<'tcx> {
+    pub fn from_poly_projection_predicate(selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>,
+                                          predicate: &ty::PolyProjectionPredicate<'tcx>)
+                                          -> Option<Self>
+    {
+        let infcx = selcx.infcx();
+        // We don't do cross-snapshot caching of obligations with escaping regions,
+        // so there's no cache key to use
+        infcx.tcx.no_late_bound_regions(&predicate)
+            .map(|predicate| ProjectionCacheKey {
+                // We don't attempt to match up with a specific type-variable state
+                // from a specific call to `opt_normalize_projection_type` - if
+                // there's no precise match, the original cache entry is "stranded"
+                // anyway.
+                ty: infcx.resolve_type_vars_if_possible(&predicate.projection_ty)
+            })
+    }
 }
 
 #[derive(Clone, Debug)]
@@ -1337,7 +1490,7 @@ enum ProjectionCacheEntry<'tcx> {
 
 // NB: intentionally not Clone
 pub struct ProjectionCacheSnapshot {
-    snapshot: Snapshot
+    snapshot: Snapshot,
 }
 
 impl<'tcx> ProjectionCache<'tcx> {
@@ -1356,7 +1509,7 @@ impl<'tcx> ProjectionCache<'tcx> {
     }
 
     pub fn rollback_skolemized(&mut self, snapshot: &ProjectionCacheSnapshot) {
-        self.map.partial_rollback(&snapshot.snapshot, &|k| k.has_re_skol());
+        self.map.partial_rollback(&snapshot.snapshot, &|k| k.ty.has_re_skol());
     }
 
     pub fn commit(&mut self, snapshot: ProjectionCacheSnapshot) {
@@ -1366,7 +1519,7 @@ impl<'tcx> ProjectionCache<'tcx> {
     /// Try to start normalize `key`; returns an error if
     /// normalization already occurred (this error corresponds to a
     /// cache hit, so it's actually a good thing).
-    fn try_start(&mut self, key: ty::ProjectionTy<'tcx>)
+    fn try_start(&mut self, key: ProjectionCacheKey<'tcx>)
                  -> Result<(), ProjectionCacheEntry<'tcx>> {
         if let Some(entry) = self.map.get(&key) {
             return Err(entry.clone());
@@ -1377,25 +1530,51 @@ impl<'tcx> ProjectionCache<'tcx> {
     }
 
     /// Indicates that `key` was normalized to `value`.
-    fn complete(&mut self, key: ty::ProjectionTy<'tcx>, value: &NormalizedTy<'tcx>) {
-        debug!("ProjectionCacheEntry::complete: adding cache entry: key={:?}, value={:?}",
+    fn insert_ty(&mut self, key: ProjectionCacheKey<'tcx>, value: NormalizedTy<'tcx>) {
+        debug!("ProjectionCacheEntry::insert_ty: adding cache entry: key={:?}, value={:?}",
                key, value);
-        let fresh_key = self.map.insert(key, ProjectionCacheEntry::NormalizedTy(value.clone()));
+        let fresh_key = self.map.insert(key, ProjectionCacheEntry::NormalizedTy(value));
         assert!(!fresh_key, "never started projecting `{:?}`", key);
     }
 
+    /// Mark the relevant projection cache key as having its derived obligations
+    /// complete, so they won't have to be re-computed (this is OK to do in a
+    /// snapshot - if the snapshot is rolled back, the obligations will be
+    /// marked as incomplete again).
+    pub fn complete(&mut self, key: ProjectionCacheKey<'tcx>) {
+        let ty = match self.map.get(&key) {
+            Some(&ProjectionCacheEntry::NormalizedTy(ref ty)) => {
+                debug!("ProjectionCacheEntry::complete({:?}) - completing {:?}",
+                       key, ty);
+                ty.value
+            }
+            ref value => {
+                // Type inference could "strand behind" old cache entries. Leave
+                // them alone for now.
+                debug!("ProjectionCacheEntry::complete({:?}) - ignoring {:?}",
+                       key, value);
+                return
+            }
+        };
+
+        self.map.insert(key, ProjectionCacheEntry::NormalizedTy(Normalized {
+            value: ty,
+            obligations: vec![]
+        }));
+    }
+
     /// Indicates that trying to normalize `key` resulted in
     /// ambiguity. No point in trying it again then until we gain more
     /// type information (in which case, the "fully resolved" key will
     /// be different).
-    fn ambiguous(&mut self, key: ty::ProjectionTy<'tcx>) {
+    fn ambiguous(&mut self, key: ProjectionCacheKey<'tcx>) {
         let fresh = self.map.insert(key, ProjectionCacheEntry::Ambiguous);
         assert!(!fresh, "never started projecting `{:?}`", key);
     }
 
     /// Indicates that trying to normalize `key` resulted in
     /// error.
-    fn error(&mut self, key: ty::ProjectionTy<'tcx>) {
+    fn error(&mut self, key: ProjectionCacheKey<'tcx>) {
         let fresh = self.map.insert(key, ProjectionCacheEntry::Error);
         assert!(!fresh, "never started projecting `{:?}`", key);
     }
diff --git a/src/librustc/traits/select.rs b/src/librustc/traits/select.rs
index 46bdb1344b2fe..551bfb5db47ad 100644
--- a/src/librustc/traits/select.rs
+++ b/src/librustc/traits/select.rs
@@ -16,7 +16,7 @@ use self::EvaluationResult::*;
 use super::coherence;
 use super::DerivedObligationCause;
 use super::project;
-use super::project::{normalize_with_depth, Normalized};
+use super::project::{normalize_with_depth, Normalized, ProjectionCacheKey};
 use super::{PredicateObligation, TraitObligation, ObligationCause};
 use super::{ObligationCauseCode, BuiltinDerivedObligation, ImplDerivedObligation};
 use super::{SelectionError, Unimplemented, OutputTypeParameterMismatch};
@@ -655,8 +655,14 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> {
                 let project_obligation = obligation.with(data.clone());
                 match project::poly_project_and_unify_type(self, &project_obligation) {
                     Ok(Some(subobligations)) => {
-                        self.evaluate_predicates_recursively(previous_stack,
-                                                             subobligations.iter())
+                        let result = self.evaluate_predicates_recursively(previous_stack,
+                                                                          subobligations.iter());
+                        if let Some(key) =
+                            ProjectionCacheKey::from_poly_projection_predicate(self, data)
+                        {
+                            self.infcx.projection_cache.borrow_mut().complete(key);
+                        }
+                        result
                     }
                     Ok(None) => {
                         EvaluatedToAmbig
@@ -888,14 +894,9 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> {
                                dep_node: DepNodeIndex,
                                result: EvaluationResult)
     {
-        // Avoid caching results that depend on more than just the trait-ref:
-        // The stack can create recursion, and closure signatures
-        // being yet uninferred can create "spurious" EvaluatedToAmbig
-        // and EvaluatedToOk.
-        if result.is_stack_dependent() ||
-            ((result == EvaluatedToAmbig || result == EvaluatedToOk)
-             && trait_ref.has_closure_types())
-        {
+        // Avoid caching results that depend on more than just the trait-ref
+        // - the stack can create recursion.
+        if result.is_stack_dependent() {
             return;
         }
 
@@ -955,15 +956,12 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> {
             this.candidate_from_obligation_no_cache(stack)
         });
 
-        if self.should_update_candidate_cache(&cache_fresh_trait_pred, &candidate) {
-            debug!("CACHE MISS: SELECT({:?})={:?}",
-                   cache_fresh_trait_pred, candidate);
-            self.insert_candidate_cache(stack.obligation.param_env,
-                                        cache_fresh_trait_pred,
-                                        dep_node,
-                                        candidate.clone());
-        }
-
+        debug!("CACHE MISS: SELECT({:?})={:?}",
+               cache_fresh_trait_pred, candidate);
+        self.insert_candidate_cache(stack.obligation.param_env,
+                                    cache_fresh_trait_pred,
+                                    dep_node,
+                                    candidate.clone());
         candidate
     }
 
@@ -1203,45 +1201,6 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> {
                                   .insert(trait_ref, WithDepNode::new(dep_node, candidate));
     }
 
-    fn should_update_candidate_cache(&mut self,
-                                     cache_fresh_trait_pred: &ty::PolyTraitPredicate<'tcx>,
-                                     candidate: &SelectionResult<'tcx, SelectionCandidate<'tcx>>)
-                                     -> bool
-    {
-        // In general, it's a good idea to cache results, even
-        // ambiguous ones, to save us some trouble later. But we have
-        // to be careful not to cache results that could be
-        // invalidated later by advances in inference. Normally, this
-        // is not an issue, because any inference variables whose
-        // types are not yet bound are "freshened" in the cache key,
-        // which means that if we later get the same request once that
-        // type variable IS bound, we'll have a different cache key.
-        // For example, if we have `Vec<_#0t> : Foo`, and `_#0t` is
-        // not yet known, we may cache the result as `None`. But if
-        // later `_#0t` is bound to `Bar`, then when we freshen we'll
-        // have `Vec<Bar> : Foo` as the cache key.
-        //
-        // HOWEVER, it CAN happen that we get an ambiguity result in
-        // one particular case around closures where the cache key
-        // would not change. That is when the precise types of the
-        // upvars that a closure references have not yet been figured
-        // out (i.e., because it is not yet known if they are captured
-        // by ref, and if by ref, what kind of ref). In these cases,
-        // when matching a builtin bound, we will yield back an
-        // ambiguous result. But the *cache key* is just the closure type,
-        // it doesn't capture the state of the upvar computation.
-        //
-        // To avoid this trap, just don't cache ambiguous results if
-        // the self-type contains no inference byproducts (that really
-        // shouldn't happen in other circumstances anyway, given
-        // coherence).
-
-        match *candidate {
-            Ok(Some(_)) | Err(_) => true,
-            Ok(None) => cache_fresh_trait_pred.has_infer_types()
-        }
-    }
-
     fn assemble_candidates<'o>(&mut self,
                                stack: &TraitObligationStack<'o, 'tcx>)
                                -> Result<SelectionCandidateSet<'tcx>, SelectionError<'tcx>>
diff --git a/src/librustc/ty/mod.rs b/src/librustc/ty/mod.rs
index 6597dccf25816..c7b51af341336 100644
--- a/src/librustc/ty/mod.rs
+++ b/src/librustc/ty/mod.rs
@@ -1015,6 +1015,10 @@ impl<'tcx> PolyProjectionPredicate<'tcx> {
         // levels.
         ty::Binder(self.0.projection_ty.trait_ref(tcx))
     }
+
+    pub fn ty(&self) -> Binder<Ty<'tcx>> {
+        Binder(self.skip_binder().ty) // preserves binding levels
+    }
 }
 
 pub trait ToPolyTraitRef<'tcx> {
diff --git a/src/librustc_data_structures/lib.rs b/src/librustc_data_structures/lib.rs
index da00ebc4b9ee9..47061883425e2 100644
--- a/src/librustc_data_structures/lib.rs
+++ b/src/librustc_data_structures/lib.rs
@@ -29,7 +29,6 @@
 #![feature(unsize)]
 #![feature(i128_type)]
 #![feature(conservative_impl_trait)]
-#![feature(discriminant_value)]
 #![feature(specialization)]
 
 #![cfg_attr(unix, feature(libc))]
diff --git a/src/librustc_metadata/lib.rs b/src/librustc_metadata/lib.rs
index f79abecf9da4b..f4e6f57c43777 100644
--- a/src/librustc_metadata/lib.rs
+++ b/src/librustc_metadata/lib.rs
@@ -21,7 +21,6 @@
 #![feature(quote)]
 #![feature(rustc_diagnostic_macros)]
 #![feature(specialization)]
-#![feature(discriminant_value)]
 #![feature(rustc_private)]
 
 #[macro_use]
diff --git a/src/librustc_trans/attributes.rs b/src/librustc_trans/attributes.rs
index 8863d4ea5ea8a..b6ca1460a7d0a 100644
--- a/src/librustc_trans/attributes.rs
+++ b/src/librustc_trans/attributes.rs
@@ -119,6 +119,8 @@ pub fn from_fn_attrs(ccx: &CrateContext, attrs: &[ast::Attribute], llfn: ValueRe
                 llvm::AttributePlace::ReturnValue(), llfn);
         } else if attr.check_name("unwind") {
             unwind(llfn, true);
+        } else if attr.check_name("rustc_allocator_nounwind") {
+            unwind(llfn, false);
         }
     }
     if !target_features.is_empty() {
diff --git a/src/librustc_trans/back/link.rs b/src/librustc_trans/back/link.rs
index 4e211d83cff3e..5de48fbce9da9 100644
--- a/src/librustc_trans/back/link.rs
+++ b/src/librustc_trans/back/link.rs
@@ -106,14 +106,32 @@ pub fn build_link_meta(incremental_hashes_map: &IncrementalHashesMap) -> LinkMet
 pub fn get_linker(sess: &Session) -> (String, Command, Vec<(OsString, OsString)>) {
     let envs = vec![("PATH".into(), command_path(sess))];
 
+    // If our linker looks like a batch script on Windows then to execute this
+    // we'll need to spawn `cmd` explicitly. This is primarily done to handle
+    // emscripten where the linker is `emcc.bat` and needs to be spawned as
+    // `cmd /c emcc.bat ...`.
+    //
+    // This worked historically but is needed manually since #42436 (regression
+    // was tagged as #42791) and some more info can be found on #44443 for
+    // emscripten itself.
+    let cmd = |linker: &str| {
+        if cfg!(windows) && linker.ends_with(".bat") {
+            let mut cmd = Command::new("cmd");
+            cmd.arg("/c").arg(linker);
+            cmd
+        } else {
+            Command::new(linker)
+        }
+    };
+
     if let Some(ref linker) = sess.opts.cg.linker {
-        (linker.clone(), Command::new(linker), envs)
+        (linker.clone(), cmd(linker), envs)
     } else if sess.target.target.options.is_like_msvc {
         let (cmd, envs) = msvc_link_exe_cmd(sess);
         ("link.exe".to_string(), cmd, envs)
     } else {
         let linker = &sess.target.target.options.linker;
-        (linker.clone(), Command::new(&linker), envs)
+        (linker.clone(), cmd(linker), envs)
     }
 }
 
diff --git a/src/libstd/lib.rs b/src/libstd/lib.rs
index b57067e35e9d9..95fdac315da21 100644
--- a/src/libstd/lib.rs
+++ b/src/libstd/lib.rs
@@ -276,7 +276,6 @@
 #![feature(macro_reexport)]
 #![feature(macro_vis_matcher)]
 #![feature(needs_panic_runtime)]
-#![feature(needs_drop)]
 #![feature(never_type)]
 #![feature(num_bits_bytes)]
 #![feature(old_wrapping)]
diff --git a/src/libstd/net/tcp.rs b/src/libstd/net/tcp.rs
index 2eabb46441b32..4d0ae13c65e3f 100644
--- a/src/libstd/net/tcp.rs
+++ b/src/libstd/net/tcp.rs
@@ -147,7 +147,7 @@ impl TcpStream {
     /// connection request.
     ///
     /// [`SocketAddr`]: ../../std/net/enum.SocketAddr.html
-    #[unstable(feature = "tcpstream_connect_timeout", issue = "43079")]
+    #[stable(feature = "tcpstream_connect_timeout", since = "1.21.0")]
     pub fn connect_timeout(addr: &SocketAddr, timeout: Duration) -> io::Result<TcpStream> {
         net_imp::TcpStream::connect_timeout(addr, timeout).map(TcpStream)
     }
diff --git a/src/libstd/os/raw.rs b/src/libstd/os/raw.rs
index c34491941d690..fe0427d4e5f9c 100644
--- a/src/libstd/os/raw.rs
+++ b/src/libstd/os/raw.rs
@@ -14,8 +14,7 @@
 
 use fmt;
 
-#[cfg(any(target_os = "emscripten",
-          all(target_os = "linux", any(target_arch = "aarch64",
+#[cfg(any(all(target_os = "linux", any(target_arch = "aarch64",
                                        target_arch = "arm",
                                        target_arch = "powerpc",
                                        target_arch = "powerpc64",
@@ -24,8 +23,7 @@ use fmt;
                                          target_arch = "arm")),
           all(target_os = "fuchsia", target_arch = "aarch64")))]
 #[stable(feature = "raw_os", since = "1.1.0")] pub type c_char = u8;
-#[cfg(not(any(target_os = "emscripten",
-              all(target_os = "linux", any(target_arch = "aarch64",
+#[cfg(not(any(all(target_os = "linux", any(target_arch = "aarch64",
                                            target_arch = "arm",
                                            target_arch = "powerpc",
                                            target_arch = "powerpc64",
diff --git a/src/libstd/sys/unix/fd.rs b/src/libstd/sys/unix/fd.rs
index 138087f165142..f50b093acc848 100644
--- a/src/libstd/sys/unix/fd.rs
+++ b/src/libstd/sys/unix/fd.rs
@@ -71,13 +71,21 @@ impl FileDesc {
         #[cfg(target_os = "android")]
         use super::android::cvt_pread64;
 
-        #[cfg(not(target_os = "android"))]
+        #[cfg(target_os = "emscripten")]
         unsafe fn cvt_pread64(fd: c_int, buf: *mut c_void, count: usize, offset: i64)
             -> io::Result<isize>
         {
-            #[cfg(any(target_os = "linux", target_os = "emscripten"))]
             use libc::pread64;
-            #[cfg(not(any(target_os = "linux", target_os = "emscripten")))]
+            cvt(pread64(fd, buf, count, offset as i32))
+        }
+
+        #[cfg(not(any(target_os = "android", target_os = "emscripten")))]
+        unsafe fn cvt_pread64(fd: c_int, buf: *mut c_void, count: usize, offset: i64)
+            -> io::Result<isize>
+        {
+            #[cfg(target_os = "linux")]
+            use libc::pread64;
+            #[cfg(not(target_os = "linux"))]
             use libc::pread as pread64;
             cvt(pread64(fd, buf, count, offset))
         }
@@ -104,13 +112,21 @@ impl FileDesc {
         #[cfg(target_os = "android")]
         use super::android::cvt_pwrite64;
 
-        #[cfg(not(target_os = "android"))]
+        #[cfg(target_os = "emscripten")]
+        unsafe fn cvt_pwrite64(fd: c_int, buf: *const c_void, count: usize, offset: i64)
+            -> io::Result<isize>
+        {
+            use libc::pwrite64;
+            cvt(pwrite64(fd, buf, count, offset as i32))
+        }
+
+        #[cfg(not(any(target_os = "android", target_os = "emscripten")))]
         unsafe fn cvt_pwrite64(fd: c_int, buf: *const c_void, count: usize, offset: i64)
             -> io::Result<isize>
         {
-            #[cfg(any(target_os = "linux", target_os = "emscripten"))]
+            #[cfg(target_os = "linux")]
             use libc::pwrite64;
-            #[cfg(not(any(target_os = "linux", target_os = "emscripten")))]
+            #[cfg(not(target_os = "linux"))]
             use libc::pwrite as pwrite64;
             cvt(pwrite64(fd, buf, count, offset))
         }
diff --git a/src/libstd/sys/unix/fs.rs b/src/libstd/sys/unix/fs.rs
index cb0f687e0721c..f94af4913324f 100644
--- a/src/libstd/sys/unix/fs.rs
+++ b/src/libstd/sys/unix/fs.rs
@@ -514,6 +514,8 @@ impl File {
             SeekFrom::End(off) => (libc::SEEK_END, off),
             SeekFrom::Current(off) => (libc::SEEK_CUR, off),
         };
+        #[cfg(target_os = "emscripten")]
+        let pos = pos as i32;
         let n = cvt(unsafe { lseek64(self.0.raw(), pos, whence) })?;
         Ok(n as u64)
     }
diff --git a/src/libstd/sys/unix/process/process_unix.rs b/src/libstd/sys/unix/process/process_unix.rs
index edd322ca6fa07..ae24021fb6c3a 100644
--- a/src/libstd/sys/unix/process/process_unix.rs
+++ b/src/libstd/sys/unix/process/process_unix.rs
@@ -10,7 +10,6 @@
 
 use io::{self, Error, ErrorKind};
 use libc::{self, c_int, gid_t, pid_t, uid_t};
-use mem;
 use ptr;
 
 use sys::cvt;
@@ -184,7 +183,9 @@ impl Command {
         }
 
         // NaCl has no signal support.
-        if cfg!(not(any(target_os = "nacl", target_os = "emscripten"))) {
+        #[cfg(not(any(target_os = "nacl", target_os = "emscripten")))]
+        {
+            use mem;
             // Reset signal handling so the child process starts in a
             // standardized state. libstd ignores SIGPIPE, and signal-handling
             // libraries often set a mask. Child processes inherit ignored
diff --git a/src/libsyntax/ext/expand.rs b/src/libsyntax/ext/expand.rs
index d1172b1b2ce94..171b0a22e9fc2 100644
--- a/src/libsyntax/ext/expand.rs
+++ b/src/libsyntax/ext/expand.rs
@@ -736,12 +736,7 @@ impl<'a, 'b> InvocationCollector<'a, 'b> {
                     item: Annotatable,
                     kind: ExpansionKind)
                     -> Expansion {
-        if !traits.is_empty() &&
-           (kind == ExpansionKind::TraitItems || kind == ExpansionKind::ImplItems) {
-            self.cx.span_err(traits[0].span, "`derive` can be only be applied to items");
-            return kind.expect_from_annotatables(::std::iter::once(item));
-        }
-        self.collect(kind, InvocationKind::Attr { attr: attr, traits: traits, item: item })
+        self.collect(kind, InvocationKind::Attr { attr, traits, item })
     }
 
     // If `item` is an attr invocation, remove and return the macro attribute.
diff --git a/src/stage0.txt b/src/stage0.txt
index e49b301abbf6b..76169f3725d5a 100644
--- a/src/stage0.txt
+++ b/src/stage0.txt
@@ -12,9 +12,9 @@
 # source tarball for a stable release you'll likely see `1.x.0` for rustc and
 # `0.x.0` for Cargo where they were released on `date`.
 
-date: 2017-07-18
-rustc: beta
-cargo: beta
+date: 2017-08-31
+rustc: 1.20.0
+cargo: 0.21.0
 
 # When making a stable release the process currently looks like:
 #
diff --git a/src/test/codegen/dealloc-no-unwind.rs b/src/test/codegen/dealloc-no-unwind.rs
new file mode 100644
index 0000000000000..551b66e103a11
--- /dev/null
+++ b/src/test/codegen/dealloc-no-unwind.rs
@@ -0,0 +1,32 @@
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+//
+// no-system-llvm
+// compile-flags: -O
+
+#![crate_type="lib"]
+
+struct A;
+
+impl Drop for A {
+    fn drop(&mut self) {
+        extern { fn foo(); }
+        unsafe { foo(); }
+    }
+}
+
+#[no_mangle]
+pub fn a(a: Box<i32>) {
+    // CHECK-LABEL: define void @a
+    // CHECK: call void @__rust_dealloc
+    // CHECK-NEXT: call void @foo
+    let _a = A;
+    drop(a);
+}
diff --git a/src/test/compile-fail/feature-gate-fn_must_use-cap-lints-allow.rs b/src/test/compile-fail/feature-gate-fn_must_use-cap-lints-allow.rs
new file mode 100644
index 0000000000000..1c04199c05f7c
--- /dev/null
+++ b/src/test/compile-fail/feature-gate-fn_must_use-cap-lints-allow.rs
@@ -0,0 +1,22 @@
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: --cap-lints allow
+
+// This tests that the fn_must_use feature-gate warning respects the lint
+// cap. (See discussion in Issue #44213.)
+
+#![feature(rustc_attrs)]
+
+#[must_use] // (no feature-gate warning because of the lint cap!)
+fn need_to_use_it() -> bool { true }
+
+#[rustc_error]
+fn main() {} //~ ERROR compilation successful
diff --git a/src/test/compile-fail/issue-43023.rs b/src/test/compile-fail/issue-43023.rs
new file mode 100644
index 0000000000000..6a5f7a1136aaa
--- /dev/null
+++ b/src/test/compile-fail/issue-43023.rs
@@ -0,0 +1,28 @@
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct S;
+
+impl S {
+    #[derive(Debug)] //~ ERROR `derive` may only be applied to structs, enums and unions
+    fn f() {
+        file!();
+    }
+}
+
+trait Tr1 {
+    #[derive(Debug)] //~ ERROR `derive` may only be applied to structs, enums and unions
+    fn f();
+}
+
+trait Tr2 {
+    #[derive(Debug)] //~ ERROR `derive` may only be applied to structs, enums and unions
+    type F;
+}
diff --git a/src/test/run-pass/discriminant_value-wrapper.rs b/src/test/run-pass/discriminant_value-wrapper.rs
index 2dbda0be18d98..d7a32423710fd 100644
--- a/src/test/run-pass/discriminant_value-wrapper.rs
+++ b/src/test/run-pass/discriminant_value-wrapper.rs
@@ -8,8 +8,6 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-#![feature(discriminant_value)]
-
 use std::mem;
 
 enum ADT {
diff --git a/src/tools/build-manifest/src/main.rs b/src/tools/build-manifest/src/main.rs
index 7a09ae48b91bd..e2be021e7cc39 100644
--- a/src/tools/build-manifest/src/main.rs
+++ b/src/tools/build-manifest/src/main.rs
@@ -239,7 +239,12 @@ impl Builder {
         self.package("rust-std", &mut manifest.pkg, TARGETS);
         self.package("rust-docs", &mut manifest.pkg, TARGETS);
         self.package("rust-src", &mut manifest.pkg, &["*"]);
-        self.package("rls", &mut manifest.pkg, HOSTS);
+        let rls_package_name = if self.rust_release == "nightly" {
+            "rls"
+        } else {
+            "rls-preview"
+        };
+        self.package(rls_package_name, &mut manifest.pkg, HOSTS);
         self.package("rust-analysis", &mut manifest.pkg, TARGETS);
 
         let mut pkg = Package {
@@ -276,7 +281,7 @@ impl Builder {
             }
 
             extensions.push(Component {
-                pkg: "rls".to_string(),
+                pkg: rls_package_name.to_string(),
                 target: host.to_string(),
             });
             extensions.push(Component {
@@ -353,7 +358,7 @@ impl Builder {
             format!("rust-src-{}.tar.gz", self.rust_release)
         } else if component == "cargo" {
             format!("cargo-{}-{}.tar.gz", self.cargo_release, target)
-        } else if component == "rls" {
+        } else if component == "rls" || component == "rls-preview" {
             format!("rls-{}-{}.tar.gz", self.rls_release, target)
         } else {
             format!("{}-{}-{}.tar.gz", component, self.rust_release, target)
@@ -363,7 +368,7 @@ impl Builder {
     fn cached_version(&self, component: &str) -> &str {
         if component == "cargo" {
             &self.cargo_version
-        } else if component == "rls" {
+        } else if component == "rls" || component == "rls-preview" {
             &self.rls_version
         } else {
             &self.rust_version
diff --git a/src/tools/rls b/src/tools/rls
index 25ffb3a3d7809..86ff0b1413ef4 160000
--- a/src/tools/rls
+++ b/src/tools/rls
@@ -1 +1 @@
-Subproject commit 25ffb3a3d7809b4fa112f3e04e926eb539dd5e90
+Subproject commit 86ff0b1413ef45aa05ccca968242ac1f09c12817