diff --git a/.travis.yml b/.travis.yml index 26cabf92bdac8..15b610833b0ef 100644 --- a/.travis.yml +++ b/.travis.yml @@ -15,9 +15,10 @@ matrix: # Linux builders, all docker images - env: IMAGE=arm-android - env: IMAGE=cross + - env: IMAGE=dist-arm-unknown-linux-gnueabi + - env: IMAGE=dist-x86_64-unknown-freebsd - env: IMAGE=i686-gnu - env: IMAGE=i686-gnu-nopt - - env: IMAGE=x86_64-freebsd - env: IMAGE=x86_64-gnu - env: IMAGE=x86_64-gnu-full-bootstrap - env: IMAGE=x86_64-gnu-aux diff --git a/src/bootstrap/Cargo.toml b/src/bootstrap/Cargo.toml index 35f8fb43f7b56..1eda1608c4709 100644 --- a/src/bootstrap/Cargo.toml +++ b/src/bootstrap/Cargo.toml @@ -6,18 +6,22 @@ version = "0.0.0" [lib] name = "bootstrap" path = "lib.rs" +doctest = false [[bin]] name = "bootstrap" path = "bin/main.rs" +test = false [[bin]] name = "rustc" path = "bin/rustc.rs" +test = false [[bin]] name = "rustdoc" path = "bin/rustdoc.rs" +test = false [dependencies] build_helper = { path = "../build_helper" } diff --git a/src/bootstrap/check.rs b/src/bootstrap/check.rs index 6db1afa54a625..f2fddf6e2ef3a 100644 --- a/src/bootstrap/check.rs +++ b/src/bootstrap/check.rs @@ -568,3 +568,14 @@ pub fn distcheck(build: &Build) { .arg("check") .current_dir(&dir)); } + +/// Test the build system itself +pub fn bootstrap(build: &Build) { + let mut cmd = Command::new(&build.cargo); + cmd.arg("test") + .current_dir(build.src.join("src/bootstrap")) + .env("CARGO_TARGET_DIR", build.out.join("bootstrap")) + .env("RUSTC", &build.rustc); + cmd.arg("--").args(&build.flags.cmd.test_args()); + build.run(&mut cmd); +} diff --git a/src/bootstrap/dist.rs b/src/bootstrap/dist.rs index 9b0c7a04d6be2..ad851e448ea7c 100644 --- a/src/bootstrap/dist.rs +++ b/src/bootstrap/dist.rs @@ -354,14 +354,9 @@ pub fn analysis(build: &Build, compiler: &Compiler, target: &str) { } /// Creates the `rust-src` installer component and the plain source tarball -pub fn rust_src(build: &Build, host: &str) { +pub fn rust_src(build: &Build) { println!("Dist src"); - if host != build.config.build { - println!("\tskipping, not a build host"); - return - } - let plain_name = format!("rustc-{}-src", package_vers(build)); let name = format!("rust-src-{}", package_vers(build)); let image = tmpdir(build).join(format!("{}-image", name)); diff --git a/src/bootstrap/doc.rs b/src/bootstrap/doc.rs index bbbf5cba8a1ab..42eae6d24f13a 100644 --- a/src/bootstrap/doc.rs +++ b/src/bootstrap/doc.rs @@ -57,12 +57,12 @@ pub fn rustbook(build: &Build, target: &str, name: &str) { /// `STAMP` alongw ith providing the various header/footer HTML we've cutomized. /// /// In the end, this is just a glorified wrapper around rustdoc! 
-pub fn standalone(build: &Build, stage: u32, target: &str) { - println!("Documenting stage{} standalone ({})", stage, target); +pub fn standalone(build: &Build, target: &str) { + println!("Documenting standalone ({})", target); let out = build.doc_out(target); t!(fs::create_dir_all(&out)); - let compiler = Compiler::new(stage, &build.config.build); + let compiler = Compiler::new(0, &build.config.build); let favicon = build.src.join("src/doc/favicon.inc"); let footer = build.src.join("src/doc/footer.inc"); diff --git a/src/bootstrap/step.rs b/src/bootstrap/step.rs index 6a81f759dc73f..bf815a817ed87 100644 --- a/src/bootstrap/step.rs +++ b/src/bootstrap/step.rs @@ -365,6 +365,8 @@ pub fn build_rules<'a>(build: &'a Build) -> Rules { suite("check-rpass-full", "src/test/run-pass-fulldeps", "run-pass", "run-pass-fulldeps"); + suite("check-rfail-full", "src/test/run-fail-fulldeps", + "run-fail", "run-fail-fulldeps"); suite("check-cfail-full", "src/test/compile-fail-fulldeps", "compile-fail", "compile-fail-fulldeps"); suite("check-rmake", "src/test/run-make", "run-make", "run-make"); @@ -459,6 +461,7 @@ pub fn build_rules<'a>(build: &'a Build) -> Rules { .dep(|s| s.name("tool-tidy").stage(0)) .default(true) .host(true) + .only_build(true) .run(move |s| check::tidy(build, s.target)); rules.test("check-error-index", "src/tools/error_index_generator") .dep(|s| s.name("libstd")) @@ -482,6 +485,12 @@ pub fn build_rules<'a>(build: &'a Build) -> Rules { .dep(|s| s.name("libtest")) .run(move |s| check::android_copy_libs(build, &s.compiler(), s.target)); + rules.test("check-bootstrap", "src/bootstrap") + .default(true) + .host(true) + .only_build(true) + .run(move |_| check::bootstrap(build)); + // ======================================================================== // Build tools // @@ -516,9 +525,14 @@ pub fn build_rules<'a>(build: &'a Build) -> Rules { .default(build.config.docs) .run(move |s| doc::rustbook(build, s.target, "nomicon")); rules.doc("doc-standalone", "src/doc") - .dep(move |s| s.name("rustc").host(&build.config.build).target(&build.config.build)) + .dep(move |s| { + s.name("rustc") + .host(&build.config.build) + .target(&build.config.build) + .stage(0) + }) .default(build.config.docs) - .run(move |s| doc::standalone(build, s.stage, s.target)); + .run(move |s| doc::standalone(build, s.target)); rules.doc("doc-error-index", "src/tools/error_index_generator") .dep(move |s| s.name("tool-error-index").target(&build.config.build).stage(0)) .dep(move |s| s.name("librustc-link").stage(0)) @@ -550,6 +564,7 @@ pub fn build_rules<'a>(build: &'a Build) -> Rules { rules.dist("dist-rustc", "src/librustc") .dep(move |s| s.name("rustc").host(&build.config.build)) .host(true) + .only_host_build(true) .default(true) .run(move |s| dist::rustc(build, s.stage, s.target)); rules.dist("dist-std", "src/libstd") @@ -564,9 +579,11 @@ pub fn build_rules<'a>(build: &'a Build) -> Rules { } }) .default(true) + .only_host_build(true) .run(move |s| dist::std(build, &s.compiler(), s.target)); rules.dist("dist-mingw", "path/to/nowhere") .default(true) + .only_host_build(true) .run(move |s| { if s.target.contains("pc-windows-gnu") { dist::mingw(build, s.target) @@ -575,14 +592,18 @@ pub fn build_rules<'a>(build: &'a Build) -> Rules { rules.dist("dist-src", "src") .default(true) .host(true) - .run(move |s| dist::rust_src(build, s.target)); + .only_build(true) + .only_host_build(true) + .run(move |_| dist::rust_src(build)); rules.dist("dist-docs", "src/doc") .default(true) + .only_host_build(true) .dep(|s| 
s.name("default:doc")) .run(move |s| dist::docs(build, s.stage, s.target)); rules.dist("dist-analysis", "analysis") .dep(|s| s.name("dist-std")) .default(true) + .only_host_build(true) .run(move |s| dist::analysis(build, &s.compiler(), s.target)); rules.dist("install", "src") .dep(|s| s.name("default:dist")) @@ -671,6 +692,14 @@ struct Rule<'a> { /// only intended for compiler hosts and not for targets that are being /// generated. host: bool, + + /// Whether this rule is only for steps where the host is the build triple, + /// not anything in hosts or targets. + only_host_build: bool, + + /// Whether this rule is only for the build triple, not anything in hosts or + /// targets. + only_build: bool, } #[derive(PartialEq)] @@ -692,6 +721,8 @@ impl<'a> Rule<'a> { kind: kind, default: false, host: false, + only_host_build: false, + only_build: false, } } } @@ -727,6 +758,16 @@ impl<'a, 'b> RuleBuilder<'a, 'b> { self.rule.host = host; self } + + fn only_build(&mut self, only_build: bool) -> &mut Self { + self.rule.only_build = only_build; + self + } + + fn only_host_build(&mut self, only_host_build: bool) -> &mut Self { + self.rule.only_host_build = only_host_build; + self + } } impl<'a, 'b> Drop for RuleBuilder<'a, 'b> { @@ -896,19 +937,12 @@ invalid rule dependency graph detected, was a rule added and maybe typo'd? path.ends_with(rule.path) }) }).flat_map(|rule| { - let hosts = if self.build.flags.host.len() > 0 { + let hosts = if rule.only_host_build || rule.only_build { + &self.build.config.host[..1] + } else if self.build.flags.host.len() > 0 { &self.build.flags.host } else { - if kind == Kind::Dist { - // For 'dist' steps we only distribute artifacts built from - // the build platform, so only consider that in the hosts - // array. - // NOTE: This relies on the fact that the build triple is - // always placed first, as done in `config.rs`. - &self.build.config.host[..1] - } else { - &self.build.config.host - } + &self.build.config.host }; let targets = if self.build.flags.target.len() > 0 { &self.build.flags.target @@ -928,6 +962,8 @@ invalid rule dependency graph detected, was a rule added and maybe typo'd? &self.build.flags.host[..] } else if self.build.flags.target.len() > 0 { &[] + } else if rule.only_build { + &self.build.config.host[..1] } else { &self.build.config.host[..] } @@ -955,12 +991,7 @@ invalid rule dependency graph detected, was a rule added and maybe typo'd? // Using `steps` as the top-level targets, make a topological ordering // of what we need to do. - let mut order = Vec::new(); - let mut added = HashSet::new(); - added.insert(Step::noop()); - for step in steps.iter().cloned() { - self.fill(step, &mut order, &mut added); - } + let order = self.expand(steps); // Print out what we're doing for debugging self.build.verbose("bootstrap build plan:"); @@ -979,6 +1010,18 @@ invalid rule dependency graph detected, was a rule added and maybe typo'd? } } + /// From the top level targets `steps` generate a topological ordering of + /// all steps needed to run those steps. + fn expand(&self, steps: &[Step<'a>]) -> Vec> { + let mut order = Vec::new(); + let mut added = HashSet::new(); + added.insert(Step::noop()); + for step in steps.iter().cloned() { + self.fill(step, &mut order, &mut added); + } + return order + } + /// Performs topological sort of dependencies rooted at the `step` /// specified, pushing all results onto the `order` vector provided. /// @@ -1015,3 +1058,367 @@ invalid rule dependency graph detected, was a rule added and maybe typo'd? 
order.push(step); } } + +#[cfg(test)] +mod tests { + use std::env; + + use Build; + use config::Config; + use flags::Flags; + + macro_rules! a { + ($($a:expr),*) => (vec![$($a.to_string()),*]) + } + + fn build(args: &[&str], + extra_host: &[&str], + extra_target: &[&str]) -> Build { + let mut args = args.iter().map(|s| s.to_string()).collect::>(); + args.push("--build".to_string()); + args.push("A".to_string()); + let flags = Flags::parse(&args); + + let mut config = Config::default(); + config.docs = true; + config.build = "A".to_string(); + config.host = vec![config.build.clone()]; + config.host.extend(extra_host.iter().map(|s| s.to_string())); + config.target = config.host.clone(); + config.target.extend(extra_target.iter().map(|s| s.to_string())); + + let mut build = Build::new(flags, config); + let cwd = env::current_dir().unwrap(); + build.crates.insert("std_shim".to_string(), ::Crate { + name: "std_shim".to_string(), + deps: Vec::new(), + path: cwd.join("src/std_shim"), + doc_step: "doc-std_shim".to_string(), + build_step: "build-crate-std_shim".to_string(), + test_step: "test-std_shim".to_string(), + bench_step: "bench-std_shim".to_string(), + }); + build.crates.insert("test_shim".to_string(), ::Crate { + name: "test_shim".to_string(), + deps: Vec::new(), + path: cwd.join("src/test_shim"), + doc_step: "doc-test_shim".to_string(), + build_step: "build-crate-test_shim".to_string(), + test_step: "test-test_shim".to_string(), + bench_step: "bench-test_shim".to_string(), + }); + build.crates.insert("rustc-main".to_string(), ::Crate { + name: "rustc-main".to_string(), + deps: Vec::new(), + path: cwd.join("src/rustc-main"), + doc_step: "doc-rustc-main".to_string(), + build_step: "build-crate-rustc-main".to_string(), + test_step: "test-rustc-main".to_string(), + bench_step: "bench-rustc-main".to_string(), + }); + return build + } + + #[test] + fn dist_baseline() { + let build = build(&["dist"], &[], &[]); + let rules = super::build_rules(&build); + let plan = rules.plan(); + println!("rules: {:#?}", plan); + assert!(plan.iter().all(|s| s.stage == 2)); + assert!(plan.iter().all(|s| s.host == "A" )); + assert!(plan.iter().all(|s| s.target == "A" )); + + let step = super::Step { + name: "", + stage: 2, + host: &build.config.build, + target: &build.config.build, + }; + + assert!(plan.contains(&step.name("dist-docs"))); + assert!(plan.contains(&step.name("dist-mingw"))); + assert!(plan.contains(&step.name("dist-rustc"))); + assert!(plan.contains(&step.name("dist-std"))); + assert!(plan.contains(&step.name("dist-src"))); + } + + #[test] + fn dist_with_targets() { + let build = build(&["dist"], &[], &["B"]); + let rules = super::build_rules(&build); + let plan = rules.plan(); + println!("rules: {:#?}", plan); + assert!(plan.iter().all(|s| s.stage == 2)); + assert!(plan.iter().all(|s| s.host == "A" )); + + let step = super::Step { + name: "", + stage: 2, + host: &build.config.build, + target: &build.config.build, + }; + + assert!(plan.contains(&step.name("dist-docs"))); + assert!(plan.contains(&step.name("dist-mingw"))); + assert!(plan.contains(&step.name("dist-rustc"))); + assert!(plan.contains(&step.name("dist-std"))); + assert!(plan.contains(&step.name("dist-src"))); + + assert!(plan.contains(&step.target("B").name("dist-docs"))); + assert!(plan.contains(&step.target("B").name("dist-mingw"))); + assert!(!plan.contains(&step.target("B").name("dist-rustc"))); + assert!(plan.contains(&step.target("B").name("dist-std"))); + assert!(!plan.contains(&step.target("B").name("dist-src"))); + } + + 
#[test] + fn dist_with_hosts() { + let build = build(&["dist"], &["B"], &[]); + let rules = super::build_rules(&build); + let plan = rules.plan(); + println!("rules: {:#?}", plan); + assert!(plan.iter().all(|s| s.stage == 2)); + + let step = super::Step { + name: "", + stage: 2, + host: &build.config.build, + target: &build.config.build, + }; + + assert!(!plan.iter().any(|s| s.host == "B")); + + assert!(plan.contains(&step.name("dist-docs"))); + assert!(plan.contains(&step.name("dist-mingw"))); + assert!(plan.contains(&step.name("dist-rustc"))); + assert!(plan.contains(&step.name("dist-std"))); + assert!(plan.contains(&step.name("dist-src"))); + + assert!(plan.contains(&step.target("B").name("dist-docs"))); + assert!(plan.contains(&step.target("B").name("dist-mingw"))); + assert!(plan.contains(&step.target("B").name("dist-rustc"))); + assert!(plan.contains(&step.target("B").name("dist-std"))); + assert!(!plan.contains(&step.target("B").name("dist-src"))); + } + + #[test] + fn dist_with_targets_and_hosts() { + let build = build(&["dist"], &["B"], &["C"]); + let rules = super::build_rules(&build); + let plan = rules.plan(); + println!("rules: {:#?}", plan); + assert!(plan.iter().all(|s| s.stage == 2)); + + let step = super::Step { + name: "", + stage: 2, + host: &build.config.build, + target: &build.config.build, + }; + + assert!(!plan.iter().any(|s| s.host == "B")); + assert!(!plan.iter().any(|s| s.host == "C")); + + assert!(plan.contains(&step.name("dist-docs"))); + assert!(plan.contains(&step.name("dist-mingw"))); + assert!(plan.contains(&step.name("dist-rustc"))); + assert!(plan.contains(&step.name("dist-std"))); + assert!(plan.contains(&step.name("dist-src"))); + + assert!(plan.contains(&step.target("B").name("dist-docs"))); + assert!(plan.contains(&step.target("B").name("dist-mingw"))); + assert!(plan.contains(&step.target("B").name("dist-rustc"))); + assert!(plan.contains(&step.target("B").name("dist-std"))); + assert!(!plan.contains(&step.target("B").name("dist-src"))); + + assert!(plan.contains(&step.target("C").name("dist-docs"))); + assert!(plan.contains(&step.target("C").name("dist-mingw"))); + assert!(!plan.contains(&step.target("C").name("dist-rustc"))); + assert!(plan.contains(&step.target("C").name("dist-std"))); + assert!(!plan.contains(&step.target("C").name("dist-src"))); + } + + #[test] + fn dist_target_with_target_flag() { + let build = build(&["dist", "--target=C"], &["B"], &["C"]); + let rules = super::build_rules(&build); + let plan = rules.plan(); + println!("rules: {:#?}", plan); + assert!(plan.iter().all(|s| s.stage == 2)); + + let step = super::Step { + name: "", + stage: 2, + host: &build.config.build, + target: &build.config.build, + }; + + assert!(!plan.iter().any(|s| s.target == "A")); + assert!(!plan.iter().any(|s| s.target == "B")); + assert!(!plan.iter().any(|s| s.host == "B")); + assert!(!plan.iter().any(|s| s.host == "C")); + + assert!(plan.contains(&step.target("C").name("dist-docs"))); + assert!(plan.contains(&step.target("C").name("dist-mingw"))); + assert!(!plan.contains(&step.target("C").name("dist-rustc"))); + assert!(plan.contains(&step.target("C").name("dist-std"))); + assert!(!plan.contains(&step.target("C").name("dist-src"))); + } + + #[test] + fn dist_host_with_target_flag() { + let build = build(&["dist", "--host=B", "--target=B"], &["B"], &["C"]); + let rules = super::build_rules(&build); + let plan = rules.plan(); + println!("rules: {:#?}", plan); + assert!(plan.iter().all(|s| s.stage == 2)); + + let step = super::Step { + name: "", + stage: 
2, + host: &build.config.build, + target: &build.config.build, + }; + + assert!(!plan.iter().any(|s| s.target == "A")); + assert!(!plan.iter().any(|s| s.target == "C")); + assert!(!plan.iter().any(|s| s.host == "B")); + assert!(!plan.iter().any(|s| s.host == "C")); + + assert!(plan.contains(&step.target("B").name("dist-docs"))); + assert!(plan.contains(&step.target("B").name("dist-mingw"))); + assert!(plan.contains(&step.target("B").name("dist-rustc"))); + assert!(plan.contains(&step.target("B").name("dist-std"))); + assert!(plan.contains(&step.target("B").name("dist-src"))); + + let all = rules.expand(&plan); + println!("all rules: {:#?}", all); + assert!(!all.contains(&step.name("rustc"))); + assert!(!all.contains(&step.name("build-crate-std_shim").stage(1))); + } + + #[test] + fn build_default() { + let build = build(&["build"], &["B"], &["C"]); + let rules = super::build_rules(&build); + let plan = rules.plan(); + println!("rules: {:#?}", plan); + assert!(plan.iter().all(|s| s.stage == 2)); + + let step = super::Step { + name: "", + stage: 2, + host: &build.config.build, + target: &build.config.build, + }; + + // rustc built for all for of (A, B) x (A, B) + assert!(plan.contains(&step.name("librustc"))); + assert!(plan.contains(&step.target("B").name("librustc"))); + assert!(plan.contains(&step.host("B").target("A").name("librustc"))); + assert!(plan.contains(&step.host("B").target("B").name("librustc"))); + + // rustc never built for C + assert!(!plan.iter().any(|s| { + s.name.contains("rustc") && (s.host == "C" || s.target == "C") + })); + + // test built for everything + assert!(plan.contains(&step.name("libtest"))); + assert!(plan.contains(&step.target("B").name("libtest"))); + assert!(plan.contains(&step.host("B").target("A").name("libtest"))); + assert!(plan.contains(&step.host("B").target("B").name("libtest"))); + assert!(plan.contains(&step.host("A").target("C").name("libtest"))); + assert!(plan.contains(&step.host("B").target("C").name("libtest"))); + + let all = rules.expand(&plan); + println!("all rules: {:#?}", all); + assert!(all.contains(&step.name("rustc"))); + assert!(all.contains(&step.name("libstd"))); + } + + #[test] + fn build_filtered() { + let build = build(&["build", "--target=C"], &["B"], &["C"]); + let rules = super::build_rules(&build); + let plan = rules.plan(); + println!("rules: {:#?}", plan); + assert!(plan.iter().all(|s| s.stage == 2)); + + assert!(!plan.iter().any(|s| s.name.contains("rustc"))); + assert!(plan.iter().all(|s| { + !s.name.contains("test_shim") || s.target == "C" + })); + } + + #[test] + fn test_default() { + let build = build(&["test"], &[], &[]); + let rules = super::build_rules(&build); + let plan = rules.plan(); + println!("rules: {:#?}", plan); + assert!(plan.iter().all(|s| s.stage == 2)); + assert!(plan.iter().all(|s| s.host == "A")); + assert!(plan.iter().all(|s| s.target == "A")); + + assert!(plan.iter().any(|s| s.name.contains("-ui"))); + assert!(plan.iter().any(|s| s.name.contains("cfail"))); + assert!(plan.iter().any(|s| s.name.contains("cfail"))); + assert!(plan.iter().any(|s| s.name.contains("cfail-full"))); + assert!(plan.iter().any(|s| s.name.contains("codegen-units"))); + assert!(plan.iter().any(|s| s.name.contains("debuginfo"))); + assert!(plan.iter().any(|s| s.name.contains("docs"))); + assert!(plan.iter().any(|s| s.name.contains("error-index"))); + assert!(plan.iter().any(|s| s.name.contains("incremental"))); + assert!(plan.iter().any(|s| s.name.contains("linkchecker"))); + assert!(plan.iter().any(|s| 
s.name.contains("mir-opt"))); + assert!(plan.iter().any(|s| s.name.contains("pfail"))); + assert!(plan.iter().any(|s| s.name.contains("rfail"))); + assert!(plan.iter().any(|s| s.name.contains("rfail-full"))); + assert!(plan.iter().any(|s| s.name.contains("rmake"))); + assert!(plan.iter().any(|s| s.name.contains("rpass"))); + assert!(plan.iter().any(|s| s.name.contains("rpass-full"))); + assert!(plan.iter().any(|s| s.name.contains("rustc-all"))); + assert!(plan.iter().any(|s| s.name.contains("rustdoc"))); + assert!(plan.iter().any(|s| s.name.contains("std-all"))); + assert!(plan.iter().any(|s| s.name.contains("test-all"))); + assert!(plan.iter().any(|s| s.name.contains("tidy"))); + assert!(plan.iter().any(|s| s.name.contains("valgrind"))); + } + + #[test] + fn test_with_a_target() { + let build = build(&["test", "--target=C"], &[], &["C"]); + let rules = super::build_rules(&build); + let plan = rules.plan(); + println!("rules: {:#?}", plan); + assert!(plan.iter().all(|s| s.stage == 2)); + assert!(plan.iter().all(|s| s.host == "A")); + assert!(plan.iter().all(|s| s.target == "C")); + + assert!(plan.iter().any(|s| s.name.contains("-ui"))); + assert!(plan.iter().any(|s| s.name.contains("cfail"))); + assert!(plan.iter().any(|s| s.name.contains("cfail"))); + assert!(!plan.iter().any(|s| s.name.contains("cfail-full"))); + assert!(plan.iter().any(|s| s.name.contains("codegen-units"))); + assert!(plan.iter().any(|s| s.name.contains("debuginfo"))); + assert!(!plan.iter().any(|s| s.name.contains("docs"))); + assert!(!plan.iter().any(|s| s.name.contains("error-index"))); + assert!(plan.iter().any(|s| s.name.contains("incremental"))); + assert!(!plan.iter().any(|s| s.name.contains("linkchecker"))); + assert!(plan.iter().any(|s| s.name.contains("mir-opt"))); + assert!(plan.iter().any(|s| s.name.contains("pfail"))); + assert!(plan.iter().any(|s| s.name.contains("rfail"))); + assert!(!plan.iter().any(|s| s.name.contains("rfail-full"))); + assert!(!plan.iter().any(|s| s.name.contains("rmake"))); + assert!(plan.iter().any(|s| s.name.contains("rpass"))); + assert!(!plan.iter().any(|s| s.name.contains("rpass-full"))); + assert!(!plan.iter().any(|s| s.name.contains("rustc-all"))); + assert!(!plan.iter().any(|s| s.name.contains("rustdoc"))); + assert!(plan.iter().any(|s| s.name.contains("std-all"))); + assert!(plan.iter().any(|s| s.name.contains("test-all"))); + assert!(!plan.iter().any(|s| s.name.contains("tidy"))); + assert!(plan.iter().any(|s| s.name.contains("valgrind"))); + } +} diff --git a/src/ci/docker/dist-arm-unknown-linux-gnueabi/Dockerfile b/src/ci/docker/dist-arm-unknown-linux-gnueabi/Dockerfile new file mode 100644 index 0000000000000..9b0f1b7a0a763 --- /dev/null +++ b/src/ci/docker/dist-arm-unknown-linux-gnueabi/Dockerfile @@ -0,0 +1,30 @@ +FROM ubuntu:16.04 + +RUN apt-get update && apt-get install -y --no-install-recommends \ + g++ \ + make \ + file \ + curl \ + ca-certificates \ + python2.7 \ + git \ + cmake \ + sudo \ + gdb \ + xz-utils \ + g++-arm-linux-gnueabi + +ENV SCCACHE_DIGEST=7237e38e029342fa27b7ac25412cb9d52554008b12389727320bd533fd7f05b6a96d55485f305caf95e5c8f5f97c3313e10012ccad3e752aba2518f3522ba783 +RUN curl -L https://api.pub.build.mozilla.org/tooltool/sha512/$SCCACHE_DIGEST | \ + tar xJf - -C /usr/local/bin --strip-components=1 + +RUN curl -OL https://github.com/Yelp/dumb-init/releases/download/v1.2.0/dumb-init_1.2.0_amd64.deb && \ + dpkg -i dumb-init_*.deb && \ + rm dumb-init_*.deb +ENTRYPOINT ["/usr/bin/dumb-init", "--"] + +ENV 
RUST_CONFIGURE_ARGS --host=arm-unknown-linux-gnueabi +ENV XPY_RUN \ + dist \ + --host arm-unknown-linux-gnueabi \ + --target arm-unknown-linux-gnueabi diff --git a/src/ci/docker/x86_64-freebsd/Dockerfile b/src/ci/docker/dist-x86_64-unknown-freebsd/Dockerfile similarity index 74% rename from src/ci/docker/x86_64-freebsd/Dockerfile rename to src/ci/docker/dist-x86_64-unknown-freebsd/Dockerfile index 86efa74ba3b6f..f1a6ccf9ebcf4 100644 --- a/src/ci/docker/x86_64-freebsd/Dockerfile +++ b/src/ci/docker/dist-x86_64-unknown-freebsd/Dockerfile @@ -28,7 +28,11 @@ RUN curl -L https://api.pub.build.mozilla.org/tooltool/sha512/$SCCACHE_DIGEST | ENV \ AR_x86_64_unknown_freebsd=x86_64-unknown-freebsd10-ar \ - CC_x86_64_unknown_freebsd=x86_64-unknown-freebsd10-gcc + CC_x86_64_unknown_freebsd=x86_64-unknown-freebsd10-gcc \ + CXX_x86_64_unknown_freebsd=x86_64-unknown-freebsd10-g++ -ENV RUST_CONFIGURE_ARGS --target=x86_64-unknown-freebsd -ENV RUST_CHECK_TARGET "" +ENV RUST_CONFIGURE_ARGS --host=x86_64-unknown-freebsd +ENV XPY_RUN \ + dist \ + --host x86_64-unknown-freebsd \ + --target x86_64-unknown-freebsd diff --git a/src/ci/docker/x86_64-freebsd/build-toolchain.sh b/src/ci/docker/dist-x86_64-unknown-freebsd/build-toolchain.sh similarity index 98% rename from src/ci/docker/x86_64-freebsd/build-toolchain.sh rename to src/ci/docker/dist-x86_64-unknown-freebsd/build-toolchain.sh index d4bc886d50ea4..0fd6beaf4c1c0 100644 --- a/src/ci/docker/x86_64-freebsd/build-toolchain.sh +++ b/src/ci/docker/dist-x86_64-unknown-freebsd/build-toolchain.sh @@ -77,7 +77,7 @@ cd gcc-$GCC mkdir ../gcc-build cd ../gcc-build ../gcc-$GCC/configure \ - --enable-languages=c \ + --enable-languages=c,c++ \ --target=$ARCH-unknown-freebsd10 \ --disable-multilib \ --disable-nls \ diff --git a/src/libcore/marker.rs b/src/libcore/marker.rs index 9af10966eda4b..ed01b93f1335a 100644 --- a/src/libcore/marker.rs +++ b/src/libcore/marker.rs @@ -234,12 +234,10 @@ pub trait Unsize { /// Generalizing the latter case, any type implementing [`Drop`] can't be `Copy`, because it's /// managing some resource besides its own [`size_of::()`] bytes. /// -/// If you try to implement `Copy` on a struct or enum containing non-`Copy` data, you will get a -/// compile-time error. Specifically, with structs you'll get [E0204] and with enums you'll get -/// [E0205]. +/// If you try to implement `Copy` on a struct or enum containing non-`Copy` data, you will get +/// the error [E0204]. /// /// [E0204]: ../../error-index.html#E0204 -/// [E0205]: ../../error-index.html#E0205 /// /// ## When *should* my type be `Copy`? /// diff --git a/src/liblibc b/src/liblibc index 98589876259e1..7d57bdcdbb565 160000 --- a/src/liblibc +++ b/src/liblibc @@ -1 +1 @@ -Subproject commit 98589876259e19f13eab81b033ced95bbb6deca0 +Subproject commit 7d57bdcdbb56540f37afe5a934ce12d33a6ca7fc diff --git a/src/librustc/lint/builtin.rs b/src/librustc/lint/builtin.rs index 667c2590fa996..96fb168581b21 100644 --- a/src/librustc/lint/builtin.rs +++ b/src/librustc/lint/builtin.rs @@ -70,6 +70,12 @@ declare_lint! { "detects unreachable code paths" } +declare_lint! { + pub UNREACHABLE_PATTERNS, + Warn, + "detects unreachable patterns" +} + declare_lint! 
{ pub WARNINGS, Warn, @@ -239,6 +245,7 @@ impl LintPass for HardwiredLints { UNUSED_ASSIGNMENTS, DEAD_CODE, UNREACHABLE_CODE, + UNREACHABLE_PATTERNS, WARNINGS, UNUSED_FEATURES, STABLE_FEATURES, diff --git a/src/librustc/middle/stability.rs b/src/librustc/middle/stability.rs index f45e86f2f4b96..e6c9d2c36d013 100644 --- a/src/librustc/middle/stability.rs +++ b/src/librustc/middle/stability.rs @@ -18,7 +18,7 @@ use hir::map as hir_map; use lint; use hir::def::Def; use hir::def_id::{CrateNum, CRATE_DEF_INDEX, DefId, DefIndex, LOCAL_CRATE}; -use ty::TyCtxt; +use ty::{self, TyCtxt}; use middle::privacy::AccessLevels; use syntax::symbol::Symbol; use syntax_pos::{Span, DUMMY_SP}; @@ -432,6 +432,36 @@ struct Checker<'a, 'tcx: 'a> { } impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { + // (See issue #38412) + fn skip_stability_check_due_to_privacy(self, def_id: DefId) -> bool { + let visibility = { + // Check if `def_id` is a trait method. + match self.sess.cstore.associated_item(def_id) { + Some(ty::AssociatedItem { container: ty::TraitContainer(trait_def_id), .. }) => { + // Trait methods do not declare visibility (even + // for visibility info in cstore). Use containing + // trait instead, so methods of pub traits are + // themselves considered pub. + self.sess.cstore.visibility(trait_def_id) + } + _ => { + // Otherwise, cstore info works directly. + self.sess.cstore.visibility(def_id) + } + } + }; + + match visibility { + // must check stability for pub items. + ty::Visibility::Public => false, + + // these are not visible outside crate; therefore + // stability markers are irrelevant, if even present. + ty::Visibility::Restricted(..) | + ty::Visibility::Invisible => true, + } + } + pub fn check_stability(self, def_id: DefId, id: NodeId, span: Span) { if self.sess.codemap().span_allows_unstable(span) { debug!("stability: \ @@ -492,6 +522,11 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { self.stability.borrow_mut().used_features.insert(feature.clone(), level.clone()); } + // Issue 38412: private items lack stability markers. + if self.skip_stability_check_due_to_privacy(def_id) { + return + } + match stability { Some(&Stability { level: attr::Unstable {ref reason, issue}, ref feature, .. 
}) => { if !self.stability.borrow().active_features.contains(feature) { diff --git a/src/librustc/mir/mod.rs b/src/librustc/mir/mod.rs index 3cd3580473292..1d0e95245ea2d 100644 --- a/src/librustc/mir/mod.rs +++ b/src/librustc/mir/mod.rs @@ -888,6 +888,10 @@ impl<'tcx> Lvalue<'tcx> { self.elem(ProjectionElem::Deref) } + pub fn downcast(self, adt_def: &'tcx AdtDef, variant_index: usize) -> Lvalue<'tcx> { + self.elem(ProjectionElem::Downcast(adt_def, variant_index)) + } + pub fn index(self, index: Operand<'tcx>) -> Lvalue<'tcx> { self.elem(ProjectionElem::Index(index)) } diff --git a/src/librustc/ty/context.rs b/src/librustc/ty/context.rs index f58d7dcb45f30..644df8741e853 100644 --- a/src/librustc/ty/context.rs +++ b/src/librustc/ty/context.rs @@ -33,6 +33,7 @@ use ty::{BareFnTy, InferTy, ParamTy, ProjectionTy, ExistentialPredicate}; use ty::{TyVar, TyVid, IntVar, IntVid, FloatVar, FloatVid}; use ty::TypeVariants::*; use ty::layout::{Layout, TargetDataLayout}; +use ty::inhabitedness::DefIdForest; use ty::maps; use util::common::MemoizationMap; use util::nodemap::{NodeMap, NodeSet, DefIdMap, DefIdSet}; @@ -459,6 +460,8 @@ pub struct GlobalCtxt<'tcx> { // FIXME dep tracking -- should be harmless enough pub normalized_cache: RefCell<FxHashMap<Ty<'tcx>, Ty<'tcx>>>, + pub inhabitedness_cache: RefCell<FxHashMap<Ty<'tcx>, DefIdForest>>, + pub lang_items: middle::lang_items::LanguageItems, /// Maps from def-id of a type or region parameter to its @@ -760,6 +763,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { associated_item_def_ids: RefCell::new(DepTrackingMap::new(dep_graph.clone())), ty_param_defs: RefCell::new(NodeMap()), normalized_cache: RefCell::new(FxHashMap()), + inhabitedness_cache: RefCell::new(FxHashMap()), lang_items: lang_items, inherent_impls: RefCell::new(DepTrackingMap::new(dep_graph.clone())), used_unsafe: RefCell::new(NodeSet()), diff --git a/src/librustc/ty/inhabitedness/def_id_forest.rs b/src/librustc/ty/inhabitedness/def_id_forest.rs new file mode 100644 index 0000000000000..16bc65603f135 --- /dev/null +++ b/src/librustc/ty/inhabitedness/def_id_forest.rs @@ -0,0 +1,133 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::mem; +use rustc_data_structures::small_vec::SmallVec; +use syntax::ast::CRATE_NODE_ID; +use ty::context::TyCtxt; +use ty::{DefId, DefIdTree}; + +/// Represents a forest of DefIds closed under the ancestor relation. That is, +/// if a DefId representing a module is contained in the forest then all +/// DefIds defined in that module or submodules are also implicitly contained +/// in the forest. +/// +/// This is used to represent a set of modules in which a type is visibly +/// uninhabited. +#[derive(Clone)] +pub struct DefIdForest { + /// The minimal set of DefIds required to represent the whole set. + /// If A and B are DefIds in the DefIdForest, and A is a descendant + /// of B, then only B will be in root_ids. + /// We use a SmallVec here because (for its use for caching inhabitedness) + /// it's rare that this will contain even two ids. + root_ids: SmallVec<[DefId; 1]>, +} + +impl<'a, 'gcx, 'tcx> DefIdForest { + /// Create an empty forest.
+ pub fn empty() -> DefIdForest { + DefIdForest { + root_ids: SmallVec::new(), + } + } + + /// Create a forest consisting of a single tree representing the entire + /// crate. + #[inline] + pub fn full(tcx: TyCtxt<'a, 'gcx, 'tcx>) -> DefIdForest { + let crate_id = tcx.map.local_def_id(CRATE_NODE_ID); + DefIdForest::from_id(crate_id) + } + + /// Create a forest containing a DefId and all its descendants. + pub fn from_id(id: DefId) -> DefIdForest { + let mut root_ids = SmallVec::new(); + root_ids.push(id); + DefIdForest { + root_ids: root_ids, + } + } + + /// Test whether the forest is empty. + pub fn is_empty(&self) -> bool { + self.root_ids.is_empty() + } + + /// Test whether the forest contains a given DefId. + pub fn contains(&self, + tcx: TyCtxt<'a, 'gcx, 'tcx>, + id: DefId) -> bool + { + for root_id in self.root_ids.iter() { + if tcx.is_descendant_of(id, *root_id) { + return true; + } + } + false + } + + /// Calculate the intersection of a collection of forests. + pub fn intersection(tcx: TyCtxt<'a, 'gcx, 'tcx>, + iter: I) -> DefIdForest + where I: IntoIterator<Item=DefIdForest> + { + let mut ret = DefIdForest::full(tcx); + let mut next_ret = SmallVec::new(); + let mut old_ret: SmallVec<[DefId; 1]> = SmallVec::new(); + for next_forest in iter { + for id in ret.root_ids.drain(..) { + if next_forest.contains(tcx, id) { + next_ret.push(id); + } else { + old_ret.push(id); + } + } + ret.root_ids.extend(old_ret.drain(..)); + + for id in next_forest.root_ids { + if ret.contains(tcx, id) { + next_ret.push(id); + } + } + + mem::swap(&mut next_ret, &mut ret.root_ids); + next_ret.drain(..); + } + ret + } + + /// Calculate the union of a collection of forests. + pub fn union(tcx: TyCtxt<'a, 'gcx, 'tcx>, + iter: I) -> DefIdForest + where I: IntoIterator<Item=DefIdForest> + { + let mut ret = DefIdForest::empty(); + let mut next_ret = SmallVec::new(); + for next_forest in iter { + for id in ret.root_ids.drain(..) { + if !next_forest.contains(tcx, id) { + next_ret.push(id); + } + } + + for id in next_forest.root_ids { + if !next_ret.contains(&id) { + next_ret.push(id); + } + } + + mem::swap(&mut next_ret, &mut ret.root_ids); + next_ret.drain(..); + } + ret + } +} + diff --git a/src/librustc/ty/inhabitedness/mod.rs b/src/librustc/ty/inhabitedness/mod.rs new file mode 100644 index 0000000000000..c5b75839e99b7 --- /dev/null +++ b/src/librustc/ty/inhabitedness/mod.rs @@ -0,0 +1,199 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use util::nodemap::FxHashSet; +use ty::context::TyCtxt; +use ty::{AdtDef, VariantDef, FieldDef, TyS}; +use ty::{DefId, Substs}; +use ty::{AdtKind, Visibility}; +use ty::TypeVariants::*; + +pub use self::def_id_forest::DefIdForest; + +mod def_id_forest; + +// The methods in this module calculate DefIdForests of modules in which an +// AdtDef/VariantDef/FieldDef is visibly uninhabited.
+// +// # Example +// ```rust +// enum Void {} +// mod a { +// pub mod b { +// pub struct SecretlyUninhabited { +// _priv: !, +// } +// } +// } +// +// mod c { +// pub struct AlsoSecretlyUninhabited { +// _priv: Void, +// } +// mod d { +// } +// } +// +// struct Foo { +// x: a::b::SecretlyUninhabited, +// y: c::AlsoSecretlyUninhabited, +// } +// ``` +// In this code, the type Foo will only be visibly uninhabited inside the +// modules b, c and d. Calling uninhabited_from on Foo or its AdtDef will +// return the forest of modules {b, c->d} (represented in a DefIdForest by the +// set {b, c}) +// +// We need this information for pattern-matching on Foo or types that contain +// Foo. +// +// # Example +// ```rust +// let foo_result: Result = ... ; +// let Ok(t) = foo_result; +// ``` +// This code should only compile in modules where the uninhabitedness of Foo is +// visible. + +impl<'a, 'gcx, 'tcx> AdtDef { + /// Calculate the forest of DefIds from which this adt is visibly uninhabited. + pub fn uninhabited_from( + &self, + visited: &mut FxHashSet<(DefId, &'tcx Substs<'tcx>)>, + tcx: TyCtxt<'a, 'gcx, 'tcx>, + substs: &'tcx Substs<'tcx>) -> DefIdForest + { + if !visited.insert((self.did, substs)) { + return DefIdForest::empty(); + } + + let ret = DefIdForest::intersection(tcx, self.variants.iter().map(|v| { + v.uninhabited_from(visited, tcx, substs, self.adt_kind()) + })); + visited.remove(&(self.did, substs)); + ret + } +} + +impl<'a, 'gcx, 'tcx> VariantDef { + /// Calculate the forest of DefIds from which this variant is visibly uninhabited. + pub fn uninhabited_from( + &self, + visited: &mut FxHashSet<(DefId, &'tcx Substs<'tcx>)>, + tcx: TyCtxt<'a, 'gcx, 'tcx>, + substs: &'tcx Substs<'tcx>, + adt_kind: AdtKind) -> DefIdForest + { + match adt_kind { + AdtKind::Union => { + DefIdForest::intersection(tcx, self.fields.iter().map(|f| { + f.uninhabited_from(visited, tcx, substs, false) + })) + }, + AdtKind::Struct => { + DefIdForest::union(tcx, self.fields.iter().map(|f| { + f.uninhabited_from(visited, tcx, substs, false) + })) + }, + AdtKind::Enum => { + DefIdForest::union(tcx, self.fields.iter().map(|f| { + f.uninhabited_from(visited, tcx, substs, true) + })) + }, + } + } +} + +impl<'a, 'gcx, 'tcx> FieldDef { + /// Calculate the forest of DefIds from which this field is visibly uninhabited. + pub fn uninhabited_from( + &self, + visited: &mut FxHashSet<(DefId, &'tcx Substs<'tcx>)>, + tcx: TyCtxt<'a, 'gcx, 'tcx>, + substs: &'tcx Substs<'tcx>, + is_enum: bool) -> DefIdForest + { + let mut data_uninhabitedness = move || self.ty(tcx, substs).uninhabited_from(visited, tcx); + // FIXME(canndrew): Currently enum fields are (incorrectly) stored with + // Visibility::Invisible so we need to override self.vis if we're + // dealing with an enum. + if is_enum { + data_uninhabitedness() + } else { + match self.vis { + Visibility::Invisible => DefIdForest::empty(), + Visibility::Restricted(from) => { + let forest = DefIdForest::from_id(from); + let iter = Some(forest).into_iter().chain(Some(data_uninhabitedness())); + DefIdForest::intersection(tcx, iter) + }, + Visibility::Public => data_uninhabitedness(), + } + } + } +} + +impl<'a, 'gcx, 'tcx> TyS<'tcx> { + /// Calculate the forest of DefIds from which this type is visibly uninhabited. 
+ pub fn uninhabited_from( + &self, + visited: &mut FxHashSet<(DefId, &'tcx Substs<'tcx>)>, + tcx: TyCtxt<'a, 'gcx, 'tcx>) -> DefIdForest + { + match tcx.lift_to_global(&self) { + Some(global_ty) => { + { + let cache = tcx.inhabitedness_cache.borrow(); + if let Some(forest) = cache.get(&global_ty) { + return forest.clone(); + } + } + let forest = global_ty.uninhabited_from_inner(visited, tcx); + let mut cache = tcx.inhabitedness_cache.borrow_mut(); + cache.insert(global_ty, forest.clone()); + forest + }, + None => { + let forest = self.uninhabited_from_inner(visited, tcx); + forest + }, + } + } + + fn uninhabited_from_inner( + &self, + visited: &mut FxHashSet<(DefId, &'tcx Substs<'tcx>)>, + tcx: TyCtxt<'a, 'gcx, 'tcx>) -> DefIdForest + { + match self.sty { + TyAdt(def, substs) => { + def.uninhabited_from(visited, tcx, substs) + }, + + TyNever => DefIdForest::full(tcx), + TyTuple(ref tys) => { + DefIdForest::union(tcx, tys.iter().map(|ty| { + ty.uninhabited_from(visited, tcx) + })) + }, + TyArray(ty, len) => { + if len == 0 { + DefIdForest::empty() + } else { + ty.uninhabited_from(visited, tcx) + } + } + TyRef(_, ref tm) => tm.ty.uninhabited_from(visited, tcx), + + _ => DefIdForest::empty(), + } + } +} + diff --git a/src/librustc/ty/mod.rs b/src/librustc/ty/mod.rs index 3e33d246b6d18..fa62e893a2875 100644 --- a/src/librustc/ty/mod.rs +++ b/src/librustc/ty/mod.rs @@ -29,7 +29,7 @@ use ty; use ty::subst::{Subst, Substs}; use ty::walk::TypeWalker; use util::common::MemoizationMap; -use util::nodemap::{NodeSet, NodeMap, FxHashMap, FxHashSet}; +use util::nodemap::{NodeSet, NodeMap, FxHashMap}; use serialize::{self, Encodable, Encoder}; use std::borrow::Cow; @@ -78,6 +78,7 @@ pub mod cast; pub mod error; pub mod fast_reject; pub mod fold; +pub mod inhabitedness; pub mod item_path; pub mod layout; pub mod _match; @@ -226,6 +227,20 @@ pub enum Visibility { pub trait DefIdTree: Copy { fn parent(self, id: DefId) -> Option<DefId>; + + fn is_descendant_of(self, mut descendant: DefId, ancestor: DefId) -> bool { + if descendant.krate != ancestor.krate { + return false; + } + + while descendant != ancestor { + match self.parent(descendant) { + Some(parent) => descendant = parent, + None => return false, + } + } + true + } } impl<'a, 'gcx, 'tcx> DefIdTree for TyCtxt<'a, 'gcx, 'tcx> { @@ -252,7 +267,7 @@ impl Visibility { } /// Returns true if an item with this visibility is accessible from the given block. - pub fn is_accessible_from<T: DefIdTree>(self, mut module: DefId, tree: T) -> bool { + pub fn is_accessible_from<T: DefIdTree>(self, module: DefId, tree: T) -> bool { let restriction = match self { // Public items are visible everywhere.
Visibility::Public => return true, @@ -263,14 +278,7 @@ impl Visibility { Visibility::Restricted(module) => module, }; - while module != restriction { - match tree.parent(module) { - Some(parent) => module = parent, - None => return false, - } - } - - true + tree.is_descendant_of(module, restriction) } /// Returns true if this visibility is at least as accessible as the given visibility @@ -1406,20 +1414,6 @@ impl<'a, 'gcx, 'tcx> AdtDef { self.flags.set(self.flags.get() | AdtFlags::IS_DTORCK_VALID) } - #[inline] - pub fn is_uninhabited_recurse(&self, - visited: &mut FxHashSet<(DefId, &'tcx Substs<'tcx>)>, - block: Option, - tcx: TyCtxt<'a, 'gcx, 'tcx>, - substs: &'tcx Substs<'tcx>) -> bool { - if !visited.insert((self.did, substs)) { - return false; - }; - self.variants.iter().all(|v| { - v.is_uninhabited_recurse(visited, block, tcx, substs, self.is_union()) - }) - } - #[inline] pub fn is_struct(&self) -> bool { !self.is_union() && !self.is_enum() @@ -1754,36 +1748,12 @@ impl<'a, 'gcx, 'tcx> VariantDef { pub fn field_named(&self, name: ast::Name) -> &FieldDef { self.find_field_named(name).unwrap() } - - #[inline] - pub fn is_uninhabited_recurse(&self, - visited: &mut FxHashSet<(DefId, &'tcx Substs<'tcx>)>, - block: Option, - tcx: TyCtxt<'a, 'gcx, 'tcx>, - substs: &'tcx Substs<'tcx>, - is_union: bool) -> bool { - if is_union { - self.fields.iter().all(|f| f.is_uninhabited_recurse(visited, block, tcx, substs)) - } else { - self.fields.iter().any(|f| f.is_uninhabited_recurse(visited, block, tcx, substs)) - } - } } impl<'a, 'gcx, 'tcx> FieldDef { pub fn ty(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, subst: &Substs<'tcx>) -> Ty<'tcx> { tcx.item_type(self.did).subst(tcx, subst) } - - #[inline] - pub fn is_uninhabited_recurse(&self, - visited: &mut FxHashSet<(DefId, &'tcx Substs<'tcx>)>, - block: Option, - tcx: TyCtxt<'a, 'gcx, 'tcx>, - substs: &'tcx Substs<'tcx>) -> bool { - block.map_or(true, |b| tcx.vis_is_accessible_from(self.vis, b)) && - self.ty(tcx, substs).is_uninhabited_recurse(visited, block, tcx) - } } /// Records the substitutions used to translate the polytype for an diff --git a/src/librustc/ty/sty.rs b/src/librustc/ty/sty.rs index 638345608c2f5..81b0a55841ad8 100644 --- a/src/librustc/ty/sty.rs +++ b/src/librustc/ty/sty.rs @@ -22,7 +22,7 @@ use std::fmt; use std::iter; use std::cmp::Ordering; use syntax::abi; -use syntax::ast::{self, Name, NodeId}; +use syntax::ast::{self, Name}; use syntax::symbol::{keywords, InternedString}; use util::nodemap::FxHashSet; @@ -979,29 +979,52 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { } } - /// Checks whether a type is uninhabited. - /// If `block` is `Some(id)` it also checks that the uninhabited-ness is visible from `id`. - pub fn is_uninhabited(&self, block: Option, cx: TyCtxt<'a, 'gcx, 'tcx>) -> bool { + /// Checks whether a type is visibly uninhabited from a particular module. + /// # Example + /// ```rust + /// enum Void {} + /// mod a { + /// pub mod b { + /// pub struct SecretlyUninhabited { + /// _priv: !, + /// } + /// } + /// } + /// + /// mod c { + /// pub struct AlsoSecretlyUninhabited { + /// _priv: Void, + /// } + /// mod d { + /// } + /// } + /// + /// struct Foo { + /// x: a::b::SecretlyUninhabited, + /// y: c::AlsoSecretlyUninhabited, + /// } + /// ``` + /// In this code, the type `Foo` will only be visibly uninhabited inside the + /// modules b, c and d. This effects pattern-matching on `Foo` or types that + /// contain `Foo`. + /// + /// # Example + /// ```rust + /// let foo_result: Result = ... 
; + /// let Ok(t) = foo_result; + /// ``` + /// This code should only compile in modules where the uninhabitedness of Foo is + /// visible. + pub fn is_uninhabited_from(&self, module: DefId, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> bool { let mut visited = FxHashSet::default(); - self.is_uninhabited_recurse(&mut visited, block, cx) - } - - pub fn is_uninhabited_recurse(&self, - visited: &mut FxHashSet<(DefId, &'tcx Substs<'tcx>)>, - block: Option, - cx: TyCtxt<'a, 'gcx, 'tcx>) -> bool { - match self.sty { - TyAdt(def, substs) => { - def.is_uninhabited_recurse(visited, block, cx, substs) - }, - - TyNever => true, - TyTuple(ref tys) => tys.iter().any(|ty| ty.is_uninhabited_recurse(visited, block, cx)), - TyArray(ty, len) => len > 0 && ty.is_uninhabited_recurse(visited, block, cx), - TyRef(_, ref tm) => tm.ty.is_uninhabited_recurse(visited, block, cx), - - _ => false, - } + let forest = self.uninhabited_from(&mut visited, tcx); + + // To check whether this type is uninhabited at all (not just from the + // given node) you could check whether the forest is empty. + // ``` + // forest.is_empty() + // ``` + forest.contains(tcx, module) } pub fn is_primitive(&self) -> bool { diff --git a/src/librustc/ty/util.rs b/src/librustc/ty/util.rs index 0b45ff94a9312..0b1030f74b0fd 100644 --- a/src/librustc/ty/util.rs +++ b/src/librustc/ty/util.rs @@ -15,7 +15,7 @@ use hir::map::DefPathData; use infer::InferCtxt; use hir::map as ast_map; use traits::{self, Reveal}; -use ty::{self, Ty, AdtKind, TyCtxt, TypeAndMut, TypeFlags, TypeFoldable}; +use ty::{self, Ty, TyCtxt, TypeAndMut, TypeFlags, TypeFoldable}; use ty::{Disr, ParameterEnvironment}; use ty::fold::TypeVisitor; use ty::layout::{Layout, LayoutError}; @@ -120,9 +120,8 @@ impl IntTypeExt for attr::IntType { #[derive(Copy, Clone)] -pub enum CopyImplementationError { - InfrigingField(Name), - InfrigingVariant(Name), +pub enum CopyImplementationError<'tcx> { + InfrigingField(&'tcx ty::FieldDef), NotAnAdt, HasDestructor } @@ -145,37 +144,30 @@ pub enum Representability { impl<'tcx> ParameterEnvironment<'tcx> { pub fn can_type_implement_copy<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, self_type: Ty<'tcx>, span: Span) - -> Result<(),CopyImplementationError> { + -> Result<(), CopyImplementationError> { // FIXME: (@jroesch) float this code up - tcx.infer_ctxt(None, Some(self.clone()), Reveal::ExactMatch).enter(|infcx| { - let adt = match self_type.sty { - ty::TyAdt(adt, substs) => match adt.adt_kind() { - AdtKind::Struct | AdtKind::Union => { - for field in adt.all_fields() { - let field_ty = field.ty(tcx, substs); - if infcx.type_moves_by_default(field_ty, span) { - return Err(CopyImplementationError::InfrigingField( - field.name)) - } - } - adt - } - AdtKind::Enum => { - for variant in &adt.variants { - for field in &variant.fields { - let field_ty = field.ty(tcx, substs); - if infcx.type_moves_by_default(field_ty, span) { - return Err(CopyImplementationError::InfrigingVariant( - variant.name)) - } - } - } - adt - } - }, + tcx.infer_ctxt(None, Some(self.clone()), Reveal::NotSpecializable).enter(|infcx| { + let (adt, substs) = match self_type.sty { + ty::TyAdt(adt, substs) => (adt, substs), _ => return Err(CopyImplementationError::NotAnAdt) }; + let field_implements_copy = |field: &ty::FieldDef| { + let cause = traits::ObligationCause::dummy(); + match traits::fully_normalize(&infcx, cause, &field.ty(tcx, substs)) { + Ok(ty) => !infcx.type_moves_by_default(ty, span), + Err(..) 
=> false + } + }; + + for variant in &adt.variants { + for field in &variant.fields { + if !field_implements_copy(field) { + return Err(CopyImplementationError::InfrigingField(field)); + } + } + } + if adt.has_dtor() { return Err(CopyImplementationError::HasDestructor); } diff --git a/src/librustc_const_eval/_match.rs b/src/librustc_const_eval/_match.rs index 6d04975f533da..f4b3646fce02c 100644 --- a/src/librustc_const_eval/_match.rs +++ b/src/librustc_const_eval/_match.rs @@ -17,14 +17,14 @@ use eval::{compare_const_vals}; use rustc_const_math::ConstInt; -use rustc_data_structures::fx::FxHashMap; +use rustc_data_structures::fx::{FxHashMap, FxHashSet}; use rustc_data_structures::indexed_vec::Idx; use pattern::{FieldPattern, Pattern, PatternKind}; use pattern::{PatternFoldable, PatternFolder}; use rustc::hir::def_id::DefId; -use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; +use rustc::ty::{self, AdtKind, Ty, TyCtxt, TypeFoldable}; use rustc::mir::Field; use rustc::util::common::ErrorReported; @@ -144,9 +144,13 @@ impl<'a, 'tcx> FromIterator<Vec<&'a Pattern<'tcx>>> for Matrix<'a, 'tcx> { //NOTE: appears to be the only place other than InferCtxt to contain a ParamEnv pub struct MatchCheckCtxt<'a, 'tcx: 'a> { pub tcx: TyCtxt<'a, 'tcx, 'tcx>, - /// A wild pattern with an error type - it exists to avoid having to normalize - /// associated types to get field types. - pub wild_pattern: &'a Pattern<'tcx>, + /// The module in which the match occurs. This is necessary for + /// checking inhabited-ness of types because whether a type is (visibly) + /// inhabited can depend on whether it was defined in the current module or + /// not. eg. `struct Foo { _private: ! }` cannot be seen to be empty + /// outside its module and should not be matchable with an empty match + /// statement. + pub module: DefId, pub pattern_arena: &'a TypedArena<Pattern<'tcx>>, pub byte_array_map: FxHashMap<*const Pattern<'tcx>, Vec<&'a Pattern<'tcx>>>, } @@ -154,27 +158,24 @@ impl<'a, 'tcx> MatchCheckCtxt<'a, 'tcx> { pub fn create_and_enter<F, R>( tcx: TyCtxt<'a, 'tcx, 'tcx>, + module: DefId, f: F) -> R where F: for<'b> FnOnce(MatchCheckCtxt<'b, 'tcx>) -> R { - let wild_pattern = Pattern { - ty: tcx.types.err, - span: DUMMY_SP, - kind: box PatternKind::Wild - }; - let pattern_arena = TypedArena::new(); f(MatchCheckCtxt { tcx: tcx, - wild_pattern: &wild_pattern, + module: module, pattern_arena: &pattern_arena, byte_array_map: FxHashMap(), }) } // convert a byte-string pattern to a list of u8 patterns.
- fn lower_byte_str_pattern(&mut self, pat: &'a Pattern<'tcx>) -> Vec<&'a Pattern<'tcx>> { + fn lower_byte_str_pattern<'p>(&mut self, pat: &'p Pattern<'tcx>) -> Vec<&'p Pattern<'tcx>> + where 'a: 'p + { let pattern_arena = &*self.pattern_arena; let tcx = self.tcx; self.byte_array_map.entry(pat).or_insert_with(|| { @@ -269,8 +270,14 @@ impl<'tcx> Witness<'tcx> { ty: Ty<'tcx>) -> Self { - let arity = constructor_arity(cx, ctor, ty); - self.0.extend(repeat(cx.wild_pattern).take(arity).cloned()); + let sub_pattern_tys = constructor_sub_pattern_tys(cx, ctor, ty); + self.0.extend(sub_pattern_tys.into_iter().map(|ty| { + Pattern { + ty: ty, + span: DUMMY_SP, + kind: box PatternKind::Wild, + } + })); + self.apply_constructor(cx, ctor, ty) } @@ -310,10 +317,11 @@ impl<'tcx> Witness<'tcx> { } }).collect(); - if let ty::TyAdt(adt, _) = ty.sty { + if let ty::TyAdt(adt, substs) = ty.sty { if adt.variants.len() > 1 { PatternKind::Variant { adt_def: adt, + substs: substs, variant_index: ctor.variant_index_for_adt(adt), subpatterns: pats } @@ -356,51 +364,65 @@ impl<'tcx> Witness<'tcx> { } } -/// Return the set of constructors from the same type as the first column of `matrix`, -/// that are matched only by wildcard patterns from that first column. -/// -/// Therefore, if there is some pattern that is unmatched by `matrix`, it will -/// still be unmatched if the first constructor is replaced by any of the constructors -/// in the return value. -fn missing_constructors(cx: &mut MatchCheckCtxt, - matrix: &Matrix, - pcx: PatternContext) -> Vec<Constructor> { - let used_constructors: Vec<Constructor> = - matrix.0.iter() - .flat_map(|row| pat_constructors(cx, row[0], pcx).unwrap_or(vec![])) - .collect(); - debug!("used_constructors = {:?}", used_constructors); - all_constructors(cx, pcx).into_iter() - .filter(|c| !used_constructors.contains(c)) - .collect() -} - /// This determines the set of all possible constructors of a pattern matching /// values of type `left_ty`. For vectors, this would normally be an infinite set +/// but is instead bounded by the maximum fixed length of slice patterns in +/// the column of patterns being analyzed. /// /// This intentionally does not list ConstantValue specializations for /// non-booleans, because we currently assume that there is always a /// "non-standard constant" that matches. See issue #12483. /// -/// but is instead bounded by the maximum fixed length of slice patterns in -/// the column of patterns being analyzed. -fn all_constructors(_cx: &mut MatchCheckCtxt, pcx: PatternContext) -> Vec<Constructor> { +/// We make sure to omit constructors that are statically impossible. eg for +/// Option<!> we do not include Some(_) in the returned list of constructors.
+fn all_constructors<'a, 'tcx: 'a>(cx: &mut MatchCheckCtxt<'a, 'tcx>, + pcx: PatternContext<'tcx>) -> Vec<Constructor> +{ + debug!("all_constructors({:?})", pcx.ty); match pcx.ty.sty { ty::TyBool => [true, false].iter().map(|b| ConstantValue(ConstVal::Bool(*b))).collect(), - ty::TySlice(_) => - (0..pcx.max_slice_length+1).map(|length| Slice(length)).collect(), - ty::TyArray(_, length) => vec![Slice(length)], - ty::TyAdt(def, _) if def.is_enum() && def.variants.len() > 1 => - def.variants.iter().map(|v| Variant(v.did)).collect(), - _ => vec![Single] + ty::TySlice(ref sub_ty) => { + if sub_ty.is_uninhabited_from(cx.module, cx.tcx) { + vec![Slice(0)] + } else { + (0..pcx.max_slice_length+1).map(|length| Slice(length)).collect() + } + } + ty::TyArray(ref sub_ty, length) => { + if length == 0 || !sub_ty.is_uninhabited_from(cx.module, cx.tcx) { + vec![Slice(length)] + } else { + vec![] + } + } + ty::TyAdt(def, substs) if def.is_enum() && def.variants.len() != 1 => { + def.variants.iter().filter_map(|v| { + let mut visited = FxHashSet::default(); + let forest = v.uninhabited_from(&mut visited, + cx.tcx, substs, + AdtKind::Enum); + if forest.contains(cx.tcx, cx.module) { + None + } else { + Some(Variant(v.did)) + } + }).collect() + } + _ => { + if pcx.ty.is_uninhabited_from(cx.module, cx.tcx) { + vec![] + } else { + vec![Single] + } + } } } -fn max_slice_length<'a, 'tcx, I>( +fn max_slice_length<'p, 'a: 'p, 'tcx: 'a, I>( _cx: &mut MatchCheckCtxt<'a, 'tcx>, patterns: I) -> usize - where I: Iterator<Item=&'a Pattern<'tcx>> + where I: Iterator<Item=&'p Pattern<'tcx>> { // The exhaustiveness-checking paper does not include any details on // checking variable-length slice patterns. However, they are matched @@ -491,6 +513,12 @@ fn max_slice_length<'a, 'tcx, I>( } /// Algorithm from http://moscova.inria.fr/~maranget/papers/warn/index.html +/// The algorithm from the paper has been modified to correctly handle empty +/// types. The changes are: +/// (0) We don't exit early if the pattern matrix has zero rows. We just +/// continue to recurse over columns. +/// (1) all_constructors will only return constructors that are statically +/// possible. eg. it will only return Ok for Result<T, !> /// /// Whether a vector `v` of patterns is 'useful' in relation to a set of such /// vectors `m` is defined as there being a set of inputs that will match `v` @@ -500,29 +528,30 @@ fn max_slice_length<'a, 'tcx, I>( /// relation to preceding patterns, it is not reachable) and exhaustiveness /// checking (if a wildcard pattern is useful in relation to a matrix, the /// matrix isn't exhaustive). -/// -/// Note: is_useful doesn't work on empty types, as the paper notes. -/// So it assumes that v is non-empty. -pub fn is_useful<'a, 'tcx>(cx: &mut MatchCheckCtxt<'a, 'tcx>, - matrix: &Matrix<'a, 'tcx>, - v: &[&'a Pattern<'tcx>], +pub fn is_useful<'p, 'a: 'p, 'tcx: 'a>(cx: &mut MatchCheckCtxt<'a, 'tcx>, + matrix: &Matrix<'p, 'tcx>, + v: &[&'p Pattern<'tcx>], witness: WitnessPreference) -> Usefulness<'tcx> { let &Matrix(ref rows) = matrix; debug!("is_useful({:?}, {:?})", matrix, v); - if rows.is_empty() { - return match witness { - ConstructWitness => UsefulWithWitness(vec![Witness( - repeat(cx.wild_pattern).take(v.len()).cloned().collect() - )]), - LeaveOutWitness => Useful - }; - } - if rows[0].is_empty() { - return NotUseful; - } - let &Matrix(ref rows) = matrix; + // The base case. We are pattern-matching on () and the return value is + // based on whether our matrix has a row or not.
+ // NOTE: This could potentially be optimized by checking rows.is_empty() + // first and then, if v is non-empty, the return value is based on whether + // the type of the tuple we're checking is inhabited or not. + if v.is_empty() { + return if rows.is_empty() { + match witness { + ConstructWitness => UsefulWithWitness(vec![Witness(vec![])]), + LeaveOutWitness => Useful, + } + } else { + NotUseful + } + }; + assert!(rows.iter().all(|r| r.len() == v.len())); @@ -541,10 +570,28 @@ pub fn is_useful<'a, 'tcx>(cx: &mut MatchCheckCtxt<'a, 'tcx>, ).find(|result| result.is_useful()).unwrap_or(NotUseful) } else { debug!("is_useful - expanding wildcard"); - let constructors = missing_constructors(cx, matrix, pcx); - debug!("is_useful - missing_constructors = {:?}", constructors); - if constructors.is_empty() { - all_constructors(cx, pcx).into_iter().map(|c| { + + let used_ctors: Vec = rows.iter().flat_map(|row| { + pat_constructors(cx, row[0], pcx).unwrap_or(vec![]) + }).collect(); + debug!("used_ctors = {:?}", used_ctors); + let all_ctors = all_constructors(cx, pcx); + debug!("all_ctors = {:?}", all_ctors); + let missing_ctors: Vec = all_ctors.iter().filter(|c| { + !used_ctors.contains(*c) + }).cloned().collect(); + debug!("missing_ctors = {:?}", missing_ctors); + + // `missing_ctors` is the set of constructors from the same type as the + // first column of `matrix` that are matched only by wildcard patterns + // from the first column. + // + // Therefore, if there is some pattern that is unmatched by `matrix`, + // it will still be unmatched if the first constructor is replaced by + // any of the constructors in `missing_ctors` + + if missing_ctors.is_empty() { + all_ctors.into_iter().map(|c| { is_useful_specialized(cx, matrix, v, c.clone(), pcx.ty, witness) }).find(|result| result.is_useful()).unwrap_or(NotUseful) } else { @@ -558,11 +605,25 @@ pub fn is_useful<'a, 'tcx>(cx: &mut MatchCheckCtxt<'a, 'tcx>, match is_useful(cx, &matrix, &v[1..], witness) { UsefulWithWitness(pats) => { let cx = &*cx; - UsefulWithWitness(pats.into_iter().flat_map(|witness| { - constructors.iter().map(move |ctor| { - witness.clone().push_wild_constructor(cx, ctor, pcx.ty) - }) - }).collect()) + let new_witnesses = if used_ctors.is_empty() { + // All constructors are unused. 
Add wild patterns + // rather than each individual constructor + pats.into_iter().map(|mut witness| { + witness.0.push(Pattern { + ty: pcx.ty, + span: DUMMY_SP, + kind: box PatternKind::Wild, + }); + witness + }).collect() + } else { + pats.into_iter().flat_map(|witness| { + missing_ctors.iter().map(move |ctor| { + witness.clone().push_wild_constructor(cx, ctor, pcx.ty) + }) + }).collect() + }; + UsefulWithWitness(new_witnesses) } result => result } @@ -570,19 +631,27 @@ pub fn is_useful<'a, 'tcx>(cx: &mut MatchCheckCtxt<'a, 'tcx>, } } -fn is_useful_specialized<'a, 'tcx>( +fn is_useful_specialized<'p, 'a:'p, 'tcx: 'a>( cx: &mut MatchCheckCtxt<'a, 'tcx>, - &Matrix(ref m): &Matrix<'a, 'tcx>, - v: &[&'a Pattern<'tcx>], + &Matrix(ref m): &Matrix<'p, 'tcx>, + v: &[&'p Pattern<'tcx>], ctor: Constructor, lty: Ty<'tcx>, witness: WitnessPreference) -> Usefulness<'tcx> { - let arity = constructor_arity(cx, &ctor, lty); + let sub_pat_tys = constructor_sub_pattern_tys(cx, &ctor, lty); + let wild_patterns_owned: Vec<_> = sub_pat_tys.iter().map(|ty| { + Pattern { + ty: ty, + span: DUMMY_SP, + kind: box PatternKind::Wild, + } + }).collect(); + let wild_patterns: Vec<_> = wild_patterns_owned.iter().collect(); let matrix = Matrix(m.iter().flat_map(|r| { - specialize(cx, &r[..], &ctor, 0, arity) + specialize(cx, &r[..], &ctor, &wild_patterns) }).collect()); - match specialize(cx, v, &ctor, 0, arity) { + match specialize(cx, v, &ctor, &wild_patterns) { Some(v) => match is_useful(cx, &matrix, &v[..], witness) { UsefulWithWitness(witnesses) => UsefulWithWitness( witnesses.into_iter() @@ -657,6 +726,33 @@ fn constructor_arity(_cx: &MatchCheckCtxt, ctor: &Constructor, ty: Ty) -> usize } } +/// This computes the types of the sub patterns that a constructor should be +/// expanded to. +/// +/// For instance, a tuple pattern (43u32, 'a') has sub pattern types [u32, char]. 
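One consequence of the `used_ctors.is_empty()` branch above is worth spelling out before the `constructor_sub_pattern_tys` helper that the doc comment introduces: when the first column is matched only by wildcards or bindings, the witness keeps a plain `_` in that position instead of being multiplied into one witness per unused constructor. A hedged sketch follows (the offending matches are commented out so the snippet itself compiles, and the exact diagnostic wording varies across compiler versions):

```rust
#[allow(dead_code)]
enum E { A, B, C }

fn f(e: E, flag: bool) {
    // Uses concrete constructors of `E` in the first column, so the reported
    // witness names the gap, e.g. `(E::C, _)`:
    // match (e, flag) {
    //     (E::A, _) => {}
    //     (E::B, _) => {}
    // }

    // First column is only ever a wildcard, so `used_ctors` is empty and the
    // witness keeps `_` there, e.g. `(_, false)`, rather than one witness per
    // variant of `E`:
    // match (e, flag) {
    //     (_, true) => {}
    // }
    let _ = (e, flag);
}

fn main() { f(E::A, true); }
```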
+fn constructor_sub_pattern_tys<'a, 'tcx: 'a>(cx: &MatchCheckCtxt<'a, 'tcx>, + ctor: &Constructor, + ty: Ty<'tcx>) -> Vec> +{ + debug!("constructor_sub_pattern_tys({:?}, {:?})", ctor, ty); + match ty.sty { + ty::TyTuple(ref fs) => fs.into_iter().map(|t| *t).collect(), + ty::TyBox(ty) => vec![ty], + ty::TySlice(ty) | ty::TyArray(ty, _) => match *ctor { + Slice(length) => repeat(ty).take(length).collect(), + ConstantValue(_) => vec![], + _ => bug!("bad slice pattern {:?} {:?}", ctor, ty) + }, + ty::TyRef(_, ref ty_and_mut) => vec![ty_and_mut.ty], + ty::TyAdt(adt, substs) => { + adt.variants[ctor.variant_index_for_adt(adt)].fields.iter().map(|field| { + field.ty(cx.tcx, substs) + }).collect() + } + _ => vec![], + } +} + fn slice_pat_covered_by_constructor(_tcx: TyCtxt, _span: Span, ctor: &Constructor, prefix: &[Pattern], @@ -708,19 +804,18 @@ fn range_covered_by_constructor(tcx: TyCtxt, span: Span, Ok(cmp_from != Ordering::Less && cmp_to != Ordering::Greater) } -fn patterns_for_variant<'a, 'tcx>( - cx: &mut MatchCheckCtxt<'a, 'tcx>, - subpatterns: &'a [FieldPattern<'tcx>], - arity: usize) - -> Vec<&'a Pattern<'tcx>> +fn patterns_for_variant<'p, 'a: 'p, 'tcx: 'a>( + subpatterns: &'p [FieldPattern<'tcx>], + wild_patterns: &[&'p Pattern<'tcx>]) + -> Vec<&'p Pattern<'tcx>> { - let mut result = vec![cx.wild_pattern; arity]; + let mut result = wild_patterns.to_owned(); for subpat in subpatterns { result[subpat.field.index()] = &subpat.pattern; } - debug!("patterns_for_variant({:?}, {:?}) = {:?}", subpatterns, arity, result); + debug!("patterns_for_variant({:?}, {:?}) = {:?}", subpatterns, wild_patterns, result); result } @@ -732,35 +827,41 @@ fn patterns_for_variant<'a, 'tcx>( /// different patterns. /// Structure patterns with a partial wild pattern (Foo { a: 42, .. }) have their missing /// fields filled with wild patterns. -fn specialize<'a, 'tcx>( +fn specialize<'p, 'a: 'p, 'tcx: 'a>( cx: &mut MatchCheckCtxt<'a, 'tcx>, - r: &[&'a Pattern<'tcx>], - constructor: &Constructor, col: usize, arity: usize) - -> Option>> + r: &[&'p Pattern<'tcx>], + constructor: &Constructor, + wild_patterns: &[&'p Pattern<'tcx>]) + -> Option>> { - let pat = &r[col]; + let pat = &r[0]; let head: Option> = match *pat.kind { - PatternKind::Binding { .. } | PatternKind::Wild => - Some(vec![cx.wild_pattern; arity]), + PatternKind::Binding { .. } | PatternKind::Wild => { + Some(wild_patterns.to_owned()) + }, - PatternKind::Variant { adt_def, variant_index, ref subpatterns } => { + PatternKind::Variant { adt_def, variant_index, ref subpatterns, .. } => { let ref variant = adt_def.variants[variant_index]; if *constructor == Variant(variant.did) { - Some(patterns_for_variant(cx, subpatterns, arity)) + Some(patterns_for_variant(subpatterns, wild_patterns)) } else { None } } - PatternKind::Leaf { ref subpatterns } => Some(patterns_for_variant(cx, subpatterns, arity)), - PatternKind::Deref { ref subpattern } => Some(vec![subpattern]), + PatternKind::Leaf { ref subpatterns } => { + Some(patterns_for_variant(subpatterns, wild_patterns)) + } + PatternKind::Deref { ref subpattern } => { + Some(vec![subpattern]) + } PatternKind::Constant { ref value } => { match *constructor { Slice(..) => match *value { ConstVal::ByteStr(ref data) => { - if arity == data.len() { + if wild_patterns.len() == data.len() { Some(cx.lower_byte_str_pattern(pat)) } else { None @@ -796,11 +897,14 @@ fn specialize<'a, 'tcx>( match *constructor { Slice(..) 
=> { let pat_len = prefix.len() + suffix.len(); - if let Some(slice_count) = arity.checked_sub(pat_len) { + if let Some(slice_count) = wild_patterns.len().checked_sub(pat_len) { if slice_count == 0 || slice.is_some() { Some( prefix.iter().chain( - repeat(cx.wild_pattern).take(slice_count).chain( + wild_patterns.iter().map(|p| *p) + .skip(prefix.len()) + .take(slice_count) + .chain( suffix.iter() )).collect()) } else { @@ -824,11 +928,10 @@ fn specialize<'a, 'tcx>( } } }; - debug!("specialize({:?}, {:?}) = {:?}", r[col], arity, head); + debug!("specialize({:?}, {:?}) = {:?}", r[0], wild_patterns, head); head.map(|mut head| { - head.extend_from_slice(&r[..col]); - head.extend_from_slice(&r[col + 1..]); + head.extend_from_slice(&r[1 ..]); head }) } diff --git a/src/librustc_const_eval/check_match.rs b/src/librustc_const_eval/check_match.rs index 53e83815b4652..2949cf0d535bf 100644 --- a/src/librustc_const_eval/check_match.rs +++ b/src/librustc_const_eval/check_match.rs @@ -24,8 +24,9 @@ use rustc::middle::expr_use_visitor as euv; use rustc::middle::mem_categorization::{cmt}; use rustc::session::Session; use rustc::traits::Reveal; -use rustc::ty::{self, TyCtxt}; -use rustc_errors::DiagnosticBuilder; +use rustc::ty::{self, Ty, TyCtxt}; +use rustc::lint; +use rustc_errors::{Diagnostic, Level, DiagnosticBuilder}; use rustc::hir::def::*; use rustc::hir::intravisit::{self, Visitor, FnKind, NestedVisitorMap}; @@ -35,7 +36,7 @@ use rustc_back::slice; use syntax::ast; use syntax::ptr::P; -use syntax_pos::Span; +use syntax_pos::{Span, DUMMY_SP}; struct OuterVisitor<'a, 'tcx: 'a> { tcx: TyCtxt<'a, 'tcx, 'tcx> } @@ -80,7 +81,7 @@ impl<'a, 'tcx> Visitor<'tcx> for MatchVisitor<'a, 'tcx> { match ex.node { hir::ExprMatch(ref scrut, ref arms, source) => { - self.check_match(scrut, arms, source, ex.span); + self.check_match(scrut, arms, source); } _ => {} } @@ -131,8 +132,7 @@ impl<'a, 'tcx> MatchVisitor<'a, 'tcx> { &self, scrut: &hir::Expr, arms: &[hir::Arm], - source: hir::MatchSource, - span: Span) + source: hir::MatchSource) { for arm in arms { // First, check legality of move bindings. @@ -150,7 +150,8 @@ impl<'a, 'tcx> MatchVisitor<'a, 'tcx> { } } - MatchCheckCtxt::create_and_enter(self.tcx, |ref mut cx| { + let module = self.tcx.map.local_def_id(self.tcx.map.get_module_parent(scrut.id)); + MatchCheckCtxt::create_and_enter(self.tcx, module, |ref mut cx| { let mut have_errors = false; let inlined_arms : Vec<(Vec<_>, _)> = arms.iter().map(|arm| ( @@ -174,32 +175,14 @@ impl<'a, 'tcx> MatchVisitor<'a, 'tcx> { // Fourth, check for unreachable arms. check_arms(cx, &inlined_arms, source); - // Finally, check if the whole match expression is exhaustive. - // Check for empty enum, because is_useful only works on inhabited types. 
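The reworked `specialize` above (now taking a `wild_patterns` slice and always operating on column 0) is easiest to see on a toy pattern language. The following self-contained model, an illustration rather than compiler code, shows the row transformation: a row survives only if its head can match the chosen constructor, the head is replaced by its sub-patterns, and a wildcard head expands into wildcards of the constructor's arity.

```rust
#[derive(Clone, Debug)]
enum Pat {
    Wild,            // `_` or a binding
    Bool(bool),      // a concrete sub-pattern, e.g. `true`
    Some(Box<Pat>),  // `Some(p)` for an Option-like type
    None,            // `None`
}

#[derive(Clone, Copy, Debug)]
enum Ctor { Some, None }

/// Keep the row only if its head can match `ctor`; replace the head with its
/// sub-patterns (wildcards of the right arity when the head is `_`).
fn specialize(row: &[Pat], ctor: Ctor) -> Option<Vec<Pat>> {
    let (head, rest) = row.split_first().unwrap();
    let mut new_row = match (head, ctor) {
        (Pat::Wild, Ctor::Some) => vec![Pat::Wild],       // arity 1
        (Pat::Wild, Ctor::None) => vec![],                // arity 0
        (Pat::Some(p), Ctor::Some) => vec![(**p).clone()],
        (Pat::None, Ctor::None) => vec![],
        _ => return None,                                 // head cannot match
    };
    new_row.extend_from_slice(rest);
    Some(new_row)
}

fn main() {
    let rows = vec![
        vec![Pat::Some(Box::new(Pat::Bool(true))), Pat::Wild],
        vec![Pat::None, Pat::Wild],
        vec![Pat::Wild, Pat::Bool(false)],
    ];
    for ctor in [Ctor::Some, Ctor::None] {
        for row in &rows {
            println!("{:?} by {:?} -> {:?}", row, ctor, specialize(row, ctor));
        }
    }
}
```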
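Separately, the `check_arms` hunk that continues below (together with the `diagnostics.rs` note that E0001 is no longer emitted) reports an unreachable arm in a normal `match` through the `UNREACHABLE_PATTERNS` lint instead of a hard error, so it can now be allowed or denied per item. A small assumed example of the user-visible effect:

```rust
// Warn-by-default after this change; previously the hard error E0001.
#[allow(unreachable_patterns)]
fn classify(x: Option<u32>) -> &'static str {
    match x {
        Some(_) => "some",
        None => "none",
        _ => "unreachable", // now surfaced via the `unreachable_patterns` lint
    }
}

fn main() {
    println!("{}", classify(Some(0)));
}
```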
- let pat_ty = self.tcx.tables().node_id_to_type(scrut.id); - if inlined_arms.is_empty() { - if !pat_ty.is_uninhabited(Some(scrut.id), self.tcx) { - // We know the type is inhabited, so this must be wrong - let mut err = create_e0004(self.tcx.sess, span, - format!("non-exhaustive patterns: type {} \ - is non-empty", - pat_ty)); - span_help!(&mut err, span, - "Please ensure that all possible cases are being handled; \ - possibly adding wildcards or more match arms."); - err.emit(); - } - // If the type *is* uninhabited, it's vacuously exhaustive - return; - } - let matrix: Matrix = inlined_arms .iter() .filter(|&&(_, guard)| guard.is_none()) .flat_map(|arm| &arm.0) .map(|pat| vec![pat.0]) .collect(); - check_exhaustive(cx, scrut.span, &matrix, source); + let scrut_ty = cx.tcx.tables().node_id_to_type(scrut.id); + check_exhaustive(cx, scrut_ty, scrut.span, &matrix, source); }) } @@ -210,13 +193,21 @@ impl<'a, 'tcx> MatchVisitor<'a, 'tcx> { "local binding" }; - MatchCheckCtxt::create_and_enter(self.tcx, |ref mut cx| { + let module = self.tcx.map.local_def_id(self.tcx.map.get_module_parent(pat.id)); + MatchCheckCtxt::create_and_enter(self.tcx, module, |ref mut cx| { let mut patcx = PatternContext::new(self.tcx); + let pattern = patcx.lower_pattern(pat); + let pattern_ty = pattern.ty; let pats : Matrix = vec![vec![ - expand_pattern(cx, patcx.lower_pattern(pat)) + expand_pattern(cx, pattern) ]].into_iter().collect(); - let witness = match is_useful(cx, &pats, &[cx.wild_pattern], ConstructWitness) { + let wild_pattern = Pattern { + ty: pattern_ty, + span: DUMMY_SP, + kind: box PatternKind::Wild, + }; + let witness = match is_useful(cx, &pats, &[&wild_pattern], ConstructWitness) { UsefulWithWitness(witness) => witness, NotUseful => return, Useful => bug!() @@ -324,14 +315,16 @@ fn check_arms<'a, 'tcx>(cx: &mut MatchCheckCtxt<'a, 'tcx>, }, hir::MatchSource::Normal => { - let mut err = struct_span_err!(cx.tcx.sess, pat.span, E0001, - "unreachable pattern"); - err.span_label(pat.span, &"this is an unreachable pattern"); + let mut diagnostic = Diagnostic::new(Level::Warning, + "unreachable pattern"); + diagnostic.set_span(pat.span); // if we had a catchall pattern, hint at that if let Some(catchall) = catchall { - err.span_note(catchall, "this pattern matches any value"); + diagnostic.span_label(pat.span, &"this is an unreachable pattern"); + diagnostic.span_note(catchall, "this pattern matches any value"); } - err.emit(); + cx.tcx.sess.add_lint_diagnostic(lint::builtin::UNREACHABLE_PATTERNS, + hir_pat.id, diagnostic); }, hir::MatchSource::TryDesugar => { @@ -353,13 +346,19 @@ fn check_arms<'a, 'tcx>(cx: &mut MatchCheckCtxt<'a, 'tcx>, } fn check_exhaustive<'a, 'tcx>(cx: &mut MatchCheckCtxt<'a, 'tcx>, + scrut_ty: Ty<'tcx>, sp: Span, matrix: &Matrix<'a, 'tcx>, source: hir::MatchSource) { - match is_useful(cx, matrix, &[cx.wild_pattern], ConstructWitness) { + let wild_pattern = Pattern { + ty: scrut_ty, + span: DUMMY_SP, + kind: box PatternKind::Wild, + }; + match is_useful(cx, matrix, &[&wild_pattern], ConstructWitness) { UsefulWithWitness(pats) => { let witnesses = if pats.is_empty() { - vec![cx.wild_pattern] + vec![&wild_pattern] } else { pats.iter().map(|w| w.single_pattern()).collect() }; diff --git a/src/librustc_const_eval/diagnostics.rs b/src/librustc_const_eval/diagnostics.rs index b24cd261dd584..8c8b2b5da36dc 100644 --- a/src/librustc_const_eval/diagnostics.rs +++ b/src/librustc_const_eval/diagnostics.rs @@ -16,6 +16,8 @@ register_long_diagnostics! 
{ E0001: r##" +## Note: this error code is no longer emitted by the compiler. + This error suggests that the expression arm corresponding to the noted pattern will never be reached as for all possible values of the expression being matched, one of the preceding patterns will match. @@ -25,10 +27,10 @@ one is too specific or the ordering is incorrect. For example, the following `match` block has too many arms: -```compile_fail,E0001 +``` match Some(0) { Some(bar) => {/* ... */} - None => {/* ... */} + x => {/* ... */} // This handles the `None` case _ => {/* ... */} // All possible cases have already been handled } ``` diff --git a/src/librustc_const_eval/pattern.rs b/src/librustc_const_eval/pattern.rs index b122d97a702f6..42394f4745f66 100644 --- a/src/librustc_const_eval/pattern.rs +++ b/src/librustc_const_eval/pattern.rs @@ -13,7 +13,8 @@ use eval; use rustc::lint; use rustc::middle::const_val::ConstVal; use rustc::mir::{Field, BorrowKind, Mutability}; -use rustc::ty::{self, TyCtxt, AdtDef, Ty, Region}; +use rustc::ty::{self, TyCtxt, AdtDef, Ty, TypeVariants, Region}; +use rustc::ty::subst::{Substs, Kind}; use rustc::hir::{self, PatKind}; use rustc::hir::def::{Def, CtorKind}; use rustc::hir::pat_util::EnumerateAndAdjustIterator; @@ -67,6 +68,7 @@ pub enum PatternKind<'tcx> { /// Foo(...) or Foo{...} or Foo, where `Foo` is a variant name from an adt with >1 variants Variant { adt_def: &'tcx AdtDef, + substs: &'tcx Substs<'tcx>, variant_index: usize, subpatterns: Vec>, }, @@ -391,8 +393,7 @@ impl<'a, 'gcx, 'tcx> PatternContext<'a, 'gcx, 'tcx> { PatKind::TupleStruct(ref qpath, ref subpatterns, ddpos) => { let def = self.tcx.tables().qpath_def(qpath, pat.id); - let pat_ty = self.tcx.tables().node_id_to_type(pat.id); - let adt_def = match pat_ty.sty { + let adt_def = match ty.sty { ty::TyAdt(adt_def, _) => adt_def, _ => span_bug!(pat.span, "tuple struct pattern not applied to an ADT"), }; @@ -406,13 +407,12 @@ impl<'a, 'gcx, 'tcx> PatternContext<'a, 'gcx, 'tcx> { pattern: self.lower_pattern(field), }) .collect(); - self.lower_variant_or_leaf(def, subpatterns) + self.lower_variant_or_leaf(def, ty, subpatterns) } PatKind::Struct(ref qpath, ref fields, _) => { let def = self.tcx.tables().qpath_def(qpath, pat.id); - let pat_ty = self.tcx.tables().node_id_to_type(pat.id); - let adt_def = match pat_ty.sty { + let adt_def = match ty.sty { ty::TyAdt(adt_def, _) => adt_def, _ => { span_bug!( @@ -439,7 +439,7 @@ impl<'a, 'gcx, 'tcx> PatternContext<'a, 'gcx, 'tcx> { }) .collect(); - self.lower_variant_or_leaf(def, subpatterns) + self.lower_variant_or_leaf(def, ty, subpatterns) } }; @@ -529,6 +529,7 @@ impl<'a, 'gcx, 'tcx> PatternContext<'a, 'gcx, 'tcx> { fn lower_variant_or_leaf( &mut self, def: Def, + ty: Ty<'tcx>, subpatterns: Vec>) -> PatternKind<'tcx> { @@ -537,8 +538,14 @@ impl<'a, 'gcx, 'tcx> PatternContext<'a, 'gcx, 'tcx> { let enum_id = self.tcx.parent_def_id(variant_id).unwrap(); let adt_def = self.tcx.lookup_adt_def(enum_id); if adt_def.variants.len() > 1 { + let substs = match ty.sty { + TypeVariants::TyAdt(_, substs) => substs, + TypeVariants::TyFnDef(_, substs, _) => substs, + _ => bug!("inappropriate type for def: {:?}", ty.sty), + }; PatternKind::Variant { adt_def: adt_def, + substs: substs, variant_index: adt_def.variant_index_with_id(variant_id), subpatterns: subpatterns, } @@ -562,6 +569,7 @@ impl<'a, 'gcx, 'tcx> PatternContext<'a, 'gcx, 'tcx> { pat_id: ast::NodeId, span: Span) -> Pattern<'tcx> { + let ty = self.tcx.tables().node_id_to_type(id); let def = 
self.tcx.tables().qpath_def(qpath, id); let kind = match def { Def::Const(def_id) | Def::AssociatedConst(def_id) => { @@ -578,12 +586,12 @@ impl<'a, 'gcx, 'tcx> PatternContext<'a, 'gcx, 'tcx> { } } } - _ => self.lower_variant_or_leaf(def, vec![]) + _ => self.lower_variant_or_leaf(def, ty, vec![]), }; Pattern { span: span, - ty: self.tcx.tables().node_id_to_type(id), + ty: ty, kind: Box::new(kind), } } @@ -651,6 +659,7 @@ impl<'a, 'gcx, 'tcx> PatternContext<'a, 'gcx, 'tcx> { hir::ExprPath(ref qpath) => qpath, _ => bug!() }; + let ty = self.tcx.tables().node_id_to_type(callee.id); let def = self.tcx.tables().qpath_def(qpath, callee.id); match def { Def::Fn(..) | Def::Method(..) => self.lower_lit(expr), @@ -661,7 +670,7 @@ impl<'a, 'gcx, 'tcx> PatternContext<'a, 'gcx, 'tcx> { pattern: self.lower_const_expr(expr, pat_id, span) } }).collect(); - self.lower_variant_or_leaf(def, subpatterns) + self.lower_variant_or_leaf(def, ty, subpatterns) } } } @@ -696,7 +705,7 @@ impl<'a, 'gcx, 'tcx> PatternContext<'a, 'gcx, 'tcx> { }) .collect(); - self.lower_variant_or_leaf(def, subpatterns) + self.lower_variant_or_leaf(def, pat_ty, subpatterns) } hir::ExprArray(ref exprs) => { @@ -776,8 +785,9 @@ macro_rules! CloneImpls { } CloneImpls!{ <'tcx> - Span, Field, Mutability, ast::Name, ast::NodeId, usize, ConstVal, - Ty<'tcx>, BindingMode<'tcx>, &'tcx AdtDef + Span, Field, Mutability, ast::Name, ast::NodeId, usize, ConstVal, Region, + Ty<'tcx>, BindingMode<'tcx>, &'tcx AdtDef, + &'tcx Substs<'tcx>, &'tcx Kind<'tcx> } impl<'tcx> PatternFoldable<'tcx> for FieldPattern<'tcx> { @@ -828,10 +838,12 @@ impl<'tcx> PatternFoldable<'tcx> for PatternKind<'tcx> { }, PatternKind::Variant { adt_def, + substs, variant_index, ref subpatterns, } => PatternKind::Variant { adt_def: adt_def.fold_with(folder), + substs: substs.fold_with(folder), variant_index: variant_index.fold_with(folder), subpatterns: subpatterns.fold_with(folder) }, diff --git a/src/librustc_data_structures/accumulate_vec.rs b/src/librustc_data_structures/accumulate_vec.rs index 937cb3f600746..78af655852d1b 100644 --- a/src/librustc_data_structures/accumulate_vec.rs +++ b/src/librustc_data_structures/accumulate_vec.rs @@ -19,6 +19,7 @@ use std::ops::{Deref, DerefMut}; use std::iter::{self, IntoIterator, FromIterator}; use std::slice; use std::vec; +use std::collections::range::RangeArgument; use rustc_serialize::{Encodable, Encoder, Decodable, Decoder}; @@ -71,6 +72,19 @@ impl AccumulateVec { AccumulateVec::Heap(ref mut vec) => vec.pop(), } } + + pub fn drain(&mut self, range: R) -> Drain + where R: RangeArgument + { + match *self { + AccumulateVec::Array(ref mut v) => { + Drain::Array(v.drain(range)) + }, + AccumulateVec::Heap(ref mut v) => { + Drain::Heap(v.drain(range)) + }, + } + } } impl Deref for AccumulateVec { @@ -132,6 +146,31 @@ impl Iterator for IntoIter { } } +pub enum Drain<'a, A: Array> + where A::Element: 'a +{ + Array(array_vec::Drain<'a, A>), + Heap(vec::Drain<'a, A::Element>), +} + +impl<'a, A: Array> Iterator for Drain<'a, A> { + type Item = A::Element; + + fn next(&mut self) -> Option { + match *self { + Drain::Array(ref mut drain) => drain.next(), + Drain::Heap(ref mut drain) => drain.next(), + } + } + + fn size_hint(&self) -> (usize, Option) { + match *self { + Drain::Array(ref drain) => drain.size_hint(), + Drain::Heap(ref drain) => drain.size_hint(), + } + } +} + impl IntoIterator for AccumulateVec { type Item = A::Element; type IntoIter = IntoIter; diff --git a/src/librustc_data_structures/array_vec.rs 
b/src/librustc_data_structures/array_vec.rs index 631cf2cfcf6db..844e9041d2029 100644 --- a/src/librustc_data_structures/array_vec.rs +++ b/src/librustc_data_structures/array_vec.rs @@ -12,12 +12,13 @@ use std::marker::Unsize; use std::iter::Extend; -use std::ptr::{self, drop_in_place}; +use std::ptr::{self, drop_in_place, Shared}; use std::ops::{Deref, DerefMut, Range}; use std::hash::{Hash, Hasher}; use std::slice; use std::fmt; use std::mem; +use std::collections::range::RangeArgument; pub unsafe trait Array { type Element; @@ -103,6 +104,44 @@ impl ArrayVec { None } } + + pub fn drain(&mut self, range: R) -> Drain + where R: RangeArgument + { + // Memory safety + // + // When the Drain is first created, it shortens the length of + // the source vector to make sure no uninitalized or moved-from elements + // are accessible at all if the Drain's destructor never gets to run. + // + // Drain will ptr::read out the values to remove. + // When finished, remaining tail of the vec is copied back to cover + // the hole, and the vector length is restored to the new length. + // + let len = self.len(); + let start = *range.start().unwrap_or(&0); + let end = *range.end().unwrap_or(&len); + assert!(start <= end); + assert!(end <= len); + + unsafe { + // set self.vec length's to start, to be safe in case Drain is leaked + self.set_len(start); + // Use the borrow in the IterMut to indicate borrowing behavior of the + // whole Drain iterator (like &mut T). + let range_slice = { + let arr = &mut self.values as &mut [ManuallyDrop<_>]; + slice::from_raw_parts_mut(arr.as_mut_ptr().offset(start as isize), + end - start) + }; + Drain { + tail_start: end, + tail_len: len - end, + iter: range_slice.iter(), + array_vec: Shared::new(self as *mut _), + } + } + } } impl Default for ArrayVec @@ -179,6 +218,51 @@ impl Iterator for Iter { } } +pub struct Drain<'a, A: Array> + where A::Element: 'a +{ + tail_start: usize, + tail_len: usize, + iter: slice::Iter<'a, ManuallyDrop>, + array_vec: Shared>, +} + +impl<'a, A: Array> Iterator for Drain<'a, A> { + type Item = A::Element; + + #[inline] + fn next(&mut self) -> Option { + self.iter.next().map(|elt| unsafe { ptr::read(elt as *const ManuallyDrop<_>).value }) + } + + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } +} + +impl<'a, A: Array> Drop for Drain<'a, A> { + fn drop(&mut self) { + // exhaust self first + while let Some(_) = self.next() {} + + if self.tail_len > 0 { + unsafe { + let source_array_vec = &mut **self.array_vec; + // memmove back untouched tail, update to new length + let start = source_array_vec.len(); + let tail = self.tail_start; + { + let mut arr = &mut source_array_vec.values as &mut [ManuallyDrop<_>]; + let src = arr.as_ptr().offset(tail as isize); + let dst = arr.as_mut_ptr().offset(start as isize); + ptr::copy(src, dst, self.tail_len); + }; + source_array_vec.set_len(start + self.tail_len); + } + } + } +} + impl IntoIterator for ArrayVec { type Item = A::Element; type IntoIter = Iter; diff --git a/src/librustc_data_structures/lib.rs b/src/librustc_data_structures/lib.rs index d3ec674daed4d..ee75a3596e18a 100644 --- a/src/librustc_data_structures/lib.rs +++ b/src/librustc_data_structures/lib.rs @@ -25,6 +25,8 @@ html_root_url = "https://doc.rust-lang.org/nightly/")] #![cfg_attr(not(stage0), deny(warnings))] +#![feature(shared)] +#![feature(collections_range)] #![feature(nonzero)] #![feature(rustc_private)] #![feature(staged_api)] diff --git a/src/librustc_lint/lib.rs b/src/librustc_lint/lib.rs index 
a24edfaaac1c2..efccc4abd43b8 100644 --- a/src/librustc_lint/lib.rs +++ b/src/librustc_lint/lib.rs @@ -165,6 +165,7 @@ pub fn register_builtins(store: &mut lint::LintStore, sess: Option<&Session>) { DEAD_CODE, UNUSED_MUT, UNREACHABLE_CODE, + UNREACHABLE_PATTERNS, UNUSED_MUST_USE, UNUSED_UNSAFE, PATH_STATEMENTS, diff --git a/src/librustc_lint/types.rs b/src/librustc_lint/types.rs index 95e955bd6833e..570365c407f48 100644 --- a/src/librustc_lint/types.rs +++ b/src/librustc_lint/types.rs @@ -35,32 +35,6 @@ use rustc::hir; use rustc_i128::{i128, u128}; -register_long_diagnostics! { -E0519: r##" -It is not allowed to negate an unsigned integer. -You can negate a signed integer and cast it to an -unsigned integer or use the `!` operator. - -``` -let x: usize = -1isize as usize; -let y: usize = !0; -assert_eq!(x, y); -``` - -Alternatively you can use the `Wrapping` newtype -or the `wrapping_neg` operation that all -integral types support: - -``` -use std::num::Wrapping; -let x: Wrapping = -Wrapping(1); -let Wrapping(x) = x; -let y: usize = 1.wrapping_neg(); -assert_eq!(x, y); -``` -"## -} - declare_lint! { UNUSED_COMPARISONS, Warn, @@ -109,24 +83,6 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for TypeLimits { fn check_expr(&mut self, cx: &LateContext, e: &hir::Expr) { match e.node { hir::ExprUnary(hir::UnNeg, ref expr) => { - if let hir::ExprLit(ref lit) = expr.node { - match lit.node { - ast::LitKind::Int(_, ast::LitIntType::Unsigned(_)) => { - forbid_unsigned_negation(cx, e.span); - } - ast::LitKind::Int(_, ast::LitIntType::Unsuffixed) => { - if let ty::TyUint(_) = cx.tcx.tables().node_id_to_type(e.id).sty { - forbid_unsigned_negation(cx, e.span); - } - } - _ => (), - } - } else { - let t = cx.tcx.tables().node_id_to_type(expr.id); - if let ty::TyUint(_) = t.sty { - forbid_unsigned_negation(cx, e.span); - } - } // propagate negation, if the negation itself isn't negated if self.negated_expr_id != e.id { self.negated_expr_id = expr.id; @@ -369,13 +325,6 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for TypeLimits { _ => false, } } - - fn forbid_unsigned_negation(cx: &LateContext, span: Span) { - cx.sess() - .struct_span_err_with_code(span, "unary negation of unsigned integer", "E0519") - .span_help(span, "use a cast or the `!` operator") - .emit(); - } } } diff --git a/src/librustc_mir/build/matches/simplify.rs b/src/librustc_mir/build/matches/simplify.rs index 71282dcf0ba07..b071834122367 100644 --- a/src/librustc_mir/build/matches/simplify.rs +++ b/src/librustc_mir/build/matches/simplify.rs @@ -26,6 +26,7 @@ use build::{BlockAnd, BlockAndExtension, Builder}; use build::matches::{Binding, MatchPair, Candidate}; use hair::*; use rustc::mir::*; +use rustc_data_structures::fx::FxHashSet; use std::mem; @@ -93,11 +94,30 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } PatternKind::Range { .. } | - PatternKind::Variant { .. } | PatternKind::Slice { .. 
} => { Err(match_pair) } + PatternKind::Variant { adt_def, substs, variant_index, ref subpatterns } => { + let irrefutable = adt_def.variants.iter().enumerate().all(|(i, v)| { + i == variant_index || { + let mut visited = FxHashSet::default(); + let node_set = v.uninhabited_from(&mut visited, + self.hir.tcx(), + substs, + adt_def.adt_kind()); + !node_set.is_empty() + } + }); + if irrefutable { + let lvalue = match_pair.lvalue.downcast(adt_def, variant_index); + candidate.match_pairs.extend(self.field_match_pairs(lvalue, subpatterns)); + Ok(()) + } else { + Err(match_pair) + } + }, + PatternKind::Array { ref prefix, ref slice, ref suffix } => { self.prefix_slice_suffix(&mut candidate.match_pairs, &match_pair.lvalue, diff --git a/src/librustc_mir/build/matches/test.rs b/src/librustc_mir/build/matches/test.rs index cb449037aeba3..8b4a013bad0a3 100644 --- a/src/librustc_mir/build/matches/test.rs +++ b/src/librustc_mir/build/matches/test.rs @@ -32,7 +32,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// It is a bug to call this with a simplifyable pattern. pub fn test<'pat>(&mut self, match_pair: &MatchPair<'pat, 'tcx>) -> Test<'tcx> { match *match_pair.pattern.kind { - PatternKind::Variant { ref adt_def, variant_index: _, subpatterns: _ } => { + PatternKind::Variant { ref adt_def, substs: _, variant_index: _, subpatterns: _ } => { Test { span: match_pair.pattern.span, kind: TestKind::Switch { @@ -451,7 +451,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { // If we are performing a variant switch, then this // informs variant patterns, but nothing else. (&TestKind::Switch { adt_def: tested_adt_def, .. }, - &PatternKind::Variant { adt_def, variant_index, ref subpatterns }) => { + &PatternKind::Variant { adt_def, variant_index, ref subpatterns, .. 
}) => { assert_eq!(adt_def, tested_adt_def); let new_candidate = self.candidate_after_variant_switch(match_pair_index, diff --git a/src/librustc_resolve/lib.rs b/src/librustc_resolve/lib.rs index b7908f0c0edde..1eeb356480cf2 100644 --- a/src/librustc_resolve/lib.rs +++ b/src/librustc_resolve/lib.rs @@ -2336,20 +2336,19 @@ impl<'a> Resolver<'a> { PathResult::Indeterminate => bug!("indetermined path result in resolve_qpath"), }; - if path.len() == 1 || global_by_default || result.base_def == Def::Err { - return Some(result); - } - - let unqualified_result = { - match self.resolve_path(&[*path.last().unwrap()], Some(ns), None) { - PathResult::NonModule(path_res) => path_res.base_def, - PathResult::Module(module) => module.def().unwrap(), - _ => return Some(result), + if path.len() > 1 && !global_by_default && result.base_def != Def::Err && + path[0].name != keywords::CrateRoot.name() && path[0].name != "$crate" { + let unqualified_result = { + match self.resolve_path(&[*path.last().unwrap()], Some(ns), None) { + PathResult::NonModule(path_res) => path_res.base_def, + PathResult::Module(module) => module.def().unwrap(), + _ => return Some(result), + } + }; + if result.base_def == unqualified_result { + let lint = lint::builtin::UNUSED_QUALIFICATIONS; + self.session.add_lint(lint, id, span, "unnecessary qualification".to_string()); } - }; - if result.base_def == unqualified_result && path[0].name != "$crate" { - let lint = lint::builtin::UNUSED_QUALIFICATIONS; - self.session.add_lint(lint, id, span, "unnecessary qualification".to_string()); } Some(result) diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index 07b7adc63bffa..ad4bb0fce22ad 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -10,7 +10,8 @@ use llvm::{self, ValueRef, Integer, Pointer, Float, Double, Struct, Array, Vector, AttributePlace}; use base; -use common::{type_is_fat_ptr, BlockAndBuilder, C_uint}; +use builder::Builder; +use common::{type_is_fat_ptr, C_uint}; use context::CrateContext; use cabi_x86; use cabi_x86_64; @@ -236,7 +237,7 @@ impl ArgType { /// lvalue for the original Rust type of this argument/return. /// Can be used for both storing formal arguments into Rust variables /// or results of call/invoke instructions into their destinations. - pub fn store(&self, bcx: &BlockAndBuilder, mut val: ValueRef, dst: ValueRef) { + pub fn store(&self, bcx: &Builder, mut val: ValueRef, dst: ValueRef) { if self.is_ignore() { return; } @@ -269,7 +270,7 @@ impl ArgType { // bitcasting to the struct type yields invalid cast errors. // We instead thus allocate some scratch space... - let llscratch = bcx.fcx().alloca(ty, "abi_cast"); + let llscratch = bcx.alloca(ty, "abi_cast"); base::Lifetime::Start.call(bcx, llscratch); // ...where we first store the value... 
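Circling back to the `AccumulateVec::drain`/`ArrayVec::drain` hunks earlier in this diff: the new method follows the contract of `Vec::drain`, which the memory-safety comment in `array_vec.rs` spells out, namely that the range is moved out through the iterator, the tail is shifted back when the iterator is dropped, and a leaked `Drain` leaves the vector truncated rather than exposing moved-from elements. A quick runnable illustration using `std::vec::Vec`, whose behaviour the new code mirrors:

```rust
fn main() {
    let mut v = vec![1, 2, 3, 4, 5];

    // Elements in the range are moved out through the iterator...
    let drained: Vec<i32> = v.drain(1..3).collect();
    assert_eq!(drained, [2, 3]);

    // ...and when the Drain is dropped the tail is shifted over the gap.
    assert_eq!(v, [1, 4, 5]);

    // Leaking the Drain is safe: the vector was truncated up front, so at
    // worst the drained-plus-tail elements are lost (their exact fate is
    // unspecified for `Vec`, hence no assertion here).
    let mut w = vec![10, 20, 30, 40];
    std::mem::forget(w.drain(1..3));
    println!("after leaked drain: {:?}", w);
}
```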
@@ -293,14 +294,14 @@ impl ArgType { } } - pub fn store_fn_arg(&self, bcx: &BlockAndBuilder, idx: &mut usize, dst: ValueRef) { + pub fn store_fn_arg(&self, bcx: &Builder, idx: &mut usize, dst: ValueRef) { if self.pad.is_some() { *idx += 1; } if self.is_ignore() { return; } - let val = llvm::get_param(bcx.fcx().llfn, *idx as c_uint); + let val = llvm::get_param(bcx.llfn(), *idx as c_uint); *idx += 1; self.store(bcx, val, dst); } diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs index e08f29d24729c..c3b9a56ac9778 100644 --- a/src/librustc_trans/adt.rs +++ b/src/librustc_trans/adt.rs @@ -49,53 +49,20 @@ use llvm::{ValueRef, True, IntEQ, IntNE}; use rustc::ty::layout; use rustc::ty::{self, Ty, AdtKind}; use common::*; -use glue; +use builder::Builder; use base; use machine; use monomorphize; use type_::Type; use type_of; -use value::Value; - -#[derive(Copy, Clone, PartialEq)] -pub enum BranchKind { - Switch, - Single -} - -#[derive(Copy, Clone)] -pub struct MaybeSizedValue { - pub value: ValueRef, - pub meta: ValueRef, -} - -impl MaybeSizedValue { - pub fn sized(value: ValueRef) -> MaybeSizedValue { - MaybeSizedValue { - value: value, - meta: std::ptr::null_mut() - } - } - - pub fn unsized_(value: ValueRef, meta: ValueRef) -> MaybeSizedValue { - MaybeSizedValue { - value: value, - meta: meta - } - } - - pub fn has_meta(&self) -> bool { - !self.meta.is_null() - } -} /// Given an enum, struct, closure, or tuple, extracts fields. /// Treats closures as a struct with one variant. /// `empty_if_no_variants` is a switch to deal with empty enums. /// If true, `variant_index` is disregarded and an empty Vec returned in this case. -fn compute_fields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>, - variant_index: usize, - empty_if_no_variants: bool) -> Vec> { +pub fn compute_fields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>, + variant_index: usize, + empty_if_no_variants: bool) -> Vec> { match t.sty { ty::TyAdt(ref def, _) if def.variants.len() == 0 && empty_if_no_variants => { Vec::default() @@ -300,28 +267,6 @@ fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, fields: &Vec> } } -/// Obtain a representation of the discriminant sufficient to translate -/// destructuring; this may or may not involve the actual discriminant. -pub fn trans_switch<'a, 'tcx>( - bcx: &BlockAndBuilder<'a, 'tcx>, - t: Ty<'tcx>, - scrutinee: ValueRef, - range_assert: bool -) -> (BranchKind, Option) { - let l = bcx.ccx.layout_of(t); - match *l { - layout::CEnum { .. } | layout::General { .. } | - layout::RawNullablePointer { .. } | layout::StructWrappedNullablePointer { .. } => { - (BranchKind::Switch, Some(trans_get_discr(bcx, t, scrutinee, None, range_assert))) - } - layout::Univariant { .. } | layout::UntaggedUnion { .. } => { - // N.B.: Univariant means <= 1 enum variants (*not* == 1 variants). - (BranchKind::Single, None) - }, - _ => bug!("{} is not an enum.", t) - } -} - pub fn is_discr_signed<'tcx>(l: &layout::Layout) -> bool { match *l { layout::CEnum { signed, .. }=> signed, @@ -331,7 +276,7 @@ pub fn is_discr_signed<'tcx>(l: &layout::Layout) -> bool { /// Obtain the actual discriminant of a value. pub fn trans_get_discr<'a, 'tcx>( - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, scrutinee: ValueRef, cast_to: Option, @@ -358,7 +303,7 @@ pub fn trans_get_discr<'a, 'tcx>( layout::RawNullablePointer { nndiscr, .. 
} => { let cmp = if nndiscr == 0 { IntEQ } else { IntNE }; let llptrty = type_of::sizing_type_of(bcx.ccx, - monomorphize::field_ty(bcx.ccx.tcx(), substs, + monomorphize::field_ty(bcx.tcx(), substs, &def.variants[nndiscr as usize].fields[0])); bcx.icmp(cmp, bcx.load(scrutinee), C_null(llptrty)) } @@ -374,7 +319,7 @@ pub fn trans_get_discr<'a, 'tcx>( } fn struct_wrapped_nullable_bitdiscr( - bcx: &BlockAndBuilder, + bcx: &Builder, nndiscr: u64, discrfield: &layout::FieldPath, scrutinee: ValueRef @@ -387,7 +332,7 @@ fn struct_wrapped_nullable_bitdiscr( } /// Helper for cases where the discriminant is simply loaded. -fn load_discr(bcx: &BlockAndBuilder, ity: layout::Integer, ptr: ValueRef, min: u64, max: u64, +fn load_discr(bcx: &Builder, ity: layout::Integer, ptr: ValueRef, min: u64, max: u64, range_assert: bool) -> ValueRef { let llty = Type::from_integer(bcx.ccx, ity); @@ -415,7 +360,7 @@ fn load_discr(bcx: &BlockAndBuilder, ity: layout::Integer, ptr: ValueRef, min: u /// discriminant-like value returned by `trans_switch`. /// /// This should ideally be less tightly tied to `_match`. -pub fn trans_case<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, t: Ty<'tcx>, value: Disr) -> ValueRef { +pub fn trans_case<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, value: Disr) -> ValueRef { let l = bcx.ccx.layout_of(t); match *l { layout::CEnum { discr, .. } @@ -435,9 +380,7 @@ pub fn trans_case<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, t: Ty<'tcx>, value: /// Set the discriminant for a new value of the given case of the given /// representation. -pub fn trans_set_discr<'a, 'tcx>( - bcx: &BlockAndBuilder<'a, 'tcx>, t: Ty<'tcx>, val: ValueRef, to: Disr -) { +pub fn trans_set_discr<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, val: ValueRef, to: Disr) { let l = bcx.ccx.layout_of(t); match *l { layout::CEnum{ discr, min, max, .. } => { @@ -484,11 +427,11 @@ pub fn trans_set_discr<'a, 'tcx>( } } -fn target_sets_discr_via_memset<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>) -> bool { +fn target_sets_discr_via_memset<'a, 'tcx>(bcx: &Builder<'a, 'tcx>) -> bool { bcx.sess().target.target.arch == "arm" || bcx.sess().target.target.arch == "aarch64" } -fn assert_discr_in_range(min: Disr, max: Disr, discr: Disr) { +pub fn assert_discr_in_range(min: Disr, max: Disr, discr: Disr) { if min <= max { assert!(min <= discr && discr <= max) } else { @@ -496,303 +439,6 @@ fn assert_discr_in_range(min: Disr, max: Disr, discr: Disr) { } } -/// Access a field, at a point when the value's case is known. -pub fn trans_field_ptr<'a, 'tcx>( - bcx: &BlockAndBuilder<'a, 'tcx>, - t: Ty<'tcx>, - val: MaybeSizedValue, - discr: Disr, - ix: usize -) -> ValueRef { - let l = bcx.ccx.layout_of(t); - debug!("trans_field_ptr on {} represented as {:#?}", t, l); - // Note: if this ever needs to generate conditionals (e.g., if we - // decide to do some kind of cdr-coding-like non-unique repr - // someday), it will need to return a possibly-new bcx as well. - match *l { - layout::Univariant { ref variant, .. } => { - assert_eq!(discr, Disr(0)); - struct_field_ptr(bcx, &variant, - &compute_fields(bcx.ccx, t, 0, false), - val, ix, false) - } - layout::Vector { count, .. } => { - assert_eq!(discr.0, 0); - assert!((ix as u64) < count); - bcx.struct_gep(val.value, ix) - } - layout::General { discr: d, ref variants, .. 
} => { - let mut fields = compute_fields(bcx.ccx, t, discr.0 as usize, false); - fields.insert(0, d.to_ty(&bcx.ccx.tcx(), false)); - struct_field_ptr(bcx, &variants[discr.0 as usize], - &fields, - val, ix + 1, true) - } - layout::UntaggedUnion { .. } => { - let fields = compute_fields(bcx.ccx, t, 0, false); - let ty = type_of::in_memory_type_of(bcx.ccx, fields[ix]); - bcx.pointercast(val.value, ty.ptr_to()) - } - layout::RawNullablePointer { nndiscr, .. } | - layout::StructWrappedNullablePointer { nndiscr, .. } if discr.0 != nndiscr => { - let nullfields = compute_fields(bcx.ccx, t, (1-nndiscr) as usize, false); - // The unit-like case might have a nonzero number of unit-like fields. - // (e.d., Result of Either with (), as one side.) - let ty = type_of::type_of(bcx.ccx, nullfields[ix]); - assert_eq!(machine::llsize_of_alloc(bcx.ccx, ty), 0); - bcx.pointercast(val.value, ty.ptr_to()) - } - layout::RawNullablePointer { nndiscr, .. } => { - let nnty = compute_fields(bcx.ccx, t, nndiscr as usize, false)[0]; - assert_eq!(ix, 0); - assert_eq!(discr.0, nndiscr); - let ty = type_of::type_of(bcx.ccx, nnty); - bcx.pointercast(val.value, ty.ptr_to()) - } - layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => { - assert_eq!(discr.0, nndiscr); - struct_field_ptr(bcx, &nonnull, - &compute_fields(bcx.ccx, t, discr.0 as usize, false), - val, ix, false) - } - _ => bug!("element access in type without elements: {} represented as {:#?}", t, l) - } -} - -fn struct_field_ptr<'a, 'tcx>( - bcx: &BlockAndBuilder<'a, 'tcx>, - st: &layout::Struct, - fields: &Vec>, - val: MaybeSizedValue, - ix: usize, - needs_cast: bool -) -> ValueRef { - let fty = fields[ix]; - let ccx = bcx.ccx; - - let ptr_val = if needs_cast { - let fields = st.field_index_by_increasing_offset().map(|i| { - type_of::in_memory_type_of(ccx, fields[i]) - }).collect::>(); - let real_ty = Type::struct_(ccx, &fields[..], st.packed); - bcx.pointercast(val.value, real_ty.ptr_to()) - } else { - val.value - }; - - // Simple case - we can just GEP the field - // * First field - Always aligned properly - // * Packed struct - There is no alignment padding - // * Field is sized - pointer is properly aligned already - if st.offsets[ix] == layout::Size::from_bytes(0) || st.packed || - bcx.ccx.shared().type_is_sized(fty) { - return bcx.struct_gep(ptr_val, st.memory_index[ix] as usize); - } - - // If the type of the last field is [T] or str, then we don't need to do - // any adjusments - match fty.sty { - ty::TySlice(..) | ty::TyStr => { - return bcx.struct_gep(ptr_val, st.memory_index[ix] as usize); - } - _ => () - } - - // There's no metadata available, log the case and just do the GEP. - if !val.has_meta() { - debug!("Unsized field `{}`, of `{:?}` has no metadata for adjustment", - ix, Value(ptr_val)); - return bcx.struct_gep(ptr_val, ix); - } - - // We need to get the pointer manually now. - // We do this by casting to a *i8, then offsetting it by the appropriate amount. - // We do this instead of, say, simply adjusting the pointer from the result of a GEP - // because the field may have an arbitrary alignment in the LLVM representation - // anyway. - // - // To demonstrate: - // struct Foo { - // x: u16, - // y: T - // } - // - // The type Foo> is represented in LLVM as { u16, { u16, u8 }}, meaning that - // the `y` field has 16-bit alignment. 
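The `Foo` example in the comment above, and the offset computation that follows it, comes down to rounding an unaligned offset up to the field's alignment; the mask form used there and the divide/multiply form of the `roundup` helper kept a little further down agree whenever the alignment is a power of two, which is the only case LLVM alignments produce. A small self-contained check of that arithmetic:

```rust
// Round `x` up to a multiple of `align`, two ways.
fn roundup_mask(x: u64, align: u64) -> u64 {
    (x + (align - 1)) & align.wrapping_neg()   // the form used by the DST field code
}

fn roundup_div(x: u64, align: u64) -> u64 {
    ((x + (align - 1)) / align) * align        // the form used by `roundup`
}

fn main() {
    for &align in &[1u64, 2, 4, 8, 16] {
        for x in 0..64u64 {
            assert_eq!(roundup_mask(x, align), roundup_div(x, align));
        }
    }
    // e.g. an offset of 2 stays at 2 for 2-byte alignment, moves to 8 for 8-byte.
    assert_eq!(roundup_mask(2, 2), 2);
    assert_eq!(roundup_mask(2, 8), 8);
    println!("ok");
}
```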
- - let meta = val.meta; - - - let offset = st.offsets[ix].bytes(); - let unaligned_offset = C_uint(bcx.ccx, offset); - - // Get the alignment of the field - let (_, align) = glue::size_and_align_of_dst(bcx, fty, meta); - - // Bump the unaligned offset up to the appropriate alignment using the - // following expression: - // - // (unaligned offset + (align - 1)) & -align - - // Calculate offset - let align_sub_1 = bcx.sub(align, C_uint(bcx.ccx, 1u64)); - let offset = bcx.and(bcx.add(unaligned_offset, align_sub_1), - bcx.neg(align)); - - debug!("struct_field_ptr: DST field offset: {:?}", Value(offset)); - - // Cast and adjust pointer - let byte_ptr = bcx.pointercast(ptr_val, Type::i8p(bcx.ccx)); - let byte_ptr = bcx.gep(byte_ptr, &[offset]); - - // Finally, cast back to the type expected - let ll_fty = type_of::in_memory_type_of(bcx.ccx, fty); - debug!("struct_field_ptr: Field type is {:?}", ll_fty); - bcx.pointercast(byte_ptr, ll_fty.ptr_to()) -} - -/// Construct a constant value, suitable for initializing a -/// GlobalVariable, given a case and constant values for its fields. -/// Note that this may have a different LLVM type (and different -/// alignment!) from the representation's `type_of`, so it needs a -/// pointer cast before use. -/// -/// The LLVM type system does not directly support unions, and only -/// pointers can be bitcast, so a constant (and, by extension, the -/// GlobalVariable initialized by it) will have a type that can vary -/// depending on which case of an enum it is. -/// -/// To understand the alignment situation, consider `enum E { V64(u64), -/// V32(u32, u32) }` on Windows. The type has 8-byte alignment to -/// accommodate the u64, but `V32(x, y)` would have LLVM type `{i32, -/// i32, i32}`, which is 4-byte aligned. -/// -/// Currently the returned value has the same size as the type, but -/// this could be changed in the future to avoid allocating unnecessary -/// space after values of shorter-than-maximum cases. -pub fn trans_const<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>, discr: Disr, - vals: &[ValueRef]) -> ValueRef { - let l = ccx.layout_of(t); - let dl = &ccx.tcx().data_layout; - match *l { - layout::CEnum { discr: d, min, max, .. } => { - assert_eq!(vals.len(), 0); - assert_discr_in_range(Disr(min), Disr(max), discr); - C_integral(Type::from_integer(ccx, d), discr.0, true) - } - layout::General { discr: d, ref variants, .. } => { - let variant = &variants[discr.0 as usize]; - let lldiscr = C_integral(Type::from_integer(ccx, d), discr.0 as u64, true); - let mut vals_with_discr = vec![lldiscr]; - vals_with_discr.extend_from_slice(vals); - let mut contents = build_const_struct(ccx, &variant, &vals_with_discr[..]); - let needed_padding = l.size(dl).bytes() - variant.stride().bytes(); - if needed_padding > 0 { - contents.push(padding(ccx, needed_padding)); - } - C_struct(ccx, &contents[..], false) - } - layout::UntaggedUnion { ref variants, .. }=> { - assert_eq!(discr, Disr(0)); - let contents = build_const_union(ccx, variants, vals[0]); - C_struct(ccx, &contents, variants.packed) - } - layout::Univariant { ref variant, .. } => { - assert_eq!(discr, Disr(0)); - let contents = build_const_struct(ccx, &variant, vals); - C_struct(ccx, &contents[..], variant.packed) - } - layout::Vector { .. } => { - C_vector(vals) - } - layout::RawNullablePointer { nndiscr, .. 
} => { - let nnty = compute_fields(ccx, t, nndiscr as usize, false)[0]; - if discr.0 == nndiscr { - assert_eq!(vals.len(), 1); - vals[0] - } else { - C_null(type_of::sizing_type_of(ccx, nnty)) - } - } - layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => { - if discr.0 == nndiscr { - C_struct(ccx, &build_const_struct(ccx, &nonnull, vals), false) - } else { - let fields = compute_fields(ccx, t, nndiscr as usize, false); - let vals = fields.iter().map(|&ty| { - // Always use null even if it's not the `discrfield`th - // field; see #8506. - C_null(type_of::sizing_type_of(ccx, ty)) - }).collect::>(); - C_struct(ccx, &build_const_struct(ccx, &nonnull, &vals[..]), false) - } - } - _ => bug!("trans_const: cannot handle type {} repreented as {:#?}", t, l) - } -} - -/// Building structs is a little complicated, because we might need to -/// insert padding if a field's value is less aligned than its type. -/// -/// Continuing the example from `trans_const`, a value of type `(u32, -/// E)` should have the `E` at offset 8, but if that field's -/// initializer is 4-byte aligned then simply translating the tuple as -/// a two-element struct will locate it at offset 4, and accesses to it -/// will read the wrong memory. -fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - st: &layout::Struct, - vals: &[ValueRef]) - -> Vec { - assert_eq!(vals.len(), st.offsets.len()); - - if vals.len() == 0 { - return Vec::new(); - } - - // offset of current value - let mut offset = 0; - let mut cfields = Vec::new(); - cfields.reserve(st.offsets.len()*2); - - let parts = st.field_index_by_increasing_offset().map(|i| { - (&vals[i], st.offsets[i].bytes()) - }); - for (&val, target_offset) in parts { - if offset < target_offset { - cfields.push(padding(ccx, target_offset - offset)); - offset = target_offset; - } - assert!(!is_undef(val)); - cfields.push(val); - offset += machine::llsize_of_alloc(ccx, val_ty(val)); - } - - if offset < st.stride().bytes() { - cfields.push(padding(ccx, st.stride().bytes() - offset)); - } - - cfields -} - -fn build_const_union<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - un: &layout::Union, - field_val: ValueRef) - -> Vec { - let mut cfields = vec![field_val]; - - let offset = machine::llsize_of_alloc(ccx, val_ty(field_val)); - let size = un.stride().bytes(); - if offset != size { - cfields.push(padding(ccx, size - offset)); - } - - cfields -} - -fn padding(ccx: &CrateContext, size: u64) -> ValueRef { - C_undef(Type::array(&Type::i8(ccx), size)) -} - // FIXME this utility routine should be somewhere more general #[inline] fn roundup(x: u64, a: u32) -> u64 { let a = a as u64; ((x + (a - 1)) / a) * a } diff --git a/src/librustc_trans/asm.rs b/src/librustc_trans/asm.rs index 05699fb9de9a5..c95d414701876 100644 --- a/src/librustc_trans/asm.rs +++ b/src/librustc_trans/asm.rs @@ -15,6 +15,7 @@ use base; use common::*; use type_of; use type_::Type; +use builder::Builder; use rustc::hir; use rustc::ty::Ty; @@ -25,7 +26,7 @@ use libc::{c_uint, c_char}; // Take an inline assembly expression and splat it out via LLVM pub fn trans_inline_asm<'a, 'tcx>( - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, ia: &hir::InlineAsm, outputs: Vec<(ValueRef, Ty<'tcx>)>, mut inputs: Vec diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 2806be123a936..4cdde24ed48b5 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -37,8 +37,9 @@ use llvm; use rustc::hir::def_id::{DefId, LOCAL_CRATE}; use middle::lang_items::StartFnLangItem; use 
rustc::ty::subst::Substs; +use rustc::mir::tcx::LvalueTy; use rustc::traits; -use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; +use rustc::ty::{self, Ty, TyCtxt}; use rustc::ty::adjustment::CustomCoerceUnsized; use rustc::dep_graph::{DepNode, WorkProduct}; use rustc::hir::map as hir_map; @@ -47,14 +48,15 @@ use session::config::{self, NoDebugInfo}; use rustc_incremental::IncrementalHashesMap; use session::{self, DataTypeKind, Session}; use abi::{self, Abi, FnType}; +use mir::lvalue::LvalueRef; use adt; use attributes; use builder::Builder; use callee::{Callee}; -use common::{BlockAndBuilder, C_bool, C_bytes_in_context, C_i32, C_uint}; +use common::{C_bool, C_bytes_in_context, C_i32, C_uint}; use collector::{self, TransItemCollectionMode}; use common::{C_struct_in_context, C_u64, C_undef}; -use common::{CrateContext, FunctionContext}; +use common::CrateContext; use common::{fulfill_obligation}; use common::{type_is_zero_size, val_ty}; use common; @@ -161,7 +163,7 @@ pub fn bin_op_to_fcmp_predicate(op: hir::BinOp_) -> llvm::RealPredicate { } pub fn compare_simd_types<'a, 'tcx>( - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, lhs: ValueRef, rhs: ValueRef, t: Ty<'tcx>, @@ -218,7 +220,7 @@ pub fn unsized_info<'ccx, 'tcx>(ccx: &CrateContext<'ccx, 'tcx>, /// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer. pub fn unsize_thin_ptr<'a, 'tcx>( - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, src: ValueRef, src_ty: Ty<'tcx>, dst_ty: Ty<'tcx> @@ -242,7 +244,7 @@ pub fn unsize_thin_ptr<'a, 'tcx>( /// Coerce `src`, which is a reference to a value of type `src_ty`, /// to a value of type `dst_ty` and store the result in `dst` -pub fn coerce_unsized_into<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, +pub fn coerce_unsized_into<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, src: ValueRef, src_ty: Ty<'tcx>, dst: ValueRef, @@ -278,8 +280,8 @@ pub fn coerce_unsized_into<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, monomorphize::field_ty(bcx.tcx(), substs_b, f) }); - let src = adt::MaybeSizedValue::sized(src); - let dst = adt::MaybeSizedValue::sized(dst); + let src = LvalueRef::new_sized_ty(src, src_ty); + let dst = LvalueRef::new_sized_ty(dst, dst_ty); let iter = src_fields.zip(dst_fields).enumerate(); for (i, (src_fty, dst_fty)) in iter { @@ -287,8 +289,8 @@ pub fn coerce_unsized_into<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, continue; } - let src_f = adt::trans_field_ptr(bcx, src_ty, src, Disr(0), i); - let dst_f = adt::trans_field_ptr(bcx, dst_ty, dst, Disr(0), i); + let src_f = src.trans_field_ptr(bcx, i); + let dst_f = dst.trans_field_ptr(bcx, i); if src_fty == dst_fty { memcpy_ty(bcx, dst_f, src_f, src_fty, None); } else { @@ -322,7 +324,7 @@ pub fn custom_coerce_unsize_info<'scx, 'tcx>(scx: &SharedCrateContext<'scx, 'tcx } pub fn cast_shift_expr_rhs( - cx: &BlockAndBuilder, op: hir::BinOp_, lhs: ValueRef, rhs: ValueRef + cx: &Builder, op: hir::BinOp_, lhs: ValueRef, rhs: ValueRef ) -> ValueRef { cast_shift_rhs(op, lhs, rhs, |a, b| cx.trunc(a, b), |a, b| cx.zext(a, b)) } @@ -421,7 +423,7 @@ pub fn load_ty<'a, 'tcx>(b: &Builder<'a, 'tcx>, ptr: ValueRef, t: Ty<'tcx>) -> V /// Helper for storing values in memory. Does the necessary conversion if the in-memory type /// differs from the type used for SSA values. 
-pub fn store_ty<'a, 'tcx>(cx: &BlockAndBuilder<'a, 'tcx>, v: ValueRef, dst: ValueRef, t: Ty<'tcx>) { +pub fn store_ty<'a, 'tcx>(cx: &Builder<'a, 'tcx>, v: ValueRef, dst: ValueRef, t: Ty<'tcx>) { debug!("store_ty: {:?} : {:?} <- {:?}", Value(dst), t, Value(v)); if common::type_is_fat_ptr(cx.ccx, t) { @@ -433,7 +435,7 @@ pub fn store_ty<'a, 'tcx>(cx: &BlockAndBuilder<'a, 'tcx>, v: ValueRef, dst: Valu } } -pub fn store_fat_ptr<'a, 'tcx>(cx: &BlockAndBuilder<'a, 'tcx>, +pub fn store_fat_ptr<'a, 'tcx>(cx: &Builder<'a, 'tcx>, data: ValueRef, extra: ValueRef, dst: ValueRef, @@ -459,7 +461,7 @@ pub fn load_fat_ptr<'a, 'tcx>( (ptr, meta) } -pub fn from_immediate(bcx: &BlockAndBuilder, val: ValueRef) -> ValueRef { +pub fn from_immediate(bcx: &Builder, val: ValueRef) -> ValueRef { if val_ty(val) == Type::i1(bcx.ccx) { bcx.zext(val, Type::i8(bcx.ccx)) } else { @@ -467,7 +469,7 @@ pub fn from_immediate(bcx: &BlockAndBuilder, val: ValueRef) -> ValueRef { } } -pub fn to_immediate(bcx: &BlockAndBuilder, val: ValueRef, ty: Ty) -> ValueRef { +pub fn to_immediate(bcx: &Builder, val: ValueRef, ty: Ty) -> ValueRef { if ty.is_bool() { bcx.trunc(val, Type::i1(bcx.ccx)) } else { @@ -523,11 +525,13 @@ pub fn call_memcpy<'a, 'tcx>(b: &Builder<'a, 'tcx>, b.call(memcpy, &[dst_ptr, src_ptr, size, align, volatile], None); } -pub fn memcpy_ty<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, - dst: ValueRef, - src: ValueRef, - t: Ty<'tcx>, - align: Option) { +pub fn memcpy_ty<'a, 'tcx>( + bcx: &Builder<'a, 'tcx>, + dst: ValueRef, + src: ValueRef, + t: Ty<'tcx>, + align: Option, +) { let ccx = bcx.ccx; if type_is_zero_size(ccx, t) { @@ -553,11 +557,6 @@ pub fn call_memset<'a, 'tcx>(b: &Builder<'a, 'tcx>, b.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None) } -pub fn alloc_ty<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, ty: Ty<'tcx>, name: &str) -> ValueRef { - assert!(!ty.has_param_types()); - bcx.fcx().alloca(type_of::type_of(bcx.ccx, ty), name) -} - pub fn trans_instance<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, instance: Instance<'tcx>) { let _s = if ccx.sess().trans_stats() { let mut instance_name = String::new(); @@ -593,18 +592,17 @@ pub fn trans_instance<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, instance: Instance let fn_ty = FnType::new(ccx, abi, &sig, &[]); - let fcx = FunctionContext::new(ccx, lldecl); let mir = ccx.tcx().item_mir(instance.def); - mir::trans_mir(&fcx, fn_ty, &mir, instance, &sig, abi); + mir::trans_mir(ccx, lldecl, fn_ty, &mir, instance, &sig, abi); } pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, def_id: DefId, substs: &'tcx Substs<'tcx>, disr: Disr, - llfndecl: ValueRef) { - attributes::inline(llfndecl, attributes::InlineAttr::Hint); - attributes::set_frame_pointer_elimination(ccx, llfndecl); + llfn: ValueRef) { + attributes::inline(llfn, attributes::InlineAttr::Hint); + attributes::set_frame_pointer_elimination(ccx, llfn); let ctor_ty = ccx.tcx().item_type(def_id); let ctor_ty = monomorphize::apply_param_substs(ccx.shared(), substs, &ctor_ty); @@ -612,24 +610,29 @@ pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, let sig = ccx.tcx().erase_late_bound_regions_and_normalize(&ctor_ty.fn_sig()); let fn_ty = FnType::new(ccx, Abi::Rust, &sig, &[]); - let fcx = FunctionContext::new(ccx, llfndecl); - let bcx = fcx.get_entry_block(); + let bcx = Builder::new_block(ccx, llfn, "entry-block"); if !fn_ty.ret.is_ignore() { // But if there are no nested returns, we skip the indirection // and have a single retslot let dest = if fn_ty.ret.is_indirect() { - 
get_param(fcx.llfn, 0) + get_param(llfn, 0) } else { // We create an alloca to hold a pointer of type `ret.original_ty` // which will hold the pointer to the right alloca which has the // final ret value - fcx.alloca(fn_ty.ret.memory_ty(ccx), "sret_slot") + bcx.alloca(fn_ty.ret.memory_ty(ccx), "sret_slot") + }; + // Can return unsized value + let mut dest_val = LvalueRef::new_sized_ty(dest, sig.output()); + dest_val.ty = LvalueTy::Downcast { + adt_def: sig.output().ty_adt_def().unwrap(), + substs: substs, + variant_index: disr.0 as usize, }; - let dest_val = adt::MaybeSizedValue::sized(dest); // Can return unsized value let mut llarg_idx = fn_ty.ret.is_indirect() as usize; let mut arg_idx = 0; for (i, arg_ty) in sig.inputs().iter().enumerate() { - let lldestptr = adt::trans_field_ptr(&bcx, sig.output(), dest_val, Disr::from(disr), i); + let lldestptr = dest_val.trans_field_ptr(&bcx, i); let arg = &fn_ty.args[arg_idx]; arg_idx += 1; if common::type_is_fat_ptr(bcx.ccx, arg_ty) { @@ -756,12 +759,7 @@ pub fn maybe_create_entry_wrapper(ccx: &CrateContext) { // `main` should respect same config for frame pointer elimination as rest of code attributes::set_frame_pointer_elimination(ccx, llfn); - let llbb = unsafe { - let name = CString::new("top").unwrap(); - llvm::LLVMAppendBasicBlockInContext(ccx.llcx(), llfn, name.as_ptr()) - }; - let bld = Builder::with_ccx(ccx); - bld.position_at_end(llbb); + let bld = Builder::new_block(ccx, llfn, "top"); debuginfo::gdb::insert_reference_to_gdb_debug_scripts_section_global(ccx, &bld); diff --git a/src/librustc_trans/builder.rs b/src/librustc_trans/builder.rs index 865787f48fc52..cf7f3e9501d1a 100644 --- a/src/librustc_trans/builder.rs +++ b/src/librustc_trans/builder.rs @@ -19,12 +19,17 @@ use machine::llalign_of_pref; use type_::Type; use value::Value; use libc::{c_uint, c_char}; +use rustc::ty::{Ty, TyCtxt, TypeFoldable}; +use rustc::session::Session; +use type_of; use std::borrow::Cow; use std::ffi::CString; use std::ptr; use syntax_pos::Span; +// All Builders must have an llfn associated with them +#[must_use] pub struct Builder<'a, 'tcx: 'a> { pub llbuilder: BuilderRef, pub ccx: &'a CrateContext<'a, 'tcx>, @@ -46,6 +51,20 @@ fn noname() -> *const c_char { } impl<'a, 'tcx> Builder<'a, 'tcx> { + pub fn new_block<'b>(ccx: &'a CrateContext<'a, 'tcx>, llfn: ValueRef, name: &'b str) -> Self { + let builder = Builder::with_ccx(ccx); + let llbb = unsafe { + let name = CString::new(name).unwrap(); + llvm::LLVMAppendBasicBlockInContext( + ccx.llcx(), + llfn, + name.as_ptr() + ) + }; + builder.position_at_end(llbb); + builder + } + pub fn with_ccx(ccx: &'a CrateContext<'a, 'tcx>) -> Self { // Create a fresh builder from the crate context. 
let llbuilder = unsafe { @@ -57,6 +76,30 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } } + pub fn build_sibling_block<'b>(&self, name: &'b str) -> Builder<'a, 'tcx> { + Builder::new_block(self.ccx, self.llfn(), name) + } + + pub fn sess(&self) -> &Session { + self.ccx.sess() + } + + pub fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> { + self.ccx.tcx() + } + + pub fn llfn(&self) -> ValueRef { + unsafe { + llvm::LLVMGetBasicBlockParent(self.llbb()) + } + } + + pub fn llbb(&self) -> BasicBlockRef { + unsafe { + llvm::LLVMGetInsertBlock(self.llbuilder) + } + } + fn count_insn(&self, category: &str) { if self.ccx.sess().trans_stats() { self.ccx.stats().n_llvm_insns.set(self.ccx.stats().n_llvm_insns.get() + 1); @@ -435,6 +478,19 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } } + pub fn alloca(&self, ty: Type, name: &str) -> ValueRef { + let builder = Builder::with_ccx(self.ccx); + builder.position_at_start(unsafe { + llvm::LLVMGetFirstBasicBlock(self.llfn()) + }); + builder.dynamic_alloca(ty, name) + } + + pub fn alloca_ty(&self, ty: Ty<'tcx>, name: &str) -> ValueRef { + assert!(!ty.has_param_types()); + self.alloca(type_of::type_of(self.ccx, ty), name) + } + pub fn dynamic_alloca(&self, ty: Type, name: &str) -> ValueRef { self.count_insn("alloca"); unsafe { diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs index 1abe25ea6073e..257d6c01e4a65 100644 --- a/src/librustc_trans/callee.rs +++ b/src/librustc_trans/callee.rs @@ -23,11 +23,10 @@ use rustc::traits; use abi::{Abi, FnType}; use attributes; use base; -use base::*; -use common::{ - self, CrateContext, FunctionContext, SharedCrateContext -}; -use adt::MaybeSizedValue; +use builder::Builder; +use common::{self, CrateContext, SharedCrateContext}; +use cleanup::CleanupScope; +use mir::lvalue::LvalueRef; use consts; use declare; use value::Value; @@ -330,8 +329,7 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( attributes::set_frame_pointer_elimination(ccx, lloncefn); let orig_fn_ty = fn_ty; - let fcx = FunctionContext::new(ccx, lloncefn); - let mut bcx = fcx.get_entry_block(); + let mut bcx = Builder::new_block(ccx, lloncefn, "entry-block"); let callee = Callee { data: Fn(llreffn), @@ -340,7 +338,7 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( // the first argument (`self`) will be the (by value) closure env. - let mut llargs = get_params(fcx.llfn); + let mut llargs = get_params(lloncefn); let fn_ret = callee.ty.fn_ret(); let fn_ty = callee.direct_fn_type(bcx.ccx, &[]); let self_idx = fn_ty.ret.is_indirect() as usize; @@ -348,7 +346,7 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( let llenv = if env_arg.is_indirect() { llargs[self_idx] } else { - let scratch = alloc_ty(&bcx, closure_ty, "self"); + let scratch = bcx.alloca_ty(closure_ty, "self"); let mut llarg_idx = self_idx; env_arg.store_fn_arg(&bcx, &mut llarg_idx, scratch); scratch @@ -365,12 +363,14 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( // Call the by-ref closure body with `self` in a cleanup scope, // to drop `self` when the body returns, or in case it unwinds. 
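Two design points in the hunk above are easy to miss: `Builder::alloca` positions a throwaway builder at the start of the function's first basic block before emitting, so every stack slot lands in the entry block where LLVM treats it as a static alloca, and `build_sibling_block` appends a new block to the same `llfn` as the current one. A hedged sketch of the intended usage; `ccx`, `llfn`, and `some_ty` are hypothetical values assumed to be in scope:

    // Sketch of the usage pattern for the methods introduced in this hunk.
    let bcx = Builder::new_block(ccx, llfn, "entry-block");
    let slot = bcx.alloca(Type::i8p(ccx), "slot"); // emitted into the entry block
    let tmp = bcx.alloca_ty(some_ty, "tmp");       // asserts !some_ty.has_param_types()
    let next = bcx.build_sibling_block("next");    // fresh block on the same function
    bcx.br(next.llbb());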
- let self_scope = fcx.schedule_drop_mem(MaybeSizedValue::sized(llenv), closure_ty); + let self_scope = CleanupScope::schedule_drop_mem( + &bcx, LvalueRef::new_sized_ty(llenv, closure_ty) + ); let llfn = callee.reify(bcx.ccx); let llret; if let Some(landing_pad) = self_scope.landing_pad { - let normal_bcx = bcx.fcx().build_new_block("normal-return"); + let normal_bcx = bcx.build_sibling_block("normal-return"); llret = bcx.invoke(llfn, &llargs[..], normal_bcx.llbb(), landing_pad, None); bcx = normal_bcx; } else { @@ -489,10 +489,9 @@ fn trans_fn_pointer_shim<'a, 'tcx>( let llfn = declare::define_internal_fn(ccx, &function_name, tuple_fn_ty); attributes::set_frame_pointer_elimination(ccx, llfn); // - let fcx = FunctionContext::new(ccx, llfn); - let bcx = fcx.get_entry_block(); + let bcx = Builder::new_block(ccx, llfn, "entry-block"); - let mut llargs = get_params(fcx.llfn); + let mut llargs = get_params(llfn); let self_arg = llargs.remove(fn_ty.ret.is_indirect() as usize); let llfnpointer = llfnpointer.unwrap_or_else(|| { diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs index 4e59ea3f6c5ed..5d89a67d3fd80 100644 --- a/src/librustc_trans/cleanup.rs +++ b/src/librustc_trans/cleanup.rs @@ -20,11 +20,12 @@ use llvm::BasicBlockRef; use base; -use adt::MaybeSizedValue; -use common::{BlockAndBuilder, FunctionContext, Funclet}; +use mir::lvalue::LvalueRef; +use rustc::mir::tcx::LvalueTy; +use builder::Builder; +use common::Funclet; use glue; use type_::Type; -use rustc::ty::Ty; pub struct CleanupScope<'tcx> { // Cleanup to run upon scope exit. @@ -36,14 +37,13 @@ pub struct CleanupScope<'tcx> { #[derive(Copy, Clone)] pub struct DropValue<'tcx> { - val: MaybeSizedValue, - ty: Ty<'tcx>, + val: LvalueRef<'tcx>, skip_dtor: bool, } impl<'tcx> DropValue<'tcx> { - fn trans<'a>(&self, funclet: Option<&'a Funclet>, bcx: &BlockAndBuilder<'a, 'tcx>) { - glue::call_drop_glue(bcx, self.val, self.ty, self.skip_dtor, funclet) + fn trans<'a>(&self, funclet: Option<&'a Funclet>, bcx: &Builder<'a, 'tcx>) { + glue::call_drop_glue(bcx, self.val, self.skip_dtor, funclet) } /// Creates a landing pad for the top scope. The landing pad will perform all cleanups necessary @@ -52,13 +52,13 @@ impl<'tcx> DropValue<'tcx> { /// landing_pad -> ... cleanups ... -> [resume] /// /// This should only be called once per function, as it creates an alloca for the landingpad. - fn get_landing_pad<'a>(&self, fcx: &FunctionContext<'a, 'tcx>) -> BasicBlockRef { + fn get_landing_pad<'a>(&self, bcx: &Builder<'a, 'tcx>) -> BasicBlockRef { debug!("get_landing_pad"); - let bcx = fcx.build_new_block("cleanup_unwind"); + let bcx = bcx.build_sibling_block("cleanup_unwind"); let llpersonality = bcx.ccx.eh_personality(); bcx.set_personality_fn(llpersonality); - if base::wants_msvc_seh(fcx.ccx.sess()) { + if base::wants_msvc_seh(bcx.sess()) { let pad = bcx.cleanup_pad(None, &[]); let funclet = Some(Funclet::new(pad)); self.trans(funclet.as_ref(), &bcx); @@ -68,10 +68,10 @@ impl<'tcx> DropValue<'tcx> { // The landing pad return type (the type being propagated). Not sure // what this represents but it's determined by the personality // function and this is what the EH proposal example uses. 
- let llretty = Type::struct_(fcx.ccx, &[Type::i8p(fcx.ccx), Type::i32(fcx.ccx)], false); + let llretty = Type::struct_(bcx.ccx, &[Type::i8p(bcx.ccx), Type::i32(bcx.ccx)], false); // The only landing pad clause will be 'cleanup' - let llretval = bcx.landing_pad(llretty, llpersonality, 1, bcx.fcx().llfn); + let llretval = bcx.landing_pad(llretty, llpersonality, 1, bcx.llfn()); // The landing pad block is a cleanup bcx.set_cleanup(llretval); @@ -92,17 +92,23 @@ impl<'tcx> DropValue<'tcx> { } } -impl<'a, 'tcx> FunctionContext<'a, 'tcx> { +impl<'a, 'tcx> CleanupScope<'tcx> { /// Schedules a (deep) drop of `val`, which is a pointer to an instance of `ty` - pub fn schedule_drop_mem(&self, val: MaybeSizedValue, ty: Ty<'tcx>) -> CleanupScope<'tcx> { - if !self.ccx.shared().type_needs_drop(ty) { return CleanupScope::noop(); } + pub fn schedule_drop_mem( + bcx: &Builder<'a, 'tcx>, val: LvalueRef<'tcx> + ) -> CleanupScope<'tcx> { + if let LvalueTy::Downcast { .. } = val.ty { + bug!("Cannot drop downcast ty yet"); + } + if !bcx.ccx.shared().type_needs_drop(val.ty.to_ty(bcx.tcx())) { + return CleanupScope::noop(); + } let drop = DropValue { val: val, - ty: ty, skip_dtor: false, }; - CleanupScope::new(self, drop) + CleanupScope::new(bcx, drop) } /// Issue #23611: Schedules a (deep) drop of the contents of @@ -110,28 +116,31 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> { /// `ty`. The scheduled code handles extracting the discriminant /// and dropping the contents associated with that variant /// *without* executing any associated drop implementation. - pub fn schedule_drop_adt_contents(&self, val: MaybeSizedValue, ty: Ty<'tcx>) - -> CleanupScope<'tcx> { + pub fn schedule_drop_adt_contents( + bcx: &Builder<'a, 'tcx>, val: LvalueRef<'tcx> + ) -> CleanupScope<'tcx> { + if let LvalueTy::Downcast { .. } = val.ty { + bug!("Cannot drop downcast ty yet"); + } // `if` below could be "!contents_needs_drop"; skipping drop // is just an optimization, so sound to be conservative. - if !self.ccx.shared().type_needs_drop(ty) { return CleanupScope::noop(); } + if !bcx.ccx.shared().type_needs_drop(val.ty.to_ty(bcx.tcx())) { + return CleanupScope::noop(); + } let drop = DropValue { val: val, - ty: ty, skip_dtor: true, }; - CleanupScope::new(self, drop) + CleanupScope::new(bcx, drop) } -} -impl<'tcx> CleanupScope<'tcx> { - fn new<'a>(fcx: &FunctionContext<'a, 'tcx>, drop_val: DropValue<'tcx>) -> CleanupScope<'tcx> { + fn new(bcx: &Builder<'a, 'tcx>, drop_val: DropValue<'tcx>) -> CleanupScope<'tcx> { CleanupScope { cleanup: Some(drop_val), - landing_pad: if !fcx.ccx.sess().no_landing_pads() { - Some(drop_val.get_landing_pad(fcx)) + landing_pad: if !bcx.sess().no_landing_pads() { + Some(drop_val.get_landing_pad(bcx)) } else { None }, @@ -145,7 +154,7 @@ impl<'tcx> CleanupScope<'tcx> { } } - pub fn trans<'a>(self, bcx: &'a BlockAndBuilder<'a, 'tcx>) { + pub fn trans(self, bcx: &'a Builder<'a, 'tcx>) { if let Some(cleanup) = self.cleanup { cleanup.trans(None, &bcx); } diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index 7e7bd15dc6e5a..13163518f941e 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -12,11 +12,9 @@ //! Code that is useful in various trans modules. 
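With `FunctionContext` gone, cleanup scheduling hangs off the builder instead: `CleanupScope::schedule_drop_mem` takes a `Builder` plus an `LvalueRef`, returns a no-op scope when the type needs no drop, and otherwise pre-builds a landing pad. A condensed, illustrative sketch of how such a scope is typically consumed around a call; exact call sites vary, and `llenv`, `env_ty`, `llfn`, and `llargs` are placeholders:

    // Illustrative only: schedule a drop, call with or without a landing pad,
    // then run the scheduled drop on the normal path (bcx is assumed mutable).
    let scope = CleanupScope::schedule_drop_mem(&bcx, LvalueRef::new_sized_ty(llenv, env_ty));
    let llret = if let Some(landing_pad) = scope.landing_pad {
        let normal = bcx.build_sibling_block("normal-return");
        let r = bcx.invoke(llfn, &llargs, normal.llbb(), landing_pad, None);
        bcx = normal;
        r
    } else {
        bcx.call(llfn, &llargs, None)
    };
    scope.trans(&bcx);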
-use session::Session; use llvm; -use llvm::{ValueRef, BasicBlockRef, ContextRef, TypeKind}; +use llvm::{ValueRef, ContextRef, TypeKind}; use llvm::{True, False, Bool, OperandBundleDef}; -use rustc::hir::def::Def; use rustc::hir::def_id::DefId; use rustc::hir::map::DefPathData; use rustc::util::common::MemoizationMap; @@ -37,11 +35,9 @@ use rustc::hir; use libc::{c_uint, c_char}; use std::borrow::Cow; use std::iter; -use std::ops::Deref; -use std::ffi::CString; use syntax::ast; -use syntax::symbol::{Symbol, InternedString}; +use syntax::symbol::InternedString; use syntax_pos::Span; use rustc_i128::u128; @@ -172,191 +168,6 @@ pub fn type_is_zero_size<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) - * */ -use Disr; - -/// The concrete version of ty::FieldDef. The name is the field index if -/// the field is numeric. -pub struct Field<'tcx>(pub ast::Name, pub Ty<'tcx>); - -/// The concrete version of ty::VariantDef -pub struct VariantInfo<'tcx> { - pub discr: Disr, - pub fields: Vec> -} - -impl<'a, 'tcx> VariantInfo<'tcx> { - pub fn from_ty(tcx: TyCtxt<'a, 'tcx, 'tcx>, - ty: Ty<'tcx>, - opt_def: Option) - -> Self - { - match ty.sty { - ty::TyAdt(adt, substs) => { - let variant = match opt_def { - None => adt.struct_variant(), - Some(def) => adt.variant_of_def(def) - }; - - VariantInfo { - discr: Disr::from(variant.disr_val), - fields: variant.fields.iter().map(|f| { - Field(f.name, monomorphize::field_ty(tcx, substs, f)) - }).collect() - } - } - - ty::TyTuple(ref v) => { - VariantInfo { - discr: Disr(0), - fields: v.iter().enumerate().map(|(i, &t)| { - Field(Symbol::intern(&i.to_string()), t) - }).collect() - } - } - - _ => { - bug!("cannot get field types from the type {:?}", ty); - } - } - } -} - -// Function context. Every LLVM function we create will have one of these. -pub struct FunctionContext<'a, 'tcx: 'a> { - // The ValueRef returned from a call to llvm::LLVMAddFunction; the - // address of the first instruction in the sequence of - // instructions for this function that will go in the .text - // section of the executable we're generating. - pub llfn: ValueRef, - - // A marker for the place where we want to insert the function's static - // allocas, so that LLVM will coalesce them into a single alloca call. - alloca_insert_pt: Option, - - // This function's enclosing crate context. - pub ccx: &'a CrateContext<'a, 'tcx>, - - alloca_builder: Builder<'a, 'tcx>, -} - -impl<'a, 'tcx> FunctionContext<'a, 'tcx> { - /// Create a function context for the given function. - /// Call FunctionContext::get_entry_block for the first entry block. - pub fn new(ccx: &'a CrateContext<'a, 'tcx>, llfndecl: ValueRef) -> FunctionContext<'a, 'tcx> { - let mut fcx = FunctionContext { - llfn: llfndecl, - alloca_insert_pt: None, - ccx: ccx, - alloca_builder: Builder::with_ccx(ccx), - }; - - let val = { - let entry_bcx = fcx.build_new_block("entry-block"); - let val = entry_bcx.load(C_null(Type::i8p(ccx))); - fcx.alloca_builder.position_at_start(entry_bcx.llbb()); - val - }; - - // Use a dummy instruction as the insertion point for all allocas. - // This is later removed in the drop of FunctionContext. 
- fcx.alloca_insert_pt = Some(val); - - fcx - } - - pub fn get_entry_block(&'a self) -> BlockAndBuilder<'a, 'tcx> { - BlockAndBuilder::new(unsafe { - llvm::LLVMGetFirstBasicBlock(self.llfn) - }, self) - } - - pub fn new_block(&'a self, name: &str) -> BasicBlockRef { - unsafe { - let name = CString::new(name).unwrap(); - llvm::LLVMAppendBasicBlockInContext( - self.ccx.llcx(), - self.llfn, - name.as_ptr() - ) - } - } - - pub fn build_new_block(&'a self, name: &str) -> BlockAndBuilder<'a, 'tcx> { - BlockAndBuilder::new(self.new_block(name), self) - } - - pub fn alloca(&self, ty: Type, name: &str) -> ValueRef { - self.alloca_builder.dynamic_alloca(ty, name) - } -} - -impl<'a, 'tcx> Drop for FunctionContext<'a, 'tcx> { - fn drop(&mut self) { - unsafe { - llvm::LLVMInstructionEraseFromParent(self.alloca_insert_pt.unwrap()); - } - } -} - -#[must_use] -pub struct BlockAndBuilder<'a, 'tcx: 'a> { - // The BasicBlockRef returned from a call to - // llvm::LLVMAppendBasicBlock(llfn, name), which adds a basic - // block to the function pointed to by llfn. We insert - // instructions into that block by way of this block context. - // The block pointing to this one in the function's digraph. - llbb: BasicBlockRef, - - // The function context for the function to which this block is - // attached. - fcx: &'a FunctionContext<'a, 'tcx>, - - builder: Builder<'a, 'tcx>, -} - -impl<'a, 'tcx> BlockAndBuilder<'a, 'tcx> { - pub fn new(llbb: BasicBlockRef, fcx: &'a FunctionContext<'a, 'tcx>) -> Self { - let builder = Builder::with_ccx(fcx.ccx); - // Set the builder's position to this block's end. - builder.position_at_end(llbb); - BlockAndBuilder { - llbb: llbb, - fcx: fcx, - builder: builder, - } - } - - pub fn at_start(&self, f: F) -> R - where F: FnOnce(&BlockAndBuilder<'a, 'tcx>) -> R - { - self.position_at_start(self.llbb); - let r = f(self); - self.position_at_end(self.llbb); - r - } - - pub fn fcx(&self) -> &'a FunctionContext<'a, 'tcx> { - self.fcx - } - pub fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> { - self.ccx.tcx() - } - pub fn sess(&self) -> &'a Session { - self.ccx.sess() - } - - pub fn llbb(&self) -> BasicBlockRef { - self.llbb - } -} - -impl<'a, 'tcx> Deref for BlockAndBuilder<'a, 'tcx> { - type Target = Builder<'a, 'tcx>; - fn deref(&self) -> &Self::Target { - &self.builder - } -} - /// A structure representing an active landing pad for the duration of a basic /// block. /// @@ -725,7 +536,7 @@ pub fn langcall(tcx: TyCtxt, // of Java. (See related discussion on #1877 and #10183.) 
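The `FunctionContext`/`BlockAndBuilder` pair deleted here has no one-for-one replacement; its surface area folds into the `Builder` methods added earlier in this patch. A quick reference of the mechanical renames as they appear across these hunks:

    // Old (removed above)                        New (Builder-based)
    // FunctionContext::new(ccx, llfn)
    //     + fcx.get_entry_block()         ->     Builder::new_block(ccx, llfn, "entry-block")
    // bcx.fcx().build_new_block(name)     ->     bcx.build_sibling_block(name)
    // fcx.alloca(llty, name)              ->     bcx.alloca(llty, name)
    // base::alloc_ty(&bcx, ty, name)      ->     bcx.alloca_ty(ty, name)
    // bcx.fcx().llfn / fcx.llfn           ->     bcx.llfn()
    // fcx.ccx.sess(), bcx.ccx.tcx()       ->     bcx.sess(), bcx.tcx()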
pub fn build_unchecked_lshift<'a, 'tcx>( - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, lhs: ValueRef, rhs: ValueRef ) -> ValueRef { @@ -736,7 +547,7 @@ pub fn build_unchecked_lshift<'a, 'tcx>( } pub fn build_unchecked_rshift<'a, 'tcx>( - bcx: &BlockAndBuilder<'a, 'tcx>, lhs_t: Ty<'tcx>, lhs: ValueRef, rhs: ValueRef + bcx: &Builder<'a, 'tcx>, lhs_t: Ty<'tcx>, lhs: ValueRef, rhs: ValueRef ) -> ValueRef { let rhs = base::cast_shift_expr_rhs(bcx, hir::BinOp_::BiShr, lhs, rhs); // #1877, #10183: Ensure that input is always valid @@ -749,13 +560,13 @@ pub fn build_unchecked_rshift<'a, 'tcx>( } } -fn shift_mask_rhs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, rhs: ValueRef) -> ValueRef { +fn shift_mask_rhs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, rhs: ValueRef) -> ValueRef { let rhs_llty = val_ty(rhs); bcx.and(rhs, shift_mask_val(bcx, rhs_llty, rhs_llty, false)) } pub fn shift_mask_val<'a, 'tcx>( - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, llty: Type, mask_llty: Type, invert: bool diff --git a/src/librustc_trans/debuginfo/create_scope_map.rs b/src/librustc_trans/debuginfo/create_scope_map.rs index f5a8eeacf38ad..c6f8ba7b6dc78 100644 --- a/src/librustc_trans/debuginfo/create_scope_map.rs +++ b/src/librustc_trans/debuginfo/create_scope_map.rs @@ -14,7 +14,7 @@ use super::utils::{DIB, span_start}; use llvm; use llvm::debuginfo::{DIScope, DISubprogram}; -use common::{CrateContext, FunctionContext}; +use common::CrateContext; use rustc::mir::{Mir, VisibilityScope}; use libc::c_uint; @@ -44,7 +44,7 @@ impl MirDebugScope { /// Produce DIScope DIEs for each MIR Scope which has variables defined in it. /// If debuginfo is disabled, the returned vector is empty. -pub fn create_mir_scopes(fcx: &FunctionContext, mir: &Mir, debug_context: &FunctionDebugContext) +pub fn create_mir_scopes(ccx: &CrateContext, mir: &Mir, debug_context: &FunctionDebugContext) -> IndexVec { let null_scope = MirDebugScope { scope_metadata: ptr::null_mut(), @@ -71,7 +71,7 @@ pub fn create_mir_scopes(fcx: &FunctionContext, mir: &Mir, debug_context: &Funct // Instantiate all scopes. for idx in 0..mir.visibility_scopes.len() { let scope = VisibilityScope::new(idx); - make_mir_scope(fcx.ccx, &mir, &has_variables, fn_metadata, scope, &mut scopes); + make_mir_scope(ccx, &mir, &has_variables, fn_metadata, scope, &mut scopes); } scopes diff --git a/src/librustc_trans/debuginfo/doc.rs b/src/librustc_trans/debuginfo/doc.rs index bcf5eb9920076..7a739071506db 100644 --- a/src/librustc_trans/debuginfo/doc.rs +++ b/src/librustc_trans/debuginfo/doc.rs @@ -45,7 +45,7 @@ //! //! All private state used by the module is stored within either the //! CrateDebugContext struct (owned by the CrateContext) or the -//! FunctionDebugContext (owned by the FunctionContext). +//! FunctionDebugContext (owned by the MirContext). //! //! This file consists of three conceptual sections: //! 1. 
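These helpers keep masking the right-hand side because an LLVM shift whose amount is not smaller than the operand's bit width produces an undefined result; masking with `bits - 1` makes the operation total (the #1877/#10183 discussions referenced in the comment above). A standalone, plain-Rust illustration of the rule `shift_mask_rhs` applies:

    // For a 32-bit value the mask is 31 (bit width - 1): in-range shift amounts
    // are unchanged, out-of-range ones wrap instead of being undefined.
    fn masked_shl(x: u32, s: u32) -> u32 {
        x << (s & 31)
    }

    fn main() {
        assert_eq!(masked_shl(1, 4), 16);  // normal case
        assert_eq!(masked_shl(1, 36), 16); // 36 & 31 == 4
    }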
The public interface of the module diff --git a/src/librustc_trans/debuginfo/mod.rs b/src/librustc_trans/debuginfo/mod.rs index 86099d241df68..9117f49cf3ea5 100644 --- a/src/librustc_trans/debuginfo/mod.rs +++ b/src/librustc_trans/debuginfo/mod.rs @@ -27,7 +27,8 @@ use rustc::hir::def_id::DefId; use rustc::ty::subst::Substs; use abi::Abi; -use common::{CrateContext, BlockAndBuilder}; +use common::CrateContext; +use builder::Builder; use monomorphize::{self, Instance}; use rustc::ty::{self, Ty}; use rustc::mir; @@ -423,7 +424,7 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, } } -pub fn declare_local<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, +pub fn declare_local<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, dbg_context: &FunctionDebugContext, variable_name: ast::Name, variable_type: Ty<'tcx>, diff --git a/src/librustc_trans/debuginfo/source_loc.rs b/src/librustc_trans/debuginfo/source_loc.rs index e02c8be19a2f4..e99e26261a3a1 100644 --- a/src/librustc_trans/debuginfo/source_loc.rs +++ b/src/librustc_trans/debuginfo/source_loc.rs @@ -38,7 +38,7 @@ pub fn set_source_location( }; let dbg_loc = if function_debug_context.source_locations_enabled.get() { - debug!("set_source_location: {}", builder.ccx.sess().codemap().span_to_string(span)); + debug!("set_source_location: {}", builder.sess().codemap().span_to_string(span)); let loc = span_start(builder.ccx, span); InternalDebugLocation::new(scope, loc.line, loc.col.to_usize()) } else { diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index 62141369caec1..4fe07c9b86abf 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -13,6 +13,7 @@ // Code relating to drop glue. use std; +use std::ptr; use std::iter; use llvm; @@ -20,11 +21,14 @@ use llvm::{ValueRef, get_param}; use middle::lang_items::BoxFreeFnLangItem; use rustc::ty::subst::{Substs}; use rustc::traits; -use rustc::ty::{self, AdtKind, Ty, TypeFoldable}; +use rustc::ty::{self, layout, AdtDef, AdtKind, Ty, TypeFoldable}; use rustc::ty::subst::Kind; -use adt::{self, MaybeSizedValue}; +use rustc::mir::tcx::LvalueTy; +use mir::lvalue::LvalueRef; +use adt; use base::*; use callee::Callee; +use cleanup::CleanupScope; use common::*; use machine::*; use monomorphize; @@ -34,15 +38,12 @@ use type_of::{type_of, sizing_type_of, align_of}; use type_::Type; use value::Value; use Disr; -use cleanup::CleanupScope; +use builder::Builder; use syntax_pos::DUMMY_SP; -pub fn trans_exchange_free_ty<'a, 'tcx>( - bcx: &BlockAndBuilder<'a, 'tcx>, - ptr: MaybeSizedValue, - content_ty: Ty<'tcx> -) { +pub fn trans_exchange_free_ty<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, ptr: LvalueRef<'tcx>) { + let content_ty = ptr.ty.to_ty(bcx.tcx()); let def_id = langcall(bcx.tcx(), None, "", BoxFreeFnLangItem); let substs = bcx.tcx().mk_substs(iter::once(Kind::from(content_ty))); let callee = Callee::def(bcx.ccx, def_id, substs); @@ -50,7 +51,7 @@ pub fn trans_exchange_free_ty<'a, 'tcx>( let fn_ty = callee.direct_fn_type(bcx.ccx, &[]); let llret = bcx.call(callee.reify(bcx.ccx), - &[ptr.value, ptr.meta][..1 + ptr.has_meta() as usize], None); + &[ptr.llval, ptr.llextra][..1 + ptr.has_extra() as usize], None); fn_ty.apply_attrs_callsite(llret); } @@ -93,17 +94,17 @@ pub fn get_drop_glue_type<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, t: Ty<'t } } -fn drop_ty<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, args: MaybeSizedValue, t: Ty<'tcx>) { - call_drop_glue(bcx, args, t, false, None) +fn drop_ty<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, args: LvalueRef<'tcx>) { + call_drop_glue(bcx, 
args, false, None) } pub fn call_drop_glue<'a, 'tcx>( - bcx: &BlockAndBuilder<'a, 'tcx>, - mut args: MaybeSizedValue, - t: Ty<'tcx>, + bcx: &Builder<'a, 'tcx>, + mut args: LvalueRef<'tcx>, skip_dtor: bool, funclet: Option<&'a Funclet>, ) { + let t = args.ty.to_ty(bcx.tcx()); // NB: v is an *alias* of type t here, not a direct value. debug!("call_drop_glue(t={:?}, skip_dtor={})", t, skip_dtor); if bcx.ccx.shared().type_needs_drop(t) { @@ -116,11 +117,11 @@ pub fn call_drop_glue<'a, 'tcx>( let glue = get_drop_glue_core(ccx, g); let glue_type = get_drop_glue_type(ccx.shared(), t); if glue_type != t { - args.value = bcx.pointercast(args.value, type_of(ccx, glue_type).ptr_to()); + args.llval = bcx.pointercast(args.llval, type_of(ccx, glue_type).ptr_to()); } // No drop-hint ==> call standard drop glue - bcx.call(glue, &[args.value, args.meta][..1 + args.has_meta() as usize], + bcx.call(glue, &[args.llval, args.llextra][..1 + args.has_extra() as usize], funclet.map(|b| b.bundle())); } } @@ -173,8 +174,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi assert_eq!(g.ty(), get_drop_glue_type(ccx.shared(), g.ty())); let (llfn, _) = ccx.drop_glues().borrow().get(&g).unwrap().clone(); - let fcx = FunctionContext::new(ccx, llfn); - let mut bcx = fcx.get_entry_block(); + let mut bcx = Builder::new_block(ccx, llfn, "entry-block"); ccx.stats().n_glues_created.set(ccx.stats().n_glues_created.get() + 1); // All glue functions take values passed *by alias*; this is a @@ -194,9 +194,9 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi let value = get_param(llfn, 0); let ptr = if ccx.shared().type_is_sized(t) { - MaybeSizedValue::sized(value) + LvalueRef::new_sized_ty(value, t) } else { - MaybeSizedValue::unsized_(value, get_param(llfn, 1)) + LvalueRef::new_unsized_ty(value, get_param(llfn, 1), t) }; let skip_dtor = match g { @@ -211,14 +211,14 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi // a safe-guard, assert TyBox not used with TyContents. assert!(!skip_dtor); let ptr = if !bcx.ccx.shared().type_is_sized(content_ty) { - let llbox = bcx.load(get_dataptr(&bcx, ptr.value)); - let info = bcx.load(get_meta(&bcx, ptr.value)); - MaybeSizedValue::unsized_(llbox, info) + let llbox = bcx.load(get_dataptr(&bcx, ptr.llval)); + let info = bcx.load(get_meta(&bcx, ptr.llval)); + LvalueRef::new_unsized_ty(llbox, info, content_ty) } else { - MaybeSizedValue::sized(bcx.load(ptr.value)) + LvalueRef::new_sized_ty(bcx.load(ptr.llval), content_ty) }; - drop_ty(&bcx, ptr, content_ty); - trans_exchange_free_ty(&bcx, ptr, content_ty); + drop_ty(&bcx, ptr); + trans_exchange_free_ty(&bcx, ptr); bcx } ty::TyDynamic(..) => { @@ -226,8 +226,8 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi // versus without calling Drop::drop. Assert caller is // okay with always calling the Drop impl, if any. assert!(!skip_dtor); - let dtor = bcx.load(ptr.meta); - bcx.call(dtor, &[ptr.value], None); + let dtor = bcx.load(ptr.llextra); + bcx.call(dtor, &[ptr.llval], None); bcx } ty::TyAdt(def, ..) if def.dtor_kind().is_present() && !skip_dtor => { @@ -245,7 +245,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi // Issue #23611: schedule cleanup of contents, re-inspecting the // discriminant (if any) in case of variant swap in drop code. 
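The signature change here is the pattern for the rest of the file: `LvalueRef` now bundles the value, the optional fat-pointer metadata, and the type, so callers no longer pass a separate `Ty`. A minimal, illustrative sketch of the constructors and of the maybe-fat argument slicing used above; `llptr`, `llinfo`, and `ty` are hypothetical:

    // Illustrative only.
    let thin = LvalueRef::new_sized_ty(llptr, ty);          // llextra stays null
    let fat = LvalueRef::new_unsized_ty(llptr, llinfo, ty); // carries metadata
    // Pass one or two arguments depending on whether metadata is present:
    let args = &[fat.llval, fat.llextra][..1 + fat.has_extra() as usize];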
let contents_scope = if !shallow_drop { - bcx.fcx().schedule_drop_adt_contents(ptr, t) + CleanupScope::schedule_drop_adt_contents(&bcx, ptr) } else { CleanupScope::noop() }; @@ -262,9 +262,9 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi let callee = Callee::def(bcx.ccx, dtor_did, vtbl.substs); let fn_ty = callee.direct_fn_type(bcx.ccx, &[]); let llret; - let args = &[ptr.value, ptr.meta][..1 + ptr.has_meta() as usize]; + let args = &[ptr.llval, ptr.llextra][..1 + ptr.has_extra() as usize]; if let Some(landing_pad) = contents_scope.landing_pad { - let normal_bcx = bcx.fcx().build_new_block("normal-return"); + let normal_bcx = bcx.build_sibling_block("normal-return"); llret = bcx.invoke(callee.reify(ccx), args, normal_bcx.llbb(), landing_pad, None); bcx = normal_bcx; } else { @@ -279,7 +279,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi } _ => { if bcx.ccx.shared().type_needs_drop(t) { - drop_structural_ty(bcx, ptr, t) + drop_structural_ty(bcx, ptr) } else { bcx } @@ -288,8 +288,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi bcx.ret_void(); } -pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, - t: Ty<'tcx>, info: ValueRef) +pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, info: ValueRef) -> (ValueRef, ValueRef) { debug!("calculate size of DST: {}; with lost info: {:?}", t, Value(info)); @@ -397,60 +396,64 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, } // Iterates through the elements of a structural type, dropping them. -fn drop_structural_ty<'a, 'tcx>(cx: BlockAndBuilder<'a, 'tcx>, - ptr: MaybeSizedValue, - t: Ty<'tcx>) - -> BlockAndBuilder<'a, 'tcx> { - fn iter_variant<'a, 'tcx>(cx: &BlockAndBuilder<'a, 'tcx>, - t: Ty<'tcx>, - av: adt::MaybeSizedValue, - variant: &'tcx ty::VariantDef, - substs: &Substs<'tcx>) { +fn drop_structural_ty<'a, 'tcx>( + cx: Builder<'a, 'tcx>, + mut ptr: LvalueRef<'tcx> +) -> Builder<'a, 'tcx> { + fn iter_variant_fields<'a, 'tcx>( + cx: &'a Builder<'a, 'tcx>, + av: LvalueRef<'tcx>, + adt_def: &'tcx AdtDef, + variant_index: usize, + substs: &'tcx Substs<'tcx> + ) { + let variant = &adt_def.variants[variant_index]; let tcx = cx.tcx(); for (i, field) in variant.fields.iter().enumerate() { let arg = monomorphize::field_ty(tcx, substs, field); - let field_ptr = adt::trans_field_ptr(&cx, t, av, Disr::from(variant.disr_val), i); - drop_ty(&cx, MaybeSizedValue::sized(field_ptr), arg); + let field_ptr = av.trans_field_ptr(&cx, i); + drop_ty(&cx, LvalueRef::new_sized_ty(field_ptr, arg)); } } let mut cx = cx; + let t = ptr.ty.to_ty(cx.tcx()); match t.sty { ty::TyClosure(def_id, substs) => { for (i, upvar_ty) in substs.upvar_tys(def_id, cx.tcx()).enumerate() { - let llupvar = adt::trans_field_ptr(&cx, t, ptr, Disr(0), i); - drop_ty(&cx, MaybeSizedValue::sized(llupvar), upvar_ty); + let llupvar = ptr.trans_field_ptr(&cx, i); + drop_ty(&cx, LvalueRef::new_sized_ty(llupvar, upvar_ty)); } } ty::TyArray(_, n) => { - let base = get_dataptr(&cx, ptr.value); + let base = get_dataptr(&cx, ptr.llval); let len = C_uint(cx.ccx, n); let unit_ty = t.sequence_element_type(cx.tcx()); cx = tvec::slice_for_each(&cx, base, unit_ty, len, - |bb, vv| drop_ty(bb, MaybeSizedValue::sized(vv), unit_ty)); + |bb, vv| drop_ty(bb, LvalueRef::new_sized_ty(vv, unit_ty))); } ty::TySlice(_) | ty::TyStr => { let unit_ty = t.sequence_element_type(cx.tcx()); - cx = tvec::slice_for_each(&cx, ptr.value, unit_ty, ptr.meta, 
- |bb, vv| drop_ty(bb, MaybeSizedValue::sized(vv), unit_ty)); + cx = tvec::slice_for_each(&cx, ptr.llval, unit_ty, ptr.llextra, + |bb, vv| drop_ty(bb, LvalueRef::new_sized_ty(vv, unit_ty))); } ty::TyTuple(ref args) => { for (i, arg) in args.iter().enumerate() { - let llfld_a = adt::trans_field_ptr(&cx, t, ptr, Disr(0), i); - drop_ty(&cx, MaybeSizedValue::sized(llfld_a), *arg); + let llfld_a = ptr.trans_field_ptr(&cx, i); + drop_ty(&cx, LvalueRef::new_sized_ty(llfld_a, *arg)); } } ty::TyAdt(adt, substs) => match adt.adt_kind() { AdtKind::Struct => { - let VariantInfo { fields, discr } = VariantInfo::from_ty(cx.tcx(), t, None); - for (i, &Field(_, field_ty)) in fields.iter().enumerate() { - let llfld_a = adt::trans_field_ptr(&cx, t, ptr, Disr::from(discr), i); - let ptr = if cx.ccx.shared().type_is_sized(field_ty) { - MaybeSizedValue::sized(llfld_a) - } else { - MaybeSizedValue::unsized_(llfld_a, ptr.meta) - }; - drop_ty(&cx, ptr, field_ty); + for (i, field) in adt.variants[0].fields.iter().enumerate() { + let field_ty = monomorphize::field_ty(cx.tcx(), substs, field); + let mut field_ptr = ptr.clone(); + field_ptr.llval = ptr.trans_field_ptr(&cx, i); + field_ptr.ty = LvalueTy::from_ty(field_ty); + if cx.ccx.shared().type_is_sized(field_ty) { + field_ptr.llextra = ptr::null_mut(); + } + drop_ty(&cx, field_ptr); } } AdtKind::Union => { @@ -462,16 +465,29 @@ fn drop_structural_ty<'a, 'tcx>(cx: BlockAndBuilder<'a, 'tcx>, // NB: we must hit the discriminant first so that structural // comparison know not to proceed when the discriminants differ. - match adt::trans_switch(&cx, t, ptr.value, false) { - (adt::BranchKind::Single, None) => { + // Obtain a representation of the discriminant sufficient to translate + // destructuring; this may or may not involve the actual discriminant. + let l = cx.ccx.layout_of(t); + match *l { + layout::Univariant { .. } | + layout::UntaggedUnion { .. } => { if n_variants != 0 { assert!(n_variants == 1); - iter_variant(&cx, t, ptr, &adt.variants[0], substs); + ptr.ty = LvalueTy::Downcast { + adt_def: adt, + substs: substs, + variant_index: 0, + }; + iter_variant_fields(&cx, ptr, &adt, 0, substs); } } - (adt::BranchKind::Switch, Some(lldiscrim_a)) => { + layout::CEnum { .. } | + layout::General { .. } | + layout::RawNullablePointer { .. } | + layout::StructWrappedNullablePointer { .. } => { + let lldiscrim_a = adt::trans_get_discr(&cx, t, ptr.llval, None, false); let tcx = cx.tcx(); - drop_ty(&cx, MaybeSizedValue::sized(lldiscrim_a), tcx.types.isize); + drop_ty(&cx, LvalueRef::new_sized_ty(lldiscrim_a, tcx.types.isize)); // Create a fall-through basic block for the "else" case of // the switch instruction we're about to generate. Note that @@ -486,23 +502,28 @@ fn drop_structural_ty<'a, 'tcx>(cx: BlockAndBuilder<'a, 'tcx>, // from the outer function, and any other use case will only // call this for an already-valid enum in which case the `ret // void` will never be hit. 
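Per-variant field access now goes through the lvalue's type instead of a `(Ty, Disr)` pair: setting `ty` to `LvalueTy::Downcast` selects the variant, after which `trans_field_ptr` indexes into it. A compressed sketch of the idiom used both here and in `trans_ctor_shim` earlier in the patch; `lv`, `adt`, `substs`, `i`, and `field_ty` are hypothetical:

    // Illustrative only: select variant `i`, then address and drop its first field.
    lv.ty = LvalueTy::Downcast { adt_def: adt, substs: substs, variant_index: i };
    let field0 = lv.trans_field_ptr(&bcx, 0);
    drop_ty(&bcx, LvalueRef::new_sized_ty(field0, field_ty));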
- let ret_void_cx = cx.fcx().build_new_block("enum-iter-ret-void"); + let ret_void_cx = cx.build_sibling_block("enum-iter-ret-void"); ret_void_cx.ret_void(); let llswitch = cx.switch(lldiscrim_a, ret_void_cx.llbb(), n_variants); - let next_cx = cx.fcx().build_new_block("enum-iter-next"); + let next_cx = cx.build_sibling_block("enum-iter-next"); - for variant in &adt.variants { + for (i, variant) in adt.variants.iter().enumerate() { let variant_cx_name = format!("enum-iter-variant-{}", &variant.disr_val.to_string()); - let variant_cx = cx.fcx().build_new_block(&variant_cx_name); + let variant_cx = cx.build_sibling_block(&variant_cx_name); let case_val = adt::trans_case(&cx, t, Disr::from(variant.disr_val)); variant_cx.add_case(llswitch, case_val, variant_cx.llbb()); - iter_variant(&variant_cx, t, ptr, variant, substs); + ptr.ty = LvalueTy::Downcast { + adt_def: adt, + substs: substs, + variant_index: i, + }; + iter_variant_fields(&variant_cx, ptr, &adt, i, substs); variant_cx.br(next_cx.llbb()); } cx = next_cx; } - _ => cx.ccx.sess().unimpl("value from adt::trans_switch in drop_structural_ty"), + _ => bug!("{} is not an enum.", t), } } }, diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs index 7c026cb153037..842a21e98db46 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -16,6 +16,7 @@ use llvm; use llvm::{ValueRef}; use abi::{Abi, FnType}; use adt; +use mir::lvalue::LvalueRef; use base::*; use common::*; use declare; @@ -24,10 +25,10 @@ use type_of; use machine; use type_::Type; use rustc::ty::{self, Ty}; -use Disr; use rustc::hir; use syntax::ast; use syntax::symbol::Symbol; +use builder::Builder; use rustc::session::Session; use syntax_pos::Span; @@ -87,14 +88,14 @@ fn get_simple_intrinsic(ccx: &CrateContext, name: &str) -> Option { /// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs, /// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics, /// add them to librustc_trans/trans/context.rs -pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, +pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, callee_ty: Ty<'tcx>, fn_ty: &FnType, llargs: &[ValueRef], llresult: ValueRef, span: Span) { let ccx = bcx.ccx; - let tcx = bcx.tcx(); + let tcx = ccx.tcx(); let (def_id, substs, fty) = match callee_ty.sty { ty::TyFnDef(def_id, substs, ref fty) => (def_id, substs, fty), @@ -125,7 +126,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, bcx.call(expect, &[llargs[0], C_bool(ccx, false)], None) } "try" => { - try_intrinsic(bcx, llargs[0], llargs[1], llargs[2], llresult); + try_intrinsic(bcx, ccx, llargs[0], llargs[1], llargs[2], llresult); C_nil(ccx) } "breakpoint" => { @@ -533,7 +534,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, // qux` to be converted into `foo, bar, baz, qux`, integer // arguments to be truncated as needed and pointers to be // cast. - fn modify_as_needed<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, + fn modify_as_needed<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: &intrinsics::Type, arg_type: Ty<'tcx>, llarg: ValueRef) @@ -548,12 +549,8 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, // destructors, and the contents are SIMD // etc. 
assert!(!bcx.ccx.shared().type_needs_drop(arg_type)); - let arg = adt::MaybeSizedValue::sized(llarg); - (0..contents.len()) - .map(|i| { - bcx.load(adt::trans_field_ptr(bcx, arg_type, arg, Disr(0), i)) - }) - .collect() + let arg = LvalueRef::new_sized_ty(llarg, arg_type); + (0..contents.len()).map(|i| bcx.load(arg.trans_field_ptr(bcx, i))).collect() } intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => { let llvm_elem = one(ty_to_type(bcx.ccx, llvm_elem, &mut false)); @@ -634,7 +631,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, } } -fn copy_intrinsic<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, +fn copy_intrinsic<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, allow_overlap: bool, volatile: bool, tp_ty: Ty<'tcx>, @@ -670,7 +667,7 @@ fn copy_intrinsic<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, } fn memset_intrinsic<'a, 'tcx>( - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, volatile: bool, ty: Ty<'tcx>, dst: ValueRef, @@ -686,7 +683,8 @@ fn memset_intrinsic<'a, 'tcx>( } fn try_intrinsic<'a, 'tcx>( - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, + ccx: &CrateContext, func: ValueRef, data: ValueRef, local_ptr: ValueRef, @@ -696,9 +694,9 @@ fn try_intrinsic<'a, 'tcx>( bcx.call(func, &[data], None); bcx.store(C_null(Type::i8p(&bcx.ccx)), dest, None); } else if wants_msvc_seh(bcx.sess()) { - trans_msvc_try(bcx, func, data, local_ptr, dest); + trans_msvc_try(bcx, ccx, func, data, local_ptr, dest); } else { - trans_gnu_try(bcx, func, data, local_ptr, dest); + trans_gnu_try(bcx, ccx, func, data, local_ptr, dest); } } @@ -709,24 +707,25 @@ fn try_intrinsic<'a, 'tcx>( // instructions are meant to work for all targets, as of the time of this // writing, however, LLVM does not recommend the usage of these new instructions // as the old ones are still more optimized. -fn trans_msvc_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, +fn trans_msvc_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, + ccx: &CrateContext, func: ValueRef, data: ValueRef, local_ptr: ValueRef, dest: ValueRef) { - let llfn = get_rust_try_fn(bcx.fcx(), &mut |bcx| { + let llfn = get_rust_try_fn(ccx, &mut |bcx| { let ccx = bcx.ccx; bcx.set_personality_fn(bcx.ccx.eh_personality()); - let normal = bcx.fcx().build_new_block("normal"); - let catchswitch = bcx.fcx().build_new_block("catchswitch"); - let catchpad = bcx.fcx().build_new_block("catchpad"); - let caught = bcx.fcx().build_new_block("caught"); + let normal = bcx.build_sibling_block("normal"); + let catchswitch = bcx.build_sibling_block("catchswitch"); + let catchpad = bcx.build_sibling_block("catchpad"); + let caught = bcx.build_sibling_block("caught"); - let func = llvm::get_param(bcx.fcx().llfn, 0); - let data = llvm::get_param(bcx.fcx().llfn, 1); - let local_ptr = llvm::get_param(bcx.fcx().llfn, 2); + let func = llvm::get_param(bcx.llfn(), 0); + let data = llvm::get_param(bcx.llfn(), 1); + let local_ptr = llvm::get_param(bcx.llfn(), 2); // We're generating an IR snippet that looks like: // @@ -768,7 +767,7 @@ fn trans_msvc_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, // // More information can be found in libstd's seh.rs implementation. let i64p = Type::i64(ccx).ptr_to(); - let slot = bcx.fcx().alloca(i64p, "slot"); + let slot = bcx.alloca(i64p, "slot"); bcx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(), None); @@ -812,12 +811,13 @@ fn trans_msvc_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, // function calling it, and that function may already have other personality // functions in play. 
By calling a shim we're guaranteed that our shim will have // the right personality function. -fn trans_gnu_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, +fn trans_gnu_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, + ccx: &CrateContext, func: ValueRef, data: ValueRef, local_ptr: ValueRef, dest: ValueRef) { - let llfn = get_rust_try_fn(bcx.fcx(), &mut |bcx| { + let llfn = get_rust_try_fn(ccx, &mut |bcx| { let ccx = bcx.ccx; // Translates the shims described above: @@ -837,12 +837,12 @@ fn trans_gnu_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, // expected to be `*mut *mut u8` for this to actually work, but that's // managed by the standard library. - let then = bcx.fcx().build_new_block("then"); - let catch = bcx.fcx().build_new_block("catch"); + let then = bcx.build_sibling_block("then"); + let catch = bcx.build_sibling_block("catch"); - let func = llvm::get_param(bcx.fcx().llfn, 0); - let data = llvm::get_param(bcx.fcx().llfn, 1); - let local_ptr = llvm::get_param(bcx.fcx().llfn, 2); + let func = llvm::get_param(bcx.llfn(), 0); + let data = llvm::get_param(bcx.llfn(), 1); + let local_ptr = llvm::get_param(bcx.llfn(), 2); bcx.invoke(func, &[data], then.llbb(), catch.llbb(), None); then.ret(C_i32(ccx, 0)); @@ -854,7 +854,7 @@ fn trans_gnu_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, // rust_try ignores the selector. let lpad_ty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false); - let vals = catch.landing_pad(lpad_ty, bcx.ccx.eh_personality(), 1, catch.fcx().llfn); + let vals = catch.landing_pad(lpad_ty, bcx.ccx.eh_personality(), 1, catch.llfn()); catch.add_clause(vals, C_null(Type::i8p(ccx))); let ptr = catch.extract_value(vals, 0); catch.store(ptr, catch.bitcast(local_ptr, Type::i8p(ccx).ptr_to()), None); @@ -869,13 +869,12 @@ fn trans_gnu_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, // Helper function to give a Block to a closure to translate a shim function. // This is currently primarily used for the `try` intrinsic functions above. -fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, +fn gen_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, name: &str, inputs: Vec>, output: Ty<'tcx>, - trans: &mut for<'b> FnMut(BlockAndBuilder<'b, 'tcx>)) + trans: &mut for<'b> FnMut(Builder<'b, 'tcx>)) -> ValueRef { - let ccx = fcx.ccx; let sig = ccx.tcx().mk_fn_sig(inputs.into_iter(), output, false); let rust_fn_ty = ccx.tcx().mk_fn_ptr(ccx.tcx().mk_bare_fn(ty::BareFnTy { @@ -884,8 +883,8 @@ fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, sig: ty::Binder(sig) })); let llfn = declare::define_internal_fn(ccx, name, rust_fn_ty); - let fcx = FunctionContext::new(ccx, llfn); - trans(fcx.get_entry_block()); + let bcx = Builder::new_block(ccx, llfn, "entry-block"); + trans(bcx); llfn } @@ -893,10 +892,9 @@ fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, // catch exceptions. // // This function is only generated once and is then cached. 
-fn get_rust_try_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, - trans: &mut for<'b> FnMut(BlockAndBuilder<'b, 'tcx>)) +fn get_rust_try_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + trans: &mut for<'b> FnMut(Builder<'b, 'tcx>)) -> ValueRef { - let ccx = fcx.ccx; if let Some(llfn) = ccx.rust_try_fn().get() { return llfn; } @@ -910,7 +908,7 @@ fn get_rust_try_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, sig: ty::Binder(tcx.mk_fn_sig(iter::once(i8p), tcx.mk_nil(), false)), })); let output = tcx.types.i32; - let rust_try = gen_fn(fcx, "__rust_try", vec![fn_ty, i8p, i8p], output, trans); + let rust_try = gen_fn(ccx, "__rust_try", vec![fn_ty, i8p, i8p], output, trans); ccx.rust_try_fn().set(Some(rust_try)); return rust_try } @@ -920,7 +918,7 @@ fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) { } fn generic_simd_intrinsic<'a, 'tcx>( - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, name: &str, callee_ty: Ty<'tcx>, llargs: &[ValueRef], diff --git a/src/librustc_trans/meth.rs b/src/librustc_trans/meth.rs index cf50e7be2afb5..aecba2f57e52c 100644 --- a/src/librustc_trans/meth.rs +++ b/src/librustc_trans/meth.rs @@ -13,6 +13,7 @@ use llvm::{ValueRef, get_params}; use rustc::traits; use callee::{Callee, CalleeData}; use common::*; +use builder::Builder; use consts; use declare; use glue; @@ -27,7 +28,7 @@ use rustc::ty; const VTABLE_OFFSET: usize = 3; /// Extracts a method from a trait object's vtable, at the specified index. -pub fn get_virtual_method<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, +pub fn get_virtual_method<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, llvtable: ValueRef, vtable_index: usize) -> ValueRef { @@ -75,10 +76,9 @@ pub fn trans_object_shim<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>, let llfn = declare::define_internal_fn(ccx, &function_name, callee.ty); attributes::set_frame_pointer_elimination(ccx, llfn); - let fcx = FunctionContext::new(ccx, llfn); - let bcx = fcx.get_entry_block(); + let bcx = Builder::new_block(ccx, llfn, "entry-block"); - let mut llargs = get_params(fcx.llfn); + let mut llargs = get_params(llfn); let fn_ret = callee.ty.fn_ret(); let fn_ty = callee.direct_fn_type(ccx, &[]); diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index 0321417b153aa..6d92cd99fbeb9 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -14,10 +14,11 @@ use rustc::middle::lang_items; use rustc::ty::{self, layout}; use rustc::mir; use abi::{Abi, FnType, ArgType}; -use adt::{self, MaybeSizedValue}; +use adt; use base::{self, Lifetime}; use callee::{Callee, CalleeData, Fn, Intrinsic, NamedTupleConstructor, Virtual}; -use common::{self, BlockAndBuilder, Funclet}; +use builder::Builder; +use common::{self, Funclet}; use common::{C_bool, C_str_slice, C_struct, C_u32, C_undef}; use consts; use Disr; @@ -36,14 +37,14 @@ use std::cmp; use super::{MirContext, LocalRef}; use super::analyze::CleanupKind; use super::constant::Const; -use super::lvalue::{LvalueRef}; +use super::lvalue::LvalueRef; use super::operand::OperandRef; use super::operand::OperandValue::{Pair, Ref, Immediate}; impl<'a, 'tcx> MirContext<'a, 'tcx> { pub fn trans_block(&mut self, bb: mir::BasicBlock, funclets: &IndexVec>) { - let mut bcx = self.build_block(bb); + let mut bcx = self.get_builder(bb); let data = &self.mir[bb]; debug!("trans_block({:?}={:?})", bb, data); @@ -57,7 +58,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let cleanup_pad = funclet.map(|lp| lp.cleanuppad()); let cleanup_bundle = funclet.map(|l| l.bundle()); - let funclet_br = 
|this: &Self, bcx: BlockAndBuilder, bb: mir::BasicBlock| { + let funclet_br = |this: &Self, bcx: Builder, bb: mir::BasicBlock| { let lltarget = this.blocks[bb]; if let Some(cp) = cleanup_pad { match this.cleanup_kinds[bb] { @@ -84,7 +85,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { debug!("llblock: creating cleanup trampoline for {:?}", target); let name = &format!("{:?}_cleanup_trampoline_{:?}", bb, target); - let trampoline = this.fcx.build_new_block(name); + let trampoline = this.new_block(name); trampoline.cleanup_ret(cp, Some(lltarget)); trampoline.llbb() } @@ -208,7 +209,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { }; let llslot = match op.val { Immediate(_) | Pair(..) => { - let llscratch = bcx.fcx().alloca(ret.original_ty, "ret"); + let llscratch = bcx.alloca(ret.original_ty, "ret"); self.store_operand(&bcx, llscratch, op, None); llscratch } @@ -241,20 +242,14 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { return; } - let lvalue = self.trans_lvalue(&bcx, location); + let mut lvalue = self.trans_lvalue(&bcx, location); let drop_fn = glue::get_drop_glue(bcx.ccx, ty); let drop_ty = glue::get_drop_glue_type(bcx.ccx.shared(), ty); - let ptr = if bcx.ccx.shared().type_is_sized(ty) { - let value = if drop_ty != ty { - bcx.pointercast(lvalue.llval, type_of::type_of(bcx.ccx, drop_ty).ptr_to()) - } else { - lvalue.llval - }; - MaybeSizedValue::sized(value) - } else { - MaybeSizedValue::unsized_(lvalue.llval, lvalue.llextra) - }; - let args = &[ptr.value, ptr.meta][..1 + ptr.has_meta() as usize]; + if bcx.ccx.shared().type_is_sized(ty) && drop_ty != ty { + lvalue.llval = bcx.pointercast( + lvalue.llval, type_of::type_of(bcx.ccx, drop_ty).ptr_to()); + } + let args = &[lvalue.llval, lvalue.llextra][..1 + lvalue.has_extra() as usize]; if let Some(unwind) = unwind { bcx.invoke( drop_fn, @@ -301,7 +296,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // Create the failure block and the conditional branch to it. let lltarget = llblock(self, target); - let panic_block = self.fcx.build_new_block("panic"); + let panic_block = self.new_block("panic"); if expected { bcx.cond_br(cond, lltarget, panic_block.llbb()); } else { @@ -584,15 +579,13 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { fn_ty.apply_attrs_callsite(invokeret); if let Some((_, target)) = *destination { - let ret_bcx = self.build_block(target); - ret_bcx.at_start(|ret_bcx| { - self.set_debug_loc(&ret_bcx, terminator.source_info); - let op = OperandRef { - val: Immediate(invokeret), - ty: sig.output(), - }; - self.store_return(&ret_bcx, ret_dest, fn_ty.ret, op); - }); + let ret_bcx = self.get_builder(target); + self.set_debug_loc(&ret_bcx, terminator.source_info); + let op = OperandRef { + val: Immediate(invokeret), + ty: sig.output(), + }; + self.store_return(&ret_bcx, ret_dest, fn_ty.ret, op); } } else { let llret = bcx.call(fn_ptr, &llargs, cleanup_bundle); @@ -613,7 +606,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } fn trans_argument(&mut self, - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, op: OperandRef<'tcx>, llargs: &mut Vec, fn_ty: &FnType, @@ -658,7 +651,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let (mut llval, by_ref) = match op.val { Immediate(_) | Pair(..) 
=> { if arg.is_indirect() || arg.cast.is_some() { - let llscratch = bcx.fcx().alloca(arg.original_ty, "arg"); + let llscratch = bcx.alloca(arg.original_ty, "arg"); self.store_operand(bcx, llscratch, op, None); (llscratch, true) } else { @@ -689,7 +682,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } fn trans_arguments_untupled(&mut self, - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, operand: &mir::Operand<'tcx>, llargs: &mut Vec, fn_ty: &FnType, @@ -706,9 +699,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // Handle both by-ref and immediate tuples. match tuple.val { Ref(llval) => { - let base = adt::MaybeSizedValue::sized(llval); for (n, &ty) in arg_types.iter().enumerate() { - let ptr = adt::trans_field_ptr(bcx, tuple.ty, base, Disr(0), n); + let ptr = LvalueRef::new_sized_ty(llval, tuple.ty); + let ptr = ptr.trans_field_ptr(bcx, n); let val = if common::type_is_fat_ptr(bcx.ccx, ty) { let (lldata, llextra) = base::load_fat_ptr(bcx, ptr, ty); Pair(lldata, llextra) @@ -765,13 +758,13 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } - fn get_personality_slot(&mut self, bcx: &BlockAndBuilder<'a, 'tcx>) -> ValueRef { + fn get_personality_slot(&mut self, bcx: &Builder<'a, 'tcx>) -> ValueRef { let ccx = bcx.ccx; if let Some(slot) = self.llpersonalityslot { slot } else { let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false); - let slot = bcx.fcx().alloca(llretty, "personalityslot"); + let slot = bcx.alloca(llretty, "personalityslot"); self.llpersonalityslot = Some(slot); Lifetime::Start.call(bcx, slot); slot @@ -790,15 +783,15 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { return self.blocks[target_bb]; } - let target = self.build_block(target_bb); + let target = self.get_builder(target_bb); - let bcx = self.fcx.build_new_block("cleanup"); + let bcx = self.new_block("cleanup"); self.landing_pads[target_bb] = Some(bcx.llbb()); let ccx = bcx.ccx; let llpersonality = self.ccx.eh_personality(); let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false); - let llretval = bcx.landing_pad(llretty, llpersonality, 1, self.fcx.llfn); + let llretval = bcx.landing_pad(llretty, llpersonality, 1, self.llfn); bcx.set_cleanup(llretval); let slot = self.get_personality_slot(&bcx); bcx.store(llretval, slot, None); @@ -808,18 +801,24 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { fn unreachable_block(&mut self) -> BasicBlockRef { self.unreachable_block.unwrap_or_else(|| { - let bl = self.fcx.build_new_block("unreachable"); + let bl = self.new_block("unreachable"); bl.unreachable(); self.unreachable_block = Some(bl.llbb()); bl.llbb() }) } - pub fn build_block(&self, bb: mir::BasicBlock) -> BlockAndBuilder<'a, 'tcx> { - BlockAndBuilder::new(self.blocks[bb], self.fcx) + pub fn new_block(&self, name: &str) -> Builder<'a, 'tcx> { + Builder::new_block(self.ccx, self.llfn, name) + } + + pub fn get_builder(&self, bb: mir::BasicBlock) -> Builder<'a, 'tcx> { + let builder = Builder::with_ccx(self.ccx); + builder.position_at_end(self.blocks[bb]); + builder } - fn make_return_dest(&mut self, bcx: &BlockAndBuilder<'a, 'tcx>, + fn make_return_dest(&mut self, bcx: &Builder<'a, 'tcx>, dest: &mir::Lvalue<'tcx>, fn_ret_ty: &ArgType, llargs: &mut Vec, is_intrinsic: bool) -> ReturnDest { // If the return is ignored, we can just return a do-nothing ReturnDest @@ -836,14 +835,14 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { return if fn_ret_ty.is_indirect() { // Odd, but possible, case, we have an operand temporary, // but the calling convention has an indirect return. 
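`MirContext` now has two distinct ways of obtaining a `Builder`, and the hunk above uses both: `new_block` appends a brand-new basic block to the function, while `get_builder` only positions a builder at the end of a block that already exists in `self.blocks`. A short illustrative sketch; `bb` is a hypothetical `mir::BasicBlock`:

    // Illustrative only.
    let panic_bcx = self.new_block("panic"); // fresh LLVM block, e.g. for assert failures
    let mir_bcx = self.get_builder(bb);      // builder for the existing block self.blocks[bb]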
- let tmp = base::alloc_ty(bcx, ret_ty, "tmp_ret"); + let tmp = bcx.alloca_ty(ret_ty, "tmp_ret"); llargs.push(tmp); ReturnDest::IndirectOperand(tmp, index) } else if is_intrinsic { // Currently, intrinsics always need a location to store // the result. so we create a temporary alloca for the // result - let tmp = base::alloc_ty(bcx, ret_ty, "tmp_ret"); + let tmp = bcx.alloca_ty(ret_ty, "tmp_ret"); ReturnDest::IndirectOperand(tmp, index) } else { ReturnDest::DirectOperand(index) @@ -864,7 +863,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } } - fn trans_transmute(&mut self, bcx: &BlockAndBuilder<'a, 'tcx>, + fn trans_transmute(&mut self, bcx: &Builder<'a, 'tcx>, src: &mir::Operand<'tcx>, dst: LvalueRef<'tcx>) { let mut val = self.trans_operand(bcx, src); if let ty::TyFnDef(def_id, substs, _) = val.ty.sty { @@ -895,7 +894,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // Stores the return value of a function call into it's final location. fn store_return(&mut self, - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, dest: ReturnDest, ret_ty: ArgType, op: OperandRef<'tcx>) { @@ -911,7 +910,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { DirectOperand(index) => { // If there is a cast, we have to store and reload. let op = if ret_ty.cast.is_some() { - let tmp = base::alloc_ty(bcx, op.ty, "tmp_ret"); + let tmp = bcx.alloca_ty(op.ty, "tmp_ret"); ret_ty.store(bcx, op.immediate(), tmp); self.trans_load(bcx, tmp, op.ty) } else { diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index 700894c255da6..13e659a5ae0e8 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -18,16 +18,17 @@ use rustc::hir::def_id::DefId; use rustc::infer::TransNormalize; use rustc::mir; use rustc::mir::tcx::LvalueTy; -use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; +use rustc::ty::{self, layout, Ty, TyCtxt, TypeFoldable}; use rustc::ty::cast::{CastTy, IntTy}; use rustc::ty::subst::Substs; use rustc_data_structures::indexed_vec::{Idx, IndexVec}; use {abi, adt, base, Disr, machine}; use callee::Callee; -use common::{self, BlockAndBuilder, CrateContext, const_get_elt, val_ty}; +use builder::Builder; +use common::{self, CrateContext, const_get_elt, val_ty}; use common::{C_array, C_bool, C_bytes, C_floating_f64, C_integral, C_big_integral}; -use common::{C_null, C_struct, C_str_slice, C_undef, C_uint}; -use common::{const_to_opt_u128}; +use common::{C_null, C_struct, C_str_slice, C_undef, C_uint, C_vector, is_undef}; +use common::const_to_opt_u128; use consts; use monomorphize::{self, Instance}; use type_of; @@ -548,16 +549,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { mir::AggregateKind::Adt(..) | mir::AggregateKind::Closure(..) 
| mir::AggregateKind::Tuple => { - let disr = match *kind { - mir::AggregateKind::Adt(adt_def, index, _, _) => { - Disr::from(adt_def.variants[index].disr_val) - } - _ => Disr(0) - }; - Const::new( - adt::trans_const(self.ccx, dest_ty, disr, &fields), - dest_ty - ) + Const::new(trans_const(self.ccx, dest_ty, kind, &fields), dest_ty) } } } @@ -900,7 +892,7 @@ pub fn const_scalar_checked_binop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, impl<'a, 'tcx> MirContext<'a, 'tcx> { pub fn trans_constant(&mut self, - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, constant: &mir::Constant<'tcx>) -> Const<'tcx> { @@ -945,3 +937,159 @@ pub fn trans_static_initializer(ccx: &CrateContext, def_id: DefId) let instance = Instance::mono(ccx.shared(), def_id); MirConstContext::trans_def(ccx, instance, IndexVec::new()).map(|c| c.llval) } + +/// Construct a constant value, suitable for initializing a +/// GlobalVariable, given a case and constant values for its fields. +/// Note that this may have a different LLVM type (and different +/// alignment!) from the representation's `type_of`, so it needs a +/// pointer cast before use. +/// +/// The LLVM type system does not directly support unions, and only +/// pointers can be bitcast, so a constant (and, by extension, the +/// GlobalVariable initialized by it) will have a type that can vary +/// depending on which case of an enum it is. +/// +/// To understand the alignment situation, consider `enum E { V64(u64), +/// V32(u32, u32) }` on Windows. The type has 8-byte alignment to +/// accommodate the u64, but `V32(x, y)` would have LLVM type `{i32, +/// i32, i32}`, which is 4-byte aligned. +/// +/// Currently the returned value has the same size as the type, but +/// this could be changed in the future to avoid allocating unnecessary +/// space after values of shorter-than-maximum cases. +fn trans_const<'a, 'tcx>( + ccx: &CrateContext<'a, 'tcx>, + t: Ty<'tcx>, + kind: &mir::AggregateKind, + vals: &[ValueRef] +) -> ValueRef { + let l = ccx.layout_of(t); + let dl = &ccx.tcx().data_layout; + let variant_index = match *kind { + mir::AggregateKind::Adt(_, index, _, _) => index, + _ => 0, + }; + match *l { + layout::CEnum { discr: d, min, max, .. } => { + let discr = match *kind { + mir::AggregateKind::Adt(adt_def, _, _, _) => { + Disr::from(adt_def.variants[variant_index].disr_val) + }, + _ => Disr(0), + }; + assert_eq!(vals.len(), 0); + adt::assert_discr_in_range(Disr(min), Disr(max), discr); + C_integral(Type::from_integer(ccx, d), discr.0, true) + } + layout::General { discr: d, ref variants, .. } => { + let variant = &variants[variant_index]; + let lldiscr = C_integral(Type::from_integer(ccx, d), variant_index as u64, true); + let mut vals_with_discr = vec![lldiscr]; + vals_with_discr.extend_from_slice(vals); + let mut contents = build_const_struct(ccx, &variant, &vals_with_discr[..]); + let needed_padding = l.size(dl).bytes() - variant.stride().bytes(); + if needed_padding > 0 { + contents.push(padding(ccx, needed_padding)); + } + C_struct(ccx, &contents[..], false) + } + layout::UntaggedUnion { ref variants, .. }=> { + assert_eq!(variant_index, 0); + let contents = build_const_union(ccx, variants, vals[0]); + C_struct(ccx, &contents, variants.packed) + } + layout::Univariant { ref variant, .. } => { + assert_eq!(variant_index, 0); + let contents = build_const_struct(ccx, &variant, vals); + C_struct(ccx, &contents[..], variant.packed) + } + layout::Vector { .. } => { + C_vector(vals) + } + layout::RawNullablePointer { nndiscr, .. 
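The layout comment above can be checked with ordinary Rust: on a typical 64-bit target the enum as a whole is 8-byte aligned for the sake of the `u64` payload, while a constant shaped like `{i32, i32, i32}` for the `V32` case is only 4-byte aligned, which is why the emitted constant may need a pointer cast before use. A standalone illustration (the exact numbers are target-dependent):

    // Prints the layout facts the doc comment above relies on.
    #[allow(dead_code)]
    enum E { V64(u64), V32(u32, u32) }

    fn main() {
        use std::mem::{align_of, size_of};
        println!("E:             size {} align {}", size_of::<E>(), align_of::<E>());
        println!("(u32,u32,u32): size {} align {}",
                 size_of::<(u32, u32, u32)>(), align_of::<(u32, u32, u32)>());
    }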
} => { + let nnty = adt::compute_fields(ccx, t, nndiscr as usize, false)[0]; + if variant_index as u64 == nndiscr { + assert_eq!(vals.len(), 1); + vals[0] + } else { + C_null(type_of::sizing_type_of(ccx, nnty)) + } + } + layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => { + if variant_index as u64 == nndiscr { + C_struct(ccx, &build_const_struct(ccx, &nonnull, vals), false) + } else { + let fields = adt::compute_fields(ccx, t, nndiscr as usize, false); + let vals = fields.iter().map(|&ty| { + // Always use null even if it's not the `discrfield`th + // field; see #8506. + C_null(type_of::sizing_type_of(ccx, ty)) + }).collect::<Vec<_>>(); + C_struct(ccx, &build_const_struct(ccx, &nonnull, &vals[..]), false) + } + } + _ => bug!("trans_const: cannot handle type {} represented as {:#?}", t, l) + } +} + +/// Building structs is a little complicated, because we might need to +/// insert padding if a field's value is less aligned than its type. +/// +/// Continuing the example from `trans_const`, a value of type `(u32, +/// E)` should have the `E` at offset 8, but if that field's +/// initializer is 4-byte aligned then simply translating the tuple as +/// a two-element struct will locate it at offset 4, and accesses to it +/// will read the wrong memory. +fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + st: &layout::Struct, + vals: &[ValueRef]) + -> Vec<ValueRef> { + assert_eq!(vals.len(), st.offsets.len()); + + if vals.len() == 0 { + return Vec::new(); + } + + // offset of current value + let mut offset = 0; + let mut cfields = Vec::new(); + cfields.reserve(st.offsets.len()*2); + + let parts = st.field_index_by_increasing_offset().map(|i| { + (&vals[i], st.offsets[i].bytes()) + }); + for (&val, target_offset) in parts { + if offset < target_offset { + cfields.push(padding(ccx, target_offset - offset)); + offset = target_offset; + } + assert!(!is_undef(val)); + cfields.push(val); + offset += machine::llsize_of_alloc(ccx, val_ty(val)); + } + + if offset < st.stride().bytes() { + cfields.push(padding(ccx, st.stride().bytes() - offset)); + } + + cfields +} + +fn build_const_union<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + un: &layout::Union, + field_val: ValueRef) + -> Vec<ValueRef> { + let mut cfields = vec![field_val]; + + let offset = machine::llsize_of_alloc(ccx, val_ty(field_val)); + let size = un.stride().bytes(); + if offset != size { + cfields.push(padding(ccx, size - offset)); + } + + cfields +} + +fn padding(ccx: &CrateContext, size: u64) -> ValueRef { + C_undef(Type::array(&Type::i8(ccx), size)) +} diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index 0cd7f007c5df9..bd6e70639bba5 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -9,18 +9,20 @@ // except according to those terms.
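To make the padding bookkeeping in `build_const_struct` above concrete, here is a small standalone sketch in plain Rust (hypothetical helper name, not rustc internals): it walks the fields in order of increasing target offset, inserts padding whenever the constant just emitted was shorter than the gap to the next field's offset, and pads the tail out to the struct's stride.

    // Standalone illustration of the offset/padding walk performed by
    // `build_const_struct`; returns (length, is_padding) entries in emission order.
    fn lay_out_const_fields(target_offsets: &[u64], value_sizes: &[u64], stride: u64) -> Vec<(u64, bool)> {
        assert_eq!(target_offsets.len(), value_sizes.len());
        let mut out = Vec::new();
        let mut offset = 0;
        for (&target, &size) in target_offsets.iter().zip(value_sizes) {
            if offset < target {
                out.push((target - offset, true)); // padding up to the field's offset
                offset = target;
            }
            out.push((size, false)); // the field's constant itself
            offset += size;
        }
        if offset < stride {
            out.push((stride - offset, true)); // trailing padding up to the stride
        }
        out
    }

    fn main() {
        // A `(u32, E)`-style case: fields at offsets 0 and 8; the first value is
        // only 4 bytes, so 4 bytes of padding precede the second field.
        assert_eq!(
            lay_out_const_fields(&[0, 8], &[4, 8], 16),
            vec![(4, false), (4, true), (8, false)]
        );
    }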
use llvm::ValueRef; -use rustc::ty::{self, Ty, TypeFoldable}; +use rustc::ty::{self, layout, Ty, TypeFoldable}; use rustc::mir; use rustc::mir::tcx::LvalueTy; use rustc_data_structures::indexed_vec::Idx; use adt; -use base; -use common::{self, BlockAndBuilder, CrateContext, C_uint, C_undef}; +use builder::Builder; +use common::{self, CrateContext, C_uint, C_undef}; use consts; use machine; use type_of::type_of; use type_of; -use Disr; +use type_::Type; +use value::Value; +use glue; use std::ptr; @@ -39,22 +41,24 @@ pub struct LvalueRef<'tcx> { pub ty: LvalueTy<'tcx>, } -impl<'tcx> LvalueRef<'tcx> { +impl<'a, 'tcx> LvalueRef<'tcx> { pub fn new_sized(llval: ValueRef, lvalue_ty: LvalueTy<'tcx>) -> LvalueRef<'tcx> { LvalueRef { llval: llval, llextra: ptr::null_mut(), ty: lvalue_ty } } - pub fn alloca<'a>(bcx: &BlockAndBuilder<'a, 'tcx>, - ty: Ty<'tcx>, - name: &str) - -> LvalueRef<'tcx> - { - assert!(!ty.has_erasable_regions()); - let lltemp = base::alloc_ty(bcx, ty, name); - LvalueRef::new_sized(lltemp, LvalueTy::from_ty(ty)) + pub fn new_sized_ty(llval: ValueRef, ty: Ty<'tcx>) -> LvalueRef<'tcx> { + LvalueRef::new_sized(llval, LvalueTy::from_ty(ty)) + } + + pub fn new_unsized_ty(llval: ValueRef, llextra: ValueRef, ty: Ty<'tcx>) -> LvalueRef<'tcx> { + LvalueRef { + llval: llval, + llextra: llextra, + ty: LvalueTy::from_ty(ty), + } } - pub fn len<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef { + pub fn len(&self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef { let ty = self.ty.to_ty(ccx.tcx()); match ty.sty { ty::TyArray(_, n) => common::C_uint(ccx, n), @@ -65,17 +69,170 @@ impl<'tcx> LvalueRef<'tcx> { _ => bug!("unexpected type `{}` in LvalueRef::len", ty) } } + + pub fn has_extra(&self) -> bool { + !self.llextra.is_null() + } + + fn struct_field_ptr( + self, + bcx: &Builder<'a, 'tcx>, + st: &layout::Struct, + fields: &Vec>, + ix: usize, + needs_cast: bool + ) -> ValueRef { + let fty = fields[ix]; + let ccx = bcx.ccx; + + let ptr_val = if needs_cast { + let fields = st.field_index_by_increasing_offset().map(|i| { + type_of::in_memory_type_of(ccx, fields[i]) + }).collect::>(); + let real_ty = Type::struct_(ccx, &fields[..], st.packed); + bcx.pointercast(self.llval, real_ty.ptr_to()) + } else { + self.llval + }; + + // Simple case - we can just GEP the field + // * First field - Always aligned properly + // * Packed struct - There is no alignment padding + // * Field is sized - pointer is properly aligned already + if st.offsets[ix] == layout::Size::from_bytes(0) || st.packed || + bcx.ccx.shared().type_is_sized(fty) { + return bcx.struct_gep(ptr_val, st.memory_index[ix] as usize); + } + + // If the type of the last field is [T] or str, then we don't need to do + // any adjusments + match fty.sty { + ty::TySlice(..) | ty::TyStr => { + return bcx.struct_gep(ptr_val, st.memory_index[ix] as usize); + } + _ => () + } + + // There's no metadata available, log the case and just do the GEP. + if !self.has_extra() { + debug!("Unsized field `{}`, of `{:?}` has no metadata for adjustment", + ix, Value(ptr_val)); + return bcx.struct_gep(ptr_val, ix); + } + + // We need to get the pointer manually now. + // We do this by casting to a *i8, then offsetting it by the appropriate amount. + // We do this instead of, say, simply adjusting the pointer from the result of a GEP + // because the field may have an arbitrary alignment in the LLVM representation + // anyway. 
+ // + // To demonstrate: + // struct Foo { + // x: u16, + // y: T + // } + // + // The type Foo> is represented in LLVM as { u16, { u16, u8 }}, meaning that + // the `y` field has 16-bit alignment. + + let meta = self.llextra; + + + let offset = st.offsets[ix].bytes(); + let unaligned_offset = C_uint(bcx.ccx, offset); + + // Get the alignment of the field + let (_, align) = glue::size_and_align_of_dst(bcx, fty, meta); + + // Bump the unaligned offset up to the appropriate alignment using the + // following expression: + // + // (unaligned offset + (align - 1)) & -align + + // Calculate offset + let align_sub_1 = bcx.sub(align, C_uint(bcx.ccx, 1u64)); + let offset = bcx.and(bcx.add(unaligned_offset, align_sub_1), + bcx.neg(align)); + + debug!("struct_field_ptr: DST field offset: {:?}", Value(offset)); + + // Cast and adjust pointer + let byte_ptr = bcx.pointercast(ptr_val, Type::i8p(bcx.ccx)); + let byte_ptr = bcx.gep(byte_ptr, &[offset]); + + // Finally, cast back to the type expected + let ll_fty = type_of::in_memory_type_of(bcx.ccx, fty); + debug!("struct_field_ptr: Field type is {:?}", ll_fty); + bcx.pointercast(byte_ptr, ll_fty.ptr_to()) + } + + /// Access a field, at a point when the value's case is known. + pub fn trans_field_ptr(self, bcx: &Builder<'a, 'tcx>, ix: usize) -> ValueRef { + let discr = match self.ty { + LvalueTy::Ty { .. } => 0, + LvalueTy::Downcast { variant_index, .. } => variant_index, + }; + let t = self.ty.to_ty(bcx.tcx()); + let l = bcx.ccx.layout_of(t); + // Note: if this ever needs to generate conditionals (e.g., if we + // decide to do some kind of cdr-coding-like non-unique repr + // someday), it will need to return a possibly-new bcx as well. + match *l { + layout::Univariant { ref variant, .. } => { + assert_eq!(discr, 0); + self.struct_field_ptr(bcx, &variant, + &adt::compute_fields(bcx.ccx, t, 0, false), ix, false) + } + layout::Vector { count, .. } => { + assert_eq!(discr, 0); + assert!((ix as u64) < count); + bcx.struct_gep(self.llval, ix) + } + layout::General { discr: d, ref variants, .. } => { + let mut fields = adt::compute_fields(bcx.ccx, t, discr, false); + fields.insert(0, d.to_ty(&bcx.tcx(), false)); + self.struct_field_ptr(bcx, &variants[discr], &fields, ix + 1, true) + } + layout::UntaggedUnion { .. } => { + let fields = adt::compute_fields(bcx.ccx, t, 0, false); + let ty = type_of::in_memory_type_of(bcx.ccx, fields[ix]); + bcx.pointercast(self.llval, ty.ptr_to()) + } + layout::RawNullablePointer { nndiscr, .. } | + layout::StructWrappedNullablePointer { nndiscr, .. } if discr as u64 != nndiscr => { + let nullfields = adt::compute_fields(bcx.ccx, t, (1-nndiscr) as usize, false); + // The unit-like case might have a nonzero number of unit-like fields. + // (e.d., Result of Either with (), as one side.) + let ty = type_of::type_of(bcx.ccx, nullfields[ix]); + assert_eq!(machine::llsize_of_alloc(bcx.ccx, ty), 0); + bcx.pointercast(self.llval, ty.ptr_to()) + } + layout::RawNullablePointer { nndiscr, .. } => { + let nnty = adt::compute_fields(bcx.ccx, t, nndiscr as usize, false)[0]; + assert_eq!(ix, 0); + assert_eq!(discr as u64, nndiscr); + let ty = type_of::type_of(bcx.ccx, nnty); + bcx.pointercast(self.llval, ty.ptr_to()) + } + layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. 
} => { + assert_eq!(discr as u64, nndiscr); + self.struct_field_ptr(bcx, &nonnull, + &adt::compute_fields(bcx.ccx, t, discr, false), ix, false) + } + _ => bug!("element access in type without elements: {} represented as {:#?}", t, l) + } + } } impl<'a, 'tcx> MirContext<'a, 'tcx> { pub fn trans_lvalue(&mut self, - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, lvalue: &mir::Lvalue<'tcx>) -> LvalueRef<'tcx> { debug!("trans_lvalue(lvalue={:?})", lvalue); let ccx = bcx.ccx; - let tcx = bcx.tcx(); + let tcx = ccx.tcx(); if let mir::Lvalue::Local(index) = *lvalue { match self.locals[index] { @@ -134,26 +291,12 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let (llprojected, llextra) = match projection.elem { mir::ProjectionElem::Deref => bug!(), mir::ProjectionElem::Field(ref field, _) => { - let base_ty = tr_base.ty.to_ty(tcx); - let discr = match tr_base.ty { - LvalueTy::Ty { .. } => 0, - LvalueTy::Downcast { adt_def: _, substs: _, variant_index: v } => v, - }; - let discr = discr as u64; - let is_sized = self.ccx.shared().type_is_sized(projected_ty.to_ty(tcx)); - let base = if is_sized { - adt::MaybeSizedValue::sized(tr_base.llval) - } else { - adt::MaybeSizedValue::unsized_(tr_base.llval, tr_base.llextra) - }; - let llprojected = adt::trans_field_ptr(bcx, base_ty, base, Disr(discr), - field.index()); - let llextra = if is_sized { + let llextra = if self.ccx.shared().type_is_sized(projected_ty.to_ty(tcx)) { ptr::null_mut() } else { tr_base.llextra }; - (llprojected, llextra) + (tr_base.trans_field_ptr(bcx, field.index()), llextra) } mir::ProjectionElem::Index(ref index) => { let index = self.trans_operand(bcx, index); @@ -214,7 +357,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // Perform an action using the given Lvalue. // If the Lvalue is an empty LocalRef::Operand, then a temporary stack slot // is created first, then used as an operand to update the Lvalue. - pub fn with_lvalue_ref(&mut self, bcx: &BlockAndBuilder<'a, 'tcx>, + pub fn with_lvalue_ref(&mut self, bcx: &Builder<'a, 'tcx>, lvalue: &mir::Lvalue<'tcx>, f: F) -> U where F: FnOnce(&mut Self, LvalueRef<'tcx>) -> U { @@ -223,9 +366,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { LocalRef::Lvalue(lvalue) => f(self, lvalue), LocalRef::Operand(None) => { let lvalue_ty = self.monomorphized_lvalue_ty(lvalue); - let lvalue = LvalueRef::alloca(bcx, - lvalue_ty, - "lvalue_temp"); + assert!(!lvalue_ty.has_erasable_regions()); + let lltemp = bcx.alloca_ty(lvalue_ty, "lvalue_temp"); + let lvalue = LvalueRef::new_sized(lltemp, LvalueTy::from_ty(lvalue_ty)); let ret = f(self, lvalue); let op = self.trans_load(bcx, lvalue.llval, lvalue_ty); self.locals[index] = LocalRef::Operand(Some(op)); @@ -254,18 +397,13 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { /// than we are. /// /// nmatsakis: is this still necessary? Not sure. 
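The DST field access in `struct_field_ptr` above rounds the unaligned offset up with `(unaligned offset + (align - 1)) & -align`. That is the usual round-up-to-a-power-of-two-alignment trick; a standalone sketch (hypothetical helper, not rustc code) with a few worked values:

    fn align_up(offset: u64, align: u64) -> u64 {
        // `align` must be a power of two; `wrapping_neg` plays the role of `-align`.
        debug_assert!(align.is_power_of_two());
        (offset + (align - 1)) & align.wrapping_neg()
    }

    fn main() {
        assert_eq!(align_up(2, 2), 2);   // already aligned: unchanged
        assert_eq!(align_up(3, 4), 4);   // 3 rounded up to the next multiple of 4
        assert_eq!(align_up(10, 8), 16); // 10 rounded up to the next multiple of 8
    }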
- fn prepare_index(&mut self, - bcx: &BlockAndBuilder<'a, 'tcx>, - llindex: ValueRef) - -> ValueRef - { - let ccx = bcx.ccx; + fn prepare_index(&mut self, bcx: &Builder<'a, 'tcx>, llindex: ValueRef) -> ValueRef { let index_size = machine::llbitsize_of_real(bcx.ccx, common::val_ty(llindex)); - let int_size = machine::llbitsize_of_real(bcx.ccx, ccx.int_type()); + let int_size = machine::llbitsize_of_real(bcx.ccx, bcx.ccx.int_type()); if index_size < int_size { - bcx.zext(llindex, ccx.int_type()) + bcx.zext(llindex, bcx.ccx.int_type()) } else if index_size > int_size { - bcx.trunc(llindex, ccx.int_type()) + bcx.trunc(llindex, bcx.ccx.int_type()) } else { llindex } diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index dc8c6e89df9a4..eedd7956805b6 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -19,7 +19,8 @@ use rustc::infer::TransNormalize; use rustc::ty::TypeFoldable; use session::config::FullDebugInfo; use base; -use common::{self, BlockAndBuilder, CrateContext, FunctionContext, C_null, Funclet}; +use builder::Builder; +use common::{self, CrateContext, C_null, Funclet}; use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebugContext}; use monomorphize::{self, Instance}; use abi::FnType; @@ -37,7 +38,7 @@ use rustc_data_structures::indexed_vec::{IndexVec, Idx}; pub use self::constant::trans_static_initializer; use self::analyze::CleanupKind; -use self::lvalue::{LvalueRef}; +use self::lvalue::LvalueRef; use rustc::mir::traversal; use self::operand::{OperandRef, OperandValue}; @@ -48,7 +49,7 @@ pub struct MirContext<'a, 'tcx:'a> { debug_context: debuginfo::FunctionDebugContext, - fcx: &'a common::FunctionContext<'a, 'tcx>, + llfn: ValueRef, ccx: &'a CrateContext<'a, 'tcx>, @@ -106,7 +107,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { monomorphize::apply_param_substs(self.ccx.shared(), self.param_substs, value) } - pub fn set_debug_loc(&mut self, bcx: &BlockAndBuilder, source_info: mir::SourceInfo) { + pub fn set_debug_loc(&mut self, bcx: &Builder, source_info: mir::SourceInfo) { let (scope, span) = self.debug_loc(source_info); debuginfo::set_source_location(&self.debug_context, bcx, scope, span); } @@ -198,7 +199,8 @@ impl<'tcx> LocalRef<'tcx> { /////////////////////////////////////////////////////////////////////////// pub fn trans_mir<'a, 'tcx: 'a>( - fcx: &'a FunctionContext<'a, 'tcx>, + ccx: &'a CrateContext<'a, 'tcx>, + llfn: ValueRef, fn_ty: FnType, mir: &'a Mir<'tcx>, instance: Instance<'tcx>, @@ -207,8 +209,8 @@ pub fn trans_mir<'a, 'tcx: 'a>( ) { debug!("fn_ty: {:?}", fn_ty); let debug_context = - debuginfo::create_function_debug_context(fcx.ccx, instance, sig, abi, fcx.llfn, mir); - let bcx = fcx.get_entry_block(); + debuginfo::create_function_debug_context(ccx, instance, sig, abi, llfn, mir); + let bcx = Builder::new_block(ccx, llfn, "entry-block"); let cleanup_kinds = analyze::cleanup_kinds(&mir); @@ -216,20 +218,20 @@ pub fn trans_mir<'a, 'tcx: 'a>( let block_bcxs: IndexVec = mir.basic_blocks().indices().map(|bb| { if bb == mir::START_BLOCK { - fcx.new_block("start") + bcx.build_sibling_block("start").llbb() } else { - fcx.new_block(&format!("{:?}", bb)) + bcx.build_sibling_block(&format!("{:?}", bb)).llbb() } }).collect(); // Compute debuginfo scopes from MIR scopes. 
- let scopes = debuginfo::create_mir_scopes(fcx, mir, &debug_context); + let scopes = debuginfo::create_mir_scopes(ccx, mir, &debug_context); let mut mircx = MirContext { mir: mir, - fcx: fcx, + llfn: llfn, fn_ty: fn_ty, - ccx: fcx.ccx, + ccx: ccx, llpersonalityslot: None, blocks: block_bcxs, unreachable_block: None, @@ -266,7 +268,9 @@ pub fn trans_mir<'a, 'tcx: 'a>( } debug!("alloc: {:?} ({}) -> lvalue", local, name); - let lvalue = LvalueRef::alloca(&bcx, ty, &name.as_str()); + assert!(!ty.has_erasable_regions()); + let lltemp = bcx.alloca_ty(ty, &name.as_str()); + let lvalue = LvalueRef::new_sized(lltemp, LvalueTy::from_ty(ty)); if dbg { let (scope, span) = mircx.debug_loc(source_info); declare_local(&bcx, &mircx.debug_context, name, ty, scope, @@ -278,11 +282,13 @@ pub fn trans_mir<'a, 'tcx: 'a>( // Temporary or return pointer if local == mir::RETURN_POINTER && mircx.fn_ty.ret.is_indirect() { debug!("alloc: {:?} (return pointer) -> lvalue", local); - let llretptr = llvm::get_param(fcx.llfn, 0); + let llretptr = llvm::get_param(llfn, 0); LocalRef::Lvalue(LvalueRef::new_sized(llretptr, LvalueTy::from_ty(ty))) } else if lvalue_locals.contains(local.index()) { debug!("alloc: {:?} -> lvalue", local); - LocalRef::Lvalue(LvalueRef::alloca(&bcx, ty, &format!("{:?}", local))) + assert!(!ty.has_erasable_regions()); + let lltemp = bcx.alloca_ty(ty, &format!("{:?}", local)); + LocalRef::Lvalue(LvalueRef::new_sized(lltemp, LvalueTy::from_ty(ty))) } else { // If this is an immediate local, we do not create an // alloca in advance. Instead we wait until we see the @@ -312,9 +318,9 @@ pub fn trans_mir<'a, 'tcx: 'a>( let funclets: IndexVec> = mircx.cleanup_kinds.iter_enumerated().map(|(bb, cleanup_kind)| { if let CleanupKind::Funclet = *cleanup_kind { - let bcx = mircx.build_block(bb); + let bcx = mircx.get_builder(bb); bcx.set_personality_fn(mircx.ccx.eh_personality()); - if base::wants_msvc_seh(fcx.ccx.sess()) { + if base::wants_msvc_seh(ccx.sess()) { return Some(Funclet::new(bcx.cleanup_pad(None, &[]))); } } @@ -347,13 +353,12 @@ pub fn trans_mir<'a, 'tcx: 'a>( /// Produce, for each argument, a `ValueRef` pointing at the /// argument's value. As arguments are lvalues, these are always /// indirect. 
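For orientation, the per-local setup in `trans_mir` above amounts to a three-way decision. A much-simplified standalone sketch follows (hypothetical names; the real code also consults the lvalue analysis, user variable names, and debug-info settings):

    #[derive(Debug, PartialEq)]
    enum LocalKind {
        IndirectReturn,  // RETURN_POINTER with an indirect return: reuse the first llvm parameter
        StackSlot,       // memory local: an alloca is created up front
        DeferredOperand, // immediate local: no alloca, materialized at first assignment
    }

    fn classify(is_return_pointer: bool, ret_is_indirect: bool, needs_memory: bool) -> LocalKind {
        if is_return_pointer && ret_is_indirect {
            LocalKind::IndirectReturn
        } else if needs_memory {
            LocalKind::StackSlot
        } else {
            LocalKind::DeferredOperand
        }
    }

    fn main() {
        assert_eq!(classify(true, true, false), LocalKind::IndirectReturn);
        assert_eq!(classify(false, false, true), LocalKind::StackSlot);
        assert_eq!(classify(false, false, false), LocalKind::DeferredOperand);
    }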
-fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, +fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, mircx: &MirContext<'a, 'tcx>, scopes: &IndexVec, lvalue_locals: &BitVector) -> Vec> { let mir = mircx.mir; - let fcx = bcx.fcx(); let tcx = bcx.tcx(); let mut idx = 0; let mut llarg_idx = mircx.fn_ty.ret.is_indirect() as usize; @@ -381,7 +386,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, _ => bug!("spread argument isn't a tuple?!") }; - let lltemp = base::alloc_ty(&bcx, arg_ty, &format!("arg{}", arg_index)); + let lltemp = bcx.alloca_ty(arg_ty, &format!("arg{}", arg_index)); for (i, &tupled_arg_ty) in tupled_arg_tys.iter().enumerate() { let dst = bcx.struct_gep(lltemp, i); let arg = &mircx.fn_ty.args[idx]; @@ -428,7 +433,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, if arg.pad.is_some() { llarg_idx += 1; } - let llarg = llvm::get_param(fcx.llfn, llarg_idx as c_uint); + let llarg = llvm::get_param(bcx.llfn(), llarg_idx as c_uint); llarg_idx += 1; llarg } else if !lvalue_locals.contains(local.index()) && @@ -444,13 +449,13 @@ fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, if arg.pad.is_some() { llarg_idx += 1; } - let llarg = llvm::get_param(fcx.llfn, llarg_idx as c_uint); + let llarg = llvm::get_param(bcx.llfn(), llarg_idx as c_uint); llarg_idx += 1; let val = if common::type_is_fat_ptr(bcx.ccx, arg_ty) { let meta = &mircx.fn_ty.args[idx]; idx += 1; assert_eq!((meta.cast, meta.pad), (None, None)); - let llmeta = llvm::get_param(fcx.llfn, llarg_idx as c_uint); + let llmeta = llvm::get_param(bcx.llfn(), llarg_idx as c_uint); llarg_idx += 1; OperandValue::Pair(llarg, llmeta) } else { @@ -462,7 +467,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, }; return LocalRef::Operand(Some(operand.unpack_if_pair(bcx))); } else { - let lltemp = base::alloc_ty(&bcx, arg_ty, &format!("arg{}", arg_index)); + let lltemp = bcx.alloca_ty(arg_ty, &format!("arg{}", arg_index)); if common::type_is_fat_ptr(bcx.ccx, arg_ty) { // we pass fat pointers as two words, but we want to // represent them internally as a pointer to two words, @@ -514,7 +519,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, // doesn't actually strip the offset when splitting the closure // environment into its components so it ends up out of bounds. let env_ptr = if !env_ref { - let alloc = bcx.fcx().alloca(common::val_ty(llval), "__debuginfo_env_ptr"); + let alloc = bcx.alloca(common::val_ty(llval), "__debuginfo_env_ptr"); bcx.store(llval, alloc, None); alloc } else { @@ -573,7 +578,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, mod analyze; mod block; mod constant; -mod lvalue; +pub mod lvalue; mod operand; mod rvalue; mod statement; diff --git a/src/librustc_trans/mir/operand.rs b/src/librustc_trans/mir/operand.rs index a4af5f9e22cc4..28a247ee612a9 100644 --- a/src/librustc_trans/mir/operand.rs +++ b/src/librustc_trans/mir/operand.rs @@ -14,7 +14,8 @@ use rustc::mir; use rustc_data_structures::indexed_vec::Idx; use base; -use common::{self, BlockAndBuilder}; +use common; +use builder::Builder; use value::Value; use type_of; use type_::Type; @@ -85,8 +86,7 @@ impl<'a, 'tcx> OperandRef<'tcx> { /// If this operand is a Pair, we return an /// Immediate aggregate with the two values. 
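As background for the `OperandValue::Pair` handling above: a fat pointer such as `&[u8]` is passed as two words, a data pointer plus its metadata (here, the length), and `pack_if_pair`/`unpack_if_pair` convert between the paired and single-immediate representations. A standalone illustration of what the two halves are (plain Rust, not rustc code):

    fn main() {
        let xs = [1u8, 2, 3];
        let slice: &[u8] = &xs;
        // The two "halves" an OperandValue::Pair would track for this operand:
        let data = slice.as_ptr();
        let len = slice.len();
        // Reassembling them yields the same fat pointer.
        let rebuilt: &[u8] = unsafe { std::slice::from_raw_parts(data, len) };
        assert_eq!(rebuilt, slice);
    }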
- pub fn pack_if_pair(mut self, bcx: &BlockAndBuilder<'a, 'tcx>) - -> OperandRef<'tcx> { + pub fn pack_if_pair(mut self, bcx: &Builder<'a, 'tcx>) -> OperandRef<'tcx> { if let OperandValue::Pair(a, b) = self.val { // Reconstruct the immediate aggregate. let llty = type_of::type_of(bcx.ccx, self.ty); @@ -107,8 +107,7 @@ impl<'a, 'tcx> OperandRef<'tcx> { /// If this operand is a pair in an Immediate, /// we return a Pair with the two halves. - pub fn unpack_if_pair(mut self, bcx: &BlockAndBuilder<'a, 'tcx>) - -> OperandRef<'tcx> { + pub fn unpack_if_pair(mut self, bcx: &Builder<'a, 'tcx>) -> OperandRef<'tcx> { if let OperandValue::Immediate(llval) = self.val { // Deconstruct the immediate aggregate. if common::type_is_imm_pair(bcx.ccx, self.ty) { @@ -136,7 +135,7 @@ impl<'a, 'tcx> OperandRef<'tcx> { impl<'a, 'tcx> MirContext<'a, 'tcx> { pub fn trans_load(&mut self, - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, llval: ValueRef, ty: Ty<'tcx>) -> OperandRef<'tcx> @@ -165,7 +164,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } pub fn trans_consume(&mut self, - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, lvalue: &mir::Lvalue<'tcx>) -> OperandRef<'tcx> { @@ -217,7 +216,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } pub fn trans_operand(&mut self, - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, operand: &mir::Operand<'tcx>) -> OperandRef<'tcx> { @@ -242,7 +241,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } pub fn store_operand(&mut self, - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, lldest: ValueRef, operand: OperandRef<'tcx>, align: Option) { diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index dac81468be950..67fb8cf576d62 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -12,13 +12,15 @@ use llvm::{self, ValueRef}; use rustc::ty::{self, Ty}; use rustc::ty::cast::{CastTy, IntTy}; use rustc::ty::layout::Layout; +use rustc::mir::tcx::LvalueTy; use rustc::mir; use middle::lang_items::ExchangeMallocFnLangItem; use asm; use base; +use builder::Builder; use callee::Callee; -use common::{self, val_ty, C_bool, C_null, C_uint, BlockAndBuilder}; +use common::{self, val_ty, C_bool, C_null, C_uint}; use common::{C_integral}; use adt; use machine; @@ -35,10 +37,10 @@ use super::lvalue::{LvalueRef}; impl<'a, 'tcx> MirContext<'a, 'tcx> { pub fn trans_rvalue(&mut self, - bcx: BlockAndBuilder<'a, 'tcx>, + bcx: Builder<'a, 'tcx>, dest: LvalueRef<'tcx>, rvalue: &mir::Rvalue<'tcx>) - -> BlockAndBuilder<'a, 'tcx> + -> Builder<'a, 'tcx> { debug!("trans_rvalue(dest.llval={:?}, rvalue={:?})", Value(dest.llval), rvalue); @@ -79,7 +81,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // index into the struct, and this case isn't // important enough for it. 
debug!("trans_rvalue: creating ugly alloca"); - let lltemp = base::alloc_ty(&bcx, operand.ty, "__unsize_temp"); + let lltemp = bcx.alloca_ty(operand.ty, "__unsize_temp"); base::store_ty(&bcx, llval, lltemp, operand.ty); lltemp } @@ -101,7 +103,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::Rvalue::Aggregate(ref kind, ref operands) => { match *kind { - mir::AggregateKind::Adt(adt_def, variant_index, _, active_field_index) => { + mir::AggregateKind::Adt(adt_def, variant_index, substs, active_field_index) => { let disr = Disr::from(adt_def.variants[variant_index].disr_val); let dest_ty = dest.ty.to_ty(bcx.tcx()); adt::trans_set_discr(&bcx, dest_ty, dest.llval, Disr::from(disr)); @@ -109,10 +111,14 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let op = self.trans_operand(&bcx, operand); // Do not generate stores and GEPis for zero-sized fields. if !common::type_is_zero_size(bcx.ccx, op.ty) { - let val = adt::MaybeSizedValue::sized(dest.llval); + let mut val = LvalueRef::new_sized(dest.llval, dest.ty); let field_index = active_field_index.unwrap_or(i); - let lldest_i = adt::trans_field_ptr(&bcx, dest_ty, val, disr, - field_index); + val.ty = LvalueTy::Downcast { + adt_def: adt_def, + substs: self.monomorphize(&substs), + variant_index: disr.0 as usize, + }; + let lldest_i = val.trans_field_ptr(&bcx, field_index); self.store_operand(&bcx, lldest_i, op, None); } } @@ -170,9 +176,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } pub fn trans_rvalue_operand(&mut self, - bcx: BlockAndBuilder<'a, 'tcx>, + bcx: Builder<'a, 'tcx>, rvalue: &mir::Rvalue<'tcx>) - -> (BlockAndBuilder<'a, 'tcx>, OperandRef<'tcx>) + -> (Builder<'a, 'tcx>, OperandRef<'tcx>) { assert!(rvalue_creates_operand(rvalue), "cannot trans {:?} to operand", rvalue); @@ -477,7 +483,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } pub fn trans_scalar_binop(&mut self, - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, op: mir::BinOp, lhs: ValueRef, rhs: ValueRef, @@ -552,7 +558,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } pub fn trans_fat_ptr_binop(&mut self, - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, op: mir::BinOp, lhs_addr: ValueRef, lhs_extra: ValueRef, @@ -599,7 +605,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } pub fn trans_scalar_checked_binop(&mut self, - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, op: mir::BinOp, lhs: ValueRef, rhs: ValueRef, @@ -681,7 +687,7 @@ enum OverflowOp { Add, Sub, Mul } -fn get_overflow_intrinsic(oop: OverflowOp, bcx: &BlockAndBuilder, ty: Ty) -> ValueRef { +fn get_overflow_intrinsic(oop: OverflowOp, bcx: &Builder, ty: Ty) -> ValueRef { use syntax::ast::IntTy::*; use syntax::ast::UintTy::*; use rustc::ty::{TyInt, TyUint}; diff --git a/src/librustc_trans/mir/statement.rs b/src/librustc_trans/mir/statement.rs index cc85f68c197ec..48fc9720e4b83 100644 --- a/src/librustc_trans/mir/statement.rs +++ b/src/librustc_trans/mir/statement.rs @@ -11,7 +11,8 @@ use rustc::mir; use base; -use common::{self, BlockAndBuilder}; +use common; +use builder::Builder; use super::MirContext; use super::LocalRef; @@ -20,9 +21,9 @@ use super::super::disr::Disr; impl<'a, 'tcx> MirContext<'a, 'tcx> { pub fn trans_statement(&mut self, - bcx: BlockAndBuilder<'a, 'tcx>, + bcx: Builder<'a, 'tcx>, statement: &mir::Statement<'tcx>) - -> BlockAndBuilder<'a, 'tcx> { + -> Builder<'a, 'tcx> { debug!("trans_statement(statement={:?})", statement); self.set_debug_loc(&bcx, statement.source_info); @@ -77,10 +78,10 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } fn trans_storage_liveness(&self, - bcx: 
BlockAndBuilder<'a, 'tcx>, + bcx: Builder<'a, 'tcx>, lvalue: &mir::Lvalue<'tcx>, intrinsic: base::Lifetime) - -> BlockAndBuilder<'a, 'tcx> { + -> Builder<'a, 'tcx> { if let mir::Lvalue::Local(index) = *lvalue { if let LocalRef::Lvalue(tr_lval) = self.locals[index] { intrinsic.call(&bcx, tr_lval.llval); diff --git a/src/librustc_trans/tvec.rs b/src/librustc_trans/tvec.rs index c09726fda0810..cbcbb02bdc890 100644 --- a/src/librustc_trans/tvec.rs +++ b/src/librustc_trans/tvec.rs @@ -9,28 +9,29 @@ // except according to those terms. use llvm; +use builder::Builder; use llvm::ValueRef; use common::*; use rustc::ty::Ty; pub fn slice_for_each<'a, 'tcx, F>( - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, data_ptr: ValueRef, unit_ty: Ty<'tcx>, len: ValueRef, f: F -) -> BlockAndBuilder<'a, 'tcx> where F: FnOnce(&BlockAndBuilder<'a, 'tcx>, ValueRef) { +) -> Builder<'a, 'tcx> where F: FnOnce(&Builder<'a, 'tcx>, ValueRef) { // Special-case vectors with elements of size 0 so they don't go out of bounds (#9890) let zst = type_is_zero_size(bcx.ccx, unit_ty); - let add = |bcx: &BlockAndBuilder, a, b| if zst { + let add = |bcx: &Builder, a, b| if zst { bcx.add(a, b) } else { bcx.inbounds_gep(a, &[b]) }; - let body_bcx = bcx.fcx().build_new_block("slice_loop_body"); - let next_bcx = bcx.fcx().build_new_block("slice_loop_next"); - let header_bcx = bcx.fcx().build_new_block("slice_loop_header"); + let body_bcx = bcx.build_sibling_block("slice_loop_body"); + let next_bcx = bcx.build_sibling_block("slice_loop_next"); + let header_bcx = bcx.build_sibling_block("slice_loop_header"); let start = if zst { C_uint(bcx.ccx, 0usize) diff --git a/src/librustc_typeck/check/_match.rs b/src/librustc_typeck/check/_match.rs index 9b86196b3ece2..0fa9062df4591 100644 --- a/src/librustc_typeck/check/_match.rs +++ b/src/librustc_typeck/check/_match.rs @@ -399,7 +399,13 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { self.check_pat(&p, discrim_ty); all_pats_diverge &= self.diverges.get(); } - all_pats_diverge + // As discussed with @eddyb, this is for disabling unreachable_code + // warnings on patterns (they're now subsumed by unreachable_patterns + // warnings). + match all_pats_diverge { + Diverges::Maybe => Diverges::Maybe, + Diverges::Always | Diverges::WarnedAlways => Diverges::WarnedAlways, + } }).collect(); // Now typecheck the blocks. diff --git a/src/librustc_typeck/check/mod.rs b/src/librustc_typeck/check/mod.rs index 26dd53fecb243..ec1ca99c7687f 100644 --- a/src/librustc_typeck/check/mod.rs +++ b/src/librustc_typeck/check/mod.rs @@ -3552,19 +3552,23 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { hir::UnNot => { oprnd_t = self.structurally_resolved_type(oprnd.span, oprnd_t); + let result = self.check_user_unop("!", "not", + tcx.lang_items.not_trait(), + expr, &oprnd, oprnd_t, unop); + // If it's builtin, we can reuse the type, this helps inference. if !(oprnd_t.is_integral() || oprnd_t.sty == ty::TyBool) { - oprnd_t = self.check_user_unop("!", "not", - tcx.lang_items.not_trait(), - expr, &oprnd, oprnd_t, unop); + oprnd_t = result; } } hir::UnNeg => { oprnd_t = self.structurally_resolved_type(oprnd.span, oprnd_t); + let result = self.check_user_unop("-", "neg", + tcx.lang_items.neg_trait(), + expr, &oprnd, oprnd_t, unop); + // If it's builtin, we can reuse the type, this helps inference. 
if !(oprnd_t.is_integral() || oprnd_t.is_fp()) { - oprnd_t = self.check_user_unop("-", "neg", - tcx.lang_items.neg_trait(), - expr, &oprnd, oprnd_t, unop); + oprnd_t = result; } } } diff --git a/src/librustc_typeck/check/writeback.rs b/src/librustc_typeck/check/writeback.rs index 9a2bfbf715af9..6d32b7364f8ea 100644 --- a/src/librustc_typeck/check/writeback.rs +++ b/src/librustc_typeck/check/writeback.rs @@ -123,8 +123,17 @@ impl<'cx, 'gcx, 'tcx> WritebackCx<'cx, 'gcx, 'tcx> { // as potentially overloaded. But then, during writeback, if // we observe that something like `a+b` is (known to be) // operating on scalars, we clear the overload. - fn fix_scalar_binary_expr(&mut self, e: &hir::Expr) { + fn fix_scalar_builtin_expr(&mut self, e: &hir::Expr) { match e.node { + hir::ExprUnary(hir::UnNeg, ref inner) | + hir::ExprUnary(hir::UnNot, ref inner) => { + let inner_ty = self.fcx.node_ty(inner.id); + let inner_ty = self.fcx.resolve_type_vars_if_possible(&inner_ty); + + if inner_ty.is_scalar() { + self.fcx.tables.borrow_mut().method_map.remove(&MethodCall::expr(e.id)); + } + } hir::ExprBinary(ref op, ref lhs, ref rhs) | hir::ExprAssignOp(ref op, ref lhs, ref rhs) => { let lhs_ty = self.fcx.node_ty(lhs.id); @@ -185,7 +194,7 @@ impl<'cx, 'gcx, 'tcx> Visitor<'gcx> for WritebackCx<'cx, 'gcx, 'tcx> { return; } - self.fix_scalar_binary_expr(e); + self.fix_scalar_builtin_expr(e); self.visit_node_id(ResolvingExpr(e.span), e.id); self.visit_method_map_entry(ResolvingExpr(e.span), diff --git a/src/librustc_typeck/coherence/builtin.rs b/src/librustc_typeck/coherence/builtin.rs new file mode 100644 index 0000000000000..ba95a17989165 --- /dev/null +++ b/src/librustc_typeck/coherence/builtin.rs @@ -0,0 +1,349 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Check properties that are required by built-in traits and set +//! up data structures required by type-checking/translation. 
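The `visit_implementation_of_copy` check below rejects `Copy` impls whose fields are not all `Copy` (E0204), impls on types that are not structs or enums (E0206), and `Copy` on types with destructors (E0184). A user-side illustration (the rejected impls are left commented out because they are compile-time errors):

    #[derive(Clone)]
    struct Accepted {
        x: u32, // every field is Copy, so the impl below is accepted
    }
    impl Copy for Accepted {}

    #[derive(Clone)]
    struct HasString {
        s: String,
    }
    // impl Copy for HasString {}             // E0204: field `s` does not implement `Copy`
    // impl Copy for &'static mut Accepted {} // E0206: not a struct or enumeration

    fn main() {
        let a = Accepted { x: 1 };
        let b = a; // copied; `a` remains usable
        let owned = HasString { s: String::from("moved, not copied") };
        println!("{} {} {}", a.x, b.x, owned.s);
    }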
+ +use rustc::middle::free_region::FreeRegionMap; +use rustc::middle::lang_items::UnsizeTraitLangItem; + +use rustc::traits::{self, ObligationCause, Reveal}; +use rustc::ty::{self, Ty, TyCtxt}; +use rustc::ty::ParameterEnvironment; +use rustc::ty::TypeFoldable; +use rustc::ty::subst::Subst; +use rustc::ty::util::CopyImplementationError; +use rustc::infer; + +use rustc::hir::def_id::DefId; +use rustc::hir::map as hir_map; +use rustc::hir::{self, ItemImpl}; + +pub fn check<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { + check_trait(tcx, tcx.lang_items.drop_trait(), visit_implementation_of_drop); + check_trait(tcx, tcx.lang_items.copy_trait(), visit_implementation_of_copy); + check_trait( + tcx, + tcx.lang_items.coerce_unsized_trait(), + visit_implementation_of_coerce_unsized); +} + +fn check_trait<'a, 'tcx, F>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + trait_def_id: Option, + mut f: F) + where F: FnMut(TyCtxt<'a, 'tcx, 'tcx>, DefId, DefId) +{ + if let Some(trait_def_id) = trait_def_id { + let mut impls = vec![]; + tcx.lookup_trait_def(trait_def_id).for_each_impl(tcx, |did| { + impls.push(did); + }); + impls.sort(); + for impl_def_id in impls { + f(tcx, trait_def_id, impl_def_id); + } + } +} + +fn visit_implementation_of_drop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + _drop_did: DefId, + impl_did: DefId) { + let items = tcx.associated_item_def_ids(impl_did); + if items.is_empty() { + // We'll error out later. For now, just don't ICE. + return; + } + let method_def_id = items[0]; + + let self_type = tcx.item_type(impl_did); + match self_type.sty { + ty::TyAdt(type_def, _) => { + type_def.set_destructor(method_def_id); + } + _ => { + // Destructors only work on nominal types. + if let Some(impl_node_id) = tcx.map.as_local_node_id(impl_did) { + match tcx.map.find(impl_node_id) { + Some(hir_map::NodeItem(item)) => { + let span = match item.node { + ItemImpl(.., ref ty, _) => ty.span, + _ => item.span, + }; + struct_span_err!(tcx.sess, + span, + E0120, + "the Drop trait may only be implemented on \ + structures") + .span_label(span, &format!("implementing Drop requires a struct")) + .emit(); + } + _ => { + bug!("didn't find impl in ast map"); + } + } + } else { + bug!("found external impl of Drop trait on \ + something other than a struct"); + } + } + } +} + +fn visit_implementation_of_copy<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + _copy_did: DefId, + impl_did: DefId) { + debug!("visit_implementation_of_copy: impl_did={:?}", impl_did); + + let impl_node_id = if let Some(n) = tcx.map.as_local_node_id(impl_did) { + n + } else { + debug!("visit_implementation_of_copy(): impl not in this \ + crate"); + return; + }; + + let self_type = tcx.item_type(impl_did); + debug!("visit_implementation_of_copy: self_type={:?} (bound)", + self_type); + + let span = tcx.map.span(impl_node_id); + let param_env = ParameterEnvironment::for_item(tcx, impl_node_id); + let self_type = self_type.subst(tcx, ¶m_env.free_substs); + assert!(!self_type.has_escaping_regions()); + + debug!("visit_implementation_of_copy: self_type={:?} (free)", + self_type); + + match param_env.can_type_implement_copy(tcx, self_type, span) { + Ok(()) => {} + Err(CopyImplementationError::InfrigingField(field)) => { + let item = tcx.map.expect_item(impl_node_id); + let span = if let ItemImpl(.., Some(ref tr), _, _) = item.node { + tr.path.span + } else { + span + }; + + struct_span_err!(tcx.sess, + span, + E0204, + "the trait `Copy` may not be implemented for this type") + .span_label( + tcx.def_span(field.did), + &"this field does not implement `Copy`") + .emit() + } 
+ Err(CopyImplementationError::NotAnAdt) => { + let item = tcx.map.expect_item(impl_node_id); + let span = if let ItemImpl(.., ref ty, _) = item.node { + ty.span + } else { + span + }; + + struct_span_err!(tcx.sess, + span, + E0206, + "the trait `Copy` may not be implemented for this type") + .span_label(span, &format!("type is not a structure or enumeration")) + .emit(); + } + Err(CopyImplementationError::HasDestructor) => { + struct_span_err!(tcx.sess, + span, + E0184, + "the trait `Copy` may not be implemented for this type; the \ + type has a destructor") + .span_label(span, &format!("Copy not allowed on types with destructors")) + .emit(); + } + } +} + +fn visit_implementation_of_coerce_unsized<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + coerce_unsized_trait: DefId, + impl_did: DefId) { + debug!("visit_implementation_of_coerce_unsized: impl_did={:?}", + impl_did); + + let unsize_trait = match tcx.lang_items.require(UnsizeTraitLangItem) { + Ok(id) => id, + Err(err) => { + tcx.sess.fatal(&format!("`CoerceUnsized` implementation {}", err)); + } + }; + + let impl_node_id = if let Some(n) = tcx.map.as_local_node_id(impl_did) { + n + } else { + debug!("visit_implementation_of_coerce_unsized(): impl not \ + in this crate"); + return; + }; + + let source = tcx.item_type(impl_did); + let trait_ref = tcx.impl_trait_ref(impl_did).unwrap(); + let target = trait_ref.substs.type_at(1); + debug!("visit_implementation_of_coerce_unsized: {:?} -> {:?} (bound)", + source, + target); + + let span = tcx.map.span(impl_node_id); + let param_env = ParameterEnvironment::for_item(tcx, impl_node_id); + let source = source.subst(tcx, ¶m_env.free_substs); + let target = target.subst(tcx, ¶m_env.free_substs); + assert!(!source.has_escaping_regions()); + + debug!("visit_implementation_of_coerce_unsized: {:?} -> {:?} (free)", + source, + target); + + tcx.infer_ctxt(None, Some(param_env), Reveal::ExactMatch).enter(|infcx| { + let cause = ObligationCause::misc(span, impl_node_id); + let check_mutbl = |mt_a: ty::TypeAndMut<'tcx>, + mt_b: ty::TypeAndMut<'tcx>, + mk_ptr: &Fn(Ty<'tcx>) -> Ty<'tcx>| { + if (mt_a.mutbl, mt_b.mutbl) == (hir::MutImmutable, hir::MutMutable) { + infcx.report_mismatched_types(&cause, + mk_ptr(mt_b.ty), + target, + ty::error::TypeError::Mutability) + .emit(); + } + (mt_a.ty, mt_b.ty, unsize_trait, None) + }; + let (source, target, trait_def_id, kind) = match (&source.sty, &target.sty) { + (&ty::TyBox(a), &ty::TyBox(b)) => (a, b, unsize_trait, None), + + (&ty::TyRef(r_a, mt_a), &ty::TyRef(r_b, mt_b)) => { + infcx.sub_regions(infer::RelateObjectBound(span), r_b, r_a); + check_mutbl(mt_a, mt_b, &|ty| tcx.mk_imm_ref(r_b, ty)) + } + + (&ty::TyRef(_, mt_a), &ty::TyRawPtr(mt_b)) | + (&ty::TyRawPtr(mt_a), &ty::TyRawPtr(mt_b)) => { + check_mutbl(mt_a, mt_b, &|ty| tcx.mk_imm_ptr(ty)) + } + + (&ty::TyAdt(def_a, substs_a), &ty::TyAdt(def_b, substs_b)) if def_a.is_struct() && + def_b.is_struct() => { + if def_a != def_b { + let source_path = tcx.item_path_str(def_a.did); + let target_path = tcx.item_path_str(def_b.did); + span_err!(tcx.sess, + span, + E0377, + "the trait `CoerceUnsized` may only be implemented \ + for a coercion between structures with the same \ + definition; expected {}, found {}", + source_path, + target_path); + return; + } + + let fields = &def_a.struct_variant().fields; + let diff_fields = fields.iter() + .enumerate() + .filter_map(|(i, f)| { + let (a, b) = (f.ty(tcx, substs_a), f.ty(tcx, substs_b)); + + if tcx.item_type(f.did).is_phantom_data() { + // Ignore PhantomData fields + return 
None; + } + + // Ignore fields that aren't significantly changed + if let Ok(ok) = infcx.sub_types(false, &cause, b, a) { + if ok.obligations.is_empty() { + return None; + } + } + + // Collect up all fields that were significantly changed + // i.e. those that contain T in coerce_unsized T -> U + Some((i, a, b)) + }) + .collect::>(); + + if diff_fields.is_empty() { + span_err!(tcx.sess, + span, + E0374, + "the trait `CoerceUnsized` may only be implemented \ + for a coercion between structures with one field \ + being coerced, none found"); + return; + } else if diff_fields.len() > 1 { + let item = tcx.map.expect_item(impl_node_id); + let span = if let ItemImpl(.., Some(ref t), _, _) = item.node { + t.path.span + } else { + tcx.map.span(impl_node_id) + }; + + let mut err = struct_span_err!(tcx.sess, + span, + E0375, + "implementing the trait \ + `CoerceUnsized` requires multiple \ + coercions"); + err.note("`CoerceUnsized` may only be implemented for \ + a coercion between structures with one field being coerced"); + err.note(&format!("currently, {} fields need coercions: {}", + diff_fields.len(), + diff_fields.iter() + .map(|&(i, a, b)| { + format!("{} ({} to {})", fields[i].name, a, b) + }) + .collect::>() + .join(", "))); + err.span_label(span, &format!("requires multiple coercions")); + err.emit(); + return; + } + + let (i, a, b) = diff_fields[0]; + let kind = ty::adjustment::CustomCoerceUnsized::Struct(i); + (a, b, coerce_unsized_trait, Some(kind)) + } + + _ => { + span_err!(tcx.sess, + span, + E0376, + "the trait `CoerceUnsized` may only be implemented \ + for a coercion between structures"); + return; + } + }; + + let mut fulfill_cx = traits::FulfillmentContext::new(); + + // Register an obligation for `A: Trait`. + let cause = traits::ObligationCause::misc(span, impl_node_id); + let predicate = tcx.predicate_for_trait_def(cause, trait_def_id, 0, source, &[target]); + fulfill_cx.register_predicate_obligation(&infcx, predicate); + + // Check that all transitive obligations are satisfied. + if let Err(errors) = fulfill_cx.select_all_or_error(&infcx) { + infcx.report_fulfillment_errors(&errors); + } + + // Finally, resolve all regions. + let mut free_regions = FreeRegionMap::new(); + free_regions.relate_free_regions_from_predicates(&infcx.parameter_environment + .caller_bounds); + infcx.resolve_regions_and_report_errors(&free_regions, impl_node_id); + + if let Some(kind) = kind { + tcx.custom_coerce_unsized_kinds.borrow_mut().insert(impl_did, kind); + } + }); +} diff --git a/src/librustc_typeck/coherence/mod.rs b/src/librustc_typeck/coherence/mod.rs index 3bbe5aa1fef37..6cf752dd69fca 100644 --- a/src/librustc_typeck/coherence/mod.rs +++ b/src/librustc_typeck/coherence/mod.rs @@ -16,45 +16,33 @@ // mappings. That mapping code resides here. 
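The `CoerceUnsized` checks above (E0374, E0375, E0376, E0377) boil down to: the impl must coerce between two instantiations of the same struct, with exactly one non-`PhantomData` field changing type. From the user's side the accepted shape looks like the following nightly-only sketch (it mirrors the standard-library documentation example for custom pointer types rather than anything added in this patch):

    #![feature(unsize, coerce_unsized)]

    use std::marker::Unsize;
    use std::ops::CoerceUnsized;

    struct MyBox<T: ?Sized> {
        ptr: *const T, // the single field whose type changes under the coercion
    }

    // Accepted by the check above: exactly one field is coerced, `*const T` to `*const U`.
    impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<MyBox<U>> for MyBox<T> {}

    fn main() {
        let sized = MyBox { ptr: &[1u8, 2, 3] as *const [u8; 3] };
        let coerced: MyBox<[u8]> = sized; // MyBox<[u8; 3]> -> MyBox<[u8]>
        assert_eq!(unsafe { (*coerced.ptr).len() }, 3);
    }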
use hir::def_id::DefId; -use middle::lang_items::UnsizeTraitLangItem; -use rustc::ty::subst::Subst; use rustc::ty::{self, TyCtxt, TypeFoldable}; -use rustc::traits::{self, ObligationCause, Reveal}; -use rustc::ty::ParameterEnvironment; use rustc::ty::{Ty, TyBool, TyChar, TyError}; use rustc::ty::{TyParam, TyRawPtr}; use rustc::ty::{TyRef, TyAdt, TyDynamic, TyNever, TyTuple}; use rustc::ty::{TyStr, TyArray, TySlice, TyFloat, TyInfer, TyInt}; use rustc::ty::{TyUint, TyClosure, TyBox, TyFnDef, TyFnPtr}; use rustc::ty::{TyProjection, TyAnon}; -use rustc::ty::util::CopyImplementationError; -use middle::free_region::FreeRegionMap; use CrateCtxt; -use rustc::infer::{self, InferCtxt}; use syntax_pos::Span; use rustc::dep_graph::DepNode; -use rustc::hir::map as hir_map; use rustc::hir::itemlikevisit::ItemLikeVisitor; use rustc::hir::{Item, ItemImpl}; use rustc::hir; +mod builtin; mod orphan; mod overlap; mod unsafety; -struct CoherenceChecker<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> { - crate_context: &'a CrateCtxt<'a, 'gcx>, - inference_context: InferCtxt<'a, 'gcx, 'tcx>, +struct CoherenceChecker<'a, 'tcx: 'a> { + tcx: TyCtxt<'a, 'tcx, 'tcx>, } -struct CoherenceCheckVisitor<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> { - cc: &'a CoherenceChecker<'a, 'gcx, 'tcx>, -} - -impl<'a, 'gcx, 'tcx, 'v> ItemLikeVisitor<'v> for CoherenceCheckVisitor<'a, 'gcx, 'tcx> { +impl<'a, 'tcx, 'v> ItemLikeVisitor<'v> for CoherenceChecker<'a, 'tcx> { fn visit_item(&mut self, item: &Item) { if let ItemImpl(..) = item.node { - self.cc.check_implementation(item) + self.check_implementation(item) } } @@ -65,7 +53,7 @@ impl<'a, 'gcx, 'tcx, 'v> ItemLikeVisitor<'v> for CoherenceCheckVisitor<'a, 'gcx, } } -impl<'a, 'gcx, 'tcx> CoherenceChecker<'a, 'gcx, 'tcx> { +impl<'a, 'tcx> CoherenceChecker<'a, 'tcx> { // Returns the def ID of the base type, if there is one. fn get_base_type_def_id(&self, span: Span, ty: Ty<'tcx>) -> Option { match ty.sty { @@ -73,7 +61,7 @@ impl<'a, 'gcx, 'tcx> CoherenceChecker<'a, 'gcx, 'tcx> { TyDynamic(ref t, ..) => t.principal().map(|p| p.def_id()), - TyBox(_) => self.inference_context.tcx.lang_items.owned_box(), + TyBox(_) => self.tcx.lang_items.owned_box(), TyBool | TyChar | TyInt(..) | TyUint(..) | TyFloat(..) | TyStr | TyArray(..) | TySlice(..) | TyFnDef(..) | TyFnPtr(_) | TyTuple(..) | TyParam(..) | TyError | @@ -89,36 +77,22 @@ impl<'a, 'gcx, 'tcx> CoherenceChecker<'a, 'gcx, 'tcx> { } } - fn check(&self) { + fn check(&mut self) { // Check implementations and traits. This populates the tables // containing the inherent methods and extension methods. It also // builds up the trait inheritance table. - self.crate_context.tcx.visit_all_item_likes_in_krate( - DepNode::CoherenceCheckImpl, - &mut CoherenceCheckVisitor { cc: self }); - - // Populate the table of destructors. It might seem a bit strange to - // do this here, but it's actually the most convenient place, since - // the coherence tables contain the trait -> type mappings. - self.populate_destructors(); - - // Check to make sure implementations of `Copy` are legal. - self.check_implementations_of_copy(); - - // Check to make sure implementations of `CoerceUnsized` are legal - // and collect the necessary information from them. 
- self.check_implementations_of_coerce_unsized(); + self.tcx.visit_all_item_likes_in_krate(DepNode::CoherenceCheckImpl, self); } fn check_implementation(&self, item: &Item) { - let tcx = self.crate_context.tcx; + let tcx = self.tcx; let impl_did = tcx.map.local_def_id(item.id); let self_type = tcx.item_type(impl_did); // If there are no traits, then this implementation must have a // base type. - if let Some(trait_ref) = self.crate_context.tcx.impl_trait_ref(impl_did) { + if let Some(trait_ref) = self.tcx.impl_trait_ref(impl_did) { debug!("(checking implementation) adding impl for trait '{:?}', item '{}'", trait_ref, item.name); @@ -129,9 +103,7 @@ impl<'a, 'gcx, 'tcx> CoherenceChecker<'a, 'gcx, 'tcx> { return; } - enforce_trait_manually_implementable(self.crate_context.tcx, - item.span, - trait_ref.def_id); + enforce_trait_manually_implementable(self.tcx, item.span, trait_ref.def_id); self.add_trait_impl(trait_ref, impl_did); } else { // Skip inherent impls where the self type is an error @@ -150,348 +122,15 @@ impl<'a, 'gcx, 'tcx> CoherenceChecker<'a, 'gcx, 'tcx> { } fn add_inherent_impl(&self, base_def_id: DefId, impl_def_id: DefId) { - let tcx = self.crate_context.tcx; - tcx.inherent_impls.borrow_mut().push(base_def_id, impl_def_id); + self.tcx.inherent_impls.borrow_mut().push(base_def_id, impl_def_id); } - fn add_trait_impl(&self, impl_trait_ref: ty::TraitRef<'gcx>, impl_def_id: DefId) { + fn add_trait_impl(&self, impl_trait_ref: ty::TraitRef<'tcx>, impl_def_id: DefId) { debug!("add_trait_impl: impl_trait_ref={:?} impl_def_id={:?}", impl_trait_ref, impl_def_id); - let trait_def = self.crate_context.tcx.lookup_trait_def(impl_trait_ref.def_id); - trait_def.record_local_impl(self.crate_context.tcx, impl_def_id, impl_trait_ref); - } - - // Destructors - // - - fn populate_destructors(&self) { - let tcx = self.crate_context.tcx; - let drop_trait = match tcx.lang_items.drop_trait() { - Some(id) => id, - None => return, - }; - tcx.populate_implementations_for_trait_if_necessary(drop_trait); - let drop_trait = tcx.lookup_trait_def(drop_trait); - - drop_trait.for_each_impl(tcx, |impl_did| { - let items = tcx.associated_item_def_ids(impl_did); - if items.is_empty() { - // We'll error out later. For now, just don't ICE. - return; - } - let method_def_id = items[0]; - - let self_type = tcx.item_type(impl_did); - match self_type.sty { - ty::TyAdt(type_def, _) => { - type_def.set_destructor(method_def_id); - } - _ => { - // Destructors only work on nominal types. - if let Some(impl_node_id) = tcx.map.as_local_node_id(impl_did) { - match tcx.map.find(impl_node_id) { - Some(hir_map::NodeItem(item)) => { - let span = match item.node { - ItemImpl(.., ref ty, _) => ty.span, - _ => item.span, - }; - struct_span_err!(tcx.sess, - span, - E0120, - "the Drop trait may only be implemented on \ - structures") - .span_label(span, - &format!("implementing Drop requires a struct")) - .emit(); - } - _ => { - bug!("didn't find impl in ast map"); - } - } - } else { - bug!("found external impl of Drop trait on \ - something other than a struct"); - } - } - } - }); - } - - /// Ensures that implementations of the built-in trait `Copy` are legal. 
- fn check_implementations_of_copy(&self) { - let tcx = self.crate_context.tcx; - let copy_trait = match tcx.lang_items.copy_trait() { - Some(id) => id, - None => return, - }; - tcx.populate_implementations_for_trait_if_necessary(copy_trait); - let copy_trait = tcx.lookup_trait_def(copy_trait); - - copy_trait.for_each_impl(tcx, |impl_did| { - debug!("check_implementations_of_copy: impl_did={:?}", impl_did); - - let impl_node_id = if let Some(n) = tcx.map.as_local_node_id(impl_did) { - n - } else { - debug!("check_implementations_of_copy(): impl not in this \ - crate"); - return; - }; - - let self_type = tcx.item_type(impl_did); - debug!("check_implementations_of_copy: self_type={:?} (bound)", - self_type); - - let span = tcx.map.span(impl_node_id); - let param_env = ParameterEnvironment::for_item(tcx, impl_node_id); - let self_type = self_type.subst(tcx, ¶m_env.free_substs); - assert!(!self_type.has_escaping_regions()); - - debug!("check_implementations_of_copy: self_type={:?} (free)", - self_type); - - match param_env.can_type_implement_copy(tcx, self_type, span) { - Ok(()) => {} - Err(CopyImplementationError::InfrigingField(name)) => { - struct_span_err!(tcx.sess, - span, - E0204, - "the trait `Copy` may not be implemented for this type") - .span_label(span, &format!("field `{}` does not implement `Copy`", name)) - .emit() - } - Err(CopyImplementationError::InfrigingVariant(name)) => { - let item = tcx.map.expect_item(impl_node_id); - let span = if let ItemImpl(.., Some(ref tr), _, _) = item.node { - tr.path.span - } else { - span - }; - - struct_span_err!(tcx.sess, - span, - E0205, - "the trait `Copy` may not be implemented for this type") - .span_label(span, - &format!("variant `{}` does not implement `Copy`", name)) - .emit() - } - Err(CopyImplementationError::NotAnAdt) => { - let item = tcx.map.expect_item(impl_node_id); - let span = if let ItemImpl(.., ref ty, _) = item.node { - ty.span - } else { - span - }; - - struct_span_err!(tcx.sess, - span, - E0206, - "the trait `Copy` may not be implemented for this type") - .span_label(span, &format!("type is not a structure or enumeration")) - .emit(); - } - Err(CopyImplementationError::HasDestructor) => { - struct_span_err!(tcx.sess, - span, - E0184, - "the trait `Copy` may not be implemented for this type; the \ - type has a destructor") - .span_label(span, &format!("Copy not allowed on types with destructors")) - .emit(); - } - } - }); - } - - /// Process implementations of the built-in trait `CoerceUnsized`. 
- fn check_implementations_of_coerce_unsized(&self) { - let tcx = self.crate_context.tcx; - let coerce_unsized_trait = match tcx.lang_items.coerce_unsized_trait() { - Some(id) => id, - None => return, - }; - let unsize_trait = match tcx.lang_items.require(UnsizeTraitLangItem) { - Ok(id) => id, - Err(err) => { - tcx.sess.fatal(&format!("`CoerceUnsized` implementation {}", err)); - } - }; - - let trait_def = tcx.lookup_trait_def(coerce_unsized_trait); - - trait_def.for_each_impl(tcx, |impl_did| { - debug!("check_implementations_of_coerce_unsized: impl_did={:?}", - impl_did); - - let impl_node_id = if let Some(n) = tcx.map.as_local_node_id(impl_did) { - n - } else { - debug!("check_implementations_of_coerce_unsized(): impl not \ - in this crate"); - return; - }; - - let source = tcx.item_type(impl_did); - let trait_ref = self.crate_context.tcx.impl_trait_ref(impl_did).unwrap(); - let target = trait_ref.substs.type_at(1); - debug!("check_implementations_of_coerce_unsized: {:?} -> {:?} (bound)", - source, - target); - - let span = tcx.map.span(impl_node_id); - let param_env = ParameterEnvironment::for_item(tcx, impl_node_id); - let source = source.subst(tcx, ¶m_env.free_substs); - let target = target.subst(tcx, ¶m_env.free_substs); - assert!(!source.has_escaping_regions()); - - debug!("check_implementations_of_coerce_unsized: {:?} -> {:?} (free)", - source, - target); - - tcx.infer_ctxt(None, Some(param_env), Reveal::ExactMatch).enter(|infcx| { - let cause = ObligationCause::misc(span, impl_node_id); - let check_mutbl = |mt_a: ty::TypeAndMut<'gcx>, - mt_b: ty::TypeAndMut<'gcx>, - mk_ptr: &Fn(Ty<'gcx>) -> Ty<'gcx>| { - if (mt_a.mutbl, mt_b.mutbl) == (hir::MutImmutable, hir::MutMutable) { - infcx.report_mismatched_types(&cause, - mk_ptr(mt_b.ty), - target, - ty::error::TypeError::Mutability).emit(); - } - (mt_a.ty, mt_b.ty, unsize_trait, None) - }; - let (source, target, trait_def_id, kind) = match (&source.sty, &target.sty) { - (&ty::TyBox(a), &ty::TyBox(b)) => (a, b, unsize_trait, None), - - (&ty::TyRef(r_a, mt_a), &ty::TyRef(r_b, mt_b)) => { - infcx.sub_regions(infer::RelateObjectBound(span), r_b, r_a); - check_mutbl(mt_a, mt_b, &|ty| tcx.mk_imm_ref(r_b, ty)) - } - - (&ty::TyRef(_, mt_a), &ty::TyRawPtr(mt_b)) | - (&ty::TyRawPtr(mt_a), &ty::TyRawPtr(mt_b)) => { - check_mutbl(mt_a, mt_b, &|ty| tcx.mk_imm_ptr(ty)) - } - - (&ty::TyAdt(def_a, substs_a), &ty::TyAdt(def_b, substs_b)) - if def_a.is_struct() && def_b.is_struct() => { - if def_a != def_b { - let source_path = tcx.item_path_str(def_a.did); - let target_path = tcx.item_path_str(def_b.did); - span_err!(tcx.sess, - span, - E0377, - "the trait `CoerceUnsized` may only be implemented \ - for a coercion between structures with the same \ - definition; expected {}, found {}", - source_path, - target_path); - return; - } - - let fields = &def_a.struct_variant().fields; - let diff_fields = fields.iter() - .enumerate() - .filter_map(|(i, f)| { - let (a, b) = (f.ty(tcx, substs_a), f.ty(tcx, substs_b)); - - if tcx.item_type(f.did).is_phantom_data() { - // Ignore PhantomData fields - return None; - } - - // Ignore fields that aren't significantly changed - if let Ok(ok) = infcx.sub_types(false, &cause, b, a) { - if ok.obligations.is_empty() { - return None; - } - } - - // Collect up all fields that were significantly changed - // i.e. 
those that contain T in coerce_unsized T -> U - Some((i, a, b)) - }) - .collect::>(); - - if diff_fields.is_empty() { - span_err!(tcx.sess, - span, - E0374, - "the trait `CoerceUnsized` may only be implemented \ - for a coercion between structures with one field \ - being coerced, none found"); - return; - } else if diff_fields.len() > 1 { - let item = tcx.map.expect_item(impl_node_id); - let span = if let ItemImpl(.., Some(ref t), _, _) = item.node { - t.path.span - } else { - tcx.map.span(impl_node_id) - }; - - let mut err = struct_span_err!(tcx.sess, - span, - E0375, - "implementing the trait \ - `CoerceUnsized` requires multiple \ - coercions"); - err.note("`CoerceUnsized` may only be implemented for \ - a coercion between structures with one field being coerced"); - err.note(&format!("currently, {} fields need coercions: {}", - diff_fields.len(), - diff_fields.iter() - .map(|&(i, a, b)| { - format!("{} ({} to {})", fields[i].name, a, b) - }) - .collect::>() - .join(", "))); - err.span_label(span, &format!("requires multiple coercions")); - err.emit(); - return; - } - - let (i, a, b) = diff_fields[0]; - let kind = ty::adjustment::CustomCoerceUnsized::Struct(i); - (a, b, coerce_unsized_trait, Some(kind)) - } - - _ => { - span_err!(tcx.sess, - span, - E0376, - "the trait `CoerceUnsized` may only be implemented \ - for a coercion between structures"); - return; - } - }; - - let mut fulfill_cx = traits::FulfillmentContext::new(); - - // Register an obligation for `A: Trait`. - let cause = traits::ObligationCause::misc(span, impl_node_id); - let predicate = - tcx.predicate_for_trait_def(cause, trait_def_id, 0, source, &[target]); - fulfill_cx.register_predicate_obligation(&infcx, predicate); - - // Check that all transitive obligations are satisfied. - if let Err(errors) = fulfill_cx.select_all_or_error(&infcx) { - infcx.report_fulfillment_errors(&errors); - } - - // Finally, resolve all regions. - let mut free_regions = FreeRegionMap::new(); - free_regions.relate_free_regions_from_predicates(&infcx.parameter_environment - .caller_bounds); - infcx.resolve_regions_and_report_errors(&free_regions, impl_node_id); - - if let Some(kind) = kind { - tcx.custom_coerce_unsized_kinds.borrow_mut().insert(impl_did, kind); - } - }); - }); + let trait_def = self.tcx.lookup_trait_def(impl_trait_ref.def_id); + trait_def.record_local_impl(self.tcx, impl_def_id, impl_trait_ref); } } @@ -524,14 +163,9 @@ fn enforce_trait_manually_implementable(tcx: TyCtxt, sp: Span, trait_def_id: Def pub fn check_coherence(ccx: &CrateCtxt) { let _task = ccx.tcx.dep_graph.in_task(DepNode::Coherence); - ccx.tcx.infer_ctxt(None, None, Reveal::ExactMatch).enter(|infcx| { - CoherenceChecker { - crate_context: ccx, - inference_context: infcx, - } - .check(); - }); + CoherenceChecker { tcx: ccx.tcx }.check(); unsafety::check(ccx.tcx); orphan::check(ccx.tcx); overlap::check(ccx.tcx); + builtin::check(ccx.tcx); } diff --git a/src/librustc_typeck/diagnostics.rs b/src/librustc_typeck/diagnostics.rs index d3b671f2a4d6e..1a971be64d819 100644 --- a/src/librustc_typeck/diagnostics.rs +++ b/src/librustc_typeck/diagnostics.rs @@ -2300,6 +2300,7 @@ This fails because `&mut T` is not `Copy`, even when `T` is `Copy` (this differs from the behavior for `&T`, which is always `Copy`). "##, +/* E0205: r##" An attempt to implement the `Copy` trait for an enum failed because one of the variants does not implement `Copy`. 
To fix this, you must implement `Copy` for @@ -2329,6 +2330,7 @@ enum Foo<'a> { This fails because `&mut T` is not `Copy`, even when `T` is `Copy` (this differs from the behavior for `&T`, which is always `Copy`). "##, +*/ E0206: r##" You can only implement `Copy` for a struct or enum. Both of the following diff --git a/src/rt/rust_test_helpers.c b/src/rt/rust_test_helpers.c index 7a04d377608d4..f2d9119a7d156 100644 --- a/src/rt/rust_test_helpers.c +++ b/src/rt/rust_test_helpers.c @@ -268,3 +268,22 @@ LARGE_INTEGER increment_all_parts(LARGE_INTEGER li) { li.QuadPart += 1; return li; } + +#define DO_INT128_TEST !(defined(WIN32) || defined(_WIN32) || defined(__WIN32)) && \ + defined(__amd64__) + +#if DO_INT128_TEST + +unsigned __int128 identity(unsigned __int128 a) { + return a; +} + +__int128 square(__int128 a) { + return a * a; +} + +__int128 sub(__int128 a, __int128 b) { + return a - b; +} + +#endif diff --git a/src/test/codegen/stores.rs b/src/test/codegen/stores.rs index 9141b7245e35a..6135f49eb711b 100644 --- a/src/test/codegen/stores.rs +++ b/src/test/codegen/stores.rs @@ -24,8 +24,8 @@ pub struct Bytes { // dependent alignment #[no_mangle] pub fn small_array_alignment(x: &mut [i8; 4], y: [i8; 4]) { -// CHECK: %arg1 = alloca [4 x i8] // CHECK: [[TMP:%.+]] = alloca i32 +// CHECK: %arg1 = alloca [4 x i8] // CHECK: store i32 %1, i32* [[TMP]] // CHECK: [[Y8:%[0-9]+]] = bitcast [4 x i8]* %arg1 to i8* // CHECK: [[TMP8:%[0-9]+]] = bitcast i32* [[TMP]] to i8* @@ -38,8 +38,8 @@ pub fn small_array_alignment(x: &mut [i8; 4], y: [i8; 4]) { // dependent alignment #[no_mangle] pub fn small_struct_alignment(x: &mut Bytes, y: Bytes) { -// CHECK: %arg1 = alloca %Bytes // CHECK: [[TMP:%.+]] = alloca i32 +// CHECK: %arg1 = alloca %Bytes // CHECK: store i32 %1, i32* [[TMP]] // CHECK: [[Y8:%[0-9]+]] = bitcast %Bytes* %arg1 to i8* // CHECK: [[TMP8:%[0-9]+]] = bitcast i32* [[TMP]] to i8* diff --git a/src/test/compile-fail-fulldeps/auxiliary/pub_and_stability.rs b/src/test/compile-fail-fulldeps/auxiliary/pub_and_stability.rs new file mode 100644 index 0000000000000..9dc4cf1252ec3 --- /dev/null +++ b/src/test/compile-fail-fulldeps/auxiliary/pub_and_stability.rs @@ -0,0 +1,144 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// This crate attempts to enumerate the various scenarios for how a +// type can define fields and methods with various visiblities and +// stabilities. +// +// The basic stability pattern in this file has four cases: +// 1. no stability attribute at all +// 2. a stable attribute (feature "unit_test") +// 3. an unstable attribute that unit test declares (feature "unstable_declared") +// 4. an unstable attribute that unit test fails to declare (feature "unstable_undeclared") +// +// This file also covers four kinds of visibility: private, +// pub(module), pub(crate), and pub. +// +// However, since stability attributes can only be observed in +// cross-crate linkage scenarios, there is little reason to take the +// cross-product (4 stability cases * 4 visiblity cases), because the +// first three visibility cases cannot be accessed outside this crate, +// and therefore stability is only relevant when the visibility is pub +// to the whole universe. 
+// +// (The only reason to do so would be if one were worried about the +// compiler having some subtle bug where adding a stability attribute +// introduces a privacy violation. As a way to provide evidence that +// this is not occurring, I have put stability attributes on some +// non-pub fields, marked with SILLY below) + +#![feature(staged_api)] +#![feature(pub_restricted)] + +#![stable(feature = "unit_test", since = "0.0.0")] + +#[stable(feature = "unit_test", since = "0.0.0")] +pub use m::{Record, Trait, Tuple}; + +mod m { + #[derive(Default)] + #[stable(feature = "unit_test", since = "0.0.0")] + pub struct Record { + #[stable(feature = "unit_test", since = "0.0.0")] + pub a_stable_pub: i32, + #[unstable(feature = "unstable_declared", issue = "38412")] + pub a_unstable_declared_pub: i32, + #[unstable(feature = "unstable_undeclared", issue = "38412")] + pub a_unstable_undeclared_pub: i32, + #[unstable(feature = "unstable_undeclared", issue = "38412")] // SILLY + pub(crate) b_crate: i32, + #[unstable(feature = "unstable_declared", issue = "38412")] // SILLY + pub(m) c_mod: i32, + #[stable(feature = "unit_test", since = "0.0.0")] // SILLY + d_priv: i32 + } + + #[derive(Default)] + #[stable(feature = "unit_test", since = "1.0.0")] + pub struct Tuple( + #[stable(feature = "unit_test", since = "0.0.0")] + pub i32, + #[unstable(feature = "unstable_declared", issue = "38412")] + pub i32, + #[unstable(feature = "unstable_undeclared", issue = "38412")] + pub i32, + + pub(crate) i32, + pub(m) i32, + i32); + + impl Record { + #[stable(feature = "unit_test", since = "1.0.0")] + pub fn new() -> Self { Default::default() } + } + + impl Tuple { + #[stable(feature = "unit_test", since = "1.0.0")] + pub fn new() -> Self { Default::default() } + } + + + #[stable(feature = "unit_test", since = "0.0.0")] + pub trait Trait { + #[stable(feature = "unit_test", since = "0.0.0")] + type Type; + #[stable(feature = "unit_test", since = "0.0.0")] + fn stable_trait_method(&self) -> Self::Type; + #[unstable(feature = "unstable_undeclared", issue = "38412")] + fn unstable_undeclared_trait_method(&self) -> Self::Type; + #[unstable(feature = "unstable_declared", issue = "38412")] + fn unstable_declared_trait_method(&self) -> Self::Type; + } + + #[stable(feature = "unit_test", since = "0.0.0")] + impl Trait for Record { + type Type = i32; + fn stable_trait_method(&self) -> i32 { self.d_priv } + fn unstable_undeclared_trait_method(&self) -> i32 { self.d_priv } + fn unstable_declared_trait_method(&self) -> i32 { self.d_priv } + } + + #[stable(feature = "unit_test", since = "0.0.0")] + impl Trait for Tuple { + type Type = i32; + fn stable_trait_method(&self) -> i32 { self.3 } + fn unstable_undeclared_trait_method(&self) -> i32 { self.3 } + fn unstable_declared_trait_method(&self) -> i32 { self.3 } + } + + impl Record { + #[unstable(feature = "unstable_undeclared", issue = "38412")] + pub fn unstable_undeclared(&self) -> i32 { self.d_priv } + #[unstable(feature = "unstable_declared", issue = "38412")] + pub fn unstable_declared(&self) -> i32 { self.d_priv } + #[stable(feature = "unit_test", since = "0.0.0")] + pub fn stable(&self) -> i32 { self.d_priv } + + #[unstable(feature = "unstable_undeclared", issue = "38412")] // SILLY + pub(crate) fn pub_crate(&self) -> i32 { self.d_priv } + #[unstable(feature = "unstable_declared", issue = "38412")] // SILLY + pub(m) fn pub_mod(&self) -> i32 { self.d_priv } + #[stable(feature = "unit_test", since = "0.0.0")] // SILLY + fn private(&self) -> i32 { self.d_priv } + } + + impl Tuple 
{ + #[unstable(feature = "unstable_undeclared", issue = "38412")] + pub fn unstable_undeclared(&self) -> i32 { self.0 } + #[unstable(feature = "unstable_declared", issue = "38412")] + pub fn unstable_declared(&self) -> i32 { self.0 } + #[stable(feature = "unit_test", since = "0.0.0")] + pub fn stable(&self) -> i32 { self.0 } + + pub(crate) fn pub_crate(&self) -> i32 { self.0 } + pub(m) fn pub_mod(&self) -> i32 { self.0 } + fn private(&self) -> i32 { self.0 } + } +} diff --git a/src/test/compile-fail-fulldeps/explore-issue-38412.rs b/src/test/compile-fail-fulldeps/explore-issue-38412.rs new file mode 100644 index 0000000000000..aab92575321e3 --- /dev/null +++ b/src/test/compile-fail-fulldeps/explore-issue-38412.rs @@ -0,0 +1,85 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// aux-build:pub_and_stability.rs + +#![feature(staged_api)] +#![feature(unused_feature)] + +// A big point of this test is that we *declare* `unstable_declared`, +// but do *not* declare `unstable_undeclared`. This way we can check +// that the compiler is letting in uses of declared feature-gated +// stuff but still rejecting uses of undeclared feature-gated stuff. +#![feature(unstable_declared)] + +extern crate pub_and_stability; +use pub_and_stability::{Record, Trait, Tuple}; + +fn main() { + // Okay + let Record { .. } = Record::new(); + // Okay (for now; see RFC Issue #902) + let Tuple(..) = Tuple::new(); + + // Okay + let Record { a_stable_pub: _, a_unstable_declared_pub: _, .. } = Record::new(); + // Okay (for now; see RFC Issue #902) + let Tuple(_, _, ..) = Tuple::new(); // analogous to above + + let Record { a_stable_pub: _, a_unstable_declared_pub: _, a_unstable_undeclared_pub: _, .. } = + Record::new(); + //~^^ ERROR use of unstable library feature 'unstable_undeclared' + + let Tuple(_, _, _, ..) 
= Tuple::new(); // analogous to previous + //~^ ERROR use of unstable library feature 'unstable_undeclared' + + let r = Record::new(); + let t = Tuple::new(); + + r.a_stable_pub; + r.a_unstable_declared_pub; + r.a_unstable_undeclared_pub; //~ ERROR use of unstable library feature + r.b_crate; //~ ERROR is private + r.c_mod; //~ ERROR is private + r.d_priv; //~ ERROR is private + + t.0; + t.1; + t.2; //~ ERROR use of unstable library feature + t.3; //~ ERROR is private + t.4; //~ ERROR is private + t.5; //~ ERROR is private + + r.stable_trait_method(); + r.unstable_declared_trait_method(); + r.unstable_undeclared_trait_method(); //~ ERROR use of unstable library feature + + r.stable(); + r.unstable_declared(); + r.unstable_undeclared(); //~ ERROR use of unstable library feature + + r.pub_crate(); //~ ERROR `pub_crate` is private + r.pub_mod(); //~ ERROR `pub_mod` is private + r.private(); //~ ERROR `private` is private + + let t = Tuple::new(); + t.stable_trait_method(); + t.unstable_declared_trait_method(); + t.unstable_undeclared_trait_method(); //~ ERROR use of unstable library feature + + t.stable(); + t.unstable_declared(); + t.unstable_undeclared(); //~ ERROR use of unstable library feature + + t.pub_crate(); //~ ERROR `pub_crate` is private + t.pub_mod(); //~ ERROR `pub_mod` is private + t.private(); //~ ERROR `private` is private + +} diff --git a/src/test/compile-fail/E0001.rs b/src/test/compile-fail/E0001.rs index 906642d855580..b72b0d661901f 100644 --- a/src/test/compile-fail/E0001.rs +++ b/src/test/compile-fail/E0001.rs @@ -8,11 +8,13 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +#![deny(unreachable_patterns)] + fn main() { let foo = Some(1); match foo { - Some(bar) => {/* ... */} + Some(_) => {/* ... */} None => {/* ... */} - _ => {/* ... */} //~ ERROR E0001 + _ => {/* ... */} //~ ERROR unreachable pattern } } diff --git a/src/test/compile-fail/const-eval-overflow0.rs b/src/test/compile-fail/const-eval-overflow0.rs deleted file mode 100644 index 7db7de9cee30c..0000000000000 --- a/src/test/compile-fail/const-eval-overflow0.rs +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -#![allow(unused_imports)] - -// Note: the relevant lint pass here runs before some of the constant -// evaluation below (e.g. that performed by trans and llvm), so if you -// change this warn to a deny, then the compiler will exit before -// those errors are detected. 
- -use std::fmt; -use std::{i8, i16, i32, i64, isize}; -use std::{u8, u16, u32, u64, usize}; - -const VALS_I8: (i8, i8, i8, i8) = - (-i8::MIN, - i8::MIN - 1, - i8::MAX + 1, - i8::MIN * 2, - ); - -const VALS_I16: (i16, i16, i16, i16) = - (-i16::MIN, - i16::MIN - 1, - i16::MAX + 1, - i16::MIN * 2, - ); - -const VALS_I32: (i32, i32, i32, i32) = - (-i32::MIN, - i32::MIN - 1, - i32::MAX + 1, - i32::MIN * 2, - ); - -const VALS_I64: (i64, i64, i64, i64) = - (-i64::MIN, - i64::MIN - 1, - i64::MAX + 1, - i64::MAX * 2, - ); - -const VALS_U8: (u8, u8, u8, u8) = - (-u8::MIN, - //~^ ERROR unary negation of unsigned integer - //~| HELP use a cast or the `!` operator - u8::MIN - 1, - u8::MAX + 1, - u8::MAX * 2, - ); - -const VALS_U16: (u16, u16, u16, u16) = - (-u16::MIN, - //~^ ERROR unary negation of unsigned integer - //~| HELP use a cast or the `!` operator - u16::MIN - 1, - u16::MAX + 1, - u16::MAX * 2, - ); - -const VALS_U32: (u32, u32, u32, u32) = - (-u32::MIN, - //~^ ERROR unary negation of unsigned integer - //~| HELP use a cast or the `!` operator - u32::MIN - 1, - u32::MAX + 1, - u32::MAX * 2, - ); - -const VALS_U64: (u64, u64, u64, u64) = - (-u64::MIN, - //~^ ERROR unary negation of unsigned integer - //~| HELP use a cast or the `!` operator - u64::MIN - 1, - u64::MAX + 1, - u64::MAX * 2, - ); - -fn main() { - foo(VALS_I8); - foo(VALS_I16); - foo(VALS_I32); - foo(VALS_I64); - - foo(VALS_U8); - foo(VALS_U16); - foo(VALS_U32); - foo(VALS_U64); -} - -fn foo(x: T) { - println!("{:?}", x); -} diff --git a/src/test/compile-fail/feature-gate-negate-unsigned.rs b/src/test/compile-fail/feature-gate-negate-unsigned.rs index 98cc2fc0c3e0c..599e31341f233 100644 --- a/src/test/compile-fail/feature-gate-negate-unsigned.rs +++ b/src/test/compile-fail/feature-gate-negate-unsigned.rs @@ -16,16 +16,13 @@ impl std::ops::Neg for S { fn neg(self) -> u32 { 0 } } -// FIXME(eddyb) move this back to a `-1` literal when -// MIR building stops eagerly erroring in that case. -const _MAX: usize = -(2 - 1); -//~^ WARN unary negation of unsigned integer -//~| ERROR unary negation of unsigned integer -//~| HELP use a cast or the `!` operator - fn main() { + let _max: usize = -1; + //~^ ERROR cannot apply unary operator `-` to type `usize` + let x = 5u8; - let _y = -x; //~ ERROR unary negation of unsigned integer - //~^ HELP use a cast or the `!` operator + let _y = -x; + //~^ ERROR cannot apply unary operator `-` to type `u8` + -S; // should not trigger the gate; issue 26840 } diff --git a/src/test/compile-fail/feature-gate-negate-unsigned0.rs b/src/test/compile-fail/feature-gate-negate-unsigned0.rs deleted file mode 100644 index 89ae1a09bd3e4..0000000000000 --- a/src/test/compile-fail/feature-gate-negate-unsigned0.rs +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// Test that negating unsigned integers doesn't compile - -struct S; -impl std::ops::Neg for S { - type Output = u32; - fn neg(self) -> u32 { 0 } -} - -fn main() { - let a = -1; - //~^ ERROR E0080 - //~| unary negation of unsigned integer - let _b : u8 = a; // for infering variable a to u8. 
- - let _d = -1u8; - //~^ ERROR E0080 - //~| unary negation of unsigned integer - - for _ in -10..10u8 {} - //~^ ERROR E0080 - //~| unary negation of unsigned integer - - -S; // should not trigger the gate; issue 26840 -} diff --git a/src/test/compile-fail/issue-12116.rs b/src/test/compile-fail/issue-12116.rs index 1978068575b95..a8d2c55255340 100644 --- a/src/test/compile-fail/issue-12116.rs +++ b/src/test/compile-fail/issue-12116.rs @@ -10,6 +10,9 @@ #![feature(box_patterns)] #![feature(box_syntax)] +#![allow(dead_code)] +#![allow(unused_variables)] +#![deny(unreachable_patterns)] enum IntList { Cons(isize, Box), @@ -19,9 +22,8 @@ enum IntList { fn tail(source_list: &IntList) -> IntList { match source_list { &IntList::Cons(val, box ref next_list) => tail(next_list), - &IntList::Cons(val, box Nil) => IntList::Cons(val, box Nil), + &IntList::Cons(val, box IntList::Nil) => IntList::Cons(val, box IntList::Nil), //~^ ERROR unreachable pattern -//~^^ WARN pattern binding `Nil` is named the same as one of the variants of the type `IntList` _ => panic!() } } diff --git a/src/test/compile-fail/issue-12369.rs b/src/test/compile-fail/issue-12369.rs index 978d6f59b2df4..4df1e24dcfbd5 100644 --- a/src/test/compile-fail/issue-12369.rs +++ b/src/test/compile-fail/issue-12369.rs @@ -9,6 +9,8 @@ // except according to those terms. #![feature(slice_patterns)] +#![allow(unused_variables)] +#![deny(unreachable_patterns)] fn main() { let sl = vec![1,2,3]; diff --git a/src/test/compile-fail/issue-13727.rs b/src/test/compile-fail/issue-13727.rs index 28c2c7bc0e2e7..2e815548e8913 100644 --- a/src/test/compile-fail/issue-13727.rs +++ b/src/test/compile-fail/issue-13727.rs @@ -8,6 +8,9 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +#![allow(overflowing_literals)] +#![deny(unreachable_patterns)] + fn test(val: u8) { match val { 256 => print!("0b1110\n"), diff --git a/src/test/compile-fail/issue-14221.rs b/src/test/compile-fail/issue-14221.rs index e79be99a346fa..d11fe99c07f6b 100644 --- a/src/test/compile-fail/issue-14221.rs +++ b/src/test/compile-fail/issue-14221.rs @@ -8,6 +8,10 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +#![deny(unreachable_patterns)] +#![allow(unused_variables)] +#![allow(non_snake_case)] + pub enum E { A, B, diff --git a/src/test/compile-fail/issue-30240-b.rs b/src/test/compile-fail/issue-30240-b.rs new file mode 100644 index 0000000000000..cf6935b9ba6d4 --- /dev/null +++ b/src/test/compile-fail/issue-30240-b.rs @@ -0,0 +1,26 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
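+
+// Split out of issue-30240.rs: a duplicate `"hello"` arm behind a guarded
+// first arm must still be reported as unreachable here, while the original
+// test now only checks non-exhaustiveness.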
+ +#![deny(unreachable_patterns)] + +fn main() { + match "world" { + "hello" => {} + _ => {}, + } + + match "world" { + ref _x if false => {} + "hello" => {} + "hello" => {} //~ ERROR unreachable pattern + _ => {}, + } +} + diff --git a/src/test/compile-fail/issue-30240.rs b/src/test/compile-fail/issue-30240.rs index 9b105e7ec159d..60fb307d4e1a4 100644 --- a/src/test/compile-fail/issue-30240.rs +++ b/src/test/compile-fail/issue-30240.rs @@ -16,6 +16,5 @@ fn main() { match "world" { //~ ERROR non-exhaustive patterns: `&_` ref _x if false => {} "hello" => {} - "hello" => {} //~ ERROR unreachable pattern } } diff --git a/src/test/compile-fail/issue-30302.rs b/src/test/compile-fail/issue-30302.rs index 26508a4722425..01150ff13740f 100644 --- a/src/test/compile-fail/issue-30302.rs +++ b/src/test/compile-fail/issue-30302.rs @@ -8,6 +8,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +#![allow(dead_code)] +#![allow(unused_variables)] +#![allow(non_snake_case)] +#![deny(unreachable_patterns)] + enum Stack { Nil, Cons(T, Box>) diff --git a/src/test/compile-fail/issue-31221.rs b/src/test/compile-fail/issue-31221.rs index 4997a6fee195b..e2b80215caf61 100644 --- a/src/test/compile-fail/issue-31221.rs +++ b/src/test/compile-fail/issue-31221.rs @@ -8,6 +8,15 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +#![allow(dead_code)] +#![allow(unused_variables)] +#![allow(non_snake_case)] +#![deny(unreachable_patterns)] +//~^ NOTE lint level defined here +//~^^ NOTE lint level defined here +//~^^^ NOTE lint level defined here + +#[derive(Clone, Copy)] enum Enum { Var1, Var2, @@ -41,13 +50,4 @@ fn main() { //~^ ERROR unreachable pattern //~^^ NOTE this is an unreachable pattern }; - // `_` need not emit a note, it is pretty obvious already. - let t = (Var1, Var1); - match t { - (Var1, b) => (), - _ => (), - anything => () - //~^ ERROR unreachable pattern - //~^^ NOTE this is an unreachable pattern - }; } diff --git a/src/test/compile-fail/issue-3601.rs b/src/test/compile-fail/issue-3601.rs index b25e683db0906..cc69a76e04331 100644 --- a/src/test/compile-fail/issue-3601.rs +++ b/src/test/compile-fail/issue-3601.rs @@ -40,6 +40,5 @@ fn main() { box NodeKind::Element(ed) => match ed.kind { //~ ERROR non-exhaustive patterns box ElementKind::HTMLImageElement(ref d) if d.image.is_some() => { true } }, - _ => panic!("WAT") //~ ERROR unreachable pattern }; } diff --git a/src/test/compile-fail/issue-38412.rs b/src/test/compile-fail/issue-38412.rs new file mode 100644 index 0000000000000..00305eb2bc04b --- /dev/null +++ b/src/test/compile-fail/issue-38412.rs @@ -0,0 +1,20 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
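+
+// Check that matching on `Box`'s private `0` field is rejected; the broader
+// visibility/stability matrix for issue #38412 lives in
+// compile-fail-fulldeps/explore-issue-38412.rs.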
+ +fn main() { + let Box(a) = loop { }; + //~^ ERROR field `0` of struct `std::boxed::Box` is private + + // (The below is a trick to allow compiler to infer a type for + // variable `a` without attempting to ascribe a type to the + // pattern or otherwise attempting to name the Box type, which + // would run afoul of issue #22207) + let _b: *mut i32 = *a; +} diff --git a/src/test/compile-fail/lint-qualification.rs b/src/test/compile-fail/lint-qualification.rs index af9b21dadd1d0..57c2166565f96 100644 --- a/src/test/compile-fail/lint-qualification.rs +++ b/src/test/compile-fail/lint-qualification.rs @@ -21,8 +21,9 @@ fn main() { let _ = || -> Result<(), ()> { try!(Ok(())); Ok(()) }; // issue #37345 - macro_rules! m { - () => { $crate::foo::bar(); } - } - m!(); // issue #37357 + macro_rules! m { () => { + $crate::foo::bar(); // issue #37357 + ::foo::bar(); // issue #38682 + } } + m!(); } diff --git a/src/test/compile-fail/match-argm-statics-2.rs b/src/test/compile-fail/match-argm-statics-2.rs new file mode 100644 index 0000000000000..40dcf3d0f12cc --- /dev/null +++ b/src/test/compile-fail/match-argm-statics-2.rs @@ -0,0 +1,71 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use self::Direction::{North, East, South, West}; + +struct NewBool(bool); + +enum Direction { + North, + East, + South, + West +} + +const TRUE_TRUE: (bool, bool) = (true, true); + +fn nonexhaustive_1() { + match (true, false) { + //~^ ERROR non-exhaustive patterns: `(true, false)` not covered + TRUE_TRUE => (), + (false, false) => (), + (false, true) => () + } +} + +const NONE: Option = None; +const EAST: Direction = East; + +fn nonexhaustive_2() { + match Some(Some(North)) { + //~^ ERROR non-exhaustive patterns: `Some(Some(West))` not covered + Some(NONE) => (), + Some(Some(North)) => (), + Some(Some(EAST)) => (), + Some(Some(South)) => (), + None => () + } +} + +const NEW_FALSE: NewBool = NewBool(false); +struct Foo { + bar: Option, + baz: NewBool +} + +const STATIC_FOO: Foo = Foo { bar: None, baz: NEW_FALSE }; + +fn nonexhaustive_3() { + match (Foo { bar: Some(North), baz: NewBool(true) }) { + //~^ ERROR non-exhaustive patterns: `Foo { bar: Some(North), baz: NewBool(true) }` + Foo { bar: None, baz: NewBool(true) } => (), + Foo { bar: _, baz: NEW_FALSE } => (), + Foo { bar: Some(West), baz: NewBool(true) } => (), + Foo { bar: Some(South), .. } => (), + Foo { bar: Some(EAST), .. } => () + } +} + +fn main() { + nonexhaustive_1(); + nonexhaustive_2(); + nonexhaustive_3(); +} + diff --git a/src/test/compile-fail/match-arm-statics.rs b/src/test/compile-fail/match-arm-statics.rs index 9b313f248fcbb..40d73ab51c762 100644 --- a/src/test/compile-fail/match-arm-statics.rs +++ b/src/test/compile-fail/match-arm-statics.rs @@ -7,10 +7,16 @@ // , at your // option. This file may not be copied, modified, or distributed // except according to those terms. 
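+
+// The non-exhaustiveness checks that used to live here were moved to
+// match-argm-statics-2.rs; this file now only covers the unreachable-arm
+// cases under `deny(unreachable_patterns)`.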
+ +#![allow(dead_code)] +#![deny(unreachable_patterns)] + use self::Direction::{North, East, South, West}; +#[derive(PartialEq, Eq)] struct NewBool(bool); +#[derive(PartialEq, Eq)] enum Direction { North, East, @@ -20,15 +26,6 @@ enum Direction { const TRUE_TRUE: (bool, bool) = (true, true); -fn nonexhaustive_1() { - match (true, false) { - //~^ ERROR non-exhaustive patterns: `(true, false)` not covered - TRUE_TRUE => (), - (false, false) => (), - (false, true) => () - } -} - fn unreachable_1() { match (true, false) { TRUE_TRUE => (), @@ -43,17 +40,6 @@ fn unreachable_1() { const NONE: Option = None; const EAST: Direction = East; -fn nonexhaustive_2() { - match Some(Some(North)) { - //~^ ERROR non-exhaustive patterns: `Some(Some(West))` not covered - Some(NONE) => (), - Some(Some(North)) => (), - Some(Some(EAST)) => (), - Some(Some(South)) => (), - None => () - } -} - fn unreachable_2() { match Some(Some(North)) { Some(NONE) => (), @@ -73,19 +59,6 @@ struct Foo { baz: NewBool } -const STATIC_FOO: Foo = Foo { bar: None, baz: NEW_FALSE }; - -fn nonexhaustive_3() { - match (Foo { bar: Some(North), baz: NewBool(true) }) { - //~^ ERROR non-exhaustive patterns: `Foo { bar: Some(North), baz: NewBool(true) }` - Foo { bar: None, baz: NewBool(true) } => (), - Foo { bar: _, baz: NEW_FALSE } => (), - Foo { bar: Some(West), baz: NewBool(true) } => (), - Foo { bar: Some(South), .. } => (), - Foo { bar: Some(EAST), .. } => () - } -} - fn unreachable_3() { match (Foo { bar: Some(EAST), baz: NewBool(true) }) { Foo { bar: None, baz: NewBool(true) } => (), @@ -100,9 +73,6 @@ fn unreachable_3() { } fn main() { - nonexhaustive_1(); - nonexhaustive_2(); - nonexhaustive_3(); unreachable_1(); unreachable_2(); unreachable_3(); diff --git a/src/test/compile-fail/match-byte-array-patterns-2.rs b/src/test/compile-fail/match-byte-array-patterns-2.rs new file mode 100644 index 0000000000000..ad7e931a0ec97 --- /dev/null +++ b/src/test/compile-fail/match-byte-array-patterns-2.rs @@ -0,0 +1,26 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![feature(advanced_slice_patterns, slice_patterns)] + +fn main() { + let buf = &[0, 1, 2, 3]; + + match buf { //~ ERROR non-exhaustive + b"AAAA" => {} + } + + let buf: &[u8] = buf; + + match buf { //~ ERROR non-exhaustive + b"AAAA" => {} + } +} + diff --git a/src/test/compile-fail/match-byte-array-patterns.rs b/src/test/compile-fail/match-byte-array-patterns.rs index 86323656b873e..1ff07eae1c9c0 100644 --- a/src/test/compile-fail/match-byte-array-patterns.rs +++ b/src/test/compile-fail/match-byte-array-patterns.rs @@ -9,6 +9,7 @@ // except according to those terms. 
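+
+// The non-exhaustive byte-string matches were moved to
+// match-byte-array-patterns-2.rs; this file keeps only the
+// unreachable-pattern cases.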
#![feature(advanced_slice_patterns, slice_patterns)] +#![deny(unreachable_patterns)] fn main() { let buf = &[0, 1, 2, 3]; @@ -37,10 +38,6 @@ fn main() { _ => {} } - match buf { //~ ERROR non-exhaustive - b"AAAA" => {} - } - let buf: &[u8] = buf; match buf { @@ -66,8 +63,4 @@ fn main() { b"AAAA" => {}, //~ ERROR unreachable pattern _ => {} } - - match buf { //~ ERROR non-exhaustive - b"AAAA" => {} - } } diff --git a/src/test/compile-fail/match-range-fail-dominate.rs b/src/test/compile-fail/match-range-fail-dominate.rs index 825a485d52956..256aa180f4a59 100644 --- a/src/test/compile-fail/match-range-fail-dominate.rs +++ b/src/test/compile-fail/match-range-fail-dominate.rs @@ -14,6 +14,8 @@ //error-pattern: unreachable //error-pattern: unreachable +#![deny(unreachable_patterns)] + fn main() { match 5 { 1 ... 10 => { } diff --git a/src/test/compile-fail/match-ref-ice.rs b/src/test/compile-fail/match-ref-ice.rs index 042ec95f7e753..1cdbba17f658a 100644 --- a/src/test/compile-fail/match-ref-ice.rs +++ b/src/test/compile-fail/match-ref-ice.rs @@ -9,6 +9,7 @@ // except according to those terms. #![feature(slice_patterns)] +#![deny(unreachable_patterns)] // The arity of `ref x` is always 1. If the pattern is compared to some non-structural type whose // arity is always 0, an ICE occurs. @@ -19,7 +20,7 @@ fn main() { let homura = [1, 2, 3]; match homura { - [1, ref madoka, 3] => (), + [1, ref _madoka, 3] => (), [1, 2, 3] => (), //~ ERROR unreachable pattern [_, _, _] => (), } diff --git a/src/test/compile-fail/match-slice-patterns.rs b/src/test/compile-fail/match-slice-patterns.rs index c0fc75f9713a8..fd4bd1c7b944b 100644 --- a/src/test/compile-fail/match-slice-patterns.rs +++ b/src/test/compile-fail/match-slice-patterns.rs @@ -12,7 +12,7 @@ fn check(list: &[Option<()>]) { match list { - //~^ ERROR `&[None, Some(_), None, _]` and `&[Some(_), Some(_), None, _]` not covered + //~^ ERROR `&[_, Some(_), None, _]` not covered &[] => {}, &[_] => {}, &[_, _] => {}, diff --git a/src/test/compile-fail/match-vec-fixed.rs b/src/test/compile-fail/match-vec-fixed.rs index 60d0c24bb3d36..dd9379c756d12 100644 --- a/src/test/compile-fail/match-vec-fixed.rs +++ b/src/test/compile-fail/match-vec-fixed.rs @@ -9,6 +9,7 @@ // except according to those terms. #![feature(slice_patterns)] +#![deny(unreachable_patterns)] fn a() { let v = [1, 2, 3]; diff --git a/src/test/compile-fail/match-vec-unreachable.rs b/src/test/compile-fail/match-vec-unreachable.rs index 4d9b3aea1124b..472b054b08777 100644 --- a/src/test/compile-fail/match-vec-unreachable.rs +++ b/src/test/compile-fail/match-vec-unreachable.rs @@ -9,6 +9,8 @@ // except according to those terms. #![feature(slice_patterns)] +#![deny(unreachable_patterns)] +#![allow(unused_variables)] fn main() { let x: Vec<(isize, isize)> = Vec::new(); @@ -24,7 +26,7 @@ fn main() { "baz".to_string()]; let x: &[String] = &x; match *x { - [a, _, _, ..] => { println!("{}", a); } + [ref a, _, _, ..] => { println!("{}", a); } [_, _, _, _, _] => { } //~ ERROR unreachable pattern _ => { } } diff --git a/src/test/compile-fail/struct-pattern-match-useless.rs b/src/test/compile-fail/struct-pattern-match-useless.rs index 9f7ebc261ad2e..dda30141b4a06 100644 --- a/src/test/compile-fail/struct-pattern-match-useless.rs +++ b/src/test/compile-fail/struct-pattern-match-useless.rs @@ -8,6 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+#![deny(unreachable_patterns)] + struct Foo { x: isize, y: isize, @@ -16,7 +18,7 @@ struct Foo { pub fn main() { let a = Foo { x: 1, y: 2 }; match a { - Foo { x: x, y: y } => (), + Foo { x: _x, y: _y } => (), Foo { .. } => () //~ ERROR unreachable pattern } diff --git a/src/test/compile-fail/uninhabited-irrefutable.rs b/src/test/compile-fail/uninhabited-irrefutable.rs new file mode 100644 index 0000000000000..4755fdd4fd5e8 --- /dev/null +++ b/src/test/compile-fail/uninhabited-irrefutable.rs @@ -0,0 +1,38 @@ +// Copyright 2012 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![feature(never_type)] + +mod foo { + pub struct SecretlyEmpty { + _priv: !, + } + + pub struct NotSoSecretlyEmpty { + pub _pub: !, + } +} + +struct NotSoSecretlyEmpty { + _priv: !, +} + +enum Foo { + A(foo::SecretlyEmpty), + B(foo::NotSoSecretlyEmpty), + C(NotSoSecretlyEmpty), + D(u32), +} + +fn main() { + let x: Foo = Foo::D(123); + let Foo::D(_y) = x; //~ ERROR refutable pattern in local binding: `A(_)` not covered +} + diff --git a/src/test/compile-fail/uninhabited-patterns.rs b/src/test/compile-fail/uninhabited-patterns.rs new file mode 100644 index 0000000000000..0de29f3a8d737 --- /dev/null +++ b/src/test/compile-fail/uninhabited-patterns.rs @@ -0,0 +1,49 @@ +// Copyright 2012 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![feature(box_patterns)] +#![feature(slice_patterns)] +#![feature(box_syntax)] +#![feature(never_type)] +#![deny(unreachable_patterns)] + +mod foo { + pub struct SecretlyEmpty { + _priv: !, + } +} + +struct NotSoSecretlyEmpty { + _priv: !, +} + +fn main() { + let x: &[!] = &[]; + + match x { + &[] => (), + &[..] => (), //~ ERROR unreachable pattern + }; + + let x: Result, &[Result]> = Err(&[]); + match x { + Ok(box _) => (), //~ ERROR unreachable pattern + Err(&[]) => (), + Err(&[..]) => (), //~ ERROR unreachable pattern + } + + let x: Result> = Err(Err(123)); + match x { + Ok(_y) => (), + Err(Err(_y)) => (), + Err(Ok(_y)) => (), //~ ERROR unreachable pattern + } +} + diff --git a/src/test/compile-fail/unreachable-arm.rs b/src/test/compile-fail/unreachable-arm.rs index bc93b86a39119..df827d2c78421 100644 --- a/src/test/compile-fail/unreachable-arm.rs +++ b/src/test/compile-fail/unreachable-arm.rs @@ -8,11 +8,18 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-// error-pattern:unreachable pattern - #![feature(box_patterns)] #![feature(box_syntax)] +#![allow(dead_code)] +#![deny(unreachable_patterns)] + +enum Foo { A(Box, isize), B(usize), } -enum foo { a(Box, isize), b(usize), } +fn main() { + match Foo::B(1) { + Foo::B(_) | Foo::A(box _, 1) => { } + Foo::A(_, 1) => { } //~ ERROR unreachable pattern + _ => { } + } +} -fn main() { match foo::b(1) { foo::b(_) | foo::a(box _, 1) => { } foo::a(_, 1) => { } } } diff --git a/src/test/run-pass/empty-types-in-patterns.rs b/src/test/run-pass/empty-types-in-patterns.rs new file mode 100644 index 0000000000000..23705d36e3de2 --- /dev/null +++ b/src/test/run-pass/empty-types-in-patterns.rs @@ -0,0 +1,60 @@ +// Copyright 2012 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![feature(never_type)] +#![feature(slice_patterns)] +#![allow(unreachable_patterns)] +#![allow(unreachable_code)] + +#[allow(dead_code)] +fn foo(z: !) { + let x: Result = Ok(z); + + let Ok(_y) = x; + let Err(_y) = x; + + let x = [z; 1]; + + match x {}; + match x { + [q] => q, + }; +} + +fn bar(nevers: &[!]) { + match nevers { + &[] => (), + }; + + match nevers { + &[] => (), + &[_] => (), + &[_, _, _, ..] => (), + }; +} + +fn main() { + let x: Result = Ok(123); + let Ok(y) = x; + + assert_eq!(123, y); + + match x { + Ok(y) => y, + }; + + match x { + Ok(y) => y, + Err(e) => match e {}, + }; + + bar(&[]); +} + diff --git a/src/test/run-pass/i128-ffi.rs b/src/test/run-pass/i128-ffi.rs new file mode 100644 index 0000000000000..3b5f4884d21e7 --- /dev/null +++ b/src/test/run-pass/i128-ffi.rs @@ -0,0 +1,45 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// ignore-stage0 +// ignore-stage1 + +// MSVC doesn't support 128 bit integers, and other Windows +// C compilers have very inconsistent views on how the ABI +// should look like. 
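+//
+// For reference, the C helpers this test links against are defined in
+// src/rt/rust_test_helpers.c roughly as
+//
+//     unsigned __int128 identity(unsigned __int128 a);
+//     __int128 square(__int128 a);
+//     __int128 sub(__int128 a, __int128 b);
+//
+// and are only compiled on non-Windows x86_64 targets.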
+ +// ignore-windows + +// Ignore 32 bit targets: +// ignore-x86, ignore-arm + +#![feature(i128_type)] + +#[link(name = "rust_test_helpers", kind = "static")] +extern "C" { + fn identity(f: u128) -> u128; + fn square(f: i128) -> i128; + fn sub(f: i128, f: i128) -> i128; +} + +fn main() { + unsafe { + let a = 0x734C_C2F2_A521; + let b = 0x33EE_0E2A_54E2_59DA_A0E7_8E41; + let b_out = identity(b); + assert_eq!(b, b_out); + let a_square = square(a); + assert_eq!(b, a_square as u128); + let k = 0x1234_5678_9ABC_DEFF_EDCB_A987_6543_210; + let k_d = 0x2468_ACF1_3579_BDFF_DB97_530E_CA86_420; + let k_out = sub(k_d, k); + assert_eq!(k, k_out); + } +} diff --git a/src/test/run-pass/i128.rs b/src/test/run-pass/i128.rs index b7aeb21229ccb..a4ff36d20e4fa 100644 --- a/src/test/run-pass/i128.rs +++ b/src/test/run-pass/i128.rs @@ -96,4 +96,12 @@ fn main() { assert_eq!((-z).checked_mul(-z), Some(0x734C_C2F2_A521)); assert_eq!((z).checked_mul(z), Some(0x734C_C2F2_A521)); assert_eq!((k).checked_mul(k), None); + let l: i128 = b(i128::min_value()); + let o: i128 = b(17); + assert_eq!(l.checked_sub(b(2)), None); + assert_eq!(l.checked_add(l), None); + assert_eq!((-(l + 1)).checked_add(2), None); + assert_eq!(l.checked_sub(l), Some(0)); + assert_eq!(b(1u128).checked_shl(b(127)), Some(1 << 127)); + assert_eq!(o.checked_shl(b(128)), None); } diff --git a/src/test/compile-fail/E0205.rs b/src/test/run-pass/issue-33187.rs similarity index 54% rename from src/test/compile-fail/E0205.rs rename to src/test/run-pass/issue-33187.rs index c73e753430105..477112ab3c5ab 100644 --- a/src/test/compile-fail/E0205.rs +++ b/src/test/run-pass/issue-33187.rs @@ -8,22 +8,19 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -enum Foo { - Bar(Vec), - Baz, +struct Foo(::Data); + +impl Copy for Foo where ::Data: Copy { } +impl Clone for Foo where ::Data: Clone { + fn clone(&self) -> Self { Foo(self.0.clone()) } } -impl Copy for Foo { } -//~^ ERROR the trait `Copy` may not be implemented for this type -//~| NOTE variant `Bar` does not implement `Copy` +trait Repr { + type Data; +} -#[derive(Copy)] -//~^ ERROR the trait `Copy` may not be implemented for this type -//~| NOTE variant `Bar` does not implement `Copy` -//~| NOTE in this expansion of #[derive(Copy)] -enum Foo2<'a> { - Bar(&'a mut bool), - Baz, +impl Repr for A { + type Data = u32; } fn main() { diff --git a/src/test/run-pass/u128.rs b/src/test/run-pass/u128.rs index d138e09318bfe..53d726f1f663b 100644 --- a/src/test/run-pass/u128.rs +++ b/src/test/run-pass/u128.rs @@ -10,7 +10,10 @@ // ignore-stage0 // ignore-stage1 -#![feature(i128_type)] +#![feature(i128_type, test)] + +extern crate test; +use test::black_box as b; fn main() { let x: u128 = 0xFFFF_FFFF_FFFF_FFFF__FFFF_FFFF_FFFF_FFFF; @@ -63,5 +66,15 @@ fn main() { format!("{}", u128::max_value())); assert_eq!("147573952589676412928", format!("{:?}", j)); // common traits - x.clone(); + assert_eq!(x, b(x.clone())); + // overflow checks + assert_eq!((z).checked_mul(z), Some(0x734C_C2F2_A521)); + assert_eq!((k).checked_mul(k), None); + let l: u128 = b(u128::max_value() - 10); + let o: u128 = b(17); + assert_eq!(l.checked_add(b(11)), None); + assert_eq!(l.checked_sub(l), Some(0)); + assert_eq!(o.checked_sub(b(18)), None); + assert_eq!(b(1u128).checked_shl(b(127)), Some(1 << 127)); + assert_eq!(o.checked_shl(b(128)), None); } diff --git a/src/test/ui/check_match/issue-35609.stderr b/src/test/ui/check_match/issue-35609.stderr index 66069c7a86a34..0aafe3f17b3d0 100644 --- 
a/src/test/ui/check_match/issue-35609.stderr +++ b/src/test/ui/check_match/issue-35609.stderr @@ -4,11 +4,11 @@ error[E0004]: non-exhaustive patterns: `(B, _)`, `(C, _)`, `(D, _)` and 2 more n 20 | match (A, ()) { | ^^^^^^^ patterns `(B, _)`, `(C, _)`, `(D, _)` and 2 more not covered -error[E0004]: non-exhaustive patterns: `(A, B)`, `(B, B)`, `(C, B)` and 27 more not covered +error[E0004]: non-exhaustive patterns: `(_, B)`, `(_, C)`, `(_, D)` and 2 more not covered --> $DIR/issue-35609.rs:24:11 | 24 | match (A, A) { - | ^^^^^^ patterns `(A, B)`, `(B, B)`, `(C, B)` and 27 more not covered + | ^^^^^^ patterns `(_, B)`, `(_, C)`, `(_, D)` and 2 more not covered error[E0004]: non-exhaustive patterns: `((B, _), _)`, `((C, _), _)`, `((D, _), _)` and 2 more not covered --> $DIR/issue-35609.rs:28:11 diff --git a/src/test/compile-fail/E0204.rs b/src/test/ui/span/E0204.rs similarity index 77% rename from src/test/compile-fail/E0204.rs rename to src/test/ui/span/E0204.rs index 0f108a17c95db..9fb37607c7da7 100644 --- a/src/test/compile-fail/E0204.rs +++ b/src/test/ui/span/E0204.rs @@ -13,16 +13,24 @@ struct Foo { } impl Copy for Foo { } -//~^ ERROR E0204 -//~| NOTE field `foo` does not implement `Copy` #[derive(Copy)] -//~^ ERROR E0204 -//~| NOTE field `ty` does not implement `Copy` -//~| NOTE in this expansion of #[derive(Copy)] struct Foo2<'a> { ty: &'a mut bool, } +enum EFoo { + Bar { x: Vec }, + Baz, +} + +impl Copy for EFoo { } + +#[derive(Copy)] +enum EFoo2<'a> { + Bar(&'a mut bool), + Baz, +} + fn main() { } diff --git a/src/test/ui/span/E0204.stderr b/src/test/ui/span/E0204.stderr new file mode 100644 index 0000000000000..81a60a9dd3099 --- /dev/null +++ b/src/test/ui/span/E0204.stderr @@ -0,0 +1,38 @@ +error[E0204]: the trait `Copy` may not be implemented for this type + --> $DIR/E0204.rs:15:6 + | +12 | foo: Vec, + | ------------- this field does not implement `Copy` +... +15 | impl Copy for Foo { } + | ^^^^ + +error[E0204]: the trait `Copy` may not be implemented for this type + --> $DIR/E0204.rs:27:6 + | +23 | Bar { x: Vec }, + | ----------- this field does not implement `Copy` +... +27 | impl Copy for EFoo { } + | ^^^^ + +error[E0204]: the trait `Copy` may not be implemented for this type + --> $DIR/E0204.rs:17:10 + | +17 | #[derive(Copy)] + | ^^^^ +18 | struct Foo2<'a> { +19 | ty: &'a mut bool, + | ---------------- this field does not implement `Copy` + +error[E0204]: the trait `Copy` may not be implemented for this type + --> $DIR/E0204.rs:29:10 + | +29 | #[derive(Copy)] + | ^^^^ +30 | enum EFoo2<'a> { +31 | Bar(&'a mut bool), + | ------------- this field does not implement `Copy` + +error: aborting due to 4 previous errors +