diff --git a/.dir-locals.el b/.dir-locals.el new file mode 100644 index 00000000000..125e009be38 --- /dev/null +++ b/.dir-locals.el @@ -0,0 +1 @@ +((rust-mode (rust-format-on-save . t))) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 00000000000..d54cae9c214 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,119 @@ +name: CI +on: + merge_group: + pull_request: + +jobs: + test: + name: Test + runs-on: ubuntu-latest + continue-on-error: ${{ matrix.rust == 'nightly' }} + strategy: + matrix: + rust: [stable, nightly] + steps: + - name: Checkout the source code + uses: actions/checkout@v2 + with: + fetch-depth: 1 + + - name: Install Rust toolchain + uses: actions-rs/toolchain@v1 + with: + toolchain: ${{ matrix.rust }} + profile: minimal + override: true + + - name: Build chalk-engine without default features + run: cd chalk-engine && cargo build --no-default-features + + - name: Build chalk-engine with all features + run: cd chalk-engine && cargo build --all-features + + - name: Execute tests for all crates in the workspace + run: cargo test --all + + - name: Install mdbook + run: | + cd book + curl -L https://github.com/rust-lang/mdBook/releases/download/v0.4.5/mdbook-v0.4.5-x86_64-unknown-linux-gnu.tar.gz | tar xz + # Add the book directory to the $PATH + echo "$GITHUB_WORKSPACE/book" >> $GITHUB_PATH + + - name: Install mdbook-mermaid + run: cargo install mdbook-mermaid + + - name: Execute tests for Chalk book + run: cd book && ./mdbook test + + - name: Build Chalk book + run: cd book && ./mdbook build + + - name: Build documentation for all crates in the workspace + run: cargo doc --all --document-private-items --no-deps + + - name: Upload documentation to GitHub Pages + run: | + touch target/doc/.nojekyll + curl -LsSf https://raw.githubusercontent.com/rust-lang/simpleinfra/master/setup-deploy-keys/src/deploy.rs | rustc - -o /tmp/deploy + cp -r book/book/html target/doc/book 
+ (cd target/doc && /tmp/deploy) + env: + GITHUB_DEPLOY_KEY: ${{ secrets.GITHUB_DEPLOY_KEY }} + if: matrix.rust == 'stable' && github.ref == 'refs/heads/master' + + fmt: + name: Format + runs-on: ubuntu-latest + steps: + - name: Checkout the source code + uses: actions/checkout@v2 + with: + fetch-depth: 1 + + - name: Install Rust toolchain + uses: actions-rs/toolchain@v1 + with: + toolchain: stable + profile: minimal + override: true + components: rustfmt + + - name: Check formatting of all crates in the workspace + run: cargo fmt --all -- --check + + mdbook-linkcheck: + name: Book link check + runs-on: ubuntu-latest + if: github.ref != 'refs/heads/master' + steps: + - name: Checkout the source code + uses: actions/checkout@v2 + with: + fetch-depth: 1 + + - name: Install mdbook + run: | + cd book + curl -L https://github.com/rust-lang/mdBook/releases/download/v0.4.5/mdbook-v0.4.5-x86_64-unknown-linux-gnu.tar.gz | tar xz + # Add the book directory to the $PATH + echo "$GITHUB_WORKSPACE/book" >> $GITHUB_PATH + + - name: Install mdbook-linkcheck + run: cd book && curl -L https://github.com/Michael-F-Bryan/mdbook-linkcheck/releases/download/v0.7.0/mdbook-linkcheck-v0.7.0-x86_64-unknown-linux-gnu.tar.gz | tar xz + + - name: Build Chalk book + run: cd book && ./mdbook build + + conclusion: + needs: [test, fmt] + # !cancelled() executes the job regardless of whether the previous jobs passed, failed or get skipped. + if: ${{ !cancelled() }} + runs-on: ubuntu-latest + steps: + - name: Conclusion + run: | + # Print the dependent jobs to see them in the CI log + jq -C <<< '${{ toJson(needs) }}' + # Check if all jobs that we depend on (in the needs array) were successful. 
+ jq --exit-status 'all(.result == "success")' <<< '${{ toJson(needs) }}' diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml new file mode 100644 index 00000000000..f4d1e124c04 --- /dev/null +++ b/.github/workflows/publish.yml @@ -0,0 +1,70 @@ +name: Publish +on: + workflow_dispatch: # We can add version input when 1.0 is released and scheduled releases are removed + schedule: + - cron: "0 0 * * 0" # midnight UTC on Sunday + +jobs: + publish: + name: Publish + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v2 + with: + ssh-key: ${{ secrets.PUBLISH_DEPLOY_KEY }} + fetch-depth: 0 + + - name: Install Rust toolchain + uses: actions-rs/toolchain@v1 + with: + toolchain: stable + profile: minimal + override: true + + - name: Install cargo-workspaces + uses: actions-rs/install@v0.1 + with: + crate: cargo-workspaces + version: 0.2.44 + + - name: Install Node (for changelog generation) + uses: actions/setup-node@v2 + with: + node-version: 16 + + - name: Release + env: + CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }} + shell: bash + run: | + # Check if we can skip releasing a new version + # (there are no changes and the job was not manually triggered) + export CHANGED=$(cargo workspaces changed --include-merged-tags --ignore-changes "**/Cargo.toml") + if [[ -z "$CHANGED" && "$GITHUB_EVENT_NAME" != "workflow_dispatch" ]]; then + # Nothing has changed, so don't publish a new version + echo "No changes detected, skipping publish." 
+ exit 0 + fi + + # Update version + git config --global user.email "runner@gha.local" + git config --global user.name "Github Action" + cargo workspaces -v version -ay --force '*' --include-merged-tags --no-git-commit --exact patch + export VERSION=$(cargo pkgid | sed -E 's/.*#(.*)/\1/g') + + # Update changelog + npm install -g auto-changelog@2.2.1 + auto-changelog --output RELEASES.md \ + --starting-version v0.11.0 \ + --latest-version "$VERSION" \ + --merge-pattern 'Auto merge of #(\d+) - .+\n\n(.+)' \ + --template releases-template.hbs + + # Commit and publish + git commit -am "Release $VERSION" + git tag "v$VERSION" + cargo workspaces -v publish --from-git --skip-published + git push --tags + cargo workspaces -v version -ay --force '*' --include-merged-tags --no-git-tag --pre-id dev preminor diff --git a/.gitignore b/.gitignore index f553b294e5d..bf77720790a 100644 --- a/.gitignore +++ b/.gitignore @@ -9,3 +9,12 @@ TAGS *~ target chalk-parse/src/parser.rs + +## IDE files +/.idea/ +/.vscode/ + +## Files used in changelog generation +package.json +package-lock.json +node_modules diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 2fc4e348713..00000000000 --- a/.travis.yml +++ /dev/null @@ -1,21 +0,0 @@ -language: rust -rust: - - beta - - nightly -script: - - cd chalk-engine && RUSTC_BOOTSTRAP=1 cargo build --no-default-features && cd .. - - cd chalk-engine && RUSTC_BOOTSTRAP=1 cargo build --all-features && cd .. 
- - RUSTC_BOOTSTRAP=1 cargo test --all - - RUSTC_BOOTSTRAP=1 cargo doc --all --document-private-items -deploy: - - provider: script - script: mkdir -p target/gh-pages && mv target/doc target/gh-pages/ - skip-cleanup: true - on: - branch: master - - provider: pages - local-dir: ./target/gh-pages - skip-cleanup: true - github-token: $GITHUB_TOKEN - on: - branch: master diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 65a39c4e9f8..032f4145bbb 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,35 +1,24 @@ -*Note that this `Contribution.md` document is more or less a copy of the file -with the same name of the [Rust compiler](https://github.com/rust-lang/rust) -project.* - # Contributing to chalk Thank you for your interest in contributing to chalk! There are many ways to contribute, and we appreciate all of them. -* [Feature Requests](#feature-requests) * [Bug Reports](#bug-reports) +* [Running and Debugging](#running-and-debugging) * [Pull Requests](#pull-requests) * [Writing Documentation](#writing-documentation) -* [Issue Triage](#issue-triage) * [Helpful Links and Information](#helpful-links-and-information) -If you have questions, please join [our gitter channel](https://gitter.im/chalk-rs/Lobby). +If you'd like to contribute, consider joining the [Traits Working Group][traits-working-group]. +We hang out on the [rust-lang zulip][rust-lang-zulip] in the [#wg-traits][wg-traits-stream] stream. As a reminder, all contributors are expected to follow our [Code of Conduct][coc]. 
-[pound-rust-internals]: https://chat.mibbit.com/?server=irc.mozilla.org&channel=%23rust-internals -[internals]: https://internals.rust-lang.org +[traits-working-group]: https://rust-lang.github.io/compiler-team/working-groups/traits/ +[rust-lang-zulip]:https://rust-lang.zulipchat.com +[wg-traits-stream]: https://rust-lang.zulipchat.com/#narrow/stream/144729-wg-traits [coc]: https://www.rust-lang.org/conduct.html -## Feature Requests -[feature-requests]: #feature-requests - -To request a change to the way that the Rust language works, please open an -issue in the [RFCs repository](https://github.com/rust-lang/rfcs/issues/new) -rather than this one. New features and other significant language changes -must go through the RFC process. - ## Bug Reports [bug-reports]: #bug-reports @@ -50,25 +39,32 @@ other than `0`. The easiest way to do this is to invoke `chalk` like this: $ RUST_BACKTRACE=1 chalk ... ``` -### Building -[building]: #building +## Running and Debugging +[running-and-debugging]: #running-and-debugging +There is a repl mainly for debugging purposes which can be run by `cargo run`. Some basic examples are in [libstd.chalk](libstd.chalk): +```bash +$ cargo run +?- load libstd.chalk +?- Vec>: Clone +Unique; substitution [], lifetime constraints [] +``` -Chalk has to be build with the nightly version of the rust compiler. +More logging can be enabled by setting the `CHALK_DEBUG` environment variable. Set `CHALK_DEBUG=1` to see `info!(...)` output, and `CHALK_DEBUG=2` to see `debug!(...)` output as well. ## Pull Requests [pull-requests]: #pull-requests Pull requests are the primary mechanism we use to change Rust. GitHub itself -has some [great documentation][pull-requests] on using the Pull Request feature. +has some [great documentation][pull-request-documentation] on using the Pull Request feature. 
We use the "fork and pull" model [described here][development-models], where contributors push changes to their personal fork and create pull requests to bring those changes into the source repository. -[pull-requests]: https://help.github.com/articles/about-pull-requests/ -[development-models]: https://help.github.com/articles/about-collaborative-development-models/ - Please make pull requests against the `master` branch. +[pull-request-documentation]: https://help.github.com/articles/about-pull-requests/ +[development-models]: https://help.github.com/articles/about-collaborative-development-models/ + ## Writing Documentation [writing-documentation]: #writing-documentation @@ -79,16 +75,21 @@ You can find documentation style guidelines in [RFC 1574][rfc1574]. [rfc1574]: https://github.com/rust-lang/rfcs/blob/master/text/1574-more-api-documentation-conventions.md#appendix-a-full-conventions-text -# Helpful Links and Information +## Helpful Links and Information [Helpful Links and Information]: #helpful-links-and-information -## Blog posts +### Blog posts There are several [blog posts][blog-posts] which describe the ideas and machinery inside of chalk. [blog-posts]: README.md#blog-posts -## Glossary +### Glossary In addition to the blog posts there is a [glossary](GLOSSARY.md) explaining some of the terminology used in chalk. + +### Trait solving in rustc-dev-guide +The rustc-dev-guide describes [new-style trait solving][trait-solving], which is slowly replacing the old trait resolution. + +[trait-solving]: https://rustc-dev-guide.rust-lang.org/traits/chalk.html diff --git a/Cargo.lock b/Cargo.lock index 64b5bc13c3f..8d0fbd74c03 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,870 +1,1170 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 4 + [[package]] name = "aho-corasick" -version = "0.6.8" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" dependencies = [ - "memchr 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr", ] -[[package]] -name = "arrayref" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "ascii-canvas" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "term 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "atty" -version = "0.2.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", - "termion 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "backtrace" -version = "0.3.9" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8824ecca2e851cec16968d54a01dd372ef8f95b244fb84b84e70128be347c3c6" dependencies = [ - "backtrace-sys 0.1.24 (registry+https://github.com/rust-lang/crates.io-index)", - "cfg-if 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-demangle 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "term", ] [[package]] 
-name = "backtrace-sys" -version = "0.1.24" +name = "autocfg" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", -] +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "bit-set" -version = "0.5.0" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0700ddab506f33b20a03b13996eccd309a48e5ff77d0d95926aa0210fb4e95f1" dependencies = [ - "bit-vec 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", + "bit-vec", ] [[package]] name = "bit-vec" -version = "0.5.0" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" [[package]] name = "bitflags" -version = "0.4.0" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "block-buffer" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "arrayref 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", - "byte-tools 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "byte-tools" -version = "0.2.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" [[package]] -name = "byteorder" -version = "1.2.6" +name = "cfg-if" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] -name = "cc" -version = "1.0.25" -source = "registry+https://github.com/rust-lang/crates.io-index" +name = "chalk" +version = "0.104.0-dev.0" +dependencies = [ + "chalk-derive", + "chalk-engine", + "chalk-integration", + "chalk-ir", + "chalk-parse", + "chalk-recursive", + "chalk-solve", + "diff", + "docopt", + "expect-test", + "itertools 0.12.0", + "pretty_assertions", + "regex", + "rustyline", + "salsa", + "serde", + "serde_derive", +] [[package]] -name = "cfg-if" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" +name = "chalk-derive" +version = "0.104.0-dev.0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.39", + "synstructure", +] [[package]] -name = "chalk" -version = "0.1.0" +name = "chalk-engine" +version = "0.104.0-dev.0" dependencies = [ - "chalk-engine 0.9.0", - "chalk-ir 0.1.0", - "chalk-macros 0.1.1", - "chalk-parse 0.1.0", - "chalk-solve 0.1.0", - "diff 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", - "docopt 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "error-chain 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", - "itertools 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)", - "lalrpop-intern 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)", - "petgraph 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)", - "rustyline 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 
1.0.80 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)", - "stacker 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "chalk-derive", + "chalk-integration", + "chalk-ir", + "chalk-solve", + "rustc-hash", + "tracing", ] [[package]] -name = "chalk-engine" -version = "0.9.0" +name = "chalk-integration" +version = "0.104.0-dev.0" dependencies = [ - "chalk-macros 0.1.1", - "rustc-hash 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "stacker 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "chalk-derive", + "chalk-engine", + "chalk-ir", + "chalk-parse", + "chalk-recursive", + "chalk-solve", + "indexmap 2.1.0", + "salsa", + "string_cache", + "tracing", ] [[package]] name = "chalk-ir" -version = "0.1.0" +version = "0.104.0-dev.0" dependencies = [ - "chalk-engine 0.9.0", - "chalk-macros 0.1.1", - "lalrpop-intern 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags 2.4.1", + "chalk-derive", ] [[package]] -name = "chalk-macros" -version = "0.1.1" +name = "chalk-parse" +version = "0.104.0-dev.0" dependencies = [ - "lazy_static 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "lalrpop", + "lalrpop-util", + "regex", + "string_cache", ] [[package]] -name = "chalk-parse" -version = "0.1.0" +name = "chalk-recursive" +version = "0.104.0-dev.0" dependencies = [ - "error-chain 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", - "lalrpop 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", - "lalrpop-intern 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)", - "lalrpop-util 0.16.0 
(registry+https://github.com/rust-lang/crates.io-index)", - "regex 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", + "chalk-derive", + "chalk-integration", + "chalk-ir", + "chalk-solve", + "rustc-hash", + "tracing", ] [[package]] name = "chalk-solve" -version = "0.1.0" +version = "0.104.0-dev.0" +dependencies = [ + "chalk-derive", + "chalk-integration", + "chalk-ir", + "ena", + "indexmap 2.1.0", + "itertools 0.12.0", + "petgraph", + "rustc-hash", + "tracing", + "tracing-subscriber", + "tracing-tree", +] + +[[package]] +name = "clipboard-win" +version = "4.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7191c27c2357d9b7ef96baac1773290d4ca63b24205b82a3fd8a0637afcf0362" dependencies = [ - "chalk-engine 0.9.0", - "chalk-ir 0.1.0", - "chalk-macros 0.1.1", - "chalk-parse 0.1.0", - "ena 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", + "error-code", + "str-buf", + "winapi", ] [[package]] -name = "cloudabi" -version = "0.0.3" +name = "crossbeam-utils" +version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" dependencies = [ - "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if", ] +[[package]] +name = "crunchy" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" + [[package]] name = "diff" -version = "0.1.11" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8" [[package]] -name = "digest" -version = "0.7.6" +name = "dirs-next" +version = "2.0.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1" dependencies = [ - "generic-array 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if", + "dirs-sys-next", ] [[package]] -name = "docopt" -version = "0.8.3" +name = "dirs-sys-next" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" dependencies = [ - "lazy_static 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)", - "strsim 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libc", + "redox_users", + "winapi", ] +[[package]] +name = "dissimilar" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86e3bdc80eee6e16b2b6b0f87fbc98c04bee3455e35174c0de1a125d0688c632" + [[package]] name = "docopt" -version = "1.0.1" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f3f119846c823f9eafcf953a8f6ffb6ed69bf6240883261a7f13b634579a51f" dependencies = [ - "lazy_static 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)", - "strsim 0.7.0 
(registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static", + "regex", + "serde", + "strsim", ] [[package]] name = "either" -version = "1.5.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" [[package]] name = "ena" -version = "0.9.3" +version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c533630cf40e9caa44bd91aadc88a75d75a4c3a12b4cfde353cbed41daa1e1f1" dependencies = [ - "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", + "log", ] [[package]] -name = "ena" -version = "0.10.1" +name = "equivalent" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" + +[[package]] +name = "errno" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f258a7194e7f7c2a7837a8913aeab7fd8c383457034fa20ce4dd3dcb813e8eb8" dependencies = [ - "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", + "libc", + "windows-sys", ] [[package]] -name = "encode_unicode" -version = "0.1.3" +name = "error-code" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64f18991e7bf11e7ffee451b5318b5c1a73c52d0d0ada6e5a3017c8c1ced6a21" +dependencies = [ + "libc", + "str-buf", +] [[package]] -name = "error-chain" -version = "0.12.0" +name = "expect-test" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30d9eafeadd538e68fb28016364c9732d78e420b9ff8853fa5e4058861e9f8d3" dependencies = [ - "backtrace 0.3.9 
(registry+https://github.com/rust-lang/crates.io-index)", + "dissimilar", + "once_cell", ] [[package]] -name = "fake-simd" -version = "0.1.2" +name = "fixedbitset" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] -name = "fixedbitset" -version = "0.1.9" +name = "getrandom" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe9006bed769170c11f845cf00c7c1e9092aeb3f268e007c3e760ac68008070f" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + +[[package]] +name = "hashbrown" +version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f93e7192158dbcda357bdec5fb5788eebf8bbac027f3f33e719d29135ae84156" [[package]] -name = "fuchsia-zircon" +name = "heck" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" dependencies = [ - "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", - "fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-segmentation", ] [[package]] -name = "fuchsia-zircon-sys" +name = "hermit-abi" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7" [[package]] -name = "gcc" -version = "0.3.55" +name = "indexmap" +version = "1.9.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +dependencies = [ + "autocfg", + "hashbrown 0.12.3", +] [[package]] -name = "generic-array" -version = "0.9.0" +name = "indexmap" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" dependencies = [ - "typenum 1.10.0 (registry+https://github.com/rust-lang/crates.io-index)", + "equivalent", + "hashbrown 0.14.2", ] [[package]] -name = "itertools" -version = "0.7.8" +name = "instant" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ - "either 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if", ] [[package]] -name = "kernel32-sys" -version = "0.2.2" +name = "is-terminal" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "hermit-abi", + "rustix", + "windows-sys", ] [[package]] -name = "lalrpop" -version = "0.16.0" +name = "itertools" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" dependencies = [ - "ascii-canvas 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "atty 0.2.11 
(registry+https://github.com/rust-lang/crates.io-index)", - "bit-set 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", - "diff 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", - "digest 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)", - "docopt 0.8.3 (registry+https://github.com/rust-lang/crates.io-index)", - "ena 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)", - "itertools 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)", - "lalrpop-snap 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", - "lalrpop-util 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", - "petgraph 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", - "regex-syntax 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)", - "sha2 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", - "string_cache 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", - "term 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "either", ] [[package]] -name = "lalrpop-intern" -version = "0.15.1" +name = "itertools" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"25db6b064527c5d482d0423354fcd07a89a2dfe07b67892e62411946db7f07b0" +dependencies = [ + "either", +] [[package]] -name = "lalrpop-snap" -version = "0.16.0" +name = "lalrpop" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da4081d44f4611b66c6dd725e6de3169f9f63905421e8626fcb86b6a898998b8" dependencies = [ - "ascii-canvas 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "atty 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "bit-set 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", - "diff 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", - "ena 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)", - "itertools 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)", - "lalrpop-util 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", - "petgraph 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", - "regex-syntax 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "string_cache 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", - "term 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "ascii-canvas", + "bit-set", + "diff", + "ena", + "is-terminal", + "itertools 0.10.5", + "lalrpop-util", + "petgraph", + "pico-args", + "regex", + "regex-syntax 0.7.5", + "string_cache", + "term", + "tiny-keccak", + "unicode-xid", ] [[package]] name = "lalrpop-util" -version = "0.16.0" +version = "0.20.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f35c735096c0293d313e8f2a641627472b83d01b937177fe76e5e2708d31e0d" +dependencies = [ + "regex", +] [[package]] name = "lazy_static" -version = "1.1.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", -] +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.43" +version = "0.2.150" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89d92a4743f9a61002fae18374ed11e7973f530cb3a3255fb354818118b2203c" [[package]] -name = "log" -version = "0.4.5" +name = "libredox" +version = "0.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8" dependencies = [ - "cfg-if 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags 2.4.1", + "libc", + "redox_syscall 0.4.1", ] [[package]] -name = "memchr" -version = "2.1.0" +name = "linux-raw-sys" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "969488b55f8ac402214f3f5fd243ebb7206cf82de60d3172994707a4bcc2b829" + +[[package]] +name = "lock_api" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" dependencies = [ - "cfg-if 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", - "version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + 
"autocfg", + "scopeguard", ] [[package]] -name = "new_debug_unreachable" -version = "1.0.1" +name = "log" +version = "0.4.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" + +[[package]] +name = "matchers" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" dependencies = [ - "unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "regex-automata 0.1.10", ] +[[package]] +name = "memchr" +version = "2.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" + +[[package]] +name = "new_debug_unreachable" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4a24736216ec316047a1fc4252e27dabb04218aa4a3f37c6e7ddbf1f9782b54" + [[package]] name = "nix" -version = "0.5.1" +version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "598beaf3cc6fdd9a5dfb1630c2800c7acd31df7aaf0f565796fba2b53ca1af1b" dependencies = [ - "bitflags 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags 1.3.2", + "cfg-if", + "libc", ] [[package]] -name = "ordermap" -version = "0.3.5" +name = "nu-ansi-term" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +dependencies = [ + "overload", + "winapi", +] [[package]] -name = "petgraph" -version = "0.4.13" +name = "nu-ansi-term" +version = "0.49.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c073d3c1930d0751774acf49e66653acecb416c3a54c6ec095a9b11caddb5a68" dependencies = [ - "fixedbitset 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "ordermap 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", + "windows-sys", ] [[package]] -name = "phf_generator" -version = "0.7.23" +name = "once_cell" +version = "1.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" + +[[package]] +name = "oorandom" +version = "11.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" + +[[package]] +name = "overload" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" + +[[package]] +name = "parking_lot" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" dependencies = [ - "phf_shared 0.7.23 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)", + "instant", + "lock_api", + "parking_lot_core 0.8.6", ] [[package]] -name = "phf_shared" -version = "0.7.23" +name = "parking_lot" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ - "siphasher 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "lock_api", + 
"parking_lot_core 0.9.9", ] [[package]] -name = "precomputed-hash" -version = "0.1.1" +name = "parking_lot_core" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" +dependencies = [ + "cfg-if", + "instant", + "libc", + "redox_syscall 0.2.16", + "smallvec", + "winapi", +] [[package]] -name = "proc-macro2" -version = "0.3.8" +name = "parking_lot_core" +version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" dependencies = [ - "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if", + "libc", + "redox_syscall 0.4.1", + "smallvec", + "windows-targets", ] [[package]] -name = "proc-macro2" -version = "0.4.20" +name = "petgraph" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" dependencies = [ - "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "fixedbitset", + "indexmap 2.1.0", ] [[package]] -name = "quote" -version = "0.5.2" +name = "phf_shared" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6796ad771acdc0123d2a88dc428b5e38ef24456743ddb1744ed628f9815c096" dependencies = [ - "proc-macro2 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "siphasher", ] [[package]] -name = "quote" -version = "0.6.8" +name = "pico-args" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5be167a7af36ee22fe3115051bc51f6e6c7054c9348e28deb4f49bd6f705a315" + +[[package]] +name = "pin-project-lite" +version = "0.2.13" +source 
= "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" + +[[package]] +name = "precomputed-hash" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" + +[[package]] +name = "pretty_assertions" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af7cee1a6c8a5b9208b3cb1061f10c0cb689087b3d8ce85fb9d2dd7a29b6ba66" dependencies = [ - "proc-macro2 0.4.20 (registry+https://github.com/rust-lang/crates.io-index)", + "diff", + "yansi", ] [[package]] -name = "rand" -version = "0.5.5" +name = "proc-macro2" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da" dependencies = [ - "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-ident", ] [[package]] -name = "rand_core" -version = "0.2.2" +name = "quote" +version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" dependencies = [ - "rand_core 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2", ] [[package]] -name = "rand_core" -version = "0.3.0" +name = 
"redox_syscall" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" +dependencies = [ + "bitflags 1.3.2", +] [[package]] name = "redox_syscall" -version = "0.1.40" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +dependencies = [ + "bitflags 1.3.2", +] [[package]] -name = "redox_termios" -version = "0.1.1" +name = "redox_users" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a18479200779601e498ada4e8c1e1f50e3ee19deb0259c25825a98b5603b2cb4" dependencies = [ - "redox_syscall 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)", + "getrandom", + "libredox", + "thiserror", ] [[package]] name = "regex" -version = "0.2.11" +version = "1.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" dependencies = [ - "aho-corasick 0.6.8 (registry+https://github.com/rust-lang/crates.io-index)", - "memchr 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "regex-syntax 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)", - "thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "utf8-ranges 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "aho-corasick", + "memchr", + "regex-automata 0.4.3", + "regex-syntax 0.8.2", ] [[package]] -name = "regex" -version = "1.0.5" +name = "regex-automata" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +dependencies = [ + "regex-syntax 0.6.29", +] + +[[package]] +name = "regex-automata" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" dependencies = [ - "aho-corasick 0.6.8 (registry+https://github.com/rust-lang/crates.io-index)", - "memchr 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "regex-syntax 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", - "thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "utf8-ranges 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "aho-corasick", + "memchr", + "regex-syntax 0.8.2", ] [[package]] name = "regex-syntax" -version = "0.4.2" +version = "0.6.29" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.5.6" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "ucd-util 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", -] +checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" [[package]] name = "regex-syntax" -version = "0.6.2" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" + +[[package]] +name = "rustc-hash" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" + +[[package]] +name = 
"rustix" +version = "0.38.24" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ad981d6c340a49cdc40a1028d9c6084ec7e9fa33fcb839cab656a267071e234" dependencies = [ - "ucd-util 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags 2.4.1", + "errno", + "libc", + "linux-raw-sys", + "windows-sys", ] [[package]] -name = "rustc-demangle" -version = "0.1.9" +name = "rustversion" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" [[package]] -name = "rustc-hash" -version = "1.0.1" +name = "rustyline" +version = "12.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "994eca4bca05c87e86e15d90fc7a91d1be64b4482b38cb2d27474568fe7c9db9" dependencies = [ - "byteorder 1.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags 2.4.1", + "cfg-if", + "clipboard-win", + "libc", + "log", + "memchr", + "nix", + "scopeguard", + "unicode-segmentation", + "unicode-width", + "utf8parse", + "winapi", ] [[package]] -name = "rustyline" -version = "1.0.0" +name = "salsa" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b84d9f96071f3f3be0dc818eae3327625d8ebc95b58da37d6850724f31d3403" +dependencies = [ + "crossbeam-utils", + "indexmap 1.9.3", + "lock_api", + "log", + "oorandom", + "parking_lot 0.11.2", + "rustc-hash", + "salsa-macros", + "smallvec", +] + +[[package]] +name = "salsa-macros" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd3904a4ba0a9d0211816177fd34b04c7095443f8cdacd11175064fe541c8fe2" dependencies = [ - "encode_unicode 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "kernel32-sys 
0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", - "nix 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "heck", + "proc-macro2", + "quote", + "syn 1.0.109", ] +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + [[package]] name = "serde" -version = "1.0.80" +version = "1.0.192" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bca2a08484b285dcb282d0f67b26cadc0df8b19f8c12502c13d966bf9482f001" +dependencies = [ + "serde_derive", +] [[package]] name = "serde_derive" -version = "1.0.80" +version = "1.0.192" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6c7207fbec9faa48073f3e3074cbe553af6ea512d7c21ba46e434e70ea9fbc1" dependencies = [ - "proc-macro2 0.4.20 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 0.6.8 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 0.15.11 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2", + "quote", + "syn 2.0.39", ] [[package]] -name = "sha2" -version = "0.7.1" +name = "sharded-slab" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" dependencies = [ - "block-buffer 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - 
"byte-tools 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "digest 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)", - "fake-simd 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static", ] [[package]] name = "siphasher" -version = "0.2.3" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" [[package]] -name = "stacker" -version = "0.1.3" +name = "smallvec" +version = "1.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cfg-if 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "gcc 0.3.55 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", -] +checksum = "4dccd0940a2dcdf68d092b8cbab7dc0ad8fa938bf95787e1b916b0e3d0e8e970" + +[[package]] +name = "str-buf" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e08d8363704e6c71fc928674353e6b7c23dcea9d82d7012c8faf2a3a025f8d0" [[package]] name = "string_cache" -version = "0.7.3" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f91138e76242f575eb1d3b38b4f1362f10d3a43f47d182a5b359af488a02293b" dependencies = [ - "lazy_static 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "new_debug_unreachable 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "phf_shared 0.7.23 (registry+https://github.com/rust-lang/crates.io-index)", - "precomputed-hash 0.1.1 
(registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)", - "string_cache_codegen 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", - "string_cache_shared 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "new_debug_unreachable", + "once_cell", + "parking_lot 0.12.1", + "phf_shared", + "precomputed-hash", + "serde", ] [[package]] -name = "string_cache_codegen" -version = "0.4.1" +name = "strsim" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "phf_generator 0.7.23 (registry+https://github.com/rust-lang/crates.io-index)", - "phf_shared 0.7.23 (registry+https://github.com/rust-lang/crates.io-index)", - "proc-macro2 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", - "string_cache_shared 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2", + "quote", + "unicode-ident", ] [[package]] -name = "string_cache_shared" -version = "0.3.0" +name = "syn" +version = "2.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23e78b90f2fcf45d3e842032ce32e3f2d1545ba6636271dcbf24fa306d87be7a" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] [[package]] -name = "strsim" -version = "0.6.0" +name = "synstructure" +version = "0.13.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "285ba80e733fac80aa4270fbcdf83772a79b80aa35c97075320abfee4a915b06" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.39", + "unicode-xid", +] [[package]] -name = "strsim" +name = "term" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c59df8ac95d96ff9bede18eb7300b0fda5e5d8d90960e76f8e14ae765eedbf1f" +dependencies = [ + "dirs-next", + "rustversion", + "winapi", +] [[package]] -name = "syn" -version = "0.15.11" +name = "thiserror" +version = "1.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2" dependencies = [ - "proc-macro2 0.4.20 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 0.6.8 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "thiserror-impl", ] [[package]] -name = "term" -version = "0.4.6" +name = "thiserror-impl" +version = "1.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" dependencies = [ - "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2", + "quote", + "syn 2.0.39", ] [[package]] -name = "termion" -version = "1.5.1" +name = "thread_local" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" dependencies = [ - "libc 0.2.43 
(registry+https://github.com/rust-lang/crates.io-index)", - "redox_syscall 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)", - "redox_termios 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if", + "once_cell", ] [[package]] -name = "thread_local" -version = "0.3.6" +name = "tiny-keccak" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" dependencies = [ - "lazy_static 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "crunchy", ] [[package]] -name = "typenum" -version = "1.10.0" +name = "tracing" +version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" +dependencies = [ + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] [[package]] -name = "ucd-util" -version = "0.1.1" +name = "tracing-attributes" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.39", +] [[package]] -name = "unicode-width" -version = "0.1.5" +name = "tracing-core" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" +dependencies = [ + "once_cell", + "valuable", +] [[package]] -name = "unicode-xid" -version = "0.1.0" +name = "tracing-log" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + 
"tracing-core", +] [[package]] -name = "unreachable" -version = "1.0.0" +name = "tracing-subscriber" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" dependencies = [ - "void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "matchers", + "nu-ansi-term 0.46.0", + "once_cell", + "regex", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", ] [[package]] -name = "utf8-ranges" -version = "1.0.1" +name = "tracing-tree" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65139ecd2c3f6484c3b99bc01c77afe21e95473630747c7aca525e78b0666675" +dependencies = [ + "nu-ansi-term 0.49.0", + "tracing-core", + "tracing-log", + "tracing-subscriber", +] [[package]] -name = "version_check" -version = "0.1.5" +name = "unicode-ident" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] -name = "void" -version = "1.0.2" +name = "unicode-segmentation" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36" [[package]] -name = "winapi" -version = "0.2.8" +name = "unicode-width" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85" [[package]] -name = "winapi" -version = "0.3.6" +name = "unicode-xid" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "winapi-i686-pc-windows-gnu 0.4.0 
(registry+https://github.com/rust-lang/crates.io-index)", - "winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", -] +checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" [[package]] -name = "winapi-build" -version = "0.1.1" +name = "utf8parse" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" + +[[package]] +name = "valuable" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" 
+dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" -[metadata] -"checksum aho-corasick 0.6.8 (registry+https://github.com/rust-lang/crates.io-index)" = "68f56c7353e5a9547cbd76ed90f7bb5ffc3ba09d4ea9bd1d8c06c8b1142eeb5a" -"checksum arrayref 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "0d382e583f07208808f6b1249e60848879ba3543f57c32277bf52d69c2f0f0ee" -"checksum ascii-canvas 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b385d69402821a1c254533a011a312531cbcc0e3e24f19bbb4747a5a2daf37e2" -"checksum atty 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "9a7d5b8723950951411ee34d271d99dddcc2035a16ab25310ea2c8cfd4369652" -"checksum backtrace 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "89a47830402e9981c5c41223151efcced65a0510c13097c769cede7efb34782a" -"checksum backtrace-sys 0.1.24 (registry+https://github.com/rust-lang/crates.io-index)" = "c66d56ac8dabd07f6aacdaf633f4b8262f5b3601a810a0dcddffd5c22c69daa0" -"checksum bit-set 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6f1efcc46c18245a69c38fcc5cc650f16d3a59d034f3106e9ed63748f695730a" -"checksum bit-vec 0.5.0 
(registry+https://github.com/rust-lang/crates.io-index)" = "4440d5cb623bb7390ae27fec0bb6c61111969860f8e3ae198bfa0663645e67cf" -"checksum bitflags 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8dead7461c1127cf637931a1e50934eb6eee8bff2f74433ac7909e9afcee04a3" -"checksum bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "228047a76f468627ca71776ecdebd732a3423081fcf5125585bcd7c49886ce12" -"checksum block-buffer 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a076c298b9ecdb530ed9d967e74a6027d6a7478924520acddcddc24c1c8ab3ab" -"checksum byte-tools 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "560c32574a12a89ecd91f5e742165893f86e3ab98d21f8ea548658eb9eef5f40" -"checksum byteorder 1.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "90492c5858dd7d2e78691cfb89f90d273a2800fc11d98f60786e5d87e2f83781" -"checksum cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)" = "f159dfd43363c4d08055a07703eb7a3406b0dac4d0584d96965a3262db3c9d16" -"checksum cfg-if 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "0c4e7bb64a8ebb0d856483e1e682ea3422f883c5f5615a90d51a2c82fe87fdd3" -"checksum cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" -"checksum diff 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "3c2b69f912779fbb121ceb775d74d51e915af17aaebc38d28a592843a2dd0a3a" -"checksum digest 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)" = "03b072242a8cbaf9c145665af9d250c59af3b958f83ed6824e13533cf76d5b90" -"checksum docopt 0.8.3 (registry+https://github.com/rust-lang/crates.io-index)" = 
"d8acd393692c503b168471874953a2531df0e9ab77d0b6bbc582395743300a4a" -"checksum docopt 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d60c92df70dfaaabecc14b409fd79f55ba0f247780529db1d73bfa601e1d3ac0" -"checksum either 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3be565ca5c557d7f59e7cfcf1844f9e3033650c929c6566f511e8005f205c1d0" -"checksum ena 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "25b4e5febb25f08c49f1b07dc33a182729a6b21edfb562b5aef95f78e0dbe5bb" -"checksum ena 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)" = "88dc8393b3c7352f94092497f6b52019643e493b6b890eb417cdb7c46117e621" -"checksum encode_unicode 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "28d65f1f5841ef7c6792861294b72beda34c664deb8be27970f36c306b7da1ce" -"checksum error-chain 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "07e791d3be96241c77c43846b665ef1384606da2cd2a48730abe606a12906e02" -"checksum fake-simd 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" -"checksum fixedbitset 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "86d4de0081402f5e88cdac65c8dcdcc73118c1a7a465e2a05f0da05843a8ea33" -"checksum fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" -"checksum fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" -"checksum gcc 0.3.55 (registry+https://github.com/rust-lang/crates.io-index)" = "8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2" 
-"checksum generic-array 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ef25c5683767570c2bbd7deba372926a55eaae9982d7726ee2a1050239d45b9d" -"checksum itertools 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)" = "f58856976b776fedd95533137617a02fb25719f40e7d9b01c7043cd65474f450" -"checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" -"checksum lalrpop 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9f7014afd5642680074fd5dcc624d544f9eabfa281cba2c3ac56c3db6d21ad1b" -"checksum lalrpop-intern 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cc4fd87be4a815fd373e02773983940f0d75fb26fde8c098e9e45f7af03154c0" -"checksum lalrpop-snap 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0b85aa455529344133d7ecaaac04c01ed87f459deeaa0fe5422885e2095d8cdc" -"checksum lalrpop-util 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2400aeebcd11259370d038c24821b93218dd2f33a53f53e9c8fcccca70be6696" -"checksum lazy_static 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ca488b89a5657b0a2ecd45b95609b3e848cf1755da332a0da46e2b2b1cb371a7" -"checksum libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)" = "76e3a3ef172f1a0b9a9ff0dd1491ae5e6c948b94479a3021819ba7d860c8645d" -"checksum log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "d4fcce5fa49cc693c312001daf1d13411c4a5283796bac1084299ea3e567113f" -"checksum memchr 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4b3629fe9fdbff6daa6c33b90f7c08355c1aca05a3d01fa8063b822fcf185f3b" -"checksum new_debug_unreachable 1.0.1 
(registry+https://github.com/rust-lang/crates.io-index)" = "0cdc457076c78ab54d5e0d6fa7c47981757f1e34dc39ff92787f217dede586c4" -"checksum nix 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "bfb3ddedaa14746434a02041940495bf11325c22f6d36125d3bdd56090d50a79" -"checksum ordermap 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "a86ed3f5f244b372d6b1a00b72ef7f8876d0bc6a78a4c9985c53614041512063" -"checksum petgraph 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)" = "9c3659d1ee90221741f65dd128d9998311b0e40c5d3c23a62445938214abce4f" -"checksum phf_generator 0.7.23 (registry+https://github.com/rust-lang/crates.io-index)" = "03dc191feb9b08b0dc1330d6549b795b9d81aec19efe6b4a45aec8d4caee0c4b" -"checksum phf_shared 0.7.23 (registry+https://github.com/rust-lang/crates.io-index)" = "b539898d22d4273ded07f64a05737649dc69095d92cb87c7097ec68e3f150b93" -"checksum precomputed-hash 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" -"checksum proc-macro2 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "1b06e2f335f48d24442b35a19df506a835fb3547bc3c06ef27340da9acf5cae7" -"checksum proc-macro2 0.4.20 (registry+https://github.com/rust-lang/crates.io-index)" = "3d7b7eaaa90b4a90a932a9ea6666c95a389e424eff347f0f793979289429feee" -"checksum quote 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9949cfe66888ffe1d53e6ec9d9f3b70714083854be20fd5e271b232a017401e8" -"checksum quote 0.6.8 (registry+https://github.com/rust-lang/crates.io-index)" = "dd636425967c33af890042c483632d33fa7a18f19ad1d7ea72e8998c6ef8dea5" -"checksum rand 0.5.5 
(registry+https://github.com/rust-lang/crates.io-index)" = "e464cd887e869cddcae8792a4ee31d23c7edd516700695608f5b98c67ee0131c" -"checksum rand_core 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1961a422c4d189dfb50ffa9320bf1f2a9bd54ecb92792fb9477f99a1045f3372" -"checksum rand_core 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0905b6b7079ec73b314d4c748701f6931eb79fd97c668caa3f1899b22b32c6db" -"checksum redox_syscall 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)" = "c214e91d3ecf43e9a4e41e578973adeb14b474f2bee858742d127af75a0112b1" -"checksum redox_termios 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7e891cfe48e9100a70a3b6eb652fef28920c117d366339687bd5576160db0f76" -"checksum regex 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "9329abc99e39129fcceabd24cf5d85b4671ef7c29c50e972bc5afe32438ec384" -"checksum regex 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "2069749032ea3ec200ca51e4a31df41759190a88edca0d2d86ee8bedf7073341" -"checksum regex-syntax 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "8e931c58b93d86f080c734bfd2bce7dd0079ae2331235818133c8be7f422e20e" -"checksum regex-syntax 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)" = "7d707a4fa2637f2dca2ef9fd02225ec7661fe01a53623c1e6515b6916511f7a7" -"checksum regex-syntax 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "747ba3b235651f6e2f67dfa8bcdcd073ddb7c243cb21c442fc12395dfcac212d" -"checksum rustc-demangle 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "bcfe5b13211b4d78e5c2cadfebd7769197d95c639c35a50057eb4c05de811395" -"checksum rustc-hash 1.0.1 
(registry+https://github.com/rust-lang/crates.io-index)" = "7540fc8b0c49f096ee9c961cda096467dce8084bec6bdca2fc83895fd9b28cb8" -"checksum rustyline 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "00b06ac9c8e8e3e83b33d175d39a9f7b6c2c930c82990593719c8e48788ae2d9" -"checksum serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)" = "15c141fc7027dd265a47c090bf864cf62b42c4d228bbcf4e51a0c9e2b0d3f7ef" -"checksum serde_derive 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)" = "225de307c6302bec3898c51ca302fc94a7a1697ef0845fcee6448f33c032249c" -"checksum sha2 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9eb6be24e4c23a84d7184280d2722f7f2731fcdd4a9d886efbfe4413e4847ea0" -"checksum siphasher 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "0b8de496cf83d4ed58b6be86c3a275b8602f6ffe98d3024a869e124147a9a3ac" -"checksum stacker 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "82c150485b78a81ed189dbdd1947397344bc296b86f7fcc7ca3cdae8bfe882e0" -"checksum string_cache 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "25d70109977172b127fe834e5449e5ab1740b9ba49fa18a2020f509174f25423" -"checksum string_cache_codegen 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "35293b05cf1494e8ddd042a7df6756bf18d07f42d234f32e71dce8a7aabb0191" -"checksum string_cache_shared 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b1884d1bc09741d466d9b14e6d37ac89d6909cbcac41dd9ae982d4d063bbedfc" -"checksum strsim 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b4d15c810519a91cf877e7e36e63fe068815c678181439f2f29e2562147c3694" -"checksum strsim 0.7.0 
(registry+https://github.com/rust-lang/crates.io-index)" = "bb4f380125926a99e52bc279241539c018323fab05ad6368b56f93d9369ff550" -"checksum syn 0.15.11 (registry+https://github.com/rust-lang/crates.io-index)" = "b036b7b35e846707c0e55c2c9441fa47867c0f87fca416921db3261b1d8c741a" -"checksum term 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "fa63644f74ce96fbeb9b794f66aff2a52d601cbd5e80f4b97123e3899f4570f1" -"checksum termion 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "689a3bdfaab439fd92bc87df5c4c78417d3cbe537487274e9b0b2dce76e92096" -"checksum thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c6b53e329000edc2b34dbe8545fd20e55a333362d0a321909685a19bd28c3f1b" -"checksum typenum 1.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "612d636f949607bdf9b123b4a6f6d966dedf3ff669f7f045890d3a4a73948169" -"checksum ucd-util 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "fd2be2d6639d0f8fe6cdda291ad456e23629558d466e2789d2c3e9892bda285d" -"checksum unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "882386231c45df4700b275c7ff55b6f3698780a650026380e72dabe76fa46526" -"checksum unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" -"checksum unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "382810877fe448991dfc7f0dd6e3ae5d58088fd0ea5e35189655f84e6814fa56" -"checksum utf8-ranges 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "fd70f467df6810094968e2fce0ee1bd0e87157aceb026a8c083bcf5e25b9efe4" -"checksum version_check 0.1.5 
(registry+https://github.com/rust-lang/crates.io-index)" = "914b1a6776c4c929a602fafd8bc742e06365d4bcbe48c30f9cca5824f70dc9dd" -"checksum void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" -"checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" -"checksum winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "92c1eb33641e276cfa214a0522acad57be5c56b10cb348b3c5117db75f3ac4b0" -"checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" -"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" -"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + +[[package]] +name = "yansi" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" diff --git a/Cargo.toml b/Cargo.toml index 195f22f5de8..542dff6b2ce 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,43 +1,39 @@ [package] name = "chalk" -version = "0.1.0" +version = "0.104.0-dev.0" description = "Model of the Rust trait system" -license = "Apache-2.0/MIT" +license = "MIT OR Apache-2.0" authors = ["Rust Compiler Team", "Chalk developers"] -repository = "https://github.com/rust-lang-nursery/chalk" +repository = "https://github.com/rust-lang/chalk" readme = "README.md" keywords = ["compiler", "traits", "prolog"] +edition = "2018" +publish = false + +[features] +bench = [] [dependencies] -diff = "0.1.11" -docopt = "1.0.0" -error-chain = "0.12.0" -itertools = "0.7.8" -lalrpop-intern = "0.15.1" -petgraph = "0.4.13" -rustyline = "1.0" +docopt = "1.1.0" +itertools = "0.12.0" +rustyline = { version = "12.0.0", default-features = false } +salsa = "0.16.0" serde = "1.0" serde_derive = "1.0" -stacker = "0.1.2" - -[dependencies.chalk-parse] -version = "0.1.0" -path = "chalk-parse" - -[dependencies.chalk-ir] -version = "0.1.0" -path = "chalk-ir" -[dependencies.chalk-solve] -version = "0.1.0" 
-path = "chalk-solve" - -[dependencies.chalk-macros] -version = "0.1.0" -path = "chalk-macros" - -[dependencies.chalk-engine] -version = "0.9.0" -path = "chalk-engine" +chalk-derive = { version = "0.104.0-dev.0", path = "chalk-derive" } +chalk-engine = { version = "0.104.0-dev.0", path = "chalk-engine" } +chalk-ir = { version = "0.104.0-dev.0", path = "chalk-ir" } +chalk-solve = { version = "0.104.0-dev.0", path = "chalk-solve" } +chalk-recursive = { version = "0.104.0-dev.0", path = "chalk-recursive" } +chalk-parse = { version = "0.104.0-dev.0", path = "chalk-parse" } +chalk-integration = { version = "0.104.0-dev.0", path = "chalk-integration" } [workspace] + +[dev-dependencies] +# used for program_writer test errors +diff = "0.1" +expect-test = "1.4.1" +pretty_assertions = "1.4.0" +regex = "1" diff --git a/GLOSSARY.md b/GLOSSARY.md index e93456da67d..c4bbca4618b 100644 --- a/GLOSSARY.md +++ b/GLOSSARY.md @@ -1,203 +1,3 @@ # Glossary -This is a glossary of terminology (possibly) used in the chalk crate. -## Binary connective -There are sixteen logical connectives on two boolean variables. The most -interesting in this context are listed below. There is also a truth table given -which encodes the possible results of the operations like this - -``` -f(false, false) f(false, true) f(true, false) f(true, true). -``` - -As a shorthand the resulting truth table is encoded with `true = 1` and `false = -0`. - -| Truth table | Operator symbol | Common name | -|-------------|-----------------|----------------------------------| -| 0001 | && | Conjunction; and | -| 1001 | <=> | Equivalence; if and only if; iff | -| 1101 | => | Implication; if ... then | - -## Binder -A binder is an expression that binds a literal to a certain expression. -Examples for binders: - -- The universal quantifier `forall(a)` states that a certain condition holds for - all allowed values for `a`. 
-- A function definition `f(x) = a * x` is a binder for the variable `x` whereas - `a` is a free variable. -- A sum `\sum_n x_n` binds the index variable `n`. - -## Canonical Form -A formula in canonical form has the property that its DeBruijn indices are -minimized. For example when the formula `forall<0, 1> { 0: A && 1: B }` is -processed, both "branches" `0: A` and `1: B` are processed individually. The -first branch would be in canonical form, the second branch not since the -occurring DeBruijn index `1` could be replaced with `0`. - -## Clause -In the A clause is the disjunction of several expressions. For example the clause -`condition_1 || condition_2 || ...` states that at least one of the conditions -holds. - -There are two notable special cases of clauses. A *Horn clause* has at most one -positive literal. A *Definite clause* has exactly one positive literal. - -*Horn clauses* can be written in the form `A || !B || !C || ...` with `A` being -the optional positive literal. Due to the equivalence `(P => Q) <=> (!P || Q)` -the clause can be expressed as `B && C && ... => A` which means that A is true -if `B`, `C`, etc. are all true. All rules in chalk are in this form. For example - -``` -struct A {} -impl B for A where T: C + D {} -``` - -is expressed as the *Horn clause* `(T: C) && (T: D) => (A: B)`. This formula -has to hold for all values of `T`. The second example - -``` -struct A {} -impl B for A {} -impl C for A {} -``` - -is expressed as the *Horn clause* `(A: B) && (A: C)`. Note the missing -consequence. - -## DeBruijn Index -DeBruijn indices numerate literals that are bound in an unambiguous way. The -literal is given the number of its binder. The indices start at zero from the -innermost binder increasing from the inside out. - -Given the example `forall { exists { T: Foo } }` the -literal names `U` and `T` are replaced with `0` and `1` respectively: `forall<0> -{ exists<1> { 0: Foo } }`. 
- -## Formula -A formula is a logical expression consisting of literals and constants connected -by logical operators. - -## Goal -With a set of type variables, given types, traits and impls, a goal specifies a -problem which is solved by finding types for the type variables that satisfy the -formula. For example the goal `exists { T: u32 }` can be solved with `T = -u32`. - -## Literal -A literal is an atomic element of a formula together with the constants `true` -and `false`. It is equivalent to a variable in an algebraic expressions. Note -that literals are *not* the same as the type variables used in specifying a -goal. - -## Normal form -To say that a statement is in a certain *normal form* means that the pattern in -which the subformulas are arranged fulfil certain rules. The individual patterns -have different advantages for their manipulation. - -### Conjunctive normal form (CNF) -A formula in CNF is a conjunction of disjunctions. For example `(x1 || x2 || -x3) && (x4 || x5 || x6)` is in CNF. - -### Disjunctive normal form (DNF) -A formula in DNF is a disjunction of conjunctions. For example `(x1 && x2 && -x3) || (x4 && x5 && x6)` is in DNF. - -### Negation normal form (NNF) -A formula in NNF consists only of literals, the connectives `&&` and `||` and -`true` and `false`. - -### Prenex normal form (PNF) -All quantifiers are on the highest level of a formula and do not occur inside -the subformulas of the expression. - -- `forall(x). exists(y). forall(z). P(x) && P(y) => P(z)` is in PNF. -- `(exists(x). P(x)) => exists(y). P(y) && forall(z). P(z)` is *not* in PNF. - -## Normalization -Normalization is the process of converting an associated type to a concrete -type. In the case of an iterator this would mean that the associated `Item` type -is replaced with something more meaningful with respect to the individual -context (e.g. `u32`). - -## Projection -Projection is the reference to a field or (in the context of Rust) to a type -from another type. 
- -## Satisfiability -A formula is satisfiable iff there is a valuation for the atoms inside the -formula that makes it true. - -## Unification -Unification is the process of solving a formula. That means unification finds -values for all the free literals of the formula that satisfy it. In the context -of chalk the values refer to types. - -## Universe -A universe sets the scope in which a particular variable name is bound. (See -*Binder*.) A universe can encapsulate other universes. A universe can -be contained by only one parent universe. Universes have therefore a tree-like -structure. A universe can access the variable names of itself and the parent -universes but not of the sibling universes. - -## Well-formed -A formula is well-formed if it is constructed according to a predefined set of -syntactic rules. - -In the context of the Rust type system this means that basic rules for type -construction have to be met. Two examples: 1) Given a struct definition - -```rust -struct HashSet -``` -then a type `HashSet` is well-formed since `i32` implements `Hash`. A type -`HashSet` with a type `NoHash` that does not implement the `Hash` trait -is not well-formed. - -2) If a trait demands by its definition the implementation of further traits -for a certain type then these secondary traits have to be implemented as well. -If a type `Foo` implements `trait Eq: PartialEq` then this type has to implement -`trait PartialEq` as well. If it does not, then the type `Foo: Eq` is not well -formed according to Rust type building rules. - -## Quantifier - -### Existential quantifier -A formula with the existential quantifier `exists(x). P(x)` is satisfiable if -and only if there exists at least one value for all possible values of x which -satisfies the subformula `P(x)`. - -In the context of chalk, the existential quantifier usually demands the -existence of exactly one instance (i.e. type) that satisfies the formula (i.e. -type constraints). 
More than one instance means that the result is ambiguous. - -### Universal quantifier -A formula with the universal quantifier `forall(x). P(x)` is satisfiable -if and only if the subformula `P(x)` is true for all possible values for x. - -### Helpful equivalences -- `not(forall(x). P(x)) <=> exists(x). not(P(x))` -- `not(exists(x). P(x)) <=> forall(x). not(P(x))` - -## Skolemization -Skolemization is a technique of transferring a logical formula with existential -quantifiers to a statement without them. The resulting statement is in general -not equivalent to the original statement but equisatisfiable. - -## Validity -An argument (*premisses* therefore *conclusion*) is valid iff there is no -valuation which makes the premisses true and the conclusion false. - -Valid: `A && B therefore A || B`. Invalid: `A || B therefore A && B` because the -valuation `A = true, B = false` makes the premiss true and the conclusion false. - -## Valuation -A valuation is an assignment of values to all variables inside a logical -formula. - -# Literature -- Offline - - "Introduction to Formal Logic", Peter Smith - - "Handbook of Practical Logic and Automated Reasoning", John Harrison - - "Types and Programming Languages", Benjamin C. Pierce +Please see [Appendix A: Glossary and terminology](https://rust-lang.github.io/chalk/book/glossary.html) in the Chalk book. 
diff --git a/README.md b/README.md index 4b53c3aaba5..de27c2b7b60 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,21 @@ -[![Build Status](https://travis-ci.org/rust-lang-nursery/chalk.svg?branch=master)](https://travis-ci.org/rust-lang-nursery/chalk) +[![Build Status](https://github.com/rust-lang/chalk/workflows/CI/badge.svg)](https://github.com/rust-lang/chalk/actions?workflow=CI) +[![Chalk Book](https://img.shields.io/badge/book-chalk-blue.svg)](https://rust-lang.github.io/chalk/book/) +[![Rust Documentation](https://img.shields.io/badge/api-rustdoc-blue.svg)](https://rust-lang.github.io/chalk/chalk/) # chalk -A [Prolog-ish][Prolog] interpreter written in Rust, intended perhaps for use in -the compiler, but also for experimentation. +Chalk is a library that implements the Rust trait system, based on [Prolog-ish][Prolog] logic rules. + +See the [Chalk book](https://rust-lang.github.io/chalk/book/) for more information. ## FAQ -**How does chalk relate to rustc?** The plan is to have rustc use the `chalk-engine` crate (in this repo), which defines chalk's solver. The rest of chalk can then be considered an elaborate unit testing harness. For more details, see [the Traits chapter of the rustc-guide](https://rust-lang.github.io/rustc-guide/traits/index.html). +**How does chalk relate to rustc?** The plan is to have rustc use the +`chalk-solve` crate (in this repo) to answer questions about Rust programs, for +example, "Does `Vec` implement `Debug`?". Internally, chalk converts +Rust-specific information into logic and uses a logic engine to find the answer +to the original query. For more details, see +[this explanation in the chalk book][chalk-lowering-details]. 
**Where does the name come from?** `chalk` is named after [Chalkidiki], the area where [Aristotle] was born. Since Prolog is a logic programming language, this seemed a @@ -16,24 +24,43 @@ suitable reference. [Prolog]: https://en.wikipedia.org/wiki/Prolog [Chalkidiki]: https://en.wikipedia.org/wiki/Chalkidiki [Aristotle]: https://en.wikipedia.org/wiki/Aristotle +[chalk-lowering-details]: https://rust-lang.github.io/chalk/book/#chalk-works-by-converting-rust-goals-into-logical-inference-rules ## Blog posts [blog-posts]: #blog-posts Here are some blog posts talking about chalk: -- [Lowering Rust Traits to Logic](http://smallcultfollowing.com/babysteps/blog/2017/01/26/lowering-rust-traits-to-logic/) +- [Lowering Rust Traits to Logic](https://smallcultfollowing.com/babysteps/blog/2017/01/26/lowering-rust-traits-to-logic/) - Explains the basic concepts at play -- [Unification in Chalk, Part 1](http://smallcultfollowing.com/babysteps/blog/2017/03/25/unification-in-chalk-part-1/) +- [Unification in Chalk, Part 1](https://smallcultfollowing.com/babysteps/blog/2017/03/25/unification-in-chalk-part-1/) - An introduction to unification -- [Unification in Chalk, Part 2](http://smallcultfollowing.com/babysteps/blog/2017/04/23/unification-in-chalk-part-2/) +- [Unification in Chalk, Part 2](https://smallcultfollowing.com/babysteps/blog/2017/04/23/unification-in-chalk-part-2/) - Extending the system for associated types -- [Negative reasoning in Chalk](http://aturon.github.io/blog/2017/04/24/negative-chalk/) +- [Negative reasoning in Chalk](https://aturon.github.io/blog/2017/04/24/negative-chalk/) - How to prove that something is not true -- [Query structure in 
chalk](http://smallcultfollowing.com/babysteps/blog/2017/05/25/query-structure-in-chalk/) +- [Query structure in chalk](https://smallcultfollowing.com/babysteps/blog/2017/05/25/query-structure-in-chalk/) - The basic chalk query structure, with pointers into the chalk implementation -- [Cyclic queries in chalk](http://smallcultfollowing.com/babysteps/blog/2017/09/12/tabling-handling-cyclic-queries-in-chalk/) +- [Cyclic queries in chalk](https://smallcultfollowing.com/babysteps/blog/2017/09/12/tabling-handling-cyclic-queries-in-chalk/) - Handling cyclic relations and enabling the implementation of implied bounds and other long-desired features in an elegant way +## REPL + +There is a repl mainly for debugging purposes which can be run by `cargo run`. Some basic examples are in [libstd.chalk](libstd.chalk): +```bash +$ cargo run +?- load libstd.chalk +?- Vec<Box<i32>>: Clone +Unique; substitution [], lifetime constraints [] +``` + ## Contributing -If you're like to contribute, consider joining the [Traits Working Group](https://github.com/rust-lang/rust/issues/48416). We hang out on the [rust-lang discord](http://discord.gg/rust-lang) in the `#wg-traits` channel (Lang category). +If you'd like to contribute, consider joining the [Traits Working Group][working-group]. +We hang out on the [rust-lang zulip][rust-lang-zulip] in the [#wg-traits][wg-traits-stream] stream. + +See [the contributing chapter][contributing] in the chalk book for more info. 
+ +[working-group]: https://rust-lang.github.io/compiler-team/working-groups/traits/ +[rust-lang-zulip]:https://rust-lang.zulipchat.com +[wg-traits-stream]: https://rust-lang.zulipchat.com/#narrow/stream/144729-wg-traits +[contributing]: https://rust-lang.github.io/chalk/book/contribution_guide.html diff --git a/RELEASES.md b/RELEASES.md index 1da9b86fb61..24aa23ab72b 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -1,50 +1,755 @@ -The `chalk-engine` and `chalk-macros` crates are published to -crates.io periodically for use by the compiler. The rest of chalk is -not yet published, though it might be nice to publish the interpreter -at some point. +### Changelog -# Release 0.9.0 +All notable changes to this project will be documented in this file. Dates are displayed in UTC. -- Added the variance parameter +Generated by [`auto-changelog`](https://github.com/CookPete/auto-changelog) +(Note: versions before 0.11.0 were manually generated). -# Releases 0.6.0 .. 0.8.1 +#### [v0.103.0](https://github.com/rust-lang/chalk/compare/v0.102.0...v0.103.0) -Forgot to write release notes =) +- fix: Don't panic when formatting without Program [`#830`](https://github.com/rust-lang/chalk/pull/830) -# Release 0.5.0 +#### [v0.102.0](https://github.com/rust-lang/chalk/compare/v0.101.0...v0.102.0) -**Tag:** `chalk-engine-v0.5.0` +> 20 April 2025 -Pare down to very few dependencies, and make even those optional. +#### [v0.101.0](https://github.com/rust-lang/chalk/compare/v0.100.0...v0.101.0) -# Release 0.4.0 +> 13 April 2025 -**Tag:** `chalk-engine-v0.4.0` +- Add `associated_ty_from_impl` to Chalk db to avoid computing associated types eagerly [`#826`](https://github.com/rust-lang/chalk/pull/826) -Tweak various things about the traits to aid in rustc integration. 
+#### [v0.100.0](https://github.com/rust-lang/chalk/compare/v0.99.0...v0.100.0) -# Release 0.2.0 +> 9 March 2025 -**Tag:** `chalk-engine-v0.2.0` +- Bump versions to `0.100.0-dev.0` [`#825`](https://github.com/rust-lang/chalk/pull/825) +- Implement built-in supports for `AsyncFn*` traits [`#824`](https://github.com/rust-lang/chalk/pull/824) -Remove some pointless traits from Chalk engine context. +#### [v0.99.0](https://github.com/rust-lang/chalk/compare/v0.98.0...v0.99.0) -# Release 0.1.0 +> 14 January 2025 -**Tag:** `chalk-engine-v0.1.0` +- bump version -> 0.99.0-dev.0 [`#822`](https://github.com/rust-lang/chalk/pull/822) +- Implement trait upcasting [`#821`](https://github.com/rust-lang/chalk/pull/821) +- Configure CI for merge queue [`#818`](https://github.com/rust-lang/chalk/pull/818) +- Remove unnecessary symbols and add missing symbols [`#817`](https://github.com/rust-lang/chalk/pull/817) -Initial release. 
+#### [v0.98.0](https://github.com/rust-lang/chalk/compare/v0.97.0...v0.98.0) + +> 7 July 2024 + +- Bump version number to `0.98.0-dev.0` to fix automatic releases [`#813`](https://github.com/rust-lang/chalk/pull/813) +- Add `f16` and `f128` [`#811`](https://github.com/rust-lang/chalk/pull/811) + +#### [v0.97.0](https://github.com/rust-lang/chalk/compare/v0.96.0...v0.97.0) + +> 7 April 2024 + +- Add missing `?Sized` bounds for `HasInterner` impls [`#810`](https://github.com/rust-lang/chalk/pull/810) +- chore: fix typo in comment [`#808`](https://github.com/rust-lang/chalk/pull/808) +- Introduce Lifetime::Error [`#809`](https://github.com/rust-lang/chalk/pull/809) +- Remove unused dependency [`#807`](https://github.com/rust-lang/chalk/pull/807) + +#### [v0.96.0](https://github.com/rust-lang/chalk/compare/v0.95.0...v0.96.0) + +> 7 January 2024 + +- Rename generator to coroutine [`#806`](https://github.com/rust-lang/chalk/pull/806) + +#### [v0.95.0](https://github.com/rust-lang/chalk/compare/v0.94.0...v0.95.0) + +> 19 November 2023 + +- Fixes typo in what_is_chalk.md [`#804`](https://github.com/rust-lang/chalk/pull/804) +- Bump deps [`#805`](https://github.com/rust-lang/chalk/pull/805) + +#### [v0.94.0](https://github.com/rust-lang/chalk/compare/v0.93.0...v0.94.0) + +> 8 October 2023 + +- Remove an unnecessary mut and fix warning [`#803`](https://github.com/rust-lang/chalk/pull/803) +- Upgrade to indexmap 2 [`#802`](https://github.com/rust-lang/chalk/pull/802) +- chalk-solve: upgrade petgraph to version 0.6.4 [`#801`](https://github.com/rust-lang/chalk/pull/801) + +#### 
[v0.93.0](https://github.com/rust-lang/chalk/compare/v0.92.0...v0.93.0) + +> 13 August 2023 + +- Push DiscriminantKind implementation fact unconditionally [`#800`](https://github.com/rust-lang/chalk/pull/800) + +#### [v0.92.0](https://github.com/rust-lang/chalk/compare/v0.91.0...v0.92.0) + +> 30 June 2023 + +- Pin cargo-workspaces to 0.2.44 [`#799`](https://github.com/rust-lang/chalk/pull/799) +- fix: generalize types before generating built-in `Normalize` clauses [`#797`](https://github.com/rust-lang/chalk/pull/797) +- Support `FnPtr` trait [`#798`](https://github.com/rust-lang/chalk/pull/798) +- Generate clauses for placeholder associated types [`#795`](https://github.com/rust-lang/chalk/pull/795) +- Generalize program clause for `AliasEq` goal with nested alias [`#792`](https://github.com/rust-lang/chalk/pull/792) +- Generate `Normalize` clauses for dyn and opaque types [`#780`](https://github.com/rust-lang/chalk/pull/780) + +#### [v0.91.0](https://github.com/rust-lang/chalk/compare/v0.90.0...v0.91.0) + +> 30 April 2023 + +- Add support for `Pointee` trait [`#794`](https://github.com/rust-lang/chalk/pull/794) +- add test for bug with mixed cycles [`#788`](https://github.com/rust-lang/chalk/pull/788) + +#### [v0.90.0](https://github.com/rust-lang/chalk/compare/v0.89.0...v0.90.0) + +> 2 April 2023 + +- update syn to 2.0 [`#793`](https://github.com/rust-lang/chalk/pull/793) + +#### [v0.89.0](https://github.com/rust-lang/chalk/compare/v0.88.0...v0.89.0) + +> 19 March 2023 + +- Fix projection substitution order considering GATs [`#790`](https://github.com/rust-lang/chalk/pull/790) +- update `inductive_canonical_cycle` 
test [`#787`](https://github.com/rust-lang/chalk/pull/787) +- Add test for inductive canonical cycle [`#786`](https://github.com/rust-lang/chalk/pull/786) + +#### [v0.88.0](https://github.com/rust-lang/chalk/compare/v0.87.0...v0.88.0) + +> 4 December 2022 + +- Add test for example found from removing fresh vars in rustc [`#784`](https://github.com/rust-lang/chalk/pull/784) +- Implement should_continue in chalk-recursive [`#774`](https://github.com/rust-lang/chalk/pull/774) + +#### [v0.87.0](https://github.com/rust-lang/chalk/compare/v0.86.0...v0.87.0) + +> 13 November 2022 + +- Remove empty lifetime [`#783`](https://github.com/rust-lang/chalk/pull/783) +- Implement support for the `Tuple` trait [`#782`](https://github.com/rust-lang/chalk/pull/782) +- fix: consider ADT's generic parameters [`#781`](https://github.com/rust-lang/chalk/pull/781) + +#### [v0.86.0](https://github.com/rust-lang/chalk/compare/v0.85.0...v0.86.0) + +> 16 October 2022 + +- Fix parameter for `Interner` in `derive(FallibleTypeFolder)` macro [`#779`](https://github.com/rust-lang/chalk/pull/779) +- Fix `Self` type retrieval from `ProjectionTy` [`#778`](https://github.com/rust-lang/chalk/pull/778) + +#### [v0.85.0](https://github.com/rust-lang/chalk/compare/v0.84.0...v0.85.0) + +> 9 October 2022 + +- Split fallible infallible folding [`#772`](https://github.com/rust-lang/chalk/pull/772) +- Bump regex from 1.5.4 to 1.5.5 [`#764`](https://github.com/rust-lang/chalk/pull/764) +- Bump thread_local from 1.1.3 to 1.1.4 [`#766`](https://github.com/rust-lang/chalk/pull/766) +- Bump crossbeam-utils from 0.8.5 to 0.8.9 
[`#771`](https://github.com/rust-lang/chalk/pull/771) +- Update mdbook-mermaid [`#776`](https://github.com/rust-lang/chalk/pull/776) + +#### [v0.84.0](https://github.com/rust-lang/chalk/compare/v0.83.0...v0.84.0) + +> 31 July 2022 + +- Solve auto traits for closures (issue #734) [`#755`](https://github.com/rust-lang/chalk/pull/755) + +#### [v0.83.0](https://github.com/rust-lang/chalk/compare/v0.82.0...v0.83.0) + +> 26 June 2022 + +- Remove unused Result assoc type from Fold trait [`#768`](https://github.com/rust-lang/chalk/pull/768) +- Rename folding/visiting traits [`#767`](https://github.com/rust-lang/chalk/pull/767) +- Detect dark theme for mermaid diagrams [`#770`](https://github.com/rust-lang/chalk/pull/770) +- Add workaround for GitHub doc url's returning 403 [`#769`](https://github.com/rust-lang/chalk/pull/769) +- Typo [`#765`](https://github.com/rust-lang/chalk/pull/765) + +#### [v0.82.0](https://github.com/rust-lang/chalk/compare/v0.81.0...v0.82.0) + +> 12 April 2022 + +- we only need to prove things one way [`#754`](https://github.com/rust-lang/chalk/pull/754) + +#### [v0.81.0](https://github.com/rust-lang/chalk/compare/v0.80.0...v0.81.0) + +> 27 March 2022 + +- Use SPDX license format [`#757`](https://github.com/rust-lang/chalk/pull/757) +- Fix function pointers of different param counts unifying [`#759`](https://github.com/rust-lang/chalk/pull/759) + +#### [v0.80.0](https://github.com/rust-lang/chalk/compare/v0.79.0...v0.80.0) + +> 20 March 2022 + +- Make DispatchFromDyn a well-known type [`#738`](https://github.com/rust-lang/chalk/pull/738) + +#### 
[v0.79.0](https://github.com/rust-lang/chalk/compare/v0.78.0...v0.79.0) + +> 13 March 2022 + +- Add missing fold_free_var_const [`#756`](https://github.com/rust-lang/chalk/pull/756) +- Fix clippy warnings [`#753`](https://github.com/rust-lang/chalk/pull/753) + +#### [v0.78.0](https://github.com/rust-lang/chalk/compare/v0.77.0...v0.78.0) + +> 6 March 2022 + +- repl: add --solver flag. [`#751`](https://github.com/rust-lang/chalk/pull/751) +- recursive: fix hang on fulfill by slightly smarter check for progress. [`#752`](https://github.com/rust-lang/chalk/pull/752) +- Add some additional topics to todo.md [`#748`](https://github.com/rust-lang/chalk/pull/748) + +#### [v0.77.0](https://github.com/rust-lang/chalk/compare/v0.76.0...v0.77.0) + +> 20 February 2022 + +- Update `indexmap` to 1.8.0 [`#746`](https://github.com/rust-lang/chalk/pull/746) +- Allow tests to be updated automatically [`#744`](https://github.com/rust-lang/chalk/pull/744) + +#### [v0.76.0](https://github.com/rust-lang/chalk/compare/v0.75.0...v0.76.0) + +> 26 December 2021 + +- Remove `Ord` from `chalk_ir::interner::DefId` [`#740`](https://github.com/rust-lang/chalk/pull/740) +- Remove unused field [`#743`](https://github.com/rust-lang/chalk/pull/743) +- Allow `test` invocations to elide empty `program` declarations [`#741`](https://github.com/rust-lang/chalk/pull/741) + +#### [v0.75.0](https://github.com/rust-lang/chalk/compare/v0.74.0...v0.75.0) + +> 12 December 2021 + +- Use owned copies of Interner rather than refs [`#735`](https://github.com/rust-lang/chalk/pull/735) +- chore: update tracing dependencies (amongst others) 
[`#737`](https://github.com/rust-lang/chalk/pull/737) + +#### [v0.74.0](https://github.com/rust-lang/chalk/compare/v0.73.0...v0.74.0) + +> 29 November 2021 + +- More clause fixes [`#728`](https://github.com/rust-lang/chalk/pull/728) +- Introduce `Folder::Error` [`#709`](https://github.com/rust-lang/chalk/pull/709) + +#### [v0.73.0](https://github.com/rust-lang/chalk/compare/v0.72.0...v0.73.0) + +> 7 November 2021 + +- Fix broken markdown and intr-doc links in documentation [`#731`](https://github.com/rust-lang/chalk/pull/731) + +#### [v0.72.0](https://github.com/rust-lang/chalk/compare/v0.71.0...v0.72.0) + +> 10 October 2021 + +- Replace `ControlFlow` with version in std [`#729`](https://github.com/rust-lang/chalk/pull/729) +- Regenerate lockfile to fix tests in nightly [`#730`](https://github.com/rust-lang/chalk/pull/730) + +#### [v0.71.0](https://github.com/rust-lang/chalk/compare/v0.70.0...v0.71.0) + +> 29 August 2021 + +- Make the Generator trait well-known [`#723`](https://github.com/rust-lang/chalk/pull/723) +- Switch runner for publish job to Ubuntu from Mac [`#722`](https://github.com/rust-lang/chalk/pull/722) + +#### [v0.70.0](https://github.com/rust-lang/chalk/compare/v0.69.0...v0.70.0) + +> 1 August 2021 + +- Install auto-changelog globally to fix command not found [`#720`](https://github.com/rust-lang/chalk/pull/720) +- rework recursive solver for better integration into an expanded version of salsa [`#708`](https://github.com/rust-lang/chalk/pull/708) +- Make various methods on InferenceTable public [`#718`](https://github.com/rust-lang/chalk/pull/718) +- Use unnamed consts in chalk-derive 
[`#717`](https://github.com/rust-lang/chalk/pull/717) +- Update "implied bounds" rules for types to match #206 [`#714`](https://github.com/rust-lang/chalk/pull/714) +- Update publishing page in the book with automation details [`#715`](https://github.com/rust-lang/chalk/pull/715) +- Skip crate publishing if there have been no changes [`#711`](https://github.com/rust-lang/chalk/pull/711) +- Fix automatic changelog generation to include current version's changes [`#713`](https://github.com/rust-lang/chalk/pull/713) +- Automate the changelog [`#710`](https://github.com/rust-lang/chalk/pull/710) +- Fix typo: [defined by the] embedded -> embedder [`#712`](https://github.com/rust-lang/chalk/pull/712) + +#### [v0.69.0](https://github.com/rust-lang/chalk/compare/v0.68.0...v0.69.0) + +> 6 June 2021 + +#### [v0.68.0](https://github.com/rust-lang/chalk/compare/v0.67.0...v0.68.0) + +> 30 May 2021 + +#### [v0.67.0](https://github.com/rust-lang/chalk/compare/v0.65.0...v0.67.0) + +> 23 May 2021 + +- Mentions to Goal in major_concepts are meant to be DomainGoal [`#707`](https://github.com/rust-lang/chalk/pull/707) +- fix typo (Deterermine->Determine) in docs [`#705`](https://github.com/rust-lang/chalk/pull/705) +- Bump version to 0.67.0 [`#706`](https://github.com/rust-lang/chalk/pull/706) +- fix(chalk-recursive): allow chalk-solve's default-features to be disabled [`#703`](https://github.com/rust-lang/chalk/pull/703) +- Unignore a few tests. 
[`#702`](https://github.com/rust-lang/chalk/pull/702) +- Small fixes on engine [`#704`](https://github.com/rust-lang/chalk/pull/704) + +#### [v0.65.0](https://github.com/rust-lang/chalk/compare/v0.64.0...v0.65.0) + +> 25 April 2021 + +- Show -> Display [`#701`](https://github.com/rust-lang/chalk/pull/701) +- Coinduction handling for recursive solver [`#698`](https://github.com/rust-lang/chalk/pull/698) + +#### [v0.64.0](https://github.com/rust-lang/chalk/compare/v0.63.0...v0.64.0) + +> 11 April 2021 + +- Add Fold binders depth example [`#699`](https://github.com/rust-lang/chalk/pull/699) + +#### [v0.63.0](https://github.com/rust-lang/chalk/compare/v0.62.0...v0.63.0) + +> 28 March 2021 + +- Add a couple of examples on the generator section of the book [`#697`](https://github.com/rust-lang/chalk/pull/697) +- Properly link Rustc's TyKind [`#696`](https://github.com/rust-lang/chalk/pull/696) +- Fix some book typos [`#695`](https://github.com/rust-lang/chalk/pull/695) + +#### [v0.62.0](https://github.com/rust-lang/chalk/compare/v0.61.0...v0.62.0) + +> 21 March 2021 + +- Implement CastTo<WhereClause> for TypeOutlives [`#693`](https://github.com/rust-lang/chalk/pull/693) + +#### [v0.61.0](https://github.com/rust-lang/chalk/compare/v0.60.0...v0.61.0) + +> 14 March 2021 + +- fix typo types.md [`#692`](https://github.com/rust-lang/chalk/pull/692) +- fix typo [`#691`](https://github.com/rust-lang/chalk/pull/691) + +#### [v0.60.0](https://github.com/rust-lang/chalk/compare/v0.59.0...v0.60.0) + +> 7 March 2021 + +#### [v0.59.0](https://github.com/rust-lang/chalk/compare/v0.58.0...v0.59.0) 
+ +> 28 February 2021 + +- Bump dependencies and propagate tracing features [`#687`](https://github.com/rust-lang/chalk/pull/687) + +#### [v0.58.0](https://github.com/rust-lang/chalk/compare/v0.57.0...v0.58.0) + +> 21 February 2021 + +- Generate clauses for super traits of opaque bounds [`#681`](https://github.com/rust-lang/chalk/pull/681) + +#### [v0.57.0](https://github.com/rust-lang/chalk/compare/v0.56.0...v0.57.0) + +> 14 February 2021 + +- add Movability to Generator [`#685`](https://github.com/rust-lang/chalk/pull/685) +- Opaque type cleanup [`#684`](https://github.com/rust-lang/chalk/pull/684) + +#### [v0.56.0](https://github.com/rust-lang/chalk/compare/v0.55.0...v0.56.0) + +> 7 February 2021 + +- Compute flags in interner [`#682`](https://github.com/rust-lang/chalk/pull/682) + +#### [v0.55.0](https://github.com/rust-lang/chalk/compare/v0.52.0...v0.55.0) + +> 1 February 2021 + +- No environment in Constraints [`#680`](https://github.com/rust-lang/chalk/pull/680) + +#### [v0.52.0](https://github.com/rust-lang/chalk/compare/v0.51.0...v0.52.0) + +> 31 January 2021 -# Procedure to cut a release +- mdbook fix [`#679`](https://github.com/rust-lang/chalk/pull/679) +- Subst canonical environment clauses [`#671`](https://github.com/rust-lang/chalk/pull/671) +- Some various bits of cleanup [`#661`](https://github.com/rust-lang/chalk/pull/661) -Should make a script or something, but: +#### [v0.51.0](https://github.com/rust-lang/chalk/compare/v0.50.0...v0.51.0) -``` -> // update version numbers -> cd chalk-macros -> cargo publish -> cd ../chalk-ngine -> cargo publish -> git tag chalk-engine-vXXX -``` +> 24 January 2021 +#### 
[v0.50.0](https://github.com/rust-lang/chalk/compare/v0.49.0...v0.50.0) + +> 20 January 2021 + +- Generalize with variance [`#673`](https://github.com/rust-lang/chalk/pull/673) +- Remove dead code and re-enable functions tests [`#664`](https://github.com/rust-lang/chalk/pull/664) + +#### [v0.49.0](https://github.com/rust-lang/chalk/compare/v0.48.0...v0.49.0) + +> 17 January 2021 + +- Update WellFormed-AssocTy rule in book [`#675`](https://github.com/rust-lang/chalk/pull/675) + +#### [v0.48.0](https://github.com/rust-lang/chalk/compare/v0.47.0...v0.48.0) + +> 10 January 2021 + +- Deduplicate solution enum in chalk-recursive [`#674`](https://github.com/rust-lang/chalk/pull/674) +- Update mdbook [`#672`](https://github.com/rust-lang/chalk/pull/672) + +#### [v0.47.0](https://github.com/rust-lang/chalk/compare/v0.46.0...v0.47.0) + +> 3 January 2021 + +- fix: small typo error [`#669`](https://github.com/rust-lang/chalk/pull/669) + +#### [v0.46.0](https://github.com/rust-lang/chalk/compare/v0.45.0...v0.46.0) + +> 27 December 2020 + +- Update occurences of outdated ProjectionEq predicate in Chalk book [`#668`](https://github.com/rust-lang/chalk/pull/668) + +#### [v0.45.0](https://github.com/rust-lang/chalk/compare/v0.43.0...v0.45.0) + +> 20 December 2020 + +- Optimize `Fold` impls for types on the heap [`#662`](https://github.com/rust-lang/chalk/pull/662) +- Add TypeFlags for TyKind in chalk-ir [`#639`](https://github.com/rust-lang/chalk/pull/639) +- Update mermaid.min.js to fix mermaid graphs [`#663`](https://github.com/rust-lang/chalk/pull/663) +- Pass canonical goals to program clauses code in SLG solver 
[`#624`](https://github.com/rust-lang/chalk/pull/624) +- Add `DiscriminantKind` builtin trait [`#633`](https://github.com/rust-lang/chalk/pull/633) + +#### [v0.43.0](https://github.com/rust-lang/chalk/compare/v0.41.0...v0.43.0) + +> 6 December 2020 + +- Always relate with Invariant to non-General inference vars [`#659`](https://github.com/rust-lang/chalk/pull/659) +- Fold by value, not by reference [`#660`](https://github.com/rust-lang/chalk/pull/660) + +#### [v0.41.0](https://github.com/rust-lang/chalk/compare/v0.40.0...v0.41.0) + +> 29 November 2020 + +- Add "Recursive solver coinduction chapter" to book todo list [`#657`](https://github.com/rust-lang/chalk/pull/657) +- Fix handling of variables in goal for new alias clauses [`#656`](https://github.com/rust-lang/chalk/pull/656) +- Make Variances Debug [`#655`](https://github.com/rust-lang/chalk/pull/655) + +#### [v0.40.0](https://github.com/rust-lang/chalk/compare/v0.39.0...v0.40.0) + +> 22 November 2020 + +- Fix boats' coherence talk link in the book [`#654`](https://github.com/rust-lang/chalk/pull/654) +- Introduce `Visitor::BreakTy` [`#651`](https://github.com/rust-lang/chalk/pull/651) +- Hyperlink protocol changed: HTTP -> HTTPS (#1) [`#652`](https://github.com/rust-lang/chalk/pull/652) +- Hyperlink protocol changed: HTTP -> HTTPS [`#1`](https://github.com/rust-lang/chalk/pull/1) + +#### [v0.39.0](https://github.com/rust-lang/chalk/compare/v0.37.0...v0.39.0) + +> 15 November 2020 + +- Bump version to 0.39.0-dev.0 [`#653`](https://github.com/rust-lang/chalk/pull/653) +- Clauses cleanup [`#638`](https://github.com/rust-lang/chalk/pull/638) +- 
Variance [`#609`](https://github.com/rust-lang/chalk/pull/609) +- Fix "var_universe invoked on bound variable" crash [`#649`](https://github.com/rust-lang/chalk/pull/649) +- Add empty and erased regions [`#650`](https://github.com/rust-lang/chalk/pull/650) +- Use `ControlFlow` in `Visitor` [`#645`](https://github.com/rust-lang/chalk/pull/645) +- Remove `TargetInterner` [`#648`](https://github.com/rust-lang/chalk/pull/648) +- Make max goal size for recursive solver configurable [`#647`](https://github.com/rust-lang/chalk/pull/647) + +#### [v0.37.0](https://github.com/rust-lang/chalk/compare/v0.36.0...v0.37.0) + +> 1 November 2020 + +- Program printer: Use Debug to print concrete consts [`#641`](https://github.com/rust-lang/chalk/pull/641) + +#### [v0.36.0](https://github.com/rust-lang/chalk/compare/v0.35.0...v0.36.0) + +> 29 October 2020 + +- Replace bound lifetime variables with inference variables in AntiUnifier [`#640`](https://github.com/rust-lang/chalk/pull/640) + +#### [v0.35.0](https://github.com/rust-lang/chalk/compare/v0.34.0...v0.35.0) + +> 25 October 2020 + +- Fix glossary links [`#635`](https://github.com/rust-lang/chalk/pull/635) +- rewrite parts concerning typename, fix links [`#634`](https://github.com/rust-lang/chalk/pull/634) +- Document notation [`#630`](https://github.com/rust-lang/chalk/pull/630) +- Format parse errors better [`#632`](https://github.com/rust-lang/chalk/pull/632) +- Remove TypeName and merge into TyKind [`#629`](https://github.com/rust-lang/chalk/pull/629) +- Rename TyData to TyKind [`#628`](https://github.com/rust-lang/chalk/pull/628) + +#### 
[v0.34.0](https://github.com/rust-lang/chalk/compare/v0.33.0...v0.34.0) + +> 18 October 2020 + +- Handle cached answers correctly in any_future_answer [`#626`](https://github.com/rust-lang/chalk/pull/626) +- Don't add wf clauses for function args [`#625`](https://github.com/rust-lang/chalk/pull/625) + +#### [v0.33.0](https://github.com/rust-lang/chalk/compare/v0.32.0...v0.33.0) + +> 11 October 2020 + +#### [v0.32.0](https://github.com/rust-lang/chalk/compare/v0.31.0...v0.32.0) + +> 7 October 2020 + +- Don't panic on unimplemented types for auto traits [`#622`](https://github.com/rust-lang/chalk/pull/622) +- Update book and add coherence chapter [`#623`](https://github.com/rust-lang/chalk/pull/623) +- Fix mdbook links after chalk-engine changes [`#620`](https://github.com/rust-lang/chalk/pull/620) +- Parse opaque types without bounds [`#619`](https://github.com/rust-lang/chalk/pull/619) + +#### [v0.31.0](https://github.com/rust-lang/chalk/compare/v0.30.0...v0.31.0) + +> 4 October 2020 + +- Add static lifetime [`#617`](https://github.com/rust-lang/chalk/pull/617) +- Support fundamental types with multiple type parameters [`#616`](https://github.com/rust-lang/chalk/pull/616) +- Remove Context trait from chalk-engine [`#611`](https://github.com/rust-lang/chalk/pull/611) +- Implement generators [`#593`](https://github.com/rust-lang/chalk/pull/593) + +#### [v0.30.0](https://github.com/rust-lang/chalk/compare/v0.29.0...v0.30.0) + +> 28 September 2020 + +- Fix assertion failure during recursive solving [`#613`](https://github.com/rust-lang/chalk/pull/613) + +#### 
[v0.29.0](https://github.com/rust-lang/chalk/compare/v0.28.0...v0.29.0) + +> 27 September 2020 + +#### [v0.28.0](https://github.com/rust-lang/chalk/compare/v0.27.0...v0.28.0) + +> 20 September 2020 + +- Add CoerceUnsized builtin rules [`#607`](https://github.com/rust-lang/chalk/pull/607) +- Extend push_auto_trait_impl to built-in types [`#612`](https://github.com/rust-lang/chalk/pull/612) +- fix debug print of pointer types [`#608`](https://github.com/rust-lang/chalk/pull/608) + +#### [v0.27.0](https://github.com/rust-lang/chalk/compare/v0.26.0...v0.27.0) + +> 13 September 2020 + +- More lowering improvements [`#606`](https://github.com/rust-lang/chalk/pull/606) +- Extend well-known traits by Unpin [`#603`](https://github.com/rust-lang/chalk/pull/603) +- Simplify lowering [`#602`](https://github.com/rust-lang/chalk/pull/602) + +#### [v0.26.0](https://github.com/rust-lang/chalk/compare/v0.25.0...v0.26.0) + +> 6 September 2020 + +- Extern types [`#601`](https://github.com/rust-lang/chalk/pull/601) +- Check well-formedness of opaque type declarations [`#579`](https://github.com/rust-lang/chalk/pull/579) +- Refactor FnPointer and FnDef [`#599`](https://github.com/rust-lang/chalk/pull/599) +- document opaque types [`#559`](https://github.com/rust-lang/chalk/pull/559) +- Fix links in chalk book [`#600`](https://github.com/rust-lang/chalk/pull/600) +- Properly unify consts with vars [`#598`](https://github.com/rust-lang/chalk/pull/598) + +#### [v0.25.0](https://github.com/rust-lang/chalk/compare/v0.24.0...v0.25.0) + +> 30 August 2020 + +- Guard against infinite loop in recursive solver 
[`#569`](https://github.com/rust-lang/chalk/pull/569) + +#### [v0.24.0](https://github.com/rust-lang/chalk/compare/v0.23.0...v0.24.0) + +> 23 August 2020 + +#### [v0.23.0](https://github.com/rust-lang/chalk/compare/v0.22.0...v0.23.0) + +> 16 August 2020 + +- Update dependencies [`#594`](https://github.com/rust-lang/chalk/pull/594) + +#### [v0.22.0](https://github.com/rust-lang/chalk/compare/v0.21.0...v0.22.0) + +> 9 August 2020 + +- Turn a few generics into dynamic dispatch. [`#592`](https://github.com/rust-lang/chalk/pull/592) + +#### [v0.21.0](https://github.com/rust-lang/chalk/compare/v0.20.0...v0.21.0) + +> 4 August 2020 + +- Handle well known traits for more types [`#591`](https://github.com/rust-lang/chalk/pull/591) +- .chalk syntax writer: Fix name de-duplication bug [`#585`](https://github.com/rust-lang/chalk/pull/585) + +#### [v0.20.0](https://github.com/rust-lang/chalk/compare/v0.19.0...v0.20.0) + +> 2 August 2020 + +- Don't ever consider cycles until we've started accepting ambiguous answers [`#588`](https://github.com/rust-lang/chalk/pull/588) +- Upgrade salsa [`#581`](https://github.com/rust-lang/chalk/pull/581) +- Add default `RustIrDatabase::*_name` impls [`#575`](https://github.com/rust-lang/chalk/pull/575) + +#### [v0.19.0](https://github.com/rust-lang/chalk/compare/v0.18.0...v0.19.0) + +> 26 July 2020 + +- Fix clippy warnings [`#577`](https://github.com/rust-lang/chalk/pull/577) +- Refactor and flesh out Fn pointers [`#573`](https://github.com/rust-lang/chalk/pull/573) +- Create single Github actions job to gate bors on [`#582`](https://github.com/rust-lang/chalk/pull/582) +- 
Remove `force_impl_for` from `RustIrDatabase` [`#583`](https://github.com/rust-lang/chalk/pull/583) +- Don't fail CI if only nightly fails [`#580`](https://github.com/rust-lang/chalk/pull/580) +- Add workflow_dispatch for manual publish [`#578`](https://github.com/rust-lang/chalk/pull/578) +- Support where clauses on opaque types [`#563`](https://github.com/rust-lang/chalk/pull/563) +- Upgrade everything except Salsa [`#576`](https://github.com/rust-lang/chalk/pull/576) + +#### [v0.18.0](https://github.com/rust-lang/chalk/compare/v0.17.0...v0.18.0) + +> 19 July 2020 + +- Only generate clauses requiring Compatible when it is in the environment [`#566`](https://github.com/rust-lang/chalk/pull/566) +- Cleanup & comment .chalk file writer tests [`#570`](https://github.com/rust-lang/chalk/pull/570) + +#### [v0.17.0](https://github.com/rust-lang/chalk/compare/v0.16.0...v0.17.0) + +> 12 July 2020 + +- Add readme for chalk-recursive [`#572`](https://github.com/rust-lang/chalk/pull/572) +- Don't flounder on int/float vars [`#555`](https://github.com/rust-lang/chalk/pull/555) +- Fix associated type bindings in dyn types [`#567`](https://github.com/rust-lang/chalk/pull/567) +- Remove unit fields from enum variants [`#565`](https://github.com/rust-lang/chalk/pull/565) +- Fix links in chalk book [`#564`](https://github.com/rust-lang/chalk/pull/564) +- Separate recursive solver [`#546`](https://github.com/rust-lang/chalk/pull/546) +- Upgrade dependencies (incl. 
Salsa) [`#553`](https://github.com/rust-lang/chalk/pull/553) + +#### [v0.16.0](https://github.com/rust-lang/chalk/compare/v0.15.0...v0.16.0) + +> 5 July 2020 + +- Cleanup comments in syntax writer [`#558`](https://github.com/rust-lang/chalk/pull/558) +- Further reduce interned type boilerplate [`#554`](https://github.com/rust-lang/chalk/pull/554) +- Allow printing lifetime placeholders [`#557`](https://github.com/rust-lang/chalk/pull/557) +- Support for ADTs [`#524`](https://github.com/rust-lang/chalk/pull/524) +- Add type outlives goal [`#551`](https://github.com/rust-lang/chalk/pull/551) +- Incorporate region constraints into program clauses [`#543`](https://github.com/rust-lang/chalk/pull/543) +- Feature gate heavy `tracing` dependencies [`#549`](https://github.com/rust-lang/chalk/pull/549) +- Cleanup function names in `chalk-ir` [`#545`](https://github.com/rust-lang/chalk/pull/545) +- Use tracing-tree for logging / clean up debug output [`#544`](https://github.com/rust-lang/chalk/pull/544) +- Fix coherence issue with associated types in generic bound [`#538`](https://github.com/rust-lang/chalk/pull/538) +- .chalk file syntax writer [`#430`](https://github.com/rust-lang/chalk/pull/430) + +#### [v0.15.0](https://github.com/rust-lang/chalk/compare/v0.14.0...v0.15.0) + +> 28 June 2020 + +- Fix built-in Fn impls when generics are involved [`#541`](https://github.com/rust-lang/chalk/pull/541) +- Add method to get repr data of an ADT to ChalkDatabase [`#523`](https://github.com/rust-lang/chalk/pull/523) +- Add more Copy implementations [`#539`](https://github.com/rust-lang/chalk/pull/539) +- Refactor 
`LifetimeOutlives` goals to produce `AddRegionConstraint` subgoals [`#527`](https://github.com/rust-lang/chalk/pull/527) +- Document all public items in chalk-ir [`#536`](https://github.com/rust-lang/chalk/pull/536) +- Typo in book [`#537`](https://github.com/rust-lang/chalk/pull/537) +- Support logs in lowering tests [`#535`](https://github.com/rust-lang/chalk/pull/535) + +#### [v0.14.0](https://github.com/rust-lang/chalk/compare/v0.13.0...v0.14.0) + +> 21 June 2020 + +- Change README to be more clear [`#534`](https://github.com/rust-lang/chalk/pull/534) +- Ignore auto traits order [`#531`](https://github.com/rust-lang/chalk/pull/531) +- Add closures [`#519`](https://github.com/rust-lang/chalk/pull/519) +- Introduce Tracing into Chalk (Third Time's the Charm) [`#525`](https://github.com/rust-lang/chalk/pull/525) +- Ucanonicalization fix [`#529`](https://github.com/rust-lang/chalk/pull/529) +- Fix recursive solver completeness example to correctly fail [`#528`](https://github.com/rust-lang/chalk/pull/528) + +#### [v0.13.0](https://github.com/rust-lang/chalk/compare/v0.11.0...v0.13.0) + +> 14 June 2020 + +- Refactor ProgramClauseData to remove Implies variant [`#514`](https://github.com/rust-lang/chalk/pull/514) +- new version: 0.13.0-dev.0 [`#522`](https://github.com/rust-lang/chalk/pull/522) +- Recursive solver factoring and privacy [`#513`](https://github.com/rust-lang/chalk/pull/513) +- complete wf checks for Copy impls [`#503`](https://github.com/rust-lang/chalk/pull/503) +- Model function ABI in the Rust IR [`#481`](https://github.com/rust-lang/chalk/pull/481) +- add `Unsize` trait 
implementation [`#427`](https://github.com/rust-lang/chalk/pull/427) +- add phantom_data adt flag [`#502`](https://github.com/rust-lang/chalk/pull/502) +- Add FnOnce trait, and provide impl for Function type [`#494`](https://github.com/rust-lang/chalk/pull/494) +- remove IntoIterator impl for &Binders [`#501`](https://github.com/rust-lang/chalk/pull/501) +- Use `pretty_assertions` crate when comparing expected and actual output [`#495`](https://github.com/rust-lang/chalk/pull/495) +- Fix typo in Implied Bounds book chapter [`#497`](https://github.com/rust-lang/chalk/pull/497) +- rename `WellKnownTrait::<TraitName>Trait` to `WellKnownTrait::<TraitName>` [`#500`](https://github.com/rust-lang/chalk/pull/500) +- Fix link to Minimums in recursive solver documentation [`#493`](https://github.com/rust-lang/chalk/pull/493) +- Refactor the recursive solver a bit [`#487`](https://github.com/rust-lang/chalk/pull/487) +- Add binders around inputs/outputs for FnDefDatum [`#489`](https://github.com/rust-lang/chalk/pull/489) +- add lifetimes to trait objects [`#492`](https://github.com/rust-lang/chalk/pull/492) +- update example trait name [`#490`](https://github.com/rust-lang/chalk/pull/490) +- words [`#491`](https://github.com/rust-lang/chalk/pull/491) +- document recursive solver [`#488`](https://github.com/rust-lang/chalk/pull/488) + +#### [v0.11.0](https://github.com/rust-lang/chalk/compare/v0.10.0...v0.11.0) + +> 2 June 2020 + +- Update publish.yml [`#485`](https://github.com/rust-lang/chalk/pull/485) +- Update publish.yml [`#484`](https://github.com/rust-lang/chalk/pull/484) +- Get ready for the first 
publish [`#483`](https://github.com/rust-lang/chalk/pull/483) +- Correct mistake in publishing [`#482`](https://github.com/rust-lang/chalk/pull/482) +- Add lifetime outlives goal [`#451`](https://github.com/rust-lang/chalk/pull/451) +- Added scheduled release workflow [`#461`](https://github.com/rust-lang/chalk/pull/461) +- Actually fix tests [`#480`](https://github.com/rust-lang/chalk/pull/480) +- Remove chalk-base [`#479`](https://github.com/rust-lang/chalk/pull/479) +- Add function def clauses [`#477`](https://github.com/rust-lang/chalk/pull/477) +- Include more guidance in ambiguous results [`#433`](https://github.com/rust-lang/chalk/pull/433) +- Request hidden opaque types lazily [`#478`](https://github.com/rust-lang/chalk/pull/478) +- Integer and float variable kinds [`#470`](https://github.com/rust-lang/chalk/pull/470) +- Fix solving <impl Trait as Trait>::AssocType [`#473`](https://github.com/rust-lang/chalk/pull/473) +- Generate auto trait clauses for opaque type goals [`#468`](https://github.com/rust-lang/chalk/pull/468) +- Remove unused StackIndex from chalk-engine. 
[`#474`](https://github.com/rust-lang/chalk/pull/474) +- Use chain and filter instead of extending a vec in program_clauses_fo… [`#476`](https://github.com/rust-lang/chalk/pull/476) +- Make chalk-engine depend on chalk-ir [`#472`](https://github.com/rust-lang/chalk/pull/472) +- Add array types to `TypeName` [`#467`](https://github.com/rust-lang/chalk/pull/467) +- Fix references to chalk_rust_ir [`#471`](https://github.com/rust-lang/chalk/pull/471) +- Cleanup crate structure and add features for SLG/recursive solvers [`#459`](https://github.com/rust-lang/chalk/pull/459) +- Check for trivial cycles in persue_answer [`#469`](https://github.com/rust-lang/chalk/pull/469) +- Add never type to `TypeName` [`#466`](https://github.com/rust-lang/chalk/pull/466) +- Opaque Type Generics [`#464`](https://github.com/rust-lang/chalk/pull/464) +- Issue260 [`#462`](https://github.com/rust-lang/chalk/pull/462) +- Handle bound vars in builtin trait impls [`#463`](https://github.com/rust-lang/chalk/pull/463) +- Fix broken link [`#465`](https://github.com/rust-lang/chalk/pull/465) +- const generics support [`#393`](https://github.com/rust-lang/chalk/pull/393) +- Add `FnDef` to `TypeName` [`#449`](https://github.com/rust-lang/chalk/pull/449) +- Rename struct types to ADT types and introduce new interned ADT ID [`#454`](https://github.com/rust-lang/chalk/pull/454) +- refactor Parameter into GenericArg [`#455`](https://github.com/rust-lang/chalk/pull/455) +- Bump most dependency crate version. 
[`#458`](https://github.com/rust-lang/chalk/pull/458) +- Make lowering failure tests less fragile [`#448`](https://github.com/rust-lang/chalk/pull/448) +- Fix overeager cycle detection in dyn impl clause generation [`#446`](https://github.com/rust-lang/chalk/pull/446) +- Upgrade book linkcheck warnings to errors [`#450`](https://github.com/rust-lang/chalk/pull/450) +- Add the str type [`#441`](https://github.com/rust-lang/chalk/pull/441) +- Mutable references should not be Copy/Clone [`#447`](https://github.com/rust-lang/chalk/pull/447) +- Add ObjectSafe goal and flag [`#445`](https://github.com/rust-lang/chalk/pull/445) +- Add slice type to `TypeName` [`#443`](https://github.com/rust-lang/chalk/pull/443) +- Add reference types to `TypeName` [`#444`](https://github.com/rust-lang/chalk/pull/444) +- Add raw pointers to `TypeName` [`#442`](https://github.com/rust-lang/chalk/pull/442) +- Fix outdated dev guide links [`#436`](https://github.com/rust-lang/chalk/pull/436) +- add WellKnownTraits chapter to the book [`#428`](https://github.com/rust-lang/chalk/pull/428) +- Add publishing steps to book [`#422`](https://github.com/rust-lang/chalk/pull/422) +- Replace lalrpop-intern with string_cache [`#432`](https://github.com/rust-lang/chalk/pull/432) +- Make `dyn Trait` implement its super traits as well [`#415`](https://github.com/rust-lang/chalk/pull/415) +- convert from lifetime equality constraints to outlives constraints [`#419`](https://github.com/rust-lang/chalk/pull/419) +- Fix broken links in "What is Chalk?" 
and "Walkthrough" [`#426`](https://github.com/rust-lang/chalk/pull/426) +- Fix mdBook plugins not loading in CI [`#424`](https://github.com/rust-lang/chalk/pull/424) +- Release 0.10.0 [`#421`](https://github.com/rust-lang/chalk/pull/421) + + + +#### [v0.10.0](https://github.com/rust-lang/chalk/compare/chalk-engine-v0.9.0...v0.10.0) + +> 30 April 2020 + +- Too many changes to list + +#### [v0.9.0](https://github.com/rust-lang/chalk/compare/e9c0a34dff162848f361f0f54ca3aab750ff4683...chalk-engine-v0.9.0) + +> 17 December 2018 + +- Added the variance parameter + +#### Releases [0.6.0 .. 0.8.1](https://github.com/rust-lang/chalk/compare/chalk-engine-v0.5.0...e9c0a34dff162848f361f0f54ca3aab750ff4683) + +> 24 May 2018 to 31 October 2018 + +Forgot to write release notes =) + +#### [v0.5.0](https://github.com/rust-lang/chalk/compare/chalk-engine-v0.4.0...chalk-engine-v0.5.0) + +> 21 May 2018 + +* Pare down to very few dependencies, and make even those optional. + +#### [v0.4.0](https://github.com/rust-lang/chalk/compare/85c4226c8629991f502e9b3268699e1d4c6f9348...chalk-engine-v0.4.0) + +> 21 May 2018 + +* Tweak various things about the traits to aid in rustc integration. + +#### [v0.2.0](https://github.com/rust-lang/chalk/compare/87b6dab8c271ac3b66fcc156242911a6000daf89...b4c3b919d32141f2f53f554730ef41211f5b5d5a) + +> 27 March 2018 + +* Remove some pointless traits from Chalk engine context. + +#### [v0.1.0](https://github.com/rust-lang/chalk/commit/87b6dab8c271ac3b66fcc156242911a6000daf89) + +> 22 March 2018 + +Initial release. 
diff --git a/book/.gitignore b/book/.gitignore new file mode 100644 index 00000000000..7585238efed --- /dev/null +++ b/book/.gitignore @@ -0,0 +1 @@ +book diff --git a/book/book.toml b/book/book.toml new file mode 100644 index 00000000000..100f7062b67 --- /dev/null +++ b/book/book.toml @@ -0,0 +1,26 @@ +[book] +authors = [] +language = "en" +multilingual = false +src = "src" + +[preprocessor.mermaid] +command = "mdbook-mermaid" +renderer = ["html"] + +[output.html] +additional-css = ["mermaid.css"] +additional-js = ["mermaid.min.js", "mermaid-init.js"] + +[output.linkcheck] +follow-web-links = true +warning-policy = "error" +optional = true +exclude = [ + # This even returns 403 on real browsers with Cloudflare's verify human challenge + 'dl\.acm\.org', +] + +# Workaround for GitHub docs returning 403 response: https://github.com/github/docs/issues/17358 +[output.linkcheck.http-headers] +"https://help.github.com" = ["accept-encoding: deflate, gzip, br"] diff --git a/book/mermaid-init.js b/book/mermaid-init.js new file mode 100644 index 00000000000..6f7eb8554b8 --- /dev/null +++ b/book/mermaid-init.js @@ -0,0 +1,4 @@ +mermaid.initialize({ + startOnLoad: true, + theme: ['coal', 'navy', 'ayu'].includes(theme) ? 
'dark' : 'default', +}); diff --git a/book/mermaid.css b/book/mermaid.css new file mode 100644 index 00000000000..74de2c11108 --- /dev/null +++ b/book/mermaid.css @@ -0,0 +1,351 @@ +/* Flowchart variables */ +/* Sequence Diagram variables */ +/* Gantt chart variables */ +.mermaid .mermaid .label { + color: #333; +} +.mermaid .node rect, +.mermaid .node circle, +.mermaid .node ellipse, +.mermaid .node polygon { + fill: #ECECFF; + stroke: #CCCCFF; + stroke-width: 1px; +} +.mermaid .arrowheadPath { + fill: #333333; +} +.mermaid .edgePath .path { + stroke: #333333; +} +.mermaid .edgeLabel { + background-color: #e8e8e8; +} +.mermaid .cluster rect { + fill: #ffffde !important; + rx: 4 !important; + stroke: #aaaa33 !important; + stroke-width: 1px !important; +} +.mermaid .cluster text { + fill: #333; +} +.mermaid .actor { + stroke: #CCCCFF; + fill: #ECECFF; +} +.mermaid text.actor { + fill: black; + stroke: none; +} +.mermaid .actor-line { + stroke: grey; +} +.mermaid .messageLine0 { + stroke-width: 1.5; + stroke-dasharray: "2 2"; + marker-end: "url(#arrowhead)"; + stroke: #333; +} +.mermaid .messageLine1 { + stroke-width: 1.5; + stroke-dasharray: "2 2"; + stroke: #333; +} +.mermaid #arrowhead { + fill: #333; +} +.mermaid #crosshead path { + fill: #333 !important; + stroke: #333 !important; +} +.mermaid .messageText { + fill: #333; + stroke: none; +} +.mermaid .labelBox { + stroke: #CCCCFF; + fill: #ECECFF; +} +.mermaid .labelText { + fill: black; + stroke: none; +} +.mermaid .loopText { + fill: black; + stroke: none; +} +.mermaid .loopLine { + stroke-width: 2; + stroke-dasharray: "2 2"; + marker-end: "url(#arrowhead)"; + stroke: #CCCCFF; +} +.mermaid .note { + stroke: #aaaa33; + fill: #fff5ad; +} +.mermaid .noteText { + fill: black; + stroke: none; + font-family: 'trebuchet ms', verdana, arial; + font-size: 14px; +} +/** Section styling */ +.mermaid .section { + stroke: none; + opacity: 0.2; +} +.mermaid .section0 { + fill: rgba(102, 102, 255, 0.49); +} +.mermaid 
.section2 { + fill: #fff400; +} +.mermaid .section1, +.mermaid .section3 { + fill: white; + opacity: 0.2; +} +.mermaid .sectionTitle0 { + fill: #333; +} +.mermaid .sectionTitle1 { + fill: #333; +} +.mermaid .sectionTitle2 { + fill: #333; +} +.mermaid .sectionTitle3 { + fill: #333; +} +.mermaid .sectionTitle { + text-anchor: start; + font-size: 11px; + text-height: 14px; +} +/* Grid and axis */ +.mermaid .grid .tick { + stroke: lightgrey; + opacity: 0.3; + shape-rendering: crispEdges; +} +.mermaid .grid path { + stroke-width: 0; +} +/* Today line */ +.mermaid .today { + fill: none; + stroke: red; + stroke-width: 2px; +} +/* Task styling */ +/* Default task */ +.mermaid .task { + stroke-width: 2; +} +.mermaid .taskText { + text-anchor: middle; + font-size: 11px; +} +.mermaid .taskTextOutsideRight { + fill: black; + text-anchor: start; + font-size: 11px; +} +.mermaid .taskTextOutsideLeft { + fill: black; + text-anchor: end; + font-size: 11px; +} +/* Specific task settings for the sections*/ +.mermaid .taskText0, +.mermaid .taskText1, +.mermaid .taskText2, +.mermaid .taskText3 { + fill: white; +} +.mermaid .task0, +.mermaid .task1, +.mermaid .task2, +.mermaid .task3 { + fill: #8a90dd; + stroke: #534fbc; +} +.mermaid .taskTextOutside0, +.mermaid .taskTextOutside2 { + fill: black; +} +.mermaid .taskTextOutside1, +.mermaid .taskTextOutside3 { + fill: black; +} +/* Active task */ +.mermaid .active0, +.mermaid .active1, +.mermaid .active2, +.mermaid .active3 { + fill: #bfc7ff; + stroke: #534fbc; +} +.mermaid .activeText0, +.mermaid .activeText1, +.mermaid .activeText2, +.mermaid .activeText3 { + fill: black !important; +} +/* Completed task */ +.mermaid .done0, +.mermaid .done1, +.mermaid .done2, +.mermaid .done3 { + stroke: grey; + fill: lightgrey; + stroke-width: 2; +} +.mermaid .doneText0, +.mermaid .doneText1, +.mermaid .doneText2, +.mermaid .doneText3 { + fill: black !important; +} +/* Tasks on the critical line */ +.mermaid .crit0, +.mermaid .crit1, +.mermaid .crit2, 
+.mermaid .crit3 { + stroke: #ff8888; + fill: red; + stroke-width: 2; +} +.mermaid .activeCrit0, +.mermaid .activeCrit1, +.mermaid .activeCrit2, +.mermaid .activeCrit3 { + stroke: #ff8888; + fill: #bfc7ff; + stroke-width: 2; +} +.mermaid .doneCrit0, +.mermaid .doneCrit1, +.mermaid .doneCrit2, +.mermaid .doneCrit3 { + stroke: #ff8888; + fill: lightgrey; + stroke-width: 2; + cursor: pointer; + shape-rendering: crispEdges; +} +.mermaid .doneCritText0, +.mermaid .doneCritText1, +.mermaid .doneCritText2, +.mermaid .doneCritText3 { + fill: black !important; +} +.mermaid .activeCritText0, +.mermaid .activeCritText1, +.mermaid .activeCritText2, +.mermaid .activeCritText3 { + fill: black !important; +} +.mermaid .titleText { + text-anchor: middle; + font-size: 18px; + fill: black; +} +.mermaid g.classGroup text { + fill: #9370DB; + stroke: none; + font-family: 'trebuchet ms', verdana, arial; + font-size: 10px; +} +.mermaid g.classGroup rect { + fill: #ECECFF; + stroke: #9370DB; +} +.mermaid g.classGroup line { + stroke: #9370DB; + stroke-width: 1; +} +.mermaid svg .classLabel .box { + stroke: none; + stroke-width: 0; + fill: #ECECFF; + opacity: 0.5; +} +.mermaid svg .classLabel .label { + fill: #9370DB; + font-size: 10px; +} +.mermaid .relation { + stroke: #9370DB; + stroke-width: 1; + fill: none; +} +.mermaid .composition { + fill: #9370DB; + stroke: #9370DB; + stroke-width: 1; +} +.mermaid #compositionStart { + fill: #9370DB; + stroke: #9370DB; + stroke-width: 1; +} +.mermaid #compositionEnd { + fill: #9370DB; + stroke: #9370DB; + stroke-width: 1; +} +.mermaid .aggregation { + fill: #ECECFF; + stroke: #9370DB; + stroke-width: 1; +} +.mermaid #aggregationStart { + fill: #ECECFF; + stroke: #9370DB; + stroke-width: 1; +} +.mermaid #aggregationEnd { + fill: #ECECFF; + stroke: #9370DB; + stroke-width: 1; +} +.mermaid #dependencyStart { + fill: #9370DB; + stroke: #9370DB; + stroke-width: 1; +} +.mermaid #dependencyEnd { + fill: #9370DB; + stroke: #9370DB; + stroke-width: 1; +} 
+.mermaid #extensionStart { + fill: #9370DB; + stroke: #9370DB; + stroke-width: 1; +} +.mermaid #extensionEnd { + fill: #9370DB; + stroke: #9370DB; + stroke-width: 1; +} +.mermaid .node text { + font-family: 'trebuchet ms', verdana, arial; + font-size: 14px; +} +.mermaid div.mermaidTooltip { + position: absolute; + text-align: center; + max-width: 200px; + padding: 2px; + font-family: 'trebuchet ms', verdana, arial; + font-size: 12px; + background: #ffffde; + border: 1px solid #aaaa33; + border-radius: 2px; + pointer-events: none; + z-index: 100; +} diff --git a/book/mermaid.min.js b/book/mermaid.min.js new file mode 100644 index 00000000000..8d775e39ec4 --- /dev/null +++ b/book/mermaid.min.js @@ -0,0 +1,32 @@ +!function(t,e){"object"==typeof exports&&"object"==typeof module?module.exports=e():"function"==typeof define&&define.amd?define([],e):"object"==typeof exports?exports.mermaid=e():t.mermaid=e()}("undefined"!=typeof self?self:this,(function(){return function(t){var e={};function n(r){if(e[r])return e[r].exports;var i=e[r]={i:r,l:!1,exports:{}};return t[r].call(i.exports,i,i.exports,n),i.l=!0,i.exports}return n.m=t,n.c=e,n.d=function(t,e,r){n.o(t,e)||Object.defineProperty(t,e,{enumerable:!0,get:r})},n.r=function(t){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(t,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(t,"__esModule",{value:!0})},n.t=function(t,e){if(1&e&&(t=n(t)),8&e)return t;if(4&e&&"object"==typeof t&&t&&t.__esModule)return t;var r=Object.create(null);if(n.r(r),Object.defineProperty(r,"default",{enumerable:!0,value:t}),2&e&&"string"!=typeof t)for(var i in t)n.d(r,i,function(e){return t[e]}.bind(null,i));return r},n.n=function(t){var e=t&&t.__esModule?function(){return t.default}:function(){return t};return n.d(e,"a",e),e},n.o=function(t,e){return Object.prototype.hasOwnProperty.call(t,e)},n.p="",n(n.s=383)}([function(t,e,n){"use strict";n.r(e);var r=function(t,e){return te?1:t>=e?0:NaN},i=function(t){var e;return 
1===t.length&&(e=t,t=function(t,n){return r(e(t),n)}),{left:function(e,n,r,i){for(null==r&&(r=0),null==i&&(i=e.length);r>>1;t(e[a],n)<0?r=a+1:i=a}return r},right:function(e,n,r,i){for(null==r&&(r=0),null==i&&(i=e.length);r>>1;t(e[a],n)>0?i=a:r=a+1}return r}}};var a=i(r),o=a.right,s=a.left,c=o,u=function(t,e){null==e&&(e=l);for(var n=0,r=t.length-1,i=t[0],a=new Array(r<0?0:r);nt?1:e>=t?0:NaN},d=function(t){return null===t?NaN:+t},p=function(t,e){var n,r,i=t.length,a=0,o=-1,s=0,c=0;if(null==e)for(;++o1)return c/(a-1)},g=function(t,e){var n=p(t,e);return n?Math.sqrt(n):n},y=function(t,e){var n,r,i,a=t.length,o=-1;if(null==e){for(;++o=n)for(r=i=n;++on&&(r=n),i=n)for(r=i=n;++on&&(r=n),i0)return[t];if((r=e0)for(t=Math.ceil(t/o),e=Math.floor(e/o),a=new Array(i=Math.ceil(e-t+1));++s=0?(a>=w?10:a>=E?5:a>=T?2:1)*Math.pow(10,i):-Math.pow(10,-i)/(a>=w?10:a>=E?5:a>=T?2:1)}function A(t,e,n){var r=Math.abs(e-t)/Math.max(0,n),i=Math.pow(10,Math.floor(Math.log(r)/Math.LN10)),a=r/i;return a>=w?i*=10:a>=E?i*=5:a>=T&&(i*=2),eh;)f.pop(),--d;var p,g=new Array(d+1);for(i=0;i<=d;++i)(p=g[i]=[]).x0=i>0?f[i-1]:l,p.x1=i=1)return+n(t[r-1],r-1,t);var r,i=(r-1)*e,a=Math.floor(i),o=+n(t[a],a,t);return o+(+n(t[a+1],a+1,t)-o)*(i-a)}},N=function(t,e,n){return t=b.call(t,d).sort(r),Math.ceil((n-e)/(2*(D(t,.75)-D(t,.25))*Math.pow(t.length,-1/3)))},B=function(t,e,n){return Math.ceil((n-e)/(3.5*g(t)*Math.pow(t.length,-1/3)))},L=function(t,e){var n,r,i=t.length,a=-1;if(null==e){for(;++a=n)for(r=n;++ar&&(r=n)}else for(;++a=n)for(r=n;++ar&&(r=n);return r},P=function(t,e){var n,r=t.length,i=r,a=-1,o=0;if(null==e)for(;++a=0;)for(e=(r=t[i]).length;--e>=0;)n[--o]=r[e];return n},j=function(t,e){var n,r,i=t.length,a=-1;if(null==e){for(;++a=n)for(r=n;++an&&(r=n)}else for(;++a=n)for(r=n;++an&&(r=n);return r},R=function(t,e){for(var n=e.length,r=new Array(n);n--;)r[n]=t[e[n]];return r},Y=function(t,e){if(n=t.length){var 
n,i,a=0,o=0,s=t[o];for(null==e&&(e=r);++a=0&&(n=t.slice(r+1),t=t.slice(0,r)),t&&!e.hasOwnProperty(t))throw new Error("unknown type: "+t);return{type:t,name:n}}))}function ct(t,e){for(var n,r=0,i=t.length;r0)for(var n,r,i=new Array(n),a=0;ae?1:t>=e?0:NaN}var _t="http://www.w3.org/1999/xhtml",kt={svg:"http://www.w3.org/2000/svg",xhtml:_t,xlink:"http://www.w3.org/1999/xlink",xml:"http://www.w3.org/XML/1998/namespace",xmlns:"http://www.w3.org/2000/xmlns/"},wt=function(t){var e=t+="",n=e.indexOf(":");return n>=0&&"xmlns"!==(e=t.slice(0,n))&&(t=t.slice(n+1)),kt.hasOwnProperty(e)?{space:kt[e],local:t}:t};function Et(t){return function(){this.removeAttribute(t)}}function Tt(t){return function(){this.removeAttributeNS(t.space,t.local)}}function Ct(t,e){return function(){this.setAttribute(t,e)}}function St(t,e){return function(){this.setAttributeNS(t.space,t.local,e)}}function At(t,e){return function(){var n=e.apply(this,arguments);null==n?this.removeAttribute(t):this.setAttribute(t,n)}}function Mt(t,e){return function(){var n=e.apply(this,arguments);null==n?this.removeAttributeNS(t.space,t.local):this.setAttributeNS(t.space,t.local,n)}}var Ot=function(t){return t.ownerDocument&&t.ownerDocument.defaultView||t.document&&t||t.defaultView};function Dt(t){return function(){this.style.removeProperty(t)}}function Nt(t,e,n){return function(){this.style.setProperty(t,e,n)}}function Bt(t,e,n){return function(){var r=e.apply(this,arguments);null==r?this.style.removeProperty(t):this.style.setProperty(t,r,n)}}function Lt(t,e){return t.style.getPropertyValue(e)||Ot(t).getComputedStyle(t,null).getPropertyValue(e)}function Pt(t){return function(){delete this[t]}}function Ft(t,e){return function(){this[t]=e}}function It(t,e){return function(){var n=e.apply(this,arguments);null==n?delete this[t]:this[t]=n}}function jt(t){return t.trim().split(/^|\s+/)}function Rt(t){return 
t.classList||new Yt(t)}function Yt(t){this._node=t,this._names=jt(t.getAttribute("class")||"")}function zt(t,e){for(var n=Rt(t),r=-1,i=e.length;++r=0&&(this._names.splice(e,1),this._node.setAttribute("class",this._names.join(" ")))},contains:function(t){return this._names.indexOf(t)>=0}};function Ht(){this.textContent=""}function Gt(t){return function(){this.textContent=t}}function qt(t){return function(){var e=t.apply(this,arguments);this.textContent=null==e?"":e}}function Xt(){this.innerHTML=""}function Zt(t){return function(){this.innerHTML=t}}function Jt(t){return function(){var e=t.apply(this,arguments);this.innerHTML=null==e?"":e}}function Qt(){this.nextSibling&&this.parentNode.appendChild(this)}function Kt(){this.previousSibling&&this.parentNode.insertBefore(this,this.parentNode.firstChild)}function te(t){return function(){var e=this.ownerDocument,n=this.namespaceURI;return n===_t&&e.documentElement.namespaceURI===_t?e.createElement(t):e.createElementNS(n,t)}}function ee(t){return function(){return this.ownerDocument.createElementNS(t.space,t.local)}}var ne=function(t){var e=wt(t);return(e.local?ee:te)(e)};function re(){return null}function ie(){var t=this.parentNode;t&&t.removeChild(this)}function ae(){var t=this.cloneNode(!1),e=this.parentNode;return e?e.insertBefore(t,this.nextSibling):t}function oe(){var t=this.cloneNode(!0),e=this.parentNode;return e?e.insertBefore(t,this.nextSibling):t}var se={},ce=null;"undefined"!=typeof document&&("onmouseenter"in document.documentElement||(se={mouseenter:"mouseover",mouseleave:"mouseout"}));function ue(t,e,n){return t=le(t,e,n),function(e){var n=e.relatedTarget;n&&(n===this||8&n.compareDocumentPosition(this))||t.call(this,e)}}function le(t,e,n){return function(r){var i=ce;ce=r;try{t.call(this,this.__data__,e,n)}finally{ce=i}}}function he(t){return t.trim().split(/^|\s+/).map((function(t){var e="",n=t.indexOf(".");return n>=0&&(e=t.slice(n+1),t=t.slice(0,n)),{type:t,name:e}}))}function fe(t){return function(){var 
e=this.__on;if(e){for(var n,r=0,i=-1,a=e.length;r=_&&(_=x+1);!(b=v[_])&&++_=0;)(r=i[a])&&(o&&4^r.compareDocumentPosition(o)&&o.parentNode.insertBefore(r,o),o=r);return this},sort:function(t){function e(e,n){return e&&n?t(e.__data__,n.__data__):!e-!n}t||(t=xt);for(var n=this._groups,r=n.length,i=new Array(r),a=0;a1?this.each((null==e?Dt:"function"==typeof e?Bt:Nt)(t,e,null==n?"":n)):Lt(this.node(),t)},property:function(t,e){return arguments.length>1?this.each((null==e?Pt:"function"==typeof e?It:Ft)(t,e)):this.node()[t]},classed:function(t,e){var n=jt(t+"");if(arguments.length<2){for(var r=Rt(this.node()),i=-1,a=n.length;++i>8&15|e>>4&240,e>>4&15|240&e,(15&e)<<4|15&e,1):8===n?new qe(e>>24&255,e>>16&255,e>>8&255,(255&e)/255):4===n?new qe(e>>12&15|e>>8&240,e>>8&15|e>>4&240,e>>4&15|240&e,((15&e)<<4|15&e)/255):null):(e=Le.exec(t))?new qe(e[1],e[2],e[3],1):(e=Pe.exec(t))?new qe(255*e[1]/100,255*e[2]/100,255*e[3]/100,1):(e=Fe.exec(t))?Ve(e[1],e[2],e[3],e[4]):(e=Ie.exec(t))?Ve(255*e[1]/100,255*e[2]/100,255*e[3]/100,e[4]):(e=je.exec(t))?Qe(e[1],e[2]/100,e[3]/100,1):(e=Re.exec(t))?Qe(e[1],e[2]/100,e[3]/100,e[4]):Ye.hasOwnProperty(t)?We(Ye[t]):"transparent"===t?new qe(NaN,NaN,NaN,0):null}function We(t){return new qe(t>>16&255,t>>8&255,255&t,1)}function Ve(t,e,n,r){return r<=0&&(t=e=n=NaN),new qe(t,e,n,r)}function He(t){return t instanceof Me||(t=$e(t)),t?new qe((t=t.rgb()).r,t.g,t.b,t.opacity):new qe}function Ge(t,e,n,r){return 1===arguments.length?He(t):new qe(t,e,n,null==r?1:r)}function qe(t,e,n,r){this.r=+t,this.g=+e,this.b=+n,this.opacity=+r}function Xe(){return"#"+Je(this.r)+Je(this.g)+Je(this.b)}function Ze(){var t=this.opacity;return(1===(t=isNaN(t)?1:Math.max(0,Math.min(1,t)))?"rgb(":"rgba(")+Math.max(0,Math.min(255,Math.round(this.r)||0))+", "+Math.max(0,Math.min(255,Math.round(this.g)||0))+", "+Math.max(0,Math.min(255,Math.round(this.b)||0))+(1===t?")":", "+t+")")}function Je(t){return((t=Math.max(0,Math.min(255,Math.round(t)||0)))<16?"0":"")+t.toString(16)}function 
Qe(t,e,n,r){return r<=0?t=e=n=NaN:n<=0||n>=1?t=e=NaN:e<=0&&(t=NaN),new en(t,e,n,r)}function Ke(t){if(t instanceof en)return new en(t.h,t.s,t.l,t.opacity);if(t instanceof Me||(t=$e(t)),!t)return new en;if(t instanceof en)return t;var e=(t=t.rgb()).r/255,n=t.g/255,r=t.b/255,i=Math.min(e,n,r),a=Math.max(e,n,r),o=NaN,s=a-i,c=(a+i)/2;return s?(o=e===a?(n-r)/s+6*(n0&&c<1?0:o,new en(o,s,c,t.opacity)}function tn(t,e,n,r){return 1===arguments.length?Ke(t):new en(t,e,n,null==r?1:r)}function en(t,e,n,r){this.h=+t,this.s=+e,this.l=+n,this.opacity=+r}function nn(t,e,n){return 255*(t<60?e+(n-e)*t/60:t<180?n:t<240?e+(n-e)*(240-t)/60:e)}function rn(t,e,n,r,i){var a=t*t,o=a*t;return((1-3*t+3*a-o)*e+(4-6*a+3*o)*n+(1+3*t+3*a-3*o)*r+o*i)/6}Se(Me,$e,{copy:function(t){return Object.assign(new this.constructor,this,t)},displayable:function(){return this.rgb().displayable()},hex:ze,formatHex:ze,formatHsl:function(){return Ke(this).formatHsl()},formatRgb:Ue,toString:Ue}),Se(qe,Ge,Ae(Me,{brighter:function(t){return t=null==t?1/.7:Math.pow(1/.7,t),new qe(this.r*t,this.g*t,this.b*t,this.opacity)},darker:function(t){return t=null==t?.7:Math.pow(.7,t),new qe(this.r*t,this.g*t,this.b*t,this.opacity)},rgb:function(){return this},displayable:function(){return-.5<=this.r&&this.r<255.5&&-.5<=this.g&&this.g<255.5&&-.5<=this.b&&this.b<255.5&&0<=this.opacity&&this.opacity<=1},hex:Xe,formatHex:Xe,formatRgb:Ze,toString:Ze})),Se(en,tn,Ae(Me,{brighter:function(t){return t=null==t?1/.7:Math.pow(1/.7,t),new en(this.h,this.s,this.l*t,this.opacity)},darker:function(t){return t=null==t?.7:Math.pow(.7,t),new en(this.h,this.s,this.l*t,this.opacity)},rgb:function(){var t=this.h%360+360*(this.h<0),e=isNaN(t)||isNaN(this.s)?0:this.s,n=this.l,r=n+(n<.5?n:1-n)*e,i=2*n-r;return new qe(nn(t>=240?t-240:t+120,i,r),nn(t,i,r),nn(t<120?t+240:t-120,i,r),this.opacity)},displayable:function(){return(0<=this.s&&this.s<=1||isNaN(this.s))&&0<=this.l&&this.l<=1&&0<=this.opacity&&this.opacity<=1},formatHsl:function(){var 
t=this.opacity;return(1===(t=isNaN(t)?1:Math.max(0,Math.min(1,t)))?"hsl(":"hsla(")+(this.h||0)+", "+100*(this.s||0)+"%, "+100*(this.l||0)+"%"+(1===t?")":", "+t+")")}}));var an=function(t){var e=t.length-1;return function(n){var r=n<=0?n=0:n>=1?(n=1,e-1):Math.floor(n*e),i=t[r],a=t[r+1],o=r>0?t[r-1]:2*i-a,s=r180||n<-180?n-360*Math.round(n/360):n):sn(isNaN(t)?e:t)}function ln(t){return 1==(t=+t)?hn:function(e,n){return n-e?function(t,e,n){return t=Math.pow(t,n),e=Math.pow(e,n)-t,n=1/n,function(r){return Math.pow(t+r*e,n)}}(e,n,t):sn(isNaN(e)?n:e)}}function hn(t,e){var n=e-t;return n?cn(t,n):sn(isNaN(t)?e:t)}var fn=function t(e){var n=ln(e);function r(t,e){var r=n((t=Ge(t)).r,(e=Ge(e)).r),i=n(t.g,e.g),a=n(t.b,e.b),o=hn(t.opacity,e.opacity);return function(e){return t.r=r(e),t.g=i(e),t.b=a(e),t.opacity=o(e),t+""}}return r.gamma=t,r}(1);function dn(t){return function(e){var n,r,i=e.length,a=new Array(i),o=new Array(i),s=new Array(i);for(n=0;na&&(i=e.slice(a,i),s[o]?s[o]+=i:s[++o]=i),(n=n[0])===(r=r[0])?s[o]?s[o]+=r:s[++o]=r:(s[++o]=null,c.push({i:o,x:_n(n,r)})),a=En.lastIndex;return a=0&&e._call.call(null,t),e=e._next;--Bn}function Hn(){In=(Fn=Rn.now())+jn,Bn=Ln=0;try{Vn()}finally{Bn=0,function(){var t,e,n=Tn,r=1/0;for(;n;)n._call?(r>n._time&&(r=n._time),t=n,n=n._next):(e=n._next,n._next=null,n=t?t._next=e:Tn=e);Cn=t,qn(r)}(),In=0}}function Gn(){var t=Rn.now(),e=t-Fn;e>1e3&&(jn-=e,Fn=t)}function qn(t){Bn||(Ln&&(Ln=clearTimeout(Ln)),t-In>24?(t<1/0&&(Ln=setTimeout(Hn,t-Rn.now()-jn)),Pn&&(Pn=clearInterval(Pn))):(Pn||(Fn=Rn.now(),Pn=setInterval(Gn,1e3)),Bn=1,Yn(Hn)))}$n.prototype=Wn.prototype={constructor:$n,restart:function(t,e,n){if("function"!=typeof t)throw new TypeError("callback is not a function");n=(null==n?zn():+n)+(null==e?0:+e),this._next||Cn===this||(Cn?Cn._next=this:Tn=this,Cn=this),this._call=t,this._time=n,qn()},stop:function(){this._call&&(this._call=null,this._time=1/0,qn())}};var Xn=function(t,e,n){var r=new $n;return 
e=null==e?0:+e,r.restart((function(n){r.stop(),t(n+e)}),e,n),r},Zn=lt("start","end","cancel","interrupt"),Jn=[],Qn=function(t,e,n,r,i,a){var o=t.__transition;if(o){if(n in o)return}else t.__transition={};!function(t,e,n){var r,i=t.__transition;function a(c){var u,l,h,f;if(1!==n.state)return s();for(u in i)if((f=i[u]).name===n.name){if(3===f.state)return Xn(a);4===f.state?(f.state=6,f.timer.stop(),f.on.call("interrupt",t,t.__data__,f.index,f.group),delete i[u]):+u0)throw new Error("too late; already scheduled");return n}function tr(t,e){var n=er(t,e);if(n.state>3)throw new Error("too late; already running");return n}function er(t,e){var n=t.__transition;if(!n||!(n=n[e]))throw new Error("transition not found");return n}var nr,rr,ir,ar,or=function(t,e){var n,r,i,a=t.__transition,o=!0;if(a){for(i in e=null==e?null:e+"",a)(n=a[i]).name===e?(r=n.state>2&&n.state<5,n.state=6,n.timer.stop(),n.on.call(r?"interrupt":"cancel",t,t.__data__,n.index,n.group),delete a[i]):o=!1;o&&delete t.__transition}},sr=180/Math.PI,cr={translateX:0,translateY:0,rotate:0,skewX:0,scaleX:1,scaleY:1},ur=function(t,e,n,r,i,a){var o,s,c;return(o=Math.sqrt(t*t+e*e))&&(t/=o,e/=o),(c=t*n+e*r)&&(n-=t*c,r-=e*c),(s=Math.sqrt(n*n+r*r))&&(n/=s,r/=s,c/=s),t*r180?e+=360:e-t>180&&(t+=360),a.push({i:n.push(i(n)+"rotate(",null,r)-2,x:_n(t,e)})):e&&n.push(i(n)+"rotate("+e+r)}(a.rotate,o.rotate,s,c),function(t,e,n,a){t!==e?a.push({i:n.push(i(n)+"skewX(",null,r)-2,x:_n(t,e)}):e&&n.push(i(n)+"skewX("+e+r)}(a.skewX,o.skewX,s,c),function(t,e,n,r,a,o){if(t!==n||e!==r){var s=a.push(i(a)+"scale(",null,",",null,")");o.push({i:s-4,x:_n(t,n)},{i:s-2,x:_n(e,r)})}else 1===n&&1===r||a.push(i(a)+"scale("+n+","+r+")")}(a.scaleX,a.scaleY,o.scaleX,o.scaleY,s,c),a=o=null,function(t){for(var e,n=-1,r=c.length;++n=0&&(t=t.slice(0,e)),!t||"start"===t}))}(e)?Kn:tr;return function(){var o=a(this,t),s=o.on;s!==r&&(i=(r=s).copy()).on(e,n),o.on=i}}var Br=_e.prototype.constructor;function Lr(t){return 
function(){this.style.removeProperty(t)}}function Pr(t,e,n){return function(r){this.style.setProperty(t,e.call(this,r),n)}}function Fr(t,e,n){var r,i;function a(){var a=e.apply(this,arguments);return a!==i&&(r=(i=a)&&Pr(t,a,n)),r}return a._value=e,a}function Ir(t){return function(e){this.textContent=t.call(this,e)}}function jr(t){var e,n;function r(){var r=t.apply(this,arguments);return r!==n&&(e=(n=r)&&Ir(r)),e}return r._value=t,r}var Rr=0;function Yr(t,e,n,r){this._groups=t,this._parents=e,this._name=n,this._id=r}function zr(t){return _e().transition(t)}function Ur(){return++Rr}var $r=_e.prototype;function Wr(t){return t*t*t}function Vr(t){return--t*t*t+1}function Hr(t){return((t*=2)<=1?t*t*t:(t-=2)*t*t+2)/2}Yr.prototype=zr.prototype={constructor:Yr,select:function(t){var e=this._name,n=this._id;"function"!=typeof t&&(t=ft(t));for(var r=this._groups,i=r.length,a=new Array(i),o=0;o1&&n.name===e)return new Yr([[t]],Xr,e,+r);return null},Jr=function(t){return function(){return t}},Qr=function(t,e,n){this.target=t,this.type=e,this.selection=n};function Kr(){ce.stopImmediatePropagation()}var ti=function(){ce.preventDefault(),ce.stopImmediatePropagation()},ei={name:"drag"},ni={name:"space"},ri={name:"handle"},ii={name:"center"};function ai(t){return[+t[0],+t[1]]}function oi(t){return[ai(t[0]),ai(t[1])]}function si(t){return function(e){return Dn(e,ce.touches,t)}}var ci={name:"x",handles:["w","e"].map(yi),input:function(t,e){return null==t?null:[[+t[0],e[0][1]],[+t[1],e[1][1]]]},output:function(t){return t&&[t[0][0],t[1][0]]}},ui={name:"y",handles:["n","s"].map(yi),input:function(t,e){return null==t?null:[[e[0][0],+t[0]],[e[1][0],+t[1]]]},output:function(t){return t&&[t[0][1],t[1][1]]}},li={name:"xy",handles:["n","w","e","s","nw","ne","sw","se"].map(yi),input:function(t){return null==t?null:oi(t)},output:function(t){return 
t}},hi={overlay:"crosshair",selection:"move",n:"ns-resize",e:"ew-resize",s:"ns-resize",w:"ew-resize",nw:"nwse-resize",ne:"nesw-resize",se:"nwse-resize",sw:"nesw-resize"},fi={e:"w",w:"e",nw:"ne",ne:"nw",se:"sw",sw:"se"},di={n:"s",s:"n",nw:"sw",ne:"se",se:"ne",sw:"nw"},pi={overlay:1,selection:1,n:null,e:1,s:null,w:-1,nw:-1,ne:1,se:1,sw:-1},gi={overlay:1,selection:1,n:-1,e:null,s:1,w:null,nw:-1,ne:-1,se:1,sw:1};function yi(t){return{type:t}}function vi(){return!ce.ctrlKey&&!ce.button}function mi(){var t=this.ownerSVGElement||this;return t.hasAttribute("viewBox")?[[(t=t.viewBox.baseVal).x,t.y],[t.x+t.width,t.y+t.height]]:[[0,0],[t.width.baseVal.value,t.height.baseVal.value]]}function bi(){return navigator.maxTouchPoints||"ontouchstart"in this}function xi(t){for(;!t.__brush;)if(!(t=t.parentNode))return;return t.__brush}function _i(t){return t[0][0]===t[1][0]||t[0][1]===t[1][1]}function ki(t){var e=t.__brush;return e?e.dim.output(e.selection):null}function wi(){return Ci(ci)}function Ei(){return Ci(ui)}var Ti=function(){return Ci(li)};function Ci(t){var e,n=mi,r=vi,i=bi,a=!0,o=lt("start","brush","end"),s=6;function c(e){var n=e.property("__brush",g).selectAll(".overlay").data([yi("overlay")]);n.enter().append("rect").attr("class","overlay").attr("pointer-events","all").attr("cursor",hi.overlay).merge(n).each((function(){var t=xi(this).extent;ke(this).attr("x",t[0][0]).attr("y",t[0][1]).attr("width",t[1][0]-t[0][0]).attr("height",t[1][1]-t[0][1])})),e.selectAll(".selection").data([yi("selection")]).enter().append("rect").attr("class","selection").attr("cursor",hi.selection).attr("fill","#777").attr("fill-opacity",.3).attr("stroke","#fff").attr("shape-rendering","crispEdges");var r=e.selectAll(".handle").data(t.handles,(function(t){return t.type}));r.exit().remove(),r.enter().append("rect").attr("class",(function(t){return"handle handle--"+t.type})).attr("cursor",(function(t){return 
hi[t.type]})),e.each(u).attr("fill","none").attr("pointer-events","all").on("mousedown.brush",f).filter(i).on("touchstart.brush",f).on("touchmove.brush",d).on("touchend.brush touchcancel.brush",p).style("touch-action","none").style("-webkit-tap-highlight-color","rgba(0,0,0,0)")}function u(){var t=ke(this),e=xi(this).selection;e?(t.selectAll(".selection").style("display",null).attr("x",e[0][0]).attr("y",e[0][1]).attr("width",e[1][0]-e[0][0]).attr("height",e[1][1]-e[0][1]),t.selectAll(".handle").style("display",null).attr("x",(function(t){return"e"===t.type[t.type.length-1]?e[1][0]-s/2:e[0][0]-s/2})).attr("y",(function(t){return"s"===t.type[0]?e[1][1]-s/2:e[0][1]-s/2})).attr("width",(function(t){return"n"===t.type||"s"===t.type?e[1][0]-e[0][0]+s:s})).attr("height",(function(t){return"e"===t.type||"w"===t.type?e[1][1]-e[0][1]+s:s}))):t.selectAll(".selection,.handle").style("display","none").attr("x",null).attr("y",null).attr("width",null).attr("height",null)}function l(t,e,n){return!n&&t.__brush.emitter||new h(t,e)}function h(t,e){this.that=t,this.args=e,this.state=t.__brush,this.active=0}function f(){if((!e||ce.touches)&&r.apply(this,arguments)){var n,i,o,s,c,h,f,d,p,g,y,v=this,m=ce.target.__data__.type,b="selection"===(a&&ce.metaKey?m="overlay":m)?ei:a&&ce.altKey?ii:ri,x=t===ui?null:pi[m],_=t===ci?null:gi[m],k=xi(v),w=k.extent,E=k.selection,T=w[0][0],C=w[0][1],S=w[1][0],A=w[1][1],M=0,O=0,D=x&&_&&a&&ce.shiftKey,N=ce.touches?si(ce.changedTouches[0].identifier):Nn,B=N(v),L=B,P=l(v,arguments,!0).beforestart();"overlay"===m?(E&&(p=!0),k.selection=E=[[n=t===ui?T:B[0],o=t===ci?C:B[1]],[c=t===ui?S:n,f=t===ci?A:o]]):(n=E[0][0],o=E[0][1],c=E[1][0],f=E[1][1]),i=n,s=o,h=c,d=f;var F=ke(v).attr("pointer-events","none"),I=F.selectAll(".overlay").attr("cursor",hi[m]);if(ce.touches)P.moved=R,P.ended=z;else{var j=ke(ce.view).on("mousemove.brush",R,!0).on("mouseup.brush",z,!0);a&&j.on("keydown.brush",U,!0).on("keyup.brush",$,!0),Te(ce.view)}Kr(),or(v),u.call(v),P.start()}function 
R(){var t=N(v);!D||g||y||(Math.abs(t[0]-L[0])>Math.abs(t[1]-L[1])?y=!0:g=!0),L=t,p=!0,ti(),Y()}function Y(){var t;switch(M=L[0]-B[0],O=L[1]-B[1],b){case ni:case ei:x&&(M=Math.max(T-n,Math.min(S-c,M)),i=n+M,h=c+M),_&&(O=Math.max(C-o,Math.min(A-f,O)),s=o+O,d=f+O);break;case ri:x<0?(M=Math.max(T-n,Math.min(S-n,M)),i=n+M,h=c):x>0&&(M=Math.max(T-c,Math.min(S-c,M)),i=n,h=c+M),_<0?(O=Math.max(C-o,Math.min(A-o,O)),s=o+O,d=f):_>0&&(O=Math.max(C-f,Math.min(A-f,O)),s=o,d=f+O);break;case ii:x&&(i=Math.max(T,Math.min(S,n-M*x)),h=Math.max(T,Math.min(S,c+M*x))),_&&(s=Math.max(C,Math.min(A,o-O*_)),d=Math.max(C,Math.min(A,f+O*_)))}h0&&(n=i-M),_<0?f=d-O:_>0&&(o=s-O),b=ni,I.attr("cursor",hi.selection),Y());break;default:return}ti()}function $(){switch(ce.keyCode){case 16:D&&(g=y=D=!1,Y());break;case 18:b===ii&&(x<0?c=h:x>0&&(n=i),_<0?f=d:_>0&&(o=s),b=ri,Y());break;case 32:b===ni&&(ce.altKey?(x&&(c=h-M*x,n=i+M*x),_&&(f=d-O*_,o=s+O*_),b=ii):(x<0?c=h:x>0&&(n=i),_<0?f=d:_>0&&(o=s),b=ri),I.attr("cursor",hi[m]),Y());break;default:return}ti()}}function d(){l(this,arguments).moved()}function p(){l(this,arguments).ended()}function g(){var e=this.__brush||{selection:null};return e.extent=oi(n.apply(this,arguments)),e.dim=t,e}return c.move=function(e,n){e.selection?e.on("start.brush",(function(){l(this,arguments).beforestart().start()})).on("interrupt.brush end.brush",(function(){l(this,arguments).end()})).tween("brush",(function(){var e=this,r=e.__brush,i=l(e,arguments),a=r.selection,o=t.input("function"==typeof n?n.apply(this,arguments):n,r.extent),s=An(a,o);function c(t){r.selection=1===t&&null===o?null:s(t),u.call(e),i.brush()}return null!==a&&null!==o?c:c(1)})):e.each((function(){var e=this,r=arguments,i=e.__brush,a=t.input("function"==typeof n?n.apply(e,r):n,i.extent),o=l(e,r).beforestart();or(e),i.selection=null===a?null:a,u.call(e),o.start().brush().end()}))},c.clear=function(t){c.move(t,null)},h.prototype={beforestart:function(){return 
1==++this.active&&(this.state.emitter=this,this.starting=!0),this},start:function(){return this.starting?(this.starting=!1,this.emit("start")):this.emit("brush"),this},brush:function(){return this.emit("brush"),this},end:function(){return 0==--this.active&&(delete this.state.emitter,this.emit("end")),this},emit:function(e){pe(new Qr(c,e,t.output(this.state.selection)),o.apply,o,[e,this.that,this.args])}},c.extent=function(t){return arguments.length?(n="function"==typeof t?t:Jr(oi(t)),c):n},c.filter=function(t){return arguments.length?(r="function"==typeof t?t:Jr(!!t),c):r},c.touchable=function(t){return arguments.length?(i="function"==typeof t?t:Jr(!!t),c):i},c.handleSize=function(t){return arguments.length?(s=+t,c):s},c.keyModifiers=function(t){return arguments.length?(a=!!t,c):a},c.on=function(){var t=o.on.apply(o,arguments);return t===o?c:t},c}var Si=Math.cos,Ai=Math.sin,Mi=Math.PI,Oi=Mi/2,Di=2*Mi,Ni=Math.max;function Bi(t){return function(e,n){return t(e.source.value+e.target.value,n.source.value+n.target.value)}}var Li=function(){var t=0,e=null,n=null,r=null;function i(i){var a,o,s,c,u,l,h=i.length,f=[],d=k(h),p=[],g=[],y=g.groups=new Array(h),v=new Array(h*h);for(a=0,u=-1;++u1e-6)if(Math.abs(l*s-c*u)>1e-6&&i){var f=n-a,d=r-o,p=s*s+c*c,g=f*f+d*d,y=Math.sqrt(p),v=Math.sqrt(h),m=i*Math.tan((Ii-Math.acos((p+h-g)/(2*y*v)))/2),b=m/v,x=m/y;Math.abs(b-1)>1e-6&&(this._+="L"+(t+b*u)+","+(e+b*l)),this._+="A"+i+","+i+",0,0,"+ +(l*f>u*d)+","+(this._x1=t+x*s)+","+(this._y1=e+x*c)}else this._+="L"+(this._x1=t)+","+(this._y1=e);else;},arc:function(t,e,n,r,i,a){t=+t,e=+e,a=!!a;var o=(n=+n)*Math.cos(r),s=n*Math.sin(r),c=t+o,u=e+s,l=1^a,h=a?r-i:i-r;if(n<0)throw new Error("negative radius: "+n);null===this._x1?this._+="M"+c+","+u:(Math.abs(this._x1-c)>1e-6||Math.abs(this._y1-u)>1e-6)&&(this._+="L"+c+","+u),n&&(h<0&&(h=h%ji+ji),h>Ri?this._+="A"+n+","+n+",0,1,"+l+","+(t-o)+","+(e-s)+"A"+n+","+n+",0,1,"+l+","+(this._x1=c)+","+(this._y1=u):h>1e-6&&(this._+="A"+n+","+n+",0,"+ 
+(h>=Ii)+","+l+","+(this._x1=t+n*Math.cos(i))+","+(this._y1=e+n*Math.sin(i))))},rect:function(t,e,n,r){this._+="M"+(this._x0=this._x1=+t)+","+(this._y0=this._y1=+e)+"h"+ +n+"v"+ +r+"h"+-n+"Z"},toString:function(){return this._}};var Ui=zi;function $i(t){return t.source}function Wi(t){return t.target}function Vi(t){return t.radius}function Hi(t){return t.startAngle}function Gi(t){return t.endAngle}var qi=function(){var t=$i,e=Wi,n=Vi,r=Hi,i=Gi,a=null;function o(){var o,s=Pi.call(arguments),c=t.apply(this,s),u=e.apply(this,s),l=+n.apply(this,(s[0]=c,s)),h=r.apply(this,s)-Oi,f=i.apply(this,s)-Oi,d=l*Si(h),p=l*Ai(h),g=+n.apply(this,(s[0]=u,s)),y=r.apply(this,s)-Oi,v=i.apply(this,s)-Oi;if(a||(a=o=Ui()),a.moveTo(d,p),a.arc(0,0,l,h,f),h===y&&f===v||(a.quadraticCurveTo(0,0,g*Si(y),g*Ai(y)),a.arc(0,0,g,y,v)),a.quadraticCurveTo(0,0,d,p),a.closePath(),o)return a=null,o+""||null}return o.radius=function(t){return arguments.length?(n="function"==typeof t?t:Fi(+t),o):n},o.startAngle=function(t){return arguments.length?(r="function"==typeof t?t:Fi(+t),o):r},o.endAngle=function(t){return arguments.length?(i="function"==typeof t?t:Fi(+t),o):i},o.source=function(e){return arguments.length?(t=e,o):t},o.target=function(t){return arguments.length?(e=t,o):e},o.context=function(t){return arguments.length?(a=null==t?null:t,o):a},o};function Xi(){}function Zi(t,e){var n=new Xi;if(t instanceof Xi)t.each((function(t,e){n.set(e,t)}));else if(Array.isArray(t)){var r,i=-1,a=t.length;if(null==e)for(;++i=r.length)return null!=t&&n.sort(t),null!=e?e(n):n;for(var c,u,l,h=-1,f=n.length,d=r[i++],p=Ji(),g=o();++hr.length)return n;var o,s=i[a-1];return null!=e&&a>=r.length?o=n.entries():(o=[],n.each((function(e,n){o.push({key:n,values:t(e,a)})}))),null!=s?o.sort((function(t,e){return s(t.key,e.key)})):o}(a(t,0,ea,na),0)},key:function(t){return r.push(t),n},sortKeys:function(t){return i[r.length-1]=t,n},sortValues:function(e){return t=e,n},rollup:function(t){return e=t,n}}};function 
Ki(){return{}}function ta(t,e,n){t[e]=n}function ea(){return Ji()}function na(t,e,n){t.set(e,n)}function ra(){}var ia=Ji.prototype;function aa(t,e){var n=new ra;if(t instanceof ra)t.each((function(t){n.add(t)}));else if(t){var r=-1,i=t.length;if(null==e)for(;++r6/29*(6/29)*(6/29)?Math.pow(t,1/3):t/(6/29*3*(6/29))+4/29}function va(t){return t>6/29?t*t*t:6/29*3*(6/29)*(t-4/29)}function ma(t){return 255*(t<=.0031308?12.92*t:1.055*Math.pow(t,1/2.4)-.055)}function ba(t){return(t/=255)<=.04045?t/12.92:Math.pow((t+.055)/1.055,2.4)}function xa(t){if(t instanceof wa)return new wa(t.h,t.c,t.l,t.opacity);if(t instanceof ga||(t=fa(t)),0===t.a&&0===t.b)return new wa(NaN,0r!=d>r&&n<(f-u)*(r-l)/(d-l)+u&&(i=-i)}return i}function Ia(t,e,n){var r,i,a,o;return function(t,e,n){return(e[0]-t[0])*(n[1]-t[1])==(n[0]-t[0])*(e[1]-t[1])}(t,e,n)&&(i=t[r=+(t[0]===e[0])],a=n[r],o=e[r],i<=a&&a<=o||o<=a&&a<=i)}var ja=function(){},Ra=[[],[[[1,1.5],[.5,1]]],[[[1.5,1],[1,1.5]]],[[[1.5,1],[.5,1]]],[[[1,.5],[1.5,1]]],[[[1,1.5],[.5,1]],[[1,.5],[1.5,1]]],[[[1,.5],[1,1.5]]],[[[1,.5],[.5,1]]],[[[.5,1],[1,.5]]],[[[1,1.5],[1,.5]]],[[[.5,1],[1,.5]],[[1.5,1],[1,1.5]]],[[[1.5,1],[1,.5]]],[[[.5,1],[1.5,1]]],[[[1,1.5],[1.5,1]]],[[[.5,1],[1,1.5]]],[]],Ya=function(){var t=1,e=1,n=M,r=s;function i(t){var e=n(t);if(Array.isArray(e))e=e.slice().sort(Ba);else{var r=y(t),i=r[0],o=r[1];e=A(i,o,e),e=k(Math.floor(i/e)*e,Math.floor(o/e)*e,e)}return e.map((function(e){return a(t,e)}))}function a(n,i){var a=[],s=[];return function(n,r,i){var a,s,c,u,l,h,f=new Array,d=new Array;a=s=-1,u=n[0]>=r,Ra[u<<1].forEach(p);for(;++a=r,Ra[c|u<<1].forEach(p);Ra[u<<0].forEach(p);for(;++s=r,l=n[s*t]>=r,Ra[u<<1|l<<2].forEach(p);++a=r,h=l,l=n[s*t+a+1]>=r,Ra[c|u<<1|l<<2|h<<3].forEach(p);Ra[u|l<<3].forEach(p)}a=-1,l=n[s*t]>=r,Ra[l<<2].forEach(p);for(;++a=r,Ra[l<<2|h<<3].forEach(p);function p(t){var e,n,r=[t[0][0]+a,t[0][1]+s],c=[t[1][0]+a,t[1][1]+s],u=o(r),l=o(c);(e=d[u])?(n=f[l])?(delete d[e.end],delete 
f[n.start],e===n?(e.ring.push(c),i(e.ring)):f[e.start]=d[n.end]={start:e.start,end:n.end,ring:e.ring.concat(n.ring)}):(delete d[e.end],e.ring.push(c),d[e.end=l]=e):(e=f[l])?(n=d[u])?(delete f[e.start],delete d[n.end],e===n?(e.ring.push(c),i(e.ring)):f[n.start]=d[e.end]={start:n.start,end:e.end,ring:n.ring.concat(e.ring)}):(delete f[e.start],e.ring.unshift(r),f[e.start=u]=e):f[u]=d[l]={start:u,end:l,ring:[r,c]}}Ra[l<<3].forEach(p)}(n,i,(function(t){r(t,n,i),function(t){for(var e=0,n=t.length,r=t[n-1][1]*t[0][0]-t[n-1][0]*t[0][1];++e0?a.push([t]):s.push(t)})),s.forEach((function(t){for(var e,n=0,r=a.length;n0&&o0&&s0&&a>0))throw new Error("invalid size");return t=r,e=a,i},i.thresholds=function(t){return arguments.length?(n="function"==typeof t?t:Array.isArray(t)?La(Na.call(t)):La(t),i):n},i.smooth=function(t){return arguments.length?(r=t?s:ja,i):r===s},i};function za(t,e,n){for(var r=t.width,i=t.height,a=1+(n<<1),o=0;o=n&&(s>=a&&(c-=t.data[s-a+o*r]),e.data[s-n+o*r]=c/Math.min(s+1,r-1+a-s,a))}function Ua(t,e,n){for(var r=t.width,i=t.height,a=1+(n<<1),o=0;o=n&&(s>=a&&(c-=t.data[o+(s-a)*r]),e.data[o+(s-n)*r]=c/Math.min(s+1,i-1+a-s,a))}function $a(t){return t[0]}function Wa(t){return t[1]}function Va(){return 1}var Ha=function(){var t=$a,e=Wa,n=Va,r=960,i=500,a=20,o=2,s=3*a,c=r+2*s>>o,u=i+2*s>>o,l=La(20);function h(r){var i=new Float32Array(c*u),h=new Float32Array(c*u);r.forEach((function(r,a,l){var h=+t(r,a,l)+s>>o,f=+e(r,a,l)+s>>o,d=+n(r,a,l);h>=0&&h=0&&f>o),Ua({width:c,height:u,data:h},{width:c,height:u,data:i},a>>o),za({width:c,height:u,data:i},{width:c,height:u,data:h},a>>o),Ua({width:c,height:u,data:h},{width:c,height:u,data:i},a>>o),za({width:c,height:u,data:i},{width:c,height:u,data:h},a>>o),Ua({width:c,height:u,data:h},{width:c,height:u,data:i},a>>o);var d=l(i);if(!Array.isArray(d)){var p=L(i);d=A(0,p,d),(d=k(0,Math.floor(p/d)*d,d)).shift()}return Ya().thresholds(d).size([c,u])(i).map(f)}function f(t){return 
t.value*=Math.pow(2,-2*o),t.coordinates.forEach(d),t}function d(t){t.forEach(p)}function p(t){t.forEach(g)}function g(t){t[0]=t[0]*Math.pow(2,o)-s,t[1]=t[1]*Math.pow(2,o)-s}function y(){return c=r+2*(s=3*a)>>o,u=i+2*s>>o,h}return h.x=function(e){return arguments.length?(t="function"==typeof e?e:La(+e),h):t},h.y=function(t){return arguments.length?(e="function"==typeof t?t:La(+t),h):e},h.weight=function(t){return arguments.length?(n="function"==typeof t?t:La(+t),h):n},h.size=function(t){if(!arguments.length)return[r,i];var e=Math.ceil(t[0]),n=Math.ceil(t[1]);if(!(e>=0||e>=0))throw new Error("invalid size");return r=e,i=n,y()},h.cellSize=function(t){if(!arguments.length)return 1<=1))throw new Error("invalid cell size");return o=Math.floor(Math.log(t)/Math.LN2),y()},h.thresholds=function(t){return arguments.length?(l="function"==typeof t?t:Array.isArray(t)?La(Na.call(t)):La(t),h):l},h.bandwidth=function(t){if(!arguments.length)return Math.sqrt(a*(a+1));if(!((t=+t)>=0))throw new Error("invalid bandwidth");return a=Math.round((Math.sqrt(4*t*t+1)-1)/2),y()},h},Ga=function(t){return function(){return t}};function qa(t,e,n,r,i,a,o,s,c,u){this.target=t,this.type=e,this.subject=n,this.identifier=r,this.active=i,this.x=a,this.y=o,this.dx=s,this.dy=c,this._=u}function Xa(){return!ce.ctrlKey&&!ce.button}function Za(){return this.parentNode}function Ja(t){return null==t?{x:ce.x,y:ce.y}:t}function Qa(){return navigator.maxTouchPoints||"ontouchstart"in this}qa.prototype.on=function(){var t=this._.on.apply(this._,arguments);return t===this._?this:t};var Ka=function(){var t,e,n,r,i=Xa,a=Za,o=Ja,s=Qa,c={},u=lt("start","drag","end"),l=0,h=0;function f(t){t.on("mousedown.drag",d).filter(s).on("touchstart.drag",y).on("touchmove.drag",v).on("touchend.drag touchcancel.drag",m).style("touch-action","none").style("-webkit-tap-highlight-color","rgba(0,0,0,0)")}function d(){if(!r&&i.apply(this,arguments)){var 
o=b("mouse",a.apply(this,arguments),Nn,this,arguments);o&&(ke(ce.view).on("mousemove.drag",p,!0).on("mouseup.drag",g,!0),Te(ce.view),we(),n=!1,t=ce.clientX,e=ce.clientY,o("start"))}}function p(){if(Ee(),!n){var r=ce.clientX-t,i=ce.clientY-e;n=r*r+i*i>h}c.mouse("drag")}function g(){ke(ce.view).on("mousemove.drag mouseup.drag",null),Ce(ce.view,n),Ee(),c.mouse("end")}function y(){if(i.apply(this,arguments)){var t,e,n=ce.changedTouches,r=a.apply(this,arguments),o=n.length;for(t=0;t9999?"+"+io(e,6):io(e,4))+"-"+io(t.getUTCMonth()+1,2)+"-"+io(t.getUTCDate(),2)+(a?"T"+io(n,2)+":"+io(r,2)+":"+io(i,2)+"."+io(a,3)+"Z":i?"T"+io(n,2)+":"+io(r,2)+":"+io(i,2)+"Z":r||n?"T"+io(n,2)+":"+io(r,2)+"Z":"")}var oo=function(t){var e=new RegExp('["'+t+"\n\r]"),n=t.charCodeAt(0);function r(t,e){var r,i=[],a=t.length,o=0,s=0,c=a<=0,u=!1;function l(){if(c)return eo;if(u)return u=!1,to;var e,r,i=o;if(34===t.charCodeAt(i)){for(;o++=a?c=!0:10===(r=t.charCodeAt(o++))?u=!0:13===r&&(u=!0,10===t.charCodeAt(o)&&++o),t.slice(i+1,e-1).replace(/""/g,'"')}for(;o=(a=(g+v)/2))?g=a:v=a,(l=n>=(o=(y+m)/2))?y=o:m=o,i=d,!(d=d[h=l<<1|u]))return i[h]=p,t;if(s=+t._x.call(null,d.data),c=+t._y.call(null,d.data),e===s&&n===c)return p.next=d,i?i[h]=p:t._root=p,t;do{i=i?i[h]=new Array(4):t._root=new Array(4),(u=e>=(a=(g+v)/2))?g=a:v=a,(l=n>=(o=(y+m)/2))?y=o:m=o}while((h=l<<1|u)==(f=(c>=o)<<1|s>=a));return i[f]=d,i[h]=p,t}var _s=function(t,e,n,r,i){this.node=t,this.x0=e,this.y0=n,this.x1=r,this.y1=i};function ks(t){return t[0]}function ws(t){return t[1]}function Es(t,e,n){var r=new Ts(null==e?ks:e,null==n?ws:n,NaN,NaN,NaN,NaN);return null==t?r:r.addAll(t)}function Ts(t,e,n,r,i,a){this._x=t,this._y=e,this._x0=n,this._y0=r,this._x1=i,this._y1=a,this._root=void 0}function Cs(t){for(var e={data:t.data},n=e;t=t.next;)n=n.next={data:t.data};return e}var Ss=Es.prototype=Ts.prototype;function As(t){return t.x+t.vx}function Ms(t){return t.y+t.vy}Ss.copy=function(){var t,e,n=new 
Ts(this._x,this._y,this._x0,this._y0,this._x1,this._y1),r=this._root;if(!r)return n;if(!r.length)return n._root=Cs(r),n;for(t=[{source:r,target:n._root=new Array(4)}];r=t.pop();)for(var i=0;i<4;++i)(e=r.source[i])&&(e.length?t.push({source:e,target:r.target[i]=new Array(4)}):r.target[i]=Cs(e));return n},Ss.add=function(t){var e=+this._x.call(null,t),n=+this._y.call(null,t);return xs(this.cover(e,n),e,n,t)},Ss.addAll=function(t){var e,n,r,i,a=t.length,o=new Array(a),s=new Array(a),c=1/0,u=1/0,l=-1/0,h=-1/0;for(n=0;nl&&(l=r),ih&&(h=i));if(c>l||u>h)return this;for(this.cover(c,u).cover(l,h),n=0;nt||t>=i||r>e||e>=a;)switch(s=(ef||(a=c.y0)>d||(o=c.x1)=v)<<1|t>=y)&&(c=p[p.length-1],p[p.length-1]=p[p.length-1-u],p[p.length-1-u]=c)}else{var m=t-+this._x.call(null,g.data),b=e-+this._y.call(null,g.data),x=m*m+b*b;if(x=(s=(p+y)/2))?p=s:y=s,(l=o>=(c=(g+v)/2))?g=c:v=c,e=d,!(d=d[h=l<<1|u]))return this;if(!d.length)break;(e[h+1&3]||e[h+2&3]||e[h+3&3])&&(n=e,f=h)}for(;d.data!==t;)if(r=d,!(d=d.next))return this;return(i=d.next)&&delete d.next,r?(i?r.next=i:delete r.next,this):e?(i?e[h]=i:delete e[h],(d=e[0]||e[1]||e[2]||e[3])&&d===(e[3]||e[2]||e[1]||e[0])&&!d.length&&(n?n[f]=d:this._root=d),this):(this._root=i,this)},Ss.removeAll=function(t){for(var e=0,n=t.length;ec+d||iu+d||as.index){var p=c-o.x-o.vx,g=u-o.y-o.vy,y=p*p+g*g;yt.r&&(t.r=t[e].r)}function s(){if(e){var r,i,a=e.length;for(n=new Array(a),r=0;r1?(null==n?s.remove(t):s.set(t,d(n)),e):s.get(t)},find:function(e,n,r){var i,a,o,s,c,u=0,l=t.length;for(null==r?r=1/0:r*=r,u=0;u1?(u.on(t,n),e):u.on(t)}}},js=function(){var t,e,n,r,i=ms(-30),a=1,o=1/0,s=.81;function c(r){var i,a=t.length,o=Es(t,Ls,Ps).visitAfter(l);for(n=r,i=0;i=o)){(t.data!==e||t.next)&&(0===l&&(d+=(l=bs())*l),0===h&&(d+=(h=bs())*h),d1?r[0]+r.slice(2):r,+t.slice(n+1)]},$s=function(t){return(t=Us(Math.abs(t)))?t[1]:NaN},Ws=/^(?:(.)?([<>=^]))?([+\-( ])?([$#])?(0)?(\d+)?(,)?(\.\d+)?(~)?([a-z%])?$/i;function Vs(t){if(!(e=Ws.exec(t)))throw new Error("invalid format: 
"+t);var e;return new Hs({fill:e[1],align:e[2],sign:e[3],symbol:e[4],zero:e[5],width:e[6],comma:e[7],precision:e[8]&&e[8].slice(1),trim:e[9],type:e[10]})}function Hs(t){this.fill=void 0===t.fill?" ":t.fill+"",this.align=void 0===t.align?">":t.align+"",this.sign=void 0===t.sign?"-":t.sign+"",this.symbol=void 0===t.symbol?"":t.symbol+"",this.zero=!!t.zero,this.width=void 0===t.width?void 0:+t.width,this.comma=!!t.comma,this.precision=void 0===t.precision?void 0:+t.precision,this.trim=!!t.trim,this.type=void 0===t.type?"":t.type+""}Vs.prototype=Hs.prototype,Hs.prototype.toString=function(){return this.fill+this.align+this.sign+this.symbol+(this.zero?"0":"")+(void 0===this.width?"":Math.max(1,0|this.width))+(this.comma?",":"")+(void 0===this.precision?"":"."+Math.max(0,0|this.precision))+(this.trim?"~":"")+this.type};var Gs,qs,Xs,Zs,Js=function(t,e){var n=Us(t,e);if(!n)return t+"";var r=n[0],i=n[1];return i<0?"0."+new Array(-i).join("0")+r:r.length>i+1?r.slice(0,i+1)+"."+r.slice(i+1):r+new Array(i-r.length+2).join("0")},Qs={"%":function(t,e){return(100*t).toFixed(e)},b:function(t){return Math.round(t).toString(2)},c:function(t){return t+""},d:function(t){return Math.round(t).toString(10)},e:function(t,e){return t.toExponential(e)},f:function(t,e){return t.toFixed(e)},g:function(t,e){return t.toPrecision(e)},o:function(t){return Math.round(t).toString(8)},p:function(t,e){return Js(100*t,e)},r:Js,s:function(t,e){var n=Us(t,e);if(!n)return t+"";var r=n[0],i=n[1],a=i-(Gs=3*Math.max(-8,Math.min(8,Math.floor(i/3))))+1,o=r.length;return a===o?r:a>o?r+new Array(a-o+1).join("0"):a>0?r.slice(0,a)+"."+r.slice(a):"0."+new Array(1-a).join("0")+Us(t,Math.max(0,e+a-1))[0]},X:function(t){return Math.round(t).toString(16).toUpperCase()},x:function(t){return Math.round(t).toString(16)}},Ks=function(t){return t},tc=Array.prototype.map,ec=["y","z","a","f","p","n","µ","m","","k","M","G","T","P","E","Z","Y"],nc=function(t){var e,n,r=void 0===t.grouping||void 
0===t.thousands?Ks:(e=tc.call(t.grouping,Number),n=t.thousands+"",function(t,r){for(var i=t.length,a=[],o=0,s=e[0],c=0;i>0&&s>0&&(c+s+1>r&&(s=Math.max(1,r-c)),a.push(t.substring(i-=s,i+s)),!((c+=s+1)>r));)s=e[o=(o+1)%e.length];return a.reverse().join(n)}),i=void 0===t.currency?"":t.currency[0]+"",a=void 0===t.currency?"":t.currency[1]+"",o=void 0===t.decimal?".":t.decimal+"",s=void 0===t.numerals?Ks:function(t){return function(e){return e.replace(/[0-9]/g,(function(e){return t[+e]}))}}(tc.call(t.numerals,String)),c=void 0===t.percent?"%":t.percent+"",u=void 0===t.minus?"-":t.minus+"",l=void 0===t.nan?"NaN":t.nan+"";function h(t){var e=(t=Vs(t)).fill,n=t.align,h=t.sign,f=t.symbol,d=t.zero,p=t.width,g=t.comma,y=t.precision,v=t.trim,m=t.type;"n"===m?(g=!0,m="g"):Qs[m]||(void 0===y&&(y=12),v=!0,m="g"),(d||"0"===e&&"="===n)&&(d=!0,e="0",n="=");var b="$"===f?i:"#"===f&&/[boxX]/.test(m)?"0"+m.toLowerCase():"",x="$"===f?a:/[%p]/.test(m)?c:"",_=Qs[m],k=/[defgprs%]/.test(m);function w(t){var i,a,c,f=b,w=x;if("c"===m)w=_(t)+w,t="";else{var E=(t=+t)<0;if(t=isNaN(t)?l:_(Math.abs(t),y),v&&(t=function(t){t:for(var e,n=t.length,r=1,i=-1;r0&&(i=0)}return i>0?t.slice(0,i)+t.slice(e+1):t}(t)),E&&0==+t&&(E=!1),f=(E?"("===h?h:u:"-"===h||"("===h?"":h)+f,w=("s"===m?ec[8+Gs/3]:"")+w+(E&&"("===h?")":""),k)for(i=-1,a=t.length;++i(c=t.charCodeAt(i))||c>57){w=(46===c?o+t.slice(i+1):t.slice(i))+w,t=t.slice(0,i);break}}g&&!d&&(t=r(t,1/0));var T=f.length+t.length+w.length,C=T>1)+f+t+w+C.slice(T);break;default:t=C+f+t+w}return s(t)}return y=void 0===y?6:/[gprs]/.test(m)?Math.max(1,Math.min(21,y)):Math.max(0,Math.min(20,y)),w.toString=function(){return t+""},w}return{format:h,formatPrefix:function(t,e){var n=h(((t=Vs(t)).type="f",t)),r=3*Math.max(-8,Math.min(8,Math.floor($s(e)/3))),i=Math.pow(10,-r),a=ec[8+r/3];return function(t){return n(i*t)+a}}}};function rc(t){return qs=nc(t),Xs=qs.format,Zs=qs.formatPrefix,qs}rc({decimal:".",thousands:",",grouping:[3],currency:["$",""],minus:"-"});var 
ic=function(t){return Math.max(0,-$s(Math.abs(t)))},ac=function(t,e){return Math.max(0,3*Math.max(-8,Math.min(8,Math.floor($s(e)/3)))-$s(Math.abs(t)))},oc=function(t,e){return t=Math.abs(t),e=Math.abs(e)-t,Math.max(0,$s(e)-$s(t))+1},sc=function(){return new cc};function cc(){this.reset()}cc.prototype={constructor:cc,reset:function(){this.s=this.t=0},add:function(t){lc(uc,t,this.t),lc(this,uc.s,this.s),this.s?this.t+=uc.t:this.s=uc.t},valueOf:function(){return this.s}};var uc=new cc;function lc(t,e,n){var r=t.s=e+n,i=r-e,a=r-i;t.t=e-a+(n-i)}var hc=Math.PI,fc=hc/2,dc=hc/4,pc=2*hc,gc=180/hc,yc=hc/180,vc=Math.abs,mc=Math.atan,bc=Math.atan2,xc=Math.cos,_c=Math.ceil,kc=Math.exp,wc=(Math.floor,Math.log),Ec=Math.pow,Tc=Math.sin,Cc=Math.sign||function(t){return t>0?1:t<0?-1:0},Sc=Math.sqrt,Ac=Math.tan;function Mc(t){return t>1?0:t<-1?hc:Math.acos(t)}function Oc(t){return t>1?fc:t<-1?-fc:Math.asin(t)}function Dc(t){return(t=Tc(t/2))*t}function Nc(){}function Bc(t,e){t&&Pc.hasOwnProperty(t.type)&&Pc[t.type](t,e)}var Lc={Feature:function(t,e){Bc(t.geometry,e)},FeatureCollection:function(t,e){for(var n=t.features,r=-1,i=n.length;++r=0?1:-1,i=r*n,a=xc(e=(e*=yc)/2+dc),o=Tc(e),s=Uc*o,c=zc*a+s*xc(i),u=s*r*Tc(i);Wc.add(bc(u,c)),Yc=t,zc=a,Uc=o}var Jc=function(t){return Vc.reset(),$c(t,Hc),2*Vc};function Qc(t){return[bc(t[1],t[0]),Oc(t[2])]}function Kc(t){var e=t[0],n=t[1],r=xc(n);return[r*xc(e),r*Tc(e),Tc(n)]}function tu(t,e){return t[0]*e[0]+t[1]*e[1]+t[2]*e[2]}function eu(t,e){return[t[1]*e[2]-t[2]*e[1],t[2]*e[0]-t[0]*e[2],t[0]*e[1]-t[1]*e[0]]}function nu(t,e){t[0]+=e[0],t[1]+=e[1],t[2]+=e[2]}function ru(t,e){return[t[0]*e,t[1]*e,t[2]*e]}function iu(t){var e=Sc(t[0]*t[0]+t[1]*t[1]+t[2]*t[2]);t[0]/=e,t[1]/=e,t[2]/=e}var 
au,ou,su,cu,uu,lu,hu,fu,du,pu,gu=sc(),yu={point:vu,lineStart:bu,lineEnd:xu,polygonStart:function(){yu.point=_u,yu.lineStart=ku,yu.lineEnd=wu,gu.reset(),Hc.polygonStart()},polygonEnd:function(){Hc.polygonEnd(),yu.point=vu,yu.lineStart=bu,yu.lineEnd=xu,Wc<0?(au=-(su=180),ou=-(cu=90)):gu>1e-6?cu=90:gu<-1e-6&&(ou=-90),pu[0]=au,pu[1]=su},sphere:function(){au=-(su=180),ou=-(cu=90)}};function vu(t,e){du.push(pu=[au=t,su=t]),ecu&&(cu=e)}function mu(t,e){var n=Kc([t*yc,e*yc]);if(fu){var r=eu(fu,n),i=eu([r[1],-r[0],0],r);iu(i),i=Qc(i);var a,o=t-uu,s=o>0?1:-1,c=i[0]*gc*s,u=vc(o)>180;u^(s*uucu&&(cu=a):u^(s*uu<(c=(c+360)%360-180)&&ccu&&(cu=e)),u?tEu(au,su)&&(su=t):Eu(t,su)>Eu(au,su)&&(au=t):su>=au?(tsu&&(su=t)):t>uu?Eu(au,t)>Eu(au,su)&&(su=t):Eu(t,su)>Eu(au,su)&&(au=t)}else du.push(pu=[au=t,su=t]);ecu&&(cu=e),fu=n,uu=t}function bu(){yu.point=mu}function xu(){pu[0]=au,pu[1]=su,yu.point=vu,fu=null}function _u(t,e){if(fu){var n=t-uu;gu.add(vc(n)>180?n+(n>0?360:-360):n)}else lu=t,hu=e;Hc.point(t,e),mu(t,e)}function ku(){Hc.lineStart()}function wu(){_u(lu,hu),Hc.lineEnd(),vc(gu)>1e-6&&(au=-(su=180)),pu[0]=au,pu[1]=su,fu=null}function Eu(t,e){return(e-=t)<0?e+360:e}function Tu(t,e){return t[0]-e[0]}function Cu(t,e){return t[0]<=t[1]?t[0]<=e&&e<=t[1]:eEu(r[0],r[1])&&(r[1]=i[1]),Eu(i[0],r[1])>Eu(r[0],r[1])&&(r[0]=i[0])):a.push(r=i);for(o=-1/0,e=0,r=a[n=a.length-1];e<=n;r=i,++e)i=a[e],(s=Eu(r[1],i[0]))>o&&(o=s,au=i[0],su=r[1])}return du=pu=null,au===1/0||ou===1/0?[[NaN,NaN],[NaN,NaN]]:[[au,ou],[su,cu]]},Wu={sphere:Nc,point:Vu,lineStart:Gu,lineEnd:Zu,polygonStart:function(){Wu.lineStart=Ju,Wu.lineEnd=Qu},polygonEnd:function(){Wu.lineStart=Gu,Wu.lineEnd=Zu}};function Vu(t,e){t*=yc;var n=xc(e*=yc);Hu(n*xc(t),n*Tc(t),Tc(e))}function Hu(t,e,n){++Su,Mu+=(t-Mu)/Su,Ou+=(e-Ou)/Su,Du+=(n-Du)/Su}function Gu(){Wu.point=qu}function qu(t,e){t*=yc;var n=xc(e*=yc);Yu=n*xc(t),zu=n*Tc(t),Uu=Tc(e),Wu.point=Xu,Hu(Yu,zu,Uu)}function Xu(t,e){t*=yc;var 
n=xc(e*=yc),r=n*xc(t),i=n*Tc(t),a=Tc(e),o=bc(Sc((o=zu*a-Uu*i)*o+(o=Uu*r-Yu*a)*o+(o=Yu*i-zu*r)*o),Yu*r+zu*i+Uu*a);Au+=o,Nu+=o*(Yu+(Yu=r)),Bu+=o*(zu+(zu=i)),Lu+=o*(Uu+(Uu=a)),Hu(Yu,zu,Uu)}function Zu(){Wu.point=Vu}function Ju(){Wu.point=Ku}function Qu(){tl(ju,Ru),Wu.point=Vu}function Ku(t,e){ju=t,Ru=e,t*=yc,e*=yc,Wu.point=tl;var n=xc(e);Yu=n*xc(t),zu=n*Tc(t),Uu=Tc(e),Hu(Yu,zu,Uu)}function tl(t,e){t*=yc;var n=xc(e*=yc),r=n*xc(t),i=n*Tc(t),a=Tc(e),o=zu*a-Uu*i,s=Uu*r-Yu*a,c=Yu*i-zu*r,u=Sc(o*o+s*s+c*c),l=Oc(u),h=u&&-l/u;Pu+=h*o,Fu+=h*s,Iu+=h*c,Au+=l,Nu+=l*(Yu+(Yu=r)),Bu+=l*(zu+(zu=i)),Lu+=l*(Uu+(Uu=a)),Hu(Yu,zu,Uu)}var el=function(t){Su=Au=Mu=Ou=Du=Nu=Bu=Lu=Pu=Fu=Iu=0,$c(t,Wu);var e=Pu,n=Fu,r=Iu,i=e*e+n*n+r*r;return i<1e-12&&(e=Nu,n=Bu,r=Lu,Au<1e-6&&(e=Mu,n=Ou,r=Du),(i=e*e+n*n+r*r)<1e-12)?[NaN,NaN]:[bc(n,e)*gc,Oc(r/Sc(i))*gc]},nl=function(t){return function(){return t}},rl=function(t,e){function n(n,r){return n=t(n,r),e(n[0],n[1])}return t.invert&&e.invert&&(n.invert=function(n,r){return(n=e.invert(n,r))&&t.invert(n[0],n[1])}),n};function il(t,e){return[vc(t)>hc?t+Math.round(-t/pc)*pc:t,e]}function al(t,e,n){return(t%=pc)?e||n?rl(sl(t),cl(e,n)):sl(t):e||n?cl(e,n):il}function ol(t){return function(e,n){return[(e+=t)>hc?e-pc:e<-hc?e+pc:e,n]}}function sl(t){var e=ol(t);return e.invert=ol(-t),e}function cl(t,e){var n=xc(t),r=Tc(t),i=xc(e),a=Tc(e);function o(t,e){var o=xc(e),s=xc(t)*o,c=Tc(t)*o,u=Tc(e),l=u*n+s*r;return[bc(c*i-l*a,s*n-u*r),Oc(l*i+c*a)]}return o.invert=function(t,e){var o=xc(e),s=xc(t)*o,c=Tc(t)*o,u=Tc(e),l=u*i-c*a;return[bc(c*i+u*a,s*n+l*r),Oc(l*n-s*r)]},o}il.invert=il;var ul=function(t){function e(e){return(e=t(e[0]*yc,e[1]*yc))[0]*=gc,e[1]*=gc,e}return t=al(t[0]*yc,t[1]*yc,t.length>2?t[2]*yc:0),e.invert=function(e){return(e=t.invert(e[0]*yc,e[1]*yc))[0]*=gc,e[1]*=gc,e},e};function ll(t,e,n,r,i,a){if(n){var o=xc(e),s=Tc(e),c=r*n;null==i?(i=e+r*pc,a=e-c/2):(i=hl(o,i),a=hl(o,a),(r>0?ia)&&(i+=r*pc));for(var 
u,l=i;r>0?l>a:l1&&e.push(e.pop().concat(e.shift()))},result:function(){var n=e;return e=[],t=null,n}}},pl=function(t,e){return vc(t[0]-e[0])<1e-6&&vc(t[1]-e[1])<1e-6};function gl(t,e,n,r){this.x=t,this.z=e,this.o=n,this.e=r,this.v=!1,this.n=this.p=null}var yl=function(t,e,n,r,i){var a,o,s=[],c=[];if(t.forEach((function(t){if(!((e=t.length-1)<=0)){var e,n,r=t[0],o=t[e];if(pl(r,o)){for(i.lineStart(),a=0;a=0;--a)i.point((l=u[a])[0],l[1]);else r(f.x,f.p.x,-1,i);f=f.p}u=(f=f.o).z,d=!d}while(!f.v);i.lineEnd()}}};function vl(t){if(e=t.length){for(var e,n,r=0,i=t[0];++r=0?1:-1,T=E*w,C=T>hc,S=g*_;if(ml.add(bc(S*E*Tc(T),y*k+S*xc(T))),o+=C?w+E*pc:w,C^d>=n^b>=n){var A=eu(Kc(f),Kc(m));iu(A);var M=eu(a,A);iu(M);var O=(C^w>=0?-1:1)*Oc(M[2]);(r>O||r===O&&(A[0]||A[1]))&&(s+=C^w>=0?1:-1)}}return(o<-1e-6||o<1e-6&&ml<-1e-6)^1&s},_l=function(t,e,n,r){return function(i){var a,o,s,c=e(i),u=dl(),l=e(u),h=!1,f={point:d,lineStart:g,lineEnd:y,polygonStart:function(){f.point=v,f.lineStart=m,f.lineEnd=b,o=[],a=[]},polygonEnd:function(){f.point=d,f.lineStart=g,f.lineEnd=y,o=I(o);var t=xl(a,r);o.length?(h||(i.polygonStart(),h=!0),yl(o,wl,t,n,i)):t&&(h||(i.polygonStart(),h=!0),i.lineStart(),n(null,null,1,i),i.lineEnd()),h&&(i.polygonEnd(),h=!1),o=a=null},sphere:function(){i.polygonStart(),i.lineStart(),n(null,null,1,i),i.lineEnd(),i.polygonEnd()}};function d(e,n){t(e,n)&&i.point(e,n)}function p(t,e){c.point(t,e)}function g(){f.point=p,c.lineStart()}function y(){f.point=d,c.lineEnd()}function v(t,e){s.push([t,e]),l.point(t,e)}function m(){l.lineStart(),s=[]}function b(){v(s[0][0],s[0][1]),l.lineEnd();var t,e,n,r,c=l.clean(),f=u.result(),d=f.length;if(s.pop(),a.push(s),s=null,d)if(1&c){if((e=(n=f[0]).length-1)>0){for(h||(i.polygonStart(),h=!0),i.lineStart(),t=0;t1&&2&c&&f.push(f.pop().concat(f.shift())),o.push(f.filter(kl))}return f}};function kl(t){return t.length>1}function wl(t,e){return((t=t.x)[0]<0?t[1]-fc-1e-6:fc-t[1])-((e=e.x)[0]<0?e[1]-fc-1e-6:fc-e[1])}var 
El=_l((function(){return!0}),(function(t){var e,n=NaN,r=NaN,i=NaN;return{lineStart:function(){t.lineStart(),e=1},point:function(a,o){var s=a>0?hc:-hc,c=vc(a-n);vc(c-hc)<1e-6?(t.point(n,r=(r+o)/2>0?fc:-fc),t.point(i,r),t.lineEnd(),t.lineStart(),t.point(s,r),t.point(a,r),e=0):i!==s&&c>=hc&&(vc(n-i)<1e-6&&(n-=1e-6*i),vc(a-s)<1e-6&&(a-=1e-6*s),r=function(t,e,n,r){var i,a,o=Tc(t-n);return vc(o)>1e-6?mc((Tc(e)*(a=xc(r))*Tc(n)-Tc(r)*(i=xc(e))*Tc(t))/(i*a*o)):(e+r)/2}(n,r,a,o),t.point(i,r),t.lineEnd(),t.lineStart(),t.point(s,r),e=0),t.point(n=a,r=o),i=s},lineEnd:function(){t.lineEnd(),n=r=NaN},clean:function(){return 2-e}}}),(function(t,e,n,r){var i;if(null==t)i=n*fc,r.point(-hc,i),r.point(0,i),r.point(hc,i),r.point(hc,0),r.point(hc,-i),r.point(0,-i),r.point(-hc,-i),r.point(-hc,0),r.point(-hc,i);else if(vc(t[0]-e[0])>1e-6){var a=t[0]0,i=vc(e)>1e-6;function a(t,n){return xc(t)*xc(n)>e}function o(t,n,r){var i=[1,0,0],a=eu(Kc(t),Kc(n)),o=tu(a,a),s=a[0],c=o-s*s;if(!c)return!r&&t;var u=e*o/c,l=-e*s/c,h=eu(i,a),f=ru(i,u);nu(f,ru(a,l));var d=h,p=tu(f,d),g=tu(d,d),y=p*p-g*(tu(f,f)-1);if(!(y<0)){var v=Sc(y),m=ru(d,(-p-v)/g);if(nu(m,f),m=Qc(m),!r)return m;var b,x=t[0],_=n[0],k=t[1],w=n[1];_0^m[1]<(vc(m[0]-x)<1e-6?k:w):k<=m[1]&&m[1]<=w:E>hc^(x<=m[0]&&m[0]<=_)){var C=ru(d,(-p+v)/g);return nu(C,f),[m,Qc(C)]}}}function s(e,n){var i=r?t:hc-t,a=0;return e<-i?a|=1:e>i&&(a|=2),n<-i?a|=4:n>i&&(a|=8),a}return _l(a,(function(t){var e,n,c,u,l;return{lineStart:function(){u=c=!1,l=1},point:function(h,f){var d,p=[h,f],g=a(h,f),y=r?g?0:s(h,f):g?s(h+(h<0?hc:-hc),f):0;if(!e&&(u=c=g)&&t.lineStart(),g!==c&&(!(d=o(e,p))||pl(e,d)||pl(p,d))&&(p[0]+=1e-6,p[1]+=1e-6,g=a(p[0],p[1])),g!==c)l=0,g?(t.lineStart(),d=o(p,e),t.point(d[0],d[1])):(d=o(e,p),t.point(d[0],d[1]),t.lineEnd()),e=d;else if(i&&e&&r^g){var 
v;y&n||!(v=o(p,e,!0))||(l=0,r?(t.lineStart(),t.point(v[0][0],v[0][1]),t.point(v[1][0],v[1][1]),t.lineEnd()):(t.point(v[1][0],v[1][1]),t.lineEnd(),t.lineStart(),t.point(v[0][0],v[0][1])))}!g||e&&pl(e,p)||t.point(p[0],p[1]),e=p,c=g,n=y},lineEnd:function(){c&&t.lineEnd(),e=null},clean:function(){return l|(u&&c)<<1}}}),(function(e,r,i,a){ll(a,t,n,i,e,r)}),r?[0,-t]:[-hc,t-hc])};function Cl(t,e,n,r){function i(i,a){return t<=i&&i<=n&&e<=a&&a<=r}function a(i,a,s,u){var l=0,h=0;if(null==i||(l=o(i,s))!==(h=o(a,s))||c(i,a)<0^s>0)do{u.point(0===l||3===l?t:n,l>1?r:e)}while((l=(l+s+4)%4)!==h);else u.point(a[0],a[1])}function o(r,i){return vc(r[0]-t)<1e-6?i>0?0:3:vc(r[0]-n)<1e-6?i>0?2:1:vc(r[1]-e)<1e-6?i>0?1:0:i>0?3:2}function s(t,e){return c(t.x,e.x)}function c(t,e){var n=o(t,1),r=o(e,1);return n!==r?n-r:0===n?e[1]-t[1]:1===n?t[0]-e[0]:2===n?t[1]-e[1]:e[0]-t[0]}return function(o){var c,u,l,h,f,d,p,g,y,v,m,b=o,x=dl(),_={point:k,lineStart:function(){_.point=w,u&&u.push(l=[]);v=!0,y=!1,p=g=NaN},lineEnd:function(){c&&(w(h,f),d&&y&&x.rejoin(),c.push(x.result()));_.point=k,y&&b.lineEnd()},polygonStart:function(){b=x,c=[],u=[],m=!0},polygonEnd:function(){var e=function(){for(var e=0,n=0,i=u.length;nr&&(f-a)*(r-o)>(d-o)*(t-a)&&++e:d<=r&&(f-a)*(r-o)<(d-o)*(t-a)&&--e;return e}(),n=m&&e,i=(c=I(c)).length;(n||i)&&(o.polygonStart(),n&&(o.lineStart(),a(null,null,1,o),o.lineEnd()),i&&yl(c,s,e,a,o),o.polygonEnd());b=o,c=u=l=null}};function k(t,e){i(t,e)&&b.point(t,e)}function w(a,o){var s=i(a,o);if(u&&l.push([a,o]),v)h=a,f=o,d=s,v=!1,s&&(b.lineStart(),b.point(a,o));else if(s&&y)b.point(a,o);else{var c=[p=Math.max(-1e9,Math.min(1e9,p)),g=Math.max(-1e9,Math.min(1e9,g))],x=[a=Math.max(-1e9,Math.min(1e9,a)),o=Math.max(-1e9,Math.min(1e9,o))];!function(t,e,n,r,i,a){var o,s=t[0],c=t[1],u=0,l=1,h=e[0]-s,f=e[1]-c;if(o=n-s,h||!(o>0)){if(o/=h,h<0){if(o0){if(o>l)return;o>u&&(u=o)}if(o=i-s,h||!(o<0)){if(o/=h,h<0){if(o>l)return;o>u&&(u=o)}else 
if(h>0){if(o0)){if(o/=f,f<0){if(o0){if(o>l)return;o>u&&(u=o)}if(o=a-c,f||!(o<0)){if(o/=f,f<0){if(o>l)return;o>u&&(u=o)}else if(f>0){if(o0&&(t[0]=s+u*h,t[1]=c+u*f),l<1&&(e[0]=s+l*h,e[1]=c+l*f),!0}}}}}(c,x,t,e,n,r)?s&&(b.lineStart(),b.point(a,o),m=!1):(y||(b.lineStart(),b.point(c[0],c[1])),b.point(x[0],x[1]),s||b.lineEnd(),m=!1)}p=a,g=o,y=s}return _}}var Sl,Al,Ml,Ol=function(){var t,e,n,r=0,i=0,a=960,o=500;return n={stream:function(n){return t&&e===n?t:t=Cl(r,i,a,o)(e=n)},extent:function(s){return arguments.length?(r=+s[0][0],i=+s[0][1],a=+s[1][0],o=+s[1][1],t=e=null,n):[[r,i],[a,o]]}}},Dl=sc(),Nl={sphere:Nc,point:Nc,lineStart:function(){Nl.point=Ll,Nl.lineEnd=Bl},lineEnd:Nc,polygonStart:Nc,polygonEnd:Nc};function Bl(){Nl.point=Nl.lineEnd=Nc}function Ll(t,e){Sl=t*=yc,Al=Tc(e*=yc),Ml=xc(e),Nl.point=Pl}function Pl(t,e){t*=yc;var n=Tc(e*=yc),r=xc(e),i=vc(t-Sl),a=xc(i),o=r*Tc(i),s=Ml*n-Al*r*a,c=Al*n+Ml*r*a;Dl.add(bc(Sc(o*o+s*s),c)),Sl=t,Al=n,Ml=r}var Fl=function(t){return Dl.reset(),$c(t,Nl),+Dl},Il=[null,null],jl={type:"LineString",coordinates:Il},Rl=function(t,e){return Il[0]=t,Il[1]=e,Fl(jl)},Yl={Feature:function(t,e){return Ul(t.geometry,e)},FeatureCollection:function(t,e){for(var n=t.features,r=-1,i=n.length;++r0&&(i=Rl(t[a],t[a-1]))>0&&n<=i&&r<=i&&(n+r-i)*(1-Math.pow((n-r)/i,2))<1e-12*i)return!0;n=r}return!1}function Vl(t,e){return!!xl(t.map(Hl),Gl(e))}function Hl(t){return(t=t.map(Gl)).pop(),t}function Gl(t){return[t[0]*yc,t[1]*yc]}var ql=function(t,e){return(t&&Yl.hasOwnProperty(t.type)?Yl[t.type]:Ul)(t,e)};function Xl(t,e,n){var r=k(t,e-1e-6,n).concat(e);return function(t){return r.map((function(e){return[t,e]}))}}function Zl(t,e,n){var r=k(t,e-1e-6,n).concat(e);return function(t){return r.map((function(e){return[e,t]}))}}function Jl(){var t,e,n,r,i,a,o,s,c,u,l,h,f=10,d=f,p=90,g=360,y=2.5;function v(){return{type:"MultiLineString",coordinates:m()}}function m(){return 
k(_c(r/p)*p,n,p).map(l).concat(k(_c(s/g)*g,o,g).map(h)).concat(k(_c(e/f)*f,t,f).filter((function(t){return vc(t%p)>1e-6})).map(c)).concat(k(_c(a/d)*d,i,d).filter((function(t){return vc(t%g)>1e-6})).map(u))}return v.lines=function(){return m().map((function(t){return{type:"LineString",coordinates:t}}))},v.outline=function(){return{type:"Polygon",coordinates:[l(r).concat(h(o).slice(1),l(n).reverse().slice(1),h(s).reverse().slice(1))]}},v.extent=function(t){return arguments.length?v.extentMajor(t).extentMinor(t):v.extentMinor()},v.extentMajor=function(t){return arguments.length?(r=+t[0][0],n=+t[1][0],s=+t[0][1],o=+t[1][1],r>n&&(t=r,r=n,n=t),s>o&&(t=s,s=o,o=t),v.precision(y)):[[r,s],[n,o]]},v.extentMinor=function(n){return arguments.length?(e=+n[0][0],t=+n[1][0],a=+n[0][1],i=+n[1][1],e>t&&(n=e,e=t,t=n),a>i&&(n=a,a=i,i=n),v.precision(y)):[[e,a],[t,i]]},v.step=function(t){return arguments.length?v.stepMajor(t).stepMinor(t):v.stepMinor()},v.stepMajor=function(t){return arguments.length?(p=+t[0],g=+t[1],v):[p,g]},v.stepMinor=function(t){return arguments.length?(f=+t[0],d=+t[1],v):[f,d]},v.precision=function(f){return arguments.length?(y=+f,c=Xl(a,i,90),u=Zl(e,t,y),l=Xl(s,o,90),h=Zl(r,n,y),v):y},v.extentMajor([[-180,1e-6-90],[180,90-1e-6]]).extentMinor([[-180,-80-1e-6],[180,80+1e-6]])}function Ql(){return Jl()()}var Kl,th,eh,nh,rh=function(t,e){var n=t[0]*yc,r=t[1]*yc,i=e[0]*yc,a=e[1]*yc,o=xc(r),s=Tc(r),c=xc(a),u=Tc(a),l=o*xc(n),h=o*Tc(n),f=c*xc(i),d=c*Tc(i),p=2*Oc(Sc(Dc(a-r)+o*c*Dc(i-n))),g=Tc(p),y=p?function(t){var e=Tc(t*=p)/g,n=Tc(p-t)/g,r=n*l+e*f,i=n*h+e*d,a=n*s+e*u;return[bc(i,r)*gc,bc(a,Sc(r*r+i*i))*gc]}:function(){return[n*gc,r*gc]};return y.distance=p,y},ih=function(t){return t},ah=sc(),oh=sc(),sh={point:Nc,lineStart:Nc,lineEnd:Nc,polygonStart:function(){sh.lineStart=ch,sh.lineEnd=hh},polygonEnd:function(){sh.lineStart=sh.lineEnd=sh.point=Nc,ah.add(vc(oh)),oh.reset()},result:function(){var t=ah/2;return ah.reset(),t}};function ch(){sh.point=uh}function 
uh(t,e){sh.point=lh,Kl=eh=t,th=nh=e}function lh(t,e){oh.add(nh*t-eh*e),eh=t,nh=e}function hh(){lh(Kl,th)}var fh=sh,dh=1/0,ph=dh,gh=-dh,yh=gh;var vh,mh,bh,xh,_h={point:function(t,e){tgh&&(gh=t);eyh&&(yh=e)},lineStart:Nc,lineEnd:Nc,polygonStart:Nc,polygonEnd:Nc,result:function(){var t=[[dh,ph],[gh,yh]];return gh=yh=-(ph=dh=1/0),t}},kh=0,wh=0,Eh=0,Th=0,Ch=0,Sh=0,Ah=0,Mh=0,Oh=0,Dh={point:Nh,lineStart:Bh,lineEnd:Fh,polygonStart:function(){Dh.lineStart=Ih,Dh.lineEnd=jh},polygonEnd:function(){Dh.point=Nh,Dh.lineStart=Bh,Dh.lineEnd=Fh},result:function(){var t=Oh?[Ah/Oh,Mh/Oh]:Sh?[Th/Sh,Ch/Sh]:Eh?[kh/Eh,wh/Eh]:[NaN,NaN];return kh=wh=Eh=Th=Ch=Sh=Ah=Mh=Oh=0,t}};function Nh(t,e){kh+=t,wh+=e,++Eh}function Bh(){Dh.point=Lh}function Lh(t,e){Dh.point=Ph,Nh(bh=t,xh=e)}function Ph(t,e){var n=t-bh,r=e-xh,i=Sc(n*n+r*r);Th+=i*(bh+t)/2,Ch+=i*(xh+e)/2,Sh+=i,Nh(bh=t,xh=e)}function Fh(){Dh.point=Nh}function Ih(){Dh.point=Rh}function jh(){Yh(vh,mh)}function Rh(t,e){Dh.point=Yh,Nh(vh=bh=t,mh=xh=e)}function Yh(t,e){var n=t-bh,r=e-xh,i=Sc(n*n+r*r);Th+=i*(bh+t)/2,Ch+=i*(xh+e)/2,Sh+=i,Ah+=(i=xh*t-bh*e)*(bh+t),Mh+=i*(xh+e),Oh+=3*i,Nh(bh=t,xh=e)}var zh=Dh;function Uh(t){this._context=t}Uh.prototype={_radius:4.5,pointRadius:function(t){return this._radius=t,this},polygonStart:function(){this._line=0},polygonEnd:function(){this._line=NaN},lineStart:function(){this._point=0},lineEnd:function(){0===this._line&&this._context.closePath(),this._point=NaN},point:function(t,e){switch(this._point){case 0:this._context.moveTo(t,e),this._point=1;break;case 1:this._context.lineTo(t,e);break;default:this._context.moveTo(t+this._radius,e),this._context.arc(t,e,this._radius,0,pc)}},result:Nc};var $h,Wh,Vh,Hh,Gh,qh=sc(),Xh={point:Nc,lineStart:function(){Xh.point=Zh},lineEnd:function(){$h&&Jh(Wh,Vh),Xh.point=Nc},polygonStart:function(){$h=!0},polygonEnd:function(){$h=null},result:function(){var t=+qh;return qh.reset(),t}};function Zh(t,e){Xh.point=Jh,Wh=Hh=t,Vh=Gh=e}function 
Jh(t,e){Hh-=t,Gh-=e,qh.add(Sc(Hh*Hh+Gh*Gh)),Hh=t,Gh=e}var Qh=Xh;function Kh(){this._string=[]}function tf(t){return"m0,"+t+"a"+t+","+t+" 0 1,1 0,"+-2*t+"a"+t+","+t+" 0 1,1 0,"+2*t+"z"}Kh.prototype={_radius:4.5,_circle:tf(4.5),pointRadius:function(t){return(t=+t)!==this._radius&&(this._radius=t,this._circle=null),this},polygonStart:function(){this._line=0},polygonEnd:function(){this._line=NaN},lineStart:function(){this._point=0},lineEnd:function(){0===this._line&&this._string.push("Z"),this._point=NaN},point:function(t,e){switch(this._point){case 0:this._string.push("M",t,",",e),this._point=1;break;case 1:this._string.push("L",t,",",e);break;default:null==this._circle&&(this._circle=tf(this._radius)),this._string.push("M",t,",",e,this._circle)}},result:function(){if(this._string.length){var t=this._string.join("");return this._string=[],t}return null}};var ef=function(t,e){var n,r,i=4.5;function a(t){return t&&("function"==typeof i&&r.pointRadius(+i.apply(this,arguments)),$c(t,n(r))),r.result()}return a.area=function(t){return $c(t,n(fh)),fh.result()},a.measure=function(t){return $c(t,n(Qh)),Qh.result()},a.bounds=function(t){return $c(t,n(_h)),_h.result()},a.centroid=function(t){return $c(t,n(zh)),zh.result()},a.projection=function(e){return arguments.length?(n=null==e?(t=null,ih):(t=e).stream,a):t},a.context=function(t){return arguments.length?(r=null==t?(e=null,new Kh):new Uh(e=t),"function"!=typeof i&&r.pointRadius(i),a):e},a.pointRadius=function(t){return arguments.length?(i="function"==typeof t?t:(r.pointRadius(+t),+t),a):i},a.projection(t).context(e)},nf=function(t){return{stream:rf(t)}};function rf(t){return function(e){var n=new af;for(var r in t)n[r]=t[r];return n.stream=e,n}}function af(){}function of(t,e,n){var r=t.clipExtent&&t.clipExtent();return t.scale(150).translate([0,0]),null!=r&&t.clipExtent(null),$c(n,t.stream(_h)),e(_h.result()),null!=r&&t.clipExtent(r),t}function sf(t,e,n){return of(t,(function(n){var 
r=e[1][0]-e[0][0],i=e[1][1]-e[0][1],a=Math.min(r/(n[1][0]-n[0][0]),i/(n[1][1]-n[0][1])),o=+e[0][0]+(r-a*(n[1][0]+n[0][0]))/2,s=+e[0][1]+(i-a*(n[1][1]+n[0][1]))/2;t.scale(150*a).translate([o,s])}),n)}function cf(t,e,n){return sf(t,[[0,0],e],n)}function uf(t,e,n){return of(t,(function(n){var r=+e,i=r/(n[1][0]-n[0][0]),a=(r-i*(n[1][0]+n[0][0]))/2,o=-i*n[0][1];t.scale(150*i).translate([a,o])}),n)}function lf(t,e,n){return of(t,(function(n){var r=+e,i=r/(n[1][1]-n[0][1]),a=-i*n[0][0],o=(r-i*(n[1][1]+n[0][1]))/2;t.scale(150*i).translate([a,o])}),n)}af.prototype={constructor:af,point:function(t,e){this.stream.point(t,e)},sphere:function(){this.stream.sphere()},lineStart:function(){this.stream.lineStart()},lineEnd:function(){this.stream.lineEnd()},polygonStart:function(){this.stream.polygonStart()},polygonEnd:function(){this.stream.polygonEnd()}};var hf=xc(30*yc),ff=function(t,e){return+e?function(t,e){function n(r,i,a,o,s,c,u,l,h,f,d,p,g,y){var v=u-r,m=l-i,b=v*v+m*m;if(b>4*e&&g--){var x=o+f,_=s+d,k=c+p,w=Sc(x*x+_*_+k*k),E=Oc(k/=w),T=vc(vc(k)-1)<1e-6||vc(a-h)<1e-6?(a+h)/2:bc(_,x),C=t(T,E),S=C[0],A=C[1],M=S-r,O=A-i,D=m*M-v*O;(D*D/b>e||vc((v*M+m*O)/b-.5)>.3||o*f+s*d+c*p2?t[2]%360*yc:0,S()):[y*gc,v*gc,m*gc]},T.angle=function(t){return arguments.length?(b=t%360*yc,S()):b*gc},T.precision=function(t){return arguments.length?(o=ff(s,E=t*t),A()):Sc(E)},T.fitExtent=function(t,e){return sf(T,t,e)},T.fitSize=function(t,e){return cf(T,t,e)},T.fitWidth=function(t,e){return uf(T,t,e)},T.fitHeight=function(t,e){return lf(T,t,e)},function(){return e=t.apply(this,arguments),T.invert=e.invert&&C,S()}}function mf(t){var e=0,n=hc/3,r=vf(t),i=r(e,n);return i.parallels=function(t){return arguments.length?r(e=t[0]*yc,n=t[1]*yc):[e*gc,n*gc]},i}function bf(t,e){var n=Tc(t),r=(n+Tc(e))/2;if(vc(r)<1e-6)return function(t){var e=xc(t);function n(t,n){return[t*e,Tc(n)/e]}return n.invert=function(t,n){return[t/e,Oc(n*e)]},n}(t);var i=1+n*(2*r-n),a=Sc(i)/r;function o(t,e){var 
n=Sc(i-2*r*Tc(e))/r;return[n*Tc(t*=r),a-n*xc(t)]}return o.invert=function(t,e){var n=a-e;return[bc(t,vc(n))/r*Cc(n),Oc((i-(t*t+n*n)*r*r)/(2*r))]},o}var xf=function(){return mf(bf).scale(155.424).center([0,33.6442])},_f=function(){return xf().parallels([29.5,45.5]).scale(1070).translate([480,250]).rotate([96,0]).center([-.6,38.7])};var kf=function(){var t,e,n,r,i,a,o=_f(),s=xf().rotate([154,0]).center([-2,58.5]).parallels([55,65]),c=xf().rotate([157,0]).center([-3,19.9]).parallels([8,18]),u={point:function(t,e){a=[t,e]}};function l(t){var e=t[0],o=t[1];return a=null,n.point(e,o),a||(r.point(e,o),a)||(i.point(e,o),a)}function h(){return t=e=null,l}return l.invert=function(t){var e=o.scale(),n=o.translate(),r=(t[0]-n[0])/e,i=(t[1]-n[1])/e;return(i>=.12&&i<.234&&r>=-.425&&r<-.214?s:i>=.166&&i<.234&&r>=-.214&&r<-.115?c:o).invert(t)},l.stream=function(n){return t&&e===n?t:(r=[o.stream(e=n),s.stream(n),c.stream(n)],i=r.length,t={point:function(t,e){for(var n=-1;++n0?e<1e-6-fc&&(e=1e-6-fc):e>fc-1e-6&&(e=fc-1e-6);var n=i/Ec(Nf(e),r);return[n*Tc(r*t),i-n*xc(r*t)]}return a.invert=function(t,e){var n=i-e,a=Cc(r)*Sc(t*t+n*n);return[bc(t,vc(n))/r*Cc(n),2*mc(Ec(i/a,1/r))-fc]},a}var Lf=function(){return mf(Bf).scale(109.5).parallels([30,30])};function Pf(t,e){return[t,e]}Pf.invert=Pf;var Ff=function(){return yf(Pf).scale(152.63)};function If(t,e){var n=xc(t),r=t===e?Tc(t):(n-xc(e))/(e-t),i=n/r+t;if(vc(r)<1e-6)return Pf;function a(t,e){var n=i-e,a=r*t;return[n*Tc(a),i-n*xc(a)]}return a.invert=function(t,e){var n=i-e;return[bc(t,vc(n))/r*Cc(n),i-Cc(r)*Sc(t*t+n*n)]},a}var jf=function(){return mf(If).scale(131.154).center([0,13.9389])},Rf=1.340264,Yf=-.081106,zf=893e-6,Uf=.003796,$f=Sc(3)/2;function Wf(t,e){var n=Oc($f*Tc(e)),r=n*n,i=r*r*r;return[t*xc(n)/($f*(Rf+3*Yf*r+i*(7*zf+9*Uf*r))),n*(Rf+Yf*r+i*(zf+Uf*r))]}Wf.invert=function(t,e){for(var 
n,r=e,i=r*r,a=i*i*i,o=0;o<12&&(a=(i=(r-=n=(r*(Rf+Yf*i+a*(zf+Uf*i))-e)/(Rf+3*Yf*i+a*(7*zf+9*Uf*i)))*r)*i*i,!(vc(n)<1e-12));++o);return[$f*t*(Rf+3*Yf*i+a*(7*zf+9*Uf*i))/xc(r),Oc(Tc(r)/$f)]};var Vf=function(){return yf(Wf).scale(177.158)};function Hf(t,e){var n=xc(e),r=xc(t)*n;return[n*Tc(t)/r,Tc(e)/r]}Hf.invert=Ef(mc);var Gf=function(){return yf(Hf).scale(144.049).clipAngle(60)};function qf(t,e,n,r){return 1===t&&1===e&&0===n&&0===r?ih:rf({point:function(i,a){this.stream.point(i*t+n,a*e+r)}})}var Xf=function(){var t,e,n,r,i,a,o=1,s=0,c=0,u=1,l=1,h=ih,f=null,d=ih;function p(){return r=i=null,a}return a={stream:function(t){return r&&i===t?r:r=h(d(i=t))},postclip:function(r){return arguments.length?(d=r,f=t=e=n=null,p()):d},clipExtent:function(r){return arguments.length?(d=null==r?(f=t=e=n=null,ih):Cl(f=+r[0][0],t=+r[0][1],e=+r[1][0],n=+r[1][1]),p()):null==f?null:[[f,t],[e,n]]},scale:function(t){return arguments.length?(h=qf((o=+t)*u,o*l,s,c),p()):o},translate:function(t){return arguments.length?(h=qf(o*u,o*l,s=+t[0],c=+t[1]),p()):[s,c]},reflectX:function(t){return arguments.length?(h=qf(o*(u=t?-1:1),o*l,s,c),p()):u<0},reflectY:function(t){return arguments.length?(h=qf(o*u,o*(l=t?-1:1),s,c),p()):l<0},fitExtent:function(t,e){return sf(a,t,e)},fitSize:function(t,e){return cf(a,t,e)},fitWidth:function(t,e){return uf(a,t,e)},fitHeight:function(t,e){return lf(a,t,e)}}};function Zf(t,e){var n=e*e,r=n*n;return[t*(.8707-.131979*n+r*(r*(.003971*n-.001529*r)-.013791)),e*(1.007226+n*(.015085+r*(.028874*n-.044475-.005916*r)))]}Zf.invert=function(t,e){var n,r=e,i=25;do{var a=r*r,o=a*a;r-=n=(r*(1.007226+a*(.015085+o*(.028874*a-.044475-.005916*o)))-e)/(1.007226+a*(.045255+o*(.259866*a-.311325-.005916*11*o)))}while(vc(n)>1e-6&&--i>0);return[t/(.8707+(a=r*r)*(a*(a*a*a*(.003971-.001529*a)-.013791)-.131979)),r]};var Jf=function(){return yf(Zf).scale(175.295)};function Qf(t,e){return[xc(e)*Tc(t),Tc(e)]}Qf.invert=Ef(Oc);var Kf=function(){return 
yf(Qf).scale(249.5).clipAngle(90+1e-6)};function td(t,e){var n=xc(e),r=1+xc(t)*n;return[n*Tc(t)/r,Tc(e)/r]}td.invert=Ef((function(t){return 2*mc(t)}));var ed=function(){return yf(td).scale(250).clipAngle(142)};function nd(t,e){return[wc(Ac((fc+e)/2)),-t]}nd.invert=function(t,e){return[-e,2*mc(kc(t))-fc]};var rd=function(){var t=Df(nd),e=t.center,n=t.rotate;return t.center=function(t){return arguments.length?e([-t[1],t[0]]):[(t=e())[1],-t[0]]},t.rotate=function(t){return arguments.length?n([t[0],t[1],t.length>2?t[2]+90:90]):[(t=n())[0],t[1],t[2]-90]},n([0,0,90]).scale(159.155)};function id(t,e){return t.parent===e.parent?1:2}function ad(t,e){return t+e.x}function od(t,e){return Math.max(t,e.y)}var sd=function(){var t=id,e=1,n=1,r=!1;function i(i){var a,o=0;i.eachAfter((function(e){var n=e.children;n?(e.x=function(t){return t.reduce(ad,0)/t.length}(n),e.y=function(t){return 1+t.reduce(od,0)}(n)):(e.x=a?o+=t(e,a):0,e.y=0,a=e)}));var s=function(t){for(var e;e=t.children;)t=e[0];return t}(i),c=function(t){for(var e;e=t.children;)t=e[e.length-1];return t}(i),u=s.x-t(s,c)/2,l=c.x+t(c,s)/2;return i.eachAfter(r?function(t){t.x=(t.x-i.x)*e,t.y=(i.y-t.y)*n}:function(t){t.x=(t.x-u)/(l-u)*e,t.y=(1-(i.y?t.y/i.y:1))*n})}return i.separation=function(e){return arguments.length?(t=e,i):t},i.size=function(t){return arguments.length?(r=!1,e=+t[0],n=+t[1],i):r?null:[e,n]},i.nodeSize=function(t){return arguments.length?(r=!0,e=+t[0],n=+t[1],i):r?[e,n]:null},i};function cd(t){var e=0,n=t.children,r=n&&n.length;if(r)for(;--r>=0;)e+=n[r].value;else e=1;t.value=e}function ud(t,e){var n,r,i,a,o,s=new dd(t),c=+t.value&&(s.value=t.value),u=[s];for(null==e&&(e=ld);n=u.pop();)if(c&&(n.value=+n.data.value),(i=e(n.data))&&(o=i.length))for(n.children=new Array(o),a=o-1;a>=0;--a)u.push(r=n.children[a]=new dd(i[a])),r.parent=n,r.depth=n.depth+1;return s.eachBefore(fd)}function ld(t){return t.children}function hd(t){t.data=t.data.data}function fd(t){var 
e=0;do{t.height=e}while((t=t.parent)&&t.height<++e)}function dd(t){this.data=t,this.depth=this.height=0,this.parent=null}dd.prototype=ud.prototype={constructor:dd,count:function(){return this.eachAfter(cd)},each:function(t){var e,n,r,i,a=this,o=[a];do{for(e=o.reverse(),o=[];a=e.pop();)if(t(a),n=a.children)for(r=0,i=n.length;r=0;--n)i.push(e[n]);return this},sum:function(t){return this.eachAfter((function(e){for(var n=+t(e.data)||0,r=e.children,i=r&&r.length;--i>=0;)n+=r[i].value;e.value=n}))},sort:function(t){return this.eachBefore((function(e){e.children&&e.children.sort(t)}))},path:function(t){for(var e=this,n=function(t,e){if(t===e)return t;var n=t.ancestors(),r=e.ancestors(),i=null;t=n.pop(),e=r.pop();for(;t===e;)i=t,t=n.pop(),e=r.pop();return i}(e,t),r=[e];e!==n;)e=e.parent,r.push(e);for(var i=r.length;t!==n;)r.splice(i,0,t),t=t.parent;return r},ancestors:function(){for(var t=this,e=[t];t=t.parent;)e.push(t);return e},descendants:function(){var t=[];return this.each((function(e){t.push(e)})),t},leaves:function(){var t=[];return this.eachBefore((function(e){e.children||t.push(e)})),t},links:function(){var t=this,e=[];return t.each((function(n){n!==t&&e.push({source:n.parent,target:n})})),e},copy:function(){return ud(this).eachBefore(hd)}};var pd=Array.prototype.slice;var gd=function(t){for(var e,n,r=0,i=(t=function(t){for(var e,n,r=t.length;r;)n=Math.random()*r--|0,e=t[r],t[r]=t[n],t[n]=e;return t}(pd.call(t))).length,a=[];r0&&n*n>r*r+i*i}function bd(t,e){for(var n=0;n(o*=o)?(r=(u+o-i)/(2*u),a=Math.sqrt(Math.max(0,o/u-r*r)),n.x=t.x-r*s-a*c,n.y=t.y-r*c+a*s):(r=(u+i-o)/(2*u),a=Math.sqrt(Math.max(0,i/u-r*r)),n.x=e.x+r*s-a*c,n.y=e.y+r*c+a*s)):(n.x=e.x+n.r,n.y=e.y)}function Ed(t,e){var n=t.r+e.r-1e-6,r=e.x-t.x,i=e.y-t.y;return n>0&&n*n>r*r+i*i}function Td(t){var e=t._,n=t.next._,r=e.r+n.r,i=(e.x*n.r+n.x*e.r)/r,a=(e.y*n.r+n.y*e.r)/r;return i*i+a*a}function Cd(t){this._=t,this.next=null,this.previous=null}function Sd(t){if(!(i=t.length))return 0;var 
e,n,r,i,a,o,s,c,u,l,h;if((e=t[0]).x=0,e.y=0,!(i>1))return e.r;if(n=t[1],e.x=-n.r,n.x=e.r,n.y=0,!(i>2))return e.r+n.r;wd(n,e,r=t[2]),e=new Cd(e),n=new Cd(n),r=new Cd(r),e.next=r.previous=n,n.next=e.previous=r,r.next=n.previous=e;t:for(s=3;s0)throw new Error("cycle");return a}return n.id=function(e){return arguments.length?(t=Od(e),n):t},n.parentId=function(t){return arguments.length?(e=Od(t),n):e},n};function Hd(t,e){return t.parent===e.parent?1:2}function Gd(t){var e=t.children;return e?e[0]:t.t}function qd(t){var e=t.children;return e?e[e.length-1]:t.t}function Xd(t,e,n){var r=n/(e.i-t.i);e.c-=r,e.s+=n,t.c+=r,e.z+=n,e.m+=n}function Zd(t,e,n){return t.a.parent===e.parent?t.a:n}function Jd(t,e){this._=t,this.parent=null,this.children=null,this.A=null,this.a=this,this.z=0,this.m=0,this.c=0,this.s=0,this.t=null,this.i=e}Jd.prototype=Object.create(dd.prototype);var Qd=function(){var t=Hd,e=1,n=1,r=null;function i(i){var c=function(t){for(var e,n,r,i,a,o=new Jd(t,0),s=[o];e=s.pop();)if(r=e._.children)for(e.children=new Array(a=r.length),i=a-1;i>=0;--i)s.push(n=e.children[i]=new Jd(r[i],i)),n.parent=e;return(o.parent=new Jd(null,0)).children=[o],o}(i);if(c.eachAfter(a),c.parent.m=-c.z,c.eachBefore(o),r)i.eachBefore(s);else{var u=i,l=i,h=i;i.eachBefore((function(t){t.xl.x&&(l=t),t.depth>h.depth&&(h=t)}));var f=u===l?1:t(u,l)/2,d=f-u.x,p=e/(l.x+f+d),g=n/(h.depth||1);i.eachBefore((function(t){t.x=(t.x+d)*p,t.y=t.depth*g}))}return i}function a(e){var n=e.children,r=e.parent.children,i=e.i?r[e.i-1]:null;if(n){!function(t){for(var e,n=0,r=0,i=t.children,a=i.length;--a>=0;)(e=i[a]).z+=n,e.m+=n,n+=e.s+(r+=e.c)}(e);var a=(n[0].z+n[n.length-1].z)/2;i?(e.z=i.z+t(e._,i._),e.m=e.z-a):e.z=a}else i&&(e.z=i.z+t(e._,i._));e.parent.A=function(e,n,r){if(n){for(var 
i,a=e,o=e,s=n,c=a.parent.children[0],u=a.m,l=o.m,h=s.m,f=c.m;s=qd(s),a=Gd(a),s&&a;)c=Gd(c),(o=qd(o)).a=e,(i=s.z+h-a.z-u+t(s._,a._))>0&&(Xd(Zd(s,e,r),e,i),u+=i,l+=i),h+=s.m,u+=a.m,f+=c.m,l+=o.m;s&&!qd(o)&&(o.t=s,o.m+=h-l),a&&!Gd(c)&&(c.t=a,c.m+=u-f,r=e)}return r}(e,i,e.parent.A||r[0])}function o(t){t._.x=t.z+t.parent.m,t.m+=t.parent.m}function s(t){t.x*=e,t.y=t.depth*n}return i.separation=function(e){return arguments.length?(t=e,i):t},i.size=function(t){return arguments.length?(r=!1,e=+t[0],n=+t[1],i):r?null:[e,n]},i.nodeSize=function(t){return arguments.length?(r=!0,e=+t[0],n=+t[1],i):r?[e,n]:null},i},Kd=function(t,e,n,r,i){for(var a,o=t.children,s=-1,c=o.length,u=t.value&&(i-n)/t.value;++sf&&(f=s),y=l*l*g,(d=Math.max(f/y,y/h))>p){l-=s;break}p=d}v.push(o={value:l,dice:c1?e:1)},n}(tp),rp=function(){var t=np,e=!1,n=1,r=1,i=[0],a=Dd,o=Dd,s=Dd,c=Dd,u=Dd;function l(t){return t.x0=t.y0=0,t.x1=n,t.y1=r,t.eachBefore(h),i=[0],e&&t.eachBefore(jd),t}function h(e){var n=i[e.depth],r=e.x0+n,l=e.y0+n,h=e.x1-n,f=e.y1-n;h=n-1){var l=s[e];return l.x0=i,l.y0=a,l.x1=o,void(l.y1=c)}var h=u[e],f=r/2+h,d=e+1,p=n-1;for(;d>>1;u[g]c-a){var m=(i*v+o*y)/r;t(e,d,y,i,a,m,c),t(d,n,v,m,a,o,c)}else{var b=(a*v+c*y)/r;t(e,d,y,i,a,o,b),t(d,n,v,i,b,o,c)}}(0,c,t.value,e,n,r,i)},ap=function(t,e,n,r,i){(1&t.depth?Kd:Rd)(t,e,n,r,i)},op=function t(e){function n(t,n,r,i,a){if((o=t._squarify)&&o.ratio===e)for(var o,s,c,u,l,h=-1,f=o.length,d=t.value;++h1?e:1)},n}(tp),sp=function(t){var e=t.length;return function(n){return t[Math.max(0,Math.min(e-1,Math.floor(n*e)))]}},cp=function(t,e){var n=un(+t,+e);return function(t){var e=n(t);return e-360*Math.floor(e/360)}},up=function(t,e){return t=+t,e=+e,function(n){return Math.round(t*(1-n)+e*n)}},lp=Math.SQRT2;function hp(t){return((t=Math.exp(t))+1/t)/2}var fp=function(t,e){var n,r,i=t[0],a=t[1],o=t[2],s=e[0],c=e[1],u=e[2],l=s-i,h=c-a,f=l*l+h*h;if(f<1e-12)r=Math.log(u/o)/lp,n=function(t){return[i+t*l,a+t*h,o*Math.exp(lp*t*r)]};else{var 
d=Math.sqrt(f),p=(u*u-o*o+4*f)/(2*o*2*d),g=(u*u-o*o-4*f)/(2*u*2*d),y=Math.log(Math.sqrt(p*p+1)-p),v=Math.log(Math.sqrt(g*g+1)-g);r=(v-y)/lp,n=function(t){var e,n=t*r,s=hp(y),c=o/(2*d)*(s*(e=lp*n+y,((e=Math.exp(2*e))-1)/(e+1))-function(t){return((t=Math.exp(t))-1/t)/2}(y));return[i+c*l,a+c*h,o*s/hp(lp*n+y)]}}return n.duration=1e3*r,n};function dp(t){return function(e,n){var r=t((e=tn(e)).h,(n=tn(n)).h),i=hn(e.s,n.s),a=hn(e.l,n.l),o=hn(e.opacity,n.opacity);return function(t){return e.h=r(t),e.s=i(t),e.l=a(t),e.opacity=o(t),e+""}}}var pp=dp(un),gp=dp(hn);function yp(t,e){var n=hn((t=pa(t)).l,(e=pa(e)).l),r=hn(t.a,e.a),i=hn(t.b,e.b),a=hn(t.opacity,e.opacity);return function(e){return t.l=n(e),t.a=r(e),t.b=i(e),t.opacity=a(e),t+""}}function vp(t){return function(e,n){var r=t((e=ka(e)).h,(n=ka(n)).h),i=hn(e.c,n.c),a=hn(e.l,n.l),o=hn(e.opacity,n.opacity);return function(t){return e.h=r(t),e.c=i(t),e.l=a(t),e.opacity=o(t),e+""}}}var mp=vp(un),bp=vp(hn);function xp(t){return function e(n){function r(e,r){var i=t((e=Oa(e)).h,(r=Oa(r)).h),a=hn(e.s,r.s),o=hn(e.l,r.l),s=hn(e.opacity,r.opacity);return function(t){return e.h=i(t),e.s=a(t),e.l=o(Math.pow(t,n)),e.opacity=s(t),e+""}}return n=+n,r.gamma=e,r}(1)}var _p=xp(un),kp=xp(hn);function wp(t,e){for(var n=0,r=e.length-1,i=e[0],a=new Array(r<0?0:r);n1&&(e=t[a[o-2]],n=t[a[o-1]],r=t[s],(n[0]-e[0])*(r[1]-e[1])-(n[1]-e[1])*(r[0]-e[0])<=0);)--o;a[o++]=s}return a.slice(0,o)}var Mp=function(t){if((n=t.length)<3)return null;var e,n,r=new Array(n),i=new Array(n);for(e=0;e=0;--e)u.push(t[r[a[e]][2]]);for(e=+s;es!=u>s&&o<(c-n)*(s-r)/(u-r)+n&&(l=!l),c=n,u=r;return l},Dp=function(t){for(var e,n,r=-1,i=t.length,a=t[i-1],o=a[0],s=a[1],c=0;++r1);return t+n*a*Math.sqrt(-2*Math.log(i)/i)}}return n.source=t,n}(Np),Pp=function t(e){function n(){var t=Lp.source(e).apply(this,arguments);return function(){return Math.exp(t())}}return n.source=t,n}(Np),Fp=function t(e){function n(t){return function(){for(var n=0,r=0;rr&&(e=n,n=r,r=e),function(t){return 
Math.max(n,Math.min(r,t))}}function tg(t,e,n){var r=t[0],i=t[1],a=e[0],o=e[1];return i2?eg:tg,i=a=null,h}function h(e){return isNaN(e=+e)?n:(i||(i=r(o.map(t),s,c)))(t(u(e)))}return h.invert=function(n){return u(e((a||(a=r(s,o.map(t),_n)))(n)))},h.domain=function(t){return arguments.length?(o=Up.call(t,Xp),u===Jp||(u=Kp(o)),l()):o.slice()},h.range=function(t){return arguments.length?(s=$p.call(t),l()):s.slice()},h.rangeRound=function(t){return s=$p.call(t),c=up,l()},h.clamp=function(t){return arguments.length?(u=t?Kp(o):Jp,h):u!==Jp},h.interpolate=function(t){return arguments.length?(c=t,l()):c},h.unknown=function(t){return arguments.length?(n=t,h):n},function(n,r){return t=n,e=r,l()}}function ig(t,e){return rg()(t,e)}var ag=function(t,e,n,r){var i,a=A(t,e,n);switch((r=Vs(null==r?",f":r)).type){case"s":var o=Math.max(Math.abs(t),Math.abs(e));return null!=r.precision||isNaN(i=ac(a,o))||(r.precision=i),Zs(r,o);case"":case"e":case"g":case"p":case"r":null!=r.precision||isNaN(i=oc(a,Math.max(Math.abs(t),Math.abs(e))))||(r.precision=i-("e"===r.type));break;case"f":case"%":null!=r.precision||isNaN(i=ic(a))||(r.precision=i-2*("%"===r.type))}return Xs(r)};function og(t){var e=t.domain;return t.ticks=function(t){var n=e();return C(n[0],n[n.length-1],null==t?10:t)},t.tickFormat=function(t,n){var r=e();return ag(r[0],r[r.length-1],null==t?10:t,n)},t.nice=function(n){null==n&&(n=10);var r,i=e(),a=0,o=i.length-1,s=i[a],c=i[o];return c0?r=S(s=Math.floor(s/r)*r,c=Math.ceil(c/r)*r,n):r<0&&(r=S(s=Math.ceil(s*r)/r,c=Math.floor(c*r)/r,n)),r>0?(i[a]=Math.floor(s/r)*r,i[o]=Math.ceil(c/r)*r,e(i)):r<0&&(i[a]=Math.ceil(s*r)/r,i[o]=Math.floor(c*r)/r,e(i)),t},t}function sg(){var t=ig(Jp,Jp);return t.copy=function(){return ng(t,sg())},Rp.apply(t,arguments),og(t)}function cg(t){var e;function n(t){return isNaN(t=+t)?e:t}return n.invert=n,n.domain=n.range=function(e){return arguments.length?(t=Up.call(e,Xp),n):t.slice()},n.unknown=function(t){return 
arguments.length?(e=t,n):e},n.copy=function(){return cg(t).unknown(e)},t=arguments.length?Up.call(t,Xp):[0,1],og(n)}var ug=function(t,e){var n,r=0,i=(t=t.slice()).length-1,a=t[r],o=t[i];return o0){for(;fc)break;g.push(h)}}else for(;f=1;--l)if(!((h=u*l)c)break;g.push(h)}}else g=C(f,d,Math.min(d-f,p)).map(n);return r?g.reverse():g},r.tickFormat=function(t,i){if(null==i&&(i=10===a?".0e":","),"function"!=typeof i&&(i=Xs(i)),t===1/0)return i;null==t&&(t=10);var o=Math.max(1,a*t/r.ticks().length);return function(t){var r=t/n(Math.round(e(t)));return r*a0?i[r-1]:e[0],r=r?[i[r-1],n]:[i[o-1],i[o]]},o.unknown=function(e){return arguments.length?(t=e,o):o},o.thresholds=function(){return i.slice()},o.copy=function(){return Mg().domain([e,n]).range(a).unknown(t)},Rp.apply(og(o),arguments)}function Og(){var t,e=[.5],n=[0,1],r=1;function i(i){return i<=i?n[c(e,i,0,r)]:t}return i.domain=function(t){return arguments.length?(e=$p.call(t),r=Math.min(e.length,n.length-1),i):e.slice()},i.range=function(t){return arguments.length?(n=$p.call(t),r=Math.min(e.length,n.length-1),i):n.slice()},i.invertExtent=function(t){var r=n.indexOf(t);return[e[r-1],e[r]]},i.unknown=function(e){return arguments.length?(t=e,i):t},i.copy=function(){return Og().domain(e).range(n).unknown(t)},Rp.apply(i,arguments)}var Dg=new Date,Ng=new Date;function Bg(t,e,n,r){function i(e){return t(e=0===arguments.length?new Date:new Date(+e)),e}return i.floor=function(e){return t(e=new Date(+e)),e},i.ceil=function(n){return t(n=new Date(n-1)),e(n,1),t(n),n},i.round=function(t){var e=i(t),n=i.ceil(t);return t-e0))return s;do{s.push(o=new Date(+n)),e(n,a),t(n)}while(o=e)for(;t(e),!n(e);)e.setTime(e-1)}),(function(t,r){if(t>=t)if(r<0)for(;++r<=0;)for(;e(t,-1),!n(t););else for(;--r>=0;)for(;e(t,1),!n(t););}))},n&&(i.count=function(e,r){return Dg.setTime(+e),Ng.setTime(+r),t(Dg),t(Ng),Math.floor(n(Dg,Ng))},i.every=function(t){return t=Math.floor(t),isFinite(t)&&t>0?t>1?i.filter(r?function(e){return 
r(e)%t==0}:function(e){return i.count(0,e)%t==0}):i:null}),i}var Lg=Bg((function(t){t.setMonth(0,1),t.setHours(0,0,0,0)}),(function(t,e){t.setFullYear(t.getFullYear()+e)}),(function(t,e){return e.getFullYear()-t.getFullYear()}),(function(t){return t.getFullYear()}));Lg.every=function(t){return isFinite(t=Math.floor(t))&&t>0?Bg((function(e){e.setFullYear(Math.floor(e.getFullYear()/t)*t),e.setMonth(0,1),e.setHours(0,0,0,0)}),(function(e,n){e.setFullYear(e.getFullYear()+n*t)})):null};var Pg=Lg,Fg=Lg.range,Ig=Bg((function(t){t.setDate(1),t.setHours(0,0,0,0)}),(function(t,e){t.setMonth(t.getMonth()+e)}),(function(t,e){return e.getMonth()-t.getMonth()+12*(e.getFullYear()-t.getFullYear())}),(function(t){return t.getMonth()})),jg=Ig,Rg=Ig.range;function Yg(t){return Bg((function(e){e.setDate(e.getDate()-(e.getDay()+7-t)%7),e.setHours(0,0,0,0)}),(function(t,e){t.setDate(t.getDate()+7*e)}),(function(t,e){return(e-t-6e4*(e.getTimezoneOffset()-t.getTimezoneOffset()))/6048e5}))}var zg=Yg(0),Ug=Yg(1),$g=Yg(2),Wg=Yg(3),Vg=Yg(4),Hg=Yg(5),Gg=Yg(6),qg=zg.range,Xg=Ug.range,Zg=$g.range,Jg=Wg.range,Qg=Vg.range,Kg=Hg.range,ty=Gg.range,ey=Bg((function(t){t.setHours(0,0,0,0)}),(function(t,e){t.setDate(t.getDate()+e)}),(function(t,e){return(e-t-6e4*(e.getTimezoneOffset()-t.getTimezoneOffset()))/864e5}),(function(t){return t.getDate()-1})),ny=ey,ry=ey.range,iy=Bg((function(t){t.setTime(t-t.getMilliseconds()-1e3*t.getSeconds()-6e4*t.getMinutes())}),(function(t,e){t.setTime(+t+36e5*e)}),(function(t,e){return(e-t)/36e5}),(function(t){return t.getHours()})),ay=iy,oy=iy.range,sy=Bg((function(t){t.setTime(t-t.getMilliseconds()-1e3*t.getSeconds())}),(function(t,e){t.setTime(+t+6e4*e)}),(function(t,e){return(e-t)/6e4}),(function(t){return t.getMinutes()})),cy=sy,uy=sy.range,ly=Bg((function(t){t.setTime(t-t.getMilliseconds())}),(function(t,e){t.setTime(+t+1e3*e)}),(function(t,e){return(e-t)/1e3}),(function(t){return 
t.getUTCSeconds()})),hy=ly,fy=ly.range,dy=Bg((function(){}),(function(t,e){t.setTime(+t+e)}),(function(t,e){return e-t}));dy.every=function(t){return t=Math.floor(t),isFinite(t)&&t>0?t>1?Bg((function(e){e.setTime(Math.floor(e/t)*t)}),(function(e,n){e.setTime(+e+n*t)}),(function(e,n){return(n-e)/t})):dy:null};var py=dy,gy=dy.range;function yy(t){return Bg((function(e){e.setUTCDate(e.getUTCDate()-(e.getUTCDay()+7-t)%7),e.setUTCHours(0,0,0,0)}),(function(t,e){t.setUTCDate(t.getUTCDate()+7*e)}),(function(t,e){return(e-t)/6048e5}))}var vy=yy(0),my=yy(1),by=yy(2),xy=yy(3),_y=yy(4),ky=yy(5),wy=yy(6),Ey=vy.range,Ty=my.range,Cy=by.range,Sy=xy.range,Ay=_y.range,My=ky.range,Oy=wy.range,Dy=Bg((function(t){t.setUTCHours(0,0,0,0)}),(function(t,e){t.setUTCDate(t.getUTCDate()+e)}),(function(t,e){return(e-t)/864e5}),(function(t){return t.getUTCDate()-1})),Ny=Dy,By=Dy.range,Ly=Bg((function(t){t.setUTCMonth(0,1),t.setUTCHours(0,0,0,0)}),(function(t,e){t.setUTCFullYear(t.getUTCFullYear()+e)}),(function(t,e){return e.getUTCFullYear()-t.getUTCFullYear()}),(function(t){return t.getUTCFullYear()}));Ly.every=function(t){return isFinite(t=Math.floor(t))&&t>0?Bg((function(e){e.setUTCFullYear(Math.floor(e.getUTCFullYear()/t)*t),e.setUTCMonth(0,1),e.setUTCHours(0,0,0,0)}),(function(e,n){e.setUTCFullYear(e.getUTCFullYear()+n*t)})):null};var Py=Ly,Fy=Ly.range;function Iy(t){if(0<=t.y&&t.y<100){var e=new Date(-1,t.m,t.d,t.H,t.M,t.S,t.L);return e.setFullYear(t.y),e}return new Date(t.y,t.m,t.d,t.H,t.M,t.S,t.L)}function jy(t){if(0<=t.y&&t.y<100){var e=new Date(Date.UTC(-1,t.m,t.d,t.H,t.M,t.S,t.L));return e.setUTCFullYear(t.y),e}return new Date(Date.UTC(t.y,t.m,t.d,t.H,t.M,t.S,t.L))}function Ry(t,e,n){return{y:t,m:e,d:n,H:0,M:0,S:0,L:0}}function Yy(t){var e=t.dateTime,n=t.date,r=t.time,i=t.periods,a=t.days,o=t.shortDays,s=t.months,c=t.shortMonths,u=Qy(i),l=Ky(i),h=Qy(a),f=Ky(a),d=Qy(o),p=Ky(o),g=Qy(s),y=Ky(s),v=Qy(c),m=Ky(c),b={a:function(t){return o[t.getDay()]},A:function(t){return 
a[t.getDay()]},b:function(t){return c[t.getMonth()]},B:function(t){return s[t.getMonth()]},c:null,d:xv,e:xv,f:Tv,H:_v,I:kv,j:wv,L:Ev,m:Cv,M:Sv,p:function(t){return i[+(t.getHours()>=12)]},q:function(t){return 1+~~(t.getMonth()/3)},Q:em,s:nm,S:Av,u:Mv,U:Ov,V:Dv,w:Nv,W:Bv,x:null,X:null,y:Lv,Y:Pv,Z:Fv,"%":tm},x={a:function(t){return o[t.getUTCDay()]},A:function(t){return a[t.getUTCDay()]},b:function(t){return c[t.getUTCMonth()]},B:function(t){return s[t.getUTCMonth()]},c:null,d:Iv,e:Iv,f:Uv,H:jv,I:Rv,j:Yv,L:zv,m:$v,M:Wv,p:function(t){return i[+(t.getUTCHours()>=12)]},q:function(t){return 1+~~(t.getUTCMonth()/3)},Q:em,s:nm,S:Vv,u:Hv,U:Gv,V:qv,w:Xv,W:Zv,x:null,X:null,y:Jv,Y:Qv,Z:Kv,"%":tm},_={a:function(t,e,n){var r=d.exec(e.slice(n));return r?(t.w=p[r[0].toLowerCase()],n+r[0].length):-1},A:function(t,e,n){var r=h.exec(e.slice(n));return r?(t.w=f[r[0].toLowerCase()],n+r[0].length):-1},b:function(t,e,n){var r=v.exec(e.slice(n));return r?(t.m=m[r[0].toLowerCase()],n+r[0].length):-1},B:function(t,e,n){var r=g.exec(e.slice(n));return r?(t.m=y[r[0].toLowerCase()],n+r[0].length):-1},c:function(t,n,r){return E(t,e,n,r)},d:lv,e:lv,f:yv,H:fv,I:fv,j:hv,L:gv,m:uv,M:dv,p:function(t,e,n){var r=u.exec(e.slice(n));return r?(t.p=l[r[0].toLowerCase()],n+r[0].length):-1},q:cv,Q:mv,s:bv,S:pv,u:ev,U:nv,V:rv,w:tv,W:iv,x:function(t,e,r){return E(t,n,e,r)},X:function(t,e,n){return E(t,r,e,n)},y:ov,Y:av,Z:sv,"%":vv};function k(t,e){return function(n){var r,i,a,o=[],s=-1,c=0,u=t.length;for(n instanceof Date||(n=new Date(+n));++s53)return null;"w"in a||(a.w=1),"Z"in a?(i=(r=jy(Ry(a.y,0,1))).getUTCDay(),r=i>4||0===i?my.ceil(r):my(r),r=Ny.offset(r,7*(a.V-1)),a.y=r.getUTCFullYear(),a.m=r.getUTCMonth(),a.d=r.getUTCDate()+(a.w+6)%7):(i=(r=Iy(Ry(a.y,0,1))).getDay(),r=i>4||0===i?Ug.ceil(r):Ug(r),r=ny.offset(r,7*(a.V-1)),a.y=r.getFullYear(),a.m=r.getMonth(),a.d=r.getDate()+(a.w+6)%7)}else("W"in a||"U"in a)&&("w"in a||(a.w="u"in a?a.u%7:"W"in a?1:0),i="Z"in 
a?jy(Ry(a.y,0,1)).getUTCDay():Iy(Ry(a.y,0,1)).getDay(),a.m=0,a.d="W"in a?(a.w+6)%7+7*a.W-(i+5)%7:a.w+7*a.U-(i+6)%7);return"Z"in a?(a.H+=a.Z/100|0,a.M+=a.Z%100,jy(a)):Iy(a)}}function E(t,e,n,r){for(var i,a,o=0,s=e.length,c=n.length;o=c)return-1;if(37===(i=e.charCodeAt(o++))){if(i=e.charAt(o++),!(a=_[i in Hy?e.charAt(o++):i])||(r=a(t,n,r))<0)return-1}else if(i!=n.charCodeAt(r++))return-1}return r}return(b.x=k(n,b),b.X=k(r,b),b.c=k(e,b),x.x=k(n,x),x.X=k(r,x),x.c=k(e,x),{format:function(t){var e=k(t+="",b);return e.toString=function(){return t},e},parse:function(t){var e=w(t+="",!1);return e.toString=function(){return t},e},utcFormat:function(t){var e=k(t+="",x);return e.toString=function(){return t},e},utcParse:function(t){var e=w(t+="",!0);return e.toString=function(){return t},e}})}var zy,Uy,$y,Wy,Vy,Hy={"-":"",_:" ",0:"0"},Gy=/^\s*\d+/,qy=/^%/,Xy=/[\\^$*+?|[\]().{}]/g;function Zy(t,e,n){var r=t<0?"-":"",i=(r?-t:t)+"",a=i.length;return r+(a68?1900:2e3),n+r[0].length):-1}function sv(t,e,n){var r=/^(Z)|([+-]\d\d)(?::?(\d\d))?/.exec(e.slice(n,n+6));return r?(t.Z=r[1]?0:-(r[2]+(r[3]||"00")),n+r[0].length):-1}function cv(t,e,n){var r=Gy.exec(e.slice(n,n+1));return r?(t.q=3*r[0]-3,n+r[0].length):-1}function uv(t,e,n){var r=Gy.exec(e.slice(n,n+2));return r?(t.m=r[0]-1,n+r[0].length):-1}function lv(t,e,n){var r=Gy.exec(e.slice(n,n+2));return r?(t.d=+r[0],n+r[0].length):-1}function hv(t,e,n){var r=Gy.exec(e.slice(n,n+3));return r?(t.m=0,t.d=+r[0],n+r[0].length):-1}function fv(t,e,n){var r=Gy.exec(e.slice(n,n+2));return r?(t.H=+r[0],n+r[0].length):-1}function dv(t,e,n){var r=Gy.exec(e.slice(n,n+2));return r?(t.M=+r[0],n+r[0].length):-1}function pv(t,e,n){var r=Gy.exec(e.slice(n,n+2));return r?(t.S=+r[0],n+r[0].length):-1}function gv(t,e,n){var r=Gy.exec(e.slice(n,n+3));return r?(t.L=+r[0],n+r[0].length):-1}function yv(t,e,n){var r=Gy.exec(e.slice(n,n+6));return r?(t.L=Math.floor(r[0]/1e3),n+r[0].length):-1}function vv(t,e,n){var r=qy.exec(e.slice(n,n+1));return 
r?n+r[0].length:-1}function mv(t,e,n){var r=Gy.exec(e.slice(n));return r?(t.Q=+r[0],n+r[0].length):-1}function bv(t,e,n){var r=Gy.exec(e.slice(n));return r?(t.s=+r[0],n+r[0].length):-1}function xv(t,e){return Zy(t.getDate(),e,2)}function _v(t,e){return Zy(t.getHours(),e,2)}function kv(t,e){return Zy(t.getHours()%12||12,e,2)}function wv(t,e){return Zy(1+ny.count(Pg(t),t),e,3)}function Ev(t,e){return Zy(t.getMilliseconds(),e,3)}function Tv(t,e){return Ev(t,e)+"000"}function Cv(t,e){return Zy(t.getMonth()+1,e,2)}function Sv(t,e){return Zy(t.getMinutes(),e,2)}function Av(t,e){return Zy(t.getSeconds(),e,2)}function Mv(t){var e=t.getDay();return 0===e?7:e}function Ov(t,e){return Zy(zg.count(Pg(t)-1,t),e,2)}function Dv(t,e){var n=t.getDay();return t=n>=4||0===n?Vg(t):Vg.ceil(t),Zy(Vg.count(Pg(t),t)+(4===Pg(t).getDay()),e,2)}function Nv(t){return t.getDay()}function Bv(t,e){return Zy(Ug.count(Pg(t)-1,t),e,2)}function Lv(t,e){return Zy(t.getFullYear()%100,e,2)}function Pv(t,e){return Zy(t.getFullYear()%1e4,e,4)}function Fv(t){var e=t.getTimezoneOffset();return(e>0?"-":(e*=-1,"+"))+Zy(e/60|0,"0",2)+Zy(e%60,"0",2)}function Iv(t,e){return Zy(t.getUTCDate(),e,2)}function jv(t,e){return Zy(t.getUTCHours(),e,2)}function Rv(t,e){return Zy(t.getUTCHours()%12||12,e,2)}function Yv(t,e){return Zy(1+Ny.count(Py(t),t),e,3)}function zv(t,e){return Zy(t.getUTCMilliseconds(),e,3)}function Uv(t,e){return zv(t,e)+"000"}function $v(t,e){return Zy(t.getUTCMonth()+1,e,2)}function Wv(t,e){return Zy(t.getUTCMinutes(),e,2)}function Vv(t,e){return Zy(t.getUTCSeconds(),e,2)}function Hv(t){var e=t.getUTCDay();return 0===e?7:e}function Gv(t,e){return Zy(vy.count(Py(t)-1,t),e,2)}function qv(t,e){var n=t.getUTCDay();return t=n>=4||0===n?_y(t):_y.ceil(t),Zy(_y.count(Py(t),t)+(4===Py(t).getUTCDay()),e,2)}function Xv(t){return t.getUTCDay()}function Zv(t,e){return Zy(my.count(Py(t)-1,t),e,2)}function Jv(t,e){return Zy(t.getUTCFullYear()%100,e,2)}function Qv(t,e){return 
Zy(t.getUTCFullYear()%1e4,e,4)}function Kv(){return"+0000"}function tm(){return"%"}function em(t){return+t}function nm(t){return Math.floor(+t/1e3)}function rm(t){return zy=Yy(t),Uy=zy.format,$y=zy.parse,Wy=zy.utcFormat,Vy=zy.utcParse,zy}rm({dateTime:"%x, %X",date:"%-m/%-d/%Y",time:"%-I:%M:%S %p",periods:["AM","PM"],days:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],shortDays:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],months:["January","February","March","April","May","June","July","August","September","October","November","December"],shortMonths:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]});function im(t){return new Date(t)}function am(t){return t instanceof Date?+t:+new Date(+t)}function om(t,e,n,r,a,o,s,c,u){var l=ig(Jp,Jp),h=l.invert,f=l.domain,d=u(".%L"),p=u(":%S"),g=u("%I:%M"),y=u("%I %p"),v=u("%a %d"),m=u("%b %d"),b=u("%B"),x=u("%Y"),_=[[s,1,1e3],[s,5,5e3],[s,15,15e3],[s,30,3e4],[o,1,6e4],[o,5,3e5],[o,15,9e5],[o,30,18e5],[a,1,36e5],[a,3,108e5],[a,6,216e5],[a,12,432e5],[r,1,864e5],[r,2,1728e5],[n,1,6048e5],[e,1,2592e6],[e,3,7776e6],[t,1,31536e6]];function k(i){return(s(i)1)&&(t-=Math.floor(t));var e=Math.abs(t-.5);return qb.h=360*t-100,qb.s=1.5-1.5*e,qb.l=.8-.9*e,qb+""},Zb=Ge(),Jb=Math.PI/3,Qb=2*Math.PI/3,Kb=function(t){var e;return t=(.5-t)*Math.PI,Zb.r=255*(e=Math.sin(t))*e,Zb.g=255*(e=Math.sin(t+Jb))*e,Zb.b=255*(e=Math.sin(t+Qb))*e,Zb+""},tx=function(t){return t=Math.max(0,Math.min(1,t)),"rgb("+Math.max(0,Math.min(255,Math.round(34.61+t*(1172.33-t*(10793.56-t*(33300.12-t*(38394.49-14825.05*t)))))))+", "+Math.max(0,Math.min(255,Math.round(23.31+t*(557.33+t*(1225.33-t*(3574.96-t*(1073.77+707.56*t)))))))+", "+Math.max(0,Math.min(255,Math.round(27.2+t*(3211.1-t*(15327.97-t*(27814-t*(22569.18-6838.66*t)))))))+")"};function ex(t){var e=t.length;return function(n){return t[Math.max(0,Math.min(e-1,Math.floor(n*e)))]}}var 
nx=ex(Nm("44015444025645045745055946075a46085c460a5d460b5e470d60470e6147106347116447136548146748166848176948186a481a6c481b6d481c6e481d6f481f70482071482173482374482475482576482677482878482979472a7a472c7a472d7b472e7c472f7d46307e46327e46337f463480453581453781453882443983443a83443b84433d84433e85423f854240864241864142874144874045884046883f47883f48893e49893e4a893e4c8a3d4d8a3d4e8a3c4f8a3c508b3b518b3b528b3a538b3a548c39558c39568c38588c38598c375a8c375b8d365c8d365d8d355e8d355f8d34608d34618d33628d33638d32648e32658e31668e31678e31688e30698e306a8e2f6b8e2f6c8e2e6d8e2e6e8e2e6f8e2d708e2d718e2c718e2c728e2c738e2b748e2b758e2a768e2a778e2a788e29798e297a8e297b8e287c8e287d8e277e8e277f8e27808e26818e26828e26828e25838e25848e25858e24868e24878e23888e23898e238a8d228b8d228c8d228d8d218e8d218f8d21908d21918c20928c20928c20938c1f948c1f958b1f968b1f978b1f988b1f998a1f9a8a1e9b8a1e9c891e9d891f9e891f9f881fa0881fa1881fa1871fa28720a38620a48621a58521a68522a78522a88423a98324aa8325ab8225ac8226ad8127ad8128ae8029af7f2ab07f2cb17e2db27d2eb37c2fb47c31b57b32b67a34b67935b77937b87838b9773aba763bbb753dbc743fbc7340bd7242be7144bf7046c06f48c16e4ac16d4cc26c4ec36b50c46a52c56954c56856c66758c7655ac8645cc8635ec96260ca6063cb5f65cb5e67cc5c69cd5b6ccd5a6ece5870cf5773d05675d05477d1537ad1517cd2507fd34e81d34d84d44b86d54989d5488bd6468ed64590d74393d74195d84098d83e9bd93c9dd93ba0da39a2da37a5db36a8db34aadc32addc30b0dd2fb2dd2db5de2bb8de29bade28bddf26c0df25c2df23c5e021c8e020cae11fcde11dd0e11cd2e21bd5e21ad8e219dae319dde318dfe318e2e418e5e419e7e419eae51aece51befe51cf1e51df4e61ef6e620f8e621fbe723fde725")),rx=ex(Nm("00000401000501010601010802010902020b02020d03030f03031204041405041606051806051a07061c08071e0907200a08220b09240c09260d0a290e0b2b100b2d110c2f120d31130d34140e36150e38160f3b180f3d19103f1a10421c10441d11471e114920114b21114e22115024125325125527125829115a2a115c2c115f2d11612f116331116533106734106936106b38106c390f6e3b0f703d0f713f0f72400f74420f75440f764510774710784910784a10794c117a4e117b4f127b51127c52137c54137d56147d57157e59157e5a167e5c167f5d177f5f
187f601880621980641a80651a80671b80681c816a1c816b1d816d1d816e1e81701f81721f817320817521817621817822817922827b23827c23827e24828025828125818326818426818627818827818928818b29818c29818e2a81902a81912b81932b80942c80962c80982d80992d809b2e7f9c2e7f9e2f7fa02f7fa1307ea3307ea5317ea6317da8327daa337dab337cad347cae347bb0357bb2357bb3367ab5367ab73779b83779ba3878bc3978bd3977bf3a77c03a76c23b75c43c75c53c74c73d73c83e73ca3e72cc3f71cd4071cf4070d0416fd2426fd3436ed5446dd6456cd8456cd9466bdb476adc4869de4968df4a68e04c67e24d66e34e65e44f64e55064e75263e85362e95462ea5661eb5760ec5860ed5a5fee5b5eef5d5ef05f5ef1605df2625df2645cf3655cf4675cf4695cf56b5cf66c5cf66e5cf7705cf7725cf8745cf8765cf9785df9795df97b5dfa7d5efa7f5efa815ffb835ffb8560fb8761fc8961fc8a62fc8c63fc8e64fc9065fd9266fd9467fd9668fd9869fd9a6afd9b6bfe9d6cfe9f6dfea16efea36ffea571fea772fea973feaa74feac76feae77feb078feb27afeb47bfeb67cfeb77efeb97ffebb81febd82febf84fec185fec287fec488fec68afec88cfeca8dfecc8ffecd90fecf92fed194fed395fed597fed799fed89afdda9cfddc9efddea0fde0a1fde2a3fde3a5fde5a7fde7a9fde9aafdebacfcecaefceeb0fcf0b2fcf2b4fcf4b6fcf6b8fcf7b9fcf9bbfcfbbdfcfdbf")),ix=ex(Nm("00000401000501010601010802010a02020c02020e03021004031204031405041706041907051b08051d09061f0a07220b07240c08260d08290e092b10092d110a30120a32140b34150b37160b39180c3c190c3e1b0c411c0c431e0c451f0c48210c4a230c4c240c4f260c51280b53290b552b0b572d0b592f0a5b310a5c320a5e340a5f3609613809623909633b09643d09653e0966400a67420a68440a68450a69470b6a490b6a4a0c6b4c0c6b4d0d6c4f0d6c510e6c520e6d540f6d550f6d57106e59106e5a116e5c126e5d126e5f136e61136e62146e64156e65156e67166e69166e6a176e6c186e6d186e6f196e71196e721a6e741a6e751b6e771c6d781c6d7a1d6d7c1d6d7d1e6d7f1e6c801f6c82206c84206b85216b87216b88226a8a226a8c23698d23698f24699025689225689326679526679727669827669a28659b29649d29649f2a63a02a63a22b62a32c61a52c60a62d60a82e5fa92e5eab2f5ead305dae305cb0315bb1325ab3325ab43359b63458b73557b93556ba3655bc3754bd3853bf3952c03a51c13a50c33b4fc43c4ec63d4dc73e4cc83f4bca404acb4149cc4248ce4347cf4446d04545d24644d34743d44842d54a41d7
4b3fd84c3ed94d3dda4e3cdb503bdd513ade5238df5337e05536e15635e25734e35933e45a31e55c30e65d2fe75e2ee8602de9612bea632aeb6429eb6628ec6726ed6925ee6a24ef6c23ef6e21f06f20f1711ff1731df2741cf3761bf37819f47918f57b17f57d15f67e14f68013f78212f78410f8850ff8870ef8890cf98b0bf98c0af98e09fa9008fa9207fa9407fb9606fb9706fb9906fb9b06fb9d07fc9f07fca108fca309fca50afca60cfca80dfcaa0ffcac11fcae12fcb014fcb216fcb418fbb61afbb81dfbba1ffbbc21fbbe23fac026fac228fac42afac62df9c72ff9c932f9cb35f8cd37f8cf3af7d13df7d340f6d543f6d746f5d949f5db4cf4dd4ff4df53f4e156f3e35af3e55df2e661f2e865f2ea69f1ec6df1ed71f1ef75f1f179f2f27df2f482f3f586f3f68af4f88ef5f992f6fa96f8fb9af9fc9dfafda1fcffa4")),ax=ex(Nm("0d088710078813078916078a19068c1b068d1d068e20068f2206902406912605912805922a05932c05942e05952f059631059733059735049837049938049a3a049a3c049b3e049c3f049c41049d43039e44039e46039f48039f4903a04b03a14c02a14e02a25002a25102a35302a35502a45601a45801a45901a55b01a55c01a65e01a66001a66100a76300a76400a76600a76700a86900a86a00a86c00a86e00a86f00a87100a87201a87401a87501a87701a87801a87a02a87b02a87d03a87e03a88004a88104a78305a78405a78606a68707a68808a68a09a58b0aa58d0ba58e0ca48f0da4910ea3920fa39410a29511a19613a19814a099159f9a169f9c179e9d189d9e199da01a9ca11b9ba21d9aa31e9aa51f99a62098a72197a82296aa2395ab2494ac2694ad2793ae2892b02991b12a90b22b8fb32c8eb42e8db52f8cb6308bb7318ab83289ba3388bb3488bc3587bd3786be3885bf3984c03a83c13b82c23c81c33d80c43e7fc5407ec6417dc7427cc8437bc9447aca457acb4679cc4778cc4977cd4a76ce4b75cf4c74d04d73d14e72d24f71d35171d45270d5536fd5546ed6556dd7566cd8576bd9586ada5a6ada5b69db5c68dc5d67dd5e66de5f65de6164df6263e06363e16462e26561e26660e3685fe4695ee56a5de56b5de66c5ce76e5be76f5ae87059e97158e97257ea7457eb7556eb7655ec7754ed7953ed7a52ee7b51ef7c51ef7e50f07f4ff0804ef1814df1834cf2844bf3854bf3874af48849f48948f58b47f58c46f68d45f68f44f79044f79143f79342f89441f89540f9973ff9983ef99a3efa9b3dfa9c3cfa9e3bfb9f3afba139fba238fca338fca537fca636fca835fca934fdab33fdac33fdae32fdaf31fdb130fdb22ffdb42ffdb52efeb72dfeb82cfeba2cfebb2bfebd2afebe2afec029fdc229fd
c328fdc527fdc627fdc827fdca26fdcb26fccd25fcce25fcd025fcd225fbd324fbd524fbd724fad824fada24f9dc24f9dd25f8df25f8e125f7e225f7e425f6e626f6e826f5e926f5eb27f4ed27f3ee27f3f027f2f227f1f426f1f525f0f724f0f921")),ox=function(t){return ke(ne(t).call(document.documentElement))},sx=0;function cx(){return new ux}function ux(){this._="@"+(++sx).toString(36)}ux.prototype=cx.prototype={constructor:ux,get:function(t){for(var e=this._;!(e in t);)if(!(t=t.parentNode))return;return t[e]},set:function(t,e){return t[this._]=e},remove:function(t){return this._ in t&&delete t[this._]},toString:function(){return this._}};var lx=function(t){return"string"==typeof t?new be([document.querySelectorAll(t)],[document.documentElement]):new be([null==t?[]:t],me)},hx=function(t,e){null==e&&(e=Mn().touches);for(var n=0,r=e?e.length:0,i=new Array(r);n1?0:t<-1?xx:Math.acos(t)}function Ex(t){return t>=1?_x:t<=-1?-_x:Math.asin(t)}function Tx(t){return t.innerRadius}function Cx(t){return t.outerRadius}function Sx(t){return t.startAngle}function Ax(t){return t.endAngle}function Mx(t){return t&&t.padAngle}function Ox(t,e,n,r,i,a,o,s){var c=n-t,u=r-e,l=o-i,h=s-a,f=h*c-l*u;if(!(f*f<1e-12))return[t+(f=(l*(e-a)-h*(t-i))/f)*c,e+f*u]}function Dx(t,e,n,r,i,a,o){var s=t-n,c=e-r,u=(o?a:-a)/bx(s*s+c*c),l=u*c,h=-u*s,f=t+l,d=e+h,p=n+l,g=r+h,y=(f+p)/2,v=(d+g)/2,m=p-f,b=g-d,x=m*m+b*b,_=i-a,k=f*g-p*d,w=(b<0?-1:1)*bx(yx(0,_*_*x-k*k)),E=(k*b-m*w)/x,T=(-k*m-b*w)/x,C=(k*b+m*w)/x,S=(-k*m+b*w)/x,A=E-y,M=T-v,O=C-y,D=S-v;return A*A+M*M>O*O+D*D&&(E=C,T=S),{cx:E,cy:T,x01:-l,y01:-h,x11:E*(i/_-1),y11:T*(i/_-1)}}var Nx=function(){var t=Tx,e=Cx,n=fx(0),r=null,i=Sx,a=Ax,o=Mx,s=null;function c(){var c,u,l=+t.apply(this,arguments),h=+e.apply(this,arguments),f=i.apply(this,arguments)-_x,d=a.apply(this,arguments)-_x,p=dx(d-f),g=d>f;if(s||(s=c=Ui()),h1e-12)if(p>kx-1e-12)s.moveTo(h*gx(f),h*mx(f)),s.arc(0,0,h,f,d,!g),l>1e-12&&(s.moveTo(l*gx(d),l*mx(d)),s.arc(0,0,l,d,f,g));else{var 
y,v,m=f,b=d,x=f,_=d,k=p,w=p,E=o.apply(this,arguments)/2,T=E>1e-12&&(r?+r.apply(this,arguments):bx(l*l+h*h)),C=vx(dx(h-l)/2,+n.apply(this,arguments)),S=C,A=C;if(T>1e-12){var M=Ex(T/l*mx(E)),O=Ex(T/h*mx(E));(k-=2*M)>1e-12?(x+=M*=g?1:-1,_-=M):(k=0,x=_=(f+d)/2),(w-=2*O)>1e-12?(m+=O*=g?1:-1,b-=O):(w=0,m=b=(f+d)/2)}var D=h*gx(m),N=h*mx(m),B=l*gx(_),L=l*mx(_);if(C>1e-12){var P,F=h*gx(b),I=h*mx(b),j=l*gx(x),R=l*mx(x);if(p1e-12?A>1e-12?(y=Dx(j,R,D,N,h,A,g),v=Dx(F,I,B,L,h,A,g),s.moveTo(y.cx+y.x01,y.cy+y.y01),A1e-12&&k>1e-12?S>1e-12?(y=Dx(B,L,F,I,l,-S,g),v=Dx(D,N,j,R,l,-S,g),s.lineTo(y.cx+y.x01,y.cy+y.y01),S=l;--h)s.point(y[h],v[h]);s.lineEnd(),s.areaEnd()}g&&(y[u]=+t(f,u,c),v[u]=+n(f,u,c),s.point(e?+e(f,u,c):y[u],r?+r(f,u,c):v[u]))}if(d)return s=null,d+""||null}function u(){return Ix().defined(i).curve(o).context(a)}return c.x=function(n){return arguments.length?(t="function"==typeof n?n:fx(+n),e=null,c):t},c.x0=function(e){return arguments.length?(t="function"==typeof e?e:fx(+e),c):t},c.x1=function(t){return arguments.length?(e=null==t?null:"function"==typeof t?t:fx(+t),c):e},c.y=function(t){return arguments.length?(n="function"==typeof t?t:fx(+t),r=null,c):n},c.y0=function(t){return arguments.length?(n="function"==typeof t?t:fx(+t),c):n},c.y1=function(t){return arguments.length?(r=null==t?null:"function"==typeof t?t:fx(+t),c):r},c.lineX0=c.lineY0=function(){return u().x(t).y(n)},c.lineY1=function(){return u().x(t).y(r)},c.lineX1=function(){return u().x(e).y(n)},c.defined=function(t){return arguments.length?(i="function"==typeof t?t:fx(!!t),c):i},c.curve=function(t){return arguments.length?(o=t,null!=a&&(s=o(a)),c):o},c.context=function(t){return arguments.length?(null==t?a=s=null:s=o(a=t),c):a},c},Rx=function(t,e){return et?1:e>=t?0:NaN},Yx=function(t){return t},zx=function(){var t=Yx,e=Rx,n=null,r=fx(0),i=fx(kx),a=fx(0);function o(o){var s,c,u,l,h,f=o.length,d=0,p=new Array(f),g=new 
Array(f),y=+r.apply(this,arguments),v=Math.min(kx,Math.max(-kx,i.apply(this,arguments)-y)),m=Math.min(Math.abs(v)/f,a.apply(this,arguments)),b=m*(v<0?-1:1);for(s=0;s0&&(d+=h);for(null!=e?p.sort((function(t,n){return e(g[t],g[n])})):null!=n&&p.sort((function(t,e){return n(o[t],o[e])})),s=0,u=d?(v-f*b)/d:0;s0?h*u:0)+b,g[c]={data:o[c],index:s,value:h,startAngle:y,endAngle:l,padAngle:m};return g}return o.value=function(e){return arguments.length?(t="function"==typeof e?e:fx(+e),o):t},o.sortValues=function(t){return arguments.length?(e=t,n=null,o):e},o.sort=function(t){return arguments.length?(n=t,e=null,o):n},o.startAngle=function(t){return arguments.length?(r="function"==typeof t?t:fx(+t),o):r},o.endAngle=function(t){return arguments.length?(i="function"==typeof t?t:fx(+t),o):i},o.padAngle=function(t){return arguments.length?(a="function"==typeof t?t:fx(+t),o):a},o},Ux=Wx(Lx);function $x(t){this._curve=t}function Wx(t){function e(e){return new $x(t(e))}return e._curve=t,e}function Vx(t){var e=t.curve;return t.angle=t.x,delete t.x,t.radius=t.y,delete t.y,t.curve=function(t){return arguments.length?e(Wx(t)):e()._curve},t}$x.prototype={areaStart:function(){this._curve.areaStart()},areaEnd:function(){this._curve.areaEnd()},lineStart:function(){this._curve.lineStart()},lineEnd:function(){this._curve.lineEnd()},point:function(t,e){this._curve.point(e*Math.sin(t),e*-Math.cos(t))}};var Hx=function(){return Vx(Ix().curve(Ux))},Gx=function(){var t=jx().curve(Ux),e=t.curve,n=t.lineX0,r=t.lineX1,i=t.lineY0,a=t.lineY1;return t.angle=t.x,delete t.x,t.startAngle=t.x0,delete t.x0,t.endAngle=t.x1,delete t.x1,t.radius=t.y,delete t.y,t.innerRadius=t.y0,delete t.y0,t.outerRadius=t.y1,delete t.y1,t.lineStartAngle=function(){return Vx(n())},delete t.lineX0,t.lineEndAngle=function(){return Vx(r())},delete t.lineX1,t.lineInnerRadius=function(){return Vx(i())},delete t.lineY0,t.lineOuterRadius=function(){return Vx(a())},delete t.lineY1,t.curve=function(t){return 
arguments.length?e(Wx(t)):e()._curve},t},qx=function(t,e){return[(e=+e)*Math.cos(t-=Math.PI/2),e*Math.sin(t)]},Xx=Array.prototype.slice;function Zx(t){return t.source}function Jx(t){return t.target}function Qx(t){var e=Zx,n=Jx,r=Px,i=Fx,a=null;function o(){var o,s=Xx.call(arguments),c=e.apply(this,s),u=n.apply(this,s);if(a||(a=o=Ui()),t(a,+r.apply(this,(s[0]=c,s)),+i.apply(this,s),+r.apply(this,(s[0]=u,s)),+i.apply(this,s)),o)return a=null,o+""||null}return o.source=function(t){return arguments.length?(e=t,o):e},o.target=function(t){return arguments.length?(n=t,o):n},o.x=function(t){return arguments.length?(r="function"==typeof t?t:fx(+t),o):r},o.y=function(t){return arguments.length?(i="function"==typeof t?t:fx(+t),o):i},o.context=function(t){return arguments.length?(a=null==t?null:t,o):a},o}function Kx(t,e,n,r,i){t.moveTo(e,n),t.bezierCurveTo(e=(e+r)/2,n,e,i,r,i)}function t_(t,e,n,r,i){t.moveTo(e,n),t.bezierCurveTo(e,n=(n+i)/2,r,n,r,i)}function e_(t,e,n,r,i){var a=qx(e,n),o=qx(e,n=(n+i)/2),s=qx(r,n),c=qx(r,i);t.moveTo(a[0],a[1]),t.bezierCurveTo(o[0],o[1],s[0],s[1],c[0],c[1])}function n_(){return Qx(Kx)}function r_(){return Qx(t_)}function i_(){var t=Qx(e_);return t.angle=t.x,delete t.x,t.radius=t.y,delete t.y,t}var a_={draw:function(t,e){var n=Math.sqrt(e/xx);t.moveTo(n,0),t.arc(0,0,n,0,kx)}},o_={draw:function(t,e){var n=Math.sqrt(e/5)/2;t.moveTo(-3*n,-n),t.lineTo(-n,-n),t.lineTo(-n,-3*n),t.lineTo(n,-3*n),t.lineTo(n,-n),t.lineTo(3*n,-n),t.lineTo(3*n,n),t.lineTo(n,n),t.lineTo(n,3*n),t.lineTo(-n,3*n),t.lineTo(-n,n),t.lineTo(-3*n,n),t.closePath()}},s_=Math.sqrt(1/3),c_=2*s_,u_={draw:function(t,e){var n=Math.sqrt(e/c_),r=n*s_;t.moveTo(0,-n),t.lineTo(r,0),t.lineTo(0,n),t.lineTo(-r,0),t.closePath()}},l_=Math.sin(xx/10)/Math.sin(7*xx/10),h_=Math.sin(kx/10)*l_,f_=-Math.cos(kx/10)*l_,d_={draw:function(t,e){var n=Math.sqrt(.8908130915292852*e),r=h_*n,i=f_*n;t.moveTo(0,-n),t.lineTo(r,i);for(var a=1;a<5;++a){var 
o=kx*a/5,s=Math.cos(o),c=Math.sin(o);t.lineTo(c*n,-s*n),t.lineTo(s*r-c*i,c*r+s*i)}t.closePath()}},p_={draw:function(t,e){var n=Math.sqrt(e),r=-n/2;t.rect(r,r,n,n)}},g_=Math.sqrt(3),y_={draw:function(t,e){var n=-Math.sqrt(e/(3*g_));t.moveTo(0,2*n),t.lineTo(-g_*n,-n),t.lineTo(g_*n,-n),t.closePath()}},v_=Math.sqrt(3)/2,m_=1/Math.sqrt(12),b_=3*(m_/2+1),x_={draw:function(t,e){var n=Math.sqrt(e/b_),r=n/2,i=n*m_,a=r,o=n*m_+n,s=-a,c=o;t.moveTo(r,i),t.lineTo(a,o),t.lineTo(s,c),t.lineTo(-.5*r-v_*i,v_*r+-.5*i),t.lineTo(-.5*a-v_*o,v_*a+-.5*o),t.lineTo(-.5*s-v_*c,v_*s+-.5*c),t.lineTo(-.5*r+v_*i,-.5*i-v_*r),t.lineTo(-.5*a+v_*o,-.5*o-v_*a),t.lineTo(-.5*s+v_*c,-.5*c-v_*s),t.closePath()}},__=[a_,o_,u_,p_,d_,y_,x_],k_=function(){var t=fx(a_),e=fx(64),n=null;function r(){var r;if(n||(n=r=Ui()),t.apply(this,arguments).draw(n,+e.apply(this,arguments)),r)return n=null,r+""||null}return r.type=function(e){return arguments.length?(t="function"==typeof e?e:fx(e),r):t},r.size=function(t){return arguments.length?(e="function"==typeof t?t:fx(+t),r):e},r.context=function(t){return arguments.length?(n=null==t?null:t,r):n},r},w_=function(){};function E_(t,e,n){t._context.bezierCurveTo((2*t._x0+t._x1)/3,(2*t._y0+t._y1)/3,(t._x0+2*t._x1)/3,(t._y0+2*t._y1)/3,(t._x0+4*t._x1+e)/6,(t._y0+4*t._y1+n)/6)}function T_(t){this._context=t}T_.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._y0=this._y1=NaN,this._point=0},lineEnd:function(){switch(this._point){case 3:E_(this,this._x1,this._y1);case 2:this._context.lineTo(this._x1,this._y1)}(this._line||0!==this._line&&1===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,e){switch(t=+t,e=+e,this._point){case 0:this._point=1,this._line?this._context.lineTo(t,e):this._context.moveTo(t,e);break;case 1:this._point=2;break;case 
2:this._point=3,this._context.lineTo((5*this._x0+this._x1)/6,(5*this._y0+this._y1)/6);default:E_(this,t,e)}this._x0=this._x1,this._x1=t,this._y0=this._y1,this._y1=e}};var C_=function(t){return new T_(t)};function S_(t){this._context=t}S_.prototype={areaStart:w_,areaEnd:w_,lineStart:function(){this._x0=this._x1=this._x2=this._x3=this._x4=this._y0=this._y1=this._y2=this._y3=this._y4=NaN,this._point=0},lineEnd:function(){switch(this._point){case 1:this._context.moveTo(this._x2,this._y2),this._context.closePath();break;case 2:this._context.moveTo((this._x2+2*this._x3)/3,(this._y2+2*this._y3)/3),this._context.lineTo((this._x3+2*this._x2)/3,(this._y3+2*this._y2)/3),this._context.closePath();break;case 3:this.point(this._x2,this._y2),this.point(this._x3,this._y3),this.point(this._x4,this._y4)}},point:function(t,e){switch(t=+t,e=+e,this._point){case 0:this._point=1,this._x2=t,this._y2=e;break;case 1:this._point=2,this._x3=t,this._y3=e;break;case 2:this._point=3,this._x4=t,this._y4=e,this._context.moveTo((this._x0+4*this._x1+t)/6,(this._y0+4*this._y1+e)/6);break;default:E_(this,t,e)}this._x0=this._x1,this._x1=t,this._y0=this._y1,this._y1=e}};var A_=function(t){return new S_(t)};function M_(t){this._context=t}M_.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._y0=this._y1=NaN,this._point=0},lineEnd:function(){(this._line||0!==this._line&&3===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,e){switch(t=+t,e=+e,this._point){case 0:this._point=1;break;case 1:this._point=2;break;case 2:this._point=3;var n=(this._x0+4*this._x1+t)/6,r=(this._y0+4*this._y1+e)/6;this._line?this._context.lineTo(n,r):this._context.moveTo(n,r);break;case 3:this._point=4;default:E_(this,t,e)}this._x0=this._x1,this._x1=t,this._y0=this._y1,this._y1=e}};var O_=function(t){return new M_(t)};function D_(t,e){this._basis=new 
T_(t),this._beta=e}D_.prototype={lineStart:function(){this._x=[],this._y=[],this._basis.lineStart()},lineEnd:function(){var t=this._x,e=this._y,n=t.length-1;if(n>0)for(var r,i=t[0],a=e[0],o=t[n]-i,s=e[n]-a,c=-1;++c<=n;)r=c/n,this._basis.point(this._beta*t[c]+(1-this._beta)*(i+r*o),this._beta*e[c]+(1-this._beta)*(a+r*s));this._x=this._y=null,this._basis.lineEnd()},point:function(t,e){this._x.push(+t),this._y.push(+e)}};var N_=function t(e){function n(t){return 1===e?new T_(t):new D_(t,e)}return n.beta=function(e){return t(+e)},n}(.85);function B_(t,e,n){t._context.bezierCurveTo(t._x1+t._k*(t._x2-t._x0),t._y1+t._k*(t._y2-t._y0),t._x2+t._k*(t._x1-e),t._y2+t._k*(t._y1-n),t._x2,t._y2)}function L_(t,e){this._context=t,this._k=(1-e)/6}L_.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._point=0},lineEnd:function(){switch(this._point){case 2:this._context.lineTo(this._x2,this._y2);break;case 3:B_(this,this._x1,this._y1)}(this._line||0!==this._line&&1===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,e){switch(t=+t,e=+e,this._point){case 0:this._point=1,this._line?this._context.lineTo(t,e):this._context.moveTo(t,e);break;case 1:this._point=2,this._x1=t,this._y1=e;break;case 2:this._point=3;default:B_(this,t,e)}this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=e}};var P_=function t(e){function n(t){return new L_(t,e)}return n.tension=function(e){return t(+e)},n}(0);function F_(t,e){this._context=t,this._k=(1-e)/6}F_.prototype={areaStart:w_,areaEnd:w_,lineStart:function(){this._x0=this._x1=this._x2=this._x3=this._x4=this._x5=this._y0=this._y1=this._y2=this._y3=this._y4=this._y5=NaN,this._point=0},lineEnd:function(){switch(this._point){case 1:this._context.moveTo(this._x3,this._y3),this._context.closePath();break;case 
2:this._context.lineTo(this._x3,this._y3),this._context.closePath();break;case 3:this.point(this._x3,this._y3),this.point(this._x4,this._y4),this.point(this._x5,this._y5)}},point:function(t,e){switch(t=+t,e=+e,this._point){case 0:this._point=1,this._x3=t,this._y3=e;break;case 1:this._point=2,this._context.moveTo(this._x4=t,this._y4=e);break;case 2:this._point=3,this._x5=t,this._y5=e;break;default:B_(this,t,e)}this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=e}};var I_=function t(e){function n(t){return new F_(t,e)}return n.tension=function(e){return t(+e)},n}(0);function j_(t,e){this._context=t,this._k=(1-e)/6}j_.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._point=0},lineEnd:function(){(this._line||0!==this._line&&3===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,e){switch(t=+t,e=+e,this._point){case 0:this._point=1;break;case 1:this._point=2;break;case 2:this._point=3,this._line?this._context.lineTo(this._x2,this._y2):this._context.moveTo(this._x2,this._y2);break;case 3:this._point=4;default:B_(this,t,e)}this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=e}};var R_=function t(e){function n(t){return new j_(t,e)}return n.tension=function(e){return t(+e)},n}(0);function Y_(t,e,n){var r=t._x1,i=t._y1,a=t._x2,o=t._y2;if(t._l01_a>1e-12){var s=2*t._l01_2a+3*t._l01_a*t._l12_a+t._l12_2a,c=3*t._l01_a*(t._l01_a+t._l12_a);r=(r*s-t._x0*t._l12_2a+t._x2*t._l01_2a)/c,i=(i*s-t._y0*t._l12_2a+t._y2*t._l01_2a)/c}if(t._l23_a>1e-12){var u=2*t._l23_2a+3*t._l23_a*t._l12_a+t._l12_2a,l=3*t._l23_a*(t._l23_a+t._l12_a);a=(a*u+t._x1*t._l23_2a-e*t._l12_2a)/l,o=(o*u+t._y1*t._l23_2a-n*t._l12_2a)/l}t._context.bezierCurveTo(r,i,a,o,t._x2,t._y2)}function 
z_(t,e){this._context=t,this._alpha=e}z_.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._l01_a=this._l12_a=this._l23_a=this._l01_2a=this._l12_2a=this._l23_2a=this._point=0},lineEnd:function(){switch(this._point){case 2:this._context.lineTo(this._x2,this._y2);break;case 3:this.point(this._x2,this._y2)}(this._line||0!==this._line&&1===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,e){if(t=+t,e=+e,this._point){var n=this._x2-t,r=this._y2-e;this._l23_a=Math.sqrt(this._l23_2a=Math.pow(n*n+r*r,this._alpha))}switch(this._point){case 0:this._point=1,this._line?this._context.lineTo(t,e):this._context.moveTo(t,e);break;case 1:this._point=2;break;case 2:this._point=3;default:Y_(this,t,e)}this._l01_a=this._l12_a,this._l12_a=this._l23_a,this._l01_2a=this._l12_2a,this._l12_2a=this._l23_2a,this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=e}};var U_=function t(e){function n(t){return e?new z_(t,e):new L_(t,0)}return n.alpha=function(e){return t(+e)},n}(.5);function $_(t,e){this._context=t,this._alpha=e}$_.prototype={areaStart:w_,areaEnd:w_,lineStart:function(){this._x0=this._x1=this._x2=this._x3=this._x4=this._x5=this._y0=this._y1=this._y2=this._y3=this._y4=this._y5=NaN,this._l01_a=this._l12_a=this._l23_a=this._l01_2a=this._l12_2a=this._l23_2a=this._point=0},lineEnd:function(){switch(this._point){case 1:this._context.moveTo(this._x3,this._y3),this._context.closePath();break;case 2:this._context.lineTo(this._x3,this._y3),this._context.closePath();break;case 3:this.point(this._x3,this._y3),this.point(this._x4,this._y4),this.point(this._x5,this._y5)}},point:function(t,e){if(t=+t,e=+e,this._point){var n=this._x2-t,r=this._y2-e;this._l23_a=Math.sqrt(this._l23_2a=Math.pow(n*n+r*r,this._alpha))}switch(this._point){case 0:this._point=1,this._x3=t,this._y3=e;break;case 
1:this._point=2,this._context.moveTo(this._x4=t,this._y4=e);break;case 2:this._point=3,this._x5=t,this._y5=e;break;default:Y_(this,t,e)}this._l01_a=this._l12_a,this._l12_a=this._l23_a,this._l01_2a=this._l12_2a,this._l12_2a=this._l23_2a,this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=e}};var W_=function t(e){function n(t){return e?new $_(t,e):new F_(t,0)}return n.alpha=function(e){return t(+e)},n}(.5);function V_(t,e){this._context=t,this._alpha=e}V_.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._l01_a=this._l12_a=this._l23_a=this._l01_2a=this._l12_2a=this._l23_2a=this._point=0},lineEnd:function(){(this._line||0!==this._line&&3===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,e){if(t=+t,e=+e,this._point){var n=this._x2-t,r=this._y2-e;this._l23_a=Math.sqrt(this._l23_2a=Math.pow(n*n+r*r,this._alpha))}switch(this._point){case 0:this._point=1;break;case 1:this._point=2;break;case 2:this._point=3,this._line?this._context.lineTo(this._x2,this._y2):this._context.moveTo(this._x2,this._y2);break;case 3:this._point=4;default:Y_(this,t,e)}this._l01_a=this._l12_a,this._l12_a=this._l23_a,this._l01_2a=this._l12_2a,this._l12_2a=this._l23_2a,this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=e}};var H_=function t(e){function n(t){return e?new V_(t,e):new j_(t,0)}return n.alpha=function(e){return t(+e)},n}(.5);function G_(t){this._context=t}G_.prototype={areaStart:w_,areaEnd:w_,lineStart:function(){this._point=0},lineEnd:function(){this._point&&this._context.closePath()},point:function(t,e){t=+t,e=+e,this._point?this._context.lineTo(t,e):(this._point=1,this._context.moveTo(t,e))}};var q_=function(t){return new G_(t)};function X_(t){return t<0?-1:1}function Z_(t,e,n){var 
r=t._x1-t._x0,i=e-t._x1,a=(t._y1-t._y0)/(r||i<0&&-0),o=(n-t._y1)/(i||r<0&&-0),s=(a*i+o*r)/(r+i);return(X_(a)+X_(o))*Math.min(Math.abs(a),Math.abs(o),.5*Math.abs(s))||0}function J_(t,e){var n=t._x1-t._x0;return n?(3*(t._y1-t._y0)/n-e)/2:e}function Q_(t,e,n){var r=t._x0,i=t._y0,a=t._x1,o=t._y1,s=(a-r)/3;t._context.bezierCurveTo(r+s,i+s*e,a-s,o-s*n,a,o)}function K_(t){this._context=t}function tk(t){this._context=new ek(t)}function ek(t){this._context=t}function nk(t){return new K_(t)}function rk(t){return new tk(t)}function ik(t){this._context=t}function ak(t){var e,n,r=t.length-1,i=new Array(r),a=new Array(r),o=new Array(r);for(i[0]=0,a[0]=2,o[0]=t[0]+2*t[1],e=1;e=0;--e)i[e]=(o[e]-i[e+1])/a[e];for(a[r-1]=(t[r]+i[r-1])/2,e=0;e=0&&(this._t=1-this._t,this._line=1-this._line)},point:function(t,e){switch(t=+t,e=+e,this._point){case 0:this._point=1,this._line?this._context.lineTo(t,e):this._context.moveTo(t,e);break;case 1:this._point=2;default:if(this._t<=0)this._context.lineTo(this._x,e),this._context.lineTo(t,e);else{var n=this._x*(1-this._t)+t*this._t;this._context.lineTo(n,this._y),this._context.lineTo(n,e)}}this._x=t,this._y=e}};var ck=function(t){return new sk(t,.5)};function uk(t){return new sk(t,0)}function lk(t){return new sk(t,1)}var hk=function(t,e){if((i=t.length)>1)for(var n,r,i,a=1,o=t[e[0]],s=o.length;a=0;)n[e]=e;return n};function dk(t,e){return t[e]}var pk=function(){var t=fx([]),e=fk,n=hk,r=dk;function i(i){var a,o,s=t.apply(this,arguments),c=i.length,u=s.length,l=new Array(u);for(a=0;a0){for(var n,r,i,a=0,o=t[0].length;a0)for(var n,r,i,a,o,s,c=0,u=t[e[0]].length;c0?(r[0]=a,r[1]=a+=i):i<0?(r[1]=o,r[0]=o+=i):(r[0]=0,r[1]=i)},vk=function(t,e){if((n=t.length)>0){for(var n,r=0,i=t[e[0]],a=i.length;r0&&(r=(n=t[e[0]]).length)>0){for(var n,r,i,a=0,o=1;oa&&(a=e,r=n);return r}var _k=function(t){var e=t.map(kk);return fk(t).sort((function(t,n){return e[t]-e[n]}))};function kk(t){for(var 
e,n=0,r=-1,i=t.length;++r0)){if(a/=f,f<0){if(a0){if(a>h)return;a>l&&(l=a)}if(a=r-c,f||!(a<0)){if(a/=f,f<0){if(a>h)return;a>l&&(l=a)}else if(f>0){if(a0)){if(a/=d,d<0){if(a0){if(a>h)return;a>l&&(l=a)}if(a=i-u,d||!(a<0)){if(a/=d,d<0){if(a>h)return;a>l&&(l=a)}else if(d>0){if(a0||h<1)||(l>0&&(t[0]=[c+l*f,u+l*d]),h<1&&(t[1]=[c+h*f,u+h*d]),!0)}}}}}function Uk(t,e,n,r,i){var a=t[1];if(a)return!0;var o,s,c=t[0],u=t.left,l=t.right,h=u[0],f=u[1],d=l[0],p=l[1],g=(h+d)/2,y=(f+p)/2;if(p===f){if(g=r)return;if(h>d){if(c){if(c[1]>=i)return}else c=[g,n];a=[g,i]}else{if(c){if(c[1]1)if(h>d){if(c){if(c[1]>=i)return}else c=[(n-s)/o,n];a=[(i-s)/o,i]}else{if(c){if(c[1]=r)return}else c=[e,o*e+s];a=[r,o*r+s]}else{if(c){if(c[0]=-lw)){var d=c*c+u*u,p=l*l+h*h,g=(h*d-u*p)/f,y=(c*p-l*d)/f,v=Gk.pop()||new qk;v.arc=t,v.site=i,v.x=g+o,v.y=(v.cy=y+s)+Math.sqrt(g*g+y*y),t.circle=v;for(var m=null,b=sw._;b;)if(v.yuw)s=s.L;else{if(!((i=a-iw(s,o))>uw)){r>-uw?(e=s.P,n=s):i>-uw?(e=s,n=s.N):e=n=s;break}if(!s.R){e=s;break}s=s.R}!function(t){ow[t.index]={site:t,halfedges:[]}}(t);var c=Kk(t);if(aw.insert(e,c),e||n){if(e===n)return Zk(e),n=Kk(e.site),aw.insert(c,n),c.edge=n.edge=jk(e.site,c.site),Xk(e),void Xk(n);if(n){Zk(e),Zk(n);var u=e.site,l=u[0],h=u[1],f=t[0]-l,d=t[1]-h,p=n.site,g=p[0]-l,y=p[1]-h,v=2*(f*y-d*g),m=f*f+d*d,b=g*g+y*y,x=[(y*m-d*b)/v+l,(f*b-g*m)/v+h];Yk(n.edge,u,p,x),c.edge=jk(u,t,null,x),n.edge=jk(t,p,null,x),Xk(e),Xk(n)}else c.edge=jk(e.site,c.site)}}function rw(t,e){var n=t.site,r=n[0],i=n[1],a=i-e;if(!a)return r;var o=t.P;if(!o)return-1/0;var s=(n=o.site)[0],c=n[1],u=c-e;if(!u)return s;var l=s-r,h=1/a-1/u,f=l/u;return h?(-f+Math.sqrt(f*f-2*h*(l*l/(-2*u)-c+u/2+i-a/2)))/h+r:(r+s)/2}function iw(t,e){var n=t.N;if(n)return rw(n,e);var r=t.site;return r[1]===e?r[0]:1/0}var aw,ow,sw,cw,uw=1e-6,lw=1e-12;function hw(t,e){return e[1]-t[1]||e[0]-t[0]}function fw(t,e){var n,r,i,a=t.sort(hw).pop();for(cw=[],ow=new Array(t.length),aw=new Ik,sw=new 
Ik;;)if(i=Hk,a&&(!i||a[1]uw||Math.abs(i[0][1]-i[1][1])>uw)||delete cw[a]}(o,s,c,u),function(t,e,n,r){var i,a,o,s,c,u,l,h,f,d,p,g,y=ow.length,v=!0;for(i=0;iuw||Math.abs(g-f)>uw)&&(c.splice(s,0,cw.push(Rk(o,d,Math.abs(p-t)uw?[t,Math.abs(h-t)uw?[Math.abs(f-r)uw?[n,Math.abs(h-n)uw?[Math.abs(f-e)=s)return null;var c=t-i.site[0],u=e-i.site[1],l=c*c+u*u;do{i=a.cells[r=o],o=null,i.halfedges.forEach((function(n){var r=a.edges[n],s=r.left;if(s!==i.site&&s||(s=r.right)){var c=t-s[0],u=e-s[1],h=c*c+u*u;hr?(r+i)/2:Math.min(0,r)||Math.max(0,i),o>a?(a+o)/2:Math.min(0,a)||Math.max(0,o))}var Sw=function(){var t,e,n=_w,r=kw,i=Cw,a=Ew,o=Tw,s=[0,1/0],c=[[-1/0,-1/0],[1/0,1/0]],u=250,l=fp,h=lt("start","zoom","end"),f=0;function d(t){t.property("__zoom",ww).on("wheel.zoom",x).on("mousedown.zoom",_).on("dblclick.zoom",k).filter(o).on("touchstart.zoom",w).on("touchmove.zoom",E).on("touchend.zoom touchcancel.zoom",T).style("touch-action","none").style("-webkit-tap-highlight-color","rgba(0,0,0,0)")}function p(t,e){return(e=Math.max(s[0],Math.min(s[1],e)))===t.k?t:new yw(e,t.x,t.y)}function g(t,e,n){var r=e[0]-n[0]*t.k,i=e[1]-n[1]*t.k;return r===t.x&&i===t.y?t:new yw(t.k,r,i)}function y(t){return[(+t[0][0]+ +t[1][0])/2,(+t[0][1]+ +t[1][1])/2]}function v(t,e,n){t.on("start.zoom",(function(){m(this,arguments).start()})).on("interrupt.zoom end.zoom",(function(){m(this,arguments).end()})).tween("zoom",(function(){var t=this,i=arguments,a=m(t,i),o=r.apply(t,i),s=null==n?y(o):"function"==typeof n?n.apply(t,i):n,c=Math.max(o[1][0]-o[0][0],o[1][1]-o[0][1]),u=t.__zoom,h="function"==typeof e?e.apply(t,i):e,f=l(u.invert(s).concat(c/u.k),h.invert(s).concat(c/h.k));return function(t){if(1===t)t=h;else{var e=f(t),n=c/e[2];t=new yw(n,s[0]-e[0]*n,s[1]-e[1]*n)}a.zoom(null,t)}}))}function m(t,e,n){return!n&&t.__zooming||new b(t,e)}function b(t,e){this.that=t,this.args=e,this.active=0,this.extent=r.apply(t,e),this.taps=0}function x(){if(n.apply(this,arguments)){var 
t=m(this,arguments),e=this.__zoom,r=Math.max(s[0],Math.min(s[1],e.k*Math.pow(2,a.apply(this,arguments)))),o=Nn(this);if(t.wheel)t.mouse[0][0]===o[0]&&t.mouse[0][1]===o[1]||(t.mouse[1]=e.invert(t.mouse[0]=o)),clearTimeout(t.wheel);else{if(e.k===r)return;t.mouse=[o,e.invert(o)],or(this),t.start()}xw(),t.wheel=setTimeout(u,150),t.zoom("mouse",i(g(p(e,r),t.mouse[0],t.mouse[1]),t.extent,c))}function u(){t.wheel=null,t.end()}}function _(){if(!e&&n.apply(this,arguments)){var t=m(this,arguments,!0),r=ke(ce.view).on("mousemove.zoom",u,!0).on("mouseup.zoom",l,!0),a=Nn(this),o=ce.clientX,s=ce.clientY;Te(ce.view),bw(),t.mouse=[a,this.__zoom.invert(a)],or(this),t.start()}function u(){if(xw(),!t.moved){var e=ce.clientX-o,n=ce.clientY-s;t.moved=e*e+n*n>f}t.zoom("mouse",i(g(t.that.__zoom,t.mouse[0]=Nn(t.that),t.mouse[1]),t.extent,c))}function l(){r.on("mousemove.zoom mouseup.zoom",null),Ce(ce.view,t.moved),xw(),t.end()}}function k(){if(n.apply(this,arguments)){var t=this.__zoom,e=Nn(this),a=t.invert(e),o=t.k*(ce.shiftKey?.5:2),s=i(g(p(t,o),e,a),r.apply(this,arguments),c);xw(),u>0?ke(this).transition().duration(u).call(v,s,e):ke(this).call(d.transform,s)}}function w(){if(n.apply(this,arguments)){var e,r,i,a,o=ce.touches,s=o.length,c=m(this,arguments,ce.changedTouches.length===s);for(bw(),r=0;rh&&A.push("'"+this.terminals_[T]+"'");O=p.showPosition?"Parse error on line "+(c+1)+":\n"+p.showPosition()+"\nExpecting "+A.join(", ")+", got '"+(this.terminals_[x]||x)+"'":"Parse error on line "+(c+1)+": Unexpected "+(x==f?"end of input":"'"+(this.terminals_[x]||x)+"'"),this.parseError(O,{text:p.match,token:this.terminals_[x]||x,line:p.yylineno,loc:v,expected:A})}if(w[0]instanceof Array&&w.length>1)throw new Error("Parse Error: multiple actions possible at state: "+k+", token: "+x);switch(w[0]){case 1:n.push(x),i.push(p.yytext),a.push(p.yylloc),n.push(w[1]),x=null,_?(x=_,_=null):(u=p.yyleng,s=p.yytext,c=p.yylineno,v=p.yylloc,l>0&&l--);break;case 
2:if(C=this.productions_[w[1]][1],M.$=i[i.length-C],M._$={first_line:a[a.length-(C||1)].first_line,last_line:a[a.length-1].last_line,first_column:a[a.length-(C||1)].first_column,last_column:a[a.length-1].last_column},m&&(M._$.range=[a[a.length-(C||1)].range[0],a[a.length-1].range[1]]),void 0!==(E=this.performAction.apply(M,[s,u,c,g.yy,w[1],i,a].concat(d))))return E;C&&(n=n.slice(0,-1*C*2),i=i.slice(0,-1*C),a=a.slice(0,-1*C)),n.push(this.productions_[w[1]][0]),i.push(M.$),a.push(M._$),S=o[n[n.length-2]][n[n.length-1]],n.push(S);break;case 3:return!0}}return!0}},M={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return 
this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var 
n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var a in i)this[a]=i[a];return!1}return!1},next:function(){if(this.done)return this.EOF;var t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),a=0;ae[0].length)){if(e=n,r=a,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[a])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". 
Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{"case-insensitive":!0},performAction:function(t,e,n,r){switch(n){case 0:return this.begin("open_directive"),56;case 1:return this.begin("type_directive"),57;case 2:return this.popState(),this.begin("arg_directive"),14;case 3:return this.popState(),this.popState(),59;case 4:return 58;case 5:return 5;case 6:case 7:case 8:case 9:case 10:break;case 11:return this.begin("ID"),16;case 12:return e.yytext=e.yytext.trim(),this.begin("ALIAS"),48;case 13:return this.popState(),this.popState(),this.begin("LINE"),18;case 14:return this.popState(),this.popState(),5;case 15:return this.begin("LINE"),27;case 16:return this.begin("LINE"),29;case 17:return this.begin("LINE"),30;case 18:return this.begin("LINE"),31;case 19:return this.begin("LINE"),36;case 20:return this.begin("LINE"),33;case 21:return this.begin("LINE"),35;case 22:return this.popState(),19;case 23:return 28;case 24:return 43;case 25:return 44;case 26:return 39;case 27:return 37;case 28:return this.begin("ID"),22;case 29:return this.begin("ID"),23;case 30:return 25;case 31:return 7;case 32:return 21;case 33:return 42;case 34:return 5;case 35:return e.yytext=e.yytext.trim(),48;case 36:return 51;case 37:return 52;case 38:return 49;case 39:return 50;case 40:return 53;case 41:return 54;case 42:return 55;case 
43:return 46;case 44:return 47;case 45:return 5;case 46:return"INVALID"}},rules:[/^(?:%%\{)/i,/^(?:((?:(?!\}%%)[^:.])*))/i,/^(?::)/i,/^(?:\}%%)/i,/^(?:((?:(?!\}%%).|\n)*))/i,/^(?:[\n]+)/i,/^(?:\s+)/i,/^(?:((?!\n)\s)+)/i,/^(?:#[^\n]*)/i,/^(?:%(?!\{)[^\n]*)/i,/^(?:[^\}]%%[^\n]*)/i,/^(?:participant\b)/i,/^(?:[^\->:\n,;]+?(?=((?!\n)\s)+as(?!\n)\s|[#\n;]|$))/i,/^(?:as\b)/i,/^(?:(?:))/i,/^(?:loop\b)/i,/^(?:rect\b)/i,/^(?:opt\b)/i,/^(?:alt\b)/i,/^(?:else\b)/i,/^(?:par\b)/i,/^(?:and\b)/i,/^(?:(?:[:]?(?:no)?wrap)?[^#\n;]*)/i,/^(?:end\b)/i,/^(?:left of\b)/i,/^(?:right of\b)/i,/^(?:over\b)/i,/^(?:note\b)/i,/^(?:activate\b)/i,/^(?:deactivate\b)/i,/^(?:title\b)/i,/^(?:sequenceDiagram\b)/i,/^(?:autonumber\b)/i,/^(?:,)/i,/^(?:;)/i,/^(?:[^\+\->:\n,;]+((?!(-x|--x))[\-]*[^\+\->:\n,;]+)*)/i,/^(?:->>)/i,/^(?:-->>)/i,/^(?:->)/i,/^(?:-->)/i,/^(?:-[x])/i,/^(?:--[x])/i,/^(?::(?:(?:no)?wrap)?[^#\n;]+)/i,/^(?:\+)/i,/^(?:-)/i,/^(?:$)/i,/^(?:.)/i],conditions:{open_directive:{rules:[1,8],inclusive:!1},type_directive:{rules:[2,3,8],inclusive:!1},arg_directive:{rules:[3,4,8],inclusive:!1},ID:{rules:[7,8,12],inclusive:!1},ALIAS:{rules:[7,8,13,14],inclusive:!1},LINE:{rules:[7,8,22],inclusive:!1},INITIAL:{rules:[0,5,6,8,9,10,11,15,16,17,18,19,20,21,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46],inclusive:!0}}};function O(){this.yy={}}return A.lexer=M,O.prototype=A,A.Parser=O,new O}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var i=n(19).readFileSync(n(20).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(14),n(7)(t))},function(t,e,n){var r=n(198);t.exports={Graph:r.Graph,json:n(301),alg:n(302),version:r.version}},function(t,e,n){var 
r;try{r={cloneDeep:n(313),constant:n(86),defaults:n(154),each:n(87),filter:n(128),find:n(314),flatten:n(156),forEach:n(126),forIn:n(319),has:n(93),isUndefined:n(139),last:n(320),map:n(140),mapValues:n(321),max:n(322),merge:n(324),min:n(329),minBy:n(330),now:n(331),pick:n(161),range:n(162),reduce:n(142),sortBy:n(338),uniqueId:n(163),values:n(147),zipObject:n(343)}}catch(t){}r||(r=window._),t.exports=r},function(t,e){var n=Array.isArray;t.exports=n},function(t,e,n){ +/** + * @license + * Copyright (c) 2012-2013 Chris Pettitt + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +t.exports={graphlib:n(311),dagre:n(153),intersect:n(368),render:n(370),util:n(12),version:n(382)}},function(t,e){t.exports=function(t){return t.webpackPolyfill||(t.deprecate=function(){},t.paths=[],t.children||(t.children=[]),Object.defineProperty(t,"loaded",{enumerable:!0,get:function(){return t.l}}),Object.defineProperty(t,"id",{enumerable:!0,get:function(){return t.i}}),t.webpackPolyfill=1),t}},function(t,e,n){"use strict";var r=n(4),i=n(17).Graph;function a(t,e,n,i){var a;do{a=r.uniqueId(i)}while(t.hasNode(a));return n.dummy=e,t.setNode(a,n),a}function o(t){return r.max(r.map(t.nodes(),(function(e){var n=t.node(e).rank;if(!r.isUndefined(n))return n})))}t.exports={addDummyNode:a,simplify:function(t){var e=(new i).setGraph(t.graph());return r.forEach(t.nodes(),(function(n){e.setNode(n,t.node(n))})),r.forEach(t.edges(),(function(n){var r=e.edge(n.v,n.w)||{weight:0,minlen:1},i=t.edge(n);e.setEdge(n.v,n.w,{weight:r.weight+i.weight,minlen:Math.max(r.minlen,i.minlen)})})),e},asNonCompoundGraph:function(t){var e=new i({multigraph:t.isMultigraph()}).setGraph(t.graph());return r.forEach(t.nodes(),(function(n){t.children(n).length||e.setNode(n,t.node(n))})),r.forEach(t.edges(),(function(n){e.setEdge(n,t.edge(n))})),e},successorWeights:function(t){var e=r.map(t.nodes(),(function(e){var n={};return r.forEach(t.outEdges(e),(function(e){n[e.w]=(n[e.w]||0)+t.edge(e).weight})),n}));return r.zipObject(t.nodes(),e)},predecessorWeights:function(t){var e=r.map(t.nodes(),(function(e){var n={};return r.forEach(t.inEdges(e),(function(e){n[e.v]=(n[e.v]||0)+t.edge(e).weight})),n}));return r.zipObject(t.nodes(),e)},intersectRect:function(t,e){var n,r,i=t.x,a=t.y,o=e.x-i,s=e.y-a,c=t.width/2,u=t.height/2;if(!o&&!s)throw new Error("Not possible to find intersection inside of the rectangle");Math.abs(s)*c>Math.abs(o)*u?(s<0&&(u=-u),n=u*o/s,r=u):(o<0&&(c=-c),n=c,r=c*s/o);return{x:i+n,y:a+r}},buildLayerMatrix:function(t){var e=r.map(r.range(o(t)+1),(function(){return[]}));return 
r.forEach(t.nodes(),(function(n){var i=t.node(n),a=i.rank;r.isUndefined(a)||(e[a][i.order]=n)})),e},normalizeRanks:function(t){var e=r.min(r.map(t.nodes(),(function(e){return t.node(e).rank})));r.forEach(t.nodes(),(function(n){var i=t.node(n);r.has(i,"rank")&&(i.rank-=e)}))},removeEmptyRanks:function(t){var e=r.min(r.map(t.nodes(),(function(e){return t.node(e).rank}))),n=[];r.forEach(t.nodes(),(function(r){var i=t.node(r).rank-e;n[i]||(n[i]=[]),n[i].push(r)}));var i=0,a=t.graph().nodeRankFactor;r.forEach(n,(function(e,n){r.isUndefined(e)&&n%a!=0?--i:i&&r.forEach(e,(function(e){t.node(e).rank+=i}))}))},addBorderNode:function(t,e,n,r){var i={width:0,height:0};arguments.length>=4&&(i.rank=n,i.order=r);return a(t,"border",i,e)},maxRank:o,partition:function(t,e){var n={lhs:[],rhs:[]};return r.forEach(t,(function(t){e(t)?n.lhs.push(t):n.rhs.push(t)})),n},time:function(t,e){var n=r.now();try{return e()}finally{console.log(t+" time: "+(r.now()-n)+"ms")}},notime:function(t,e){return e()}}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(173),i=n(174),a=n(175),o={channel:r.default,lang:i.default,unit:a.default};e.default=o},function(t,e,n){var r;try{r={clone:n(199),constant:n(86),each:n(87),filter:n(128),has:n(93),isArray:n(5),isEmpty:n(276),isFunction:n(37),isUndefined:n(139),keys:n(30),map:n(140),reduce:n(142),size:n(279),transform:n(285),union:n(286),values:n(147)}}catch(t){}r||(r=window._),t.exports=r},function(t,e){t.exports=function(t){var e=typeof t;return null!=t&&("object"==e||"function"==e)}},function(t,e,n){var r=n(43);t.exports={isSubgraph:function(t,e){return!!t.children(e).length},edgeToId:function(t){return a(t.v)+":"+a(t.w)+":"+a(t.name)},applyStyle:function(t,e){e&&t.attr("style",e)},applyClass:function(t,e,n){e&&t.attr("class",e).attr("class",n+" "+t.attr("class"))},applyTransition:function(t,e){var n=e.graph();if(r.isPlainObject(n)){var i=n.transition;if(r.isFunction(i))return i(t)}return t}};var i=/:/g;function 
a(t){return t?String(t).replace(i,"\\:"):""}},function(t,e,n){(function(t,r){var i=function(){var t=function(t,e,n,r){for(n=n||{},r=t.length;r--;n[t[r]]=e);return n},e=[1,7],n=[1,6],r=[1,14],i=[1,25],a=[1,28],o=[1,26],s=[1,27],c=[1,29],u=[1,30],l=[1,31],h=[1,33],f=[1,34],d=[1,35],p=[10,19],g=[1,47],y=[1,48],v=[1,49],m=[1,50],b=[1,51],x=[1,52],_=[10,19,25,32,33,41,44,45,46,47,48,49],k=[10,19,23,25,32,33,37,41,44,45,46,47,48,49,66,67,68],w=[10,13,17,19],E=[41,66,67,68],T=[41,48,49,66,67,68],C=[41,44,45,46,47,66,67,68],S=[10,19,25],A=[1,81],M={trace:function(){},yy:{},symbols_:{error:2,start:3,mermaidDoc:4,directive:5,graphConfig:6,openDirective:7,typeDirective:8,closeDirective:9,NEWLINE:10,":":11,argDirective:12,open_directive:13,type_directive:14,arg_directive:15,close_directive:16,CLASS_DIAGRAM:17,statements:18,EOF:19,statement:20,className:21,alphaNumToken:22,GENERICTYPE:23,relationStatement:24,LABEL:25,classStatement:26,methodStatement:27,annotationStatement:28,clickStatement:29,cssClassStatement:30,CLASS:31,STYLE_SEPARATOR:32,STRUCT_START:33,members:34,STRUCT_STOP:35,ANNOTATION_START:36,ANNOTATION_END:37,MEMBER:38,SEPARATOR:39,relation:40,STR:41,relationType:42,lineType:43,AGGREGATION:44,EXTENSION:45,COMPOSITION:46,DEPENDENCY:47,LINE:48,DOTTED_LINE:49,CALLBACK:50,LINK:51,CSSCLASS:52,commentToken:53,textToken:54,graphCodeTokens:55,textNoTagsToken:56,TAGSTART:57,TAGEND:58,"==":59,"--":60,PCT:61,DEFAULT:62,SPACE:63,MINUS:64,keywords:65,UNICODE_TEXT:66,NUM:67,ALPHA:68,$accept:0,$end:1},terminals_:{2:"error",10:"NEWLINE",11:":",13:"open_directive",14:"type_directive",15:"arg_directive",16:"close_directive",17:"CLASS_DIAGRAM",19:"EOF",23:"GENERICTYPE",25:"LABEL",31:"CLASS",32:"STYLE_SEPARATOR",33:"STRUCT_START",35:"STRUCT_STOP",36:"ANNOTATION_START",37:"ANNOTATION_END",38:"MEMBER",39:"SEPARATOR",41:"STR",44:"AGGREGATION",45:"EXTENSION",46:"COMPOSITION",47:"DEPENDENCY",48:"LINE",49:"DOTTED_LINE",50:"CALLBACK",51:"LINK",52:"CSSCLASS",55:"graphCodeTokens",57:"TAGSTART",58
:"TAGEND",59:"==",60:"--",61:"PCT",62:"DEFAULT",63:"SPACE",64:"MINUS",65:"keywords",66:"UNICODE_TEXT",67:"NUM",68:"ALPHA"},productions_:[0,[3,1],[3,2],[4,1],[5,4],[5,6],[7,1],[8,1],[12,1],[9,1],[6,4],[18,1],[18,2],[18,3],[21,1],[21,2],[21,3],[21,2],[20,1],[20,2],[20,1],[20,1],[20,1],[20,1],[20,1],[20,1],[26,2],[26,4],[26,5],[26,7],[28,4],[34,1],[34,2],[27,1],[27,2],[27,1],[27,1],[24,3],[24,4],[24,4],[24,5],[40,3],[40,2],[40,2],[40,1],[42,1],[42,1],[42,1],[42,1],[43,1],[43,1],[29,3],[29,4],[29,3],[29,4],[30,3],[53,1],[53,1],[54,1],[54,1],[54,1],[54,1],[54,1],[54,1],[54,1],[56,1],[56,1],[56,1],[56,1],[22,1],[22,1],[22,1]],performAction:function(t,e,n,r,i,a,o){var s=a.length-1;switch(i){case 6:r.parseDirective("%%{","open_directive");break;case 7:r.parseDirective(a[s],"type_directive");break;case 8:a[s]=a[s].trim().replace(/'/g,'"'),r.parseDirective(a[s],"arg_directive");break;case 9:r.parseDirective("}%%","close_directive","class");break;case 14:this.$=a[s];break;case 15:this.$=a[s-1]+a[s];break;case 16:this.$=a[s-2]+"~"+a[s-1]+a[s];break;case 17:this.$=a[s-1]+"~"+a[s];break;case 18:r.addRelation(a[s]);break;case 19:a[s-1].title=r.cleanupLabel(a[s]),r.addRelation(a[s-1]);break;case 26:r.addClass(a[s]);break;case 27:r.addClass(a[s-2]),r.setCssClass(a[s-2],a[s]);break;case 28:r.addClass(a[s-3]),r.addMembers(a[s-3],a[s-1]);break;case 29:r.addClass(a[s-5]),r.setCssClass(a[s-5],a[s-3]),r.addMembers(a[s-5],a[s-1]);break;case 30:r.addAnnotation(a[s],a[s-2]);break;case 31:this.$=[a[s]];break;case 32:a[s].push(a[s-1]),this.$=a[s];break;case 33:break;case 34:r.addMember(a[s-1],r.cleanupLabel(a[s]));break;case 35:case 36:break;case 37:this.$={id1:a[s-2],id2:a[s],relation:a[s-1],relationTitle1:"none",relationTitle2:"none"};break;case 38:this.$={id1:a[s-3],id2:a[s],relation:a[s-1],relationTitle1:a[s-2],relationTitle2:"none"};break;case 39:this.$={id1:a[s-3],id2:a[s],relation:a[s-2],relationTitle1:"none",relationTitle2:a[s-1]};break;case 
40:this.$={id1:a[s-4],id2:a[s],relation:a[s-2],relationTitle1:a[s-3],relationTitle2:a[s-1]};break;case 41:this.$={type1:a[s-2],type2:a[s],lineType:a[s-1]};break;case 42:this.$={type1:"none",type2:a[s],lineType:a[s-1]};break;case 43:this.$={type1:a[s-1],type2:"none",lineType:a[s]};break;case 44:this.$={type1:"none",type2:"none",lineType:a[s]};break;case 45:this.$=r.relationType.AGGREGATION;break;case 46:this.$=r.relationType.EXTENSION;break;case 47:this.$=r.relationType.COMPOSITION;break;case 48:this.$=r.relationType.DEPENDENCY;break;case 49:this.$=r.lineType.LINE;break;case 50:this.$=r.lineType.DOTTED_LINE;break;case 51:this.$=a[s-2],r.setClickEvent(a[s-1],a[s],void 0);break;case 52:this.$=a[s-3],r.setClickEvent(a[s-2],a[s-1],a[s]);break;case 53:this.$=a[s-2],r.setLink(a[s-1],a[s],void 0);break;case 54:this.$=a[s-3],r.setLink(a[s-2],a[s-1],a[s]);break;case 55:r.setCssClass(a[s-1],a[s])}},table:[{3:1,4:2,5:3,6:4,7:5,13:e,17:n},{1:[3]},{1:[2,1]},{3:8,4:2,5:3,6:4,7:5,13:e,17:n},{1:[2,3]},{8:9,14:[1,10]},{10:[1,11]},{14:[2,6]},{1:[2,2]},{9:12,11:[1,13],16:r},t([11,16],[2,7]),{5:23,7:5,13:e,18:15,20:16,21:24,22:32,24:17,26:18,27:19,28:20,29:21,30:22,31:i,36:a,38:o,39:s,50:c,51:u,52:l,66:h,67:f,68:d},{10:[1,36]},{12:37,15:[1,38]},{10:[2,9]},{19:[1,39]},{10:[1,40],19:[2,11]},t(p,[2,18],{25:[1,41]}),t(p,[2,20]),t(p,[2,21]),t(p,[2,22]),t(p,[2,23]),t(p,[2,24]),t(p,[2,25]),t(p,[2,33],{40:42,42:45,43:46,25:[1,44],41:[1,43],44:g,45:y,46:v,47:m,48:b,49:x}),{21:53,22:32,66:h,67:f,68:d},t(p,[2,35]),t(p,[2,36]),{22:54,66:h,67:f,68:d},{21:55,22:32,66:h,67:f,68:d},{21:56,22:32,66:h,67:f,68:d},{41:[1,57]},t(_,[2,14],{22:32,21:58,23:[1,59],66:h,67:f,68:d}),t(k,[2,69]),t(k,[2,70]),t(k,[2,71]),t(w,[2,4]),{9:60,16:r},{16:[2,8]},{1:[2,10]},{5:23,7:5,13:e,18:61,19:[2,12],20:16,21:24,22:32,24:17,26:18,27:19,28:20,29:21,30:22,31:i,36:a,38:o,39:s,50:c,51:u,52:l,66:h,67:f,68:d},t(p,[2,19]),{21:62,22:32,41:[1,63],66:h,67:f,68:d},{40:64,42:45,43:46,44:g,45:y,46:v,47:m,48:b,49:x},t(p,[2,34]),{43:65
,48:b,49:x},t(E,[2,44],{42:66,44:g,45:y,46:v,47:m}),t(T,[2,45]),t(T,[2,46]),t(T,[2,47]),t(T,[2,48]),t(C,[2,49]),t(C,[2,50]),t(p,[2,26],{32:[1,67],33:[1,68]}),{37:[1,69]},{41:[1,70]},{41:[1,71]},{22:72,66:h,67:f,68:d},t(_,[2,15]),t(_,[2,17],{22:32,21:73,66:h,67:f,68:d}),{10:[1,74]},{19:[2,13]},t(S,[2,37]),{21:75,22:32,66:h,67:f,68:d},{21:76,22:32,41:[1,77],66:h,67:f,68:d},t(E,[2,43],{42:78,44:g,45:y,46:v,47:m}),t(E,[2,42]),{22:79,66:h,67:f,68:d},{34:80,38:A},{21:82,22:32,66:h,67:f,68:d},t(p,[2,51],{41:[1,83]}),t(p,[2,53],{41:[1,84]}),t(p,[2,55]),t(_,[2,16]),t(w,[2,5]),t(S,[2,39]),t(S,[2,38]),{21:85,22:32,66:h,67:f,68:d},t(E,[2,41]),t(p,[2,27],{33:[1,86]}),{35:[1,87]},{34:88,35:[2,31],38:A},t(p,[2,30]),t(p,[2,52]),t(p,[2,54]),t(S,[2,40]),{34:89,38:A},t(p,[2,28]),{35:[2,32]},{35:[1,90]},t(p,[2,29])],defaultActions:{2:[2,1],4:[2,3],7:[2,6],8:[2,2],14:[2,9],38:[2,8],39:[2,10],61:[2,13],88:[2,32]},parseError:function(t,e){if(!e.recoverable){var n=new Error(t);throw n.hash=e,n}this.trace(t)},parse:function(t){var e=this,n=[0],r=[],i=[null],a=[],o=this.table,s="",c=0,u=0,l=0,h=2,f=1,d=a.slice.call(arguments,1),p=Object.create(this.lexer),g={yy:{}};for(var y in this.yy)Object.prototype.hasOwnProperty.call(this.yy,y)&&(g.yy[y]=this.yy[y]);p.setInput(t,g.yy),g.yy.lexer=p,g.yy.parser=this,void 0===p.yylloc&&(p.yylloc={});var v=p.yylloc;a.push(v);var m=p.options&&p.options.ranges;function b(){var t;return"number"!=typeof(t=r.pop()||p.lex()||f)&&(t instanceof Array&&(t=(r=t).pop()),t=e.symbols_[t]||t),t}"function"==typeof g.yy.parseError?this.parseError=g.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;for(var x,_,k,w,E,T,C,S,A,M={};;){if(k=n[n.length-1],this.defaultActions[k]?w=this.defaultActions[k]:(null==x&&(x=b()),w=o[k]&&o[k][x]),void 0===w||!w.length||!w[0]){var O="";for(T in A=[],o[k])this.terminals_[T]&&T>h&&A.push("'"+this.terminals_[T]+"'");O=p.showPosition?"Parse error on line "+(c+1)+":\n"+p.showPosition()+"\nExpecting "+A.join(", ")+", got 
'"+(this.terminals_[x]||x)+"'":"Parse error on line "+(c+1)+": Unexpected "+(x==f?"end of input":"'"+(this.terminals_[x]||x)+"'"),this.parseError(O,{text:p.match,token:this.terminals_[x]||x,line:p.yylineno,loc:v,expected:A})}if(w[0]instanceof Array&&w.length>1)throw new Error("Parse Error: multiple actions possible at state: "+k+", token: "+x);switch(w[0]){case 1:n.push(x),i.push(p.yytext),a.push(p.yylloc),n.push(w[1]),x=null,_?(x=_,_=null):(u=p.yyleng,s=p.yytext,c=p.yylineno,v=p.yylloc,l>0&&l--);break;case 2:if(C=this.productions_[w[1]][1],M.$=i[i.length-C],M._$={first_line:a[a.length-(C||1)].first_line,last_line:a[a.length-1].last_line,first_column:a[a.length-(C||1)].first_column,last_column:a[a.length-1].last_column},m&&(M._$.range=[a[a.length-(C||1)].range[0],a[a.length-1].range[1]]),void 0!==(E=this.performAction.apply(M,[s,u,c,g.yy,w[1],i,a].concat(d))))return E;C&&(n=n.slice(0,-1*C*2),i=i.slice(0,-1*C),a=a.slice(0,-1*C)),n.push(this.productions_[w[1]][0]),i.push(M.$),a.push(M._$),S=o[n[n.length-2]][n[n.length-1]],n.push(S);break;case 3:return!0}}return!0}},O={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var 
r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var 
n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var a in i)this[a]=i[a];return!1}return!1},next:function(){if(this.done)return this.EOF;var t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),a=0;ae[0].length)){if(e=n,r=a,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[a])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". 
Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{},performAction:function(t,e,n,r){switch(n){case 0:return this.begin("open_directive"),13;case 1:return this.begin("type_directive"),14;case 2:return this.popState(),this.begin("arg_directive"),11;case 3:return this.popState(),this.popState(),16;case 4:return 15;case 5:case 6:break;case 7:return 10;case 8:break;case 9:case 10:return 17;case 11:return this.begin("struct"),33;case 12:return"EOF_IN_STRUCT";case 13:return"OPEN_IN_STRUCT";case 14:return this.popState(),35;case 15:break;case 16:return"MEMBER";case 17:return 31;case 18:return 52;case 19:return 50;case 20:return 51;case 21:return 36;case 22:return 37;case 23:this.begin("generic");break;case 24:this.popState();break;case 25:return"GENERICTYPE";case 26:this.begin("string");break;case 27:this.popState();break;case 28:return"STR";case 29:case 30:return 45;case 31:case 32:return 47;case 33:return 46;case 34:return 44;case 35:return 48;case 36:return 49;case 37:return 25;case 38:return 32;case 39:return 64;case 40:return"DOT";case 41:return"PLUS";case 42:return 61;case 43:case 44:return"EQUALS";case 45:return 68;case 46:return"PUNCTUATION";case 47:return 67;case 48:return 66;case 49:return 63;case 50:return 
19}},rules:[/^(?:%%\{)/,/^(?:((?:(?!\}%%)[^:.])*))/,/^(?::)/,/^(?:\}%%)/,/^(?:((?:(?!\}%%).|\n)*))/,/^(?:%%(?!\{)*[^\n]*(\r?\n?)+)/,/^(?:%%[^\n]*(\r?\n)*)/,/^(?:(\r?\n)+)/,/^(?:\s+)/,/^(?:classDiagram-v2\b)/,/^(?:classDiagram\b)/,/^(?:[{])/,/^(?:$)/,/^(?:[{])/,/^(?:[}])/,/^(?:[\n])/,/^(?:[^{}\n]*)/,/^(?:class\b)/,/^(?:cssClass\b)/,/^(?:callback\b)/,/^(?:link\b)/,/^(?:<<)/,/^(?:>>)/,/^(?:[~])/,/^(?:[~])/,/^(?:[^~]*)/,/^(?:["])/,/^(?:["])/,/^(?:[^"]*)/,/^(?:\s*<\|)/,/^(?:\s*\|>)/,/^(?:\s*>)/,/^(?:\s*<)/,/^(?:\s*\*)/,/^(?:\s*o\b)/,/^(?:--)/,/^(?:\.\.)/,/^(?::{1}[^:\n;]+)/,/^(?::{3})/,/^(?:-)/,/^(?:\.)/,/^(?:\+)/,/^(?:%)/,/^(?:=)/,/^(?:=)/,/^(?:\w+)/,/^(?:[!"#$%&'*+,-.`?\\/])/,/^(?:[0-9]+)/,/^(?:[\u00AA\u00B5\u00BA\u00C0-\u00D6\u00D8-\u00F6]|[\u00F8-\u02C1\u02C6-\u02D1\u02E0-\u02E4\u02EC\u02EE\u0370-\u0374\u0376\u0377]|[\u037A-\u037D\u0386\u0388-\u038A\u038C\u038E-\u03A1\u03A3-\u03F5]|[\u03F7-\u0481\u048A-\u0527\u0531-\u0556\u0559\u0561-\u0587\u05D0-\u05EA]|[\u05F0-\u05F2\u0620-\u064A\u066E\u066F\u0671-\u06D3\u06D5\u06E5\u06E6\u06EE]|[\u06EF\u06FA-\u06FC\u06FF\u0710\u0712-\u072F\u074D-\u07A5\u07B1\u07CA-\u07EA]|[\u07F4\u07F5\u07FA\u0800-\u0815\u081A\u0824\u0828\u0840-\u0858\u08A0]|[\u08A2-\u08AC\u0904-\u0939\u093D\u0950\u0958-\u0961\u0971-\u0977]|[\u0979-\u097F\u0985-\u098C\u098F\u0990\u0993-\u09A8\u09AA-\u09B0\u09B2]|[\u09B6-\u09B9\u09BD\u09CE\u09DC\u09DD\u09DF-\u09E1\u09F0\u09F1\u0A05-\u0A0A]|[\u0A0F\u0A10\u0A13-\u0A28\u0A2A-\u0A30\u0A32\u0A33\u0A35\u0A36\u0A38\u0A39]|[\u0A59-\u0A5C\u0A5E\u0A72-\u0A74\u0A85-\u0A8D\u0A8F-\u0A91\u0A93-\u0AA8]|[\u0AAA-\u0AB0\u0AB2\u0AB3\u0AB5-\u0AB9\u0ABD\u0AD0\u0AE0\u0AE1\u0B05-\u0B0C]|[\u0B0F\u0B10\u0B13-\u0B28\u0B2A-\u0B30\u0B32\u0B33\u0B35-\u0B39\u0B3D\u0B5C]|[\u0B5D\u0B5F-\u0B61\u0B71\u0B83\u0B85-\u0B8A\u0B8E-\u0B90\u0B92-\u0B95\u0B99]|[\u0B9A\u0B9C\u0B9E\u0B9F\u0BA3\u0BA4\u0BA8-\u0BAA\u0BAE-\u0BB9\u0BD0]|[\u0C05-\u0C0C\u0C0E-\u0C10\u0C12-\u0C28\u0C2A-\u0C33\u0C35-\u0C39\u0C3D]|[\u0C58\u0C59\u0C60\u0C61\u0C85-\u0C8C\u0C8E-\u0C90\u0C
92-\u0CA8\u0CAA-\u0CB3]|[\u0CB5-\u0CB9\u0CBD\u0CDE\u0CE0\u0CE1\u0CF1\u0CF2\u0D05-\u0D0C\u0D0E-\u0D10]|[\u0D12-\u0D3A\u0D3D\u0D4E\u0D60\u0D61\u0D7A-\u0D7F\u0D85-\u0D96\u0D9A-\u0DB1]|[\u0DB3-\u0DBB\u0DBD\u0DC0-\u0DC6\u0E01-\u0E30\u0E32\u0E33\u0E40-\u0E46\u0E81]|[\u0E82\u0E84\u0E87\u0E88\u0E8A\u0E8D\u0E94-\u0E97\u0E99-\u0E9F\u0EA1-\u0EA3]|[\u0EA5\u0EA7\u0EAA\u0EAB\u0EAD-\u0EB0\u0EB2\u0EB3\u0EBD\u0EC0-\u0EC4\u0EC6]|[\u0EDC-\u0EDF\u0F00\u0F40-\u0F47\u0F49-\u0F6C\u0F88-\u0F8C\u1000-\u102A]|[\u103F\u1050-\u1055\u105A-\u105D\u1061\u1065\u1066\u106E-\u1070\u1075-\u1081]|[\u108E\u10A0-\u10C5\u10C7\u10CD\u10D0-\u10FA\u10FC-\u1248\u124A-\u124D]|[\u1250-\u1256\u1258\u125A-\u125D\u1260-\u1288\u128A-\u128D\u1290-\u12B0]|[\u12B2-\u12B5\u12B8-\u12BE\u12C0\u12C2-\u12C5\u12C8-\u12D6\u12D8-\u1310]|[\u1312-\u1315\u1318-\u135A\u1380-\u138F\u13A0-\u13F4\u1401-\u166C]|[\u166F-\u167F\u1681-\u169A\u16A0-\u16EA\u1700-\u170C\u170E-\u1711]|[\u1720-\u1731\u1740-\u1751\u1760-\u176C\u176E-\u1770\u1780-\u17B3\u17D7]|[\u17DC\u1820-\u1877\u1880-\u18A8\u18AA\u18B0-\u18F5\u1900-\u191C]|[\u1950-\u196D\u1970-\u1974\u1980-\u19AB\u19C1-\u19C7\u1A00-\u1A16]|[\u1A20-\u1A54\u1AA7\u1B05-\u1B33\u1B45-\u1B4B\u1B83-\u1BA0\u1BAE\u1BAF]|[\u1BBA-\u1BE5\u1C00-\u1C23\u1C4D-\u1C4F\u1C5A-\u1C7D\u1CE9-\u1CEC]|[\u1CEE-\u1CF1\u1CF5\u1CF6\u1D00-\u1DBF\u1E00-\u1F15\u1F18-\u1F1D]|[\u1F20-\u1F45\u1F48-\u1F4D\u1F50-\u1F57\u1F59\u1F5B\u1F5D\u1F5F-\u1F7D]|[\u1F80-\u1FB4\u1FB6-\u1FBC\u1FBE\u1FC2-\u1FC4\u1FC6-\u1FCC\u1FD0-\u1FD3]|[\u1FD6-\u1FDB\u1FE0-\u1FEC\u1FF2-\u1FF4\u1FF6-\u1FFC\u2071\u207F]|[\u2090-\u209C\u2102\u2107\u210A-\u2113\u2115\u2119-\u211D\u2124\u2126\u2128]|[\u212A-\u212D\u212F-\u2139\u213C-\u213F\u2145-\u2149\u214E\u2183\u2184]|[\u2C00-\u2C2E\u2C30-\u2C5E\u2C60-\u2CE4\u2CEB-\u2CEE\u2CF2\u2CF3]|[\u2D00-\u2D25\u2D27\u2D2D\u2D30-\u2D67\u2D6F\u2D80-\u2D96\u2DA0-\u2DA6]|[\u2DA8-\u2DAE\u2DB0-\u2DB6\u2DB8-\u2DBE\u2DC0-\u2DC6\u2DC8-\u2DCE]|[\u2DD0-\u2DD6\u2DD8-\u2DDE\u2E2F\u3005\u3006\u3031-\u3035\u303B\u303C]|[\u3041-\u309
6\u309D-\u309F\u30A1-\u30FA\u30FC-\u30FF\u3105-\u312D]|[\u3131-\u318E\u31A0-\u31BA\u31F0-\u31FF\u3400-\u4DB5\u4E00-\u9FCC]|[\uA000-\uA48C\uA4D0-\uA4FD\uA500-\uA60C\uA610-\uA61F\uA62A\uA62B]|[\uA640-\uA66E\uA67F-\uA697\uA6A0-\uA6E5\uA717-\uA71F\uA722-\uA788]|[\uA78B-\uA78E\uA790-\uA793\uA7A0-\uA7AA\uA7F8-\uA801\uA803-\uA805]|[\uA807-\uA80A\uA80C-\uA822\uA840-\uA873\uA882-\uA8B3\uA8F2-\uA8F7\uA8FB]|[\uA90A-\uA925\uA930-\uA946\uA960-\uA97C\uA984-\uA9B2\uA9CF\uAA00-\uAA28]|[\uAA40-\uAA42\uAA44-\uAA4B\uAA60-\uAA76\uAA7A\uAA80-\uAAAF\uAAB1\uAAB5]|[\uAAB6\uAAB9-\uAABD\uAAC0\uAAC2\uAADB-\uAADD\uAAE0-\uAAEA\uAAF2-\uAAF4]|[\uAB01-\uAB06\uAB09-\uAB0E\uAB11-\uAB16\uAB20-\uAB26\uAB28-\uAB2E]|[\uABC0-\uABE2\uAC00-\uD7A3\uD7B0-\uD7C6\uD7CB-\uD7FB\uF900-\uFA6D]|[\uFA70-\uFAD9\uFB00-\uFB06\uFB13-\uFB17\uFB1D\uFB1F-\uFB28\uFB2A-\uFB36]|[\uFB38-\uFB3C\uFB3E\uFB40\uFB41\uFB43\uFB44\uFB46-\uFBB1\uFBD3-\uFD3D]|[\uFD50-\uFD8F\uFD92-\uFDC7\uFDF0-\uFDFB\uFE70-\uFE74\uFE76-\uFEFC]|[\uFF21-\uFF3A\uFF41-\uFF5A\uFF66-\uFFBE\uFFC2-\uFFC7\uFFCA-\uFFCF]|[\uFFD2-\uFFD7\uFFDA-\uFFDC])/,/^(?:\s)/,/^(?:$)/],conditions:{string:{rules:[27,28],inclusive:!1},generic:{rules:[24,25],inclusive:!1},struct:{rules:[12,13,14,15,16],inclusive:!1},open_directive:{rules:[1],inclusive:!1},type_directive:{rules:[2,3],inclusive:!1},arg_directive:{rules:[3,4],inclusive:!1},INITIAL:{rules:[0,5,6,7,8,9,10,11,17,18,19,20,21,22,23,26,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],inclusive:!0}}};function D(){this.yy={}}return M.lexer=O,D.prototype=M,M.Parser=D,new D}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var i=n(19).readFileSync(n(20).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(14),n(7)(t))},function(t,e){var n,r,i=t.exports={};function a(){throw new Error("setTimeout has not been defined")}function o(){throw new 
Error("clearTimeout has not been defined")}function s(t){if(n===setTimeout)return setTimeout(t,0);if((n===a||!n)&&setTimeout)return n=setTimeout,setTimeout(t,0);try{return n(t,0)}catch(e){try{return n.call(null,t,0)}catch(e){return n.call(this,t,0)}}}!function(){try{n="function"==typeof setTimeout?setTimeout:a}catch(t){n=a}try{r="function"==typeof clearTimeout?clearTimeout:o}catch(t){r=o}}();var c,u=[],l=!1,h=-1;function f(){l&&c&&(l=!1,c.length?u=c.concat(u):h=-1,u.length&&d())}function d(){if(!l){var t=s(f);l=!0;for(var e=u.length;e;){for(c=u,u=[];++h1)for(var n=1;n=0;r--){var i=t[r];"."===i?t.splice(r,1):".."===i?(t.splice(r,1),n++):n&&(t.splice(r,1),n--)}if(e)for(;n--;n)t.unshift("..");return t}function r(t,e){if(t.filter)return t.filter(e);for(var n=[],r=0;r=-1&&!i;a--){var o=a>=0?arguments[a]:t.cwd();if("string"!=typeof o)throw new TypeError("Arguments to path.resolve must be strings");o&&(e=o+"/"+e,i="/"===o.charAt(0))}return(i?"/":"")+(e=n(r(e.split("/"),(function(t){return!!t})),!i).join("/"))||"."},e.normalize=function(t){var a=e.isAbsolute(t),o="/"===i(t,-1);return(t=n(r(t.split("/"),(function(t){return!!t})),!a).join("/"))||a||(t="."),t&&o&&(t+="/"),(a?"/":"")+t},e.isAbsolute=function(t){return"/"===t.charAt(0)},e.join=function(){var t=Array.prototype.slice.call(arguments,0);return e.normalize(r(t,(function(t,e){if("string"!=typeof t)throw new TypeError("Arguments to path.join must be strings");return t})).join("/"))},e.relative=function(t,n){function r(t){for(var e=0;e=0&&""===t[n];n--);return e>n?[]:t.slice(e,n-e+1)}t=e.resolve(t).substr(1),n=e.resolve(n).substr(1);for(var i=r(t.split("/")),a=r(n.split("/")),o=Math.min(i.length,a.length),s=o,c=0;c=1;--a)if(47===(e=t.charCodeAt(a))){if(!i){r=a;break}}else i=!1;return-1===r?n?"/":".":n&&1===r?"/":t.slice(0,r)},e.basename=function(t,e){var n=function(t){"string"!=typeof t&&(t+="");var 
e,n=0,r=-1,i=!0;for(e=t.length-1;e>=0;--e)if(47===t.charCodeAt(e)){if(!i){n=e+1;break}}else-1===r&&(i=!1,r=e+1);return-1===r?"":t.slice(n,r)}(t);return e&&n.substr(-1*e.length)===e&&(n=n.substr(0,n.length-e.length)),n},e.extname=function(t){"string"!=typeof t&&(t+="");for(var e=-1,n=0,r=-1,i=!0,a=0,o=t.length-1;o>=0;--o){var s=t.charCodeAt(o);if(47!==s)-1===r&&(i=!1,r=o+1),46===s?-1===e?e=o:1!==a&&(a=1):-1!==e&&(a=-1);else if(!i){n=o+1;break}}return-1===e||-1===r||0===a||1===a&&e===r-1&&e===n+1?"":t.slice(e,r)};var i="b"==="ab".substr(-1)?function(t,e,n){return t.substr(e,n)}:function(t,e,n){return e<0&&(e=t.length+e),t.substr(e,n)}}).call(this,n(14))},function(t,e){t.exports=function(t){return null!=t&&"object"==typeof t}},function(t,e,n){(function(t,r){var i=function(){var t=function(t,e,n,r){for(n=n||{},r=t.length;r--;n[t[r]]=e);return n},e=[1,2],n=[1,3],r=[1,5],i=[1,7],a=[2,5],o=[1,15],s=[1,17],c=[1,19],u=[1,20],l=[1,21],h=[1,22],f=[1,28],d=[1,23],p=[1,24],g=[1,25],y=[1,26],v=[1,29],m=[1,32],b=[1,4,5,14,15,17,19,20,22,23,24,25,26,36,39],x=[1,4,5,12,13,14,15,17,19,20,22,23,24,25,26,36,39],_=[1,4,5,7,14,15,17,19,20,22,23,24,25,26,36,39],k=[4,5,14,15,17,19,20,22,23,24,25,26,36,39],w={trace:function(){},yy:{},symbols_:{error:2,start:3,SPACE:4,NL:5,directive:6,SD:7,document:8,line:9,statement:10,idStatement:11,DESCR:12,"--\x3e":13,HIDE_EMPTY:14,scale:15,WIDTH:16,COMPOSIT_STATE:17,STRUCT_START:18,STRUCT_STOP:19,STATE_DESCR:20,AS:21,ID:22,FORK:23,JOIN:24,CONCURRENT:25,note:26,notePosition:27,NOTE_TEXT:28,openDirective:29,typeDirective:30,closeDirective:31,":":32,argDirective:33,eol:34,";":35,EDGE_STATE:36,left_of:37,right_of:38,open_directive:39,type_directive:40,arg_directive:41,close_directive:42,$accept:0,$end:1},terminals_:{2:"error",4:"SPACE",5:"NL",7:"SD",12:"DESCR",13:"--\x3e",14:"HIDE_EMPTY",15:"scale",16:"WIDTH",17:"COMPOSIT_STATE",18:"STRUCT_START",19:"STRUCT_STOP",20:"STATE_DESCR",21:"AS",22:"ID",23:"FORK",24:"JOIN",25:"CONCURRENT",26:"note",28:"NOTE_TEXT",3
2:":",35:";",36:"EDGE_STATE",37:"left_of",38:"right_of",39:"open_directive",40:"type_directive",41:"arg_directive",42:"close_directive"},productions_:[0,[3,2],[3,2],[3,2],[3,2],[8,0],[8,2],[9,2],[9,1],[9,1],[10,1],[10,2],[10,3],[10,4],[10,1],[10,2],[10,1],[10,4],[10,3],[10,6],[10,1],[10,1],[10,1],[10,4],[10,4],[10,1],[6,3],[6,5],[34,1],[34,1],[11,1],[11,1],[27,1],[27,1],[29,1],[30,1],[33,1],[31,1]],performAction:function(t,e,n,r,i,a,o){var s=a.length-1;switch(i){case 4:return r.setRootDoc(a[s]),a[s];case 5:this.$=[];break;case 6:"nl"!=a[s]&&(a[s-1].push(a[s]),this.$=a[s-1]);break;case 7:case 8:this.$=a[s];break;case 9:this.$="nl";break;case 10:this.$={stmt:"state",id:a[s],type:"default",description:""};break;case 11:this.$={stmt:"state",id:a[s-1],type:"default",description:r.trimColon(a[s])};break;case 12:this.$={stmt:"relation",state1:{stmt:"state",id:a[s-2],type:"default",description:""},state2:{stmt:"state",id:a[s],type:"default",description:""}};break;case 13:this.$={stmt:"relation",state1:{stmt:"state",id:a[s-3],type:"default",description:""},state2:{stmt:"state",id:a[s-1],type:"default",description:""},description:a[s].substr(1).trim()};break;case 17:this.$={stmt:"state",id:a[s-3],type:"default",description:"",doc:a[s-1]};break;case 18:var c=a[s],u=a[s-2].trim();if(a[s].match(":")){var l=a[s].split(":");c=l[0],u=[u,l[1]]}this.$={stmt:"state",id:c,type:"default",description:u};break;case 19:this.$={stmt:"state",id:a[s-3],type:"default",description:a[s-5],doc:a[s-1]};break;case 20:this.$={stmt:"state",id:a[s],type:"fork"};break;case 21:this.$={stmt:"state",id:a[s],type:"join"};break;case 22:this.$={stmt:"state",id:r.getDividerId(),type:"divider"};break;case 23:this.$={stmt:"state",id:a[s-1].trim(),note:{position:a[s-2].trim(),text:a[s].trim()}};break;case 30:case 31:this.$=a[s];break;case 34:r.parseDirective("%%{","open_directive");break;case 35:r.parseDirective(a[s],"type_directive");break;case 
36:a[s]=a[s].trim().replace(/'/g,'"'),r.parseDirective(a[s],"arg_directive");break;case 37:r.parseDirective("}%%","close_directive","state")}},table:[{3:1,4:e,5:n,6:4,7:r,29:6,39:i},{1:[3]},{3:8,4:e,5:n,6:4,7:r,29:6,39:i},{3:9,4:e,5:n,6:4,7:r,29:6,39:i},{3:10,4:e,5:n,6:4,7:r,29:6,39:i},t([1,4,5,14,15,17,20,22,23,24,25,26,36,39],a,{8:11}),{30:12,40:[1,13]},{40:[2,34]},{1:[2,1]},{1:[2,2]},{1:[2,3]},{1:[2,4],4:o,5:s,6:27,9:14,10:16,11:18,14:c,15:u,17:l,20:h,22:f,23:d,24:p,25:g,26:y,29:6,36:v,39:i},{31:30,32:[1,31],42:m},t([32,42],[2,35]),t(b,[2,6]),{6:27,10:33,11:18,14:c,15:u,17:l,20:h,22:f,23:d,24:p,25:g,26:y,29:6,36:v,39:i},t(b,[2,8]),t(b,[2,9]),t(b,[2,10],{12:[1,34],13:[1,35]}),t(b,[2,14]),{16:[1,36]},t(b,[2,16],{18:[1,37]}),{21:[1,38]},t(b,[2,20]),t(b,[2,21]),t(b,[2,22]),{27:39,28:[1,40],37:[1,41],38:[1,42]},t(b,[2,25]),t(x,[2,30]),t(x,[2,31]),t(_,[2,26]),{33:43,41:[1,44]},t(_,[2,37]),t(b,[2,7]),t(b,[2,11]),{11:45,22:f,36:v},t(b,[2,15]),t(k,a,{8:46}),{22:[1,47]},{22:[1,48]},{21:[1,49]},{22:[2,32]},{22:[2,33]},{31:50,42:m},{42:[2,36]},t(b,[2,12],{12:[1,51]}),{4:o,5:s,6:27,9:14,10:16,11:18,14:c,15:u,17:l,19:[1,52],20:h,22:f,23:d,24:p,25:g,26:y,29:6,36:v,39:i},t(b,[2,18],{18:[1,53]}),{28:[1,54]},{22:[1,55]},t(_,[2,27]),t(b,[2,13]),t(b,[2,17]),t(k,a,{8:56}),t(b,[2,23]),t(b,[2,24]),{4:o,5:s,6:27,9:14,10:16,11:18,14:c,15:u,17:l,19:[1,57],20:h,22:f,23:d,24:p,25:g,26:y,29:6,36:v,39:i},t(b,[2,19])],defaultActions:{7:[2,34],8:[2,1],9:[2,2],10:[2,3],41:[2,32],42:[2,33],44:[2,36]},parseError:function(t,e){if(!e.recoverable){var n=new Error(t);throw n.hash=e,n}this.trace(t)},parse:function(t){var e=this,n=[0],r=[],i=[null],a=[],o=this.table,s="",c=0,u=0,l=0,h=2,f=1,d=a.slice.call(arguments,1),p=Object.create(this.lexer),g={yy:{}};for(var y in this.yy)Object.prototype.hasOwnProperty.call(this.yy,y)&&(g.yy[y]=this.yy[y]);p.setInput(t,g.yy),g.yy.lexer=p,g.yy.parser=this,void 0===p.yylloc&&(p.yylloc={});var v=p.yylloc;a.push(v);var m=p.options&&p.options.ranges;function b(){var 
t;return"number"!=typeof(t=r.pop()||p.lex()||f)&&(t instanceof Array&&(t=(r=t).pop()),t=e.symbols_[t]||t),t}"function"==typeof g.yy.parseError?this.parseError=g.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;for(var x,_,k,w,E,T,C,S,A,M={};;){if(k=n[n.length-1],this.defaultActions[k]?w=this.defaultActions[k]:(null==x&&(x=b()),w=o[k]&&o[k][x]),void 0===w||!w.length||!w[0]){var O="";for(T in A=[],o[k])this.terminals_[T]&&T>h&&A.push("'"+this.terminals_[T]+"'");O=p.showPosition?"Parse error on line "+(c+1)+":\n"+p.showPosition()+"\nExpecting "+A.join(", ")+", got '"+(this.terminals_[x]||x)+"'":"Parse error on line "+(c+1)+": Unexpected "+(x==f?"end of input":"'"+(this.terminals_[x]||x)+"'"),this.parseError(O,{text:p.match,token:this.terminals_[x]||x,line:p.yylineno,loc:v,expected:A})}if(w[0]instanceof Array&&w.length>1)throw new Error("Parse Error: multiple actions possible at state: "+k+", token: "+x);switch(w[0]){case 1:n.push(x),i.push(p.yytext),a.push(p.yylloc),n.push(w[1]),x=null,_?(x=_,_=null):(u=p.yyleng,s=p.yytext,c=p.yylineno,v=p.yylloc,l>0&&l--);break;case 2:if(C=this.productions_[w[1]][1],M.$=i[i.length-C],M._$={first_line:a[a.length-(C||1)].first_line,last_line:a[a.length-1].last_line,first_column:a[a.length-(C||1)].first_column,last_column:a[a.length-1].last_column},m&&(M._$.range=[a[a.length-(C||1)].range[0],a[a.length-1].range[1]]),void 0!==(E=this.performAction.apply(M,[s,u,c,g.yy,w[1],i,a].concat(d))))return E;C&&(n=n.slice(0,-1*C*2),i=i.slice(0,-1*C),a=a.slice(0,-1*C)),n.push(this.productions_[w[1]][0]),i.push(M.$),a.push(M._$),S=o[n[n.length-2]][n[n.length-1]],n.push(S);break;case 3:return!0}}return!0}},E={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return 
this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". 
You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var a in i)this[a]=i[a];return!1}return!1},next:function(){if(this.done)return this.EOF;var 
t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),a=0;ae[0].length)){if(e=n,r=a,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[a])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{"case-insensitive":!0},performAction:function(t,e,n,r){switch(n){case 0:return this.begin("open_directive"),39;case 1:return this.begin("type_directive"),40;case 2:return this.popState(),this.begin("arg_directive"),32;case 3:return this.popState(),this.popState(),42;case 4:return 41;case 5:break;case 6:console.log("Crap after close");break;case 7:return 5;case 8:case 9:case 10:case 11:break;case 12:return this.pushState("SCALE"),15;case 13:return 16;case 14:this.popState();break;case 15:this.pushState("STATE");break;case 16:return this.popState(),e.yytext=e.yytext.slice(0,-8).trim(),23;case 17:return this.popState(),e.yytext=e.yytext.slice(0,-8).trim(),24;case 18:return this.popState(),e.yytext=e.yytext.slice(0,-8).trim(),23;case 19:return this.popState(),e.yytext=e.yytext.slice(0,-8).trim(),24;case 20:this.begin("STATE_STRING");break;case 
21:return this.popState(),this.pushState("STATE_ID"),"AS";case 22:return this.popState(),"ID";case 23:this.popState();break;case 24:return"STATE_DESCR";case 25:return 17;case 26:this.popState();break;case 27:return this.popState(),this.pushState("struct"),18;case 28:return this.popState(),19;case 29:break;case 30:return this.begin("NOTE"),26;case 31:return this.popState(),this.pushState("NOTE_ID"),37;case 32:return this.popState(),this.pushState("NOTE_ID"),38;case 33:this.popState(),this.pushState("FLOATING_NOTE");break;case 34:return this.popState(),this.pushState("FLOATING_NOTE_ID"),"AS";case 35:break;case 36:return"NOTE_TEXT";case 37:return this.popState(),"ID";case 38:return this.popState(),this.pushState("NOTE_TEXT"),22;case 39:return this.popState(),e.yytext=e.yytext.substr(2).trim(),28;case 40:return this.popState(),e.yytext=e.yytext.slice(0,-8).trim(),28;case 41:case 42:return 7;case 43:return 14;case 44:return 36;case 45:return 22;case 46:return e.yytext=e.yytext.trim(),12;case 47:return 13;case 48:return 25;case 49:return 5;case 50:return"INVALID"}},rules:[/^(?:%%\{)/i,/^(?:((?:(?!\}%%)[^:.])*))/i,/^(?::)/i,/^(?:\}%%)/i,/^(?:((?:(?!\}%%).|\n)*))/i,/^(?:%%(?!\{)[^\n]*)/i,/^(?:[^\}]%%[^\n]*)/i,/^(?:[\n]+)/i,/^(?:[\s]+)/i,/^(?:((?!\n)\s)+)/i,/^(?:#[^\n]*)/i,/^(?:%[^\n]*)/i,/^(?:scale\s+)/i,/^(?:\d+)/i,/^(?:\s+width\b)/i,/^(?:state\s+)/i,/^(?:.*<>)/i,/^(?:.*<>)/i,/^(?:.*\[\[fork\]\])/i,/^(?:.*\[\[join\]\])/i,/^(?:["])/i,/^(?:\s*as\s+)/i,/^(?:[^\n\{]*)/i,/^(?:["])/i,/^(?:[^"]*)/i,/^(?:[^\n\s\{]+)/i,/^(?:\n)/i,/^(?:\{)/i,/^(?:\})/i,/^(?:[\n])/i,/^(?:note\s+)/i,/^(?:left of\b)/i,/^(?:right of\b)/i,/^(?:")/i,/^(?:\s*as\s*)/i,/^(?:["])/i,/^(?:[^"]*)/i,/^(?:[^\n]*)/i,/^(?:\s*[^:\n\s\-]+)/i,/^(?:\s*:[^:\n;]+)/i,/^(?:[\s\S]*?end note\b)/i,/^(?:stateDiagram\s+)/i,/^(?:stateDiagram-v2\s+)/i,/^(?:hide empty 
description\b)/i,/^(?:\[\*\])/i,/^(?:[^:\n\s\-\{]+)/i,/^(?:\s*:[^:\n;]+)/i,/^(?:-->)/i,/^(?:--)/i,/^(?:$)/i,/^(?:.)/i],conditions:{LINE:{rules:[9,10],inclusive:!1},close_directive:{rules:[9,10],inclusive:!1},arg_directive:{rules:[3,4,9,10],inclusive:!1},type_directive:{rules:[2,3,9,10],inclusive:!1},open_directive:{rules:[1,9,10],inclusive:!1},struct:{rules:[9,10,15,28,29,30,44,45,46,47,48],inclusive:!1},FLOATING_NOTE_ID:{rules:[37],inclusive:!1},FLOATING_NOTE:{rules:[34,35,36],inclusive:!1},NOTE_TEXT:{rules:[39,40],inclusive:!1},NOTE_ID:{rules:[38],inclusive:!1},NOTE:{rules:[31,32,33],inclusive:!1},SCALE:{rules:[13,14],inclusive:!1},ALIAS:{rules:[],inclusive:!1},STATE_ID:{rules:[22],inclusive:!1},STATE_STRING:{rules:[23,24],inclusive:!1},FORK_STATE:{rules:[],inclusive:!1},STATE:{rules:[9,10,16,17,18,19,20,21,25,26,27],inclusive:!1},ID:{rules:[9,10],inclusive:!1},INITIAL:{rules:[0,5,6,7,8,10,11,12,15,27,30,41,42,43,44,45,46,47,49,50],inclusive:!0}}};function T(){this.yy={}}return w.lexer=E,T.prototype=w,w.Parser=T,new T}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var i=n(19).readFileSync(n(20).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(14),n(7)(t))},function(t,e,n){(function(t){t.exports=function(){"use strict";var e,r;function i(){return e.apply(null,arguments)}function a(t){return t instanceof Array||"[object Array]"===Object.prototype.toString.call(t)}function o(t){return null!=t&&"[object Object]"===Object.prototype.toString.call(t)}function s(t){return void 0===t}function c(t){return"number"==typeof t||"[object Number]"===Object.prototype.toString.call(t)}function u(t){return t instanceof Date||"[object Date]"===Object.prototype.toString.call(t)}function l(t,e){var n,r=[];for(n=0;n>>0,r=0;ryt(t)?(a=t+1,s-yt(t)):(a=t,s),{year:a,dayOfYear:o}}function Pt(t,e,n){var 
r,i,a=Bt(t.year(),e,n),o=Math.floor((t.dayOfYear()-a-1)/7)+1;return o<1?r=o+Ft(i=t.year()-1,e,n):o>Ft(t.year(),e,n)?(r=o-Ft(t.year(),e,n),i=t.year()+1):(i=t.year(),r=o),{week:r,year:i}}function Ft(t,e,n){var r=Bt(t,e,n),i=Bt(t+1,e,n);return(yt(t)-r+i)/7}function It(t,e){return t.slice(e,7).concat(t.slice(0,e))}W("w",["ww",2],"wo","week"),W("W",["WW",2],"Wo","isoWeek"),L("week","w"),L("isoWeek","W"),j("week",5),j("isoWeek",5),lt("w",Q),lt("ww",Q,q),lt("W",Q),lt("WW",Q,q),gt(["w","ww","W","WW"],(function(t,e,n,r){e[r.substr(0,1)]=w(t)})),W("d",0,"do","day"),W("dd",0,0,(function(t){return this.localeData().weekdaysMin(this,t)})),W("ddd",0,0,(function(t){return this.localeData().weekdaysShort(this,t)})),W("dddd",0,0,(function(t){return this.localeData().weekdays(this,t)})),W("e",0,0,"weekday"),W("E",0,0,"isoWeekday"),L("day","d"),L("weekday","e"),L("isoWeekday","E"),j("day",11),j("weekday",11),j("isoWeekday",11),lt("d",Q),lt("e",Q),lt("E",Q),lt("dd",(function(t,e){return e.weekdaysMinRegex(t)})),lt("ddd",(function(t,e){return e.weekdaysShortRegex(t)})),lt("dddd",(function(t,e){return e.weekdaysRegex(t)})),gt(["dd","ddd","dddd"],(function(t,e,n,r){var i=n._locale.weekdaysParse(t,r,n._strict);null!=i?e.d=i:p(n).invalidWeekday=t})),gt(["d","e","E"],(function(t,e,n,r){e[r]=w(t)}));var jt="Sunday_Monday_Tuesday_Wednesday_Thursday_Friday_Saturday".split("_"),Rt="Sun_Mon_Tue_Wed_Thu_Fri_Sat".split("_"),Yt="Su_Mo_Tu_We_Th_Fr_Sa".split("_"),zt=ct,Ut=ct,$t=ct;function Wt(){function t(t,e){return e.length-t.length}var e,n,r,i,a,o=[],s=[],c=[],u=[];for(e=0;e<7;e++)n=d([2e3,1]).day(e),r=this.weekdaysMin(n,""),i=this.weekdaysShort(n,""),a=this.weekdays(n,""),o.push(r),s.push(i),c.push(a),u.push(r),u.push(i),u.push(a);for(o.sort(t),s.sort(t),c.sort(t),u.sort(t),e=0;e<7;e++)s[e]=ft(s[e]),c[e]=ft(c[e]),u[e]=ft(u[e]);this._weekdaysRegex=new 
RegExp("^("+u.join("|")+")","i"),this._weekdaysShortRegex=this._weekdaysRegex,this._weekdaysMinRegex=this._weekdaysRegex,this._weekdaysStrictRegex=new RegExp("^("+c.join("|")+")","i"),this._weekdaysShortStrictRegex=new RegExp("^("+s.join("|")+")","i"),this._weekdaysMinStrictRegex=new RegExp("^("+o.join("|")+")","i")}function Vt(){return this.hours()%12||12}function Ht(t,e){W(t,0,0,(function(){return this.localeData().meridiem(this.hours(),this.minutes(),e)}))}function Gt(t,e){return e._meridiemParse}W("H",["HH",2],0,"hour"),W("h",["hh",2],0,Vt),W("k",["kk",2],0,(function(){return this.hours()||24})),W("hmm",0,0,(function(){return""+Vt.apply(this)+R(this.minutes(),2)})),W("hmmss",0,0,(function(){return""+Vt.apply(this)+R(this.minutes(),2)+R(this.seconds(),2)})),W("Hmm",0,0,(function(){return""+this.hours()+R(this.minutes(),2)})),W("Hmmss",0,0,(function(){return""+this.hours()+R(this.minutes(),2)+R(this.seconds(),2)})),Ht("a",!0),Ht("A",!1),L("hour","h"),j("hour",13),lt("a",Gt),lt("A",Gt),lt("H",Q),lt("h",Q),lt("k",Q),lt("HH",Q,q),lt("hh",Q,q),lt("kk",Q,q),lt("hmm",K),lt("hmmss",tt),lt("Hmm",K),lt("Hmmss",tt),pt(["H","HH"],3),pt(["k","kk"],(function(t,e,n){var r=w(t);e[3]=24===r?0:r})),pt(["a","A"],(function(t,e,n){n._isPm=n._locale.isPM(t),n._meridiem=t})),pt(["h","hh"],(function(t,e,n){e[3]=w(t),p(n).bigHour=!0})),pt("hmm",(function(t,e,n){var r=t.length-2;e[3]=w(t.substr(0,r)),e[4]=w(t.substr(r)),p(n).bigHour=!0})),pt("hmmss",(function(t,e,n){var r=t.length-4,i=t.length-2;e[3]=w(t.substr(0,r)),e[4]=w(t.substr(r,2)),e[5]=w(t.substr(i)),p(n).bigHour=!0})),pt("Hmm",(function(t,e,n){var r=t.length-2;e[3]=w(t.substr(0,r)),e[4]=w(t.substr(r))})),pt("Hmmss",(function(t,e,n){var r=t.length-4,i=t.length-2;e[3]=w(t.substr(0,r)),e[4]=w(t.substr(r,2)),e[5]=w(t.substr(i))}));var qt,Xt=xt("Hours",!0),Zt={calendar:{sameDay:"[Today at] LT",nextDay:"[Tomorrow at] LT",nextWeek:"dddd [at] LT",lastDay:"[Yesterday at] LT",lastWeek:"[Last] dddd [at] 
LT",sameElse:"L"},longDateFormat:{LTS:"h:mm:ss A",LT:"h:mm A",L:"MM/DD/YYYY",LL:"MMMM D, YYYY",LLL:"MMMM D, YYYY h:mm A",LLLL:"dddd, MMMM D, YYYY h:mm A"},invalidDate:"Invalid date",ordinal:"%d",dayOfMonthOrdinalParse:/\d{1,2}/,relativeTime:{future:"in %s",past:"%s ago",s:"a few seconds",ss:"%d seconds",m:"a minute",mm:"%d minutes",h:"an hour",hh:"%d hours",d:"a day",dd:"%d days",M:"a month",MM:"%d months",y:"a year",yy:"%d years"},months:Tt,monthsShort:Ct,week:{dow:0,doy:6},weekdays:jt,weekdaysMin:Yt,weekdaysShort:Rt,meridiemParse:/[ap]\.?m?\.?/i},Jt={},Qt={};function Kt(t){return t?t.toLowerCase().replace("_","-"):t}function te(e){var r=null;if(!Jt[e]&&void 0!==t&&t&&t.exports)try{r=qt._abbr,n(171)("./"+e),ee(r)}catch(e){}return Jt[e]}function ee(t,e){var n;return t&&((n=s(e)?re(t):ne(t,e))?qt=n:"undefined"!=typeof console&&console.warn&&console.warn("Locale "+t+" not found. Did you forget to load it?")),qt._abbr}function ne(t,e){if(null===e)return delete Jt[t],null;var n,r=Zt;if(e.abbr=t,null!=Jt[t])M("defineLocaleOverride","use moment.updateLocale(localeName, config) to change an existing locale. 
moment.defineLocale(localeName, config) should only be used for creating a new locale See http://momentjs.com/guides/#/warnings/define-locale/ for more info."),r=Jt[t]._config;else if(null!=e.parentLocale)if(null!=Jt[e.parentLocale])r=Jt[e.parentLocale]._config;else{if(null==(n=te(e.parentLocale)))return Qt[e.parentLocale]||(Qt[e.parentLocale]=[]),Qt[e.parentLocale].push({name:t,config:e}),null;r=n._config}return Jt[t]=new N(D(r,e)),Qt[t]&&Qt[t].forEach((function(t){ne(t.name,t.config)})),ee(t),Jt[t]}function re(t){var e;if(t&&t._locale&&t._locale._abbr&&(t=t._locale._abbr),!t)return qt;if(!a(t)){if(e=te(t))return e;t=[t]}return function(t){for(var e,n,r,i,a=0;a=e&&E(i,n,!0)>=e-1)break;e--}a++}return qt}(t)}function ie(t){var e,n=t._a;return n&&-2===p(t).overflow&&(e=n[1]<0||11wt(n[0],n[1])?2:n[3]<0||24Ft(n,a,o)?p(t)._overflowWeeks=!0:null!=c?p(t)._overflowWeekday=!0:(s=Lt(n,r,i,a,o),t._a[0]=s.year,t._dayOfYear=s.dayOfYear)}(t),null!=t._dayOfYear&&(o=ae(t._a[0],r[0]),(t._dayOfYear>yt(o)||0===t._dayOfYear)&&(p(t)._overflowDayOfYear=!0),n=Nt(o,0,t._dayOfYear),t._a[1]=n.getUTCMonth(),t._a[2]=n.getUTCDate()),e=0;e<3&&null==t._a[e];++e)t._a[e]=s[e]=r[e];for(;e<7;e++)t._a[e]=s[e]=null==t._a[e]?2===e?1:0:t._a[e];24===t._a[3]&&0===t._a[4]&&0===t._a[5]&&0===t._a[6]&&(t._nextDay=!0,t._a[3]=0),t._d=(t._useUTC?Nt:function(t,e,n,r,i,a,o){var s;return t<100&&0<=t?(s=new Date(t+400,e,n,r,i,a,o),isFinite(s.getFullYear())&&s.setFullYear(t)):s=new Date(t,e,n,r,i,a,o),s}).apply(null,s),a=t._useUTC?t._d.getUTCDay():t._d.getDay(),null!=t._tzm&&t._d.setUTCMinutes(t._d.getUTCMinutes()-t._tzm),t._nextDay&&(t._a[3]=24),t._w&&void 0!==t._w.d&&t._w.d!==a&&(p(t).weekdayMismatch=!0)}}var se=/^\s*((?:[+-]\d{6}|\d{4})-(?:\d\d-\d\d|W\d\d-\d|W\d\d|\d\d\d|\d\d))(?:(T| )(\d\d(?::\d\d(?::\d\d(?:[.,]\d+)?)?)?)([\+\-]\d\d(?::?\d\d)?|\s*Z)?)?$/,ce=/^\s*((?:[+-]\d{6}|\d{4})(?:\d\d\d\d|W\d\d\d|W\d\d|\d\d\d|\d\d))(?:(T| 
)(\d\d(?:\d\d(?:\d\d(?:[.,]\d+)?)?)?)([\+\-]\d\d(?::?\d\d)?|\s*Z)?)?$/,ue=/Z|[+-]\d\d(?::?\d\d)?/,le=[["YYYYYY-MM-DD",/[+-]\d{6}-\d\d-\d\d/],["YYYY-MM-DD",/\d{4}-\d\d-\d\d/],["GGGG-[W]WW-E",/\d{4}-W\d\d-\d/],["GGGG-[W]WW",/\d{4}-W\d\d/,!1],["YYYY-DDD",/\d{4}-\d{3}/],["YYYY-MM",/\d{4}-\d\d/,!1],["YYYYYYMMDD",/[+-]\d{10}/],["YYYYMMDD",/\d{8}/],["GGGG[W]WWE",/\d{4}W\d{3}/],["GGGG[W]WW",/\d{4}W\d{2}/,!1],["YYYYDDD",/\d{7}/]],he=[["HH:mm:ss.SSSS",/\d\d:\d\d:\d\d\.\d+/],["HH:mm:ss,SSSS",/\d\d:\d\d:\d\d,\d+/],["HH:mm:ss",/\d\d:\d\d:\d\d/],["HH:mm",/\d\d:\d\d/],["HHmmss.SSSS",/\d\d\d\d\d\d\.\d+/],["HHmmss,SSSS",/\d\d\d\d\d\d,\d+/],["HHmmss",/\d\d\d\d\d\d/],["HHmm",/\d\d\d\d/],["HH",/\d\d/]],fe=/^\/?Date\((\-?\d+)/i;function de(t){var e,n,r,i,a,o,s=t._i,c=se.exec(s)||ce.exec(s);if(c){for(p(t).iso=!0,e=0,n=le.length;en.valueOf():n.valueOf()this.clone().month(0).utcOffset()||this.utcOffset()>this.clone().month(5).utcOffset()},on.isLocal=function(){return!!this.isValid()&&!this._isUTC},on.isUtcOffset=function(){return!!this.isValid()&&this._isUTC},on.isUtc=Be,on.isUTC=Be,on.zoneAbbr=function(){return this._isUTC?"UTC":""},on.zoneName=function(){return this._isUTC?"Coordinated Universal Time":""},on.dates=C("dates accessor is deprecated. Use date instead.",Ke),on.months=C("months accessor is deprecated. Use month instead",At),on.years=C("years accessor is deprecated. Use year instead",bt),on.zone=C("moment().zone is deprecated, use moment().utcOffset instead. http://momentjs.com/guides/#/warnings/zone/",(function(t,e){return null!=t?("string"!=typeof t&&(t=-t),this.utcOffset(t,e),this):-this.utcOffset()})),on.isDSTShifted=C("isDSTShifted is deprecated. 
See http://momentjs.com/guides/#/warnings/dst-shifted/ for more information",(function(){if(!s(this._isDSTShifted))return this._isDSTShifted;var t={};if(m(t,this),(t=me(t))._a){var e=t._isUTC?d(t._a):xe(t._a);this._isDSTShifted=this.isValid()&&0h&&A.push("'"+this.terminals_[T]+"'");O=p.showPosition?"Parse error on line "+(c+1)+":\n"+p.showPosition()+"\nExpecting "+A.join(", ")+", got '"+(this.terminals_[x]||x)+"'":"Parse error on line "+(c+1)+": Unexpected "+(x==f?"end of input":"'"+(this.terminals_[x]||x)+"'"),this.parseError(O,{text:p.match,token:this.terminals_[x]||x,line:p.yylineno,loc:v,expected:A})}if(w[0]instanceof Array&&w.length>1)throw new Error("Parse Error: multiple actions possible at state: "+k+", token: "+x);switch(w[0]){case 1:n.push(x),i.push(p.yytext),a.push(p.yylloc),n.push(w[1]),x=null,_?(x=_,_=null):(u=p.yyleng,s=p.yytext,c=p.yylineno,v=p.yylloc,l>0&&l--);break;case 2:if(C=this.productions_[w[1]][1],M.$=i[i.length-C],M._$={first_line:a[a.length-(C||1)].first_line,last_line:a[a.length-1].last_line,first_column:a[a.length-(C||1)].first_column,last_column:a[a.length-1].last_column},m&&(M._$.range=[a[a.length-(C||1)].range[0],a[a.length-1].range[1]]),void 0!==(E=this.performAction.apply(M,[s,u,c,g.yy,w[1],i,a].concat(d))))return E;C&&(n=n.slice(0,-1*C*2),i=i.slice(0,-1*C),a=a.slice(0,-1*C)),n.push(this.productions_[w[1]][0]),i.push(M.$),a.push(M._$),S=o[n[n.length-2]][n[n.length-1]],n.push(S);break;case 3:return!0}}return!0}},qt={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return 
this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". 
You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var a in i)this[a]=i[a];return!1}return!1},next:function(){if(this.done)return this.EOF;var 
t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),a=0;ae[0].length)){if(e=n,r=a,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[a])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{},performAction:function(t,e,n,r){switch(n){case 0:return this.begin("open_directive"),12;case 1:return this.begin("type_directive"),13;case 2:return this.popState(),this.begin("arg_directive"),10;case 3:return this.popState(),this.popState(),15;case 4:return 14;case 5:case 6:break;case 7:this.begin("string");break;case 8:this.popState();break;case 9:return"STR";case 10:return 75;case 11:return 84;case 12:return 76;case 13:return 90;case 14:return 77;case 15:return 78;case 16:return 79;case 17:case 18:return t.lex.firstGraph()&&this.begin("dir"),24;case 19:return 38;case 20:return 42;case 21:case 22:case 23:case 24:return 87;case 25:return this.popState(),25;case 26:case 27:case 28:case 29:case 30:case 31:case 32:case 33:case 34:case 35:return this.popState(),26;case 36:return 91;case 37:return 99;case 38:return 47;case 39:return 96;case 
40:return 46;case 41:return 20;case 42:return 92;case 43:return 110;case 44:case 45:case 46:return 70;case 47:case 48:case 49:return 69;case 50:return 51;case 51:return 52;case 52:return 53;case 53:return 54;case 54:return 55;case 55:return 56;case 56:return 57;case 57:return 58;case 58:return 97;case 59:return 100;case 60:return 111;case 61:return 108;case 62:return 101;case 63:case 64:return 109;case 65:return 102;case 66:return 61;case 67:return 81;case 68:return"SEP";case 69:return 80;case 70:return 95;case 71:return 63;case 72:return 62;case 73:return 65;case 74:return 64;case 75:return 106;case 76:return 107;case 77:return 71;case 78:return 49;case 79:return 50;case 80:return 40;case 81:return 41;case 82:return 59;case 83:return 60;case 84:return 117;case 85:return 21;case 86:return 22;case 87:return 23}},rules:[/^(?:%%\{)/,/^(?:((?:(?!\}%%)[^:.])*))/,/^(?::)/,/^(?:\}%%)/,/^(?:((?:(?!\}%%).|\n)*))/,/^(?:%%(?!\{)[^\n]*)/,/^(?:[^\}]%%[^\n]*)/,/^(?:["])/,/^(?:["])/,/^(?:[^"]*)/,/^(?:style\b)/,/^(?:default\b)/,/^(?:linkStyle\b)/,/^(?:interpolate\b)/,/^(?:classDef\b)/,/^(?:class\b)/,/^(?:click\b)/,/^(?:graph\b)/,/^(?:flowchart\b)/,/^(?:subgraph\b)/,/^(?:end\b\s*)/,/^(?:_self\b)/,/^(?:_blank\b)/,/^(?:_parent\b)/,/^(?:_top\b)/,/^(?:(\r?\n)*\s*\n)/,/^(?:\s*LR\b)/,/^(?:\s*RL\b)/,/^(?:\s*TB\b)/,/^(?:\s*BT\b)/,/^(?:\s*TD\b)/,/^(?:\s*BR\b)/,/^(?:\s*<)/,/^(?:\s*>)/,/^(?:\s*\^)/,/^(?:\s*v\b)/,/^(?:[0-9]+)/,/^(?:#)/,/^(?::::)/,/^(?::)/,/^(?:&)/,/^(?:;)/,/^(?:,)/,/^(?:\*)/,/^(?:\s*[xo<]?--+[-xo>]\s*)/,/^(?:\s*[xo<]?==+[=xo>]\s*)/,/^(?:\s*[xo<]?-?\.+-[xo>]?\s*)/,/^(?:\s*[xo<]?--\s*)/,/^(?:\s*[xo<]?==\s*)/,/^(?:\s*[xo<]?-\.\s*)/,/^(?:\(-)/,/^(?:-\))/,/^(?:\(\[)/,/^(?:\]\))/,/^(?:\[\[)/,/^(?:\]\])/,/^(?:\[\()/,/^(?:\)\])/,/^(?:-)/,/^(?:\.)/,/^(?:[\_])/,/^(?:\+)/,/^(?:%)/,/^(?:=)/,/^(?:=)/,/^(?:<)/,/^(?:>)/,/^(?:\^)/,/^(?:\\\|)/,/^(?:v\b)/,/^(?:[A-Za-z]+)/,/^(?:\\\])/,/^(?:\[\/)/,/^(?:\/\])/,/^(?:\[\\)/,/^(?:[!"#$%&'*+,-.`?\\_/])/,/^(?:[\u00AA\u00B5\u00BA\u00C0-\u00D6\u00D8-\u00F
6]|[\u00F8-\u02C1\u02C6-\u02D1\u02E0-\u02E4\u02EC\u02EE\u0370-\u0374\u0376\u0377]|[\u037A-\u037D\u0386\u0388-\u038A\u038C\u038E-\u03A1\u03A3-\u03F5]|[\u03F7-\u0481\u048A-\u0527\u0531-\u0556\u0559\u0561-\u0587\u05D0-\u05EA]|[\u05F0-\u05F2\u0620-\u064A\u066E\u066F\u0671-\u06D3\u06D5\u06E5\u06E6\u06EE]|[\u06EF\u06FA-\u06FC\u06FF\u0710\u0712-\u072F\u074D-\u07A5\u07B1\u07CA-\u07EA]|[\u07F4\u07F5\u07FA\u0800-\u0815\u081A\u0824\u0828\u0840-\u0858\u08A0]|[\u08A2-\u08AC\u0904-\u0939\u093D\u0950\u0958-\u0961\u0971-\u0977]|[\u0979-\u097F\u0985-\u098C\u098F\u0990\u0993-\u09A8\u09AA-\u09B0\u09B2]|[\u09B6-\u09B9\u09BD\u09CE\u09DC\u09DD\u09DF-\u09E1\u09F0\u09F1\u0A05-\u0A0A]|[\u0A0F\u0A10\u0A13-\u0A28\u0A2A-\u0A30\u0A32\u0A33\u0A35\u0A36\u0A38\u0A39]|[\u0A59-\u0A5C\u0A5E\u0A72-\u0A74\u0A85-\u0A8D\u0A8F-\u0A91\u0A93-\u0AA8]|[\u0AAA-\u0AB0\u0AB2\u0AB3\u0AB5-\u0AB9\u0ABD\u0AD0\u0AE0\u0AE1\u0B05-\u0B0C]|[\u0B0F\u0B10\u0B13-\u0B28\u0B2A-\u0B30\u0B32\u0B33\u0B35-\u0B39\u0B3D\u0B5C]|[\u0B5D\u0B5F-\u0B61\u0B71\u0B83\u0B85-\u0B8A\u0B8E-\u0B90\u0B92-\u0B95\u0B99]|[\u0B9A\u0B9C\u0B9E\u0B9F\u0BA3\u0BA4\u0BA8-\u0BAA\u0BAE-\u0BB9\u0BD0]|[\u0C05-\u0C0C\u0C0E-\u0C10\u0C12-\u0C28\u0C2A-\u0C33\u0C35-\u0C39\u0C3D]|[\u0C58\u0C59\u0C60\u0C61\u0C85-\u0C8C\u0C8E-\u0C90\u0C92-\u0CA8\u0CAA-\u0CB3]|[\u0CB5-\u0CB9\u0CBD\u0CDE\u0CE0\u0CE1\u0CF1\u0CF2\u0D05-\u0D0C\u0D0E-\u0D10]|[\u0D12-\u0D3A\u0D3D\u0D4E\u0D60\u0D61\u0D7A-\u0D7F\u0D85-\u0D96\u0D9A-\u0DB1]|[\u0DB3-\u0DBB\u0DBD\u0DC0-\u0DC6\u0E01-\u0E30\u0E32\u0E33\u0E40-\u0E46\u0E81]|[\u0E82\u0E84\u0E87\u0E88\u0E8A\u0E8D\u0E94-\u0E97\u0E99-\u0E9F\u0EA1-\u0EA3]|[\u0EA5\u0EA7\u0EAA\u0EAB\u0EAD-\u0EB0\u0EB2\u0EB3\u0EBD\u0EC0-\u0EC4\u0EC6]|[\u0EDC-\u0EDF\u0F00\u0F40-\u0F47\u0F49-\u0F6C\u0F88-\u0F8C\u1000-\u102A]|[\u103F\u1050-\u1055\u105A-\u105D\u1061\u1065\u1066\u106E-\u1070\u1075-\u1081]|[\u108E\u10A0-\u10C5\u10C7\u10CD\u10D0-\u10FA\u10FC-\u1248\u124A-\u124D]|[\u1250-\u1256\u1258\u125A-\u125D\u1260-\u1288\u128A-\u128D\u1290-\u12B0]|[\u12B2-\u12B5\u12B8-\u12BE\u1
2C0\u12C2-\u12C5\u12C8-\u12D6\u12D8-\u1310]|[\u1312-\u1315\u1318-\u135A\u1380-\u138F\u13A0-\u13F4\u1401-\u166C]|[\u166F-\u167F\u1681-\u169A\u16A0-\u16EA\u1700-\u170C\u170E-\u1711]|[\u1720-\u1731\u1740-\u1751\u1760-\u176C\u176E-\u1770\u1780-\u17B3\u17D7]|[\u17DC\u1820-\u1877\u1880-\u18A8\u18AA\u18B0-\u18F5\u1900-\u191C]|[\u1950-\u196D\u1970-\u1974\u1980-\u19AB\u19C1-\u19C7\u1A00-\u1A16]|[\u1A20-\u1A54\u1AA7\u1B05-\u1B33\u1B45-\u1B4B\u1B83-\u1BA0\u1BAE\u1BAF]|[\u1BBA-\u1BE5\u1C00-\u1C23\u1C4D-\u1C4F\u1C5A-\u1C7D\u1CE9-\u1CEC]|[\u1CEE-\u1CF1\u1CF5\u1CF6\u1D00-\u1DBF\u1E00-\u1F15\u1F18-\u1F1D]|[\u1F20-\u1F45\u1F48-\u1F4D\u1F50-\u1F57\u1F59\u1F5B\u1F5D\u1F5F-\u1F7D]|[\u1F80-\u1FB4\u1FB6-\u1FBC\u1FBE\u1FC2-\u1FC4\u1FC6-\u1FCC\u1FD0-\u1FD3]|[\u1FD6-\u1FDB\u1FE0-\u1FEC\u1FF2-\u1FF4\u1FF6-\u1FFC\u2071\u207F]|[\u2090-\u209C\u2102\u2107\u210A-\u2113\u2115\u2119-\u211D\u2124\u2126\u2128]|[\u212A-\u212D\u212F-\u2139\u213C-\u213F\u2145-\u2149\u214E\u2183\u2184]|[\u2C00-\u2C2E\u2C30-\u2C5E\u2C60-\u2CE4\u2CEB-\u2CEE\u2CF2\u2CF3]|[\u2D00-\u2D25\u2D27\u2D2D\u2D30-\u2D67\u2D6F\u2D80-\u2D96\u2DA0-\u2DA6]|[\u2DA8-\u2DAE\u2DB0-\u2DB6\u2DB8-\u2DBE\u2DC0-\u2DC6\u2DC8-\u2DCE]|[\u2DD0-\u2DD6\u2DD8-\u2DDE\u2E2F\u3005\u3006\u3031-\u3035\u303B\u303C]|[\u3041-\u3096\u309D-\u309F\u30A1-\u30FA\u30FC-\u30FF\u3105-\u312D]|[\u3131-\u318E\u31A0-\u31BA\u31F0-\u31FF\u3400-\u4DB5\u4E00-\u9FCC]|[\uA000-\uA48C\uA4D0-\uA4FD\uA500-\uA60C\uA610-\uA61F\uA62A\uA62B]|[\uA640-\uA66E\uA67F-\uA697\uA6A0-\uA6E5\uA717-\uA71F\uA722-\uA788]|[\uA78B-\uA78E\uA790-\uA793\uA7A0-\uA7AA\uA7F8-\uA801\uA803-\uA805]|[\uA807-\uA80A\uA80C-\uA822\uA840-\uA873\uA882-\uA8B3\uA8F2-\uA8F7\uA8FB]|[\uA90A-\uA925\uA930-\uA946\uA960-\uA97C\uA984-\uA9B2\uA9CF\uAA00-\uAA28]|[\uAA40-\uAA42\uAA44-\uAA4B\uAA60-\uAA76\uAA7A\uAA80-\uAAAF\uAAB1\uAAB5]|[\uAAB6\uAAB9-\uAABD\uAAC0\uAAC2\uAADB-\uAADD\uAAE0-\uAAEA\uAAF2-\uAAF4]|[\uAB01-\uAB06\uAB09-\uAB0E\uAB11-\uAB16\uAB20-\uAB26\uAB28-\uAB2E]|[\uABC0-\uABE2\uAC00-\uD7A3\uD7B0-\uD7C6\uD7CB-\uD7FB\uF9
00-\uFA6D]|[\uFA70-\uFAD9\uFB00-\uFB06\uFB13-\uFB17\uFB1D\uFB1F-\uFB28\uFB2A-\uFB36]|[\uFB38-\uFB3C\uFB3E\uFB40\uFB41\uFB43\uFB44\uFB46-\uFBB1\uFBD3-\uFD3D]|[\uFD50-\uFD8F\uFD92-\uFDC7\uFDF0-\uFDFB\uFE70-\uFE74\uFE76-\uFEFC]|[\uFF21-\uFF3A\uFF41-\uFF5A\uFF66-\uFFBE\uFFC2-\uFFC7\uFFCA-\uFFCF]|[\uFFD2-\uFFD7\uFFDA-\uFFDC])/,/^(?:\|)/,/^(?:\()/,/^(?:\))/,/^(?:\[)/,/^(?:\])/,/^(?:\{)/,/^(?:\})/,/^(?:")/,/^(?:(\r?\n)+)/,/^(?:\s)/,/^(?:$)/],conditions:{close_directive:{rules:[],inclusive:!1},arg_directive:{rules:[3,4],inclusive:!1},type_directive:{rules:[2,3],inclusive:!1},open_directive:{rules:[1],inclusive:!1},vertex:{rules:[],inclusive:!1},dir:{rules:[25,26,27,28,29,30,31,32,33,34,35],inclusive:!1},string:{rules:[8,9],inclusive:!1},INITIAL:{rules:[0,5,6,7,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87],inclusive:!0}}};function Xt(){this.yy={}}return Gt.lexer=qt,Xt.prototype=Gt,Gt.Parser=Xt,new Xt}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var i=n(19).readFileSync(n(20).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(14),n(7)(t))},function(t,e,n){(function(t,r){var i=function(){var t=function(t,e,n,r){for(n=n||{},r=t.length;r--;n[t[r]]=e);return 
n},e=[1,3],n=[1,5],r=[7,9,11,12,13,14,15,16,17,18,20,27,32],i=[1,15],a=[1,16],o=[1,17],s=[1,18],c=[1,19],u=[1,20],l=[1,21],h=[1,23],f=[1,25],d=[1,28],p=[5,7,9,11,12,13,14,15,16,17,18,20,27,32],g={trace:function(){},yy:{},symbols_:{error:2,start:3,directive:4,gantt:5,document:6,EOF:7,line:8,SPACE:9,statement:10,NL:11,dateFormat:12,inclusiveEndDates:13,axisFormat:14,excludes:15,todayMarker:16,title:17,section:18,clickStatement:19,taskTxt:20,taskData:21,openDirective:22,typeDirective:23,closeDirective:24,":":25,argDirective:26,click:27,callbackname:28,callbackargs:29,href:30,clickStatementDebug:31,open_directive:32,type_directive:33,arg_directive:34,close_directive:35,$accept:0,$end:1},terminals_:{2:"error",5:"gantt",7:"EOF",9:"SPACE",11:"NL",12:"dateFormat",13:"inclusiveEndDates",14:"axisFormat",15:"excludes",16:"todayMarker",17:"title",18:"section",20:"taskTxt",21:"taskData",25:":",27:"click",28:"callbackname",29:"callbackargs",30:"href",32:"open_directive",33:"type_directive",34:"arg_directive",35:"close_directive"},productions_:[0,[3,2],[3,3],[6,0],[6,2],[8,2],[8,1],[8,1],[8,1],[10,1],[10,1],[10,1],[10,1],[10,1],[10,1],[10,1],[10,1],[10,2],[10,1],[4,4],[4,6],[19,2],[19,3],[19,3],[19,4],[19,3],[19,4],[19,2],[31,2],[31,3],[31,3],[31,4],[31,3],[31,4],[31,2],[22,1],[23,1],[26,1],[24,1]],performAction:function(t,e,n,r,i,a,o){var s=a.length-1;switch(i){case 2:return a[s-1];case 3:this.$=[];break;case 4:a[s-1].push(a[s]),this.$=a[s-1];break;case 5:case 6:this.$=a[s];break;case 7:case 8:this.$=[];break;case 9:r.setDateFormat(a[s].substr(11)),this.$=a[s].substr(11);break;case 10:r.enableInclusiveEndDates(),this.$=a[s].substr(18);break;case 11:r.setAxisFormat(a[s].substr(11)),this.$=a[s].substr(11);break;case 12:r.setExcludes(a[s].substr(9)),this.$=a[s].substr(9);break;case 13:r.setTodayMarker(a[s].substr(12)),this.$=a[s].substr(12);break;case 14:r.setTitle(a[s].substr(6)),this.$=a[s].substr(6);break;case 15:r.addSection(a[s].substr(8)),this.$=a[s].substr(8);break;case 
17:r.addTask(a[s-1],a[s]),this.$="task";break;case 21:this.$=a[s-1],r.setClickEvent(a[s-1],a[s],null);break;case 22:this.$=a[s-2],r.setClickEvent(a[s-2],a[s-1],a[s]);break;case 23:this.$=a[s-2],r.setClickEvent(a[s-2],a[s-1],null),r.setLink(a[s-2],a[s]);break;case 24:this.$=a[s-3],r.setClickEvent(a[s-3],a[s-2],a[s-1]),r.setLink(a[s-3],a[s]);break;case 25:this.$=a[s-2],r.setClickEvent(a[s-2],a[s],null),r.setLink(a[s-2],a[s-1]);break;case 26:this.$=a[s-3],r.setClickEvent(a[s-3],a[s-1],a[s]),r.setLink(a[s-3],a[s-2]);break;case 27:this.$=a[s-1],r.setLink(a[s-1],a[s]);break;case 28:case 34:this.$=a[s-1]+" "+a[s];break;case 29:case 30:case 32:this.$=a[s-2]+" "+a[s-1]+" "+a[s];break;case 31:case 33:this.$=a[s-3]+" "+a[s-2]+" "+a[s-1]+" "+a[s];break;case 35:r.parseDirective("%%{","open_directive");break;case 36:r.parseDirective(a[s],"type_directive");break;case 37:a[s]=a[s].trim().replace(/'/g,'"'),r.parseDirective(a[s],"arg_directive");break;case 38:r.parseDirective("}%%","close_directive","gantt")}},table:[{3:1,4:2,5:e,22:4,32:n},{1:[3]},{3:6,4:2,5:e,22:4,32:n},t(r,[2,3],{6:7}),{23:8,33:[1,9]},{33:[2,35]},{1:[2,1]},{4:24,7:[1,10],8:11,9:[1,12],10:13,11:[1,14],12:i,13:a,14:o,15:s,16:c,17:u,18:l,19:22,20:h,22:4,27:f,32:n},{24:26,25:[1,27],35:d},t([25,35],[2,36]),t(r,[2,8],{1:[2,2]}),t(r,[2,4]),{4:24,10:29,12:i,13:a,14:o,15:s,16:c,17:u,18:l,19:22,20:h,22:4,27:f,32:n},t(r,[2,6]),t(r,[2,7]),t(r,[2,9]),t(r,[2,10]),t(r,[2,11]),t(r,[2,12]),t(r,[2,13]),t(r,[2,14]),t(r,[2,15]),t(r,[2,16]),{21:[1,30]},t(r,[2,18]),{28:[1,31],30:[1,32]},{11:[1,33]},{26:34,34:[1,35]},{11:[2,38]},t(r,[2,5]),t(r,[2,17]),t(r,[2,21],{29:[1,36],30:[1,37]}),t(r,[2,27],{28:[1,38]}),t(p,[2,19]),{24:39,35:d},{35:[2,37]},t(r,[2,22],{30:[1,40]}),t(r,[2,23]),t(r,[2,25],{29:[1,41]}),{11:[1,42]},t(r,[2,24]),t(r,[2,26]),t(p,[2,20])],defaultActions:{5:[2,35],6:[2,1],28:[2,38],35:[2,37]},parseError:function(t,e){if(!e.recoverable){var n=new Error(t);throw n.hash=e,n}this.trace(t)},parse:function(t){var 
e=this,n=[0],r=[],i=[null],a=[],o=this.table,s="",c=0,u=0,l=0,h=2,f=1,d=a.slice.call(arguments,1),p=Object.create(this.lexer),g={yy:{}};for(var y in this.yy)Object.prototype.hasOwnProperty.call(this.yy,y)&&(g.yy[y]=this.yy[y]);p.setInput(t,g.yy),g.yy.lexer=p,g.yy.parser=this,void 0===p.yylloc&&(p.yylloc={});var v=p.yylloc;a.push(v);var m=p.options&&p.options.ranges;function b(){var t;return"number"!=typeof(t=r.pop()||p.lex()||f)&&(t instanceof Array&&(t=(r=t).pop()),t=e.symbols_[t]||t),t}"function"==typeof g.yy.parseError?this.parseError=g.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;for(var x,_,k,w,E,T,C,S,A,M={};;){if(k=n[n.length-1],this.defaultActions[k]?w=this.defaultActions[k]:(null==x&&(x=b()),w=o[k]&&o[k][x]),void 0===w||!w.length||!w[0]){var O="";for(T in A=[],o[k])this.terminals_[T]&&T>h&&A.push("'"+this.terminals_[T]+"'");O=p.showPosition?"Parse error on line "+(c+1)+":\n"+p.showPosition()+"\nExpecting "+A.join(", ")+", got '"+(this.terminals_[x]||x)+"'":"Parse error on line "+(c+1)+": Unexpected "+(x==f?"end of input":"'"+(this.terminals_[x]||x)+"'"),this.parseError(O,{text:p.match,token:this.terminals_[x]||x,line:p.yylineno,loc:v,expected:A})}if(w[0]instanceof Array&&w.length>1)throw new Error("Parse Error: multiple actions possible at state: "+k+", token: "+x);switch(w[0]){case 1:n.push(x),i.push(p.yytext),a.push(p.yylloc),n.push(w[1]),x=null,_?(x=_,_=null):(u=p.yyleng,s=p.yytext,c=p.yylineno,v=p.yylloc,l>0&&l--);break;case 2:if(C=this.productions_[w[1]][1],M.$=i[i.length-C],M._$={first_line:a[a.length-(C||1)].first_line,last_line:a[a.length-1].last_line,first_column:a[a.length-(C||1)].first_column,last_column:a[a.length-1].last_column},m&&(M._$.range=[a[a.length-(C||1)].range[0],a[a.length-1].range[1]]),void 0!==(E=this.performAction.apply(M,[s,u,c,g.yy,w[1],i,a].concat(d))))return 
E;C&&(n=n.slice(0,-1*C*2),i=i.slice(0,-1*C),a=a.slice(0,-1*C)),n.push(this.productions_[w[1]][0]),i.push(M.$),a.push(M._$),S=o[n[n.length-2]][n[n.length-1]],n.push(S);break;case 3:return!0}}return!0}},y={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". 
You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var a in i)this[a]=i[a];return!1}return!1},next:function(){if(this.done)return this.EOF;var 
t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),a=0;ae[0].length)){if(e=n,r=a,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[a])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{"case-insensitive":!0},performAction:function(t,e,n,r){switch(n){case 0:return this.begin("open_directive"),32;case 1:return this.begin("type_directive"),33;case 2:return this.popState(),this.begin("arg_directive"),25;case 3:return this.popState(),this.popState(),35;case 4:return 34;case 5:case 6:case 7:break;case 8:return 11;case 9:case 10:case 11:break;case 12:this.begin("href");break;case 13:this.popState();break;case 14:return 30;case 15:this.begin("callbackname");break;case 16:this.popState();break;case 17:this.popState(),this.begin("callbackargs");break;case 18:return 28;case 19:this.popState();break;case 20:return 29;case 21:this.begin("click");break;case 22:this.popState();break;case 23:return 27;case 24:return 5;case 25:return 12;case 26:return 13;case 27:return 14;case 28:return 15;case 29:return 16;case 30:return"date";case 
31:return 17;case 32:return 18;case 33:return 20;case 34:return 21;case 35:return 25;case 36:return 7;case 37:return"INVALID"}},rules:[/^(?:%%\{)/i,/^(?:((?:(?!\}%%)[^:.])*))/i,/^(?::)/i,/^(?:\}%%)/i,/^(?:((?:(?!\}%%).|\n)*))/i,/^(?:%%(?!\{)*[^\n]*)/i,/^(?:[^\}]%%*[^\n]*)/i,/^(?:%%*[^\n]*[\n]*)/i,/^(?:[\n]+)/i,/^(?:\s+)/i,/^(?:#[^\n]*)/i,/^(?:%[^\n]*)/i,/^(?:href[\s]+["])/i,/^(?:["])/i,/^(?:[^"]*)/i,/^(?:call[\s]+)/i,/^(?:\([\s]*\))/i,/^(?:\()/i,/^(?:[^(]*)/i,/^(?:\))/i,/^(?:[^)]*)/i,/^(?:click[\s]+)/i,/^(?:[\s\n])/i,/^(?:[^\s\n]*)/i,/^(?:gantt\b)/i,/^(?:dateFormat\s[^#\n;]+)/i,/^(?:inclusiveEndDates\b)/i,/^(?:axisFormat\s[^#\n;]+)/i,/^(?:excludes\s[^#\n;]+)/i,/^(?:todayMarker\s[^\n;]+)/i,/^(?:\d\d\d\d-\d\d-\d\d\b)/i,/^(?:title\s[^#\n;]+)/i,/^(?:section\s[^#:\n;]+)/i,/^(?:[^#:\n;]+)/i,/^(?::[^#\n;]+)/i,/^(?::)/i,/^(?:$)/i,/^(?:.)/i],conditions:{close_directive:{rules:[],inclusive:!1},arg_directive:{rules:[3,4],inclusive:!1},type_directive:{rules:[2,3],inclusive:!1},open_directive:{rules:[1],inclusive:!1},callbackargs:{rules:[19,20],inclusive:!1},callbackname:{rules:[16,17,18],inclusive:!1},href:{rules:[13,14],inclusive:!1},click:{rules:[22,23],inclusive:!1},INITIAL:{rules:[0,5,6,7,8,9,10,11,12,15,21,24,25,26,27,28,29,30,31,32,33,34,35,36,37],inclusive:!0}}};function v(){this.yy={}}return g.lexer=y,v.prototype=g,g.Parser=v,new v}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var i=n(19).readFileSync(n(20).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(14),n(7)(t))},function(t,e,n){(function(t,r){var i=function(){var t=function(t,e,n,r){for(n=n||{},r=t.length;r--;n[t[r]]=e);return 
n},e=[1,2],n=[1,5],r=[6,9,11,17,18,19,21],i=[1,15],a=[1,16],o=[1,17],s=[1,21],c=[4,6,9,11,17,18,19,21],u={trace:function(){},yy:{},symbols_:{error:2,start:3,journey:4,document:5,EOF:6,directive:7,line:8,SPACE:9,statement:10,NEWLINE:11,openDirective:12,typeDirective:13,closeDirective:14,":":15,argDirective:16,title:17,section:18,taskName:19,taskData:20,open_directive:21,type_directive:22,arg_directive:23,close_directive:24,$accept:0,$end:1},terminals_:{2:"error",4:"journey",6:"EOF",9:"SPACE",11:"NEWLINE",15:":",17:"title",18:"section",19:"taskName",20:"taskData",21:"open_directive",22:"type_directive",23:"arg_directive",24:"close_directive"},productions_:[0,[3,3],[3,2],[5,0],[5,2],[8,2],[8,1],[8,1],[8,1],[7,4],[7,6],[10,1],[10,1],[10,2],[10,1],[12,1],[13,1],[16,1],[14,1]],performAction:function(t,e,n,r,i,a,o){var s=a.length-1;switch(i){case 1:return a[s-1];case 3:this.$=[];break;case 4:a[s-1].push(a[s]),this.$=a[s-1];break;case 5:case 6:this.$=a[s];break;case 7:case 8:this.$=[];break;case 11:r.setTitle(a[s].substr(6)),this.$=a[s].substr(6);break;case 12:r.addSection(a[s].substr(8)),this.$=a[s].substr(8);break;case 13:r.addTask(a[s-1],a[s]),this.$="task";break;case 15:r.parseDirective("%%{","open_directive");break;case 16:r.parseDirective(a[s],"type_directive");break;case 17:a[s]=a[s].trim().replace(/'/g,'"'),r.parseDirective(a[s],"arg_directive");break;case 
18:r.parseDirective("}%%","close_directive","journey")}},table:[{3:1,4:e,7:3,12:4,21:n},{1:[3]},t(r,[2,3],{5:6}),{3:7,4:e,7:3,12:4,21:n},{13:8,22:[1,9]},{22:[2,15]},{6:[1,10],7:18,8:11,9:[1,12],10:13,11:[1,14],12:4,17:i,18:a,19:o,21:n},{1:[2,2]},{14:19,15:[1,20],24:s},t([15,24],[2,16]),t(r,[2,8],{1:[2,1]}),t(r,[2,4]),{7:18,10:22,12:4,17:i,18:a,19:o,21:n},t(r,[2,6]),t(r,[2,7]),t(r,[2,11]),t(r,[2,12]),{20:[1,23]},t(r,[2,14]),{11:[1,24]},{16:25,23:[1,26]},{11:[2,18]},t(r,[2,5]),t(r,[2,13]),t(c,[2,9]),{14:27,24:s},{24:[2,17]},{11:[1,28]},t(c,[2,10])],defaultActions:{5:[2,15],7:[2,2],21:[2,18],26:[2,17]},parseError:function(t,e){if(!e.recoverable){var n=new Error(t);throw n.hash=e,n}this.trace(t)},parse:function(t){var e=this,n=[0],r=[],i=[null],a=[],o=this.table,s="",c=0,u=0,l=0,h=2,f=1,d=a.slice.call(arguments,1),p=Object.create(this.lexer),g={yy:{}};for(var y in this.yy)Object.prototype.hasOwnProperty.call(this.yy,y)&&(g.yy[y]=this.yy[y]);p.setInput(t,g.yy),g.yy.lexer=p,g.yy.parser=this,void 0===p.yylloc&&(p.yylloc={});var v=p.yylloc;a.push(v);var m=p.options&&p.options.ranges;function b(){var t;return"number"!=typeof(t=r.pop()||p.lex()||f)&&(t instanceof Array&&(t=(r=t).pop()),t=e.symbols_[t]||t),t}"function"==typeof g.yy.parseError?this.parseError=g.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;for(var x,_,k,w,E,T,C,S,A,M={};;){if(k=n[n.length-1],this.defaultActions[k]?w=this.defaultActions[k]:(null==x&&(x=b()),w=o[k]&&o[k][x]),void 0===w||!w.length||!w[0]){var O="";for(T in A=[],o[k])this.terminals_[T]&&T>h&&A.push("'"+this.terminals_[T]+"'");O=p.showPosition?"Parse error on line "+(c+1)+":\n"+p.showPosition()+"\nExpecting "+A.join(", ")+", got '"+(this.terminals_[x]||x)+"'":"Parse error on line "+(c+1)+": Unexpected "+(x==f?"end of input":"'"+(this.terminals_[x]||x)+"'"),this.parseError(O,{text:p.match,token:this.terminals_[x]||x,line:p.yylineno,loc:v,expected:A})}if(w[0]instanceof Array&&w.length>1)throw new Error("Parse Error: multiple 
actions possible at state: "+k+", token: "+x);switch(w[0]){case 1:n.push(x),i.push(p.yytext),a.push(p.yylloc),n.push(w[1]),x=null,_?(x=_,_=null):(u=p.yyleng,s=p.yytext,c=p.yylineno,v=p.yylloc,l>0&&l--);break;case 2:if(C=this.productions_[w[1]][1],M.$=i[i.length-C],M._$={first_line:a[a.length-(C||1)].first_line,last_line:a[a.length-1].last_line,first_column:a[a.length-(C||1)].first_column,last_column:a[a.length-1].last_column},m&&(M._$.range=[a[a.length-(C||1)].range[0],a[a.length-1].range[1]]),void 0!==(E=this.performAction.apply(M,[s,u,c,g.yy,w[1],i,a].concat(d))))return E;C&&(n=n.slice(0,-1*C*2),i=i.slice(0,-1*C),a=a.slice(0,-1*C)),n.push(this.productions_[w[1]][0]),i.push(M.$),a.push(M._$),S=o[n[n.length-2]][n[n.length-1]],n.push(S);break;case 3:return!0}}return!0}},l={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return 
this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var 
n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var a in i)this[a]=i[a];return!1}return!1},next:function(){if(this.done)return this.EOF;var t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),a=0;ae[0].length)){if(e=n,r=a,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[a])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". 
Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{"case-insensitive":!0},performAction:function(t,e,n,r){switch(n){case 0:return this.begin("open_directive"),21;case 1:return this.begin("type_directive"),22;case 2:return this.popState(),this.begin("arg_directive"),15;case 3:return this.popState(),this.popState(),24;case 4:return 23;case 5:case 6:break;case 7:return 11;case 8:case 9:break;case 10:return 4;case 11:return 17;case 12:return 18;case 13:return 19;case 14:return 20;case 15:return 15;case 16:return 6;case 17:return"INVALID"}},rules:[/^(?:%%\{)/i,/^(?:((?:(?!\}%%)[^:.])*))/i,/^(?::)/i,/^(?:\}%%)/i,/^(?:((?:(?!\}%%).|\n)*))/i,/^(?:%(?!\{)[^\n]*)/i,/^(?:[^\}]%%[^\n]*)/i,/^(?:[\n]+)/i,/^(?:\s+)/i,/^(?:#[^\n]*)/i,/^(?:journey\b)/i,/^(?:title\s[^#\n;]+)/i,/^(?:section\s[^#:\n;]+)/i,/^(?:[^#:\n;]+)/i,/^(?::[^#\n;]+)/i,/^(?::)/i,/^(?:$)/i,/^(?:.)/i],conditions:{open_directive:{rules:[1],inclusive:!1},type_directive:{rules:[2,3],inclusive:!1},arg_directive:{rules:[3,4],inclusive:!1},INITIAL:{rules:[0,5,6,7,8,9,10,11,12,13,14,15,16,17],inclusive:!0}}};function h(){this.yy={}}return u.lexer=l,h.prototype=u,u.Parser=h,new h}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var 
i=n(19).readFileSync(n(20).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(14),n(7)(t))},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(9),i=n(15);e.default=function(t,e){return r.default.lang.round(i.default.parse(t)[e])}},function(t,e,n){var r=n(112),i=n(82),a=n(24);t.exports=function(t){return a(t)?r(t):i(t)}},function(t,e,n){var r;if(!r)try{r=n(0)}catch(t){}r||(r=window.d3),t.exports=r},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(9),i=n(15);e.default=function(t,e,n){var a=i.default.parse(t),o=a[e],s=r.default.channel.clamp[e](o+n);return o!==s&&(a[e]=s),i.default.stringify(a)}},function(t,e,n){var r=n(210),i=n(216);t.exports=function(t,e){var n=i(t,e);return r(n)?n:void 0}},function(t,e,n){var r=n(38),i=n(212),a=n(213),o=r?r.toStringTag:void 0;t.exports=function(t){return null==t?void 0===t?"[object Undefined]":"[object Null]":o&&o in Object(t)?i(t):a(t)}},function(t,e){t.exports=function(t){return t}},function(t,e){t.exports=function(t,e){return t===e||t!=t&&e!=e}},function(t,e,n){var r=n(34),i=n(11);t.exports=function(t){if(!i(t))return!1;var e=r(t);return"[object Function]"==e||"[object GeneratorFunction]"==e||"[object AsyncFunction]"==e||"[object Proxy]"==e}},function(t,e,n){var r=n(16).Symbol;t.exports=r},function(t,e,n){(function(t){var r=n(16),i=n(232),a=e&&!e.nodeType&&e,o=a&&"object"==typeof t&&t&&!t.nodeType&&t,s=o&&o.exports===a?r.Buffer:void 0,c=(s?s.isBuffer:void 0)||i;t.exports=c}).call(this,n(7)(t))},function(t,e,n){var r=n(112),i=n(236),a=n(24);t.exports=function(t){return a(t)?r(t,!0):i(t)}},function(t,e,n){var r=n(241),i=n(77),a=n(242),o=n(121),s=n(243),c=n(34),u=n(110),l=u(r),h=u(i),f=u(a),d=u(o),p=u(s),g=c;(r&&"[object DataView]"!=g(new r(new ArrayBuffer(1)))||i&&"[object Map]"!=g(new i)||a&&"[object Promise]"!=g(a.resolve())||o&&"[object Set]"!=g(new o)||s&&"[object WeakMap]"!=g(new s))&&(g=function(t){var 
e=c(t),n="[object Object]"==e?t.constructor:void 0,r=n?u(n):"";if(r)switch(r){case l:return"[object DataView]";case h:return"[object Map]";case f:return"[object Promise]";case d:return"[object Set]";case p:return"[object WeakMap]"}return e}),t.exports=g},function(t,e,n){var r=n(34),i=n(21);t.exports=function(t){return"symbol"==typeof t||i(t)&&"[object Symbol]"==r(t)}},function(t,e,n){var r;try{r={defaults:n(154),each:n(87),isFunction:n(37),isPlainObject:n(158),pick:n(161),has:n(93),range:n(162),uniqueId:n(163)}}catch(t){}r||(r=window._),t.exports=r},function(t){t.exports=JSON.parse('{"name":"mermaid","version":"8.8.4","description":"Markdownish syntax for generating flowcharts, sequence diagrams, class diagrams, gantt charts and git graphs.","main":"dist/mermaid.core.js","keywords":["diagram","markdown","flowchart","sequence diagram","gantt","class diagram","git graph"],"scripts":{"build:development":"webpack --progress --colors","build:production":"yarn build:development -p --config webpack.config.prod.babel.js","build":"yarn build:development && yarn build:production","postbuild":"documentation build src/mermaidAPI.js src/config.js --shallow -f md --markdown-toc false > docs/Setup.md","build:watch":"yarn build --watch","minify":"minify ./dist/mermaid.js > ./dist/mermaid.min.js","release":"yarn build","lint":"eslint src","e2e:depr":"yarn lint && jest e2e --config e2e/jest.config.js","cypress":"percy exec -- cypress run","e2e":"start-server-and-test dev http://localhost:9000/ cypress","e2e-upd":"yarn lint && jest e2e -u --config e2e/jest.config.js","dev":"webpack-dev-server --config webpack.config.e2e.js","test":"yarn lint && jest src/.*","test:watch":"jest --watch src","prepublishOnly":"yarn build && yarn test","prepare":"yarn build"},"repository":{"type":"git","url":"https://github.com/knsv/mermaid"},"author":"Knut 
Sveidqvist","license":"MIT","standard":{"ignore":["**/parser/*.js","dist/**/*.js","cypress/**/*.js"],"globals":["page"]},"dependencies":{"@braintree/sanitize-url":"^3.1.0","d3":"^5.7.0","dagre":"^0.8.4","dagre-d3":"^0.6.4","entity-decode":"^2.0.2","graphlib":"^2.1.7","he":"^1.2.0","khroma":"^1.1.0","minify":"^4.1.1","moment-mini":"^2.22.1","stylis":"^3.5.2"},"devDependencies":{"@babel/core":"^7.2.2","@babel/preset-env":"^7.8.4","@babel/register":"^7.0.0","@percy/cypress":"*","babel-core":"7.0.0-bridge.0","babel-eslint":"^10.1.0","babel-jest":"^24.9.0","babel-loader":"^8.0.4","coveralls":"^3.0.2","css-loader":"^2.0.1","css-to-string-loader":"^0.1.3","cypress":"4.0.1","documentation":"^12.0.1","eslint":"^6.3.0","eslint-config-prettier":"^6.3.0","eslint-plugin-prettier":"^3.1.0","husky":"^1.2.1","identity-obj-proxy":"^3.0.0","jest":"^24.9.0","jison":"^0.4.18","moment":"^2.23.0","node-sass":"^4.12.0","prettier":"^1.18.2","puppeteer":"^1.17.0","sass-loader":"^7.1.0","start-server-and-test":"^1.10.6","terser-webpack-plugin":"^2.2.2","webpack":"^4.41.2","webpack-bundle-analyzer":"^3.7.0","webpack-cli":"^3.1.2","webpack-dev-server":"^3.4.1","webpack-node-externals":"^1.7.2","yarn-upgrade-all":"^0.5.0"},"files":["dist"],"yarn-upgrade-all":{"ignore":["babel-core"]},"sideEffects":["**/*.css","**/*.scss"],"husky":{"hooks":{"pre-push":"yarn test"}}}')},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=new(n(176).default)({r:0,g:0,b:0,a:0},"transparent");e.default=r},function(t,e,n){var r=n(58),i=n(59);t.exports=function(t,e,n,a){var o=!n;n||(n={});for(var s=-1,c=e.length;++s-1&&t%1==0&&t-1}(s)?s:(n=s.match(a))?(e=n[0],r.test(e)?"about:blank":s):"about:blank"}}},function(t,e,n){(function(t,r){var i=function(){var t=function(t,e,n,r){for(n=n||{},r=t.length;r--;n[t[r]]=e);return 
n},e=[2,3],n=[1,7],r=[7,12,15,17,19,20,21],i=[7,11,12,15,17,19,20,21],a=[2,20],o=[1,32],s={trace:function(){},yy:{},symbols_:{error:2,start:3,GG:4,":":5,document:6,EOF:7,DIR:8,options:9,body:10,OPT:11,NL:12,line:13,statement:14,COMMIT:15,commit_arg:16,BRANCH:17,ID:18,CHECKOUT:19,MERGE:20,RESET:21,reset_arg:22,STR:23,HEAD:24,reset_parents:25,CARET:26,$accept:0,$end:1},terminals_:{2:"error",4:"GG",5:":",7:"EOF",8:"DIR",11:"OPT",12:"NL",15:"COMMIT",17:"BRANCH",18:"ID",19:"CHECKOUT",20:"MERGE",21:"RESET",23:"STR",24:"HEAD",26:"CARET"},productions_:[0,[3,4],[3,5],[6,0],[6,2],[9,2],[9,1],[10,0],[10,2],[13,2],[13,1],[14,2],[14,2],[14,2],[14,2],[14,2],[16,0],[16,1],[22,2],[22,2],[25,0],[25,2]],performAction:function(t,e,n,r,i,a,o){var s=a.length-1;switch(i){case 1:return a[s-1];case 2:return r.setDirection(a[s-3]),a[s-1];case 4:r.setOptions(a[s-1]),this.$=a[s];break;case 5:a[s-1]+=a[s],this.$=a[s-1];break;case 7:this.$=[];break;case 8:a[s-1].push(a[s]),this.$=a[s-1];break;case 9:this.$=a[s-1];break;case 11:r.commit(a[s]);break;case 12:r.branch(a[s]);break;case 13:r.checkout(a[s]);break;case 14:r.merge(a[s]);break;case 15:r.reset(a[s]);break;case 16:this.$="";break;case 17:this.$=a[s];break;case 18:this.$=a[s-1]+":"+a[s];break;case 19:this.$=a[s-1]+":"+r.count,r.count=0;break;case 20:r.count=0;break;case 
21:r.count+=1}},table:[{3:1,4:[1,2]},{1:[3]},{5:[1,3],8:[1,4]},{6:5,7:e,9:6,12:n},{5:[1,8]},{7:[1,9]},t(r,[2,7],{10:10,11:[1,11]}),t(i,[2,6]),{6:12,7:e,9:6,12:n},{1:[2,1]},{7:[2,4],12:[1,15],13:13,14:14,15:[1,16],17:[1,17],19:[1,18],20:[1,19],21:[1,20]},t(i,[2,5]),{7:[1,21]},t(r,[2,8]),{12:[1,22]},t(r,[2,10]),{12:[2,16],16:23,23:[1,24]},{18:[1,25]},{18:[1,26]},{18:[1,27]},{18:[1,30],22:28,24:[1,29]},{1:[2,2]},t(r,[2,9]),{12:[2,11]},{12:[2,17]},{12:[2,12]},{12:[2,13]},{12:[2,14]},{12:[2,15]},{12:a,25:31,26:o},{12:a,25:33,26:o},{12:[2,18]},{12:a,25:34,26:o},{12:[2,19]},{12:[2,21]}],defaultActions:{9:[2,1],21:[2,2],23:[2,11],24:[2,17],25:[2,12],26:[2,13],27:[2,14],28:[2,15],31:[2,18],33:[2,19],34:[2,21]},parseError:function(t,e){if(!e.recoverable){var n=new Error(t);throw n.hash=e,n}this.trace(t)},parse:function(t){var e=this,n=[0],r=[],i=[null],a=[],o=this.table,s="",c=0,u=0,l=0,h=2,f=1,d=a.slice.call(arguments,1),p=Object.create(this.lexer),g={yy:{}};for(var y in this.yy)Object.prototype.hasOwnProperty.call(this.yy,y)&&(g.yy[y]=this.yy[y]);p.setInput(t,g.yy),g.yy.lexer=p,g.yy.parser=this,void 0===p.yylloc&&(p.yylloc={});var v=p.yylloc;a.push(v);var m=p.options&&p.options.ranges;function b(){var t;return"number"!=typeof(t=r.pop()||p.lex()||f)&&(t instanceof Array&&(t=(r=t).pop()),t=e.symbols_[t]||t),t}"function"==typeof g.yy.parseError?this.parseError=g.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;for(var x,_,k,w,E,T,C,S,A,M={};;){if(k=n[n.length-1],this.defaultActions[k]?w=this.defaultActions[k]:(null==x&&(x=b()),w=o[k]&&o[k][x]),void 0===w||!w.length||!w[0]){var O="";for(T in A=[],o[k])this.terminals_[T]&&T>h&&A.push("'"+this.terminals_[T]+"'");O=p.showPosition?"Parse error on line "+(c+1)+":\n"+p.showPosition()+"\nExpecting "+A.join(", ")+", got '"+(this.terminals_[x]||x)+"'":"Parse error on line "+(c+1)+": Unexpected "+(x==f?"end of 
input":"'"+(this.terminals_[x]||x)+"'"),this.parseError(O,{text:p.match,token:this.terminals_[x]||x,line:p.yylineno,loc:v,expected:A})}if(w[0]instanceof Array&&w.length>1)throw new Error("Parse Error: multiple actions possible at state: "+k+", token: "+x);switch(w[0]){case 1:n.push(x),i.push(p.yytext),a.push(p.yylloc),n.push(w[1]),x=null,_?(x=_,_=null):(u=p.yyleng,s=p.yytext,c=p.yylineno,v=p.yylloc,l>0&&l--);break;case 2:if(C=this.productions_[w[1]][1],M.$=i[i.length-C],M._$={first_line:a[a.length-(C||1)].first_line,last_line:a[a.length-1].last_line,first_column:a[a.length-(C||1)].first_column,last_column:a[a.length-1].last_column},m&&(M._$.range=[a[a.length-(C||1)].range[0],a[a.length-1].range[1]]),void 0!==(E=this.performAction.apply(M,[s,u,c,g.yy,w[1],i,a].concat(d))))return E;C&&(n=n.slice(0,-1*C*2),i=i.slice(0,-1*C),a=a.slice(0,-1*C)),n.push(this.productions_[w[1]][0]),i.push(M.$),a.push(M._$),S=o[n[n.length-2]][n[n.length-1]],n.push(S);break;case 3:return!0}}return!0}},c={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var 
r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var 
n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var a in i)this[a]=i[a];return!1}return!1},next:function(){if(this.done)return this.EOF;var t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),a=0;ae[0].length)){if(e=n,r=a,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[a])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". 
Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{"case-insensitive":!0},performAction:function(t,e,n,r){switch(n){case 0:return 12;case 1:case 2:case 3:break;case 4:return 4;case 5:return 15;case 6:return 17;case 7:return 20;case 8:return 21;case 9:return 19;case 10:case 11:return 8;case 12:return 5;case 13:return 26;case 14:this.begin("options");break;case 15:this.popState();break;case 16:return 11;case 17:this.begin("string");break;case 18:this.popState();break;case 19:return 23;case 20:return 18;case 21:return 7}},rules:[/^(?:(\r?\n)+)/i,/^(?:\s+)/i,/^(?:#[^\n]*)/i,/^(?:%[^\n]*)/i,/^(?:gitGraph\b)/i,/^(?:commit\b)/i,/^(?:branch\b)/i,/^(?:merge\b)/i,/^(?:reset\b)/i,/^(?:checkout\b)/i,/^(?:LR\b)/i,/^(?:BT\b)/i,/^(?::)/i,/^(?:\^)/i,/^(?:options\r?\n)/i,/^(?:end\r?\n)/i,/^(?:[^\n]+\r?\n)/i,/^(?:["])/i,/^(?:["])/i,/^(?:[^"]*)/i,/^(?:[a-zA-Z][-_\.a-zA-Z0-9]*[-_a-zA-Z0-9])/i,/^(?:$)/i],conditions:{options:{rules:[15,16],inclusive:!1},string:{rules:[18,19],inclusive:!1},INITIAL:{rules:[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,17,20,21],inclusive:!0}}};function u(){this.yy={}}return s.lexer=c,u.prototype=s,s.Parser=u,new u}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var 
i=n(19).readFileSync(n(20).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(14),n(7)(t))},function(t,e,n){(function(t,r){var i=function(){var t=function(t,e,n,r){for(n=n||{},r=t.length;r--;n[t[r]]=e);return n},e=[6,9,10],n={trace:function(){},yy:{},symbols_:{error:2,start:3,info:4,document:5,EOF:6,line:7,statement:8,NL:9,showInfo:10,$accept:0,$end:1},terminals_:{2:"error",4:"info",6:"EOF",9:"NL",10:"showInfo"},productions_:[0,[3,3],[5,0],[5,2],[7,1],[7,1],[8,1]],performAction:function(t,e,n,r,i,a,o){a.length;switch(i){case 1:return r;case 4:break;case 6:r.setInfo(!0)}},table:[{3:1,4:[1,2]},{1:[3]},t(e,[2,2],{5:3}),{6:[1,4],7:5,8:6,9:[1,7],10:[1,8]},{1:[2,1]},t(e,[2,3]),t(e,[2,4]),t(e,[2,5]),t(e,[2,6])],defaultActions:{4:[2,1]},parseError:function(t,e){if(!e.recoverable){var n=new Error(t);throw n.hash=e,n}this.trace(t)},parse:function(t){var e=this,n=[0],r=[],i=[null],a=[],o=this.table,s="",c=0,u=0,l=0,h=2,f=1,d=a.slice.call(arguments,1),p=Object.create(this.lexer),g={yy:{}};for(var y in this.yy)Object.prototype.hasOwnProperty.call(this.yy,y)&&(g.yy[y]=this.yy[y]);p.setInput(t,g.yy),g.yy.lexer=p,g.yy.parser=this,void 0===p.yylloc&&(p.yylloc={});var v=p.yylloc;a.push(v);var m=p.options&&p.options.ranges;function b(){var t;return"number"!=typeof(t=r.pop()||p.lex()||f)&&(t instanceof Array&&(t=(r=t).pop()),t=e.symbols_[t]||t),t}"function"==typeof g.yy.parseError?this.parseError=g.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;for(var x,_,k,w,E,T,C,S,A,M={};;){if(k=n[n.length-1],this.defaultActions[k]?w=this.defaultActions[k]:(null==x&&(x=b()),w=o[k]&&o[k][x]),void 0===w||!w.length||!w[0]){var O="";for(T in A=[],o[k])this.terminals_[T]&&T>h&&A.push("'"+this.terminals_[T]+"'");O=p.showPosition?"Parse error on line "+(c+1)+":\n"+p.showPosition()+"\nExpecting "+A.join(", ")+", got '"+(this.terminals_[x]||x)+"'":"Parse error on line "+(c+1)+": Unexpected "+(x==f?"end of 
input":"'"+(this.terminals_[x]||x)+"'"),this.parseError(O,{text:p.match,token:this.terminals_[x]||x,line:p.yylineno,loc:v,expected:A})}if(w[0]instanceof Array&&w.length>1)throw new Error("Parse Error: multiple actions possible at state: "+k+", token: "+x);switch(w[0]){case 1:n.push(x),i.push(p.yytext),a.push(p.yylloc),n.push(w[1]),x=null,_?(x=_,_=null):(u=p.yyleng,s=p.yytext,c=p.yylineno,v=p.yylloc,l>0&&l--);break;case 2:if(C=this.productions_[w[1]][1],M.$=i[i.length-C],M._$={first_line:a[a.length-(C||1)].first_line,last_line:a[a.length-1].last_line,first_column:a[a.length-(C||1)].first_column,last_column:a[a.length-1].last_column},m&&(M._$.range=[a[a.length-(C||1)].range[0],a[a.length-1].range[1]]),void 0!==(E=this.performAction.apply(M,[s,u,c,g.yy,w[1],i,a].concat(d))))return E;C&&(n=n.slice(0,-1*C*2),i=i.slice(0,-1*C),a=a.slice(0,-1*C)),n.push(this.productions_[w[1]][0]),i.push(M.$),a.push(M._$),S=o[n[n.length-2]][n[n.length-1]],n.push(S);break;case 3:return!0}}return!0}},r={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var 
r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var 
n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var a in i)this[a]=i[a];return!1}return!1},next:function(){if(this.done)return this.EOF;var t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),a=0;ae[0].length)){if(e=n,r=a,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[a])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". 
Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{"case-insensitive":!0},performAction:function(t,e,n,r){switch(n){case 0:return 4;case 1:return 9;case 2:return"space";case 3:return 10;case 4:return 6;case 5:return"TXT"}},rules:[/^(?:info\b)/i,/^(?:[\s\n\r]+)/i,/^(?:[\s]+)/i,/^(?:showInfo\b)/i,/^(?:$)/i,/^(?:.)/i],conditions:{INITIAL:{rules:[0,1,2,3,4,5],inclusive:!0}}};function i(){this.yy={}}return n.lexer=r,i.prototype=n,n.Parser=i,new i}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var i=n(19).readFileSync(n(20).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(14),n(7)(t))},function(t,e,n){(function(t,r){var i=function(){var t=function(t,e,n,r){for(n=n||{},r=t.length;r--;n[t[r]]=e);return 
n},e=[1,4],n=[1,5],r=[1,6],i=[1,7],a=[1,9],o=[1,10,12,19,20,21,22],s=[1,6,10,12,19,20,21,22],c=[19,20,21],u=[1,22],l=[6,19,20,21,22],h={trace:function(){},yy:{},symbols_:{error:2,start:3,eol:4,directive:5,PIE:6,document:7,line:8,statement:9,txt:10,value:11,title:12,title_value:13,openDirective:14,typeDirective:15,closeDirective:16,":":17,argDirective:18,NEWLINE:19,";":20,EOF:21,open_directive:22,type_directive:23,arg_directive:24,close_directive:25,$accept:0,$end:1},terminals_:{2:"error",6:"PIE",10:"txt",11:"value",12:"title",13:"title_value",17:":",19:"NEWLINE",20:";",21:"EOF",22:"open_directive",23:"type_directive",24:"arg_directive",25:"close_directive"},productions_:[0,[3,2],[3,2],[3,2],[7,0],[7,2],[8,2],[9,0],[9,2],[9,2],[9,1],[5,3],[5,5],[4,1],[4,1],[4,1],[14,1],[15,1],[18,1],[16,1]],performAction:function(t,e,n,r,i,a,o){var s=a.length-1;switch(i){case 6:this.$=a[s-1];break;case 8:r.addSection(a[s-1],r.cleanupValue(a[s]));break;case 9:this.$=a[s].trim(),r.setTitle(this.$);break;case 16:r.parseDirective("%%{","open_directive");break;case 17:r.parseDirective(a[s],"type_directive");break;case 18:a[s]=a[s].trim().replace(/'/g,'"'),r.parseDirective(a[s],"arg_directive");break;case 19:r.parseDirective("}%%","close_directive","pie")}},table:[{3:1,4:2,5:3,6:e,14:8,19:n,20:r,21:i,22:a},{1:[3]},{3:10,4:2,5:3,6:e,14:8,19:n,20:r,21:i,22:a},{3:11,4:2,5:3,6:e,14:8,19:n,20:r,21:i,22:a},t(o,[2,4],{7:12}),t(s,[2,13]),t(s,[2,14]),t(s,[2,15]),{15:13,23:[1,14]},{23:[2,16]},{1:[2,1]},{1:[2,2]},t(c,[2,7],{14:8,8:15,9:16,5:19,1:[2,3],10:[1,17],12:[1,18],22:a}),{16:20,17:[1,21],25:u},t([17,25],[2,17]),t(o,[2,5]),{4:23,19:n,20:r,21:i},{11:[1,24]},{13:[1,25]},t(c,[2,10]),t(l,[2,11]),{18:26,24:[1,27]},t(l,[2,19]),t(o,[2,6]),t(c,[2,8]),t(c,[2,9]),{16:28,25:u},{25:[2,18]},t(l,[2,12])],defaultActions:{9:[2,16],10:[2,1],11:[2,2],27:[2,18]},parseError:function(t,e){if(!e.recoverable){var n=new Error(t);throw n.hash=e,n}this.trace(t)},parse:function(t){var 
e=this,n=[0],r=[],i=[null],a=[],o=this.table,s="",c=0,u=0,l=0,h=2,f=1,d=a.slice.call(arguments,1),p=Object.create(this.lexer),g={yy:{}};for(var y in this.yy)Object.prototype.hasOwnProperty.call(this.yy,y)&&(g.yy[y]=this.yy[y]);p.setInput(t,g.yy),g.yy.lexer=p,g.yy.parser=this,void 0===p.yylloc&&(p.yylloc={});var v=p.yylloc;a.push(v);var m=p.options&&p.options.ranges;function b(){var t;return"number"!=typeof(t=r.pop()||p.lex()||f)&&(t instanceof Array&&(t=(r=t).pop()),t=e.symbols_[t]||t),t}"function"==typeof g.yy.parseError?this.parseError=g.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;for(var x,_,k,w,E,T,C,S,A,M={};;){if(k=n[n.length-1],this.defaultActions[k]?w=this.defaultActions[k]:(null==x&&(x=b()),w=o[k]&&o[k][x]),void 0===w||!w.length||!w[0]){var O="";for(T in A=[],o[k])this.terminals_[T]&&T>h&&A.push("'"+this.terminals_[T]+"'");O=p.showPosition?"Parse error on line "+(c+1)+":\n"+p.showPosition()+"\nExpecting "+A.join(", ")+", got '"+(this.terminals_[x]||x)+"'":"Parse error on line "+(c+1)+": Unexpected "+(x==f?"end of input":"'"+(this.terminals_[x]||x)+"'"),this.parseError(O,{text:p.match,token:this.terminals_[x]||x,line:p.yylineno,loc:v,expected:A})}if(w[0]instanceof Array&&w.length>1)throw new Error("Parse Error: multiple actions possible at state: "+k+", token: "+x);switch(w[0]){case 1:n.push(x),i.push(p.yytext),a.push(p.yylloc),n.push(w[1]),x=null,_?(x=_,_=null):(u=p.yyleng,s=p.yytext,c=p.yylineno,v=p.yylloc,l>0&&l--);break;case 2:if(C=this.productions_[w[1]][1],M.$=i[i.length-C],M._$={first_line:a[a.length-(C||1)].first_line,last_line:a[a.length-1].last_line,first_column:a[a.length-(C||1)].first_column,last_column:a[a.length-1].last_column},m&&(M._$.range=[a[a.length-(C||1)].range[0],a[a.length-1].range[1]]),void 0!==(E=this.performAction.apply(M,[s,u,c,g.yy,w[1],i,a].concat(d))))return 
E;C&&(n=n.slice(0,-1*C*2),i=i.slice(0,-1*C),a=a.slice(0,-1*C)),n.push(this.productions_[w[1]][0]),i.push(M.$),a.push(M._$),S=o[n[n.length-2]][n[n.length-1]],n.push(S);break;case 3:return!0}}return!0}},f={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". 
You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var a in i)this[a]=i[a];return!1}return!1},next:function(){if(this.done)return this.EOF;var 
t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),a=0;ae[0].length)){if(e=n,r=a,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[a])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{"case-insensitive":!0},performAction:function(t,e,n,r){switch(n){case 0:return this.begin("open_directive"),22;case 1:return this.begin("type_directive"),23;case 2:return this.popState(),this.begin("arg_directive"),17;case 3:return this.popState(),this.popState(),25;case 4:return 24;case 5:case 6:break;case 7:return 19;case 8:case 9:break;case 10:return this.begin("title"),12;case 11:return this.popState(),"title_value";case 12:this.begin("string");break;case 13:this.popState();break;case 14:return"txt";case 15:return 6;case 16:return"value";case 17:return 
21}},rules:[/^(?:%%\{)/i,/^(?:((?:(?!\}%%)[^:.])*))/i,/^(?::)/i,/^(?:\}%%)/i,/^(?:((?:(?!\}%%).|\n)*))/i,/^(?:%%(?!\{)[^\n]*)/i,/^(?:[^\}]%%[^\n]*)/i,/^(?:[\n\r]+)/i,/^(?:%%[^\n]*)/i,/^(?:[\s]+)/i,/^(?:title\b)/i,/^(?:(?!\n||)*[^\n]*)/i,/^(?:["])/i,/^(?:["])/i,/^(?:[^"]*)/i,/^(?:pie\b)/i,/^(?::[\s]*[\d]+(?:\.[\d]+)?)/i,/^(?:$)/i],conditions:{close_directive:{rules:[],inclusive:!1},arg_directive:{rules:[3,4],inclusive:!1},type_directive:{rules:[2,3],inclusive:!1},open_directive:{rules:[1],inclusive:!1},title:{rules:[11],inclusive:!1},string:{rules:[13,14],inclusive:!1},INITIAL:{rules:[0,5,6,7,8,9,10,12,15,16,17],inclusive:!0}}};function d(){this.yy={}}return h.lexer=f,d.prototype=h,h.Parser=d,new d}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var i=n(19).readFileSync(n(20).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(14),n(7)(t))},function(t,e,n){(function(t,r){var i=function(){var t=function(t,e,n,r){for(n=n||{},r=t.length;r--;n[t[r]]=e);return 
n},e=[1,2],n=[1,5],r=[6,9,11,23,37],i=[1,17],a=[1,20],o=[1,25],s=[1,26],c=[1,27],u=[1,28],l=[1,37],h=[23,34,35],f=[4,6,9,11,23,37],d=[30,31,32,33],p=[22,27],g={trace:function(){},yy:{},symbols_:{error:2,start:3,ER_DIAGRAM:4,document:5,EOF:6,directive:7,line:8,SPACE:9,statement:10,NEWLINE:11,openDirective:12,typeDirective:13,closeDirective:14,":":15,argDirective:16,entityName:17,relSpec:18,role:19,BLOCK_START:20,attributes:21,BLOCK_STOP:22,ALPHANUM:23,attribute:24,attributeType:25,attributeName:26,ATTRIBUTE_WORD:27,cardinality:28,relType:29,ZERO_OR_ONE:30,ZERO_OR_MORE:31,ONE_OR_MORE:32,ONLY_ONE:33,NON_IDENTIFYING:34,IDENTIFYING:35,WORD:36,open_directive:37,type_directive:38,arg_directive:39,close_directive:40,$accept:0,$end:1},terminals_:{2:"error",4:"ER_DIAGRAM",6:"EOF",9:"SPACE",11:"NEWLINE",15:":",20:"BLOCK_START",22:"BLOCK_STOP",23:"ALPHANUM",27:"ATTRIBUTE_WORD",30:"ZERO_OR_ONE",31:"ZERO_OR_MORE",32:"ONE_OR_MORE",33:"ONLY_ONE",34:"NON_IDENTIFYING",35:"IDENTIFYING",36:"WORD",37:"open_directive",38:"type_directive",39:"arg_directive",40:"close_directive"},productions_:[0,[3,3],[3,2],[5,0],[5,2],[8,2],[8,1],[8,1],[8,1],[7,4],[7,6],[10,1],[10,5],[10,4],[10,3],[10,1],[17,1],[21,1],[21,2],[24,2],[25,1],[26,1],[18,3],[28,1],[28,1],[28,1],[28,1],[29,1],[29,1],[19,1],[19,1],[12,1],[13,1],[16,1],[14,1]],performAction:function(t,e,n,r,i,a,o){var s=a.length-1;switch(i){case 1:break;case 3:this.$=[];break;case 4:a[s-1].push(a[s]),this.$=a[s-1];break;case 5:case 6:this.$=a[s];break;case 7:case 8:this.$=[];break;case 12:r.addEntity(a[s-4]),r.addEntity(a[s-2]),r.addRelationship(a[s-4],a[s],a[s-2],a[s-3]);break;case 13:r.addEntity(a[s-3]),r.addAttributes(a[s-3],a[s-1]);break;case 14:r.addEntity(a[s-2]);break;case 15:r.addEntity(a[s]);break;case 16:this.$=a[s];break;case 17:this.$=[a[s]];break;case 18:a[s].push(a[s-1]),this.$=a[s];break;case 19:this.$={attributeType:a[s-1],attributeName:a[s]};break;case 20:case 21:this.$=a[s];break;case 
22:this.$={cardA:a[s],relType:a[s-1],cardB:a[s-2]};break;case 23:this.$=r.Cardinality.ZERO_OR_ONE;break;case 24:this.$=r.Cardinality.ZERO_OR_MORE;break;case 25:this.$=r.Cardinality.ONE_OR_MORE;break;case 26:this.$=r.Cardinality.ONLY_ONE;break;case 27:this.$=r.Identification.NON_IDENTIFYING;break;case 28:this.$=r.Identification.IDENTIFYING;break;case 29:this.$=a[s].replace(/"/g,"");break;case 30:this.$=a[s];break;case 31:r.parseDirective("%%{","open_directive");break;case 32:r.parseDirective(a[s],"type_directive");break;case 33:a[s]=a[s].trim().replace(/'/g,'"'),r.parseDirective(a[s],"arg_directive");break;case 34:r.parseDirective("}%%","close_directive","er")}},table:[{3:1,4:e,7:3,12:4,37:n},{1:[3]},t(r,[2,3],{5:6}),{3:7,4:e,7:3,12:4,37:n},{13:8,38:[1,9]},{38:[2,31]},{6:[1,10],7:15,8:11,9:[1,12],10:13,11:[1,14],12:4,17:16,23:i,37:n},{1:[2,2]},{14:18,15:[1,19],40:a},t([15,40],[2,32]),t(r,[2,8],{1:[2,1]}),t(r,[2,4]),{7:15,10:21,12:4,17:16,23:i,37:n},t(r,[2,6]),t(r,[2,7]),t(r,[2,11]),t(r,[2,15],{18:22,28:24,20:[1,23],30:o,31:s,32:c,33:u}),t([6,9,11,15,20,23,30,31,32,33,37],[2,16]),{11:[1,29]},{16:30,39:[1,31]},{11:[2,34]},t(r,[2,5]),{17:32,23:i},{21:33,22:[1,34],24:35,25:36,27:l},{29:38,34:[1,39],35:[1,40]},t(h,[2,23]),t(h,[2,24]),t(h,[2,25]),t(h,[2,26]),t(f,[2,9]),{14:41,40:a},{40:[2,33]},{15:[1,42]},{22:[1,43]},t(r,[2,14]),{21:44,22:[2,17],24:35,25:36,27:l},{26:45,27:[1,46]},{27:[2,20]},{28:47,30:o,31:s,32:c,33:u},t(d,[2,27]),t(d,[2,28]),{11:[1,48]},{19:49,23:[1,51],36:[1,50]},t(r,[2,13]),{22:[2,18]},t(p,[2,19]),t(p,[2,21]),{23:[2,22]},t(f,[2,10]),t(r,[2,12]),t(r,[2,29]),t(r,[2,30])],defaultActions:{5:[2,31],7:[2,2],20:[2,34],31:[2,33],37:[2,20],44:[2,18],47:[2,22]},parseError:function(t,e){if(!e.recoverable){var n=new Error(t);throw n.hash=e,n}this.trace(t)},parse:function(t){var e=this,n=[0],r=[],i=[null],a=[],o=this.table,s="",c=0,u=0,l=0,h=2,f=1,d=a.slice.call(arguments,1),p=Object.create(this.lexer),g={yy:{}};for(var y in 
this.yy)Object.prototype.hasOwnProperty.call(this.yy,y)&&(g.yy[y]=this.yy[y]);p.setInput(t,g.yy),g.yy.lexer=p,g.yy.parser=this,void 0===p.yylloc&&(p.yylloc={});var v=p.yylloc;a.push(v);var m=p.options&&p.options.ranges;function b(){var t;return"number"!=typeof(t=r.pop()||p.lex()||f)&&(t instanceof Array&&(t=(r=t).pop()),t=e.symbols_[t]||t),t}"function"==typeof g.yy.parseError?this.parseError=g.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;for(var x,_,k,w,E,T,C,S,A,M={};;){if(k=n[n.length-1],this.defaultActions[k]?w=this.defaultActions[k]:(null==x&&(x=b()),w=o[k]&&o[k][x]),void 0===w||!w.length||!w[0]){var O="";for(T in A=[],o[k])this.terminals_[T]&&T>h&&A.push("'"+this.terminals_[T]+"'");O=p.showPosition?"Parse error on line "+(c+1)+":\n"+p.showPosition()+"\nExpecting "+A.join(", ")+", got '"+(this.terminals_[x]||x)+"'":"Parse error on line "+(c+1)+": Unexpected "+(x==f?"end of input":"'"+(this.terminals_[x]||x)+"'"),this.parseError(O,{text:p.match,token:this.terminals_[x]||x,line:p.yylineno,loc:v,expected:A})}if(w[0]instanceof Array&&w.length>1)throw new Error("Parse Error: multiple actions possible at state: "+k+", token: "+x);switch(w[0]){case 1:n.push(x),i.push(p.yytext),a.push(p.yylloc),n.push(w[1]),x=null,_?(x=_,_=null):(u=p.yyleng,s=p.yytext,c=p.yylineno,v=p.yylloc,l>0&&l--);break;case 2:if(C=this.productions_[w[1]][1],M.$=i[i.length-C],M._$={first_line:a[a.length-(C||1)].first_line,last_line:a[a.length-1].last_line,first_column:a[a.length-(C||1)].first_column,last_column:a[a.length-1].last_column},m&&(M._$.range=[a[a.length-(C||1)].range[0],a[a.length-1].range[1]]),void 0!==(E=this.performAction.apply(M,[s,u,c,g.yy,w[1],i,a].concat(d))))return E;C&&(n=n.slice(0,-1*C*2),i=i.slice(0,-1*C),a=a.slice(0,-1*C)),n.push(this.productions_[w[1]][0]),i.push(M.$),a.push(M._$),S=o[n[n.length-2]][n[n.length-1]],n.push(S);break;case 3:return!0}}return!0}},y={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new 
Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". 
You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var a in i)this[a]=i[a];return!1}return!1},next:function(){if(this.done)return this.EOF;var 
t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),a=0;ae[0].length)){if(e=n,r=a,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[a])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{"case-insensitive":!0},performAction:function(t,e,n,r){switch(n){case 0:return this.begin("open_directive"),37;case 1:return this.begin("type_directive"),38;case 2:return this.popState(),this.begin("arg_directive"),15;case 3:return this.popState(),this.popState(),40;case 4:return 39;case 5:case 6:break;case 7:return 11;case 8:break;case 9:return 9;case 10:return 36;case 11:return 4;case 12:return this.begin("block"),20;case 13:break;case 14:return 27;case 15:break;case 16:return this.popState(),22;case 17:return e.yytext[0];case 18:return 30;case 19:return 31;case 20:return 32;case 21:return 33;case 22:return 30;case 23:return 31;case 24:return 32;case 25:return 34;case 26:return 35;case 27:case 28:return 34;case 29:return 23;case 30:return e.yytext[0];case 31:return 
6}},rules:[/^(?:%%\{)/i,/^(?:((?:(?!\}%%)[^:.])*))/i,/^(?::)/i,/^(?:\}%%)/i,/^(?:((?:(?!\}%%).|\n)*))/i,/^(?:%(?!\{)[^\n]*)/i,/^(?:[^\}]%%[^\n]*)/i,/^(?:[\n]+)/i,/^(?:\s+)/i,/^(?:[\s]+)/i,/^(?:"[^"]*")/i,/^(?:erDiagram\b)/i,/^(?:\{)/i,/^(?:\s+)/i,/^(?:[A-Za-z][A-Za-z0-9\-_]*)/i,/^(?:[\n]+)/i,/^(?:\})/i,/^(?:.)/i,/^(?:\|o\b)/i,/^(?:\}o\b)/i,/^(?:\}\|)/i,/^(?:\|\|)/i,/^(?:o\|)/i,/^(?:o\{)/i,/^(?:\|\{)/i,/^(?:\.\.)/i,/^(?:--)/i,/^(?:\.-)/i,/^(?:-\.)/i,/^(?:[A-Za-z][A-Za-z0-9\-_]*)/i,/^(?:.)/i,/^(?:$)/i],conditions:{open_directive:{rules:[1],inclusive:!1},type_directive:{rules:[2,3],inclusive:!1},arg_directive:{rules:[3,4],inclusive:!1},block:{rules:[13,14,15,16,17],inclusive:!1},INITIAL:{rules:[0,5,6,7,8,9,10,11,12,18,19,20,21,22,23,24,25,26,27,28,29,30,31],inclusive:!0}}};function v(){this.yy={}}return g.lexer=y,v.prototype=g,g.Parser=v,new v}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var i=n(19).readFileSync(n(20).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(14),n(7)(t))},function(t,e,n){"use strict";var r;Object.defineProperty(e,"__esModule",{value:!0}),function(t){t[t.ALL=0]="ALL",t[t.RGB=1]="RGB",t[t.HSL=2]="HSL"}(r||(r={})),e.TYPE=r},function(t,e,n){"use strict";var r=n(10);t.exports=i;function i(t){this._isDirected=!r.has(t,"directed")||t.directed,this._isMultigraph=!!r.has(t,"multigraph")&&t.multigraph,this._isCompound=!!r.has(t,"compound")&&t.compound,this._label=void 0,this._defaultNodeLabelFn=r.constant(void 0),this._defaultEdgeLabelFn=r.constant(void 0),this._nodes={},this._isCompound&&(this._parent={},this._children={},this._children["\0"]={}),this._in={},this._preds={},this._out={},this._sucs={},this._edgeObjs={},this._edgeLabels={}}function a(t,e){t[e]?t[e]++:t[e]=1}function o(t,e){--t[e]||delete t[e]}function s(t,e,n,i){var a=""+e,o=""+n;if(!t&&a>o){var s=a;a=o,o=s}return 
a+""+o+""+(r.isUndefined(i)?"\0":i)}function c(t,e,n,r){var i=""+e,a=""+n;if(!t&&i>a){var o=i;i=a,a=o}var s={v:i,w:a};return r&&(s.name=r),s}function u(t,e){return s(t,e.v,e.w,e.name)}i.prototype._nodeCount=0,i.prototype._edgeCount=0,i.prototype.isDirected=function(){return this._isDirected},i.prototype.isMultigraph=function(){return this._isMultigraph},i.prototype.isCompound=function(){return this._isCompound},i.prototype.setGraph=function(t){return this._label=t,this},i.prototype.graph=function(){return this._label},i.prototype.setDefaultNodeLabel=function(t){return r.isFunction(t)||(t=r.constant(t)),this._defaultNodeLabelFn=t,this},i.prototype.nodeCount=function(){return this._nodeCount},i.prototype.nodes=function(){return r.keys(this._nodes)},i.prototype.sources=function(){var t=this;return r.filter(this.nodes(),(function(e){return r.isEmpty(t._in[e])}))},i.prototype.sinks=function(){var t=this;return r.filter(this.nodes(),(function(e){return r.isEmpty(t._out[e])}))},i.prototype.setNodes=function(t,e){var n=arguments,i=this;return r.each(t,(function(t){n.length>1?i.setNode(t,e):i.setNode(t)})),this},i.prototype.setNode=function(t,e){return r.has(this._nodes,t)?(arguments.length>1&&(this._nodes[t]=e),this):(this._nodes[t]=arguments.length>1?e:this._defaultNodeLabelFn(t),this._isCompound&&(this._parent[t]="\0",this._children[t]={},this._children["\0"][t]=!0),this._in[t]={},this._preds[t]={},this._out[t]={},this._sucs[t]={},++this._nodeCount,this)},i.prototype.node=function(t){return this._nodes[t]},i.prototype.hasNode=function(t){return r.has(this._nodes,t)},i.prototype.removeNode=function(t){var e=this;if(r.has(this._nodes,t)){var n=function(t){e.removeEdge(e._edgeObjs[t])};delete this._nodes[t],this._isCompound&&(this._removeFromParentsChildList(t),delete this._parent[t],r.each(this.children(t),(function(t){e.setParent(t)})),delete this._children[t]),r.each(r.keys(this._in[t]),n),delete this._in[t],delete this._preds[t],r.each(r.keys(this._out[t]),n),delete 
this._out[t],delete this._sucs[t],--this._nodeCount}return this},i.prototype.setParent=function(t,e){if(!this._isCompound)throw new Error("Cannot set parent in a non-compound graph");if(r.isUndefined(e))e="\0";else{for(var n=e+="";!r.isUndefined(n);n=this.parent(n))if(n===t)throw new Error("Setting "+e+" as parent of "+t+" would create a cycle");this.setNode(e)}return this.setNode(t),this._removeFromParentsChildList(t),this._parent[t]=e,this._children[e][t]=!0,this},i.prototype._removeFromParentsChildList=function(t){delete this._children[this._parent[t]][t]},i.prototype.parent=function(t){if(this._isCompound){var e=this._parent[t];if("\0"!==e)return e}},i.prototype.children=function(t){if(r.isUndefined(t)&&(t="\0"),this._isCompound){var e=this._children[t];if(e)return r.keys(e)}else{if("\0"===t)return this.nodes();if(this.hasNode(t))return[]}},i.prototype.predecessors=function(t){var e=this._preds[t];if(e)return r.keys(e)},i.prototype.successors=function(t){var e=this._sucs[t];if(e)return r.keys(e)},i.prototype.neighbors=function(t){var e=this.predecessors(t);if(e)return r.union(e,this.successors(t))},i.prototype.isLeaf=function(t){return 0===(this.isDirected()?this.successors(t):this.neighbors(t)).length},i.prototype.filterNodes=function(t){var e=new this.constructor({directed:this._isDirected,multigraph:this._isMultigraph,compound:this._isCompound});e.setGraph(this.graph());var n=this;r.each(this._nodes,(function(n,r){t(r)&&e.setNode(r,n)})),r.each(this._edgeObjs,(function(t){e.hasNode(t.v)&&e.hasNode(t.w)&&e.setEdge(t,n.edge(t))}));var i={};return this._isCompound&&r.each(e.nodes(),(function(t){e.setParent(t,function t(r){var a=n.parent(r);return void 0===a||e.hasNode(a)?(i[r]=a,a):a in i?i[a]:t(a)}(t))})),e},i.prototype.setDefaultEdgeLabel=function(t){return r.isFunction(t)||(t=r.constant(t)),this._defaultEdgeLabelFn=t,this},i.prototype.edgeCount=function(){return this._edgeCount},i.prototype.edges=function(){return 
r.values(this._edgeObjs)},i.prototype.setPath=function(t,e){var n=this,i=arguments;return r.reduce(t,(function(t,r){return i.length>1?n.setEdge(t,r,e):n.setEdge(t,r),r})),this},i.prototype.setEdge=function(){var t,e,n,i,o=!1,u=arguments[0];"object"==typeof u&&null!==u&&"v"in u?(t=u.v,e=u.w,n=u.name,2===arguments.length&&(i=arguments[1],o=!0)):(t=u,e=arguments[1],n=arguments[3],arguments.length>2&&(i=arguments[2],o=!0)),t=""+t,e=""+e,r.isUndefined(n)||(n=""+n);var l=s(this._isDirected,t,e,n);if(r.has(this._edgeLabels,l))return o&&(this._edgeLabels[l]=i),this;if(!r.isUndefined(n)&&!this._isMultigraph)throw new Error("Cannot set a named edge when isMultigraph = false");this.setNode(t),this.setNode(e),this._edgeLabels[l]=o?i:this._defaultEdgeLabelFn(t,e,n);var h=c(this._isDirected,t,e,n);return t=h.v,e=h.w,Object.freeze(h),this._edgeObjs[l]=h,a(this._preds[e],t),a(this._sucs[t],e),this._in[e][l]=h,this._out[t][l]=h,this._edgeCount++,this},i.prototype.edge=function(t,e,n){var r=1===arguments.length?u(this._isDirected,arguments[0]):s(this._isDirected,t,e,n);return this._edgeLabels[r]},i.prototype.hasEdge=function(t,e,n){var i=1===arguments.length?u(this._isDirected,arguments[0]):s(this._isDirected,t,e,n);return r.has(this._edgeLabels,i)},i.prototype.removeEdge=function(t,e,n){var r=1===arguments.length?u(this._isDirected,arguments[0]):s(this._isDirected,t,e,n),i=this._edgeObjs[r];return i&&(t=i.v,e=i.w,delete this._edgeLabels[r],delete this._edgeObjs[r],o(this._preds[e],t),o(this._sucs[t],e),delete this._in[e][r],delete this._out[t][r],this._edgeCount--),this},i.prototype.inEdges=function(t,e){var n=this._in[t];if(n){var i=r.values(n);return e?r.filter(i,(function(t){return t.v===e})):i}},i.prototype.outEdges=function(t,e){var n=this._out[t];if(n){var i=r.values(n);return e?r.filter(i,(function(t){return t.w===e})):i}},i.prototype.nodeEdges=function(t,e){var n=this.inEdges(t,e);if(n)return n.concat(this.outEdges(t,e))}},function(t,e,n){var 
r=n(33)(n(16),"Map");t.exports=r},function(t,e,n){var r=n(217),i=n(224),a=n(226),o=n(227),s=n(228);function c(t){var e=-1,n=null==t?0:t.length;for(this.clear();++e-1&&t%1==0&&t<=9007199254740991}},function(t,e,n){(function(t){var r=n(109),i=e&&!e.nodeType&&e,a=i&&"object"==typeof t&&t&&!t.nodeType&&t,o=a&&a.exports===i&&r.process,s=function(){try{var t=a&&a.require&&a.require("util").types;return t||o&&o.binding&&o.binding("util")}catch(t){}}();t.exports=s}).call(this,n(7)(t))},function(t,e,n){var r=n(62),i=n(234),a=Object.prototype.hasOwnProperty;t.exports=function(t){if(!r(t))return i(t);var e=[];for(var n in Object(t))a.call(t,n)&&"constructor"!=n&&e.push(n);return e}},function(t,e,n){var r=n(116),i=n(117),a=Object.prototype.propertyIsEnumerable,o=Object.getOwnPropertySymbols,s=o?function(t){return null==t?[]:(t=Object(t),r(o(t),(function(e){return a.call(t,e)})))}:i;t.exports=s},function(t,e){t.exports=function(t,e){for(var n=-1,r=e.length,i=t.length;++n0&&a(l)?n>1?t(l,n-1,a,o,s):r(s,l):o||(s[s.length]=l)}return s}},function(t,e,n){var r=n(42);t.exports=function(t,e,n){for(var i=-1,a=t.length;++i4,u=c?1:17,l=c?8:4,h=s?0:-1,f=c?255:15;return i.default.set({r:(r>>l*(h+3)&f)*u,g:(r>>l*(h+2)&f)*u,b:(r>>l*(h+1)&f)*u,a:s?(r&f)*u/255:1},t)}}},stringify:function(t){return t.a<1?"#"+a.DEC2HEX[Math.round(t.r)]+a.DEC2HEX[Math.round(t.g)]+a.DEC2HEX[Math.round(t.b)]+r.default.unit.frac2hex(t.a):"#"+a.DEC2HEX[Math.round(t.r)]+a.DEC2HEX[Math.round(t.g)]+a.DEC2HEX[Math.round(t.b)]}};e.default=o},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(9),i=n(45),a=n(15);e.default=function(t,e,n,o){void 0===o&&(o=1);var s=i.default.set({h:r.default.channel.clamp.h(t),s:r.default.channel.clamp.s(e),l:r.default.channel.clamp.l(n),a:r.default.channel.clamp.a(o)});return a.default.stringify(s)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(29);e.default=function(t){return r.default(t,"a")}},function(t,e,n){"use 
strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(9),i=n(15);e.default=function(t){var e=i.default.parse(t),n=e.r,a=e.g,o=e.b,s=.2126*r.default.channel.toLinear(n)+.7152*r.default.channel.toLinear(a)+.0722*r.default.channel.toLinear(o);return r.default.lang.round(s)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(102);e.default=function(t){return r.default(t)>=.5}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(32);e.default=function(t,e){return r.default(t,"a",e)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(32);e.default=function(t,e){return r.default(t,"a",-e)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(15),i=n(52);e.default=function(t,e){var n=r.default.parse(t),a={};for(var o in e)e[o]&&(a[o]=n[o]+e[o]);return i.default(t,a)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(15),i=n(51);e.default=function(t,e,n){void 0===n&&(n=50);var a=r.default.parse(t),o=a.r,s=a.g,c=a.b,u=a.a,l=r.default.parse(e),h=l.r,f=l.g,d=l.b,p=l.a,g=n/100,y=2*g-1,v=u-p,m=((y*v==-1?y:(y+v)/(1+y*v))+1)/2,b=1-m,x=o*m+h*b,_=s*m+f*b,k=c*m+d*b,w=u*g+p*(1-g);return i.default(x,_,k,w)}},function(t,e,n){var r=n(53),i=n(79),a=n(58),o=n(229),s=n(235),c=n(114),u=n(115),l=n(238),h=n(239),f=n(119),d=n(240),p=n(41),g=n(244),y=n(245),v=n(124),m=n(5),b=n(39),x=n(249),_=n(11),k=n(251),w=n(30),E={};E["[object Arguments]"]=E["[object Array]"]=E["[object ArrayBuffer]"]=E["[object DataView]"]=E["[object Boolean]"]=E["[object Date]"]=E["[object Float32Array]"]=E["[object Float64Array]"]=E["[object Int8Array]"]=E["[object Int16Array]"]=E["[object Int32Array]"]=E["[object Map]"]=E["[object Number]"]=E["[object Object]"]=E["[object RegExp]"]=E["[object Set]"]=E["[object String]"]=E["[object Symbol]"]=E["[object Uint8Array]"]=E["[object Uint8ClampedArray]"]=E["[object Uint16Array]"]=E["[object 
Uint32Array]"]=!0,E["[object Error]"]=E["[object Function]"]=E["[object WeakMap]"]=!1,t.exports=function t(e,n,T,C,S,A){var M,O=1&n,D=2&n,N=4&n;if(T&&(M=S?T(e,C,S,A):T(e)),void 0!==M)return M;if(!_(e))return e;var B=m(e);if(B){if(M=g(e),!O)return u(e,M)}else{var L=p(e),P="[object Function]"==L||"[object GeneratorFunction]"==L;if(b(e))return c(e,O);if("[object Object]"==L||"[object Arguments]"==L||P&&!S){if(M=D||P?{}:v(e),!O)return D?h(e,s(M,e)):l(e,o(M,e))}else{if(!E[L])return S?e:{};M=y(e,L,O)}}A||(A=new r);var F=A.get(e);if(F)return F;A.set(e,M),k(e)?e.forEach((function(r){M.add(t(r,n,T,r,e,A))})):x(e)&&e.forEach((function(r,i){M.set(i,t(r,n,T,i,e,A))}));var I=N?D?d:f:D?keysIn:w,j=B?void 0:I(e);return i(j||e,(function(r,i){j&&(r=e[i=r]),a(M,i,t(r,n,T,i,e,A))})),M}},function(t,e,n){(function(e){var n="object"==typeof e&&e&&e.Object===Object&&e;t.exports=n}).call(this,n(211))},function(t,e){var n=Function.prototype.toString;t.exports=function(t){if(null!=t){try{return n.call(t)}catch(t){}try{return t+""}catch(t){}}return""}},function(t,e,n){var r=n(33),i=function(){try{var t=r(Object,"defineProperty");return t({},"",{}),t}catch(t){}}();t.exports=i},function(t,e,n){var r=n(230),i=n(47),a=n(5),o=n(39),s=n(60),c=n(48),u=Object.prototype.hasOwnProperty;t.exports=function(t,e){var n=a(t),l=!n&&i(t),h=!n&&!l&&o(t),f=!n&&!l&&!h&&c(t),d=n||l||h||f,p=d?r(t.length,String):[],g=p.length;for(var y in t)!e&&!u.call(t,y)||d&&("length"==y||h&&("offset"==y||"parent"==y)||f&&("buffer"==y||"byteLength"==y||"byteOffset"==y)||s(y,g))||p.push(y);return p}},function(t,e){t.exports=function(t,e){return function(n){return t(e(n))}}},function(t,e,n){(function(t){var r=n(16),i=e&&!e.nodeType&&e,a=i&&"object"==typeof t&&t&&!t.nodeType&&t,o=a&&a.exports===i?r.Buffer:void 0,s=o?o.allocUnsafe:void 0;t.exports=function(t,e){if(e)return t.slice();var n=t.length,r=s?s(n):new t.constructor(n);return t.copy(r),r}}).call(this,n(7)(t))},function(t,e){t.exports=function(t,e){var 
n=-1,r=t.length;for(e||(e=Array(r));++nl))return!1;var f=c.get(t);if(f&&c.get(e))return f==e;var d=-1,p=!0,g=2&n?new r:void 0;for(c.set(t,e),c.set(e,t);++d0&&(a=c.removeMin(),(o=s[a]).distance!==Number.POSITIVE_INFINITY);)r(a).forEach(u);return s}(t,String(e),n||a,r||function(e){return t.outEdges(e)})};var a=r.constant(1)},function(t,e,n){var r=n(10);function i(){this._arr=[],this._keyIndices={}}t.exports=i,i.prototype.size=function(){return this._arr.length},i.prototype.keys=function(){return this._arr.map((function(t){return t.key}))},i.prototype.has=function(t){return r.has(this._keyIndices,t)},i.prototype.priority=function(t){var e=this._keyIndices[t];if(void 0!==e)return this._arr[e].priority},i.prototype.min=function(){if(0===this.size())throw new Error("Queue underflow");return this._arr[0].key},i.prototype.add=function(t,e){var n=this._keyIndices;if(t=String(t),!r.has(n,t)){var i=this._arr,a=i.length;return n[t]=a,i.push({key:t,priority:e}),this._decrease(a),!0}return!1},i.prototype.removeMin=function(){this._swap(0,this._arr.length-1);var t=this._arr.pop();return delete this._keyIndices[t.key],this._heapify(0),t.key},i.prototype.decrease=function(t,e){var n=this._keyIndices[t];if(e>this._arr[n].priority)throw new Error("New priority is greater than current priority. 
Key: "+t+" Old: "+this._arr[n].priority+" New: "+e);this._arr[n].priority=e,this._decrease(n)},i.prototype._heapify=function(t){var e=this._arr,n=2*t,r=n+1,i=t;n>1].priority2?e[2]:void 0;for(u&&a(e[0],e[1],u)&&(r=1);++n1&&o.sort((function(t,e){var r=t.x-n.x,i=t.y-n.y,a=Math.sqrt(r*r+i*i),o=e.x-n.x,s=e.y-n.y,c=Math.sqrt(o*o+s*s);return aMath.abs(o)*u?(s<0&&(u=-u),n=0===s?0:u*o/s,r=u):(o<0&&(c=-c),n=c,r=0===o?0:c*s/o);return{x:i+n,y:a+r}}},function(t,e,n){t.exports=function t(e){"use strict";var n=/^\0+/g,r=/[\0\r\f]/g,i=/: */g,a=/zoo|gra/,o=/([,: ])(transform)/g,s=/,+\s*(?![^(]*[)])/g,c=/ +\s*(?![^(]*[)])/g,u=/ *[\0] */g,l=/,\r+?/g,h=/([\t\r\n ])*\f?&/g,f=/:global\(((?:[^\(\)\[\]]*|\[.*\]|\([^\(\)]*\))*)\)/g,d=/\W+/g,p=/@(k\w+)\s*(\S*)\s*/,g=/::(place)/g,y=/:(read-only)/g,v=/\s+(?=[{\];=:>])/g,m=/([[}=:>])\s+/g,b=/(\{[^{]+?);(?=\})/g,x=/\s{2,}/g,_=/([^\(])(:+) */g,k=/[svh]\w+-[tblr]{2}/,w=/\(\s*(.*)\s*\)/g,E=/([\s\S]*?);/g,T=/-self|flex-/g,C=/[^]*?(:[rp][el]a[\w-]+)[^]*/,S=/stretch|:\s*\w+\-(?:conte|avail)/,A=/([^-])(image-set\()/,M="-webkit-",O="-moz-",D="-ms-",N=1,B=1,L=0,P=1,F=1,I=1,j=0,R=0,Y=0,z=[],U=[],$=0,W=null,V=0,H=1,G="",q="",X="";function Z(t,e,i,a,o){for(var s,c,l=0,h=0,f=0,d=0,v=0,m=0,b=0,x=0,k=0,E=0,T=0,C=0,S=0,A=0,O=0,D=0,j=0,U=0,W=0,Q=i.length,it=Q-1,at="",ot="",st="",ct="",ut="",lt="";O0&&(ot=ot.replace(r,"")),ot.trim().length>0)){switch(b){case 32:case 9:case 59:case 13:case 10:break;default:ot+=i.charAt(O)}b=59}if(1===j)switch(b){case 123:case 125:case 59:case 34:case 39:case 40:case 41:case 44:j=0;case 9:case 13:case 10:case 32:break;default:for(j=0,W=O,v=b,O--,b=59;W0&&(++O,b=v);case 123:W=Q}}switch(b){case 123:for(v=(ot=ot.trim()).charCodeAt(0),T=1,W=++O;O0&&(ot=ot.replace(r,"")),m=ot.charCodeAt(1)){case 100:case 109:case 115:case 45:s=e;break;default:s=z}if(W=(st=Z(e,s,st,m,o+1)).length,Y>0&&0===W&&(W=ot.length),$>0&&(c=nt(3,st,s=J(z,ot,U),e,B,N,W,m,o,a),ot=s.join(""),void 0!==c&&0===(W=(st=c.trim()).length)&&(m=0,st="")),W>0)switch(m){case 
115:ot=ot.replace(w,et);case 100:case 109:case 45:st=ot+"{"+st+"}";break;case 107:st=(ot=ot.replace(p,"$1 $2"+(H>0?G:"")))+"{"+st+"}",st=1===F||2===F&&tt("@"+st,3)?"@"+M+st+"@"+st:"@"+st;break;default:st=ot+st,112===a&&(ct+=st,st="")}else st="";break;default:st=Z(e,J(e,ot,U),st,a,o+1)}ut+=st,C=0,j=0,A=0,D=0,U=0,S=0,ot="",st="",b=i.charCodeAt(++O);break;case 125:case 59:if((W=(ot=(D>0?ot.replace(r,""):ot).trim()).length)>1)switch(0===A&&(45===(v=ot.charCodeAt(0))||v>96&&v<123)&&(W=(ot=ot.replace(" ",":")).length),$>0&&void 0!==(c=nt(1,ot,e,t,B,N,ct.length,a,o,a))&&0===(W=(ot=c.trim()).length)&&(ot="\0\0"),v=ot.charCodeAt(0),m=ot.charCodeAt(1),v){case 0:break;case 64:if(105===m||99===m){lt+=ot+i.charAt(O);break}default:if(58===ot.charCodeAt(W-1))break;ct+=K(ot,v,m,ot.charCodeAt(2))}C=0,j=0,A=0,D=0,U=0,ot="",b=i.charCodeAt(++O)}}switch(b){case 13:case 10:if(h+d+f+l+R===0)switch(E){case 41:case 39:case 34:case 64:case 126:case 62:case 42:case 43:case 47:case 45:case 58:case 44:case 59:case 123:case 125:break;default:A>0&&(j=1)}47===h?h=0:P+C===0&&107!==a&&ot.length>0&&(D=1,ot+="\0"),$*V>0&&nt(0,ot,e,t,B,N,ct.length,a,o,a),N=1,B++;break;case 59:case 125:if(h+d+f+l===0){N++;break}default:switch(N++,at=i.charAt(O),b){case 9:case 32:if(d+l+h===0)switch(x){case 44:case 58:case 9:case 32:at="";break;default:32!==b&&(at=" ")}break;case 0:at="\\0";break;case 12:at="\\f";break;case 11:at="\\v";break;case 38:d+h+l===0&&P>0&&(U=1,D=1,at="\f"+at);break;case 108:if(d+h+l+L===0&&A>0)switch(O-A){case 2:112===x&&58===i.charCodeAt(O-3)&&(L=x);case 8:111===k&&(L=k)}break;case 58:d+h+l===0&&(A=O);break;case 44:h+f+d+l===0&&(D=1,at+="\r");break;case 34:case 39:0===h&&(d=d===b?0:0===d?b:d);break;case 91:d+h+f===0&&l++;break;case 93:d+h+f===0&&l--;break;case 41:d+h+l===0&&f--;break;case 40:if(d+h+l===0){if(0===C)switch(2*x+3*k){case 533:break;default:T=0,C=1}f++}break;case 64:h+f+d+l+A+S===0&&(S=1);break;case 42:case 47:if(d+l+f>0)break;switch(h){case 0:switch(2*b+3*i.charCodeAt(O+1)){case 
235:h=47;break;case 220:W=O,h=42}break;case 42:47===b&&42===x&&W+2!==O&&(33===i.charCodeAt(W+2)&&(ct+=i.substring(W,O+1)),at="",h=0)}}if(0===h){if(P+d+l+S===0&&107!==a&&59!==b)switch(b){case 44:case 126:case 62:case 43:case 41:case 40:if(0===C){switch(x){case 9:case 32:case 10:case 13:at+="\0";break;default:at="\0"+at+(44===b?"":"\0")}D=1}else switch(b){case 40:A+7===O&&108===x&&(A=0),C=++T;break;case 41:0==(C=--T)&&(D=1,at+="\0")}break;case 9:case 32:switch(x){case 0:case 123:case 125:case 59:case 44:case 12:case 9:case 32:case 10:case 13:break;default:0===C&&(D=1,at+="\0")}}ot+=at,32!==b&&9!==b&&(E=b)}}k=x,x=b,O++}if(W=ct.length,Y>0&&0===W&&0===ut.length&&0===e[0].length==0&&(109!==a||1===e.length&&(P>0?q:X)===e[0])&&(W=e.join(",").length+2),W>0){if(s=0===P&&107!==a?function(t){for(var e,n,i=0,a=t.length,o=Array(a);i1)){if(f=c.charCodeAt(c.length-1),d=n.charCodeAt(0),e="",0!==l)switch(f){case 42:case 126:case 62:case 43:case 32:case 40:break;default:e=" "}switch(d){case 38:n=e+q;case 126:case 62:case 43:case 32:case 41:case 40:break;case 91:n=e+n+q;break;case 58:switch(2*n.charCodeAt(1)+3*n.charCodeAt(2)){case 530:if(I>0){n=e+n.substring(8,h-1);break}default:(l<1||s[l-1].length<1)&&(n=e+q+n)}break;case 44:e="";default:n=h>1&&n.indexOf(":")>0?e+n.replace(_,"$1"+q+"$2"):e+n+q}c+=n}o[i]=c.replace(r,"").trim()}return o}(e):e,$>0&&void 0!==(c=nt(2,ct,s,t,B,N,W,a,o,a))&&0===(ct=c).length)return lt+ct+ut;if(ct=s.join(",")+"{"+ct+"}",F*L!=0){switch(2!==F||tt(ct,2)||(L=0),L){case 111:ct=ct.replace(y,":-moz-$1")+ct;break;case 112:ct=ct.replace(g,"::-webkit-input-$1")+ct.replace(g,"::-moz-$1")+ct.replace(g,":-ms-input-$1")+ct}L=0}}return lt+ct+ut}function J(t,e,n){var r=e.trim().split(l),i=r,a=r.length,o=t.length;switch(o){case 0:case 1:for(var s=0,c=0===o?"":t[0]+" ";s0&&P>0)return i.replace(f,"$1").replace(h,"$1"+X);break;default:return t.trim()+i.replace(h,"$1"+t.trim())}default:if(n*P>0&&i.indexOf("\f")>0)return 
i.replace(h,(58===t.charCodeAt(0)?"":"$1")+t.trim())}return t+i}function K(t,e,n,r){var u,l=0,h=t+";",f=2*e+3*n+4*r;if(944===f)return function(t){var e=t.length,n=t.indexOf(":",9)+1,r=t.substring(0,n).trim(),i=t.substring(n,e-1).trim();switch(t.charCodeAt(9)*H){case 0:break;case 45:if(110!==t.charCodeAt(10))break;default:var a=i.split((i="",s)),o=0;for(n=0,e=a.length;o64&&h<90||h>96&&h<123||95===h||45===h&&45!==u.charCodeAt(1)))switch(isNaN(parseFloat(u))+(-1!==u.indexOf("("))){case 1:switch(u){case"infinite":case"alternate":case"backwards":case"running":case"normal":case"forwards":case"both":case"none":case"linear":case"ease":case"ease-in":case"ease-out":case"ease-in-out":case"paused":case"reverse":case"alternate-reverse":case"inherit":case"initial":case"unset":case"step-start":case"step-end":break;default:u+=G}}l[n++]=u}i+=(0===o?"":",")+l.join(" ")}}return i=r+i+";",1===F||2===F&&tt(i,1)?M+i+i:i}(h);if(0===F||2===F&&!tt(h,1))return h;switch(f){case 1015:return 97===h.charCodeAt(10)?M+h+h:h;case 951:return 116===h.charCodeAt(3)?M+h+h:h;case 963:return 110===h.charCodeAt(5)?M+h+h:h;case 1009:if(100!==h.charCodeAt(4))break;case 969:case 942:return M+h+h;case 978:return M+h+O+h+h;case 1019:case 983:return M+h+O+h+D+h+h;case 883:return 45===h.charCodeAt(8)?M+h+h:h.indexOf("image-set(",11)>0?h.replace(A,"$1-webkit-$2")+h:h;case 932:if(45===h.charCodeAt(4))switch(h.charCodeAt(5)){case 103:return M+"box-"+h.replace("-grow","")+M+h+D+h.replace("grow","positive")+h;case 115:return M+h+D+h.replace("shrink","negative")+h;case 98:return M+h+D+h.replace("basis","preferred-size")+h}return M+h+D+h+h;case 964:return M+h+D+"flex-"+h+h;case 1023:if(99!==h.charCodeAt(8))break;return u=h.substring(h.indexOf(":",15)).replace("flex-","").replace("space-between","justify"),M+"box-pack"+u+M+h+D+"flex-pack"+u+h;case 1005:return a.test(h)?h.replace(i,":"+M)+h.replace(i,":"+O)+h:h;case 1e3:switch(l=(u=h.substring(13).trim()).indexOf("-")+1,u.charCodeAt(0)+u.charCodeAt(l)){case 
226:u=h.replace(k,"tb");break;case 232:u=h.replace(k,"tb-rl");break;case 220:u=h.replace(k,"lr");break;default:return h}return M+h+D+u+h;case 1017:if(-1===h.indexOf("sticky",9))return h;case 975:switch(l=(h=t).length-10,f=(u=(33===h.charCodeAt(l)?h.substring(0,l):h).substring(t.indexOf(":",7)+1).trim()).charCodeAt(0)+(0|u.charCodeAt(7))){case 203:if(u.charCodeAt(8)<111)break;case 115:h=h.replace(u,M+u)+";"+h;break;case 207:case 102:h=h.replace(u,M+(f>102?"inline-":"")+"box")+";"+h.replace(u,M+u)+";"+h.replace(u,D+u+"box")+";"+h}return h+";";case 938:if(45===h.charCodeAt(5))switch(h.charCodeAt(6)){case 105:return u=h.replace("-items",""),M+h+M+"box-"+u+D+"flex-"+u+h;case 115:return M+h+D+"flex-item-"+h.replace(T,"")+h;default:return M+h+D+"flex-line-pack"+h.replace("align-content","").replace(T,"")+h}break;case 973:case 989:if(45!==h.charCodeAt(3)||122===h.charCodeAt(4))break;case 931:case 953:if(!0===S.test(t))return 115===(u=t.substring(t.indexOf(":")+1)).charCodeAt(0)?K(t.replace("stretch","fill-available"),e,n,r).replace(":fill-available",":stretch"):h.replace(u,M+u)+h.replace(u,O+u.replace("fill-",""))+h;break;case 962:if(h=M+h+(102===h.charCodeAt(5)?D+h:"")+h,n+r===211&&105===h.charCodeAt(13)&&h.indexOf("transform",10)>0)return h.substring(0,h.indexOf(";",27)+1).replace(o,"$1-webkit-$2")+h}return h}function tt(t,e){var n=t.indexOf(1===e?":":"{"),r=t.substring(0,3!==e?n:10),i=t.substring(n+1,t.length-1);return W(2!==e?r:r.replace(C,"$1"),i,e)}function et(t,e){var n=K(e,e.charCodeAt(0),e.charCodeAt(1),e.charCodeAt(2));return n!==e+";"?n.replace(E," or ($1)").substring(4):"("+e+")"}function nt(t,e,n,r,i,a,o,s,c,u){for(var l,h=0,f=e;h<$;++h)switch(l=U[h].call(at,t,f,n,r,i,a,o,s,c,u)){case void 0:case!1:case!0:case null:break;default:f=l}if(f!==e)return f}function rt(t,e,n,r){for(var i=e+1;i0&&(G=i.replace(d,91===a?"":"-")),a=1,1===P?X=i:q=i;var o,s=[X];$>0&&void 0!==(o=nt(-1,n,s,s,B,N,0,0,0,0))&&"string"==typeof o&&(n=o);var c=Z(z,s,n,0,0);return $>0&&void 
0!==(o=nt(-2,c,s,s,B,N,c.length,0,0,0))&&"string"!=typeof(c=o)&&(a=0),G="",X="",q="",L=0,B=1,N=1,j*a==0?c:function(t){return t.replace(r,"").replace(v,"").replace(m,"$1").replace(b,"$1").replace(x," ")}(c)}return at.use=function t(e){switch(e){case void 0:case null:$=U.length=0;break;default:if("function"==typeof e)U[$++]=e;else if("object"==typeof e)for(var n=0,r=e.length;n=255?255:t<0?0:t},g:function(t){return t>=255?255:t<0?0:t},b:function(t){return t>=255?255:t<0?0:t},h:function(t){return t%360},s:function(t){return t>=100?100:t<0?0:t},l:function(t){return t>=100?100:t<0?0:t},a:function(t){return t>=1?1:t<0?0:t}},toLinear:function(t){var e=t/255;return t>.03928?Math.pow((e+.055)/1.055,2.4):e/12.92},hue2rgb:function(t,e,n){return n<0&&(n+=1),n>1&&(n-=1),n<1/6?t+6*(e-t)*n:n<.5?e:n<2/3?t+(e-t)*(2/3-n)*6:t},hsl2rgb:function(t,e){var n=t.h,i=t.s,a=t.l;if(100===i)return 2.55*a;n/=360,i/=100;var o=(a/=100)<.5?a*(1+i):a+i-a*i,s=2*a-o;switch(e){case"r":return 255*r.hue2rgb(s,o,n+1/3);case"g":return 255*r.hue2rgb(s,o,n);case"b":return 255*r.hue2rgb(s,o,n-1/3)}},rgb2hsl:function(t,e){var n=t.r,r=t.g,i=t.b;n/=255,r/=255,i/=255;var a=Math.max(n,r,i),o=Math.min(n,r,i),s=(a+o)/2;if("l"===e)return 100*s;if(a===o)return 0;var c=a-o;if("s"===e)return 100*(s>.5?c/(2-a-o):c/(a+o));switch(a){case n:return 60*((r-i)/c+(r1?e:"0"+e},dec2hex:function(t){var e=Math.round(t).toString(16);return e.length>1?e:"0"+e}};e.default=r},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(9),i=n(75),a=n(177),o=function(){function t(t,e){this.color=e,this.changed=!1,this.data=t,this.type=new a.default}return t.prototype.set=function(t,e){return this.color=e,this.changed=!1,this.data=t,this.type.type=i.TYPE.ALL,this},t.prototype._ensureHSL=function(){void 0===this.data.h&&(this.data.h=r.default.channel.rgb2hsl(this.data,"h")),void 0===this.data.s&&(this.data.s=r.default.channel.rgb2hsl(this.data,"s")),void 
0===this.data.l&&(this.data.l=r.default.channel.rgb2hsl(this.data,"l"))},t.prototype._ensureRGB=function(){void 0===this.data.r&&(this.data.r=r.default.channel.hsl2rgb(this.data,"r")),void 0===this.data.g&&(this.data.g=r.default.channel.hsl2rgb(this.data,"g")),void 0===this.data.b&&(this.data.b=r.default.channel.hsl2rgb(this.data,"b"))},Object.defineProperty(t.prototype,"r",{get:function(){return this.type.is(i.TYPE.HSL)||void 0===this.data.r?(this._ensureHSL(),r.default.channel.hsl2rgb(this.data,"r")):this.data.r},set:function(t){this.type.set(i.TYPE.RGB),this.changed=!0,this.data.r=t},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"g",{get:function(){return this.type.is(i.TYPE.HSL)||void 0===this.data.g?(this._ensureHSL(),r.default.channel.hsl2rgb(this.data,"g")):this.data.g},set:function(t){this.type.set(i.TYPE.RGB),this.changed=!0,this.data.g=t},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"b",{get:function(){return this.type.is(i.TYPE.HSL)||void 0===this.data.b?(this._ensureHSL(),r.default.channel.hsl2rgb(this.data,"b")):this.data.b},set:function(t){this.type.set(i.TYPE.RGB),this.changed=!0,this.data.b=t},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"h",{get:function(){return this.type.is(i.TYPE.RGB)||void 0===this.data.h?(this._ensureRGB(),r.default.channel.rgb2hsl(this.data,"h")):this.data.h},set:function(t){this.type.set(i.TYPE.HSL),this.changed=!0,this.data.h=t},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"s",{get:function(){return this.type.is(i.TYPE.RGB)||void 0===this.data.s?(this._ensureRGB(),r.default.channel.rgb2hsl(this.data,"s")):this.data.s},set:function(t){this.type.set(i.TYPE.HSL),this.changed=!0,this.data.s=t},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"l",{get:function(){return this.type.is(i.TYPE.RGB)||void 
0===this.data.l?(this._ensureRGB(),r.default.channel.rgb2hsl(this.data,"l")):this.data.l},set:function(t){this.type.set(i.TYPE.HSL),this.changed=!0,this.data.l=t},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"a",{get:function(){return this.data.a},set:function(t){this.changed=!0,this.data.a=t},enumerable:!0,configurable:!0}),t}();e.default=o},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(75),i=function(){function t(){this.type=r.TYPE.ALL}return t.prototype.get=function(){return this.type},t.prototype.set=function(t){if(this.type&&this.type!==t)throw new Error("Cannot change both RGB and HSL channels at the same time");this.type=t},t.prototype.reset=function(){this.type=r.TYPE.ALL},t.prototype.is=function(t){return this.type===t},t}();e.default=i},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(9),i={};e.DEC2HEX=i;for(var a=0;a<=255;a++)i[a]=r.default.unit.dec2hex(a)},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var 
r=n(99),i={colors:{aliceblue:"#f0f8ff",antiquewhite:"#faebd7",aqua:"#00ffff",aquamarine:"#7fffd4",azure:"#f0ffff",beige:"#f5f5dc",bisque:"#ffe4c4",black:"#000000",blanchedalmond:"#ffebcd",blue:"#0000ff",blueviolet:"#8a2be2",brown:"#a52a2a",burlywood:"#deb887",cadetblue:"#5f9ea0",chartreuse:"#7fff00",chocolate:"#d2691e",coral:"#ff7f50",cornflowerblue:"#6495ed",cornsilk:"#fff8dc",crimson:"#dc143c",cyanaqua:"#00ffff",darkblue:"#00008b",darkcyan:"#008b8b",darkgoldenrod:"#b8860b",darkgray:"#a9a9a9",darkgreen:"#006400",darkgrey:"#a9a9a9",darkkhaki:"#bdb76b",darkmagenta:"#8b008b",darkolivegreen:"#556b2f",darkorange:"#ff8c00",darkorchid:"#9932cc",darkred:"#8b0000",darksalmon:"#e9967a",darkseagreen:"#8fbc8f",darkslateblue:"#483d8b",darkslategray:"#2f4f4f",darkslategrey:"#2f4f4f",darkturquoise:"#00ced1",darkviolet:"#9400d3",deeppink:"#ff1493",deepskyblue:"#00bfff",dimgray:"#696969",dimgrey:"#696969",dodgerblue:"#1e90ff",firebrick:"#b22222",floralwhite:"#fffaf0",forestgreen:"#228b22",fuchsia:"#ff00ff",gainsboro:"#dcdcdc",ghostwhite:"#f8f8ff",gold:"#ffd700",goldenrod:"#daa520",gray:"#808080",green:"#008000",greenyellow:"#adff2f",grey:"#808080",honeydew:"#f0fff0",hotpink:"#ff69b4",indianred:"#cd5c5c",indigo:"#4b0082",ivory:"#fffff0",khaki:"#f0e68c",lavender:"#e6e6fa",lavenderblush:"#fff0f5",lawngreen:"#7cfc00",lemonchiffon:"#fffacd",lightblue:"#add8e6",lightcoral:"#f08080",lightcyan:"#e0ffff",lightgoldenrodyellow:"#fafad2",lightgray:"#d3d3d3",lightgreen:"#90ee90",lightgrey:"#d3d3d3",lightpink:"#ffb6c1",lightsalmon:"#ffa07a",lightseagreen:"#20b2aa",lightskyblue:"#87cefa",lightslategray:"#778899",lightslategrey:"#778899",lightsteelblue:"#b0c4de",lightyellow:"#ffffe0",lime:"#00ff00",limegreen:"#32cd32",linen:"#faf0e6",magenta:"#ff00ff",maroon:"#800000",mediumaquamarine:"#66cdaa",mediumblue:"#0000cd",mediumorchid:"#ba55d3",mediumpurple:"#9370db",mediumseagreen:"#3cb371",mediumslateblue:"#7b68ee",mediumspringgreen:"#00fa9a",mediumturquoise:"#48d1cc",mediumvioletred:"#c71585",midnight
blue:"#191970",mintcream:"#f5fffa",mistyrose:"#ffe4e1",moccasin:"#ffe4b5",navajowhite:"#ffdead",navy:"#000080",oldlace:"#fdf5e6",olive:"#808000",olivedrab:"#6b8e23",orange:"#ffa500",orangered:"#ff4500",orchid:"#da70d6",palegoldenrod:"#eee8aa",palegreen:"#98fb98",paleturquoise:"#afeeee",palevioletred:"#db7093",papayawhip:"#ffefd5",peachpuff:"#ffdab9",peru:"#cd853f",pink:"#ffc0cb",plum:"#dda0dd",powderblue:"#b0e0e6",purple:"#800080",rebeccapurple:"#663399",red:"#ff0000",rosybrown:"#bc8f8f",royalblue:"#4169e1",saddlebrown:"#8b4513",salmon:"#fa8072",sandybrown:"#f4a460",seagreen:"#2e8b57",seashell:"#fff5ee",sienna:"#a0522d",silver:"#c0c0c0",skyblue:"#87ceeb",slateblue:"#6a5acd",slategray:"#708090",slategrey:"#708090",snow:"#fffafa",springgreen:"#00ff7f",tan:"#d2b48c",teal:"#008080",thistle:"#d8bfd8",transparent:"#00000000",turquoise:"#40e0d0",violet:"#ee82ee",wheat:"#f5deb3",white:"#ffffff",whitesmoke:"#f5f5f5",yellow:"#ffff00",yellowgreen:"#9acd32"},parse:function(t){t=t.toLowerCase();var e=i.colors[t];if(e)return r.default.parse(e)},stringify:function(t){var e=r.default.stringify(t);for(var n in i.colors)if(i.colors[n]===e)return n}};e.default=i},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(9),i=n(45),a={re:/^rgba?\(\s*?(-?(?:\d+(?:\.\d+)?|(?:\.\d+))(?:e\d+)?(%?))\s*?(?:,|\s)\s*?(-?(?:\d+(?:\.\d+)?|(?:\.\d+))(?:e\d+)?(%?))\s*?(?:,|\s)\s*?(-?(?:\d+(?:\.\d+)?|(?:\.\d+))(?:e\d+)?(%?))(?:\s*?(?:,|\/)\s*?\+?(-?(?:\d+(?:\.\d+)?|(?:\.\d+))(?:e\d+)?(%?)))?\s*?\)$/i,parse:function(t){var e=t.charCodeAt(0);if(114===e||82===e){var n=t.match(a.re);if(n){var o=n[1],s=n[2],c=n[3],u=n[4],l=n[5],h=n[6],f=n[7],d=n[8];return i.default.set({r:r.default.channel.clamp.r(s?2.55*parseFloat(o):parseFloat(o)),g:r.default.channel.clamp.g(u?2.55*parseFloat(c):parseFloat(c)),b:r.default.channel.clamp.b(h?2.55*parseFloat(l):parseFloat(l)),a:f?r.default.channel.clamp.a(d?parseFloat(f)/100:parseFloat(f)):1},t)}}},stringify:function(t){return 
t.a<1?"rgba("+r.default.lang.round(t.r)+", "+r.default.lang.round(t.g)+", "+r.default.lang.round(t.b)+", "+r.default.lang.round(t.a)+")":"rgb("+r.default.lang.round(t.r)+", "+r.default.lang.round(t.g)+", "+r.default.lang.round(t.b)+")"}};e.default=a},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(9),i=n(45),a={re:/^hsla?\(\s*?(-?(?:\d+(?:\.\d+)?|(?:\.\d+))(?:e-?\d+)?(?:deg|grad|rad|turn)?)\s*?(?:,|\s)\s*?(-?(?:\d+(?:\.\d+)?|(?:\.\d+))(?:e-?\d+)?%)\s*?(?:,|\s)\s*?(-?(?:\d+(?:\.\d+)?|(?:\.\d+))(?:e-?\d+)?%)(?:\s*?(?:,|\/)\s*?\+?(-?(?:\d+(?:\.\d+)?|(?:\.\d+))(?:e-?\d+)?(%)?))?\s*?\)$/i,hueRe:/^(.+?)(deg|grad|rad|turn)$/i,_hue2deg:function(t){var e=t.match(a.hueRe);if(e){var n=e[1];switch(e[2]){case"grad":return r.default.channel.clamp.h(.9*parseFloat(n));case"rad":return r.default.channel.clamp.h(180*parseFloat(n)/Math.PI);case"turn":return r.default.channel.clamp.h(360*parseFloat(n))}}return r.default.channel.clamp.h(parseFloat(t))},parse:function(t){var e=t.charCodeAt(0);if(104===e||72===e){var n=t.match(a.re);if(n){var o=n[1],s=n[2],c=n[3],u=n[4],l=n[5];return i.default.set({h:a._hue2deg(o),s:r.default.channel.clamp.s(parseFloat(s)),l:r.default.channel.clamp.l(parseFloat(c)),a:u?r.default.channel.clamp.a(l?parseFloat(u)/100:parseFloat(u)):1},t)}}},stringify:function(t){return t.a<1?"hsla("+r.default.lang.round(t.h)+", "+r.default.lang.round(t.s)+"%, "+r.default.lang.round(t.l)+"%, "+t.a+")":"hsl("+r.default.lang.round(t.h)+", "+r.default.lang.round(t.s)+"%, "+r.default.lang.round(t.l)+"%)"}};e.default=a},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(29);e.default=function(t){return r.default(t,"r")}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(29);e.default=function(t){return r.default(t,"g")}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(29);e.default=function(t){return 
r.default(t,"b")}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(29);e.default=function(t){return r.default(t,"h")}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(29);e.default=function(t){return r.default(t,"s")}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(29);e.default=function(t){return r.default(t,"l")}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(103);e.default=function(t){return!r.default(t)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(15);e.default=function(t){try{return r.default.parse(t),!0}catch(t){return!1}}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(32);e.default=function(t,e){return r.default(t,"s",e)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(32);e.default=function(t,e){return r.default(t,"s",-e)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(32);e.default=function(t,e){return r.default(t,"l",e)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(32);e.default=function(t,e){return r.default(t,"l",-e)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(32);e.default=function(t){return r.default(t,"h",180)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(52);e.default=function(t){return r.default(t,{s:0})}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(15),i=n(107);e.default=function(t,e){void 0===e&&(e=100);var n=r.default.parse(t);return n.r=255-n.r,n.g=255-n.g,n.b=255-n.b,i.default(n,t,e)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(9),i=n(15),a=n(106);e.default=function(t,e){var n,o,s,c=i.default.parse(t),u={};for(var l in 
e)u[l]=(n=c[l],o=e[l],s=r.default.channel.max[l],o>0?(s-n)*o/100:n*o/100);return a.default(t,u)}},function(t,e,n){t.exports={Graph:n(76),version:n(300)}},function(t,e,n){var r=n(108);t.exports=function(t){return r(t,4)}},function(t,e){t.exports=function(){this.__data__=[],this.size=0}},function(t,e,n){var r=n(55),i=Array.prototype.splice;t.exports=function(t){var e=this.__data__,n=r(e,t);return!(n<0)&&(n==e.length-1?e.pop():i.call(e,n,1),--this.size,!0)}},function(t,e,n){var r=n(55);t.exports=function(t){var e=this.__data__,n=r(e,t);return n<0?void 0:e[n][1]}},function(t,e,n){var r=n(55);t.exports=function(t){return r(this.__data__,t)>-1}},function(t,e,n){var r=n(55);t.exports=function(t,e){var n=this.__data__,i=r(n,t);return i<0?(++this.size,n.push([t,e])):n[i][1]=e,this}},function(t,e,n){var r=n(54);t.exports=function(){this.__data__=new r,this.size=0}},function(t,e){t.exports=function(t){var e=this.__data__,n=e.delete(t);return this.size=e.size,n}},function(t,e){t.exports=function(t){return this.__data__.get(t)}},function(t,e){t.exports=function(t){return this.__data__.has(t)}},function(t,e,n){var r=n(54),i=n(77),a=n(78);t.exports=function(t,e){var n=this.__data__;if(n instanceof r){var o=n.__data__;if(!i||o.length<199)return o.push([t,e]),this.size=++n.size,this;n=this.__data__=new a(o)}return n.set(t,e),this.size=n.size,this}},function(t,e,n){var r=n(37),i=n(214),a=n(11),o=n(110),s=/^\[object .+?Constructor\]$/,c=Function.prototype,u=Object.prototype,l=c.toString,h=u.hasOwnProperty,f=RegExp("^"+l.call(h).replace(/[\\^$.*+?()[\]{}|]/g,"\\$&").replace(/hasOwnProperty|(function).*?(?=\\\()| for .+?(?=\\\])/g,"$1.*?")+"$");t.exports=function(t){return!(!a(t)||i(t))&&(r(t)?f:s).test(o(t))}},function(t,e){var n;n=function(){return this}();try{n=n||new Function("return this")()}catch(t){"object"==typeof window&&(n=window)}t.exports=n},function(t,e,n){var r=n(38),i=Object.prototype,a=i.hasOwnProperty,o=i.toString,s=r?r.toStringTag:void 0;t.exports=function(t){var 
e=a.call(t,s),n=t[s];try{t[s]=void 0;var r=!0}catch(t){}var i=o.call(t);return r&&(e?t[s]=n:delete t[s]),i}},function(t,e){var n=Object.prototype.toString;t.exports=function(t){return n.call(t)}},function(t,e,n){var r,i=n(215),a=(r=/[^.]+$/.exec(i&&i.keys&&i.keys.IE_PROTO||""))?"Symbol(src)_1."+r:"";t.exports=function(t){return!!a&&a in t}},function(t,e,n){var r=n(16)["__core-js_shared__"];t.exports=r},function(t,e){t.exports=function(t,e){return null==t?void 0:t[e]}},function(t,e,n){var r=n(218),i=n(54),a=n(77);t.exports=function(){this.size=0,this.__data__={hash:new r,map:new(a||i),string:new r}}},function(t,e,n){var r=n(219),i=n(220),a=n(221),o=n(222),s=n(223);function c(t){var e=-1,n=null==t?0:t.length;for(this.clear();++e0){if(++e>=800)return arguments[0]}else e=0;return t.apply(void 0,arguments)}}},function(t,e,n){var r=n(131),i=n(292),a=n(296),o=n(132),s=n(297),c=n(90);t.exports=function(t,e,n){var u=-1,l=i,h=t.length,f=!0,d=[],p=d;if(n)f=!1,l=a;else if(h>=200){var g=e?null:s(t);if(g)return c(g);f=!1,l=o,p=new r}else p=e?[]:d;t:for(;++u-1}},function(t,e,n){var r=n(145),i=n(294),a=n(295);t.exports=function(t,e,n){return e==e?a(t,e,n):r(t,i,n)}},function(t,e){t.exports=function(t){return t!=t}},function(t,e){t.exports=function(t,e,n){for(var r=n-1,i=t.length;++r1||1===e.length&&t.hasEdge(e[0],e[0])}))}},function(t,e,n){var r=n(10);t.exports=function(t,e,n){return function(t,e,n){var r={},i=t.nodes();return i.forEach((function(t){r[t]={},r[t][t]={distance:0},i.forEach((function(e){t!==e&&(r[t][e]={distance:Number.POSITIVE_INFINITY})})),n(t).forEach((function(n){var i=n.v===t?n.w:n.v,a=e(n);r[t][i]={distance:a,predecessor:t}}))})),i.forEach((function(t){var e=r[t];i.forEach((function(n){var a=r[n];i.forEach((function(n){var r=a[t],i=e[n],o=a[n],s=r.distance+i.distance;s0;){if(n=c.removeMin(),r.has(s,n))o.setEdge(n,s[n]);else{if(l)throw new Error("Input graph is not connected: "+t);l=!0}t.nodeEdges(n).forEach(u)}return o}},function(t,e,n){var 
r;try{r=n(3)}catch(t){}r||(r=window.graphlib),t.exports=r},function(t,e,n){"use strict";var r=n(4),i=n(345),a=n(348),o=n(349),s=n(8).normalizeRanks,c=n(351),u=n(8).removeEmptyRanks,l=n(352),h=n(353),f=n(354),d=n(355),p=n(364),g=n(8),y=n(17).Graph;t.exports=function(t,e){var n=e&&e.debugTiming?g.time:g.notime;n("layout",(function(){var e=n(" buildLayoutGraph",(function(){return function(t){var e=new y({multigraph:!0,compound:!0}),n=C(t.graph());return e.setGraph(r.merge({},m,T(n,v),r.pick(n,b))),r.forEach(t.nodes(),(function(n){var i=C(t.node(n));e.setNode(n,r.defaults(T(i,x),_)),e.setParent(n,t.parent(n))})),r.forEach(t.edges(),(function(n){var i=C(t.edge(n));e.setEdge(n,r.merge({},w,T(i,k),r.pick(i,E)))})),e}(t)}));n(" runLayout",(function(){!function(t,e){e(" makeSpaceForEdgeLabels",(function(){!function(t){var e=t.graph();e.ranksep/=2,r.forEach(t.edges(),(function(n){var r=t.edge(n);r.minlen*=2,"c"!==r.labelpos.toLowerCase()&&("TB"===e.rankdir||"BT"===e.rankdir?r.width+=r.labeloffset:r.height+=r.labeloffset)}))}(t)})),e(" removeSelfEdges",(function(){!function(t){r.forEach(t.edges(),(function(e){if(e.v===e.w){var n=t.node(e.v);n.selfEdges||(n.selfEdges=[]),n.selfEdges.push({e:e,label:t.edge(e)}),t.removeEdge(e)}}))}(t)})),e(" acyclic",(function(){i.run(t)})),e(" nestingGraph.run",(function(){l.run(t)})),e(" rank",(function(){o(g.asNonCompoundGraph(t))})),e(" injectEdgeLabelProxies",(function(){!function(t){r.forEach(t.edges(),(function(e){var n=t.edge(e);if(n.width&&n.height){var r=t.node(e.v),i={rank:(t.node(e.w).rank-r.rank)/2+r.rank,e:e};g.addDummyNode(t,"edge-proxy",i,"_ep")}}))}(t)})),e(" removeEmptyRanks",(function(){u(t)})),e(" nestingGraph.cleanup",(function(){l.cleanup(t)})),e(" normalizeRanks",(function(){s(t)})),e(" assignRankMinMax",(function(){!function(t){var e=0;r.forEach(t.nodes(),(function(n){var 
i=t.node(n);i.borderTop&&(i.minRank=t.node(i.borderTop).rank,i.maxRank=t.node(i.borderBottom).rank,e=r.max(e,i.maxRank))})),t.graph().maxRank=e}(t)})),e(" removeEdgeLabelProxies",(function(){!function(t){r.forEach(t.nodes(),(function(e){var n=t.node(e);"edge-proxy"===n.dummy&&(t.edge(n.e).labelRank=n.rank,t.removeNode(e))}))}(t)})),e(" normalize.run",(function(){a.run(t)})),e(" parentDummyChains",(function(){c(t)})),e(" addBorderSegments",(function(){h(t)})),e(" order",(function(){d(t)})),e(" insertSelfEdges",(function(){!function(t){var e=g.buildLayerMatrix(t);r.forEach(e,(function(e){var n=0;r.forEach(e,(function(e,i){var a=t.node(e);a.order=i+n,r.forEach(a.selfEdges,(function(e){g.addDummyNode(t,"selfedge",{width:e.label.width,height:e.label.height,rank:a.rank,order:i+ ++n,e:e.e,label:e.label},"_se")})),delete a.selfEdges}))}))}(t)})),e(" adjustCoordinateSystem",(function(){f.adjust(t)})),e(" position",(function(){p(t)})),e(" positionSelfEdges",(function(){!function(t){r.forEach(t.nodes(),(function(e){var n=t.node(e);if("selfedge"===n.dummy){var r=t.node(n.e.v),i=r.x+r.width/2,a=r.y,o=n.x-i,s=r.height/2;t.setEdge(n.e,n.label),t.removeNode(e),n.label.points=[{x:i+2*o/3,y:a-s},{x:i+5*o/6,y:a-s},{x:i+o,y:a},{x:i+5*o/6,y:a+s},{x:i+2*o/3,y:a+s}],n.label.x=n.x,n.label.y=n.y}}))}(t)})),e(" removeBorderNodes",(function(){!function(t){r.forEach(t.nodes(),(function(e){if(t.children(e).length){var n=t.node(e),i=t.node(n.borderTop),a=t.node(n.borderBottom),o=t.node(r.last(n.borderLeft)),s=t.node(r.last(n.borderRight));n.width=Math.abs(s.x-o.x),n.height=Math.abs(a.y-i.y),n.x=o.x+n.width/2,n.y=i.y+n.height/2}})),r.forEach(t.nodes(),(function(e){"border"===t.node(e).dummy&&t.removeNode(e)}))}(t)})),e(" normalize.undo",(function(){a.undo(t)})),e(" fixupEdgeLabelCoords",(function(){!function(t){r.forEach(t.edges(),(function(e){var 
n=t.edge(e);if(r.has(n,"x"))switch("l"!==n.labelpos&&"r"!==n.labelpos||(n.width-=n.labeloffset),n.labelpos){case"l":n.x-=n.width/2+n.labeloffset;break;case"r":n.x+=n.width/2+n.labeloffset}}))}(t)})),e(" undoCoordinateSystem",(function(){f.undo(t)})),e(" translateGraph",(function(){!function(t){var e=Number.POSITIVE_INFINITY,n=0,i=Number.POSITIVE_INFINITY,a=0,o=t.graph(),s=o.marginx||0,c=o.marginy||0;function u(t){var r=t.x,o=t.y,s=t.width,c=t.height;e=Math.min(e,r-s/2),n=Math.max(n,r+s/2),i=Math.min(i,o-c/2),a=Math.max(a,o+c/2)}r.forEach(t.nodes(),(function(e){u(t.node(e))})),r.forEach(t.edges(),(function(e){var n=t.edge(e);r.has(n,"x")&&u(n)})),e-=s,i-=c,r.forEach(t.nodes(),(function(n){var r=t.node(n);r.x-=e,r.y-=i})),r.forEach(t.edges(),(function(n){var a=t.edge(n);r.forEach(a.points,(function(t){t.x-=e,t.y-=i})),r.has(a,"x")&&(a.x-=e),r.has(a,"y")&&(a.y-=i)})),o.width=n-e+s,o.height=a-i+c}(t)})),e(" assignNodeIntersects",(function(){!function(t){r.forEach(t.edges(),(function(e){var n,r,i=t.edge(e),a=t.node(e.v),o=t.node(e.w);i.points?(n=i.points[0],r=i.points[i.points.length-1]):(i.points=[],n=o,r=a),i.points.unshift(g.intersectRect(a,n)),i.points.push(g.intersectRect(o,r))}))}(t)})),e(" reversePoints",(function(){!function(t){r.forEach(t.edges(),(function(e){var n=t.edge(e);n.reversed&&n.points.reverse()}))}(t)})),e(" acyclic.undo",(function(){i.undo(t)}))}(e,n)})),n(" updateInputGraph",(function(){!function(t,e){r.forEach(t.nodes(),(function(n){var r=t.node(n),i=e.node(n);r&&(r.x=i.x,r.y=i.y,e.children(n).length&&(r.width=i.width,r.height=i.height))})),r.forEach(t.edges(),(function(n){var i=t.edge(n),a=e.edge(n);i.points=a.points,r.has(a,"x")&&(i.x=a.x,i.y=a.y)})),t.graph().width=e.graph().width,t.graph().height=e.graph().height}(t,e)}))}))};var 
v=["nodesep","edgesep","ranksep","marginx","marginy"],m={ranksep:50,edgesep:20,nodesep:50,rankdir:"tb"},b=["acyclicer","ranker","rankdir","align"],x=["width","height"],_={width:0,height:0},k=["minlen","weight","width","height","labeloffset"],w={minlen:1,weight:1,width:0,height:0,labeloffset:10,labelpos:"r"},E=["labelpos"];function T(t,e){return r.mapValues(r.pick(t,e),Number)}function C(t){var e={};return r.forEach(t,(function(t,n){e[n.toLowerCase()]=t})),e}},function(t,e,n){var r=n(108);t.exports=function(t){return r(t,5)}},function(t,e,n){var r=n(315)(n(316));t.exports=r},function(t,e,n){var r=n(25),i=n(24),a=n(30);t.exports=function(t){return function(e,n,o){var s=Object(e);if(!i(e)){var c=r(n,3);e=a(e),n=function(t){return c(s[t],t,s)}}var u=t(e,n,o);return u>-1?s[c?e[u]:u]:void 0}}},function(t,e,n){var r=n(145),i=n(25),a=n(317),o=Math.max;t.exports=function(t,e,n){var s=null==t?0:t.length;if(!s)return-1;var c=null==n?0:a(n);return c<0&&(c=o(s+c,0)),r(t,i(e,3),c)}},function(t,e,n){var r=n(155);t.exports=function(t){var e=r(t),n=e%1;return e==e?n?e-n:e:0}},function(t,e,n){var r=n(11),i=n(42),a=/^\s+|\s+$/g,o=/^[-+]0x[0-9a-f]+$/i,s=/^0b[01]+$/i,c=/^0o[0-7]+$/i,u=parseInt;t.exports=function(t){if("number"==typeof t)return t;if(i(t))return NaN;if(r(t)){var e="function"==typeof t.valueOf?t.valueOf():t;t=r(e)?e+"":e}if("string"!=typeof t)return 0===t?t:+t;t=t.replace(a,"");var n=s.test(t);return n||c.test(t)?u(t.slice(2),n?2:8):o.test(t)?NaN:+t}},function(t,e,n){var r=n(89),i=n(127),a=n(40);t.exports=function(t,e){return null==t?t:r(t,i(e),a)}},function(t,e){t.exports=function(t){var e=null==t?0:t.length;return e?t[e-1]:void 0}},function(t,e,n){var r=n(59),i=n(88),a=n(25);t.exports=function(t,e){var n={};return e=a(e,3),i(t,(function(t,i,a){r(n,i,e(t,i,a))})),n}},function(t,e,n){var r=n(95),i=n(323),a=n(35);t.exports=function(t){return t&&t.length?r(t,a,i):void 0}},function(t,e){t.exports=function(t,e){return t>e}},function(t,e,n){var 
r=n(325),i=n(328)((function(t,e,n){r(t,e,n)}));t.exports=i},function(t,e,n){var r=n(53),i=n(157),a=n(89),o=n(326),s=n(11),c=n(40),u=n(159);t.exports=function t(e,n,l,h,f){e!==n&&a(n,(function(a,c){if(f||(f=new r),s(a))o(e,n,c,l,t,h,f);else{var d=h?h(u(e,c),a,c+"",e,n,f):void 0;void 0===d&&(d=a),i(e,c,d)}}),c)}},function(t,e,n){var r=n(157),i=n(114),a=n(123),o=n(115),s=n(124),c=n(47),u=n(5),l=n(146),h=n(39),f=n(37),d=n(11),p=n(158),g=n(48),y=n(159),v=n(327);t.exports=function(t,e,n,m,b,x,_){var k=y(t,n),w=y(e,n),E=_.get(w);if(E)r(t,n,E);else{var T=x?x(k,w,n+"",t,e,_):void 0,C=void 0===T;if(C){var S=u(w),A=!S&&h(w),M=!S&&!A&&g(w);T=w,S||A||M?u(k)?T=k:l(k)?T=o(k):A?(C=!1,T=i(w,!0)):M?(C=!1,T=a(w,!0)):T=[]:p(w)||c(w)?(T=k,c(k)?T=v(k):d(k)&&!f(k)||(T=s(w))):C=!1}C&&(_.set(w,T),b(T,w,m,x,_),_.delete(w)),r(t,n,T)}}},function(t,e,n){var r=n(46),i=n(40);t.exports=function(t){return r(t,i(t))}},function(t,e,n){var r=n(67),i=n(68);t.exports=function(t){return r((function(e,n){var r=-1,a=n.length,o=a>1?n[a-1]:void 0,s=a>2?n[2]:void 0;for(o=t.length>3&&"function"==typeof o?(a--,o):void 0,s&&i(n[0],n[1],s)&&(o=a<3?void 0:o,a=1),e=Object(e);++r1&&o(t,e[0],e[1])?e=[]:n>2&&o(e[0],e[1],e[2])&&(e=[e[0]]),i(t,r(e,1),[])}));t.exports=s},function(t,e,n){var r=n(66),i=n(25),a=n(141),o=n(340),s=n(61),c=n(341),u=n(35);t.exports=function(t,e,n){var l=-1;e=r(e.length?e:[u],s(i));var h=a(t,(function(t,n,i){return{criteria:r(e,(function(e){return e(t)})),index:++l,value:t}}));return o(h,(function(t,e){return c(t,e,n)}))}},function(t,e){t.exports=function(t,e){var n=t.length;for(t.sort(e);n--;)t[n]=t[n].value;return t}},function(t,e,n){var r=n(342);t.exports=function(t,e,n){for(var i=-1,a=t.criteria,o=e.criteria,s=a.length,c=n.length;++i=c?u:u*("desc"==n[i]?-1:1)}return t.index-e.index}},function(t,e,n){var r=n(42);t.exports=function(t,e){if(t!==e){var n=void 0!==t,i=null===t,a=t==t,o=r(t),s=void 0!==e,c=null===e,u=e==e,l=r(e);if(!c&&!l&&!o&&t>e||o&&s&&u&&!c&&!l||i&&s&&u||!n&&u||!a)return 
1;if(!i&&!o&&!l&&t0;--c)if(r=e[c].dequeue()){i=i.concat(s(t,e,n,r,!0));break}}return i}(n.graph,n.buckets,n.zeroIdx);return r.flatten(r.map(u,(function(e){return t.outEdges(e.v,e.w)})),!0)};var o=r.constant(1);function s(t,e,n,i,a){var o=a?[]:void 0;return r.forEach(t.inEdges(i.v),(function(r){var i=t.edge(r),s=t.node(r.v);a&&o.push({v:r.v,w:r.w}),s.out-=i,c(e,n,s)})),r.forEach(t.outEdges(i.v),(function(r){var i=t.edge(r),a=r.w,o=t.node(a);o.in-=i,c(e,n,o)})),t.removeNode(i.v),o}function c(t,e,n){n.out?n.in?t[n.out-n.in+e].enqueue(n):t[t.length-1].enqueue(n):t[0].enqueue(n)}},function(t,e){function n(){var t={};t._next=t._prev=t,this._sentinel=t}function r(t){t._prev._next=t._next,t._next._prev=t._prev,delete t._next,delete t._prev}function i(t,e){if("_next"!==t&&"_prev"!==t)return e}t.exports=n,n.prototype.dequeue=function(){var t=this._sentinel,e=t._prev;if(e!==t)return r(e),e},n.prototype.enqueue=function(t){var e=this._sentinel;t._prev&&t._next&&r(t),t._next=e._next,e._next._prev=t,e._next=t,t._prev=e},n.prototype.toString=function(){for(var t=[],e=this._sentinel,n=e._prev;n!==e;)t.push(JSON.stringify(n,i)),n=n._prev;return"["+t.join(", ")+"]"}},function(t,e,n){"use strict";var r=n(4),i=n(8);t.exports={run:function(t){t.graph().dummyChains=[],r.forEach(t.edges(),(function(e){!function(t,e){var n,r,a,o=e.v,s=t.node(o).rank,c=e.w,u=t.node(c).rank,l=e.name,h=t.edge(e),f=h.labelRank;if(u===s+1)return;for(t.removeEdge(e),a=0,++s;sc.lim&&(u=c,l=!0);var h=r.filter(e.edges(),(function(e){return l===m(t,t.node(e.v),u)&&l!==m(t,t.node(e.w),u)}));return r.minBy(h,(function(t){return a(e,t)}))}function v(t,e,n,i){var a=n.v,o=n.w;t.removeEdge(a,o),t.setEdge(i.v,i.w,{}),d(t),h(t,e),function(t,e){var n=r.find(t.nodes(),(function(t){return!e.node(t).parent})),i=s(t,n);i=i.slice(1),r.forEach(i,(function(n){var r=t.node(n).parent,i=e.edge(n,r),a=!1;i||(i=e.edge(r,n),a=!0),e.node(n).rank=e.node(r).rank+(a?i.minlen:-i.minlen)}))}(t,e)}function m(t,e,n){return 
n.low<=e.lim&&e.lim<=n.lim}t.exports=l,l.initLowLimValues=d,l.initCutValues=h,l.calcCutValue=f,l.leaveEdge=g,l.enterEdge=y,l.exchangeEdges=v},function(t,e,n){var r=n(4);t.exports=function(t){var e=function(t){var e={},n=0;function i(a){var o=n;r.forEach(t.children(a),i),e[a]={low:o,lim:n++}}return r.forEach(t.children(),i),e}(t);r.forEach(t.graph().dummyChains,(function(n){for(var r=t.node(n),i=r.edgeObj,a=function(t,e,n,r){var i,a,o=[],s=[],c=Math.min(e[n].low,e[r].low),u=Math.max(e[n].lim,e[r].lim);i=n;do{i=t.parent(i),o.push(i)}while(i&&(e[i].low>c||u>e[i].lim));a=i,i=r;for(;(i=t.parent(i))!==a;)s.push(i);return{path:o.concat(s.reverse()),lca:a}}(t,e,i.v,i.w),o=a.path,s=a.lca,c=0,u=o[c],l=!0;n!==i.w;){if(r=t.node(n),l){for(;(u=o[c])!==s&&t.node(u).maxRank=2),s=l.buildLayerMatrix(t);var y=a(t,s);y0;)e%2&&(n+=c[e+1]),c[e=e-1>>1]+=t.weight;u+=t.weight*n}))),u}t.exports=function(t,e){for(var n=0,r=1;r=t.barycenter)&&function(t,e){var n=0,r=0;t.weight&&(n+=t.barycenter*t.weight,r+=t.weight);e.weight&&(n+=e.barycenter*e.weight,r+=e.weight);t.vs=e.vs.concat(t.vs),t.barycenter=n/r,t.weight=r,t.i=Math.min(e.i,t.i),e.merged=!0}(t,e)}}function i(e){return function(n){n.in.push(e),0==--n.indegree&&t.push(n)}}for(;t.length;){var a=t.pop();e.push(a),r.forEach(a.in.reverse(),n(a)),r.forEach(a.out,i(a))}return r.map(r.filter(e,(function(t){return!t.merged})),(function(t){return r.pick(t,["vs","i","barycenter","weight"])}))}(r.filter(n,(function(t){return!t.indegree})))}},function(t,e,n){var r=n(4),i=n(8);function a(t,e,n){for(var i;e.length&&(i=r.last(e)).i<=n;)e.pop(),t.push(i.vs),n++;return n}t.exports=function(t,e){var n=i.partition(t,(function(t){return r.has(t,"barycenter")})),o=n.lhs,s=r.sortBy(n.rhs,(function(t){return-t.i})),c=[],u=0,l=0,h=0;o.sort((f=!!e,function(t,e){return t.barycentere.barycenter?1:f?e.i-t.i:t.i-e.i})),h=a(c,s,h),r.forEach(o,(function(t){h+=t.vs.length,c.push(t.vs),u+=t.barycenter*t.weight,l+=t.weight,h=a(c,s,h)}));var f;var 
d={vs:r.flatten(c,!0)};l&&(d.barycenter=u/l,d.weight=l);return d}},function(t,e,n){var r=n(4),i=n(17).Graph;t.exports=function(t,e,n){var a=function(t){var e;for(;t.hasNode(e=r.uniqueId("_root")););return e}(t),o=new i({compound:!0}).setGraph({root:a}).setDefaultNodeLabel((function(e){return t.node(e)}));return r.forEach(t.nodes(),(function(i){var s=t.node(i),c=t.parent(i);(s.rank===e||s.minRank<=e&&e<=s.maxRank)&&(o.setNode(i),o.setParent(i,c||a),r.forEach(t[n](i),(function(e){var n=e.v===i?e.w:e.v,a=o.edge(n,i),s=r.isUndefined(a)?0:a.weight;o.setEdge(n,i,{weight:t.edge(e).weight+s})})),r.has(s,"minRank")&&o.setNode(i,{borderLeft:s.borderLeft[e],borderRight:s.borderRight[e]}))})),o}},function(t,e,n){var r=n(4);t.exports=function(t,e,n){var i,a={};r.forEach(n,(function(n){for(var r,o,s=t.parent(n);s;){if((r=t.parent(s))?(o=a[r],a[r]=s):(o=i,i=s),o&&o!==s)return void e.setEdge(o,s);s=r}}))}},function(t,e,n){"use strict";var r=n(4),i=n(8),a=n(365).positionX;t.exports=function(t){(function(t){var e=i.buildLayerMatrix(t),n=t.graph().ranksep,a=0;r.forEach(e,(function(e){var i=r.max(r.map(e,(function(e){return t.node(e).height})));r.forEach(e,(function(e){t.node(e).y=a+i/2})),a+=i+n}))})(t=i.asNonCompoundGraph(t)),r.forEach(a(t),(function(e,n){t.node(n).x=e}))}},function(t,e,n){"use strict";var r=n(4),i=n(17).Graph,a=n(8);function o(t,e){var n={};return r.reduce(e,(function(e,i){var a=0,o=0,s=e.length,u=r.last(i);return r.forEach(i,(function(e,l){var h=function(t,e){if(t.node(e).dummy)return r.find(t.predecessors(e),(function(e){return t.node(e).dummy}))}(t,e),f=h?t.node(h).order:s;(h||e===u)&&(r.forEach(i.slice(o,l+1),(function(e){r.forEach(t.predecessors(e),(function(r){var i=t.node(r),o=i.order;!(os)&&c(n,e,u)}))}))}return r.reduce(e,(function(e,n){var a,o=-1,s=0;return r.forEach(n,(function(r,c){if("border"===t.node(r).dummy){var u=t.predecessors(r);u.length&&(a=t.node(u[0]).order,i(n,s,c,o,a),s=c,o=a)}i(n,s,n.length,a,e.length)})),n})),n}function 
c(t,e,n){if(e>n){var r=e;e=n,n=r}var i=t[e];i||(t[e]=i={}),i[n]=!0}function u(t,e,n){if(e>n){var i=e;e=n,n=i}return r.has(t[e],n)}function l(t,e,n,i){var a={},o={},s={};return r.forEach(e,(function(t){r.forEach(t,(function(t,e){a[t]=t,o[t]=t,s[t]=e}))})),r.forEach(e,(function(t){var e=-1;r.forEach(t,(function(t){var c=i(t);if(c.length)for(var l=((c=r.sortBy(c,(function(t){return s[t]}))).length-1)/2,h=Math.floor(l),f=Math.ceil(l);h<=f;++h){var d=c[h];o[t]===t&&e0}t.exports=function(t,e,r,i){var a,o,s,c,u,l,h,f,d,p,g,y,v;if(a=e.y-t.y,s=t.x-e.x,u=e.x*t.y-t.x*e.y,d=a*r.x+s*r.y+u,p=a*i.x+s*i.y+u,0!==d&&0!==p&&n(d,p))return;if(o=i.y-r.y,c=r.x-i.x,l=i.x*r.y-r.x*i.y,h=o*t.x+c*t.y+l,f=o*e.x+c*e.y+l,0!==h&&0!==f&&n(h,f))return;if(0===(g=a*c-o*s))return;return y=Math.abs(g/2),{x:(v=s*l-c*u)<0?(v-y)/g:(v+y)/g,y:(v=o*u-a*l)<0?(v-y)/g:(v+y)/g}}},function(t,e,n){var r=n(43),i=n(31),a=n(153).layout;t.exports=function(){var t=n(371),e=n(374),i=n(375),u=n(376),l=n(377),h=n(378),f=n(379),d=n(380),p=n(381),g=function(n,g){!function(t){t.nodes().forEach((function(e){var n=t.node(e);r.has(n,"label")||t.children(e).length||(n.label=e),r.has(n,"paddingX")&&r.defaults(n,{paddingLeft:n.paddingX,paddingRight:n.paddingX}),r.has(n,"paddingY")&&r.defaults(n,{paddingTop:n.paddingY,paddingBottom:n.paddingY}),r.has(n,"padding")&&r.defaults(n,{paddingLeft:n.padding,paddingRight:n.padding,paddingTop:n.padding,paddingBottom:n.padding}),r.defaults(n,o),r.each(["paddingLeft","paddingRight","paddingTop","paddingBottom"],(function(t){n[t]=Number(n[t])})),r.has(n,"width")&&(n._prevWidth=n.width),r.has(n,"height")&&(n._prevHeight=n.height)})),t.edges().forEach((function(e){var n=t.edge(e);r.has(n,"label")||(n.label=""),r.defaults(n,s)}))}(g);var y=c(n,"output"),v=c(y,"clusters"),m=c(y,"edgePaths"),b=i(c(y,"edgeLabels"),g),x=t(c(y,"nodes"),g,d);a(g),l(x,g),h(b,g),u(m,g,p);var _=e(v,g);f(_,g),function(t){r.each(t.nodes(),(function(e){var n=t.node(e);r.has(n,"_prevWidth")?n.width=n._prevWidth:delete 
n.width,r.has(n,"_prevHeight")?n.height=n._prevHeight:delete n.height,delete n._prevWidth,delete n._prevHeight}))}(g)};return g.createNodes=function(e){return arguments.length?(t=e,g):t},g.createClusters=function(t){return arguments.length?(e=t,g):e},g.createEdgeLabels=function(t){return arguments.length?(i=t,g):i},g.createEdgePaths=function(t){return arguments.length?(u=t,g):u},g.shapes=function(t){return arguments.length?(d=t,g):d},g.arrows=function(t){return arguments.length?(p=t,g):p},g};var o={paddingLeft:10,paddingRight:10,paddingTop:10,paddingBottom:10,rx:0,ry:0,shape:"rect"},s={arrowhead:"normal",curve:i.curveLinear};function c(t,e){var n=t.select("g."+e);return n.empty()&&(n=t.append("g").attr("class",e)),n}},function(t,e,n){"use strict";var r=n(43),i=n(97),a=n(12),o=n(31);t.exports=function(t,e,n){var s,c=e.nodes().filter((function(t){return!a.isSubgraph(e,t)})),u=t.selectAll("g.node").data(c,(function(t){return t})).classed("update",!0);u.exit().remove(),u.enter().append("g").attr("class","node").style("opacity",0),(u=t.selectAll("g.node")).each((function(t){var s=e.node(t),c=o.select(this);a.applyClass(c,s.class,(c.classed("update")?"update ":"")+"node"),c.select("g.label").remove();var u=c.append("g").attr("class","label"),l=i(u,s),h=n[s.shape],f=r.pick(l.node().getBBox(),"width","height");s.elem=this,s.id&&c.attr("id",s.id),s.labelId&&u.attr("id",s.labelId),r.has(s,"width")&&(f.width=s.width),r.has(s,"height")&&(f.height=s.height),f.width+=s.paddingLeft+s.paddingRight,f.height+=s.paddingTop+s.paddingBottom,u.attr("transform","translate("+(s.paddingLeft-s.paddingRight)/2+","+(s.paddingTop-s.paddingBottom)/2+")");var d=o.select(this);d.select(".label-container").remove();var p=h(d,f,s).classed("label-container",!0);a.applyStyle(p,s.style);var g=p.node().getBBox();s.width=g.width,s.height=g.height})),s=u.exit?u.exit():u.selectAll(null);return a.applyTransition(s,e).style("opacity",0).remove(),u}},function(t,e,n){var 
r=n(12);t.exports=function(t,e){for(var n=t.append("text"),i=function(t){for(var e,n="",r=!1,i=0;i0&&void 0!==arguments[0]?arguments[0]:"fatal";isNaN(t)&&(t=t.toLowerCase(),void 0!==s[t]&&(t=s[t])),c.trace=function(){},c.debug=function(){},c.info=function(){},c.warn=function(){},c.error=function(){},c.fatal=function(){},t<=s.fatal&&(c.fatal=console.error?console.error.bind(console,l("FATAL"),"color: orange"):console.log.bind(console,"",l("FATAL"))),t<=s.error&&(c.error=console.error?console.error.bind(console,l("ERROR"),"color: orange"):console.log.bind(console,"",l("ERROR"))),t<=s.warn&&(c.warn=console.warn?console.warn.bind(console,l("WARN"),"color: orange"):console.log.bind(console,"",l("WARN"))),t<=s.info&&(c.info=console.info?console.info.bind(console,l("INFO"),"color: lightblue"):console.log.bind(console,"",l("INFO"))),t<=s.debug&&(c.debug=console.debug?console.debug.bind(console,l("DEBUG"),"color: lightgreen"):console.log.bind(console,"",l("DEBUG")))},l=function(t){var e=o()().format("ss.SSS");return"%c".concat(e," : ").concat(t," : ")},h=n(169),f=n.n(h),d=n(0),p=n(44),g=n(70),y=function(t){for(var e="",n=0;n>=0;){if(!((n=t.indexOf("=0)){e+=t,n=-1;break}e+=t.substr(0,n),(n=(t=t.substr(n+1)).indexOf("<\/script>"))>=0&&(n+=9,t=t.substr(n))}return e},v=//gi,m=function(t){return t.replace(v,"#br#")},b=function(t){return t.replace(/#br#/g,"
")},x={getRows:function(t){if(!t)return 1;var e=m(t);return(e=e.replace(/\\n/g,"#br#")).split("#br#")},sanitizeText:function(t,e){var n=t,r=!0;if(!e.flowchart||!1!==e.flowchart.htmlLabels&&"false"!==e.flowchart.htmlLabels||(r=!1),r){var i=e.securityLevel;"antiscript"===i?n=y(n):"loose"!==i&&(n=(n=(n=m(n)).replace(//g,">")).replace(/=/g,"="),n=b(n))}return n},hasBreaks:function(t){return//gi.test(t)},splitBreaks:function(t){return t.split(//gi)},lineBreakRegex:v,removeScript:y};function _(t,e){for(var n=0;n1&&void 0!==arguments[1]?arguments[1]:null;try{var n=new RegExp("[%]{2}(?![{]".concat(C.source,")(?=[}][%]{2}).*\n"),"ig");t=t.trim().replace(n,"").replace(/'/gm,'"'),c.debug("Detecting diagram directive".concat(null!==e?" type:"+e:""," based on the text:").concat(t));for(var r,i=[];null!==(r=T.exec(t));)if(r.index===T.lastIndex&&T.lastIndex++,r&&!e||e&&r[1]&&r[1].match(e)||e&&r[2]&&r[2].match(e)){var a=r[1]?r[1]:r[2],o=r[3]?r[3].trim():r[4]?JSON.parse(r[4].trim()):null;i.push({type:a,args:o})}return 0===i.length&&i.push({type:t,args:null}),1===i.length?i[0]:i}catch(n){return c.error("ERROR: ".concat(n.message," - Unable to parse directive").concat(null!==e?" 
type:"+e:""," based on the text:").concat(t)),{type:null,args:null}}},M=function(t){return t=t.replace(T,"").replace(S,"\n"),c.debug("Detecting diagram type based on the text "+t),t.match(/^\s*sequenceDiagram/)?"sequence":t.match(/^\s*gantt/)?"gantt":t.match(/^\s*classDiagram-v2/)?"classDiagram":t.match(/^\s*classDiagram/)?"class":t.match(/^\s*stateDiagram-v2/)?"stateDiagram":t.match(/^\s*stateDiagram/)?"state":t.match(/^\s*gitGraph/)?"git":t.match(/^\s*flowchart/)?"flowchart-v2":t.match(/^\s*info/)?"info":t.match(/^\s*pie/)?"pie":t.match(/^\s*erDiagram/)?"er":t.match(/^\s*journey/)?"journey":"flowchart"},O=function(t,e){var n={};return function(){for(var r=arguments.length,i=new Array(r),a=0;a"},n),x.lineBreakRegex.test(t))return t;var r=t.split(" "),i=[],a="";return r.forEach((function(t,o){var s=z("".concat(t," "),n),c=z(a,n);if(s>e){var u=Y(t,e,"-",n),l=u.hyphenatedStrings,h=u.remainingWord;i.push.apply(i,[a].concat(w(l))),a=h}else c+s>=e?(i.push(a),a=t):a=[a,t].filter(Boolean).join(" ");o+1===r.length&&i.push(a)})),i.filter((function(t){return""!==t})).join(n.joinWith)}),(function(t,e,n){return"".concat(t,"-").concat(e,"-").concat(n.fontSize,"-").concat(n.fontWeight,"-").concat(n.fontFamily,"-").concat(n.joinWith)})),Y=O((function(t,e){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:"-",r=arguments.length>3?arguments[3]:void 0;r=Object.assign({fontSize:12,fontWeight:400,fontFamily:"Arial",margin:0},r);var i=t.split(""),a=[],o="";return i.forEach((function(t,s){var c="".concat(o).concat(t);if(z(c,r)>=e){var u=s+1,l=i.length===u,h="".concat(c).concat(n);a.push(l?c:h),o=""}else o=c})),{hyphenatedStrings:a,remainingWord:o}}),(function(t,e){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:"-",r=arguments.length>3?arguments[3]:void 0;return"".concat(t,"-").concat(e,"-").concat(n,"-").concat(r.fontSize,"-").concat(r.fontWeight,"-").concat(r.fontFamily)})),z=function(t,e){return 
e=Object.assign({fontSize:12,fontWeight:400,fontFamily:"Arial"},e),U(t,e).width},U=O((function(t,e){var n=e=Object.assign({fontSize:12,fontWeight:400,fontFamily:"Arial"},e),r=n.fontSize,i=n.fontFamily,a=n.fontWeight;if(!t)return{width:0,height:0};var o=["sans-serif",i],s=t.split(x.lineBreakRegex),c=[],u=Object(d.select)("body");if(!u.remove)return{width:0,height:0,lineHeight:0};for(var l=u.append("svg"),h=0,f=o;hc[1].height&&c[0].width>c[1].width&&c[0].lineHeight>c[1].lineHeight?0:1]}),(function(t,e){return"".concat(t,"-").concat(e.fontSize,"-").concat(e.fontWeight,"-").concat(e.fontFamily)})),$=function(t,e,n){var r=new Map;return r.set("height",t),n?(r.set("width","100%"),r.set("style","max-width: ".concat(e,"px;"))):r.set("width",e),r},W=function(t,e,n,r){!function(t,e){var n=!0,r=!1,i=void 0;try{for(var a,o=e[Symbol.iterator]();!(n=(a=o.next()).done);n=!0){var s=a.value;t.attr(s[0],s[1])}}catch(t){r=!0,i=t}finally{try{n||null==o.return||o.return()}finally{if(r)throw i}}}(t,$(e,n,r))},V={assignWithDepth:I,wrapLabel:R,calculateTextHeight:function(t,e){return e=Object.assign({fontSize:12,fontWeight:400,fontFamily:"Arial",margin:15},e),U(t,e).height},calculateTextWidth:z,calculateTextDimensions:U,calculateSvgSizeAttrs:$,configureSvgSize:W,detectInit:function(t){var e=A(t,/(?:init\b)|(?:initialize\b)/),n={};if(Array.isArray(e)){var r=e.map((function(t){return t.args}));n=I(n,w(r))}else n=e.args;if(n){var i=M(t);["config"].forEach((function(t){void 0!==n[t]&&("flowchart-v2"===i&&(i="flowchart"),n[i]=n[t],delete n[t])}))}return n},detectDirective:A,detectType:M,isSubstringInArray:function(t,e){for(var n=0;n=1&&(i={x:t.x,y:t.y}),a>0&&a<1&&(i={x:(1-a)*e.x+a*t.x,y:(1-a)*e.y+a*t.y})}}e=t})),i}(t)},calcCardinalityPosition:function(t,e,n){var r;c.info("our points",e),e[0]!==n&&(e=e.reverse()),e.forEach((function(t){N(t,r),r=t}));var i,a=25;r=void 0,e.forEach((function(t){if(r&&!i){var 
e=N(t,r);if(e=1&&(i={x:t.x,y:t.y}),n>0&&n<1&&(i={x:(1-n)*r.x+n*t.x,y:(1-n)*r.y+n*t.y})}}r=t}));var o=t?10:5,s=Math.atan2(e[0].y-i.y,e[0].x-i.x),u={x:0,y:0};return u.x=Math.sin(s)*o+(e[0].x+i.x)/2,u.y=-Math.cos(s)*o+(e[0].y+i.y)/2,u},calcTerminalLabelPosition:function(t,e,n){var r,i=JSON.parse(JSON.stringify(n));c.info("our points",i),"start_left"!==e&&"start_right"!==e&&(i=i.reverse()),i.forEach((function(t){N(t,r),r=t}));var a,o=25;r=void 0,i.forEach((function(t){if(r&&!a){var e=N(t,r);if(e=1&&(a={x:t.x,y:t.y}),n>0&&n<1&&(a={x:(1-n)*r.x+n*t.x,y:(1-n)*r.y+n*t.y})}}r=t}));var s=10,u=Math.atan2(i[0].y-a.y,i[0].x-a.x),l={x:0,y:0};return l.x=Math.sin(u)*s+(i[0].x+a.x)/2,l.y=-Math.cos(u)*s+(i[0].y+a.y)/2,"start_left"===e&&(l.x=Math.sin(u+Math.PI)*s+(i[0].x+a.x)/2,l.y=-Math.cos(u+Math.PI)*s+(i[0].y+a.y)/2),"end_right"===e&&(l.x=Math.sin(u-Math.PI)*s+(i[0].x+a.x)/2-5,l.y=-Math.cos(u-Math.PI)*s+(i[0].y+a.y)/2-5),"end_left"===e&&(l.x=Math.sin(u)*s+(i[0].x+a.x)/2-5,l.y=-Math.cos(u)*s+(i[0].y+a.y)/2-5),l},formatUrl:function(t,e){var n=t.trim();if(n)return"loose"!==e.securityLevel?Object(g.sanitizeUrl)(n):n},getStylesFromArray:B,generateId:P,random:F,memoize:O,runFunc:function(t){for(var e,n=t.split("."),r=n.length-1,i=n[r],a=window,o=0;o1?s-1:0),u=1;u=0&&(n=!0)})),n},qt=function(t,e){var n=[];return t.nodes.forEach((function(r,i){Gt(e,r)||n.push(t.nodes[i])})),{nodes:n}},Xt={parseDirective:function(t,e,n){Ho.parseDirective(this,t,e,n)},defaultConfig:function(){return gt.flowchart},addVertex:function(t,e,n,r,i){var a,o=t;void 0!==o&&0!==o.trim().length&&(void 0===Ot[o]&&(Ot[o]={id:o,domId:"flowchart-"+o+"-"+At,styles:[],classes:[]}),At++,void 0!==e?(Mt=_t(),'"'===(a=x.sanitizeText(e.trim(),Mt))[0]&&'"'===a[a.length-1]&&(a=a.substring(1,a.length-1)),Ot[o].text=a):void 0===Ot[o].text&&(Ot[o].text=t),void 
0!==n&&(Ot[o].type=n),null!=r&&r.forEach((function(t){Ot[o].styles.push(t)})),null!=i&&i.forEach((function(t){Ot[o].classes.push(t)})))},lookUpDomId:Rt,addLink:function(t,e,n,r){var i,a;for(i=0;i/)&&(Ct="LR"),Ct.match(/.*v/)&&(Ct="TB")},setClass:zt,getTooltip:function(t){return Pt[t]},setClickEvent:function(t,e,n){t.split(",").forEach((function(t){!function(t,e){var n=Rt(t);"loose"===_t().securityLevel&&void 0!==e&&void 0!==Ot[t]&&(Ot[t].haveCallback=!0,jt.push((function(){var r=document.querySelector('[id="'.concat(n,'"]'));null!==r&&r.addEventListener("click",(function(){V.runFunc(e,t)}),!1)})))}(t,e)})),Ut(t,n),zt(t,"clickable")},setLink:function(t,e,n,r){t.split(",").forEach((function(t){void 0!==Ot[t]&&(Ot[t].link=V.formatUrl(e,Mt),Ot[t].linkTarget=r)})),Ut(t,n),zt(t,"clickable")},bindFunctions:function(t){jt.forEach((function(e){e(t)}))},getDirection:function(){return Ct.trim()},getVertices:function(){return Ot},getEdges:function(){return Dt},getClasses:function(){return Nt},clear:function(t){Ot={},Nt={},Dt=[],(jt=[]).push($t),Bt=[],Lt={},Ft=0,Pt=[],It=!0,St=t||"gen-1"},setGen:function(t){St=t||"gen-1"},defaultStyle:function(){return"fill:#ffa;stroke: #f66; stroke-width: 3px; stroke-dasharray: 5, 5;fill:#ffa;stroke: #666;"},addSubGraph:function(t,e,n){var r=t.trim(),i=n;t===n&&n.match(/\s/)&&(r=void 0);var a,o,s,u=[];if(a=u.concat.apply(u,e),o={boolean:{},number:{},string:{}},s=[],u=a.filter((function(t){var e=Tt(t);return""!==t.trim()&&(e in o?!o[e].hasOwnProperty(t)&&(o[e][t]=!0):!(s.indexOf(t)>=0)&&s.push(t))})),"gen-1"===St){c.warn("LOOKING UP");for(var l=0;l0&&function t(e,n){var r=Bt[n].nodes;if(!((Vt+=1)>2e3)){if(Ht[Vt]=n,Bt[n].id===e)return{result:!0,count:0};for(var i=0,a=1;i=0){var s=t(e,o);if(s.result)return{result:!0,count:a+s.count};a+=s.count}i+=1}return{result:!1,count:a}}}("none",Bt.length-1)},getSubGraphs:function(){return Bt},destructLink:function(t,e){var n,r=function(t){var 
e=t.trim(),n=e.slice(0,-1),r="arrow_open";switch(e.slice(-1)){case"x":r="arrow_cross","x"===e[0]&&(r="double_"+r,n=n.slice(1));break;case">":r="arrow_point","<"===e[0]&&(r="double_"+r,n=n.slice(1));break;case"o":r="arrow_circle","o"===e[0]&&(r="double_"+r,n=n.slice(1))}var i="normal",a=n.length-1;"="===n[0]&&(i="thick");var o=function(t,e){for(var n=e.length,r=0,i=0;in.height/2-a)){var o=a*a*(1-r*r/(i*i));0!=o&&(o=Math.sqrt(o)),o=a-o,t.y-n.y>0&&(o=-o),e.y+=o}return e},c}function de(t,e,n,r){return t.insert("polygon",":first-child").attr("points",r.map((function(t){return t.x+","+t.y})).join(" ")).attr("transform","translate("+-e/2+","+n/2+")")}var pe={addToRender:function(t){t.shapes().question=ne,t.shapes().hexagon=re,t.shapes().stadium=le,t.shapes().subroutine=he,t.shapes().cylinder=fe,t.shapes().rect_left_inv_arrow=ie,t.shapes().lean_right=ae,t.shapes().lean_left=oe,t.shapes().trapezoid=se,t.shapes().inv_trapezoid=ce,t.shapes().rect_right_inv_arrow=ue},addToRenderV2:function(t){t({question:ne}),t({hexagon:re}),t({stadium:le}),t({subroutine:he}),t({cylinder:fe}),t({rect_left_inv_arrow:ie}),t({lean_right:ae}),t({lean_left:oe}),t({trapezoid:se}),t({inv_trapezoid:ce}),t({rect_right_inv_arrow:ue})}},ge={},ye=function(t,e,n){var r=Object(d.select)('[id="'.concat(n,'"]'));Object.keys(t).forEach((function(n){var i=t[n],a="default";i.classes.length>0&&(a=i.classes.join(" "));var o,s=B(i.styles),u=void 0!==i.text?i.text:i.id;if(_t().flowchart.htmlLabels){var l={label:u.replace(/fa[lrsb]?:fa-[\w-]+/g,(function(t){return"")}))};(o=ee()(r,l).node()).parentNode.removeChild(o)}else{var h=document.createElementNS("http://www.w3.org/2000/svg","text");h.setAttribute("style",s.labelStyle.replace("color:","fill:"));for(var f=u.split(x.lineBreakRegex),d=0;d').concat(a.text.replace(/fa[lrsb]?:fa-[\w-]+/g,(function(t){return"")})),"")):(u.labelType="text",u.label=a.text.replace(x.lineBreakRegex,"\n"),void 0===a.style&&(u.style=u.style||"stroke: #333; 
stroke-width: 1.5px;fill:none"),u.labelStyle=u.labelStyle.replace("color:","fill:"))),u.id=o,u.class=s+" "+c,u.minlen=a.length||1,e.setEdge(Xt.lookUpDomId(a.start),Xt.lookUpDomId(a.end),u,i)}))},me=function(t){for(var e=Object.keys(t),n=0;n=0;h--)i=l[h],Xt.addVertex(i.id,i.title,"group",void 0,i.classes);var f=Xt.getVertices();c.warn("Get vertices",f);var p=Xt.getEdges(),g=0;for(g=l.length-1;g>=0;g--){i=l[g],Object(d.selectAll)("cluster").append("text");for(var y=0;y"),c.info("vertexText"+i),function(t){var e,n,r=Object(d.select)(document.createElementNS("http://www.w3.org/2000/svg","foreignObject")),i=r.append("xhtml:div"),a=t.label,o=t.isNode?"nodeLabel":"edgeLabel";return i.html(''+a+""),e=i,(n=t.labelStyle)&&e.attr("style",n),i.style("display","inline-block"),i.style("white-space","nowrap"),i.attr("xmlns","http://www.w3.org/1999/xhtml"),r.node()}({isNode:r,label:i.replace(/fa[lrsb]?:fa-[\w-]+/g,(function(t){return"")})),labelStyle:e.replace("fill:","color:")});var a=document.createElementNS("http://www.w3.org/2000/svg","text");a.setAttribute("style",e.replace("color:","fill:"));var o=[];o="string"==typeof i?i.split(/\\n|\n|/gi):Array.isArray(i)?i:[];for(var s=0;s0)t(a,n,r,i);else{var o=n.node(a);c.info("cp ",a," to ",i," with parent ",e),r.setNode(a,o),i!==n.parent(a)&&(c.warn("Setting parent",a,n.parent(a)),r.setParent(a,n.parent(a))),e!==i&&a!==e?(c.debug("Setting parent",a,e),r.setParent(a,e)):(c.info("In copy ",e,"root",i,"data",n.node(e),i),c.debug("Not Setting parent for node=",a,"cluster!==rootId",e!==i,"node!==clusterId",a!==e));var s=n.edges(a);c.debug("Copying Edges",s),s.forEach((function(t){c.info("Edge",t);var a=n.edge(t.v,t.w,t.name);c.info("Edge data",a,i);try{!function(t,e){return c.info("Decendants of ",e," is ",Oe[e]),c.info("Edge is ",t),t.v!==e&&(t.w!==e&&(Oe[e]?(c.info("Here "),Oe[e].indexOf(t.v)>=0||(!!Ne(t.v,e)||(!!Ne(t.w,e)||Oe[e].indexOf(t.w)>=0))):(c.debug("Tilt, 
",e,",not in decendants"),!1)))}(t,i)?c.info("Skipping copy of edge ",t.v,"--\x3e",t.w," rootId: ",i," clusterId:",e):(c.info("Copying as ",t.v,t.w,a,t.name),r.setEdge(t.v,t.w,a,t.name),c.info("newGraph edges ",r.edges(),r.edge(r.edges()[0])))}catch(t){c.error(t)}}))}c.debug("Removing node",a),n.removeNode(a)}))},Le=function t(e,n){c.trace("Searching",e);var r=n.children(e);if(c.trace("Searching children of id ",e,r),r.length<1)return c.trace("This is a valid node",e),e;for(var i=0;i ",a),a}},Pe=function(t){return Me[t]&&Me[t].externalConnections&&Me[t]?Me[t].id:t},Fe=function(t,e){!t||e>10?c.debug("Opting out, no graph "):(c.debug("Opting in, graph "),t.nodes().forEach((function(e){t.children(e).length>0&&(c.warn("Cluster identified",e," Replacement id in edges: ",Le(e,t)),Oe[e]=function t(e,n){for(var r=n.children(e),i=[].concat(r),a=0;a0?(c.debug("Cluster identified",e,Oe),r.forEach((function(t){t.v!==e&&t.w!==e&&(Ne(t.v,e)^Ne(t.w,e)&&(c.warn("Edge: ",t," leaves cluster ",e),c.warn("Decendants of XXX ",e,": ",Oe[e]),Me[e].externalConnections=!0))}))):c.debug("Not a cluster ",e,Oe)})),t.edges().forEach((function(e){var n=t.edge(e);c.warn("Edge "+e.v+" -> "+e.w+": "+JSON.stringify(e)),c.warn("Edge "+e.v+" -> "+e.w+": "+JSON.stringify(t.edge(e)));var r=e.v,i=e.w;c.warn("Fix XXX",Me,"ids:",e.v,e.w,"Translateing: ",Me[e.v]," --- ",Me[e.w]),(Me[e.v]||Me[e.w])&&(c.warn("Fixing and trixing - removing XXX",e.v,e.w,e.name),r=Pe(e.v),i=Pe(e.w),t.removeEdge(e.v,e.w,e.name),r!==e.v&&(n.fromCluster=e.v),i!==e.w&&(n.toCluster=e.w),c.warn("Fix Replacing with XXX",r,i,e.name),t.setEdge(r,i,n,e.name))})),c.warn("Adjusted Graph",G.a.json.write(t)),Ie(t,0),c.trace(Me))},Ie=function t(e,n){if(c.warn("extractor - ",n,G.a.json.write(e),e.children("D")),n>10)c.error("Bailing out");else{for(var r=e.nodes(),i=!1,a=0;a0}if(i){c.debug("Nodes = ",r,n);for(var u=0;u0){c.warn("Cluster without external connections, without a parent and with children",l,n);var h=e.graph(),f=new 
G.a.Graph({multigraph:!0,compound:!0}).setGraph({rankdir:"TB"===h.rankdir?"LR":"TB",nodesep:50,ranksep:50,marginx:8,marginy:8}).setDefaultEdgeLabel((function(){return{}}));c.warn("Old graph before copy",G.a.json.write(e)),Be(l,e,f,l),e.setNode(l,{clusterNode:!0,id:l,clusterData:Me[l].clusterData,labelText:Me[l].labelText,graph:f}),c.warn("New graph after copy node: (",l,")",G.a.json.write(f)),c.debug("Old graph after copy",G.a.json.write(e))}else c.warn("Cluster ** ",l," **not meeting the criteria !externalConnections:",!Me[l].externalConnections," no parent: ",!e.parent(l)," children ",e.children(l)&&e.children(l).length>0,e.children("D"),n),c.debug(Me);else c.debug("Not a cluster",l,n)}r=e.nodes(),c.warn("New list of nodes",r);for(var d=0;d0}var $e=function(t,e,n,r){var i,a,o,s,c,u,l,h,f,d,p,g,y;if(i=e.y-t.y,o=t.x-e.x,c=e.x*t.y-t.x*e.y,f=i*n.x+o*n.y+c,d=i*r.x+o*r.y+c,!(0!==f&&0!==d&&Ue(f,d)||(a=r.y-n.y,s=n.x-r.x,u=r.x*n.y-n.x*r.y,l=a*t.x+s*t.y+u,h=a*e.x+s*e.y+u,0!==l&&0!==h&&Ue(l,h)||0==(p=i*s-a*o))))return g=Math.abs(p/2),{x:(y=o*u-s*c)<0?(y-g)/p:(y+g)/p,y:(y=a*c-i*u)<0?(y-g)/p:(y+g)/p}},We=function(t,e,n){var r=t.x,i=t.y,a=[],o=Number.POSITIVE_INFINITY,s=Number.POSITIVE_INFINITY;"function"==typeof e.forEach?e.forEach((function(t){o=Math.min(o,t.x),s=Math.min(s,t.y)})):(o=Math.min(o,e.x),s=Math.min(s,e.y));for(var c=r-t.width/2-o,u=i-t.height/2-s,l=0;l1&&a.sort((function(t,e){var r=t.x-n.x,i=t.y-n.y,a=Math.sqrt(r*r+i*i),o=e.x-n.x,s=e.y-n.y,c=Math.sqrt(o*o+s*s);return aMath.abs(o)*u?(s<0&&(u=-u),n=0===s?0:u*o/s,r=u):(o<0&&(c=-c),n=c,r=0===o?0:c*s/o),{x:i+n,y:a+r}},He={node:n.n(Re).a,circle:ze,ellipse:Ye,polygon:We,rect:Ve},Ge=function(t,e){var n=Ce(t,e,"node "+e.classes,!0),r=n.shapeSvg,i=n.bbox,a=n.halfPadding;c.info("Classes = ",e.classes);var o=r.insert("rect",":first-child");return 
o.attr("rx",e.rx).attr("ry",e.ry).attr("x",-i.width/2-a).attr("y",-i.height/2-a).attr("width",i.width+e.padding).attr("height",i.height+e.padding),Se(e,o),e.intersect=function(t){return He.rect(e,t)},r},qe=[],Xe={},Ze=0,Je=[],Qe=function(t){var e="",n=t;if(t.indexOf("~")>0){var r=t.split("~");n=r[0],e=r[1]}return{className:n,type:e}},Ke=function(t){var e=Qe(t);void 0===Xe[e.className]&&(Xe[e.className]={id:e.className,type:e.type,cssClasses:[],methods:[],members:[],annotations:[],domId:"classid-"+e.className+"-"+Ze},Ze++)},tn=function(t){for(var e=Object.keys(Xe),n=0;n>")?r.annotations.push(i.substring(2,i.length-2)):i.indexOf(")")>0?r.methods.push(i):i&&r.members.push(i)}},nn=function(t,e){t.split(",").forEach((function(t){var n=t;t[0].match(/\d/)&&(n="classid-"+n),void 0!==Xe[n]&&Xe[n].cssClasses.push(e)}))},rn=function(t,e,n){var r=_t(),i=t,a=tn(i);"loose"===r.securityLevel&&void 0!==e&&void 0!==Xe[i]&&(n&&(Xe[i].tooltip=x.sanitizeText(n,r)),Je.push((function(){var t=document.querySelector('[id="'.concat(a,'"]'));null!==t&&t.addEventListener("click",(function(){V.runFunc(e,a)}),!1)})))},an={AGGREGATION:0,EXTENSION:1,COMPOSITION:2,DEPENDENCY:3},on=function(t){var e=Object(d.select)(".mermaidTooltip");null===(e._groups||e)[0][0]&&(e=Object(d.select)("body").append("div").attr("class","mermaidTooltip").style("opacity",0)),Object(d.select)(t).select("svg").selectAll("g.node").on("mouseover",(function(){var t=Object(d.select)(this);if(null!==t.attr("title")){var n=this.getBoundingClientRect();e.transition().duration(200).style("opacity",".9"),e.html(t.attr("title")).style("left",window.scrollX+n.left+(n.right-n.left)/2+"px").style("top",window.scrollY+n.top-14+document.body.scrollTop+"px"),t.classed("hover",!0)}})).on("mouseout",(function(){e.transition().duration(500).style("opacity",0),Object(d.select)(this).classed("hover",!1)}))};Je.push(on);var sn={parseDirective:function(t,e,n){Ho.parseDirective(this,t,e,n)},getConfig:function(){return 
_t().class},addClass:Ke,bindFunctions:function(t){Je.forEach((function(e){e(t)}))},clear:function(){qe=[],Xe={},(Je=[]).push(on)},getClass:function(t){return Xe[t]},getClasses:function(){return Xe},addAnnotation:function(t,e){var n=Qe(t).className;Xe[n].annotations.push(e)},getRelations:function(){return qe},addRelation:function(t){c.debug("Adding relation: "+JSON.stringify(t)),Ke(t.id1),Ke(t.id2),t.id1=Qe(t.id1).className,t.id2=Qe(t.id2).className,qe.push(t)},addMember:en,addMembers:function(t,e){Array.isArray(e)&&(e.reverse(),e.forEach((function(e){return en(t,e)})))},cleanupLabel:function(t){return":"===t.substring(0,1)?t.substr(1).trim():t.trim()},lineType:{LINE:0,DOTTED_LINE:1},relationType:an,setClickEvent:function(t,e,n){t.split(",").forEach((function(t){rn(t,e,n),Xe[t].haveCallback=!0})),nn(t,"clickable")},setCssClass:nn,setLink:function(t,e,n){var r=_t();t.split(",").forEach((function(t){var i=t;t[0].match(/\d/)&&(i="classid-"+i),void 0!==Xe[i]&&(Xe[i].link=V.formatUrl(e,r),n&&(Xe[i].tooltip=x.sanitizeText(n,r)))})),nn(t,"clickable")},lookUpDomId:tn},cn=0,un=function(t){var e=t.match(/(\+|-|~|#)?(\w+)(~\w+~|\[\])?\s+(\w+)/),n=t.match(/^([+|\-|~|#])?(\w+) *\( *(.*)\) *(\*|\$)? *(\w*[~|[\]]*\s*\w*~?)$/);return e&&!n?ln(e):n?hn(n):fn(t)},ln=function(t){var e="";try{e=(t[1]?t[1].trim():"")+(t[2]?t[2].trim():"")+(t[3]?pn(t[3].trim()):"")+" "+(t[4]?t[4].trim():"")}catch(n){e=t}return{displayText:e,cssStyle:""}},hn=function(t){var e="",n="";try{var r=t[1]?t[1].trim():"",i=t[2]?t[2].trim():"",a=t[3]?pn(t[3].trim()):"",o=t[4]?t[4].trim():"";n=r+i+"("+a+")"+(t[5]?" 
: "+pn(t[5]).trim():""),e=gn(o)}catch(e){n=t}return{displayText:n,cssStyle:e}},fn=function(t){var e="",n="",r="",i=t.indexOf("("),a=t.indexOf(")");if(i>1&&a>i&&a<=t.length){var o="",s="",c=t.substring(0,1);c.match(/\w/)?s=t.substring(0,i).trim():(c.match(/\+|-|~|#/)&&(o=c),s=t.substring(1,i).trim());var u=t.substring(i+1,a),l=t.substring(a+1,1);n=gn(l),e=o+s+"("+pn(u.trim())+")",a<"".length&&""!==(r=t.substring(a+2).trim())&&(r=" : "+pn(r))}else e=pn(t);return{displayText:e,cssStyle:n}},dn=function(t,e,n,r){var i=un(e),a=t.append("tspan").attr("x",r.padding).text(i.displayText);""!==i.cssStyle&&a.attr("style",i.cssStyle),n||a.attr("dy",r.textHeight)},pn=function t(e){var n=e;return-1!=e.indexOf("~")?t(n=(n=n.replace("~","<")).replace("~",">")):n},gn=function(t){switch(t){case"*":return"font-style:italic;";case"$":return"text-decoration:underline;";default:return""}},yn=function(t,e,n){c.info("Rendering class "+e);var r,i=e.id,a={id:i,label:e.id,width:0,height:0},o=t.append("g").attr("id",tn(i)).attr("class","classGroup");r=e.link?o.append("svg:a").attr("xlink:href",e.link).attr("target","_blank").append("text").attr("y",n.textHeight+n.padding).attr("x",0):o.append("text").attr("y",n.textHeight+n.padding).attr("x",0);var s=!0;e.annotations.forEach((function(t){var e=r.append("tspan").text("«"+t+"»");s||e.attr("dy",n.textHeight),s=!1}));var u=e.id;void 0!==e.type&&""!==e.type&&(u+="<"+e.type+">");var l=r.append("tspan").text(u).attr("class","title");s||l.attr("dy",n.textHeight);var h=r.node().getBBox().height,f=o.append("line").attr("x1",0).attr("y1",n.padding+h+n.dividerMargin/2).attr("y2",n.padding+h+n.dividerMargin/2),d=o.append("text").attr("x",n.padding).attr("y",h+n.dividerMargin+n.textHeight).attr("fill","white").attr("class","classText");s=!0,e.members.forEach((function(t){dn(d,t,s,n),s=!1}));var 
p=d.node().getBBox(),g=o.append("line").attr("x1",0).attr("y1",n.padding+h+n.dividerMargin+p.height).attr("y2",n.padding+h+n.dividerMargin+p.height),y=o.append("text").attr("x",n.padding).attr("y",h+2*n.dividerMargin+p.height+n.textHeight).attr("fill","white").attr("class","classText");s=!0,e.methods.forEach((function(t){dn(y,t,s,n),s=!1}));var v=o.node().getBBox(),m=" ";e.cssClasses.length>0&&(m+=e.cssClasses.join(" "));var b=o.insert("rect",":first-child").attr("x",0).attr("y",0).attr("width",v.width+2*n.padding).attr("height",v.height+n.padding+.5*n.dividerMargin).attr("class",m).node().getBBox().width;return r.node().childNodes.forEach((function(t){t.setAttribute("x",(b-t.getBBox().width)/2)})),e.tooltip&&r.insert("title").text(e.tooltip),f.attr("x2",b),g.attr("x2",b),a.width=b,a.height=v.height+n.padding+.5*n.dividerMargin,a},vn=function(t,e,n,r){var i=function(t){switch(t){case an.AGGREGATION:return"aggregation";case an.EXTENSION:return"extension";case an.COMPOSITION:return"composition";case an.DEPENDENCY:return"dependency"}};e.points=e.points.filter((function(t){return!Number.isNaN(t.y)}));var a,o,s=e.points,u=Object(d.line)().x((function(t){return t.x})).y((function(t){return t.y})).curve(d.curveBasis),l=t.append("path").attr("d",u(s)).attr("id","edge"+cn).attr("class","relation"),h="";r.arrowMarkerAbsolute&&(h=(h=(h=window.location.protocol+"//"+window.location.host+window.location.pathname+window.location.search).replace(/\(/g,"\\(")).replace(/\)/g,"\\)")),1==n.relation.lineType&&l.attr("class","relation dashed-line"),"none"!==n.relation.type1&&l.attr("marker-start","url("+h+"#"+i(n.relation.type1)+"Start)"),"none"!==n.relation.type2&&l.attr("marker-end","url("+h+"#"+i(n.relation.type2)+"End)");var f,p,g,y,v=e.points.length,m=V.calcLabelPosition(e.points);if(a=m.x,o=m.y,v%2!=0&&v>1){var 
b=V.calcCardinalityPosition("none"!==n.relation.type1,e.points,e.points[0]),x=V.calcCardinalityPosition("none"!==n.relation.type2,e.points,e.points[v-1]);c.debug("cardinality_1_point "+JSON.stringify(b)),c.debug("cardinality_2_point "+JSON.stringify(x)),f=b.x,p=b.y,g=x.x,y=x.y}if(void 0!==n.title){var _=t.append("g").attr("class","classLabel"),k=_.append("text").attr("class","label").attr("x",a).attr("y",o).attr("fill","red").attr("text-anchor","middle").text(n.title);window.label=k;var w=k.node().getBBox();_.insert("rect",":first-child").attr("class","box").attr("x",w.x-r.padding/2).attr("y",w.y-r.padding/2).attr("width",w.width+r.padding).attr("height",w.height+r.padding)}(c.info("Rendering relation "+JSON.stringify(n)),void 0!==n.relationTitle1&&"none"!==n.relationTitle1)&&t.append("g").attr("class","cardinality").append("text").attr("class","type1").attr("x",f).attr("y",p).attr("fill","black").attr("font-size","6").text(n.relationTitle1);void 0!==n.relationTitle2&&"none"!==n.relationTitle2&&t.append("g").attr("class","cardinality").append("text").attr("class","type2").attr("x",g).attr("y",y).attr("fill","black").attr("font-size","6").text(n.relationTitle2);cn++},mn=function(t,e,n){var r=t.insert("g").attr("class","node default").attr("id",e.domId||e.id),i=70,a=10;"LR"===n&&(i=10,a=70);var o=r.append("rect").style("stroke","black").style("fill","black").attr("x",-1*i/2).attr("y",-1*a/2).attr("width",i).attr("height",a).attr("class","fork-join");return Se(e,o),e.height=e.height+e.padding/2,e.width=e.width+e.padding/2,e.intersect=function(t){return He.rect(e,t)},r},bn={question:function(t,e){var n=Ce(t,e,void 0,!0),r=n.shapeSvg,i=n.bbox,a=i.width+e.padding+(i.height+e.padding),o=[{x:a/2,y:0},{x:a,y:-a/2},{x:a/2,y:-a},{x:0,y:-a/2}];c.info("Question main (Circle)");var s=Ae(r,a,a,o);return Se(e,s),e.intersect=function(t){return c.warn("Intersect called"),He.polygon(e,o,t)},r},rect:function(t,e){var n=Ce(t,e,"node 
"+e.classes,!0),r=n.shapeSvg,i=n.bbox,a=n.halfPadding;c.trace("Classes = ",e.classes);var o=r.insert("rect",":first-child");return o.attr("class","basic label-container").attr("style",e.style).attr("rx",e.rx).attr("ry",e.ry).attr("x",-i.width/2-a).attr("y",-i.height/2-a).attr("width",i.width+e.padding).attr("height",i.height+e.padding),Se(e,o),e.intersect=function(t){return He.rect(e,t)},r},rectWithTitle:function(t,e){var n;n=e.classes?"node "+e.classes:"node default";var r=t.insert("g").attr("class",n).attr("id",e.domId||e.id),i=r.insert("rect",":first-child"),a=r.insert("line"),o=r.insert("g").attr("class","label"),s=e.labelText.flat();c.info("Label text",s[0]);var u,l=o.node().appendChild(Te(s[0],e.labelStyle,!0,!0));if(_t().flowchart.htmlLabels){var h=l.children[0],f=Object(d.select)(l);u=h.getBoundingClientRect(),f.attr("width",u.width),f.attr("height",u.height)}c.info("Text 2",s);var p=s.slice(1,s.length),g=l.getBBox(),y=o.node().appendChild(Te(p.join("
"),e.labelStyle,!0,!0));if(_t().flowchart.htmlLabels){var v=y.children[0],m=Object(d.select)(y);u=v.getBoundingClientRect(),m.attr("width",u.width),m.attr("height",u.height)}var b=e.padding/2;return Object(d.select)(y).attr("transform","translate( "+(u.width>g.width?0:(g.width-u.width)/2)+", "+(g.height+b+5)+")"),Object(d.select)(l).attr("transform","translate( "+(u.widthe.height/2-s)){var i=s*s*(1-r*r/(o*o));0!=i&&(i=Math.sqrt(i)),i=s-i,t.y-e.y>0&&(i=-i),n.y+=i}return n},r},start:function(t,e){var n=t.insert("g").attr("class","node default").attr("id",e.domId||e.id),r=n.insert("circle",":first-child");return r.attr("class","state-start").attr("r",7).attr("width",14).attr("height",14),Se(e,r),e.intersect=function(t){return He.circle(e,7,t)},n},end:function(t,e){var n=t.insert("g").attr("class","node default").attr("id",e.domId||e.id),r=n.insert("circle",":first-child"),i=n.insert("circle",":first-child");return i.attr("class","state-start").attr("r",7).attr("width",14).attr("height",14),r.attr("class","state-end").attr("r",5).attr("width",10).attr("height",10),Se(e,i),e.intersect=function(t){return He.circle(e,7,t)},n},note:Ge,subroutine:function(t,e){var n=Ce(t,e,void 0,!0),r=n.shapeSvg,i=n.bbox,a=i.width+e.padding,o=i.height+e.padding,s=Ae(r,a,o,[{x:0,y:0},{x:a,y:0},{x:a,y:-o},{x:0,y:-o},{x:0,y:0},{x:-8,y:0},{x:a+8,y:0},{x:a+8,y:-o},{x:-8,y:-o},{x:-8,y:0}]);return Se(e,s),e.intersect=function(t){return He.polygon(e,t)},r},fork:mn,join:mn,class_box:function(t,e){var n,r=e.padding/2;n=e.classes?"node "+e.classes:"node default";var i=t.insert("g").attr("class",n).attr("id",e.domId||e.id),a=i.insert("rect",":first-child"),o=i.insert("line"),s=i.insert("line"),c=0,u=4,l=i.insert("g").attr("class","label"),h=0,f=e.classData.annotations&&e.classData.annotations[0],p=e.classData.annotations[0]?"«"+e.classData.annotations[0]+"»":"",g=l.node().appendChild(Te(p,e.labelStyle,!0,!0)),y=g.getBBox();if(_t().flowchart.htmlLabels){var 
v=g.children[0],m=Object(d.select)(g);y=v.getBoundingClientRect(),m.attr("width",y.width),m.attr("height",y.height)}e.classData.annotations[0]&&(u+=y.height+4,c+=y.width);var b=e.classData.id;void 0!==e.classData.type&&""!==e.classData.type&&(b+="<"+e.classData.type+">");var x=l.node().appendChild(Te(b,e.labelStyle,!0,!0));Object(d.select)(x).attr("class","classTitle");var _=x.getBBox();if(_t().flowchart.htmlLabels){var k=x.children[0],w=Object(d.select)(x);_=k.getBoundingClientRect(),w.attr("width",_.width),w.attr("height",_.height)}u+=_.height+4,_.width>c&&(c=_.width);var E=[];e.classData.members.forEach((function(t){var n=un(t).displayText,r=l.node().appendChild(Te(n,e.labelStyle,!0,!0)),i=r.getBBox();if(_t().flowchart.htmlLabels){var a=r.children[0],o=Object(d.select)(r);i=a.getBoundingClientRect(),o.attr("width",i.width),o.attr("height",i.height)}i.width>c&&(c=i.width),u+=i.height+4,E.push(r)})),u+=8;var T=[];if(e.classData.methods.forEach((function(t){var n=un(t).displayText,r=l.node().appendChild(Te(n,e.labelStyle,!0,!0)),i=r.getBBox();if(_t().flowchart.htmlLabels){var a=r.children[0],o=Object(d.select)(r);i=a.getBoundingClientRect(),o.attr("width",i.width),o.attr("height",i.height)}i.width>c&&(c=i.width),u+=i.height+4,T.push(r)})),u+=8,f){var C=(c-y.width)/2;Object(d.select)(g).attr("transform","translate( "+(-1*c/2+C)+", "+-1*u/2+")"),h=y.height+4}var S=(c-_.width)/2;return Object(d.select)(x).attr("transform","translate( "+(-1*c/2+S)+", "+(-1*u/2+h)+")"),h+=_.height+4,o.attr("class","divider").attr("x1",-c/2-r).attr("x2",c/2+r).attr("y1",-u/2-r+8+h).attr("y2",-u/2-r+8+h),h+=8,E.forEach((function(t){Object(d.select)(t).attr("transform","translate( "+-c/2+", "+(-1*u/2+h+4)+")"),h+=_.height+4})),h+=8,s.attr("class","divider").attr("x1",-c/2-r).attr("x2",c/2+r).attr("y1",-u/2-r+8+h).attr("y2",-u/2-r+8+h),h+=8,T.forEach((function(t){Object(d.select)(t).attr("transform","translate( "+-c/2+", "+(-1*u/2+h)+")"),h+=_.height+4})),a.attr("class","outer 
title-state").attr("x",-c/2-r).attr("y",-u/2-r).attr("width",c+e.padding).attr("height",u+e.padding),Se(e,a),e.intersect=function(t){return He.rect(e,t)},i}},xn={},_n=function(t){var e=xn[t.id];c.trace("Transforming node",t,"translate("+(t.x-t.width/2-5)+", "+(t.y-t.height/2-5)+")");t.clusterNode?e.attr("transform","translate("+(t.x-t.width/2-8)+", "+(t.y-t.height/2-8)+")"):e.attr("transform","translate("+t.x+", "+t.y+")")},kn={rect:function(t,e){c.trace("Creating subgraph rect for ",e.id,e);var n=t.insert("g").attr("class","cluster"+(e.class?" "+e.class:"")).attr("id",e.id),r=n.insert("rect",":first-child"),i=n.insert("g").attr("class","cluster-label"),a=i.node().appendChild(Te(e.labelText,e.labelStyle,void 0,!0)),o=a.getBBox();if(_t().flowchart.htmlLabels){var s=a.children[0],u=Object(d.select)(a);o=s.getBoundingClientRect(),u.attr("width",o.width),u.attr("height",o.height)}var l=0*e.padding,h=l/2;c.trace("Data ",e,JSON.stringify(e)),r.attr("style",e.style).attr("rx",e.rx).attr("ry",e.ry).attr("x",e.x-e.width/2-h).attr("y",e.y-e.height/2-h).attr("width",e.width+l).attr("height",e.height+l),i.attr("transform","translate("+(e.x-o.width/2)+", "+(e.y-e.height/2+e.padding/3)+")");var f=r.node().getBBox();return e.width=f.width,e.height=f.height,e.intersect=function(t){return Ve(e,t)},n},roundedWithTitle:function(t,e){var n=t.insert("g").attr("class",e.classes).attr("id",e.id),r=n.insert("rect",":first-child"),i=n.insert("g").attr("class","cluster-label"),a=n.append("rect"),o=i.node().appendChild(Te(e.labelText,e.labelStyle,void 0,!0)),s=o.getBBox();if(_t().flowchart.htmlLabels){var c=o.children[0],u=Object(d.select)(o);s=c.getBoundingClientRect(),u.attr("width",s.width),u.attr("height",s.height)}s=o.getBBox();var 
l=0*e.padding,h=l/2;r.attr("class","outer").attr("x",e.x-e.width/2-h).attr("y",e.y-e.height/2-h).attr("width",e.width+l).attr("height",e.height+l),a.attr("class","inner").attr("x",e.x-e.width/2-h).attr("y",e.y-e.height/2-h+s.height-1).attr("width",e.width+l).attr("height",e.height+l-s.height-3),i.attr("transform","translate("+(e.x-s.width/2)+", "+(e.y-e.height/2-e.padding/3+(_t().flowchart.htmlLabels?5:3))+")");var f=r.node().getBBox();return e.width=f.width,e.height=f.height,e.intersect=function(t){return Ve(e,t)},n},noteGroup:function(t,e){var n=t.insert("g").attr("class","note-cluster").attr("id",e.id),r=n.insert("rect",":first-child"),i=0*e.padding,a=i/2;r.attr("rx",e.rx).attr("ry",e.ry).attr("x",e.x-e.width/2-a).attr("y",e.y-e.height/2-a).attr("width",e.width+i).attr("height",e.height+i).attr("fill","none");var o=r.node().getBBox();return e.width=o.width,e.height=o.height,e.intersect=function(t){return Ve(e,t)},n},divider:function(t,e){var n=t.insert("g").attr("class",e.classes).attr("id",e.id),r=n.insert("rect",":first-child"),i=0*e.padding,a=i/2;r.attr("class","divider").attr("x",e.x-e.width/2-a).attr("y",e.y-e.height/2).attr("width",e.width+i).attr("height",e.height+i);var o=r.node().getBBox();return e.width=o.width,e.height=o.height,e.intersect=function(t){return Ve(e,t)},n}},wn={},En={},Tn={},Cn=function(t,e){var n=t.x,r=t.y,i=Math.abs(e.x-n),a=Math.abs(e.y-r),o=t.width/2,s=t.height/2;return i>=o||a>=s},Sn=function(t,e,n){c.warn("intersection calc o:",e," i:",n,t);var r=t.x,i=t.y,a=Math.abs(r-n.x),o=t.width/2,s=n.xMath.abs(r-e.x)*u){var y=n.y0&&c.info("Recursive edges",n.edge(n.edges()[0]));var s=o.insert("g").attr("class","clusters"),u=o.insert("g").attr("class","edgePaths"),l=o.insert("g").attr("class","edgeLabels"),h=o.insert("g").attr("class","nodes");return n.nodes().forEach((function(e){var o=n.node(e);if(void 0!==i){var s=JSON.parse(JSON.stringify(i.clusterData));c.info("Setting data for cluster XXX (",e,") 
",s,i),n.setNode(i.id,s),n.parent(e)||(c.warn("Setting parent",e,i.id),n.setParent(e,i.id,s))}if(c.info("(Insert) Node XXX"+e+": "+JSON.stringify(n.node(e))),o&&o.clusterNode){c.info("Cluster identified",e,o,n.node(e));var u=t(h,o.graph,r,n.node(e));Se(o,u),function(t,e){xn[e.id]=t}(u,o),c.warn("Recursive render complete",u,o)}else n.children(e).length>0?(c.info("Cluster - the non recursive path XXX",e,o.id,o,n),c.info(Le(o.id,n)),Me[o.id]={id:Le(o.id,n),node:o}):(c.info("Node - the non recursive path",e,o.id,o),function(t,e,n){var r,i;e.link?(r=t.insert("svg:a").attr("xlink:href",e.link).attr("target",e.linkTarget||"_blank"),i=bn[e.shape](r,e,n)):r=i=bn[e.shape](t,e,n),e.tooltip&&i.attr("title",e.tooltip),e.class&&i.attr("class","node default "+e.class),xn[e.id]=r,e.haveCallback&&xn[e.id].attr("class",xn[e.id].attr("class")+" clickable")}(h,n.node(e),a))})),n.edges().forEach((function(t){var e=n.edge(t.v,t.w,t.name);c.info("Edge "+t.v+" -> "+t.w+": "+JSON.stringify(t)),c.info("Edge "+t.v+" -> "+t.w+": ",t," ",JSON.stringify(n.edge(t))),c.info("Fix",Me,"ids:",t.v,t.w,"Translateing: ",Me[t.v],Me[t.w]),function(t,e){var n=Te(e.label,e.labelStyle),r=t.insert("g").attr("class","edgeLabel"),i=r.insert("g").attr("class","label");i.node().appendChild(n);var a=n.getBBox();if(_t().flowchart.htmlLabels){var o=n.children[0],s=Object(d.select)(n);a=o.getBoundingClientRect(),s.attr("width",a.width),s.attr("height",a.height)}if(i.attr("transform","translate("+-a.width/2+", "+-a.height/2+")"),En[e.id]=r,e.width=a.width,e.height=a.height,e.startLabelLeft){var c=Te(e.startLabelLeft,e.labelStyle),u=t.insert("g").attr("class","edgeTerminals"),l=u.insert("g").attr("class","inner");l.node().appendChild(c);var h=c.getBBox();l.attr("transform","translate("+-h.width/2+", "+-h.height/2+")"),Tn[e.id]||(Tn[e.id]={}),Tn[e.id].startLeft=u}if(e.startLabelRight){var 
f=Te(e.startLabelRight,e.labelStyle),p=t.insert("g").attr("class","edgeTerminals"),g=p.insert("g").attr("class","inner");p.node().appendChild(f),g.node().appendChild(f);var y=f.getBBox();g.attr("transform","translate("+-y.width/2+", "+-y.height/2+")"),Tn[e.id]||(Tn[e.id]={}),Tn[e.id].startRight=p}if(e.endLabelLeft){var v=Te(e.endLabelLeft,e.labelStyle),m=t.insert("g").attr("class","edgeTerminals"),b=m.insert("g").attr("class","inner");b.node().appendChild(v);var x=v.getBBox();b.attr("transform","translate("+-x.width/2+", "+-x.height/2+")"),m.node().appendChild(v),Tn[e.id]||(Tn[e.id]={}),Tn[e.id].endLeft=m}if(e.endLabelRight){var _=Te(e.endLabelRight,e.labelStyle),k=t.insert("g").attr("class","edgeTerminals"),w=k.insert("g").attr("class","inner");w.node().appendChild(_);var E=_.getBBox();w.attr("transform","translate("+-E.width/2+", "+-E.height/2+")"),k.node().appendChild(_),Tn[e.id]||(Tn[e.id]={}),Tn[e.id].endRight=k}}(l,e)})),n.edges().forEach((function(t){c.info("Edge "+t.v+" -> "+t.w+": "+JSON.stringify(t))})),c.info("#############################################"),c.info("### Layout ###"),c.info("#############################################"),c.info(n),ke.a.layout(n),c.info("Graph after layout:",G.a.json.write(n)),je(n).forEach((function(t){var e=n.node(t);c.info("Position "+t+": "+JSON.stringify(n.node(t))),c.info("Position "+t+": ("+e.x,","+e.y,") width: ",e.width," height: ",e.height),e&&e.clusterNode?_n(e):n.children(t).length>0?(!function(t,e){c.trace("Inserting cluster");var n=e.shape||"rect";wn[e.id]=kn[n](t,e)}(s,e),Me[e.id].node=e):_n(e)})),n.edges().forEach((function(t){var e=n.edge(t);c.info("Edge "+t.v+" -> "+t.w+": "+JSON.stringify(e),e);var i=function(t,e,n,r,i,a){var o=n.points,s=!1,u=a.node(e.v),l=a.node(e.w);if(l.intersect&&u.intersect&&((o=o.slice(1,n.points.length-1)).unshift(u.intersect(o[0])),c.info("Last point",o[o.length-1],l,l.intersect(o[o.length-1])),o.push(l.intersect(o[o.length-1]))),n.toCluster){var h;c.trace("edge",n),c.trace("to 
cluster",r[n.toCluster]),o=[];var f=!1;n.points.forEach((function(t){var e=r[n.toCluster].node;if(Cn(e,t)||f)f||o.push(t);else{c.trace("inside",n.toCluster,t,h);var i=Sn(e,h,t),a=!1;o.forEach((function(t){a=a||t.x===i.x&&t.y===i.y})),o.find((function(t){return t.x===i.x&&t.y===i.y}))?c.warn("no intersect",i,o):o.push(i),f=!0}h=t})),s=!0}if(n.fromCluster){c.trace("edge",n),c.warn("from cluster",r[n.fromCluster]);for(var p,g=[],y=!1,v=o.length-1;v>=0;v--){var m=o[v],b=r[n.fromCluster].node;if(Cn(b,m)||y)c.trace("Outside point",m),y||g.unshift(m);else{c.warn("inside",n.fromCluster,m,b);var x=Sn(b,p,m);g.unshift(x),y=!0}p=m}o=g,s=!0}var _,k=o.filter((function(t){return!Number.isNaN(t.y)})),w=Object(d.line)().x((function(t){return t.x})).y((function(t){return t.y})).curve(d.curveBasis);switch(n.thickness){case"normal":_="edge-thickness-normal";break;case"thick":_="edge-thickness-thick";break;default:_=""}switch(n.pattern){case"solid":_+=" edge-pattern-solid";break;case"dotted":_+=" edge-pattern-dotted";break;case"dashed":_+=" edge-pattern-dashed"}var E=t.append("path").attr("d",w(k)).attr("id",n.id).attr("class"," "+_+(n.classes?" 
"+n.classes:"")).attr("style",n.style),T="";switch(_t().state.arrowMarkerAbsolute&&(T=(T=(T=window.location.protocol+"//"+window.location.host+window.location.pathname+window.location.search).replace(/\(/g,"\\(")).replace(/\)/g,"\\)")),c.info("arrowTypeStart",n.arrowTypeStart),c.info("arrowTypeEnd",n.arrowTypeEnd),n.arrowTypeStart){case"arrow_cross":E.attr("marker-start","url("+T+"#"+i+"-crossStart)");break;case"arrow_point":E.attr("marker-start","url("+T+"#"+i+"-pointStart)");break;case"arrow_barb":E.attr("marker-start","url("+T+"#"+i+"-barbStart)");break;case"arrow_circle":E.attr("marker-start","url("+T+"#"+i+"-circleStart)");break;case"aggregation":E.attr("marker-start","url("+T+"#"+i+"-aggregationStart)");break;case"extension":E.attr("marker-start","url("+T+"#"+i+"-extensionStart)");break;case"composition":E.attr("marker-start","url("+T+"#"+i+"-compositionStart)");break;case"dependency":E.attr("marker-start","url("+T+"#"+i+"-dependencyStart)")}switch(n.arrowTypeEnd){case"arrow_cross":E.attr("marker-end","url("+T+"#"+i+"-crossEnd)");break;case"arrow_point":E.attr("marker-end","url("+T+"#"+i+"-pointEnd)");break;case"arrow_barb":E.attr("marker-end","url("+T+"#"+i+"-barbEnd)");break;case"arrow_circle":E.attr("marker-end","url("+T+"#"+i+"-circleEnd)");break;case"aggregation":E.attr("marker-end","url("+T+"#"+i+"-aggregationEnd)");break;case"extension":E.attr("marker-end","url("+T+"#"+i+"-extensionEnd)");break;case"composition":E.attr("marker-end","url("+T+"#"+i+"-compositionEnd)");break;case"dependency":E.attr("marker-end","url("+T+"#"+i+"-dependencyEnd)")}var C={};return s&&(C.updatedPath=o),C.originalPath=n.points,C}(u,t,e,Me,r,n);!function(t,e){c.info("Moving label",t.id,t.label,En[t.id]);var n=e.updatedPath?e.updatedPath:e.originalPath;if(t.label){var r=En[t.id],i=t.x,a=t.y;if(n){var o=V.calcLabelPosition(n);c.info("Moving label from (",i,",",a,") to (",o.x,",",o.y,")")}r.attr("transform","translate("+i+", "+a+")")}if(t.startLabelLeft){var 
s=Tn[t.id].startLeft,u=t.x,l=t.y;if(n){var h=V.calcTerminalLabelPosition(0,"start_left",n);u=h.x,l=h.y}s.attr("transform","translate("+u+", "+l+")")}if(t.startLabelRight){var f=Tn[t.id].startRight,d=t.x,p=t.y;if(n){var g=V.calcTerminalLabelPosition(0,"start_right",n);d=g.x,p=g.y}f.attr("transform","translate("+d+", "+p+")")}if(t.endLabelLeft){var y=Tn[t.id].endLeft,v=t.x,m=t.y;if(n){var b=V.calcTerminalLabelPosition(0,"end_left",n);v=b.x,m=b.y}y.attr("transform","translate("+v+", "+m+")")}if(t.endLabelRight){var x=Tn[t.id].endRight,_=t.x,k=t.y;if(n){var w=V.calcTerminalLabelPosition(0,"end_right",n);_=w.x,k=w.y}x.attr("transform","translate("+_+", "+k+")")}}(e,i)})),o},Mn=function(t,e,n,r,i){Ee(t,n,r,i),xn={},En={},Tn={},wn={},Oe={},De={},Me={},c.warn("Graph at first:",G.a.json.write(e)),Fe(e),c.warn("Graph after:",G.a.json.write(e)),An(t,e,r)},On={},Dn=function(t,e,n){var r=Object(d.select)('[id="'.concat(n,'"]'));Object.keys(t).forEach((function(n){var i=t[n],a="default";i.classes.length>0&&(a=i.classes.join(" "));var o,s=B(i.styles),u=void 0!==i.text?i.text:i.id;if(_t().flowchart.htmlLabels){var l={label:u.replace(/fa[lrsb]?:fa-[\w-]+/g,(function(t){return"")}))};(o=ee()(r,l).node()).parentNode.removeChild(o)}else{var h=document.createElementNS("http://www.w3.org/2000/svg","text");h.setAttribute("style",s.labelStyle.replace("color:","fill:"));for(var f=u.split(x.lineBreakRegex),d=0;d=0;h--)i=l[h],c.info("Subgraph - ",i),Xt.addVertex(i.id,i.title,"group",void 0,i.classes);var f=Xt.getVertices(),p=Xt.getEdges();c.info(p);var g=0;for(g=l.length-1;g>=0;g--){i=l[g],Object(d.selectAll)("cluster").append("text");for(var y=0;y0)switch(e.valign){case"top":case"start":s=function(){return Math.round(e.y+e.textMargin)};break;case"middle":case"center":s=function(){return Math.round(e.y+(n+r+e.textMargin)/2)};break;case"bottom":case"end":s=function(){return Math.round(e.y+(n+r+2*e.textMargin)-e.textMargin)}}if(void 0!==e.anchor&&void 
0!==e.textMargin&&void 0!==e.width)switch(e.anchor){case"left":case"start":e.x=Math.round(e.x+e.textMargin),e.anchor="start",e.dominantBaseline="text-after-edge",e.alignmentBaseline="middle";break;case"middle":case"center":e.x=Math.round(e.x+e.width/2),e.anchor="middle",e.dominantBaseline="middle",e.alignmentBaseline="middle";break;case"right":case"end":e.x=Math.round(e.x+e.width-e.textMargin),e.anchor="end",e.dominantBaseline="text-before-edge",e.alignmentBaseline="middle"}for(var c=0;c0&&(r+=(l._groups||l)[0][0].getBBox().height,n=r),a.push(l)}return a},In=function(t,e){var n,r,i,a,o,s=t.append("polygon");return s.attr("points",(n=e.x,r=e.y,i=e.width,a=e.height,n+","+r+" "+(n+i)+","+r+" "+(n+i)+","+(r+a-(o=7))+" "+(n+i-1.2*o)+","+(r+a)+" "+n+","+(r+a))),s.attr("class","labelBox"),e.y=e.y+e.height/2,Fn(t,e),s},jn=-1,Rn=function(){return{x:0,y:0,fill:void 0,anchor:void 0,style:"#666",width:void 0,height:void 0,textMargin:0,rx:0,ry:0,tspan:!0,valign:void 0}},Yn=function(){return{x:0,y:0,fill:"#EDF2AE",stroke:"#666",width:100,anchor:"start",height:100,rx:0,ry:0}},zn=function(){function t(t,e,n,i,a,o,s){r(e.append("text").attr("x",n+a/2).attr("y",i+o/2+5).style("text-anchor","middle").text(t),s)}function e(t,e,n,i,a,o,s,c){for(var u=c.actorFontSize,l=c.actorFontFamily,h=c.actorFontWeight,f=t.split(x.lineBreakRegex),d=0;d2&&void 0!==arguments[2]?arguments[2]:{text:void 0,wrap:void 0},r=arguments.length>3?arguments[3]:void 0;if(r===rr.ACTIVE_END){var i=tr(t.actor);if(i<1){var a=new Error("Trying to inactivate an inactive participant ("+t.actor+")");throw a.hash={text:"->>-",token:"->>-",line:"1",loc:{first_line:1,last_line:1,first_column:1,last_column:1},expected:["'ACTIVE_PARTICIPANT'"]},a}}return Gn.push({from:t,to:e,message:n.text,wrap:void 0===n.wrap&&nr()||!!n.wrap,type:r}),!0},nr=function(){return 
Qn},rr={SOLID:0,DOTTED:1,NOTE:2,SOLID_CROSS:3,DOTTED_CROSS:4,SOLID_OPEN:5,DOTTED_OPEN:6,LOOP_START:10,LOOP_END:11,ALT_START:12,ALT_ELSE:13,ALT_END:14,OPT_START:15,OPT_END:16,ACTIVE_START:17,ACTIVE_END:18,PAR_START:19,PAR_AND:20,PAR_END:21,RECT_START:22,RECT_END:23},ir=function(t,e,n){var r={actor:t,placement:e,message:n.text,wrap:void 0===n.wrap&&nr()||!!n.wrap},i=[].concat(t,t);qn.push(r),Gn.push({from:i[0],to:i[1],message:n.text,wrap:void 0===n.wrap&&nr()||!!n.wrap,type:rr.NOTE,placement:e})},ar=function(t){Xn=t.text,Zn=void 0===t.wrap&&nr()||!!t.wrap},or={addActor:Kn,addMessage:function(t,e,n,r){Gn.push({from:t,to:e,message:n.text,wrap:void 0===n.wrap&&nr()||!!n.wrap,answer:r})},addSignal:er,autoWrap:nr,setWrap:function(t){Qn=t},enableSequenceNumbers:function(){Jn=!0},showSequenceNumbers:function(){return Jn},getMessages:function(){return Gn},getActors:function(){return Hn},getActor:function(t){return Hn[t]},getActorKeys:function(){return Object.keys(Hn)},getTitle:function(){return Xn},parseDirective:function(t,e,n){Ho.parseDirective(this,t,e,n)},getConfig:function(){return _t().sequence},getTitleWrapped:function(){return Zn},clear:function(){Hn={},Gn=[]},parseMessage:function(t){var e=t.trim(),n={text:e.replace(/^[:]?(?:no)?wrap:/,"").trim(),wrap:null===e.match(/^[:]?(?:no)?wrap:/)?x.hasBreaks(e)||void 0:null!==e.match(/^[:]?wrap:/)||null===e.match(/^[:]?nowrap:/)&&void 0};return c.debug("parseMessage:",n),n},LINETYPE:rr,ARROWTYPE:{FILLED:0,OPEN:1},PLACEMENT:{LEFTOF:0,RIGHTOF:1,OVER:2},addNote:ir,setTitle:ar,apply:function t(e){if(e instanceof Array)e.forEach((function(e){t(e)}));else switch(e.type){case"addActor":Kn(e.actor,e.actor,e.description);break;case"activeStart":case"activeEnd":er(e.actor,void 0,void 0,e.signalType);break;case"addNote":ir(e.actor,e.placement,e.text);break;case"addMessage":er(e.from,e.to,e.msg,e.signalType);break;case"loopStart":er(void 0,void 0,e.loopText,e.signalType);break;case"loopEnd":er(void 0,void 0,void 
0,e.signalType);break;case"rectStart":er(void 0,void 0,e.color,e.signalType);break;case"rectEnd":er(void 0,void 0,void 0,e.signalType);break;case"optStart":er(void 0,void 0,e.optText,e.signalType);break;case"optEnd":er(void 0,void 0,void 0,e.signalType);break;case"altStart":case"else":er(void 0,void 0,e.altText,e.signalType);break;case"altEnd":er(void 0,void 0,void 0,e.signalType);break;case"setTitle":ar(e.text);break;case"parStart":case"and":er(void 0,void 0,e.parText,e.signalType);break;case"parEnd":er(void 0,void 0,void 0,e.signalType)}}};$n.parser.yy=or;var sr={},cr={data:{startx:void 0,stopx:void 0,starty:void 0,stopy:void 0},verticalPos:0,sequenceItems:[],activations:[],models:{getHeight:function(){return Math.max.apply(null,0===this.actors.length?[0]:this.actors.map((function(t){return t.height||0})))+(0===this.loops.length?0:this.loops.map((function(t){return t.height||0})).reduce((function(t,e){return t+e})))+(0===this.messages.length?0:this.messages.map((function(t){return t.height||0})).reduce((function(t,e){return t+e})))+(0===this.notes.length?0:this.notes.map((function(t){return t.height||0})).reduce((function(t,e){return t+e})))},clear:function(){this.actors=[],this.loops=[],this.messages=[],this.notes=[]},addActor:function(t){this.actors.push(t)},addLoop:function(t){this.loops.push(t)},addMessage:function(t){this.messages.push(t)},addNote:function(t){this.notes.push(t)},lastActor:function(){return this.actors[this.actors.length-1]},lastLoop:function(){return this.loops[this.loops.length-1]},lastMessage:function(){return this.messages[this.messages.length-1]},lastNote:function(){return this.notes[this.notes.length-1]},actors:[],loops:[],messages:[],notes:[]},init:function(){this.sequenceItems=[],this.activations=[],this.models.clear(),this.data={startx:void 0,stopx:void 0,starty:void 0,stopy:void 0},this.verticalPos=0,dr($n.parser.yy.getConfig())},updateVal:function(t,e,n,r){void 0===t[e]?t[e]=n:t[e]=r(n,t[e])},updateBounds:function(t,e,n,r){var 
i=this,a=0;function o(o){return function(s){a++;var c=i.sequenceItems.length-a+1;i.updateVal(s,"starty",e-c*sr.boxMargin,Math.min),i.updateVal(s,"stopy",r+c*sr.boxMargin,Math.max),i.updateVal(cr.data,"startx",t-c*sr.boxMargin,Math.min),i.updateVal(cr.data,"stopx",n+c*sr.boxMargin,Math.max),"activation"!==o&&(i.updateVal(s,"startx",t-c*sr.boxMargin,Math.min),i.updateVal(s,"stopx",n+c*sr.boxMargin,Math.max),i.updateVal(cr.data,"starty",e-c*sr.boxMargin,Math.min),i.updateVal(cr.data,"stopy",r+c*sr.boxMargin,Math.max))}}this.sequenceItems.forEach(o()),this.activations.forEach(o("activation"))},insert:function(t,e,n,r){var i=Math.min(t,n),a=Math.max(t,n),o=Math.min(e,r),s=Math.max(e,r);this.updateVal(cr.data,"startx",i,Math.min),this.updateVal(cr.data,"starty",o,Math.min),this.updateVal(cr.data,"stopx",a,Math.max),this.updateVal(cr.data,"stopy",s,Math.max),this.updateBounds(i,o,a,s)},newActivation:function(t,e,n){var r=n[t.from.actor],i=pr(t.from.actor).length||0,a=r.x+r.width/2+(i-1)*sr.activationWidth/2;this.activations.push({startx:a,starty:this.verticalPos+2,stopx:a+sr.activationWidth,stopy:void 0,actor:t.from.actor,anchored:Un.anchorElement(e)})},endActivation:function(t){var e=this.activations.map((function(t){return t.actor})).lastIndexOf(t.from.actor);return this.activations.splice(e,1)[0]},createLoop:function(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{message:void 0,wrap:!1,width:void 0},e=arguments.length>1?arguments[1]:void 0;return{startx:void 0,starty:this.verticalPos,stopx:void 0,stopy:void 0,title:t.message,wrap:t.wrap,width:t.width,height:0,fill:e}},newLoop:function(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{message:void 0,wrap:!1,width:void 0},e=arguments.length>1?arguments[1]:void 0;this.sequenceItems.push(this.createLoop(t,e))},endLoop:function(){return this.sequenceItems.pop()},addSectionToLoop:function(t){var 
e=this.sequenceItems.pop();e.sections=e.sections||[],e.sectionTitles=e.sectionTitles||[],e.sections.push({y:cr.getVerticalPos(),height:0}),e.sectionTitles.push(t),this.sequenceItems.push(e)},bumpVerticalPos:function(t){this.verticalPos=this.verticalPos+t,this.data.stopy=this.verticalPos},getVerticalPos:function(){return this.verticalPos},getBounds:function(){return{bounds:this.data,models:this.models}}},ur=function(t){return{fontFamily:t.messageFontFamily,fontSize:t.messageFontSize,fontWeight:t.messageFontWeight}},lr=function(t){return{fontFamily:t.noteFontFamily,fontSize:t.noteFontSize,fontWeight:t.noteFontWeight}},hr=function(t){return{fontFamily:t.actorFontFamily,fontSize:t.actorFontSize,fontWeight:t.actorFontWeight}},fr=function(t,e,n,r){for(var i=0,a=0,o=0;o0&&o.forEach((function(r){if(n=r,i.startx===i.stopx){var a=e[t.from],o=e[t.to];n.from=Math.min(a.x-i.width/2,a.x-a.width/2,n.from),n.to=Math.max(o.x+i.width/2,o.x+a.width/2,n.to),n.width=Math.max(n.width,Math.abs(n.to-n.from))-sr.labelBoxWidth}else n.from=Math.min(i.startx,n.from),n.to=Math.max(i.stopx,n.to),n.width=Math.max(n.width,i.width)-sr.labelBoxWidth})))})),cr.activations=[],c.debug("Loop type widths:",a),a},xr={bounds:cr,drawActors:fr,setConf:dr,draw:function(t,e){sr=_t().sequence,$n.parser.yy.clear(),$n.parser.yy.setWrap(sr.wrap),$n.parser.parse(t+"\n"),cr.init(),c.debug("C:".concat(JSON.stringify(sr,null,2)));var n=Object(d.select)('[id="'.concat(e,'"]')),r=$n.parser.yy.getActors(),i=$n.parser.yy.getActorKeys(),a=$n.parser.yy.getMessages(),o=$n.parser.yy.getTitle(),s=vr(r,a);sr.height=mr(r,s),fr(n,r,i,0);var u=br(a,r,s);Un.insertArrowHead(n),Un.insertArrowCrossHead(n),Un.insertSequenceNumber(n);var l=1;a.forEach((function(t){var e,i,a;switch(t.type){case $n.parser.yy.LINETYPE.NOTE:i=t.noteModel,function(t,e){cr.bumpVerticalPos(sr.boxMargin),e.height=sr.boxMargin,e.starty=cr.getVerticalPos();var n=Un.getNoteRect();n.x=e.startx,n.y=e.starty,n.width=e.width||sr.width,n.class="note";var 
r=t.append("g"),i=Un.drawRect(r,n),a=Un.getTextObj();a.x=e.startx,a.y=e.starty,a.width=n.width,a.dy="1em",a.text=e.message,a.class="noteText",a.fontFamily=sr.noteFontFamily,a.fontSize=sr.noteFontSize,a.fontWeight=sr.noteFontWeight,a.anchor=sr.noteAlign,a.textMargin=sr.noteMargin,a.valign=sr.noteAlign,a.wrap=!0;var o=Fn(r,a),s=Math.round(o.map((function(t){return(t._groups||t)[0][0].getBBox().height})).reduce((function(t,e){return t+e})));i.attr("height",s+2*sr.noteMargin),e.height+=s+2*sr.noteMargin,cr.bumpVerticalPos(s+2*sr.noteMargin),e.stopy=e.starty+s+2*sr.noteMargin,e.stopx=e.startx+n.width,cr.insert(e.startx,e.starty,e.stopx,e.stopy),cr.models.addNote(e)}(n,i);break;case $n.parser.yy.LINETYPE.ACTIVE_START:cr.newActivation(t,n,r);break;case $n.parser.yy.LINETYPE.ACTIVE_END:!function(t,e){var r=cr.endActivation(t);r.starty+18>e&&(r.starty=e-6,e+=12),Un.drawActivation(n,r,e,sr,pr(t.from.actor).length),cr.insert(r.startx,e-10,r.stopx,e)}(t,cr.getVerticalPos());break;case $n.parser.yy.LINETYPE.LOOP_START:yr(u,t,sr.boxMargin,sr.boxMargin+sr.boxTextMargin,(function(t){return cr.newLoop(t)}));break;case $n.parser.yy.LINETYPE.LOOP_END:e=cr.endLoop(),Un.drawLoop(n,e,"loop",sr),cr.bumpVerticalPos(e.stopy-cr.getVerticalPos()),cr.models.addLoop(e);break;case $n.parser.yy.LINETYPE.RECT_START:yr(u,t,sr.boxMargin,sr.boxMargin,(function(t){return cr.newLoop(void 0,t.message)}));break;case $n.parser.yy.LINETYPE.RECT_END:e=cr.endLoop(),Un.drawBackgroundRect(n,e),cr.models.addLoop(e),cr.bumpVerticalPos(e.stopy-cr.getVerticalPos());break;case $n.parser.yy.LINETYPE.OPT_START:yr(u,t,sr.boxMargin,sr.boxMargin+sr.boxTextMargin,(function(t){return cr.newLoop(t)}));break;case $n.parser.yy.LINETYPE.OPT_END:e=cr.endLoop(),Un.drawLoop(n,e,"opt",sr),cr.bumpVerticalPos(e.stopy-cr.getVerticalPos()),cr.models.addLoop(e);break;case $n.parser.yy.LINETYPE.ALT_START:yr(u,t,sr.boxMargin,sr.boxMargin+sr.boxTextMargin,(function(t){return cr.newLoop(t)}));break;case 
$n.parser.yy.LINETYPE.ALT_ELSE:yr(u,t,sr.boxMargin+sr.boxTextMargin,sr.boxMargin,(function(t){return cr.addSectionToLoop(t)}));break;case $n.parser.yy.LINETYPE.ALT_END:e=cr.endLoop(),Un.drawLoop(n,e,"alt",sr),cr.bumpVerticalPos(e.stopy-cr.getVerticalPos()),cr.models.addLoop(e);break;case $n.parser.yy.LINETYPE.PAR_START:yr(u,t,sr.boxMargin,sr.boxMargin+sr.boxTextMargin,(function(t){return cr.newLoop(t)}));break;case $n.parser.yy.LINETYPE.PAR_AND:yr(u,t,sr.boxMargin+sr.boxTextMargin,sr.boxMargin,(function(t){return cr.addSectionToLoop(t)}));break;case $n.parser.yy.LINETYPE.PAR_END:e=cr.endLoop(),Un.drawLoop(n,e,"par",sr),cr.bumpVerticalPos(e.stopy-cr.getVerticalPos()),cr.models.addLoop(e);break;default:try{(a=t.msgModel).starty=cr.getVerticalPos(),a.sequenceIndex=l,function(t,e){cr.bumpVerticalPos(10);var n=e.startx,r=e.stopx,i=e.starty,a=e.message,o=e.type,s=e.sequenceIndex,c=e.wrap,u=x.splitBreaks(a).length,l=V.calculateTextDimensions(a,ur(sr)),h=l.height/u;e.height+=h,cr.bumpVerticalPos(h);var f=Un.getTextObj();f.x=n,f.y=i+10,f.width=r-n,f.class="messageText",f.dy="1em",f.text=a,f.fontFamily=sr.messageFontFamily,f.fontSize=sr.messageFontSize,f.fontWeight=sr.messageFontWeight,f.anchor=sr.messageAlign,f.valign=sr.messageAlign,f.textMargin=sr.wrapPadding,f.tspan=!1,f.wrap=c,Fn(t,f);var d,p,g=l.height-10,y=l.width;if(n===r){p=cr.getVerticalPos()+g,sr.rightAngles?d=t.append("path").attr("d","M ".concat(n,",").concat(p," H ").concat(n+Math.max(sr.width/2,y/2)," V ").concat(p+25," H ").concat(n)):(g+=sr.boxMargin,p=cr.getVerticalPos()+g,d=t.append("path").attr("d","M "+n+","+p+" C "+(n+60)+","+(p-10)+" "+(n+60)+","+(p+30)+" "+n+","+(p+20))),g+=30;var v=Math.max(y/2,sr.width/2);cr.insert(n-v,cr.getVerticalPos()-10+g,r+v,cr.getVerticalPos()+30+g)}else 
g+=sr.boxMargin,p=cr.getVerticalPos()+g,(d=t.append("line")).attr("x1",n),d.attr("y1",p),d.attr("x2",r),d.attr("y2",p),cr.insert(n,p-10,r,p);o===$n.parser.yy.LINETYPE.DOTTED||o===$n.parser.yy.LINETYPE.DOTTED_CROSS||o===$n.parser.yy.LINETYPE.DOTTED_OPEN?(d.style("stroke-dasharray","3, 3"),d.attr("class","messageLine1")):d.attr("class","messageLine0");var m="";sr.arrowMarkerAbsolute&&(m=(m=(m=window.location.protocol+"//"+window.location.host+window.location.pathname+window.location.search).replace(/\(/g,"\\(")).replace(/\)/g,"\\)")),d.attr("stroke-width",2),d.attr("stroke","none"),d.style("fill","none"),o!==$n.parser.yy.LINETYPE.SOLID&&o!==$n.parser.yy.LINETYPE.DOTTED||d.attr("marker-end","url("+m+"#arrowhead)"),o!==$n.parser.yy.LINETYPE.SOLID_CROSS&&o!==$n.parser.yy.LINETYPE.DOTTED_CROSS||d.attr("marker-end","url("+m+"#crosshead)"),(or.showSequenceNumbers()||sr.showSequenceNumbers)&&(d.attr("marker-start","url("+m+"#sequencenumber)"),t.append("text").attr("x",n).attr("y",p+4).attr("font-family","sans-serif").attr("font-size","12px").attr("text-anchor","middle").attr("textLength","16px").attr("class","sequenceNumber").text(s)),cr.bumpVerticalPos(g),e.height+=g,e.stopy=e.starty+e.height,cr.insert(e.fromBounds,e.starty,e.toBounds,e.stopy)}(n,a),cr.models.addMessage(a)}catch(t){c.error("error while drawing message",t)}}[$n.parser.yy.LINETYPE.SOLID_OPEN,$n.parser.yy.LINETYPE.DOTTED_OPEN,$n.parser.yy.LINETYPE.SOLID,$n.parser.yy.LINETYPE.DOTTED,$n.parser.yy.LINETYPE.SOLID_CROSS,$n.parser.yy.LINETYPE.DOTTED_CROSS].includes(t.type)&&l++})),sr.mirrorActors&&(cr.bumpVerticalPos(2*sr.boxMargin),fr(n,r,i,cr.getVerticalPos()));var h=cr.getBounds().bounds;c.debug("For line height fix Querying: #"+e+" .actor-line"),Object(d.selectAll)("#"+e+" .actor-line").attr("y2",h.stopy);var f=h.stopy-h.starty+2*sr.diagramMarginY;sr.mirrorActors&&(f=f-sr.boxMargin+sr.bottomMarginAdj);var 
p=h.stopx-h.startx+2*sr.diagramMarginX;o&&n.append("text").text(o).attr("x",(h.stopx-h.startx)/2-2*sr.diagramMarginX).attr("y",-25),W(n,f,p,sr.useMaxWidth);var g=o?40:0;n.attr("viewBox",h.startx-sr.diagramMarginX+" -"+(sr.diagramMarginY+g)+" "+p+" "+(f+g)),c.debug("models:",cr.models)}},_r=n(27),kr=n.n(_r);function wr(t){return function(t){if(Array.isArray(t)){for(var e=0,n=new Array(t.length);e=6&&n.indexOf("weekends")>=0||(n.indexOf(t.format("dddd").toLowerCase())>=0||n.indexOf(t.format(e.trim()))>=0)},Rr=function(t,e,n){if(n.length&&!t.manualEndTime){var r=o()(t.startTime,e,!0);r.add(1,"d");var i=o()(t.endTime,e,!0),a=Yr(r,i,e,n);t.endTime=i.toDate(),t.renderEndTime=a}},Yr=function(t,e,n,r){for(var i=!1,a=null;t<=e;)i||(a=e.toDate()),(i=jr(t,n,r))&&e.add(1,"d"),t.add(1,"d");return a},zr=function(t,e,n){n=n.trim();var r=/^after\s+([\d\w- ]+)/.exec(n.trim());if(null!==r){var i=null;if(r[1].split(" ").forEach((function(t){var e=qr(t);void 0!==e&&(i?e.endTime>i.endTime&&(i=e):i=e)})),i)return i.endTime;var a=new Date;return a.setHours(0,0,0,0),a}var s=o()(n,e.trim(),!0);return s.isValid()?s.toDate():(c.debug("Invalid date:"+n),c.debug("With date format:"+e.trim()),new Date)},Ur=function(t,e){if(null!==t)switch(t[2]){case"s":e.add(t[1],"seconds");break;case"m":e.add(t[1],"minutes");break;case"h":e.add(t[1],"hours");break;case"d":e.add(t[1],"days");break;case"w":e.add(t[1],"weeks")}return e.toDate()},$r=function(t,e,n,r){r=r||!1,n=n.trim();var i=o()(n,e.trim(),!0);return i.isValid()?(r&&i.add(1,"d"),i.toDate()):Ur(/^([\d]+)([wdhms])/.exec(n.trim()),o()(t))},Wr=0,Vr=function(t){return void 0===t?"task"+(Wr+=1):t},Hr=[],Gr={},qr=function(t){var e=Gr[t];return Hr[e]},Xr=function(){for(var t=function(t){var e=Hr[t],n="";switch(Hr[t].raw.startTime.type){case"prevTaskEnd":var r=qr(e.prevTaskId);e.startTime=r.endTime;break;case"getStartDate":(n=zr(0,Cr,Hr[t].raw.startTime.startData))&&(Hr[t].startTime=n)}return 
Hr[t].startTime&&(Hr[t].endTime=$r(Hr[t].startTime,Cr,Hr[t].raw.endTime.data,Fr),Hr[t].endTime&&(Hr[t].processed=!0,Hr[t].manualEndTime=o()(Hr[t].raw.endTime.data,"YYYY-MM-DD",!0).isValid(),Rr(Hr[t],Cr,Mr))),Hr[t].processed},e=!0,n=0;nr?i=1:n0&&(e=t.classes.join(" "));for(var n=0,r=0;rn-e?n+a+1.5*ei.leftPadding>u?e+r-5:n+r+5:(n-e)/2+e+r})).attr("y",(function(t,r){return t.order*e+ei.barHeight/2+(ei.fontSize/2-2)+n})).attr("text-height",i).attr("class",(function(t){var e=o(t.startTime),n=o(t.endTime);t.milestone&&(n=e+i);var r=this.getBBox().width,a="";t.classes.length>0&&(a=t.classes.join(" "));for(var c=0,l=0;ln-e?n+r+1.5*ei.leftPadding>u?a+" taskTextOutsideLeft taskTextOutside"+c+" "+h:a+" taskTextOutsideRight taskTextOutside"+c+" "+h+" width-"+r:a+" taskText taskText"+c+" "+h+" width-"+r}))}(t,i,c,h,r,0,e),function(t,e){for(var n=[],r=0,i=0;i0&&a.setAttribute("dy","1em"),a.textContent=e[i],r.appendChild(a)}return r})).attr("x",10).attr("y",(function(i,a){if(!(a>0))return i[1]*t/2+e;for(var o=0;o "+t.w+": "+JSON.stringify(i.edge(t))),vn(r,i.edge(t),i.edge(t).relation,si))}));var h=r.node().getBBox(),f=h.width+40,p=h.height+40;W(r,p,f,si.useMaxWidth);var g="".concat(h.x-20," ").concat(h.y-20," ").concat(f," ").concat(p);c.debug("viewBox ".concat(g)),r.attr("viewBox",g)};ii.parser.yy=sn;var hi={dividerMargin:10,padding:5,textHeight:10},fi=function(t){Object.keys(t).forEach((function(e){hi[e]=t[e]}))},di=function(t,e){c.info("Drawing class"),sn.clear(),ii.parser.parse(t);var n=_t().flowchart;c.info("config:",n);var r=n.nodeSpacing||50,i=n.rankSpacing||50,a=new G.a.Graph({multigraph:!0,compound:!0}).setGraph({rankdir:"TD",nodesep:r,ranksep:i,marginx:8,marginy:8}).setDefaultEdgeLabel((function(){return{}})),o=sn.getClasses(),s=sn.getRelations();c.info(s),function(t,e){var n=Object.keys(t);c.info("keys:",n),c.info(t),n.forEach((function(n){var r=t[n],i="";r.cssClasses.length>0&&(i=i+" "+r.cssClasses.join(" "));var a={labelStyle:""},o=void 
0!==r.text?r.text:r.id,s="";switch(r.type){case"class":s="class_box";break;default:s="class_box"}e.setNode(r.id,{labelStyle:a.labelStyle,shape:s,labelText:o,classData:r,rx:0,ry:0,class:i,style:a.style,id:r.id,domId:r.domId,haveCallback:r.haveCallback,link:r.link,width:"group"===r.type?500:void 0,type:r.type,padding:_t().flowchart.padding}),c.info("setNode",{labelStyle:a.labelStyle,shape:s,labelText:o,rx:0,ry:0,class:i,style:a.style,id:r.id,width:"group"===r.type?500:void 0,type:r.type,padding:_t().flowchart.padding})}))}(o,a),function(t,e){var n=0;t.forEach((function(r){n++;var i={classes:"relation"};i.pattern=1==r.relation.lineType?"dashed":"solid",i.id="id"+n,"arrow_open"===r.type?i.arrowhead="none":i.arrowhead="normal",c.info(i,r),i.startLabelRight="none"===r.relationTitle1?"":r.relationTitle1,i.endLabelLeft="none"===r.relationTitle2?"":r.relationTitle2,i.arrowTypeStart=pi(r.relation.type1),i.arrowTypeEnd=pi(r.relation.type2);var a="",o="";if(void 0!==r.style){var s=B(r.style);a=s.style,o=s.labelStyle}else a="fill:none";i.style=a,i.labelStyle=o,void 0!==r.interpolate?i.curve=D(r.interpolate,d.curveLinear):void 0!==t.defaultInterpolate?i.curve=D(t.defaultInterpolate,d.curveLinear):i.curve=D(hi.curve,d.curveLinear),r.text=r.title,void 0===r.text?void 0!==r.style&&(i.arrowheadStyle="fill: #333"):(i.arrowheadStyle="fill: #333",i.labelpos="c",_t().flowchart.htmlLabels,i.labelType="text",i.label=r.text.replace(x.lineBreakRegex,"\n"),void 0===r.style&&(i.style=i.style||"stroke: #333; stroke-width: 1.5px;fill:none"),i.labelStyle=i.labelStyle.replace("color:","fill:")),e.setEdge(r.id1,r.id2,i,n)}))}(s,a);var u=Object(d.select)('[id="'.concat(e,'"]'));u.attr("xmlns:xlink","http://www.w3.org/1999/xlink");var l=Object(d.select)("#"+e+" g");Mn(l,a,["aggregation","extension","composition","dependency"],"classDiagram",e);var h=u.node().getBBox(),f=h.width+16,p=h.height+16;if(c.debug("new ViewBox 0 0 ".concat(f," 
").concat(p),"translate(".concat(8-a._label.marginx,", ").concat(8-a._label.marginy,")")),W(u,p,f,n.useMaxWidth),u.attr("viewBox","0 0 ".concat(f," ").concat(p)),u.select("g").attr("transform","translate(".concat(8-a._label.marginx,", ").concat(8-h.y,")")),!n.htmlLabels)for(var g=document.querySelectorAll('[id="'+e+'"] .edgeLabel .label'),y=0;y0&&o.length>0){var c={stmt:"state",id:P(),type:"divider",doc:vi(o)};i.push(vi(c)),n.doc=i}n.doc.forEach((function(e){return t(n,e,!0)}))}}({id:"root"},{id:"root",doc:mi},!0),{id:"root",doc:mi}},extract:function(t){var e;e=t.doc?t.doc:t,c.info(e),wi(),c.info("Extract",e),e.forEach((function(t){"state"===t.stmt&&ki(t.id,t.type,t.doc,t.description,t.note),"relation"===t.stmt&&Ei(t.state1.id,t.state2.id,t.description)}))},trimColon:function(t){return t&&":"===t[0]?t.substr(1).trim():t.trim()}},Mi=n(22),Oi=n.n(Mi),Di={},Ni=function(t,e){Di[t]=e},Bi=function(t,e){var n=t.append("text").attr("x",2*_t().state.padding).attr("y",_t().state.textHeight+1.3*_t().state.padding).attr("font-size",_t().state.fontSize).attr("class","state-title").text(e.descriptions[0]).node().getBBox(),r=n.height,i=t.append("text").attr("x",_t().state.padding).attr("y",r+.4*_t().state.padding+_t().state.dividerMargin+_t().state.textHeight).attr("class","state-description"),a=!0,o=!0;e.descriptions.forEach((function(t){a||(!function(t,e,n){var r=t.append("tspan").attr("x",2*_t().state.padding).text(e);n||r.attr("dy",_t().state.textHeight)}(i,t,o),o=!1),a=!1}));var s=t.append("line").attr("x1",_t().state.padding).attr("y1",_t().state.padding+r+_t().state.dividerMargin/2).attr("y2",_t().state.padding+r+_t().state.dividerMargin/2).attr("class","descr-divider"),c=i.node().getBBox(),u=Math.max(c.width,n.width);return 
s.attr("x2",u+3*_t().state.padding),t.insert("rect",":first-child").attr("x",_t().state.padding).attr("y",_t().state.padding).attr("width",u+2*_t().state.padding).attr("height",c.height+r+2*_t().state.padding).attr("rx",_t().state.radius),t},Li=function(t,e,n){var r,i=_t().state.padding,a=2*_t().state.padding,o=t.node().getBBox(),s=o.width,c=o.x,u=t.append("text").attr("x",0).attr("y",_t().state.titleShift).attr("font-size",_t().state.fontSize).attr("class","state-title").text(e.id),l=u.node().getBBox().width+a,h=Math.max(l,s);h===s&&(h+=a);var f=t.node().getBBox();e.doc,r=c-i,l>s&&(r=(s-h)/2+i),Math.abs(c-f.x)s&&(r=c-(l-s)/2);var d=1-_t().state.textHeight;return t.insert("rect",":first-child").attr("x",r).attr("y",d).attr("class",n?"alt-composit":"composit").attr("width",h).attr("height",f.height+_t().state.textHeight+_t().state.titleShift+1).attr("rx","0"),u.attr("x",r+i),l<=s&&u.attr("x",c+(h-a)/2-l/2+i),t.insert("rect",":first-child").attr("x",r).attr("y",_t().state.titleShift-_t().state.textHeight-_t().state.padding).attr("width",h).attr("height",3*_t().state.textHeight).attr("rx",_t().state.radius),t.insert("rect",":first-child").attr("x",r).attr("y",_t().state.titleShift-_t().state.textHeight-_t().state.padding).attr("width",h).attr("height",f.height+3+2*_t().state.textHeight).attr("rx",_t().state.radius),t},Pi=function(t,e){e.attr("class","state-note");var n=e.append("rect").attr("x",0).attr("y",_t().state.padding),r=function(t,e,n,r){var i=0,a=r.append("text");a.style("text-anchor","start"),a.attr("class","noteText");var o=t.replace(/\r\n/g,"
"),s=(o=o.replace(/\n/g,"
")).split(x.lineBreakRegex),c=1.25*_t().state.noteMargin,u=!0,l=!1,h=void 0;try{for(var f,d=s[Symbol.iterator]();!(u=(f=d.next()).done);u=!0){var p=f.value.trim();if(p.length>0){var g=a.append("tspan");if(g.text(p),0===c)c+=g.node().getBBox().height;i+=c,g.attr("x",e+_t().state.noteMargin),g.attr("y",n+i+1.25*_t().state.noteMargin)}}}catch(t){l=!0,h=t}finally{try{u||null==d.return||d.return()}finally{if(l)throw h}}return{textWidth:a.node().getBBox().width,textHeight:i}}(t,0,0,e.append("g")),i=r.textWidth,a=r.textHeight;return n.attr("height",a+2*_t().state.noteMargin),n.attr("width",i+2*_t().state.noteMargin),n},Fi=function(t,e){var n=e.id,r={id:n,label:e.id,width:0,height:0},i=t.append("g").attr("id",n).attr("class","stateGroup");"start"===e.type&&function(t){t.append("circle").attr("class","start-state").attr("r",_t().state.sizeUnit).attr("cx",_t().state.padding+_t().state.sizeUnit).attr("cy",_t().state.padding+_t().state.sizeUnit)}(i),"end"===e.type&&function(t){t.append("circle").attr("class","end-state-outer").attr("r",_t().state.sizeUnit+_t().state.miniPadding).attr("cx",_t().state.padding+_t().state.sizeUnit+_t().state.miniPadding).attr("cy",_t().state.padding+_t().state.sizeUnit+_t().state.miniPadding),t.append("circle").attr("class","end-state-inner").attr("r",_t().state.sizeUnit).attr("cx",_t().state.padding+_t().state.sizeUnit+2).attr("cy",_t().state.padding+_t().state.sizeUnit+2)}(i),"fork"!==e.type&&"join"!==e.type||function(t,e){var n=_t().state.forkWidth,r=_t().state.forkHeight;if(e.parentId){var 
i=n;n=r,r=i}t.append("rect").style("stroke","black").style("fill","black").attr("width",n).attr("height",r).attr("x",_t().state.padding).attr("y",_t().state.padding)}(i,e),"note"===e.type&&Pi(e.note.text,i),"divider"===e.type&&function(t){t.append("line").style("stroke","grey").style("stroke-dasharray","3").attr("x1",_t().state.textHeight).attr("class","divider").attr("x2",2*_t().state.textHeight).attr("y1",0).attr("y2",0)}(i),"default"===e.type&&0===e.descriptions.length&&function(t,e){var n=t.append("text").attr("x",2*_t().state.padding).attr("y",_t().state.textHeight+2*_t().state.padding).attr("font-size",_t().state.fontSize).attr("class","state-title").text(e.id),r=n.node().getBBox();t.insert("rect",":first-child").attr("x",_t().state.padding).attr("y",_t().state.padding).attr("width",r.width+2*_t().state.padding).attr("height",r.height+2*_t().state.padding).attr("rx",_t().state.radius)}(i,e),"default"===e.type&&e.descriptions.length>0&&Bi(i,e);var a=i.node().getBBox();return r.width=a.width+2*_t().state.padding,r.height=a.height+2*_t().state.padding,Ni(n,r),r},Ii=0;Mi.parser.yy=Ai;var ji={},Ri=function t(e,n,r,i){var a,o=new G.a.Graph({compound:!0,multigraph:!0}),s=!0;for(a=0;a "+t.w+": "+JSON.stringify(o.edge(t))),function(t,e,n){e.points=e.points.filter((function(t){return!Number.isNaN(t.y)}));var r=e.points,i=Object(d.line)().x((function(t){return t.x})).y((function(t){return t.y})).curve(d.curveBasis),a=t.append("path").attr("d",i(r)).attr("id","edge"+Ii).attr("class","transition"),o="";if(_t().state.arrowMarkerAbsolute&&(o=(o=(o=window.location.protocol+"//"+window.location.host+window.location.pathname+window.location.search).replace(/\(/g,"\\(")).replace(/\)/g,"\\)")),a.attr("marker-end","url("+o+"#"+function(t){switch(t){case Ai.relationType.AGGREGATION:return"aggregation";case Ai.relationType.EXTENSION:return"extension";case Ai.relationType.COMPOSITION:return"composition";case 
Ai.relationType.DEPENDENCY:return"dependency"}}(Ai.relationType.DEPENDENCY)+"End)"),void 0!==n.title){for(var s=t.append("g").attr("class","stateLabel"),u=V.calcLabelPosition(e.points),l=u.x,h=u.y,f=x.getRows(n.title),p=0,g=[],y=0,v=0,m=0;m<=f.length;m++){var b=s.append("text").attr("text-anchor","middle").text(f[m]).attr("x",l).attr("y",h+p),_=b.node().getBBox();if(y=Math.max(y,_.width),v=Math.min(v,_.x),c.info(_.x,l,h+p),0===p){var k=b.node().getBBox();p=k.height,c.info("Title height",p,h)}g.push(b)}var w=p*f.length;if(f.length>1){var E=(f.length-1)*p*.5;g.forEach((function(t,e){return t.attr("y",h+e*p-E)})),w=p*f.length}var T=s.node().getBBox();s.insert("rect",":first-child").attr("class","box").attr("x",l-y/2-_t().state.padding/2).attr("y",h-w/2-_t().state.padding/2-3.5).attr("width",y+_t().state.padding).attr("height",w+_t().state.padding),c.info(T)}Ii++}(n,o.edge(t),o.edge(t).relation))})),w=k.getBBox();var E={id:r||"root",label:r||"root",width:0,height:0};return E.width=w.width+2*yi.padding,E.height=w.height+2*yi.padding,c.debug("Doc rendered",E,o),E},Yi=function(){},zi=function(t,e){yi=_t().state,Mi.parser.yy.clear(),Mi.parser.parse(t),c.debug("Rendering diagram "+t);var n=Object(d.select)("[id='".concat(e,"']"));n.append("defs").append("marker").attr("id","dependencyEnd").attr("refX",19).attr("refY",7).attr("markerWidth",20).attr("markerHeight",28).attr("orient","auto").append("path").attr("d","M 19,7 L9,13 L14,7 L9,1 Z"),new G.a.Graph({multigraph:!0,compound:!0,rankdir:"RL"}).setDefaultEdgeLabel((function(){return{}}));var r=Ai.getRootDoc();Ri(r,n,void 0,!1);var i=yi.padding,a=n.node().getBBox(),o=a.width+2*i,s=a.height+2*i;W(n,s,1.75*o,yi.useMaxWidth),n.attr("viewBox","".concat(a.x-yi.padding," ").concat(a.y-yi.padding," ")+o+" "+s)},Ui={},$i={},Wi=function(t,e,n,r){if("root"!==n.id){var 
i="rect";!0===n.start&&(i="start"),!1===n.start&&(i="end"),"default"!==n.type&&(i=n.type),$i[n.id]||($i[n.id]={id:n.id,shape:i,description:n.id,classes:"statediagram-state"}),n.description&&(Array.isArray($i[n.id].description)?($i[n.id].shape="rectWithTitle",$i[n.id].description.push(n.description)):$i[n.id].description.length>0?($i[n.id].shape="rectWithTitle",$i[n.id].description===n.id?$i[n.id].description=[n.description]:$i[n.id].description=[$i[n.id].description,n.description]):($i[n.id].shape="rect",$i[n.id].description=n.description)),!$i[n.id].type&&n.doc&&(c.info("Setting cluser for ",n.id),$i[n.id].type="group",$i[n.id].shape="divider"===n.type?"divider":"roundedWithTitle",$i[n.id].classes=$i[n.id].classes+" "+(r?"statediagram-cluster statediagram-cluster-alt":"statediagram-cluster"));var a={labelStyle:"",shape:$i[n.id].shape,labelText:$i[n.id].description,classes:$i[n.id].classes,style:"",id:n.id,domId:"state-"+n.id+"-"+Vi,type:$i[n.id].type,padding:15};if(n.note){var o={labelStyle:"",shape:"note",labelText:n.note.text,classes:"statediagram-note",style:"",id:n.id+"----note",domId:"state-"+n.id+"----note-"+Vi,type:$i[n.id].type,padding:15},s={labelStyle:"",shape:"noteGroup",labelText:n.note.text,classes:$i[n.id].classes,style:"",id:n.id+"----parent",domId:"state-"+n.id+"----parent-"+Vi,type:"group",padding:0};Vi++,t.setNode(n.id+"----parent",s),t.setNode(o.id,o),t.setNode(n.id,a),t.setParent(n.id,n.id+"----parent"),t.setParent(o.id,n.id+"----parent");var u=n.id,l=o.id;"left of"===n.note.position&&(u=o.id,l=n.id),t.setEdge(u,l,{arrowhead:"none",arrowType:"",style:"fill:none",labelStyle:"",classes:"transition note-edge",arrowheadStyle:"fill: #333",labelpos:"c",labelType:"text",thickness:"normal"})}else t.setNode(n.id,a)}e&&"root"!==e.id&&(c.info("Setting node ",n.id," to be child of its parent ",e.id),t.setParent(n.id,e.id)),n.doc&&(c.info("Adding nodes children 
"),Hi(t,n,n.doc,!r))},Vi=0,Hi=function(t,e,n,r){Vi=0,c.trace("items",n),n.forEach((function(n){if("state"===n.stmt||"default"===n.stmt)Wi(t,e,n,r);else if("relation"===n.stmt){Wi(t,e,n.state1,r),Wi(t,e,n.state2,r);var i={id:"edge"+Vi,arrowhead:"normal",arrowTypeEnd:"arrow_barb",style:"fill:none",labelStyle:"",label:n.description,arrowheadStyle:"fill: #333",labelpos:"c",labelType:"text",thickness:"normal",classes:"transition"},a=n.state1.id,o=n.state2.id;t.setEdge(a,o,i,Vi),Vi++}}))},Gi=function(t){for(var e=Object.keys(t),n=0;ne.seq?t:e}),t[0]),n="";t.forEach((function(t){n+=t===e?"\t*":"\t|"}));var r,i,a,o=[n,e.id,e.seq];for(var s in Ji)Ji[s]===e.id&&o.push(s);if(c.debug(o.join(" ")),Array.isArray(e.parent)){var u=Xi[e.parent[0]];ia(t,e,u),t.push(Xi[e.parent[1]])}else{if(null==e.parent)return;var l=Xi[e.parent];ia(t,e,l)}r=t,i=function(t){return t.id},a=Object.create(null),aa(t=r.reduce((function(t,e){var n=i(e);return a[n]||(a[n]=!0,t.push(e)),t}),[]))}var oa,sa=function(){var t=Object.keys(Xi).map((function(t){return Xi[t]}));return t.forEach((function(t){c.debug(t.id)})),t.sort((function(t,e){return e.seq-t.seq})),t},ca={setDirection:function(t){Ki=t},setOptions:function(t){c.debug("options str",t),t=(t=t&&t.trim())||"{}";try{ra=JSON.parse(t)}catch(t){c.error("error while parsing gitGraph options",t.message)}},getOptions:function(){return ra},commit:function(t){var e={id:ea(),message:t,seq:ta++,parent:null==Zi?null:Zi.id};Zi=e,Xi[e.id]=e,Ji[Qi]=e.id,c.debug("in pushCommit "+e.id)},branch:function(t){Ji[t]=null!=Zi?Zi.id:null,c.debug("in createBranch")},merge:function(t){var e=Xi[Ji[Qi]],n=Xi[Ji[t]];if(function(t,e){return t.seq>e.seq&&na(e,t)}(e,n))c.debug("Already merged");else{if(na(e,n))Ji[Qi]=Ji[t],Zi=Xi[Ji[Qi]];else{var r={id:ea(),message:"merged branch "+t+" into "+Qi,seq:ta++,parent:[null==Zi?null:Zi.id,Ji[t]]};Zi=r,Xi[r.id]=r,Ji[Qi]=r.id}c.debug(Ji),c.debug("in mergeBranch")}},checkout:function(t){c.debug("in checkout");var 
e=Ji[Qi=t];Zi=Xi[e]},reset:function(t){c.debug("in reset",t);var e=t.split(":")[0],n=parseInt(t.split(":")[1]),r="HEAD"===e?Zi:Xi[Ji[e]];for(c.debug(r,n);n>0;)if(n--,!(r=Xi[r.parent])){var i="Critical error - unique parent commit not found during reset";throw c.error(i),i}Zi=r,Ji[Qi]=r.id},prettyPrint:function(){c.debug(Xi),aa([sa()[0]])},clear:function(){Xi={},Ji={master:Zi=null},Qi="master",ta=0},getBranchesAsObjArray:function(){var t=[];for(var e in Ji)t.push({name:e,commit:Xi[Ji[e]]});return t},getBranches:function(){return Ji},getCommits:function(){return Xi},getCommitsArray:sa,getCurrentBranch:function(){return Qi},getDirection:function(){return Ki},getHead:function(){return Zi}},ua=n(71),la=n.n(ua),ha={},fa={nodeSpacing:150,nodeFillColor:"yellow",nodeStrokeWidth:2,nodeStrokeColor:"grey",lineStrokeWidth:4,branchOffset:50,lineColor:"grey",leftMargin:50,branchColors:["#442f74","#983351","#609732","#AA9A39"],nodeRadius:10,nodeLabel:{width:75,height:100,x:-25,y:0}},da={};function pa(t,e,n,r){var i=D(r,d.curveBasis),a=fa.branchColors[n%fa.branchColors.length],o=Object(d.line)().x((function(t){return Math.round(t.x)})).y((function(t){return Math.round(t.y)})).curve(i);t.append("svg:path").attr("d",o(e)).style("stroke",a).style("stroke-width",fa.lineStrokeWidth).style("fill","none")}function ga(t,e){e=e||t.node().getBBox();var n=t.node().getCTM();return{left:n.e+e.x*n.a,top:n.f+e.y*n.d,width:e.width,height:e.height}}function ya(t,e,n,r,i){c.debug("svgDrawLineForCommits: ",e,n);var a=ga(t.select("#node-"+e+" circle")),o=ga(t.select("#node-"+n+" circle"));switch(r){case"LR":if(a.left-o.left>fa.nodeSpacing){var s={x:a.left-fa.nodeSpacing,y:o.top+o.height/2};pa(t,[s,{x:o.left+o.width,y:o.top+o.height/2}],i,"linear"),pa(t,[{x:a.left,y:a.top+a.height/2},{x:a.left-fa.nodeSpacing/2,y:a.top+a.height/2},{x:a.left-fa.nodeSpacing/2,y:s.y},s],i)}else 
pa(t,[{x:a.left,y:a.top+a.height/2},{x:a.left-fa.nodeSpacing/2,y:a.top+a.height/2},{x:a.left-fa.nodeSpacing/2,y:o.top+o.height/2},{x:o.left+o.width,y:o.top+o.height/2}],i);break;case"BT":if(o.top-a.top>fa.nodeSpacing){var u={x:o.left+o.width/2,y:a.top+a.height+fa.nodeSpacing};pa(t,[u,{x:o.left+o.width/2,y:o.top}],i,"linear"),pa(t,[{x:a.left+a.width/2,y:a.top+a.height},{x:a.left+a.width/2,y:a.top+a.height+fa.nodeSpacing/2},{x:o.left+o.width/2,y:u.y-fa.nodeSpacing/2},u],i)}else pa(t,[{x:a.left+a.width/2,y:a.top+a.height},{x:a.left+a.width/2,y:a.top+fa.nodeSpacing/2},{x:o.left+o.width/2,y:o.top-fa.nodeSpacing/2},{x:o.left+o.width/2,y:o.top}],i)}}function va(t,e){return t.select(e).node().cloneNode(!0)}function ma(t,e,n,r){var i,a=Object.keys(ha).length;if("string"==typeof e)do{if(i=ha[e],c.debug("in renderCommitHistory",i.id,i.seq),t.select("#node-"+e).size()>0)return;t.append((function(){return va(t,"#def-commit")})).attr("class","commit").attr("id",(function(){return"node-"+i.id})).attr("transform",(function(){switch(r){case"LR":return"translate("+(i.seq*fa.nodeSpacing+fa.leftMargin)+", "+oa*fa.branchOffset+")";case"BT":return"translate("+(oa*fa.branchOffset+fa.leftMargin)+", "+(a-i.seq)*fa.nodeSpacing+")"}})).attr("fill",fa.nodeFillColor).attr("stroke",fa.nodeStrokeColor).attr("stroke-width",fa.nodeStrokeWidth);var o=void 0;for(var s in n)if(n[s].commit===i){o=n[s];break}o&&(c.debug("found branch ",o.name),t.select("#node-"+i.id+" p").append("xhtml:span").attr("class","branch-label").text(o.name+", ")),t.select("#node-"+i.id+" p").append("xhtml:span").attr("class","commit-id").text(i.id),""!==i.message&&"BT"===r&&t.select("#node-"+i.id+" p").append("xhtml:span").attr("class","commit-msg").text(", "+i.message),e=i.parent}while(e&&ha[e]);Array.isArray(e)&&(c.debug("found merge commmit",e),ma(t,e[0],n,r),oa++,ma(t,e[1],n,r),oa--)}function ba(t,e,n,r){for(r=r||0;e.seq>0&&!e.lineDrawn;)"string"==typeof 
e.parent?(ya(t,e.id,e.parent,n,r),e.lineDrawn=!0,e=ha[e.parent]):Array.isArray(e.parent)&&(ya(t,e.id,e.parent[0],n,r),ya(t,e.id,e.parent[1],n,r+1),ba(t,ha[e.parent[1]],n,r+1),e.lineDrawn=!0,e=ha[e.parent[0]])}var xa,_a=function(t){da=t},ka=function(t,e,n){try{var r=la.a.parser;r.yy=ca,r.yy.clear(),c.debug("in gitgraph renderer",t+"\n","id:",e,n),r.parse(t+"\n"),fa=Object.assign(fa,da,ca.getOptions()),c.debug("effective options",fa);var i=ca.getDirection();ha=ca.getCommits();var a=ca.getBranchesAsObjArray();"BT"===i&&(fa.nodeLabel.x=a.length*fa.branchOffset,fa.nodeLabel.width="100%",fa.nodeLabel.y=-2*fa.nodeRadius);var o=Object(d.select)('[id="'.concat(e,'"]'));for(var s in function(t){t.append("defs").append("g").attr("id","def-commit").append("circle").attr("r",fa.nodeRadius).attr("cx",0).attr("cy",0),t.select("#def-commit").append("foreignObject").attr("width",fa.nodeLabel.width).attr("height",fa.nodeLabel.height).attr("x",fa.nodeLabel.x).attr("y",fa.nodeLabel.y).attr("class","node-label").attr("requiredFeatures","http://www.w3.org/TR/SVG11/feature#Extensibility").append("p").html("")}(o),oa=1,a){var u=a[s];ma(o,u.commit.id,a,i),ba(o,u.commit,i),oa++}o.attr("height",(function(){return"BT"===i?Object.keys(ha).length*fa.nodeSpacing:(a.length+1)*fa.branchOffset}))}catch(t){c.error("Error while rendering gitgraph"),c.error(t.message)}},wa="",Ea=!1,Ta={setMessage:function(t){c.debug("Setting message to: "+t),wa=t},getMessage:function(){return wa},setInfo:function(t){Ea=t},getInfo:function(){return Ea}},Ca=n(72),Sa=n.n(Ca),Aa={},Ma=function(t){Object.keys(t).forEach((function(e){Aa[e]=t[e]}))},Oa=function(t,e,n){try{var r=Sa.a.parser;r.yy=Ta,c.debug("Renering info diagram\n"+t),r.parse(t),c.debug("Parsed info diagram");var i=Object(d.select)("#"+e);i.append("g").append("text").attr("x",100).attr("y",40).attr("class","version").attr("font-size","32px").style("text-anchor","middle").text("v 
"+n),i.attr("height",100),i.attr("width",400)}catch(t){c.error("Error while rendering info diagram"),c.error(t.message)}},Da={},Na=function(t){Object.keys(t).forEach((function(e){Da[e]=t[e]}))},Ba=function(t,e){try{c.debug("Renering svg for syntax error\n");var n=Object(d.select)("#"+t),r=n.append("g");r.append("path").attr("class","error-icon").attr("d","m411.313,123.313c6.25-6.25 6.25-16.375 0-22.625s-16.375-6.25-22.625,0l-32,32-9.375,9.375-20.688-20.688c-12.484-12.5-32.766-12.5-45.25,0l-16,16c-1.261,1.261-2.304,2.648-3.31,4.051-21.739-8.561-45.324-13.426-70.065-13.426-105.867,0-192,86.133-192,192s86.133,192 192,192 192-86.133 192-192c0-24.741-4.864-48.327-13.426-70.065 1.402-1.007 2.79-2.049 4.051-3.31l16-16c12.5-12.492 12.5-32.758 0-45.25l-20.688-20.688 9.375-9.375 32.001-31.999zm-219.313,100.687c-52.938,0-96,43.063-96,96 0,8.836-7.164,16-16,16s-16-7.164-16-16c0-70.578 57.422-128 128-128 8.836,0 16,7.164 16,16s-7.164,16-16,16z"),r.append("path").attr("class","error-icon").attr("d","m459.02,148.98c-6.25-6.25-16.375-6.25-22.625,0s-6.25,16.375 0,22.625l16,16c3.125,3.125 7.219,4.688 11.313,4.688 4.094,0 8.188-1.563 11.313-4.688 6.25-6.25 6.25-16.375 0-22.625l-16.001-16z"),r.append("path").attr("class","error-icon").attr("d","m340.395,75.605c3.125,3.125 7.219,4.688 11.313,4.688 4.094,0 8.188-1.563 11.313-4.688 6.25-6.25 6.25-16.375 0-22.625l-16-16c-6.25-6.25-16.375-6.25-22.625,0s-6.25,16.375 0,22.625l15.999,16z"),r.append("path").attr("class","error-icon").attr("d","m400,64c8.844,0 16-7.164 16-16v-32c0-8.836-7.156-16-16-16-8.844,0-16,7.164-16,16v32c0,8.836 7.156,16 16,16z"),r.append("path").attr("class","error-icon").attr("d","m496,96.586h-32c-8.844,0-16,7.164-16,16 0,8.836 7.156,16 16,16h32c8.844,0 16-7.164 16-16 0-8.836-7.156-16-16-16z"),r.append("path").attr("class","error-icon").attr("d","m436.98,75.605c3.125,3.125 7.219,4.688 11.313,4.688 4.094,0 8.188-1.563 11.313-4.688l32-32c6.25-6.25 6.25-16.375 
0-22.625s-16.375-6.25-22.625,0l-32,32c-6.251,6.25-6.251,16.375-0.001,22.625z"),r.append("text").attr("class","error-text").attr("x",1240).attr("y",250).attr("font-size","150px").style("text-anchor","middle").text("Syntax error in graph"),r.append("text").attr("class","error-text").attr("x",1050).attr("y",400).attr("font-size","100px").style("text-anchor","middle").text("mermaid version "+e),n.attr("height",100),n.attr("width",400),n.attr("viewBox","768 0 512 512")}catch(t){c.error("Error while rendering info diagram"),c.error(t.message)}},La={},Pa="",Fa={parseDirective:function(t,e,n){Ho.parseDirective(this,t,e,n)},getConfig:function(){return _t().pie},addSection:function(t,e){void 0===La[t]&&(La[t]=e,c.debug("Added new section :",t))},getSections:function(){return La},cleanupValue:function(t){return":"===t.substring(0,1)?(t=t.substring(1).trim(),Number(t.trim())):Number(t.trim())},clear:function(){La={},Pa=""},setTitle:function(t){Pa=t},getTitle:function(){return Pa}},Ia=n(73),ja=n.n(Ia),Ra={},Ya=function(t){Object.keys(t).forEach((function(e){Ra[e]=t[e]}))},za=function(t,e){try{var n=ja.a.parser;n.yy=Fa,c.debug("Rendering info diagram\n"+t),n.yy.clear(),n.parse(t),c.debug("Parsed info diagram");var r=document.getElementById(e);void 0===(xa=r.parentElement.offsetWidth)&&(xa=1200),void 0!==Ra.useWidth&&(xa=Ra.useWidth);var i=Object(d.select)("#"+e);W(i,450,xa,Ra.useMaxWidth),r.setAttribute("viewBox","0 0 "+xa+" 450");var a=Math.min(xa,450)/2-40,o=i.append("g").attr("transform","translate("+xa/2+",225)"),s=Fa.getSections(),u=0;Object.keys(s).forEach((function(t){u+=s[t]}));var l=Object(d.scaleOrdinal)().domain(s).range(d.schemeSet2),h=Object(d.pie)().value((function(t){return t.value}))(Object(d.entries)(s)),f=Object(d.arc)().innerRadius(0).outerRadius(a);o.selectAll("mySlices").data(h).enter().append("path").attr("d",f).attr("fill",(function(t){return 
l(t.data.key)})).attr("stroke","black").style("stroke-width","2px").style("opacity",.7),o.selectAll("mySlices").data(h).enter().append("text").text((function(t){return(t.data.value/u*100).toFixed(0)+"%"})).attr("transform",(function(t){return"translate("+f.centroid(t)+")"})).style("text-anchor","middle").attr("class","slice").style("font-size",17),o.append("text").text(n.yy.getTitle()).attr("x",0).attr("y",-200).attr("class","pieTitleText");var p=o.selectAll(".legend").data(l.domain()).enter().append("g").attr("class","legend").attr("transform",(function(t,e){return"translate(216,"+(22*e-22*l.domain().length/2)+")"}));p.append("rect").attr("width",18).attr("height",18).style("fill",l).style("stroke",l),p.append("text").attr("x",22).attr("y",14).text((function(t){return t}))}catch(t){c.error("Error while rendering info diagram"),c.error(t)}},Ua={},$a=[],Wa="",Va=function(t){return void 0===Ua[t]&&(Ua[t]={attributes:[]},c.info("Added new entity :",t)),Ua[t]},Ha={Cardinality:{ZERO_OR_ONE:"ZERO_OR_ONE",ZERO_OR_MORE:"ZERO_OR_MORE",ONE_OR_MORE:"ONE_OR_MORE",ONLY_ONE:"ONLY_ONE"},Identification:{NON_IDENTIFYING:"NON_IDENTIFYING",IDENTIFYING:"IDENTIFYING"},parseDirective:function(t,e,n){Ho.parseDirective(this,t,e,n)},getConfig:function(){return _t().er},addEntity:Va,addAttributes:function(t,e){var n,r=Va(t);for(n=e.length-1;n>=0;n--)r.attributes.push(e[n]),c.debug("Added attribute ",e[n].attributeName)},getEntities:function(){return Ua},addRelationship:function(t,e,n,r){var i={entityA:t,roleA:e,entityB:n,relSpec:r};$a.push(i),c.debug("Added new relationship :",i)},getRelationships:function(){return $a},clear:function(){Ua={},$a=[],Wa=""},setTitle:function(t){Wa=t},getTitle:function(){return 
Wa}},Ga=n(74),qa=n.n(Ga),Xa={ONLY_ONE_START:"ONLY_ONE_START",ONLY_ONE_END:"ONLY_ONE_END",ZERO_OR_ONE_START:"ZERO_OR_ONE_START",ZERO_OR_ONE_END:"ZERO_OR_ONE_END",ONE_OR_MORE_START:"ONE_OR_MORE_START",ONE_OR_MORE_END:"ONE_OR_MORE_END",ZERO_OR_MORE_START:"ZERO_OR_MORE_START",ZERO_OR_MORE_END:"ZERO_OR_MORE_END"},Za=Xa,Ja=function(t,e){var n;t.append("defs").append("marker").attr("id",Xa.ONLY_ONE_START).attr("refX",0).attr("refY",9).attr("markerWidth",18).attr("markerHeight",18).attr("orient","auto").append("path").attr("stroke",e.stroke).attr("fill","none").attr("d","M9,0 L9,18 M15,0 L15,18"),t.append("defs").append("marker").attr("id",Xa.ONLY_ONE_END).attr("refX",18).attr("refY",9).attr("markerWidth",18).attr("markerHeight",18).attr("orient","auto").append("path").attr("stroke",e.stroke).attr("fill","none").attr("d","M3,0 L3,18 M9,0 L9,18"),(n=t.append("defs").append("marker").attr("id",Xa.ZERO_OR_ONE_START).attr("refX",0).attr("refY",9).attr("markerWidth",30).attr("markerHeight",18).attr("orient","auto")).append("circle").attr("stroke",e.stroke).attr("fill","white").attr("cx",21).attr("cy",9).attr("r",6),n.append("path").attr("stroke",e.stroke).attr("fill","none").attr("d","M9,0 L9,18"),(n=t.append("defs").append("marker").attr("id",Xa.ZERO_OR_ONE_END).attr("refX",30).attr("refY",9).attr("markerWidth",30).attr("markerHeight",18).attr("orient","auto")).append("circle").attr("stroke",e.stroke).attr("fill","white").attr("cx",9).attr("cy",9).attr("r",6),n.append("path").attr("stroke",e.stroke).attr("fill","none").attr("d","M21,0 L21,18"),t.append("defs").append("marker").attr("id",Xa.ONE_OR_MORE_START).attr("refX",18).attr("refY",18).attr("markerWidth",45).attr("markerHeight",36).attr("orient","auto").append("path").attr("stroke",e.stroke).attr("fill","none").attr("d","M0,18 Q 18,0 36,18 Q 18,36 0,18 M42,9 
L42,27"),t.append("defs").append("marker").attr("id",Xa.ONE_OR_MORE_END).attr("refX",27).attr("refY",18).attr("markerWidth",45).attr("markerHeight",36).attr("orient","auto").append("path").attr("stroke",e.stroke).attr("fill","none").attr("d","M3,9 L3,27 M9,18 Q27,0 45,18 Q27,36 9,18"),(n=t.append("defs").append("marker").attr("id",Xa.ZERO_OR_MORE_START).attr("refX",18).attr("refY",18).attr("markerWidth",57).attr("markerHeight",36).attr("orient","auto")).append("circle").attr("stroke",e.stroke).attr("fill","white").attr("cx",48).attr("cy",18).attr("r",6),n.append("path").attr("stroke",e.stroke).attr("fill","none").attr("d","M0,18 Q18,0 36,18 Q18,36 0,18"),(n=t.append("defs").append("marker").attr("id",Xa.ZERO_OR_MORE_END).attr("refX",39).attr("refY",18).attr("markerWidth",57).attr("markerHeight",36).attr("orient","auto")).append("circle").attr("stroke",e.stroke).attr("fill","white").attr("cx",9).attr("cy",18).attr("r",6),n.append("path").attr("stroke",e.stroke).attr("fill","none").attr("d","M21,18 Q39,0 57,18 Q39,36 21,18")},Qa={},Ka=function(t,e,n){var r;return Object.keys(e).forEach((function(i){var a=t.append("g").attr("id",i);r=void 0===r?i:r;var o="entity-"+i,s=a.append("text").attr("class","er entityLabel").attr("id",o).attr("x",0).attr("y",0).attr("dominant-baseline","middle").attr("text-anchor","middle").attr("style","font-family: "+_t().fontFamily+"; font-size: "+Qa.fontSize+"px").text(i),c=function(t,e,n){var r=Qa.entityPadding/3,i=Qa.entityPadding/3,a=.85*Qa.fontSize,o=e.node().getBBox(),s=[],c=0,u=0,l=o.height+2*r,h=1;n.forEach((function(n){var i="".concat(e.node().id,"-attr-").concat(h),o=t.append("text").attr("class","er entityLabel").attr("id","".concat(i,"-type")).attr("x",0).attr("y",0).attr("dominant-baseline","middle").attr("text-anchor","left").attr("style","font-family: "+_t().fontFamily+"; font-size: "+a+"px").text(n.attributeType),f=t.append("text").attr("class","er 
entityLabel").attr("id","".concat(i,"-name")).attr("x",0).attr("y",0).attr("dominant-baseline","middle").attr("text-anchor","left").attr("style","font-family: "+_t().fontFamily+"; font-size: "+a+"px").text(n.attributeName);s.push({tn:o,nn:f});var d=o.node().getBBox(),p=f.node().getBBox();c=Math.max(c,d.width),u=Math.max(u,p.width),l+=Math.max(d.height,p.height)+2*r,h+=1}));var f={width:Math.max(Qa.minEntityWidth,Math.max(o.width+2*Qa.entityPadding,c+u+4*i)),height:n.length>0?l:Math.max(Qa.minEntityHeight,o.height+2*Qa.entityPadding)},d=Math.max(0,f.width-(c+u)-4*i);if(n.length>0){e.attr("transform","translate("+f.width/2+","+(r+o.height/2)+")");var p=o.height+2*r,g="attributeBoxOdd";s.forEach((function(e){var n=p+r+Math.max(e.tn.node().getBBox().height,e.nn.node().getBBox().height)/2;e.tn.attr("transform","translate("+i+","+n+")");var a=t.insert("rect","#"+e.tn.node().id).attr("class","er ".concat(g)).attr("fill",Qa.fill).attr("fill-opacity","100%").attr("stroke",Qa.stroke).attr("x",0).attr("y",p).attr("width",c+2*i+d/2).attr("height",e.tn.node().getBBox().height+2*r);e.nn.attr("transform","translate("+(parseFloat(a.attr("width"))+i)+","+n+")"),t.insert("rect","#"+e.nn.node().id).attr("class","er ".concat(g)).attr("fill",Qa.fill).attr("fill-opacity","100%").attr("stroke",Qa.stroke).attr("x","".concat(a.attr("x")+a.attr("width"))).attr("y",p).attr("width",u+2*i+d/2).attr("height",e.nn.node().getBBox().height+2*r),p+=Math.max(e.tn.node().getBBox().height,e.nn.node().getBBox().height)+2*r,g="attributeBoxOdd"==g?"attributeBoxEven":"attributeBoxOdd"}))}else f.height=Math.max(Qa.minEntityHeight,l),e.attr("transform","translate("+f.width/2+","+f.height/2+")");return f}(a,s,e[i].attributes),u=c.width,l=c.height,h=a.insert("rect","#"+o).attr("class","er 
entityBox").attr("fill",Qa.fill).attr("fill-opacity","100%").attr("stroke",Qa.stroke).attr("x",0).attr("y",0).attr("width",u).attr("height",l).node().getBBox();n.setNode(i,{width:h.width,height:h.height,shape:"rect",id:i})})),r},to=function(t){return(t.entityA+t.roleA+t.entityB).replace(/\s/g,"")},eo=0,no=function(t){for(var e=Object.keys(t),n=0;n/gi," "),r=t.append("text");r.attr("x",e.x),r.attr("y",e.y),r.attr("class","legend"),r.style("text-anchor",e.anchor),void 0!==e.class&&r.attr("class",e.class);var i=r.append("tspan");return i.attr("x",e.x+2*e.textMargin),i.text(n),r},mo=-1,bo=function(){return{x:0,y:0,width:100,anchor:"start",height:100,rx:0,ry:0}},xo=function(){function t(t,e,n,i,a,o,s,c){r(e.append("text").attr("x",n+a/2).attr("y",i+o/2+5).style("font-color",c).style("text-anchor","middle").text(t),s)}function e(t,e,n,i,a,o,s,c,u){for(var l=c.taskFontSize,h=c.taskFontFamily,f=t.split(//gi),d=0;d3?function(t){var e=Object(d.arc)().startAngle(Math.PI/2).endAngle(Math.PI/2*3).innerRadius(7.5).outerRadius(15/2.2);t.append("path").attr("class","mouth").attr("d",e).attr("transform","translate("+o.cx+","+(o.cy+2)+")")}(s):o.score<3?function(t){var e=Object(d.arc)().startAngle(3*Math.PI/2).endAngle(Math.PI/2*5).innerRadius(7.5).outerRadius(15/2.2);t.append("path").attr("class","mouth").attr("d",e).attr("transform","translate("+o.cx+","+(o.cy+7)+")")}(s):function(t){t.append("line").attr("class","mouth").attr("stroke",2).attr("x1",o.cx-5).attr("y1",o.cy+7).attr("x2",o.cx+5).attr("y2",o.cy+7).attr("class","mouth").attr("stroke-width","1px").attr("stroke","#666")}(s);var c=bo();c.x=e.x,c.y=e.y,c.fill=e.fill,c.width=n.width,c.height=n.height,c.class="task task-type-"+e.num,c.rx=3,c.ry=3,go(i,c);var u=e.x+14;e.people.forEach((function(t){var 
n=e.actors[t],r={cx:u,cy:e.y,r:7,fill:n,stroke:"#000",title:t};yo(i,r),u+=10})),xo(n)(e.task,i,c.x,c.y,c.width,c.height,{class:"task"},n,e.colour)},To=function(t){t.append("defs").append("marker").attr("id","arrowhead").attr("refX",5).attr("refY",2).attr("markerWidth",6).attr("markerHeight",4).attr("orient","auto").append("path").attr("d","M 0,0 V 4 L6,2 Z")};io.parser.yy=po;var Co={leftMargin:150,diagramMarginX:50,diagramMarginY:20,taskMargin:50,width:150,height:50,taskFontSize:14,taskFontFamily:'"Open-Sans", "sans-serif"',boxMargin:10,boxTextMargin:5,noteMargin:10,messageMargin:35,messageAlign:"center",bottomMarginAdj:1,activationWidth:10,textPlacement:"fo",actorColours:["#8FBC8F","#7CFC00","#00FFFF","#20B2AA","#B0E0E6","#FFFFE0"],sectionFills:["#191970","#8B008B","#4B0082","#2F4F4F","#800000","#8B4513","#00008B"],sectionColours:["#fff"]},So={};var Ao=Co.leftMargin,Mo={data:{startx:void 0,stopx:void 0,starty:void 0,stopy:void 0},verticalPos:0,sequenceItems:[],init:function(){this.sequenceItems=[],this.data={startx:void 0,stopx:void 0,starty:void 0,stopy:void 0},this.verticalPos=0},updateVal:function(t,e,n,r){void 0===t[e]?t[e]=n:t[e]=r(n,t[e])},updateBounds:function(t,e,n,r){var i,a=this,o=0;this.sequenceItems.forEach((function(s){o++;var c=a.sequenceItems.length-o+1;a.updateVal(s,"starty",e-c*Co.boxMargin,Math.min),a.updateVal(s,"stopy",r+c*Co.boxMargin,Math.max),a.updateVal(Mo.data,"startx",t-c*Co.boxMargin,Math.min),a.updateVal(Mo.data,"stopx",n+c*Co.boxMargin,Math.max),"activation"!==i&&(a.updateVal(s,"startx",t-c*Co.boxMargin,Math.min),a.updateVal(s,"stopx",n+c*Co.boxMargin,Math.max),a.updateVal(Mo.data,"starty",e-c*Co.boxMargin,Math.min),a.updateVal(Mo.data,"stopy",r+c*Co.boxMargin,Math.max))}))},insert:function(t,e,n,r){var 
i=Math.min(t,n),a=Math.max(t,n),o=Math.min(e,r),s=Math.max(e,r);this.updateVal(Mo.data,"startx",i,Math.min),this.updateVal(Mo.data,"starty",o,Math.min),this.updateVal(Mo.data,"stopx",a,Math.max),this.updateVal(Mo.data,"stopy",s,Math.max),this.updateBounds(i,o,a,s)},bumpVerticalPos:function(t){this.verticalPos=this.verticalPos+t,this.data.stopy=this.verticalPos},getVerticalPos:function(){return this.verticalPos},getBounds:function(){return this.data}},Oo=Co.sectionFills,Do=Co.sectionColours,No=function(t,e,n){for(var r="",i=n+(2*Co.height+Co.diagramMarginY),a=0,o="#CCC",s="black",c=0,u=0;u tspan {\n fill: ").concat(t.actorTextColor,";\n stroke: none;\n }\n\n .actor-line {\n stroke: ").concat(t.actorLineColor,";\n }\n\n .messageLine0 {\n stroke-width: 1.5;\n stroke-dasharray: none;\n stroke: ").concat(t.signalColor,";\n }\n\n .messageLine1 {\n stroke-width: 1.5;\n stroke-dasharray: 2, 2;\n stroke: ").concat(t.signalColor,";\n }\n\n #arrowhead path {\n fill: ").concat(t.signalColor,";\n stroke: ").concat(t.signalColor,";\n }\n\n .sequenceNumber {\n fill: ").concat(t.sequenceNumberColor,";\n }\n\n #sequencenumber {\n fill: ").concat(t.signalColor,";\n }\n\n #crosshead path {\n fill: ").concat(t.signalColor,";\n stroke: ").concat(t.signalColor,";\n }\n\n .messageText {\n fill: ").concat(t.signalTextColor,";\n stroke: ").concat(t.signalTextColor,";\n }\n\n .labelBox {\n stroke: ").concat(t.labelBoxBorderColor,";\n fill: ").concat(t.labelBoxBkgColor,";\n }\n\n .labelText, .labelText > tspan {\n fill: ").concat(t.labelTextColor,";\n stroke: none;\n }\n\n .loopText, .loopText > tspan {\n fill: ").concat(t.loopTextColor,";\n stroke: none;\n }\n\n .loopLine {\n stroke-width: 2px;\n stroke-dasharray: 2, 2;\n stroke: ").concat(t.labelBoxBorderColor,";\n fill: ").concat(t.labelBoxBorderColor,";\n }\n\n .note {\n //stroke: #decc93;\n stroke: ").concat(t.noteBorderColor,";\n fill: ").concat(t.noteBkgColor,";\n }\n\n .noteText, .noteText > tspan {\n fill: 
").concat(t.noteTextColor,";\n stroke: none;\n }\n\n .activation0 {\n fill: ").concat(t.activationBkgColor,";\n stroke: ").concat(t.activationBorderColor,";\n }\n\n .activation1 {\n fill: ").concat(t.activationBkgColor,";\n stroke: ").concat(t.activationBorderColor,";\n }\n\n .activation2 {\n fill: ").concat(t.activationBkgColor,";\n stroke: ").concat(t.activationBorderColor,";\n }\n")},gantt:function(t){return'\n .mermaid-main-font {\n font-family: "trebuchet ms", verdana, arial, sans-serif;\n font-family: var(--mermaid-font-family);\n }\n\n .section {\n stroke: none;\n opacity: 0.2;\n }\n\n .section0 {\n fill: '.concat(t.sectionBkgColor,";\n }\n\n .section2 {\n fill: ").concat(t.sectionBkgColor2,";\n }\n\n .section1,\n .section3 {\n fill: ").concat(t.altSectionBkgColor,";\n opacity: 0.2;\n }\n\n .sectionTitle0 {\n fill: ").concat(t.titleColor,";\n }\n\n .sectionTitle1 {\n fill: ").concat(t.titleColor,";\n }\n\n .sectionTitle2 {\n fill: ").concat(t.titleColor,";\n }\n\n .sectionTitle3 {\n fill: ").concat(t.titleColor,";\n }\n\n .sectionTitle {\n text-anchor: start;\n font-size: 11px;\n text-height: 14px;\n font-family: 'trebuchet ms', verdana, arial, sans-serif;\n font-family: var(--mermaid-font-family);\n\n }\n\n\n /* Grid and axis */\n\n .grid .tick {\n stroke: ").concat(t.gridColor,";\n opacity: 0.8;\n shape-rendering: crispEdges;\n text {\n font-family: ").concat(t.fontFamily,";\n fill: ").concat(t.textColor,";\n }\n }\n\n .grid path {\n stroke-width: 0;\n }\n\n\n /* Today line */\n\n .today {\n fill: none;\n stroke: ").concat(t.todayLineColor,";\n stroke-width: 2px;\n }\n\n\n /* Task styling */\n\n /* Default task */\n\n .task {\n stroke-width: 2;\n }\n\n .taskText {\n text-anchor: middle;\n font-family: 'trebuchet ms', verdana, arial, sans-serif;\n font-family: var(--mermaid-font-family);\n }\n\n .taskText:not([font-size]) {\n font-size: 11px;\n }\n\n .taskTextOutsideRight {\n fill: ").concat(t.taskTextDarkColor,";\n text-anchor: start;\n font-size: 11px;\n 
font-family: 'trebuchet ms', verdana, arial, sans-serif;\n font-family: var(--mermaid-font-family);\n\n }\n\n .taskTextOutsideLeft {\n fill: ").concat(t.taskTextDarkColor,";\n text-anchor: end;\n font-size: 11px;\n }\n\n /* Special case clickable */\n .task.clickable {\n cursor: pointer;\n }\n .taskText.clickable {\n cursor: pointer;\n fill: ").concat(t.taskTextClickableColor," !important;\n font-weight: bold;\n }\n\n .taskTextOutsideLeft.clickable {\n cursor: pointer;\n fill: ").concat(t.taskTextClickableColor," !important;\n font-weight: bold;\n }\n\n .taskTextOutsideRight.clickable {\n cursor: pointer;\n fill: ").concat(t.taskTextClickableColor," !important;\n font-weight: bold;\n }\n\n /* Specific task settings for the sections*/\n\n .taskText0,\n .taskText1,\n .taskText2,\n .taskText3 {\n fill: ").concat(t.taskTextColor,";\n }\n\n .task0,\n .task1,\n .task2,\n .task3 {\n fill: ").concat(t.taskBkgColor,";\n stroke: ").concat(t.taskBorderColor,";\n }\n\n .taskTextOutside0,\n .taskTextOutside2\n {\n fill: ").concat(t.taskTextOutsideColor,";\n }\n\n .taskTextOutside1,\n .taskTextOutside3 {\n fill: ").concat(t.taskTextOutsideColor,";\n }\n\n\n /* Active task */\n\n .active0,\n .active1,\n .active2,\n .active3 {\n fill: ").concat(t.activeTaskBkgColor,";\n stroke: ").concat(t.activeTaskBorderColor,";\n }\n\n .activeText0,\n .activeText1,\n .activeText2,\n .activeText3 {\n fill: ").concat(t.taskTextDarkColor," !important;\n }\n\n\n /* Completed task */\n\n .done0,\n .done1,\n .done2,\n .done3 {\n stroke: ").concat(t.doneTaskBorderColor,";\n fill: ").concat(t.doneTaskBkgColor,";\n stroke-width: 2;\n }\n\n .doneText0,\n .doneText1,\n .doneText2,\n .doneText3 {\n fill: ").concat(t.taskTextDarkColor," !important;\n }\n\n\n /* Tasks on the critical line */\n\n .crit0,\n .crit1,\n .crit2,\n .crit3 {\n stroke: ").concat(t.critBorderColor,";\n fill: ").concat(t.critBkgColor,";\n stroke-width: 2;\n }\n\n .activeCrit0,\n .activeCrit1,\n .activeCrit2,\n .activeCrit3 {\n stroke: 
").concat(t.critBorderColor,";\n fill: ").concat(t.activeTaskBkgColor,";\n stroke-width: 2;\n }\n\n .doneCrit0,\n .doneCrit1,\n .doneCrit2,\n .doneCrit3 {\n stroke: ").concat(t.critBorderColor,";\n fill: ").concat(t.doneTaskBkgColor,";\n stroke-width: 2;\n cursor: pointer;\n shape-rendering: crispEdges;\n }\n\n .milestone {\n transform: rotate(45deg) scale(0.8,0.8);\n }\n\n .milestoneText {\n font-style: italic;\n }\n .doneCritText0,\n .doneCritText1,\n .doneCritText2,\n .doneCritText3 {\n fill: ").concat(t.taskTextDarkColor," !important;\n }\n\n .activeCritText0,\n .activeCritText1,\n .activeCritText2,\n .activeCritText3 {\n fill: ").concat(t.taskTextDarkColor," !important;\n }\n\n .titleText {\n text-anchor: middle;\n font-size: 18px;\n fill: ").concat(t.textColor," ;\n font-family: 'trebuchet ms', verdana, arial, sans-serif;\n font-family: var(--mermaid-font-family);\n }\n")},classDiagram:Po,"classDiagram-v2":Po,class:Po,stateDiagram:Io,state:Io,git:function(){return"\n .commit-id,\n .commit-msg,\n .branch-label {\n fill: lightgrey;\n color: lightgrey;\n font-family: 'trebuchet ms', verdana, arial, sans-serif;\n font-family: var(--mermaid-font-family);\n }\n"},info:function(){return""},pie:function(t){return".pieTitleText {\n text-anchor: middle;\n font-size: 25px;\n fill: ".concat(t.taskTextDarkColor,";\n font-family: ").concat(t.fontFamily,";\n }\n .slice {\n font-family: ").concat(t.fontFamily,";\n fill: ").concat(t.textColor,";\n // fill: white;\n }\n .legend text {\n fill: ").concat(t.taskTextDarkColor,";\n font-family: ").concat(t.fontFamily,";\n font-size: 17px;\n }\n")},er:function(t){return"\n .entityBox {\n fill: ".concat(t.mainBkg,";\n stroke: ").concat(t.nodeBorder,";\n }\n\n .attributeBoxOdd {\n fill: #ffffff;\n stroke: ").concat(t.nodeBorder,";\n }\n\n .attributeBoxEven {\n fill: #f2f2f2;\n stroke: ").concat(t.nodeBorder,";\n }\n\n .relationshipLabelBox {\n fill: ").concat(t.tertiaryColor,";\n opacity: 0.7;\n background-color: 
").concat(t.tertiaryColor,";\n rect {\n opacity: 0.5;\n }\n }\n\n .relationshipLine {\n stroke: ").concat(t.lineColor,";\n }\n")},journey:function(t){return".label {\n font-family: 'trebuchet ms', verdana, arial, sans-serif;\n font-family: var(--mermaid-font-family);\n color: ".concat(t.textColor,";\n }\n .mouth {\n stroke: #666;\n }\n\n line {\n stroke: ").concat(t.textColor,"\n }\n\n .legend {\n fill: ").concat(t.textColor,";\n }\n\n .label text {\n fill: #333;\n }\n .label {\n color: ").concat(t.textColor,"\n }\n\n .face {\n fill: #FFF8DC;\n stroke: #999;\n }\n\n .node rect,\n .node circle,\n .node ellipse,\n .node polygon,\n .node path {\n fill: ").concat(t.mainBkg,";\n stroke: ").concat(t.nodeBorder,";\n stroke-width: 1px;\n }\n\n .node .label {\n text-align: center;\n }\n .node.clickable {\n cursor: pointer;\n }\n\n .arrowheadPath {\n fill: ").concat(t.arrowheadColor,";\n }\n\n .edgePath .path {\n stroke: ").concat(t.lineColor,";\n stroke-width: 1.5px;\n }\n\n .flowchart-link {\n stroke: ").concat(t.lineColor,";\n fill: none;\n }\n\n .edgeLabel {\n background-color: ").concat(t.edgeLabelBackground,";\n rect {\n opacity: 0.5;\n }\n text-align: center;\n }\n\n .cluster rect {\n }\n\n .cluster text {\n fill: ").concat(t.titleColor,";\n }\n\n div.mermaidTooltip {\n position: absolute;\n text-align: center;\n max-width: 200px;\n padding: 2px;\n font-family: 'trebuchet ms', verdana, arial, sans-serif;\n font-family: var(--mermaid-font-family);\n font-size: 12px;\n background: ").concat(t.tertiaryColor,";\n border: 1px solid ").concat(t.border2,";\n border-radius: 2px;\n pointer-events: none;\n z-index: 100;\n }\n\n .task-type-0, .section-type-0 {\n ").concat(t.fillType0?"fill: ".concat(t.fillType0):"",";\n }\n .task-type-1, .section-type-1 {\n ").concat(t.fillType0?"fill: ".concat(t.fillType1):"",";\n }\n .task-type-2, .section-type-2 {\n ").concat(t.fillType0?"fill: ".concat(t.fillType2):"",";\n }\n .task-type-3, .section-type-3 {\n ").concat(t.fillType0?"fill: 
".concat(t.fillType3):"",";\n }\n .task-type-4, .section-type-4 {\n ").concat(t.fillType0?"fill: ".concat(t.fillType4):"",";\n }\n .task-type-5, .section-type-5 {\n ").concat(t.fillType0?"fill: ".concat(t.fillType5):"",";\n }\n .task-type-6, .section-type-6 {\n ").concat(t.fillType0?"fill: ".concat(t.fillType6):"",";\n }\n .task-type-7, .section-type-7 {\n ").concat(t.fillType0?"fill: ".concat(t.fillType7):"",";\n }\n")}},Ro=function(t,e,n){return" {\n font-family: ".concat(n.fontFamily,";\n font-size: ").concat(n.fontSize,";\n fill: ").concat(n.textColor,"\n }\n\n /* Classes common for multiple diagrams */\n\n .error-icon {\n fill: ").concat(n.errorBkgColor,";\n }\n .error-text {\n fill: ").concat(n.errorTextColor,";\n stroke: ").concat(n.errorTextColor,";\n }\n\n .edge-thickness-normal {\n stroke-width: 2px;\n }\n .edge-thickness-thick {\n stroke-width: 3.5px\n }\n .edge-pattern-solid {\n stroke-dasharray: 0;\n }\n\n .edge-pattern-dashed{\n stroke-dasharray: 3;\n }\n .edge-pattern-dotted {\n stroke-dasharray: 2;\n }\n\n .marker {\n fill: ").concat(n.lineColor,";\n }\n .marker.cross {\n stroke: ").concat(n.lineColor,";\n }\n\n svg {\n font-family: ").concat(n.fontFamily,";\n font-size: ").concat(n.fontSize,";\n }\n\n ").concat(jo[t](n),"\n\n ").concat(e,"\n\n ").concat(t," { fill: apa;}\n")};function Yo(t){return(Yo="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t})(t)}var zo={},Uo=function(t,e,n){switch(c.debug("Directive type=".concat(e.type," with args:"),e.args),e.type){case"init":case"initialize":["config"].forEach((function(t){void 0!==e.args[t]&&("flowchart-v2"===n&&(n="flowchart"),e.args[n]=e.args[t],delete e.args[t])})),e.args,wt(e.args);break;case"wrap":case"nowrap":t&&t.setWrap&&t.setWrap("wrap"===e.type);break;default:c.warn("Unhandled directive: source: '%%{".concat(e.type,": 
").concat(JSON.stringify(e.args?e.args:{}),"}%%"),e)}};function $o(t){_a(t.git),me(t.flowchart),Bn(t.flowchart),void 0!==t.sequenceDiagram&&xr.setConf(I(t.sequence,t.sequenceDiagram)),xr.setConf(t.sequence),ni(t.gantt),ui(t.class),Yi(t.state),Gi(t.state),Ma(t.class),Ya(t.class),no(t.er),Bo(t.journey),Na(t.class)}function Wo(){}var Vo=Object.freeze({render:function(t,e,n,r){Et();var i=e,a=V.detectInit(i);a&&wt(a);var o=_t();if(e.length>o.maxTextSize&&(i="graph TB;a[Maximum text size in diagram exceeded];style a fill:#faa"),void 0!==r)r.innerHTML="",Object(d.select)(r).append("div").attr("id","d"+t).attr("style","font-family: "+o.fontFamily).append("svg").attr("id",t).attr("width","100%").attr("xmlns","http://www.w3.org/2000/svg").append("g");else{var s=document.getElementById(t);s&&s.remove();var u=document.querySelector("#d"+t);u&&u.remove(),Object(d.select)("body").append("div").attr("id","d"+t).append("svg").attr("id",t).attr("width","100%").attr("xmlns","http://www.w3.org/2000/svg").append("g")}window.txt=i,i=function(t){var e=t;return e=(e=(e=e.replace(/style.*:\S*#.*;/g,(function(t){return t.substring(0,t.length-1)}))).replace(/classDef.*:\S*#.*;/g,(function(t){return t.substring(0,t.length-1)}))).replace(/#\w+;/g,(function(t){var e=t.substring(1,t.length-1);return/^\+?\d+$/.test(e)?"fl°°"+e+"¶ß":"fl°"+e+"¶ß"}))}(i);var l=Object(d.select)("#d"+t).node(),h=V.detectType(i),g=l.firstChild,y=g.firstChild,v="";if(void 0!==o.themeCSS&&(v+="\n".concat(o.themeCSS)),void 0!==o.fontFamily&&(v+="\n:root { --mermaid-font-family: ".concat(o.fontFamily,"}")),void 0!==o.altFontFamily&&(v+="\n:root { --mermaid-alt-font-family: ".concat(o.altFontFamily,"}")),"flowchart"===h||"flowchart-v2"===h||"graph"===h){var m=be(i);for(var b in m)v+="\n.".concat(b," > * { ").concat(m[b].styles.join(" !important; ")," !important; }"),m[b].textStyles&&(v+="\n.".concat(b," tspan { ").concat(m[b].textStyles.join(" !important; ")," !important; 
}"))}var x=(new f.a)("#".concat(t),Ro(h,v,o.themeVariables)),_=document.createElement("style");_.innerHTML=x,g.insertBefore(_,y);try{switch(h){case"git":o.flowchart.arrowMarkerAbsolute=o.arrowMarkerAbsolute,_a(o.git),ka(i,t,!1);break;case"flowchart":o.flowchart.arrowMarkerAbsolute=o.arrowMarkerAbsolute,me(o.flowchart),xe(i,t,!1);break;case"flowchart-v2":o.flowchart.arrowMarkerAbsolute=o.arrowMarkerAbsolute,Bn(o.flowchart),Ln(i,t,!1);break;case"sequence":o.sequence.arrowMarkerAbsolute=o.arrowMarkerAbsolute,o.sequenceDiagram?(xr.setConf(Object.assign(o.sequence,o.sequenceDiagram)),console.error("`mermaid config.sequenceDiagram` has been renamed to `config.sequence`. Please update your mermaid config.")):xr.setConf(o.sequence),xr.draw(i,t);break;case"gantt":o.gantt.arrowMarkerAbsolute=o.arrowMarkerAbsolute,ni(o.gantt),ri(i,t);break;case"class":o.class.arrowMarkerAbsolute=o.arrowMarkerAbsolute,ui(o.class),li(i,t);break;case"classDiagram":o.class.arrowMarkerAbsolute=o.arrowMarkerAbsolute,fi(o.class),di(i,t);break;case"state":o.class.arrowMarkerAbsolute=o.arrowMarkerAbsolute,Yi(o.state),zi(i,t);break;case"stateDiagram":o.class.arrowMarkerAbsolute=o.arrowMarkerAbsolute,Gi(o.state),qi(i,t);break;case"info":o.class.arrowMarkerAbsolute=o.arrowMarkerAbsolute,Ma(o.class),Oa(i,t,p.version);break;case"pie":o.class.arrowMarkerAbsolute=o.arrowMarkerAbsolute,Ya(o.pie),za(i,t,p.version);break;case"er":no(o.er),ro(i,t,p.version);break;case"journey":Bo(o.journey),Lo(i,t,p.version)}}catch(e){throw Ba(t,p.version),e}Object(d.select)('[id="'.concat(t,'"]')).selectAll("foreignobject > *").attr("xmlns","http://www.w3.org/1999/xhtml");var k=Object(d.select)("#d"+t).node().innerHTML;if(c.debug("cnf.arrowMarkerAbsolute",o.arrowMarkerAbsolute),o.arrowMarkerAbsolute&&"false"!==o.arrowMarkerAbsolute||(k=k.replace(/marker-end="url\(.*?#/g,'marker-end="url(#',"g")),k=function(t){var e=t;return 
e=(e=(e=e.replace(/fl°°/g,(function(){return"&#"}))).replace(/fl°/g,(function(){return"&"}))).replace(/¶ß/g,(function(){return";"}))}(k),void 0!==n)switch(h){case"flowchart":case"flowchart-v2":n(k,Xt.bindFunctions);break;case"gantt":n(k,Qr.bindFunctions);break;case"class":case"classDiagram":n(k,sn.bindFunctions);break;default:n(k)}else c.debug("CB = undefined!");var w=Object(d.select)("#d"+t).node();return null!==w&&"function"==typeof w.remove&&Object(d.select)("#d"+t).node().remove(),k},parse:function(t){var e=V.detectInit(t);e&&c.debug("reinit ",e);var n,r=V.detectType(t);switch(c.debug("Type "+r),r){case"git":(n=la.a).parser.yy=ca;break;case"flowchart":case"flowchart-v2":Xt.clear(),(n=Jt.a).parser.yy=Xt;break;case"sequence":(n=Wn.a).parser.yy=or;break;case"gantt":(n=kr.a).parser.yy=Qr;break;case"class":case"classDiagram":(n=ai.a).parser.yy=sn;break;case"state":case"stateDiagram":(n=Oi.a).parser.yy=Ai;break;case"info":c.debug("info info info"),(n=Sa.a).parser.yy=Ta;break;case"pie":c.debug("pie"),(n=ja.a).parser.yy=Fa;break;case"er":c.debug("er"),(n=qa.a).parser.yy=Ha;break;case"journey":c.debug("Journey"),(n=ao.a).parser.yy=po}return n.parser.yy.graphType=r,n.parser.yy.parseError=function(t,e){throw{str:t,hash:e}},n.parse(t),n},parseDirective:function(t,e,n,r){try{if(void 0!==e)switch(e=e.trim(),n){case"open_directive":zo={};break;case"type_directive":zo.type=e.toLowerCase();break;case"arg_directive":zo.args=JSON.parse(e);break;case"close_directive":Uo(t,zo,r),zo=null}}catch(t){c.error("Error while rendering sequenceDiagram directive: ".concat(e," jison context: ").concat(n)),c.error(t.message)}},initialize:function(t){t&&t.fontFamily&&(t.themeVariables&&t.themeVariables.fontFamily||(t.themeVariables={fontFamily:t.fontFamily})),dt=I({},t),t&&t.theme&&ht[t.theme]?t.themeVariables=ht[t.theme].getThemeVariables(t.themeVariables):t&&(t.themeVariables=ht.default.getThemeVariables(t.themeVariables));var e="object"===Yo(t)?function(t){return 
yt=I({},gt),yt=I(yt,t),t.theme&&(yt.themeVariables=ht[t.theme].getThemeVariables(t.themeVariables)),mt=bt(yt,vt),yt}(t):xt();$o(e),u(e.logLevel)},reinitialize:Wo,getConfig:_t,setConfig:function(t){return I(mt,t),_t()},getSiteConfig:xt,updateSiteConfig:function(t){return yt=I(yt,t),bt(yt,vt),yt},reset:function(){Et()},globalReset:function(){Et(),$o(_t())},defaultConfig:gt});u(_t().logLevel),Et(_t());var Ho=Vo,Go=function(){qo.startOnLoad?Ho.getConfig().startOnLoad&&qo.init():void 0===qo.startOnLoad&&(c.debug("In start, no config"),Ho.getConfig().startOnLoad&&qo.init())};"undefined"!=typeof document&& +/*! + * Wait for document loaded before starting the execution + */ +window.addEventListener("load",(function(){Go()}),!1);var qo={startOnLoad:!0,htmlLabels:!0,mermaidAPI:Ho,parse:Ho.parse,render:Ho.render,init:function(){var t,e,n=this,r=Ho.getConfig();arguments.length>=2?( +/*! sequence config was passed as #1 */ +void 0!==arguments[0]&&(qo.sequenceConfig=arguments[0]),t=arguments[1]):t=arguments[0],"function"==typeof arguments[arguments.length-1]?(e=arguments[arguments.length-1],c.debug("Callback function found")):void 0!==r.mermaid&&("function"==typeof r.mermaid.callback?(e=r.mermaid.callback,c.debug("Callback function found")):c.debug("No Callback function found")),t=void 0===t?document.querySelectorAll(".mermaid"):"string"==typeof t?document.querySelectorAll(t):t instanceof window.Node?[t]:t,c.debug("Start On Load before: "+qo.startOnLoad),void 0!==qo.startOnLoad&&(c.debug("Start On Load inner: "+qo.startOnLoad),Ho.updateSiteConfig({startOnLoad:qo.startOnLoad})),void 0!==qo.ganttConfig&&Ho.updateSiteConfig({gantt:qo.ganttConfig});for(var a,o=V.initIdGeneratior(r.deterministicIds,r.deterministicIDSeed).next,s=function(r){var s=t[r]; +/*! Check if previously processed */if(s.getAttribute("data-processed"))return"continue";s.setAttribute("data-processed",!0);var u="mermaid-".concat(o());a=i(a=s.innerHTML).trim().replace(//gi,"
");var l=V.detectInit(a);l&&c.debug("Detected early reinit: ",l);try{Ho.render(u,a,(function(t,n){s.innerHTML=t,void 0!==e&&e(u),n&&n(s)}),s)}catch(t){c.warn("Syntax Error rendering"),c.warn(t),n.parseError&&n.parseError(t)}},u=0;u + +["A proof procedure for the logic of Hereditary Harrop formulas"][pphhf], +by Gopalan Nadathur. This paper covers the basics of universes, +environments, and Lambda Prolog-style proof search. Quite readable. + +[pphhf]: https://dl.acm.org/citation.cfm?id=868380 + + + +["A new formulation of tabled resolution with delay"][nftrd], by +Theresa Swift. This paper gives a kind of abstract treatment of the +SLG formulation that is the basis for our on-demand solver. + +[nftrd]: https://dl.acm.org/citation.cfm?id=651202 + + +## Books +* "Introduction to Formal Logic", Peter Smith +* "Handbook of Practical Logic and Automated Reasoning", John Harrison +* "Types and Programming Languages", Benjamin C. Pierce +* [Programming with Higher-order Logic][phl], by Dale Miller and Gopalan +Nadathur, covers the key concepts of Lambda prolog. Although it's a +slim little volume, it's the kind of book where you learn something +new every time you open it. + +[phl]: https://www.amazon.com/Programming-Higher-Order-Logic-Dale-Miller/dp/052187940X + diff --git a/book/src/canonical_queries.md b/book/src/canonical_queries.md new file mode 100644 index 00000000000..a5e3598fcf9 --- /dev/null +++ b/book/src/canonical_queries.md @@ -0,0 +1,251 @@ +# Canonical queries + +The "start" of the trait system is the **canonical query** (these are +both queries in the more general sense of the word – something you +would like to know the answer to – and in the +rustc-specific sense). The idea is that the type +checker or other parts of the system, may in the course of doing their +thing want to know whether some trait is implemented for some type +(e.g., is `u32: Debug` true?). 
Or they may want to +normalize some associated type. + +This section covers queries at a fairly high level of abstraction. The +subsections look a bit more closely at how these ideas are implemented +in rustc. + +## The traditional, interactive Prolog query + +In a traditional Prolog system, when you start a query, the solver +will run off and start supplying you with every possible answer it can +find. So given something like this: + +```text +?- Vec: AsRef +``` + +The solver might answer: + +```text +Vec: AsRef<[i32]> + continue? (y/n) +``` + +This `continue` bit is interesting. The idea in Prolog is that the +solver is finding **all possible** instantiations of your query that +are true. In this case, if we instantiate `?U = [i32]`, then the query +is true (note that a traditional Prolog interface does not, directly, +tell us a value for `?U`, but we can infer one by unifying the +response with our original query – Rust's solver gives back a +substitution instead). If we were to hit `y`, the solver might then +give us another possible answer: + +```text +Vec: AsRef> + continue? (y/n) +``` + +This answer derives from the fact that there is a reflexive impl +(`impl AsRef for T`) for `AsRef`. If were to hit `y` again, +then we might get back a negative response: + +```text +no +``` + +Naturally, in some cases, there may be no possible answers, and hence +the solver will just give me back `no` right away: + +```text +?- Box: Copy + no +``` + +In some cases, there might be an infinite number of responses. So for +example if I gave this query, and I kept hitting `y`, then the solver +would never stop giving me back answers: + +```text +?- Vec: Clone + Vec: Clone + continue? (y/n) + Vec>: Clone + continue? (y/n) + Vec>>: Clone + continue? (y/n) + Vec>>>: Clone + continue? (y/n) +``` + +As you can imagine, the solver will gleefully keep adding another +layer of `Box` until we ask it to stop, or it runs out of memory. 
+ +Another interesting thing is that queries might still have variables +in them. For example: + +```text +?- Rc: Clone +``` + +might produce the answer: + +```text +Rc: Clone + continue? (y/n) +``` + +After all, `Rc` is true **no matter what type `?T` is**. + + + +## A trait query in rustc + +The trait queries in rustc work somewhat differently. Instead of +trying to enumerate **all possible** answers for you, they are looking +for an **unambiguous** answer. In particular, when they tell you the +value for a type variable, that means that this is the **only possible +instantiation** that you could use, given the current set of impls and +where-clauses, that would be provable. (Internally within the solver, +though, they can potentially enumerate all possible answers. See +the description of the SLG solver for details.) + +The response to a trait query in rustc is typically a +`Result, NoSolution>` (where the `T` will vary a bit +depending on the query itself). The `Err(NoSolution)` case indicates +that the query was false and had no answers (e.g., `Box: Copy`). +Otherwise, the `QueryResult` gives back information about the possible answer(s) +we did find. It consists of four parts: + +- **Certainty:** tells you how sure we are of this answer. It can have two + values: + - `Proven` means that the result is known to be true. + - This might be the result for trying to prove `Vec: Clone`, + say, or `Rc: Clone`. + - `Ambiguous` means that there were things we could not yet prove to + be either true *or* false, typically because more type information + was needed. (We'll see an example shortly.) + - This might be the result for trying to prove `Vec: Clone`. +- **Var values:** Values for each of the unbound inference variables + (like `?T`) that appeared in your original query. (Remember that in Prolog, + we had to infer these.) + - As we'll see in the example below, we can get back var values even + for `Ambiguous` cases. 
+- **Region constraints:** these are relations that must hold between + the lifetimes that you supplied as inputs. We'll ignore these here, + but see the section on handling regions in traits for + more details. +- **Value:** The query result also comes with a value of type `T`. For + some specialized queries – like normalizing associated types – + this is used to carry back an extra result, but it's often just + `()`. + +### Examples + +Let's work through an example query to see what all the parts mean. +Consider [the `Borrow` trait][borrow]. This trait has a number of +impls; among them, there are these two (for clarity, I've written the +`Sized` bounds explicitly): + +[borrow]: https://doc.rust-lang.org/std/borrow/trait.Borrow.html + +```rust,ignore +impl Borrow for T where T: ?Sized +impl Borrow<[T]> for Vec where T: Sized +``` + +**Example 1.** Imagine we are type-checking this (rather artificial) +bit of code: + +```rust,ignore +fn foo(a: A, vec_b: Option) where A: Borrow { } + +fn main() { + let mut t: Vec<_> = vec![]; // Type: Vec + let mut u: Option<_> = None; // Type: Option + foo(t, u); // Example 1: requires `Vec: Borrow` + ... +} +``` + +As the comments indicate, we first create two variables `t` and `u`; +`t` is an empty vector and `u` is a `None` option. Both of these +variables have unbound inference variables in their type: `?T` +represents the elements in the vector `t` and `?U` represents the +value stored in the option `u`. Next, we invoke `foo`; comparing the +signature of `foo` to its arguments, we wind up with `A = Vec` and +`B = ?U`. Therefore, the where clause on `foo` requires that `Vec: +Borrow`. This is thus our first example trait query. + +There are many possible solutions to the query `Vec: Borrow`; +for example: + +- `?U = Vec`, +- `?U = [?T]`, +- `?T = u32, ?U = [u32]` +- and so forth. 
+ +Therefore, the result we get back would be as follows (I'm going to +ignore region constraints and the "value"): + +- Certainty: `Ambiguous` – we're not sure yet if this holds +- Var values: `[?T = ?T, ?U = ?U]` – we learned nothing about the values of + the variables + +In short, the query result says that it is too soon to say much about +whether this trait is proven. During type-checking, this is not an +immediate error: instead, the type checker would hold on to this +requirement (`Vec: Borrow`) and wait. As we'll see in the next +example, it may happen that `?T` and `?U` wind up constrained from +other sources, in which case we can try the trait query again. + +**Example 2.** We can now extend our previous example a bit, +and assign a value to `u`: + +```rust,ignore +fn foo(a: A, vec_b: Option) where A: Borrow { } + +fn main() { + // What we saw before: + let mut t: Vec<_> = vec![]; // Type: Vec + let mut u: Option<_> = None; // Type: Option + foo(t, u); // `Vec: Borrow` => ambiguous + + // New stuff: + u = Some(vec![]); // ?U = Vec +} +``` + +As a result of this assignment, the type of `u` is forced to be +`Option>`, where `?V` represents the element type of the +vector. This in turn implies that `?U` is unified to `Vec`. + +[unified]: ../type-checking.html + +Let's suppose that the type checker decides to revisit the +"as-yet-unproven" trait obligation we saw before, `Vec: +Borrow`. `?U` is no longer an unbound inference variable; it now +has a value, `Vec`. So, if we "refresh" the query with that value, we get: + +```text +Vec: Borrow> +``` + +This time, there is only one impl that applies, the reflexive impl: + +```text +impl Borrow for T where T: ?Sized +``` + +Therefore, the trait checker will answer: + +- Certainty: `Proven` +- Var values: `[?T = ?T, ?V = ?T]` + +Here, it is saying that we have indeed proven that the obligation +holds, and we also know that `?T` and `?V` are the same type (but we +don't know what that type is yet!). 
+ +(In fact, as the function ends here, the type checker would give an +error at this point, since the element types of `t` and `u` are still +not yet known, even though they are known to be the same.) + + diff --git a/book/src/canonical_queries/canonicalization.md b/book/src/canonical_queries/canonicalization.md new file mode 100644 index 00000000000..757a1bc5a35 --- /dev/null +++ b/book/src/canonical_queries/canonicalization.md @@ -0,0 +1,256 @@ +# Canonicalization + +Canonicalization is the process of **isolating** an inference value +from its context. It is a key part of implementing +[canonical queries][cq], and you may wish to read the parent chapter +to get more context. + +Canonicalization is really based on a very simple concept: every +[inference variable](https://rustc-dev-guide.rust-lang.org/type-inference.html#vars) +is always in one of two states: either it is **unbound**, in which case we don't know yet +what type it is, or it is **bound**, in which case we do. So to +isolate some data-structure T that contains types/regions from its +environment, we just walk down and find the unbound variables that +appear in T; those variables get replaced with "canonical variables", +starting from zero and numbered in a fixed order (left to right, for +the most part, but really it doesn't matter as long as it is +consistent). + +[cq]: ../canonical_queries.html + +So, for example, if we have the type `X = (?T, ?U)`, where `?T` and +`?U` are distinct, unbound inference variables, then the canonical +form of `X` would be `(?0, ?1)`, where `?0` and `?1` represent these +**canonical placeholders**. Note that the type `Y = (?U, ?T)` also +canonicalizes to `(?0, ?1)`. But the type `Z = (?T, ?T)` would +canonicalize to `(?0, ?0)` (as would `(?U, ?U)`). In other words, the +exact identity of the inference variables is not important – unless +they are repeated. 
+ +We use this to improve caching as well as to detect cycles and other +things during trait resolution. Roughly speaking, the idea is that if +two trait queries have the same canonical form, then they will get +the same answer. That answer will be expressed in terms of the +canonical variables (`?0`, `?1`), which we can then map back to the +original variables (`?T`, `?U`). + +## Canonicalizing the query + +To see how it works, imagine that we are asking to solve the following +trait query: `?A: Foo<'static, ?B>`, where `?A` and `?B` are unbound. +This query contains two unbound variables, but it also contains the +lifetime `'static`. The trait system generally ignores all lifetimes +and treats them equally, so when canonicalizing, we will *also* +replace any [free lifetime](https://rustc-dev-guide.rust-lang.org/appendix/background.html#free-vs-bound) with a +canonical variable (Note that `'static` is actually a _free_ lifetime +variable here. We are not considering it in the typing context of the whole +program but only in the context of this trait reference. Mathematically, we +are not quantifying over the whole program, but only this obligation). +Therefore, we get the following result: + +```text +?0: Foo<'?1, ?2> +``` + +Sometimes we write this differently, like so: + +```text +for { ?0: Foo<'?1, ?2> } +``` + +This `for<>` gives some information about each of the canonical +variables within. In this case, each `T` indicates a type variable, +so `?0` and `?2` are types; the `L` indicates a lifetime variable, so +`?1` is a lifetime. The `canonicalize` method *also* gives back a +`CanonicalVarValues` array OV with the "original values" for each +canonicalized variable: + +```text +[?A, 'static, ?B] +``` + +We'll need this vector OV later, when we process the query response. + +## Executing the query + +Once we've constructed the canonical query, we can try to solve it. 
+To do so, we will wind up creating a fresh inference context and +**instantiating** the canonical query in that context. The idea is that +we create a substitution S from the canonical form containing a fresh +inference variable (of suitable kind) for each canonical variable. +So, for our example query: + +```text +for { ?0: Foo<'?1, ?2> } +``` + +the substitution S might be: + +```text +S = [?A, '?B, ?C] +``` + +We can then replace the bound canonical variables (`?0`, etc) with +these inference variables, yielding the following fully instantiated +query: + +```text +?A: Foo<'?B, ?C> +``` + +Remember that substitution S though! We're going to need it later. + +OK, now that we have a fresh inference context and an instantiated +query, we can go ahead and try to solve it. The trait solver itself is +explained in more detail in [another section](../engine/slg.html), but +suffice to say that it will compute a [certainty value][cqqr] (`Proven` or +`Ambiguous`) and have side-effects on the inference variables we've +created. For example, if there were only one impl of `Foo`, like so: + +[cqqr]: ../canonical_queries.html#query-response + +```rust,ignore +impl<'a, X> Foo<'a, X> for Vec +where X: 'a +{ ... } +``` + +then we might wind up with a certainty value of `Proven`, as well as +creating fresh inference variables `'?D` and `?E` (to represent the +parameters on the impl) and unifying as follows: + +- `'?B = '?D` +- `?A = Vec` +- `?C = ?E` + +We would also accumulate the region constraint `?E: '?D`, due to the +where clause. + +In order to create our final query result, we have to "lift" these +values out of the query's inference context and into something that +can be reapplied in our original inference context. We do that by +**re-applying canonicalization**, but to the **query result**. 
+ +## Canonicalizing the query result + +As discussed in [the parent section][cqqr], most trait queries wind up +with a result that brings together a "certainty value" `certainty`, a +result substitution `var_values`, and some region constraints. To +create this, we wind up re-using the substitution S that we created +when first instantiating our query. To refresh your memory, we had a query + +```text +for { ?0: Foo<'?1, ?2> } +``` + +for which we made a substitution S: + +```text +S = [?A, '?B, ?C] +``` + +We then did some work which unified some of those variables with other things. +If we "refresh" S with the latest results, we get: + +```text +S = [Vec, '?D, ?E] +``` + +These are precisely the new values for the three input variables from +our original query. Note though that they include some new variables +(like `?E`). We can make those go away by canonicalizing again! We don't +just canonicalize S, though, we canonicalize the whole query response QR: + +```text +QR = { + certainty: Proven, // or whatever + var_values: [Vec, '?D, ?E] // this is S + region_constraints: [?E: '?D], // from the impl + value: (), // for our purposes, just (), but + // in some cases this might have + // a type or other info +} +``` + +The result would be as follows: + +```text +Canonical(QR) = for { + certainty: Proven, + var_values: [Vec, '?1, ?0] + region_constraints: [?0: '?1], + value: (), +} +``` + +(One subtle point: when we canonicalize the query **result**, we do not +use any special treatment for free lifetimes. Note that both +references to `'?D`, for example, were converted into the same +canonical variable (`?1`). This is in contrast to the original query, +where we canonicalized every free lifetime into a fresh canonical +variable.) + +Now, this result must be reapplied in each context where needed. + +## Processing the canonicalized query result + +In the previous section we produced a canonical query result. We now have +to apply that result in our original context. 
If you recall, way back in the +beginning, we were trying to prove this query: + +```text +?A: Foo<'static, ?B> +``` + +We canonicalized that into this: + +```text +for { ?0: Foo<'?1, ?2> } +``` + +and now we got back a canonical response: + +```text +for { + certainty: Proven, + var_values: [Vec, '?1, ?0] + region_constraints: [?0: '?1], + value: (), +} +``` + +We now want to apply that response to our context. Conceptually, how +we do that is to (a) instantiate each of the canonical variables in +the result with a fresh inference variable, (b) unify the values in +the result with the original values, and then (c) record the region +constraints for later. Doing step (a) would yield a result of + +```text +{ + certainty: Proven, + var_values: [Vec, '?D, ?C] + ^^ ^^^ fresh inference variables + region_constraints: [?C: '?D], + value: (), +} +``` + +Step (b) would then unify: + +```text +?A with Vec +'static with '?D +?B with ?C +``` + +And finally the region constraint of `?C: 'static` would be recorded +for later verification. + +(What we *actually* do is a mildly optimized variant of that: Rather +than eagerly instantiating all of the canonical values in the result +with variables, we instead walk the vector of values, looking for +cases where the value is just a canonical variable. In our example, +`values[2]` is `?C`, so that means we can deduce that `?C := ?B` and +`'?D := 'static`. This gives us a partial set of values. Anything for +which we do not find a value, we create an inference variable.) + diff --git a/book/src/clauses.md b/book/src/clauses.md new file mode 100644 index 00000000000..c0e8f36c667 --- /dev/null +++ b/book/src/clauses.md @@ -0,0 +1,185 @@ +# Lowering Rust IR to logic + +The key observation here is that the Rust trait system is basically a +kind of logic, and it can be mapped onto standard logical inference +rules. We can then look for solutions to those inference rules in a +very similar fashion to how e.g. a [Prolog] solver works. 
It turns out +that we can't *quite* use Prolog rules (also called Horn clauses) but +rather need a somewhat more expressive variant. + +[Prolog]: https://en.wikipedia.org/wiki/Prolog + +## Rust traits and logic + +One of the first observations is that the Rust trait system is +basically a kind of logic. As such, we can map our struct, trait, and +impl declarations into logical inference rules. For the most part, +these are basically Horn clauses, though we'll see that to capture the +full richness of Rust – and in particular to support generic +programming – we have to go a bit further than standard Horn clauses. + +To see how this mapping works, let's start with an example. Imagine +we declare a trait and a few impls, like so: + +```rust +trait Clone { } +impl Clone for usize { } +impl Clone for Vec where T: Clone { } +``` + +We could map these declarations to some Horn clauses, written in a +Prolog-like notation, as follows: + +```text +Clone(usize). +Clone(Vec) :- Clone(?T). + +// The notation `A :- B` means "A is true if B is true". +// Or, put another way, B implies A. +``` + +In Prolog terms, we might say that `Clone(Foo)` – where `Foo` is some +Rust type – is a *predicate* that represents the idea that the type +`Foo` implements `Clone`. These rules are **program clauses**; they +state the conditions under which that predicate can be proven (i.e., +considered true). So the first rule just says "Clone is implemented +for `usize`". The next rule says "for any type `?T`, Clone is +implemented for `Vec` if clone is implemented for `?T`". So +e.g. if we wanted to prove that `Clone(Vec>)`, we would do +so by applying the rules recursively: + +- `Clone(Vec>)` is provable if: + - `Clone(Vec)` is provable if: + - `Clone(usize)` is provable. (Which it is, so we're all good.) + +But now suppose we tried to prove that `Clone(Vec)`. 
This would
+fail (after all, I didn't give an impl of `Clone` for `Bar`):
+
+- `Clone(Vec<Bar>)` is provable if:
+  - `Clone(Bar)` is provable. (But it is not, as there are no applicable rules.)
+
+We can easily extend the example above to cover generic traits with
+more than one input type. So imagine the `Eq<T>` trait, which declares
+that `Self` is equatable with a value of type `T`:
+
+```rust,ignore
+trait Eq<T> { ... }
+impl Eq<usize> for usize { }
+impl<T: Eq<U>> Eq<Vec<U>> for Vec<T> { }
+```
+
+That could be mapped as follows:
+
+```text
+Eq(usize, usize).
+Eq(Vec<?T>, Vec<?U>) :- Eq(?T, ?U).
+```
+
+So far so good.
+
+## Type-checking normal functions
+
+OK, now that we have defined some logical rules that are able to
+express when traits are implemented and to handle associated types,
+let's turn our focus a bit towards **type-checking**. Type-checking is
+interesting because it is what gives us the goals that we need to
+prove. That is, everything we've seen so far has been about how we
+derive the rules by which we can prove goals from the traits and impls
+in the program; but we are also interested in how to derive the goals
+that we need to prove, and those come from type-checking.
+
+Consider type-checking the function `foo()` here:
+
+```rust,ignore
+fn foo() { bar::<usize>() }
+fn bar<U: Eq<U>>() { }
+```
+
+This function is very simple, of course: all it does is to call
+`bar::<usize>()`. Now, looking at the definition of `bar()`, we can see
+that it has one where-clause `U: Eq<U>`. So, that means that `foo()` will
+have to prove that `usize: Eq<usize>` in order to show that it can call `bar()`
+with `usize` as the type argument.
+
+If we wanted, we could write a Prolog predicate that defines the
+conditions under which `bar()` can be called. We'll say that those
+conditions are called being "well-formed":
+
+```text
+barWellFormed(?U) :- Eq(?U, ?U). 
+```
+
+Then we can say that `foo()` type-checks if the reference to
+`bar::<usize>` (that is, `bar()` applied to the type `usize`) is
+well-formed:
+
+```text
+fooTypeChecks :- barWellFormed(usize).
+```
+
+If we try to prove the goal `fooTypeChecks`, it will succeed:
+
+- `fooTypeChecks` is provable if:
+  - `barWellFormed(usize)`, which is provable if:
+    - `Eq(usize, usize)`, which is provable because of an impl.
+
+Ok, so far so good. Let's move on to type-checking a more complex function.
+
+## Type-checking generic functions: beyond Horn clauses
+
+In the last section, we used standard Prolog horn-clauses (augmented with Rust's
+notion of type equality) to type-check some simple Rust functions. But that only
+works when we are type-checking non-generic functions. If we want to type-check
+a generic function, it turns out we need a stronger notion of goal than what Prolog
+can provide. To see what I'm talking about, let's revamp our previous
+example to make `foo` generic:
+
+```rust,ignore
+fn foo<T: Eq<T>>() { bar::<T>() }
+fn bar<U: Eq<U>>() { }
+```
+
+To type-check the body of `foo`, we need to be able to hold the type
+`T` "abstract". That is, we need to check that the body of `foo` is
+type-safe *for all types `T`*, not just for some specific type. We might express
+this like so:
+
+```text
+fooTypeChecks :-
+  // for all types T...
+  forall<T> {
+    // ...if we assume that Eq(T, T) is provable...
+    if (Eq(T, T)) {
+      // ...then we can prove that `barWellFormed(T)` holds.
+      barWellFormed(T)
+    }
+  }.
+```
+
+This notation I'm using here is the notation I've been using in my
+prototype implementation; it's similar to standard mathematical
+notation but a bit Rustified. Anyway, the problem is that standard
+Horn clauses don't allow universal quantification (`forall`) or
+implication (`if`) in goals (though many Prolog engines do support
+them, as an extension). 
For this reason, we need to accept something +called "first-order hereditary harrop" (FOHH) clauses – this long +name basically means "standard Horn clauses with `forall` and `if` in +the body". But it's nice to know the proper name, because there is a +lot of work describing how to efficiently handle FOHH clauses; see for +example Gopalan Nadathur's excellent +["A Proof Procedure for the Logic of Hereditary Harrop Formulas"][pphhf] +in [the bibliography]. + +[the bibliography]: ./bibliography.html +[pphhf]: ./bibliography.html#pphhf + +It turns out that supporting FOHH is not really all that hard. And +once we are able to do that, we can easily describe the type-checking +rule for generic functions like `foo` in our logic. + +## Source + +This page is a lightly adapted version of a +[blog post by Nicholas Matsakis][lrtl]. + +[lrtl]: https://smallcultfollowing.com/babysteps/blog/2017/01/26/lowering-rust-traits-to-logic/ diff --git a/book/src/clauses/coherence.md b/book/src/clauses/coherence.md new file mode 100644 index 00000000000..40a368feada --- /dev/null +++ b/book/src/clauses/coherence.md @@ -0,0 +1,328 @@ +# Chalk Coherence + +This document was previously prepared for the initial design of coherence rules in Chalk. It was copy-pasted here on 2020-10-06, but has not been vetted for accuracy of the current implementation or edited for clarity. + +## Coherence +> The idea of trait coherence is that, given a trait and some set of types for its type parameters, there should be exactly one impl that applies. So if we think of the trait `Display`, we want to guarantee that if we have a trait reference like `MyType : Display`, we can uniquely identify a particular impl. +> +> The role of the orphan rules in particular is basically to prevent you from implementing external traits for external types. 
So continuing our simple example of `Display`, if you are defining your own library, you could not implement `Display` for `Vec`, because both `Display` and `Vec` are defined in the standard library. But you can implement `Display` for `MyType`, because you defined `MyType`. However, if you define your own trait `MyTrait`, then you can implement `MyTrait` for any type you like, including external types like `Vec`. To this end, the orphan rule intuitively says “either the trait must be local or the self-type must be local”. +> +> -- [Little Orphan Impls](https://smallcultfollowing.com/babysteps/blog/2015/01/14/little-orphan-impls/) by Niko Matsakis + +To check for coherence, the Rust compiler completes two separate but related checks: + +- orphan check - ensures that each impl abides by the orphan rules, or in other words, that an impl is potentially implementable by the crate adding it + - A consequence of the orphan rules: for every impl that could exist, it only exists in **one** place — this is key to having a coherent system +- overlap check - ensures that no two impls overlap in your program **or** **in any** ***compatible*** **world** + - **compatible** - any semver compatible world +# Resources About Coherence +- [Coherence - talk by withoutboats](https://www.youtube.com/watch?v=AI7SLCubTnk&t=43m19s) +- [Little Orphan Impls](https://smallcultfollowing.com/babysteps/blog/2015/01/14/little-orphan-impls/) +- [RFC 1023 Rebalancing Coherence](https://rust-lang.github.io/rfcs/1023-rebalancing-coherence.html) +- [Type classes: confluence, coherence and global uniqueness](https://web.archive.org/web/20250308110404/https://blog.ezyang.com/2014/07/type-classes-confluence-coherence-global-uniqueness/) +## Axioms & Properties of Coherence +> Historical Note: We used to use the term “external” instead of “upstream”. 
+
+
+- **Axiom 1:** crates upstream to you should be able to implement their own traits for their own types
+- **Axiom 2:** crates downstream from you should be able to implement your traits
+- **Property:** Upstream crates must assume that downstream crates will add any impls that compile. Downstream crates are allowed to assume that upstream crates will not add any semver incompatible impls.
+# Chalk: Orphan Check
+
+The purpose of the orphan check is to ensure that an impl is only definable in a single crate. This check is what makes it impossible for other crates to define impls of your traits for your types.
+
+**We want to capture some rule:** Given `impl Trait for P0`, `LocalImplAllowed(P0: Trait)` is true if and only if this impl is allowed in the current (local) crate.
+
+This check is applied to all impls in the current crate. Upstream impls are not checked with this rule.
+
+## The Orphan Rules
+
+In order to model the orphan check in chalk, we need a precise description of the orphan rules as they are implemented in rustc today.
+
+There are several resources which can be used to figure out the orphan rules in rustc. 
+
+- [RFC 1023: Rebalancing Coherence](https://rust-lang.github.io/rfcs/1023-rebalancing-coherence.html)
+- [*Trait Implementation Coherence*](https://doc.rust-lang.org/reference/items/implementations.html#trait-implementation-coherence) [in the](https://doc.rust-lang.org/reference/items/implementations.html#trait-implementation-coherence) [*Rust Reference*](https://doc.rust-lang.org/reference/items/implementations.html#trait-implementation-coherence)
+- [E0210: A violation of the orphan rules in the](https://doc.rust-lang.org/error-index.html#E0210) [*Rust Error Index*](https://doc.rust-lang.org/error-index.html#E0210)
+- [*Little Orphan Impls*](https://smallcultfollowing.com/babysteps/blog/2015/01/14/little-orphan-impls/) [by Niko Matsakis](https://smallcultfollowing.com/babysteps/blog/2015/01/14/little-orphan-impls/)
+
+Of all of these, RFC 1023 is probably considered the most authoritative source on the orphan rules. The orphan rules as proposed in that RFC are as follows:
+
+Given an impl `impl Trait for P0`, either `Trait` must be local to the current crate, or:
+
+1. At least one type must meet the `LT` pattern defined above. Let `Pi` be the first such type.
+2. No type parameters `T1...Tn` may appear in the type parameters that precede `Pi` (that is, `Pj` where `j < i`).
+
+The `LT` pattern being referred to basically means that the type is a “local type” including the effects of fundamental types. That means that `Ti` is either a local type, or a fundamental type whose first parameter is a local type.
+
+This definition is good. Once you read it a few times and it makes sense, it is fairly unambiguous. 
That being said, the RFC was written quite a while ago and we have since found [unsoundness](https://github.com/rust-lang/rust/issues/43355) in some of the parts of the compiler that were implemented based on that RFC. + +Thus, it is probably best to look at the only *truly authoritative* source on the Rust compiler: the rustc source code itself! Indeed, if you think of the rustc source code as an executable specification of how the Rust programming language is meant to work, you can look at it and determine the true behaviour of the orphan rules. + +## The Orphan Check in rustc + +The orphan check as implemented today in the Rust compiler takes place in the [`orphan_check`](https://github.com/rust-lang/rust/blob/b7c6e8f1805cd8a4b0a1c1f22f17a89e9e2cea23/src/librustc/traits/coherence.rs#L236) function which is called [for every declared impl](https://github.com/rust-lang/rust/blob/b7c6e8f1805cd8a4b0a1c1f22f17a89e9e2cea23/src/librustc_typeck/coherence/orphan.rs#L45). Since implementations for locally defined traits are always defined, that function returns OK if the trait being implemented is local. Otherwise, it dispatches to the [`orphan_check_trait_ref`](https://github.com/rust-lang/rust/blob/b7c6e8f1805cd8a4b0a1c1f22f17a89e9e2cea23/src/librustc/traits/coherence.rs#L343) function which does the major orphan rules checking. + +Recall that the impls we are dealing with are in the form `impl Trait for P0`. + +The `orphan_check_trait_ref` function takes a **trait ref** which is essentially `Trait` and its parameters `P0…Pn` (notice that the `Self` type `P0` is included). The parameters `P0…Pn` are known as the **input types** of the trait. The function goes through each input type from `P0` to `Pn` looking for the first local type `Pi`. For each type parameter `Pj` found before that, the function checks that it does not contain any of the placeholder types `T0…Tn` at any level. 
That means that `Pj` cannot have any of the types `T0…Tn` at any level recursively. When the first local type `Pi` is found, we check to make sure any type parameters used in it are covered by a local type. Since we don’t have any fundamental types with more than one type parameter, this check is probably extraneous. + +## The Orphan Rules in rustc + +Thus, based on the source code, the orphan rules in Rust are as follows: + +Given an impl of the form `impl Trait for P0`, the impl is allowed if: + + +- `Trait` is local to the current crate +- `Trait` is upstream to the current crate and: + - There is at least one type parameter `Pi` which, taking fundamental types into account, is **local** to the current crate + - Within the type `Pi`, all type parameters are covered by `Pi` + - This only really applies if we allowed fundamental types with multiple type parameters + - Since we don’t do that yet, we can ignore this for the time being + - All types `Pj` such that `j < i` do not contain `T0…Tn` at any level of depth (i.e. the types are **fully visible** **—** “visible” meaning that the type is a known type and not a type parameter or variable) +## Modeling The Orphan Check + +Determining how to model these rules in chalk is actually quite straightforward at this point. We have an exact specification of how the rules are meant to work and we can translate that directly. + +Here’s how the lowering rules would look: + +For each trait `Trait`, + +- If `Trait` is local to the current crate, we generate: + `forall { LocalImplAllowed(Self: Trait) }` + This models that any impls are allowed if the trait is local to the current crate. 
+- If `Trait` is upstream to the current crate, we need a rule which models the additional conditions on which impls are allowed: +```ignore +forall { LocalImplAllowed(Self: Trait) :- IsLocal(Self) } +forall { + LocalImplAllowed(Self: Trait) :- IsFullyVisible(Self), IsLocal(P1) +} +forall { + LocalImplAllowed(Self: Trait) :- + IsFullyVisible(Self), + IsFullyVisible(P1), + IsLocal(P2) +} +forall { + LocalImplAllowed(Self: Trait) :- + IsFullyVisible(Self), + IsFullyVisible(P1), + IsFullyVisible(P2), + IsLocal(P3) +} +... +forall { + LocalImplAllowed(Self: Trait) :- + IsFullyVisible(Self), + IsFullyVisible(P1), + IsFullyVisible(P2), + ... + IsFullyVisible(Pn-1), + IsLocal(Pn) +} +``` +Here, we have modeled every possible case of `P1` to `Pn` being local and then checked if all prior type parameters are fully visible. This truly is a direct translation of the rules listed above! + +Now, to complete the orphan check, we can iterate over each impl of the same form as before and check if `LocalImplAllowed(P0: Trait)` is provable. + +# Chalk: Overlap Check +> Note: A key assumption for the overlap check is that the orphan check runs before it. That means that any impl that the overlap check encounters already abides by the orphan rules. This is very important to how the check works and it wouldn’t work without the orphan check also present before it. + +The purpose of the overlap check is to ensure that there is only up to one impl that can apply to a method call at a given time. In order to accomplish this, the overlap check looks at all pairs of impls and tries to ensure that there is no “overlap” between the sets of types that both impls can apply to. It accomplishes this by attempting to take the “intersection” of the constraints of both impls and then ensuring that this intersection cannot possibly apply to any types. If this turns out to be provable, the types are truly disjoint. 
+
+This is a simple application of the mathematical law:
+
+> If two sets *A* and *B* are disjoint, then *A* ∩ *B* = ∅
+
+More concretely, let’s say you have the following two impls: ([example from RFC 1023](https://rust-lang.github.io/rfcs/1023-rebalancing-coherence.html#type-locality-and-negative-reasoning))
+
+```rust,ignore
+impl Clone for T { /* ... */ }
+impl Clone for MyType { /* ... */ }
+```
+
+Then we’ll try to solve the following:
+
+```ignore
+not { exists { T = MyType, T: Copy } }
+```
+
+One way to read this is to say “try to prove that there is no `MyType` for any `U` that implements the `Copy` trait”. The reason we’re trying to prove this is because if there is such an implementation, then the second impl would overlap with the first one. The first impl applies to any type that implements `Copy`.
+
+The issue is that there may very well not be any such impl at this current time. In that case, chalk will conclude that these two impls do not overlap. This is an issue because that is certainly an impl that could be added later, so this conclusion may be too strong.
+
+Why is it that we’re only saying that this conclusion *may* be too strong? Well we’re using “may” because it depends on what we want to assume about different crates. The orphan rules make it so that upstream crates can add certain impls to themselves in a semver compatible way. In particular, upstream crates can add impls of upstream traits for their own upstream types without having to worry about breaking downstream code. That means that we can’t just assume that an upstream type doesn’t implement an upstream trait. This particular assumption is too strong.
+
+On the other hand, the orphan rules permit the current crate to add certain impls as well. A property of the orphan rules is that the impls it allows are only allowed to be defined in a single crate. 
So that means that if the impls allowed by the orphan rules in the current crate don’t exist, it is perfectly safe to assume that they are not there. + +The conclusion from all of this is that it is perfectly safe to rule out impls that can be defined in the current crate, but we can’t do the same for impls in any other crate. That means that we need to come up with a way to model all possible impls in upstream, downstream and even sibling crates so we can make sure that our overlap check isn’t making assumptions that are too strong. + +**Clarification:** One caveat to all of this is that we can’t simply model “all possible impls” because then the current crate wouldn’t be able to add any impls at all for upstream traits. After all, it is *possible* for an upstream crate to add *any* impl for its upstream trait. A more precise version of what we’re looking for is to model impls that an upstream crate could add in a **compatible** way. These are impls that we may not be able to current see, but also cannot ignore since that would be too strong of an assumption. + +**We are specifically trying to avoid a situation where a semver compatible upgrade of a dependency breaks the current crate because the current crate was able to add an impl that only the dependency was meant to be able to add.** + +**Sibling Crates:** Furthermore, we can immediately rule out sibling crates because by definition they are unable to use each other’s types or traits. If two crates are unable to interact at all, they cannot possibly add a conflicting implementation in any **coherent** world. Proof: Suppose that a sibling crate could add an impl that would conflict with a conclusion drawn by the overlap check in the current crate. Then the sibling crate would have to be able to implement a trait that was available to the current crate for a type that was available for the current crate. 
Since the sibling crate by definition does not have access to the current crate’s types or traits, the conflicting type and trait must be upstream. By the orphan rules, the sibling crate cannot implement a trait for upstream types and traits. Thus, the conflicting implementation in the sibling crate is impossible and no such implementation can exist. + +**Downstream Crates:** Downstream crates come into play because all traits in upstream crates and in the current crate can potentially be implemented by downstream crates using the forms allowed by the orphan rules. In essence, we always need to assume that downstream crates will implement traits in all ways that compile. + +## Discussion: Modeling the Overlap Check + +[Aaron’s excellent blog post](https://aturon.github.io/blog/2017/04/24/negative-chalk/) talks about this exact problem from the point of view of negative reasoning. It also describes a potential solution which we will apply here to solve our problem. + +The **compatible modality** (`compat` in Aaron’s blog post) is necessary because we don’t always want to assume that all compatible impls exist. In particular, there are certain phases of compilation (e.g. trans) where the closed-world assumption is entirely necessary and sufficient. + +To start addressing the problem at hand, the question is: what implementations are crates other than the current crate allowed to add in a semver compatible way? + +Since we already ruled out sibling crates, this only leaves upstream crates and downstream crates. Upstream crates only have access to upstream types and traits. That means that the only impls they can add are impls for upstream types or blanket impls over type parameters. Downstream crates have access to all upstream traits and types in addition to all traits and types in the current crate. + +**Claim:** No impl containing generic types can be added in a semver compatible way. 
+**Proof:** If the impl contains only generic types, it is considered a blanket impl and it may already be that a downstream crate implements that trait. So by adding a blanket impl, it now conflicts with the potential downstream implementation and is thus a breaking change. If the impl contains a generic type and also some number of upstream types, then a downstream crate may still have implemented that trait for all of the same values of the type parameters but with the generic types filled with downstream types. Thus, adding such an impl would also be a breaking change that would conflict with that potential downstream impl.
+
+The only situation where an impl containing generic types can be added in a way that is **not** a breaking change is if **in addition to the impl**, a new type is also added to the upstream crate. In that case, downstream crates would not have had an opportunity to implement that trait for those types just yet. All of that being said, from the perspective of the current crate looking at potential upstream impls, this case does not matter at all because the current crate can never query for a type that doesn’t exist yet. That means that this situation doesn’t actually impact the potential impls that we need to account for even though it is a valid example of a situation where a new blanket impl is possible.
+
+Thus, for all intents and purposes, impls containing generic type parameters cannot be added in semver compatible ways. This only leaves a single option: impls containing only upstream types. These are compatible because by the orphan rules, the current crate and any further downstream crates are not allowed to implement upstream traits for all upstream types. Thus, adding these impls cannot possibly break anything.
+
+This significantly narrows down our set of potential impls that we need to account for to only impls of upstream traits for upstream types. 
+ +For downstream crates, we need to add rules for all possible impls that they could potentially add using any upstream traits or traits in the current crate. We can do this by enumerating the possibilities generated from the orphan rules specified above: + +```ignore +// Given a trait MyTrait where WCs + +forall { + Implemented(Self: MyTrait) :- + WCs, // where clauses + Compatible, + DownstreamType(Self), // local to a downstream crate + CannotProve, +} +forall { + Implemented(Self: MyTrait) :- + WCs, + Compatible, + IsFullyVisible(Self), + DownstreamType(P1), + CannotProve, +} +... +forall { + Implemented(Self: MyTrait) :- + WCs, + Compatible, + IsFullyVisible(Self), + IsFullyVisible(P1), + ..., + IsFullyVisible(Pn-1), + DownstreamType(Pn), + CannotProve, +} +``` + +Perhaps somewhat surprisingly, `IsFullyVisible` works here too. This is because our previous definition of the lowering for `IsFullyVisible` was quite broad. By lowering *all* types in the current crate and in upstream crates with `IsFullyVisible`, that predicate covers the correct set of types here too. The orphan rules only require that there are no types parameters prior to the first local type. Types that are not type parameters and also by definition not downstream types are all of the types in the current crate and in upstream crates. This is exactly what `IsFullyVisible` covers. + +Fundamental types in both the current crate and in upstream crates can be considered local in a downstream crate if they are provided with a downstream type. To model this, we can add an additional rule for fundamental types: + +```ignore +forall { DownstreamType(MyFundamentalType) :- DownstreamType(T) } +``` + +**Where clauses:** Traits can have where clauses. + +```rust,ignore +#[upstream] trait Foo where Self: Eq { /* ... */ } +``` + +**The question is**: do we need to bring these where clauses down into the rule that we generate for the overlap check? +**Answer:** Yes. 
Since the trait can only be implemented for types that satisfy its where clauses, it makes sense to also limit our assumption of compatible impls to impls that can exist. + +**Associated types:** Traits can have associated types. We do not need to worry about them in our discussion because associated types are output types and trait matching is done on input types. This is also why the orphan rules do not mention associated types at all. + +## Overlap Check in Chalk + +Thus, based on the discussion above, the overlap check with coherence in mind can be modeled in chalk with the following: + + +- All disjoint queries take place inside of `compatible` + +- `compatible { G }` desugars into `forall { (Compatible, DownstreamType(T)) => G }`, thus introducing a `Compatible` predicate using implication + +- For each upstream trait `MyTrait`, we lower it into the following rule: + + ```ignore + forall { + Implemented(Self: MyTrait) :- + Compatible, + IsUpstream(Self), + IsUpstream(P1), + ..., + IsUpstream(Pn), + CannotProve + } + ``` + + This will accomplish our goal of returning an ambiguous answer whenever the +overlap check query asks about any impls that an upstream crate may add in a +compatible way. We determined in the discussion above that these are the only +impls in any crate that can be added compatibly. + + **Note:** Trait `where` clauses are lowered into the rule’s conditions as well as a prerequisite to everything else. + +- For all traits `MyTrait where WCs` in the current crate and in upstream traits, + ```ignore + forall { + Implemented(Self: MyTrait) :- + WCs, // where clauses + Compatible, + DownstreamType(Self), // local to a downstream crate + CannotProve, + } + forall { + Implemented(Self: MyTrait) :- + WCs, + Compatible, + IsFullyVisible(Self), + DownstreamType(P1), + CannotProve, + } + ... 
+ forall { + Implemented(Self: MyTrait) :- + WCs, + Compatible, + IsFullyVisible(Self), + IsFullyVisible(P1), + ..., + IsFullyVisible(Pn-1), + DownstreamType(Pn), + CannotProve, + } + ``` + +- For fundamental types in both the current crate and in upstream crates, + ```ignore + forall { DownstreamType(MyFundamentalType) :- DownstreamType(T) } + ``` + +## Alternative Designs + +Initially, when Niko and I started working on this, Niko suggested the following implementation: + +> For each upstream trait, `MyTrait`, we lower it into the following rule: +> ```ignore +> forall { +> Implemented(Self: MyTrait) :- +> Compatible, +> not { LocalImplAllowed(Self: MyTrait) }, +> CannotProve +> } +> ``` + +This appears to make sense because we need to assume that any impls that the current crate cannot add itself may exist somewhere else. By using `not { LocalImplAllowed(…) }`, we modeled exactly that. The problem is, that this assumption is actually too strong. What we actually need to model is that any **compatible** impls that the current crate cannot add itself may exist somewhere else. This is a **subset** of the impls covered by `not { LocalImplAllowed(…) }`. + +Notes to be added somewhere: + +- For impls that are definable in the current crate, we assume that the only ones that exist are the ones that are actually present. If the current crate does not define an impl that it could define, for our purposes, that impl does not exist. This is in contrast to how we treat upstream impls. For those, we assume that impls *may* exist even if we don’t *know* that they do. +- Struct/Trait privacy (e.g. `pub`) does not matter. For better or for worse, we always assume that everything is public or is going to be public someday, so we do not consider privacy at all. +- Fundamental traits - tend to be traits that you generally wouldn't implement yourself. 
The compiler is the one generating implementations for those traits, so it was decided that it was okay to definitively conclude whether or not an impl exists for them + diff --git a/book/src/clauses/goals_and_clauses.md b/book/src/clauses/goals_and_clauses.md new file mode 100644 index 00000000000..09b619fc1c5 --- /dev/null +++ b/book/src/clauses/goals_and_clauses.md @@ -0,0 +1,263 @@ +# Goals and clauses + +In logic programming terms, a **goal** is something that you must +prove and a **clause** is something that you know is true. As +described in the [lowering to logic](../clauses.html) +chapter, Rust's trait solver is based on an extension of hereditary +harrop (HH) clauses, which extend traditional Prolog Horn clauses with +a few new superpowers. + +## Goals and clauses meta structure + +In Rust's solver, **goals** and **clauses** have the following forms +(note that the two definitions reference one another): + +```text +Goal = DomainGoal // defined in the section below + | Goal && Goal + | Goal || Goal + | exists { Goal } // existential quantification + | forall { Goal } // universal quantification + | if (Clause) { Goal } // implication + | true // something that's trivially true + | ambiguous // something that's never provable + +Clause = DomainGoal + | Clause :- Goal // if can prove Goal, then Clause is true + | Clause && Clause + | forall { Clause } + +K = // a "kind" + | +``` + +The proof procedure for these sorts of goals is actually quite +straightforward. Essentially, it's a form of depth-first search. The +paper +["A Proof Procedure for the Logic of Hereditary Harrop Formulas"][pphhf] +gives the details. + +In terms of code, these types are defined in +[`compiler/rustc_middle/src/traits/mod.rs`][traits_mod] in rustc, and in +[`chalk-ir/src/lib.rs`][chalk_ir] in chalk. 
+ +[pphhf]: ../bibliography.html#pphhf +[traits_mod]: https://github.com/rust-lang/rust/blob/master/compiler/rustc_middle/src/traits/mod.rs +[chalk_ir]: https://github.com/rust-lang/chalk/blob/master/chalk-ir/src/lib.rs + + + +## Domain goals + +*Domain goals* are the atoms of the trait logic. As can be seen in the +definitions given above, general goals basically consist in a combination of +domain goals. + +Moreover, flattening a bit the definition of clauses given previously, one can +see that clauses are always of the form: +```text +forall { DomainGoal :- Goal } +``` +hence domain goals are in fact clauses' LHS. That is, at the most granular level, +domain goals are what the trait solver will end up trying to prove. + + + +To define the set of domain goals in our system, we need to first +introduce a few simple formulations. A **trait reference** consists of +the name of a trait along with a suitable set of inputs P0..Pn: + +```text +TraitRef = P0: TraitName +``` + +So, for example, `u32: Display` is a trait reference, as is `Vec: +IntoIterator`. Note that Rust surface syntax also permits some extra +things, like associated type bindings (`Vec: IntoIterator`), that are not part of a trait reference. + + + +A **projection** consists of an associated item reference along with +its inputs P0..Pm: + +```text +Projection = >::AssocItem +``` + +Given these, we can define a `DomainGoal` as follows: + +```text +DomainGoal = Holds(WhereClause) + | FromEnv(TraitRef) + | FromEnv(Type) + | WellFormed(TraitRef) + | WellFormed(Type) + | Normalize(Projection -> Type) + +WhereClause = Implemented(TraitRef) + | AliasEq(Projection = Type) + | Outlives(Type: Region) + | Outlives(Region: Region) +``` + +`WhereClause` refers to a `where` clause that a Rust user would actually be able +to write in a Rust program. 
This abstraction exists only as a convenience as we +sometimes want to only deal with domain goals that are effectively writable in +Rust. + +Let's break down each one of these, one-by-one. + +#### Implemented(TraitRef) +e.g. `Implemented(i32: Copy)` + +True if the given trait is implemented for the given input types and lifetimes. + +#### AliasEq(Projection = Type) +e.g. `AliasEq::Item = u8` + +The given associated type `Projection` is equal to `Type`; this can be proved +with either normalization or using placeholder associated types and is handled +as a special kind of type aliases. See +[the section on associated types](./type_equality.html). + +#### Normalize(Projection -> Type) +e.g. `Normalize::Item -> u8` + +The given associated type `Projection` can be [normalized][n] to `Type`. + +As discussed in [the section on associated +types](./type_equality.html), `Normalize` implies `AliasEq`, +but not vice versa. In general, proving `Normalize(::Item -> U)` +also requires proving `Implemented(T: Trait)`. + +[n]: ./type_equality.html#normalize + +#### FromEnv(TraitRef) +e.g. `FromEnv(Self: Add)` + +True if the inner `TraitRef` is *assumed* to be true, +that is, if it can be derived from the in-scope where clauses. + +For example, given the following function: + +```rust +fn loud_clone(stuff: &T) -> T { + println!("cloning!"); + stuff.clone() +} +``` + +Inside the body of our function, we would have `FromEnv(T: Clone)`. In-scope +where clauses nest, so a function body inside an impl body inherits the +impl body's where clauses, too. + +This and the next rule are used to implement [implied bounds]. As we'll see +in the section on lowering, `FromEnv(TraitRef)` implies `Implemented(TraitRef)`, +but not vice versa. This distinction is crucial to implied bounds. + +#### FromEnv(Type) +e.g. `FromEnv(HashSet)` + +True if the inner `Type` is *assumed* to be well-formed, that is, if it is an +input type of a function or an impl. 
+ +For example, given the following code: + +```rust,ignore +struct HashSet where K: Hash { ... } + +fn loud_insert(set: &mut HashSet, item: K) { + println!("inserting!"); + set.insert(item); +} +``` + +`HashSet` is an input type of the `loud_insert` function. Hence, we assume it +to be well-formed, so we would have `FromEnv(HashSet)` inside the body of our +function. As we'll see in the section on lowering, `FromEnv(HashSet)` implies +`Implemented(K: Hash)` because the +`HashSet` declaration was written with a `K: Hash` where clause. Hence, we don't +need to repeat that bound on the `loud_insert` function: we rather automatically +assume that it is true. + +#### WellFormed(Item) +These goals imply that the given item is *well-formed*. + +We can talk about different types of items being well-formed: + +* *Types*, like `WellFormed(Vec)`, which is true in Rust, or + `WellFormed(Vec)`, which is not (because `str` is not `Sized`.) + +* *TraitRefs*, like `WellFormed(Vec: Clone)`. + +Well-formedness is important to [implied bounds]. In particular, the reason +it is okay to assume `FromEnv(T: Clone)` in the `loud_clone` example is that we +_also_ verify `WellFormed(T: Clone)` for each call site of `loud_clone`. +Similarly, it is okay to assume `FromEnv(HashSet)` in the `loud_insert` +example because we will verify `WellFormed(HashSet)` for each call site of +`loud_insert`. + +#### Outlives(Type: Region), Outlives(Region: Region) +e.g. `Outlives(&'a str: 'b)`, `Outlives('a: 'static)` + +True if the given type or region on the left outlives the right-hand region. + + + +## Coinductive goals + +Most goals in our system are "inductive". In an inductive goal, +circular reasoning is disallowed. Consider this example clause: + +```text + Implemented(Foo: Bar) :- + Implemented(Foo: Bar). 
+``` + +Considered inductively, this clause is useless: if we are trying to +prove `Implemented(Foo: Bar)`, we would then recursively have to prove +`Implemented(Foo: Bar)`, and that cycle would continue ad infinitum +(the trait solver will terminate here, it would just consider that +`Implemented(Foo: Bar)` is not known to be true). + +However, some goals are *co-inductive*. Simply put, this means that +cycles are OK. So, if `Bar` were a co-inductive trait, then the rule +above would be perfectly valid, and it would indicate that +`Implemented(Foo: Bar)` is true. + +*Auto traits* are one example in Rust where co-inductive goals are used. +Consider the `Send` trait, and imagine that we have this struct: + +```rust +struct Foo { + next: Option<Box<Foo>> +} +``` + +The default rules for auto traits say that `Foo` is `Send` if the +types of its fields are `Send`. Therefore, we would have a rule like + +```text +Implemented(Foo: Send) :- + Implemented(Option<Box<Foo>>: Send). +``` + +As you can probably imagine, proving that `Option<Box<Foo>>: Send` is +going to wind up circularly requiring us to prove that `Foo: Send` +again. So this would be an example where we wind up in a cycle – but +that's ok, we *do* consider `Foo: Send` to hold, even though it +references itself. + +In general, co-inductive traits are used in Rust trait solving when we +want to enumerate a fixed set of possibilities. In the case of auto +traits, we are enumerating the set of reachable types from a given +starting point (i.e., `Foo` can reach values of type +`Option<Box<Foo>>`, which implies it can reach values of type +`Box<Foo>`, and then of type `Foo`, and then the cycle is complete). + +In addition to auto traits, `WellFormed` predicates are co-inductive. +These are used to achieve a similar "enumerate all the cases" pattern, +as described in the section on [implied bounds]. 
+ +[implied bounds]: ./lowering_rules.html#implied-bounds diff --git a/book/src/clauses/implied_bounds.md b/book/src/clauses/implied_bounds.md new file mode 100644 index 00000000000..ac9d33b5d99 --- /dev/null +++ b/book/src/clauses/implied_bounds.md @@ -0,0 +1,499 @@ +# Implied Bounds + +Implied bounds remove the need to repeat where clauses written on +a type declaration or a trait declaration. For example, say we have the +following type declaration: +```rust,ignore +struct HashSet { + ... +} +``` + +then everywhere we use `HashSet` as an "input" type, that is appearing in +the receiver type of an `impl` or in the arguments of a function, we don't +want to have to repeat the `where K: Hash` bound, as in: + +```rust,ignore +// I don't want to have to repeat `where K: Hash` here. +impl HashSet { + ... +} + +// Same here. +fn loud_insert(set: &mut HashSet, item: K) { + println!("inserting!"); + set.insert(item); +} +``` + +Note that in the `loud_insert` example, `HashSet` is not the type +of the `set` argument of `loud_insert`, it only *appears* in the +argument type `&mut HashSet`: we care about every type appearing +in the function's header (the header is the signature without the return type), +not only types of the function's arguments. + +The rationale for applying implied bounds to input types is that, for example, +in order to call the `loud_insert` function above, the programmer must have +*produced* the type `HashSet` already, hence the compiler already verified +that `HashSet` was well-formed, i.e. that `K` effectively implemented +`Hash`, as in the following example: + +```rust,ignore +fn main() { + // I am producing a value of type `HashSet`. + // If `i32` was not `Hash`, the compiler would report an error here. 
+ let set: HashSet = HashSet::new(); + loud_insert(&mut set, 5); +} +``` + +Hence, we don't want to repeat where clauses for input types because that would +sort of duplicate the work of the programmer, having to verify that their types +are well-formed both when calling the function and when using them in the +arguments of their function. The same reasoning applies when using an `impl`. + +Similarly, given the following trait declaration: +```rust,ignore +trait Copy where Self: Clone { // desugared version of `Copy: Clone` + ... +} +``` + +then everywhere we bound over `SomeType: Copy`, we would like to be able to +use the fact that `SomeType: Clone` without having to write it explicitly, +as in: +```rust,ignore +fn loud_clone(x: T) { + println!("cloning!"); + x.clone(); +} + +fn fun_with_copy(x: T) { + println!("will clone a `Copy` type soon..."); + + // I'm using `loud_clone` with `T: Copy`, I know this + // implies `T: Clone` so I don't want to have to write it explicitly. + loud_clone(x); +} +``` + +The rationale for implied bounds for traits is that if a type implements +`Copy`, that is, if there exists an `impl Copy` for that type, there *ought* +to exist an `impl Clone` for that type, otherwise the compiler would have +reported an error in the first place. So again, if we were forced to repeat the +additional `where SomeType: Clone` everywhere whereas we already know that +`SomeType: Copy` hold, we would kind of duplicate the verification work. + +Implied bounds are not yet completely enforced in rustc, at the moment it only +works for outlive requirements, super trait bounds, and bounds on associated +types. The full RFC can be found [here][RFC]. We'll give here a brief view +of how implied bounds work and why we chose to implement it that way. The +complete set of lowering rules can be found in the corresponding +[chapter](./lowering_rules.md). 
+ +[RFC]: https://github.com/rust-lang/rfcs/blob/master/text/2089-implied-bounds.md + +## Implied bounds and lowering rules + +Now we need to express implied bounds in terms of logical rules. We will start +with exposing a naive way to do it. Suppose that we have the following traits: +```rust,ignore +trait Foo { + ... +} + +trait Bar where Self: Foo { + ... +} +``` + +So we would like to say that if a type implements `Bar`, then necessarily +it must also implement `Foo`. We might think that a clause like this would +work: +```text +forall { + Implemented(Type: Foo) :- Implemented(Type: Bar). +} +``` + +Now suppose that we just write this impl: +```rust,ignore +struct X; + +impl Bar for X { } +``` + +Clearly this should not be allowed: indeed, we wrote a `Bar` impl for `X`, but +the `Bar` trait requires that we also implement `Foo` for `X`, which we never +did. In terms of what the compiler does, this would look like this: +```rust,ignore +struct X; + +impl Bar for X { + // We are in a `Bar` impl for the type `X`. + // There is a `where Self: Foo` bound on the `Bar` trait declaration. + // Hence I need to prove that `X` also implements `Foo` for that impl + // to be legal. +} +``` +So the compiler would try to prove `Implemented(X: Foo)`. Of course it will +not find any `impl Foo for X` since we did not write any. However, it +will see our implied bound clause: +```text +forall { + Implemented(Type: Foo) :- Implemented(Type: Bar). +} +``` + +so that it may be able to prove `Implemented(X: Foo)` if `Implemented(X: Bar)` +holds. And it turns out that `Implemented(X: Bar)` does hold since we wrote +a `Bar` impl for `X`! Hence the compiler will accept the `Bar` impl while it +should not. + +## Implied bounds coming from the environment + +So the naive approach does not work. What we need to do is to somehow decouple +implied bounds from impls. 
Suppose we know that a type `SomeType<...>` +implements `Bar` and we want to deduce that `SomeType<...>` must also implement +`Foo`. + +There are two possibilities: first, we have enough information about +`SomeType<...>` to see that there exists a `Bar` impl in the program which +covers `SomeType<...>`, for example a plain `impl<...> Bar for SomeType<...>`. +Then if the compiler has done its job correctly, there *must* exist a `Foo` +impl which covers `SomeType<...>`, e.g. another plain +`impl<...> Foo for SomeType<...>`. In that case then, we can just use this +impl and we do not need implied bounds at all. + +Second possibility: we do not know enough about `SomeType<...>` in order to +find a `Bar` impl which covers it, for example if `SomeType<...>` is just +a type parameter in a function: +```rust,ignore +fn foo() { + // We'd like to deduce `Implemented(T: Foo)`. +} +``` + +That is, the information that `T` implements `Bar` here comes from the +*environment*. The environment is the set of things that we assume to be true +when we type check some Rust declaration. In that case, what we assume is that +`T: Bar`. Then at that point, we might authorize ourselves to have some kind +of "local" implied bound reasoning which would say +`Implemented(T: Foo) :- Implemented(T: Bar)`. This reasoning would +only be done within our `foo` function in order to avoid the earlier +problem where we had a global clause. + +We can apply this local reasoning everywhere we can have an environment +-- i.e. when we can write where clauses -- that is, inside impls, +trait declarations, and type declarations. + +## Computing implied bounds with `FromEnv` + +The previous subsection showed that it was only useful to compute implied +bounds for facts coming from the environment. +We talked about "local" rules, but there are multiple possible strategies to +indeed implement the locality of implied bounds. 
+ +In rustc, the current strategy is to *elaborate* bounds: that is, each time +we have a fact in the environment, we recursively derive all the other things +that are implied by this fact until we reach a fixed point. For example, if +we have the following declarations: +```rust,ignore +trait A { } +trait B where Self: A { } +trait C where Self: B { } + +fn foo() { + ... +} +``` +then inside the `foo` function, we start with an environment containing only +`Implemented(T: C)`. Then because of implied bounds for the `C` trait, we +elaborate `Implemented(T: B)` and add it to our environment. Because of +implied bounds for the `B` trait, we elaborate `Implemented(T: A)`and add it +to our environment as well. We cannot elaborate anything else, so we conclude +that our final environment consists of `Implemented(T: A + B + C)`. + +In the new-style trait system, we like to encode as many things as possible +with logical rules. So rather than "elaborating", we have a set of *global* +program clauses defined like so: +```text +forall { Implemented(T: A) :- FromEnv(T: A). } + +forall { Implemented(T: B) :- FromEnv(T: B). } +forall { FromEnv(T: A) :- FromEnv(T: B). } + +forall { Implemented(T: C) :- FromEnv(T: C). } +forall { FromEnv(T: B) :- FromEnv(T: C). } +``` +So these clauses are defined globally (that is, they are available from +everywhere in the program) but they cannot be used because the hypothesis +is always of the form `FromEnv(...)` which is a bit special. Indeed, as +indicated by the name, `FromEnv(...)` facts can **only** come from the +environment. +How it works is that in the `foo` function, instead of having an environment +containing `Implemented(T: C)`, we replace this environment with +`FromEnv(T: C)`. From here and thanks to the above clauses, we see that we +are able to reach any of `Implemented(T: A)`, `Implemented(T: B)` or +`Implemented(T: C)`, which is what we wanted. 
+ +## Implied bounds and well-formedness checking + +Implied bounds are tightly related with well-formedness checking. +Well-formedness checking is the process of checking that the impls the +programmer wrote are legal, what we referred to earlier as "the compiler doing +its job correctly". + +We already saw examples of illegal and legal impls: +```rust,ignore +trait Foo { } +trait Bar where Self: Foo { } + +struct X; +struct Y; + +impl Bar for X { + // This impl is not legal: the `Bar` trait requires that we also + // implement `Foo`, and we didn't. +} + +impl Foo for Y { + // This impl is legal: there is nothing to check as there are no where + // clauses on the `Foo` trait. +} + +impl Bar for Y { + // This impl is legal: we have a `Foo` impl for `Y`. +} +``` +We must define what "legal" and "illegal" mean. For this, we introduce another +predicate: `WellFormed(Type: Trait)`. We say that the trait reference +`Type: Trait` is well-formed if `Type` meets the bounds written on the +`Trait` declaration. For each impl we write, assuming that the where clauses +declared on the impl hold, the compiler tries to prove that the corresponding +trait reference is well-formed. The impl is legal if the compiler manages to do +so. + +Coming to the definition of `WellFormed(Type: Trait)`, it would be tempting +to define it as: +```rust,ignore +trait Trait where WC1, WC2, ..., WCn { + ... +} +``` + +```text +forall { + WellFormed(Type: Trait) :- WC1 && WC2 && .. && WCn. +} +``` +and indeed this was basically what was done in rustc until it was noticed that +this mixed badly with implied bounds. The key thing is that implied bounds +allows someone to derive all bounds implied by a fact in the environment, and +this *transitively* as we've seen with the `A + B + C` traits example. +However, the `WellFormed` predicate as defined above only checks that the +*direct* superbounds hold. 
That is, if we come back to our `A + B + C` +example: +```rust,ignore +trait A { } +// No where clauses, always well-formed. +// forall<Type> { WellFormed(Type: A). } + +trait B where Self: A { } +// We only check the direct superbound `Self: A`. +// forall<Type> { WellFormed(Type: B) :- Implemented(Type: A). } + +trait C where Self: B { } +// We only check the direct superbound `Self: B`. We do not check +// the `Self: A` implied bound coming from the `Self: B` superbound. +// forall<Type> { WellFormed(Type: C) :- Implemented(Type: B). } +``` +There is an asymmetry between the recursive power of implied bounds and +the shallow checking of `WellFormed`. It turns out that this asymmetry +can be [exploited][bug]. Indeed, suppose that we define the following +traits: +```rust,ignore +trait Partial where Self: Copy { } +// WellFormed(Self: Partial) :- Implemented(Self: Copy). + +trait Complete where Self: Partial { } +// WellFormed(Self: Complete) :- Implemented(Self: Partial). + +impl<T> Partial for T where T: Complete { } + +impl<T> Complete for T { } +``` + +For the `Partial` impl, what the compiler must prove is: +```text +forall<T> { + if (T: Complete) { // assume that the where clauses hold + WellFormed(T: Partial) // show that the trait reference is well-formed + } +} +``` +Proving `WellFormed(T: Partial)` amounts to proving `Implemented(T: Copy)`. +However, we have `Implemented(T: Complete)` in our environment: thanks to +implied bounds, we can deduce `Implemented(T: Partial)`. Using implied bounds +one level deeper, we can deduce `Implemented(T: Copy)`. Finally, the `Partial` +impl is legal. + +For the `Complete` impl, what the compiler must prove is: +```text +forall<T> { + WellFormed(T: Complete) // show that the trait reference is well-formed +} +``` +Proving `WellFormed(T: Complete)` amounts to proving `Implemented(T: Partial)`. 
+We see that the `impl Partial for T` applies if we can prove +`Implemented(T: Complete)`, and it turns out we can prove this fact since our +`impl Complete for T` is a blanket impl without any where clauses. + +So both impls are legal and the compiler accepts the program. Moreover, thanks +to the `Complete` blanket impl, all types implement `Complete`. So we could +now use this impl like so: +```rust,ignore +fn eat(x: T) { } + +fn copy_everything(x: T) { + eat(x); + eat(x); +} + +fn main() { + let not_copiable = vec![1, 2, 3, 4]; + copy_everything(not_copiable); +} +``` +In this program, we use the fact that `Vec` implements `Complete`, as any +other type. Hence we can call `copy_everything` with an argument of type +`Vec`. Inside the `copy_everything` function, we have the +`Implemented(T: Complete)` bound in our environment. Thanks to implied bounds, +we can deduce `Implemented(T: Partial)`. Using implied bounds again, we deduce +`Implemented(T: Copy)` and we can indeed call the `eat` function which moves +the argument twice since its argument is `Copy`. Problem: the `T` type was +in fact `Vec` which is not copy at all, hence we will double-free the +underlying vec storage so we have a memory unsoundness in safe Rust. + +Of course, disregarding the asymmetry between `WellFormed` and implied bounds, +this bug was possible only because we had some kind of self-referencing impls. +But self-referencing impls are very useful in practice and are not the real +culprits in this affair. + +[bug]: https://github.com/rust-lang/rust/pull/43786 + +## Co-inductiveness of `WellFormed` + +So the solution is to fix this asymmetry between `WellFormed` and implied +bounds. For that, we need for the `WellFormed` predicate to not only require +that the direct superbounds hold, but also all the bounds transitively implied +by the superbounds. 
What we can do is to have the following rules for the +`WellFormed` predicate: +```rust,ignore +trait A { } +// WellFormed(Self: A) :- Implemented(Self: A). + +trait B where Self: A { } +// WellFormed(Self: B) :- Implemented(Self: B) && WellFormed(Self: A). + +trait C where Self: B { } +// WellFormed(Self: C) :- Implemented(Self: C) && WellFormed(Self: B). +``` + +Notice that we are now also requiring `Implemented(Self: Trait)` for +`WellFormed(Self: Trait)` to be true: this is to simplify the process of +traversing all the implied bounds transitively. This does not change anything +when checking whether impls are legal, because since we assume +that the where clauses hold inside the impl, we know that the corresponding +trait reference does hold. Thanks to this setup, you can see that we indeed +require to prove the set of all bounds transitively implied by the where +clauses. + +However there is still a catch. Suppose that we have the following trait +definition: +```rust,ignore +trait Foo where <Self as Foo>::Item: Foo { + type Item; +} +``` + +so this definition is a bit more involved than the ones we've seen already +because it defines an associated item. However, the well-formedness rule +would not be more complicated: +```text +WellFormed(Self: Foo) :- + Implemented(Self: Foo) && + WellFormed(<Self as Foo>::Item: Foo). +``` + +Now we would like to write the following impl: +```rust,ignore +impl Foo for i32 { + type Item = i32; +} +``` +The `Foo` trait definition and the `impl Foo for i32` are perfectly valid +Rust: we're kind of recursively using our `Foo` impl in order to show that +the associated value indeed implements `Foo`, but that's ok. But if we +translate this to our well-formedness setting, the compiler proof process +inside the `Foo` impl is the following: it starts with proving that the +well-formedness goal `WellFormed(i32: Foo)` is true. In order to do that, +it must prove the following goals: `Implemented(i32: Foo)` and +`WellFormed(<i32 as Foo>::Item: Foo)`. 
`Implemented(i32: Foo)` holds because +there is our impl and there are no where clauses on it so it's always true. +However, because of the associated type value we used, +`WellFormed(<i32 as Foo>::Item: Foo)` simplifies to just +`WellFormed(i32: Foo)`. So in order to prove its original goal +`WellFormed(i32: Foo)`, the compiler needs to prove `WellFormed(i32: Foo)`: +this clearly is a cycle and cycles are usually rejected by the trait solver, +unless... if the `WellFormed` predicate was made to be co-inductive. + +A co-inductive predicate, as discussed in the chapter on +[goals and clauses](./goals_and_clauses.html#coinductive-goals), is a predicate +for which the +trait solver accepts cycles. In our setting, this would be a valid thing to do: +indeed, the `WellFormed` predicate just serves as a way of enumerating all +the implied bounds. Hence, it's like a fixed point algorithm: it tries to grow +the set of implied bounds until there is nothing more to add. Here, a cycle +in the chain of `WellFormed` predicates just means that there are no more bounds +to add in that direction, so we can just accept this cycle and focus on other +directions. It's easy to prove that under these co-inductive semantics, we +are effectively visiting all the transitive implied bounds, and only these. + +## Implied bounds on types + +We mainly talked about implied bounds for traits, but implied bounds on types +are very similar. Suppose we have the following definition: + +```rust,ignore +struct Type<...> where WC1, ..., WCn { + ... +} +``` + +To prove that `Type<...>` is well-formed, we would need to prove a goal of the +form `WellFormed(Type<...>)`. The `WellFormed(Type<...>)` predicate is defined +by the rule: + +```text +forall<...> { + WellFormed(Type<...>) :- WellFormed(WC1), ..., WellFormed(WCn). 
+} +``` + +Conversely, if we know a type is well-formed from our environment (for example +because it appears as an argument of one of our functions), we can have implied +bounds thanks to the below set of rules: + +```text +forall<...> { + FromEnv(WC1) :- FromEnv(Type<...>). + ... + FromEnv(WCn) :- FromEnv(Type<...>). +} +``` + +Looking at the above rules, we see that we can never encounter a chain of +deductions of the form `WellFormed(Type<...>) :- ... :- WellFormed(Type<...>)`. +So in contrast with traits, the `WellFormed(Type<...>)` predicate does not need +to be co-inductive. diff --git a/book/src/clauses/lowering_rules.md b/book/src/clauses/lowering_rules.md new file mode 100644 index 00000000000..fa15a56c063 --- /dev/null +++ b/book/src/clauses/lowering_rules.md @@ -0,0 +1,416 @@ +# Lowering rules + +This section gives the complete lowering rules for Rust traits into +[program clauses][pc]. It is a kind of reference. These rules +reference the [domain goals][dg] defined in an earlier section. + +[pc]: ./goals_and_clauses.html +[dg]: ./goals_and_clauses.html#domain-goals + +## Notation + +The nonterminal `Pi` is used to mean some generic *parameter*, either a +named lifetime like `'a` or a type parameter like `A`. + +The nonterminal `Ai` is used to mean some generic *argument*, which +might be a lifetime like `'a` or a type like `Vec`. + +When defining the lowering rules, we will give goals and clauses in +the [notation given in this section](./goals_and_clauses.html). +We sometimes insert "macros" like `LowerWhereClause!` into these +definitions; these macros reference other sections within this chapter. + +## Rule names and cross-references + +Each of these lowering rules is given a name, documented with a +comment like so: + + // Rule Foo-Bar-Baz + +The reference implementation of these rules is to be found in +[`chalk/chalk-solve/src/clauses.rs`][chalk_rules]. They are also ported in +rustc in the [`rustc_traits`][rustc_traits] crate. 
+ +[chalk_rules]: https://github.com/rust-lang/chalk/blob/master/chalk-solve/src/clauses.rs +[rustc_traits]: https://github.com/rust-lang/rust/tree/master/compiler/rustc_traits + +## Lowering where clauses + +When used in a goal position, where clauses can be mapped directly to +the `Holds` variant of [domain goals][dg], as follows: + +- `A0: Foo` maps to `Implemented(A0: Foo)` +- `T: 'r` maps to `Outlives(T, 'r)` +- `'a: 'b` maps to `Outlives('a, 'b)` +- `A0: Foo` is a bit special and expands to two distinct + goals, namely `Implemented(A0: Foo)` and + `AliasEq(>::Item = T)` + +In the rules below, we will use `WC` to indicate where clauses that +appear in Rust syntax; we will then use the same `WC` to indicate +where those where clauses appear as goals in the program clauses that +we are producing. In that case, the mapping above is used to convert +from the Rust syntax into goals. + +### Transforming the lowered where clauses + +In addition, in the rules below, we sometimes do some transformations +on the lowered where clauses, as defined here: + +- `FromEnv(WC)` – this indicates that: + - `Implemented(TraitRef)` becomes `FromEnv(TraitRef)` + - other where-clauses are left intact +- `WellFormed(WC)` – this indicates that: + - `Implemented(TraitRef)` becomes `WellFormed(TraitRef)` + - other where-clauses are left intact + +*TODO*: I suspect that we want to alter the outlives relations too, +but Chalk isn't modeling those right now. + +## Lowering traits + +Given a trait definition + +```rust,ignore +trait Trait // P0 == Self +where WC +{ + // trait items +} +``` + +we will produce a number of declarations. This section is focused on +the program clauses for the trait header (i.e., the stuff outside the +`{}`); the [section on trait items](#trait-items) covers the stuff +inside the `{}`. 
+ +### Trait header + +From the trait itself we mostly make "meta" rules that setup the +relationships between different kinds of domain goals. The first such +rule from the trait header creates the mapping between the `FromEnv` +and `Implemented` predicates: + +```text +// Rule Implemented-From-Env +forall { + Implemented(Self: Trait) :- FromEnv(Self: Trait) +} +``` + + + +#### Implied bounds + +The next few clauses have to do with implied bounds (see also +[RFC 2089] and the [implied bounds][implied_bounds] chapter for a more in depth +cover). For each trait, we produce two clauses: + +[RFC 2089]: https://rust-lang.github.io/rfcs/2089-implied-bounds.html +[implied_bounds]: ./implied_bounds.md + +```text +// Rule Implied-Bound-From-Trait +// +// For each where clause WC: +forall { + FromEnv(WC) :- FromEnv(Self: Trait) +} +``` + +This clause says that if we are assuming that the trait holds, then we can also +assume that its where-clauses hold. It's perhaps useful to see an example: + +```rust,ignore +trait Eq: PartialEq { ... } +``` + +In this case, the `PartialEq` supertrait is equivalent to a `where +Self: PartialEq` where clause, in our simplified model. The program +clause above therefore states that if we can prove `FromEnv(T: Eq)` – +e.g., if we are in some function with `T: Eq` in its where clauses – +then we also know that `FromEnv(T: PartialEq)`. Thus the set of things +that follow from the environment are not only the **direct where +clauses** but also things that follow from them. + +The next rule is related; it defines what it means for a trait reference +to be **well-formed**: + +```text +// Rule WellFormed-TraitRef +forall { + WellFormed(Self: Trait) :- Implemented(Self: Trait) && WellFormed(WC) +} +``` + +This `WellFormed` rule states that `T: Trait` is well-formed if (a) +`T: Trait` is implemented and (b) all the where-clauses declared on +`Trait` are well-formed (and hence they are implemented). 
Remember +that the `WellFormed` predicate is +[coinductive](./goals_and_clauses.html#coinductive); in this +case, it is serving as a kind of "carrier" that allows us to enumerate +all the where clauses that are transitively implied by `T: Trait`. + +An example: + +```rust,ignore +trait Foo: A + Bar { } +trait Bar: B + Foo { } +trait A { } +trait B { } +``` + +Here, the transitive set of implications for `T: Foo` are `T: A`, `T: Bar`, and +`T: B`. And indeed if we were to try to prove `WellFormed(T: Foo)`, we would +have to prove each one of those: + +- `WellFormed(T: Foo)` + - `Implemented(T: Foo)` + - `WellFormed(T: A)` + - `Implemented(T: A)` + - `WellFormed(T: Bar)` + - `Implemented(T: Bar)` + - `WellFormed(T: B)` + - `Implemented(T: Bar)` + - `WellFormed(T: Foo)` -- cycle, true coinductively + +This `WellFormed` predicate is only used when proving that impls are +well-formed – basically, for each impl of some trait ref `TraitRef`, +we must show that `WellFormed(TraitRef)`. This in turn justifies the +implied bounds rules that allow us to extend the set of `FromEnv` +items. + +## Lowering type definitions + +We also want to have some rules which define when a type is well-formed. +For example, given this type: + +```rust,ignore +struct Set where K: Hash { ... } +``` + +then `Set` is well-formed because `i32` implements `Hash`, but +`Set` would not be well-formed. Basically, a type is well-formed +if its parameters verify the where clauses written on the type definition. + +Hence, for every type definition: + +```rust, ignore +struct Type where WC { ... } +``` + +we produce the following rule: + +```text +// Rule WellFormed-Type +forall { + WellFormed(Type) :- WellFormed(WC) +} +``` + +Note that we use `struct` to define a type, but this should be understood as a +general type definition (it could be e.g. a generic `enum`). + +Conversely, we define rules which say that if we assume that a type is +well-formed, we can also assume that its where clauses hold. 
That is, +we produce the following family of rules: + +```text +// Rule Implied-Bound-From-Type +// +// For each where clause `WC` +forall { + FromEnv(WC) :- FromEnv(Type) +} +``` + +As for the implied bounds RFC, functions will *assume* that their arguments +are well-formed. For example, suppose we have the following bit of code: + +```rust,ignore +trait Hash: Eq { } +struct Set { ... } + +fn foo(collection: Set, x: K, y: K) { + // `x` and `y` can be equalized even if we did not explicitly write + // `where K: Eq` + if x == y { + ... + } +} +``` + +In the `foo` function, we assume that `Set` is well-formed, i.e. we have +`FromEnv(Set)` in our environment. Because of the previous rule, we get + `FromEnv(K: Hash)` without needing an explicit where clause. And because +of the `Hash` trait definition, there also exists a rule which says: + +```text +forall { + FromEnv(K: Eq) :- FromEnv(K: Hash) +} +``` + +which means that we finally get `FromEnv(K: Eq)` and then can compare `x` +and `y` without needing an explicit where clause. + + + +## Lowering trait items + +### Associated type declarations + +Given a trait that declares a (possibly generic) associated type: + +```rust,ignore +trait Trait // P0 == Self +where WC +{ + type AssocType: Bounds where WC1; +} +``` + +We will produce a number of program clauses. 
The first two define +the rules by which `AliasEq` for associated type projections can succeed; these two clauses are discussed +in detail in the [section on associated types](./type_equality.html), +but reproduced here for reference: + +```text +// Rule AliasEq-Normalize +// +// AliasEq can succeed by normalizing: +forall { + AliasEq(>::AssocType = U) :- + Normalize(>::AssocType -> U) +} +``` + +```text +// Rule AliasEq-Placeholder +// +// AliasEq can succeed through the placeholder associated type, +// see "associated type" chapter for more: +forall { + AliasEq( + >::AssocType = + (Trait::AssocType) + ) +} +``` + +The next rule covers implied bounds for the projection. In particular, +the `Bounds` declared on the associated type must have been proven to hold +to show that the impl is well-formed, and hence we can rely on them +elsewhere. + +```text +// Rule Implied-Bound-From-AssocTy +// +// For each `Bound` in `Bounds`: +forall { + FromEnv(>::AssocType>: Bound) :- + FromEnv(Self: Trait) && WC1 +} +``` + +Next, we define the requirements for an instantiation of our associated +type to be well-formed... + +```text +// Rule WellFormed-AssocTy +forall { + WellFormed((Trait::AssocType)) :- + WellFormed(Self: Trait) && WellFormed(WC1) +} +``` + +...along with the reverse implications, when we can assume that it is +well-formed. + +```text +// Rule Implied-WC-From-AssocTy +// +// For each where clause WC1: +forall { + FromEnv(WC1) :- FromEnv((Trait::AssocType)) +} +``` + +```text +// Rule Implied-Trait-From-AssocTy +forall { + FromEnv(Self: Trait) :- + FromEnv((Trait::AssocType)) +} +``` + +### Lowering function and constant declarations + +Chalk didn't model functions and constants, but I would eventually like to +treat them exactly like normalization. See [the section on function/constant +values below](#constant-vals) for more details. 
+ +## Lowering impls + +Given an impl of a trait: + +```rust,ignore +impl Trait for A0 +where WC +{ + // zero or more impl items +} +``` + +Let `TraitRef` be the trait reference `A0: Trait`. Then we +will create the following rules: + +```text +// Rule Implemented-From-Impl +forall { + Implemented(TraitRef) :- WC +} +``` + +In addition, we will lower all of the *impl items*. + +## Lowering impl items + +### Associated type values + +Given an impl that contains: + +```rust,ignore +impl Trait for P0 +where WC_impl +{ + type AssocType = T; +} +``` + +and our where clause `WC1` on the trait associated type from above, we +produce the following rule: + +```text +// Rule Normalize-From-Impl +forall { + forall { + Normalize(>::AssocType -> T) :- + Implemented(P0 as Trait) && WC1 + } +} +``` + +Note that `WC_impl` and `WC1` both encode where-clauses that the impl can +rely on. (`WC_impl` is not used here, because it is implied by +`Implemented(P0 as Trait)`.) + + + +### Function and constant values + +Chalk didn't model functions and constants, but I would eventually +like to treat them exactly like normalization. This presumably +involves adding a new kind of parameter (constant), and then having a +`NormalizeValue` domain goal. This is *to be written* because the +details are a bit up in the air. diff --git a/book/src/clauses/opaque_types.md b/book/src/clauses/opaque_types.md new file mode 100644 index 00000000000..39c5a8545db --- /dev/null +++ b/book/src/clauses/opaque_types.md @@ -0,0 +1,91 @@ +# Opaque types (impl Trait) + +This chapter describes how "opaque types" are modeled in chalk. Opaque types are +the underlying concept used to implement "existential `impl Trait`" in Rust. 
+They don't have a direct surface syntax, but uses of `impl Trait` in particular +source locations create a hidden opaque type: + +```rust,ignore +fn as_u32s<'a, T: Copy + Into>( + elements: &'a [T], +) -> impl Iterator + 'a { + elements.iter().cloned().map(|e| -> u32 { e.into() }) +} + +#fn main() { +# let x: &[u16] = &[1, 2, 3]; +# let y = as_u32s(&x); +# for e in y { +# println!("e = {}", e); +# } +#} +``` + +Conceptually, the function `as_u32s` is desugared to return a reference to an +*opaque type*, let's call it `AsU32sReturn` (note that this is not valid +Rust syntax): + +```rust,ignore +opaque type AsU32sReturn<'a, T>: IntoIterator + 'a +where + T: Copy + Into; + +fn as_u32s<'a, T: Copy + Into>( + elements: &'a [T], +) -> AsU32sReturn<'a, T> { + ... +} +``` + +Opaque types are a kind of type alias. They are called *opaque* because, unlike +an ordinary type alias, most Rust code (e.g., the callers of `as_u32s`) doesn't +know what type `AsU32sReturn` represents. It only knows what traits that type +implements (e.g., `IntoIterator`). The actual type that is inferred +for `AsU32sReturn` is called the "hidden type". + +## Chalk syntax for an opaque type declaration + +Although the above is not valid Rust syntax, it is quite close to the +format that chalk unit tests use, which looks something like this: + +```rust,ignore +opaque type OpaqueTypeName: /* bounds */ +where + /* where clauses */ += /* hidden type */; +``` + +A chalk opaque type declaration has several parts: + +* The **name** `OpaqueTypeName`, which is the name we use to refer to the opaque type + within the chalk file. In real Rust opaque types are not explicitly declared + and hence they are identified just by internal ids (i.e., they are anonymous + in the same way that a closure type is anonymous), so this is just for unit + testing. +* The **generic parameters** `P0..Pn`. In real Rust, these parameters are inherited + from the context in which the `impl Trait` appeared. 
In our example, these + parameters come from the surrounding function. Note that in real Rust the set + of generic parameters is a *subset* of those that appear on the surrounding + function: in particular, lifetime parameters may not appear unless they explicitly + appear in the opaque type's bounds. +* The **bounds**, which would be `IntoIterator + 'a` in our example. + These are traits that the *hidden type* (see below) is supposed to implement. + They come from the `impl IntoIterator + 'a` type. Even when the hidden + type is, well, hidden, we can assume that the bounds hold. +* The **where clauses**, which would be `T: Copy` and `T: Into` in our + example. These are conditions that must hold on `V0..Vn` for + `OpaqueTypeName` to be a valid type. + * Note that this contrasts with bounds: bounds are things that the hidden type must meet + but which the rest of the code can assume to be true. Where clauses are things + that the rest of the code must prove to be true in order to use the opaque type. + In our example, then, a type like `AsU32sReturn<'a, String>` would be invalid + because `String: Copy` does not hold. + +## Representing opaque types in chalk types + +We represent opaque types as a kind of **[type alias]**. Like any type alias, +we have to define the conditions in which they can be normalized: + +[type alias]: ../types/rust_types/alias.md + +## Placeholder rules diff --git a/book/src/clauses/type_equality.md b/book/src/clauses/type_equality.md new file mode 100644 index 00000000000..3057f0672b2 --- /dev/null +++ b/book/src/clauses/type_equality.md @@ -0,0 +1,165 @@ +# Type equality and unification + +This section covers how the trait system handles equality between +associated types. 
The full system consists of several moving parts, +which we will introduce one by one: + +- Projection and the `Normalize` predicate +- Placeholder associated type projections +- The `AliasEq` predicate +- Integration with unification + +## Associated type projection and normalization + +When a trait defines an associated type (e.g., +[the `Item` type in the `IntoIterator` trait][intoiter-item]), that +type can be referenced by the user using an **associated type +projection** like ` as IntoIterator>::Item`. + +> Often, people will use the shorthand syntax `T::Item`. Presently, that +> syntax is expanded during ["type collection"](https://rustc-dev-guide.rust-lang.org/type-checking.html) into the +> explicit form, though that is something we may want to change in the future. + +[intoiter-item]: https://doc.rust-lang.org/nightly/core/iter/trait.IntoIterator.html#associatedtype.Item + + + +In some cases, associated type projections can be **normalized** – +that is, simplified – based on the types given in an impl. So, to +continue with our example, the impl of `IntoIterator` for `Option` +declares (among other things) that `Item = T`: + +```rust,ignore +impl IntoIterator for Option { + type Item = T; + ... +} +``` + +This means we can normalize the projection ` as +IntoIterator>::Item` to just `u32`. + +In this case, the projection was a "monomorphic" one – that is, it +did not have any type parameters. Monomorphic projections are special +because they can **always** be fully normalized. + +Often, we can normalize other associated type projections as well. For +example, ` as IntoIterator>::Item`, where `?T` is an inference +variable, can be normalized to just `?T`. + +In our logic, normalization is defined by a predicate +`Normalize`. The `Normalize` clauses arise only from +impls. 
For example, the `impl` of `IntoIterator` for `Option` that +we saw above would be lowered to a program clause like so: + +```text +forall { + Normalize( as IntoIterator>::Item -> T) :- + Implemented(Option: IntoIterator) +} +``` + +where in this case, the one `Implemented` condition is always true. + +> Since we do not permit quantification over traits, this is really more like +> a family of program clauses, one for each associated type. + +We could apply that rule to normalize either of the examples that +we've seen so far. + +## Placeholder associated types + +Sometimes however we want to work with associated types that cannot be +normalized. For example, consider this function: + +```rust,ignore +fn foo(...) { ... } +``` + +In this context, how would we normalize the type `T::Item`? + +Without knowing what `T` is, we can't really do so. To represent this case, +we introduce a type called a **placeholder associated type projection**. This +is written like so: `(IntoIterator::Item)`. + +You may note that it looks a lot like a regular type (e.g., `Option`), +except that the "name" of the type is `(IntoIterator::Item)`. This is not an +accident: placeholder associated type projections work just like ordinary +types like `Vec` when it comes to unification. That is, they are only +considered equal if (a) they are both references to the same associated type, +like `IntoIterator::Item` and (b) their type arguments are equal. + +Placeholder associated types are never written directly by the user. +They are used internally by the trait system only, as we will see +shortly. + +In rustc, they correspond to the `TyKind::UnnormalizedProjectionTy` enum +variant, declared in [`compiler/rustc_middle/src/ty/sty.rs`][sty]. In chalk, we use an +`AssociatedType`. 
+ +[sty]: https://github.com/rust-lang/rust/blob/master/compiler/rustc_middle/src/ty/sty.rs + +## Projection equality + +So far we have seen two ways to answer the question of "When can we +consider an associated type projection equal to another type?": + +- the `Normalize` predicate could be used to transform projections when we + knew which impl applied; +- **placeholder** associated types can be used when we don't. This is also + known as **lazy normalization**. + +These two cases are brought together by the `AliasEq` predicate introduced +[before](../types/rust_types/alias.html) (where the [`AliasTy`](https://rust-lang.github.io/chalk/chalk_ir/enum.AliasTy.html) is `Projection`). The instantiated predicate for projection +equality looks then like so: + +```text +AliasEq(::Item = U) +``` + +and we will see that it can be proven *either* via normalization or +via the placeholder type. As part of lowering an associated type declaration from +some trait, we create two program clauses for `AliasEq`: + +```text +forall { + AliasEq(::Item = U) :- + Normalize(::Item -> U) +} + +forall { + AliasEq(::Item = (IntoIterator::Item)) +} +``` + +These are the only two `AliasEq` program clauses we ever make for +any given associated item. + +## Integration with unification + +Now we are ready to discuss how associated type equality integrates +with unification. As described in the +[type inference](https://rustc-dev-guide.rust-lang.org/type-inference.html) +section, unification is basically a procedure with a signature like this: + +```text +Unify(A, B) = Result +``` + +In other words, we try to unify two things A and B. That procedure +might just fail, in which case we get back `Err(NoSolution)`. This +would happen, for example, if we tried to unify `u32` and `i32`. + +The key point is that, on success, unification can also give back to +us a set of subgoals that still remain to be proven. 
+ +Whenever unification encounters a non-placeholder associated type +projection P being equated with some other type T, it always succeeds, +but it produces a subgoal `AliasEq(P = T)` that is propagated +back up. Thus it falls to the ordinary workings of the trait system +to process that constraint. + +> If we unify two projections P1 and P2, then unification produces a +> variable X and asks us to prove that `AliasEq(P1 = X)` and +> `AliasEq(P2 = X)`. (That used to be needed in an older system to +> prevent cycles; I rather doubt it still is. -nmatsakis) diff --git a/book/src/clauses/well_known_traits.md b/book/src/clauses/well_known_traits.md new file mode 100644 index 00000000000..b1ac4ba2b70 --- /dev/null +++ b/book/src/clauses/well_known_traits.md @@ -0,0 +1,60 @@ +# Well known traits + +For most traits, the question of whether some type T implements the trait is determined by +looking solely at the impls that exist for the trait. But there are some well-known traits +where we have "built-in" impls that are never expressly written in the compiler, they are +built-in to the language itself. In some cases, these impls also encode complex conditions +that an ordinary impl cannot express. To address this, chalk has a notion of a `WellKnownTrait` +-- basically, a trait which is inherent to the language and where we will generate custom logic. + +As an example, consider the logic for `Sized` in regards to structs: A struct can have +at most one `!Sized` field, and it must be the last. And the last field isn't `Sized`, +then neither is the struct itself. + +Chalk has two main places that deal with well known trait logic: +1) [`chalk-solve\clauses\builtin_traits`][builtin_traits_mod], which generates built-in implementations +for well-known traits. +2) [well-formedness](wf.md) checks, some of which need to know about well known traits. 
+ +[builtin_traits_mod]: https://github.com/rust-lang/chalk/blob/master/chalk-solve/src/clauses/builtin_traits.rs + +# Auto traits + +Auto traits, while not exactly well known traits, do also have special logic. +The idea is that the type implements an auto trait if all data owned by that type implements it, +with an ability to specifically opt-out or opt-in. Additionally, auto traits are [coinductive][coinductive_section]. +Some common examples of auto traits are `Send` and `Sync`. + +[coinductive_section]: ../engine/logic/coinduction.html#coinduction-and-refinement-strands + +# Current state +| Type | Copy | Clone | Sized | Unsize | CoerceUnsized | Drop | FnOnce/FnMut/Fn | Unpin | Coroutine | auto traits | +| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | +| tuple types | ✅ | ✅ | ✅ | ✅ | ⚬ | ⚬ | ⚬ | ⚬ | ⚬ | ✅ | +| structs | ⚬ | ⚬ | ✅ | ✅ | ⚬ | ⚬ | ⚬ | ⚬ | ⚬ | ✅ | +| scalar types | 📚 | 📚 | ✅ | ⚬ | ⚬ | ⚬ | ⚬ | ⚬ | ⚬ | ✅ | +| str | 📚 | 📚 | ⚬ | ⚬ | ⚬ | ⚬ | ⚬ | ⚬ | ⚬ | ✅ | +| never type | 📚 | 📚 | ✅ | ⚬ | ⚬ | ⚬ | ⚬ | ⚬ | ⚬ | ✅ | +| trait objects | ⚬ | ⚬ | ⚬ | ✅ | ⚬ | ⚬ | ⚬ | ⚬ | ⚬ | ⚬ | +| functions defs | ✅ | ✅ | ✅ | ⚬ | ⚬ | ⚬ | ❌ | ⚬ | ⚬ | ✅ | +| functions ptrs | ✅ | ✅ | ✅ | ⚬ | ⚬ | ⚬ | ✅ | ⚬ | ⚬ | ✅ | +| raw ptrs | 📚 | 📚 | ✅ | ⚬ | ⚬ | ⚬ | ⚬ | ⚬ | ⚬ | ✅ | +| immutable refs | 📚 | 📚 | ✅ | ⚬ | ⚬ | ⚬ | ⚬ | ⚬ | ⚬ | ✅ | +| mutable refs | ⚬ | ⚬ | ✅ | ⚬ | ⚬ | ⚬ | ⚬ | ⚬ | ⚬ | ✅ | +| slices | ⚬ | ⚬ | ⚬ | ✅ | ⚬ | ⚬ | ⚬ | ⚬ | ⚬ | ✅ | +| arrays | ✅ | ✅ | ✅ | ❌ | ⚬ | ⚬ | ⚬ | ⚬ | ⚬ | ✅ | +| closures | ✅ | ✅ | ✅ | ⚬ | ⚬ | ⚬ | ✅ | ⚬ | ⚬ | ✅ | +| coroutines | ⚬ | ⚬ | ❌ | ⚬ | ⚬ | ⚬ | ⚬ | ✅ | ❌ | ✅ | +| gen. 
witness | ⚬ | ⚬ | ⚬ | ⚬ | ⚬ | ⚬ | ⚬ | ⚬ | ⚬ | ❌ | +| opaque | ⚬ | ⚬ | ⚬ | ⚬ | ⚬ | ⚬ | ⚬ | ⚬ | ⚬ | ❌ | +| foreign | ⚬ | ⚬ | ⚬ | ⚬ | ⚬ | ⚬ | ⚬ | ⚬ | ⚬ | ❌ | +| ----------- | | | | | | | | | | | +| well-formedness | ✅ | ⚬ | ✅ | ⚬ | ✅ | ✅ | ⚬ | ⚬ | ⚬ | ⚬ | + +legend: +⚬ - not applicable +✅ - implemented +📚 - implementation provided in libcore +❌ - not implemented + +❌ after a type name means that type is not yet in chalk diff --git a/book/src/clauses/wf.md b/book/src/clauses/wf.md new file mode 100644 index 00000000000..73858689125 --- /dev/null +++ b/book/src/clauses/wf.md @@ -0,0 +1,469 @@ +# Well-formedness checking + +WF checking has the job of checking that the various declarations in a Rust +program are well-formed. This is the basis for implied bounds, and partly for +that reason, this checking can be surprisingly subtle! For example, we +have to be sure that each impl proves the WF conditions declared on +the trait. + +For each declaration in a Rust program, we will generate a logical goal and try +to prove it using the lowered rules we described in the +[lowering rules](./lowering_rules.md) chapter. If we are able to prove it, we +say that the construct is well-formed. If not, we report an error to the user. + +Well-formedness checking happens in the [`chalk/chalk-solve/src/wf.rs`][wf] +module in chalk. After you have read this chapter, you may find useful to see +an extended set of examples in the [`chalk/tests/test/wf_lowering.rs`][wf_test] submodule. + +The new-style WF checking has not been implemented in rustc yet. + +[wf]: https://github.com/rust-lang/chalk/blob/master/chalk-solve/src/wf.rs +[wf_test]: https://github.com/rust-lang/chalk/blob/master/tests/test/wf_lowering.rs + +We give here a complete reference of the generated goals for each Rust +declaration. 
+ +In addition to the notations introduced in the chapter about +lowering rules, we'll introduce another notation: when checking WF of a +declaration, we'll often have to prove that all types that appear are +well-formed, except type parameters that we always assume to be WF. Hence, +we'll use the following notation: for a type `SomeType<...>`, we define +`InputTypes(SomeType<...>)` to be the set of all non-parameter types appearing +in `SomeType<...>`, including `SomeType<...>` itself. + +Examples: +* `InputTypes((u32, f32)) = [u32, f32, (u32, f32)]` +* `InputTypes(Box) = [Box]` (assuming that `T` is a type parameter) +* `InputTypes(Box>) = [Box, Box>]` + +We also extend the `InputTypes` notation to where clauses in the natural way. +So, for example `InputTypes(A0: Trait)` is the union of +`InputTypes(A0)`, `InputTypes(A1)`, ..., `InputTypes(An)`. + +# Type definitions + +Given a general type definition: +```rust,ignore +struct Type where WC_type { + field1: A1, + ... + fieldn: An, +} +``` + +we generate the following goal, which represents its well-formedness condition: +```text +forall { + if (FromEnv(WC_type)) { + WellFormed(InputTypes(WC_type)) && + WellFormed(InputTypes(A1)) && + ... + WellFormed(InputTypes(An)) + } +} +``` + +which in English states: assuming that the where clauses defined on the type +hold, prove that every type appearing in the type definition is well-formed. + +Some examples: +```rust,ignore +struct OnlyClone where T: Clone { + clonable: T, +} +// The only types appearing are type parameters: we have nothing to check, +// the type definition is well-formed. +``` + +```rust,ignore +struct Foo where T: Clone { + foo: OnlyClone, +} +// The only non-parameter type which appears in this definition is +// `OnlyClone`. The generated goal is the following: +// ``` +// forall { +// if (FromEnv(T: Clone)) { +// WellFormed(OnlyClone) +// } +// } +// ``` +// which is provable. 
+``` + +```rust,ignore +struct Bar where ::Item: Debug { + bar: i32, +} +// The only non-parameter types which appear in this definition are +// `::Item` and `i32`. The generated goal is the following: +// ``` +// forall { +// if (FromEnv(::Item: Debug)) { +// WellFormed(::Item) && +// WellFormed(i32) +// } +// } +// ``` +// which is not provable since `WellFormed(::Item)` requires +// proving `Implemented(T: Iterator)`, and we are unable to prove that for an +// unknown `T`. +// +// Hence, this type definition is considered illegal. An additional +// `where T: Iterator` would make it legal. +``` + +# Trait definitions + +Given a general trait definition: +```rust,ignore +trait Trait where WC_trait { + type Assoc: Bounds_assoc where WC_assoc; +} +``` + +we generate the following goal: +```text +forall { + if (FromEnv(WC_trait)) { + WellFormed(InputTypes(WC_trait)) && + + forall { + if (FromEnv(WC_assoc)) { + WellFormed(InputTypes(Bounds_assoc)) && + WellFormed(InputTypes(WC_assoc)) + } + } + } +} +``` + +There is not much to verify in a trait definition. We just want +to prove that the types appearing in the trait definition are well-formed, +under the assumption that the different where clauses hold. + +Some examples: +```rust,ignore +trait Foo where T: Iterator, ::Item: Debug { + ... +} +// The only non-parameter type which appears in this definition is +// `::Item`. The generated goal is the following: +// ``` +// forall { +// if (FromEnv(T: Iterator), FromEnv(::Item: Debug)) { +// WellFormed(::Item) +// } +// } +// ``` +// which is provable thanks to the `FromEnv(T: Iterator)` assumption. +``` + +```rust,ignore +trait Bar { + type Assoc: From<::Item>; +} +// The only non-parameter type which appears in this definition is +// `::Item`. The generated goal is the following: +// ``` +// forall { +// WellFormed(::Item) +// } +// ``` +// which is not provable, hence the trait definition is considered illegal. 
+``` + +```rust,ignore +trait Baz { + type Assoc: From<::Item> where T: Iterator; +} +// The generated goal is now: +// ``` +// forall { +// if (FromEnv(T: Iterator)) { +// WellFormed(::Item) +// } +// } +// ``` +// which is now provable. +``` + +# Impls + +Now we give ourselves a general impl for the trait defined above: +```rust,ignore +impl Trait for SomeType where WC_impl { + type Assoc = SomeValue where WC_assoc; +} +``` + +Note that here, `WC_assoc` are the same where clauses as those defined on the +associated type definition in the trait declaration, *except* that type +parameters from the trait are substituted with values provided by the impl +(see example below). You cannot add new where clauses. You may omit to write +the where clauses if you want to emphasize the fact that you are actually not +relying on them. + +Some examples to illustrate that: +```rust,ignore +trait Foo { + type Assoc where T: Clone; +} + +struct OnlyClone { ... } + +impl Foo> for () { + // We substitute type parameters from the trait by the ones provided + // by the impl, that is instead of having a `T: Clone` where clause, + // we have an `Option: Clone` one. + type Assoc = OnlyClone> where Option: Clone; +} + +impl Foo for i32 { + // I'm not using the `T: Clone` where clause from the trait, so I can + // omit it. + type Assoc = u32; +} + +impl Foo for f32 { + type Assoc = OnlyClone> where Option: Clone; + // ^^^^^^^^^^^^^^^^^^^^^^ + // this where clause does not exist + // on the original trait decl: illegal +} +``` + +> So in Rust, where clauses on associated types work *exactly* like where +> clauses on trait methods: in an impl, we must substitute the parameters from +> the traits with values provided by the impl, we may omit them if we don't +> need them, but we cannot add new where clauses. 
+ +Now let's see the generated goal for this general impl: +```text +forall { + // Well-formedness of types appearing in the impl + if (FromEnv(WC_impl), FromEnv(InputTypes(SomeType: Trait))) { + WellFormed(InputTypes(WC_impl)) && + + forall { + if (FromEnv(WC_assoc)) { + WellFormed(InputTypes(SomeValue)) + } + } + } + + // Implied bounds checking + if (FromEnv(WC_impl), FromEnv(InputTypes(SomeType: Trait))) { + WellFormed(SomeType: Trait) && + + forall { + if (FromEnv(WC_assoc)) { + WellFormed(SomeValue: Bounds_assoc) + } + } + } +} +``` + +Here is the most complex goal. As always, first, assuming that +the various where clauses hold, we prove that every type appearing in the impl +is well-formed, ***except*** types appearing in the impl header +`SomeType: Trait`. Instead, we *assume* that those types are +well-formed +(hence the `if (FromEnv(InputTypes(SomeType: Trait)))` +conditions). This is +part of the implied bounds proposal, so that we can rely on the bounds +written on the definition of e.g. the `SomeType` type (and that we don't +need to repeat those bounds). +> Note that we don't need to check well-formedness of types appearing in +> `WC_assoc` because we already did that in the trait decl (they are just +> repeated with some substitutions of values which we already assume to be +> well-formed) + +Next, still assuming that the where clauses on the impl `WC_impl` hold and that +the input types of `SomeType` are well-formed, we prove that +`WellFormed(SomeType: Trait)` hold. That is, we want to prove +that `SomeType` verify all the where clauses that might transitively +be required by the `Trait` definition (see +[this subsection](./implied_bounds.md#co-inductiveness-of-wellformed)). + +Lastly, assuming in addition that the where clauses on the associated type +`WC_assoc` hold, +we prove that `WellFormed(SomeValue: Bounds_assoc)` hold. 
Again, we are +not only proving `Implemented(SomeValue: Bounds_assoc)`, but also +all the facts that might transitively come from `Bounds_assoc`. We must do this +because we allow the use of implied bounds on associated types: if we have +`FromEnv(SomeType: Trait)` in our environment, the lowering rules +chapter indicates that we are able to deduce +`FromEnv(::Assoc: Bounds_assoc)` without knowing what the +precise value of `::Assoc` is. + +Some examples for the generated goal: +```rust,ignore +// Trait Program Clauses + +// These are program clauses that come from the trait definitions below +// and that the trait solver can use for its reasonings. I'm just restating +// them here so that we have them in mind. + +trait Copy { } +// This is a program clause that comes from the trait definition above +// and that the trait solver can use for its reasonings. I'm just restating +// it here (and also the few other ones coming just after) so that we have +// them in mind. +// `WellFormed(Self: Copy) :- Implemented(Self: Copy).` + +trait Partial where Self: Copy { } +// ``` +// WellFormed(Self: Partial) :- +// Implemented(Self: Partial) && +// WellFormed(Self: Copy). +// ``` + +trait Complete where Self: Partial { } +// ``` +// WellFormed(Self: Complete) :- +// Implemented(Self: Complete) && +// WellFormed(Self: Partial). +// ``` + +// Impl WF Goals + +impl Partial for T where T: Complete { } +// The generated goal is: +// ``` +// forall { +// if (FromEnv(T: Complete)) { +// WellFormed(T: Partial) +// } +// } +// ``` +// Then proving `WellFormed(T: Partial)` amounts to proving +// `Implemented(T: Partial)` and `Implemented(T: Copy)`. +// Both those facts can be deduced from the `FromEnv(T: Complete)` in our +// environment: this impl is legal. 
+ +impl Complete for T { } +// The generated goal is: +// ``` +// forall { +// WellFormed(T: Complete) +// } +// ``` +// Then proving `WellFormed(T: Complete)` amounts to proving +// `Implemented(T: Complete)`, `Implemented(T: Partial)` and +// `Implemented(T: Copy)`. +// +// `Implemented(T: Complete)` can be proved thanks to the +// `impl Complete for T` blanket impl. +// +// `Implemented(T: Partial)` can be proved thanks to the +// `impl Partial for T where T: Complete` impl and because we know +// `T: Complete` holds. + +// However, `Implemented(T: Copy)` cannot be proved: the impl is illegal. +// An additional `where T: Copy` bound would be sufficient to make that impl +// legal. +``` + +```rust,ignore +trait Bar { } + +impl Bar for T where ::Item: Bar { } +// We have a non-parameter type appearing in the where clauses: +// `::Item`. The generated goal is: +// ``` +// forall { +// if (FromEnv(::Item: Bar)) { +// WellFormed(T: Bar) && +// WellFormed(::Item: Bar) +// } +// } +// ``` +// And `WellFormed(::Item: Bar)` is not provable: we'd need +// an additional `where T: Iterator` for example. +``` + +```rust,ignore +trait Foo { } + +trait Bar { + type Item: Foo; +} + +struct Stuff { } + +impl Bar for Stuff where T: Foo { + type Item = T; +} +// The generated goal is: +// ``` +// forall { +// if (FromEnv(T: Foo)) { +// WellFormed(T: Foo). +// } +// } +// ``` +// which is provable. +``` + +```rust,ignore +trait Debug { ... } +// `WellFormed(Self: Debug) :- Implemented(Self: Debug).` + +struct Box { ... } +impl Debug for Box where T: Debug { ... 
} + +trait PointerFamily { + type Pointer: Debug where T: Debug; +} +// `WellFormed(Self: PointerFamily) :- Implemented(Self: PointerFamily).` + +struct BoxFamily; + +impl PointerFamily for BoxFamily { + type Pointer = Box where T: Debug; +} +// The generated goal is: +// ``` +// forall { +// WellFormed(BoxFamily: PointerFamily) && +// +// if (FromEnv(T: Debug)) { +// WellFormed(Box: Debug) && +// WellFormed(Box) +// } +// } +// ``` +// `WellFormed(BoxFamily: PointerFamily)` amounts to proving +// `Implemented(BoxFamily: PointerFamily)`, which is ok thanks to our impl. +// +// `WellFormed(Box)` is always true (there are no where clauses on the +// `Box` type definition). +// +// Moreover, we have an `impl Debug for Box`, hence +// we can prove `WellFormed(Box: Debug)` and the impl is indeed legal. +``` + +```rust,ignore +trait Foo { + type Assoc; +} + +struct OnlyClone { ... } + +impl Foo for i32 { + type Assoc = OnlyClone; +} +// The generated goal is: +// ``` +// forall { +// WellFormed(i32: Foo) && +// WellFormed(OnlyClone) +// } +// ``` +// however `WellFormed(OnlyClone)` is not provable because it requires +// `Implemented(T: Clone)`. It would be tempting to just add a `where T: Clone` +// bound inside the `impl Foo for i32` block, however we saw that it was +// illegal to add where clauses that didn't come from the trait definition. +``` diff --git a/book/src/contribution_guide.md b/book/src/contribution_guide.md new file mode 100644 index 00000000000..0719a8bedc0 --- /dev/null +++ b/book/src/contribution_guide.md @@ -0,0 +1,95 @@ +# Contribution guide + +Thank you for your interest in contributing to chalk! There are many ways to +contribute, and we appreciate all of them. + +* [Bug Reports](#bug-reports) +* [Running and Debugging](#running-and-debugging) +* [Pull Requests](#pull-requests) +* [Writing Documentation](#writing-documentation) + +If you'd like to contribute, consider joining the [Traits Working Group][traits-working-group]. 
+We hang out on the [rust-lang zulip][rust-lang-zulip] in the [#wg-traits][wg-traits-stream] stream. + +As a reminder, all contributors are expected to follow our [Code of Conduct][coc]. + +[traits-working-group]: https://rust-lang.github.io/compiler-team/working-groups/traits/ +[rust-lang-zulip]:https://rust-lang.zulipchat.com +[wg-traits-stream]: https://rust-lang.zulipchat.com/#narrow/stream/144729-wg-traits +[coc]: https://www.rust-lang.org/conduct.html + +## Bug Reports +[bug-reports]: #bug-reports + +While bugs are unfortunate, they're a reality in software. We can't fix what we +don't know about, so please report liberally. If you're not sure if something +is a bug or not, feel free to file a bug anyway. + +If you have the chance, before reporting a bug, please search existing issues, +as it's possible that someone else has already reported your error. This doesn't +always work, and sometimes it's hard to know what to search for, so consider +this extra credit. We won't mind if you accidentally file a duplicate report. + +Sometimes, a backtrace is helpful, and so including that is nice. To get +a backtrace, set the `RUST_BACKTRACE` environment variable to a value +other than `0`. The easiest way to do this is to invoke `chalk` like this: + +```bash +$ RUST_BACKTRACE=1 chalk ... +``` + +## Running and Debugging +[running-and-debugging]: #running-and-debugging +There is a repl mainly for debugging purposes which can be run by `cargo run`. Some basic examples are in [libstd.chalk](https://github.com/rust-lang/chalk/blob/master/libstd.chalk): +```bash +$ cargo run +?- load libstd.chalk +?- Vec>: Clone +Unique; substitution [], lifetime constraints [] +``` + +More logging can be enabled by setting the `CHALK_DEBUG` environment variable. 
Set `CHALK_DEBUG=3` or `CHALK_DEBUG=info` to see `info!(...)` output, and `CHALK_DEBUG=4` or `CHALK_DEBUG=debug` to see `debug!(...)` output as well. In addition, logs may be filtered in a number of ways. The syntax for filtering logs is: + +```notrust + target[span{field=value}]=level +``` + +(Note: all parts of the filter are optional ) + +In more detail, the filter may consist of: + +- A target (location of origin) + - For example setting `CHALK_DEBUG='chalk_solve::infer::unify'` will filter logs to show only output originating from `chalk_solve::infer::unify`. +- A span (name provided to the logging macros, for instance `unify_var_ty` in `debug_span!("unify_var_ty")`) + - For example setting `CHALK_DEBUG='[unify_ty_ty]'` will show only logs where the span contains `unify_ty_ty`. +- A list of fields (variables recorded in the logs), for instance `ty` in `debug!("unify_var_ty", ?ty)` with values optionally specified + - For example setting `CHALK_DEBUG='[{ty}]'` will show only logs which contain a variable `ty` + - Setting `CHALK_DEBUG='[{ty=Bar}]'` will show only logs which contain a variable `ty` with the value `Bar` +- A maximum log level (one of `info`, `debug`, `trace`) which shows logs at or below the given level + +More documentation on the syntax and options can be found [here](https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#Directives). + + +## Pull Requests +[pull-requests]: #pull-requests + +Pull requests are the primary mechanism we use to change Rust. GitHub itself +has some [great documentation][pull-request-documentation] on using the Pull Request feature. +We use the "fork and pull" model [described here][development-models], where +contributors push changes to their personal fork and create pull requests to +bring those changes into the source repository. + +Please make pull requests against the `master` branch. 
+ +[pull-request-documentation]: https://help.github.com/articles/about-pull-requests/ +[development-models]: https://help.github.com/articles/about-collaborative-development-models/ + +## Writing Documentation +[writing-documentation]: #writing-documentation + +Documentation improvements are very welcome. Documentation pull requests +function in the same way as other pull requests. + +You can find documentation style guidelines in [RFC 1574][rfc1574]. + +[rfc1574]: https://github.com/rust-lang/rfcs/blob/master/text/1574-more-api-documentation-conventions.md#appendix-a-full-conventions-text diff --git a/book/src/engine.md b/book/src/engine.md new file mode 100644 index 00000000000..94b5c4d7d44 --- /dev/null +++ b/book/src/engine.md @@ -0,0 +1,23 @@ +# Chalk engine + +The `chalk-engine` crate is the core PROLOG-like solver for logical +predicates. Importantly, it is very general and not specific to Rust, +Rust types, or Rust logic. + +## Implemented PROLOG concepts + +The engine implements the following PROLOG logic concepts. Some of these +have been published on previously, and some are `Chalk`-specific. This isn't +necessarily an exhaustive list: +- Basic logic +- Negation +- Floundering +- Coinductive solving + +## Note + +Throughout most of this chapter, the specifics in regards to +`Canonicalization` and `UCanonicalization` are avoided. These are important +concepts to understand, but don't particularly help to understand how +`chalk-engine` *works*. In a few places, it may be highlighted if it *is* +important. diff --git a/book/src/engine/logic.md b/book/src/engine/logic.md new file mode 100644 index 00000000000..2ec3501dece --- /dev/null +++ b/book/src/engine/logic.md @@ -0,0 +1,131 @@ +# Logic + +## Overview + +`chalk-engine` solves a `Goal` using a hybrid search strategy with elements of depth- and breadth-first search. 
When asked to solve a
+particular `Goal` it hasn't seen before, it will first try to
+generate a set of program clauses that get turned into [`Strand`]s, which could
+solve that goal. Otherwise, if asked to solve a `Goal` it has seen before, it
+will select the existing table.
+
+Once a table is selected, it will pick a `Strand` and a subgoal of that
+`Strand`, try to solve that `Goal`, repeating the process.
+
+When an `Answer` is found for a `Goal`, it is merged into the parent `Strand`,
+or returned if it was the root `Goal`. It will then go on to pick the next
+subgoal of the `Strand` and continue on.
+
+If at any point the solving stops being "successful" (i.e. we definitely found
+something to be unsolvable), the solving is restarted at the root `Goal`.
+
+## The stack
+
+In order to detect cycles (talked more about later), as well as keep track of
+the selected [`Strand`] for each table, `chalk-engine` stores a [`Stack`] on the
+`Forest`. Whenever a new goal is selected, a [`StackEntry`] is pushed onto the
+`Stack`, as well as the "time" (which also gets incremented) that it was pushed.
+This "time" can be compared later to check if all the `Strands` of a [`Table`]
+have been checked in a single solve.
+
+As `Answer`s are found for the selected `Table`, entries on the stack are
+`pop`ped. If something is found to be unsolvable, the complete stack is unwound.
+
+## Table creation
+
+As mentioned before, whenever a new `Goal` is encountered, a new [`Table`] is
+created to store current and future answers. First, the [`Goal`] is converted into
+a `GoalData`. If it can be simplified, then a `Strand` with one or more
+subgoals will be generated and can be followed as above. Otherwise, if it is a
+`DomainGoal` (see above), then
+[`program_clauses_for_goal`](https://rust-lang.github.io/chalk/chalk_solve/clauses/fn.program_clauses_for_goal.html)
+is called and each clause is converted into a `Strand` and can be followed.
+
+## `root_answer` and `ensure_root_answer`
+
+The [`root_answer`](https://rust-lang.github.io/chalk/chalk_engine/forest/struct.Forest.html#method.root_answer) function is the entry point to solve a `Goal`. Up until now,
+the idea of `Answer` versus `CompleteAnswer` has been ignored. However, in
+reality `Answer`s to `Goal`s may actually have delayed subgoals (see `ExClause`
+and [Coinduction and refinement strands]), whereas [`CompleteAnswer`]s may not.
+`root_answer` essentially just wraps [`ensure_root_answer`](https://rust-lang.github.io/chalk/chalk_engine/forest/struct.Forest.html#method.ensure_root_answer) and converts the
+`Goal`'s [`Answer`] to a [`CompleteAnswer`].
+
+The [`ensure_root_answer`](https://rust-lang.github.io/chalk/chalk_engine/forest/struct.Forest.html#method.ensure_root_answer) function contains the core skeleton of the logic around
+`Strand` and subgoal selection. The majority of the logic, however, is split out
+into separate functions that branch out from `ensure_root_answer`.
+
+## Subgoal selection
+
+Once a given `Strand` for a table has been selected, a subgoal has to be
+selected. If there are no subgoals left, then there is nothing to do. Otherwise,
+if there are subgoals left, then we attempt to select a subgoal (from
+[`next_subgoal_index`](https://rust-lang.github.io/chalk/chalk_engine/slg/struct.SlgContext.html#method.next_subgoal_index)).
+If the table for that subgoal had previously floundered (see next section), then
+we mark that subgoal as floundered and try the next subgoal. If all subgoals are
+marked as floundered, then this entire `Strand` is marked as floundered. If a
+subgoal is successfully selected, there is nothing left to do.
+
+## Floundering
+
+There are a couple of cases where we "give up" - here called floundering - on trying to
+solve a goal.
The easiest case to understand is if the types for a `Goal` or
+`Answer` are too large. (Side note, we *could* actually handle this - by
+generalizing - but it turns out to be quite buggy and probably unnecessary).
+Another case where we flounder is if we try to solve a `Goal` where we try to
+**enumerate** non-enumerable types (like auto traits). In general, floundering
+just means that we *can't* know any more answers about a `Goal`, for some
+reason. However, if there are other `Strands` that don't flounder, there may
+still be other `Answer`s available.
+
+## Answers
+
+After an answer has been found for a subgoal, it must be *applied* to the parent
+`Strand`. Specifically, it must be able to unify with any existing `Answers`. If
+the `Answer`s are incompatible, the `Strand` is dropped since it can't lead
+anywhere.
+
+## Cycles
+
+If while pursuing a `Goal`, the engine encounters the same `Table` twice, then a
+cycle has occurred. If the cycle is not coinductive (see next), then there is
+nothing that can be gained from taking this route. We mark how far up the stack
+is in the cycle, and try the next `Strand`. If all `Strand`s for a table
+encounter a cycle, then we know that the current selected `Goal` has no more
+answers.
+
+## Coinduction and refinement strands
+[Coinduction and refinement strands]: #coinduction-and-refinement-strands
+
+Coinduction basically means that two statements can rely on each other being
+true, unless either is proven false.
+
+For example with the following program:
+```notrust
+#[coinductive]
+trait C1 { }
+forall<A, B> { A: C1 if B: C1 }
+```
+Then the goal `exists<T, U> { T: C1 }` holds for all `T` and `U`. If the `C1`
+trait was not coinductive, this would be a simple cycle.
+
+To implement coinduction in the engine, delayed subgoals were introduced.
+
+Essentially, if a cycle is found, and the `Goal` is coinductive, then this is
+"delayed" until the stack unwinds back to the top `Goal` and all other
+non-coinductive cycles have been proven. Then, the `Goal` itself has been proven. In
+some cases, it is the *root* `Goal` that has delayed coinductive subgoals (see
+above example). In this case, we create another "Refinement Strand" where the
+only subgoals are the delayed coinductive subgoals. If this new `Strand` can be
+proven, then any `Answer`s from that are valid answers for the root `Goal`.
+However, since there are currently delayed coinductive subgoals, there are no
+answers available yet.
+
+For a much more in-depth discussion, see the [next sub-chapter on coinduction](logic/coinduction.md).
+
+
+[`Strand`]: https://rust-lang.github.io/chalk/chalk_engine/strand/struct.Strand.html
+[`Context`]: https://rust-lang.github.io/chalk/chalk_engine/context/trait.Context.html
+[`Stack`]: https://rust-lang.github.io/chalk/chalk_engine/stack/struct.Stack.html
+[`StackEntry`]: https://rust-lang.github.io/chalk/chalk_engine/stack/struct.StackEntry.html
+[`Table`]: https://rust-lang.github.io/chalk/chalk_engine/table/struct.Table.html
+[`Goal`]: https://rust-lang.github.io/chalk/chalk_ir/struct.Goal.html
+[`Answer`]: https://rust-lang.github.io/chalk/chalk_engine/struct.Answer.html
+[`CompleteAnswer`]: https://rust-lang.github.io/chalk/chalk_engine/struct.CompleteAnswer.html
diff --git a/book/src/engine/logic/coinduction.md b/book/src/engine/logic/coinduction.md
new file mode 100644
index 00000000000..062d836b61d
--- /dev/null
+++ b/book/src/engine/logic/coinduction.md
@@ -0,0 +1,287 @@
+# Coinduction
+
+This sub-chapter was originally prepared for wg-traits design meeting on 2019-11-08 (see the [Hackmd](https://hackmd.io/OJRi5OM6Twunw8ZmuLxfRA) doc).
It briefly covers some tricky (and previously incorrectly handled) cases of coinduction, as well as two proposed solutions. The resulting and current solution ended up being something *pretty* close to Niko's solution. However, this is basically a copy-paste from the original document, and so shouldn't necessarily be taken as 100% truth as far as implementation. + +## The problem + +See [chalk#248] for details. The short version is that we fail to handle a case like this correctly, where `Ci` are all co-inductive goals: + +[chalk#248]: https://github.com/rust-lang/chalk/issues/248 + +```notrust +C1 :- C2, C3. +C2 :- C1. +``` + +What happens is that we + +* start to prove C1 +* start to prove C2 +* see a recursive attempt to prove C1, assume it is successful +* consider C2 proved **and cache this** +* start to prove C3, fail +* consider C1 **unproven** + +Now we incorrectly have a result that `C2` is true -- but that result was made on the assumption that `C1` was true, and it was not. + +## Some other tricky cases to consider + +### Unification failures + +One thing to consider is that even when we have "coinduction obligations" to prove, we have to remember their substitutions too: + +```notrust +C1(X) :- C2(Y), X = 22. +C2(X) :- C3(X), X = 44. +C3(X) :- C1(X), C2(X). +``` + +None of these predicates should be provable, because `C1(X)` and `C2(X)` don't hold for the same `X`. + +If we're not careful, we might: + +* start to prove C1 +* start to prove C2 +* start to prove C3, see the recursive calls to C1 and C2 + * maybe we wait to consider it proven until C1 and C2 complete + +In this case, it's not enough that C1 and C2 are provable at all, they have to be provable for the same X. + +### Non-trivial self-cycles + +```notrust +C1(A) :- C1(B), B = 22, C2(A). +C2(44). +``` + +This case is not provable, even though the only cycle is `C1(X) :- C1(Y)` -- but it turns out that `X` must not be 22. 
The catch is that while this might *appear* to be a trivial self-cycle, it is not! + +Actually I have to think about the best way to handle this case, as my proposed solution doesn't quite cut it. It wouldn't be *wrong* but it seems not ideal. -- Niko + +### Delayed trivial cycles + +```notrust +C1(A, B) :- C2(A, B), A = 22, B = 22. +C2(A, B) :- C1(B, A). +``` + +This should be provable, but the cycle from C2 to C1 is not immediately visible as a trivial cycle, at least if subgoals are solved in order. + + +### Delayed trivial cycles, variant 2 + +```notrust +C1(A, B) :- C2(A, B), A = 22. +C2(A, B) :- C1(B, A). +``` + +As above, here the only complete answer is `C1(22, 22)`. This is because the `C1`, `C2` cycle effectively guarantees equality. + +### Delayed trivial cycles, variant 3 + +```notrust +C1(A, B) :- C1(B, A). +``` + +This is true for all `A, B` + +### Other cases? + +## Approach in existing PR + +### High-level idea + +* When we encounter a co-inductive subgoal, we delay them in the current `Strand` +* When all subgoals have been tested, and there are remaining delayed co-inductive subgoals, this is propagated up, marking the current `Strand` as co-inductive +* When the co-inductive `Strand`s reach the root table, we only then pursue an answer + +## Niko's proposed solution + +### High-level idea + +* We only consider a co-induction subgoal proven for *trivial* recursion -- i.e., self-recursion where you have `C1 :- C1`. +* For non-trivial recursion, we propagate the co-inductive subgoal to the parent. This continues until it becomes trivial. 
+
+### Implementation steps
+
+**Extend `Answer` in two ways.**
+
+Currently `Answer` has a "constrained substitution" that includes values for the table's substitution + region constraints:
+
+```notrust
+struct Answer {
+    constrained_subst: Canonical<ConstrainedSubst>,
+    is_ambiguous: bool
+}
+
+struct ConstrainedSubst {
+    substitution: Substitution,
+    region_constraints: Vec<RegionConstraint>,
+}
+```
+
+we would first extend `ConstrainedSubst` to include as yet unproven co-inductive subgoals (this might actually be better done as a new type):
+
+```rust,ignore
+struct ConstrainedSubst {
+    substitution: Substitution,
+    delayed_subgoals: Vec<Goal>,
+    region_constraints: Vec<RegionConstraint>,
+}
+```
+
+then we would extend `Answer` slightly as well so it can be "ok" or ambiguous, as today, but also an *error* case
+
+```rust,ignore
+enum AnswerMode {
+    OK,
+    Ambiguous,
+    Error,
+}
+
+struct Answer {
+    constrained_subst: Canonical<ConstrainedSubst>,
+    mode: AnswerMode
+}
+```
+
+We won't need this error case till later, so let's ignore it for now. (And in a way, we never need it.)
+
+**Deferring coinductive subgoals**
+
+When we encounter a co-inductive subgoal, we check if it is a **trivial cycle** or not. A trivial cycle is a case like `C1 :- C1`. We can simply consider such cycles to be true (but note the distinction between a *trivial* cycle and a *self-cycle* -- see the "non-trivial self-cycle" example above).
+
+For non-trivial cycles, we will want to store the cycle to be validated later. To accommodate that, we extend `ExClause` to include a `delayed_subgoals` list as well. We can write this the same way SLG does, so `Goal :- DelayedSubgoals | Subgoals`
+
+In our example, proving `C2 :- C1` would result in adding `C1` to the list of delayed subgoals.
+
+When we reach the end of the list of subgoals, we can create an answer that contains the delayed subgoals.
We don't have to add all the goals -- we can check for those that are trivial self-cycles again and remove them (in some cases, something which was not trivial to start may have become trivial through later unifications, see Delayed Trivial Self-Cycle case). Note that we *do* have to add all non-trivial cycles, including non-trivial self-cycles -- see the walkthrough of Non-trivial self-cycle variant 3. + +So the answer to `C2` would be + +```notrust +substitution: [] // no variables +delayed_subgoals: ["C1"] +region_constraints: [] +``` + +We can denote this as `C2 :- C1 |`, to use SLG notation. + +**Incorporating an answer with deferred subgoals.** + +When a table gets back an answer that has deferred sub-goals, they get added to the current list of subgoals. + +So e.g. in our case, we had a `ExClause` like: + +```notrust +C1 :- | C2, C3 +``` + +and we get the answer `C2 :- C1 |`, so we would convert it to + +```notrust +C1 :- | C3, C1 +``` + +i.e., we have added `C1` to the list of goals to prove. When we go to prove `C3`, of course, we will fail -- but it had succeeded, we would go on to prove `C1` but encounter a trivial cycle and hence succeed. + +**Extending root answer** + +So we failed to prove C1, but we do have a (conditional) answer to C2 -- `C2 :- C1 |`, even though `C2` is unprovable. What happens if `ensure_root_answer` is invoked on `C2`? + +What we have here is a *conditional* answer. We know that `C1` must have ultimately resolved itself somehow (although it might not yet be proven). What we can do is create a strand in C2 to evaluate C1 again -- if this strand succeeds, it can actually overwrite the `C2 :- C1 |` answer in place with `C2 :-` (i.e., an unconditional answer). This is just a refinement of what we had. If the strand fails, though, we'll want to remember the error. + +I think when we get a new answer, we want it to *overwrite* the old answer in place, rather than create a new answer. 
This is valid because it's not a new answer, it's just a more refined form of the old answer (although note that it might have different substitutions and other details, see the "delayed trivial cycle" case). + +In particular, it could be that the table already has a "complete" set of answers -- i.e., somebody invoked `ensure_answer(N)` and got back `None`. We don't want to be adding new answers which would change the result of that call. It *is* a bit strange that we are changing the result of `ensure_answer(i)` for the current `i`, but then the result is the same answer, just a bit more elaborated. + +The idea then would be to create a strand *associated with this answer somehow* (it doesn't, I don't think, live in the normal strand table; we probably have a separate "refinement strand" table). This strand has as its subgoals the delayed subgoals. It pursues them. This either results in an answer (which replaces the existing answer) or an error (in which case the existing answer is marked as *error*). This may require extending strand with an optional answer index that it should overwrite, or perhaps we thread it down as an argument to `pursue_strand` (optional because, in the normal mode, we are just appending a new answer). + +(Question: What distinguishes root answer? Nothing -- we could actually do this process for any answer, so long as the delayed subgoals are not to tables actively on the stack. This just happens to be trivially true for root answers. The key part though is that the answer must be registered in the table first before the refinement strand is created, see Delayed Self-Cycle Variant 3.) + +This is complex, so let's walk through an example or two. + +**The original problem.** When we finish solving `C1`, we leave `C2` with a single answer `C2 :- C1 |`. If someone invokes `ensure_root_answer(C2, 0)`, we would see the delayed literal and create a refinement strand for the answer: `C2 :- | C1`. 
We would pursue `C1` and get back the successful answer. So the refinement strand would terminate and we can overwrite with the answer `C2 :- |`. + +**Delayed trivial self-cycle.** Similar to above, but the answer is `C2(?A, ?B) :- C1(?B, ?A) |`. In other words, in the canonical answer, we have a (identity) substitution of `[^0, ^1]` and a delayed goal of `C1(^1, ^0)`. The strand we create will find only one answer to `C1`, `C1(22, 22)`, so we wind up with an answer `C2(22, 22)`. + +**Handling error answers** + +We introduced the idea of an "error answer"...how do we handle that? It's fairly simple. If a strand encounters an error answer, it simply fails. Done. The *outer* search however needs to treat an error answer as basically a no-op -- so e.g. the answer iterator should simply increment the error counter and move to the next answer. + +### Walk through: delayed trivial self cycle, variant 2 + +```notrust +C1(A, B) :- C2(A, B), A = 22. +C2(A, B) :- C1(B, A). +``` + +* `ensure_root_answer(C1(?A, ?B))` is invoked + * We start solving `C1(?A, ?B)` with the ex-clause `C1(?A, ?B) :- | C2(?A, ?B), ?A = 22` + * That starts solving `C2(?A, ?B)` + * This gets an answer `C2(?A, ?B) :- C1(?B, ?A) |` + * When answer is incorporated, we get `C1(?A, ?B) :- | C1(?B, ?A), ?A = 22` + * `C1(?B, ?A)` is a non-trivial cycle, and so we get + * `C1(?A, ?B) :- C1(?B, ?A) | ?A = 22` + * Unification completes, leaving us with + * `C1(22, ?B) :- C1(?B, 22) |` + * This is a complete answer + * ensure root answer attempts to refine this answer, creating a strand for `C1(22, ?B) :- | C1(?B, 22)` + * This creates a table for `C1(?B, 22)` with ex-clause `C1(?B, 22) :- | C2(?B, 22), ?B = 22` + * We start solving `C2(?B, 22)`, which has ex-clause `C2(?B, 22) :- C1(22, ?B)` + * This creates a table for `C1(22, ?B)`, with ex-clause `C1(22, ?B) :- C2(22, ?B), 22 = 22` + * This starts solving `C2(22, ?B)`, which is a fresh table with ex-clause `C2(22, ?B) :- C1(?B, 22)` + * This is a co-inductive 
cycle + * So our answer is `C2(22, ?B) :- C1(?B, 22) |` + * Incorporating this answer yields `C1(22, ?B) :- 22 = 22, C1(?B, 22)` + * The unification constraint succeeds, leaving `C1(22, ?B) :- C1(?B, 22)` + * Co-inductive cycle detected, so answer is + * `C1(22, ?B) :- C1(?B, 22) |` + * This answer is incorporated into `C2`, yielding the ex-clause + * `C2(?B, 22) :- C1(?B, 22)` + * Pursuing that sub-goal gives a co-inductive cycle, so our final answer is + * `C2(?B, 22) :- C1(?B, 22) |` + * This answer is incorporated, yielding ex-clause `C1(?B, 22) :- | ?B = 22, C1(?B, 22)` + * Unification yields `C1(22, 22) :- C1(22, 22)` + * Trivial self-cycle detected, so final answer is + * `C1(22, 22)` + * the answer for `C1(?A, ?B)` is thus updated to `C1(22, 22)` + +### Walk through: delayed trivial self cycle, variant 3 + +```notrust +C1(A, B) :- C1(B, A). +``` + +This example is interesting because it shows that we have to incorporate non-trivial self cycles into an answer so they can recursively build on one another. + +* we get an initial answer of + * `C1(?A, ?B) :- C1(?B, ?A) |` +* if we attempt to refine this, we will get a strand `C1(?X, ?Y) :- C1(?Y, ?X)` + * pursuing the first subgoal `C1(?Y, ?X)` leads us to our own table, but at answer 0 + * (the very answer we are refining) + * the answer is `C1(?Y, ?X) :- C1(?X, ?Y) |` + * this strand incorporates its own answer, yielding + * `C1(?X, ?Y) :- C1(?X, ?Y)` + * next subgoal is a trivial self-cycle, discard, yielding + * `C1(?X, ?Y) :-` +* result: true + + + +### Walk through: non-trivial self cycle + +Let's walk through one more case, the non-trivial self cycle. + +```notrust +C1(A) :- C1(B), B = 22, C2(A). +C2(44). +``` + +What happens here is that we get an initial answer from `C1` that looks like: + +```notrust +C1(44) :- C1(22) | +``` + +Ensure root answer will thus try to refine by trying to solve `C1(22)`. 
Interestingly, this is going to go to a distinct table, because the canonical form is not the same, but that table will just fail.
diff --git a/book/src/engine/major_concepts.md b/book/src/engine/major_concepts.md
new file mode 100644
index 00000000000..f459906fce1
--- /dev/null
+++ b/book/src/engine/major_concepts.md
@@ -0,0 +1,92 @@
+# Major concepts
+
+This section goes over a few different concepts that are crucial to
+understanding how `chalk-engine` works, without going over the exact solving
+logic.
+
+## Goals
+
+A "goal" in Chalk can be thought of as "something we want to prove". The engine
+itself understands `GoalData`s. `GoalData`s consist of the most basic logic,
+such as introducing Binders (`Forall` or `Exists`) or combining goals (`All`).
+On the other hand, `DomainGoal` represents an opaque goal generated
+externally. As such, it may contain any extra information or may be interned.
+When solving a logic predicate, Chalk will lazily convert `DomainGoal`s
+into `GoalData`s.
+
+There are three types of completely opaque `GoalData`s that Chalk can solve:
+`Unify`, `DomainGoal`, and `CannotProve`. Unlike the other types of goals,
+these three cannot be simplified any further. `Unify` is the goal of unifying
+any two types. `DomainGoal` is any goal that can be solved by applying a
+[`ProgramClause`]. To solve this, more `Goal`s may be generated. Finally,
+`CannotProve` is a special goal that *cannot* be proven true or false.
+
+## Answers and Solutions
+
+For every `Goal`, there are zero or more `Answer`s. Each [`Answer`] contains
+values for the inference variables in the goal.
+
+For example, given the following program:
+```notrust
+trait Clone {}
+struct A {}
+struct B {}
+impl Clone for A {}
+impl Clone for B {}
+```
+With the following goal: `exists<T> { T: Clone }`
+The following solutions would be given:
+```notrust
+T = A
+T = B
+```
+In other words, either `A` or `B` can be substituted for `T` and the goal will
Moreover, either answer could be used when further solving other +goals that depend on this goal. + +However, oftentimes, this is not what external crates want when solving for a +goal. Instead, the may want a *unique* solution to this goal. Indeed, when we +solve for a given root [`Goal`], we return a single [`Solution`]. The +[`AntiUnifier`](https://rust-lang.github.io/chalk/chalk_engine/slg/aggregate/struct.AntiUnifier.html) +struct from `chalk-solve` then finds that solution, by finding a minimal +generalization of answers which don't +unify. (For the example above, it would return only `Ambiguous`, since `A` and +`B` can't unify.) + +## ExClauses and Strands + +An [`ExClause`] is described in literature as `A :- D | G` or +`A holds given that G holds with D delayed goals`. In `chalk-engine`, an +`ExClause` stores the current state of proving a goal, including existing +substitutions already found, subgoals yet to be proven, or delayed subgoals. A +[`Strand`] wraps both an [`ExClause`] and an [`InferenceTable`] together. + +## Tables and Forests + +A [`Strand`] represents a *single* direction to find an [`Answer`] - for example, an +implementation of a trait with a set of where clauses. However, in a program, +there may be *multiple* possible implementations that match a goal - e.g. +multiple impls with different where clauses. Every [`Table`] has a goal, and +stores existing `Answers`, as well as all `Strand`s that may result in more +answers. + +A [`Forest`] holds all the `Table`s that program generates, and is what most of +the logic is implemented on. It also stores the current state of solving (the +stack). 
+ + + +[`Context`]: https://rust-lang.github.io/chalk/chalk_engine/context/trait.Context.html +[`ContextOps`]: https://rust-lang.github.io/chalk/chalk_engine/context/trait.ContextOps.html +[`InferenceTable`]: https://rust-lang.github.io/chalk/chalk_solve/infer/struct.InferenceTable.html +[`Solution`]: https://rust-lang.github.io/chalk/chalk_solve/solve/enum.Solution.html +[`ExClause`]: https://rust-lang.github.io/chalk/chalk_engine/struct.ExClause.html +[`Strand`]: https://rust-lang.github.io/chalk/chalk_engine/strand/struct.Strand.html +[`Table`]: https://rust-lang.github.io/chalk/chalk_engine/table/struct.Table.html +[`Forest`]: https://rust-lang.github.io/chalk/chalk_engine/forest/struct.Forest.html +[`Goal`]: https://rust-lang.github.io/chalk/chalk_ir/struct.Goal.html +[`UnificationOps`]: https://rust-lang.github.io/chalk/chalk_engine/context/trait.UnificationOps.html +[`TruncateOps`]: https://rust-lang.github.io/chalk/chalk_engine/context/trait.TruncateOps.html +[`ResolventOps`]: https://rust-lang.github.io/chalk/chalk_engine/context/trait.ResolventOps.html +[`ProgramClause`]: https://rust-lang.github.io/chalk/chalk_ir/struct.ProgramClause.html +[`Answer`]: https://rust-lang.github.io/chalk/chalk_engine/struct.Answer.html diff --git a/book/src/engine/slg.md b/book/src/engine/slg.md new file mode 100644 index 00000000000..18c8eb392ca --- /dev/null +++ b/book/src/engine/slg.md @@ -0,0 +1,302 @@ +# The On-Demand SLG solver + +Given a set of program clauses (provided by our [lowering rules][lowering]) +and a query, we need to return the result of the query and the value of any +type variables we can determine. This is the job of the solver. 
+
+For example, `exists<T> { Vec<T>: FromIterator<u32> }` has one solution, so
+its result is `Unique; substitution [?T := u32]`. A solution also comes with
+a set of region constraints, which we'll ignore in this introduction.
+
+[lowering]: ../clauses.html
+
+## Goals of the Solver
+
+### On demand
+
+There are often many, or even infinitely many, solutions to a query. For
+example, say we want to prove that `exists<T> { Vec<T>: Debug }` for _some_
+type `?T`. Our solver should be capable of yielding one answer at a time, say
+`?T = u32`, then `?T = i32`, and so on, rather than iterating over every type
+in the type system. If we need more answers, we can request more until we are
+done. This is similar to how Prolog works.
+
+*See also: [The traditional, interactive Prolog query][pq]*
+
+[pq]: ../canonical_queries.html#the-traditional-interactive-prolog-query
+
+### Breadth-first
+
+`Vec<?T>: Debug` is true if `?T: Debug`. This leads to a cycle: `[Vec<u32>,
+Vec<Vec<u32>>, Vec<Vec<Vec<u32>>>]`, and so on all implement `Debug`. Our
+solver ought to be breadth first and consider answers like `[Vec<u32>: Debug,
+Vec<Vec<u32>>: Debug, ...]` before it recurses, or we may never find the answer
+we're looking for.
+
+### Cachable
+
+To speed up compilation, we need to cache results, including partial results
+left over from past solver queries.
+
+## Description of how it works
+
+The basis of the solver is the [`Forest`] type. A *forest* stores a
+collection of *tables* as well as a *stack*. Each *table* represents
+the stored results of a particular query that is being performed, as
+well as the various *strands*, which are basically suspended
+computations that may be used to find more answers. Tables are
+interdependent: solving one query may require solving others.
+
+[`Forest`]: https://rust-lang.github.io/chalk/chalk_engine/forest/struct.Forest.html
+
+### Walkthrough
+
+Perhaps the easiest way to explain how the solver works is to walk
+through an example.
Let's imagine that we have the following program:
+
+```rust,ignore
+trait Debug { }
+
+struct u32 { }
+impl Debug for u32 { }
+
+struct Rc<T> { }
+impl<T: Debug> Debug for Rc<T> { }
+
+struct Vec<T> { }
+impl<T: Debug> Debug for Vec<T> { }
+```
+
+Now imagine that we want to find answers for the query `exists<T> { Rc<T>:
+Debug }`. The first step would be to u-canonicalize this query; this is the
+act of giving canonical names to all the unbound inference variables based on
+the order of their left-most appearance, as well as canonicalizing the
+universes of any universally bound names (e.g., the `T` in `forall<T> { ...
+}`). In this case, there are no universally bound names, but the canonical
+form Q of the query might look something like:
+
+```text
+Rc<?0>: Debug
+```
+
+where `?0` is a variable in the root universe U0. We would then go and
+look for a table with this canonical query as the key: since the forest is
+empty, this lookup will fail, and we will create a new table T0,
+corresponding to the u-canonical goal Q.
+
+**Ignoring negative reasoning and regions.** To start, we'll ignore
+the possibility of negative goals like `not { Foo }`. We'll phase them
+in later, as they bring several complications.
+
+**Creating a table.** When we first create a table, we also initialize
+it with a set of *initial strands*. A "strand" is kind of like a
+"thread" for the solver: it contains a particular way to produce an
+answer. The initial set of strands for a goal like `Rc<?T>: Debug`
+(i.e., a "domain goal") is determined by looking for *clauses* in the
+environment. In Rust, these clauses derive from impls, but also from
+where-clauses that are in scope. In the case of our example, there
+would be three clauses, each coming from the program. Using a
+Prolog-like notation, these look like:
+
+```text
+(u32: Debug).
+(Rc<T>: Debug) :- (T: Debug).
+(Vec<T>: Debug) :- (T: Debug).
+```
+
+To create our initial strands, then, we will try to apply each of
+these clauses to our goal of `Rc<?T>: Debug`.
The first and third
+clauses are inapplicable because `u32` and `Vec<T>` cannot be unified
+with `Rc<?T>`. The second clause, however, will work.
+
+**What is a strand?** Let's talk a bit more about what a strand *is*. In the code, a strand
+is the combination of an inference table, an _X-clause_, and (possibly)
+a selected subgoal from that X-clause. But what is an X-clause
+([`ExClause`], in the code)? An X-clause pulls together a few things:
+
+- The current state of the goal we are trying to prove;
+- A set of subgoals that have yet to be proven;
+- There are also a few things we're ignoring for now:
+  - delayed literals, region constraints
+
+The general form of an X-clause is written much like a Prolog clause,
+but with somewhat different semantics. Since we're ignoring delayed
+literals and region constraints, an X-clause just looks like this:
+
+```text
+G :- L
+```
+
+where G is a goal and L is a set of subgoals that must be proven.
+(The L stands for *literal* -- when we address negative reasoning, a
+literal will be either a positive or negative subgoal.) The idea is
+that if we are able to prove L then the goal G can be considered true.
+
+In the case of our example, we would wind up creating one strand, with
+an X-clause like so:
+
+```text
+(Rc<?T>: Debug) :- (?T: Debug)
+```
+
+Here, the `?T` refers to one of the inference variables created in the
+inference table that accompanies the strand. (I'll use named variables
+to refer to inference variables, and numbered variables like `?0` to
+refer to variables in a canonicalized goal; in the code, however, they
+are both represented with an index.)
+
+For each strand, we also optionally store a *selected subgoal*. This
+is the subgoal after the turnstile (`:-`) that we are currently trying
+to prove in this strand. Initially, when a strand is first created,
+there is no selected subgoal.
+ +[`ExClause`]: https://rust-lang.github.io/chalk/chalk_engine/struct.ExClause.html + +**Activating a strand.** Now that we have created the table T0 and +initialized it with strands, we have to actually try and produce an answer. +We do this by invoking the [`ensure_root_answer`] operation on the table: +specifically, we say `ensure_root_answer(T0, A0)`, meaning "ensure that there +is a 0th answer A0 to query T0". + +Remember that tables store not only strands, but also a vector of cached +answers. The first thing that [`ensure_root_answer`] does is to check whether +answer A0 is in this vector. If so, we can just return immediately. In this +case, the vector will be empty, and hence that does not apply (this becomes +important for cyclic checks later on). + +When there is no cached answer, [`ensure_root_answer`] will try to produce one. +It does this by selecting a strand from the set of active strands -- the +strands are stored in a `VecDeque` and hence processed in a round-robin +fashion. Right now, we have only one strand, storing the following X-clause +with no selected subgoal: + +```text +(Rc: Debug) :- (?T: Debug) +``` + +When we activate the strand, we see that we have no selected subgoal, +and so we first pick one of the subgoals to process. Here, there is only +one (`?T: Debug`), so that becomes the selected subgoal, changing +the state of the strand to: + +```text +(Rc: Debug) :- selected(?T: Debug, A0) +``` + +Here, we write `selected(L, An)` to indicate that (a) the literal `L` +is the selected subgoal and (b) which answer `An` we are looking for. We +start out looking for `A0`. + +[`ensure_root_answer`]: https://rust-lang.github.io/chalk/chalk_engine/forest/struct.Forest.html#method.ensure_root_answer + +**Processing the selected subgoal.** Next, we have to try and find an +answer to this selected goal. To do that, we will u-canonicalize it +and try to find an associated table. 
In this case, the u-canonical +form of the subgoal is `?0: Debug`: we don't have a table yet for +that, so we can create a new one, T1. As before, we'll initialize T1 +with strands. In this case, there will be three strands, because all +the program clauses are potentially applicable. Those three strands +will be: + +- `(u32: Debug) :-`, derived from the program clause `(u32: Debug).`. + - Note: This strand has no subgoals. +- `(Vec: Debug) :- (?U: Debug)`, derived from the `Vec` impl. +- `(Rc: Debug) :- (?U: Debug)`, derived from the `Rc` impl. + +We can thus summarize the state of the whole forest at this point as +follows: + +```text +Table T0 [Rc: Debug] + Strands: + (Rc: Debug) :- selected(?T: Debug, A0) + +Table T1 [?0: Debug] + Strands: + (u32: Debug) :- + (Vec: Debug) :- (?U: Debug) + (Rc: Debug) :- (?V: Debug) +``` + +**Delegation between tables.** Now that the active strand from T0 has +created the table T1, it can try to extract an answer. It does this +via that same `ensure_answer` operation we saw before. In this case, +the strand would invoke `ensure_answer(T1, A0)`, since we will start +with the first answer. This will cause T1 to activate its first +strand, `u32: Debug :-`. + +This strand is somewhat special: it has no subgoals at all. This means +that the goal is proven. We can therefore add `u32: Debug` to the set +of *answers* for our table, calling it answer A0 (it is the first +answer). The strand is then removed from the list of strands. + +The state of table T1 is therefore: + +```text +Table T1 [?0: Debug] + Answers: + A0 = [?0 = u32] + Strand: + (Vec: Debug) :- (?U: Debug) + (Rc: Debug) :- (?V: Debug) +``` + +Note that I am writing out the answer A0 as a substitution that can be +applied to the table goal; actually, in the code, the goals for each +X-clause are also represented as substitutions, but in this exposition +I've chosen to write them as full goals, following [NFTD]. 
+ +[NFTD]: ../bibliography.html#slg + +Since we now have an answer, `ensure_answer(T1, A0)` will return `Ok` +to the table T0, indicating that answer A0 is available. T0 now has +the job of incorporating that result into its active strand. It does +this in two ways. First, it creates a new strand that is looking for +the next possible answer of T1. Next, it incorporates the answer from +A0 and removes the subgoal. The resulting state of table T0 is: + +```text +Table T0 [Rc: Debug] + Strands: + (Rc: Debug) :- selected(?T: Debug, A1) + (Rc: Debug) :- +``` + +We then immediately activate the strand that incorporated the answer +(the `Rc: Debug` one). In this case, that strand has no further +subgoals, so it becomes an answer to the table T0. This answer can +then be returned up to our caller, and the whole forest goes quiescent +at this point (remember, we only do enough work to generate *one* +answer). The ending state of the forest at this point will be: + +```text +Table T0 [Rc: Debug] + Answer: + A0 = [?0 = u32] + Strands: + (Rc: Debug) :- selected(?T: Debug, A1) + +Table T1 [?0: Debug] + Answers: + A0 = [?0 = u32] + Strand: + (Vec: Debug) :- (?U: Debug) + (Rc: Debug) :- (?V: Debug) +``` + +Here you can see how the forest captures both the answers we have +created thus far *and* the strands that will let us try to produce +more answers later on. 
+ +## See also + +- [chalk_solve README][readme], which contains links to papers used and + acronyms referenced in the code +- This section is a lightly adapted version of the blog post [An on-demand + SLG solver for chalk][slg-blog] +- [Negative Reasoning in Chalk][negative-reasoning-blog] explains the need + for negative reasoning, but not how the SLG solver does it + +[readme]: https://github.com/rust-lang/chalk/blob/239e4ae4e69b2785b5f99e0f2b41fc16b0b4e65e/chalk-engine/src/README.md +[slg-blog]: https://smallcultfollowing.com/babysteps/blog/2018/01/31/an-on-demand-slg-solver-for-chalk/ +[negative-reasoning-blog]: https://aturon.github.io/blog/2017/04/24/negative-chalk/ diff --git a/book/src/glossary.md b/book/src/glossary.md new file mode 100644 index 00000000000..afa0aca6a02 --- /dev/null +++ b/book/src/glossary.md @@ -0,0 +1,243 @@ +# Glossary and terminology + +This is a glossary of terminology (possibly) used in the chalk crate. + +## Notation + +### Basic notation + +| Notation | Meaning | +|--------------|-----------------------------------------| +| `?0` | [Type inference variable] | +| `^0`, `^1.0` | [Bound variable]; bound in a [`forall`] | +| `!0`, `!1.0` | [Placeholder] | +| `A :- B` | [Clause]; A is true if B is true | + +### Rules + +- `forall { (Vec: Clone) :- (T: Clone)`: for every `T`, `Vec` + implements `Clone` if `T` implements `Clone` + +### Queries + +- `Vec: Clone`: does `Vec` implement `Clone`? +- `exists { Vec: Clone }`: does there exist a `T` such that `Vec` + implements `Clone`? + +[Type inference variable]: ./types/rust_types.md#inference-variables +[Bound variable]: ./types/rust_types.md#bound-variables +[`forall`]: #debruijn-index +[Placeholder]: ./types/rust_types.md#placeholders +[Clause]: ./clauses/goals_and_clauses.md + +## Binary connective +There are sixteen logical connectives on two boolean variables. The most +interesting in this context are listed below. 
There is also a truth table given +which encodes the possible results of the operations like this + +```notrust +f(false, false) f(false, true) f(true, false) f(true, true). +``` + +As a shorthand the resulting truth table is encoded with `true = 1` and `false = +0`. + +| Truth table | Operator symbol | Common name | +|-------------|-----------------|----------------------------------| +| 0001 | && | Conjunction; and | +| 1001 | <=> | Equivalence; if and only if; iff | +| 1101 | => | Implication; if ... then | + +## Binder +A binder is an expression that binds a literal to a certain expression. +Examples for binders: + +- The universal quantifier `forall(a)` states that a certain condition holds for + all allowed values for `a`. +- A function definition `f(x) = a * x` is a binder for the variable `x` whereas + `a` is a free variable. +- A sum `\sum_n x_n` binds the index variable `n`. + +## Canonical Form +A formula in canonical form has the property that its De Bruijn indices are +minimized. For example when the formula `forall<0, 1> { 0: A && 1: B }` is +processed, both "branches" `0: A` and `1: B` are processed individually. The +first branch would be in canonical form, the second branch not since the +occurring De Bruijn index `1` could be replaced with `0`. + +## Clause +A clause is the disjunction of several expressions. For example the clause +`condition_1 || condition_2 || ...` states that at least one of the conditions +holds. + +There are two notable special cases of clauses. A *Horn clause* has at most one +positive literal. A *Definite clause* has exactly one positive literal. + +*Horn clauses* can be written in the form `A || !B || !C || ...` with `A` being +the optional positive literal. Due to the equivalence `(P => Q) <=> (!P || Q)` +the clause can be expressed as `B && C && ... => A` which means that A is true +if `B`, `C`, etc. are all true. All rules in chalk are in this form. 
For example + +```rust,ignore +struct A {} +impl B for A where T: C + D {} +``` + +is expressed as the *Horn clause* `(T: C) && (T: D) => (A: B)`. This formula +has to hold for all values of `T`. The second example + +```rust,ignore +struct A {} +impl B for A {} +impl C for A {} +``` + +is expressed as the *Horn clause* `(A: B) && (A: C)`. Note the missing +consequence. + +## De Bruijn Index +De Bruijn indices numerate literals that are bound in an unambiguous way. The +literal is given the number of its binder. The indices start at zero from the +innermost binder increasing from the inside out. + +Given the example `forall { exists { T: Foo } }` the +literal names `U` and `T` are replaced with `0` and `1` respectively and the names are erased from the binders: `forall<_> +{ exists<_> { 1: Foo } }`. + +As another example, in `forall { forall { X } }`, `X` is represented +as `^1.0`. The `1` represents the De Bruijn index of the variable and the `0` +represents the index in that scope: `X` is bound in the second scope counting +from where it is referenced, and it is the first variable bound in that scope. + +## Formula +A formula is a logical expression consisting of literals and constants connected +by logical operators. + +## Goal +With a set of type variables, given types, traits and impls, a goal specifies a +problem which is solved by finding types for the type variables that satisfy the +formula. For example the goal `exists { T: u32 }` can be solved with `T = +u32`. + +## Literal +A literal is an atomic element of a formula together with the constants `true` +and `false`. It is equivalent to a variable in an algebraic expressions. Note +that literals are *not* the same as the type variables used in specifying a +goal. + +## Normal form +To say that a statement is in a certain *normal form* means that the pattern in +which the subformulas are arranged fulfill certain rules. The individual patterns +have different advantages for their manipulation. 
+ +### Conjunctive normal form (CNF) +A formula in CNF is a conjunction of disjunctions. For example `(x1 || x2 || +x3) && (x4 || x5 || x6)` is in CNF. + +### Disjunctive normal form (DNF) +A formula in DNF is a disjunction of conjunctions. For example `(x1 && x2 && +x3) || (x4 && x5 && x6)` is in DNF. + +### Negation normal form (NNF) +A formula in NNF consists only of literals, the connectives `&&` and `||` and +`true` and `false`. + +### Prenex normal form (PNF) +All quantifiers are on the highest level of a formula and do not occur inside +the subformulas of the expression. + +- `forall(x). exists(y). forall(z). P(x) && P(y) => P(z)` is in PNF. +- `(exists(x). P(x)) => exists(y). P(y) && forall(z). P(z)` is *not* in PNF. + +## Normalization +Normalization is the process of converting an associated type to a concrete +type. In the case of an iterator this would mean that the associated `Item` type +is replaced with something more meaningful with respect to the individual +context (e.g. `u32`). + +## Projection +Projection is the reference to a field or (in the context of Rust) to a type +from another type. + +## Satisfiability +A formula is satisfiable iff there is a valuation for the atoms inside the +formula that makes it true. + +## Unification +Unification is the process of solving a formula. That means unification finds +values for all the free literals of the formula that satisfy it. In the context +of chalk the values refer to types. + +## Universe +A universe sets the scope in which a particular variable name is bound. (See +*Binder*.) A universe can encapsulate other universes. A universe can +be contained by only one parent universe. Universes have therefore a tree-like +structure. A universe can access the variable names of itself and the parent +universes but not of the sibling universes. + +## Well-formed +A formula is well-formed if it is constructed according to a predefined set of +syntactic rules. 
+ +In the context of the Rust type system this means that basic rules for type +construction have to be met. Two examples: 1) Given a struct definition + +```rust,ignore +struct HashSet +``` +then a type `HashSet` is well-formed since `i32` implements `Hash`. A type +`HashSet` with a type `NoHash` that does not implement the `Hash` trait +is not well-formed. + +2) If a trait demands by its definition the implementation of further traits +for a certain type then these secondary traits have to be implemented as well. +If a type `Foo` implements `trait Eq: PartialEq` then this type has to implement +`trait PartialEq` as well. If it does not, then the type `Foo: Eq` is not well +formed according to Rust type building rules. + +## Quantifier + +### Existential quantifier +A formula with the existential quantifier `exists(x). P(x)` is satisfiable if +and only if there exists at least one value for all possible values of x which +satisfies the subformula `P(x)`. + +In the context of chalk, the existential quantifier usually demands the +existence of exactly one instance (i.e. type) that satisfies the formula (i.e. +type constraints). More than one instance means that the result is ambiguous. + +### Universal quantifier +A formula with the universal quantifier `forall(x). P(x)` is satisfiable +if and only if the subformula `P(x)` is true for all possible values for x. + +### Helpful equivalences +- `not(forall(x). P(x)) <=> exists(x). not(P(x))` +- `not(exists(x). P(x)) <=> forall(x). not(P(x))` + +## Skolemization +Skolemization is a technique of transferring a logical formula with existential +quantifiers to a statement without them. The resulting statement is in general +not equivalent to the original statement but equisatisfiable. + +## Validity +An argument (*premise* therefore *conclusion*) is valid iff there is no +valuation which makes the premise true and the conclusion false. + +Valid: `A && B therefore A || B`. 
Invalid: `A || B therefore A && B` because the +valuation `A = true, B = false` makes the premise true and the conclusion false. + +## Valuation +A valuation is an assignment of values to all variables inside a logical +formula. + +## Fixed-Points +A fixed-point of a function `f` is a value `x` for which `f(x)=x`. +Similarly a pre-fixed-point is defined as `x ≤ f(x)`, whereas for a post-fixed-point it holds that `f(x) ≤ x`. + +A least fixed-point (lfp) of `f` is the fixed-point `x` of `f` for which all other fixed-points `y` are greater or equal (i.e. if `f(y)=y` then `x ≤ y`). +Similarly, a greatest fixed-point (gfp) is greater or equal than all other fixed-points. +If `f` is a function on sets, the least fixed-point is defined as the intersection of all pre-fixed-points, which are then defined as sets `x` for which `x ⊆ f(x)`. +The greatest fixed-point is in this case the union of all post-fixed-points, respectively. + +This simple definition of lfp and gfp can also be lifted to general lattices. +The results for Chalk goals form such a lattice and, thus, every solver for such goals tries to find such fixed-points. \ No newline at end of file diff --git a/book/src/publishing.md b/book/src/publishing.md new file mode 100644 index 00000000000..65fc9de3cb5 --- /dev/null +++ b/book/src/publishing.md @@ -0,0 +1,35 @@ +# Publishing Chalk + +**Note: this is mostly only useful for maintainers** + +The following crates get published to crates.io: +- `chalk-derive` +- `chalk-engine` +- `chalk-ir` +- `chalk-recursive` +- `chalk-solve` + +The following crates get versioned without publishing: +- `chalk-parse` +- `chalk-integration` +- `chalk` (root directory) + +## Release Automation +Releases are fully automated. Once a week (Sunday at midnight UTC) a GitHub +Actions job is executed which generates the changelog, bumps crate versions, and +publishes the crates. If there have not been any changes since the last version, +the release is skipped. 
However, if the job is manually triggered then the +release will be published even if there are no changes. + +The release pipeline is located in [`publish.yml`]. + +[`publish.yml`]: https://github.com/rust-lang/chalk/blob/master/.github/workflows/publish.yml + +### Changelog Generation +The changelog is generated using [`auto-changelog`] and is stored in +[`RELEASES.md`]. The template used for the changelog is in +[`releases-template.hbs`]. + +[`auto-changelog`]: https://www.npmjs.com/package/auto-changelog +[`RELEASES.md`]: https://github.com/rust-lang/chalk/blob/master/RELEASES.md +[`releases-template.hbs`]: https://github.com/rust-lang/chalk/blob/master/releases-template.hbs diff --git a/book/src/recursive.md b/book/src/recursive.md new file mode 100644 index 00000000000..a7391cec688 --- /dev/null +++ b/book/src/recursive.md @@ -0,0 +1,98 @@ +# Chalk recursive solver + +The recursive solver, as its name suggests, is a logic solver that works +"recursively". In particular, its basic structure is a function like: + +```rust,ignore +fn(Goal) -> Solution +``` + +where the Goal is some [canonical goal](./canonical_queries.md) and +the Solution is a result like: + +* Provable(S): meaning the goal is provable and it is provably exactly (and + only) for the substitution S. S is a set of values for the inference variables + that appear in the goal. So if we had a goal like `Vec: Foo`, and we + returned `Provable(?X = u32)`, it would mean that only `Vec: Foo` and not + any other sort of vector (e.g., `Vec: Foo` does not hold). +* Ambiguous(S): meaning that we can't prove whether or not the goal is true. + This can sometimes come with a substitution S, which offers suggested values + for the inference variables that might make it provable. +* Error: the goal cannot be proven. 
+ +## Recursion: pros and cons + +The recursive solver is so-called because, in the process of solving one goal, +it will "recurse" to solve another. Consider an example like this: + +```rust,ignore +trait A { } +impl A for Vec { } +impl A for u32 { } +impl A for i32 { } +``` + +which results in program clauses like: + +```notrust +forall { Implemented(Vec: A) :- Implemented(T: A) } +Implemented(u32: A) +Implemented(i32: A) +``` + +First, suppose that we have a goal like `Implemented(Vec: A)`. This would +proceed like so: + +* `Solve(Implemented(Vec: A))` + * `Solve(Implemented(u64: A))` + * returns `Error` + * returns `Error` + +In other words, the recursive solver would start by applying the first rule, +which would cause us to recursively try to solve `Implemented(u64: A)`. This would +yield an Error result, because there are no applicable rules, and that error +would propagate back up, causing the entire attempt at proving things to fail. + +Next, consider `Implemented(Vec: A)`. This would proceed like so: + +* `Solve(Implemented(Vec: A))` + * `Solve(Implemented(u32: A))` + * returns `Provable` with no substitution (no variables) + * returns `Provable` + +Finally, consider `Implemented(Vec: A)`. This is more interesting because it +has a variable: + +* `Solve(Implemented(Vec: A))` + * `Solve(Implemented(?X: A))` + * finds two viable solutions, returns `Ambiguous` + * returns `Ambiguous` + +## Recursion and completeness + +One side-effect of the recursive solver's structure is that it +cannot find solutions in some cases where a traditional +Prolog solver would be successful. Consider this example: + +```rust +trait A { } +trait B { } + +impl A for Vec { } + +impl A for u32 { } +impl B for u32 { } + +impl A for i32 { } +impl B for i8 { } +``` + +In the recursive solver, with a goal of `Implemented(Vec: A)`, we +recursively try to prove `Implemented(?X: A)` and `Implemented(?X: B)`, which +are both ambiguous, and we get stuck there.
+ +The [SLG solver] in contrast starts by exploring `?X = u32` and finds +that it works, and then later tries to explore `?X = i32` and finds that it +fails (because `i32: B` is not true). + +[SLG solver]: ./engine.md diff --git a/book/src/recursive/coinduction.md b/book/src/recursive/coinduction.md new file mode 100644 index 00000000000..1eb234c4e7b --- /dev/null +++ b/book/src/recursive/coinduction.md @@ -0,0 +1,98 @@ +# Coinduction + +This sub-chapter is meant to describe the current handling of coinductive goals in the recursive solver rather than providing an extensive overview over the theoretical backgrounds and ideas. +It follows the description in [this GitHub comment](https://github.com/rust-lang/chalk/issues/399#issuecomment-643420016) and the Zulip topic linked there. +In general, coinductive cycles can arise for well-formedness checking and autotraits. +Therefore, correctly handling coinductive cycles is necessary to model the Rust trait system in its entirety. + +## General Idea +Coinductive cycles can be handled the same way as inductive cycles described [before](./inductive_cycles.md). +The only difference is the start value for coinductive goals. +Whereas inductive goals start with a negative result and are iterated until a least fixed-point is found, coinductive goals start with a positive result (i.e. a unique solution with identity substitution). +This positive result is then iterated until a greatest fixed-point is reached. + +## Mixed co-inductive and inductive Cycles +As described above, the handling of inductive and coinductive cycles differs only in the start value from which the computation begins. +Thus, it might seem reasonable to have mixed inductive and coinductive cycles as all goals inside these cycles would be handled the same way anyway. +Unfortunately, this is not possible for the kind of logic that Chalk is based on (i.e. essentially an extension of co-LP for Hereditary Harrop clauses, cf.
[this paper][co-LP]). + +There is a fundamental difference between results for inductive cycles and results for coinductive cycles of goals. +An inductive goal is provable if and only if there exists a proof for it consisting of a finite chain of derivations from axioms that are members of the least fixed-point of the underlying logic program. +On the other hand, coinductive goals are provable if there exists an at most infinite derivation starting from the axioms that proves it (this includes in particular all finite derivations). +This infinite derivation is then part of the greatest fixed-point of the logic program. +As infinite derivations are not feasible to compute, it is enough to show that such a derivation contains no contradiction. + +A simple example `X :- X.` (with `X` a free variable) is thus not provable by inductive reasoning (the least solution/lfp for this is the empty solution, a failure) but it is provable by coinductive reasoning (the greatest solution/gfp is the universe, i.e. all values). + +This difference between inductive and coinductive results becomes a problem when combined in a single cycle. +Consider a coinductive goal `CG` and an inductive goal `IG`. Now consider the simplest possible mixed cycle: +```notrust +CG :- IG +IG :- CG +``` +It is apparent that there can not exist a solution for `IG` as the cyclic dependency prevents a finite proof derivation. +In contrast to that, `CG` could potentially be provable as the derivation *`CG` if `IG` if `CG` if `IG` ...* is infinite and based only on the two axioms. +As a result, `CG` would hold whereas `IG` would not hold, creating a contradiction. + +The simplest solution to this problem, proposed by Simon et al. in [their paper about co-LP][co-LP], is to disallow mixed inductive and coinductive cycles. +This approach is also used by Chalk.
+ +## Prevention of Invalid Results +The problem of invalid results propagated outside of the coinductive cycle is also described in the [Coinduction chapter](../engine/logic/coinduction.md) for the SLG solver alongside the rather complex handling used with it. +Whereas the SLG solver introduces [special constructs](../engine/logic/coinduction.html#nikos-proposed-solution) to handle coinduction, it is sufficient for the recursive solver to use the same logic for inductive and coinductive cycles. +The following is a description of how this works in more detail. + +### The Problem +The problem arises if a solution that is purely based on the positive starting value for the coinductive cycle is cached (or tabled in logic programming terms) and as such propagated to other goals that are possibly reliant on this. An example where all clause goals are assumedly coinductive may look like this (cf. the test case `coinduction::coinductive_unsound1`): + +```notrust +C :- C1. +C :- C2. +C1 :- C2, C3. +C2 :- C1. +``` +The following is a computation to find out whether there exists a type that implements `C`. +Here the implementation of `C` may be proved by either showing that the type implements `C1` or `C2`. +* Start proving `C` by trying to prove `C1`: + * For `C1` try to prove `C2` and `C3`: + * Start with `C2`. For `C2` we need to prove `C1`: + * This is a (coinductive) cycle. Assume that `C1` holds, i.e. use the positive start value. + * Based on this `C2` also holds. If this case is not handled specifically, the solution for `C2` is cached without a reference to the solution for `C1` on which it depends. + * Now try to prove `C3`: + * Find that there is no way do so from the given axioms. + * Thus, there exists no solution for `C3` and the computation fails. This valid result is cached and lifted back up. + * Due to the failure of `C3` there is also no solution for `C1`. This failure is also cached correctly and lifted back up. 
The cached solution for `C2` has now become invalid as it depends on a positive result for `C1`. +* As a result of the failure for `C1`, `C` can not be proved from `C1`. Try proving `C` from `C2` instead: + * Find the cached result that `C2` has a solution and lift it back up. +* Due to the solution for `C2`, `C` is also proved with the same solution. +* Stop with this positive but invalid result for `C`. + +### The Solution +The above example should make it evident that the caching of found solutions in coinductive cycles can lead to invalid results and should therefore be prevented. +This can be achieved by delaying the caching of all results inside the coinductive cycle until it is clear whether the start of the cycle (i.e. `C1` in the example above) is provable (cf. the handling of inductive cycles [before](./inductive_cycles.md)). +If the start of the cycle can be proven by the results of the cycle and related subgoals then the assumption about it was correct and thus all results for goals inside the cycle are also valid. +If, however, the start of the cycle can not be proved, i.e. the initial assumption was false, then a subset of the found solutions for the coinductive cycle may be invalid (i.e. the solution for `C2` in the example). + +To remove such invalid results, the cycle is restarted with a negative result for the cycle start. +With this approach, it is possible to remove all invalid results that would otherwise depend on the disproved cycle assumption. +To allow for the cycle to be restarted correctly, all nodes in the search graph after the cycle start are deleted. + +With this procedure, the example is handled as follows: +* Start proving `C` with `C1`: + * For `C1` prove `C2` and `C3`: + * For `C2` prove `C1`: + * This is a coinductive cycle. Assume that `C1` holds. + * Thus `C2` also holds. Delay the caching of the result about `C2`. + * There is no way to prove `C3`. Cache this result and lift the failure up.
+ * Due to the failure of `C3` there is also no solution for `C1`. Set `C1` to a negative result and restart the cycle. + * For `C2` prove `C1`: + * `C1` has now a negative result. + * Thus, `C2` also has a negative result which is not yet cached. + * Find the already cached negative result for `C3`. + * Nothing changed regarding `C1` (this would indicate a negative cycle which is currently not allowed) and the negative result for `C1` and `C2` are cached. Lift the negative result for `C1` back up. +* Start proving `C` with `C2`: + * Find negative cached result for `C2`. Lift the result back up. +* Neither `C1` nor `C2` have a positive result. Stop with the valid disproof of `C`. + + +[co-LP]: https://link.springer.com/chapter/10.1007%2F978-3-540-73420-8_42 \ No newline at end of file diff --git a/book/src/recursive/inductive_cycles.md b/book/src/recursive/inductive_cycles.md new file mode 100644 index 00000000000..8ef6e17cc14 --- /dev/null +++ b/book/src/recursive/inductive_cycles.md @@ -0,0 +1,148 @@ +# Inductive cycles + +Recursive solving without cycles is easy. Solving with cycles is rather more +complicated. Before we get into the details of the implementation, +let's talk a bit about what behavior we actually *expect* in the face +of possible cycles. + +## Inductive cycles + +By default, Rust trait solving is **inductive**. What that means is that, roughly +speaking, you have to prove something is true without any cycles (i.e., you +can't say "it's true because it's true"!). + +For our purpose, a "cycle" means that, in the course of proving some canonical +goal G, we had to prove that same goal G again. + +Consider this Rust program: + +```rust +trait A { } +impl A for Vec { } +impl A for u32 { } +``` + +Whether or not we hit a cycle will depend on the goal we are trying +to solve. 
If for example we are trying to prove `Implemented(Vec: A)`, +then we don't hit any cycle: + +* `Implemented(Vec: A) :- Implemented(u32: A)` // from the first impl + * `Implemented(u32: A)` // from the second impl + +But what if we are trying to prove `Implemented(?X: A)`? This is a bit +more interesting. Because we don't know what `?X` is, both impls are +actually potentially applicable, so we wind up with two ways to +prove our goal. We will try them out one after the other. + +One possible execution might be: + +* Prove `Implemented(?X: A)` + * we find the program clause `forall { Implemented(Vec: A) :- Implemented(T: A) }` from the first impl + * we create the variable `?Y` to represent `T` and unify `?X = Vec`. + * after unification, we have the subgoal `Implemented(?Y: A)` + * when we go to recursively prove this impl, however, we find that it is already on the stack + * this is because the [canonical form] of `Implemented(?X: A)` and `Implemented(?Y: A)` is the same + +[canonical form]: ../canonical_queries.md + +## What happens if we treat inductive cycles as errors? + +So, what do we do when we hit an inductive cycle? Given that we told you that an +inductive proof cannot contain cycles, you might imagine that we can just treat +such a cycle as an error. But this won't give us the correct result. + +Consider our previous example. If we just treat that cycle as an error, then we +will conclude that the impl for `Vec` doesn't apply to `?X: A`, and we'll +proceed to try the impl for `u32`. This will let us reason that `?X: A` is +provable if `?X = u32`. This is, in fact, correct: `?X = u32` *is* a possible +answer. The problem is, it's not the only one! + +In fact, `Implemented(?X: A)` has an **infinite** number of answers. It is true +for `?X = u32`. It is true for `?X = Vec`. It is also true for +`Vec>` and `Vec>>` and so on. 
+ +Given this, the correct result for our query is actually "ambiguous" -- in +particular, there is no unique substitution that we can give that would make the +query provable. + +## How we solve cycles: loop and try again + +The way we actually handle cycles is by iterating until we reach a fixed point +(or ambiguity). We start out by assuming that all cycles are errors and we try +to find some solution S. If we succeed, then we can do a loop and iterate again +-- this time, for each cycle, we assume the result is S. This may yield some new +solution, S1. The key point here is that we now have **two possible solutions** +to the same goal, S and S1. This implies two possibilities: + +* If S == S1, then in fact there is a unique solution, so we can return `Provable(S)`. +* If S != S1, then we know there are two solutions, which means that there is + not one unique solution, and hence the correct result is **ambiguous**, + and in fact we can just stop and return right now. + +This technique is very similar to the traditional Prolog technique of handling +cycles, which is called **tabling**. The difference between our approach and +tabling is that we are always looking for a unique solution, whereas Prolog +(like the [SLG solver]) tries to enumerate all solutions (i.e., in Prolog, +solving a goal is not a function but an iterator that yields solutions, and +hence it would yield up S first, and then S1, and then any further answers we +might get). + +[SLG solver]: ../engine.md + +Intuitively, what is happening here is that we're building bigger and bigger +"proof trees" (i.e., trees of impl applications). In the first iteration, where +we assumed that all recursive calls were errors, we would find exactly one +solution, `u32: A` -- this is the root tree. In the next iteration, we can use +this result to build a tree for `Vec: A` and so forth. + +## Inductive cycles with no base case + +It is interesting to look at what happens without the base case. 
Consider this +program: + +```rust +trait B { } +impl B for Vec { } +``` + +In this case, there is no base case -- this means that in fact there are no +solutions at all to the query `?X: B`. The reason is that the only type that +could match would be a type of infinite size like `Vec>>: B`, where +the chain of `Vec` never terminates. + +In our solver, this will work out just fine. We will wind up recursing +and encountering a cycle. This will be treated as an error in the first +iteration -- and then, at the end, we'll still have an error. This means +that we've reached a fixed point, and we can stop. + + +## Inductive cycles: when do we ever terminate + +You might be wondering whether there are any examples of inductive cycles that +actually terminate successfully and without ambiguity. In fact, there are very +few, but you can construct an example like this: + +```rust +trait C { } +impl C for Vec { } +impl C for u32 { } + +trait D { } +``` + +In this case, the only valid result of `Implemented(?X: C)` is `?X = u32`. It can't +be `Vec` because `Implemented(u32: D)` is not true. + +How does this work out with the recursive solver? In the first iteration, +we wind up with `?X = u32`, but we do encounter a cycle: + +* proving `Implemented(?X: C)` has two possibilities... + * `?X = Vec` and `Implemented(?Y: C)`, which is a cycle (error, at least in this iteration) + * `?X = u32`, succeeds + +So then we try the next iteration: + +* proving `Implemented(?X: C)` has two possibilities... 
+ * `?X = Vec` and `Implemented(?Y: C)`, which is a cycle, so we use our previous result of `?Y = u32` + * we then have to prove `Implemented(u32: D)`, which fails + * `?X = u32`, succeeds diff --git a/book/src/recursive/search_graph.md b/book/src/recursive/search_graph.md new file mode 100644 index 00000000000..932bb3265dd --- /dev/null +++ b/book/src/recursive/search_graph.md @@ -0,0 +1,193 @@ +# The search graph and caching + +So now we have a good idea of [what behavior we expect from cycles][cycles], or +at least inductive cycles (we'll talk about [coinduction] later). But how do we +actually implement this? That's where the [`SearchGraph`] comes into play. + +[cycles]: ./inductive_cycles.md +[coinduction]: ./coinduction.md +[stack]: ./stack.md +[`SearchGraph`]: https://rust-lang.github.io/chalk/chalk_recursive/fixed_point/search_graph/struct.SearchGraph.html +[`DepthFirstNumber`]: https://rust-lang.github.io/chalk/chalk_recursive/fixed_point/search_graph/struct.DepthFirstNumber.html +[`Node`]: https://rust-lang.github.io/chalk/chalk_recursive/fixed_point/search_graph/struct.Node.html +[`stack_depth`]: https://rust-lang.github.io/chalk/chalk_recursive/fixed_point/search_graph/struct.Node.html#structfield.stack_depth + +The role of the [`SearchGraph`] is to store information about each goal that we +are currently solving. Typically, these are goals on the stack -- but other +times, they are goals that are no longer on the stack, but whose results +(because of a cycle) were dependent on something that is still on the stack. +We'll work through some examples to make it all clear. + + +## Structure of the search graph + +The search graph consists of nodes, each of which is assigned an index called a +[`DepthFirstNumber`]. 
The name of this index alludes to the fact that, as we try +to prove a given goal, we are implicitly performing a "depth-first search" over +a graph of subgoals, and the index in the search graph is similar to a pre-order +index on the resulting tree. + +### Example search graph + +Consider this example trait plus impls: + +```rust +trait A { } +impl A for Result { } +impl A for u32 { } +impl A for i32 { } +impl A for f32 { } +``` + +If we consider the full set of goals/subgoals that are involved in proving `Implemented(Result: A)`, +it would look like this: + +```mermaid +graph TD + G1["Implemented(Result<u32, i32>: A)
Pre-order number: 0
DepthFirstNumber: 0"] + G2["Implemented(u32: A)
Pre-order number: 1
DepthFirstNumber: 1"] + G3["Implemented(i32: A)
Pre-order number: 2
DepthFirstNumber: 1"]
+    G1 --> G2
+    G1 --> G3
+```
+
+The graph also shows a possible set of pre-order numbers, as well as the
+[`DepthFirstNumber`] that would be used in the search graph. You can see that
+they start to diverge. Pre-order numbers uniquely identify each goal in the
+graph. In contrast, after we finish proving `Implemented(u32: A)`, we remove
+that node from the graph, and hence its [`DepthFirstNumber`] is re-used.
+
+## Goal lifecycle
+
+Every goal that we prove in the recursive solver goes through the following states:
+
+```mermaid
+graph TD
+    NewlyCreated["Newly created"]
+    OnStack["On stack and in the search graph"]
+    InGraph["Popped from stack but retained in search graph"]
+    ProcessingComplete["Processing complete"]
+    InCache["Stored in the cache"]
+    NewlyCreated --> OnStack
+    OnStack -- Explore all program clauses for the goal --> ProcessingComplete
+    ProcessingComplete -- If node is a participant in a cycle --> InGraph
+    InGraph -- On next iteration --> OnStack
+    ProcessingComplete -- If not part of a cycle or when fixed-point is reached --> InCache
+```
+
+At first, we create the goal and push it onto the stack, and we also add it to
+the search graph. We then explore each of the relevant program clauses to try and
+find the solution(s) to the goal. Along the way we update the overall solution:
+
+* If there are no valid solutions, then the result is an error.
+* If there is exactly one solution, then we remember it as the unique solution.
+* If there are multiple distinct solutions, the result is "ambiguous".
+
+While we are doing this solving, we also track what other goals this goal winds
+up depending on. In particular, we are looking to see whether it winds up as a
+participant in a cycle -- that is, if it depends on any goals further up the
+goal stack.
+
+If, when we're done with all program clauses, the goal never participated in any
+cycles, then we have reached our final solution. We can take that result and put
+it into the cache.
The next time we look for a solution to this goal, we'll +check that cache and return the result. + +But otherwise, if the goal *was* a participant in a cycle, then we have to +iterate, as described in the [section on cycles](./inductive_cycles.md). In that +case, we keep the goal in the search graph, but we remove it from the stack. +This allows the search graph to serve as a kind of "interim cache". If, as we +continue to search through the other nodes that remain on the stack, we have to +solve this same goal again, we will find it in the search cache and re-use the +result. + +For goals that are participants in a cycle, when the cycle reaches its +fixed-point (i.e., the top-most node has stopped changing), we go through and +take all the results for all the subgoals (which are still present in the search +graph) and move them all into the "final cache". + +In other words, any result that is present in the *search graph* can be +considered an "interim cache", with a result that is still being determined and +may be dependent on other goals on the stack. Once the goal is completely +processed, it is moved to the cache field where others can use it. + +## Processing a single goal, a flow chart + +Whenever we are asked to solve a goal, these are the steps we take: + +```mermaid +graph TD + GoalInGraph["Goal in search graph?"] + FlagAsHead["If goal is on stack,
flag as head of cycle."] + ReturnCurrentResult["Return result from
search graph."] + PushOnStack["Push goal on stack,
add to the search graph with index `G`,
initial result is error"] + ProcessEachClause["Process each program clause in turn,
computing result,
and tracking `Minimums`"] + IsCycleParticipant["Is G dependent on
goal lower in stack?"] + StoreInCache["Move results `G..`
from search graph to cache"] + PopFromCacheNotGraph["Pop goal from stack
but leave in search graph"] + CompareResult["Did result change from
what is stored in search graph?"] + UpdateSearchGraph["Update stored result
in search graph"] + ClearPreviousIteration["Clear search graph nodes `G+1..`
from previous iteration"]
+
+    GoalInGraph -- Yes --> FlagAsHead
+    FlagAsHead --> ReturnCurrentResult
+    GoalInGraph -- No, not in the graph --> PushOnStack
+    PushOnStack --> ProcessEachClause
+    ProcessEachClause -- Is head of cycle --> CompareResult
+    ProcessEachClause -- Not head of cycle --> IsCycleParticipant
+    CompareResult -- No, fixed-point reached --> IsCycleParticipant
+    CompareResult -- Yes, result changed --> UpdateSearchGraph
+    UpdateSearchGraph --> ClearPreviousIteration
+    ClearPreviousIteration --> ProcessEachClause
+    IsCycleParticipant -- No --> StoreInCache
+    IsCycleParticipant -- Yes --> PopFromCacheNotGraph
+```
+
+## Starting to prove a goal
+
+The first thing we do when proving some goal G is to check the search graph to
+see if there is already a node for this goal.
+
+### If there is a node for G
+
+If there is a node for G, that indicates that there is some sort of cycle
+involved in the graph. For now, we will defer this case, and come back to it
+after we've explained what happens without cycles.
+
+### If there is no node for G: pushing a new goal onto the stack
+
+If there is no node for G in the graph, then we have a new subgoal to add
+to the graph. We will first push a new entry onto the [stack], yielding some new
+stack depth `d`. Then we create a new [`Node`] in the search graph. It will be
+assigned the next available [`DepthFirstNumber`]. The search graph node contains a field
+[`stack_depth`] that will be set to `Some(d)`, where `d` is the depth of the
+node on the stack.
+
+The search graph node also stores the "current solution" for the given goal. As
+described in the section on [inductive cycles][cycles], this solution starts out
+as an error but may be gradually widened as we iterate, if we find solutions.
+
+## Tracking dependencies
+
+The way that we track dependencies is through a structure called the
+[`Minimums`].
The name comes from the idea that it is tracking the minimum +[`DepthFirstNumber`] of any goal whose result we depended on. The minimum for a +goal G1 starts out as G1, since its result depends on itself, but if it winds up +recursively processing some goal G2 that is on the stack, then the minimum will +be adjusted to G2. + +Along with the interim solution, the search graph node for a given goal also +stores the [`Minimums`] that resulted from computing that interim solution +(i.e., what goals did that solution depend on). If some goal G1 winds up +recursively invoking some goal G2 that is in the search graph but *not* present +on the stack, then we update the current [`Minimums`] with the values stored in +the search graph. + +[`Minimums`]: https://rust-lang.github.io/chalk/chalk_recursive/fixed_point/struct.Minimums.html + +## Removing nodes from the graph + +Once we complete the processing for a node, it needs to be removed from the +processing stack. But we wish to leave it in the graph if it is dependent on +something else that is already on the stack. We do that just by checking the +[`Minimums`] value to see if it is less than the current goal. diff --git a/book/src/recursive/stack.md b/book/src/recursive/stack.md new file mode 100644 index 00000000000..ac4e1fcc949 --- /dev/null +++ b/book/src/recursive/stack.md @@ -0,0 +1,77 @@ +# The stack + +The first "layer" of the recursive solver is the [`Stack`]. It is really just +what it sounds like: a stack that stores each thing that the recursive solver is +solving. Initially, it contains only one item, the root goal that was given by +the user. + +[`Stack`]: https://rust-lang.github.io/chalk/chalk_recursive/fixed_point/stack/struct.Stack.html + +Each frame on the stack has an associated [`StackDepth`], which is basically an +index that increases (so 0 is the top of the stack, 1 is the next thing pushed, +etc). 
+ +[`StackDepth`]: https://rust-lang.github.io/chalk/chalk_recursive/fixed_point/stack/struct.StackDepth.html + +## How the recursive solver works at the highest level + +At the highest level, the recursive solver works like so. + +* Push the initial goal `G0` onto the stack. +* Find all the program clauses `G1 :- G2...Gn` that could apply to the goal `G0`. +* For each program clause, unify `G1` and `G0`. If that succeeds, then recursively try to prove each goal `Gi` in the list `G2..Gn`: + * If proving `Gi` yields an error, return an error. + * If proving `Gi` yields an ambiguity, keep going, but remember that we got an ambiguous result. + * If proving `Gi` succeeded, apply the resulting answer to our inference variables and keep going. +* At the end, if any result proved ambiguous, return ambiguous, otherwise construct the final answer and return success. + +## Example + +```rust +trait A { } +trait B { } + +impl A for Vec { } + +impl B for u32 { } +``` + +Imagine we are trying to prove `Implemented(Vec: A)`. There is one unbound +inference variable here, `?X`. We will ultimately get the result `Provable(?X = +u32)`. But how do we find it? + +* Initially we are solving `Implemented(Vec: A)` + * we find one applicable program clause, `forall { Implemented(Vec: A) :- Implemented(T: B) }`. + * after unification, the list of subgoals is `[Implemented(?X: B)]`. + * we recursively try to solve `Implemented(?X: B)` + * we find one applicable program clause, `Implemented(u32: B)`. + * after unification, `?X = u32`, but there are no more subgoals. + * we return the answer `Provable(?X = u32)`. + * we apply the substitution `?X = u32`, and find there are no more subgoals. + * we return the answer `Provable(?X = u32)`. + +## Why do we need the stack? + +You may have noticed that the description above never seemed to use the [`Stack`], +it only relied on the program stack. That's because I left out any discussion +of cycles. 
In fact, the [`Stack`] data structure does mirror the program stack, +it just adds some extra information we use in resolving cycles. We'll discuss +cycles in the next chapter, when we discuss the [search graph]. + +## Figuring out if something is on the stack + +The stack itself never stores the goal associated with a particular entry. That +information is found in the [search graph], which will be covered in detail in +the next section. For now it suffices to say that the search graph maps from +"some goal that we are currently solving" to "information about that goal", and +one of the bits of information is the [`StackDepth`] of its entry on the stack +(if any). + +Therefore, when we are about to start solving some (canonical) goal G, we can +detect a cycle by checking in the [search graph] to see whether G has an associated +[`StackDepth`]. If so, it must be on the stack already (and we can set the +[`cycle`] field to true...but I get ahead of myself, read the next chapters +to learn more about that). 
+ +[search graph]: ./search_graph.md +[`cycle`]: https://rust-lang.github.io/chalk/chalk_recursive/fixed_point/stack/struct.StackEntry.html#structfield.cycle \ No newline at end of file diff --git a/book/src/repl.md b/book/src/repl.md new file mode 100644 index 00000000000..a2f7cb2b8ad --- /dev/null +++ b/book/src/repl.md @@ -0,0 +1 @@ +# REPL diff --git a/book/src/todo.md b/book/src/todo.md new file mode 100644 index 00000000000..c6da46b23c6 --- /dev/null +++ b/book/src/todo.md @@ -0,0 +1,8 @@ +## Incomplete chapters + +Some topics yet to be written: + +- Elaborate on the proof procedure +- SLG solving – introduce negative reasoning +- Go over how trait impls are selected and checked +- Add a section on higher-ranked trait bounds diff --git a/book/src/types.md b/book/src/types.md new file mode 100644 index 00000000000..5e84609dc01 --- /dev/null +++ b/book/src/types.md @@ -0,0 +1,67 @@ +# Representing and manipulating Rust types + +## Intermediate representations + +Intermediate representations (IR) are used to represent parts of Rust programs such as traits and impls. + +Chalk contains three levels of IR: + +- The **AST**. This is used purely for writing test cases + with a Rust-like syntax. This is consumed by **lowering** code, which + takes AST and produces **Rust IR** (the next bullet point). +- The **Rust IR**. This is a "HIR-like" notation that defines the + interesting properties of things like traits, impls, and structs. + It is an input to the **rules** code, which produces **Chalk IR** (the next bullet point). +- The **Chalk IR**. This is most "Prolog-like" of the various IRs. It + contains the definition of **types** as well as prolog-like concepts + such as goals (things that must be proven true) and clauses (things + that are assumed to be true). + + +## Goal of the chalk-ir crate + +To have an ergonomic, flexible library that can abstractly represent +Rust types and logical predicates. 
The library should expose a
+"minimal" set of types that is nonetheless able to capture the full
+range of Rust types. "Minimal" here means that some of the surface
+differences in Rust types -- e.g., the distinction between built-in
+types like `u32` and user-defined types like a struct -- ought to be
+minimized, so that code that works with these types (e.g., trait
+solving) can focus on the most important differences.
+
+## Goal: support embedding and a variety of contexts
+
+One of our goals is to create a type representation that can be
+readily embedded into a variety of contexts. Most specifically, we
+would like to be able to embed into rustc and rust-analyzer, and
+permit those two projects to use distinct memory management
+strategies. This is primarily achieved via the `Interner` trait.
+
+Initially, at least in rustc, the goal is to be able to easily and
+"reasonably efficiently" convert back and forth between rustc's native
+type representation and chalk's representation. Once chalk's design
+has stabilized, however, the goal would be for rustc to adopt this
+format as its "native" representation.
+
+Note that even if the chalk type library were used everywhere,
+however, it would still be useful for rustc to be able to control the
+memory management strategy. (In other words, different consumers might
+wish to use it in different ways.)
+
+## Note on status
+
+At the moment, this documentation is a "proposal". That means that it
+diverges in some places from what is actually implemented. It has also
+not been thoroughly discussed by the Rust compiler team as a whole.
+
+Here is a (partial) list of some things that have to be adapted in
+Chalk as of today to match this document:
+
+* Extract `TypeName` into something opaque to chalk-ir.
+* Dyn type equality should probably be driven by entailment.
+* Projections need to be renamed to aliases.
+* The variant we use for impl traits should be removed and folded into type aliases.
+* Remove placeholders and projection placeholders from apply and create placeholder types. +* Move `Error` from a `TypeName` to its own variant. +* Introduce `GeneratorWitness` into chalk +* Complete transition from `ForAll` to `Fn` in chalk diff --git a/book/src/types/operations.md b/book/src/types/operations.md new file mode 100644 index 00000000000..002d75a3a2c --- /dev/null +++ b/book/src/types/operations.md @@ -0,0 +1,4 @@ +# Operations + +This chapter describes various patterns and utilities for manipulating +Rust types. diff --git a/book/src/types/operations/fold.md b/book/src/types/operations/fold.md new file mode 100644 index 00000000000..403e50431e7 --- /dev/null +++ b/book/src/types/operations/fold.md @@ -0,0 +1,103 @@ +# TypeFoldable and the TypeFolder trait + +The [`TypeFoldable`] trait permits one to traverse a type or other term in the +chalk-ir and make a copy of it, possibly making small substitutions or +alterations along the way. Folding also allows copying a term from one +interner to another. + +[`TypeFoldable`]: https://rust-lang.github.io/chalk/chalk_ir/fold/trait.TypeFoldable.html + +To use the [`TypeFoldable`] trait, one invokes the [`TypeFoldable::fold_with`] method, supplying some +"folder" as well as the number of "in scope binders" for that term (typically `0` +to start): + +```rust,ignore +let output_ty = input_ty.fold_with(&mut folder, 0); +``` + +[`TypeFoldable::fold_with`]: https://rust-lang.github.io/chalk/chalk_ir/fold/trait.TypeFoldable.html#tymethod.fold_with + +The folder is some instance of the [`TypeFolder`] trait. This trait +defines a few key callbacks that allow you to substitute different +values as the fold proceeds. For example, when a type is folded, the +folder can substitute a new type in its place. 
+ +[`TypeFolder`]: https://rust-lang.github.io/chalk/chalk_ir/fold/trait.TypeFolder.html + +## Uses for folders + +A common use for `TypeFoldable` is to permit a substitution -- that is, +replacing generic type parameters with their values. + +## From TypeFoldable to TypeFolder to TypeSuperFoldable and back again + +The overall flow of folding is like this. + +1. [`TypeFoldable::fold_with`] is invoked on the outermost term. It recursively + walks the term. +2. For those sorts of terms (types, lifetimes, goals, program clauses) that have + callbacks in the [`TypeFolder`] trait, invoking [`TypeFoldable::fold_with`] will in turn + invoke the corresponding method on the [`TypeFolder`] trait, such as `TypeFolder::fold_ty`. +3. The default implementation of `TypeFolder::fold_ty`, in turn, invokes + `TypeSuperFoldable::super_fold_with`. This will recursively fold the + contents of the type. In some cases, the `super_fold_with` + implementation invokes more specialized methods on [`TypeFolder`], such + as [`TypeFolder::fold_free_var_ty`], which makes it easier to write + folders that just intercept *certain* types. + +[`TypeFolder::fold_free_var_ty`]: https://rust-lang.github.io/chalk/chalk_ir/fold/trait.TypeFolder.html#method.fold_free_var_ty + +Thus, as a user, you can customize folding by: + +* Defining your own `TypeFolder` type +* Implementing the appropriate methods to "intercept" types/lifetimes/etc at the right level of + detail +* In those methods, if you find a case where you would prefer not to + substitute a new value, then invoke `TypeSuperFoldable::super_fold_with` to + return to the default behavior. + +## The `binders` argument + +Each callback in the [`TypeFolder`] trait takes a `binders` argument. This indicates +the number of binders that we have traversed during folding, which is relevant for De Bruijn indices. +So e.g. 
a bound variable with depth 1, if invoked with a `binders` value of 1, indicates something that was bound to something external to the fold. + +For example, consider: + +```rust,ignore +Foo<'a>: for<'b> Bar<'b> +``` + +In this case, `Foo<'a>` gets visited with depth 0 and `Bar<'b>` gets visited with depth 1. + +## The `TypeFoldable::Result` associated type + +The `TypeFoldable` trait defines a [`Result`] associated type, indicating the +type that will result from folding. + +[`Result`]: https://rust-lang.github.io/chalk/chalk_ir/fold/trait.TypeFoldable.html#associatedtype.Result + +## When to implement the TypeFoldable and TypeSuperFoldable traits + +Any piece of IR that represents a kind of "term" (e.g., a type, part +of a type, or a goal, etc) in the logic should implement `TypeFoldable`. We +also implement `TypeFoldable` for common collection types like `Vec` as well +as tuples, references, etc. + +The `TypeSuperFoldable` trait should only be implemented for those types that +have a callback defined on the `TypeFolder` trait (e.g., types and +lifetimes). + +## Derives + +Using the `chalk-derive` crate, you can auto-derive the `TypeFoldable` trait. +There isn't presently a derive for `TypeSuperFoldable` since it is very rare +to require it. The derive for `TypeFoldable` is a bit cludgy and requires: + +* You must import `TypeFoldable` into scope. +* The type you are deriving `TypeFoldable` on must have either: + * A type parameter that has a `Interner` bound, like `I: Interner` + * A type parameter that has a `HasInterner` bound, like `I: HasInterner` + * The `has_interner(XXX)` attribute. 
+ + diff --git a/book/src/types/role_of_interner.md b/book/src/types/role_of_interner.md new file mode 100644 index 00000000000..7b4c52d5f50 --- /dev/null +++ b/book/src/types/role_of_interner.md @@ -0,0 +1,87 @@ +## The role of the `Interner` + +Most everything in the IR is parameterized by the [`Interner`] trait: + +[`Interner`]: https://rust-lang.github.io/chalk/chalk_ir/interner/trait.Interner.html + +```rust,ignore +trait Interner: Copy + Clone + Debug + Eq + Ord { + .. +} +``` + +We'll go over the details later, but for now it suffices to say that +the interner is defined by the embedder and can be used to control +(to a certain extent) the actual representation of types, goals, and +other things in memory. For example, the `Interner` trait could be +used to intern all the types, as rustc does, or it could be used to +`Box` them instead, as the chalk testing harness currently does. + +### Controlling representation with `Interner` + +The purpose of the [`Interner`] trait is to give control over how +types and other bits of chalk-ir are represented in memory. This is +done via an "indirection" strategy. We'll explain that strategy here +in terms of [`Ty`] and [`TyKind`], the two types used to represent +Rust types, but the same pattern is repeated for many other things. + +[`Interner`]: https://rust-lang.github.io/chalk/chalk_ir/interner/trait.Interner.html +[`Ty`]: https://rust-lang.github.io/chalk/chalk_ir/struct.Ty.html +[`TyKind`]: https://rust-lang.github.io/chalk/chalk_ir/enum.TyKind.html + +Types are represented by a [`Ty`] type and the [`TyKind`] enum. +There is no *direct* connection between them. 
The link is rather made +by the [`Interner`] trait, via the [`InternedTy`] associated type: + +[`Ty`]: https://rust-lang.github.io/chalk/chalk_ir/struct.Ty.html +[`TyKind`]: https://rust-lang.github.io/chalk/chalk_ir/enum.TyKind.html +[`InternedTy`]: https://rust-lang.github.io/chalk/chalk_ir/interner/trait.Interner.html#associatedtype.InternedType + +```rust,ignore +struct Ty(I::InternedTy); +enum TyKind { .. } +``` + +The way this works is that the [`Interner`] trait has an associated +type [`InternedTy`] and two related methods, [`intern_ty`] and [`ty_data`]: + +[`intern_ty`]: https://rust-lang.github.io/chalk/chalk_ir/interner/trait.Interner.html#tymethod.intern_ty +[`ty_data`]: https://rust-lang.github.io/chalk/chalk_ir/interner/trait.Interner.html#tymethod.ty_data + +```rust,ignore +trait Interner { + type InternedTy; + + fn intern_ty(&self, data: &TyKind) -> Self::InternedTy; + fn ty_data(data: &Self::InternedTy) -> &TyData; +} +``` + +However, as a user you are not meant to use these directly. Rather, +they are encapsulated in methods on the [`Ty`] and [`TyKind`] types: + +```rust,ignore +impl Ty { + fn data(&self) -> &TyKind { + I::lookup_ty(self) + } +} +``` + +and + +```rust,ignore +impl TyKind { + fn intern(&self, i: &I) -> Ty { + Ty(i.intern_ty(self)) + } +} +``` + +Note that there is an assumption here that [`ty_data`] needs no +context. This effectively constrains the [`InternedTy`] representation +to be a `Box` or `&` type. To be more general, at the cost of some +convenience, we could make that a method as well, so that one would +invoke `ty.data(i)` instead of just `ty.data()`. This would permit us +to use (for example) integers to represent interned types, which might +be nice (e.g., to permit using generational indices). 
diff --git a/book/src/types/rust_lifetimes.md b/book/src/types/rust_lifetimes.md new file mode 100644 index 00000000000..21fdd1296bc --- /dev/null +++ b/book/src/types/rust_lifetimes.md @@ -0,0 +1,18 @@ +# Rust lifetimes + +Lifetimes are represented by the `Lifetime` and `LifetimeData` +types. As with types, the actual representation of a lifetime is +defined by the associated type `I::InternedLifetime`. + +### The `LifetimeData` variants + +This section covers the variants we use to categorize lifetimes. + +#### Variants and their equivalents in Rust syntax + +| Chalk variant | Example Rust types | +| ------------- | ------------------ | +| `BoundVar` | the `'a` in a type like `for<'a> fn(&'a u8)`, before it is instantiated | +| `InferenceVar` | a lifetime whose value is being inferred | +| `Placeholder` | how we represent `'a` when type checking `fn foo<'a>() { .. }` | +| `Static` | the lifetime `'static` | diff --git a/book/src/types/rust_types.md b/book/src/types/rust_types.md new file mode 100644 index 00000000000..fa5f5fd4964 --- /dev/null +++ b/book/src/types/rust_types.md @@ -0,0 +1,202 @@ +# Rust types + +Rust types are represented by the [`Ty`] and [`TyKind`] types. +You use [`Ty`] to represent "some Rust type". But to actually inspect +what sort of type you have, you invoke the [`kind`] method, which +returns a [`TyKind`]. As described earlier, the actual in-memory +representation of types is controlled by the [`Interner`] trait. + +[`Interner`]: https://rust-lang.github.io/chalk/chalk_ir/interner/trait.Interner.html +[`Ty`]: https://rust-lang.github.io/chalk/chalk_ir/struct.Ty.html +[`TyKind`]: https://rust-lang.github.io/chalk/chalk_ir/enum.TyKind.html +[`kind`]: https://rust-lang.github.io/chalk/chalk_ir/struct.Ty.html#method.kind + +## The `TyKind` variants and how they map to Rust syntax + +This section covers the variants we use to categorize types. 
We have +endeavored to create a breakdown that simplifies the Rust "surface +syntax" of types to their "essence". In particular, the goal is to +group together types that are largely treated identically by the +system and to separate types when there are important semantic +differences in how they are handled. + +| Chalk variant | Example Rust types | +| ------------- | ------------------ | +| `Placeholder` | how we represent `T` when type checking `fn foo() { .. }` | +| `Dyn` | `dyn Trait` | +| `Fn` | `fn(&u8)` | +| `Alias` | `::Item`, or the `Foo` in `type Foo = impl Trait` and `type Foo = u32` | +| `BoundVariable` | an uninstantiated generic parameter like the `T` in `struct Foo` | +| `Adt` | `struct Foo` | +| ... | ... | + +## Justification for each variant + +Each variant of `TyKind` generally wraps a single struct, which +represents a type known to be of that particular variant. This section +goes through the variants in a bit more detail, and in particular +describes why each variant exists. + +### Application types + +Most of "normal rust types" like `Vec` or `(f32, Vec)` are represented with +`TyKind` variants containing some type-specific info ("type name") and a substitution +that is "applied" to that type. In this case, type names are `Vec` and "tuple of arity 2", +and substitutions are `[u32]` and `[f32, Vec]`. + +They are equal to other types (modulo aliases, see below) iff they +have the same "type name" and the generic arguments are +recursively equal + +### Placeholders + +The `Placeholder` variant contains a `PlaceholderIndex` type. It +represents a generic type that is being treated abstractly or -- more +generally -- the result of a "type function" that cannot be +evaluated. For example, when typing the body of a generic function +like `fn foo`, the type `T` would be represented with a +placeholder. Similarly, in that same function, the associated type +`T::Item` might be represented with a placeholder. 
Like application types, placeholder *types* are only known to be +equal to themselves.
+ +There are "automatic" rules for proving that `dyn P: P` and so forth, but +that is outside the scope of the chalk-ir crate. + +### Function pointer types + +The `Function` variant wraps a `FnPointer` struct and represents a `fn()` type +(in other words, a function pointer). In some ways, fn types are like +application types, but with one crucial difference: they also contain +a `forall` binder that for lifetimes whose value is determined when +the function is called. Consider e.g. a type like `fn(&u32)` or -- +more explicitly -- `for<'a> fn(&'a u32)`. + +Two `Function` types `A, B` are equal `A = B` if `A <: B` and `B <: A` + +Two `Function` types `A, B` are subtypes `A <: B` if + +* After instantiating the lifetime parameters on `B` universally... + * You can instantiate the lifetime parameters on `A` existentially... + * And then you find that `P_B <: P_A` for every parameter type `P` on `A` and `B` and + `R_A <: R_B` for the return type `R` of `A` and `B`. + +We currently handle type inference with a bit of a hack (same as +rustc); when relating a `Fn` type `F` to an unbounded type +variable `V`, we instantiate `V` with `F`. But in practice +because of the above subtyping rules there are actually a range of +values that `V` could have and still be equal with `F`. This may +or may not be something to consider revisiting. + + +### Alias types + +The `Alias` variant wraps an `AliasTy` and is used to represent some form of *type +alias*. They are used to represent a number of related Rust concepts, include +actual type aliases, associated types, and opaque types -- you can read about +them in the [aliases chapter](./rust_types/alias.md). + +### Bound variables + +The `BoundVar` variant represents some variable that is bound in +an outer term. For example, given a term like `forall { +Implemented(X: Trait) }`, the `X` is bound. Bound variables in chalk +(like rustc) use De Bruijn indices (See below). 
+ +Bound variables are never directly equated, as any bound variables would have +been instantiated with either inference variables or placeholders. + +They do appear in canonical forms and other terms that contain binders. + +### Error types + +The `Error` variant represents a type that resulted from some +erroneous expression. Error types generally propagate eagerly in an +attempt to suppress nonsense errors that are derived by interactions +with buggy code. + +`Error` should be its own variant because most bits of code will want +to handle it somewhat specially -- e.g., maybe it can "unify" with any +other type without any effect, and so forth. + +## Mapping to rustc types + +The rustc [`TyKind`][Rustc-TyKind] enum is almost equivalent to chalk's. This +section describes how the rustc types can be mapped to chalk +types. The intention is that, at least when transitioning, rustc would +implement the `Interner` trait and would map from the [`TyKind`][Rustc-TyKind] +enum to chalk's [`TyKind`] on the fly, when `data()` is invoked. + +[Rustc-TyKind]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_type_ir/ty_kind/enum.TyKind.html + +| rustc type | chalk variant (and some notes) | +| ------------- | ------------------ | +| `Bool` | `Scalar` | +| `Char` | `Scalar` | +| `Int` | `Scalar` | +| `Uint` | `Scalar` | +| `Float` | `Scalar` | +| `Adt` | `Adt` | +| `Foreign` | `Foreign` | +| `Str` | `Str` | +| `Array` | `Array` | +| `Slice` | `Slice` | +| `RawPtr` | `Raw` | +| `Ref` | `Ref` | +| `FnDef` | `FnDef` | +| `FnPtr` | `Function` | +| `Dynamic` | `Dyn` | +| `Closure` | `Closure` | +| `Coroutine` | `Coroutine` | +| `CoroutineWitness` | `CoroutineWitness` | +| `Never` | `Never` | +| `Tuple` | `Tuple` | +| `Projection` | `Alias` | +| `UnnormalizedProjection` | (see below) | +| `Opaque` | `Alias` | +| `Param` | XXX Placeholder? 
| +| `Bound` | `BoundVar` | +| `Placeholder` | `Placeholder` | +| `Infer` | `InferenceVar` | +| `Error` | `Error` | diff --git a/book/src/types/rust_types/alias.md b/book/src/types/rust_types/alias.md new file mode 100644 index 00000000000..4f62bd765a3 --- /dev/null +++ b/book/src/types/rust_types/alias.md @@ -0,0 +1,69 @@ +# Alias types + +**Alias types** are used in chalk to handle a number of distinct Rust +concepts: + +* Explicit type aliases like `type Foo = u32` (in theory) +* Associated types like `impl Iterator for Foo { type Item = Bar }` +* Opaque types generated by impl Traits, like `type Foo = impl Iterator` + or `fn foo() -> impl Iterator`. + +What all these aliases have in common is that they let the user write the name +of one type that turns out to be *equivalent* to another, although the +equivalent type is not always known: + +* In an explicit type alias like `type Foo = u32`, the user writes `Foo` + but it is always known to be equivalent to `u32` +* In an associated type, the user might write ` as + Iterator>::Item`, but the compiler knows that can be *normalized* (see below) + to `u32`. In generic functions, though, you might have a type like `T::Item` + where we *can't* normalize, because we don't know what `T` is. Even in that + case, though, we still know that `T::Item: Sized`, because that bound is + [declared in the `Iterator` trait][Iterator::Item] (by default, as it + happens). We describe how both cases are handled in more detail in the [section on associated types](../../clauses/type_equality.html). +* In an opaque type like `type Foo = impl Iterator`, the user might + write `Foo` (which indirectly references the opaque type) but they never get + to rely on the precise underlying type. However, when generating code, the + *compiler* does need to be able to normalize `Foo` to the precise underlying + type, so normalization still does occur. We describe this in more detail in the [opaque types](../../clauses/opaque_types.html) section. 
+ +[Iterator::Item]: https://doc.rust-lang.org/std/iter/trait.Iterator.html#associatedtype.Item + +## How aliases work + +All aliases have a few parts: + +* The *Alias* type, which represents what the user wrote directly, but where there is + some underlying type. +* *Normalization* rules, which indicate when the alias type can be converted + into its underlying type. +* A corresponding *Placeholder* type, which is used in cases where the alias **cannot** be + converted into its underlying type. + +## Equating an alias + +Alias types are integrated specially into unification. Whenever there is an +attempt to unify an *Alias* type `A` with some other type `T`, we generate an +`AliasEq` that must be solved: + +```notrust +AliasEq(A = T) +``` + +The rules for how to solve an `AliasEq` goal will be generated by lowering the alias +definition, and depend a bit on the kind of alias. We describe that lowering in the +[clauses](../../clauses.md) section. + +## Alias placeholders + +For each kind of alias (except for explicit type aliases), there is also a +corresponding *placeholder* variant in the [`TyKind`] enum. In those cases +where we cannot normalize the alias to something specific, it can be equated to +the placeholder type (see e.g. [`AssociatedType`], which is the placeholder +variant for associated type projections). Note that placeholders are +*application types* -- unlike an alias, a placeholder is only known to be equal +with itself, just like an application type. 
+ +[`TyKind`]: https://rust-lang.github.io/chalk/chalk_ir/enum.TyKind.html +[`AssociatedType`]: https://rust-lang.github.io/chalk/chalk_ir/enum.TyKind.html#variant.AssociatedType + diff --git a/book/src/types/rust_types/application_ty.md b/book/src/types/rust_types/application_ty.md new file mode 100644 index 00000000000..cc7b101fcc5 --- /dev/null +++ b/book/src/types/rust_types/application_ty.md @@ -0,0 +1,76 @@ +# Application types + +[`TyKind`] variants that consist of some type-specific info ("type name") +and a substitution are usually referred to as application types. +These include most of the "normal Rust types", such as `Vec` and `(f32, u32)`. +Such types are only "equal" to themselves (modulo aliases, see below). +Scalar types (and some others) also fall into this category, despite having no +substitutions: we treat them as having zero-length substitutions. +Note that we group together *both* user-defined structs/enums/unions (like `Vec`) +as well as built-in types like `f32`, which effectively behave the +same. + +We used to have application types in chalk as a separate notion in the codebase, +but have since moved away from that; nevertheless, the term is still useful in discussions. + +[`TyKind`]: https://rust-lang.github.io/chalk/chalk_ir/enum.TyKind.html + +## Notable application types + +### Coroutine + +A `Coroutine` represents a Rust coroutine. There are three major components +to a coroutine: + +* Upvars - similar to closure upvars, they reference values outside of the coroutine, + and are stored across all yield points. +* Resume/yield/return types - the types produced/consumed by various coroutine methods. + These are not stored in the coroutine across yield points - they are only + used when the coroutine is running. +* Coroutine witness - see the `Coroutine Witness` section below. 
+ +Of these types, only upvars and resume/yield/return are stored directly in `CoroutineDatum` +(which is accessed via `RustIrDatabase`). The coroutine witness is implicitly associated with +the coroutine by virtue of sharing the same `CoroutineId`. It is only used when determining +auto trait impls, where it is considered a 'constituent type'. + +For example: + +```rust,ignore +// This is not "real" syntax at the moment. +fn gen() -> Bar { + let a = yield 0usize; + use(a) +} + +fn use(_: usize) -> Bar {} +``` + +The type of yield would be `usize`, the resume type would be the type of `a` and the return type +would be `Bar`. + +### Coroutine witness types + +The `CoroutineWitness` variant represents the coroutine witness of +the coroutine with id `CoroutineId`. + +The coroutine witness contains multiple witness types, +which represent the types that may be part of a coroutine +state - that is, the types of all variables that may be live across +a `yield` point. + +Unlike other types, witnesses include bound, existential +lifetimes, which refer to lifetimes within the suspended stack frame. +You can think of it as a type like `exists<'a> { (T...) }`. +As an example, imagine that a type that isn't `Send` lives across a `yield`, then the coroutine +itself can't be `Send`. + +Witnesses have a binder for the erased lifetime(s), which must be +handled specifically in equating and so forth. In many ways, +witnesses are also quite similar to `Function` types, and it is not +out of the question that these two could be unified; however, they +are quite distinct semantically and so that would be an annoying +mismatch in other parts of the system. Witnesses are also similar +to a `Dyn` type, in that they represent an existential type, but +in contrast to `Dyn`, what we know here is not a *predicate* but +rather some upper bound on the set of types contained within. 
diff --git a/book/src/what_is_chalk.md b/book/src/what_is_chalk.md new file mode 100644 index 00000000000..045b65d43e0 --- /dev/null +++ b/book/src/what_is_chalk.md @@ -0,0 +1,90 @@ +# What is Chalk? + +> Chalk is under heavy development, so if any of these links are broken or if +> any of the information is inconsistent with the code or outdated, please +> [open an issue][issues] so we can fix it. If you are able to fix the +> issue yourself, we would love your contribution! + +Chalk is a library that implements the Rust trait system. The implementation is +meant to be practical and usable, but also high-level enough to map easily to a +full specification. It is also meant to be an independent library that can be +integrated both into the main rustc compiler and also other programs and +contexts. + +[issues]: https://github.com/rust-lang/chalk/issues + +## High-level view of how chalk works + +```mermaid +graph TD + Query["Does `Vec<u8>` implement `Debug`?"] + HighLevelInfo["How is the trait `Debug` declared?"] + Response["Yes, `Vec<u8>` implements `Debug`."] + Chalk + Query --> Chalk + HighLevelInfo --> Chalk + Chalk --> Response +``` + +Chalk is designed to answer queries about traits, such as "Does the type `Vec` implement `Debug`"? (Yes!). It can in some cases give inference feedback, such as "Is there a unique type `T` such that `str: AsRef`"? In that case, the answer might be "Yes, `T = str`." + +To do this, it takes as input key information about a Rust program, such as: + +* For a given trait, what are its type parameters, where clauses, and associated items +* For a given impl, what are the types that appear in the impl header +* For a given struct, what are the types of its fields + +## Chalk works by converting Rust goals into logical inference rules + +Internally, Chalk works by converting the Rust-specific information, like traits +and impls, into *logical predicates*. 
This process is called "lowering", and you +can learn more about it in the [*Lowering to Logic*][lowering-to-logic] and +[*Lowering Rules*][lowering-rules]) sections. + +[lowering-rules]: ./clauses/lowering_rules.html +[lowering-to-logic]: ./clauses.html + +After lowering to logical predicates, Chalk then deploys a *logical solver* to +find the answer to the original query; this solver is similar to a Prolog +engine, though different in its particulars. + +The following sequence diagram helps to illustrate the flow of information that occurs +when Chalk is solving a particular goal. It involves three participants: + +* The **host program**, which might be rustc, rust-analyzer, or chalk's internal + testing harness. The host program, importantly, only thinks about things in + **Rust terms**, like traits and impls. +* The **chalk-solve** crate, which converts between Rust terms and logical clauses. +* The **logic engine** layer, which knows how to solve logical clauses but knows nothing specific to Rust. + +```mermaid +sequenceDiagram + participant rustc as host program + participant chalkSolve as chalk-solve + participant chalkEngine as logic engine + rustc->>chalkSolve: Does Vec[u32] implement Debug? + chalkSolve->>chalkEngine: (Vec[u32]: Debug)? + chalkEngine->>chalkSolve: What clauses can I use? + chalkSolve->>rustc: What is the definition of `Debug`?
(via RustIrDatabase) + rustc-->>chalkSolve: `trait Debug { .. }`
(a TraitDatum) + chalkSolve->>rustc: What impls are there for Vec? + rustc-->>chalkSolve: `impl[T]: Debug] Debug for Vec[T]`
(an ImplDatum) + Note right of chalkSolve: "lowers" rust
declarations to logic + chalkSolve-->>chalkEngine: (Vec[T]: Debug) :- (T: Debug) + chalkSolve-->>chalkEngine: ... and other clauses ... + activate chalkEngine + Note right of chalkEngine: explores each clause
to see if it works + chalkEngine-->>chalkSolve: (Vec[u32]: Debug) is provable + deactivate chalkEngine + chalkSolve-->>rustc: Yes, Vec[u32] implements Debug +``` + +## Chalk repl + +In addition to being embedded into host programs, chalk also has its own testing +harness along with an associated REPL. This allows us to write unit tests that +use a "Rust-like" syntax. The REPL then makes it easy to experiment and get a +better feel for how chalk works. See the [walkthrough] for more details. + +[walkthrough]: what_is_chalk/walkthrough.html + diff --git a/book/src/what_is_chalk/crates.md b/book/src/what_is_chalk/crates.md new file mode 100644 index 00000000000..feacb411959 --- /dev/null +++ b/book/src/what_is_chalk/crates.md @@ -0,0 +1,75 @@ +# Crate breakdown + +Chalk is broken up into a number of crates. This chapter explains the +role of each crate. This crate structure helps to serve Chalk's two goals: + +* To serve as the trait engine for compilers and tools like rustc and rust-analyzer +* To be usable as a standalone REPL and testing harness + +## Crates for embedding chalk into other programs + +The following crates are "public facing" crates that you may use when embedding chalk into +other programs: + +* The `chalk-solve` crate, which defines the IR representing Rust concepts like + traits and impls and the rules that translate Rust IR into logical predicates. +* The `chalk-ir` crate, which defines the IR representing types and logical predicates. + +The following crate is an implementation detail, used internally by `chalk-solve`: + +* The `chalk-engine` crate, which defines the actual engine that solves logical predicate. This + engine is quite general and not really specific to Rust. +* The `chalk-derive` crate defines custom derives for the `chalk_ir::fold::TypeFoldable` trait and other + such things. + +## Crates for standalone REPL and testing + +The following crates are used to define the REPL and internal testing +harness. 
These crates build on the crates above. Essentially, they +define a kind of "minimal embedding" of chalk. + +* The `chalk-parser` crate can parse Rust syntax to produce an AST. +* The `chalk-integration` crate can take that AST and use it to drive the + `chalk-solve` crate above. The AST is converted into Rust IR by a process + called "lowering". +* Finally, the main `chalk` crate, along with the testing crate in the + `tests` directory, define the actual entry points. + +## The chalk-solve crate + +| The `chalk-solve` crate | | +| ----------------------- | --------------------- | +| Purpose: | to solve a given goal | +| Depends on IR: | chalk-ir and rust-ir | +| Context required: | `RustIrDatabase` | + +The `chalk-solve` crate exposes a key type called `Solver`. This is a +solver that, given a goal (expressed in chalk-ir) will solve the goal +and yield up a `Solution`. The solver caches intermediate data between +invocations, so solving the same goal twice in a row (or solving goals +with common subgoals) is faster. + +The solver is configured by a type that implements the +`RustIrDatabase` trait. This trait contains some callbacks that +provide needed context for the solver -- notably, the solver can ask: + +- **What are the program clauses that might solve given rule?** This + is answered by the code in the chalk-solve crate. +- **Is this trait coinductive?** This is answered by the chalk-ir. + + +## The chalk-engine crate + +| The `chalk-engine` crate | | +| ------------------------ | -------------------------------- | +| Purpose: | define the base solving strategy | +| IR: | none | +| Context required: | `Context` trait | + +For the purposes of chalk, the `chalk-engine` crate is effectively +encapsulated by `chalk-solve`. It defines the base SLG engine. It is +written in a very generic style that knows next to nothing about Rust +itself. 
The engine can be configured via the traits defined in +`chalk_engine::context::Context`, which contain (for example) +associated types that define what a goal or clause is, as well as +functions that operate on those things. diff --git a/book/src/what_is_chalk/repl.md b/book/src/what_is_chalk/repl.md new file mode 100644 index 00000000000..5f416ff9a29 --- /dev/null +++ b/book/src/what_is_chalk/repl.md @@ -0,0 +1,9 @@ +# REPL + +There is a repl mainly for debugging purposes which can be run by `cargo run`. Some basic examples are in [libstd.chalk](https://github.com/rust-lang/chalk/blob/master/libstd.chalk): +```bash +$ cargo run +?- load libstd.chalk +?- Vec>: Clone +Unique; substitution [], lifetime constraints [] +``` diff --git a/book/src/what_is_chalk/walkthrough.md b/book/src/what_is_chalk/walkthrough.md new file mode 100644 index 00000000000..38695fe5c13 --- /dev/null +++ b/book/src/what_is_chalk/walkthrough.md @@ -0,0 +1,218 @@ +# Walkthrough + +This section shows a sample session in the chalk repl, and then gives a tour +through the code to give you an idea of the phases involved. + +```rust,ignore +?- program +Enter a program; press Ctrl-D when finished +| struct Foo { } +| struct Bar { } +| struct Vec { } +| trait Clone { } +| impl Clone for Vec where T: Clone { } +| impl Clone for Foo { } + +?- Vec: Clone +Unique; substitution [], lifetime constraints [] + +?- Vec: Clone +No possible solution. + +?- exists { Vec: Clone } +Ambiguous; no inference guidance +``` + +You can see more examples of programs and queries in the [unit +tests][chalk-test-example]. + +Next we'll go through each stage required to produce the output above. + +### Parsing ([chalk_parse]) + +Chalk is designed to be incorporated with the Rust compiler, so the syntax and +concepts it deals with heavily borrow from Rust. It is convenient for the sake +of testing to be able to run chalk on its own, so chalk includes a parser for a +Rust-like syntax. 
This syntax is orthogonal to the Rust AST and grammar. It is +not intended to look exactly like it or support the exact same syntax. + +The parser takes that syntax and produces an [Abstract Syntax Tree (AST)][ast]. +You can find the [complete definition of the AST][chalk-ast] in the source code. + +The syntax contains things from Rust that we know and love, for example: traits, +impls, and struct definitions. Parsing is often the first "phase" of +transformation that a program goes through in order to become a format that +chalk can understand. + +### Rust Intermediate Representation ([chalk_solve::rust_ir]) + +After getting the AST we convert it to a more convenient intermediate +representation called `chalk_rust_ir`. This is sort of +analogous to the [HIR] in Rust. The process of converting to IR is called +*lowering*. + +The [`chalk::program::Program`][chalk-program] struct contains some "rust things" +but indexed and accessible in a different way. For example, if you have a +type like `Foo`, we would represent `Foo` as a string in the AST but in +`chalk::program::Program`, we use numeric indices (`ItemId`). + +The [IR source code][ir-code] contains the complete definition. + +### Chalk Intermediate Representation ([chalk_ir]) + +Once we have Rust IR it is time to convert it to "program clauses". A +[`ProgramClause`] is essentially one of the following: + +* A [clause] of the form `consequence :- conditions` where `:-` is read as + "if" and `conditions = cond1 && cond2 && ...` +* A universally quantified clause of the form + `forall { consequence :- conditions }` + * `forall { ... }` is used to represent [universal quantification]. See the + section on [Lowering to logic][lowering-forall] for more information. + * A key thing to note about `forall` is that we don't allow you to "quantify" + over traits, only types and regions (lifetimes). That is, you can't make a + rule like `forall { u32: Trait }` which would say "`u32` implements + all traits". 
You can however say `forall { T: Trait }` meaning "`Trait` + is implemented by all types". + * `forall { ... }` is represented in the code using the [`Binders` + struct][binders-struct]. + +*See also: [Goals and Clauses][goals-and-clauses]* + +This is where we encode the rules of the trait system into logic. For +example, if we have the following Rust: + +```rust,ignore +impl Clone for Vec {} +``` + +We generate the following program clause: + +```rust,ignore +forall { (Vec: Clone) :- (T: Clone) } +``` + +This rule dictates that `Vec: Clone` is only satisfied if `T: Clone` is also +satisfied (i.e. "provable"). + +Similar to [`chalk::program::Program`][chalk-program] which has "rust-like +things", chalk_ir defines [`ProgramEnvironment`] which is "pure logic". +The main field in that struct is `program_clauses`, which contains the +[`ProgramClause`]s generated by the rules module. + +### Rules ([chalk_solve]) + +The `chalk_solve` crate ([source code][chalk_solve]) defines the logic rules we +use for each item in the Rust IR. It works by iterating over every trait, impl, +etc. and emitting the rules that come from each one. + +*See also: [Lowering Rules][lowering-rules]* + +#### Well-formedness checks + +As part of lowering to logic, we also do some "well formedness" checks. See +the [`chalk_solve::wf` source code][solve-wf-src] for where those are done. + +*See also: [Well-formedness checking][wf-checking]* + +#### Coherence + +The method `CoherenceSolver::specialization_priorities` in the `coherence` module +([source code][coherence-src]) checks "coherence", which means that it +ensures that two impls of the same trait for the same type cannot exist. + +### Solver ([chalk_solve]) + +Finally, when we've collected all the program clauses we care about, we want +to perform queries on it. The component that finds the answer to these +queries is called the *solver*. 
+ +*See also: [The SLG Solver][slg]* + +## Crates + +Chalk's functionality is broken up into the following crates: +- [**chalk_engine**][chalk_engine]: Defines the core [SLG solver][slg]. +- [**chalk_ir**][chalk_ir]: Defines chalk's internal representation of + types, lifetimes, and goals. +- [**chalk_solve**][chalk_solve]: Combines `chalk_ir` and `chalk_engine`, + effectively, which implements logic rules converting `chalk_rust_ir` to + `chalk_ir` + - Contains the `rust_ir` module, which defines the "HIR-like" Rust IR + - Defines the `coherence` module, which implements coherence rules + - [`chalk_engine::context`][engine-context] provides the necessary hooks. +- [**chalk_parse**][chalk_parse]: Defines the raw AST and a parser. +- [**chalk**][doc-chalk]: Brings everything together. Defines the following + modules: + - `chalk::lowering`, which converts AST to `chalk_rust_ir` + - Also includes [chalki][chalki], chalk's REPL. + +[Browse source code on GitHub](https://github.com/rust-lang/chalk) + +## Testing + +chalk has a test framework for lowering programs to logic, checking the +lowered logic, and performing queries on it. This is how we test the +implementation of chalk itself, and the viability of the [lowering +rules][lowering-rules]. + +The main kind of tests in chalk are **goal tests**. They contain a program, +which is expected to lower to logic successfully, and a set of queries +(goals) along with the expected output. Here's an +[example][chalk-test-example]. Since chalk's output can be quite long, goal +tests support specifying only a prefix of the output. + +**Lowering tests** check the stages that occur before we can issue queries +to the solver: the [lowering to chalk_rust_ir][chalk-test-lowering], and the +[well-formedness checks][chalk-test-wf] that occur after that. 
+ +### Testing internals + +Goal tests use a [`test!` macro][test-macro] that takes chalk's Rust-like +syntax and runs it through the full pipeline described above. The macro +ultimately calls the [`solve_goal` function][solve_goal]. + +Likewise, lowering tests use the [`lowering_success!` and +`lowering_error!` macros][test-lowering-macros]. + +## More Resources + +* [Chalk Source Code](https://github.com/rust-lang/chalk) +* [Chalk Glossary](../glossary.md) + +[goals-and-clauses]: ../clauses/goals_and_clauses.html +[HIR]: https://rustc-dev-guide.rust-lang.org/hir.html +[lowering-forall]: ../clauses.html#type-checking-generic-functions-beyond-horn-clauses +[lowering-rules]: ../clauses/lowering_rules.html +[slg]: ../engine/slg.html +[wf-checking]: ../clauses/wf.html + +[ast]: https://en.wikipedia.org/wiki/Abstract_syntax_tree +[chalk]: https://github.com/rust-lang/chalk +[universal quantification]: https://en.wikipedia.org/wiki/Universal_quantification + +[`ProgramClause`]: https://rust-lang.github.io/chalk/chalk_ir/struct.ProgramClause.html +[`ProgramEnvironment`]: https://rust-lang.github.io/chalk/chalk_integration/program_environment/struct.ProgramEnvironment.html +[chalk_engine]: https://rust-lang.github.io/chalk/chalk_engine +[chalk_ir]: https://rust-lang.github.io/chalk/chalk_ir/index.html +[chalk_parse]: https://rust-lang.github.io/chalk/chalk_parse/index.html +[chalk_solve]: https://rust-lang.github.io/chalk/chalk_solve/index.html +[chalk_solve::rust_ir]: https://rust-lang.github.io/chalk/chalk_solve/rust_ir/index.html +[doc-chalk]: https://rust-lang.github.io/chalk/chalk/index.html +[engine-context]: 
https://rust-lang.github.io/chalk/chalk_engine/context/index.html +[chalk-program]: https://rust-lang.github.io/chalk/chalk_integration/program/struct.Program.html + +[binders-struct]: https://rust-lang.github.io/chalk/chalk_ir/struct.Binders.html +[chalk-ast]: https://rust-lang.github.io/chalk/chalk_parse/ast/index.html +[chalk-test-example]: https://github.com/rust-lang/chalk/blob/4bce000801de31bf45c02f742a5fce335c9f035f/src/test.rs#L115 +[chalk-test-lowering-example]: https://github.com/rust-lang/chalk/blob/4bce000801de31bf45c02f742a5fce335c9f035f/src/rust_ir/lowering/test.rs#L8-L31 +[chalk-test-lowering]: https://github.com/rust-lang/chalk/blob/4bce000801de31bf45c02f742a5fce335c9f035f/src/rust_ir/lowering/test.rs +[chalk-test-wf]: https://github.com/rust-lang/chalk/blob/4bce000801de31bf45c02f742a5fce335c9f035f/src/rules/wf/test.rs#L1 +[chalki]: https://github.com/rust-lang/chalk/blob/master/src/main.rs +[clause]: https://github.com/rust-lang/chalk/blob/master/GLOSSARY.md#clause +[coherence-src]: https://rust-lang.github.io/chalk/chalk_solve/coherence/index.html +[ir-code]: https://rust-lang.github.io/chalk/chalk_solve/rust_ir/ +[solve-wf-src]: https://rust-lang.github.io/chalk/chalk_solve/wf/index.html +[solve_goal]: https://github.com/rust-lang/chalk/blob/4bce000801de31bf45c02f742a5fce335c9f035f/src/test.rs#L85 +[test-lowering-macros]: https://github.com/rust-lang/chalk/blob/4bce000801de31bf45c02f742a5fce335c9f035f/src/test_util.rs#L21-L54 +[test-macro]: https://github.com/rust-lang/chalk/blob/4bce000801de31bf45c02f742a5fce335c9f035f/src/test.rs#L33 diff --git a/chalk-derive/Cargo.toml b/chalk-derive/Cargo.toml new 
file mode 100644 index 00000000000..ec25a327fcb --- /dev/null +++ b/chalk-derive/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "chalk-derive" +version = "0.104.0-dev.0" +description = "A helper crate for use by chalk crates for `derive` macros." +license = "MIT OR Apache-2.0" +authors = ["Rust Compiler Team", "Chalk developers"] +repository = "https://github.com/rust-lang/chalk" +readme = "README.md" +keywords = ["compiler", "traits", "prolog"] +edition = "2018" + +[lib] +proc-macro = true + +[dependencies] +synstructure = "0.13.0" +quote = "1.0" +proc-macro2 = "1.0" +syn = { version = "2.0", features = ["full"] } diff --git a/chalk-derive/README.md b/chalk-derive/README.md new file mode 100644 index 00000000000..43fa19a0385 --- /dev/null +++ b/chalk-derive/README.md @@ -0,0 +1,3 @@ +A helper crate for use by chalk crates for `derive` macros. + +See [Github](https://github.com/rust-lang/chalk) for up-to-date information. diff --git a/chalk-derive/src/lib.rs b/chalk-derive/src/lib.rs new file mode 100644 index 00000000000..598e12ffb58 --- /dev/null +++ b/chalk-derive/src/lib.rs @@ -0,0 +1,453 @@ +extern crate proc_macro; + +use proc_macro2::{Span, TokenStream}; +use quote::quote; +use quote::ToTokens; +use syn::{parse_quote, DeriveInput, Ident, TypeParam, TypeParamBound}; + +use synstructure::decl_derive; + +/// Checks whether a generic parameter has a `: HasInterner` bound +fn has_interner(param: &TypeParam) -> Option<&Ident> { + bounded_by_trait(param, "HasInterner") +} + +/// Checks whether a generic parameter has a `: Interner` bound +fn is_interner(param: &TypeParam) -> Option<&Ident> { + bounded_by_trait(param, "Interner") +} + +fn has_interner_attr(input: &DeriveInput) -> Option { + Some( + input + .attrs + .iter() + .find(|a| a.path().is_ident("has_interner"))? 
+ .parse_args::() + .expect("Expected has_interner argument"), + ) +} + +fn bounded_by_trait<'p>(param: &'p TypeParam, name: &str) -> Option<&'p Ident> { + let name = Some(String::from(name)); + param.bounds.iter().find_map(|b| { + if let TypeParamBound::Trait(trait_bound) = b { + if trait_bound + .path + .segments + .last() + .map(|s| s.ident.to_string()) + == name + { + return Some(¶m.ident); + } + } + None + }) +} + +fn get_intern_param(input: &DeriveInput) -> Option<(DeriveKind, &Ident)> { + let mut params = input.generics.type_params().filter_map(|param| { + has_interner(param) + .map(|ident| (DeriveKind::FromHasInterner, ident)) + .or_else(|| is_interner(param).map(|ident| (DeriveKind::FromInterner, ident))) + }); + + let param = params.next(); + assert!(params.next().is_none(), "deriving this trait only works with at most one type parameter that implements HasInterner or Interner"); + + param +} + +fn get_intern_param_name(input: &DeriveInput) -> &Ident { + get_intern_param(input) + .expect("deriving this trait requires a parameter that implements HasInterner or Interner") + .1 +} + +fn try_find_interner(s: &mut synstructure::Structure) -> Option<(TokenStream, DeriveKind)> { + let input = s.ast(); + + if let Some(arg) = has_interner_attr(input) { + // Hardcoded interner: + // + // #[has_interner(ChalkIr)] + // struct S { + // + // } + return Some((arg, DeriveKind::FromHasInternerAttr)); + } + + get_intern_param(input).map(|generic_param0| match generic_param0 { + (DeriveKind::FromHasInterner, param) => { + // HasInterner bound: + // + // Example: + // + // struct Binders { } + s.add_impl_generic(parse_quote! { _I }); + + s.add_where_predicate(parse_quote! { _I: ::chalk_ir::interner::Interner }); + s.add_where_predicate( + parse_quote! { #param: ::chalk_ir::interner::HasInterner }, + ); + + (quote! { _I }, DeriveKind::FromHasInterner) + } + (DeriveKind::FromInterner, i) => { + // Interner bound: + // + // Example: + // + // struct Foo { } + (quote! 
{ #i }, DeriveKind::FromInterner) + } + _ => unreachable!(), + }) +} + +fn find_interner(s: &mut synstructure::Structure) -> (TokenStream, DeriveKind) { + try_find_interner(s) + .expect("deriving this trait requires a `#[has_interner]` attr or a parameter that implements HasInterner or Interner") +} + +#[derive(Copy, Clone, PartialEq)] +enum DeriveKind { + FromHasInternerAttr, + FromHasInterner, + FromInterner, +} + +decl_derive!([FallibleTypeFolder, attributes(has_interner)] => derive_fallible_type_folder); +decl_derive!([HasInterner, attributes(has_interner)] => derive_has_interner); +decl_derive!([TypeVisitable, attributes(has_interner)] => derive_type_visitable); +decl_derive!([TypeSuperVisitable, attributes(has_interner)] => derive_type_super_visitable); +decl_derive!([TypeFoldable, attributes(has_interner)] => derive_type_foldable); +decl_derive!([Zip, attributes(has_interner)] => derive_zip); + +fn derive_has_interner(mut s: synstructure::Structure) -> TokenStream { + s.underscore_const(true); + let (interner, _) = find_interner(&mut s); + + s.add_bounds(synstructure::AddBounds::None); + s.bound_impl( + quote!(::chalk_ir::interner::HasInterner), + quote! { + type Interner = #interner; + }, + ) +} + +/// Derives TypeVisitable for structs and enums for which one of the following is true: +/// - It has a `#[has_interner(TheInterner)]` attribute +/// - There is a single parameter `T: HasInterner` (does not have to be named `T`) +/// - There is a single parameter `I: Interner` (does not have to be named `I`) +fn derive_type_visitable(s: synstructure::Structure) -> TokenStream { + derive_any_type_visitable( + s, + parse_quote! { TypeVisitable }, + parse_quote! { visit_with }, + ) +} + +/// Same as TypeVisitable, but derives TypeSuperVisitable instead +fn derive_type_super_visitable(s: synstructure::Structure) -> TokenStream { + derive_any_type_visitable( + s, + parse_quote! { TypeSuperVisitable }, + parse_quote! 
{ super_visit_with }, + ) +} + +fn derive_any_type_visitable( + mut s: synstructure::Structure, + trait_name: Ident, + method_name: Ident, +) -> TokenStream { + s.underscore_const(true); + let input = s.ast(); + let (interner, kind) = find_interner(&mut s); + + let body = s.each(|bi| { + quote! { + ::chalk_ir::try_break!(::chalk_ir::visit::TypeVisitable::visit_with(#bi, visitor, outer_binder)); + } + }); + + if kind == DeriveKind::FromHasInterner { + let param = get_intern_param_name(input); + s.add_where_predicate(parse_quote! { #param: ::chalk_ir::visit::TypeVisitable<#interner> }); + } + + s.add_bounds(synstructure::AddBounds::None); + s.bound_impl( + quote!(::chalk_ir::visit:: #trait_name <#interner>), + quote! { + fn #method_name ( + &self, + visitor: &mut dyn ::chalk_ir::visit::TypeVisitor < #interner, BreakTy = B >, + outer_binder: ::chalk_ir::DebruijnIndex, + ) -> std::ops::ControlFlow { + match *self { + #body + } + std::ops::ControlFlow::Continue(()) + } + }, + ) +} + +fn each_variant_pair( + a: &mut synstructure::Structure, + b: &mut synstructure::Structure, + mut f: F, +) -> TokenStream +where + F: FnMut(&synstructure::VariantInfo<'_>, &synstructure::VariantInfo<'_>) -> R, + R: ToTokens, +{ + let mut t = TokenStream::new(); + for (v_a, v_b) in a.variants_mut().iter_mut().zip(b.variants_mut().iter_mut()) { + v_a.binding_name(|_, i| Ident::new(&format!("a_{}", i), Span::call_site())); + v_b.binding_name(|_, i| Ident::new(&format!("b_{}", i), Span::call_site())); + + let pat_a = v_a.pat(); + let pat_b = v_b.pat(); + let body = f(v_a, v_b); + + quote!((#pat_a, #pat_b) => {#body}).to_tokens(&mut t); + } + t +} + +fn derive_zip(mut s: synstructure::Structure) -> TokenStream { + s.underscore_const(true); + let (interner, _) = find_interner(&mut s); + + let mut a = s.clone(); + let mut b = s.clone(); + + let mut body = each_variant_pair(&mut a, &mut b, |v_a, v_b| { + let mut t = TokenStream::new(); + for (b_a, b_b) in 
v_a.bindings().iter().zip(v_b.bindings().iter()) { + quote!(chalk_ir::zip::Zip::zip_with(zipper, variance, #b_a, #b_b)?;).to_tokens(&mut t); + } + quote!(Ok(())).to_tokens(&mut t); + t + }); + + // when the two variants are different + quote!((_, _) => Err(::chalk_ir::NoSolution)).to_tokens(&mut body); + + s.add_bounds(synstructure::AddBounds::None); + s.bound_impl( + quote!(::chalk_ir::zip::Zip<#interner>), + quote! { + + fn zip_with>( + zipper: &mut Z, + variance: ::chalk_ir::Variance, + a: &Self, + b: &Self, + ) -> ::chalk_ir::Fallible<()> { + match (a, b) { #body } + } + }, + ) +} + +/// Derives TypeFoldable for structs and enums for which one of the following is true: +/// - It has a `#[has_interner(TheInterner)]` attribute +/// - There is a single parameter `T: HasInterner` (does not have to be named `T`) +/// - There is a single parameter `I: Interner` (does not have to be named `I`) +fn derive_type_foldable(mut s: synstructure::Structure) -> TokenStream { + s.underscore_const(true); + s.bind_with(|_| synstructure::BindStyle::Move); + + let (interner, kind) = find_interner(&mut s); + + let body = s.each_variant(|vi| { + let bindings = vi.bindings(); + vi.construct(|_, index| { + let bind = &bindings[index]; + quote! { + ::chalk_ir::fold::TypeFoldable::try_fold_with(#bind, folder, outer_binder)? + } + }) + }); + + let input = s.ast(); + + if kind == DeriveKind::FromHasInterner { + let param = get_intern_param_name(input); + s.add_where_predicate(parse_quote! { #param: ::chalk_ir::fold::TypeFoldable<#interner> }); + }; + + s.add_bounds(synstructure::AddBounds::None); + s.bound_impl( + quote!(::chalk_ir::fold::TypeFoldable<#interner>), + quote! 
{ + fn try_fold_with( + self, + folder: &mut dyn ::chalk_ir::fold::FallibleTypeFolder < #interner, Error = E >, + outer_binder: ::chalk_ir::DebruijnIndex, + ) -> ::std::result::Result { + Ok(match self { #body }) + } + }, + ) +} + +fn derive_fallible_type_folder(mut s: synstructure::Structure) -> TokenStream { + let interner = try_find_interner(&mut s).map_or_else( + || { + s.add_impl_generic(parse_quote! { _I }); + s.add_where_predicate(parse_quote! { _I: ::chalk_ir::interner::Interner }); + quote! { _I } + }, + |(interner, _)| interner, + ); + s.underscore_const(true); + s.unbound_impl( + quote!(::chalk_ir::fold::FallibleTypeFolder<#interner>), + quote! { + type Error = ::core::convert::Infallible; + + fn as_dyn(&mut self) -> &mut dyn ::chalk_ir::fold::FallibleTypeFolder<#interner, Error = Self::Error> { + self + } + + fn try_fold_ty( + &mut self, + ty: ::chalk_ir::Ty<#interner>, + outer_binder: ::chalk_ir::DebruijnIndex, + ) -> ::core::result::Result<::chalk_ir::Ty<#interner>, Self::Error> { + ::core::result::Result::Ok(::chalk_ir::fold::TypeFolder::fold_ty(self, ty, outer_binder)) + } + + fn try_fold_lifetime( + &mut self, + lifetime: ::chalk_ir::Lifetime<#interner>, + outer_binder: ::chalk_ir::DebruijnIndex, + ) -> ::core::result::Result<::chalk_ir::Lifetime<#interner>, Self::Error> { + ::core::result::Result::Ok(::chalk_ir::fold::TypeFolder::fold_lifetime(self, lifetime, outer_binder)) + } + + fn try_fold_const( + &mut self, + constant: ::chalk_ir::Const<#interner>, + outer_binder: ::chalk_ir::DebruijnIndex, + ) -> ::core::result::Result<::chalk_ir::Const<#interner>, Self::Error> { + ::core::result::Result::Ok(::chalk_ir::fold::TypeFolder::fold_const(self, constant, outer_binder)) + } + + fn try_fold_program_clause( + &mut self, + clause: ::chalk_ir::ProgramClause<#interner>, + outer_binder: ::chalk_ir::DebruijnIndex, + ) -> ::core::result::Result<::chalk_ir::ProgramClause<#interner>, Self::Error> { + 
::core::result::Result::Ok(::chalk_ir::fold::TypeFolder::fold_program_clause(self, clause, outer_binder)) + } + + fn try_fold_goal( + &mut self, + goal: ::chalk_ir::Goal<#interner>, + outer_binder: ::chalk_ir::DebruijnIndex, + ) -> ::core::result::Result<::chalk_ir::Goal<#interner>, Self::Error> { + ::core::result::Result::Ok(::chalk_ir::fold::TypeFolder::fold_goal(self, goal, outer_binder)) + } + + fn forbid_free_vars(&self) -> bool { + ::chalk_ir::fold::TypeFolder::forbid_free_vars(self) + } + + fn try_fold_free_var_ty( + &mut self, + bound_var: ::chalk_ir::BoundVar, + outer_binder: ::chalk_ir::DebruijnIndex, + ) -> ::core::result::Result<::chalk_ir::Ty<#interner>, Self::Error> { + ::core::result::Result::Ok(::chalk_ir::fold::TypeFolder::fold_free_var_ty(self, bound_var, outer_binder)) + } + + fn try_fold_free_var_lifetime( + &mut self, + bound_var: ::chalk_ir::BoundVar, + outer_binder: ::chalk_ir::DebruijnIndex, + ) -> ::core::result::Result<::chalk_ir::Lifetime<#interner>, Self::Error> { + ::core::result::Result::Ok(::chalk_ir::fold::TypeFolder::fold_free_var_lifetime(self, bound_var, outer_binder)) + } + + fn try_fold_free_var_const( + &mut self, + ty: ::chalk_ir::Ty<#interner>, + bound_var: ::chalk_ir::BoundVar, + outer_binder: ::chalk_ir::DebruijnIndex, + ) -> ::core::result::Result<::chalk_ir::Const<#interner>, Self::Error> { + ::core::result::Result::Ok(::chalk_ir::fold::TypeFolder::fold_free_var_const(self, ty, bound_var, outer_binder)) + } + + fn forbid_free_placeholders(&self) -> bool { + ::chalk_ir::fold::TypeFolder::forbid_free_placeholders(self) + } + + fn try_fold_free_placeholder_ty( + &mut self, + universe: ::chalk_ir::PlaceholderIndex, + outer_binder: ::chalk_ir::DebruijnIndex, + ) -> ::core::result::Result<::chalk_ir::Ty<#interner>, Self::Error> { + ::core::result::Result::Ok(::chalk_ir::fold::TypeFolder::fold_free_placeholder_ty(self, universe, outer_binder)) + } + + fn try_fold_free_placeholder_lifetime( + &mut self, + universe: 
::chalk_ir::PlaceholderIndex, + outer_binder: ::chalk_ir::DebruijnIndex, + ) -> ::core::result::Result<::chalk_ir::Lifetime<#interner>, Self::Error> { + ::core::result::Result::Ok(::chalk_ir::fold::TypeFolder::fold_free_placeholder_lifetime(self, universe, outer_binder)) + } + + fn try_fold_free_placeholder_const( + &mut self, + ty: ::chalk_ir::Ty<#interner>, + universe: ::chalk_ir::PlaceholderIndex, + outer_binder: ::chalk_ir::DebruijnIndex, + ) -> ::core::result::Result<::chalk_ir::Const<#interner>, Self::Error> { + ::core::result::Result::Ok(::chalk_ir::fold::TypeFolder::fold_free_placeholder_const(self, ty, universe, outer_binder)) + } + + fn forbid_inference_vars(&self) -> bool { + ::chalk_ir::fold::TypeFolder::forbid_inference_vars(self) + } + + fn try_fold_inference_ty( + &mut self, + var: ::chalk_ir::InferenceVar, + kind: ::chalk_ir::TyVariableKind, + outer_binder: ::chalk_ir::DebruijnIndex, + ) -> ::core::result::Result<::chalk_ir::Ty<#interner>, Self::Error> { + ::core::result::Result::Ok(::chalk_ir::fold::TypeFolder::fold_inference_ty(self, var, kind, outer_binder)) + } + + fn try_fold_inference_lifetime( + &mut self, + var: ::chalk_ir::InferenceVar, + outer_binder: ::chalk_ir::DebruijnIndex, + ) -> ::core::result::Result<::chalk_ir::Lifetime<#interner>, Self::Error> { + ::core::result::Result::Ok(::chalk_ir::fold::TypeFolder::fold_inference_lifetime(self, var, outer_binder)) + } + + fn try_fold_inference_const( + &mut self, + ty: ::chalk_ir::Ty<#interner>, + var: ::chalk_ir::InferenceVar, + outer_binder: ::chalk_ir::DebruijnIndex, + ) -> ::core::result::Result<::chalk_ir::Const<#interner>, Self::Error> { + ::core::result::Result::Ok(::chalk_ir::fold::TypeFolder::fold_inference_const(self, ty, var, outer_binder)) + } + + fn interner(&self) -> #interner { + ::chalk_ir::fold::TypeFolder::interner(self) + } + }, + ) +} diff --git a/chalk-engine/Cargo.toml b/chalk-engine/Cargo.toml index 9c9534c3c49..4d6f94b865a 100644 --- a/chalk-engine/Cargo.toml +++ 
b/chalk-engine/Cargo.toml @@ -1,21 +1,24 @@ [package] name = "chalk-engine" -version = "0.9.0" +version = "0.104.0-dev.0" description = "Core trait engine from Chalk project" -license = "Apache-2.0/MIT" +license = "MIT OR Apache-2.0" authors = ["Rust Compiler Team", "Chalk developers"] -repository = "https://github.com/rust-lang-nursery/chalk" +repository = "https://github.com/rust-lang/chalk" readme = "README.md" keywords = ["compiler", "traits", "prolog"] +edition = "2018" [features] -default = ["stack_protection"] -stack_protection = ["stacker"] +default = [] [dependencies] -stacker = { version = "0.1.2", optional = true } -rustc-hash = { version = "1.0.0" } +rustc-hash = { version = "1.1.0" } +tracing = "0.1" -[dependencies.chalk-macros] -version = "0.1.0" -path = "../chalk-macros" +chalk-derive = { version = "0.104.0-dev.0", path = "../chalk-derive" } +chalk-ir = { version = "0.104.0-dev.0", path = "../chalk-ir" } +chalk-solve = { version = "0.104.0-dev.0", path = "../chalk-solve" } + +[dev-dependencies] +chalk-integration = { path = "../chalk-integration" } diff --git a/chalk-engine/README.md b/chalk-engine/README.md index 51907628151..b396aa45668 100644 --- a/chalk-engine/README.md +++ b/chalk-engine/README.md @@ -1,3 +1,3 @@ -The core trait solving engine used in Chalk. This engine is meant to -be reused by rustc. Other projects may of course re-use it too, if you -have a need, but don't expect much stability in the interface yet. +The core crate for Chalk. + +See [Github](https://github.com/rust-lang/chalk) for up-to-date information. diff --git a/chalk-engine/src/README.md b/chalk-engine/src/README.md index 2dc965326d7..b7cf2d45346 100644 --- a/chalk-engine/src/README.md +++ b/chalk-engine/src/README.md @@ -1,7 +1,5 @@ # The on-demand SLG solver -## Description of how it works - The basis of the solver is the `Forest` type. A *forest* stores a collection of *tables* as well as a *stack*. 
Each *table* represents the stored results of a particular query that is being performed, as @@ -9,7 +7,7 @@ well as the various *strands*, which are basically suspended computations that may be used to find more answers. Tables are interdependent: solving one query may require solving others. -### Walkthrough +## Walkthrough Perhaps the easiest way to explain how the solver works is to walk through an example. Let's imagine that we have the following program: @@ -41,19 +39,23 @@ look for a table with this as the key: since the forest is empty, this lookup will fail, and we will create a new table T0, corresponding to the u-canonical goal Q. -**Ignoring negative reasoning and regions.** To start, we'll ignore -the possibility of negative goals like `not { Foo }`. We'll phase them -in later, as they bring several complications. +### Ignoring negative reasoning and regions + +To start, we'll ignore the possibility of negative goals like `not { +Foo }`. We'll phase them in later, as they bring several +complications. + +### Creating a table -**Creating a table.** When we first create a table, we also initialize -it with a set of *initial strands*. A "strand" is kind of like a -"thread" for the solver: it contains a particular way to produce an -answer. The initial set of strands for a goal like `Rc: Debug` -(i.e., a "domain goal") is determined by looking for *clauses* in the -environment. In Rust, these clauses derive from impls, but also from -where-clauses that are in scope. In the case of our example, there -would be three clauses, each coming from the program. Using a -Prolog-like notation, these look like: +When we first create a table, we also initialize it with a set of +*initial strands*. A "strand" is kind of like a "thread" for the +solver: it contains a particular way to produce an answer. The initial +set of strands for a goal like `Rc: Debug` (i.e., a "domain goal") +is determined by looking for *clauses* in the environment. 
In Rust, +these clauses derive from impls, but also from where-clauses that are +in scope. In the case of our example, there would be three clauses, +each coming from the program. Using a Prolog-like notation, these look +like: ``` (u32: Debug). @@ -66,13 +68,16 @@ these clauses to our goal of `Rc: Debug`. The first and third clauses are inapplicable because `u32` and `Vec` cannot be unified with `Rc`. The second clause, however, will work. -**What is a strand?** Let's talk a bit more about what a strand *is*. In the code, a strand +### What is a strand? + +Let's talk a bit more about what a strand *is*. In the code, a strand is the combination of an inference table, an X-clause, and (possibly) a selected subgoal from that X-clause. But what is an X-clause (`ExClause`, in the code)? An X-clause pulls together a few things: - The current state of the goal we are trying to prove; - A set of subgoals that have yet to be proven; +- A set of floundered subgoals (see the section on floundering below); - There are also a few things we're ignoring for now: - delayed literals, region constraints @@ -100,14 +105,15 @@ are both represented with an index.) For each strand, we also optionally store a *selected subgoal*. This is the subgoal after the turnstile (`:-`) that we are currently trying -to prove in this strand. Initally, when a strand is first created, +to prove in this strand. Initially, when a strand is first created, there is no selected subgoal. -**Activating a strand.** Now that we have created the table T0 and -initialized it with strands, we have to actually try and produce an -answer. We do this by invoking the `ensure_answer` operation on the -table: specifically, we say `ensure_answer(T0, A0)`, meaning "ensure -that there is a 0th answer". +### Activating a strand + +Now that we have created the table T0 and initialized it with strands, +we have to actually try and produce an answer. 
We do this by invoking +the `ensure_answer` operation on the table: specifically, we say +`ensure_answer(T0, A0)`, meaning "ensure that there is a 0th answer". Remember that tables store not only strands, but also a vector of cached answers. The first thing that `ensure_answer` does is to check @@ -134,14 +140,15 @@ Here, we write `selected(L, An)` to indicate that (a) the literal `L` is the selected subgoal and (b) which answer `An` we are looking for. We start out looking for `A0`. -**Processing the selected subgoal.** Next, we have to try and find an -answer to this selected goal. To do that, we will u-canonicalize it -and try to find an associated table. In this case, the u-canonical -form of the subgoal is `?0: Debug`: we don't have a table yet for -that, so we can create a new one, T1. As before, we'll initialize T1 -with strands. In this case, there will be three strands, because all -the program clauses are potentially applicable. Those three strands -will be: +### Processing the selected subgoal + +Next, we have to try and find an answer to this selected goal. To do +that, we will u-canonicalize it and try to find an associated +table. In this case, the u-canonical form of the subgoal is `?0: +Debug`: we don't have a table yet for that, so we can create a new +one, T1. As before, we'll initialize T1 with strands. In this case, +there will be three strands, because all the program clauses are +potentially applicable. Those three strands will be: - `(u32: Debug) :-`, derived from the program clause `(u32: Debug).`. - Note: This strand has no subgoals. @@ -163,12 +170,14 @@ Table T1 [?0: Debug] (Rc: Debug) :- (?V: Debug) ``` -**Delegation between tables.** Now that the active strand from T0 has -created the table T1, it can try to extract an answer. It does this -via that same `ensure_answer` operation we saw before. In this case, -the strand would invoke `ensure_answer(T1, A0)`, since we will start -with the first answer. 
This will cause T1 to activate its first -strand, `u32: Debug :-`. +### Delegation between tables + +Now that the active strand from T0 has created the table T1, it can +try to extract an answer. It does this via that same `ensure_answer` +operation we saw before. In this case, the strand would invoke +`ensure_answer(T1, A0)`, since we will start with the first +answer. This will cause T1 to activate its first strand, `u32: Debug +:-`. This strand is somewhat special: it has no subgoals at all. This means that the goal is proven. We can therefore add `u32: Debug` to the set @@ -195,7 +204,7 @@ Since we now have an answer, `ensure_answer(T1, A0)` will return `Ok` to the table T0, indicating that answer A0 is available. T0 now has the job of incorporating that result into its active strand. It does this in two ways. First, it creates a new strand that is looking for -the next possible answer of T1. Next, it incorpoates the answer from +the next possible answer of T1. Next, it incorporates the answer from A0 and removes the subgoal. The resulting state of table T0 is: ``` @@ -231,7 +240,123 @@ Here you can see how the forest captures both the answers we have created thus far *and* the strands that will let us try to produce more answers later on. -## Heritage and acroynms +### Floundering + +The first thing we do when we create a table is to initialize it with +a set of strands. These strands represent all the ways that one can +solve the table's associated goal. For an ordinary trait, we would +effectively create one strand per "relevant impl". But sometimes the +goals are too vague for this to be possible; other times, it may be possible +but just really inefficient, since all of those strands must be explored. + +As an example of when it may not be possible, consider a goal like +`?T: Sized`. 
This goal can in principle enumerate **every sized type** +that exists -- that includes not only struct/enum types, but also +closure types, fn types with arbitrary arity, tuple types with +arbitrary arity, and so forth. In other words, there are not only an +infinite set of **answers** but actually an infinite set of +**strands**. The same applies to auto traits like `Send` as well as +"hybrid" traits like `Clone`, which contain *some* auto-generated sets +of impls. + +Another example of floundering comes from negative logic. In general, +we cannot process negative subgoals if they have unbound existential +variables, such as `not { Vec: Foo }`. This is because we can only +enumerate things that *do* match a given trait (or which *are* +provable, more generally). We cannot enumerate out possible types `?T` +that *are not* provable (there is an infinite set, to be sure). + +To handle this, we allow tables to enter a **floundered** state. This +state begins when we try to create the program clauses for a table -- +if that is not possible (e.g., in one of the cases above) then the +table enters a floundered state. Attempts to get an answer from a +floundered table result in an error (e.g., +`RecursiveSearchFail::Floundered`). + +Whenever a goal results in a floundered result, that goal is placed +into a distinct list (the "floundered subgoals"). We then go on and +process the rest of the subgoals. Once all the normal subgoals have +completed, floundered subgoals are removed from the floundered list +and re-attempted: the idea is that we may have accumulated more type +information in the meantime. If they continue to flounder, then we +stop. + +Let's look at an example. Imagine that we have: + +```rust +trait Foo { } +trait Bar { } + +impl Foo for T { } + +impl Bar for u32 { } +impl Bar for i32 { } +``` + +Now imagine we are trying to prove `?T: Foo`. 
There is only one impl, +so we will create a state like: + +``` +(?T: Foo) :- (?T: Send), (?T: Bar) +``` + +When we attempt to solve `?T: Send`, however, that subgoal will +flounder, because `Send` is an auto-trait. So it will be put into a +floundered list: + +``` +(?T: Foo) :- (?T: Bar) [ floundered: (?T: Send) ] +``` + +and we will go on to solve `?T: Bar`. `Bar` is an ordinary trait -- so +we can enumerate two strands (one for `u32` and one for `i32`). When +we process the first answer, we wind up with: + +``` +(u32: Foo) :- [ floundered: (u32: Send) ] +``` + +At this point we can move the floundered subgoal back into the main +list and process: + +``` +(u32: Foo) :- (u32: Send) +``` + +This time, the goal does not flounder. + +But how do we detect when it makes sense to move a floundered subgoal +into the main list? To handle this, we use a timestamp scheme. We +keep a counter as part of the strand -- each time we succeed in +solving some subgoal, we increment the counter, as that *may* have +provided more information about some type variables. When we move a +goal to the floundered list, we also track the current value of the +timestamp. Then, when it comes time to move things *from* the +floundered list, we can compare if the timestamp has been changed +since the goal was marked as floundering. If not, then no new +information can have been attempted, and we can mark the current table +as being floundered itself. + +This mechanism allows floundered to propagate up many levels, e.g. +in an example like this: + +```rust +trait Foo { } +trait Bar { } +trait Baz { } + +impl Foo for T { } + +impl Bar for u32 { } +impl Bar for i32 { } + +impl Baz for T { } +``` + +Here, solving `?T: Baz` will in turn invoke `?T: Sized` -- this +floundering state will be propagated up to the `?T: Foo` table. 
+ +## Heritage and acronyms This solver implements the SLG solving technique, though extended to accommodate hereditary harrop (HH) predicates, as well as the needs of diff --git a/chalk-engine/src/context.rs b/chalk-engine/src/context.rs index 28d9cdf19cf..fa418dfd49c 100644 --- a/chalk-engine/src/context.rs +++ b/chalk-engine/src/context.rs @@ -1,378 +1,72 @@ -use fallible::Fallible; -use hh::HhGoal; -use {DelayedLiteral, ExClause, SimplifiedAnswer}; +//! Defines traits used to embed the chalk-engine in another crate. +//! +//! chalk and rustc both define types which implement the traits in this +//! module. This allows each user of chalk-engine to define their own +//! `DomainGoal` type, add arena lifetime parameters, and more. See +//! [`Context`] trait for a list of types. + +use crate::CompleteAnswer; +use chalk_ir::interner::Interner; +use chalk_ir::Substitution; use std::fmt::Debug; -use std::hash::Hash; -pub(crate) mod prelude; +pub enum AnswerResult { + /// The next available answer. + Answer(CompleteAnswer), -/// The "context" in which the SLG solver operates. It defines all the -/// types that the SLG solver may need to refer to, as well as a few -/// very simple interconversion methods. -/// -/// At any given time, the SLG solver may have more than one context -/// active. First, there is always the *global* context, but when we -/// are in the midst of pursuing some particular strand, we will -/// instantiate a second context just for that work, via the -/// `instantiate_ucanonical_goal` and `instantiate_ex_clause` methods. -/// -/// In the chalk implementation, these two contexts are mapped to the -/// same type. But in the rustc implementation, this second context -/// corresponds to a fresh arena, and data allocated in that second -/// context will be freed once the work is done. (The "canonicalizing" -/// steps offer a way to convert data from the inference context back -/// into the global context.) 
-/// -/// FIXME: Clone and Debug bounds are just for easy derive, they are -/// not actually necessary. But dang are they convenient. -pub trait Context: Clone + Debug { - type CanonicalExClause: Debug; + /// No answer could be returned because there are no more solutions. + NoMoreSolutions, - /// A map between universes. These are produced when - /// u-canonicalizing something; they map canonical results back to - /// the universes from the original. - type UniverseMap: Clone + Debug; + /// No answer could be returned because the goal has floundered. + Floundered, - /// Extracted from a canonicalized substitution or canonicalized ex clause, this is the type of - /// substitution that is fully normalized with respect to inference variables. - type InferenceNormalizedSubst: Debug; - - /// A canonicalized `GoalInEnvironment` -- that is, one where all - /// free inference variables have been bound into the canonical - /// binder. See [the rustc-guide] for more information. - /// - /// [the rustc-guide]: https://rust-lang-nursery.github.io/rustc-guide/traits-canonicalization.html - type CanonicalGoalInEnvironment: Debug; - - /// A u-canonicalized `GoalInEnvironment` -- this is one where the - /// free universes are renumbered to consecutive integers starting - /// from U1 (but preserving their relative order). - type UCanonicalGoalInEnvironment: Debug + Clone + Eq + Hash; - - /// A final solution that is passed back to the user. This is - /// completely opaque to the SLG solver; it is produced by - /// `make_solution`. - type Solution; - - /// Part of an answer: represents a canonicalized substitution, - /// combined with region constraints. See [the rustc-guide] for more information. 
- /// - /// [the rustc-guide]: https://rust-lang-nursery.github.io/rustc-guide/traits-canonicalization.html#canonicalizing-the-query-result - type CanonicalConstrainedSubst: Clone + Debug + Eq + Hash; - - /// Represents a substitution from the "canonical variables" found - /// in a canonical goal to specific values. - type Substitution: Debug; - - /// Represents a region constraint that will be propagated back - /// (but not verified). - type RegionConstraint: Debug; - - /// Represents a goal along with an environment. - type GoalInEnvironment: Debug + Clone + Eq + Hash; - - /// Represents a set of hypotheses that are assumed to be true. - type Environment: Debug + Clone; - - /// Goals correspond to things we can prove. - type Goal: Clone + Debug + Eq; - - /// A goal that can be targeted by a program clause. The SLG - /// solver treats these opaquely; in contrast, it understands - /// "meta" goals like `G1 && G2` and so forth natively. - type DomainGoal: Debug; - - /// A "higher-order" goal, quantified over some types and/or - /// lifetimes. When you have a quantification, like `forall { G - /// }` or `exists { G }`, this represents the ` { G }` part. - /// - /// (In Lambda Prolog, this would be a "lambda predicate", like `T - /// \ Goal`). - type BindersGoal: Debug; - - /// A term that can be quantified over and unified -- in current - /// Chalk, either a type or lifetime. - type Parameter: Debug; - - /// A rule like `DomainGoal :- Goal`. - /// - /// `resolvent_clause` combines a program-clause and a concrete - /// goal we are trying to solve to produce an ex-clause. - type ProgramClause: Debug; - - /// A vector of program clauses. - type ProgramClauses: Debug; - - /// How to relate two kinds when unifying: for example in rustc, we - /// may want to unify parameters either for the sub-typing relation or for - /// the equality relation. 
- type Variance; - - /// The successful result from unification: contains new subgoals - /// and things that can be attached to an ex-clause. - type UnificationResult; - - /// Given an environment and a goal, glue them together to create - /// a `GoalInEnvironment`. - fn goal_in_environment( - environment: &Self::Environment, - goal: Self::Goal, - ) -> Self::GoalInEnvironment; + // No answer could be returned *yet*, because we exceeded our + // quantum (`should_continue` returned false). + QuantumExceeded, } -pub trait ContextOps: Sized + Clone + Debug + AggregateOps { - /// True if this is a coinductive goal -- e.g., proving an auto trait. - fn is_coinductive(&self, goal: &C::UCanonicalGoalInEnvironment) -> bool; - - /// Create an inference table for processing a new goal and instantiate that goal - /// in that context, returning "all the pieces". - /// - /// More specifically: given a u-canonical goal `arg`, creates a - /// new inference table `T` and populates it with the universes - /// found in `arg`. Then, creates a substitution `S` that maps - /// each bound variable in `arg` to a fresh inference variable - /// from T. Returns: - /// - /// - the table `T` - /// - the substitution `S` - /// - the environment and goal found by substitution `S` into `arg` - fn instantiate_ucanonical_goal( - &self, - arg: &C::UCanonicalGoalInEnvironment, - op: impl WithInstantiatedUCanonicalGoal, - ) -> R; - - fn instantiate_ex_clause( - &self, - num_universes: usize, - canonical_ex_clause: &C::CanonicalExClause, - op: impl WithInstantiatedExClause, - ) -> R; - - /// Extracts the inner normalized substitution from a canonical ex-clause. - fn inference_normalized_subst_from_ex_clause( - canon_ex_clause: &C::CanonicalExClause, - ) -> &C::InferenceNormalizedSubst; - - /// Extracts the inner normalized substitution from a canonical constraint subst. 
- fn inference_normalized_subst_from_subst( - canon_ex_clause: &C::CanonicalConstrainedSubst, - ) -> &C::InferenceNormalizedSubst; - - /// True if this solution has no region constraints. - fn empty_constraints(ccs: &C::CanonicalConstrainedSubst) -> bool; - - fn canonical(u_canon: &C::UCanonicalGoalInEnvironment) -> &C::CanonicalGoalInEnvironment; - fn is_trivial_substitution(u_canon: &C::UCanonicalGoalInEnvironment, - canonical_subst: &C::CanonicalConstrainedSubst) -> bool; - fn num_universes(&C::UCanonicalGoalInEnvironment) -> usize; - - /// Convert a goal G *from* the canonical universes *into* our - /// local universes. This will yield a goal G' that is the same - /// but for the universes of universally quantified names. - fn map_goal_from_canonical( - &C::UniverseMap, - value: &C::CanonicalGoalInEnvironment, - ) -> C::CanonicalGoalInEnvironment; - - /// Convert a substitution *from* the canonical universes *into* - /// our local universes. This will yield a substitution S' that is - /// the same but for the universes of universally quantified - /// names. - fn map_subst_from_canonical( - &C::UniverseMap, - value: &C::CanonicalConstrainedSubst, - ) -> C::CanonicalConstrainedSubst; +impl AnswerResult { + pub fn is_answer(&self) -> bool { + matches!(self, Self::Answer(_)) + } + + pub fn answer(self) -> CompleteAnswer { + match self { + Self::Answer(answer) => answer, + _ => panic!("Not an answer."), + } + } + + pub fn is_no_more_solutions(&self) -> bool { + matches!(self, Self::NoMoreSolutions) + } + + pub fn is_quantum_exceeded(&self) -> bool { + matches!(self, Self::QuantumExceeded) + } } -/// Callback trait for `instantiate_ucanonical_goal`. Unlike the other -/// traits in this file, this is not implemented by the context crate, but rather -/// by code in this crate. -/// -/// This basically plays the role of an `FnOnce` -- but unlike an -/// `FnOnce`, the `with` method is generic. 
-pub trait WithInstantiatedUCanonicalGoal { - type Output; - - fn with( - self, - infer: &mut dyn InferenceTable, - subst: I::Substitution, - environment: I::Environment, - goal: I::Goal, - ) -> Self::Output; +impl Debug for AnswerResult { + fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + AnswerResult::Answer(answer) => write!(fmt, "{:?}", answer), + AnswerResult::Floundered => write!(fmt, "Floundered"), + AnswerResult::NoMoreSolutions => write!(fmt, "None"), + AnswerResult::QuantumExceeded => write!(fmt, "QuantumExceeded"), + } + } } -/// Callback trait for `instantiate_ex_clause`. Unlike the other -/// traits in this file, this is not implemented by the context crate, -/// but rather by code in this crate. -/// -/// This basically plays the role of an `FnOnce` -- but unlike an -/// `FnOnce`, the `with` method is generic. -pub trait WithInstantiatedExClause { - type Output; - - fn with( - self, - infer: &mut dyn InferenceTable, - ex_clause: ExClause, - ) -> Self::Output; -} - -/// Methods for combining solutions to yield an aggregate solution. -pub trait AggregateOps { - fn make_solution( - &self, - root_goal: &C::CanonicalGoalInEnvironment, - simplified_answers: impl AnswerStream, - ) -> Option; -} - -/// An "inference table" contains the state to support unification and -/// other operations on terms. -pub trait InferenceTable: - ResolventOps + TruncateOps + UnificationOps -{ - /// Convert the context's goal type into the `HhGoal` type that - /// the SLG solver understands. The expectation is that the - /// context's goal type has the same set of variants, but with - /// different names and a different setup. If you inspect - /// `HhGoal`, you will see that this is a "shallow" or "lazy" - /// conversion -- that is, we convert the outermost goal into an - /// `HhGoal`, but the goals contained within are left as context - /// goals. 
- fn into_hh_goal(&mut self, goal: I::Goal) -> HhGoal; - - // Used by: simplify - fn add_clauses( - &mut self, - env: &I::Environment, - clauses: I::ProgramClauses, - ) -> I::Environment; - - /// Upcast this domain goal into a more general goal. - fn into_goal(&self, domain_goal: I::DomainGoal) -> I::Goal; - - /// Create a "cannot prove" goal (see `HhGoal::CannotProve`). - fn cannot_prove(&self) -> I::Goal; -} - -/// Methods for unifying and manipulating terms and binders. -pub trait UnificationOps { - /// Returns the set of program clauses that might apply to - /// `goal`. (This set can be over-approximated, naturally.) - fn program_clauses( - &self, - environment: &I::Environment, - goal: &I::DomainGoal, - ) -> Vec; - - // Used by: simplify - fn instantiate_binders_universally(&mut self, arg: &I::BindersGoal) -> I::Goal; - - // Used by: simplify - fn instantiate_binders_existentially(&mut self, arg: &I::BindersGoal) -> I::Goal; - - // Used by: logic (but for debugging only) - fn debug_ex_clause(&mut self, value: &'v ExClause) -> Box; - - // Used by: logic - fn canonicalize_goal(&mut self, value: &I::GoalInEnvironment) -> C::CanonicalGoalInEnvironment; - - // Used by: logic - fn canonicalize_ex_clause(&mut self, value: &ExClause) -> C::CanonicalExClause; - - // Used by: logic - fn canonicalize_constrained_subst( - &mut self, - subst: I::Substitution, - constraints: Vec, - ) -> C::CanonicalConstrainedSubst; - - // Used by: logic - fn u_canonicalize_goal( - &mut self, - value: &C::CanonicalGoalInEnvironment, - ) -> (C::UCanonicalGoalInEnvironment, C::UniverseMap); - - fn sink_answer_subset( - &self, - value: &C::CanonicalConstrainedSubst, - ) -> I::CanonicalConstrainedSubst; - - fn lift_delayed_literal( - &self, - value: DelayedLiteral, - ) -> DelayedLiteral; - - // Used by: logic - fn invert_goal(&mut self, value: &I::GoalInEnvironment) -> Option; - - // Used by: simplify - fn unify_parameters( - &mut self, - environment: &I::Environment, - variance: I::Variance, - 
a: &I::Parameter, - b: &I::Parameter, - ) -> Fallible; - - /// Add the residual subgoals as new subgoals of the ex-clause. - /// Also add region constraints. - fn into_ex_clause(&mut self, result: I::UnificationResult, ex_clause: &mut ExClause); -} - -/// "Truncation" (called "abstraction" in the papers referenced below) -/// refers to the act of modifying a goal or answer that has become -/// too large in order to guarantee termination. The SLG solver -/// doesn't care about the precise truncation function, so long as -/// it's deterministic and so forth. -/// -/// Citations: -/// -/// - Terminating Evaluation of Logic Programs with Finite Three-Valued Models -/// - Riguzzi and Swift; ACM Transactions on Computational Logic 2013 -/// - Radial Restraint -/// - Grosof and Swift; 2013 -pub trait TruncateOps { - /// If `subgoal` is too large, return a truncated variant (else - /// return `None`). - fn truncate_goal(&mut self, subgoal: &I::GoalInEnvironment) -> Option; - - /// If `subst` is too large, return a truncated variant (else - /// return `None`). - fn truncate_answer(&mut self, subst: &I::Substitution) -> Option; -} - -pub trait ResolventOps { - /// Combines the `goal` (instantiated within `infer`) with the - /// given program clause to yield the start of a new strand (a - /// canonical ex-clause). - /// - /// The bindings in `infer` are unaffected by this operation. - fn resolvent_clause( - &mut self, - environment: &I::Environment, - goal: &I::DomainGoal, - subst: &I::Substitution, - clause: &I::ProgramClause, - ) -> Fallible; - - fn apply_answer_subst( - &mut self, - ex_clause: ExClause, - selected_goal: &I::GoalInEnvironment, - answer_table_goal: &C::CanonicalGoalInEnvironment, - canonical_answer_subst: &C::CanonicalConstrainedSubst, - ) -> Fallible>; -} +pub trait AnswerStream { + /// Gets the next answer for a given goal, but doesn't increment the answer index. + /// Calling this or `next_answer` again will give the same answer. 
+ fn peek_answer(&mut self, should_continue: impl Fn() -> bool) -> AnswerResult; -pub trait AnswerStream { - fn peek_answer(&mut self) -> Option>; - fn next_answer(&mut self) -> Option>; + /// Gets the next answer for a given goal, incrementing the answer index. + /// Calling this or `peek_answer` again will give the next answer. + fn next_answer(&mut self, should_continue: impl Fn() -> bool) -> AnswerResult; /// Invokes `test` with each possible future answer, returning true immediately /// if we find any answer for which `test` returns true. - fn any_future_answer(&mut self, test: impl FnMut(&C::InferenceNormalizedSubst) -> bool) - -> bool; + fn any_future_answer(&self, test: impl Fn(&Substitution) -> bool) -> bool; } diff --git a/chalk-engine/src/context/prelude.rs b/chalk-engine/src/context/prelude.rs deleted file mode 100644 index 4cb85c47173..00000000000 --- a/chalk-engine/src/context/prelude.rs +++ /dev/null @@ -1,8 +0,0 @@ -#![allow(unused_imports)] // rustc bug - -pub(crate) use super::Context; -pub(crate) use super::ContextOps; -pub(crate) use super::AggregateOps; -pub(crate) use super::ResolventOps; -pub(crate) use super::TruncateOps; -pub(crate) use super::InferenceTable; diff --git a/chalk-engine/src/derived.rs b/chalk-engine/src/derived.rs index 37b95ad1909..b3cdc3d0db7 100644 --- a/chalk-engine/src/derived.rs +++ b/chalk-engine/src/derived.rs @@ -2,71 +2,15 @@ // because the `#[derive()]` would add requirements onto the context // object that are not needed. 
-use std::cmp::{PartialEq, Eq}; +use super::*; +use std::cmp::{Eq, PartialEq}; use std::hash::{Hash, Hasher}; use std::mem; -use super::*; - -impl PartialEq for DelayedLiteralSet { - fn eq(&self, other: &Self) -> bool { - let DelayedLiteralSet { delayed_literals: a1 } = self; - let DelayedLiteralSet { delayed_literals: a2 } = other; - a1 == a2 - } -} - -impl Eq for DelayedLiteralSet { -} /////////////////////////////////////////////////////////////////////////// -impl PartialEq for DelayedLiteral { - fn eq(&self, other: &Self) -> bool { - if mem::discriminant(self) != mem::discriminant(other) { - return false; - } - - match (self, other) { - (DelayedLiteral::CannotProve(()), DelayedLiteral::CannotProve(())) => - true, - - (DelayedLiteral::Negative(a1), DelayedLiteral::Negative(a2)) => - a1 == a2, - - (DelayedLiteral::Positive(a1, b1), DelayedLiteral::Positive(a2, b2)) => - a1 == a2 && b1 == b2, - - _ => panic!() - } - } -} - -impl Eq for DelayedLiteral { -} - -impl Hash for DelayedLiteral { - fn hash(&self, hasher: &mut H) { - mem::discriminant(self).hash(hasher); - - match self { - DelayedLiteral::CannotProve(()) => (), - - DelayedLiteral::Negative(a) => { - a.hash(hasher); - } - - DelayedLiteral::Positive(a, b) => { - a.hash(hasher); - b.hash(hasher); - } - } - } -} - -/////////////////////////////////////////////////////////////////////////// - -impl PartialEq for Literal { - fn eq(&self, other: &Literal) -> bool { +impl PartialEq for Literal { + fn eq(&self, other: &Literal) -> bool { match (self, other) { (Literal::Positive(goal1), Literal::Positive(goal2)) | (Literal::Negative(goal1), Literal::Negative(goal2)) => goal1 == goal2, @@ -76,10 +20,9 @@ impl PartialEq for Literal { } } -impl Eq for Literal { -} +impl Eq for Literal {} -impl Hash for Literal { +impl Hash for Literal { fn hash(&self, state: &mut H) { mem::discriminant(self).hash(state); match self { @@ -89,4 +32,3 @@ impl Hash for Literal { } } } - diff --git a/chalk-engine/src/fallible.rs 
b/chalk-engine/src/fallible.rs deleted file mode 100644 index b54a7af2664..00000000000 --- a/chalk-engine/src/fallible.rs +++ /dev/null @@ -1,8 +0,0 @@ -/// Many of our internal operations (e.g., unification) are an attempt -/// to perform some operation which may not complete. -pub type Fallible = Result; - -/// Indicates that the attempted operation has "no solution" -- i.e., -/// cannot be performed. -#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct NoSolution; diff --git a/chalk-engine/src/forest.rs b/chalk-engine/src/forest.rs index 83daebc0436..410b9cd0a6b 100644 --- a/chalk-engine/src/forest.rs +++ b/chalk-engine/src/forest.rs @@ -1,181 +1,116 @@ -use {DepthFirstNumber, SimplifiedAnswer, TableIndex}; -use context::prelude::*; -use context::AnswerStream; -use logic::RootSearchFail; -use stack::{Stack, StackIndex}; -use tables::Tables; -use table::{Answer, AnswerIndex}; - -pub struct Forest> { - #[allow(dead_code)] - pub(crate) context: CO, - pub(crate) tables: Tables, - pub(crate) stack: Stack, - - dfn: DepthFirstNumber, +use crate::context::{AnswerResult, AnswerStream}; +use crate::logic::RootSearchFail; +use crate::slg::SlgContextOps; +use crate::table::AnswerIndex; +use crate::tables::Tables; +use crate::{TableIndex, TimeStamp}; + +use chalk_ir::interner::Interner; +use chalk_ir::{Goal, InEnvironment, Substitution, UCanonical}; +use tracing::debug; + +pub(crate) struct Forest { + pub(crate) tables: Tables, + + /// This is a clock which always increases. It is + /// incremented every time a new subgoal is followed. + /// This effectively gives us way to track what depth + /// and loop a table or strand was last followed. + pub(crate) clock: TimeStamp, } -impl> Forest { - pub fn new(context: CO) -> Self { +impl Forest { + pub fn new() -> Self { Forest { - context, tables: Tables::new(), - stack: Stack::default(), - dfn: DepthFirstNumber::MIN, + clock: TimeStamp::default(), } } - // Gets the next depth-first number. 
This number never decreases. - pub(super) fn next_dfn(&mut self) -> DepthFirstNumber { - self.dfn.next() - } - - /// Finds the first N answers, looping as much as needed to get - /// them. - /// - /// Thanks to subgoal abstraction and so forth, this should always - /// terminate. - pub fn force_answers( - &mut self, - goal: C::UCanonicalGoalInEnvironment, - num_answers: usize, - ) -> Vec> { - let table = self.get_or_create_table_for_ucanonical_goal(goal); - let mut answers = Vec::with_capacity(num_answers); - for i in 0..num_answers { - let i = AnswerIndex::from(i); - loop { - match self.ensure_root_answer(table, i) { - Ok(()) => break, - Err(RootSearchFail::QuantumExceeded) => continue, - Err(RootSearchFail::NoMoreSolutions) => return answers, - } - } - - answers.push(self.answer(table, i).clone()); - } - - answers + // Gets the next clock TimeStamp. This will never decrease. + pub(crate) fn increment_clock(&mut self) -> TimeStamp { + self.clock.increment(); + self.clock } /// Returns a "solver" for a given goal in the form of an /// iterator. Each time you invoke `next`, it will do the work to /// extract one more answer. These answers are cached in between /// invocations. Invoking `next` fewer times is preferable =) - fn iter_answers<'f>( + pub fn iter_answers<'f>( &'f mut self, - goal: &C::UCanonicalGoalInEnvironment, - ) -> impl AnswerStream + 'f { - let table = self.get_or_create_table_for_ucanonical_goal(goal.clone()); + context: &'f SlgContextOps<'f, I>, + goal: &UCanonical>>, + ) -> impl AnswerStream + 'f { + let table = self.get_or_create_table_for_ucanonical_goal(context, goal.clone()); let answer = AnswerIndex::ZERO; ForestSolver { forest: self, + context, table, answer, } } - - /// Solves a given goal, producing the solution. This will do only - /// as much work towards `goal` as it has to (and that works is - /// cached for future attempts). 
- pub fn solve(&mut self, goal: &C::UCanonicalGoalInEnvironment) -> Option { - self.context.clone().make_solution(CO::canonical(&goal), self.iter_answers(goal)) - } - - /// True if all the tables on the stack starting from `depth` and - /// continuing until the top of the stack are coinductive. - /// - /// Example: Given a program like: - /// - /// ``` - /// struct Foo { a: Option> } - /// struct Bar { a: Option> } - /// trait XXX { } - /// impl XXX for T { } - /// ``` - /// - /// and then a goal of `Foo: XXX`, we would eventually wind up - /// with a stack like this: - /// - /// | StackIndex | Table Goal | - /// | ---------- | ----------- | - /// | 0 | `Foo: XXX` | - /// | 1 | `Foo: Send` | - /// | 2 | `Bar: Send` | - /// - /// Here, the top of the stack is `Bar: Send`. And now we are - /// asking `top_of_stack_is_coinductive_from(1)` -- the answer - /// would be true, since `Send` is an auto trait, which yields a - /// coinductive goal. But `top_of_stack_is_coinductive_from(0)` is - /// false, since `XXX` is not an auto trait. - pub(super) fn top_of_stack_is_coinductive_from(&self, depth: StackIndex) -> bool { - self.stack.top_of_stack_from(depth).all(|d| { - let table = self.stack[d].table; - self.tables[table].coinductive_goal - }) - } - - /// Useful for testing. 
- pub fn num_cached_answers_for_goal(&mut self, goal: &C::UCanonicalGoalInEnvironment) -> usize { - let table = self.get_or_create_table_for_ucanonical_goal(goal.clone()); - self.tables[table].num_cached_answers() - } } -struct ForestSolver<'forest, C: Context + 'forest, CO: ContextOps + 'forest> { - forest: &'forest mut Forest, +struct ForestSolver<'me, I: Interner> { + forest: &'me mut Forest, + context: &'me SlgContextOps<'me, I>, table: TableIndex, answer: AnswerIndex, } -impl<'forest, C, CO: ContextOps> AnswerStream for ForestSolver<'forest, C, CO> -where - C: Context, -{ - fn peek_answer(&mut self) -> Option> { +impl<'me, I: Interner> AnswerStream for ForestSolver<'me, I> { + /// # Panics + /// + /// Panics if a negative cycle was detected. + fn peek_answer(&mut self, should_continue: impl Fn() -> bool) -> AnswerResult { loop { - match self.forest.ensure_root_answer(self.table, self.answer) { - Ok(()) => { - let answer = self.forest.answer(self.table, self.answer); - - // FIXME(rust-lang-nursery/chalk#79) -- if answer - // has delayed literals, we *should* try to - // simplify here (which might involve forcing - // `table` and its dependencies to completion. But - // instead we'll err on the side of ambiguity for - // now. This will sometimes lose us completeness - // around negative reasoning (we'll give ambig - // when we could have given a concrete yes/no - // answer). 
- - let simplified_answer = SimplifiedAnswer { - subst: answer.subst.clone(), - ambiguous: !answer.delayed_literals.is_empty(), - }; + match self + .forest + .root_answer(self.context, self.table, self.answer) + { + Ok(answer) => { + debug!(answer = ?(&answer)); + return AnswerResult::Answer(answer); + } - return Some(simplified_answer); + Err(RootSearchFail::InvalidAnswer) => { + self.answer.increment(); + } + Err(RootSearchFail::Floundered) => { + return AnswerResult::Floundered; } Err(RootSearchFail::NoMoreSolutions) => { - return None; + return AnswerResult::NoMoreSolutions; } - Err(RootSearchFail::QuantumExceeded) => {} + Err(RootSearchFail::QuantumExceeded) => { + if !should_continue() { + return AnswerResult::QuantumExceeded; + } + } + + Err(RootSearchFail::NegativeCycle) => { + // Negative cycles *ought* to be avoided by construction. Hence panic + // if we find one, as that likely indicates a problem in the chalk-solve + // lowering rules. (In principle, we could propagate this error out, + // and let chalk-solve do the asserting, but that seemed like it would + // complicate the function signature more than it's worth.) 
+ panic!("negative cycle was detected"); + } } } } - fn next_answer(&mut self) -> Option> { - self.peek_answer().map(|answer| { - self.answer.increment(); - answer - }) + fn next_answer(&mut self, should_continue: impl Fn() -> bool) -> AnswerResult { + let answer = self.peek_answer(should_continue); + self.answer.increment(); + answer } - fn any_future_answer( - &mut self, - test: impl FnMut(&C::InferenceNormalizedSubst) -> bool, - ) -> bool { + fn any_future_answer(&self, test: impl Fn(&Substitution) -> bool) -> bool { self.forest.any_future_answer(self.table, self.answer, test) } } diff --git a/chalk-engine/src/hh.rs b/chalk-engine/src/hh.rs deleted file mode 100644 index 156addbcb13..00000000000 --- a/chalk-engine/src/hh.rs +++ /dev/null @@ -1,25 +0,0 @@ -use context::Context; - -#[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -/// A general goal; this is the full range of questions you can pose to Chalk. -pub enum HhGoal { - /// Introduces a binding at depth 0, shifting other bindings up - /// (deBruijn index). - ForAll(C::BindersGoal), - Exists(C::BindersGoal), - Implies(C::ProgramClauses, C::Goal), - And(C::Goal, C::Goal), - Not(C::Goal), - Unify(C::Variance, C::Parameter, C::Parameter), - DomainGoal(C::DomainGoal), - - /// Indicates something that cannot be proven to be true or false - /// definitively. This can occur with overflow but also with - /// unifications of placeholder variables like `forall { X = Y - /// }`. Of course, that statement is false, as there exist types - /// X, Y where `X = Y` is not true. But we treat it as "cannot - /// prove" so that `forall { not { X = Y } }` also winds up - /// as cannot prove. - CannotProve, -} - diff --git a/chalk-engine/src/lib.rs b/chalk-engine/src/lib.rs index f2952ad5872..f139f5cc033 100644 --- a/chalk-engine/src/lib.rs +++ b/chalk-engine/src/lib.rs @@ -1,9 +1,13 @@ //! An alternative solver based around the SLG algorithm, which -//! implements the well-formed semantics. This algorithm is very -//! 
closed based on the description found in the following paper, -//! which I will refer to in the comments as EWFS: +//! implements the well-founded semantics. For an overview of how the solver +//! works, see [The On-Demand SLG Solver][guide] in the chalk book. //! -//! > Efficient Top-Down Computation of Queries Under the Well-formed Semantics +//! [guide]: https://rust-lang.github.io/chalk/book/engine/slg.html +//! +//! This algorithm is very closely based on the description found in the +//! following paper, which I will refer to in the comments as EWFS: +//! +//! > Efficient Top-Down Computation of Queries Under the Well-founded Semantics +//! > (Chen, Swift, and Warren; Journal of Logic Programming '95) //! //! However, to understand that paper, I would recommend first @@ -37,7 +41,7 @@ //! with hereditary harrop predicates and our version of unification //! (which produces subgoals). I believe those to be largely faithful //! extensions. However, there are some other places where I -//! intentionally dieverged from the semantics as described in the +//! intentionally diverged from the semantics as described in the //! papers -- e.g. by more aggressively approximating -- which I //! marked them with a comment DIVERGENCE. Those places may want to be //! evaluated in the future. @@ -49,149 +53,167 @@ //! - HH: Hereditary harrop predicates. What Chalk deals in. //! Popularized by Lambda Prolog.
-#![feature(in_band_lifetimes)] -#![feature(step_trait)] -#![feature(non_modrs_mods)] - -#[macro_use] -extern crate chalk_macros; - -#[cfg(feature = "stack_protection")] -extern crate stacker; - -extern crate rustc_hash; - -use context::Context; -use rustc_hash::FxHashSet; use std::cmp::min; use std::usize; +use chalk_derive::{HasInterner, TypeFoldable, TypeVisitable}; +use chalk_ir::interner::Interner; +use chalk_ir::{ + AnswerSubst, Canonical, ConstrainedSubst, Constraint, DebruijnIndex, Goal, InEnvironment, + Substitution, +}; +use std::ops::ControlFlow; + pub mod context; mod derived; -pub mod fallible; pub mod forest; -pub mod hh; mod logic; +mod normalize_deep; mod simplify; +pub mod slg; +pub mod solve; mod stack; mod strand; mod table; mod tables; index_struct! { - pub struct TableIndex { // FIXME: pub b/c Fold - value: usize, - } -} - -/// The StackIndex identifies the position of a table's goal in the -/// stack of goals that are actively being processed. Note that once a -/// table is completely evaluated, it may be popped from the stack, -/// and hence no longer have a stack index. -index_struct! { - struct StackIndex { + pub struct TableIndex { // FIXME: pub b/c TypeFoldable value: usize, } } -/// The `DepthFirstNumber` (DFN) is a sequential number assigned to -/// each goal when it is first encountered. The naming (taken from -/// EWFS) refers to the idea that this number tracks the index of when -/// we encounter the goal during a depth-first traversal of the proof -/// tree. -#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] -struct DepthFirstNumber { - value: u64, -} - /// The paper describes these as `A :- D | G`. -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub struct ExClause { +#[derive(Clone, Debug, PartialEq, Eq, Hash, TypeFoldable, TypeVisitable, HasInterner)] +pub struct ExClause { /// The substitution which, applied to the goal of our table, /// would yield A. 
- pub subst: C::Substitution, + pub subst: Substitution, - /// Delayed literals: things that we depend on negatively, - /// but which have not yet been fully evaluated. - pub delayed_literals: Vec>, + /// True if any subgoals were depended upon negatively and + /// were not fully evaluated, or if we encountered a `CannotProve` + /// goal. (In the full SLG algorithm, we would use delayed literals here, + /// but we don't bother, as we don't need that support.) + pub ambiguous: bool, /// Region constraints we have accumulated. - pub constraints: Vec, + pub constraints: Vec>>, /// Subgoals: literals that must be proven - pub subgoals: Vec>, + pub subgoals: Vec>, + + /// We assume that negative literals cannot have coinductive cycles. + pub delayed_subgoals: Vec>>, + + /// Time stamp that is incremented each time we find an answer to + /// some subgoal. This is used to figure out whether any of the + /// floundered subgoals may no longer be floundered: we record the + /// current time when we add something to the list of floundered + /// subgoals, and then we can compare whether its value has + /// changed since then. This is not the same `TimeStamp` of + /// `Forest`'s clock. + pub answer_time: TimeStamp, + + /// List of subgoals that have floundered. See `FlounderedSubgoal` + /// for more information. + pub floundered_subgoals: Vec>, } -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -struct SimplifiedAnswers { - answers: Vec>, +/// The "time stamp" is a simple clock that gets incremented each time +/// we encounter a positive answer in processing a particular +/// strand. This is used as an optimization to help us figure out when +/// we *may* have changed inference variables. 
+#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct TimeStamp { + clock: u64, } -#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct SimplifiedAnswer { - /// A fully instantiated version of the goal for which the query - /// is true (including region constraints). - pub subst: C::CanonicalConstrainedSubst, +impl TimeStamp { + const MAX: TimeStamp = TimeStamp { + clock: ::std::u64::MAX, + }; - /// If this flag is set, then the answer could be neither proven - /// nor disproven. In general, the existence of a non-empty set of - /// delayed literals simply means the answer's status is UNKNOWN, - /// either because the size of the answer exceeded `max_size` or - /// because of a negative loop (e.g., `P :- not { P }`). - pub ambiguous: bool, + fn increment(&mut self) { + self.clock += 1; + } } -#[derive(Debug)] -struct DelayedLiteralSets(InnerDelayedLiteralSets); - -#[derive(Clone, Debug, PartialEq, Eq)] -enum InnerDelayedLiteralSets { - /// Corresponds to a single, empty set. - None, - - /// Some (non-zero) number of non-empty sets. - /// Must be a set of sets, but HashSets are not Hash so we manually ensure uniqueness. - Some(Vec>), +/// A "floundered" subgoal is one that contains unbound existential +/// variables for which it cannot produce a value. The classic example +/// of floundering is a negative subgoal: +/// +/// ```notrust +/// not { Implemented(?T: Foo) } +/// ``` +/// +/// The way the prolog solver works, it basically enumerates all the +/// ways that a given goal can be *true*. But we can't use this +/// technique to find all the ways that `?T: Foo` can be *false* -- so +/// we call it floundered. In other words, we can evaluate a negative +/// goal, but only if we know what `?T` is -- we can't use the +/// negative goal to help us figure out `?T`.
+/// +/// In addition to negative goals, we use floundering to prevent the +/// trait solver from trying to enumerate very large goals with tons +/// of answers. For example, we consider a goal like `?T: Sized` to +/// "flounder", since we can't hope to enumerate all types that are +/// `Sized`. The same is true for other special traits like `Clone`. +/// +/// Floundering can also occur indirectly. For example: +/// +/// ```notrust +/// trait Foo { } +/// impl Foo for T { } +/// ``` +/// +/// trying to solve `?T: Foo` would immediately require solving `?T: +/// Sized`, and hence would flounder. #[derive(Clone, Debug, PartialEq, Eq, Hash, TypeFoldable, TypeVisitable)] +pub struct FlounderedSubgoal { + /// Literal that floundered. + pub floundered_literal: Literal, + + /// Current value of the strand's clock at the time of + /// floundering. + pub floundered_time: TimeStamp, } -/// A set of delayed literals. -/// -/// (One might expect delayed literals to always be ground, since -/// non-ground negative literals result in flounded -/// executions. However, due to the approximations introduced via RR -/// to ensure termination, it *is* in fact possible for delayed goals -/// to contain free variables. For example, what could happen is that -/// we get back an approximated answer with `Goal::CannotProve` as a -/// delayed literal, which in turn forces its subgoal to be delayed, -/// and so forth. Therefore, we store canonicalized goals.) -#[derive(Clone, Debug, Default)] -struct DelayedLiteralSet { - delayed_literals: FxHashSet>, +/// An "answer" in the on-demand solver corresponds to a fully solved +/// goal for a particular table (modulo delayed literals). It contains +/// a substitution +#[derive(Clone, Debug)] +pub struct Answer { + /// Contains values for the unbound inference variables for which + /// the table is true, along with any delayed subgoals (which must + /// still be proven) and region constraints (which must still be + /// proven, but not by chalk).
+ pub subst: Canonical>, + + /// If this flag is set, then the answer could be neither proven + /// nor disproven. This could be the size of the answer exceeded + /// `max_size` or because of a negative loop (e.g., `P :- not { P }`). + pub ambiguous: bool, } #[derive(Clone, Debug)] -pub enum DelayedLiteral { - /// Something which can never be proven nor disproven. Inserted - /// when truncation triggers; doesn't arise normally. - CannotProve(()), - - /// We are blocked on a negative literal `~G`, where `G` is the - /// goal of the given table. Because negative goals must always be - /// ground, we don't need any other information. - Negative(TableIndex), - - /// We are blocked on a positive literal `Li`; we found a - /// **conditional** answer (the `CanonicalConstrainedSubst`) within the - /// given table, but we have to come back later and see whether - /// that answer turns out to be true. - Positive(TableIndex, C::CanonicalConstrainedSubst), +pub struct CompleteAnswer { + /// Contains values for the unbound inference variables for which + /// the table is true, along with any region constrained (which must still be + /// proven, but not by chalk). + pub subst: Canonical>, + + /// If this flag is set, then the answer could be neither proven + /// nor disproven. This could be the size of the answer exceeded + /// `max_size` or because of a negative loop (e.g., `P :- not { P }`). + pub ambiguous: bool, } /// Either `A` or `~A`, where `A` is a `Env |- Goal`. -#[derive(Clone, Debug)] -pub enum Literal { // FIXME: pub b/c fold - Positive(C::GoalInEnvironment), - Negative(C::GoalInEnvironment), +#[derive(Clone, Debug, TypeFoldable, TypeVisitable)] +pub enum Literal { + // FIXME: pub b/c fold + Positive(InEnvironment>), + Negative(InEnvironment>), } /// The `Minimums` structure is used to track the dependencies between @@ -205,9 +227,11 @@ pub enum Literal { // FIXME: pub b/c fold /// initialized with the index of the predicate on the stack. 
So /// imagine we have a stack like this: /// +/// ```notrust /// // 0 foo(X) <-- bottom of stack /// // 1 bar(X) /// // 2 baz(X) <-- top of stack +/// ``` /// /// In this case, `positive` would be initially 0, 1, and 2 for `foo`, /// `bar`, and `baz` respectively. This reflects the fact that the @@ -228,61 +252,14 @@ pub enum Literal { // FIXME: pub b/c fold /// however, this value must be updated. #[derive(Copy, Clone, Debug)] struct Minimums { - positive: DepthFirstNumber, - negative: DepthFirstNumber, -} - -impl DelayedLiteralSets { - fn singleton(set: DelayedLiteralSet) -> Self { - if set.is_empty() { - DelayedLiteralSets(InnerDelayedLiteralSets::None) - } else { - DelayedLiteralSets(InnerDelayedLiteralSets::Some(vec![set])) - } - } - - /// Inserts the set if it is minimal in the family. - /// Returns true iff the set was inserted. - fn insert_if_minimal(&mut self, set: &DelayedLiteralSet) -> bool { - match self.0 { - // The empty set is always minimal. - InnerDelayedLiteralSets::None => false, - // Are we inserting an empty set? - InnerDelayedLiteralSets::Some(_) if set.is_empty() => { - self.0 = InnerDelayedLiteralSets::None; - true - } - InnerDelayedLiteralSets::Some(ref mut sets) => { - // Look for a subset. - if sets.iter().any(|set| set.is_subset(&set)) { - false - } else { - // No subset therefore `set` is minimal, discard supersets and insert. 
- sets.retain(|set| !set.is_subset(set)); - sets.push(set.clone()); - true - } - } - } - } -} - -impl DelayedLiteralSet { - fn is_empty(&self) -> bool { - self.delayed_literals.is_empty() - } - - fn is_subset(&self, other: &DelayedLiteralSet) -> bool { - self.delayed_literals - .iter() - .all(|elem| other.delayed_literals.contains(elem)) - } + positive: TimeStamp, + negative: TimeStamp, } impl Minimums { const MAX: Minimums = Minimums { - positive: DepthFirstNumber::MAX, - negative: DepthFirstNumber::MAX, + positive: TimeStamp::MAX, + negative: TimeStamp::MAX, }; /// Update our fields to be the minimum of our current value @@ -292,43 +269,66 @@ impl Minimums { self.negative = min(self.negative, other.negative); } - fn minimum_of_pos_and_neg(&self) -> DepthFirstNumber { + fn minimum_of_pos_and_neg(&self) -> TimeStamp { min(self.positive, self.negative) } } -impl DepthFirstNumber { - const MIN: DepthFirstNumber = DepthFirstNumber { value: 0 }; - const MAX: DepthFirstNumber = DepthFirstNumber { - value: ::std::u64::MAX, - }; - - fn next(&mut self) -> DepthFirstNumber { - let value = self.value; - assert!(value < ::std::u64::MAX); - self.value += 1; - DepthFirstNumber { value } - } +#[derive(Copy, Clone, Debug)] +pub(crate) enum AnswerMode { + Complete, + Ambiguous, } -/// Because we recurse so deeply, we rely on stacker to -/// avoid overflowing the stack. -#[cfg(feature = "stack_protection")] -fn maybe_grow_stack(op: F) -> R -where - F: FnOnce() -> R, -{ - // These numbers are somewhat randomly chosen to make tests work - // well enough on my system. In particular, because we only test - // for growing the stack in `new_clause`, a red zone of 32K was - // insufficient to prevent stack overflow. - nikomatsakis - stacker::maybe_grow(256 * 1024, 2 * 1024 * 1024, op) -} +chalk_ir::copy_fold!(TableIndex); +chalk_ir::copy_fold!(TimeStamp); + +chalk_ir::const_visit!(TableIndex); +chalk_ir::const_visit!(TimeStamp); + +#[macro_export] +macro_rules! 
index_struct { + ($(#[$m:meta])* $v:vis struct $n:ident { + $vf:vis value: usize, + }) => { + #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] + $(#[$m])* + $v struct $n { + $vf value: usize, + } -#[cfg(not(feature = "stack_protection"))] -fn maybe_grow_stack(op: F) -> R -where - F: FnOnce() -> R, -{ - op() + impl $n { + // Not all index structs need this, so allow it to be dead + // code. + #[allow(dead_code)] + $v fn get_and_increment(&mut self) -> Self { + let old_value = *self; + self.increment(); + old_value + } + + #[allow(dead_code)] + $v fn increment(&mut self) { + self.value += 1; + } + + // TODO: Once the Step trait is stabilized (https://github.com/rust-lang/rust/issues/42168), instead implement it and use the Iterator implementation of Range + #[allow(dead_code)] + pub fn iterate_range(range: ::std::ops::Range) -> impl Iterator { + (range.start.value..range.end.value).into_iter().map(|i| Self { value: i }) + } + } + + impl ::std::fmt::Debug for $n { + fn fmt(&self, fmt: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write!(fmt, "{}({})", stringify!($n), self.value) + } + } + + impl From for $n { + fn from(value: usize) -> Self { + Self { value } + } + } + } } diff --git a/chalk-engine/src/logic.rs b/chalk-engine/src/logic.rs index 8117424db90..e38ed9187ac 100644 --- a/chalk-engine/src/logic.rs +++ b/chalk-engine/src/logic.rs @@ -1,368 +1,1288 @@ -use {DelayedLiteral, DelayedLiteralSet, DepthFirstNumber, ExClause, Literal, Minimums, - TableIndex}; -use fallible::NoSolution; -use context::{WithInstantiatedExClause, WithInstantiatedUCanonicalGoal, prelude::*}; -use forest::Forest; -use hh::HhGoal; -use stack::StackIndex; -use strand::{CanonicalStrand, SelectedSubgoal, Strand}; -use table::{Answer, AnswerIndex}; -use rustc_hash::FxHashSet; -use std::marker::PhantomData; -use std::mem; +use crate::forest::Forest; +use crate::normalize_deep::DeepNormalizer; +use crate::slg::{ResolventOps, SlgContext, 
SlgContextOps}; +use crate::stack::{Stack, StackIndex}; +use crate::strand::{CanonicalStrand, SelectedSubgoal, Strand}; +use crate::table::{AnswerIndex, Table}; +use crate::{ + Answer, AnswerMode, CompleteAnswer, ExClause, FlounderedSubgoal, Literal, Minimums, TableIndex, + TimeStamp, +}; + +use chalk_ir::could_match::CouldMatch; +use chalk_ir::interner::Interner; +use chalk_ir::{ + AnswerSubst, Canonical, ConstrainedSubst, Constraints, FallibleOrFloundered, Floundered, Goal, + GoalData, InEnvironment, NoSolution, ProgramClause, Substitution, UCanonical, UniverseMap, +}; +use chalk_solve::clauses::program_clauses_that_could_match; +use chalk_solve::coinductive_goal::IsCoinductive; +use chalk_solve::infer::ucanonicalize::UCanonicalized; +use chalk_solve::infer::InferenceTable; +use chalk_solve::solve::truncate; +use tracing::{debug, debug_span, info, instrument}; type RootSearchResult = Result; /// The different ways that a *root* search (which potentially pursues /// many strands) can fail. A root search is one that begins with an /// empty stack. -/// -/// (This is different from `RecursiveSearchFail` because nothing can -/// be on the stack, so cycles are ruled out.) #[derive(Debug)] pub(super) enum RootSearchFail { - /// The subgoal we were trying to solve cannot succeed. + /// The table we were trying to solve cannot succeed. NoMoreSolutions, + /// The table cannot be solved without more type information. + Floundered, + /// We did not find a solution, but we still have things to try. /// Repeat the request, and we'll give one of those a spin. /// /// (In a purely depth-first-based solver, like Prolog, this /// doesn't appear.) QuantumExceeded, -} -type RecursiveSearchResult = Result; + /// A negative cycle was found. This is fail-fast, so even if there was + /// possibly a solution (ambiguous or not), it may not have been found. 
+ NegativeCycle, -/// The different ways that a recursive search (which potentially -/// pursues many strands) can fail -- a "recursive" search is one that -/// did not start with an empty stack. -#[derive(Debug)] -enum RecursiveSearchFail { - /// The subgoal we were trying to solve cannot succeed. - NoMoreSolutions, - - /// **All** avenues to solve the subgoal we were trying solve - /// encountered a cyclic dependency on something higher up in the - /// stack. The `Minimums` encodes how high up (and whether - /// positive or negative). - Cycle(Minimums), - - /// We did not find a solution, but we still have things to try. - /// Repeat the request, and we'll give one of those a spin. - /// - /// (In a purely depth-first-based solver, like Prolog, this - /// doesn't appear.) - QuantumExceeded, + /// The current answer index is not useful. Currently, this is returned + /// because the current answer needs refining. + InvalidAnswer, } -#[allow(type_alias_bounds)] -type StrandResult = Result>; - -/// Possible failures from pursuing a particular strand. -#[derive(Debug)] -pub(super) enum StrandFail { - /// The strand has no solution. - NoSolution, +/// This is returned when we try to select a subgoal for a strand. +#[derive(PartialEq)] +enum SubGoalSelection { + /// A subgoal was successfully selected. It has already been checked + /// to not be floundering. However, it may have an answer already, be + /// coinductive, or create a cycle. + Selected, + + /// This strand has no remaining subgoals, but there may still be + /// floundered subgoals. + NotSelected, +} - /// We did not yet figure out a solution; the strand will have - /// been rescheduled for later. - QuantumExceeded, +/// This is returned `on_no_remaining_subgoals` +enum NoRemainingSubgoalsResult { + /// There is an answer available for the root table + RootAnswerAvailable, - /// The strand hit a cyclic dependency. In this case, - /// we return the strand, as well as a `Minimums` struct. 
- Cycle(CanonicalStrand, Minimums), -} + /// There was a `RootSearchFail` + RootSearchFail(RootSearchFail), -#[derive(Debug)] -enum EnsureSuccess { - AnswerAvailable, - Coinductive, + // This was a success + Success, } -impl> Forest { - /// Ensures that answer with the given index is available from the - /// given table. This may require activating a strand. Returns - /// `Ok(())` if the answer is available and otherwise a +impl Forest { + /// Returns an answer with a given index for the given table. This + /// may require activating a strand and following it. It returns + /// `Ok(answer)` if they answer is available and otherwise a /// `RootSearchFail` result. - pub(super) fn ensure_root_answer( + pub(super) fn root_answer( &mut self, + context: &SlgContextOps, table: TableIndex, - answer: AnswerIndex, - ) -> RootSearchResult<()> { - assert!(self.stack.is_empty()); - - match self.ensure_answer_recursively(table, answer) { - Ok(EnsureSuccess::AnswerAvailable) => Ok(()), - Err(RecursiveSearchFail::NoMoreSolutions) => Err(RootSearchFail::NoMoreSolutions), - Err(RecursiveSearchFail::QuantumExceeded) => Err(RootSearchFail::QuantumExceeded), + answer_index: AnswerIndex, + ) -> RootSearchResult> { + let stack = Stack::default(); + + let mut state = SolveState { + forest: self, + context, + stack, + }; - // Things involving cycles should be impossible since our - // stack was empty on entry: - Ok(EnsureSuccess::Coinductive) | Err(RecursiveSearchFail::Cycle(..)) => { - panic!("ensure_root_answer: nothing on the stack but cyclic result") + match state.ensure_root_answer(table, answer_index) { + Ok(()) => { + assert!(state.stack.is_empty()); + let answer = state.forest.answer(table, answer_index); + if !answer.subst.value.delayed_subgoals.is_empty() { + return Err(RootSearchFail::InvalidAnswer); + } + Ok(CompleteAnswer { + subst: Canonical { + binders: answer.subst.binders.clone(), + value: ConstrainedSubst { + subst: answer.subst.value.subst.clone(), + constraints: 
answer.subst.value.constraints.clone(), + }, + }, + ambiguous: answer.ambiguous, + }) } + Err(err) => Err(err), } } pub(super) fn any_future_answer( - &mut self, + &self, table: TableIndex, - answer: AnswerIndex, - mut test: impl FnMut(&C::InferenceNormalizedSubst) -> bool, + mut answer_index: AnswerIndex, + mut test: impl FnMut(&Substitution) -> bool, ) -> bool { - if let Some(answer) = self.tables[table].answer(answer) { + // Check any cached answers, starting at `answer_index`. + while let Some(answer) = self.tables[table].answer(answer_index) { info!("answer cached = {:?}", answer); - return test(CO::inference_normalized_subst_from_subst(&answer.subst)); + if test(&answer.subst.value.subst) { + return true; + } + answer_index.increment(); } - self.tables[table].strands_mut().any(|strand| { - test(CO::inference_normalized_subst_from_ex_clause(&strand.canonical_ex_clause)) - }) + // Check any unsolved strands, which may give further answers. + self.tables[table] + .strands() + .any(|strand| test(&strand.value.ex_clause.subst)) } - /// Ensures that answer with the given index is available from the - /// given table. Returns `Ok` if there is an answer: + pub(crate) fn answer(&self, table: TableIndex, answer: AnswerIndex) -> &Answer { + self.tables[table].answer(answer).unwrap() + } + + fn canonicalize_strand_from( + context: &SlgContextOps, + infer: &mut InferenceTable, + strand: &Strand, + ) -> CanonicalStrand { + infer + .canonicalize(context.program().interner(), strand.clone()) + .quantified + } + + /// Given a subgoal, converts the literal into u-canonical form + /// and searches for an existing table. If one is found, it is + /// returned, but otherwise a new table is created (and populated + /// with its initial set of strands). /// - /// - `EnsureSuccess::AnswerAvailable` means that the answer is - /// cached in the table (and can be fetched with e.g. `self.answer()`). 
- /// - `EnsureSuccess::Coinductive` means that this was a cyclic - /// request of a coinductive goal and is thus considered true; - /// in this case, the answer is not cached in the table (it is - /// only true in this cyclic context). + /// Returns `None` if the literal cannot be converted into a table + /// -- for example, this can occur when we have selected a + /// negative literal with free existential variables, in which + /// case the execution is said to "flounder". /// - /// This function first attempts to fetch answer that is cached in - /// the table. If none is found, then we will if the table is on - /// the stack; if so, that constitutes a cycle (producing a new - /// result for the table X required producing a new result for the - /// table X), and we return a suitable result. Otherwise, we can - /// push the table onto the stack and select the next available - /// strand -- if none are available, then no more answers are - /// possible. - fn ensure_answer_recursively( + /// In terms of the NFTD paper, creating a new table corresponds + /// to the *New Subgoal* step as well as the *Program Clause + /// Resolution* steps. + #[instrument(level = "debug", skip(self, context, infer))] + fn get_or_create_table_for_subgoal( &mut self, - table: TableIndex, - answer: AnswerIndex, - ) -> RecursiveSearchResult { - info_heading!( - "ensure_answer_recursively(table={:?}, answer={:?})", - table, - answer - ); - info!("table goal = {:#?}", self.tables[table].table_goal); + context: &SlgContextOps, + infer: &mut InferenceTable, + subgoal: &Literal, + ) -> Option<(TableIndex, UniverseMap)> { + // Subgoal abstraction: + let (ucanonical_subgoal, universe_map) = match subgoal { + Literal::Positive(subgoal) => { + Forest::abstract_positive_literal(context, infer, subgoal.clone())? + } + Literal::Negative(subgoal) => { + Forest::abstract_negative_literal(context, infer, subgoal.clone())? + } + }; - // First, check for a tabled answer. 
- if self.tables[table].answer(answer).is_some() { - info!("answer cached = {:?}", self.tables[table].answer(answer)); - return Ok(EnsureSuccess::AnswerAvailable); - } + debug!(?ucanonical_subgoal, ?universe_map); - // If no tabled answer is present, we ought to be requesting - // the next available index. - assert_eq!(self.tables[table].next_answer_index(), answer); + let table = self.get_or_create_table_for_ucanonical_goal(context, ucanonical_subgoal); - // Next, check if the table is already active. If so, then we - // have a recursive attempt. - if let Some(depth) = self.stack.is_active(table) { - info!("ensure_answer: cycle detected at depth {:?}", depth); + Some((table, universe_map)) + } + + /// Given a u-canonical goal, searches for an existing table. If + /// one is found, it is returned, but otherwise a new table is + /// created (and populated with its initial set of strands). + /// + /// In terms of the NFTD paper, creating a new table corresponds + /// to the *New Subgoal* step as well as the *Program Clause + /// Resolution* steps. + #[instrument(level = "debug", skip(self, context))] + pub(crate) fn get_or_create_table_for_ucanonical_goal( + &mut self, + context: &SlgContextOps, + goal: UCanonical>>, + ) -> TableIndex { + if let Some(table) = self.tables.index_of(&goal) { + debug!(?table, "found existing table"); + return table; + } + + info!( + table = ?self.tables.next_index(), + "creating new table with goal = {:#?}", + goal, + ); + let table = Self::build_table(context, self.tables.next_index(), goal); + self.tables.insert(table) + } - if self.top_of_stack_is_coinductive_from(depth) { - return Ok(EnsureSuccess::Coinductive); + /// When a table is first created, this function is invoked to + /// create the initial set of strands. If the table represents a + /// domain goal, these strands are created from the program + /// clauses as well as the clauses found in the environment. 
If + /// the table represents a non-domain goal, such as `for G` + /// etc, then `simplify_goal` is invoked to create a strand + /// that breaks the goal down. + /// + /// In terms of the NFTD paper, this corresponds to the *Program + /// Clause Resolution* step being applied eagerly, as many times + /// as possible. + fn build_table( + context: &SlgContextOps, + table_idx: TableIndex, + goal: UCanonical>>, + ) -> Table { + let coinductive = goal.is_coinductive(context.program()); + let mut table = Table::new(goal.clone(), coinductive); + + let goal_data = goal.canonical.value.goal.data(context.program().interner()); + match goal_data { + GoalData::DomainGoal(domain_goal) => { + let canon_domain_goal = UCanonical { + canonical: Canonical { + binders: goal.canonical.binders, + value: InEnvironment::new( + &goal.canonical.value.environment, + domain_goal.clone(), + ), + }, + universes: goal.universes, + }; + + let db = context.program(); + let canon_goal = canon_domain_goal.canonical.value.goal.clone(); + let could_match = |c: &ProgramClause| { + c.could_match(db.interner(), db.unification_database(), &canon_goal) + }; + + match program_clauses_that_could_match(db, &canon_domain_goal) { + Ok(mut clauses) => { + clauses.retain(could_match); + clauses.extend(db.custom_clauses().into_iter().filter(could_match)); + + let (infer, subst, goal) = + chalk_solve::infer::InferenceTable::from_canonical( + context.program().interner(), + canon_domain_goal.universes, + canon_domain_goal.canonical, + ); + + clauses.extend( + db.program_clauses_for_env(&goal.environment) + .iter(db.interner()) + .cloned() + .filter(could_match), + ); + + let InEnvironment { environment, goal } = goal; + + for clause in clauses { + info!("program clause = {:#?}", clause); + let mut infer = infer.clone(); + if let Ok(resolvent) = infer.resolvent_clause( + context.unification_database(), + context.program().interner(), + &environment, + &goal, + &subst, + &clause, + ) { + info!("pushing initial strand 
with ex-clause: {:#?}", &resolvent,); + let strand = Strand { + ex_clause: resolvent, + selected_subgoal: None, + last_pursued_time: TimeStamp::default(), + }; + let canonical_strand = + Self::canonicalize_strand_from(context, &mut infer, &strand); + table.enqueue_strand(canonical_strand); + } + } + } + Err(Floundered) => { + debug!( + table = ?table_idx, + "Marking table {:?} as floundered! (failed to create program clauses)", + table_idx + ); + table.mark_floundered(); + } + } } - return Err(RecursiveSearchFail::Cycle(Minimums { - positive: self.stack[depth].dfn, - negative: DepthFirstNumber::MAX, - })); + _ => { + let (mut infer, subst, InEnvironment { environment, goal }) = + chalk_solve::infer::InferenceTable::from_canonical( + context.program().interner(), + goal.universes, + goal.canonical, + ); + // The goal for this table is not a domain goal, so we instead + // simplify it into a series of *literals*, all of which must be + // true. Thus, in EWFS terms, we are effectively creating a + // single child of the `A :- A` goal that is like `A :- B, C, D` + // where B, C, and D are the simplified subgoals. You can think + // of this as applying built-in "meta program clauses" that + // reduce goals into Domain goals. 
+ match Self::simplify_goal(context, &mut infer, subst, environment, goal) { + FallibleOrFloundered::Ok(ex_clause) => { + info!( + ex_clause = ?DeepNormalizer::normalize_deep( + &mut infer, + context.program().interner(), + ex_clause.clone(), + ), + "pushing initial strand" + ); + let strand = Strand { + ex_clause, + selected_subgoal: None, + last_pursued_time: TimeStamp::default(), + }; + let canonical_strand = + Self::canonicalize_strand_from(context, &mut infer, &strand); + table.enqueue_strand(canonical_strand); + } + FallibleOrFloundered::NoSolution => {} + FallibleOrFloundered::Floundered => table.mark_floundered(), + } + } } - let dfn = self.next_dfn(); - let depth = self.stack.push(table, dfn); - let result = self.pursue_next_strand(depth); - self.stack.pop(table, depth); - info!("ensure_answer: result = {:?}", result); - result.map(|()| EnsureSuccess::AnswerAvailable) + table } - pub(crate) fn answer(&self, table: TableIndex, answer: AnswerIndex) -> &Answer { - self.tables[table].answer(answer).unwrap() + /// Given a selected positive subgoal, applies the subgoal + /// abstraction function to yield the canonical form that will be + /// used to pick a table. Typically, this abstraction has no + /// effect, and hence we are simply returning the canonical form + /// of `subgoal`; but if the subgoal is getting too big, we return + /// `None`, which causes the subgoal to flounder. 
+ fn abstract_positive_literal( + context: &SlgContextOps, + infer: &mut InferenceTable, + subgoal: InEnvironment>, + ) -> Option<(UCanonical>>, UniverseMap)> { + if truncate::needs_truncation( + context.program().interner(), + infer, + context.max_size(), + &subgoal, + ) { + None + } else { + let canonicalized_goal = infer + .canonicalize(context.program().interner(), subgoal) + .quantified; + let UCanonicalized { + quantified, + universes, + } = InferenceTable::u_canonicalize(context.program().interner(), &canonicalized_goal); + Some((quantified, universes)) + } } - /// Selects the next eligible strand from the table at depth - /// `depth` and pursues it. If that strand encounters a cycle, - /// then this function will loop and keep trying strands until it - /// reaches one that did not encounter a cycle; that result is - /// propagated. If all strands return a cycle, then the entire - /// subtree is "completed" by invoking `cycle`. - fn pursue_next_strand(&mut self, depth: StackIndex) -> RecursiveSearchResult<()> { - // This is a bit complicated because this is where we handle cycles. - let table = self.stack[depth].table; + /// Given a selected negative subgoal, the subgoal is "inverted" + /// (see `InferenceTable::invert`) and then potentially truncated + /// (see `abstract_positive_literal`). The result subgoal is + /// canonicalized. In some cases, this may return `None` and hence + /// fail to yield a useful result, for example if free existential + /// variables appear in `subgoal` (in which case the execution is + /// said to "flounder"). + fn abstract_negative_literal( + context: &SlgContextOps, + infer: &mut InferenceTable, + subgoal: InEnvironment>, + ) -> Option<(UCanonical>>, UniverseMap)> { + // First, we have to check that the selected negative literal + // is ground, and invert any universally quantified variables. 
+ // + // DIVERGENCE -- In the RR paper, to ensure completeness, they + // permit non-ground negative literals, but only consider + // them to succeed when the target table has no answers at + // all. This is equivalent inverting those free existentials + // into universals, as discussed in the comments of + // `invert`. This is clearly *sound*, but the completeness is + // a subtle point. In particular, it can cause **us** to reach + // false conclusions, because e.g. given a program like + // (selected left-to-right): + // + // not { ?T: Copy }, ?T = Vec + // + // we would select `not { ?T: Copy }` first. For this goal to + // succeed we would require that -- effectively -- `forall + // { not { T: Copy } }`, which clearly doesn't hold. (In the + // terms of RR, we would require that the table for `?T: Copy` + // has failed before we can continue.) + // + // In the RR paper, this is acceptable because they assume all + // of their input programs are both **normal** (negative + // literals are selected after positive ones) and **safe** + // (all free variables in negative literals occur in positive + // literals). It is plausible for us to guarantee "normal" + // form, we can reorder clauses as we need. I suspect we can + // guarantee safety too, but I have to think about it. + // + // For now, we opt for the safer route of terming such + // executions as floundering, because I think our use of + // negative goals is sufficiently limited we can get away with + // it. The practical effect is that we will judge more + // executions as floundering than we ought to (i.e., where we + // could instead generate an (imprecise) result). As you can + // see a bit later, we also diverge in some other aspects that + // affect completeness when it comes to subgoal abstraction. + let inverted_subgoal = infer.invert(context.program().interner(), subgoal)?; - // Strands that encountered a cyclic error. 
- let mut cyclic_strands = vec![]; + if truncate::needs_truncation( + context.program().interner(), + infer, + context.max_size(), + &inverted_subgoal, + ) { + None + } else { + let canonicalized_goal = infer + .canonicalize(context.program().interner(), inverted_subgoal) + .quantified; + let UCanonicalized { + quantified, + universes, + } = InferenceTable::u_canonicalize(context.program().interner(), &canonicalized_goal); + Some((quantified, universes)) + } + } +} - // The minimum of all cyclic strands. - let mut cyclic_minimums = Minimums::MAX; +pub(crate) struct SolveState<'forest, I: Interner> { + forest: &'forest mut Forest, + context: &'forest SlgContextOps<'forest, I>, + stack: Stack, +} - loop { - match self.tables[table].pop_next_strand() { - Some(canonical_strand) => { - let num_universes = CO::num_universes(&self.tables[table].table_goal); - let result = Self::with_instantiated_strand( - self.context.clone(), - num_universes, - &canonical_strand, - PursueStrand { - forest: self, - depth, - }, - ); - match result { - Ok(answer) => { - // Now that we produced an answer, these - // cyclic strands need to be retried. - self.tables[table].extend_strands(cyclic_strands); - return Ok(answer); - } +impl<'forest, I: Interner> Drop for SolveState<'forest, I> { + fn drop(&mut self) { + if !self.stack.is_empty() { + if let Some(active_strand) = self.stack.top().active_strand.take() { + let table = self.stack.top().table; + self.forest.tables[table].enqueue_strand(active_strand); + } + self.unwind_stack(); + } + } +} - Err(StrandFail::NoSolution) | Err(StrandFail::QuantumExceeded) => { - // This strand did not produce an answer, - // but either it (or some other, pending - // strands) may do so in the - // future. Enqueue the cyclic strands to - // be retried after that point. 
- self.tables[table].extend_strands(cyclic_strands); - return Err(RecursiveSearchFail::QuantumExceeded); - } +impl<'forest, I: Interner> SolveState<'forest, I> { + /// Ensures that answer with the given index is available from the + /// given table. Returns `Ok` if there is an answer. + /// + /// This function first attempts to fetch answer that is cached in + /// the table. If none is found, then it will recursively search + /// to find an answer. + #[instrument(level = "info", skip(self))] + fn ensure_root_answer( + &mut self, + initial_table: TableIndex, + initial_answer: AnswerIndex, + ) -> RootSearchResult<()> { + info!( + "table goal = {:#?}", + self.forest.tables[initial_table].table_goal + ); + // Check if this table has floundered. + if self.forest.tables[initial_table].is_floundered() { + return Err(RootSearchFail::Floundered); + } + // Check for a tabled answer. + if let Some(answer) = self.forest.tables[initial_table].answer(initial_answer) { + info!("answer cached = {:?}", answer); + return Ok(()); + } + + // If no tabled answer is present, we ought to be requesting + // the next available index. + assert_eq!( + self.forest.tables[initial_table].next_answer_index(), + initial_answer + ); - Err(StrandFail::Cycle(canonical_strand, strand_minimums)) => { - // This strand encountered a cycle. Stash - // it for later and try the next one until - // we know that *all* available strands - // are hitting a cycle. - cyclic_strands.push(canonical_strand); - cyclic_minimums.take_minimums(&strand_minimums); + self.stack + .push(initial_table, Minimums::MAX, self.forest.increment_clock()); + loop { + let clock = self.stack.top().clock; + // If we had an active strand, continue to pursue it + let table = self.stack.top().table; + let table_answer_mode = self.forest.tables[table].answer_mode; + + // We track when we last pursued each strand. If all the strands have been + // pursued at this depth, then that means they all encountered a cycle. 
+ // We also know that if the first strand has been pursued at this depth, + // then all have. Otherwise, an answer to any strand would have provided an + // answer for the table. + let forest = &mut self.forest; + let next_strand = self.stack.top().active_strand.take().or_else(|| { + forest.tables[table].dequeue_next_strand_that(|strand| { + let time_eligble = strand.value.last_pursued_time < clock; + let mode_eligble = match (table_answer_mode, strand.value.ex_clause.ambiguous) { + (AnswerMode::Complete, false) => true, + (AnswerMode::Complete, true) => false, + (AnswerMode::Ambiguous, _) => true, + }; + time_eligble && mode_eligble + }) + }); + match next_strand { + Some(mut canonical_strand) => { + debug!("starting next strand = {:#?}", canonical_strand); + + canonical_strand.value.last_pursued_time = clock; + match self.select_subgoal(&mut canonical_strand) { + SubGoalSelection::Selected => { + // A subgoal has been selected. We now check this subgoal + // table for an existing answer or if it's in a cycle. + // If neither of those are the case, a strand is selected + // and the next loop iteration happens. + self.on_subgoal_selected(canonical_strand)?; + continue; + } + SubGoalSelection::NotSelected => { + match self.on_no_remaining_subgoals(canonical_strand) { + NoRemainingSubgoalsResult::RootAnswerAvailable => return Ok(()), + NoRemainingSubgoalsResult::RootSearchFail(e) => return Err(e), + NoRemainingSubgoalsResult::Success => continue, + }; } } } - None => { - // No more strands left to try! That means either we started - // with no strands, or all available strands encountered a cycle. - - if cyclic_strands.is_empty() { - // We started with no strands! 
- return Err(RecursiveSearchFail::NoMoreSolutions); - } else { - let c = mem::replace(&mut cyclic_strands, vec![]); - if let Some(err) = self.cycle(depth, c, cyclic_minimums) { - return Err(err); - } - } + self.on_no_strands_left()?; + continue; } } } } - fn with_instantiated_strand( - context: CO, - num_universes: usize, - canonical_strand: &CanonicalStrand, - op: impl WithInstantiatedStrand, - ) -> R { - let CanonicalStrand { - canonical_ex_clause, - selected_subgoal, - } = canonical_strand; - return context.instantiate_ex_clause( - num_universes, - &canonical_ex_clause, - With { - op, - selected_subgoal: selected_subgoal.clone(), - ops: PhantomData, - }, - ); + /// This is called when an answer is available for the selected subgoal + /// of the strand. First, if the selected subgoal is a `Positive` subgoal, + /// it first clones the strand pursuing the next answer. Then, it merges the + /// answer into the provided `Strand`. + /// On success, `Ok` is returned and the `Strand` can be continued to process + /// On failure, `Err` is returned and the `Strand` should be discarded + fn merge_answer_into_strand( + &mut self, + infer: &mut InferenceTable, + strand: &mut Strand, + ) -> RootSearchResult<()> { + // At this point, we know we have an answer for + // the selected subgoal of the strand. + // Now, we have to unify that answer onto the strand. + + // If this answer is ambiguous and we don't want ambiguous answers + // yet, then we act like this is a floundered subgoal. + let ambiguous = { + let selected_subgoal = strand.selected_subgoal.as_ref().unwrap(); + let answer = self.forest.answer( + selected_subgoal.subgoal_table, + selected_subgoal.answer_index, + ); + answer.ambiguous + }; + if let AnswerMode::Complete = self.forest.tables[self.stack.top().table].answer_mode { + if ambiguous { + // FIXME: we could try to be a little bit smarter here. This can + // really be split into cases: + // 1) Cases where no amount of solving will cause this ambiguity to change. 
+ // (e.g. `CannnotProve`) + // 2) Cases where we may be able to get a better answer if we + // solve other subgoals first. + // (e.g. the `non_enumerable_traits_reorder` test) + // We really only need to delay merging an ambiguous answer for + // case 2. Do note, though, that even if we *do* merge the answer + // case 1, we should stop solving this strand when in + // `AnswerMode::Complete` since we wouldn't use this answer yet + // *anyways*. + + // The selected subgoal returned an ambiguous answer, but we don't want that. + // So, we treat this subgoal as floundered. + let selected_subgoal = strand.selected_subgoal.take().unwrap(); + self.flounder_subgoal(&mut strand.ex_clause, selected_subgoal.subgoal_index); + return Ok(()); + } + } - struct With, OP: WithInstantiatedStrand> { - op: OP, - selected_subgoal: Option>, - ops: PhantomData, + // If this subgoal was a `Positive` one, whichever way this + // particular answer turns out, there may yet be *more* answers, + // if this isn't a trivial substitution. + // Enqueue that alternative for later. + // NOTE: this is separate from the match below because we `take` the selected_subgoal + // below, but here we keep it for the new `Strand`. 
+ let selected_subgoal = strand.selected_subgoal.as_ref().unwrap(); + if let Literal::Positive(_) = strand.ex_clause.subgoals[selected_subgoal.subgoal_index] { + let answer = self.forest.answer( + selected_subgoal.subgoal_table, + selected_subgoal.answer_index, + ); + if !self.forest.tables[selected_subgoal.subgoal_table] + .table_goal + .is_trivial_substitution(self.context.program().interner(), &answer.subst) + { + let mut next_subgoal = selected_subgoal.clone(); + next_subgoal.answer_index.increment(); + let next_strand = Strand { + ex_clause: strand.ex_clause.clone(), + selected_subgoal: Some(next_subgoal), + last_pursued_time: strand.last_pursued_time, + }; + let table = self.stack.top().table; + let canonical_next_strand = + Forest::canonicalize_strand_from(self.context, infer, &next_strand); + self.forest.tables[table].enqueue_strand(canonical_next_strand); + } } - impl, OP: WithInstantiatedStrand> - WithInstantiatedExClause for With { - type Output = OP::Output; - - fn with( - self, - infer: &mut dyn InferenceTable, - ex_clause: ExClause, - ) -> OP::Output { - self.op.with(Strand { - infer, - ex_clause, - selected_subgoal: self.selected_subgoal.clone(), - }) + // Deselect and remove the selected subgoal, now that we have an answer for it. 
+        let selected_subgoal = strand.selected_subgoal.take().unwrap();
+        let subgoal = strand
+            .ex_clause
+            .subgoals
+            .remove(selected_subgoal.subgoal_index);
+        match subgoal {
+            Literal::Positive(subgoal) => {
+                let SelectedSubgoal {
+                    subgoal_index: _,
+                    subgoal_table,
+                    answer_index,
+                    ref universe_map,
+                } = selected_subgoal;
+                use chalk_solve::infer::ucanonicalize::UniverseMapExt;
+                let table_goal = universe_map.map_from_canonical(
+                    self.context.program().interner(),
+                    &self.forest.tables[subgoal_table].table_goal.canonical,
+                );
+                let answer_subst = universe_map.map_from_canonical(
+                    self.context.program().interner(),
+                    &self.forest.answer(subgoal_table, answer_index).subst,
+                );
+                match infer.apply_answer_subst(
+                    self.context.program().interner(),
+                    self.context.unification_database(),
+                    &mut strand.ex_clause,
+                    &subgoal,
+                    &table_goal,
+                    answer_subst,
+                ) {
+                    Ok(()) => {
+                        let ex_clause = &mut strand.ex_clause;
+
+                        // If the answer was ambiguous, we have to
+                        // ensure that `ex_clause` is also ambiguous. This is
+                        // the SLG FACTOR operation, though NFTD just makes it
+                        // part of computing the SLG resolvent.
+                        if self.forest.answer(subgoal_table, answer_index).ambiguous {
+                            debug!("Marking Strand as ambiguous because answer to (positive) subgoal was ambiguous");
+                            ex_clause.ambiguous = true;
+                        }
+
+                        // Increment the answer time for the `ex_clause`. Floundered
+                        // subgoals may be eligible to be pursued again.
+                        ex_clause.answer_time.increment();
+
+                        // Ok, we've applied the answer to this Strand.
+                        Ok(())
+                    }
+
+                    // This answer led nowhere. Give up for now, but of course
+                    // there may still be other strands to pursue, so return
+                    // `QuantumExceeded`.
+                    Err(NoSolution) => {
+                        info!("answer not unifiable -> NoSolution");
+                        // This strand has no solution. It is no longer active,
+                        // so it is dropped at the end of this scope.
+
+                        // Now we want to propagate back up with `QuantumExceeded`
+                        self.unwind_stack();
+                        Err(RootSearchFail::QuantumExceeded)
+                    }
+                }
+            }
+            Literal::Negative(_) => {
+                let SelectedSubgoal {
+                    subgoal_index: _,
+                    subgoal_table,
+                    answer_index,
+                    universe_map: _,
+                } = selected_subgoal;
+                // We got back an answer. This is bad, because we want
+                // to disprove the subgoal, but it may be
+                // "conditional" (maybe true, maybe not).
+                let answer = self.forest.answer(subgoal_table, answer_index);
+
+                // By construction, we do not expect negative subgoals
+                // to have delayed subgoals. This is because we do not
+                // need to permit `not { L }` where `L` is a
+                // coinductive goal. We could improve this if needed,
+                // but it keeps things simple.
+                if !answer.subst.value.delayed_subgoals.is_empty() {
+                    panic!("Negative subgoal had delayed_subgoals");
+                }
+
+                if !answer.ambiguous {
+                    // We want to disprove the subgoal, but we
+                    // have an unconditional answer for the subgoal,
+                    // therefore we have failed to disprove it.
+                    info!("found unconditional answer to neg literal -> NoSolution");
+
+                    // This strand has no solution. By returning an Err,
+                    // the caller should discard this `Strand`.
+
+                    // Now we want to propagate back up with `QuantumExceeded`
+                    self.unwind_stack();
+                    return Err(RootSearchFail::QuantumExceeded);
+                }
+
+                // Otherwise, the answer is ambiguous. We can keep going,
+                // but we have to mark our strand, too, as ambiguous.
+                //
+                // We want to disprove the subgoal, but we
+                // have an unconditional answer for the subgoal,
+                // therefore we have failed to disprove it.
+                debug!(?strand, "Marking Strand as ambiguous because answer to (negative) subgoal was ambiguous");
+                strand.ex_clause.ambiguous = true;
+
+                // Strand is ambiguous.
+ Ok(()) } } } - fn canonicalize_strand(strand: Strand<'_, C, impl Context>) -> CanonicalStrand { - let Strand { - infer, - ex_clause, - selected_subgoal, - } = strand; - Self::canonicalize_strand_from(&mut *infer, &ex_clause, selected_subgoal) + /// This is called when the selected subgoal for a strand has floundered. + /// We have to decide what this means for the strand. + /// - If the strand was positively dependent on the subgoal, we flounder, + /// the subgoal, then return `false`. This strand may be able to be + /// retried later. + /// - If the strand was negatively dependent on the subgoal, then strand + /// has led nowhere of interest and we return `true`. This strand should + /// be discarded. + /// + /// In other words, we return whether this strand flounders. + fn propagate_floundered_subgoal(&mut self, strand: &mut CanonicalStrand) -> bool { + // This subgoal selection for the strand is finished, so take it + let selected_subgoal = strand.value.selected_subgoal.take().unwrap(); + match strand.value.ex_clause.subgoals[selected_subgoal.subgoal_index] { + Literal::Positive(_) => { + // If this strand depends on this positively, then we can + // come back to it later. So, we mark that subgoal as + // floundered and yield `QuantumExceeded` up the stack + + // If this subgoal floundered, push it onto the + // floundered list, along with the time that it + // floundered. We'll try to solve some other subgoals + // and maybe come back to it. + self.flounder_subgoal(&mut strand.value.ex_clause, selected_subgoal.subgoal_index); + + false + } + Literal::Negative(_) => { + // Floundering on a negative literal isn't like a + // positive search: we only pursue negative literals + // when we already know precisely the type we are + // looking for. So there's no point waiting for other + // subgoals, we'll never recover more information. 
+ // + // In fact, floundering on negative searches shouldn't + // normally happen, since there are no uninferred + // variables in the goal, but it can with forall + // goals: + // + // forall { not { T: Debug } } + // + // Here, the table we will be searching for answers is + // `?T: Debug`, so it could well flounder. + + // This strand has no solution. It is no longer active, + // so it dropped at the end of this scope. + + true + } + } } - fn canonicalize_strand_from( - infer: &mut dyn InferenceTable, - ex_clause: &ExClause, - selected_subgoal: Option>, - ) -> CanonicalStrand { - let canonical_ex_clause = infer.canonicalize_ex_clause(&ex_clause); - CanonicalStrand { - canonical_ex_clause, - selected_subgoal, + /// This is called if the selected subgoal for a `Strand` is + /// a coinductive cycle. + fn on_coinductive_subgoal( + &mut self, + mut canonical_strand: CanonicalStrand, + ) -> Result<(), RootSearchFail> { + // This is a co-inductive cycle. That is, this table + // appears somewhere higher on the stack, and has now + // recursively requested an answer for itself. This + // means that we have to delay this subgoal until we + // reach a trivial self-cycle. 
+ + // This subgoal selection for the strand is finished, so take it + let selected_subgoal = canonical_strand.value.selected_subgoal.take().unwrap(); + match canonical_strand + .value + .ex_clause + .subgoals + .remove(selected_subgoal.subgoal_index) + { + Literal::Positive(subgoal) => { + // We delay this subgoal + let table = self.stack.top().table; + assert!( + self.forest.tables[table].coinductive_goal + && self.forest.tables[selected_subgoal.subgoal_table].coinductive_goal + ); + + canonical_strand + .value + .ex_clause + .delayed_subgoals + .push(subgoal); + + self.stack.top().active_strand = Some(canonical_strand); + Ok(()) + } + Literal::Negative(_) => { + // We don't allow coinduction for negative literals + info!("found coinductive answer to negative literal"); + panic!("Coinductive cycle with negative literal"); + } } } - /// Invoked when all available strands for a table have - /// encountered a cycle. In this case, the vector `strands` are - /// the set of strands that encountered cycles, and `minimums` is - /// the minimum stack depths that they were dependent on. + /// This is called if the selected subgoal for `strand` is + /// a positive, non-coinductive cycle. + /// + /// # Parameters /// - /// Returns `None` if we have resolved the cycle and should try to - /// pick a strand again. Returns `Some(_)` if the cycle indicates - /// an error that we can propagate higher up. 
- fn cycle( + /// * `strand` the strand from the top of the stack we are pursuing + /// * `minimums` is the collected minimum clock times + fn on_positive_cycle( &mut self, - depth: StackIndex, - strands: Vec>, + canonical_strand: CanonicalStrand, minimums: Minimums, - ) -> Option { - let table = self.stack[depth].table; - assert!(self.tables[table].pop_next_strand().is_none()); + ) -> Result<(), RootSearchFail> { + // We can't take this because we might need it later to clear the cycle + let selected_subgoal = canonical_strand.value.selected_subgoal.as_ref().unwrap(); + + match canonical_strand.value.ex_clause.subgoals[selected_subgoal.subgoal_index] { + Literal::Positive(_) => { + self.stack.top().cyclic_minimums.take_minimums(&minimums); + } + Literal::Negative(_) => { + // We depend on `not(subgoal)`. For us to continue, + // `subgoal` must be completely evaluated. Therefore, + // we depend (negatively) on the minimum link of + // `subgoal` as a whole -- it doesn't matter whether + // it's pos or neg. + let mins = Minimums { + positive: self.stack.top().clock, + negative: minimums.minimum_of_pos_and_neg(), + }; + self.stack.top().cyclic_minimums.take_minimums(&mins); + } + } + + // Ok, we've taken the minimums from this cycle above. Now, + // we just return the strand to the table. The table only + // pulls strands if they have not been checked at this + // depth. + // + // We also can't mark these and return early from this + // because the stack above us might change. + let table = self.stack.top().table; + self.forest.tables[table].enqueue_strand(canonical_strand); + + // The strand isn't active, but the table is, so just continue + Ok(()) + } + + /// Invoked after we've selected a (new) subgoal for the top-most + /// strand. Attempts to pursue this selected subgoal. + /// + /// Returns: + /// + /// * `Ok` if we should keep searching. + /// * `Err` if the subgoal failed in some way such that the strand can be abandoned. 
+ fn on_subgoal_selected( + &mut self, + mut canonical_strand: CanonicalStrand, + ) -> Result<(), RootSearchFail> { + // This may be a newly selected subgoal or an existing selected subgoal. + + let SelectedSubgoal { + subgoal_index: _, + subgoal_table, + answer_index, + universe_map: _, + } = *canonical_strand.value.selected_subgoal.as_ref().unwrap(); + + debug!( + ?subgoal_table, + goal = ?self.forest.tables[subgoal_table].table_goal, + "table selection {:?} with goal: {:?}", + subgoal_table, self.forest.tables[subgoal_table].table_goal + ); + + // This is checked inside select_subgoal + assert!(!self.forest.tables[subgoal_table].is_floundered()); + + // Check for a tabled answer. + if let Some(answer) = self.forest.tables[subgoal_table].answer(answer_index) { + info!("answer cached = {:?}", answer); + + // There was a previous answer available for this table + // We need to check if we can merge it into the current `Strand`. + let num_universes = self.forest.tables[self.stack.top().table] + .table_goal + .universes; + let (mut infer, _, mut strand) = chalk_solve::infer::InferenceTable::from_canonical( + self.context.program().interner(), + num_universes, + canonical_strand.clone(), + ); + match self.merge_answer_into_strand(&mut infer, &mut strand) { + Err(e) => { + debug!(?strand, "could not merge into current strand"); + drop(strand); + return Err(e); + } + Ok(_) => { + debug!(?strand, "merged answer into current strand"); + canonical_strand = + Forest::canonicalize_strand_from(self.context, &mut infer, &strand); + self.stack.top().active_strand = Some(canonical_strand); + return Ok(()); + } + } + } + + // If no tabled answer is present, we ought to be requesting + // the next available index. + assert_eq!( + self.forest.tables[subgoal_table].next_answer_index(), + answer_index + ); + + // Next, check if the table is already active. If so, then we + // have a recursive attempt. 
+ if let Some(cyclic_depth) = self.stack.is_active(subgoal_table) { + info!("cycle detected at depth {:?}", cyclic_depth); + let minimums = Minimums { + positive: self.stack[cyclic_depth].clock, + negative: TimeStamp::MAX, + }; + + if self.top_of_stack_is_coinductive_from(cyclic_depth) { + debug!("table is coinductive"); + return self.on_coinductive_subgoal(canonical_strand); + } + + debug!("table encountered a positive cycle"); + return self.on_positive_cycle(canonical_strand, minimums); + } + + // We don't know anything about the selected subgoal table. + // Set this strand as active and push it onto the stack. + self.stack.top().active_strand = Some(canonical_strand); + + let cyclic_minimums = Minimums::MAX; + self.stack.push( + subgoal_table, + cyclic_minimums, + self.forest.increment_clock(), + ); + Ok(()) + } + + /// This is called when there are no remaining subgoals for a strand, so + /// it represents an answer. If the strand is ambiguous and we don't want + /// it yet, we just enqueue it again to pick it up later. Otherwise, we + /// add the answer from the strand onto the table. + fn on_no_remaining_subgoals( + &mut self, + canonical_strand: CanonicalStrand, + ) -> NoRemainingSubgoalsResult { + let ambiguous = canonical_strand.value.ex_clause.ambiguous; + if let AnswerMode::Complete = self.forest.tables[self.stack.top().table].answer_mode { + if ambiguous { + // The strand can only return an ambiguous answer, but we don't + // want that right now, so requeue and we'll deal with it later. 
+ self.forest.tables[self.stack.top().table].enqueue_strand(canonical_strand); + return NoRemainingSubgoalsResult::RootSearchFail(RootSearchFail::QuantumExceeded); + } + } + let floundered = !canonical_strand + .value + .ex_clause + .floundered_subgoals + .is_empty(); + if floundered { + debug!("all remaining subgoals floundered for the table"); + } else { + debug!("no remaining subgoals for the table"); + }; + match self.pursue_answer(canonical_strand) { + Some(answer_index) => { + debug!("answer is available"); + + // We found an answer for this strand, and therefore an + // answer for this table. Now, this table was either a + // subgoal for another strand, or was the root table. + let table = self.stack.top().table; + match self.stack.pop_and_take_caller_strand() { + Some(caller_strand) => { + self.stack.top().active_strand = Some(caller_strand); + NoRemainingSubgoalsResult::Success + } + None => { + // That was the root table, so we are done -- + // *well*, unless there were delayed + // subgoals. In that case, we want to evaluate + // those delayed subgoals to completion, so we + // have to create a fresh strand that will + // take them as goals. Note that we *still + // need the original answer in place*, because + // we might have to build on it (see the + // Delayed Trivial Self Cycle, Variant 3 + // example). + + let answer = self.forest.answer(table, answer_index); + if let Some(strand) = self.create_refinement_strand(table, answer) { + self.forest.tables[table].enqueue_strand(strand); + } + + NoRemainingSubgoalsResult::RootAnswerAvailable + } + } + } + None => { + debug!("answer is not available (or not new)"); + + // This strand led nowhere of interest. There might be *other* + // answers on this table, but we don't care right now, we'll + // try again at another time. 
+ + // Now we yield with `QuantumExceeded` + self.unwind_stack(); + NoRemainingSubgoalsResult::RootSearchFail(RootSearchFail::QuantumExceeded) + } + } + } + + /// A "refinement" strand is used in coinduction. When the root + /// table on the stack publishes an answer has delayed subgoals, + /// we create a new strand that will attempt to prove out those + /// delayed subgoals (the root answer here is not *special* except + /// in so far as that there is nothing above it, and hence we know + /// that the delayed subgoals (which resulted in some cycle) must + /// be referring to a table that now has completed). + /// + /// Note that it is important for this to be a *refinement* strand + /// -- meaning that the answer with delayed subgoals has been + /// published. This is necessary because sometimes the strand must + /// build on that very answer that it is refining. See Delayed + /// Trivial Self Cycle, Variant 3. + fn create_refinement_strand( + &self, + table: TableIndex, + answer: &Answer, + ) -> Option> { + // If there are no delayed subgoals, then there is no need for + // a refinement strand. 
+ if answer.subst.value.delayed_subgoals.is_empty() { + return None; + } + + let num_universes = self.forest.tables[table].table_goal.universes; + let ( + mut infer, + _, + AnswerSubst { + subst, + constraints, + delayed_subgoals, + }, + ) = chalk_solve::infer::InferenceTable::from_canonical( + self.context.program().interner(), + num_universes, + answer.subst.clone(), + ); + + let delayed_subgoals = delayed_subgoals + .into_iter() + .map(Literal::Positive) + .collect(); + + let strand = Strand { + ex_clause: ExClause { + subst, + ambiguous: answer.ambiguous, + constraints: constraints + .as_slice(self.context.program().interner()) + .to_vec(), + subgoals: delayed_subgoals, + delayed_subgoals: Vec::new(), + answer_time: TimeStamp::default(), + floundered_subgoals: Vec::new(), + }, + selected_subgoal: None, + last_pursued_time: TimeStamp::default(), + }; + + Some(Forest::canonicalize_strand_from( + self.context, + &mut infer, + &strand, + )) + } + + fn on_no_strands_left(&mut self) -> Result<(), RootSearchFail> { + let table = self.stack.top().table; + debug!("no more strands available (or all cycles) for {:?}", table); + + // No more strands left to try! This is either because all + // strands have failed, because all strands encountered a + // cycle, or all strands have would give ambiguous answers. + + if self.forest.tables[table].strands_mut().count() == 0 { + // All strands for the table T on the top of the stack + // have **failed**. Hence we can pop it off the stack and + // check what this means for the table T' that was just + // below T on the stack (if any). + debug!("no more strands available"); + let caller_strand = match self.stack.pop_and_borrow_caller_strand() { + Some(s) => s, + None => { + // T was the root table, so we are done. 
+ debug!("no more solutions"); + return Err(RootSearchFail::NoMoreSolutions); + } + }; + + // This subgoal selection for the strand is finished, so take it + let caller_selected_subgoal = caller_strand.value.selected_subgoal.take().unwrap(); + return match caller_strand.value.ex_clause.subgoals + [caller_selected_subgoal.subgoal_index] + { + // T' wanted an answer from T, but none is + // forthcoming. Therefore, the active strand from T' + // has failed and can be discarded. + Literal::Positive(_) => { + debug!("discarding strand because positive literal"); + self.stack.top().active_strand.take(); + self.unwind_stack(); + Err(RootSearchFail::QuantumExceeded) + } + + // T' wanted there to be no answer from T, but none is forthcoming. + Literal::Negative(_) => { + debug!("subgoal was proven because negative literal"); + + // There is no solution for this strand. But, this + // is what we want, so can remove this subgoal and + // keep going. + caller_strand + .value + .ex_clause + .subgoals + .remove(caller_selected_subgoal.subgoal_index); + + // This strand is still active, so continue + Ok(()) + } + }; + } + + // We can't consider this table as part of a cycle unless we've handled + // all strands, not just non-ambiguous ones. See chalk#571. + if let AnswerMode::Complete = self.forest.tables[table].answer_mode { + debug!("Allowing ambiguous answers."); + self.forest.tables[table].answer_mode = AnswerMode::Ambiguous; + return Err(RootSearchFail::QuantumExceeded); + } + + let clock = self.stack.top().clock; + let cyclic_minimums = self.stack.top().cyclic_minimums; + if cyclic_minimums.positive >= clock && cyclic_minimums.negative >= clock { + debug!("cycle with no new answers"); + + if cyclic_minimums.negative < TimeStamp::MAX { + // This is a negative cycle. 
+ self.unwind_stack(); + return Err(RootSearchFail::NegativeCycle); + } - let dfn = self.stack[depth].dfn; - if minimums.positive == dfn && minimums.negative == DepthFirstNumber::MAX { // If all the things that we recursively depend on have // positive dependencies on things below us in the stack, // then no more answers are forthcoming. We can clear all // the strands for those things recursively. - self.clear_strands_after_cycle(table, strands); - Some(RecursiveSearchFail::NoMoreSolutions) - } else if minimums.positive >= dfn && minimums.negative >= dfn { - let mut visited = FxHashSet::default(); - visited.insert(table); - self.tables[table].extend_strands(strands); - self.delay_strands_after_cycle(table, &mut visited); - None + let table = self.stack.top().table; + let cyclic_strands = self.forest.tables[table].take_strands(); + self.clear_strands_after_cycle(cyclic_strands); + + // Now we yield with `QuantumExceeded` + self.unwind_stack(); + Err(RootSearchFail::QuantumExceeded) } else { - self.tables[table].extend_strands(strands); - Some(RecursiveSearchFail::Cycle(minimums)) + debug!("table part of a cycle"); + + // This table resulted in a positive cycle, so we have + // to check what this means for the subgoal containing + // this strand + let caller_strand = match self.stack.pop_and_borrow_caller_strand() { + Some(s) => s, + None => { + panic!("nothing on the stack but cyclic result"); + } + }; + + // We can't take this because we might need it later to clear the cycle + let caller_selected_subgoal = caller_strand.value.selected_subgoal.as_ref().unwrap(); + match caller_strand.value.ex_clause.subgoals[caller_selected_subgoal.subgoal_index] { + Literal::Positive(_) => { + self.stack + .top() + .cyclic_minimums + .take_minimums(&cyclic_minimums); + } + Literal::Negative(_) => { + // We depend on `not(subgoal)`. For us to continue, + // `subgoal` must be completely evaluated. 
Therefore, + // we depend (negatively) on the minimum link of + // `subgoal` as a whole -- it doesn't matter whether + // it's pos or neg. + let mins = Minimums { + positive: self.stack.top().clock, + negative: cyclic_minimums.minimum_of_pos_and_neg(), + }; + self.stack.top().cyclic_minimums.take_minimums(&mins); + } + } + + // We can't pursue this strand anymore, so push it back onto the table + let active_strand = self.stack.top().active_strand.take().unwrap(); + let table = self.stack.top().table; + self.forest.tables[table].enqueue_strand(active_strand); + + // The strand isn't active, but the table is, so just continue + Ok(()) + } + } + + /// Unwinds the entire stack, returning all active strands back to + /// their tables (this time at the end of the queue). + fn unwind_stack(&mut self) { + loop { + match self.stack.pop_and_take_caller_strand() { + Some(active_strand) => { + let table = self.stack.top().table; + self.forest.tables[table].enqueue_strand(active_strand); + } + + None => return, + } } } @@ -372,162 +1292,110 @@ impl> Forest { /// recursively clears the active strands from the tables /// referenced in `strands`, since all of them must encounter /// cycles too. 
- fn clear_strands_after_cycle( - &mut self, - table: TableIndex, - strands: impl IntoIterator>, - ) { - assert!(self.tables[table].pop_next_strand().is_none()); + fn clear_strands_after_cycle(&mut self, strands: impl IntoIterator>) { for strand in strands { - let CanonicalStrand { - canonical_ex_clause, - selected_subgoal, - } = strand; + let selected_subgoal = strand.value.selected_subgoal; + let ex_clause = strand.value.ex_clause; let selected_subgoal = selected_subgoal.unwrap_or_else(|| { panic!( - "clear_strands_after_cycle invoked on strand in table {:?} \ + "clear_strands_after_cycle invoked on strand in table \ without a selected subgoal: {:?}", - table, canonical_ex_clause, + ex_clause, ) }); let strand_table = selected_subgoal.subgoal_table; - let strands = self.tables[strand_table].take_strands(); - self.clear_strands_after_cycle(strand_table, strands); + let strands = self.forest.tables[strand_table].take_strands(); + self.clear_strands_after_cycle(strands); } } - /// Invoked after we have determined that every strand in `table` - /// encounters a cycle, and that some of those cycles involve - /// negative edges. In that case, walks all negative edges and - /// converts them to delayed literals. - fn delay_strands_after_cycle(&mut self, table: TableIndex, visited: &mut FxHashSet) { - let mut tables = vec![]; - - let num_universes = CO::num_universes(&self.tables[table].table_goal); - for canonical_strand in self.tables[table].strands_mut() { - // FIXME if CanonicalExClause were not held abstract, we - // could do this in place like we used to (and - // `instantiate_strand` could take ownership), since we - // don't really need to instantiate here to do this - // operation. 
- let (delayed_strand, subgoal_table) = Self::with_instantiated_strand( - self.context.clone(), - num_universes, - canonical_strand, - DelayStrandAfterCycle { table }, - ); - - *canonical_strand = delayed_strand; + fn select_subgoal(&mut self, canonical_strand: &mut CanonicalStrand) -> SubGoalSelection { + loop { + while canonical_strand.value.selected_subgoal.is_none() { + if canonical_strand.value.ex_clause.subgoals.is_empty() { + if canonical_strand + .value + .ex_clause + .floundered_subgoals + .is_empty() + { + return SubGoalSelection::NotSelected; + } - if visited.insert(subgoal_table) { - tables.push(subgoal_table); - } - } + self.reconsider_floundered_subgoals(&mut canonical_strand.value.ex_clause); + + if canonical_strand.value.ex_clause.subgoals.is_empty() { + // All the subgoals of this strand floundered. We may be able + // to get helpful information from this strand still, but it + // will *always* be ambiguous, so mark it as so. + assert!(!canonical_strand + .value + .ex_clause + .floundered_subgoals + .is_empty()); + canonical_strand.value.ex_clause.ambiguous = true; + return SubGoalSelection::NotSelected; + } - for table in tables { - self.delay_strands_after_cycle(table, visited); - } - } + continue; + } - fn delay_strand_after_cycle( - table: TableIndex, - mut strand: Strand<'_, C, impl Context>, - ) -> (CanonicalStrand, TableIndex) { - let (subgoal_index, subgoal_table) = match &strand.selected_subgoal { - Some(selected_subgoal) => ( - selected_subgoal.subgoal_index, - selected_subgoal.subgoal_table, - ), - None => { - panic!( - "delay_strands_after_cycle invoked on strand in table {:?} \ - without a selected subgoal: {:?}", - table, strand, + let subgoal_index = + SlgContext::next_subgoal_index(&canonical_strand.value.ex_clause); + + // Get or create table for this subgoal. 
+ let num_universes = self.forest.tables[self.stack.top().table] + .table_goal + .universes; + let (mut infer, _, strand) = chalk_solve::infer::InferenceTable::from_canonical( + self.context.program().interner(), + num_universes, + canonical_strand.clone(), ); - } - }; - - // Delay negative literals. - if let Literal::Negative(_) = strand.ex_clause.subgoals[subgoal_index] { - strand.ex_clause.subgoals.remove(subgoal_index); - strand - .ex_clause - .delayed_literals - .push(DelayedLiteral::Negative(subgoal_table)); - strand.selected_subgoal = None; - } - - (Self::canonicalize_strand(strand), subgoal_table) - } - - /// Pursues `strand` to see if it leads us to a new answer, either - /// by selecting a new subgoal or by checking to see if the - /// selected subgoal has an answer. `strand` is associated with - /// the table on the stack at the given `depth`. - fn pursue_strand( - &mut self, - depth: StackIndex, - mut strand: Strand<'_, C, impl Context>, - ) -> StrandResult { - info_heading!( - "pursue_strand(table={:?}, depth={:?}, ex_clause={:#?}, selected_subgoal={:?})", - self.stack[depth].table, - depth, - strand.infer.debug_ex_clause(&strand.ex_clause), - strand.selected_subgoal, - ); - - // If no subgoal has yet been selected, select one. - while strand.selected_subgoal.is_none() { - if strand.ex_clause.subgoals.len() == 0 { - return self.pursue_answer(depth, strand); - } - - // For now, we always pick the last subgoal in the - // list. - // - // FIXME(rust-lang-nursery/chalk#80) -- we should be more - // selective. For example, we don't want to pick a - // negative literal that will flounder, and we don't want - // to pick things like `?T: Sized` if we can help it. - let subgoal_index = strand.ex_clause.subgoals.len() - 1; - - // Get or create table for this subgoal. 
- match self.get_or_create_table_for_subgoal( - &mut *strand.infer, - &strand.ex_clause.subgoals[subgoal_index], - ) { - Some((subgoal_table, universe_map)) => { - strand.selected_subgoal = Some(SelectedSubgoal { - subgoal_index, - subgoal_table, - universe_map, - answer_index: AnswerIndex::ZERO, - }); - } + match self.forest.get_or_create_table_for_subgoal( + self.context, + &mut infer, + &strand.ex_clause.subgoals[subgoal_index], + ) { + Some((subgoal_table, universe_map)) => { + canonical_strand.value.selected_subgoal = Some(SelectedSubgoal { + subgoal_index, + subgoal_table, + universe_map, + answer_index: AnswerIndex::ZERO, + }); + } - None => { - // If we failed to create a table for the subgoal, - // then the execution has "floundered" (cannot yield - // a complete result). We choose to handle this by - // removing the subgoal and inserting a - // `CannotProve` result. This can only happen with - // ill-formed negative literals or with overflow. - strand.ex_clause.subgoals.remove(subgoal_index); - strand - .ex_clause - .delayed_literals - .push(DelayedLiteral::CannotProve(())); + None => { + // If we failed to create a table for the subgoal, + // that is because we have a floundered negative + // literal. + self.flounder_subgoal(&mut canonical_strand.value.ex_clause, subgoal_index); + } } } - } - // Find the selected subgoal and ask it for the next answer. - let selected_subgoal = strand.selected_subgoal.clone().unwrap(); - match strand.ex_clause.subgoals[selected_subgoal.subgoal_index] { - Literal::Positive(_) => self.pursue_positive_subgoal(depth, strand, &selected_subgoal), - Literal::Negative(_) => self.pursue_negative_subgoal(depth, strand, &selected_subgoal), + let selected_subgoal_table = canonical_strand + .value + .selected_subgoal + .as_ref() + .unwrap() + .subgoal_table; + if self.forest.tables[selected_subgoal_table].is_floundered() { + if self.propagate_floundered_subgoal(canonical_strand) { + // This strand will never lead anywhere of interest. 
+ return SubGoalSelection::NotSelected; + } else { + // This subgoal has floundered and has been marked. + // We previously would immediately mark the table as + // floundered too, and maybe come back to it. Now, we + // try to see if any other subgoals can be pursued first. + continue; + } + } else { + return SubGoalSelection::Selected; + } } } @@ -535,44 +1403,93 @@ impl> Forest { /// that the strand has no subgoals left. There are two possibilities: /// /// - the strand may represent an answer we have already found; in - /// that case, we can return `StrandFail::NoSolution`, as this + /// that case, we can return `None`, as this /// strand led nowhere of interest. /// - the strand may represent a new answer, in which case it is - /// added to the table and `Ok` is returned. - fn pursue_answer( - &mut self, - depth: StackIndex, - strand: Strand<'_, C, impl Context>, - ) -> StrandResult { - let table = self.stack[depth].table; - let Strand { - infer, - ex_clause: - ExClause { - subst, - constraints, - delayed_literals, - subgoals, - }, - selected_subgoal: _, - } = strand; + /// added to the table and `Some(())` is returned. + fn pursue_answer(&mut self, canonical_strand: CanonicalStrand) -> Option { + let table = self.stack.top().table; + let Canonical { + binders, + value: strand, + } = canonical_strand; + let ExClause { + subst, + constraints, + ambiguous, + subgoals, + delayed_subgoals, + answer_time: _, + floundered_subgoals, + } = strand.ex_clause; + // If there are subgoals left, they should be followed assert!(subgoals.is_empty()); + // We can still try to get an ambiguous answer if there are floundered subgoals + let floundered = !floundered_subgoals.is_empty(); + // So let's make sure that it *really* is an ambiguous answer (this should be set previously) + assert!(!floundered || ambiguous); + + // FIXME: If there are floundered subgoals, we *could* potentially + // actually check if the partial answers to any of these subgoals + // conflict. 
But this requires that we think about whether they are + // positive or negative subgoals. This duplicates some of the logic + // in `merge_answer_into_strand`, so a bit of refactoring is needed. + + // If the answer gets too large, mark the table as floundered. + // This is the *most conservative* course. There are a few alternatives: + // 1) Replace the answer with a truncated version of it. (This was done + // previously, but turned out to be more complicated than we wanted + // and a source of multiple bugs.) + // 2) Mark this *strand* as floundered. We don't currently have a mechanism + // for this (only floundered subgoals), so implementing this is more + // difficult because we don't want to just *remove* this strand from the + // table, because that might make the table give `NoMoreSolutions`, which + // is *wrong*. + // 3) Do something fancy with delayed subgoals, effectively delaying the + // truncated bits to a different strand (and a more "refined" answer). + // (This one probably needs more thought, but is here for "completeness") + // + // Ultimately, the current decision to flounder the entire table mostly boils + // down to "it works as we expect for the current tests". And, we likely don't + // even *need* the added complexity just for potentially more answers. 
+ if truncate::needs_truncation( + self.context.program().interner(), + &mut InferenceTable::new(), + self.context.max_size(), + &subst, + ) { + self.forest.tables[table].mark_floundered(); + return None; + } - let answer_subst = infer.canonicalize_constrained_subst(subst, constraints); - debug!("answer: table={:?}, answer_subst={:?}", table, answer_subst); + let table_goal = &self.forest.tables[table].table_goal; - let delayed_literals = { - let mut delayed_literals: FxHashSet<_> = delayed_literals.into_iter() - .map(|dl| infer.lift_delayed_literal(dl)) - .collect(); - DelayedLiteralSet { delayed_literals } + let filtered_delayed_subgoals = delayed_subgoals + .into_iter() + .filter(|delayed_subgoal| { + let canonicalized = InferenceTable::u_canonicalize( + self.context.program().interner(), + &chalk_ir::Canonical { + binders: binders.clone(), + value: delayed_subgoal.clone(), + }, + ) + .quantified; + *table_goal != canonicalized + }) + .collect(); + + let subst = Canonical { + binders, + value: AnswerSubst { + subst, + constraints: Constraints::from_iter(self.context.program().interner(), constraints), + delayed_subgoals: filtered_delayed_subgoals, + }, }; - debug!("answer: delayed_literals={:?}", delayed_literals); + debug!(?table, ?subst, ?floundered, "found answer"); - let answer = Answer { - subst: answer_subst, - delayed_literals, - }; + let answer = Answer { subst, ambiguous }; // A "trivial" answer is one that is 'just true for all cases' // -- in other words, it gives no information back to the @@ -633,709 +1550,96 @@ impl> Forest { // is a *bit* suspect; e.g., those things in the environment // must be backed by an impl *eventually*). 
let is_trivial_answer = { - answer.delayed_literals.is_empty() - && CO::is_trivial_substitution(&self.tables[table].table_goal, &answer.subst) - && CO::empty_constraints(&answer.subst) + self.forest.tables[table] + .table_goal + .is_trivial_substitution(self.context.program().interner(), &answer.subst) + && answer + .subst + .value + .constraints + .is_empty(self.context.program().interner()) }; - if self.tables[table].push_answer(answer) { - if is_trivial_answer { - self.tables[table].take_strands(); + if let Some(answer_index) = self.forest.tables[table].push_answer(answer) { + // See above, if we have a *complete* and trivial answer, we don't + // want to follow any more strands + if !ambiguous && is_trivial_answer { + self.forest.tables[table].take_strands(); } - Ok(()) + Some(answer_index) } else { - info!("answer: not a new answer, returning StrandFail::NoSolution"); - Err(StrandFail::NoSolution) - } - } - - /// Given a subgoal, converts the literal into u-canonical form - /// and searches for an existing table. If one is found, it is - /// returned, but otherwise a new table is created (and populated - /// with its initial set of strands). - /// - /// Returns `None` if the literal cannot be converted into a table - /// -- for example, this can occur when we have selected a - /// negative literal with free existential variables, in which - /// case the execution is said to "flounder". - /// - /// In terms of the NFTD paper, creating a new table corresponds - /// to the *New Subgoal* step as well as the *Program Clause - /// Resolution* steps. 
- fn get_or_create_table_for_subgoal( - &mut self, - infer: &mut dyn InferenceTable, - subgoal: &Literal, - ) -> Option<(TableIndex, C::UniverseMap)> { - debug_heading!("get_or_create_table_for_subgoal(subgoal={:?})", subgoal); - - // Subgoal abstraction: - let canonical_subgoal = match subgoal { - Literal::Positive(subgoal) => self.abstract_positive_literal(infer, subgoal), - Literal::Negative(subgoal) => self.abstract_negative_literal(infer, subgoal)?, - }; - - debug!("canonical_subgoal={:?}", canonical_subgoal); - - let (ucanonical_subgoal, universe_map) = infer.u_canonicalize_goal(&canonical_subgoal); - - let table = self.get_or_create_table_for_ucanonical_goal(ucanonical_subgoal); - - Some((table, universe_map)) - } - - /// Given a u-canonical goal, searches for an existing table. If - /// one is found, it is returned, but otherwise a new table is - /// created (and populated with its initial set of strands). - /// - /// In terms of the NFTD paper, creating a new table corresponds - /// to the *New Subgoal* step as well as the *Program Clause - /// Resolution* steps. - pub(crate) fn get_or_create_table_for_ucanonical_goal( - &mut self, - goal: C::UCanonicalGoalInEnvironment, - ) -> TableIndex { - debug_heading!("get_or_create_table_for_ucanonical_goal({:?})", goal); - - if let Some(table) = self.tables.index_of(&goal) { - debug!("found existing table {:?}", table); - return table; + info!("answer: not a new answer, returning None"); + None } - - info_heading!( - "creating new table {:?} and goal {:#?}", - self.tables.next_index(), - goal - ); - let coinductive_goal = self.context.is_coinductive(&goal); - let table = self.tables.insert(goal, coinductive_goal); - self.push_initial_strands(table); - table } - /// When a table is first created, this function is invoked to - /// create the initial set of strands. If the table represents a - /// domain goal, these strands are created from the program - /// clauses as well as the clauses found in the environment. 
If - /// the table represents a non-domain goal, such as `for G` - /// etc, then `simplify_hh_goal` is invoked to create a strand - /// that breaks the goal down. - /// - /// In terms of the NFTD paper, this corresponds to the *Program - /// Clause Resolution* step being applied eagerly, as many times - /// as possible. - fn push_initial_strands(&mut self, table: TableIndex) { - // Instantiate the table goal with fresh inference variables. - let table_goal = self.tables[table].table_goal.clone(); - self.context.clone().instantiate_ucanonical_goal( - &table_goal, - PushInitialStrandsInstantiated { table, this: self }, - ); - - struct PushInitialStrandsInstantiated<'a, C: Context + 'a, CO: ContextOps + 'a> { - table: TableIndex, - this: &'a mut Forest, - } - - impl> WithInstantiatedUCanonicalGoal - for PushInitialStrandsInstantiated<'a, C, CO> { - type Output = (); - - fn with( - self, - infer: &mut dyn InferenceTable, - subst: I::Substitution, - environment: I::Environment, - goal: I::Goal, - ) { - let PushInitialStrandsInstantiated { table, this } = self; - this.push_initial_strands_instantiated(table, infer, subst, environment, goal); + fn reconsider_floundered_subgoals(&mut self, ex_clause: &mut ExClause) { + info!("reconsider_floundered_subgoals(ex_clause={:#?})", ex_clause,); + let ExClause { + answer_time, + subgoals, + floundered_subgoals, + .. 
+ } = ex_clause; + for i in (0..floundered_subgoals.len()).rev() { + if floundered_subgoals[i].floundered_time < *answer_time { + let floundered_subgoal = floundered_subgoals.swap_remove(i); + subgoals.push(floundered_subgoal.floundered_literal); } } } - fn push_initial_strands_instantiated( - &mut self, - table: TableIndex, - infer: &mut dyn InferenceTable, - subst: I::Substitution, - environment: I::Environment, - goal: I::Goal, - ) { - let table_ref = &mut self.tables[table]; - match infer.into_hh_goal(goal) { - HhGoal::DomainGoal(domain_goal) => { - let clauses = infer.program_clauses(&environment, &domain_goal); - for clause in clauses { - debug!("program clause = {:#?}", clause); - if let Ok(resolvent) = - infer.resolvent_clause(&environment, &domain_goal, &subst, &clause) - { - info!("pushing initial strand with ex-clause: {:#?}", &resolvent,); - table_ref.push_strand(CanonicalStrand { - canonical_ex_clause: resolvent, - selected_subgoal: None, - }); - } - } - } - - hh_goal => { - // `canonical_goal` is an HH goal. We can simplify it - // into a series of *literals*, all of which must be - // true. Thus, in EWFS terms, we are effectively - // creating a single child of the `A :- A` goal that - // is like `A :- B, C, D` where B, C, and D are the - // simplified subgoals. You can think of this as - // applying built-in "meta program clauses" that - // reduce HH goals into Domain goals. - if let Ok(ex_clause) = - Self::simplify_hh_goal(&mut *infer, subst, &environment, hh_goal) - { - info!( - "pushing initial strand with ex-clause: {:#?}", - infer.debug_ex_clause(&ex_clause), - ); - table_ref.push_strand(Self::canonicalize_strand(Strand { - infer, - ex_clause, - selected_subgoal: None, - })); - } - } - } + /// Removes the subgoal at `subgoal_index` from the strand's + /// subgoal list and adds it to the strand's floundered subgoal + /// list. 
+ fn flounder_subgoal(&self, ex_clause: &mut ExClause, subgoal_index: usize) { + let _s = debug_span!( + "flounder_subgoal", + answer_time = ?ex_clause.answer_time, + subgoal = ?ex_clause.subgoals[subgoal_index], + ); + let _s = _s.enter(); + + let floundered_time = ex_clause.answer_time; + let floundered_literal = ex_clause.subgoals.remove(subgoal_index); + ex_clause.floundered_subgoals.push(FlounderedSubgoal { + floundered_literal, + floundered_time, + }); + debug!(?ex_clause); } - /// Given a selected positive subgoal, applies the subgoal - /// abstraction function to yield the canonical form that will be - /// used to pick a table. Typically, this abstraction has no - /// effect, and hence we are simply returning the canonical form - /// of `subgoal`, but if the subgoal is getting too big, we may - /// truncate the goal to ensure termination. + /// True if all the tables on the stack starting from `depth` and + /// continuing until the top of the stack are coinductive. /// - /// This technique is described in the SA paper. - fn abstract_positive_literal( - &mut self, - infer: &mut dyn InferenceTable, - subgoal: &I::GoalInEnvironment, - ) -> C::CanonicalGoalInEnvironment { - // Subgoal abstraction: Rather than looking up the table for - // `selected_goal` directly, first apply the truncation - // function. This may introduce fresh variables, making the - // goal that we are looking up more general, and forcing us to - // reuse an existing table. For example, if we had a selected - // goal of - // - // // Vec>>>: Sized - // - // we might now produce a truncated goal of - // - // // Vec>: Sized - // - // Obviously, the answer we are looking for -- if it exists -- will be - // found amongst the answers of this new, truncated goal. - // - // Subtle point: Note that the **selected goal** remains - // unchanged and will be carried over into the "pending - // clause" for the positive link on the new subgoal. 
This - // means that if our new, truncated subgoal produces - // irrelevant answers (e.g., `Vec>: Sized`), they - // will fail to unify with our selected goal, producing no - // resolvent. - match infer.truncate_goal(subgoal) { - None => infer.canonicalize_goal(subgoal), - Some(truncated_subgoal) => { - debug!("truncated={:?}", truncated_subgoal); - infer.canonicalize_goal(&truncated_subgoal) - } - } - } - - /// Given a selected negative subgoal, the subgoal is "inverted" - /// (see `InferenceTable::invert`) and then potentially truncated - /// (see `abstract_positive_literal`). The result subgoal is - /// canonicalized. In some cases, this may return `None` and hence - /// fail to yield a useful result, for example if free existential - /// variables appear in `subgoal` (in which case the execution is - /// said to "flounder"). - fn abstract_negative_literal( - &mut self, - infer: &mut dyn InferenceTable, - subgoal: &I::GoalInEnvironment, - ) -> Option { - // First, we have to check that the selected negative literal - // is ground, and invert any universally quantified variables. - // - // DIVERGENCE -- In the RR paper, to ensure completeness, they - // permit non-ground negative literals, but only consider - // them to succeed when the target table has no answers at - // all. This is equivalent inverting those free existentials - // into universals, as discussed in the comments of - // `invert`. This is clearly *sound*, but the completeness is - // a subtle point. In particular, it can cause **us** to reach - // false conclusions, because e.g. given a program like - // (selected left-to-right): - // - // not { ?T: Copy }, ?T = Vec - // - // we would select `not { ?T: Copy }` first. For this goal to - // succeed we would require that -- effectively -- `forall - // { not { T: Copy } }`, which clearly doesn't hold. (In the - // terms of RR, we would require that the table for `?T: Copy` - // has failed before we can continue.) 
- // - // In the RR paper, this is acceptable because they assume all - // of their input programs are both **normal** (negative - // literals are selected after positive ones) and **safe** - // (all free variables in negative literals occur in positive - // literals). It is plausible for us to guarantee "normal" - // form, we can reorder clauses as we need. I suspect we can - // guarantee safety too, but I have to think about it. - // - // For now, we opt for the safer route of terming such - // executions as floundering, because I think our use of - // negative goals is sufficiently limited we can get away with - // it. The practical effect is that we will judge more - // executions as floundering than we ought to (i.e., where we - // could instead generate an (imprecise) result). As you can - // see a bit later, we also diverge in some other aspects that - // affect completeness when it comes to subgoal abstraction. - let inverted_subgoal = infer.invert_goal(subgoal)?; - - // DIVERGENCE - // - // If the negative subgoal has grown so large that we would have - // to truncate it, we currently just abort the computation - // entirely. This is not necessary -- the SA paper makes no - // such distinction, for example, and applies truncation equally - // for positive/negative literals. However, there are some complications - // that arise that I do not wish to deal with right now. - // - // Let's work through an example to show you what I - // mean. Imagine we have this (negative) selected literal; - // hence `selected_subgoal` will just be the inner part: - // - // // not { Vec>>>: Sized } - // // ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - // // `selected_goal` - // - // (In this case, the `inverted_subgoal` would be the same, - // since there are no free universal variables.) - // - // If truncation **doesn't apply**, we would go and lookup the - // table for the selected goal (`Vec>: Sized`) and see - // whether it has any answers. 
If it does, and they are - // definite, then this negative literal is false. We don't - // really care even how many answers there are and so forth - // (if the goal is ground, as in this case, there can be at - // most one definite answer, but if there are universals, then - // the inverted goal would have variables; even so, a single - // definite answer suffices to show that the `not { .. }` goal - // is false). - // - // Truncation muddies the water, because the table may - // generate answers that are not relevant to our original, - // untracted literal. Suppose that we truncate the selected - // goal to: - // - // // Vec: Sized - // - // Clearly this table will have some solutions that don't - // apply to us. e.g., `Vec>: Sized` is a solution to - // this table, but that doesn't imply that `not { - // Vec>>: Sized }` is false. - // - // This can be made to work -- we carry along the original - // selected goal when we establish links between tables, and - // we could use that to screen the resulting answers. (There - // are some further complications around the fact that - // selected goal may contain universally quantified free - // variables that have been inverted, as discussed in the - // prior paragraph above.) I just didn't feel like dealing - // with it yet. - match infer.truncate_goal(&inverted_subgoal) { - Some(_) => None, - None => Some(infer.canonicalize_goal(&inverted_subgoal)), - } - } - - /// Invoked when we have selected a positive literal, created its - /// table, and selected a particular answer index N we are looking - /// for. Searches for that answer. 
If we find one, we can do two things: + /// Example: Given a program like: /// - /// - create a new strand with the same selected subgoal, but searching for the - /// answer with index N+1 - /// - use the answer to resolve our selected literal and select the next subgoal - /// in this strand to pursue + /// ```notrust + /// struct Foo { a: Option> } + /// struct Bar { a: Option> } + /// trait XXX { } + /// impl XXX for T { } + /// ``` /// - /// When an answer is found, that corresponds to *Positive Return* - /// from the NFTD paper. - fn pursue_positive_subgoal( - &mut self, - depth: StackIndex, - mut strand: Strand<'_, C, impl Context>, - selected_subgoal: &SelectedSubgoal, - ) -> StrandResult { - let table = self.stack[depth].table; - let SelectedSubgoal { - subgoal_index, - subgoal_table, - answer_index, - ref universe_map, - } = *selected_subgoal; - - match self.ensure_answer_recursively(subgoal_table, answer_index) { - Ok(EnsureSuccess::AnswerAvailable) => { - // The given answer is available; we'll process it below. - } - Ok(EnsureSuccess::Coinductive) => { - // This is a co-inductive cycle. That is, this table - // appears somewhere higher on the stack, and has now - // recursively requested an answer for itself. That - // means that our subgoal is unconditionally true, so - // we can drop it and pursue the next thing. 
- assert!( - self.tables[table].coinductive_goal - && self.tables[subgoal_table].coinductive_goal - ); - let Strand { - infer, - mut ex_clause, - selected_subgoal: _, - } = strand; - ex_clause.subgoals.remove(subgoal_index); - return self.pursue_strand_recursively( - depth, - Strand { - infer, - ex_clause, - selected_subgoal: None, - }, - ); - } - Err(RecursiveSearchFail::NoMoreSolutions) => { - info!("pursue_positive_subgoal: no more solutions"); - return Err(StrandFail::NoSolution); - } - Err(RecursiveSearchFail::QuantumExceeded) => { - // We'll have to revisit this strand later - info!("pursue_positive_subgoal: quantum exceeded"); - self.tables[table].push_strand(Self::canonicalize_strand(strand)); - return Err(StrandFail::QuantumExceeded); - } - Err(RecursiveSearchFail::Cycle(minimums)) => { - info!( - "pursue_positive_subgoal: cycle with minimums {:?}", - minimums - ); - let canonical_strand = Self::canonicalize_strand(strand); - return Err(StrandFail::Cycle(canonical_strand, minimums)); - } - } - - // Whichever way this particular answer turns out, there may - // yet be *more* answers. Enqueue that alternative for later. - self.push_strand_pursuing_next_answer(depth, &mut strand, selected_subgoal); - - // OK, let's follow *this* answer and see where it leads. 
- let Strand { - infer, - mut ex_clause, - selected_subgoal: _, - } = strand; - let subgoal = match ex_clause.subgoals.remove(subgoal_index) { - Literal::Positive(g) => g, - Literal::Negative(g) => panic!( - "pursue_positive_subgoal invoked with negative selected literal: {:?}", - g - ), - }; - - let table_goal = &CO::map_goal_from_canonical(&universe_map, - &CO::canonical(&self.tables[subgoal_table].table_goal)); - let answer_subst = - &CO::map_subst_from_canonical(&universe_map, &self.answer(subgoal_table, answer_index).subst); - match infer.apply_answer_subst(ex_clause, &subgoal, table_goal, answer_subst) { - Ok(mut ex_clause) => { - // If the answer had delayed literals, we have to - // ensure that `ex_clause` is also delayed. This is - // the SLG FACTOR operation, though NFTD just makes it - // part of computing the SLG resolvent. - { - let answer = self.answer(subgoal_table, answer_index); - if !answer.delayed_literals.is_empty() { - ex_clause.delayed_literals.push(DelayedLiteral::Positive( - subgoal_table, - infer.sink_answer_subset(&answer.subst), - )); - } - } - - // Apply answer abstraction. - let ex_clause = self.truncate_returned(ex_clause, &mut *infer); - - self.pursue_strand_recursively( - depth, - Strand { - infer, - ex_clause, - selected_subgoal: None, - }, - ) - } - - // This answer led nowhere. Give up for now, but of course - // there may still be other strands to pursue, so return - // `QuantumExceeded`. - Err(NoSolution) => { - info!("pursue_positive_subgoal: answer not unifiable -> NoSolution"); - Err(StrandFail::NoSolution) - } - } - } - - /// Used whenever we process an answer (whether new or cached) on - /// a positive edge (the SLG POSITIVE RETURN operation). Truncates - /// the resolvent (or factor) if it has grown too large. 
- fn truncate_returned( - &self, - ex_clause: ExClause, - infer: &mut dyn InferenceTable, - ) -> ExClause { - // DIVERGENCE - // - // In the original RR paper, truncation is only applied - // when the result of resolution is a new answer (i.e., - // `ex_clause.subgoals.is_empty()`). I've chosen to be - // more aggressive here, precisely because or our extended - // semantics for unification. In particular, unification - // can insert new goals, so I fear that positive feedback - // loops could still run indefinitely in the original - // formulation. I would like to revise our unification - // mechanism to avoid that problem, in which case this could - // be tightened up to be more like the original RR paper. - // - // Still, I *believe* this more aggressive approx. should - // not interfere with any of the properties of the - // original paper. In particular, applying truncation only - // when the resolvent has no subgoals seems like it is - // aimed at giving us more times to eliminate this - // ambiguous answer. - - match infer.truncate_answer(&ex_clause.subst) { - // No need to truncate? Just propagate the resolvent back. - None => ex_clause, - - // Resolvent got too large. Have to introduce approximation. - Some(truncated_subst) => { - // DIVERGENCE - // - // In RR, `self.delayed_literals` would be - // preserved. I have chosen to drop them. Keeping - // them does allow for the possibility of - // eliminating this answer if any of them turn out - // to be satisfiable. However, it also introduces - // an annoying edge case I didn't want to think - // about -- one which, interestingly, the paper - // did not discuss, which may indicate it is - // impossible for some subtle reason. In - // particular, a truncated delayed literal has a - // sort of inverse semantics. i.e. if we convert - // `Foo :- ~Bar(Rc>) |` to `Foo :- - // ~Bar(Rc), Unknown |`, then this could be - // invalidated by an instance of `Bar(Rc)`, - // which is irrelevant to the original - // clause. 
(There is an additional annoyance, - // which is that we may not have tried to solve - // `Bar(Rc)` at all.) - - ExClause { - subst: truncated_subst, - delayed_literals: vec![DelayedLiteral::CannotProve(())], - constraints: vec![], - subgoals: vec![], - } - } - } - } - - // We can recursive arbitrarily deep while pursuing a strand, so - // check in case we have to grow the stack. - fn pursue_strand_recursively( - &mut self, - depth: StackIndex, - strand: Strand<'_, C, impl Context>, - ) -> StrandResult { - ::maybe_grow_stack(|| self.pursue_strand(depth, strand)) - } - - /// Invoked when we have found a successful answer to the given - /// table. Queues up a strand to look for the *next* answer from - /// that table. - fn push_strand_pursuing_next_answer( - &mut self, - depth: StackIndex, - strand: &mut Strand<'_, C, impl Context>, - selected_subgoal: &SelectedSubgoal, - ) { - let table = self.stack[depth].table; - let mut selected_subgoal = selected_subgoal.clone(); - selected_subgoal.answer_index.increment(); - self.tables[table].push_strand(Self::canonicalize_strand_from( - &mut *strand.infer, - &strand.ex_clause, - Some(selected_subgoal), - )); - } - - fn pursue_negative_subgoal( - &mut self, - depth: StackIndex, - strand: Strand<'_, C, impl Context>, - selected_subgoal: &SelectedSubgoal, - ) -> StrandResult { - let table = self.stack[depth].table; - let SelectedSubgoal { - subgoal_index: _, - subgoal_table, - answer_index, - universe_map: _, - } = *selected_subgoal; - - // In the match below, we will either (a) return early with an - // error or some kind or (b) continue on to pursue this strand - // further. We continue onward in the case where we either - // proved that `answer_index` does not exist (in which case - // the negative literal is true) or if we found a delayed - // literal (in which case the negative literal *may* be true). - // Before exiting the match, then, we set `delayed_literal` to - // either `Some` or `None` depending. 
- let delayed_literal: Option>; - match self.ensure_answer_recursively(subgoal_table, answer_index) { - Ok(EnsureSuccess::AnswerAvailable) => { - if self.answer(subgoal_table, answer_index).is_unconditional() { - // We want to disproval the subgoal, but we - // have an unconditional answer for the subgoal, - // therefore we have failed to disprove it. - info!("pursue_negative_subgoal: found unconditional answer to neg literal -> NoSolution"); - return Err(StrandFail::NoSolution); - } - - // Got back a conditional answer. We neither succeed - // nor fail yet; so what we do is to delay the - // selected literal and keep going. - // - // This corresponds to the Delaying action in NFTD. - // It also interesting to compare this with the EWFS - // paper; there, when we encounter a delayed cached - // answer in `negative_subgoal`, we do not immediately - // convert to a delayed literal, but instead simply - // stop. However, in EWFS, we *do* add the strand to - // the table as a negative pending subgoal, and also - // update the link to depend negatively on the - // table. Then later, when all pending work from that - // table is completed, all negative links are - // converted to delays. - delayed_literal = Some(DelayedLiteral::Negative(subgoal_table)); - } - - Ok(EnsureSuccess::Coinductive) => { - // This is a co-inductive cycle. That is, this table - // appears somewhere higher on the stack, and has now - // recursively requested an answer for itself. That - // means that our subgoal is unconditionally true, so - // our negative goal fails. - info!("pursue_negative_subgoal: found coinductive answer to neg literal -> NoSolution"); - return Err(StrandFail::NoSolution); - } - - Err(RecursiveSearchFail::Cycle(minimums)) => { - // We depend on `not(subgoal)`. For us to continue, - // `subgoal` must be completely evaluated. Therefore, - // we depend (negatively) on the minimum link of - // `subgoal` as a whole -- it doesn't matter whether - // it's pos or neg. 
- let min = minimums.minimum_of_pos_and_neg(); - info!( - "pursue_negative_subgoal: found neg cycle at depth {:?}", - min - ); - let canonical_strand = Self::canonicalize_strand(strand); - return Err(StrandFail::Cycle( - canonical_strand, - Minimums { - positive: self.stack[depth].dfn, - negative: min, - }, - )); - } - - Err(RecursiveSearchFail::NoMoreSolutions) => { - // This answer does not exist. Huzzah, happy days are - // here again! =) We can just remove this subgoal and continue - // with no need for a delayed literal. - delayed_literal = None; - } - - // Learned nothing yet. Have to try again some other time. - Err(RecursiveSearchFail::QuantumExceeded) => { - info!("pursue_negative_subgoal: quantum exceeded"); - self.tables[table].push_strand(Self::canonicalize_strand(strand)); - return Err(StrandFail::QuantumExceeded); - } - } - - // We have found that there is at least a *chance* that - // `answer_index` of the subgoal is a failure, so let's keep - // going. We can just remove the subgoal from the list without - // any need to unify things, because the subgoal must be - // ground (i). We may need to add a delayed literal, though (ii). 
- let Strand { - infer, - mut ex_clause, - selected_subgoal: _, - } = strand; - ex_clause.subgoals.remove(selected_subgoal.subgoal_index); // (i) - ex_clause.delayed_literals.extend(delayed_literal); // (ii) - self.pursue_strand_recursively( - depth, - Strand { - infer, - ex_clause, - selected_subgoal: None, - }, - ) - } -} - -trait WithInstantiatedStrand> { - type Output; - - fn with(self, strand: Strand<'_, C, impl Context>) -> Self::Output; -} - -struct PursueStrand<'a, C: Context + 'a, CO: ContextOps + 'a> { - forest: &'a mut Forest, - depth: StackIndex, -} - -impl> WithInstantiatedStrand for PursueStrand<'a, C, CO> { - type Output = StrandResult; - - fn with(self, strand: Strand<'_, C, impl Context>) -> Self::Output { - self.forest.pursue_strand(self.depth, strand) - } -} - -struct DelayStrandAfterCycle { - table: TableIndex, -} - -impl> WithInstantiatedStrand for DelayStrandAfterCycle { - type Output = (CanonicalStrand, TableIndex); - - fn with(self, strand: Strand<'_, C, impl Context>) -> Self::Output { - >::delay_strand_after_cycle(self.table, strand) + /// and then a goal of `Foo: XXX`, we would eventually wind up + /// with a stack like this: + /// + /// | StackIndex | Table Goal | + /// | ---------- | ----------- | + /// | 0 | `Foo: XXX` | + /// | 1 | `Foo: Send` | + /// | 2 | `Bar: Send` | + /// + /// Here, the top of the stack is `Bar: Send`. And now we are + /// asking `top_of_stack_is_coinductive_from(1)` -- the answer + /// would be true, since `Send` is an auto trait, which yields a + /// coinductive goal. But `top_of_stack_is_coinductive_from(0)` is + /// false, since `XXX` is not an auto trait. 
+ pub(super) fn top_of_stack_is_coinductive_from(&self, depth: StackIndex) -> bool { + StackIndex::iterate_range(self.stack.top_of_stack_from(depth)).all(|d| { + let table = self.stack[d].table; + self.forest.tables[table].coinductive_goal + }) } } diff --git a/chalk-engine/src/normalize_deep.rs b/chalk-engine/src/normalize_deep.rs new file mode 100644 index 00000000000..9f36f3ce526 --- /dev/null +++ b/chalk-engine/src/normalize_deep.rs @@ -0,0 +1,172 @@ +use chalk_derive::FallibleTypeFolder; +use chalk_ir::fold::shift::Shift; +use chalk_ir::fold::{TypeFoldable, TypeFolder}; +use chalk_ir::interner::Interner; +use chalk_ir::*; +use chalk_solve::infer::InferenceTable; + +#[derive(FallibleTypeFolder)] +pub(crate) struct DeepNormalizer<'table, I: Interner> { + table: &'table mut InferenceTable, + interner: I, +} + +impl DeepNormalizer<'_, I> { + /// Given a value `value` with variables in it, replaces those variables + /// with their instantiated values (if any). Uninstantiated variables are + /// left as-is. + /// + /// This is mainly intended for getting final values to dump to + /// the user and its use should otherwise be avoided, particularly + /// given the possibility of snapshots and rollbacks. + /// + /// See also `InferenceTable::canonicalize`, which -- during real + /// processing -- is often used to capture the "current state" of + /// variables. 
+ pub fn normalize_deep>( + table: &mut InferenceTable, + interner: I, + value: T, + ) -> T { + value + .try_fold_with( + &mut DeepNormalizer { interner, table }, + DebruijnIndex::INNERMOST, + ) + .unwrap() + } +} + +impl TypeFolder for DeepNormalizer<'_, I> { + fn as_dyn(&mut self) -> &mut dyn TypeFolder { + self + } + + fn fold_inference_ty( + &mut self, + var: InferenceVar, + kind: TyVariableKind, + _outer_binder: DebruijnIndex, + ) -> Ty { + let interner = self.interner; + match self.table.probe_var(var) { + Some(ty) => ty + .assert_ty_ref(interner) + .clone() + .fold_with(self, DebruijnIndex::INNERMOST) + .shifted_in(interner), // FIXME shift + None => { + // Normalize all inference vars which have been unified into a + // single variable. Ena calls this the "root" variable. + self.table.inference_var_root(var).to_ty(interner, kind) + } + } + } + + fn fold_inference_lifetime( + &mut self, + var: InferenceVar, + _outer_binder: DebruijnIndex, + ) -> Lifetime { + let interner = self.interner; + match self.table.probe_var(var) { + Some(l) => l + .assert_lifetime_ref(interner) + .clone() + .fold_with(self, DebruijnIndex::INNERMOST) + .shifted_in(interner), + None => var.to_lifetime(interner), // FIXME shift + } + } + + fn fold_inference_const( + &mut self, + ty: Ty, + var: InferenceVar, + _outer_binder: DebruijnIndex, + ) -> Const { + let interner = self.interner; + match self.table.probe_var(var) { + Some(c) => c + .assert_const_ref(interner) + .clone() + .fold_with(self, DebruijnIndex::INNERMOST) + .shifted_in(interner), + None => var.to_const(interner, ty), // FIXME shift + } + } + + fn forbid_free_vars(&self) -> bool { + true + } + + fn interner(&self) -> I { + self.interner + } +} + +#[cfg(test)] +mod test { + use super::*; + use chalk_integration::interner::ChalkIr; + use chalk_integration::{arg, ty}; + + const U0: UniverseIndex = UniverseIndex { counter: 0 }; + + // We just use a vec of 20 `Invariant`, since this is zipped and no substs are + // longer than 
this + #[derive(Debug)] + struct TestDatabase; + impl UnificationDatabase for TestDatabase { + fn fn_def_variance(&self, _fn_def_id: FnDefId) -> Variances { + Variances::from_iter(ChalkIr, [Variance::Invariant; 20].iter().copied()) + } + + fn adt_variance(&self, _adt_id: AdtId) -> Variances { + Variances::from_iter(ChalkIr, [Variance::Invariant; 20].iter().copied()) + } + } + + #[test] + fn infer() { + let interner = ChalkIr; + let mut table: InferenceTable = InferenceTable::new(); + let environment0 = Environment::new(interner); + let a = table.new_variable(U0).to_ty(interner); + let b = table.new_variable(U0).to_ty(interner); + table + .relate( + interner, + &TestDatabase, + &environment0, + Variance::Invariant, + &a, + &ty!(apply (item 0) (expr b)), + ) + .unwrap(); + // a is unified to Adt<#0>(c), where 'c' is a new inference var + // created by the generalizer to generalize 'b'. It then unifies 'b' + // and 'c', and when we normalize them, they'll both be output as + // the same "root" variable. However, there are no guarantees for + // _which_ of 'b' and 'c' becomes the root. We need to normalize + // "b" too, then, to ensure we get a consistent result. 
+ assert_eq!( + DeepNormalizer::normalize_deep(&mut table, interner, a.clone()), + ty!(apply (item 0) (expr DeepNormalizer::normalize_deep(&mut table, interner, b.clone()))), + ); + table + .relate( + interner, + &TestDatabase, + &environment0, + Variance::Invariant, + &b, + &ty!(apply (item 1)), + ) + .unwrap(); + assert_eq!( + DeepNormalizer::normalize_deep(&mut table, interner, a), + ty!(apply (item 0) (apply (item 1))) + ); + } +} diff --git a/chalk-engine/src/simplify.rs b/chalk-engine/src/simplify.rs index 003591db090..1c594af0149 100644 --- a/chalk-engine/src/simplify.rs +++ b/chalk-engine/src/simplify.rs @@ -1,79 +1,141 @@ -use fallible::Fallible; -use {ExClause, Literal}; -use forest::Forest; -use hh::HhGoal; -use context::prelude::*; +use crate::forest::Forest; +use crate::slg::SlgContextOps; +use crate::{ExClause, Literal, TimeStamp}; -impl> Forest { - /// Simplifies an HH goal into a series of positive domain goals - /// and negative HH goals. This operation may fail if the HH goal +use chalk_ir::cast::{Cast, Caster}; +use chalk_ir::interner::Interner; +use chalk_ir::{ + Environment, FallibleOrFloundered, Goal, GoalData, InEnvironment, QuantifierKind, Substitution, + TyKind, TyVariableKind, Variance, +}; +use chalk_solve::infer::InferenceTable; +use tracing::debug; + +impl Forest { + /// Simplifies a goal into a series of positive domain goals + /// and negative goals. This operation may fail if the goal /// includes unifications that cannot be completed. 
- pub(super) fn simplify_hh_goal( - infer: &mut dyn InferenceTable, - subst: I::Substitution, - initial_environment: &I::Environment, - initial_hh_goal: HhGoal, - ) -> Fallible> { + pub(super) fn simplify_goal( + context: &SlgContextOps, + infer: &mut InferenceTable, + subst: Substitution, + initial_environment: Environment, + initial_goal: Goal, + ) -> FallibleOrFloundered> { let mut ex_clause = ExClause { subst, - delayed_literals: vec![], + ambiguous: false, constraints: vec![], subgoals: vec![], + delayed_subgoals: vec![], + answer_time: TimeStamp::default(), + floundered_subgoals: vec![], }; // A stack of higher-level goals to process. - let mut pending_goals = vec![(initial_environment.clone(), initial_hh_goal)]; + let mut pending_goals = vec![(initial_environment, initial_goal)]; - while let Some((environment, hh_goal)) = pending_goals.pop() { - match hh_goal { - HhGoal::ForAll(subgoal) => { - let subgoal = infer.instantiate_binders_universally(&subgoal); - pending_goals.push((environment, infer.into_hh_goal(subgoal))); + while let Some((environment, goal)) = pending_goals.pop() { + match goal.data(context.program().interner()) { + GoalData::Quantified(QuantifierKind::ForAll, subgoal) => { + let subgoal = infer.instantiate_binders_universally( + context.program().interner(), + subgoal.clone(), + ); + pending_goals.push((environment, subgoal.clone())); } - HhGoal::Exists(subgoal) => { - let subgoal = infer.instantiate_binders_existentially(&subgoal); - pending_goals.push((environment, infer.into_hh_goal(subgoal))) + GoalData::Quantified(QuantifierKind::Exists, subgoal) => { + let subgoal = infer.instantiate_binders_existentially( + context.program().interner(), + subgoal.clone(), + ); + pending_goals.push((environment, subgoal.clone())); } - HhGoal::Implies(wc, subgoal) => { - let new_environment = infer.add_clauses(&environment, wc); - pending_goals.push((new_environment, infer.into_hh_goal(subgoal))); + GoalData::Implies(wc, subgoal) => { + let 
new_environment = environment.add_clauses( + context.program().interner(), + wc.iter(context.program().interner()).cloned(), + ); + pending_goals.push((new_environment, subgoal.clone())); } - HhGoal::And(subgoal1, subgoal2) => { - pending_goals.push((environment.clone(), infer.into_hh_goal(subgoal1))); - pending_goals.push((environment, infer.into_hh_goal(subgoal2))); + GoalData::All(subgoals) => { + for subgoal in subgoals.iter(context.program().interner()) { + pending_goals.push((environment.clone(), subgoal.clone())); + } } - HhGoal::Not(subgoal) => { + GoalData::Not(subgoal) => { ex_clause .subgoals - .push(Literal::Negative(I::goal_in_environment(&environment, subgoal))); + .push(Literal::Negative(InEnvironment::new( + &environment, + subgoal.clone(), + ))); } - HhGoal::Unify(variance, a, b) => { - let result = infer.unify_parameters(&environment, variance, &a, &b)?; - infer.into_ex_clause(result, &mut ex_clause) + GoalData::EqGoal(goal) => { + let interner = context.program().interner(); + let db = context.unification_database(); + let a = &goal.a; + let b = &goal.b; + + let result = + match infer.relate(interner, db, &environment, Variance::Invariant, a, b) { + Ok(r) => r, + Err(_) => return FallibleOrFloundered::NoSolution, + }; + ex_clause.subgoals.extend( + result + .goals + .into_iter() + .casted(interner) + .map(Literal::Positive), + ); } - HhGoal::DomainGoal(domain_goal) => { + GoalData::SubtypeGoal(goal) => { + let interner = context.program().interner(); + let db = context.unification_database(); + let a_norm = infer.normalize_ty_shallow(interner, &goal.a); + let a = a_norm.as_ref().unwrap_or(&goal.a); + let b_norm = infer.normalize_ty_shallow(interner, &goal.b); + let b = b_norm.as_ref().unwrap_or(&goal.b); + + if matches!( + a.kind(interner), + TyKind::InferenceVar(_, TyVariableKind::General) + ) && matches!( + b.kind(interner), + TyKind::InferenceVar(_, TyVariableKind::General) + ) { + return FallibleOrFloundered::Floundered; + } + + let result = 
+ match infer.relate(interner, db, &environment, Variance::Covariant, a, b) { + Ok(r) => r, + Err(_) => return FallibleOrFloundered::Floundered, + }; + ex_clause.subgoals.extend( + result + .goals + .into_iter() + .casted(interner) + .map(Literal::Positive), + ); + } + GoalData::DomainGoal(domain_goal) => { ex_clause .subgoals - .push(Literal::Positive(I::goal_in_environment( + .push(Literal::Positive(InEnvironment::new( &environment, - infer.into_goal(domain_goal), + domain_goal.clone().cast(context.program().interner()), ))); } - HhGoal::CannotProve => { - // You can think of `CannotProve` as a special - // goal that is only provable if `not { - // CannotProve }`. Trying to prove this, of - // course, will always create a negative cycle and - // hence a delayed literal that cannot be - // resolved. - let goal = infer.cannot_prove(); - ex_clause - .subgoals - .push(Literal::Negative(I::goal_in_environment(&environment, goal))); + GoalData::CannotProve => { + debug!("Marking Strand as ambiguous because of a `CannotProve` subgoal"); + ex_clause.ambiguous = true; } } } - Ok(ex_clause) + FallibleOrFloundered::Ok(ex_clause) } } diff --git a/chalk-engine/src/slg.rs b/chalk-engine/src/slg.rs new file mode 100644 index 00000000000..cffb9994ed5 --- /dev/null +++ b/chalk-engine/src/slg.rs @@ -0,0 +1,378 @@ +use crate::ExClause; + +use chalk_derive::HasInterner; +use chalk_ir::interner::Interner; +use chalk_ir::*; +use chalk_solve::infer::InferenceTable; +use chalk_solve::RustIrDatabase; + +use std::fmt::Debug; +use std::marker::PhantomData; + +pub(crate) mod aggregate; +mod resolvent; + +#[derive(Clone, Debug, HasInterner)] +pub(crate) struct SlgContext { + phantom: PhantomData, +} + +impl SlgContext { + pub(crate) fn next_subgoal_index(ex_clause: &ExClause) -> usize { + // For now, we always pick the last subgoal in the + // list. + // + // FIXME(rust-lang-nursery/chalk#80) -- we should be more + // selective. 
For example, we don't want to pick a + // negative literal that will flounder, and we don't want + // to pick things like `?T: Sized` if we can help it. + ex_clause.subgoals.len() - 1 + } +} +#[derive(Clone, Debug)] +pub(crate) struct SlgContextOps<'me, I: Interner> { + program: &'me dyn RustIrDatabase, + max_size: usize, + expected_answers: Option, +} + +impl SlgContextOps<'_, I> { + pub(crate) fn new( + program: &dyn RustIrDatabase, + max_size: usize, + expected_answers: Option, + ) -> SlgContextOps<'_, I> { + SlgContextOps { + program, + max_size, + expected_answers, + } + } + + fn identity_constrained_subst( + &self, + goal: &UCanonical>>, + ) -> Canonical> { + let (mut infer, subst, _) = InferenceTable::from_canonical( + self.program.interner(), + goal.universes, + goal.canonical.clone(), + ); + infer + .canonicalize( + self.program.interner(), + ConstrainedSubst { + subst, + constraints: Constraints::empty(self.program.interner()), + }, + ) + .quantified + } + + pub(crate) fn program(&self) -> &dyn RustIrDatabase { + self.program + } + + pub(crate) fn max_size(&self) -> usize { + self.max_size + } + + pub(crate) fn unification_database(&self) -> &dyn UnificationDatabase { + self.program.unification_database() + } +} + +pub trait ResolventOps { + /// Combines the `goal` (instantiated within `infer`) with the + /// given program clause to yield the start of a new strand (a + /// canonical ex-clause). + /// + /// The bindings in `infer` are unaffected by this operation. 
+ fn resolvent_clause( + &mut self, + ops: &dyn UnificationDatabase, + interner: I, + environment: &Environment, + goal: &DomainGoal, + subst: &Substitution, + clause: &ProgramClause, + ) -> Fallible>; + + fn apply_answer_subst( + &mut self, + interner: I, + unification_database: &dyn UnificationDatabase, + ex_clause: &mut ExClause, + selected_goal: &InEnvironment>, + answer_table_goal: &Canonical>>, + canonical_answer_subst: Canonical>, + ) -> Fallible<()>; +} + +trait SubstitutionExt { + fn may_invalidate(&self, interner: I, subst: &Canonical>) -> bool; +} + +impl SubstitutionExt for Substitution { + fn may_invalidate(&self, interner: I, subst: &Canonical>) -> bool { + self.iter(interner) + .zip(subst.value.iter(interner)) + .any(|(new, current)| MayInvalidate { interner }.aggregate_generic_args(new, current)) + } +} + +// This is a struct in case we need to add state at any point like in AntiUnifier +struct MayInvalidate { + interner: I, +} + +impl MayInvalidate { + fn aggregate_generic_args(&mut self, new: &GenericArg, current: &GenericArg) -> bool { + let interner = self.interner; + match (new.data(interner), current.data(interner)) { + (GenericArgData::Ty(ty1), GenericArgData::Ty(ty2)) => self.aggregate_tys(ty1, ty2), + (GenericArgData::Lifetime(l1), GenericArgData::Lifetime(l2)) => { + self.aggregate_lifetimes(l1, l2) + } + (GenericArgData::Const(c1), GenericArgData::Const(c2)) => self.aggregate_consts(c1, c2), + (GenericArgData::Ty(_), _) + | (GenericArgData::Lifetime(_), _) + | (GenericArgData::Const(_), _) => panic!( + "mismatched parameter kinds: new={:?} current={:?}", + new, current + ), + } + } + + /// Returns true if the two types could be unequal. 
+ fn aggregate_tys(&mut self, new: &Ty, current: &Ty) -> bool { + let interner = self.interner; + match (new.kind(interner), current.kind(interner)) { + (_, TyKind::BoundVar(_)) => { + // If the aggregate solution already has an inference + // variable here, then no matter what type we produce, + // the aggregate cannot get 'more generalized' than it + // already is. So return false, we cannot invalidate. + // + // (Note that "inference variables" show up as *bound + // variables* here, because we are looking at the + // canonical form.) + false + } + + (TyKind::BoundVar(_), _) => { + // If we see a type variable in the potential future + // solution, we have to be conservative. We don't know + // what type variable will wind up being! Remember + // that the future solution could be any instantiation + // of `ty0` -- or it could leave this variable + // unbound, if the result is true for all types. + // + // (Note that "inference variables" show up as *bound + // variables* here, because we are looking at the + // canonical form.) 
+ true + } + + (TyKind::InferenceVar(_, _), _) | (_, TyKind::InferenceVar(_, _)) => { + panic!( + "unexpected free inference variable in may-invalidate: {:?} vs {:?}", + new, current, + ); + } + + (TyKind::Placeholder(p1), TyKind::Placeholder(p2)) => { + self.aggregate_placeholders(p1, p2) + } + + ( + TyKind::Alias(AliasTy::Projection(proj1)), + TyKind::Alias(AliasTy::Projection(proj2)), + ) => self.aggregate_projection_tys(proj1, proj2), + + ( + TyKind::Alias(AliasTy::Opaque(opaque_ty1)), + TyKind::Alias(AliasTy::Opaque(opaque_ty2)), + ) => self.aggregate_opaque_ty_tys(opaque_ty1, opaque_ty2), + + (TyKind::Adt(id_a, substitution_a), TyKind::Adt(id_b, substitution_b)) => { + self.aggregate_name_and_substs(id_a, substitution_a, id_b, substitution_b) + } + ( + TyKind::AssociatedType(id_a, substitution_a), + TyKind::AssociatedType(id_b, substitution_b), + ) => self.aggregate_name_and_substs(id_a, substitution_a, id_b, substitution_b), + (TyKind::Scalar(scalar_a), TyKind::Scalar(scalar_b)) => scalar_a != scalar_b, + (TyKind::Str, TyKind::Str) => false, + (TyKind::Tuple(arity_a, substitution_a), TyKind::Tuple(arity_b, substitution_b)) => { + self.aggregate_name_and_substs(arity_a, substitution_a, arity_b, substitution_b) + } + ( + TyKind::OpaqueType(id_a, substitution_a), + TyKind::OpaqueType(id_b, substitution_b), + ) => self.aggregate_name_and_substs(id_a, substitution_a, id_b, substitution_b), + (TyKind::Slice(ty_a), TyKind::Slice(ty_b)) => self.aggregate_tys(ty_a, ty_b), + (TyKind::FnDef(id_a, substitution_a), TyKind::FnDef(id_b, substitution_b)) => { + self.aggregate_name_and_substs(id_a, substitution_a, id_b, substitution_b) + } + (TyKind::Ref(id_a, lifetime_a, ty_a), TyKind::Ref(id_b, lifetime_b, ty_b)) => { + id_a != id_b + || self.aggregate_lifetimes(lifetime_a, lifetime_b) + || self.aggregate_tys(ty_a, ty_b) + } + (TyKind::Raw(id_a, ty_a), TyKind::Raw(id_b, ty_b)) => { + id_a != id_b || self.aggregate_tys(ty_a, ty_b) + } + (TyKind::Never, TyKind::Never) => 
false, + (TyKind::Array(ty_a, const_a), TyKind::Array(ty_b, const_b)) => { + self.aggregate_tys(ty_a, ty_b) || self.aggregate_consts(const_a, const_b) + } + (TyKind::Closure(id_a, substitution_a), TyKind::Closure(id_b, substitution_b)) => { + self.aggregate_name_and_substs(id_a, substitution_a, id_b, substitution_b) + } + (TyKind::Coroutine(id_a, substitution_a), TyKind::Coroutine(id_b, substitution_b)) => { + self.aggregate_name_and_substs(id_a, substitution_a, id_b, substitution_b) + } + ( + TyKind::CoroutineWitness(id_a, substitution_a), + TyKind::CoroutineWitness(id_b, substitution_b), + ) => self.aggregate_name_and_substs(id_a, substitution_a, id_b, substitution_b), + (TyKind::Foreign(id_a), TyKind::Foreign(id_b)) => id_a != id_b, + (TyKind::Error, TyKind::Error) => false, + + (_, _) => true, + } + } + + /// Returns true if the two consts could be unequal. + fn aggregate_lifetimes(&mut self, _: &Lifetime, _: &Lifetime) -> bool { + true + } + + /// Returns true if the two consts could be unequal. 
+ fn aggregate_consts(&mut self, new: &Const, current: &Const) -> bool { + let interner = self.interner; + let ConstData { + ty: new_ty, + value: new_value, + } = new.data(interner); + let ConstData { + ty: current_ty, + value: current_value, + } = current.data(interner); + + if self.aggregate_tys(new_ty, current_ty) { + return true; + } + + match (new_value, current_value) { + (_, ConstValue::BoundVar(_)) => { + // see comment in aggregate_tys + false + } + + (ConstValue::BoundVar(_), _) => { + // see comment in aggregate_tys + true + } + + (ConstValue::InferenceVar(_), _) | (_, ConstValue::InferenceVar(_)) => { + panic!( + "unexpected free inference variable in may-invalidate: {:?} vs {:?}", + new, current, + ); + } + + (ConstValue::Placeholder(p1), ConstValue::Placeholder(p2)) => { + self.aggregate_placeholders(p1, p2) + } + + (ConstValue::Concrete(c1), ConstValue::Concrete(c2)) => { + !c1.const_eq(new_ty, c2, interner) + } + + // Only variants left are placeholder = concrete, which always fails + (ConstValue::Placeholder(_), _) | (ConstValue::Concrete(_), _) => true, + } + } + + fn aggregate_placeholders( + &mut self, + new: &PlaceholderIndex, + current: &PlaceholderIndex, + ) -> bool { + new != current + } + + fn aggregate_projection_tys( + &mut self, + new: &ProjectionTy, + current: &ProjectionTy, + ) -> bool { + let ProjectionTy { + associated_ty_id: new_name, + substitution: new_substitution, + } = new; + let ProjectionTy { + associated_ty_id: current_name, + substitution: current_substitution, + } = current; + + self.aggregate_name_and_substs( + new_name, + new_substitution, + current_name, + current_substitution, + ) + } + + fn aggregate_opaque_ty_tys(&mut self, new: &OpaqueTy, current: &OpaqueTy) -> bool { + let OpaqueTy { + opaque_ty_id: new_name, + substitution: new_substitution, + } = new; + let OpaqueTy { + opaque_ty_id: current_name, + substitution: current_substitution, + } = current; + + self.aggregate_name_and_substs( + new_name, + 
new_substitution, + current_name, + current_substitution, + ) + } + + fn aggregate_name_and_substs( + &mut self, + new_name: N, + new_substitution: &Substitution, + current_name: N, + current_substitution: &Substitution, + ) -> bool + where + N: Copy + Eq + Debug, + { + let interner = self.interner; + if new_name != current_name { + return true; + } + + let name = new_name; + + assert_eq!( + new_substitution.len(interner), + current_substitution.len(interner), + "does {:?} take {} substitution or {}? can't both be right", + name, + new_substitution.len(interner), + current_substitution.len(interner) + ); + + new_substitution + .iter(interner) + .zip(current_substitution.iter(interner)) + .any(|(new, current)| self.aggregate_generic_args(new, current)) + } +} diff --git a/chalk-engine/src/slg/aggregate.rs b/chalk-engine/src/slg/aggregate.rs new file mode 100644 index 00000000000..f8a1f84de81 --- /dev/null +++ b/chalk-engine/src/slg/aggregate.rs @@ -0,0 +1,642 @@ +use crate::context::{self, AnswerResult}; +use crate::slg::SlgContextOps; +use crate::slg::SubstitutionExt; +use crate::CompleteAnswer; +use chalk_ir::cast::Cast; +use chalk_ir::interner::Interner; +use chalk_ir::*; +use chalk_solve::ext::*; +use chalk_solve::infer::InferenceTable; +use chalk_solve::solve::{Guidance, Solution}; + +use std::fmt::Debug; + +/// Methods for combining solutions to yield an aggregate solution. +pub trait AggregateOps { + fn make_solution( + &self, + root_goal: &UCanonical>>, + answers: impl context::AnswerStream, + should_continue: impl std::ops::Fn() -> bool + Clone, + ) -> Option>; +} + +/// Draws as many answers as it needs from `answers` (but +/// no more!) in order to come up with a solution. 
+impl AggregateOps for SlgContextOps<'_, I> { + fn make_solution( + &self, + root_goal: &UCanonical>>, + mut answers: impl context::AnswerStream, + should_continue: impl std::ops::Fn() -> bool + Clone, + ) -> Option> { + let interner = self.program.interner(); + let CompleteAnswer { subst, ambiguous } = match answers.next_answer(&should_continue) { + AnswerResult::NoMoreSolutions => { + // No answers at all + return None; + } + AnswerResult::Answer(answer) => answer, + AnswerResult::Floundered => CompleteAnswer { + subst: self.identity_constrained_subst(root_goal), + ambiguous: true, + }, + AnswerResult::QuantumExceeded => { + return Some(Solution::Ambig(Guidance::Unknown)); + } + }; + + // Exactly 1 unconditional answer? + let next_answer = answers.peek_answer(&should_continue); + if next_answer.is_quantum_exceeded() { + if subst.value.subst.is_identity_subst(interner) { + return Some(Solution::Ambig(Guidance::Unknown)); + } else { + return Some(Solution::Ambig(Guidance::Suggested( + subst.map(interner, |cs| cs.subst), + ))); + } + } + if next_answer.is_no_more_solutions() && !ambiguous { + return Some(Solution::Unique(subst)); + } + + // Otherwise, we either have >1 answer, or else we have + // ambiguity. Either way, we are only going to be giving back + // **guidance**, and with guidance, the caller doesn't get + // back any region constraints. So drop them from our `subst` + // variable. + // + // FIXME-- there is actually a 3rd possibility. We could have + // >1 answer where all the answers have the same substitution, + // but different region constraints. We should collapse those + // cases into an `OR` region constraint at some point, but I + // leave that for future work. This is basically + // rust-lang/rust#21974. + let mut subst = subst.map(interner, |cs| cs.subst); + + // Extract answers and merge them into `subst`. Stop once we have + // a trivial subst (or run out of answers). 
+ let mut num_answers = 1; + let guidance = loop { + if subst.value.is_empty(interner) || is_trivial(interner, &subst) { + break Guidance::Unknown; + } + + if !answers + .any_future_answer(|ref mut new_subst| new_subst.may_invalidate(interner, &subst)) + { + break Guidance::Definite(subst); + } + + if let Some(expected_answers) = self.expected_answers { + if num_answers >= expected_answers { + panic!("Too many answers for solution."); + } + } + + let new_subst = match answers.next_answer(&should_continue) { + AnswerResult::Answer(answer1) => answer1.subst, + AnswerResult::Floundered => { + // FIXME: this doesn't trigger for any current tests + self.identity_constrained_subst(root_goal) + } + AnswerResult::NoMoreSolutions => { + break Guidance::Definite(subst); + } + AnswerResult::QuantumExceeded => { + break Guidance::Suggested(subst); + } + }; + subst = merge_into_guidance(interner, &root_goal.canonical, subst, &new_subst); + num_answers += 1; + }; + + if let Some(expected_answers) = self.expected_answers { + assert_eq!( + expected_answers, num_answers, + "Not enough answers for solution." + ); + } + Some(Solution::Ambig(guidance)) + } +} + +/// Given a current substitution used as guidance for `root_goal`, and +/// a new possible answer to `root_goal`, returns a new set of +/// guidance that encompasses both of them. This is often more general +/// than the old guidance. For example, if we had a guidance of `?0 = +/// u32` and the new answer is `?0 = i32`, then the guidance would +/// become `?0 = ?X` (where `?X` is some fresh variable). +fn merge_into_guidance( + interner: I, + root_goal: &Canonical>>, + guidance: Canonical>, + answer: &Canonical>, +) -> Canonical> { + let mut infer = InferenceTable::new(); + let Canonical { + value: ConstrainedSubst { + subst: subst1, + constraints: _, + }, + binders: _, + } = answer; + + // Collect the types that the two substitutions have in + // common. 
+ let aggr_generic_args: Vec<_> = guidance + .value + .iter(interner) + .zip(subst1.iter(interner)) + .enumerate() + .map(|(index, (p1, p2))| { + // We have two values for some variable X that + // appears in the root goal. Find out the universe + // of X. + let universe = *root_goal.binders.as_slice(interner)[index].skip_kind(); + + match p1.data(interner) { + GenericArgData::Ty(_) => (), + GenericArgData::Lifetime(_) => { + // Ignore the lifetimes from the substitution: we're just + // creating guidance here anyway. + return infer + .new_variable(universe) + .to_lifetime(interner) + .cast(interner); + } + GenericArgData::Const(_) => (), + }; + + // Combine the two types into a new type. + let mut aggr = AntiUnifier { + infer: &mut infer, + universe, + interner, + }; + aggr.aggregate_generic_args(p1, p2) + }) + .collect(); + + let aggr_subst = Substitution::from_iter(interner, aggr_generic_args); + + infer.canonicalize(interner, aggr_subst).quantified +} + +fn is_trivial(interner: I, subst: &Canonical>) -> bool { + // A subst is trivial if.. + subst + .value + .iter(interner) + .enumerate() + .all(|(index, parameter)| { + let is_trivial = |b: Option| match b { + None => false, + Some(bound_var) => { + if let Some(index1) = bound_var.index_if_innermost() { + index == index1 + } else { + false + } + } + }; + + match parameter.data(interner) { + // All types and consts are mapped to distinct variables. Since this + // has been canonicalized, those will also be the first N + // variables. + GenericArgData::Ty(t) => is_trivial(t.bound_var(interner)), + GenericArgData::Const(t) => is_trivial(t.bound_var(interner)), + + // And no lifetime mappings. (This is too strict, but we never + // product substs with lifetimes.) + GenericArgData::Lifetime(_) => false, + } + }) +} + +/// [Anti-unification] is the act of taking two things that do not +/// unify and finding a minimal generalization of them. So for +/// example `Vec` anti-unified with `Vec` might be +/// `Vec`. 
This is a **very simplistic** anti-unifier. +/// +/// NOTE: The values here are canonicalized, but output is not, this means +/// that any escaping bound variables that we see have to be replaced with +/// inference variables. +/// +/// [Anti-unification]: https://en.wikipedia.org/wiki/Anti-unification_(computer_science) +struct AntiUnifier<'infer, I: Interner> { + infer: &'infer mut InferenceTable, + universe: UniverseIndex, + interner: I, +} + +impl AntiUnifier<'_, I> { + fn aggregate_tys(&mut self, ty0: &Ty, ty1: &Ty) -> Ty { + let interner = self.interner; + match (ty0.kind(interner), ty1.kind(interner)) { + // If we see bound things on either side, just drop in a + // fresh variable. This means we will sometimes + // overgeneralize. So for example if we have two + // solutions that are both `(X, X)`, we just produce `(Y, + // Z)` in all cases. + (TyKind::InferenceVar(_, _), TyKind::InferenceVar(_, _)) => self.new_ty_variable(), + + // Ugh. Aggregating two types like `for<'a> fn(&'a u32, + // &'a u32)` and `for<'a, 'b> fn(&'a u32, &'b u32)` seems + // kinda hard. Don't try to be smart for now, just plop a + // variable in there and be done with it. + // This also ensures that any bound variables we do see + // were bound by `Canonical`. 
+ (TyKind::BoundVar(_), TyKind::BoundVar(_)) + | (TyKind::Function(_), TyKind::Function(_)) + | (TyKind::Dyn(_), TyKind::Dyn(_)) => self.new_ty_variable(), + + ( + TyKind::Alias(AliasTy::Projection(proj1)), + TyKind::Alias(AliasTy::Projection(proj2)), + ) => self.aggregate_projection_tys(proj1, proj2), + + ( + TyKind::Alias(AliasTy::Opaque(opaque_ty1)), + TyKind::Alias(AliasTy::Opaque(opaque_ty2)), + ) => self.aggregate_opaque_ty_tys(opaque_ty1, opaque_ty2), + + (TyKind::Placeholder(placeholder1), TyKind::Placeholder(placeholder2)) => { + self.aggregate_placeholder_tys(placeholder1, placeholder2) + } + + (TyKind::Adt(id_a, substitution_a), TyKind::Adt(id_b, substitution_b)) => self + .aggregate_name_and_substs(id_a, substitution_a, id_b, substitution_b) + .map(|(&name, substitution)| TyKind::Adt(name, substitution).intern(interner)) + .unwrap_or_else(|| self.new_ty_variable()), + ( + TyKind::AssociatedType(id_a, substitution_a), + TyKind::AssociatedType(id_b, substitution_b), + ) => self + .aggregate_name_and_substs(id_a, substitution_a, id_b, substitution_b) + .map(|(&name, substitution)| { + TyKind::AssociatedType(name, substitution).intern(interner) + }) + .unwrap_or_else(|| self.new_ty_variable()), + (TyKind::Scalar(scalar_a), TyKind::Scalar(scalar_b)) => { + if scalar_a == scalar_b { + TyKind::Scalar(*scalar_a).intern(interner) + } else { + self.new_ty_variable() + } + } + (TyKind::Str, TyKind::Str) => TyKind::Str.intern(interner), + (TyKind::Tuple(arity_a, substitution_a), TyKind::Tuple(arity_b, substitution_b)) => { + self.aggregate_name_and_substs(arity_a, substitution_a, arity_b, substitution_b) + .map(|(&name, substitution)| TyKind::Tuple(name, substitution).intern(interner)) + .unwrap_or_else(|| self.new_ty_variable()) + } + ( + TyKind::OpaqueType(id_a, substitution_a), + TyKind::OpaqueType(id_b, substitution_b), + ) => self + .aggregate_name_and_substs(id_a, substitution_a, id_b, substitution_b) + .map(|(&name, substitution)| { + 
TyKind::OpaqueType(name, substitution).intern(interner) + }) + .unwrap_or_else(|| self.new_ty_variable()), + (TyKind::Slice(ty_a), TyKind::Slice(ty_b)) => { + TyKind::Slice(self.aggregate_tys(ty_a, ty_b)).intern(interner) + } + (TyKind::FnDef(id_a, substitution_a), TyKind::FnDef(id_b, substitution_b)) => self + .aggregate_name_and_substs(id_a, substitution_a, id_b, substitution_b) + .map(|(&name, substitution)| TyKind::FnDef(name, substitution).intern(interner)) + .unwrap_or_else(|| self.new_ty_variable()), + (TyKind::Ref(id_a, lifetime_a, ty_a), TyKind::Ref(id_b, lifetime_b, ty_b)) => { + if id_a == id_b { + TyKind::Ref( + *id_a, + self.aggregate_lifetimes(lifetime_a, lifetime_b), + self.aggregate_tys(ty_a, ty_b), + ) + .intern(interner) + } else { + self.new_ty_variable() + } + } + (TyKind::Raw(id_a, ty_a), TyKind::Raw(id_b, ty_b)) => { + if id_a == id_b { + TyKind::Raw(*id_a, self.aggregate_tys(ty_a, ty_b)).intern(interner) + } else { + self.new_ty_variable() + } + } + (TyKind::Never, TyKind::Never) => TyKind::Never.intern(interner), + (TyKind::Array(ty_a, const_a), TyKind::Array(ty_b, const_b)) => TyKind::Array( + self.aggregate_tys(ty_a, ty_b), + self.aggregate_consts(const_a, const_b), + ) + .intern(interner), + (TyKind::Closure(id_a, substitution_a), TyKind::Closure(id_b, substitution_b)) => self + .aggregate_name_and_substs(id_a, substitution_a, id_b, substitution_b) + .map(|(&name, substitution)| TyKind::Closure(name, substitution).intern(interner)) + .unwrap_or_else(|| self.new_ty_variable()), + (TyKind::Coroutine(id_a, substitution_a), TyKind::Coroutine(id_b, substitution_b)) => { + self.aggregate_name_and_substs(id_a, substitution_a, id_b, substitution_b) + .map(|(&name, substitution)| { + TyKind::Coroutine(name, substitution).intern(interner) + }) + .unwrap_or_else(|| self.new_ty_variable()) + } + ( + TyKind::CoroutineWitness(id_a, substitution_a), + TyKind::CoroutineWitness(id_b, substitution_b), + ) => self + .aggregate_name_and_substs(id_a, 
substitution_a, id_b, substitution_b) + .map(|(&name, substitution)| { + TyKind::CoroutineWitness(name, substitution).intern(interner) + }) + .unwrap_or_else(|| self.new_ty_variable()), + (TyKind::Foreign(id_a), TyKind::Foreign(id_b)) => { + if id_a == id_b { + TyKind::Foreign(*id_a).intern(interner) + } else { + self.new_ty_variable() + } + } + (TyKind::Error, TyKind::Error) => TyKind::Error.intern(interner), + + (_, _) => self.new_ty_variable(), + } + } + + fn aggregate_placeholder_tys( + &mut self, + index1: &PlaceholderIndex, + index2: &PlaceholderIndex, + ) -> Ty { + let interner = self.interner; + if index1 != index2 { + self.new_ty_variable() + } else { + TyKind::Placeholder(*index1).intern(interner) + } + } + + fn aggregate_projection_tys( + &mut self, + proj1: &ProjectionTy, + proj2: &ProjectionTy, + ) -> Ty { + let interner = self.interner; + let ProjectionTy { + associated_ty_id: name1, + substitution: substitution1, + } = proj1; + let ProjectionTy { + associated_ty_id: name2, + substitution: substitution2, + } = proj2; + + self.aggregate_name_and_substs(name1, substitution1, name2, substitution2) + .map(|(&associated_ty_id, substitution)| { + TyKind::Alias(AliasTy::Projection(ProjectionTy { + associated_ty_id, + substitution, + })) + .intern(interner) + }) + .unwrap_or_else(|| self.new_ty_variable()) + } + + fn aggregate_opaque_ty_tys( + &mut self, + opaque_ty1: &OpaqueTy, + opaque_ty2: &OpaqueTy, + ) -> Ty { + let OpaqueTy { + opaque_ty_id: name1, + substitution: substitution1, + } = opaque_ty1; + let OpaqueTy { + opaque_ty_id: name2, + substitution: substitution2, + } = opaque_ty2; + + self.aggregate_name_and_substs(name1, substitution1, name2, substitution2) + .map(|(&opaque_ty_id, substitution)| { + TyKind::Alias(AliasTy::Opaque(OpaqueTy { + opaque_ty_id, + substitution, + })) + .intern(self.interner) + }) + .unwrap_or_else(|| self.new_ty_variable()) + } + + fn aggregate_name_and_substs( + &mut self, + name1: N, + substitution1: &Substitution, + 
name2: N, + substitution2: &Substitution, + ) -> Option<(N, Substitution)> + where + N: Copy + Eq + Debug, + { + let interner = self.interner; + if name1 != name2 { + return None; + } + + let name = name1; + + assert_eq!( + substitution1.len(interner), + substitution2.len(interner), + "does {:?} take {} substitution or {}? can't both be right", + name, + substitution1.len(interner), + substitution2.len(interner) + ); + + let substitution = Substitution::from_iter( + interner, + substitution1 + .iter(interner) + .zip(substitution2.iter(interner)) + .map(|(p1, p2)| self.aggregate_generic_args(p1, p2)), + ); + + Some((name, substitution)) + } + + fn aggregate_generic_args(&mut self, p1: &GenericArg, p2: &GenericArg) -> GenericArg { + let interner = self.interner; + match (p1.data(interner), p2.data(interner)) { + (GenericArgData::Ty(ty1), GenericArgData::Ty(ty2)) => { + self.aggregate_tys(ty1, ty2).cast(interner) + } + (GenericArgData::Lifetime(l1), GenericArgData::Lifetime(l2)) => { + self.aggregate_lifetimes(l1, l2).cast(interner) + } + (GenericArgData::Const(c1), GenericArgData::Const(c2)) => { + self.aggregate_consts(c1, c2).cast(interner) + } + (GenericArgData::Ty(_), _) + | (GenericArgData::Lifetime(_), _) + | (GenericArgData::Const(_), _) => { + panic!("mismatched parameter kinds: p1={:?} p2={:?}", p1, p2) + } + } + } + + fn aggregate_lifetimes(&mut self, l1: &Lifetime, l2: &Lifetime) -> Lifetime { + let interner = self.interner; + match (l1.data(interner), l2.data(interner)) { + (LifetimeData::Phantom(void, ..), _) | (_, LifetimeData::Phantom(void, ..)) => { + match *void {} + } + (LifetimeData::BoundVar(..), _) | (_, LifetimeData::BoundVar(..)) => { + self.new_lifetime_variable() + } + _ => { + if l1 == l2 { + l1.clone() + } else { + self.new_lifetime_variable() + } + } + } + } + + fn aggregate_consts(&mut self, c1: &Const, c2: &Const) -> Const { + let interner = self.interner; + + // It would be nice to check that c1 and c2 have the same type, even though + 
// on this stage of solving they should already have the same type. + + let ConstData { + ty: c1_ty, + value: c1_value, + } = c1.data(interner); + let ConstData { + ty: _c2_ty, + value: c2_value, + } = c2.data(interner); + + let ty = c1_ty.clone(); + + match (c1_value, c2_value) { + (ConstValue::InferenceVar(_), _) | (_, ConstValue::InferenceVar(_)) => { + self.new_const_variable(ty) + } + + (ConstValue::BoundVar(_), _) | (_, ConstValue::BoundVar(_)) => { + self.new_const_variable(ty) + } + + (ConstValue::Placeholder(_), ConstValue::Placeholder(_)) => { + if c1 == c2 { + c1.clone() + } else { + self.new_const_variable(ty) + } + } + (ConstValue::Concrete(e1), ConstValue::Concrete(e2)) => { + if e1.const_eq(&ty, e2, interner) { + c1.clone() + } else { + self.new_const_variable(ty) + } + } + + (ConstValue::Placeholder(_), _) | (_, ConstValue::Placeholder(_)) => { + self.new_const_variable(ty) + } + } + } + + fn new_ty_variable(&mut self) -> Ty { + let interner = self.interner; + self.infer.new_variable(self.universe).to_ty(interner) + } + + fn new_lifetime_variable(&mut self) -> Lifetime { + let interner = self.interner; + self.infer.new_variable(self.universe).to_lifetime(interner) + } + + fn new_const_variable(&mut self, ty: Ty) -> Const { + let interner = self.interner; + self.infer + .new_variable(self.universe) + .to_const(interner, ty) + } +} + +#[cfg(test)] +mod test { + use crate::slg::aggregate::AntiUnifier; + use chalk_integration::{arg, ty}; + use chalk_ir::UniverseIndex; + use chalk_solve::infer::InferenceTable; + + /// Test the equivalent of `Vec` vs `Vec` + #[test] + fn vec_i32_vs_vec_u32() { + use chalk_integration::interner::ChalkIr; + let mut infer: InferenceTable = InferenceTable::new(); + let mut anti_unifier = AntiUnifier { + infer: &mut infer, + universe: UniverseIndex::root(), + interner: ChalkIr, + }; + + let ty = anti_unifier.aggregate_tys( + &ty!(apply (item 0) (apply (item 1))), + &ty!(apply (item 0) (apply (item 2))), + ); + 
assert_eq!(ty!(apply (item 0) (infer 0)), ty);
+    }
+
+    /// Test the equivalent of `Vec<i32>` vs `Vec<i32>`
+    #[test]
+    fn vec_i32_vs_vec_i32() {
+        use chalk_integration::interner::ChalkIr;
+        let interner = ChalkIr;
+        let mut infer: InferenceTable = InferenceTable::new();
+        let mut anti_unifier = AntiUnifier {
+            interner,
+            infer: &mut infer,
+            universe: UniverseIndex::root(),
+        };
+
+        let ty = anti_unifier.aggregate_tys(
+            &ty!(apply (item 0) (apply (item 1))),
+            &ty!(apply (item 0) (apply (item 1))),
+        );
+        assert_eq!(ty!(apply (item 0) (apply (item 1))), ty);
+    }
+
+    /// Test the equivalent of `Vec<?X>` vs `Vec<?Y>`
+    #[test]
+    fn vec_x_vs_vec_y() {
+        use chalk_integration::interner::ChalkIr;
+        let interner = ChalkIr;
+        let mut infer: InferenceTable = InferenceTable::new();
+        let mut anti_unifier = AntiUnifier {
+            interner,
+            infer: &mut infer,
+            universe: UniverseIndex::root(),
+        };
+
+        // Note that the `var 0` and `var 1` in these types would be
+        // referring to canonicalized free variables, not variables in
+        // `infer`.
+        let ty = anti_unifier.aggregate_tys(
+            &ty!(apply (item 0) (infer 0)),
+            &ty!(apply (item 0) (infer 1)),
+        );
+
+        // But this `var 0` is from `infer`.
+ assert_eq!(ty!(apply (item 0) (infer 0)), ty); + } +} diff --git a/chalk-engine/src/slg/resolvent.rs b/chalk-engine/src/slg/resolvent.rs new file mode 100644 index 00000000000..8606026006a --- /dev/null +++ b/chalk-engine/src/slg/resolvent.rs @@ -0,0 +1,730 @@ +use crate::normalize_deep::DeepNormalizer; +use crate::slg::ResolventOps; +use crate::{ExClause, Literal, TimeStamp}; +use chalk_ir::cast::Caster; +use chalk_ir::fold::shift::Shift; +use chalk_ir::fold::TypeFoldable; +use chalk_ir::interner::{HasInterner, Interner}; +use chalk_ir::zip::{Zip, Zipper}; +use chalk_ir::*; +use chalk_solve::infer::InferenceTable; +use tracing::{debug, instrument}; + +/////////////////////////////////////////////////////////////////////////// +// SLG RESOLVENTS +// +// The "SLG Resolvent" is used to combine a *goal* G with some +// clause or answer *C*. It unifies the goal's selected literal +// with the clause and then inserts the clause's conditions into +// the goal's list of things to prove, basically. Although this is +// one operation in EWFS, we have specialized variants for merging +// a program clause and an answer (though they share some code in +// common). +// +// Terminology note: The NFTD and RR papers use the term +// "resolvent" to mean both the factor and the resolvent, but EWFS +// distinguishes the two. We follow EWFS here since -- in the code +// -- we tend to know whether there are delayed literals or not, +// and hence to know which code path we actually want. +// +// From EWFS: +// +// Let G be an X-clause A :- D | L1,...Ln, where N > 0, and Li be selected atom. +// +// Let C be an X-clause with no delayed literals. Let +// +// C' = A' :- L'1...L'm +// +// be a variant of C such that G and C' have no variables in +// common. +// +// Let Li and A' be unified with MGU S. +// +// Then: +// +// S(A :- D | L1...Li-1, L1'...L'm, Li+1...Ln) +// +// is the SLG resolvent of G with C. 
+ +impl ResolventOps for InferenceTable { + /// Applies the SLG resolvent algorithm to incorporate a program + /// clause into the main X-clause, producing a new X-clause that + /// must be solved. + /// + /// # Parameters + /// + /// - `goal` is the goal G that we are trying to solve + /// - `clause` is the program clause that may be useful to that end + #[instrument(level = "debug", skip(self, interner, environment, subst))] + fn resolvent_clause( + &mut self, + db: &dyn UnificationDatabase, + interner: I, + environment: &Environment, + goal: &DomainGoal, + subst: &Substitution, + clause: &ProgramClause, + ) -> Fallible> { + // Relating the above description to our situation: + // + // - `goal` G, except with binders for any existential variables. + // - Also, we always select the first literal in `ex_clause.literals`, so `i` is 0. + // - `clause` is C, except with binders for any existential variables. + + // C' in the description above is `consequence :- conditions`. + // + // Note that G and C' have no variables in common. + let ProgramClauseImplication { + consequence, + conditions, + constraints, + priority: _, + } = { + let ProgramClauseData(implication) = clause.data(interner); + + self.instantiate_binders_existentially(interner, implication.clone()) + }; + debug!(?consequence, ?conditions, ?constraints); + + // Unify the selected literal Li with C'. + let unification_result = self.relate( + interner, + db, + environment, + Variance::Invariant, + goal, + &consequence, + )?; + + // Final X-clause that we will return. + let mut ex_clause = ExClause { + subst: subst.clone(), + ambiguous: false, + constraints: vec![], + subgoals: vec![], + delayed_subgoals: vec![], + answer_time: TimeStamp::default(), + floundered_subgoals: vec![], + }; + + // Add the subgoals/region-constraints that unification gave us. 
+ ex_clause.subgoals.extend( + unification_result + .goals + .into_iter() + .casted(interner) + .map(Literal::Positive), + ); + + ex_clause + .constraints + .extend(constraints.as_slice(interner).to_owned()); + + // Add the `conditions` from the program clause into the result too. + ex_clause + .subgoals + .extend(conditions.iter(interner).map(|c| match c.data(interner) { + GoalData::Not(c1) => { + Literal::Negative(InEnvironment::new(environment, Goal::clone(c1))) + } + _ => Literal::Positive(InEnvironment::new(environment, Goal::clone(c))), + })); + + Ok(ex_clause) + } + + /////////////////////////////////////////////////////////////////////////// + // apply_answer_subst + // + // Apply answer subst has the job of "plugging in" the answer to a + // query into the pending ex-clause. To see how it works, it's worth stepping + // up one level. Imagine that first we are trying to prove a goal A: + // + // A :- T: Foo>, ?U: Bar + // + // this spawns a subgoal `T: Foo>`, and it's this subgoal that + // has now produced an answer `?0 = u32`. When the goal A spawned the + // subgoal, it will also have registered a `PendingExClause` with its + // current state. At the point where *this* method has been invoked, + // that pending ex-clause has been instantiated with fresh variables and setup, + // so we have four bits of incoming information: + // + // - `ex_clause`, which is the remaining stuff to prove for the goal A. + // Here, the inference variable `?U` has been instantiated with a fresh variable + // `?X`. + // - `A :- ?X: Bar` + // - `selected_goal`, which is the thing we were trying to prove when we + // spawned the subgoal. It shares inference variables with `ex_clause`. + // - `T: Foo>` + // - `answer_table_goal`, which is the subgoal in canonical form: + // - `for T: Foo>` + // - `canonical_answer_subst`, which is an answer to `answer_table_goal`. 
+ // - `[?0 = u32]` + // + // In this case, this function will (a) unify `u32` and `?X` and then + // (b) return `ex_clause` (extended possibly with new region constraints + // and subgoals). + // + // One way to do this would be to (a) substitute + // `canonical_answer_subst` into `answer_table_goal` (yielding `T: + // Foo>`) and then (b) instantiate the result with fresh + // variables (no effect in this instance) and then (c) unify that with + // `selected_goal` (yielding, indirectly, that `?X = u32`). But that + // is not what we do: it's inefficient, to start, but it also causes + // problems because unification of projections can make new + // sub-goals. That is, even if the answers don't involve any + // projections, the table goals might, and this can create an infinite + // loop (see also #74). + // + // What we do instead is to (a) instantiate the substitution, which + // may have free variables in it (in this case, it would not, and the + // instantiation would have no effect) and then (b) zip + // `answer_table_goal` and `selected_goal` without having done any + // substitution. After all, these ought to be basically the same, + // since `answer_table_goal` was created by canonicalizing (and + // possibly truncating, but we'll get to that later) + // `selected_goal`. Then, whenever we reach a "free variable" in + // `answer_table_goal`, say `?0`, we go to the instantiated answer + // substitution and lookup the result (in this case, `u32`). We take + // that result and unify it with whatever we find in `selected_goal` + // (in this case, `?X`). + // + // Let's cover then some corner cases. First off, what is this + // business of instantiating the answer? Well, the answer may not be a + // simple type like `u32`, it could be a "family" of types, like + // `for Vec` -- i.e., `Vec: Bar` for *any* `X`. In that + // case, the instantiation would produce a substitution `[?0 := + // Vec]` (note that the key is not affected, just the value). 
So
+    // when we do the unification, instead of unifying `?X = u32`, we
+    // would unify `?X = Vec<?0>`.
+    // (NOTE: generic parameters in this comment were lost to markup
+    // stripping; the `Vec<...>` forms here are reconstructions — verify
+    // against the upstream source.)
+    //
+    // Next, truncation. One key thing is that the `answer_table_goal` may
+    // not be *exactly* the same as the `selected_goal` -- we will
+    // truncate it if it gets too deep. so, in our example, it may be that
+    // instead of `answer_table_goal` being `for<type> ?T: Foo<Vec<?U>>`,
+    // it could have been truncated to `for<type> ?T: Foo<?U>` (which is a
+    // more general goal). In that case, let's say that the answer is
+    // still `[?0 = u32]`, meaning that `?T: Foo<u32>` is true (which isn't
+    // actually interesting to our original goal). When we do the zip
+    // then, we will encounter `?0` in the `answer_table_goal` and pair
+    // that with `Vec<?X>` from the pending goal. We will attempt to unify
+    // `Vec<?X>` with `u32` (from the substitution), which will fail. That
+    // failure will get propagated back up.
+
+    #[instrument(level = "debug", skip(self, interner))]
+    fn apply_answer_subst(
+        &mut self,
+        interner: I,
+        unification_database: &dyn UnificationDatabase,
+        ex_clause: &mut ExClause,
+        selected_goal: &InEnvironment>,
+        answer_table_goal: &Canonical>>,
+        canonical_answer_subst: Canonical>,
+    ) -> Fallible<()> {
+        debug!(selected_goal = ?DeepNormalizer::normalize_deep(self, interner, selected_goal.clone()));
+
+        // C' is now `answer`. No variables in common with G.
+        let AnswerSubst {
+            subst: answer_subst,
+
+            // Assuming unification succeeds, we incorporate the
+            // region constraints from the answer into the result;
+            // we'll need them if this answer (which is not yet known
+            // to be true) winds up being true, and otherwise (if the
+            // answer is false or unknown) it doesn't matter.
+ constraints: answer_constraints, + + delayed_subgoals, + } = self.instantiate_canonical(interner, canonical_answer_subst); + + AnswerSubstitutor::substitute( + interner, + unification_database, + self, + &selected_goal.environment, + &answer_subst, + ex_clause, + &answer_table_goal.value, + selected_goal, + )?; + ex_clause + .constraints + .extend(answer_constraints.as_slice(interner).to_vec()); + // at that point we should only have goals that stemmed + // from non trivial self cycles + ex_clause.delayed_subgoals.extend(delayed_subgoals); + Ok(()) + } +} + +struct AnswerSubstitutor<'t, I: Interner> { + table: &'t mut InferenceTable, + environment: &'t Environment, + answer_subst: &'t Substitution, + + /// Tracks the debrujn index of the first binder that is outside + /// the term we are traversing. This starts as `DebruijnIndex::INNERMOST`, + /// since we have not yet traversed *any* binders, but when we visit + /// the inside of a binder, it would be incremented. + /// + /// Example: If we are visiting `(for A, B, C, for for D)`, + /// then this would be: + /// + /// * `1`, when visiting `A`, + /// * `0`, when visiting `B` and `C`, + /// * `2`, when visiting `D`. 
+ outer_binder: DebruijnIndex, + + ex_clause: &'t mut ExClause, + interner: I, + unification_database: &'t dyn UnificationDatabase, +} + +impl AnswerSubstitutor<'_, I> { + fn substitute>( + interner: I, + unification_database: &dyn UnificationDatabase, + table: &mut InferenceTable, + environment: &Environment, + answer_subst: &Substitution, + ex_clause: &mut ExClause, + answer: &T, + pending: &T, + ) -> Fallible<()> { + let mut this = AnswerSubstitutor { + interner, + unification_database, + table, + environment, + answer_subst, + ex_clause, + outer_binder: DebruijnIndex::INNERMOST, + }; + Zip::zip_with(&mut this, Variance::Invariant, answer, pending)?; + Ok(()) + } + + fn unify_free_answer_var( + &mut self, + interner: I, + db: &dyn UnificationDatabase, + variance: Variance, + answer_var: BoundVar, + pending: GenericArgData, + ) -> Fallible { + let answer_index = match answer_var.index_if_bound_at(self.outer_binder) { + Some(index) => index, + + // This variable is bound in the answer, not free, so it + // doesn't represent a reference into the answer substitution. + None => return Ok(false), + }; + + let answer_param = self.answer_subst.at(interner, answer_index); + + let pending_shifted = pending + .shifted_out_to(interner, self.outer_binder) + .expect("truncate extracted a pending value that references internal binder"); + + let result = self.table.relate( + interner, + db, + self.environment, + variance, + answer_param, + &GenericArg::new(interner, pending_shifted), + )?; + + self.ex_clause.subgoals.extend( + result + .goals + .into_iter() + .casted(interner) + .map(Literal::Positive), + ); + + Ok(true) + } + + /// When we encounter a variable in the answer goal, we first try + /// `unify_free_answer_var`. Assuming that this fails, the + /// variable must be a bound variable in the answer goal -- in + /// that case, there should be a corresponding bound variable in + /// the pending goal. This bit of code just checks that latter + /// case. 
+ fn assert_matching_vars( + &mut self, + answer_var: BoundVar, + pending_var: BoundVar, + ) -> Fallible<()> { + let BoundVar { + debruijn: answer_depth, + index: answer_index, + } = answer_var; + let BoundVar { + debruijn: pending_depth, + index: pending_index, + } = pending_var; + + // Both bound variables are bound within the term we are matching + assert!(answer_depth.within(self.outer_binder)); + + // They are bound at the same (relative) depth + assert_eq!(answer_depth, pending_depth); + + // They are bound at the same index within the binder + assert_eq!(answer_index, pending_index,); + + Ok(()) + } +} + +impl<'i, I: Interner> Zipper for AnswerSubstitutor<'i, I> { + fn zip_tys(&mut self, variance: Variance, answer: &Ty, pending: &Ty) -> Fallible<()> { + let interner = self.interner; + + if let Some(pending) = self.table.normalize_ty_shallow(interner, pending) { + return Zip::zip_with(self, variance, answer, &pending); + } + + // If the answer has a variable here, then this is one of the + // "inputs" to the subgoal table. We need to extract the + // resulting answer that the subgoal found and unify it with + // the value from our "pending subgoal". + if let TyKind::BoundVar(answer_depth) = answer.kind(interner) { + if self.unify_free_answer_var( + interner, + self.unification_database, + variance, + *answer_depth, + GenericArgData::Ty(pending.clone()), + )? { + return Ok(()); + } + } + + // Otherwise, the answer and the selected subgoal ought to be a perfect match for + // one another. 
+ match (answer.kind(interner), pending.kind(interner)) { + (TyKind::BoundVar(answer_depth), TyKind::BoundVar(pending_depth)) => { + self.assert_matching_vars(*answer_depth, *pending_depth) + } + + (TyKind::Dyn(answer), TyKind::Dyn(pending)) => { + Zip::zip_with(self, variance, answer, pending) + } + + (TyKind::Alias(answer), TyKind::Alias(pending)) => { + Zip::zip_with(self, variance, answer, pending) + } + + (TyKind::Placeholder(answer), TyKind::Placeholder(pending)) => { + Zip::zip_with(self, variance, answer, pending) + } + + (TyKind::Function(answer), TyKind::Function(pending)) => Zip::zip_with( + self, + variance, + &answer.clone().into_binders(interner), + &pending.clone().into_binders(interner), + ), + + (TyKind::InferenceVar(_, _), _) | (_, TyKind::InferenceVar(_, _)) => panic!( + "unexpected inference var in answer `{:?}` or pending goal `{:?}`", + answer, pending, + ), + + (TyKind::Adt(id_a, substitution_a), TyKind::Adt(id_b, substitution_b)) => { + if id_a != id_b { + return Err(NoSolution); + } + self.zip_substs( + variance, + Some(self.unification_database().adt_variance(*id_a)), + substitution_a.as_slice(interner), + substitution_b.as_slice(interner), + ) + } + ( + TyKind::AssociatedType(id_a, substitution_a), + TyKind::AssociatedType(id_b, substitution_b), + ) => { + if id_a != id_b { + return Err(NoSolution); + } + self.zip_substs( + variance, + None, + substitution_a.as_slice(interner), + substitution_b.as_slice(interner), + ) + } + (TyKind::Scalar(scalar_a), TyKind::Scalar(scalar_b)) => { + Zip::zip_with(self, variance, scalar_a, scalar_b) + } + (TyKind::Str, TyKind::Str) => Ok(()), + (TyKind::Tuple(arity_a, substitution_a), TyKind::Tuple(arity_b, substitution_b)) => { + if arity_a != arity_b { + return Err(NoSolution); + } + self.zip_substs( + variance, + None, + substitution_a.as_slice(interner), + substitution_b.as_slice(interner), + ) + } + ( + TyKind::OpaqueType(id_a, substitution_a), + TyKind::OpaqueType(id_b, substitution_b), + ) => { + if 
id_a != id_b { + return Err(NoSolution); + } + self.zip_substs( + variance, + None, + substitution_a.as_slice(interner), + substitution_b.as_slice(interner), + ) + } + (TyKind::Slice(ty_a), TyKind::Slice(ty_b)) => Zip::zip_with(self, variance, ty_a, ty_b), + (TyKind::FnDef(id_a, substitution_a), TyKind::FnDef(id_b, substitution_b)) => { + if id_a != id_b { + return Err(NoSolution); + } + self.zip_substs( + variance, + Some(self.unification_database().fn_def_variance(*id_a)), + substitution_a.as_slice(interner), + substitution_b.as_slice(interner), + ) + } + ( + TyKind::Ref(mutability_a, lifetime_a, ty_a), + TyKind::Ref(mutability_b, lifetime_b, ty_b), + ) => { + if mutability_a != mutability_b { + return Err(NoSolution); + } + // The lifetime is `Contravariant` + Zip::zip_with( + self, + variance.xform(Variance::Contravariant), + lifetime_a, + lifetime_b, + )?; + // The type is `Covariant` when not mut, `Invariant` otherwise + let output_variance = match mutability_a { + Mutability::Not => Variance::Covariant, + Mutability::Mut => Variance::Invariant, + }; + Zip::zip_with(self, variance.xform(output_variance), ty_a, ty_b) + } + (TyKind::Raw(mutability_a, ty_a), TyKind::Raw(mutability_b, ty_b)) => { + if mutability_a != mutability_b { + return Err(NoSolution); + } + let ty_variance = match mutability_a { + Mutability::Not => Variance::Covariant, + Mutability::Mut => Variance::Invariant, + }; + Zip::zip_with(self, variance.xform(ty_variance), ty_a, ty_b) + } + (TyKind::Never, TyKind::Never) => Ok(()), + (TyKind::Array(ty_a, const_a), TyKind::Array(ty_b, const_b)) => { + Zip::zip_with(self, variance, ty_a, ty_b)?; + Zip::zip_with(self, variance, const_a, const_b) + } + (TyKind::Closure(id_a, substitution_a), TyKind::Closure(id_b, substitution_b)) => { + if id_a != id_b { + return Err(NoSolution); + } + self.zip_substs( + variance, + None, + substitution_a.as_slice(interner), + substitution_b.as_slice(interner), + ) + } + (TyKind::Coroutine(id_a, substitution_a), 
TyKind::Coroutine(id_b, substitution_b)) => { + if id_a != id_b { + return Err(NoSolution); + } + self.zip_substs( + variance, + None, + substitution_a.as_slice(interner), + substitution_b.as_slice(interner), + ) + } + ( + TyKind::CoroutineWitness(id_a, substitution_a), + TyKind::CoroutineWitness(id_b, substitution_b), + ) => { + if id_a != id_b { + return Err(NoSolution); + } + self.zip_substs( + variance, + None, + substitution_a.as_slice(interner), + substitution_b.as_slice(interner), + ) + } + (TyKind::Foreign(id_a), TyKind::Foreign(id_b)) => { + Zip::zip_with(self, variance, id_a, id_b) + } + (TyKind::Error, TyKind::Error) => Ok(()), + + (_, _) => panic!( + "structural mismatch between answer `{:?}` and pending goal `{:?}`", + answer, pending, + ), + } + } + + fn zip_lifetimes( + &mut self, + variance: Variance, + answer: &Lifetime, + pending: &Lifetime, + ) -> Fallible<()> { + let interner = self.interner; + if let Some(pending) = self.table.normalize_lifetime_shallow(interner, pending) { + return Zip::zip_with(self, variance, answer, &pending); + } + + if let LifetimeData::BoundVar(answer_depth) = answer.data(interner) { + if self.unify_free_answer_var( + interner, + self.unification_database, + variance, + *answer_depth, + GenericArgData::Lifetime(pending.clone()), + )? 
{ + return Ok(()); + } + } + + match (answer.data(interner), pending.data(interner)) { + (LifetimeData::BoundVar(answer_depth), LifetimeData::BoundVar(pending_depth)) => { + self.assert_matching_vars(*answer_depth, *pending_depth) + } + + (LifetimeData::Static, LifetimeData::Static) + | (LifetimeData::Placeholder(_), LifetimeData::Placeholder(_)) + | (LifetimeData::Erased, LifetimeData::Erased) => { + assert_eq!(answer, pending); + Ok(()) + } + + (LifetimeData::InferenceVar(_), _) | (_, LifetimeData::InferenceVar(_)) => panic!( + "unexpected inference var in answer `{:?}` or pending goal `{:?}`", + answer, pending, + ), + + (LifetimeData::Static, _) + | (LifetimeData::BoundVar(_), _) + | (LifetimeData::Placeholder(_), _) + | (LifetimeData::Erased, _) + | (LifetimeData::Error, _) => panic!( + "structural mismatch between answer `{:?}` and pending goal `{:?}`", + answer, pending, + ), + + (LifetimeData::Phantom(void, _), _) => match *void {}, + } + } + + fn zip_consts( + &mut self, + variance: Variance, + answer: &Const, + pending: &Const, + ) -> Fallible<()> { + let interner = self.interner; + if let Some(pending) = self.table.normalize_const_shallow(interner, pending) { + return Zip::zip_with(self, variance, answer, &pending); + } + + let ConstData { + ty: answer_ty, + value: answer_value, + } = answer.data(interner); + let ConstData { + ty: pending_ty, + value: pending_value, + } = pending.data(interner); + + self.zip_tys(variance, answer_ty, pending_ty)?; + + if let ConstValue::BoundVar(answer_depth) = answer_value { + if self.unify_free_answer_var( + interner, + self.unification_database, + variance, + *answer_depth, + GenericArgData::Const(pending.clone()), + )? 
{ + return Ok(()); + } + } + + match (answer_value, pending_value) { + (ConstValue::BoundVar(answer_depth), ConstValue::BoundVar(pending_depth)) => { + self.assert_matching_vars(*answer_depth, *pending_depth) + } + + (ConstValue::Placeholder(_), ConstValue::Placeholder(_)) => { + assert_eq!(answer, pending); + Ok(()) + } + + (ConstValue::Concrete(c1), ConstValue::Concrete(c2)) => { + assert!(c1.const_eq(answer_ty, c2, interner)); + Ok(()) + } + + (ConstValue::InferenceVar(_), _) | (_, ConstValue::InferenceVar(_)) => panic!( + "unexpected inference var in answer `{:?}` or pending goal `{:?}`", + answer, pending, + ), + + (ConstValue::BoundVar(_), _) + | (ConstValue::Placeholder(_), _) + | (ConstValue::Concrete(_), _) => panic!( + "structural mismatch between answer `{:?}` and pending goal `{:?}`", + answer, pending, + ), + } + } + + fn zip_binders( + &mut self, + variance: Variance, + answer: &Binders, + pending: &Binders, + ) -> Fallible<()> + where + T: HasInterner + Zip + TypeFoldable, + { + self.outer_binder.shift_in(); + Zip::zip_with( + self, + variance, + answer.skip_binders(), + pending.skip_binders(), + )?; + self.outer_binder.shift_out(); + Ok(()) + } + + fn interner(&self) -> I { + self.interner + } + + fn unification_database(&self) -> &dyn UnificationDatabase { + self.unification_database + } +} diff --git a/chalk-engine/src/solve.rs b/chalk-engine/src/solve.rs new file mode 100644 index 00000000000..fc35adb6638 --- /dev/null +++ b/chalk-engine/src/solve.rs @@ -0,0 +1,89 @@ +use crate::context::{AnswerResult, AnswerStream}; +use crate::forest::Forest; +use crate::slg::aggregate::AggregateOps; +use crate::slg::SlgContextOps; +use chalk_ir::interner::Interner; +use chalk_ir::{Canonical, ConstrainedSubst, Goal, InEnvironment, UCanonical}; +use chalk_solve::{RustIrDatabase, Solution, Solver, SubstitutionResult}; + +use std::fmt; + +pub struct SLGSolver { + pub(crate) forest: Forest, + pub(crate) max_size: usize, + pub(crate) expected_answers: Option, +} + 
+impl SLGSolver { + pub fn new(max_size: usize, expected_answers: Option) -> Self { + Self { + forest: Forest::new(), + max_size, + expected_answers, + } + } +} + +impl fmt::Debug for SLGSolver { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(fmt, "SLGSolver") + } +} + +impl Solver for SLGSolver { + fn solve( + &mut self, + program: &dyn RustIrDatabase, + goal: &UCanonical>>, + ) -> Option> { + let ops = SlgContextOps::new(program, self.max_size, self.expected_answers); + ops.make_solution(goal, self.forest.iter_answers(&ops, goal), || true) + } + + fn solve_limited( + &mut self, + program: &dyn RustIrDatabase, + goal: &UCanonical>>, + should_continue: &dyn std::ops::Fn() -> bool, + ) -> Option> { + let ops = SlgContextOps::new(program, self.max_size, self.expected_answers); + ops.make_solution(goal, self.forest.iter_answers(&ops, goal), should_continue) + } + + fn solve_multiple( + &mut self, + program: &dyn RustIrDatabase, + goal: &UCanonical>>, + f: &mut dyn FnMut(SubstitutionResult>>, bool) -> bool, + ) -> bool { + let ops = SlgContextOps::new(program, self.max_size, self.expected_answers); + let mut answers = self.forest.iter_answers(&ops, goal); + loop { + let subst = match answers.next_answer(|| true) { + AnswerResult::Answer(answer) => { + if !answer.ambiguous { + SubstitutionResult::Definite(answer.subst) + } else if answer + .subst + .value + .subst + .is_identity_subst(ops.program().interner()) + { + SubstitutionResult::Floundered + } else { + SubstitutionResult::Ambiguous(answer.subst) + } + } + AnswerResult::Floundered => SubstitutionResult::Floundered, + AnswerResult::NoMoreSolutions => { + return true; + } + AnswerResult::QuantumExceeded => continue, + }; + + if !f(subst, !answers.peek_answer(|| true).is_no_more_solutions()) { + return false; + } + } + } +} diff --git a/chalk-engine/src/stack.rs b/chalk-engine/src/stack.rs index 8b804e8333a..66b53fa582f 100644 --- a/chalk-engine/src/stack.rs +++ b/chalk-engine/src/stack.rs @@ 
-1,32 +1,90 @@ -use {DepthFirstNumber, TableIndex}; +use crate::index_struct; +use crate::strand::CanonicalStrand; +use crate::tables::Tables; +use crate::{Minimums, TableIndex, TimeStamp}; +use std::fmt; use std::ops::{Index, IndexMut, Range}; +use chalk_ir::interner::Interner; + /// See `Forest`. -#[derive(Default)] -pub(crate) struct Stack { +#[derive(Debug)] +pub(crate) struct Stack { /// Stack: as described above, stores the in-progress goals. - stack: Vec, + stack: Vec>, +} + +impl Stack { + // This isn't actually used, but it can be helpful when debugging stack issues + #[allow(dead_code)] + pub(crate) fn debug_with<'a>(&'a self, tables: &'a Tables) -> StackDebug<'_, I> { + StackDebug { + stack: self, + tables, + } + } +} + +pub(crate) struct StackDebug<'a, I: Interner> { + stack: &'a Stack, + tables: &'a Tables, +} + +impl fmt::Debug for StackDebug<'_, I> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "---- Stack ----")?; + for entry in self.stack.stack.iter() { + writeln!(f, " --- StackEntry ---")?; + writeln!( + f, + " Table {:?} with goal {:?}", + entry.table, self.tables[entry.table].table_goal + )?; + writeln!(f, " Active strand: {:#?}", entry.active_strand)?; + writeln!( + f, + " Additional strands: {:#?}", + self.tables[entry.table].strands().collect::>() + )?; + } + write!(f, "---- End Stack ----")?; + Ok(()) + } +} + +impl Default for Stack { + fn default() -> Self { + Stack { stack: vec![] } + } } -/// The StackIndex identifies the position of a table's goal in the -/// stack of goals that are actively being processed. Note that once a -/// table is completely evaluated, it may be popped from the stack, -/// and hence no longer have a stack index. index_struct! { + /// The StackIndex identifies the position of a table's goal in the + /// stack of goals that are actively being processed. Note that once a + /// table is completely evaluated, it may be popped from the stack, + /// and hence no longer have a stack index. 
pub(crate) struct StackIndex { value: usize, } } -pub(crate) struct StackEntry { +#[derive(Debug)] +pub(crate) struct StackEntry { /// The goal G from the stack entry `A :- G` represented here. pub(super) table: TableIndex, - /// The DFN of this computation. - pub(super) dfn: DepthFirstNumber, + /// The clock TimeStamp of this stack entry. + pub(super) clock: TimeStamp, + + pub(super) cyclic_minimums: Minimums, + + // FIXME: should store this as an index. + // This would mean that if we unwind, + // we don't need to worry about losing a strand + pub(super) active_strand: Option>, } -impl Stack { +impl Stack { pub(super) fn is_empty(&self) -> bool { self.stack.is_empty() } @@ -48,32 +106,70 @@ impl Stack { } pub(super) fn top_of_stack_from(&self, depth: StackIndex) -> Range { - depth .. StackIndex::from(self.stack.len()) + depth..StackIndex::from(self.stack.len()) } - pub(super) fn push(&mut self, table: TableIndex, dfn: DepthFirstNumber) -> StackIndex { + pub(super) fn push( + &mut self, + table: TableIndex, + cyclic_minimums: Minimums, + clock: TimeStamp, + ) -> StackIndex { let old_len = self.stack.len(); - self.stack.push(StackEntry { table, dfn }); + self.stack.push(StackEntry { + table, + clock, + cyclic_minimums, + active_strand: None, + }); StackIndex::from(old_len) } - pub(super) fn pop(&mut self, table: TableIndex, depth: StackIndex) { - assert_eq!(self.stack.len(), depth.value + 1); - assert_eq!(self[depth].table, table); + /// Pops the top-most entry from the stack: + /// * If the stack is now empty, returns false. + /// * Otherwise, returns true. + fn pop_and_adjust_depth(&mut self) -> bool { self.stack.pop(); + !self.stack.is_empty() + } + + /// Pops the top-most entry from the stack, which should have the depth `*depth`: + /// * If the stack is now empty, returns None. + /// * Otherwise, `take`s the active strand from the new top and returns it. 
+ pub(super) fn pop_and_take_caller_strand(&mut self) -> Option> { + if self.pop_and_adjust_depth() { + Some(self.top().active_strand.take().unwrap()) + } else { + None + } + } + + /// Pops the top-most entry from the stack, which should have the depth `*depth`: + /// * If the stack is now empty, returns None. + /// * Otherwise, borrows the active strand (mutably) from the new top and returns it. + pub(super) fn pop_and_borrow_caller_strand(&mut self) -> Option<&mut CanonicalStrand> { + if self.pop_and_adjust_depth() { + Some(self.top().active_strand.as_mut().unwrap()) + } else { + None + } + } + + pub(super) fn top(&mut self) -> &mut StackEntry { + self.stack.last_mut().unwrap() } } -impl Index for Stack { - type Output = StackEntry; +impl Index for Stack { + type Output = StackEntry; - fn index(&self, index: StackIndex) -> &StackEntry { + fn index(&self, index: StackIndex) -> &StackEntry { &self.stack[index.value] } } -impl IndexMut for Stack { - fn index_mut(&mut self, index: StackIndex) -> &mut StackEntry { +impl IndexMut for Stack { + fn index_mut(&mut self, index: StackIndex) -> &mut StackEntry { &mut self.stack[index.value] } } diff --git a/chalk-engine/src/strand.rs b/chalk-engine/src/strand.rs index 39cbb7653a4..da25a778d87 100644 --- a/chalk-engine/src/strand.rs +++ b/chalk-engine/src/strand.rs @@ -1,27 +1,26 @@ -use std::fmt::{Debug, Error, Formatter}; -use {ExClause, TableIndex}; -use context::{Context, InferenceTable}; -use table::AnswerIndex; +use crate::table::AnswerIndex; +use crate::{ExClause, TableIndex, TimeStamp}; +use std::fmt::Debug; -#[derive(Debug)] -pub(crate) struct CanonicalStrand { - pub(super) canonical_ex_clause: C::CanonicalExClause, - - /// Index into `ex_clause.subgoals`. 
- pub(crate) selected_subgoal: Option>, -} - -pub(crate) struct Strand<'table, C: Context + 'table, I: Context + 'table> { - pub(crate) infer: &'table mut dyn InferenceTable, +use chalk_derive::HasInterner; +use chalk_ir::fold::{FallibleTypeFolder, TypeFoldable}; +use chalk_ir::interner::Interner; +use chalk_ir::{Canonical, DebruijnIndex, UniverseMap}; +#[derive(Clone, Debug, HasInterner)] +pub(crate) struct Strand { pub(super) ex_clause: ExClause, /// Index into `ex_clause.subgoals`. - pub(crate) selected_subgoal: Option>, + pub(crate) selected_subgoal: Option, + + pub(crate) last_pursued_time: TimeStamp, } +pub(crate) type CanonicalStrand = Canonical>; + #[derive(Clone, Debug)] -pub(crate) struct SelectedSubgoal { +pub(crate) struct SelectedSubgoal { /// The index of the subgoal in `ex_clause.subgoals` pub(crate) subgoal_index: usize, @@ -33,14 +32,19 @@ pub(crate) struct SelectedSubgoal { /// Maps the universes of the subgoal to the canonical universes /// used in the table - pub(crate) universe_map: C::UniverseMap, + pub(crate) universe_map: UniverseMap, } -impl<'table, C: Context, I: Context> Debug for Strand<'table, C, I> { - fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> { - fmt.debug_struct("Strand") - .field("ex_clause", &self.ex_clause) - .field("selected_subgoal", &self.selected_subgoal) - .finish() +impl TypeFoldable for Strand { + fn try_fold_with( + self, + folder: &mut dyn FallibleTypeFolder, + outer_binder: DebruijnIndex, + ) -> Result { + Ok(Strand { + ex_clause: self.ex_clause.try_fold_with(folder, outer_binder)?, + last_pursued_time: self.last_pursued_time, + selected_subgoal: self.selected_subgoal, + }) } } diff --git a/chalk-engine/src/table.rs b/chalk-engine/src/table.rs index c0f60701bf3..e883cad322e 100644 --- a/chalk-engine/src/table.rs +++ b/chalk-engine/src/table.rs @@ -1,34 +1,50 @@ -use {DelayedLiteralSet, DelayedLiteralSets}; -use context::prelude::*; -use strand::CanonicalStrand; +use crate::index_struct; +use 
crate::strand::CanonicalStrand; +use crate::{Answer, AnswerMode}; use rustc_hash::FxHashMap; -use std::collections::VecDeque; use std::collections::hash_map::Entry; +use std::collections::VecDeque; use std::mem; -pub(crate) struct Table { +use chalk_ir::interner::Interner; +use chalk_ir::{AnswerSubst, Canonical, Goal, InEnvironment, UCanonical}; +use tracing::{debug, info, instrument}; + +#[derive(Debug)] +pub(crate) struct Table { /// The goal this table is trying to solve (also the key to look /// it up). - pub(crate) table_goal: C::UCanonicalGoalInEnvironment, + pub(crate) table_goal: UCanonical>>, /// A goal is coinductive if it can assume itself to be true, more /// or less. This is true for auto traits. pub(crate) coinductive_goal: bool, + /// True if this table is floundered, meaning that it doesn't have + /// enough types specified for us to solve. + floundered: bool, + /// Stores the answers that we have found thus far. When we get a request /// for an answer N, we will first check this vector. - answers: Vec>, + answers: Vec>, /// An alternative storage for the answers we have so far, used to /// detect duplicates. Not every answer in `answers` will be /// represented here -- we discard answers from `answers_hash` /// (but not `answers`) when better answers arrive (in particular, - /// answers with fewer delayed literals). - answers_hash: FxHashMap>, + /// answers with no ambiguity). + /// + /// FIXME -- Ideally we would exclude the region constraints and + /// delayed subgoals from the hash, but that's a bit tricky to do + /// with the current canonicalization setup. It should be ok not + /// to do so though it can result in more answers than we need. + answers_hash: FxHashMap>, bool>, /// Stores the active strands that we can "pull on" to find more /// answers. - strands: VecDeque>, + strands: VecDeque>, + + pub(crate) answer_mode: AnswerMode, } index_struct! { @@ -37,55 +53,79 @@ index_struct! 
{ } } - -/// An "answer" in the on-demand solver corresponds to a fully solved -/// goal for a particular table (modulo delayed literals). It contains -/// a substitution -#[derive(Clone, Debug)] -pub struct Answer { - pub(crate) subst: C::CanonicalConstrainedSubst, - pub(crate) delayed_literals: DelayedLiteralSet, -} - -impl Table { - pub(crate) fn new(table_goal: C::UCanonicalGoalInEnvironment, coinductive_goal: bool) -> Table { +impl Table { + pub(crate) fn new( + table_goal: UCanonical>>, + coinductive_goal: bool, + ) -> Table { Table { table_goal, coinductive_goal, answers: Vec::new(), + floundered: false, answers_hash: FxHashMap::default(), strands: VecDeque::new(), + answer_mode: AnswerMode::Complete, } } - pub(crate) fn push_strand(&mut self, strand: CanonicalStrand) { + /// Push a strand to the back of the queue of strands to be processed. + pub(crate) fn enqueue_strand(&mut self, strand: CanonicalStrand) { self.strands.push_back(strand); } - pub(crate) fn extend_strands(&mut self, strands: impl IntoIterator>) { - self.strands.extend(strands); + pub(crate) fn strands_mut(&mut self) -> impl Iterator> { + self.strands.iter_mut() } - pub(crate) fn strands_mut(&mut self) -> impl Iterator> { - self.strands.iter_mut() + pub(crate) fn strands(&self) -> impl Iterator> { + self.strands.iter() } - pub(crate) fn take_strands(&mut self) -> VecDeque> { - mem::replace(&mut self.strands, VecDeque::new()) + pub(crate) fn take_strands(&mut self) -> VecDeque> { + mem::take(&mut self.strands) } - pub(crate) fn pop_next_strand(&mut self) -> Option> { - self.strands.pop_front() + /// Remove the next strand from the queue that meets the given criteria + pub(crate) fn dequeue_next_strand_that( + &mut self, + test: impl Fn(&CanonicalStrand) -> bool, + ) -> Option> { + let first = self.strands.iter().position(test); + if let Some(first) = first { + self.strands.rotate_left(first); + self.strands.pop_front() + } else { + None + } } - /// Adds `answer` to our list of answers, unless 
it (or some - /// better answer) is already present. An answer A is better than - /// an answer B if their substitutions are the same, but A has a subset - /// of the delayed literals that B does. + /// Mark the table as floundered -- this also discards all pre-existing answers, + /// as they are no longer relevant. + pub(crate) fn mark_floundered(&mut self) { + self.floundered = true; + self.strands = Default::default(); + self.answers = Default::default(); + } + + /// Returns true if the table is floundered. + pub(crate) fn is_floundered(&self) -> bool { + self.floundered + } + + /// Adds `answer` to our list of answers, unless it is already present. /// /// Returns true if `answer` was added. - pub(super) fn push_answer(&mut self, answer: Answer) -> bool { - debug_heading!("push_answer(answer={:?})", answer); + /// + /// # Panics + /// This will panic if a previous answer with the same substitution + /// was marked as ambgiuous, but the new answer is not. No current + /// tests trigger this case, and assumptions upstream assume that when + /// `true` is returned here, that a *new* answer was added (instead of an) + /// existing answer replaced. 
+ #[instrument(level = "debug", skip(self))] + pub(super) fn push_answer(&mut self, answer: Answer) -> Option { + assert!(!self.floundered); debug!( "pre-existing entry: {:?}", self.answers_hash.get(&answer.subst) @@ -93,32 +133,34 @@ impl Table { let added = match self.answers_hash.entry(answer.subst.clone()) { Entry::Vacant(entry) => { - entry.insert(DelayedLiteralSets::singleton(answer.delayed_literals.clone())); + entry.insert(answer.ambiguous); true } - Entry::Occupied(mut entry) => { - entry.get_mut().insert_if_minimal(&answer.delayed_literals) + Entry::Occupied(entry) => { + let was_ambiguous = entry.get(); + if *was_ambiguous && !answer.ambiguous { + panic!("New answer was not ambiguous whereas previous answer was."); + } + false } }; info!( - "new answer to table with goal {:?}: answer={:?}", - self.table_goal, answer, + goal = ?self.table_goal, ?answer, + "new answer to table", ); - if added { - self.answers.push(answer); + if !added { + return None; } - added - } - pub(super) fn answer(&self, index: AnswerIndex) -> Option<&Answer> { - self.answers.get(index.value) + let index = self.answers.len(); + self.answers.push(answer); + Some(AnswerIndex::from(index)) } - /// Useful for testing. - pub fn num_cached_answers(&self) -> usize { - self.answers.len() + pub(super) fn answer(&self, index: AnswerIndex) -> Option<&Answer> { + self.answers.get(index.value) } pub(super) fn next_answer_index(&self) -> AnswerIndex { @@ -129,11 +171,3 @@ impl Table { impl AnswerIndex { pub(crate) const ZERO: AnswerIndex = AnswerIndex { value: 0 }; } - -impl Answer { - /// An "unconditional" answer is one that must be true -- this is - /// the case so long as we have no delayed literals. 
- pub(super) fn is_unconditional(&self) -> bool { - self.delayed_literals.is_empty() - } -} diff --git a/chalk-engine/src/tables.rs b/chalk-engine/src/tables.rs index 072003826cb..76508eaa8dd 100644 --- a/chalk-engine/src/tables.rs +++ b/chalk-engine/src/tables.rs @@ -1,21 +1,24 @@ -use TableIndex; -use context::prelude::*; -use table::Table; +use crate::table::Table; +use crate::TableIndex; use rustc_hash::FxHashMap; use std::ops::{Index, IndexMut}; +use chalk_ir::interner::Interner; +use chalk_ir::{Goal, InEnvironment, UCanonical}; + /// See `Forest`. -pub(crate) struct Tables { +#[derive(Debug)] +pub(crate) struct Tables { /// Maps from a canonical goal to the index of its table. - table_indices: FxHashMap, + table_indices: FxHashMap>>, TableIndex>, /// Table: as described above, stores the key information for each /// tree in the forest. - tables: Vec>, + tables: Vec>, } -impl Tables { - pub(crate) fn new() -> Tables { +impl Tables { + pub(crate) fn new() -> Tables { Tables { table_indices: FxHashMap::default(), tables: Vec::default(), @@ -29,38 +32,41 @@ impl Tables { } } - pub(super) fn insert(&mut self, goal: C::UCanonicalGoalInEnvironment, coinductive_goal: bool) -> TableIndex { + pub(super) fn insert(&mut self, table: Table) -> TableIndex { + let goal = table.table_goal.clone(); let index = self.next_index(); - self.tables.push(Table::new(goal.clone(), coinductive_goal)); + self.tables.push(table); self.table_indices.insert(goal, index); index } - pub(super) fn index_of(&self, literal: &C::UCanonicalGoalInEnvironment) -> Option { + pub(super) fn index_of( + &self, + literal: &UCanonical>>, + ) -> Option { self.table_indices.get(literal).cloned() } } -impl Index for Tables { - type Output = Table; +impl Index for Tables { + type Output = Table; - fn index(&self, index: TableIndex) -> &Table { + fn index(&self, index: TableIndex) -> &Table { &self.tables[index.value] } } -impl IndexMut for Tables { - fn index_mut(&mut self, index: TableIndex) -> &mut Table { 
+impl IndexMut for Tables { + fn index_mut(&mut self, index: TableIndex) -> &mut Table { &mut self.tables[index.value] } } -impl<'a, C: Context> IntoIterator for &'a mut Tables { - type IntoIter = <&'a mut Vec> as IntoIterator>::IntoIter; - type Item = <&'a mut Vec> as IntoIterator>::Item; +impl<'a, I: Interner> IntoIterator for &'a mut Tables { + type IntoIter = <&'a mut Vec> as IntoIterator>::IntoIter; + type Item = <&'a mut Vec> as IntoIterator>::Item; fn into_iter(self) -> Self::IntoIter { IntoIterator::into_iter(&mut self.tables) } } - diff --git a/chalk-integration/Cargo.toml b/chalk-integration/Cargo.toml new file mode 100644 index 00000000000..5faeba0a89e --- /dev/null +++ b/chalk-integration/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "chalk-integration" +version = "0.104.0-dev.0" +license = "MIT OR Apache-2.0" +description = "Sample solver setup for Chalk" +authors = ["Rust Compiler Team", "Chalk developers"] +repository = "https://github.com/rust-lang/chalk" +keywords = ["compiler", "traits", "prolog"] +edition = "2018" +publish = false + +[dependencies] +string_cache = "0.8.0" +salsa = "0.16.0" +tracing = "0.1" + +chalk-derive = { version = "0.104.0-dev.0", path = "../chalk-derive" } +chalk-ir = { version = "0.104.0-dev.0", path = "../chalk-ir" } +chalk-solve = { version = "0.104.0-dev.0", path = "../chalk-solve" } +chalk-recursive = { version = "0.104.0-dev.0", path = "../chalk-recursive" } +chalk-engine = { version = "0.104.0-dev.0", path = "../chalk-engine" } +chalk-parse = { version = "0.104.0-dev.0", path = "../chalk-parse" } +indexmap = "2" diff --git a/chalk-integration/README.md b/chalk-integration/README.md new file mode 100644 index 00000000000..1adfbed0b90 --- /dev/null +++ b/chalk-integration/README.md @@ -0,0 +1,3 @@ +A library that takes AST from `chalk-parse` and uses it to drive `chalk-solve`. + +See [Github](https://github.com/rust-lang/chalk) for up-to-date information. 
diff --git a/chalk-integration/src/db.rs b/chalk-integration/src/db.rs new file mode 100644 index 00000000000..4fa796d099d --- /dev/null +++ b/chalk-integration/src/db.rs @@ -0,0 +1,281 @@ +use crate::{ + error::ChalkError, + interner::ChalkIr, + lowering::lower_goal, + program::Program, + query::{Lowering, LoweringDatabase}, + tls, SolverChoice, +}; +use chalk_ir::{ + AdtId, AssocTypeId, Binders, Canonical, CanonicalVarKinds, ClosureId, ConstrainedSubst, + CoroutineId, Environment, FnDefId, GenericArg, Goal, ImplId, InEnvironment, OpaqueTyId, + ProgramClause, ProgramClauses, Substitution, TraitId, Ty, TyKind, UCanonical, + UnificationDatabase, Variances, +}; +use chalk_solve::rust_ir::{ + AdtDatum, AdtRepr, AdtSizeAlign, AssociatedTyDatum, AssociatedTyValue, AssociatedTyValueId, + ClosureKind, CoroutineDatum, CoroutineWitnessDatum, FnDefDatum, FnDefInputsAndOutputDatum, + ImplDatum, OpaqueTyDatum, TraitDatum, WellKnownAssocType, WellKnownTrait, +}; +use chalk_solve::{RustIrDatabase, Solution, SubstitutionResult}; +use salsa::Database; +use std::fmt; +use std::sync::Arc; + +#[salsa::database(Lowering)] +#[derive(Default)] +pub struct ChalkDatabase { + storage: salsa::Storage, +} + +impl Database for ChalkDatabase {} + +impl ChalkDatabase { + pub fn with(program_text: &str, solver_choice: SolverChoice) -> Self { + let mut db = ChalkDatabase::default(); + db.set_program_text(Arc::new(program_text.to_string())); + db.set_solver_choice(solver_choice); + db + } + + pub fn with_program(&self, op: impl FnOnce(&Program) -> R) -> R { + let program = &self.checked_program().unwrap(); + tls::set_current_program(program, || op(program)) + } + + pub fn parse_and_lower_goal(&self, text: &str) -> Result, ChalkError> { + let program = self.checked_program()?; + Ok(lower_goal(&*chalk_parse::parse_goal(text)?, &*program)?) 
+ } + + pub fn solve( + &self, + goal: &UCanonical>>, + ) -> Option> { + let solver = self.solver(); + let solution = solver.lock().unwrap().solve(self, goal); + solution + } + + /// Solves a given goal, producing the solution. This will do only + /// as much work towards `goal` as it has to (and that works is + /// cached for future attempts). Calls provided function `f` to + /// iterate over multiple solutions until the function return `false`. + pub fn solve_multiple( + &self, + goal: &UCanonical>>, + f: &mut dyn FnMut(SubstitutionResult>>, bool) -> bool, + ) -> bool { + let solver = self.solver(); + let solution = solver.lock().unwrap().solve_multiple(self, goal, f); + solution + } +} + +impl UnificationDatabase for ChalkDatabase { + fn fn_def_variance(&self, fn_def_id: FnDefId) -> Variances { + self.program_ir().unwrap().fn_def_variance(fn_def_id) + } + + fn adt_variance(&self, adt_id: AdtId) -> Variances { + self.program_ir().unwrap().adt_variance(adt_id) + } +} + +impl RustIrDatabase for ChalkDatabase { + fn custom_clauses(&self) -> Vec> { + self.program_ir().unwrap().custom_clauses() + } + + fn associated_ty_data(&self, ty: AssocTypeId) -> Arc> { + self.program_ir().unwrap().associated_ty_data(ty) + } + + fn trait_datum(&self, id: TraitId) -> Arc> { + self.program_ir().unwrap().trait_datum(id) + } + + fn impl_datum(&self, id: ImplId) -> Arc> { + self.program_ir().unwrap().impl_datum(id) + } + + fn associated_ty_from_impl( + &self, + impl_id: ImplId, + assoc_type_id: AssocTypeId, + ) -> Option> { + let ir = self.program_ir().unwrap(); + ir.impl_data[&impl_id] + .associated_ty_value_ids + .iter() + .copied() + .find(|id| ir.associated_ty_values[id].associated_ty_id == assoc_type_id) + } + + fn associated_ty_value( + &self, + id: AssociatedTyValueId, + ) -> Arc> { + self.program_ir().unwrap().associated_ty_values[&id].clone() + } + + fn opaque_ty_data(&self, id: OpaqueTyId) -> Arc> { + self.program_ir().unwrap().opaque_ty_data(id) + } + + fn 
hidden_opaque_type(&self, id: OpaqueTyId) -> Ty { + self.program_ir().unwrap().hidden_opaque_type(id) + } + + fn adt_datum(&self, id: AdtId) -> Arc> { + self.program_ir().unwrap().adt_datum(id) + } + + fn coroutine_datum(&self, id: CoroutineId) -> Arc> { + self.program_ir().unwrap().coroutine_datum(id) + } + + fn coroutine_witness_datum( + &self, + id: CoroutineId, + ) -> Arc> { + self.program_ir().unwrap().coroutine_witness_datum(id) + } + + fn adt_repr(&self, id: AdtId) -> Arc> { + self.program_ir().unwrap().adt_repr(id) + } + + fn adt_size_align(&self, id: AdtId) -> Arc { + self.program_ir().unwrap().adt_size_align(id) + } + + fn fn_def_datum(&self, id: FnDefId) -> Arc> { + self.program_ir().unwrap().fn_def_datum(id) + } + + fn impls_for_trait( + &self, + trait_id: TraitId, + generic_args: &[GenericArg], + binders: &CanonicalVarKinds, + ) -> Vec> { + self.program_ir() + .unwrap() + .impls_for_trait(trait_id, generic_args, binders) + } + + fn local_impls_to_coherence_check(&self, trait_id: TraitId) -> Vec> { + self.program_ir() + .unwrap() + .local_impls_to_coherence_check(trait_id) + } + + fn impl_provided_for(&self, auto_trait_id: TraitId, ty: &TyKind) -> bool { + self.program_ir() + .unwrap() + .impl_provided_for(auto_trait_id, ty) + } + + fn well_known_trait_id(&self, well_known_trait: WellKnownTrait) -> Option> { + self.program_ir() + .unwrap() + .well_known_trait_id(well_known_trait) + } + + fn well_known_assoc_type_id( + &self, + assoc_type: WellKnownAssocType, + ) -> Option> { + self.program_ir() + .unwrap() + .well_known_assoc_type_id(assoc_type) + } + + fn program_clauses_for_env( + &self, + environment: &Environment, + ) -> ProgramClauses { + chalk_solve::program_clauses_for_env(self, environment) + } + + fn interner(&self) -> ChalkIr { + ChalkIr + } + + fn is_object_safe(&self, trait_id: TraitId) -> bool { + self.program_ir().unwrap().is_object_safe(trait_id) + } + + fn closure_inputs_and_output( + &self, + closure_id: ClosureId, + substs: 
&Substitution, + ) -> Binders> { + self.program_ir() + .unwrap() + .closure_inputs_and_output(closure_id, substs) + } + + fn closure_kind( + &self, + closure_id: ClosureId, + substs: &Substitution, + ) -> ClosureKind { + self.program_ir().unwrap().closure_kind(closure_id, substs) + } + + fn closure_upvars( + &self, + closure_id: ClosureId, + substs: &Substitution, + ) -> Binders> { + self.program_ir() + .unwrap() + .closure_upvars(closure_id, substs) + } + + fn closure_fn_substitution( + &self, + closure_id: ClosureId, + substs: &Substitution, + ) -> Substitution { + self.program_ir() + .unwrap() + .closure_fn_substitution(closure_id, substs) + } + + fn unification_database(&self) -> &dyn UnificationDatabase { + self + } + + fn trait_name(&self, trait_id: TraitId) -> String { + self.program_ir().unwrap().trait_name(trait_id) + } + + fn adt_name(&self, struct_id: AdtId) -> String { + self.program_ir().unwrap().adt_name(struct_id) + } + + fn assoc_type_name(&self, assoc_ty_id: AssocTypeId) -> String { + self.program_ir().unwrap().assoc_type_name(assoc_ty_id) + } + + fn opaque_type_name(&self, opaque_ty_id: OpaqueTyId) -> String { + self.program_ir().unwrap().opaque_type_name(opaque_ty_id) + } + + fn fn_def_name(&self, fn_def_id: FnDefId) -> String { + self.program_ir().unwrap().fn_def_name(fn_def_id) + } + + fn discriminant_type(&self, ty: Ty) -> Ty { + self.program_ir().unwrap().discriminant_type(ty) + } +} + +impl fmt::Debug for ChalkDatabase { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "ChalkDatabase {{ }}") + } +} diff --git a/chalk-integration/src/error.rs b/chalk-integration/src/error.rs new file mode 100644 index 00000000000..ba223d2d249 --- /dev/null +++ b/chalk-integration/src/error.rs @@ -0,0 +1,207 @@ +use crate::interner::ChalkIr; +use chalk_parse::ast::{Identifier, Kind}; +use chalk_solve::coherence::CoherenceError; +use chalk_solve::wf::WfError; +use string_cache::DefaultAtom as Atom; + +/// Wrapper type for the various 
errors that can occur during chalk +/// processing. +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub struct ChalkError { + /// For now, we just convert the error into a string, which makes + /// it trivially hashable etc. + error_text: String, +} + +impl From> for ChalkError { + fn from(value: Box) -> Self { + ChalkError { + error_text: value.to_string(), + } + } +} + +impl From> for ChalkError { + fn from(value: WfError) -> Self { + ChalkError { + error_text: value.to_string(), + } + } +} + +impl From> for ChalkError { + fn from(value: CoherenceError) -> Self { + ChalkError { + error_text: value.to_string(), + } + } +} + +impl From for ChalkError { + fn from(value: RustIrError) -> Self { + ChalkError { + error_text: value.to_string(), + } + } +} + +impl std::fmt::Display for ChalkError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.error_text) + } +} + +impl std::error::Error for ChalkError {} + +#[derive(Debug)] +pub enum RustIrError { + InvalidParameterName(Identifier), + InvalidTraitName(Identifier), + NotTrait(Identifier), + NotStruct(Identifier), + DuplicateOrShadowedParameters, + AutoTraitAssociatedTypes(Identifier), + AutoTraitParameters(Identifier), + AutoTraitWhereClauses(Identifier), + InvalidFundamentalTypesParameters(Identifier), + NegativeImplAssociatedValues(Identifier), + MissingAssociatedType(Identifier), + IncorrectNumberOfVarianceParameters { + identifier: Identifier, + expected: usize, + actual: usize, + }, + IncorrectNumberOfTypeParameters { + identifier: Identifier, + expected: usize, + actual: usize, + }, + IncorrectNumberOfAssociatedTypeParameters { + identifier: Identifier, + expected: usize, + actual: usize, + }, + IncorrectParameterKind { + identifier: Identifier, + expected: Kind, + actual: Kind, + }, + IncorrectTraitParameterKind { + identifier: Identifier, + expected: Kind, + actual: Kind, + }, + IncorrectAssociatedTypeParameterKind { + identifier: Identifier, + expected: Kind, + 
actual: Kind, + }, + CannotApplyTypeParameter(Identifier), + InvalidExternAbi(Atom), +} + +impl std::fmt::Display for RustIrError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + RustIrError::InvalidParameterName(name) => { + write!(f, "invalid parameter name `{}`", name) + } + RustIrError::InvalidTraitName(name) => write!(f, "invalid trait name `{}`", name), + RustIrError::NotTrait(name) => write!( + f, + "expected a trait, found `{}`, which is not a trait", + name + ), + RustIrError::NotStruct(name) => write!( + f, + "expected a struct, found `{}`, which is not a struct", + name + ), + RustIrError::DuplicateOrShadowedParameters => { + write!(f, "duplicate or shadowed parameters") + } + RustIrError::AutoTraitAssociatedTypes(name) => { + write!(f, "auto trait `{}` cannot define associated types", name) + } + RustIrError::AutoTraitParameters(name) => { + write!(f, "auto trait `{}` cannot have parameters", name) + } + RustIrError::AutoTraitWhereClauses(name) => { + write!(f, "auto trait `{}` cannot have where clauses", name) + } + RustIrError::InvalidFundamentalTypesParameters(name) => write!( + f, + "only a single parameter supported for fundamental type `{}`", + name + ), + RustIrError::NegativeImplAssociatedValues(name) => write!( + f, + "negative impl for trait `{}` cannot define associated values", + name + ), + RustIrError::MissingAssociatedType(name) => { + write!(f, "no associated type `{}` defined in trait", name) + } + RustIrError::IncorrectNumberOfVarianceParameters { + identifier, + expected, + actual, + } => write!( + f, + "`{}` has {} type parameters, not {}, which were passed for variance", + identifier, expected, actual + ), + RustIrError::IncorrectNumberOfTypeParameters { + identifier, + expected, + actual, + } => write!( + f, + "`{}` takes {} type parameters, not {}", + identifier, expected, actual + ), + RustIrError::IncorrectNumberOfAssociatedTypeParameters { + identifier, + expected, + actual, + } => 
write!( + f, + "wrong number of parameters for associated type `{}` (expected {}, got {})", + identifier, expected, actual + ), + RustIrError::IncorrectParameterKind { + identifier, + expected, + actual, + } => write!( + f, + "incorrect parameter kind for `{}`: expected {}, found {}", + identifier, expected, actual + ), + RustIrError::IncorrectTraitParameterKind { + identifier, + expected, + actual, + } => write!( + f, + "incorrect parameter kind for trait `{}`: expected {}, found {}", + identifier, expected, actual + ), + RustIrError::IncorrectAssociatedTypeParameterKind { + identifier, + expected, + actual, + } => write!( + f, + "incorrect associated type parameter kind for `{}`: expected {}, found {}", + identifier, expected, actual + ), + RustIrError::CannotApplyTypeParameter(name) => { + write!(f, "cannot apply type parameter `{}`", name) + } + RustIrError::InvalidExternAbi(abi) => write!(f, "invalid extern ABI `{}`", abi), + } + } +} + +impl std::error::Error for RustIrError {} diff --git a/chalk-integration/src/interner.rs b/chalk-integration/src/interner.rs new file mode 100644 index 00000000000..cef01c2cd42 --- /dev/null +++ b/chalk-integration/src/interner.rs @@ -0,0 +1,394 @@ +use crate::tls; +use chalk_ir::{ + interner::{HasInterner, Interner}, + TyKind, +}; +use chalk_ir::{ + AdtId, AliasTy, AssocTypeId, CanonicalVarKind, CanonicalVarKinds, ConstData, Constraint, + Constraints, FnDefId, Goals, InEnvironment, Lifetime, OpaqueTy, OpaqueTyId, + ProgramClauseImplication, ProgramClauses, ProjectionTy, QuantifiedWhereClauses, + SeparatorTraitRef, Substitution, TraitId, Ty, TyData, VariableKind, VariableKinds, Variances, +}; +use chalk_ir::{ + GenericArg, GenericArgData, Goal, GoalData, LifetimeData, ProgramClause, ProgramClauseData, + QuantifiedWhereClause, Variance, +}; +use std::fmt; +use std::fmt::Debug; +use std::sync::Arc; +use string_cache::DefaultAtom; + +pub type Identifier = DefaultAtom; + +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, 
Hash)] +pub struct RawId { + pub index: u32, +} + +impl Debug for RawId { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(fmt, "#{}", self.index) + } +} + +#[derive(Copy, Clone, PartialEq, Eq, Hash)] +pub enum ChalkFnAbi { + Rust, + C, +} + +impl Debug for ChalkFnAbi { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + fmt, + "{}", + match self { + ChalkFnAbi::Rust => "\"rust\"", + ChalkFnAbi::C => "\"c\"", + }, + ) + } +} + +/// The default "interner" and the only interner used by chalk +/// itself. In this interner, no interning actually occurs. +#[derive(Debug, Copy, Clone, Hash, PartialOrd, Ord, PartialEq, Eq)] +pub struct ChalkIr; + +impl Interner for ChalkIr { + type InternedType = Arc>; + type InternedLifetime = LifetimeData; + type InternedConst = Arc>; + type InternedConcreteConst = u32; + type InternedGenericArg = GenericArgData; + type InternedGoal = Arc>; + type InternedGoals = Vec>; + type InternedSubstitution = Vec>; + type InternedProgramClause = ProgramClauseData; + type InternedProgramClauses = Vec>; + type InternedQuantifiedWhereClauses = Vec>; + type InternedVariableKinds = Vec>; + type InternedCanonicalVarKinds = Vec>; + type InternedConstraints = Vec>>; + type InternedVariances = Vec; + type DefId = RawId; + type InternedAdtId = RawId; + type Identifier = Identifier; + type FnAbi = ChalkFnAbi; + + fn debug_adt_id( + type_kind_id: AdtId, + fmt: &mut fmt::Formatter<'_>, + ) -> Option { + tls::with_current_program(|prog| Some(prog?.debug_adt_id(type_kind_id, fmt))) + } + + fn debug_trait_id( + type_kind_id: TraitId, + fmt: &mut fmt::Formatter<'_>, + ) -> Option { + tls::with_current_program(|prog| Some(prog?.debug_trait_id(type_kind_id, fmt))) + } + + fn debug_assoc_type_id( + id: AssocTypeId, + fmt: &mut fmt::Formatter<'_>, + ) -> Option { + tls::with_current_program(|prog| Some(prog?.debug_assoc_type_id(id, fmt))) + } + + fn debug_opaque_ty_id( + id: OpaqueTyId, + fmt: &mut fmt::Formatter<'_>, + ) 
-> Option { + tls::with_current_program(|prog| Some(prog?.debug_opaque_ty_id(id, fmt))) + } + + fn debug_fn_def_id(id: FnDefId, fmt: &mut fmt::Formatter<'_>) -> Option { + tls::with_current_program(|prog| Some(prog?.debug_fn_def_id(id, fmt))) + } + + fn debug_alias(alias: &AliasTy, fmt: &mut fmt::Formatter<'_>) -> Option { + tls::with_current_program(|prog| Some(prog?.debug_alias(alias, fmt))) + } + + fn debug_projection_ty( + proj: &ProjectionTy, + fmt: &mut fmt::Formatter<'_>, + ) -> Option { + tls::with_current_program(|prog| Some(prog?.debug_projection_ty(proj, fmt))) + } + + fn debug_opaque_ty( + opaque_ty: &OpaqueTy, + fmt: &mut fmt::Formatter<'_>, + ) -> Option { + tls::with_current_program(|prog| Some(prog?.debug_opaque_ty(opaque_ty, fmt))) + } + + fn debug_ty(ty: &Ty, fmt: &mut fmt::Formatter<'_>) -> Option { + tls::with_current_program(|prog| Some(prog?.debug_ty(ty, fmt))) + } + + fn debug_lifetime( + lifetime: &Lifetime, + fmt: &mut fmt::Formatter<'_>, + ) -> Option { + tls::with_current_program(|prog| Some(prog?.debug_lifetime(lifetime, fmt))) + .or_else(|| Some(write!(fmt, "{:?}", lifetime.interned()))) + } + + fn debug_generic_arg( + generic_arg: &GenericArg, + fmt: &mut fmt::Formatter<'_>, + ) -> Option { + tls::with_current_program(|prog| Some(prog?.debug_generic_arg(generic_arg, fmt))) + } + + fn debug_variable_kinds( + variable_kinds: &VariableKinds, + fmt: &mut fmt::Formatter<'_>, + ) -> Option { + tls::with_current_program(|prog| Some(prog?.debug_variable_kinds(variable_kinds, fmt))) + } + + fn debug_variable_kinds_with_angles( + variable_kinds: &VariableKinds, + fmt: &mut fmt::Formatter<'_>, + ) -> Option { + tls::with_current_program(|prog| { + Some(prog?.debug_variable_kinds_with_angles(variable_kinds, fmt)) + }) + } + + fn debug_canonical_var_kinds( + canonical_var_kinds: &CanonicalVarKinds, + fmt: &mut fmt::Formatter<'_>, + ) -> Option { + tls::with_current_program(|prog| { + Some(prog?.debug_canonical_var_kinds(canonical_var_kinds, fmt)) + 
}) + } + + fn debug_goal(goal: &Goal, fmt: &mut fmt::Formatter<'_>) -> Option { + tls::with_current_program(|prog| Some(prog?.debug_goal(goal, fmt))) + } + + fn debug_goals(goals: &Goals, fmt: &mut fmt::Formatter<'_>) -> Option { + tls::with_current_program(|prog| Some(prog?.debug_goals(goals, fmt))) + } + + fn debug_program_clause_implication( + pci: &ProgramClauseImplication, + fmt: &mut fmt::Formatter<'_>, + ) -> Option { + tls::with_current_program(|prog| Some(prog?.debug_program_clause_implication(pci, fmt))) + } + + fn debug_program_clause( + clause: &ProgramClause, + fmt: &mut fmt::Formatter<'_>, + ) -> Option { + tls::with_current_program(|prog| Some(prog?.debug_program_clause(clause, fmt))) + } + + fn debug_program_clauses( + clause: &ProgramClauses, + fmt: &mut fmt::Formatter<'_>, + ) -> Option { + tls::with_current_program(|prog| Some(prog?.debug_program_clauses(clause, fmt))) + } + + fn debug_substitution( + substitution: &Substitution, + fmt: &mut fmt::Formatter<'_>, + ) -> Option { + tls::with_current_program(|prog| Some(prog?.debug_substitution(substitution, fmt))) + } + + fn debug_separator_trait_ref( + separator_trait_ref: &SeparatorTraitRef<'_, ChalkIr>, + fmt: &mut fmt::Formatter<'_>, + ) -> Option { + tls::with_current_program(|prog| { + Some(prog?.debug_separator_trait_ref(separator_trait_ref, fmt)) + }) + } + + fn debug_quantified_where_clauses( + clauses: &QuantifiedWhereClauses, + fmt: &mut fmt::Formatter<'_>, + ) -> Option { + tls::with_current_program(|prog| Some(prog?.debug_quantified_where_clauses(clauses, fmt))) + } + + fn debug_constraints( + constraints: &Constraints, + fmt: &mut fmt::Formatter<'_>, + ) -> Option { + tls::with_current_program(|prog| Some(prog?.debug_constraints(constraints, fmt))) + } + + fn debug_variances( + variances: &Variances, + fmt: &mut fmt::Formatter<'_>, + ) -> Option { + tls::with_current_program(|prog| Some(prog?.debug_variances(variances, fmt))) + } + + fn intern_ty(self, kind: TyKind) -> Arc> { + let 
flags = kind.compute_flags(self); + Arc::new(TyData { kind, flags }) + } + + fn ty_data(self, ty: &Arc>) -> &TyData { + ty + } + + fn intern_lifetime(self, lifetime: LifetimeData) -> LifetimeData { + lifetime + } + + fn lifetime_data(self, lifetime: &LifetimeData) -> &LifetimeData { + lifetime + } + + fn intern_const(self, constant: ConstData) -> Arc> { + Arc::new(constant) + } + + fn const_data(self, constant: &Arc>) -> &ConstData { + constant + } + + fn const_eq(self, _ty: &Arc>, c1: &u32, c2: &u32) -> bool { + c1 == c2 + } + + fn intern_generic_arg(self, generic_arg: GenericArgData) -> GenericArgData { + generic_arg + } + + fn generic_arg_data(self, generic_arg: &GenericArgData) -> &GenericArgData { + generic_arg + } + + fn intern_goal(self, goal: GoalData) -> Arc> { + Arc::new(goal) + } + + fn goal_data(self, goal: &Arc>) -> &GoalData { + goal + } + + fn intern_goals( + self, + data: impl IntoIterator, E>>, + ) -> Result>, E> { + data.into_iter().collect() + } + + fn goals_data(self, goals: &Vec>) -> &[Goal] { + goals + } + + fn intern_substitution( + self, + data: impl IntoIterator, E>>, + ) -> Result>, E> { + data.into_iter().collect() + } + + fn substitution_data(self, substitution: &Vec>) -> &[GenericArg] { + substitution + } + + fn intern_program_clause(self, data: ProgramClauseData) -> ProgramClauseData { + data + } + + fn program_clause_data(self, clause: &ProgramClauseData) -> &ProgramClauseData { + clause + } + + fn intern_program_clauses( + self, + data: impl IntoIterator, E>>, + ) -> Result>, E> { + data.into_iter().collect() + } + + fn program_clauses_data(self, clauses: &Vec>) -> &[ProgramClause] { + clauses + } + + fn intern_quantified_where_clauses( + self, + data: impl IntoIterator, E>>, + ) -> Result { + data.into_iter().collect() + } + + fn quantified_where_clauses_data( + self, + clauses: &Self::InternedQuantifiedWhereClauses, + ) -> &[QuantifiedWhereClause] { + clauses + } + fn intern_generic_arg_kinds( + self, + data: impl IntoIterator, 
E>>, + ) -> Result { + data.into_iter().collect() + } + + fn variable_kinds_data( + self, + variable_kinds: &Self::InternedVariableKinds, + ) -> &[VariableKind] { + variable_kinds + } + + fn intern_canonical_var_kinds( + self, + data: impl IntoIterator, E>>, + ) -> Result { + data.into_iter().collect() + } + + fn canonical_var_kinds_data( + self, + canonical_var_kinds: &Self::InternedCanonicalVarKinds, + ) -> &[CanonicalVarKind] { + canonical_var_kinds + } + + fn intern_constraints( + self, + data: impl IntoIterator>, E>>, + ) -> Result { + data.into_iter().collect() + } + + fn constraints_data( + self, + constraints: &Self::InternedConstraints, + ) -> &[InEnvironment>] { + constraints + } + + fn intern_variances( + self, + data: impl IntoIterator>, + ) -> Result { + data.into_iter().collect() + } + + fn variances_data(self, variances: &Self::InternedVariances) -> &[Variance] { + variances + } +} + +impl HasInterner for ChalkIr { + type Interner = ChalkIr; +} diff --git a/chalk-integration/src/lib.rs b/chalk-integration/src/lib.rs new file mode 100644 index 00000000000..f77030b9407 --- /dev/null +++ b/chalk-integration/src/lib.rs @@ -0,0 +1,121 @@ +#![recursion_limit = "1024"] +#![cfg_attr(feature = "bench", feature(test))] + +pub mod db; +pub mod error; +pub mod interner; +pub mod lowering; +pub mod program; +pub mod program_environment; +pub mod query; +pub mod test_macros; +pub mod tls; + +use chalk_engine::solve::SLGSolver; +use chalk_ir::interner::HasInterner; +use chalk_ir::Binders; +use chalk_recursive::{Cache, RecursiveSolver}; +use chalk_solve::Solver; +use interner::ChalkIr; + +pub use interner::{Identifier, RawId}; + +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub enum TypeSort { + Adt, + FnDef, + Closure, + Trait, + Opaque, + Coroutine, +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +pub struct Unit; + +impl HasInterner for Unit { + type Interner = ChalkIr; +} + +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub 
struct TypeKind {
+    pub sort: TypeSort,
+    pub name: Identifier,
+    pub binders: Binders<Unit>,
+}
+
+#[derive(Copy, Clone, Debug, PartialOrd, Ord, PartialEq, Eq, Hash)]
+pub enum SolverChoice {
+    /// Run the SLG solver, producing a Solution.
+    SLG {
+        max_size: usize,
+        expected_answers: Option<usize>,
+    },
+    /// Run the recursive solver.
+    Recursive {
+        overflow_depth: usize,
+        caching_enabled: bool,
+        max_size: usize,
+    },
+}
+
+impl SolverChoice {
+    /// Returns specific SLG parameters.
+    pub fn slg(max_size: usize, expected_answers: Option<usize>) -> Self {
+        SolverChoice::SLG {
+            max_size,
+            expected_answers,
+        }
+    }
+
+    /// Returns the default SLG parameters.
+    pub fn slg_default() -> Self {
+        SolverChoice::slg(10, None)
+    }
+
+    /// Returns the default recursive solver setup.
+    pub fn recursive_default() -> Self {
+        SolverChoice::Recursive {
+            overflow_depth: 100,
+            caching_enabled: true,
+            max_size: 30,
+        }
+    }
+
+    /// Returns a recursive solver with specific parameters.
+    pub fn recursive(max_size: usize, overflow_depth: usize) -> Self {
+        SolverChoice::Recursive {
+            overflow_depth,
+            caching_enabled: true,
+            max_size,
+        }
+    }
+
+    pub fn into_solver(self) -> Box<dyn Solver<ChalkIr>> {
+        match self {
+            SolverChoice::SLG {
+                max_size,
+                expected_answers,
+            } => Box::new(SLGSolver::new(max_size, expected_answers)),
+            SolverChoice::Recursive {
+                overflow_depth,
+                caching_enabled,
+                max_size,
+            } => Box::new(RecursiveSolver::new(
+                overflow_depth,
+                max_size,
+                if caching_enabled {
+                    Some(Cache::default())
+                } else {
+                    None
+                },
+            )),
+        }
+    }
+}
+
+impl Default for SolverChoice {
+    fn default() -> Self {
+        SolverChoice::slg(10, None)
+    }
+}
diff --git a/chalk-integration/src/lowering.rs b/chalk-integration/src/lowering.rs
new file mode 100644
index 00000000000..4d2f0d8cefc
--- /dev/null
+++ b/chalk-integration/src/lowering.rs
@@ -0,0 +1,1265 @@
+mod env;
+mod program_lowerer;
+
+use chalk_ir::cast::{Cast, Caster};
+use chalk_ir::{
+    self, BoundVar, ClausePriority, DebruijnIndex, ImplId,
QuantifiedWhereClauses, Substitution, + TyVariableKind, +}; +use chalk_parse::ast::*; +use chalk_solve::rust_ir::{self, IntoWhereClauses}; +use program_lowerer::ProgramLowerer; +use std::collections::BTreeMap; +use string_cache::DefaultAtom as Atom; +use tracing::debug; + +use crate::error::RustIrError; +use crate::interner::{ChalkFnAbi, ChalkIr}; +use crate::program::Program as LoweredProgram; +use crate::{Identifier as Ident, TypeSort}; +use env::*; + +const SELF: &str = "Self"; +const FIXME_SELF: &str = "__FIXME_SELF__"; + +trait LowerWithEnv { + type Lowered; + + fn lower(&self, env: &Env) -> LowerResult; +} + +pub trait Lower { + type Lowered; + + fn lower(&self) -> Self::Lowered; +} + +impl Lower for Program { + type Lowered = LowerResult; + + fn lower(&self) -> Self::Lowered { + let mut lowerer = ProgramLowerer::default(); + + // Make a vector mapping each thing in `items` to an id, + // based just on its position: + let raw_ids = self + .items + .iter() + .map(|_| lowerer.next_item_id()) + .collect::>(); + + lowerer.extract_associated_types(self, &raw_ids)?; + lowerer.extract_ids(self, &raw_ids)?; + lowerer.lower(self, &raw_ids) + } +} + +trait LowerParameterMap { + fn synthetic_parameters(&self) -> Option>; + fn declared_parameters(&self) -> &[VariableKind]; + fn all_parameters(&self) -> Vec> { + self.synthetic_parameters() + .into_iter() + .chain(self.declared_parameters().iter().map(|id| id.lower())) + .collect() + + /* TODO: switch to this ordering, but adjust *all* the code to match + + self.declared_parameters() + .iter() + .map(|id| id.lower()) + .chain(self.synthetic_parameters()) // (*) see below + .collect() + */ + + // (*) It is important that the declared parameters come + // before the synthetic parameters in the ordering. This is + // because of traits, when used as types, only have the first + // N parameters in their kind (that is, they do not have Self). 
+ // + // Note that if `Self` appears in the where-clauses etc, the + // trait is not object-safe, and hence not supposed to be used + // as an object. Actually the handling of object types is + // probably just kind of messed up right now. That's ok. + } +} + +macro_rules! lower_param_map { + ($type: ident, $synthetic: expr) => { + impl LowerParameterMap for $type { + fn synthetic_parameters(&self) -> Option> { + $synthetic + } + fn declared_parameters(&self) -> &[VariableKind] { + &self.variable_kinds + } + } + }; +} +lower_param_map!(AdtDefn, None); +lower_param_map!(FnDefn, None); +lower_param_map!(ClosureDefn, None); +lower_param_map!(Impl, None); +lower_param_map!(AssocTyDefn, None); +lower_param_map!(AssocTyValue, None); +lower_param_map!(Clause, None); +lower_param_map!( + TraitDefn, + Some(chalk_ir::WithKind::new( + chalk_ir::VariableKind::Ty(TyVariableKind::General), + Atom::from(SELF), + )) +); + +fn get_type_of_usize() -> chalk_ir::Ty { + chalk_ir::TyKind::Scalar(chalk_ir::Scalar::Uint(chalk_ir::UintTy::Usize)).intern(ChalkIr) +} + +impl Lower for VariableKind { + type Lowered = chalk_ir::WithKind; + fn lower(&self) -> Self::Lowered { + let (kind, n) = match self { + VariableKind::Ty(n) => ( + chalk_ir::VariableKind::Ty(chalk_ir::TyVariableKind::General), + n, + ), + VariableKind::IntegerTy(n) => ( + chalk_ir::VariableKind::Ty(chalk_ir::TyVariableKind::Integer), + n, + ), + VariableKind::FloatTy(n) => ( + chalk_ir::VariableKind::Ty(chalk_ir::TyVariableKind::Float), + n, + ), + VariableKind::Lifetime(n) => (chalk_ir::VariableKind::Lifetime, n), + VariableKind::Const(ref n) => (chalk_ir::VariableKind::Const(get_type_of_usize()), n), + }; + + chalk_ir::WithKind::new(kind, n.str.clone()) + } +} + +impl LowerWithEnv for [QuantifiedWhereClause] { + type Lowered = Vec>; + + fn lower(&self, env: &Env) -> LowerResult { + self.iter() + .flat_map(|wc| match wc.lower(env) { + Ok(v) => v.into_iter().map(Ok).collect(), + Err(e) => vec![Err(e)], + }) + .collect() + } 
+} + +impl LowerWithEnv for WhereClause { + type Lowered = Vec>; + + /// Lower from an AST `where` clause to an internal IR. + /// Some AST `where` clauses can lower to multiple ones, this is why we return a `Vec`. + /// As for now, this is the only the case for `where T: Foo` which lowers to + /// `Implemented(T: Foo)` and `ProjectionEq(::Item = U)`. + fn lower(&self, env: &Env) -> LowerResult { + Ok(match self { + WhereClause::Implemented { trait_ref } => { + vec![chalk_ir::WhereClause::Implemented(trait_ref.lower(env)?)] + } + WhereClause::ProjectionEq { projection, ty } => vec![ + chalk_ir::WhereClause::AliasEq(chalk_ir::AliasEq { + alias: chalk_ir::AliasTy::Projection(projection.lower(env)?), + ty: ty.lower(env)?, + }), + chalk_ir::WhereClause::Implemented(projection.trait_ref.lower(env)?), + ], + WhereClause::LifetimeOutlives { a, b } => { + vec![chalk_ir::WhereClause::LifetimeOutlives( + chalk_ir::LifetimeOutlives { + a: a.lower(env)?, + b: b.lower(env)?, + }, + )] + } + WhereClause::TypeOutlives { ty, lifetime } => { + vec![chalk_ir::WhereClause::TypeOutlives( + chalk_ir::TypeOutlives { + ty: ty.lower(env)?, + lifetime: lifetime.lower(env)?, + }, + )] + } + }) + } +} + +impl LowerWithEnv for QuantifiedWhereClause { + type Lowered = Vec>; + + /// Lower from an AST `where` clause to an internal IR. + /// Some AST `where` clauses can lower to multiple ones, this is why we return a `Vec`. + /// As for now, this is the only the case for `where T: Foo` which lowers to + /// `Implemented(T: Foo)` and `ProjectionEq(::Item = U)`. 
+ fn lower(&self, env: &Env) -> LowerResult { + let variable_kinds = self.variable_kinds.iter().map(|k| k.lower()); + let binders = env.in_binders(variable_kinds, |env| self.where_clause.lower(env))?; + Ok(binders.into_iter().collect()) + } +} + +impl LowerWithEnv for DomainGoal { + type Lowered = Vec>; + + fn lower(&self, env: &Env) -> LowerResult { + let interner = env.interner(); + Ok(match self { + DomainGoal::Holds { where_clause } => where_clause + .lower(env)? + .into_iter() + .casted(interner) + .collect(), + DomainGoal::Normalize { projection, ty } => { + vec![chalk_ir::DomainGoal::Normalize(chalk_ir::Normalize { + alias: chalk_ir::AliasTy::Projection(projection.lower(env)?), + ty: ty.lower(env)?, + })] + } + DomainGoal::TyWellFormed { ty } => vec![chalk_ir::DomainGoal::WellFormed( + chalk_ir::WellFormed::Ty(ty.lower(env)?), + )], + DomainGoal::TraitRefWellFormed { trait_ref } => vec![chalk_ir::DomainGoal::WellFormed( + chalk_ir::WellFormed::Trait(trait_ref.lower(env)?), + )], + DomainGoal::TyFromEnv { ty } => vec![chalk_ir::DomainGoal::FromEnv( + chalk_ir::FromEnv::Ty(ty.lower(env)?), + )], + DomainGoal::TraitRefFromEnv { trait_ref } => vec![chalk_ir::DomainGoal::FromEnv( + chalk_ir::FromEnv::Trait(trait_ref.lower(env)?), + )], + DomainGoal::IsLocal { ty } => vec![chalk_ir::DomainGoal::IsLocal(ty.lower(env)?)], + DomainGoal::IsUpstream { ty } => vec![chalk_ir::DomainGoal::IsUpstream(ty.lower(env)?)], + DomainGoal::IsFullyVisible { ty } => { + vec![chalk_ir::DomainGoal::IsFullyVisible(ty.lower(env)?)] + } + DomainGoal::LocalImplAllowed { trait_ref } => { + vec![chalk_ir::DomainGoal::LocalImplAllowed( + trait_ref.lower(env)?, + )] + } + DomainGoal::Compatible => vec![chalk_ir::DomainGoal::Compatible], + DomainGoal::DownstreamType { ty } => { + vec![chalk_ir::DomainGoal::DownstreamType(ty.lower(env)?)] + } + DomainGoal::Reveal => vec![chalk_ir::DomainGoal::Reveal], + DomainGoal::ObjectSafe { id } => { + 
vec![chalk_ir::DomainGoal::ObjectSafe(env.lookup_trait(id)?)] + } + }) + } +} + +impl LowerWithEnv for LeafGoal { + type Lowered = chalk_ir::Goal; + + fn lower(&self, env: &Env) -> LowerResult { + let interner = env.interner(); + Ok(match self { + LeafGoal::DomainGoal { goal } => { + chalk_ir::Goal::all(interner, goal.lower(env)?.into_iter().casted(interner)) + } + LeafGoal::UnifyGenericArgs { a, b } => chalk_ir::EqGoal { + a: a.lower(env)?.cast(interner), + b: b.lower(env)?.cast(interner), + } + .cast::>(interner), + LeafGoal::SubtypeGenericArgs { a, b } => chalk_ir::SubtypeGoal { + a: a.lower(env)?, + b: b.lower(env)?, + } + .cast::>(interner), + }) + } +} + +impl LowerWithEnv for (&AdtDefn, chalk_ir::AdtId) { + type Lowered = rust_ir::AdtDatum; + + fn lower(&self, env: &Env) -> LowerResult { + let (adt_defn, adt_id) = self; + + if adt_defn.flags.fundamental && adt_defn.all_parameters().is_empty() { + return Err(RustIrError::InvalidFundamentalTypesParameters( + adt_defn.name.clone(), + )); + } + + let binders = env.in_binders(adt_defn.all_parameters(), |env| { + Ok(rust_ir::AdtDatumBound { + variants: adt_defn + .variants + .iter() + .map(|v| { + let fields: LowerResult<_> = + v.fields.iter().map(|f| f.ty.lower(env)).collect(); + Ok(rust_ir::AdtVariantDatum { fields: fields? 
}) + }) + .collect::>()?, + where_clauses: adt_defn.where_clauses.lower(env)?, + }) + })?; + + let flags = rust_ir::AdtFlags { + upstream: adt_defn.flags.upstream, + fundamental: adt_defn.flags.fundamental, + phantom_data: adt_defn.flags.phantom_data, + }; + + Ok(rust_ir::AdtDatum { + id: *adt_id, + binders, + flags, + kind: match adt_defn.flags.kind { + AdtKind::Struct => rust_ir::AdtKind::Struct, + AdtKind::Enum => rust_ir::AdtKind::Enum, + AdtKind::Union => rust_ir::AdtKind::Union, + }, + }) + } +} + +pub fn lower_adt_size_align(flags: &AdtFlags) -> rust_ir::AdtSizeAlign { + rust_ir::AdtSizeAlign::from_one_zst(flags.one_zst) +} + +impl LowerWithEnv for AdtRepr { + type Lowered = rust_ir::AdtRepr; + + fn lower(&self, env: &Env) -> LowerResult { + Ok(rust_ir::AdtRepr { + c: self.c, + packed: self.packed, + int: self.int.as_ref().map(|i| i.lower(env)).transpose()?, + }) + } +} + +impl LowerWithEnv for (&FnDefn, chalk_ir::FnDefId) { + type Lowered = rust_ir::FnDefDatum; + + fn lower(&self, env: &Env) -> LowerResult { + let (fn_defn, fn_def_id) = self; + + let binders = env.in_binders(fn_defn.all_parameters(), |env| { + let where_clauses = fn_defn.where_clauses.lower(env)?; + + let inputs_and_output = env.in_binders(vec![], |env| { + let args: LowerResult<_> = fn_defn + .argument_types + .iter() + .map(|t| t.lower(env)) + .collect(); + let return_type = fn_defn.return_type.lower(env)?; + Ok(rust_ir::FnDefInputsAndOutputDatum { + argument_types: args?, + return_type, + }) + })?; + Ok(rust_ir::FnDefDatumBound { + inputs_and_output, + where_clauses, + }) + })?; + + Ok(rust_ir::FnDefDatum { + id: *fn_def_id, + sig: fn_defn.sig.lower()?, + binders, + }) + } +} + +impl Lower for FnSig { + type Lowered = LowerResult>; + + fn lower(&self) -> Self::Lowered { + Ok(chalk_ir::FnSig { + abi: self.abi.lower()?, + safety: self.safety.lower(), + variadic: self.variadic, + }) + } +} + +impl Lower for FnAbi { + type Lowered = LowerResult; + fn lower(&self) -> Self::Lowered { + match 
self.0.as_ref() { + "Rust" => Ok(ChalkFnAbi::Rust), + "C" => Ok(ChalkFnAbi::C), + _ => Err(RustIrError::InvalidExternAbi(self.0.clone())), + } + } +} + +impl LowerWithEnv for ClosureDefn { + type Lowered = ( + rust_ir::ClosureKind, + chalk_ir::Binders>, + ); + + fn lower(&self, env: &Env) -> LowerResult { + let inputs_and_output = env.in_binders(self.all_parameters(), |env| { + let args: LowerResult<_> = self.argument_types.iter().map(|t| t.lower(env)).collect(); + let return_type = self.return_type.lower(env)?; + Ok(rust_ir::FnDefInputsAndOutputDatum { + argument_types: args?, + return_type, + }) + })?; + + Ok((self.kind.lower(), inputs_and_output)) + } +} + +impl Lower for ClosureKind { + type Lowered = rust_ir::ClosureKind; + + fn lower(&self) -> Self::Lowered { + match self { + ClosureKind::Fn => rust_ir::ClosureKind::Fn, + ClosureKind::FnMut => rust_ir::ClosureKind::FnMut, + ClosureKind::FnOnce => rust_ir::ClosureKind::FnOnce, + } + } +} + +impl LowerWithEnv for TraitRef { + type Lowered = chalk_ir::TraitRef; + + fn lower(&self, env: &Env) -> LowerResult { + let interner = env.interner(); + let without_self = TraitBound { + trait_name: self.trait_name.clone(), + args_no_self: self.args.iter().cloned().skip(1).collect(), + } + .lower(env)?; + + let self_parameter = self.args[0].lower(env)?; + Ok(without_self.as_trait_ref(interner, self_parameter.assert_ty_ref(interner).clone())) + } +} + +impl LowerWithEnv for TraitBound { + type Lowered = rust_ir::TraitBound; + + fn lower(&self, env: &Env) -> LowerResult { + let interner = env.interner(); + let trait_id = env.lookup_trait(&self.trait_name)?; + + let k = env.trait_kind(trait_id); + if k.sort != TypeSort::Trait { + return Err(RustIrError::NotTrait(self.trait_name.clone())); + } + + let parameters = self + .args_no_self + .iter() + .map(|a| a.lower(env)) + .collect::>>()?; + + if parameters.len() != k.binders.len(interner) { + return Err(RustIrError::IncorrectNumberOfTypeParameters { + identifier: 
self.trait_name.clone(), + expected: k.binders.len(interner), + actual: parameters.len(), + }); + } + + for (binder, param) in k.binders.binders.iter(interner).zip(parameters.iter()) { + if binder.kind() != param.kind() { + return Err(RustIrError::IncorrectTraitParameterKind { + identifier: self.trait_name.clone(), + expected: binder.kind(), + actual: param.kind(), + }); + } + } + + Ok(rust_ir::TraitBound { + trait_id, + args_no_self: parameters, + }) + } +} + +impl LowerWithEnv for AliasEqBound { + type Lowered = rust_ir::AliasEqBound; + + fn lower(&self, env: &Env) -> LowerResult { + let trait_bound = self.trait_bound.lower(env)?; + let lookup = env.lookup_associated_ty(trait_bound.trait_id, &self.name)?; + let args: Vec<_> = self + .args + .iter() + .map(|a| a.lower(env)) + .collect::>()?; + + if args.len() != lookup.addl_variable_kinds.len() { + return Err(RustIrError::IncorrectNumberOfAssociatedTypeParameters { + identifier: self.name.clone(), + expected: lookup.addl_variable_kinds.len(), + actual: args.len(), + }); + } + + for (param, arg) in lookup.addl_variable_kinds.iter().zip(args.iter()) { + if param.kind() != arg.kind() { + return Err(RustIrError::IncorrectAssociatedTypeParameterKind { + identifier: self.name.clone(), + expected: param.kind(), + actual: arg.kind(), + }); + } + } + + Ok(rust_ir::AliasEqBound { + trait_bound, + associated_ty_id: lookup.id, + parameters: args, + value: self.value.lower(env)?, + }) + } +} + +impl LowerWithEnv for InlineBound { + type Lowered = rust_ir::InlineBound; + + fn lower(&self, env: &Env) -> LowerResult { + Ok(match self { + InlineBound::TraitBound(b) => rust_ir::InlineBound::TraitBound(b.lower(env)?), + InlineBound::AliasEqBound(b) => rust_ir::InlineBound::AliasEqBound(b.lower(env)?), + }) + } +} + +impl LowerWithEnv for QuantifiedInlineBound { + type Lowered = rust_ir::QuantifiedInlineBound; + + fn lower(&self, env: &Env) -> LowerResult { + let variable_kinds = self.variable_kinds.iter().map(|k| k.lower()); + 
env.in_binders(variable_kinds, |env| self.bound.lower(env)) + } +} + +impl LowerWithEnv for [QuantifiedInlineBound] { + type Lowered = Vec>; + + fn lower(&self, env: &Env) -> LowerResult { + fn trait_identifier(bound: &InlineBound) -> &Identifier { + match bound { + InlineBound::TraitBound(tb) => &tb.trait_name, + InlineBound::AliasEqBound(ab) => &ab.trait_bound.trait_name, + } + } + + let mut regular_traits = Vec::new(); + let mut auto_traits = Vec::new(); + + for b in self { + let id = env.lookup_trait(trait_identifier(&b.bound))?; + if env.auto_trait(id) { + auto_traits.push((b, id)) + } else { + regular_traits.push((b, id)) + } + } + + auto_traits.sort_by_key(|b| b.1); + + regular_traits + .iter() + .chain(auto_traits.iter()) + .map(|(b, _)| b.lower(env)) + .collect() + } +} + +impl Lower for Polarity { + type Lowered = rust_ir::Polarity; + + fn lower(&self) -> Self::Lowered { + match self { + Polarity::Positive => rust_ir::Polarity::Positive, + Polarity::Negative => rust_ir::Polarity::Negative, + } + } +} + +impl Lower for ImplType { + type Lowered = rust_ir::ImplType; + fn lower(&self) -> Self::Lowered { + match self { + ImplType::Local => rust_ir::ImplType::Local, + ImplType::External => rust_ir::ImplType::External, + } + } +} + +impl Lower for TraitFlags { + type Lowered = rust_ir::TraitFlags; + + fn lower(&self) -> Self::Lowered { + rust_ir::TraitFlags { + auto: self.auto, + marker: self.marker, + upstream: self.upstream, + fundamental: self.fundamental, + non_enumerable: self.non_enumerable, + coinductive: self.coinductive, + } + } +} + +impl LowerWithEnv for ProjectionTy { + type Lowered = chalk_ir::ProjectionTy; + + fn lower(&self, env: &Env) -> LowerResult { + let ProjectionTy { + ref trait_ref, + ref name, + ref args, + } = *self; + let interner = env.interner(); + let chalk_ir::TraitRef { + trait_id, + substitution: trait_substitution, + } = trait_ref.lower(env)?; + let lookup = env.lookup_associated_ty(trait_id, name)?; + + let mut all_args: Vec<_> 
= trait_substitution.iter(interner).cloned().collect(); + + let args: Vec<_> = args + .iter() + .map(|a| a.lower(env)) + .collect::>()?; + + if args.len() != lookup.addl_variable_kinds.len() { + return Err(RustIrError::IncorrectNumberOfAssociatedTypeParameters { + identifier: self.name.clone(), + expected: lookup.addl_variable_kinds.len(), + actual: args.len(), + }); + } + + for (param, arg) in lookup.addl_variable_kinds.iter().zip(args.iter()) { + if param.kind() != arg.kind() { + return Err(RustIrError::IncorrectAssociatedTypeParameterKind { + identifier: self.name.clone(), + expected: param.kind(), + actual: arg.kind(), + }); + } + } + + all_args.extend(args.into_iter()); + + Ok(chalk_ir::ProjectionTy { + associated_ty_id: lookup.id, + substitution: chalk_ir::Substitution::from_iter(interner, all_args), + }) + } +} + +impl LowerWithEnv for Ty { + type Lowered = chalk_ir::Ty; + + fn lower(&self, env: &Env) -> LowerResult { + let interner = env.interner(); + Ok(match self { + Ty::Id { name } => { + let parameter = env.lookup_generic_arg(name)?; + parameter.ty(interner).cloned().ok_or_else(|| { + RustIrError::IncorrectParameterKind { + identifier: name.clone(), + expected: Kind::Ty, + actual: parameter.kind(), + } + })? + } + Ty::Dyn { + ref bounds, + ref lifetime, + } => chalk_ir::TyKind::Dyn(chalk_ir::DynTy { + bounds: env.in_binders( + // FIXME: Figure out a proper name for this type parameter + Some(chalk_ir::WithKind::new( + chalk_ir::VariableKind::Ty(TyVariableKind::General), + Atom::from(FIXME_SELF), + )), + |env| { + Ok(QuantifiedWhereClauses::from_iter( + interner, + bounds.lower(env)?.iter().flat_map(|qil| { + qil.into_where_clauses( + interner, + chalk_ir::TyKind::BoundVar(BoundVar::new( + DebruijnIndex::INNERMOST, + 0, + )) + .intern(interner), + ) + }), + )) + }, + )?, + lifetime: lifetime.lower(env)?, + }) + .intern(interner), + + Ty::Apply { name, ref args } => { + macro_rules! 
tykind { + ($k:expr, $tykind:ident, $id:expr) => {{ + if $k.binders.len(interner) != args.len() { + return Err(RustIrError::IncorrectNumberOfTypeParameters { + identifier: name.clone(), + expected: $k.binders.len(interner), + actual: args.len(), + }); + } + + let substitution = chalk_ir::Substitution::from_fallible( + interner, + args.iter().map(|t| t.lower(env)), + )?; + + for (param, arg) in $k + .binders + .binders + .iter(interner) + .zip(substitution.iter(interner)) + { + if param.kind() != arg.kind() { + return Err(RustIrError::IncorrectParameterKind { + identifier: name.clone(), + expected: param.kind(), + actual: arg.kind(), + }); + } + } + chalk_ir::TyKind::$tykind($id, substitution).intern(interner) + }}; + } + match env.lookup_type(name)? { + TypeLookup::Parameter(_) => { + return Err(RustIrError::CannotApplyTypeParameter(name.clone())) + } + TypeLookup::Adt(id) => tykind!(env.adt_kind(id), Adt, id), + TypeLookup::FnDef(id) => tykind!(env.fn_def_kind(id), FnDef, id), + TypeLookup::Closure(id) => tykind!(env.closure_kind(id), Closure, id), + TypeLookup::Opaque(id) => tykind!(env.opaque_kind(id), OpaqueType, id), + TypeLookup::Coroutine(id) => tykind!(env.coroutine_kind(id), Coroutine, id), + TypeLookup::Foreign(_) | TypeLookup::Trait(_) => { + panic!("Unexpected apply type") + } + } + } + + Ty::Projection { ref proj } => { + chalk_ir::TyKind::Alias(chalk_ir::AliasTy::Projection(proj.lower(env)?)) + .intern(interner) + } + + Ty::ForAll { + lifetime_names, + types, + sig, + } => { + let quantified_env = env.introduce(lifetime_names.iter().map(|id| { + chalk_ir::WithKind::new(chalk_ir::VariableKind::Lifetime, id.str.clone()) + }))?; + + let mut lowered_tys = Vec::with_capacity(types.len()); + for ty in types { + lowered_tys.push(ty.lower(&quantified_env)?.cast(interner)); + } + + let function = chalk_ir::FnPointer { + num_binders: lifetime_names.len(), + substitution: chalk_ir::FnSubst(Substitution::from_iter(interner, lowered_tys)), + sig: sig.lower()?, + 
}; + chalk_ir::TyKind::Function(function).intern(interner) + } + Ty::Tuple { ref types } => chalk_ir::TyKind::Tuple( + types.len(), + chalk_ir::Substitution::from_fallible( + interner, + types.iter().map(|t| t.lower(env)), + )?, + ) + .intern(interner), + + Ty::Scalar { ty } => chalk_ir::TyKind::Scalar(ty.lower()).intern(interner), + + Ty::Array { ty, len } => { + chalk_ir::TyKind::Array(ty.lower(env)?, len.lower(env)?).intern(interner) + } + + Ty::Slice { ty } => chalk_ir::TyKind::Slice(ty.lower(env)?).intern(interner), + + Ty::Raw { mutability, ty } => { + chalk_ir::TyKind::Raw(mutability.lower(), ty.lower(env)?).intern(interner) + } + + Ty::Ref { + mutability, + lifetime, + ty, + } => chalk_ir::TyKind::Ref(mutability.lower(), lifetime.lower(env)?, ty.lower(env)?) + .intern(interner), + + Ty::Str => chalk_ir::TyKind::Str.intern(interner), + + Ty::Never => chalk_ir::TyKind::Never.intern(interner), + }) + } +} + +impl LowerWithEnv for Const { + type Lowered = chalk_ir::Const; + + fn lower(&self, env: &Env) -> LowerResult { + let interner = env.interner(); + match self { + Const::Id(name) => { + let parameter = env.lookup_generic_arg(name)?; + parameter + .constant(interner) + .ok_or_else(|| RustIrError::IncorrectParameterKind { + identifier: name.clone(), + expected: Kind::Const, + actual: parameter.kind(), + }) + .map(|c| c.clone()) + } + Const::Value(value) => Ok(chalk_ir::ConstData { + ty: get_type_of_usize(), + value: chalk_ir::ConstValue::Concrete(chalk_ir::ConcreteConst { interned: *value }), + } + .intern(interner)), + } + } +} + +impl LowerWithEnv for GenericArg { + type Lowered = chalk_ir::GenericArg; + + fn lower(&self, env: &Env) -> LowerResult { + let interner = env.interner(); + match self { + GenericArg::Ty(ref t) => Ok(t.lower(env)?.cast(interner)), + GenericArg::Lifetime(ref l) => Ok(l.lower(env)?.cast(interner)), + GenericArg::Id(name) => env.lookup_generic_arg(name), + GenericArg::Const(c) => Ok(c.lower(env)?.cast(interner)), + } + } +} + +impl 
LowerWithEnv for Lifetime { + type Lowered = chalk_ir::Lifetime; + + fn lower(&self, env: &Env) -> LowerResult { + let interner = env.interner(); + match self { + Lifetime::Id { name } => { + let parameter = env.lookup_generic_arg(name)?; + parameter.lifetime(interner).copied().ok_or_else(|| { + RustIrError::IncorrectParameterKind { + identifier: name.clone(), + expected: Kind::Lifetime, + actual: parameter.kind(), + } + }) + } + Lifetime::Static => Ok(chalk_ir::Lifetime::new( + interner, + chalk_ir::LifetimeData::Static, + )), + Lifetime::Erased => Ok(chalk_ir::Lifetime::new( + interner, + chalk_ir::LifetimeData::Erased, + )), + } + } +} + +impl LowerWithEnv for (&Impl, ImplId, &AssociatedTyValueIds) { + type Lowered = rust_ir::ImplDatum; + + fn lower(&self, env: &Env) -> LowerResult { + let (impl_, impl_id, associated_ty_value_ids) = self; + + let polarity = impl_.polarity.lower(); + let binders = env.in_binders(impl_.all_parameters(), |env| { + let trait_ref = impl_.trait_ref.lower(env)?; + debug!(?trait_ref); + + if !polarity.is_positive() && !impl_.assoc_ty_values.is_empty() { + return Err(RustIrError::NegativeImplAssociatedValues( + impl_.trait_ref.trait_name.clone(), + )); + } + + let where_clauses = impl_.where_clauses.lower(env)?; + debug!(where_clauses = ?trait_ref); + Ok(rust_ir::ImplDatumBound { + trait_ref, + where_clauses, + }) + })?; + + // lookup the ids for each of the "associated type values" + // within the impl, which should have already assigned and + // stored in the map + let associated_ty_value_ids = impl_ + .assoc_ty_values + .iter() + .map(|atv| associated_ty_value_ids[&(*impl_id, atv.name.str.clone())]) + .collect(); + + debug!(?associated_ty_value_ids); + + Ok(rust_ir::ImplDatum { + polarity, + binders, + impl_type: impl_.impl_type.lower(), + associated_ty_value_ids, + }) + } +} + +impl LowerWithEnv for Clause { + type Lowered = Vec>; + + fn lower(&self, env: &Env) -> LowerResult { + let interner = env.interner(); + let implications = 
env.in_binders(self.all_parameters(), |env| { + let consequences: Vec> = self.consequence.lower(env)?; + + let conditions = chalk_ir::Goals::from_fallible( + interner, + // Subtle: in the SLG solver, we pop conditions from R to + // L. To preserve the expected order (L to R), we must + // therefore reverse. + self.conditions.iter().map(|g| g.lower(env)).rev(), + )?; + + let implications = consequences + .into_iter() + .map(|consequence| chalk_ir::ProgramClauseImplication { + consequence, + conditions: conditions.clone(), + constraints: chalk_ir::Constraints::empty(interner), + priority: ClausePriority::High, + }) + .collect::>(); + Ok(implications) + })?; + + let clauses = implications + .into_iter() + .map( + |implication: chalk_ir::Binders>| { + chalk_ir::ProgramClauseData(implication).intern(interner) + }, + ) + .collect(); + Ok(clauses) + } +} + +impl LowerWithEnv for (&TraitDefn, chalk_ir::TraitId) { + type Lowered = rust_ir::TraitDatum; + + fn lower(&self, env: &Env) -> LowerResult { + let (trait_defn, trait_id) = self; + + let all_parameters = trait_defn.all_parameters(); + let all_parameters_len = all_parameters.len(); + let binders = env.in_binders(all_parameters, |env| { + if trait_defn.flags.auto { + if all_parameters_len > 1 { + return Err(RustIrError::AutoTraitParameters(trait_defn.name.clone())); + } + if !trait_defn.where_clauses.is_empty() { + return Err(RustIrError::AutoTraitWhereClauses(trait_defn.name.clone())); + } + } + + Ok(rust_ir::TraitDatumBound { + where_clauses: trait_defn.where_clauses.lower(env)?, + }) + })?; + + let associated_ty_ids: Vec<_> = trait_defn + .assoc_ty_defns + .iter() + .map(|defn| env.lookup_associated_ty(*trait_id, &defn.name).unwrap().id) + .collect(); + + let trait_datum = rust_ir::TraitDatum { + id: *trait_id, + binders, + flags: trait_defn.flags.lower(), + associated_ty_ids, + well_known: trait_defn.well_known.map(|def| def.lower()), + }; + + debug!(?trait_datum); + + Ok(trait_datum) + } +} + +pub fn 
lower_goal(goal: &Goal, program: &LoweredProgram) -> LowerResult> { + let interner = ChalkIr; + let associated_ty_lookups: BTreeMap<_, _> = program + .associated_ty_data + .iter() + .map(|(&associated_ty_id, datum)| { + let trait_datum = &program.trait_data[&datum.trait_id]; + let num_trait_params = trait_datum.binders.len(interner); + let addl_variable_kinds = + datum.binders.binders.as_slice(interner)[num_trait_params..].to_owned(); + let lookup = AssociatedTyLookup { + id: associated_ty_id, + addl_variable_kinds, + }; + ((datum.trait_id, datum.name.clone()), lookup) + }) + .collect(); + + let auto_traits = program + .trait_data + .iter() + .map(|(&trait_id, datum)| (trait_id, datum.flags.auto)) + .collect(); + + let env = Env { + adt_ids: &program.adt_ids, + fn_def_ids: &program.fn_def_ids, + closure_ids: &program.closure_ids, + trait_ids: &program.trait_ids, + opaque_ty_ids: &program.opaque_ty_ids, + coroutine_ids: &program.coroutine_ids, + coroutine_kinds: &program.coroutine_kinds, + adt_kinds: &program.adt_kinds, + fn_def_kinds: &program.fn_def_kinds, + closure_kinds: &program.closure_kinds, + trait_kinds: &program.trait_kinds, + opaque_ty_kinds: &program.opaque_ty_kinds, + associated_ty_lookups: &associated_ty_lookups, + foreign_ty_ids: &program.foreign_ty_ids, + parameter_map: BTreeMap::new(), + auto_traits: &auto_traits, + }; + + goal.lower(&env) +} + +impl LowerWithEnv for Goal { + type Lowered = chalk_ir::Goal; + + fn lower(&self, env: &Env) -> LowerResult { + let interner = env.interner(); + match self { + Goal::ForAll(ids, g) => (&**g, chalk_ir::QuantifierKind::ForAll, ids).lower(env), + Goal::Exists(ids, g) => (&**g, chalk_ir::QuantifierKind::Exists, ids).lower(env), + Goal::Implies(hyp, g) => { + // We "elaborate" implied bounds by lowering goals like `T: Trait` and + // `T: Trait` to `FromEnv(T: Trait)` and `FromEnv(T: Trait)` + // in the assumptions of an `if` goal, e.g. `if (T: Trait) { ... }` lowers to + // `if (FromEnv(T: Trait)) { ... 
/* this part is untouched */ ... }`. + let where_clauses = hyp + .iter() + .flat_map(|clause| match clause.lower(env) { + Ok(v) => v.into_iter().map(Ok).collect(), + Err(e) => vec![Err(e)], + }) + .map(|result| result.map(|h| h.into_from_env_clause(interner))); + let where_clauses = + chalk_ir::ProgramClauses::from_fallible(interner, where_clauses); + Ok(chalk_ir::GoalData::Implies(where_clauses?, g.lower(env)?).intern(interner)) + } + Goal::And(g1, g2s) => { + let goals = chalk_ir::Goals::from_fallible( + interner, + Some(g1).into_iter().chain(g2s).map(|g| g.lower(env)), + )?; + Ok(chalk_ir::GoalData::All(goals).intern(interner)) + } + Goal::Not(g) => Ok(chalk_ir::GoalData::Not(g.lower(env)?).intern(interner)), + Goal::Compatible(g) => Ok(g.lower(env)?.compatible(interner)), + Goal::Leaf(leaf) => { + // A where clause can lower to multiple leaf goals; wrap these in Goal::And. + Ok(leaf.lower(env)?) + } + } + } +} + +impl LowerWithEnv for (&Goal, chalk_ir::QuantifierKind, &Vec) { + type Lowered = chalk_ir::Goal; + + fn lower(&self, env: &Env) -> LowerResult { + let (goal, quantifier_kind, variable_kinds) = self; + + let interner = env.interner(); + if variable_kinds.is_empty() { + return goal.lower(env); + } + + let variable_kinds = variable_kinds.iter().map(|k| k.lower()); + let subgoal = env.in_binders(variable_kinds, |env| goal.lower(env))?; + Ok(chalk_ir::GoalData::Quantified(*quantifier_kind, subgoal).intern(interner)) + } +} + +impl Lower for WellKnownTrait { + type Lowered = rust_ir::WellKnownTrait; + + fn lower(&self) -> Self::Lowered { + match self { + WellKnownTrait::Sized => rust_ir::WellKnownTrait::Sized, + WellKnownTrait::Copy => rust_ir::WellKnownTrait::Copy, + WellKnownTrait::Clone => rust_ir::WellKnownTrait::Clone, + WellKnownTrait::Drop => rust_ir::WellKnownTrait::Drop, + WellKnownTrait::FnOnce => rust_ir::WellKnownTrait::FnOnce, + WellKnownTrait::FnMut => rust_ir::WellKnownTrait::FnMut, + WellKnownTrait::Fn => rust_ir::WellKnownTrait::Fn, + 
WellKnownTrait::AsyncFnOnce => rust_ir::WellKnownTrait::AsyncFnOnce, + WellKnownTrait::AsyncFnMut => rust_ir::WellKnownTrait::AsyncFnMut, + WellKnownTrait::AsyncFn => rust_ir::WellKnownTrait::AsyncFn, + WellKnownTrait::Unsize => rust_ir::WellKnownTrait::Unsize, + WellKnownTrait::Unpin => rust_ir::WellKnownTrait::Unpin, + WellKnownTrait::CoerceUnsized => rust_ir::WellKnownTrait::CoerceUnsized, + WellKnownTrait::DiscriminantKind => rust_ir::WellKnownTrait::DiscriminantKind, + WellKnownTrait::Coroutine => rust_ir::WellKnownTrait::Coroutine, + WellKnownTrait::DispatchFromDyn => rust_ir::WellKnownTrait::DispatchFromDyn, + WellKnownTrait::Tuple => rust_ir::WellKnownTrait::Tuple, + WellKnownTrait::Pointee => rust_ir::WellKnownTrait::Pointee, + WellKnownTrait::FnPtr => rust_ir::WellKnownTrait::FnPtr, + WellKnownTrait::Future => rust_ir::WellKnownTrait::Future, + } + } +} + +trait Kinded { + fn kind(&self) -> Kind; +} + +impl Kinded for chalk_ir::VariableKind { + fn kind(&self) -> Kind { + match self { + chalk_ir::VariableKind::Ty(_) => Kind::Ty, + chalk_ir::VariableKind::Lifetime => Kind::Lifetime, + chalk_ir::VariableKind::Const(_) => Kind::Const, + } + } +} + +impl Kinded for chalk_ir::GenericArg { + fn kind(&self) -> Kind { + let interner = ChalkIr; + match self.data(interner) { + chalk_ir::GenericArgData::Ty(_) => Kind::Ty, + chalk_ir::GenericArgData::Lifetime(_) => Kind::Lifetime, + chalk_ir::GenericArgData::Const(_) => Kind::Const, + } + } +} + +impl Lower for IntTy { + type Lowered = chalk_ir::IntTy; + + fn lower(&self) -> Self::Lowered { + match self { + IntTy::I8 => chalk_ir::IntTy::I8, + IntTy::I16 => chalk_ir::IntTy::I16, + IntTy::I32 => chalk_ir::IntTy::I32, + IntTy::I64 => chalk_ir::IntTy::I64, + IntTy::I128 => chalk_ir::IntTy::I128, + IntTy::Isize => chalk_ir::IntTy::Isize, + } + } +} + +impl Lower for UintTy { + type Lowered = chalk_ir::UintTy; + + fn lower(&self) -> Self::Lowered { + match self { + UintTy::U8 => chalk_ir::UintTy::U8, + UintTy::U16 => 
chalk_ir::UintTy::U16, + UintTy::U32 => chalk_ir::UintTy::U32, + UintTy::U64 => chalk_ir::UintTy::U64, + UintTy::U128 => chalk_ir::UintTy::U128, + UintTy::Usize => chalk_ir::UintTy::Usize, + } + } +} + +impl Lower for FloatTy { + type Lowered = chalk_ir::FloatTy; + + fn lower(&self) -> Self::Lowered { + match self { + FloatTy::F16 => chalk_ir::FloatTy::F16, + FloatTy::F32 => chalk_ir::FloatTy::F32, + FloatTy::F64 => chalk_ir::FloatTy::F64, + FloatTy::F128 => chalk_ir::FloatTy::F128, + } + } +} + +impl Lower for ScalarType { + type Lowered = chalk_ir::Scalar; + + fn lower(&self) -> Self::Lowered { + match self { + ScalarType::Int(int) => chalk_ir::Scalar::Int(int.lower()), + ScalarType::Uint(uint) => chalk_ir::Scalar::Uint(uint.lower()), + ScalarType::Float(float) => chalk_ir::Scalar::Float(float.lower()), + ScalarType::Bool => chalk_ir::Scalar::Bool, + ScalarType::Char => chalk_ir::Scalar::Char, + } + } +} + +impl Lower for Mutability { + type Lowered = chalk_ir::Mutability; + fn lower(&self) -> Self::Lowered { + match self { + Mutability::Mut => chalk_ir::Mutability::Mut, + Mutability::Not => chalk_ir::Mutability::Not, + } + } +} + +impl Lower for Safety { + type Lowered = chalk_ir::Safety; + fn lower(&self) -> Self::Lowered { + match self { + Safety::Safe => chalk_ir::Safety::Safe, + Safety::Unsafe => chalk_ir::Safety::Unsafe, + } + } +} + +impl Lower for Movability { + type Lowered = rust_ir::Movability; + fn lower(&self) -> Self::Lowered { + match self { + Movability::Static => rust_ir::Movability::Static, + Movability::Movable => rust_ir::Movability::Movable, + } + } +} diff --git a/chalk-integration/src/lowering/env.rs b/chalk-integration/src/lowering/env.rs new file mode 100644 index 00000000000..f399830bbc4 --- /dev/null +++ b/chalk-integration/src/lowering/env.rs @@ -0,0 +1,278 @@ +use chalk_ir::interner::HasInterner; +use chalk_ir::{ + self, AdtId, BoundVar, ClosureId, CoroutineId, DebruijnIndex, FnDefId, OpaqueTyId, TraitId, + VariableKinds, +}; +use 
chalk_ir::{cast::Cast, ForeignDefId, WithKind}; +use chalk_parse::ast::*; +use chalk_solve::rust_ir::AssociatedTyValueId; +use std::collections::BTreeMap; + +use crate::error::RustIrError; +use crate::interner::ChalkIr; +use crate::{Identifier as Ident, TypeKind}; + +pub type AdtIds = BTreeMap>; +pub type FnDefIds = BTreeMap>; +pub type ClosureIds = BTreeMap>; +pub type TraitIds = BTreeMap>; +pub type CoroutineIds = BTreeMap>; +pub type OpaqueTyIds = BTreeMap>; +pub type AdtKinds = BTreeMap, TypeKind>; +pub type FnDefKinds = BTreeMap, TypeKind>; +pub type ClosureKinds = BTreeMap, TypeKind>; +pub type TraitKinds = BTreeMap, TypeKind>; +pub type AutoTraits = BTreeMap, bool>; +pub type OpaqueTyVariableKinds = BTreeMap, TypeKind>; +pub type CoroutineKinds = BTreeMap, TypeKind>; +pub type AssociatedTyLookups = BTreeMap<(chalk_ir::TraitId, Ident), AssociatedTyLookup>; +pub type AssociatedTyValueIds = + BTreeMap<(chalk_ir::ImplId, Ident), AssociatedTyValueId>; +pub type ForeignIds = BTreeMap>; + +pub type ParameterMap = BTreeMap>; + +pub type LowerResult = Result; + +#[derive(Clone, Debug)] +pub struct Env<'k> { + pub adt_ids: &'k AdtIds, + pub adt_kinds: &'k AdtKinds, + pub fn_def_ids: &'k FnDefIds, + pub fn_def_kinds: &'k FnDefKinds, + pub closure_ids: &'k ClosureIds, + pub closure_kinds: &'k ClosureKinds, + pub trait_ids: &'k TraitIds, + pub trait_kinds: &'k TraitKinds, + pub opaque_ty_ids: &'k OpaqueTyIds, + pub opaque_ty_kinds: &'k OpaqueTyVariableKinds, + pub associated_ty_lookups: &'k AssociatedTyLookups, + pub auto_traits: &'k AutoTraits, + pub foreign_ty_ids: &'k ForeignIds, + pub coroutine_ids: &'k CoroutineIds, + pub coroutine_kinds: &'k CoroutineKinds, + /// GenericArg identifiers are used as keys, therefore + /// all identifiers in an environment must be unique (no shadowing). + pub parameter_map: ParameterMap, +} + +/// Information about an associated type **declaration** (i.e., an +/// `AssociatedTyDatum`). 
This information is gathered in the first +/// phase of creating the Rust IR and is then later used to lookup the +/// "id" of an associated type. +/// +/// ```ignore +/// trait Foo { +/// type Bar<'a>; // <-- associated type declaration +/// // ---- +/// // | +/// // addl_variable_kinds +/// } +/// ``` +#[derive(Debug, PartialEq, Eq)] +pub struct AssociatedTyLookup { + pub id: chalk_ir::AssocTypeId, + pub addl_variable_kinds: Vec>, +} + +pub enum TypeLookup<'k> { + Parameter(&'k WithKind), + Adt(AdtId), + FnDef(FnDefId), + Closure(ClosureId), + Opaque(OpaqueTyId), + Foreign(ForeignDefId), + Trait(TraitId), + Coroutine(CoroutineId), +} + +impl Env<'_> { + pub fn interner(&self) -> ChalkIr { + ChalkIr + } + + pub fn lookup_generic_arg( + &self, + name: &Identifier, + ) -> LowerResult> { + let interner = self.interner(); + + macro_rules! tykind { + ($k:expr, $tykind:ident, $id:expr) => { + if $k.binders.len(interner) > 0 { + Err(RustIrError::IncorrectNumberOfTypeParameters { + identifier: name.clone(), + expected: $k.binders.len(interner), + actual: 0, + }) + } else { + Ok( + chalk_ir::TyKind::$tykind($id, chalk_ir::Substitution::empty(interner)) + .intern(interner), + ) + .cast(interner) + } + }; + } + + match self.lookup_type(name) { + Ok(TypeLookup::Parameter(p)) => { + let b = p.skip_kind(); + Ok(match &p.kind { + chalk_ir::VariableKind::Ty(_) => chalk_ir::TyKind::BoundVar(*b) + .intern(interner) + .cast(interner), + chalk_ir::VariableKind::Lifetime => chalk_ir::LifetimeData::BoundVar(*b) + .intern(interner) + .cast(interner), + chalk_ir::VariableKind::Const(ty) => { + b.to_const(interner, ty.clone()).cast(interner) + } + }) + } + Ok(TypeLookup::Adt(id)) => tykind!(self.adt_kind(id), Adt, id), + Ok(TypeLookup::FnDef(id)) => tykind!(self.fn_def_kind(id), FnDef, id), + Ok(TypeLookup::Closure(id)) => tykind!(self.closure_kind(id), Closure, id), + Ok(TypeLookup::Coroutine(id)) => tykind!(self.coroutine_kind(id), Coroutine, id), + Ok(TypeLookup::Opaque(id)) => 
Ok(chalk_ir::TyKind::Alias(chalk_ir::AliasTy::Opaque( + chalk_ir::OpaqueTy { + opaque_ty_id: id, + substitution: chalk_ir::Substitution::empty(interner), + }, + )) + .intern(interner) + .cast(interner)), + Ok(TypeLookup::Foreign(id)) => Ok(chalk_ir::TyKind::Foreign(id) + .intern(interner) + .cast(interner)), + Ok(TypeLookup::Trait(_)) => Err(RustIrError::NotStruct(name.clone())), + Err(_) => Err(RustIrError::InvalidParameterName(name.clone())), + } + } + + pub fn lookup_type(&self, name: &Identifier) -> LowerResult { + if let Some(id) = self.parameter_map.get(&name.str) { + Ok(TypeLookup::Parameter(id)) + } else if let Some(id) = self.adt_ids.get(&name.str) { + Ok(TypeLookup::Adt(*id)) + } else if let Some(id) = self.fn_def_ids.get(&name.str) { + Ok(TypeLookup::FnDef(*id)) + } else if let Some(id) = self.closure_ids.get(&name.str) { + Ok(TypeLookup::Closure(*id)) + } else if let Some(id) = self.opaque_ty_ids.get(&name.str) { + Ok(TypeLookup::Opaque(*id)) + } else if let Some(id) = self.foreign_ty_ids.get(&name.str) { + Ok(TypeLookup::Foreign(*id)) + } else if let Some(id) = self.trait_ids.get(&name.str) { + Ok(TypeLookup::Trait(*id)) + } else if let Some(id) = self.coroutine_ids.get(&name.str) { + Ok(TypeLookup::Coroutine(*id)) + } else { + Err(RustIrError::NotStruct(name.clone())) + } + } + + pub fn auto_trait(&self, id: chalk_ir::TraitId) -> bool { + self.auto_traits[&id] + } + + pub fn lookup_trait(&self, name: &Identifier) -> LowerResult> { + if let Some(&id) = self.trait_ids.get(&name.str) { + Ok(id) + } else if self.parameter_map.get(&name.str).is_some() + || self.adt_ids.get(&name.str).is_some() + { + Err(RustIrError::NotTrait(name.clone())) + } else { + Err(RustIrError::InvalidTraitName(name.clone())) + } + } + + pub fn trait_kind(&self, id: chalk_ir::TraitId) -> &TypeKind { + &self.trait_kinds[&id] + } + + pub fn adt_kind(&self, id: chalk_ir::AdtId) -> &TypeKind { + &self.adt_kinds[&id] + } + + pub fn fn_def_kind(&self, id: chalk_ir::FnDefId) -> &TypeKind 
{ + &self.fn_def_kinds[&id] + } + + pub fn closure_kind(&self, id: chalk_ir::ClosureId) -> &TypeKind { + &self.closure_kinds[&id] + } + + pub fn opaque_kind(&self, id: chalk_ir::OpaqueTyId) -> &TypeKind { + &self.opaque_ty_kinds[&id] + } + + pub fn coroutine_kind(&self, id: chalk_ir::CoroutineId) -> &TypeKind { + &self.coroutine_kinds[&id] + } + + pub fn lookup_associated_ty( + &self, + trait_id: TraitId, + ident: &Identifier, + ) -> LowerResult<&AssociatedTyLookup> { + self.associated_ty_lookups + .get(&(trait_id, ident.str.clone())) + .ok_or_else(|| RustIrError::MissingAssociatedType(ident.clone())) + } + + /// Introduces new parameters, shifting the indices of existing + /// parameters to accommodate them. The indices of the new binders + /// will be assigned in order as they are iterated. + pub fn introduce(&self, binders: I) -> LowerResult + where + I: IntoIterator>, + I::IntoIter: ExactSizeIterator, + { + // As binders to introduce we receive `ParameterKind`, + // which we need to transform into `(Ident, ParameterKind)`, + // because that is the key-value pair for ParameterMap. + // `swap_inner` lets us do precisely that, replacing `Ident` inside + // `ParameterKind` with a `BoundVar` and returning both. + let binders = binders.into_iter().enumerate().map(|(i, k)| { + let (kind, name) = k.into(); + ( + name, + chalk_ir::WithKind::new(kind, BoundVar::new(DebruijnIndex::INNERMOST, i)), + ) + }); + let len = binders.len(); + + // For things already in the parameter map, we take each existing key-value pair + // `(Ident, ParameterKind)` and shift in the inner `BoundVar`. 
+ let parameter_map: ParameterMap = self + .parameter_map + .iter() + .map(|(k, v)| (k.clone(), v.map_ref(|b| b.shifted_in()))) + .chain(binders) + .collect(); + if parameter_map.len() != self.parameter_map.len() + len { + return Err(RustIrError::DuplicateOrShadowedParameters); + } + Ok(Env { + parameter_map, + ..*self + }) + } + + pub fn in_binders(&self, binders: I, op: OP) -> LowerResult> + where + I: IntoIterator>, + I::IntoIter: ExactSizeIterator, + T: HasInterner, + OP: FnOnce(&Self) -> LowerResult, + { + let binders: Vec<_> = binders.into_iter().collect(); + let env = self.introduce(binders.iter().cloned())?; + Ok(chalk_ir::Binders::new( + VariableKinds::from_iter(self.interner(), binders.iter().map(|v| v.kind.clone())), + op(&env)?, + )) + } +} diff --git a/chalk-integration/src/lowering/program_lowerer.rs b/chalk-integration/src/lowering/program_lowerer.rs new file mode 100644 index 00000000000..2659301b878 --- /dev/null +++ b/chalk-integration/src/lowering/program_lowerer.rs @@ -0,0 +1,561 @@ +use chalk_ir::cast::Cast; +use chalk_ir::{ + self, AdtId, AssocTypeId, BoundVar, ClosureId, CoroutineId, DebruijnIndex, FnDefId, + ForeignDefId, ImplId, OpaqueTyId, TraitId, TyVariableKind, VariableKinds, +}; +use chalk_parse::ast::*; +use chalk_solve::rust_ir::{ + self, Anonymize, AssociatedTyValueId, CoroutineDatum, CoroutineInputOutputDatum, + CoroutineWitnessDatum, CoroutineWitnessExistential, OpaqueTyDatum, OpaqueTyDatumBound, +}; +use rust_ir::IntoWhereClauses; +use std::collections::{BTreeMap, HashSet}; +use std::sync::Arc; +use string_cache::DefaultAtom as Atom; + +use super::{env::*, lower_adt_size_align, Lower, LowerParameterMap, LowerWithEnv, FIXME_SELF}; +use crate::error::RustIrError; +use crate::program::Program as LoweredProgram; +use crate::RawId; +use crate::{interner::ChalkIr, TypeKind, TypeSort}; + +#[derive(Default)] +pub(super) struct ProgramLowerer { + next_item_index: u32, + + associated_ty_lookups: AssociatedTyLookups, + 
associated_ty_value_ids: AssociatedTyValueIds, + adt_ids: AdtIds, + fn_def_ids: FnDefIds, + closure_ids: ClosureIds, + trait_ids: TraitIds, + auto_traits: AutoTraits, + opaque_ty_ids: OpaqueTyIds, + adt_kinds: AdtKinds, + fn_def_kinds: FnDefKinds, + coroutine_ids: CoroutineIds, + coroutine_kinds: CoroutineKinds, + closure_kinds: ClosureKinds, + trait_kinds: TraitKinds, + opaque_ty_kinds: OpaqueTyVariableKinds, + object_safe_traits: HashSet>, + foreign_ty_ids: ForeignIds, +} + +impl ProgramLowerer { + pub fn next_item_id(&mut self) -> RawId { + let index = self.next_item_index; + self.next_item_index += 1; + RawId { index } + } + + /// Create ids for associated type declarations and values + pub fn extract_associated_types( + &mut self, + program: &Program, + raw_ids: &[RawId], + ) -> LowerResult<()> { + for (item, &raw_id) in program.items.iter().zip(raw_ids) { + match item { + Item::TraitDefn(d) => { + if d.flags.auto && !d.assoc_ty_defns.is_empty() { + return Err(RustIrError::AutoTraitAssociatedTypes(d.name.clone())); + } + for defn in &d.assoc_ty_defns { + let addl_variable_kinds = defn.all_parameters(); + let lookup = AssociatedTyLookup { + id: AssocTypeId(self.next_item_id()), + addl_variable_kinds: addl_variable_kinds.anonymize(), + }; + self.associated_ty_lookups + .insert((TraitId(raw_id), defn.name.str.clone()), lookup); + } + } + + Item::Impl(d) => { + for atv in &d.assoc_ty_values { + let atv_id = AssociatedTyValueId(self.next_item_id()); + self.associated_ty_value_ids + .insert((ImplId(raw_id), atv.name.str.clone()), atv_id); + } + } + + _ => {} + } + } + Ok(()) + } + + pub fn extract_ids(&mut self, program: &Program, raw_ids: &[RawId]) -> LowerResult<()> { + for (item, &raw_id) in program.items.iter().zip(raw_ids) { + match item { + Item::AdtDefn(defn) => { + let type_kind = defn.lower_type_kind()?; + let id = AdtId(raw_id); + self.adt_ids.insert(type_kind.name.clone(), id); + self.adt_kinds.insert(id, type_kind); + } + Item::FnDefn(defn) => { + let 
type_kind = defn.lower_type_kind()?; + let id = FnDefId(raw_id); + self.fn_def_ids.insert(type_kind.name.clone(), id); + self.fn_def_kinds.insert(id, type_kind); + } + Item::ClosureDefn(defn) => { + let type_kind = defn.lower_type_kind()?; + let id = ClosureId(raw_id); + self.closure_ids.insert(defn.name.str.clone(), id); + self.closure_kinds.insert(id, type_kind); + } + Item::TraitDefn(defn) => { + let type_kind = defn.lower_type_kind()?; + let id = TraitId(raw_id); + self.trait_ids.insert(type_kind.name.clone(), id); + self.trait_kinds.insert(id, type_kind); + self.auto_traits.insert(id, defn.flags.auto); + + if defn.flags.object_safe { + self.object_safe_traits.insert(id); + } + } + Item::OpaqueTyDefn(defn) => { + let type_kind = defn.lower_type_kind()?; + let id = OpaqueTyId(raw_id); + self.opaque_ty_ids.insert(defn.name.str.clone(), id); + self.opaque_ty_kinds.insert(id, type_kind); + } + Item::Foreign(ForeignDefn(ref ident)) => { + self.foreign_ty_ids + .insert(ident.str.clone(), ForeignDefId(raw_id)); + } + Item::CoroutineDefn(defn) => { + let id = CoroutineId(raw_id); + self.coroutine_ids.insert(defn.name.str.clone(), id); + self.coroutine_kinds.insert(id, defn.lower_type_kind()?); + } + Item::Impl(_) => continue, + Item::Clause(_) => continue, + }; + } + Ok(()) + } + + pub fn lower(self, program: &Program, raw_ids: &[RawId]) -> LowerResult { + let mut adt_data = BTreeMap::new(); + let mut adt_reprs = BTreeMap::new(); + let mut adt_size_aligns = BTreeMap::new(); + let mut adt_variances = BTreeMap::new(); + let mut fn_def_data = BTreeMap::new(); + let mut fn_def_variances = BTreeMap::new(); + let mut closure_inputs_and_output = BTreeMap::new(); + let mut closure_closure_kind = BTreeMap::new(); + let mut closure_upvars = BTreeMap::new(); + let mut trait_data = BTreeMap::new(); + let mut well_known_traits = BTreeMap::new(); + let mut well_known_assoc_types = BTreeMap::new(); + let mut impl_data = BTreeMap::new(); + let mut associated_ty_data = BTreeMap::new(); 
+ let mut associated_ty_values = BTreeMap::new(); + let mut opaque_ty_data = BTreeMap::new(); + let mut coroutine_data = BTreeMap::new(); + let mut coroutine_witness_data = BTreeMap::new(); + let mut hidden_opaque_types = BTreeMap::new(); + let mut custom_clauses = Vec::new(); + + for (item, &raw_id) in program.items.iter().zip(raw_ids) { + let empty_env = Env { + adt_ids: &self.adt_ids, + adt_kinds: &self.adt_kinds, + fn_def_ids: &self.fn_def_ids, + fn_def_kinds: &self.fn_def_kinds, + closure_ids: &self.closure_ids, + closure_kinds: &self.closure_kinds, + trait_ids: &self.trait_ids, + trait_kinds: &self.trait_kinds, + opaque_ty_ids: &self.opaque_ty_ids, + opaque_ty_kinds: &self.opaque_ty_kinds, + coroutine_ids: &self.coroutine_ids, + coroutine_kinds: &self.coroutine_kinds, + associated_ty_lookups: &self.associated_ty_lookups, + parameter_map: BTreeMap::new(), + auto_traits: &self.auto_traits, + foreign_ty_ids: &self.foreign_ty_ids, + }; + + match *item { + Item::AdtDefn(ref d) => { + let identifier = d.name.clone(); + let adt_id = AdtId(raw_id); + adt_data.insert(adt_id, Arc::new((d, adt_id).lower(&empty_env)?)); + adt_reprs.insert(adt_id, Arc::new(d.repr.lower(&empty_env)?)); + adt_size_aligns.insert(adt_id, Arc::new(lower_adt_size_align(&d.flags))); + let n_params = d.all_parameters().len(); + let variances = match d.variances.clone() { + Some(v) => { + if v.len() != n_params { + return Err(RustIrError::IncorrectNumberOfVarianceParameters { + identifier, + expected: n_params, + actual: v.len(), + }); + } + v.into_iter() + .map(|v| match v { + Variance::Invariant => chalk_ir::Variance::Invariant, + Variance::Covariant => chalk_ir::Variance::Covariant, + Variance::Contravariant => chalk_ir::Variance::Contravariant, + }) + .collect() + } + None => (0..n_params) + .map(|_| chalk_ir::Variance::Invariant) + .collect(), + }; + adt_variances.insert(adt_id, variances); + } + Item::FnDefn(ref defn) => { + let identifier = defn.name.clone(); + let fn_def_id = 
FnDefId(raw_id); + fn_def_data.insert(fn_def_id, Arc::new((defn, fn_def_id).lower(&empty_env)?)); + let n_params = defn.all_parameters().len(); + let variances = match defn.variances.clone() { + Some(v) => { + if v.len() != n_params { + return Err(RustIrError::IncorrectNumberOfVarianceParameters { + identifier, + expected: n_params, + actual: v.len(), + }); + } + v.into_iter() + .map(|v| match v { + Variance::Invariant => chalk_ir::Variance::Invariant, + Variance::Covariant => chalk_ir::Variance::Covariant, + Variance::Contravariant => chalk_ir::Variance::Contravariant, + }) + .collect() + } + None => (0..n_params) + .map(|_| chalk_ir::Variance::Invariant) + .collect(), + }; + fn_def_variances.insert(fn_def_id, variances); + } + Item::ClosureDefn(ref defn) => { + let closure_def_id = ClosureId(raw_id); + let (kind, inputs_and_output) = defn.lower(&empty_env)?; + closure_closure_kind.insert(closure_def_id, kind); + closure_inputs_and_output.insert(closure_def_id, inputs_and_output); + let upvars = + empty_env.in_binders(defn.all_parameters(), |env| { + let upvar_tys: LowerResult>> = + defn.upvars.iter().map(|ty| ty.lower(env)).collect(); + let substitution = chalk_ir::Substitution::from_iter( + ChalkIr, + upvar_tys?.into_iter().map(|ty| ty.cast(ChalkIr)), + ); + Ok(chalk_ir::TyKind::Tuple(defn.upvars.len(), substitution) + .intern(ChalkIr)) + })?; + closure_upvars.insert(closure_def_id, upvars); + } + Item::TraitDefn(ref trait_defn) => { + let trait_id = TraitId(raw_id); + let trait_datum = (trait_defn, trait_id).lower(&empty_env)?; + + if let Some(well_known) = trait_datum.well_known { + well_known_traits.insert(well_known, trait_id); + } + + trait_data.insert(trait_id, Arc::new(trait_datum)); + + for assoc_ty_defn in &trait_defn.assoc_ty_defns { + let lookup = &self.associated_ty_lookups + [&(trait_id, assoc_ty_defn.name.str.clone())]; + + if let Some(well_known) = assoc_ty_defn.well_known { + let well_known = match well_known { + 
chalk_parse::ast::WellKnownAssocType::AsyncFnOnceOutput => { + chalk_solve::rust_ir::WellKnownAssocType::AsyncFnOnceOutput + } + }; + well_known_assoc_types.insert(well_known, lookup.id); + } + + // The parameters in scope for the associated + // type definitions are *both* those from the + // trait *and* those from the associated type + // itself. + // + // Insert the associated type parameters first + // into the list so that they are given the + // indices starting from 0. This corresponds + // to the "de bruijn" convention where "more + // inner" sets of parameters get the lower + // indices: + // + // e.g., in this example, the indices would be + // assigned `[A0, A1, T0, T1]`: + // + // ``` + // trait Foo { + // type Bar; + // } + // ``` + let mut variable_kinds = trait_defn.all_parameters(); + variable_kinds.extend(assoc_ty_defn.all_parameters()); + + let binders = empty_env.in_binders(variable_kinds, |env| { + Ok(rust_ir::AssociatedTyDatumBound { + bounds: assoc_ty_defn.bounds.lower(env)?, + where_clauses: assoc_ty_defn.where_clauses.lower(env)?, + }) + })?; + + associated_ty_data.insert( + lookup.id, + Arc::new(rust_ir::AssociatedTyDatum { + trait_id: TraitId(raw_id), + id: lookup.id, + name: assoc_ty_defn.name.str.clone(), + binders, + }), + ); + } + } + Item::Impl(ref impl_defn) => { + let impl_id = ImplId(raw_id); + let impl_datum = Arc::new( + (impl_defn, impl_id, &self.associated_ty_value_ids).lower(&empty_env)?, + ); + impl_data.insert(impl_id, impl_datum.clone()); + let trait_id = impl_datum.trait_id(); + + for atv in &impl_defn.assoc_ty_values { + let atv_id = self.associated_ty_value_ids[&(impl_id, atv.name.str.clone())]; + let lookup = &self.associated_ty_lookups[&(trait_id, atv.name.str.clone())]; + + // The parameters in scope for the associated + // type definitions are *both* those from the + // impl *and* those from the associated type + // itself. As in the "trait" case above, we begin + // with the parameters from the impl. 
+ let mut variable_kinds = impl_defn.all_parameters(); + variable_kinds.extend(atv.all_parameters()); + + let value = empty_env.in_binders(variable_kinds, |env| { + Ok(rust_ir::AssociatedTyValueBound { + ty: atv.value.lower(env)?, + }) + })?; + + associated_ty_values.insert( + atv_id, + Arc::new(rust_ir::AssociatedTyValue { + impl_id, + associated_ty_id: lookup.id, + value, + }), + ); + } + } + Item::Clause(ref clause) => { + custom_clauses.extend(clause.lower(&empty_env)?); + } + Item::OpaqueTyDefn(ref opaque_ty) => { + if let Some(&opaque_ty_id) = self.opaque_ty_ids.get(&opaque_ty.name.str) { + let variable_kinds = opaque_ty + .variable_kinds + .iter() + .map(|k| k.lower()) + .collect::>(); + + // Introduce the parameters declared on the opaque type definition. + // So if we have `type Foo = impl Trait`, this would introduce `P1..Pn` + let binders = empty_env.in_binders(variable_kinds, |env| { + let hidden_ty = opaque_ty.ty.lower(env)?; + hidden_opaque_types.insert(opaque_ty_id, Arc::new(hidden_ty)); + + // Introduce a variable to represent the hidden "self type". This will be used in the bounds. + // So the `impl Trait` will be lowered to `exists { Self: Trait }`. + let bounds: chalk_ir::Binders>> = env + .in_binders( + Some(chalk_ir::WithKind::new( + chalk_ir::VariableKind::Ty(TyVariableKind::General), + Atom::from(FIXME_SELF), + )), + |env| { + let interner = env.interner(); + Ok(opaque_ty + .bounds + .lower(env)? + .iter() + .flat_map(|qil| { + // Instantiate the bounds with the innermost bound variable, which represents Self, as the self type. 
+ qil.into_where_clauses( + interner, + chalk_ir::TyKind::BoundVar(BoundVar::new( + DebruijnIndex::INNERMOST, + 0, + )) + .intern(interner), + ) + }) + .collect()) + }, + )?; + let where_clauses: chalk_ir::Binders>> = env + .in_binders( + Some(chalk_ir::WithKind::new( + chalk_ir::VariableKind::Ty(TyVariableKind::General), + Atom::from(FIXME_SELF), + )), + |env| opaque_ty.where_clauses.lower(env), + )?; + + Ok(OpaqueTyDatumBound { + bounds, + where_clauses, + }) + })?; + + opaque_ty_data.insert( + opaque_ty_id, + Arc::new(OpaqueTyDatum { + opaque_ty_id, + bound: binders, + }), + ); + } + } + Item::CoroutineDefn(ref defn) => { + let variable_kinds = defn + .variable_kinds + .iter() + .map(|k| k.lower()) + .collect::>(); + + let witness_lifetimes = defn + .witness_lifetimes + .iter() + .map(|i| VariableKind::Lifetime(i.clone()).lower()) + .collect::>(); + + let input_output = empty_env.in_binders(variable_kinds.clone(), |env| { + let yield_type = defn.yield_ty.lower(env)?; + let resume_type = defn.resume_ty.lower(env)?; + let return_type = defn.return_ty.lower(env)?; + let upvars: Result, _> = + defn.upvars.iter().map(|ty| ty.lower(env)).collect(); + + Ok(CoroutineInputOutputDatum { + resume_type, + yield_type, + return_type, + upvars: upvars?, + }) + })?; + + let inner_types = empty_env.in_binders(variable_kinds, |env| { + let witnesses = env.in_binders(witness_lifetimes, |env| { + let witnesses: Result, _> = + defn.witness_types.iter().map(|ty| ty.lower(env)).collect(); + witnesses + })?; + + Ok(CoroutineWitnessExistential { types: witnesses }) + })?; + + let coroutine_datum = CoroutineDatum { + movability: defn.movability.lower(), + input_output, + }; + let coroutine_witness = CoroutineWitnessDatum { inner_types }; + + let id = self.coroutine_ids[&defn.name.str]; + coroutine_data.insert(id, Arc::new(coroutine_datum)); + coroutine_witness_data.insert(id, Arc::new(coroutine_witness)); + } + Item::Foreign(_) => {} + } + } + + Ok(LoweredProgram { + adt_ids: 
self.adt_ids, + fn_def_ids: self.fn_def_ids, + closure_ids: self.closure_ids, + closure_upvars, + closure_kinds: self.closure_kinds, + trait_ids: self.trait_ids, + adt_kinds: self.adt_kinds, + fn_def_kinds: self.fn_def_kinds, + trait_kinds: self.trait_kinds, + adt_data, + adt_reprs, + adt_size_aligns, + adt_variances, + fn_def_data, + fn_def_variances, + closure_inputs_and_output, + closure_closure_kind, + coroutine_ids: self.coroutine_ids, + coroutine_kinds: self.coroutine_kinds, + coroutine_data, + coroutine_witness_data, + trait_data, + well_known_traits, + well_known_assoc_types, + impl_data, + associated_ty_values, + associated_ty_data, + opaque_ty_ids: self.opaque_ty_ids, + opaque_ty_kinds: self.opaque_ty_kinds, + opaque_ty_data, + hidden_opaque_types, + custom_clauses, + object_safe_traits: self.object_safe_traits, + foreign_ty_ids: self.foreign_ty_ids, + }) + } +} + +trait LowerTypeKind { + fn lower_type_kind(&self) -> LowerResult; +} + +macro_rules! lower_type_kind { + ($type: ident, $sort: ident, $params: expr) => { + impl LowerTypeKind for $type { + fn lower_type_kind(&self) -> LowerResult { + Ok(TypeKind { + sort: TypeSort::$sort, + name: self.name.str.clone(), + binders: chalk_ir::Binders::new( + VariableKinds::from_iter(ChalkIr, $params(self).anonymize()), + crate::Unit, + ), + }) + } + } + }; +} + +lower_type_kind!(AdtDefn, Adt, |defn: &AdtDefn| defn.all_parameters()); +lower_type_kind!(FnDefn, FnDef, |defn: &FnDefn| defn.all_parameters()); +lower_type_kind!(ClosureDefn, Closure, |defn: &ClosureDefn| defn + .all_parameters()); +lower_type_kind!(CoroutineDefn, Coroutine, |defn: &CoroutineDefn| defn + .variable_kinds + .iter() + .map(|k| k.lower()) + .collect::>()); +lower_type_kind!(TraitDefn, Trait, |defn: &TraitDefn| defn + .variable_kinds + .iter() + .map(|k| k.lower()) + .collect::>()); +lower_type_kind!(OpaqueTyDefn, Opaque, |defn: &OpaqueTyDefn| defn + .variable_kinds + .iter() + .map(|k| k.lower()) + .collect::>()); diff --git 
a/chalk-integration/src/program.rs b/chalk-integration/src/program.rs new file mode 100644 index 00000000000..ebf205140af --- /dev/null +++ b/chalk-integration/src/program.rs @@ -0,0 +1,633 @@ +use crate::interner::ChalkIr; +use crate::{tls, Identifier, TypeKind}; +use chalk_ir::{could_match::CouldMatch, UnificationDatabase}; +use chalk_ir::{debug::Angle, Variance}; +use chalk_ir::{ + debug::SeparatorTraitRef, AdtId, AliasTy, AssocTypeId, Binders, CanonicalVarKinds, ClosureId, + CoroutineId, FnDefId, ForeignDefId, GenericArg, Goal, Goals, ImplId, IntTy, Lifetime, OpaqueTy, + OpaqueTyId, ProgramClause, ProgramClauseImplication, ProgramClauses, ProjectionTy, Scalar, + Substitution, TraitId, Ty, TyKind, UintTy, Variances, +}; +use chalk_solve::rust_ir::{ + AdtDatum, AdtRepr, AdtSizeAlign, AssociatedTyDatum, AssociatedTyValue, AssociatedTyValueId, + ClosureKind, CoroutineDatum, CoroutineWitnessDatum, FnDefDatum, FnDefInputsAndOutputDatum, + ImplDatum, ImplType, OpaqueTyDatum, TraitDatum, WellKnownAssocType, WellKnownTrait, +}; +use chalk_solve::split::Split; +use chalk_solve::RustIrDatabase; +use std::collections::{BTreeMap, HashSet}; +use std::fmt; +use std::sync::Arc; + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct Program { + /// From ADT name to item-id. Used during lowering only. + pub adt_ids: BTreeMap>, + + /// For each ADT: + pub adt_kinds: BTreeMap, TypeKind>, + + pub adt_variances: BTreeMap, Vec>, + + pub fn_def_ids: BTreeMap>, + + pub fn_def_kinds: BTreeMap, TypeKind>, + + pub fn_def_variances: BTreeMap, Vec>, + + pub closure_ids: BTreeMap>, + + pub closure_upvars: BTreeMap, Binders>>, + + pub closure_kinds: BTreeMap, TypeKind>, + + /// For each coroutine + pub coroutine_ids: BTreeMap>, + + pub coroutine_kinds: BTreeMap, TypeKind>, + + pub coroutine_data: BTreeMap, Arc>>, + + pub coroutine_witness_data: BTreeMap, Arc>>, + + /// From trait name to item-id. Used during lowering only. 
+ pub trait_ids: BTreeMap>, + + /// For each trait: + pub trait_kinds: BTreeMap, TypeKind>, + + /// For each ADT: + pub adt_data: BTreeMap, Arc>>, + + pub adt_reprs: BTreeMap, Arc>>, + + pub adt_size_aligns: BTreeMap, Arc>, + + pub fn_def_data: BTreeMap, Arc>>, + + pub closure_inputs_and_output: + BTreeMap, Binders>>, + + // Weird name, but otherwise would overlap with `closure_kinds` above. + pub closure_closure_kind: BTreeMap, ClosureKind>, + + /// For each impl: + pub impl_data: BTreeMap, Arc>>, + + /// For each associated ty value `type Foo = XXX` found in an impl: + pub associated_ty_values: + BTreeMap, Arc>>, + + // From opaque type name to item-id. Used during lowering only. + pub opaque_ty_ids: BTreeMap>, + + /// For each opaque type: + pub opaque_ty_kinds: BTreeMap, TypeKind>, + + /// For each opaque type: + pub opaque_ty_data: BTreeMap, Arc>>, + + /// Stores the hidden types for opaque types + pub hidden_opaque_types: BTreeMap, Arc>>, + + /// For each trait: + pub trait_data: BTreeMap, Arc>>, + + /// For each trait lang item + pub well_known_traits: BTreeMap>, + + /// For each assoc type lang item + pub well_known_assoc_types: BTreeMap>, + + /// For each associated ty declaration `type Foo` found in a trait: + pub associated_ty_data: BTreeMap, Arc>>, + + /// For each user-specified clause + pub custom_clauses: Vec>, + + /// Store the traits marked with `#[object_safe]` + pub object_safe_traits: HashSet>, + + /// For each foreign type `extern { type A; }` + pub foreign_ty_ids: BTreeMap>, +} + +impl Program { + /// Returns the ids for all impls declared in this crate. 
+ pub(crate) fn local_impl_ids(&self) -> Vec> { + self.impl_data + .iter() + .filter(|(_, impl_datum)| impl_datum.impl_type == ImplType::Local) + .map(|(&impl_id, _)| impl_id) + .collect() + } +} + +impl tls::DebugContext for Program { + fn debug_adt_id( + &self, + adt_id: AdtId, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error> { + if let Some(k) = self.adt_kinds.get(&adt_id) { + write!(fmt, "{}", k.name) + } else { + fmt.debug_struct("InvalidAdtId") + .field("index", &adt_id.0) + .finish() + } + } + + fn debug_trait_id( + &self, + trait_id: TraitId, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error> { + if let Some(k) = self.trait_kinds.get(&trait_id) { + write!(fmt, "{}", k.name) + } else { + fmt.debug_struct("InvalidTraitId") + .field("index", &trait_id.0) + .finish() + } + } + + fn debug_assoc_type_id( + &self, + assoc_type_id: AssocTypeId, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error> { + if let Some(d) = self.associated_ty_data.get(&assoc_type_id) { + write!(fmt, "({:?}::{})", d.trait_id, d.name) + } else { + fmt.debug_struct("InvalidAssocTypeId") + .field("index", &assoc_type_id.0) + .finish() + } + } + + fn debug_opaque_ty_id( + &self, + opaque_ty_id: OpaqueTyId, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error> { + if let Some(k) = self.opaque_ty_kinds.get(&opaque_ty_id) { + write!(fmt, "{}", k.name) + } else { + fmt.debug_struct("InvalidOpaqueTyId") + .field("index", &opaque_ty_id.0) + .finish() + } + } + + fn debug_fn_def_id( + &self, + fn_def_id: FnDefId, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error> { + if let Some(k) = self.fn_def_kinds.get(&fn_def_id) { + write!(fmt, "{}", k.name) + } else { + fmt.debug_struct("InvalidFnDefId") + .field("index", &fn_def_id.0) + .finish() + } + } + + fn debug_alias( + &self, + alias_ty: &AliasTy, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error> { + match alias_ty { + AliasTy::Projection(projection_ty) => 
self.debug_projection_ty(projection_ty, fmt), + AliasTy::Opaque(opaque_ty) => self.debug_opaque_ty(opaque_ty, fmt), + } + } + + fn debug_projection_ty( + &self, + projection_ty: &ProjectionTy, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error> { + let (associated_ty_data, trait_params, other_params) = self.split_projection(projection_ty); + write!( + fmt, + "<{:?} as {:?}{:?}>::{}{:?}", + &trait_params[0], + associated_ty_data.trait_id, + Angle(&trait_params[1..]), + associated_ty_data.name, + Angle(other_params) + ) + } + + fn debug_opaque_ty( + &self, + opaque_ty: &OpaqueTy, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error> { + write!(fmt, "{:?}", opaque_ty.opaque_ty_id) + } + + fn debug_ty(&self, ty: &Ty, fmt: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { + let interner = self.interner(); + write!(fmt, "{:?}", ty.kind(interner).debug(interner)) + } + + fn debug_lifetime( + &self, + lifetime: &Lifetime, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error> { + let interner = self.interner(); + write!(fmt, "{:?}", lifetime.data(interner)) + } + + fn debug_generic_arg( + &self, + generic_arg: &GenericArg, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error> { + let interner = self.interner(); + write!(fmt, "{:?}", generic_arg.data(interner).inner_debug()) + } + + fn debug_variable_kinds( + &self, + variable_kinds: &chalk_ir::VariableKinds, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error> { + let interner = self.interner(); + write!(fmt, "{:?}", variable_kinds.as_slice(interner)) + } + + fn debug_variable_kinds_with_angles( + &self, + variable_kinds: &chalk_ir::VariableKinds, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error> { + let interner = self.interner(); + write!(fmt, "{:?}", variable_kinds.inner_debug(interner)) + } + + fn debug_canonical_var_kinds( + &self, + variable_kinds: &chalk_ir::CanonicalVarKinds, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error> { + let interner = 
self.interner(); + write!(fmt, "{:?}", variable_kinds.as_slice(interner)) + } + + fn debug_goal( + &self, + goal: &Goal, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error> { + let interner = self.interner(); + write!(fmt, "{:?}", goal.data(interner)) + } + + fn debug_goals( + &self, + goals: &Goals, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error> { + let interner = self.interner(); + write!(fmt, "{:?}", goals.debug(interner)) + } + + fn debug_program_clause_implication( + &self, + pci: &ProgramClauseImplication, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error> { + let interner = self.interner(); + write!(fmt, "{:?}", pci.debug(interner)) + } + + fn debug_program_clause( + &self, + clause: &ProgramClause, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error> { + let interner = self.interner(); + write!(fmt, "{:?}", clause.data(interner)) + } + + fn debug_program_clauses( + &self, + clauses: &ProgramClauses, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error> { + let interner = self.interner(); + write!(fmt, "{:?}", clauses.as_slice(interner)) + } + + fn debug_substitution( + &self, + substitution: &Substitution, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error> { + let interner = self.interner(); + write!(fmt, "{:?}", substitution.debug(interner)) + } + + fn debug_separator_trait_ref( + &self, + separator_trait_ref: &SeparatorTraitRef<'_, ChalkIr>, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error> { + let interner = self.interner(); + write!(fmt, "{:?}", separator_trait_ref.debug(interner)) + } + + fn debug_quantified_where_clauses( + &self, + clauses: &chalk_ir::QuantifiedWhereClauses, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error> { + let interner = self.interner(); + write!(fmt, "{:?}", clauses.as_slice(interner)) + } + + fn debug_constraints( + &self, + constraints: &chalk_ir::Constraints, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error> { + let interner = 
self.interner(); + write!(fmt, "{:?}", constraints.as_slice(interner)) + } + + fn debug_variances( + &self, + variances: &chalk_ir::Variances, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error> { + let interner = self.interner(); + write!(fmt, "{:?}", variances.as_slice(interner)) + } +} + +impl UnificationDatabase for Program { + fn fn_def_variance(&self, fn_def_id: FnDefId) -> Variances { + Variances::from_iter( + self.interner(), + self.fn_def_variances[&fn_def_id].iter().copied(), + ) + } + + fn adt_variance(&self, adt_id: AdtId) -> Variances { + Variances::from_iter(self.interner(), self.adt_variances[&adt_id].iter().copied()) + } +} + +impl RustIrDatabase for Program { + fn custom_clauses(&self) -> Vec> { + self.custom_clauses.clone() + } + + fn associated_ty_data(&self, ty: AssocTypeId) -> Arc> { + self.associated_ty_data[&ty].clone() + } + + fn trait_datum(&self, id: TraitId) -> Arc> { + self.trait_data[&id].clone() + } + + fn impl_datum(&self, id: ImplId) -> Arc> { + self.impl_data[&id].clone() + } + + fn associated_ty_from_impl( + &self, + impl_id: ImplId, + assoc_type_id: AssocTypeId, + ) -> Option> { + self.impl_data[&impl_id] + .associated_ty_value_ids + .iter() + .copied() + .find(|id| self.associated_ty_values[id].associated_ty_id == assoc_type_id) + } + + fn associated_ty_value( + &self, + id: AssociatedTyValueId, + ) -> Arc> { + self.associated_ty_values[&id].clone() + } + + fn opaque_ty_data(&self, id: OpaqueTyId) -> Arc> { + self.opaque_ty_data[&id].clone() + } + + fn hidden_opaque_type(&self, id: OpaqueTyId) -> Ty { + (*self.hidden_opaque_types[&id]).clone() + } + + fn adt_datum(&self, id: AdtId) -> Arc> { + self.adt_data[&id].clone() + } + + fn coroutine_datum(&self, id: CoroutineId) -> Arc> { + self.coroutine_data[&id].clone() + } + + fn coroutine_witness_datum( + &self, + id: CoroutineId, + ) -> Arc> { + self.coroutine_witness_data[&id].clone() + } + + fn adt_repr(&self, id: AdtId) -> Arc> { + self.adt_reprs[&id].clone() + } + + fn 
adt_size_align(&self, id: AdtId) -> Arc { + self.adt_size_aligns[&id].clone() + } + + fn fn_def_datum(&self, id: FnDefId) -> Arc> { + self.fn_def_data[&id].clone() + } + + fn impls_for_trait( + &self, + trait_id: TraitId, + parameters: &[GenericArg], + _binders: &CanonicalVarKinds, + ) -> Vec> { + let interner = self.interner(); + self.impl_data + .iter() + .filter(|(_, impl_datum)| { + let trait_ref = &impl_datum.binders.skip_binders().trait_ref; + trait_id == trait_ref.trait_id && { + assert_eq!(trait_ref.substitution.len(interner), parameters.len()); + parameters.could_match( + interner, + self.unification_database(), + trait_ref.substitution.as_slice(interner), + ) + } + }) + .map(|(&impl_id, _)| impl_id) + .collect() + } + + fn local_impls_to_coherence_check(&self, trait_id: TraitId) -> Vec> { + self.impl_data + .iter() + .filter(|(_, impl_datum)| { + impl_datum.trait_id() == trait_id && impl_datum.impl_type == ImplType::Local + }) + .map(|(&impl_id, _)| impl_id) + .collect() + } + + fn impl_provided_for( + &self, + auto_trait_id: TraitId, + impl_ty: &TyKind, + ) -> bool { + let interner = self.interner(); + + // we don't compare actual substitutions as + // - given a `struct S`; an implementation for `S
` should suppress an auto impl for `S`, and + // - an implementation for `[A]` should suppress an auto impl for `[B]`, and + // - an implementation for `(A, B, C)` should suppress an auto impl for `(D, E, F)` + // this may change later + self.impl_data.values().any(|impl_datum| { + if impl_datum.trait_id() != auto_trait_id { + return false; + } + + let ty = impl_datum + .binders + .skip_binders() + .trait_ref + .self_type_parameter(interner); + match (impl_ty, ty.kind(interner)) { + (TyKind::Adt(id_a, _), TyKind::Adt(id_b, _)) => id_a == id_b, + (TyKind::AssociatedType(id_a, _), TyKind::AssociatedType(id_b, _)) => id_a == id_b, + (TyKind::Scalar(scalar_a), TyKind::Scalar(scalar_b)) => scalar_a == scalar_b, + (TyKind::Str, TyKind::Str) => true, + (TyKind::Tuple(arity_a, _), TyKind::Tuple(arity_b, _)) => arity_a == arity_b, + (TyKind::OpaqueType(id_a, _), TyKind::OpaqueType(id_b, _)) => id_a == id_b, + (TyKind::Slice(_), TyKind::Slice(_)) => true, + (TyKind::FnDef(id_a, _), TyKind::FnDef(id_b, _)) => id_a == id_b, + (TyKind::Ref(id_a, _, _), TyKind::Ref(id_b, _, _)) => id_a == id_b, + (TyKind::Raw(id_a, _), TyKind::Raw(id_b, _)) => id_a == id_b, + (TyKind::Never, TyKind::Never) => true, + (TyKind::Array(_, _), TyKind::Array(_, _)) => true, + (TyKind::Closure(id_a, _), TyKind::Closure(id_b, _)) => id_a == id_b, + (TyKind::Coroutine(id_a, _), TyKind::Coroutine(id_b, _)) => id_a == id_b, + (TyKind::CoroutineWitness(id_a, _), TyKind::CoroutineWitness(id_b, _)) => { + id_a == id_b + } + (TyKind::Foreign(id_a), TyKind::Foreign(id_b)) => id_a == id_b, + (TyKind::Error, TyKind::Error) => true, + (_, _) => false, + } + }) + } + + fn well_known_trait_id(&self, well_known_trait: WellKnownTrait) -> Option> { + self.well_known_traits.get(&well_known_trait).copied() + } + + fn well_known_assoc_type_id( + &self, + assoc_type: WellKnownAssocType, + ) -> Option> { + self.well_known_assoc_types.get(&assoc_type).copied() + } + + fn program_clauses_for_env( + &self, + environment: 
&chalk_ir::Environment, + ) -> ProgramClauses { + chalk_solve::program_clauses_for_env(self, environment) + } + + fn interner(&self) -> ChalkIr { + ChalkIr + } + + fn is_object_safe(&self, trait_id: TraitId) -> bool { + self.object_safe_traits.contains(&trait_id) + } + + // For all the closure functions: this is different than how rustc does it. + // In rustc, the substitution, closure kind, fnsig, and upvars are stored + // together. Here, we store the closure kind, signature, and upvars + // separately, since it's easier. And this is opaque to `chalk-solve`. + + fn closure_inputs_and_output( + &self, + closure_id: ClosureId, + _substs: &Substitution, + ) -> Binders> { + self.closure_inputs_and_output[&closure_id].clone() + } + + fn closure_kind( + &self, + closure_id: ClosureId, + _substs: &Substitution, + ) -> ClosureKind { + self.closure_closure_kind[&closure_id] + } + + fn closure_upvars( + &self, + closure_id: ClosureId, + _substs: &Substitution, + ) -> Binders> { + self.closure_upvars[&closure_id].clone() + } + + fn closure_fn_substitution( + &self, + _closure_id: ClosureId, + substs: &Substitution, + ) -> Substitution { + substs.clone() + } + + fn unification_database(&self) -> &dyn UnificationDatabase { + self + } + + // The default implementation for `RustIrDatabase::assoc_type_name` outputs + // the name in the format `(Trait::AssocTypeName)`, which is reformatted to + // `_Trait__AssocTypeName_`. This doesn't match the input names, which is + // normally acceptable, but causes the re-parse tests for the .chalk syntax + // writer to fail. This is because they use the `Eq` implementation on + // Program, which checks for name equality. 
+ fn assoc_type_name(&self, assoc_type_id: AssocTypeId) -> String { + self.associated_ty_data + .get(&assoc_type_id) + .unwrap() + .name + .to_string() + } + + // Mirrors current (07a63e6d1fabf3560e8e1e17c1d56b10a06152d9) implementation in rustc + fn discriminant_type(&self, ty: Ty) -> Ty { + let interner = self.interner(); + match ty.data(interner).kind { + TyKind::Adt(id, _) => self + .adt_repr(id) + .int + .clone() + .unwrap_or_else(|| TyKind::Scalar(Scalar::Int(IntTy::Isize)).intern(interner)), + TyKind::Coroutine(..) => TyKind::Scalar(Scalar::Uint(UintTy::U32)).intern(interner), + _ => TyKind::Scalar(Scalar::Uint(UintTy::U8)).intern(interner), + } + } +} diff --git a/chalk-integration/src/program_environment.rs b/chalk-integration/src/program_environment.rs new file mode 100644 index 00000000000..b06c56b6774 --- /dev/null +++ b/chalk-integration/src/program_environment.rs @@ -0,0 +1,14 @@ +use crate::interner::ChalkIr; +use chalk_ir::ProgramClause; + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct ProgramEnvironment { + /// Compiled forms of the above: + pub program_clauses: Vec>, +} + +impl ProgramEnvironment { + pub fn new(program_clauses: Vec>) -> Self { + Self { program_clauses } + } +} diff --git a/chalk-integration/src/query.rs b/chalk-integration/src/query.rs new file mode 100644 index 00000000000..ad493be94a6 --- /dev/null +++ b/chalk-integration/src/query.rs @@ -0,0 +1,257 @@ +// https://crates.io/crates/salsa +// hello world https://github.com/salsa-rs/salsa/blob/master/examples/hello_world/main.rs + +use crate::error::ChalkError; +use crate::interner::ChalkIr; +use crate::lowering::Lower; +use crate::program::Program; +use crate::program_environment::ProgramEnvironment; +use crate::tls; +use crate::SolverChoice; +use chalk_ir::TraitId; +use chalk_solve::clauses::builder::ClauseBuilder; +use chalk_solve::clauses::program_clauses::ToProgramClauses; +use chalk_solve::coherence::orphan; +use 
chalk_solve::coherence::{CoherenceSolver, SpecializationPriorities}; +use chalk_solve::wf; +use chalk_solve::RustIrDatabase; +use chalk_solve::Solver; +use salsa::Database; +use std::clone::Clone; +use std::cmp::{Eq, PartialEq}; +use std::collections::BTreeMap; +use std::ops::{Deref, DerefMut}; +use std::sync::Arc; +use std::sync::Mutex; + +#[salsa::query_group(Lowering)] +pub trait LoweringDatabase: + RustIrDatabase + Database + Upcast> +{ + #[salsa::input] + fn program_text(&self) -> Arc; + + #[salsa::input] + fn solver_choice(&self) -> SolverChoice; + + fn program_ir(&self) -> Result, ChalkError>; + + /// Performs coherence check and computes which impls specialize + /// one another (the "specialization priorities"). + fn coherence( + &self, + ) -> Result, Arc>>, ChalkError>; + + fn orphan_check(&self) -> Result<(), ChalkError>; + + /// The lowered IR, with coherence, orphan, and WF checks performed. + fn checked_program(&self) -> Result, ChalkError>; + + /// The program as logic. + fn environment(&self) -> Result, ChalkError>; + + /// Creates the solver we can use to solve goals. This solver + /// stores intermediate, cached state, which is why it is behind a + /// mutex. Moreover, if the set of program clauses change, that + /// cached state becomes invalid, so the query is marked as + /// volatile, thus ensuring that the solver is recreated in every + /// revision (i.e., each time source program changes). 
+ // HACK: salsa requires that queries return types that implement `Eq` + fn solver(&self) -> ArcEq>>>; +} + +// Needed to go from dyn LoweringDatabase -> dyn RustIrDatabase +// These traits are basically vendored (slightly modified) from https://github.com/connicpu/upcast +pub trait Upcast { + fn upcast(&self) -> &U; +} + +pub trait UpcastFrom { + fn upcast_from(val: &T) -> &Self; +} + +impl<'a, T: RustIrDatabase + 'a> UpcastFrom for dyn RustIrDatabase + 'a { + fn upcast_from(val: &T) -> &(dyn RustIrDatabase + 'a) { + val + } +} + +impl Upcast for T +where + U: UpcastFrom, +{ + fn upcast(&self) -> &U { + U::upcast_from(self) + } +} + +#[derive(Debug)] +#[repr(transparent)] +pub struct ArcEq(Arc); + +impl ArcEq { + pub fn new(value: T) -> Self { + Self(Arc::new(value)) + } +} + +impl Deref for ArcEq { + type Target = Arc; + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for ArcEq { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl PartialEq> for ArcEq { + fn eq(&self, other: &ArcEq) -> bool { + Arc::ptr_eq(&self.0, &other.0) + } +} + +impl Eq for ArcEq {} + +impl Clone for ArcEq { + fn clone(&self) -> Self { + Self(Arc::clone(&self.0)) + } +} + +fn program_ir(db: &dyn LoweringDatabase) -> Result, ChalkError> { + let text = db.program_text(); + Ok(Arc::new(chalk_parse::parse_program(&text)?.lower()?)) +} + +fn orphan_check(db: &dyn LoweringDatabase) -> Result<(), ChalkError> { + let program = db.program_ir()?; + + tls::set_current_program(&program, || -> Result<(), ChalkError> { + let local_impls = program.local_impl_ids(); + for impl_id in local_impls { + let mut solver = db.solver_choice().into_solver(); + orphan::perform_orphan_check::(db.upcast(), &mut *solver, impl_id)?; + } + Ok(()) + }) +} + +fn coherence( + db: &dyn LoweringDatabase, +) -> Result, Arc>>, ChalkError> { + let program = db.program_ir()?; + let solver_choice = db.solver_choice(); + let priorities_map = 
tls::set_current_program(&program, || -> Result<_, ChalkError> { + let solver_builder = || solver_choice.into_solver(); + let priorities_map: Result, ChalkError> = program + .trait_data + .keys() + .map(|&trait_id| { + let solver: CoherenceSolver = + CoherenceSolver::new(db.upcast(), &solver_builder, trait_id); + let priorities = solver.specialization_priorities()?; + Ok((trait_id, priorities)) + }) + .collect(); + let priorities_map = priorities_map?; + Ok(priorities_map) + }); + let () = db.orphan_check()?; + priorities_map +} + +fn checked_program(db: &dyn LoweringDatabase) -> Result, ChalkError> { + let program = db.program_ir()?; + + db.coherence()?; + + let solver_choice = db.solver_choice(); + let () = tls::set_current_program(&program, || -> Result<(), ChalkError> { + let solver_builder = || solver_choice.into_solver(); + let solver: wf::WfSolver = wf::WfSolver::new(db.upcast(), &solver_builder); + for &id in program.adt_data.keys() { + solver.verify_adt_decl(id)?; + } + + for &opaque_ty_id in program.opaque_ty_data.keys() { + solver.verify_opaque_ty_decl(opaque_ty_id)?; + } + + for &impl_id in program.impl_data.keys() { + solver.verify_trait_impl(impl_id)?; + } + + Ok(()) + })?; + + Ok(program) +} + +fn environment(db: &dyn LoweringDatabase) -> Result, ChalkError> { + let program = db.program_ir()?; + + // Construct the set of *clauses*; these are sort of a compiled form + // of the data above that always has the form: + // + // forall P0...Pn. 
Something :- Conditions + let mut program_clauses = program.custom_clauses.clone(); + + let builder = &mut ClauseBuilder::new(db.upcast(), &mut program_clauses); + + let env = chalk_ir::Environment::new(builder.interner()); + + program + .associated_ty_data + .values() + .for_each(|d| d.to_program_clauses(builder, &env)); + + program + .trait_data + .values() + .for_each(|d| d.to_program_clauses(builder, &env)); + + program + .adt_data + .values() + .for_each(|d| d.to_program_clauses(builder, &env)); + + for (&auto_trait_id, _) in program + .trait_data + .iter() + .filter(|(_, auto_trait)| auto_trait.is_auto_trait()) + { + for adt_datum in program.adt_data.values() { + builder.push_binders(adt_datum.binders.clone(), |builder, _| { + let ty = chalk_ir::TyKind::Adt(adt_datum.id, builder.substitution_in_scope()); + chalk_solve::clauses::push_auto_trait_impls(builder, auto_trait_id, &ty) + .map_err(|_| ()) + .unwrap(); + }); + } + } + + for datum in program.impl_data.values() { + // If we encounter a negative impl, do not generate any rule. Negative impls + // are currently just there to deactivate default impls for auto traits. + if datum.is_positive() { + datum.to_program_clauses(builder, &env); + datum + .associated_ty_value_ids + .iter() + .map(|&atv_id| db.associated_ty_value(atv_id)) + .for_each(|atv| atv.to_program_clauses(builder, &env)); + } + } + + Ok(Arc::new(ProgramEnvironment::new(program_clauses))) +} + +fn solver(db: &dyn LoweringDatabase) -> ArcEq>>> { + db.salsa_runtime().report_untracked_read(); + let choice = db.solver_choice(); + ArcEq::new(Mutex::new(choice.into_solver())) +} diff --git a/chalk-integration/src/test_macros.rs b/chalk-integration/src/test_macros.rs new file mode 100644 index 00000000000..96bfeb0d61e --- /dev/null +++ b/chalk-integration/src/test_macros.rs @@ -0,0 +1,128 @@ +//! Useful macros for writing unit tests. They let you gin up dummy types and things. + +#[macro_export] +macro_rules! 
ty { + (apply (item $n:expr) $($arg:tt)*) => { + chalk_ir::TyKind::Adt( + chalk_ir::AdtId(chalk_integration::interner::RawId { + index: $n, + + }), + chalk_ir::Substitution::from_iter( + chalk_integration::interner::ChalkIr, + vec![$(arg!($arg)),*] as Vec> + ), + ) + .intern(chalk_integration::interner::ChalkIr) + }; + + (function $n:tt $($arg:tt)*) => { + chalk_ir::TyKind::Function(chalk_ir::FnPointer { + num_binders: $n, + substitution: chalk_ir::FnSubst(chalk_ir::Substitution::from_iter( + chalk_integration::interner::ChalkIr, + vec![$(arg!($arg)),*] as Vec> + )), + sig: chalk_ir::FnSig { + safety: chalk_ir::Safety::Safe, + abi: ::FnAbi::Rust, + variadic: false, + } + }).intern(chalk_integration::interner::ChalkIr) + }; + + (placeholder $n:expr) => { + chalk_ir::TyKind::Placeholder(PlaceholderIndex { + ui: UniverseIndex { counter: $n }, + idx: 0, + }).intern(chalk_integration::interner::ChalkIr) + }; + + (projection (item $n:tt) $($arg:tt)*) => { + chalk_ir::AliasTy::Projection(chalk_ir::ProjectionTy { + associated_ty_id: AssocTypeId(chalk_integration::interner::RawId { index: $n }), + substitution: chalk_ir::Substitution::from_iter( + chalk_integration::interner::ChalkIr, + vec![$(arg!($arg)),*] as Vec> + ), + }).intern(chalk_integration::interner::ChalkIr) + }; + + (infer $b:expr) => { + chalk_ir::TyKind::InferenceVar(chalk_ir::InferenceVar::from($b), chalk_ir::TyVariableKind::General) + .intern(chalk_integration::interner::ChalkIr) + }; + + (bound $d:tt $b:tt) => { + chalk_ir::TyKind::BoundVar(chalk_ir::BoundVar::new(chalk_ir::DebruijnIndex::new($d), $b)) + .intern(chalk_integration::interner::ChalkIr) + }; + + (bound $b:expr) => { + chalk_ir::TyKind::BoundVar(chalk_ir::BoundVar::new(chalk_ir::DebruijnIndex::INNERMOST, $b)) + .intern(chalk_integration::interner::ChalkIr) + }; + + (expr $b:expr) => { + $b.clone() + }; + + (($($b:tt)*)) => { + ty!($($b)*) + }; +} + +#[macro_export] +macro_rules! 
arg { + ((lifetime $b:tt)) => { + chalk_ir::GenericArg::new( + chalk_integration::interner::ChalkIr, + chalk_ir::GenericArgData::Lifetime(lifetime!($b)), + ) + }; + + ($arg:tt) => { + chalk_ir::GenericArg::new( + chalk_integration::interner::ChalkIr, + chalk_ir::GenericArgData::Ty(ty!($arg)), + ) + }; +} + +#[macro_export] +macro_rules! lifetime { + (infer $b:expr) => { + chalk_ir::LifetimeData::InferenceVar(chalk_ir::InferenceVar::from($b)) + .intern(chalk_integration::interner::ChalkIr) + }; + + (bound $d:tt $b:tt) => { + chalk_ir::LifetimeData::BoundVar(chalk_ir::BoundVar::new(chalk_ir::DebruijnIndex::new($d), $b)) + .intern(chalk_integration::interner::ChalkIr) + }; + + (bound $b:expr) => { + chalk_ir::LifetimeData::BoundVar(chalk_ir::BoundVar::new(chalk_ir::DebruijnIndex::INNERMOST, $b)) + .intern(chalk_integration::interner::ChalkIr) + }; + + (placeholder $b:expr) => { + chalk_ir::LifetimeData::Placeholder(PlaceholderIndex { ui: UniverseIndex { counter: $b }, idx: 0}) + .intern(chalk_integration::interner::ChalkIr) + }; + + (expr $b:expr) => { + $b.clone() + }; + + (($($b:tt)*)) => { + lifetime!($($b)*) + }; +} + +#[macro_export] +macro_rules! empty_substitution { + () => { + chalk_ir::Substitution::empty(chalk_integration::interner::ChalkIr) + }; +} diff --git a/chalk-integration/src/tls.rs b/chalk-integration/src/tls.rs new file mode 100644 index 00000000000..58b1da3dfa8 --- /dev/null +++ b/chalk-integration/src/tls.rs @@ -0,0 +1,176 @@ +use crate::interner::ChalkIr; +use chalk_ir::{ + debug::SeparatorTraitRef, AdtId, AliasTy, AssocTypeId, CanonicalVarKinds, Constraints, FnDefId, + GenericArg, Goal, Goals, Lifetime, OpaqueTy, OpaqueTyId, ProgramClause, + ProgramClauseImplication, ProgramClauses, ProjectionTy, QuantifiedWhereClauses, Substitution, + TraitId, Ty, VariableKinds, Variances, +}; +use std::cell::RefCell; +use std::fmt; +use std::sync::Arc; + +thread_local! 
{ + static PROGRAM: RefCell>> = RefCell::new(None) +} + +pub trait DebugContext { + fn debug_adt_id( + &self, + id: AdtId, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error>; + + fn debug_trait_id( + &self, + id: TraitId, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error>; + + fn debug_assoc_type_id( + &self, + id: AssocTypeId, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error>; + + fn debug_opaque_ty_id( + &self, + id: OpaqueTyId, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error>; + + fn debug_fn_def_id( + &self, + fn_def_id: FnDefId, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error>; + + fn debug_alias( + &self, + alias: &AliasTy, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error>; + + fn debug_opaque_ty( + &self, + opaque_ty: &OpaqueTy, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error>; + + fn debug_projection_ty( + &self, + proj: &ProjectionTy, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error>; + + fn debug_ty(&self, ty: &Ty, fmt: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error>; + + fn debug_lifetime( + &self, + lifetime: &Lifetime, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error>; + + fn debug_generic_arg( + &self, + generic_arg: &GenericArg, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error>; + + fn debug_variable_kinds( + &self, + variable_kinds: &VariableKinds, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error>; + + fn debug_variable_kinds_with_angles( + &self, + variable_kinds: &VariableKinds, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error>; + + fn debug_canonical_var_kinds( + &self, + variable_kinds: &CanonicalVarKinds, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error>; + + fn debug_goal( + &self, + goal: &Goal, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error>; + + fn debug_goals( + &self, + goals: &Goals, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error>; + + fn 
debug_program_clause_implication( + &self, + pci: &ProgramClauseImplication, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error>; + + fn debug_program_clause( + &self, + clause: &ProgramClause, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error>; + + fn debug_program_clauses( + &self, + clauses: &ProgramClauses, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error>; + + fn debug_substitution( + &self, + substitution: &Substitution, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error>; + + fn debug_separator_trait_ref( + &self, + separator_trait_ref: &SeparatorTraitRef<'_, ChalkIr>, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error>; + + fn debug_quantified_where_clauses( + &self, + clauses: &QuantifiedWhereClauses, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error>; + + fn debug_constraints( + &self, + constraints: &Constraints, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error>; + + fn debug_variances( + &self, + variances: &Variances, + fmt: &mut fmt::Formatter<'_>, + ) -> Result<(), fmt::Error>; +} + +pub fn with_current_program(op: impl FnOnce(Option<&Arc>) -> R) -> R { + PROGRAM.with(|prog_cell| { + let p = prog_cell.borrow(); + op(p.as_ref()) + }) +} + +pub fn set_current_program(p: &Arc, op: OP) -> R +where + OP: FnOnce() -> R, +{ + let p: Arc = p.clone(); + PROGRAM.with(|prog_cell| { + *prog_cell.borrow_mut() = Some(p); + let r = op(); + *prog_cell.borrow_mut() = None; + r + }) +} diff --git a/chalk-ir/Cargo.toml b/chalk-ir/Cargo.toml index 695623beac4..42a7a994b8c 100644 --- a/chalk-ir/Cargo.toml +++ b/chalk-ir/Cargo.toml @@ -1,20 +1,14 @@ [package] name = "chalk-ir" -version = "0.1.0" +version = "0.104.0-dev.0" description = "Chalk's internal representation of types, goals, and clauses" -license = "Apache-2.0/MIT" +license = "MIT OR Apache-2.0" authors = ["Rust Compiler Team", "Chalk developers"] -repository = "https://github.com/rust-lang-nursery/chalk" 
+repository = "https://github.com/rust-lang/chalk" readme = "README.md" keywords = ["compiler", "traits", "prolog"] +edition = "2018" [dependencies] -lalrpop-intern = "0.15.1" - -[dependencies.chalk-macros] -version = "0.1.0" -path = "../chalk-macros" - -[dependencies.chalk-engine] -version = "0.9.0" -path = "../chalk-engine" +bitflags = "2.4.1" +chalk-derive = { version = "0.104.0-dev.0", path = "../chalk-derive" } diff --git a/chalk-ir/README.md b/chalk-ir/README.md new file mode 100644 index 00000000000..968ad493786 --- /dev/null +++ b/chalk-ir/README.md @@ -0,0 +1,3 @@ +A rust type library for chalk. + +See [Github](https://github.com/rust-lang/chalk) for up-to-date information. diff --git a/chalk-ir/src/cast.rs b/chalk-ir/src/cast.rs index 26c64f8cc13..0c6b682caf6 100644 --- a/chalk-ir/src/cast.rs +++ b/chalk-ir/src/cast.rs @@ -1,4 +1,6 @@ -use ::*; +//! Upcasts, to avoid writing out wrapper types. + +use crate::*; use std::marker::PhantomData; /// The `Cast` trait is used to make annoying upcasts between @@ -26,217 +28,315 @@ use std::marker::PhantomData; /// as part of this, they should always use the same set of free /// variables (the `Canonical` implementation, for example, relies on /// that). -pub trait Cast: Sized { - fn cast(self) -> T; +/// +/// # Iterators +/// +/// If you import the `Caster` trait, you can also write `.casted()` on an +/// iterator chain to cast every instance within. +/// +/// # Implementing Cast +/// +/// Do not implement `Cast` directly. Instead, implement `CastTo`. +/// This split setup allows us to write `foo.cast::()` to mean +/// "cast to T". +pub trait Cast: Sized { + /// Cast a value to type `U` using `CastTo`. + fn cast(self, interner: U::Interner) -> U + where + Self: CastTo, + U: HasInterner, + { + self.cast_to(interner) + } +} + +impl Cast for T {} + +/// The "helper" trait for `cast` that actually implements the +/// transformations. 
You can also use this if you want to have +/// functions that take (e.g.) an `impl CastTo>` or something +/// like that. +pub trait CastTo: Sized { + /// Cast a value to type `T`. + fn cast_to(self, interner: T::Interner) -> T; } macro_rules! reflexive_impl { (for($($t:tt)*) $u:ty) => { - impl<$($t)*> Cast<$u> for $u { - fn cast(self) -> $u { + impl<$($t)*> CastTo<$u> for $u { + fn cast_to(self, _interner: <$u as HasInterner>::Interner) -> $u { self } } }; ($u:ty) => { - impl Cast<$u> for $u { - fn cast(self) -> $u { + impl CastTo<$u> for $u { + fn cast_to(self, interner: <$u as HasInterner>::Interner) -> $u { self } } }; } -reflexive_impl!(TraitRef); -reflexive_impl!(LeafGoal); -reflexive_impl!(DomainGoal); -reflexive_impl!(WhereClause); - -impl Cast for TraitRef { - fn cast(self) -> WhereClause { +reflexive_impl!(for(I: Interner) TyKind); +reflexive_impl!(for(I: Interner) LifetimeData); +reflexive_impl!(for(I: Interner) ConstData); +reflexive_impl!(for(I: Interner) TraitRef); +reflexive_impl!(for(I: Interner) DomainGoal); +reflexive_impl!(for(I: Interner) Goal); +reflexive_impl!(for(I: Interner) WhereClause); +reflexive_impl!(for(I: Interner) ProgramClause); +reflexive_impl!(for(I: Interner) QuantifiedWhereClause); +reflexive_impl!(for(I: Interner) VariableKind); +reflexive_impl!(for(I: Interner) VariableKinds); +reflexive_impl!(for(I: Interner) CanonicalVarKind); +reflexive_impl!(for(I: Interner) CanonicalVarKinds); +reflexive_impl!(for(I: Interner) Constraint); + +impl CastTo> for TraitRef { + fn cast_to(self, _interner: I) -> WhereClause { WhereClause::Implemented(self) } } -impl Cast for ProjectionEq { - fn cast(self) -> WhereClause { - WhereClause::ProjectionEq(self) +impl CastTo> for AliasEq { + fn cast_to(self, _interner: I) -> WhereClause { + WhereClause::AliasEq(self) } } -impl Cast for T where T: Cast { - fn cast(self) -> DomainGoal { - DomainGoal::Holds(self.cast()) +impl CastTo> for LifetimeOutlives { + fn cast_to(self, _interner: I) -> WhereClause { 
+ WhereClause::LifetimeOutlives(self) } } -impl Cast for T where T: Cast { - fn cast(self) -> LeafGoal { - LeafGoal::DomainGoal(self.cast()) +impl CastTo> for TypeOutlives { + fn cast_to(self, _interner: I) -> WhereClause { + WhereClause::TypeOutlives(self) } } -impl Cast for T where T: Cast { - fn cast(self) -> Goal { - Goal::Leaf(self.cast()) +impl CastTo> for T +where + T: CastTo>, + I: Interner, +{ + fn cast_to(self, interner: I) -> DomainGoal { + DomainGoal::Holds(self.cast(interner)) } } -impl Cast for Normalize { - fn cast(self) -> DomainGoal { - DomainGoal::Normalize(self) +impl CastTo> for T +where + T: CastTo>, +{ + fn cast_to(self, interner: I) -> Goal { + GoalData::DomainGoal(self.cast(interner)).intern(interner) } } -impl Cast for UnselectedNormalize { - fn cast(self) -> DomainGoal { - DomainGoal::UnselectedNormalize(self) +impl CastTo> for Normalize { + fn cast_to(self, _interner: I) -> DomainGoal { + DomainGoal::Normalize(self) } } -impl Cast for WellFormed { - fn cast(self) -> DomainGoal { +impl CastTo> for WellFormed { + fn cast_to(self, _interner: I) -> DomainGoal { DomainGoal::WellFormed(self) } } -impl Cast for FromEnv { - fn cast(self) -> DomainGoal { +impl CastTo> for FromEnv { + fn cast_to(self, _interner: I) -> DomainGoal { DomainGoal::FromEnv(self) } } -impl Cast for EqGoal { - fn cast(self) -> LeafGoal { - LeafGoal::EqGoal(self) +impl CastTo> for EqGoal { + fn cast_to(self, interner: I) -> Goal { + GoalData::EqGoal(self).intern(interner) } } -impl> Cast for Binders { - fn cast(self) -> Goal { - if self.binders.is_empty() { - self.value.cast() - } else { - Goal::Quantified( - QuantifierKind::ForAll, - self.map(|bound| Box::new(bound.cast())) - ) - } +impl CastTo> for SubtypeGoal { + fn cast_to(self, interner: I) -> Goal { + GoalData::SubtypeGoal(self).intern(interner) } } -impl Cast for ApplicationTy { - fn cast(self) -> Ty { - Ty::Apply(self) +impl + CastTo>> CastTo> for Binders { + fn cast_to(self, interner: I) -> Goal { + 
GoalData::Quantified( + QuantifierKind::ForAll, + self.map(|bound| bound.cast(interner)), + ) + .intern(interner) } } -impl Cast for ProjectionTy { - fn cast(self) -> Ty { - Ty::Projection(self) +impl CastTo> for AliasTy { + fn cast_to(self, _interner: I) -> TyKind { + TyKind::Alias(self) } } -impl Cast for Ty { - fn cast(self) -> Parameter { - ParameterKind::Ty(self) +impl CastTo> for Ty { + fn cast_to(self, interner: I) -> GenericArg { + GenericArg::new(interner, GenericArgData::Ty(self)) } } -impl Cast for Lifetime { - fn cast(self) -> Parameter { - ParameterKind::Lifetime(self) +impl CastTo> for Lifetime { + fn cast_to(self, interner: I) -> GenericArg { + GenericArg::new(interner, GenericArgData::Lifetime(self)) } } -impl Cast for T where T: Cast { - fn cast(self) -> ProgramClause { - ProgramClause::Implies(ProgramClauseImplication { - consequence: self.cast(), - conditions: vec![], - }) +impl CastTo> for Const { + fn cast_to(self, interner: I) -> GenericArg { + GenericArg::new(interner, GenericArgData::Const(self)) } } -impl> Cast for Binders { - fn cast(self) -> ProgramClause { - if self.binders.is_empty() { - Cast::::cast(self.value) - } else { - ProgramClause::ForAll( - self.map(|bound| ProgramClauseImplication { - consequence: bound.cast(), - conditions: vec![], - }) - ) - } +impl CastTo> for GenericArg { + fn cast_to(self, _interner: I) -> GenericArg { + self } } -impl Cast for ProgramClauseImplication { - fn cast(self) -> ProgramClause { - ProgramClause::Implies(self) +impl CastTo> for T +where + T: CastTo>, + I: Interner, +{ + fn cast_to(self, interner: I) -> ProgramClause { + let implication = ProgramClauseImplication { + consequence: self.cast(interner), + conditions: Goals::empty(interner), + constraints: Constraints::empty(interner), + priority: ClausePriority::High, + }; + + ProgramClauseData(Binders::empty(interner, implication.shifted_in(interner))) + .intern(interner) } } -impl Cast for Binders { - fn cast(self) -> ProgramClause { - 
ProgramClause::ForAll(self) +impl CastTo> for Binders +where + I: Interner, + T: HasInterner + CastTo>, +{ + fn cast_to(self, interner: I) -> ProgramClause { + ProgramClauseData(self.map(|bound| ProgramClauseImplication { + consequence: bound.cast(interner), + conditions: Goals::empty(interner), + constraints: Constraints::empty(interner), + priority: ClausePriority::High, + })) + .intern(interner) } } -macro_rules! map_impl { - (impl[$($t:tt)*] Cast<$b:ty> for $a:ty) => { - impl<$($t)*> Cast<$b> for $a { - fn cast(self) -> $b { - self.map(|v| v.cast()) - } - } +impl CastTo> for Option +where + T: CastTo, + U: HasInterner, +{ + fn cast_to(self, interner: U::Interner) -> Option { + self.map(|v| v.cast(interner)) } } -map_impl!(impl[T: Cast, U] Cast> for Option); -map_impl!(impl[T: Cast, U] Cast> for InEnvironment); -map_impl!(impl[T: Cast, U, E] Cast> for Result); +impl CastTo> for InEnvironment +where + T: HasInterner + CastTo, + U: HasInterner, + I: Interner, +{ + fn cast_to(self, interner: U::Interner) -> InEnvironment { + self.map(|v| v.cast(interner)) + } +} + +impl CastTo> for Result +where + T: CastTo, + U: HasInterner, +{ + fn cast_to(self, interner: U::Interner) -> Result { + self.map(|v| v.cast(interner)) + } +} + +impl HasInterner for Option +where + T: HasInterner, +{ + type Interner = T::Interner; +} -impl Cast> for Canonical +impl HasInterner for Result where - T: Cast, + T: HasInterner, { - fn cast(self) -> Canonical { + type Interner = T::Interner; +} + +impl CastTo> for Canonical +where + T: CastTo + HasInterner, + U: HasInterner, +{ + fn cast_to(self, interner: T::Interner) -> Canonical { // Subtle point: It should be ok to re-use the binders here, // because `cast()` never introduces new inference variables, // nor changes the "substance" of the type we are working // with. It just introduces new wrapper types. 
Canonical { - value: self.value.cast(), - binders: self.binders, + value: self.value.cast(interner), + binders: self.binders.cast(interner), } } } -impl Cast> for Vec +impl CastTo> for Vec +where + T: CastTo + HasInterner, + U: HasInterner, +{ + fn cast_to(self, interner: U::Interner) -> Vec { + self.into_iter().casted(interner).collect() + } +} + +impl CastTo for &T where - T: Cast, + T: Clone + HasInterner, { - fn cast(self) -> Vec { - self.into_iter().casted().collect() + fn cast_to(self, _interner: T::Interner) -> T { + self.clone() } } -pub struct Casted { - iterator: I, +/// An iterator that casts each element to some other type. +pub struct Casted { + interner: U::Interner, + iterator: IT, _cast: PhantomData, } -impl Iterator for Casted +impl Iterator for Casted where - I::Item: Cast, + IT::Item: CastTo, + U: HasInterner, { type Item = U; fn next(&mut self) -> Option { - self.iterator.next().map(|item| item.cast()) + self.iterator.next().map(|item| item.cast_to(self.interner)) } fn size_hint(&self) -> (usize, Option) { @@ -246,15 +346,19 @@ where /// An iterator adapter that casts each element we are iterating over /// to some other type. -pub trait Caster: Sized { - fn casted(self) -> Casted; -} - -impl Caster for I { - fn casted(self) -> Casted { +pub trait Caster: Iterator + Sized { + /// Cast each element in this iterator. + fn casted(self, interner: U::Interner) -> Casted + where + Self::Item: CastTo, + U: HasInterner, + { Casted { + interner, iterator: self, _cast: PhantomData, } } } + +impl Caster for I where I: Iterator {} diff --git a/chalk-ir/src/could_match.rs b/chalk-ir/src/could_match.rs index e9294de9c75..da69d6f22cd 100644 --- a/chalk-ir/src/could_match.rs +++ b/chalk-ir/src/could_match.rs @@ -1,29 +1,143 @@ -use ::*; -use zip::{Zip, Zipper}; +//! Fast matching check for zippable values. + +use crate::interner::HasInterner; +use crate::zip::{Zip, Zipper}; +use crate::*; /// A fast check to see whether two things could ever possibly match. 
-pub trait CouldMatch { - fn could_match(&self, other: &T) -> bool; +pub trait CouldMatch { + /// Checks whether `self` and `other` could possibly match. + fn could_match( + &self, + interner: T::Interner, + db: &dyn UnificationDatabase, + other: &T, + ) -> bool; } -impl CouldMatch for T { - fn could_match(&self, other: &T) -> bool { - return Zip::zip_with(&mut MatchZipper, self, other).is_ok(); - - struct MatchZipper; +#[allow(unreachable_code, unused_variables)] +impl CouldMatch for T +where + T: Zip + ?Sized + HasInterner, + I: Interner, +{ + fn could_match(&self, interner: I, db: &dyn UnificationDatabase, other: &T) -> bool { + return Zip::zip_with( + &mut MatchZipper { interner, db }, + Variance::Invariant, + self, + other, + ) + .is_ok(); - impl Zipper for MatchZipper { - fn zip_tys(&mut self, a: &Ty, b: &Ty) -> Fallible<()> { - let could_match = match (a, b) { - (&Ty::Apply(ref a), &Ty::Apply(ref b)) => { - let names_could_match = a.name == b.name; + struct MatchZipper<'i, I> { + interner: I, + db: &'i dyn UnificationDatabase, + } - names_could_match - && a.parameters - .iter() - .zip(&b.parameters) - .all(|(p_a, p_b)| p_a.could_match(p_b)) + impl<'i, I: Interner> Zipper for MatchZipper<'i, I> { + fn zip_tys(&mut self, variance: Variance, a: &Ty, b: &Ty) -> Fallible<()> { + let interner = self.interner; + let matches = |a: &Substitution, b: &Substitution| { + a.iter(interner) + .zip(b.iter(interner)) + .all(|(p_a, p_b)| p_a.could_match(interner, self.db, p_b)) + }; + let could_match = match (a.kind(interner), b.kind(interner)) { + (TyKind::Adt(id_a, substitution_a), TyKind::Adt(id_b, substitution_b)) => { + id_a == id_b + && self + .zip_substs( + variance, + Some(self.unification_database().adt_variance(*id_a)), + substitution_a.as_slice(interner), + substitution_b.as_slice(interner), + ) + .is_ok() + } + ( + TyKind::AssociatedType(assoc_ty_a, substitution_a), + TyKind::AssociatedType(assoc_ty_b, substitution_b), + ) => assoc_ty_a == assoc_ty_b && 
matches(substitution_a, substitution_b), + (TyKind::Scalar(scalar_a), TyKind::Scalar(scalar_b)) => scalar_a == scalar_b, + (TyKind::Str, TyKind::Str) => true, + ( + TyKind::Tuple(arity_a, substitution_a), + TyKind::Tuple(arity_b, substitution_b), + ) => arity_a == arity_b && matches(substitution_a, substitution_b), + ( + TyKind::OpaqueType(opaque_ty_a, substitution_a), + TyKind::OpaqueType(opaque_ty_b, substitution_b), + ) => opaque_ty_a == opaque_ty_b && matches(substitution_a, substitution_b), + (TyKind::Slice(ty_a), TyKind::Slice(ty_b)) => { + ty_a.could_match(interner, self.db, ty_b) + } + ( + TyKind::FnDef(fn_def_a, substitution_a), + TyKind::FnDef(fn_def_b, substitution_b), + ) => { + fn_def_a == fn_def_b + && self + .zip_substs( + variance, + Some(self.unification_database().fn_def_variance(*fn_def_a)), + substitution_a.as_slice(interner), + substitution_b.as_slice(interner), + ) + .is_ok() + } + ( + TyKind::Ref(mutability_a, lifetime_a, ty_a), + TyKind::Ref(mutability_b, lifetime_b, ty_b), + ) => { + mutability_a == mutability_b + && lifetime_a.could_match(interner, self.db, lifetime_b) + && ty_a.could_match(interner, self.db, ty_b) } + (TyKind::Raw(mutability_a, ty_a), TyKind::Raw(mutability_b, ty_b)) => { + mutability_a == mutability_b && ty_a.could_match(interner, self.db, ty_b) + } + (TyKind::Never, TyKind::Never) => true, + (TyKind::Array(ty_a, const_a), TyKind::Array(ty_b, const_b)) => { + ty_a.could_match(interner, self.db, ty_b) + && const_a.could_match(interner, self.db, const_b) + } + ( + TyKind::Closure(id_a, substitution_a), + TyKind::Closure(id_b, substitution_b), + ) => id_a == id_b && matches(substitution_a, substitution_b), + ( + TyKind::Coroutine(coroutine_a, substitution_a), + TyKind::Coroutine(coroutine_b, substitution_b), + ) => { + coroutine_a == coroutine_b + && self + .zip_substs( + variance, + None, + substitution_a.as_slice(interner), + substitution_b.as_slice(interner), + ) + .is_ok() + } + ( + TyKind::CoroutineWitness(coroutine_a, 
substitution_a), + TyKind::CoroutineWitness(coroutine_b, substitution_b), + ) => { + coroutine_a == coroutine_b + && self + .zip_substs( + variance, + None, + substitution_a.as_slice(interner), + substitution_b.as_slice(interner), + ) + .is_ok() + } + (TyKind::Foreign(foreign_ty_a), TyKind::Foreign(foreign_ty_b)) => { + foreign_ty_a == foreign_ty_b + } + (TyKind::Error, TyKind::Error) => true, _ => true, }; @@ -35,29 +149,65 @@ impl CouldMatch for T { } } - fn zip_lifetimes(&mut self, _: &Lifetime, _: &Lifetime) -> Fallible<()> { + fn zip_lifetimes( + &mut self, + variance: Variance, + _: &Lifetime, + _: &Lifetime, + ) -> Fallible<()> { Ok(()) } - fn zip_binders(&mut self, a: &Binders, b: &Binders) -> Fallible<()> + fn zip_consts( + &mut self, + variance: Variance, + _: &Const, + _: &Const, + ) -> Fallible<()> { + Ok(()) + } + + fn zip_binders( + &mut self, + variance: Variance, + a: &Binders, + b: &Binders, + ) -> Fallible<()> where - T: Zip, + T: HasInterner + Zip, { - Zip::zip_with(self, &a.value, &b.value) + Zip::zip_with(self, variance, &a.value, &b.value) } - } - } -} -impl CouldMatch for ProgramClause { - fn could_match(&self, other: &DomainGoal) -> bool { - match self { - ProgramClause::Implies(implication) => { - implication.consequence.could_match(other) + fn interner(&self) -> I { + self.interner } - ProgramClause::ForAll(clause) => { - clause.value.consequence.could_match(other) + + fn unification_database(&self) -> &dyn UnificationDatabase { + self.db } } } } + +impl CouldMatch> for ProgramClauseData { + fn could_match( + &self, + interner: I, + db: &dyn UnificationDatabase, + other: &DomainGoal, + ) -> bool { + self.0.value.consequence.could_match(interner, db, other) + } +} + +impl CouldMatch> for ProgramClause { + fn could_match( + &self, + interner: I, + db: &dyn UnificationDatabase, + other: &DomainGoal, + ) -> bool { + self.data(interner).could_match(interner, db, other) + } +} diff --git a/chalk-ir/src/debug.rs b/chalk-ir/src/debug.rs index 
91f0a70de4c..e57185bb034 100644 --- a/chalk-ir/src/debug.rs +++ b/chalk-ir/src/debug.rs @@ -1,136 +1,760 @@ -use std::fmt::{Debug, Display, Error, Formatter}; +//! Debug impls for types. + +use std::fmt::{self, Debug, Display, Error, Formatter}; use super::*; -impl Debug for ItemId { - fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> { - tls::with_current_program(|p| match p { - Some(prog) => prog.debug_item_id(*self, fmt), - None => fmt - .debug_struct("ItemId") - .field("index", &self.index) - .finish(), - }) +/// Wrapper to allow forwarding to `Display::fmt`, `Debug::fmt`, etc. +pub struct Fmt(pub F) +where + F: Fn(&mut fmt::Formatter<'_>) -> fmt::Result; + +impl fmt::Display for Fmt +where + F: Fn(&mut fmt::Formatter<'_>) -> fmt::Result, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + (self.0)(f) + } +} + +impl Debug for TraitId { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + I::debug_trait_id(*self, fmt).unwrap_or_else(|| write!(fmt, "TraitId({:?})", self.0)) + } +} + +impl Debug for AdtId { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + I::debug_adt_id(*self, fmt).unwrap_or_else(|| write!(fmt, "AdtId({:?})", self.0)) + } +} + +impl Debug for AssocTypeId { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + I::debug_assoc_type_id(*self, fmt) + .unwrap_or_else(|| write!(fmt, "AssocTypeId({:?})", self.0)) + } +} + +impl Debug for FnDefId { + fn fmt(&self, fmt: &mut Formatter<'_>) -> std::fmt::Result { + I::debug_fn_def_id(*self, fmt).unwrap_or_else(|| write!(fmt, "FnDefId({:?})", self.0)) + } +} + +impl Debug for ClosureId { + fn fmt(&self, fmt: &mut Formatter<'_>) -> std::fmt::Result { + I::debug_closure_id(*self, fmt).unwrap_or_else(|| write!(fmt, "ClosureId({:?})", self.0)) + } +} + +impl Debug for CoroutineId { + fn fmt(&self, fmt: &mut Formatter<'_>) -> std::fmt::Result { + I::debug_coroutine_id(*self, fmt) + .unwrap_or_else(|| write!(fmt, "CoroutineId({:?})", self.0)) + } +} + 
+impl Debug for ForeignDefId { + fn fmt(&self, fmt: &mut Formatter<'_>) -> std::fmt::Result { + I::debug_foreign_def_id(*self, fmt) + .unwrap_or_else(|| write!(fmt, "ForeignDefId({:?})", self.0)) + } +} + +impl Debug for Ty { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + I::debug_ty(self, fmt).unwrap_or_else(|| write!(fmt, "{:?}", self.interned)) + } +} + +impl Debug for Lifetime { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + I::debug_lifetime(self, fmt).unwrap_or_else(|| write!(fmt, "{:?}", self.interned)) + } +} + +impl Debug for Const { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + I::debug_const(self, fmt).unwrap_or_else(|| write!(fmt, "{:?}", self.interned)) + } +} + +impl Debug for ConcreteConst { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + write!(fmt, "{:?}", self.interned) + } +} + +impl Debug for GenericArg { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + I::debug_generic_arg(self, fmt).unwrap_or_else(|| write!(fmt, "{:?}", self.interned)) + } +} + +impl Debug for Goal { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + I::debug_goal(self, fmt).unwrap_or_else(|| write!(fmt, "{:?}", self.interned)) + } +} + +impl Debug for Goals { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + I::debug_goals(self, fmt).unwrap_or_else(|| write!(fmt, "{:?}", self.interned)) + } +} + +impl Debug for ProgramClauseImplication { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + I::debug_program_clause_implication(self, fmt) + .unwrap_or_else(|| write!(fmt, "ProgramClauseImplication(?)")) + } +} + +impl Debug for ProgramClause { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + I::debug_program_clause(self, fmt).unwrap_or_else(|| write!(fmt, "{:?}", self.interned)) + } +} + +impl Debug for ProgramClauses { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + I::debug_program_clauses(self, 
fmt).unwrap_or_else(|| write!(fmt, "{:?}", self.interned)) + } +} + +impl Debug for Constraints { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + I::debug_constraints(self, fmt).unwrap_or_else(|| write!(fmt, "{:?}", self.interned)) + } +} + +impl Debug for SeparatorTraitRef<'_, I> { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + I::debug_separator_trait_ref(self, fmt) + .unwrap_or_else(|| write!(fmt, "SeparatorTraitRef(?)")) + } +} + +impl Debug for AliasTy { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + I::debug_alias(self, fmt).unwrap_or_else(|| write!(fmt, "AliasTy(?)")) + } +} + +impl Debug for QuantifiedWhereClauses { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + I::debug_quantified_where_clauses(self, fmt) + .unwrap_or_else(|| write!(fmt, "{:?}", self.interned)) + } +} + +impl Debug for ProjectionTy { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + I::debug_projection_ty(self, fmt).unwrap_or_else(|| fmt.write_str("")) + } +} + +impl Debug for OpaqueTy { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + I::debug_opaque_ty(self, fmt).unwrap_or_else(|| fmt.write_str("")) + } +} + +impl Display for Substitution { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + I::debug_substitution(self, fmt).unwrap_or_else(|| write!(fmt, "{:?}", self.interned)) + } +} + +impl Debug for OpaqueTyId { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + I::debug_opaque_ty_id(*self, fmt).unwrap_or_else(|| write!(fmt, "OpaqueTyId({:?})", self.0)) } } impl Display for UniverseIndex { - fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { write!(fmt, "U{}", self.counter) } } impl Debug for UniverseIndex { - fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { write!(fmt, "U{}", self.counter) } } -impl Debug for 
TypeName { - fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> { - match self { - TypeName::ItemId(id) => write!(fmt, "{:?}", id), - TypeName::Placeholder(index) => write!(fmt, "{:?}", index), - TypeName::AssociatedType(assoc_ty) => write!(fmt, "{:?}", assoc_ty), - } +impl Debug for TyData { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + self.kind.fmt(fmt) } } -impl Debug for Ty { - fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> { +impl Debug for TyKind { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { match self { - Ty::BoundVar(depth) => write!(fmt, "^{}", depth), - Ty::InferenceVar(var) => write!(fmt, "{:?}", var), - Ty::Apply(apply) => write!(fmt, "{:?}", apply), - Ty::Projection(proj) => write!(fmt, "{:?}", proj), - Ty::UnselectedProjection(proj) => write!(fmt, "{:?}", proj), - Ty::ForAll(quantified_ty) => write!(fmt, "{:?}", quantified_ty), + TyKind::BoundVar(db) => write!(fmt, "{:?}", db), + TyKind::Dyn(clauses) => write!(fmt, "{:?}", clauses), + TyKind::InferenceVar(var, TyVariableKind::General) => write!(fmt, "{:?}", var), + TyKind::InferenceVar(var, TyVariableKind::Integer) => write!(fmt, "{:?}i", var), + TyKind::InferenceVar(var, TyVariableKind::Float) => write!(fmt, "{:?}f", var), + TyKind::Alias(alias) => write!(fmt, "{:?}", alias), + TyKind::Placeholder(index) => write!(fmt, "{:?}", index), + TyKind::Function(function) => write!(fmt, "{:?}", function), + TyKind::Adt(id, substitution) => write!(fmt, "{:?}<{:?}>", id, substitution), + TyKind::AssociatedType(assoc_ty, substitution) => { + write!(fmt, "{:?}<{:?}>", assoc_ty, substitution) + } + TyKind::Scalar(scalar) => write!(fmt, "{:?}", scalar), + TyKind::Str => write!(fmt, "Str"), + TyKind::Tuple(arity, substitution) => write!(fmt, "{:?}<{:?}>", arity, substitution), + TyKind::OpaqueType(opaque_ty, substitution) => { + write!(fmt, "!{:?}<{:?}>", opaque_ty, substitution) + } + TyKind::Slice(substitution) => write!(fmt, "{{slice}}<{:?}>", substitution), 
+ TyKind::FnDef(fn_def, substitution) => write!(fmt, "{:?}<{:?}>", fn_def, substitution), + TyKind::Ref(mutability, lifetime, ty) => match mutability { + Mutability::Mut => write!(fmt, "(&{:?} mut {:?})", lifetime, ty), + Mutability::Not => write!(fmt, "(&{:?} {:?})", lifetime, ty), + }, + TyKind::Raw(mutability, ty) => match mutability { + Mutability::Mut => write!(fmt, "(*mut {:?})", ty), + Mutability::Not => write!(fmt, "(*const {:?})", ty), + }, + TyKind::Never => write!(fmt, "Never"), + TyKind::Array(ty, const_) => write!(fmt, "[{:?}; {:?}]", ty, const_), + TyKind::Closure(id, substitution) => { + write!(fmt, "{{closure:{:?}}}<{:?}>", id, substitution) + } + TyKind::Coroutine(coroutine, substitution) => { + write!(fmt, "{:?}<{:?}>", coroutine, substitution) + } + TyKind::CoroutineWitness(witness, substitution) => { + write!(fmt, "{:?}<{:?}>", witness, substitution) + } + TyKind::Foreign(foreign_ty) => write!(fmt, "{:?}", foreign_ty), + TyKind::Error => write!(fmt, "{{error}}"), } } } +impl Debug for BoundVar { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + let BoundVar { debruijn, index } = self; + write!(fmt, "{:?}.{:?}", debruijn, index) + } +} + +impl Debug for DebruijnIndex { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + let DebruijnIndex { depth } = self; + write!(fmt, "^{}", depth) + } +} + +impl Debug for DynTy { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + let DynTy { bounds, lifetime } = self; + write!(fmt, "dyn {:?} + {:?}", bounds, lifetime) + } +} + impl Debug for InferenceVar { - fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { write!(fmt, "?{}", self.index) } } +impl Debug for FnSubst { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + write!(fmt, "{:?}", self.0) + } +} -impl Debug for QuantifiedTy { - fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> { +impl Debug for FnPointer { + fn 
fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { // FIXME -- we should introduce some names or something here - let QuantifiedTy { num_binders, ty } = self; - write!(fmt, "for<{}> {:?}", num_binders, ty) + let FnPointer { + num_binders, + substitution, + sig, + } = self; + write!( + fmt, + "{}{:?} for<{}> {:?}", + match sig.safety { + Safety::Unsafe => "unsafe ", + Safety::Safe => "", + }, + sig.abi, + num_binders, + substitution + ) + } +} + +impl Debug for LifetimeData { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + match self { + LifetimeData::BoundVar(db) => write!(fmt, "'{:?}", db), + LifetimeData::InferenceVar(var) => write!(fmt, "'{:?}", var), + LifetimeData::Placeholder(index) => write!(fmt, "'{:?}", index), + LifetimeData::Static => write!(fmt, "'static"), + LifetimeData::Erased => write!(fmt, "'"), + LifetimeData::Error => write!(fmt, "'{{error}}"), + LifetimeData::Phantom(..) => unreachable!(), + } + } +} + +impl VariableKinds { + fn debug(&self) -> VariableKindsDebug<'_, I> { + VariableKindsDebug(self) + } + + /// Helper method for debugging variable kinds. + pub fn inner_debug(&self, interner: I) -> VariableKindsInnerDebug<'_, I> { + VariableKindsInnerDebug { + variable_kinds: self, + interner, + } + } +} + +struct VariableKindsDebug<'a, I: Interner>(&'a VariableKinds); + +impl<'a, I: Interner> Debug for VariableKindsDebug<'a, I> { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + I::debug_variable_kinds_with_angles(self.0, fmt) + .unwrap_or_else(|| write!(fmt, "{:?}", self.0.interned)) + } +} + +/// Helper struct for showing debug output for `VariableKinds`. +pub struct VariableKindsInnerDebug<'a, I: Interner> { + variable_kinds: &'a VariableKinds, + interner: I, +} + +impl<'a, I: Interner> Debug for VariableKindsInnerDebug<'a, I> { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + // NB: We print variable kinds as a list delimited by `<>`, + // like ``. 
This is because variable kind lists + // are always associated with binders like `forall { + // ... }`. + write!(fmt, "<")?; + for (index, binder) in self.variable_kinds.iter(self.interner).enumerate() { + if index > 0 { + write!(fmt, ", ")?; + } + match binder { + VariableKind::Ty(TyVariableKind::General) => write!(fmt, "type")?, + VariableKind::Ty(TyVariableKind::Integer) => write!(fmt, "integer type")?, + VariableKind::Ty(TyVariableKind::Float) => write!(fmt, "float type")?, + VariableKind::Lifetime => write!(fmt, "lifetime")?, + VariableKind::Const(ty) => write!(fmt, "const: {:?}", ty)?, + } + } + write!(fmt, ">") + } +} + +impl Debug for ConstData { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + match &self.value { + ConstValue::BoundVar(db) => write!(fmt, "{:?}", db), + ConstValue::InferenceVar(var) => write!(fmt, "{:?}", var), + ConstValue::Placeholder(index) => write!(fmt, "{:?}", index), + ConstValue::Concrete(evaluated) => write!(fmt, "{:?}", evaluated), + } } } -impl Debug for Lifetime { - fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> { +impl Debug for GoalData { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { match self { - Lifetime::BoundVar(depth) => write!(fmt, "'^{}", depth), - Lifetime::InferenceVar(var) => write!(fmt, "'{:?}", var), - Lifetime::Placeholder(index) => write!(fmt, "'{:?}", index), + GoalData::Quantified(qkind, ref subgoal) => write!( + fmt, + "{:?}{:?} {{ {:?} }}", + qkind, + subgoal.binders.debug(), + subgoal.value + ), + GoalData::Implies(ref wc, ref g) => write!(fmt, "if ({:?}) {{ {:?} }}", wc, g), + GoalData::All(ref goals) => write!(fmt, "all{:?}", goals), + GoalData::Not(ref g) => write!(fmt, "not {{ {:?} }}", g), + GoalData::EqGoal(ref wc) => write!(fmt, "{:?}", wc), + GoalData::SubtypeGoal(ref wc) => write!(fmt, "{:?}", wc), + GoalData::DomainGoal(ref wc) => write!(fmt, "{:?}", wc), + GoalData::CannotProve => write!(fmt, r"¯\_(ツ)_/¯"), + } + } +} + +/// Helper struct for showing 
debug output for `Goals`. +pub struct GoalsDebug<'a, I: Interner> { + goals: &'a Goals, + interner: I, +} + +impl<'a, I: Interner> Debug for GoalsDebug<'a, I> { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + write!(fmt, "(")?; + for (goal, index) in self.goals.iter(self.interner).zip(0..) { + if index > 0 { + write!(fmt, ", ")?; + } + write!(fmt, "{:?}", goal)?; + } + write!(fmt, ")")?; + Ok(()) + } +} + +impl Goals { + /// Show debug output for `Goals`. + pub fn debug(&self, interner: I) -> GoalsDebug<'_, I> { + GoalsDebug { + goals: self, + interner, + } + } +} + +/// Helper struct for showing debug output for `GenericArgData`. +pub struct GenericArgDataInnerDebug<'a, I: Interner>(&'a GenericArgData); + +impl<'a, I: Interner> Debug for GenericArgDataInnerDebug<'a, I> { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + match self.0 { + GenericArgData::Ty(n) => write!(fmt, "{:?}", n), + GenericArgData::Lifetime(n) => write!(fmt, "{:?}", n), + GenericArgData::Const(n) => write!(fmt, "{:?}", n), + } + } +} + +impl GenericArgData { + /// Helper method for debugging `GenericArgData`. + pub fn inner_debug(&self) -> GenericArgDataInnerDebug<'_, I> { + GenericArgDataInnerDebug(self) + } +} + +/// Helper struct for showing debug output for program clause implications. 
+pub struct ProgramClauseImplicationDebug<'a, I: Interner> { + pci: &'a ProgramClauseImplication, + interner: I, +} + +impl<'a, I: Interner> Debug for ProgramClauseImplicationDebug<'a, I> { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + let ProgramClauseImplicationDebug { pci, interner } = self; + write!(fmt, "{:?}", pci.consequence)?; + + let conditions = pci.conditions.as_slice(*interner); + + let conds = conditions.len(); + if conds == 0 { + return Ok(()); + } + + write!(fmt, " :- ")?; + for cond in &conditions[..conds - 1] { + write!(fmt, "{:?}, ", cond)?; + } + write!(fmt, "{:?}", conditions[conds - 1]) + } +} + +impl ProgramClauseImplication { + /// Show debug output for the program clause implication. + pub fn debug(&self, interner: I) -> ProgramClauseImplicationDebug<'_, I> { + ProgramClauseImplicationDebug { + pci: self, + interner, + } + } +} + +/// Helper struct for showing debug output for application types. +pub struct TyKindDebug<'a, I: Interner> { + ty: &'a TyKind, + interner: I, +} + +impl<'a, I: Interner> Debug for TyKindDebug<'a, I> { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + let interner = self.interner; + match self.ty { + TyKind::BoundVar(db) => write!(fmt, "{:?}", db), + TyKind::Dyn(clauses) => write!(fmt, "{:?}", clauses), + TyKind::InferenceVar(var, TyVariableKind::General) => write!(fmt, "{:?}", var), + TyKind::InferenceVar(var, TyVariableKind::Integer) => write!(fmt, "{:?}i", var), + TyKind::InferenceVar(var, TyVariableKind::Float) => write!(fmt, "{:?}f", var), + TyKind::Alias(alias) => write!(fmt, "{:?}", alias), + TyKind::Placeholder(index) => write!(fmt, "{:?}", index), + TyKind::Function(function) => write!(fmt, "{:?}", function), + TyKind::Adt(id, substitution) => { + write!(fmt, "{:?}{:?}", id, substitution.with_angle(interner)) + } + TyKind::AssociatedType(assoc_ty, substitution) => { + write!(fmt, "{:?}{:?}", assoc_ty, substitution.with_angle(interner)) + } + TyKind::Scalar(scalar) => 
write!(fmt, "{:?}", scalar), + TyKind::Str => write!(fmt, "Str"), + TyKind::Tuple(arity, substitution) => { + write!(fmt, "{:?}{:?}", arity, substitution.with_angle(interner)) + } + TyKind::OpaqueType(opaque_ty, substitution) => write!( + fmt, + "!{:?}{:?}", + opaque_ty, + substitution.with_angle(interner) + ), + TyKind::Slice(ty) => write!(fmt, "[{:?}]", ty), + TyKind::FnDef(fn_def, substitution) => { + write!(fmt, "{:?}{:?}", fn_def, substitution.with_angle(interner)) + } + TyKind::Ref(mutability, lifetime, ty) => match mutability { + Mutability::Mut => write!(fmt, "(&{:?} mut {:?})", lifetime, ty), + Mutability::Not => write!(fmt, "(&{:?} {:?})", lifetime, ty), + }, + TyKind::Raw(mutability, ty) => match mutability { + Mutability::Mut => write!(fmt, "(*mut {:?})", ty), + Mutability::Not => write!(fmt, "(*const {:?})", ty), + }, + TyKind::Never => write!(fmt, "Never"), + TyKind::Array(ty, const_) => write!(fmt, "[{:?}; {:?}]", ty, const_), + TyKind::Closure(id, substitution) => write!( + fmt, + "{{closure:{:?}}}{:?}", + id, + substitution.with_angle(interner) + ), + TyKind::Coroutine(coroutine, substitution) => write!( + fmt, + "{:?}{:?}", + coroutine, + substitution.with_angle(interner) + ), + TyKind::CoroutineWitness(witness, substitution) => { + write!(fmt, "{:?}{:?}", witness, substitution.with_angle(interner)) + } + TyKind::Foreign(foreign_ty) => write!(fmt, "{:?}", foreign_ty,), + TyKind::Error => write!(fmt, "{{error}}"), + } + } +} + +impl TyKind { + /// Show debug output for the application type. + pub fn debug(&self, interner: I) -> TyKindDebug<'_, I> { + TyKindDebug { ty: self, interner } + } +} + +/// Helper struct for showing debug output for substitutions. 
+pub struct SubstitutionDebug<'a, I: Interner> { + substitution: &'a Substitution, + interner: I, +} + +impl<'a, I: Interner> Debug for SubstitutionDebug<'a, I> { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + let SubstitutionDebug { + substitution, + interner, + } = self; + let mut first = true; + + write!(fmt, "[")?; + + for (index, value) in substitution.iter(*interner).enumerate() { + if first { + first = false; + } else { + write!(fmt, ", ")?; + } + + write!(fmt, "?{} := {:?}", index, value)?; + } + + write!(fmt, "]")?; + + Ok(()) + } +} + +impl Substitution { + /// Show debug output for the substitution. + pub fn debug(&self, interner: I) -> SubstitutionDebug<'_, I> { + SubstitutionDebug { + substitution: self, + interner, } } } impl Debug for PlaceholderIndex { - fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { let PlaceholderIndex { ui, idx } = self; write!(fmt, "!{}_{}", ui.counter, idx) } } -impl Debug for ApplicationTy { - fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> { - write!(fmt, "{:?}{:?}", self.name, Angle(&self.parameters)) +impl TraitRef { + /// Returns a "Debuggable" type that prints like `P0 as Trait`. + pub fn with_as(&self) -> impl std::fmt::Debug + '_ { + SeparatorTraitRef { + trait_ref: self, + separator: " as ", + } + } + + /// Returns a "Debuggable" type that prints like `P0: Trait`. + pub fn with_colon(&self) -> impl std::fmt::Debug + '_ { + SeparatorTraitRef { + trait_ref: self, + separator: ": ", + } + } +} + +impl Debug for TraitRef { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + Debug::fmt(&self.with_as(), fmt) } } -impl Debug for TraitRef { - fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> { +/// Trait ref with associated separator used for debug output. +pub struct SeparatorTraitRef<'me, I: Interner> { + /// The `TraitRef` itself. 
+ pub trait_ref: &'me TraitRef, + + /// The separator used for displaying the `TraitRef`. + pub separator: &'me str, +} + +/// Helper struct for showing debug output for the `SeperatorTraitRef`. +pub struct SeparatorTraitRefDebug<'a, 'me, I: Interner> { + separator_trait_ref: &'a SeparatorTraitRef<'me, I>, + interner: I, +} + +impl<'a, 'me, I: Interner> Debug for SeparatorTraitRefDebug<'a, 'me, I> { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + let SeparatorTraitRefDebug { + separator_trait_ref, + interner, + } = self; + let parameters = separator_trait_ref + .trait_ref + .substitution + .as_slice(*interner); write!( fmt, - "{:?} as {:?}{:?}", - self.parameters[0], - self.trait_id, - Angle(&self.parameters[1..]) + "{:?}{}{:?}{:?}", + parameters[0], + separator_trait_ref.separator, + separator_trait_ref.trait_ref.trait_id, + Angle(¶meters[1..]) ) } } -impl Debug for ProjectionTy { - fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> { - tls::with_current_program(|p| match p { - Some(program) => program.debug_projection(self, fmt), - None => write!( - fmt, - "({:?}){:?}", - self.associated_ty_id, - Angle(&self.parameters) - ), - }) +impl<'me, I: Interner> SeparatorTraitRef<'me, I> { + /// Show debug output for the `SeperatorTraitRef`. + pub fn debug<'a>(&'a self, interner: I) -> SeparatorTraitRefDebug<'a, 'me, I> { + SeparatorTraitRefDebug { + separator_trait_ref: self, + interner, + } } } -impl Debug for UnselectedProjectionTy { - fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> { - let len = self.parameters.len(); +impl Debug for LifetimeOutlives { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + write!(fmt, "{:?}: {:?}", self.a, self.b) + } +} + +impl Debug for TypeOutlives { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + write!(fmt, "{:?}: {:?}", self.ty, self.lifetime) + } +} + +/// Helper struct for showing debug output for projection types. 
+pub struct ProjectionTyDebug<'a, I: Interner> { + projection_ty: &'a ProjectionTy, + interner: I, +} + +impl<'a, I: Interner> Debug for ProjectionTyDebug<'a, I> { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + let ProjectionTyDebug { + projection_ty, + interner, + } = self; write!( fmt, - "{:?}::{}{:?}", - self.parameters[len - 1], - self.type_name, - Angle(&self.parameters[0..(len - 1)]) + "({:?}){:?}", + projection_ty.associated_ty_id, + projection_ty.substitution.with_angle(*interner) ) } } -pub struct Angle<'a, T: 'a>(pub &'a [T]); +impl ProjectionTy { + /// Show debug output for the projection type. + pub fn debug(&self, interner: I) -> ProjectionTyDebug<'_, I> { + ProjectionTyDebug { + projection_ty: self, + interner, + } + } +} + +/// Helper struct for showing debug output for opaque types. +pub struct OpaqueTyDebug<'a, I: Interner> { + opaque_ty: &'a OpaqueTy, + interner: I, +} + +impl<'a, I: Interner> Debug for OpaqueTyDebug<'a, I> { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + let OpaqueTyDebug { + opaque_ty, + interner, + } = self; + write!( + fmt, + "{:?}{:?}", + opaque_ty.opaque_ty_id, + opaque_ty.substitution.with_angle(*interner) + ) + } +} + +impl OpaqueTy { + /// Show debug output for the opaque type. + pub fn debug(&self, interner: I) -> OpaqueTyDebug<'_, I> { + OpaqueTyDebug { + opaque_ty: self, + interner, + } + } +} + +/// Wraps debug output in angle brackets (`<>`). 
+pub struct Angle<'a, T>(pub &'a [T]); impl<'a, T: Debug> Debug for Angle<'a, T> { - fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> { - if self.0.len() > 0 { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + if !self.0.is_empty() { write!(fmt, "<")?; for (index, elem) in self.0.iter().enumerate() { if index > 0 { @@ -145,187 +769,138 @@ impl<'a, T: Debug> Debug for Angle<'a, T> { } } -impl Debug for Normalize { - fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> { - write!(fmt, "Normalize({:?} -> {:?})", self.projection, self.ty) +impl Debug for Normalize { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + write!(fmt, "Normalize({:?} -> {:?})", self.alias, self.ty) } } -impl Debug for ProjectionEq { - fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> { - write!(fmt, "ProjectionEq({:?} = {:?})", self.projection, self.ty) - } -} - -impl Debug for UnselectedNormalize { - fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> { - write!( - fmt, - "UnselectedNormalize({:?} -> {:?})", - self.projection, self.ty - ) +impl Debug for AliasEq { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + write!(fmt, "AliasEq({:?} = {:?})", self.alias, self.ty) } } -impl Debug for WhereClause { - fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> { +impl Debug for WhereClause { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { match self { - WhereClause::Implemented(tr) => write!( - fmt, - "Implemented({:?}: {:?}{:?})", - tr.parameters[0], - tr.trait_id, - Angle(&tr.parameters[1..]) - ), - WhereClause::ProjectionEq(p) => write!(fmt, "{:?}", p), + WhereClause::Implemented(tr) => write!(fmt, "Implemented({:?})", tr.with_colon()), + WhereClause::AliasEq(a) => write!(fmt, "{:?}", a), + WhereClause::LifetimeOutlives(l_o) => write!(fmt, "{:?}", l_o), + WhereClause::TypeOutlives(t_o) => write!(fmt, "{:?}", t_o), } } } -impl Debug for FromEnv { - fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> { 
+impl Debug for FromEnv { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { match self { - FromEnv::Trait(t) => write!(fmt, "FromEnv({:?})", t), + FromEnv::Trait(t) => write!(fmt, "FromEnv({:?})", t.with_colon()), FromEnv::Ty(t) => write!(fmt, "FromEnv({:?})", t), } } } -impl Debug for WellFormed { - fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> { +impl Debug for WellFormed { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { match self { - WellFormed::Trait(t) => write!(fmt, "WellFormed({:?})", t), + WellFormed::Trait(t) => write!(fmt, "WellFormed({:?})", t.with_colon()), WellFormed::Ty(t) => write!(fmt, "WellFormed({:?})", t), } } } -impl Debug for DomainGoal { - fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> { +impl Debug for DomainGoal { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { match self { DomainGoal::Holds(n) => write!(fmt, "{:?}", n), DomainGoal::WellFormed(n) => write!(fmt, "{:?}", n), DomainGoal::FromEnv(n) => write!(fmt, "{:?}", n), DomainGoal::Normalize(n) => write!(fmt, "{:?}", n), - DomainGoal::UnselectedNormalize(n) => write!(fmt, "{:?}", n), - DomainGoal::InScope(n) => write!(fmt, "InScope({:?})", n), - DomainGoal::Derefs(n) => write!(fmt, "Derefs({:?})", n), DomainGoal::IsLocal(n) => write!(fmt, "IsLocal({:?})", n), DomainGoal::IsUpstream(n) => write!(fmt, "IsUpstream({:?})", n), DomainGoal::IsFullyVisible(n) => write!(fmt, "IsFullyVisible({:?})", n), - DomainGoal::LocalImplAllowed(tr) => write!( - fmt, - "LocalImplAllowed({:?}: {:?}{:?})", - tr.parameters[0], - tr.trait_id, - Angle(&tr.parameters[1..]) - ), - DomainGoal::Compatible(_) => write!(fmt, "Compatible"), + DomainGoal::LocalImplAllowed(tr) => { + write!(fmt, "LocalImplAllowed({:?})", tr.with_colon(),) + } + DomainGoal::Compatible => write!(fmt, "Compatible"), DomainGoal::DownstreamType(n) => write!(fmt, "DownstreamType({:?})", n), + DomainGoal::Reveal => write!(fmt, "Reveal"), + DomainGoal::ObjectSafe(n) => write!(fmt, 
"ObjectSafe({:?})", n), } } } -impl Debug for LeafGoal { - fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> { - match *self { - LeafGoal::EqGoal(ref eq) => write!(fmt, "{:?}", eq), - LeafGoal::DomainGoal(ref dom) => write!(fmt, "{:?}", dom), - } - } -} - -impl Debug for EqGoal { - fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> { +impl Debug for EqGoal { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { write!(fmt, "({:?} = {:?})", self.a, self.b) } } -impl Debug for Goal { - fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> { - match *self { - Goal::Quantified(qkind, ref subgoal) => { - write!(fmt, "{:?}<", qkind)?; - for (index, binder) in subgoal.binders.iter().enumerate() { - if index > 0 { - write!(fmt, ", ")?; - } - match *binder { - ParameterKind::Ty(()) => write!(fmt, "type")?, - ParameterKind::Lifetime(()) => write!(fmt, "lifetime")?, - } - } - write!(fmt, "> {{ {:?} }}", subgoal.value) - } - Goal::Implies(ref wc, ref g) => write!(fmt, "if ({:?}) {{ {:?} }}", wc, g), - Goal::And(ref g1, ref g2) => write!(fmt, "({:?}, {:?})", g1, g2), - Goal::Not(ref g) => write!(fmt, "not {{ {:?} }}", g), - Goal::Leaf(ref wc) => write!(fmt, "{:?}", wc), - Goal::CannotProve(()) => write!(fmt, r"¯\_(ツ)_/¯"), - } +impl Debug for SubtypeGoal { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + write!(fmt, "({:?} <: {:?})", self.a, self.b) } } -impl Debug for Binders { - fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> { +impl Debug for Binders { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { let Binders { ref binders, ref value, } = *self; - if !binders.is_empty() { - write!(fmt, "for<")?; - for (index, binder) in binders.iter().enumerate() { - if index > 0 { - write!(fmt, ", ")?; - } - match *binder { - ParameterKind::Ty(()) => write!(fmt, "type")?, - ParameterKind::Lifetime(()) => write!(fmt, "lifetime")?, - } - } - write!(fmt, "> ")?; - } + write!(fmt, "for{:?} ", binders.debug())?; Debug::fmt(value, 
fmt) } } -impl Debug for ProgramClause { - fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> { - match self { - ProgramClause::Implies(pc) => write!(fmt, "{:?}", pc), - ProgramClause::ForAll(pc) => write!(fmt, "{:?}", pc), - } +impl Debug for ProgramClauseData { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + write!(fmt, "{:?}", self.0) } } -impl Debug for ProgramClauseImplication { - fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> { - write!(fmt, "{:?}", self.consequence)?; - - let conds = self.conditions.len(); - if conds == 0 { - return Ok(()); - } +impl Debug for Environment { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + write!(fmt, "Env({:?})", self.clauses) + } +} - write!(fmt, " :- ")?; - for cond in &self.conditions[..conds - 1] { - write!(fmt, "{:?}, ", cond)?; - } - write!(fmt, "{:?}", self.conditions[conds - 1]) +impl Debug for CanonicalVarKinds { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + I::debug_canonical_var_kinds(self, fmt) + .unwrap_or_else(|| write!(fmt, "{:?}", self.interned)) } } -impl Debug for Environment { - fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> { - write!(fmt, "Env({:?})", self.clauses) +impl Canonical { + /// Display the canonicalized item. + pub fn display(&self, interner: T::Interner) -> CanonicalDisplay<'_, T> { + CanonicalDisplay { + canonical: self, + interner, + } } } -impl Display for Canonical { - fn fmt(&self, f: &mut Formatter) -> Result<(), Error> { - let Canonical { binders, value } = self; +/// Helper struct for displaying canonicalized items. 
+pub struct CanonicalDisplay<'a, T: HasInterner> { + canonical: &'a Canonical, + interner: T::Interner, +} +impl<'a, T: HasInterner + Display> Display for CanonicalDisplay<'a, T> { + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> { + let Canonical { binders, value } = self.canonical; + let interner = self.interner; + let binders = binders.as_slice(interner); if binders.is_empty() { + // Ordinarily, we try to print all binder levels, if they + // are empty, but we can skip in this *particular* case + // because we know that `Canonical` terms are never + // supposed to contain free variables. In other words, + // all "bound variables" that appear inside the canonical + // value must reference values that appear in `binders`. write!(f, "{}", value)?; } else { write!(f, "for<")?; @@ -334,7 +909,7 @@ impl Display for Canonical { if i > 0 { write!(f, ",")?; } - write!(f, "?{}", pk.into_inner())?; + write!(f, "?{}", pk.skip_kind())?; } write!(f, "> {{ {} }}", value)?; @@ -344,68 +919,95 @@ impl Display for Canonical { } } -impl Debug for ParameterKind { - default fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> { - match *self { - ParameterKind::Ty(ref n) => write!(fmt, "Ty({:?})", n), - ParameterKind::Lifetime(ref n) => write!(fmt, "Lifetime({:?})", n), +impl Debug for GenericArgData { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + match self { + GenericArgData::Ty(t) => write!(fmt, "Ty({:?})", t), + GenericArgData::Lifetime(l) => write!(fmt, "Lifetime({:?})", l), + GenericArgData::Const(c) => write!(fmt, "Const({:?})", c), } } } -impl Debug for Constraint { - fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> { +impl Debug for VariableKind { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { match self { - Constraint::LifetimeEq(a, b) => write!(fmt, "{:?} == {:?}", a, b), + VariableKind::Ty(TyVariableKind::General) => write!(fmt, "type"), + VariableKind::Ty(TyVariableKind::Integer) => write!(fmt, "integer type"), + 
VariableKind::Ty(TyVariableKind::Float) => write!(fmt, "float type"), + VariableKind::Lifetime => write!(fmt, "lifetime"), + VariableKind::Const(ty) => write!(fmt, "const: {:?}", ty), } } } -impl Debug for Parameter { - fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> { - match *self { - ParameterKind::Ty(ref n) => write!(fmt, "{:?}", n), - ParameterKind::Lifetime(ref n) => write!(fmt, "{:?}", n), +impl Debug for WithKind { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + let value = self.skip_kind(); + match &self.kind { + VariableKind::Ty(TyVariableKind::General) => write!(fmt, "{:?} with kind type", value), + VariableKind::Ty(TyVariableKind::Integer) => { + write!(fmt, "{:?} with kind integer type", value) + } + VariableKind::Ty(TyVariableKind::Float) => { + write!(fmt, "{:?} with kind float type", value) + } + VariableKind::Lifetime => write!(fmt, "{:?} with kind lifetime", value), + VariableKind::Const(ty) => write!(fmt, "{:?} with kind {:?}", value, ty), } } } -impl Display for ConstrainedSubst { - fn fmt(&self, f: &mut Formatter) -> Result<(), Error> { - let ConstrainedSubst { subst, constraints } = self; - - write!( - f, - "substitution {}, lifetime constraints {:?}", - subst, constraints, - ) +impl Debug for Constraint { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + match self { + Constraint::LifetimeOutlives(a, b) => write!(fmt, "{:?}: {:?}", a, b), + Constraint::TypeOutlives(ty, lifetime) => write!(fmt, "{:?}: {:?}", ty, lifetime), + } } } -impl Debug for Substitution { - fn fmt(&self, f: &mut Formatter) -> Result<(), Error> { - Display::fmt(self, f) - } -} +impl Display for ConstrainedSubst { + #[rustfmt::skip] + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> { + let ConstrainedSubst { subst, constraints } = self; -impl Display for Substitution { - fn fmt(&self, f: &mut Formatter) -> Result<(), Error> { let mut first = true; - write!(f, "[")?; - - for (index, value) in 
self.parameters.iter().enumerate() { - if first { - first = false; - } else { - write!(f, ", ")?; - } - - write!(f, "?{} := {:?}", index, value)?; + let subst = format!("{}", Fmt(|f| Display::fmt(subst, f))); + if subst != "[]" { + write!(f, "substitution {}", subst)?; + first = false; } - write!(f, "]")?; + let constraints = format!("{}", Fmt(|f| Debug::fmt(constraints, f))); + if constraints != "[]" { + if !first { write!(f, ", ")?; } + write!(f, "lifetime constraints {}", constraints)?; + first = false; + } + let _ = first; Ok(()) } } + +impl Substitution { + /// Displays the substitution in the form `< P0, .. Pn >`, or (if + /// the substitution is empty) as an empty string. + pub fn with_angle(&self, interner: I) -> Angle<'_, GenericArg> { + Angle(self.as_slice(interner)) + } +} + +impl Debug for Substitution { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + Display::fmt(self, fmt) + } +} + +impl Debug for Variances { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { + I::debug_variances(self, fmt).unwrap_or_else(|| write!(fmt, "{:?}", self.interned)) + } +} diff --git a/chalk-ir/src/display.rs b/chalk-ir/src/display.rs deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/chalk-ir/src/fold.rs b/chalk-ir/src/fold.rs index b958da1fd2b..d0524db6541 100644 --- a/chalk-ir/src/fold.rs +++ b/chalk-ir/src/fold.rs @@ -1,24 +1,30 @@ //! Traits for transforming bits of IR. -use cast::Cast; -use chalk_engine::context::Context; -use chalk_engine::{DelayedLiteral, ExClause, Literal}; +use crate::*; +use std::convert::Infallible; use std::fmt::Debug; -use std::sync::Arc; -use *; +mod binder_impls; +mod boring_impls; +mod in_place; pub mod shift; mod subst; +pub use self::shift::Shift; pub use self::subst::Subst; /// A "folder" is a transformer that can be used to make a copy of /// some term -- that is, some bit of IR, such as a `Goal` -- with /// certain changes applied. 
The idea is that it contains methods that /// let you swap types/lifetimes for new types/lifetimes; meanwhile, -/// each bit of IR implements the `Fold` trait which, given a -/// `Folder`, will reconstruct itself, invoking the folder's methods -/// to transform each of the types/lifetimes embedded within. +/// each bit of IR implements the `TypeFoldable` trait which, given a +/// `FallibleTypeFolder`, will reconstruct itself, invoking the folder's +/// methods to transform each of the types/lifetimes embedded within. +/// +/// As the name suggests, folds performed by `FallibleTypeFolder` can +/// fail (with type `Error`); if the folder cannot fail, consider +/// implementing `TypeFolder` instead (which is an infallible, but +/// otherwise equivalent, trait). /// /// # Usage patterns /// @@ -29,15 +35,15 @@ pub use self::subst::Subst; /// more often, just free existential variables) that appear within /// the term. /// -/// For this reason, the `Folder` trait extends two other traits that -/// contain methods that are invoked when just those particular +/// For this reason, the `FallibleTypeFolder` trait extends two other +/// traits that contain methods that are invoked when just those particular /// /// In particular, folders can intercept references to free variables /// (either existentially or universally quantified) and replace them /// with other types/lifetimes as appropriate. 
/// -/// To create a folder `F`, one never implements `Folder` directly, but instead -/// implements one of each of these three sub-traits: +/// To create a folder `F`, one never implements `FallibleTypeFolder` +/// directly, but instead implements one of each of these three sub-traits: /// /// - `FreeVarFolder` -- folds `BoundVar` instances that appear free /// in the term being folded (use `DefaultFreeVarFolder` to @@ -49,692 +55,867 @@ pub use self::subst::Subst; /// that appear in the term being folded (use /// `DefaultPlaceholderFolder` to ignore/forbid these altogether) /// -/// To **apply** a folder, use the `Fold::fold_with` method, like so +/// To **apply** a folder, use the `TypeFoldable::try_fold_with` method, +/// like so /// /// ```rust,ignore -/// let x = x.fold_with(&mut folder, 0); +/// let x = x.try_fold_with(&mut folder, 0); /// ``` -pub trait Folder: FreeVarFolder + InferenceFolder + PlaceholderFolder + TypeFolder { - /// Returns a "dynamic" version of this trait. There is no - /// **particular** reason to require this, except that I didn't - /// feel like making `super_fold_ty` generic for no reason. - fn to_dyn(&mut self) -> &mut dyn Folder; -} +pub trait FallibleTypeFolder { + /// The type this folder returns when folding fails. This is + /// commonly [`NoSolution`]. + type Error; + + /// Creates a `dyn` value from this folder. Unfortunately, this + /// must be added manually to each impl of FallibleTypeFolder; it + /// permits the default implements below to create a + /// `&mut dyn FallibleTypeFolder` from `Self` without knowing what + /// `Self` is (by invoking this method). Effectively, this limits + /// impls of `FallibleTypeFolder` to types for which we are able to + /// create a dyn value (i.e., not `[T]` types). + fn as_dyn(&mut self) -> &mut dyn FallibleTypeFolder; + + /// Top-level callback: invoked for each `Ty` that is + /// encountered when folding. 
By default, invokes + /// `try_super_fold_with`, which will in turn invoke the more + /// specialized folding methods below, like `try_fold_free_var_ty`. + fn try_fold_ty( + &mut self, + ty: Ty, + outer_binder: DebruijnIndex, + ) -> Result, Self::Error> { + ty.try_super_fold_with(self.as_dyn(), outer_binder) + } -pub trait TypeFolder { - fn fold_ty(&mut self, ty: &Ty, binders: usize) -> Fallible; - fn fold_lifetime(&mut self, lifetime: &Lifetime, binders: usize) -> Fallible; -} + /// Top-level callback: invoked for each `Lifetime` that is + /// encountered when folding. By default, invokes + /// `try_super_fold_with`, which will in turn invoke the more + /// specialized folding methods below, like `try_fold_free_var_lifetime`. + fn try_fold_lifetime( + &mut self, + lifetime: Lifetime, + outer_binder: DebruijnIndex, + ) -> Result, Self::Error> { + lifetime.try_super_fold_with(self.as_dyn(), outer_binder) + } -impl Folder for T -where - T: FreeVarFolder + InferenceFolder + PlaceholderFolder + TypeFolder, -{ - fn to_dyn(&mut self) -> &mut dyn Folder { - self + /// Top-level callback: invoked for each `Const` that is + /// encountered when folding. By default, invokes + /// `try_super_fold_with`, which will in turn invoke the more + /// specialized folding methods below, like `try_fold_free_var_const`. + fn try_fold_const( + &mut self, + constant: Const, + outer_binder: DebruijnIndex, + ) -> Result, Self::Error> { + constant.try_super_fold_with(self.as_dyn(), outer_binder) } -} -/// A convenience trait that indicates that this folder doesn't take -/// any action on types in particular, but just recursively folds -/// their contents (note that free variables that are encountered in -/// that process may still be substituted). The vast majority of -/// folders implement this trait. -pub trait DefaultTypeFolder {} + /// Invoked for every program clause. By default, recursively folds the goals contents. 
+ fn try_fold_program_clause( + &mut self, + clause: ProgramClause, + outer_binder: DebruijnIndex, + ) -> Result, Self::Error> { + clause.try_super_fold_with(self.as_dyn(), outer_binder) + } -impl TypeFolder for T -where - T: FreeVarFolder + InferenceFolder + PlaceholderFolder + DefaultTypeFolder, -{ - fn fold_ty(&mut self, ty: &Ty, binders: usize) -> Fallible { - super_fold_ty(self.to_dyn(), ty, binders) + /// Invoked for every goal. By default, recursively folds the goals contents. + fn try_fold_goal( + &mut self, + goal: Goal, + outer_binder: DebruijnIndex, + ) -> Result, Self::Error> { + goal.try_super_fold_with(self.as_dyn(), outer_binder) } - fn fold_lifetime(&mut self, lifetime: &Lifetime, binders: usize) -> Fallible { - super_fold_lifetime(self.to_dyn(), lifetime, binders) + /// If overridden to return true, then folding will panic if a + /// free variable is encountered. This should be done if free + /// type/lifetime variables are not expected. + fn forbid_free_vars(&self) -> bool { + false } -} -/// The methods for folding **free variables**. These are `BoundVar` -/// instances where the binder is not something we folded over. This -/// is used when you are instanting previously bound things with some -/// replacement. -pub trait FreeVarFolder { - /// Invoked for `Ty::BoundVar` instances that are not bound within the type being folded - /// over: + /// Invoked for `TyKind::BoundVar` instances that are not bound + /// within the type being folded over: /// - /// - `depth` is the depth of the `Ty::BoundVar`; this has been adjusted to account for binders - /// in scope. + /// - `depth` is the depth of the `TyKind::BoundVar`; this has + /// been adjusted to account for binders in scope. /// - `binders` is the number of binders in scope. /// - /// This should return a type suitable for a context with `binders` in scope. - fn fold_free_var_ty(&mut self, depth: usize, binders: usize) -> Fallible; - - /// As `fold_free_var_ty`, but for lifetimes. 
- fn fold_free_var_lifetime(&mut self, depth: usize, binders: usize) -> Fallible; -} - -/// A convenience trait. If you implement this, you get an -/// implementation of `FreeVarFolder` for free that simply ignores -/// free values (that is, it replaces them with themselves). -/// -/// You can make it panic if a free-variable is found by overriding -/// `forbid` to return true. -pub trait DefaultFreeVarFolder { - fn forbid() -> bool { - false + /// This should return a type suitable for a context with + /// `binders` in scope. + fn try_fold_free_var_ty( + &mut self, + bound_var: BoundVar, + outer_binder: DebruijnIndex, + ) -> Result, Self::Error> { + if self.forbid_free_vars() { + panic!( + "unexpected free variable with depth `{:?}` with outer binder {:?}", + bound_var, outer_binder + ) + } else { + let bound_var = bound_var.shifted_in_from(outer_binder); + Ok(TyKind::::BoundVar(bound_var).intern(self.interner())) + } } -} -impl FreeVarFolder for T { - fn fold_free_var_ty(&mut self, depth: usize, binders: usize) -> Fallible { - if T::forbid() { - panic!("unexpected free variable with depth `{:?}`", depth) + /// As `try_fold_free_var_ty`, but for lifetimes. + fn try_fold_free_var_lifetime( + &mut self, + bound_var: BoundVar, + outer_binder: DebruijnIndex, + ) -> Result, Self::Error> { + if self.forbid_free_vars() { + panic!( + "unexpected free variable with depth `{:?}` with outer binder {:?}", + bound_var, outer_binder + ) } else { - Ok(Ty::BoundVar(depth + binders)) + let bound_var = bound_var.shifted_in_from(outer_binder); + Ok(LifetimeData::::BoundVar(bound_var).intern(self.interner())) } } - fn fold_free_var_lifetime( + /// As `try_fold_free_var_ty`, but for constants. 
+ fn try_fold_free_var_const( &mut self, - depth: usize, - binders: usize, - ) -> Fallible { - if T::forbid() { - panic!("unexpected free variable with depth `{:?}`", depth) + ty: Ty, + bound_var: BoundVar, + outer_binder: DebruijnIndex, + ) -> Result, Self::Error> { + if self.forbid_free_vars() { + panic!( + "unexpected free variable with depth `{:?}` with outer binder {:?}", + bound_var, outer_binder + ) } else { - Ok(Lifetime::BoundVar(depth + binders)) + let bound_var = bound_var.shifted_in_from(outer_binder); + Ok(ConstData { + ty: ty.try_fold_with(self.as_dyn(), outer_binder)?, + value: ConstValue::::BoundVar(bound_var), + } + .intern(self.interner())) } } -} -pub trait PlaceholderFolder { - /// Invoked for each occurence of a placeholder type; these are + /// If overridden to return true, we will panic when a free + /// placeholder type/lifetime/const is encountered. + fn forbid_free_placeholders(&self) -> bool { + false + } + + /// Invoked for each occurrence of a placeholder type; these are /// used when we instantiate binders universally. Returns a type /// to use instead, which should be suitably shifted to account /// for `binders`. /// /// - `universe` is the universe of the `TypeName::ForAll` that was found /// - `binders` is the number of binders in scope - fn fold_free_placeholder_ty( + #[allow(unused_variables)] + fn try_fold_free_placeholder_ty( &mut self, universe: PlaceholderIndex, - binders: usize, - ) -> Fallible; + outer_binder: DebruijnIndex, + ) -> Result, Self::Error> { + if self.forbid_free_placeholders() { + panic!("unexpected placeholder type `{:?}`", universe) + } else { + Ok(universe.to_ty::(self.interner())) + } + } - /// As with `fold_free_placeholder_ty`, but for lifetimes. - fn fold_free_placeholder_lifetime( + /// As with `try_fold_free_placeholder_ty`, but for lifetimes. 
+ #[allow(unused_variables)] + fn try_fold_free_placeholder_lifetime( &mut self, universe: PlaceholderIndex, - binders: usize, - ) -> Fallible; -} - -/// A convenience trait. If you implement this, you get an -/// implementation of `PlaceholderFolder` for free that simply ignores -/// placeholder values (that is, it replaces them with themselves). -/// -/// You can make it panic if a free-variable is found by overriding -/// `forbid` to return true. -pub trait DefaultPlaceholderFolder { - fn forbid() -> bool { - false - } -} - -impl PlaceholderFolder for T { - fn fold_free_placeholder_ty(&mut self, universe: PlaceholderIndex, _binders: usize) -> Fallible { - if T::forbid() { - panic!("unexpected placeholder type `{:?}`", universe) + outer_binder: DebruijnIndex, + ) -> Result, Self::Error> { + if self.forbid_free_placeholders() { + panic!("unexpected placeholder lifetime `{:?}`", universe) } else { - Ok(universe.to_ty()) + Ok(universe.to_lifetime(self.interner())) } } - fn fold_free_placeholder_lifetime( + /// As with `try_fold_free_placeholder_ty`, but for constants. + #[allow(unused_variables)] + fn try_fold_free_placeholder_const( &mut self, + ty: Ty, universe: PlaceholderIndex, - _binders: usize, - ) -> Fallible { - if T::forbid() { - panic!("unexpected placeholder lifetime `{:?}`", universe) + outer_binder: DebruijnIndex, + ) -> Result, Self::Error> { + if self.forbid_free_placeholders() { + panic!("unexpected placeholder const `{:?}`", universe) } else { - Ok(universe.to_lifetime()) + Ok(universe.to_const( + self.interner(), + ty.try_fold_with(self.as_dyn(), outer_binder)?, + )) } } -} -pub trait InferenceFolder { - /// Invoked for each occurence of a inference type; these are + /// If overridden to return true, inference variables will trigger + /// panics when folded. Used when inference variables are + /// unexpected. 
+ fn forbid_inference_vars(&self) -> bool { + false + } + + /// Invoked for each occurrence of a inference type; these are /// used when we instantiate binders universally. Returns a type /// to use instead, which should be suitably shifted to account /// for `binders`. /// /// - `universe` is the universe of the `TypeName::ForAll` that was found /// - `binders` is the number of binders in scope - fn fold_inference_ty(&mut self, var: InferenceVar, binders: usize) -> Fallible; - - /// As with `fold_free_inference_ty`, but for lifetimes. - fn fold_inference_lifetime(&mut self, var: InferenceVar, binders: usize) -> Fallible; -} - -/// A convenience trait. If you implement this, you get an -/// implementation of `InferenceFolder` for free that simply ignores -/// inference values (that is, it replaces them with themselves). -/// -/// You can make it panic if a free-variable is found by overriding -/// `forbid` to return true. -pub trait DefaultInferenceFolder { - fn forbid() -> bool { - false - } -} - -impl InferenceFolder for T { - fn fold_inference_ty(&mut self, var: InferenceVar, _binders: usize) -> Fallible { - if T::forbid() { + #[allow(unused_variables)] + fn try_fold_inference_ty( + &mut self, + var: InferenceVar, + kind: TyVariableKind, + outer_binder: DebruijnIndex, + ) -> Result, Self::Error> { + if self.forbid_inference_vars() { panic!("unexpected inference type `{:?}`", var) } else { - Ok(var.to_ty()) + Ok(var.to_ty(self.interner(), kind)) } } - fn fold_inference_lifetime( + /// As with `try_fold_inference_ty`, but for lifetimes. + #[allow(unused_variables)] + fn try_fold_inference_lifetime( &mut self, var: InferenceVar, - _binders: usize, - ) -> Fallible { - if T::forbid() { + outer_binder: DebruijnIndex, + ) -> Result, Self::Error> { + if self.forbid_inference_vars() { panic!("unexpected inference lifetime `'{:?}`", var) } else { - Ok(var.to_lifetime()) + Ok(var.to_lifetime(self.interner())) } } -} -/// Applies the given folder to a value. 
-pub trait Fold: Debug { - /// The type of value that will be produced once folding is done. - /// Typically this is `Self`, unless `Self` contains borrowed - /// values, in which case owned values are produced (for example, - /// one can fold over a `&T` value where `T: Fold`, in which case - /// you get back a `T`, not a `&T`). - type Result: Fold; + /// As with `try_fold_inference_ty`, but for constants. + #[allow(unused_variables)] + fn try_fold_inference_const( + &mut self, + ty: Ty, + var: InferenceVar, + outer_binder: DebruijnIndex, + ) -> Result, Self::Error> { + if self.forbid_inference_vars() { + panic!("unexpected inference const `{:?}`", var) + } else { + Ok(var.to_const( + self.interner(), + ty.try_fold_with(self.as_dyn(), outer_binder)?, + )) + } + } - /// Apply the given folder `folder` to `self`; `binders` is the - /// number of binders that are in scope when beginning the - /// folder. Typically `binders` starts as 0, but is adjusted when - /// we encounter `Binders` in the IR or other similar - /// constructs. - fn fold_with(&self, folder: &mut dyn Folder, binders: usize) -> Fallible; + /// Gets the interner that is being folded from. + fn interner(&self) -> I; } -impl<'a, T: Fold> Fold for &'a T { - type Result = T::Result; - fn fold_with(&self, folder: &mut dyn Folder, binders: usize) -> Fallible { - (**self).fold_with(folder, binders) +/// A "folder" is a transformer that can be used to make a copy of +/// some term -- that is, some bit of IR, such as a `Goal` -- with +/// certain changes applied. The idea is that it contains methods that +/// let you swap types/lifetimes for new types/lifetimes; meanwhile, +/// each bit of IR implements the `TypeFoldable` trait which, given a +/// `TypeFolder`, will reconstruct itself, invoking the folder's methods +/// to transform each of the types/lifetimes embedded within. +/// +/// Folds performed by `TypeFolder` cannot fail. 
If folds might fail, +/// consider implementing `FallibleTypeFolder` instead (which is a +/// fallible, but otherwise equivalent, trait). +/// +/// # Usage patterns +/// +/// ## Substituting for free variables +/// +/// Most of the time, though, we are not interested in adjust +/// arbitrary types/lifetimes, but rather just free variables (even +/// more often, just free existential variables) that appear within +/// the term. +/// +/// For this reason, the `TypeFolder` trait extends two other traits that +/// contain methods that are invoked when just those particular +/// +/// In particular, folders can intercept references to free variables +/// (either existentially or universally quantified) and replace them +/// with other types/lifetimes as appropriate. +/// +/// To create a folder `F`, one never implements `TypeFolder` directly, but instead +/// implements one of each of these three sub-traits: +/// +/// - `FreeVarFolder` -- folds `BoundVar` instances that appear free +/// in the term being folded (use `DefaultFreeVarFolder` to +/// ignore/forbid these altogether) +/// - `InferenceFolder` -- folds existential `InferenceVar` instances +/// that appear in the term being folded (use +/// `DefaultInferenceFolder` to ignore/forbid these altogether) +/// - `PlaceholderFolder` -- folds universal `Placeholder` instances +/// that appear in the term being folded (use +/// `DefaultPlaceholderFolder` to ignore/forbid these altogether) +/// +/// To **apply** a folder, use the `TypeFoldable::fold_with` method, like so +/// +/// ```rust,ignore +/// let x = x.fold_with(&mut folder, 0); +/// ``` +pub trait TypeFolder: FallibleTypeFolder { + /// Creates a `dyn` value from this folder. Unfortunately, this + /// must be added manually to each impl of TypeFolder; it permits the + /// default implements below to create a `&mut dyn TypeFolder` from + /// `Self` without knowing what `Self` is (by invoking this + /// method). 
Effectively, this limits impls of `TypeFolder` to types + /// for which we are able to create a dyn value (i.e., not `[T]` + /// types). + fn as_dyn(&mut self) -> &mut dyn TypeFolder; + + /// Top-level callback: invoked for each `Ty` that is + /// encountered when folding. By default, invokes + /// `super_fold_with`, which will in turn invoke the more + /// specialized folding methods below, like `fold_free_var_ty`. + fn fold_ty(&mut self, ty: Ty, outer_binder: DebruijnIndex) -> Ty { + ty.super_fold_with(TypeFolder::as_dyn(self), outer_binder) } -} -impl Fold for Vec { - type Result = Vec; - fn fold_with(&self, folder: &mut dyn Folder, binders: usize) -> Fallible { - self.iter().map(|e| e.fold_with(folder, binders)).collect() + /// Top-level callback: invoked for each `Lifetime` that is + /// encountered when folding. By default, invokes + /// `super_fold_with`, which will in turn invoke the more + /// specialized folding methods below, like `fold_free_var_lifetime`. + fn fold_lifetime(&mut self, lifetime: Lifetime, outer_binder: DebruijnIndex) -> Lifetime { + lifetime.super_fold_with(TypeFolder::as_dyn(self), outer_binder) } -} -impl Fold for Box { - type Result = Box; - fn fold_with(&self, folder: &mut dyn Folder, binders: usize) -> Fallible { - Ok(Box::new((**self).fold_with(folder, binders)?)) + /// Top-level callback: invoked for each `Const` that is + /// encountered when folding. By default, invokes + /// `super_fold_with`, which will in turn invoke the more + /// specialized folding methods below, like `fold_free_var_const`. + fn fold_const(&mut self, constant: Const, outer_binder: DebruijnIndex) -> Const { + constant.super_fold_with(TypeFolder::as_dyn(self), outer_binder) } -} -impl Fold for Arc { - type Result = Arc; - fn fold_with(&self, folder: &mut dyn Folder, binders: usize) -> Fallible { - Ok(Arc::new((**self).fold_with(folder, binders)?)) + /// Invoked for every program clause. By default, recursively folds the goals contents. 
+ fn fold_program_clause( + &mut self, + clause: ProgramClause, + outer_binder: DebruijnIndex, + ) -> ProgramClause { + clause.super_fold_with(TypeFolder::as_dyn(self), outer_binder) } -} -macro_rules! tuple_fold { - ($($n:ident),*) => { - impl<$($n: Fold,)*> Fold for ($($n,)*) { - type Result = ($($n::Result,)*); - fn fold_with(&self, folder: &mut dyn Folder, binders: usize) -> Fallible { - #[allow(non_snake_case)] - let &($(ref $n),*) = self; - Ok(($($n.fold_with(folder, binders)?,)*)) - } + /// Invoked for every goal. By default, recursively folds the goals contents. + fn fold_goal(&mut self, goal: Goal, outer_binder: DebruijnIndex) -> Goal { + goal.super_fold_with(TypeFolder::as_dyn(self), outer_binder) + } + + /// If overridden to return true, then folding will panic if a + /// free variable is encountered. This should be done if free + /// type/lifetime variables are not expected. + fn forbid_free_vars(&self) -> bool { + false + } + + /// Invoked for `TyKind::BoundVar` instances that are not bound + /// within the type being folded over: + /// + /// - `depth` is the depth of the `TyKind::BoundVar`; this has + /// been adjusted to account for binders in scope. + /// - `binders` is the number of binders in scope. + /// + /// This should return a type suitable for a context with + /// `binders` in scope. 
+ fn fold_free_var_ty(&mut self, bound_var: BoundVar, outer_binder: DebruijnIndex) -> Ty { + if TypeFolder::forbid_free_vars(self) { + panic!( + "unexpected free variable with depth `{:?}` with outer binder {:?}", + bound_var, outer_binder + ) + } else { + let bound_var = bound_var.shifted_in_from(outer_binder); + TyKind::::BoundVar(bound_var).intern(TypeFolder::interner(self)) } } -} -tuple_fold!(A, B); -tuple_fold!(A, B, C); -tuple_fold!(A, B, C, D); -tuple_fold!(A, B, C, D, E); - -impl Fold for Option { - type Result = Option; - fn fold_with(&self, folder: &mut dyn Folder, binders: usize) -> Fallible { - match self { - None => Ok(None), - Some(e) => Ok(Some(e.fold_with(folder, binders)?)), + /// As `fold_free_var_ty`, but for lifetimes. + fn fold_free_var_lifetime( + &mut self, + bound_var: BoundVar, + outer_binder: DebruijnIndex, + ) -> Lifetime { + if TypeFolder::forbid_free_vars(self) { + panic!( + "unexpected free variable with depth `{:?}` with outer binder {:?}", + bound_var, outer_binder + ) + } else { + let bound_var = bound_var.shifted_in_from(outer_binder); + LifetimeData::::BoundVar(bound_var).intern(TypeFolder::interner(self)) } } -} -impl Fold for Ty { - type Result = Self; - fn fold_with(&self, folder: &mut dyn Folder, binders: usize) -> Fallible { - folder.fold_ty(self, binders) + /// As `fold_free_var_ty`, but for constants. 
+ fn fold_free_var_const( + &mut self, + ty: Ty, + bound_var: BoundVar, + outer_binder: DebruijnIndex, + ) -> Const { + if TypeFolder::forbid_free_vars(self) { + panic!( + "unexpected free variable with depth `{:?}` with outer binder {:?}", + bound_var, outer_binder + ) + } else { + let bound_var = bound_var.shifted_in_from(outer_binder); + ConstData { + ty: ty.fold_with(TypeFolder::as_dyn(self), outer_binder), + value: ConstValue::::BoundVar(bound_var), + } + .intern(TypeFolder::interner(self)) + } } -} -pub fn super_fold_ty(folder: &mut dyn Folder, ty: &Ty, binders: usize) -> Fallible { - match *ty { - Ty::BoundVar(depth) => if depth >= binders { - folder.fold_free_var_ty(depth - binders, binders) + /// If overridden to return true, we will panic when a free + /// placeholder type/lifetime/const is encountered. + fn forbid_free_placeholders(&self) -> bool { + false + } + + /// Invoked for each occurrence of a placeholder type; these are + /// used when we instantiate binders universally. Returns a type + /// to use instead, which should be suitably shifted to account + /// for `binders`. 
+ /// + /// - `universe` is the universe of the `TypeName::ForAll` that was found + /// - `binders` is the number of binders in scope + #[allow(unused_variables)] + fn fold_free_placeholder_ty( + &mut self, + universe: PlaceholderIndex, + outer_binder: DebruijnIndex, + ) -> Ty { + if TypeFolder::forbid_free_placeholders(self) { + panic!("unexpected placeholder type `{:?}`", universe) } else { - Ok(Ty::BoundVar(depth)) - }, - Ty::InferenceVar(var) => folder.fold_inference_ty(var, binders), - Ty::Apply(ref apply) => { - let ApplicationTy { - name, - ref parameters, - } = *apply; - match name { - TypeName::Placeholder(ui) => { - assert!( - parameters.is_empty(), - "type {:?} with parameters {:?}", - ty, - parameters - ); - folder.fold_free_placeholder_ty(ui, binders) - } + universe.to_ty::(TypeFolder::interner(self)) + } + } - TypeName::ItemId(_) | TypeName::AssociatedType(_) => { - let parameters = parameters.fold_with(folder, binders)?; - Ok(ApplicationTy { name, parameters }.cast()) - } - } + /// As with `fold_free_placeholder_ty`, but for lifetimes. + #[allow(unused_variables)] + fn fold_free_placeholder_lifetime( + &mut self, + universe: PlaceholderIndex, + outer_binder: DebruijnIndex, + ) -> Lifetime { + if TypeFolder::forbid_free_placeholders(self) { + panic!("unexpected placeholder lifetime `{:?}`", universe) + } else { + universe.to_lifetime(TypeFolder::interner(self)) } - Ty::Projection(ref proj) => Ok(Ty::Projection(proj.fold_with(folder, binders)?)), - Ty::UnselectedProjection(ref proj) => { - Ok(Ty::UnselectedProjection(proj.fold_with(folder, binders)?)) + } + + /// As with `fold_free_placeholder_ty`, but for constants. 
+ #[allow(unused_variables)] + fn fold_free_placeholder_const( + &mut self, + ty: Ty, + universe: PlaceholderIndex, + outer_binder: DebruijnIndex, + ) -> Const { + if TypeFolder::forbid_free_placeholders(self) { + panic!("unexpected placeholder const `{:?}`", universe) + } else { + universe.to_const( + TypeFolder::interner(self), + ty.fold_with(TypeFolder::as_dyn(self), outer_binder), + ) } - Ty::ForAll(ref quantified_ty) => Ok(Ty::ForAll(quantified_ty.fold_with(folder, binders)?)), } -} -impl Fold for QuantifiedTy { - type Result = Self; - fn fold_with(&self, folder: &mut dyn Folder, binders: usize) -> Fallible { - let QuantifiedTy { - num_binders, - ref ty, - } = *self; - Ok(QuantifiedTy { - num_binders, - ty: ty.fold_with(folder, binders + num_binders)?, - }) + /// If overridden to return true, inference variables will trigger + /// panics when folded. Used when inference variables are + /// unexpected. + fn forbid_inference_vars(&self) -> bool { + false } -} -impl Fold for Binders -where - T: Fold, -{ - type Result = Binders; - fn fold_with(&self, folder: &mut dyn Folder, binders: usize) -> Fallible { - let Binders { - binders: ref self_binders, - value: ref self_value, - } = *self; - let value = self_value.fold_with(folder, binders + self_binders.len())?; - Ok(Binders { - binders: self_binders.clone(), - value: value, - }) + /// Invoked for each occurrence of a inference type; these are + /// used when we instantiate binders universally. Returns a type + /// to use instead, which should be suitably shifted to account + /// for `binders`. 
+ /// + /// - `universe` is the universe of the `TypeName::ForAll` that was found + /// - `binders` is the number of binders in scope + #[allow(unused_variables)] + fn fold_inference_ty( + &mut self, + var: InferenceVar, + kind: TyVariableKind, + outer_binder: DebruijnIndex, + ) -> Ty { + if TypeFolder::forbid_inference_vars(self) { + panic!("unexpected inference type `{:?}`", var) + } else { + var.to_ty(TypeFolder::interner(self), kind) + } } -} -impl Fold for Canonical -where - T: Fold, -{ - type Result = Canonical; - fn fold_with(&self, folder: &mut dyn Folder, binders: usize) -> Fallible { - let Canonical { - binders: ref self_binders, - value: ref self_value, - } = *self; - let value = self_value.fold_with(folder, binders + self_binders.len())?; - Ok(Canonical { - binders: self_binders.clone(), - value: value, - }) + /// As with `fold_inference_ty`, but for lifetimes. + #[allow(unused_variables)] + fn fold_inference_lifetime( + &mut self, + var: InferenceVar, + outer_binder: DebruijnIndex, + ) -> Lifetime { + if TypeFolder::forbid_inference_vars(self) { + panic!("unexpected inference lifetime `'{:?}`", var) + } else { + var.to_lifetime(TypeFolder::interner(self)) + } } -} -impl Fold for Lifetime { - type Result = Self; - fn fold_with(&self, folder: &mut dyn Folder, binders: usize) -> Fallible { - folder.fold_lifetime(self, binders) + /// As with `fold_inference_ty`, but for constants. + #[allow(unused_variables)] + fn fold_inference_const( + &mut self, + ty: Ty, + var: InferenceVar, + outer_binder: DebruijnIndex, + ) -> Const { + if TypeFolder::forbid_inference_vars(self) { + panic!("unexpected inference const `{:?}`", var) + } else { + var.to_const( + TypeFolder::interner(self), + ty.fold_with(TypeFolder::as_dyn(self), outer_binder), + ) + } } + + /// Gets the interner that is being folded from. 
+ fn interner(&self) -> I; } -pub fn super_fold_lifetime( - folder: &mut dyn Folder, - lifetime: &Lifetime, - binders: usize, -) -> Fallible { - match *lifetime { - Lifetime::BoundVar(depth) => if depth >= binders { - folder.fold_free_var_lifetime(depth - binders, binders) - } else { - Ok(Lifetime::BoundVar(depth)) - }, - Lifetime::InferenceVar(var) => folder.fold_inference_lifetime(var, binders), - Lifetime::Placeholder(universe) => folder.fold_free_placeholder_lifetime(universe, binders), +/// Applies the given `TypeFolder` to a value, producing a folded result +/// of type `Self::Result`. The result type is typically the same as +/// the source type, but in some cases we convert from borrowed +/// to owned as well (e.g., the folder for `&T` will fold to a fresh +/// `T`; well, actually `T::Result`). +pub trait TypeFoldable: Debug + Sized { + /// Apply the given folder `folder` to `self`; `binders` is the + /// number of binders that are in scope when beginning the + /// folder. Typically `binders` starts as 0, but is adjusted when + /// we encounter `Binders` in the IR or other similar + /// constructs. + fn try_fold_with( + self, + folder: &mut dyn FallibleTypeFolder, + outer_binder: DebruijnIndex, + ) -> Result; + + /// A convenient alternative to `try_fold_with` for use with infallible + /// folders. Do not override this method, to ensure coherence with + /// `try_fold_with`. + fn fold_with(self, folder: &mut dyn TypeFolder, outer_binder: DebruijnIndex) -> Self { + self.try_fold_with(FallibleTypeFolder::as_dyn(folder), outer_binder) + .unwrap() } } -impl Fold for Substitution { - type Result = Substitution; - fn fold_with(&self, folder: &mut dyn Folder, binders: usize) -> Fallible { - let parameters = self.parameters.fold_with(folder, binders)?; - Ok(Substitution { parameters }) +/// For types where "fold" invokes a callback on the `TypeFolder`, the +/// `TypeSuperFoldable` trait captures the recursive behavior that folds all +/// the contents of the type. 
+pub trait TypeSuperFoldable: TypeFoldable { + /// Recursively folds the value. + fn try_super_fold_with( + self, + folder: &mut dyn FallibleTypeFolder, + outer_binder: DebruijnIndex, + ) -> Result; + + /// A convenient alternative to `try_super_fold_with` for use with + /// infallible folders. Do not override this method, to ensure coherence + /// with `try_super_fold_with`. + fn super_fold_with(self, folder: &mut dyn TypeFolder, outer_binder: DebruijnIndex) -> Self { + self.try_super_fold_with(FallibleTypeFolder::as_dyn(folder), outer_binder) + .unwrap() } } -#[macro_export] -macro_rules! copy_fold { - ($t:ty) => { - impl $crate::fold::Fold for $t { - type Result = Self; - fn fold_with( - &self, - _folder: &mut dyn ($crate::fold::Folder), - _binders: usize, - ) -> ::chalk_engine::fallible::Fallible { - Ok(*self) - } - } - }; +/// "Folding" a type invokes the `try_fold_ty` method on the folder; this +/// usually (in turn) invokes `try_super_fold_ty` to fold the individual +/// parts. +impl TypeFoldable for Ty { + fn try_fold_with( + self, + folder: &mut dyn FallibleTypeFolder, + outer_binder: DebruijnIndex, + ) -> Result { + folder.try_fold_ty(self, outer_binder) + } } -copy_fold!(Identifier); -copy_fold!(UniverseIndex); -copy_fold!(ItemId); -copy_fold!(usize); -copy_fold!(QuantifierKind); -copy_fold!(chalk_engine::TableIndex); -// copy_fold!(TypeName); -- intentionally omitted! This is folded via `fold_ap` -copy_fold!(()); - -#[macro_export] -macro_rules! 
enum_fold { - ($s:ident [$($n:ident),*] { $($variant:ident($($name:ident),*)),* } $($w:tt)*) => { - impl<$($n),*> $crate::fold::Fold for $s<$($n),*> $($w)* { - type Result = $s<$($n :: Result),*>; - fn fold_with(&self, - folder: &mut dyn ($crate::fold::Folder), - binders: usize) - -> ::chalk_engine::fallible::Fallible { - match self { - $( - $s::$variant( $($name),* ) => { - Ok($s::$variant( $($name.fold_with(folder, binders)?),* )) - } - )* +/// "Super fold" for a type invokes te more detailed callbacks on the type +impl TypeSuperFoldable for Ty +where + I: Interner, +{ + fn try_super_fold_with( + self, + folder: &mut dyn FallibleTypeFolder, + outer_binder: DebruijnIndex, + ) -> Result, E> { + let interner = folder.interner(); + Ok(match self.kind(interner) { + TyKind::BoundVar(bound_var) => { + if let Some(bound_var1) = bound_var.shifted_out_to(outer_binder) { + // This variable was bound outside of the binders + // that we have traversed during folding; + // therefore, it is free. Let the folder have a + // crack at it. + folder.try_fold_free_var_ty(bound_var1, outer_binder)? + } else { + // This variable was bound within the binders that + // we folded over, so just return a bound + // variable. + self } } - } - }; - - // Hacky variant for use in slg::context::implementation - ($s:ty { $p:ident :: { $($variant:ident($($name:ident),*)),* } }) => { - impl $crate::fold::Fold for $s { - type Result = $s; - fn fold_with(&self, - folder: &mut dyn ($crate::fold::Folder), - binders: usize) - -> ::chalk_engine::fallible::Fallible { - match self { - $( - $p::$variant( $($name),* ) => { - Ok($p::$variant( $($name.fold_with(folder, binders)?),* )) - } - )* - } + TyKind::Dyn(clauses) => { + TyKind::Dyn(clauses.clone().try_fold_with(folder, outer_binder)?) + .intern(folder.interner()) } - } + TyKind::InferenceVar(var, kind) => { + folder.try_fold_inference_ty(*var, *kind, outer_binder)? 
+ } + TyKind::Placeholder(ui) => folder.try_fold_free_placeholder_ty(*ui, outer_binder)?, + TyKind::Alias(proj) => TyKind::Alias(proj.clone().try_fold_with(folder, outer_binder)?) + .intern(folder.interner()), + TyKind::Function(fun) => { + TyKind::Function(fun.clone().try_fold_with(folder, outer_binder)?) + .intern(folder.interner()) + } + TyKind::Adt(id, substitution) => TyKind::Adt( + id.try_fold_with(folder, outer_binder)?, + substitution.clone().try_fold_with(folder, outer_binder)?, + ) + .intern(folder.interner()), + TyKind::AssociatedType(assoc_ty, substitution) => TyKind::AssociatedType( + assoc_ty.try_fold_with(folder, outer_binder)?, + substitution.clone().try_fold_with(folder, outer_binder)?, + ) + .intern(folder.interner()), + TyKind::Scalar(scalar) => TyKind::Scalar(scalar.try_fold_with(folder, outer_binder)?) + .intern(folder.interner()), + TyKind::Str => TyKind::Str.intern(folder.interner()), + TyKind::Tuple(arity, substitution) => TyKind::Tuple( + *arity, + substitution.clone().try_fold_with(folder, outer_binder)?, + ) + .intern(folder.interner()), + TyKind::OpaqueType(opaque_ty, substitution) => TyKind::OpaqueType( + opaque_ty.try_fold_with(folder, outer_binder)?, + substitution.clone().try_fold_with(folder, outer_binder)?, + ) + .intern(folder.interner()), + TyKind::Slice(substitution) => { + TyKind::Slice(substitution.clone().try_fold_with(folder, outer_binder)?) 
+ .intern(folder.interner()) + } + TyKind::FnDef(fn_def, substitution) => TyKind::FnDef( + fn_def.try_fold_with(folder, outer_binder)?, + substitution.clone().try_fold_with(folder, outer_binder)?, + ) + .intern(folder.interner()), + TyKind::Ref(mutability, lifetime, ty) => TyKind::Ref( + mutability.try_fold_with(folder, outer_binder)?, + lifetime.clone().try_fold_with(folder, outer_binder)?, + ty.clone().try_fold_with(folder, outer_binder)?, + ) + .intern(folder.interner()), + TyKind::Raw(mutability, ty) => TyKind::Raw( + mutability.try_fold_with(folder, outer_binder)?, + ty.clone().try_fold_with(folder, outer_binder)?, + ) + .intern(folder.interner()), + TyKind::Never => TyKind::Never.intern(folder.interner()), + TyKind::Array(ty, const_) => TyKind::Array( + ty.clone().try_fold_with(folder, outer_binder)?, + const_.clone().try_fold_with(folder, outer_binder)?, + ) + .intern(folder.interner()), + TyKind::Closure(id, substitution) => TyKind::Closure( + id.try_fold_with(folder, outer_binder)?, + substitution.clone().try_fold_with(folder, outer_binder)?, + ) + .intern(folder.interner()), + TyKind::Coroutine(id, substitution) => TyKind::Coroutine( + id.try_fold_with(folder, outer_binder)?, + substitution.clone().try_fold_with(folder, outer_binder)?, + ) + .intern(folder.interner()), + TyKind::CoroutineWitness(id, substitution) => TyKind::CoroutineWitness( + id.try_fold_with(folder, outer_binder)?, + substitution.clone().try_fold_with(folder, outer_binder)?, + ) + .intern(folder.interner()), + TyKind::Foreign(id) => { + TyKind::Foreign(id.try_fold_with(folder, outer_binder)?).intern(folder.interner()) + } + TyKind::Error => TyKind::Error.intern(folder.interner()), + }) } } -enum_fold!(ParameterKind[T,L] { Ty(a), Lifetime(a) } where T: Fold, L: Fold); -enum_fold!(WhereClause[] { Implemented(a), ProjectionEq(a) }); -enum_fold!(WellFormed[] { Trait(a), Ty(a) }); -enum_fold!(FromEnv[] { Trait(a), Ty(a) }); -enum_fold!(DomainGoal[] { Holds(a), WellFormed(a), FromEnv(a), 
Normalize(a), UnselectedNormalize(a), - InScope(a), Derefs(a), IsLocal(a), IsUpstream(a), IsFullyVisible(a), - LocalImplAllowed(a), Compatible(a), DownstreamType(a) }); -enum_fold!(LeafGoal[] { EqGoal(a), DomainGoal(a) }); -enum_fold!(Constraint[] { LifetimeEq(a, b) }); -enum_fold!(Goal[] { Quantified(qkind, subgoal), Implies(wc, subgoal), And(g1, g2), Not(g), - Leaf(wc), CannotProve(a) }); -enum_fold!(ProgramClause[] { Implies(a), ForAll(a) }); - -#[macro_export] -macro_rules! struct_fold { - ($s:ident $([$($tt_args:tt)*])* { $($name:ident),* $(,)* } $($w:tt)*) => { - struct_fold! { - @parse_tt_args($($($tt_args)*)*) - struct_name($s) - parameters() - self_args() - result_args() - field_names($($name),*) - where_clauses($($w)*) - } - }; - - ( - @parse_tt_args() - struct_name($s:ident) - parameters($($parameters:tt)*) - self_args($($self_args:tt)*) - result_args($($result_args:tt)*) - field_names($($field_names:tt)*) - where_clauses($($where_clauses:tt)*) - ) => { - struct_fold! { - @parsed_tt_args - struct_name($s) - parameters($($parameters)*) - self_ty($s < $($self_args)* >) - result_ty($s < $($result_args)* >) - field_names($($field_names)*) - where_clauses($($where_clauses)*) - } - }; - - ( - @parse_tt_args(, $($input:tt)*) - struct_name($s:ident) - parameters($($parameters:tt)*) - self_args($($self_args:tt)*) - result_args($($result_args:tt)*) - field_names($($field_names:tt)*) - where_clauses($($where_clauses:tt)*) - ) => { - struct_fold! { - @parse_tt_args($($input)*) - struct_name($s) - parameters($($parameters)*,) - self_args($($self_args)*,) - result_args($($result_args)*,) - field_names($($field_names)*) - where_clauses($($where_clauses)*) - } - }; - - ( - @parse_tt_args(- $n:ident $($input:tt)*) - struct_name($s:ident) - parameters($($parameters:tt)*) - self_args($($self_args:tt)*) - result_args($($result_args:tt)*) - field_names($($field_names:tt)*) - where_clauses($($where_clauses:tt)*) - ) => { - struct_fold! 
{ - @parse_tt_args($($input)*) - struct_name($s) - parameters($($parameters)* $n) - self_args($($self_args)* $n) - result_args($($result_args)* $n) - field_names($($field_names)*) - where_clauses($($where_clauses)*) - } - }; - - ( - @parse_tt_args($n:ident $($input:tt)*) - struct_name($s:ident) - parameters($($parameters:tt)*) - self_args($($self_args:tt)*) - result_args($($result_args:tt)*) - field_names($($field_names:tt)*) - where_clauses($($where_clauses:tt)*) - ) => { - struct_fold! { - @parse_tt_args($($input)*) - struct_name($s) - parameters($($parameters)* $n) - self_args($($self_args)* $n) - result_args($($result_args)* $n :: Result) - field_names($($field_names)*) - where_clauses($($where_clauses)*) - } - }; - - ( - @parsed_tt_args - struct_name($s:ident) - parameters($($parameters:tt)*) - self_ty($self_ty:ty) - result_ty($result_ty:ty) - field_names($($field_name:ident),*) - where_clauses($($where_clauses:tt)*) - ) => { - impl<$($parameters)*> $crate::fold::Fold for $self_ty $($where_clauses)* { - type Result = $result_ty; - fn fold_with(&self, - folder: &mut dyn ($crate::fold::Folder), - binders: usize) - -> ::chalk_engine::fallible::Fallible { - Ok($s { - $($field_name: self.$field_name.fold_with(folder, binders)?),* - }) - } - } - }; +/// "Folding" a lifetime invokes the `fold_lifetime` method on the folder; this +/// usually (in turn) invokes `super_fold_lifetime` to fold the individual +/// parts. 
+impl TypeFoldable for Lifetime { + fn try_fold_with( + self, + folder: &mut dyn FallibleTypeFolder, + outer_binder: DebruijnIndex, + ) -> Result { + folder.try_fold_lifetime(self, outer_binder) + } } -struct_fold!(ProjectionTy { - associated_ty_id, - parameters, -}); -struct_fold!(UnselectedProjectionTy { - type_name, - parameters, -}); -struct_fold!(TraitRef { - trait_id, - parameters, -}); -struct_fold!(Normalize { projection, ty }); -struct_fold!(ProjectionEq { projection, ty }); -struct_fold!(UnselectedNormalize { projection, ty }); -struct_fold!(Environment { clauses }); -struct_fold!(InEnvironment[F] { environment, goal } where F: Fold); -struct_fold!(EqGoal { a, b }); -struct_fold!(Derefs { source, target }); -struct_fold!(ProgramClauseImplication { - consequence, - conditions, -}); - -struct_fold!(ConstrainedSubst { - subst, /* NB: The `is_trivial` routine relies on the fact that `subst` is folded first. */ - constraints, -}); - -// struct_fold!(ApplicationTy { name, parameters }); -- intentionally omitted, folded through Ty - -impl Fold for ExClause +impl TypeSuperFoldable for Lifetime where - C: Context, - C::Substitution: Fold, - C::RegionConstraint: Fold, - C::CanonicalConstrainedSubst: Fold, - C::GoalInEnvironment: Fold, + I: Interner, { - type Result = ExClause; - - fn fold_with(&self, folder: &mut dyn Folder, binders: usize) -> Fallible { - let ExClause { - subst, - delayed_literals, - constraints, - subgoals, - } = self; - Ok(ExClause { - subst: subst.fold_with(folder, binders)?, - delayed_literals: delayed_literals.fold_with(folder, binders)?, - constraints: constraints.fold_with(folder, binders)?, - subgoals: subgoals.fold_with(folder, binders)?, - }) + fn try_super_fold_with( + self, + folder: &mut dyn FallibleTypeFolder, + outer_binder: DebruijnIndex, + ) -> Result, E> { + let interner = folder.interner(); + match self.data(interner) { + LifetimeData::BoundVar(bound_var) => { + if let Some(bound_var1) = bound_var.shifted_out_to(outer_binder) { 
+ // This variable was bound outside of the binders + // that we have traversed during folding; + // therefore, it is free. Let the folder have a + // crack at it. + folder.try_fold_free_var_lifetime(bound_var1, outer_binder) + } else { + // This variable was bound within the binders that + // we folded over, so just return a bound + // variable. + Ok(self) + } + } + LifetimeData::InferenceVar(var) => { + folder.try_fold_inference_lifetime(*var, outer_binder) + } + LifetimeData::Placeholder(universe) => { + folder.try_fold_free_placeholder_lifetime(*universe, outer_binder) + } + LifetimeData::Static => Ok(LifetimeData::::Static.intern(folder.interner())), + LifetimeData::Erased => Ok(LifetimeData::::Erased.intern(folder.interner())), + LifetimeData::Error => Ok(LifetimeData::::Error.intern(folder.interner())), + LifetimeData::Phantom(void, ..) => match *void {}, + } } } -impl Fold for DelayedLiteral +/// "Folding" a const invokes the `fold_const` method on the folder; this +/// usually (in turn) invokes `super_fold_const` to fold the individual +/// parts. 
+impl TypeFoldable for Const { + fn try_fold_with( + self, + folder: &mut dyn FallibleTypeFolder, + outer_binder: DebruijnIndex, + ) -> Result { + folder.try_fold_const(self, outer_binder) + } +} + +impl TypeSuperFoldable for Const where - C: Context, - C::CanonicalConstrainedSubst: Fold, + I: Interner, { - type Result = DelayedLiteral; - - fn fold_with(&self, folder: &mut dyn Folder, binders: usize) -> Fallible { - match self { - DelayedLiteral::CannotProve(()) => Ok(DelayedLiteral::CannotProve(())), - DelayedLiteral::Negative(table_index) => Ok(DelayedLiteral::Negative( - table_index.fold_with(folder, binders)?, - )), - DelayedLiteral::Positive(table_index, subst) => Ok(DelayedLiteral::Positive( - table_index.fold_with(folder, binders)?, - subst.fold_with(folder, binders)?, - )), + fn try_super_fold_with( + self, + folder: &mut dyn FallibleTypeFolder, + outer_binder: DebruijnIndex, + ) -> Result, E> { + let interner = folder.interner(); + let ConstData { ref ty, ref value } = self.data(interner); + let mut fold_ty = || ty.clone().try_fold_with(folder, outer_binder); + match value { + ConstValue::BoundVar(bound_var) => { + if let Some(bound_var1) = bound_var.shifted_out_to(outer_binder) { + folder.try_fold_free_var_const(ty.clone(), bound_var1, outer_binder) + } else { + Ok(self) + } + } + ConstValue::InferenceVar(var) => { + folder.try_fold_inference_const(ty.clone(), *var, outer_binder) + } + ConstValue::Placeholder(universe) => { + folder.try_fold_free_placeholder_const(ty.clone(), *universe, outer_binder) + } + ConstValue::Concrete(ev) => Ok(ConstData { + ty: fold_ty()?, + value: ConstValue::Concrete(ConcreteConst { + interned: ev.interned.clone(), + }), + } + .intern(folder.interner())), } } } -impl Fold for Literal -where - C: Context, - C::GoalInEnvironment: Fold, -{ - type Result = Literal; +/// Folding a goal invokes the `fold_goal` callback (which will, by +/// default, invoke super-fold). 
+impl TypeFoldable for Goal { + fn try_fold_with( + self, + folder: &mut dyn FallibleTypeFolder, + outer_binder: DebruijnIndex, + ) -> Result { + folder.try_fold_goal(self, outer_binder) + } +} - fn fold_with(&self, folder: &mut dyn Folder, binders: usize) -> Fallible { - match self { - Literal::Positive(goal) => Ok(Literal::Positive(goal.fold_with(folder, binders)?)), - Literal::Negative(goal) => Ok(Literal::Negative(goal.fold_with(folder, binders)?)), - } +/// Superfold folds recursively. +impl TypeSuperFoldable for Goal { + fn try_super_fold_with( + self, + folder: &mut dyn FallibleTypeFolder, + outer_binder: DebruijnIndex, + ) -> Result { + let interner = folder.interner(); + Ok(Goal::new( + interner, + self.data(interner) + .clone() + .try_fold_with(folder, outer_binder)?, + )) + } +} + +/// Folding a program clause invokes the `fold_program_clause` +/// callback on the folder (which will, by default, invoke the +/// `super_fold_with` method on the program clause). +impl TypeFoldable for ProgramClause { + fn try_fold_with( + self, + folder: &mut dyn FallibleTypeFolder, + outer_binder: DebruijnIndex, + ) -> Result { + folder.try_fold_program_clause(self, outer_binder) } } diff --git a/chalk-ir/src/fold/binder_impls.rs b/chalk-ir/src/fold/binder_impls.rs new file mode 100644 index 00000000000..1f44c162962 --- /dev/null +++ b/chalk-ir/src/fold/binder_impls.rs @@ -0,0 +1,73 @@ +//! This module contains impls of `TypeFoldable` for those types that +//! introduce binders. +//! +//! The more interesting impls of `TypeFoldable` remain in the `fold` module. 
+ +use crate::*; + +impl TypeFoldable for FnPointer { + fn try_fold_with( + self, + folder: &mut dyn FallibleTypeFolder, + outer_binder: DebruijnIndex, + ) -> Result { + let FnPointer { + num_binders, + substitution, + sig, + } = self; + Ok(FnPointer { + num_binders, + substitution: substitution.try_fold_with(folder, outer_binder.shifted_in())?, + sig: FnSig { + abi: sig.abi, + safety: sig.safety, + variadic: sig.variadic, + }, + }) + } +} + +impl TypeFoldable for Binders +where + T: HasInterner + TypeFoldable, + I: Interner, +{ + fn try_fold_with( + self, + folder: &mut dyn FallibleTypeFolder, + outer_binder: DebruijnIndex, + ) -> Result { + let Binders { + binders: self_binders, + value: self_value, + } = self; + let value = self_value.try_fold_with(folder, outer_binder.shifted_in())?; + let binders = VariableKinds { + interned: self_binders.interned().clone(), + }; + Ok(Binders::new(binders, value)) + } +} + +impl TypeFoldable for Canonical +where + I: Interner, + T: HasInterner + TypeFoldable, +{ + fn try_fold_with( + self, + folder: &mut dyn FallibleTypeFolder, + outer_binder: DebruijnIndex, + ) -> Result { + let Canonical { + binders: self_binders, + value: self_value, + } = self; + let value = self_value.try_fold_with(folder, outer_binder.shifted_in())?; + let binders = CanonicalVarKinds { + interned: self_binders.interned().clone(), + }; + Ok(Canonical { binders, value }) + } +} diff --git a/chalk-ir/src/fold/boring_impls.rs b/chalk-ir/src/fold/boring_impls.rs new file mode 100644 index 00000000000..f484cbe2684 --- /dev/null +++ b/chalk-ir/src/fold/boring_impls.rs @@ -0,0 +1,244 @@ +//! This module contains "rote and uninteresting" impls of `TypeFoldable` for +//! various types. In general, we prefer to derive `TypeFoldable`, but +//! sometimes that doesn't work for whatever reason. +//! +//! The more interesting impls of `TypeFoldable` remain in the `fold` module. 
+ +use super::in_place; +use crate::*; +use std::marker::PhantomData; + +impl, I: Interner> TypeFoldable for Vec { + fn try_fold_with( + self, + folder: &mut dyn FallibleTypeFolder, + outer_binder: DebruijnIndex, + ) -> Result { + in_place::fallible_map_vec(self, |e| e.try_fold_with(folder, outer_binder)) + } +} + +impl, I: Interner> TypeFoldable for Box { + fn try_fold_with( + self, + folder: &mut dyn FallibleTypeFolder, + outer_binder: DebruijnIndex, + ) -> Result { + in_place::fallible_map_box(self, |e| e.try_fold_with(folder, outer_binder)) + } +} + +macro_rules! tuple_fold { + ($($n:ident),*) => { + impl<$($n: TypeFoldable,)* I: Interner> TypeFoldable for ($($n,)*) { + fn try_fold_with(self, folder: &mut dyn FallibleTypeFolder, outer_binder: DebruijnIndex) -> Result + { + #[allow(non_snake_case)] + let ($($n),*) = self; + Ok(($($n.try_fold_with(folder, outer_binder)?,)*)) + } + } + } +} + +tuple_fold!(A, B); +tuple_fold!(A, B, C); +tuple_fold!(A, B, C, D); +tuple_fold!(A, B, C, D, E); + +impl, I: Interner> TypeFoldable for Option { + fn try_fold_with( + self, + folder: &mut dyn FallibleTypeFolder, + outer_binder: DebruijnIndex, + ) -> Result { + match self { + None => Ok(None), + Some(e) => Ok(Some(e.try_fold_with(folder, outer_binder)?)), + } + } +} + +impl TypeFoldable for GenericArg { + fn try_fold_with( + self, + folder: &mut dyn FallibleTypeFolder, + outer_binder: DebruijnIndex, + ) -> Result { + let interner = folder.interner(); + + let data = self + .data(interner) + .clone() + .try_fold_with(folder, outer_binder)?; + Ok(GenericArg::new(interner, data)) + } +} + +impl TypeFoldable for Substitution { + fn try_fold_with( + self, + folder: &mut dyn FallibleTypeFolder, + outer_binder: DebruijnIndex, + ) -> Result { + let interner = folder.interner(); + + let folded = self + .iter(interner) + .cloned() + .map(|p| p.try_fold_with(folder, outer_binder)); + Substitution::from_fallible(interner, folded) + } +} + +impl TypeFoldable for Goals { + fn try_fold_with( 
+ self, + folder: &mut dyn FallibleTypeFolder, + outer_binder: DebruijnIndex, + ) -> Result { + let interner = folder.interner(); + let folded = self + .iter(interner) + .cloned() + .map(|p| p.try_fold_with(folder, outer_binder)); + Goals::from_fallible(interner, folded) + } +} + +impl TypeFoldable for ProgramClauses { + fn try_fold_with( + self, + folder: &mut dyn FallibleTypeFolder, + outer_binder: DebruijnIndex, + ) -> Result { + let interner = folder.interner(); + let folded = self + .iter(interner) + .cloned() + .map(|p| p.try_fold_with(folder, outer_binder)); + ProgramClauses::from_fallible(interner, folded) + } +} + +impl TypeFoldable for QuantifiedWhereClauses { + fn try_fold_with( + self, + folder: &mut dyn FallibleTypeFolder, + outer_binder: DebruijnIndex, + ) -> Result { + let interner = folder.interner(); + let folded = self + .iter(interner) + .cloned() + .map(|p| p.try_fold_with(folder, outer_binder)); + QuantifiedWhereClauses::from_fallible(interner, folded) + } +} + +impl TypeFoldable for Constraints { + fn try_fold_with( + self, + folder: &mut dyn FallibleTypeFolder, + outer_binder: DebruijnIndex, + ) -> Result { + let interner = folder.interner(); + let folded = self + .iter(interner) + .cloned() + .map(|p| p.try_fold_with(folder, outer_binder)); + Constraints::from_fallible(interner, folded) + } +} + +#[doc(hidden)] +#[macro_export] +macro_rules! 
copy_fold { + ($t:ty) => { + impl $crate::fold::TypeFoldable for $t { + fn try_fold_with( + self, + _folder: &mut dyn ($crate::fold::FallibleTypeFolder), + _outer_binder: DebruijnIndex, + ) -> ::std::result::Result { + Ok(self) + } + } + }; +} + +copy_fold!(bool); +copy_fold!(usize); +copy_fold!(UniverseIndex); +copy_fold!(PlaceholderIndex); +copy_fold!(QuantifierKind); +copy_fold!(DebruijnIndex); +copy_fold!(()); +copy_fold!(UintTy); +copy_fold!(IntTy); +copy_fold!(FloatTy); +copy_fold!(Scalar); +copy_fold!(ClausePriority); +copy_fold!(Mutability); +copy_fold!(Safety); + +#[doc(hidden)] +#[macro_export] +macro_rules! id_fold { + ($t:ident) => { + impl $crate::fold::TypeFoldable for $t { + fn try_fold_with( + self, + _folder: &mut dyn ($crate::fold::FallibleTypeFolder), + _outer_binder: DebruijnIndex, + ) -> ::std::result::Result { + Ok(self) + } + } + }; +} + +id_fold!(ImplId); +id_fold!(AdtId); +id_fold!(TraitId); +id_fold!(AssocTypeId); +id_fold!(OpaqueTyId); +id_fold!(FnDefId); +id_fold!(ClosureId); +id_fold!(CoroutineId); +id_fold!(ForeignDefId); + +impl TypeSuperFoldable for ProgramClauseData { + fn try_super_fold_with( + self, + folder: &mut dyn FallibleTypeFolder, + outer_binder: DebruijnIndex, + ) -> ::std::result::Result { + Ok(ProgramClauseData( + self.0.try_fold_with(folder, outer_binder)?, + )) + } +} + +impl TypeSuperFoldable for ProgramClause { + fn try_super_fold_with( + self, + folder: &mut dyn FallibleTypeFolder, + outer_binder: DebruijnIndex, + ) -> ::std::result::Result { + let clause = self.data(folder.interner()).clone(); + Ok(clause + .try_super_fold_with(folder, outer_binder)? 
+ .intern(folder.interner())) + } +} + +impl TypeFoldable for PhantomData { + fn try_fold_with( + self, + _folder: &mut dyn FallibleTypeFolder, + _outer_binder: DebruijnIndex, + ) -> ::std::result::Result { + Ok(PhantomData) + } +} diff --git a/chalk-ir/src/fold/in_place.rs b/chalk-ir/src/fold/in_place.rs new file mode 100644 index 00000000000..263da617d97 --- /dev/null +++ b/chalk-ir/src/fold/in_place.rs @@ -0,0 +1,263 @@ +//! Subroutines to help implementers of `TypeFoldable` avoid unnecessary heap allocations. + +use std::marker::PhantomData; +use std::{mem, ptr}; + +fn is_zst() -> bool { + mem::size_of::() == 0 +} + +fn is_layout_identical() -> bool { + mem::size_of::() == mem::size_of::() && mem::align_of::() == mem::align_of::() +} + +/// Maps a `Box` to a `Box`, reusing the underlying storage if possible. +pub(super) fn fallible_map_box( + b: Box, + map: impl FnOnce(T) -> Result, +) -> Result, E> { + // This optimization is only valid when `T` and `U` have the same size/alignment and is not + // useful for ZSTs. + if !is_layout_identical::() || is_zst::() { + return map(*b).map(Box::new); + } + + let raw = Box::into_raw(b); + unsafe { + let val = ptr::read(raw); + + // Box -> Box> + let mut raw: Box> = Box::from_raw(raw.cast()); + + // If `map` panics or returns an error, `raw` will free the memory associated with `b`, but + // not drop the boxed value itself since it is wrapped in `MaybeUninit`. This is what we + // want since the boxed value was moved into `map`. + let mapped_val = map(val)?; + ptr::write(raw.as_mut_ptr(), mapped_val); + + // Box> -> Box + Ok(Box::from_raw(Box::into_raw(raw).cast())) + } +} + +/// Maps a `Vec` to a `Vec`, reusing the underlying storage if possible. +pub(super) fn fallible_map_vec( + vec: Vec, + mut map: impl FnMut(T) -> Result, +) -> Result, E> { + // This optimization is only valid when `T` and `U` have the same size/alignment and is not + // useful for ZSTs. 
+ if !is_layout_identical::() || is_zst::() { + return vec.into_iter().map(map).collect(); + } + + let mut vec = VecMappedInPlace::::new(vec); + + unsafe { + for i in 0..vec.len { + let place = vec.ptr.add(i); + let val = ptr::read(place); + + // Set `map_in_progress` so the drop impl for `VecMappedInPlace` can handle the other + // elements correctly in case `map` panics or returns an error. + vec.map_in_progress = i; + let mapped_val = map(val)?; + + ptr::write(place as *mut U, mapped_val); + } + + Ok(vec.finish()) + } +} + +/// Takes ownership of a `Vec` that is being mapped in place, cleaning up if the map fails. +struct VecMappedInPlace { + ptr: *mut T, + len: usize, + cap: usize, + + map_in_progress: usize, + _elem_tys: PhantomData<(T, U)>, +} + +impl VecMappedInPlace { + fn new(mut vec: Vec) -> Self { + assert!(is_layout_identical::()); + + // FIXME: This is just `Vec::into_raw_parts`. Use that instead when it is stabilized. + let ptr = vec.as_mut_ptr(); + let len = vec.len(); + let cap = vec.capacity(); + mem::forget(vec); + + VecMappedInPlace { + ptr, + len, + cap, + + map_in_progress: 0, + _elem_tys: PhantomData, + } + } + + /// Converts back into a `Vec` once the map is complete. + unsafe fn finish(self) -> Vec { + let this = mem::ManuallyDrop::new(self); + Vec::from_raw_parts(this.ptr as *mut U, this.len, this.cap) + } +} + +/// `VecMappedInPlace` drops everything but the element that was passed to `map` when it panicked or +/// returned an error. Everything before that index in the vector has type `U` (it has been mapped) +/// and everything after it has type `T` (it has not been mapped). +/// +/// ```text +/// mapped +/// | not yet mapped +/// |----| |-----| +/// [UUUU UxTT TTTT] +/// ^ +/// `map_in_progress` (not dropped) +/// ``` +impl Drop for VecMappedInPlace { + fn drop(&mut self) { + // Drop mapped elements of type `U`. 
+ for i in 0..self.map_in_progress { + unsafe { + ptr::drop_in_place(self.ptr.add(i) as *mut U); + } + } + + // Drop unmapped elements of type `T`. + for i in (self.map_in_progress + 1)..self.len { + unsafe { + ptr::drop_in_place(self.ptr.add(i)); + } + } + + // Free the underlying storage for the `Vec`. + // `len` is 0 because the elements were handled above. + unsafe { + Vec::from_raw_parts(self.ptr, 0, self.cap); + } + } +} + +#[cfg(test)] +mod tests { + use std::fmt; + use std::sync::{Arc, Mutex}; + + /// A wrapper around `T` that records when it is dropped. + struct RecordDrop { + id: T, + drops: Arc>>, + } + + impl RecordDrop { + fn new(id: T, drops: &Arc>>) -> Self { + RecordDrop { + id, + drops: drops.clone(), + } + } + } + + impl RecordDrop { + fn map_to_char(self) -> RecordDrop { + let this = std::mem::ManuallyDrop::new(self); + RecordDrop { + id: (this.id + b'A') as char, + drops: this.drops.clone(), + } + } + } + + impl Drop for RecordDrop { + fn drop(&mut self) { + self.drops.lock().unwrap().push(format!("{}", self.id)); + } + } + + #[test] + fn vec_no_cleanup_after_success() { + let drops = Arc::new(Mutex::new(Vec::new())); + let to_fold = (0u8..5).map(|i| RecordDrop::new(i, &drops)).collect(); + + let res: Result<_, ()> = super::fallible_map_vec(to_fold, |x| Ok(x.map_to_char())); + + assert!(res.is_ok()); + assert!(drops.lock().unwrap().is_empty()); + } + + #[test] + fn vec_cleanup_after_panic() { + let drops = Arc::new(Mutex::new(Vec::new())); + let to_fold = (0u8..5).map(|i| RecordDrop::new(i, &drops)).collect(); + + let res = std::panic::catch_unwind(|| { + let _: Result<_, ()> = super::fallible_map_vec(to_fold, |x| { + if x.id == 3 { + panic!(); + } + + Ok(x.map_to_char()) + }); + }); + + assert!(res.is_err()); + assert_eq!(*drops.lock().unwrap(), &["3", "A", "B", "C", "4"]); + } + + #[test] + fn vec_cleanup_after_early_return() { + let drops = Arc::new(Mutex::new(Vec::new())); + let to_fold = (0u8..5).map(|i| RecordDrop::new(i, 
&drops)).collect(); + + let res = super::fallible_map_vec(to_fold, |x| { + if x.id == 2 { + return Err(()); + } + + Ok(x.map_to_char()) + }); + + assert!(res.is_err()); + assert_eq!(*drops.lock().unwrap(), &["2", "A", "B", "3", "4"]); + } + + #[test] + fn box_no_cleanup_after_success() { + let drops = Arc::new(Mutex::new(Vec::new())); + let to_fold = Box::new(RecordDrop::new(0, &drops)); + + let res: Result, ()> = super::fallible_map_box(to_fold, |x| Ok(x.map_to_char())); + + assert!(res.is_ok()); + assert!(drops.lock().unwrap().is_empty()); + } + + #[test] + fn box_cleanup_after_panic() { + let drops = Arc::new(Mutex::new(Vec::new())); + let to_fold = Box::new(RecordDrop::new(0, &drops)); + + let res = std::panic::catch_unwind(|| { + let _: Result, ()> = super::fallible_map_box(to_fold, |_| panic!()); + }); + + assert!(res.is_err()); + assert_eq!(*drops.lock().unwrap(), &["0"]); + } + + #[test] + fn box_cleanup_after_early_return() { + let drops = Arc::new(Mutex::new(Vec::new())); + let to_fold = Box::new(RecordDrop::new(0, &drops)); + + let res: Result, _> = super::fallible_map_box(to_fold, |_| Err(())); + + assert!(res.is_err()); + assert_eq!(*drops.lock().unwrap(), &["0"]); + } +} diff --git a/chalk-ir/src/fold/shift.rs b/chalk-ir/src/fold/shift.rs index 0098b8cb271..f7e5e4a4abd 100644 --- a/chalk-ir/src/fold/shift.rs +++ b/chalk-ir/src/fold/shift.rs @@ -1,117 +1,122 @@ -use super::{ - DefaultTypeFolder, Fold, FreeVarFolder, DefaultInferenceFolder, DefaultPlaceholderFolder, -}; -use *; +//! Shifting of debruijn indices + +use crate::*; /// Methods for converting debruijn indices to move values into or out /// of binders. -pub trait Shift: Fold { - /// Shifts debruijn indices in `self` **up**, which is used when a - /// value is being placed under additional levels of binders. 
- /// - /// For example, if we had some goal - /// like: - /// - /// ```notrust - /// T: Trait - /// ``` - /// - /// where `?X` refers to some inference variable (and hence has depth 3), - /// we might use `up_shift` when constructing a goal like: - /// - /// ```notrust - /// exists { T = U, T: Trait } - /// ``` - /// - /// This is because, internally, the inference variable `?X` (as - /// well as the new quantified variable `U`) are going to be - /// represented by debruijn indices. So if the index of `X` is - /// zero, then while originally we might have had `T: Trait`, - /// inside the `exists` we want to represent `X` with `?1`, to - /// account for the binder: - /// - /// ```notrust - /// exists { T = ?0, T: Trait } - /// ^^ ^^ refers to `?X` - /// refers to `U` - /// ``` - fn shifted_in(&self, adjustment: usize) -> Self::Result; - - /// Shifts debruijn indices in `self` **down**, hence **removing** - /// a value from binders. This will fail with `Err(NoSolution)` in - /// the case that the value refers to something from one of those - /// binders. - /// - /// Consider the final example from `up_shift`: - /// - /// ```notrust - /// exists { T = ?0, T: Trait } - /// ^^ ^^ refers to `?X` - /// refers to `U` - /// ``` - /// - /// If we `down_shift` the `T: Trait` goal by 1, - /// we will get `T: Trait`, which is what we started with. - /// In other words, we will have extracted it from the `exists` - /// binder. - /// - /// But if we try to `down_shift` the `T = ?0` goal by 1, we will - /// get `Err`, because it refers to the type bound by the - /// `exists`. - fn shifted_out(&self, adjustment: usize) -> Fallible; +pub trait Shift: TypeFoldable { + /// Shifts this term in one level of binders. + fn shifted_in(self, interner: I) -> Self; + + /// Shifts a term valid at `outer_binder` so that it is + /// valid at the innermost binder. See [`DebruijnIndex::shifted_in_from`] + /// for a detailed explanation. 
+ fn shifted_in_from(self, interner: I, source_binder: DebruijnIndex) -> Self; + + /// Shifts this term out one level of binders. + fn shifted_out(self, interner: I) -> Fallible; + + /// Shifts a term valid at the innermost binder so that it is + /// valid at `outer_binder`. See [`DebruijnIndex::shifted_out_to`] + /// for a detailed explanation. + fn shifted_out_to(self, interner: I, target_binder: DebruijnIndex) -> Fallible; } -impl Shift for T { - fn shifted_in(&self, adjustment: usize) -> T::Result { - self.fold_with(&mut Shifter { adjustment }, 0).unwrap() +impl, I: Interner> Shift for T { + fn shifted_in(self, interner: I) -> Self { + self.shifted_in_from(interner, DebruijnIndex::ONE) + } + + fn shifted_in_from(self, interner: I, source_binder: DebruijnIndex) -> T { + self.try_fold_with( + &mut Shifter { + source_binder, + interner, + }, + DebruijnIndex::INNERMOST, + ) + .unwrap() + } + + fn shifted_out_to(self, interner: I, target_binder: DebruijnIndex) -> Fallible { + self.try_fold_with( + &mut DownShifter { + target_binder, + interner, + }, + DebruijnIndex::INNERMOST, + ) } - fn shifted_out(&self, adjustment: usize) -> Fallible { - self.fold_with(&mut DownShifter { adjustment }, 0) + fn shifted_out(self, interner: I) -> Fallible { + self.shifted_out_to(interner, DebruijnIndex::ONE) } } /// A folder that adjusts debruijn indices by a certain amount. -/// -struct Shifter { - adjustment: usize, +#[derive(FallibleTypeFolder)] +struct Shifter { + source_binder: DebruijnIndex, + interner: I, } -impl Shifter { +impl Shifter { /// Given a free variable at `depth`, shifts that depth to `depth /// + self.adjustment`, and then wraps *that* within the internal /// set `binders`. 
- fn adjust(&self, depth: usize, binders: usize) -> usize { - depth + self.adjustment + binders + fn adjust(&self, bound_var: BoundVar, outer_binder: DebruijnIndex) -> BoundVar { + bound_var + .shifted_in_from(self.source_binder) + .shifted_in_from(outer_binder) } } -impl DefaultTypeFolder for Shifter {} +impl TypeFolder for Shifter { + fn as_dyn(&mut self) -> &mut dyn TypeFolder { + self + } -impl FreeVarFolder for Shifter { - fn fold_free_var_ty(&mut self, depth: usize, binders: usize) -> Fallible { - Ok(Ty::BoundVar(self.adjust(depth, binders))) + fn fold_free_var_ty(&mut self, bound_var: BoundVar, outer_binder: DebruijnIndex) -> Ty { + TyKind::::BoundVar(self.adjust(bound_var, outer_binder)) + .intern(TypeFolder::interner(self)) } - fn fold_free_var_lifetime(&mut self, depth: usize, binders: usize) -> Fallible { - Ok(Lifetime::BoundVar(self.adjust(depth, binders))) + fn fold_free_var_lifetime( + &mut self, + bound_var: BoundVar, + outer_binder: DebruijnIndex, + ) -> Lifetime { + LifetimeData::::BoundVar(self.adjust(bound_var, outer_binder)) + .intern(TypeFolder::interner(self)) } -} -impl DefaultPlaceholderFolder for Shifter {} + fn fold_free_var_const( + &mut self, + ty: Ty, + bound_var: BoundVar, + outer_binder: DebruijnIndex, + ) -> Const { + // const types don't have free variables, so we can skip folding `ty` + self.adjust(bound_var, outer_binder) + .to_const(TypeFolder::interner(self), ty) + } -impl DefaultInferenceFolder for Shifter {} + fn interner(&self) -> I { + self.interner + } +} //--------------------------------------------------------------------------- /// A shifter that reduces debruijn indices -- in other words, which lifts a value /// *out* from binders. 
Consider this example: /// -struct DownShifter { - adjustment: usize, +struct DownShifter { + target_binder: DebruijnIndex, + interner: I, } -impl DownShifter { +impl DownShifter { /// Given a reference to a free variable at depth `depth` /// (appearing within `binders` internal binders), attempts to /// lift that free variable out from `adjustment` levels of @@ -120,26 +125,53 @@ impl DownShifter { /// those internal binders (i.e., `depth < self.adjustment`) the /// this will fail with `Err`. Otherwise, returns the variable at /// this new depth (but adjusted to appear within `binders`). - fn adjust(&self, depth: usize, binders: usize) -> Fallible { - match depth.checked_sub(self.adjustment) { - Some(new_depth) => Ok(new_depth + binders), + fn adjust(&self, bound_var: BoundVar, outer_binder: DebruijnIndex) -> Fallible { + match bound_var.shifted_out_to(self.target_binder) { + Some(bound_var1) => Ok(bound_var1.shifted_in_from(outer_binder)), None => Err(NoSolution), } } } -impl DefaultTypeFolder for DownShifter {} +impl FallibleTypeFolder for DownShifter { + type Error = NoSolution; -impl FreeVarFolder for DownShifter { - fn fold_free_var_ty(&mut self, depth: usize, binders: usize) -> Fallible { - Ok(Ty::BoundVar(self.adjust(depth, binders)?)) + fn as_dyn(&mut self) -> &mut dyn FallibleTypeFolder { + self } - fn fold_free_var_lifetime(&mut self, depth: usize, binders: usize) -> Fallible { - Ok(Lifetime::BoundVar(self.adjust(depth, binders)?)) + fn try_fold_free_var_ty( + &mut self, + bound_var: BoundVar, + outer_binder: DebruijnIndex, + ) -> Fallible> { + Ok(TyKind::::BoundVar(self.adjust(bound_var, outer_binder)?).intern(self.interner())) + } + + fn try_fold_free_var_lifetime( + &mut self, + bound_var: BoundVar, + outer_binder: DebruijnIndex, + ) -> Fallible> { + Ok( + LifetimeData::::BoundVar(self.adjust(bound_var, outer_binder)?) 
+ .intern(self.interner()), + ) } -} -impl DefaultPlaceholderFolder for DownShifter {} + fn try_fold_free_var_const( + &mut self, + ty: Ty, + bound_var: BoundVar, + outer_binder: DebruijnIndex, + ) -> Fallible> { + // const types don't have free variables, so we can skip folding `ty` + Ok(self + .adjust(bound_var, outer_binder)? + .to_const(self.interner(), ty)) + } -impl DefaultInferenceFolder for DownShifter {} + fn interner(&self) -> I { + self.interner + } +} diff --git a/chalk-ir/src/fold/subst.rs b/chalk-ir/src/fold/subst.rs index 314c1691ec2..7cff8d89c91 100644 --- a/chalk-ir/src/fold/subst.rs +++ b/chalk-ir/src/fold/subst.rs @@ -1,58 +1,118 @@ -use fold::shift::Shift; -use ::*; - use super::*; +use crate::fold::shift::Shift; -pub struct Subst<'s> { +/// Substitution used during folding +#[derive(FallibleTypeFolder)] +pub struct Subst<'s, I: Interner> { /// Values to substitute. A reference to a free variable with /// index `i` will be mapped to `parameters[i]` -- if `i > /// parameters.len()`, then we will leave the variable untouched. 
- parameters: &'s [Parameter], + parameters: &'s [GenericArg], + interner: I, } -impl<'s> Subst<'s> { - pub fn apply(parameters: &[Parameter], value: &T) -> T::Result { - value.fold_with(&mut Subst { parameters }, 0).unwrap() +impl Subst<'_, I> { + /// Applies the substitution by folding + pub fn apply>(interner: I, parameters: &[GenericArg], value: T) -> T { + value + .try_fold_with( + &mut Subst { + parameters, + interner, + }, + DebruijnIndex::INNERMOST, + ) + .unwrap() } } -impl QuantifiedTy { - pub fn substitute(&self, parameters: &[Parameter]) -> Ty { - assert_eq!(self.num_binders, parameters.len()); - Subst::apply(parameters, &self.ty) +impl TypeFolder for Subst<'_, I> { + fn as_dyn(&mut self) -> &mut dyn TypeFolder { + self } -} -impl<'b> DefaultTypeFolder for Subst<'b> {} - -impl<'b> FreeVarFolder for Subst<'b> { - fn fold_free_var_ty(&mut self, depth: usize, binders: usize) -> Fallible { - if depth >= self.parameters.len() { - Ok(Ty::BoundVar(depth - self.parameters.len() + binders)) - } else { - match self.parameters[depth] { - ParameterKind::Ty(ref t) => Ok(t.shifted_in(binders)), + /// We are eliminating one binder, but binders outside of that get preserved. + /// + /// So e.g. consider this: + /// + /// ```notrust + /// for { for { [A, C] } } + /// // ^ the binder we are substituing with `[u32]` + /// ``` + /// + /// Here, `A` would be `^1.0` and `C` would be `^0.0`. We will replace `^0.0` with the + /// 0th index from the list (`u32`). We will convert `^1.0` (A) to `^0.0` -- i.e., shift + /// it **out** of one level of binder (the `for` binder we are eliminating). 
+ /// + /// This gives us as a result: + /// + /// ```notrust + /// for { [A, u32] } + /// ^ represented as `^0.0` + /// ``` + fn fold_free_var_ty(&mut self, bound_var: BoundVar, outer_binder: DebruijnIndex) -> Ty { + if let Some(index) = bound_var.index_if_innermost() { + match self.parameters[index].data(TypeFolder::interner(self)) { + GenericArgData::Ty(t) => t + .clone() + .shifted_in_from(TypeFolder::interner(self), outer_binder), _ => panic!("mismatched kinds in substitution"), } + } else { + bound_var + .shifted_out() + .expect("cannot fail because this is not the innermost") + .shifted_in_from(outer_binder) + .to_ty(TypeFolder::interner(self)) } } + /// see `fold_free_var_ty` fn fold_free_var_lifetime( &mut self, - depth: usize, - binders: usize, - ) -> Fallible { - if depth >= self.parameters.len() { - Ok(Lifetime::BoundVar(depth - self.parameters.len() + binders)) - } else { - match self.parameters[depth] { - ParameterKind::Lifetime(ref l) => Ok(l.shifted_in(binders)), + bound_var: BoundVar, + outer_binder: DebruijnIndex, + ) -> Lifetime { + if let Some(index) = bound_var.index_if_innermost() { + match self.parameters[index].data(TypeFolder::interner(self)) { + GenericArgData::Lifetime(l) => l + .clone() + .shifted_in_from(TypeFolder::interner(self), outer_binder), _ => panic!("mismatched kinds in substitution"), } + } else { + bound_var + .shifted_out() + .unwrap() + .shifted_in_from(outer_binder) + .to_lifetime(TypeFolder::interner(self)) } } -} -impl<'b> DefaultPlaceholderFolder for Subst<'b> {} + /// see `fold_free_var_ty` + fn fold_free_var_const( + &mut self, + ty: Ty, + bound_var: BoundVar, + outer_binder: DebruijnIndex, + ) -> Const { + if let Some(index) = bound_var.index_if_innermost() { + match self.parameters[index].data(TypeFolder::interner(self)) { + GenericArgData::Const(c) => c + .clone() + .shifted_in_from(TypeFolder::interner(self), outer_binder), + _ => panic!("mismatched kinds in substitution"), + } + } else { + bound_var + 
.shifted_out() + .unwrap() + .shifted_in_from(outer_binder) + .to_const(TypeFolder::interner(self), ty) + } + } -impl<'b> DefaultInferenceFolder for Subst<'b> {} + fn interner(&self) -> I { + self.interner + } +} diff --git a/chalk-ir/src/interner.rs b/chalk-ir/src/interner.rs new file mode 100644 index 00000000000..e0bd4e7b602 --- /dev/null +++ b/chalk-ir/src/interner.rs @@ -0,0 +1,702 @@ +//! Encapsulates the concrete representation of core types such as types and goals. +use crate::AliasTy; +use crate::AssocTypeId; +use crate::CanonicalVarKind; +use crate::CanonicalVarKinds; +use crate::ClosureId; +use crate::Constraint; +use crate::Constraints; +use crate::CoroutineId; +use crate::FnDefId; +use crate::ForeignDefId; +use crate::GenericArg; +use crate::GenericArgData; +use crate::Goal; +use crate::GoalData; +use crate::Goals; +use crate::InEnvironment; +use crate::Lifetime; +use crate::LifetimeData; +use crate::OpaqueTy; +use crate::OpaqueTyId; +use crate::ProgramClause; +use crate::ProgramClauseData; +use crate::ProgramClauseImplication; +use crate::ProgramClauses; +use crate::ProjectionTy; +use crate::QuantifiedWhereClause; +use crate::QuantifiedWhereClauses; +use crate::SeparatorTraitRef; +use crate::Substitution; +use crate::TraitId; +use crate::Ty; +use crate::TyData; +use crate::VariableKind; +use crate::VariableKinds; +use crate::Variance; +use crate::Variances; +use crate::{AdtId, TyKind}; +use crate::{Const, ConstData}; +use std::fmt::{self, Debug}; +use std::hash::Hash; +use std::marker::PhantomData; +use std::sync::Arc; + +/// A "interner" encapsulates the concrete representation of +/// certain "core types" from chalk-ir. All the types in chalk-ir are +/// parameterized by a `I: Interner`, and so (e.g.) if they want to +/// store a type, they don't store a `Ty` instance directly, but +/// rather prefer a `Ty`. You can think of `I::Type` as the +/// interned representation (and, indeed, it may well be an interned +/// pointer, e.g. in rustc). 
+/// +/// Type families allow chalk to be embedded in different contexts +/// where the concrete representation of core types varies. They also +/// allow us to write generic code that reasons about multiple +/// distinct sets of types by using distinct generic type parameters +/// (e.g., `SourceI` and `TargetI`) -- even if those type parameters +/// wind up being mapped to the same underlying type families in the +/// end. +pub trait Interner: Debug + Copy + Eq + Hash + Sized { + /// "Interned" representation of types. In normal user code, + /// `Self::InternedType` is not referenced. Instead, we refer to + /// `Ty`, which wraps this type. + /// + /// An `InternedType` must be something that can be created from a + /// `TyKind` (by the [`intern_ty`][Self::intern_ty] method) and then later + /// converted back (by the [`ty_data`][Self::ty_data] method). The interned form + /// must also introduce indirection, either via a `Box`, `&`, or + /// other pointer type. + type InternedType: Debug + Clone + Eq + Hash; + + /// "Interned" representation of lifetimes. In normal user code, + /// `Self::InternedLifetime` is not referenced. Instead, we refer to + /// `Lifetime`, which wraps this type. + /// + /// An `InternedLifetime` must be something that can be created + /// from a `LifetimeData` (by the [`intern_lifetime`][Self::intern_lifetime] method) and + /// then later converted back (by the [`lifetime_data`][Self::lifetime_data] method). + type InternedLifetime: Debug + Clone + Eq + Hash; + + /// "Interned" representation of const expressions. In normal user code, + /// `Self::InternedConst` is not referenced. Instead, we refer to + /// `Const`, which wraps this type. + /// + /// An `InternedConst` must be something that can be created + /// from a `ConstData` (by the [`intern_const`][Self::intern_const] method) and + /// then later converted back (by the [`const_data`][Self::const_data] method). 
+ type InternedConst: Debug + Clone + Eq + Hash; + + /// "Interned" representation of an evaluated const value. + /// `Self::InternedConcreteConst` is not referenced. Instead, + /// we refer to `ConcreteConst`, which wraps this type. + /// + /// `InternedConcreteConst` instances are not created by chalk, + /// it can only make a query asking about equality of two + /// evaluated consts. + type InternedConcreteConst: Debug + Clone + Eq + Hash; + + /// "Interned" representation of a "generic parameter", which can + /// be either a type or a lifetime. In normal user code, + /// `Self::InternedGenericArg` is not referenced. Instead, we refer to + /// `GenericArg`, which wraps this type. + /// + /// An `InternedType` is created by `intern_generic_arg` and can be + /// converted back to its underlying data via `generic_arg_data`. + type InternedGenericArg: Debug + Clone + Eq + Hash; + + /// "Interned" representation of a "goal". In normal user code, + /// `Self::InternedGoal` is not referenced. Instead, we refer to + /// `Goal`, which wraps this type. + /// + /// An `InternedGoal` is created by `intern_goal` and can be + /// converted back to its underlying data via `goal_data`. + type InternedGoal: Debug + Clone + Eq + Hash; + + /// "Interned" representation of a list of goals. In normal user code, + /// `Self::InternedGoals` is not referenced. Instead, we refer to + /// `Goals`, which wraps this type. + /// + /// An `InternedGoals` is created by `intern_goals` and can be + /// converted back to its underlying data via `goals_data`. + type InternedGoals: Debug + Clone + Eq + Hash; + + /// "Interned" representation of a "substitution". In normal user code, + /// `Self::InternedSubstitution` is not referenced. Instead, we refer to + /// `Substitution`, which wraps this type. + /// + /// An `InternedSubstitution` is created by `intern_substitution` and can be + /// converted back to its underlying data via `substitution_data`. 
+ type InternedSubstitution: Debug + Clone + Eq + Hash; + + /// "Interned" representation of a list of program clauses. In normal user code, + /// `Self::InternedProgramClauses` is not referenced. Instead, we refer to + /// `ProgramClauses`, which wraps this type. + /// + /// An `InternedProgramClauses` is created by `intern_program_clauses` and can be + /// converted back to its underlying data via `program_clauses_data`. + type InternedProgramClauses: Debug + Clone + Eq + Hash; + + /// "Interned" representation of a "program clause". In normal user code, + /// `Self::InternedProgramClause` is not referenced. Instead, we refer to + /// `ProgramClause`, which wraps this type. + /// + /// An `InternedProgramClause` is created by `intern_program_clause` and can be + /// converted back to its underlying data via `program_clause_data`. + type InternedProgramClause: Debug + Clone + Eq + Hash; + + /// "Interned" representation of a list of quantified where clauses. + /// In normal user code, `Self::InternedQuantifiedWhereClauses` is not referenced. + /// Instead, we refer to `QuantifiedWhereClauses`, which wraps this type. + /// + /// An `InternedQuantifiedWhereClauses` is created by `intern_quantified_where_clauses` + /// and can be converted back to its underlying data via `quantified_where_clauses_data`. + type InternedQuantifiedWhereClauses: Debug + Clone + Eq + Hash; + + /// "Interned" representation of a list of variable kinds. + /// In normal user code, `Self::InternedVariableKinds` is not referenced. + /// Instead, we refer to `VariableKinds`, which wraps this type. + /// + /// An `InternedVariableKinds` is created by `intern_generic_arg_kinds` + /// and can be converted back to its underlying data via `variable_kinds_data`. + type InternedVariableKinds: Debug + Clone + Eq + Hash; + + /// "Interned" representation of a list of variable kinds with universe index. + /// In normal user code, `Self::InternedCanonicalVarKinds` is not referenced. 
+ /// Instead, we refer to `CanonicalVarKinds`, which wraps this type. + /// + /// An `InternedCanonicalVarKinds` is created by + /// `intern_canonical_var_kinds` and can be converted back + /// to its underlying data via `canonical_var_kinds_data`. + type InternedCanonicalVarKinds: Debug + Clone + Eq + Hash; + + /// "Interned" representation of a list of region constraints. + /// In normal user code, `Self::InternedConstraints` is not referenced. + /// Instead, we refer to `Constraints`, which wraps this type. + /// + /// An `InternedConstraints` is created by `intern_constraints` + /// and can be converted back to its underlying data via `constraints_data`. + type InternedConstraints: Debug + Clone + Eq + Hash; + + /// "Interned" representation of a list of `chalk_ir::Variance`. + /// In normal user code, `Self::InternedVariances` is not referenced. + /// Instead, we refer to `Variances`, which wraps this type. + /// + /// An `InternedVariances` is created by + /// `intern_variances` and can be converted back + /// to its underlying data via `variances_data`. + type InternedVariances: Debug + Clone + Eq + Hash; + + /// The core "id" type used for trait-ids and the like. + type DefId: Debug + Copy + Eq + Hash; + + /// The ID type for ADTs + type InternedAdtId: Debug + Copy + Eq + Hash; + + /// Representation of identifiers. + type Identifier: Debug + Clone + Eq + Hash; + + /// Representation of function ABI (e.g. calling convention). + type FnAbi: Debug + Copy + Eq + Hash; + + /// Prints the debug representation of a type-kind-id. + /// Returns `None` to fallback to the default debug output. + #[allow(unused_variables)] + fn debug_adt_id(adt_id: AdtId, fmt: &mut fmt::Formatter<'_>) -> Option { + None + } + + /// Prints the debug representation of a type-kind-id. + /// Returns `None` to fallback to the default debug output (e.g., + /// if no info about current program is available from TLS). 
+ #[allow(unused_variables)] + fn debug_trait_id( + trait_id: TraitId, + fmt: &mut fmt::Formatter<'_>, + ) -> Option { + None + } + + /// Prints the debug representation of a type-kind-id. + /// Returns `None` to fallback to the default debug output. + #[allow(unused_variables)] + fn debug_assoc_type_id( + type_id: AssocTypeId, + fmt: &mut fmt::Formatter<'_>, + ) -> Option { + None + } + + /// Prints the debug representation of an opaque type. + /// Returns `None` to fallback to the default debug output. + #[allow(unused_variables)] + fn debug_opaque_ty_id( + opaque_ty_id: OpaqueTyId, + fmt: &mut fmt::Formatter<'_>, + ) -> Option { + None + } + + /// Prints the debug representation of a function-def-id. + /// Returns `None` to fallback to the default debug output. + #[allow(unused_variables)] + fn debug_fn_def_id( + fn_def_id: FnDefId, + fmt: &mut fmt::Formatter<'_>, + ) -> Option { + None + } + + /// Prints the debug representation of a closure id. + /// Returns `None` to fallback to the default debug output. + #[allow(unused_variables)] + fn debug_closure_id( + fn_def_id: ClosureId, + fmt: &mut fmt::Formatter<'_>, + ) -> Option { + None + } + + /// Prints the debug representation of a foreign-def-id. + /// Returns `None` to fallback to the default debug output. + #[allow(unused_variables)] + fn debug_foreign_def_id( + foreign_def_id: ForeignDefId, + fmt: &mut fmt::Formatter<'_>, + ) -> Option { + None + } + + /// Prints the debug representation of an alias. + /// Returns `None` to fallback to the default debug output. + #[allow(unused_variables)] + fn debug_coroutine_id( + coroutine_id: CoroutineId, + fmt: &mut fmt::Formatter<'_>, + ) -> Option { + None + } + + /// Prints the debug representation of an alias. To get good + /// results, this requires inspecting TLS, and is difficult to + /// code without reference to a specific interner (and hence + /// fully known types). 
+ /// + /// Returns `None` to fallback to the default debug output (e.g., + /// if no info about current program is available from TLS). + #[allow(unused_variables)] + fn debug_alias(alias: &AliasTy, fmt: &mut fmt::Formatter<'_>) -> Option { + None + } + + /// Prints the debug representation of a ProjectionTy. + /// Returns `None` to fallback to the default debug output. + #[allow(unused_variables)] + fn debug_projection_ty( + projection_ty: &ProjectionTy, + fmt: &mut fmt::Formatter<'_>, + ) -> Option { + None + } + + /// Prints the debug representation of an OpaqueTy. + /// Returns `None` to fallback to the default debug output. + #[allow(unused_variables)] + fn debug_opaque_ty( + opaque_ty: &OpaqueTy, + fmt: &mut fmt::Formatter<'_>, + ) -> Option { + None + } + + /// Prints the debug representation of a type. + /// Returns `None` to fallback to the default debug output. + #[allow(unused_variables)] + fn debug_ty(ty: &Ty, fmt: &mut fmt::Formatter<'_>) -> Option { + None + } + + /// Prints the debug representation of a lifetime. + /// Returns `None` to fallback to the default debug output. + #[allow(unused_variables)] + fn debug_lifetime( + lifetime: &Lifetime, + fmt: &mut fmt::Formatter<'_>, + ) -> Option { + None + } + + /// Prints the debug representation of a const. + /// Returns `None` to fallback to the default debug output. + #[allow(unused_variables)] + fn debug_const(constant: &Const, fmt: &mut fmt::Formatter<'_>) -> Option { + None + } + + /// Prints the debug representation of an parameter. + /// Returns `None` to fallback to the default debug output. + #[allow(unused_variables)] + fn debug_generic_arg( + generic_arg: &GenericArg, + fmt: &mut fmt::Formatter<'_>, + ) -> Option { + None + } + + /// Prints the debug representation of a parameter kinds list. + /// Returns `None` to fallback to the default debug output. 
+ #[allow(unused_variables)] + fn debug_variable_kinds( + variable_kinds: &VariableKinds, + fmt: &mut fmt::Formatter<'_>, + ) -> Option { + None + } + + /// Prints the debug representation of a parameter kinds list, with angle brackets. + /// Returns `None` to fallback to the default debug output. + #[allow(unused_variables)] + fn debug_variable_kinds_with_angles( + variable_kinds: &VariableKinds, + fmt: &mut fmt::Formatter<'_>, + ) -> Option { + None + } + + /// Prints the debug representation of an parameter kinds list with universe index. + /// Returns `None` to fallback to the default debug output. + #[allow(unused_variables)] + fn debug_canonical_var_kinds( + canonical_var_kinds: &CanonicalVarKinds, + fmt: &mut fmt::Formatter<'_>, + ) -> Option { + None + } + + /// Prints the debug representation of an goal. + /// Returns `None` to fallback to the default debug output. + #[allow(unused_variables)] + fn debug_goal(goal: &Goal, fmt: &mut fmt::Formatter<'_>) -> Option { + None + } + + /// Prints the debug representation of a list of goals. + /// Returns `None` to fallback to the default debug output. + #[allow(unused_variables)] + fn debug_goals(goals: &Goals, fmt: &mut fmt::Formatter<'_>) -> Option { + None + } + + /// Prints the debug representation of a ProgramClauseImplication. + /// Returns `None` to fallback to the default debug output. + #[allow(unused_variables)] + fn debug_program_clause_implication( + pci: &ProgramClauseImplication, + fmt: &mut fmt::Formatter<'_>, + ) -> Option { + None + } + + /// Prints the debug representation of a ProgramClause. + /// Returns `None` to fallback to the default debug output. + #[allow(unused_variables)] + fn debug_program_clause( + clause: &ProgramClause, + fmt: &mut fmt::Formatter<'_>, + ) -> Option { + None + } + + /// Prints the debug representation of a ProgramClauses. + /// Returns `None` to fallback to the default debug output. 
+ #[allow(unused_variables)] + fn debug_program_clauses( + clauses: &ProgramClauses, + fmt: &mut fmt::Formatter<'_>, + ) -> Option { + None + } + + /// Prints the debug representation of a Substitution. + /// Returns `None` to fallback to the default debug output. + #[allow(unused_variables)] + fn debug_substitution( + substitution: &Substitution, + fmt: &mut fmt::Formatter<'_>, + ) -> Option { + None + } + + /// Prints the debug representation of a SeparatorTraitRef. + /// Returns `None` to fallback to the default debug output. + #[allow(unused_variables)] + fn debug_separator_trait_ref( + separator_trait_ref: &SeparatorTraitRef<'_, Self>, + fmt: &mut fmt::Formatter<'_>, + ) -> Option { + None + } + + /// Prints the debug representation of a QuantifiedWhereClauses. + /// Returns `None` to fallback to the default debug output. + #[allow(unused_variables)] + fn debug_quantified_where_clauses( + clauses: &QuantifiedWhereClauses, + fmt: &mut fmt::Formatter<'_>, + ) -> Option { + None + } + + /// Prints the debug representation of a Constraints. + /// Returns `None` to fallback to the default debug output. + #[allow(unused_variables)] + fn debug_constraints( + clauses: &Constraints, + fmt: &mut fmt::Formatter<'_>, + ) -> Option { + None + } + + /// Prints the debug representation of a Variances. + /// Returns `None` to fallback to the default debug output. + #[allow(unused_variables)] + fn debug_variances( + variances: &Variances, + fmt: &mut fmt::Formatter<'_>, + ) -> Option { + None + } + + /// Create an "interned" type from `ty`. This is not normally + /// invoked directly; instead, you invoke `TyKind::intern` (which + /// will ultimately call this method). + fn intern_ty(self, kind: TyKind) -> Self::InternedType; + + /// Lookup the `TyKind` from an interned type. + fn ty_data(self, ty: &Self::InternedType) -> &TyData; + + /// Create an "interned" lifetime from `lifetime`. 
This is not + /// normally invoked directly; instead, you invoke + /// `LifetimeData::intern` (which will ultimately call this + /// method). + fn intern_lifetime(self, lifetime: LifetimeData) -> Self::InternedLifetime; + + /// Lookup the `LifetimeData` that was interned to create a `InternedLifetime`. + fn lifetime_data(self, lifetime: &Self::InternedLifetime) -> &LifetimeData; + + /// Create an "interned" const from `const`. This is not + /// normally invoked directly; instead, you invoke + /// `ConstData::intern` (which will ultimately call this + /// method). + fn intern_const(self, constant: ConstData) -> Self::InternedConst; + + /// Lookup the `ConstData` that was interned to create a `InternedConst`. + fn const_data(self, constant: &Self::InternedConst) -> &ConstData; + + /// Determine whether two concrete const values are equal. + fn const_eq( + self, + ty: &Self::InternedType, + c1: &Self::InternedConcreteConst, + c2: &Self::InternedConcreteConst, + ) -> bool; + + /// Create an "interned" parameter from `data`. This is not + /// normally invoked directly; instead, you invoke + /// `GenericArgData::intern` (which will ultimately call this + /// method). + fn intern_generic_arg(self, data: GenericArgData) -> Self::InternedGenericArg; + + /// Lookup the `LifetimeData` that was interned to create a `InternedLifetime`. + fn generic_arg_data(self, lifetime: &Self::InternedGenericArg) -> &GenericArgData; + + /// Create an "interned" goal from `data`. This is not + /// normally invoked directly; instead, you invoke + /// `GoalData::intern` (which will ultimately call this + /// method). + fn intern_goal(self, data: GoalData) -> Self::InternedGoal; + + /// Lookup the `GoalData` that was interned to create a `InternedGoal`. + fn goal_data(self, goal: &Self::InternedGoal) -> &GoalData; + + /// Create an "interned" goals from `data`. 
This is not + /// normally invoked directly; instead, you invoke + /// `GoalsData::intern` (which will ultimately call this + /// method). + fn intern_goals( + self, + data: impl IntoIterator, E>>, + ) -> Result; + + /// Lookup the `GoalsData` that was interned to create a `InternedGoals`. + fn goals_data(self, goals: &Self::InternedGoals) -> &[Goal]; + + /// Create an "interned" substitution from `data`. This is not + /// normally invoked directly; instead, you invoke + /// `SubstitutionData::intern` (which will ultimately call this + /// method). + fn intern_substitution( + self, + data: impl IntoIterator, E>>, + ) -> Result; + + /// Lookup the `SubstitutionData` that was interned to create a `InternedSubstitution`. + fn substitution_data(self, substitution: &Self::InternedSubstitution) -> &[GenericArg]; + + /// Create an "interned" program clause from `data`. This is not + /// normally invoked directly; instead, you invoke + /// `ProgramClauseData::intern` (which will ultimately call this + /// method). + fn intern_program_clause(self, data: ProgramClauseData) -> Self::InternedProgramClause; + + /// Lookup the `ProgramClauseData` that was interned to create a `ProgramClause`. + fn program_clause_data(self, clause: &Self::InternedProgramClause) -> &ProgramClauseData; + + /// Create an "interned" program clauses from `data`. This is not + /// normally invoked directly; instead, you invoke + /// `ProgramClauses::from_iter` (which will ultimately call this + /// method). + fn intern_program_clauses( + self, + data: impl IntoIterator, E>>, + ) -> Result; + + /// Lookup the `ProgramClauseData` that was interned to create a `ProgramClause`. + fn program_clauses_data(self, clauses: &Self::InternedProgramClauses) + -> &[ProgramClause]; + + /// Create an "interned" quantified where clauses from `data`. This is not + /// normally invoked directly; instead, you invoke + /// `QuantifiedWhereClauses::from_iter` (which will ultimately call this + /// method). 
+ fn intern_quantified_where_clauses( + self, + data: impl IntoIterator, E>>, + ) -> Result; + + /// Lookup the slice of `QuantifiedWhereClause` that was interned to + /// create a `QuantifiedWhereClauses`. + fn quantified_where_clauses_data( + self, + clauses: &Self::InternedQuantifiedWhereClauses, + ) -> &[QuantifiedWhereClause]; + + /// Create an "interned" parameter kinds from `data`. This is not + /// normally invoked directly; instead, you invoke + /// `VariableKinds::from_iter` (which will ultimately call this + /// method). + fn intern_generic_arg_kinds( + self, + data: impl IntoIterator, E>>, + ) -> Result; + + /// Lookup the slice of `VariableKinds` that was interned to + /// create a `VariableKinds`. + fn variable_kinds_data( + self, + variable_kinds: &Self::InternedVariableKinds, + ) -> &[VariableKind]; + + /// Create "interned" variable kinds with universe index from `data`. This is not + /// normally invoked directly; instead, you invoke + /// `CanonicalVarKinds::from_iter` (which will ultimately call this + /// method). + fn intern_canonical_var_kinds( + self, + data: impl IntoIterator, E>>, + ) -> Result; + + /// Lookup the slice of `CanonicalVariableKind` that was interned to + /// create a `CanonicalVariableKinds`. + fn canonical_var_kinds_data( + self, + canonical_var_kinds: &Self::InternedCanonicalVarKinds, + ) -> &[CanonicalVarKind]; + + /// Create "interned" constraints from `data`. This is not + /// normally invoked dirctly; instead, you invoke + /// `Constraints::from_iter` (which will ultimately call this + /// method). + fn intern_constraints( + self, + data: impl IntoIterator>, E>>, + ) -> Result; + + /// Lookup the slice of `Constraint` that was interned to + /// create a `Constraints`. + fn constraints_data( + self, + constraints: &Self::InternedConstraints, + ) -> &[InEnvironment>]; + + /// Create "interned" variances from `data`. 
This is not + /// normally invoked directly; instead, you invoke + /// `Variances::from` (which will ultimately call this + /// method). + fn intern_variances( + self, + data: impl IntoIterator>, + ) -> Result; + + /// Lookup the slice of `Variance` that was interned to + /// create a `Variances`. + fn variances_data(self, variances: &Self::InternedVariances) -> &[Variance]; +} + +/// Implemented by types that have an associated interner (which +/// are virtually all of the types in chalk-ir, for example). +/// This lets us map from a type like `Ty` to the parameter `I`. +/// +/// It's particularly useful for writing `TypeFoldable` impls for generic types like +/// `Binder`, since it allows us to figure out the interner of `T`. +pub trait HasInterner { + /// The interner associated with the type. + type Interner: Interner; +} + +impl HasInterner for [T] { + type Interner = T::Interner; +} + +impl HasInterner for Vec { + type Interner = T::Interner; +} + +impl HasInterner for Box { + type Interner = T::Interner; +} + +impl HasInterner for Arc { + type Interner = T::Interner; +} + +impl HasInterner for &T { + type Interner = T::Interner; +} + +impl HasInterner for PhantomData { + type Interner = I; +} + +impl HasInterner for (A, B) +where + A: HasInterner, + B: HasInterner, + I: Interner, +{ + type Interner = I; +} + +impl HasInterner for (A, B, C) +where + A: HasInterner, + B: HasInterner, + C: HasInterner, + I: Interner, +{ + type Interner = I; +} + +impl<'a, T: HasInterner> HasInterner for std::slice::Iter<'a, T> { + type Interner = T::Interner; +} diff --git a/chalk-ir/src/lib.rs b/chalk-ir/src/lib.rs index 44d2302b320..7763738264a 100644 --- a/chalk-ir/src/lib.rs +++ b/chalk-ir/src/lib.rs @@ -1,23 +1,62 @@ -#![feature(crate_visibility_modifier)] -#![feature(specialization)] -#![feature(non_modrs_mods)] - -use cast::Cast; -use chalk_engine::fallible::*; -use fold::shift::Shift; -use fold::{ - DefaultInferenceFolder, DefaultPlaceholderFolder, DefaultTypeFolder, 
Fold, FreeVarFolder, -}; -use lalrpop_intern::InternedString; -use std::collections::BTreeSet; -use std::iter; -use std::sync::Arc; +//! Defines the IR for types and logical predicates. -extern crate chalk_engine; -extern crate lalrpop_intern; +#![deny(rust_2018_idioms)] +#![warn(missing_docs)] -#[macro_use] -mod macros; +// Allows macros to refer to this crate as `::chalk_ir` +extern crate self as chalk_ir; + +use crate::cast::{Cast, CastTo, Caster}; +use crate::fold::shift::Shift; +use crate::fold::{FallibleTypeFolder, Subst, TypeFoldable, TypeFolder, TypeSuperFoldable}; +use crate::visit::{TypeSuperVisitable, TypeVisitable, TypeVisitor, VisitExt}; +use chalk_derive::{ + FallibleTypeFolder, HasInterner, TypeFoldable, TypeSuperVisitable, TypeVisitable, Zip, +}; +use std::marker::PhantomData; +use std::ops::ControlFlow; + +pub use crate::debug::SeparatorTraitRef; +#[macro_use(bitflags)] +extern crate bitflags; +/// Uninhabited (empty) type, used in combination with `PhantomData`. +#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub enum Void {} + +/// Many of our internal operations (e.g., unification) are an attempt +/// to perform some operation which may not complete. +pub type Fallible = Result; + +/// A combination of `Fallible` and `Floundered`. +pub enum FallibleOrFloundered { + /// Success + Ok(T), + /// No solution. See `chalk_ir::NoSolution`. + NoSolution, + /// Floundered. See `chalk_ir::Floundered`. + Floundered, +} + +/// Indicates that the attempted operation has "no solution" -- i.e., +/// cannot be performed. +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct NoSolution; + +/// Indicates that the complete set of program clauses for this goal +/// cannot be enumerated. +pub struct Floundered; + +macro_rules! 
impl_debugs { + ($($id:ident), *) => { + $( + impl std::fmt::Debug for $id { + fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { + write!(fmt, "{}({:?})", stringify!($id), self.0) + } + } + )* + }; +} #[macro_use] pub mod zip; @@ -25,64 +64,167 @@ pub mod zip; #[macro_use] pub mod fold; +#[macro_use] +pub mod visit; + pub mod cast; +pub mod interner; +use interner::{HasInterner, Interner}; + pub mod could_match; pub mod debug; -pub mod tls; -pub type Identifier = InternedString; +/// Variance +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +pub enum Variance { + /// a <: b + Covariant, + /// a == b + Invariant, + /// b <: a + Contravariant, +} -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct ProgramEnvironment { - /// Indicates whether a given trait has coinductive semantics -- - /// at present, this is true only for auto traits. - pub coinductive_traits: BTreeSet, +impl Variance { + /// `a.xform(b)` combines the variance of a context with the + /// variance of a type with the following meaning. If we are in a + /// context with variance `a`, and we encounter a type argument in + /// a position with variance `b`, then `a.xform(b)` is the new + /// variance with which the argument appears. + /// + /// Example 1: + /// + /// ```ignore + /// *mut Vec + /// ``` + /// + /// Here, the "ambient" variance starts as covariant. `*mut T` is + /// invariant with respect to `T`, so the variance in which the + /// `Vec` appears is `Covariant.xform(Invariant)`, which + /// yields `Invariant`. Now, the type `Vec` is covariant with + /// respect to its type argument `T`, and hence the variance of + /// the `i32` here is `Invariant.xform(Covariant)`, which results + /// (again) in `Invariant`. + /// + /// Example 2: + /// + /// ```ignore + /// fn(*const Vec, *mut Vec` appears is + /// `Contravariant.xform(Covariant)` or `Contravariant`. The same + /// is true for its `i32` argument. 
In the `*mut T` case, the + /// variance of `Vec` is `Contravariant.xform(Invariant)`, + /// and hence the outermost type is `Invariant` with respect to + /// `Vec` (and its `i32` argument). + /// + /// Source: Figure 1 of "Taming the Wildcards: + /// Combining Definition- and Use-Site Variance" published in PLDI'11. + /// (Doc from rustc) + pub fn xform(self, other: Variance) -> Variance { + match (self, other) { + (Variance::Invariant, _) => Variance::Invariant, + (_, Variance::Invariant) => Variance::Invariant, + (_, Variance::Covariant) => self, + (Variance::Covariant, Variance::Contravariant) => Variance::Contravariant, + (Variance::Contravariant, Variance::Contravariant) => Variance::Covariant, + } + } - /// Compiled forms of the above: - pub program_clauses: Vec, + /// Converts `Covariant` into `Contravariant` and vice-versa. `Invariant` + /// stays the same. + pub fn invert(self) -> Variance { + match self { + Variance::Invariant => Variance::Invariant, + Variance::Covariant => Variance::Contravariant, + Variance::Contravariant => Variance::Covariant, + } + } } -#[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] +#[derive(Clone, PartialEq, Eq, Hash, TypeFoldable, TypeVisitable, HasInterner)] /// The set of assumptions we've made so far, and the current number of /// universal (forall) quantifiers we're within. -pub struct Environment { - pub clauses: Vec, +pub struct Environment { + /// The clauses in the environment. + pub clauses: ProgramClauses, } -impl Environment { - pub fn new() -> Arc { - Arc::new(Environment { clauses: vec![] }) +impl Copy for Environment where I::InternedProgramClauses: Copy {} + +impl Environment { + /// Creates a new environment. + pub fn new(interner: I) -> Self { + Environment { + clauses: ProgramClauses::empty(interner), + } } - pub fn add_clauses(&self, clauses: I) -> Arc + /// Adds (an iterator of) clauses to the environment. 
+ pub fn add_clauses(&self, interner: I, clauses: II) -> Self where - I: IntoIterator, + II: IntoIterator>, { let mut env = self.clone(); - let env_clauses: BTreeSet<_> = env.clauses.into_iter().chain(clauses).collect(); - env.clauses = env_clauses.into_iter().collect(); - Arc::new(env) + env.clauses = + ProgramClauses::from_iter(interner, env.clauses.iter(interner).cloned().chain(clauses)); + env + } + + /// True if any of the clauses in the environment have a consequence of `Compatible`. + /// Panics if the conditions or constraints of that clause are not empty. + pub fn has_compatible_clause(&self, interner: I) -> bool { + self.clauses.as_slice(interner).iter().any(|c| { + let ProgramClauseData(implication) = c.data(interner); + match implication.skip_binders().consequence { + DomainGoal::Compatible => { + // We currently don't generate `Compatible` with any conditions or constraints + // If this was needed, for whatever reason, then a third "yes, but must evaluate" + // return value would have to be added. + assert!(implication.skip_binders().conditions.is_empty(interner)); + assert!(implication.skip_binders().constraints.is_empty(interner)); + true + } + _ => false, + } + }) } } -#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub struct InEnvironment { - pub environment: Arc, +/// A goal with an environment to solve it in. +#[derive(Clone, Debug, PartialEq, Eq, Hash, TypeFoldable, TypeVisitable)] +#[allow(missing_docs)] +pub struct InEnvironment { + pub environment: Environment, pub goal: G, } -impl InEnvironment { - pub fn new(environment: &Arc, goal: G) -> Self { +impl + Copy, I: Interner> Copy for InEnvironment where + I::InternedProgramClauses: Copy +{ +} + +impl InEnvironment { + /// Creates a new environment/goal pair. + pub fn new(environment: &Environment, goal: G) -> Self { InEnvironment { environment: environment.clone(), goal, } } + /// Maps the goal without touching the environment. 
pub fn map(self, op: OP) -> InEnvironment where OP: FnOnce(G) -> H, + H: HasInterner, { InEnvironment { environment: self.environment, @@ -91,43 +233,101 @@ impl InEnvironment { } } -#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub enum TypeName { - /// a type like `Vec` - ItemId(ItemId), +impl HasInterner for InEnvironment { + type Interner = G::Interner; +} - /// instantiated form a universally quantified type, e.g., from - /// `forall { .. }`. Stands in as a representative of "some - /// unknown type". - Placeholder(PlaceholderIndex), +/// Different signed int types. +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[allow(missing_docs)] +pub enum IntTy { + Isize, + I8, + I16, + I32, + I64, + I128, +} - /// an associated type like `Iterator::Item`; see `AssociatedType` for details - AssociatedType(ItemId), +/// Different unsigned int types. +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[allow(missing_docs)] +pub enum UintTy { + Usize, + U8, + U16, + U32, + U64, + U128, +} + +/// Different kinds of float types. +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[allow(missing_docs)] +pub enum FloatTy { + F16, + F32, + F64, + F128, +} + +/// Types of scalar values. +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[allow(missing_docs)] +pub enum Scalar { + Bool, + Char, + Int(IntTy), + Uint(UintTy), + Float(FloatTy), +} + +/// Whether a function is safe or not. +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub enum Safety { + /// Safe + Safe, + /// Unsafe + Unsafe, +} + +/// Whether a type is mutable or not. +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub enum Mutability { + /// Mutable + Mut, + /// Immutable + Not, } /// An universe index is how a universally quantified parameter is /// represented when it's binder is moved into the environment. 
/// An example chain of transformations would be: -/// `forall { Goal(T) }` (syntatical representation) +/// `forall { Goal(T) }` (syntactical representation) /// `forall { Goal(?0) }` (used a DeBruijn index) /// `Goal(!U1)` (the quantifier was moved to the environment and replaced with a universe index) -/// See https://rust-lang-nursery.github.io/rustc-guide/mir/regionck.html#skol for more. +/// See for more. #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct UniverseIndex { + /// The counter for the universe index, starts with 0. pub counter: usize, } impl UniverseIndex { + /// Root universe index (0). pub const ROOT: UniverseIndex = UniverseIndex { counter: 0 }; + /// Root universe index (0). pub fn root() -> UniverseIndex { Self::ROOT } + /// Whether one universe can "see" another. pub fn can_see(self, ui: UniverseIndex) -> bool { self.counter >= ui.counter } + /// Increases the index counter. pub fn next(self) -> UniverseIndex { UniverseIndex { counter: self.counter + 1, @@ -135,328 +335,1459 @@ impl UniverseIndex { } } -#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct ItemId { - pub index: usize, +/// Maps the universes found in the `u_canonicalize` result (the +/// "canonical" universes) to the universes found in the original +/// value (and vice versa). When used as a folder -- i.e., from +/// outside this module -- converts from "canonical" universes to the +/// original (but see the `UMapToCanonical` folder). +#[derive(Clone, Debug)] +pub struct UniverseMap { + /// A reverse map -- for each universe Ux that appears in + /// `quantified`, the corresponding universe in the original was + /// `universes[x]`. + pub universes: Vec, } -#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub enum TypeSort { - Struct, - Trait, +impl UniverseMap { + /// Creates a new universe map. 
+ pub fn new() -> Self { + UniverseMap { + universes: vec![UniverseIndex::root()], + } + } + + /// Number of canonical universes. + pub fn num_canonical_universes(&self) -> usize { + self.universes.len() + } } -#[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub enum Ty { - Apply(ApplicationTy), - Projection(ProjectionTy), - UnselectedProjection(UnselectedProjectionTy), - ForAll(Box), +/// The id for an Abstract Data Type (i.e. structs, unions and enums). +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct AdtId(pub I::InternedAdtId); + +/// The id of a trait definition; could be used to load the trait datum by +/// invoking the [`trait_datum`] method. +/// +/// [`trait_datum`]: ../chalk_solve/trait.RustIrDatabase.html#tymethod.trait_datum +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct TraitId(pub I::DefId); - /// References the binding at the given depth (deBruijn index - /// style). - BoundVar(usize), +/// The id for an impl. +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct ImplId(pub I::DefId); - /// Inference variable. - InferenceVar(InferenceVar), +/// Id for a specific clause. +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct ClauseId(pub I::DefId); + +/// The id for the associated type member of a trait. The details of the type +/// can be found by invoking the [`associated_ty_data`] method. +/// +/// [`associated_ty_data`]: ../chalk_solve/trait.RustIrDatabase.html#tymethod.associated_ty_data +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct AssocTypeId(pub I::DefId); + +/// Id for an opaque type. +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct OpaqueTyId(pub I::DefId); + +/// Function definition id. +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct FnDefId(pub I::DefId); + +/// Id for Rust closures. 
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct ClosureId(pub I::DefId); + +/// Id for Rust coroutines. +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct CoroutineId(pub I::DefId); + +/// Id for foreign types. +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct ForeignDefId(pub I::DefId); + +impl_debugs!(ImplId, ClauseId); + +/// A Rust type. The actual type data is stored in `TyKind`. +#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, HasInterner)] +pub struct Ty { + interned: I::InternedType, } -impl Ty { - /// If this is a `Ty::BoundVar(d)`, returns `Some(d)` else `None`. - pub fn bound(&self) -> Option { - if let Ty::BoundVar(depth) = *self { - Some(depth) +impl Ty { + /// Creates a type from `TyKind`. + pub fn new(interner: I, data: impl CastTo>) -> Self { + let ty_kind = data.cast(interner); + Ty { + interned: I::intern_ty(interner, ty_kind), + } + } + + /// Gets the interned type. + pub fn interned(&self) -> &I::InternedType { + &self.interned + } + + /// Gets the underlying type data. + pub fn data(&self, interner: I) -> &TyData { + I::ty_data(interner, &self.interned) + } + + /// Gets the underlying type kind. + pub fn kind(&self, interner: I) -> &TyKind { + &I::ty_data(interner, &self.interned).kind + } + + /// Creates a `FromEnv` constraint using this type. + pub fn from_env(&self) -> FromEnv { + FromEnv::Ty(self.clone()) + } + + /// Creates a WF-constraint for this type. + pub fn well_formed(&self) -> WellFormed { + WellFormed::Ty(self.clone()) + } + + /// Creates a domain goal `FromEnv(T)` where `T` is this type. + pub fn into_from_env_goal(self, interner: I) -> DomainGoal { + self.from_env().cast(interner) + } + + /// If this is a `TyKind::BoundVar(d)`, returns `Some(d)` else `None`. 
+ pub fn bound_var(&self, interner: I) -> Option { + if let TyKind::BoundVar(bv) = self.kind(interner) { + Some(*bv) } else { None } } - /// If this is a `Ty::InferenceVar(d)`, returns `Some(d)` else `None`. - pub fn inference_var(&self) -> Option { - if let Ty::InferenceVar(depth) = *self { - Some(depth) + /// If this is a `TyKind::InferenceVar(d)`, returns `Some(d)` else `None`. + pub fn inference_var(&self, interner: I) -> Option { + if let TyKind::InferenceVar(depth, _) = self.kind(interner) { + Some(*depth) } else { None } } - pub fn as_projection_ty_enum(&self) -> ProjectionTyRefEnum { - match *self { - Ty::Projection(ref proj) => ProjectionTyEnum::Selected(proj), - Ty::UnselectedProjection(ref proj) => ProjectionTyEnum::Unselected(proj), - _ => panic!("{:?} is not a projection", self), + /// Returns true if this is a `BoundVar` or an `InferenceVar` of `TyVariableKind::General`. + pub fn is_general_var(&self, interner: I, binders: &CanonicalVarKinds) -> bool { + match self.kind(interner) { + TyKind::BoundVar(bv) + if bv.debruijn == DebruijnIndex::INNERMOST + && binders.at(interner, bv.index).kind + == VariableKind::Ty(TyVariableKind::General) => + { + true + } + TyKind::InferenceVar(_, TyVariableKind::General) => true, + _ => false, } } - pub fn is_projection(&self) -> bool { - match *self { - Ty::Projection(..) | Ty::UnselectedProjection(..) => true, - _ => false, + /// Returns true if this is an `Alias`. + pub fn is_alias(&self, interner: I) -> bool { + matches!(self.kind(interner), TyKind::Alias(..)) + } + + /// Returns true if this is an `IntTy` or `UintTy`. + pub fn is_integer(&self, interner: I) -> bool { + matches!( + self.kind(interner), + TyKind::Scalar(Scalar::Int(_) | Scalar::Uint(_)) + ) + } + + /// Returns true if this is a `FloatTy`. 
+ pub fn is_float(&self, interner: I) -> bool { + matches!(self.kind(interner), TyKind::Scalar(Scalar::Float(_))) + } + + /// Returns `Some(adt_id)` if this is an ADT, `None` otherwise + pub fn adt_id(&self, interner: I) -> Option> { + match self.kind(interner) { + TyKind::Adt(adt_id, _) => Some(*adt_id), + _ => None, } } /// True if this type contains "bound" types/lifetimes, and hence /// needs to be shifted across binders. This is a very inefficient /// check, intended only for debug assertions, because I am lazy. - pub fn needs_shift(&self) -> bool { - *self != self.shifted_in(1) + pub fn needs_shift(&self, interner: I) -> bool { + self.has_free_vars(interner) } } -#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub struct InferenceVar { - index: u32, +/// Contains the data for a Ty +#[derive(Clone, PartialEq, Eq, Hash, HasInterner)] +pub struct TyData { + /// The kind + pub kind: TyKind, + /// Type flags + pub flags: TypeFlags, } -impl From for InferenceVar { - fn from(index: u32) -> InferenceVar { - InferenceVar { index } +bitflags! 
{ + /// Contains flags indicating various properties of a Ty + #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] + pub struct TypeFlags : u16 { + /// Does the type contain an InferenceVar + const HAS_TY_INFER = 1; + /// Does the type contain a lifetime with an InferenceVar + const HAS_RE_INFER = 1 << 1; + /// Does the type contain a ConstValue with an InferenceVar + const HAS_CT_INFER = 1 << 2; + /// Does the type contain a Placeholder TyKind + const HAS_TY_PLACEHOLDER = 1 << 3; + /// Does the type contain a lifetime with a Placeholder + const HAS_RE_PLACEHOLDER = 1 << 4; + /// Does the type contain a ConstValue Placeholder + const HAS_CT_PLACEHOLDER = 1 << 5; + /// True when the type has free lifetimes related to a local context + const HAS_FREE_LOCAL_REGIONS = 1 << 6; + /// Does the type contain a projection of an associated type + const HAS_TY_PROJECTION = 1 << 7; + /// Does the type contain an opaque type + const HAS_TY_OPAQUE = 1 << 8; + /// Does the type contain an unevaluated const projection + const HAS_CT_PROJECTION = 1 << 9; + /// Does the type contain an error + const HAS_ERROR = 1 << 10; + /// Does the type contain an error lifetime + const HAS_RE_ERROR = 1 << 11; + /// Does the type contain any free lifetimes + const HAS_FREE_REGIONS = 1 << 12; + /// True when the type contains lifetimes that will be substituted when function is called + const HAS_RE_LATE_BOUND = 1 << 13; + /// True when the type contains an erased lifetime + const HAS_RE_ERASED = 1 << 14; + /// Does the type contain placeholders or inference variables that could be replaced later + const STILL_FURTHER_SPECIALIZABLE = 1 << 15; + + /// True when the type contains free names local to a particular context + const HAS_FREE_LOCAL_NAMES = TypeFlags::HAS_TY_INFER.bits() + | TypeFlags::HAS_CT_INFER.bits() + | TypeFlags::HAS_TY_PLACEHOLDER.bits() + | TypeFlags::HAS_CT_PLACEHOLDER.bits() + | TypeFlags::HAS_FREE_LOCAL_REGIONS.bits(); + + /// Does the type contain any form of projection + const 
HAS_PROJECTION = TypeFlags::HAS_TY_PROJECTION.bits() + | TypeFlags::HAS_TY_OPAQUE.bits() + | TypeFlags::HAS_CT_PROJECTION.bits(); } } +/// Type data, which holds the actual type information. +#[derive(Clone, PartialEq, Eq, Hash, HasInterner)] +pub enum TyKind { + /// Abstract data types, i.e., structs, unions, or enumerations. + /// For example, a type like `Vec`. + Adt(AdtId, Substitution), -impl InferenceVar { - pub fn index(self) -> u32 { - self.index - } + /// an associated type like `Iterator::Item`; see `AssociatedType` for details + AssociatedType(AssocTypeId, Substitution), - pub fn to_ty(self) -> Ty { - Ty::InferenceVar(self) - } + /// a scalar type like `bool` or `u32` + Scalar(Scalar), - pub fn to_lifetime(self) -> Lifetime { - Lifetime::InferenceVar(self) - } -} + /// a tuple of the given arity + Tuple(usize, Substitution), -/// for<'a...'z> X -- all binders are instantiated at once, -/// and we use deBruijn indices within `self.ty` -#[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub struct QuantifiedTy { - pub num_binders: usize, - pub ty: Ty, -} + /// an array type like `[T; N]` + Array(Ty, Const), -#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub enum Lifetime { - /// See Ty::Var(_). - BoundVar(usize), - InferenceVar(InferenceVar), - Placeholder(PlaceholderIndex), -} + /// a slice type like `[T]` + Slice(Ty), -impl Lifetime { - /// If this is a `Lifetime::InferenceVar(d)`, returns `Some(d)` else `None`. - pub fn inference_var(&self) -> Option { - if let Lifetime::InferenceVar(depth) = *self { - Some(depth) - } else { - None - } - } + /// a raw pointer type like `*const T` or `*mut T` + Raw(Mutability, Ty), - /// True if this lifetime is a "bound" lifetime, and hence - /// needs to be shifted across binders. Meant for debug assertions. 
- pub fn needs_shift(&self) -> bool { - match self { - Lifetime::BoundVar(_) => true, - Lifetime::InferenceVar(_) => false, - Lifetime::Placeholder(_) => false, - } - } + /// a reference type like `&T` or `&mut T` + Ref(Mutability, Lifetime, Ty), + + /// a placeholder for opaque types like `impl Trait` + OpaqueType(OpaqueTyId, Substitution), + + /// a function definition + FnDef(FnDefId, Substitution), + + /// the string primitive type + Str, + + /// the never type `!` + Never, + + /// A closure. + Closure(ClosureId, Substitution), + + /// A coroutine. + Coroutine(CoroutineId, Substitution), + + /// A coroutine witness. + CoroutineWitness(CoroutineId, Substitution), + + /// foreign types + Foreign(ForeignDefId), + + /// This can be used to represent an error, e.g. during name resolution of a type. + /// Chalk itself will not produce this, just pass it through when given. + Error, + + /// instantiated from a universally quantified type, e.g., from + /// `forall { .. }`. Stands in as a representative of "some + /// unknown type". + Placeholder(PlaceholderIndex), + + /// A "dyn" type is a trait object type created via the "dyn Trait" syntax. + /// In the chalk parser, the traits that the object represents is parsed as + /// a QuantifiedInlineBound, and is then changed to a list of where clauses + /// during lowering. + /// + /// See the `Opaque` variant for a discussion about the use of + /// binders here. + Dyn(DynTy), + + /// An "alias" type represents some form of type alias, such as: + /// - An associated type projection like `::Item` + /// - `impl Trait` types + /// - Named type aliases like `type Foo = Vec` + Alias(AliasTy), + + /// A function type such as `for<'a> fn(&'a u32)`. + /// Note that "higher-ranked" types (starting with `for<>`) are either + /// function types or dyn types, and do not appear otherwise in Rust + /// surface syntax. + Function(FnPointer), + + /// References the binding at the given depth. 
The index is a [de + /// Bruijn index], so it counts back through the in-scope binders. + BoundVar(BoundVar), + + /// Inference variable defined in the current inference context. + InferenceVar(InferenceVar, TyVariableKind), } -/// Index of an universally quantified parameter in the environment. -/// Two indexes are required, the one of the universe itself -/// and the relative index inside the universe. -#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct PlaceholderIndex { - /// Index *of* the universe. - pub ui: UniverseIndex, - /// Index *in* the universe. - pub idx: usize, +impl Copy for TyKind +where + I::InternedLifetime: Copy, + I::InternedSubstitution: Copy, + I::InternedVariableKinds: Copy, + I::InternedQuantifiedWhereClauses: Copy, + I::InternedType: Copy, + I::InternedConst: Copy, +{ } -impl PlaceholderIndex { - pub fn to_lifetime(self) -> Lifetime { - Lifetime::Placeholder(self) +impl TyKind { + /// Casts the type data to a type. + pub fn intern(self, interner: I) -> Ty { + Ty::new(interner, self) } - pub fn to_ty(self) -> Ty { - Ty::Apply(ApplicationTy { - name: TypeName::Placeholder(self), - parameters: vec![], - }) + /// Compute type flags for a TyKind + pub fn compute_flags(&self, interner: I) -> TypeFlags { + match self { + TyKind::Adt(_, substitution) + | TyKind::AssociatedType(_, substitution) + | TyKind::Tuple(_, substitution) + | TyKind::Closure(_, substitution) + | TyKind::Coroutine(_, substitution) + | TyKind::CoroutineWitness(_, substitution) + | TyKind::FnDef(_, substitution) + | TyKind::OpaqueType(_, substitution) => substitution.compute_flags(interner), + TyKind::Scalar(_) | TyKind::Str | TyKind::Never | TyKind::Foreign(_) => { + TypeFlags::empty() + } + TyKind::Error => TypeFlags::HAS_ERROR, + TyKind::Slice(ty) | TyKind::Raw(_, ty) => ty.data(interner).flags, + TyKind::Ref(_, lifetime, ty) => { + lifetime.compute_flags(interner) | ty.data(interner).flags + } + TyKind::Array(ty, const_ty) => { + let flags = 
ty.data(interner).flags; + let const_data = const_ty.data(interner); + flags + | const_data.ty.data(interner).flags + | match const_data.value { + ConstValue::BoundVar(_) | ConstValue::Concrete(_) => TypeFlags::empty(), + ConstValue::InferenceVar(_) => { + TypeFlags::HAS_CT_INFER | TypeFlags::STILL_FURTHER_SPECIALIZABLE + } + ConstValue::Placeholder(_) => { + TypeFlags::HAS_CT_PLACEHOLDER | TypeFlags::STILL_FURTHER_SPECIALIZABLE + } + } + } + TyKind::Placeholder(_) => TypeFlags::HAS_TY_PLACEHOLDER, + TyKind::Dyn(dyn_ty) => { + let lifetime_flags = dyn_ty.lifetime.compute_flags(interner); + let mut dyn_flags = TypeFlags::empty(); + for var_kind in dyn_ty.bounds.skip_binders().iter(interner) { + match &(var_kind.skip_binders()) { + WhereClause::Implemented(trait_ref) => { + dyn_flags |= trait_ref.substitution.compute_flags(interner) + } + WhereClause::AliasEq(alias_eq) => { + dyn_flags |= alias_eq.alias.compute_flags(interner); + dyn_flags |= alias_eq.ty.data(interner).flags; + } + WhereClause::LifetimeOutlives(lifetime_outlives) => { + dyn_flags |= lifetime_outlives.a.compute_flags(interner) + | lifetime_outlives.b.compute_flags(interner); + } + WhereClause::TypeOutlives(type_outlives) => { + dyn_flags |= type_outlives.ty.data(interner).flags; + dyn_flags |= type_outlives.lifetime.compute_flags(interner); + } + } + } + lifetime_flags | dyn_flags + } + TyKind::Alias(alias_ty) => alias_ty.compute_flags(interner), + TyKind::BoundVar(_) => TypeFlags::empty(), + TyKind::InferenceVar(_, _) => TypeFlags::HAS_TY_INFER, + TyKind::Function(fn_pointer) => fn_pointer.substitution.0.compute_flags(interner), + } } } -#[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub struct ApplicationTy { - pub name: TypeName, - pub parameters: Vec, +/// Identifies a particular bound variable within a binder. +/// Variables are identified by the combination of a [`DebruijnIndex`], +/// which identifies the *binder*, and an index within that binder. 
+/// +/// Consider this case: +/// +/// ```ignore +/// forall<'a, 'b> { forall<'c, 'd> { ... } } +/// ``` +/// +/// Within the `...` term: +/// +/// * the variable `'a` have a debruijn index of 1 and index 0 +/// * the variable `'b` have a debruijn index of 1 and index 1 +/// * the variable `'c` have a debruijn index of 0 and index 0 +/// * the variable `'d` have a debruijn index of 0 and index 1 +/// +/// The variables `'a` and `'b` both have debruijn index of 1 because, +/// counting out, they are the 2nd binder enclosing `...`. The indices +/// identify the location *within* that binder. +/// +/// The variables `'c` and `'d` both have debruijn index of 0 because +/// they appear in the *innermost* binder enclosing the `...`. The +/// indices identify the location *within* that binder. +#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] +pub struct BoundVar { + /// Debruijn index, which identifies the binder. + pub debruijn: DebruijnIndex, + /// Index within the binder. + pub index: usize, } -impl ApplicationTy { - pub fn type_parameters<'a>(&'a self) -> impl Iterator + 'a { - self.parameters - .iter() - .cloned() - .filter_map(|p| p.ty()) +impl BoundVar { + /// Creates a new bound variable. + pub fn new(debruijn: DebruijnIndex, index: usize) -> Self { + Self { debruijn, index } } - pub fn first_type_parameter(&self) -> Option { - self.type_parameters().next() + /// Casts the bound variable to a type. + pub fn to_ty(self, interner: I) -> Ty { + TyKind::::BoundVar(self).intern(interner) } - pub fn len_type_parameters(&self) -> usize { - self.type_parameters().count() + /// Wrap the bound variable in a lifetime. 
+ pub fn to_lifetime(self, interner: I) -> Lifetime { + LifetimeData::::BoundVar(self).intern(interner) } -} - -#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub enum ParameterKind { - Ty(T), - Lifetime(L), -} -impl ParameterKind { - pub fn into_inner(self) -> T { - match self { - ParameterKind::Ty(t) => t, - ParameterKind::Lifetime(t) => t, + /// Wraps the bound variable in a constant. + pub fn to_const(self, interner: I, ty: Ty) -> Const { + ConstData { + ty, + value: ConstValue::::BoundVar(self), } + .intern(interner) } - pub fn map(self, op: OP) -> ParameterKind - where - OP: FnOnce(T) -> U, - { - match self { - ParameterKind::Ty(t) => ParameterKind::Ty(op(t)), - ParameterKind::Lifetime(t) => ParameterKind::Lifetime(op(t)), - } + /// True if this variable is bound within the `amount` innermost binders. + pub fn bound_within(self, outer_binder: DebruijnIndex) -> bool { + self.debruijn.within(outer_binder) } -} -impl ParameterKind { - pub fn assert_ty_ref(&self) -> &T { - self.as_ref().ty().unwrap() + /// Adjusts the debruijn index (see [`DebruijnIndex::shifted_in`]). + #[must_use] + pub fn shifted_in(self) -> Self { + BoundVar::new(self.debruijn.shifted_in(), self.index) } - pub fn assert_lifetime_ref(&self) -> &L { - self.as_ref().lifetime().unwrap() + /// Adjusts the debruijn index (see [`DebruijnIndex::shifted_in`]). + #[must_use] + pub fn shifted_in_from(self, outer_binder: DebruijnIndex) -> Self { + BoundVar::new(self.debruijn.shifted_in_from(outer_binder), self.index) } - pub fn as_ref(&self) -> ParameterKind<&T, &L> { - match *self { - ParameterKind::Ty(ref t) => ParameterKind::Ty(t), - ParameterKind::Lifetime(ref l) => ParameterKind::Lifetime(l), - } + /// Adjusts the debruijn index (see [`DebruijnIndex::shifted_in`]). 
+ #[must_use] + pub fn shifted_out(self) -> Option { + self.debruijn + .shifted_out() + .map(|db| BoundVar::new(db, self.index)) } - pub fn is_ty(&self) -> bool { - match self { - ParameterKind::Ty(_) => true, - ParameterKind::Lifetime(_) => false, - } + /// Adjusts the debruijn index (see [`DebruijnIndex::shifted_in`]). + #[must_use] + pub fn shifted_out_to(self, outer_binder: DebruijnIndex) -> Option { + self.debruijn + .shifted_out_to(outer_binder) + .map(|db| BoundVar::new(db, self.index)) } - pub fn ty(self) -> Option { - match self { - ParameterKind::Ty(t) => Some(t), - _ => None, - } + /// Return the index of the bound variable, but only if it is bound + /// at the innermost binder. Otherwise, returns `None`. + pub fn index_if_innermost(self) -> Option { + self.index_if_bound_at(DebruijnIndex::INNERMOST) } - pub fn lifetime(self) -> Option { - match self { - ParameterKind::Lifetime(t) => Some(t), - _ => None, + /// Return the index of the bound variable, but only if it is bound + /// at the innermost binder. Otherwise, returns `None`. + pub fn index_if_bound_at(self, debruijn: DebruijnIndex) -> Option { + if self.debruijn == debruijn { + Some(self.index) + } else { + None } } } -pub type Parameter = ParameterKind; - -#[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub struct ProjectionTy { - pub associated_ty_id: ItemId, - pub parameters: Vec, -} - -#[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub struct UnselectedProjectionTy { - pub type_name: Identifier, - pub parameters: Vec, +/// References the binder at the given depth. The index is a [de +/// Bruijn index], so it counts back through the in-scope binders, +/// with 0 being the innermost binder. This is used in impls and +/// the like. For example, if we had a rule like `for { (T: +/// Clone) :- (T: Copy) }`, then `T` would be represented as a +/// `BoundVar(0)` (as the `for` is the innermost binder). 
+/// +/// [de Bruijn index]: https://en.wikipedia.org/wiki/De_Bruijn_index +#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] +pub struct DebruijnIndex { + depth: u32, } -#[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub enum ProjectionTyEnum { - Selected(S), - Unselected(U), -} +impl DebruijnIndex { + /// Innermost index. + pub const INNERMOST: DebruijnIndex = DebruijnIndex { depth: 0 }; + /// One level higher than the innermost index. + pub const ONE: DebruijnIndex = DebruijnIndex { depth: 1 }; -pub type ProjectionTyRefEnum<'a> = ProjectionTyEnum<&'a ProjectionTy, &'a UnselectedProjectionTy>; + /// Creates a new de Bruijn index with a given depth. + pub fn new(depth: u32) -> Self { + DebruijnIndex { depth } + } -#[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub struct TraitRef { - pub trait_id: ItemId, - pub parameters: Vec, -} + /// Depth of the De Bruijn index, counting from 0 starting with + /// the innermost binder. + pub fn depth(self) -> u32 { + self.depth + } -impl TraitRef { - pub fn type_parameters<'a>(&'a self) -> impl Iterator + 'a { - self.parameters - .iter() - .cloned() - .filter_map(|p| p.ty()) + /// True if the binder identified by this index is within the + /// binder identified by the index `outer_binder`. + /// + /// # Example + /// + /// Imagine you have the following binders in scope + /// + /// ```ignore + /// forall forall forall + /// ``` + /// + /// then the Debruijn index for `c` would be `0`, the index for + /// `b` would be 1, and so on. Now consider the following calls: + /// + /// * `c.within(a) = true` + /// * `b.within(a) = true` + /// * `a.within(a) = false` + /// * `a.within(c) = false` + pub fn within(self, outer_binder: DebruijnIndex) -> bool { + self < outer_binder } -} -/// Where clauses that can be written by a Rust programmer. 
-#[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub enum WhereClause { - Implemented(TraitRef), - ProjectionEq(ProjectionEq), -} + /// Returns the resulting index when this value is moved into + /// through one binder. + #[must_use] + pub fn shifted_in(self) -> DebruijnIndex { + self.shifted_in_from(DebruijnIndex::ONE) + } -#[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Debug)] -pub struct Derefs { - pub source: Ty, - pub target: Ty, -} + /// Update this index in place by shifting it "in" through + /// `amount` number of binders. + pub fn shift_in(&mut self) { + *self = self.shifted_in(); + } -#[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub enum WellFormed { - /// A predicate which is true is some trait ref is well-formed. - /// For example, given the following trait definitions: + /// Adds `outer_binder` levels to the `self` index. Intuitively, this + /// shifts the `self` index, which was valid at the outer binder, + /// so that it is valid at the innermost binder. /// - /// ```notrust - /// trait Clone { ... } - /// trait Copy where Self: Clone { ... } + /// Example: Assume that the following binders are in scope: + /// + /// ```ignore + /// for for for for + /// ^ outer binder /// ``` /// - /// then we have the following rule: + /// Assume further that the `outer_binder` argument is 2, + /// which means that it is referring to the `for` binder + /// (since `D` would be the innermost binder). /// - /// ```notrust - /// WellFormed(?Self: Copy) :- ?Self: Copy, WellFormed(?Self: Clone) - /// ``` - Trait(TraitRef), + /// This means that `self` is relative to the binder `B` -- so + /// if `self` is 0 (`INNERMOST`), then it refers to `B`, + /// and if `self` is 1, then it refers to `A`. 
+ /// + /// We will return as follows: + /// + /// * `0.shifted_in_from(2) = 2` -- i.e., `B`, when shifted in to the binding level `D`, has index 2 + /// * `1.shifted_in_from(2) = 3` -- i.e., `A`, when shifted in to the binding level `D`, has index 3 + /// * `2.shifted_in_from(1) = 3` -- here, we changed the `outer_binder` to refer to `C`. + /// Therefore `2` (relative to `C`) refers to `A`, so the result is still 3 (since `A`, relative to the + /// innermost binder, has index 3). + #[must_use] + pub fn shifted_in_from(self, outer_binder: DebruijnIndex) -> DebruijnIndex { + DebruijnIndex::new(self.depth() + outer_binder.depth()) + } - /// A predicate which is true is some type is well-formed. - /// For example, given the following type definition: + /// Returns the resulting index when this value is moved out from + /// `amount` number of new binders. + #[must_use] + pub fn shifted_out(self) -> Option { + self.shifted_out_to(DebruijnIndex::ONE) + } + + /// Update in place by shifting out from `amount` binders. + pub fn shift_out(&mut self) { + *self = self.shifted_out().unwrap(); + } + + /// Subtracts `outer_binder` levels from the `self` index. Intuitively, this + /// shifts the `self` index, which was valid at the innermost + /// binder, to one that is valid at the binder `outer_binder`. /// - /// ```notrust - /// struct Set where K: Hash { - /// ... - /// } + /// This will return `None` if the `self` index is internal to the + /// outer binder (i.e., if `self < outer_binder`). + /// + /// Example: Assume that the following binders are in scope: + /// + /// ```ignore + /// for for for for + /// ^ outer binder /// ``` /// - /// then we have the following rule: `WellFormedTy(Set) :- Implemented(K: Hash)`. - Ty(Ty), + /// Assume further that the `outer_binder` argument is 2, + /// which means that it is referring to the `for` binder + /// (since `D` would be the innermost binder). 
+ /// + /// This means that the result is relative to the binder `B` -- so + /// if `self` is 0 (`INNERMOST`), then it refers to `B`, + /// and if `self` is 1, then it refers to `A`. + /// + /// We will return as follows: + /// + /// * `1.shifted_out_to(2) = None` -- i.e., the binder for `C` can't be named from the binding level `B` + /// * `3.shifted_out_to(2) = Some(1)` -- i.e., `A`, when shifted out to the binding level `B`, has index 1 + pub fn shifted_out_to(self, outer_binder: DebruijnIndex) -> Option { + if self.within(outer_binder) { + None + } else { + Some(DebruijnIndex::new(self.depth() - outer_binder.depth())) + } + } +} + +/// A "DynTy" represents a trait object (`dyn Trait`). Trait objects +/// are conceptually very related to an "existential type" of the form +/// `exists { T: Trait }` (another example of such type is `impl Trait`). +/// `DynTy` represents the bounds on that type. +/// +/// The "bounds" here represents the unknown self type. So, a type like +/// `dyn for<'a> Fn(&'a u32)` would be represented with two-levels of +/// binder, as "depicted" here: +/// +/// ```notrust +/// exists { +/// vec![ +/// // A QuantifiedWhereClause: +/// forall { ^1.0: Fn(&^0.0 u32) } +/// ] +/// } +/// ``` +/// +/// The outer `exists` binder indicates that there exists +/// some type that meets the criteria within, but that type is not +/// known. It is referenced within the type using `^1.0`, indicating +/// a bound type with debruijn index 1 (i.e., skipping through one +/// level of binder). +#[derive(Clone, PartialEq, Eq, Hash, TypeFoldable, TypeVisitable, HasInterner)] +pub struct DynTy { + /// The unknown self type. + pub bounds: Binders>, + /// Lifetime of the `DynTy`. + pub lifetime: Lifetime, +} + +impl Copy for DynTy +where + I::InternedLifetime: Copy, + I::InternedQuantifiedWhereClauses: Copy, + I::InternedVariableKinds: Copy, +{ +} + +/// A type, lifetime or constant whose value is being inferred. 
+#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] +pub struct InferenceVar { + index: u32, +} + +impl From for InferenceVar { + fn from(index: u32) -> InferenceVar { + InferenceVar { index } + } +} + +impl InferenceVar { + /// Gets the underlying index value. + pub fn index(self) -> u32 { + self.index + } + + /// Wraps the inference variable in a type. + pub fn to_ty(self, interner: I, kind: TyVariableKind) -> Ty { + TyKind::::InferenceVar(self, kind).intern(interner) + } + + /// Wraps the inference variable in a lifetime. + pub fn to_lifetime(self, interner: I) -> Lifetime { + LifetimeData::::InferenceVar(self).intern(interner) + } + + /// Wraps the inference variable in a constant. + pub fn to_const(self, interner: I, ty: Ty) -> Const { + ConstData { + ty, + value: ConstValue::::InferenceVar(self), + } + .intern(interner) + } +} + +/// A function signature. +#[derive(Clone, Copy, PartialEq, Eq, Hash, HasInterner, Debug)] +#[allow(missing_docs)] +pub struct FnSig { + pub abi: I::FnAbi, + pub safety: Safety, + pub variadic: bool, +} +/// A wrapper for the substs on a Fn. 
+#[derive(Clone, PartialEq, Eq, Hash, HasInterner, TypeFoldable, TypeVisitable)] +pub struct FnSubst(pub Substitution); + +impl Copy for FnSubst where I::InternedSubstitution: Copy {} + +/// for<'a...'z> X -- all binders are instantiated at once, +/// and we use deBruijn indices within `self.ty` +#[derive(Clone, PartialEq, Eq, Hash, HasInterner)] +#[allow(missing_docs)] +pub struct FnPointer { + pub num_binders: usize, + pub sig: FnSig, + pub substitution: FnSubst, +} + +impl Copy for FnPointer where I::InternedSubstitution: Copy {} + +impl FnPointer { + /// Represent the current `Fn` as if it was wrapped in `Binders` + pub fn into_binders(self, interner: I) -> Binders> { + Binders::new( + VariableKinds::from_iter( + interner, + (0..self.num_binders).map(|_| VariableKind::Lifetime), + ), + self.substitution, + ) + } + + /// Represent the current `Fn` as if it was wrapped in `Binders` + pub fn as_binders(&self, interner: I) -> Binders<&FnSubst> { + Binders::new( + VariableKinds::from_iter( + interner, + (0..self.num_binders).map(|_| VariableKind::Lifetime), + ), + &self.substitution, + ) + } +} + +/// Constants. +#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, HasInterner)] +pub struct Const { + interned: I::InternedConst, +} + +impl Const { + /// Create a `Const` using something that can be cast to const data. + pub fn new(interner: I, data: impl CastTo>) -> Self { + Const { + interned: I::intern_const(interner, data.cast(interner)), + } + } + + /// Gets the interned constant. + pub fn interned(&self) -> &I::InternedConst { + &self.interned + } + + /// Gets the constant data from the interner. + pub fn data(&self, interner: I) -> &ConstData { + I::const_data(interner, &self.interned) + } + + /// If this is a `ConstData::BoundVar(d)`, returns `Some(d)` else `None`. 
+ pub fn bound_var(&self, interner: I) -> Option { + if let ConstValue::BoundVar(bv) = &self.data(interner).value { + Some(*bv) + } else { + None + } + } + + /// If this is a `ConstData::InferenceVar(d)`, returns `Some(d)` else `None`. + pub fn inference_var(&self, interner: I) -> Option { + if let ConstValue::InferenceVar(iv) = &self.data(interner).value { + Some(*iv) + } else { + None + } + } + + /// True if this const is a "bound" const, and hence + /// needs to be shifted across binders. Meant for debug assertions. + pub fn needs_shift(&self, interner: I) -> bool { + match &self.data(interner).value { + ConstValue::BoundVar(_) => true, + ConstValue::InferenceVar(_) => false, + ConstValue::Placeholder(_) => false, + ConstValue::Concrete(_) => false, + } + } +} + +/// Constant data, containing the constant's type and value. +#[derive(Clone, PartialEq, Eq, Hash, HasInterner)] +pub struct ConstData { + /// Type that holds the constant. + pub ty: Ty, + /// The value of the constant. + pub value: ConstValue, +} + +/// A constant value, not necessarily concrete. +#[derive(Clone, PartialEq, Eq, Hash, HasInterner)] +pub enum ConstValue { + /// Bound var (e.g. a parameter). + BoundVar(BoundVar), + /// Constant whose value is being inferred. + InferenceVar(InferenceVar), + /// Lifetime on some yet-unknown placeholder. + Placeholder(PlaceholderIndex), + /// Concrete constant value. + Concrete(ConcreteConst), +} + +impl Copy for ConstValue where I::InternedConcreteConst: Copy {} + +impl ConstData { + /// Wraps the constant data in a `Const`. + pub fn intern(self, interner: I) -> Const { + Const::new(interner, self) + } +} + +/// Concrete constant, whose value is known (as opposed to +/// inferred constants and placeholders). +#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, HasInterner)] +pub struct ConcreteConst { + /// The interned constant. 
+ pub interned: I::InternedConcreteConst, +} + +impl ConcreteConst { + /// Checks whether two concrete constants are equal. + pub fn const_eq(&self, ty: &Ty, other: &ConcreteConst, interner: I) -> bool { + interner.const_eq(&ty.interned, &self.interned, &other.interned) + } +} + +/// A Rust lifetime. +#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, HasInterner)] +pub struct Lifetime { + interned: I::InternedLifetime, +} + +impl Lifetime { + /// Create a lifetime from lifetime data + /// (or something that can be cast to lifetime data). + pub fn new(interner: I, data: impl CastTo>) -> Self { + Lifetime { + interned: I::intern_lifetime(interner, data.cast(interner)), + } + } + + /// Gets the interned value. + pub fn interned(&self) -> &I::InternedLifetime { + &self.interned + } + + /// Gets the lifetime data. + pub fn data(&self, interner: I) -> &LifetimeData { + I::lifetime_data(interner, &self.interned) + } + + /// If this is a `Lifetime::BoundVar(d)`, returns `Some(d)` else `None`. + pub fn bound_var(&self, interner: I) -> Option { + if let LifetimeData::BoundVar(bv) = self.data(interner) { + Some(*bv) + } else { + None + } + } + + /// If this is a `Lifetime::InferenceVar(d)`, returns `Some(d)` else `None`. + pub fn inference_var(&self, interner: I) -> Option { + if let LifetimeData::InferenceVar(depth) = self.data(interner) { + Some(*depth) + } else { + None + } + } + + /// True if this lifetime is a "bound" lifetime, and hence + /// needs to be shifted across binders. Meant for debug assertions. + pub fn needs_shift(&self, interner: I) -> bool { + match self.data(interner) { + LifetimeData::BoundVar(_) => true, + LifetimeData::InferenceVar(_) => false, + LifetimeData::Placeholder(_) => false, + LifetimeData::Static => false, + LifetimeData::Erased => false, + LifetimeData::Error => false, + LifetimeData::Phantom(..) 
=> unreachable!(), + } + } + + ///compute type flags for Lifetime + fn compute_flags(&self, interner: I) -> TypeFlags { + match self.data(interner) { + LifetimeData::InferenceVar(_) => { + TypeFlags::HAS_RE_INFER + | TypeFlags::HAS_FREE_LOCAL_REGIONS + | TypeFlags::HAS_FREE_REGIONS + } + LifetimeData::Placeholder(_) => { + TypeFlags::HAS_RE_PLACEHOLDER + | TypeFlags::HAS_FREE_LOCAL_REGIONS + | TypeFlags::HAS_FREE_REGIONS + } + LifetimeData::Static => TypeFlags::HAS_FREE_REGIONS, + LifetimeData::Phantom(_, _) => TypeFlags::empty(), + LifetimeData::BoundVar(_) => TypeFlags::HAS_RE_LATE_BOUND, + LifetimeData::Erased => TypeFlags::HAS_RE_ERASED, + LifetimeData::Error => TypeFlags::HAS_RE_ERROR, + } + } +} + +/// Lifetime data, including what kind of lifetime it is and what it points to. +#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, HasInterner)] +pub enum LifetimeData { + /// See TyKind::BoundVar. + BoundVar(BoundVar), + /// Lifetime whose value is being inferred. + InferenceVar(InferenceVar), + /// Lifetime on some yet-unknown placeholder. + Placeholder(PlaceholderIndex), + /// Static lifetime + Static, + /// An erased lifetime, used by rustc to improve caching when we doesn't + /// care about lifetimes + Erased, + /// Lifetime on phantom data. + Phantom(Void, PhantomData), + /// A lifetime that resulted from some error + Error, } -#[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub enum FromEnv { +impl LifetimeData { + /// Wrap the lifetime data in a lifetime. + pub fn intern(self, interner: I) -> Lifetime { + Lifetime::new(interner, self) + } +} + +/// Index of an universally quantified parameter in the environment. +/// Two indexes are required, the one of the universe itself +/// and the relative index inside the universe. +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct PlaceholderIndex { + /// Index *of* the universe. + pub ui: UniverseIndex, + /// Index *in* the universe. 
+ pub idx: usize, +} + +impl PlaceholderIndex { + /// Wrap the placeholder instance in a lifetime. + pub fn to_lifetime(self, interner: I) -> Lifetime { + LifetimeData::::Placeholder(self).intern(interner) + } + + /// Create an interned type. + pub fn to_ty(self, interner: I) -> Ty { + TyKind::Placeholder(self).intern(interner) + } + + /// Wrap the placeholder index in a constant. + pub fn to_const(self, interner: I, ty: Ty) -> Const { + ConstData { + ty, + value: ConstValue::Placeholder(self), + } + .intern(interner) + } +} +/// Represents some extra knowledge we may have about the type variable. +/// ```ignore +/// let x: &[u32]; +/// let i = 1; +/// x[i] +/// ``` +/// In this example, `i` is known to be some type of integer. We can infer that +/// it is `usize` because that is the only integer type that slices have an +/// `Index` impl for. `i` would have a `TyVariableKind` of `Integer` to guide the +/// inference process. +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +#[allow(missing_docs)] +pub enum TyVariableKind { + General, + Integer, + Float, +} + +/// The "kind" of variable. Type, lifetime or constant. 
+#[derive(Clone, PartialEq, Eq, Hash)] +#[allow(missing_docs)] +pub enum VariableKind { + Ty(TyVariableKind), + Lifetime, + Const(Ty), +} + +impl interner::HasInterner for VariableKind { + type Interner = I; +} + +impl Copy for VariableKind where I::InternedType: Copy {} + +impl VariableKind { + fn to_bound_variable(&self, interner: I, bound_var: BoundVar) -> GenericArg { + match self { + VariableKind::Ty(_) => { + GenericArgData::Ty(TyKind::BoundVar(bound_var).intern(interner)).intern(interner) + } + VariableKind::Lifetime => { + GenericArgData::Lifetime(LifetimeData::BoundVar(bound_var).intern(interner)) + .intern(interner) + } + VariableKind::Const(ty) => GenericArgData::Const( + ConstData { + ty: ty.clone(), + value: ConstValue::BoundVar(bound_var), + } + .intern(interner), + ) + .intern(interner), + } + } +} + +/// A generic argument, see `GenericArgData` for more information. +#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, HasInterner)] +pub struct GenericArg { + interned: I::InternedGenericArg, +} + +impl GenericArg { + /// Constructs a generic argument using `GenericArgData`. + pub fn new(interner: I, data: GenericArgData) -> Self { + let interned = I::intern_generic_arg(interner, data); + GenericArg { interned } + } + + /// Gets the interned value. + pub fn interned(&self) -> &I::InternedGenericArg { + &self.interned + } + + /// Gets the underlying data. + pub fn data(&self, interner: I) -> &GenericArgData { + I::generic_arg_data(interner, &self.interned) + } + + /// Asserts that this is a type argument. + pub fn assert_ty_ref(&self, interner: I) -> &Ty { + self.ty(interner).unwrap() + } + + /// Asserts that this is a lifetime argument. + pub fn assert_lifetime_ref(&self, interner: I) -> &Lifetime { + self.lifetime(interner).unwrap() + } + + /// Asserts that this is a constant argument. + pub fn assert_const_ref(&self, interner: I) -> &Const { + self.constant(interner).unwrap() + } + + /// Checks whether the generic argument is a type. 
+ pub fn is_ty(&self, interner: I) -> bool { + match self.data(interner) { + GenericArgData::Ty(_) => true, + GenericArgData::Lifetime(_) => false, + GenericArgData::Const(_) => false, + } + } + + /// Returns the type if it is one, `None` otherwise. + pub fn ty(&self, interner: I) -> Option<&Ty> { + match self.data(interner) { + GenericArgData::Ty(t) => Some(t), + _ => None, + } + } + + /// Returns the lifetime if it is one, `None` otherwise. + pub fn lifetime(&self, interner: I) -> Option<&Lifetime> { + match self.data(interner) { + GenericArgData::Lifetime(t) => Some(t), + _ => None, + } + } + + /// Returns the constant if it is one, `None` otherwise. + pub fn constant(&self, interner: I) -> Option<&Const> { + match self.data(interner) { + GenericArgData::Const(c) => Some(c), + _ => None, + } + } + + /// Compute type flags for GenericArg + fn compute_flags(&self, interner: I) -> TypeFlags { + match self.data(interner) { + GenericArgData::Ty(ty) => ty.data(interner).flags, + GenericArgData::Lifetime(lifetime) => lifetime.compute_flags(interner), + GenericArgData::Const(constant) => { + let data = constant.data(interner); + let flags = data.ty.data(interner).flags; + match data.value { + ConstValue::BoundVar(_) => flags, + ConstValue::InferenceVar(_) => { + flags | TypeFlags::HAS_CT_INFER | TypeFlags::STILL_FURTHER_SPECIALIZABLE + } + ConstValue::Placeholder(_) => { + flags + | TypeFlags::HAS_CT_PLACEHOLDER + | TypeFlags::STILL_FURTHER_SPECIALIZABLE + } + ConstValue::Concrete(_) => flags, + } + } + } + } +} + +/// Generic arguments data. +#[derive(Clone, PartialEq, Eq, Hash, TypeVisitable, TypeFoldable, Zip)] +pub enum GenericArgData { + /// Type argument + Ty(Ty), + /// Lifetime argument + Lifetime(Lifetime), + /// Constant argument + Const(Const), +} + +impl Copy for GenericArgData +where + I::InternedType: Copy, + I::InternedLifetime: Copy, + I::InternedConst: Copy, +{ +} + +impl GenericArgData { + /// Create an interned type. 
+ pub fn intern(self, interner: I) -> GenericArg { + GenericArg::new(interner, self) + } +} + +/// A value with an associated variable kind. +#[derive(Clone, PartialEq, Eq, Hash)] +pub struct WithKind { + /// The associated variable kind. + pub kind: VariableKind, + /// The wrapped value. + value: T, +} + +impl Copy for WithKind where I::InternedType: Copy {} + +impl HasInterner for WithKind { + type Interner = I; +} + +impl From> for (VariableKind, T) { + fn from(with_kind: WithKind) -> Self { + (with_kind.kind, with_kind.value) + } +} + +impl WithKind { + /// Creates a `WithKind` from a variable kind and a value. + pub fn new(kind: VariableKind, value: T) -> Self { + Self { kind, value } + } + + /// Maps the value in `WithKind`. + pub fn map(self, op: OP) -> WithKind + where + OP: FnOnce(T) -> U, + { + WithKind { + kind: self.kind, + value: op(self.value), + } + } + + /// Maps a function taking `WithKind` over `&WithKind`. + pub fn map_ref(&self, op: OP) -> WithKind + where + OP: FnOnce(&T) -> U, + { + WithKind { + kind: self.kind.clone(), + value: op(&self.value), + } + } + + /// Extract the value, ignoring the variable kind. + pub fn skip_kind(&self) -> &T { + &self.value + } +} + +/// A variable kind with universe index. +#[allow(type_alias_bounds)] +pub type CanonicalVarKind = WithKind; + +/// An alias, which is a trait indirection such as a projection or opaque type. +#[derive(Clone, PartialEq, Eq, Hash, TypeFoldable, TypeVisitable, HasInterner, Zip)] +pub enum AliasTy { + /// An associated type projection. + Projection(ProjectionTy), + /// An opaque type. + Opaque(OpaqueTy), +} + +impl Copy for AliasTy where I::InternedSubstitution: Copy {} + +impl AliasTy { + /// Create an interned type for this alias. 
+ pub fn intern(self, interner: I) -> Ty { + Ty::new(interner, self) + } + + /// Compute type flags for aliases + fn compute_flags(&self, interner: I) -> TypeFlags { + match self { + AliasTy::Projection(projection_ty) => { + TypeFlags::HAS_TY_PROJECTION | projection_ty.substitution.compute_flags(interner) + } + AliasTy::Opaque(opaque_ty) => { + TypeFlags::HAS_TY_OPAQUE | opaque_ty.substitution.compute_flags(interner) + } + } + } +} + +/// A projection `>::AssocItem`. +#[derive(Clone, PartialEq, Eq, Hash, TypeFoldable, TypeVisitable, HasInterner)] +pub struct ProjectionTy { + /// The id for the associated type member. + pub associated_ty_id: AssocTypeId, + /// The substitution for the projection. + pub substitution: Substitution, +} + +impl Copy for ProjectionTy where I::InternedSubstitution: Copy {} + +/// An opaque type `opaque type T<..>: Trait = HiddenTy`. +#[derive(Clone, PartialEq, Eq, Hash, TypeFoldable, TypeVisitable, HasInterner)] +pub struct OpaqueTy { + /// The id for the opaque type. + pub opaque_ty_id: OpaqueTyId, + /// The substitution for the opaque type. + pub substitution: Substitution, +} + +impl Copy for OpaqueTy where I::InternedSubstitution: Copy {} + +/// A trait reference describes the relationship between a type and a trait. +/// This can be used in two forms: +/// - `P0: Trait` (e.g. `i32: Copy`), which mentions that the type +/// implements the trait. +/// - `>` (e.g. `i32 as Copy`), which casts the type to +/// that specific trait. +#[derive(Clone, PartialEq, Eq, Hash, TypeFoldable, TypeVisitable, HasInterner)] +pub struct TraitRef { + /// The trait id. + pub trait_id: TraitId, + /// The substitution, containing both the `Self` type and the parameters. + pub substitution: Substitution, +} + +impl Copy for TraitRef where I::InternedSubstitution: Copy {} + +impl TraitRef { + /// Gets all type parameters in this trait ref, including `Self`. 
+ pub fn type_parameters(&self, interner: I) -> impl Iterator> + '_ { + self.substitution + .iter(interner) + .filter_map(move |p| p.ty(interner)) + .cloned() + } + + /// Gets the type parameters of the `Self` type in this trait ref. + pub fn self_type_parameter(&self, interner: I) -> Ty { + self.type_parameters(interner).next().unwrap() + } + + /// Construct a `FromEnv` using this trait ref. + pub fn from_env(self) -> FromEnv { + FromEnv::Trait(self) + } + + /// Construct a `WellFormed` using this trait ref. + pub fn well_formed(self) -> WellFormed { + WellFormed::Trait(self) + } +} + +/// Lifetime outlives, which for `'a: 'b` checks that the lifetime `'a` +/// is a superset of the value of `'b`. +#[derive(Clone, PartialEq, Eq, Hash, TypeFoldable, TypeVisitable, HasInterner, Zip)] +#[allow(missing_docs)] +pub struct LifetimeOutlives { + pub a: Lifetime, + pub b: Lifetime, +} + +impl Copy for LifetimeOutlives where I::InternedLifetime: Copy {} + +/// Type outlives, which for `T: 'a` checks that the type `T` +/// lives at least as long as the lifetime `'a` +#[derive(Clone, PartialEq, Eq, Hash, TypeFoldable, TypeVisitable, HasInterner, Zip)] +pub struct TypeOutlives { + /// The type which must outlive the given lifetime. + pub ty: Ty, + /// The lifetime which the type must outlive. + pub lifetime: Lifetime, +} + +impl Copy for TypeOutlives +where + I::InternedLifetime: Copy, + I::InternedType: Copy, +{ +} + +/// Where clauses that can be written by a Rust programmer. +#[derive(Clone, PartialEq, Eq, Hash, TypeFoldable, TypeSuperVisitable, HasInterner, Zip)] +pub enum WhereClause { + /// Type implements a trait. + Implemented(TraitRef), + /// Type is equal to an alias. + AliasEq(AliasEq), + /// One lifetime outlives another. + LifetimeOutlives(LifetimeOutlives), + /// Type outlives a lifetime. 
+ TypeOutlives(TypeOutlives), +} + +impl Copy for WhereClause +where + I::InternedSubstitution: Copy, + I::InternedLifetime: Copy, + I::InternedType: Copy, +{ +} + +/// Checks whether a type or trait ref is well-formed. +#[derive(Clone, PartialEq, Eq, Hash, TypeFoldable, TypeVisitable, HasInterner, Zip)] +pub enum WellFormed { + /// A predicate which is true when some trait ref is well-formed. + /// For example, given the following trait definitions: + /// + /// ```notrust + /// trait Clone { ... } + /// trait Copy where Self: Clone { ... } + /// ``` + /// + /// then we have the following rule: + /// + /// ```notrust + /// WellFormed(?Self: Copy) :- ?Self: Copy, WellFormed(?Self: Clone) + /// ``` + Trait(TraitRef), + + /// A predicate which is true when some type is well-formed. + /// For example, given the following type definition: + /// + /// ```notrust + /// struct Set where K: Hash { + /// ... + /// } + /// ``` + /// + /// then we have the following rule: `WellFormedTy(Set) :- Implemented(K: Hash)`. + Ty(Ty), +} + +impl Copy for WellFormed +where + I::InternedType: Copy, + I::InternedSubstitution: Copy, +{ +} + +/// Checks whether a type or trait ref can be derived from the contents of the environment. +#[derive(Clone, PartialEq, Eq, Hash, TypeFoldable, TypeVisitable, HasInterner, Zip)] +pub enum FromEnv { /// A predicate which enables deriving everything which should be true if we *know* that /// some trait ref is well-formed. For example given the above trait definitions, we can use /// `FromEnv(T: Copy)` to derive that `T: Clone`, like in: @@ -468,7 +1799,7 @@ pub enum FromEnv { /// } /// } /// ``` - Trait(TraitRef), + Trait(TraitRef), /// A predicate which enables deriving everything which should be true if we *know* that /// some type is well-formed. 
For example given the above type definition, we can use @@ -481,39 +1812,42 @@ pub enum FromEnv { /// } /// } /// ``` - Ty(Ty), + Ty(Ty), +} + +impl Copy for FromEnv +where + I::InternedType: Copy, + I::InternedSubstitution: Copy, +{ } /// A "domain goal" is a goal that is directly about Rust, rather than a pure /// logical statement. As much as possible, the Chalk solver should avoid /// decomposing this enum, and instead treat its values opaquely. -#[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub enum DomainGoal { - Holds(WhereClause), - WellFormed(WellFormed), - FromEnv(FromEnv), +#[derive(Clone, PartialEq, Eq, Hash, TypeFoldable, TypeSuperVisitable, HasInterner, Zip)] +pub enum DomainGoal { + /// Simple goal that is true if the where clause is true. + Holds(WhereClause), - Normalize(Normalize), - UnselectedNormalize(UnselectedNormalize), + /// True if the type or trait ref is well-formed. + WellFormed(WellFormed), - InScope(ItemId), + /// True if the trait ref can be derived from in-scope where clauses. + FromEnv(FromEnv), - /// Whether a type can deref into another. Right now this is just: - /// ```notrust - /// Derefs(T, U) :- Implemented(T: Deref) - /// ``` - /// In Rust there are also raw pointers which can be deref'd but do not implement Deref. - Derefs(Derefs), + /// True if the alias type can be normalized to some other type + Normalize(Normalize), /// True if a type is considered to have been "defined" by the current crate. This is true for /// a `struct Foo { }` but false for a `#[upstream] struct Foo { }`. However, for fundamental types /// like `Box`, it is true if `T` is local. - IsLocal(Ty), + IsLocal(Ty), /// True if a type is *not* considered to have been "defined" by the current crate. This is /// false for a `struct Foo { }` but true for a `#[upstream] struct Foo { }`. However, for /// fundamental types like `Box`, it is true if `T` is upstream. 
- IsUpstream(Ty), + IsUpstream(Ty), /// True if a type and its input types are fully visible, known types. That is, there are no /// unknown type parameters anywhere in this type. @@ -528,7 +1862,7 @@ pub enum DomainGoal { /// /// Note that any of these types can have lifetimes in their parameters too, but we only /// consider type parameters. - IsFullyVisible(Ty), + IsFullyVisible(Ty), /// Used to dictate when trait impls are allowed in the current (local) crate based on the /// orphan rules. @@ -537,14 +1871,12 @@ pub enum DomainGoal { /// the current crate. Under the current rules, this is unconditionally true for all types if /// the Trait is considered to be "defined" in the current crate. If that is not the case, then /// `LocalImplAllowed(T: Trait)` can still be true if `IsLocal(T)` is true. - LocalImplAllowed(TraitRef), + LocalImplAllowed(TraitRef), /// Used to activate the "compatible modality" rules. Rules that introduce predicates that have /// to do with "all compatible universes" should depend on this clause so that they only apply /// if this is present. - /// - /// (HACK: Having `()` makes some of our macros work better.) - Compatible(()), + Compatible, /// Used to indicate that a given type is in a downstream crate. Downstream crates contain the /// current crate at some level of their dependencies. @@ -555,96 +1887,157 @@ pub enum DomainGoal { /// forall { if (DownstreamType(T)) { /* ... */ } } /// /// This makes a new type `T` available and makes `DownstreamType(T)` provable for that type. - DownstreamType(Ty), + DownstreamType(Ty), + + /// Used to activate the "reveal mode", in which opaque (`impl Trait`) types can be equated + /// to their actual type. + Reveal, + + /// Used to indicate that a trait is object safe. 
+ ObjectSafe(TraitId), +} + +impl Copy for DomainGoal +where + I::InternedSubstitution: Copy, + I::InternedLifetime: Copy, + I::InternedType: Copy, +{ } -pub type QuantifiedWhereClause = Binders; +/// A where clause that can contain `forall<>` or `exists<>` quantifiers. +pub type QuantifiedWhereClause = Binders>; -impl WhereClause { +impl WhereClause { /// Turn a where clause into the WF version of it i.e.: /// * `Implemented(T: Trait)` maps to `WellFormed(T: Trait)` /// * `ProjectionEq(::Item = Foo)` maps to `WellFormed(::Item = Foo)` /// * any other clause maps to itself - pub fn into_well_formed_goal(self) -> DomainGoal { + pub fn into_well_formed_goal(self, interner: I) -> DomainGoal { match self { - WhereClause::Implemented(trait_ref) => WellFormed::Trait(trait_ref).cast(), - wc => wc.cast(), + WhereClause::Implemented(trait_ref) => WellFormed::Trait(trait_ref).cast(interner), + wc => wc.cast(interner), } } /// Same as `into_well_formed_goal` but with the `FromEnv` predicate instead of `WellFormed`. - pub fn into_from_env_goal(self) -> DomainGoal { + pub fn into_from_env_goal(self, interner: I) -> DomainGoal { + match self { + WhereClause::Implemented(trait_ref) => FromEnv::Trait(trait_ref).cast(interner), + wc => wc.cast(interner), + } + } + + /// If where clause is a `TraitRef`, returns its trait id. + pub fn trait_id(&self) -> Option> { + match self { + WhereClause::Implemented(trait_ref) => Some(trait_ref.trait_id), + WhereClause::AliasEq(_) => None, + WhereClause::LifetimeOutlives(_) => None, + WhereClause::TypeOutlives(_) => None, + } + } +} + +impl QuantifiedWhereClause { + /// As with `WhereClause::into_well_formed_goal`, but for a + /// quantified where clause. For example, `forall { + /// Implemented(T: Trait)}` would map to `forall { + /// WellFormed(T: Trait) }`. 
+ pub fn into_well_formed_goal(self, interner: I) -> Binders> { + self.map(|wc| wc.into_well_formed_goal(interner)) + } + + /// As with `WhereClause::into_from_env_goal`, but mapped over any + /// binders. For example, `forall { + /// Implemented(T: Trait)}` would map to `forall { + /// FromEnv(T: Trait) }`. + pub fn into_from_env_goal(self, interner: I) -> Binders> { + self.map(|wc| wc.into_from_env_goal(interner)) + } + + /// If the underlying where clause is a `TraitRef`, returns its trait id. + pub fn trait_id(&self) -> Option> { + self.skip_binders().trait_id() + } +} + +impl DomainGoal { + /// Convert `Implemented(...)` into `FromEnv(...)`, but leave other + /// goals unchanged. + pub fn into_from_env_goal(self, interner: I) -> DomainGoal { match self { - WhereClause::Implemented(trait_ref) => FromEnv::Trait(trait_ref).cast(), - wc => wc.cast(), + DomainGoal::Holds(wc) => wc.into_from_env_goal(interner), + goal => goal, } } -} -impl DomainGoal { - pub fn into_from_env_goal(self) -> DomainGoal { + /// Lists generic arguments that are inputs to this domain goal. + pub fn inputs(&self, interner: I) -> Vec> { match self { - DomainGoal::Holds(wc) => wc.into_from_env_goal(), - goal => goal, + DomainGoal::Holds(WhereClause::AliasEq(alias_eq)) => { + vec![GenericArgData::Ty(alias_eq.alias.clone().intern(interner)).intern(interner)] + } + _ => Vec::new(), } } } -#[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -/// A goal that does not involve any logical connectives. Equality is treated -/// specially by the logic (as with most first-order logics), since it interacts -/// with unification etc. -pub enum LeafGoal { - EqGoal(EqGoal), - DomainGoal(DomainGoal), +/// Equality goal: tries to prove that two values are equal. 
+#[derive(Clone, PartialEq, Eq, Hash, TypeFoldable, TypeVisitable, Zip)] +#[allow(missing_docs)] +pub struct EqGoal { + pub a: GenericArg, + pub b: GenericArg, } -#[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub struct EqGoal { - pub a: Parameter, - pub b: Parameter, +impl Copy for EqGoal where I::InternedGenericArg: Copy {} + +/// Subtype goal: tries to prove that `a` is a subtype of `b` +#[derive(Clone, PartialEq, Eq, Hash, TypeFoldable, TypeVisitable, Zip)] +#[allow(missing_docs)] +pub struct SubtypeGoal { + pub a: Ty, + pub b: Ty, } -/// Proves that the given projection **normalizes** to the given +impl Copy for SubtypeGoal where I::InternedType: Copy {} + +/// Proves that the given type alias **normalizes** to the given /// type. A projection `T::Foo` normalizes to the type `U` if we can /// **match it to an impl** and that impl has a `type Foo = V` where /// `U = V`. -#[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub struct Normalize { - pub projection: ProjectionTy, - pub ty: Ty, -} - -/// Proves **equality** between a projection `T::Foo` and a type -/// `U`. Equality can be proven via normalization, but we can also -/// prove that `T::Foo = V::Foo` if `T = V` without normalizing. -#[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub struct ProjectionEq { - pub projection: ProjectionTy, - pub ty: Ty, -} - -/// Indicates that the trait where the associated type belongs to is -/// not yet known, i.e. is unselected. For example, a normal -/// `Normalize` would be of the form ` as Iterator>::Item -> -/// T`. When `Iterator` is in scope, and it is the only trait in scope -/// with an associated type `Item`, it suffices to write -/// `Vec::Item` instead of ` as Iterator>::Item`. The -/// corresponding `UnselectedNormalize` is `Vec::Item -> T`. -/// -/// For each associated type we encounter in an `impl`, we generate -/// rules to derive an `UnselectedNormalize` from a `Normalize`. 
For -/// example, implementing `Iterator` for `Vec` yields the rule: -/// -/// ```text -/// Vec::Item -> T :- -/// InScope(Iterator), -/// as Iterator>::Item -> T -/// ``` -#[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub struct UnselectedNormalize { - pub projection: UnselectedProjectionTy, - pub ty: Ty, +#[derive(Clone, PartialEq, Eq, Hash, TypeFoldable, TypeVisitable, Zip)] +#[allow(missing_docs)] +pub struct Normalize { + pub alias: AliasTy, + pub ty: Ty, +} + +impl Copy for Normalize +where + I::InternedSubstitution: Copy, + I::InternedType: Copy, +{ +} + +/// Proves **equality** between an alias and a type. +#[derive(Clone, PartialEq, Eq, Hash, TypeFoldable, TypeVisitable, Zip)] +#[allow(missing_docs)] +pub struct AliasEq { + pub alias: AliasTy, + pub ty: Ty, +} + +impl Copy for AliasEq +where + I::InternedSubstitution: Copy, + I::InternedType: Copy, +{ +} + +impl HasInterner for AliasEq { + type Interner = I; } /// Indicates that the `value` is universally quantified over `N` @@ -654,16 +2047,81 @@ pub struct UnselectedNormalize { /// /// (IOW, we use deBruijn indices, where binders are introduced in reverse order /// of `self.binders`.) -#[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub struct Binders { - pub binders: Vec>, - pub value: T, +#[derive(Clone, PartialEq, Eq, Hash)] +pub struct Binders { + /// The binders that quantify over the value. + pub binders: VariableKinds, + + /// The value being quantified over. + value: T, +} + +impl Copy for Binders where + ::InternedVariableKinds: Copy +{ +} + +impl HasInterner for Binders { + type Interner = T::Interner; +} + +impl Binders<&T> { + /// Converts a `Binders<&T>` to a `Binders` by cloning `T`. + pub fn cloned(self) -> Binders { + self.map(Clone::clone) + } } -impl Binders { +impl Binders { + /// Create new binders. + pub fn new(binders: VariableKinds, value: T) -> Self { + Self { binders, value } + } + + /// Wraps the given value in a binder without variables, i.e. 
`for<> + /// (value)`. Since our deBruijn indices count binders, not variables, this + /// is sometimes useful. + pub fn empty(interner: T::Interner, value: T) -> Self { + let binders = VariableKinds::empty(interner); + Self { binders, value } + } + + /// Skips the binder and returns the "bound" value. This is a + /// risky thing to do because it's easy to get confused about + /// De Bruijn indices and the like. `skip_binder` is only valid + /// when you are either extracting data that has nothing to + /// do with bound vars, or you are being very careful about + /// your depth accounting. + /// + /// Some examples where `skip_binder` is reasonable: + /// + /// - extracting the `TraitId` from a TraitRef; + /// - checking if there are any fields in a StructDatum + pub fn skip_binders(&self) -> &T { + &self.value + } + + /// Skips the binder and returns the "bound" value as well as the skipped free variables. This + /// is just as risky as [`skip_binders`][Self::skip_binders]. + pub fn into_value_and_skipped_binders(self) -> (T, VariableKinds) { + (self.value, self.binders) + } + + /// Converts `&Binders` to `Binders<&T>`. Produces new `Binders` + /// with cloned quantifiers containing a reference to the original + /// value, leaving the original in place. + pub fn as_ref(&self) -> Binders<&T> { + Binders { + binders: self.binders.clone(), + value: &self.value, + } + } + + /// Maps the binders by applying a function. pub fn map(self, op: OP) -> Binders where OP: FnOnce(T) -> U, + U: HasInterner, { let value = op(self.value); Binders { @@ -672,46 +2130,124 @@ impl Binders { } } - pub fn map_ref(&self, op: OP) -> Binders + /// Transforms the inner value according to the given function; returns + /// `None` if the function returns `None`. 
+ pub fn filter_map(self, op: OP) -> Option> where - OP: FnOnce(&T) -> U, + OP: FnOnce(T) -> Option, + U: HasInterner, { - let value = op(&self.value); - Binders { - binders: self.binders.clone(), + let value = op(self.value)?; + Some(Binders { + binders: self.binders, value, - } + }) } - /// Introduces a fresh type variable at the start of the binders and returns new Binders with - /// the result of the operator function applied. - /// - /// forall will become forall where ?0 is the fresh variable - pub fn with_fresh_type_var(self, op: OP) -> Binders + /// Maps a function taking `Binders<&T>` over `&Binders`. + pub fn map_ref<'a, U, OP>(&'a self, op: OP) -> Binders where - OP: FnOnce(::Result, Ty) -> U, - T: Shift, + OP: FnOnce(&'a T) -> U, + U: HasInterner, { + self.as_ref().map(op) + } + + /// Creates a `Substitution` containing bound vars such that applying this + /// substitution will not change the value, i.e. `^0.0, ^0.1, ^0.2` and so + /// on. + pub fn identity_substitution(&self, interner: T::Interner) -> Substitution { + Substitution::from_iter( + interner, + self.binders + .iter(interner) + .enumerate() + .map(|p| p.to_generic_arg(interner)), + ) + } + + /// Creates a fresh binders that contains a single type + /// variable. The result of the closure will be embedded in this + /// binder. Note that you should be careful with what you return + /// from the closure to account for the binder that will be added. + /// + /// XXX FIXME -- this is potentially a pretty footgun-y function. 
+ pub fn with_fresh_type_var( + interner: T::Interner, + op: impl FnOnce(Ty) -> T, + ) -> Binders { // The new variable is at the front and everything afterwards is shifted up by 1 - let new_var = Ty::BoundVar(0); - let value = op(self.value.shifted_in(1), new_var); - Binders { - binders: iter::once(ParameterKind::Ty(())) - .chain(self.binders.iter().cloned()) - .collect(), - value, - } + let new_var = TyKind::BoundVar(BoundVar::new(DebruijnIndex::INNERMOST, 0)).intern(interner); + let value = op(new_var); + let binders = VariableKinds::from1(interner, VariableKind::Ty(TyVariableKind::General)); + Binders { binders, value } + } + + /// Returns the number of binders. + pub fn len(&self, interner: T::Interner) -> usize { + self.binders.len(interner) + } +} + +impl Binders> +where + T: TypeFoldable + HasInterner, + I: Interner, +{ + /// This turns two levels of binders (`for for`) into one level (`for`). + pub fn fuse_binders(self, interner: T::Interner) -> Binders { + let num_binders = self.len(interner); + // generate a substitution to shift the indexes of the inner binder: + let subst = Substitution::from_iter( + interner, + self.value + .binders + .iter(interner) + .enumerate() + .map(|(i, pk)| (i + num_binders, pk).to_generic_arg(interner)), + ); + let binders = VariableKinds::from_iter( + interner, + self.binders + .iter(interner) + .chain(self.value.binders.iter(interner)) + .cloned(), + ); + let value = self.value.substitute(interner, &subst); + Binders { binders, value } + } +} + +impl From> for (VariableKinds, T) { + fn from(binders: Binders) -> Self { + (binders.binders, binders.value) } +} - pub fn len(&self) -> usize { - self.binders.len() +impl Binders +where + T: TypeFoldable + HasInterner, + I: Interner, +{ + /// Substitute `parameters` for the variables introduced by these + /// binders. So if the binders represent (e.g.) ` { T }` and + /// parameters is the slice `[A, B]`, then returns `[X => A, Y => + /// B] T`. 
+ pub fn substitute(self, interner: I, parameters: &(impl AsParameters + ?Sized)) -> T { + let parameters = parameters.as_parameters(interner); + assert_eq!(self.binders.len(interner), parameters.len()); + Subst::apply(interner, parameters, self.value) } } /// Allows iterating over a Binders>, for instance. /// Each element will include the same set of parameter bounds. -impl IntoIterator for Binders { - type Item = Binders<::Item>; +impl IntoIterator for Binders +where + V: HasInterner + IntoIterator, + U: HasInterner, +{ + type Item = Binders; type IntoIter = BindersIntoIterator; fn into_iter(self) -> Self::IntoIter { @@ -722,63 +2258,144 @@ impl IntoIterator for Binders { } } -pub struct BindersIntoIterator { +/// `IntoIterator` for binders. +pub struct BindersIntoIterator { iter: ::IntoIter, - binders: Vec>, + binders: VariableKinds, } -impl Iterator for BindersIntoIterator { +impl Iterator for BindersIntoIterator +where + V: HasInterner + IntoIterator, + ::Item: HasInterner, +{ type Item = Binders<::Item>; fn next(&mut self) -> Option { - self.iter.next().map(|v| Binders { - binders: self.binders.clone(), - value: v, - }) + self.iter + .next() + .map(|v| Binders::new(self.binders.clone(), v)) } } /// Represents one clause of the form `consequence :- conditions` where /// `conditions = cond_1 && cond_2 && ...` is the conjunction of the individual /// conditions. -#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct ProgramClauseImplication { - pub consequence: DomainGoal, - pub conditions: Vec, +#[derive(Clone, PartialEq, Eq, Hash, TypeFoldable, TypeVisitable, HasInterner, Zip)] +pub struct ProgramClauseImplication { + /// The consequence of the clause, which holds if the conditions holds. + pub consequence: DomainGoal, + + /// The condition goals that should hold. + pub conditions: Goals, + + /// The lifetime constraints that should be proven. + pub constraints: Constraints, + + /// The relative priority of the implication. 
+ pub priority: ClausePriority, +} + +/// Specifies how important an implication is. +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +pub enum ClausePriority { + /// High priority, the solver should prioritize this. + High, + + /// Low priority, this implication has lower chance to be relevant to the goal. + Low, } -#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub enum ProgramClause { - Implies(ProgramClauseImplication), - ForAll(Binders), +impl std::ops::BitAnd for ClausePriority { + type Output = ClausePriority; + fn bitand(self, rhs: ClausePriority) -> Self::Output { + match (self, rhs) { + (ClausePriority::High, ClausePriority::High) => ClausePriority::High, + _ => ClausePriority::Low, + } + } } -impl ProgramClause { - pub fn into_from_env_clause(self) -> ProgramClause { - match self { - ProgramClause::Implies(implication) => { - if implication.conditions.is_empty() { - ProgramClause::Implies(ProgramClauseImplication { - consequence: implication.consequence.into_from_env_goal(), - conditions: vec![], - }) - } else { - ProgramClause::Implies(implication) - } +/// Contains the data for a program clause. +#[derive(Clone, PartialEq, Eq, Hash, TypeFoldable, HasInterner, Zip)] +pub struct ProgramClauseData(pub Binders>); + +impl ProgramClauseImplication { + /// Change the implication into an application holding a `FromEnv` goal. + pub fn into_from_env_clause(self, interner: I) -> ProgramClauseImplication { + if self.conditions.is_empty(interner) { + ProgramClauseImplication { + consequence: self.consequence.into_from_env_goal(interner), + conditions: self.conditions.clone(), + constraints: self.constraints.clone(), + priority: self.priority, } - clause => clause, + } else { + self + } + } +} + +impl ProgramClauseData { + /// Change the program clause data into a `FromEnv` program clause. 
+ pub fn into_from_env_clause(self, interner: I) -> ProgramClauseData { + ProgramClauseData(self.0.map(|i| i.into_from_env_clause(interner))) + } + + /// Intern the program clause data. + pub fn intern(self, interner: I) -> ProgramClause { + ProgramClause { + interned: interner.intern_program_clause(self), } } } +/// A program clause is a logic expression used to describe a part of the program. +#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, HasInterner)] +pub struct ProgramClause { + interned: I::InternedProgramClause, +} + +impl ProgramClause { + /// Create a new program clause using `ProgramClauseData`. + pub fn new(interner: I, clause: ProgramClauseData) -> Self { + let interned = interner.intern_program_clause(clause); + Self { interned } + } + + /// Change the clause into a `FromEnv` clause. + pub fn into_from_env_clause(self, interner: I) -> ProgramClause { + let program_clause_data = self.data(interner); + let new_clause = program_clause_data.clone().into_from_env_clause(interner); + Self::new(interner, new_clause) + } + + /// Get the interned program clause. + pub fn interned(&self) -> &I::InternedProgramClause { + &self.interned + } + + /// Get the program clause data. + pub fn data(&self, interner: I) -> &ProgramClauseData { + interner.program_clause_data(&self.interned) + } +} + /// Wraps a "canonicalized item". Items are canonicalized as follows: /// /// All unresolved existential variables are "renumbered" according to their /// first appearance; the kind/universe of the variable is recorded in the /// `binders` field. -#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct Canonical { +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub struct Canonical { + /// The item that is canonicalized. pub value: T, - pub binders: Vec>, + + /// The kind/universe of the variable. + pub binders: CanonicalVarKinds, +} + +impl HasInterner for Canonical { + type Interner = T::Interner; } /// A "universe canonical" value. 
This is a wrapper around a @@ -787,110 +2404,236 @@ pub struct Canonical { /// distinctions. /// /// To produce one of these values, use the `u_canonicalize` method. -#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct UCanonical { +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub struct UCanonical { + /// The wrapped `Canonical`. pub canonical: Canonical, + + /// The number of universes that have been collapsed. pub universes: usize, } -impl UCanonical { - pub fn is_trivial_substitution(&self, canonical_subst: &Canonical) -> bool { +impl UCanonical { + /// Checks whether the universe canonical value is a trivial + /// substitution (e.g. an identity substitution). + pub fn is_trivial_substitution( + &self, + interner: T::Interner, + canonical_subst: &Canonical>, + ) -> bool { let subst = &canonical_subst.value.subst; - assert_eq!(self.canonical.binders.len(), subst.parameters.len()); - subst.is_identity_subst() + assert_eq!( + self.canonical.binders.len(interner), + subst.as_slice(interner).len() + ); + subst.is_identity_subst(interner) } -} -impl UCanonical> { - /// A goal has coinductive semantics if it is of the form `T: AutoTrait`, or if it is of the - /// form `WellFormed(T: Trait)` where `Trait` is any trait. The latter is needed for dealing - /// with WF requirements and cyclic traits, which generates cycles in the proof tree which must - /// not be rejected but instead must be treated as a success. - pub fn is_coinductive(&self, program: &ProgramEnvironment) -> bool { - self.canonical.value.goal.is_coinductive(program) + /// Creates an identity substitution. 
+ pub fn trivial_substitution(&self, interner: T::Interner) -> Substitution { + let binders = &self.canonical.binders; + Substitution::from_iter( + interner, + binders + .iter(interner) + .enumerate() + .map(|(index, pk)| { + let bound_var = BoundVar::new(DebruijnIndex::INNERMOST, index); + match &pk.kind { + VariableKind::Ty(_) => { + GenericArgData::Ty(TyKind::BoundVar(bound_var).intern(interner)) + .intern(interner) + } + VariableKind::Lifetime => GenericArgData::Lifetime( + LifetimeData::BoundVar(bound_var).intern(interner), + ) + .intern(interner), + VariableKind::Const(ty) => GenericArgData::Const( + ConstData { + ty: ty.clone(), + value: ConstValue::BoundVar(bound_var), + } + .intern(interner), + ) + .intern(interner), + } + }) + .collect::>(), + ) } } -#[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] +#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, HasInterner)] /// A general goal; this is the full range of questions you can pose to Chalk. -pub enum Goal { - /// Introduces a binding at depth 0, shifting other bindings up - /// (deBruijn index). - Quantified(QuantifierKind, Binders>), - Implies(Vec, Box), - And(Box, Box), - Not(Box), - Leaf(LeafGoal), - - /// Indicates something that cannot be proven to be true or false - /// definitively. This can occur with overflow but also with - /// unifications of skolemized variables like `forall { X = Y - /// }`. Of course, that statement is false, as there exist types - /// X, Y where `X = Y` is not true. But we treat it as "cannot - /// prove" so that `forall { not { X = Y } }` also winds up - /// as cannot prove. - /// - /// (TOTAL HACK: Having a unit result makes some of our macros work better.) - CannotProve(()), +pub struct Goal { + interned: I::InternedGoal, } -impl Goal { - pub fn quantify(self, kind: QuantifierKind, binders: Vec>) -> Goal { - Goal::Quantified( - kind, - Binders { - value: Box::new(self), - binders, - }, - ) +impl Goal { + /// Create a new goal using `GoalData`. 
+ pub fn new(interner: I, interned: GoalData) -> Self { + let interned = I::intern_goal(interner, interned); + Self { interned } + } + + /// Gets the interned goal. + pub fn interned(&self) -> &I::InternedGoal { + &self.interned + } + + /// Gets the interned goal data. + pub fn data(&self, interner: I) -> &GoalData { + interner.goal_data(&self.interned) + } + + /// Create a goal using a `forall` or `exists` quantifier. + pub fn quantify(self, interner: I, kind: QuantifierKind, binders: VariableKinds) -> Goal { + GoalData::Quantified(kind, Binders::new(binders, self)).intern(interner) } - /// Takes a goal `G` and turns it into `not { G }` - pub fn negate(self) -> Self { - Goal::Not(Box::new(self)) + /// Takes a goal `G` and turns it into `not { G }`. + pub fn negate(self, interner: I) -> Self { + GoalData::Not(self).intern(interner) } - /// Takes a goal `G` and turns it into `compatible { G }` - pub fn compatible(self) -> Self { + /// Takes a goal `G` and turns it into `compatible { G }`. + pub fn compatible(self, interner: I) -> Self { // compatible { G } desugars into: forall { if (Compatible, DownstreamType(T)) { G } } // This activates the compatible modality rules and introduces an anonymous downstream type - Goal::Quantified( + GoalData::Quantified( QuantifierKind::ForAll, - Binders { - value: Box::new(self), - binders: Vec::new(), - }.with_fresh_type_var(|goal, ty| { - Box::new(Goal::Implies( - vec![ - DomainGoal::Compatible(()).cast(), - DomainGoal::DownstreamType(ty).cast(), - ], - goal, - )) + Binders::with_fresh_type_var(interner, |ty| { + GoalData::Implies( + ProgramClauses::from_iter( + interner, + vec![DomainGoal::Compatible, DomainGoal::DownstreamType(ty)], + ), + self.shifted_in(interner), + ) + .intern(interner) }), ) + .intern(interner) } - pub fn implied_by(self, predicates: Vec) -> Goal { - Goal::Implies(predicates, Box::new(self)) + /// Create an implication goal that holds if the predicates are true. 
+ pub fn implied_by(self, interner: I, predicates: ProgramClauses) -> Goal { + GoalData::Implies(predicates, self).intern(interner) } - pub fn is_coinductive(&self, program: &ProgramEnvironment) -> bool { - match self { - Goal::Leaf(LeafGoal::DomainGoal(DomainGoal::Holds(wca))) => match wca { - WhereClause::Implemented(tr) => program.coinductive_traits.contains(&tr.trait_id), - WhereClause::ProjectionEq(..) => false, - }, - Goal::Leaf(LeafGoal::DomainGoal(DomainGoal::WellFormed(WellFormed::Trait(..)))) => true, - Goal::Quantified(QuantifierKind::ForAll, goal) => goal.value.is_coinductive(program), + /// True if this goal is "trivially true" -- i.e., no work is + /// required to prove it. + pub fn is_trivially_true(&self, interner: I) -> bool { + match self.data(interner) { + GoalData::All(goals) => goals.is_empty(interner), _ => false, } } } +impl Goal +where + I: Interner, +{ + /// Creates a single goal that only holds if a list of goals holds. + pub fn all(interner: I, iter: II) -> Self + where + II: IntoIterator>, + { + let mut iter = iter.into_iter(); + if let Some(goal0) = iter.next() { + if let Some(goal1) = iter.next() { + // More than one goal to prove + let goals = Goals::from_iter( + interner, + Some(goal0).into_iter().chain(Some(goal1)).chain(iter), + ); + GoalData::All(goals).intern(interner) + } else { + // One goal to prove + goal0 + } + } else { + // No goals to prove, always true + GoalData::All(Goals::empty(interner)).intern(interner) + } + } +} + +#[derive(Clone, PartialEq, Eq, Hash, TypeFoldable, TypeVisitable, HasInterner, Zip)] +/// A general goal; this is the full range of questions you can pose to Chalk. +pub enum GoalData { + /// Introduces a binding at depth 0, shifting other bindings up + /// (deBruijn index). + Quantified(QuantifierKind, Binders>), + + /// A goal that holds given some clauses (like an if-statement). + Implies(ProgramClauses, Goal), + + /// List of goals that all should hold. 
+ All(Goals), + + /// Negation: the inner goal should not hold. + Not(Goal), + + /// Make two things equal; the rules for doing so are well known to the logic + EqGoal(EqGoal), + + /// Make one thing a subtype of another; the rules for doing so are well known to the logic + SubtypeGoal(SubtypeGoal), + + /// A "domain goal" indicates some base sort of goal that can be + /// proven via program clauses + DomainGoal(DomainGoal), + + /// Indicates something that cannot be proven to be true or false + /// definitively. This can occur with overflow but also with + /// unifications of skolemized variables like `forall { X = Y + /// }`. Of course, that statement is false, as there exist types + /// X, Y where `X = Y` is not true. But we treat it as "cannot + /// prove" so that `forall { not { X = Y } }` also winds up + /// as cannot prove. + CannotProve, +} + +impl Copy for GoalData +where + I::InternedType: Copy, + I::InternedLifetime: Copy, + I::InternedGenericArg: Copy, + I::InternedSubstitution: Copy, + I::InternedGoal: Copy, + I::InternedGoals: Copy, + I::InternedProgramClauses: Copy, + I::InternedVariableKinds: Copy, +{ +} + +impl GoalData { + /// Create an interned goal. + pub fn intern(self, interner: I) -> Goal { + Goal::new(interner, self) + } +} + +/// Kinds of quantifiers in the logic, such as `forall` and `exists`. #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] pub enum QuantifierKind { + /// Universal quantifier `ForAll`. + /// + /// A formula with the universal quantifier `forall(x). P(x)` is satisfiable + /// if and only if the subformula `P(x)` is true for all possible values for x. ForAll, + + /// Existential quantifier `Exists`. + /// + /// A formula with the existential quantifier `exists(x). P(x)` is satisfiable + /// if and only if there exists at least one value for all possible values of x + /// which satisfies the subformula `P(x)`. 
+ + /// In the context of chalk, the existential quantifier usually demands the + /// existence of exactly one instance (i.e. type) that satisfies the formula + /// (i.e. type constraints). More than one instance means that the result is ambiguous. Exists, } @@ -900,29 +2643,25 @@ pub enum QuantifierKind { /// lifetime constraints, instead gathering them up to return with our solution /// for later checking. This allows for decoupling between type and region /// checking in the compiler. -#[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub enum Constraint { - LifetimeEq(Lifetime, Lifetime), +#[derive(Clone, PartialEq, Eq, Hash, TypeFoldable, TypeVisitable, HasInterner, Zip)] +pub enum Constraint { + /// Outlives constraint `'a: 'b`, indicating that the value of `'a` must be + /// a superset of the value of `'b`. + LifetimeOutlives(Lifetime, Lifetime), + + /// Type outlives constraint `T: 'a`, indicating that the type `T` must live + /// at least as long as the value of `'a`. + TypeOutlives(Ty, Lifetime), } -/// A mapping of inference variables to instantiations thereof. -#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct Substitution { - /// Map free variable with given index to the value with the same - /// index. Naturally, the kind of the variable must agree with - /// the kind of the value. - /// - /// This is a map because the substitution is not necessarily - /// complete. We use a btree map to ensure that the result is in a - /// deterministic order. - pub parameters: Vec, +impl Copy for Constraint +where + I::InternedLifetime: Copy, + I::InternedType: Copy, +{ } -impl Substitution { - pub fn is_empty(&self) -> bool { - self.parameters.is_empty() - } - +impl Substitution { /// A substitution is an **identity substitution** if it looks /// like this /// @@ -935,40 +2674,391 @@ impl Substitution { /// /// Basically, each value is mapped to a type or lifetime with its /// same index. 
- pub fn is_identity_subst(&self) -> bool { - self.parameters - .iter() - .zip(0..) - .all(|(parameter, index)| match parameter { - ParameterKind::Ty(Ty::BoundVar(depth)) => index == *depth, - ParameterKind::Lifetime(Lifetime::BoundVar(depth)) => index == *depth, - _ => false, - }) + pub fn is_identity_subst(&self, interner: I) -> bool { + self.iter(interner).zip(0..).all(|(generic_arg, index)| { + let index_db = BoundVar::new(DebruijnIndex::INNERMOST, index); + match generic_arg.data(interner) { + GenericArgData::Ty(ty) => match ty.kind(interner) { + TyKind::BoundVar(depth) => index_db == *depth, + _ => false, + }, + GenericArgData::Lifetime(lifetime) => match lifetime.data(interner) { + LifetimeData::BoundVar(depth) => index_db == *depth, + _ => false, + }, + GenericArgData::Const(constant) => match &constant.data(interner).value { + ConstValue::BoundVar(depth) => index_db == *depth, + _ => false, + }, + } + }) + } + + /// Apply the substitution to a value. + pub fn apply(&self, value: T, interner: I) -> T + where + T: TypeFoldable, + { + Substitute::apply(self, value, interner) + } + + /// Gets an iterator of all type parameters. + pub fn type_parameters(&self, interner: I) -> impl Iterator> + '_ { + self.iter(interner) + .filter_map(move |p| p.ty(interner)) + .cloned() + } + + /// Compute type flags for Substitution + fn compute_flags(&self, interner: I) -> TypeFlags { + let mut flags = TypeFlags::empty(); + for generic_arg in self.iter(interner) { + flags |= generic_arg.compute_flags(interner); + } + flags + } +} + +#[derive(FallibleTypeFolder)] +struct SubstFolder<'i, I: Interner, A: AsParameters> { + interner: I, + subst: &'i A, +} + +impl> SubstFolder<'_, I, A> { + /// Index into the list of parameters. + pub fn at(&self, index: usize) -> &GenericArg { + let interner = self.interner; + &self.subst.as_parameters(interner)[index] + } +} + +/// Convert a value to a list of parameters. +pub trait AsParameters { + /// Convert the current value to parameters. 
+ fn as_parameters(&self, interner: I) -> &[GenericArg]; +} + +impl AsParameters for Substitution { + #[allow(unreachable_code, unused_variables)] + fn as_parameters(&self, interner: I) -> &[GenericArg] { + self.as_slice(interner) + } +} + +impl AsParameters for [GenericArg] { + fn as_parameters(&self, _interner: I) -> &[GenericArg] { + self + } +} + +impl AsParameters for [GenericArg; 1] { + fn as_parameters(&self, _interner: I) -> &[GenericArg] { + self + } +} + +impl AsParameters for Vec> { + fn as_parameters(&self, _interner: I) -> &[GenericArg] { + self } } -impl<'a> DefaultTypeFolder for &'a Substitution {} +impl AsParameters for &T +where + T: ?Sized + AsParameters, +{ + fn as_parameters(&self, interner: I) -> &[GenericArg] { + T::as_parameters(self, interner) + } +} + +/// An extension trait to anything that can be represented as list of `GenericArg`s that signifies +/// that it can applied as a substituion to a value +pub trait Substitute: AsParameters { + /// Apply the substitution to a value. + fn apply>(&self, value: T, interner: I) -> T; +} + +impl> Substitute for A { + fn apply(&self, value: T, interner: I) -> T + where + T: TypeFoldable, + { + value + .try_fold_with( + &mut SubstFolder { + interner, + subst: self, + }, + DebruijnIndex::INNERMOST, + ) + .unwrap() + } +} + +/// Utility for converting a list of all the binders into scope +/// into references to those binders. Simply pair the binders with +/// the indices, and invoke `to_generic_arg()` on the `(binder, +/// index)` pair. The result will be a reference to a bound +/// variable of appropriate kind at the corresponding index. +pub trait ToGenericArg { + /// Converts the binders in scope to references to those binders. + fn to_generic_arg(&self, interner: I) -> GenericArg { + self.to_generic_arg_at_depth(interner, DebruijnIndex::INNERMOST) + } + + /// Converts the binders at the specified depth to references to those binders. 
+ fn to_generic_arg_at_depth(&self, interner: I, debruijn: DebruijnIndex) -> GenericArg; +} + +impl<'a, I: Interner> ToGenericArg for (usize, &'a VariableKind) { + fn to_generic_arg_at_depth(&self, interner: I, debruijn: DebruijnIndex) -> GenericArg { + let &(index, binder) = self; + let bound_var = BoundVar::new(debruijn, index); + binder.to_bound_variable(interner, bound_var) + } +} + +impl<'i, I: Interner, A: AsParameters> TypeFolder for SubstFolder<'i, I, A> { + fn as_dyn(&mut self) -> &mut dyn TypeFolder { + self + } + + fn fold_free_var_ty(&mut self, bound_var: BoundVar, outer_binder: DebruijnIndex) -> Ty { + assert_eq!(bound_var.debruijn, DebruijnIndex::INNERMOST); + let ty = self.at(bound_var.index); + let ty = ty.assert_ty_ref(TypeFolder::interner(self)); + ty.clone() + .shifted_in_from(TypeFolder::interner(self), outer_binder) + } + + fn fold_free_var_lifetime( + &mut self, + bound_var: BoundVar, + outer_binder: DebruijnIndex, + ) -> Lifetime { + assert_eq!(bound_var.debruijn, DebruijnIndex::INNERMOST); + let l = self.at(bound_var.index); + let l = l.assert_lifetime_ref(TypeFolder::interner(self)); + l.clone() + .shifted_in_from(TypeFolder::interner(self), outer_binder) + } + + fn fold_free_var_const( + &mut self, + _ty: Ty, + bound_var: BoundVar, + outer_binder: DebruijnIndex, + ) -> Const { + assert_eq!(bound_var.debruijn, DebruijnIndex::INNERMOST); + let c = self.at(bound_var.index); + let c = c.assert_const_ref(TypeFolder::interner(self)); + c.clone() + .shifted_in_from(TypeFolder::interner(self), outer_binder) + } + + fn interner(&self) -> I { + self.interner + } +} + +macro_rules! interned_slice_common { + ($seq:ident, $data:ident => $elem:ty, $intern:ident => $interned:ident) => { + /// List of interned elements. + #[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, HasInterner)] + pub struct $seq { + interned: I::$interned, + } + + impl $seq { + /// Get the interned elements. 
+ pub fn interned(&self) -> &I::$interned { + &self.interned + } + + /// Returns a slice containing the elements. + pub fn as_slice(&self, interner: I) -> &[$elem] { + Interner::$data(interner, &self.interned) + } + + /// Index into the sequence. + pub fn at(&self, interner: I, index: usize) -> &$elem { + &self.as_slice(interner)[index] + } + + /// Create an empty sequence. + pub fn empty(interner: I) -> Self { + Self::from_iter(interner, None::<$elem>) + } + + /// Check whether this is an empty sequence. + pub fn is_empty(&self, interner: I) -> bool { + self.as_slice(interner).is_empty() + } + + /// Get an iterator over the elements of the sequence. + pub fn iter(&self, interner: I) -> std::slice::Iter<'_, $elem> { + self.as_slice(interner).iter() + } + + /// Get the length of the sequence. + pub fn len(&self, interner: I) -> usize { + self.as_slice(interner).len() + } + } + }; +} + +macro_rules! interned_slice { + ($seq:ident, $data:ident => $elem:ty, $intern:ident => $interned:ident) => { + interned_slice_common!($seq, $data => $elem, $intern => $interned); + + impl $seq { + /// Tries to create a sequence using an iterator of element-like things. + pub fn from_fallible( + interner: I, + elements: impl IntoIterator, E>>, + ) -> Result { + Ok(Self { + interned: I::$intern(interner, elements.into_iter().casted(interner))?, + }) + } + + /// Create a sequence from elements + pub fn from_iter( + interner: I, + elements: impl IntoIterator>, + ) -> Self { + Self::from_fallible( + interner, + elements + .into_iter() + .map(|el| -> Result<$elem, ()> { Ok(el.cast(interner)) }), + ) + .unwrap() + } + + /// Create a sequence from a single element. 
+ pub fn from1(interner: I, element: impl CastTo<$elem>) -> Self { + Self::from_iter(interner, Some(element)) + } + } + }; +} -impl<'a> DefaultInferenceFolder for &'a Substitution {} +interned_slice!( + QuantifiedWhereClauses, + quantified_where_clauses_data => QuantifiedWhereClause, + intern_quantified_where_clauses => InternedQuantifiedWhereClauses +); + +interned_slice!( + ProgramClauses, + program_clauses_data => ProgramClause, + intern_program_clauses => InternedProgramClauses +); + +interned_slice!( + VariableKinds, + variable_kinds_data => VariableKind, + intern_generic_arg_kinds => InternedVariableKinds +); + +interned_slice!( + CanonicalVarKinds, + canonical_var_kinds_data => CanonicalVarKind, + intern_canonical_var_kinds => InternedCanonicalVarKinds +); + +interned_slice!(Goals, goals_data => Goal, intern_goals => InternedGoals); + +interned_slice!( + Constraints, + constraints_data => InEnvironment>, + intern_constraints => InternedConstraints +); + +interned_slice!( + Substitution, + substitution_data => GenericArg, + intern_substitution => InternedSubstitution +); + +interned_slice_common!( + Variances, + variances_data => Variance, + intern_variance => InternedVariances +); + +impl Variances { + /// Tries to create a list of variances using an iterator. + pub fn from_fallible( + interner: I, + variances: impl IntoIterator>, + ) -> Result { + Ok(Variances { + interned: I::intern_variances(interner, variances.into_iter())?, + }) + } -impl<'a> FreeVarFolder for &'a Substitution { - fn fold_free_var_ty(&mut self, depth: usize, binders: usize) -> Fallible { - let ty = &self.parameters[depth]; - let ty = ty.assert_ty_ref(); - Ok(ty.shifted_in(binders)) + /// Creates a list of variances using an iterator. 
+ pub fn from_iter(interner: I, variances: impl IntoIterator) -> Self { + Self::from_fallible( + interner, + variances + .into_iter() + .map(|p| -> Result { Ok(p) }), + ) + .unwrap() } - fn fold_free_var_lifetime(&mut self, depth: usize, binders: usize) -> Fallible { - let l = &self.parameters[depth]; - let l = l.assert_lifetime_ref(); - Ok(l.shifted_in(binders)) + /// Creates a list of variances from a single variance. + pub fn from1(interner: I, variance: Variance) -> Self { + Self::from_iter(interner, Some(variance)) } } -impl<'a> DefaultPlaceholderFolder for &'a Substitution {} +/// Combines a substitution (`subst`) with a set of region constraints +/// (`constraints`). This represents the result of a query; the +/// substitution stores the values for the query's unknown variables, +/// and the constraints represent any region constraints that must +/// additionally be solved. +#[derive(Clone, Debug, PartialEq, Eq, Hash, TypeFoldable, TypeVisitable, HasInterner)] +pub struct ConstrainedSubst { + /// The substitution that is being constrained. + /// + /// NB: The `is_trivial` routine relies on the fact that `subst` is folded first. + pub subst: Substitution, + + /// Region constraints that constrain the substitution. + pub constraints: Constraints, +} + +/// The resulting substitution after solving a goal. +#[derive(Clone, Debug, PartialEq, Eq, Hash, TypeFoldable, TypeVisitable, HasInterner)] +pub struct AnswerSubst { + /// The substitution result. + /// + /// NB: The `is_trivial` routine relies on the fact that `subst` is folded first. + pub subst: Substitution, + + /// List of constraints that are part of the answer. + pub constraints: Constraints, + + /// Delayed subgoals, used when the solver answered with an (incomplete) `Answer` (instead of a `CompleteAnswer`). 
+ pub delayed_subgoals: Vec>>, +} -#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct ConstrainedSubst { - pub subst: Substitution, - pub constraints: Vec>, +/// Logic to decide the Variance for a given subst +pub trait UnificationDatabase +where + Self: std::fmt::Debug, + I: Interner, +{ + /// Gets the variances for the substitution of a fn def + fn fn_def_variance(&self, fn_def_id: FnDefId) -> Variances; + + /// Gets the variances for the substitution of an adt + fn adt_variance(&self, adt_id: AdtId) -> Variances; } diff --git a/chalk-ir/src/macros.rs b/chalk-ir/src/macros.rs deleted file mode 100644 index 84cdcbd192b..00000000000 --- a/chalk-ir/src/macros.rs +++ /dev/null @@ -1,86 +0,0 @@ -//! Useful macros for writing unit tests. They let you gin up dummy types and things. - -#[macro_export] -macro_rules! ty { - (apply $n:tt $($arg:tt)*) => { - $crate::Ty::Apply(ApplicationTy { - name: ty_name!($n), - parameters: vec![$(arg!($arg)),*], - }) - }; - - (for_all $n:tt $t:tt) => { - $crate::Ty::ForAll(Box::new(QuantifiedTy { - num_binders: $n, - ty: ty!($t), - })) - }; - - (projection (item $n:tt) $($arg:tt)*) => { - $crate::Ty::Projection(ProjectionTy { - associated_ty_id: ItemId { index: $n }, - parameters: vec![$(arg!($arg)),*], - }) - }; - - (infer $b:expr) => { - $crate::Ty::InferenceVar($crate::InferenceVar::from($b)) - }; - - (bound $b:expr) => { - $crate::Ty::BoundVar($b) - }; - - (expr $b:expr) => { - $b.clone() - }; - - (($($b:tt)*)) => { - ty!($($b)*) - }; -} - -#[macro_export] -macro_rules! arg { - ((lifetime $b:tt)) => { - $crate::ParameterKind::Lifetime(lifetime!($b)) - }; - - ($arg:tt) => { - $crate::ParameterKind::Ty(ty!($arg)) - }; -} - -#[macro_export] -macro_rules! 
lifetime { - (infer $b:expr) => { - $crate::Lifetime::InferenceVar($crate::InferenceVar::from($b)) - }; - - (bound $b:expr) => { - $crate::Lifetime::BoundVar($b) - }; - - (placeholder $b:expr) => { - $crate::Lifetime::Placeholder(PlaceholderIndex { ui: UniverseIndex { counter: $b }, idx: 0}) - }; - - (expr $b:expr) => { - $b.clone() - }; - - (($($b:tt)*)) => { - lifetime!($($b)*) - }; -} - -#[macro_export] -macro_rules! ty_name { - ((item $n:expr)) => { $crate::TypeName::ItemId(ItemId { index: $n }) }; - ((placeholder $n:expr)) => { $crate::TypeName::Placeholder( - PlaceholderIndex { - ui: UniverseIndex { counter: $n }, - idx: 0, - }) - } -} diff --git a/chalk-ir/src/tls.rs b/chalk-ir/src/tls.rs deleted file mode 100644 index 7fdc74bb618..00000000000 --- a/chalk-ir/src/tls.rs +++ /dev/null @@ -1,40 +0,0 @@ -use ::ProjectionTy; -use std::cell::RefCell; -use std::fmt; -use std::sync::Arc; - -use ::ItemId; - -thread_local! { - static PROGRAM: RefCell>> = RefCell::new(None) -} - -pub trait DebugContext { - fn debug_item_id(&self, item_id: ItemId, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error>; - - fn debug_projection( - &self, - projection: &ProjectionTy, - fmt: &mut fmt::Formatter, - ) -> Result<(), fmt::Error>; -} - -pub fn with_current_program(op: impl FnOnce(Option<&Arc>) -> R) -> R { - PROGRAM.with(|prog_cell| { - let p = prog_cell.borrow(); - op(p.as_ref()) - }) -} - -pub fn set_current_program(p: &Arc, op: OP) -> R -where - OP: FnOnce() -> R, -{ - let p: Arc = p.clone(); - PROGRAM.with(|prog_cell| { - *prog_cell.borrow_mut() = Some(p); - let r = op(); - *prog_cell.borrow_mut() = None; - r - }) -} diff --git a/chalk-ir/src/visit.rs b/chalk-ir/src/visit.rs new file mode 100644 index 00000000000..e72ea1e6f02 --- /dev/null +++ b/chalk-ir/src/visit.rs @@ -0,0 +1,424 @@ +//! Traits for visiting bits of IR. 
+use std::fmt::Debug; +use std::ops::ControlFlow; + +use crate::{ + BoundVar, Const, ConstValue, DebruijnIndex, DomainGoal, Goal, InferenceVar, Interner, Lifetime, + LifetimeData, PlaceholderIndex, ProgramClause, Ty, TyKind, WhereClause, +}; + +mod binder_impls; +mod boring_impls; +pub mod visitors; + +pub use visitors::VisitExt; + +/// Unwraps a `ControlFlow` or propagates its `Break` value. +/// This replaces the `Try` implementation that would be used +/// with `std::ops::ControlFlow`. +#[macro_export] +macro_rules! try_break { + ($expr:expr) => { + match $expr { + std::ops::ControlFlow::Continue(c) => c, + std::ops::ControlFlow::Break(b) => return std::ops::ControlFlow::Break(b), + } + }; +} + +/// A "visitor" recursively folds some term -- that is, some bit of IR, +/// such as a `Goal`, and computes a value as a result. +/// +/// +/// To **apply** a visitor, use the `TypeVisitable::visit_with` method, like so +/// +/// ```rust,ignore +/// let result = x.visit_with(&mut visitor, 0); +/// ``` +pub trait TypeVisitor { + /// The "break type" of the visitor, often `()`. It represents the result + /// the visitor yields when it stops visiting. + type BreakTy; + + /// Creates a `dyn` value from this visitor. Unfortunately, this + /// must be added manually to each impl of visitor; it permits the + /// default implements below to create a `&mut dyn TypeVisitor` from + /// `Self` without knowing what `Self` is (by invoking this + /// method). Effectively, this limits impls of `visitor` to types + /// for which we are able to create a dyn value (i.e., not `[T]` + /// types). + fn as_dyn(&mut self) -> &mut dyn TypeVisitor; + + /// Top-level callback: invoked for each `Ty` that is + /// encountered when visiting. By default, invokes + /// `super_visit_with`, which will in turn invoke the more + /// specialized visiting methods below, like `visit_free_var`. 
+ fn visit_ty(&mut self, ty: &Ty, outer_binder: DebruijnIndex) -> ControlFlow { + ty.super_visit_with(self.as_dyn(), outer_binder) + } + + /// Top-level callback: invoked for each `Lifetime` that is + /// encountered when visiting. By default, invokes + /// `super_visit_with`, which will in turn invoke the more + /// specialized visiting methods below, like `visit_free_var`. + fn visit_lifetime( + &mut self, + lifetime: &Lifetime, + outer_binder: DebruijnIndex, + ) -> ControlFlow { + lifetime.super_visit_with(self.as_dyn(), outer_binder) + } + + /// Top-level callback: invoked for each `Const` that is + /// encountered when visiting. By default, invokes + /// `super_visit_with`, which will in turn invoke the more + /// specialized visiting methods below, like `visit_free_var`. + fn visit_const( + &mut self, + constant: &Const, + outer_binder: DebruijnIndex, + ) -> ControlFlow { + constant.super_visit_with(self.as_dyn(), outer_binder) + } + + /// Invoked for every program clause. By default, recursively visits the goals contents. + fn visit_program_clause( + &mut self, + clause: &ProgramClause, + outer_binder: DebruijnIndex, + ) -> ControlFlow { + clause.super_visit_with(self.as_dyn(), outer_binder) + } + + /// Invoked for every goal. By default, recursively visits the goals contents. + fn visit_goal( + &mut self, + goal: &Goal, + outer_binder: DebruijnIndex, + ) -> ControlFlow { + goal.super_visit_with(self.as_dyn(), outer_binder) + } + + /// Invoked for each domain goal. + fn visit_domain_goal( + &mut self, + domain_goal: &DomainGoal, + outer_binder: DebruijnIndex, + ) -> ControlFlow { + domain_goal.super_visit_with(self.as_dyn(), outer_binder) + } + + /// If overridden to return true, then visiting will panic if a + /// free variable is encountered. This should be done if free + /// type/lifetime/const variables are not expected. 
+ fn forbid_free_vars(&self) -> bool { + false + } + + /// Invoked for `BoundVar` instances that are not bound + /// within the type being visited over: + fn visit_free_var( + &mut self, + bound_var: BoundVar, + outer_binder: DebruijnIndex, + ) -> ControlFlow { + if self.forbid_free_vars() { + panic!( + "unexpected free variable `{:?}` with outer binder {:?}", + bound_var, outer_binder + ) + } else { + ControlFlow::Continue(()) + } + } + + /// If overridden to return true, we will panic when a free + /// placeholder type/lifetime is encountered. + fn forbid_free_placeholders(&self) -> bool { + false + } + + /// Invoked for each occurrence of a placeholder type; these are + /// used when we instantiate binders universally. + fn visit_free_placeholder( + &mut self, + universe: PlaceholderIndex, + _outer_binder: DebruijnIndex, + ) -> ControlFlow { + if self.forbid_free_placeholders() { + panic!("unexpected placeholder type `{:?}`", universe) + } else { + ControlFlow::Continue(()) + } + } + + /// Invoked for each where clause. + fn visit_where_clause( + &mut self, + where_clause: &WhereClause, + outer_binder: DebruijnIndex, + ) -> ControlFlow { + where_clause.super_visit_with(self.as_dyn(), outer_binder) + } + + /// If overridden to return true, inference variables will trigger + /// panics when visited. Used when inference variables are + /// unexpected. + fn forbid_inference_vars(&self) -> bool { + false + } + + /// Invoked for each occurrence of a inference type; these are + /// used when we instantiate binders universally. + fn visit_inference_var( + &mut self, + var: InferenceVar, + _outer_binder: DebruijnIndex, + ) -> ControlFlow { + if self.forbid_inference_vars() { + panic!("unexpected inference type `{:?}`", var) + } else { + ControlFlow::Continue(()) + } + } + + /// Gets the visitor's interner. + fn interner(&self) -> I; +} + +/// Applies the given `visitor` to a value, producing a visited result +/// of type `TypeVisitor::Result`. 
+pub trait TypeVisitable: Debug { + /// Apply the given visitor `visitor` to `self`; `binders` is the + /// number of binders that are in scope when beginning the + /// visitor. Typically `binders` starts as 0, but is adjusted when + /// we encounter `Binders` in the IR or other similar + /// constructs. + fn visit_with( + &self, + visitor: &mut dyn TypeVisitor, + outer_binder: DebruijnIndex, + ) -> ControlFlow; +} + +/// For types where "visit" invokes a callback on the `visitor`, the +/// `TypeSuperVisitable` trait captures the recursive behavior that visits all +/// the contents of the type. +pub trait TypeSuperVisitable: TypeVisitable { + /// Recursively visits the type contents. + fn super_visit_with( + &self, + visitor: &mut dyn TypeVisitor, + outer_binder: DebruijnIndex, + ) -> ControlFlow; +} + +/// "visiting" a type invokes the `visit_ty` method on the visitor; this +/// usually (in turn) invokes `super_visit_ty` to visit the individual +/// parts. +impl TypeVisitable for Ty { + fn visit_with( + &self, + visitor: &mut dyn TypeVisitor, + outer_binder: DebruijnIndex, + ) -> ControlFlow { + visitor.visit_ty(self, outer_binder) + } +} + +/// "Super visit" for a type invokes the more detailed callbacks on the type +impl TypeSuperVisitable for Ty +where + I: Interner, +{ + fn super_visit_with( + &self, + visitor: &mut dyn TypeVisitor, + outer_binder: DebruijnIndex, + ) -> ControlFlow { + let interner = visitor.interner(); + match self.kind(interner) { + TyKind::BoundVar(bound_var) => { + if bound_var.shifted_out_to(outer_binder).is_some() { + visitor.visit_free_var(*bound_var, outer_binder) + } else { + ControlFlow::Continue(()) + } + } + TyKind::Dyn(clauses) => clauses.visit_with(visitor, outer_binder), + TyKind::InferenceVar(var, _) => visitor.visit_inference_var(*var, outer_binder), + TyKind::Placeholder(ui) => visitor.visit_free_placeholder(*ui, outer_binder), + TyKind::Alias(proj) => proj.visit_with(visitor, outer_binder), + TyKind::Function(fun) => 
fun.visit_with(visitor, outer_binder), + TyKind::Adt(_id, substitution) => substitution.visit_with(visitor, outer_binder), + TyKind::AssociatedType(_assoc_ty, substitution) => { + substitution.visit_with(visitor, outer_binder) + } + TyKind::Scalar(scalar) => scalar.visit_with(visitor, outer_binder), + TyKind::Str => ControlFlow::Continue(()), + TyKind::Tuple(arity, substitution) => { + try_break!(arity.visit_with(visitor, outer_binder)); + substitution.visit_with(visitor, outer_binder) + } + TyKind::OpaqueType(opaque_ty, substitution) => { + try_break!(opaque_ty.visit_with(visitor, outer_binder)); + substitution.visit_with(visitor, outer_binder) + } + TyKind::Slice(substitution) => substitution.visit_with(visitor, outer_binder), + TyKind::FnDef(fn_def, substitution) => { + try_break!(fn_def.visit_with(visitor, outer_binder)); + substitution.visit_with(visitor, outer_binder) + } + TyKind::Ref(mutability, lifetime, ty) => { + try_break!(mutability.visit_with(visitor, outer_binder)); + try_break!(lifetime.visit_with(visitor, outer_binder)); + ty.visit_with(visitor, outer_binder) + } + TyKind::Raw(mutability, ty) => { + try_break!(mutability.visit_with(visitor, outer_binder)); + ty.visit_with(visitor, outer_binder) + } + TyKind::Never => ControlFlow::Continue(()), + TyKind::Array(ty, const_) => { + try_break!(ty.visit_with(visitor, outer_binder)); + const_.visit_with(visitor, outer_binder) + } + TyKind::Closure(id, substitution) => { + try_break!(id.visit_with(visitor, outer_binder)); + substitution.visit_with(visitor, outer_binder) + } + TyKind::Coroutine(coroutine, substitution) => { + try_break!(coroutine.visit_with(visitor, outer_binder)); + substitution.visit_with(visitor, outer_binder) + } + TyKind::CoroutineWitness(witness, substitution) => { + try_break!(witness.visit_with(visitor, outer_binder)); + substitution.visit_with(visitor, outer_binder) + } + TyKind::Foreign(foreign_ty) => foreign_ty.visit_with(visitor, outer_binder), + TyKind::Error => 
ControlFlow::Continue(()), + } + } +} + +impl TypeVisitable for Lifetime { + fn visit_with( + &self, + visitor: &mut dyn TypeVisitor, + outer_binder: DebruijnIndex, + ) -> ControlFlow { + visitor.visit_lifetime(self, outer_binder) + } +} + +impl TypeSuperVisitable for Lifetime { + fn super_visit_with( + &self, + visitor: &mut dyn TypeVisitor, + outer_binder: DebruijnIndex, + ) -> ControlFlow { + let interner = visitor.interner(); + match self.data(interner) { + LifetimeData::BoundVar(bound_var) => { + if bound_var.shifted_out_to(outer_binder).is_some() { + visitor.visit_free_var(*bound_var, outer_binder) + } else { + ControlFlow::Continue(()) + } + } + LifetimeData::InferenceVar(var) => visitor.visit_inference_var(*var, outer_binder), + LifetimeData::Placeholder(universe) => { + visitor.visit_free_placeholder(*universe, outer_binder) + } + LifetimeData::Static | LifetimeData::Erased | LifetimeData::Error => { + ControlFlow::Continue(()) + } + LifetimeData::Phantom(void, ..) => match *void {}, + } + } +} + +impl TypeVisitable for Const { + fn visit_with( + &self, + visitor: &mut dyn TypeVisitor, + outer_binder: DebruijnIndex, + ) -> ControlFlow { + visitor.visit_const(self, outer_binder) + } +} + +impl TypeSuperVisitable for Const { + fn super_visit_with( + &self, + visitor: &mut dyn TypeVisitor, + outer_binder: DebruijnIndex, + ) -> ControlFlow { + let interner = visitor.interner(); + match &self.data(interner).value { + ConstValue::BoundVar(bound_var) => { + if bound_var.shifted_out_to(outer_binder).is_some() { + visitor.visit_free_var(*bound_var, outer_binder) + } else { + ControlFlow::Continue(()) + } + } + ConstValue::InferenceVar(var) => visitor.visit_inference_var(*var, outer_binder), + ConstValue::Placeholder(universe) => { + visitor.visit_free_placeholder(*universe, outer_binder) + } + ConstValue::Concrete(_) => ControlFlow::Continue(()), + } + } +} + +impl TypeVisitable for Goal { + fn visit_with( + &self, + visitor: &mut dyn TypeVisitor, + outer_binder: 
DebruijnIndex, + ) -> ControlFlow { + visitor.visit_goal(self, outer_binder) + } +} + +impl TypeSuperVisitable for Goal { + fn super_visit_with( + &self, + visitor: &mut dyn TypeVisitor, + outer_binder: DebruijnIndex, + ) -> ControlFlow { + let interner = visitor.interner(); + self.data(interner).visit_with(visitor, outer_binder) + } +} + +impl TypeVisitable for ProgramClause { + fn visit_with( + &self, + visitor: &mut dyn TypeVisitor, + outer_binder: DebruijnIndex, + ) -> ControlFlow { + visitor.visit_program_clause(self, outer_binder) + } +} + +impl TypeVisitable for WhereClause { + fn visit_with( + &self, + visitor: &mut dyn TypeVisitor, + outer_binder: DebruijnIndex, + ) -> ControlFlow { + visitor.visit_where_clause(self, outer_binder) + } +} + +impl TypeVisitable for DomainGoal { + fn visit_with( + &self, + visitor: &mut dyn TypeVisitor, + outer_binder: DebruijnIndex, + ) -> ControlFlow { + visitor.visit_domain_goal(self, outer_binder) + } +} diff --git a/chalk-ir/src/visit/binder_impls.rs b/chalk-ir/src/visit/binder_impls.rs new file mode 100644 index 00000000000..709f9904425 --- /dev/null +++ b/chalk-ir/src/visit/binder_impls.rs @@ -0,0 +1,47 @@ +//! This module contains impls of `TypeVisitable` for those types that +//! introduce binders. +//! +//! The more interesting impls of `TypeVisitable` remain in the `visit` module. 
+ +use crate::interner::HasInterner; +use crate::{ + Binders, Canonical, ControlFlow, DebruijnIndex, FnPointer, Interner, TypeVisitable, TypeVisitor, +}; + +impl TypeVisitable for FnPointer { + fn visit_with( + &self, + visitor: &mut dyn TypeVisitor, + outer_binder: DebruijnIndex, + ) -> ControlFlow { + self.substitution + .visit_with(visitor, outer_binder.shifted_in()) + } +} + +impl TypeVisitable for Binders +where + T: HasInterner + TypeVisitable, +{ + fn visit_with( + &self, + visitor: &mut dyn TypeVisitor, + outer_binder: DebruijnIndex, + ) -> ControlFlow { + self.value.visit_with(visitor, outer_binder.shifted_in()) + } +} + +impl TypeVisitable for Canonical +where + I: Interner, + T: HasInterner + TypeVisitable, +{ + fn visit_with( + &self, + visitor: &mut dyn TypeVisitor, + outer_binder: DebruijnIndex, + ) -> ControlFlow { + self.value.visit_with(visitor, outer_binder.shifted_in()) + } +} diff --git a/chalk-ir/src/visit/boring_impls.rs b/chalk-ir/src/visit/boring_impls.rs new file mode 100644 index 00000000000..462bacc64d6 --- /dev/null +++ b/chalk-ir/src/visit/boring_impls.rs @@ -0,0 +1,261 @@ +//! This module contains "rote and uninteresting" impls of `TypeVisitable` for +//! various types. In general, we prefer to derive `TypeVisitable`, but +//! sometimes that doesn't work for whatever reason. +//! +//! The more interesting impls of `TypeVisitable` remain in the `visit` module. + +use crate::{ + try_break, AdtId, AssocTypeId, ClausePriority, ClosureId, Constraints, ControlFlow, + CoroutineId, DebruijnIndex, FloatTy, FnDefId, ForeignDefId, GenericArg, Goals, ImplId, IntTy, + Interner, Mutability, OpaqueTyId, PlaceholderIndex, ProgramClause, ProgramClauses, + QuantifiedWhereClauses, QuantifierKind, Safety, Scalar, Substitution, TraitId, + TypeSuperVisitable, TypeVisitable, TypeVisitor, UintTy, UniverseIndex, +}; +use std::{marker::PhantomData, sync::Arc}; + +/// Convenience function to visit all the items in the iterator it. 
+pub fn visit_iter<'i, T, I, B>( + it: impl Iterator, + visitor: &mut dyn TypeVisitor, + outer_binder: DebruijnIndex, +) -> ControlFlow +where + T: TypeVisitable, + I: 'i + Interner, +{ + for e in it { + try_break!(e.visit_with(visitor, outer_binder)); + } + ControlFlow::Continue(()) +} + +impl, I: Interner> TypeVisitable for &T { + fn visit_with( + &self, + visitor: &mut dyn TypeVisitor, + outer_binder: DebruijnIndex, + ) -> ControlFlow { + T::visit_with(self, visitor, outer_binder) + } +} + +impl, I: Interner> TypeVisitable for Vec { + fn visit_with( + &self, + visitor: &mut dyn TypeVisitor, + outer_binder: DebruijnIndex, + ) -> ControlFlow { + visit_iter(self.iter(), visitor, outer_binder) + } +} + +impl, I: Interner> TypeVisitable for &[T] { + fn visit_with( + &self, + visitor: &mut dyn TypeVisitor, + outer_binder: DebruijnIndex, + ) -> ControlFlow { + visit_iter(self.iter(), visitor, outer_binder) + } +} + +impl, I: Interner> TypeVisitable for Box { + fn visit_with( + &self, + visitor: &mut dyn TypeVisitor, + outer_binder: DebruijnIndex, + ) -> ControlFlow { + T::visit_with(self, visitor, outer_binder) + } +} + +impl, I: Interner> TypeVisitable for Arc { + fn visit_with( + &self, + visitor: &mut dyn TypeVisitor, + outer_binder: DebruijnIndex, + ) -> ControlFlow { + T::visit_with(self, visitor, outer_binder) + } +} + +macro_rules! 
tuple_visit { + ($($n:ident),*) => { + impl<$($n: TypeVisitable,)* I: Interner> TypeVisitable for ($($n,)*) { + fn visit_with(&self, visitor: &mut dyn TypeVisitor, outer_binder: DebruijnIndex) -> ControlFlow { + #[allow(non_snake_case)] + let &($(ref $n),*) = self; + $( + try_break!($n.visit_with(visitor, outer_binder)); + )* + ControlFlow::Continue(()) + } + } + } +} + +tuple_visit!(A, B); +tuple_visit!(A, B, C); +tuple_visit!(A, B, C, D); +tuple_visit!(A, B, C, D, E); + +impl, I: Interner> TypeVisitable for Option { + fn visit_with( + &self, + visitor: &mut dyn TypeVisitor, + outer_binder: DebruijnIndex, + ) -> ControlFlow { + match self { + Some(e) => e.visit_with(visitor, outer_binder), + None => ControlFlow::Continue(()), + } + } +} + +impl TypeVisitable for GenericArg { + fn visit_with( + &self, + visitor: &mut dyn TypeVisitor, + outer_binder: DebruijnIndex, + ) -> ControlFlow { + let interner = visitor.interner(); + self.data(interner).visit_with(visitor, outer_binder) + } +} + +impl TypeVisitable for Substitution { + fn visit_with( + &self, + visitor: &mut dyn TypeVisitor, + outer_binder: DebruijnIndex, + ) -> ControlFlow { + let interner = visitor.interner(); + visit_iter(self.iter(interner), visitor, outer_binder) + } +} + +impl TypeVisitable for Goals { + fn visit_with( + &self, + visitor: &mut dyn TypeVisitor, + outer_binder: DebruijnIndex, + ) -> ControlFlow { + let interner = visitor.interner(); + visit_iter(self.iter(interner), visitor, outer_binder) + } +} + +#[doc(hidden)] +#[macro_export] +macro_rules! 
const_visit { + ($t:ty) => { + impl $crate::visit::TypeVisitable for $t { + fn visit_with( + &self, + _visitor: &mut dyn ($crate::visit::TypeVisitor), + _outer_binder: DebruijnIndex, + ) -> ControlFlow { + ControlFlow::Continue(()) + } + } + }; +} + +const_visit!(bool); +const_visit!(usize); +const_visit!(UniverseIndex); +const_visit!(PlaceholderIndex); +const_visit!(QuantifierKind); +const_visit!(DebruijnIndex); +const_visit!(ClausePriority); +const_visit!(()); +const_visit!(Scalar); +const_visit!(UintTy); +const_visit!(IntTy); +const_visit!(FloatTy); +const_visit!(Mutability); +const_visit!(Safety); + +#[doc(hidden)] +#[macro_export] +macro_rules! id_visit { + ($t:ident) => { + impl $crate::visit::TypeVisitable for $t { + fn visit_with( + &self, + _visitor: &mut dyn ($crate::visit::TypeVisitor), + _outer_binder: DebruijnIndex, + ) -> ControlFlow { + ControlFlow::Continue(()) + } + } + }; +} + +id_visit!(ImplId); +id_visit!(AdtId); +id_visit!(TraitId); +id_visit!(OpaqueTyId); +id_visit!(AssocTypeId); +id_visit!(FnDefId); +id_visit!(ClosureId); +id_visit!(CoroutineId); +id_visit!(ForeignDefId); + +impl TypeSuperVisitable for ProgramClause { + fn super_visit_with( + &self, + visitor: &mut dyn TypeVisitor, + outer_binder: DebruijnIndex, + ) -> ControlFlow { + let interner = visitor.interner(); + + self.data(interner).0.visit_with(visitor, outer_binder) + } +} + +impl TypeVisitable for ProgramClauses { + fn visit_with( + &self, + visitor: &mut dyn TypeVisitor, + outer_binder: DebruijnIndex, + ) -> ControlFlow { + let interner = visitor.interner(); + + visit_iter(self.iter(interner), visitor, outer_binder) + } +} + +impl TypeVisitable for Constraints { + fn visit_with( + &self, + visitor: &mut dyn TypeVisitor, + outer_binder: DebruijnIndex, + ) -> ControlFlow { + let interner = visitor.interner(); + + visit_iter(self.iter(interner), visitor, outer_binder) + } +} + +impl TypeVisitable for QuantifiedWhereClauses { + fn visit_with( + &self, + visitor: &mut dyn 
TypeVisitor, + outer_binder: DebruijnIndex, + ) -> ControlFlow { + let interner = visitor.interner(); + + visit_iter(self.iter(interner), visitor, outer_binder) + } +} + +impl TypeVisitable for PhantomData { + fn visit_with( + &self, + _visitor: &mut dyn TypeVisitor, + _outer_binder: DebruijnIndex, + ) -> ControlFlow { + ControlFlow::Continue(()) + } +} diff --git a/chalk-ir/src/visit/visitors.rs b/chalk-ir/src/visit/visitors.rs new file mode 100644 index 00000000000..51c08784041 --- /dev/null +++ b/chalk-ir/src/visit/visitors.rs @@ -0,0 +1,41 @@ +//! TypeVisitor helpers + +use crate::{BoundVar, ControlFlow, DebruijnIndex, Interner, TypeVisitable, TypeVisitor}; + +/// TypeVisitor extensions. +pub trait VisitExt: TypeVisitable { + /// Check whether there are free (non-bound) variables. + fn has_free_vars(&self, interner: I) -> bool { + let flow = self.visit_with( + &mut FindFreeVarsVisitor { interner }, + DebruijnIndex::INNERMOST, + ); + matches!(flow, ControlFlow::Break(_)) + } +} + +impl VisitExt for T where T: TypeVisitable {} + +struct FindFreeVarsVisitor { + interner: I, +} + +impl TypeVisitor for FindFreeVarsVisitor { + type BreakTy = (); + + fn as_dyn(&mut self) -> &mut dyn TypeVisitor { + self + } + + fn interner(&self) -> I { + self.interner + } + + fn visit_free_var( + &mut self, + _bound_var: BoundVar, + _outer_binder: DebruijnIndex, + ) -> ControlFlow<()> { + ControlFlow::Break(()) + } +} diff --git a/chalk-ir/src/zip.rs b/chalk-ir/src/zip.rs index de48a5f31e9..585ff661d17 100644 --- a/chalk-ir/src/zip.rs +++ b/chalk-ir/src/zip.rs @@ -1,5 +1,7 @@ -use fold::Fold; -use ::*; +//! Traits for "zipping" types, walking through two structures and checking that they match. + +use crate::fold::TypeFoldable; +use crate::*; use std::fmt::Debug; use std::sync::Arc; @@ -19,35 +21,94 @@ use std::sync::Arc; /// represented by two distinct `ItemId` values, and the impl for /// `ItemId` requires that all `ItemId` in the two zipped values match /// up. 
-pub trait Zipper { - /// Indicates that the two types `a` and `b` were found in - /// matching spots, beneath `binders` levels of binders. - fn zip_tys(&mut self, a: &Ty, b: &Ty) -> Fallible<()>; +pub trait Zipper { + /// Indicates that the two types `a` and `b` were found in matching spots. + fn zip_tys(&mut self, variance: Variance, a: &Ty, b: &Ty) -> Fallible<()>; + + /// Indicates that the two lifetimes `a` and `b` were found in matching spots. + fn zip_lifetimes( + &mut self, + variance: Variance, + a: &Lifetime, + b: &Lifetime, + ) -> Fallible<()>; - /// Indicates that the two lifetimes `a` and `b` were found in - /// matching spots, beneath `binders` levels of binders. - fn zip_lifetimes(&mut self, a: &Lifetime, b: &Lifetime) -> Fallible<()>; + /// Indicates that the two consts `a` and `b` were found in matching spots. + fn zip_consts(&mut self, variance: Variance, a: &Const, b: &Const) -> Fallible<()>; /// Zips two values appearing beneath binders. - fn zip_binders(&mut self, a: &Binders, b: &Binders) -> Fallible<()> + fn zip_binders( + &mut self, + variance: Variance, + a: &Binders, + b: &Binders, + ) -> Fallible<()> where - T: Zip + Fold; + T: Clone + HasInterner + Zip + TypeFoldable; + + /// Zips two substs + fn zip_substs( + &mut self, + ambient: Variance, + variances: Option>, + a: &[GenericArg], + b: &[GenericArg], + ) -> Fallible<()> + where + Self: Sized, + { + for (i, (a, b)) in a.iter().zip(b.iter()).enumerate() { + let variance = variances + .as_ref() + .map(|v| v.as_slice(self.interner())[i]) + .unwrap_or(Variance::Invariant); + Zip::zip_with(self, ambient.xform(variance), a, b)?; + } + Ok(()) + } + + /// Retrieves the interner from the underlying zipper object + fn interner(&self) -> I; + + /// Retrieves the `UnificationDatabase` from the underlying zipper object + fn unification_database(&self) -> &dyn UnificationDatabase; } -impl<'f, Z: Zipper> Zipper for &'f mut Z { - fn zip_tys(&mut self, a: &Ty, b: &Ty) -> Fallible<()> { - 
(**self).zip_tys(a, b) +impl<'f, Z, I> Zipper for &'f mut Z +where + I: Interner, + Z: Zipper, +{ + fn zip_tys(&mut self, variance: Variance, a: &Ty, b: &Ty) -> Fallible<()> { + (**self).zip_tys(variance, a, b) + } + + fn zip_lifetimes( + &mut self, + variance: Variance, + a: &Lifetime, + b: &Lifetime, + ) -> Fallible<()> { + (**self).zip_lifetimes(variance, a, b) } - fn zip_lifetimes(&mut self, a: &Lifetime, b: &Lifetime) -> Fallible<()> { - (**self).zip_lifetimes(a, b) + fn zip_consts(&mut self, variance: Variance, a: &Const, b: &Const) -> Fallible<()> { + (**self).zip_consts(variance, a, b) } - fn zip_binders(&mut self, a: &Binders, b: &Binders) -> Fallible<()> + fn zip_binders(&mut self, variance: Variance, a: &Binders, b: &Binders) -> Fallible<()> where - T: Zip + Fold, + T: Clone + HasInterner + Zip + TypeFoldable, { - (**self).zip_binders(a, b) + (**self).zip_binders(variance, a, b) + } + + fn interner(&self) -> I { + Z::interner(*self) + } + + fn unification_database(&self) -> &dyn UnificationDatabase { + (**self).unification_database() } } @@ -58,86 +119,158 @@ impl<'f, Z: Zipper> Zipper for &'f mut Z { /// /// To implement the trait, typically you would use one of the macros /// like `eq_zip!`, `struct_zip!`, or `enum_zip!`. -pub trait Zip: Debug { - fn zip_with(zipper: &mut Z, a: &Self, b: &Self) -> Fallible<()>; +pub trait Zip: Debug +where + I: Interner, +{ + /// Uses the zipper to walk through two values, ensuring that they match. 
+ fn zip_with>( + zipper: &mut Z, + variance: Variance, + a: &Self, + b: &Self, + ) -> Fallible<()>; } -impl<'a, T: ?Sized + Zip> Zip for &'a T { - fn zip_with(zipper: &mut Z, a: &Self, b: &Self) -> Fallible<()> { - ::zip_with(zipper, a, b) +impl<'a, T: ?Sized + Zip, I: Interner> Zip for &'a T { + fn zip_with>( + zipper: &mut Z, + variance: Variance, + a: &Self, + b: &Self, + ) -> Fallible<()> { + >::zip_with(zipper, variance, a, b) } } -impl Zip for () { - fn zip_with(_: &mut Z, _: &Self, _: &Self) -> Fallible<()> { +impl Zip for () { + fn zip_with>(_: &mut Z, _: Variance, _: &Self, _: &Self) -> Fallible<()> { Ok(()) } } -impl Zip for Vec { - fn zip_with(zipper: &mut Z, a: &Self, b: &Self) -> Fallible<()> { - <[T] as Zip>::zip_with(zipper, a, b) +impl, I: Interner> Zip for Vec { + fn zip_with>( + zipper: &mut Z, + variance: Variance, + a: &Self, + b: &Self, + ) -> Fallible<()> { + <[T] as Zip>::zip_with(zipper, variance, a, b) } } -impl Zip for [T] { - fn zip_with(zipper: &mut Z, a: &Self, b: &Self) -> Fallible<()> { +impl, I: Interner> Zip for [T] { + fn zip_with>( + zipper: &mut Z, + variance: Variance, + a: &Self, + b: &Self, + ) -> Fallible<()> { if a.len() != b.len() { return Err(NoSolution); } for (a_elem, b_elem) in a.iter().zip(b) { - Zip::zip_with(zipper, a_elem, b_elem)?; + Zip::zip_with(zipper, variance, a_elem, b_elem)?; } Ok(()) } } -impl Zip for Arc { - fn zip_with(zipper: &mut Z, a: &Self, b: &Self) -> Fallible<()> { - ::zip_with(zipper, a, b) +impl, I: Interner> Zip for Arc { + fn zip_with>( + zipper: &mut Z, + variance: Variance, + a: &Self, + b: &Self, + ) -> Fallible<()> { + >::zip_with(zipper, variance, a, b) } } -impl Zip for Box { - fn zip_with(zipper: &mut Z, a: &Self, b: &Self) -> Fallible<()> { - ::zip_with(zipper, a, b) +impl, I: Interner> Zip for Box { + fn zip_with>( + zipper: &mut Z, + variance: Variance, + a: &Self, + b: &Self, + ) -> Fallible<()> { + >::zip_with(zipper, variance, a, b) } } -impl Zip for (T, U) { - fn zip_with(zipper: 
&mut Z, a: &Self, b: &Self) -> Fallible<()> { - Zip::zip_with(zipper, &a.0, &b.0)?; - Zip::zip_with(zipper, &a.1, &b.1)?; +impl, U: Zip, I: Interner> Zip for (T, U) { + fn zip_with>( + zipper: &mut Z, + variance: Variance, + a: &Self, + b: &Self, + ) -> Fallible<()> { + Zip::zip_with(zipper, variance, &a.0, &b.0)?; + Zip::zip_with(zipper, variance, &a.1, &b.1)?; Ok(()) } } -impl Zip for Ty { - fn zip_with(zipper: &mut Z, a: &Self, b: &Self) -> Fallible<()> { - zipper.zip_tys(a, b) +impl Zip for Ty { + fn zip_with>( + zipper: &mut Z, + variance: Variance, + a: &Self, + b: &Self, + ) -> Fallible<()> { + zipper.zip_tys(variance, a, b) } } -impl Zip for Lifetime { - fn zip_with(zipper: &mut Z, a: &Self, b: &Self) -> Fallible<()> { - zipper.zip_lifetimes(a, b) +impl Zip for Lifetime { + fn zip_with>( + zipper: &mut Z, + variance: Variance, + a: &Self, + b: &Self, + ) -> Fallible<()> { + zipper.zip_lifetimes(variance, a, b) } } -impl> Zip for Binders { - fn zip_with(zipper: &mut Z, a: &Self, b: &Self) -> Fallible<()> { - zipper.zip_binders(a, b) +impl Zip for Const { + fn zip_with>( + zipper: &mut Z, + variance: Variance, + a: &Self, + b: &Self, + ) -> Fallible<()> { + zipper.zip_consts(variance, a, b) + } +} +impl Zip for Binders +where + T: Clone + HasInterner + Zip + TypeFoldable, +{ + fn zip_with>( + zipper: &mut Z, + variance: Variance, + a: &Self, + b: &Self, + ) -> Fallible<()> { + zipper.zip_binders(variance, a, b) } } /// Generates a Zip impl that requires the two values be /// equal. Suitable for atomic, scalar values. macro_rules! eq_zip { - ($t:ty) => { - impl Zip for $t { - fn zip_with(_zipper: &mut Z, a: &Self, b: &Self) -> Fallible<()> { + ($I:ident => $t:ty) => { + impl<$I: Interner> Zip<$I> for $t { + fn zip_with>( + _zipper: &mut Z, + _variance: Variance, + a: &Self, + b: &Self, + ) -> Fallible<()> { if a != b { return Err(NoSolution); } @@ -147,150 +280,264 @@ macro_rules! 
eq_zip { }; } -eq_zip!(ItemId); -eq_zip!(TypeName); -eq_zip!(Identifier); -eq_zip!(QuantifierKind); - -macro_rules! struct_zip { - ($t:ident$([$($param:tt)*])* { $($field:ident),* $(,)* } $($w:tt)*) => { - impl$(<$($param)*>)* Zip for $t $(<$($param)*>)* $($w)* { - fn zip_with(zipper: &mut Z, a: &Self, b: &Self) -> Fallible<()> { - // Validate that we have indeed listed all fields - let $t { $($field: _),* } = *a; - $( - Zip::zip_with(zipper, &a.$field, &b.$field)?; - )* - Ok(()) - } - } +eq_zip!(I => AdtId); +eq_zip!(I => TraitId); +eq_zip!(I => AssocTypeId); +eq_zip!(I => OpaqueTyId); +eq_zip!(I => CoroutineId); +eq_zip!(I => ForeignDefId); +eq_zip!(I => FnDefId); +eq_zip!(I => ClosureId); +eq_zip!(I => QuantifierKind); +eq_zip!(I => PhantomData); +eq_zip!(I => PlaceholderIndex); +eq_zip!(I => ClausePriority); +eq_zip!(I => Mutability); +eq_zip!(I => Scalar); + +impl + Zip, I: Interner> Zip for InEnvironment { + fn zip_with>( + zipper: &mut Z, + variance: Variance, + a: &Self, + b: &Self, + ) -> Fallible<()> { + Zip::zip_with(zipper, variance, &a.environment, &b.environment)?; + Zip::zip_with(zipper, variance, &a.goal, &b.goal)?; + Ok(()) } } -/// Generates a Zip impl that zips each field of the struct in turn. 
-struct_zip!(TraitRef { - trait_id, - parameters, -}); -struct_zip!(InEnvironment[T] { environment, goal } where T: Zip); -struct_zip!(ApplicationTy { name, parameters }); -struct_zip!(ProjectionTy { - associated_ty_id, - parameters, -}); -struct_zip!(UnselectedProjectionTy { - type_name, - parameters, -}); -struct_zip!(Normalize { projection, ty }); -struct_zip!(ProjectionEq { projection, ty }); -struct_zip!(UnselectedNormalize { projection, ty }); -struct_zip!(EqGoal { a, b }); -struct_zip!(ProgramClauseImplication { - consequence, - conditions -}); -struct_zip!(Derefs { source, target }); - -impl Zip for Environment { - fn zip_with(zipper: &mut Z, a: &Self, b: &Self) -> Fallible<()> { - assert_eq!(a.clauses.len(), b.clauses.len()); // or different numbers of clauses - Zip::zip_with(zipper, &a.clauses, &b.clauses)?; +impl Zip for Environment { + fn zip_with>( + zipper: &mut Z, + variance: Variance, + a: &Self, + b: &Self, + ) -> Fallible<()> { + let interner = zipper.interner(); + assert_eq!(a.clauses.len(interner), b.clauses.len(interner)); // or different numbers of clauses + Zip::zip_with( + zipper, + variance, + a.clauses.as_slice(interner), + b.clauses.as_slice(interner), + )?; Ok(()) } } -macro_rules! enum_zip { - ($t:ident$([$($param:tt)*])* { $( $variant:ident ),* $(,)* } $($w:tt)*) => { - impl$(<$($param)*>)* Zip for $t $(<$($param)*>)* $($w)* { - fn zip_with(zipper: &mut Z, a: &Self, b: &Self) -> Fallible<()> { - match (a, b) { - $( - (&$t :: $variant (ref f_a), &$t :: $variant (ref f_b)) => { - Zip::zip_with(zipper, f_a, f_b) - } - )* - - $((&$t :: $variant ( .. 
), _))|* => { - return Err(NoSolution); - } - } - } - } +impl Zip for Goals { + fn zip_with>( + zipper: &mut Z, + variance: Variance, + a: &Self, + b: &Self, + ) -> Fallible<()> { + let interner = zipper.interner(); + Zip::zip_with(zipper, variance, a.as_slice(interner), b.as_slice(interner))?; + Ok(()) + } +} + +impl Zip for ProgramClauses { + fn zip_with>( + zipper: &mut Z, + variance: Variance, + a: &Self, + b: &Self, + ) -> Fallible<()> { + let interner = zipper.interner(); + Zip::zip_with(zipper, variance, a.as_slice(interner), b.as_slice(interner))?; + Ok(()) + } +} + +impl Zip for Constraints { + fn zip_with>( + zipper: &mut Z, + variance: Variance, + a: &Self, + b: &Self, + ) -> Fallible<()> { + let interner = zipper.interner(); + Zip::zip_with(zipper, variance, a.as_slice(interner), b.as_slice(interner))?; + Ok(()) } } -/// Generates a Zip impl that requires the two enums be the same -/// variant, then zips each field of the variant in turn. Only works -/// if all variants have a single parenthesized value right now. -enum_zip!(WhereClause { - Implemented, - ProjectionEq -}); -enum_zip!(WellFormed { Trait, Ty }); -enum_zip!(FromEnv { Trait, Ty }); -enum_zip!(DomainGoal { - Holds, - WellFormed, - FromEnv, - Normalize, - UnselectedNormalize, - InScope, - Derefs, - IsLocal, - IsUpstream, - IsFullyVisible, - LocalImplAllowed, - Compatible, - DownstreamType -}); -enum_zip!(LeafGoal { DomainGoal, EqGoal }); -enum_zip!(ProgramClause { Implies, ForAll }); +impl Zip for QuantifiedWhereClauses { + fn zip_with>( + zipper: &mut Z, + variance: Variance, + a: &Self, + b: &Self, + ) -> Fallible<()> { + let interner = zipper.interner(); + Zip::zip_with(zipper, variance, a.as_slice(interner), b.as_slice(interner))?; + Ok(()) + } +} // Annoyingly, Goal cannot use `enum_zip` because some variants have // two parameters, and I'm too lazy to make the macro account for the // relevant name mangling. 
-impl Zip for Goal { - fn zip_with(zipper: &mut Z, a: &Self, b: &Self) -> Fallible<()> { - match (a, b) { - (&Goal::Quantified(ref f_a, ref g_a), &Goal::Quantified(ref f_b, ref g_b)) => { - Zip::zip_with(zipper, f_a, f_b)?; - Zip::zip_with(zipper, g_a, g_b) - } - (&Goal::Implies(ref f_a, ref g_a), &Goal::Implies(ref f_b, ref g_b)) => { - Zip::zip_with(zipper, f_a, f_b)?; - Zip::zip_with(zipper, g_a, g_b) - } - (&Goal::And(ref f_a, ref g_a), &Goal::And(ref f_b, ref g_b)) => { - Zip::zip_with(zipper, f_a, f_b)?; - Zip::zip_with(zipper, g_a, g_b) - } - (&Goal::Not(ref f_a), &Goal::Not(ref f_b)) => Zip::zip_with(zipper, f_a, f_b), - (&Goal::Leaf(ref f_a), &Goal::Leaf(ref f_b)) => Zip::zip_with(zipper, f_a, f_b), - (&Goal::CannotProve(()), &Goal::CannotProve(())) => Ok(()), - (&Goal::Quantified(..), _) - | (&Goal::Implies(..), _) - | (&Goal::And(..), _) - | (&Goal::Not(..), _) - | (&Goal::Leaf(..), _) - | (&Goal::CannotProve(..), _) => { - return Err(NoSolution); - } - } +impl Zip for Goal { + fn zip_with>( + zipper: &mut Z, + variance: Variance, + a: &Self, + b: &Self, + ) -> Fallible<()> { + let interner = zipper.interner(); + Zip::zip_with(zipper, variance, a.data(interner), b.data(interner)) } } // I'm too lazy to make `enum_zip` support type parameters. 
-impl Zip for ParameterKind { - fn zip_with(zipper: &mut Z, a: &Self, b: &Self) -> Fallible<()> { +impl Zip for VariableKind { + fn zip_with>( + zipper: &mut Z, + variance: Variance, + a: &Self, + b: &Self, + ) -> Fallible<()> { match (a, b) { - (&ParameterKind::Ty(ref a), &ParameterKind::Ty(ref b)) => Zip::zip_with(zipper, a, b), - (&ParameterKind::Lifetime(ref a), &ParameterKind::Lifetime(ref b)) => { - Zip::zip_with(zipper, a, b) - } - (&ParameterKind::Ty(_), _) | (&ParameterKind::Lifetime(_), _) => { - panic!("zipping things of mixed kind") + (VariableKind::Ty(a), VariableKind::Ty(b)) if a == b => Ok(()), + (VariableKind::Lifetime, VariableKind::Lifetime) => Ok(()), + (VariableKind::Const(ty_a), VariableKind::Const(ty_b)) => { + Zip::zip_with(zipper, variance, ty_a, ty_b) } + (VariableKind::Ty(_), _) + | (VariableKind::Lifetime, _) + | (VariableKind::Const(_), _) => panic!("zipping things of mixed kind"), } } } + +impl Zip for GenericArg { + fn zip_with>( + zipper: &mut Z, + variance: Variance, + a: &Self, + b: &Self, + ) -> Fallible<()> { + let interner = zipper.interner(); + Zip::zip_with(zipper, variance, a.data(interner), b.data(interner)) + } +} + +impl Zip for ProgramClause { + fn zip_with>( + zipper: &mut Z, + variance: Variance, + a: &Self, + b: &Self, + ) -> Fallible<()> { + let interner = zipper.interner(); + Zip::zip_with(zipper, variance, a.data(interner), b.data(interner)) + } +} + +impl Zip for TraitRef { + fn zip_with>( + zipper: &mut Z, + variance: Variance, + a: &Self, + b: &Self, + ) -> Fallible<()> { + let interner = zipper.interner(); + Zip::zip_with(zipper, variance, &a.trait_id, &b.trait_id)?; + zipper.zip_substs( + variance, + None, + a.substitution.as_slice(interner), + b.substitution.as_slice(interner), + ) + } +} + +impl Zip for ProjectionTy { + fn zip_with>( + zipper: &mut Z, + variance: Variance, + a: &Self, + b: &Self, + ) -> Fallible<()> { + let interner = zipper.interner(); + Zip::zip_with(zipper, variance, &a.associated_ty_id, 
&b.associated_ty_id)?; + zipper.zip_substs( + variance, + None, + a.substitution.as_slice(interner), + b.substitution.as_slice(interner), + ) + } +} + +impl Zip for OpaqueTy { + fn zip_with>( + zipper: &mut Z, + variance: Variance, + a: &Self, + b: &Self, + ) -> Fallible<()> { + let interner = zipper.interner(); + Zip::zip_with(zipper, variance, &a.opaque_ty_id, &b.opaque_ty_id)?; + zipper.zip_substs( + variance, + None, + a.substitution.as_slice(interner), + b.substitution.as_slice(interner), + ) + } +} + +impl Zip for DynTy { + fn zip_with>( + zipper: &mut Z, + variance: Variance, + a: &Self, + b: &Self, + ) -> Fallible<()> { + Zip::zip_with( + zipper, + variance.xform(Variance::Invariant), + &a.bounds, + &b.bounds, + )?; + Zip::zip_with( + zipper, + variance.xform(Variance::Contravariant), + &a.lifetime, + &b.lifetime, + )?; + Ok(()) + } +} + +impl Zip for FnSubst { + fn zip_with>( + zipper: &mut Z, + variance: Variance, + a: &Self, + b: &Self, + ) -> Fallible<()> { + let interner = zipper.interner(); + // Parameters + Zip::zip_with( + zipper, + variance.xform(Variance::Contravariant), + &a.0.as_slice(interner)[..a.0.len(interner) - 1], + &b.0.as_slice(interner)[..b.0.len(interner) - 1], + )?; + // Return type + Zip::zip_with( + zipper, + variance, + a.0.iter(interner).last().unwrap(), + b.0.iter(interner).last().unwrap(), + )?; + Ok(()) + } +} diff --git a/chalk-macros/Cargo.toml b/chalk-macros/Cargo.toml deleted file mode 100644 index 3295d5d9564..00000000000 --- a/chalk-macros/Cargo.toml +++ /dev/null @@ -1,12 +0,0 @@ -[package] -name = "chalk-macros" -version = "0.1.1" -description = "Macros for Chalk" -license = "Apache-2.0/MIT" -authors = ["Rust Compiler Team", "Chalk developers"] -repository = "https://github.com/rust-lang-nursery/chalk" -readme = "README.md" -keywords = ["compiler", "traits", "prolog"] - -[dependencies] -lazy_static = "1.1.0" diff --git a/chalk-macros/README.md b/chalk-macros/README.md deleted file mode 100644 index 
ec2f9433334..00000000000 --- a/chalk-macros/README.md +++ /dev/null @@ -1 +0,0 @@ -Various macros used within Chalk. diff --git a/chalk-macros/src/index.rs b/chalk-macros/src/index.rs deleted file mode 100644 index 2e9490290f6..00000000000 --- a/chalk-macros/src/index.rs +++ /dev/null @@ -1,73 +0,0 @@ -#[macro_export] -macro_rules! index_struct { - ($v:vis struct $n:ident { - $vf:vis value: usize, - }) => { - #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] - $v struct $n { - $vf value: usize, - } - - impl $n { - // Not all index structs need this, so allow it to be dead - // code. - #[allow(dead_code)] - $v fn get_and_increment(&mut self) -> Self { - let old_value = *self; - self.value += 1; - old_value - } - - #[allow(dead_code)] - $v fn increment(&mut self) { - self.value += 1; - } - } - - impl ::std::fmt::Debug for $n { - fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> { - write!(fmt, "{}({})", stringify!($n), self.value) - } - } - - impl ::std::iter::Step for $n { - fn steps_between(start: &Self, end: &Self) -> Option { - usize::steps_between(&start.value, &end.value) - } - - fn replace_one(&mut self) -> Self { - Self { - value: usize::replace_one(&mut self.value), - } - } - - fn replace_zero(&mut self) -> Self { - Self { - value: usize::replace_zero(&mut self.value), - } - } - - fn add_one(&self) -> Self { - Self { - value: usize::add_one(&self.value), - } - } - - fn sub_one(&self) -> Self { - Self { - value: usize::sub_one(&self.value), - } - } - - fn add_usize(&self, n: usize) -> Option { - usize::add_usize(&self.value, n).map(|value| Self { value }) - } - } - - impl From for $n { - fn from(value: usize) -> Self { - Self { value: value } - } - } - } -} diff --git a/chalk-macros/src/lib.rs b/chalk-macros/src/lib.rs deleted file mode 100644 index 31e5e319265..00000000000 --- a/chalk-macros/src/lib.rs +++ /dev/null @@ -1,136 +0,0 @@ -#![feature(crate_visibility_modifier)] - -use std::cell::RefCell; - -#[macro_use] 
-extern crate lazy_static; - -#[macro_use] -mod index; - -lazy_static! { - pub static ref DEBUG_ENABLED: bool = { - use std::env; - env::var("CHALK_DEBUG") - .ok() - .and_then(|s| s.parse::().ok()) - .map(|x| x >= 2) - .unwrap_or(false) - }; - - pub static ref INFO_ENABLED: bool = { - use std::env; - env::var("CHALK_DEBUG") - .ok() - .and_then(|s| s.parse::().ok()) - .map(|x| x >= 1) - .unwrap_or(false) - }; -} - -thread_local! { - crate static INDENT: RefCell> = RefCell::new(vec![]); -} - -// When CHALK_DEBUG is enabled, we only allow this many frames of -// nested processing, at which point we assume something has gone -// awry. -const OVERFLOW_DEPTH: usize = 100; - -#[macro_export] -macro_rules! debug { - ($($t:tt)*) => { - if *$crate::DEBUG_ENABLED { - $crate::dump(&format!($($t)*), ""); - } - } -} - -#[macro_export] -macro_rules! debug_heading { - ($($t:tt)*) => { - let _ = &if *$crate::DEBUG_ENABLED { - let string = format!($($t)*); - $crate::dump(&string, " {"); - $crate::Indent::new(true, string) - } else { - $crate::Indent::new(false, String::new()) - }; - } -} - -#[macro_export] -macro_rules! info { - ($($t:tt)*) => { - if *$crate::INFO_ENABLED { - $crate::dump(&format!($($t)*), ""); - } - } -} - -#[macro_export] -macro_rules! 
info_heading { - ($($t:tt)*) => { - let _ = &if *$crate::INFO_ENABLED { - let string = format!($($t)*); - $crate::dump(&string, " {"); - $crate::Indent::new(true, string) - } else { - $crate::Indent::new(false, String::new()) - }; - } -} - -pub fn dump(string: &str, suffix: &str) { - let indent = INDENT.with(|i| i.borrow().len()); - let mut first = true; - for line in string.lines() { - if first { - for _ in 0..indent { - eprint!(": "); - } - eprint!("| "); - } else { - eprintln!(); - for _ in 0..indent { - eprint!(": "); - } - eprint!(": "); - } - eprint!("{}", line); - first = false; - } - - eprintln!("{}", suffix); -} - -pub struct Indent { - enabled: bool, -} - -impl Indent { - pub fn new(enabled: bool, value: String) -> Self { - if enabled { - INDENT.with(|i| { - i.borrow_mut().push(value); - if i.borrow().len() > OVERFLOW_DEPTH { - eprintln!("CHALK_DEBUG OVERFLOW:"); - for v in i.borrow().iter().rev() { - eprintln!("- {}", v); - } - panic!("CHALK_DEBUG OVERFLOW") - } - }); - } - Indent { enabled } - } -} - -impl Drop for Indent { - fn drop(&mut self) { - if self.enabled { - INDENT.with(|i| i.borrow_mut().pop().unwrap()); - dump("}", ""); - } - } -} diff --git a/chalk-parse/Cargo.toml b/chalk-parse/Cargo.toml index b2513374c90..322e295deb7 100644 --- a/chalk-parse/Cargo.toml +++ b/chalk-parse/Cargo.toml @@ -1,22 +1,21 @@ [package] name = "chalk-parse" -version = "0.1.0" +version = "0.104.0-dev.0" description = "Parser for the Chalk project" -license = "Apache-2.0/MIT" +license = "MIT OR Apache-2.0" authors = ["Rust Compiler Team", "Chalk developers"] -repository = "https://github.com/rust-lang-nursery/chalk" +repository = "https://github.com/rust-lang/chalk" readme = "README.md" keywords = ["compiler", "traits", "prolog"] build = "build.rs" # LALRPOP preprocessing - -# Add a dependency on the LALRPOP runtime library: -[dependencies.lalrpop-util] -version = "0.16" +edition = "2018" +publish = false 
[build-dependencies.lalrpop] -version = "0.16" +version = "0.20" +features = ["lexer"] [dependencies] -error-chain = "0.12.0" -lalrpop-intern = "0.15.1" -regex = "1.0.5" +lalrpop-util = "0.20" +regex = "1.5" +string_cache = "0.8.0" diff --git a/chalk-parse/README.md b/chalk-parse/README.md index 3c5d280cdec..0043af65da7 100644 --- a/chalk-parse/README.md +++ b/chalk-parse/README.md @@ -1 +1,3 @@ Parser for the Chalk standalone trait system implementation. + +See [Github](https://github.com/rust-lang/chalk) for up-to-date information. diff --git a/chalk-parse/src/ast.rs b/chalk-parse/src/ast.rs index 2a05831e882..38464f1e4b1 100644 --- a/chalk-parse/src/ast.rs +++ b/chalk-parse/src/ast.rs @@ -1,5 +1,5 @@ -use lalrpop_intern::InternedString; use std::fmt; +use string_cache::DefaultAtom as Atom; #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub struct Span { @@ -9,7 +9,7 @@ pub struct Span { impl Span { pub fn new(lo: usize, hi: usize) -> Self { - Span { lo: lo, hi: hi } + Span { lo, hi } } } @@ -20,34 +20,154 @@ pub struct Program { #[derive(Clone, PartialEq, Eq, Debug)] pub enum Item { - StructDefn(StructDefn), + AdtDefn(AdtDefn), + FnDefn(FnDefn), + ClosureDefn(ClosureDefn), TraitDefn(TraitDefn), + OpaqueTyDefn(OpaqueTyDefn), + CoroutineDefn(CoroutineDefn), Impl(Impl), Clause(Clause), + Foreign(ForeignDefn), } #[derive(Clone, PartialEq, Eq, Debug)] -pub struct StructDefn { +pub struct ForeignDefn(pub Identifier); + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct AdtDefn { pub name: Identifier, - pub parameter_kinds: Vec, + pub variable_kinds: Vec, pub where_clauses: Vec, + pub variants: Vec, + pub flags: AdtFlags, + pub repr: AdtRepr, + pub variances: Option>, +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct Variant { + pub name: Identifier, pub fields: Vec, - pub flags: StructFlags, +} +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub enum Movability { + Static, + Movable, } #[derive(Clone, PartialEq, Eq, Debug)] -pub struct 
StructFlags { +pub struct CoroutineDefn { + pub name: Identifier, + pub movability: Movability, + pub variable_kinds: Vec, + pub upvars: Vec, + pub resume_ty: Ty, + pub yield_ty: Ty, + pub return_ty: Ty, + pub witness_types: Vec, + pub witness_lifetimes: Vec, +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct AdtFlags { pub upstream: bool, pub fundamental: bool, + pub phantom_data: bool, + pub one_zst: bool, + pub kind: AdtKind, +} + +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub enum AdtKind { + Struct, + Enum, + Union, +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub enum AdtReprAttr { + C, + Packed, + Int(Ty), +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct AdtRepr { + pub c: bool, + pub packed: bool, + pub int: Option, +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct FnSig { + pub abi: FnAbi, + pub safety: Safety, + pub variadic: bool, +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct FnDefn { + pub name: Identifier, + pub variable_kinds: Vec, + pub where_clauses: Vec, + pub argument_types: Vec, + pub return_type: Ty, + pub sig: FnSig, + pub variances: Option>, +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct ClosureDefn { + pub name: Identifier, + pub kind: ClosureKind, + pub variable_kinds: Vec, + pub argument_types: Vec, + pub return_type: Ty, + pub upvars: Vec, +} + +#[derive(Clone, Eq, PartialEq, Debug)] +pub struct FnAbi(pub Atom); + +impl Default for FnAbi { + fn default() -> Self { + FnAbi(Atom::from("Rust")) + } } #[derive(Clone, PartialEq, Eq, Debug)] pub struct TraitDefn { pub name: Identifier, - pub parameter_kinds: Vec, + pub variable_kinds: Vec, pub where_clauses: Vec, pub assoc_ty_defns: Vec, pub flags: TraitFlags, + pub well_known: Option, +} + +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub enum WellKnownTrait { + Sized, + Copy, + Clone, + Drop, + FnOnce, + FnMut, + Fn, + AsyncFnOnce, + AsyncFnMut, + AsyncFn, + Unsize, + Unpin, + CoerceUnsized, + DiscriminantKind, + Coroutine, + DispatchFromDyn, + 
Tuple, + Pointee, + FnPtr, + Future, } #[derive(Clone, PartialEq, Eq, Debug)] @@ -56,39 +176,67 @@ pub struct TraitFlags { pub marker: bool, pub upstream: bool, pub fundamental: bool, - pub deref: bool, + pub non_enumerable: bool, + pub coinductive: bool, + pub object_safe: bool, } #[derive(Clone, PartialEq, Eq, Debug)] pub struct AssocTyDefn { pub name: Identifier, - pub parameter_kinds: Vec, + pub variable_kinds: Vec, pub bounds: Vec, pub where_clauses: Vec, + pub well_known: Option, } -#[derive(Copy, Clone, PartialEq, Eq, Debug)] -pub enum ParameterKind { +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +pub enum WellKnownAssocType { + AsyncFnOnceOutput, +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct OpaqueTyDefn { + pub ty: Ty, + pub variable_kinds: Vec, + pub name: Identifier, + pub bounds: Vec, + pub where_clauses: Vec, +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub enum VariableKind { Ty(Identifier), + IntegerTy(Identifier), + FloatTy(Identifier), Lifetime(Identifier), + Const(Identifier), } #[derive(Clone, PartialEq, Eq, Debug)] -pub enum Parameter { +pub enum GenericArg { Ty(Ty), Lifetime(Lifetime), + Id(Identifier), + Const(Const), +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub enum Const { + Id(Identifier), + Value(u32), } #[derive(Clone, PartialEq, Eq, Debug)] /// An inline bound, e.g. `: Foo` in `impl> SomeType`. pub enum InlineBound { TraitBound(TraitBound), - ProjectionEqBound(ProjectionEqBound), + AliasEqBound(AliasEqBound), } #[derive(Clone, PartialEq, Eq, Debug)] pub struct QuantifiedInlineBound { - pub parameter_kinds: Vec, + pub variable_kinds: Vec, pub bound: InlineBound, } @@ -97,16 +245,16 @@ pub struct QuantifiedInlineBound { /// Does not know anything about what it's binding. pub struct TraitBound { pub trait_name: Identifier, - pub args_no_self: Vec, + pub args_no_self: Vec, } #[derive(Clone, PartialEq, Eq, Debug)] -/// Represents a projection equality bound on e.g. a type or type parameter. 
+/// Represents an alias equality bound on e.g. a type or type parameter. /// Does not know anything about what it's binding. -pub struct ProjectionEqBound { +pub struct AliasEqBound { pub trait_bound: TraitBound, pub name: Identifier, - pub args: Vec, + pub args: Vec, pub value: Ty, } @@ -114,6 +262,7 @@ pub struct ProjectionEqBound { pub enum Kind { Ty, Lifetime, + Const, } impl fmt::Display for Kind { @@ -121,14 +270,16 @@ impl fmt::Display for Kind { f.write_str(match *self { Kind::Ty => "type", Kind::Lifetime => "lifetime", + Kind::Const => "const", }) } } #[derive(Clone, PartialEq, Eq, Debug)] pub struct Impl { - pub parameter_kinds: Vec, - pub trait_ref: PolarizedTraitRef, + pub variable_kinds: Vec, + pub trait_ref: TraitRef, + pub polarity: Polarity, pub where_clauses: Vec, pub assoc_ty_values: Vec, pub impl_type: ImplType, @@ -143,8 +294,9 @@ pub enum ImplType { #[derive(Clone, PartialEq, Eq, Debug)] pub struct AssocTyValue { pub name: Identifier, - pub parameter_kinds: Vec, + pub variable_kinds: Vec, pub value: Ty, + pub default: bool, } #[derive(Clone, PartialEq, Eq, Debug)] @@ -152,72 +304,160 @@ pub enum Ty { Id { name: Identifier, }, + Dyn { + bounds: Vec, + lifetime: Lifetime, + }, Apply { name: Identifier, - args: Vec, + args: Vec, }, Projection { proj: ProjectionTy, }, - UnselectedProjection { - proj: UnselectedProjectionTy, - }, ForAll { lifetime_names: Vec, + types: Vec>, + sig: FnSig, + }, + Tuple { + types: Vec>, + }, + Scalar { + ty: ScalarType, + }, + Slice { ty: Box, }, + Array { + ty: Box, + len: Const, + }, + Raw { + mutability: Mutability, + ty: Box, + }, + Ref { + mutability: Mutability, + lifetime: Lifetime, + ty: Box, + }, + Str, + Never, +} + +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub enum IntTy { + Isize, + I8, + I16, + I32, + I64, + I128, +} + +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub enum UintTy { + Usize, + U8, + U16, + U32, + U64, + U128, +} + +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub enum FloatTy { + 
F16, + F32, + F64, + F128, } #[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub enum ScalarType { + Bool, + Char, + Int(IntTy), + Uint(UintTy), + Float(FloatTy), +} + +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub enum Mutability { + Mut, + Not, +} + +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub enum Safety { + Safe, + Unsafe, +} + +impl Default for Safety { + fn default() -> Self { + Self::Safe + } +} + +#[derive(Clone, PartialEq, Eq, Debug)] pub enum Lifetime { Id { name: Identifier }, + Static, + Erased, } #[derive(Clone, PartialEq, Eq, Debug)] pub struct ProjectionTy { pub trait_ref: TraitRef, pub name: Identifier, - pub args: Vec, -} - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct UnselectedProjectionTy { - pub name: Identifier, - pub args: Vec, + pub args: Vec, } #[derive(Clone, PartialEq, Eq, Debug)] pub struct TraitRef { pub trait_name: Identifier, - pub args: Vec, + pub args: Vec, } #[derive(Clone, PartialEq, Eq, Debug)] -pub enum PolarizedTraitRef { - Positive(TraitRef), - Negative(TraitRef), +pub enum Polarity { + /// `impl Foo for Bar` + Positive, + + /// `impl !Foo for Bar` + Negative, } -impl PolarizedTraitRef { - pub fn from_bool(polarity: bool, trait_ref: TraitRef) -> PolarizedTraitRef { +impl Polarity { + pub fn from_bool(polarity: bool) -> Polarity { if polarity { - PolarizedTraitRef::Positive(trait_ref) + Polarity::Positive } else { - PolarizedTraitRef::Negative(trait_ref) + Polarity::Negative } } } -#[derive(Copy, Clone, PartialEq, Eq, Debug)] +#[derive(Clone, PartialEq, Eq, Debug)] pub struct Identifier { - pub str: InternedString, + pub str: Atom, pub span: Span, } +impl fmt::Display for Identifier { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.str) + } +} + #[derive(Clone, PartialEq, Eq, Debug)] pub enum WhereClause { Implemented { trait_ref: TraitRef }, ProjectionEq { projection: ProjectionTy, ty: Ty }, + LifetimeOutlives { a: Lifetime, b: Lifetime }, + TypeOutlives { ty: Ty, lifetime: 
Lifetime }, } #[derive(Clone, PartialEq, Eq, Debug)] @@ -228,26 +468,26 @@ pub enum DomainGoal { TyWellFormed { ty: Ty }, TyFromEnv { ty: Ty }, TraitRefFromEnv { trait_ref: TraitRef }, - TraitInScope { trait_name: Identifier }, - Derefs { source: Ty, target: Ty }, IsLocal { ty: Ty }, IsUpstream { ty: Ty }, IsFullyVisible { ty: Ty }, LocalImplAllowed { trait_ref: TraitRef }, Compatible, DownstreamType { ty: Ty }, + Reveal, + ObjectSafe { id: Identifier }, } #[derive(Clone, PartialEq, Eq, Debug)] pub enum LeafGoal { DomainGoal { goal: DomainGoal }, - UnifyTys { a: Ty, b: Ty }, - UnifyLifetimes { a: Lifetime, b: Lifetime }, + UnifyGenericArgs { a: GenericArg, b: GenericArg }, + SubtypeGenericArgs { a: Ty, b: Ty }, } #[derive(Clone, PartialEq, Eq, Debug)] pub struct QuantifiedWhereClause { - pub parameter_kinds: Vec, + pub variable_kinds: Vec, pub where_clause: WhereClause, } @@ -261,17 +501,17 @@ pub struct Field { /// This allows users to add arbitrary `A :- B` clauses into the /// logic; it has no equivalent in Rust, but it's useful for testing. 
pub struct Clause { - pub parameter_kinds: Vec, + pub variable_kinds: Vec, pub consequence: DomainGoal, pub conditions: Vec>, } #[derive(Clone, PartialEq, Eq, Debug)] pub enum Goal { - ForAll(Vec, Box), - Exists(Vec, Box), + ForAll(Vec, Box), + Exists(Vec, Box), Implies(Vec, Box), - And(Box, Box), + And(Box, Vec>), Not(Box), /// The `compatible { G }` syntax @@ -280,3 +520,62 @@ pub enum Goal { // Additional kinds of goals: Leaf(LeafGoal), } + +#[derive(Clone, PartialEq, Eq, Debug)] +pub enum ClosureKind { + Fn, + FnMut, + FnOnce, +} + +#[derive(Clone, Eq, PartialEq, Debug)] +pub enum FnArg { + NonVariadic(Ty), + Variadic, +} +#[derive(Clone, Eq, PartialEq, Debug)] +pub enum FnArgs { + NonVariadic(Vec), + Variadic(Vec), +} + +impl FnArgs { + pub fn is_variadic(&self) -> bool { + matches!(self, Self::Variadic(..)) + } + + pub fn to_tys(self) -> Vec { + match self { + Self::NonVariadic(tys) | Self::Variadic(tys) => tys, + } + } + + pub fn from_vec(mut args: Vec) -> Result { + let mut tys = Vec::with_capacity(args.len()); + let last = args.pop(); + for arg in args { + match arg { + FnArg::NonVariadic(ty) => tys.push(ty), + FnArg::Variadic => { + return Err("a variadic argument must be the last parameter in a function"); + } + } + } + + Ok(match last { + Some(FnArg::NonVariadic(ty)) => { + tys.push(ty); + FnArgs::NonVariadic(tys) + } + Some(FnArg::Variadic) => FnArgs::Variadic(tys), + None => FnArgs::NonVariadic(tys), + }) + } +} + +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub enum Variance { + Invariant, + Covariant, + Contravariant, +} diff --git a/chalk-parse/src/errors.rs b/chalk-parse/src/errors.rs deleted file mode 100644 index 0c6d43c7948..00000000000 --- a/chalk-parse/src/errors.rs +++ /dev/null @@ -1 +0,0 @@ -error_chain!{} diff --git a/chalk-parse/src/lib.rs b/chalk-parse/src/lib.rs index 211e194301b..c71f39c43de 100644 --- a/chalk-parse/src/lib.rs +++ b/chalk-parse/src/lib.rs @@ -1,64 +1,58 @@ #![recursion_limit = "1024"] +#![allow(unused_parens)] 
#[macro_use] -extern crate error_chain; - -#[macro_use] extern crate lalrpop_util; -extern crate lalrpop_intern; pub mod ast; -pub mod errors; #[rustfmt::skip] lalrpop_mod!(pub parser); -use errors::Result; use lalrpop_util::ParseError; -use std::fmt::Write; + +type Result = std::result::Result>; pub fn parse_program(text: &str) -> Result { - match parser::ProgramParser::new().parse(text) { - Ok(v) => Ok(v), - Err(e) => bail!("parse error: {:?}", e), - } + parser::ProgramParser::new() + .parse(text) + .map_err(|e| format!("parse error: {}", e).into()) } pub fn parse_ty(text: &str) -> Result { - match parser::TyParser::new().parse(text) { - Ok(v) => Ok(v), - Err(e) => bail!("error parsing `{}`: {:?}", text, e), - } + parser::TyParser::new() + .parse(text) + .map_err(|e| format!("error parsing `{}`: {}", text, e).into()) } pub fn parse_goal(text: &str) -> Result> { - match parser::GoalParser::new().parse(text) { - Ok(v) => Ok(v), - Err(e) => { - let position_string = |start: usize, end: usize| { - let mut output = String::new(); - let text = text.replace("\n", " ").replace("\r", " "); - writeln!(output, "position: `{}`", text).expect("str-write cannot fail"); - output.push_str(&" ".repeat(11 + start)); - output.push_str(&"^".repeat(end - start)); - output.push_str("\n"); - output - }; - match e { - ParseError::InvalidToken { location } => bail!( - "parse error: {:?}\n{}", - e, - position_string(location, location + 1) - ), - ParseError::UnrecognizedToken { - token: Some((start, _, end)), - .. - } => bail!("parse error: {:?}\n{}", e, position_string(start, end)), - ParseError::ExtraToken { - token: (start, _, end), - .. 
- } => bail!("parse error: {:?}\n{}", e, position_string(start, end)), - _ => bail!("parse error: {:?}", e), + parser::GoalParser::new().parse(text).map_err(|e| { + let mut output = format!("parse error: {}", &e); + if let Some(s) = match e { + ParseError::InvalidToken { location } => { + Some(position_string(text, location, location + 1)) } + ParseError::UnrecognizedToken { + token: (start, _, end), + .. + } => Some(position_string(text, start, end)), + ParseError::ExtraToken { + token: (start, _, end), + .. + } => Some(position_string(text, start, end)), + _ => None, + } { + output.push('\n'); + output += &s; } - } + output.into() + }) +} + +fn position_string(text: &str, start: usize, end: usize) -> String { + let text = text.replace('\n', " ").replace('\r', " "); + let mut output = format!("position: `{}`", text); + output += &" ".repeat(11 + start); + output += &"^".repeat(end - start); + output.push('\n'); + output } diff --git a/chalk-parse/src/parser.lalrpop b/chalk-parse/src/parser.lalrpop index e0a531cb58a..3aba81b220c 100644 --- a/chalk-parse/src/parser.lalrpop +++ b/chalk-parse/src/parser.lalrpop @@ -1,5 +1,5 @@ -use ast::*; -use lalrpop_intern::intern; +use crate::ast::*; +use string_cache::DefaultAtom as Atom; grammar; @@ -13,22 +13,31 @@ Items: Vec = { Item: Option = { Comment => None, - StructDefn => Some(Item::StructDefn(<>)), + AdtDefn => Some(Item::AdtDefn(<>)), + FnDefn => Some(Item::FnDefn(<>)), + ClosureDefn => Some(Item::ClosureDefn(<>)), TraitDefn => Some(Item::TraitDefn(<>)), + OpaqueTyDefn => Some(Item::OpaqueTyDefn(<>)), + CoroutineDefn => Some(Item::CoroutineDefn(<>)), Impl => Some(Item::Impl(<>)), Clause => Some(Item::Clause(<>)), + ForeignType => Some(Item::Foreign(<>)), +}; + +ForeignType: ForeignDefn = { + "extern" "type" ";" => ForeignDefn(id), }; Comment: () = r"//.*"; pub Goal: Box = { Goal1, - "," => Box::new(Goal::And(g1, g2)), + )+> => Box::new(Goal::And(g1, g2s)), }; Goal1: Box = { - "forall" "<" > ">" 
"{" "}" => Box::new(Goal::ForAll(p, g)), - "exists" "<" > ">" "{" "}" => Box::new(Goal::Exists(p, g)), + "forall" "<" > ">" "{" "}" => Box::new(Goal::ForAll(p, g)), + "exists" "<" > ">" "{" "}" => Box::new(Goal::Exists(p, g)), "if" "(" > ")" "{" "}" => Box::new(Goal::Implies(h, g)), "not" "{" "}" => Box::new(Goal::Not(g)), "compatible" "{" "}" => Box::new(Goal::Compatible(g)), @@ -39,62 +48,280 @@ Goal1: Box = { UpstreamKeyword: () = "#" "[" "upstream" "]"; AutoKeyword: () = "#" "[" "auto" "]"; MarkerKeyword: () = "#" "[" "marker" "]"; -DerefLangItem: () = "#" "[" "lang_deref" "]"; FundamentalKeyword: () = "#" "[" "fundamental" "]"; +NonEnumerableKeyword: () = "#" "[" "non_enumerable" "]"; +CoinductiveKeyword: () = "#" "[" "coinductive" "]"; +ObjectSafeKeyword: () = "#" "[" "object_safe" "]"; +PhantomDataKeyword: () = "#" "[" "phantom_data" "]"; +OneZstKeyword: () = "#" "[" "one_zst" "]"; + +WellKnownTrait: WellKnownTrait = { + "#" "[" "lang" "(" "sized" ")" "]" => WellKnownTrait::Sized, + "#" "[" "lang" "(" "copy" ")" "]" => WellKnownTrait::Copy, + "#" "[" "lang" "(" "clone" ")" "]" => WellKnownTrait::Clone, + "#" "[" "lang" "(" "drop" ")" "]" => WellKnownTrait::Drop, + "#" "[" "lang" "(" "fn_once" ")" "]" => WellKnownTrait::FnOnce, + "#" "[" "lang" "(" "fn_mut" ")" "]" => WellKnownTrait::FnMut, + "#" "[" "lang" "(" "fn" ")" "]" => WellKnownTrait::Fn, + "#" "[" "lang" "(" "async_fn_once" ")" "]" => WellKnownTrait::AsyncFnOnce, + "#" "[" "lang" "(" "async_fn_mut" ")" "]" => WellKnownTrait::AsyncFnMut, + "#" "[" "lang" "(" "async_fn" ")" "]" => WellKnownTrait::AsyncFn, + "#" "[" "lang" "(" "unsize" ")" "]" => WellKnownTrait::Unsize, + "#" "[" "lang" "(" "unpin" ")" "]" => WellKnownTrait::Unpin, + "#" "[" "lang" "(" "coerce_unsized" ")" "]" => WellKnownTrait::CoerceUnsized, + "#" "[" "lang" "(" "discriminant_kind" ")" "]" => WellKnownTrait::DiscriminantKind, + "#" "[" "lang" "(" "coroutine" ")" "]" => WellKnownTrait::Coroutine, + "#" "[" "lang" "(" 
"dispatch_from_dyn" ")" "]" => WellKnownTrait::DispatchFromDyn, + "#" "[" "lang" "(" "tuple_trait" ")" "]" => WellKnownTrait::Tuple, + "#" "[" "lang" "(" "pointee_trait" ")" "]" => WellKnownTrait::Pointee, + "#" "[" "lang" "(" "fn_ptr_trait" ")" "]" => WellKnownTrait::FnPtr, + "#" "[" "lang" "(" "future" ")" "]" => WellKnownTrait::Future, +}; + +AdtReprAttr: AdtReprAttr = { + "#" "[" "repr" "(" ")" "]" => AdtReprAttr::Int(t), + "#" "[" "repr" "(" ")" "]" =>? match &*attr.str { + "C" => Ok(AdtReprAttr::C), + "packed" => Ok(AdtReprAttr::Packed), + _ => Err(lalrpop_util::ParseError::User { + error: "unknown adt repr flag" + }) + }, +}; -StructDefn: StructDefn = { - "struct" > - "{" "}" => StructDefn +ReprIntTy: Ty = { + => Ty::Scalar { + ty: ScalarType::Int(i), + }, + => Ty::Scalar { + ty: ScalarType::Uint(u), + }, +} + +AdtDefn: AdtDefn = { + + "enum" > + "{" "}" => AdtDefn { name: n, - parameter_kinds: p, + variable_kinds: p, where_clauses: w, - fields: f, - flags: StructFlags { + variants: v, + flags: AdtFlags { upstream: upstream.is_some(), fundamental: fundamental.is_some(), + phantom_data: phantom_data.is_some(), + one_zst: one_zst.is_some(), + kind: AdtKind::Enum, + }, + repr: AdtRepr { + c: repr.iter().any(|s| s == &AdtReprAttr::C), + packed: repr.iter().any(|s| s == &AdtReprAttr::Packed), + int: repr.iter().find_map(|s| if let AdtReprAttr::Int(i) = s { + Some(i.clone()) + } else { + None + }) + }, + variances, + }, + + "struct" > + "{" "}" => AdtDefn + { + variants: vec![Variant { + // FIXME(#505) choose a proper span + name: Identifier { + str: Atom::from("0"), + span: n.span, + }, + fields: f, + }], + name: n, + variable_kinds: p, + where_clauses: w, + flags: AdtFlags { + upstream: upstream.is_some(), + fundamental: fundamental.is_some(), + phantom_data: phantom_data.is_some(), + one_zst: one_zst.is_some(), + kind: AdtKind::Struct, + }, + repr: AdtRepr { + c: repr.iter().any(|s| s == &AdtReprAttr::C), + packed: repr.iter().any(|s| s == 
&AdtReprAttr::Packed), + int: None + }, + variances, + } +}; + +Variants: Vec = { + >, +}; + +Variant: Variant = { + "{" "}" => Variant { + name: n, + fields: f, + }, + "(" > ")" => Variant { + fields: tys.into_iter().enumerate().map(|(i, t)| Field { + // FIXME(#505) choose a proper span + name: Identifier { + str: Atom::from(format!("{}", i)), + span: n.span, + }, + ty: t, + }).collect(), + name: n, + }, + => Variant { + name: n, + fields: vec![], + }, +}; + +FnReturn: Ty = { + "->" => ty, +}; + +FnDefn: FnDefn = { + "fn" >"(" ")" + ";" => FnDefn + { + name: n, + variable_kinds: p, + where_clauses: w, + sig: FnSig { + abi: abi.unwrap_or_default(), + safety: safety.unwrap_or_default(), + variadic: args.is_variadic(), }, + argument_types: args.to_tys(), + return_type: ret_ty.unwrap_or_else(|| Ty::Tuple { types: Vec::new() }), + variances, } }; +Movability: Movability = { + "static" => Movability::Static, + => Movability::Movable +} + +CoroutineDefn: CoroutineDefn = { + "coroutine" > "[" "resume" "=" "," "yield" "=" "]" + "{" + "upvars" "[" > "]" + "witnesses" "[" > "]" + "}" => CoroutineDefn { + name: n, + movability: m, + variable_kinds: p, + upvars: upvars, + witness_lifetimes: l.unwrap_or_default(), + resume_ty: resume, + yield_ty: yield_ty, + return_ty: ret_ty.unwrap_or_else(|| Ty::Tuple { types: Vec::new() }), + witness_types: witnesses + } +} + +FnAbi: FnAbi = "extern" "\"" "\"" => FnAbi(id.str); + +FnArg: FnArg = { + Id ":" "..." => FnArg::Variadic, + Id ":" => FnArg::NonVariadic(arg_ty), +}; + +FnArgs: FnArgs = { + > =>? 
FnArgs::from_vec(<>).map_err(|e| lalrpop_util::ParseError::User { + error: e, + }) +}; + +ClosureDefn: ClosureDefn = { + "closure" > "(" ")" + "{" > "}" => ClosureDefn { + name: n, + kind: s, + variable_kinds: p, + argument_types: args, + return_type: ret_ty.unwrap_or_else(|| Ty::Tuple { types: Vec::new() }), + upvars: upvars, + } +} + +ClosureSelf: ClosureKind = { + "self" => ClosureKind::FnOnce, + "&" "mut" "self" => ClosureKind::FnMut, + "&" "self" => ClosureKind::Fn, +} + +ClosureArgs: Vec = { + "," => args.to_tys(), +} + TraitDefn: TraitDefn = { - "trait" > + "trait" > "{" "}" => TraitDefn { name: n, - parameter_kinds: p, + variable_kinds: p, where_clauses: w, assoc_ty_defns: a, + well_known, flags: TraitFlags { auto: auto.is_some(), marker: marker.is_some(), upstream: upstream.is_some(), fundamental: fundamental.is_some(), - deref: deref.is_some(), + non_enumerable: non_enumerable.is_some(), + coinductive: coinductive.is_some(), + object_safe: object_safe.is_some(), }, } }; +WellKnownAssocType: WellKnownAssocType = { + "#" "[" "lang" "(" "async_fn_once_output" ")" "]" => WellKnownAssocType::AsyncFnOnceOutput, +}; + AssocTyDefn: AssocTyDefn = { - "type" > >)?> + + "type" > >)?> ";" => { AssocTyDefn { name: name, - parameter_kinds: p, + variable_kinds: p, where_clauses: w, bounds: b.unwrap_or(vec![]), + well_known, + } + } +}; + +OpaqueTyDefn: OpaqueTyDefn = { + "opaque" "type" > >)?> + "=" ";" => { + OpaqueTyDefn { + ty, + variable_kinds: p, + name, + bounds: b.unwrap_or(vec![]), + where_clauses: w, } } }; InlineBound: InlineBound = { TraitBound => InlineBound::TraitBound(<>), - ProjectionEqBound => InlineBound::ProjectionEqBound(<>), + AliasEqBound => InlineBound::AliasEqBound(<>), }; TraitBound: TraitBound = { - > => { + > => { TraitBound { trait_name: t, args_no_self: a, @@ -102,9 +329,9 @@ TraitBound: TraitBound = { } }; -ProjectionEqBound: ProjectionEqBound = { - "<" > ",")?> > - "=" ">" => ProjectionEqBound +AliasEqBound: AliasEqBound = { + "<" > ",")?> 
> + "=" ">" => AliasEqBound { trait_bound: TraitBound { trait_name: t, @@ -118,28 +345,29 @@ ProjectionEqBound: ProjectionEqBound = { QuantifiedInlineBound: QuantifiedInlineBound = { => QuantifiedInlineBound { - parameter_kinds: vec![], + variable_kinds: vec![], bound: b, }, - "forall" "<" > ">" => QuantifiedInlineBound { - parameter_kinds: pk, + "forall" "<" > ">" => QuantifiedInlineBound { + variable_kinds: pk, bound: b, }, }; Impl: Impl = { - "impl" > > "for" + "impl" > > "for" "{" "}" => { - let mut args = vec![Parameter::Ty(s)]; + let mut args = vec![GenericArg::Ty(s)]; args.extend(a); Impl { - parameter_kinds: p, - trait_ref: PolarizedTraitRef::from_bool(mark.is_none(), TraitRef { + variable_kinds: p, + polarity: Polarity::from_bool(mark.is_none()), + trait_ref: TraitRef { trait_name: t, args: args, - }), + }, where_clauses: w, assoc_ty_values: assoc, impl_type: external.map(|_| ImplType::External).unwrap_or(ImplType::Local), @@ -147,58 +375,159 @@ Impl: Impl = { }, }; -ParameterKind: ParameterKind = { - Id => ParameterKind::Ty(<>), - LifetimeId => ParameterKind::Lifetime(<>), +VariableKind: VariableKind = { + Id => VariableKind::Ty(<>), + LifetimeId => VariableKind::Lifetime(<>), + "const" => VariableKind::Const(id), + "int" => VariableKind::IntegerTy(id), + "float" => VariableKind::FloatTy(id), }; +RawVariance: Variance = { + "Invariant" => Variance::Invariant, + "Covariant" => Variance::Covariant, + "Contravariant" => Variance::Contravariant, +} + +Variances: Vec = "#" "[" "variance" "(" > ")" "]"; + AssocTyValue: AssocTyValue = { - "type" > "=" ";" => AssocTyValue { + "type" > "=" ";" => AssocTyValue { name: n, - parameter_kinds: a, + variable_kinds: a, value: v, + default: default.is_some(), }, }; pub Ty: Ty = { - "for" "<" > ">" => Ty::ForAll { - lifetime_names: l, - ty: Box::new(t) - }, - TyWithoutFor, + => Ty::Id { name: n }, + TyWithoutId, }; -TyWithoutFor: Ty = { - => Ty::Id { name: n}, - "<" > ">" => Ty::Apply { name: n, args: a }, +Safety: Safety 
= { + "unsafe" => Safety::Unsafe, +}; + +FnArgTy: FnArg = { + "..." => FnArg::Variadic, + => FnArg::NonVariadic(arg_ty), +}; + +FnArgTys: FnArgs = { + > =>? FnArgs::from_vec(<>).map_err(|e| lalrpop_util::ParseError::User { + error: e, + }) +}; + +TyWithoutId: Ty = { + "fn" "(" ")" => Ty::ForAll { + lifetime_names: l.unwrap_or_default(), + sig: FnSig { + variadic: types.is_variadic(), + safety: safety.unwrap_or_default(), + abi: abi.unwrap_or_default(), + }, + types: types + .to_tys() + .into_iter() + .chain(std::iter::once(ret_ty.unwrap_or_else(|| Ty::Tuple { types: Vec::new() }))) + .map(Box::new).collect(), + }, + => Ty::Scalar { ty: <> }, + "str" => Ty::Str, + "!" => Ty::Never, + "dyn" > "+" => Ty::Dyn { + bounds: b, + lifetime: l, + }, + "<" > ">" => Ty::Apply { name: n, args: a }, => Ty::Projection { proj: p }, - => Ty::UnselectedProjection { <> }, - "(" ")", + "(" ")" => t, + "*" => Ty::Raw{ mutability: m, ty: Box::new(t) }, + "&" "mut" => Ty::Ref{ mutability: Mutability::Mut, lifetime: l, ty: Box::new(t) }, + "&" => Ty::Ref{ mutability: Mutability::Not, lifetime: l, ty: Box::new(t) }, + "[" "]" => Ty::Slice { ty: Box::new(t) }, + "[" ";" "]" => Ty::Array { ty: Box::new(t), len }, +}; + +ExistsLifetimes: Vec = "exists" "<" > ">" => <>; +ForLifetimes: Vec = "for" "<" > ">" => <>; + +IntTy: IntTy = { + "i8" => IntTy::I8, + "i16" => IntTy::I16, + "i32" => IntTy::I32, + "i64" => IntTy::I64, + "i128" => IntTy::I128, + "isize" => IntTy::Isize, +}; + +UintTy: UintTy = { + "u8" => UintTy::U8, + "u16" => UintTy::U16, + "u32" => UintTy::U32, + "u64" => UintTy::U64, + "u128" => UintTy::U128, + "usize" => UintTy::Usize, +}; + +FloatTy: FloatTy = { + "f16" => FloatTy::F16, + "f32" => FloatTy::F32, + "f64" => FloatTy::F64, + "f128" => FloatTy::F128, +}; + +ScalarType: ScalarType = { + => ScalarType::Int(i), + => ScalarType::Uint(u), + => ScalarType::Float(f), + "bool" => ScalarType::Bool, + "char" => ScalarType::Char, +}; + +TupleOrParensInner: Ty = { + , + "," > => { + 
let mut types = Vec::with_capacity(rest.len() + 1); + types.push(Box::new(first)); + types.extend(rest.into_iter().map(Box::new)); + Ty::Tuple { types } + }, + () => Ty::Tuple { types: vec![] }, +}; + +RawMutability: Mutability = { + "mut" => Mutability::Mut, + "const" => Mutability::Not, }; Lifetime: Lifetime = { => Lifetime::Id { name: n }, + "'static" => Lifetime::Static, + "'erased" => Lifetime::Erased, }; -Parameter: Parameter = { - Ty => Parameter::Ty(<>), - Lifetime => Parameter::Lifetime(<>), +ConstWithoutId: Const = { + ConstValue => Const::Value(<>), }; -ProjectionTy: ProjectionTy = { - "<" > ">" "::" > => ProjectionTy { - trait_ref: t, name: n, args: a - }, +Const : Const = { + Id => Const::Id(<>), + ConstWithoutId, }; -UnselectedProjectionTy: UnselectedProjectionTy = { - "::" > => { - let mut args = a; - args.push(Parameter::Ty(ty)); - UnselectedProjectionTy { - name: name, - args: args, - } +GenericArg: GenericArg = { + TyWithoutId => GenericArg::Ty(<>), + Lifetime => GenericArg::Lifetime(<>), + Id => GenericArg::Id(<>), + ConstWithoutId => GenericArg::Const(<>), +}; + +ProjectionTy: ProjectionTy = { + "<" > ">" "::" > => ProjectionTy { + trait_ref: t, name: n, args: a }, }; @@ -214,14 +543,14 @@ Field: Field = { }; Clause: Clause = { - "forall" > "{" "if" > "}" => Clause { - parameter_kinds: pk, + "forall" > "{" "if" > "}" => Clause { + variable_kinds: pk, consequence: dg, conditions: g, }, - "forall" > "{" "}" => Clause { - parameter_kinds: pk, + "forall" > "{" "}" => Clause { + variable_kinds: pk, consequence: dg, conditions: vec![], }, @@ -229,13 +558,13 @@ Clause: Clause = { InlineClause1: Clause = { => Clause { - parameter_kinds: vec![], + variable_kinds: vec![], consequence: dg, conditions: vec![], }, ":" "-" > => Clause { - parameter_kinds: vec![], + variable_kinds: vec![], consequence: dg, conditions: g, }, @@ -244,8 +573,8 @@ InlineClause1: Clause = { InlineClause: Clause = { , - "forall" "<" > ">" "{" "}" => Clause { - parameter_kinds: pk, + 
"forall" "<" > ">" "{" "}" => Clause { + variable_kinds: pk, consequence: c.consequence, conditions: c.conditions, } @@ -255,25 +584,35 @@ WhereClause: WhereClause = { > => WhereClause::Implemented { trait_ref: t }, // `T: Foo` -- projection equality - ":" "<" > ",")?> > + ":" "<" > ",")?> > "=" ">" => { - let mut args = vec![Parameter::Ty(s)]; + let mut args = vec![GenericArg::Ty(s)]; if let Some(a) = a { args.extend(a); } let trait_ref = TraitRef { trait_name: t, args: args }; let projection = ProjectionTy { trait_ref, name, args: a2 }; WhereClause::ProjectionEq { projection, ty } }, + + // 'a: 'b + ":" => { + WhereClause::LifetimeOutlives { a, b } + }, + + // T: 'a + ":" => { + WhereClause::TypeOutlives { ty, lifetime } + } }; QuantifiedWhereClause: QuantifiedWhereClause = { => QuantifiedWhereClause { - parameter_kinds: vec![], + variable_kinds: vec![], where_clause: wc, }, - "forall" "<" > ">" => QuantifiedWhereClause { - parameter_kinds: pk, + "forall" "<" > ">" => QuantifiedWhereClause { + variable_kinds: pk, where_clause: wc, }, }; @@ -297,10 +636,6 @@ DomainGoal: DomainGoal = { // `::U -> Bar` -- a normalization "Normalize" "(" "->" ")" => DomainGoal::Normalize { projection: s, ty: t }, - "InScope" "(" ")" => DomainGoal::TraitInScope { trait_name: t }, - - "Derefs" "(" "," ")" => DomainGoal::Derefs { source, target }, - "IsLocal" "(" ")" => DomainGoal::IsLocal { ty }, "IsUpstream" "(" ")" => DomainGoal::IsUpstream { ty }, "IsFullyVisible" "(" ")" => DomainGoal::IsFullyVisible { ty }, @@ -309,19 +644,23 @@ DomainGoal: DomainGoal = { "Compatible" => DomainGoal::Compatible, "DownstreamType" "(" ")" => DomainGoal::DownstreamType { ty }, + + "Reveal" => DomainGoal::Reveal, + + "ObjectSafe" "(" ")" => DomainGoal::ObjectSafe { id } }; LeafGoal: LeafGoal = { => LeafGoal::DomainGoal { goal: dg }, - "=" => LeafGoal::UnifyTys { a, b }, + "=" => LeafGoal::UnifyGenericArgs { a, b }, - "=" => LeafGoal::UnifyLifetimes { a, b }, + "Subtype" "(" "," ")" => 
LeafGoal::SubtypeGenericArgs { a, b }, }; TraitRef: TraitRef = { - S > => { - let mut args = vec![Parameter::Ty(s)]; + S > => { + let mut args = vec![GenericArg::Ty(s)]; args.extend(a); TraitRef { trait_name: t, @@ -345,7 +684,8 @@ Separator1: Vec = { #[inline] Comma: Vec = { - > + > "," => v, + > => v, }; #[inline] @@ -365,14 +705,16 @@ Angle: Vec = { Id: Identifier = { => Identifier { - str: intern(s), + str: Atom::from(s), span: Span::new(l, r), } }; LifetimeId: Identifier = { => Identifier { - str: intern(s), + str: Atom::from(s), span: Span::new(l, r), } }; + +ConstValue: u32 = => u32::from_str_radix(s, 10).unwrap(); diff --git a/chalk-recursive/Cargo.toml b/chalk-recursive/Cargo.toml new file mode 100644 index 00000000000..e7a46be44d7 --- /dev/null +++ b/chalk-recursive/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "chalk-recursive" +version = "0.104.0-dev.0" +description = "Recursive solver for the Chalk project" +license = "MIT OR Apache-2.0" +authors = ["Rust Compiler Team", "Chalk developers"] +repository = "https://github.com/rust-lang/chalk" +readme = "README.md" +keywords = ["compiler", "traits", "prolog"] +edition = "2018" + +[dependencies] +rustc-hash = { version = "1.1.0" } +tracing = "0.1" + +chalk-derive = { version = "0.104.0-dev.0", path = "../chalk-derive" } +chalk-ir = { version = "0.104.0-dev.0", path = "../chalk-ir" } +chalk-solve = { version = "0.104.0-dev.0", path = "../chalk-solve", default-features = false } + +[dev-dependencies] +chalk-integration = { path = "../chalk-integration" } + +[features] +default = ["tracing-full"] + +tracing-full = ["chalk-solve/tracing-full"] diff --git a/chalk-recursive/README.md b/chalk-recursive/README.md new file mode 100644 index 00000000000..480a492d838 --- /dev/null +++ b/chalk-recursive/README.md @@ -0,0 +1,3 @@ +A crate housing the recursive solver for chalk. + +See [Github](https://github.com/rust-lang/chalk) for up-to-date information. 
diff --git a/chalk-recursive/src/combine.rs b/chalk-recursive/src/combine.rs new file mode 100644 index 00000000000..682af6d3bef --- /dev/null +++ b/chalk-recursive/src/combine.rs @@ -0,0 +1,54 @@ +use chalk_solve::Solution; +use tracing::debug; + +use chalk_ir::interner::Interner; +use chalk_ir::{ClausePriority, DomainGoal, GenericArg}; + +#[tracing::instrument(level = "Debug", skip(interner))] +pub(super) fn with_priorities( + interner: I, + domain_goal: &DomainGoal, + a: Solution, + prio_a: ClausePriority, + b: Solution, + prio_b: ClausePriority, +) -> (Solution, ClausePriority) { + let result = match (prio_a, prio_b, a, b) { + (ClausePriority::High, ClausePriority::Low, higher, lower) + | (ClausePriority::Low, ClausePriority::High, lower, higher) => { + // if we have a high-priority solution and a low-priority solution, + // the high-priority solution overrides *if* they are both for the + // same inputs -- we don't want a more specific high-priority + // solution overriding a general low-priority one. Currently inputs + // only matter for projections; in a goal like `AliasEq(::Type = ?1)`, ?0 is the input. 
+ let inputs_higher = calculate_inputs(interner, domain_goal, &higher); + let inputs_lower = calculate_inputs(interner, domain_goal, &lower); + if inputs_higher == inputs_lower { + debug!( + "preferring solution: {:?} over {:?} because of higher prio", + higher, lower + ); + (higher, ClausePriority::High) + } else { + (higher.combine(lower, interner), ClausePriority::High) + } + } + (_, _, a, b) => (a.combine(b, interner), prio_a), + }; + debug!(?result, "combined result"); + result +} + +fn calculate_inputs( + interner: I, + domain_goal: &DomainGoal, + solution: &Solution, +) -> Vec> { + if let Some(subst) = solution.constrained_subst(interner) { + let subst_goal = subst.value.subst.apply(domain_goal.clone(), interner); + subst_goal.inputs(interner) + } else { + domain_goal.inputs(interner) + } +} diff --git a/chalk-recursive/src/fixed_point.rs b/chalk-recursive/src/fixed_point.rs new file mode 100644 index 00000000000..517ca253b4d --- /dev/null +++ b/chalk-recursive/src/fixed_point.rs @@ -0,0 +1,247 @@ +use std::fmt::Debug; +use std::hash::Hash; +use tracing::debug; +use tracing::{info, instrument}; + +mod cache; +mod search_graph; +mod stack; + +pub use cache::Cache; +use search_graph::{DepthFirstNumber, SearchGraph}; +use stack::{Stack, StackDepth}; + +pub(super) struct RecursiveContext +where + K: Hash + Eq + Debug + Clone, + V: Debug + Clone, +{ + stack: Stack, + + /// The "search graph" stores "in-progress results" that are still being + /// solved. + search_graph: SearchGraph, + + /// The "cache" stores results for goals that we have completely solved. + /// Things are added to the cache when we have completely processed their + /// result. + cache: Option>, + + /// The maximum size for goals. 
+ max_size: usize, +} + +pub(super) trait SolverStuff: Copy +where + K: Hash + Eq + Debug + Clone, + V: Debug + Clone, +{ + fn is_coinductive_goal(self, goal: &K) -> bool; + fn initial_value(self, goal: &K, coinductive_goal: bool) -> V; + fn solve_iteration( + self, + context: &mut RecursiveContext, + goal: &K, + minimums: &mut Minimums, + should_continue: impl std::ops::Fn() -> bool + Clone, + ) -> V; + fn reached_fixed_point(self, old_value: &V, new_value: &V) -> bool; + fn error_value(self) -> V; +} + +/// The `minimums` struct is used while solving to track whether we encountered +/// any cycles in the process. +#[derive(Copy, Clone, Debug)] +pub(super) struct Minimums { + positive: DepthFirstNumber, +} + +impl Minimums { + pub fn new() -> Self { + Minimums { + positive: DepthFirstNumber::MAX, + } + } + + pub fn update_from(&mut self, minimums: Minimums) { + self.positive = ::std::cmp::min(self.positive, minimums.positive); + } +} + +impl RecursiveContext +where + K: Hash + Eq + Debug + Clone, + V: Debug + Clone, +{ + pub fn new(overflow_depth: usize, max_size: usize, cache: Option>) -> Self { + RecursiveContext { + stack: Stack::new(overflow_depth), + search_graph: SearchGraph::new(), + cache, + max_size, + } + } + + pub fn max_size(&self) -> usize { + self.max_size + } + + /// Solves a canonical goal. The substitution returned in the + /// solution will be for the fully decomposed goal. For example, given the + /// program + /// + /// ```ignore + /// struct u8 { } + /// struct SomeType { } + /// trait Foo { } + /// impl Foo for SomeType { } + /// ``` + /// + /// and the goal `exists { forall { SomeType: Foo } + /// }`, `into_peeled_goal` can be used to create a canonical goal + /// `SomeType: Foo`. This function will then return a + /// solution with the substitution `?0 := u8`. 
+ pub fn solve_root_goal( + &mut self, + canonical_goal: &K, + solver_stuff: impl SolverStuff, + should_continue: impl std::ops::Fn() -> bool + Clone, + ) -> V { + debug!("solve_root_goal(canonical_goal={:?})", canonical_goal); + assert!(self.stack.is_empty()); + let minimums = &mut Minimums::new(); + self.solve_goal(canonical_goal, minimums, solver_stuff, should_continue) + } + + /// Attempt to solve a goal that has been fully broken down into leaf form + /// and canonicalized. This is where the action really happens, and is the + /// place where we would perform caching in rustc (and may eventually do in Chalk). + #[instrument(level = "info", skip(self, minimums, solver_stuff, should_continue))] + pub fn solve_goal( + &mut self, + goal: &K, + minimums: &mut Minimums, + solver_stuff: impl SolverStuff, + should_continue: impl std::ops::Fn() -> bool + Clone, + ) -> V { + // First check the cache. + if let Some(cache) = &self.cache { + if let Some(value) = cache.get(goal) { + debug!("solve_reduced_goal: cache hit, value={:?}", value); + return value; + } + } + + // Next, check if the goal is in the search tree already. + if let Some(dfn) = self.search_graph.lookup(goal) { + // Check if this table is still on the stack. + if let Some(depth) = self.search_graph[dfn].stack_depth { + self.stack[depth].flag_cycle(); + // Mixed cycles are not allowed. For more information about this + // see the corresponding section in the coinduction chapter: + // https://rust-lang.github.io/chalk/book/recursive/coinduction.html#mixed-co-inductive-and-inductive-cycles + if self.stack.mixed_inductive_coinductive_cycle_from(depth) { + return solver_stuff.error_value(); + } + } + + minimums.update_from(self.search_graph[dfn].links); + + // Return the solution from the table. 
+ let previous_solution = self.search_graph[dfn].solution.clone(); + info!( + "solve_goal: cycle detected, previous solution {:?}", + previous_solution, + ); + previous_solution + } else { + // Otherwise, push the goal onto the stack and create a table. + // The initial result for this table depends on whether the goal is coinductive. + let coinductive_goal = solver_stuff.is_coinductive_goal(goal); + let initial_solution = solver_stuff.initial_value(goal, coinductive_goal); + let depth = self.stack.push(coinductive_goal); + let dfn = self.search_graph.insert(goal, depth, initial_solution); + + let subgoal_minimums = + self.solve_new_subgoal(goal, depth, dfn, solver_stuff, should_continue); + + self.search_graph[dfn].links = subgoal_minimums; + self.search_graph[dfn].stack_depth = None; + self.stack.pop(depth); + minimums.update_from(subgoal_minimums); + + // Read final result from table. + let result = self.search_graph[dfn].solution.clone(); + + // If processing this subgoal did not involve anything + // outside of its subtree, then we can promote it to the + // cache now. This is a sort of hack to alleviate the + // worst of the repeated work that we do during tabling. + if subgoal_minimums.positive >= dfn { + if let Some(cache) = &mut self.cache { + self.search_graph.move_to_cache(dfn, cache); + debug!("solve_reduced_goal: SCC head encountered, moving to cache"); + } else { + debug!( + "solve_reduced_goal: SCC head encountered, rolling back as caching disabled" + ); + self.search_graph.rollback_to(dfn); + } + } + + info!("solve_goal: solution = {:?}", result); + result + } + } + + #[instrument(level = "debug", skip(self, solver_stuff, should_continue))] + fn solve_new_subgoal( + &mut self, + canonical_goal: &K, + depth: StackDepth, + dfn: DepthFirstNumber, + solver_stuff: impl SolverStuff, + should_continue: impl std::ops::Fn() -> bool + Clone, + ) -> Minimums { + // We start with `answer = None` and try to solve the goal. 
At the end of the iteration, + // `answer` will be updated with the result of the solving process. If we detect a cycle + // during the solving process, we cache `answer` and try to solve the goal again. We repeat + // until we reach a fixed point for `answer`. + // Considering the partial order: + // - None < Some(Unique) < Some(Ambiguous) + // - None < Some(CannotProve) + // the function which maps the loop iteration to `answer` is a nondecreasing function + // so this function will eventually be constant and the loop terminates. + loop { + let minimums = &mut Minimums::new(); + let current_answer = solver_stuff.solve_iteration( + self, + canonical_goal, + minimums, + should_continue.clone(), // Note: cloning required as workaround for https://github.com/rust-lang/rust/issues/95734 + ); + + debug!( + "solve_new_subgoal: loop iteration result = {:?} with minimums {:?}", + current_answer, minimums + ); + + if !self.stack[depth].read_and_reset_cycle_flag() { + // None of our subgoals depended on us directly. + // We can return. + self.search_graph[dfn].solution = current_answer; + return *minimums; + } + + let old_answer = + std::mem::replace(&mut self.search_graph[dfn].solution, current_answer); + + if solver_stuff.reached_fixed_point(&old_answer, &self.search_graph[dfn].solution) { + return *minimums; + } + + // Otherwise: rollback the search tree and try again. + self.search_graph.rollback_to(dfn + 1); + } + } +} diff --git a/chalk-recursive/src/fixed_point/cache.rs b/chalk-recursive/src/fixed_point/cache.rs new file mode 100644 index 00000000000..4fbbc63042e --- /dev/null +++ b/chalk-recursive/src/fixed_point/cache.rs @@ -0,0 +1,88 @@ +use rustc_hash::FxHashMap; +use std::fmt::Debug; +use std::hash::Hash; +use std::sync::{Arc, Mutex}; +use tracing::debug; +use tracing::instrument; +/// The "cache" stores results for goals that we have completely solved. 
+/// Things are added to the cache when we have completely processed their +/// result, and it can be shared amongst many solvers. +pub struct Cache +where + K: Hash + Eq + Debug, + V: Debug + Clone, +{ + data: Arc>>, +} +struct CacheData +where + K: Hash + Eq + Debug, + V: Debug + Clone, +{ + cache: FxHashMap, +} + +impl Cache +where + K: Hash + Eq + Debug, + V: Debug + Clone, +{ + pub fn new() -> Self { + Self::default() + } + + /// Record a cache result. + #[instrument(skip(self))] + pub fn insert(&self, goal: K, result: V) { + let mut data = self.data.lock().unwrap(); + data.cache.insert(goal, result); + } + + /// Record a cache result. + pub fn get(&self, goal: &K) -> Option { + let data = self.data.lock().unwrap(); + if let Some(result) = data.cache.get(goal) { + debug!(?goal, ?result, "Cache hit"); + Some(result.clone()) + } else { + debug!(?goal, "Cache miss"); + None + } + } +} + +impl Clone for Cache +where + K: Hash + Eq + Debug, + V: Debug + Clone, +{ + fn clone(&self) -> Self { + Self { + data: self.data.clone(), + } + } +} + +impl Default for Cache +where + K: Hash + Eq + Debug, + V: Debug + Clone, +{ + fn default() -> Self { + Self { + data: Default::default(), + } + } +} + +impl Default for CacheData +where + K: Hash + Eq + Debug, + V: Debug + Clone, +{ + fn default() -> Self { + Self { + cache: Default::default(), + } + } +} diff --git a/chalk-recursive/src/fixed_point/search_graph.rs b/chalk-recursive/src/fixed_point/search_graph.rs new file mode 100644 index 00000000000..e4b6ad3f7f0 --- /dev/null +++ b/chalk-recursive/src/fixed_point/search_graph.rs @@ -0,0 +1,143 @@ +use super::stack::StackDepth; +use super::{Cache, Minimums}; +use rustc_hash::FxHashMap; +use std::fmt::Debug; +use std::hash::Hash; +use std::ops::Add; +use std::ops::Index; +use std::ops::IndexMut; +use std::usize; +use tracing::{debug, instrument}; + +/// The "search graph" stores in-progress goals that are still +/// being solved. 
+pub(super) struct SearchGraph +where + K: Hash + Eq + Debug + Clone, + V: Debug + Clone, +{ + indices: FxHashMap, + nodes: Vec>, +} + +#[derive(Copy, Clone, Debug, PartialOrd, Ord, PartialEq, Eq, Hash)] +pub(super) struct DepthFirstNumber { + index: usize, +} + +pub(super) struct Node { + pub(crate) goal: K, + + pub(crate) solution: V, + + /// This is `Some(X)` if we are actively exploring this node, or + /// `None` otherwise. + pub(crate) stack_depth: Option, + + /// While this node is on the stack, this field will be set to + /// contain our own depth-first number. Once the node is popped + /// from the stack, it contains the DFN of the minimal ancestor + /// that the table reached (or MAX if no cycle was encountered). + pub(crate) links: Minimums, +} + +impl SearchGraph +where + K: Hash + Eq + Debug + Clone, + V: Debug + Clone, +{ + pub(crate) fn new() -> Self { + SearchGraph { + indices: FxHashMap::default(), + nodes: vec![], + } + } + + pub(crate) fn lookup(&self, goal: &K) -> Option { + self.indices.get(goal).cloned() + } + + /// Insert a new search node in the tree. The node will be in the initial + /// state for a search node: + /// + /// - stack depth as given + /// - links set to its own DFN + /// - solution is initially an identity substitution for coinductive goals + /// or `NoSolution` for other goals + pub(crate) fn insert( + &mut self, + goal: &K, + stack_depth: StackDepth, + solution: V, + ) -> DepthFirstNumber { + let dfn = DepthFirstNumber { + index: self.nodes.len(), + }; + let node = Node { + goal: goal.clone(), + solution, + stack_depth: Some(stack_depth), + links: Minimums { positive: dfn }, + }; + self.nodes.push(node); + let previous_index = self.indices.insert(goal.clone(), dfn); + assert!(previous_index.is_none()); + dfn + } + + /// Clears all nodes with a depth-first number greater than or equal `dfn`. 
+ #[instrument(level = "debug", skip(self))] + pub(crate) fn rollback_to(&mut self, dfn: DepthFirstNumber) { + self.indices.retain(|_key, value| *value < dfn); + self.nodes.truncate(dfn.index); + } + + /// Removes all nodes with a depth-first-number greater than or + /// equal to `dfn`, adding their final solutions into the cache. + #[instrument(level = "debug", skip(self, cache))] + pub(crate) fn move_to_cache(&mut self, dfn: DepthFirstNumber, cache: &Cache) { + self.indices.retain(|_key, value| *value < dfn); + for node in self.nodes.drain(dfn.index..) { + assert!(node.stack_depth.is_none()); + assert!(node.links.positive >= dfn); + debug!("caching solution {:#?} for {:#?}", node.solution, node.goal); + cache.insert(node.goal, node.solution); + } + } +} + +impl Index for SearchGraph +where + K: Hash + Eq + Debug + Clone, + V: Debug + Clone, +{ + type Output = Node; + + fn index(&self, table_index: DepthFirstNumber) -> &Node { + &self.nodes[table_index.index] + } +} + +impl IndexMut for SearchGraph +where + K: Hash + Eq + Debug + Clone, + V: Debug + Clone, +{ + fn index_mut(&mut self, table_index: DepthFirstNumber) -> &mut Node { + &mut self.nodes[table_index.index] + } +} + +impl DepthFirstNumber { + pub(crate) const MAX: DepthFirstNumber = DepthFirstNumber { index: usize::MAX }; +} + +impl Add for DepthFirstNumber { + type Output = DepthFirstNumber; + + fn add(self, v: usize) -> DepthFirstNumber { + DepthFirstNumber { + index: self.index + v, + } + } +} diff --git a/chalk-recursive/src/fixed_point/stack.rs b/chalk-recursive/src/fixed_point/stack.rs new file mode 100644 index 00000000000..fa2af66ea0f --- /dev/null +++ b/chalk-recursive/src/fixed_point/stack.rs @@ -0,0 +1,107 @@ +use std::mem; +use std::ops::Index; +use std::ops::IndexMut; +use std::usize; + +pub(super) struct Stack { + // program: Arc, + entries: Vec, + overflow_depth: usize, +} + +#[derive(Copy, Clone, Debug, PartialOrd, Ord, PartialEq, Eq, Hash)] +pub(super) struct StackDepth { + depth: usize, 
+} + +/// The data we actively keep for each goal on the stack. +pub(super) struct StackEntry { + /// Was this a coinductive goal? + coinductive_goal: bool, + + /// Initially false, set to true when some subgoal depends on us. + cycle: bool, +} + +impl Stack { + pub(super) fn new( + // program: &Arc, + overflow_depth: usize, + ) -> Self { + Stack { + // program: program.clone(), + entries: vec![], + overflow_depth, + } + } + + pub(super) fn is_empty(&self) -> bool { + self.entries.is_empty() + } + + pub(super) fn push(&mut self, coinductive_goal: bool) -> StackDepth { + let depth = StackDepth { + depth: self.entries.len(), + }; + + if depth.depth >= self.overflow_depth { + // This should perhaps be a result or something, though + // really I'd prefer to move to subgoal abstraction for + // guaranteeing termination. -nmatsakis + panic!("overflow depth reached") + } + + self.entries.push(StackEntry { + coinductive_goal, + cycle: false, + }); + depth + } + + pub(super) fn pop(&mut self, depth: StackDepth) { + assert_eq!( + depth.depth + 1, + self.entries.len(), + "mismatched stack push/pop" + ); + self.entries.pop(); + } + + /// True iff there exist at least one coinductive goal + /// and one inductive goal each from the top of the stack + /// down to (and including) the given depth. + pub(super) fn mixed_inductive_coinductive_cycle_from(&self, depth: StackDepth) -> bool { + let coinductive_count = self.entries[depth.depth..] 
+ .iter() + .filter(|entry| entry.coinductive_goal) + .count(); + let total_count = self.entries.len() - depth.depth; + let any_coinductive = coinductive_count != 0; + let any_inductive = coinductive_count != total_count; + any_coinductive && any_inductive + } +} + +impl StackEntry { + pub(super) fn flag_cycle(&mut self) { + self.cycle = true; + } + + pub(super) fn read_and_reset_cycle_flag(&mut self) -> bool { + mem::replace(&mut self.cycle, false) + } +} + +impl Index for Stack { + type Output = StackEntry; + + fn index(&self, depth: StackDepth) -> &StackEntry { + &self.entries[depth.depth] + } +} + +impl IndexMut for Stack { + fn index_mut(&mut self, depth: StackDepth) -> &mut StackEntry { + &mut self.entries[depth.depth] + } +} diff --git a/chalk-recursive/src/fulfill.rs b/chalk-recursive/src/fulfill.rs new file mode 100644 index 00000000000..2524ff795b8 --- /dev/null +++ b/chalk-recursive/src/fulfill.rs @@ -0,0 +1,652 @@ +use crate::fixed_point::Minimums; +use crate::solve::SolveDatabase; +use chalk_ir::cast::Cast; +use chalk_ir::fold::TypeFoldable; +use chalk_ir::interner::{HasInterner, Interner}; +use chalk_ir::visit::TypeVisitable; +use chalk_ir::zip::Zip; +use chalk_ir::{ + Binders, BoundVar, Canonical, ConstrainedSubst, Constraint, Constraints, DomainGoal, + Environment, EqGoal, Fallible, GenericArg, GenericArgData, Goal, GoalData, InEnvironment, + NoSolution, ProgramClauseImplication, QuantifierKind, Substitution, SubtypeGoal, TyKind, + TyVariableKind, UCanonical, UnificationDatabase, UniverseMap, Variance, +}; +use chalk_solve::debug_span; +use chalk_solve::infer::{InferenceTable, ParameterEnaVariableExt}; +use chalk_solve::solve::truncate; +use chalk_solve::{Guidance, Solution}; +use rustc_hash::FxHashSet; +use std::fmt::Debug; +use tracing::{debug, instrument}; + +enum Outcome { + Complete, + Incomplete, +} + +impl Outcome { + fn is_complete(&self) -> bool { + matches!(self, Outcome::Complete) + } +} + +/// A goal that must be resolved 
+#[derive(Clone, Debug, PartialEq, Eq)] +enum Obligation { + /// For "positive" goals, we flatten all the way out to leafs within the + /// current `Fulfill` + Prove(InEnvironment>), + + /// For "negative" goals, we don't flatten in *this* `Fulfill`, which would + /// require having a logical "or" operator. Instead, we recursively solve in + /// a fresh `Fulfill`. + Refute(InEnvironment>), +} + +/// When proving a leaf goal, we record the free variables that appear within it +/// so that we can update inference state accordingly. +#[derive(Clone, Debug)] +struct PositiveSolution { + free_vars: Vec>, + universes: UniverseMap, + solution: Solution, +} + +/// When refuting a goal, there's no impact on inference state. +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +enum NegativeSolution { + Refuted, + Ambiguous, +} + +fn canonicalize( + infer: &mut InferenceTable, + interner: I, + value: T, +) -> (Canonical, Vec>) +where + T: TypeFoldable, + T: HasInterner, +{ + let res = infer.canonicalize(interner, value); + let free_vars = res + .free_vars + .into_iter() + .map(|free_var| free_var.to_generic_arg(interner)) + .collect(); + (res.quantified, free_vars) +} + +fn u_canonicalize( + _infer: &mut InferenceTable, + interner: I, + value0: &Canonical, +) -> (UCanonical, UniverseMap) +where + T: Clone + HasInterner + TypeFoldable + TypeVisitable, + T: HasInterner, +{ + let res = InferenceTable::u_canonicalize(interner, value0); + (res.quantified, res.universes) +} + +fn unify( + infer: &mut InferenceTable, + interner: I, + db: &dyn UnificationDatabase, + environment: &Environment, + variance: Variance, + a: &T, + b: &T, +) -> Fallible>>> +where + T: ?Sized + Zip, +{ + let res = infer.relate(interner, db, environment, variance, a, b)?; + Ok(res.goals) +} + +/// A `Fulfill` is where we actually break down complex goals, instantiate +/// variables, and perform inference. It's highly stateful. 
It's generally used +/// in Chalk to try to solve a goal, and then package up what was learned in a +/// stateless, canonical way. +/// +/// In rustc, you can think of there being an outermost `Fulfill` that's used when +/// type checking each function body, etc. There, the state reflects the state +/// of type inference in general. But when solving trait constraints, *fresh* +/// `Fulfill` instances will be created to solve canonicalized, free-standing +/// goals, and transport what was learned back to the outer context. +pub(super) struct Fulfill<'s, I: Interner, Solver: SolveDatabase> { + solver: &'s mut Solver, + subst: Substitution, + infer: InferenceTable, + + /// The remaining goals to prove or refute + obligations: Vec>, + + /// Lifetime constraints that must be fulfilled for a solution to be fully + /// validated. + constraints: FxHashSet>>, + + /// Record that a goal has been processed that can neither be proved nor + /// refuted. In such a case the solution will be either `CannotProve`, or `Err` + /// in the case where some other goal leads to an error. 
+ cannot_prove: bool, +} + +impl<'s, I: Interner, Solver: SolveDatabase> Fulfill<'s, I, Solver> { + #[instrument(level = "debug", skip(solver, infer))] + pub(super) fn new_with_clause( + solver: &'s mut Solver, + infer: InferenceTable, + subst: Substitution, + canonical_goal: InEnvironment>, + clause: &Binders>, + ) -> Fallible { + let mut fulfill = Fulfill { + solver, + infer, + subst, + obligations: vec![], + constraints: FxHashSet::default(), + cannot_prove: false, + }; + + let ProgramClauseImplication { + consequence, + conditions, + constraints, + priority: _, + } = fulfill + .infer + .instantiate_binders_existentially(fulfill.solver.interner(), clause.clone()); + + debug!(?consequence, ?conditions, ?constraints); + fulfill + .constraints + .extend(constraints.as_slice(fulfill.interner()).to_owned()); + + debug!("the subst is {:?}", fulfill.subst); + + if let Err(e) = fulfill.unify( + &canonical_goal.environment, + Variance::Invariant, + &canonical_goal.goal, + &consequence, + ) { + return Err(e); + } + + // if so, toss in all of its premises + for condition in conditions.as_slice(fulfill.solver.interner()) { + if let Err(e) = fulfill.push_goal(&canonical_goal.environment, condition.clone()) { + return Err(e); + } + } + + Ok(fulfill) + } + + pub(super) fn new_with_simplification( + solver: &'s mut Solver, + infer: InferenceTable, + subst: Substitution, + canonical_goal: InEnvironment>, + ) -> Fallible { + let mut fulfill = Fulfill { + solver, + infer, + subst, + obligations: vec![], + constraints: FxHashSet::default(), + cannot_prove: false, + }; + + if let Err(e) = fulfill.push_goal(&canonical_goal.environment, canonical_goal.goal.clone()) + { + return Err(e); + } + + Ok(fulfill) + } + + fn push_obligation(&mut self, obligation: Obligation) { + // truncate to avoid overflows + match &obligation { + Obligation::Prove(goal) => { + if truncate::needs_truncation( + self.solver.interner(), + &mut self.infer, + self.solver.max_size(), + goal, + ) { + // the goal is 
too big. Record that we should return Ambiguous + self.cannot_prove = true; + return; + } + } + Obligation::Refute(goal) => { + if truncate::needs_truncation( + self.solver.interner(), + &mut self.infer, + self.solver.max_size(), + goal, + ) { + // the goal is too big. Record that we should return Ambiguous + self.cannot_prove = true; + return; + } + } + }; + self.obligations.push(obligation); + } + + /// Unifies `a` and `b` in the given environment. + /// + /// Wraps `InferenceTable::unify`; any resulting normalizations are added + /// into our list of pending obligations with the given environment. + pub(super) fn unify( + &mut self, + environment: &Environment, + variance: Variance, + a: &T, + b: &T, + ) -> Fallible<()> + where + T: ?Sized + Zip + Debug, + { + let goals = unify( + &mut self.infer, + self.solver.interner(), + self.solver.db().unification_database(), + environment, + variance, + a, + b, + )?; + debug!("unify({:?}, {:?}) succeeded", a, b); + debug!("unify: goals={:?}", goals); + for goal in goals { + let goal = goal.cast(self.solver.interner()); + self.push_obligation(Obligation::Prove(goal)); + } + Ok(()) + } + + /// Create obligations for the given goal in the given environment. This may + /// ultimately create any number of obligations. 
+ #[instrument(level = "debug", skip(self))] + pub(super) fn push_goal( + &mut self, + environment: &Environment, + goal: Goal, + ) -> Fallible<()> { + let interner = self.solver.interner(); + match goal.data(interner) { + GoalData::Quantified(QuantifierKind::ForAll, subgoal) => { + let subgoal = self + .infer + .instantiate_binders_universally(self.solver.interner(), subgoal.clone()); + self.push_goal(environment, subgoal)?; + } + GoalData::Quantified(QuantifierKind::Exists, subgoal) => { + let subgoal = self + .infer + .instantiate_binders_existentially(self.solver.interner(), subgoal.clone()); + self.push_goal(environment, subgoal)?; + } + GoalData::Implies(wc, subgoal) => { + let new_environment = + &environment.add_clauses(interner, wc.iter(interner).cloned()); + self.push_goal(new_environment, subgoal.clone())?; + } + GoalData::All(goals) => { + for subgoal in goals.as_slice(interner) { + self.push_goal(environment, subgoal.clone())?; + } + } + GoalData::Not(subgoal) => { + let in_env = InEnvironment::new(environment, subgoal.clone()); + self.push_obligation(Obligation::Refute(in_env)); + } + GoalData::DomainGoal(_) => { + let in_env = InEnvironment::new(environment, goal); + self.push_obligation(Obligation::Prove(in_env)); + } + GoalData::EqGoal(EqGoal { a, b }) => { + self.unify(environment, Variance::Invariant, &a, &b)?; + } + GoalData::SubtypeGoal(SubtypeGoal { a, b }) => { + let a_norm = self.infer.normalize_ty_shallow(interner, a); + let a = a_norm.as_ref().unwrap_or(a); + let b_norm = self.infer.normalize_ty_shallow(interner, b); + let b = b_norm.as_ref().unwrap_or(b); + + if matches!( + a.kind(interner), + TyKind::InferenceVar(_, TyVariableKind::General) + ) && matches!( + b.kind(interner), + TyKind::InferenceVar(_, TyVariableKind::General) + ) { + self.cannot_prove = true; + } else { + self.unify(environment, Variance::Covariant, &a, &b)?; + } + } + GoalData::CannotProve => { + debug!("Pushed a CannotProve goal, setting cannot_prove = true"); + 
self.cannot_prove = true; + } + } + Ok(()) + } + + #[instrument(level = "debug", skip(self, minimums, should_continue))] + fn prove( + &mut self, + wc: InEnvironment>, + minimums: &mut Minimums, + should_continue: impl std::ops::Fn() -> bool + Clone, + ) -> Fallible> { + let interner = self.solver.interner(); + let (quantified, free_vars) = canonicalize(&mut self.infer, interner, wc); + let (quantified, universes) = u_canonicalize(&mut self.infer, interner, &quantified); + let result = self + .solver + .solve_goal(quantified, minimums, should_continue); + Ok(PositiveSolution { + free_vars, + universes, + solution: result?, + }) + } + + fn refute( + &mut self, + goal: InEnvironment>, + should_continue: impl std::ops::Fn() -> bool + Clone, + ) -> Fallible { + let canonicalized = match self + .infer + .invert_then_canonicalize(self.solver.interner(), goal) + { + Some(v) => v, + None => { + // Treat non-ground negatives as ambiguous. Note that, as inference + // proceeds, we may wind up with more information here. + return Ok(NegativeSolution::Ambiguous); + } + }; + + // Negate the result + let (quantified, _) = + u_canonicalize(&mut self.infer, self.solver.interner(), &canonicalized); + let mut minimums = Minimums::new(); // FIXME -- minimums here seems wrong + if let Ok(solution) = self + .solver + .solve_goal(quantified, &mut minimums, should_continue) + { + if solution.is_unique() { + Err(NoSolution) + } else { + Ok(NegativeSolution::Ambiguous) + } + } else { + Ok(NegativeSolution::Refuted) + } + } + + /// Trying to prove some goal led to a the substitution `subst`; we + /// wish to apply that substitution to our own inference variables + /// (and incorporate any region constraints). This substitution + /// requires some mapping to get it into our namespace -- first, + /// the universes it refers to have been canonicalized, and + /// `universes` stores the mapping back into our + /// universes. 
Second, the free variables that appear within can + /// be mapped into our variables with `free_vars`. + fn apply_solution( + &mut self, + free_vars: Vec>, + universes: UniverseMap, + subst: Canonical>, + ) { + use chalk_solve::infer::ucanonicalize::UniverseMapExt; + let subst = universes.map_from_canonical(self.interner(), &subst); + let ConstrainedSubst { subst, constraints } = self + .infer + .instantiate_canonical(self.solver.interner(), subst); + + debug!( + "fulfill::apply_solution: adding constraints {:?}", + constraints + ); + self.constraints + .extend(constraints.as_slice(self.interner()).to_owned()); + + // We use the empty environment for unification here because we're + // really just doing a substitution on unconstrained variables, which is + // guaranteed to succeed without generating any new constraints. + let empty_env = &Environment::new(self.solver.interner()); + + for (i, free_var) in free_vars.into_iter().enumerate() { + let subst_value = subst.at(self.interner(), i); + self.unify(empty_env, Variance::Invariant, &free_var, subst_value) + .unwrap_or_else(|err| { + panic!( + "apply_solution failed with free_var={:?}, subst_value={:?}: {:?}", + free_var, subst_value, err + ); + }); + } + } + + fn fulfill( + &mut self, + minimums: &mut Minimums, + should_continue: impl std::ops::Fn() -> bool + Clone, + ) -> Fallible { + debug_span!("fulfill", obligations=?self.obligations); + + // Try to solve all the obligations. We do this via a fixed-point + // iteration. We try to solve each obligation in turn. Anything which is + // successful, we drop; anything ambiguous, we retain in the + // `obligations` array. This process is repeated so long as we are + // learning new things about our inference state. 
+ let mut obligations = Vec::with_capacity(self.obligations.len()); + let mut progress = true; + + while progress { + progress = false; + debug!("start of round, {} obligations", self.obligations.len()); + + // Take the list of `obligations` to solve this round and replace it + // with an empty vector. Iterate through each obligation to solve + // and solve it if we can. If not (because of ambiguity), then push + // it back onto `self.to_prove` for next round. Note that + // `solve_one` may also push onto the `self.to_prove` list + // directly. + assert!(obligations.is_empty()); + while let Some(obligation) = self.obligations.pop() { + let ambiguous = match &obligation { + Obligation::Prove(wc) => { + let PositiveSolution { + free_vars, + universes, + solution, + } = self.prove(wc.clone(), minimums, should_continue.clone())?; + + if let Some(constrained_subst) = solution.definite_subst(self.interner()) { + // If the substitution is trivial, we won't actually make any progress by applying it! + // So we need to check this to prevent endless loops. + let nontrivial_subst = !is_trivial_canonical_subst( + self.interner(), + &constrained_subst.value.subst, + ); + + let has_constraints = !constrained_subst + .value + .constraints + .is_empty(self.interner()); + + if nontrivial_subst || has_constraints { + self.apply_solution(free_vars, universes, constrained_subst); + progress = true; + } + } + + solution.is_ambig() + } + Obligation::Refute(goal) => { + let answer = self.refute(goal.clone(), should_continue.clone())?; + answer == NegativeSolution::Ambiguous + } + }; + + if ambiguous { + debug!("ambiguous result: {:?}", obligation); + obligations.push(obligation); + } + } + + self.obligations.append(&mut obligations); + debug!("end of round, {} obligations left", self.obligations.len()); + } + + // At the end of this process, `self.obligations` should have + // all of the ambiguous obligations, and `obligations` should + // be empty. 
+ assert!(obligations.is_empty()); + + if self.obligations.is_empty() { + Ok(Outcome::Complete) + } else { + Ok(Outcome::Incomplete) + } + } + + /// Try to fulfill all pending obligations and build the resulting + /// solution. The returned solution will transform `subst` substitution with + /// the outcome of type inference by updating the replacements it provides. + pub(super) fn solve( + mut self, + minimums: &mut Minimums, + should_continue: impl std::ops::Fn() -> bool + Clone, + ) -> Fallible> { + let outcome = match self.fulfill(minimums, should_continue.clone()) { + Ok(o) => o, + Err(e) => return Err(e), + }; + + if self.cannot_prove { + debug!("Goal cannot be proven (cannot_prove = true), returning ambiguous"); + return Ok(Solution::Ambig(Guidance::Unknown)); + } + + if outcome.is_complete() { + // No obligations remain, so we have definitively solved our goals, + // and the current inference state is the unique way to solve them. + + let constraints = Constraints::from_iter(self.interner(), self.constraints.clone()); + let constrained = canonicalize( + &mut self.infer, + self.solver.interner(), + ConstrainedSubst { + subst: self.subst, + constraints, + }, + ); + return Ok(Solution::Unique(constrained.0)); + } + + // Otherwise, we have (positive or negative) obligations remaining, but + // haven't proved that it's *impossible* to satisfy out obligations. we + // need to determine how to package up what we learned about type + // inference as an ambiguous solution. + + let canonical_subst = + canonicalize(&mut self.infer, self.solver.interner(), self.subst.clone()); + + if canonical_subst + .0 + .value + .is_identity_subst(self.solver.interner()) + { + // In this case, we didn't learn *anything* definitively. So now, we + // go one last time through the positive obligations, this time + // applying even *tentative* inference suggestions, so that we can + // yield these upwards as our own suggestions. 
There are no + // particular guarantees about *which* obligaiton we derive + // suggestions from. + + while let Some(obligation) = self.obligations.pop() { + if let Obligation::Prove(goal) = obligation { + let PositiveSolution { + free_vars, + universes, + solution, + } = self.prove(goal, minimums, should_continue.clone()).unwrap(); + if let Some(constrained_subst) = + solution.constrained_subst(self.solver.interner()) + { + self.apply_solution(free_vars, universes, constrained_subst); + return Ok(Solution::Ambig(Guidance::Suggested(canonical_subst.0))); + } + } + } + + Ok(Solution::Ambig(Guidance::Unknown)) + } else { + // While we failed to prove the goal, we still learned that + // something had to hold. Here's an example where this happens: + // + // ```rust + // trait Display {} + // trait Debug {} + // struct Foo {} + // struct Bar {} + // struct Baz {} + // + // impl Display for Bar {} + // impl Display for Baz {} + // + // impl Debug for Foo where T: Display {} + // ``` + // + // If we pose the goal `exists { T: Debug }`, we can't say + // for sure what `T` must be (it could be either `Foo` or + // `Foo`, but we *can* say for sure that it must be of the + // form `Foo`. + Ok(Solution::Ambig(Guidance::Definite(canonical_subst.0))) + } + } + + fn interner(&self) -> I { + self.solver.interner() + } +} + +fn is_trivial_canonical_subst(interner: I, subst: &Substitution) -> bool { + // A subst is trivial if.. + subst.iter(interner).enumerate().all(|(index, parameter)| { + let is_trivial = |b: Option| match b { + None => false, + Some(bound_var) => { + if let Some(index1) = bound_var.index_if_innermost() { + index == index1 + } else { + false + } + } + }; + + match parameter.data(interner) { + // All types and consts are mapped to distinct variables. Since this + // has been canonicalized, those will also be the first N + // variables. 
+ GenericArgData::Ty(t) => is_trivial(t.bound_var(interner)), + GenericArgData::Const(t) => is_trivial(t.bound_var(interner)), + GenericArgData::Lifetime(t) => is_trivial(t.bound_var(interner)), + } + }) +} diff --git a/chalk-recursive/src/lib.rs b/chalk-recursive/src/lib.rs new file mode 100644 index 00000000000..a119a785448 --- /dev/null +++ b/chalk-recursive/src/lib.rs @@ -0,0 +1,12 @@ +use chalk_ir::{Goal, InEnvironment, UCanonical}; + +pub type UCanonicalGoal = UCanonical>>; + +mod combine; +mod fixed_point; +mod fulfill; +mod recursive; +pub mod solve; + +pub use fixed_point::Cache; +pub use recursive::RecursiveSolver; diff --git a/chalk-recursive/src/recursive.rs b/chalk-recursive/src/recursive.rs new file mode 100644 index 00000000000..61680c75269 --- /dev/null +++ b/chalk-recursive/src/recursive.rs @@ -0,0 +1,162 @@ +use crate::fixed_point::{Cache, Minimums, RecursiveContext, SolverStuff}; +use crate::solve::{SolveDatabase, SolveIteration}; +use crate::UCanonicalGoal; +use chalk_ir::{interner::Interner, NoSolution}; +use chalk_ir::{Canonical, ConstrainedSubst, Goal, InEnvironment, UCanonical}; +use chalk_ir::{Constraints, Fallible}; +use chalk_solve::{coinductive_goal::IsCoinductive, RustIrDatabase, Solution}; +use std::fmt; + +/// A Solver is the basic context in which you can propose goals for a given +/// program. **All questions posed to the solver are in canonical, closed form, +/// so that each question is answered with effectively a "clean slate"**. This +/// allows for better caching, and simplifies management of the inference +/// context. 
+struct Solver<'me, I: Interner> { + program: &'me dyn RustIrDatabase, + context: &'me mut RecursiveContext, Fallible>>, +} + +pub struct RecursiveSolver { + ctx: Box, Fallible>>>, +} + +impl RecursiveSolver { + pub fn new( + overflow_depth: usize, + max_size: usize, + cache: Option, Fallible>>>, + ) -> Self { + Self { + ctx: Box::new(RecursiveContext::new(overflow_depth, max_size, cache)), + } + } +} + +impl fmt::Debug for RecursiveSolver { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(fmt, "RecursiveSolver") + } +} + +impl<'me, I: Interner> Solver<'me, I> { + pub(crate) fn new( + context: &'me mut RecursiveContext, Fallible>>, + program: &'me dyn RustIrDatabase, + ) -> Self { + Self { program, context } + } +} + +impl SolverStuff, Fallible>> for &dyn RustIrDatabase { + fn is_coinductive_goal(self, goal: &UCanonicalGoal) -> bool { + goal.is_coinductive(self) + } + + fn initial_value( + self, + goal: &UCanonicalGoal, + coinductive_goal: bool, + ) -> Fallible> { + if coinductive_goal { + Ok(Solution::Unique(Canonical { + value: ConstrainedSubst { + subst: goal.trivial_substitution(self.interner()), + constraints: Constraints::empty(self.interner()), + }, + binders: goal.canonical.binders.clone(), + })) + } else { + Err(NoSolution) + } + } + + fn solve_iteration( + self, + context: &mut RecursiveContext, Fallible>>, + goal: &UCanonicalGoal, + minimums: &mut Minimums, + should_continue: impl std::ops::Fn() -> bool + Clone, + ) -> Fallible> { + Solver::new(context, self).solve_iteration(goal, minimums, should_continue) + } + + fn reached_fixed_point( + self, + old_answer: &Fallible>, + current_answer: &Fallible>, + ) -> bool { + // Some of our subgoals depended on us. We need to re-run + // with the current answer. + old_answer == current_answer || { + // Subtle: if our current answer is ambiguous, we can just stop, and + // in fact we *must* -- otherwise, we sometimes fail to reach a + // fixed point. See `multiple_ambiguous_cycles` for more. 
+ match ¤t_answer { + Ok(s) => s.is_ambig(), + Err(_) => false, + } + } + } + + fn error_value(self) -> Fallible> { + Err(NoSolution) + } +} + +impl<'me, I: Interner> SolveDatabase for Solver<'me, I> { + fn solve_goal( + &mut self, + goal: UCanonicalGoal, + minimums: &mut Minimums, + should_continue: impl std::ops::Fn() -> bool + Clone, + ) -> Fallible> { + self.context + .solve_goal(&goal, minimums, self.program, should_continue) + } + + fn interner(&self) -> I { + self.program.interner() + } + + fn db(&self) -> &dyn RustIrDatabase { + self.program + } + + fn max_size(&self) -> usize { + self.context.max_size() + } +} + +impl chalk_solve::Solver for RecursiveSolver { + fn solve( + &mut self, + program: &dyn RustIrDatabase, + goal: &UCanonical>>, + ) -> Option> { + self.ctx.solve_root_goal(goal, program, || true).ok() + } + + fn solve_limited( + &mut self, + program: &dyn RustIrDatabase, + goal: &UCanonical>>, + should_continue: &dyn std::ops::Fn() -> bool, + ) -> Option> { + self.ctx + .solve_root_goal(goal, program, should_continue) + .ok() + } + + fn solve_multiple( + &mut self, + _program: &dyn RustIrDatabase, + _goal: &UCanonical>>, + _f: &mut dyn FnMut( + chalk_solve::SubstitutionResult>>, + bool, + ) -> bool, + ) -> bool { + unimplemented!("Recursive solver doesn't support multiple answers") + } +} diff --git a/chalk-recursive/src/solve.rs b/chalk-recursive/src/solve.rs new file mode 100644 index 00000000000..ba2525467b9 --- /dev/null +++ b/chalk-recursive/src/solve.rs @@ -0,0 +1,228 @@ +use super::combine; +use super::fulfill::Fulfill; +use crate::fixed_point::Minimums; +use crate::UCanonicalGoal; +use chalk_ir::could_match::CouldMatch; +use chalk_ir::fold::TypeFoldable; +use chalk_ir::interner::{HasInterner, Interner}; +use chalk_ir::{ + Canonical, ClausePriority, DomainGoal, Fallible, Floundered, Goal, GoalData, InEnvironment, + NoSolution, ProgramClause, ProgramClauseData, Substitution, UCanonical, +}; +use 
chalk_solve::clauses::program_clauses_that_could_match; +use chalk_solve::debug_span; +use chalk_solve::infer::InferenceTable; +use chalk_solve::{Guidance, RustIrDatabase, Solution}; +use tracing::{debug, instrument}; + +pub(super) trait SolveDatabase: Sized { + fn solve_goal( + &mut self, + goal: UCanonical>>, + minimums: &mut Minimums, + should_continue: impl std::ops::Fn() -> bool + Clone, + ) -> Fallible>; + + fn max_size(&self) -> usize; + + fn interner(&self) -> I; + + fn db(&self) -> &dyn RustIrDatabase; +} + +/// The `solve_iteration` method -- implemented for any type that implements +/// `SolveDb`. +pub(super) trait SolveIteration: SolveDatabase { + /// Executes one iteration of the recursive solver, computing the current + /// solution to the given canonical goal. This is used as part of a loop in + /// the case of cyclic goals. + #[instrument(level = "debug", skip(self, should_continue))] + fn solve_iteration( + &mut self, + canonical_goal: &UCanonicalGoal, + minimums: &mut Minimums, + should_continue: impl std::ops::Fn() -> bool + Clone, + ) -> Fallible> { + if !should_continue() { + return Ok(Solution::Ambig(Guidance::Unknown)); + } + + let UCanonical { + universes, + canonical: + Canonical { + binders, + value: InEnvironment { environment, goal }, + }, + } = canonical_goal.clone(); + + match goal.data(self.interner()) { + GoalData::DomainGoal(domain_goal) => { + let canonical_goal = UCanonical { + universes, + canonical: Canonical { + binders, + value: InEnvironment { + environment, + goal: domain_goal.clone(), + }, + }, + }; + + // "Domain" goals (i.e., leaf goals that are Rust-specific) are + // always solved via some form of implication. We can either + // apply assumptions from our environment (i.e. where clauses), + // or from the lowered program, which includes fallback + // clauses. 
We try each approach in turn: + + let prog_solution = { + debug_span!("prog_clauses"); + + self.solve_from_clauses(&canonical_goal, minimums, should_continue) + }; + debug!(?prog_solution); + + prog_solution + } + + _ => { + let canonical_goal = UCanonical { + universes, + canonical: Canonical { + binders, + value: InEnvironment { environment, goal }, + }, + }; + + self.solve_via_simplification(&canonical_goal, minimums, should_continue) + } + } + } +} + +impl SolveIteration for S +where + S: SolveDatabase, + I: Interner, +{ +} + +/// Helper methods for `solve_iteration`, private to this module. +trait SolveIterationHelpers: SolveDatabase { + #[instrument(level = "debug", skip(self, minimums, should_continue))] + fn solve_via_simplification( + &mut self, + canonical_goal: &UCanonicalGoal, + minimums: &mut Minimums, + should_continue: impl std::ops::Fn() -> bool + Clone, + ) -> Fallible> { + let (infer, subst, goal) = self.new_inference_table(canonical_goal); + match Fulfill::new_with_simplification(self, infer, subst, goal) { + Ok(fulfill) => fulfill.solve(minimums, should_continue), + Err(e) => Err(e), + } + } + + /// See whether we can solve a goal by implication on any of the given + /// clauses. If multiple such solutions are possible, we attempt to combine + /// them. 
+ fn solve_from_clauses( + &mut self, + canonical_goal: &UCanonical>>, + minimums: &mut Minimums, + should_continue: impl std::ops::Fn() -> bool + Clone, + ) -> Fallible> { + let mut clauses = vec![]; + + let db = self.db(); + let could_match = |c: &ProgramClause| { + c.could_match( + db.interner(), + db.unification_database(), + &canonical_goal.canonical.value.goal, + ) + }; + clauses.extend(db.custom_clauses().into_iter().filter(could_match)); + match program_clauses_that_could_match(db, canonical_goal) { + Ok(goal_clauses) => clauses.extend(goal_clauses.into_iter().filter(could_match)), + Err(Floundered) => { + return Ok(Solution::Ambig(Guidance::Unknown)); + } + } + + let (infer, subst, goal) = self.new_inference_table(canonical_goal); + clauses.extend( + db.program_clauses_for_env(&goal.environment) + .iter(db.interner()) + .cloned() + .filter(could_match), + ); + + let mut cur_solution = None; + for program_clause in clauses { + debug_span!("solve_from_clauses", clause = ?program_clause); + + let ProgramClauseData(implication) = program_clause.data(self.interner()); + let infer = infer.clone(); + let subst = subst.clone(); + let goal = goal.clone(); + let res = match Fulfill::new_with_clause(self, infer, subst, goal, implication) { + Ok(fulfill) => ( + fulfill.solve(minimums, should_continue.clone()), + implication.skip_binders().priority, + ), + Err(e) => (Err(e), ClausePriority::High), + }; + + if let (Ok(solution), priority) = res { + debug!(?solution, ?priority, "Ok"); + cur_solution = Some(match cur_solution { + None => (solution, priority), + Some((cur, cur_priority)) => combine::with_priorities( + self.interner(), + &canonical_goal.canonical.value.goal, + cur, + cur_priority, + solution, + priority, + ), + }); + } else { + debug!("Error"); + } + + if let Some((cur_solution, _)) = &cur_solution { + if cur_solution.is_trivial_and_always_true(self.interner()) { + break; + } + } + } + + if let Some((s, _)) = cur_solution { + debug!("solve_from_clauses: 
result = {:?}", s); + Ok(s) + } else { + debug!("solve_from_clauses: error"); + Err(NoSolution) + } + } + + fn new_inference_table + HasInterner + Clone>( + &self, + ucanonical_goal: &UCanonical>, + ) -> (InferenceTable, Substitution, InEnvironment) { + let (infer, subst, canonical_goal) = InferenceTable::from_canonical( + self.interner(), + ucanonical_goal.universes, + ucanonical_goal.canonical.clone(), + ); + (infer, subst, canonical_goal) + } +} + +impl SolveIterationHelpers for S +where + S: SolveDatabase, + I: Interner, +{ +} diff --git a/chalk-solve/Cargo.toml b/chalk-solve/Cargo.toml index 8816b95f18a..91f7a85e7ac 100644 --- a/chalk-solve/Cargo.toml +++ b/chalk-solve/Cargo.toml @@ -1,28 +1,31 @@ [package] name = "chalk-solve" -version = "0.1.0" -license = "Apache-2.0/MIT" +version = "0.104.0-dev.0" description = "Combines the chalk-engine with chalk-ir" +license = "MIT OR Apache-2.0" authors = ["Rust Compiler Team", "Chalk developers"] -repository = "https://github.com/rust-lang-nursery/chalk" +repository = "https://github.com/rust-lang/chalk" readme = "README.md" keywords = ["compiler", "traits", "prolog"] +edition = "2018" [dependencies] -ena = "0.10.1" +ena = "0.14.0" +itertools = "0.12.0" +petgraph = "0.6.4" +tracing = "0.1" +tracing-subscriber = { version = "0.3", optional = true, features = ["env-filter"] } +tracing-tree = { version = "0.3", optional = true } +rustc-hash = { version = "1.1.0" } -[dependencies.chalk-macros] -version = "0.1.0" -path = "../chalk-macros" +chalk-derive = { version = "0.104.0-dev.0", path = "../chalk-derive" } +chalk-ir = { version = "0.104.0-dev.0", path = "../chalk-ir" } +indexmap = "2" -[dependencies.chalk-engine] -version = "0.9.0" -path = "../chalk-engine" +[dev-dependencies] +chalk-integration = { path = "../chalk-integration" } -[dependencies.chalk-parse] -version = "0.1.0" -path = "../chalk-parse" +[features] +default = ["tracing-full"] -[dependencies.chalk-ir] -version = 
"0.1.0" -path = "../chalk-ir" +tracing-full = ["tracing-subscriber", "tracing-tree"] diff --git a/chalk-solve/README.md b/chalk-solve/README.md new file mode 100644 index 00000000000..bed41761d21 --- /dev/null +++ b/chalk-solve/README.md @@ -0,0 +1,3 @@ +A library that defines the rules that translates Rust IR to logical predicates. + +See [Github](https://github.com/rust-lang/chalk) for up-to-date information. diff --git a/chalk-solve/src/clauses.rs b/chalk-solve/src/clauses.rs new file mode 100644 index 00000000000..329c6b984d2 --- /dev/null +++ b/chalk-solve/src/clauses.rs @@ -0,0 +1,1175 @@ +use self::builder::ClauseBuilder; +use self::env_elaborator::elaborate_env_clauses; +use self::program_clauses::ToProgramClauses; +use crate::goal_builder::GoalBuilder; +use crate::rust_ir::{Movability, WellKnownTrait}; +use crate::split::Split; +use crate::RustIrDatabase; +use chalk_ir::cast::{Cast, Caster}; +use chalk_ir::could_match::CouldMatch; +use chalk_ir::interner::Interner; +use chalk_ir::*; +use rustc_hash::FxHashSet; +use std::iter; +use std::marker::PhantomData; +use tracing::{debug, instrument}; + +pub mod builder; +mod builtin_traits; +mod dyn_ty; +mod env_elaborator; +mod generalize; +pub mod program_clauses; +mod super_traits; + +// yields the types "contained" in `app_ty` +fn constituent_types(db: &dyn RustIrDatabase, ty: &TyKind) -> Vec> { + let interner = db.interner(); + + match ty { + // For non-phantom_data adts we collect its variants/fields + TyKind::Adt(adt_id, substitution) if !db.adt_datum(*adt_id).flags.phantom_data => { + let adt_datum = &db.adt_datum(*adt_id); + let adt_datum_bound = adt_datum.binders.clone().substitute(interner, substitution); + adt_datum_bound + .variants + .into_iter() + .flat_map(|variant| variant.fields.into_iter()) + .collect() + } + // And for `PhantomData`, we pass `T`. 
+ TyKind::Adt(_, substitution) + | TyKind::Tuple(_, substitution) + | TyKind::FnDef(_, substitution) => substitution + .iter(interner) + .filter_map(|x| x.ty(interner)) + .cloned() + .collect(), + + TyKind::Array(ty, _) | TyKind::Slice(ty) | TyKind::Raw(_, ty) | TyKind::Ref(_, _, ty) => { + vec![ty.clone()] + } + + TyKind::Str | TyKind::Never | TyKind::Scalar(_) => Vec::new(), + + TyKind::Coroutine(coroutine_id, substitution) => { + let coroutine_datum = &db.coroutine_datum(*coroutine_id); + let coroutine_datum_bound = coroutine_datum + .input_output + .clone() + .substitute(interner, &substitution); + + let mut tys = coroutine_datum_bound.upvars; + tys.push( + TyKind::CoroutineWitness(*coroutine_id, substitution.clone()).intern(interner), + ); + tys + } + + TyKind::Closure(_, _) => panic!("this function should not be called for closures"), + TyKind::CoroutineWitness(_, _) => { + panic!("this function should not be called for coroutine witnesses") + } + TyKind::Function(_) => panic!("this function should not be called for functions"), + TyKind::InferenceVar(_, _) | TyKind::BoundVar(_) => { + panic!("this function should not be called for inference or bound vars") + } + TyKind::Placeholder(_) => panic!("this function should not be called for placeholders"), + TyKind::Dyn(_) => panic!("this function should not be called for dyn types"), + TyKind::Alias(_) => panic!("this function should not be called for alias"), + TyKind::Foreign(_) => panic!("constituent_types of foreign types are unknown!"), + TyKind::Error => Vec::new(), + TyKind::OpaqueType(_, _) => panic!("constituent_types of opaque types are unknown!"), + TyKind::AssociatedType(_, _) => { + panic!("constituent_types of associated types are unknown!") + } + } +} + +/// FIXME(#505) update comments for ADTs +/// For auto-traits, we generate a default rule for every struct, +/// unless there is a manual impl for that struct given explicitly. 
+/// +/// So, if you have `impl Send for MyList`, then we would +/// generate no rule for `MyList` at all -- similarly if you have +/// `impl !Send for MyList`, or `impl Send for MyList`. +/// +/// But if you have no rules at all for `Send` / `MyList`, then we +/// generate an impl based on the field types of `MyList`. For example +/// given the following program: +/// +/// ```notrust +/// #[auto] trait Send { } +/// +/// struct MyList { +/// data: T, +/// next: Box>>, +/// } +/// +/// ``` +/// +/// we generate: +/// +/// ```notrust +/// forall { +/// Implemented(MyList: Send) :- +/// Implemented(T: Send), +/// Implemented(Box>>: Send). +/// } +/// ``` +#[instrument(level = "debug", skip(builder))] +pub fn push_auto_trait_impls( + builder: &mut ClauseBuilder<'_, I>, + auto_trait_id: TraitId, + ty: &TyKind, +) -> Result<(), Floundered> { + let interner = builder.interner(); + + // Must be an auto trait. + assert!(builder.db.trait_datum(auto_trait_id).is_auto_trait()); + + // Auto traits never have generic parameters of their own (apart from `Self`). + assert_eq!( + builder.db.trait_datum(auto_trait_id).binders.len(interner), + 1 + ); + + // If there is a `impl AutoTrait for Foo<..>` or `impl !AutoTrait + // for Foo<..>`, where `Foo` is the adt we're looking at, then + // we don't generate our own rules. 
+ if builder.db.impl_provided_for(auto_trait_id, ty) { + debug!("impl provided"); + return Ok(()); + } + + let mk_ref = |ty: Ty| TraitRef { + trait_id: auto_trait_id, + substitution: Substitution::from1(interner, ty.cast(interner)), + }; + + let consequence = mk_ref(ty.clone().intern(interner)); + + match ty { + // function-types implement auto traits unconditionally + TyKind::Function(_) => { + builder.push_fact(consequence); + Ok(()) + } + TyKind::InferenceVar(_, _) | TyKind::BoundVar(_) => Err(Floundered), + + // auto traits are not implemented for foreign types + TyKind::Foreign(_) => Ok(()), + + // closures require binders, while the other types do not + TyKind::Closure(closure_id, substs) => { + let closure_fn_substitution = builder.db.closure_fn_substitution(*closure_id, substs); + let binders = builder.db.closure_upvars(*closure_id, substs); + let upvars = binders.substitute(builder.db.interner(), &closure_fn_substitution); + + // in a same behavior as for non-auto traits (reuse the code) we can require that + // every bound type must implement this auto-trait + use crate::clauses::builtin_traits::needs_impl_for_tys; + needs_impl_for_tys(builder.db, builder, consequence, Some(upvars).into_iter()); + + Ok(()) + } + TyKind::Coroutine(coroutine_id, _) => { + if Some(auto_trait_id) == builder.db.well_known_trait_id(WellKnownTrait::Unpin) { + match builder.db.coroutine_datum(*coroutine_id).movability { + // immovable coroutines are never `Unpin` + Movability::Static => (), + // movable coroutines are always `Unpin` + Movability::Movable => builder.push_fact(consequence), + } + } else { + // if trait is not `Unpin`, use regular auto trait clause + let conditions = constituent_types(builder.db, ty).into_iter().map(mk_ref); + builder.push_clause(consequence, conditions); + } + Ok(()) + } + + TyKind::CoroutineWitness(coroutine_id, _) => { + push_auto_trait_impls_coroutine_witness(builder, auto_trait_id, *coroutine_id); + Ok(()) + } + + 
TyKind::OpaqueType(opaque_ty_id, _) => { + push_auto_trait_impls_opaque(builder, auto_trait_id, *opaque_ty_id); + Ok(()) + } + + // No auto traits + TyKind::AssociatedType(_, _) + | TyKind::Placeholder(_) + | TyKind::Dyn(_) + | TyKind::Alias(_) => Ok(()), + + // app_ty implements AutoTrait if all constituents of app_ty implement AutoTrait + _ => { + let conditions = constituent_types(builder.db, ty).into_iter().map(mk_ref); + + builder.push_clause(consequence, conditions); + Ok(()) + } + } +} + +/// Leak auto traits for opaque types, just like `push_auto_trait_impls` does for structs. +/// +/// For example, given the following program: +/// +/// ```notrust +/// #[auto] trait Send { } +/// trait Trait { } +/// struct Bar { } +/// opaque type Foo: Trait = Bar +/// ``` +/// Checking the goal `Foo: Send` would generate the following: +/// +/// ```notrust +/// Foo: Send :- Bar: Send +/// ``` +#[instrument(level = "debug", skip(builder))] +pub fn push_auto_trait_impls_opaque( + builder: &mut ClauseBuilder<'_, I>, + auto_trait_id: TraitId, + opaque_id: OpaqueTyId, +) { + let opaque_ty_datum = &builder.db.opaque_ty_data(opaque_id); + let interner = builder.interner(); + + // Must be an auto trait. + assert!(builder.db.trait_datum(auto_trait_id).is_auto_trait()); + + // Auto traits never have generic parameters of their own (apart from `Self`). 
+ assert_eq!( + builder.db.trait_datum(auto_trait_id).binders.len(interner), + 1 + ); + + let hidden_ty = builder.db.hidden_opaque_type(opaque_id); + let binders = opaque_ty_datum.bound.clone(); + builder.push_binders(binders, |builder, _| { + let self_ty = + TyKind::OpaqueType(opaque_id, builder.substitution_in_scope()).intern(interner); + + // trait_ref = `OpaqueType<...>: MyAutoTrait` + let auto_trait_ref = TraitRef { + trait_id: auto_trait_id, + substitution: Substitution::from1(interner, self_ty), + }; + + // OpaqueType<...>: MyAutoTrait :- HiddenType: MyAutoTrait + builder.push_clause( + auto_trait_ref, + std::iter::once(TraitRef { + trait_id: auto_trait_id, + substitution: Substitution::from1(interner, hidden_ty.clone()), + }), + ); + }); +} + +#[instrument(level = "debug", skip(builder))] +pub fn push_auto_trait_impls_coroutine_witness( + builder: &mut ClauseBuilder<'_, I>, + auto_trait_id: TraitId, + coroutine_id: CoroutineId, +) { + let witness_datum = builder.db.coroutine_witness_datum(coroutine_id); + let interner = builder.interner(); + + // Must be an auto trait. + assert!(builder.db.trait_datum(auto_trait_id).is_auto_trait()); + + // Auto traits never have generic parameters of their own (apart from `Self`). + assert_eq!( + builder.db.trait_datum(auto_trait_id).binders.len(interner), + 1 + ); + + // Push binders for the coroutine generic parameters. These can be used by + // both upvars and witness types + builder.push_binders(witness_datum.inner_types.clone(), |builder, inner_types| { + let witness_ty = TyKind::CoroutineWitness(coroutine_id, builder.substitution_in_scope()) + .intern(interner); + + // trait_ref = `CoroutineWitness<...>: MyAutoTrait` + let auto_trait_ref = TraitRef { + trait_id: auto_trait_id, + substitution: Substitution::from1(interner, witness_ty), + }; + + // Create a goal of the form: + // forall { + // WitnessType1: MyAutoTrait, + // ... 
+ // WitnessTypeN: MyAutoTrait, + // + // } + // + // where `L0, L1, ...LN` are our existentially bound witness lifetimes, + // and `P0, P1, ..., PN` are the normal coroutine generics. + // + // We create a 'forall' goal due to the fact that our witness lifetimes + // are *existentially* quantified - the precise reigon is erased during + // type checking, so we just know that the type takes *some* region + // as a parameter. Therefore, we require that the auto trait bound + // hold for *all* regions, which guarantees that the bound will + // hold for the original lifetime (before it was erased). + // + // This does not take into account well-formed information from + // the witness types. For example, if we have the type + // `struct Foo<'a, 'b> { val: &'a &'b u8 }` + // then `'b: 'a` must hold for `Foo<'a, 'b>` to be well-formed. + // If we have `Foo<'a, 'b>` stored as a witness type, we will + // not currently use this information to determine a more precise + // relationship between 'a and 'b. In the future, we will likely + // do this to avoid incorrectly rejecting correct code. + let gb = &mut GoalBuilder::new(builder.db); + let witness_goal = gb.forall( + &inner_types.types, + auto_trait_id, + |gb, _subst, types, auto_trait_id| { + Goal::new( + gb.interner(), + GoalData::All(Goals::from_iter( + gb.interner(), + types.iter().map(|witness_ty| TraitRef { + trait_id: auto_trait_id, + substitution: Substitution::from1(gb.interner(), witness_ty.clone()), + }), + )), + ) + }, + ); + + // CoroutineWitnessType: AutoTrait :- forall<...> ... + // where 'forall<...> ...' is the goal described above. + builder.push_clause(auto_trait_ref, std::iter::once(witness_goal)); + }) +} + +/// Given some goal `goal` that must be proven, along with +/// its `environment`, figures out the program clauses that apply +/// to this goal from the Rust program. 
So for example if the goal +/// is `Implemented(T: Clone)`, then this function might return clauses +/// derived from the trait `Clone` and its impls. +#[instrument(level = "debug", skip(db))] +pub fn program_clauses_for_goal<'db, I: Interner>( + db: &'db dyn RustIrDatabase, + goal: &UCanonical>>, +) -> Result>, Floundered> { + let interner = db.interner(); + + let custom_clauses = db.custom_clauses().into_iter(); + let clauses_that_could_match = + program_clauses_that_could_match(db, goal).map(|cl| cl.into_iter())?; + + let clauses: Vec> = custom_clauses + .chain(clauses_that_could_match) + .chain( + db.program_clauses_for_env(&goal.canonical.value.environment) + .iter(interner) + .cloned(), + ) + .filter(|c| { + c.could_match( + interner, + db.unification_database(), + &goal.canonical.value.goal, + ) + }) + .collect(); + + debug!(?clauses); + + Ok(clauses) +} + +/// Returns a set of program clauses that could possibly match +/// `goal`. This can be any superset of the correct set, but the +/// more precise you can make it, the more efficient solving will +/// be. 
+#[instrument(level = "debug", skip(db))] +pub fn program_clauses_that_could_match( + db: &dyn RustIrDatabase, + goal: &UCanonical>>, +) -> Result>, Floundered> { + let interner = db.interner(); + let mut clauses: Vec> = vec![]; + let builder = &mut ClauseBuilder::new(db, &mut clauses); + + let UCanonical { + canonical: + Canonical { + value: InEnvironment { environment, goal }, + binders, + }, + universes: _, + } = goal; + + match goal { + DomainGoal::Holds(WhereClause::Implemented(trait_ref)) => { + let self_ty = trait_ref.self_type_parameter(interner); + + let trait_id = trait_ref.trait_id; + let trait_datum = db.trait_datum(trait_id); + + match self_ty.kind(interner) { + TyKind::InferenceVar(_, _) => { + panic!("Inference vars not allowed when getting program clauses") + } + TyKind::Alias(alias) => { + // An alias could normalize to anything, including `dyn trait` + // or an opaque type, so push a clause that asks for the + // self type to be normalized and return. + push_alias_implemented_clause(builder, trait_ref.clone(), alias.clone()); + return Ok(clauses); + } + + _ if self_ty.is_general_var(interner, binders) => { + if trait_datum.is_non_enumerable_trait() || trait_datum.is_auto_trait() { + return Err(Floundered); + } + } + + TyKind::OpaqueType(opaque_ty_id, _) => { + db.opaque_ty_data(*opaque_ty_id) + .to_program_clauses(builder, environment); + } + + TyKind::AssociatedType(assoc_ty_id, _) => { + db.associated_ty_data(*assoc_ty_id) + .to_program_clauses(builder, environment); + } + + TyKind::Dyn(_) => { + // If the self type is a `dyn trait` type, generate program-clauses + // that indicates that it implements its own traits. + // FIXME: This is presently rather wasteful, in that we don't check that the + // these program clauses we are generating are actually relevant to the goal + // `goal` that we are actually *trying* to prove (though there is some later + // code that will screen out irrelevant stuff). 
+ // + // In other words, if we were trying to prove `Implemented(dyn + // Fn(&u8): Clone)`, we would still generate two clauses that are + // totally irrelevant to that goal, because they let us prove other + // things but not `Clone`. + dyn_ty::build_dyn_self_ty_clauses(db, builder, self_ty.clone()) + } + + // We don't actually do anything here, but we need to record the types when logging + TyKind::Adt(adt_id, _) => { + let _ = db.adt_datum(*adt_id); + } + + TyKind::FnDef(fn_def_id, _) => { + let _ = db.fn_def_datum(*fn_def_id); + } + + _ => {} + } + + // This is needed for the coherence related impls, as well + // as for the `Implemented(Foo) :- FromEnv(Foo)` rule. + trait_datum.to_program_clauses(builder, environment); + + for impl_id in db.impls_for_trait( + trait_ref.trait_id, + trait_ref.substitution.as_slice(interner), + binders, + ) { + db.impl_datum(impl_id) + .to_program_clauses(builder, environment); + } + + // If this is a `Foo: Send` (or any auto-trait), then add + // the automatic impls for `Foo`. + let trait_datum = db.trait_datum(trait_id); + if trait_datum.is_auto_trait() { + let generalized = generalize::Generalize::apply(db.interner(), trait_ref.clone()); + builder.push_binders(generalized, |builder, trait_ref| { + let ty = trait_ref.self_type_parameter(interner); + push_auto_trait_impls(builder, trait_id, ty.kind(interner)) + })?; + } + + if let Some(well_known) = trait_datum.well_known { + builtin_traits::add_builtin_program_clauses( + db, + builder, + well_known, + trait_ref.clone(), + binders, + )?; + } + } + DomainGoal::Holds(WhereClause::AliasEq(alias_eq)) => match &alias_eq.alias { + AliasTy::Projection(proj) => { + let trait_self_ty = db + .trait_ref_from_projection(proj) + .self_type_parameter(interner); + + match trait_self_ty.kind(interner) { + TyKind::Alias(alias) => { + // An alias could normalize to anything, including an + // opaque type, so push a clause that asks for the self + // type to be normalized and return. 
+ push_alias_alias_eq_clause(builder, proj.clone(), alias.clone()); + return Ok(clauses); + } + TyKind::OpaqueType(opaque_ty_id, _) => { + db.opaque_ty_data(*opaque_ty_id) + .to_program_clauses(builder, environment); + } + TyKind::AssociatedType(assoc_ty_id, _) => { + db.associated_ty_data(*assoc_ty_id) + .to_program_clauses(builder, environment); + } + // If the self type is a `dyn trait` type, generate program-clauses + // for any associated type bindings it contains. + // FIXME: see the fixme for the analogous code for Implemented goals. + TyKind::Dyn(_) => { + dyn_ty::build_dyn_self_ty_clauses(db, builder, trait_self_ty.clone()) + } + _ => {} + } + + db.associated_ty_data(proj.associated_ty_id) + .to_program_clauses(builder, environment) + } + AliasTy::Opaque(opaque_ty) => db + .opaque_ty_data(opaque_ty.opaque_ty_id) + .to_program_clauses(builder, environment), + }, + DomainGoal::Holds(WhereClause::LifetimeOutlives(..)) => { + builder.push_bound_lifetime(|builder, a| { + builder.push_bound_lifetime(|builder, b| { + builder.push_fact_with_constraints( + DomainGoal::Holds(WhereClause::LifetimeOutlives(LifetimeOutlives { + a: a.clone(), + b: b.clone(), + })), + Some(InEnvironment::new( + &Environment::new(interner), + Constraint::LifetimeOutlives(a, b), + )), + ); + }) + }); + } + DomainGoal::Holds(WhereClause::TypeOutlives(..)) => { + builder.push_bound_ty(|builder, ty| { + builder.push_bound_lifetime(|builder, lifetime| { + builder.push_fact_with_constraints( + DomainGoal::Holds(WhereClause::TypeOutlives(TypeOutlives { + ty: ty.clone(), + lifetime: lifetime.clone(), + })), + Some(InEnvironment::new( + &Environment::new(interner), + Constraint::TypeOutlives(ty, lifetime), + )), + ) + }) + }); + } + DomainGoal::WellFormed(WellFormed::Trait(trait_ref)) + | DomainGoal::LocalImplAllowed(trait_ref) => { + db.trait_datum(trait_ref.trait_id) + .to_program_clauses(builder, environment); + } + DomainGoal::ObjectSafe(trait_id) => { + if builder.db.is_object_safe(*trait_id) 
{ + builder.push_fact(DomainGoal::ObjectSafe(*trait_id)); + } + } + DomainGoal::WellFormed(WellFormed::Ty(ty)) + | DomainGoal::IsUpstream(ty) + | DomainGoal::DownstreamType(ty) + | DomainGoal::IsFullyVisible(ty) + | DomainGoal::IsLocal(ty) => match_ty(builder, environment, ty)?, + DomainGoal::FromEnv(_) => (), // Computed in the environment + DomainGoal::Normalize(Normalize { alias, ty: _ }) => match alias { + AliasTy::Projection(proj) => { + // Normalize goals derive from `AssociatedTyValue` datums, + // which are found in impls. That is, if we are + // normalizing (e.g.) `::Item>`, then + // search for impls of iterator and, within those impls, + // for associated type values: + // + // ```ignore + // impl Iterator for Foo { + // type Item = Bar; // <-- associated type value + // } + // ``` + let associated_ty_datum = db.associated_ty_data(proj.associated_ty_id); + let trait_id = associated_ty_datum.trait_id; + let trait_ref = db.trait_ref_from_projection(proj); + let trait_parameters = trait_ref.substitution.as_parameters(interner); + + let trait_datum = db.trait_datum(trait_id); + + let self_ty = trait_ref.self_type_parameter(interner); + if let TyKind::InferenceVar(_, _) = self_ty.kind(interner) { + panic!("Inference vars not allowed when getting program clauses"); + } + + // Flounder if the self-type is unknown and the trait is non-enumerable. 
+ // + // e.g., Normalize(::Item = u32) + if (self_ty.is_general_var(interner, binders)) + && trait_datum.is_non_enumerable_trait() + { + return Err(Floundered); + } + + if let Some(well_known) = trait_datum.well_known { + builtin_traits::add_builtin_assoc_program_clauses( + db, + builder, + well_known, + self_ty.clone(), + )?; + } + + push_program_clauses_for_associated_type_values_in_impls_of( + builder, + environment, + trait_id, + proj.associated_ty_id, + trait_parameters, + binders, + ); + + if environment.has_compatible_clause(interner) { + push_clauses_for_compatible_normalize( + db, + builder, + interner, + trait_id, + proj.associated_ty_id, + ); + } + + // When `self_ty` is dyn type or opaque type, there may be associated type bounds + // for which we generate `Normalize` clauses. + match self_ty.kind(interner) { + // FIXME: see the fixme for the analogous code for Implemented goals. + TyKind::Dyn(_) => dyn_ty::build_dyn_self_ty_clauses(db, builder, self_ty), + TyKind::OpaqueType(id, _) => { + db.opaque_ty_data(*id) + .to_program_clauses(builder, environment); + } + _ => {} + } + } + AliasTy::Opaque(_) => (), + }, + DomainGoal::Compatible | DomainGoal::Reveal => (), + }; + + Ok(clauses) +} + +/// Adds clauses to allow normalizing possible downstream associated type +/// implementations when in the "compatible" mode. 
Example clauses: +/// +/// ```notrust +/// for Normalize(<^0.0 as Trait<^0.1>>::Item -> ^0.2) +/// :- Compatible, Implemented(^0.0: Trait<^0.1>), DownstreamType(^0.1), CannotProve +/// for Normalize(<^0.0 as Trait<^0.1>>::Item -> ^0.2) +/// :- Compatible, Implemented(^0.0: Trait<^0.1>), IsFullyVisible(^0.0), DownstreamType(^0.1), CannotProve +/// ``` +fn push_clauses_for_compatible_normalize( + db: &dyn RustIrDatabase, + builder: &mut ClauseBuilder<'_, I>, + interner: I, + trait_id: TraitId, + associated_ty_id: AssocTypeId, +) { + let trait_datum = db.trait_datum(trait_id); + let trait_binders = trait_datum.binders.map_ref(|b| &b.where_clauses).cloned(); + builder.push_binders(trait_binders, |builder, where_clauses| { + let projection = ProjectionTy { + associated_ty_id, + substitution: builder.substitution_in_scope(), + }; + let trait_ref = TraitRef { + trait_id, + substitution: builder.substitution_in_scope(), + }; + let type_parameters: Vec<_> = trait_ref.type_parameters(interner).collect(); + + builder.push_bound_ty(|builder, target_ty| { + for i in 0..type_parameters.len() { + builder.push_clause( + DomainGoal::Normalize(Normalize { + ty: target_ty.clone(), + alias: AliasTy::Projection(projection.clone()), + }), + where_clauses + .iter() + .cloned() + .casted(interner) + .chain(iter::once(DomainGoal::Compatible.cast(interner))) + .chain(iter::once( + WhereClause::Implemented(trait_ref.clone()).cast(interner), + )) + .chain((0..i).map(|j| { + DomainGoal::IsFullyVisible(type_parameters[j].clone()).cast(interner) + })) + .chain(iter::once( + DomainGoal::DownstreamType(type_parameters[i].clone()).cast(interner), + )) + .chain(iter::once(GoalData::CannotProve.intern(interner))), + ); + } + }); + }); +} + +/// Generate program clauses from the associated-type values +/// found in impls of the given trait. 
i.e., if `trait_id` = Iterator, +/// then we would generate program clauses from each `type Item = ...` +/// found in any impls of `Iterator`: +/// which are found in impls. That is, if we are +/// normalizing (e.g.) `::Item>`, then +/// search for impls of iterator and, within those impls, +/// for associated type values: +/// +/// ```ignore +/// impl Iterator for Foo { +/// type Item = Bar; // <-- associated type value +/// } +/// ``` +#[instrument(level = "debug", skip(builder))] +fn push_program_clauses_for_associated_type_values_in_impls_of( + builder: &mut ClauseBuilder<'_, I>, + environment: &Environment, + trait_id: TraitId, + assoc_id: AssocTypeId, + trait_parameters: &[GenericArg], + binders: &CanonicalVarKinds, +) { + for impl_id in builder + .db + .impls_for_trait(trait_id, trait_parameters, binders) + { + let impl_datum = builder.db.impl_datum(impl_id); + if !impl_datum.is_positive() { + continue; + } + + debug!(?impl_id); + + if let Some(atv_id) = builder.db.associated_ty_from_impl(impl_id, assoc_id) { + let atv = builder.db.associated_ty_value(atv_id); + debug!(?atv_id, ?atv); + atv.to_program_clauses(builder, environment); + } + } +} + +fn push_alias_implemented_clause( + builder: &mut ClauseBuilder<'_, I>, + trait_ref: TraitRef, + alias: AliasTy, +) { + let interner = builder.interner(); + assert_eq!( + *trait_ref.self_type_parameter(interner).kind(interner), + TyKind::Alias(alias.clone()) + ); + + // TODO: instead generate clauses without reference to the specific type parameters of the goal? + let generalized = generalize::Generalize::apply(interner, (trait_ref, alias)); + builder.push_binders(generalized, |builder, (trait_ref, alias)| { + // forall<..., T> { + // ::Z: Trait :- T: Trait, ::Z == T + // } + builder.push_bound_ty(|builder, bound_var| { + let fresh_self_subst = Substitution::from_iter( + interner, + std::iter::once(bound_var.clone().cast(interner)).chain( + trait_ref.substitution.as_slice(interner)[1..] 
+ .iter() + .cloned(), + ), + ); + let fresh_self_trait_ref = TraitRef { + trait_id: trait_ref.trait_id, + substitution: fresh_self_subst, + }; + builder.push_clause( + DomainGoal::Holds(WhereClause::Implemented(trait_ref.clone())), + &[ + DomainGoal::Holds(WhereClause::Implemented(fresh_self_trait_ref)), + DomainGoal::Holds(WhereClause::AliasEq(AliasEq { + alias: alias.clone(), + ty: bound_var, + })), + ], + ); + }); + }); +} + +fn push_alias_alias_eq_clause( + builder: &mut ClauseBuilder<'_, I>, + projection_ty: ProjectionTy, + alias: AliasTy, +) { + let interner = builder.interner(); + let self_ty = builder + .db + .trait_ref_from_projection(&projection_ty) + .self_type_parameter(interner); + assert_eq!(*self_ty.kind(interner), TyKind::Alias(alias.clone())); + + // TODO: instead generate clauses without reference to the specific type parameters of the goal? + let generalized = generalize::Generalize::apply(interner, (projection_ty, alias)); + builder.push_binders(generalized, |builder, (projection_ty, alias)| { + // Given the following canonical goal: + // + // ``` + // forall<...> { + // <::A as Z>::B == W + // } + // ``` + // + // we generate: + // + // ``` + // forall<..., T, U> { + // <::A as Z>::B == U :- ::B == U, ::A == T + // } + // ``` + // + // `T` and `U` are `intermediate_eq_ty` and `eq_ty` respectively below. + // + // Note that we used to "reuse" `W` and push: + // + // ``` + // forall<..., T> { + // <::A as Z>::B == W :- ::B == W, ::A == T + // } + // ``` + // + // but it caused a cycle which led to false `NoSolution` under certain conditions, in + // particular when `W` itself is a nested projection type. See test + // `nested_proj_eq_nested_proj_should_flounder`. 
+ builder.push_bound_ty(|builder, intermediate_eq_ty| { + builder.push_bound_ty(|builder, eq_ty| { + let (_, trait_args, assoc_args) = builder.db.split_projection(&projection_ty); + let fresh_self_subst = Substitution::from_iter( + interner, + std::iter::once(intermediate_eq_ty.clone().cast(interner)) + .chain(trait_args[1..].iter().cloned()) + .chain(assoc_args.iter().cloned()), + ); + let fresh_alias = AliasTy::Projection(ProjectionTy { + associated_ty_id: projection_ty.associated_ty_id, + substitution: fresh_self_subst, + }); + builder.push_clause( + DomainGoal::Holds(WhereClause::AliasEq(AliasEq { + alias: AliasTy::Projection(projection_ty.clone()), + ty: eq_ty.clone(), + })), + &[ + DomainGoal::Holds(WhereClause::AliasEq(AliasEq { + alias: fresh_alias, + ty: eq_ty, + })), + DomainGoal::Holds(WhereClause::AliasEq(AliasEq { + alias, + ty: intermediate_eq_ty, + })), + ], + ); + }); + }); + }); +} + +/// Examine `T` and push clauses that may be relevant to proving the +/// following sorts of goals (and maybe others): +/// +/// * `DomainGoal::WellFormed(T)` +/// * `DomainGoal::IsUpstream(T)` +/// * `DomainGoal::DownstreamType(T)` +/// * `DomainGoal::IsFullyVisible(T)` +/// * `DomainGoal::IsLocal(T)` +/// +/// Note that the type `T` must not be an unbound inference variable; +/// earlier parts of the logic should "flounder" in that case. 
+fn match_ty( + builder: &mut ClauseBuilder<'_, I>, + environment: &Environment, + ty: &Ty, +) -> Result<(), Floundered> { + let interner = builder.interner(); + match ty.kind(interner) { + TyKind::InferenceVar(_, _) => { + panic!("Inference vars not allowed when getting program clauses") + } + TyKind::Adt(adt_id, _) => builder + .db + .adt_datum(*adt_id) + .to_program_clauses(builder, environment), + TyKind::OpaqueType(opaque_ty_id, _) => builder + .db + .opaque_ty_data(*opaque_ty_id) + .to_program_clauses(builder, environment), + TyKind::Error => {} + TyKind::AssociatedType(type_id, _) => builder + .db + .associated_ty_data(*type_id) + .to_program_clauses(builder, environment), + TyKind::FnDef(fn_def_id, _) => builder + .db + .fn_def_datum(*fn_def_id) + .to_program_clauses(builder, environment), + TyKind::Str + | TyKind::Never + | TyKind::Scalar(_) + | TyKind::Foreign(_) + | TyKind::Tuple(0, _) => { + // These have no substitutions, so they are trivially WF + builder.push_fact(WellFormed::Ty(ty.clone())); + } + TyKind::Raw(mutbl, _) => { + // forall WF(*const T) :- WF(T); + builder.push_bound_ty(|builder, ty| { + builder.push_clause( + WellFormed::Ty(TyKind::Raw(*mutbl, ty.clone()).intern(builder.interner())), + Some(WellFormed::Ty(ty)), + ); + }); + } + TyKind::Ref(mutbl, _, _) => { + // forall<'a, T> WF(&'a T) :- WF(T), T: 'a + builder.push_bound_ty(|builder, ty| { + builder.push_bound_lifetime(|builder, lifetime| { + let ref_ty = TyKind::Ref(*mutbl, lifetime.clone(), ty.clone()) + .intern(builder.interner()); + builder.push_clause( + WellFormed::Ty(ref_ty), + [ + DomainGoal::WellFormed(WellFormed::Ty(ty.clone())), + DomainGoal::Holds(WhereClause::TypeOutlives(TypeOutlives { + ty, + lifetime, + })), + ], + ); + }) + }); + } + TyKind::Slice(_) => { + // forall WF([T]) :- T: Sized, WF(T) + builder.push_bound_ty(|builder, ty| { + let sized = builder.db.well_known_trait_id(WellKnownTrait::Sized); + builder.push_clause( + 
WellFormed::Ty(TyKind::Slice(ty.clone()).intern(builder.interner())), + sized + .map(|id| { + DomainGoal::Holds(WhereClause::Implemented(TraitRef { + trait_id: id, + substitution: Substitution::from1(interner, ty.clone()), + })) + }) + .into_iter() + .chain(Some(DomainGoal::WellFormed(WellFormed::Ty(ty)))), + ); + }); + } + TyKind::Array(..) => { + // forall WF([T, N]) :- T: Sized + let interner = builder.interner(); + let binders = Binders::new( + VariableKinds::from_iter( + interner, + [ + VariableKind::Ty(TyVariableKind::General), + VariableKind::Const( + TyKind::Scalar(Scalar::Uint(UintTy::Usize)).intern(interner), + ), + ], + ), + PhantomData::, + ); + builder.push_binders(binders, |builder, PhantomData| { + let placeholders_in_scope = builder.placeholders_in_scope(); + let placeholder_count = placeholders_in_scope.len(); + let ty = placeholders_in_scope[placeholder_count - 2] + .assert_ty_ref(interner) + .clone(); + let size = placeholders_in_scope[placeholder_count - 1] + .assert_const_ref(interner) + .clone(); + + let sized = builder.db.well_known_trait_id(WellKnownTrait::Sized); + let array_ty = TyKind::Array(ty.clone(), size).intern(interner); + builder.push_clause( + WellFormed::Ty(array_ty), + sized + .map(|id| { + DomainGoal::Holds(WhereClause::Implemented(TraitRef { + trait_id: id, + substitution: Substitution::from1(interner, ty.clone()), + })) + }) + .into_iter() + .chain(Some(DomainGoal::WellFormed(WellFormed::Ty(ty)))), + ); + }); + } + TyKind::Tuple(len, _) => { + // WF((T0, ..., Tn, U)) :- T0: Sized, ..., Tn: Sized, WF(T0), ..., WF(Tn), WF(U) + let interner = builder.interner(); + let binders = Binders::new( + VariableKinds::from_iter( + interner, + iter::repeat_with(|| VariableKind::Ty(TyVariableKind::General)).take(*len), + ), + PhantomData::, + ); + builder.push_binders(binders, |builder, PhantomData| { + let placeholders_in_scope = builder.placeholders_in_scope(); + + let substs = Substitution::from_iter( + builder.interner(), + 
&placeholders_in_scope[placeholders_in_scope.len() - len..], + ); + + let tuple_ty = TyKind::Tuple(*len, substs.clone()).intern(interner); + let sized = builder.db.well_known_trait_id(WellKnownTrait::Sized); + builder.push_clause( + WellFormed::Ty(tuple_ty), + substs.as_slice(interner)[..*len - 1] + .iter() + .filter_map(|s| { + let ty_var = s.assert_ty_ref(interner).clone(); + sized.map(|id| { + DomainGoal::Holds(WhereClause::Implemented(TraitRef { + trait_id: id, + substitution: Substitution::from1(interner, ty_var), + })) + }) + }) + .chain(substs.iter(interner).map(|subst| { + DomainGoal::WellFormed(WellFormed::Ty( + subst.assert_ty_ref(interner).clone(), + )) + })), + ); + }); + } + TyKind::Closure(_, _) | TyKind::Coroutine(_, _) | TyKind::CoroutineWitness(_, _) => { + let ty = generalize::Generalize::apply(builder.db.interner(), ty.clone()); + builder.push_binders(ty, |builder, ty| { + builder.push_fact(WellFormed::Ty(ty)); + }); + } + TyKind::Placeholder(_) => { + builder.push_fact(WellFormed::Ty(ty.clone())); + } + TyKind::Alias(AliasTy::Projection(proj)) => builder + .db + .associated_ty_data(proj.associated_ty_id) + .to_program_clauses(builder, environment), + TyKind::Alias(AliasTy::Opaque(opaque_ty)) => builder + .db + .opaque_ty_data(opaque_ty.opaque_ty_id) + .to_program_clauses(builder, environment), + TyKind::Function(_quantified_ty) => { + let ty = generalize::Generalize::apply(builder.db.interner(), ty.clone()); + builder.push_binders(ty, |builder, ty| builder.push_fact(WellFormed::Ty(ty))); + } + TyKind::BoundVar(_) => return Err(Floundered), + TyKind::Dyn(dyn_ty) => { + // FIXME(#203) + // - Object safety? (not needed with RFC 2027) + // - Implied bounds + // - Bounds on the associated types + // - Checking that all associated types are specified, including + // those on supertraits. 
+ // - For trait objects with GATs, if we allow them in the future, + // check that the bounds are fully general ( + // `dyn for<'a> StreamingIterator = &'a ()>` is OK, + // `dyn StreamingIterator = &'static ()>` is not). + let generalized_ty = + generalize::Generalize::apply(builder.db.interner(), dyn_ty.clone()); + builder.push_binders(generalized_ty, |builder, dyn_ty| { + let bounds = dyn_ty + .bounds + .substitute(interner, &[ty.clone().cast::>(interner)]); + + let mut wf_goals = Vec::new(); + + wf_goals.extend(bounds.iter(interner).flat_map(|bound| { + bound.map_ref(|bound| -> Vec<_> { + match bound { + WhereClause::Implemented(trait_ref) => { + vec![DomainGoal::WellFormed(WellFormed::Trait(trait_ref.clone()))] + } + WhereClause::AliasEq(_) + | WhereClause::LifetimeOutlives(_) + | WhereClause::TypeOutlives(_) => vec![], + } + }) + })); + + builder.push_clause(WellFormed::Ty(ty.clone()), wf_goals); + }); + } + } + Ok(()) +} + +fn match_alias_ty( + builder: &mut ClauseBuilder<'_, I>, + environment: &Environment, + alias: &AliasTy, +) { + if let AliasTy::Projection(projection_ty) = alias { + builder + .db + .associated_ty_data(projection_ty.associated_ty_id) + .to_program_clauses(builder, environment) + } +} + +#[instrument(level = "debug", skip(db))] +pub fn program_clauses_for_env<'db, I: Interner>( + db: &'db dyn RustIrDatabase, + environment: &Environment, +) -> ProgramClauses { + let mut last_round = environment + .clauses + .as_slice(db.interner()) + .iter() + .cloned() + .collect::>(); + let mut closure = last_round.clone(); + let mut next_round = FxHashSet::default(); + while !last_round.is_empty() { + elaborate_env_clauses( + db, + &last_round.drain().collect::>(), + &mut next_round, + environment, + ); + last_round.extend( + next_round + .drain() + .filter(|clause| closure.insert(clause.clone())), + ); + } + + ProgramClauses::from_iter(db.interner(), closure) +} diff --git a/chalk-solve/src/clauses/builder.rs b/chalk-solve/src/clauses/builder.rs new 
file mode 100644 index 00000000000..bbe7c2fd217 --- /dev/null +++ b/chalk-solve/src/clauses/builder.rs @@ -0,0 +1,207 @@ +use std::marker::PhantomData; + +use crate::cast::{Cast, CastTo}; +use crate::RustIrDatabase; +use chalk_ir::fold::{Shift, TypeFoldable}; +use chalk_ir::interner::{HasInterner, Interner}; +use chalk_ir::*; +use tracing::{debug, instrument}; + +/// The "clause builder" is a useful tool for building up sets of +/// program clauses. It takes ownership of the output vector while it +/// lasts, and offers methods like `push_clause` and so forth to +/// append to it. +pub struct ClauseBuilder<'me, I: Interner> { + pub db: &'me dyn RustIrDatabase, + clauses: &'me mut Vec>, + binders: Vec>, + parameters: Vec>, +} + +impl<'me, I: Interner> ClauseBuilder<'me, I> { + pub fn new(db: &'me dyn RustIrDatabase, clauses: &'me mut Vec>) -> Self { + Self { + db, + clauses, + binders: vec![], + parameters: vec![], + } + } + + /// Pushes a "fact" `forall<..> { consequence }` into the set of + /// program clauses, meaning something that we can assume to be + /// true unconditionally. The `forall<..>` binders will be + /// whichever binders have been pushed (see `push_binders`). + pub fn push_fact(&mut self, consequence: impl CastTo>) { + self.push_clause(consequence, None::>); + } + + /// Pushes a "fact" `forall<..> { consequence }` into the set of + /// program clauses, meaning something that we can assume to be + /// true unconditionally. The `forall<..>` binders will be + /// whichever binders have been pushed (see `push_binders`). + pub fn push_fact_with_priority( + &mut self, + consequence: impl CastTo>, + constraints: impl IntoIterator>>, + priority: ClausePriority, + ) { + self.push_clause_with_priority(consequence, None::>, constraints, priority); + } + + /// Pushes a clause `forall<..> { consequence :- conditions }` + /// into the set of program clauses, meaning that `consequence` + /// can be proven if `conditions` are all true. 
The `forall<..>` + /// binders will be whichever binders have been pushed (see `push_binders`). + pub fn push_clause( + &mut self, + consequence: impl CastTo>, + conditions: impl IntoIterator>>, + ) { + self.push_clause_with_priority(consequence, conditions, None, ClausePriority::High) + } + + pub fn push_fact_with_constraints( + &mut self, + consequence: impl CastTo>, + constraints: impl IntoIterator>>, + ) { + self.push_fact_with_priority(consequence, constraints, ClausePriority::High) + } + + /// Pushes a clause `forall<..> { consequence :- conditions ; constraints }` + /// into the set of program clauses, meaning that `consequence` + /// can be proven if `conditions` are all true and `constraints` + /// are proven to hold. The `forall<..>` binders will be whichever binders + /// have been pushed (see `push_binders`). + pub fn push_clause_with_priority( + &mut self, + consequence: impl CastTo>, + conditions: impl IntoIterator>>, + constraints: impl IntoIterator>>, + priority: ClausePriority, + ) { + let interner = self.db.interner(); + let clause = ProgramClauseImplication { + consequence: consequence.cast(interner), + conditions: Goals::from_iter(interner, conditions), + constraints: Constraints::from_iter(interner, constraints), + priority, + }; + + let clause = if self.binders.is_empty() { + // Compensate for the added empty binder + clause.shifted_in(interner) + } else { + clause + }; + + self.clauses.push( + ProgramClauseData(Binders::new( + VariableKinds::from_iter(interner, self.binders.clone()), + clause, + )) + .intern(interner), + ); + + debug!("pushed clause {:?}", self.clauses.last()); + } + + /// Accesses the placeholders for the current list of parameters in scope. + pub fn placeholders_in_scope(&self) -> &[GenericArg] { + &self.parameters + } + + /// Accesses the placeholders for the current list of parameters in scope, + /// in the form of a `Substitution`. 
+ pub fn substitution_in_scope(&self) -> Substitution { + Substitution::from_iter( + self.db.interner(), + self.placeholders_in_scope().iter().cloned(), + ) + } + + /// Executes `op` with the `binders` in-scope; `op` is invoked + /// with the bound value `v` as a parameter. After `op` finishes, + /// the binders are popped from scope. + /// + /// The new binders are always pushed onto the end of the internal + /// list of binders; this means that any extant values where were + /// created referencing the *old* list of binders are still valid. + #[instrument(level = "debug", skip(self, op))] + pub fn push_binders( + &mut self, + binders: Binders, + op: impl FnOnce(&mut Self, V) -> R, + ) -> R + where + V: TypeFoldable + HasInterner, + V: std::fmt::Debug, + { + let old_len = self.binders.len(); + let interner = self.interner(); + self.binders.extend(binders.binders.iter(interner).cloned()); + self.parameters.extend( + binders + .binders + .iter(interner) + .zip(old_len..) + .map(|(pk, i)| (i, pk).to_generic_arg(interner)), + ); + let value = binders.substitute(self.interner(), &self.parameters[old_len..]); + debug!(?value); + let res = op(self, value); + + self.binders.truncate(old_len); + self.parameters.truncate(old_len); + res + } + + /// Push a single binder, for a type, at the end of the binder + /// list. The indices of previously bound variables are + /// unaffected and hence the context remains usable. Invokes `op`, + /// passing a type representing this new type variable in as an + /// argument. 
+ pub fn push_bound_ty(&mut self, op: impl FnOnce(&mut Self, Ty)) { + let interner = self.interner(); + let binders = Binders::new( + VariableKinds::from1(interner, VariableKind::Ty(TyVariableKind::General)), + PhantomData::, + ); + self.push_binders(binders, |this, PhantomData| { + let ty = this + .placeholders_in_scope() + .last() + .unwrap() + .assert_ty_ref(interner) + .clone(); + op(this, ty) + }); + } + + /// Push a single binder, for a lifetime, at the end of the binder + /// list. The indices of previously bound variables are + /// unaffected and hence the context remains usable. Invokes `op`, + /// passing a lifetime representing this new lifetime variable in as an + /// argument. + pub fn push_bound_lifetime(&mut self, op: impl FnOnce(&mut Self, Lifetime)) { + let interner = self.interner(); + let binders = Binders::new( + VariableKinds::from1(interner, VariableKind::Lifetime), + PhantomData::, + ); + self.push_binders(binders, |this, PhantomData| { + let lifetime = this + .placeholders_in_scope() + .last() + .unwrap() + .assert_lifetime_ref(interner) + .clone(); + op(this, lifetime) + }); + } + + pub fn interner(&self) -> I { + self.db.interner() + } +} diff --git a/chalk-solve/src/clauses/builtin_traits.rs b/chalk-solve/src/clauses/builtin_traits.rs new file mode 100644 index 00000000000..d4300a5d156 --- /dev/null +++ b/chalk-solve/src/clauses/builtin_traits.rs @@ -0,0 +1,148 @@ +use super::{builder::ClauseBuilder, generalize}; +use crate::{ + rust_ir::AdtKind, CanonicalVarKinds, Interner, RustIrDatabase, TraitRef, WellKnownTrait, +}; +use chalk_ir::{Floundered, Substitution, Ty, TyKind}; + +mod clone; +mod copy; +mod coroutine; +mod discriminant_kind; +mod fn_family; +mod pointee; +mod sized; +mod tuple; +mod unsize; + +/// For well known traits we have special hard-coded impls, either as an +/// optimization or to enforce special rules for correctness. 
+pub fn add_builtin_program_clauses( + db: &dyn RustIrDatabase, + builder: &mut ClauseBuilder<'_, I>, + well_known: WellKnownTrait, + trait_ref: TraitRef, + binders: &CanonicalVarKinds, +) -> Result<(), Floundered> { + // If `trait_ref` contains bound vars, we want to universally quantify them. + // `Generalize` collects them for us. + let generalized = generalize::Generalize::apply(db.interner(), trait_ref); + + builder.push_binders(generalized, |builder, trait_ref| { + let self_ty = trait_ref.self_type_parameter(db.interner()); + let ty = self_ty.kind(db.interner()).clone(); + + match well_known { + // Built-in traits are non-enumerable. + _ if self_ty.is_general_var(db.interner(), binders) => return Err(Floundered), + WellKnownTrait::Sized => { + sized::add_sized_program_clauses(db, builder, trait_ref, ty, binders)?; + } + WellKnownTrait::Copy => { + copy::add_copy_program_clauses(db, builder, trait_ref, ty, binders)?; + } + WellKnownTrait::Clone => { + clone::add_clone_program_clauses(db, builder, trait_ref, ty, binders)?; + } + WellKnownTrait::FnOnce + | WellKnownTrait::FnMut + | WellKnownTrait::Fn + | WellKnownTrait::AsyncFnOnce + | WellKnownTrait::AsyncFnMut + | WellKnownTrait::AsyncFn => { + fn_family::add_fn_trait_program_clauses(db, builder, well_known, self_ty); + } + WellKnownTrait::Unsize => { + unsize::add_unsize_program_clauses(db, builder, trait_ref, ty) + } + // DiscriminantKind is automatically implemented for all types + WellKnownTrait::DiscriminantKind => builder.push_fact(trait_ref), + WellKnownTrait::Coroutine => { + coroutine::add_coroutine_program_clauses(db, builder, self_ty)?; + } + WellKnownTrait::Tuple => { + tuple::add_tuple_program_clauses(db, builder, self_ty)?; + } + WellKnownTrait::Pointee => { + pointee::add_pointee_program_clauses(db, builder, self_ty)?; + } + WellKnownTrait::FnPtr => { + if let TyKind::Function(_) = self_ty.kind(db.interner()) { + builder.push_fact(trait_ref); + } + } + // There are no builtin impls provided for 
the following traits: + WellKnownTrait::Unpin + | WellKnownTrait::Drop + | WellKnownTrait::CoerceUnsized + | WellKnownTrait::DispatchFromDyn + | WellKnownTrait::Future => (), + } + Ok(()) + }) +} + +/// Like `add_builtin_program_clauses`, but for `DomainGoal::Normalize` involving +/// a projection (e.g. `>::Output`) +pub fn add_builtin_assoc_program_clauses( + db: &dyn RustIrDatabase, + builder: &mut ClauseBuilder<'_, I>, + well_known: WellKnownTrait, + self_ty: Ty, +) -> Result<(), Floundered> { + // If `self_ty` contains bound vars, we want to universally quantify them. + // `Generalize` collects them for us. + let generalized = generalize::Generalize::apply(db.interner(), self_ty); + builder.push_binders(generalized, |builder, self_ty| match well_known { + WellKnownTrait::FnOnce | WellKnownTrait::AsyncFnOnce => { + fn_family::add_fn_trait_program_clauses(db, builder, well_known, self_ty); + Ok(()) + } + WellKnownTrait::Pointee => pointee::add_pointee_program_clauses(db, builder, self_ty), + WellKnownTrait::DiscriminantKind => { + discriminant_kind::add_discriminant_clauses(db, builder, self_ty) + } + WellKnownTrait::Coroutine => coroutine::add_coroutine_program_clauses(db, builder, self_ty), + _ => Ok(()), + }) +} + +/// Returns type of the last field of the input struct, which is useful for `Sized` and related +/// traits. Returns `None` if the input is not a struct or it has no fields. +fn last_field_of_struct( + db: &dyn RustIrDatabase, + id: chalk_ir::AdtId, + subst: &Substitution, +) -> Option> { + let adt_datum = db.adt_datum(id); + let interner = db.interner(); + if adt_datum.kind != AdtKind::Struct { + return None; + } + let last_field_ty = adt_datum + .binders + .map_ref(|b| b.variants.last()?.fields.last().cloned()) + .filter_map(|x| x)? + .substitute(interner, subst); + Some(last_field_ty) +} + +/// Given a trait ref `T0: Trait` and a list of types `U0..Un`, pushes a clause of the form +/// `Implemented(T0: Trait) :- Implemented(U0: Trait) .. 
Implemented(Un: Trait)` +pub fn needs_impl_for_tys( + db: &dyn RustIrDatabase, + builder: &mut ClauseBuilder<'_, I>, + trait_ref: TraitRef, + tys: impl Iterator>, +) { + let trait_id = trait_ref.trait_id; + + // The trait must take one parameter (a type) + debug_assert_eq!(db.trait_datum(trait_id).binders.len(db.interner()), 1,); + builder.push_clause( + trait_ref, + tys.map(|ty| TraitRef { + trait_id, + substitution: Substitution::from1(db.interner(), ty), + }), + ); +} diff --git a/chalk-solve/src/clauses/builtin_traits/clone.rs b/chalk-solve/src/clauses/builtin_traits/clone.rs new file mode 100644 index 00000000000..2e35c9a6894 --- /dev/null +++ b/chalk-solve/src/clauses/builtin_traits/clone.rs @@ -0,0 +1,16 @@ +use crate::clauses::ClauseBuilder; +use crate::{Interner, RustIrDatabase, TraitRef}; +use chalk_ir::{CanonicalVarKinds, Floundered, TyKind}; + +use super::copy::add_copy_program_clauses; + +pub fn add_clone_program_clauses( + db: &dyn RustIrDatabase, + builder: &mut ClauseBuilder<'_, I>, + trait_ref: TraitRef, + ty: TyKind, + binders: &CanonicalVarKinds, +) -> Result<(), Floundered> { + // Implement Clone for types that automatically implement Copy + add_copy_program_clauses(db, builder, trait_ref, ty, binders) +} diff --git a/chalk-solve/src/clauses/builtin_traits/copy.rs b/chalk-solve/src/clauses/builtin_traits/copy.rs new file mode 100644 index 00000000000..6477d5add13 --- /dev/null +++ b/chalk-solve/src/clauses/builtin_traits/copy.rs @@ -0,0 +1,99 @@ +use crate::clauses::builtin_traits::needs_impl_for_tys; +use crate::clauses::ClauseBuilder; +use crate::{Interner, RustIrDatabase, TraitRef}; +use chalk_ir::{CanonicalVarKinds, Floundered, Substitution, TyKind, TyVariableKind, VariableKind}; +use std::iter; +use tracing::instrument; + +fn push_tuple_copy_conditions( + db: &dyn RustIrDatabase, + builder: &mut ClauseBuilder<'_, I>, + trait_ref: TraitRef, + arity: usize, + substitution: &Substitution, +) { + // Empty tuples are always Copy + if arity == 0 
{ + builder.push_fact(trait_ref); + return; + } + + let interner = db.interner(); + + needs_impl_for_tys( + db, + builder, + trait_ref, + substitution + .iter(interner) + .map(|param| param.assert_ty_ref(interner).clone()), + ); +} + +#[instrument(skip(db, builder))] +pub fn add_copy_program_clauses( + db: &dyn RustIrDatabase, + builder: &mut ClauseBuilder<'_, I>, + trait_ref: TraitRef, + ty: TyKind, + binders: &CanonicalVarKinds, +) -> Result<(), Floundered> { + match ty { + TyKind::Tuple(arity, ref substitution) => { + push_tuple_copy_conditions(db, builder, trait_ref, arity, substitution) + } + TyKind::Array(ty, _) => { + needs_impl_for_tys(db, builder, trait_ref, iter::once(ty)); + } + TyKind::FnDef(_, _) => { + builder.push_fact(trait_ref); + } + TyKind::Closure(closure_id, ref substitution) => { + let closure_fn_substitution = db.closure_fn_substitution(closure_id, substitution); + let upvars = db.closure_upvars(closure_id, substitution); + let upvars = upvars.substitute(db.interner(), &closure_fn_substitution); + needs_impl_for_tys(db, builder, trait_ref, Some(upvars).into_iter()); + } + + // these impls are in libcore + TyKind::Ref(_, _, _) + | TyKind::Raw(_, _) + | TyKind::Scalar(_) + | TyKind::Never + | TyKind::Str => {} + + TyKind::Adt(_, _) + | TyKind::AssociatedType(_, _) + | TyKind::Slice(_) + | TyKind::OpaqueType(_, _) + | TyKind::Foreign(_) + | TyKind::Coroutine(_, _) + | TyKind::CoroutineWitness(_, _) + | TyKind::Error => {} + + TyKind::Function(_) => builder.push_fact(trait_ref), + + TyKind::InferenceVar(_, TyVariableKind::Float) + | TyKind::InferenceVar(_, TyVariableKind::Integer) => builder.push_fact(trait_ref), + + TyKind::BoundVar(bound_var) => { + let var_kind = &binders.at(db.interner(), bound_var.index).kind; + match var_kind { + VariableKind::Ty(TyVariableKind::Integer) + | VariableKind::Ty(TyVariableKind::Float) => builder.push_fact(trait_ref), + + // Don't know enough + VariableKind::Ty(TyVariableKind::General) => return Err(Floundered), 
+ + VariableKind::Const(_) | VariableKind::Lifetime => {} + } + } + + // Don't know enough + TyKind::InferenceVar(_, TyVariableKind::General) => return Err(Floundered), + + // These should be handled elsewhere + TyKind::Alias(_) | TyKind::Dyn(_) | TyKind::Placeholder(_) => {} + }; + Ok(()) +} diff --git a/chalk-solve/src/clauses/builtin_traits/coroutine.rs b/chalk-solve/src/clauses/builtin_traits/coroutine.rs new file mode 100644 index 00000000000..615a9d3d051 --- /dev/null +++ b/chalk-solve/src/clauses/builtin_traits/coroutine.rs @@ -0,0 +1,76 @@ +use crate::clauses::ClauseBuilder; +use crate::rust_ir::WellKnownTrait; +use crate::{Interner, RustIrDatabase, TraitRef}; +use chalk_ir::cast::Cast; +use chalk_ir::{AliasTy, Floundered, Normalize, ProjectionTy, Substitution, Ty, TyKind}; + +/// Add implicit impls of the coroutine trait, i.e., add a clause that all coroutines implement +/// `Coroutine` and clauses for `Coroutine`'s associated types. +pub fn add_coroutine_program_clauses( + db: &dyn RustIrDatabase, + builder: &mut ClauseBuilder<'_, I>, + self_ty: Ty, +) -> Result<(), Floundered> { + let interner = db.interner(); + + match self_ty.kind(interner) { + TyKind::Coroutine(id, substitution) => { + let coroutine_datum = db.coroutine_datum(*id); + let coroutine_io_datum = coroutine_datum + .input_output + .clone() + .substitute(interner, &substitution); + + let trait_id = db.well_known_trait_id(WellKnownTrait::Coroutine).unwrap(); + let trait_datum = db.trait_datum(trait_id); + assert_eq!( + trait_datum.associated_ty_ids.len(), + 2, + "Coroutine trait should have exactly two associated types, found {:?}", + trait_datum.associated_ty_ids + ); + + let substitution = Substitution::from_iter( + interner, + &[ + self_ty.cast(interner), + coroutine_io_datum.resume_type.cast(interner), + ], + ); + + // coroutine: Coroutine + builder.push_fact(TraitRef { + trait_id, + substitution: substitution.clone(), + }); + + // `Coroutine::Yield` + let yield_id = 
trait_datum.associated_ty_ids[0]; + let yield_alias = AliasTy::Projection(ProjectionTy { + associated_ty_id: yield_id, + substitution: substitution.clone(), + }); + builder.push_fact(Normalize { + alias: yield_alias, + ty: coroutine_io_datum.yield_type, + }); + + // `Coroutine::Return` + let return_id = trait_datum.associated_ty_ids[1]; + let return_alias = AliasTy::Projection(ProjectionTy { + associated_ty_id: return_id, + substitution, + }); + builder.push_fact(Normalize { + alias: return_alias, + ty: coroutine_io_datum.return_type, + }); + + Ok(()) + } + + // Coroutine trait is non-enumerable + TyKind::InferenceVar(..) | TyKind::BoundVar(_) | TyKind::Alias(..) => Err(Floundered), + _ => Ok(()), + } +} diff --git a/chalk-solve/src/clauses/builtin_traits/discriminant_kind.rs b/chalk-solve/src/clauses/builtin_traits/discriminant_kind.rs new file mode 100644 index 00000000000..31d0bb43871 --- /dev/null +++ b/chalk-solve/src/clauses/builtin_traits/discriminant_kind.rs @@ -0,0 +1,74 @@ +use crate::clauses::ClauseBuilder; +use crate::{Interner, RustIrDatabase, TraitRef, WellKnownTrait}; +use chalk_ir::{ + AliasTy, Floundered, Normalize, ProjectionTy, Substitution, Ty, TyKind, TyVariableKind, +}; + +pub fn add_discriminant_clauses( + db: &dyn RustIrDatabase, + builder: &mut ClauseBuilder<'_, I>, + self_ty: Ty, +) -> Result<(), Floundered> { + let interner = db.interner(); + + let can_determine_discriminant = match self_ty.data(interner).kind { + TyKind::Adt(..) + | TyKind::Array(..) + | TyKind::Tuple(..) + | TyKind::Slice(..) + | TyKind::Raw(..) + | TyKind::Ref(..) + | TyKind::Scalar(_) + | TyKind::Str + | TyKind::Never + | TyKind::FnDef(..) + | TyKind::Coroutine(..) + | TyKind::Closure(..) + | TyKind::CoroutineWitness(..) + | TyKind::Foreign(_) + | TyKind::Dyn(_) + | TyKind::Function(..) + | TyKind::InferenceVar(_, TyVariableKind::Integer) + | TyKind::InferenceVar(_, TyVariableKind::Float) => true, + TyKind::OpaqueType(..) 
+ | TyKind::Alias(_) + | TyKind::BoundVar(_) + | TyKind::Placeholder(_) + | TyKind::AssociatedType(..) + | TyKind::Error + | TyKind::InferenceVar(..) => false, + }; + + let trait_id = db + .well_known_trait_id(WellKnownTrait::DiscriminantKind) + .unwrap(); + let trait_datum = db.trait_datum(trait_id); + + let associated_ty_id = trait_datum.associated_ty_ids[0]; + let substitution = Substitution::from1(interner, self_ty.clone()); + + let trait_ref = TraitRef { + trait_id, + substitution: substitution.clone(), + }; + + builder.push_fact(trait_ref); + + if !can_determine_discriminant { + return Ok(()); + } + + let disc_ty = db.discriminant_type(self_ty); + + let normalize = Normalize { + alias: AliasTy::Projection(ProjectionTy { + associated_ty_id, + substitution, + }), + ty: disc_ty, + }; + + builder.push_fact(normalize); + + Ok(()) +} diff --git a/chalk-solve/src/clauses/builtin_traits/fn_family.rs b/chalk-solve/src/clauses/builtin_traits/fn_family.rs new file mode 100644 index 00000000000..72d88c4612f --- /dev/null +++ b/chalk-solve/src/clauses/builtin_traits/fn_family.rs @@ -0,0 +1,241 @@ +use crate::clauses::ClauseBuilder; +use crate::rust_ir::{ClosureKind, FnDefInputsAndOutputDatum, WellKnownAssocType, WellKnownTrait}; +use crate::{Interner, RustIrDatabase, TraitRef}; +use chalk_ir::cast::Cast; +use chalk_ir::{ + AliasTy, Binders, Goal, Normalize, ProjectionTy, Safety, Substitution, TraitId, Ty, TyKind, +}; + +fn push_clauses( + db: &dyn RustIrDatabase, + builder: &mut ClauseBuilder<'_, I>, + well_known: WellKnownTrait, + trait_id: TraitId, + self_ty: Ty, + arg_sub: Substitution, + return_type: Ty, +) { + let interner = db.interner(); + let tupled = TyKind::Tuple(arg_sub.len(interner), arg_sub).intern(interner); + let substitution = + Substitution::from_iter(interner, &[self_ty.cast(interner), tupled.cast(interner)]); + + let is_async = matches!( + well_known, + WellKnownTrait::AsyncFnOnce | WellKnownTrait::AsyncFnMut | WellKnownTrait::AsyncFn + ); + + if 
!is_async { + builder.push_fact(TraitRef { + trait_id, + substitution: substitution.clone(), + }); + + // The `Output` type is defined on the `FnOnce` + if let WellKnownTrait::FnOnce = well_known { + let trait_datum = db.trait_datum(trait_id); + assert_eq!( + trait_datum.associated_ty_ids.len(), + 1, + "FnOnce trait should have exactly one associated type, found {:?}", + trait_datum.associated_ty_ids + ); + // Constructs the alias. For `Fn`, for example, this would look like + // `Normalize( B as FnOnce<(A,)>>::Output -> B)` + let output_id = trait_datum.associated_ty_ids[0]; + let alias = AliasTy::Projection(ProjectionTy { + associated_ty_id: output_id, + substitution, + }); + builder.push_fact(Normalize { + alias, + ty: return_type, + }); + } + } else { + let sync_counterpart = match well_known { + WellKnownTrait::AsyncFnOnce => db.well_known_trait_id(WellKnownTrait::FnOnce).unwrap(), + WellKnownTrait::AsyncFnMut => db.well_known_trait_id(WellKnownTrait::FnMut).unwrap(), + WellKnownTrait::AsyncFn => db.well_known_trait_id(WellKnownTrait::Fn).unwrap(), + _ => unreachable!(), + }; + + let future = db.well_known_trait_id(WellKnownTrait::Future).unwrap(); + let sync_counterpart = TraitRef { + trait_id: sync_counterpart, + substitution: substitution.clone(), + }; + let output_is_future = TraitRef { + trait_id: future, + substitution: Substitution::from1(interner, return_type.clone()), + }; + + // This adds the following clause: + // `F: AsyncFnX` :- `F: FnX>` + // Actually, the `::Output = O` part is added in the if let expression below. 
+ builder.push_clause( + TraitRef { + trait_id, + substitution: substitution.clone(), + }, + [sync_counterpart.clone(), output_is_future.clone()], + ); + + if let WellKnownTrait::AsyncFnOnce = well_known { + builder.push_bound_ty(|builder, ty| { + let output_id = db + .well_known_assoc_type_id(WellKnownAssocType::AsyncFnOnceOutput) + .unwrap(); + let async_alias = AliasTy::Projection(ProjectionTy { + associated_ty_id: output_id, + substitution, + }); + + let trait_datum = db.trait_datum(future); + assert_eq!( + trait_datum.associated_ty_ids.len(), + 1, + "Future trait should have exactly one associated type, found {:?}", + trait_datum.associated_ty_ids + ); + let output_id = trait_datum.associated_ty_ids[0]; + let future_alias = AliasTy::Projection(ProjectionTy { + associated_ty_id: output_id, + substitution: Substitution::from1(interner, return_type), + }); + + builder.push_clause( + Normalize { + alias: async_alias, + ty: ty.clone(), + }, + [ + sync_counterpart.cast::>(interner), + output_is_future.cast(interner), + Normalize { + alias: future_alias, + ty, + } + .cast(interner), + ], + ); + }); + } + } +} + +fn push_clauses_for_apply( + db: &dyn RustIrDatabase, + builder: &mut ClauseBuilder<'_, I>, + well_known: WellKnownTrait, + trait_id: TraitId, + self_ty: Ty, + inputs_and_output: Binders>, +) { + let interner = db.interner(); + builder.push_binders(inputs_and_output, |builder, inputs_and_output| { + let arg_sub = inputs_and_output + .argument_types + .iter() + .cloned() + .map(|ty| ty.cast(interner)); + let arg_sub = Substitution::from_iter(interner, arg_sub); + let output_ty = inputs_and_output.return_type; + + push_clauses( + db, builder, well_known, trait_id, self_ty, arg_sub, output_ty, + ); + }); +} + +/// Handles clauses for FnOnce/FnMut/Fn and AsyncFnOnce/AsyncFnMut/AsyncFn. 
+/// For sync traits, `self_ty` is a function, we push a clause of the form +/// `fn(A1, A2, ..., AN) -> O: FnTrait<(A1, A2, ..., AN)>`, where `FnTrait` +/// is the trait corresponding to `trait_id` (FnOnce/FnMut/Fn) +/// +/// If `trait_id` is `FnOnce`, we also push a clause for the output type of the form: +/// `Normalize( B as FnOnce<(A,)>>::Output -> B)` +/// We do not add the usual `Implemented(fn(A) -> b as FnOnce<(A,)>` clause +/// as a condition, since we already called `push_fact` with it +/// +/// For async traits, we push a clause of the form +/// `F: AsyncFnX` :- `F: FnX>`, +/// which corresponds to the implementation +/// `impl AsyncFn for F where F: Fn, Fut: Future`. +pub fn add_fn_trait_program_clauses( + db: &dyn RustIrDatabase, + builder: &mut ClauseBuilder<'_, I>, + well_known: WellKnownTrait, + self_ty: Ty, +) { + let interner = db.interner(); + let trait_id = db.well_known_trait_id(well_known).unwrap(); + + match self_ty.kind(interner) { + TyKind::FnDef(fn_def_id, substitution) => { + let fn_def_datum = builder.db.fn_def_datum(*fn_def_id); + if fn_def_datum.sig.safety == Safety::Safe && !fn_def_datum.sig.variadic { + let bound = fn_def_datum + .binders + .clone() + .substitute(builder.interner(), &substitution); + push_clauses_for_apply( + db, + builder, + well_known, + trait_id, + self_ty, + bound.inputs_and_output, + ); + } + } + TyKind::Closure(closure_id, substitution) => { + let closure_kind = db.closure_kind(*closure_id, substitution); + let trait_matches = matches!( + (well_known, closure_kind), + ( + WellKnownTrait::Fn | WellKnownTrait::AsyncFn, + ClosureKind::Fn + ) | ( + WellKnownTrait::FnMut | WellKnownTrait::AsyncFnMut, + ClosureKind::FnMut | ClosureKind::Fn + ) | (WellKnownTrait::FnOnce | WellKnownTrait::AsyncFnOnce, _) + ); + if !trait_matches { + return; + } + let closure_inputs_and_output = db.closure_inputs_and_output(*closure_id, substitution); + push_clauses_for_apply( + db, + builder, + well_known, + trait_id, + self_ty, + 
closure_inputs_and_output, + ); + } + TyKind::Function(fn_val) if fn_val.sig.safety == Safety::Safe && !fn_val.sig.variadic => { + let bound_ref = fn_val.clone().into_binders(interner); + builder.push_binders(bound_ref, |builder, orig_sub| { + // The last parameter represents the function return type + let (arg_sub, fn_output_ty) = orig_sub + .0 + .as_slice(interner) + .split_at(orig_sub.0.len(interner) - 1); + let arg_sub = Substitution::from_iter(interner, arg_sub); + let output_ty = fn_output_ty[0].assert_ty_ref(interner).clone(); + + push_clauses( + db, + builder, + well_known, + trait_id, + self_ty.clone(), + arg_sub, + output_ty, + ); + }); + } + _ => {} + } +} diff --git a/chalk-solve/src/clauses/builtin_traits/pointee.rs b/chalk-solve/src/clauses/builtin_traits/pointee.rs new file mode 100644 index 00000000000..14574b52209 --- /dev/null +++ b/chalk-solve/src/clauses/builtin_traits/pointee.rs @@ -0,0 +1,149 @@ +use crate::clauses::ClauseBuilder; +use crate::rust_ir::WellKnownTrait; +use crate::{Interner, RustIrDatabase, TraitRef}; +use chalk_ir::{ + AliasTy, Floundered, Normalize, ProjectionTy, Substitution, Ty, TyKind, TyVariableKind, +}; + +use super::last_field_of_struct; + +fn push_clauses( + db: &dyn RustIrDatabase, + builder: &mut ClauseBuilder<'_, I>, + self_ty: Ty, + metadata: Ty, +) { + let interner = db.interner(); + let trait_id = db.well_known_trait_id(WellKnownTrait::Pointee).unwrap(); + let substitution = Substitution::from1(interner, self_ty); + let trait_datum = db.trait_datum(trait_id); + assert_eq!( + trait_datum.associated_ty_ids.len(), + 1, + "Pointee trait should have exactly one associated type, found {:?}", + trait_datum.associated_ty_ids + ); + let metadata_id = trait_datum.associated_ty_ids[0]; + let alias = AliasTy::Projection(ProjectionTy { + associated_ty_id: metadata_id, + substitution, + }); + builder.push_fact(Normalize { + alias, + ty: metadata, + }); +} + +/// Add implicit impl for the `Pointee` trait for all types +pub fn 
add_pointee_program_clauses( + db: &dyn RustIrDatabase, + builder: &mut ClauseBuilder<'_, I>, + self_ty: Ty, +) -> Result<(), Floundered> { + let interner = db.interner(); + let trait_id = db.well_known_trait_id(WellKnownTrait::Pointee).unwrap(); + let substitution = Substitution::from1(interner, self_ty.clone()); + builder.push_fact(TraitRef { + trait_id, + substitution: substitution.clone(), + }); + match self_ty.kind(interner) { + TyKind::Str | TyKind::Slice(_) => push_clauses( + db, + builder, + self_ty.clone(), + TyKind::Scalar(chalk_ir::Scalar::Uint(chalk_ir::UintTy::Usize)).intern(interner), + ), + TyKind::Array(_, _) + | TyKind::Never + | TyKind::Closure(_, _) + | TyKind::FnDef(_, _) + | TyKind::Scalar(_) + | TyKind::Raw(_, _) + | TyKind::Function(_) + | TyKind::InferenceVar(_, TyVariableKind::Float) + | TyKind::InferenceVar(_, TyVariableKind::Integer) + | TyKind::Coroutine(_, _) + | TyKind::CoroutineWitness(_, _) + | TyKind::Ref(_, _, _) => push_clauses( + db, + builder, + self_ty, + TyKind::Tuple(0, Substitution::empty(interner)).intern(interner), + ), + TyKind::Adt(id, subst) => { + if let Some(last_field_ty) = last_field_of_struct(db, *id, subst) { + push_for_last_field(last_field_ty, db, builder, self_ty); + } else { + push_clauses( + db, + builder, + self_ty, + TyKind::Tuple(0, Substitution::empty(interner)).intern(interner), + ); + } + } + TyKind::Tuple(_, subst) => { + let last_field_ty = subst + .iter(interner) + .rev() + .next() + .and_then(|x| x.ty(interner)) + .cloned(); + if let Some(last_field_ty) = last_field_ty { + push_for_last_field(last_field_ty, db, builder, self_ty); + } else { + push_clauses( + db, + builder, + self_ty, + TyKind::Tuple(0, Substitution::empty(interner)).intern(interner), + ); + } + } + TyKind::BoundVar(_) + | TyKind::AssociatedType(_, _) + | TyKind::OpaqueType(_, _) + | TyKind::Foreign(_) + | TyKind::Error + | TyKind::Placeholder(_) + | TyKind::Alias(_) => (), + TyKind::Dyn(_) => { + // FIXME: We should add a 
`Normalize(::Metadata -> DynMetadata)` here, but + // since chalk doesn't have the concept of lang item structs yet, we can't. + } + TyKind::InferenceVar(_, TyVariableKind::General) => return Err(Floundered), + } + Ok(()) +} + +fn push_for_last_field( + last_field_ty: Ty, + db: &dyn RustIrDatabase, + builder: &mut ClauseBuilder<'_, I>, + self_ty: Ty, +) { + let interner = db.interner(); + let _ = add_pointee_program_clauses(db, builder, last_field_ty.clone()); + let trait_id = db.well_known_trait_id(WellKnownTrait::Pointee).unwrap(); + let trait_datum = db.trait_datum(trait_id); + assert_eq!( + trait_datum.associated_ty_ids.len(), + 1, + "Pointee trait should have exactly one associated type, found {:?}", + trait_datum.associated_ty_ids + ); + let metadata_id = trait_datum.associated_ty_ids[0]; + let alias_last_field = AliasTy::Projection(ProjectionTy { + associated_ty_id: metadata_id, + substitution: Substitution::from1(interner, last_field_ty), + }); + let alias_self = AliasTy::Projection(ProjectionTy { + associated_ty_id: metadata_id, + substitution: Substitution::from1(interner, self_ty), + }); + builder.push_fact(Normalize { + alias: alias_self, + ty: TyKind::Alias(alias_last_field).intern(interner), + }); +} diff --git a/chalk-solve/src/clauses/builtin_traits/sized.rs b/chalk-solve/src/clauses/builtin_traits/sized.rs new file mode 100644 index 00000000000..6a92c32b529 --- /dev/null +++ b/chalk-solve/src/clauses/builtin_traits/sized.rs @@ -0,0 +1,107 @@ +use std::iter; + +use crate::clauses::builtin_traits::needs_impl_for_tys; +use crate::clauses::ClauseBuilder; +use crate::{Interner, RustIrDatabase, TraitRef}; +use chalk_ir::{ + AdtId, CanonicalVarKinds, Floundered, Substitution, TyKind, TyVariableKind, VariableKind, +}; + +use super::last_field_of_struct; + +fn push_adt_sized_conditions( + db: &dyn RustIrDatabase, + builder: &mut ClauseBuilder<'_, I>, + trait_ref: TraitRef, + adt_id: AdtId, + substitution: &Substitution, +) { + // We only need to check last 
field of the struct here. Rest of the fields and cases are handled in WF. + let last_field_ty = last_field_of_struct(db, adt_id, substitution).into_iter(); + needs_impl_for_tys(db, builder, trait_ref, last_field_ty); +} + +fn push_tuple_sized_conditions( + db: &dyn RustIrDatabase, + builder: &mut ClauseBuilder<'_, I>, + trait_ref: TraitRef, + arity: usize, + substitution: &Substitution, +) { + // Empty tuples are always Sized + if arity == 0 { + builder.push_fact(trait_ref); + return; + } + + let interner = db.interner(); + + // To check if a tuple is Sized, we only have to look at its last element. + // This is because the WF checks for tuples require that all the other elements must be Sized. + let last_elem_ty = substitution + .iter(interner) + .last() + .unwrap() + .ty(interner) + .unwrap() + .clone(); + + needs_impl_for_tys(db, builder, trait_ref, iter::once(last_elem_ty)); +} + +pub fn add_sized_program_clauses( + db: &dyn RustIrDatabase, + builder: &mut ClauseBuilder<'_, I>, + trait_ref: TraitRef, + ty: TyKind, + binders: &CanonicalVarKinds, +) -> Result<(), Floundered> { + match ty { + TyKind::Adt(adt_id, ref substitution) => { + push_adt_sized_conditions(db, builder, trait_ref, adt_id, substitution) + } + TyKind::Tuple(arity, ref substitution) => { + push_tuple_sized_conditions(db, builder, trait_ref, arity, substitution) + } + TyKind::Array(_, _) + | TyKind::Never + | TyKind::Closure(_, _) + | TyKind::FnDef(_, _) + | TyKind::Scalar(_) + | TyKind::Raw(_, _) + | TyKind::Coroutine(_, _) + | TyKind::CoroutineWitness(_, _) + | TyKind::Ref(_, _, _) => builder.push_fact(trait_ref), + + TyKind::AssociatedType(_, _) + | TyKind::Slice(_) + | TyKind::OpaqueType(_, _) + | TyKind::Str + | TyKind::Foreign(_) + | TyKind::Error => {} + + TyKind::Function(_) + | TyKind::InferenceVar(_, TyVariableKind::Float) + | TyKind::InferenceVar(_, TyVariableKind::Integer) => builder.push_fact(trait_ref), + + TyKind::BoundVar(bound_var) => { + let var_kind = &binders.at(db.interner(), 
bound_var.index).kind; + match var_kind { + VariableKind::Ty(TyVariableKind::Integer) + | VariableKind::Ty(TyVariableKind::Float) => builder.push_fact(trait_ref), + + // Don't know enough + VariableKind::Ty(TyVariableKind::General) => return Err(Floundered), + + VariableKind::Const(_) | VariableKind::Lifetime => {} + } + } + + // We don't know enough here + TyKind::InferenceVar(_, TyVariableKind::General) => return Err(Floundered), + + // These would be handled elsewhere + TyKind::Placeholder(_) | TyKind::Dyn(_) | TyKind::Alias(_) => {} + } + Ok(()) +} diff --git a/chalk-solve/src/clauses/builtin_traits/tuple.rs b/chalk-solve/src/clauses/builtin_traits/tuple.rs new file mode 100644 index 00000000000..a6244782759 --- /dev/null +++ b/chalk-solve/src/clauses/builtin_traits/tuple.rs @@ -0,0 +1,30 @@ +use crate::clauses::ClauseBuilder; +use crate::rust_ir::WellKnownTrait; +use crate::{Interner, RustIrDatabase, TraitRef}; +use chalk_ir::{Floundered, Substitution, Ty, TyKind}; + +/// Add implicit impl for the `Tuple` trait for all tuples +pub fn add_tuple_program_clauses( + db: &dyn RustIrDatabase, + builder: &mut ClauseBuilder<'_, I>, + self_ty: Ty, +) -> Result<(), Floundered> { + let interner = db.interner(); + + match self_ty.kind(interner) { + TyKind::Tuple(..) => { + let trait_id = db.well_known_trait_id(WellKnownTrait::Tuple).unwrap(); + + builder.push_fact(TraitRef { + trait_id, + substitution: Substitution::from1(interner, self_ty), + }); + + Ok(()) + } + + // Tuple trait is non-enumerable + TyKind::InferenceVar(..) | TyKind::BoundVar(_) | TyKind::Alias(..) 
=> Err(Floundered), + _ => Ok(()), + } +} diff --git a/chalk-solve/src/clauses/builtin_traits/unsize.rs b/chalk-solve/src/clauses/builtin_traits/unsize.rs new file mode 100644 index 00000000000..d999a0229d7 --- /dev/null +++ b/chalk-solve/src/clauses/builtin_traits/unsize.rs @@ -0,0 +1,607 @@ +use std::collections::HashSet; +use std::iter; +use std::ops::ControlFlow; + +use crate::clauses::super_traits::super_traits; +use crate::clauses::ClauseBuilder; +use crate::rust_ir::AdtKind; +use crate::{Interner, RustIrDatabase, TraitRef, WellKnownTrait}; +use chalk_ir::{ + cast::Cast, + interner::HasInterner, + visit::{TypeSuperVisitable, TypeVisitable, TypeVisitor}, + Binders, Const, ConstValue, DebruijnIndex, DomainGoal, DynTy, EqGoal, Goal, LifetimeOutlives, + QuantifiedWhereClauses, Substitution, TraitId, Ty, TyKind, TypeOutlives, WhereClause, +}; + +struct UnsizeParameterCollector { + interner: I, + // FIXME should probably use a bitset instead + parameters: HashSet, +} + +impl TypeVisitor for UnsizeParameterCollector { + type BreakTy = (); + + fn as_dyn(&mut self) -> &mut dyn TypeVisitor { + self + } + + fn visit_ty(&mut self, ty: &Ty, outer_binder: DebruijnIndex) -> ControlFlow<()> { + let interner = self.interner; + + match ty.kind(interner) { + TyKind::BoundVar(bound_var) => { + // check if bound var refers to the outermost binder + if bound_var.debruijn.shifted_in() == outer_binder { + self.parameters.insert(bound_var.index); + } + ControlFlow::Continue(()) + } + _ => ty.super_visit_with(self, outer_binder), + } + } + + fn visit_const(&mut self, constant: &Const, outer_binder: DebruijnIndex) -> ControlFlow<()> { + let interner = self.interner; + + if let ConstValue::BoundVar(bound_var) = constant.data(interner).value { + // check if bound var refers to the outermost binder + if bound_var.debruijn.shifted_in() == outer_binder { + self.parameters.insert(bound_var.index); + } + } + ControlFlow::Continue(()) + } + + fn interner(&self) -> I { + self.interner + } +} + 
+fn outer_binder_parameters_used( + interner: I, + v: &Binders + HasInterner>, +) -> HashSet { + let mut visitor = UnsizeParameterCollector { + interner, + parameters: HashSet::new(), + }; + v.visit_with(&mut visitor, DebruijnIndex::INNERMOST); + visitor.parameters +} + +// has nothing to do with occurs check +struct ParameterOccurenceCheck<'p, I: Interner> { + interner: I, + parameters: &'p HashSet, +} + +impl<'p, I: Interner> TypeVisitor for ParameterOccurenceCheck<'p, I> { + type BreakTy = (); + + fn as_dyn(&mut self) -> &mut dyn TypeVisitor { + self + } + + fn visit_ty(&mut self, ty: &Ty, outer_binder: DebruijnIndex) -> ControlFlow<()> { + let interner = self.interner; + + match ty.kind(interner) { + TyKind::BoundVar(bound_var) => { + if bound_var.debruijn.shifted_in() == outer_binder + && self.parameters.contains(&bound_var.index) + { + ControlFlow::Break(()) + } else { + ControlFlow::Continue(()) + } + } + _ => ty.super_visit_with(self, outer_binder), + } + } + + fn visit_const(&mut self, constant: &Const, outer_binder: DebruijnIndex) -> ControlFlow<()> { + let interner = self.interner; + + match constant.data(interner).value { + ConstValue::BoundVar(bound_var) => { + if bound_var.debruijn.shifted_in() == outer_binder + && self.parameters.contains(&bound_var.index) + { + ControlFlow::Break(()) + } else { + ControlFlow::Continue(()) + } + } + _ => ControlFlow::Continue(()), + } + } + + fn interner(&self) -> I { + self.interner + } +} + +fn uses_outer_binder_params( + interner: I, + v: &Binders + HasInterner>, + parameters: &HashSet, +) -> bool { + let mut visitor = ParameterOccurenceCheck { + interner, + parameters, + }; + + let flow = v.visit_with(&mut visitor, DebruijnIndex::INNERMOST); + matches!(flow, ControlFlow::Break(_)) +} + +fn principal_trait_ref( + db: &dyn RustIrDatabase, + bounds: &Binders>, +) -> Option>>> { + bounds + .map_ref(|b| b.iter(db.interner())) + .into_iter() + .find_map(|b| { + b.filter_map(|qwc| { + qwc.as_ref().filter_map(|wc| match 
wc { + WhereClause::Implemented(trait_ref) => { + if db.trait_datum(trait_ref.trait_id).is_auto_trait() { + None + } else { + Some(trait_ref.clone()) + } + } + _ => None, + }) + }) + }) +} + +fn auto_trait_ids<'a, I: Interner>( + db: &'a dyn RustIrDatabase, + bounds: &'a Binders>, +) -> impl Iterator> + 'a { + let interner = db.interner(); + + bounds + .skip_binders() + .iter(interner) + .filter_map(|clause| clause.trait_id()) + .filter(move |&id| db.trait_datum(id).is_auto_trait()) +} + +pub fn add_unsize_program_clauses( + db: &dyn RustIrDatabase, + builder: &mut ClauseBuilder<'_, I>, + trait_ref: TraitRef, + _ty: TyKind, +) { + let interner = db.interner(); + + let source_ty = trait_ref.self_type_parameter(interner); + let target_ty = trait_ref + .substitution + .at(interner, 1) + .assert_ty_ref(interner) + .clone(); + + let unsize_trait_id = trait_ref.trait_id; + + // N.B. here rustc asserts that `TraitRef` is not a higher-ranked bound + // i.e. `for<'a> &'a T: Unsize` is never provable. + // + // In chalk it would be awkward to implement and I am not sure + // there is a need for it, the original comment states that this restriction + // could be lifted. + // + // for more info visit `fn assemble_candidates_for_unsizing` and + // `fn confirm_builtin_unsize_candidate` in rustc. 
+ + match (source_ty.kind(interner), target_ty.kind(interner)) { + // dyn TraitA + AutoA + 'a -> dyn TraitB + AutoB + 'b + ( + TyKind::Dyn(DynTy { + bounds: bounds_a, + lifetime: lifetime_a, + }), + TyKind::Dyn(DynTy { + bounds: bounds_b, + lifetime: lifetime_b, + }), + ) => { + let principal_trait_ref_a = principal_trait_ref(db, bounds_a); + let principal_a = principal_trait_ref_a + .as_ref() + .map(|trait_ref| trait_ref.skip_binders().skip_binders().trait_id); + let principal_b = principal_trait_ref(db, bounds_b) + .map(|trait_ref| trait_ref.skip_binders().skip_binders().trait_id); + + // Include super traits in a list of auto traits for A, + // to allow `dyn Trait -> dyn Trait + X` if `Trait: X`. + let auto_trait_ids_a: Vec<_> = auto_trait_ids(db, bounds_a) + .chain(principal_a.into_iter().flat_map(|principal_a| { + super_traits(db, principal_a) + .into_value_and_skipped_binders() + .0 + .0 + .into_iter() + .map(|x| x.skip_binders().trait_id) + .filter(|&x| db.trait_datum(x).is_auto_trait()) + })) + .collect(); + + let auto_trait_ids_b: Vec<_> = auto_trait_ids(db, bounds_b).collect(); + + // If B has a principal, then A must as well + // (i.e. we allow dropping principal, but not creating a principal out of thin air). + // `AutoB` must be a subset of `AutoA`. + let may_apply = principal_a.is_some() >= principal_b.is_some() + && auto_trait_ids_b + .iter() + .all(|id_b| auto_trait_ids_a.iter().any(|id_a| id_a == id_b)); + + if !may_apply { + return; + } + + // Check that source lifetime outlives target lifetime + let lifetime_outlives_goal: Goal = WhereClause::LifetimeOutlives(LifetimeOutlives { + a: lifetime_a.clone(), + b: lifetime_b.clone(), + }) + .cast(interner); + + // COMMENT FROM RUSTC: + // ------------------ + // Require that the traits involved in this upcast are **equal**; + // only the **lifetime bound** is changed. + // + // This condition is arguably too strong -- it would + // suffice for the source trait to be a *subtype* of the target + // trait. 
In particular, changing from something like + // `for<'a, 'b> Foo<'a, 'b>` to `for<'a> Foo<'a, 'a>` should be + // permitted. + // <...> + // I've modified this to `.eq` because I want to continue rejecting + // that [`old-lub-glb-object.rs`] test (as we have + // done for quite some time) before we are firmly comfortable + // with what our behavior should be there. -nikomatsakis + // ------------------ + + if principal_a == principal_b || principal_b.is_none() { + // Construct a new trait object type by taking the source ty, + // replacing auto traits of source with those of target, + // and changing source lifetime to target lifetime. + // + // In order for the coercion to be valid, this new type + // should be equal to target type. + let new_source_ty = TyKind::Dyn(DynTy { + bounds: bounds_a.map_ref(|bounds| { + QuantifiedWhereClauses::from_iter( + interner, + bounds + .iter(interner) + .cloned() + .filter_map(|bound| { + let Some(trait_id) = bound.trait_id() else { + // Keep non-"implements" bounds as-is + return Some(bound); + }; + + // Auto traits are already checked above, ignore them + // (we'll use the ones from B below) + if db.trait_datum(trait_id).is_auto_trait() { + return None; + } + + // The only "implements" bound that is not an auto trait, is the principal + assert_eq!(Some(trait_id), principal_a); + + // Only include principal_a if the principal_b is also present + // (this allows dropping principal, `dyn Tr+A -> dyn A`) + principal_b.is_some().then(|| bound) + }) + // Add auto traits from B (again, they are already checked above). 
+ .chain(bounds_b.skip_binders().iter(interner).cloned().filter( + |bound| { + bound.trait_id().is_some_and(|trait_id| { + db.trait_datum(trait_id).is_auto_trait() + }) + }, + )), + ) + }), + lifetime: lifetime_b.clone(), + }) + .intern(interner); + + // Check that new source is equal to target + let eq_goal = EqGoal { + a: new_source_ty.cast(interner), + b: target_ty.clone().cast(interner), + } + .cast(interner); + + builder.push_clause(trait_ref, [eq_goal, lifetime_outlives_goal].iter()); + } else { + // Conditions above imply that both of these are always `Some` + // (b != None, b is Some iff a is Some). + let principal_a = principal_a.unwrap(); + let principal_b = principal_b.unwrap(); + + let principal_trait_ref_a = principal_trait_ref_a.unwrap(); + let applicable_super_traits = super_traits(db, principal_a) + .map(|(super_trait_refs, _)| super_trait_refs) + .into_iter() + .filter(|trait_ref| { + trait_ref.skip_binders().skip_binders().trait_id == principal_b + }); + + for super_trait_ref in applicable_super_traits { + // `super_trait_ref` is, at this point, quantified over generic params of + // `principal_a` and relevant higher-ranked lifetimes that come from super + // trait elaboration (see comments on `super_traits()`). + // + // So if we have `trait Trait<'a, T>: for<'b> Super<'a, 'b, T> {}`, + // `super_trait_ref` can be something like + // `for for<'b> Self: Super<'a, 'b, T>`. + // + // We need to convert it into a bound for `DynTy`. We do this by substituting + // bound vars of `principal_trait_ref_a` and then fusing inner binders for + // higher-ranked lifetimes. + let rebound_super_trait_ref = principal_trait_ref_a.map_ref(|q_trait_ref_a| { + q_trait_ref_a + .map_ref(|trait_ref_a| { + super_trait_ref.substitute(interner, &trait_ref_a.substitution) + }) + .fuse_binders(interner) + }); + + // Skip `for` binder. We'll rebind it immediately below. 
+ let new_principal_trait_ref = rebound_super_trait_ref + .into_value_and_skipped_binders() + .0 + .map(|it| it.cast(interner)); + + // Swap trait ref for `principal_a` with the new trait ref, drop the auto + // traits not included in the upcast target. + let new_source_ty = TyKind::Dyn(DynTy { + bounds: bounds_a.map_ref(|bounds| { + QuantifiedWhereClauses::from_iter( + interner, + bounds.iter(interner).cloned().filter_map(|bound| { + let trait_id = match bound.trait_id() { + Some(id) => id, + None => return Some(bound), + }; + + if principal_a == trait_id { + Some(new_principal_trait_ref.clone()) + } else { + auto_trait_ids_b.contains(&trait_id).then_some(bound) + } + }), + ) + }), + lifetime: lifetime_b.clone(), + }) + .intern(interner); + + // Check that new source is equal to target + let eq_goal = EqGoal { + a: new_source_ty.cast(interner), + b: target_ty.clone().cast(interner), + } + .cast(interner); + + // We don't push goal for `principal_b`'s object safety because it's implied by + // `principal_a`'s object safety. 
+ builder + .push_clause(trait_ref.clone(), [eq_goal, lifetime_outlives_goal.clone()]); + } + } + } + + // T -> dyn Trait + 'a + (_, TyKind::Dyn(DynTy { bounds, lifetime })) => { + // Check if all traits in trait object are object safe + let object_safe_goals = bounds + .skip_binders() + .iter(interner) + .filter_map(|bound| bound.trait_id()) + .map(|id| DomainGoal::ObjectSafe(id).cast(interner)); + + // Check that T implements all traits of the trait object + let source_ty_bounds = bounds + .clone() + .substitute(interner, &Substitution::from1(interner, source_ty.clone())); + + // Check that T is sized because we can only make + // a trait object from a sized type + let self_sized_goal: WhereClause<_> = TraitRef { + trait_id: db + .well_known_trait_id(WellKnownTrait::Sized) + .expect("Expected Sized to be defined when proving Unsize"), + substitution: Substitution::from1(interner, source_ty.clone()), + } + .cast(interner); + + // Check that `source_ty` outlives `'a` + let source_ty_outlives: Goal<_> = WhereClause::TypeOutlives(TypeOutlives { + ty: source_ty, + lifetime: lifetime.clone(), + }) + .cast(interner); + + builder.push_clause( + trait_ref, + source_ty_bounds + .iter(interner) + .map(|bound| bound.clone().cast::>(interner)) + .chain(object_safe_goals) + .chain(iter::once(self_sized_goal.cast(interner))) + .chain(iter::once(source_ty_outlives)), + ); + } + + (TyKind::Array(array_ty, _array_const), TyKind::Slice(slice_ty)) => { + let eq_goal = EqGoal { + a: array_ty.clone().cast(interner), + b: slice_ty.clone().cast(interner), + }; + + builder.push_clause(trait_ref, iter::once(eq_goal)); + } + + // Adt -> Adt + (TyKind::Adt(adt_id_a, substitution_a), TyKind::Adt(adt_id_b, substitution_b)) => { + if adt_id_a != adt_id_b { + return; + } + + let adt_id = *adt_id_a; + let adt_datum = db.adt_datum(adt_id); + + // Unsizing of enums is not allowed + if adt_datum.kind == AdtKind::Enum { + return; + } + + // We have a `struct` so we're guaranteed a single variant + 
let fields_len = adt_datum + .binders + .skip_binders() + .variants + .last() + .unwrap() + .fields + .len(); + + if fields_len == 0 { + return; + } + + let adt_tail_field = adt_datum + .binders + .map_ref(|bound| bound.variants.last().unwrap().fields.last().unwrap()) + .cloned(); + + // Collect unsize parameters that last field contains and + // ensure there at least one of them. + let unsize_parameter_candidates = + outer_binder_parameters_used(interner, &adt_tail_field); + + if unsize_parameter_candidates.is_empty() { + return; + } + // Ensure none of the other fields mention the parameters used + // in unsizing. + // We specifically want variables specified by the outermost binder + // i.e. the struct generic arguments binder. + if uses_outer_binder_params( + interner, + &adt_datum + .binders + .map_ref(|bound| &bound.variants.last().unwrap().fields[..fields_len - 1]), + &unsize_parameter_candidates, + ) { + return; + } + + let parameters_a = substitution_a.as_slice(interner); + let parameters_b = substitution_b.as_slice(interner); + // Check that the source adt with the target's + // unsizing parameters is equal to the target. + // We construct a new substitution where if a parameter is used in the + // coercion (i.e. it's a non-lifetime struct parameter used by it's last field), + // then we take that parameter from target substitution, otherwise we take + // it from the source substitution. + // + // In order for the coercion to be valid, target struct and + // struct with this newly constructed substitution applied to it should be equal. 
+ let substitution = Substitution::from_iter( + interner, + parameters_a.iter().enumerate().map(|(i, p)| { + if unsize_parameter_candidates.contains(&i) { + ¶meters_b[i] + } else { + p + } + }), + ); + + let eq_goal = EqGoal { + a: TyKind::Adt(adt_id, substitution) + .intern(interner) + .cast(interner), + b: target_ty.clone().cast(interner), + } + .cast(interner); + + // Extract `TailField` and `TailField` from `Struct` and `Struct`. + let source_tail_field = adt_tail_field.clone().substitute(interner, substitution_a); + let target_tail_field = adt_tail_field.substitute(interner, substitution_b); + + // Check that `TailField: Unsize>` + let last_field_unsizing_goal: Goal = TraitRef { + trait_id: unsize_trait_id, + substitution: Substitution::from_iter( + interner, + [source_tail_field, target_tail_field].iter().cloned(), + ), + } + .cast(interner); + + builder.push_clause(trait_ref, [eq_goal, last_field_unsizing_goal].iter()); + } + + // (.., T) -> (.., U) + (TyKind::Tuple(arity_a, substitution_a), TyKind::Tuple(arity_b, substitution_b)) => { + if arity_a != arity_b || *arity_a == 0 { + return; + } + let arity = arity_a; + + let tail_ty_a = substitution_a.iter(interner).last().unwrap(); + let tail_ty_b = substitution_b.iter(interner).last().unwrap(); + + // Check that the source tuple with the target's + // last element is equal to the target. 
+ let new_tuple = TyKind::Tuple( + *arity, + Substitution::from_iter( + interner, + substitution_a + .iter(interner) + .take(arity - 1) + .chain(iter::once(tail_ty_b)), + ), + ) + .cast(interner) + .intern(interner); + + let eq_goal: Goal = EqGoal { + a: new_tuple.cast(interner), + b: target_ty.clone().cast(interner), + } + .cast(interner); + + // Check that `T: Unsize` + let last_field_unsizing_goal: Goal = TraitRef { + trait_id: unsize_trait_id, + substitution: Substitution::from_iter( + interner, + [tail_ty_a, tail_ty_b].iter().cloned(), + ), + } + .cast(interner); + + builder.push_clause(trait_ref, [eq_goal, last_field_unsizing_goal].iter()); + } + + _ => (), + } +} diff --git a/chalk-solve/src/clauses/dyn_ty.rs b/chalk-solve/src/clauses/dyn_ty.rs new file mode 100644 index 00000000000..505da43f972 --- /dev/null +++ b/chalk-solve/src/clauses/dyn_ty.rs @@ -0,0 +1,81 @@ +use super::{builder::ClauseBuilder, generalize}; +use crate::RustIrDatabase; +use chalk_ir::{cast::Cast, interner::Interner, Ty, TyKind, WhereClause}; + +/// If the self type `S` of an `Implemented` goal is a `dyn trait` type, we wish +/// to generate program-clauses that indicates that it implements its own +/// traits. For example, a `dyn Write` type implements `Write` and so on. +/// +/// To see how this works, consider as an example the type `dyn Fn(&u8)`. This +/// is really shorthand for `dyn for<'a> Fn<(&'a u8), Output = ()>`, and we +/// represent that type as something like this: +/// +/// ```ignore +/// dyn(exists { +/// forall<'a> { Implemented(T: Fn<'a>) }, +/// forall<'a> { AliasEq(>::Output, ()) }, +/// }) +/// ``` +/// +/// so what we will do is to generate one program clause for each of the +/// conditions. 
Thus we get two program clauses: +/// +/// ```ignore +/// forall<'a> { Implemented(dyn Fn(&u8): Fn<(&'a u8)>) } +/// ``` +/// +/// and +/// +/// ```ignore +/// forall<'a> { AliasEq(>::Output, ()) }, +/// ``` +pub(super) fn build_dyn_self_ty_clauses( + db: &dyn RustIrDatabase, + builder: &mut ClauseBuilder<'_, I>, + self_ty: Ty, +) { + let interner = db.interner(); + let dyn_ty = match self_ty.kind(interner) { + TyKind::Dyn(dyn_ty) => dyn_ty.clone(), + _ => return, + }; + let generalized_dyn_ty = generalize::Generalize::apply(db.interner(), dyn_ty); + + // Here, `self_ty` is the `dyn Fn(&u8)`, and `dyn_ty` is the `exists { .. + // }` clauses shown above. + + // Turn free BoundVars in the type into new existentials. E.g. + // we might get some `dyn Foo`, and we don't want to return + // a clause with a free variable. We can instead return a + // slightly more general clause by basically turning this into + // `exists dyn Foo`. + + builder.push_binders(generalized_dyn_ty, |builder, dyn_ty| { + for exists_qwc in dyn_ty.bounds.map_ref(|r| r.iter(interner)) { + // Replace the `T` from `exists { .. }` with `self_ty`, + // yielding clases like + // + // ``` + // forall<'a> { Implemented(dyn Fn(&u8): Fn<(&'a u8)>) } + // ``` + let qwc = exists_qwc + .cloned() + .substitute(interner, &[self_ty.clone().cast(interner)]); + + builder.push_binders(qwc, |builder, bound| match &bound { + // For the implemented traits, we need to elaborate super traits and add where clauses from the trait + WhereClause::Implemented(trait_ref) => { + super::super_traits::push_trait_super_clauses( + builder.db, + builder, + trait_ref.clone(), + ) + } + // FIXME: Associated item bindings are just taken as facts (?) + WhereClause::AliasEq(_) => builder.push_fact(bound), + WhereClause::LifetimeOutlives(..) => {} + WhereClause::TypeOutlives(..) 
=> {} + }); + } + }); +} diff --git a/chalk-solve/src/clauses/env_elaborator.rs b/chalk-solve/src/clauses/env_elaborator.rs new file mode 100644 index 00000000000..6279d36a78a --- /dev/null +++ b/chalk-solve/src/clauses/env_elaborator.rs @@ -0,0 +1,107 @@ +use super::program_clauses::ToProgramClauses; +use crate::clauses::builder::ClauseBuilder; +use crate::clauses::{match_alias_ty, match_ty}; +use crate::DomainGoal; +use crate::FromEnv; +use crate::ProgramClause; +use crate::RustIrDatabase; +use crate::Ty; +use crate::{debug_span, TyKind}; +use chalk_ir::interner::Interner; +use chalk_ir::visit::{TypeVisitable, TypeVisitor}; +use chalk_ir::{DebruijnIndex, Environment}; +use rustc_hash::FxHashSet; +use std::ops::ControlFlow; +use tracing::instrument; + +/// When proving a `FromEnv` goal, we elaborate all `FromEnv` goals +/// found in the environment. +/// +/// For example, when `T: Clone` is in the environment, we can prove +/// `T: Copy` by adding the clauses from `trait Clone`, which includes +/// the rule `FromEnv(T: Copy) :- FromEnv(T: Clone)` +pub(super) fn elaborate_env_clauses( + db: &dyn RustIrDatabase, + in_clauses: &[ProgramClause], + out: &mut FxHashSet>, + environment: &Environment, +) { + let mut this_round = vec![]; + let builder = &mut ClauseBuilder::new(db, &mut this_round); + let mut elaborater = EnvElaborator { + db, + builder, + environment, + }; + in_clauses.visit_with(&mut elaborater, DebruijnIndex::INNERMOST); + out.extend(this_round); +} + +struct EnvElaborator<'me, 'builder, I: Interner> { + db: &'me dyn RustIrDatabase, + builder: &'builder mut ClauseBuilder<'me, I>, + environment: &'me Environment, +} + +impl<'me, 'builder, I: Interner> TypeVisitor for EnvElaborator<'me, 'builder, I> { + type BreakTy = (); + + fn as_dyn(&mut self) -> &mut dyn TypeVisitor { + self + } + + fn interner(&self) -> I { + self.db.interner() + } + #[instrument(level = "debug", skip(self, _outer_binder))] + fn visit_ty(&mut self, ty: &Ty, _outer_binder: 
DebruijnIndex) -> ControlFlow<()> { + match ty.kind(self.interner()) { + TyKind::Alias(alias_ty) => match_alias_ty(self.builder, self.environment, alias_ty), + TyKind::Placeholder(_) => {} + + // FIXME(#203) -- We haven't fully figured out the implied + // bounds story around `dyn Trait` types. + TyKind::Dyn(_) => (), + + TyKind::Function(_) | TyKind::BoundVar(_) | TyKind::InferenceVar(_, _) => (), + + _ => { + // This shouldn't fail because of the above clauses + match_ty(self.builder, self.environment, ty) + .map_err(|_| ()) + .unwrap() + } + } + ControlFlow::Continue(()) + } + + fn visit_domain_goal( + &mut self, + domain_goal: &DomainGoal, + outer_binder: DebruijnIndex, + ) -> ControlFlow<()> { + if let DomainGoal::FromEnv(from_env) = domain_goal { + debug_span!("visit_domain_goal", ?from_env); + match from_env { + FromEnv::Trait(trait_ref) => { + let trait_datum = self.db.trait_datum(trait_ref.trait_id); + + trait_datum.to_program_clauses(self.builder, self.environment); + + // If we know that `T: Iterator`, then we also know + // things about `::Item`, so push those + // implied bounds too: + for &associated_ty_id in &trait_datum.associated_ty_ids { + self.db + .associated_ty_data(associated_ty_id) + .to_program_clauses(self.builder, self.environment); + } + ControlFlow::Continue(()) + } + FromEnv::Ty(ty) => ty.visit_with(self, outer_binder), + } + } else { + ControlFlow::Continue(()) + } + } +} diff --git a/chalk-solve/src/clauses/generalize.rs b/chalk-solve/src/clauses/generalize.rs new file mode 100644 index 00000000000..bff05b36995 --- /dev/null +++ b/chalk-solve/src/clauses/generalize.rs @@ -0,0 +1,99 @@ +//! This gets rid of free variables in a type by replacing them by fresh bound +//! ones. We use this when building clauses that contain types passed to +//! `program_clauses`; these may contain variables, and just copying those +//! variables verbatim leads to problems. Instead, we return a slightly more +//! 
general program clause, with new variables in those places. This can only +//! happen with `dyn Trait` currently; that's the only case where we use the +//! types passed to `program_clauses` in the clauses we generate. + +use chalk_derive::FallibleTypeFolder; +use chalk_ir::{ + fold::{TypeFoldable, TypeFolder}, + interner::{HasInterner, Interner}, + Binders, BoundVar, Const, ConstData, ConstValue, DebruijnIndex, Lifetime, LifetimeData, Ty, + TyKind, TyVariableKind, VariableKind, VariableKinds, +}; +use rustc_hash::FxHashMap; + +#[derive(FallibleTypeFolder)] +pub struct Generalize { + binders: Vec>, + mapping: FxHashMap, + interner: I, +} + +impl Generalize { + pub fn apply(interner: I, value: T) -> Binders + where + T: HasInterner + TypeFoldable, + { + let mut generalize = Generalize { + binders: Vec::new(), + mapping: FxHashMap::default(), + interner, + }; + let value = value + .try_fold_with(&mut generalize, DebruijnIndex::INNERMOST) + .unwrap(); + Binders::new( + VariableKinds::from_iter(interner, generalize.binders), + value, + ) + } +} + +impl TypeFolder for Generalize { + fn as_dyn(&mut self) -> &mut dyn TypeFolder { + self + } + + fn fold_free_var_ty(&mut self, bound_var: BoundVar, outer_binder: DebruijnIndex) -> Ty { + let binder_vec = &mut self.binders; + let new_index = self.mapping.entry(bound_var).or_insert_with(|| { + let i = binder_vec.len(); + binder_vec.push(VariableKind::Ty(TyVariableKind::General)); + i + }); + let new_var = BoundVar::new(outer_binder, *new_index); + TyKind::BoundVar(new_var).intern(TypeFolder::interner(self)) + } + + fn fold_free_var_const( + &mut self, + ty: Ty, + bound_var: BoundVar, + outer_binder: DebruijnIndex, + ) -> Const { + let binder_vec = &mut self.binders; + let new_index = self.mapping.entry(bound_var).or_insert_with(|| { + let i = binder_vec.len(); + binder_vec.push(VariableKind::Const(ty.clone())); + i + }); + let new_var = BoundVar::new(outer_binder, *new_index); + ConstData { + ty, + value: 
ConstValue::BoundVar(new_var), + } + .intern(TypeFolder::interner(self)) + } + + fn fold_free_var_lifetime( + &mut self, + bound_var: BoundVar, + outer_binder: DebruijnIndex, + ) -> Lifetime { + let binder_vec = &mut self.binders; + let new_index = self.mapping.entry(bound_var).or_insert_with(|| { + let i = binder_vec.len(); + binder_vec.push(VariableKind::Lifetime); + i + }); + let new_var = BoundVar::new(outer_binder, *new_index); + LifetimeData::BoundVar(new_var).intern(TypeFolder::interner(self)) + } + + fn interner(&self) -> I { + self.interner + } +} diff --git a/chalk-solve/src/clauses/program_clauses.rs b/chalk-solve/src/clauses/program_clauses.rs new file mode 100644 index 00000000000..e1fec223d06 --- /dev/null +++ b/chalk-solve/src/clauses/program_clauses.rs @@ -0,0 +1,954 @@ +use crate::clauses::builder::ClauseBuilder; +use crate::rust_ir::*; +use crate::split::Split; +use chalk_ir::cast::{Cast, Caster}; +use chalk_ir::interner::Interner; +use chalk_ir::*; +use std::iter; +use tracing::instrument; + +/// Trait for lowering a given piece of rust-ir source (e.g., an impl +/// or struct definition) into its associated "program clauses" -- +/// that is, into the lowered, logical rules that it defines. +pub trait ToProgramClauses { + fn to_program_clauses(&self, builder: &mut ClauseBuilder<'_, I>, environment: &Environment); +} + +impl ToProgramClauses for ImplDatum { + /// Given `impl Clone for Vec { ... }`, generate: + /// + /// ```notrust + /// -- Rule Implemented-From-Impl + /// forall { + /// Implemented(Vec: Clone) :- Implemented(T: Clone). + /// } + /// ``` + /// + /// For a negative impl like `impl... !Clone for ...`, however, we + /// generate nothing -- this is just a way to *opt out* from the + /// default auto trait impls, it doesn't have any positive effect + /// on its own. 
+ fn to_program_clauses( + &self, + builder: &mut ClauseBuilder<'_, I>, + _environment: &Environment, + ) { + if self.is_positive() { + let binders = self.binders.clone(); + builder.push_binders( + binders, + |builder, + ImplDatumBound { + trait_ref, + where_clauses, + }| { + builder.push_clause(trait_ref, where_clauses); + }, + ); + } + } +} + +impl ToProgramClauses for AssociatedTyValue { + /// Given the following trait: + /// + /// ```notrust + /// trait Iterable { + /// type IntoIter<'a>: 'a; + /// } + /// ``` + /// + /// Then for the following impl: + /// ```notrust + /// impl Iterable for Vec where T: Clone { + /// type IntoIter<'a> = Iter<'a, T>; + /// } + /// ``` + /// + /// we generate: + /// + /// ```notrust + /// -- Rule Normalize-From-Impl + /// forall<'a, T> { + /// Normalize( as Iterable>::IntoIter<'a> -> Iter<'a, T>>) :- + /// Implemented(T: Clone), // (1) + /// Implemented(Iter<'a, T>: 'a). // (2) + /// } + /// ``` + fn to_program_clauses( + &self, + builder: &mut ClauseBuilder<'_, I>, + _environment: &Environment, + ) { + let impl_datum = builder.db.impl_datum(self.impl_id); + let associated_ty = builder.db.associated_ty_data(self.associated_ty_id); + + builder.push_binders(self.value.clone(), |builder, assoc_ty_value| { + let all_parameters = builder.placeholders_in_scope().to_vec(); + + // Get the projection for this associated type: + // + // * `impl_params`: `[!T]` + // * `projection`: ` as Iterable>::Iter<'!a>` + let (impl_params, projection) = builder + .db + .impl_parameters_and_projection_from_associated_ty_value(&all_parameters, self); + + // Assemble the full list of conditions for projection to be valid. + // This comes in two parts, marked as (1) and (2) in doc above: + // + // 1. require that the where clauses from the impl apply + let interner = builder.db.interner(); + let impl_where_clauses = impl_datum + .binders + .map_ref(|b| &b.where_clauses) + .into_iter() + .map(|wc| wc.cloned().substitute(interner, impl_params)); + + // 2. 
any where-clauses from the `type` declaration in the trait: the + // parameters must be substituted with those of the impl + let assoc_ty_where_clauses = associated_ty + .binders + .map_ref(|b| &b.where_clauses) + .into_iter() + .map(|wc| wc.cloned().substitute(interner, &projection.substitution)); + + // Create the final program clause: + // + // ```notrust + // -- Rule Normalize-From-Impl + // forall<'a, T> { + // Normalize( as Iterable>::IntoIter<'a> -> Iter<'a, T>>) :- + // Implemented(T: Clone), // (1) + // Implemented(Iter<'a, T>: 'a). // (2) + // } + // ``` + builder.push_clause( + Normalize { + alias: AliasTy::Projection(projection.clone()), + ty: assoc_ty_value.ty, + }, + impl_where_clauses.chain(assoc_ty_where_clauses), + ); + }); + } +} + +impl ToProgramClauses for OpaqueTyDatum { + /// Given `opaque type T: A + B = HiddenTy where U: C;`, we generate: + /// + /// ```notrust + /// AliasEq(T = HiddenTy) :- Reveal. + /// AliasEq(T = !T). + /// WF(T) :- WF(U: C). + /// Implemented(!T: A). + /// Implemented(!T: B). + /// ``` + /// where `!T<..>` is the placeholder for the unnormalized type `T<..>`. + #[instrument(level = "debug", skip(builder))] + fn to_program_clauses( + &self, + builder: &mut ClauseBuilder<'_, I>, + _environment: &Environment, + ) { + builder.push_binders(self.bound.clone(), |builder, opaque_ty_bound| { + let interner = builder.interner(); + let substitution = builder.substitution_in_scope(); + let alias = AliasTy::Opaque(OpaqueTy { + opaque_ty_id: self.opaque_ty_id, + substitution: substitution.clone(), + }); + + let alias_placeholder_ty = + TyKind::OpaqueType(self.opaque_ty_id, substitution).intern(interner); + + // AliasEq(T<..> = HiddenTy) :- Reveal. + builder.push_clause( + DomainGoal::Holds( + AliasEq { + alias: alias.clone(), + ty: builder.db.hidden_opaque_type(self.opaque_ty_id), + } + .cast(interner), + ), + iter::once(DomainGoal::Reveal), + ); + + // AliasEq(T<..> = !T<..>). 
+ builder.push_fact(DomainGoal::Holds( + AliasEq { + alias, + ty: alias_placeholder_ty.clone(), + } + .cast(interner), + )); + + // WF(!T<..>) :- WF(WC). + builder.push_binders(opaque_ty_bound.where_clauses, |builder, where_clauses| { + builder.push_clause( + WellFormed::Ty(alias_placeholder_ty.clone()), + where_clauses + .into_iter() + .map(|wc| wc.into_well_formed_goal(interner)), + ); + }); + + let substitution = Substitution::from1(interner, alias_placeholder_ty); + for bound in opaque_ty_bound.bounds { + let bound_with_placeholder_ty = bound.substitute(interner, &substitution); + builder.push_binders(bound_with_placeholder_ty, |builder, bound| match &bound { + // For the implemented traits, we need to elaborate super traits and add where clauses from the trait + WhereClause::Implemented(trait_ref) => { + super::super_traits::push_trait_super_clauses( + builder.db, + builder, + trait_ref.clone(), + ) + } + // FIXME: Associated item bindings are just taken as facts (?) + WhereClause::AliasEq(_) => builder.push_fact(bound), + WhereClause::LifetimeOutlives(..) => {} + WhereClause::TypeOutlives(..) => {} + }); + } + }); + } +} + +/// Generates the "well-formed" program clauses for an applicative type +/// with the name `type_name`. For example, given a struct definition: +/// +/// ```ignore +/// struct Foo { } +/// ``` +/// +/// we would generate the clause: +/// +/// ```notrust +/// forall { +/// WF(Foo) :- WF(T: Eq). +/// } +/// ``` +/// +/// # Parameters +/// - builder -- the clause builder. 
We assume all the generic types from `Foo` are in scope +/// - type_name -- in our example above, the name `Foo` +/// - where_clauses -- the list of where clauses declared on the type (`T: Eq`, in our example) +fn well_formed_program_clauses<'a, I, Wc>( + builder: &'a mut ClauseBuilder<'_, I>, + ty: Ty, + where_clauses: Wc, +) where + I: Interner, + Wc: Iterator>, +{ + let interner = builder.interner(); + builder.push_clause( + WellFormed::Ty(ty), + where_clauses + .cloned() + .map(|qwc| qwc.into_well_formed_goal(interner)), + ); +} + +/// Generates the "fully visible" program clauses for an applicative type +/// with the name `type_name`. For example, given a struct definition: +/// +/// ```ignore +/// struct Foo { } +/// ``` +/// +/// we would generate the clause: +/// +/// ```notrust +/// forall { +/// IsFullyVisible(Foo) :- IsFullyVisible(T). +/// } +/// ``` +/// +/// # Parameters +/// +/// - builder -- the clause builder. We assume all the generic types from `Foo` are in scope +/// - type_name -- in our example above, the name `Foo` +fn fully_visible_program_clauses( + builder: &mut ClauseBuilder<'_, I>, + ty: Ty, + subst: &Substitution, +) where + I: Interner, +{ + let interner = builder.interner(); + builder.push_clause( + DomainGoal::IsFullyVisible(ty), + subst + .type_parameters(interner) + .map(|typ| DomainGoal::IsFullyVisible(typ).cast::>(interner)), + ); +} + +/// Generates the "implied bounds" clauses for an applicative +/// type with the name `type_name`. For example, if `type_name` +/// represents a struct `S` that is declared like: +/// +/// ```ignore +/// struct S where T: Eq { } +/// ``` +/// +/// then we would generate the rule: +/// +/// ```notrust +/// FromEnv(T: Eq) :- FromEnv(S) +/// ``` +/// +/// # Parameters +/// +/// - builder -- the clause builder. We assume all the generic types from `S` are in scope. 
+/// - type_name -- in our example above, the name `S` +/// - where_clauses -- the list of where clauses declared on the type (`T: Eq`, in our example). +fn implied_bounds_program_clauses<'a, I, Wc>( + builder: &'a mut ClauseBuilder<'_, I>, + ty: &Ty, + where_clauses: Wc, +) where + I: Interner, + Wc: Iterator>, +{ + let interner = builder.interner(); + + for qwc in where_clauses { + builder.push_binders(qwc.clone(), |builder, wc| { + builder.push_clause(wc.into_from_env_goal(interner), Some(ty.clone().from_env())); + }); + } +} + +impl ToProgramClauses for AdtDatum { + /// Given the following type definition: `struct Foo { }`, generate: + /// + /// ```notrust + /// -- Rule WellFormed-Type + /// forall { + /// WF(Foo) :- WF(T: Eq). + /// } + /// + /// -- Rule Implied-Bound-From-Type + /// forall { + /// FromEnv(T: Eq) :- FromEnv(Foo). + /// } + /// + /// forall { + /// IsFullyVisible(Foo) :- IsFullyVisible(T). + /// } + /// ``` + /// + /// If the type `Foo` is marked `#[upstream]`, we also generate: + /// + /// ```notrust + /// forall { IsUpstream(Foo). } + /// ``` + /// + /// Otherwise, if the type `Foo` is not marked `#[upstream]`, we generate: + /// ```notrust + /// forall { IsLocal(Foo). } + /// ``` + /// + /// Given an `#[upstream]` type that is also fundamental: + /// + /// ```notrust + /// #[upstream] + /// #[fundamental] + /// struct Box {} + /// ``` + /// + /// We generate the following clauses: + /// + /// ```notrust + /// forall { IsLocal(Box) :- IsLocal(T). } + /// forall { IsLocal(Box) :- IsLocal(U). } + /// + /// forall { IsUpstream(Box) :- IsUpstream(T), IsUpstream(U). } + /// + /// // Generated for both upstream and local fundamental types + /// forall { DownstreamType(Box) :- DownstreamType(T). } + /// forall { DownstreamType(Box) :- DownstreamType(U). 
} + /// ``` + /// + #[instrument(level = "debug", skip(builder))] + fn to_program_clauses( + &self, + builder: &mut ClauseBuilder<'_, I>, + _environment: &Environment, + ) { + let interner = builder.interner(); + let binders = self.binders.map_ref(|b| &b.where_clauses).cloned(); + + builder.push_binders(binders, |builder, where_clauses| { + let self_ty = TyKind::Adt(self.id, builder.substitution_in_scope()).intern(interner); + + well_formed_program_clauses(builder, self_ty.clone(), where_clauses.iter()); + + implied_bounds_program_clauses(builder, &self_ty, where_clauses.iter()); + + fully_visible_program_clauses( + builder, + self_ty.clone(), + &builder.substitution_in_scope(), + ); + + // Types that are not marked `#[upstream]` satisfy IsLocal(Ty) + if !self.flags.upstream { + // `IsLocalTy(Ty)` depends *only* on whether the type + // is marked #[upstream] and nothing else + builder.push_fact(DomainGoal::IsLocal(self_ty.clone())); + } else if self.flags.fundamental { + // If a type is `#[upstream]`, but is also + // `#[fundamental]`, it satisfies IsLocal if and only + // if its parameters satisfy IsLocal + for type_param in builder.substitution_in_scope().type_parameters(interner) { + builder.push_clause( + DomainGoal::IsLocal(self_ty.clone()), + Some(DomainGoal::IsLocal(type_param)), + ); + } + builder.push_clause( + DomainGoal::IsUpstream(self_ty.clone()), + builder + .substitution_in_scope() + .type_parameters(interner) + .map(|type_param| DomainGoal::IsUpstream(type_param)), + ); + } else { + // The type is just upstream and not fundamental + builder.push_fact(DomainGoal::IsUpstream(self_ty.clone())); + } + + if self.flags.fundamental { + assert!( + builder + .substitution_in_scope() + .type_parameters(interner) + .count() + >= 1, + "Only fundamental types with type parameters are supported" + ); + for type_param in builder.substitution_in_scope().type_parameters(interner) { + builder.push_clause( + DomainGoal::DownstreamType(self_ty.clone()), + 
Some(DomainGoal::DownstreamType(type_param)), + ); + } + } + }); + } +} + +impl ToProgramClauses for FnDefDatum { + /// Given the following function definition: `fn bar() where T: Eq`, generate: + /// + /// ```notrust + /// -- Rule WellFormed-Type + /// forall { + /// WF(bar) :- WF(T: Eq) + /// } + /// + /// -- Rule Implied-Bound-From-Type + /// forall { + /// FromEnv(T: Eq) :- FromEnv(bar). + /// } + /// + /// forall { + /// IsFullyVisible(bar) :- IsFullyVisible(T). + /// } + /// ``` + #[instrument(level = "debug", skip(builder))] + fn to_program_clauses( + &self, + builder: &mut ClauseBuilder<'_, I>, + _environment: &Environment, + ) { + let interner = builder.interner(); + let binders = self.binders.map_ref(|b| &b.where_clauses).cloned(); + + builder.push_binders(binders, |builder, where_clauses| { + let ty = TyKind::FnDef(self.id, builder.substitution_in_scope()).intern(interner); + + well_formed_program_clauses(builder, ty.clone(), where_clauses.iter()); + + implied_bounds_program_clauses(builder, &ty, where_clauses.iter()); + + fully_visible_program_clauses(builder, ty, &builder.substitution_in_scope()); + }); + } +} + +impl ToProgramClauses for TraitDatum { + /// Given the following trait declaration: `trait Ord where Self: Eq { ... }`, generate: + /// + /// ```notrust + /// -- Rule WellFormed-TraitRef + /// forall { + /// WF(Self: Ord) :- Implemented(Self: Ord), WF(Self: Eq). + /// } + /// ``` + /// + /// and the reverse rules: + /// + /// ```notrust + /// -- Rule Implemented-From-Env + /// forall { + /// (Self: Ord) :- FromEnv(Self: Ord). + /// } + /// + /// -- Rule Implied-Bound-From-Trait + /// forall { + /// FromEnv(Self: Eq) :- FromEnv(Self: Ord). + /// } + /// ``` + /// + /// As specified in the orphan rules, if a trait is not marked `#[upstream]`, the current crate + /// can implement it for any type. 
To represent that, we generate: + /// + /// ```notrust + /// // `Ord` would not be `#[upstream]` when compiling `std` + /// forall { LocalImplAllowed(Self: Ord). } + /// ``` + /// + /// For traits that are `#[upstream]` (i.e. not in the current crate), the orphan rules dictate + /// that impls are allowed as long as at least one type parameter is local and each type + /// prior to that is fully visible. That means that each type prior to the first local + /// type cannot contain any of the type parameters of the impl. + /// + /// This rule is fairly complex, so we expand it and generate a program clause for each + /// possible case. This is represented as follows: + /// + /// ```notrust + /// // for `#[upstream] trait Foo where Self: Eq { ... }` + /// forall { + /// LocalImplAllowed(Self: Foo) :- IsLocal(Self). + /// } + /// + /// forall { + /// LocalImplAllowed(Self: Foo) :- + /// IsFullyVisible(Self), + /// IsLocal(T). + /// } + /// + /// forall { + /// LocalImplAllowed(Self: Foo) :- + /// IsFullyVisible(Self), + /// IsFullyVisible(T), + /// IsLocal(U). + /// } + /// + /// forall { + /// LocalImplAllowed(Self: Foo) :- + /// IsFullyVisible(Self), + /// IsFullyVisible(T), + /// IsFullyVisible(U), + /// IsLocal(V). + /// } + /// ``` + /// + /// The overlap check uses compatible { ... } mode to ensure that it accounts for impls that + /// may exist in some other *compatible* world. For every upstream trait, we add a rule to + /// account for the fact that upstream crates are able to compatibly add impls of upstream + /// traits for upstream types. + /// + /// ```notrust + /// // For `#[upstream] trait Foo where Self: Eq { ... }` + /// forall { + /// Implemented(Self: Foo) :- + /// Implemented(Self: Eq), // where clauses + /// Compatible, // compatible modality + /// IsUpstream(Self), + /// IsUpstream(T), + /// IsUpstream(U), + /// IsUpstream(V), + /// CannotProve. // returns ambiguous + /// } + /// ``` + /// + /// In certain situations, this is too restrictive. 
Consider the following code: + /// + /// ```notrust + /// /* In crate std */ + /// trait Sized { } + /// struct str { } + /// + /// /* In crate bar (depends on std) */ + /// trait Bar { } + /// impl Bar for str { } + /// impl Bar for T where T: Sized { } + /// ``` + /// + /// Here, because of the rules we've defined, these two impls overlap. The std crate is + /// upstream to bar, and thus it is allowed to compatibly implement Sized for str. If str + /// can implement Sized in a compatible future, these two impls definitely overlap since the + /// second impl covers all types that implement Sized. + /// + /// The solution we've got right now is to mark Sized as "fundamental" when it is defined. + /// This signals to the Rust compiler that it can rely on the fact that str does not + /// implement Sized in all contexts. A consequence of this is that we can no longer add an + /// implementation of Sized compatibly for str. This is the trade off you make when defining + /// a fundamental trait. + /// + /// To implement fundamental traits, we simply just do not add the rule above that allows + /// upstream types to implement upstream traits. Fundamental traits are not allowed to + /// compatibly do that. + fn to_program_clauses(&self, builder: &mut ClauseBuilder<'_, I>, environment: &Environment) { + let interner = builder.interner(); + let binders = self.binders.map_ref(|b| &b.where_clauses).cloned(); + builder.push_binders(binders, |builder, where_clauses| { + let trait_ref = chalk_ir::TraitRef { + trait_id: self.id, + substitution: builder.substitution_in_scope(), + }; + + builder.push_clause( + trait_ref.clone().well_formed(), + where_clauses + .iter() + .cloned() + .map(|qwc| qwc.into_well_formed_goal(interner)) + .casted::>(interner) + .chain(Some(trait_ref.clone().cast(interner))), + ); + + // The number of parameters will always be at least 1 + // because of the Self parameter that is automatically + // added to every trait. 
This is important because + // otherwise the added program clauses would not have any + // conditions. + let type_parameters: Vec<_> = trait_ref.type_parameters(interner).collect(); + + if environment.has_compatible_clause(interner) { + // Note: even though we do check for a `Compatible` clause here, + // we also keep it as a condition for the clauses below, purely + // for logical consistency. But really, it's not needed and could be + // removed. + + // Drop trait can't have downstream implementation because it can only + // be implemented with the same genericity as the struct definition, + // i.e. Drop implementation for `struct S {}` is forced to be + // `impl Drop for S { ... }`. That means that orphan rules + // prevent Drop from being implemented in downstream crates. + if self.well_known != Some(WellKnownTrait::Drop) { + // Add all cases for potential downstream impls that could exist + for i in 0..type_parameters.len() { + builder.push_clause( + trait_ref.clone(), + where_clauses + .iter() + .cloned() + .casted(interner) + .chain(iter::once(DomainGoal::Compatible.cast(interner))) + .chain((0..i).map(|j| { + DomainGoal::IsFullyVisible(type_parameters[j].clone()) + .cast(interner) + })) + .chain(iter::once( + DomainGoal::DownstreamType(type_parameters[i].clone()) + .cast(interner), + )) + .chain(iter::once(GoalData::CannotProve.intern(interner))), + ); + } + } + + // Fundamental traits can be reasoned about negatively without any ambiguity, so no + // need for this rule if the trait is fundamental. 
+ if !self.flags.fundamental { + builder.push_clause( + trait_ref.clone(), + where_clauses + .iter() + .cloned() + .casted(interner) + .chain(iter::once(DomainGoal::Compatible.cast(interner))) + .chain( + trait_ref + .type_parameters(interner) + .map(|ty| DomainGoal::IsUpstream(ty).cast(interner)), + ) + .chain(iter::once(GoalData::CannotProve.intern(interner))), + ); + } + } + + // Orphan rules: + if !self.flags.upstream { + // Impls for traits declared locally always pass the impl rules + builder.push_fact(DomainGoal::LocalImplAllowed(trait_ref.clone())); + } else { + // Impls for remote traits must have a local type in the right place + for i in 0..type_parameters.len() { + builder.push_clause( + DomainGoal::LocalImplAllowed(trait_ref.clone()), + (0..i) + .map(|j| DomainGoal::IsFullyVisible(type_parameters[j].clone())) + .chain(Some(DomainGoal::IsLocal(type_parameters[i].clone()))), + ); + } + } + + // Reverse implied bound rules: given (e.g.) `trait Foo: Bar + Baz`, + // we create rules like: + // + // ``` + // FromEnv(T: Bar) :- FromEnv(T: Foo) + // ``` + // + // and + // + // ``` + // FromEnv(T: Baz) :- FromEnv(T: Foo) + // ``` + for qwc in where_clauses { + builder.push_binders(qwc, |builder, wc| { + builder.push_clause( + wc.into_from_env_goal(interner), + Some(trait_ref.clone().from_env()), + ); + }); + } + + // Finally, for every trait `Foo` we make a rule + // + // ``` + // Implemented(T: Foo) :- FromEnv(T: Foo) + // ``` + builder.push_clause(trait_ref.clone(), Some(trait_ref.from_env())); + }); + } +} + +impl ToProgramClauses for AssociatedTyDatum { + /// For each associated type, we define the "projection + /// equality" rules. There are always two; one for a successful normalization, + /// and one for the "fallback" notion of equality. 
+ /// + /// Given: (here, `'a` and `T` represent zero or more parameters) + /// + /// ```notrust + /// trait Foo { + /// type Assoc<'a, T>: Bounds where WC; + /// } + /// ``` + /// + /// we generate the 'fallback' rule: + /// + /// ```notrust + /// -- Rule AliasEq-Placeholder + /// forall { + /// AliasEq(::Assoc<'a, T> = (Foo::Assoc<'a, T>)). + /// } + /// ``` + /// + /// and + /// + /// ```notrust + /// -- Rule AliasEq-Normalize + /// forall { + /// AliasEq(::Assoc<'a, T> = U) :- + /// Normalize(::Assoc -> U). + /// } + /// ``` + /// + /// We used to generate an "elaboration" rule like this: + /// + /// ```notrust + /// forall { + /// T: Foo :- exists { AliasEq(::Assoc = U) }. + /// } + /// ``` + /// + /// but this caused problems with the recursive solver. In + /// particular, whenever normalization is possible, we cannot + /// solve that projection uniquely, since we can now elaborate + /// `AliasEq` to fallback *or* normalize it. So instead we + /// handle this kind of reasoning through the `FromEnv` predicate. + /// + /// Another set of clauses we generate for each associated type is about placeholder associated + /// types (i.e. `TyKind::AssociatedType`). Given + /// + /// ```notrust + /// trait Foo { + /// type Assoc<'a, T>: Bar where WC; + /// } + /// ``` + /// + /// we generate + /// + /// ```notrust + /// forall { + /// Implemented((Foo::Assoc<'a, T>): Bar) :- WC. + /// AliasEq(<<(Foo::Assoc<'a, T>)> as Bar>::U = Ty) :- WC. + /// } + /// ``` + /// + /// We also generate rules specific to WF requirements and implied bounds: + /// + /// ```notrust + /// -- Rule WellFormed-AssocTy + /// forall { + /// WellFormed((Foo::Assoc)) :- WellFormed(Self: Foo), WellFormed(WC). + /// } + /// + /// -- Rule Implied-WC-From-AssocTy + /// forall { + /// FromEnv(WC) :- FromEnv((Foo::Assoc)). + /// } + /// + /// -- Rule Implied-Bound-From-AssocTy + /// forall { + /// FromEnv(::Assoc<'a,T>: Bounds) :- FromEnv(Self: Foo), WC. 
+ /// } + /// + /// -- Rule Implied-Trait-From-AssocTy + /// forall { + /// FromEnv(Self: Foo) :- FromEnv((Foo::Assoc)). + /// } + /// ``` + fn to_program_clauses( + &self, + builder: &mut ClauseBuilder<'_, I>, + _environment: &Environment, + ) { + let interner = builder.interner(); + let binders = self.binders.clone(); + builder.push_binders( + binders, + |builder, + AssociatedTyDatumBound { + where_clauses, + bounds, + }| { + let substitution = builder.substitution_in_scope(); + + let projection = ProjectionTy { + associated_ty_id: self.id, + substitution: substitution.clone(), + }; + let projection_ty = AliasTy::Projection(projection.clone()).intern(interner); + + // Retrieve the trait ref embedding the associated type + let trait_ref = builder.db.trait_ref_from_projection(&projection); + + // Construct an application from the projection. So if we have `::Item`, + // we would produce `(Iterator::Item)`. + let placeholder_ty = TyKind::AssociatedType(self.id, substitution).intern(interner); + + let projection_eq = AliasEq { + alias: AliasTy::Projection(projection.clone()), + ty: placeholder_ty.clone(), + }; + + // Fallback rule. The solver uses this to move between the projection + // and placeholder type. + // + // forall { + // AliasEq(::Assoc = (Foo::Assoc)). + // } + builder.push_fact_with_priority(projection_eq, None, ClausePriority::Low); + + // Well-formedness of projection type. + // + // forall { + // WellFormed((Foo::Assoc)) :- WellFormed(Self: Foo), WellFormed(WC). + // } + builder.push_clause( + WellFormed::Ty(placeholder_ty.clone()), + iter::once(WellFormed::Trait(trait_ref.clone()).cast::>(interner)) + .chain( + where_clauses + .iter() + .cloned() + .map(|qwc| qwc.into_well_formed_goal(interner)) + .casted(interner), + ), + ); + + // Assuming well-formedness of projection type means we can assume + // the trait ref as well. Mostly used in function bodies. + // + // forall { + // FromEnv(Self: Foo) :- FromEnv((Foo::Assoc)). 
+ // } + builder.push_clause( + FromEnv::Trait(trait_ref.clone()), + Some(placeholder_ty.from_env()), + ); + + // Reverse rule for where clauses. + // + // forall { + // FromEnv(WC) :- FromEnv((Foo::Assoc)). + // } + // + // This is really a family of clauses, one for each where clause. + for qwc in &where_clauses { + builder.push_binders(qwc.clone(), |builder, wc| { + builder.push_clause( + wc.into_from_env_goal(interner), + Some(FromEnv::Ty(placeholder_ty.clone())), + ); + }); + } + + for quantified_bound in bounds { + builder.push_binders(quantified_bound, |builder, bound| { + // Reverse rule for implied bounds. + // + // forall { + // FromEnv(::Assoc: Bounds) :- FromEnv(Self: Foo), WC + // } + for wc in bound.into_where_clauses(interner, projection_ty.clone()) { + builder.push_clause( + wc.into_from_env_goal(interner), + iter::once( + FromEnv::Trait(trait_ref.clone()).cast::>(interner), + ) + .chain(where_clauses.iter().cloned().casted(interner)), + ); + } + + // Rules for the corresponding placeholder type. + // + // When `Foo::Assoc` has a bound `type Assoc: Trait`, we generate: + // + // forall { + // Implemented((Foo::Assoc): Trait) :- WC + // AliasEq(<(Foo::Assoc) as Trait>::T = Ty) :- WC + // } + for wc in bound.into_where_clauses(interner, placeholder_ty.clone()) { + builder.push_clause(wc, where_clauses.iter().cloned()); + } + }); + } + + // add new type parameter U + builder.push_bound_ty(|builder, ty| { + // `Normalize(::Assoc -> U)` + let normalize = Normalize { + alias: AliasTy::Projection(projection.clone()), + ty: ty.clone(), + }; + + // `AliasEq(::Assoc = U)` + let projection_eq = AliasEq { + alias: AliasTy::Projection(projection), + ty, + }; + + // Projection equality rule from above. + // + // forall { + // AliasEq(::Assoc = U) :- + // Normalize(::Assoc -> U). 
+ // } + builder.push_clause(projection_eq, Some(normalize)); + }); + }, + ); + } +} diff --git a/chalk-solve/src/clauses/super_traits.rs b/chalk-solve/src/clauses/super_traits.rs new file mode 100644 index 00000000000..fe2702a09d4 --- /dev/null +++ b/chalk-solve/src/clauses/super_traits.rs @@ -0,0 +1,196 @@ +use itertools::{Either, Itertools}; +use rustc_hash::FxHashSet; + +use super::builder::ClauseBuilder; +use crate::{split::Split, RustIrDatabase}; +use chalk_ir::{ + fold::shift::Shift, interner::Interner, AliasEq, AliasTy, Binders, BoundVar, DebruijnIndex, + Normalize, ProjectionTy, TraitId, TraitRef, Ty, WhereClause, +}; + +/// Generate `Implemented` and `Normalize` clauses for `dyn Trait` and opaque types. +/// We need to generate those clauses for all super traits, and for each trait we +/// require its where clauses. (See #203) +pub(super) fn push_trait_super_clauses( + db: &dyn RustIrDatabase, + builder: &mut ClauseBuilder<'_, I>, + trait_ref: TraitRef, +) { + let interner = db.interner(); + // Given `trait SuperTrait: WC`, which is a super trait + // of `Trait` (including actually just being the same trait); + // then we want to push + // - for `dyn Trait`: + // `Implemented(dyn Trait: SuperTrait) :- WC`. + // - for placeholder `!T` of `opaque type T: Trait = HiddenTy`: + // `Implemented(!T: SuperTrait) :- WC` + // + // When `SuperTrait` has `AliasEq` bounds like `trait SuperTrait: AnotherTrait`, + // we also push + // - for `dyn Trait`: + // `Normalize(::Assoc -> Ty) :- AssocWC, WC` + // - for placeholder `!T` of `opaque type T: Trait = HiddenTy`: + // `Normalize(::Assoc -> Ty) :- AssocWC, WC` + // where `WC` and `AssocWC` are the where clauses for `AnotherTrait` and `AnotherTrait::Assoc` + // respectively. 
+ let (super_trait_refs, super_trait_proj) = + super_traits(db, trait_ref.trait_id).substitute(interner, &trait_ref.substitution); + + for q_super_trait_ref in super_trait_refs { + builder.push_binders(q_super_trait_ref, |builder, super_trait_ref| { + let trait_datum = db.trait_datum(super_trait_ref.trait_id); + let wc = trait_datum + .where_clauses() + .cloned() + .substitute(interner, &super_trait_ref.substitution); + builder.push_clause(super_trait_ref, wc); + }); + } + + for q_super_trait_proj in super_trait_proj { + builder.push_binders(q_super_trait_proj, |builder, (proj, ty)| { + let assoc_ty_datum = db.associated_ty_data(proj.associated_ty_id); + let trait_datum = db.trait_datum(assoc_ty_datum.trait_id); + let assoc_wc = assoc_ty_datum + .binders + .map_ref(|b| &b.where_clauses) + .into_iter() + .map(|wc| wc.cloned().substitute(interner, &proj.substitution)); + + let impl_params = db.trait_parameters_from_projection(&proj); + let impl_wc = trait_datum + .where_clauses() + .into_iter() + .map(|wc| wc.cloned().substitute(interner, impl_params)); + builder.push_clause( + Normalize { + alias: AliasTy::Projection(proj.clone()), + ty, + }, + impl_wc.chain(assoc_wc), + ); + }); + } +} + +/// Returns super-`TraitRef`s and super-`Projection`s that are quantified over the parameters of +/// `trait_id` and relevant higher-ranked lifetimes. The outer `Binders` is for the former and the +/// inner `Binders` is for the latter. +/// +/// For example, given the following trait definitions and `C` as `trait_id`, +/// +/// ``` +/// trait A<'a, T> {} +/// trait B<'b, U> where Self: for<'x> A<'x, U> {} +/// trait C<'c, V> where Self: B<'c, V> {} +/// ``` +/// +/// returns the following quantified `TraitRef`s. 
+/// +/// ```notrust +/// for { +/// for<'x> { Self: A<'x, V> } +/// for<> { Self: B<'c, V> } +/// for<> { Self: C<'c, V> } +/// } +/// ``` +pub(crate) fn super_traits( + db: &dyn RustIrDatabase, + trait_id: TraitId, +) -> Binders<( + Vec>>, + Vec, Ty)>>, +)> { + let interner = db.interner(); + let mut seen_traits = FxHashSet::default(); + let trait_datum = db.trait_datum(trait_id); + let trait_ref = Binders::empty( + db.interner(), + TraitRef { + trait_id, + substitution: trait_datum + .binders + .identity_substitution(interner) + .shifted_in(interner), + }, + ); + let mut trait_refs = Vec::new(); + let mut aliases = Vec::new(); + go( + db, + trait_ref, + &mut seen_traits, + &mut trait_refs, + &mut aliases, + ); + + fn go( + db: &dyn RustIrDatabase, + trait_ref: Binders>, + seen_traits: &mut FxHashSet>, + trait_refs: &mut Vec>>, + aliases: &mut Vec, Ty)>>, + ) { + let interner = db.interner(); + let trait_id = trait_ref.skip_binders().trait_id; + // Avoid cycles + if !seen_traits.insert(trait_id) { + return; + } + trait_refs.push(trait_ref.clone()); + let trait_datum = db.trait_datum(trait_id); + let (super_trait_refs, super_trait_projs): (Vec<_>, Vec<_>) = trait_datum + .binders + .map_ref(|td| { + td.where_clauses + .iter() + .filter(|qwc| { + let trait_ref = match qwc.skip_binders() { + WhereClause::Implemented(tr) => tr.clone(), + WhereClause::AliasEq(AliasEq { + alias: AliasTy::Projection(p), + .. + }) => db.trait_ref_from_projection(p), + _ => return false, + }; + // We're looking for where clauses of the form + // `Self: Trait` or `::Assoc`. `Self` is + // ^1.0 because we're one binder in. 
+ trait_ref.self_type_parameter(interner).bound_var(interner) + == Some(BoundVar::new(DebruijnIndex::ONE, 0)) + }) + .cloned() + .partition_map(|qwc| { + let (value, binders) = qwc.into_value_and_skipped_binders(); + + match value { + WhereClause::Implemented(tr) => Either::Left(Binders::new(binders, tr)), + WhereClause::AliasEq(AliasEq { + alias: AliasTy::Projection(p), + ty, + }) => Either::Right(Binders::new(binders, (p, ty))), + _ => unreachable!(), + } + }) + }) + // we skip binders on the trait_ref here and add them to the binders + // on the trait ref in the loop below. We could probably avoid this if + // we could turn the `Binders>` into a `Vec>` easily. + .substitute(db.interner(), &trait_ref.skip_binders().substitution); + for q_super_trait_ref in super_trait_refs { + // So now we need to combine the binders of trait_ref with the + // binders of super_trait_ref. + let actual_binders = Binders::new(trait_ref.binders.clone(), q_super_trait_ref); + let q_super_trait_ref = actual_binders.fuse_binders(interner); + go(db, q_super_trait_ref, seen_traits, trait_refs, aliases); + } + for q_super_trait_proj in super_trait_projs { + let actual_binders = Binders::new(trait_ref.binders.clone(), q_super_trait_proj); + let q_super_trait_proj = actual_binders.fuse_binders(interner); + aliases.push(q_super_trait_proj); + } + seen_traits.remove(&trait_id); + } + + Binders::new(trait_datum.binders.binders.clone(), (trait_refs, aliases)) +} diff --git a/chalk-solve/src/coherence.rs b/chalk-solve/src/coherence.rs new file mode 100644 index 00000000000..5528b9a21fd --- /dev/null +++ b/chalk-solve/src/coherence.rs @@ -0,0 +1,149 @@ +use indexmap::IndexMap; +use petgraph::prelude::*; +use rustc_hash::FxHashMap; + +use crate::solve::Solver; +use crate::RustIrDatabase; +use chalk_ir::interner::Interner; +use chalk_ir::{self, ImplId, TraitId}; +use std::fmt; +use std::sync::Arc; + +pub mod orphan; +mod solve; + +pub struct CoherenceSolver<'a, I: Interner> { + db: &'a dyn 
RustIrDatabase, + solver_builder: &'a dyn Fn() -> Box>, + trait_id: TraitId, +} + +#[derive(Debug)] +pub enum CoherenceError { + OverlappingImpls(TraitId), + FailedOrphanCheck(TraitId), +} + +impl fmt::Display for CoherenceError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + CoherenceError::OverlappingImpls(id) => { + write!(f, "overlapping impls of trait `{:?}`", id) + } + CoherenceError::FailedOrphanCheck(id) => { + write!(f, "impl for trait `{:?}` violates the orphan rules", id) + } + } + } +} + +impl std::error::Error for CoherenceError {} + +/// Stores the specialization priorities for a set of impls. +/// This basically encodes which impls specialize one another. +#[derive(Clone, Debug, Default, PartialEq, Eq)] +pub struct SpecializationPriorities { + map: IndexMap, SpecializationPriority>, +} + +impl SpecializationPriorities { + pub fn new() -> Self { + Self { + map: IndexMap::new(), + } + } + + /// Lookup the priority of an impl in the set (panics if impl is not in set). + pub fn priority(&self, impl_id: ImplId) -> SpecializationPriority { + self.map[&impl_id] + } + + /// Store the priority of an impl (used during construction). + /// Panics if we have already stored the priority for this impl. + fn insert(&mut self, impl_id: ImplId, p: SpecializationPriority) { + let old_value = self.map.insert(impl_id, p); + assert!(old_value.is_none()); + } +} + +/// Impls with higher priority take precedence over impls with lower +/// priority (if both apply to the same types). Impls with equal +/// priority should never apply to the same set of input types. +#[derive(Copy, Clone, Default, PartialOrd, Ord, PartialEq, Eq, Debug)] +pub struct SpecializationPriority(usize); + +impl<'a, I> CoherenceSolver<'a, I> +where + I: Interner, +{ + /// Constructs a new `CoherenceSolver`. 
+ pub fn new( + db: &'a dyn RustIrDatabase, + solver_builder: &'a dyn Fn() -> Box>, + trait_id: TraitId, + ) -> Self { + Self { + db, + solver_builder, + trait_id, + } + } + + pub fn specialization_priorities( + &self, + ) -> Result>, CoherenceError> { + let mut result = SpecializationPriorities::::new(); + + let forest = self.build_specialization_forest()?; + + // TypeVisitable every root in the forest & set specialization + // priority for the tree that is the root of. + for root_idx in forest.externals(Direction::Incoming) { + self.set_priorities(root_idx, &forest, 0, &mut result); + } + + Ok(Arc::new(result)) + } + + // Build the forest of specialization relationships. + fn build_specialization_forest(&self) -> Result, ()>, CoherenceError> { + let mut forest = DiGraph::new(); + let mut node_map = FxHashMap::default(); + + // Find all specializations. Record them in the forest + // by adding an edge from the less special to the more special. + self.visit_specializations_of_trait(|less_special, more_special| { + let less_special_node = *node_map + .entry(less_special) + .or_insert_with(|| forest.add_node(less_special)); + let more_special_node = *node_map + .entry(more_special) + .or_insert_with(|| forest.add_node(more_special)); + forest.update_edge(less_special_node, more_special_node, ()); + })?; + + Ok(forest) + } + + // Recursively set priorities for those node and all of its children. 
+ fn set_priorities( + &self, + idx: NodeIndex, + forest: &Graph, ()>, + p: usize, + map: &mut SpecializationPriorities, + ) { + // Get the impl datum recorded at this node and reset its priority + { + let impl_id = forest + .node_weight(idx) + .expect("index should be a valid index into graph"); + map.insert(*impl_id, SpecializationPriority(p)); + } + + // TypeVisitable all children of this node, setting their priority to this + 1 + for child_idx in forest.neighbors(idx) { + self.set_priorities(child_idx, forest, p + 1, map); + } + } +} diff --git a/chalk-solve/src/coherence/orphan.rs b/chalk-solve/src/coherence/orphan.rs new file mode 100644 index 00000000000..f8e06b901ec --- /dev/null +++ b/chalk-solve/src/coherence/orphan.rs @@ -0,0 +1,44 @@ +use crate::coherence::CoherenceError; +use crate::ext::GoalExt; +use crate::solve::Solver; +use crate::RustIrDatabase; +use chalk_ir::cast::*; +use chalk_ir::interner::Interner; +use chalk_ir::*; +use tracing::{debug, instrument}; + +// Test if a local impl violates the orphan rules. +// +// For `impl Trait for MyType` we generate: +// +// forall { LocalImplAllowed(MyType: Trait) } +// +// This must be provable in order to pass the orphan check. 
+#[instrument(level = "debug", skip(db, solver))] +pub fn perform_orphan_check( + db: &dyn RustIrDatabase, + solver: &mut dyn Solver, + impl_id: ImplId, +) -> Result<(), CoherenceError> { + let impl_datum = db.impl_datum(impl_id); + debug!(?impl_datum); + + let impl_allowed: Goal = impl_datum + .binders + .map_ref(|bound_impl| { + // Ignoring the polarization of the impl's polarized trait ref + DomainGoal::LocalImplAllowed(bound_impl.trait_ref.clone()) + }) + .cast(db.interner()); + + let canonical_goal = &impl_allowed.into_closed_goal(db.interner()); + let is_allowed = solver.solve(db, canonical_goal).is_some(); + debug!("overlaps = {:?}", is_allowed); + + if !is_allowed { + let trait_id = impl_datum.trait_id(); + return Err(CoherenceError::FailedOrphanCheck(trait_id)); + } + + Ok(()) +} diff --git a/chalk-solve/src/coherence/solve.rs b/chalk-solve/src/coherence/solve.rs new file mode 100644 index 00000000000..57dd81061dc --- /dev/null +++ b/chalk-solve/src/coherence/solve.rs @@ -0,0 +1,260 @@ +use crate::coherence::{CoherenceError, CoherenceSolver}; +use crate::debug_span; +use crate::ext::*; +use crate::rust_ir::*; +use crate::{goal_builder::GoalBuilder, Solution}; +use chalk_ir::cast::*; +use chalk_ir::fold::shift::Shift; +use chalk_ir::interner::Interner; +use chalk_ir::*; +use itertools::Itertools; +use tracing::{debug, instrument}; + +impl CoherenceSolver<'_, I> { + pub(super) fn visit_specializations_of_trait( + &self, + mut record_specialization: impl FnMut(ImplId, ImplId), + ) -> Result<(), CoherenceError> { + // Ignore impls for marker traits as they are allowed to overlap. + let trait_datum = self.db.trait_datum(self.trait_id); + if trait_datum.flags.marker { + return Ok(()); + } + + // Iterate over every pair of impls for the same trait. 
+ let impls = self.db.local_impls_to_coherence_check(self.trait_id); + for (l_id, r_id) in impls.into_iter().tuple_combinations() { + let lhs = &self.db.impl_datum(l_id); + let rhs = &self.db.impl_datum(r_id); + + // Two negative impls never overlap. + if !lhs.is_positive() && !rhs.is_positive() { + continue; + } + + // Check if the impls overlap, then if they do, check if one specializes + // the other. Note that specialization can only run one way - if both + // specialization checks return *either* true or false, that's an error. + if !self.disjoint(lhs, rhs) { + match (self.specializes(l_id, r_id), self.specializes(r_id, l_id)) { + (true, false) => record_specialization(l_id, r_id), + (false, true) => record_specialization(r_id, l_id), + (_, _) => { + return Err(CoherenceError::OverlappingImpls(self.trait_id)); + } + } + } + } + + Ok(()) + } + + // Test if the set of types that these two impls apply to overlap. If the test succeeds, these + // two impls are disjoint. + // + // We combine the binders of the two impls & treat them as existential quantifiers. Then we + // attempt to unify the input types to the trait provided by each impl, as well as prove that + // the where clauses from both impls all hold. At the end, we apply the `compatible` modality + // and negate the query. Negating the query means that we are asking chalk to prove that no + // such overlapping impl exists. By applying `compatible { G }`, chalk attempts to prove that + // "there exists a compatible world where G is provable." When we negate compatible, it turns + // into the statement "for all compatible worlds, G is not provable." This is exactly what we + // want since we want to ensure that there is no overlap in *all* compatible worlds, not just + // that there is no overlap in *some* compatible world. 
+ // + // Examples: + // + // Impls: + // impl Foo for T { } // rhs + // impl Foo for i32 { } // lhs + // Generates: + // not { compatible { exists { exists<> { T = i32 } } } } + // + // Impls: + // impl Foo for Vec { } // rhs + // impl Foo for Vec { } // lhs + // Generates: + // not { compatible { exists { exists { Vec = Vec, T1 = T2 } } } } + // + // Impls: + // impl Foo for Vec where T: Bar { } + // impl Foo for Vec where U: Baz { } + // Generates: + // not { compatible { exists { exists { Vec = Vec, T: Bar, U: Baz } } } } + // + #[instrument(level = "debug", skip(self))] + fn disjoint(&self, lhs: &ImplDatum, rhs: &ImplDatum) -> bool { + let interner = self.db.interner(); + + let (lhs_binders, lhs_bound) = lhs.binders.as_ref().into(); + let (rhs_binders, rhs_bound) = rhs.binders.as_ref().into(); + + // Upshift the rhs variables in params to account for the joined binders + let lhs_params = lhs_bound + .trait_ref + .substitution + .as_slice(interner) + .iter() + .cloned(); + let rhs_params = rhs_bound + .trait_ref + .substitution + .as_slice(interner) + .iter() + .map(|param| param.clone().shifted_in(interner)); + + // Create an equality goal for every input type the trait, attempting + // to unify the inputs to both impls with one another + let params_goals = lhs_params + .zip(rhs_params) + .map(|(a, b)| GoalData::EqGoal(EqGoal { a, b }).intern(interner)); + + // Upshift the rhs variables in where clauses + let lhs_where_clauses = lhs_bound.where_clauses.iter().cloned(); + let rhs_where_clauses = rhs_bound + .where_clauses + .iter() + .map(|wc| wc.clone().shifted_in(interner)); + + // Create a goal for each clause in both where clauses + let wc_goals = lhs_where_clauses + .chain(rhs_where_clauses) + .map(|wc| wc.cast(interner)); + + // Join all the goals we've created together with And, then quantify them + // over the joined binders. This is our query. 
+ let goal = Box::new(Goal::all(interner, params_goals.chain(wc_goals))) + .quantify(interner, QuantifierKind::Exists, lhs_binders) + .quantify(interner, QuantifierKind::Exists, rhs_binders) + .compatible(interner) + .negate(interner); + + let canonical_goal = &goal.into_closed_goal(interner); + let mut fresh_solver = (self.solver_builder)(); + let solution = fresh_solver.solve(self.db, canonical_goal); + let result = match solution { + // Goal was proven with a unique solution, so no impl was found that causes these two + // to overlap + Some(Solution::Unique(_)) => true, + // Goal was ambiguous, so there *may* be overlap + Some(Solution::Ambig(_)) | + // Goal cannot be proven, so there is some impl that causes overlap + None => false, + }; + debug!("overlaps: result = {:?}", result); + result + } + + // Creates a goal which, if provable, means "more special" impl specializes the "less special" one. + // + // # General rule + // + // Given the more special impl: + // + // ```ignore + // impl SomeTrait for T0 where WC_more + // ``` + // + // and less special impl + // + // ```ignore + // impl SomeTrait for U0 where WC_less + // ``` + // + // create the goal: + // + // ```ignore + // forall { + // if (WC_more) {} + // exists { + // T0 = U0, ..., Tm = Um, + // WC_less + // } + // } + // } + // ``` + // + // # Example + // + // Given: + // + // * more: `impl Foo for Vec` + // * less: `impl Foo for U` + // + // Resulting goal: + // + // ```ignore + // forall { + // if (T: Clone) { + // exists { + // Vec = U, U: Clone + // } + // } + // } + // ``` + #[instrument(level = "debug", skip(self))] + fn specializes(&self, less_special_id: ImplId, more_special_id: ImplId) -> bool { + let more_special = &self.db.impl_datum(more_special_id); + let less_special = &self.db.impl_datum(less_special_id); + debug_span!("specializes", ?less_special, ?more_special); + + let interner = self.db.interner(); + + let gb = &mut GoalBuilder::new(self.db); + + // forall { ... 
} + let goal = gb.forall( + &more_special.binders, + less_special_id, + |gb, _, more_special_impl, less_special_id| { + // if (WC_more) { ... } + gb.implies(more_special_impl.where_clauses.iter().cloned(), |gb| { + let less_special = &gb.db().impl_datum(less_special_id); + + // exists { ... } + gb.exists( + &less_special.binders, + more_special_impl.trait_ref.clone(), + |gb, _, less_special_impl, more_special_trait_ref| { + let interner = gb.interner(); + + // T0 = U0, ..., Tm = Um + let params_goals = more_special_trait_ref + .substitution + .as_slice(interner) + .iter() + .cloned() + .zip( + less_special_impl + .trait_ref + .substitution + .as_slice(interner) + .iter() + .cloned(), + ) + .map(|(a, b)| GoalData::EqGoal(EqGoal { a, b }).intern(interner)); + + // = where clauses from the less special impl + let less_special_wc_goals = less_special_impl + .where_clauses + .iter() + .cloned() + .casted(interner); + + // && WC_less + gb.all(params_goals.chain(less_special_wc_goals)) + }, + ) + }) + }, + ); + + let canonical_goal = &goal.into_closed_goal(interner); + let mut fresh_solver = (self.solver_builder)(); + let result = fresh_solver.has_unique_solution(self.db, canonical_goal); + + debug!("specializes: result = {:?}", result); + + result + } +} diff --git a/chalk-solve/src/coinductive_goal.rs b/chalk-solve/src/coinductive_goal.rs new file mode 100644 index 00000000000..cdb5cca108d --- /dev/null +++ b/chalk-solve/src/coinductive_goal.rs @@ -0,0 +1,43 @@ +use crate::RustIrDatabase; +use chalk_ir::interner::Interner; +use chalk_ir::*; + +pub trait IsCoinductive { + /// A goal G has coinductive semantics if proving G is allowed to + /// assume G is true (very roughly speaking). In the case of + /// chalk-ir, this is true for goals of the form `T: AutoTrait`, + /// or if it is of the form `WellFormed(T: Trait)` where `Trait` + /// is any trait. 
The latter is needed for dealing with WF + /// requirements and cyclic traits, which generates cycles in the + /// proof tree which must not be rejected but instead must be + /// treated as a success. + fn is_coinductive(&self, db: &dyn RustIrDatabase) -> bool; +} + +impl IsCoinductive for Goal { + fn is_coinductive(&self, db: &dyn RustIrDatabase) -> bool { + let interner = db.interner(); + match self.data(interner) { + GoalData::DomainGoal(DomainGoal::Holds(wca)) => match wca { + WhereClause::Implemented(tr) => { + db.trait_datum(tr.trait_id).is_auto_trait() + || db.trait_datum(tr.trait_id).is_coinductive_trait() + } + WhereClause::AliasEq(..) => false, + WhereClause::LifetimeOutlives(..) => false, + WhereClause::TypeOutlives(..) => false, + }, + GoalData::DomainGoal(DomainGoal::WellFormed(WellFormed::Trait(..))) => true, + GoalData::Quantified(QuantifierKind::ForAll, goal) => { + goal.skip_binders().is_coinductive(db) + } + _ => false, + } + } +} + +impl IsCoinductive for UCanonical>> { + fn is_coinductive(&self, db: &dyn RustIrDatabase) -> bool { + self.canonical.value.goal.is_coinductive(db) + } +} diff --git a/chalk-solve/src/display.rs b/chalk-solve/src/display.rs new file mode 100644 index 00000000000..b608232d432 --- /dev/null +++ b/chalk-solve/src/display.rs @@ -0,0 +1,224 @@ +use std::{ + borrow::Borrow, + fmt::{Display, Result}, + sync::Arc, +}; + +use crate::rust_ir::*; +use chalk_ir::{interner::Interner, *}; +use itertools::Itertools; + +use crate::{logging_db::RecordedItemId, split::Split, RustIrDatabase}; + +#[macro_use] +mod utils; + +mod bounds; +mod identifiers; +mod items; +mod render_trait; +mod state; +mod stub; +mod ty; + +use self::render_trait::*; +pub use self::state::*; +pub use self::utils::sanitize_debug_name; + +use self::utils::as_display; + +fn write_item(f: &mut F, ws: &InternalWriterState<'_, I>, v: &T) -> Result +where + F: std::fmt::Write + ?Sized, + I: Interner, + T: RenderAsRust, +{ + writeln!(f, "{}", v.display(ws)) +} + +/// 
Writes stubs for items which were referenced by name, but for which we +/// didn't directly access. For instance, traits mentioned in where bounds which +/// are only usually checked during well-formedness, when we weren't recording +/// well-formedness. +/// +/// The "stub" nature of this means it writes output with the right names and +/// the right number of generics, but nothing else. Where clauses, bounds, and +/// fields are skipped. Associated types are ???? skipped. +/// +/// `RecordedItemId::Impl` is not supported. +pub fn write_stub_items(f: &mut F, ws: &WriterState, ids: T) -> Result +where + F: std::fmt::Write + ?Sized, + I: Interner, + DB: RustIrDatabase, + P: Borrow, + T: IntoIterator>, +{ + let wrapped_db = &ws.wrap_db_ref(|db| stub::StubWrapper::new(db.borrow())); + + write_items(f, wrapped_db, ids) +} + +/// Writes out each item recorded by a [`LoggingRustIrDatabase`]. +/// +/// [`LoggingRustIrDatabase`]: crate::logging_db::LoggingRustIrDatabase +pub fn write_items(f: &mut F, ws: &WriterState, ids: T) -> Result +where + F: std::fmt::Write + ?Sized, + I: Interner, + DB: RustIrDatabase, + P: Borrow, + T: IntoIterator>, +{ + for id in ids { + match id { + RecordedItemId::Impl(id) => { + let v = ws.db().impl_datum(id); + write_item(f, &InternalWriterState::new(ws), &*v)?; + } + RecordedItemId::Adt(id) => { + let v = ws.db().adt_datum(id); + write_item(f, &InternalWriterState::new(ws), &*v)?; + } + RecordedItemId::Trait(id) => { + let v = ws.db().trait_datum(id); + write_item(f, &InternalWriterState::new(ws), &*v)?; + } + RecordedItemId::OpaqueTy(id) => { + let v = ws.db().opaque_ty_data(id); + write_item(f, &InternalWriterState::new(ws), &*v)?; + } + RecordedItemId::FnDef(id) => { + let v = ws.db().fn_def_datum(id); + write_item(f, &InternalWriterState::new(ws), &*v)?; + } + RecordedItemId::Coroutine(id) => { + let coroutine = ws.db().coroutine_datum(id); + let witness = ws.db().coroutine_witness_datum(id); + write_item(f, 
&InternalWriterState::new(ws), &(&*coroutine, &*witness))?; + } + } + } + Ok(()) +} + +/// Displays a set of bounds, all targeting `Self`, as just the trait names, +/// separated by `+`. +/// +/// For example, a list of quantified where clauses which would normally be +/// displayed as: +/// +/// ```notrust +/// Self: A, Self: B, Self: C +/// ``` +/// +/// Is instead displayed by this function as: +/// +/// ```notrust +/// A + B + C +/// ``` +/// +/// Shared between the `Trait` in `dyn Trait` and [`OpaqueTyDatum`] bounds. +fn display_self_where_clauses_as_bounds<'a, I: Interner>( + s: &'a InternalWriterState<'a, I>, + bounds: &'a [QuantifiedWhereClause], +) -> impl Display + 'a { + as_display(move |f| { + let interner = s.db().interner(); + write!( + f, + "{}", + bounds + .iter() + .map(|bound| { + as_display(|f| { + // each individual trait can have a forall + let s = &s.add_debrujin_index(None); + if !bound.binders.is_empty(interner) { + write!( + f, + "forall<{}> ", + s.binder_var_display(&bound.binders) + .collect::>() + .join(", ") + )?; + } + match &bound.skip_binders() { + WhereClause::Implemented(trait_ref) => display_type_with_generics( + s, + trait_ref.trait_id, + &trait_ref.substitution.as_slice(interner)[1..], + ) + .fmt(f), + WhereClause::AliasEq(alias_eq) => match &alias_eq.alias { + AliasTy::Projection(projection_ty) => { + let (assoc_ty_datum, trait_params, assoc_type_params) = + s.db().split_projection(projection_ty); + display_trait_with_assoc_ty_value( + s, + assoc_ty_datum, + &trait_params[1..], + assoc_type_params, + &alias_eq.ty, + ) + .fmt(f) + } + AliasTy::Opaque(opaque) => opaque.display(s).fmt(f), + }, + WhereClause::LifetimeOutlives(lifetime) => lifetime.display(s).fmt(f), + WhereClause::TypeOutlives(ty) => ty.display(s).fmt(f), + } + }) + .to_string() + }) + .format(" + ") + ) + }) +} + +/// Displays a type with its parameters - something like `AsRef`, +/// OpaqueTyName, or `AdtName`. 
+/// +/// This is shared between where bounds, OpaqueTy, & dyn Trait. +fn display_type_with_generics<'a, I: Interner>( + s: &'a InternalWriterState<'a, I>, + trait_name: impl RenderAsRust + 'a, + trait_params: impl IntoIterator> + 'a, +) -> impl Display + 'a { + use std::fmt::Write; + let trait_params = trait_params.into_iter().map(|param| param.display(s)); + let mut trait_params_str = String::new(); + write_joined_non_empty_list!(trait_params_str, "<{}>", trait_params, ", ").unwrap(); + as_display(move |f| write!(f, "{}{}", trait_name.display(s), trait_params_str)) +} + +/// Displays a trait with its parameters and a single associated type - +/// something like `IntoIterator`. +/// +/// This is shared between where bounds & dyn Trait. +fn display_trait_with_assoc_ty_value<'a, I: Interner>( + s: &'a InternalWriterState<'a, I>, + assoc_ty_datum: Arc>, + trait_params: &'a [GenericArg], + assoc_ty_params: &'a [GenericArg], + assoc_ty_value: &'a Ty, +) -> impl Display + 'a { + as_display(move |f| { + write!(f, "{}<", assoc_ty_datum.trait_id.display(s))?; + write_joined_non_empty_list!( + f, + "{}, ", + trait_params.iter().map(|param| param.display(s)), + ", " + )?; + write!(f, "{}", assoc_ty_datum.id.display(s))?; + write_joined_non_empty_list!( + f, + "<{}>", + assoc_ty_params.iter().map(|param| param.display(s)), + ", " + )?; + write!(f, "={}>", assoc_ty_value.display(s))?; + Ok(()) + }) +} diff --git a/chalk-solve/src/display/bounds.rs b/chalk-solve/src/display/bounds.rs new file mode 100644 index 00000000000..3c6bfde14f4 --- /dev/null +++ b/chalk-solve/src/display/bounds.rs @@ -0,0 +1,168 @@ +//! Writer logic for `where` clauses and other bounds. +//! +//! Contains logic for writing the various forms of `Foo: Bar`. 
+use std::fmt::{Display, Formatter, Result}; + +use crate::rust_ir::*; +use chalk_ir::{interner::Interner, *}; +use itertools::Itertools; + +use super::{ + display_trait_with_assoc_ty_value, display_type_with_generics, render_trait::RenderAsRust, + state::InternalWriterState, +}; +use crate::split::Split; + +impl RenderAsRust for InlineBound { + fn fmt(&self, s: &InternalWriterState<'_, I>, f: &'_ mut Formatter<'_>) -> Result { + match self { + // Foo: Vec + InlineBound::TraitBound(trait_bound) => trait_bound.fmt(s, f), + // Foo: Iterator + InlineBound::AliasEqBound(eq_bound) => eq_bound.fmt(s, f), + } + } +} + +impl RenderAsRust for TraitBound { + fn fmt(&self, s: &InternalWriterState<'_, I>, f: &'_ mut Formatter<'_>) -> Result { + display_type_with_generics(s, self.trait_id, &self.args_no_self).fmt(f) + } +} + +impl RenderAsRust for AliasEqBound { + fn fmt(&self, s: &InternalWriterState<'_, I>, f: &'_ mut Formatter<'_>) -> Result { + display_trait_with_assoc_ty_value( + s, + s.db().associated_ty_data(self.associated_ty_id), + &self.trait_bound.args_no_self, + &self.parameters, + &self.value, + ) + .fmt(f) + } +} + +impl RenderAsRust for QuantifiedWhereClause { + fn fmt(&self, s: &InternalWriterState<'_, I>, f: &'_ mut Formatter<'_>) -> Result { + let interner = s.db().interner(); + let s = &s.add_debrujin_index(None); + if !self.binders.is_empty(interner) { + write!( + f, + "forall<{}> ", + s.binder_var_display(&self.binders).format(", ") + )?; + } + self.skip_binders().fmt(s, f) + } +} + +impl RenderAsRust for QuantifiedInlineBound { + fn fmt(&self, s: &InternalWriterState<'_, I>, f: &'_ mut Formatter<'_>) -> Result { + let interner = s.db().interner(); + let s = &s.add_debrujin_index(None); + if !self.binders.is_empty(interner) { + write!( + f, + "forall<{}> ", + s.binder_var_display(&self.binders).format(", ") + )?; + } + self.skip_binders().fmt(s, f) + } +} + +impl RenderAsRust for Vec> { + fn fmt(&self, s: &InternalWriterState<'_, I>, f: &'_ mut 
Formatter<'_>) -> Result { + write!( + f, + "{}", + self.iter() + .map(|where_clause| { format!("{}{}", s.indent(), where_clause.display(s)) }) + .format(",\n") + )?; + Ok(()) + } +} + +impl RenderAsRust for WhereClause { + fn fmt(&self, s: &InternalWriterState<'_, I>, f: &'_ mut Formatter<'_>) -> Result { + match self { + WhereClause::Implemented(trait_ref) => trait_ref.fmt(s, f), + WhereClause::AliasEq(alias_eq) => alias_eq.fmt(s, f), + WhereClause::LifetimeOutlives(lifetime) => lifetime.display(s).fmt(f), + WhereClause::TypeOutlives(ty) => ty.display(s).fmt(f), + } + } +} + +/// This renders `TraitRef` as a clause in a where clause, as opposed to its +/// usage in other places. +impl RenderAsRust for TraitRef { + fn fmt(&self, s: &InternalWriterState<'_, I>, f: &'_ mut Formatter<'_>) -> Result { + let interner = s.db().interner(); + write!( + f, + "{}: {}", + self.self_type_parameter(interner).display(s), + display_type_with_generics( + s, + self.trait_id, + &self.substitution.as_slice(interner)[1..] + ) + ) + } +} + +/// This renders `AliasEq` as a clause in a where clause, as opposed to its +/// usage in other places. +impl RenderAsRust for AliasEq { + fn fmt(&self, s: &InternalWriterState<'_, I>, f: &'_ mut Formatter<'_>) -> Result { + // we have: X: Y=D> + // B1, B2, B3, X, A1, A2, A3 are put into alias_eq.alias.substitution + // D is alias_eq.ty + // Z is alias_eq.alias.associated_ty_id + // Y is also packed into alias_eq.alias.associated_ty_id + // Now, we split out A*, Y/Z and B*: + // trait_params is X, A1, A2, A3, + // assoc_type_params is B1, B2, B3, + // assoc_ty_datum stores info about Y and Z. + match &self.alias { + AliasTy::Projection(projection_ty) => { + let (assoc_ty_datum, trait_params, assoc_type_params) = + s.db().split_projection(projection_ty); + // An alternate form might be `<{} as {}<{}>>::{}<{}> = {}` (with same + // parameter ordering). 
This alternate form would require type equality + // constraints (https://github.com/rust-lang/rust/issues/20041). + write!( + f, + "{}: {}", + trait_params[0].display(s), + display_trait_with_assoc_ty_value( + s, + assoc_ty_datum, + &trait_params[1..], + assoc_type_params, + &self.ty + ), + ) + } + AliasTy::Opaque(opaque) => write!(f, "{}", opaque.display(s)), + } + } +} + +impl RenderAsRust for LifetimeOutlives { + fn fmt(&self, s: &InternalWriterState<'_, I>, f: &mut Formatter<'_>) -> Result { + // a': 'b + write!(f, "{}: {}", self.a.display(s), self.b.display(s)) + } +} + +impl RenderAsRust for TypeOutlives { + fn fmt(&self, s: &InternalWriterState<'_, I>, f: &mut Formatter<'_>) -> Result { + // T: 'a + write!(f, "{}: {}", self.ty.display(s), self.lifetime.display(s)) + } +} diff --git a/chalk-solve/src/display/identifiers.rs b/chalk-solve/src/display/identifiers.rs new file mode 100644 index 00000000000..81a08d71b30 --- /dev/null +++ b/chalk-solve/src/display/identifiers.rs @@ -0,0 +1,54 @@ +//! Writer logic for simple IDs +//! +//! `RenderAsRust` impls for identifiers which are either too small or too +//! shared to belong anywhere else belong here. +use std::fmt::{Formatter, Result}; + +use chalk_ir::interner::Interner; +use chalk_ir::*; + +use super::{render_trait::RenderAsRust, state::InternalWriterState}; + +impl RenderAsRust for AdtId { + fn fmt(&self, s: &InternalWriterState<'_, I>, f: &'_ mut Formatter<'_>) -> Result { + // TODO: use debug methods? + write!( + f, + "{}", + s.alias_for_adt_id_name(self.0, s.db().adt_name(*self)) + ) + } +} + +impl RenderAsRust for TraitId { + fn fmt(&self, s: &InternalWriterState<'_, I>, f: &'_ mut Formatter<'_>) -> Result { + // TODO: use debug methods? + write!( + f, + "{}", + s.alias_for_id_name(self.0, s.db().trait_name(*self)) + ) + } +} + +impl RenderAsRust for AssocTypeId { + fn fmt(&self, s: &InternalWriterState<'_, I>, f: &'_ mut Formatter<'_>) -> Result { + // TODO: use debug methods? 
+ write!( + f, + "{}", + s.alias_for_id_name(self.0, s.db().assoc_type_name(*self)) + ) + } +} + +impl RenderAsRust for OpaqueTyId { + fn fmt(&self, s: &InternalWriterState<'_, I>, f: &'_ mut Formatter<'_>) -> Result { + // TODO: use debug methods? + write!( + f, + "{}", + s.alias_for_id_name(self.0, s.db().opaque_type_name(*self)) + ) + } +} diff --git a/chalk-solve/src/display/items.rs b/chalk-solve/src/display/items.rs new file mode 100644 index 00000000000..8e9918709d0 --- /dev/null +++ b/chalk-solve/src/display/items.rs @@ -0,0 +1,509 @@ +//! Writer logic for top level items. +//! +//! Contains code specific to top-level items and other structures specific to a +//! single top-level item. + +use std::fmt::{Formatter, Result}; + +use crate::rust_ir::*; +use crate::split::Split; +use chalk_ir::interner::Interner; +use itertools::Itertools; + +use super::{ + display_self_where_clauses_as_bounds, display_type_with_generics, render_trait::RenderAsRust, + state::InternalWriterState, +}; + +/// Used in `AdtDatum` and `TraitDatum` to write n flags from a flags struct +/// to a writer. Each flag field turns into an if expression + write!, so we can +/// just list the names and not repeat this pattern over and over. +/// +/// This macro will error if unknown flags are specified. This will also error +/// if any flags are missing. +/// +/// # Usage +/// +/// ```rust,ignore +/// write_flags!(f, self.flags, XFlags { red, green }) +/// ``` +/// +/// Turns into +/// +/// ```rust,ignore +/// match self.flags { +/// XFlags { red, green } => { +/// if red { +/// write!(f, "#[red]")?; +/// } +/// if green { +/// write!(f, "#[green]")?; +/// } +/// } +/// } +/// ``` +macro_rules! 
write_flags { + ($writer:ident, $val:expr, $struct_name:ident { $($n:ident $(: $extra_arg:tt)?),* }) => { + match $val { + // if any fields are missing, the destructuring will error + $struct_name { + $($n,)* + } => { + $(if $n { + write!($writer, "#[{}]\n", write_flags!(@default $n $(: $extra_arg)*))?; + })* + } + } + }; + (@default $n:ident : $name:literal) => { + $name + }; + (@default $n:ident ) => { + stringify!($n) + }; +} + +impl<'a, I: Interner> RenderAsRust for (&'a CoroutineDatum, &'a CoroutineWitnessDatum) { + fn fmt(&self, _s: &InternalWriterState<'_, I>, _f: &'_ mut Formatter<'_>) -> Result { + unimplemented!() + } +} + +impl RenderAsRust for AdtDatum { + fn fmt(&self, s: &InternalWriterState<'_, I>, f: &'_ mut Formatter<'_>) -> Result { + // When support for Self in structs is added, self_binding should be + // changed to Some(0) + let s = &s.add_debrujin_index(None); + let value = self.binders.skip_binders(); + + // flags + write_flags!( + f, + self.flags, + AdtFlags { + // Ordering matters + upstream, + fundamental, + phantom_data + } + ); + + // repr + let repr = s.db().adt_repr(self.id); + + if repr.c { + write!(f, "#[repr(C)]")?; + } + if repr.packed { + write!(f, "#[repr(packed)]")?; + } + if let Some(t) = &repr.int { + write!(f, "#[repr({})]", t.display(s))?; + } + + // name + match self.kind { + AdtKind::Struct => write!(f, "struct {}", self.id.display(s),)?, + AdtKind::Enum => write!(f, "enum {}", self.id.display(s),)?, + AdtKind::Union => write!(f, "union {}", self.id.display(s),)?, + } + write_joined_non_empty_list!(f, "<{}>", s.binder_var_display(&self.binders.binders), ", ")?; + + // where clauses + if !value.where_clauses.is_empty() { + let s = &s.add_indent(); + write!(f, "\nwhere\n{}\n", value.where_clauses.display(s))?; + } else { + write!(f, " ")?; + } + + // body + write!(f, "{{")?; + let s = &s.add_indent(); + match self.kind { + AdtKind::Struct | AdtKind::Union => { + write_joined_non_empty_list!( + f, + "\n{}\n", + 
value.variants[0] + .fields + .iter() + .enumerate() + .map(|(idx, field)| { + format!("{}field_{}: {}", s.indent(), idx, field.display(s)) + }), + ",\n" + )?; + } + AdtKind::Enum => { + for (variant_idx, variant) in value.variants.iter().enumerate() { + write!(f, "\n{}variant_{} {{", s.indent(), variant_idx)?; + let s = &s.add_indent(); + write_joined_non_empty_list!( + f, + "\n{}\n", + variant.fields.iter().enumerate().map(|(idx, field)| { + format!("{}field_{}: {}", s.indent(), idx, field.display(s)) + }), + ",\n" + )?; + write!(f, "{}}},", s.indent())?; + } + } + } + write!(f, "}}")?; + Ok(()) + } +} + +impl RenderAsRust for Polarity { + fn fmt(&self, _s: &InternalWriterState<'_, I>, f: &'_ mut Formatter<'_>) -> Result { + if !self.is_positive() { + write!(f, "!")?; + } + Ok(()) + } +} + +impl RenderAsRust for TraitDatum { + fn fmt(&self, s: &InternalWriterState<'_, I>, f: &'_ mut Formatter<'_>) -> Result { + let s = &s.add_debrujin_index(Some(0)); + let value = self.binders.skip_binders(); + + // flags + write_flags!( + f, + self.flags, + TraitFlags { + auto, + marker, + upstream, + fundamental, + non_enumerable, + coinductive + } + ); + + // object safe + if s.db().is_object_safe(self.id) { + writeln!(f, "#[object_safe]")?; + } + + // well-known + if let Some(well_known) = self.well_known { + let name = match well_known { + WellKnownTrait::Sized => "sized", + WellKnownTrait::Copy => "copy", + WellKnownTrait::Clone => "clone", + WellKnownTrait::Drop => "drop", + WellKnownTrait::FnOnce => "fn_once", + WellKnownTrait::FnMut => "fn_mut", + WellKnownTrait::Fn => "fn", + WellKnownTrait::AsyncFnOnce => "async_fn_once", + WellKnownTrait::AsyncFnMut => "async_fn_mut", + WellKnownTrait::AsyncFn => "async_fn", + WellKnownTrait::Unsize => "unsize", + WellKnownTrait::Unpin => "unpin", + WellKnownTrait::CoerceUnsized => "coerce_unsized", + WellKnownTrait::DiscriminantKind => "discriminant_kind", + WellKnownTrait::Coroutine => "coroutine", + WellKnownTrait::DispatchFromDyn 
=> "dispatch_from_dyn", + WellKnownTrait::Tuple => "tuple_trait", + WellKnownTrait::Pointee => "pointee", + WellKnownTrait::FnPtr => "fn_ptr_trait", + WellKnownTrait::Future => "future", + }; + writeln!(f, "#[lang({})]", name)?; + } + + // trait declaration + let binders = s.binder_var_display(&self.binders.binders).skip(1); + write!(f, "trait {}", self.id.display(s))?; + write_joined_non_empty_list!(f, "<{}>", binders, ", ")?; + + // where clauses + if !value.where_clauses.is_empty() { + let s = &s.add_indent(); + write!(f, "\nwhere\n{}\n", value.where_clauses.display(s))?; + } else { + write!(f, " ")?; + } + + // body + write!(f, "{{")?; + let s = &s.add_indent(); + write_joined_non_empty_list!( + f, + "\n{}\n", + self.associated_ty_ids.iter().map(|assoc_ty_id| { + let assoc_ty_data = s.db().associated_ty_data(*assoc_ty_id); + format!("{}{}", s.indent(), (*assoc_ty_data).display(s)) + }), + "\n" + )?; + write!(f, "}}")?; + Ok(()) + } +} + +impl RenderAsRust for ImplDatum { + fn fmt(&self, s: &InternalWriterState<'_, I>, f: &'_ mut Formatter<'_>) -> Result { + let interner = s.db().interner(); + + let s = &s.add_debrujin_index(None); + let binders = s.binder_var_display(&self.binders.binders); + let value = self.binders.skip_binders(); + + // annotations + // #[upstream] + // ^^^^^^^^^^^ + // impl Foo for Bar where T: Baz { } + if self.impl_type == ImplType::External { + writeln!(f, "#[upstream]")?; + } + + // impl keyword + // impl Foo for Bar where T: Baz { } + // ^^^^ + write!(f, "impl")?; + let trait_ref = &value.trait_ref; + + // generic binders + // impl Foo for Bar where T: Baz + // ^^^ + write_joined_non_empty_list!(f, "<{}>", binders, ", ")?; + + // trait, type and parameters + // impl Foo for Bar where T: Baz { } + // ^^^^^^^^^^^^^^^^^ + let full_trait_name = display_type_with_generics( + s, + trait_ref.trait_id, + // Ignore automatically added Self parameter by skipping first parameter + &trait_ref.substitution.as_slice(interner)[1..], + ); + write!( + 
f, + " {}{} for {}", + self.polarity.display(s), + full_trait_name, + trait_ref.self_type_parameter(interner).display(s) + )?; + + // where clauses + // impl Foo for Bar where T: Baz { } + // ^^^^^^^^^^^^ + if !value.where_clauses.is_empty() { + let s = &s.add_indent(); + write!(f, "\nwhere\n{}\n", value.where_clauses.display(s))?; + } else { + write!(f, " ")?; + } + + // body + // impl Foo for Bar where T: Baz { } + // ^^^ + write!(f, "{{")?; + { + let s = &s.add_indent(); + let assoc_ty_values = self.associated_ty_value_ids.iter().map(|assoc_ty_value| { + s.db() + .associated_ty_value(*assoc_ty_value) + .display(s) + .to_string() + }); + write_joined_non_empty_list!(f, "\n{}\n", assoc_ty_values, "\n")?; + } + write!(f, "}}")?; + Ok(()) + } +} + +impl RenderAsRust for OpaqueTyDatum { + fn fmt(&self, s: &InternalWriterState<'_, I>, f: &mut Formatter<'_>) -> Result { + let s = &s.add_debrujin_index(None); + let bounds = self.bound.skip_binders(); + write!(f, "opaque type {}", self.opaque_ty_id.display(s))?; + write_joined_non_empty_list!(f, "<{}>", s.binder_var_display(&self.bound.binders), ", ")?; + { + let s = &s.add_debrujin_index(Some(0)); + let clauses = bounds.bounds.skip_binders(); + write!( + f, + ": {} = ", + display_self_where_clauses_as_bounds(s, clauses) + )?; + } + write!( + f, + "{};", + s.db().hidden_opaque_type(self.opaque_ty_id).display(s) + )?; + Ok(()) + } +} + +impl RenderAsRust for AssociatedTyDatum { + fn fmt(&self, s: &InternalWriterState<'_, I>, f: &'_ mut Formatter<'_>) -> Result { + // In lowering, a completely new empty environment is created for each + // AssociatedTyDatum, and it's given generic parameters for each generic + // parameter that its trait had. We want to map the new binders for + // those generic parameters back into their original names. 
To do that, + // first find their original names (trait_binder_names), then the names + // they have inside the AssociatedTyDatum (assoc_ty_names_for_trait_params), + // and then add that mapping to the WriterState when writing bounds and + // where clauses. + let trait_datum = s.db().trait_datum(self.trait_id); + // inverted Debrujin indices for the trait's parameters in the trait + // environment + let trait_param_names_in_trait_env = s.binder_var_indices(&trait_datum.binders.binders); + let s = &s.add_debrujin_index(None); + // inverted Debrujin indices for the trait's parameters in the + // associated type environment + let param_names_in_assoc_ty_env = s + .binder_var_indices(&self.binders.binders) + .collect::>(); + // inverted Debrujin indices to render the trait's parameters in the + // associated type environment + let (trait_param_names_in_assoc_ty_env, _) = s + .db() + .split_associated_ty_parameters(¶m_names_in_assoc_ty_env, self); + + let s = &s.add_parameter_mapping( + trait_param_names_in_assoc_ty_env.iter().copied(), + trait_param_names_in_trait_env, + ); + + // rendered names for the associated type's generics in the associated + // type environment + let binder_display_in_assoc_ty = s + .binder_var_display(&self.binders.binders) + .collect::>(); + + let (_, assoc_ty_params) = s + .db() + .split_associated_ty_parameters(&binder_display_in_assoc_ty, self); + write!(f, "type {}", self.id.display(s))?; + write_joined_non_empty_list!(f, "<{}>", assoc_ty_params, ", ")?; + + let datum_bounds = &self.binders.skip_binders(); + + if !datum_bounds.bounds.is_empty() { + write!(f, ": ")?; + } + + // bounds is `A: V, B: D, C = E`? + // type Foo: X + Y + Z; + let bounds = datum_bounds + .bounds + .iter() + .map(|bound| bound.display(s).to_string()) + .format(" + "); + write!(f, "{}", bounds)?; + + // where_clause is 'X: Y, Z: D' + // type Foo<...>: ... 
where X: Y, Z: D; + + // note: it's a quantified clause b/c we could have `for<'a> T: Foo<'a>` + // within 'where' + if !datum_bounds.where_clauses.is_empty() { + let where_s = &s.add_indent(); + let where_clauses = datum_bounds.where_clauses.display(where_s); + write!(f, "\n{}where\n{}", s.indent(), where_clauses)?; + } + write!(f, ";")?; + Ok(()) + } +} + +impl RenderAsRust for AssociatedTyValue { + fn fmt(&self, s: &InternalWriterState<'_, I>, f: &'_ mut Formatter<'_>) -> Result { + // see comments for a similar empty env operation in AssociatedTyDatum's + // impl of RenderAsRust. + let assoc_ty_data = s.db().associated_ty_data(self.associated_ty_id); + let impl_datum = s.db().impl_datum(self.impl_id); + + let impl_param_names_in_impl_env = s.binder_var_indices(&impl_datum.binders.binders); + + let s = &s.add_debrujin_index(None); + let value = self.value.skip_binders(); + + let param_names_in_assoc_ty_value_env = s + .binder_var_indices(&self.value.binders) + .collect::>(); + + let (impl_params_in_assoc_ty_value_env, _assoc_ty_value_params) = s + .db() + .split_associated_ty_value_parameters(¶m_names_in_assoc_ty_value_env, self); + + let s = &s.add_parameter_mapping( + impl_params_in_assoc_ty_value_env.iter().cloned(), + impl_param_names_in_impl_env, + ); + + let display_params = s + .binder_var_display(&self.value.binders) + .collect::>(); + + let (_impl_display, assoc_ty_value_display) = s + .db() + .split_associated_ty_value_parameters(&display_params, self); + + write!(f, "{}type {}", s.indent(), assoc_ty_data.id.display(s))?; + write_joined_non_empty_list!(f, "<{}>", assoc_ty_value_display, ", ")?; + write!(f, " = {};", value.ty.display(s))?; + Ok(()) + } +} + +impl RenderAsRust for FnDefDatum { + fn fmt(&self, s: &InternalWriterState<'_, I>, f: &mut Formatter<'_>) -> Result { + let s = &s.add_debrujin_index(None); + let bound_datum = self.binders.skip_binders(); + + // declaration + // fn foo(arg: u32, arg2: T) -> Result where T: Bar + // ^^^^^^ + 
write!(f, "fn {}", s.db().fn_def_name(self.id))?; + + // binders + // fn foo(arg: u32, arg2: T) -> Result where T: Bar + // ^^^ + let binders = s.binder_var_display(&self.binders.binders); + write_joined_non_empty_list!(f, "<{}>", binders, ", ")?; + + { + let s = &s.add_debrujin_index(None); + let inputs_and_output = bound_datum.inputs_and_output.skip_binders(); + + // arguments + // fn foo(arg: u32, arg2: T) -> Result where T: Bar + // ^^^^^^^^^^^^^^^^^^^ + let arguments = inputs_and_output + .argument_types + .iter() + .enumerate() + .map(|(idx, arg)| format!("arg_{}: {}", idx, arg.display(s))) + .format(", "); + + write!(f, "({})", arguments)?; + + // return Type + // fn foo(arg: u32, arg2: T) -> Result where T: Bar + // ^^^^^^^^^^^^^ + write!(f, " -> {}", inputs_and_output.return_type.display(s))?; + } + + // where clause + // fn foo(arg: u32, arg2: T) -> Result where T: Bar + // ^^^^^^^^^^^^ + if !bound_datum.where_clauses.is_empty() { + let s = &s.add_indent(); + write!(f, "\nwhere\n{}", bound_datum.where_clauses.display(s))?; + } + + write!(f, ";")?; + + Ok(()) + } +} diff --git a/chalk-solve/src/display/render_trait.rs b/chalk-solve/src/display/render_trait.rs new file mode 100644 index 00000000000..a565f076c7d --- /dev/null +++ b/chalk-solve/src/display/render_trait.rs @@ -0,0 +1,30 @@ +//! `RenderAsRust` trait and related utils. +use std::fmt::{Display, Formatter, Result}; + +use chalk_ir::interner::Interner; + +use super::state::InternalWriterState; + +/// Displays `RenderAsRust` data. +/// +/// This is a utility struct for making `RenderAsRust` nice to use with rust format macros. 
+pub(in crate::display) struct DisplayRenderAsRust<'a, I: Interner, T> { + s: &'a InternalWriterState<'a, I>, + rar: &'a T, +} + +impl> Display for DisplayRenderAsRust<'_, I, T> { + fn fmt(&self, f: &mut Formatter<'_>) -> Result { + self.rar.fmt(self.s, f) + } +} + +pub(in crate::display) trait RenderAsRust { + fn fmt(&self, s: &InternalWriterState<'_, I>, f: &mut Formatter<'_>) -> Result; + fn display<'a>(&'a self, s: &'a InternalWriterState<'a, I>) -> DisplayRenderAsRust<'a, I, Self> + where + Self: Sized, + { + DisplayRenderAsRust { s, rar: self } + } +} diff --git a/chalk-solve/src/display/state.rs b/chalk-solve/src/display/state.rs new file mode 100644 index 00000000000..fed2f5ca599 --- /dev/null +++ b/chalk-solve/src/display/state.rs @@ -0,0 +1,352 @@ +//! Persistent state passed down between writers. +//! +//! This is essentially `InternalWriterState` and other things supporting that. +use core::hash::Hash; +use std::{ + borrow::Borrow, + collections::BTreeMap, + fmt::{Debug, Display, Formatter, Result}, + marker::PhantomData, + rc::Rc, + sync::{Arc, Mutex}, +}; + +use crate::RustIrDatabase; +use chalk_ir::{interner::Interner, *}; +use indexmap::IndexMap; +use itertools::Itertools; + +/// Like a BoundVar, but with the debrujin index inverted so as to create a +/// canonical name we can use anywhere for each bound variable. +/// +/// In BoundVar, the innermost bound variables have debrujin index `0`, and +/// each further out BoundVar has a debrujin index `1` higher. +/// +/// In InvertedBoundVar, the outermost variables have inverted_debrujin_idx `0`, +/// and the innermost have their depth, not the other way around. +#[derive(Debug, Copy, Clone, PartialOrd, Ord, PartialEq, Eq)] +pub struct InvertedBoundVar { + /// The inverted debrujin index. Corresponds roughly to an inverted `DebrujinIndex::depth`. + inverted_debrujin_idx: i64, + /// The index within the debrujin index. Corresponds to `BoundVar::index`. 
+ within_idx: IndexWithinBinding, +} + +impl Display for InvertedBoundVar { + fn fmt(&self, f: &mut Formatter<'_>) -> Result { + write!(f, "_{}_{}", self.inverted_debrujin_idx, self.within_idx) + } +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +enum UnifiedId { + AdtId(I::InternedAdtId), + DefId(I::DefId), +} + +#[derive(Debug)] +pub struct IdAliasStore { + /// Map from the DefIds we've encountered to a u32 alias id unique to all ids + /// the same name. + aliases: IndexMap, + /// Map from each name to the next unused u32 alias id. + next_unused_for_name: BTreeMap, +} + +impl Default for IdAliasStore { + fn default() -> Self { + IdAliasStore { + aliases: IndexMap::default(), + next_unused_for_name: BTreeMap::default(), + } + } +} + +impl IdAliasStore { + fn alias_for_id_name(&mut self, id: T, name: String) -> String { + let next_unused_for_name = &mut self.next_unused_for_name; + let alias = *self.aliases.entry(id).or_insert_with(|| { + let next_unused: &mut u32 = next_unused_for_name.entry(name.clone()).or_default(); + let id = *next_unused; + *next_unused += 1; + id + }); + // If there are no conflicts, keep the name the same so that we don't + // need name-agnostic equality in display tests. + if alias == 0 { + name + } else { + format!("{}_{}", name, alias) + } + } +} + +#[derive(Debug)] +struct IdAliases { + id_aliases: IdAliasStore>, +} + +impl Default for IdAliases { + fn default() -> Self { + IdAliases { + id_aliases: IdAliasStore::default(), + } + } +} + +/// Writer state which persists across multiple writes. +/// +/// Currently, this means keeping track of what IDs have been given what names, +/// including deduplication information. +/// +/// This data is stored using interior mutability - clones will point to the same underlying +/// data. +/// +/// Uses a separate type, `P`, for the database stored inside to account for +/// `Arc` or wrapping other storage mediums. 
#[derive(Debug)]
pub struct WriterState<I, DB: ?Sized, P = DB>
where
    DB: RustIrDatabase<I>,
    P: Borrow<DB>,
    I: Interner,
{
    pub(super) db: P,
    id_aliases: Arc<Mutex<IdAliases<I>>>,
    _phantom: PhantomData<DB>,
}

impl<I, DB: ?Sized, P> Clone for WriterState<I, DB, P>
where
    DB: RustIrDatabase<I>,
    P: Borrow<DB> + Clone,
    I: Interner,
{
    fn clone(&self) -> Self {
        WriterState {
            db: self.db.clone(),
            // clones share the same alias map (interior mutability)
            id_aliases: self.id_aliases.clone(),
            _phantom: PhantomData,
        }
    }
}

impl<I, DB: ?Sized, P> WriterState<I, DB, P>
where
    DB: RustIrDatabase<I>,
    P: Borrow<DB>,
    I: Interner,
{
    /// Creates a fresh writer state wrapping the given database.
    pub fn new(db: P) -> Self {
        WriterState {
            db,
            id_aliases: Arc::new(Mutex::new(IdAliases::default())),
            _phantom: PhantomData,
        }
    }

    /// Returns a new version of self containing a wrapped database which
    /// references the outer data.
    ///
    /// `f` will be run on the internal database, and the returned result will
    /// wrap the result from `f`. For consistency, `f` should always contain the
    /// given database, and must keep the same ID<->item relationships.
    pub(super) fn wrap_db_ref<'a, DB2: ?Sized, P2, F>(&'a self, f: F) -> WriterState<I, DB2, P2>
    where
        DB2: RustIrDatabase<I>,
        P2: Borrow<DB2>,
        // We need to pass in `&'a P` specifically to guarantee that the `&P`
        // can outlive the function body, and thus that it's safe to store `&P`
        // in `P2`.
        F: FnOnce(&'a P) -> P2,
    {
        WriterState {
            db: f(&self.db),
            id_aliases: self.id_aliases.clone(),
            _phantom: PhantomData,
        }
    }

    /// Borrows the wrapped database.
    pub(crate) fn db(&self) -> &DB {
        self.db.borrow()
    }
}

/// Writer state for a single write call, persistent only as long as necessary
/// to write a single item.
///
/// Stores things necessary for rendering one item: indentation level, debrujin
/// depth, and bound-variable remappings. (NOTE(review): the original sentence
/// here was truncated — confirm intended wording upstream.)
#[derive(Clone, Debug)]
pub(super) struct InternalWriterState<'a, I: Interner> {
    persistent_state: WriterState<I, dyn RustIrDatabase<I> + 'a, &'a dyn RustIrDatabase<I>>,
    indent_level: usize,
    debrujin_indices_deep: u32,
    // lowered_(inverted_debrujin_idx, index) -> src_correct_(inverted_debrujin_idx, index)
    remapping: Rc<BTreeMap<InvertedBoundVar, InvertedBoundVar>>,
    // the inverted_bound_var which maps to "Self"
    self_mapping: Option<InvertedBoundVar>,
}

type IndexWithinBinding = usize;

impl<'a, I: Interner> InternalWriterState<'a, I> {
    pub fn new<DB, P>(persistent_state: &'a WriterState<I, DB, P>) -> Self
    where
        DB: RustIrDatabase<I>,
        P: Borrow<DB>,
    {
        InternalWriterState {
            persistent_state: persistent_state
                .wrap_db_ref(|db| db.borrow() as &dyn RustIrDatabase<I>),
            indent_level: 0,
            debrujin_indices_deep: 0,
            remapping: Rc::new(BTreeMap::new()),
            self_mapping: None,
        }
    }

    pub(super) fn db(&self) -> &dyn RustIrDatabase<I> {
        self.persistent_state.db
    }

    /// Returns a copy of this state with one more level of indentation.
    pub(super) fn add_indent(&self) -> Self {
        InternalWriterState {
            indent_level: self.indent_level + 1,
            ..self.clone()
        }
    }

    /// Renders the current indentation level as whitespace.
    pub(super) fn indent(&self) -> impl Display {
        std::iter::repeat("  ").take(self.indent_level).format("")
    }

    pub(super) fn alias_for_adt_id_name(&self, id: I::InternedAdtId, name: String) -> impl Display {
        self.persistent_state
            .id_aliases
            .lock()
            .unwrap()
            .id_aliases
            .alias_for_id_name(UnifiedId::AdtId(id), name)
    }

    pub(super) fn alias_for_id_name(&self, id: I::DefId, name: String) -> impl Display {
        self.persistent_state
            .id_aliases
            .lock()
            .unwrap()
            .id_aliases
            .alias_for_id_name(UnifiedId::DefId(id), name)
    }

    /// Adds a level of debrujin index, and possibly a "Self" parameter.
    ///
    /// This should be called whenever recursing into the value within a
    /// [`Binders`].
    ///
    /// If `self_binding` is `Some`, then it will introduce a new variable named
    /// `Self` with the given within-binding index and the innermost debrujin
    /// index after increasing the debrujin index.
+ #[must_use = "this returns a new `InternalWriterState`, and does not modify the existing one"] + pub(super) fn add_debrujin_index(&self, self_binding: Option) -> Self { + let mut new_state = self.clone(); + new_state.debrujin_indices_deep += 1; + new_state.self_mapping = self_binding + .map(|idx| new_state.indices_for_introduced_bound_var(idx)) + .or(self.self_mapping); + new_state + } + + /// Adds parameter remapping. + /// + /// Each of the parameters in `lowered_vars` will be mapped to its + /// corresponding variable in `original_vars` when printed through the + /// `InternalWriterState` returned from this method. + /// + /// `lowered_vars` and `original_vars` must have the same length. + pub(super) fn add_parameter_mapping( + &self, + lowered_vars: impl Iterator, + original_vars: impl Iterator, + ) -> Self { + let remapping = self + .remapping + .iter() + .map(|(a, b)| (*a, *b)) + .chain(lowered_vars.zip(original_vars)) + .collect::>(); + + InternalWriterState { + remapping: Rc::new(remapping), + ..self.clone() + } + } + + /// Inverts the debrujin index so as to create a canonical name we can + /// anywhere for each bound variable. + /// + /// See [`InvertedBoundVar`][InvertedBoundVar]. 
+ pub(super) fn invert_debrujin_idx( + &self, + debrujin_idx: u32, + index: IndexWithinBinding, + ) -> InvertedBoundVar { + InvertedBoundVar { + inverted_debrujin_idx: (self.debrujin_indices_deep as i64) - (debrujin_idx as i64), + within_idx: index, + } + } + + pub(super) fn apply_mappings(&self, b: InvertedBoundVar) -> impl Display { + let remapped = self.remapping.get(&b).copied().unwrap_or(b); + if self.self_mapping == Some(remapped) { + "Self".to_owned() + } else { + remapped.to_string() + } + } + + pub(super) fn indices_for_bound_var(&self, b: &BoundVar) -> InvertedBoundVar { + self.invert_debrujin_idx(b.debruijn.depth(), b.index) + } + + pub(super) fn indices_for_introduced_bound_var( + &self, + idx: IndexWithinBinding, + ) -> InvertedBoundVar { + // freshly introduced bound vars will always have debrujin index of 0, + // they're always "innermost". + self.invert_debrujin_idx(0, idx) + } + + pub(super) fn display_bound_var(&self, b: &BoundVar) -> impl Display { + self.apply_mappings(self.indices_for_bound_var(b)) + } + + pub(super) fn name_for_introduced_bound_var(&self, idx: IndexWithinBinding) -> impl Display { + self.apply_mappings(self.indices_for_introduced_bound_var(idx)) + } + + pub(super) fn binder_var_indices<'b>( + &'b self, + binders: &'b VariableKinds, + ) -> impl Iterator + 'b { + binders + .iter(self.db().interner()) + .enumerate() + .map(move |(idx, _param)| self.indices_for_introduced_bound_var(idx)) + } + + pub(super) fn binder_var_display<'b>( + &'b self, + binders: &'b VariableKinds, + ) -> impl Iterator + 'b { + binders + .iter(self.db().interner()) + .zip(self.binder_var_indices(binders)) + .map(move |(parameter, var)| match parameter { + VariableKind::Ty(_) => format!("{}", self.apply_mappings(var)), + VariableKind::Lifetime => format!("'{}", self.apply_mappings(var)), + VariableKind::Const(_ty) => format!("const {}", self.apply_mappings(var)), + }) + } +} diff --git a/chalk-solve/src/display/stub.rs b/chalk-solve/src/display/stub.rs new 
file mode 100644 index 00000000000..bffa5b4c227 --- /dev/null +++ b/chalk-solve/src/display/stub.rs @@ -0,0 +1,283 @@ +//! Contains a `LoggingIrDatabase` which returns stub versions of everything +//! queried. +use std::sync::Arc; + +use crate::rust_ir::{CoroutineDatum, CoroutineWitnessDatum}; +use crate::{ + rust_ir::{ + AdtDatumBound, AdtKind, AdtVariantDatum, AssociatedTyDatumBound, FnDefDatumBound, + OpaqueTyDatumBound, TraitDatumBound, + }, + RustIrDatabase, +}; +use chalk_ir::{ + interner::Interner, Binders, CanonicalVarKinds, CoroutineId, Substitution, Ty, + UnificationDatabase, VariableKinds, Variances, +}; + +#[derive(Debug)] +pub struct StubWrapper<'a, DB> { + db: &'a DB, +} + +impl<'a, DB> StubWrapper<'a, DB> { + pub fn new(db: &'a DB) -> Self { + StubWrapper { db } + } +} + +impl> UnificationDatabase for StubWrapper<'_, DB> { + fn fn_def_variance(&self, fn_def_id: chalk_ir::FnDefId) -> Variances { + self.db.unification_database().fn_def_variance(fn_def_id) + } + + fn adt_variance(&self, adt_id: chalk_ir::AdtId) -> Variances { + self.db.unification_database().adt_variance(adt_id) + } +} + +impl> RustIrDatabase for StubWrapper<'_, DB> { + fn custom_clauses(&self) -> Vec> { + self.db.custom_clauses() + } + + fn associated_ty_data( + &self, + ty: chalk_ir::AssocTypeId, + ) -> std::sync::Arc> { + let mut v = (*self.db.associated_ty_data(ty)).clone(); + v.binders = Binders::new( + v.binders.binders.clone(), + AssociatedTyDatumBound { + where_clauses: Vec::new(), + bounds: Vec::new(), + }, + ); + Arc::new(v) + } + + fn trait_datum( + &self, + trait_id: chalk_ir::TraitId, + ) -> std::sync::Arc> { + let mut v = (*self.db.trait_datum(trait_id)).clone(); + v.binders = Binders::new( + v.binders.binders.clone(), + TraitDatumBound { + where_clauses: Vec::new(), + }, + ); + Arc::new(v) + } + + fn adt_datum(&self, adt_id: chalk_ir::AdtId) -> std::sync::Arc> { + let mut v = (*self.db.adt_datum(adt_id)).clone(); + let variants = match v.kind { + AdtKind::Struct | 
AdtKind::Union => vec![AdtVariantDatum { fields: vec![] }], + AdtKind::Enum => vec![], + }; + v.binders = Binders::new( + v.binders.binders.clone(), + AdtDatumBound { + variants, + where_clauses: Vec::new(), + }, + ); + Arc::new(v) + } + + fn adt_repr(&self, id: chalk_ir::AdtId) -> std::sync::Arc> { + self.db.adt_repr(id) + } + + fn adt_size_align(&self, id: chalk_ir::AdtId) -> Arc { + self.db.adt_size_align(id) + } + + fn fn_def_datum( + &self, + fn_def_id: chalk_ir::FnDefId, + ) -> std::sync::Arc> { + let mut v = (*self.db.fn_def_datum(fn_def_id)).clone(); + v.binders = Binders::new( + v.binders.binders.clone(), + FnDefDatumBound { + inputs_and_output: v.binders.skip_binders().inputs_and_output.clone(), + where_clauses: Vec::new(), + }, + ); + Arc::new(v) + } + + fn impl_datum( + &self, + _impl_id: chalk_ir::ImplId, + ) -> std::sync::Arc> { + unreachable!("impl items should never be stubbed") + } + + fn associated_ty_from_impl( + &self, + _impl_id: chalk_ir::ImplId, + _assoc_type_id: chalk_ir::AssocTypeId, + ) -> Option> { + unreachable!("should never reach projection if impl datum is not stubbed") + } + + fn associated_ty_value( + &self, + _id: crate::rust_ir::AssociatedTyValueId, + ) -> std::sync::Arc> { + unreachable!("associated type values should never be stubbed") + } + + fn opaque_ty_data( + &self, + id: chalk_ir::OpaqueTyId, + ) -> std::sync::Arc> { + let mut v = (*self.db.opaque_ty_data(id)).clone(); + v.bound = Binders::new( + v.bound.binders, + OpaqueTyDatumBound { + bounds: Binders::new(VariableKinds::empty(self.db.interner()), Vec::new()), + where_clauses: Binders::new(VariableKinds::empty(self.db.interner()), Vec::new()), + }, + ); + Arc::new(v) + } + + fn hidden_opaque_type(&self, _id: chalk_ir::OpaqueTyId) -> chalk_ir::Ty { + // Return a unit since the particular hidden type doesn't matter (If it + // did matter, it would have been recorded) + chalk_ir::TyKind::Tuple(0, Substitution::empty(self.db.interner())) + .intern(self.db.interner()) + } + + 
fn impls_for_trait( + &self, + _trait_id: chalk_ir::TraitId, + _parameters: &[chalk_ir::GenericArg], + _binders: &CanonicalVarKinds, + ) -> Vec> { + // We panic here because the returned ids may not be collected, + // resulting in unresolvable names. + unimplemented!("stub display code should call this") + } + + fn local_impls_to_coherence_check( + &self, + trait_id: chalk_ir::TraitId, + ) -> Vec> { + self.db.local_impls_to_coherence_check(trait_id) + } + + fn impl_provided_for( + &self, + _auto_trait_id: chalk_ir::TraitId, + _ty: &chalk_ir::TyKind, + ) -> bool { + // We panic here because the returned ids may not be collected, + // resulting in unresolvable names. + unimplemented!("stub display code should call this") + } + + fn well_known_trait_id( + &self, + well_known_trait: crate::rust_ir::WellKnownTrait, + ) -> Option> { + self.db.well_known_trait_id(well_known_trait) + } + + fn well_known_assoc_type_id( + &self, + assoc_type: crate::rust_ir::WellKnownAssocType, + ) -> Option> { + self.db.well_known_assoc_type_id(assoc_type) + } + + fn program_clauses_for_env( + &self, + environment: &chalk_ir::Environment, + ) -> chalk_ir::ProgramClauses { + self.db.program_clauses_for_env(environment) + } + + fn interner(&self) -> I { + self.db.interner() + } + + fn is_object_safe(&self, trait_id: chalk_ir::TraitId) -> bool { + self.db.is_object_safe(trait_id) + } + + fn closure_kind( + &self, + _closure_id: chalk_ir::ClosureId, + _substs: &chalk_ir::Substitution, + ) -> crate::rust_ir::ClosureKind { + unimplemented!("cannot stub closures") + } + + fn closure_inputs_and_output( + &self, + _closure_id: chalk_ir::ClosureId, + _substs: &chalk_ir::Substitution, + ) -> chalk_ir::Binders> { + unimplemented!("cannot stub closures") + } + + fn closure_upvars( + &self, + _closure_id: chalk_ir::ClosureId, + _substs: &chalk_ir::Substitution, + ) -> chalk_ir::Binders> { + unimplemented!("cannot stub closures") + } + + fn coroutine_datum(&self, _coroutine_id: CoroutineId) -> Arc> { + 
unimplemented!("cannot stub coroutine") + } + + fn coroutine_witness_datum( + &self, + _coroutine_id: CoroutineId, + ) -> Arc> { + unimplemented!("cannot stub coroutine witness") + } + + fn closure_fn_substitution( + &self, + _closure_id: chalk_ir::ClosureId, + _substs: &chalk_ir::Substitution, + ) -> chalk_ir::Substitution { + unimplemented!("cannot stub closures") + } + + fn unification_database(&self) -> &dyn UnificationDatabase { + self + } + + fn trait_name(&self, trait_id: chalk_ir::TraitId) -> String { + self.db.trait_name(trait_id) + } + + fn adt_name(&self, struct_id: chalk_ir::AdtId) -> String { + self.db.adt_name(struct_id) + } + + fn assoc_type_name(&self, assoc_ty_id: chalk_ir::AssocTypeId) -> String { + self.db.assoc_type_name(assoc_ty_id) + } + + fn opaque_type_name(&self, opaque_ty_id: chalk_ir::OpaqueTyId) -> String { + self.db.opaque_type_name(opaque_ty_id) + } + + fn fn_def_name(&self, fn_def_id: chalk_ir::FnDefId) -> String { + self.db.fn_def_name(fn_def_id) + } + + fn discriminant_type(&self, ty: Ty) -> Ty { + self.db.discriminant_type(ty) + } +} diff --git a/chalk-solve/src/display/ty.rs b/chalk-solve/src/display/ty.rs new file mode 100644 index 00000000000..daa47ccc999 --- /dev/null +++ b/chalk-solve/src/display/ty.rs @@ -0,0 +1,306 @@ +//! Writer logic for types. +//! +//! Contains the highly-recursive logic for writing `TyKind` and its variants. 
+use std::fmt::{Formatter, Result}; + +use crate::split::Split; +use chalk_ir::{interner::Interner, *}; +use itertools::Itertools; + +use super::{ + display_self_where_clauses_as_bounds, display_type_with_generics, render_trait::RenderAsRust, + state::InternalWriterState, +}; + +impl RenderAsRust for TyKind { + fn fmt(&self, s: &InternalWriterState<'_, I>, f: &'_ mut Formatter<'_>) -> Result { + let interner = s.db().interner(); + match self { + TyKind::Adt(sid, substitution) => { + write!(f, "{}", sid.display(s))?; + let parameters = substitution.as_slice(interner); + let parameters = parameters.iter().map(|param| param.display(s)); + write_joined_non_empty_list!(f, "<{}>", parameters, ", ") + } + TyKind::AssociatedType(assoc_type_id, substitution) => { + // (Iterator::Item)(x) + // should be written in Rust as ::Item + let datum = s.db().associated_ty_data(*assoc_type_id); + assert!( + substitution + .iter(interner) + .filter_map(move |p| p.ty(interner)) + .count() + >= 1, + "AssociatedType should have at least 1 parameter" + ); + write!( + f, + "<{} as {}>::{}", + substitution + .iter(interner) + .filter_map(move |p| p.ty(interner)) + .next() + .unwrap() + .display(s), + datum.trait_id.display(s), + datum.id.display(s), + )?; + let params = substitution.as_slice(interner); + write_joined_non_empty_list!( + f, + "<{}>", + params[1..].iter().map(|ty| ty.display(s)), + "," + ) + } + TyKind::Scalar(scalar) => write!(f, "{}", scalar.display(s)), + TyKind::Tuple(arity, substitution) => { + write!( + f, + "({}{})", + substitution + .as_slice(interner) + .iter() + .map(|p| p.display(s)) + .format(", "), + if *arity == 1 { + // need trailing single comma + "," + } else { + "" + } + ) + } + TyKind::OpaqueType(opaque_ty_id, substitution) => write!( + f, + "{}", + display_type_with_generics(s, *opaque_ty_id, substitution.as_slice(interner)) + ), + TyKind::Raw(mutability, ty) => match mutability { + Mutability::Mut => write!(f, "*mut {}", ty.display(s)), + Mutability::Not => 
write!(f, "*const {}", ty.display(s)), + }, + TyKind::Ref(mutability, lifetime, ty) => match mutability { + Mutability::Mut => write!(f, "&{} mut {}", lifetime.display(s), ty.display(s)), + Mutability::Not => write!(f, "&{} {}", lifetime.display(s), ty.display(s)), + }, + TyKind::Str => write!(f, "str"), + TyKind::Slice(ty) => write!(f, "[{}]", ty.display(s)), + TyKind::Error => write!(f, "{{error}}"), + TyKind::Never => write!(f, "!"), + + // FIXME: write out valid types for these variants + TyKind::FnDef(..) => write!(f, ""), + TyKind::Closure(..) => write!(f, ""), + TyKind::Foreign(..) => write!(f, ""), + TyKind::Coroutine(..) => write!(f, ""), + TyKind::CoroutineWitness(..) => write!(f, ""), + + TyKind::Array(ty, const_) => write!(f, "[{}; {}]", ty.display(s), const_.display(s),), + TyKind::Dyn(dyn_ty) => { + // the lifetime needs to be outside of the bounds, so we + // introduce a new scope for the bounds + { + let s = &s.add_debrujin_index(None); + // dyn_ty.bounds.binders creates a Self binding for the trait + let bounds = dyn_ty.bounds.skip_binders(); + + write!( + f, + "dyn {}", + display_self_where_clauses_as_bounds(s, bounds.as_slice(interner)), + )?; + } + + write!(f, " + {}", dyn_ty.lifetime.display(s))?; + Ok(()) + } + TyKind::BoundVar(bound_var) => write!(f, "{}", s.display_bound_var(bound_var)), + TyKind::InferenceVar(_, _) => write!(f, "_"), + TyKind::Alias(alias_ty) => alias_ty.fmt(s, f), + TyKind::Function(func) => func.fmt(s, f), + TyKind::Placeholder(_) => write!(f, ""), + } + } +} + +impl RenderAsRust for AliasTy { + fn fmt(&self, s: &InternalWriterState<'_, I>, f: &'_ mut Formatter<'_>) -> Result { + match self { + AliasTy::Projection(projection_ty) => projection_ty.fmt(s, f), + AliasTy::Opaque(opaque_ty) => opaque_ty.fmt(s, f), + } + } +} + +impl RenderAsRust for ProjectionTy { + fn fmt(&self, s: &InternalWriterState<'_, I>, f: &'_ mut Formatter<'_>) -> Result { + // >::Z + + // Now, we split out A*, Y/Z and B*: + // trait_params is X, A1, 
A2, A3, + // assoc_type_params is B1, B2, B3, + // assoc_ty_datum stores info about Y and Z. + let (assoc_ty_datum, trait_params, assoc_type_params) = s.db().split_projection(self); + write!( + f, + "<{} as {}>::{}", + trait_params[0].display(s), + display_type_with_generics(s, assoc_ty_datum.trait_id, &trait_params[1..]), + assoc_ty_datum.id.display(s), + )?; + write_joined_non_empty_list!( + f, + "<{}>", + assoc_type_params.iter().map(|param| param.display(s)), + ", " + )?; + Ok(()) + } +} + +impl RenderAsRust for OpaqueTy { + fn fmt(&self, s: &InternalWriterState<'_, I>, f: &'_ mut Formatter<'_>) -> Result { + let interner = s.db().interner(); + write!( + f, + "{}", + display_type_with_generics(s, self.opaque_ty_id, self.substitution.as_slice(interner),) + ) + } +} + +impl RenderAsRust for FnPointer { + fn fmt(&self, s: &InternalWriterState<'_, I>, f: &'_ mut Formatter<'_>) -> Result { + let interner = s.db().interner(); + let s = &s.add_debrujin_index(None); + if self.num_binders > 0 { + write!( + f, + "for<{}> ", + (0..self.num_binders) + .map(|n| format!("'{}", s.name_for_introduced_bound_var(n))) + .format(", ") + )?; + } + let parameters = self.substitution.0.as_slice(interner); + write!( + f, + "fn({}) -> {}", + parameters[..parameters.len() - 1] + .iter() + .map(|param| param.display(s)) + .format(", "), + parameters[parameters.len() - 1].display(s), + ) + } +} + +impl RenderAsRust for Scalar { + fn fmt(&self, _s: &InternalWriterState<'_, I>, f: &mut Formatter<'_>) -> Result { + use chalk_ir::{FloatTy::*, IntTy::*, UintTy::*}; + write!( + f, + "{}", + match self { + Scalar::Bool => "bool", + Scalar::Char => "char", + Scalar::Int(int) => match int { + Isize => "isize", + I8 => "i8", + I16 => "i16", + I32 => "i32", + I64 => "i64", + I128 => "i128", + }, + Scalar::Uint(uint) => match uint { + Usize => "usize", + U8 => "u8", + U16 => "u16", + U32 => "u32", + U64 => "u64", + U128 => "u128", + }, + Scalar::Float(float) => match float { + F16 => "f16", + F32 => 
"f32", + F64 => "f64", + F128 => "f128", + }, + } + ) + } +} + +impl RenderAsRust for LifetimeData { + fn fmt(&self, s: &InternalWriterState<'_, I>, f: &'_ mut Formatter<'_>) -> Result { + match self { + LifetimeData::BoundVar(v) => write!(f, "'{}", s.display_bound_var(v)), + LifetimeData::InferenceVar(_) => write!(f, "'_"), + LifetimeData::Placeholder(ix) => { + write!(f, "'_placeholder_{}_{}", ix.ui.counter, ix.idx) + } + LifetimeData::Static => write!(f, "'static"), + LifetimeData::Erased => write!(f, "'_"), + LifetimeData::Error => write!(f, "'{{error}}"), + // Matching the void ensures at compile time that this code is + // unreachable + LifetimeData::Phantom(void, _) => match *void {}, + } + } +} + +impl RenderAsRust for ConstData { + fn fmt(&self, s: &InternalWriterState<'_, I>, f: &'_ mut Formatter<'_>) -> Result { + write!(f, "{}", self.value.display(s)) + } +} + +impl RenderAsRust for ConstValue { + fn fmt(&self, s: &InternalWriterState<'_, I>, f: &mut Formatter<'_>) -> Result { + match self { + ConstValue::BoundVar(v) => write!(f, "{}", s.display_bound_var(v)), + ConstValue::InferenceVar(_) => write!(f, "_"), + ConstValue::Placeholder(_) => write!(f, ""), + ConstValue::Concrete(value) => write!(f, "{:?}", value.interned), + } + } +} + +impl RenderAsRust for GenericArgData { + fn fmt(&self, s: &InternalWriterState<'_, I>, f: &'_ mut Formatter<'_>) -> Result { + match self { + GenericArgData::Ty(ty) => write!(f, "{}", ty.display(s)), + GenericArgData::Lifetime(lt) => write!(f, "{}", lt.display(s)), + GenericArgData::Const(const_ty) => write!(f, "{}", const_ty.display(s)), + } + } +} + +impl RenderAsRust for Ty { + fn fmt(&self, s: &InternalWriterState<'_, I>, f: &'_ mut Formatter<'_>) -> Result { + // delegate to TyKind + self.kind(s.db().interner()).fmt(s, f) + } +} + +impl RenderAsRust for Lifetime { + fn fmt(&self, s: &InternalWriterState<'_, I>, f: &'_ mut Formatter<'_>) -> Result { + // delegate to LifetimeData + self.data(s.db().interner()).fmt(s, f) 
+ } +} + +impl RenderAsRust for Const { + fn fmt(&self, s: &InternalWriterState<'_, I>, f: &mut Formatter<'_>) -> Result { + self.data(s.db().interner()).fmt(s, f) + } +} + +impl RenderAsRust for GenericArg { + fn fmt(&self, s: &InternalWriterState<'_, I>, f: &'_ mut Formatter<'_>) -> Result { + // delegate to GenericArgData + self.data(s.db().interner()).fmt(s, f) + } +} diff --git a/chalk-solve/src/display/utils.rs b/chalk-solve/src/display/utils.rs new file mode 100644 index 00000000000..30660561679 --- /dev/null +++ b/chalk-solve/src/display/utils.rs @@ -0,0 +1,51 @@ +//! Render utilities which don't belong anywhere else. +use std::fmt::{Display, Formatter, Result}; + +pub fn as_display) -> Result>(f: F) -> impl Display { + struct ClosureDisplay) -> Result>(F); + + impl) -> Result> Display for ClosureDisplay { + fn fmt(&self, f: &mut Formatter<'_>) -> Result { + self.0(f) + } + } + + ClosureDisplay(f) +} + +macro_rules! write_joined_non_empty_list { + ($f:expr,$template:tt,$list:expr,$sep:expr) => {{ + let mut x = $list.into_iter().peekable(); + if x.peek().is_some() { + write!($f, $template, x.format($sep)) + } else { + Ok(()) + } + }}; +} + +/// Processes a name given by an [`Interner`][chalk_ir::interner::Interner] debug +/// method into something usable by the `display` module. +/// +/// This is specifically useful when implementing +/// [`RustIrDatabase`][crate::RustIrDatabase] `name_*` methods. +pub fn sanitize_debug_name(func: impl Fn(&mut Formatter<'_>) -> Option) -> String { + use std::fmt::Write; + + // First, write the debug method contents to a String. + let mut debug_out = String::new(); + // ignore if the result is `None`, as we can just as easily tell by looking + // to see if anything was written to `debug_out`. 
+ write!( + debug_out, + "{}", + as_display(|fmt| { func(fmt).unwrap_or(Ok(())) }) + ) + .expect("expected writing to a String to succeed"); + if debug_out.is_empty() { + return "Unknown".to_owned(); + } + + // now the actual sanitization + debug_out.replace(|c: char| !c.is_ascii_alphanumeric(), "_") +} diff --git a/chalk-solve/src/ext.rs b/chalk-solve/src/ext.rs index a5c2ca9df9e..9cb8b774a05 100644 --- a/chalk-solve/src/ext.rs +++ b/chalk-solve/src/ext.rs @@ -1,16 +1,22 @@ +use crate::infer::InferenceTable; +use chalk_ir::fold::TypeFoldable; +use chalk_ir::interner::{HasInterner, Interner}; use chalk_ir::*; -use chalk_ir::fold::Fold; -use infer::InferenceTable; -pub trait CanonicalExt { - fn map(self, op: OP) -> Canonical +pub trait CanonicalExt { + fn map(self, interner: I, op: OP) -> Canonical where - OP: FnOnce(T::Result) -> U, - T: Fold, - U: Fold; + OP: FnOnce(T) -> U, + T: TypeFoldable, + U: TypeFoldable, + U: HasInterner; } -impl CanonicalExt for Canonical { +impl CanonicalExt for Canonical +where + T: HasInterner, + I: Interner, +{ /// Maps the contents using `op`, but preserving the binders. /// /// NB. `op` will be invoked with an instantiated version of the @@ -18,11 +24,12 @@ impl CanonicalExt for Canonical { /// inference context) are used in place of the quantified free /// variables. The result should be in terms of those same /// inference variables and will be re-canonicalized. - fn map(self, op: OP) -> Canonical + fn map(self, interner: I, op: OP) -> Canonical where - OP: FnOnce(T::Result) -> U, - T: Fold, - U: Fold, + OP: FnOnce(T) -> U, + T: TypeFoldable, + U: TypeFoldable, + U: HasInterner, { // Subtle: It is only quite rarely correct to apply `op` and // just re-use our existing binders. For that to be valid, the @@ -35,54 +42,57 @@ impl CanonicalExt for Canonical { // `Canonical` type (indeed, its entire reason for existence). 
let mut infer = InferenceTable::new(); let snapshot = infer.snapshot(); - let instantiated_value = infer.instantiate_canonical(&self); + let instantiated_value = infer.instantiate_canonical(interner, self); let mapped_value = op(instantiated_value); - let result = infer.canonicalize(&mapped_value); + let result = infer.canonicalize(interner, mapped_value); infer.rollback_to(snapshot); result.quantified } } -pub trait GoalExt { - fn into_peeled_goal(self) -> UCanonical>; - fn into_closed_goal(self) -> UCanonical>; +pub trait GoalExt { + fn into_peeled_goal(self, interner: I) -> UCanonical>>; + fn into_closed_goal(self, interner: I) -> UCanonical>>; } -impl GoalExt for Goal { +impl GoalExt for Goal { /// Returns a canonical goal in which the outermost `exists<>` and /// `forall<>` quantifiers (as well as implications) have been /// "peeled" and are converted into free universal or existential /// variables. Assumes that this goal is a "closed goal" which /// does not -- at present -- contain any variables. Useful for /// REPLs and tests but not much else. 
- fn into_peeled_goal(self) -> UCanonical> { + fn into_peeled_goal(self, interner: I) -> UCanonical>> { let mut infer = InferenceTable::new(); let peeled_goal = { - let mut env_goal = InEnvironment::new(&Environment::new(), self); + let mut env_goal = InEnvironment::new(&Environment::new(interner), self); loop { let InEnvironment { environment, goal } = env_goal; - match goal { - Goal::Quantified(QuantifierKind::ForAll, subgoal) => { - let subgoal = infer.instantiate_binders_universally(&subgoal); - env_goal = InEnvironment::new(&environment, *subgoal); + match goal.data(interner) { + GoalData::Quantified(QuantifierKind::ForAll, subgoal) => { + let subgoal = + infer.instantiate_binders_universally(interner, subgoal.clone()); + env_goal = InEnvironment::new(&environment, subgoal); } - Goal::Quantified(QuantifierKind::Exists, subgoal) => { - let subgoal = infer.instantiate_binders_existentially(&subgoal); - env_goal = InEnvironment::new(&environment, *subgoal); + GoalData::Quantified(QuantifierKind::Exists, subgoal) => { + let subgoal = + infer.instantiate_binders_existentially(interner, subgoal.clone()); + env_goal = InEnvironment::new(&environment, subgoal); } - Goal::Implies(wc, subgoal) => { - let new_environment = &environment.add_clauses(wc); - env_goal = InEnvironment::new(&new_environment, *subgoal); + GoalData::Implies(wc, subgoal) => { + let new_environment = + environment.add_clauses(interner, wc.iter(interner).cloned()); + env_goal = InEnvironment::new(&new_environment, Goal::clone(subgoal)); } _ => break InEnvironment::new(&environment, goal), } } }; - let canonical = infer.canonicalize(&peeled_goal).quantified; - infer.u_canonicalize(&canonical).quantified + let canonical = infer.canonicalize(interner, peeled_goal).quantified; + InferenceTable::u_canonicalize(interner, &canonical).quantified } /// Given a goal with no free variables (a "closed" goal), creates @@ -94,10 +104,10 @@ impl GoalExt for Goal { /// # Panics /// /// Will panic if this goal does 
in fact contain free variables. - fn into_closed_goal(self) -> UCanonical> { + fn into_closed_goal(self, interner: I) -> UCanonical>> { let mut infer = InferenceTable::new(); - let env_goal = InEnvironment::new(&Environment::new(), self); - let canonical_goal = infer.canonicalize(&env_goal).quantified; - infer.u_canonicalize(&canonical_goal).quantified + let env_goal = InEnvironment::new(&Environment::new(interner), self); + let canonical_goal = infer.canonicalize(interner, env_goal).quantified; + InferenceTable::u_canonicalize(interner, &canonical_goal).quantified } } diff --git a/chalk-solve/src/goal_builder.rs b/chalk-solve/src/goal_builder.rs new file mode 100644 index 00000000000..aa5c9c9eb49 --- /dev/null +++ b/chalk-solve/src/goal_builder.rs @@ -0,0 +1,152 @@ +use crate::RustIrDatabase; +use cast::CastTo; +use chalk_ir::cast::Cast; +use chalk_ir::cast::Caster; +use chalk_ir::*; +use fold::shift::Shift; +use fold::TypeFoldable; +use interner::{HasInterner, Interner}; + +pub struct GoalBuilder<'i, I: Interner> { + db: &'i dyn RustIrDatabase, +} + +impl<'i, I: Interner> GoalBuilder<'i, I> { + pub fn new(db: &'i dyn RustIrDatabase) -> Self { + GoalBuilder { db } + } + + /// Returns the database within the goal builder. + pub fn db(&self) -> &'i dyn RustIrDatabase { + self.db + } + + /// Returns the interner within the goal builder. + pub fn interner(&self) -> I { + self.db.interner() + } + + /// Creates a goal that ensures all of the goals from the `goals` + /// iterator are met (e.g., `goals[0] && ... && goals[N]`). + pub fn all(&mut self, goals: GS) -> Goal + where + GS: IntoIterator, + G: CastTo>, + { + Goal::all(self.interner(), goals.into_iter().casted(self.interner())) + } + + /// Creates a goal `clauses => goal`. The clauses are given as an iterator + /// and the goal is returned via the contained closure. 
+ pub fn implies(&mut self, clauses: CS, goal: impl FnOnce(&mut Self) -> G) -> Goal + where + CS: IntoIterator, + C: CastTo>, + G: CastTo>, + { + GoalData::Implies( + ProgramClauses::from_iter(self.interner(), clauses), + goal(self).cast(self.interner()), + ) + .intern(self.interner()) + } + + /// Given a bound value `binders` like ` V`, + /// creates a goal `forall { G }` where + /// the goal `G` is created by invoking a helper + /// function `body`. + /// + /// # Parameters to `body` + /// + /// `body` will be invoked with: + /// + /// * the goal builder `self` + /// * the substitution `Q0..Qn` + /// * the bound value `[P0..Pn => Q0..Qn] V` instantiated + /// with the substitution + /// * the value `passthru`, appropriately shifted so that + /// any debruijn indices within account for the new binder + /// + /// # Why is `body` a function and not a closure? + /// + /// This is to ensure that `body` doesn't accidentally reference + /// values from the environment whose debruijn indices do not + /// account for the new binder being created. + pub fn forall( + &mut self, + binders: &Binders, + passthru: P, + body: fn(&mut Self, Substitution, &B, P) -> G, + ) -> Goal + where + B: HasInterner, + P: TypeFoldable, + G: CastTo>, + { + self.quantified(QuantifierKind::ForAll, binders, passthru, body) + } + + /// Like [`GoalBuilder::forall`], but for a `exists { G }` goal. + pub fn exists( + &mut self, + binders: &Binders, + passthru: P, + body: fn(&mut Self, Substitution, &B, P) -> G, + ) -> Goal + where + B: HasInterner, + P: TypeFoldable, + G: CastTo>, + { + self.quantified(QuantifierKind::Exists, binders, passthru, body) + } + + /// A combined helper functon for the various methods + /// to create `forall` and `exists` goals. See: + /// + /// * [`GoalBuilder::forall`] + /// * [`GoalBuilder::exists`] + /// + /// for details. 
+ fn quantified( + &mut self, + quantifier_kind: QuantifierKind, + binders: &Binders, + passthru: P, + body: fn(&mut Self, Substitution, &B, P) -> G, + ) -> Goal + where + B: HasInterner, + P: TypeFoldable, + G: CastTo>, + { + let interner = self.interner(); + + // Make an identity mapping `[0 => ^0.0, 1 => ^0.1, ..]` + // and so forth. This substitution is mapping from the `` variables + // in `binders` to the corresponding `P0..Pn` variables we're about to + // introduce in the form of a `forall` goal. Of course, it's + // actually an identity mapping, since this `forall` will be the innermost + // debruijn binder and so forth, so there's no actual reason to + // *do* the substitution, since it would effectively just be a clone. + let substitution = Substitution::from_iter( + interner, + binders + .binders + .iter(interner) + .enumerate() + .map(|p| p.to_generic_arg(interner)), + ); + + // Shift passthru into one level of binder, to account for the `forall` + // we are about to introduce. + let passthru_shifted = passthru.shifted_in(self.interner()); + + // Invoke `body` function, which returns a goal, and wrap that goal in the binders + // from `binders`, and finally a `forall` or `exists` goal. 
+ let bound_goal = binders.map_ref(|bound_value| { + body(self, substitution, bound_value, passthru_shifted).cast(interner) + }); + GoalData::Quantified(quantifier_kind, bound_goal).intern(interner) + } +} diff --git a/chalk-solve/src/infer.rs b/chalk-solve/src/infer.rs index ccb9386eff4..6ede065e0c7 100644 --- a/chalk-solve/src/infer.rs +++ b/chalk-solve/src/infer.rs @@ -1,38 +1,39 @@ -use ena::unify as ena; +use chalk_ir::interner::{HasInterner, Interner}; use chalk_ir::*; -use chalk_ir::fold::Fold; +use chalk_ir::{cast::Cast, fold::TypeFoldable}; +use tracing::debug; -pub mod canonicalize; -pub mod ucanonicalize; -mod normalize_deep; -pub mod instantiate; +mod canonicalize; +pub(crate) mod instantiate; mod invert; -pub mod unify; -pub mod var; mod test; +pub mod ucanonicalize; +pub mod unify; +mod var; use self::var::*; #[derive(Clone)] -pub struct InferenceTable { - unify: ena::InPlaceUnificationTable, - vars: Vec, +pub struct InferenceTable { + unify: ena::unify::InPlaceUnificationTable>, + vars: Vec>, max_universe: UniverseIndex, } -pub struct InferenceSnapshot { - unify_snapshot: ena::Snapshot>, +pub struct InferenceSnapshot { + unify_snapshot: ena::unify::Snapshot>>, max_universe: UniverseIndex, - vars: Vec, + vars: Vec>, } -crate type ParameterEnaVariable = ParameterKind; +#[allow(type_alias_bounds)] +pub type ParameterEnaVariable = WithKind>; -impl InferenceTable { +impl InferenceTable { /// Create an empty inference table with no variables. pub fn new() -> Self { InferenceTable { - unify: ena::UnificationTable::new(), + unify: ena::unify::UnificationTable::new(), vars: vec![], max_universe: UniverseIndex::root(), } @@ -46,11 +47,12 @@ impl InferenceTable { /// corresponding existential variable, along with the /// instantiated result. 
pub fn from_canonical( + interner: I, num_universes: usize, - canonical: &Canonical, - ) -> (Self, Substitution, T) + canonical: Canonical, + ) -> (Self, Substitution, T) where - T: Fold + Clone, + T: HasInterner + TypeFoldable + Clone, { let mut table = InferenceTable::new(); @@ -59,8 +61,9 @@ impl InferenceTable { table.new_universe(); } - let subst = table.fresh_subst(&canonical.binders); - let value = canonical.value.fold_with(&mut &subst, 0).unwrap(); + let subst = table.fresh_subst(interner, canonical.binders.as_slice(interner)); + let value = subst.apply(canonical.value, interner); + // let value = canonical.value.fold_with(&mut &subst, 0).unwrap(); (table, subst, value) } @@ -72,22 +75,17 @@ impl InferenceTable { pub fn new_universe(&mut self) -> UniverseIndex { let u = self.max_universe.next(); self.max_universe = u; - debug!("new_universe: {:?}", u); + debug!("created new universe: {:?}", u); u } - /// Current maximum universe -- one that can see all existing names. - pub fn max_universe(&self) -> UniverseIndex { - self.max_universe - } - /// Creates a new inference variable and returns its index. The /// kind of the variable should be known by the caller, but is not /// tracked directly by the inference table. - crate fn new_variable(&mut self, ui: UniverseIndex) -> EnaVariable { + pub fn new_variable(&mut self, ui: UniverseIndex) -> EnaVariable { let var = self.unify.new_key(InferenceValue::Unbound(ui)); self.vars.push(var); - debug!("new_variable: var={:?} ui={:?}", var, ui); + debug!(?var, ?ui, "created new variable"); var } @@ -97,7 +95,7 @@ impl InferenceTable { /// must respect a stack discipline (i.e., rollback or commit /// snapshots in reverse order of that with which they were /// created). 
- pub fn snapshot(&mut self) -> InferenceSnapshot { + pub fn snapshot(&mut self) -> InferenceSnapshot { let unify_snapshot = self.unify.snapshot(); let vars = self.vars.clone(); let max_universe = self.max_universe; @@ -109,70 +107,78 @@ impl InferenceTable { } /// Restore the table to the state it had when the snapshot was taken. - pub fn rollback_to(&mut self, snapshot: InferenceSnapshot) { + pub fn rollback_to(&mut self, snapshot: InferenceSnapshot) { self.unify.rollback_to(snapshot.unify_snapshot); self.vars = snapshot.vars; self.max_universe = snapshot.max_universe; } /// Make permanent the changes made since the snapshot was taken. - pub fn commit(&mut self, snapshot: InferenceSnapshot) { + pub fn commit(&mut self, snapshot: InferenceSnapshot) { self.unify.commit(snapshot.unify_snapshot); } - /// If type `leaf` is a free inference variable, and that variable has been - /// bound, returns `Some(T)` where `T` is the type to which it has been bound. - /// - /// `binders` is the number of binders under which `leaf` appears; - /// the return value will also be shifted accordingly so that it - /// can appear under that same number of binders. - pub fn normalize_shallow(&mut self, leaf: &Ty) -> Option { - let var = EnaVariable::from(leaf.inference_var()?); - match self.unify.probe_value(var) { - InferenceValue::Unbound(_) => None, - InferenceValue::Bound(ref val) => { - let ty = val.as_ref().ty().unwrap().clone(); - assert!(!ty.needs_shift()); - Some(ty) - } - } + pub fn normalize_ty_shallow(&mut self, interner: I, leaf: &Ty) -> Option> { + // An integer/float type variable will never normalize to another + // variable; but a general type variable might normalize to an + // integer/float variable. So we potentially need to normalize twice to + // get at the actual value. 
+ self.normalize_ty_shallow_inner(interner, leaf) + .map(|ty| self.normalize_ty_shallow_inner(interner, &ty).unwrap_or(ty)) } - /// If `leaf` represents an inference variable `X`, and `X` is bound, - /// returns `Some(v)` where `v` is the value to which `X` is bound. - pub fn normalize_lifetime(&mut self, leaf: &Lifetime) -> Option { - let var = EnaVariable::from(leaf.inference_var()?); - let v1 = self.probe_lifetime_var(var)?; - assert!(!v1.needs_shift()); - Some(v1) + fn normalize_ty_shallow_inner(&mut self, interner: I, leaf: &Ty) -> Option> { + self.probe_var(leaf.inference_var(interner)?) + .map(|p| p.assert_ty_ref(interner).clone()) } - /// Finds the type to which `var` is bound, returning `None` if it is not yet - /// bound. - /// - /// # Panics - /// - /// This method is only valid for inference variables of kind - /// type. If this variable is of a different kind, then the - /// function may panic. - fn probe_ty_var(&mut self, var: EnaVariable) -> Option { - match self.unify.probe_value(var) { - InferenceValue::Unbound(_) => None, - InferenceValue::Bound(ref val) => Some(val.as_ref().ty().unwrap().clone()), - } + pub fn normalize_lifetime_shallow( + &mut self, + interner: I, + leaf: &Lifetime, + ) -> Option> { + self.probe_var(leaf.inference_var(interner)?) + .map(|p| p.assert_lifetime_ref(interner).clone()) + } + + pub fn normalize_const_shallow(&mut self, interner: I, leaf: &Const) -> Option> { + self.probe_var(leaf.inference_var(interner)?) + .map(|p| p.assert_const_ref(interner).clone()) + } + + pub fn ty_root(&mut self, interner: I, leaf: &Ty) -> Option> { + Some( + self.unify + .find(leaf.inference_var(interner)?) + .to_ty(interner), + ) } - /// Finds the lifetime to which `var` is bound, returning `None` if it is not yet - /// bound. + pub fn lifetime_root(&mut self, interner: I, leaf: &Lifetime) -> Option> { + Some( + self.unify + .find(leaf.inference_var(interner)?) 
+ .to_lifetime(interner), + ) + } + + /// Finds the root inference var for the given variable. /// - /// # Panics + /// The returned variable will be exactly equivalent to the given + /// variable except in name. All variables which have been unified to + /// each other (but don't yet have a value) have the same "root". /// - /// This method is only valid for inference variables of kind - /// lifetime. If this variable is of a different kind, then the function may panic. - fn probe_lifetime_var(&mut self, var: EnaVariable) -> Option { - match self.unify.probe_value(var) { + /// This is useful for `DeepNormalizer`. + pub fn inference_var_root(&mut self, var: InferenceVar) -> InferenceVar { + self.unify.find(var).into() + } + + /// If type `leaf` is a free inference variable, and that variable has been + /// bound, returns `Some(P)` where `P` is the parameter to which it has been bound. + pub fn probe_var(&mut self, leaf: InferenceVar) -> Option> { + match self.unify.probe_value(EnaVariable::from(leaf)) { InferenceValue::Unbound(_) => None, - InferenceValue::Bound(ref val) => Some(val.as_ref().lifetime().unwrap().clone()), + InferenceValue::Bound(val) => Some(val), } } @@ -181,7 +187,7 @@ impl InferenceTable { /// # Panics /// /// Panics if the variable is bound.
- fn universe_of_unbound_var(&mut self, var: EnaVariable) -> UniverseIndex { + fn universe_of_unbound_var(&mut self, var: EnaVariable) -> UniverseIndex { match self.unify.probe_value(var) { InferenceValue::Unbound(ui) => ui, InferenceValue::Bound(_) => panic!("var_universe invoked on bound variable"), @@ -189,15 +195,18 @@ impl InferenceTable { } } -pub trait ParameterEnaVariableExt { - fn to_parameter(self) -> Parameter; +pub trait ParameterEnaVariableExt { + fn to_generic_arg(&self, interner: I) -> GenericArg; } -impl ParameterEnaVariableExt for ParameterEnaVariable { - fn to_parameter(self) -> Parameter { - match self { - ParameterKind::Ty(v) => ParameterKind::Ty(v.to_ty()), - ParameterKind::Lifetime(v) => ParameterKind::Lifetime(v.to_lifetime()), +impl ParameterEnaVariableExt for ParameterEnaVariable { + fn to_generic_arg(&self, interner: I) -> GenericArg { + // we are matching on kind, so skipping it is fine + let ena_variable = self.skip_kind(); + match &self.kind { + VariableKind::Ty(kind) => ena_variable.to_ty_with_kind(interner, *kind).cast(interner), + VariableKind::Lifetime => ena_variable.to_lifetime(interner).cast(interner), + VariableKind::Const(ty) => ena_variable.to_const(interner, ty.clone()).cast(interner), } } } diff --git a/chalk-solve/src/infer/canonicalize.rs b/chalk-solve/src/infer/canonicalize.rs index 1c2f61e845a..ddec0515d6b 100644 --- a/chalk-solve/src/infer/canonicalize.rs +++ b/chalk-solve/src/infer/canonicalize.rs @@ -1,14 +1,15 @@ -use chalk_engine::fallible::*; +use crate::debug_span; +use chalk_derive::FallibleTypeFolder; use chalk_ir::fold::shift::Shift; -use chalk_ir::fold::{ - DefaultTypeFolder, Fold, DefaultFreeVarFolder, InferenceFolder, PlaceholderFolder, -}; +use chalk_ir::fold::{TypeFoldable, TypeFolder}; +use chalk_ir::interner::{HasInterner, Interner}; use chalk_ir::*; use std::cmp::max; +use tracing::{debug, instrument}; -use super::{EnaVariable, InferenceTable, ParameterEnaVariable}; +use super::{InferenceTable, 
ParameterEnaVariable}; -impl InferenceTable { +impl InferenceTable { /// Given a value `value` with variables in it, replaces those variables /// with their instantiated values; any variables not yet instantiated are /// replaced with a small integer index 0..N in order of appearance. The @@ -27,141 +28,210 @@ impl InferenceTable { /// /// A substitution mapping from the free variables to their re-bound form is /// also returned. - pub fn canonicalize(&mut self, value: &T) -> Canonicalized { - debug!("canonicalize({:#?})", value); + pub fn canonicalize(&mut self, interner: I, value: T) -> Canonicalized + where + T: TypeFoldable, + T: HasInterner, + { + debug_span!("canonicalize", "{:#?}", value); let mut q = Canonicalizer { table: self, free_vars: Vec::new(), max_universe: UniverseIndex::root(), + interner, }; - let value = value.fold_with(&mut q, 0).unwrap(); + let value = value + .try_fold_with(&mut q, DebruijnIndex::INNERMOST) + .unwrap(); let free_vars = q.free_vars.clone(); - let max_universe = q.max_universe; Canonicalized { quantified: Canonical { value, binders: q.into_binders(), }, - max_universe, free_vars, } } } #[derive(Debug)] -pub struct Canonicalized { +pub struct Canonicalized { /// The canonicalized result. pub quantified: Canonical, /// The free existential variables, along with the universes they inhabit. - crate free_vars: Vec, - - /// The maximum universe of any universally quantified variables - /// encountered. - max_universe: UniverseIndex, + pub free_vars: Vec>, } -struct Canonicalizer<'q> { - table: &'q mut InferenceTable, - free_vars: Vec, +#[derive(FallibleTypeFolder)] +struct Canonicalizer<'q, I: Interner> { + table: &'q mut InferenceTable, + free_vars: Vec>, max_universe: UniverseIndex, + interner: I, } -impl<'q> Canonicalizer<'q> { - fn into_binders(self) -> Vec> { +impl<'q, I: Interner> Canonicalizer<'q, I> { + fn into_binders(self) -> CanonicalVarKinds { let Canonicalizer { table, free_vars, - max_universe: _, + interner, + .. 
} = self; - free_vars - .into_iter() - .map(|p_v| p_v.map(|v| table.universe_of_unbound_var(v))) - .collect() + CanonicalVarKinds::from_iter( + interner, + free_vars + .into_iter() + .map(|p_v| p_v.map(|v| table.universe_of_unbound_var(v))), + ) } - fn add(&mut self, free_var: ParameterEnaVariable) -> usize { - self.free_vars.iter().position(|&v| v == free_var).unwrap_or_else(|| { - let next_index = self.free_vars.len(); - self.free_vars.push(free_var); - next_index - }) + fn add(&mut self, free_var: ParameterEnaVariable) -> usize { + self.max_universe = max( + self.max_universe, + self.table.universe_of_unbound_var(*free_var.skip_kind()), + ); + + self.free_vars + .iter() + .position(|v| v.skip_kind() == free_var.skip_kind()) + .unwrap_or_else(|| { + let next_index = self.free_vars.len(); + self.free_vars.push(free_var); + next_index + }) } } -impl<'q> DefaultTypeFolder for Canonicalizer<'q> {} +impl<'i, I: Interner> TypeFolder for Canonicalizer<'i, I> { + fn as_dyn(&mut self) -> &mut dyn TypeFolder { + self + } -impl<'q> PlaceholderFolder for Canonicalizer<'q> { fn fold_free_placeholder_ty( &mut self, universe: PlaceholderIndex, - _binders: usize, - ) -> Fallible { + _outer_binder: DebruijnIndex, + ) -> Ty { + let interner = self.interner; self.max_universe = max(self.max_universe, universe.ui); - Ok(universe.to_ty()) + universe.to_ty(interner) } fn fold_free_placeholder_lifetime( &mut self, universe: PlaceholderIndex, - _binders: usize, - ) -> Fallible { + _outer_binder: DebruijnIndex, + ) -> Lifetime { + let interner = self.interner; self.max_universe = max(self.max_universe, universe.ui); - Ok(universe.to_lifetime()) + universe.to_lifetime(interner) } -} -impl<'q> DefaultFreeVarFolder for Canonicalizer<'q> { - fn forbid() -> bool { + fn fold_free_placeholder_const( + &mut self, + ty: Ty, + universe: PlaceholderIndex, + _outer_binder: DebruijnIndex, + ) -> Const { + let interner = self.interner; + self.max_universe = max(self.max_universe, universe.ui); + 
universe.to_const(interner, ty) + } + + fn forbid_free_vars(&self) -> bool { true } -} -impl<'q> InferenceFolder for Canonicalizer<'q> { - fn fold_inference_ty(&mut self, var: InferenceVar, binders: usize) -> Fallible { - debug_heading!( - "fold_inference_ty(depth={:?}, binders={:?})", - var, - binders - ); - let var = EnaVariable::from(var); - match self.table.probe_ty_var(var) { + #[instrument(level = "debug", skip(self))] + fn fold_inference_ty( + &mut self, + var: InferenceVar, + kind: TyVariableKind, + outer_binder: DebruijnIndex, + ) -> Ty { + let interner = self.interner; + match self.table.probe_var(var) { Some(ty) => { + let ty = ty.assert_ty_ref(interner); debug!("bound to {:?}", ty); - Ok(ty.fold_with(self, 0)?.shifted_in(binders)) + ty.clone() + .fold_with(self, DebruijnIndex::INNERMOST) + .shifted_in_from(interner, outer_binder) } None => { // If this variable is not yet bound, find its // canonical index `root_var` in the union-find table, // and then map `root_var` to a fresh index that is // unique to this quantification. 
- let free_var = ParameterKind::Ty(self.table.unify.find(var)); - let position = self.add(free_var); - debug!("not yet unified: position={:?}", position); - Ok(Ty::BoundVar(position + binders)) + let free_var = + ParameterEnaVariable::new(VariableKind::Ty(kind), self.table.unify.find(var)); + + let bound_var = BoundVar::new(DebruijnIndex::INNERMOST, self.add(free_var)); + debug!(position=?bound_var, "not yet unified"); + TyKind::BoundVar(bound_var.shifted_in_from(outer_binder)).intern(interner) } } } - fn fold_inference_lifetime(&mut self, var: InferenceVar, binders: usize) -> Fallible { - debug_heading!( - "fold_inference_lifetime(depth={:?}, binders={:?})", - var, - binders - ); - let var = EnaVariable::from(var); - match self.table.probe_lifetime_var(var) { + #[instrument(level = "debug", skip(self))] + fn fold_inference_lifetime( + &mut self, + var: InferenceVar, + outer_binder: DebruijnIndex, + ) -> Lifetime { + let interner = self.interner; + match self.table.probe_var(var) { Some(l) => { + let l = l.assert_lifetime_ref(interner); debug!("bound to {:?}", l); - Ok(l.fold_with(self, 0)?.shifted_in(binders)) + l.clone() + .fold_with(self, DebruijnIndex::INNERMOST) + .shifted_in_from(interner, outer_binder) } None => { - let free_var = ParameterKind::Lifetime(self.table.unify.find(var)); - let position = self.add(free_var); - debug!("not yet unified: position={:?}", position); - Ok(Lifetime::BoundVar(position + binders)) + let free_var = + ParameterEnaVariable::new(VariableKind::Lifetime, self.table.unify.find(var)); + let bound_var = BoundVar::new(DebruijnIndex::INNERMOST, self.add(free_var)); + debug!(position=?bound_var, "not yet unified"); + LifetimeData::BoundVar(bound_var.shifted_in_from(outer_binder)).intern(interner) } } } + + #[instrument(level = "debug", skip(self, ty))] + fn fold_inference_const( + &mut self, + ty: Ty, + var: InferenceVar, + outer_binder: DebruijnIndex, + ) -> Const { + let interner = self.interner; + match self.table.probe_var(var) { 
+ Some(c) => { + let c = c.assert_const_ref(interner); + debug!("bound to {:?}", c); + c.clone() + .fold_with(self, DebruijnIndex::INNERMOST) + .shifted_in_from(interner, outer_binder) + } + None => { + let free_var = ParameterEnaVariable::new( + VariableKind::Const(ty.clone()), + self.table.unify.find(var), + ); + let bound_var = BoundVar::new(DebruijnIndex::INNERMOST, self.add(free_var)); + debug!(position = ?bound_var, "not yet unified"); + bound_var + .shifted_in_from(outer_binder) + .to_const(interner, ty) + } + } + } + + fn interner(&self) -> I { + self.interner + } } diff --git a/chalk-solve/src/infer/instantiate.rs b/chalk-solve/src/infer/instantiate.rs index 6f182d614d5..161271f2389 100644 --- a/chalk-solve/src/infer/instantiate.rs +++ b/chalk-solve/src/infer/instantiate.rs @@ -1,33 +1,37 @@ use chalk_ir::fold::*; +use chalk_ir::interner::HasInterner; use std::fmt::Debug; +use tracing::instrument; use super::*; -impl InferenceTable { +impl InferenceTable { /// Given the binders from a canonicalized value C, returns a /// substitution S mapping each free variable in C to a fresh /// inference variable. This substitution can then be applied to /// C, which would be equivalent to /// `self.instantiate_canonical(v)`. - pub fn fresh_subst(&mut self, binders: &[ParameterKind]) -> Substitution { - Substitution { - parameters: binders - .iter() - .map(|kind| { - let param_infer_var = kind.map(|ui| self.new_variable(ui)); - param_infer_var.to_parameter() - }) - .collect(), - } + pub(super) fn fresh_subst( + &mut self, + interner: I, + binders: &[CanonicalVarKind], + ) -> Substitution { + Substitution::from_iter( + interner, + binders.iter().map(|kind| { + let param_infer_var = kind.map_ref(|&ui| self.new_variable(ui)); + param_infer_var.to_generic_arg(interner) + }), + ) } /// Variant on `instantiate` that takes a `Canonical`. 
- pub fn instantiate_canonical(&mut self, bound: &Canonical) -> T::Result + pub fn instantiate_canonical(&mut self, interner: I, bound: Canonical) -> T where - T: Fold + Debug, + T: HasInterner + TypeFoldable + Debug, { - let subst = self.fresh_subst(&bound.binders); - bound.value.fold_with(&mut &subst, 0).unwrap() + let subst = self.fresh_subst(interner, bound.binders.as_slice(interner)); + subst.apply(bound.value, interner) } /// Instantiates `arg` with fresh existential variables in the @@ -35,81 +39,73 @@ impl InferenceTable { /// `binders`. This is used to apply a universally quantified /// clause like `forall X, 'Y. P => Q`. Here the `binders` /// argument is referring to `X, 'Y`. - pub fn instantiate_in( + fn instantiate_in( &mut self, + interner: I, universe: UniverseIndex, - binders: U, - arg: &T, - ) -> T::Result + binders: impl Iterator>, + arg: T, + ) -> T where - T: Fold, - U: IntoIterator>, + T: TypeFoldable, { - let binders: Vec<_> = binders.into_iter().map(|pk| pk.map(|()| universe)).collect(); - let subst = self.fresh_subst(&binders); - arg.fold_with(&mut &subst, 0).unwrap() + let binders: Vec<_> = binders + .map(|pk| CanonicalVarKind::new(pk, universe)) + .collect(); + let subst = self.fresh_subst(interner, &binders); + subst.apply(arg, interner) } /// Variant on `instantiate_in` that takes a `Binders`. 
- #[allow(non_camel_case_types)] - pub fn instantiate_binders_existentially( - &mut self, - arg: &impl BindersAndValue, - ) -> T::Result + #[instrument(level = "debug", skip(self, interner))] + pub fn instantiate_binders_existentially(&mut self, interner: I, arg: Binders) -> T where - T: Fold, + T: TypeFoldable + HasInterner, { - let (binders, value) = arg.split(); + let (value, binders) = arg.into_value_and_skipped_binders(); + let max_universe = self.max_universe; - self.instantiate_in(max_universe, binders.iter().cloned(), value) + self.instantiate_in( + interner, + max_universe, + binders.iter(interner).cloned(), + value, + ) } - #[allow(non_camel_case_types)] - pub fn instantiate_binders_universally( - &mut self, - arg: &impl BindersAndValue, - ) -> T::Result + #[instrument(level = "debug", skip(self, interner))] + pub fn instantiate_binders_universally(&mut self, interner: I, arg: Binders) -> T where - T: Fold, + T: TypeFoldable + HasInterner, { - let (binders, value) = arg.split(); - let ui = self.new_universe(); + let (value, binders) = arg.into_value_and_skipped_binders(); + + let mut lazy_ui = None; + let mut ui = || { + lazy_ui.unwrap_or_else(|| { + let ui = self.new_universe(); + lazy_ui = Some(ui); + ui + }) + }; let parameters: Vec<_> = binders - .iter() + .iter(interner) + .cloned() .enumerate() .map(|(idx, pk)| { - let placeholder_idx = PlaceholderIndex { ui, idx }; - match *pk { - ParameterKind::Lifetime(()) => { - let lt = placeholder_idx.to_lifetime(); - ParameterKind::Lifetime(lt) + let placeholder_idx = PlaceholderIndex { ui: ui(), idx }; + match pk { + VariableKind::Lifetime => { + let lt = placeholder_idx.to_lifetime(interner); + lt.cast(interner) + } + VariableKind::Ty(_) => placeholder_idx.to_ty(interner).cast(interner), + VariableKind::Const(ty) => { + placeholder_idx.to_const(interner, ty).cast(interner) } - ParameterKind::Ty(()) => ParameterKind::Ty(placeholder_idx.to_ty()), } }) .collect(); - Subst::apply(¶meters, value) - } -} - -pub 
trait BindersAndValue { - type Output; - - fn split(&self) -> (&[ParameterKind<()>], &Self::Output); -} - -impl BindersAndValue for Binders { - type Output = T; - - fn split(&self) -> (&[ParameterKind<()>], &Self::Output) { - (&self.binders, &self.value) - } -} - -impl<'a, T> BindersAndValue for (&'a Vec>, &'a T) { - type Output = T; - - fn split(&self) -> (&[ParameterKind<()>], &Self::Output) { - (&self.0, &self.1) + Subst::apply(interner, ¶meters, value) } } diff --git a/chalk-solve/src/infer/invert.rs b/chalk-solve/src/infer/invert.rs index 853546ff62d..e5bc3590ced 100644 --- a/chalk-solve/src/infer/invert.rs +++ b/chalk-solve/src/infer/invert.rs @@ -1,15 +1,15 @@ -use chalk_engine::fallible::*; +use chalk_derive::FallibleTypeFolder; use chalk_ir::fold::shift::Shift; -use chalk_ir::fold::{ - DefaultFreeVarFolder, DefaultInferenceFolder, DefaultTypeFolder, Fold, PlaceholderFolder, -}; +use chalk_ir::fold::{TypeFoldable, TypeFolder}; +use chalk_ir::interner::HasInterner; +use chalk_ir::interner::Interner; use chalk_ir::*; -use std::collections::HashMap; +use rustc_hash::FxHashMap; use super::canonicalize::Canonicalized; use super::{EnaVariable, InferenceTable}; -impl InferenceTable { +impl InferenceTable { /// Converts `value` into a "negation" value -- meaning one that, /// if we can find any answer to it, then the negation fails. For /// goals that do not contain any free variables, then this is a @@ -19,10 +19,10 @@ impl InferenceTable { /// yet been assigned a value, then this function will return /// `None`, indicating that we cannot prove negation for this goal /// yet. This follows the approach in Clark's original - /// negation-as-failure paper [1], where negative goals are only + /// [negation-as-failure paper][1], where negative goals are only /// permitted if they contain no free (existential) variables. 
/// - /// [1] http://www.doc.ic.ac.uk/~klc/NegAsFailure.pdf + /// [1]: https://www.doc.ic.ac.uk/~klc/NegAsFailure.pdf /// /// Restricting free existential variables is done because the /// semantics of such queries is not what you expect: it basically @@ -47,7 +47,7 @@ impl InferenceTable { /// An additional complication arises around free universal /// variables. Consider a query like `not { !0 = !1 }`, where /// `!0` and `!1` are placeholders for universally quantified - /// types (i.e., `TypeName::Placeholder`). If we just tried to + /// types (i.e., `TyKind::Placeholder`). If we just tried to /// prove `!0 = !1`, we would get false, because those types /// cannot be unified -- this would then allow us to conclude that /// `not { !0 = !1 }`, i.e., `forall { not { X = Y } }`, but @@ -64,7 +64,7 @@ impl InferenceTable { /// /// (One could imagine converting free existentials into /// universals, rather than forbidding them altogether. This would - /// be conveivable, but overly strict. For example, the goal + /// be conceivable, but overly strict. For example, the goal /// `exists { not { ?T: Clone }, ?T = Vec }` would come /// back as false, when clearly this is true. This is because we /// would wind up proving that `?T: Clone` can *never* be @@ -72,15 +72,15 @@ impl InferenceTable { /// `?T: Clone` in the case where `?T = Vec`. The current /// version would delay processing the negative goal (i.e., return /// `None`) until the second unification has occurred.) - pub fn invert(&mut self, value: &T) -> Option + pub fn invert(&mut self, interner: I, value: T) -> Option where - T: Fold, + T: TypeFoldable + HasInterner, { let Canonicalized { free_vars, quantified, .. - } = self.canonicalize(&value); + } = self.canonicalize(interner, value); // If the original contains free existential variables, give up. 
if !free_vars.is_empty() { @@ -88,71 +88,87 @@ impl InferenceTable { } // If this contains free universal variables, replace them with existentials. - assert!(quantified.binders.is_empty()); + assert!(quantified.binders.is_empty(interner)); let inverted = quantified .value - .fold_with(&mut Inverter::new(self), 0) + .try_fold_with(&mut Inverter::new(interner, self), DebruijnIndex::INNERMOST) .unwrap(); Some(inverted) } + + /// As `negated_instantiated`, but canonicalizes before + /// returning. Just a convenience function. + pub fn invert_then_canonicalize(&mut self, interner: I, value: T) -> Option> + where + T: TypeFoldable + HasInterner, + { + let snapshot = self.snapshot(); + let result = self.invert(interner, value); + let result = result.map(|r| self.canonicalize(interner, r).quantified); + self.rollback_to(snapshot); + result + } } -struct Inverter<'q> { - table: &'q mut InferenceTable, - inverted_ty: HashMap, - inverted_lifetime: HashMap, +#[derive(FallibleTypeFolder)] +struct Inverter<'q, I: Interner> { + table: &'q mut InferenceTable, + inverted_ty: FxHashMap>, + inverted_lifetime: FxHashMap>, + interner: I, } -impl<'q> Inverter<'q> { - fn new(table: &'q mut InferenceTable) -> Self { +impl<'q, I: Interner> Inverter<'q, I> { + fn new(interner: I, table: &'q mut InferenceTable) -> Self { Inverter { table, - inverted_ty: HashMap::new(), - inverted_lifetime: HashMap::new(), + inverted_ty: FxHashMap::default(), + inverted_lifetime: FxHashMap::default(), + interner, } } } -impl<'q> DefaultTypeFolder for Inverter<'q> {} +impl<'i, I: Interner> TypeFolder for Inverter<'i, I> { + fn as_dyn(&mut self) -> &mut dyn TypeFolder { + self + } -impl<'q> PlaceholderFolder for Inverter<'q> { fn fold_free_placeholder_ty( &mut self, universe: PlaceholderIndex, - binders: usize, - ) -> Fallible { + _outer_binder: DebruijnIndex, + ) -> Ty { let table = &mut self.table; - Ok(self - .inverted_ty + self.inverted_ty .entry(universe) .or_insert_with(|| 
table.new_variable(universe.ui)) - .to_ty() - .shifted_in(binders)) + .to_ty(TypeFolder::interner(self)) + .shifted_in(TypeFolder::interner(self)) } fn fold_free_placeholder_lifetime( &mut self, universe: PlaceholderIndex, - binders: usize, - ) -> Fallible { + _outer_binder: DebruijnIndex, + ) -> Lifetime { let table = &mut self.table; - Ok(self - .inverted_lifetime + self.inverted_lifetime .entry(universe) .or_insert_with(|| table.new_variable(universe.ui)) - .to_lifetime() - .shifted_in(binders)) + .to_lifetime(TypeFolder::interner(self)) + .shifted_in(TypeFolder::interner(self)) } -} -impl<'q> DefaultFreeVarFolder for Inverter<'q> { - fn forbid() -> bool { + fn forbid_free_vars(&self) -> bool { true } -} -impl<'q> DefaultInferenceFolder for Inverter<'q> { - fn forbid() -> bool { + fn forbid_inference_vars(&self) -> bool { true } + + fn interner(&self) -> I { + self.interner + } } diff --git a/chalk-solve/src/infer/normalize_deep.rs b/chalk-solve/src/infer/normalize_deep.rs deleted file mode 100644 index 43020de6454..00000000000 --- a/chalk-solve/src/infer/normalize_deep.rs +++ /dev/null @@ -1,59 +0,0 @@ -use chalk_engine::fallible::*; -use chalk_ir::fold::shift::Shift; -use chalk_ir::fold::{ - DefaultFreeVarFolder, DefaultPlaceholderFolder, DefaultTypeFolder, Fold, InferenceFolder, -}; -use chalk_ir::*; - -use super::{EnaVariable, InferenceTable}; - -impl InferenceTable { - /// Given a value `value` with variables in it, replaces those variables - /// with their instantiated values (if any). Uninstantiated variables are - /// left as-is. - /// - /// This is mainly intended for getting final values to dump to - /// the user and its use should otherwise be avoided, particularly - /// given the possibility of snapshots and rollbacks. - /// - /// See also `InferenceTable::canonicalize`, which -- during real - /// processing -- is often used to capture the "current state" of - /// variables. 
- pub fn normalize_deep(&mut self, value: &T) -> T::Result { - value - .fold_with(&mut DeepNormalizer { table: self }, 0) - .unwrap() - } -} - -struct DeepNormalizer<'table> { - table: &'table mut InferenceTable, -} - -impl<'table> DefaultTypeFolder for DeepNormalizer<'table> {} - -impl<'table> DefaultPlaceholderFolder for DeepNormalizer<'table> {} - -impl<'table> InferenceFolder for DeepNormalizer<'table> { - fn fold_inference_ty(&mut self, var: InferenceVar, binders: usize) -> Fallible { - let var = EnaVariable::from(var); - match self.table.probe_ty_var(var) { - Some(ty) => Ok(ty.fold_with(self, 0)?.shifted_in(binders)), // FIXME shift - None => Ok(var.to_ty()), - } - } - - fn fold_inference_lifetime(&mut self, var: InferenceVar, binders: usize) -> Fallible { - let var = EnaVariable::from(var); - match self.table.probe_lifetime_var(var) { - Some(l) => Ok(l.fold_with(self, 0)?.shifted_in(binders)), - None => Ok(var.to_lifetime()), // FIXME shift - } - } -} - -impl<'table> DefaultFreeVarFolder for DeepNormalizer<'table> { - fn forbid() -> bool { - true - } -} diff --git a/chalk-solve/src/infer/test.rs b/chalk-solve/src/infer/test.rs index cb5c43136ec..2ac35f0275d 100644 --- a/chalk-solve/src/infer/test.rs +++ b/chalk-solve/src/infer/test.rs @@ -1,117 +1,221 @@ #![cfg(test)] +use super::unify::RelationResult; use super::*; -use super::unify::UnificationResult; +use chalk_integration::interner::ChalkIr; +use chalk_integration::{arg, lifetime, ty}; -#[test] -fn infer() { - let mut table = InferenceTable::new(); - let environment0 = Environment::new(); - let a = table.new_variable(U0).to_ty(); - let b = table.new_variable(U0).to_ty(); - table - .unify(&environment0, &a, &ty!(apply (item 0) (expr b))) - .unwrap(); - assert_eq!(table.normalize_deep(&a), ty!(apply (item 0) (expr b))); - table - .unify(&environment0, &b, &ty!(apply (item 1))) - .unwrap(); - assert_eq!(table.normalize_deep(&a), ty!(apply (item 0) (apply (item 1)))); +// We just use a vec of 20 `Invariant`, 
since this is zipped and no substs are +// longer than this +#[derive(Debug)] +struct TestDatabase; +impl UnificationDatabase for TestDatabase { + fn fn_def_variance(&self, _fn_def_id: FnDefId) -> Variances { + Variances::from_iter(ChalkIr, [Variance::Invariant; 20].iter().copied()) + } + + fn adt_variance(&self, _adt_id: AdtId) -> Variances { + Variances::from_iter(ChalkIr, [Variance::Invariant; 20].iter().copied()) + } } #[test] fn universe_error() { // exists(A -> forall(X -> A = X)) ---> error - let mut table = InferenceTable::new(); - let environment0 = Environment::new(); - let a = table.new_variable(U0).to_ty(); + let interner = ChalkIr; + let mut table: InferenceTable = InferenceTable::new(); + let environment0 = Environment::new(interner); + let a = table.new_variable(U0).to_ty(interner); table - .unify(&environment0, &a, &ty!(apply (placeholder 1))) + .relate( + interner, + &TestDatabase, + &environment0, + Variance::Invariant, + &a, + &ty!(placeholder 1), + ) .unwrap_err(); } #[test] fn cycle_error() { // exists(A -> A = foo A) ---> error - let mut table = InferenceTable::new(); - let environment0 = Environment::new(); - let a = table.new_variable(U0).to_ty(); + let interner = ChalkIr; + let mut table: InferenceTable = InferenceTable::new(); + let environment0 = Environment::new(interner); + let a = table.new_variable(U0).to_ty(interner); table - .unify(&environment0, &a, &ty!(apply (item 0) (expr a))) + .relate( + interner, + &TestDatabase, + &environment0, + Variance::Invariant, + &a, + &ty!(apply (item 0) (expr a)), + ) .unwrap_err(); // exists(A -> A = for<'a> A) table - .unify(&environment0, &a, &ty!(for_all 1 (infer 0))) + .relate( + interner, + &TestDatabase, + &environment0, + Variance::Invariant, + &a, + &ty!(function 1 (infer 0)), + ) .unwrap_err(); } #[test] fn cycle_indirect() { // exists(A -> A = foo B, A = B) ---> error - let mut table = InferenceTable::new(); - let environment0 = Environment::new(); - let a = 
table.new_variable(U0).to_ty(); - let b = table.new_variable(U0).to_ty(); + let interner = ChalkIr; + let mut table: InferenceTable = InferenceTable::new(); + let environment0 = Environment::new(interner); + let a = table.new_variable(U0).to_ty(interner); + let b = table.new_variable(U0).to_ty(interner); table - .unify(&environment0, &a, &ty!(apply (item 0) (expr b))) + .relate( + interner, + &TestDatabase, + &environment0, + Variance::Invariant, + &a, + &ty!(apply (item 0) (expr b)), + ) .unwrap(); - table.unify(&environment0, &a, &b).unwrap_err(); + table + .relate( + interner, + &TestDatabase, + &environment0, + Variance::Invariant, + &a, + &b, + ) + .unwrap_err(); } #[test] fn universe_error_indirect_1() { // exists(A -> forall(X -> exists(B -> B = X, A = B))) ---> error - let mut table = InferenceTable::new(); - let environment0 = Environment::new(); - let a = table.new_variable(U0).to_ty(); - let b = table.new_variable(U1).to_ty(); + let interner = ChalkIr; + let mut table: InferenceTable = InferenceTable::new(); + let environment0 = Environment::new(interner); + let a = table.new_variable(U0).to_ty(interner); + let b = table.new_variable(U1).to_ty(interner); table - .unify(&environment0, &b, &ty!(apply (placeholder 1))) + .relate( + interner, + &TestDatabase, + &environment0, + Variance::Invariant, + &b, + &ty!(placeholder 1), + ) .unwrap(); - table.unify(&environment0, &a, &b).unwrap_err(); + table + .relate( + interner, + &TestDatabase, + &environment0, + Variance::Invariant, + &a, + &b, + ) + .unwrap_err(); } #[test] fn universe_error_indirect_2() { // exists(A -> forall(X -> exists(B -> B = A, B = X))) ---> error - let mut table = InferenceTable::new(); - let environment0 = Environment::new(); - let a = table.new_variable(U0).to_ty(); - let b = table.new_variable(U1).to_ty(); - table.unify(&environment0, &a, &b).unwrap(); + let interner = ChalkIr; + let mut table: InferenceTable = InferenceTable::new(); + let environment0 = Environment::new(interner); + 
let a = table.new_variable(U0).to_ty(interner); + let b = table.new_variable(U1).to_ty(interner); table - .unify(&environment0, &b, &ty!(apply (placeholder 1))) + .relate( + interner, + &TestDatabase, + &environment0, + Variance::Invariant, + &a, + &b, + ) + .unwrap(); + table + .relate( + interner, + &TestDatabase, + &environment0, + Variance::Invariant, + &b, + &ty!(placeholder 1), + ) .unwrap_err(); } #[test] fn universe_promote() { // exists(A -> forall(X -> exists(B -> A = foo(B), A = foo(i32)))) ---> OK - let mut table = InferenceTable::new(); - let environment0 = Environment::new(); - let a = table.new_variable(U0).to_ty(); - let b = table.new_variable(U1).to_ty(); + let interner = ChalkIr; + let mut table: InferenceTable = InferenceTable::new(); + let environment0 = Environment::new(interner); + let a = table.new_variable(U0).to_ty(interner); + let b = table.new_variable(U1).to_ty(interner); table - .unify(&environment0, &a, &ty!(apply (item 0) (expr b))) + .relate( + interner, + &TestDatabase, + &environment0, + Variance::Invariant, + &a, + &ty!(apply (item 0) (expr b)), + ) .unwrap(); table - .unify(&environment0, &a, &ty!(apply (item 0) (apply (item 1)))) + .relate( + interner, + &TestDatabase, + &environment0, + Variance::Invariant, + &a, + &ty!(apply (item 0) (apply (item 1))), + ) .unwrap(); } #[test] fn universe_promote_bad() { // exists(A -> forall(X -> exists(B -> A = foo(B), B = X))) ---> error - let mut table = InferenceTable::new(); - let environment0 = Environment::new(); - let a = table.new_variable(U0).to_ty(); - let b = table.new_variable(U1).to_ty(); + let interner = ChalkIr; + let mut table: InferenceTable = InferenceTable::new(); + let environment0 = Environment::new(interner); + let a = table.new_variable(U0).to_ty(interner); + let b = table.new_variable(U1).to_ty(interner); table - .unify(&environment0, &a, &ty!(apply (item 0) (expr b))) + .relate( + interner, + &TestDatabase, + &environment0, + Variance::Invariant, + &a, + &ty!(apply 
(item 0) (expr b)), + ) .unwrap(); table - .unify(&environment0, &b, &ty!(apply (placeholder 1))) + .relate( + interner, + &TestDatabase, + &environment0, + Variance::Invariant, + &b, + &ty!(placeholder 1), + ) .unwrap_err(); } @@ -120,14 +224,18 @@ fn projection_eq() { // exists(A -> A = Item0<::foo>) // ^^^^^^^^^^^^ Can A repeat here? For now, // we say no, but it's an interesting question. - let mut table = InferenceTable::new(); - let environment0 = Environment::new(); - let a = table.new_variable(U0).to_ty(); + let interner = ChalkIr; + let mut table: InferenceTable = InferenceTable::new(); + let environment0 = Environment::new(interner); + let a = table.new_variable(U0).to_ty(interner); // expect an error ("cycle during unification") table - .unify( + .relate( + interner, + &TestDatabase, &environment0, + Variance::Invariant, &a, &ty!(apply (item 0) (projection (item 1) (expr a))), ) @@ -138,8 +246,8 @@ const U0: UniverseIndex = UniverseIndex { counter: 0 }; const U1: UniverseIndex = UniverseIndex { counter: 1 }; const U2: UniverseIndex = UniverseIndex { counter: 2 }; -fn make_table() -> InferenceTable { - let mut table = InferenceTable::new(); +fn make_table() -> InferenceTable { + let mut table: InferenceTable = InferenceTable::new(); let _ = table.new_universe(); // U1 let _ = table.new_universe(); // U2 table @@ -147,6 +255,7 @@ fn make_table() -> InferenceTable { #[test] fn quantify_simple() { + let interner = ChalkIr; let mut table = make_table(); let _ = table.new_variable(U0); let _ = table.new_variable(U1); @@ -154,32 +263,39 @@ fn quantify_simple() { assert_eq!( table - .canonicalize(&ty!(apply (item 0) (infer 2) (infer 1) (infer 0))) + .canonicalize(interner, ty!(apply (item 0) (infer 2) (infer 1) (infer 0))) .quantified, Canonical { value: ty!(apply (item 0) (bound 0) (bound 1) (bound 2)), - binders: vec![ - ParameterKind::Ty(U2), - ParameterKind::Ty(U1), - ParameterKind::Ty(U0), - ], + binders: CanonicalVarKinds::from_iter( + interner, + vec![ + 
CanonicalVarKind::new(VariableKind::Ty(TyVariableKind::General), U2), + CanonicalVarKind::new(VariableKind::Ty(TyVariableKind::General), U1), + CanonicalVarKind::new(VariableKind::Ty(TyVariableKind::General), U0), + ] + ), } ); } #[test] fn quantify_bound() { + let interner = ChalkIr; let mut table = make_table(); - let environment0 = Environment::new(); + let environment0 = Environment::new(interner); - let v0 = table.new_variable(U0).to_ty(); - let v1 = table.new_variable(U1).to_ty(); - let v2a = table.new_variable(U2).to_ty(); - let v2b = table.new_variable(U2).to_ty(); + let v0 = table.new_variable(U0).to_ty(interner); + let v1 = table.new_variable(U1).to_ty(interner); + let v2a = table.new_variable(U2).to_ty(interner); + let v2b = table.new_variable(U2).to_ty(interner); table - .unify( + .relate( + interner, + &TestDatabase, &environment0, + Variance::Invariant, &v2b, &ty!(apply (item 1) (expr v1) (expr v0)), ) @@ -187,66 +303,96 @@ fn quantify_bound() { assert_eq!( table - .canonicalize(&ty!(apply (item 0) (expr v2b) (expr v2a) (expr v1) (expr v0))) + .canonicalize( + interner, + ty!(apply (item 0) (expr v2b) (expr v2a) (expr v1) (expr v0)) + ) .quantified, Canonical { value: ty!(apply (item 0) (apply (item 1) (bound 0) (bound 1)) (bound 2) (bound 0) (bound 1)), - binders: vec![ - ParameterKind::Ty(U1), - ParameterKind::Ty(U0), - ParameterKind::Ty(U2), - ], + binders: CanonicalVarKinds::from_iter( + interner, + vec![ + CanonicalVarKind::new(VariableKind::Ty(TyVariableKind::General), U1), + CanonicalVarKind::new(VariableKind::Ty(TyVariableKind::General), U0), + CanonicalVarKind::new(VariableKind::Ty(TyVariableKind::General), U2), + ] + ), } ); } #[test] fn quantify_ty_under_binder() { + let interner = ChalkIr; let mut table = make_table(); let v0 = table.new_variable(U0); let v1 = table.new_variable(U0); let _r2 = table.new_variable(U0); // Unify v0 and v1. 
- let environment0 = Environment::new(); + let environment0 = Environment::new(interner); table - .unify(&environment0, &v0.to_ty(), &v1.to_ty()) + .relate( + interner, + &TestDatabase, + &environment0, + Variance::Invariant, + &v0.to_ty(interner), + &v1.to_ty(interner), + ) .unwrap(); - // Here: the `for_all` introduces 3 binders, so in the result, + // Here: the `function` introduces 3 binders, so in the result, // `(bound 3)` references the first canonicalized inference // variable. -- note that `infer 0` and `infer 1` have been // unified above, as well. assert_eq!( table .canonicalize( - &ty!(for_all 3 (apply (item 0) (bound 1) (infer 0) (infer 1) (lifetime (infer 2)))) + interner, + ty!(function 3 (apply (item 0) (bound 1) (infer 0) (infer 1) (lifetime (infer 2)))) ) .quantified, Canonical { - value: ty!(for_all 3 (apply (item 0) (bound 1) (bound 3) (bound 3) (lifetime (bound 4)))), - binders: vec![ParameterKind::Ty(U0), ParameterKind::Lifetime(U0)], + value: ty!(function 3 (apply (item 0) (bound 1) (bound 1 0) (bound 1 0) (lifetime (bound 1 1)))), + binders: CanonicalVarKinds::from_iter( + interner, + vec![ + CanonicalVarKind::new(VariableKind::Ty(TyVariableKind::General), U0), + CanonicalVarKind::new(VariableKind::Lifetime, U0) + ] + ), } ); } #[test] fn lifetime_constraint_indirect() { - let mut table = InferenceTable::new(); + let interner = ChalkIr; + let mut table: InferenceTable = InferenceTable::new(); let _ = table.new_universe(); // U1 let _t_0 = table.new_variable(U0); let _l_1 = table.new_variable(U1); - let environment0 = Environment::new(); + let environment0 = Environment::new(interner); // Here, we unify '?1 (the lifetime variable in universe 1) with // '!1. 
let t_a = ty!(apply (item 0) (lifetime (placeholder 1))); let t_b = ty!(apply (item 0) (lifetime (infer 1))); - let UnificationResult { goals, constraints } = table.unify(&environment0, &t_a, &t_b).unwrap(); + let RelationResult { goals } = table + .relate( + interner, + &TestDatabase, + &environment0, + Variance::Invariant, + &t_a, + &t_b, + ) + .unwrap(); assert!(goals.is_empty()); - assert!(constraints.is_empty()); // Here, we try to unify `?0` (the type variable in universe 0) // with something that involves `'?1`. Since `'?1` has been @@ -254,11 +400,23 @@ fn lifetime_constraint_indirect() { // we will replace `'!1` with a new variable `'?2` and introduce a // (likely unsatisfiable) constraint relating them. let t_c = ty!(infer 0); - let UnificationResult { goals, constraints } = table.unify(&environment0, &t_c, &t_b).unwrap(); - assert!(goals.is_empty()); - assert_eq!(constraints.len(), 1); + let RelationResult { goals } = table + .relate( + interner, + &TestDatabase, + &environment0, + Variance::Invariant, + &t_c, + &t_b, + ) + .unwrap(); + assert_eq!(goals.len(), 2); + assert_eq!( + format!("{:?}", goals[0]), + "InEnvironment { environment: Env([]), goal: \'?2: \'!1_0 }", + ); assert_eq!( - format!("{:?}", constraints[0]), - "InEnvironment { environment: Env([]), goal: \'?2 == \'!1_0 }", + format!("{:?}", goals[1]), + "InEnvironment { environment: Env([]), goal: \'!1_0: \'?2 }", ); } diff --git a/chalk-solve/src/infer/ucanonicalize.rs b/chalk-solve/src/infer/ucanonicalize.rs index f599f7bfb95..b44880e3764 100644 --- a/chalk-solve/src/infer/ucanonicalize.rs +++ b/chalk-solve/src/infer/ucanonicalize.rs @@ -1,42 +1,57 @@ -use chalk_engine::fallible::*; -use chalk_ir::fold::{ - DefaultFreeVarFolder, DefaultInferenceFolder, DefaultTypeFolder, Fold, PlaceholderFolder, -}; +use crate::debug_span; +use chalk_derive::FallibleTypeFolder; +use chalk_ir::fold::{TypeFoldable, TypeFolder}; +use chalk_ir::interner::{HasInterner, Interner}; +use 
chalk_ir::visit::{TypeVisitable, TypeVisitor}; use chalk_ir::*; +use std::ops::ControlFlow; use super::InferenceTable; -impl InferenceTable { - pub fn u_canonicalize(&mut self, value0: &Canonical) -> UCanonicalized { - debug!("u_canonicalize({:#?})", value0); +impl InferenceTable { + pub fn u_canonicalize(interner: I, value0: &Canonical) -> UCanonicalized + where + T: Clone + HasInterner + TypeFoldable + TypeVisitable, + T: HasInterner, + { + debug_span!("u_canonicalize", "{:#?}", value0); // First, find all the universes that appear in `value`. let mut universes = UniverseMap::new(); - value0 - .value - .fold_with( - &mut UCollector { - universes: &mut universes, - }, - 0, - ).unwrap(); + + for universe in value0.binders.iter(interner) { + universes.add(*universe.skip_kind()); + } + + value0.value.visit_with( + &mut UCollector { + universes: &mut universes, + interner, + }, + DebruijnIndex::INNERMOST, + ); // Now re-map the universes found in value. We have to do this // in a second pass because it is only then that we know the // full set of universes found in the original value. let value1 = value0 .value - .fold_with( + .clone() + .try_fold_with( &mut UMapToCanonical { universes: &universes, + interner, }, - 0, - ).unwrap(); - let binders = value0 - .binders - .iter() - .map(|pk| pk.map(|ui| universes.map_universe_to_canonical(ui))) - .collect(); + DebruijnIndex::INNERMOST, + ) + .unwrap(); + let binders = CanonicalVarKinds::from_iter( + interner, + value0 + .binders + .iter(interner) + .map(|pk| pk.map_ref(|&ui| universes.map_universe_to_canonical(ui).unwrap())), + ); UCanonicalized { quantified: UCanonical { @@ -52,39 +67,25 @@ impl InferenceTable { } #[derive(Debug)] -pub struct UCanonicalized { +pub struct UCanonicalized { /// The canonicalized result. 
pub quantified: UCanonical, /// A map between the universes in `quantified` and the original universes - crate universes: UniverseMap, + pub universes: UniverseMap, } -/// Maps the universes found in the `u_canonicalize` result (the -/// "canonical" universes) to the universes found in the original -/// value (and vice versa). When used as a folder -- i.e., from -/// outside this module -- converts from "canonical" universes to the -/// original (but see the `UMapToCanonical` folder). -#[derive(Clone, Debug)] -pub struct UniverseMap { - /// A reverse map -- for each universe Ux that appears in - /// `quantified`, the corresponding universe in the original was - /// `universes[x]`. - universes: Vec, +pub trait UniverseMapExt { + fn add(&mut self, universe: UniverseIndex); + fn map_universe_to_canonical(&self, universe: UniverseIndex) -> Option; + fn map_universe_from_canonical(&self, universe: UniverseIndex) -> UniverseIndex; + fn map_from_canonical(&self, interner: I, canonical_value: &Canonical) -> Canonical + where + T: Clone + TypeFoldable + HasInterner, + T: HasInterner, + I: Interner; } - -impl UniverseMap { - fn new() -> Self { - UniverseMap { - universes: vec![UniverseIndex::root()], - } - } - - /// Number of canonical universes. - fn num_canonical_universes(&self) -> usize { - self.universes.len() - } - +impl UniverseMapExt for UniverseMap { fn add(&mut self, universe: UniverseIndex) { if let Err(i) = self.universes.binary_search(&universe) { self.universes.insert(i, universe); @@ -96,66 +97,13 @@ impl UniverseMap { /// looking for the index I of U in `self.universes`. We will /// return the universe with "counter" I. This effectively /// "compresses" the range of universes to things from - /// `0..self.universes.len()`. - /// - /// There is one subtle point, though: if we don't find U in the - /// vector, what should we return? 
This can only occur when we are - /// mapping the universes for existentially quantified variables - /// appearing in the original value. For example, if we have an initial - /// query like - /// - /// ```notrust - /// !U1: Foo - /// ``` - /// - /// where `?X` is an existential variable in universe U2, and - /// `!U1` (resp. `!U3`) is a placeholder variable in universe U1 - /// (resp. U3), then this will be canonicalized to - /// - /// ```notrust - /// exists { !U1: Foo - /// ``` - /// - /// We will then collect the universe vector `[Root, 1, 3]`. - /// Hence we would remap the inner part to `!U1': Foo` - /// (I am using the convention of writing U1' and U2' to indicate - /// the target universes that we are mappin to, which are - /// logically distincte). But what universe should we use for the - /// `exists` binder? `U2` is not in the set of universes we - /// collected initially. The answer is that we will remap U2 to - /// U1' in the final result, giving: - /// - /// ```notrust - /// exists { !U1': Foo - /// ``` - /// - /// More generally, we pick the highest numbered universe we did - /// find that is still lower then the universe U we are - /// mapping. Effectivelly we "remapped" from U2 (in the original - /// multiverse) to U1; this is a sound approximation, because all - /// names from U1 are visible to U2 (but not vice - /// versa). Moreover, since there are no placeholders from U2 in - /// the original query, there is no way we would have equated `?0` - /// with such a name. - fn map_universe_to_canonical(&self, universe: UniverseIndex) -> UniverseIndex { - match self.universes.binary_search(&universe) { - Ok(index) => UniverseIndex { counter: index }, - - // `index` is the location in the vector where universe - // *would have* gone. So, in our example from the comment - // above, if we were looking up `U2` we would get back 2, - // since it would go betewen U1 (with index 1) and U3 - // (with index 2). 
Therefore, we want to subtract one to - // get the biggest universe that is still lower than - // `universe`. - // - // Note that `index` can never be 0: that is always the - // root universe, we always add that to the vector. - Err(index) => { - assert!(index > 0); - UniverseIndex { counter: index - 1 } - } - } + /// `0..self.universes.len()`. If the universe is not present in the map, + /// we return `None`. + fn map_universe_to_canonical(&self, universe: UniverseIndex) -> Option { + self.universes + .binary_search(&universe) + .ok() + .map(|index| UniverseIndex { counter: index }) } /// Given a "canonical universe" -- one found in the @@ -206,128 +154,183 @@ impl UniverseMap { /// of universes, since that determines visibility, and (b) that /// the universe we produce does not correspond to any of the /// other original universes. - pub fn map_from_canonical(&self, value: &T) -> T::Result { - debug!("map_from_canonical(value={:?})", value); - debug!("map_from_canonical: universes = {:?}", self.universes); - value - .fold_with(&mut UMapFromCanonical { universes: self }, 0) - .unwrap() + fn map_from_canonical(&self, interner: I, canonical_value: &Canonical) -> Canonical + where + T: Clone + TypeFoldable + HasInterner, + T: HasInterner, + I: Interner, + { + debug_span!("map_from_canonical", ?canonical_value, universes = ?self.universes); + + let binders = canonical_value + .binders + .iter(interner) + .map(|cvk| cvk.map_ref(|&universe| self.map_universe_from_canonical(universe))); + + let value = canonical_value + .value + .clone() + .try_fold_with( + &mut UMapFromCanonical { + interner, + universes: self, + }, + DebruijnIndex::INNERMOST, + ) + .unwrap(); + + Canonical { + binders: CanonicalVarKinds::from_iter(interner, binders), + value, + } } } /// The `UCollector` is a "no-op" in terms of the value, but along the /// way it collects all universes that were found into a vector. 
-struct UCollector<'q> { +struct UCollector<'q, I> { universes: &'q mut UniverseMap, + interner: I, } -impl<'q> DefaultTypeFolder for UCollector<'q> {} +impl TypeVisitor for UCollector<'_, I> { + type BreakTy = (); -impl<'q> PlaceholderFolder for UCollector<'q> { - fn fold_free_placeholder_ty( - &mut self, - universe: PlaceholderIndex, - _binders: usize, - ) -> Fallible { - self.universes.add(universe.ui); - Ok(universe.to_ty()) + fn as_dyn(&mut self) -> &mut dyn TypeVisitor { + self } - fn fold_free_placeholder_lifetime( + fn visit_free_placeholder( &mut self, universe: PlaceholderIndex, - _binders: usize, - ) -> Fallible { + _outer_binder: DebruijnIndex, + ) -> ControlFlow<()> { self.universes.add(universe.ui); - Ok(universe.to_lifetime()) + ControlFlow::Continue(()) } -} -impl<'q> DefaultFreeVarFolder for UCollector<'q> {} - -impl<'q> DefaultInferenceFolder for UCollector<'q> { - fn forbid() -> bool { + fn forbid_inference_vars(&self) -> bool { true } + + fn interner(&self) -> I { + self.interner + } } -struct UMapToCanonical<'q> { +#[derive(FallibleTypeFolder)] +struct UMapToCanonical<'q, I: Interner> { + interner: I, universes: &'q UniverseMap, } -impl<'q> DefaultTypeFolder for UMapToCanonical<'q> {} +impl<'i, I: Interner> TypeFolder for UMapToCanonical<'i, I> { + fn as_dyn(&mut self) -> &mut dyn TypeFolder { + self + } -impl<'q> DefaultInferenceFolder for UMapToCanonical<'q> { - fn forbid() -> bool { + fn forbid_inference_vars(&self) -> bool { true } -} -impl<'q> PlaceholderFolder for UMapToCanonical<'q> { fn fold_free_placeholder_ty( &mut self, universe0: PlaceholderIndex, - _binders: usize, - ) -> Fallible { - let ui = self.universes.map_universe_to_canonical(universe0.ui); - Ok(PlaceholderIndex { + _outer_binder: DebruijnIndex, + ) -> Ty { + let ui = self + .universes + .map_universe_to_canonical(universe0.ui) + .expect("Expected UCollector to encounter this universe"); + PlaceholderIndex { ui, idx: universe0.idx, - }.to_ty()) + } + 
.to_ty(TypeFolder::interner(self)) } fn fold_free_placeholder_lifetime( &mut self, universe0: PlaceholderIndex, - _binders: usize, - ) -> Fallible { - let universe = self.universes.map_universe_to_canonical(universe0.ui); - Ok(PlaceholderIndex { + _outer_binder: DebruijnIndex, + ) -> Lifetime { + let universe = self + .universes + .map_universe_to_canonical(universe0.ui) + .expect("Expected UCollector to encounter this universe"); + + PlaceholderIndex { ui: universe, idx: universe0.idx, - }.to_lifetime()) + } + .to_lifetime(TypeFolder::interner(self)) } -} -impl<'q> DefaultFreeVarFolder for UMapToCanonical<'q> {} + fn fold_free_placeholder_const( + &mut self, + ty: Ty, + universe0: PlaceholderIndex, + _outer_binder: DebruijnIndex, + ) -> Const { + let universe = self + .universes + .map_universe_to_canonical(universe0.ui) + .expect("Expected UCollector to encounter this universe"); + + PlaceholderIndex { + ui: universe, + idx: universe0.idx, + } + .to_const(TypeFolder::interner(self), ty) + } -struct UMapFromCanonical<'q> { + fn interner(&self) -> I { + self.interner + } +} + +#[derive(FallibleTypeFolder)] +struct UMapFromCanonical<'q, I: Interner> { + interner: I, universes: &'q UniverseMap, } -impl<'q> DefaultTypeFolder for UMapFromCanonical<'q> {} +impl<'i, I: Interner> TypeFolder for UMapFromCanonical<'i, I> { + fn as_dyn(&mut self) -> &mut dyn TypeFolder { + self + } -impl<'q> PlaceholderFolder for UMapFromCanonical<'q> { fn fold_free_placeholder_ty( &mut self, universe0: PlaceholderIndex, - _binders: usize, - ) -> Fallible { + _outer_binder: DebruijnIndex, + ) -> Ty { let ui = self.universes.map_universe_from_canonical(universe0.ui); - Ok(PlaceholderIndex { + PlaceholderIndex { ui, idx: universe0.idx, - }.to_ty()) + } + .to_ty(TypeFolder::interner(self)) } fn fold_free_placeholder_lifetime( &mut self, universe0: PlaceholderIndex, - _binders: usize, - ) -> Fallible { + _outer_binder: DebruijnIndex, + ) -> Lifetime { let universe = 
self.universes.map_universe_from_canonical(universe0.ui); - Ok(PlaceholderIndex { + PlaceholderIndex { ui: universe, idx: universe0.idx, - }.to_lifetime()) + } + .to_lifetime(TypeFolder::interner(self)) } -} - -impl<'q> DefaultFreeVarFolder for UMapFromCanonical<'q> {} -impl<'q> DefaultInferenceFolder for UMapFromCanonical<'q> { - fn forbid() -> bool { + fn forbid_inference_vars(&self) -> bool { true } -} + fn interner(&self) -> I { + self.interner + } +} diff --git a/chalk-solve/src/infer/unify.rs b/chalk-solve/src/infer/unify.rs index 8eecedc8d7a..1ce4796da98 100644 --- a/chalk-solve/src/infer/unify.rs +++ b/chalk-solve/src/infer/unify.rs @@ -1,32 +1,29 @@ -use chalk_engine::fallible::*; -use chalk_ir::cast::Cast; -use chalk_ir::fold::{ - DefaultFreeVarFolder, DefaultTypeFolder, Fold, InferenceFolder, PlaceholderFolder, -}; -use chalk_ir::zip::{Zip, Zipper}; -use std::sync::Arc; - use super::var::*; use super::*; +use crate::debug_span; +use chalk_ir::cast::Cast; +use chalk_ir::fold::{FallibleTypeFolder, TypeFoldable}; +use chalk_ir::interner::{HasInterner, Interner}; +use chalk_ir::zip::{Zip, Zipper}; +use chalk_ir::UnificationDatabase; +use std::fmt::Debug; +use tracing::{debug, instrument}; -impl InferenceTable { - pub fn unify( +impl InferenceTable { + pub fn relate( &mut self, - environment: &Arc, + interner: I, + db: &dyn UnificationDatabase, + environment: &Environment, + variance: Variance, a: &T, b: &T, - ) -> Fallible + ) -> Fallible> where - T: ?Sized + Zip, + T: ?Sized + Zip, { - debug_heading!( - "unify(a={:?}\ - ,\n b={:?})", - a, - b - ); let snapshot = self.snapshot(); - match Unifier::new(self, environment).unify(a, b) { + match Unifier::new(interner, db, self, environment).relate(variance, a, b) { Ok(r) => { self.commit(snapshot); Ok(r) @@ -39,225 +36,830 @@ impl InferenceTable { } } -struct Unifier<'t> { - table: &'t mut InferenceTable, - environment: &'t Arc, - goals: Vec>, - constraints: Vec>, +struct Unifier<'t, I: Interner> { + table: &'t 
mut InferenceTable, + environment: &'t Environment, + goals: Vec>>, + interner: I, + db: &'t dyn UnificationDatabase, } #[derive(Debug)] -pub struct UnificationResult { - crate goals: Vec>, - crate constraints: Vec>, +pub struct RelationResult { + pub goals: Vec>>, } -impl<'t> Unifier<'t> { - fn new(table: &'t mut InferenceTable, environment: &'t Arc) -> Self { +impl<'t, I: Interner> Unifier<'t, I> { + fn new( + interner: I, + db: &'t dyn UnificationDatabase, + table: &'t mut InferenceTable, + environment: &'t Environment, + ) -> Self { Unifier { - environment: environment, - table: table, + environment, + table, goals: vec![], - constraints: vec![], + interner, + db, } } /// The main entry point for the `Unifier` type and really the /// only type meant to be called externally. Performs a - /// unification of `a` and `b` and returns the Unification Result. - fn unify(mut self, a: &T, b: &T) -> Fallible + /// relation of `a` and `b` and returns the Unification Result. + #[instrument(level = "debug", skip(self))] + fn relate(mut self, variance: Variance, a: &T, b: &T) -> Fallible> where - T: ?Sized + Zip, + T: ?Sized + Zip, { - Zip::zip_with(&mut self, a, b)?; - Ok(UnificationResult { - goals: self.goals, - constraints: self.constraints, - }) + Zip::zip_with(&mut self, variance, a, b)?; + let interner = self.interner(); + let mut goals = self.goals; + let table = self.table; + // Sometimes we'll produce a lifetime outlives goal which we later solve by unification + // Technically, these *will* get canonicalized to the same bound var and so that will end up + // as a goal like `^0.0 <: ^0.0`, which is trivially true. But, we remove those *here*, which + // might help caching. 
+ goals.retain(|g| match g.goal.data(interner) { + GoalData::SubtypeGoal(SubtypeGoal { a, b }) => { + let n_a = table.ty_root(interner, a); + let n_b = table.ty_root(interner, b); + let a = n_a.as_ref().unwrap_or(a); + let b = n_b.as_ref().unwrap_or(b); + a != b + } + _ => true, + }); + Ok(RelationResult { goals }) } - /// When we encounter a "sub-unification" problem that is in a distinct - /// environment, we invoke this routine. - fn sub_unify(&mut self, ty1: T, ty2: T) -> Fallible<()> - where - T: Zip + Fold, - { - let sub_unifier = Unifier::new(self.table, &self.environment); - let UnificationResult { goals, constraints } = sub_unifier.unify(&ty1, &ty2)?; - self.goals.extend(goals); - self.constraints.extend(constraints); - Ok(()) - } + /// Relate `a`, `b` with the variance such that if `variance = Covariant`, `a` is + /// a subtype of `b`. + fn relate_ty_ty(&mut self, variance: Variance, a: &Ty, b: &Ty) -> Fallible<()> { + let interner = self.interner; - fn unify_ty_ty<'a>(&mut self, a: &'a Ty, b: &'a Ty) -> Fallible<()> { - // ^^ ^^ ^^ FIXME rustc bug - if let Some(n_a) = self.table.normalize_shallow(a) { - return self.unify_ty_ty(&n_a, b); - } else if let Some(n_b) = self.table.normalize_shallow(b) { - return self.unify_ty_ty(a, &n_b); - } + let n_a = self.table.normalize_ty_shallow(interner, a); + let n_b = self.table.normalize_ty_shallow(interner, b); + let a = n_a.as_ref().unwrap_or(a); + let b = n_b.as_ref().unwrap_or(b); - debug_heading!( - "unify_ty_ty(a={:?}\ - ,\n b={:?})", - a, - b - ); + debug_span!("relate_ty_ty", ?variance, ?a, ?b); - match (a, b) { - (&Ty::InferenceVar(var1), &Ty::InferenceVar(var2)) => { - debug!("unify_ty_ty: unify_var_var({:?}, {:?})", var1, var2); - let var1 = EnaVariable::from(var1); - let var2 = EnaVariable::from(var2); - Ok(self - .table - .unify - .unify_var_var(var1, var2) - .expect("unification of two unbound variables cannot fail")) + if a.kind(interner) == b.kind(interner) { + return Ok(()); + } + + match 
(a.kind(interner), b.kind(interner)) { + // Relating two inference variables: + // First, if either variable is a float or int kind, then we always + // unify if they match. This is because float and ints don't have + // subtype relationships. + // If both kinds are general then: + // If `Invariant`, unify them in the underlying ena table. + // If `Covariant` or `Contravariant`, push `SubtypeGoal` + (&TyKind::InferenceVar(var1, kind1), &TyKind::InferenceVar(var2, kind2)) => { + if matches!(kind1, TyVariableKind::General) + && matches!(kind2, TyVariableKind::General) + { + // Both variable kinds are general; so unify if invariant, otherwise push subtype goal + match variance { + Variance::Invariant => self.unify_var_var(var1, var2), + Variance::Covariant => { + self.push_subtype_goal(a.clone(), b.clone()); + Ok(()) + } + Variance::Contravariant => { + self.push_subtype_goal(b.clone(), a.clone()); + Ok(()) + } + } + } else if kind1 == kind2 { + // At least one kind is not general, but they match, so unify + self.unify_var_var(var1, var2) + } else if kind1 == TyVariableKind::General { + // First kind is general, second isn't, unify + self.unify_general_var_specific_ty(var1, b.clone()) + } else if kind2 == TyVariableKind::General { + // Second kind is general, first isn't, unify + self.unify_general_var_specific_ty(var2, a.clone()) + } else { + debug!( + "Tried to unify mis-matching inference variables: {:?} and {:?}", + kind1, kind2 + ); + Err(NoSolution) + } } - (&Ty::InferenceVar(var), ty @ &Ty::Apply(_)) - | (ty @ &Ty::Apply(_), &Ty::InferenceVar(var)) - | (&Ty::InferenceVar(var), ty @ &Ty::ForAll(_)) - | (ty @ &Ty::ForAll(_), &Ty::InferenceVar(var)) => { - self.unify_var_ty(var, ty) + // Unifying `forall { T }` with some other forall type `forall { U }` + (&TyKind::Function(ref fn1), &TyKind::Function(ref fn2)) => { + if fn1.sig == fn2.sig { + Zip::zip_with( + self, + variance, + &fn1.clone().into_binders(interner), + &fn2.clone().into_binders(interner), + ) + } 
else { + Err(NoSolution) + } } - (&Ty::ForAll(ref quantified_ty1), &Ty::ForAll(ref quantified_ty2)) => { - self.unify_forall_tys(quantified_ty1, quantified_ty2) + (&TyKind::Placeholder(ref p1), &TyKind::Placeholder(ref p2)) => { + Zip::zip_with(self, variance, p1, p2) } - (&Ty::ForAll(ref quantified_ty), apply_ty @ &Ty::Apply(_)) - | (apply_ty @ &Ty::Apply(_), &Ty::ForAll(ref quantified_ty)) => { - self.unify_forall_apply(quantified_ty, apply_ty) + // Unifying two dyn is possible if they have the same bounds. + (&TyKind::Dyn(ref qwc1), &TyKind::Dyn(ref qwc2)) => { + Zip::zip_with(self, variance, qwc1, qwc2) } - (&Ty::Apply(ref apply1), &Ty::Apply(ref apply2)) => { - if apply1.name != apply2.name { - return Err(NoSolution); - } + (TyKind::BoundVar(_), _) | (_, TyKind::BoundVar(_)) => panic!( + "unification encountered bound variable: a={:?} b={:?}", + a, b + ), + + // Unifying an alias type with some other type `U`. + (_, &TyKind::Alias(ref alias)) => self.relate_alias_ty(variance.invert(), alias, a), + (&TyKind::Alias(ref alias), _) => self.relate_alias_ty(variance, alias, b), - Zip::zip_with(self, &apply1.parameters, &apply2.parameters) + (&TyKind::InferenceVar(var, kind), ty_data) => { + let ty = ty_data.clone().intern(interner); + self.relate_var_ty(variance, var, kind, &ty) + } + (ty_data, &TyKind::InferenceVar(var, kind)) => { + // We need to invert the variance if inference var is `b` because we pass it in + // as `a` to relate_var_ty + let ty = ty_data.clone().intern(interner); + self.relate_var_ty(variance.invert(), var, kind, &ty) } - (proj1 @ &Ty::Projection(_), proj2 @ &Ty::UnselectedProjection(_)) - | (proj1 @ &Ty::UnselectedProjection(_), proj2 @ &Ty::Projection(_)) - | (proj1 @ &Ty::UnselectedProjection(_), proj2 @ &Ty::UnselectedProjection(_)) => self - .unify_projection_tys(proj1.as_projection_ty_enum(), proj2.as_projection_ty_enum()), + // This would correspond to unifying a `fn` type with a non-fn + // type in Rust; error. 
+ (&TyKind::Function(_), _) | (_, &TyKind::Function(_)) => Err(NoSolution), - (ty @ &Ty::Apply(_), &Ty::Projection(ref proj)) - | (ty @ &Ty::ForAll(_), &Ty::Projection(ref proj)) - | (ty @ &Ty::InferenceVar(_), &Ty::Projection(ref proj)) - | (&Ty::Projection(ref proj), ty @ &Ty::Projection(_)) - | (&Ty::Projection(ref proj), ty @ &Ty::Apply(_)) - | (&Ty::Projection(ref proj), ty @ &Ty::ForAll(_)) - | (&Ty::Projection(ref proj), ty @ &Ty::InferenceVar(_)) => { - self.unify_projection_ty(proj, ty) - } + // Cannot unify (e.g.) some struct type `Foo` and a placeholder like `T` + (_, &TyKind::Placeholder(_)) | (&TyKind::Placeholder(_), _) => Err(NoSolution), - (ty @ &Ty::Apply(_), &Ty::UnselectedProjection(ref proj)) - | (ty @ &Ty::ForAll(_), &Ty::UnselectedProjection(ref proj)) - | (ty @ &Ty::InferenceVar(_), &Ty::UnselectedProjection(ref proj)) - | (&Ty::UnselectedProjection(ref proj), ty @ &Ty::Apply(_)) - | (&Ty::UnselectedProjection(ref proj), ty @ &Ty::ForAll(_)) - | (&Ty::UnselectedProjection(ref proj), ty @ &Ty::InferenceVar(_)) => { - self.unify_unselected_projection_ty(proj, ty) - } + // Cannot unify `dyn Trait` with things like structs or placeholders + (_, &TyKind::Dyn(_)) | (&TyKind::Dyn(_), _) => Err(NoSolution), - (Ty::BoundVar(_), _) | (_, Ty::BoundVar(_)) => { - panic!("unification encountered bound variable: a={:?} b={:?}", a, b) + (TyKind::Adt(id_a, substitution_a), TyKind::Adt(id_b, substitution_b)) => { + if id_a != id_b { + return Err(NoSolution); + } + self.zip_substs( + variance, + Some(self.unification_database().adt_variance(*id_a)), + substitution_a.as_slice(interner), + substitution_b.as_slice(interner), + ) } + ( + TyKind::AssociatedType(id_a, substitution_a), + TyKind::AssociatedType(id_b, substitution_b), + ) => { + if id_a != id_b { + return Err(NoSolution); + } + self.zip_substs( + variance, + None, // TODO: AssociatedType variances? 
+ substitution_a.as_slice(interner), + substitution_b.as_slice(interner), + ) + } + (TyKind::Scalar(scalar_a), TyKind::Scalar(scalar_b)) => { + Zip::zip_with(self, variance, scalar_a, scalar_b) + } + (TyKind::Str, TyKind::Str) => Ok(()), + (TyKind::Tuple(arity_a, substitution_a), TyKind::Tuple(arity_b, substitution_b)) => { + if arity_a != arity_b { + return Err(NoSolution); + } + self.zip_substs( + variance, + Some(Variances::from_iter( + self.interner, + std::iter::repeat(Variance::Covariant).take(*arity_a), + )), + substitution_a.as_slice(interner), + substitution_b.as_slice(interner), + ) + } + ( + TyKind::OpaqueType(id_a, substitution_a), + TyKind::OpaqueType(id_b, substitution_b), + ) => { + if id_a != id_b { + return Err(NoSolution); + } + self.zip_substs( + variance, + None, + substitution_a.as_slice(interner), + substitution_b.as_slice(interner), + ) + } + (TyKind::Slice(ty_a), TyKind::Slice(ty_b)) => Zip::zip_with(self, variance, ty_a, ty_b), + (TyKind::FnDef(id_a, substitution_a), TyKind::FnDef(id_b, substitution_b)) => { + if id_a != id_b { + return Err(NoSolution); + } + self.zip_substs( + variance, + Some(self.unification_database().fn_def_variance(*id_a)), + substitution_a.as_slice(interner), + substitution_b.as_slice(interner), + ) + } + ( + TyKind::Ref(mutability_a, lifetime_a, ty_a), + TyKind::Ref(mutability_b, lifetime_b, ty_b), + ) => { + if mutability_a != mutability_b { + return Err(NoSolution); + } + // The lifetime is `Contravariant` + Zip::zip_with( + self, + variance.xform(Variance::Contravariant), + lifetime_a, + lifetime_b, + )?; + // The type is `Covariant` when not mut, `Invariant` otherwise + let output_variance = match mutability_a { + Mutability::Not => Variance::Covariant, + Mutability::Mut => Variance::Invariant, + }; + Zip::zip_with(self, variance.xform(output_variance), ty_a, ty_b) + } + (TyKind::Raw(mutability_a, ty_a), TyKind::Raw(mutability_b, ty_b)) => { + if mutability_a != mutability_b { + return Err(NoSolution); + } + let 
ty_variance = match mutability_a { + Mutability::Not => Variance::Covariant, + Mutability::Mut => Variance::Invariant, + }; + Zip::zip_with(self, variance.xform(ty_variance), ty_a, ty_b) + } + (TyKind::Never, TyKind::Never) => Ok(()), + (TyKind::Array(ty_a, const_a), TyKind::Array(ty_b, const_b)) => { + Zip::zip_with(self, variance, ty_a, ty_b)?; + Zip::zip_with(self, variance, const_a, const_b) + } + (TyKind::Closure(id_a, substitution_a), TyKind::Closure(id_b, substitution_b)) => { + if id_a != id_b { + return Err(NoSolution); + } + self.zip_substs( + variance, + None, + substitution_a.as_slice(interner), + substitution_b.as_slice(interner), + ) + } + (TyKind::Coroutine(id_a, substitution_a), TyKind::Coroutine(id_b, substitution_b)) => { + if id_a != id_b { + return Err(NoSolution); + } + self.zip_substs( + variance, + None, + substitution_a.as_slice(interner), + substitution_b.as_slice(interner), + ) + } + ( + TyKind::CoroutineWitness(id_a, substitution_a), + TyKind::CoroutineWitness(id_b, substitution_b), + ) => { + if id_a != id_b { + return Err(NoSolution); + } + self.zip_substs( + variance, + None, + substitution_a.as_slice(interner), + substitution_b.as_slice(interner), + ) + } + (TyKind::Foreign(id_a), TyKind::Foreign(id_b)) => { + Zip::zip_with(self, variance, id_a, id_b) + } + (TyKind::Error, TyKind::Error) => Ok(()), + + (_, _) => Err(NoSolution), } } - fn unify_forall_tys(&mut self, ty1: &QuantifiedTy, ty2: &QuantifiedTy) -> Fallible<()> { + /// Unify two inference variables + #[instrument(level = "debug", skip(self))] + fn unify_var_var(&mut self, a: InferenceVar, b: InferenceVar) -> Fallible<()> { + let var1 = EnaVariable::from(a); + let var2 = EnaVariable::from(b); + self.table + .unify + .unify_var_var(var1, var2) + .expect("unification of two unbound variables cannot fail"); + Ok(()) + } + + /// Unify a general inference variable with a specific inference variable + /// (type kind is not `General`). 
For example, unify a `TyVariableKind::General` + /// inference variable with a `TyVariableKind::Integer` variable, resulting in the + /// general inference variable narrowing to an integer variable. + + #[instrument(level = "debug", skip(self))] + fn unify_general_var_specific_ty( + &mut self, + general_var: InferenceVar, + specific_ty: Ty, + ) -> Fallible<()> { + self.table + .unify + .unify_var_value( + general_var, + InferenceValue::from_ty(self.interner, specific_ty), + ) + .unwrap(); + + Ok(()) + } + + #[instrument(level = "debug", skip(self))] + fn relate_binders<'a, T>( + &mut self, + variance: Variance, + a: &Binders, + b: &Binders, + ) -> Fallible<()> + where + T: Clone + TypeFoldable + HasInterner + Zip, + 't: 'a, + { // for<'a...> T == for<'b...> U // // if: // // for<'a...> exists<'b...> T == U && // for<'b...> exists<'a...> T == U - // - // Here we only check for<'a...> exists<'b...> T == U, - // can someone smart comment why this is sufficient? - debug!("unify_forall_tys({:?}, {:?})", ty1, ty2); + // for<'a...> T <: for<'b...> U + // + // if + // + // for<'b...> exists<'a...> T <: U - let ui = self.table.new_universe(); - let lifetimes1: Vec<_> = (0..ty1.num_binders) - .map(|idx| Lifetime::Placeholder(PlaceholderIndex { ui, idx }).cast()) - .collect(); + let interner = self.interner; - let max_universe = self.table.max_universe; - let lifetimes2: Vec<_> = (0..ty2.num_binders) - .map(|_| self.table.new_variable(max_universe).to_lifetime().cast()) - .collect(); + if let Variance::Invariant | Variance::Contravariant = variance { + let a_universal = self + .table + .instantiate_binders_universally(interner, a.clone()); + let b_existential = self + .table + .instantiate_binders_existentially(interner, b.clone()); + Zip::zip_with(self, Variance::Contravariant, &a_universal, &b_existential)?; + } - let ty1 = ty1.substitute(&lifetimes1); - let ty2 = ty2.substitute(&lifetimes2); - debug!("unify_forall_tys: ty1 = {:?}", ty1); - debug!("unify_forall_tys: ty2 = 
{:?}", ty2); + if let Variance::Invariant | Variance::Covariant = variance { + let b_universal = self + .table + .instantiate_binders_universally(interner, b.clone()); + let a_existential = self + .table + .instantiate_binders_existentially(interner, a.clone()); + Zip::zip_with(self, Variance::Covariant, &a_existential, &b_universal)?; + } - self.sub_unify(ty1, ty2) + Ok(()) } - fn unify_projection_tys( + /// Relate an alias like `::Item` or `impl Trait` with some other + /// type `ty`. If the variance is `Invariant`, creates a goal like + /// + /// ```notrust + /// AliasEq(::Item = U) // associated type projection + /// AliasEq(impl Trait = U) // impl trait + /// ``` + /// Otherwise, this creates a new variable `?X`, creates a goal like + /// ```notrust + /// AliasEq(Alias = ?X) + /// ``` + /// and relates `?X` and `ty`. + #[instrument(level = "debug", skip(self))] + fn relate_alias_ty( &mut self, - proj1: ProjectionTyRefEnum, - proj2: ProjectionTyRefEnum, + variance: Variance, + alias: &AliasTy, + ty: &Ty, ) -> Fallible<()> { - let max_universe = self.table.max_universe; - let var = self.table.new_variable(max_universe).to_ty(); - self.unify_projection_ty_enum(proj1, &var)?; - self.unify_projection_ty_enum(proj2, &var)?; - Ok(()) + let interner = self.interner; + match variance { + Variance::Invariant => { + self.goals.push(InEnvironment::new( + self.environment, + AliasEq { + alias: alias.clone(), + ty: ty.clone(), + } + .cast(interner), + )); + Ok(()) + } + Variance::Covariant | Variance::Contravariant => { + let var = self + .table + .new_variable(UniverseIndex::root()) + .to_ty(interner); + self.goals.push(InEnvironment::new( + self.environment, + AliasEq { + alias: alias.clone(), + ty: var.clone(), + } + .cast(interner), + )); + self.relate_ty_ty(variance, &var, ty) + } + } + } + + #[instrument(level = "debug", skip(self))] + fn generalize_ty( + &mut self, + ty: &Ty, + universe_index: UniverseIndex, + variance: Variance, + ) -> Ty { + let interner = 
self.interner; + match ty.kind(interner) { + TyKind::Adt(id, substitution) => { + let variances = if matches!(variance, Variance::Invariant) { + None + } else { + Some(self.unification_database().adt_variance(*id)) + }; + let get_variance = |i| { + variances + .as_ref() + .map(|v| v.as_slice(interner)[i]) + .unwrap_or(Variance::Invariant) + }; + TyKind::Adt( + *id, + self.generalize_substitution(substitution, universe_index, get_variance), + ) + .intern(interner) + } + TyKind::AssociatedType(id, substitution) => TyKind::AssociatedType( + *id, + self.generalize_substitution(substitution, universe_index, |_| variance), + ) + .intern(interner), + TyKind::Scalar(scalar) => TyKind::Scalar(*scalar).intern(interner), + TyKind::Str => TyKind::Str.intern(interner), + TyKind::Tuple(arity, substitution) => TyKind::Tuple( + *arity, + self.generalize_substitution(substitution, universe_index, |_| variance), + ) + .intern(interner), + TyKind::OpaqueType(id, substitution) => TyKind::OpaqueType( + *id, + self.generalize_substitution(substitution, universe_index, |_| variance), + ) + .intern(interner), + TyKind::Slice(ty) => { + TyKind::Slice(self.generalize_ty(ty, universe_index, variance)).intern(interner) + } + TyKind::FnDef(id, substitution) => { + let variances = if matches!(variance, Variance::Invariant) { + None + } else { + Some(self.unification_database().fn_def_variance(*id)) + }; + let get_variance = |i| { + variances + .as_ref() + .map(|v| v.as_slice(interner)[i]) + .unwrap_or(Variance::Invariant) + }; + TyKind::FnDef( + *id, + self.generalize_substitution(substitution, universe_index, get_variance), + ) + .intern(interner) + } + TyKind::Ref(mutability, lifetime, ty) => { + let lifetime_variance = variance.xform(Variance::Contravariant); + let ty_variance = match mutability { + Mutability::Not => Variance::Covariant, + Mutability::Mut => Variance::Invariant, + }; + TyKind::Ref( + *mutability, + self.generalize_lifetime(lifetime, universe_index, lifetime_variance), + 
self.generalize_ty(ty, universe_index, ty_variance), + ) + .intern(interner) + } + TyKind::Raw(mutability, ty) => { + let ty_variance = match mutability { + Mutability::Not => Variance::Covariant, + Mutability::Mut => Variance::Invariant, + }; + TyKind::Raw( + *mutability, + self.generalize_ty(ty, universe_index, ty_variance), + ) + .intern(interner) + } + TyKind::Never => TyKind::Never.intern(interner), + TyKind::Array(ty, const_) => TyKind::Array( + self.generalize_ty(ty, universe_index, variance), + self.generalize_const(const_, universe_index), + ) + .intern(interner), + TyKind::Closure(id, substitution) => TyKind::Closure( + *id, + self.generalize_substitution(substitution, universe_index, |_| variance), + ) + .intern(interner), + TyKind::Coroutine(id, substitution) => TyKind::Coroutine( + *id, + self.generalize_substitution(substitution, universe_index, |_| variance), + ) + .intern(interner), + TyKind::CoroutineWitness(id, substitution) => TyKind::CoroutineWitness( + *id, + self.generalize_substitution(substitution, universe_index, |_| variance), + ) + .intern(interner), + TyKind::Foreign(id) => TyKind::Foreign(*id).intern(interner), + TyKind::Error => TyKind::Error.intern(interner), + TyKind::Dyn(dyn_ty) => { + let DynTy { bounds, lifetime } = dyn_ty; + let lifetime = self.generalize_lifetime( + lifetime, + universe_index, + variance.xform(Variance::Contravariant), + ); + + let bounds = bounds.map_ref(|value| { + let iter = value.iter(interner).map(|sub_var| { + sub_var.map_ref(|clause| { + match clause { + WhereClause::Implemented(trait_ref) => { + let TraitRef { + ref substitution, + trait_id, + } = *trait_ref; + let substitution = self.generalize_substitution_skip_self( + substitution, + universe_index, + |_| Some(variance), + ); + WhereClause::Implemented(TraitRef { + substitution, + trait_id, + }) + } + WhereClause::AliasEq(alias_eq) => { + let AliasEq { alias, ty: _ } = alias_eq; + let alias = match alias { + AliasTy::Opaque(opaque_ty) => { + let 
OpaqueTy { + ref substitution, + opaque_ty_id, + } = *opaque_ty; + let substitution = self.generalize_substitution( + substitution, + universe_index, + |_| variance, + ); + AliasTy::Opaque(OpaqueTy { + substitution, + opaque_ty_id, + }) + } + AliasTy::Projection(projection_ty) => { + let ProjectionTy { + ref substitution, + associated_ty_id, + } = *projection_ty; + // TODO: We should be skipping "self", which + // would be the first element of + // "trait_params" if we had a + // `RustIrDatabase` to call + // `split_projection` on... + // let (assoc_ty_datum, trait_params, assoc_type_params) = s.db().split_projection(&self); + let substitution = self.generalize_substitution( + substitution, + universe_index, + |_| variance, + ); + AliasTy::Projection(ProjectionTy { + substitution, + associated_ty_id, + }) + } + }; + let ty = + self.table.new_variable(universe_index).to_ty(interner); + WhereClause::AliasEq(AliasEq { alias, ty }) + } + WhereClause::TypeOutlives(_) => { + let lifetime_var = self.table.new_variable(universe_index); + let lifetime = lifetime_var.to_lifetime(interner); + let ty_var = self.table.new_variable(universe_index); + let ty = ty_var.to_ty(interner); + WhereClause::TypeOutlives(TypeOutlives { ty, lifetime }) + } + WhereClause::LifetimeOutlives(_) => { + unreachable!("dyn Trait never contains LifetimeOutlive bounds") + } + } + }) + }); + QuantifiedWhereClauses::from_iter(interner, iter) + }); + + TyKind::Dyn(DynTy { bounds, lifetime }).intern(interner) + } + TyKind::Function(fn_ptr) => { + let FnPointer { + num_binders, + sig, + ref substitution, + } = *fn_ptr; + + let len = substitution.0.len(interner); + let vars = substitution.0.iter(interner).enumerate().map(|(i, var)| { + if i < len - 1 { + self.generalize_generic_var( + var, + universe_index, + variance.xform(Variance::Contravariant), + ) + } else { + self.generalize_generic_var( + substitution.0.as_slice(interner).last().unwrap(), + universe_index, + variance, + ) + } + }); + + let 
substitution = FnSubst(Substitution::from_iter(interner, vars)); + + TyKind::Function(FnPointer { + num_binders, + sig, + substitution, + }) + .intern(interner) + } + TyKind::Placeholder(_) | TyKind::BoundVar(_) => { + debug!("just generalizing to the ty itself: {:?}", ty); + // BoundVar and PlaceHolder have no internal values to be + // generic over, so we just relate directly to it + ty.clone() + } + TyKind::Alias(_) => { + let ena_var = self.table.new_variable(universe_index); + ena_var.to_ty(interner) + } + TyKind::InferenceVar(_var, kind) => { + if matches!(kind, TyVariableKind::Integer | TyVariableKind::Float) { + ty.clone() + } else if let Some(ty) = self.table.normalize_ty_shallow(interner, ty) { + self.generalize_ty(&ty, universe_index, variance) + } else if matches!(variance, Variance::Invariant) { + ty.clone() + } else { + let ena_var = self.table.new_variable(universe_index); + ena_var.to_ty(interner) + } + } + } } - fn unify_projection_ty_enum(&mut self, proj: ProjectionTyRefEnum, ty: &Ty) -> Fallible<()> { - match proj { - ProjectionTyEnum::Selected(proj) => self.unify_projection_ty(proj, ty), - ProjectionTyEnum::Unselected(proj) => self.unify_unselected_projection_ty(proj, ty), + #[instrument(level = "debug", skip(self))] + fn generalize_lifetime( + &mut self, + lifetime: &Lifetime, + universe_index: UniverseIndex, + variance: Variance, + ) -> Lifetime { + if matches!(lifetime.data(self.interner), LifetimeData::BoundVar(_)) + || matches!(variance, Variance::Invariant) + { + lifetime.clone() + } else { + self.table + .new_variable(universe_index) + .to_lifetime(self.interner) } } - fn unify_projection_ty(&mut self, proj: &ProjectionTy, ty: &Ty) -> Fallible<()> { - Ok(self.goals.push(InEnvironment::new( - self.environment, - ProjectionEq { - projection: proj.clone(), - ty: ty.clone(), - }.cast(), - ))) + #[instrument(level = "debug", skip(self))] + fn generalize_const(&mut self, const_: &Const, universe_index: UniverseIndex) -> Const { + let data = 
const_.data(self.interner); + if matches!(data.value, ConstValue::BoundVar(_)) { + const_.clone() + } else { + self.table + .new_variable(universe_index) + .to_const(self.interner, data.ty.clone()) + } } - fn unify_unselected_projection_ty( + fn generalize_generic_var( &mut self, - proj: &UnselectedProjectionTy, - ty: &Ty, - ) -> Fallible<()> { - Ok(self.goals.push(InEnvironment::new( - self.environment, - UnselectedNormalize { - projection: proj.clone(), - ty: ty.clone(), - }.cast(), - ))) + sub_var: &GenericArg, + universe_index: UniverseIndex, + variance: Variance, + ) -> GenericArg { + let interner = self.interner; + (match sub_var.data(interner) { + GenericArgData::Ty(ty) => { + GenericArgData::Ty(self.generalize_ty(ty, universe_index, variance)) + } + GenericArgData::Lifetime(lifetime) => GenericArgData::Lifetime( + self.generalize_lifetime(lifetime, universe_index, variance), + ), + GenericArgData::Const(const_value) => { + GenericArgData::Const(self.generalize_const(const_value, universe_index)) + } + }) + .intern(interner) } - fn unify_forall_apply(&mut self, ty1: &QuantifiedTy, ty2: &Ty) -> Fallible<()> { - let ui = self.table.new_universe(); - let lifetimes1: Vec<_> = (0..ty1.num_binders) - .map(|idx| Lifetime::Placeholder(PlaceholderIndex { ui, idx }).cast()) - .collect(); + /// Generalizes all but the first + #[instrument(level = "debug", skip(self, get_variance))] + fn generalize_substitution_skip_self Option>( + &mut self, + substitution: &Substitution, + universe_index: UniverseIndex, + get_variance: F, + ) -> Substitution { + let interner = self.interner; + let vars = substitution.iter(interner).enumerate().map(|(i, sub_var)| { + if i == 0 { + sub_var.clone() + } else { + let variance = get_variance(i).unwrap_or(Variance::Invariant); + self.generalize_generic_var(sub_var, universe_index, variance) + } + }); + Substitution::from_iter(interner, vars) + } - let ty1 = ty1.substitute(&lifetimes1); - let ty2 = ty2.clone(); + #[instrument(level = "debug", 
skip(self, get_variance))] + fn generalize_substitution Variance>( + &mut self, + substitution: &Substitution, + universe_index: UniverseIndex, + get_variance: F, + ) -> Substitution { + let interner = self.interner; + let vars = substitution.iter(interner).enumerate().map(|(i, sub_var)| { + let variance = get_variance(i); + self.generalize_generic_var(sub_var, universe_index, variance) + }); - self.sub_unify(ty1, ty2) + Substitution::from_iter(interner, vars) } - fn unify_var_ty(&mut self, var: InferenceVar, ty: &Ty) -> Fallible<()> { - debug!("unify_var_ty(var={:?}, ty={:?})", var, ty); + /// Unify an inference variable `var` with some non-inference + /// variable `ty`, just bind `var` to `ty`. But we must enforce two conditions: + /// + /// - `var` does not appear inside of `ty` (the standard `OccursCheck`) + /// - `ty` does not reference anything in a lifetime that could not be named in `var` + /// (the extended `OccursCheck` created to handle universes) + #[instrument(level = "debug", skip(self))] + fn relate_var_ty( + &mut self, + variance: Variance, + var: InferenceVar, + var_kind: TyVariableKind, + ty: &Ty, + ) -> Fallible<()> { + let interner = self.interner; + + match (var_kind, ty.is_integer(interner), ty.is_float(interner)) { + // General inference variables can unify with any type + (TyVariableKind::General, _, _) + // Integer inference variables can only unify with integer types + | (TyVariableKind::Integer, true, _) + // Float inference variables can only unify with float types + | (TyVariableKind::Float, _, true) => { + }, + _ => return Err(NoSolution), + } let var = EnaVariable::from(var); @@ -267,108 +869,365 @@ impl<'t> Unifier<'t> { // this variable was created -- though it may change over time // as the variable is unified. 
let universe_index = self.table.universe_of_unbound_var(var); + // let universe_index = self.table.max_universe(); + + debug!("relate_var_ty: universe index of var: {:?}", universe_index); + + debug!("trying fold_with on {:?}", ty); + let ty1 = ty + .clone() + .try_fold_with( + &mut OccursCheck::new(self, var, universe_index), + DebruijnIndex::INNERMOST, + ) + .map_err(|e| { + debug!("failed to fold {:?}", ty); + e + })?; + + // "Generalize" types. This ensures that we aren't accidentally forcing + // too much onto `var`. Instead of directly setting `var` equal to `ty`, + // we just take the outermost structure we _know_ `var` holds, and then + // apply that to `ty`. This involves creating new inference vars for + // everything inside `var`, then calling `relate_ty_ty` to relate those + // inference vars to the things they generalized with the correct + // variance. - let ty1 = ty.fold_with(&mut OccursCheck::new(self, var, universe_index), 0)?; + // The main problem this solves is that lifetime relationships are + // relationships, not just eq ones. So when solving &'a u32 <: U, + // generalizing we would end up with U = &'a u32. Instead, we want + // U = &'b u32, with a lifetime constraint 'a <: 'b. This matters + // especially when solving multiple constraints - for example, &'a u32 + // <: U, &'b u32 <: U (where without generalizing, we'd end up with 'a + // <: 'b, where we really want 'a <: 'c, 'b <: 'c for some 'c). + + // Example operation: consider `ty` as `&'x SomeType`. To generalize + // this, we create two new vars `'0` and `1`. Then we relate `var` with + // `&'0 1` and `&'0 1` with `&'x SomeType`. The second relation will + // recurse, and we'll end up relating `'0` with `'x` and `1` with `SomeType`. 
+ let generalized_val = self.generalize_ty(&ty1, universe_index, variance); + + debug!("var {:?} generalized to {:?}", var, generalized_val); self.table .unify - .unify_var_value(var, InferenceValue::from(ty1.clone())) + .unify_var_value( + var, + InferenceValue::from_ty(interner, generalized_val.clone()), + ) .unwrap(); - debug!("unify_var_ty: var {:?} set to {:?}", var, ty1); + debug!("var {:?} set to {:?}", var, generalized_val); + + self.relate_ty_ty(variance, &generalized_val, &ty1)?; + + debug!( + "generalized version {:?} related to original {:?}", + generalized_val, ty1 + ); Ok(()) } - fn unify_lifetime_lifetime(&mut self, a: &Lifetime, b: &Lifetime) -> Fallible<()> { - if let Some(n_a) = self.table.normalize_lifetime(a) { - return self.unify_lifetime_lifetime(&n_a, b); - } else if let Some(n_b) = self.table.normalize_lifetime(b) { - return self.unify_lifetime_lifetime(a, &n_b); - } + fn relate_lifetime_lifetime( + &mut self, + variance: Variance, + a: &Lifetime, + b: &Lifetime, + ) -> Fallible<()> { + let interner = self.interner; + + let n_a = self.table.normalize_lifetime_shallow(interner, a); + let n_b = self.table.normalize_lifetime_shallow(interner, b); + let a = n_a.as_ref().unwrap_or(a); + let b = n_b.as_ref().unwrap_or(b); - debug_heading!("unify_lifetime_lifetime({:?}, {:?})", a, b); + debug_span!("relate_lifetime_lifetime", ?variance, ?a, ?b); - match (a, b) { - (&Lifetime::InferenceVar(var_a), &Lifetime::InferenceVar(var_b)) => { + match (a.data(interner), b.data(interner)) { + (&LifetimeData::InferenceVar(var_a), &LifetimeData::InferenceVar(var_b)) => { let var_a = EnaVariable::from(var_a); let var_b = EnaVariable::from(var_b); - debug!( - "unify_lifetime_lifetime: var_a={:?} var_b={:?}", - var_a, var_b - ); + debug!(?var_a, ?var_b); self.table.unify.unify_var_var(var_a, var_b).unwrap(); Ok(()) } - (&Lifetime::InferenceVar(var), &Lifetime::Placeholder(idx)) - | (&Lifetime::Placeholder(idx), &Lifetime::InferenceVar(var)) => { - let var = 
EnaVariable::from(var); - let var_ui = self.table.universe_of_unbound_var(var); - if var_ui.can_see(idx.ui) { - debug!( - "unify_lifetime_lifetime: {:?} in {:?} can see {:?}; unifying", - var, var_ui, idx.ui - ); - let v = Lifetime::Placeholder(idx); - self.table - .unify - .unify_var_value(var, InferenceValue::from(v)) - .unwrap(); + ( + &LifetimeData::InferenceVar(a_var), + &LifetimeData::Placeholder(PlaceholderIndex { ui, .. }), + ) => self.unify_lifetime_var(variance, a_var, b, ui), + + ( + &LifetimeData::Placeholder(PlaceholderIndex { ui, .. }), + &LifetimeData::InferenceVar(b_var), + ) => self.unify_lifetime_var(variance.invert(), b_var, a, ui), + + (&LifetimeData::InferenceVar(a_var), &LifetimeData::Erased) + | (&LifetimeData::InferenceVar(a_var), &LifetimeData::Static) + | (&LifetimeData::InferenceVar(a_var), &LifetimeData::Error) => { + self.unify_lifetime_var(variance, a_var, b, UniverseIndex::root()) + } + + (&LifetimeData::Erased, &LifetimeData::InferenceVar(b_var)) + | (&LifetimeData::Static, &LifetimeData::InferenceVar(b_var)) + | (&LifetimeData::Error, &LifetimeData::InferenceVar(b_var)) => { + self.unify_lifetime_var(variance.invert(), b_var, a, UniverseIndex::root()) + } + + (&LifetimeData::Static, &LifetimeData::Static) + | (&LifetimeData::Erased, &LifetimeData::Erased) => Ok(()), + + (&LifetimeData::Static, &LifetimeData::Placeholder(_)) + | (&LifetimeData::Static, &LifetimeData::Erased) + | (&LifetimeData::Placeholder(_), &LifetimeData::Static) + | (&LifetimeData::Placeholder(_), &LifetimeData::Placeholder(_)) + | (&LifetimeData::Placeholder(_), &LifetimeData::Erased) + | (&LifetimeData::Erased, &LifetimeData::Static) + | (&LifetimeData::Erased, &LifetimeData::Placeholder(_)) => { + if a != b { + self.push_lifetime_outlives_goals(variance, a.clone(), b.clone()); Ok(()) } else { - debug!( - "unify_lifetime_lifetime: {:?} in {:?} cannot see {:?}; pushing constraint", - var, var_ui, idx.ui - ); - Ok(self.push_lifetime_eq_constraint(*a, *b)) + 
Ok(()) } } - (&Lifetime::Placeholder(_), &Lifetime::Placeholder(_)) => if a != b { - Ok(self.push_lifetime_eq_constraint(*a, *b)) - } else { + (LifetimeData::Error, _) | (_, LifetimeData::Error) => Ok(()), + (LifetimeData::BoundVar(_), _) | (_, LifetimeData::BoundVar(_)) => panic!( + "unification encountered bound variable: a={:?} b={:?}", + a, b + ), + + (LifetimeData::Phantom(..), _) | (_, LifetimeData::Phantom(..)) => unreachable!(), + } + } + + #[instrument(level = "debug", skip(self))] + fn unify_lifetime_var( + &mut self, + variance: Variance, + var: InferenceVar, + value: &Lifetime, + value_ui: UniverseIndex, + ) -> Fallible<()> { + let var = EnaVariable::from(var); + let var_ui = self.table.universe_of_unbound_var(var); + if var_ui.can_see(value_ui) && matches!(variance, Variance::Invariant) { + debug!("{:?} in {:?} can see {:?}; unifying", var, var_ui, value_ui); + self.table + .unify + .unify_var_value( + var, + InferenceValue::from_lifetime(self.interner, value.clone()), + ) + .unwrap(); + Ok(()) + } else { + debug!( + "{:?} in {:?} cannot see {:?}; pushing constraint", + var, var_ui, value_ui + ); + self.push_lifetime_outlives_goals( + variance, + var.to_lifetime(self.interner), + value.clone(), + ); + Ok(()) + } + } + + fn relate_const_const<'a>( + &mut self, + variance: Variance, + a: &'a Const, + b: &'a Const, + ) -> Fallible<()> { + let interner = self.interner; + + let n_a = self.table.normalize_const_shallow(interner, a); + let n_b = self.table.normalize_const_shallow(interner, b); + let a = n_a.as_ref().unwrap_or(a); + let b = n_b.as_ref().unwrap_or(b); + + debug_span!("relate_const_const", ?variance, ?a, ?b); + + let ConstData { + ty: a_ty, + value: a_val, + } = a.data(interner); + let ConstData { + ty: b_ty, + value: b_val, + } = b.data(interner); + + self.relate_ty_ty(variance, a_ty, b_ty)?; + + match (a_val, b_val) { + // Unifying two inference variables: unify them in the underlying + // ena table. 
+ (&ConstValue::InferenceVar(var1), &ConstValue::InferenceVar(var2)) => { + debug!(?var1, ?var2, "relate_ty_ty"); + let var1 = EnaVariable::from(var1); + let var2 = EnaVariable::from(var2); + self.table + .unify + .unify_var_var(var1, var2) + .expect("unification of two unbound variables cannot fail"); Ok(()) - }, + } - (Lifetime::BoundVar(_), _) | (_, Lifetime::BoundVar(_)) => { - panic!("unification encountered bound variable: a={:?} b={:?}", a, b) + // Unifying an inference variables with a non-inference variable. + (&ConstValue::InferenceVar(var), &ConstValue::Concrete(_)) + | (&ConstValue::InferenceVar(var), &ConstValue::Placeholder(_)) => { + debug!(?var, ty=?b, "unify_var_ty"); + self.unify_var_const(var, b) } + + (&ConstValue::Concrete(_), &ConstValue::InferenceVar(var)) + | (&ConstValue::Placeholder(_), &ConstValue::InferenceVar(var)) => { + debug!(?var, ty=?a, "unify_var_ty"); + self.unify_var_const(var, a) + } + + (&ConstValue::Placeholder(p1), &ConstValue::Placeholder(p2)) => { + Zip::zip_with(self, variance, &p1, &p2) + } + + (&ConstValue::Concrete(ref ev1), &ConstValue::Concrete(ref ev2)) => { + if ev1.const_eq(a_ty, ev2, interner) { + Ok(()) + } else { + Err(NoSolution) + } + } + + (&ConstValue::Concrete(_), &ConstValue::Placeholder(_)) + | (&ConstValue::Placeholder(_), &ConstValue::Concrete(_)) => Err(NoSolution), + + (ConstValue::BoundVar(_), _) | (_, ConstValue::BoundVar(_)) => panic!( + "unification encountered bound variable: a={:?} b={:?}", + a, b + ), } } - fn push_lifetime_eq_constraint(&mut self, a: Lifetime, b: Lifetime) { - self.constraints.push(InEnvironment::new( - self.environment, - Constraint::LifetimeEq(a, b), - )); + #[instrument(level = "debug", skip(self))] + fn unify_var_const(&mut self, var: InferenceVar, c: &Const) -> Fallible<()> { + let interner = self.interner; + let var = EnaVariable::from(var); + + // Determine the universe index associated with this + // variable. 
This is basically a count of the number of + // `forall` binders that had been introduced at the point + // this variable was created -- though it may change over time + // as the variable is unified. + let universe_index = self.table.universe_of_unbound_var(var); + + let c1 = c.clone().try_fold_with( + &mut OccursCheck::new(self, var, universe_index), + DebruijnIndex::INNERMOST, + )?; + + debug!("unify_var_const: var {:?} set to {:?}", var, c1); + self.table + .unify + .unify_var_value(var, InferenceValue::from_const(interner, c1)) + .unwrap(); + + Ok(()) + } + + /// Relate `a`, `b` such that if `variance = Covariant`, `a` is a subtype of + /// `b` and thus `a` must outlive `b`. + fn push_lifetime_outlives_goals(&mut self, variance: Variance, a: Lifetime, b: Lifetime) { + debug!( + "pushing lifetime outlives goals for a={:?} b={:?} with variance {:?}", + a, b, variance + ); + if matches!(variance, Variance::Invariant | Variance::Contravariant) { + self.goals.push(InEnvironment::new( + self.environment, + WhereClause::LifetimeOutlives(LifetimeOutlives { + a: a.clone(), + b: b.clone(), + }) + .cast(self.interner), + )); + } + if matches!(variance, Variance::Invariant | Variance::Covariant) { + self.goals.push(InEnvironment::new( + self.environment, + WhereClause::LifetimeOutlives(LifetimeOutlives { a: b, b: a }).cast(self.interner), + )); + } + } + + /// Pushes a goal of `a` being a subtype of `b`. 
+ fn push_subtype_goal(&mut self, a: Ty, b: Ty) { + let subtype_goal = GoalData::SubtypeGoal(SubtypeGoal { a, b }).intern(self.interner()); + self.goals + .push(InEnvironment::new(self.environment, subtype_goal)); } } -impl<'t> Zipper for Unifier<'t> { - fn zip_tys(&mut self, a: &Ty, b: &Ty) -> Fallible<()> { - self.unify_ty_ty(a, b) +impl<'i, I: Interner> Zipper for Unifier<'i, I> { + fn zip_tys(&mut self, variance: Variance, a: &Ty, b: &Ty) -> Fallible<()> { + debug!("zip_tys {:?}, {:?}, {:?}", variance, a, b); + self.relate_ty_ty(variance, a, b) } - fn zip_lifetimes(&mut self, a: &Lifetime, b: &Lifetime) -> Fallible<()> { - self.unify_lifetime_lifetime(a, b) + fn zip_lifetimes( + &mut self, + variance: Variance, + a: &Lifetime, + b: &Lifetime, + ) -> Fallible<()> { + self.relate_lifetime_lifetime(variance, a, b) } - fn zip_binders(&mut self, _: &Binders, _: &Binders) -> Fallible<()> + fn zip_consts(&mut self, variance: Variance, a: &Const, b: &Const) -> Fallible<()> { + self.relate_const_const(variance, a, b) + } + + fn zip_binders(&mut self, variance: Variance, a: &Binders, b: &Binders) -> Fallible<()> where - T: Zip + Fold, + T: Clone + HasInterner + Zip + TypeFoldable, { - panic!("cannot unify things with binders (other than types)") + // The binders that appear in types (apart from quantified types, which are + // handled in `unify_ty`) appear as part of `dyn Trait` and `impl Trait` types. + // + // They come in two varieties: + // + // * The existential binder from `dyn Trait` / `impl Trait` + // (representing the hidden "self" type) + // * The `for<..>` binders from higher-ranked traits. + // + // In both cases we can use the same `relate_binders` routine. 
+ + self.relate_binders(variance, a, b) + } + + fn interner(&self) -> I { + self.interner + } + + fn unification_database(&self) -> &dyn UnificationDatabase { + self.db } } -struct OccursCheck<'u, 't: 'u> { - unifier: &'u mut Unifier<'t>, - var: EnaVariable, +struct OccursCheck<'u, 't, I: Interner> { + unifier: &'u mut Unifier<'t, I>, + var: EnaVariable, universe_index: UniverseIndex, } -impl<'u, 't> OccursCheck<'u, 't> { - fn new(unifier: &'u mut Unifier<'t>, var: EnaVariable, universe_index: UniverseIndex) -> Self { +impl<'u, 't, I: Interner> OccursCheck<'u, 't, I> { + fn new( + unifier: &'u mut Unifier<'t, I>, + var: EnaVariable, + universe_index: UniverseIndex, + ) -> Self { OccursCheck { unifier, var, @@ -377,26 +1236,51 @@ impl<'u, 't> OccursCheck<'u, 't> { } } -impl<'u, 't> DefaultTypeFolder for OccursCheck<'u, 't> {} +impl<'i, I: Interner> FallibleTypeFolder for OccursCheck<'_, 'i, I> { + type Error = NoSolution; -impl<'u, 't> PlaceholderFolder for OccursCheck<'u, 't> { - fn fold_free_placeholder_ty( + fn as_dyn(&mut self) -> &mut dyn FallibleTypeFolder { + self + } + + fn try_fold_free_placeholder_ty( &mut self, universe: PlaceholderIndex, - _binders: usize, - ) -> Fallible { + _outer_binder: DebruijnIndex, + ) -> Fallible> { + let interner = self.interner(); if self.universe_index < universe.ui { + debug!( + "OccursCheck aborting because self.universe_index ({:?}) < universe.ui ({:?})", + self.universe_index, universe.ui + ); Err(NoSolution) } else { - Ok(universe.to_ty()) // no need to shift, not relative to depth + Ok(universe.to_ty(interner)) // no need to shift, not relative to depth } } - fn fold_free_placeholder_lifetime( + fn try_fold_free_placeholder_const( + &mut self, + ty: Ty, + universe: PlaceholderIndex, + _outer_binder: DebruijnIndex, + ) -> Fallible> { + let interner = self.interner(); + if self.universe_index < universe.ui { + Err(NoSolution) + } else { + Ok(universe.to_const(interner, ty)) // no need to shift, not relative to depth + } + 
} + + #[instrument(level = "debug", skip(self))] + fn try_fold_free_placeholder_lifetime( &mut self, ui: PlaceholderIndex, - _binders: usize, - ) -> Fallible { + _outer_binder: DebruijnIndex, + ) -> Fallible> { + let interner = self.interner(); if self.universe_index < ui.ui { // Scenario is like: // @@ -412,26 +1296,35 @@ impl<'u, 't> PlaceholderFolder for OccursCheck<'u, 't> { // exists<'x> forall<'b> ?T = Foo<'x>, where 'x = 'b let tick_x = self.unifier.table.new_variable(self.universe_index); - self.unifier - .push_lifetime_eq_constraint(tick_x.to_lifetime(), ui.to_lifetime()); - Ok(tick_x.to_lifetime()) + self.unifier.push_lifetime_outlives_goals( + Variance::Invariant, + tick_x.to_lifetime(interner), + ui.to_lifetime(interner), + ); + Ok(tick_x.to_lifetime(interner)) } else { // If the `ui` is higher than `self.universe_index`, then we can name // this lifetime, no problem. - Ok(ui.to_lifetime()) // no need to shift, not relative to depth + Ok(ui.to_lifetime(interner)) // no need to shift, not relative to depth } } -} -impl<'u, 't> InferenceFolder for OccursCheck<'u, 't> { - fn fold_inference_ty(&mut self, var: InferenceVar, _binders: usize) -> Fallible { + fn try_fold_inference_ty( + &mut self, + var: InferenceVar, + kind: TyVariableKind, + _outer_binder: DebruijnIndex, + ) -> Fallible> { + let interner = self.interner(); let var = EnaVariable::from(var); match self.unifier.table.unify.probe_value(var) { // If this variable already has a value, fold over that value instead. 
InferenceValue::Bound(normalized_ty) => { - let normalized_ty = normalized_ty.ty().unwrap(); - let normalized_ty = normalized_ty.fold_with(self, 0)?; - assert!(!normalized_ty.needs_shift()); + let normalized_ty = normalized_ty.assert_ty_ref(interner); + let normalized_ty = normalized_ty + .clone() + .try_fold_with(self, DebruijnIndex::INNERMOST)?; + assert!(!normalized_ty.needs_shift(interner)); Ok(normalized_ty) } @@ -440,6 +1333,10 @@ impl<'u, 't> InferenceFolder for OccursCheck<'u, 't> { // become the value of). InferenceValue::Unbound(ui) => { if self.unifier.table.unify.unioned(var, self.var) { + debug!( + "OccursCheck aborting because {:?} unioned with {:?}", + var, self.var, + ); return Err(NoSolution); } @@ -457,14 +1354,65 @@ impl<'u, 't> InferenceFolder for OccursCheck<'u, 't> { .unwrap(); } - Ok(var.to_ty()) + Ok(var.to_ty_with_kind(interner, kind)) } } } - fn fold_inference_lifetime(&mut self, var: InferenceVar, binders: usize) -> Fallible { + fn try_fold_inference_const( + &mut self, + ty: Ty, + var: InferenceVar, + _outer_binder: DebruijnIndex, + ) -> Fallible> { + let interner = self.interner(); + let var = EnaVariable::from(var); + match self.unifier.table.unify.probe_value(var) { + // If this variable already has a value, fold over that value instead. + InferenceValue::Bound(normalized_const) => { + let normalized_const = normalized_const.assert_const_ref(interner); + let normalized_const = normalized_const + .clone() + .try_fold_with(self, DebruijnIndex::INNERMOST)?; + assert!(!normalized_const.needs_shift(interner)); + Ok(normalized_const) + } + + // Otherwise, check the universe of the variable, and also + // check for cycles with `self.var` (which this will soon + // become the value of). 
+ InferenceValue::Unbound(ui) => { + if self.unifier.table.unify.unioned(var, self.var) { + return Err(NoSolution); + } + + if self.universe_index < ui { + // Scenario is like: + // + // forall exists ?C = Foo + // + // where A is in universe 0 and B is in universe 1. + // This is OK, if B is promoted to universe 0. + self.unifier + .table + .unify + .unify_var_value(var, InferenceValue::Unbound(self.universe_index)) + .unwrap(); + } + + Ok(var.to_const(interner, ty)) + } + } + } + + fn try_fold_inference_lifetime( + &mut self, + var: InferenceVar, + outer_binder: DebruijnIndex, + ) -> Fallible> { // a free existentially bound region; find the // inference variable it corresponds to + let interner = self.interner(); let var = EnaVariable::from(var); match self.unifier.table.unify.probe_value(var) { InferenceValue::Unbound(ui) => { @@ -481,21 +1429,23 @@ impl<'u, 't> InferenceFolder for OccursCheck<'u, 't> { .unify_var_value(var, InferenceValue::Unbound(self.universe_index)) .unwrap(); } - Ok(var.to_lifetime()) + Ok(var.to_lifetime(interner)) } InferenceValue::Bound(l) => { - let l = l.lifetime().unwrap(); - let l = l.fold_with(self, binders)?; - assert!(!l.needs_shift()); - Ok(l.clone()) + let l = l.assert_lifetime_ref(interner); + let l = l.clone().try_fold_with(self, outer_binder)?; + assert!(!l.needs_shift(interner)); + Ok(l) } } } -} -impl<'u, 't> DefaultFreeVarFolder for OccursCheck<'u, 't> { - fn forbid() -> bool { + fn forbid_free_vars(&self) -> bool { true } + + fn interner(&self) -> I { + self.unifier.interner + } } diff --git a/chalk-solve/src/infer/var.rs b/chalk-solve/src/infer/var.rs index 676b4f99dcf..3fbf9200201 100644 --- a/chalk-solve/src/infer/var.rs +++ b/chalk-solve/src/infer/var.rs @@ -1,7 +1,10 @@ -use ena::unify::{UnifyKey, UnifyValue}; +use chalk_ir::cast::Cast; +use chalk_ir::interner::Interner; use chalk_ir::*; +use ena::unify::{UnifyKey, UnifyValue}; use std::cmp::min; use std::fmt; +use std::marker::PhantomData; use std::u32; /// Wrapper 
around `chalk_ir::InferenceVar` for coherence purposes. @@ -27,41 +30,67 @@ use std::u32; /// reject illegal values. Once the value of a variable is known, it /// can never change. /// - The value we actually store for variables is a -/// `ir::Parameter`, and hence it does carry along the kind of the +/// `ir::GenericArg`, and hence it does carry along the kind of the /// variable via the enum variant. However, we should always know /// the kind of the variable from context, and hence we typically /// "downcast" the resulting variable using /// e.g. `value.ty().unwrap()`. #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] -crate struct EnaVariable(InferenceVar); +pub struct EnaVariable { + var: InferenceVar, + phantom: PhantomData, +} + +impl From for EnaVariable { + fn from(var: InferenceVar) -> EnaVariable { + EnaVariable { + var, + phantom: PhantomData, + } + } +} -impl From for EnaVariable { - fn from(var: InferenceVar) -> EnaVariable { - EnaVariable(var) +impl From> for InferenceVar { + fn from(ena_var: EnaVariable) -> InferenceVar { + ena_var.var } } -impl EnaVariable { +impl EnaVariable { /// Convert this inference variable into a type. When using this /// method, naturally you should know from context that the kind /// of this inference variable is a type (we can't check it). - pub fn to_ty(self) -> Ty { - self.0.to_ty() + pub fn to_ty_with_kind(self, interner: I, kind: TyVariableKind) -> Ty { + self.var.to_ty(interner, kind) + } + + /// Same as `to_ty_with_kind`, but the kind is set to `TyVariableKind::General`. + /// This should be used instead of `to_ty_with_kind` when creating a new + /// inference variable (when the kind is not known). + pub fn to_ty(self, interner: I) -> Ty { + self.var.to_ty(interner, TyVariableKind::General) } /// Convert this inference variable into a lifetime. When using this /// method, naturally you should know from context that the kind /// of this inference variable is a lifetime (we can't check it). 
- pub fn to_lifetime(self) -> Lifetime { - self.0.to_lifetime() + pub fn to_lifetime(self, interner: I) -> Lifetime { + self.var.to_lifetime(interner) + } + + /// Convert this inference variable into a const. When using this + /// method, naturally you should know from context that the kind + /// of this inference variable is a const (we can't check it). + pub fn to_const(self, interner: I, ty: Ty) -> Const { + self.var.to_const(interner, ty) } } -impl UnifyKey for EnaVariable { - type Value = InferenceValue; +impl UnifyKey for EnaVariable { + type Value = InferenceValue; fn index(&self) -> u32 { - self.0.index() + self.var.index() } fn from_index(u: u32) -> Self { @@ -77,36 +106,38 @@ impl UnifyKey for EnaVariable { /// universe index; when the inference variable is assigned a value, it becomes /// bound and records that value. See `EnaVariable` for more details. #[derive(Clone, Debug, PartialEq, Eq)] -crate enum InferenceValue { +pub enum InferenceValue { Unbound(UniverseIndex), - Bound(Parameter), + Bound(GenericArg), } -impl From for InferenceValue { - fn from(ty: Ty) -> Self { - InferenceValue::Bound(ParameterKind::Ty(ty)) +impl InferenceValue { + pub fn from_ty(interner: I, ty: Ty) -> Self { + InferenceValue::Bound(ty.cast(interner)) + } + + pub fn from_lifetime(interner: I, lifetime: Lifetime) -> Self { + InferenceValue::Bound(lifetime.cast(interner)) } -} -impl From for InferenceValue { - fn from(lifetime: Lifetime) -> Self { - InferenceValue::Bound(ParameterKind::Lifetime(lifetime)) + pub fn from_const(interner: I, constant: Const) -> Self { + InferenceValue::Bound(constant.cast(interner)) } } -impl UnifyValue for InferenceValue { - type Error = (InferenceValue, InferenceValue); +impl UnifyValue for InferenceValue { + type Error = (InferenceValue, InferenceValue); fn unify_values( - a: &InferenceValue, - b: &InferenceValue, - ) -> Result { + a: &InferenceValue, + b: &InferenceValue, + ) -> Result, (InferenceValue, InferenceValue)> { match (a, b) { 
(&InferenceValue::Unbound(ui_a), &InferenceValue::Unbound(ui_b)) => { Ok(InferenceValue::Unbound(min(ui_a, ui_b))) } - (bound @ &InferenceValue::Bound(_), &InferenceValue::Unbound(_)) | - (&InferenceValue::Unbound(_), bound @ &InferenceValue::Bound(_)) => Ok(bound.clone()), + (bound @ &InferenceValue::Bound(_), &InferenceValue::Unbound(_)) + | (&InferenceValue::Unbound(_), bound @ &InferenceValue::Bound(_)) => Ok(bound.clone()), (&InferenceValue::Bound(_), &InferenceValue::Bound(_)) => { panic!("we should not be asked to unify two bound things") } @@ -114,8 +145,8 @@ impl UnifyValue for InferenceValue { } } -impl fmt::Debug for EnaVariable { - fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> { - write!(fmt, "{:?}", self.0) +impl fmt::Debug for EnaVariable { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { + write!(fmt, "{:?}", self.var) } } diff --git a/chalk-solve/src/lib.rs b/chalk-solve/src/lib.rs index 559e252d17a..a02a6f59ca8 100644 --- a/chalk-solve/src/lib.rs +++ b/chalk-solve/src/lib.rs @@ -1,12 +1,228 @@ -#![feature(crate_visibility_modifier)] -#![feature(non_modrs_mods)] +#![deny(rust_2018_idioms)] -#[macro_use] -extern crate chalk_macros; -extern crate chalk_engine; -extern crate chalk_ir; -extern crate ena; +use crate::display::sanitize_debug_name; +use crate::rust_ir::*; +use chalk_ir::interner::Interner; + +use chalk_ir::*; +use std::fmt::Debug; +use std::sync::Arc; +pub mod clauses; +pub mod coherence; +pub mod coinductive_goal; +pub mod display; +pub mod ext; +pub mod goal_builder; pub mod infer; +pub mod logging; +pub mod logging_db; +pub mod rust_ir; pub mod solve; -pub mod ext; +pub mod split; +pub mod wf; + +/// Trait representing access to a database of rust types. +/// +/// # `*_name` methods +/// +/// This trait has a number of `*_name` methods with default implementations. 
+/// These are used in the implementation for [`LoggingRustIrDatabase`], so that +/// when printing `.chalk` files equivalent to the data used, we can use real +/// names. +/// +/// The default implementations simply fall back to calling [`Interner`] debug +/// methods, and printing `"UnknownN"` (where `N` is the demultiplexing integer) +/// if those methods return `None`. +/// +/// The [`display::sanitize_debug_name`] utility is used in the default +/// implementations, and might be useful when providing custom implementations. +/// +/// [`LoggingRustIrDatabase`]: crate::logging_db::LoggingRustIrDatabase +/// [`display::sanitize_debug_name`]: crate::display::sanitize_debug_name +/// [`Interner`]: Interner +pub trait RustIrDatabase: Debug { + /// Returns any "custom program clauses" that do not derive from + /// Rust IR. Used only in testing the underlying solver. + fn custom_clauses(&self) -> Vec>; + + /// Returns the datum for the associated type with the given id. + fn associated_ty_data(&self, ty: AssocTypeId) -> Arc>; + + /// Returns the datum for the definition with the given id. + fn trait_datum(&self, trait_id: TraitId) -> Arc>; + + /// Returns the datum for the ADT with the given id. + fn adt_datum(&self, adt_id: AdtId) -> Arc>; + + /// Returns the coroutine datum for the coroutine with the given id. + fn coroutine_datum(&self, coroutine_id: CoroutineId) -> Arc>; + + /// Returns the coroutine witness datum for the coroutine with the given id. + fn coroutine_witness_datum( + &self, + coroutine_id: CoroutineId, + ) -> Arc>; + + /// Returns the representation for the ADT definition with the given id. + fn adt_repr(&self, id: AdtId) -> Arc>; + + /// Returns the siza and alignment of the ADT definition with the given id. + fn adt_size_align(&self, id: AdtId) -> Arc; + + /// Returns the datum for the fn definition with the given id. + fn fn_def_datum(&self, fn_def_id: FnDefId) -> Arc>; + + /// Returns the datum for the impl with the given id. 
+ fn impl_datum(&self, impl_id: ImplId) -> Arc>; + + fn associated_ty_from_impl( + &self, + impl_id: ImplId, + assoc_type_id: AssocTypeId, + ) -> Option>; + + /// Returns the `AssociatedTyValue` with the given id. + fn associated_ty_value(&self, id: AssociatedTyValueId) -> Arc>; + + /// Returns the `OpaqueTyDatum` with the given id. + fn opaque_ty_data(&self, id: OpaqueTyId) -> Arc>; + + /// Returns the "hidden type" corresponding with the opaque type. + fn hidden_opaque_type(&self, id: OpaqueTyId) -> Ty; + + /// Returns a list of potentially relevant impls for a given + /// trait-id; we also supply the type parameters that we are + /// trying to match (if known: these parameters may contain + /// inference variables, for example). The implementor is + /// permitted to return any superset of the applicable impls; + /// chalk will narrow down the list to only those that truly + /// apply. The parameters are provided as a "hint" to help the + /// implementor do less work, but can be completely ignored if + /// desired. + /// + /// The `binders` are for the `parameters`; if the recursive solver is used, + /// the parameters can contain bound variables referring to these binders. + fn impls_for_trait( + &self, + trait_id: TraitId, + parameters: &[GenericArg], + binders: &CanonicalVarKinds, + ) -> Vec>; + + /// Returns the impls that require coherence checking. This is not the + /// full set of impls that exist: + /// + /// - It can exclude impls not defined in the current crate. + /// - It can exclude "built-in" impls, like those for closures; only the + /// impls actually written by users need to be checked. + fn local_impls_to_coherence_check(&self, trait_id: TraitId) -> Vec>; + + /// Returns true if there is an explicit impl of the auto trait + /// `auto_trait_id` for the type `ty`. 
This is part of + /// the auto trait handling -- if there is no explicit impl given + /// by the user for `ty`, then we provide default impls + /// (otherwise, we rely on the impls the user gave). + fn impl_provided_for(&self, auto_trait_id: TraitId, ty: &TyKind) -> bool; + + /// Returns id of a trait lang item, if found + fn well_known_trait_id(&self, well_known_trait: WellKnownTrait) -> Option>; + + /// Returns id of a associated type, if found. + fn well_known_assoc_type_id(&self, assoc_type: WellKnownAssocType) -> Option>; + + /// Calculates program clauses from an env. This is intended to call the + /// `program_clauses_for_env` function and then possibly cache the clauses. + fn program_clauses_for_env(&self, environment: &Environment) -> ProgramClauses; + + fn interner(&self) -> I; + + /// Check if a trait is object safe + fn is_object_safe(&self, trait_id: TraitId) -> bool; + + /// Gets the `ClosureKind` for a given closure and substitution. + fn closure_kind(&self, closure_id: ClosureId, substs: &Substitution) -> ClosureKind; + + /// Gets the inputs and output for a given closure id and substitution. We + /// pass both the `ClosureId` and it's `Substituion` to give implementors + /// the freedom to store associated data in the substitution (like rustc) or + /// separately (like chalk-integration). + fn closure_inputs_and_output( + &self, + closure_id: ClosureId, + substs: &Substitution, + ) -> Binders>; + + /// Gets the upvars as a `Ty` for a given closure id and substitution. There + /// are no restrictions on the type of upvars. + fn closure_upvars(&self, closure_id: ClosureId, substs: &Substitution) -> Binders>; + + /// Gets the substitution for the closure when used as a function. + /// For example, for the following (not-quite-)rust code: + /// ```ignore + /// let foo = |a: &mut u32| { a += 1; }; + /// let c: &'a u32 = &0; + /// foo(c); + /// ``` + /// + /// This would return a `Substitution` of `[&'a]`. 
This could either be + /// substituted into the inputs and output, or into the upvars. + fn closure_fn_substitution( + &self, + closure_id: ClosureId, + substs: &Substitution, + ) -> Substitution; + + fn unification_database(&self) -> &dyn UnificationDatabase; + + /// Retrieves a trait's original name. No uniqueness guarantees, but must + /// a valid Rust identifier. + fn trait_name(&self, trait_id: TraitId) -> String { + sanitize_debug_name(|f| I::debug_trait_id(trait_id, f)) + } + + /// Retrieves a struct's original name. No uniqueness guarantees, but must + /// a valid Rust identifier. + fn adt_name(&self, adt_id: AdtId) -> String { + sanitize_debug_name(|f| I::debug_adt_id(adt_id, f)) + } + + /// Retrieves the name of an associated type. No uniqueness guarantees, but must + /// a valid Rust identifier. + fn assoc_type_name(&self, assoc_ty_id: AssocTypeId) -> String { + sanitize_debug_name(|f| I::debug_assoc_type_id(assoc_ty_id, f)) + } + + /// Retrieves the name of an opaque type. No uniqueness guarantees, but must + /// a valid Rust identifier. + fn opaque_type_name(&self, opaque_ty_id: OpaqueTyId) -> String { + sanitize_debug_name(|f| I::debug_opaque_ty_id(opaque_ty_id, f)) + } + + /// Retrieves the name of a function definition. No uniqueness guarantees, but must + /// a valid Rust identifier. + fn fn_def_name(&self, fn_def_id: FnDefId) -> String { + sanitize_debug_name(|f| I::debug_fn_def_id(fn_def_id, f)) + } + + // Retrieves the discriminant type for a type (mirror of rustc `Ty::discriminant_ty`) + fn discriminant_type(&self, ty: Ty) -> Ty; +} + +pub use clauses::program_clauses_for_env; + +pub use solve::Guidance; +pub use solve::Solution; +pub use solve::Solver; +pub use solve::SubstitutionResult; + +#[macro_use] +mod debug_macros { + #[macro_export] + macro_rules! 
debug_span { + ($($t: tt)*) => { + let __span = tracing::debug_span!($($t)*); + let __span = __span.enter(); + }; + } +} diff --git a/chalk-solve/src/logging.rs b/chalk-solve/src/logging.rs new file mode 100644 index 00000000000..a5be4d4d468 --- /dev/null +++ b/chalk-solve/src/logging.rs @@ -0,0 +1,19 @@ +/// Run an action with a tracing log subscriber. The logging level is loaded +/// from `CHALK_DEBUG`. +#[cfg(feature = "tracing-full")] +pub fn with_tracing_logs(action: impl FnOnce() -> T) -> T { + use tracing_subscriber::{layer::SubscriberExt, EnvFilter, Registry}; + use tracing_tree::HierarchicalLayer; + let filter = EnvFilter::from_env("CHALK_DEBUG"); + let subscriber = Registry::default() + .with(filter) + .with(HierarchicalLayer::new(2).with_writer(std::io::stdout)); + tracing::subscriber::with_default(subscriber, action) +} + +/// Run an action with a tracing log subscriber. The logging level is loaded +/// from `CHALK_DEBUG`. +#[cfg(not(feature = "tracing-full"))] +pub fn with_tracing_logs(action: impl FnOnce() -> T) -> T { + action() +} diff --git a/chalk-solve/src/logging_db.rs b/chalk-solve/src/logging_db.rs new file mode 100644 index 00000000000..b37b2bd4e03 --- /dev/null +++ b/chalk-solve/src/logging_db.rs @@ -0,0 +1,621 @@ +//! Provides wrappers over `RustIrDatabase` which record used definitions and write +//! `.chalk` files containing those definitions. +use std::{ + borrow::Borrow, + fmt::{self, Debug, Display}, + io::Write, + marker::PhantomData, + sync::Arc, + sync::Mutex, +}; + +use crate::rust_ir::*; +use crate::{ + display::{self, WriterState}, + RustIrDatabase, +}; +use chalk_ir::{interner::Interner, *}; + +use indexmap::IndexSet; + +mod id_collector; + +/// Wraps another `RustIrDatabase` (`DB`) and records which definitions are +/// used. +/// +/// A full .chalk file containing all used definitions can be recovered through +/// `LoggingRustIrDatabase`'s `Display` implementation. 
+/// +/// Uses a separate type, `P`, for the database stored inside to account for +/// `Arc` or wrapping other storage mediums. +#[derive(Debug)] +pub struct LoggingRustIrDatabase +where + DB: RustIrDatabase, + P: Borrow, + I: Interner, +{ + ws: WriterState, + def_ids: Mutex>>, + _phantom: PhantomData, +} + +impl LoggingRustIrDatabase +where + DB: RustIrDatabase, + P: Borrow, + I: Interner, +{ + pub fn new(db: P) -> Self { + LoggingRustIrDatabase { + ws: WriterState::new(db), + def_ids: Default::default(), + _phantom: PhantomData, + } + } +} + +impl Display for LoggingRustIrDatabase +where + DB: RustIrDatabase, + P: Borrow, + I: Interner, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let def_ids = self.def_ids.lock().unwrap(); + let stub_ids = id_collector::collect_unrecorded_ids(self.ws.db(), &def_ids); + display::write_stub_items(f, &self.ws, stub_ids)?; + display::write_items(f, &self.ws, def_ids.iter().copied()) + } +} + +impl LoggingRustIrDatabase +where + DB: RustIrDatabase, + P: Borrow, + I: Interner, +{ + fn record(&self, id: impl Into>) { + self.def_ids.lock().unwrap().insert(id.into()); + } + + fn record_all(&self, ids: T) + where + T: IntoIterator, + U: Into>, + { + self.def_ids + .lock() + .unwrap() + .extend(ids.into_iter().map(Into::into)); + } +} + +impl UnificationDatabase for LoggingRustIrDatabase +where + DB: RustIrDatabase, + P: Borrow + Debug, + I: Interner, +{ + fn fn_def_variance(&self, fn_def_id: chalk_ir::FnDefId) -> Variances { + self.ws + .db() + .unification_database() + .fn_def_variance(fn_def_id) + } + + fn adt_variance(&self, adt_id: chalk_ir::AdtId) -> Variances { + self.ws.db().unification_database().adt_variance(adt_id) + } +} + +impl RustIrDatabase for LoggingRustIrDatabase +where + DB: RustIrDatabase, + P: Borrow + Debug, + I: Interner, +{ + fn custom_clauses(&self) -> Vec> { + self.ws.db().custom_clauses() + } + + fn associated_ty_data( + &self, + ty: chalk_ir::AssocTypeId, + ) -> Arc> { + let 
ty_datum = self.ws.db().associated_ty_data(ty); + self.record(ty_datum.trait_id); + ty_datum + } + + fn trait_datum(&self, trait_id: TraitId) -> Arc> { + self.record(trait_id); + self.ws.db().trait_datum(trait_id) + } + + fn adt_datum(&self, adt_id: AdtId) -> Arc> { + self.record(adt_id); + self.ws.db().adt_datum(adt_id) + } + + fn coroutine_datum(&self, coroutine_id: CoroutineId) -> Arc> { + self.record(coroutine_id); + self.ws.db().borrow().coroutine_datum(coroutine_id) + } + + fn coroutine_witness_datum( + &self, + coroutine_id: CoroutineId, + ) -> Arc> { + self.record(coroutine_id); + self.ws.db().borrow().coroutine_witness_datum(coroutine_id) + } + + fn adt_repr(&self, id: AdtId) -> Arc> { + self.record(id); + self.ws.db().adt_repr(id) + } + + fn adt_size_align(&self, id: chalk_ir::AdtId) -> Arc { + self.record(id); + self.ws.db().adt_size_align(id) + } + + fn impl_datum(&self, impl_id: ImplId) -> Arc> { + self.record(impl_id); + self.ws.db().impl_datum(impl_id) + } + + fn hidden_opaque_type(&self, id: OpaqueTyId) -> Ty { + self.record(id); + self.ws.db().hidden_opaque_type(id) + } + + fn associated_ty_from_impl( + &self, + impl_id: ImplId, + assoc_type_id: AssocTypeId, + ) -> Option> { + self.ws.db().associated_ty_from_impl(impl_id, assoc_type_id) + } + + fn associated_ty_value( + &self, + id: crate::rust_ir::AssociatedTyValueId, + ) -> Arc> { + let value = self.ws.db().associated_ty_value(id); + self.record(value.impl_id); + value + } + + fn opaque_ty_data(&self, id: OpaqueTyId) -> Arc> { + self.record(id); + self.ws.db().opaque_ty_data(id) + } + + fn impls_for_trait( + &self, + trait_id: TraitId, + parameters: &[chalk_ir::GenericArg], + binders: &CanonicalVarKinds, + ) -> Vec> { + self.record(trait_id); + let impl_ids = self.ws.db().impls_for_trait(trait_id, parameters, binders); + self.record_all(impl_ids.iter().copied()); + impl_ids + } + + fn local_impls_to_coherence_check(&self, trait_id: TraitId) -> Vec> { + self.record(trait_id); + 
self.ws.db().local_impls_to_coherence_check(trait_id) + } + + fn impl_provided_for(&self, auto_trait_id: TraitId, ty: &TyKind) -> bool { + self.record(auto_trait_id); + if let TyKind::Adt(adt_id, _) = ty { + self.record(*adt_id); + } + self.ws.db().impl_provided_for(auto_trait_id, ty) + } + + fn well_known_trait_id( + &self, + well_known_trait: crate::rust_ir::WellKnownTrait, + ) -> Option> { + let trait_id = self.ws.db().well_known_trait_id(well_known_trait); + if let Some(id) = trait_id { + self.record(id); + } + trait_id + } + + fn well_known_assoc_type_id(&self, assoc_type: WellKnownAssocType) -> Option> { + let assoc_type_id = self.ws.db().well_known_assoc_type_id(assoc_type); + if let Some(id) = assoc_type_id { + self.record(self.ws.db().associated_ty_data(id).trait_id); + } + assoc_type_id + } + + fn program_clauses_for_env( + &self, + environment: &chalk_ir::Environment, + ) -> chalk_ir::ProgramClauses { + self.ws.db().program_clauses_for_env(environment) + } + + fn interner(&self) -> I { + self.ws.db().interner() + } + + fn trait_name(&self, trait_id: TraitId) -> String { + self.ws.db().trait_name(trait_id) + } + + fn adt_name(&self, adt_id: AdtId) -> String { + self.ws.db().adt_name(adt_id) + } + + fn assoc_type_name(&self, assoc_ty_id: AssocTypeId) -> String { + self.ws.db().assoc_type_name(assoc_ty_id) + } + + fn opaque_type_name(&self, opaque_ty_id: OpaqueTyId) -> String { + self.ws.db().opaque_type_name(opaque_ty_id) + } + + fn is_object_safe(&self, trait_id: TraitId) -> bool { + self.record(trait_id); + self.ws.db().is_object_safe(trait_id) + } + + fn fn_def_datum(&self, fn_def_id: chalk_ir::FnDefId) -> Arc> { + self.record(fn_def_id); + self.ws.db().fn_def_datum(fn_def_id) + } + + fn fn_def_name(&self, fn_def_id: FnDefId) -> String { + self.ws.db().fn_def_name(fn_def_id) + } + + fn closure_kind(&self, closure_id: ClosureId, substs: &Substitution) -> ClosureKind { + // TODO: record closure IDs + self.ws.db().closure_kind(closure_id, substs) + } + + 
fn closure_inputs_and_output( + &self, + closure_id: ClosureId, + substs: &Substitution, + ) -> Binders> { + // TODO: record closure IDs + self.ws.db().closure_inputs_and_output(closure_id, substs) + } + + fn closure_upvars(&self, closure_id: ClosureId, substs: &Substitution) -> Binders> { + // TODO: record closure IDs + self.ws.db().closure_upvars(closure_id, substs) + } + + fn closure_fn_substitution( + &self, + closure_id: ClosureId, + substs: &Substitution, + ) -> Substitution { + // TODO: record closure IDs + self.ws.db().closure_fn_substitution(closure_id, substs) + } + + fn discriminant_type(&self, ty: Ty) -> Ty { + self.ws.db().discriminant_type(ty) + } + + fn unification_database(&self) -> &dyn UnificationDatabase { + self + } +} + +/// Wraps a [`RustIrDatabase`], and, when dropped, writes out all used +/// definition to the given file. +/// +/// Uses [`LoggingRustIrDatabase`] internally. +/// +/// Uses a separate type, `P`, for the database stored inside to account for +/// `Arc` or wrapping other storage mediums. 
+pub struct WriteOnDropRustIrDatabase +where + I: Interner, + W: Write, + DB: RustIrDatabase, + P: Borrow, +{ + db: LoggingRustIrDatabase, + write: W, +} + +impl fmt::Debug for WriteOnDropRustIrDatabase +where + I: Interner, + W: Write, + DB: RustIrDatabase, + P: Borrow + fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("WriteOnDropRustIrDatabase") + .field("db", &self.db) + .field("write", &"") + .finish() + } +} + +impl WriteOnDropRustIrDatabase +where + I: Interner, + W: Write, + DB: RustIrDatabase, + P: Borrow, +{ + pub fn new(db: P, write: W) -> Self { + WriteOnDropRustIrDatabase { + db: LoggingRustIrDatabase::new(db), + write, + } + } + + pub fn from_logging_db(db: LoggingRustIrDatabase, write: W) -> Self { + WriteOnDropRustIrDatabase { db, write } + } +} + +impl Drop for WriteOnDropRustIrDatabase +where + I: Interner, + W: Write, + DB: RustIrDatabase, + P: Borrow, +{ + fn drop(&mut self) { + write!(self.write, "{}", self.db) + .and_then(|_| self.write.flush()) + .expect("expected to be able to write rust ir database"); + } +} + +impl UnificationDatabase for WriteOnDropRustIrDatabase +where + I: Interner, + W: Write, + DB: RustIrDatabase, + P: Borrow + Debug, +{ + fn fn_def_variance(&self, fn_def_id: chalk_ir::FnDefId) -> Variances { + self.db + .borrow() + .unification_database() + .fn_def_variance(fn_def_id) + } + + fn adt_variance(&self, adt_id: chalk_ir::AdtId) -> Variances { + self.db.borrow().unification_database().adt_variance(adt_id) + } +} + +impl RustIrDatabase for WriteOnDropRustIrDatabase +where + I: Interner, + W: Write, + DB: RustIrDatabase, + P: Borrow + Debug, +{ + fn custom_clauses(&self) -> Vec> { + self.db.custom_clauses() + } + + fn associated_ty_data( + &self, + ty: chalk_ir::AssocTypeId, + ) -> Arc> { + self.db.associated_ty_data(ty) + } + + fn trait_datum(&self, trait_id: TraitId) -> Arc> { + self.db.trait_datum(trait_id) + } + + fn adt_datum(&self, adt_id: AdtId) -> Arc> { + 
self.db.adt_datum(adt_id) + } + + fn coroutine_datum(&self, coroutine_id: CoroutineId) -> Arc> { + self.db.borrow().coroutine_datum(coroutine_id) + } + + /// Returns the coroutine witness datum for the coroutine with the given id. + fn coroutine_witness_datum( + &self, + coroutine_id: CoroutineId, + ) -> Arc> { + self.db.borrow().coroutine_witness_datum(coroutine_id) + } + + fn adt_repr(&self, id: AdtId) -> Arc> { + self.db.adt_repr(id) + } + + fn adt_size_align(&self, id: chalk_ir::AdtId) -> Arc { + self.db.adt_size_align(id) + } + + fn impl_datum(&self, impl_id: ImplId) -> Arc> { + self.db.impl_datum(impl_id) + } + + fn associated_ty_from_impl( + &self, + impl_id: ImplId, + assoc_type_id: AssocTypeId, + ) -> Option> { + self.db.associated_ty_from_impl(impl_id, assoc_type_id) + } + + fn associated_ty_value( + &self, + id: crate::rust_ir::AssociatedTyValueId, + ) -> Arc> { + self.db.associated_ty_value(id) + } + + fn opaque_ty_data(&self, id: OpaqueTyId) -> Arc> { + self.db.opaque_ty_data(id) + } + + fn hidden_opaque_type(&self, id: OpaqueTyId) -> Ty { + self.db.hidden_opaque_type(id) + } + + fn impls_for_trait( + &self, + trait_id: TraitId, + parameters: &[chalk_ir::GenericArg], + binders: &CanonicalVarKinds, + ) -> Vec> { + self.db.impls_for_trait(trait_id, parameters, binders) + } + + fn local_impls_to_coherence_check(&self, trait_id: TraitId) -> Vec> { + self.db.local_impls_to_coherence_check(trait_id) + } + + fn impl_provided_for(&self, auto_trait_id: TraitId, ty: &TyKind) -> bool { + self.db.impl_provided_for(auto_trait_id, ty) + } + + fn well_known_trait_id( + &self, + well_known_trait: crate::rust_ir::WellKnownTrait, + ) -> Option> { + self.db.well_known_trait_id(well_known_trait) + } + + fn well_known_assoc_type_id(&self, assoc_type: WellKnownAssocType) -> Option> { + self.db.well_known_assoc_type_id(assoc_type) + } + + fn program_clauses_for_env( + &self, + environment: &chalk_ir::Environment, + ) -> chalk_ir::ProgramClauses { + 
self.db.program_clauses_for_env(environment) + } + + fn interner(&self) -> I { + self.db.interner() + } + + fn is_object_safe(&self, trait_id: TraitId) -> bool { + self.db.is_object_safe(trait_id) + } + + fn unification_database(&self) -> &dyn UnificationDatabase { + self + } + + fn trait_name(&self, trait_id: TraitId) -> String { + self.db.trait_name(trait_id) + } + + fn adt_name(&self, adt_id: AdtId) -> String { + self.db.adt_name(adt_id) + } + + fn assoc_type_name(&self, assoc_ty_id: AssocTypeId) -> String { + self.db.assoc_type_name(assoc_ty_id) + } + + fn opaque_type_name(&self, opaque_ty_id: OpaqueTyId) -> String { + self.db.opaque_type_name(opaque_ty_id) + } + + fn fn_def_datum(&self, fn_def_id: chalk_ir::FnDefId) -> Arc> { + self.db.fn_def_datum(fn_def_id) + } + + fn fn_def_name(&self, fn_def_id: FnDefId) -> String { + self.db.fn_def_name(fn_def_id) + } + + fn closure_kind(&self, closure_id: ClosureId, substs: &Substitution) -> ClosureKind { + // TODO: record closure IDs + self.db.closure_kind(closure_id, substs) + } + + fn closure_inputs_and_output( + &self, + closure_id: ClosureId, + substs: &Substitution, + ) -> Binders> { + self.db.closure_inputs_and_output(closure_id, substs) + } + + fn closure_upvars(&self, closure_id: ClosureId, substs: &Substitution) -> Binders> { + self.db.closure_upvars(closure_id, substs) + } + + fn closure_fn_substitution( + &self, + closure_id: ClosureId, + substs: &Substitution, + ) -> Substitution { + self.db.closure_fn_substitution(closure_id, substs) + } + + fn discriminant_type(&self, ty: Ty) -> Ty { + self.db.discriminant_type(ty) + } +} + +#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)] +pub enum RecordedItemId { + Adt(AdtId), + Trait(TraitId), + Impl(ImplId), + OpaqueTy(OpaqueTyId), + FnDef(FnDefId), + Coroutine(CoroutineId), +} + +impl From> for RecordedItemId { + fn from(v: AdtId) -> Self { + RecordedItemId::Adt(v) + } +} + +impl From> for RecordedItemId { + fn from(v: TraitId) -> Self { + RecordedItemId::Trait(v) 
+ } +} + +impl From> for RecordedItemId { + fn from(v: ImplId) -> Self { + RecordedItemId::Impl(v) + } +} + +impl From> for RecordedItemId { + fn from(v: OpaqueTyId) -> Self { + RecordedItemId::OpaqueTy(v) + } +} + +impl From> for RecordedItemId { + fn from(v: FnDefId) -> Self { + RecordedItemId::FnDef(v) + } +} + +impl From> for RecordedItemId { + fn from(v: CoroutineId) -> Self { + RecordedItemId::Coroutine(v) + } +} diff --git a/chalk-solve/src/logging_db/id_collector.rs b/chalk-solve/src/logging_db/id_collector.rs new file mode 100644 index 00000000000..4b97bac26b8 --- /dev/null +++ b/chalk-solve/src/logging_db/id_collector.rs @@ -0,0 +1,162 @@ +use super::RecordedItemId; +use crate::RustIrDatabase; +use chalk_ir::{ + interner::Interner, + visit::TypeVisitor, + visit::{TypeSuperVisitable, TypeVisitable}, + AliasTy, DebruijnIndex, TyKind, WhereClause, +}; +use std::ops::ControlFlow; + +use indexmap::IndexSet; + +/// Collects the identifiers needed to resolve all the names for a given +/// set of identifers, excluding identifiers we already have. +/// +/// When recording identifiers to print, the `LoggingRustIrDatabase` only +/// records identifiers the solver uses. But the solver assumes well-formedness, +/// and thus skips over many names referenced in the definitions. +/// +/// For instance, if we have: +/// +/// ```rust,ignore +/// struct S {} +/// +/// trait Parent {} +/// trait Child where Self: Parent {} +/// impl Parent for S {} +/// impl Child for S {} +/// ``` +/// +/// And our goal is `S: Child`, we will only render `S`, `impl Child for S`, and +/// `trait Child`. This will not parse because the `Child` trait's definition +/// references parent. IdCollector solves this by collecting all of the directly +/// related identifiers, allowing those to be rendered as well, ensuring name +/// resolution is successful. 
+pub fn collect_unrecorded_ids>( + db: &DB, + identifiers: &'_ IndexSet>, +) -> IndexSet> { + let mut collector = IdCollector { + db, + found_identifiers: IndexSet::new(), + }; + for id in identifiers { + match *id { + RecordedItemId::Adt(adt_id) => { + collector + .db + .adt_datum(adt_id) + .visit_with(&mut collector, DebruijnIndex::INNERMOST); + } + RecordedItemId::FnDef(fn_def) => { + collector + .db + .fn_def_datum(fn_def) + .visit_with(&mut collector, DebruijnIndex::INNERMOST); + } + RecordedItemId::Coroutine(_coroutine_id) => unimplemented!(), + RecordedItemId::Trait(trait_id) => { + let trait_datum = collector.db.trait_datum(trait_id); + + trait_datum.visit_with(&mut collector, DebruijnIndex::INNERMOST); + for assoc_ty_id in &trait_datum.associated_ty_ids { + let assoc_ty_datum = collector.db.associated_ty_data(*assoc_ty_id); + assoc_ty_datum + .bounds_on_self(collector.db.interner()) + .visit_with(&mut collector, DebruijnIndex::INNERMOST); + assoc_ty_datum.visit_with(&mut collector, DebruijnIndex::INNERMOST); + } + } + RecordedItemId::OpaqueTy(opaque_id) => { + collector + .db + .opaque_ty_data(opaque_id) + .visit_with(&mut collector, DebruijnIndex::INNERMOST); + collector + .db + .hidden_opaque_type(opaque_id) + .visit_with(&mut collector, DebruijnIndex::INNERMOST); + } + RecordedItemId::Impl(impl_id) => { + let impl_datum = collector.db.impl_datum(impl_id); + for id in &impl_datum.associated_ty_value_ids { + let assoc_ty_value = collector.db.associated_ty_value(*id); + assoc_ty_value.visit_with(&mut collector, DebruijnIndex::INNERMOST); + } + impl_datum.visit_with(&mut collector, DebruijnIndex::INNERMOST); + } + } + } + collector + .found_identifiers + .difference(identifiers) + .copied() + .collect() +} + +struct IdCollector<'i, I: Interner, DB: RustIrDatabase> { + db: &'i DB, + found_identifiers: IndexSet>, +} + +impl<'i, I: Interner, DB: RustIrDatabase> IdCollector<'i, I, DB> { + fn record(&mut self, id: impl Into>) { + 
self.found_identifiers.insert(id.into()); + } + + fn visit_alias(&mut self, alias: &AliasTy) { + match alias { + AliasTy::Projection(projection_ty) => { + let assoc_ty_datum = self.db.associated_ty_data(projection_ty.associated_ty_id); + self.record(assoc_ty_datum.trait_id) + } + AliasTy::Opaque(opaque_ty) => self.record(opaque_ty.opaque_ty_id), + } + } +} + +impl<'i, I: Interner, DB: RustIrDatabase> TypeVisitor for IdCollector<'i, I, DB> { + type BreakTy = (); + + fn as_dyn(&mut self) -> &mut dyn TypeVisitor { + self + } + fn interner(&self) -> I { + self.db.interner() + } + + fn visit_ty( + &mut self, + ty: &chalk_ir::Ty, + outer_binder: chalk_ir::DebruijnIndex, + ) -> ControlFlow<()> { + match ty.kind(self.db.interner()) { + TyKind::Adt(adt, _) => self.record(*adt), + TyKind::FnDef(fn_def, _) => self.record(*fn_def), + TyKind::OpaqueType(opaque, _) => self.record(*opaque), + TyKind::Alias(alias) => self.visit_alias(alias), + TyKind::BoundVar(..) => (), + TyKind::Dyn(..) => (), + TyKind::Function(..) => (), + TyKind::InferenceVar(..) => (), + TyKind::Placeholder(..) => (), + _ => {} + } + ty.super_visit_with(self, outer_binder) + } + + fn visit_where_clause( + &mut self, + where_clause: &WhereClause, + outer_binder: DebruijnIndex, + ) -> ControlFlow<()> { + match where_clause { + WhereClause::Implemented(trait_ref) => self.record(trait_ref.trait_id), + WhereClause::AliasEq(alias_eq) => self.visit_alias(&alias_eq.alias), + WhereClause::LifetimeOutlives(_lifetime_outlives) => (), + WhereClause::TypeOutlives(_type_outlives) => (), + } + where_clause.super_visit_with(self.as_dyn(), outer_binder) + } +} diff --git a/chalk-solve/src/rust_ir.rs b/chalk-solve/src/rust_ir.rs new file mode 100644 index 00000000000..643a0124551 --- /dev/null +++ b/chalk-solve/src/rust_ir.rs @@ -0,0 +1,767 @@ +//! Contains the definition for the "Rust IR" -- this is basically a "lowered" +//! version of the AST, roughly corresponding to [the HIR] in the Rust +//! compiler. 
+ +use chalk_derive::{HasInterner, TypeFoldable, TypeVisitable}; +use chalk_ir::cast::Cast; +use chalk_ir::fold::shift::Shift; +use chalk_ir::interner::Interner; +use chalk_ir::{ + try_break, visit::TypeVisitable, AdtId, AliasEq, AliasTy, AssocTypeId, Binders, DebruijnIndex, + FnDefId, GenericArg, ImplId, OpaqueTyId, ProjectionTy, QuantifiedWhereClause, Substitution, + ToGenericArg, TraitId, TraitRef, Ty, TyKind, VariableKind, WhereClause, WithKind, +}; +use std::iter; +use std::ops::ControlFlow; + +/// Identifier for an "associated type value" found in some impl. +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct AssociatedTyValueId(pub I::DefId); + +chalk_ir::id_visit!(AssociatedTyValueId); +chalk_ir::id_fold!(AssociatedTyValueId); + +#[derive(Clone, Debug, PartialEq, Eq, Hash, TypeVisitable)] +pub struct ImplDatum { + pub polarity: Polarity, + pub binders: Binders>, + pub impl_type: ImplType, + pub associated_ty_value_ids: Vec>, +} + +impl ImplDatum { + pub fn is_positive(&self) -> bool { + self.polarity.is_positive() + } + + pub fn trait_id(&self) -> TraitId { + self.binders.skip_binders().trait_ref.trait_id + } + + pub fn self_type_adt_id(&self, interner: I) -> Option> { + match self + .binders + .skip_binders() + .trait_ref + .self_type_parameter(interner) + .kind(interner) + { + TyKind::Adt(id, _) => Some(*id), + _ => None, + } + } +} + +#[derive(Clone, Debug, PartialEq, Eq, Hash, HasInterner, TypeFoldable, TypeVisitable)] +pub struct ImplDatumBound { + pub trait_ref: TraitRef, + pub where_clauses: Vec>, +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +pub enum ImplType { + Local, + External, +} + +chalk_ir::const_visit!(ImplType); + +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub struct DefaultImplDatum { + pub binders: Binders>, +} + +#[derive(Clone, Debug, PartialEq, Eq, Hash, HasInterner)] +pub struct DefaultImplDatumBound { + pub trait_ref: TraitRef, + pub accessible_tys: Vec>, +} + +#[derive(Clone, Debug, 
PartialEq, Eq, Hash, TypeVisitable)] +pub struct AdtDatum { + pub binders: Binders>, + pub id: AdtId, + pub flags: AdtFlags, + pub kind: AdtKind, +} + +#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)] +pub enum AdtKind { + Struct, + Enum, + Union, +} + +chalk_ir::const_visit!(AdtKind); + +#[derive(Clone, Debug, PartialEq, Eq, Hash, TypeFoldable, HasInterner, TypeVisitable)] +pub struct AdtDatumBound { + pub variants: Vec>, + pub where_clauses: Vec>, +} + +#[derive(Clone, Debug, PartialEq, Eq, Hash, TypeFoldable, HasInterner, TypeVisitable)] +pub struct AdtVariantDatum { + pub fields: Vec>, +} + +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub struct AdtFlags { + pub upstream: bool, + pub fundamental: bool, + pub phantom_data: bool, +} + +chalk_ir::const_visit!(AdtFlags); + +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub struct AdtRepr { + pub c: bool, + pub packed: bool, + pub int: Option>, +} + +/// Information about the size and alignment of an ADT. +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub struct AdtSizeAlign { + one_zst: bool, +} + +impl AdtSizeAlign { + pub fn from_one_zst(one_zst: bool) -> AdtSizeAlign { + AdtSizeAlign { one_zst } + } + + pub fn one_zst(&self) -> bool { + self.one_zst + } +} + +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +/// A rust intermediate representation (rust_ir) of a function definition/declaration. +/// For example, in the following rust code: +/// +/// ```ignore +/// fn foo() -> i32 where T: Eq; +/// ``` +/// +/// This would represent the declaration of `foo`. +/// +/// Note this is distinct from a function pointer, which points to +/// a function with a given type signature, whereas this represents +/// a specific function definition. 
+pub struct FnDefDatum { + pub id: FnDefId, + pub sig: chalk_ir::FnSig, + pub binders: Binders>, +} + +/// Avoids visiting `I::FnAbi` +impl TypeVisitable for FnDefDatum { + fn visit_with( + &self, + visitor: &mut dyn chalk_ir::visit::TypeVisitor, + outer_binder: DebruijnIndex, + ) -> ControlFlow { + try_break!(self.id.visit_with(visitor, outer_binder)); + self.binders.visit_with(visitor, outer_binder) + } +} + +/// Represents the inputs and outputs on a `FnDefDatum`. This is split +/// from the where clauses, since these can contain bound lifetimes. +#[derive(Clone, Debug, PartialEq, Eq, Hash, TypeFoldable, HasInterner, TypeVisitable)] +pub struct FnDefInputsAndOutputDatum { + /// Types of the function's arguments + /// ```ignore + /// fn foo(bar: i32, baz: T); + /// ^^^ ^ + /// ``` + /// + pub argument_types: Vec>, + /// Return type of the function + /// ```ignore + /// fn foo() -> i32; + /// ^^^ + /// ``` + pub return_type: Ty, +} + +#[derive(Clone, Debug, PartialEq, Eq, Hash, TypeFoldable, HasInterner, TypeVisitable)] +/// Represents the bounds on a `FnDefDatum`, including +/// the function definition's type signature and where clauses. +pub struct FnDefDatumBound { + /// Inputs and outputs defined on a function + /// These are needed for late-bound regions in rustc. For example the + /// lifetime `'a` in + /// ```ignore + /// fn foo<'a, T>(&'a T); + /// ^^ + /// ``` + /// Rustc doesn't pass in late-bound the regions in substs, but the inputs + /// and outputs may use them. `where_clauses` don't need an extra set of + /// `Binders`, since any lifetimes found in where clauses are not late-bound. + /// + /// For more information, see [this rustc-dev-guide chapter](https://rustc-dev-guide.rust-lang.org/early-late-bound.html). 
+ pub inputs_and_output: Binders>, + + /// Where clauses defined on the function + /// ```ignore + /// fn foo() where T: Eq; + /// ^^^^^^^^^^^ + /// ``` + pub where_clauses: Vec>, +} + +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +/// A rust intermediate representation (rust_ir) of a Trait Definition. For +/// example, given the following rust code: +/// +/// ``` +/// use std::fmt::Debug; +/// +/// trait Foo +/// where +/// T: Debug, +/// { +/// type Bar; +/// } +/// ``` +/// +/// This would represent the `trait Foo` declaration. Note that the details of +/// the trait members (e.g., the associated type declaration (`type Bar`) are +/// not contained in this type, and are represented separately (e.g., in +/// [`AssociatedTyDatum`]). +/// +/// Not to be confused with the rust_ir for a Trait Implementation, which is +/// represented by [`ImplDatum`] +/// +/// [`ImplDatum`]: struct.ImplDatum.html +/// [`AssociatedTyDatum`]: struct.AssociatedTyDatum.html +#[derive(TypeVisitable)] +pub struct TraitDatum { + pub id: TraitId, + + pub binders: Binders>, + + /// "Flags" indicate special kinds of traits, like auto traits. + /// In Rust syntax these are represented in different ways, but in + /// chalk we add annotations like `#[auto]`. + pub flags: TraitFlags, + + pub associated_ty_ids: Vec>, + + /// If this is a well-known trait, which one? If `None`, this is a regular, + /// user-defined trait. + pub well_known: Option, +} + +/// A list of the traits that are "well known" to chalk, which means that +/// the chalk-solve crate has special, hard-coded impls for them. +#[derive(Copy, Clone, Debug, PartialEq, Eq, Ord, PartialOrd, Hash)] +pub enum WellKnownTrait { + Sized, + Copy, + Clone, + Drop, + /// The trait `FnOnce` - the generic argument `Args` is always a tuple + /// corresponding to the arguments of a function implementing this trait. + /// E.g. 
`fn(u8, bool): FnOnce<(u8, bool)>` + FnOnce, + FnMut, + Fn, + AsyncFnOnce, + AsyncFnMut, + AsyncFn, + Unsize, + Unpin, + CoerceUnsized, + DiscriminantKind, + Coroutine, + DispatchFromDyn, + Tuple, + Pointee, + FnPtr, + Future, +} + +chalk_ir::const_visit!(WellKnownTrait); + +/// A list of the associated types that are "well known" to chalk, which means that +/// the chalk-solve crate has special, hard-coded impls for them. +#[derive(Copy, Clone, Debug, PartialEq, Eq, Ord, PartialOrd, Hash)] +pub enum WellKnownAssocType { + AsyncFnOnceOutput, +} + +chalk_ir::const_visit!(WellKnownAssocType); + +impl TraitDatum { + pub fn is_auto_trait(&self) -> bool { + self.flags.auto + } + + pub fn is_non_enumerable_trait(&self) -> bool { + self.flags.non_enumerable + } + + pub fn is_coinductive_trait(&self) -> bool { + self.flags.coinductive + } + + /// Gives access to the where clauses of the trait, quantified over the type parameters of the trait: + /// + /// ```ignore + /// trait Foo where T: Debug { } + /// ^^^^^^^^^^^^^^ + /// ``` + pub fn where_clauses(&self) -> Binders<&Vec>> { + self.binders.as_ref().map(|td| &td.where_clauses) + } +} + +#[derive(Clone, Debug, PartialEq, Eq, Hash, HasInterner, TypeVisitable)] +pub struct TraitDatumBound { + /// Where clauses defined on the trait: + /// + /// ```ignore + /// trait Foo where T: Debug { } + /// ^^^^^^^^^^^^^^ + /// ``` + pub where_clauses: Vec>, +} + +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub struct TraitFlags { + /// An "auto trait" is one that is "automatically implemented" for every + /// struct, so long as no explicit impl is given. + /// + /// Examples are `Send` and `Sync`. + pub auto: bool, + + pub marker: bool, + + /// Indicate that a trait is defined upstream (in a dependency), used during + /// coherence checking. + pub upstream: bool, + + /// A fundamental trait is a trait where adding an impl for an existing type + /// is considered a breaking change. 
Examples of fundamental traits are the + /// closure traits like `Fn` and `FnMut`. + /// + /// As of this writing (2020-03-27), fundamental traits are declared by the + /// unstable `#[fundamental]` attribute in rustc, and hence cannot appear + /// outside of the standard library. + pub fundamental: bool, + + /// Indicates that chalk cannot list all of the implementations of the given + /// trait, likely because it is a publicly exported trait in a library. + /// + /// Currently (2020-03-27) rustc and rust-analyzer mark all traits as + /// non_enumerable, and in the future it may become the only option. + pub non_enumerable: bool, + + pub coinductive: bool, +} + +chalk_ir::const_visit!(TraitFlags); + +/// An inline bound, e.g. `: Foo` in `impl> SomeType`. +#[derive(Clone, Debug, PartialEq, Eq, Hash, TypeFoldable, TypeVisitable, HasInterner)] +pub enum InlineBound { + TraitBound(TraitBound), + AliasEqBound(AliasEqBound), +} + +#[allow(type_alias_bounds)] +pub type QuantifiedInlineBound = Binders>; + +pub trait IntoWhereClauses { + type Output; + + fn into_where_clauses(&self, interner: I, self_ty: Ty) -> Vec; +} + +impl IntoWhereClauses for InlineBound { + type Output = WhereClause; + + /// Applies the `InlineBound` to `self_ty` and lowers to a + /// [`chalk_ir::DomainGoal`]. + /// + /// Because an `InlineBound` does not know anything about what it's binding, + /// you must provide that type as `self_ty`. 
+ fn into_where_clauses(&self, interner: I, self_ty: Ty) -> Vec> { + match self { + InlineBound::TraitBound(b) => b.into_where_clauses(interner, self_ty), + InlineBound::AliasEqBound(b) => b.into_where_clauses(interner, self_ty), + } + } +} + +impl IntoWhereClauses for QuantifiedInlineBound { + type Output = QuantifiedWhereClause; + + fn into_where_clauses(&self, interner: I, self_ty: Ty) -> Vec> { + let self_ty = self_ty.shifted_in(interner); + self.map_ref(|b| b.into_where_clauses(interner, self_ty)) + .into_iter() + .collect() + } +} + +/// Represents a trait bound on e.g. a type or type parameter. +/// Does not know anything about what it's binding. +#[derive(Clone, Debug, PartialEq, Eq, Hash, TypeFoldable, TypeVisitable)] +pub struct TraitBound { + pub trait_id: TraitId, + pub args_no_self: Vec>, +} + +impl TraitBound { + fn into_where_clauses(&self, interner: I, self_ty: Ty) -> Vec> { + let trait_ref = self.as_trait_ref(interner, self_ty); + vec![WhereClause::Implemented(trait_ref)] + } + + pub fn as_trait_ref(&self, interner: I, self_ty: Ty) -> TraitRef { + TraitRef { + trait_id: self.trait_id, + substitution: Substitution::from_iter( + interner, + iter::once(self_ty.cast(interner)).chain(self.args_no_self.iter().cloned()), + ), + } + } +} + +/// Represents an alias equality bound on e.g. a type or type parameter. +/// Does not know anything about what it's binding. +#[derive(Clone, Debug, PartialEq, Eq, Hash, TypeFoldable, TypeVisitable)] +pub struct AliasEqBound { + pub trait_bound: TraitBound, + pub associated_ty_id: AssocTypeId, + /// Does not include trait parameters. 
+ pub parameters: Vec>, + pub value: Ty, +} + +impl AliasEqBound { + fn into_where_clauses(&self, interner: I, self_ty: Ty) -> Vec> { + let trait_ref = self.trait_bound.as_trait_ref(interner, self_ty); + + let substitution = Substitution::from_iter( + interner, + trait_ref + .substitution + .iter(interner) + .cloned() + .chain(self.parameters.iter().cloned()), + ); + + vec![ + WhereClause::Implemented(trait_ref), + WhereClause::AliasEq(AliasEq { + alias: AliasTy::Projection(ProjectionTy { + associated_ty_id: self.associated_ty_id, + substitution, + }), + ty: self.value.clone(), + }), + ] + } +} + +pub trait Anonymize { + /// Utility function that converts from a list of generic arguments + /// which *have* associated data (`WithKind`) to a list of + /// "anonymous" generic parameters that just preserves their + /// kinds (`VariableKind`). Often convenient in lowering. + fn anonymize(&self) -> Vec>; +} + +impl Anonymize for [WithKind] { + fn anonymize(&self) -> Vec> { + self.iter().map(|pk| pk.kind.clone()).collect() + } +} + +/// Represents an associated type declaration found inside of a trait: +/// +/// ```notrust +/// trait Foo { // P0 is Self +/// type Bar: [bounds] +/// where +/// [where_clauses]; +/// } +/// ``` +/// +/// The meaning of each of these parts: +/// +/// * The *parameters* `P0...Pm` are all in scope for this associated type. +/// * The *bounds* `bounds` are things that the impl must prove to be true. +/// * The *where clauses* `where_clauses` are things that the impl can *assume* to be true +/// (but which projectors must prove). +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub struct AssociatedTyDatum { + /// The trait this associated type is defined in. + pub trait_id: TraitId, + + /// The ID of this associated type + pub id: AssocTypeId, + + /// Name of this associated type. + pub name: I::Identifier, + + /// These binders represent the `P0...Pm` variables. The binders + /// are in the order `[Pn..Pm; P0..Pn]`. 
That is, the variables + /// from `Bar` come first (corresponding to the de bruijn concept + /// that "inner" binders are lower indices, although within a + /// given binder we do not have an ordering). + pub binders: Binders>, +} + +// Manual implementation to avoid I::Identifier type. +impl TypeVisitable for AssociatedTyDatum { + fn visit_with( + &self, + visitor: &mut dyn chalk_ir::visit::TypeVisitor, + outer_binder: DebruijnIndex, + ) -> ControlFlow { + try_break!(self.trait_id.visit_with(visitor, outer_binder)); + try_break!(self.id.visit_with(visitor, outer_binder)); + self.binders.visit_with(visitor, outer_binder) + } +} + +/// Encodes the parts of `AssociatedTyDatum` where the parameters +/// `P0..Pm` are in scope (`bounds` and `where_clauses`). +#[derive(Clone, Debug, PartialEq, Eq, Hash, TypeFoldable, TypeVisitable, HasInterner)] +pub struct AssociatedTyDatumBound { + /// Bounds on the associated type itself. + /// + /// These must be proven by the implementer, for all possible parameters that + /// would result in a well-formed projection. + pub bounds: Vec>, + + /// Where clauses that must hold for the projection to be well-formed. + pub where_clauses: Vec>, +} + +impl AssociatedTyDatum { + /// Returns the associated ty's bounds applied to the projection type, e.g.: + /// + /// ```notrust + /// Implemented(::Item: Sized) + /// ``` + /// + /// these quantified where clauses are in the scope of the + /// `binders` field. 
+ pub fn bounds_on_self(&self, interner: I) -> Vec> { + let (binders, assoc_ty_datum) = self.binders.as_ref().into(); + // Create a list `P0...Pn` of references to the binders in + // scope for this associated type: + let substitution = Substitution::from_iter( + interner, + binders + .iter(interner) + .enumerate() + .map(|p| p.to_generic_arg(interner)), + ); + + // The self type will be `>::Item` etc + let self_ty = TyKind::Alias(AliasTy::Projection(ProjectionTy { + associated_ty_id: self.id, + substitution, + })) + .intern(interner); + + // Now use that as the self type for the bounds, transforming + // something like `type Bar: Debug` into + // + // ``` + // >::Item: Debug + // ``` + assoc_ty_datum + .bounds + .iter() + .flat_map(|b| b.into_where_clauses(interner, self_ty.clone())) + .collect() + } +} + +/// Represents the *value* of an associated type that is assigned +/// from within some impl. +/// +/// ```ignore +/// impl Iterator for Foo { +/// type Item = XXX; // <-- represents this line! +/// } +/// ``` +#[derive(Clone, Debug, PartialEq, Eq, Hash, TypeFoldable, TypeVisitable)] +pub struct AssociatedTyValue { + /// Impl in which this associated type value is found. You might + /// need to look at this to find the generic parameters defined on + /// the impl, for example. + /// + /// ```ignore + /// impl Iterator for Foo { // <-- refers to this impl + /// type Item = XXX; // <-- (where this is self) + /// } + /// ``` + pub impl_id: ImplId, + + /// Associated type being defined. + /// + /// ```ignore + /// impl Iterator for Foo { + /// type Item = XXX; // <-- (where this is self) + /// } + /// ... + /// trait Iterator { + /// type Item; // <-- refers to this declaration here! + /// } + /// ``` + pub associated_ty_id: AssocTypeId, + + /// Additional binders declared on the associated type itself, + /// beyond those from the impl. This would be empty for normal + /// associated types, but non-empty for generic associated types. 
+ /// + /// ```ignore + /// impl Iterable for Vec { + /// type Iter<'a> = vec::Iter<'a, T>; + /// // ^^^^ refers to these generics here + /// } + /// ``` + pub value: Binders>, +} + +#[derive(Clone, Debug, PartialEq, Eq, Hash, TypeFoldable, TypeVisitable, HasInterner)] +pub struct AssociatedTyValueBound { + /// Type that we normalize to. The X in `type Foo<'a> = X`. + pub ty: Ty, +} + +/// Represents the bounds for an `impl Trait` type. +/// +/// ```ignore +/// opaque type T: A + B = HiddenTy; +/// ``` +#[derive(Clone, Debug, PartialEq, Eq, Hash, TypeFoldable, TypeVisitable)] +pub struct OpaqueTyDatum { + /// The placeholder `!T` that corresponds to the opaque type `T`. + pub opaque_ty_id: OpaqueTyId, + + /// The type bound to when revealed. + pub bound: Binders>, +} + +#[derive(Clone, Debug, PartialEq, Eq, Hash, TypeFoldable, HasInterner, TypeVisitable)] +pub struct OpaqueTyDatumBound { + /// Trait bounds for the opaque type. These are bounds that the hidden type must meet. + pub bounds: Binders>>, + /// Where clauses that inform well-formedness conditions for the opaque type. + /// These are conditions on the generic parameters of the opaque type which must be true + /// for a reference to the opaque type to be well-formed. + pub where_clauses: Binders>>, +} + +// The movability of a coroutine: whether a coroutine contains self-references, +// causing it to be !Unpin +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] +pub enum Movability { + Static, + Movable, +} +chalk_ir::copy_fold!(Movability); + +/// Represents a coroutine type. +#[derive(Clone, Debug, PartialEq, Eq, Hash, TypeFoldable, HasInterner)] +pub struct CoroutineDatum { + // Can the coroutine be moved (is Unpin or not) + pub movability: Movability, + /// All of the nested types for this coroutine. 
The `Binder` +    /// represents the types and lifetimes that this coroutine is generic over - +    /// this behaves in the same way as `AdtDatum.binders` +    pub input_output: Binders>, +} + +/// The nested types for a coroutine. This always appears inside a `CoroutineDatum` +#[derive(Clone, Debug, PartialEq, Eq, Hash, TypeFoldable, HasInterner)] +pub struct CoroutineInputOutputDatum { +    /// The coroutine resume type - a value of this type +    /// is supplied by the caller when resuming the coroutine. +    /// Currently, this plays no role in goal resolution. +    pub resume_type: Ty, +    /// The coroutine yield type - a value of this type +    /// is supplied by the coroutine during a yield. +    /// Currently, this plays no role in goal resolution. +    pub yield_type: Ty, +    /// The coroutine return type - a value of this type +    /// is supplied by the coroutine when it returns. +    /// Currently, this plays no role in goal resolution +    pub return_type: Ty, +    /// The upvars stored by the coroutine. These represent +    /// types captured from the coroutine's environment, +    /// and are stored across all yields. These types (along with the witness types) +    /// are considered 'constituent types' for the purposes of determining auto trait +    /// implementations - that is, a coroutine impls an auto trait A +    /// iff all of its constituent types implement A. +    pub upvars: Vec>, +} + +/// The coroutine witness data. Each `CoroutineId` has both a `CoroutineDatum` +/// and a `CoroutineWitnessDatum` - these represent two distinct types in Rust. +/// `CoroutineWitnessDatum` is logically 'inside' a coroutine - this only +/// matters when we treat the witness type as a 'constituent type' for the +/// purposes of determining auto trait implementations.
+#[derive(Clone, Debug, PartialEq, Eq, Hash, TypeFoldable, HasInterner)] +pub struct CoroutineWitnessDatum { + /// This binder is identical to the `input_output` binder in `CoroutineWitness` - + /// it binds the types and lifetimes that the coroutine is generic over. + /// There is an additional binder inside `CoroutineWitnessExistential`, which + /// is treated specially. + pub inner_types: Binders>, +} + +/// The coroutine witness types, together with existentially bound lifetimes. +/// Each 'witness type' represents a type stored inside the coroutine across +/// a yield. When a coroutine type is constructed, the precise region relationships +/// found in the coroutine body are erased. As a result, we are left with existential +/// lifetimes - each type is parameterized over *some* lifetimes, but we do not +/// know their precise values. +/// +/// Unlike the binder in `CoroutineWitnessDatum`, this `Binder` never gets substituted +/// via an `Ty`. Instead, we handle this `Binders` specially when determining +/// auto trait impls. See `push_auto_trait_impls_coroutine_witness` for more details. +#[derive(Clone, Debug, PartialEq, Eq, Hash, TypeFoldable, HasInterner)] +pub struct CoroutineWitnessExistential { + pub types: Binders>>, +} + +#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Debug)] +pub enum Polarity { + Positive, + Negative, +} + +chalk_ir::const_visit!(Polarity); + +impl Polarity { + pub fn is_positive(&self) -> bool { + match *self { + Polarity::Positive => true, + Polarity::Negative => false, + } + } +} + +/// Indicates the "most permissive" Fn-like trait that the closure implements. +/// If the closure kind for a closure is FnMut, for example, then the closure +/// implements FnMut and FnOnce. 
+#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Debug)] +pub enum ClosureKind { + Fn, + FnMut, + FnOnce, +} diff --git a/chalk-solve/src/solve.rs b/chalk-solve/src/solve.rs index 3beb84a44d9..0734fc53ea3 100644 --- a/chalk-solve/src/solve.rs +++ b/chalk-solve/src/solve.rs @@ -1,130 +1,350 @@ -use chalk_engine::context::Context; -use chalk_engine::context::ContextOps; -use chalk_engine::fallible::*; -use chalk_engine::forest::Forest; -use crate::solve::slg::implementation::SlgContext; +use crate::RustIrDatabase; +use chalk_derive::HasInterner; +use chalk_ir::interner::Interner; use chalk_ir::*; use std::fmt; -use std::sync::Arc; +use tracing::debug; -pub mod slg; -mod truncate; +pub mod truncate; -#[derive(Clone, Debug, PartialEq, Eq)] -/// A (possible) solution for a proposed goal. Usually packaged in a `Result`, -/// where `Err` represents definite *failure* to prove a goal. -pub enum Solution { +/// A (possible) solution for a proposed goal. +#[derive(Clone, Debug, PartialEq, Eq, HasInterner)] +pub enum Solution { /// The goal indeed holds, and there is a unique value for all existential /// variables. In this case, we also record a set of lifetime constraints /// which must also hold for the goal to be valid. - Unique(Canonical), + Unique(Canonical>), /// The goal may be provable in multiple ways, but regardless we may have some guidance /// for type inference. In this case, we don't return any lifetime /// constraints, since we have not "committed" to any particular solution /// yet. - Ambig(Guidance), + Ambig(Guidance), } -#[derive(Clone, Debug, PartialEq, Eq)] /// When a goal holds ambiguously (e.g., because there are multiple possible /// solutions), we issue a set of *guidance* back to type inference. -pub enum Guidance { +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum Guidance { /// The existential variables *must* have the given values if the goal is /// ever to hold, but that alone isn't enough to guarantee the goal will /// actually hold. 
- Definite(Canonical), + Definite(Canonical>), /// There are multiple plausible values for the existentials, but the ones /// here are suggested as the preferred choice heuristically. These should /// be used for inference fallback only. - Suggested(Canonical), + Suggested(Canonical>), /// There's no useful information to feed back to type inference Unknown, } -impl Solution { - pub fn is_unique(&self) -> bool { - match *self { - Solution::Unique(..) => true, - _ => false, +impl Solution { + /// There are multiple candidate solutions, which may or may not agree on + /// the values for existential variables; attempt to combine them. This + /// operation does not depend on the order of its arguments. + /// + /// This actually isn't as precise as it could be, in two ways: + /// + /// a. It might be that while there are multiple distinct candidates, they + /// all agree about *some things*. To be maximally precise, we would + /// compute the intersection of what they agree on. It's not clear though + /// that this is actually what we want Rust's inference to do, and it's + /// certainly not what it does today. + /// + /// b. There might also be an ambiguous candidate and a successful candidate, + /// both with the same refined-goal. In that case, we could probably claim + /// success, since if the conditions of the ambiguous candidate were met, + /// we know the success would apply. Example: `?0: Clone` yields ambiguous + /// candidate `Option: Clone` and successful candidate `Option: + /// Clone`. + /// + /// But you get the idea. + pub fn combine(self, other: Solution, interner: I) -> Solution { + use self::Guidance::*; + + if self == other { + return self; } + + // Special case hack: if one solution is "true" without any constraints, + // that is always the combined result. 
+ // + // This is not as general as it could be: ideally, if we had one solution + // that is Unique with a simpler substitution than the other one, or region constraints + // which are a subset, we'd combine them. + if self.is_trivial_and_always_true(interner) { + return self; + } + if other.is_trivial_and_always_true(interner) { + return other; + } + + debug!( + "combine {} with {}", + self.display(interner), + other.display(interner) + ); + + // Otherwise, always downgrade to Ambig: + + let guidance = match (self.into_guidance(), other.into_guidance()) { + (Definite(ref subst1), Definite(ref subst2)) if subst1 == subst2 => { + Definite(subst1.clone()) + } + (Suggested(ref subst1), Suggested(ref subst2)) if subst1 == subst2 => { + Suggested(subst1.clone()) + } + _ => Unknown, + }; + Solution::Ambig(guidance) } -} -impl fmt::Display for Solution { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + pub fn is_trivial_and_always_true(&self, interner: I) -> bool { match self { - Solution::Unique(constrained) => write!(f, "Unique; {}", constrained,), - Solution::Ambig(Guidance::Definite(subst)) => { - write!(f, "Ambiguous; definite substitution {}", subst) + Solution::Unique(constrained_subst) => { + constrained_subst.value.subst.is_identity_subst(interner) + && constrained_subst.value.constraints.is_empty(interner) } - Solution::Ambig(Guidance::Suggested(subst)) => { - write!(f, "Ambiguous; suggested substitution {}", subst) + Solution::Ambig(_) => false, + } + } + + /// View this solution purely in terms of type inference guidance + pub fn into_guidance(self) -> Guidance { + match self { + Solution::Unique(constrained) => Guidance::Definite(Canonical { + value: constrained.value.subst, + binders: constrained.binders, + }), + Solution::Ambig(guidance) => guidance, + } + } + + /// Extract a constrained substitution from this solution, even if ambiguous. 
+ pub fn constrained_subst(&self, interner: I) -> Option>> { + match *self { + Solution::Unique(ref constrained) => Some(constrained.clone()), + Solution::Ambig(Guidance::Definite(ref canonical)) + | Solution::Ambig(Guidance::Suggested(ref canonical)) => { + let value = ConstrainedSubst { + subst: canonical.value.clone(), + constraints: Constraints::empty(interner), + }; + Some(Canonical { + value, + binders: canonical.binders.clone(), + }) } - Solution::Ambig(Guidance::Unknown) => write!(f, "Ambiguous; no inference guidance"), + Solution::Ambig(_) => None, } } -} -#[derive(Copy, Clone, Debug, PartialOrd, Ord, PartialEq, Eq, Hash)] -pub enum SolverChoice { - /// Run the SLG solver, producing a Solution. - SLG { max_size: usize }, -} + /// Determine whether this solution contains type information that *must* + /// hold, and returns the subst in that case. + pub fn definite_subst(&self, interner: I) -> Option>> { + match self { + Solution::Unique(constrained) => Some(constrained.clone()), + Solution::Ambig(Guidance::Definite(canonical)) => { + let value = ConstrainedSubst { + subst: canonical.value.clone(), + constraints: Constraints::empty(interner), + }; + Some(Canonical { + value, + binders: canonical.binders.clone(), + }) + } + _ => None, + } + } -impl SolverChoice { - /// Attempts to solve the given root goal, which must be in - /// canonical form. The solution is searching for unique answers - /// to any free existential variables in this goal. - /// - /// # Returns - /// - /// - `Ok(None)` is the goal cannot be proven. - /// - `Ok(Some(solution))` if we succeeded in finding *some* answers, - /// although `solution` may reflect ambiguity and unknowns. - /// - `Err` if there was an internal error solving the goal, which does not - /// reflect success nor failure. 
- pub fn solve_root_goal( - self, - env: &Arc, - canonical_goal: &UCanonical>, - ) -> Fallible> { - Ok(self.create_solver(env).solve(canonical_goal)) + pub fn is_unique(&self) -> bool { + matches!(*self, Solution::Unique(..)) } - /// Returns the default SLG parameters. - fn slg() -> Self { - SolverChoice::SLG { max_size: 10 } + pub fn is_ambig(&self) -> bool { + matches!(*self, Solution::Ambig(_)) } - pub fn create_solver(self, env: &Arc) -> Box { - match self { - SolverChoice::SLG { max_size } => Box::new(Forest::new(SlgContext::new(env, max_size))), + pub fn display(&self, interner: I) -> SolutionDisplay<'_, I> { + SolutionDisplay { + solution: self, + interner, } } } -impl Default for SolverChoice { - fn default() -> Self { - SolverChoice::slg() +pub struct SolutionDisplay<'a, I: Interner> { + solution: &'a Solution, + interner: I, +} + +impl<'a, I: Interner> fmt::Display for SolutionDisplay<'a, I> { + #[rustfmt::skip] + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { + let SolutionDisplay { solution, interner } = self; + match solution { + // If a `Unique` solution has no associated data, omit the trailing semicolon. + // This makes blessed test output nicer to read. 
+ Solution::Unique(Canonical { binders, value: ConstrainedSubst { subst, constraints } } ) + if interner.constraints_data(constraints.interned()).is_empty() + && interner.substitution_data(subst.interned()).is_empty() + && interner.canonical_var_kinds_data(binders.interned()).is_empty() + => write!(f, "Unique"), + + Solution::Unique(constrained) => write!(f, "Unique; {}", constrained.display(*interner)), + + Solution::Ambig(Guidance::Definite(subst)) => write!( + f, + "Ambiguous; definite substitution {}", + subst.display(*interner) + ), + Solution::Ambig(Guidance::Suggested(subst)) => write!( + f, + "Ambiguous; suggested substitution {}", + subst.display(*interner) + ), + Solution::Ambig(Guidance::Unknown) => write!(f, "Ambiguous; no inference guidance"), + } + } +} + +#[derive(Debug)] +pub enum SubstitutionResult { + Definite(S), + Ambiguous(S), + Floundered, +} + +impl SubstitutionResult { + pub fn as_ref(&self) -> SubstitutionResult<&S> { + match self { + SubstitutionResult::Definite(subst) => SubstitutionResult::Definite(subst), + SubstitutionResult::Ambiguous(subst) => SubstitutionResult::Ambiguous(subst), + SubstitutionResult::Floundered => SubstitutionResult::Floundered, + } + } + pub fn map U>(self, f: F) -> SubstitutionResult { + match self { + SubstitutionResult::Definite(subst) => SubstitutionResult::Definite(f(subst)), + SubstitutionResult::Ambiguous(subst) => SubstitutionResult::Ambiguous(f(subst)), + SubstitutionResult::Floundered => SubstitutionResult::Floundered, + } } } -pub trait Solver { - /// Solves a given goal, producing the solution. This will do only - /// as much work towards `goal` as it has to (and that works is - /// cached for future attempts). 
- fn solve(&mut self, goal: &UCanonical>) -> Option; +impl fmt::Display for SubstitutionResult { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + SubstitutionResult::Definite(subst) => write!(fmt, "{}", subst), + SubstitutionResult::Ambiguous(subst) => write!(fmt, "Ambiguous({})", subst), + SubstitutionResult::Floundered => write!(fmt, "Floundered"), + } + } } -impl Solver for Forest +/// Finds the solution to "goals", or trait queries -- i.e., figures +/// out what sets of types implement which traits. Also, between +/// queries, this struct stores the cached state from previous solver +/// attempts, which can then be re-used later. +pub trait Solver where - C: Context>, Solution = Solution>, - CO: ContextOps, + Self: fmt::Debug, { - fn solve(&mut self, goal: &UCanonical>) -> Option { - self.solve(goal) + /// Attempts to solve the given goal, which must be in canonical + /// form. Returns a unique solution (if one exists). This will do + /// only as much work towards `goal` as it has to (and that work + /// is cached for future attempts). + /// + /// # Parameters + /// + /// - `program` -- defines the program clauses in scope. + /// - **Important:** You must supply the same set of program clauses + /// each time you invoke `solve`, as otherwise the cached data may be + /// invalid. + /// - `goal` the goal to solve + /// + /// # Returns + /// + /// - `None` if the goal cannot be proven. + /// - `Some(solution)` if we succeeded in finding *some* answers, + /// although `solution` may reflect ambiguity and unknowns. + fn solve( + &mut self, + program: &dyn RustIrDatabase, + goal: &UCanonical>>, + ) -> Option>; + + /// Attempts to solve the given goal, which must be in canonical + /// form. Returns a unique solution (if one exists). This will do + /// only as much work towards `goal` as it has to (and that work + /// is cached for future attempts).
In addition, the solving of the + /// goal can be limited by returning `false` from `should_continue`. + /// + /// # Parameters + /// + /// - `program` -- defines the program clauses in scope. + /// - **Important:** You must supply the same set of program clauses + /// each time you invoke `solve`, as otherwise the cached data may be + /// invalid. + /// - `goal` the goal to solve + /// - `should_continue` if `false` is returned, then no further solving + /// will be done. A `Guidance(Suggested(...))` will be returned as a + /// `Solution`, using any answers that were generated up to that point. + /// + /// # Returns + /// + /// - `None` if the goal cannot be proven. + /// - `Some(solution)` if we succeeded in finding *some* answers, + /// although `solution` may reflect ambiguity and unknowns. + fn solve_limited( + &mut self, + program: &dyn RustIrDatabase, + goal: &UCanonical>>, + should_continue: &dyn std::ops::Fn() -> bool, + ) -> Option>; + + /// Attempts to solve the given goal, which must be in canonical + /// form. Provides multiple solutions to function `f`. This will do + /// only as much work towards `goal` as it has to (and that work + /// is cached for future attempts). + /// + /// # Parameters + /// + /// - `program` -- defines the program clauses in scope. + /// - **Important:** You must supply the same set of program clauses + /// each time you invoke `solve`, as otherwise the cached data may be + /// invalid. + /// - `goal` the goal to solve + /// - `f` -- function to process solutions. New solutions will be generated + /// while function returns `true`. + /// - first argument is solution found + /// - second argument is the next solution present + /// - returns true if next solution should be handled + /// + /// # Returns + /// + /// - `true` all solutions were processed with the function. + /// - `false` the function returned `false` and solutions were interrupted.
+ fn solve_multiple( + &mut self, + program: &dyn RustIrDatabase, + goal: &UCanonical>>, + f: &mut dyn FnMut(SubstitutionResult>>, bool) -> bool, + ) -> bool; + + /// A convenience method for when one doesn't need the actual solution, + /// only whether or not one exists. + fn has_unique_solution( + &mut self, + program: &dyn RustIrDatabase, + goal: &UCanonical>>, + ) -> bool { + match self.solve(program, goal) { + Some(sol) => sol.is_unique(), + None => false, + } } } diff --git a/chalk-solve/src/solve/slg.rs b/chalk-solve/src/solve/slg.rs deleted file mode 100644 index f5652a4a2aa..00000000000 --- a/chalk-solve/src/solve/slg.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod implementation; diff --git a/chalk-solve/src/solve/slg/implementation.rs b/chalk-solve/src/solve/slg/implementation.rs deleted file mode 100644 index d222de688c1..00000000000 --- a/chalk-solve/src/solve/slg/implementation.rs +++ /dev/null @@ -1,475 +0,0 @@ -use chalk_engine::fallible::Fallible; -use chalk_ir::cast::{Cast, Caster}; -use chalk_ir::could_match::CouldMatch; -use chalk_ir::*; -use crate::infer::ucanonicalize::{UCanonicalized, UniverseMap}; -use crate::infer::unify::UnificationResult; -use crate::infer::InferenceTable; -use crate::solve::truncate::{self, Truncated}; -use crate::solve::Solution; - -use chalk_engine::context; -use chalk_engine::hh::HhGoal; -use chalk_engine::{DelayedLiteral, ExClause, Literal}; - -use std::fmt::Debug; -use std::sync::Arc; - -mod aggregate; -mod resolvent; - -#[derive(Clone, Debug)] -pub struct SlgContext { - program: Arc, - max_size: usize, -} - -pub(super) struct TruncatingInferenceTable { - program: Arc, - max_size: usize, - infer: InferenceTable, -} - -impl SlgContext { - pub fn new(program: &Arc, max_size: usize) -> SlgContext { - SlgContext { - program: program.clone(), - max_size, - } - } -} - -impl context::Context for SlgContext { - type CanonicalGoalInEnvironment = Canonical>; - type CanonicalExClause = Canonical>; - type UCanonicalGoalInEnvironment = 
UCanonical>; - type UniverseMap = UniverseMap; - type InferenceNormalizedSubst = Substitution; - type Solution = Solution; - type Environment = Arc; - type DomainGoal = DomainGoal; - type Goal = Goal; - type BindersGoal = Binders>; - type Parameter = Parameter; - type ProgramClause = ProgramClause; - type ProgramClauses = Vec; - type UnificationResult = UnificationResult; - type CanonicalConstrainedSubst = Canonical; - type GoalInEnvironment = InEnvironment; - type Substitution = Substitution; - type RegionConstraint = InEnvironment; - type Variance = (); - - fn goal_in_environment(environment: &Arc, goal: Goal) -> InEnvironment { - InEnvironment::new(environment, goal) - } -} - -impl context::ContextOps for SlgContext { - fn is_coinductive(&self, goal: &UCanonical>) -> bool { - goal.is_coinductive(&self.program) - } - - fn instantiate_ucanonical_goal( - &self, - arg: &UCanonical>, - op: impl context::WithInstantiatedUCanonicalGoal, - ) -> R { - let (infer, subst, InEnvironment { environment, goal }) = - InferenceTable::from_canonical(arg.universes, &arg.canonical); - let dyn_infer = &mut TruncatingInferenceTable::new(&self.program, self.max_size, infer); - op.with(dyn_infer, subst, environment, goal) - } - - fn instantiate_ex_clause( - &self, - num_universes: usize, - canonical_ex_clause: &Canonical>, - op: impl context::WithInstantiatedExClause, - ) -> R { - let (infer, _subst, ex_cluse) = - InferenceTable::from_canonical(num_universes, canonical_ex_clause); - let dyn_infer = &mut TruncatingInferenceTable::new(&self.program, self.max_size, infer); - op.with(dyn_infer, ex_cluse) - } - - fn inference_normalized_subst_from_ex_clause( - canon_ex_clause: &Canonical>, - ) -> &Substitution { - &canon_ex_clause.value.subst - } - - fn empty_constraints(ccs: &Canonical) -> bool { - ccs.value.constraints.is_empty() - } - - fn inference_normalized_subst_from_subst(ccs: &Canonical) -> &Substitution { - &ccs.value.subst - } - - fn canonical(u_canon: &UCanonical>) -> 
&Canonical> { - &u_canon.canonical - } - - fn is_trivial_substitution( - u_canon: &UCanonical>, - canonical_subst: &Canonical, - ) -> bool { - u_canon.is_trivial_substitution(canonical_subst) - } - - fn num_universes(u_canon: &UCanonical>) -> usize { - u_canon.universes - } - - fn map_goal_from_canonical( - map: &UniverseMap, - value: &Canonical>, - ) -> Canonical> { - map.map_from_canonical(value) - } - - fn map_subst_from_canonical( - map: &UniverseMap, - value: &Canonical, - ) -> Canonical { - map.map_from_canonical(value) - } -} - -impl TruncatingInferenceTable { - fn new(program: &Arc, max_size: usize, infer: InferenceTable) -> Self { - Self { - program: program.clone(), - max_size, - infer, - } - } -} - -impl context::TruncateOps for TruncatingInferenceTable { - fn truncate_goal(&mut self, subgoal: &InEnvironment) -> Option> { - let Truncated { overflow, value } = - truncate::truncate(&mut self.infer, self.max_size, subgoal); - if overflow { - Some(value) - } else { - None - } - } - - fn truncate_answer(&mut self, subst: &Substitution) -> Option { - let Truncated { overflow, value } = - truncate::truncate(&mut self.infer, self.max_size, subst); - if overflow { - Some(value) - } else { - None - } - } -} - -impl context::InferenceTable for TruncatingInferenceTable { - fn into_hh_goal(&mut self, goal: Goal) -> HhGoal { - match goal { - Goal::Quantified(QuantifierKind::ForAll, binders_goal) => HhGoal::ForAll(binders_goal), - Goal::Quantified(QuantifierKind::Exists, binders_goal) => HhGoal::Exists(binders_goal), - Goal::Implies(dg, subgoal) => HhGoal::Implies(dg, *subgoal), - Goal::And(g1, g2) => HhGoal::And(*g1, *g2), - Goal::Not(g1) => HhGoal::Not(*g1), - Goal::Leaf(LeafGoal::EqGoal(EqGoal { a, b })) => HhGoal::Unify((), a, b), - Goal::Leaf(LeafGoal::DomainGoal(domain_goal)) => HhGoal::DomainGoal(domain_goal), - Goal::CannotProve(()) => HhGoal::CannotProve, - } - } - - // Used by: simplify - fn add_clauses( - &mut self, - env: &Arc, - clauses: Vec, - ) -> Arc { 
- Environment::add_clauses(env, clauses) - } - - fn into_goal(&self, domain_goal: DomainGoal) -> Goal { - domain_goal.cast() - } - - fn cannot_prove(&self) -> Goal { - Goal::CannotProve(()) - } -} - -impl context::UnificationOps for TruncatingInferenceTable { - fn program_clauses( - &self, - environment: &Arc, - goal: &DomainGoal, - ) -> Vec { - let environment_clauses = environment - .clauses - .iter() - .filter(|&env_clause| env_clause.could_match(goal)) - .cloned(); - - let program_clauses = self - .program - .program_clauses - .iter() - .filter(|&clause| clause.could_match(goal)) - .cloned(); - - environment_clauses.chain(program_clauses).collect() - } - - fn instantiate_binders_universally(&mut self, arg: &Binders>) -> Goal { - *self.infer.instantiate_binders_universally(arg) - } - - fn instantiate_binders_existentially(&mut self, arg: &Binders>) -> Goal { - *self.infer.instantiate_binders_existentially(arg) - } - - fn debug_ex_clause<'v>(&mut self, value: &'v ExClause) -> Box { - Box::new(self.infer.normalize_deep(value)) - } - - fn canonicalize_goal(&mut self, value: &InEnvironment) -> Canonical> { - self.infer.canonicalize(value).quantified - } - - fn canonicalize_ex_clause( - &mut self, - value: &ExClause, - ) -> Canonical> { - self.infer.canonicalize(value).quantified - } - - fn canonicalize_constrained_subst( - &mut self, - subst: Substitution, - constraints: Vec>, - ) -> Canonical { - self.infer - .canonicalize(&ConstrainedSubst { subst, constraints }) - .quantified - } - - fn u_canonicalize_goal( - &mut self, - value: &Canonical>, - ) -> ( - UCanonical>, - crate::infer::ucanonicalize::UniverseMap, - ) { - let UCanonicalized { - quantified, - universes, - } = self.infer.u_canonicalize(value); - (quantified, universes) - } - - fn invert_goal(&mut self, value: &InEnvironment) -> Option> { - self.infer.invert(value) - } - - fn unify_parameters( - &mut self, - environment: &Arc, - _: (), - a: &Parameter, - b: &Parameter, - ) -> Fallible { - 
self.infer.unify(environment, a, b) - } - - /// Since we do not have distinct types for the inference context and the slg-context, - /// these conversion operations are just no-ops.q - fn sink_answer_subset(&self, c: &Canonical) -> Canonical { - c.clone() - } - - /// Since we do not have distinct types for the inference context and the slg-context, - /// these conversion operations are just no-ops.q - fn lift_delayed_literal(&self, c: DelayedLiteral) -> DelayedLiteral { - c - } - - fn into_ex_clause(&mut self, result: UnificationResult, ex_clause: &mut ExClause) { - into_ex_clause(result, ex_clause) - } -} - -/// Helper function -fn into_ex_clause(result: UnificationResult, ex_clause: &mut ExClause) { - ex_clause - .subgoals - .extend(result.goals.into_iter().casted().map(Literal::Positive)); - ex_clause.constraints.extend(result.constraints); -} - -trait SubstitutionExt { - fn may_invalidate(&self, subst: &Canonical) -> bool; -} - -impl SubstitutionExt for Substitution { - fn may_invalidate(&self, subst: &Canonical) -> bool { - self.parameters - .iter() - .zip(&subst.value.parameters) - .any(|(new, current)| MayInvalidate.aggregate_parameters(new, current)) - } -} - -// This is a struct in case we need to add state at any point like in AntiUnifier -struct MayInvalidate; - -impl MayInvalidate { - fn aggregate_parameters(&mut self, new: &Parameter, current: &Parameter) -> bool { - match (new, current) { - (ParameterKind::Ty(ty1), ParameterKind::Ty(ty2)) => self.aggregate_tys(ty1, ty2), - (ParameterKind::Lifetime(l1), ParameterKind::Lifetime(l2)) => { - self.aggregate_lifetimes(l1, l2) - } - (ParameterKind::Ty(_), _) | (ParameterKind::Lifetime(_), _) => panic!( - "mismatched parameter kinds: new={:?} current={:?}", - new, current - ), - } - } - - // Returns true if the two types could be unequal. 
- fn aggregate_tys(&mut self, new: &Ty, current: &Ty) -> bool { - match (new, current) { - (_, Ty::BoundVar(_)) => { - // If the aggregate solution already has an inference - // variable here, then no matter what type we produce, - // the aggregate cannot get 'more generalized' than it - // already is. So return false, we cannot invalidate. - // - // (Note that "inference variables" show up as *bound - // variables* here, because we are looking at the - // canonical form.) - false - } - - (Ty::BoundVar(_), _) => { - // If we see a type variable in the potential future - // solution, we have to be conservative. We don't know - // what type variable will wind up being! Remember - // that the future solution could be any instantiation - // of `ty0` -- or it could leave this variable - // unbound, if the result is true for all types. - // - // (Note that "inference variables" show up as *bound - // variables* here, because we are looking at the - // canonical form.) - true - } - - (Ty::InferenceVar(_), _) | (_, Ty::InferenceVar(_)) => { - panic!( - "unexpected free inference variable in may-invalidate: {:?} vs {:?}", - new, current, - ); - } - - (Ty::Apply(apply1), Ty::Apply(apply2)) => { - self.aggregate_application_tys(apply1, apply2) - } - - (Ty::Projection(apply1), Ty::Projection(apply2)) => { - self.aggregate_projection_tys(apply1, apply2) - } - - (Ty::UnselectedProjection(apply1), Ty::UnselectedProjection(apply2)) => { - self.aggregate_unselected_projection_tys(apply1, apply2) - } - - // For everything else, be conservative here and just say we may invalidate. 
- (Ty::ForAll(_), _) - | (Ty::Apply(_), _) - | (Ty::Projection(_), _) - | (Ty::UnselectedProjection(_), _) => true, - } - } - - fn aggregate_lifetimes(&mut self, _: &Lifetime, _: &Lifetime) -> bool { - true - } - - fn aggregate_application_tys(&mut self, new: &ApplicationTy, current: &ApplicationTy) -> bool { - let ApplicationTy { - name: new_name, - parameters: new_parameters, - } = new; - let ApplicationTy { - name: current_name, - parameters: current_parameters, - } = current; - - self.aggregate_name_and_substs(new_name, new_parameters, current_name, current_parameters) - } - - fn aggregate_projection_tys(&mut self, new: &ProjectionTy, current: &ProjectionTy) -> bool { - let ProjectionTy { - associated_ty_id: new_name, - parameters: new_parameters, - } = new; - let ProjectionTy { - associated_ty_id: current_name, - parameters: current_parameters, - } = current; - - self.aggregate_name_and_substs(new_name, new_parameters, current_name, current_parameters) - } - - fn aggregate_unselected_projection_tys( - &mut self, - new: &UnselectedProjectionTy, - current: &UnselectedProjectionTy, - ) -> bool { - let UnselectedProjectionTy { - type_name: new_name, - parameters: new_parameters, - } = new; - let UnselectedProjectionTy { - type_name: current_name, - parameters: current_parameters, - } = current; - - self.aggregate_name_and_substs(new_name, new_parameters, current_name, current_parameters) - } - - fn aggregate_name_and_substs( - &mut self, - new_name: N, - new_parameters: &[Parameter], - current_name: N, - current_parameters: &[Parameter], - ) -> bool - where - N: Copy + Eq + Debug, - { - if new_name != current_name { - return true; - } - - let name = new_name; - - assert_eq!( - new_parameters.len(), - current_parameters.len(), - "does {:?} take {} parameters or {}? 
can't both be right", - name, - new_parameters.len(), - current_parameters.len() - ); - - new_parameters - .iter() - .zip(current_parameters) - .any(|(new, current)| self.aggregate_parameters(new, current)) - } -} diff --git a/chalk-solve/src/solve/slg/implementation/aggregate.rs b/chalk-solve/src/solve/slg/implementation/aggregate.rs deleted file mode 100644 index 58189f20ee7..00000000000 --- a/chalk-solve/src/solve/slg/implementation/aggregate.rs +++ /dev/null @@ -1,391 +0,0 @@ -use chalk_ir::cast::Cast; -use chalk_ir::*; -use crate::ext::*; -use crate::infer::InferenceTable; -use crate::solve::slg::implementation::SubstitutionExt; -use crate::solve::{Guidance, Solution}; - -use chalk_engine::context; -use chalk_engine::SimplifiedAnswer; -use std::fmt::Debug; - -use super::SlgContext; - -/// Draws as many answers as it needs from `simplified_answers` (but -/// no more!) in order to come up with a solution. -impl context::AggregateOps for SlgContext { - fn make_solution( - &self, - root_goal: &Canonical>, - mut simplified_answers: impl context::AnswerStream, - ) -> Option { - // No answers at all? - if simplified_answers.peek_answer().is_none() { - return None; - } - let SimplifiedAnswer { subst, ambiguous } = simplified_answers.next_answer().unwrap(); - - // Exactly 1 unconditional answer? - if simplified_answers.peek_answer().is_none() && !ambiguous { - return Some(Solution::Unique(subst)); - } - - // Otherwise, we either have >1 answer, or else we have - // ambiguity. Either way, we are only going to be giving back - // **guidance**, and with guidance, the caller doesn't get - // back any region constraints. So drop them from our `subst` - // variable. - // - // FIXME-- there is actually a 3rd possibility. We could have - // >1 answer where all the answers have the same substitution, - // but different region constraints. We should collapse those - // cases into an `OR` region constraint at some point, but I - // leave that for future work. 
This is basically - // rust-lang/rust#21974. - let mut subst = subst.map(|cs| cs.subst); - - // Extract answers and merge them into `subst`. Stop once we have - // a trivial subst (or run out of answers). - let guidance = loop { - if subst.value.is_empty() || is_trivial(&subst) { - break Guidance::Unknown; - } - - if !simplified_answers - .any_future_answer(|ref mut new_subst| new_subst.may_invalidate(&subst)) - { - break Guidance::Definite(subst); - } - - match simplified_answers.next_answer() { - Some(answer1) => { - subst = merge_into_guidance(root_goal, subst, &answer1.subst); - } - - None => { - break Guidance::Definite(subst); - } - } - }; - - Some(Solution::Ambig(guidance)) - } -} - -/// Given a current substitution used as guidance for `root_goal`, and -/// a new possible answer to `root_goal`, returns a new set of -/// guidance that encompasses both of them. This is often more general -/// than the old guidance. For example, if we had a guidance of `?0 = -/// u32` and the new answer is `?0 = i32`, then the guidance would -/// become `?0 = ?X` (where `?X` is some fresh variable). -fn merge_into_guidance( - root_goal: &Canonical>, - guidance: Canonical, - answer: &Canonical, -) -> Canonical { - let mut infer = InferenceTable::new(); - let Canonical { - value: ConstrainedSubst { - subst: subst1, - constraints: _, - }, - binders: _, - } = answer; - - // Collect the types that the two substitutions have in - // common. - let aggr_parameters: Vec<_> = guidance - .value - .parameters - .iter() - .zip(&subst1.parameters) - .enumerate() - .map(|(index, (value, value1))| { - // We have two values for some variable X that - // appears in the root goal. Find out the universe - // of X. - let universe = root_goal.binders[index].into_inner(); - - let ty = match value { - ParameterKind::Ty(ty) => ty, - ParameterKind::Lifetime(_) => { - // Ignore the lifetimes from the substitution: we're just - // creating guidance here anyway. 
- return infer.new_variable(universe).to_lifetime().cast(); - } - }; - - let ty1 = value1.assert_ty_ref(); - - // Combine the two types into a new type. - let mut aggr = AntiUnifier { - infer: &mut infer, - universe, - }; - aggr.aggregate_tys(&ty, ty1).cast() - }).collect(); - - let aggr_subst = Substitution { - parameters: aggr_parameters, - }; - - infer.canonicalize(&aggr_subst).quantified -} - -fn is_trivial(subst: &Canonical) -> bool { - // A subst is trivial if.. - subst - .value - .parameters - .iter() - .enumerate() - .all(|(index, parameter)| match parameter { - // All types are mapped to distinct variables. Since this - // has been canonicalized, those will also be the first N - // variables. - ParameterKind::Ty(t) => match t.bound() { - None => false, - Some(depth) => depth == index, - }, - - // And no lifetime mappings. (This is too strict, but we never - // product substs with lifetimes.) - ParameterKind::Lifetime(_) => false, - }) -} - -/// [Anti-unification] is the act of taking two things that do not -/// unify and finding a minimal generarlization of them. So for -/// example `Vec` anti-unified with `Vec` might be -/// `Vec`. This is a **very simplistic** anti-unifier. -/// -/// [Anti-unification]: https://en.wikipedia.org/wiki/Anti-unification_(computer_science) -struct AntiUnifier<'infer> { - infer: &'infer mut InferenceTable, - universe: UniverseIndex, -} - -impl<'infer> AntiUnifier<'infer> { - fn aggregate_tys(&mut self, ty0: &Ty, ty1: &Ty) -> Ty { - match (ty0, ty1) { - // If we see bound things on either side, just drop in a - // fresh variable. This means we will sometimes - // overgeneralize. So for example if we have two - // solutions that are both `(X, X)`, we just produce `(Y, - // Z)` in all cases. - (Ty::InferenceVar(_), Ty::InferenceVar(_)) => self.new_variable(), - - // Ugh. Aggregating two types like `for<'a> fn(&'a u32, - // &'a u32)` and `for<'a, 'b> fn(&'a u32, &'b u32)` seems - // kinda' hard. 
Don't try to be smart for now, just plop a - // variable in there and be done with it. - (Ty::BoundVar(_), Ty::BoundVar(_)) | (Ty::ForAll(_), Ty::ForAll(_)) => { - self.new_variable() - } - - (Ty::Apply(apply1), Ty::Apply(apply2)) => { - self.aggregate_application_tys(apply1, apply2) - } - - (Ty::Projection(apply1), Ty::Projection(apply2)) => { - self.aggregate_projection_tys(apply1, apply2) - } - - (Ty::UnselectedProjection(apply1), Ty::UnselectedProjection(apply2)) => { - self.aggregate_unselected_projection_tys(apply1, apply2) - } - - // Mismatched base kinds. - (Ty::InferenceVar(_), _) - | (Ty::BoundVar(_), _) - | (Ty::ForAll(_), _) - | (Ty::Apply(_), _) - | (Ty::Projection(_), _) - | (Ty::UnselectedProjection(_), _) => self.new_variable(), - } - } - - fn aggregate_application_tys(&mut self, apply1: &ApplicationTy, apply2: &ApplicationTy) -> Ty { - let ApplicationTy { - name: name1, - parameters: parameters1, - } = apply1; - let ApplicationTy { - name: name2, - parameters: parameters2, - } = apply2; - - self.aggregate_name_and_substs(name1, parameters1, name2, parameters2) - .map(|(&name, parameters)| Ty::Apply(ApplicationTy { name, parameters })) - .unwrap_or_else(|| self.new_variable()) - } - - fn aggregate_projection_tys(&mut self, proj1: &ProjectionTy, proj2: &ProjectionTy) -> Ty { - let ProjectionTy { - associated_ty_id: name1, - parameters: parameters1, - } = proj1; - let ProjectionTy { - associated_ty_id: name2, - parameters: parameters2, - } = proj2; - - self.aggregate_name_and_substs(name1, parameters1, name2, parameters2) - .map(|(&associated_ty_id, parameters)| { - Ty::Projection(ProjectionTy { - associated_ty_id, - parameters, - }) - }).unwrap_or_else(|| self.new_variable()) - } - - fn aggregate_unselected_projection_tys( - &mut self, - proj1: &UnselectedProjectionTy, - proj2: &UnselectedProjectionTy, - ) -> Ty { - let UnselectedProjectionTy { - type_name: name1, - parameters: parameters1, - } = proj1; - let UnselectedProjectionTy { - type_name: 
name2, - parameters: parameters2, - } = proj2; - - self.aggregate_name_and_substs(name1, parameters1, name2, parameters2) - .map(|(&type_name, parameters)| { - Ty::UnselectedProjection(UnselectedProjectionTy { - type_name, - parameters, - }) - }).unwrap_or_else(|| self.new_variable()) - } - - fn aggregate_name_and_substs( - &mut self, - name1: N, - parameters1: &[Parameter], - name2: N, - parameters2: &[Parameter], - ) -> Option<(N, Vec)> - where - N: Copy + Eq + Debug, - { - if name1 != name2 { - return None; - } - - let name = name1; - - assert_eq!( - parameters1.len(), - parameters2.len(), - "does {:?} take {} parameters or {}? can't both be right", - name, - parameters1.len(), - parameters2.len() - ); - - let parameters: Vec<_> = parameters1 - .iter() - .zip(parameters2) - .map(|(p1, p2)| self.aggregate_parameters(p1, p2)) - .collect(); - - Some((name, parameters)) - } - - fn aggregate_parameters(&mut self, p1: &Parameter, p2: &Parameter) -> Parameter { - match (p1, p2) { - (ParameterKind::Ty(ty1), ParameterKind::Ty(ty2)) => { - ParameterKind::Ty(self.aggregate_tys(ty1, ty2)) - } - (ParameterKind::Lifetime(l1), ParameterKind::Lifetime(l2)) => { - ParameterKind::Lifetime(self.aggregate_lifetimes(l1, l2)) - } - (ParameterKind::Ty(_), _) | (ParameterKind::Lifetime(_), _) => { - panic!("mismatched parameter kinds: p1={:?} p2={:?}", p1, p2) - } - } - } - - fn aggregate_lifetimes(&mut self, l1: &Lifetime, l2: &Lifetime) -> Lifetime { - match (l1, l2) { - (Lifetime::InferenceVar(_), _) | (_, Lifetime::InferenceVar(_)) => { - self.new_lifetime_variable() - } - - (Lifetime::BoundVar(_), _) | (_, Lifetime::BoundVar(_)) => { - self.new_lifetime_variable() - } - - (Lifetime::Placeholder(_), Lifetime::Placeholder(_)) => if l1 == l2 { - *l1 - } else { - self.new_lifetime_variable() - }, - } - } - - fn new_variable(&mut self) -> Ty { - self.infer.new_variable(self.universe).to_ty() - } - - fn new_lifetime_variable(&mut self) -> Lifetime { - 
self.infer.new_variable(self.universe).to_lifetime() - } -} - -/// Test the equivalent of `Vec` vs `Vec` -#[test] -fn vec_i32_vs_vec_u32() { - let mut infer = InferenceTable::new(); - let mut anti_unifier = AntiUnifier { - infer: &mut infer, - universe: UniverseIndex::root(), - }; - - let ty = anti_unifier.aggregate_tys( - &ty!(apply (item 0) (apply (item 1))), - &ty!(apply (item 0) (apply (item 2))), - ); - assert_eq!(ty!(apply (item 0) (infer 0)), ty); -} - -/// Test the equivalent of `Vec` vs `Vec` -#[test] -fn vec_i32_vs_vec_i32() { - let mut infer = InferenceTable::new(); - let mut anti_unifier = AntiUnifier { - infer: &mut infer, - universe: UniverseIndex::root(), - }; - - let ty = anti_unifier.aggregate_tys( - &ty!(apply (item 0) (apply (item 1))), - &ty!(apply (item 0) (apply (item 1))), - ); - assert_eq!(ty!(apply (item 0) (apply (item 1))), ty); -} - -/// Test the equivalent of `Vec` vs `Vec` -#[test] -fn vec_x_vs_vec_y() { - let mut infer = InferenceTable::new(); - let mut anti_unifier = AntiUnifier { - infer: &mut infer, - universe: UniverseIndex::root(), - }; - - // Note that the `var 0` and `var 1` in these types would be - // referring to canonicalized free variables, not variables in - // `infer`. - let ty = anti_unifier.aggregate_tys( - &ty!(apply (item 0) (infer 0)), - &ty!(apply (item 0) (infer 1)), - ); - - // But this `var 0` is from `infer. 
- assert_eq!(ty!(apply (item 0) (infer 0)), ty); -} diff --git a/chalk-solve/src/solve/slg/implementation/resolvent.rs b/chalk-solve/src/solve/slg/implementation/resolvent.rs deleted file mode 100644 index 9390f980493..00000000000 --- a/chalk-solve/src/solve/slg/implementation/resolvent.rs +++ /dev/null @@ -1,424 +0,0 @@ -use chalk_engine::fallible::Fallible; -use chalk_ir::fold::shift::Shift; -use chalk_ir::fold::Fold; -use chalk_ir::zip::{Zip, Zipper}; -use chalk_ir::*; -use crate::infer::InferenceTable; -use crate::solve::slg::implementation::{self, SlgContext, TruncatingInferenceTable}; - -use chalk_engine::context; -use chalk_engine::{ExClause, Literal}; -use std::sync::Arc; - -/////////////////////////////////////////////////////////////////////////// -// SLG RESOLVENTS -// -// The "SLG Resolvent" is used to combine a *goal* G with some -// clause or answer *C*. It unifies the goal's selected literal -// with the clause and then inserts the clause's conditions into -// the goal's list of things to prove, basically. Although this is -// one operation in EWFS, we have specialized variants for merging -// a program clause and an answer (though they share some code in -// common). -// -// Terminology note: The NFTD and RR papers use the term -// "resolvent" to mean both the factor and the resolvent, but EWFS -// distinguishes the two. We follow EWFS here since -- in the code -// -- we tend to know whether there are delayed literals or not, -// and hence to know which code path we actually want. -// -// From EWFS: -// -// Let G be an X-clause A :- D | L1,...Ln, where N > 0, and Li be selected atom. -// -// Let C be an X-clause with no delayed literals. Let -// -// C' = A' :- L'1...L'm -// -// be a variant of C such that G and C' have no variables in -// common. -// -// Let Li and A' be unified with MGU S. -// -// Then: -// -// S(A :- D | L1...Li-1, L1'...L'm, Li+1...Ln) -// -// is the SLG resolvent of G with C. 
- -impl context::ResolventOps for TruncatingInferenceTable { - /// Applies the SLG resolvent algorithm to incorporate a program - /// clause into the main X-clause, producing a new X-clause that - /// must be solved. - /// - /// # Parameters - /// - /// - `goal` is the goal G that we are trying to solve - /// - `clause` is the program clause that may be useful to that end - fn resolvent_clause( - &mut self, - environment: &Arc, - goal: &DomainGoal, - subst: &Substitution, - clause: &ProgramClause, - ) -> Fallible>> { - // Relating the above description to our situation: - // - // - `goal` G, except with binders for any existential variables. - // - Also, we always select the first literal in `ex_clause.literals`, so `i` is 0. - // - `clause` is C, except with binders for any existential variables. - - debug_heading!( - "resolvent_clause(\ - \n goal={:?},\ - \n clause={:?})", - goal, - clause, - ); - - let snapshot = self.infer.snapshot(); - - // C' in the description above is `consequence :- conditions`. - // - // Note that G and C' have no variables in common. - let ProgramClauseImplication { - consequence, - conditions, - } = match clause { - ProgramClause::Implies(implication) => implication.clone(), - ProgramClause::ForAll(implication) => { - self.infer.instantiate_binders_existentially(implication) - } - }; - debug!("consequence = {:?}", consequence); - debug!("conditions = {:?}", conditions); - - // Unify the selected literal Li with C'. - let unification_result = self.infer.unify(environment, goal, &consequence)?; - - // Final X-clause that we will return. - let mut ex_clause = ExClause { - subst: subst.clone(), - delayed_literals: vec![], - constraints: vec![], - subgoals: vec![], - }; - - // Add the subgoals/region-constraints that unification gave us. - implementation::into_ex_clause(unification_result, &mut ex_clause); - - // Add the `conditions` from the program clause into the result too. 
- ex_clause - .subgoals - .extend(conditions.into_iter().map(|c| match c { - Goal::Not(c) => Literal::Negative(InEnvironment::new(environment, *c)), - c => Literal::Positive(InEnvironment::new(environment, c)), - })); - - let canonical_ex_clause = self.infer.canonicalize(&ex_clause).quantified; - - self.infer.rollback_to(snapshot); - - Ok(canonical_ex_clause) - } - - /////////////////////////////////////////////////////////////////////////// - // apply_answer_subst - // - // Apply answer subst has the job of "plugging in" the answer to a - // query into the pending ex-clause. To see how it works, it's worth stepping - // up one level. Imagine that first we are trying to prove a goal A: - // - // A :- T: Foo>, ?U: Bar - // - // this spawns a subgoal `T: Foo>`, and it's this subgoal that - // has now produced an answer `?0 = u32`. When the goal A spawned the - // subgoal, it will also have registered a `PendingExClause` with its - // current state. At the point where *this* method has been invoked, - // that pending ex-clause has been instantiated with fresh variables and setup, - // so we have four bits of incoming information: - // - // - `ex_clause`, which is the remaining stuff to prove for the goal A. - // Here, the inference variable `?U` has been instantiated with a fresh variable - // `?X`. - // - `A :- ?X: Bar` - // - `selected_goal`, which is the thing we were trying to prove when we - // spawned the subgoal. It shares inference variables with `ex_clause`. - // - `T: Foo>` - // - `answer_table_goal`, which is the subgoal in canonical form: - // - `for T: Foo>` - // - `canonical_answer_subst`, which is an answer to `answer_table_goal`. - // - `[?0 = u32]` - // - // In this case, this function will (a) unify `u32` and `?X` and then - // (b) return `ex_clause` (extended possibly with new region constraints - // and subgoals). 
- // - // One way to do this would be to (a) substitute - // `canonical_answer_subst` into `answer_table_goal` (yielding `T: - // Foo>`) and then (b) instantiate the result with fresh - // variables (no effect in this instance) and then (c) unify that with - // `selected_goal` (yielding, indirectly, that `?X = u32`). But that - // is not what we do: it's inefficient, to start, but it also causes - // problems because unification of projections can make new - // sub-goals. That is, even if the answers don't involve any - // projections, the table goals might, and this can create an infinite - // loop (see also #74). - // - // What we do instead is to (a) instantiate the substitution, which - // may have free variables in it (in this case, it would not, and the - // instantiation woudl have no effect) and then (b) zip - // `answer_table_goal` and `selected_goal` without having done any - // substitution. After all, these ought to be basically the same, - // since `answer_table_goal` was created by canonicalizing (and - // possibly truncating, but we'll get to that later) - // `selected_goal`. Then, whenever we reach a "free variable" in - // `answer_table_goal`, say `?0`, we go to the instantiated answer - // substitution and lookup the result (in this case, `u32`). We take - // that result and unify it with whatever we find in `selected_goal` - // (in this case, `?X`). - // - // Let's cover then some corner cases. First off, what is this - // business of instantiating the answer? Well, the answer may not be a - // simple type like `u32`, it could be a "family" of types, like - // `for Vec` -- i.e., `Vec: Bar` for *any* `X`. In that - // case, the instantiation would produce a substitution `[?0 := - // Vec]` (note that the key is not affected, just the value). So - // when we do the unification, instead of unifying `?X = u32`, we - // would unify `?X = Vec`. - // - // Next, truncation. 
One key thing is that the `answer_table_goal` may - // not be *exactly* the same as the `selected_goal` -- we will - // truncate it if it gets too deep. so, in our example, it may be that - // instead of `answer_table_goal` being `for T: Foo>`, - // it could have been truncated to `for T: Foo` (which is a - // more general goal). In that case, let's say that the answer is - // still `[?0 = u32]`, meaning that `T: Foo` is true (which isn't - // actually interesting to our original goal). When we do the zip - // then, we will encounter `?0` in the `answer_table_goal` and pair - // that with `Vec` from the pending goal. We will attempt to unify - // `Vec` with `u32` (from the substitution), which will fail. That - // failure will get propagated back up. - - fn apply_answer_subst( - &mut self, - ex_clause: ExClause, - selected_goal: &InEnvironment, - answer_table_goal: &Canonical>, - canonical_answer_subst: &Canonical, - ) -> Fallible> { - debug_heading!("apply_answer_subst()"); - debug!("ex_clause={:?}", ex_clause); - debug!( - "selected_goal={:?}", - self.infer.normalize_deep(selected_goal) - ); - debug!("answer_table_goal={:?}", answer_table_goal); - debug!("canonical_answer_subst={:?}", canonical_answer_subst); - - // C' is now `answer`. No variables in commmon with G. - let ConstrainedSubst { - subst: answer_subst, - - // Assuming unification succeeds, we incorporate the - // region constraints from the answer into the result; - // we'll need them if this answer (which is not yet known - // to be true) winds up being true, and otherwise (if the - // answer is false or unknown) it doesn't matter. 
- constraints: answer_constraints, - } = self.infer.instantiate_canonical(&canonical_answer_subst); - - let mut ex_clause = AnswerSubstitutor::substitute( - &mut self.infer, - &selected_goal.environment, - &answer_subst, - ex_clause, - &answer_table_goal.value, - selected_goal, - )?; - ex_clause.constraints.extend(answer_constraints); - Ok(ex_clause) - } -} - -struct AnswerSubstitutor<'t> { - table: &'t mut InferenceTable, - environment: &'t Arc, - answer_subst: &'t Substitution, - answer_binders: usize, - pending_binders: usize, - ex_clause: ExClause, -} - -impl<'t> AnswerSubstitutor<'t> { - fn substitute( - table: &mut InferenceTable, - environment: &Arc, - answer_subst: &Substitution, - ex_clause: ExClause, - answer: &T, - pending: &T, - ) -> Fallible> { - let mut this = AnswerSubstitutor { - table, - environment, - answer_subst, - ex_clause, - answer_binders: 0, - pending_binders: 0, - }; - Zip::zip_with(&mut this, answer, pending)?; - Ok(this.ex_clause) - } - - fn unify_free_answer_var( - &mut self, - answer_depth: usize, - pending: ParameterKind<&Ty, &Lifetime>, - ) -> Fallible { - // This variable is bound in the answer, not free, so it - // doesn't represent a reference into the answer substitution. - if answer_depth < self.answer_binders { - return Ok(false); - } - - let answer_param = &self.answer_subst.parameters[answer_depth - self.answer_binders]; - - let pending_shifted = &pending - .shifted_out(self.pending_binders) - .unwrap_or_else(|_| { - panic!( - "truncate extracted a pending value that references internal binder: {:?}", - pending, - ) - }); - - implementation::into_ex_clause( - self.table - .unify(&self.environment, answer_param, pending_shifted)?, - &mut self.ex_clause, - ); - - Ok(true) - } - - /// When we encounter a variable in the answer goal, we first try - /// `unify_free_answer_var`. 
Assuming that this fails, the - /// variable must be a bound variable in the answer goal -- in - /// that case, there should be a corresponding bound variable in - /// the pending goal. This bit of code just checks that latter - /// case. - fn assert_matching_vars(&mut self, answer_depth: usize, pending_depth: usize) -> Fallible<()> { - assert!(answer_depth < self.answer_binders); - assert!(pending_depth < self.pending_binders); - assert_eq!( - self.answer_binders - answer_depth, - self.pending_binders - pending_depth - ); - Ok(()) - } -} - -impl<'t> Zipper for AnswerSubstitutor<'t> { - fn zip_tys(&mut self, answer: &Ty, pending: &Ty) -> Fallible<()> { - if let Some(pending) = self.table.normalize_shallow(pending) { - return Zip::zip_with(self, answer, &pending); - } - - // If the answer has a variable here, then this is one of the - // "inputs" to the subgoal table. We need to extract the - // resulting answer that the subgoal found and unify it with - // the value from our "pending subgoal". - if let Ty::BoundVar(answer_depth) = answer { - if self.unify_free_answer_var(*answer_depth, ParameterKind::Ty(pending))? { - return Ok(()); - } - } - - // Otherwise, the answer and the selected subgoal ought to be a perfect match for - // one another. 
- match (answer, pending) { - (Ty::BoundVar(answer_depth), Ty::BoundVar(pending_depth)) => { - self.assert_matching_vars(*answer_depth, *pending_depth) - } - - (Ty::Apply(answer), Ty::Apply(pending)) => Zip::zip_with(self, answer, pending), - - (Ty::Projection(answer), Ty::Projection(pending)) => { - Zip::zip_with(self, answer, pending) - } - - (Ty::UnselectedProjection(answer), Ty::UnselectedProjection(pending)) => { - Zip::zip_with(self, answer, pending) - } - - (Ty::ForAll(answer), Ty::ForAll(pending)) => { - self.answer_binders += answer.num_binders; - self.pending_binders += pending.num_binders; - Zip::zip_with(self, &answer.ty, &pending.ty)?; - self.answer_binders -= answer.num_binders; - self.pending_binders -= pending.num_binders; - Ok(()) - } - - (Ty::InferenceVar(_), _) | (_, Ty::InferenceVar(_)) => panic!( - "unexpected inference var in answer `{:?}` or pending goal `{:?}`", - answer, pending, - ), - - (Ty::BoundVar(_), _) - | (Ty::Apply(_), _) - | (Ty::Projection(_), _) - | (Ty::UnselectedProjection(_), _) - | (Ty::ForAll(_), _) => panic!( - "structural mismatch between answer `{:?}` and pending goal `{:?}`", - answer, pending, - ), - } - } - - fn zip_lifetimes(&mut self, answer: &Lifetime, pending: &Lifetime) -> Fallible<()> { - if let Some(pending) = self.table.normalize_lifetime(pending) { - return Zip::zip_with(self, answer, &pending); - } - - if let Lifetime::BoundVar(answer_depth) = answer { - if self.unify_free_answer_var(*answer_depth, ParameterKind::Lifetime(pending))? 
{ - return Ok(()); - } - } - - match (answer, pending) { - (Lifetime::BoundVar(answer_depth), Lifetime::BoundVar(pending_depth)) => { - self.assert_matching_vars(*answer_depth, *pending_depth) - } - - (Lifetime::Placeholder(_), Lifetime::Placeholder(_)) => { - assert_eq!(answer, pending); - Ok(()) - } - - (Lifetime::InferenceVar(_), _) | (_, Lifetime::InferenceVar(_)) => panic!( - "unexpected inference var in answer `{:?}` or pending goal `{:?}`", - answer, pending, - ), - - (Lifetime::BoundVar(_), _) | (Lifetime::Placeholder(_), _) => panic!( - "structural mismatch between answer `{:?}` and pending goal `{:?}`", - answer, pending, - ), - } - } - - fn zip_binders(&mut self, answer: &Binders, pending: &Binders) -> Fallible<()> - where - T: Zip + Fold, - { - self.answer_binders += answer.binders.len(); - self.pending_binders += pending.binders.len(); - Zip::zip_with(self, &answer.value, &pending.value)?; - self.answer_binders -= answer.binders.len(); - self.pending_binders -= pending.binders.len(); - Ok(()) - } -} diff --git a/chalk-solve/src/solve/slg/test.rs b/chalk-solve/src/solve/slg/test.rs deleted file mode 100644 index 57318d35c21..00000000000 --- a/chalk-solve/src/solve/slg/test.rs +++ /dev/null @@ -1,1115 +0,0 @@ -#![cfg(test)] - -use crate::solve::slg::implementation::SlgContext; - -use chalk_engine::forest::Forest; -use std::sync::Arc; -use test_util::*; -use ir::solve::SolverChoice; - -macro_rules! 
test { - (program $program:tt $(goal $goal:tt first $n:tt with max $depth:tt { $expected:expr })*) => { - solve_goal(stringify!($program), vec![$(($depth, $n, stringify!($goal), $expected)),*]) - }; - - (program $program:tt $(goal $goal:tt fixed $n:tt with max $depth:tt { $expected:expr })*) => { - solve_goal_fixed_num_answers( - stringify!($program), - vec![$(($depth, $n, stringify!($goal), $expected)),*], - ) - } -} - -fn solve_goal(program_text: &str, goals: Vec<(usize, usize, &str, &str)>) { - println!("program {}", program_text); - assert!(program_text.starts_with("{")); - assert!(program_text.ends_with("}")); - let program = &Arc::new( - parse_and_lower_program( - &program_text[1..program_text.len() - 1], - SolverChoice::default() - ).unwrap() - ); - let env = &Arc::new(program.environment()); - ir::tls::set_current_program(&program, || { - for (max_size, num_answers, goal_text, expected) in goals { - println!("----------------------------------------------------------------------"); - println!("goal {}", goal_text); - assert!(goal_text.starts_with("{")); - assert!(goal_text.ends_with("}")); - let goal = parse_and_lower_goal(&program, &goal_text[1..goal_text.len() - 1]).unwrap(); - let peeled_goal = goal.into_peeled_goal(); - let mut forest = Forest::new(SlgContext::new(env, max_size)); - let result = format!("{:#?}", forest.force_answers(peeled_goal, num_answers)); - - assert_test_result_eq(&expected, &result); - } - }); -} - -fn solve_goal_fixed_num_answers(program_text: &str, goals: Vec<(usize, usize, &str, &str)>) { - println!("program {}", program_text); - assert!(program_text.starts_with("{")); - assert!(program_text.ends_with("}")); - let program = &Arc::new( - parse_and_lower_program( - &program_text[1..program_text.len() - 1], - SolverChoice::default() - ).unwrap() - ); - let env = &Arc::new(program.environment()); - ir::tls::set_current_program(&program, || { - for (max_size, num_answers, goal_text, expected) in goals { - 
println!("----------------------------------------------------------------------"); - println!("goal {}", goal_text); - assert!(goal_text.starts_with("{")); - assert!(goal_text.ends_with("}")); - let goal = parse_and_lower_goal(&program, &goal_text[1..goal_text.len() - 1]).unwrap(); - let peeled_goal = goal.into_peeled_goal(); - let mut forest = Forest::new(SlgContext::new(env, max_size)); - let result = format!("{:?}", forest.solve(&peeled_goal)); - - assert_test_result_eq(&expected, &result); - - let num_cached_answers_for_goal = forest.num_cached_answers_for_goal(&peeled_goal); - // ::test_util::assert_test_result_eq( - // &format!("{}", num_cached_answers_for_goal), - // &format!("{}", expected_num_answers) - // ); - assert_eq!(num_cached_answers_for_goal, num_answers); - } - }); -} - -#[test] -fn basic() { - test! { - program { - trait Sized { } - - struct i32 { } - impl Sized for i32 { } - } - - goal { - forall { if (T: Sized) { T: Sized } } - } first 2 with max 10 { - r"[ - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - } - ]" - } - } -} - -#[test] -fn breadth_first() { - test! 
{ - program { - trait Sized { } - - struct i32 { } - impl Sized for i32 { } - - struct Vec { } - impl Sized for Vec where T: Sized { } - - struct Slice { } - impl Sized for Slice where T: Sized { } - } - - goal { - exists { T: Sized } - } first 5 with max 10 { - r"[ - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := i32], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - }, - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := Vec], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - }, - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := Slice], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - }, - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := Vec>], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - }, - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := Slice>], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - } - ]" - } - } -} - -#[test] -fn infinite_recursion() { - test! { - program { - trait A { } - trait B { } - trait C { } - trait D { } - - struct Vec { } - impl A for Vec where T: B { } - impl B for Vec where T: C { } - impl C for Vec where T: D { } - impl D for Vec where T: A { } - } - - goal { - exists { T: A } - } first 5 with max 10 { - r"[]" - } - } -} - -/// Make sure we don't get a stack overflow or other badness for this -/// test from scalexm. -#[test] -fn subgoal_abstraction() { - test! { - program { - trait Foo { } - struct Box { } - impl Foo for T where Box: Foo { } - } - - goal { - exists { T: Foo } - } first 5 with max 50 { - r"[]" - } - } -} - -#[test] -fn flounder() { - test! 
{ - program { - trait A { } - - struct Vec { } - impl A for Vec { } - } - - goal { - // This goal "flounders" because it has a free existential - // variable. We choose to replace it with a `CannotProve` - // result. - exists { not { T: A } } - } first 5 with max 10 { - r"[ - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := ?0], - constraints: [] - }, - binders: [ - Ty(U0) - ] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: { - CannotProve( - () - ) - } - } - } - ]" - } - } -} - -// Test that, when solving `?T: Sized`, we only wind up pulling a few -// answers before we stop. -#[test] -fn only_draw_so_many() { - test! { - program { - trait Sized { } - - struct Vec { } - impl Sized for Vec where T: Sized { } - - struct i32 { } - impl Sized for i32 { } - - struct Slice { } - impl Sized for Slice where T: Sized { } - } - - goal { - exists { T: Sized } - } fixed 2 with max 10 { - "Some(Ambig(Unknown))" - } - } -} - -#[test] -fn only_draw_so_many_blow_up() { - test! { - program { - trait Sized { } - trait Foo { } - - struct Vec { } - impl Sized for Vec where T: Sized { } - impl Foo for Vec where T: Sized { } - - struct i32 { } - impl Sized for i32 { } - - struct Slice { } - impl Sized for Slice where T: Sized { } - } - - goal { - exists { T: Foo } - } fixed 2 with max 10 { - "Some(Ambig(Definite(Canonical { value: [?0 := Vec<^0>], binders: [Ty(U0)] })))" - } - } -} - -/// Here, P and Q depend on one another through a negative loop. -#[test] -fn negative_loop() { - test! 
{ - program { - trait P { } - trait Q { } - struct u32 { } - - forall<> { u32: P if not { u32: Q } } - forall<> { u32: Q if not { u32: P } } - } - - goal { - u32: P - } first 5 with max 3 { - r"[ - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: { - Negative( - TableIndex(1) - ) - } - } - } - ]" - } - } -} - -#[test] -fn subgoal_cycle_uninhabited() { - test! { - program { - trait Foo { } - struct Box { } - struct Vec { } - struct u32 { } - impl Foo for Box where Box>: Foo { } - } - - // There is no solution here with a finite proof, so we get - // back: 0 answer(s) found. - goal { - exists { T: Foo } - } first 10 with max 2 { - r"[]" - } - - // Unsurprisingly, applying negation succeeds then. - goal { - not { exists { T: Foo } } - } first 10 with max 2 { - r"[ - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - } - ]" - } - - // Eqiuvalent to the previous. - goal { - forall { not { T: Foo } } - } first 10 with max 2 { - r"[ - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - } - ]" - } - - // However, if we come across a negative goal that exceeds our - // size threshold, we have a problem. - goal { - exists { T = Vec, not { Vec>: Foo } } - } first 10 with max 3 { - r"[ - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := Vec], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: { - CannotProve( - () - ) - } - } - } - ]" - } - - // Same query with larger threshold works fine, though. 
- goal { - exists { T = Vec, not { Vec>: Foo } } - } first 10 with max 4 { - r"[ - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := Vec], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - } - ]" - } - - // Here, due to the hypothesis, there does indeed exist a suitable T, `U`. - goal { - forall { if (U: Foo) { exists { T: Foo } } } - } first 10 with max 2 { - r"[ - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := !1_0], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - } - ]" - } - } -} - -#[test] -fn subgoal_cycle_inhabited() { - test! { - program { - trait Foo { } - struct Box { } - struct Vec { } - struct u32 { } - impl Foo for Box where Box>: Foo { } - impl Foo for u32 { } - } - - goal { - exists { T: Foo } - } first 10 with max 3 { - r"[ - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := u32], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - } - ]" - } - } -} - -#[test] -fn basic_region_constraint_from_positive_impl() { - test! { - program { - trait Foo { } - struct Ref<'a, 'b, T> { } - struct u32 { } - impl<'x, T> Foo for Ref<'x, 'x, T> { } - } - - goal { - forall<'a, 'b, T> { Ref<'a, 'b, T>: Foo } - } first 10 with max 3 { - r"[ - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [], - constraints: [ - InEnvironment { - environment: Env([]), - goal: '!1_1 == '!1_0 - } - ] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - } - ]" - } - } -} - -#[test] -#[allow(non_snake_case)] -fn example_2_1_EWFS() { - test! 
{ - program { - trait Edge { } - trait TransitiveClosure { } - struct a { } - struct b { } - struct c { } - - forall<> { a: Edge } - forall<> { b: Edge } - forall<> { b: Edge } - forall { X: TransitiveClosure if X: Edge } - forall { X: TransitiveClosure if X: Edge, Z: TransitiveClosure } - } - - goal { - exists { a: TransitiveClosure } - } first 10 with max 3 { - r"[ - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := b], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - }, - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := c], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - }, - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := a], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - } - ]" - } - } -} - -#[test] -#[allow(non_snake_case)] -fn example_2_2_EWFS() { - test! { - program { - trait M { } - trait P { } - trait Q { } - - struct a { } - struct b { } - struct c { } - - forall { X: M if not { X: P } } - forall<> { a: P } - forall { X: P if X: Q } - forall<> { b: Q } - forall { X: Q if X: P } - } - - goal { - c: M - } first 10 with max 3 { - r"[ - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - } - ]" - } - } -} - -#[test] -#[allow(non_snake_case)] -fn example_2_3_EWFS() { - test! 
{ - program { - trait W { } - trait M { } - trait P { } - - struct a { } - struct b { } - struct c { } - - forall { X: W if X: M, not { Y: W }, Y: P } - forall<> { a: M } - forall<> { b: M } - forall<> { c: M } - forall<> { b: P } - } - - goal { - a: W - } first 10 with max 3 { - r"[ - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - } - ]" - } - } -} - -#[test] -#[allow(non_snake_case)] -fn example_3_3_EWFS() { - test! { - program { - trait S { } - trait P { } - trait Q { } - - struct a { } - - forall<> { a: S if not { a: P }, not { a: Q } } - forall<> { a: P if not { a: S }, a: Q } - forall<> { a: Q if not { a: S }, a: P } - } - - goal { - a: S - } first 10 with max 3 { - // We don't yet have support for **simplification** -- - // hence we delay the negatives here but that's it. - r"[ - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: { - Negative( - TableIndex(1) - ), - Negative( - TableIndex(6) - ) - } - } - } - ]" - } - } -} - -/// Here, P is neither true nor false. If it were true, then it would -/// be false, and so forth. -#[test] -fn contradiction() { - test! { - program { - trait P { } - struct u32 { } - - forall<> { u32: P if not { u32: P } } - } - - goal { - u32: P - } first 10 with max 3 { - r"[ - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: { - Negative( - TableIndex(0) - ) - } - } - } - ]" - } - } -} - -/// Test (along with the other `cached_answers` tests) that the -/// ordering in which we we encounter clauses doesn't affect the final -/// set of answers we get. 
In particular, all of them should get 5 -/// answers, but in Ye Olde Days Of Yore there were sometimes bugs -/// that came up when replaying tabled answers that led to fewer -/// answers being produced. -/// -/// This test is also a test for ANSWER ABSTRACTION: the only reason -/// we get 5 answers is because of the max size of 2. -#[test] -fn cached_answers_1() { - test! { - program { - trait Sour { } - struct Lemon { } - struct Vinegar { } - struct HotSauce { } - - // Use explicit program clauses here rather than traits - // and impls to avoid hashmaps and other things that - // sometimes alter the final order of the program clauses: - forall<> { Lemon: Sour } - forall<> { Vinegar: Sour } - forall { HotSauce: Sour if T: Sour } - } - - goal { - exists { T: Sour } - } first 10 with max 2 { - r"[ - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := Lemon], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - }, - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := Vinegar], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - }, - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := HotSauce], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - }, - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := HotSauce], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - }, - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := HotSauce], - constraints: [] - }, - binders: [ - Ty(U0) - ] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: { - CannotProve( - () - ) - } - } - } - ]" - } - } -} - -/// See `cached_answers_1`. -#[test] -fn cached_answers_2() { - test! 
{ - program { - trait Sour { } - struct Lemon { } - struct Vinegar { } - struct HotSauce { } - - forall { HotSauce: Sour if T: Sour } - forall<> { Lemon: Sour } - forall<> { Vinegar: Sour } - } - - goal { - exists { T: Sour } - } first 10 with max 2 { - r"[ - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := Lemon], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - }, - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := Vinegar], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - }, - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := HotSauce], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - }, - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := HotSauce], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - }, - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := HotSauce], - constraints: [] - }, - binders: [ - Ty(U0) - ] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: { - CannotProve( - () - ) - } - } - } - ]" - } - } -} - -/// See `cached_answers_1`. -#[test] -fn cached_answers_3() { - test! 
{ - program { - trait Sour { } - struct Lemon { } - struct Vinegar { } - struct HotSauce { } - - forall<> { Lemon: Sour } - forall { HotSauce: Sour if T: Sour } - forall<> { Vinegar: Sour } - } - - goal { - exists { T: Sour } - } first 10 with max 2 { - r"[ - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := Lemon], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - }, - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := HotSauce], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - }, - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := Vinegar], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - }, - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := HotSauce], - constraints: [] - }, - binders: [ - Ty(U0) - ] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: { - CannotProve( - () - ) - } - } - }, - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := HotSauce], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - } - ]" - } - } -} - -/// Here, P depends on Q negatively, but Q depends only on itself. -/// What happens is that P adds a negative link on Q, so that when Q -/// delays, P is also delayed. -#[test] -fn negative_answer_delayed_literal() { - test! 
{ - program { - trait P { } - trait Q { } - struct u32 { } - - forall<> { u32: P if not { u32: Q } } - forall<> { u32: Q if not { u32: Q } } - } - - goal { - u32: P - } first 10 with max 3 { - r"[ - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: { - Negative( - TableIndex(1) - ) - } - } - } - ]" - } - } -} diff --git a/chalk-solve/src/solve/test.rs b/chalk-solve/src/solve/test.rs deleted file mode 100644 index d64c06968df..00000000000 --- a/chalk-solve/src/solve/test.rs +++ /dev/null @@ -1,2848 +0,0 @@ -#![cfg(test)] - -use chalk_engine::fallible::{Fallible, NoSolution}; -use ir; -use ir::solve::{Solution, SolverChoice}; -use std::collections::HashMap; -use std::sync::Arc; -use test_util::*; - -mod bench; - -fn result_to_string(result: &Fallible>) -> String { - match result { - Ok(Some(v)) => format!("{}", v), - Ok(None) => format!("No possible solution"), - Err(NoSolution) => format!("Error"), - } -} - -fn assert_result(result: &Fallible>, expected: &str) { - let result = result_to_string(result); - - println!("expected:\n{}", expected); - println!("actual:\n{}", result); - - let expected1: String = expected.chars().filter(|w| !w.is_whitespace()).collect(); - let result1: String = result.chars().filter(|w| !w.is_whitespace()).collect(); - assert!(!expected1.is_empty() && result1.starts_with(&expected1)); -} - -macro_rules! 
test { - (program $program:tt $($goals:tt)*) => { - test!(@program[$program] - @parsed_goals[] - @unparsed_goals[$($goals)*]) - }; - - (@program[$program:tt] @parsed_goals[$($parsed_goals:tt)*] @unparsed_goals[]) => { - solve_goal(stringify!($program), vec![$($parsed_goals),*]) - }; - - // goal { G } yields { "Y" } -- test both solvers behave the same (the default) - (@program[$program:tt] @parsed_goals[$($parsed_goals:tt)*] @unparsed_goals[ - goal $goal:tt yields { $expected:expr } - $($unparsed_goals:tt)* - ]) => { - test!(@program[$program] - @parsed_goals[ - $($parsed_goals)* - (stringify!($goal), SolverChoice::default(), $expected) - ] - @unparsed_goals[$($unparsed_goals)*]) - }; - - // goal { G } yields[C1] { "Y1" } yields[C2] { "Y2" } -- test that solver C1 yields Y1 - // and C2 yields Y2 - // - // Annoyingly, to avoid getting a parsing ambiguity error, we have - // to distinguish the case where there are other goals to come - // (this rule) for the last goal in the list (next rule). There - // might be a more elegant fix than copy-and-paste but this works. - (@program[$program:tt] @parsed_goals[$($parsed_goals:tt)*] @unparsed_goals[ - goal $goal:tt $(yields[$($C:expr),+] { $expected:expr })* - goal $($unparsed_goals:tt)* - ]) => { - test!(@program[$program] - @parsed_goals[$($parsed_goals)* - $($((stringify!($goal), $C, $expected))+)+] - @unparsed_goals[goal $($unparsed_goals)*]) - }; - - // same as above, but for the final goal in the list. 
- (@program[$program:tt] @parsed_goals[$($parsed_goals:tt)*] @unparsed_goals[ - goal $goal:tt $(yields[$($C:expr),+] { $expected:expr })* - ]) => { - test!(@program[$program] - @parsed_goals[$($parsed_goals)* - $($((stringify!($goal), $C, $expected))+)+] - @unparsed_goals[]) - }; -} - -fn solve_goal(program_text: &str, goals: Vec<(&str, SolverChoice, &str)>) { - println!("program {}", program_text); - assert!(program_text.starts_with("{")); - assert!(program_text.ends_with("}")); - let mut program_env_cache = HashMap::new(); - for (goal_text, solver_choice, expected) in goals { - let (program, env) = program_env_cache.entry(solver_choice).or_insert_with(|| { - let program_text = &program_text[1..program_text.len() - 1]; // exclude `{}` - let program = - Arc::new(parse_and_lower_program(program_text, solver_choice).unwrap()); - let env = Arc::new(program.environment()); - (program, env) - }); - - ir::tls::set_current_program(&program, || { - println!("----------------------------------------------------------------------"); - println!("goal {}", goal_text); - assert!(goal_text.starts_with("{")); - assert!(goal_text.ends_with("}")); - let goal = parse_and_lower_goal(&program, &goal_text[1..goal_text.len() - 1]).unwrap(); - - println!("using solver: {:?}", solver_choice); - let peeled_goal = goal.into_peeled_goal(); - let result = solver_choice.solve_root_goal(&env, &peeled_goal); - assert_result(&result, expected); - }); - } -} - -#[test] -fn prove_clone() { - test! 
{ - program { - struct Foo { } - struct Bar { } - struct Vec { } - trait Clone { } - impl Clone for Vec where T: Clone { } - impl Clone for Foo { } - } - - goal { - Vec: Clone - } yields { - "Unique; substitution [], lifetime constraints []" - } - - goal { - Foo: Clone - } yields { - "Unique; substitution [], lifetime constraints []" - } - - goal { - Bar: Clone - } yields { - "No possible solution" - } - - goal { - Vec: Clone - } yields { - "No possible solution" - } - } -} - -#[test] -fn inner_cycle() { - // Interesting test that shows why recursive solver needs to run - // to an inner fixed point during iteration. Here, the first - // round, we get that `?T: A` has a unique sol'n `?T = i32`. On - // the second round, we ought to get ambiguous: but if we don't - // run the `?T: B` to a fixed point, it will terminate with `?T = - // i32`, leading to an (incorrect) unique solution. - test! { - program { - #[marker] - trait A { } - #[marker] - trait B { } - - struct i32 { } - struct Vec { } - - impl A for T where T: B { } - impl A for i32 { } - - impl B for T where T: A { } - impl B for Vec where T: B { } - } - - goal { - exists { T: A } - } yields { - "Ambiguous" - } - } -} - -#[test] -fn prove_infer() { - test! { - program { - struct Foo { } - struct Bar { } - trait Map { } - impl Map for Foo { } - impl Map for Bar { } - } - - goal { - exists { A: Map } - } yields { - "Ambiguous; no inference guidance" - } - - goal { - exists { A: Map } - } yields { - "Unique; substitution [?0 := Foo], lifetime constraints []" - } - - goal { - exists { Foo: Map } - } yields { - "Unique; substitution [?0 := Bar], lifetime constraints []" - } - } -} - -#[test] -fn prove_forall() { - test! 
{ - program { - struct Foo { } - struct Vec { } - - trait Marker { } - impl Marker for Vec { } - - trait Clone { } - impl Clone for Foo { } - - impl Clone for Vec where T: Clone { } - } - - goal { - forall { T: Marker } - } yields { - "No possible solution" - } - - goal { - forall { not { T: Marker } } - } yields { - "No" - } - - goal { - not { forall { T: Marker } } - } yields { - "Unique" - } - - // If we assume `T: Marker`, then obviously `T: Marker`. - goal { - forall { if (T: Marker) { T: Marker } } - } yields { - "Unique; substitution [], lifetime constraints []" - } - - // We don't have to know anything about `T` to know that - // `Vec: Marker`. - goal { - forall { Vec: Marker } - } yields { - "Unique; substitution [], lifetime constraints []" - } - - // Here, we don't know that `T: Clone`, so we can't prove that - // `Vec: Clone`. - goal { - forall { Vec: Clone } - } yields { - "No possible solution" - } - - // Here, we do know that `T: Clone`, so we can. - goal { - forall { - if (T: Clone) { - Vec: Clone - } - } - } yields { - "Unique; substitution [], lifetime constraints []" - } - } -} - -#[test] -fn higher_ranked() { - test! { - program { - struct u8 { } - struct SomeType { } - trait Foo { } - impl Foo for SomeType { } - } - - goal { - exists { - forall { - SomeType: Foo - } - } - } yields { - "Unique; substitution [?0 := u8], lifetime constraints []" - } - } -} - -#[test] -fn ordering() { - test! { - program { - trait Foo { } - impl Foo for U { } - } - - goal { - exists { - forall { - U: Foo - } - } - } yields { - "No possible solution" - } - } -} - -#[test] -fn cycle_no_solution() { - test! { - program { - trait Foo { } - struct S { } - impl Foo for S where T: Foo { } - } - - // only solution: infinite type S { - T: Foo - } - } yields { - "No possible solution" - } - } -} - -#[test] -fn cycle_many_solutions() { - test! 
{ - program { - trait Foo { } - struct S { } - struct i32 { } - impl Foo for S where T: Foo { } - impl Foo for i32 { } - } - - // infinite family of solutions: {i32, S, S>, ... } - goal { - exists { - T: Foo - } - } yields { - "Ambiguous; no inference guidance" - } - } -} - -#[test] -fn cycle_unique_solution() { - test! { - program { - trait Foo { } - trait Bar { } - struct S { } - struct i32 { } - impl Foo for S where T: Foo, T: Bar { } - impl Foo for i32 { } - } - - goal { - exists { - T: Foo - } - } yields { - "Unique; substitution [?0 := i32]" - } - } -} - -#[test] -fn multiple_ambiguous_cycles() { - test! { - program { - trait WF { } - trait Sized { } - - struct Vec { } - struct Int { } - - impl Sized for Int { } - impl WF for Int { } - - impl WF for Vec where T: Sized { } - impl Sized for Vec where T: WF, T: Sized { } - } - - // ?T: WF - // | - // | - // | - // Int: WF. <-----> (Vec: WF) :- (?T: Sized) - // | - // | - // | - // Int: Sized. <-------> (Vec: Sized) :- (?T: Sized), (?T: WF) - // | | - // | | - // | | - // cycle cycle - // - // Depending on the evaluation order of the above tree (which cycle we come upon first), - // we may fail to reach a fixed point if we loop continuously because `Ambig` does not perform - // any unification. We must stop looping as soon as we encounter `Ambig`. In fact without - // this strategy, the above program will not even be loaded because of the overlap check which - // will loop forever. - goal { - exists { - T: WF - } - } yields { - "Ambig" - } - } -} - -#[test] -#[should_panic] -fn overflow() { - test! { - program { - trait Q { } - struct Z { } - struct G - struct S - - impl Q for Z { } - impl Q for G where X: Q { } - impl Q for S where X: Q, S>: Q { } - } - - // Will try to prove S>: Q then S>>: Q etc ad infinitum - goal { - S: Q - } yields { - "" - } - } -} - -#[test] -fn normalize_basic() { - test! 
{ - program { - trait Iterator { type Item; } - struct Vec { } - struct u32 { } - impl Iterator for Vec { - type Item = T; - } - } - - goal { - forall { - exists { - Normalize( as Iterator>::Item -> U) - } - } - } yields { - "Unique; substitution [?0 := !1_0], lifetime constraints []" - } - - goal { - forall { - Vec: Iterator - } - } yields { - "Unique; substitution [], lifetime constraints []" - } - - goal { - forall { - if (T: Iterator) { - ::Item = u32 - } - } - } yields { - "Unique; substitution []" - } - - goal { - forall { - if (T: Iterator) { - exists { - T: Iterator - } - } - } - } yields { - "Unique; substitution [?0 := (Iterator::Item)]" - } - - goal { - forall { - if (T: Iterator) { - exists { - T: Iterator - } - } - } - } yields { - "Unique; substitution [?0 := (Iterator::Item)]" - } - - goal { - forall { - if (T: Iterator) { - ::Item = ::Item - } - } - } yields { - "Unique" - } - - goal { - forall { - if (T: Iterator) { - exists { - ::Item = ::Item - } - } - } - } yields { - // True for `U = T`, of course, but also true for `U = Vec`. - "Ambiguous" - } - } -} - -#[test] -fn normalize_gat1() { - test! { - program { - struct Vec { } - - trait Iterable { - type Iter<'a>; - } - - impl Iterable for Vec { - type Iter<'a> = Iter<'a, T>; - } - - trait Iterator { - type Item; - } - - struct Iter<'a, T> { } - struct Ref<'a, T> { } - - impl<'a, T> Iterator for Iter<'a, T> { - type Item = Ref<'a, T>; - } - } - - goal { - forall { - forall<'a> { - exists { - Normalize( as Iterable>::Iter<'a> -> U) - } - } - } - } yields { - "Unique; substitution [?0 := Iter<'!2_0, !1_0>], lifetime constraints []" - } - } -} - -#[test] -fn normalize_gat2() { - test! 
{ - program { - trait StreamingIterator { type Item<'a>; } - struct Span<'a, T> { } - struct StreamIterMut { } - struct u32 { } - impl StreamingIterator for StreamIterMut { - type Item<'a> = Span<'a, T>; - } - } - - goal { - forall<'a, T> { - exists { - Normalize( as StreamingIterator>::Item<'a> -> U) - } - } - } yields { - "Unique; substitution [?0 := Span<'!1_0, !1_1>], lifetime constraints []" - } - - goal { - forall<'a, T> { - as StreamingIterator>::Item<'a> = Span<'a, T> - } - } yields { - "Unique; substitution [], lifetime constraints []" - } - - goal { - forall<'a, T, U> { - if (T: StreamingIterator = Span<'a, U>>) { - >::Item<'a> = Span<'a, U> - } - } - } yields { - "Unique; substitution [], lifetime constraints []" - } - } -} - -#[test] -fn normalize_gat_with_where_clause() { - test! { - program { - trait Sized { } - trait Foo { - type Item where T: Sized; - } - - struct Value { } - struct Sometype { } - impl Foo for Sometype { - type Item = Value; - } - } - - goal { - forall { - exists { - Normalize(::Item -> U) - } - } - } yields { - "No possible solution" - } - - goal { - forall { - exists { - if (T: Sized) { - Normalize(::Item -> U) - } - } - } - } yields { - "Unique; substitution [?0 := Value]" - } - } -} - -#[test] -fn normalize_gat_with_where_clause2() { - test! { - program { - trait Bar { } - trait Foo { - type Item where U: Bar; - } - - struct i32 { } - impl Foo for i32 { - type Item = U; - } - } - - goal { - forall { - exists { - Normalize(>::Item -> V) - } - } - } yields { - "No possible solution" - } - - goal { - forall { - exists { - if (U: Bar) { - Normalize(>::Item -> V) - } - } - } - } yields { - "Unique; substitution [?0 := !1_1]" - } - } -} - -#[test] -fn normalize_gat_with_higher_ranked_trait_bound() { - test! 
{ - program { - trait Foo<'a, T> { } - struct i32 { } - - trait Bar<'a, T> { - type Item: Foo<'a, T> where forall<'b> V: Foo<'b, T>; - } - - impl<'a, T> Foo<'a, T> for i32 { } - impl<'a, T> Bar<'a, T> for i32 { - type Item = i32; - } - } - - goal { - forall<'a, T, V> { - if (forall<'b> { V: Foo<'b, T> }) { - exists { - Normalize(>::Item -> U) - } - } - } - } yields { - "Unique; substitution [?0 := i32], lifetime constraints []" - } - } -} - -#[test] -fn implied_bounds() { - test! { - program { - trait Clone { } - trait Iterator where Self: Clone { type Item; } - struct u32 { } - } - - goal { - forall { - if (T: Iterator) { - T: Clone - } - } - } yields { - "Unique; substitution []" - } - } -} - -#[test] -fn gat_implied_bounds() { - test! { - program { - trait Clone { } - trait Foo { type Item: Clone; } - struct u32 { } - } - - goal { - forall { - if (T: Foo = V>) { - V: Clone - } - } - } yields { - "Unique; substitution []" - } - } - - test! { - program { - trait Clone { } - trait Foo { type Item; } - struct u32 { } - } - - goal { - forall { - if (T: Foo = V>) { - // Without the bound Item: Clone, there is no way to infer this. - V: Clone - } - } - } yields { - "No possible solution" - } - } - - test! { - program { - trait Fn { } - struct Ref<'a, T> { } - trait Sized { } - - trait Foo { - type Item: forall<'a> Fn> + Sized; - } - } - - goal { - forall { - if (Type: Foo) { - forall<'a, T> { - ::Item: Fn> - } - } - } - } yields { - "Unique" - } - } -} - -#[test] -fn implied_from_env() { - test! { - program { - trait Clone { } - trait Foo { type Item; } - } - - goal { - forall { - if (FromEnv(>::Item)) { - FromEnv(T: Foo) - } - } - } yields { - "Unique" - } - - goal { - forall { - if (FromEnv(>::Item)) { - FromEnv(T: Clone) - } - } - } yields { - "No possible solution" - } - } -} - -/// Demonstrates that, given the expected value of the associated -/// type, we can use that to narrow down the relevant impls. -#[test] -fn normalize_rev_infer() { - test! 
{ - program { - trait Identity { type Item; } - struct u32 { } - struct i32 { } - impl Identity for u32 { type Item = u32; } - impl Identity for i32 { type Item = i32; } - } - - goal { - exists { - T: Identity - } - } yields { - "Unique; substitution [?0 := u32]" - } - } -} - -/// Demonstrates that, given the expected value of the associated -/// type, we can use that to narrow down the relevant impls. -#[test] -fn normalize_rev_infer_gat() { - test! { - program { - trait Combine { type Item; } - struct u32 { } - struct i32 { } - struct Either { } - impl Combine for u32 { type Item = Either; } - impl Combine for i32 { type Item = Either; } - } - - goal { - exists { - T: Combine = Either> - } - } yields { - // T is ?1 and U is ?0, so this is surprising, but correct! (See #126.) - "Unique; substitution [?0 := i32, ?1 := u32]" - } - } -} - -/// Demonstrates that, given the expected value of the associated -/// type, we can use that to narrow down the relevant impls. -#[test] -fn region_equality() { - test! { - program { - trait Eq { } - impl Eq for T { } - - struct Unit { } - struct Ref<'a, T> { } - } - - goal { - forall<'a, 'b> { - Ref<'a, Unit>: Eq> - } - } yields { - "Unique; substitution [], - lifetime constraints \ - [InEnvironment { environment: Env([]), goal: '!1_1 == '!1_0 }] - " - } - - goal { - forall<'a> { - exists<'b> { - Ref<'a, Unit>: Eq> - } - } - } yields { - "Unique; substitution [?0 := '!1_0], lifetime constraints []" - } - } -} - -/// Demonstrates that, given the expected value of the associated -/// type, we can use that to narrow down the relevant impls. -#[test] -fn forall_equality() { - test! { - program { - trait Eq { } - impl Eq for T { } - - struct Unit { } - struct Ref<'a, T> { } - } - - goal { - // A valid equality; we get back a series of solvable - // region constraints, since each region variable must - // refer to exactly one placeholder region, and they are - // all in a valid universe to do so (universe 4). 
- for<'a, 'b> Ref<'a, Ref<'b, Unit>>: Eq Ref<'c, Ref<'d, Unit>>> - } yields { - "Unique; substitution [], lifetime constraints []" - } - - goal { - // Note: this equality is false, but we get back successful; - // this is because the region constraints are unsolvable. - // - // Note that `?0` (in universe 2) must be equal to both - // `!1_0` and `!1_1`, which of course it cannot be. - for<'a, 'b> Ref<'a, Ref<'b, Ref<'a, Unit>>>: Eq< - for<'c, 'd> Ref<'c, Ref<'d, Ref<'d, Unit>>>> - } yields { - "Unique; substitution [], lifetime constraints [ - InEnvironment { environment: Env([]), goal: '!1_1 == '!1_0 } - ]" - } - } -} - -/// Demonstrates that, given the expected value of the associated -/// type, we can use that to narrow down the relevant impls. -#[test] -fn forall_projection() { - test! { - program { - trait Eq { } - impl Eq for T { } - - trait DropLt<'a> { type Item; } - impl<'a, T> DropLt<'a> for T { type Item = T; } - - struct Unit { } - struct Ref<'a, T> { } - } - - goal { - for<'a> >::Item: Eq - } yields { - "Unique; substitution [], lifetime constraints []" - } - } -} - -/// Demonstrates that, given the expected value of the associated -/// type, we can use that to narrow down the relevant impls. -#[test] -fn forall_projection_gat() { - test! 
{ - program { - trait Eq { } - impl Eq for T { } - - trait Sized { } - - trait DropOuter<'a> { type Item where U: Sized; } - impl<'a, T> DropOuter<'a> for T { type Item = T; } - - struct Unit { } - struct Ref<'a, T> { } - } - - goal { - forall { - for<'a> >::Item: Eq - } - } yields { - "No possible solution" - } - - goal { - forall { - if (T: Sized) { - for<'a> >::Item: Eq - } - } - } yields { - "Unique; substitution [], lifetime constraints []" - } - - goal { - forall<'a, T> { - WellFormed(>::Item) - } - } yields { - "No possible solution" - } - - goal { - forall { - if (T: Sized) { - WellFormed(for<'a> >::Item: Eq) - } - } - } yields { - "Unique; substitution [], lifetime constraints []" - } - } -} - -#[test] -fn struct_wf() { - test! { - program { - struct Foo where T: Eq { } - struct Bar { } - struct Baz { } - - trait Eq { } - - impl Eq for Baz { } - impl Eq for Foo where T: Eq { } - } - - goal { - WellFormed(Foo) - } yields { - "No possible solution" - } - - goal { - WellFormed(Foo) - } yields { - "Unique; substitution [], lifetime constraints []" - } - - goal { - WellFormed(Foo>) - } yields { - "Unique; substitution [], lifetime constraints []" - } - } -} - -#[test] -fn generic_trait() { - test! { - program { - struct Int { } - struct Uint { } - - trait Eq { } - - impl Eq for Int { } - impl Eq for Uint { } - } - - goal { - Int: Eq - } yields { - "Unique; substitution [], lifetime constraints []" - } - - goal { - Uint: Eq - } yields { - "Unique; substitution [], lifetime constraints []" - } - - goal { - Int: Eq - } yields { - "No possible solution" - } - } -} - -#[test] -fn normalize_under_binder() { - test! 
{ - program { - struct Ref<'a, T> { } - struct I32 { } - - trait Deref<'a> { - type Item; - } - - trait Id<'a> { - type Item; - } - - impl<'a, T> Deref<'a> for Ref<'a, T> { - type Item = T; - } - - impl<'a, T> Id<'a> for Ref<'a, T> { - type Item = Ref<'a, T>; - } - } - - goal { - exists { - forall<'a> { - Ref<'a, I32>: Deref<'a, Item = U> - } - } - } yields { - "Ambiguous" - } - - goal { - exists { - forall<'a> { - Normalize( as Deref<'a>>::Item -> U) - } - } - } yields { - "Unique; substitution [?0 := I32], lifetime constraints []" - } - - goal { - forall<'a> { - exists { - Ref<'a, I32>: Id<'a, Item = U> - } - } - } yields { - "Ambiguous" - } - - goal { - forall<'a> { - exists { - Normalize( as Id<'a>>::Item -> U) - } - } - } yields { - "Unique; substitution [?0 := Ref<'!1_0, I32>], lifetime constraints []" - } - - goal { - exists { - forall<'a> { - Normalize( as Id<'a>>::Item -> U) - } - } - } yields { - "Unique; for { \ - substitution [?0 := Ref<'?0, I32>], \ - lifetime constraints [InEnvironment { environment: Env([]), goal: '?0 == '!1_0 }] \ - }" - } - } -} - -#[test] -fn unify_quantified_lifetimes() { - test! { - program { - } - - // Check that `'a` (here, `'?0`) is not unified - // with `'!1`, because they belong to incompatible - // universes. - goal { - exists<'a> { - forall<'b> { - 'a = 'b - } - } - } yields { - "Unique; for { \ - substitution [?0 := '?0], \ - lifetime constraints [InEnvironment { environment: Env([]), goal: '?0 == '!1_0 }] \ - }" - } - - // Similar to the previous test, but indirect. - goal { - exists<'a> { - forall<'b> { - exists<'c> { - 'a = 'c, - 'c = 'b - } - } - } - } yields { - "Unique; for { \ - substitution [?0 := '?0, ?1 := '!1_0], \ - lifetime constraints [InEnvironment { environment: Env([]), goal: '?0 == '!1_0 }] \ - }" - } - } -} - -#[test] -fn equality_binder() { - test! 
{ - program { - struct Ref<'a, T> { } - } - - // Check that `'a` (here, `'?0`) is not unified - // with `'!1`, because they belong to incompatible - // universes. - goal { - forall { - exists<'a> { - for<'c> Ref<'c, T> = Ref<'a, T> - } - } - } yields { - "Unique; for { \ - substitution [?0 := '?0], \ - lifetime constraints [InEnvironment { environment: Env([]), goal: '!2_0 == '?0 }] \ - }" - } - } -} - -#[test] -fn mixed_indices_unify() { - test! { - program { - struct Ref<'a, T> { } - } - - goal { - exists { - exists<'a> { - exists { - Ref<'a, T> = Ref<'a, U> - } - } - } - } yields { - "Unique; for { \ - substitution [?0 := '?0, ?1 := ?1, ?2 := ?1], \ - lifetime constraints []\ - }" - } - } -} - -#[test] -fn mixed_indices_match_program() { - test! { - program { - struct S { } - struct Bar<'a, T, U> { } - trait Foo {} - impl<'a> Foo for Bar<'a, S, S> {} - } - - goal { - exists { - exists<'a> { - exists { - Bar<'a, T, U>: Foo - } - } - } - } yields { - "Unique; for { \ - substitution [?0 := '?0, ?1 := S, ?2 := S], \ - lifetime constraints [] \ - }" - } - } -} - -#[test] -fn mixed_indices_normalize_application() { - test! { - program { - struct Ref<'a, T> { } - trait Foo { - type T; - } - - impl Foo for Ref<'a, U> { - type T = U; - } - } - - goal { - exists { - exists<'a> { - exists { - Normalize( as Foo>::T -> U) - } - } - } - } yields { - "Unique; for { substitution [?0 := '?0, ?1 := ?1, ?2 := ?1], " - } - } -} - -#[test] -fn mixed_indices_normalize_gat_application() { - test! { - program { - struct Either { } - struct Ref<'a, T> { } - trait Foo { - type T; - } - - impl Foo for Ref<'a, U> { - type T = Either; - } - } - - goal { - exists { - Normalize( as Foo>::T -> Either) - } - } yields { - // Our GAT parameter is mapped to ?0; all others appear left to right - // in our Normalize(...) goal. 
- "Unique; for { \ - substitution [?0 := ?0, ?1 := '?1, ?2 := ?2, ?3 := ?0, ?4 := ?2], " - } - } -} - -#[test] -// Test that we properly detect failure even if there are applicable impls at -// the top level, if we can't find anything to fill in those impls with -fn deep_failure() { - test! { - program { - struct Foo {} - trait Bar {} - trait Baz {} - - impl Bar for Foo where T: Baz {} - } - - goal { - exists { T: Baz } - } yields { - "No possible solution" - } - - goal { - exists { Foo: Bar } - } yields { - "No possible solution" - } - } -} - -#[test] -// Test that we infer a unique solution even if it requires multiple levels of -// search to do so -fn deep_success() { - test! { - program { - struct Foo {} - struct ImplsBaz {} - trait Bar {} - trait Baz {} - - impl Baz for ImplsBaz {} - impl Bar for Foo where T: Baz {} - } - - goal { - exists { Foo: Bar } - } yields { - "Unique; substitution [?0 := ImplsBaz]" - } - } -} - -#[test] -fn definite_guidance() { - test! { - program { - trait Display {} - trait Debug {} - struct Foo {} - struct Bar {} - struct Baz {} - - impl Display for Bar {} - impl Display for Baz {} - - impl Debug for Foo where T: Display {} - } - - goal { - exists { - T: Debug - } - } yields { - "Ambiguous; definite substitution for { [?0 := Foo] }" - } - } -} - -#[test] -fn suggested_subst() { - test! 
{ - program { - trait SomeTrait {} - struct Foo {} - struct Bar {} - struct i32 {} - struct bool {} - impl SomeTrait for Foo {} - impl SomeTrait for Bar {} - impl SomeTrait for Bar {} - } - - goal { - exists { - Foo: SomeTrait - } - } yields { - "Unique; substitution [?0 := i32]" - } - - goal { - exists { - if (i32: SomeTrait) { - i32: SomeTrait - } - } - } yields { - "Unique; substitution [?0 := bool]" - } - - goal { - exists { - if (i32: SomeTrait) { - Foo: SomeTrait - } - } - } yields { - "Unique; substitution [?0 := i32]" - } - - goal { - exists { - if (Foo: SomeTrait) { - Foo: SomeTrait - } - } - } yields { - "Unique; substitution [?0 := i32]" - } - - goal { - exists { - if (Foo: SomeTrait) { - Foo: SomeTrait - } - } - } yields { - // FIXME: we need to rework the "favor environment" heuristic. - // Should be: "Ambiguous; suggested substitution [?0 := bool]" - "Ambiguous; no inference guidance" - } - - goal { - exists { - if (Foo: SomeTrait) { - if (Foo: SomeTrait) { - Foo: SomeTrait - } - } - } - } yields { - "Ambiguous; no inference guidance" - } - - goal { - exists { - Bar: SomeTrait - } - } yields { - "Ambiguous; no inference guidance" - } - - goal { - exists { - if (Bar: SomeTrait) { - Bar: SomeTrait - } - } - } yields { - // FIXME: same as above, should be: "Ambiguous; suggested substitution [?0 := bool]" - "Ambiguous; no inference guidance" - } - - goal { - exists { - if (Bar: SomeTrait) { - if (Bar: SomeTrait) { - Bar: SomeTrait - } - } - } - } yields { - "Ambiguous; no inference guidance" - } - } -} - -#[test] -fn simple_negation() { - test! 
{ - program { - struct i32 {} - trait Foo {} - } - - goal { - not { i32: Foo } - } yields { - "Unique" - } - - goal { - not { - not { i32: Foo } - } - } yields { - "No" - } - - goal { - not { - not { - not { i32: Foo } - } - } - } yields { - "Unique" - } - - goal { - exists { - not { T: Foo } - } - } yields { - "Ambig" - } - - goal { - forall { - not { T: Foo } - } - } yields { - "Unique" - } - - goal { - not { - exists { T: Foo } - } - } yields { - "Unique" - } - - goal { - not { - forall { T: Foo } - } - } yields { - "Unique" - } - } -} - -#[test] -fn deep_negation() { - test! { - program { - struct Foo {} - trait Bar {} - trait Baz {} - - impl Bar for Foo where T: Baz {} - } - - goal { - not { - exists { T: Baz } - } - } yields { - "Unique" - } - - goal { - not { - exists { Foo: Bar } - } - } yields { - "Unique" - } - } -} - -#[test] -fn negation_quantifiers() { - test! { - program { - struct i32 {} - struct u32 {} - } - - goal { - not { - forall { - T = U - } - } - } yields { - "Unique" - } - - goal { - not { - exists { - T = U - } - } - } yields { - "No" - } - - goal { - forall { - not { - T = U - } - } - } yields { - "No" - } - } -} - -#[test] -fn negation_free_vars() { - test! { - program { - struct Vec {} - struct i32 {} - struct u32 {} - trait Foo {} - impl Foo for Vec {} - } - - goal { - exists { - not { Vec: Foo } - } - } yields { - "Ambig" - } - } -} - -#[test] -fn where_clause_trumps() { - test! { - program { - struct Foo { } - - trait Marker { } - impl Marker for Foo { } - } - - goal { - forall { - if (T: Marker) { - T: Marker - } - } - } yields { - "Unique" - } - } -} - -#[test] -fn inapplicable_assumption_does_not_shadow() { - test! { - program { - struct i32 { } - struct u32 { } - - trait Foo { } - - impl Foo for T { } - } - - goal { - forall { - exists { - if (i32: Foo) { - T: Foo - } - } - } - } yields { - "Unique" - } - } -} - -#[test] -fn auto_trait_without_impls() { - test! 
{ - program { - #[auto] trait Send { } - - struct i32 { } - - struct Useless { } - - struct Data { - data: T - } - } - - goal { - i32: Send - } yields { - "Unique" - } - - // No fields so `Useless` is `Send`. - goal { - forall { - Useless: Send - } - } yields { - "Unique" - } - - goal { - forall { - if (T: Send) { - Data: Send - } - } - } yields { - "Unique" - } - } -} - -#[test] -fn auto_trait_with_impls() { - test! { - program { - #[auto] trait Send { } - - struct i32 { } - struct f32 { } - struct Vec { } - - impl Send for Vec where T: Send { } - impl !Send for i32 { } - } - - goal { - i32: Send - } yields { - "No possible solution" - } - - goal { - f32: Send - } yields { - "Unique" - } - - goal { - Vec: Send - } yields { - "No possible solution" - } - - goal { - Vec: Send - } yields { - "Unique" - } - - goal { - forall { - Vec: Send - } - } yields { - "No possible solution" - } - } -} - -#[test] -fn coinductive_semantics() { - test! { - program { - #[auto] trait Send { } - - struct i32 { } - - struct Ptr { } - impl Send for Ptr where T: Send { } - - struct List { - data: T, - next: Ptr> - } - } - - goal { - forall { - List: Send - } - } yields { - "No possible solution" - } - goal { - forall { - if (T: Send) { - List: Send - } - } - } yields { - "Unique" - } - - goal { - List: Send - } yields { - "Unique" - } - - goal { - exists { - T: Send - } - } yields { - "Ambiguous" - } - } -} - -#[test] -fn mixed_semantics() { - test! { - program { - #[auto] trait Send { } - trait Foo { } - - impl Send for T where T: Foo { } - impl Foo for T where T: Send { } - } - - // We have a cycle `(T: Send) :- (T: Foo) :- (T: Send)` with a non-coinductive - // inner component `T: Foo` so we reject it. - goal { - exists { - T: Send - } - } yields { - "No possible solution" - } - - goal { - exists { - T: Foo - } - } yields { - "No possible solution" - } - } -} - -#[test] -fn partial_overlap_2() { - test! 
{ - program { - trait Marker {} - trait Foo {} - trait Bar {} - - struct i32 {} - struct u32 {} - - impl Marker for T where T: Foo {} - impl Marker for T where T: Bar {} - } - - goal { - forall { - if (T: Foo; T: Bar) { - exists { T: Marker } - } - } - } yields { - "Ambiguous" - } - - goal { - forall { - if (T: Foo; T: Bar) { - T: Marker - } - } - } yields { - "Unique" - } - - goal { - forall { - if (T: Foo; T: Bar) { - T: Marker - } - } - } yields { - "Unique" - } - } -} - -#[test] -fn partial_overlap_3() { - test! { - program { - #[marker] trait Marker {} - trait Foo {} - trait Bar {} - - impl Marker for T where T: Foo {} - impl Marker for T where T: Bar {} - - struct i32 {} - impl Foo for i32 {} - impl Bar for i32 {} - } - - goal { - forall { - if (T: Foo; T: Bar) { T: Marker } - } - } yields { - "Unique" - } - - goal { - i32: Marker - } yields { - "Unique" - } - } -} - -#[test] -fn inscope() { - test! { - program { - trait Foo { } - } - - goal { - InScope(Foo) - } yields { - "No possible solution" - } - - goal { - if (InScope(Foo)) { - InScope(Foo) - } - } yields { - "Unique; substitution [], lifetime constraints []" - } - } -} - -#[test] -fn unselected_projection() { - test! 
{ - program { - trait Iterator { - type Item; - } - - trait Iterator2 { - type Item; - } - - struct Chars { } - struct char { } - struct char2 { } - - impl Iterator for Chars { - type Item = char; - } - - impl Iterator2 for Chars { - type Item = char2; - } - } - - goal { - Chars::Item = char - } yields { - "No possible solution" - } - - goal { - if (InScope(Iterator)) { - Chars::Item = char - } - } yields { - "Unique; substitution [], lifetime constraints []" - } - - goal { - exists { - if (InScope(Iterator)) { - Chars::Item = T - } - } - } yields { - "Unique; substitution [?0 := char], lifetime constraints []" - } - - goal { - exists { - if (InScope(Iterator); InScope(Iterator2)) { - Chars::Item = T - } - } - } yields { - "Ambiguous; no inference guidance" - } - } -} - -#[test] -fn unselected_projection_with_gat() { - test! { - program { - trait Foo { - type Item<'a>; - } - - struct Ref<'a, T> { } - struct i32 { } - - impl Foo for i32 { - type Item<'a> = Ref<'a, i32>; - } - } - - goal { - forall<'a> { - if (InScope(Foo)) { - i32::Item<'a> = Ref<'a, i32> - } - } - } yields { - "Unique" - } - - goal { - forall<'a> { - if (InScope(Foo)) { - WellFormed(i32::Item<'a>) - } - } - } yields { - "Unique" - } - } -} - -#[test] -fn unselected_projection_with_parametric_trait() { - test! { - program { - trait Foo { - type Item; - } - - struct i32 { } - - impl Foo for i32 { - type Item = i32; - } - } - goal { - if (InScope(Foo)) { - i32::Item = i32 - } - } yields { - "Unique" - } - } -} - -#[test] -fn overflow_universe() { - test! { - program { - struct Foo { } - - trait Bar { } - - // When asked to solve X: Bar, we will produce a - // requirement to solve !1: Bar. And then when asked to - // solve that, we'll produce a requirement to solve !2: - // Bar. And so forth. - forall { X: Bar if forall { Y: Bar } } - } - - goal { - Foo: Bar - } yields { - // The internal universe canonicalization in the on-demand/recursive - // solver means that when we are asked to solve (e.g.) 
- // `!2: Bar`, we rewrite that to `!1: Bar`, identifying a - // cycle. - "No possible solution" - } - } -} - -#[test] -fn projection_from_env() { - test! { - program { - trait Sized { } - - struct Slice where T: Sized { } - impl Sized for Slice { } - - trait SliceExt - { - type Item; - } - - impl SliceExt for Slice - { - type Item = T; - } - } - - goal { - forall { - if ( - as SliceExt>::Item: Sized - ) { - T: Sized - } - } - } yields { - "Unique" - } - } -} - -#[test] -fn gat_unify_with_implied_wc() { - test! { - program { - struct Slice { } - - trait Cast { } - trait CastingIter { - type Item: Cast where T: Cast; - } - - impl CastingIter for Slice { - type Item = Castable; - } - - struct Castable { } - impl Cast for Castable { } - } - - goal { - forall { - if ( - FromEnv( as CastingIter>::Item) - ) { - T: Cast - } - } - } yields { - "Unique" - } - - goal { - forall { - T: Cast - } - } yields { - "No possible solution" - } - } -} - -// This variant of the above test used to be achingly slow on SLG -// solvers, before the "trivial answer" green cut was introduced. -// -// The problem was that we wound up enumerating a goal like -// -// ::Item = !1 -// -// which meant "find me the types that normalize to `!1`". We had no -// problem finding these types, but after the first such type, we had -// the only unique answer we would ever find, and we wanted to reach -// the point where we could say "no more answers", so we kept -// requesting more answers. -#[test] -fn projection_from_env_slow() { - test! 
{ - program { - trait Clone { } - trait Sized { } - - struct Slice where T: Sized { } - impl Sized for Slice { } - - struct u32 { } - impl Clone for u32 { } - impl Sized for u32 { } - - trait SliceExt - where ::Item: Clone - { - type Item; - } - - impl SliceExt for Slice - where T: Clone - { - type Item = T; - } - } - - goal { - forall { - if ( - as SliceExt>::Item: Clone; - as SliceExt>::Item: Sized; - T: Clone - ) { - T: Sized - } - } - } yields { - "Unique" - } - } -} - -#[test] -fn clauses_in_if_goals() { - test! { - program { - trait Foo { } - struct Vec { } - struct i32 { } - } - - goal { - if (forall { T: Foo }) { - forall { T: Foo } - } - } yields { - "Unique" - } - - goal { - forall { - if (Vec: Foo :- T: Foo) { - if (T: Foo) { - Vec: Foo - } - } - } - } yields { - "Unique" - } - - goal { - if (forall { Vec: Foo :- T: Foo }) { - if (i32: Foo) { - Vec: Foo - } - } - } yields { - "Unique" - } - - goal { - if (forall { Vec: Foo :- T: Foo }) { - Vec: Foo - } - } yields { - "No possible solution" - } - } -} - -#[test] -fn quantified_types() { - test! { - program { - trait Foo { } - struct fn<'a> { } - struct fn2<'a, 'b> { } - impl Foo for for<'a> fn<'a> { } - } - - goal { - for<'a> fn<'a>: Foo - } yields { - "Unique" - } - - goal { - for<'a, 'b> fn2<'a, 'b> = for<'b, 'a> fn2<'a, 'b> - } yields { - "Unique" - } - - goal { - forall<'a> { fn<'a>: Foo } - } yields { - // Lifetime constraints are unsatisfiable - "Unique; substitution [], \ - lifetime constraints [InEnvironment { environment: Env([]), goal: '!2_0 == '!1_0 }]" - } - } -} - -#[test] -fn higher_ranked_implied_bounds() { - test! { - program { - trait Foo<'a> { } - trait Bar where forall<'a> Self: Foo<'a> { } - } - - goal { - forall { - if (T: Bar) { - forall<'a> { - T: Foo<'a> - } - } - } - } yields { - "Unique" - } - } - - test! 
{ - program { - trait Foo { } - trait Bar where forall Self: Foo { } - } - - goal { - forall { - if (T: Bar) { - forall { - T: Foo - } - } - } - } yields { - "Unique" - } - } -} - -#[test] -fn deref_goal() { - test! { - program { - #[lang_deref] - trait Deref { type Target; } - struct Foo { } - struct Bar { } - struct Baz { } - impl Deref for Foo { type Target = Bar; } - } - - goal { - Derefs(Foo, Bar) - } yields { - "Unique" - } - - goal { - Derefs(Foo, Baz) - } yields { - "No possible solution" - } - } - - test! { - program { - #[lang_deref] - trait Deref { type Target; } - struct Arc { } - struct i32 { } - struct u64 { } - impl Deref for Arc { type Target = T; } - } - - goal { - Derefs(Arc, i32) - } yields { - "Unique" - } - - goal { - Derefs(Arc, u64) - } yields { - "No possible solution" - } - } -} - -#[test] -fn local_and_upstream_types() { - test! { - program { - #[upstream] struct Upstream { } - struct Local { } - } - - goal { IsLocal(Upstream) } yields { "No possible solution" } - goal { IsUpstream(Upstream) } yields { "Unique" } - - goal { IsLocal(Local) } yields { "Unique" } - goal { IsUpstream(Local) } yields { "No possible solution" } - } - - test! { - program { - trait Clone { } - #[upstream] struct Upstream where T: Clone { } - struct Local where T: Clone { } - - #[upstream] struct Upstream2 { } - struct Internal2 { } - } - - goal { forall { IsLocal(Upstream) } } yields { "No possible solution" } - goal { forall { IsUpstream(Upstream) } } yields { "Unique" } - - goal { forall { IsLocal(Local) } } yields { "Unique" } - goal { forall { IsUpstream(Local) } } yields { "No possible solution" } - } -} - -#[test] -fn is_fully_visible() { - // Should be visible regardless of local, fundamental, or upstream - test! 
{ - program { - #[upstream] struct Upstream { } - struct Local { } - - #[upstream] - #[fundamental] - struct Box { } - } - - goal { IsFullyVisible(Upstream) } yields { "Unique" } - goal { IsFullyVisible(Local) } yields { "Unique" } - goal { IsFullyVisible(Box) } yields { "Unique" } - goal { IsFullyVisible(Box) } yields { "Unique" } - } - - // Should be visible regardless of local, fundamental, or upstream - test! { - program { - #[upstream] struct Upstream { } - struct Local { } - - #[upstream] struct Upstream2 { } - struct Local2 { } - - #[upstream] - #[fundamental] - struct Box { } - } - - // Unknown type parameters are not fully visible - goal { forall { IsFullyVisible(Box) } } yields { "No possible solution" } - goal { forall { IsFullyVisible(Upstream2) } } yields { "No possible solution" } - goal { forall { IsFullyVisible(Local2) } } yields { "No possible solution" } - - // Without any unknown type parameters, local and upstream should not matter - goal { forall { IsFullyVisible(Upstream2) } } yields { "Unique" } - goal { forall { IsFullyVisible(Upstream2) } } yields { "Unique" } - goal { forall { IsFullyVisible(Local2) } } yields { "Unique" } - goal { forall { IsFullyVisible(Local2) } } yields { "Unique" } - - // Fundamental anywhere should not change the outcome - goal { forall { IsFullyVisible(Box>) } } yields { "Unique" } - goal { forall { IsFullyVisible(Box>) } } yields { "Unique" } - goal { forall { IsFullyVisible(Box>) } } yields { "Unique" } - goal { forall { IsFullyVisible(Box>) } } yields { "Unique" } - goal { forall { IsFullyVisible(Upstream2>) } } yields { "Unique" } - goal { forall { IsFullyVisible(Upstream2>) } } yields { "Unique" } - goal { forall { IsFullyVisible(Local2>) } } yields { "Unique" } - goal { forall { IsFullyVisible(Local2>) } } yields { "Unique" } - } -} - -#[test] -fn fundamental_types() { - // NOTE: These tests need to have both Local and Upstream structs since chalk will attempt - // to enumerate all of them. 
- - // This first test is a sanity check to make sure `Box` isn't a special case. - // By testing this, we ensure that adding the #[fundamental] attribute does in fact - // change behaviour - test! { - program { - #[upstream] struct Box { } - - #[upstream] struct Upstream { } - struct Local { } - } - - // Without fundamental, Box should behave like a regular upstream type - goal { forall { not { IsLocal(Box) } } } yields { "Unique" } - goal { forall { IsLocal(Box) } } yields { "No possible solution" } - goal { forall { IsUpstream(Box) } } yields { "Unique" } - - // Without fundamental, Box is upstream regardless of its inner type - goal { IsLocal(Box) } yields { "No possible solution" } - goal { IsLocal(Box) } yields { "No possible solution" } - goal { IsUpstream(Box) } yields { "Unique" } - goal { IsUpstream(Box) } yields { "Unique" } - } - - test! { - program { - #[upstream] - #[fundamental] - struct Box { } - - #[upstream] struct Upstream { } - struct Local { } - } - - // With fundamental, Box can be local for certain types, so there is no unique solution - // anymore for any of these - goal { forall { not { IsLocal(Box) } } } yields { "No possible solution" } - goal { forall { IsLocal(Box) } } yields { "No possible solution" } - goal { forall { IsUpstream(Box) } } yields { "No possible solution" } - - // With fundamental, some of these yield different results -- no longer depends on Box - // itself - goal { IsLocal(Box) } yields { "No possible solution" } - goal { IsLocal(Box) } yields { "Unique" } - goal { IsUpstream(Box) } yields { "Unique" } - goal { IsUpstream(Box) } yields { "No possible solution" } - } - - test! 
{ - program { - #[upstream] - #[fundamental] - struct Box { } - - trait Clone { } - #[upstream] struct Upstream where T: Clone { } - struct Local where T: Clone { } - - #[upstream] struct Upstream2 { } - struct Internal2 { } - } - - // Upstream is upstream no matter what, so this should not be local for any T - goal { forall { IsLocal(Box>) } } yields { "No possible solution" } - goal { forall { IsUpstream(Box>) } } yields { "Unique" } - - // A fundamental type inside an upstream type should not make a difference (i.e. the rules - // for the outer, non-fundamental type should apply) - goal { forall { IsLocal(Upstream>) } } yields { "No possible solution" } - goal { forall { IsUpstream(Upstream>) } } yields { "Unique" } - - // Make sure internal types within an upstream type do not make a difference - goal { forall { IsLocal(Box>>) } } yields { "No possible solution" } - goal { forall { IsUpstream(Box>>) } } yields { "Unique" } - - // Local is local no matter what, so this should be local for any T - goal { forall { IsLocal(Box>) } } yields { "Unique" } - goal { forall { IsUpstream(Box>) } } yields { "No possible solution" } - - // A fundamental type inside an internal type should not make a difference - goal { forall { IsLocal(Local>) } } yields { "Unique" } - goal { forall { IsUpstream(Local>) } } yields { "No possible solution" } - - // Make sure upstream types within an internal type and vice versa do not make a difference - goal { forall { IsLocal(Box>>) } } yields { "Unique" } - goal { forall { IsUpstream(Box>>) } } yields { "Unique" } - } - - // Nested fundamental types should still be local if they can be recursively proven to be local - test! 
{ - program { - #[upstream] - #[fundamental] - struct Box { } - // This type represents &T which is also fundamental - #[upstream] - #[fundamental] - struct Ref { } - - trait Clone { } - #[upstream] struct Upstream where T: Clone { } - struct Local where T: Clone { } - - #[upstream] struct Upstream2 { } - struct Internal2 { } - } - - goal { forall { IsLocal(Ref>) } } yields { "No possible solution" } - goal { forall { IsUpstream(Ref>) } } yields { "No possible solution" } - - goal { IsLocal(Ref>) } yields { "No possible solution" } - goal { IsUpstream(Ref>) } yields { "Unique" } - - goal { IsLocal(Ref>) } yields { "Unique" } - goal { IsUpstream(Ref>) } yields { "No possible solution" } - } - - // If a type is not upstream, it is always local regardless of its parameters or #[fundamental] - test! { - program { - // if we were compiling std, Box would never be upstream - #[fundamental] - struct Box { } - - #[upstream] struct Upstream { } - struct Local { } - } - - goal { forall { IsLocal(Box) } } yields { "Unique" } - goal { IsLocal(Box) } yields { "Unique" } - goal { IsLocal(Box) } yields { "Unique" } - } -} - -#[test] -fn local_impl_allowed_for_traits() { - test! { - program { - trait LocalTrait { } - trait LocalTrait2 { } - - #[upstream] struct Upstream { } - struct Local { } - } - - // Local traits are always implementable - goal { forall { LocalImplAllowed(T: LocalTrait) } } yields { "Unique" } - goal { LocalImplAllowed(Local: LocalTrait) } yields { "Unique" } - goal { LocalImplAllowed(Upstream: LocalTrait) } yields { "Unique" } - goal { forall { LocalImplAllowed(T: LocalTrait2) } } yields { "Unique" } - goal { forall { LocalImplAllowed(T: LocalTrait2) } } yields { "Unique" } - goal { forall { LocalImplAllowed(Local: LocalTrait2) } } yields { "Unique" } - goal { forall { LocalImplAllowed(Upstream: LocalTrait2) } } yields { "Unique" } - } - - // Single-type parameter trait refs (Self only) - test! 
{ - program { - #[upstream] trait UpstreamTrait { } - - #[upstream] struct Upstream { } - #[upstream] struct Upstream2 { } - struct Local { } - struct Local2 { } - } - - // No local type - goal { LocalImplAllowed(Upstream: UpstreamTrait) } yields { "No possible solution" } - goal { forall { LocalImplAllowed(T: UpstreamTrait) } } yields { "No possible solution" } - - // Local type, not preceded by anything - // Notice that the types after the first local type do not matter at all - goal { LocalImplAllowed(Local: UpstreamTrait) } yields { "Unique" } - } - - // Multi-type parameter trait refs (Self, T) - test! { - program { - trait Clone { } - #[upstream] trait UpstreamTrait2 where T: Clone { } - - #[upstream] struct Upstream { } - #[upstream] struct Upstream2 { } - struct Local { } - struct Local2 { } - } - - // No local type - goal { forall { LocalImplAllowed(T: UpstreamTrait2) } } yields { "No possible solution" } - goal { forall { LocalImplAllowed(T: UpstreamTrait2) } } yields { "No possible solution" } - goal { forall { LocalImplAllowed(Upstream: UpstreamTrait2) } } yields { "No possible solution" } - - // Local type, but preceded by a type parameter - goal { forall { LocalImplAllowed(T: UpstreamTrait2) } } yields { "No possible solution" } - - // Local type, not preceded by anything - // Notice that the types after the first local type do not matter at all - goal { forall { LocalImplAllowed(Local: UpstreamTrait2) } } yields { "Unique" } - goal { LocalImplAllowed(Local: UpstreamTrait2) } yields { "Unique" } - goal { LocalImplAllowed(Local: UpstreamTrait2) } yields { "Unique" } - - // Local type, but preceded by a fully visible type (i.e. 
no placeholder types) - goal { LocalImplAllowed(Upstream: UpstreamTrait2) } yields { "Unique" } - goal { LocalImplAllowed(Upstream2: UpstreamTrait2) } yields { "Unique" } - goal { LocalImplAllowed(Upstream2: UpstreamTrait2) } yields { "Unique" } - - // Type parameter covered by the local type - goal { forall { LocalImplAllowed(Upstream: UpstreamTrait2>) } } yields { "Unique" } - goal { forall { LocalImplAllowed(Upstream2: UpstreamTrait2>) } } yields { "Unique" } - goal { forall { LocalImplAllowed(Upstream2: UpstreamTrait2>) } } yields { "Unique" } - - // Type parameter covered by a deeply nested upstream type - // Notice that it does not matter that the T is wrapped in a local type because the outer - // type is still upstream - goal { forall { LocalImplAllowed(Upstream2>: UpstreamTrait2>) } } yields { "No possible solution" } - // Does not matter whether the covered type parameter is eventually covered or not by the - // first actually local type found - goal { forall { LocalImplAllowed(Upstream2>: UpstreamTrait2>) } } yields { "No possible solution" } - } - - test! { - program { - trait Clone { } - trait Eq { } - // Lifetime is just included to show that it does not break anything. - // Where clauses do not change the results at all. 
- #[upstream] trait UpstreamTrait<'a, T, U, V> where T: Clone, U: Eq, V: Clone, V: Eq { } - trait InternalTrait<'a, T, U, V> where T: Clone, U: Eq, V: Clone, V: Eq { } - - #[upstream] struct Upstream { } - #[upstream] struct Upstream2 { } - struct Local { } - } - - // Local traits can be implemented regardless of the types involved - goal { forall { LocalImplAllowed(Self: InternalTrait<'a, T, U, V>) } } yields { "Unique" } - - // Upstream traits definitely cannot be implemented for all types - goal { forall { LocalImplAllowed(Self: UpstreamTrait<'a, T, U, V>) } } yields { "No possible solution" } - - // No local types - goal { forall<'a> { LocalImplAllowed(Upstream2: UpstreamTrait<'a, Upstream, Upstream, Upstream>) } } yields { "No possible solution" } - goal { forall<'a> { LocalImplAllowed(Upstream2: UpstreamTrait< - 'a, - Upstream2, - Upstream2>>, - Upstream2> - >) } } yields { "No possible solution" } - - // Local type, not preceded by anything -- types after the first local type do not matter - goal { forall<'a, T, U, V> { LocalImplAllowed(Local: UpstreamTrait<'a, T, U, V>) } } yields { "Unique" } - goal { forall<'a, U, V> { LocalImplAllowed(Local: UpstreamTrait<'a, Local, U, V>) } } yields { "Unique" } - goal { forall<'a, U, V> { LocalImplAllowed(Local: UpstreamTrait<'a, Upstream, U, V>) } } yields { "Unique" } - goal { forall<'a> { LocalImplAllowed(Local: UpstreamTrait<'a, Upstream, Local, Local>) } } yields { "Unique" } - - // Local type preceded by a type that is not fully visible - goal { forall<'a, T> { LocalImplAllowed(T: UpstreamTrait<'a, Upstream, Upstream, Local>) } } yields { "No possible solution" } - goal { forall<'a, T> { LocalImplAllowed(Upstream: UpstreamTrait<'a, T, Upstream, Local>) } } yields { "No possible solution" } - goal { forall<'a, T> { LocalImplAllowed(Upstream: UpstreamTrait<'a, Upstream, T, Local>) } } yields { "No possible solution" } - - // Once again, types after the first local do not matter - goal { forall<'a, T> { 
LocalImplAllowed(Upstream: UpstreamTrait<'a, Upstream, Local, T>) } } yields { "Unique" } - } -} diff --git a/chalk-solve/src/solve/test/bench.rs b/chalk-solve/src/solve/test/bench.rs index bbf31b854b0..6ef6eb1f976 100644 --- a/chalk-solve/src/solve/test/bench.rs +++ b/chalk-solve/src/solve/test/bench.rs @@ -3,35 +3,37 @@ extern crate test; use self::test::Bencher; +use crate::db::ChalkDatabase; +use crate::query::{ProgramSolverChoice, ProgramText}; +use chalk_solve::SolverChoice; use ir; -use ir::solve::SolverChoice; use std::sync::Arc; -use super::{parse_and_lower_program, - parse_and_lower_goal, - assert_result}; +use super::{assert_result, parse_and_lower_goal, parse_and_lower_program}; fn run_bench( program_text: &str, solver_choice: SolverChoice, goal_text: &str, bencher: &mut Bencher, - expected: &str + expected: &str, ) { - let program = Arc::new(parse_and_lower_program(program_text, solver_choice).unwrap()); - let env = Arc::new(program.environment()); - ir::tls::set_current_program(&program, || { - let goal = parse_and_lower_goal(&program, goal_text).unwrap(); - let peeled_goal = goal.into_peeled_goal(); - - // Execute once to get an expected result. - let result = solver_choice.solve_root_goal(&env, &peeled_goal); - - // Check expectation. - assert_result(&result, expected); - - // Then do it many times to measure time. - bencher.iter(|| solver_choice.solve_root_goal(&env, &peeled_goal)); + ChalkDatabase::with_program(Arc::new(program_text.to_string()), solver_choice, |db| { + let program = db.lowered_program().unwrap(); + let env = db.environment().unwrap(); + ir::tls::set_current_program(&program, || { + let goal = parse_and_lower_goal(&program, goal_text).unwrap(); + let peeled_goal = goal.into_peeled_goal(); + + // Execute once to get an expected result. + let result = solver_choice.solve_root_goal(&env, &peeled_goal); + + // Check expectation. + assert_result(&result, expected); + + // Then do it many times to measure time. 
+ bencher.iter(|| solver_choice.solve_root_goal(&env, &peeled_goal)); + }); }); } @@ -101,11 +103,9 @@ forall { fn cycley_slg(b: &mut Bencher) { run_bench( CYCLEY, - SolverChoice::SLG { - max_size: 20, - }, + SolverChoice::SLG { max_size: 20 }, CYCLEY_GOAL, b, - "Unique" + "Unique", ); } diff --git a/chalk-solve/src/solve/truncate.rs b/chalk-solve/src/solve/truncate.rs index 5bfcec2cc13..1ed47b94fd0 100644 --- a/chalk-solve/src/solve/truncate.rs +++ b/chalk-solve/src/solve/truncate.rs @@ -1,257 +1,134 @@ //! -use chalk_engine::fallible::*; -use chalk_ir::fold::shift::Shift; -use chalk_ir::fold::{ - self, DefaultFreeVarFolder, DefaultInferenceFolder, DefaultPlaceholderFolder, Fold, TypeFolder, -}; -use chalk_ir::*; use crate::infer::InferenceTable; +use chalk_ir::interner::Interner; +use chalk_ir::visit::{TypeSuperVisitable, TypeVisitable, TypeVisitor}; +use chalk_ir::*; +use std::cmp::max; +use std::ops::ControlFlow; + +/// "Truncation" (called "abstraction" in the papers referenced below) +/// refers to the act of modifying a goal or answer that has become +/// too large in order to guarantee termination. +/// +/// Currently we don't perform truncation (but it might me readded later). 
+/// +/// Citations: +/// +/// - Terminating Evaluation of Logic Programs with Finite Three-Valued Models +/// - Riguzzi and Swift; ACM Transactions on Computational Logic 2013 +/// - Radial Restraint +/// - Grosof and Swift; 2013 +pub fn needs_truncation( + interner: I, + infer: &mut InferenceTable, + max_size: usize, + value: impl TypeVisitable, +) -> bool { + let mut visitor = TySizeVisitor::new(interner, infer); + value.visit_with(&mut visitor, DebruijnIndex::INNERMOST); -crate fn truncate(infer: &mut InferenceTable, max_size: usize, value: &T) -> Truncated -where - T: Fold, -{ - debug_heading!("truncate(max_size={}, value={:?})", max_size, value); - - let mut truncater = Truncater::new(infer, max_size); - let value = value - .fold_with(&mut truncater, 0) - .expect("Truncater is infallible"); - debug!( - "truncate: overflow={} value={:?}", - truncater.overflow, value - ); - Truncated { - overflow: truncater.overflow, - value, - } -} - -/// Result from `truncate`. -crate struct Truncated { - /// If true, then `value` was truncated relative to the original - /// (e.g., fresh inference variables were introduced). If false, - /// then it is effectively a clone of the original. - crate overflow: bool, - - /// Possibly truncate value. 
- crate value: T, + visitor.max_size > max_size } -struct Truncater<'infer> { - infer: &'infer mut InferenceTable, - current_size: usize, +struct TySizeVisitor<'infer, I: Interner> { + interner: I, + infer: &'infer mut InferenceTable, + size: usize, + depth: usize, max_size: usize, - overflow: bool, } -impl<'infer> Truncater<'infer> { - fn new(infer: &'infer mut InferenceTable, max_size: usize) -> Self { - Truncater { +impl<'infer, I: Interner> TySizeVisitor<'infer, I> { + fn new(interner: I, infer: &'infer mut InferenceTable) -> Self { + Self { + interner, infer, - current_size: 0, - max_size, - overflow: false, + size: 0, + depth: 0, + max_size: 0, } } +} + +impl<'infer, I: Interner> TypeVisitor for TySizeVisitor<'infer, I> { + type BreakTy = (); - fn overflow(&mut self, pre_size: usize) -> Ty { - self.overflow = true; - self.current_size = pre_size + 1; - let universe = self.infer.max_universe(); - self.infer.new_variable(universe).to_ty() + fn as_dyn(&mut self) -> &mut dyn TypeVisitor { + self } -} -impl<'infer> TypeFolder for Truncater<'infer> { - fn fold_ty(&mut self, ty: &Ty, binders: usize) -> Fallible { - if let Some(normalized_ty) = self.infer.normalize_shallow(ty) { - return self.fold_ty(&normalized_ty, binders); + fn visit_ty(&mut self, ty: &Ty, outer_binder: DebruijnIndex) -> ControlFlow<()> { + if let Some(normalized_ty) = self.infer.normalize_ty_shallow(self.interner, ty) { + normalized_ty.visit_with(self, outer_binder); + return ControlFlow::Continue(()); } - let pre_size = self.current_size; - self.current_size += 1; - - let result = fold::super_fold_ty(self, ty, binders)?; + self.size += 1; + self.max_size = max(self.size, self.max_size); - // We wish to maintain the invariant that: - // - // pre_size < self.max_size => - // post_size <= self.max_size - // - // Presuming that `pre_size < self.max_size`, then the - // invariant is in jeopardy if `post_size > self.max_size`. 
- // To repair the situation, we replace the entire subtree with - // a fresh existential variable (in the innermost universe). - let post_size = self.current_size; - let result = if pre_size < self.max_size && post_size > self.max_size { - self.overflow(pre_size).shifted_in(binders) - } else { - result - }; + self.depth += 1; + ty.super_visit_with(self, outer_binder); + self.depth -= 1; // When we get back to the first invocation, clear the counters. - // We process each type independently. - if pre_size == 0 { - self.current_size = 0; + // We process each outermost type independently. + if self.depth == 0 { + self.size = 0; } - - Ok(result) + ControlFlow::Continue(()) } - fn fold_lifetime(&mut self, lifetime: &Lifetime, binders: usize) -> Fallible { - fold::super_fold_lifetime(self, lifetime, binders) + fn interner(&self) -> I { + self.interner } } -impl<'infer> DefaultFreeVarFolder for Truncater<'infer> {} - -impl<'infer> DefaultInferenceFolder for Truncater<'infer> {} - -impl<'infer> DefaultPlaceholderFolder for Truncater<'infer> {} - -#[test] -fn truncate_types() { - let mut table = InferenceTable::new(); - let environment0 = &Environment::new(); - let _u1 = table.new_universe(); +#[cfg(test)] +mod tests { + use super::*; + use chalk_integration::{arg, ty}; + + #[test] + fn one_type() { + use chalk_integration::interner::ChalkIr; + let interner = ChalkIr; + let mut table = InferenceTable::::new(); + let _u1 = table.new_universe(); + + // Vec>>> + let ty0 = ty!(apply (item 0) + (apply (item 0) + (apply (item 0) + (apply (item 0) + (placeholder 1))))); - // Vec>>> - let ty0 = ty!(apply (item 0) - (apply (item 0) - (apply (item 0) - (apply (item 0) - (apply (placeholder 1)))))); + let mut visitor = TySizeVisitor::new(interner, &mut table); + ty0.visit_with(&mut visitor, DebruijnIndex::INNERMOST); + assert!(visitor.max_size == 5); + } - // test: no truncation with size 5 - let Truncated { - overflow, - value: ty_no_overflow, - } = truncate(&mut table, 5, &ty0); - 
assert!(!overflow); - assert_eq!(ty0, ty_no_overflow); + #[test] + fn multiple_types() { + use chalk_integration::interner::ChalkIr; + let interner = ChalkIr; + let mut table = InferenceTable::::new(); + let _u1 = table.new_universe(); - // test: with size 3, truncates to `Vec>` - let ty_expect = ty!(apply (item 0) + // Vec>>> + let ty0 = ty!(apply (item 0) + (apply (item 0) + (apply (item 0) (apply (item 0) - (infer 0))); - let Truncated { - overflow, - value: ty_overflow, - } = truncate(&mut table, 3, &ty0); - assert!(overflow); - assert_eq!(ty_expect, ty_overflow); + (placeholder 1))))); - // test: the `X` is in u1, hence should fail to unify with a skolemized value in U2. - let _u2 = table.new_universe(); - let ty_in_u2 = ty!(apply (item 0) + let ty1 = ty!(apply (item 0) + (apply (item 0) (apply (item 0) - (apply (placeholder 2)))); - table - .unify(environment0, &ty_overflow, &ty_in_u2) - .unwrap_err(); -} - -#[test] -fn truncate_multiple_types() { - let mut table = InferenceTable::new(); - let _u1 = table.new_universe(); - - // Vec>>> - let ty0 = ty!(apply (item 0) - (apply (item 0) - (apply (item 0) - (apply (item 0) - (apply (placeholder 1)))))); - - // test: no truncation with size 5 - let ty0_3 = vec![ty0.clone(), ty0.clone(), ty0.clone()]; - let Truncated { - overflow, - value: ty_no_overflow, - } = truncate(&mut table, 5, &ty0_3); - assert!(!overflow); - assert_eq!(ty0_3, ty_no_overflow); - - // test: no truncation with size 6 - let ty0_3 = vec![ty0.clone(), ty0.clone(), ty0.clone()]; - let Truncated { - overflow, - value: ty_no_overflow, - } = truncate(&mut table, 6, &ty0_3); - assert!(!overflow); - assert_eq!(ty0_3, ty_no_overflow); - - // test: truncation of all types evenly with size 3 - let ty0_3 = vec![ty0.clone(), ty0.clone(), ty0.clone()]; - let Truncated { - overflow, - value: ty_overflow, - } = truncate(&mut table, 3, &ty0_3); - assert!(overflow); - assert_eq!( - vec![ - ty!(apply (item 0) (apply (item 0) (infer 0))), - ty!(apply (item 0) 
(apply (item 0) (infer 1))), - ty!(apply (item 0) (apply (item 0) (infer 2))), - ], - ty_overflow - ); -} - -#[test] -fn truncate_normalizes() { - let mut table = InferenceTable::new(); - - let environment0 = &Environment::new(); - let u1 = table.new_universe(); - - // ty0 = Vec> - let v0 = table.new_variable(u1); - let ty0 = ty!(apply (item 0) - (apply (item 0) - (infer 0))); + (placeholder 1)))); - // ty1 = Vec> - let ty1 = ty!(apply (item 0) - (apply (item 0) - (apply (placeholder 1)))); - - // test: truncating *before* unifying has no effect - assert!(!truncate(&mut table, 3, &ty0).overflow); - - // unify X and ty1 - table.unify(environment0, &v0.to_ty(), &ty1).unwrap(); - - // test: truncating *after* triggers - let Truncated { - overflow, - value: ty_overflow, - } = truncate(&mut table, 3, &ty0); - assert!(overflow); - assert_eq!( - ty!(apply (item 0) - (apply (item 0) - (infer 1))), - ty_overflow - ); -} - -#[test] -fn truncate_normalizes_under_binders() { - let mut table = InferenceTable::new(); - - let u0 = UniverseIndex::ROOT; - - // v0 = X - let _v0 = table.new_variable(u0); - - // ty0 = for<'a> Vec> - let ty0 = ty!(for_all 1 - (apply (item 0) - (apply (item 0) - (infer 0)))); - - assert!(!truncate(&mut table, 4, &ty0).overflow); + let mut visitor = TySizeVisitor::new(interner, &mut table); + vec![&ty0, &ty1].visit_with(&mut visitor, DebruijnIndex::INNERMOST); + assert!(visitor.max_size == 5); + } } diff --git a/chalk-solve/src/split.rs b/chalk-solve/src/split.rs new file mode 100644 index 00000000000..5a45a833d99 --- /dev/null +++ b/chalk-solve/src/split.rs @@ -0,0 +1,199 @@ +use crate::rust_ir::*; +use crate::RustIrDatabase; +use chalk_ir::interner::Interner; +use chalk_ir::*; +use std::sync::Arc; +use tracing::{debug, instrument}; + +/// Methods for splitting up the projections for associated types from +/// the surrounding context. 
+pub trait Split: RustIrDatabase { + /// Given a projection of an associated type, split the type + /// parameters into those that come from the *trait* and those + /// that come from the *associated type itself*. So e.g. if you + /// have `(Iterator::Item)`, this would return `([F], [])`, + /// since `Iterator::Item` is not generic and hence doesn't have + /// any type parameters itself. + fn split_projection<'p>( + &self, + projection: &'p ProjectionTy, + ) -> ( + Arc>, + &'p [GenericArg], + &'p [GenericArg], + ) { + let interner = self.interner(); + let ProjectionTy { + associated_ty_id, + ref substitution, + } = *projection; + let parameters = substitution.as_slice(interner); + let associated_ty_data = &self.associated_ty_data(associated_ty_id); + let (trait_params, other_params) = + self.split_associated_ty_parameters(parameters, &**associated_ty_data); + (associated_ty_data.clone(), trait_params, other_params) + } + + /// Given a projection `>::Item`, + /// returns the trait parameters `[P0..Pn]` (see + /// `split_projection`). + fn trait_parameters_from_projection<'p>( + &self, + projection: &'p ProjectionTy, + ) -> &'p [GenericArg] { + let (_, trait_params, _) = self.split_projection(projection); + trait_params + } + + /// Given a projection `>::Item`, + /// returns the trait parameters `[P0..Pn]` (see + /// `split_projection`). + fn trait_ref_from_projection(&self, projection: &ProjectionTy) -> TraitRef { + let interner = self.interner(); + let (associated_ty_data, trait_params, _) = self.split_projection(projection); + TraitRef { + trait_id: associated_ty_data.trait_id, + substitution: Substitution::from_iter(interner, trait_params), + } + } + + /// Given the full set of parameters (or binders) for an + /// associated type *value* (which appears in an impl), splits + /// them into the substitutions for the *impl* and those for the + /// *associated type*. 
+ /// + /// # Example + /// + /// ```ignore (example) + /// impl Iterable for Vec { + /// type Iter<'a>; + /// } + /// ``` + /// + /// in this example, the full set of parameters would be `['x, + /// Y]`, where `'x` is the value for `'a` and `Y` is the value for + /// `T`. + /// + /// # Returns + /// + /// Returns the pair of: + /// + /// * the parameters for the impl (`[Y]`, in our example) + /// * the parameters for the associated type value (`['a]`, in our example) + fn split_associated_ty_value_parameters<'p, P>( + &self, + parameters: &'p [P], + associated_ty_value: &AssociatedTyValue, + ) -> (&'p [P], &'p [P]) { + let interner = self.interner(); + let impl_datum = self.impl_datum(associated_ty_value.impl_id); + let impl_params_len = impl_datum.binders.len(interner); + assert!(parameters.len() >= impl_params_len); + + // the impl parameters are a suffix + // + // [ P0..Pn, Pn...Pm ] + // ^^^^^^ impl parameters + let (impl_params, other_params) = parameters.split_at(impl_params_len); + (impl_params, other_params) + } + + /// Given the full set of parameters for an associated type *value* + /// (which appears in an impl), returns the trait reference + /// and projection that are being satisfied by that value. + /// + /// # Example + /// + /// ```ignore (example) + /// impl Iterable for Vec { + /// type Iter<'a>; + /// } + /// ``` + /// + /// Here we expect the full set of parameters for `Iter`, which + /// would be `['x, Y]`, where `'x` is the value for `'a` and `Y` + /// is the value for `T`. 
+ /// + /// Returns the pair of: + /// + /// * the parameters that apply to the impl (`Y`, in our example) + /// * the projection ` as Iterable>::Iter<'x>` + #[instrument(level = "debug", skip(self, associated_ty_value))] + fn impl_parameters_and_projection_from_associated_ty_value<'p>( + &self, + parameters: &'p [GenericArg], + associated_ty_value: &AssociatedTyValue, + ) -> (&'p [GenericArg], ProjectionTy) { + let interner = self.interner(); + + let impl_datum = self.impl_datum(associated_ty_value.impl_id); + + // Get the trait ref from the impl -- so in our example above + // this would be `Box: Foo`. + let (impl_parameters, atv_parameters) = + self.split_associated_ty_value_parameters(parameters, associated_ty_value); + let trait_ref = { + let opaque_ty_ref = impl_datum.binders.map_ref(|b| &b.trait_ref).cloned(); + debug!(?opaque_ty_ref); + opaque_ty_ref.substitute(interner, impl_parameters) + }; + + // Create the parameters for the projection -- in our example + // above, this would be `['!a, Box]`, corresponding to + // ` as Foo>::Item<'!a>` + let projection_substitution = Substitution::from_iter( + interner, + trait_ref + .substitution + .iter(interner) + .chain(atv_parameters.iter()) + .cloned(), + ); + + let projection = ProjectionTy { + associated_ty_id: associated_ty_value.associated_ty_id, + substitution: projection_substitution, + }; + + debug!(?impl_parameters, ?trait_ref, ?projection); + + (impl_parameters, projection) + } + + /// Given the full set of parameters (or binders) for an + /// associated type datum (the one appearing in a trait), splits + /// them into the parameters for the *trait* and those for the + /// *associated type*. + /// + /// # Example + /// + /// ```ignore (example) + /// trait Foo { + /// type Assoc<'a>; + /// } + /// ``` + /// + /// in this example, the full set of parameters would be `['x, + /// Y]`, where `'x` is the value for `'a` and `Y` is the value for + /// `T`. 
+ /// + /// # Returns + /// + /// Returns the tuple of: + /// + /// * the parameters for the impl (`[Y]`, in our example) + /// * the parameters for the associated type value (`['a]`, in our example) + fn split_associated_ty_parameters<'p, P>( + &self, + parameters: &'p [P], + associated_ty_datum: &AssociatedTyDatum, + ) -> (&'p [P], &'p [P]) { + let trait_datum = &self.trait_datum(associated_ty_datum.trait_id); + let trait_num_params = trait_datum.binders.len(self.interner()); + let split_point = trait_num_params; + let (trait_params, other_params) = parameters.split_at(split_point); + (trait_params, other_params) + } +} + +impl + ?Sized, I: Interner> Split for DB {} diff --git a/chalk-solve/src/wf.rs b/chalk-solve/src/wf.rs new file mode 100644 index 00000000000..c47adf37c28 --- /dev/null +++ b/chalk-solve/src/wf.rs @@ -0,0 +1,1207 @@ +use std::ops::ControlFlow; +use std::{fmt, iter}; + +use crate::{ + ext::*, goal_builder::GoalBuilder, rust_ir::*, solve::Solver, split::Split, RustIrDatabase, +}; +use chalk_ir::{ + cast::*, + fold::shift::Shift, + interner::Interner, + visit::{TypeVisitable, TypeVisitor}, + *, +}; +use tracing::debug; + +#[derive(Debug)] +pub enum WfError { + IllFormedTypeDecl(chalk_ir::AdtId), + IllFormedOpaqueTypeDecl(chalk_ir::OpaqueTyId), + IllFormedTraitImpl(chalk_ir::TraitId), +} + +impl fmt::Display for WfError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + WfError::IllFormedTypeDecl(id) => write!( + f, + "type declaration `{:?}` does not meet well-formedness requirements", + id + ), + WfError::IllFormedOpaqueTypeDecl(id) => write!( + f, + "opaque type declaration `{:?}` does not meet well-formedness requirements", + id + ), + WfError::IllFormedTraitImpl(id) => write!( + f, + "trait impl for `{:?}` does not meet well-formedness requirements", + id + ), + } + } +} + +impl std::error::Error for WfError {} + +pub struct WfSolver<'a, I: Interner> { + db: &'a dyn RustIrDatabase, + solver_builder: &'a dyn Fn() -> 
Box>, +} + +struct InputTypeCollector { + types: Vec>, + interner: I, +} + +impl InputTypeCollector { + fn new(interner: I) -> Self { + Self { + types: Vec::new(), + interner, + } + } + + fn types_in(interner: I, value: impl TypeVisitable) -> Vec> { + let mut collector = Self::new(interner); + value.visit_with(&mut collector, DebruijnIndex::INNERMOST); + collector.types + } +} + +impl TypeVisitor for InputTypeCollector { + type BreakTy = (); + fn as_dyn(&mut self) -> &mut dyn TypeVisitor { + self + } + + fn interner(&self) -> I { + self.interner + } + + fn visit_where_clause( + &mut self, + where_clause: &WhereClause, + outer_binder: DebruijnIndex, + ) -> ControlFlow<()> { + match where_clause { + WhereClause::AliasEq(alias_eq) => alias_eq + .alias + .clone() + .intern(self.interner) + .visit_with(self, outer_binder), + WhereClause::Implemented(trait_ref) => trait_ref.visit_with(self, outer_binder), + WhereClause::TypeOutlives(TypeOutlives { ty, .. }) => ty.visit_with(self, outer_binder), + WhereClause::LifetimeOutlives(..) 
=> ControlFlow::Continue(()), + } + } + + fn visit_ty(&mut self, ty: &Ty, outer_binder: DebruijnIndex) -> ControlFlow<()> { + let interner = self.interner(); + + let mut push_ty = || { + self.types + .push(ty.clone().shifted_out_to(interner, outer_binder).unwrap()) + }; + match ty.kind(interner) { + TyKind::Adt(id, substitution) => { + push_ty(); + id.visit_with(self, outer_binder); + substitution.visit_with(self, outer_binder) + } + TyKind::AssociatedType(assoc_ty, substitution) => { + push_ty(); + assoc_ty.visit_with(self, outer_binder); + substitution.visit_with(self, outer_binder) + } + TyKind::Scalar(scalar) => { + push_ty(); + scalar.visit_with(self, outer_binder) + } + TyKind::Str => { + push_ty(); + ControlFlow::Continue(()) + } + TyKind::Tuple(arity, substitution) => { + push_ty(); + arity.visit_with(self, outer_binder); + substitution.visit_with(self, outer_binder) + } + TyKind::OpaqueType(opaque_ty, substitution) => { + push_ty(); + opaque_ty.visit_with(self, outer_binder); + substitution.visit_with(self, outer_binder) + } + TyKind::Slice(substitution) => { + push_ty(); + substitution.visit_with(self, outer_binder) + } + TyKind::FnDef(fn_def, substitution) => { + push_ty(); + fn_def.visit_with(self, outer_binder); + substitution.visit_with(self, outer_binder) + } + TyKind::Ref(mutability, lifetime, ty) => { + push_ty(); + mutability.visit_with(self, outer_binder); + lifetime.visit_with(self, outer_binder); + ty.visit_with(self, outer_binder) + } + TyKind::Raw(mutability, substitution) => { + push_ty(); + mutability.visit_with(self, outer_binder); + substitution.visit_with(self, outer_binder) + } + TyKind::Never => { + push_ty(); + ControlFlow::Continue(()) + } + TyKind::Array(ty, const_) => { + push_ty(); + ty.visit_with(self, outer_binder); + const_.visit_with(self, outer_binder) + } + TyKind::Closure(_id, substitution) => { + push_ty(); + substitution.visit_with(self, outer_binder) + } + TyKind::Coroutine(_coroutine, substitution) => { + push_ty(); + 
substitution.visit_with(self, outer_binder) + } + TyKind::CoroutineWitness(_witness, substitution) => { + push_ty(); + substitution.visit_with(self, outer_binder) + } + TyKind::Foreign(_foreign_ty) => { + push_ty(); + ControlFlow::Continue(()) + } + TyKind::Error => { + push_ty(); + ControlFlow::Continue(()) + } + + TyKind::Dyn(clauses) => { + push_ty(); + clauses.visit_with(self, outer_binder) + } + + TyKind::Alias(AliasTy::Projection(proj)) => { + push_ty(); + proj.visit_with(self, outer_binder) + } + + TyKind::Alias(AliasTy::Opaque(opaque_ty)) => { + push_ty(); + opaque_ty.visit_with(self, outer_binder) + } + + TyKind::Placeholder(_) => { + push_ty(); + ControlFlow::Continue(()) + } + + // Type parameters do not carry any input types (so we can sort of assume they are + // always WF). + TyKind::BoundVar(..) => ControlFlow::Continue(()), + + // Higher-kinded types such as `for<'a> fn(&'a u32)` introduce their own implied + // bounds, and these bounds will be enforced upon calling such a function. In some + // sense, well-formedness requirements for the input types of an HKT will be enforced + // lazily, so no need to include them here. + TyKind::Function(..) => ControlFlow::Continue(()), + + TyKind::InferenceVar(..) => { + panic!("unexpected inference variable in wf rules: {:?}", ty) + } + } + } +} + +impl<'a, I> WfSolver<'a, I> +where + I: Interner, +{ + /// Constructs a new `WfSolver`. 
+ pub fn new( + db: &'a dyn RustIrDatabase, + solver_builder: &'a dyn Fn() -> Box>, + ) -> Self { + Self { db, solver_builder } + } + + pub fn verify_adt_decl(&self, adt_id: AdtId) -> Result<(), WfError> { + let interner = self.db.interner(); + + // Given a struct like + // + // ```rust + // struct Foo where T: Eq { + // data: Vec + // } + // ``` + let adt_datum = self.db.adt_datum(adt_id); + let is_enum = adt_datum.kind == AdtKind::Enum; + + let mut gb = GoalBuilder::new(self.db); + let adt_data = adt_datum + .binders + .map_ref(|b| (&b.variants, &b.where_clauses)); + + // We make a goal like... + // + // forall { ... } + let wg_goal = gb.forall( + &adt_data, + is_enum, + |gb, _, (variants, where_clauses), is_enum| { + let interner = gb.interner(); + + // (FromEnv(T: Eq) => ...) + gb.implies( + where_clauses + .iter() + .cloned() + .map(|wc| wc.into_from_env_goal(interner)), + |gb| { + let sub_goals: Vec<_> = variants + .iter() + .flat_map(|variant| { + let fields = &variant.fields; + + // When checking if Enum is well-formed, we require that all fields of + // each variant are sized. For `structs`, we relax this requirement to + // all but the last field. 
+ let sized_constraint_goal = + WfWellKnownConstraints::struct_sized_constraint( + gb.db(), + fields, + is_enum, + ); + + // WellFormed(Vec), for each field type `Vec` or type that appears in the where clauses + let types = InputTypeCollector::types_in( + gb.interner(), + (&fields, &where_clauses), + ); + + types + .into_iter() + .map(|ty| ty.well_formed().cast(interner)) + .chain(sized_constraint_goal.into_iter()) + }) + .collect(); + + gb.all(sub_goals) + }, + ) + }, + ); + + let wg_goal = wg_goal.into_closed_goal(interner); + let mut fresh_solver = (self.solver_builder)(); + let is_legal = fresh_solver.has_unique_solution(self.db, &wg_goal); + + if !is_legal { + Err(WfError::IllFormedTypeDecl(adt_id)) + } else { + Ok(()) + } + } + + pub fn verify_trait_impl(&self, impl_id: ImplId) -> Result<(), WfError> { + let interner = self.db.interner(); + + let impl_datum = self.db.impl_datum(impl_id); + let trait_id = impl_datum.trait_id(); + + let impl_goal = Goal::all( + interner, + impl_header_wf_goal(self.db, impl_id).into_iter().chain( + impl_datum + .associated_ty_value_ids + .iter() + .filter_map(|&id| compute_assoc_ty_goal(self.db, id)), + ), + ); + + if let Some(well_known) = self.db.trait_datum(trait_id).well_known { + self.verify_well_known_impl(impl_id, well_known)? 
+ } + + debug!("WF trait goal: {:?}", impl_goal); + + let mut fresh_solver = (self.solver_builder)(); + let is_legal = + fresh_solver.has_unique_solution(self.db, &impl_goal.into_closed_goal(interner)); + + if is_legal { + Ok(()) + } else { + Err(WfError::IllFormedTraitImpl(trait_id)) + } + } + + pub fn verify_opaque_ty_decl(&self, opaque_ty_id: OpaqueTyId) -> Result<(), WfError> { + // Given an opaque type like + // ```notrust + // opaque type Foo: Clone where T: Bar = Baz; + // ``` + let interner = self.db.interner(); + + let mut gb = GoalBuilder::new(self.db); + + let datum = self.db.opaque_ty_data(opaque_ty_id); + let bound = &datum.bound; + + // We make a goal like + // + // forall + let goal = gb.forall(bound, opaque_ty_id, |gb, _, bound, opaque_ty_id| { + let interner = gb.interner(); + + let subst = Substitution::from1(interner, gb.db().hidden_opaque_type(opaque_ty_id)); + + let bounds = bound.bounds.clone().substitute(interner, &subst); + let where_clauses = bound.where_clauses.clone().substitute(interner, &subst); + + let clauses = where_clauses + .iter() + .cloned() + .map(|wc| wc.into_from_env_goal(interner)); + + // if (WellFormed(T: Bar)) + gb.implies(clauses, |gb| { + let interner = gb.interner(); + + // all(WellFormed(Baz: Clone)) + gb.all( + bounds + .iter() + .cloned() + .map(|b| b.into_well_formed_goal(interner)), + ) + }) + }); + + debug!("WF opaque type goal: {:#?}", goal); + + let mut new_solver = (self.solver_builder)(); + let is_legal = new_solver.has_unique_solution(self.db, &goal.into_closed_goal(interner)); + + if is_legal { + Ok(()) + } else { + Err(WfError::IllFormedOpaqueTypeDecl(opaque_ty_id)) + } + } + + /// Verify builtin rules for well-known traits + pub fn verify_well_known_impl( + &self, + impl_id: ImplId, + well_known: WellKnownTrait, + ) -> Result<(), WfError> { + let mut solver = (self.solver_builder)(); + let impl_datum = self.db.impl_datum(impl_id); + + let is_legal = match well_known { + WellKnownTrait::Copy => { + 
WfWellKnownConstraints::copy_impl_constraint(&mut *solver, self.db, &impl_datum) + } + WellKnownTrait::Drop => { + WfWellKnownConstraints::drop_impl_constraint(&mut *solver, self.db, &impl_datum) + } + WellKnownTrait::CoerceUnsized => { + WfWellKnownConstraints::coerce_unsized_impl_constraint( + &mut *solver, + self.db, + &impl_datum, + ) + } + WellKnownTrait::DispatchFromDyn => { + WfWellKnownConstraints::dispatch_from_dyn_constraint( + &mut *solver, + self.db, + &impl_datum, + ) + } + WellKnownTrait::Clone | WellKnownTrait::Unpin | WellKnownTrait::Future => true, + // You can't add a manual implementation for the following traits: + WellKnownTrait::Fn + | WellKnownTrait::FnOnce + | WellKnownTrait::FnMut + | WellKnownTrait::AsyncFn + | WellKnownTrait::AsyncFnOnce + | WellKnownTrait::AsyncFnMut + | WellKnownTrait::Unsize + | WellKnownTrait::Sized + | WellKnownTrait::DiscriminantKind + | WellKnownTrait::Coroutine + | WellKnownTrait::Pointee + | WellKnownTrait::Tuple + | WellKnownTrait::FnPtr => false, + }; + + if is_legal { + Ok(()) + } else { + Err(WfError::IllFormedTraitImpl(impl_datum.trait_id())) + } + } +} + +fn impl_header_wf_goal( + db: &dyn RustIrDatabase, + impl_id: ImplId, +) -> Option> { + let impl_datum = db.impl_datum(impl_id); + + if !impl_datum.is_positive() { + return None; + } + + let impl_fields = impl_datum + .binders + .map_ref(|v| (&v.trait_ref, &v.where_clauses)); + + let mut gb = GoalBuilder::new(db); + // forall {...} + let well_formed_goal = gb.forall(&impl_fields, (), |gb, _, (trait_ref, where_clauses), ()| { + let interner = gb.interner(); + + // if (WC && input types are well formed) { ... } + gb.implies( + impl_wf_environment(interner, where_clauses, trait_ref), + |gb| { + // We retrieve all the input types of the where clauses appearing on the trait impl, + // e.g. in: + // ``` + // impl Foo for (T, K) where T: Iterator, Vec>)> { ... } + // ``` + // we would retrieve `HashSet`, `Box`, `Vec>`, `(HashSet, Vec>)`. 
+ // We will have to prove that these types are well-formed (e.g. an additional `K: Hash` + // bound would be needed here). + let types = InputTypeCollector::types_in(gb.interner(), &where_clauses); + + // Things to prove well-formed: input types of the where-clauses, projection types + // appearing in the header, associated type values, and of course the trait ref. + debug!(input_types=?types); + let goals = types + .into_iter() + .map(|ty| ty.well_formed().cast(interner)) + .chain(Some((*trait_ref).clone().well_formed().cast(interner))); + + gb.all::<_, Goal>(goals) + }, + ) + }); + + Some(well_formed_goal) +} + +/// Creates the conditions that an impl (and its contents of an impl) +/// can assume to be true when proving that it is well-formed. +fn impl_wf_environment<'i, I: Interner>( + interner: I, + where_clauses: &'i [QuantifiedWhereClause], + trait_ref: &'i TraitRef, +) -> impl Iterator> + 'i { + // if (WC) { ... } + let wc = where_clauses + .iter() + .cloned() + .map(move |qwc| qwc.into_from_env_goal(interner).cast(interner)); + + // We retrieve all the input types of the type on which we implement the trait: we will + // *assume* that these types are well-formed, e.g. we will be able to derive that + // `K: Hash` holds without writing any where clause. + // + // Example: + // ``` + // struct HashSet where K: Hash { ... 
} + // + // impl Foo for HashSet { + // // Inside here, we can rely on the fact that `K: Hash` holds + // } + // ``` + let types = InputTypeCollector::types_in(interner, trait_ref); + + let types_wf = types + .into_iter() + .map(move |ty| ty.into_from_env_goal(interner).cast(interner)); + + wc.chain(types_wf) +} + +/// Associated type values are special because they can be parametric (independently of +/// the impl), so we issue a special goal which is quantified using the binders of the +/// associated type value, for example in: +/// +/// ```ignore +/// trait Foo { +/// type Item<'a>: Clone where Self: 'a +/// } +/// +/// impl Foo for Box { +/// type Item<'a> = Box<&'a T>; +/// } +/// ``` +/// +/// we would issue the following subgoal: `forall<'a> { WellFormed(Box<&'a T>) }`. +/// +/// Note that there is no binder for `T` in the above: the goal we +/// generate is expected to be exected in the context of the +/// larger WF goal for the impl, which already has such a +/// binder. So the entire goal for the impl might be: +/// +/// ```ignore +/// forall { +/// WellFormed(Box) /* this comes from the impl, not this routine */, +/// forall<'a> { WellFormed(Box<&'a T>) }, +/// } +/// ``` +fn compute_assoc_ty_goal( + db: &dyn RustIrDatabase, + assoc_ty_id: AssociatedTyValueId, +) -> Option> { + let mut gb = GoalBuilder::new(db); + let assoc_ty = &db.associated_ty_value(assoc_ty_id); + + // Create `forall { .. }` + Some(gb.forall( + &assoc_ty.value.map_ref(|v| &v.ty), + assoc_ty_id, + |gb, assoc_ty_substitution, value_ty, assoc_ty_id| { + let interner = gb.interner(); + let db = gb.db(); + + // Hmm, because `Arc` does not implement `TypeFoldable`, we can't pass this value through, + // just the id, so we have to fetch `assoc_ty` from the database again. + // Implementing `TypeFoldable` for `AssociatedTyValue` doesn't *quite* seem right though, as that + // would result in a deep clone, and the value is inert. 
We could do some more refatoring + // (move the `Arc` behind a newtype, for example) to fix this, but for now doesn't + // seem worth it. + let assoc_ty = &db.associated_ty_value(assoc_ty_id); + + let (impl_parameters, projection) = db + .impl_parameters_and_projection_from_associated_ty_value( + assoc_ty_substitution.as_slice(interner), + assoc_ty, + ); + + // If (/* impl WF environment */) { ... } + let impl_id = assoc_ty.impl_id; + let impl_datum = &db.impl_datum(impl_id); + let ImplDatumBound { + trait_ref: impl_trait_ref, + where_clauses: impl_where_clauses, + } = impl_datum + .binders + .clone() + .substitute(interner, impl_parameters); + let impl_wf_clauses = + impl_wf_environment(interner, &impl_where_clauses, &impl_trait_ref); + gb.implies(impl_wf_clauses, |gb| { + // Get the bounds and where clauses from the trait + // declaration, substituted appropriately. + // + // From our example: + // + // * bounds + // * original in trait, `Clone` + // * after substituting impl parameters, `Clone` + // * note that the self-type is not yet supplied for bounds, + // we will do that later + // * where clauses + // * original in trait, `Self: 'a` + // * after substituting impl parameters, `Box: '!a` + let assoc_ty_datum = db.associated_ty_data(projection.associated_ty_id); + let AssociatedTyDatumBound { + bounds: defn_bounds, + where_clauses: defn_where_clauses, + } = assoc_ty_datum + .binders + .clone() + .substitute(interner, &projection.substitution); + + // Create `if (/* where clauses on associated type value */) { .. }` + gb.implies( + defn_where_clauses + .iter() + .cloned() + .map(|qwc| qwc.into_from_env_goal(interner)), + |gb| { + let types = InputTypeCollector::types_in(gb.interner(), value_ty); + + // We require that `WellFormed(T)` for each type that appears in the value + let wf_goals = types + .into_iter() + .map(|ty| ty.well_formed()) + .casted(interner); + + // Check that the `value_ty` meets the bounds from the trait. 
+ // Here we take the substituted bounds (`defn_bounds`) and we + // supply the self-type `value_ty` to yield the final result. + // + // In our example, the bound was `Clone`, so the combined + // result is `Box: Clone`. This is then converted to a + // well-formed goal like `WellFormed(Box: Clone)`. + let bound_goals = defn_bounds + .iter() + .cloned() + .flat_map(|qb| qb.into_where_clauses(interner, (*value_ty).clone())) + .map(|qwc| qwc.into_well_formed_goal(interner)) + .casted(interner); + + // Concatenate the WF goals of inner types + the requirements from trait + gb.all::<_, Goal>(wf_goals.chain(bound_goals)) + }, + ) + }) + }, + )) +} + +/// Defines methods to compute well-formedness goals for well-known +/// traits (e.g. a goal for all fields of struct in a Copy impl to be Copy) +struct WfWellKnownConstraints; + +impl WfWellKnownConstraints { + /// Computes a goal to prove Sized constraints on a struct definition. + /// Struct is considered well-formed (in terms of Sized) when it either + /// has no fields or all of it's fields except the last are proven to be Sized. + pub fn struct_sized_constraint( + db: &dyn RustIrDatabase, + fields: &[Ty], + size_all: bool, + ) -> Option> { + let excluded = if size_all { 0 } else { 1 }; + + if fields.len() <= excluded { + return None; + } + + let interner = db.interner(); + + let sized_trait = db.well_known_trait_id(WellKnownTrait::Sized)?; + + Some(Goal::all( + interner, + fields[..fields.len() - excluded].iter().map(|ty| { + TraitRef { + trait_id: sized_trait, + substitution: Substitution::from1(interner, ty.clone()), + } + .cast(interner) + }), + )) + } + + /// Verify constraints on a Copy implementation. + /// Copy impl is considered well-formed for + /// a) certain builtin types (scalar values, shared ref, etc..) 
+ /// b) adts which + /// 1) have all Copy fields + /// 2) don't have a Drop impl + fn copy_impl_constraint( + solver: &mut dyn Solver, + db: &dyn RustIrDatabase, + impl_datum: &ImplDatum, + ) -> bool { + let interner = db.interner(); + + let mut gb = GoalBuilder::new(db); + + let impl_fields = impl_datum + .binders + .map_ref(|v| (&v.trait_ref, &v.where_clauses)); + + // Implementations for scalars, pointer types and never type are provided by libcore. + // User implementations on types other than ADTs are forbidden. + match impl_datum + .binders + .skip_binders() + .trait_ref + .self_type_parameter(interner) + .kind(interner) + { + TyKind::Scalar(_) + | TyKind::Raw(_, _) + | TyKind::Ref(Mutability::Not, _, _) + | TyKind::Never => return true, + + TyKind::Adt(_, _) => (), + + _ => return false, + }; + + // Well fomedness goal for ADTs + let well_formed_goal = + gb.forall(&impl_fields, (), |gb, _, (trait_ref, where_clauses), ()| { + let interner = gb.interner(); + + let ty = trait_ref.self_type_parameter(interner); + + let (adt_id, substitution) = match ty.kind(interner) { + TyKind::Adt(adt_id, substitution) => (*adt_id, substitution), + + _ => unreachable!(), + }; + + // if (WC) { ... 
} + gb.implies( + impl_wf_environment(interner, where_clauses, trait_ref), + |gb| -> Goal { + let db = gb.db(); + + // not { Implemented(ImplSelfTy: Drop) } + let neg_drop_goal = + db.well_known_trait_id(WellKnownTrait::Drop) + .map(|drop_trait_id| { + TraitRef { + trait_id: drop_trait_id, + substitution: Substitution::from1(interner, ty.clone()), + } + .cast::>(interner) + .negate(interner) + }); + + let adt_datum = db.adt_datum(adt_id); + + let goals = adt_datum + .binders + .map_ref(|b| &b.variants) + .cloned() + .substitute(interner, substitution) + .into_iter() + .flat_map(|v| { + v.fields.into_iter().map(|f| { + // Implemented(FieldTy: Copy) + TraitRef { + trait_id: trait_ref.trait_id, + substitution: Substitution::from1(interner, f), + } + .cast(interner) + }) + }) + .chain(neg_drop_goal.into_iter()); + gb.all(goals) + }, + ) + }); + + solver.has_unique_solution(db, &well_formed_goal.into_closed_goal(interner)) + } + + /// Verifies constraints on a Drop implementation + /// Drop implementation is considered well-formed if: + /// a) it's implemented on an ADT + /// b) The generic parameters of the impl's type must all be parameters + /// of the Drop impl itself (i.e., no specialization like + /// `impl Drop for S {...}` is allowed). + /// c) Any bounds on the genereic parameters of the impl must be + /// deductible from the bounds imposed by the struct definition + /// (i.e. the implementation must be exactly as generic as the ADT definition). 
+ /// + /// ```rust,ignore + /// struct S { } + /// struct Foo { } + /// + /// impl Drop for S> { } + /// ``` + /// + /// generates the following: + /// goal derived from c): + /// + /// ```notrust + /// forall { + /// Implemented(U1: Copy), Implemented(U2: Sized) :- FromEnv(S>) + /// } + /// ``` + /// + /// goal derived from b): + /// ```notrust + /// forall { + /// exists { + /// S = S> + /// } + /// } + /// ``` + fn drop_impl_constraint( + solver: &mut dyn Solver, + db: &dyn RustIrDatabase, + impl_datum: &ImplDatum, + ) -> bool { + let interner = db.interner(); + + let adt_id = match impl_datum.self_type_adt_id(interner) { + Some(id) => id, + // Drop can only be implemented on a nominal type + None => return false, + }; + + let mut gb = GoalBuilder::new(db); + + let adt_datum = db.adt_datum(adt_id); + + let impl_fields = impl_datum + .binders + .map_ref(|v| (&v.trait_ref, &v.where_clauses)); + + // forall { .. } + let implied_by_adt_def_goal = + gb.forall(&impl_fields, (), |gb, _, (trait_ref, where_clauses), ()| { + let interner = gb.interner(); + + // FromEnv(ImplSelfType) => ... + gb.implies( + iter::once( + FromEnv::Ty(trait_ref.self_type_parameter(interner)) + .cast::>(interner), + ), + |gb| { + // All(ImplWhereClauses) + gb.all( + where_clauses + .iter() + .map(|wc| wc.clone().into_well_formed_goal(interner)), + ) + }, + ) + }); + + let impl_self_ty = impl_datum + .binders + .map_ref(|b| b.trait_ref.self_type_parameter(interner)); + + // forall {...} + let eq_goal = gb.forall( + &adt_datum.binders, + (adt_id, impl_self_ty), + |gb, substitution, _, (adt_id, impl_self_ty)| { + let interner = gb.interner(); + + let def_adt = TyKind::Adt(adt_id, substitution).intern(interner); + + // exists { .. 
} + gb.exists(&impl_self_ty, def_adt, |gb, _, impl_adt, def_adt| { + let interner = gb.interner(); + + // StructName = ImplSelfType + GoalData::EqGoal(EqGoal { + a: GenericArgData::Ty(def_adt).intern(interner), + b: GenericArgData::Ty(impl_adt.clone()).intern(interner), + }) + .intern(interner) + }) + }, + ); + + let well_formed_goal = gb.all([implied_by_adt_def_goal, eq_goal].iter()); + + solver.has_unique_solution(db, &well_formed_goal.into_closed_goal(interner)) + } + + /// Verify constraints a CoerceUnsized impl. + /// Rules for CoerceUnsized impl to be considered well-formed: + /// 1) pointer conversions: `&[mut] T -> &[mut] U`, `&[mut] T -> *[mut] U`, + /// `*[mut] T -> *[mut] U` are considered valid if + /// 1) `T: Unsize` + /// 2) mutability is respected, i.e. immutable -> immutable, mutable -> immutable, + /// mutable -> mutable conversions are allowed, immutable -> mutable is not. + /// 2) struct conversions of structures with the same definition, `S` -> `S`. + /// To check if this impl is legal, we would walk down the fields of `S` + /// and consider their types with both substitutes. We are looking to find + /// exactly one (non-phantom) field that has changed its type (from `T` to `U`), and + /// expect `T` to be unsizeable to `U`, i.e. `T: CoerceUnsized`. + /// + /// As an example, consider a struct + /// ```rust + /// struct Foo { + /// extra: T, + /// ptr: *mut U, + /// } + /// ``` + /// + /// We might have an impl that allows (e.g.) `Foo` to be unsized + /// to `Foo`. That impl would look like: + /// ```rust,ignore + /// impl, V> CoerceUnsized> for Foo {} + /// ``` + /// In this case: + /// + /// - `extra` has type `T` before and type `T` after + /// - `ptr` has type `*mut U` before and type `*mut V` after + /// + /// Since just one field changed, we would then check that `*mut U: CoerceUnsized<*mut V>` + /// is implemented. 
This will work out because `U: Unsize`, and we have a libcore rule + /// that `*mut U` can be coerced to `*mut V` if `U: Unsize`. + fn coerce_unsized_impl_constraint( + solver: &mut dyn Solver, + db: &dyn RustIrDatabase, + impl_datum: &ImplDatum, + ) -> bool { + let interner = db.interner(); + let mut gb = GoalBuilder::new(db); + + let (binders, impl_datum) = impl_datum.binders.as_ref().into(); + + let trait_ref: &TraitRef = &impl_datum.trait_ref; + + let source = trait_ref.self_type_parameter(interner); + let target = trait_ref + .substitution + .at(interner, 1) + .assert_ty_ref(interner) + .clone(); + + let mut place_in_environment = |goal| -> Goal { + gb.forall( + &Binders::new( + binders.clone(), + (goal, trait_ref, &impl_datum.where_clauses), + ), + (), + |gb, _, (goal, trait_ref, where_clauses), ()| { + let interner = gb.interner(); + gb.implies( + impl_wf_environment(interner, where_clauses, trait_ref), + |_| goal, + ) + }, + ) + }; + + match (source.kind(interner), target.kind(interner)) { + (TyKind::Ref(s_m, _, source), TyKind::Ref(t_m, _, target)) + | (TyKind::Ref(s_m, _, source), TyKind::Raw(t_m, target)) + | (TyKind::Raw(s_m, source), TyKind::Raw(t_m, target)) => { + if (*s_m, *t_m) == (Mutability::Not, Mutability::Mut) { + return false; + } + + let unsize_trait_id = + if let Some(id) = db.well_known_trait_id(WellKnownTrait::Unsize) { + id + } else { + return false; + }; + + // Source: Unsize + let unsize_goal: Goal = TraitRef { + trait_id: unsize_trait_id, + substitution: Substitution::from_iter( + interner, + [source.clone(), target.clone()].iter().cloned(), + ), + } + .cast(interner); + + // ImplEnv -> Source: Unsize + let unsize_goal = place_in_environment(unsize_goal); + + solver.has_unique_solution(db, &unsize_goal.into_closed_goal(interner)) + } + (TyKind::Adt(source_id, subst_a), TyKind::Adt(target_id, subst_b)) => { + let adt_datum = db.adt_datum(*source_id); + + if source_id != target_id || adt_datum.kind != AdtKind::Struct { + return false; + 
} + + let fields = adt_datum + .binders + .map_ref(|bound| &bound.variants.last().unwrap().fields) + .cloned(); + + let (source_fields, target_fields) = ( + fields.clone().substitute(interner, subst_a), + fields.substitute(interner, subst_b), + ); + + // collect fields with unequal ids + let uneq_field_ids: Vec = (0..source_fields.len()) + .filter(|&i| { + // ignore phantom data fields + if let Some(adt_id) = source_fields[i].adt_id(interner) { + if db.adt_datum(adt_id).flags.phantom_data { + return false; + } + } + + let eq_goal: Goal = EqGoal { + a: source_fields[i].clone().cast(interner), + b: target_fields[i].clone().cast(interner), + } + .cast(interner); + + // ImplEnv -> Source.fields[i] = Target.fields[i] + let eq_goal = place_in_environment(eq_goal); + + // We are interested in !UNEQUAL! fields + !solver.has_unique_solution(db, &eq_goal.into_closed_goal(interner)) + }) + .collect(); + + if uneq_field_ids.len() != 1 { + return false; + } + + let field_id = uneq_field_ids[0]; + + // Source.fields[i]: CoerceUnsized + let coerce_unsized_goal: Goal = TraitRef { + trait_id: trait_ref.trait_id, + substitution: Substitution::from_iter( + interner, + [ + source_fields[field_id].clone(), + target_fields[field_id].clone(), + ] + .iter() + .cloned(), + ), + } + .cast(interner); + + // ImplEnv -> Source.fields[i]: CoerceUnsized + let coerce_unsized_goal = place_in_environment(coerce_unsized_goal); + + solver.has_unique_solution(db, &coerce_unsized_goal.into_closed_goal(interner)) + } + _ => false, + } + } + + /// Verify constraints of a DispatchFromDyn impl. 
+ /// + /// Rules for DispatchFromDyn impl to be considered well-formed: + /// + /// * Self and the type parameter must both be references or raw pointers with the same mutabilty + /// * OR all the following hold: + /// - Self and the type parameter must be structs + /// - Self and the type parameter must have the same definitions + /// - Self must not be `#[repr(packed)]` or `#[repr(C)]` + /// - Self must have exactly one field which is not a 1-ZST (there may be any number of 1-ZST + /// fields), and that field must have a different type in the type parameter (i.e., it is + /// the field being coerced) + /// - `DispatchFromDyn` is implemented for the type of the field being coerced. + fn dispatch_from_dyn_constraint( + solver: &mut dyn Solver, + db: &dyn RustIrDatabase, + impl_datum: &ImplDatum, + ) -> bool { + let interner = db.interner(); + let mut gb = GoalBuilder::new(db); + + let (binders, impl_datum) = impl_datum.binders.as_ref().into(); + + let trait_ref: &TraitRef = &impl_datum.trait_ref; + + // DispatchFromDyn specifies that Self (source) can be coerced to T (target; its single type parameter). + let source = trait_ref.self_type_parameter(interner); + let target = trait_ref + .substitution + .at(interner, 1) + .assert_ty_ref(interner) + .clone(); + + let mut place_in_environment = |goal| -> Goal { + gb.forall( + &Binders::new( + binders.clone(), + (goal, trait_ref, &impl_datum.where_clauses), + ), + (), + |gb, _, (goal, trait_ref, where_clauses), ()| { + let interner = gb.interner(); + gb.implies( + impl_wf_environment(interner, &where_clauses, &trait_ref), + |_| goal, + ) + }, + ) + }; + + match (source.kind(interner), target.kind(interner)) { + (TyKind::Ref(s_m, _, _), TyKind::Ref(t_m, _, _)) + | (TyKind::Raw(s_m, _), TyKind::Raw(t_m, _)) + if s_m == t_m => + { + true + } + (TyKind::Adt(source_id, subst_a), TyKind::Adt(target_id, subst_b)) => { + let adt_datum = db.adt_datum(*source_id); + + // Definitions are equal and are structs. 
+ if source_id != target_id || adt_datum.kind != AdtKind::Struct { + return false; + } + + // Not repr(C) or repr(packed). + let repr = db.adt_repr(*source_id); + if repr.c || repr.packed { + return false; + } + + // Collect non 1-ZST fields; there must be exactly one. + let fields = adt_datum + .binders + .map_ref(|bound| &bound.variants.last().unwrap().fields) + .cloned(); + + let (source_fields, target_fields) = ( + fields.clone().substitute(interner, subst_a), + fields.substitute(interner, subst_b), + ); + + let mut non_zst_fields: Vec<_> = source_fields + .iter() + .zip(target_fields.iter()) + .filter(|(sf, _)| match sf.adt_id(interner) { + Some(adt) => !db.adt_size_align(adt).one_zst(), + None => true, + }) + .collect(); + + if non_zst_fields.len() != 1 { + return false; + } + + // The field being coerced (the interesting field). + let (field_src, field_tgt) = non_zst_fields.pop().unwrap(); + + // The interesting field is different in the source and target types. + let eq_goal: Goal = EqGoal { + a: field_src.clone().cast(interner), + b: field_tgt.clone().cast(interner), + } + .cast(interner); + let eq_goal = place_in_environment(eq_goal); + if solver.has_unique_solution(db, &eq_goal.into_closed_goal(interner)) { + return false; + } + + // Type(field_src): DispatchFromDyn + let field_dispatch_goal: Goal = TraitRef { + trait_id: trait_ref.trait_id, + substitution: Substitution::from_iter( + interner, + [field_src.clone(), field_tgt.clone()].iter().cloned(), + ), + } + .cast(interner); + let field_dispatch_goal = place_in_environment(field_dispatch_goal); + if !solver.has_unique_solution(db, &field_dispatch_goal.into_closed_goal(interner)) + { + return false; + } + + true + } + _ => false, + } + } +} diff --git a/libstd.chalk b/libstd.chalk index c117e2279c2..c94b6047924 100644 --- a/libstd.chalk +++ b/libstd.chalk @@ -10,12 +10,10 @@ trait Clone { } trait Copy where Self: Clone { } trait Sized { } -struct i32 { } impl Copy for i32 { } impl Clone for i32 { } 
impl Sized for i32 { } -struct u32 { } impl Copy for u32 { } impl Clone for u32 { } impl Sized for u32 { } @@ -32,7 +30,6 @@ impl Sized for Box { } // Meant to be [T] struct Slice where T: Sized { } -impl Sized for Slice { } impl AsRef> for Slice where T: Sized { } struct Vec where T: Sized { } diff --git a/releases-template.hbs b/releases-template.hbs new file mode 100644 index 00000000000..531443d827b --- /dev/null +++ b/releases-template.hbs @@ -0,0 +1,29 @@ +### Changelog + +All notable changes to this project will be documented in this file. Dates are displayed in UTC. + +{{#unless options.hideCredit}} + Generated by [`auto-changelog`](https://github.com/CookPete/auto-changelog) + (Note: versions before 0.11.0 were manually generated). +{{/unless}} + +{{#each releases}} + {{#if href}} + ###{{#unless major}}#{{/unless}} [{{title}}]({{href}}) + {{else}} + #### {{title}} + {{/if}} + + {{#if tag}} + > {{niceDate}} + {{/if}} + + {{#if summary}} + {{summary}} + {{/if}} + + {{#each merges}} + - {{#if commit.breaking}}**Breaking change:** {{/if}}{{message}}{{#if href}} [`#{{id}}`]({{href}}){{/if}} + {{/each}} + +{{/each}} diff --git a/rust-toolchain b/rust-toolchain deleted file mode 100644 index bf867e0ae5b..00000000000 --- a/rust-toolchain +++ /dev/null @@ -1 +0,0 @@ -nightly diff --git a/src/coherence.rs b/src/coherence.rs deleted file mode 100644 index 4f09ca35201..00000000000 --- a/src/coherence.rs +++ /dev/null @@ -1,65 +0,0 @@ -use petgraph::prelude::*; - -use errors::Result; -use chalk_ir::{self, ItemId}; -use rust_ir::Program; -use chalk_solve::solve::SolverChoice; -use std::sync::Arc; - -mod solve; -mod orphan; -mod test; - -impl Program { - crate fn record_specialization_priorities(&mut self, solver_choice: SolverChoice) -> Result<()> { - chalk_ir::tls::set_current_program(&Arc::new(self.clone()), || { - let forest = self.build_specialization_forest(solver_choice)?; - - // Visit every root in the forest & set specialization - // 
priority for the tree that is the root of. - for root_idx in forest.externals(Direction::Incoming) { - self.set_priorities(root_idx, &forest, 0); - } - - Ok(()) - }) - } - - // Build the forest of specialization relationships. - fn build_specialization_forest( - &self, - solver_choice: SolverChoice, - ) -> Result> { - // The forest is returned as a graph but built as a GraphMap; this is - // so that we never add multiple nodes with the same ItemId. - let mut forest = DiGraphMap::new(); - - // Find all specializations (implemented in coherence/solve) - // Record them in the forest by adding an edge from the less special - // to the more special. - self.visit_specializations(solver_choice, |less_special, more_special| { - forest.add_edge(less_special, more_special, ()); - })?; - - Ok(forest.into_graph()) - } - - // Recursively set priorities for those node and all of its children. - fn set_priorities(&mut self, idx: NodeIndex, forest: &Graph, p: usize) { - // Get the impl datum recorded at this node and reset its priority - { - let impl_id = forest - .node_weight(idx) - .expect("index should be a valid index into graph"); - let impl_datum = self.impl_data - .get_mut(impl_id) - .expect("node should be valid impl id"); - impl_datum.binders.value.specialization_priority = p; - } - - // Visit all children of this node, setting their priority to this + 1 - for child_idx in forest.neighbors(idx) { - self.set_priorities(child_idx, forest, p + 1) - } - } -} diff --git a/src/coherence/forest.rs b/src/coherence/forest.rs deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/src/coherence/orphan.rs b/src/coherence/orphan.rs deleted file mode 100644 index f6c9ce330e5..00000000000 --- a/src/coherence/orphan.rs +++ /dev/null @@ -1,63 +0,0 @@ -use std::sync::Arc; - -use errors::*; -use chalk_ir::*; -use rust_ir::*; -use chalk_ir::cast::*; -use chalk_solve::ext::*; -use chalk_solve::solve::SolverChoice; - -struct OrphanSolver { - env: Arc, - solver_choice: 
SolverChoice, -} - -impl Program { - crate fn perform_orphan_check(&self, solver_choice: SolverChoice) -> Result<()> { - let solver = OrphanSolver { - env: Arc::new(self.environment()), - solver_choice, - }; - - let local_impls = self.impl_data - .values() - // Only keep local impls (i.e. impls in the current crate) - .filter(|impl_datum| impl_datum.binders.value.impl_type == ImplType::Local); - - for impl_datum in local_impls { - if !solver.orphan_check(impl_datum) { - let trait_id = impl_datum.binders.value.trait_ref.trait_ref().trait_id; - let trait_id = self.type_kinds.get(&trait_id).unwrap().name; - return Err(Error::from_kind(ErrorKind::FailedOrphanCheck(trait_id))); - } - } - - Ok(()) - } -} - -impl OrphanSolver { - // Test if a local impl violates the orphan rules. - // - // For `impl Trait for MyType` we generate: - // - // forall { LocalImplAllowed(MyType: Trait) } - // - // This must be provable in order to pass the orphan check. - fn orphan_check(&self, impl_datum: &ImplDatum) -> bool { - debug_heading!("orphan_check(impl={:#?})", impl_datum); - - let impl_allowed: Goal = impl_datum.binders.map_ref(|bound_impl| { - // Ignoring the polarization of the impl's polarized trait ref - DomainGoal::LocalImplAllowed(bound_impl.trait_ref.trait_ref().clone()) - }).cast(); - - let canonical_goal = &impl_allowed.into_closed_goal(); - let result = self.solver_choice - .solve_root_goal(&self.env, canonical_goal) - .unwrap() - .is_some(); - debug!("overlaps: result = {:?}", result); - result - } -} diff --git a/src/coherence/solve.rs b/src/coherence/solve.rs deleted file mode 100644 index 14fc7927514..00000000000 --- a/src/coherence/solve.rs +++ /dev/null @@ -1,261 +0,0 @@ -use std::sync::Arc; - -use chalk_ir::fold::shift::Shift; -use itertools::Itertools; -use errors::*; -use chalk_ir::*; -use rust_ir::*; -use chalk_ir::cast::*; -use chalk_solve::ext::*; -use chalk_solve::solve::{SolverChoice, Solution}; - -struct DisjointSolver { - env: Arc, - solver_choice: 
SolverChoice, -} - -impl Program { - pub(super) fn visit_specializations( - &self, - solver_choice: SolverChoice, - mut record_specialization: F, - ) -> Result<()> - where - F: FnMut(ItemId, ItemId), - { - let mut solver = DisjointSolver { - env: Arc::new(self.environment()), - solver_choice, - }; - - // Create a vector of references to impl datums, sorted by trait ref. - let impl_data = self.impl_data - .iter() - .filter(|&(_, impl_datum)| { - // Ignore impls for marker traits as they are allowed to overlap. - let trait_id = impl_datum.binders.value.trait_ref.trait_ref().trait_id; - let trait_datum = &self.trait_data[&trait_id]; - !trait_datum.binders.value.flags.marker - }) - .sorted_by(|&(_, lhs), &(_, rhs)| { - lhs.binders - .value - .trait_ref - .trait_ref() - .trait_id - .cmp(&rhs.binders.value.trait_ref.trait_ref().trait_id) - }); - - // Group impls by trait. - let impl_groupings = impl_data.into_iter().group_by(|&(_, impl_datum)| { - impl_datum.binders.value.trait_ref.trait_ref().trait_id - }); - - - // Iterate over every pair of impls for the same trait. - for (trait_id, impls) in &impl_groupings { - let impls: Vec<(&ItemId, &ImplDatum)> = impls.collect(); - - for ((&l_id, lhs), (&r_id, rhs)) in impls.into_iter().tuple_combinations() { - // Two negative impls never overlap. - if !lhs.binders.value.trait_ref.is_positive() - && !rhs.binders.value.trait_ref.is_positive() - { - continue; - } - - // Check if the impls overlap, then if they do, check if one specializes - // the other. Note that specialization can only run one way - if both - // specialization checks return *either* true or false, that's an error. 
- if !solver.disjoint(lhs, rhs) { - match (solver.specializes(lhs, rhs), solver.specializes(rhs, lhs)) { - (true, false) => record_specialization(l_id, r_id), - (false, true) => record_specialization(r_id, l_id), - (_, _) => { - let trait_id = self.type_kinds.get(&trait_id).unwrap().name; - return Err(Error::from_kind(ErrorKind::OverlappingImpls(trait_id))); - } - } - } - } - } - - Ok(()) - } -} - -impl DisjointSolver { - // Test if the set of types that these two impls apply to overlap. If the test succeeds, these - // two impls are disjoint. - // - // We combine the binders of the two impls & treat them as existential quantifiers. Then we - // attempt to unify the input types to the trait provided by each impl, as well as prove that - // the where clauses from both impls all hold. At the end, we apply the `compatible` modality - // and negate the query. Negating the query means that we are asking chalk to prove that no - // such overlapping impl exists. By applying `compatible { G }`, chalk attempts to prove that - // "there exists a compatible world where G is provable." When we negate compatible, it turns - // into the statement "for all compatible worlds, G is not provable." This is exactly what we - // want since we want to ensure that there is no overlap in *all* compatible worlds, not just - // that there is no overlap in *some* compatible world. 
- // - // Examples: - // - // Impls: - // impl Foo for T { } - // impl Foo for i32 { } - // Generates: - // not { compatible { exists { T = i32 } } } - // - // Impls: - // impl Foo for Vec { } - // impl Foo for Vec { } - // Generates: - // not { compatible { exists { Vec = Vec, T1 = T2 } } } - // - // Impls: - // impl Foo for Vec where T: Bar { } - // impl Foo for Vec where U: Baz { } - // Generates: - // not { compatible { exists { Vec = Vec, T: Bar, U: Baz } } } - // - fn disjoint(&self, lhs: &ImplDatum, rhs: &ImplDatum) -> bool { - debug_heading!("overlaps(lhs={:#?}, rhs={:#?})", lhs, rhs); - - let lhs_len = lhs.binders.len(); - - // Join the two impls' binders together - let mut binders = lhs.binders.binders.clone(); - binders.extend(rhs.binders.binders.clone()); - - // Upshift the rhs variables in params to account for the joined binders - let lhs_params = params(lhs).iter().cloned(); - let rhs_params = params(rhs).iter().map(|param| param.shifted_in(lhs_len)); - - // Create an equality goal for every input type the trait, attempting - // to unify the inputs to both impls with one another - let params_goals = lhs_params - .zip(rhs_params) - .map(|(a, b)| Goal::Leaf(LeafGoal::EqGoal(EqGoal { a, b }))); - - // Upshift the rhs variables in where clauses - let lhs_where_clauses = lhs.binders.value.where_clauses.iter().cloned(); - let rhs_where_clauses = rhs.binders - .value - .where_clauses - .iter() - .map(|wc| wc.shifted_in(lhs_len)); - - // Create a goal for each clause in both where clauses - let wc_goals = lhs_where_clauses - .chain(rhs_where_clauses) - .map(|wc| wc.cast()); - - // Join all the goals we've created together with And, then quantify them - // over the joined binders. This is our query. 
- let goal = params_goals - .chain(wc_goals) - .fold1(|goal, leaf| Goal::And(Box::new(goal), Box::new(leaf))) - .expect("Every trait takes at least one input type") - .quantify(QuantifierKind::Exists, binders) - .compatible() - .negate(); - - let canonical_goal = &goal.into_closed_goal(); - let solution = self.solver_choice - .solve_root_goal(&self.env, canonical_goal) - .unwrap(); // internal errors in the solver are fatal - let result = match solution { - // Goal was proven with a unique solution, so no impl was found that causes these two - // to overlap - Some(Solution::Unique(_)) => true, - // Goal was ambiguous, so there *may* be overlap - Some(Solution::Ambig(_)) | - // Goal cannot be proven, so there is some impl that causes overlap - None => false, - }; - debug!("overlaps: result = {:?}", result); - result - } - - // Test for specialization. - // - // If this test suceeds, the second impl specializes the first. - // - // Example lowering: - // - // more: impl Foo for Vec - // less: impl Foo for U - // - // forall { - // if (T: Clone) { - // exists { - // Vec = U, U: Clone - // } - // } - // } - fn specializes(&mut self, less_special: &ImplDatum, more_special: &ImplDatum) -> bool { - debug_heading!( - "specializes(less_special={:#?}, more_special={:#?})", - less_special, - more_special - ); - - // Negative impls cannot specialize. - if !less_special.binders.value.trait_ref.is_positive() - || !more_special.binders.value.trait_ref.is_positive() - { - return false; - } - - let more_len = more_special.binders.len(); - - // Create parameter equality goals. - let more_special_params = params(more_special).iter().cloned(); - let less_special_params = params(less_special).iter().map(|p| p.shifted_in(more_len)); - let params_goals = more_special_params - .zip(less_special_params) - .map(|(a, b)| Goal::Leaf(LeafGoal::EqGoal(EqGoal { a, b }))); - - // Create the where clause goals. 
- let more_special_wc = more_special - .binders - .value - .where_clauses - .iter() - .cloned() - .casted() - .collect(); - let less_special_wc = less_special - .binders - .value - .where_clauses - .iter() - .map(|wc| wc.shifted_in(more_len).cast()); - - // Join all of the goals together. - let goal = params_goals - .chain(less_special_wc) - .fold1(|goal, leaf| Goal::And(Box::new(goal), Box::new(leaf))) - .expect("Every trait takes at least one input type") - .quantify(QuantifierKind::Exists, less_special.binders.binders.clone()) - .implied_by(more_special_wc) - .quantify(QuantifierKind::ForAll, more_special.binders.binders.clone()); - - let canonical_goal = &goal.into_closed_goal(); - let result = match self.solver_choice - .solve_root_goal(&self.env, canonical_goal) - .unwrap() - { - Some(sol) => sol.is_unique(), - None => false, - }; - - debug!("specializes: result = {:?}", result); - - result - } -} - -fn params(impl_datum: &ImplDatum) -> &[Parameter] { - &impl_datum.binders.value.trait_ref.trait_ref().parameters -} diff --git a/src/errors.rs b/src/errors.rs deleted file mode 100644 index f6413ff7e55..00000000000 --- a/src/errors.rs +++ /dev/null @@ -1,63 +0,0 @@ -use chalk_parse::{self, ast}; -use chalk_ir; -use rust_ir; - -error_chain! 
{ - links { - Parse(chalk_parse::errors::Error, chalk_parse::errors::ErrorKind); - } - - errors { - InvalidTypeName(identifier: ast::Identifier) { - description("invalid type name") - display("invalid type name `{}`", identifier.str) - } - - CannotApplyTypeParameter(identifier: ast::Identifier) { - description("cannot apply type parameter") - display("cannot apply type parameter `{}`", identifier.str) - } - - IncorrectNumberOfTypeParameters(identifier: ast::Identifier, - expected: usize, - actual: usize) { - description("incorrect number of type parameters") - display("`{}` takes {} type parameters, not {}", identifier.str, expected, actual) - } - - NotTrait(identifier: ast::Identifier) { - description("not a trait") - display("expected a trait, found `{}`, which is not a trait", identifier.str) - } - - OverlappingImpls(trait_id: chalk_ir::Identifier) { - description("overlapping impls") - display("overlapping impls of trait {:?}", trait_id) - } - - IllFormedTypeDecl(ty_id: chalk_ir::Identifier) { - description("ill-formed type declaration") - display("type declaration {:?} does not meet well-formedness requirements", ty_id) - } - - IllFormedTraitImpl(trait_id: chalk_ir::Identifier) { - description("ill-formed trait impl") - display("trait impl for {:?} does not meet well-formedness requirements", trait_id) - } - - CouldNotMatch { - description("could not match") - display("could not match") - } - - DuplicateLangItem(item: rust_ir::LangItem) { - description("Duplicate lang item") - display("Duplicate lang item `{:?}`", item) - } - - FailedOrphanCheck(trait_id: chalk_ir::Identifier) { - description("impl violates the orphan rules") - display("impl for trait {:?} violates the orphan rules", trait_id) - } - } -} diff --git a/src/lib.rs b/src/lib.rs deleted file mode 100644 index 50cd1ee9fe5..00000000000 --- a/src/lib.rs +++ /dev/null @@ -1,36 +0,0 @@ -#![recursion_limit = "1024"] -#![cfg_attr(test, feature(test))] -#![feature(crate_visibility_modifier)] 
-#![feature(in_band_lifetimes)] -#![feature(macro_at_most_once_rep)] -#![feature(specialization)] -#![feature(step_trait)] -#![feature(underscore_imports)] -#![feature(non_modrs_mods)] - -extern crate chalk_parse; -#[macro_use] -extern crate chalk_macros; -extern crate chalk_engine; -#[macro_use] -extern crate chalk_ir; -extern crate chalk_solve; -extern crate diff; -#[macro_use] -extern crate error_chain; -extern crate itertools; -extern crate lalrpop_intern; -extern crate petgraph; -extern crate stacker; - -#[macro_use] -mod test_util; - -pub mod rust_ir; - -crate mod coherence; -crate mod rules; -pub mod errors; - -mod test; - diff --git a/src/bin/chalki.rs b/src/main.rs similarity index 51% rename from src/bin/chalki.rs rename to src/main.rs index 066ac3357a5..0b900bbbd96 100644 --- a/src/bin/chalki.rs +++ b/src/main.rs @@ -1,86 +1,112 @@ -extern crate chalk; -extern crate chalk_engine; -extern crate chalk_ir; -extern crate chalk_solve; -extern crate chalk_parse; -extern crate docopt; -extern crate rustyline; - #[macro_use] extern crate serde_derive; -#[macro_use] -extern crate error_chain; - -use std::io::Read; use std::fs::File; -use std::sync::Arc; +use std::io::Read; use std::process::exit; -use chalk::rust_ir; -use chalk::rust_ir::lowering::*; +use chalk_integration::db::ChalkDatabase; +use chalk_integration::interner::ChalkIr; +use chalk_integration::lowering::*; +use chalk_integration::query::LoweringDatabase; +use chalk_integration::SolverChoice; use chalk_solve::ext::*; -use chalk_solve::solve::SolverChoice; -use chalk_engine::fallible::NoSolution; +use chalk_solve::logging; +use chalk_solve::RustIrDatabase; use docopt::Docopt; use rustyline::error::ReadlineError; -const USAGE: &'static str = " +type Result = std::result::Result>; + +const USAGE: &str = " chalk repl Usage: - chalki [options] - chalki (-h | --help) + chalk [options] + chalk (-h | --help) Options: --help Show this screen. 
--program=PATH Specifies the path to the `.chalk` file containing traits/impls. --goal=GOAL Specifies a goal to evaluate (may be given more than once). --overflow-depth=N Specifies the overflow depth [default: 10]. - --no-cache Disable caching. + --multiple Output multiple answers instead of ambiguous solution. + --solver=S Specifies the solver to use. `slg` or `recursive`. Default is SLG. "; /// This struct represents the various command line options available. #[derive(Debug, Deserialize)] struct Args { flag_program: Option, + flag_solver: Option, flag_goal: Vec, flag_overflow_depth: usize, - flag_no_cache: bool, -} - -error_chain! { - links { - Parse(chalk_parse::errors::Error, chalk_parse::errors::ErrorKind); - Chalk(chalk::errors::Error, chalk::errors::ErrorKind); - } - - foreign_links { - Io(::std::io::Error); - Rustyline(ReadlineError); - } + flag_multiple: bool, } /// A loaded and parsed program. -struct Program { +struct LoadedProgram { text: String, - ir: Arc, - env: Arc, + db: ChalkDatabase, } -impl Program { +impl LoadedProgram { /// Creates a new Program struct, given a `.chalk` file as a String and /// a [`SolverChoice`]. /// /// [`SolverChoice`]: struct.solve.SolverChoice.html - fn new(text: String, solver_choice: SolverChoice) -> Result { - let ir = Arc::new(chalk_parse::parse_program(&text)?.lower(solver_choice)?); - let env = Arc::new(ir.environment()); - Ok(Program { text, ir, env }) + fn new(text: String, solver_choice: SolverChoice) -> Result { + let db = ChalkDatabase::with(&text, solver_choice); + Ok(LoadedProgram { text, db }) } -} -quick_main!(run); + /// Parse a goal and attempt to solve it, using the specified solver. 
+ fn goal( + &self, + mut rl: Option<&mut rustyline::DefaultEditor>, + text: &str, + multiple_answers: bool, + ) -> Result<()> { + let program = self.db.checked_program()?; + let goal = lower_goal(&*chalk_parse::parse_goal(text)?, &*program)?; + let peeled_goal = goal.into_peeled_goal(self.db.interner()); + if multiple_answers { + let no_more_solutions = self.db.solve_multiple(&peeled_goal, &mut |v, has_next| { + println!("{}\n", v.as_ref().map(|v| v.display(ChalkIr))); + if has_next { + if let Some(ref mut rl) = rl { + loop { + if let Ok(next) = rl.readline("Show next answer (y/n): ") { + if "y" == next { + return true; + } else if "n" == next { + return false; + } else { + println!("Unknown response. Try again."); + } + } else { + return false; + } + } + } else { + true + } + } else { + true + } + }); + if no_more_solutions { + println!("No more solutions"); + } + } else { + match self.db.solve(&peeled_goal) { + Some(v) => println!("{}\n", v.display(ChalkIr)), + None => println!("No possible solution.\n"), + } + } + Ok(()) + } +} fn run() -> Result<()> { // Parse the command line arguments. @@ -108,7 +134,7 @@ fn run() -> Result<()> { if args.flag_goal.is_empty() { // The user specified no goal. Enter interactive mode. - readline_loop(&mut rustyline::Editor::new(), "?- ", |rl, line| { + readline_loop(&mut rustyline::Editor::new()?, "?- ", |rl, line| { if let Err(e) = process(args, line, rl, &mut prog) { eprintln!("error: {}", e); } @@ -122,9 +148,9 @@ fn run() -> Result<()> { // Evaluate the goal(s). If any goal returns an error, print the error // and exit. 
- chalk_ir::tls::set_current_program(&prog.ir, || -> Result<()> { + prog.db.with_program(|_| -> Result<()> { for g in &args.flag_goal { - if let Err(e) = goal(&args, g, &prog) { + if let Err(e) = prog.goal(None, g, args.flag_multiple) { eprintln!("error: {}", e); exit(1); } @@ -141,15 +167,15 @@ fn run() -> Result<()> { /// /// The loop terminates (and the program ends) when EOF is reached or if an error /// occurs while reading the next line. -fn readline_loop(rl: &mut rustyline::Editor<()>, prompt: &str, mut f: F) -> Result<()> +fn readline_loop(rl: &mut rustyline::DefaultEditor, prompt: &str, mut f: F) -> Result<()> where - F: FnMut(&mut rustyline::Editor<()>, &str), + F: FnMut(&mut rustyline::DefaultEditor, &str), { loop { match rl.readline(prompt) { Ok(line) => { // Save the line to the history list. - rl.add_history_entry(&line); + let _ = rl.add_history_entry(&line); // Process the line. f(rl, &line); @@ -158,8 +184,8 @@ where // EOF: We're done. Err(ReadlineError::Eof) => break, - // Some other error occured. - Err(e) => Err(e)?, + // Some other error occurred. + Err(e) => return Err(e.into()), } } @@ -173,45 +199,54 @@ where fn process( args: &Args, command: &str, - rl: &mut rustyline::Editor<()>, - prog: &mut Option, + rl: &mut rustyline::DefaultEditor, + prog: &mut Option, ) -> Result<()> { - if command == "help" || command == "h" { + if command.is_empty() { + // Ignore empty commands. + } else if command == "help" || command == "h" { // Print out interpreter commands. // TODO: Implement "help " for more specific help. help() } else if command == "program" { // Load a .chalk file via stdin, until EOF is found. - *prog = Some(Program::new(read_program(rl)?, args.solver_choice())?); - } else if command.starts_with("load ") { + let chalk_prog = LoadedProgram::new(read_program(rl)?, args.solver_choice())?; + // Let's do a sanity check before going forward. 
+ let _ = chalk_prog.db.checked_program()?; + *prog = Some(chalk_prog); + } else if let Some(filename) = command.strip_prefix("load ") { // Load a .chalk file. - let filename = &command["load ".len()..]; - *prog = Some(load_program(args, filename)?); - } else if command.starts_with("debug ") { - match command.split_whitespace().nth(1) { - Some(level) => std::env::set_var("CHALK_DEBUG", level), - None => println!("debug set debug level to "), + let chalk_prog = load_program(args, filename)?; + // Let's do a sanity check before going forward. + let _ = chalk_prog.db.checked_program()?; + *prog = Some(chalk_prog); + } else if let Some(level) = command.strip_prefix("debug ") { + if level.is_empty() { + println!("debug set debug level to "); + } else { + std::env::set_var("CHALK_DEBUG", level); } } else { // The command is either "print", "lowered", or a goal. // Check that a program has been loaded. - let prog = prog.as_ref() + let prog = prog + .as_ref() .ok_or("no program currently loaded; type 'help' to see available commands")?; // Attempt to parse the program. - chalk_ir::tls::set_current_program(&prog.ir, || -> Result<()> { + prog.db.with_program(|_| -> Result<()> { match command { // Print out the loaded program. "print" => println!("{}", prog.text), // TODO: Write a line of documentation here. - "lowered" => println!("{:#?}", prog.env), + "lowered" => println!("{:#?}", prog.db.environment()), // Assume this is a goal. // TODO: Print out "type 'help' to see available commands" if it // fails to parse? - _ => goal(args, command, prog)?, + _ => prog.goal(Some(rl), command, args.flag_multiple)?, } Ok(()) })? @@ -223,10 +258,10 @@ fn process( /// Load the file into a string, and parse it. // TODO: Could we pass in an Options struct or something? The Args struct // still has Strings where it should have Enums... (e.g. 
solver_choice) -fn load_program(args: &Args, filename: &str) -> Result { +fn load_program(args: &Args, filename: &str) -> Result { let mut text = String::new(); File::open(filename)?.read_to_string(&mut text)?; - Ok(Program::new(text, args.solver_choice())?) + LoadedProgram::new(text, args.solver_choice()) } /// Print out help for commands in interpreter mode. @@ -243,8 +278,8 @@ fn help() { } /// Read a program from the command-line. Stop reading when EOF is read. If -/// an error occurs while reading, a Result::Err is returned. -fn read_program(rl: &mut rustyline::Editor<()>) -> Result { +/// an error occurs while reading, a `Err` is returned. +fn read_program(rl: &mut rustyline::DefaultEditor) -> Result { println!("Enter a program; press Ctrl-D when finished"); let mut text = String::new(); readline_loop(rl, "| ", |_, line| { @@ -254,26 +289,32 @@ fn read_program(rl: &mut rustyline::Editor<()>) -> Result { Ok(text) } -/// Parse a goal and attempt to solve it, using the specified solver. -// TODO: Could we pass in an Options struct or something? The Args struct -// still has Strings where it should have Enums... (e.g. 
solver_choice) -fn goal(args: &Args, text: &str, prog: &Program) -> Result<()> { - let goal = chalk_parse::parse_goal(text)?.lower(&*prog.ir)?; - let peeled_goal = goal.into_peeled_goal(); - match args.solver_choice() - .solve_root_goal(&prog.env, &peeled_goal) - { - Ok(Some(v)) => println!("{}\n", v), - Ok(None) => println!("No possible solution.\n"), - Err(NoSolution) => println!("Solver failed"), - } - Ok(()) -} - impl Args { fn solver_choice(&self) -> SolverChoice { - SolverChoice::SLG { - max_size: self.flag_overflow_depth, + match self.flag_solver.as_deref() { + None | Some("slg") => SolverChoice::SLG { + max_size: self.flag_overflow_depth, + expected_answers: None, + }, + Some("recursive") => SolverChoice::Recursive { + overflow_depth: 100, + caching_enabled: true, + max_size: 30, + }, + Some(s) => panic!("invalid solver {}", s), } } } + +fn main() { + use std::io::Write; + logging::with_tracing_logs(|| { + ::std::process::exit(match run() { + Ok(_) => 0, + Err(ref e) => { + write!(&mut ::std::io::stderr(), "{}", e).expect("Error writing to stderr"); + 1 + } + }) + }); +} diff --git a/src/rules.rs b/src/rules.rs deleted file mode 100644 index 5c6f65ff590..00000000000 --- a/src/rules.rs +++ /dev/null @@ -1,966 +0,0 @@ -use chalk_ir::cast::{Cast, Caster}; -use chalk_ir::fold::shift::Shift; -use chalk_ir::fold::Subst; -use chalk_ir::*; -use rust_ir::*; -use std::iter; - -mod default; -mod wf; - -impl Program { - pub fn environment(&self) -> ProgramEnvironment { - // Construct the set of *clauses*; these are sort of a compiled form - // of the data above that always has the form: - // - // forall P0...Pn. 
Something :- Conditions - let mut program_clauses = self.custom_clauses.iter().cloned().chain( - self.struct_data - .values() - .flat_map(|d| d.to_program_clauses()) - ).chain( - self.trait_data - .values() - .flat_map(|d| d.to_program_clauses()), - ).chain( - self.associated_ty_data - .values() - .flat_map(|d| d.to_program_clauses(self)), - ).chain( - self.default_impl_data.iter().map(|d| d.to_program_clause()) - ).collect::>(); - - // Adds clause that defines the Derefs domain goal: - // forall { Derefs(T, U) :- ProjectionEq(::Target = U>) } - if let Some(trait_id) = self.lang_items.get(&LangItem::DerefTrait) { - // Find `Deref::Target`. - let associated_ty_id = self - .associated_ty_data - .values() - .find(|d| d.trait_id == *trait_id) - .expect("Deref has no assoc item") - .id; - let t = || Ty::BoundVar(0); - let u = || Ty::BoundVar(1); - program_clauses.push( - Binders { - binders: vec![ParameterKind::Ty(()), ParameterKind::Ty(())], - value: ProgramClauseImplication { - consequence: DomainGoal::Derefs(Derefs { - source: t(), - target: u(), - }), - conditions: vec![ - ProjectionEq { - projection: ProjectionTy { - associated_ty_id, - parameters: vec![t().cast()], - }, - ty: u(), - }.cast(), - ], - }, - }.cast(), - ); - } - - for datum in self.impl_data.values() { - // If we encounter a negative impl, do not generate any rule. Negative impls - // are currently just there to deactivate default impls for auto traits. 
- if datum.binders.value.trait_ref.is_positive() { - program_clauses.push(datum.to_program_clause()); - program_clauses.extend( - datum - .binders - .value - .associated_ty_values - .iter() - .flat_map(|atv| atv.to_program_clauses(self, datum)), - ); - } - } - - let coinductive_traits = self - .trait_data - .iter() - .filter_map(|(&trait_id, trait_datum)| { - if trait_datum.binders.value.flags.auto { - Some(trait_id) - } else { - None - } - }).collect(); - - ProgramEnvironment { - coinductive_traits, - program_clauses, - } - } -} - -impl ImplDatum { - /// Given `impl Clone for Vec { ... }`, generate: - /// - /// ```notrust - /// -- Rule Implemented-From-Impl - /// forall { - /// Implemented(Vec: Clone) :- Implemented(T: Clone). - /// } - /// ``` - fn to_program_clause(&self) -> ProgramClause { - self.binders - .map_ref(|bound| ProgramClauseImplication { - consequence: bound.trait_ref.trait_ref().clone().cast(), - conditions: bound.where_clauses.iter().cloned().casted().collect(), - }).cast() - } -} - -impl DefaultImplDatum { - /// For each accessible type `T` in a struct which needs a default implementation for the auto - /// trait `Foo` (accessible types are the struct fields types), we add a bound `T: Foo` (which - /// is then expanded with `WF(T: Foo)`). For example, given: - /// - /// ```notrust - /// #[auto] trait Send { } - /// - /// struct MyList { - /// data: T, - /// next: Box>>, - /// } - /// - /// ``` - /// - /// generate: - /// - /// ```notrust - /// forall { - /// Implemented(MyList: Send) :- - /// Implemented(T: Send), - /// Implemented(Box>>: Send). 
- /// } - /// ``` - fn to_program_clause(&self) -> ProgramClause { - self.binders - .map_ref(|bound| ProgramClauseImplication { - consequence: bound.trait_ref.clone().cast(), - conditions: { - let wc = bound.accessible_tys.iter().cloned().map(|ty| TraitRef { - trait_id: bound.trait_ref.trait_id, - parameters: vec![ParameterKind::Ty(ty)], - }); - - wc.casted().collect() - }, - }).cast() - } -} - -impl AssociatedTyValue { - /// Given the following trait: - /// - /// ```notrust - /// trait Iterable { - /// type IntoIter<'a>: 'a; - /// } - /// ``` - /// - /// Then for the following impl: - /// ```notrust - /// impl Iterable for Vec { - /// type IntoIter<'a> = Iter<'a, T>; - /// } - /// ``` - /// - /// we generate: - /// - /// ```notrust - /// -- Rule Normalize-From-Impl - /// forall<'a, T> { - /// Normalize( as Iterable>::IntoIter<'a> -> Iter<'a, T>>) :- - /// Implemented(Vec: Iterable), // (1) - /// Implemented(Iter<'a, T>: 'a). // (2) - /// } - /// ``` - /// - /// and: - /// - /// ```notrust - /// forall<'a, T> { - /// UnselectedNormalize(Vec::IntoIter<'a> -> Iter<'a, T>) :- - /// InScope(Iterable), - /// Normalize( as Iterable>::IntoIter<'a> -> Iter<'a, T>). - /// } - /// ``` - fn to_program_clauses(&self, program: &Program, impl_datum: &ImplDatum) -> Vec { - let associated_ty = &program.associated_ty_data[&self.associated_ty_id]; - - // Begin with the innermost parameters (`'a`) and then add those from impl (`T`). - let all_binders: Vec<_> = self - .value - .binders - .iter() - .chain(impl_datum.binders.binders.iter()) - .cloned() - .collect(); - - let impl_trait_ref = impl_datum - .binders - .value - .trait_ref - .trait_ref() - .shifted_in(self.value.len()); - - let all_parameters: Vec<_> = self - .value - .binders - .iter() - .zip(0..) - .map(|p| p.to_parameter()) - .chain(impl_trait_ref.parameters.iter().cloned()) - .collect(); - - // Assemble the full list of conditions for projection to be valid. 
- // This comes in two parts, marked as (1) and (2) in doc above: - // - // 1. require that the trait is implemented - // 2. any where-clauses from the `type` declaration in the trait: the - // parameters must be substituted with those of the impl - let where_clauses = associated_ty - .where_clauses - .iter() - .map(|wc| Subst::apply(&all_parameters, wc)) - .casted(); - - let conditions: Vec = where_clauses - .chain(Some(impl_trait_ref.clone().cast())) - .collect(); - - // Bound parameters + `Self` type of the trait-ref - let parameters: Vec<_> = { - // First add refs to the bound parameters (`'a`, in above example) - let parameters = self.value.binders.iter().zip(0..).map(|p| p.to_parameter()); - - // Then add the `Self` type (`Vec`, in above example) - parameters - .chain(Some(impl_trait_ref.parameters[0].clone())) - .collect() - }; - - let projection = ProjectionTy { - associated_ty_id: self.associated_ty_id, - - // Add the remaining parameters of the trait-ref, if any - parameters: parameters - .iter() - .chain(&impl_trait_ref.parameters[1..]) - .cloned() - .collect(), - }; - - let normalize_goal = DomainGoal::Normalize(Normalize { - projection: projection.clone(), - ty: self.value.value.ty.clone(), - }); - - // Determine the normalization - let normalization = Binders { - binders: all_binders.clone(), - value: ProgramClauseImplication { - consequence: normalize_goal.clone(), - conditions: conditions, - }, - }.cast(); - - let unselected_projection = UnselectedProjectionTy { - type_name: associated_ty.name.clone(), - parameters: parameters, - }; - - let unselected_normalization = Binders { - binders: all_binders, - value: ProgramClauseImplication { - consequence: DomainGoal::UnselectedNormalize(UnselectedNormalize { - projection: unselected_projection, - ty: self.value.value.ty.clone(), - }), - conditions: vec![ - normalize_goal.cast(), - DomainGoal::InScope(impl_trait_ref.trait_id).cast(), - ], - }, - }.cast(); - - vec![normalization, unselected_normalization] 
- } -} - -impl StructDatum { - /// Given the following type definition: `struct Foo { }`, generate: - /// - /// ```notrust - /// -- Rule WellFormed-Type - /// forall { - /// WF(Foo) :- Implemented(T: Eq). - /// } - /// - /// -- Rule Implied-Bound-From-Type - /// forall { - /// FromEnv(T: Eq) :- FromEnv(Foo). - /// } - /// - /// forall { - /// IsFullyVisible(Foo) :- IsFullyVisible(T). - /// } - /// ``` - /// - /// If the type `Foo` is marked `#[upstream]`, we also generate: - /// - /// ```notrust - /// forall { IsUpstream(Foo). } - /// ``` - /// - /// Otherwise, if the type `Foo` is not marked `#[upstream]`, we generate: - /// ```notrust - /// forall { IsLocal(Foo). } - /// ``` - /// - /// Given an `#[upstream]` type that is also fundamental: - /// - /// ```notrust - /// #[upstream] - /// #[fundamental] - /// struct Box {} - /// ``` - /// - /// We generate the following clauses: - /// - /// ```notrust - /// forall { IsLocal(Box) :- IsLocal(T). } - /// - /// forall { IsUpstream(Box) :- IsUpstream(T). } - /// - /// // Generated for both upstream and local fundamental types - /// forall { DownstreamType(Box) :- DownstreamType(T). } - /// ``` - /// - fn to_program_clauses(&self) -> Vec { - let wf = self - .binders - .map_ref(|bound_datum| ProgramClauseImplication { - consequence: WellFormed::Ty(bound_datum.self_ty.clone().cast()).cast(), - - conditions: { bound_datum.where_clauses.iter().cloned().casted().collect() }, - }).cast(); - - let is_fully_visible = self - .binders - .map_ref(|bound_datum| ProgramClauseImplication { - consequence: DomainGoal::IsFullyVisible(bound_datum.self_ty.clone().cast()), - conditions: bound_datum - .self_ty - .type_parameters() - .map(|ty| DomainGoal::IsFullyVisible(ty).cast()) - .collect(), - }).cast(); - - let mut clauses = vec![wf, is_fully_visible]; - - // Fundamental types often have rules in the form of: - // Goal(FundamentalType) :- Goal(T) - // This macro makes creating that kind of clause easy - macro_rules! 
fundamental_rule { - ($goal:ident) => { - // Fundamental types must always have at least one type parameter for this rule to - // make any sense. We currently do not have have any fundamental types with more than - // one type parameter, nor do we know what the behaviour for that should be. Thus, we - // are asserting here that there is only a single type parameter until the day when - // someone makes a decision about how that should behave. - assert_eq!(self.binders.value.self_ty.len_type_parameters(), 1, - "Only fundamental types with a single parameter are supported"); - - clauses.push(self.binders.map_ref(|bound_datum| ProgramClauseImplication { - consequence: DomainGoal::$goal(bound_datum.self_ty.clone().cast()), - conditions: vec![ - DomainGoal::$goal( - // This unwrap is safe because we asserted above for the presence of a type - // parameter - bound_datum.self_ty.first_type_parameter().unwrap() - ).cast(), - ], - }).cast()); - }; - } - - // Types that are not marked `#[upstream]` satisfy IsLocal(TypeName) - if !self.binders.value.flags.upstream { - // `IsLocalTy(Ty)` depends *only* on whether the type is marked #[upstream] and nothing else - let is_local = self - .binders - .map_ref(|bound_datum| ProgramClauseImplication { - consequence: DomainGoal::IsLocal(bound_datum.self_ty.clone().cast()), - conditions: Vec::new(), - }).cast(); - - clauses.push(is_local); - } else if self.binders.value.flags.fundamental { - // If a type is `#[upstream]`, but is also `#[fundamental]`, it satisfies IsLocal - // if and only if its parameters satisfy IsLocal - fundamental_rule!(IsLocal); - fundamental_rule!(IsUpstream); - } else { - // The type is just upstream and not fundamental - - let is_upstream = self - .binders - .map_ref(|bound_datum| ProgramClauseImplication { - consequence: DomainGoal::IsUpstream(bound_datum.self_ty.clone().cast()), - conditions: Vec::new(), - }).cast(); - - clauses.push(is_upstream); - } - - if self.binders.value.flags.fundamental { - 
fundamental_rule!(DownstreamType); - } - - let condition = DomainGoal::FromEnv(FromEnv::Ty(self.binders.value.self_ty.clone().cast())); - - for wc in self - .binders - .value - .where_clauses - .iter() - .cloned() - .map(|wc| wc.map(|bound| bound.into_from_env_goal())) - { - // We move the binders of the where-clause to the left, e.g. if we had: - // - // `forall { WellFormed(Foo) :- forall<'a> Implemented(T: Fn(&'a i32)) }` - // - // then the reverse rule will be: - // - // `forall<'a, T> { FromEnv(T: Fn(&'a i32)) :- FromEnv(Foo) }` - // - let shift = wc.binders.len(); - clauses.push( - Binders { - binders: wc - .binders - .into_iter() - .chain(self.binders.binders.clone()) - .collect(), - value: ProgramClauseImplication { - consequence: wc.value, - conditions: vec![condition.clone().shifted_in(shift).cast()], - }, - }.cast(), - ); - } - - clauses - } -} - -impl TraitDatum { - /// Given the following trait declaration: `trait Ord where Self: Eq { ... }`, generate: - /// - /// ```notrust - /// -- Rule WellFormed-TraitRef - /// forall { - /// WF(Self: Ord) :- Implemented(Self: Ord), WF(Self: Eq). - /// } - /// ``` - /// - /// and the reverse rules: - /// - /// ```notrust - /// -- Rule Implemented-From-Env - /// forall { - /// (Self: Ord) :- FromEnv(Self: Ord). - /// } - /// - /// -- Rule Implied-Bound-From-Trait - /// forall { - /// FromEnv(Self: Eq) :- FromEnv(Self: Ord). - /// } - /// ``` - /// - /// As specified in the orphan rules, if a trait is not marked `#[upstream]`, the current crate - /// can implement it for any type. To represent that, we generate: - /// - /// ```notrust - /// // `Ord` would not be `#[upstream]` when compiling `std` - /// forall { LocalImplAllowed(Self: Ord). } - /// ``` - /// - /// For traits that are `#[upstream]` (i.e. not in the current crate), the orphan rules dictate - /// that impls are allowed as long as at least one type parameter is local and each type - /// prior to that is fully visible. 
That means that each type prior to the first local - /// type cannot contain any of the type parameters of the impl. - /// - /// This rule is fairly complex, so we expand it and generate a program clause for each - /// possible case. This is represented as follows: - /// - /// ```notrust - /// // for `#[upstream] trait Foo where Self: Eq { ... }` - /// forall { - /// LocalImplAllowed(Self: Foo) :- IsLocal(Self). - /// } - /// - /// forall { - /// LocalImplAllowed(Self: Foo) :- - /// IsFullyVisible(Self), - /// IsLocal(T). - /// } - /// - /// forall { - /// LocalImplAllowed(Self: Foo) :- - /// IsFullyVisible(Self), - /// IsFullyVisible(T), - /// IsLocal(U). - /// } - /// - /// forall { - /// LocalImplAllowed(Self: Foo) :- - /// IsFullyVisible(Self), - /// IsFullyVisible(T), - /// IsFullyVisible(U), - /// IsLocal(V). - /// } - /// ``` - /// - /// The overlap check uses compatible { ... } mode to ensure that it accounts for impls that - /// may exist in some other *compatible* world. For every upstream trait, we add a rule to - /// account for the fact that upstream crates are able to compatibly add impls of upstream - /// traits for upstream types. - /// - /// ```notrust - /// // For `#[upstream] trait Foo where Self: Eq { ... }` - /// forall { - /// Implemented(Self: Foo) :- - /// Implemented(Self: Eq), // where clauses - /// Compatible, // compatible modality - /// IsUpstream(Self), - /// IsUpstream(T), - /// IsUpstream(U), - /// IsUpstream(V), - /// CannotProve. // returns ambiguous - /// } - /// ``` - /// - /// In certain situations, this is too restrictive. Consider the following code: - /// - /// ```notrust - /// /* In crate std */ - /// trait Sized { } - /// struct str { } - /// - /// /* In crate bar (depends on std) */ - /// trait Bar { } - /// impl Bar for str { } - /// impl Bar for T where T: Sized { } - /// ``` - /// - /// Here, because of the rules we've defined, these two impls overlap. 
The std crate is - /// upstream to bar, and thus it is allowed to compatibly implement Sized for str. If str - /// can implement Sized in a compatible future, these two impls definitely overlap since the - /// second impl covers all types that implement Sized. - /// - /// The solution we've got right now is to mark Sized as "fundamental" when it is defined. - /// This signals to the Rust compiler that it can rely on the fact that str does not - /// implement Sized in all contexts. A consequence of this is that we can no longer add an - /// implementation of Sized compatibly for str. This is the trade off you make when defining - /// a fundamental trait. - /// - /// To implement fundamental traits, we simply just do not add the rule above that allows - /// upstream types to implement upstream traits. Fundamental traits are not allowed to - /// compatibly do that. - fn to_program_clauses(&self) -> Vec { - let trait_ref = self.binders.value.trait_ref.clone(); - - let trait_ref_impl = WhereClause::Implemented(self.binders.value.trait_ref.clone()); - - let wf = self - .binders - .map_ref(|bound| ProgramClauseImplication { - consequence: WellFormed::Trait(trait_ref.clone()).cast(), - - conditions: { - bound - .where_clauses - .iter() - .cloned() - .map(|wc| wc.map(|bound| bound.into_well_formed_goal())) - .casted() - .chain(Some(DomainGoal::Holds(trait_ref_impl.clone()).cast())) - .collect() - }, - }).cast(); - - let mut clauses = vec![wf]; - - // The number of parameters will always be at least 1 because of the Self parameter - // that is automatically added to every trait. This is important because otherwise - // the added program clauses would not have any conditions. 
- let type_parameters: Vec<_> = self.binders.value.trait_ref.type_parameters().collect(); - - // Add all cases for potential downstream impls that could exist - clauses.extend((0..type_parameters.len()).map(|i| { - let impl_may_exist = - self.binders - .map_ref(|bound_datum| ProgramClauseImplication { - consequence: DomainGoal::Holds(WhereClause::Implemented( - bound_datum.trait_ref.clone(), - )), - conditions: bound_datum - .where_clauses - .iter() - .cloned() - .casted() - .chain(iter::once(DomainGoal::Compatible(()).cast())) - .chain((0..i).map(|j| { - DomainGoal::IsFullyVisible(type_parameters[j].clone()).cast() - })).chain(iter::once( - DomainGoal::DownstreamType(type_parameters[i].clone()).cast(), - )).chain(iter::once(Goal::CannotProve(()))) - .collect(), - }).cast(); - - impl_may_exist - })); - - if !self.binders.value.flags.upstream { - let impl_allowed = self - .binders - .map_ref(|bound_datum| ProgramClauseImplication { - consequence: DomainGoal::LocalImplAllowed(bound_datum.trait_ref.clone()), - conditions: Vec::new(), - }).cast(); - - clauses.push(impl_allowed); - } else { - clauses.extend((0..type_parameters.len()).map(|i| { - let impl_maybe_allowed = self - .binders - .map_ref(|bound_datum| ProgramClauseImplication { - consequence: DomainGoal::LocalImplAllowed(bound_datum.trait_ref.clone()), - conditions: (0..i) - .map(|j| DomainGoal::IsFullyVisible(type_parameters[j].clone()).cast()) - .chain(iter::once( - DomainGoal::IsLocal(type_parameters[i].clone()).cast(), - )).collect(), - }).cast(); - - impl_maybe_allowed - })); - - // Fundamental traits can be reasoned about negatively without any ambiguity, so no - // need for this rule if the trait is fundamental. 
- if !self.binders.value.flags.fundamental { - let impl_may_exist = self - .binders - .map_ref(|bound_datum| ProgramClauseImplication { - consequence: DomainGoal::Holds(WhereClause::Implemented( - bound_datum.trait_ref.clone(), - )), - conditions: bound_datum - .where_clauses - .iter() - .cloned() - .casted() - .chain(iter::once(DomainGoal::Compatible(()).cast())) - .chain( - bound_datum - .trait_ref - .type_parameters() - .map(|ty| DomainGoal::IsUpstream(ty).cast()), - ).chain(iter::once(Goal::CannotProve(()))) - .collect(), - }).cast(); - - clauses.push(impl_may_exist); - } - } - - let condition = DomainGoal::FromEnv(FromEnv::Trait(trait_ref.clone())); - - clauses.extend( - self.binders - .value - .where_clauses - .iter() - .cloned() - .map(|wc| wc.map(|bound| bound.into_from_env_goal())) - .map(|wc| { - // We move the binders of the where-clause to the left for the reverse rules, - // cf `StructDatum::to_program_clauses`. - let shift = wc.binders.len(); - - Binders { - binders: wc - .binders - .into_iter() - .chain(self.binders.binders.clone()) - .collect(), - value: ProgramClauseImplication { - consequence: wc.value, - conditions: vec![condition.clone().shifted_in(shift).cast()], - }, - }.cast() - }) - ); - - clauses.push( - self.binders - .map_ref(|_| ProgramClauseImplication { - consequence: DomainGoal::Holds(trait_ref_impl), - conditions: vec![condition.cast()], - }).cast(), - ); - - clauses - } -} - -impl AssociatedTyDatum { - /// For each associated type, we define the "projection - /// equality" rules. There are always two; one for a successful normalization, - /// and one for the "fallback" notion of equality. 
- /// - /// Given: (here, `'a` and `T` represent zero or more parameters) - /// - /// ```notrust - /// trait Foo { - /// type Assoc<'a, T>: Bounds where WC; - /// } - /// ``` - /// - /// we generate the 'fallback' rule: - /// - /// ```notrust - /// -- Rule ProjectionEq-Placeholder - /// forall { - /// ProjectionEq(::Assoc<'a, T> = (Foo::Assoc<'a, T>)). - /// } - /// ``` - /// - /// and - /// - /// ```notrust - /// -- Rule ProjectionEq-Normalize - /// forall { - /// ProjectionEq(::Assoc<'a, T> = U) :- - /// Normalize(::Assoc -> U). - /// } - /// ``` - /// - /// We used to generate an "elaboration" rule like this: - /// - /// ```notrust - /// forall { - /// T: Foo :- exists { ProjectionEq(::Assoc = U) }. - /// } - /// ``` - /// - /// but this caused problems with the recursive solver. In - /// particular, whenever normalization is possible, we cannot - /// solve that projection uniquely, since we can now elaborate - /// `ProjectionEq` to fallback *or* normalize it. So instead we - /// handle this kind of reasoning through the `FromEnv` predicate. - /// - /// We also generate rules specific to WF requirements and implied bounds: - /// - /// ```notrust - /// -- Rule WellFormed-AssocTy - /// forall { - /// WellFormed((Foo::Assoc)) :- Implemented(Self: Foo), WC. - /// } - /// - /// -- Rule Implied-WC-From-AssocTy - /// forall { - /// FromEnv(WC) :- FromEnv((Foo::Assoc)). - /// } - /// - /// -- Rule Implied-Bound-From-AssocTy - /// forall { - /// FromEnv(::Assoc<'a,T>: Bounds) :- FromEnv(Self: Foo), WC. - /// } - /// - /// -- Rule Implied-Trait-From-AssocTy - /// forall { - /// FromEnv(Self: Foo) :- FromEnv((Foo::Assoc)). 
- /// } - /// ``` - fn to_program_clauses(&self, program: &Program) -> Vec { - let binders: Vec<_> = self - .parameter_kinds - .iter() - .map(|pk| pk.map(|_| ())) - .collect(); - let parameters: Vec<_> = binders.iter().zip(0..).map(|p| p.to_parameter()).collect(); - let projection = ProjectionTy { - associated_ty_id: self.id, - parameters: parameters.clone(), - }; - - // Retrieve the trait ref embedding the associated type - let trait_ref = { - let (associated_ty_data, trait_params, _) = program.split_projection(&projection); - TraitRef { - trait_id: associated_ty_data.trait_id, - parameters: trait_params.to_owned(), - } - }; - - // Construct an application from the projection. So if we have `::Item`, - // we would produce `(Iterator::Item)`. - let app = ApplicationTy { - name: TypeName::AssociatedType(self.id), - parameters, - }; - let app_ty = Ty::Apply(app); - - let projection_eq = ProjectionEq { - projection: projection.clone(), - ty: app_ty.clone(), - }; - - let mut clauses = vec![]; - - // Fallback rule. The solver uses this to move between the projection - // and placeholder type. - // - // forall { - // ProjectionEq(::Assoc = (Foo::Assoc)). - // } - clauses.push( - Binders { - binders: binders.clone(), - value: ProgramClauseImplication { - consequence: projection_eq.clone().cast(), - conditions: vec![], - }, - }.cast(), - ); - - // Well-formedness of projection type. - // - // forall { - // WellFormed((Foo::Assoc)) :- Implemented(Self: Foo), WC. - // } - clauses.push( - Binders { - binders: binders.clone(), - value: ProgramClauseImplication { - consequence: WellFormed::Ty(app_ty.clone()).cast(), - conditions: iter::once(trait_ref.clone().cast()) - .chain(self.where_clauses.iter().cloned().casted()) - .collect(), - }, - }.cast(), - ); - - // Assuming well-formedness of projection type means we can assume - // the trait ref as well. Mostly used in function bodies. - // - // forall { - // FromEnv(Self: Foo) :- FromEnv((Foo::Assoc)). 
- // } - clauses.push( - Binders { - binders: binders.clone(), - value: ProgramClauseImplication { - consequence: FromEnv::Trait(trait_ref.clone()).cast(), - conditions: vec![FromEnv::Ty(app_ty.clone()).cast()], - }, - }.cast(), - ); - - // Reverse rule for where clauses. - // - // forall { - // FromEnv(WC) :- FromEnv((Foo::Assoc)). - // } - // - // This is really a family of clauses, one for each where clause. - clauses.extend(self.where_clauses.iter().map(|wc| { - // Don't forget to move the binders to the left in case of higher-ranked where clauses. - let shift = wc.binders.len(); - Binders { - binders: wc.binders.iter().chain(binders.iter()).cloned().collect(), - value: ProgramClauseImplication { - consequence: wc.value.clone().into_from_env_goal(), - conditions: vec![FromEnv::Ty(app_ty.clone()).shifted_in(shift).cast()], - }, - }.cast() - })); - - // Reverse rule for implied bounds. - // - // forall { - // FromEnv(::Assoc: Bounds) :- FromEnv(Self: Foo), WC - // } - clauses.extend(self.bounds_on_self().into_iter().map(|bound| { - // Same as above in case of higher-ranked inline bounds. 
- let shift = bound.binders.len(); - let from_env_trait = iter::once( - FromEnv::Trait(trait_ref.clone()).shifted_in(shift).cast() - ); - - let where_clauses = self.where_clauses - .iter() - .cloned() - .casted(); - - Binders { - binders: bound - .binders - .iter() - .chain(binders.iter()) - .cloned() - .collect(), - value: ProgramClauseImplication { - consequence: bound.value.clone().into_from_env_goal(), - conditions: from_env_trait.chain(where_clauses).collect(), - }, - }.cast() - })); - - // add new type parameter U - let mut binders = binders; - binders.push(ParameterKind::Ty(())); - let ty = Ty::BoundVar(binders.len() - 1); - - // `Normalize(::Assoc -> U)` - let normalize = Normalize { - projection: projection.clone(), - ty: ty.clone(), - }; - - // `ProjectionEq(::Assoc = U)` - let projection_eq = ProjectionEq { - projection: projection.clone(), - ty, - }; - - // Projection equality rule from above. - // - // forall { - // ProjectionEq(::Assoc = U) :- - // Normalize(::Assoc -> U). 
- // } - clauses.push( - Binders { - binders: binders.clone(), - value: ProgramClauseImplication { - consequence: projection_eq.clone().cast(), - conditions: vec![normalize.clone().cast()], - }, - }.cast(), - ); - - clauses - } -} diff --git a/src/rules/default.rs b/src/rules/default.rs deleted file mode 100644 index 8992564fce7..00000000000 --- a/src/rules/default.rs +++ /dev/null @@ -1,89 +0,0 @@ -use chalk_ir::*; -use rust_ir::*; -use chalk_solve::infer::InferenceTable; -use chalk_ir::cast::Cast; - -impl Program { - pub fn add_default_impls(&mut self) { - // For each auto trait `MyAutoTrait` and for each struct/type `MyStruct` - for auto_trait in self.trait_data - .values() - .filter(|t| t.binders.value.flags.auto) - { - for struct_datum in self.struct_data.values() { - // `MyStruct: MyAutoTrait` - let trait_ref = TraitRef { - trait_id: auto_trait.binders.value.trait_ref.trait_id, - parameters: vec![ - ParameterKind::Ty(Ty::Apply(struct_datum.binders.value.self_ty.clone())), - ], - }; - - // If a positive or negative impl is already provided for a type family - // which includes `MyStruct`, we do not generate a default impl. 
- if self.impl_provided_for(trait_ref.clone(), struct_datum) { - continue; - } - - self.default_impl_data.push(DefaultImplDatum { - binders: Binders { - binders: struct_datum.binders.binders.clone(), - value: DefaultImplDatumBound { - trait_ref, - accessible_tys: struct_datum.binders.value.fields.clone(), - }, - }, - }); - } - } - } - - fn impl_provided_for(&self, trait_ref: TraitRef, struct_datum: &StructDatum) -> bool { - let goal: DomainGoal = trait_ref.cast(); - - let mut infer = InferenceTable::new(); - - let goal = infer.instantiate_binders_existentially(&(&struct_datum.binders.binders, &goal)); - - for impl_datum in self.impl_data.values() { - // We retrieve the trait ref given by the positive impl (even if the actual impl is negative) - let impl_goal: DomainGoal = impl_datum - .binders - .value - .trait_ref - .trait_ref() - .clone() - .cast(); - - let impl_goal = - infer.instantiate_binders_existentially(&(&impl_datum.binders.binders, &impl_goal)); - - // We check whether the impl `MyStruct: (!)MyAutoTrait` unifies with an existing impl. - // Examples: - // - // ``` - // struct MyStruct; - // impl Send for T where T: Foo { } - // ``` - // `MyStruct: Send` unifies with `T: Send` so no default impl is generated for `MyStruct`. - // - // ``` - // struct MyStruct; - // impl Send for Vec where T: Foo { } - // ``` - // `Vec: Send` unifies with `Vec: Send` so no default impl is generated for `Vec`. - // But a default impl is generated for `MyStruct`. - // - // ``` - // struct MyStruct; - // impl !Send for T where T: Foo { } - // ``` - // `MyStruct: !Send` unifies with `T: !Send` so no default impl is generated for `MyStruct`. 
- if infer.unify(&Environment::new(), &goal, &impl_goal).is_ok() { - return true; - } - } - - false - } -} diff --git a/src/rules/wf.rs b/src/rules/wf.rs deleted file mode 100644 index 409b57e9c49..00000000000 --- a/src/rules/wf.rs +++ /dev/null @@ -1,321 +0,0 @@ -use std::sync::Arc; - -use chalk_ir::*; -use errors::*; -use chalk_ir::cast::*; -use chalk_ir::fold::*; -use chalk_ir::fold::shift::Shift; -use itertools::Itertools; -use rust_ir::*; -use chalk_solve::ext::*; -use chalk_solve::solve::SolverChoice; - -mod test; - -struct WfSolver<'me> { - program: &'me Program, - env: Arc, - solver_choice: SolverChoice, -} - -impl Program { - pub fn verify_well_formedness(&self, solver_choice: SolverChoice) -> Result<()> { - tls::set_current_program(&Arc::new(self.clone()), || self.solve_wf_requirements(solver_choice)) - } - - fn solve_wf_requirements(&self, solver_choice: SolverChoice) -> Result<()> { - let solver = WfSolver { - program: self, - env: Arc::new(self.environment()), - solver_choice, - }; - - for (id, struct_datum) in &self.struct_data { - if !solver.verify_struct_decl(struct_datum) { - let name = self.type_kinds.get(id).unwrap().name; - return Err(Error::from_kind(ErrorKind::IllFormedTypeDecl(name))); - } - } - - for impl_datum in self.impl_data.values() { - if !solver.verify_trait_impl(impl_datum) { - let trait_ref = impl_datum.binders.value.trait_ref.trait_ref(); - let name = self.type_kinds.get(&trait_ref.trait_id).unwrap().name; - return Err(Error::from_kind(ErrorKind::IllFormedTraitImpl(name))); - } - } - - Ok(()) - } -} - -/// A trait for retrieving all types appearing in some Chalk construction. 
-trait FoldInputTypes { - fn fold(&self, accumulator: &mut Vec); -} - -impl FoldInputTypes for Vec { - fn fold(&self, accumulator: &mut Vec) { - for f in self { - f.fold(accumulator); - } - } -} - -impl FoldInputTypes for Parameter { - fn fold(&self, accumulator: &mut Vec) { - if let ParameterKind::Ty(ty) = self { - ty.fold(accumulator) - } - } -} - -impl FoldInputTypes for Ty { - fn fold(&self, accumulator: &mut Vec) { - match self { - Ty::Apply(app) => { - accumulator.push(self.clone()); - app.parameters.fold(accumulator); - } - Ty::Projection(proj) => { - accumulator.push(self.clone()); - proj.parameters.fold(accumulator); - } - Ty::UnselectedProjection(proj) => { - accumulator.push(self.clone()); - proj.parameters.fold(accumulator); - } - - // Type parameters do not carry any input types (so we can sort of assume they are - // always WF). - Ty::BoundVar(..) => (), - - // Higher-kinded types such as `for<'a> fn(&'a u32)` introduce their own implied - // bounds, and these bounds will be enforced upon calling such a function. In some - // sense, well-formedness requirements for the input types of an HKT will be enforced - // lazily, so no need to include them here. - Ty::ForAll(..) => (), - - Ty::InferenceVar(..) 
=> panic!( - "unexpected inference variable in wf rules: {:?}", - self, - ), - } - } -} - -impl FoldInputTypes for TraitRef { - fn fold(&self, accumulator: &mut Vec) { - self.parameters.fold(accumulator); - } -} - -impl FoldInputTypes for ProjectionEq { - fn fold(&self, accumulator: &mut Vec) { - Ty::Projection(self.projection.clone()).fold(accumulator); - self.ty.fold(accumulator); - } -} - -impl FoldInputTypes for WhereClause { - fn fold(&self, accumulator: &mut Vec) { - match self { - WhereClause::Implemented(tr) => tr.fold(accumulator), - WhereClause::ProjectionEq(p) => p.fold(accumulator), - } - } -} - -impl FoldInputTypes for Binders { - fn fold(&self, accumulator: &mut Vec) { - self.value.fold(accumulator); - } -} - -impl<'me> WfSolver<'me> { - fn verify_struct_decl(&self, struct_datum: &StructDatum) -> bool { - // We retrieve all the input types of the struct fields. - let mut input_types = Vec::new(); - struct_datum.binders.value.fields.fold(&mut input_types); - struct_datum.binders.value.where_clauses.fold(&mut input_types); - - if input_types.is_empty() { - return true; - } - - let goals = input_types.into_iter() - .map(|ty| DomainGoal::WellFormed(WellFormed::Ty(ty))) - .casted(); - let goal = goals.fold1(|goal, leaf| Goal::And(Box::new(goal), Box::new(leaf))) - .expect("at least one goal"); - - let hypotheses = - struct_datum.binders - .value - .where_clauses - .iter() - .cloned() - .map(|wc| wc.map(|bound| bound.into_from_env_goal())) - .casted() - .collect(); - - // We ask that the above input types are well-formed provided that all the where-clauses - // on the struct definition hold. 
- let goal = Goal::Implies(hypotheses, Box::new(goal)) - .quantify(QuantifierKind::ForAll, struct_datum.binders.binders.clone()); - - match self.solver_choice.solve_root_goal(&self.env, &goal.into_closed_goal()).unwrap() { - Some(sol) => sol.is_unique(), - None => false, - } - } - - fn verify_trait_impl(&self, impl_datum: &ImplDatum) -> bool { - let trait_ref = match impl_datum.binders.value.trait_ref { - PolarizedTraitRef::Positive(ref trait_ref) => trait_ref, - _ => return true - }; - - // We retrieve all the input types of the where clauses appearing on the trait impl, - // e.g. in: - // ``` - // impl Foo for (T, K) where T: Iterator, Vec>)> { ... } - // ``` - // we would retrieve `HashSet`, `Box`, `Vec>`, `(HashSet, Vec>)`. - // We will have to prove that these types are well-formed (e.g. an additional `K: Hash` - // bound would be needed here). - let mut input_types = Vec::new(); - impl_datum.binders.value.where_clauses.fold(&mut input_types); - - // We retrieve all the input types of the type on which we implement the trait: we will - // *assume* that these types are well-formed, e.g. we will be able to derive that - // `K: Hash` holds without writing any where clause. - // - // Example: - // ``` - // struct HashSet where K: Hash { ... } - // - // impl Foo for HashSet { - // // Inside here, we can rely on the fact that `K: Hash` holds - // } - // ``` - let mut header_input_types = Vec::new(); - trait_ref.fold(&mut header_input_types); - - // Associated type values are special because they can be parametric (independently of - // the impl), so we issue a special goal which is quantified using the binders of the - // associated type value, for example in: - // ``` - // trait Foo { - // type Item<'a> - // } - // - // impl Foo for Box { - // type Item<'a> = Box<&'a T>; - // } - // ``` - // we would issue the following subgoal: `forall<'a> { WellFormed(Box<&'a T>) }`. 
- let compute_assoc_ty_goal = |assoc_ty: &AssociatedTyValue| { - let assoc_ty_datum = &self.program.associated_ty_data[&assoc_ty.associated_ty_id]; - let bounds = &assoc_ty_datum.bounds; - - let mut input_types = Vec::new(); - assoc_ty.value.value.ty.fold(&mut input_types); - - let wf_goals = - input_types.into_iter() - .map(|ty| DomainGoal::WellFormed(WellFormed::Ty(ty))) - .casted(); - - let trait_ref = trait_ref.shifted_in(assoc_ty.value.binders.len()); - - let all_parameters: Vec<_> = - assoc_ty.value.binders.iter() - .zip(0..) - .map(|p| p.to_parameter()) - .chain(trait_ref.parameters.iter().cloned()) - .collect(); - - // Add bounds from the trait. Because they are defined on the trait, - // their parameters must be substituted with those of the impl. - let bound_goals = - bounds.iter() - .map(|b| Subst::apply(&all_parameters, b)) - .flat_map(|b| b.into_where_clauses(assoc_ty.value.value.ty.clone())) - .map(|wc| wc.map(|bound| bound.into_well_formed_goal())) - .casted(); - - let goals = wf_goals.chain(bound_goals); - let goal = match goals.fold1(|goal, leaf| Goal::And(Box::new(goal), Box::new(leaf))) { - Some(goal) => goal, - None => return None, - }; - - // Add where clauses from the associated ty definition. We must - // substitute parameters here, like we did with the bounds above. - let hypotheses = - assoc_ty_datum.where_clauses - .iter() - .map(|wc| Subst::apply(&all_parameters, wc)) - .map(|wc| wc.map(|bound| bound.into_from_env_goal())) - .casted() - .collect(); - - let goal = Goal::Implies( - hypotheses, - Box::new(goal) - ); - - Some(goal.quantify(QuantifierKind::ForAll, assoc_ty.value.binders.clone())) - }; - - let assoc_ty_goals = - impl_datum.binders - .value - .associated_ty_values - .iter() - .filter_map(compute_assoc_ty_goal); - - // Things to prove well-formed: input types of the where-clauses, projection types - // appearing in the header, associated type values, and of course the trait ref. 
- let trait_ref_wf = DomainGoal::WellFormed( - WellFormed::Trait(trait_ref.clone()) - ); - let goals = - input_types.into_iter() - .map(|ty| DomainGoal::WellFormed(WellFormed::Ty(ty)).cast()) - .chain(assoc_ty_goals) - .chain(Some(trait_ref_wf).cast()); - - let goal = goals.fold1(|goal, leaf| Goal::And(Box::new(goal), Box::new(leaf))) - .expect("at least one goal"); - - // Assumptions: types appearing in the header which are not projection types are - // assumed to be well-formed, and where clauses declared on the impl are assumed - // to hold. - let hypotheses = - impl_datum.binders - .value - .where_clauses - .iter() - .cloned() - .map(|wc| wc.map(|bound| bound.into_from_env_goal())) - .casted() - .chain( - header_input_types.into_iter() - .map(|ty| DomainGoal::FromEnv(FromEnv::Ty(ty))) - .casted() - ) - .collect(); - - let goal = Goal::Implies(hypotheses, Box::new(goal)) - .quantify(QuantifierKind::ForAll, impl_datum.binders.binders.clone()); - - debug!("WF trait goal: {:?}", goal); - - match self.solver_choice.solve_root_goal(&self.env, &goal.into_closed_goal()).unwrap() { - Some(sol) => sol.is_unique(), - None => false, - } - } -} diff --git a/src/rules/wf/test.rs b/src/rules/wf/test.rs deleted file mode 100644 index 7763c69a3a9..00000000000 --- a/src/rules/wf/test.rs +++ /dev/null @@ -1,640 +0,0 @@ -#![cfg(test)] - -use test_util::*; - -#[test] -fn well_formed_trait_decl() { - lowering_success! { - program { - trait Clone { } - trait Copy where Self: Clone { } - - struct i32 { } - - impl Clone for i32 { } - impl Copy for i32 { } - } - } -} - -#[test] -fn ill_formed_trait_decl() { - lowering_error! { - program { - trait Clone { } - trait Copy where Self: Clone { } - - struct i32 { } - - impl Copy for i32 { } - } error_msg { - "trait impl for \"Copy\" does not meet well-formedness requirements" - } - } -} -#[test] -fn cyclic_traits() { - lowering_success! 
{ - program { - trait A where Self: B { } - trait B where Self: A { } - - impl B for T { } - impl A for T { } - } - } - - lowering_error! { - program { - trait Copy { } - - trait A where Self: B, Self: Copy {} - trait B where Self: A { } - - // This impl won't be able to prove that `T: Copy` holds. - impl B for T {} - - impl A for T where T: B {} - } error_msg { - "trait impl for \"B\" does not meet well-formedness requirements" - } - } - - lowering_success! { - program { - trait Copy { } - - trait A where Self: B, Self: Copy {} - trait B where Self: A { } - - impl B for T where T: Copy {} - impl A for T where T: B {} - } - } -} - -#[test] -fn cyclic_wf_requirements() { - lowering_success! { - program { - trait Foo where ::Value: Foo { - type Value; - } - - struct Unit { } - impl Foo for Unit { - type Value = Unit; - } - } - } -} - -#[test] -fn ill_formed_assoc_ty() { - lowering_error! { - program { - trait Foo { } - struct OnlyFoo where T: Foo { } - - struct i32 { } - - trait Bar { - type Value; - } - - impl Bar for i32 { - // `OnlyFoo` is ill-formed because `i32: Foo` does not hold. - type Value = OnlyFoo; - } - } error_msg { - "trait impl for \"Bar\" does not meet well-formedness requirements" - } - } -} - -#[test] -fn implied_bounds() { - lowering_success! { - program { - trait Eq { } - trait Hash where Self: Eq { } - - struct Set where K: Hash { } - - struct OnlyEq where T: Eq { } - - trait Foo { - type Value; - } - - impl Foo for Set { - // Here, `WF(Set)` implies `K: Hash` and hence `OnlyEq` is WF. - type Value = OnlyEq; - } - } - } -} - -#[test] -fn ill_formed_ty_decl() { - lowering_error! { - program { - trait Hash { } - struct Set where K: Hash { } - - struct MyType { - value: Set - } - } error_msg { - "type declaration \"MyType\" does not meet well-formedness requirements" - } - } -} - -#[test] -fn implied_bounds_on_ty_decl() { - lowering_success! 
{ - program { - trait Eq { } - trait Hash where Self: Eq { } - struct OnlyEq where T: Eq { } - - struct MyType where K: Hash { - value: OnlyEq - } - } - } -} - -#[test] -fn wf_requiremements_for_projection() { - lowering_error! { - program { - trait Foo { - type Value; - } - - trait Iterator { - type Item; - } - - impl Foo for T { - // The projection is well-formed if `T: Iterator` holds, which cannot - // be proved here. - type Value = ::Item; - } - } error_msg { - "trait impl for \"Foo\" does not meet well-formedness requirements" - } - } - - lowering_success! { - program { - trait Foo { - type Value; - } - - trait Iterator { - type Item; - } - - impl Foo for T where T: Iterator { - type Value = ::Item; - } - } - } -} - -#[test] -fn ill_formed_type_in_header() { - lowering_error! { - program { - trait Foo { - type Value; - } - - trait Bar { } - - // Types in where clauses are not assumed to be well-formed, - // an explicit where clause would be needed (see below). - impl Bar for T where ::Value: Bar { } - } error_msg { - "trait impl for \"Bar\" does not meet well-formedness requirements" - } - } - - lowering_success! { - program { - trait Foo { - type Value; - } - - trait Bar { } - - impl Bar for T where T: Foo, ::Value: Bar { } - } - } -} - -#[test] -fn bound_in_header_from_env() { - lowering_success! { - program { - trait Foo { } - - trait Bar { - type Item: Foo; - } - - struct Stuff { } - - impl Bar for Stuff where T: Foo { - // Should have FromEnv(T: Foo) here. - type Item = T; - } - } - } - - lowering_error! { - program { - trait Foo { } - trait Baz { } - - trait Bar { - type Item: Baz; - } - - struct Stuff { } - - impl Bar for Stuff where T: Foo { - // No T: Baz here. - type Item = T; - } - } error_msg { - "trait impl for \"Bar\" does not meet well-formedness requirements" - } - } -} - -#[test] -fn mixed_indices_check_projection_bounds() { - lowering_success! 
{ - program { - trait Foo { } - - trait Bar { - type Item: Foo; - } - - struct Stuff { } - - impl Bar for Stuff where U: Foo { - type Item = U; - } - } - } - - lowering_error! { - program { - trait Foo { } - trait Baz { } - - trait Bar { - type Item: Baz; - } - - struct Stuff { } - - impl Bar for Stuff where U: Foo { - type Item = U; - } - } error_msg { - "trait impl for \"Bar\" does not meet well-formedness requirements" - } - } -} - -#[test] -fn mixed_indices_check_generic_projection_bounds() { - lowering_success! { - program { - struct Stuff { } - - trait Foo { } - - // A type that impls Foo as long as U: Foo. - struct Fooey { } - impl Foo for Fooey where U: Foo { } - - trait Bar { - type Item: Foo where V: Foo; - } - - impl Bar for Stuff where U: Foo { - type Item = Fooey; - } - } - } - - lowering_error! { - program { - struct Stuff { } - - trait Foo { } - trait Baz { } - - // A type that impls Foo as long as U: Foo. - struct Fooey { } - impl Foo for Fooey where U: Foo { } - - trait Bar { - type Item: Baz where V: Foo; - } - - impl Bar for Stuff where U: Foo { - type Item = Fooey; - } - } error_msg { - "trait impl for \"Bar\" does not meet well-formedness requirements" - } - } -} - -#[test] -fn generic_projection_where_clause() { - lowering_success! { - program { - trait PointerFamily { type Pointer; } - - struct Cow { } - struct CowFamily { } - impl PointerFamily for CowFamily { type Pointer = Cow; } - - struct String { } - struct Foo

where P: PointerFamily { - bar:

::Pointer - } - } - } - - lowering_error! { - program { - trait Copy { } - trait PointerFamily { type Pointer where T: Copy; } - - struct Cow { } - struct CowFamily { } - impl PointerFamily for CowFamily { type Pointer = Cow; } - - struct String { } - struct Foo

where P: PointerFamily { - // No impl Copy for String, so this will fail. - bar:

::Pointer - } - } error_msg { - "type declaration \"Foo\" does not meet well-formedness requirements" - } - } -} - -#[test] -fn generic_projection_bound() { - lowering_success! { - program { - trait Clone { } - trait PointerFamily { type Pointer: Clone where T: Clone; } - - struct Cow { } - impl Clone for Cow where T: Clone { } - - struct CowFamily { } - - // impl is WF due because of: - // - `where T: Clone` clause on PointerFamily::Pointer - // - impl Clone for Cow where T: Clone - impl PointerFamily for CowFamily { type Pointer = Cow; } - - struct String { } - impl Clone for String { } - struct Foo

where P: PointerFamily { - bar:

where P: PointerFamily { + bar:

::Pointer + } + } + } + + lowering_error! { + program { + trait Copy { } + trait PointerFamily { type Pointer where T: Copy; } + + struct Cow { } + struct CowFamily { } + impl PointerFamily for CowFamily { type Pointer = Cow; } + + struct String { } + struct Foo

where P: PointerFamily { + // No impl Copy for String, so this will fail. + bar:

::Pointer + } + } error_msg { + "type declaration `Foo` does not meet well-formedness requirements" + } + } +} + +#[test] +fn generic_projection_bound() { + lowering_success! { + program { + trait Clone { } + trait PointerFamily { type Pointer: Clone where T: Clone; } + + struct Cow { } + impl Clone for Cow where T: Clone { } + + struct CowFamily { } + + // impl is WF due because of: + // - `where T: Clone` clause on PointerFamily::Pointer + // - impl Clone for Cow where T: Clone + impl PointerFamily for CowFamily { type Pointer = Cow; } + + struct String { } + impl Clone for String { } + struct Foo

where P: PointerFamily { + bar:

::Pointer + } + } + } + + lowering_error! { + program { + trait Clone { } + trait PointerFamily { type Pointer: Clone where T: Clone; } + + struct Cow { } + struct CowFamily { } + + // No impl Clone for Cow, so this will fail. + impl PointerFamily for CowFamily { type Pointer = Cow; } + } error_msg { + "trait impl for `PointerFamily` does not meet well-formedness requirements" + } + } +} + +#[test] +fn higher_ranked_trait_bounds() { + lowering_error! { + program { + trait Foo<'a> { } + trait Bar where forall<'a> Self: Foo<'a> { } + struct Baz { } + + impl Bar for Baz { } + } error_msg { + "trait impl for `Bar` does not meet well-formedness requirements" + } + } + + lowering_success! { + program { + trait Foo<'a> { } + trait Bar where forall<'a> Self: Foo<'a> { } + struct Baz { } + + impl<'a> Foo<'a> for Baz { } + impl Bar for Baz { } + } + } +} + +#[test] +fn higher_ranked_trait_bound_on_gat() { + lowering_success! { + program { + trait Foo<'a> { } + struct Baz { } + + trait Bar<'a> { + type Item: Foo<'a> where forall<'b> V: Foo<'b>; + } + + impl<'a> Bar<'a> for Baz { + type Item = V; + } + } + } +} + +// See `cyclic_traits`, this is essentially the same but with higher-ranked co-inductive WF goals. +#[test] +fn higher_ranked_cyclic_requirements() { + lowering_success! { + program { + trait Foo where forall Self: Bar { } + trait Bar where forall Self: Foo { } + + impl Foo for U { } + impl Bar for U { } + } + } + + lowering_error! { + program { + trait Copy { } + trait Foo where forall Self: Bar, Self: Copy { } + trait Bar where forall Self: Foo { } + + impl Foo for U { } + impl Bar for U where U: Foo { } + } error_msg { + "trait impl for `Foo` does not meet well-formedness requirements" + } + } + + lowering_success! 
{ + program { + trait Copy { } + trait Foo where forall Self: Bar, Self: Copy { } + trait Bar where forall Self: Foo { } + + impl Foo for U where U: Copy { } + impl Bar for U where U: Foo { } + } + } +} + +#[test] +fn higher_ranked_inline_bound_on_gat() { + lowering_success! { + program { + trait Fn { } + struct Ref<'a, T> { } + struct Val {} + + struct fun { } + + impl<'a, T> Fn> for for<'b> fn(fun>) { } + + trait Bar { + type Item: forall<'a> Fn>; + } + + impl Bar for Val { + type Item = for<'a> fn(fun>); + } + } + } + + lowering_error! { + program { + trait Fn { } + struct Val {} + + struct fun { } + + impl Fn for fun { } + + trait Bar { + type Item: forall Fn; + } + + impl Bar for Val { + type Item = fun; + } + } error_msg { + "trait impl for `Bar` does not meet well-formedness requirements" + } + } +} + +#[test] +fn assoc_type_recursive_bound() { + lowering_error! { + program { + trait Sized { } + trait Print { + // fn print(); + } + + trait Foo { + type Item: Sized where ::Item: Sized; + } + + struct Number { } + + impl Foo for Number { + // Well-formedness checks require that the following + // goal is true: + // ``` + // if (str: Sized) { # if the where clauses hold + // str: Sized # then the bound on the associated type hold + // } + // ``` + // which it is :) + type Item = str; + } + + struct OnlySized where T: Sized { } + impl Print for OnlySized { + // fn print() { + // println!("{}", std::mem::size_of::()); + // } + } + + trait Bar { + type Assoc: Print; + } + + impl Bar for T where T: Foo { + type Assoc = OnlySized<::Item>; + } + + // Above, we used to incorrectly assume that `OnlySized<::Item>` + // is well-formed because of the `FromEnv(T: Foo)`, hence making the `T: Bar` + // impl pass the well-formedness check. 
But the following query will + // (and should) always succeed, as there is no where clauses on `Assoc`: + // ``` + // forall { if (T: Bar) { WellFormed(::Assoc) } } + // ``` + // + // This may lead to the following code to compile: + + // ``` + // fn foo() { + // T::print() // oops, in fact `T = OnlySized` which is ill-formed + // } + + // fn bar { + // // ok, we have `FromEnv(T: Bar)` hence + // // `::Assoc` is well-formed and + // // `Implemented(::Assoc: Print)` hold + // foo<::Assoc>( + // } + + // fn main() { + // bar::() // ok, `Implemented(Number: Bar)` hold + // } + // ``` + } error_msg { + "trait impl for `Bar` does not meet well-formedness requirements" + } + } +} + +#[test] +fn struct_sized_constraints() { + lowering_error! { + program { + #[lang(sized)] + trait Sized { } + + struct S { + t1: T, + t2: T + } + } error_msg { + "type declaration `S` does not meet well-formedness requirements" + } + } + + lowering_success! { + program { + #[lang(sized)] + trait Sized { } + + struct Foo { } + + struct S { + t1: Foo, + t2: T + } + } + } + + lowering_success! { + program { + #[lang(sized)] + trait Sized { } + + struct S where T: Sized { + t1: T, + t2: T + } + } + } + + lowering_success! { + program { + #[lang(sized)] + trait Sized { } + + struct Foo {} + + struct G { + foo: S>, + s: S>> + } + + struct S { + t1: T + } + } + } + + lowering_error! { + program { + #[lang(sized)] + trait Sized { } + + struct Foo {} + + impl Sized for Foo {} + } error_msg { + "trait impl for `Sized` does not meet well-formedness requirements" + } + } +} + +#[test] +fn enum_sized_constraints() { + // All fields must be sized + lowering_error! { + program { + #[lang(sized)] + trait Sized { } + + enum E { + A { + t1: T, + t2: T, + }, + B, + C, + } + } error_msg { + "type declaration `E` does not meet well-formedness requirements" + } + } + + // Even the last field must be sized + lowering_error! 
{ + program { + #[lang(sized)] + trait Sized { } + + struct Foo { } + + enum E { + A { + t1: Foo, + t2: T, + }, + B, + C, + } + } error_msg { + "type declaration `E` does not meet well-formedness requirements" + } + } + + // Sized bound + lowering_success! { + program { + #[lang(sized)] + trait Sized { } + + enum S where T: Sized { + A { + t1: T, + t2: T, + }, + B, + C, + } + } + } + + // No manual impls + lowering_error! { + program { + #[lang(sized)] + trait Sized { } + + enum Foo {} + + impl Sized for Foo {} + } error_msg { + "trait impl for `Sized` does not meet well-formedness requirements" + } + } +} + +#[test] +fn copy_constraints() { + lowering_success! { + program { + #[lang(copy)] + trait Copy { } + + #[lang(drop)] + trait Drop { } + + struct S { t1: T1, t2: T2 } + + impl Copy for S where T1: Copy, T2: Copy { } + } + } + + lowering_success! { + program { + #[lang(copy)] + trait Copy { } + + #[lang(drop)] + trait Drop { } + + trait MyTrait where Self: Copy { } + + struct S where T: MyTrait { t: T } + + impl Copy for S { } + } + } + + // Copy implementations for a struct with non-copy field + lowering_error! { + program { + #[lang(copy)] + trait Copy { } + + struct S { t: T } + + impl Copy for S { } + } error_msg { + "trait impl for `Copy` does not meet well-formedness requirements" + } + } + + lowering_error! { + program { + #[lang(copy)] + trait Copy { } + + struct S { t1: T1, t2: T2 } + + impl Copy for S where T2: Copy { } + } error_msg { + "trait impl for `Copy` does not meet well-formedness requirements" + } + } + + // Copy implemenation for a Drop type + lowering_error! { + program { + #[lang(copy)] + trait Copy { } + + #[lang(drop)] + trait Drop { } + + struct S where T: Copy { t: T } + + impl Copy for S { } + + impl Drop for S { } + } error_msg { + "trait impl for `Copy` does not meet well-formedness requirements" + } + } + + // Enums + + // Copy types on enum + lowering_success! 
{ + program { + #[lang(copy)] + trait Copy { } + + #[lang(drop)] + trait Drop { } + + enum E { Foo(T1), Bar { t2: T2 } } + + impl Copy for E where T1: Copy, T2: Copy { } + } + } + + // Types with with copy bound + lowering_success! { + program { + #[lang(copy)] + trait Copy { } + + #[lang(drop)] + trait Drop { } + + trait MyTrait where Self: Copy { } + + enum E where T: MyTrait { Foo(T) } + + impl Copy for E { } + } + } + + // Copy implementations for a adt with non-copy field + lowering_error! { + program { + #[lang(copy)] + trait Copy { } + + enum E { Foo(T) } + + impl Copy for E { } + } error_msg { + "trait impl for `Copy` does not meet well-formedness requirements" + } + } + + // Only one copy field + lowering_error! { + program { + #[lang(copy)] + trait Copy { } + + enum E { Foo(T1), Bar { t2: T2 } } + + impl Copy for E where T2: Copy { } + } error_msg { + "trait impl for `Copy` does not meet well-formedness requirements" + } + } + + // Copy implemenation for a Drop type + lowering_error! { + program { + #[lang(copy)] + trait Copy { } + + #[lang(drop)] + trait Drop { } + + enum E where T: Copy { Foo { t: T } } + + impl Copy for E { } + + impl Drop for E { } + } error_msg { + "trait impl for `Copy` does not meet well-formedness requirements" + } + } + + // Tests for Copy impls for builtin types + lowering_success! { + program { + #[lang(copy)] + trait Copy { } + + #[lang(drop)] + trait Drop { } + + impl Copy for u8 {} + impl Copy for f32 {} + impl Copy for char {} + impl Copy for bool {} + impl Copy for *const T {} + impl Copy for *mut T {} + impl<'a, T> Copy for &'a T {} + impl Copy for ! {} + } + } + + lowering_error! { + program { + #[lang(copy)] + trait Copy { } + + impl<'a, T> Copy for &'a mut T {} + } error_msg { + "trait impl for `Copy` does not meet well-formedness requirements" + } + } + + lowering_error! 
{ + program { + #[lang(copy)] + trait Copy { } + + #[object_safe] + trait Trait {} + + impl<'a> Copy for dyn Trait + 'a {} + } error_msg { + "trait impl for `Copy` does not meet well-formedness requirements" + } + } + + lowering_error! { + program { + #[lang(copy)] + trait Copy { } + + impl Copy for fn(u32) {} + } error_msg { + "trait impl for `Copy` does not meet well-formedness requirements" + } + } + + lowering_error! { + program { + #[lang(copy)] + trait Copy { } + + impl Copy for str {} + } error_msg { + "trait impl for `Copy` does not meet well-formedness requirements" + } + } + + lowering_error! { + program { + #[lang(copy)] + trait Copy { } + + impl Copy for [u32; 4] {} + } error_msg { + "trait impl for `Copy` does not meet well-formedness requirements" + } + } + + lowering_error! { + program { + #[lang(copy)] + trait Copy { } + + impl Copy for [u32] {} + } error_msg { + "trait impl for `Copy` does not meet well-formedness requirements" + } + } +} + +#[test] +fn drop_constraints() { + lowering_error! { + program { + #[lang(drop)] + trait Drop { } + + struct Foo { } + struct S { } + + impl Drop for S { } + } error_msg { + "trait impl for `Drop` does not meet well-formedness requirements" + } + } + + lowering_success! { + program { + trait Trait where Self: SuperTrait { } + trait SuperTrait {} + + #[lang(drop)] + trait Drop { } + + struct S where T: Trait { } + + impl Drop for S where T: SuperTrait { } + } + } + + lowering_success! { + program { + #[lang(drop)] + trait Drop { } + + struct S { } + + impl Drop for S { } + } + } + + lowering_error! { + program { + trait MyTrait { } + + #[lang(drop)] + trait Drop { } + + struct S{ } + + impl Drop for S where T: MyTrait { } + } error_msg { + "trait impl for `Drop` does not meet well-formedness requirements" + } + } +} + +#[test] +fn no_unsize_impls() { + lowering_error! 
{ + program { + #[lang(unsize)] + trait Unsize {} + + impl Unsize for u32 {} + } error_msg { + "trait impl for `Unsize` does not meet well-formedness requirements" + } + } +} + +#[test] +fn ill_formed_opaque_ty() { + lowering_error! { + program { + trait Foo {} + struct Bar {} + + opaque type T: Foo = Bar; + } error_msg { + "opaque type declaration `T` does not meet well-formedness requirements" + } + } + + lowering_error! { + program { + trait Foo { } + struct NotFoo { } + struct IsFoo { } + impl Foo for IsFoo { } + opaque type T: Foo = NotFoo; + } error_msg { + "opaque type declaration `T` does not meet well-formedness requirements" + } + } +} + +#[test] +fn coerce_unsized_pointer() { + lowering_success! { + program { + #[lang(unsize)] + trait Unsize {} + + #[lang(coerce_unsized)] + trait CoerceUnsized {} + + impl<'a, T, U> CoerceUnsized<*mut U> for &'a mut T where T: Unsize {} + impl CoerceUnsized<*mut U> for *mut T where T: Unsize {} + } + } + + // T: Unsize is not in the environment + lowering_error! { + program { + #[lang(unsize)] + trait Unsize {} + + #[lang(coerce_unsized)] + trait CoerceUnsized {} + + impl<'a, T, U> CoerceUnsized<*mut U> for &'a mut T {} + } error_msg { + "trait impl for `CoerceUnsized` does not meet well-formedness requirements" + } + } + + // Test with builtin Unsize impl + lowering_success! { + program { + #[lang(unsize)] + trait Unsize {} + + #[lang(coerce_unsized)] + trait CoerceUnsized {} + + #[object_safe] + trait Foo {} + + #[auto] + #[object_safe] + trait Auto {} + + impl<'a> CoerceUnsized<&'a (dyn Foo + 'a)> for &'a (dyn Foo + Auto + 'a) {} + } + } + + // Test with builtin Unsize impl + lowering_error! 
{ + program { + #[lang(unsize)] + trait Unsize {} + + #[lang(coerce_unsized)] + trait CoerceUnsized {} + + #[object_safe] + trait Foo {} + + #[auto] + #[object_safe] + trait Auto {} + + impl<'a> CoerceUnsized<&'a (dyn Foo + Auto + 'a)> for &'a (dyn Foo + 'a) {} + } error_msg { + "trait impl for `CoerceUnsized` does not meet well-formedness requirements" + } + } + + // Test with builtin Unsize impl + lowering_success! { + program { + #[lang(unsize)] + trait Unsize {} + + #[lang(coerce_unsized)] + trait CoerceUnsized {} + + impl<'a> CoerceUnsized<&'a [f32]> for &'a [f32; 3] {} + } + } + + // Coercing from shared to mut + lowering_error! { + program { + #[lang(unsize)] + trait Unsize {} + + #[lang(coerce_unsized)] + trait CoerceUnsized {} + + impl<'a, T, U> CoerceUnsized<*mut U> for &'a T where T: Unsize {} + } error_msg { + "trait impl for `CoerceUnsized` does not meet well-formedness requirements" + } + } + + // Coercing from shared to mut + lowering_error! { + program { + #[lang(unsize)] + trait Unsize {} + + #[lang(coerce_unsized)] + trait CoerceUnsized {} + + impl<'a, T, U> CoerceUnsized<&'a mut U> for &'a T where T: Unsize {} + } error_msg { + "trait impl for `CoerceUnsized` does not meet well-formedness requirements" + } + } + + // Coercing from shared to mut + lowering_error! { + program { + #[lang(unsize)] + trait Unsize {} + + #[lang(coerce_unsized)] + trait CoerceUnsized {} + + impl CoerceUnsized<*mut U> for *const T where T: Unsize {} + } error_msg { + "trait impl for `CoerceUnsized` does not meet well-formedness requirements" + } + } + + // Coercing from raw pointer to ref + lowering_error! { + program { + #[lang(unsize)] + trait Unsize {} + + #[lang(coerce_unsized)] + trait CoerceUnsized {} + + impl<'a, T, U> CoerceUnsized<&'a U> for *const T where T: Unsize {} + } error_msg { + "trait impl for `CoerceUnsized` does not meet well-formedness requirements" + } + } +} + +#[test] +fn coerce_unsized_struct() { + lowering_success! 
{ + program { + #[lang(unsize)] + trait Unsize {} + + #[lang(coerce_unsized)] + trait CoerceUnsized {} + + struct Foo<'a, T> where T: 'a { + t: &'a T + } + + struct Bar { + extra: T, + ptr: *mut U, + } + + impl<'a, T, U> CoerceUnsized<&'a U> for &'a T where T: Unsize {} + impl CoerceUnsized<*mut U> for *mut T where T: Unsize {} + impl<'a> CoerceUnsized> for Foo<'a, [u32; 3]> {} + impl CoerceUnsized> for Bar where U: Unsize {} + } + } + + // Unsizing different structs + lowering_error! { + program { + #[lang(unsize)] + trait Unsize {} + + #[lang(coerce_unsized)] + trait CoerceUnsized {} + + struct S1 { + t: T, + } + + struct S2 { + t: T, + } + + impl CoerceUnsized> for S1 where T: CoerceUnsized {} + } error_msg { + "trait impl for `CoerceUnsized` does not meet well-formedness requirements" + } + } + + // Unsizing enums + lowering_error! { + program { + #[lang(unsize)] + trait Unsize {} + + #[lang(coerce_unsized)] + trait CoerceUnsized {} + + enum Foo { + A { + t: T + } + } + + impl CoerceUnsized> for Foo where T: CoerceUnsized {} + } error_msg { + "trait impl for `CoerceUnsized` does not meet well-formedness requirements" + } + } + + // Unsizing two fields + lowering_error! { + program { + #[lang(unsize)] + trait Unsize {} + + #[lang(coerce_unsized)] + trait CoerceUnsized {} + + struct Bar { + ptr1: *mut T, + ptr2: *mut U, + } + + impl CoerceUnsized<*mut U> for *mut T where T: Unsize {} + impl CoerceUnsized> for Bar where U: Unsize, T: Unsize {} + } error_msg { + "trait impl for `CoerceUnsized` does not meet well-formedness requirements" + } + } + + // Unsizing no fields + lowering_error! 
{ + program { + #[lang(unsize)] + trait Unsize {} + + #[lang(coerce_unsized)] + trait CoerceUnsized {} + + struct Bar { + ptr1: *mut T, + ptr2: *mut U, + } + + impl CoerceUnsized<*mut U> for *mut T where T: Unsize {} + impl CoerceUnsized> for Bar where T: Unsize {} + } error_msg { + "trait impl for `CoerceUnsized` does not meet well-formedness requirements" + } + } + + // No unsize in the environment + lowering_error! { + program { + #[lang(unsize)] + trait Unsize {} + + #[lang(coerce_unsized)] + trait CoerceUnsized {} + + struct Bar { + extra: T, + ptr: *mut U, + } + + impl CoerceUnsized<*mut U> for *mut T where T: Unsize {} + impl CoerceUnsized> for Bar {} + } error_msg { + "trait impl for `CoerceUnsized` does not meet well-formedness requirements" + } + } + + // Phantom data test & CoerceUnsized in the environment test + lowering_success! { + program { + #[lang(unsize)] + trait Unsize {} + + #[lang(coerce_unsized)] + trait CoerceUnsized {} + + #[phantom_data] + struct PhantomData {} + + struct Foo { + coerce: T, + phantom: PhantomData, + } + + struct Bar { + extra: T, + phantom: PhantomData, + ptr: *mut U, + } + + impl CoerceUnsized<*mut U> for *mut T where T: Unsize {} + impl CoerceUnsized> for Bar where U: Unsize {} + impl CoerceUnsized> for Foo where T: CoerceUnsized {} + } + } +} diff --git a/tests/test_util.rs b/tests/test_util.rs new file mode 100644 index 00000000000..ac8e21ca7a7 --- /dev/null +++ b/tests/test_util.rs @@ -0,0 +1,54 @@ +#![allow(unused_macros)] + +macro_rules! 
lowering_success { + (program $program:tt) => { + let program_text = stringify!($program); + assert!(program_text.starts_with("{")); + assert!(program_text.ends_with("}")); + let result = chalk_solve::logging::with_tracing_logs(|| { + chalk_integration::db::ChalkDatabase::with( + &program_text[1..program_text.len() - 1], + chalk_integration::SolverChoice::default(), + ) + .checked_program() + }); + if let Err(ref e) = result { + println!("lowering error: {}", e); + } + assert!(result.is_ok()); + }; +} + +macro_rules! lowering_error { + (program $program:tt error_msg { $expected:expr }) => { + let program_text = stringify!($program); + assert!(program_text.starts_with("{")); + assert!(program_text.ends_with("}")); + let error = chalk_solve::logging::with_tracing_logs(|| { + chalk_integration::db::ChalkDatabase::with( + &program_text[1..program_text.len() - 1], + chalk_integration::SolverChoice::default(), + ) + .checked_program() + .unwrap_err() + .to_string() + }); + let expected = $expected.to_string(); + crate::test_util::assert_same(&error, &expected); + }; +} + +pub fn assert_same(result: &str, expected: &str) { + println!("expected:\n{}", expected); + println!("actual:\n{}", result); + + let expected1: String = expected.chars().filter(|w| !w.is_whitespace()).collect(); + let result1: String = result.chars().filter(|w| !w.is_whitespace()).collect(); + assert!(!expected1.is_empty(), "Expectation cannot be empty!"); + if !result1.starts_with(&expected1) { + let prefix = &result1[..std::cmp::min(result1.len(), expected1.len())]; + // These will never be equal, which will cause a nice error message + // to be displayed + pretty_assertions::assert_eq!(expected1, prefix); + } +} diff --git a/triagebot.toml b/triagebot.toml new file mode 100644 index 00000000000..06055c98899 --- /dev/null +++ b/triagebot.toml @@ -0,0 +1 @@ +[assign] \ No newline at end of file

::Pointer - } - } - } - - lowering_error! { - program { - trait Clone { } - trait PointerFamily { type Pointer: Clone where T: Clone; } - - struct Cow { } - struct CowFamily { } - - // No impl Clone for Cow, so this will fail. - impl PointerFamily for CowFamily { type Pointer = Cow; } - } error_msg { - "trait impl for \"PointerFamily\" does not meet well-formedness requirements" - } - } -} - -#[test] -fn higher_ranked_trait_bounds() { - lowering_error! { - program { - trait Foo<'a> { } - trait Bar where forall<'a> Self: Foo<'a> { } - struct i32 { } - - impl Bar for i32 { } - } error_msg { - "trait impl for \"Bar\" does not meet well-formedness requirements" - } - } - - lowering_success! { - program { - trait Foo<'a> { } - trait Bar where forall<'a> Self: Foo<'a> { } - struct i32 { } - - impl<'a> Foo<'a> for i32 { } - impl Bar for i32 { } - } - } -} - -#[test] -fn higher_ranked_trait_bound_on_gat() { - lowering_success! { - program { - trait Foo<'a> { } - struct i32 { } - - trait Bar<'a> { - type Item: Foo<'a> where forall<'b> V: Foo<'b>; - } - - impl<'a> Bar<'a> for i32 { - type Item = V; - } - } - } -} - -// See `cyclic_traits`, this is essentially the same but with higher-ranked co-inductive WF goals. -#[test] -fn higher_ranked_cyclic_requirements() { - lowering_success! { - program { - trait Foo where forall Self: Bar { } - trait Bar where forall Self: Foo { } - - impl Foo for U { } - impl Bar for U { } - } - } - - lowering_error! { - program { - trait Copy { } - trait Foo where forall Self: Bar, Self: Copy { } - trait Bar where forall Self: Foo { } - - impl Foo for U { } - impl Bar for U where U: Foo { } - } error_msg { - "trait impl for \"Foo\" does not meet well-formedness requirements" - } - } - - lowering_success! 
{ - program { - trait Copy { } - trait Foo where forall Self: Bar, Self: Copy { } - trait Bar where forall Self: Foo { } - - impl Foo for U where U: Copy { } - impl Bar for U where U: Foo { } - } - } -} - -#[test] -fn higher_ranked_inline_bound_on_gat() { - lowering_success! { - program { - trait Fn { } - struct Ref<'a, T> { } - struct i32 {} - - struct fn { } - - impl<'a, T> Fn> for for<'b> fn> { } - - trait Bar { - type Item: forall<'a> Fn>; - } - - impl Bar for i32 { - type Item = for<'a> fn>; - } - } - } - - lowering_error! { - program { - trait Fn { } - struct i32 {} - - struct fn { } - - impl Fn for fn { } - - trait Bar { - type Item: forall Fn; - } - - impl Bar for i32 { - type Item = fn; - } - } error_msg { - "trait impl for \"Bar\" does not meet well-formedness requirements" - } - } -} - -#[test] -fn assoc_type_recursive_bound() { - lowering_error! { - program { - trait Sized { } - trait Print { - // fn print(); - } - - trait Foo { - type Item: Sized where ::Item: Sized; - } - - struct i32 { } - struct str { } // not sized - - impl Foo for i32 { - // Well-formedness checks require that the following - // goal is true: - // ``` - // if (str: Sized) { # if the where clauses hold - // str: Sized # then the bound on the associated type hold - // } - // ``` - // which it is :) - type Item = str; - } - - struct OnlySized where T: Sized { } - impl Print for OnlySized { - // fn print() { - // println!("{}", std::mem::size_of::()); - // } - } - - trait Bar { - type Assoc: Print; - } - - impl Bar for T where T: Foo { - type Assoc = OnlySized<::Item>; - } - - // Above, we used to incorrectly assume that `OnlySized<::Item>` - // is well-formed because of the `FromEnv(T: Foo)`, hence making the `T: Bar` - // impl pass the well-formedness check. 
But the following query will - // (and should) always succeed, as there is no where clauses on `Assoc`: - // ``` - // forall { if (T: Bar) { WellFormed(::Assoc) } } - // ``` - // - // This may lead to the following code to compile: - - // ``` - // fn foo() { - // T::print() // oops, in fact `T = OnlySized` which is ill-formed - // } - - // fn bar { - // // ok, we have `FromEnv(T: Bar)` hence - // // `::Assoc` is well-formed and - // // `Implemented(::Assoc: Print)` hold - // foo<::Assoc>( - // } - - // fn main() { - // bar::() // ok, `Implemented(i32: Bar)` hold - // } - // ``` - } error_msg { - "trait impl for \"Bar\" does not meet well-formedness requirements" - } - } -} diff --git a/src/rust_ir.rs b/src/rust_ir.rs deleted file mode 100644 index 8d8eaae639a..00000000000 --- a/src/rust_ir.rs +++ /dev/null @@ -1,434 +0,0 @@ -//! Contains the definition for the "Rust IR" -- this is basically a "lowered" -//! version of the AST, roughly corresponding to [the HIR] in the Rust -//! compiler. - -use chalk_ir::fold::shift::Shift; -use chalk_ir::tls; -use chalk_ir::{ - ApplicationTy, Binders, Identifier, ItemId, Lifetime, Parameter, ParameterKind, ProgramClause, - ProjectionEq, ProjectionTy, QuantifiedWhereClause, TraitRef, Ty, WhereClause, -}; -use chalk_ir::debug::Angle; -use std::collections::BTreeMap; -use std::fmt; -use std::iter; - -pub mod lowering; - -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct Program { - /// From type-name to item-id. Used during lowering only. 
- crate type_ids: BTreeMap, - - /// For each struct/trait: - crate type_kinds: BTreeMap, - - /// For each struct: - crate struct_data: BTreeMap, - - /// For each impl: - crate impl_data: BTreeMap, - - /// For each trait: - crate trait_data: BTreeMap, - - /// For each associated ty: - crate associated_ty_data: BTreeMap, - - /// For each default impl (automatically generated for auto traits): - crate default_impl_data: Vec, - - /// For each user-specified clause - crate custom_clauses: Vec, - - /// Special types and traits. - crate lang_items: BTreeMap, -} - -impl Program { - /// Given a projection of an associated type, split the type parameters - /// into those that come from the *trait* and those that come from the - /// *associated type itself*. So e.g. if you have `(Iterator::Item)`, - /// this would return `([F], [])`, since `Iterator::Item` is not generic - /// and hence doesn't have any type parameters itself. - /// - /// Used primarily for debugging output. - crate fn split_projection<'p>( - &self, - projection: &'p ProjectionTy, - ) -> (&AssociatedTyDatum, &'p [Parameter], &'p [Parameter]) { - let ProjectionTy { - associated_ty_id, - ref parameters, - } = *projection; - let associated_ty_data = &self.associated_ty_data[&associated_ty_id]; - let trait_datum = &self.trait_data[&associated_ty_data.trait_id]; - let trait_num_params = trait_datum.binders.len(); - let split_point = parameters.len() - trait_num_params; - let (other_params, trait_params) = parameters.split_at(split_point); - (associated_ty_data, trait_params, other_params) - } -} - -impl tls::DebugContext for Program { - fn debug_item_id(&self, item_id: ItemId, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> { - if let Some(k) = self.type_kinds.get(&item_id) { - write!(fmt, "{}", k.name) - } else if let Some(k) = self.associated_ty_data.get(&item_id) { - write!(fmt, "({:?}::{})", k.trait_id, k.name) - } else { - fmt.debug_struct("InvalidItemId") - .field("index", &item_id.index) - .finish() - } 
- } - - fn debug_projection( - &self, - projection_ty: &ProjectionTy, - fmt: &mut fmt::Formatter, - ) -> Result<(), fmt::Error> { - let (associated_ty_data, trait_params, other_params) = - self.split_projection(projection_ty); - write!( - fmt, - "<{:?} as {:?}{:?}>::{}{:?}", - &trait_params[0], - associated_ty_data.trait_id, - Angle(&trait_params[1..]), - associated_ty_data.name, - Angle(&other_params) - ) - } -} - -#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] -pub enum LangItem { - DerefTrait, -} - -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub struct ImplDatum { - crate binders: Binders, -} - -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub struct ImplDatumBound { - crate trait_ref: PolarizedTraitRef, - crate where_clauses: Vec, - crate associated_ty_values: Vec, - crate specialization_priority: usize, - crate impl_type: ImplType, -} - -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub enum ImplType { - Local, - External, -} - -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub struct DefaultImplDatum { - crate binders: Binders, -} - -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub struct DefaultImplDatumBound { - crate trait_ref: TraitRef, - crate accessible_tys: Vec, -} - -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub struct StructDatum { - crate binders: Binders, -} - -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub struct StructDatumBound { - crate self_ty: ApplicationTy, - crate fields: Vec, - crate where_clauses: Vec, - crate flags: StructFlags, -} - -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub struct StructFlags { - crate upstream: bool, - crate fundamental: bool, -} - -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub struct TraitDatum { - crate binders: Binders, -} - -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub struct TraitDatumBound { - crate trait_ref: TraitRef, - crate where_clauses: Vec, - crate flags: TraitFlags, -} - -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub struct TraitFlags { - crate auto: bool, - 
crate marker: bool, - crate upstream: bool, - crate fundamental: bool, - pub deref: bool, -} - -/// An inline bound, e.g. `: Foo` in `impl> SomeType`. -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub enum InlineBound { - TraitBound(TraitBound), - ProjectionEqBound(ProjectionEqBound), -} - -enum_fold!(InlineBound[] { TraitBound(a), ProjectionEqBound(a) }); - -pub type QuantifiedInlineBound = Binders; - -crate trait IntoWhereClauses { - type Output; - - fn into_where_clauses(&self, self_ty: Ty) -> Vec; -} - -impl IntoWhereClauses for InlineBound { - type Output = WhereClause; - - /// Applies the `InlineBound` to `self_ty` and lowers to a - /// [`chalk_ir::DomainGoal`]. - /// - /// Because an `InlineBound` does not know anything about what it's binding, - /// you must provide that type as `self_ty`. - fn into_where_clauses(&self, self_ty: Ty) -> Vec { - match self { - InlineBound::TraitBound(b) => b.into_where_clauses(self_ty), - InlineBound::ProjectionEqBound(b) => b.into_where_clauses(self_ty), - } - } -} - -impl IntoWhereClauses for QuantifiedInlineBound { - type Output = QuantifiedWhereClause; - - fn into_where_clauses(&self, self_ty: Ty) -> Vec { - let self_ty = self_ty.shifted_in(self.binders.len()); - self.value - .into_where_clauses(self_ty) - .into_iter() - .map(|wc| Binders { - binders: self.binders.clone(), - value: wc, - }).collect() - } -} - -/// Represents a trait bound on e.g. a type or type parameter. -/// Does not know anything about what it's binding. 
-#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub struct TraitBound { - crate trait_id: ItemId, - crate args_no_self: Vec, -} - -struct_fold!(TraitBound { - trait_id, - args_no_self, -}); - -impl TraitBound { - fn into_where_clauses(&self, self_ty: Ty) -> Vec { - let trait_ref = self.as_trait_ref(self_ty); - vec![WhereClause::Implemented(trait_ref)] - } - - crate fn as_trait_ref(&self, self_ty: Ty) -> TraitRef { - let self_ty = ParameterKind::Ty(self_ty); - TraitRef { - trait_id: self.trait_id, - parameters: iter::once(self_ty) - .chain(self.args_no_self.iter().cloned()) - .collect(), - } - } -} -/// Represents a projection equality bound on e.g. a type or type parameter. -/// Does not know anything about what it's binding. -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub struct ProjectionEqBound { - crate trait_bound: TraitBound, - crate associated_ty_id: ItemId, - /// Does not include trait parameters. - crate parameters: Vec, - crate value: Ty, -} - -struct_fold!(ProjectionEqBound { - trait_bound, - associated_ty_id, - parameters, - value, -}); - -impl ProjectionEqBound { - fn into_where_clauses(&self, self_ty: Ty) -> Vec { - let trait_ref = self.trait_bound.as_trait_ref(self_ty); - - let mut parameters = self.parameters.clone(); - parameters.extend(trait_ref.parameters.clone()); - - vec![ - WhereClause::Implemented(trait_ref), - WhereClause::ProjectionEq(ProjectionEq { - projection: ProjectionTy { - associated_ty_id: self.associated_ty_id, - parameters: parameters, - }, - ty: self.value.clone(), - }), - ] - } -} - -pub trait Anonymize { - /// Utility function that converts from a list of generic parameters - /// which *have* names (`ParameterKind`) to a list of - /// "anonymous" generic parameters that just preserves their - /// kinds (`ParameterKind<()>`). Often convenient in lowering. 
- fn anonymize(&self) -> Vec>; -} - -impl Anonymize for [ParameterKind] { - fn anonymize(&self) -> Vec> { - self.iter().map(|pk| pk.map(|_| ())).collect() - } -} - -pub trait ToParameter { - /// Utility for converting a list of all the binders into scope - /// into references to those binders. Simply pair the binders with - /// the indices, and invoke `to_parameter()` on the `(binder, - /// index)` pair. The result will be a reference to a bound - /// variable of appropriate kind at the corresponding index. - fn to_parameter(&self) -> Parameter; -} - -impl<'a> ToParameter for (&'a ParameterKind<()>, usize) { - fn to_parameter(&self) -> Parameter { - let &(binder, index) = self; - match *binder { - ParameterKind::Lifetime(_) => ParameterKind::Lifetime(Lifetime::BoundVar(index)), - ParameterKind::Ty(_) => ParameterKind::Ty(Ty::BoundVar(index)), - } - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub struct AssociatedTyDatum { - /// The trait this associated type is defined in. - crate trait_id: ItemId, - - /// The ID of this associated type - crate id: ItemId, - - /// Name of this associated type. - crate name: Identifier, - - /// Parameters on this associated type, beginning with those from the trait, - /// but possibly including more. - crate parameter_kinds: Vec>, - - /// Bounds on the associated type itself. - /// - /// These must be proven by the implementer, for all possible parameters that - /// would result in a well-formed projection. - crate bounds: Vec, - - /// Where clauses that must hold for the projection to be well-formed. - crate where_clauses: Vec, -} - -impl AssociatedTyDatum { - /// Returns the associated ty's bounds applied to the projection type, e.g.: - /// - /// ```notrust - /// Implemented(::Item: Sized) - /// ``` - crate fn bounds_on_self(&self) -> Vec { - let parameters = self - .parameter_kinds - .anonymize() - .iter() - .zip(0..) 
- .map(|p| p.to_parameter()) - .collect(); - let self_ty = Ty::Projection(ProjectionTy { - associated_ty_id: self.id, - parameters, - }); - self.bounds - .iter() - .flat_map(|b| b.into_where_clauses(self_ty.clone())) - .collect() - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub struct AssociatedTyValue { - crate associated_ty_id: ItemId, - - // note: these binders are in addition to those from the impl - crate value: Binders, -} - -struct_fold!(AssociatedTyValue { - associated_ty_id, - value, -}); - -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub struct AssociatedTyValueBound { - /// Type that we normalize to. The X in `type Foo<'a> = X`. - crate ty: Ty, -} - -struct_fold!(AssociatedTyValueBound { ty }); - -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub struct TypeKind { - crate sort: TypeSort, - crate name: Identifier, - crate binders: Binders<()>, -} - -#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub enum TypeSort { - Struct, - Trait, -} - -#[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Debug)] -pub enum PolarizedTraitRef { - Positive(TraitRef), - Negative(TraitRef), -} - -enum_fold!(PolarizedTraitRef[] { Positive(a), Negative(a) }); - -impl PolarizedTraitRef { - crate fn is_positive(&self) -> bool { - match *self { - PolarizedTraitRef::Positive(_) => true, - PolarizedTraitRef::Negative(_) => false, - } - } - - crate fn trait_ref(&self) -> &TraitRef { - match *self { - PolarizedTraitRef::Positive(ref tr) | PolarizedTraitRef::Negative(ref tr) => tr, - } - } -} diff --git a/src/rust_ir/lowering.rs b/src/rust_ir/lowering.rs deleted file mode 100644 index f618834344f..00000000000 --- a/src/rust_ir/lowering.rs +++ /dev/null @@ -1,1215 +0,0 @@ -use std::collections::BTreeMap; - -use chalk_parse::ast::*; -use lalrpop_intern::intern; - -use chalk_ir; -use chalk_ir::cast::{Cast, Caster}; -use chalk_solve::solve::SolverChoice; -use errors::*; -use itertools::Itertools; -use rust_ir::{self, Anonymize, ToParameter}; - -mod 
test; - -type TypeIds = BTreeMap; -type TypeKinds = BTreeMap; -type AssociatedTyInfos = BTreeMap<(chalk_ir::ItemId, chalk_ir::Identifier), AssociatedTyInfo>; -type ParameterMap = BTreeMap, usize>; - -#[derive(Clone, Debug)] -struct Env<'k> { - type_ids: &'k TypeIds, - type_kinds: &'k TypeKinds, - associated_ty_infos: &'k AssociatedTyInfos, - /// Parameter identifiers are used as keys, therefore - /// all indentifiers in an environment must be unique (no shadowing). - parameter_map: ParameterMap, -} - -#[derive(Debug, PartialEq, Eq)] -struct AssociatedTyInfo { - id: chalk_ir::ItemId, - addl_parameter_kinds: Vec>, -} - -enum NameLookup { - Type(chalk_ir::ItemId), - Parameter(usize), -} - -enum LifetimeLookup { - Parameter(usize), -} - -const SELF: &str = "Self"; - -impl<'k> Env<'k> { - fn lookup(&self, name: Identifier) -> Result { - if let Some(k) = self - .parameter_map - .get(&chalk_ir::ParameterKind::Ty(name.str)) - { - return Ok(NameLookup::Parameter(*k)); - } - - if let Some(id) = self.type_ids.get(&name.str) { - return Ok(NameLookup::Type(*id)); - } - - bail!(ErrorKind::InvalidTypeName(name)) - } - - fn lookup_lifetime(&self, name: Identifier) -> Result { - if let Some(k) = self - .parameter_map - .get(&chalk_ir::ParameterKind::Lifetime(name.str)) - { - return Ok(LifetimeLookup::Parameter(*k)); - } - - bail!("invalid lifetime name: {:?}", name.str); - } - - fn type_kind(&self, id: chalk_ir::ItemId) -> &rust_ir::TypeKind { - &self.type_kinds[&id] - } - - /// Introduces new parameters, shifting the indices of existing - /// parameters to accommodate them. The indices of the new binders - /// will be assigned in order as they are iterated. 
- fn introduce(&self, binders: I) -> Result - where - I: IntoIterator>, - I::IntoIter: ExactSizeIterator, - { - let binders = binders.into_iter().enumerate().map(|(i, k)| (k, i)); - let len = binders.len(); - let parameter_map: ParameterMap = self - .parameter_map - .iter() - .map(|(&k, &v)| (k, v + len)) - .chain(binders) - .collect(); - if parameter_map.len() != self.parameter_map.len() + len { - bail!("duplicate or shadowed parameters"); - } - Ok(Env { - parameter_map, - ..*self - }) - } - - fn in_binders(&self, binders: I, op: OP) -> Result> - where - I: IntoIterator>, - I::IntoIter: ExactSizeIterator, - OP: FnOnce(&Self) -> Result, - { - let binders: Vec<_> = binders.into_iter().collect(); - let env = self.introduce(binders.iter().cloned())?; - Ok(chalk_ir::Binders { - binders: binders.anonymize(), - value: op(&env)?, - }) - } -} - -pub trait LowerProgram { - /// Lowers from a Program AST to the internal IR for a program. - fn lower(&self, solver_choice: SolverChoice) -> Result; -} - -impl LowerProgram for Program { - fn lower(&self, solver_choice: SolverChoice) -> Result { - let mut index = 0; - let mut next_item_id = || -> chalk_ir::ItemId { - let i = index; - index += 1; - chalk_ir::ItemId { index: i } - }; - - // Make a vector mapping each thing in `items` to an id, - // based just on its position: - let item_ids: Vec<_> = self.items.iter().map(|_| next_item_id()).collect(); - - // Create ids for associated types - let mut associated_ty_infos = BTreeMap::new(); - for (item, &item_id) in self.items.iter().zip(&item_ids) { - if let Item::TraitDefn(ref d) = *item { - if d.flags.auto && !d.assoc_ty_defns.is_empty() { - bail!("auto trait cannot define associated types"); - } - for defn in &d.assoc_ty_defns { - let addl_parameter_kinds = defn.all_parameters(); - let info = AssociatedTyInfo { - id: next_item_id(), - addl_parameter_kinds, - }; - associated_ty_infos.insert((item_id, defn.name.str), info); - } - } - } - - let mut type_ids = BTreeMap::new(); - let 
mut type_kinds = BTreeMap::new(); - for (item, &item_id) in self.items.iter().zip(&item_ids) { - let k = match *item { - Item::StructDefn(ref d) => d.lower_type_kind()?, - Item::TraitDefn(ref d) => d.lower_type_kind()?, - Item::Impl(_) => continue, - Item::Clause(_) => continue, - }; - type_ids.insert(k.name, item_id); - type_kinds.insert(item_id, k); - } - - let mut struct_data = BTreeMap::new(); - let mut trait_data = BTreeMap::new(); - let mut impl_data = BTreeMap::new(); - let mut associated_ty_data = BTreeMap::new(); - let mut custom_clauses = Vec::new(); - let mut lang_items = BTreeMap::new(); - for (item, &item_id) in self.items.iter().zip(&item_ids) { - let empty_env = Env { - type_ids: &type_ids, - type_kinds: &type_kinds, - associated_ty_infos: &associated_ty_infos, - parameter_map: BTreeMap::new(), - }; - - match *item { - Item::StructDefn(ref d) => { - struct_data.insert(item_id, d.lower_struct(item_id, &empty_env)?); - } - Item::TraitDefn(ref d) => { - trait_data.insert(item_id, d.lower_trait(item_id, &empty_env)?); - - for defn in &d.assoc_ty_defns { - let info = &associated_ty_infos[&(item_id, defn.name.str)]; - - let mut parameter_kinds = defn.all_parameters(); - parameter_kinds.extend(d.all_parameters()); - let env = empty_env.introduce(parameter_kinds.clone())?; - - associated_ty_data.insert( - info.id, - rust_ir::AssociatedTyDatum { - trait_id: item_id, - id: info.id, - name: defn.name.str, - parameter_kinds: parameter_kinds, - bounds: defn.bounds.lower(&env)?, - where_clauses: defn.where_clauses.lower(&env)?, - }, - ); - } - - if d.flags.deref { - use std::collections::btree_map::Entry::*; - match lang_items.entry(rust_ir::LangItem::DerefTrait) { - Vacant(entry) => { - entry.insert(item_id); - } - Occupied(_) => { - bail!(ErrorKind::DuplicateLangItem(rust_ir::LangItem::DerefTrait)) - } - } - } - } - Item::Impl(ref d) => { - impl_data.insert(item_id, d.lower_impl(&empty_env)?); - } - Item::Clause(ref clause) => { - 
custom_clauses.extend(clause.lower_clause(&empty_env)?); - } - } - } - - let mut program = rust_ir::Program { - type_ids, - type_kinds, - struct_data, - trait_data, - impl_data, - associated_ty_data, - custom_clauses, - lang_items, - default_impl_data: Vec::new(), - }; - - program.add_default_impls(); - program.record_specialization_priorities(solver_choice)?; - program.verify_well_formedness(solver_choice)?; - program.perform_orphan_check(solver_choice)?; - Ok(program) - } -} - -trait LowerTypeKind { - fn lower_type_kind(&self) -> Result; -} - -trait LowerParameterMap { - fn synthetic_parameters(&self) -> Option>; - fn declared_parameters(&self) -> &[ParameterKind]; - fn all_parameters(&self) -> Vec> { - self.synthetic_parameters() - .into_iter() - .chain(self.declared_parameters().iter().map(|id| id.lower())) - .collect() - - /* TODO: switch to this ordering, but adjust *all* the code to match - - self.declared_parameters() - .iter() - .map(|id| id.lower()) - .chain(self.synthetic_parameters()) // (*) see below - .collect() - */ - } - - fn parameter_refs(&self) -> Vec { - self.all_parameters() - .anonymize() - .iter() - .zip(0..) - .map(|p| p.to_parameter()) - .collect() - } - - fn parameter_map(&self) -> ParameterMap { - // (*) It is important that the declared parameters come - // before the subtle parameters in the ordering. This is - // because of traits, when used as types, only have the first - // N parameters in their kind (that is, they do not have Self). - // - // Note that if `Self` appears in the where-clauses etc, the - // trait is not object-safe, and hence not supposed to be used - // as an object. Actually the handling of object types is - // probably just kind of messed up right now. That's ok. - self.all_parameters() - .into_iter() - .zip(0..) 
- .collect() - } -} - -impl LowerParameterMap for StructDefn { - fn synthetic_parameters(&self) -> Option> { - None - } - - fn declared_parameters(&self) -> &[ParameterKind] { - &self.parameter_kinds - } -} - -impl LowerParameterMap for Impl { - fn synthetic_parameters(&self) -> Option> { - None - } - - fn declared_parameters(&self) -> &[ParameterKind] { - &self.parameter_kinds - } -} - -impl LowerParameterMap for AssocTyDefn { - fn synthetic_parameters(&self) -> Option> { - None - } - - fn declared_parameters(&self) -> &[ParameterKind] { - &self.parameter_kinds - } -} - -impl LowerParameterMap for AssocTyValue { - fn synthetic_parameters(&self) -> Option> { - None - } - - fn declared_parameters(&self) -> &[ParameterKind] { - &self.parameter_kinds - } -} - -impl LowerParameterMap for TraitDefn { - fn synthetic_parameters(&self) -> Option> { - Some(chalk_ir::ParameterKind::Ty(intern(SELF))) - } - - fn declared_parameters(&self) -> &[ParameterKind] { - &self.parameter_kinds - } -} - -impl LowerParameterMap for Clause { - fn synthetic_parameters(&self) -> Option> { - None - } - - fn declared_parameters(&self) -> &[ParameterKind] { - &self.parameter_kinds - } -} - -trait LowerParameterKind { - fn lower(&self) -> chalk_ir::ParameterKind; -} - -impl LowerParameterKind for ParameterKind { - fn lower(&self) -> chalk_ir::ParameterKind { - match *self { - ParameterKind::Ty(ref n) => chalk_ir::ParameterKind::Ty(n.str), - ParameterKind::Lifetime(ref n) => chalk_ir::ParameterKind::Lifetime(n.str), - } - } -} - -trait LowerWhereClauses { - fn where_clauses(&self) -> &[QuantifiedWhereClause]; - - fn lower_where_clauses(&self, env: &Env) -> Result> { - self.where_clauses().lower(env) - } -} - -impl LowerTypeKind for StructDefn { - fn lower_type_kind(&self) -> Result { - Ok(rust_ir::TypeKind { - sort: rust_ir::TypeSort::Struct, - name: self.name.str, - binders: chalk_ir::Binders { - binders: self.all_parameters().anonymize(), - value: (), - }, - }) - } -} - -impl LowerWhereClauses 
for StructDefn { - fn where_clauses(&self) -> &[QuantifiedWhereClause] { - &self.where_clauses - } -} - -impl LowerTypeKind for TraitDefn { - fn lower_type_kind(&self) -> Result { - let binders: Vec<_> = self.parameter_kinds.iter().map(|p| p.lower()).collect(); - Ok(rust_ir::TypeKind { - sort: rust_ir::TypeSort::Trait, - name: self.name.str, - binders: chalk_ir::Binders { - // for the purposes of the *type*, ignore `Self`: - binders: binders.anonymize(), - value: (), - }, - }) - } -} - -impl LowerWhereClauses for TraitDefn { - fn where_clauses(&self) -> &[QuantifiedWhereClause] { - &self.where_clauses - } -} - -impl LowerWhereClauses for Impl { - fn where_clauses(&self) -> &[QuantifiedWhereClause] { - &self.where_clauses - } -} - -trait LowerWhereClauseVec { - fn lower(&self, env: &Env) -> Result>; -} - -impl LowerWhereClauseVec for [QuantifiedWhereClause] { - fn lower(&self, env: &Env) -> Result> { - self.iter() - .flat_map(|wc| match wc.lower(env) { - Ok(v) => v.into_iter().map(Ok).collect(), - Err(e) => vec![Err(e)], - }).collect() - } -} - -trait LowerWhereClause { - /// Lower from an AST `where` clause to an internal IR. - /// Some AST `where` clauses can lower to multiple ones, this is why we return a `Vec`. - /// As for now, this is the only the case for `where T: Foo` which lowers to - /// `Implemented(T: Foo)` and `ProjectionEq(::Item = U)`. 
- fn lower(&self, env: &Env) -> Result>; -} - -impl LowerWhereClause for WhereClause { - fn lower(&self, env: &Env) -> Result> { - let where_clauses = match self { - WhereClause::Implemented { trait_ref } => { - vec![chalk_ir::WhereClause::Implemented(trait_ref.lower(env)?)] - } - WhereClause::ProjectionEq { projection, ty } => vec![ - chalk_ir::WhereClause::ProjectionEq(chalk_ir::ProjectionEq { - projection: projection.lower(env)?, - ty: ty.lower(env)?, - }), - chalk_ir::WhereClause::Implemented(projection.trait_ref.lower(env)?), - ], - }; - Ok(where_clauses) - } -} - -impl LowerWhereClause for QuantifiedWhereClause { - fn lower(&self, env: &Env) -> Result> { - let parameter_kinds = self.parameter_kinds.iter().map(|pk| pk.lower()); - let binders = env.in_binders(parameter_kinds, |env| Ok(self.where_clause.lower(env)?))?; - Ok(binders.into_iter().collect()) - } -} - -trait LowerDomainGoal { - fn lower(&self, env: &Env) -> Result>; -} - -impl LowerDomainGoal for DomainGoal { - fn lower(&self, env: &Env) -> Result> { - let goals = match self { - DomainGoal::Holds { where_clause } => { - where_clause.lower(env)?.into_iter().casted().collect() - } - DomainGoal::Normalize { projection, ty } => { - vec![chalk_ir::DomainGoal::Normalize(chalk_ir::Normalize { - projection: projection.lower(env)?, - ty: ty.lower(env)?, - })] - } - DomainGoal::TyWellFormed { ty } => vec![chalk_ir::DomainGoal::WellFormed( - chalk_ir::WellFormed::Ty(ty.lower(env)?), - )], - DomainGoal::TraitRefWellFormed { trait_ref } => vec![chalk_ir::DomainGoal::WellFormed( - chalk_ir::WellFormed::Trait(trait_ref.lower(env)?), - )], - DomainGoal::TyFromEnv { ty } => vec![chalk_ir::DomainGoal::FromEnv( - chalk_ir::FromEnv::Ty(ty.lower(env)?), - )], - DomainGoal::TraitRefFromEnv { trait_ref } => vec![chalk_ir::DomainGoal::FromEnv( - chalk_ir::FromEnv::Trait(trait_ref.lower(env)?), - )], - DomainGoal::TraitInScope { trait_name } => { - let id = match env.lookup(*trait_name)? 
{ - NameLookup::Type(id) => id, - NameLookup::Parameter(_) => bail!(ErrorKind::NotTrait(*trait_name)), - }; - - if env.type_kind(id).sort != rust_ir::TypeSort::Trait { - bail!(ErrorKind::NotTrait(*trait_name)); - } - - vec![chalk_ir::DomainGoal::InScope(id)] - } - DomainGoal::Derefs { source, target } => { - vec![chalk_ir::DomainGoal::Derefs(chalk_ir::Derefs { - source: source.lower(env)?, - target: target.lower(env)?, - })] - } - DomainGoal::IsLocal { ty } => vec![chalk_ir::DomainGoal::IsLocal(ty.lower(env)?)], - DomainGoal::IsUpstream { ty } => vec![chalk_ir::DomainGoal::IsUpstream(ty.lower(env)?)], - DomainGoal::IsFullyVisible { ty } => { - vec![chalk_ir::DomainGoal::IsFullyVisible(ty.lower(env)?)] - } - DomainGoal::LocalImplAllowed { trait_ref } => { - vec![chalk_ir::DomainGoal::LocalImplAllowed( - trait_ref.lower(env)?, - )] - } - DomainGoal::Compatible => vec![chalk_ir::DomainGoal::Compatible(())], - DomainGoal::DownstreamType { ty } => { - vec![chalk_ir::DomainGoal::DownstreamType(ty.lower(env)?)] - } - }; - Ok(goals) - } -} - -trait LowerLeafGoal { - fn lower(&self, env: &Env) -> Result>; -} - -impl LowerLeafGoal for LeafGoal { - fn lower(&self, env: &Env) -> Result> { - let goals = match self { - LeafGoal::DomainGoal { goal } => goal - .lower(env)? 
- .into_iter() - .map(|goal| chalk_ir::LeafGoal::DomainGoal(goal)) - .collect(), - LeafGoal::UnifyTys { a, b } => vec![ - chalk_ir::EqGoal { - a: chalk_ir::ParameterKind::Ty(a.lower(env)?), - b: chalk_ir::ParameterKind::Ty(b.lower(env)?), - }.cast(), - ], - LeafGoal::UnifyLifetimes { ref a, ref b } => vec![ - chalk_ir::EqGoal { - a: chalk_ir::ParameterKind::Lifetime(a.lower(env)?), - b: chalk_ir::ParameterKind::Lifetime(b.lower(env)?), - }.cast(), - ], - }; - Ok(goals) - } -} - -trait LowerStructDefn { - fn lower_struct(&self, item_id: chalk_ir::ItemId, env: &Env) -> Result; -} - -impl LowerStructDefn for StructDefn { - fn lower_struct(&self, item_id: chalk_ir::ItemId, env: &Env) -> Result { - let binders = env.in_binders(self.all_parameters(), |env| { - let self_ty = chalk_ir::ApplicationTy { - name: chalk_ir::TypeName::ItemId(item_id), - parameters: self - .all_parameters() - .anonymize() - .iter() - .zip(0..) - .map(|p| p.to_parameter()) - .collect(), - }; - - if self.flags.fundamental && self_ty.len_type_parameters() != 1 { - bail!("Only fundamental types with a single parameter are supported"); - } - - let fields: Result<_> = self.fields.iter().map(|f| f.ty.lower(env)).collect(); - let where_clauses = self.lower_where_clauses(env)?; - - Ok(rust_ir::StructDatumBound { - self_ty, - fields: fields?, - where_clauses, - flags: rust_ir::StructFlags { - upstream: self.flags.upstream, - fundamental: self.flags.fundamental, - }, - }) - })?; - - Ok(rust_ir::StructDatum { binders }) - } -} - -fn check_type_kinds(msg: &str, expected: &A, actual: &B) -> Result<()> { - let expected_kind = expected.kind(); - let actual_kind = actual.kind(); - if expected_kind != actual_kind { - bail!("{}: expected {}, found {}", msg, expected_kind, actual_kind); - } else { - Ok(()) - } -} - -trait LowerTraitRef { - fn lower(&self, env: &Env) -> Result; -} - -impl LowerTraitRef for TraitRef { - fn lower(&self, env: &Env) -> Result { - let without_self = TraitBound { - trait_name: 
self.trait_name, - args_no_self: self.args.iter().cloned().skip(1).collect(), - }.lower(env)?; - - let self_parameter = self.args[0].lower(env)?; - Ok(without_self.as_trait_ref(self_parameter.ty().unwrap())) - } -} - -trait LowerTraitBound { - fn lower(&self, env: &Env) -> Result; -} - -impl LowerTraitBound for TraitBound { - fn lower(&self, env: &Env) -> Result { - let id = match env.lookup(self.trait_name)? { - NameLookup::Type(id) => id, - NameLookup::Parameter(_) => bail!(ErrorKind::NotTrait(self.trait_name)), - }; - - let k = env.type_kind(id); - if k.sort != rust_ir::TypeSort::Trait { - bail!(ErrorKind::NotTrait(self.trait_name)); - } - - let parameters = self - .args_no_self - .iter() - .map(|a| Ok(a.lower(env)?)) - .collect::>>()?; - - if parameters.len() != k.binders.len() { - bail!( - "wrong number of parameters, expected `{:?}`, got `{:?}`", - k.binders.len(), - parameters.len() - ) - } - - for (binder, param) in k.binders.binders.iter().zip(parameters.iter()) { - check_type_kinds("incorrect kind for trait parameter", binder, param)?; - } - - Ok(rust_ir::TraitBound { - trait_id: id, - args_no_self: parameters, - }) - } -} - -trait LowerProjectionEqBound { - fn lower(&self, env: &Env) -> Result; -} - -impl LowerProjectionEqBound for ProjectionEqBound { - fn lower(&self, env: &Env) -> Result { - let trait_bound = self.trait_bound.lower(env)?; - let info = match env - .associated_ty_infos - .get(&(trait_bound.trait_id, self.name.str)) - { - Some(info) => info, - None => bail!("no associated type `{}` defined in trait", self.name.str), - }; - let args: Vec<_> = try!(self.args.iter().map(|a| a.lower(env)).collect()); - - if args.len() != info.addl_parameter_kinds.len() { - bail!( - "wrong number of parameters for associated type (expected {}, got {})", - info.addl_parameter_kinds.len(), - args.len() - ) - } - - for (param, arg) in info.addl_parameter_kinds.iter().zip(args.iter()) { - check_type_kinds("incorrect kind for associated type parameter", param, 
arg)?; - } - - Ok(rust_ir::ProjectionEqBound { - trait_bound, - associated_ty_id: info.id, - parameters: args, - value: self.value.lower(env)?, - }) - } -} - -trait LowerInlineBound { - fn lower(&self, env: &Env) -> Result; -} - -impl LowerInlineBound for InlineBound { - fn lower(&self, env: &Env) -> Result { - let bound = match self { - InlineBound::TraitBound(b) => rust_ir::InlineBound::TraitBound(b.lower(&env)?), - InlineBound::ProjectionEqBound(b) => { - rust_ir::InlineBound::ProjectionEqBound(b.lower(&env)?) - } - }; - Ok(bound) - } -} - -trait LowerQuantifiedInlineBound { - fn lower(&self, env: &Env) -> Result; -} - -impl LowerQuantifiedInlineBound for QuantifiedInlineBound { - fn lower(&self, env: &Env) -> Result { - let parameter_kinds = self.parameter_kinds.iter().map(|pk| pk.lower()); - let binders = env.in_binders(parameter_kinds, |env| Ok(self.bound.lower(env)?))?; - Ok(binders) - } -} - -trait LowerQuantifiedInlineBoundVec { - fn lower(&self, env: &Env) -> Result>; -} - -impl LowerQuantifiedInlineBoundVec for [QuantifiedInlineBound] { - fn lower(&self, env: &Env) -> Result> { - self.iter().map(|b| b.lower(env)).collect() - } -} - -trait LowerPolarizedTraitRef { - fn lower(&self, env: &Env) -> Result; -} - -impl LowerPolarizedTraitRef for PolarizedTraitRef { - fn lower(&self, env: &Env) -> Result { - Ok(match *self { - PolarizedTraitRef::Positive(ref tr) => { - rust_ir::PolarizedTraitRef::Positive(tr.lower(env)?) - } - PolarizedTraitRef::Negative(ref tr) => { - rust_ir::PolarizedTraitRef::Negative(tr.lower(env)?) 
- } - }) - } -} - -trait LowerProjectionTy { - fn lower(&self, env: &Env) -> Result; -} - -impl LowerProjectionTy for ProjectionTy { - fn lower(&self, env: &Env) -> Result { - let ProjectionTy { - ref trait_ref, - ref name, - ref args, - } = *self; - let chalk_ir::TraitRef { - trait_id, - parameters: trait_parameters, - } = trait_ref.lower(env)?; - let info = match env.associated_ty_infos.get(&(trait_id, name.str)) { - Some(info) => info, - None => bail!("no associated type `{}` defined in trait", name.str), - }; - let mut args: Vec<_> = try!(args.iter().map(|a| a.lower(env)).collect()); - - if args.len() != info.addl_parameter_kinds.len() { - bail!( - "wrong number of parameters for associated type (expected {}, got {})", - info.addl_parameter_kinds.len(), - args.len() - ) - } - - for (param, arg) in info.addl_parameter_kinds.iter().zip(args.iter()) { - check_type_kinds("incorrect kind for associated type parameter", param, arg)?; - } - - args.extend(trait_parameters); - - Ok(chalk_ir::ProjectionTy { - associated_ty_id: info.id, - parameters: args, - }) - } -} - -trait LowerUnselectedProjectionTy { - fn lower(&self, env: &Env) -> Result; -} - -impl LowerUnselectedProjectionTy for UnselectedProjectionTy { - fn lower(&self, env: &Env) -> Result { - let parameters: Vec<_> = try!(self.args.iter().map(|a| a.lower(env)).collect()); - let ret = chalk_ir::UnselectedProjectionTy { - type_name: self.name.str, - parameters: parameters, - }; - Ok(ret) - } -} - -trait LowerTy { - fn lower(&self, env: &Env) -> Result; -} - -impl LowerTy for Ty { - fn lower(&self, env: &Env) -> Result { - match *self { - Ty::Id { name } => match env.lookup(name)? 
{ - NameLookup::Type(id) => { - let k = env.type_kind(id); - if k.binders.len() > 0 { - bail!(ErrorKind::IncorrectNumberOfTypeParameters( - name, - k.binders.len(), - 0 - )) - } - - Ok(chalk_ir::Ty::Apply(chalk_ir::ApplicationTy { - name: chalk_ir::TypeName::ItemId(id), - parameters: vec![], - })) - } - NameLookup::Parameter(d) => Ok(chalk_ir::Ty::BoundVar(d)), - }, - - Ty::Apply { name, ref args } => { - let id = match env.lookup(name)? { - NameLookup::Type(id) => id, - NameLookup::Parameter(_) => bail!(ErrorKind::CannotApplyTypeParameter(name)), - }; - - let k = env.type_kind(id); - if k.binders.len() != args.len() { - bail!(ErrorKind::IncorrectNumberOfTypeParameters( - name, - k.binders.len(), - args.len() - )) - } - - let parameters = args - .iter() - .map(|t| Ok(t.lower(env)?)) - .collect::>>()?; - - for (param, arg) in k.binders.binders.iter().zip(args.iter()) { - check_type_kinds("incorrect parameter kind", param, arg)?; - } - - Ok(chalk_ir::Ty::Apply(chalk_ir::ApplicationTy { - name: chalk_ir::TypeName::ItemId(id), - parameters: parameters, - })) - } - - Ty::Projection { ref proj } => Ok(chalk_ir::Ty::Projection(proj.lower(env)?)), - - Ty::UnselectedProjection { ref proj } => { - Ok(chalk_ir::Ty::UnselectedProjection(proj.lower(env)?)) - } - - Ty::ForAll { - ref lifetime_names, - ref ty, - } => { - let quantified_env = env.introduce( - lifetime_names - .iter() - .map(|id| chalk_ir::ParameterKind::Lifetime(id.str)), - )?; - - let ty = ty.lower(&quantified_env)?; - let quantified_ty = chalk_ir::QuantifiedTy { - num_binders: lifetime_names.len(), - ty, - }; - Ok(chalk_ir::Ty::ForAll(Box::new(quantified_ty))) - } - } - } -} - -trait LowerParameter { - fn lower(&self, env: &Env) -> Result; -} - -impl LowerParameter for Parameter { - fn lower(&self, env: &Env) -> Result { - match *self { - Parameter::Ty(ref t) => Ok(chalk_ir::ParameterKind::Ty(t.lower(env)?)), - Parameter::Lifetime(ref l) => Ok(chalk_ir::ParameterKind::Lifetime(l.lower(env)?)), - } - } -} - 
-trait LowerLifetime { - fn lower(&self, env: &Env) -> Result; -} - -impl LowerLifetime for Lifetime { - fn lower(&self, env: &Env) -> Result { - match *self { - Lifetime::Id { name } => match env.lookup_lifetime(name)? { - LifetimeLookup::Parameter(d) => Ok(chalk_ir::Lifetime::BoundVar(d)), - }, - } - } -} - -trait LowerImpl { - fn lower_impl(&self, empty_env: &Env) -> Result; -} - -impl LowerImpl for Impl { - fn lower_impl(&self, empty_env: &Env) -> Result { - let binders = empty_env.in_binders(self.all_parameters(), |env| { - let trait_ref = self.trait_ref.lower(env)?; - - if !trait_ref.is_positive() && !self.assoc_ty_values.is_empty() { - bail!("negative impls cannot define associated values"); - } - - let trait_id = trait_ref.trait_ref().trait_id; - let where_clauses = self.lower_where_clauses(&env)?; - let associated_ty_values = try!( - self.assoc_ty_values - .iter() - .map(|v| v.lower(trait_id, env)) - .collect() - ); - Ok(rust_ir::ImplDatumBound { - trait_ref, - where_clauses, - associated_ty_values, - specialization_priority: 0, - impl_type: match self.impl_type { - ImplType::Local => rust_ir::ImplType::Local, - ImplType::External => rust_ir::ImplType::External, - }, - }) - })?; - - Ok(rust_ir::ImplDatum { binders: binders }) - } -} - -trait LowerClause { - fn lower_clause(&self, env: &Env) -> Result>; -} - -impl LowerClause for Clause { - fn lower_clause(&self, env: &Env) -> Result> { - let implications = env.in_binders(self.all_parameters(), |env| { - let consequences: Vec = self.consequence.lower(env)?; - - let conditions: Vec = self - .conditions - .iter() - .map(|g| g.lower(env).map(|g| *g)) - .rev() // (*) - .collect::>()?; - - // (*) Subtle: in the SLG solver, we pop conditions from R to - // L. To preserve the expected order (L to R), we must - // therefore reverse. 
- - let implications = consequences - .into_iter() - .map(|consequence| chalk_ir::ProgramClauseImplication { - consequence, - conditions: conditions.clone(), - }).collect::>(); - Ok(implications) - })?; - - let clauses = implications - .into_iter() - .map( - |implication: chalk_ir::Binders| { - if implication.binders.is_empty() { - chalk_ir::ProgramClause::Implies(implication.value) - } else { - chalk_ir::ProgramClause::ForAll(implication) - } - }, - ).collect(); - Ok(clauses) - } -} - -trait LowerAssocTyValue { - fn lower(&self, trait_id: chalk_ir::ItemId, env: &Env) -> Result; -} - -impl LowerAssocTyValue for AssocTyValue { - fn lower(&self, trait_id: chalk_ir::ItemId, env: &Env) -> Result { - let info = &env.associated_ty_infos[&(trait_id, self.name.str)]; - let value = env.in_binders(self.all_parameters(), |env| { - Ok(rust_ir::AssociatedTyValueBound { - ty: self.value.lower(env)?, - }) - })?; - Ok(rust_ir::AssociatedTyValue { - associated_ty_id: info.id, - value: value, - }) - } -} - -trait LowerTrait { - fn lower_trait(&self, trait_id: chalk_ir::ItemId, env: &Env) -> Result; -} - -impl LowerTrait for TraitDefn { - fn lower_trait(&self, trait_id: chalk_ir::ItemId, env: &Env) -> Result { - let binders = env.in_binders(self.all_parameters(), |env| { - let trait_ref = chalk_ir::TraitRef { - trait_id: trait_id, - parameters: self.parameter_refs(), - }; - - if self.flags.auto { - if trait_ref.parameters.len() > 1 { - bail!("auto trait cannot have parameters"); - } - if !self.where_clauses.is_empty() { - bail!("auto trait cannot have where clauses"); - } - } - - Ok(rust_ir::TraitDatumBound { - trait_ref: trait_ref, - where_clauses: self.lower_where_clauses(env)?, - flags: rust_ir::TraitFlags { - auto: self.flags.auto, - marker: self.flags.marker, - upstream: self.flags.upstream, - fundamental: self.flags.fundamental, - deref: self.flags.deref, - }, - }) - })?; - - Ok(rust_ir::TraitDatum { binders: binders }) - } -} - -pub trait LowerGoal { - fn lower(&self, arg: &A) 
-> Result>; -} - -impl LowerGoal for Goal { - fn lower(&self, program: &rust_ir::Program) -> Result> { - let associated_ty_infos: BTreeMap<_, _> = program - .associated_ty_data - .iter() - .map(|(&associated_ty_id, datum)| { - let trait_datum = &program.trait_data[&datum.trait_id]; - let num_trait_params = trait_datum.binders.len(); - let num_addl_params = datum.parameter_kinds.len() - num_trait_params; - let addl_parameter_kinds = datum.parameter_kinds[..num_addl_params].to_owned(); - let info = AssociatedTyInfo { - id: associated_ty_id, - addl_parameter_kinds, - }; - ((datum.trait_id, datum.name), info) - }).collect(); - - let env = Env { - type_ids: &program.type_ids, - type_kinds: &program.type_kinds, - associated_ty_infos: &associated_ty_infos, - parameter_map: BTreeMap::new(), - }; - - self.lower(&env) - } -} - -impl<'k> LowerGoal> for Goal { - fn lower(&self, env: &Env<'k>) -> Result> { - match self { - Goal::ForAll(ids, g) => g.lower_quantified(env, chalk_ir::QuantifierKind::ForAll, ids), - Goal::Exists(ids, g) => g.lower_quantified(env, chalk_ir::QuantifierKind::Exists, ids), - Goal::Implies(hyp, g) => { - // We "elaborate" implied bounds by lowering goals like `T: Trait` and - // `T: Trait` to `FromEnv(T: Trait)` and `FromEnv(T: Trait)` - // in the assumptions of an `if` goal, e.g. `if (T: Trait) { ... }` lowers to - // `if (FromEnv(T: Trait)) { ... /* this part is untouched */ ... }`. 
- let where_clauses: Result> = hyp - .into_iter() - .flat_map(|h| h.lower_clause(env).apply_result()) - .map(|result| result.map(|h| h.into_from_env_clause())) - .collect(); - Ok(Box::new(chalk_ir::Goal::Implies( - where_clauses?, - g.lower(env)?, - ))) - } - Goal::And(g1, g2) => Ok(Box::new(chalk_ir::Goal::And( - g1.lower(env)?, - g2.lower(env)?, - ))), - Goal::Not(g) => Ok(Box::new(chalk_ir::Goal::Not(g.lower(env)?))), - Goal::Compatible(g) => Ok(Box::new(g.lower(env)?.compatible())), - Goal::Leaf(leaf) => { - // A where clause can lower to multiple leaf goals; wrap these in Goal::And. - let leaves = leaf.lower(env)?.into_iter().map(chalk_ir::Goal::Leaf); - let goal = leaves - .fold1(|goal, leaf| chalk_ir::Goal::And(Box::new(goal), Box::new(leaf))) - .expect("at least one goal"); - Ok(Box::new(goal)) - } - } - } -} - -trait LowerQuantifiedGoal { - fn lower_quantified( - &self, - env: &Env, - quantifier_kind: chalk_ir::QuantifierKind, - parameter_kinds: &[ParameterKind], - ) -> Result>; -} - -impl LowerQuantifiedGoal for Goal { - fn lower_quantified( - &self, - env: &Env, - quantifier_kind: chalk_ir::QuantifierKind, - parameter_kinds: &[ParameterKind], - ) -> Result> { - if parameter_kinds.is_empty() { - return self.lower(env); - } - - let parameter_kinds = parameter_kinds.iter().map(|pk| pk.lower()); - let subgoal = env.in_binders(parameter_kinds, |env| self.lower(env))?; - Ok(Box::new(chalk_ir::Goal::Quantified( - quantifier_kind, - subgoal, - ))) - } -} - -/// Lowers Result> -> Vec>. 
-trait ApplyResult { - type Output; - fn apply_result(self) -> Self::Output; -} - -impl ApplyResult for Result> { - type Output = Vec>; - fn apply_result(self) -> Self::Output { - match self { - Ok(v) => v.into_iter().map(Ok).collect(), - Err(e) => vec![Err(e)], - } - } -} - -trait Kinded { - fn kind(&self) -> Kind; -} - -impl Kinded for ParameterKind { - fn kind(&self) -> Kind { - match *self { - ParameterKind::Ty(_) => Kind::Ty, - ParameterKind::Lifetime(_) => Kind::Lifetime, - } - } -} - -impl Kinded for Parameter { - fn kind(&self) -> Kind { - match *self { - Parameter::Ty(_) => Kind::Ty, - Parameter::Lifetime(_) => Kind::Lifetime, - } - } -} - -impl Kinded for chalk_ir::ParameterKind { - fn kind(&self) -> Kind { - match *self { - chalk_ir::ParameterKind::Ty(_) => Kind::Ty, - chalk_ir::ParameterKind::Lifetime(_) => Kind::Lifetime, - } - } -} diff --git a/src/rust_ir/lowering/test.rs b/src/rust_ir/lowering/test.rs deleted file mode 100644 index 6660be3b76f..00000000000 --- a/src/rust_ir/lowering/test.rs +++ /dev/null @@ -1,462 +0,0 @@ -#![cfg(test)] - -use chalk_ir::tls; -use chalk_solve::solve::SolverChoice; -use std::sync::Arc; -use test_util::*; - -#[test] -fn lower_success() { - lowering_success! { - program { - struct Foo { field: Foo } - trait Bar { } - impl Bar for Foo { } - } - } -} - -#[test] -fn not_trait() { - lowering_error! { - program { - struct Foo { } - trait Bar { } - impl Foo for Bar { } - } - error_msg { - "expected a trait, found `Foo`, which is not a trait" - } - } -} - -#[test] -fn auto_trait() { - lowering_error! { - program { - #[auto] trait Foo { } - } - error_msg { - "auto trait cannot have parameters" - } - } - - lowering_error! { - program { - trait Bar { } - #[auto] trait Foo where Self: Bar { } - } - error_msg { - "auto trait cannot have where clauses" - } - } - - lowering_error! { - program { - #[auto] trait Foo { - type Item; - } - } - error_msg { - "auto trait cannot define associated types" - } - } - - lowering_success! 
{ - program { - #[auto] trait Send { } - } - } -} - -#[test] -fn negative_impl() { - lowering_error! { - program { - trait Foo { - type Item; - } - - struct i32 { } - - impl !Foo for i32 { - type Item = i32; - } - } - error_msg { - "negative impls cannot define associated values" - } - } - - lowering_success! { - program { - trait Foo { } - - trait Iterator { - type Item; - } - - struct i32 { } - - impl !Foo for T where T: Iterator { } - } - } -} - -#[test] -fn invalid_name() { - lowering_error! { - program { - struct Foo { } - trait Bar { } - impl Bar for X { } - } - error_msg { - "invalid type name `X`" - } - } -} - -#[test] -fn type_parameter() { - lowering_success! { - program { - struct Foo { } - trait Bar { } - impl Bar for X { } - } - } -} - -#[test] -fn type_parameter_bound() { - lowering_success! { - program { - struct Foo { } - trait Bar { } - trait Eq { } - impl Bar for X where X: Eq { } - } - } -} - -#[test] -fn assoc_tys() { - lowering_success! { - program { - struct String { } - struct Char { } - - trait Iterator { type Item; } - impl Iterator for String { type Item = Char; } - - trait Foo { } - impl Foo for ::Item where X: Iterator { } - } - } -} - -#[test] -fn goal_quantifiers() { - let program = Arc::new( - parse_and_lower_program( - "trait Foo { }", - SolverChoice::default() - ).unwrap() - ); - let goal = parse_and_lower_goal( - &program, - "forall {exists {forall {Z: Foo}}}" - ).unwrap(); - tls::set_current_program(&program, || { - assert_eq!( - format!("{:?}", goal), - "ForAll { Exists { ForAll { Implemented(^0: Foo<^1, ^2>) } } }" - ); - }); -} - -#[test] -fn atc_accounting() { - let program = Arc::new( - parse_and_lower_program( - " - struct Vec { } - - trait Iterable { - type Iter<'a>; - } - - impl Iterable for Vec { - type Iter<'a> = Iter<'a, T>; - } - - struct Iter<'a, T> { } - ", - SolverChoice::default() - ).unwrap(), - ); - tls::set_current_program(&program, || { - let impl_text = format!("{:#?}", 
&program.impl_data.values().next().unwrap()); - println!("{}", impl_text); - assert_eq!( - &impl_text[..], - r#"ImplDatum { - binders: for ImplDatumBound { - trait_ref: Positive( - Vec<^0> as Iterable - ), - where_clauses: [], - associated_ty_values: [ - AssociatedTyValue { - associated_ty_id: (Iterable::Iter), - value: for AssociatedTyValueBound { - ty: Iter<'^0, ^1> - } - } - ], - specialization_priority: 0, - impl_type: Local - } -}"# - ); - let goal = parse_and_lower_goal( - &program, - "forall { forall<'a> { forall { \ - X: Iterable = Y> } } }", - ).unwrap(); - let goal_text = format!("{:?}", goal); - println!("{}", goal_text); - assert_eq!( - goal_text, - "ForAll { \ - ForAll { \ - ForAll { \ - (ProjectionEq(<^2 as Iterable>::Iter<'^1> = ^0), \ - Implemented(^2: Iterable)) \ - } \ - } \ - }" - ); - }); -} - -#[test] -fn check_parameter_kinds() { - lowering_error! { - program { - struct Foo<'a> { } - struct i32 { } - trait Bar { } - impl Bar for Foo { } - } - error_msg { - "incorrect parameter kind: expected lifetime, found type" - } - }; - - lowering_error! { - program { - struct Foo { } - trait Bar { } - impl<'a> Bar for Foo<'a> { } - } - error_msg { - "incorrect parameter kind: expected type, found lifetime" - } - }; - - lowering_error! { - program { - trait Iterator { type Item<'a>; } - trait Foo { } - impl Foo for ::Item where X: Iterator { } - } - error_msg { - "incorrect kind for associated type parameter: expected lifetime, found type" - } - }; - - lowering_error! { - program { - trait Iterator { type Item; } - trait Foo { } - impl Foo for ::Item<'a> where X: Iterator { } - } - error_msg { - "incorrect kind for associated type parameter: expected type, found lifetime" - } - }; - - lowering_error! { - program { - trait Into {} - struct Foo {} - impl<'a> Into<'a> for Foo {} - } - error_msg { - "incorrect kind for trait parameter: expected type, found lifetime" - } - } - - lowering_error! 
{ - program { - trait IntoTime<'a> {} - struct Foo {} - impl IntoTime for Foo {} - } - error_msg { - "incorrect kind for trait parameter: expected lifetime, found type" - } - } -} - -#[test] -fn gat_parse() { - lowering_success! { - program { - trait Sized {} - trait Clone {} - - trait Foo { - type Item<'a, T>: Sized + Clone where Self: Sized; - } - - trait Bar { - type Item<'a, T> where Self: Sized; - } - - struct Container { - value: T - } - - trait Baz { - type Item<'a, 'b, T>: Foo = Container> + Clone; - } - - trait Quux { - type Item<'a, T>; - } - } - } - - lowering_error! { - program { - trait Sized { } - - trait Foo { - type Item where K: Sized; - } - } - - error_msg { - "invalid type name `K`" - } - } -} - -#[test] -fn gat_higher_ranked_bound() { - lowering_success! { - program { - trait Fn {} - trait Ref<'a, T> {} - trait Sized {} - - trait Foo { - type Item: forall<'a> Fn> + Sized; - } - } - } -} - -#[test] -fn duplicate_parameters() { - lowering_error! { - program { - trait Foo { } - } - - error_msg { - "duplicate or shadowed parameters" - } - } - - lowering_error! { - program { - trait Foo { - type Item; - } - } - - error_msg { - "duplicate or shadowed parameters" - } - } - - lowering_error! { - program { - struct fn<'a> { } - struct Foo<'a> { - a: for<'a> fn<'a> - } - } error_msg { - "duplicate or shadowed parameters" - } - } - - lowering_error! { - program { - trait Fn {} - trait Ref<'a, T> {} - - trait Foo<'a> { - type Item: forall<'a> Fn>; - } - } error_msg { - "duplicate or shadowed parameters" - } - } -} - -#[test] -fn upstream_items() { - lowering_success! { - program { - #[upstream] trait Send { } - #[upstream] struct Vec { } - } - } -} - -#[test] -fn deref_trait() { - lowering_success! { - program { - #[lang_deref] trait Deref { type Target; } - } - } - - lowering_error! 
{ - program { - #[lang_deref] trait Deref { } - #[lang_deref] trait DerefDupe { } - } error_msg { - "Duplicate lang item `DerefTrait`" - } - } -} - -#[test] -fn fundamental_multiple_type_parameters() { - lowering_error! { - program { - #[fundamental] - struct Boxes { } - } - - error_msg { - "Only fundamental types with a single parameter are supported" - } - } -} diff --git a/src/test.rs b/src/test.rs deleted file mode 100644 index b2cd38cc550..00000000000 --- a/src/test.rs +++ /dev/null @@ -1,2850 +0,0 @@ -#![cfg(test)] - -use chalk_engine::fallible::{Fallible, NoSolution}; -use chalk_ir; -use chalk_solve::ext::*; -use chalk_solve::solve::{Solution, SolverChoice}; -use std::collections::HashMap; -use std::sync::Arc; -use test_util::*; - -mod bench; -mod slg; - -fn result_to_string(result: &Fallible>) -> String { - match result { - Ok(Some(v)) => format!("{}", v), - Ok(None) => format!("No possible solution"), - Err(NoSolution) => format!("Error"), - } -} - -fn assert_result(result: &Fallible>, expected: &str) { - let result = result_to_string(result); - - println!("expected:\n{}", expected); - println!("actual:\n{}", result); - - let expected1: String = expected.chars().filter(|w| !w.is_whitespace()).collect(); - let result1: String = result.chars().filter(|w| !w.is_whitespace()).collect(); - assert!(!expected1.is_empty() && result1.starts_with(&expected1)); -} - -macro_rules! 
test { - (program $program:tt $($goals:tt)*) => { - test!(@program[$program] - @parsed_goals[] - @unparsed_goals[$($goals)*]) - }; - - (@program[$program:tt] @parsed_goals[$($parsed_goals:tt)*] @unparsed_goals[]) => { - solve_goal(stringify!($program), vec![$($parsed_goals),*]) - }; - - // goal { G } yields { "Y" } -- test both solvers behave the same (the default) - (@program[$program:tt] @parsed_goals[$($parsed_goals:tt)*] @unparsed_goals[ - goal $goal:tt yields { $expected:expr } - $($unparsed_goals:tt)* - ]) => { - test!(@program[$program] - @parsed_goals[ - $($parsed_goals)* - (stringify!($goal), SolverChoice::default(), $expected) - ] - @unparsed_goals[$($unparsed_goals)*]) - }; - - // goal { G } yields[C1] { "Y1" } yields[C2] { "Y2" } -- test that solver C1 yields Y1 - // and C2 yields Y2 - // - // Annoyingly, to avoid getting a parsing ambiguity error, we have - // to distinguish the case where there are other goals to come - // (this rule) for the last goal in the list (next rule). There - // might be a more elegant fix than copy-and-paste but this works. - (@program[$program:tt] @parsed_goals[$($parsed_goals:tt)*] @unparsed_goals[ - goal $goal:tt $(yields[$($C:expr),+] { $expected:expr })* - goal $($unparsed_goals:tt)* - ]) => { - test!(@program[$program] - @parsed_goals[$($parsed_goals)* - $($((stringify!($goal), $C, $expected))+)+] - @unparsed_goals[goal $($unparsed_goals)*]) - }; - - // same as above, but for the final goal in the list. 
- (@program[$program:tt] @parsed_goals[$($parsed_goals:tt)*] @unparsed_goals[ - goal $goal:tt $(yields[$($C:expr),+] { $expected:expr })* - ]) => { - test!(@program[$program] - @parsed_goals[$($parsed_goals)* - $($((stringify!($goal), $C, $expected))+)+] - @unparsed_goals[]) - }; -} - -fn solve_goal(program_text: &str, goals: Vec<(&str, SolverChoice, &str)>) { - println!("program {}", program_text); - assert!(program_text.starts_with("{")); - assert!(program_text.ends_with("}")); - let mut program_env_cache = HashMap::new(); - for (goal_text, solver_choice, expected) in goals { - let (program, env) = program_env_cache.entry(solver_choice).or_insert_with(|| { - let program_text = &program_text[1..program_text.len() - 1]; // exclude `{}` - let program = - Arc::new(parse_and_lower_program(program_text, solver_choice).unwrap()); - let env = Arc::new(program.environment()); - (program, env) - }); - - chalk_ir::tls::set_current_program(&program, || { - println!("----------------------------------------------------------------------"); - println!("goal {}", goal_text); - assert!(goal_text.starts_with("{")); - assert!(goal_text.ends_with("}")); - let goal = parse_and_lower_goal(&program, &goal_text[1..goal_text.len() - 1]).unwrap(); - - println!("using solver: {:?}", solver_choice); - let peeled_goal = goal.into_peeled_goal(); - let result = solver_choice.solve_root_goal(&env, &peeled_goal); - assert_result(&result, expected); - }); - } -} - -#[test] -fn prove_clone() { - test! 
{ - program { - struct Foo { } - struct Bar { } - struct Vec { } - trait Clone { } - impl Clone for Vec where T: Clone { } - impl Clone for Foo { } - } - - goal { - Vec: Clone - } yields { - "Unique; substitution [], lifetime constraints []" - } - - goal { - Foo: Clone - } yields { - "Unique; substitution [], lifetime constraints []" - } - - goal { - Bar: Clone - } yields { - "No possible solution" - } - - goal { - Vec: Clone - } yields { - "No possible solution" - } - } -} - -#[test] -fn inner_cycle() { - // Interesting test that shows why recursive solver needs to run - // to an inner fixed point during iteration. Here, the first - // round, we get that `?T: A` has a unique sol'n `?T = i32`. On - // the second round, we ought to get ambiguous: but if we don't - // run the `?T: B` to a fixed point, it will terminate with `?T = - // i32`, leading to an (incorrect) unique solution. - test! { - program { - #[marker] - trait A { } - #[marker] - trait B { } - - struct i32 { } - struct Vec { } - - impl A for T where T: B { } - impl A for i32 { } - - impl B for T where T: A { } - impl B for Vec where T: B { } - } - - goal { - exists { T: A } - } yields { - "Ambiguous" - } - } -} - -#[test] -fn prove_infer() { - test! { - program { - struct Foo { } - struct Bar { } - trait Map { } - impl Map for Foo { } - impl Map for Bar { } - } - - goal { - exists { A: Map } - } yields { - "Ambiguous; no inference guidance" - } - - goal { - exists { A: Map } - } yields { - "Unique; substitution [?0 := Foo], lifetime constraints []" - } - - goal { - exists { Foo: Map } - } yields { - "Unique; substitution [?0 := Bar], lifetime constraints []" - } - } -} - -#[test] -fn prove_forall() { - test! 
{ - program { - struct Foo { } - struct Vec { } - - trait Marker { } - impl Marker for Vec { } - - trait Clone { } - impl Clone for Foo { } - - impl Clone for Vec where T: Clone { } - } - - goal { - forall { T: Marker } - } yields { - "No possible solution" - } - - goal { - forall { not { T: Marker } } - } yields { - "No" - } - - goal { - not { forall { T: Marker } } - } yields { - "Unique" - } - - // If we assume `T: Marker`, then obviously `T: Marker`. - goal { - forall { if (T: Marker) { T: Marker } } - } yields { - "Unique; substitution [], lifetime constraints []" - } - - // We don't have to know anything about `T` to know that - // `Vec: Marker`. - goal { - forall { Vec: Marker } - } yields { - "Unique; substitution [], lifetime constraints []" - } - - // Here, we don't know that `T: Clone`, so we can't prove that - // `Vec: Clone`. - goal { - forall { Vec: Clone } - } yields { - "No possible solution" - } - - // Here, we do know that `T: Clone`, so we can. - goal { - forall { - if (T: Clone) { - Vec: Clone - } - } - } yields { - "Unique; substitution [], lifetime constraints []" - } - } -} - -#[test] -fn higher_ranked() { - test! { - program { - struct u8 { } - struct SomeType { } - trait Foo { } - impl Foo for SomeType { } - } - - goal { - exists { - forall { - SomeType: Foo - } - } - } yields { - "Unique; substitution [?0 := u8], lifetime constraints []" - } - } -} - -#[test] -fn ordering() { - test! { - program { - trait Foo { } - impl Foo for U { } - } - - goal { - exists { - forall { - U: Foo - } - } - } yields { - "No possible solution" - } - } -} - -#[test] -fn cycle_no_solution() { - test! { - program { - trait Foo { } - struct S { } - impl Foo for S where T: Foo { } - } - - // only solution: infinite type S { - T: Foo - } - } yields { - "No possible solution" - } - } -} - -#[test] -fn cycle_many_solutions() { - test! 
{ - program { - trait Foo { } - struct S { } - struct i32 { } - impl Foo for S where T: Foo { } - impl Foo for i32 { } - } - - // infinite family of solutions: {i32, S, S>, ... } - goal { - exists { - T: Foo - } - } yields { - "Ambiguous; no inference guidance" - } - } -} - -#[test] -fn cycle_unique_solution() { - test! { - program { - trait Foo { } - trait Bar { } - struct S { } - struct i32 { } - impl Foo for S where T: Foo, T: Bar { } - impl Foo for i32 { } - } - - goal { - exists { - T: Foo - } - } yields { - "Unique; substitution [?0 := i32]" - } - } -} - -#[test] -fn multiple_ambiguous_cycles() { - test! { - program { - trait WF { } - trait Sized { } - - struct Vec { } - struct Int { } - - impl Sized for Int { } - impl WF for Int { } - - impl WF for Vec where T: Sized { } - impl Sized for Vec where T: WF, T: Sized { } - } - - // ?T: WF - // | - // | - // | - // Int: WF. <-----> (Vec: WF) :- (?T: Sized) - // | - // | - // | - // Int: Sized. <-------> (Vec: Sized) :- (?T: Sized), (?T: WF) - // | | - // | | - // | | - // cycle cycle - // - // Depending on the evaluation order of the above tree (which cycle we come upon first), - // we may fail to reach a fixed point if we loop continuously because `Ambig` does not perform - // any unification. We must stop looping as soon as we encounter `Ambig`. In fact without - // this strategy, the above program will not even be loaded because of the overlap check which - // will loop forever. - goal { - exists { - T: WF - } - } yields { - "Ambig" - } - } -} - -#[test] -#[should_panic] -fn overflow() { - test! { - program { - trait Q { } - struct Z { } - struct G - struct S - - impl Q for Z { } - impl Q for G where X: Q { } - impl Q for S where X: Q, S>: Q { } - } - - // Will try to prove S>: Q then S>>: Q etc ad infinitum - goal { - S: Q - } yields { - "" - } - } -} - -#[test] -fn normalize_basic() { - test! 
{ - program { - trait Iterator { type Item; } - struct Vec { } - struct u32 { } - impl Iterator for Vec { - type Item = T; - } - } - - goal { - forall { - exists { - Normalize( as Iterator>::Item -> U) - } - } - } yields { - "Unique; substitution [?0 := !1_0], lifetime constraints []" - } - - goal { - forall { - Vec: Iterator - } - } yields { - "Unique; substitution [], lifetime constraints []" - } - - goal { - forall { - if (T: Iterator) { - ::Item = u32 - } - } - } yields { - "Unique; substitution []" - } - - goal { - forall { - if (T: Iterator) { - exists { - T: Iterator - } - } - } - } yields { - "Unique; substitution [?0 := (Iterator::Item)]" - } - - goal { - forall { - if (T: Iterator) { - exists { - T: Iterator - } - } - } - } yields { - "Unique; substitution [?0 := (Iterator::Item)]" - } - - goal { - forall { - if (T: Iterator) { - ::Item = ::Item - } - } - } yields { - "Unique" - } - - goal { - forall { - if (T: Iterator) { - exists { - ::Item = ::Item - } - } - } - } yields { - // True for `U = T`, of course, but also true for `U = Vec`. - "Ambiguous" - } - } -} - -#[test] -fn normalize_gat1() { - test! { - program { - struct Vec { } - - trait Iterable { - type Iter<'a>; - } - - impl Iterable for Vec { - type Iter<'a> = Iter<'a, T>; - } - - trait Iterator { - type Item; - } - - struct Iter<'a, T> { } - struct Ref<'a, T> { } - - impl<'a, T> Iterator for Iter<'a, T> { - type Item = Ref<'a, T>; - } - } - - goal { - forall { - forall<'a> { - exists { - Normalize( as Iterable>::Iter<'a> -> U) - } - } - } - } yields { - "Unique; substitution [?0 := Iter<'!2_0, !1_0>], lifetime constraints []" - } - } -} - -#[test] -fn normalize_gat2() { - test! 
{ - program { - trait StreamingIterator { type Item<'a>; } - struct Span<'a, T> { } - struct StreamIterMut { } - struct u32 { } - impl StreamingIterator for StreamIterMut { - type Item<'a> = Span<'a, T>; - } - } - - goal { - forall<'a, T> { - exists { - Normalize( as StreamingIterator>::Item<'a> -> U) - } - } - } yields { - "Unique; substitution [?0 := Span<'!1_0, !1_1>], lifetime constraints []" - } - - goal { - forall<'a, T> { - as StreamingIterator>::Item<'a> = Span<'a, T> - } - } yields { - "Unique; substitution [], lifetime constraints []" - } - - goal { - forall<'a, T, U> { - if (T: StreamingIterator = Span<'a, U>>) { - >::Item<'a> = Span<'a, U> - } - } - } yields { - "Unique; substitution [], lifetime constraints []" - } - } -} - -#[test] -fn normalize_gat_with_where_clause() { - test! { - program { - trait Sized { } - trait Foo { - type Item where T: Sized; - } - - struct Value { } - struct Sometype { } - impl Foo for Sometype { - type Item = Value; - } - } - - goal { - forall { - exists { - Normalize(::Item -> U) - } - } - } yields { - "No possible solution" - } - - goal { - forall { - exists { - if (T: Sized) { - Normalize(::Item -> U) - } - } - } - } yields { - "Unique; substitution [?0 := Value]" - } - } -} - -#[test] -fn normalize_gat_with_where_clause2() { - test! { - program { - trait Bar { } - trait Foo { - type Item where U: Bar; - } - - struct i32 { } - impl Foo for i32 { - type Item = U; - } - } - - goal { - forall { - exists { - Normalize(>::Item -> V) - } - } - } yields { - "No possible solution" - } - - goal { - forall { - exists { - if (U: Bar) { - Normalize(>::Item -> V) - } - } - } - } yields { - "Unique; substitution [?0 := !1_1]" - } - } -} - -#[test] -fn normalize_gat_with_higher_ranked_trait_bound() { - test! 
{ - program { - trait Foo<'a, T> { } - struct i32 { } - - trait Bar<'a, T> { - type Item: Foo<'a, T> where forall<'b> V: Foo<'b, T>; - } - - impl<'a, T> Foo<'a, T> for i32 { } - impl<'a, T> Bar<'a, T> for i32 { - type Item = i32; - } - } - - goal { - forall<'a, T, V> { - if (forall<'b> { V: Foo<'b, T> }) { - exists { - Normalize(>::Item -> U) - } - } - } - } yields { - "Unique; substitution [?0 := i32], lifetime constraints []" - } - } -} - -#[test] -fn implied_bounds() { - test! { - program { - trait Clone { } - trait Iterator where Self: Clone { type Item; } - struct u32 { } - } - - goal { - forall { - if (T: Iterator) { - T: Clone - } - } - } yields { - "Unique; substitution []" - } - } -} - -#[test] -fn gat_implied_bounds() { - test! { - program { - trait Clone { } - trait Foo { type Item: Clone; } - struct u32 { } - } - - goal { - forall { - if (T: Foo = V>) { - V: Clone - } - } - } yields { - "Unique; substitution []" - } - } - - test! { - program { - trait Clone { } - trait Foo { type Item; } - struct u32 { } - } - - goal { - forall { - if (T: Foo = V>) { - // Without the bound Item: Clone, there is no way to infer this. - V: Clone - } - } - } yields { - "No possible solution" - } - } - - test! { - program { - trait Fn { } - struct Ref<'a, T> { } - trait Sized { } - - trait Foo { - type Item: forall<'a> Fn> + Sized; - } - } - - goal { - forall { - if (Type: Foo) { - forall<'a, T> { - ::Item: Fn> - } - } - } - } yields { - "Unique" - } - } -} - -#[test] -fn implied_from_env() { - test! { - program { - trait Clone { } - trait Foo { type Item; } - } - - goal { - forall { - if (FromEnv(>::Item)) { - FromEnv(T: Foo) - } - } - } yields { - "Unique" - } - - goal { - forall { - if (FromEnv(>::Item)) { - FromEnv(T: Clone) - } - } - } yields { - "No possible solution" - } - } -} - -/// Demonstrates that, given the expected value of the associated -/// type, we can use that to narrow down the relevant impls. -#[test] -fn normalize_rev_infer() { - test! 
{ - program { - trait Identity { type Item; } - struct u32 { } - struct i32 { } - impl Identity for u32 { type Item = u32; } - impl Identity for i32 { type Item = i32; } - } - - goal { - exists { - T: Identity - } - } yields { - "Unique; substitution [?0 := u32]" - } - } -} - -/// Demonstrates that, given the expected value of the associated -/// type, we can use that to narrow down the relevant impls. -#[test] -fn normalize_rev_infer_gat() { - test! { - program { - trait Combine { type Item; } - struct u32 { } - struct i32 { } - struct Either { } - impl Combine for u32 { type Item = Either; } - impl Combine for i32 { type Item = Either; } - } - - goal { - exists { - T: Combine = Either> - } - } yields { - // T is ?1 and U is ?0, so this is surprising, but correct! (See #126.) - "Unique; substitution [?0 := i32, ?1 := u32]" - } - } -} - -/// Demonstrates that, given the expected value of the associated -/// type, we can use that to narrow down the relevant impls. -#[test] -fn region_equality() { - test! { - program { - trait Eq { } - impl Eq for T { } - - struct Unit { } - struct Ref<'a, T> { } - } - - goal { - forall<'a, 'b> { - Ref<'a, Unit>: Eq> - } - } yields { - "Unique; substitution [], - lifetime constraints \ - [InEnvironment { environment: Env([]), goal: '!1_1 == '!1_0 }] - " - } - - goal { - forall<'a> { - exists<'b> { - Ref<'a, Unit>: Eq> - } - } - } yields { - "Unique; substitution [?0 := '!1_0], lifetime constraints []" - } - } -} - -/// Demonstrates that, given the expected value of the associated -/// type, we can use that to narrow down the relevant impls. -#[test] -fn forall_equality() { - test! { - program { - trait Eq { } - impl Eq for T { } - - struct Unit { } - struct Ref<'a, T> { } - } - - goal { - // A valid equality; we get back a series of solvable - // region constraints, since each region variable must - // refer to exactly one placeholder region, and they are - // all in a valid universe to do so (universe 4). 
- for<'a, 'b> Ref<'a, Ref<'b, Unit>>: Eq Ref<'c, Ref<'d, Unit>>> - } yields { - "Unique; substitution [], lifetime constraints []" - } - - goal { - // Note: this equality is false, but we get back successful; - // this is because the region constraints are unsolvable. - // - // Note that `?0` (in universe 2) must be equal to both - // `!1_0` and `!1_1`, which of course it cannot be. - for<'a, 'b> Ref<'a, Ref<'b, Ref<'a, Unit>>>: Eq< - for<'c, 'd> Ref<'c, Ref<'d, Ref<'d, Unit>>>> - } yields { - "Unique; substitution [], lifetime constraints [ - InEnvironment { environment: Env([]), goal: '!1_1 == '!1_0 } - ]" - } - } -} - -/// Demonstrates that, given the expected value of the associated -/// type, we can use that to narrow down the relevant impls. -#[test] -fn forall_projection() { - test! { - program { - trait Eq { } - impl Eq for T { } - - trait DropLt<'a> { type Item; } - impl<'a, T> DropLt<'a> for T { type Item = T; } - - struct Unit { } - struct Ref<'a, T> { } - } - - goal { - for<'a> >::Item: Eq - } yields { - "Unique; substitution [], lifetime constraints []" - } - } -} - -/// Demonstrates that, given the expected value of the associated -/// type, we can use that to narrow down the relevant impls. -#[test] -fn forall_projection_gat() { - test! 
{ - program { - trait Eq { } - impl Eq for T { } - - trait Sized { } - - trait DropOuter<'a> { type Item where U: Sized; } - impl<'a, T> DropOuter<'a> for T { type Item = T; } - - struct Unit { } - struct Ref<'a, T> { } - } - - goal { - forall { - for<'a> >::Item: Eq - } - } yields { - "No possible solution" - } - - goal { - forall { - if (T: Sized) { - for<'a> >::Item: Eq - } - } - } yields { - "Unique; substitution [], lifetime constraints []" - } - - goal { - forall<'a, T> { - WellFormed(>::Item) - } - } yields { - "No possible solution" - } - - goal { - forall { - if (T: Sized) { - WellFormed(for<'a> >::Item: Eq) - } - } - } yields { - "Unique; substitution [], lifetime constraints []" - } - } -} - -#[test] -fn struct_wf() { - test! { - program { - struct Foo where T: Eq { } - struct Bar { } - struct Baz { } - - trait Eq { } - - impl Eq for Baz { } - impl Eq for Foo where T: Eq { } - } - - goal { - WellFormed(Foo) - } yields { - "No possible solution" - } - - goal { - WellFormed(Foo) - } yields { - "Unique; substitution [], lifetime constraints []" - } - - goal { - WellFormed(Foo>) - } yields { - "Unique; substitution [], lifetime constraints []" - } - } -} - -#[test] -fn generic_trait() { - test! { - program { - struct Int { } - struct Uint { } - - trait Eq { } - - impl Eq for Int { } - impl Eq for Uint { } - } - - goal { - Int: Eq - } yields { - "Unique; substitution [], lifetime constraints []" - } - - goal { - Uint: Eq - } yields { - "Unique; substitution [], lifetime constraints []" - } - - goal { - Int: Eq - } yields { - "No possible solution" - } - } -} - -#[test] -fn normalize_under_binder() { - test! 
{ - program { - struct Ref<'a, T> { } - struct I32 { } - - trait Deref<'a> { - type Item; - } - - trait Id<'a> { - type Item; - } - - impl<'a, T> Deref<'a> for Ref<'a, T> { - type Item = T; - } - - impl<'a, T> Id<'a> for Ref<'a, T> { - type Item = Ref<'a, T>; - } - } - - goal { - exists { - forall<'a> { - Ref<'a, I32>: Deref<'a, Item = U> - } - } - } yields { - "Ambiguous" - } - - goal { - exists { - forall<'a> { - Normalize( as Deref<'a>>::Item -> U) - } - } - } yields { - "Unique; substitution [?0 := I32], lifetime constraints []" - } - - goal { - forall<'a> { - exists { - Ref<'a, I32>: Id<'a, Item = U> - } - } - } yields { - "Ambiguous" - } - - goal { - forall<'a> { - exists { - Normalize( as Id<'a>>::Item -> U) - } - } - } yields { - "Unique; substitution [?0 := Ref<'!1_0, I32>], lifetime constraints []" - } - - goal { - exists { - forall<'a> { - Normalize( as Id<'a>>::Item -> U) - } - } - } yields { - "Unique; for { \ - substitution [?0 := Ref<'^0, I32>], \ - lifetime constraints [InEnvironment { environment: Env([]), goal: '^0 == '!1_0 }] \ - }" - } - } -} - -#[test] -fn unify_quantified_lifetimes() { - test! { - program { - } - - // Check that `'a` (here, `'^0`) is not unified - // with `'!1_0`, because they belong to incompatible - // universes. - goal { - exists<'a> { - forall<'b> { - 'a = 'b - } - } - } yields { - "Unique; for { \ - substitution [?0 := '^0], \ - lifetime constraints [InEnvironment { environment: Env([]), goal: '^0 == '!1_0 }] \ - }" - } - - // Similar to the previous test, but indirect. - goal { - exists<'a> { - forall<'b> { - exists<'c> { - 'a = 'c, - 'c = 'b - } - } - } - } yields { - "Unique; for { \ - substitution [?0 := '^0, ?1 := '!1_0], \ - lifetime constraints [InEnvironment { environment: Env([]), goal: '^0 == '!1_0 }] \ - }" - } - } -} - -#[test] -fn equality_binder() { - test! 
{ - program { - struct Ref<'a, T> { } - } - - // Check that `'a` (here, `'?0`) is not unified - // with `'!1_0`, because they belong to incompatible - // universes. - goal { - forall { - exists<'a> { - for<'c> Ref<'c, T> = Ref<'a, T> - } - } - } yields { - "Unique; for { \ - substitution [?0 := '^0], \ - lifetime constraints [InEnvironment { environment: Env([]), goal: '!2_0 == '^0 }] \ - }" - } - } -} - -#[test] -fn mixed_indices_unify() { - test! { - program { - struct Ref<'a, T> { } - } - - goal { - exists { - exists<'a> { - exists { - Ref<'a, T> = Ref<'a, U> - } - } - } - } yields { - "Unique; for { \ - substitution [?0 := '^0, ?1 := ^1, ?2 := ^1], \ - lifetime constraints []\ - }" - } - } -} - -#[test] -fn mixed_indices_match_program() { - test! { - program { - struct S { } - struct Bar<'a, T, U> { } - trait Foo {} - impl<'a> Foo for Bar<'a, S, S> {} - } - - goal { - exists { - exists<'a> { - exists { - Bar<'a, T, U>: Foo - } - } - } - } yields { - "Unique; for { \ - substitution [?0 := '^0, ?1 := S, ?2 := S], \ - lifetime constraints [] \ - }" - } - } -} - -#[test] -fn mixed_indices_normalize_application() { - test! { - program { - struct Ref<'a, T> { } - trait Foo { - type T; - } - - impl Foo for Ref<'a, U> { - type T = U; - } - } - - goal { - exists { - exists<'a> { - exists { - Normalize( as Foo>::T -> U) - } - } - } - } yields { - "Unique; for { substitution [?0 := '^0, ?1 := ^1, ?2 := ^1], " - } - } -} - -#[test] -fn mixed_indices_normalize_gat_application() { - test! { - program { - struct Either { } - struct Ref<'a, T> { } - trait Foo { - type T; - } - - impl Foo for Ref<'a, U> { - type T = Either; - } - } - - goal { - exists { - Normalize( as Foo>::T -> Either) - } - } yields { - // Our GAT parameter is mapped to ?0; all others appear left to right - // in our Normalize(...) goal. 
- "Unique; for { \ - substitution [?0 := ^0, ?1 := '^1, ?2 := ^2, ?3 := ^0, ?4 := ^2], " - } - } -} - -#[test] -// Test that we properly detect failure even if there are applicable impls at -// the top level, if we can't find anything to fill in those impls with -fn deep_failure() { - test! { - program { - struct Foo {} - trait Bar {} - trait Baz {} - - impl Bar for Foo where T: Baz {} - } - - goal { - exists { T: Baz } - } yields { - "No possible solution" - } - - goal { - exists { Foo: Bar } - } yields { - "No possible solution" - } - } -} - -#[test] -// Test that we infer a unique solution even if it requires multiple levels of -// search to do so -fn deep_success() { - test! { - program { - struct Foo {} - struct ImplsBaz {} - trait Bar {} - trait Baz {} - - impl Baz for ImplsBaz {} - impl Bar for Foo where T: Baz {} - } - - goal { - exists { Foo: Bar } - } yields { - "Unique; substitution [?0 := ImplsBaz]" - } - } -} - -#[test] -fn definite_guidance() { - test! { - program { - trait Display {} - trait Debug {} - struct Foo {} - struct Bar {} - struct Baz {} - - impl Display for Bar {} - impl Display for Baz {} - - impl Debug for Foo where T: Display {} - } - - goal { - exists { - T: Debug - } - } yields { - "Ambiguous; definite substitution for { [?0 := Foo<^0>] }" - } - } -} - -#[test] -fn suggested_subst() { - test! 
{ - program { - trait SomeTrait {} - struct Foo {} - struct Bar {} - struct i32 {} - struct bool {} - impl SomeTrait for Foo {} - impl SomeTrait for Bar {} - impl SomeTrait for Bar {} - } - - goal { - exists { - Foo: SomeTrait - } - } yields { - "Unique; substitution [?0 := i32]" - } - - goal { - exists { - if (i32: SomeTrait) { - i32: SomeTrait - } - } - } yields { - "Unique; substitution [?0 := bool]" - } - - goal { - exists { - if (i32: SomeTrait) { - Foo: SomeTrait - } - } - } yields { - "Unique; substitution [?0 := i32]" - } - - goal { - exists { - if (Foo: SomeTrait) { - Foo: SomeTrait - } - } - } yields { - "Unique; substitution [?0 := i32]" - } - - goal { - exists { - if (Foo: SomeTrait) { - Foo: SomeTrait - } - } - } yields { - // FIXME: we need to rework the "favor environment" heuristic. - // Should be: "Ambiguous; suggested substitution [?0 := bool]" - "Ambiguous; no inference guidance" - } - - goal { - exists { - if (Foo: SomeTrait) { - if (Foo: SomeTrait) { - Foo: SomeTrait - } - } - } - } yields { - "Ambiguous; no inference guidance" - } - - goal { - exists { - Bar: SomeTrait - } - } yields { - "Ambiguous; no inference guidance" - } - - goal { - exists { - if (Bar: SomeTrait) { - Bar: SomeTrait - } - } - } yields { - // FIXME: same as above, should be: "Ambiguous; suggested substitution [?0 := bool]" - "Ambiguous; no inference guidance" - } - - goal { - exists { - if (Bar: SomeTrait) { - if (Bar: SomeTrait) { - Bar: SomeTrait - } - } - } - } yields { - "Ambiguous; no inference guidance" - } - } -} - -#[test] -fn simple_negation() { - test! 
{ - program { - struct i32 {} - trait Foo {} - } - - goal { - not { i32: Foo } - } yields { - "Unique" - } - - goal { - not { - not { i32: Foo } - } - } yields { - "No" - } - - goal { - not { - not { - not { i32: Foo } - } - } - } yields { - "Unique" - } - - goal { - exists { - not { T: Foo } - } - } yields { - "Ambig" - } - - goal { - forall { - not { T: Foo } - } - } yields { - "Unique" - } - - goal { - not { - exists { T: Foo } - } - } yields { - "Unique" - } - - goal { - not { - forall { T: Foo } - } - } yields { - "Unique" - } - } -} - -#[test] -fn deep_negation() { - test! { - program { - struct Foo {} - trait Bar {} - trait Baz {} - - impl Bar for Foo where T: Baz {} - } - - goal { - not { - exists { T: Baz } - } - } yields { - "Unique" - } - - goal { - not { - exists { Foo: Bar } - } - } yields { - "Unique" - } - } -} - -#[test] -fn negation_quantifiers() { - test! { - program { - struct i32 {} - struct u32 {} - } - - goal { - not { - forall { - T = U - } - } - } yields { - "Unique" - } - - goal { - not { - exists { - T = U - } - } - } yields { - "No" - } - - goal { - forall { - not { - T = U - } - } - } yields { - "No" - } - } -} - -#[test] -fn negation_free_vars() { - test! { - program { - struct Vec {} - struct i32 {} - struct u32 {} - trait Foo {} - impl Foo for Vec {} - } - - goal { - exists { - not { Vec: Foo } - } - } yields { - "Ambig" - } - } -} - -#[test] -fn where_clause_trumps() { - test! { - program { - struct Foo { } - - trait Marker { } - impl Marker for Foo { } - } - - goal { - forall { - if (T: Marker) { - T: Marker - } - } - } yields { - "Unique" - } - } -} - -#[test] -fn inapplicable_assumption_does_not_shadow() { - test! { - program { - struct i32 { } - struct u32 { } - - trait Foo { } - - impl Foo for T { } - } - - goal { - forall { - exists { - if (i32: Foo) { - T: Foo - } - } - } - } yields { - "Unique" - } - } -} - -#[test] -fn auto_trait_without_impls() { - test! 
{ - program { - #[auto] trait Send { } - - struct i32 { } - - struct Useless { } - - struct Data { - data: T - } - } - - goal { - i32: Send - } yields { - "Unique" - } - - // No fields so `Useless` is `Send`. - goal { - forall { - Useless: Send - } - } yields { - "Unique" - } - - goal { - forall { - if (T: Send) { - Data: Send - } - } - } yields { - "Unique" - } - } -} - -#[test] -fn auto_trait_with_impls() { - test! { - program { - #[auto] trait Send { } - - struct i32 { } - struct f32 { } - struct Vec { } - - impl Send for Vec where T: Send { } - impl !Send for i32 { } - } - - goal { - i32: Send - } yields { - "No possible solution" - } - - goal { - f32: Send - } yields { - "Unique" - } - - goal { - Vec: Send - } yields { - "No possible solution" - } - - goal { - Vec: Send - } yields { - "Unique" - } - - goal { - forall { - Vec: Send - } - } yields { - "No possible solution" - } - } -} - -#[test] -fn coinductive_semantics() { - test! { - program { - #[auto] trait Send { } - - struct i32 { } - - struct Ptr { } - impl Send for Ptr where T: Send { } - - struct List { - data: T, - next: Ptr> - } - } - - goal { - forall { - List: Send - } - } yields { - "No possible solution" - } - goal { - forall { - if (T: Send) { - List: Send - } - } - } yields { - "Unique" - } - - goal { - List: Send - } yields { - "Unique" - } - - goal { - exists { - T: Send - } - } yields { - "Ambiguous" - } - } -} - -#[test] -fn mixed_semantics() { - test! { - program { - #[auto] trait Send { } - trait Foo { } - - impl Send for T where T: Foo { } - impl Foo for T where T: Send { } - } - - // We have a cycle `(T: Send) :- (T: Foo) :- (T: Send)` with a non-coinductive - // inner component `T: Foo` so we reject it. - goal { - exists { - T: Send - } - } yields { - "No possible solution" - } - - goal { - exists { - T: Foo - } - } yields { - "No possible solution" - } - } -} - -#[test] -fn partial_overlap_2() { - test! 
{ - program { - trait Marker {} - trait Foo {} - trait Bar {} - - struct i32 {} - struct u32 {} - - impl Marker for T where T: Foo {} - impl Marker for T where T: Bar {} - } - - goal { - forall { - if (T: Foo; T: Bar) { - exists { T: Marker } - } - } - } yields { - "Ambiguous" - } - - goal { - forall { - if (T: Foo; T: Bar) { - T: Marker - } - } - } yields { - "Unique" - } - - goal { - forall { - if (T: Foo; T: Bar) { - T: Marker - } - } - } yields { - "Unique" - } - } -} - -#[test] -fn partial_overlap_3() { - test! { - program { - #[marker] trait Marker {} - trait Foo {} - trait Bar {} - - impl Marker for T where T: Foo {} - impl Marker for T where T: Bar {} - - struct i32 {} - impl Foo for i32 {} - impl Bar for i32 {} - } - - goal { - forall { - if (T: Foo; T: Bar) { T: Marker } - } - } yields { - "Unique" - } - - goal { - i32: Marker - } yields { - "Unique" - } - } -} - -#[test] -fn inscope() { - test! { - program { - trait Foo { } - } - - goal { - InScope(Foo) - } yields { - "No possible solution" - } - - goal { - if (InScope(Foo)) { - InScope(Foo) - } - } yields { - "Unique; substitution [], lifetime constraints []" - } - } -} - -#[test] -fn unselected_projection() { - test! 
{ - program { - trait Iterator { - type Item; - } - - trait Iterator2 { - type Item; - } - - struct Chars { } - struct char { } - struct char2 { } - - impl Iterator for Chars { - type Item = char; - } - - impl Iterator2 for Chars { - type Item = char2; - } - } - - goal { - Chars::Item = char - } yields { - "No possible solution" - } - - goal { - if (InScope(Iterator)) { - Chars::Item = char - } - } yields { - "Unique; substitution [], lifetime constraints []" - } - - goal { - exists { - if (InScope(Iterator)) { - Chars::Item = T - } - } - } yields { - "Unique; substitution [?0 := char], lifetime constraints []" - } - - goal { - exists { - if (InScope(Iterator); InScope(Iterator2)) { - Chars::Item = T - } - } - } yields { - "Ambiguous; no inference guidance" - } - } -} - -#[test] -fn unselected_projection_with_gat() { - test! { - program { - trait Foo { - type Item<'a>; - } - - struct Ref<'a, T> { } - struct i32 { } - - impl Foo for i32 { - type Item<'a> = Ref<'a, i32>; - } - } - - goal { - forall<'a> { - if (InScope(Foo)) { - i32::Item<'a> = Ref<'a, i32> - } - } - } yields { - "Unique" - } - - goal { - forall<'a> { - if (InScope(Foo)) { - WellFormed(i32::Item<'a>) - } - } - } yields { - "Unique" - } - } -} - -#[test] -fn unselected_projection_with_parametric_trait() { - test! { - program { - trait Foo { - type Item; - } - - struct i32 { } - - impl Foo for i32 { - type Item = i32; - } - } - goal { - if (InScope(Foo)) { - i32::Item = i32 - } - } yields { - "Unique" - } - } -} - -#[test] -fn overflow_universe() { - test! { - program { - struct Foo { } - - trait Bar { } - - // When asked to solve X: Bar, we will produce a - // requirement to solve !1_0: Bar. And then when asked to - // solve that, we'll produce a requirement to solve !1_1: - // Bar. And so forth. - forall { X: Bar if forall { Y: Bar } } - } - - goal { - Foo: Bar - } yields { - // The internal universe canonicalization in the on-demand/recursive - // solver means that when we are asked to solve (e.g.) 
- // `!1_1: Bar`, we rewrite that to `!1_0: Bar`, identifying a - // cycle. - "No possible solution" - } - } -} - -#[test] -fn projection_from_env() { - test! { - program { - trait Sized { } - - struct Slice where T: Sized { } - impl Sized for Slice { } - - trait SliceExt - { - type Item; - } - - impl SliceExt for Slice - { - type Item = T; - } - } - - goal { - forall { - if ( - as SliceExt>::Item: Sized - ) { - T: Sized - } - } - } yields { - "Unique" - } - } -} - -#[test] -fn gat_unify_with_implied_wc() { - test! { - program { - struct Slice { } - - trait Cast { } - trait CastingIter { - type Item: Cast where T: Cast; - } - - impl CastingIter for Slice { - type Item = Castable; - } - - struct Castable { } - impl Cast for Castable { } - } - - goal { - forall { - if ( - FromEnv( as CastingIter>::Item) - ) { - T: Cast - } - } - } yields { - "Unique" - } - - goal { - forall { - T: Cast - } - } yields { - "No possible solution" - } - } -} - -// This variant of the above test used to be achingly slow on SLG -// solvers, before the "trivial answer" green cut was introduced. -// -// The problem was that we wound up enumerating a goal like -// -// ::Item = !1_0 -// -// which meant "find me the types that normalize to `!1_0`". We had no -// problem finding these types, but after the first such type, we had -// the only unique answer we would ever find, and we wanted to reach -// the point where we could say "no more answers", so we kept -// requesting more answers. -#[test] -fn projection_from_env_slow() { - test! 
{ - program { - trait Clone { } - trait Sized { } - - struct Slice where T: Sized { } - impl Sized for Slice { } - - struct u32 { } - impl Clone for u32 { } - impl Sized for u32 { } - - trait SliceExt - where ::Item: Clone - { - type Item; - } - - impl SliceExt for Slice - where T: Clone - { - type Item = T; - } - } - - goal { - forall { - if ( - as SliceExt>::Item: Clone; - as SliceExt>::Item: Sized; - T: Clone - ) { - T: Sized - } - } - } yields { - "Unique" - } - } -} - -#[test] -fn clauses_in_if_goals() { - test! { - program { - trait Foo { } - struct Vec { } - struct i32 { } - } - - goal { - if (forall { T: Foo }) { - forall { T: Foo } - } - } yields { - "Unique" - } - - goal { - forall { - if (Vec: Foo :- T: Foo) { - if (T: Foo) { - Vec: Foo - } - } - } - } yields { - "Unique" - } - - goal { - if (forall { Vec: Foo :- T: Foo }) { - if (i32: Foo) { - Vec: Foo - } - } - } yields { - "Unique" - } - - goal { - if (forall { Vec: Foo :- T: Foo }) { - Vec: Foo - } - } yields { - "No possible solution" - } - } -} - -#[test] -fn quantified_types() { - test! { - program { - trait Foo { } - struct fn<'a> { } - struct fn2<'a, 'b> { } - impl Foo for for<'a> fn<'a> { } - } - - goal { - for<'a> fn<'a>: Foo - } yields { - "Unique" - } - - goal { - for<'a, 'b> fn2<'a, 'b> = for<'b, 'a> fn2<'a, 'b> - } yields { - "Unique" - } - - goal { - forall<'a> { fn<'a>: Foo } - } yields { - // Lifetime constraints are unsatisfiable - "Unique; substitution [], \ - lifetime constraints [InEnvironment { environment: Env([]), goal: '!2_0 == '!1_0 }]" - } - } -} - -#[test] -fn higher_ranked_implied_bounds() { - test! { - program { - trait Foo<'a> { } - trait Bar where forall<'a> Self: Foo<'a> { } - } - - goal { - forall { - if (T: Bar) { - forall<'a> { - T: Foo<'a> - } - } - } - } yields { - "Unique" - } - } - - test! 
{ - program { - trait Foo { } - trait Bar where forall Self: Foo { } - } - - goal { - forall { - if (T: Bar) { - forall { - T: Foo - } - } - } - } yields { - "Unique" - } - } -} - -#[test] -fn deref_goal() { - test! { - program { - #[lang_deref] - trait Deref { type Target; } - struct Foo { } - struct Bar { } - struct Baz { } - impl Deref for Foo { type Target = Bar; } - } - - goal { - Derefs(Foo, Bar) - } yields { - "Unique" - } - - goal { - Derefs(Foo, Baz) - } yields { - "No possible solution" - } - } - - test! { - program { - #[lang_deref] - trait Deref { type Target; } - struct Arc { } - struct i32 { } - struct u64 { } - impl Deref for Arc { type Target = T; } - } - - goal { - Derefs(Arc, i32) - } yields { - "Unique" - } - - goal { - Derefs(Arc, u64) - } yields { - "No possible solution" - } - } -} - -#[test] -fn local_and_upstream_types() { - test! { - program { - #[upstream] struct Upstream { } - struct Local { } - } - - goal { IsLocal(Upstream) } yields { "No possible solution" } - goal { IsUpstream(Upstream) } yields { "Unique" } - - goal { IsLocal(Local) } yields { "Unique" } - goal { IsUpstream(Local) } yields { "No possible solution" } - } - - test! { - program { - trait Clone { } - #[upstream] struct Upstream where T: Clone { } - struct Local where T: Clone { } - - #[upstream] struct Upstream2 { } - struct Internal2 { } - } - - goal { forall { IsLocal(Upstream) } } yields { "No possible solution" } - goal { forall { IsUpstream(Upstream) } } yields { "Unique" } - - goal { forall { IsLocal(Local) } } yields { "Unique" } - goal { forall { IsUpstream(Local) } } yields { "No possible solution" } - } -} - -#[test] -fn is_fully_visible() { - // Should be visible regardless of local, fundamental, or upstream - test! 
{ - program { - #[upstream] struct Upstream { } - struct Local { } - - #[upstream] - #[fundamental] - struct Box { } - } - - goal { IsFullyVisible(Upstream) } yields { "Unique" } - goal { IsFullyVisible(Local) } yields { "Unique" } - goal { IsFullyVisible(Box) } yields { "Unique" } - goal { IsFullyVisible(Box) } yields { "Unique" } - } - - // Should be visible regardless of local, fundamental, or upstream - test! { - program { - #[upstream] struct Upstream { } - struct Local { } - - #[upstream] struct Upstream2 { } - struct Local2 { } - - #[upstream] - #[fundamental] - struct Box { } - } - - // Unknown type parameters are not fully visible - goal { forall { IsFullyVisible(Box) } } yields { "No possible solution" } - goal { forall { IsFullyVisible(Upstream2) } } yields { "No possible solution" } - goal { forall { IsFullyVisible(Local2) } } yields { "No possible solution" } - - // Without any unknown type parameters, local and upstream should not matter - goal { forall { IsFullyVisible(Upstream2) } } yields { "Unique" } - goal { forall { IsFullyVisible(Upstream2) } } yields { "Unique" } - goal { forall { IsFullyVisible(Local2) } } yields { "Unique" } - goal { forall { IsFullyVisible(Local2) } } yields { "Unique" } - - // Fundamental anywhere should not change the outcome - goal { forall { IsFullyVisible(Box>) } } yields { "Unique" } - goal { forall { IsFullyVisible(Box>) } } yields { "Unique" } - goal { forall { IsFullyVisible(Box>) } } yields { "Unique" } - goal { forall { IsFullyVisible(Box>) } } yields { "Unique" } - goal { forall { IsFullyVisible(Upstream2>) } } yields { "Unique" } - goal { forall { IsFullyVisible(Upstream2>) } } yields { "Unique" } - goal { forall { IsFullyVisible(Local2>) } } yields { "Unique" } - goal { forall { IsFullyVisible(Local2>) } } yields { "Unique" } - } -} - -#[test] -fn fundamental_types() { - // NOTE: These tests need to have both Local and Upstream structs since chalk will attempt - // to enumerate all of them. 
- - // This first test is a sanity check to make sure `Box` isn't a special case. - // By testing this, we ensure that adding the #[fundamental] attribute does in fact - // change behaviour - test! { - program { - #[upstream] struct Box { } - - #[upstream] struct Upstream { } - struct Local { } - } - - // Without fundamental, Box should behave like a regular upstream type - goal { forall { not { IsLocal(Box) } } } yields { "Unique" } - goal { forall { IsLocal(Box) } } yields { "No possible solution" } - goal { forall { IsUpstream(Box) } } yields { "Unique" } - - // Without fundamental, Box is upstream regardless of its inner type - goal { IsLocal(Box) } yields { "No possible solution" } - goal { IsLocal(Box) } yields { "No possible solution" } - goal { IsUpstream(Box) } yields { "Unique" } - goal { IsUpstream(Box) } yields { "Unique" } - } - - test! { - program { - #[upstream] - #[fundamental] - struct Box { } - - #[upstream] struct Upstream { } - struct Local { } - } - - // With fundamental, Box can be local for certain types, so there is no unique solution - // anymore for any of these - goal { forall { not { IsLocal(Box) } } } yields { "No possible solution" } - goal { forall { IsLocal(Box) } } yields { "No possible solution" } - goal { forall { IsUpstream(Box) } } yields { "No possible solution" } - - // With fundamental, some of these yield different results -- no longer depends on Box - // itself - goal { IsLocal(Box) } yields { "No possible solution" } - goal { IsLocal(Box) } yields { "Unique" } - goal { IsUpstream(Box) } yields { "Unique" } - goal { IsUpstream(Box) } yields { "No possible solution" } - } - - test! 
{ - program { - #[upstream] - #[fundamental] - struct Box { } - - trait Clone { } - #[upstream] struct Upstream where T: Clone { } - struct Local where T: Clone { } - - #[upstream] struct Upstream2 { } - struct Internal2 { } - } - - // Upstream is upstream no matter what, so this should not be local for any T - goal { forall { IsLocal(Box>) } } yields { "No possible solution" } - goal { forall { IsUpstream(Box>) } } yields { "Unique" } - - // A fundamental type inside an upstream type should not make a difference (i.e. the rules - // for the outer, non-fundamental type should apply) - goal { forall { IsLocal(Upstream>) } } yields { "No possible solution" } - goal { forall { IsUpstream(Upstream>) } } yields { "Unique" } - - // Make sure internal types within an upstream type do not make a difference - goal { forall { IsLocal(Box>>) } } yields { "No possible solution" } - goal { forall { IsUpstream(Box>>) } } yields { "Unique" } - - // Local is local no matter what, so this should be local for any T - goal { forall { IsLocal(Box>) } } yields { "Unique" } - goal { forall { IsUpstream(Box>) } } yields { "No possible solution" } - - // A fundamental type inside an internal type should not make a difference - goal { forall { IsLocal(Local>) } } yields { "Unique" } - goal { forall { IsUpstream(Local>) } } yields { "No possible solution" } - - // Make sure upstream types within an internal type and vice versa do not make a difference - goal { forall { IsLocal(Box>>) } } yields { "Unique" } - goal { forall { IsUpstream(Box>>) } } yields { "Unique" } - } - - // Nested fundamental types should still be local if they can be recursively proven to be local - test! 
{ - program { - #[upstream] - #[fundamental] - struct Box { } - // This type represents &T which is also fundamental - #[upstream] - #[fundamental] - struct Ref { } - - trait Clone { } - #[upstream] struct Upstream where T: Clone { } - struct Local where T: Clone { } - - #[upstream] struct Upstream2 { } - struct Internal2 { } - } - - goal { forall { IsLocal(Ref>) } } yields { "No possible solution" } - goal { forall { IsUpstream(Ref>) } } yields { "No possible solution" } - - goal { IsLocal(Ref>) } yields { "No possible solution" } - goal { IsUpstream(Ref>) } yields { "Unique" } - - goal { IsLocal(Ref>) } yields { "Unique" } - goal { IsUpstream(Ref>) } yields { "No possible solution" } - } - - // If a type is not upstream, it is always local regardless of its parameters or #[fundamental] - test! { - program { - // if we were compiling std, Box would never be upstream - #[fundamental] - struct Box { } - - #[upstream] struct Upstream { } - struct Local { } - } - - goal { forall { IsLocal(Box) } } yields { "Unique" } - goal { IsLocal(Box) } yields { "Unique" } - goal { IsLocal(Box) } yields { "Unique" } - } -} - -#[test] -fn local_impl_allowed_for_traits() { - test! { - program { - trait LocalTrait { } - trait LocalTrait2 { } - - #[upstream] struct Upstream { } - struct Local { } - } - - // Local traits are always implementable - goal { forall { LocalImplAllowed(T: LocalTrait) } } yields { "Unique" } - goal { LocalImplAllowed(Local: LocalTrait) } yields { "Unique" } - goal { LocalImplAllowed(Upstream: LocalTrait) } yields { "Unique" } - goal { forall { LocalImplAllowed(T: LocalTrait2) } } yields { "Unique" } - goal { forall { LocalImplAllowed(T: LocalTrait2) } } yields { "Unique" } - goal { forall { LocalImplAllowed(Local: LocalTrait2) } } yields { "Unique" } - goal { forall { LocalImplAllowed(Upstream: LocalTrait2) } } yields { "Unique" } - } - - // Single-type parameter trait refs (Self only) - test! 
{ - program { - #[upstream] trait UpstreamTrait { } - - #[upstream] struct Upstream { } - #[upstream] struct Upstream2 { } - struct Local { } - struct Local2 { } - } - - // No local type - goal { LocalImplAllowed(Upstream: UpstreamTrait) } yields { "No possible solution" } - goal { forall { LocalImplAllowed(T: UpstreamTrait) } } yields { "No possible solution" } - - // Local type, not preceded by anything - // Notice that the types after the first local type do not matter at all - goal { LocalImplAllowed(Local: UpstreamTrait) } yields { "Unique" } - } - - // Multi-type parameter trait refs (Self, T) - test! { - program { - trait Clone { } - #[upstream] trait UpstreamTrait2 where T: Clone { } - - #[upstream] struct Upstream { } - #[upstream] struct Upstream2 { } - struct Local { } - struct Local2 { } - } - - // No local type - goal { forall { LocalImplAllowed(T: UpstreamTrait2) } } yields { "No possible solution" } - goal { forall { LocalImplAllowed(T: UpstreamTrait2) } } yields { "No possible solution" } - goal { forall { LocalImplAllowed(Upstream: UpstreamTrait2) } } yields { "No possible solution" } - - // Local type, but preceded by a type parameter - goal { forall { LocalImplAllowed(T: UpstreamTrait2) } } yields { "No possible solution" } - - // Local type, not preceded by anything - // Notice that the types after the first local type do not matter at all - goal { forall { LocalImplAllowed(Local: UpstreamTrait2) } } yields { "Unique" } - goal { LocalImplAllowed(Local: UpstreamTrait2) } yields { "Unique" } - goal { LocalImplAllowed(Local: UpstreamTrait2) } yields { "Unique" } - - // Local type, but preceded by a fully visible type (i.e. 
no placeholder types) - goal { LocalImplAllowed(Upstream: UpstreamTrait2) } yields { "Unique" } - goal { LocalImplAllowed(Upstream2: UpstreamTrait2) } yields { "Unique" } - goal { LocalImplAllowed(Upstream2: UpstreamTrait2) } yields { "Unique" } - - // Type parameter covered by the local type - goal { forall { LocalImplAllowed(Upstream: UpstreamTrait2>) } } yields { "Unique" } - goal { forall { LocalImplAllowed(Upstream2: UpstreamTrait2>) } } yields { "Unique" } - goal { forall { LocalImplAllowed(Upstream2: UpstreamTrait2>) } } yields { "Unique" } - - // Type parameter covered by a deeply nested upstream type - // Notice that it does not matter that the T is wrapped in a local type because the outer - // type is still upstream - goal { forall { LocalImplAllowed(Upstream2>: UpstreamTrait2>) } } yields { "No possible solution" } - // Does not matter whether the covered type parameter is eventually covered or not by the - // first actually local type found - goal { forall { LocalImplAllowed(Upstream2>: UpstreamTrait2>) } } yields { "No possible solution" } - } - - test! { - program { - trait Clone { } - trait Eq { } - // Lifetime is just included to show that it does not break anything. - // Where clauses do not change the results at all. 
- #[upstream] trait UpstreamTrait<'a, T, U, V> where T: Clone, U: Eq, V: Clone, V: Eq { } - trait InternalTrait<'a, T, U, V> where T: Clone, U: Eq, V: Clone, V: Eq { } - - #[upstream] struct Upstream { } - #[upstream] struct Upstream2 { } - struct Local { } - } - - // Local traits can be implemented regardless of the types involved - goal { forall { LocalImplAllowed(Self: InternalTrait<'a, T, U, V>) } } yields { "Unique" } - - // Upstream traits definitely cannot be implemented for all types - goal { forall { LocalImplAllowed(Self: UpstreamTrait<'a, T, U, V>) } } yields { "No possible solution" } - - // No local types - goal { forall<'a> { LocalImplAllowed(Upstream2: UpstreamTrait<'a, Upstream, Upstream, Upstream>) } } yields { "No possible solution" } - goal { forall<'a> { LocalImplAllowed(Upstream2: UpstreamTrait< - 'a, - Upstream2, - Upstream2>>, - Upstream2> - >) } } yields { "No possible solution" } - - // Local type, not preceded by anything -- types after the first local type do not matter - goal { forall<'a, T, U, V> { LocalImplAllowed(Local: UpstreamTrait<'a, T, U, V>) } } yields { "Unique" } - goal { forall<'a, U, V> { LocalImplAllowed(Local: UpstreamTrait<'a, Local, U, V>) } } yields { "Unique" } - goal { forall<'a, U, V> { LocalImplAllowed(Local: UpstreamTrait<'a, Upstream, U, V>) } } yields { "Unique" } - goal { forall<'a> { LocalImplAllowed(Local: UpstreamTrait<'a, Upstream, Local, Local>) } } yields { "Unique" } - - // Local type preceded by a type that is not fully visible - goal { forall<'a, T> { LocalImplAllowed(T: UpstreamTrait<'a, Upstream, Upstream, Local>) } } yields { "No possible solution" } - goal { forall<'a, T> { LocalImplAllowed(Upstream: UpstreamTrait<'a, T, Upstream, Local>) } } yields { "No possible solution" } - goal { forall<'a, T> { LocalImplAllowed(Upstream: UpstreamTrait<'a, Upstream, T, Local>) } } yields { "No possible solution" } - - // Once again, types after the first local do not matter - goal { forall<'a, T> { 
LocalImplAllowed(Upstream: UpstreamTrait<'a, Upstream, Local, T>) } } yields { "Unique" } - } -} diff --git a/src/test/slg.rs b/src/test/slg.rs deleted file mode 100644 index 51758013dce..00000000000 --- a/src/test/slg.rs +++ /dev/null @@ -1,1115 +0,0 @@ -#![cfg(test)] - -use chalk_solve::ext::*; -use chalk_solve::solve::slg::implementation::SlgContext; -use chalk_engine::forest::Forest; -use std::sync::Arc; -use test_util::*; -use chalk_solve::solve::SolverChoice; - -macro_rules! test { - (program $program:tt $(goal $goal:tt first $n:tt with max $depth:tt { $expected:expr })*) => { - solve_goal(stringify!($program), vec![$(($depth, $n, stringify!($goal), $expected)),*]) - }; - - (program $program:tt $(goal $goal:tt fixed $n:tt with max $depth:tt { $expected:expr })*) => { - solve_goal_fixed_num_answers( - stringify!($program), - vec![$(($depth, $n, stringify!($goal), $expected)),*], - ) - } -} - -fn solve_goal(program_text: &str, goals: Vec<(usize, usize, &str, &str)>) { - println!("program {}", program_text); - assert!(program_text.starts_with("{")); - assert!(program_text.ends_with("}")); - let program = &Arc::new( - parse_and_lower_program( - &program_text[1..program_text.len() - 1], - SolverChoice::default() - ).unwrap() - ); - let env = &Arc::new(program.environment()); - chalk_ir::tls::set_current_program(&program, || { - for (max_size, num_answers, goal_text, expected) in goals { - println!("----------------------------------------------------------------------"); - println!("goal {}", goal_text); - assert!(goal_text.starts_with("{")); - assert!(goal_text.ends_with("}")); - let goal = parse_and_lower_goal(&program, &goal_text[1..goal_text.len() - 1]).unwrap(); - let peeled_goal = goal.into_peeled_goal(); - let mut forest = Forest::new(SlgContext::new(env, max_size)); - let result = format!("{:#?}", forest.force_answers(peeled_goal, num_answers)); - - assert_test_result_eq(&expected, &result); - } - }); -} - -fn solve_goal_fixed_num_answers(program_text: 
&str, goals: Vec<(usize, usize, &str, &str)>) { - println!("program {}", program_text); - assert!(program_text.starts_with("{")); - assert!(program_text.ends_with("}")); - let program = &Arc::new( - parse_and_lower_program( - &program_text[1..program_text.len() - 1], - SolverChoice::default() - ).unwrap() - ); - let env = &Arc::new(program.environment()); - chalk_ir::tls::set_current_program(&program, || { - for (max_size, num_answers, goal_text, expected) in goals { - println!("----------------------------------------------------------------------"); - println!("goal {}", goal_text); - assert!(goal_text.starts_with("{")); - assert!(goal_text.ends_with("}")); - let goal = parse_and_lower_goal(&program, &goal_text[1..goal_text.len() - 1]).unwrap(); - let peeled_goal = goal.into_peeled_goal(); - let mut forest = Forest::new(SlgContext::new(env, max_size)); - let result = format!("{:?}", forest.solve(&peeled_goal)); - - assert_test_result_eq(&expected, &result); - - let num_cached_answers_for_goal = forest.num_cached_answers_for_goal(&peeled_goal); - // ::test_util::assert_test_result_eq( - // &format!("{}", num_cached_answers_for_goal), - // &format!("{}", expected_num_answers) - // ); - assert_eq!(num_cached_answers_for_goal, num_answers); - } - }); -} - -#[test] -fn basic() { - test! { - program { - trait Sized { } - - struct i32 { } - impl Sized for i32 { } - } - - goal { - forall { if (T: Sized) { T: Sized } } - } first 2 with max 10 { - r"[ - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - } - ]" - } - } -} - -#[test] -fn breadth_first() { - test! 
{ - program { - trait Sized { } - - struct i32 { } - impl Sized for i32 { } - - struct Vec { } - impl Sized for Vec where T: Sized { } - - struct Slice { } - impl Sized for Slice where T: Sized { } - } - - goal { - exists { T: Sized } - } first 5 with max 10 { - r"[ - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := i32], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - }, - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := Vec], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - }, - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := Slice], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - }, - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := Vec>], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - }, - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := Slice>], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - } - ]" - } - } -} - -#[test] -fn infinite_recursion() { - test! { - program { - trait A { } - trait B { } - trait C { } - trait D { } - - struct Vec { } - impl A for Vec where T: B { } - impl B for Vec where T: C { } - impl C for Vec where T: D { } - impl D for Vec where T: A { } - } - - goal { - exists { T: A } - } first 5 with max 10 { - r"[]" - } - } -} - -/// Make sure we don't get a stack overflow or other badness for this -/// test from scalexm. -#[test] -fn subgoal_abstraction() { - test! { - program { - trait Foo { } - struct Box { } - impl Foo for T where Box: Foo { } - } - - goal { - exists { T: Foo } - } first 5 with max 50 { - r"[]" - } - } -} - -#[test] -fn flounder() { - test! 
{ - program { - trait A { } - - struct Vec { } - impl A for Vec { } - } - - goal { - // This goal "flounders" because it has a free existential - // variable. We choose to replace it with a `CannotProve` - // result. - exists { not { T: A } } - } first 5 with max 10 { - r"[ - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := ^0], - constraints: [] - }, - binders: [ - Ty(U0) - ] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: { - CannotProve( - () - ) - } - } - } - ]" - } - } -} - -// Test that, when solving `?T: Sized`, we only wind up pulling a few -// answers before we stop. -#[test] -fn only_draw_so_many() { - test! { - program { - trait Sized { } - - struct Vec { } - impl Sized for Vec where T: Sized { } - - struct i32 { } - impl Sized for i32 { } - - struct Slice { } - impl Sized for Slice where T: Sized { } - } - - goal { - exists { T: Sized } - } fixed 2 with max 10 { - "Some(Ambig(Unknown))" - } - } -} - -#[test] -fn only_draw_so_many_blow_up() { - test! { - program { - trait Sized { } - trait Foo { } - - struct Vec { } - impl Sized for Vec where T: Sized { } - impl Foo for Vec where T: Sized { } - - struct i32 { } - impl Sized for i32 { } - - struct Slice { } - impl Sized for Slice where T: Sized { } - } - - goal { - exists { T: Foo } - } fixed 2 with max 10 { - "Some(Ambig(Definite(Canonical { value: [?0 := Vec<^0>], binders: [Ty(U0)] })))" - } - } -} - -/// Here, P and Q depend on one another through a negative loop. -#[test] -fn negative_loop() { - test! 
{ - program { - trait P { } - trait Q { } - struct u32 { } - - forall<> { u32: P if not { u32: Q } } - forall<> { u32: Q if not { u32: P } } - } - - goal { - u32: P - } first 5 with max 3 { - r"[ - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: { - Negative( - TableIndex(1) - ) - } - } - } - ]" - } - } -} - -#[test] -fn subgoal_cycle_uninhabited() { - test! { - program { - trait Foo { } - struct Box { } - struct Vec { } - struct u32 { } - impl Foo for Box where Box>: Foo { } - } - - // There is no solution here with a finite proof, so we get - // back: 0 answer(s) found. - goal { - exists { T: Foo } - } first 10 with max 2 { - r"[]" - } - - // Unsurprisingly, applying negation succeeds then. - goal { - not { exists { T: Foo } } - } first 10 with max 2 { - r"[ - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - } - ]" - } - - // Eqiuvalent to the previous. - goal { - forall { not { T: Foo } } - } first 10 with max 2 { - r"[ - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - } - ]" - } - - // However, if we come across a negative goal that exceeds our - // size threshold, we have a problem. - goal { - exists { T = Vec, not { Vec>: Foo } } - } first 10 with max 3 { - r"[ - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := Vec], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: { - CannotProve( - () - ) - } - } - } - ]" - } - - // Same query with larger threshold works fine, though. 
- goal { - exists { T = Vec, not { Vec>: Foo } } - } first 10 with max 4 { - r"[ - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := Vec], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - } - ]" - } - - // Here, due to the hypothesis, there does indeed exist a suitable T, `U`. - goal { - forall { if (U: Foo) { exists { T: Foo } } } - } first 10 with max 2 { - r"[ - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := !1_0], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - } - ]" - } - } -} - -#[test] -fn subgoal_cycle_inhabited() { - test! { - program { - trait Foo { } - struct Box { } - struct Vec { } - struct u32 { } - impl Foo for Box where Box>: Foo { } - impl Foo for u32 { } - } - - goal { - exists { T: Foo } - } first 10 with max 3 { - r"[ - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := u32], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - } - ]" - } - } -} - -#[test] -fn basic_region_constraint_from_positive_impl() { - test! { - program { - trait Foo { } - struct Ref<'a, 'b, T> { } - struct u32 { } - impl<'x, T> Foo for Ref<'x, 'x, T> { } - } - - goal { - forall<'a, 'b, T> { Ref<'a, 'b, T>: Foo } - } first 10 with max 3 { - r"[ - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [], - constraints: [ - InEnvironment { - environment: Env([]), - goal: '!1_1 == '!1_0 - } - ] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - } - ]" - } - } -} - -#[test] -#[allow(non_snake_case)] -fn example_2_1_EWFS() { - test! 
{ - program { - trait Edge { } - trait TransitiveClosure { } - struct a { } - struct b { } - struct c { } - - forall<> { a: Edge } - forall<> { b: Edge } - forall<> { b: Edge } - forall { X: TransitiveClosure if X: Edge } - forall { X: TransitiveClosure if X: Edge, Z: TransitiveClosure } - } - - goal { - exists { a: TransitiveClosure } - } first 10 with max 3 { - r"[ - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := b], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - }, - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := c], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - }, - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := a], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - } - ]" - } - } -} - -#[test] -#[allow(non_snake_case)] -fn example_2_2_EWFS() { - test! { - program { - trait M { } - trait P { } - trait Q { } - - struct a { } - struct b { } - struct c { } - - forall { X: M if not { X: P } } - forall<> { a: P } - forall { X: P if X: Q } - forall<> { b: Q } - forall { X: Q if X: P } - } - - goal { - c: M - } first 10 with max 3 { - r"[ - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - } - ]" - } - } -} - -#[test] -#[allow(non_snake_case)] -fn example_2_3_EWFS() { - test! 
{ - program { - trait W { } - trait M { } - trait P { } - - struct a { } - struct b { } - struct c { } - - forall { X: W if X: M, not { Y: W }, Y: P } - forall<> { a: M } - forall<> { b: M } - forall<> { c: M } - forall<> { b: P } - } - - goal { - a: W - } first 10 with max 3 { - r"[ - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - } - ]" - } - } -} - -#[test] -#[allow(non_snake_case)] -fn example_3_3_EWFS() { - test! { - program { - trait S { } - trait P { } - trait Q { } - - struct a { } - - forall<> { a: S if not { a: P }, not { a: Q } } - forall<> { a: P if not { a: S }, a: Q } - forall<> { a: Q if not { a: S }, a: P } - } - - goal { - a: S - } first 10 with max 3 { - // We don't yet have support for **simplification** -- - // hence we delay the negatives here but that's it. - r"[ - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: { - Negative( - TableIndex(1) - ), - Negative( - TableIndex(6) - ) - } - } - } - ]" - } - } -} - -/// Here, P is neither true nor false. If it were true, then it would -/// be false, and so forth. -#[test] -fn contradiction() { - test! { - program { - trait P { } - struct u32 { } - - forall<> { u32: P if not { u32: P } } - } - - goal { - u32: P - } first 10 with max 3 { - r"[ - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: { - Negative( - TableIndex(0) - ) - } - } - } - ]" - } - } -} - -/// Test (along with the other `cached_answers` tests) that the -/// ordering in which we we encounter clauses doesn't affect the final -/// set of answers we get. 
In particular, all of them should get 5 -/// answers, but in Ye Olde Days Of Yore there were sometimes bugs -/// that came up when replaying tabled answers that led to fewer -/// answers being produced. -/// -/// This test is also a test for ANSWER ABSTRACTION: the only reason -/// we get 5 answers is because of the max size of 2. -#[test] -fn cached_answers_1() { - test! { - program { - trait Sour { } - struct Lemon { } - struct Vinegar { } - struct HotSauce { } - - // Use explicit program clauses here rather than traits - // and impls to avoid hashmaps and other things that - // sometimes alter the final order of the program clauses: - forall<> { Lemon: Sour } - forall<> { Vinegar: Sour } - forall { HotSauce: Sour if T: Sour } - } - - goal { - exists { T: Sour } - } first 10 with max 2 { - r"[ - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := Lemon], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - }, - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := Vinegar], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - }, - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := HotSauce], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - }, - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := HotSauce], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - }, - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := HotSauce<^0>], - constraints: [] - }, - binders: [ - Ty(U0) - ] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: { - CannotProve( - () - ) - } - } - } - ]" - } - } -} - -/// See `cached_answers_1`. -#[test] -fn cached_answers_2() { - test! 
{ - program { - trait Sour { } - struct Lemon { } - struct Vinegar { } - struct HotSauce { } - - forall { HotSauce: Sour if T: Sour } - forall<> { Lemon: Sour } - forall<> { Vinegar: Sour } - } - - goal { - exists { T: Sour } - } first 10 with max 2 { - r"[ - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := Lemon], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - }, - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := Vinegar], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - }, - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := HotSauce], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - }, - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := HotSauce], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - }, - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := HotSauce<^0>], - constraints: [] - }, - binders: [ - Ty(U0) - ] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: { - CannotProve( - () - ) - } - } - } - ]" - } - } -} - -/// See `cached_answers_1`. -#[test] -fn cached_answers_3() { - test! 
{ - program { - trait Sour { } - struct Lemon { } - struct Vinegar { } - struct HotSauce { } - - forall<> { Lemon: Sour } - forall { HotSauce: Sour if T: Sour } - forall<> { Vinegar: Sour } - } - - goal { - exists { T: Sour } - } first 10 with max 2 { - r"[ - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := Lemon], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - }, - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := HotSauce], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - }, - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := Vinegar], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - }, - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := HotSauce<^0>], - constraints: [] - }, - binders: [ - Ty(U0) - ] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: { - CannotProve( - () - ) - } - } - }, - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [?0 := HotSauce], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: {} - } - } - ]" - } - } -} - -/// Here, P depends on Q negatively, but Q depends only on itself. -/// What happens is that P adds a negative link on Q, so that when Q -/// delays, P is also delayed. -#[test] -fn negative_answer_delayed_literal() { - test! 
{ - program { - trait P { } - trait Q { } - struct u32 { } - - forall<> { u32: P if not { u32: Q } } - forall<> { u32: Q if not { u32: Q } } - } - - goal { - u32: P - } first 10 with max 3 { - r"[ - Answer { - subst: Canonical { - value: ConstrainedSubst { - subst: [], - constraints: [] - }, - binders: [] - }, - delayed_literals: DelayedLiteralSet { - delayed_literals: { - Negative( - TableIndex(1) - ) - } - } - } - ]" - } - } -} diff --git a/src/test_util.rs b/src/test_util.rs deleted file mode 100644 index 8bda2a02fe5..00000000000 --- a/src/test_util.rs +++ /dev/null @@ -1,110 +0,0 @@ -#![cfg(test)] - -use diff; -use chalk_parse; -use itertools::Itertools; -use std::fmt::Write; -use rust_ir::lowering::{LowerProgram, LowerGoal}; -use chalk_ir::Goal; -use rust_ir::Program; -use chalk_solve::solve::SolverChoice; -use errors::Result; - -pub fn parse_and_lower_program(text: &str, solver_choice: SolverChoice) -> Result { - chalk_parse::parse_program(text)?.lower(solver_choice) -} - -pub fn parse_and_lower_goal(program: &Program, text: &str) -> Result> { - chalk_parse::parse_goal(text)?.lower(program) -} - -macro_rules! lowering_success { - (program $program:tt) => { - let program_text = stringify!($program); - assert!(program_text.starts_with("{")); - assert!(program_text.ends_with("}")); - let result = parse_and_lower_program( - &program_text[1..program_text.len()-1], - chalk_solve::solve::SolverChoice::default() - ); - if let Err(ref e) = result { - println!("lowering error: {}", e); - } - assert!( - result.is_ok() - ); - } -} - -macro_rules! 
lowering_error { - (program $program:tt error_msg { $expected:expr }) => { - let program_text = stringify!($program); - assert!(program_text.starts_with("{")); - assert!(program_text.ends_with("}")); - let error = parse_and_lower_program( - &program_text[1..program_text.len()-1], - chalk_solve::solve::SolverChoice::default() - ).unwrap_err(); - let expected = $crate::errors::Error::from($expected); - assert_eq!( - error.to_string(), - expected.to_string() - ); - } -} - -crate fn assert_test_result_eq(expected: &str, actual: &str) { - let expected_trimmed: String = expected - .lines() - .map(|l| l.trim()) - .intersperse("\n") - .collect(); - - let actual_trimmed: String = actual - .lines() - .map(|l| l.trim()) - .intersperse("\n") - .collect(); - - if expected_trimmed == actual_trimmed { - return; - } - - println!("expected:\n{}", expected); - println!("actual:\n{}", actual); - - let diff = diff::lines( - &expected_trimmed, - &actual_trimmed, - ); - - // Skip to the first error: - let diff = diff.iter().skip_while(|r| match r { - diff::Result::Both(..) => true, - _ => false, - }); - - let mut final_diff = String::new(); - let mut accumulator = vec![]; - for result in diff { - let (prefix, s) = match result { - diff::Result::Both(a, _b) => { - // When we see things that are the same, don't print - // them right away; wait until we see another line of - // diff. - accumulator.push(a); - continue; - } - diff::Result::Left(a) => ("- ", a), - diff::Result::Right(a) => ("+ ", a), - }; - - for l in accumulator.drain(..) 
{ - writeln!(&mut final_diff, " {}", l).unwrap(); - } - - writeln!(&mut final_diff, "{}{}", prefix, s).unwrap(); - } - - assert!(false, "expected did not match actual, diff:\n{}", final_diff); -} diff --git a/tests/display/assoc_ty.rs b/tests/display/assoc_ty.rs new file mode 100644 index 00000000000..53d665cb6e6 --- /dev/null +++ b/tests/display/assoc_ty.rs @@ -0,0 +1,413 @@ +#[test] +fn test_trait_impl_assoc_type() { + // Test printing a single associated type - simplest case. + reparse_test!( + program { + struct Foo { } + struct Floo { } + trait Bar { + type Assoc; + } + impl Bar for Foo { + type Assoc = Floo; + } + } + ); +} +#[test] +fn test_trait_with_multiple_assoc_types() { + // Test multiple associated types per trait + reparse_test!( + program { + struct Foo { } + struct Floo { } + trait Bax { + type Assoc1; + type Assoc2; + } + impl Bax for Foo { + type Assoc1 = Floo; + type Assoc2 = Foo; + } + } + ); +} + +// The four "test_impl_assoc_type_with_generics_*" tests test various +// combinations of generics within associated types in impl blocks in order to +// flush out debrujin index errors (in other words, errors where we name +// generics incorrectly in the output) + +#[test] +fn test_impl_assoc_type_with_generics_using_impl_generics() { + // Test associated type value using generics introduced in impl block. + reparse_test!( + program { + struct Foo { } + struct Floo { } + trait Baz { + type Assoc; + } + impl Baz for Foo { + type Assoc = Floo; + } + } + ); +} + +#[test] +fn test_impl_assoc_type_with_generics_using_gat_generics() { + // Test associated type value using generics introduced in associated type. 
+ reparse_test!( + program { + struct Foo { } + struct Floo { } + trait Bur { + type Assoc; + } + impl Bur for Foo { + type Assoc = Floo; + } + } + ); +} + +#[test] +fn test_impl_assoc_type_with_generics_using_gat_generics_and_impl_block() { + // Test using both impl block generics and gat generics to ensure we give + // the first generic introduced in each scope a different name. + reparse_test!( + program { + struct Foo { } + struct Floo { } + trait Bun { + type Assoc; + } + impl Bun for Foo { + type Assoc = Floo; + } + } + ); +} + +#[test] +fn test_impl_assoc_type_with_generics_multiple_gat_generics_dont_conflict() { + // Grab bag test using different combinations of impl block and associated + // type generics in various places - try to flush out bugs the above 3 tests + // don't catch. + reparse_test!( + program { + struct Foo { } + struct Floo { } + trait Bun { + type Assoc1; + type Assoc2; + type Assoc3; + } + impl Bun for Foo { + type Assoc1 = Floo; + type Assoc2 = Floo; + type Assoc3 = Floo, Floo>; + } + } + ); +} + +#[test] +fn test_simple_assoc_type() { + // Test we can print a trait with an associated type. + reparse_test!( + program { + trait Foo { + type Assoc; + } + } + ); +} + +#[test] +fn test_assoc_type_bounds() { + // Test we can print associated type bounds. + reparse_test!( + program { + trait Byz {} + trait Buzz {} + trait Foo { + type Assoc: Byz + Buzz; + } + } + ); +} + +#[test] +fn test_simple_generic_assoc_type() { + // Test we can render a generic associated type. + reparse_test!( + program { + trait Trait {} + trait Foo { + type Assoc; + } + } + ); +} + +#[test] +fn test_simple_generic_assoc_type_with_bounds() { + // Test we render GATs with bounds correctly. 
+ reparse_test!( + program { + trait Trait {} + trait Foo { + type Assoc: Trait; + } + } + ); +} + +#[test] +fn test_simple_generic_assoc_type_with_where_clause() { + // Test that generic vars in associated type introduced by an associated + // render correctly in that associated type's where clause. + reparse_test!( + program { + trait Trait {} + trait Foo { + type Assoc where Y: Trait; + } + } + ); +} + +#[test] +fn test_assoc_type_in_generic_trait() { + // Test traits with both generics and associated types render correctly. + reparse_test!( + program { + trait Foo { + type Assoc; + } + } + ); +} + +#[test] +fn test_assoc_type_in_trait_with_multiple_generics() { + // Test traits with multiple generic parameters and an associated type + // render correctly. + reparse_test!( + program { + trait Fou { + type Assoc; + } + } + ); +} + +#[test] +fn test_assoc_type_where_clause_referencing_trait_generics() { + // Test generics introduced in trait blocks render correctly when referenced + // inside an associated type where clause. (looking for debrujin index errors) + reparse_test!( + program { + trait Bax {} + trait Foo { + type Assoc where T: Bax; + } + } + ); +} + +#[test] +fn test_assoc_type_and_trait_generics_coexist() { + // Test that we give associated type generics and trait generics different + // names. (looking for debrujin index errors) + reparse_test!( + program { + trait Bix {} + trait Foo { + type Assoc where Y: Bix; + } + } + ); +} + +#[test] +fn test_impl_assoc_ty() { + // Test we can print associated type values in impl blocks. + reparse_test!( + program { + struct Fuu {} + trait Bhz { + type Assoc; + } + impl Bhz for Fuu { + type Assoc = Fuu; + } + } + ); +} + +#[test] +fn test_impl_assoc_ty_in_generic_block() { + // Test we can print associated type values in generic impl blocks. 
+ reparse_test!( + program { + struct Fou {} + trait Bax { + type Assoc; + } + impl Bax for Fou { + type Assoc = Fou; + } + } + ); +} + +#[test] +fn test_impl_assoc_ty_value_referencing_block_generic() { + // Test we can print generics introduced in impl blocks inside associated + // type values. + reparse_test!( + program { + struct Fuu {} + trait Bmx { + type Assoc; + } + impl Bmx for Fuu { + type Assoc = T; + } + } + ); +} + +#[test] +fn test_impl_assoc_ty_value_referencing_block_generic_nested() { + // Test we can print generics introduced in impl blocks inside bigger + // expressions in an associated type value. + reparse_test!( + program { + struct Fuu {} + struct Guu {} + trait Bmx { + type Assoc; + } + impl Bmx for Fuu { + type Assoc = Guu; + } + } + ); +} + +#[test] +fn test_impl_generics_and_assoc_ty_generics_coexist() { + // Test we can print both generics introduced in an impl block and for an + // associated type in the same ty expression, and they aren't conflated with + // the same name. (looking for debrujin index errors) + reparse_test!( + program { + struct Fuu {} + struct Guu {} + trait Bmx { + type Assoc; + } + impl Bmx for Fuu { + type Assoc = Guu; + } + } + ); +} + +#[test] +fn test_impl_assoc_ty_alias() { + // Test printing `AliasTy` associated type bounds. In other words, test + // bounds which have `Assoc=Value` print correctly on associated types. + reparse_test!( + program { + struct Fow {} + struct Qac {} + trait Bow {} + trait Baq { + type Assoc: Boo; + } + trait Boo { + type Item; + } + impl Boo for Qac { + type Item = Fow; + } + impl Baq for Fow { + type Assoc = Qac; + } + } + ); +} + +// The "alias_ty" tests all use a separate `produces` clause as +// `Foo: Bax` bounds are lowered into two bounds, `Bax` and +// `Bax`, and the formatter does not coalesce those bounds. + +#[test] +fn test_alias_ty_bound_in_assoc_ty_where_clauses() { + // Test bounds which have `Assoc=Value` print correctly in associated type + // where clauses. 
+ reparse_test!( + program { + struct Foo { } + trait Bax { type BaxT; } + trait Test { + type Assoc + where + Foo: Bax; + } + } + produces { + struct Foo { } + trait Bax { type BaxT; } + trait Test { + type Assoc + where + Foo: Bax, + Foo: Bax; + } + } + ); +} + +#[test] +fn test_alias_ty_bound_in_struct_where_clauses() { + // Test bounds which have `Assoc=Value` print correctly in struct where + // clauses. + reparse_test!( + program { + struct Foo where T: Baux { } + trait Baux { type Assoc; } + } + produces { + struct Foo where T: Baux, T: Baux { } + trait Baux { type Assoc; } + } + ); +} + +#[test] +fn test_alias_ty_bound_in_impl_where_clauses() { + // Test bounds which have `Assoc=Value` print correctly in impl where clauses. + reparse_test!( + program { + struct Foo {} + trait Boz { type Assoc; } + impl Boz for Foo where T: Boz> { + type Assoc = Foo; + } + } + produces + { + struct Foo {} + trait Boz { type Assoc; } + impl Boz for Foo where T: Boz>, T: Boz { + type Assoc = Foo; + } + } + ); +} diff --git a/tests/display/built_ins.rs b/tests/display/built_ins.rs new file mode 100644 index 00000000000..2026f72b155 --- /dev/null +++ b/tests/display/built_ins.rs @@ -0,0 +1,285 @@ +use super::*; + +#[test] +fn test_function_pointer_type() { + // Test we can print the `fn()` type at all. (impl blocks are simply used as + // a way to reference this concrete type conveniently) + reparse_test!( + program { + struct Foo { } + trait Baz { } + impl Baz for Foo { } + impl Baz for fn(Foo) { } + } + ); +} + +#[test] +fn test_generic_function_pointer_type() { + // Test we can print a `fn()` type which references generics introduced in + // outer scopes. + reparse_test!( + program { + struct Foo<'a, T> + { + bar: fn(&'a T) -> &'a (T, T) + } + } + ); +} + +#[test] +fn test_scalar_types() { + // This is intended to test every scalar in a variety of places. In other + // words, test the matrix of {every scalar} x {concrete type usages}. 
This + // test should be updated to include new scalars, but it isn't super + // important that it includes every place a concrete type can be used. + let basic = &["bool", "char", "f16", "f32", "f64", "f128"]; + let ints = { + let prefixes = &["u", "i"]; + let sizes = &["size", "8", "16", "32", "64", "128"]; + prefixes + .iter() + .flat_map(move |&p| sizes.iter().map(move |&size| format!("{}{}", p, size))) + }; + let basic = basic.iter().copied().map(str::to_owned); + + let scalars = basic.chain(ints); + + for scalar in scalars { + reparse_test(&format!( + " + struct Foo {{ + field: {0} + }} + trait Bar {{ + type Baz; + }} + impl Bar for Foo {{ + type Baz = {0}; + }} + impl Bar for {0} {{ + type Baz = {0}; + }} + ", + scalar + )); + } +} + +#[test] +fn test_slice_types() { + // Test that we print slice types correctly in a variety of places. + reparse_test!( + program { + struct Foo { + field: [T] + } + trait Bar { + type Baz; + } + impl Bar for Foo { + type Baz = [T]; + } + impl Bar for [T] { + type Baz = Foo; + } + } + ); +} + +#[test] +fn test_str_types() { + // Test that we print 'str' correctly in a variety of places. + reparse_test!( + program { + struct Foo { + field: str + } + trait Bar { + type Baz; + } + impl Bar for Foo { + type Baz = str; + } + impl Bar for str { + type Baz = str; + } + } + ); +} + +#[test] +fn test_const_ptr() { + // Test that we can print *const ptrs in various places, including with generics. + reparse_test!( + program { + struct Foo { + field: *const T + } + trait Bar { + type Baz; + } + impl Bar for Foo { + type Baz = *const u32; + } + impl Bar for *const u32 { + type Baz = *const u32; + } + impl Bar for *const T { + type Baz = *const T; + } + } + ); +} + +#[test] +fn test_mut_ptr() { + // Test that we can print *mut ptrs in various places, including with generics. 
+ reparse_test!( + program { + struct Foo { + field: *mut T + } + trait Bar { + type Baz; + } + impl Bar for Foo { + type Baz = *mut u32; + } + impl Bar for *mut u32 { + type Baz = *mut u32; + } + impl Bar for *mut T { + type Baz = *mut T; + } + } + ); +} + +#[test] +fn test_immutable_references() { + reparse_test!( + program { + struct Foo<'a,T> { + field: &'a T + } + trait Bar { + type Baz; + } + impl<'a,T> Bar for Foo<'a,T> { + type Baz = &'a u32; + } + impl<'a> Bar for &'a u32 { + type Baz = &'a u32; + } + impl<'a,T> Bar for &'a T { + type Baz = &'a T; + } + } + ); +} + +#[test] +fn test_mutable_references() { + reparse_test!( + program { + struct Foo<'a,T> { + field: &'a mut T + } + trait Bar { + type Baz; + } + impl<'a,T> Bar for Foo<'a,T> { + type Baz = &'a mut u32; + } + impl<'a> Bar for &'a mut u32 { + type Baz = &'a u32; + } + impl<'a,T> Bar for &'a mut T { + type Baz = &'a mut T; + } + } + ); +} + +#[test] +fn test_empty_tuple() { + // Test empty tuples print correctly + reparse_test!( + program { + struct Fuu { + fuu_field: () + } + } + ); +} + +#[test] +fn test_one_and_many_tuples() { + // Test that single-element tuple is printed correctly with the required + // trailing comma that differentiates it from a parenthesized expression + reparse_test!( + program { + struct Uff { + fuu_field: (Iff,), + iff2_field: (Iff, Iff, Iff) + } + struct Iff { } + } + ); +} + +#[test] +fn test_tuples_using_generic_args() { + // Test 1, many tuples which contain generic parameters. + reparse_test!( + program { + struct Foo { + field: (u32,*const T,T), + field2: (T,), + field3: (T) + } + trait Bar { + type Baz; + } + impl Bar for Foo { + type Baz = (T,Foo,u32); + } + } + ); +} + +#[test] +fn test_impl_on_tuples_with_generics() { + // Test 0, 1, many tuples in one more place - impl blocks. 
+ reparse_test!( + program { + trait Blug {} + impl Blug for (T1,T2) { + + } + impl Blug for (T1,) { + + } + impl Blug for () { + + } + } + ); +} + +#[test] +fn test_array_types() { + // Test that we print array types correctly in multiple places they can occur. + reparse_test!( + program { + struct Bazz { } + struct Bar { + field: [Bazz; T] + } + trait Foo { } + impl Foo for [Bazz; T] { } + } + ); +} diff --git a/tests/display/const_.rs b/tests/display/const_.rs new file mode 100644 index 00000000000..8d012ca0500 --- /dev/null +++ b/tests/display/const_.rs @@ -0,0 +1,60 @@ +#[test] +fn test_const_generics() { + // Test we render const generic parameters correctly in a variety of places. + reparse_test!( + program { + struct Usize { } + struct Bar { } + trait Foo { } + trait AssocTy { + type Type; + } + impl Foo for Bar { } + impl AssocTy for Bar { + type Type = Usize; + } + opaque type Gee: Foo = Usize; + } + ); +} + +#[test] +fn test_basic_const_values_in_impls() { + // Test we render const values correctly in impls. + reparse_test!( + program { + struct Foo { } + trait Bar { } + impl Bar for Foo<0> { } + impl Bar for Foo<1> { } + impl Bar for Foo<2> { } + } + ); +} + +#[test] +fn test_basic_const_values_in_opaque_ty_values() { + // Test we render const values correctly in opaque type values. + reparse_test!( + program { + struct Foo { } + opaque type Zed = Foo<0>; + } + ); +} + +#[test] +fn test_basic_const_values_in_assoc_ty_values() { + // Test we render const values correctly in associated type values. + reparse_test!( + program { + struct Foo { } + trait Bar { + type Assoc; + } + impl Bar for Foo<0> { + type Assoc = Foo<1>; + } + } + ); +} diff --git a/tests/display/dyn_.rs b/tests/display/dyn_.rs new file mode 100644 index 00000000000..139aeb9c03a --- /dev/null +++ b/tests/display/dyn_.rs @@ -0,0 +1,97 @@ +#[test] +fn test_dyn_forall_in_impl() { + // Test we render `dyn forall` types correctly (and with the correct + // lifetime names) in impl blocks. 
+ reparse_test!( + program { + trait Foo<'t> {} + trait Bar<'a> {} + impl<'t> Foo<'t> for dyn forall<'a> Bar<'a> + 't {} + } + ); +} + +#[test] +fn test_dyn_forall_in_struct() { + // Test we render `dyn forall` types correctly (and with the correct + // lifetime names) in struct fields. + reparse_test!( + program { + struct Foo<'t> { + field: dyn forall<'a> Baz<'a> + 't + } + trait Baz<'a> {} + } + ); +} + +#[test] +fn test_dyn_forall_multiple_parameters() { + // Test we render `dyn forall` types with multiple lifetimes correctly, and + // with the correct lifetime names. + reparse_test!( + program { + struct Foo<'t> { + field: dyn forall<'a, 'b> Bix<'a, 'b> + 't + } + trait Bix<'a, 'b> {} + } + ); +} + +#[test] +fn test_multiple_forall_one_dyn() { + // Test we render `dyn forall A + forall B` correctly. + reparse_test!( + program { + struct Foo<'t> { + field1: dyn forall<'a> Bex<'a> + forall<'b> Byx<'b> + 't, + field2: dyn forall<'a, 'b> Bux<'a, 'b> + forall<'b, 'c> Brx<'b, 'c> + 't + } + trait Bex<'a> {} + trait Byx<'a> {} + trait Bux<'a, 'b> {} + trait Brx<'a, 'b> {} + } + ); +} + +#[test] +fn test_dyn_forall_with_trait_referencing_outer_lifetime() { + // Test we can render a trait inside a `dyn forall` referencing an outer + // lifetime correctly (in other words, test for debrujin index errors). + reparse_test!( + program { + struct Foo<'a> { + field: dyn forall<'b> Bpx<'a, 'b> + 'a + } + trait Bpx<'a, 'b> {} + } + ); +} + +#[test] +fn test_simple_dyn() { + // Test that we print `dyn Trait` types correctly. + reparse_test!( + program { + struct Foo<'a> { + field: dyn Bax + 'a + } + trait Bax {} + } + ); +} + +#[test] +fn test_simple_dyn_referencing_outer_generic_parameters() { + // Test that we print `dyn Trait` referencing outer generic parameters correctly. 
+ reparse_test!( + program { + struct Foo<'a> { + field: dyn Bix<'a> + 'a + } + trait Bix<'a> {} + } + ); +} diff --git a/tests/display/enum_.rs b/tests/display/enum_.rs new file mode 100644 index 00000000000..4321048fd8c --- /dev/null +++ b/tests/display/enum_.rs @@ -0,0 +1,106 @@ +#[test] +fn test_simple_enum() { + reparse_test!( + program { + enum Foo {} + } + ); +} + +#[test] +fn test_enum_generics() { + reparse_test!( + program { + enum Foo {} + enum Bar {} + } + ); +} + +#[test] +fn test_enum_bounds() { + // Test printing where clauses + reparse_test!( + program { + enum Foo where T: Trait {} + trait Trait {} + } + ); +} + +#[test] +fn test_enum_fields() { + // Test printing enums with fields, enum fields with fields, and enum + // generics in enum fields. + reparse_test!( + program { + enum Foo {} + enum Bar {} + enum Baz { + A { + x: Foo, + b: Bar, + y: Foo + }, + B(u32), + } + } + ); +} + +#[test] +fn test_enum_keywords() { + reparse_test!( + program { + #[upstream] + enum UpstreamFoo {} + + #[fundamental] + enum FundamentalFoo {} + + #[phantom_data] + enum PhantomFoo {} + + #[upstream] + #[fundamental] + #[phantom_data] + enum Bar {} + } + ); +} + +#[test] +fn test_enum_repr() { + reparse_test!( + program { + #[repr(C)] + enum CFoo {} + + #[repr(packed)] + enum PackedFoo {} + + // Test all orderings of multiple `repr()` attributes + + #[repr(C)] + #[repr(packed)] + enum CPackedFoo {} + + #[repr(packed)] + #[repr(C)] + enum PackedCFoo {} + } + ); +} + +#[test] +fn test_enum_repr_and_keywords_ordered_correctly() { + // Test that when we print both `repr` and another keyword, we order them in + // a way accepted by the parser. 
+ reparse_test!( + program { + #[upstream] + #[repr(C)] + enum UpstreamCFoo {} + } + ); +} diff --git a/tests/display/fn_.rs b/tests/display/fn_.rs new file mode 100644 index 00000000000..993f809a98f --- /dev/null +++ b/tests/display/fn_.rs @@ -0,0 +1,98 @@ +#[test] +fn test_basic_fn_def() { + // Test printing simple function definitions + reparse_test!( + program { + struct Foo {} + fn nothing(); + fn takes_foo(v: Foo); + fn gives_foo() -> Foo; + fn bar(a: Foo, _: Foo) -> Foo; + } + ); +} + +#[test] +fn test_generic_fn_def() { + // Test printing generics in function definitions + reparse_test!( + program { + struct Foo {} + struct Bar {} + fn identity(arg: T) -> T; + fn transform(a: Foo) -> Bar; + fn wrap(v: T) -> Foo; + } + ); +} + +#[test] +fn test_const_generic_fn_def() { + // Test printing const generics in function definitions + reparse_test!( + program { + fn uses_n(arg: [T; N]); + } + ); +} + +#[test] +fn test_opaque_ty_with_fn_def() { + // Test printing opaque types in function definitions + reparse_test!( + program { + opaque type Bar = (); + fn gives_bar() -> Bar; + fn receives_bar(param: Bar) -> (); + } + ); +} + +// These `test_fn_as_type_*` tests test various usages of fn types + +// We do not yet support "fn def" types, which this uses. +#[test] +#[ignore] +fn test_fn_as_type_in_functions() { + //(TODO: cover remaining places when functionality is implemented) + + // Test printing an fn type in a function definitions parameters and return + // type. + reparse_test!( + program { + fn foo(arg: u32); + fn baz(foo) -> u32; + fn bar() -> foo; + } + ); +} + +// We do not yet support "fn def" types, which this uses. +#[test] +#[ignore] +fn test_fn_as_type_in_opaque_ty_value() { + // Test printing an fn type as an opaque type's hidden value + reparse_test!( + program { + trait Bar {} + fn foo(); + impl Bar for Foo {} + opaque type Zed: Bar = foo; + } + ); +} + +// We do not yet support "fn def" types, which this uses. 
+#[test] +#[ignore] +fn test_fn_as_type_in_struct_field() { + // Test printing an fn type as a struct type's field + reparse_test!( + program { + fn foo(); + struct Vi { + field: foo + } + } + ); +} diff --git a/tests/display/formatting.rs b/tests/display/formatting.rs new file mode 100644 index 00000000000..40cc34879d7 --- /dev/null +++ b/tests/display/formatting.rs @@ -0,0 +1,156 @@ +#[test] +fn test_assoc_type_formatting() { + // Test associated type indentation + // This also tests spacing around trait, struct and impl items + reparse_test!( + program { + struct Foo {} + trait Bar { + type Assoc; + } + impl Bar for Foo { + type Assoc = (); + } + } + formatting matches +r#"struct [a-zA-Z0-9_-]+ \{\s*\} +trait [a-zA-Z0-9_-]+ \{ + type [a-zA-Z0-9_-]+; +\} +impl [a-zA-Z0-9_-]+ for [a-zA-Z0-9_-]+ \{ + type [a-zA-Z0-9_-]+ = \(\); +\}"# + ); +} + +#[test] +fn test_struct_field_formatting() { + // Test struct field indentation + reparse_test!( + program { + struct Foo {} + struct Bar { + field1: Foo + } + struct Azg { + field1: Foo, + field2: Bar + } + } + formatting matches +r#"struct [a-zA-Z0-9_-]+ \{\} +struct [a-zA-Z0-9_-]+ \{ + [a-zA-Z0-9_-]+: [a-zA-Z0-9_-]+ +\} +struct [a-zA-Z0-9_-]+ \{ + [a-zA-Z0-9_-]+: [a-zA-Z0-9_-]+, + [a-zA-Z0-9_-]+: [a-zA-Z0-9_-]+ +\}"# + ); +} + +#[test] +fn test_where_clause_formatting() { + // Test where clause indentation and new-lining on impls, traits and structs + reparse_test!( + program { + struct Foo where Foo: Baz, Foo: Bar {} + trait Bar where Foo: Baz, forall<'a> dyn Baz + 'a: Bar {} + trait Baz {} + impl Bar for Foo where Foo: Baz, (): Baz {} + impl Baz for Foo {} + impl<'a> Bar for dyn Baz + 'a {} + } + formatting matches +r#"struct [a-zA-Z0-9_-]+ +where + [a-zA-Z0-9_-]+: [a-zA-Z0-9_-]+, + [a-zA-Z0-9_-]+: [a-zA-Z0-9_-]+ +\{\s*\} +trait [a-zA-Z0-9_-]+ +where + [a-zA-Z0-9_-]+: [a-zA-Z0-9_-]+, + forall<'[a-zA-Z0-9_-]+> dyn [a-zA-Z0-9_-]+ \+ '[a-zA-Z0-9_-]+: [a-zA-Z0-9_-]+ +\{\s*\} +trait [a-zA-Z0-9_-]+ \{\} +impl [a-zA-Z0-9_-]+ 
for [a-zA-Z0-9_-]+ +where + [a-zA-Z0-9_-]+: [a-zA-Z0-9_-]+, + \(\): [a-zA-Z0-9_-]+ +\{\} +impl [a-zA-Z0-9_-]+ for [a-zA-Z0-9_-]+ \{\} +impl<'[a-zA-Z0-9_-]+> [a-zA-Z0-9_-]+ for dyn [a-zA-Z0-9_-]+ \+ '[a-zA-Z0-9_-]+ \{\}"# + ); +} + +#[test] +fn test_assoc_ty_where_clause() { + // Test associated ty where clause indentation (this verifies that the + // indentation is context aware) + reparse_test!( + program { + trait Bar {} + trait Fuzz { + type Assoc + where + u32: Bar, + Self: Bar; + } + } + formatting matches +r#"trait [a-zA-Z0-9_-]+ \{\s*\} +trait [a-zA-Z0-9_-]+ \{ + type [a-zA-Z0-9_-]+ + where + u32: [a-zA-Z0-9_-]+, + [a-zA-Z0-9_-]+: [a-zA-Z0-9_-]+; +\} +"# + ); +} + +#[test] +fn test_fn_where_clause() { + // Test where clause indentation, and fn type spacing + reparse_test!( + program { + trait Bar {} + fn foo<'a, T>() -> T + where + dyn Bar + 'a: Bar, + T: Bar; + } + formatting matches +r#"trait [a-zA-Z0-9_-]+ \{\s*\} +fn foo<'[a-zA-Z0-9_-]+, [a-zA-Z0-9_-]+>\(\) -> [a-zA-Z0-9_-]+ +where + dyn [a-zA-Z0-9_-]+ \+ '[a-zA-Z0-9_-]+: [a-zA-Z0-9_-]+, + [a-zA-Z0-9_-]+: [a-zA-Z0-9_-]+; +"# + ); +} + +#[test] +fn test_name_disambiguation() { + // Tests name disambiguation, types with the same name are renamed to avoid + // confusion. This can happen if the logging db contains types from + // different modules. + // we don't have modules in chalk so we can't actually test different + // structs or traits with the same name in Chalk - but luckily our + // implementation ignores types for name disambiguation, so we can test it + // indirectly by using a opaque type and trait of the same name. + reparse_test! 
( + program { + struct Foo {} + trait Baz {} + impl Baz for Foo {} + opaque type Baz: Baz = Foo; + } + produces { + struct Foo {} + trait Baz {} + impl Baz for Foo {} + opaque type Baz_1: Baz = Foo; + } + ); +} diff --git a/tests/display/impl_.rs b/tests/display/impl_.rs new file mode 100644 index 00000000000..20e9e484f86 --- /dev/null +++ b/tests/display/impl_.rs @@ -0,0 +1,47 @@ +#[test] +fn test_negative_auto_trait_impl() { + // Test we can render negative impls. + reparse_test!( + program { + struct Foo { } + #[auto] + trait Baz {} + impl !Baz for Foo { } + } + ); +} + +#[test] +fn test_generic_impl() { + // Tests we can print generics in impl blocks + reparse_test!( + program { + trait Baz {} + impl Baz for T {} + } + ); +} + +#[test] +fn test_impl_for_generic_adt() { + // Test that we can refer to impl-introduced generics in the impl decl + reparse_test!( + program { + trait Bar {} + impl Bar for G {} + } + ); +} + +#[test] +fn test_upstream_impl_keyword() { + // Test we print the "upstream" keyword. + reparse_test!( + program { + struct Bar {} + trait Foo {} + #[upstream] + impl Foo for Bar {} + } + ); +} diff --git a/tests/display/lifetimes.rs b/tests/display/lifetimes.rs new file mode 100644 index 00000000000..544a1905977 --- /dev/null +++ b/tests/display/lifetimes.rs @@ -0,0 +1,53 @@ +#[test] +fn test_various_forall() { + // Test we print lifetime vars introduced by 'forall' in various situations. + reparse_test!( + program { + struct Foo<'b> where forall<'a> Foo<'a>: Baz<'a> { } + trait Baz<'a> {} + trait Bax<'a> {} + trait Biz { + type Bex: forall<'a> Bax<'a>; + } + impl<'a> Baz<'a> for for<'b> fn(Foo<'b>) { } + impl<'a> Bax<'a> for fn(Foo<'a>) { } + impl<'a> Bax<'a> for dyn forall<'b> Baz<'b> + 'a { } + } + ); +} + +#[test] +fn test_lifetimes_in_structs() { + // Test printing lifetimes introduced by structs. 
+ reparse_test!( + program { + struct Foo<'b> { } + trait Baz<'a> {} + impl<'a> Baz<'a> for Foo<'a> { } + } + ); +} + +#[test] +fn test_lifetime_outlives() { + // Test printing lifetime outlives where clauses in a few places they can appear. + reparse_test!( + program { + struct Foo<'a, 'b> + where + 'a: 'b + { } + + trait Baz<'a, 'b> + where + 'a: 'b + { } + + impl<'a, 'b, 'c> Baz<'a, 'b> for Foo<'a, 'c> + where + 'a: 'c, + 'b: 'c + { } + } + ); +} diff --git a/tests/display/mod.rs b/tests/display/mod.rs new file mode 100644 index 00000000000..dd522d3c545 --- /dev/null +++ b/tests/display/mod.rs @@ -0,0 +1,20 @@ +#[macro_use] +mod util; + +mod assoc_ty; +mod built_ins; +mod const_; +mod dyn_; +mod enum_; +mod fn_; +mod formatting; +mod impl_; +mod lifetimes; +mod opaque_ty; +mod self_; +mod struct_; +mod trait_; +mod unique_names; +mod where_clauses; + +use self::util::*; diff --git a/tests/display/opaque_ty.rs b/tests/display/opaque_ty.rs new file mode 100644 index 00000000000..1c889cc1a9f --- /dev/null +++ b/tests/display/opaque_ty.rs @@ -0,0 +1,134 @@ +#[test] +fn opaque_types() { + // Test printing opaque type declarations, opaque types in associated types, + // and opaque types in impls. 
+ reparse_test!( + program { + struct Bar {} + trait Buz {} + trait Baz { + type Hi; + } + impl Buz for Bar {} + impl Baz for Foo { + type Hi = Foo; + } + opaque type Foo: Buz = Bar; + } + ); +} + +#[test] +fn opaque_ty_no_bounds() { + // Test printing opaque types without any bounds + reparse_test!( + program { + opaque type Foo = (); + } + ); +} + +#[test] +fn test_generic_opaque_types() { + // Test printing opaque types with generic parameters + reparse_test!( + program { + struct Foo {} + trait Bar {} + opaque type Baz: Bar = Foo; + + struct Fou {} + struct Unit {} + trait Bau {} + opaque type Boz: Bau = Fou; + } + ); +} + +#[test] +fn test_opaque_type_as_type_value() { + // Test printing an opaque type as the value for an associated type + reparse_test!( + program { + struct Foo {} + trait Bar {} + trait Fuzz { + type Assoc: Bar; + } + impl Bar for Foo {} + impl Fuzz for Foo { + type Assoc = Bax; + } + opaque type Bax: Bar = Foo; + } + ); +} + +#[test] +fn test_opaque_type_in_fn_ptr() { + // Test printing an opaque type as the parameter for a fn ptr type + reparse_test!( + program { + struct Foo {} + trait Bar {} + trait Faz { + type Assoc; + } + impl Faz for Foo { + type Assoc = fn(Baz); + } + opaque type Baz: Bar = Foo; + } + ); +} + +#[test] +fn test_generic_opaque_type_as_value() { + // Test printing a generic opaque type as an associated type's value + reparse_test!( + program { + struct Foo {} + trait Bar {} + trait Fizz { + type Assoc: Bar; + } + impl Bar for Foo {} + impl Fizz for Foo { + type Assoc = Baz; + } + opaque type Baz: Bar = Foo; + } + ); +} + +#[test] +fn test_generic_opaque_type_in_fn_ptr() { + // Test printing a generic opaque type as an fn ptr's parameter + reparse_test!( + program { + struct Foo {} + trait Bar {} + trait Faz { + type Assoc; + } + impl Faz for Foo { + type Assoc = fn(Baz); + } + impl Bar for Foo { } + opaque type Baz: Bar = Foo; + } + ); +} + +#[test] +fn multiple_bounds() { + // Test printing an opaque type with multiple 
bounds + reparse_test!( + program { + struct Baz {} + trait Foo {} + trait Fuu {} + opaque type Bar: Foo + Fuu = Baz; + } + ); +} diff --git a/tests/display/self_.rs b/tests/display/self_.rs new file mode 100644 index 00000000000..406b5c469e8 --- /dev/null +++ b/tests/display/self_.rs @@ -0,0 +1,189 @@ +#[test] +fn test_self_in_trait_bounds() { + // Test 'Self' printing in trait where clauses. + reparse_test!( + program { + trait Bkz {} + trait Foo where Self: Bkz {} + } + ); +} + +#[test] +fn test_self_in_forall() { + // Test 'Self' printing inside a forall clause. + reparse_test!( + program { + trait Baz<'a> {} + trait Foo where forall<'a> Self: Baz<'a> {} + } + ); +} + +#[test] +fn test_self_in_assoc_type_declarations() { + // Test 'Self' in associated types declarations prints correctly. + reparse_test!( + program { + trait Extra {} + trait Bez {} + trait Foo { + type Assoc1: Extra; + type Assoc2 where Self: Bez; + } + } + ); +} + +#[test] +fn test_self_in_generic_associated_type_declarations() { + // Test 'Self' in generic associated type declarations prints correctly. + reparse_test!( + program { + trait Biz {} + trait Foo { + type Assoc where Self: Biz; + } + } + ); +} + +#[test] +fn test_self_in_dyn() { + // Test that 'Self' in dyn correctly refers to the outer Self correctly. + reparse_test!( + program { + trait Bun {} + trait Foo<'a, T> { + type Assoc where dyn Bun + 'a: Bun; + } + } + ); +} + +#[test] +fn test_self_in_dyn_with_generics() { + // Test that 'Self' in dyn correctly refers to the outer Self when in the + // presence of generics introduced at the same time as that Self. In + // addition, test those generics also print correctly inside `dyn`. 
+ reparse_test!( + program { + trait Has {} + trait Bun {} + trait Fiz<'a, T> { + type Assoc1: Has + 'a>; + type Assoc2: Has + 'a>; + } + } + ); +} + +// Self doesn't work in these circumstances yet (test programs fail to lower) +#[ignore] +#[test] +fn test_self_in_struct_bounds() { + // Test 'self' prints correctly in various places in struct where clauses. + reparse_test!( + program { + trait Bax {} + trait Baz {} + struct Foo + where + T: Bax, + Self: Baz + { + } + } + ); +} + +// Self doesn't work in these circumstances yet (test programs fail to lower) +#[ignore] +#[test] +fn test_self_in_impl_block_associated_types() { + // Test 'Self' prints correctly in associated type values. + reparse_test!( + program { + trait Foo { + type Assoc; + } + struct Bix {} + impl Foo for Bix { + type Assoc = Self; + } + } + ); +} +// Self doesn't work in these circumstances yet (test programs fail to lower) +#[ignore] +#[test] +fn test_self_in_impl_block_associated_type_with_generics() { + // Test 'Self' prints correctly in impl blocks where we introduce + // generic parameters. In addition, test those parameters print correctly + // next to self. + reparse_test!( + program { + trait Faux {} + trait Paw { + type Assoc1; + type Assoc2; + } + struct Buzz {} + impl Paw for Buzz { + type Assoc1 = dyn Faux; + type Assoc2 = dyn Faux; + } + } + ); +} + +// Self doesn't work in these circumstances yet (test programs fail to lower) +#[ignore] +#[test] +fn test_self_in_impl_block_where_clauses() { + // Test 'Self' prints correctly in impl block where clauses. + reparse_test!( + program { + trait Foo {} + trait Fin {} + struct Bux {} + impl Foo for Bux where Self: Fin {} + } + ); +} + +#[test] +fn test_against_accidental_self() { + // In some of the writer code, it would be really easy to accidentally + // outputs the first generic parameter of an item as "Self". This tests + // against that. 
+ let in_structs = reparse_test!( + program { + struct Foo { + field: T + } + } + ); + assert!(!in_structs.output_text.contains("Self")); + let in_impl = reparse_test!( + program { + struct Foo {} + trait Bux { + type Assoc; + } + impl Bux for Foo { + type Assoc = T; + } + } + ); + assert!(!in_impl.output_text.contains("Self")); + let in_opaque = reparse_test!( + program { + struct Foo {} + trait Que {} + impl Que for Foo {} + opaque type Bar: Que = Foo; + } + ); + assert!(!in_opaque.output_text.contains("Self")); +} diff --git a/tests/display/struct_.rs b/tests/display/struct_.rs new file mode 100644 index 00000000000..fc4b1af57cf --- /dev/null +++ b/tests/display/struct_.rs @@ -0,0 +1,115 @@ +#[test] +fn test_simple_struct() { + // Test simplest struct + reparse_test!( + program { + struct Foo {} + } + ); +} + +#[test] +fn test_generic_struct() { + // Test printing struct generics + reparse_test!( + program { + struct Foo {} + struct Bar {} + } + ); +} + +#[test] +fn test_struct_where_clauses() { + // Test printing struct where clauses + reparse_test!( + program { + struct Foo where T: Trait {} + trait Trait {} + } + ); +} + +#[test] +fn test_struct_fields() { + // Test printing fields in a struct + reparse_test!( + program { + struct Foo {} + struct Bar {} + struct Baz { + x: Foo, + b: Bar + } + } + ); +} + +#[test] +fn test_struct_generic_fields() { + // Test printing fields which reference a struct's generics + reparse_test!( + program { + struct Foo<'a, T, U> { + x: (U, T), + y: &'a (), + } + } + ); +} + +#[test] +fn test_struct_keywords() { + // Test each struct keyword, as well as the combination. + reparse_test!( + program { + #[upstream] + struct UpstreamFoo {} + + #[fundamental] + struct FundamentalFoo {} + + #[phantom_data] + struct PhantomFoo {} + + #[upstream] + #[fundamental] + #[phantom_data] + struct Bar {} + } + ); +} + +#[test] +fn test_struct_repr() { + // Test each struct repr, as well as the combination of two in any ordering. 
+ reparse_test!( + program { + #[repr(C)] + struct CFoo {} + + #[repr(packed)] + struct PackedFoo {} + + #[repr(C)] + #[repr(packed)] + struct CPackedFoo {} + + #[repr(packed)] + #[repr(C)] + struct PackedCFoo {} + } + ); +} + +#[test] +fn test_struct_repr_with_flags() { + // Test printing both a repr and a flag (to ensure we get the ordering between them right). + reparse_test!( + program { + #[upstream] + #[repr(C)] + struct UpstreamCFoo {} + } + ); +} diff --git a/tests/display/trait_.rs b/tests/display/trait_.rs new file mode 100644 index 00000000000..d703724c1b5 --- /dev/null +++ b/tests/display/trait_.rs @@ -0,0 +1,110 @@ +use super::*; + +#[test] +fn test_simple_trait() { + // Simplest test for 'trait' + reparse_test!( + program { + trait Foo {} + } + ); +} + +#[test] +fn test_generic_trait() { + // Test we can print generics introduced by a trait + reparse_test!( + program { + trait Foo {} + trait Bar {} + } + ); +} + +#[test] +fn test_trait_where_clauses() { + // Test printing trait where clauses + reparse_test!( + program { + trait Foo where T: Trait {} + trait Trait {} + } + ); +} + +#[test] +fn test_basic_trait_impl() { + // Test simplest trait implementation + reparse_test!( + program { + struct Foo { } + trait Bar {} + impl Bar for Foo { } + } + ); +} + +#[test] +fn test_trait_flags() { + // Test every individual flag that can appear on a trait, as well as the + // combination of all of them. We test the combination to ensure that we + // satisfy any ordering requirements present. + let flags = vec![ + "auto", + "marker", + "upstream", + "fundamental", + "non_enumerable", + "coinductive", + "object_safe", + ]; + reparse_test(&format!( + "{}trait Hello {{}}", + flags + .iter() + .map(|f| format!("#[{}]", f)) + .collect::>() + .join("\n") + )); + for flag in flags { + reparse_test(&format!( + " + #[{0}] + trait Hello_{0} {{}} + ", + flag + )); + } +} + +#[test] +fn test_wellknown_traits() { + // Test all possible `#[lang]` attributes on traits. 
+ let well_knowns = vec![ + "sized", "copy", "clone", "drop", "fn_once", "fn_mut", "fn", "unsize", + ]; + for flag in well_knowns { + reparse_test(&format!( + " + #[lang({0})] + trait Hello_{0} {{}} + ", + flag + )); + } +} + +#[test] +fn test_lang_with_flag() { + // Test we output the correct ordering when printing a trait with both flags + // and a #[lang] attribute. + reparse_test!( + program { + #[auto] + #[lang(sized)] + trait Foo { + + } + } + ); +} diff --git a/tests/display/unique_names.rs b/tests/display/unique_names.rs new file mode 100644 index 00000000000..c97fcc505af --- /dev/null +++ b/tests/display/unique_names.rs @@ -0,0 +1,328 @@ +use chalk_integration::{program::Program, query::LoweringDatabase, tls}; +use chalk_ir::{interner::Interner, UnificationDatabase}; +use chalk_solve::{ + display::{write_items, WriterState}, + RustIrDatabase, +}; +use std::marker::PhantomData; +use std::sync::Arc; + +use super::util::{program_item_ids, ReparseTestResult}; + +/// `DuplicateNamesDb` implements `RustIrDatabase`, and returns `Foo` for all +/// requested item names. This allows us to test that names are correctly +/// de-duplicated by the display code. 
+#[derive(Debug)] +struct DuplicateNamesDb<'a, I, DB> +where + I: Interner, + DB: RustIrDatabase, +{ + db: &'a DB, + _phantom: PhantomData, +} + +impl<'a, I, DB> DuplicateNamesDb<'a, I, DB> +where + I: Interner, + DB: RustIrDatabase, +{ + fn new(db: &'a DB) -> Self { + DuplicateNamesDb { + db, + _phantom: PhantomData, + } + } +} + +impl<'a, I, DB> RustIrDatabase for DuplicateNamesDb<'a, I, DB> +where + I: Interner, + DB: RustIrDatabase, +{ + fn trait_name(&self, _trait_id: chalk_ir::TraitId) -> String { + "Foo".to_owned() + } + fn adt_name(&self, _adt_id: chalk_ir::AdtId) -> String { + "Foo".to_owned() + } + fn assoc_type_name(&self, _assoc_ty_id: chalk_ir::AssocTypeId) -> String { + "Foo".to_owned() + } + fn opaque_type_name(&self, _opaque_ty_id: chalk_ir::OpaqueTyId) -> String { + "Foo".to_owned() + } + fn fn_def_name(&self, _fn_def_id: chalk_ir::FnDefId) -> String { + "Foo".to_owned() + } + fn custom_clauses(&self) -> Vec> { + self.db.custom_clauses() + } + fn associated_ty_data( + &self, + ty: chalk_ir::AssocTypeId, + ) -> std::sync::Arc> { + self.db.associated_ty_data(ty) + } + fn trait_datum( + &self, + trait_id: chalk_ir::TraitId, + ) -> std::sync::Arc> { + self.db.trait_datum(trait_id) + } + fn adt_datum( + &self, + adt_id: chalk_ir::AdtId, + ) -> std::sync::Arc> { + self.db.adt_datum(adt_id) + } + fn adt_repr(&self, id: chalk_ir::AdtId) -> Arc> { + self.db.adt_repr(id) + } + fn adt_size_align(&self, id: chalk_ir::AdtId) -> Arc { + self.db.adt_size_align(id) + } + fn fn_def_datum( + &self, + fn_def_id: chalk_ir::FnDefId, + ) -> std::sync::Arc> { + self.db.fn_def_datum(fn_def_id) + } + fn impl_datum( + &self, + impl_id: chalk_ir::ImplId, + ) -> std::sync::Arc> { + self.db.impl_datum(impl_id) + } + fn associated_ty_from_impl( + &self, + impl_id: chalk_ir::ImplId, + assoc_type_id: chalk_ir::AssocTypeId, + ) -> Option> { + self.db.associated_ty_from_impl(impl_id, assoc_type_id) + } + fn associated_ty_value( + &self, + id: 
chalk_solve::rust_ir::AssociatedTyValueId, + ) -> std::sync::Arc> { + self.db.associated_ty_value(id) + } + fn coroutine_datum( + &self, + coroutine_id: chalk_ir::CoroutineId, + ) -> std::sync::Arc> { + self.db.coroutine_datum(coroutine_id) + } + fn coroutine_witness_datum( + &self, + coroutine_id: chalk_ir::CoroutineId, + ) -> std::sync::Arc> { + self.db.coroutine_witness_datum(coroutine_id) + } + fn opaque_ty_data( + &self, + id: chalk_ir::OpaqueTyId, + ) -> std::sync::Arc> { + self.db.opaque_ty_data(id) + } + fn hidden_opaque_type(&self, id: chalk_ir::OpaqueTyId) -> chalk_ir::Ty { + self.db.hidden_opaque_type(id) + } + fn impls_for_trait( + &self, + trait_id: chalk_ir::TraitId, + parameters: &[chalk_ir::GenericArg], + binders: &chalk_ir::CanonicalVarKinds, + ) -> Vec> { + self.db.impls_for_trait(trait_id, parameters, binders) + } + fn local_impls_to_coherence_check( + &self, + trait_id: chalk_ir::TraitId, + ) -> Vec> { + self.db.local_impls_to_coherence_check(trait_id) + } + fn impl_provided_for( + &self, + auto_trait_id: chalk_ir::TraitId, + app_ty: &chalk_ir::TyKind, + ) -> bool { + self.db.impl_provided_for(auto_trait_id, app_ty) + } + fn well_known_trait_id( + &self, + well_known_trait: chalk_solve::rust_ir::WellKnownTrait, + ) -> Option> { + self.db.well_known_trait_id(well_known_trait) + } + fn well_known_assoc_type_id( + &self, + assoc_type: chalk_solve::rust_ir::WellKnownAssocType, + ) -> Option> { + self.db.well_known_assoc_type_id(assoc_type) + } + fn program_clauses_for_env( + &self, + environment: &chalk_ir::Environment, + ) -> chalk_ir::ProgramClauses { + self.db.program_clauses_for_env(environment) + } + fn interner(&self) -> I { + self.db.interner() + } + fn is_object_safe(&self, trait_id: chalk_ir::TraitId) -> bool { + self.db.is_object_safe(trait_id) + } + fn closure_kind( + &self, + closure_id: chalk_ir::ClosureId, + substs: &chalk_ir::Substitution, + ) -> chalk_solve::rust_ir::ClosureKind { + self.db.closure_kind(closure_id, substs) + } + fn 
closure_inputs_and_output( + &self, + closure_id: chalk_ir::ClosureId, + substs: &chalk_ir::Substitution, + ) -> chalk_ir::Binders> { + self.db.closure_inputs_and_output(closure_id, substs) + } + fn closure_upvars( + &self, + closure_id: chalk_ir::ClosureId, + substs: &chalk_ir::Substitution, + ) -> chalk_ir::Binders> { + self.db.closure_upvars(closure_id, substs) + } + fn closure_fn_substitution( + &self, + closure_id: chalk_ir::ClosureId, + substs: &chalk_ir::Substitution, + ) -> chalk_ir::Substitution { + self.db.closure_fn_substitution(closure_id, substs) + } + + fn discriminant_type(&self, ty: chalk_ir::Ty) -> chalk_ir::Ty { + self.db.discriminant_type(ty) + } + + fn unification_database(&self) -> &dyn UnificationDatabase { + self.db.unification_database() + } +} + +/// Writes the given program with all names duplicated and then deduplicated by +/// display code. +/// +/// This additionally tests to ensure that duplicated names are deduplicated +/// across `write_items` calls, by making one write_items call per item. +pub fn write_program_duplicated_names(db: &Program) -> String { + let mut out = String::new(); + let ids = program_item_ids(db); + let db = DuplicateNamesDb::new(db); + let ws = WriterState::new(db); + // Test that names are preserved between write_items calls + for id in ids { + write_items(&mut out, &ws, std::iter::once(id)).unwrap(); + } + out +} + +/// Tests that a given source file can function given a database which always +/// returns the same name for all variables. +/// +/// Only checks that the resulting program parses, not that it matches any +/// particular format. Use returned data to perform further checks. 
+pub fn run_reparse_with_duplicate_names(program_text: &str) -> ReparseTestResult<'_> { + let original_db = chalk_integration::db::ChalkDatabase::with(program_text, <_>::default()); + let original_program = original_db.program_ir().unwrap_or_else(|e| { + panic!( + "unable to lower test program:\n{}\nSource:\n{}\n", + e, program_text + ) + }); + let output_text = tls::set_current_program(&original_program, || { + write_program_duplicated_names(&*original_program) + }); + let output_db = chalk_integration::db::ChalkDatabase::with(&output_text, <_>::default()); + let output_program = output_db.program_ir().unwrap_or_else(|e| { + panic!( + "error lowering writer output:\n{}\nNew source:\n{}\n", + e, output_text + ) + }); + eprintln!("\nTest Succeeded:\n\n{}\n---", output_text); + ReparseTestResult { + original_text: program_text, + output_text, + target_text: "", + original_program: original_program.clone(), + output_program, + target_program: original_program, + } +} + +/// Performs a test on chalk's `display` code to render programs as `.chalk` files. +macro_rules! reparse_with_duplicated_names { + (program $program:tt) => { + run_reparse_with_duplicate_names(crate::display::util::strip_leading_trailing_braces( + stringify!($program), + )) + }; +} + +#[test] +fn lots_of_structs() { + reparse_with_duplicated_names! { + program { + struct A {} + struct B {} + struct C {} + struct D {} + } + }; +} +#[test] +fn lots_of_traits() { + reparse_with_duplicated_names! { + program { + trait A {} + trait B {} + trait C {} + trait D {} + } + }; +} +#[test] +fn traits_and_structs() { + reparse_with_duplicated_names! { + program { + trait A {} + struct B {} + trait C {} + struct D {} + } + }; +} +#[test] +fn assoc_types() { + reparse_with_duplicated_names! 
{ + program { + trait A { + type A; + type A; + type C; + type D; + } + trait B { + type A; + type B; + type C; + type D; + } + struct Test where T: B { + field: ::C, + } + } + }; +} diff --git a/tests/display/util.rs b/tests/display/util.rs new file mode 100644 index 00000000000..1f40313b4a0 --- /dev/null +++ b/tests/display/util.rs @@ -0,0 +1,260 @@ +//! Utilities and macros for use in display tests. +//! +//! This can't live as a submodule of `test_util.rs`, as then it would conflict +//! with `display/mod.rs` for the name `mod display` when `test_util.rs` is +//! compiled as a standalone test (rather than from `lib.rs`). +use chalk_integration::{interner::ChalkIr, program::Program, query::LoweringDatabase, tls}; +use chalk_solve::{ + display::{write_items, WriterState}, + logging_db::RecordedItemId, +}; +use regex::Regex; +use std::{fmt::Debug, sync::Arc}; + +pub fn strip_leading_trailing_braces(input: &str) -> &str { + assert!(input.starts_with('{')); + assert!(input.ends_with('}')); + + &input[1..input.len() - 1] +} + +/// Performs a test on chalk's `display` code to render programs as `.chalk` files. +macro_rules! reparse_test { + // Test that a program, when rendered and then reparsed, results in a + // program identical to the input. + (program $program:tt) => { + crate::display::util::reparse_test(crate::display::util::strip_leading_trailing_braces( + stringify!($program), + )) + }; + // Tests that a program, when rendered and then reparsed, results in a + // second, different program. Good for cases where this process is non-convergent. + (program $program:tt produces $diff:tt) => { + crate::display::util::reparse_into_different_test( + crate::display::util::strip_leading_trailing_braces(stringify!($program)), + crate::display::util::strip_leading_trailing_braces(stringify!($diff)), + ) + }; + // Tests that a program, when rendered, results in a string which matches the + // given regex. 
+ (program $program:tt formatting matches $res:literal) => { + crate::display::util::test_formatting( + crate::display::util::strip_leading_trailing_braces(stringify!($program)), + $res, + ) + }; +} + +/// Retrieves all item ids from a given `Program` necessary to print the entire +/// program. +pub fn program_item_ids(program: &Program) -> impl Iterator> + '_ { + macro_rules! grab_ids { + ($map:expr) => { + $map.keys() + .copied() + .map(|id| (id.0, RecordedItemId::from(id))) + }; + } + let mut ids = std::iter::empty() + .chain(grab_ids!(program.adt_data)) + .chain(grab_ids!(program.trait_data)) + .chain(grab_ids!(program.impl_data)) + .chain(grab_ids!(program.opaque_ty_data)) + .chain(grab_ids!(program.fn_def_data)) + .collect::>(); + + // sort by the RawIds so we maintain exact program input order (note: this + // is here rather than in logging_db.rs as we can't in general maintain this + // - only for `chalk_integration`'s RawIds). + // + // We need to maintain exact order because we abuse Program's Eq + // implementation to check the results of our tests, and which structs have + // which ids is part of that data. + ids.sort_by_key(|(raw_id, _)| *raw_id); + + // then discard the RawId since the RecordedItemId has the same information, + // and is what we actually want to consume. + ids.into_iter().map(|(_, id)| id) +} + +/// Sends all items in a `chalk_integration::Program` through `display` code and +/// returns the string representing the program. +pub fn write_program(program: &Program) -> String { + let mut out = String::new(); + let ids = program_item_ids(program); + write_items::<_, _, Program, _, _>(&mut out, &WriterState::new(program), ids).unwrap(); + out +} + +/// Diffs two `Program`s. This diffs the verbose debug output of `Program`, so +/// that you can see exactly what parts have changed in case a test fails. 
+/// +/// Will produces something akin to the following: +/// +/// ```diff +/// Program { +/// - adt_ids: { +/// - Atom('Foo' type=inline): AdtId(#0), +/// - }, +/// - adt_kinds: { +/// - AdtId(#0): TypeKind { +/// - sort: Struct, +/// - name: Atom('Foo' type=inline), +/// - binders: for[] Unit, +/// - }, +/// - }, +/// + adt_ids: {}, +/// + adt_kinds: {}, +/// fn_def_ids: {}, +/// fn_def_kinds: {}, +/// trait_ids: {}, +/// trait_kinds: {}, +/// - adt_data: { +/// - AdtId(#0): AdtDatum { +/// - binders: for[] AdtDatumBound { +/// - fields: [], +/// - where_clauses: [], +/// - }, +/// - id: AdtId(#0), +/// - flags: AdtFlags { +/// - upstream: false, +/// - fundamental: false, +/// - }, +/// - }, +/// - }, +/// + adt_data: {}, +/// fn_def_data: {}, +/// impl_data: {}, +/// associated_ty_values: {}, +/// opaque_ty_ids: {}, +/// opaque_ty_kinds: {}, +/// opaque_ty_data: {}, +/// trait_data: {}, +/// well_known_traits: {}, +/// associated_ty_data: {}, +/// custom_clauses: [], +/// object_safe_traits: {}, +/// } +/// ``` +fn program_diff(original: &impl Debug, produced: &impl Debug) -> String { + use std::fmt::Write; + + let mut out = String::new(); + let original = format!("{:#?}", original); + let produced = format!("{:#?}", produced); + for line in diff::lines(&original, &produced) { + match line { + diff::Result::Left(l) => writeln!(out, "-{}", l), + diff::Result::Both(l, _) => writeln!(out, " {}", l), + diff::Result::Right(r) => writeln!(out, "+{}", r), + } + .expect("writing to string never fails"); + } + out +} + +/// Data from performing a reparse test which can be used to make additional +/// assertions. +/// +/// Not necessary for use unless additional assertions are necessary. 
+#[allow(unused)] +pub struct ReparseTestResult<'a> { + /// The program text for the original test code + pub original_text: &'a str, + /// The program text for the code the test says should be output + pub target_text: &'a str, + /// The actual reparsed output text + pub output_text: String, + /// Lowered version of `original_text` + pub original_program: Arc, + /// Lowered version of `target_text` + pub target_program: Arc, + /// Lowered version of `output_text` + pub output_program: Arc, +} + +/// Parses the input, lowers it, prints it, then re-parses and re-lowers, +/// failing if the two lowered programs don't match. +pub fn reparse_test(program_text: &str) -> ReparseTestResult<'_> { + reparse_into_different_test(program_text, program_text) +} + +/// [`reparse_test`], but allows a non-convergent test program to be tested +/// against a different target. +pub fn reparse_into_different_test<'a>( + program_text: &'a str, + target_text: &'a str, +) -> ReparseTestResult<'a> { + let original_db = chalk_integration::db::ChalkDatabase::with(program_text, <_>::default()); + let original_program = original_db.program_ir().unwrap_or_else(|e| { + panic!( + "unable to lower test program:\n{}\nSource:\n{}\n", + e, program_text + ) + }); + let target_db = chalk_integration::db::ChalkDatabase::with(target_text, <_>::default()); + let target_program = target_db.program_ir().unwrap_or_else(|e| { + panic!( + "unable to lower test program:\n{}\nSource:\n{}\n", + e, program_text + ) + }); + let output_text = + tls::set_current_program(&original_program, || write_program(&original_program)); + let output_db = chalk_integration::db::ChalkDatabase::with(&output_text, <_>::default()); + let output_program = output_db.program_ir().unwrap_or_else(|e| { + panic!( + "error lowering writer output:\n{}\nNew source:\n{}\n", + e, output_text + ) + }); + if output_program != target_program { + panic!( + "WriteProgram produced different program.\n\ + Diff:\n{}\n\ + Source:\n{}\n{}\ + New 
Source:\n{}\n", + program_diff(&target_program, &output_program), + program_text, + if target_text != program_text { + format!( + "Test Should Output (different from original):\n{}\n", + target_text + ) + } else { + String::new() + }, + output_text + ); + } + eprintln!("\nTest Succeeded:\n\n{}\n---", output_text); + ReparseTestResult { + original_text: program_text, + output_text, + target_text, + original_program, + output_program, + target_program, + } +} + +/// Tests that a string matches a given regex pattern, erroring out if it +/// doesn't. +/// +/// This is used for exact formatting tests, for testing things like indentation. +pub fn test_formatting(src: &str, acceptable: &str) { + let result = reparse_test(src); + let acceptable = Regex::new(acceptable).unwrap(); + if !acceptable.is_match(&result.output_text) { + panic!( + "output_text's formatting didn't match the criteria.\ + \noutput_text:\n\"{0}\"\ + \ncriteria:\n\"{1}\"\ + \ndebug output: {0:?}\ + \ndebug criteria: {2:?}\n", + result.output_text, + acceptable, + acceptable.as_str() + ); + } +} diff --git a/tests/display/where_clauses.rs b/tests/display/where_clauses.rs new file mode 100644 index 00000000000..7896c746cc8 --- /dev/null +++ b/tests/display/where_clauses.rs @@ -0,0 +1,240 @@ +#[test] +fn test_alias_eq() { + // Test alias equals bounds (`Third`) in where clauses. + + // This test uses "produces" as a workaround for the lowering & writing + // code's behavior. Specifically, `Foo: Bax` will be transformed + // into `Foo: Bax, Foo: Bax`, even if the where clause already + // includes `Foo: Bax`. The writer code doesn't remove that addition. + // + // No matter how many `Foo: Bax` we have in our input, we're always going + // to get an extra `Foo: Bax` in the output, so they'll never be equal + // and we need the separate output program. 
+ reparse_test!( + program { + struct Foo { } + trait Bar { } + trait Third { + type Assoc; + } + impl Bar for Foo + where + Foo: Third + { + } + } + produces { + struct Foo { } + trait Bar { } + trait Third { + type Assoc; + } + impl Bar for Foo + where + Foo: Third, + Foo: Third + { + } + } + ); + reparse_test!( + program { + struct Foo { } + trait Bar { } + trait Third { + type Assoc; + } + impl Bar for Foo + where + Foo: Third, + ::Assoc: Third + { + } + } + ); +} + +#[test] +fn test_dyn_on_left() { + // Test dyn on the left side of a where clause + // where dyn Bar + 'a: Bar + // ^^^^^^^^^^^^ + reparse_test!( + program { + struct Foo { } + trait Bar { } + trait Baz<'a> { + type Assoc + where + dyn Bar + 'a: Bar; + } + impl<'a> Bar for Foo + where + dyn Bar + 'a: Bar + { + } + } + ); +} + +#[test] +fn test_generic_vars_inside_assoc_bounds() { + reparse_test!( + program { + struct Foo { } + trait Bar { } + trait Baz<'a> { + type Assoc + where + dyn Bar + 'a: Bar, + T: Bar, + Foo: Bar; + } + } + ); + reparse_test!( + program { + struct Foo { } + trait Bar { } + trait Baz<'a, U> { + type Assoc + where + dyn Bar + 'a: Bar, + T: Bar, + Foo: Bar; + } + } + ); + reparse_test!( + program { + struct Foo { } + trait Bar { } + trait Baz { + type Assoc: Bar; + } + } + ); + reparse_test!( + program { + struct Foo { } + trait Bar { + type Quz; + } + trait Baz { + type Zuq: Bar; + } + } + ); +} + +#[test] +fn test_complicated_bounds() { + reparse_test!( + program { + struct Foo { } + trait Bar { } + trait Baz { } + trait Bax { type BaxT; } + trait Test<'a> { + type Assoc: Bar + Baz + Bax + where + Foo: Bax, + Foo: Bar, + dyn Bar + 'a: Baz; + } + } + produces { + struct Foo { } + trait Bar { } + trait Baz { } + trait Bax { type BaxT; } + trait Test<'a> { + type Assoc: Bar + Baz + Bax + where + Foo: Bax, + Foo: Bax, + Foo: Bar, + dyn Bar + 'a: Baz; + } + } + ); +} + +#[test] +fn test_struct_where_clauses() { + reparse_test!( + program { + struct Foo where T: Baz, U: Bez { } + 
trait Baz { } + trait Bez { } + } + ); +} + +#[test] +fn test_impl_where_clauses() { + reparse_test!( + program { + struct Foo where T: Baz, U: Bez { } + trait Baz { } + trait Bez { } + impl Bez for Foo where T: Baz, U: Bez { } + } + ); + // TODO: more of these +} + +#[test] +fn test_trait_projection() { + reparse_test!( + program { + struct Flux {} + struct Foo where U: Bez, >::Assoc: Baz { } + trait Baz { } + trait Bez { + type Assoc; + } + } + ); +} + +#[test] +fn test_trait_projection_with_dyn_arg() { + reparse_test!( + program { + struct Foo<'a, T, U> where U: Bez, >::Assoc: Baz { } + trait Baz { } + trait Bez { + type Assoc; + } + } + ); +} + +#[test] +fn test_forall_in_where() { + reparse_test!( + program { + trait Bax {} + trait Foo where forall T: Bax {} + } + ); + reparse_test!( + program { + trait Buz<'a> {} + trait Foo where forall<'a> T: Buz<'a> {} + } + ); + reparse_test!( + program { + struct Foo where forall T: Biz {} + trait Biz {} + } + ); + reparse_test!( + program { + struct Foo where forall<'a> T: Bez<'a> {} + trait Bez<'a> {} + } + ); +} diff --git a/tests/integration/mod.rs b/tests/integration/mod.rs new file mode 100644 index 00000000000..2dabf71e941 --- /dev/null +++ b/tests/integration/mod.rs @@ -0,0 +1 @@ +mod panic; diff --git a/tests/integration/panic.rs b/tests/integration/panic.rs new file mode 100644 index 00000000000..9606f24754a --- /dev/null +++ b/tests/integration/panic.rs @@ -0,0 +1,441 @@ +use chalk_integration::interner::{ChalkIr, RawId}; +use chalk_integration::SolverChoice; +use chalk_ir::*; +use chalk_solve::rust_ir::*; +use chalk_solve::RustIrDatabase; +use std::sync::Arc; + +// FIXME: some of these are probably redundant, so we should figure out which panic in the same place in `chalk-engine` + +#[derive(Debug)] +enum PanickingMethod { + NoPanic, + CustomClauses, + TraitDatum, + ImplDatum, + ImplsForTrait, + ProgramClausesForEnv, + Interner, +} + +impl Default for PanickingMethod { + fn default() -> Self { + Self::NoPanic 
+ } +} + +#[derive(Debug, Default)] +struct MockDatabase { + panicking_method: PanickingMethod, +} + +impl UnificationDatabase for MockDatabase { + fn fn_def_variance(&self, _fn_def_id: FnDefId) -> Variances { + Variances::empty(self.interner()) + } + + fn adt_variance(&self, _adt_id: AdtId) -> Variances { + Variances::empty(self.interner()) + } +} + +/// This DB represents the following lowered program: +/// +/// struct Foo { } +/// trait Bar { } +/// impl Bar for Foo { } +#[allow(unused_variables)] +impl RustIrDatabase for MockDatabase { + fn custom_clauses(&self) -> Vec> { + if let PanickingMethod::CustomClauses = self.panicking_method { + panic!("custom_clauses panic"); + } + + vec![] + } + + fn associated_ty_data(&self, ty: AssocTypeId) -> Arc> { + unimplemented!() + } + + // `trait Bar`, id `0` + fn trait_datum(&self, id: TraitId) -> Arc> { + if let PanickingMethod::TraitDatum = self.panicking_method { + panic!("trait_datum panic"); + } + + assert_eq!(id.0.index, 0); + Arc::new(TraitDatum { + id, + binders: Binders::new( + VariableKinds::empty(ChalkIr), + TraitDatumBound { + where_clauses: vec![], + }, + ), + flags: TraitFlags { + auto: false, + marker: false, + upstream: false, + fundamental: false, + non_enumerable: false, + coinductive: false, + }, + associated_ty_ids: vec![], + well_known: None, + }) + } + + // `impl Bar for Foo`, id `1` + fn impl_datum(&self, id: ImplId) -> Arc> { + if let PanickingMethod::ImplDatum = self.panicking_method { + panic!("impl_datum panic"); + } + + assert_eq!(id.0.index, 1); + + let substitution = Ty::new( + ChalkIr, + TyKind::Adt(AdtId(RawId { index: 1 }), Substitution::empty(ChalkIr)), + ); + + let binders = Binders::new( + VariableKinds::empty(ChalkIr), + ImplDatumBound { + trait_ref: TraitRef { + trait_id: TraitId(RawId { index: 0 }), + substitution: Substitution::from1(ChalkIr, substitution), + }, + where_clauses: vec![], + }, + ); + + Arc::new(ImplDatum { + polarity: Polarity::Positive, + binders, + impl_type: 
ImplType::Local, + associated_ty_value_ids: vec![], + }) + } + + fn associated_ty_from_impl( + &self, + _impl_id: ImplId, + _assoc_type_id: AssocTypeId, + ) -> Option> { + unimplemented!() + } + + fn associated_ty_value( + &self, + id: AssociatedTyValueId, + ) -> Arc> { + unimplemented!() + } + + fn opaque_ty_data(&self, id: OpaqueTyId) -> Arc> { + unimplemented!() + } + + fn hidden_opaque_type(&self, id: OpaqueTyId) -> Ty { + unimplemented!() + } + + fn adt_datum(&self, id: AdtId) -> Arc> { + // Only needed because we always access the adt datum for logging + Arc::new(AdtDatum { + binders: Binders::empty( + ChalkIr, + AdtDatumBound { + variants: vec![], + where_clauses: vec![], + }, + ), + flags: AdtFlags { + fundamental: false, + phantom_data: false, + upstream: false, + }, + id, + kind: AdtKind::Enum, + }) + } + + fn adt_repr(&self, id: AdtId) -> Arc> { + unimplemented!() + } + + fn adt_size_align(&self, id: AdtId) -> Arc { + unimplemented!() + } + + fn fn_def_datum(&self, fn_def_id: FnDefId) -> Arc> { + unimplemented!() + } + + fn coroutine_datum(&self, coroutine_id: CoroutineId) -> Arc> { + unimplemented!() + } + + fn coroutine_witness_datum( + &self, + coroutine_id: CoroutineId, + ) -> Arc> { + unimplemented!() + } + + // All `Bar` impls + fn impls_for_trait( + &self, + trait_id: TraitId, + parameters: &[GenericArg], + binders: &CanonicalVarKinds, + ) -> Vec> { + if let PanickingMethod::ImplsForTrait = self.panicking_method { + panic!("impls_for_trait panic"); + } + + assert_eq!(trait_id.0.index, 0); + vec![ImplId(RawId { index: 1 })] + } + + fn local_impls_to_coherence_check(&self, trait_id: TraitId) -> Vec> { + unimplemented!() + } + + fn impl_provided_for(&self, auto_trait_id: TraitId, app_ty: &TyKind) -> bool { + unimplemented!() + } + + fn well_known_trait_id(&self, well_known_trait: WellKnownTrait) -> Option> { + unimplemented!() + } + + fn well_known_assoc_type_id( + &self, + assoc_type: WellKnownAssocType, + ) -> Option> { + unimplemented!() + } + + 
fn program_clauses_for_env( + &self, + environment: &Environment, + ) -> ProgramClauses { + if let PanickingMethod::ProgramClausesForEnv = self.panicking_method { + panic!("program_clauses_for_env panic") + } + + ProgramClauses::empty(ChalkIr) + } + + fn interner(&self) -> ChalkIr { + if let PanickingMethod::Interner = self.panicking_method { + panic!("interner panic") + } + + ChalkIr + } + + fn is_object_safe(&self, trait_id: TraitId) -> bool { + unimplemented!() + } + + fn closure_inputs_and_output( + &self, + closure_id: ClosureId, + substs: &Substitution, + ) -> Binders> { + unimplemented!() + } + + fn closure_kind( + &self, + closure_id: ClosureId, + substs: &Substitution, + ) -> ClosureKind { + unimplemented!() + } + + fn closure_upvars( + &self, + closure_id: ClosureId, + substs: &Substitution, + ) -> Binders> { + unimplemented!() + } + + fn closure_fn_substitution( + &self, + closure_id: ClosureId, + substs: &Substitution, + ) -> Substitution { + unimplemented!() + } + + fn discriminant_type(&self, ty: Ty) -> Ty { + unimplemented!() + } + + fn unification_database(&self) -> &dyn UnificationDatabase { + self + } +} + +fn prepare_goal() -> UCanonical>> { + use chalk_integration::interner; + use chalk_ir::*; + + // Goal: + // + // Foo: Bar + UCanonical { + canonical: Canonical { + binders: CanonicalVarKinds::empty(ChalkIr), + value: InEnvironment { + environment: Environment::new(ChalkIr), + goal: GoalData::DomainGoal(DomainGoal::Holds(WhereClause::Implemented(TraitRef { + trait_id: TraitId(interner::RawId { index: 0 }), + substitution: Substitution::from1( + ChalkIr, + Ty::new( + ChalkIr, + TyKind::Adt( + AdtId(interner::RawId { index: 1 }), + Substitution::empty(ChalkIr), + ), + ), + ), + }))) + .intern(ChalkIr), + }, + }, + universes: 1, + } +} + +#[test] +fn custom_clauses_panics() { + use std::panic; + + let peeled_goal = prepare_goal(); + let mut solver = SolverChoice::slg_default().into_solver(); + + // solve goal but this will panic + let mut db = 
MockDatabase { + panicking_method: PanickingMethod::CustomClauses, + }; + let result = panic::catch_unwind(panic::AssertUnwindSafe(|| { + solver.solve(&db, &peeled_goal); + })); + assert!(result.is_err()); + + // solve again but without panicking this time + db.panicking_method = PanickingMethod::NoPanic; + assert!(solver.solve(&db, &peeled_goal).is_some()); +} + +#[test] +fn trait_datum_panics() { + use std::panic; + + let peeled_goal = prepare_goal(); + let mut solver = SolverChoice::slg_default().into_solver(); + + // solve goal but this will panic + let mut db = MockDatabase { + panicking_method: PanickingMethod::TraitDatum, + }; + let result = panic::catch_unwind(panic::AssertUnwindSafe(|| { + solver.solve(&db, &peeled_goal); + })); + assert!(result.is_err()); + + // solve again but without panicking this time + db.panicking_method = PanickingMethod::NoPanic; + assert!(solver.solve(&db, &peeled_goal).is_some()); +} + +#[test] +fn impl_datum_panics() { + use std::panic; + + let peeled_goal = prepare_goal(); + let mut solver = SolverChoice::slg_default().into_solver(); + + // solve goal but this will panic + let mut db = MockDatabase { + panicking_method: PanickingMethod::ImplDatum, + }; + let result = panic::catch_unwind(panic::AssertUnwindSafe(|| { + solver.solve(&db, &peeled_goal); + })); + assert!(result.is_err()); + + // solve again but without panicking this time + db.panicking_method = PanickingMethod::NoPanic; + assert!(solver.solve(&db, &peeled_goal).is_some()); +} + +#[test] +fn impls_for_trait() { + use std::panic; + + let peeled_goal = prepare_goal(); + let mut solver = SolverChoice::slg_default().into_solver(); + + // solve goal but this will panic + let mut db = MockDatabase { + panicking_method: PanickingMethod::ImplsForTrait, + }; + let result = panic::catch_unwind(panic::AssertUnwindSafe(|| { + solver.solve(&db, &peeled_goal); + })); + assert!(result.is_err()); + + // solve again but without panicking this time + db.panicking_method = 
PanickingMethod::NoPanic; + assert!(solver.solve(&db, &peeled_goal).is_some()); +} + +#[test] +fn program_clauses_for_env() { + use std::panic; + + let peeled_goal = prepare_goal(); + let mut solver = SolverChoice::slg_default().into_solver(); + + // solve goal but this will panic + let mut db = MockDatabase { + panicking_method: PanickingMethod::ProgramClausesForEnv, + }; + let result = panic::catch_unwind(panic::AssertUnwindSafe(|| { + solver.solve(&db, &peeled_goal); + })); + assert!(result.is_err()); + + // solve again but without panicking this time + db.panicking_method = PanickingMethod::NoPanic; + assert!(solver.solve(&db, &peeled_goal).is_some()); +} + +#[test] +fn interner() { + use std::panic; + + let peeled_goal = prepare_goal(); + let mut solver = SolverChoice::slg_default().into_solver(); + + // solve goal but this will panic + let mut db = MockDatabase { + panicking_method: PanickingMethod::Interner, + }; + let result = panic::catch_unwind(panic::AssertUnwindSafe(|| { + solver.solve(&db, &peeled_goal); + })); + assert!(result.is_err()); + + // solve again but without panicking this time + db.panicking_method = PanickingMethod::NoPanic; + assert!(solver.solve(&db, &peeled_goal).is_some()); +} diff --git a/tests/lib.rs b/tests/lib.rs new file mode 100644 index 00000000000..98e1659c38e --- /dev/null +++ b/tests/lib.rs @@ -0,0 +1,10 @@ +#[macro_use] +mod test_util; +#[macro_use] +mod test; + +mod display; +mod logging_db; +mod lowering; + +mod integration; diff --git a/tests/logging_db/mod.rs b/tests/logging_db/mod.rs new file mode 100644 index 00000000000..16b1d5ea9a0 --- /dev/null +++ b/tests/logging_db/mod.rs @@ -0,0 +1,459 @@ +//! Tests for `LoggingRustIrDatabase` which tests its functionality to record +//! types and stubs. +//! +//! Each tests records the trait solver solving something, and then runs the +//! solver on the output `LoggingRustIrDatabase` writes.These tests _don't_ test +//! 
that the output program is identical to the input, only that the resulting +//! program allows solving the same goals. +//! +//! Note that this does not, and should not, test the majority of the rendering +//! code. The code to render specific items and syntax details is rigorously +//! tested in `tests/display/`. +#[macro_use] +mod util; + +#[test] +fn records_struct_trait_and_impl() { + logging_db_output_sufficient! { + program { + struct S {} + + trait Trait {} + + impl Trait for S {} + } + + goal { + S: Trait + } yields { + "Unique" + } + } +} + +#[test] +fn records_opaque_type() { + logging_db_output_sufficient! { + program { + struct S {} + + trait Trait {} + impl Trait for S {} + + opaque type Foo: Trait = S; + } + + goal { + Foo: Trait + } yields { + "Unique" + } + } +} + +#[test] +fn records_fn_def() { + logging_db_output_sufficient! { + program { + #[lang(sized)] + trait Sized { } + + fn foo(); + } + goal { + foo: Sized + } yields { + "Unique" + } + } +} + +#[test] +fn records_generics() { + logging_db_output_sufficient! { + program { + struct Foo {} + trait Bar {} + impl Bar for Foo<()> {} + } + + goal { + Foo<()>: Bar + } yields { + "Unique" + } + goal { + Foo: Bar + } yields { + "No possible solution" + } + } +} + +#[test] +fn records_parents_parent() { + logging_db_output_sufficient! { + program { + struct S {} + + trait Grandparent {} + trait Parent where Self: Grandparent {} + trait Child where Self: Parent {} + impl Grandparent for S {} + impl Parent for S {} + impl Child for S {} + } + + goal { + S: Child + } yields { + "Unique" + } + } +} + +#[test] +fn records_associated_type_bounds() { + logging_db_output_sufficient! { + program { + trait Foo { + type Assoc: Bar; + } + trait Bar { + + } + + struct S {} + impl Foo for S { + type Assoc = S; + } + impl Bar for S {} + } + + goal { + S: Foo + } yields { + "Unique" + } + } +} + +#[test] +fn records_generic_impls() { + logging_db_output_sufficient! 
{ + program { + struct S {} + struct V {} + + trait Foo {} + trait Bar {} + + impl Foo for S {} + + impl Bar for T where T: Foo { + + } + } + + goal { + S: Bar + } yields { + "Unique" + } + } + + logging_db_output_sufficient! { + program { + struct S {} + struct V {} + + trait Foo {} + trait Bar {} + + impl Foo for S {} + + impl Bar for T where T: Foo { + + } + } + + goal { + V: Bar + } yields { + "No possible solution" + } + } +} + +#[test] +fn stubs_types_from_assoc_type_bounds() { + logging_db_output_sufficient! { + program { + trait Foo { + type Assoc: Bar; + } + trait Bar {} + impl Foo for () { + type Assoc = (); + } + } + + goal { + (): Foo + } yields { + "Unique" + } + } +} + +#[test] +fn stubs_types_from_assoc_type_values_not_mentioned() { + logging_db_output_sufficient! { + program { + trait Foo { + type Assoc; + } + struct Baz {} + impl Foo for () { + type Assoc = Baz; + } + } + + goal { + (): Foo + } yields { + "Unique" + } + } +} + +#[test] +fn stubs_types_from_opaque_ty_bounds() { + logging_db_output_sufficient! { + program { + trait Foo {} + trait Fuu {} + struct Baz {} + opaque type Bar: Foo + Fuu = Baz; + } + + goal { + Bar: Foo + } yields { + "Unique" + } + } +} + +#[test] +fn opaque_ty_in_opaque_ty() { + logging_db_output_sufficient! { + program { + trait Foo {} + trait Fuu {} + struct Baz {} + opaque type Baq: Foo + Fuu = Baz; + opaque type Bar: Foo + Fuu = Baq; + } + + goal { + Bar: Foo + } yields { + "Unique" + } + } +} + +#[test] +fn opaque_ty_in_projection() { + logging_db_output_sufficient! { + program { + struct Baz {} + trait Foo {} + trait Fuu {} + trait Fuut { + type Assoc; + } + impl Fuut for Baz { + type Assoc = Baq; + } + impl Foo for Baz + where + Baz: Fuut + { } + opaque type Baq: Foo + Fuu = Baz; + } + + goal { + Baz: Foo + } yields { + "Unique" + } + } +} + +#[test] +fn stubs_types_in_dyn_ty() { + logging_db_output_sufficient! 
{ + program { + trait Foo { + type Assoc<'a>; + } + trait Other {} + impl Foo for () { + type Assoc<'a> = dyn Other + 'a; + } + } + + goal { + (): Foo + } yields { + "Unique" + } + } +} + +#[test] +fn can_stub_traits_with_unreferenced_assoc_ty() { + // None of our code will bring in `SuperNotReferenced`'s definition, so if + // we fail to remove the bounds on `NotReferenced::Assoc`, then it will fail. + + // two tests where we don't reference the assoc ty. + logging_db_output_sufficient! { + program { + trait SuperNotReferenced {} + trait NotReferenced { + type Assoc: SuperNotReferenced; + } + trait Referenced where Self: NotReferenced {} + impl Referenced for () {} + } + + goal { + (): Referenced + } yields { + "Unique" + } + } + logging_db_output_sufficient! { + program { + trait SuperNotReferenced {} + trait NotReferenced { + type Assoc where Self: SuperNotReferenced; + } + trait Referenced where Self: NotReferenced {} + impl Referenced for () {} + } + + goal { + (): Referenced + } yields { + "Unique" + } + } +} + +#[test] +fn can_stub_traits_with_referenced_assoc_ty() { + // two tests where we do reference the assoc ty + logging_db_output_sufficient! { + program { + trait SuperNotReferenced {} + trait NotReferenced { + type Assoc: SuperNotReferenced; + } + trait Referenced where Self: NotReferenced {} + impl Referenced for () {} + } + + goal { + (): Referenced + } yields { + "Unique" + } + } + logging_db_output_sufficient! { + program { + trait SuperNotReferenced {} + trait NotReferenced { + type Assoc where (): SuperNotReferenced; + } + trait Referenced where Self: NotReferenced {} + impl Referenced for () {} + } + + goal { + (): Referenced + } yields { + "Unique" + } + } +} + +#[test] +fn can_stub_types_referenced_in_alias_ty_generics() { + logging_db_output_sufficient! 
{ + program { + struct ThisTypeShouldBeStubbed {} + trait HasGenericAssoc { + type Assoc; + } + trait Referenced where Self: HasGenericAssoc=()> {} + impl Referenced for () {} + } + + goal { + (): Referenced + } yields { + "Unique" + } + } +} + +#[test] +fn can_stub_types_referenced_in_alias_ty_bounds() { + logging_db_output_sufficient! { + program { + struct ThisTypeShouldBeStubbed {} + trait HasAssoc { + type Assoc; + } + trait Referenced where Self: HasAssoc {} + impl Referenced for () {} + } + + goal { + (): Referenced + } yields { + "Unique" + } + } +} + +#[test] +fn does_not_need_necessary_separate_impl() { + // this should leave out "impl Bar for Fox" and the result should pass the + // test (it won't be well-formed, but that's OK.) + logging_db_output_sufficient! { + program { + trait Box { + type Assoc: Bar; + } + trait Bar {} + + struct Foo {} + impl Box for Foo { + type Assoc = Fox; + } + + struct Fox {} + impl Bar for Fox {} + } + + goal { + Foo: Box + } yields { + "Unique" + } + } +} diff --git a/tests/logging_db/util.rs b/tests/logging_db/util.rs new file mode 100644 index 00000000000..ca00e9a24ff --- /dev/null +++ b/tests/logging_db/util.rs @@ -0,0 +1,117 @@ +//! Macros / utilities for logging_db tests. +//! +//! This is not a submodule of `test_util` as it depends on macros declared in +//! `test/mod.rs`, and `test_util.rs` is compiled both with and without access +//! to `test/`. We can't compile without access to `test/`, so we can't be under +//! of `test_util.rs`. +use chalk_integration::{ + db::ChalkDatabase, lowering::lower_goal, program::Program, query::LoweringDatabase, + SolverChoice, +}; +use chalk_solve::ext::*; +use chalk_solve::logging_db::LoggingRustIrDatabase; +use chalk_solve::RustIrDatabase; + +use crate::test::assert_result_str; + +type TestGoal = crate::test::TestGoal<&'static str>; + +macro_rules! 
logging_db_output_sufficient { + ($($arg:tt)*) => {{ + use chalk_integration::SolverChoice; + use crate::test::*; + let (program, goals) = parse_test_data!($($arg)*); + crate::logging_db::util::logging_db_output_sufficient(program, goals) + }}; +} + +pub fn logging_db_output_sufficient( + program_text: &str, + goals: Vec<(&str, Vec, TestGoal)>, +) { + println!("program {}", program_text); + assert!(program_text.starts_with('{')); + assert!(program_text.ends_with('}')); + + let goals = goals + .iter() + .flat_map(|(a, bs, c)| bs.iter().map(move |b| (a, b, c))); + + let output_text = { + let db = ChalkDatabase::with( + &program_text[1..program_text.len() - 1], + SolverChoice::default(), + ); + + let program = db.program_ir().unwrap(); + let wrapped = LoggingRustIrDatabase::<_, Program, _>::new(program.clone()); + + chalk_integration::tls::set_current_program(&program, || { + for (goal_text, solver_choice, expected) in goals.clone() { + let mut solver = solver_choice.into_solver(); + + println!("----------------------------------------------------------------------"); + println!("---- first run on original test code ---------------------------------"); + println!("goal {}", goal_text); + assert!(goal_text.starts_with('{')); + assert!(goal_text.ends_with('}')); + let goal = lower_goal( + &*chalk_parse::parse_goal(&goal_text[1..goal_text.len() - 1]).unwrap(), + &*program, + ) + .unwrap(); + + println!("using solver: {:?}", solver_choice); + let peeled_goal = goal.into_peeled_goal(db.interner()); + match expected { + TestGoal::Aggregated(expected) => { + let result = solver.solve(&wrapped, &peeled_goal); + assert_result_str(result, expected, db.interner()); + } + _ => panic!("only aggregated test goals supported for logger goals"), + } + } + + wrapped.to_string() + }) + }; + + println!("----------------------------------------------------------------------"); + println!("logging db output program:\n{}\n", output_text); + + let db = ChalkDatabase::with(&output_text, 
SolverChoice::default()); + + // Note: we are explicitly not calling `.checked_program()`, as our output + // is not intended to be well formed. + let new_program = match db.program_ir() { + Ok(v) => v, + Err(e) => panic!("Error checking recreated chalk program: {}", e), + }; + + for (goal_text, solver_choice, expected) in goals { + let mut solver = solver_choice.into_solver(); + + chalk_integration::tls::set_current_program(&new_program, || { + println!("----------------------------------------------------------------------"); + println!("---- second run on code output by logger -----------------------------"); + println!("goal {}", goal_text); + assert!(goal_text.starts_with('{')); + assert!(goal_text.ends_with('}')); + let goal = lower_goal( + &*chalk_parse::parse_goal(&goal_text[1..goal_text.len() - 1]).unwrap(), + &*new_program, + ) + .unwrap(); + + println!("using solver: {:?}", solver_choice); + let peeled_goal = goal.into_peeled_goal(db.interner()); + match expected { + TestGoal::Aggregated(expected) => { + let result = solver.solve(&db, &peeled_goal); + assert_result_str(result, expected, db.interner()); + } + _ => panic!("only aggregated test goals supported for logger goals"), + } + }); + } +} diff --git a/tests/lowering/mod.rs b/tests/lowering/mod.rs new file mode 100644 index 00000000000..7ddcba1fcc3 --- /dev/null +++ b/tests/lowering/mod.rs @@ -0,0 +1,817 @@ +use chalk_integration::db::ChalkDatabase; +use chalk_integration::query::LoweringDatabase; +use chalk_integration::SolverChoice; + +#[test] +fn lower_success() { + lowering_success! { + program { + struct Foo { field: Foo } + trait Bar { } + impl Bar for Foo { } + } + } +} + +#[test] +fn not_trait() { + lowering_error! { + program { + struct Foo { } + trait Bar { } + impl Foo for Bar { } + } + error_msg { + "expected a trait, found `Foo`, which is not a trait" + } + } +} + +#[test] +fn auto_trait() { + lowering_error! 
{ + program { + #[auto] trait Foo { } + } + error_msg { + "auto trait `Foo` cannot have parameters" + } + } + + lowering_error! { + program { + trait Bar { } + #[auto] trait Foo where Self: Bar { } + } + error_msg { + "auto trait `Foo` cannot have where clauses" + } + } + + lowering_error! { + program { + #[auto] trait Foo { + type Item; + } + } + error_msg { + "auto trait `Foo` cannot define associated types" + } + } + + lowering_success! { + program { + #[auto] trait Send { } + } + } +} + +#[test] +fn negative_impl() { + lowering_error! { + program { + trait Foo { + type Item; + } + + impl !Foo for i32 { + type Item = i32; + } + } + error_msg { + "negative impl for trait `Foo` cannot define associated values" + } + } + + lowering_success! { + program { + trait Foo { } + + trait Iterator { + type Item; + } + + impl !Foo for T where T: Iterator { } + } + } +} + +#[test] +fn invalid_name() { + lowering_error! { + program { + struct Foo { } + trait Bar { } + impl Bar for X { } + } + error_msg { + "invalid parameter name `X`" + } + } +} + +#[test] +fn type_parameter() { + lowering_success! { + program { + struct Foo { } + trait Bar { } + impl Bar for X { } + } + } +} + +#[test] +fn type_parameter_bound() { + lowering_success! { + program { + struct Foo { } + trait Bar { } + trait Eq { } + impl Bar for X where X: Eq { } + } + } +} + +#[test] +fn assoc_tys() { + lowering_success! 
{ + program { + struct String { } + struct Char { } + + trait Iterator { type Item; } + impl Iterator for String { type Item = Char; } + + trait Foo { } + impl Foo for ::Item where X: Iterator { } + } + } +} + +#[test] +fn goal_quantifiers() { + let db = ChalkDatabase::with("trait Foo { }", SolverChoice::default()); + let goal = db + .parse_and_lower_goal("forall {exists {forall {Z: Foo}}}") + .unwrap(); + db.with_program(|_| { + assert_eq!( + format!("{:?}", goal), + "ForAll { Exists { ForAll { Implemented(^0.0: Foo<^1.0, ^2.0>) } } }" + ); + }); +} + +#[test] +fn atc_accounting() { + let db = ChalkDatabase::with( + " + struct Vec { } + + trait Iterable { + type Iter<'a>; + } + + impl Iterable for Vec { + type Iter<'a> = Iter<'a, T>; + } + + struct Iter<'a, T> { } + ", + SolverChoice::default(), + ); + db.with_program(|program| { + let atv_text = format!( + "{:#?}", + &program.associated_ty_values.values().next().unwrap() + ); + println!("{}", atv_text); + assert_eq!( + &atv_text[..].replace(",\n", "\n"), + &r#"AssociatedTyValue { + impl_id: ImplId(#2), + associated_ty_id: (Iterable::Iter), + value: for AssociatedTyValueBound { + ty: Iter<'^0.1, ^0.0> + }, +}"# + .replace(",\n", "\n"), + ); + let goal = db + .parse_and_lower_goal( + "forall { forall<'a> { forall { \ + X: Iterable = Y> } } }", + ) + .unwrap(); + let goal_text = format!("{:?}", goal); + println!("{}", goal_text); + assert_eq!( + goal_text, + "ForAll { \ + ForAll { \ + ForAll { \ + all(AliasEq(<^2.0 as Iterable>::Iter<'^1.0> = ^0.0), \ + Implemented(^2.0: Iterable)) \ + } \ + } \ + }" + ); + }); +} + +#[test] +fn check_variable_kinds() { + lowering_error! { + program { + struct Foo<'a> { } + struct Myi32 { } + trait Bar { } + impl Bar for Foo { } + } + error_msg { + "incorrect parameter kind for `Foo`: expected lifetime, found type" + } + }; + + lowering_error! 
{ + program { + struct Foo { } + trait Bar { } + impl<'a> Bar for Foo<'a> { } + } + error_msg { + "incorrect parameter kind for `Foo`: expected type, found lifetime" + } + }; + + lowering_error! { + program { + trait Iterator { type Item<'a>; } + trait Foo { } + impl Foo for ::Item where X: Iterator { } + } + error_msg { + "incorrect associated type parameter kind for `Item`: expected lifetime, found type" + } + }; + + lowering_error! { + program { + trait Iterator { type Item; } + trait Foo { } + impl Foo for ::Item<'a> where X: Iterator { } + } + error_msg { + "incorrect associated type parameter kind for `Item`: expected type, found lifetime" + } + }; + + lowering_error! { + program { + trait Into {} + struct Foo {} + impl<'a> Into<'a> for Foo {} + } + error_msg { + "incorrect parameter kind for trait `Into`: expected type, found lifetime" + } + } + + lowering_error! { + program { + trait IntoTime<'a> {} + struct Foo {} + impl IntoTime for Foo {} + } + error_msg { + "incorrect parameter kind for trait `IntoTime`: expected lifetime, found type" + } + } + + lowering_error! { + program { + trait Length {} + struct Foo {} + impl Length for Foo {} + } + error_msg { + "incorrect parameter kind for trait `Length`: expected const, found type" + } + } + + lowering_error! { + program { + trait Length {} + struct Foo {} + impl<'a> Length<'a> for Foo {} + } + error_msg { + "incorrect parameter kind for trait `Length`: expected const, found lifetime" + } + } + + lowering_error! { + program { + trait Into {} + struct Foo {} + impl Into for Foo {} + } + + error_msg { + "incorrect parameter kind for trait `Into`: expected type, found const" + } + } + + lowering_error! { + program { + trait IntoTime<'a> {} + struct Foo {} + impl IntoTime for Foo {} + } + + error_msg { + "incorrect parameter kind for trait `IntoTime`: expected lifetime, found const" + } + } +} + +#[test] +fn gat_parse() { + lowering_success! 
{ + program { + trait Sized {} + trait Clone {} + + trait Foo { + type Item<'a, T>: Sized + Clone where Self: Sized; + } + + trait Bar { + type Item<'a, T> where Self: Sized; + } + + struct Container { + value: T + } + + trait Baz { + type Item<'a, 'b, T>: Foo = Container> + Clone; + } + + trait Quux { + type Item<'a, T>; + } + } + } + + lowering_error! { + program { + trait Sized { } + + trait Foo { + type Item where K: Sized; + } + } + + error_msg { + "invalid parameter name `K`" + } + } +} + +#[test] +fn gat_higher_ranked_bound() { + lowering_success! { + program { + trait Fn {} + struct Ref<'a, T> {} + trait Sized {} + + trait Foo { + type Item: forall<'a> Fn> + Sized; + } + } + } +} + +#[test] +fn duplicate_parameters() { + lowering_error! { + program { + trait Foo { } + } + + error_msg { + "duplicate or shadowed parameters" + } + } + + lowering_error! { + program { + trait Foo { + type Item; + } + } + + error_msg { + "duplicate or shadowed parameters" + } + } + + lowering_error! { + program { + struct fun<'a> { } + struct Foo<'a> { + a: for<'a> fn(fun<'a>) + } + } error_msg { + "duplicate or shadowed parameters" + } + } + + lowering_error! { + program { + trait Fn {} + trait Ref<'a, T> {} + + trait Foo<'a> { + type Item: forall<'a> Fn>; + } + } error_msg { + "duplicate or shadowed parameters" + } + } +} + +#[test] +fn upstream_items() { + lowering_success! { + program { + #[upstream] trait Send { } + #[upstream] struct Vec { } + } + } +} + +#[test] +fn tuples() { + lowering_success! { + program { + trait Foo { } + + // `()` is an empty tuple + impl Foo for () { } + // `(i32,)` is a tuple + impl Foo for (i32,) { } + // `(i32)` is `i32` is a scalar + impl Foo for (i32) { } + impl Foo for (i32, u32) { } + impl Foo for (i32, u32, f32) { } + } + } +} + +#[test] +fn scalars() { + lowering_success! 
{ + program { + trait Foo { } + + impl Foo for i8 { } + impl Foo for i16 { } + impl Foo for i32 { } + impl Foo for i64 { } + impl Foo for i128 { } + impl Foo for isize { } + impl Foo for u8 { } + impl Foo for u16 { } + impl Foo for u32 { } + impl Foo for u64 { } + impl Foo for u128 { } + impl Foo for usize { } + impl Foo for f16 { } + impl Foo for f32 { } + impl Foo for f64 { } + impl Foo for f128 { } + impl Foo for bool { } + impl Foo for char { } + } + } + + lowering_error! { + program { + struct i32 { } + } + + error_msg { + "parse error: Unrecognizedtoken" + } + } +} + +#[test] +fn raw_pointers() { + lowering_success! { + program { + trait Quux { } + struct Foo { a: *const T } + + struct Bar { a: *mut T } + + impl Quux for Foo<*mut T> { } + impl Quux for Bar<*const T> { } + } + } + + lowering_error! { + program { + struct *const i32 { } + } + error_msg { + "parse error: Unrecognizedtoken" + } + } + + lowering_error! { + program { + trait Foo { } + impl Foo for *i32 { } + } + error_msg { + "parse error: Unrecognizedtoken" + } + } +} + +#[test] +fn refs() { + lowering_success! { + program { + trait Foo { } + + impl<'a, T> Foo for &'a T { } + impl<'b, T> Foo for &'b mut T { } + } + } + + lowering_error! { + program { + trait Foo { } + + impl Foo for &T { } + } + + error_msg { + "parse error: Unrecognizedtoken" + } + } +} + +#[test] +fn slices() { + lowering_success! { + program { + trait Foo { } + + impl Foo for [i32] { } + impl Foo for [T] { } + + impl Foo for [[i32]] { } + impl Foo for [()] { } + } + } + + lowering_error! { + program { + trait Foo { } + impl Foo for [] {} + } + + error_msg { + "parse error: Unrecognizedtoken" + } + } +} + +#[test] +fn fn_defs() { + lowering_success! { + program { + trait Quux { } + + fn foo<'a, T>(bar: T, baz: &'a mut T) -> u32 + where T: Quux; + } + } + + lowering_error! 
{ + program { + trait Quux { } + + fn foo(bar: TT) -> T + where T: Quux; + } + + error_msg { + "invalid parameter name `TT`" + } + } +} +#[test] +fn arrays() { + lowering_success! { + program { + struct Baz { } + fn foo(bar: [Baz; 3]); + + fn bar(baz: [Baz; N]); + } + } + + lowering_error! { + program { + struct Baz { } + + fn foo(baz: [Baz; u32]); + } + + error_msg { + "parse error: Unrecognizedtoken" + } + } + + lowering_error! { + program { + struct Baz { } + + fn foo(baz: [Baz; T]); + } + + error_msg { + "incorrect parameter kind for `T`: expected const, found type" + } + } + + lowering_error! { + program { + struct Baz { } + + fn foo<'a>(baz: [Baz; 'a]); + } + + error_msg { + "parse error: Unrecognizedtoken" + } + } +} + +#[test] +fn lifetime_outlives() { + lowering_success! { + program { + trait Foo<'a, 'b> where 'a: 'b {} + } + } +} + +#[test] +fn type_outlives() { + lowering_success! { + program { + trait Foo<'a, T> where T: 'a {} + } + } +} + +#[test] +fn phantom_data() { + lowering_success! { + program { + #[phantom_data] + struct PhantomData {} + } + } +} + +#[test] +fn extern_functions() { + lowering_success! { + program { + extern "C" fn foo(); + + extern "Rust" fn bar(); + } + } + + lowering_error! { + program { + extern "Foo" fn foo(); + } + + error_msg { + "invalid extern ABI `Foo`" + } + } +} + +#[test] +fn unsafe_variadic_functions() { + lowering_success! { + program { + unsafe fn foo(_: u8); + unsafe fn bar(_: u8, _: ...); + unsafe extern "C" fn baz(); + } + } + lowering_success! { + program { + fn foo(_: u8, _: ...); + extern "C" fn bar(_: u8, _: ...); + } + } +} + +#[test] +fn closures() { + lowering_success! 
{ + program { + closure foo(self,) {} + closure bar(&self,) {} + closure baz(&mut self,) {} + + closure buzz(self,) -> u32 {} + closure foobar<'a>(self,) -> u32 {} + closure foobaz<'a>(self, a: u8, b: f32) -> u32 {} + closure foobuzz<'a>(self, a: u8, b: f32) -> u32 { + u8; + &'a u16; + &'a mut u32 + } + } + } +} + +#[test] +fn struct_repr() { + lowering_success! { + program { + #[repr(C)] + struct Foo {} + + #[repr(packed)] + struct Bar {} + + #[repr(C)] + #[repr(packed)] + struct FooBar {} + } + } +} + +#[test] +fn algebraic_data_types() { + lowering_success! { + program { + enum Foo {} + + enum Bar { + Variant, + OtherVariant(u32, u32), + LastVariant { + foo: Foo, + bar: u32, + }, + } + } + } + + lowering_success! { + program { + enum Option { + None, + Some(T), + } + + enum Result { + Ok(T), + Err(E), + } + } + } + + lowering_success! { + program { + trait Borrow {} + + trait ToOwned { + type Owned: Borrow; + } + + enum Cow<'a, B> where B: ToOwned, B: 'a { + Borrowed(&'a B), + Owned(::Owned), + } + } + } +} diff --git a/tests/test/ambiguity_issue_727.rs b/tests/test/ambiguity_issue_727.rs new file mode 100644 index 00000000000..6c8981fac38 --- /dev/null +++ b/tests/test/ambiguity_issue_727.rs @@ -0,0 +1,133 @@ +use super::*; + +#[test] +fn issue_727_1() { + test!( + program { + #[upstream] #[non_enumerable] #[lang(sized)] + trait Sized {} + + #[non_enumerable] #[object_safe] + trait Database {} + + #[non_enumerable] + trait QueryGroup + where + Self: Sized, + { + type DynDb: Database + HasQueryGroup; + } + + #[non_enumerable] #[object_safe] + trait HasQueryGroup + where + Self: Database, + G: QueryGroup, + G: Sized, + { } + + #[non_enumerable] #[object_safe] + trait HelloWorld + where + Self: HasQueryGroup, + { } + + struct HelloWorldStorage {} + + impl QueryGroup for HelloWorldStorage { + type DynDb = dyn HelloWorld + 'static; + } + impl HelloWorld for DB + where + DB: Database, + DB: HasQueryGroup, + DB: Sized, + { } + } + + goal { + forall { + if (FromEnv(T: 
Database); FromEnv(T: HasQueryGroup); FromEnv(T: Sized)) { + T: HelloWorld + } + } + } yields[SolverChoice::slg_default()] { // ok + expect![["Unique"]] + } yields[SolverChoice::recursive_default()] { // fails: "Ambiguous; no inference guidance" + expect![["Unique"]] + } + ); +} + +#[test] +fn issue_727_2() { + test!( + program { + #[non_enumerable] #[object_safe] + trait Database {} + + #[non_enumerable] + trait QueryGroup + { + type DynDb: Database + HasQueryGroup; + } + + #[non_enumerable] #[object_safe] + trait HasQueryGroup + where + Self: Database, + G: QueryGroup, + { } + + struct HelloWorldStorage {} + + impl QueryGroup for HelloWorldStorage { + type DynDb = dyn HasQueryGroup + 'static; + } + } + + goal { + forall { + if (FromEnv(T: HasQueryGroup)) { + T: Database + } + } + } yields[SolverChoice::slg_default()] { + expect![["Unique"]] + } yields[SolverChoice::recursive_default()] { + expect![[r#"Ambiguous; no inference guidance"#]] // FIXME rust-lang/chalk#727 + } + ); +} + +#[test] +fn issue_727_3() { + test!( + program { + #[non_enumerable] + trait Database {} + + #[non_enumerable] + trait HasQueryGroup + where + Self: Database, + { } + + struct HelloWorldStorage {} + + impl Database for HelloWorldStorage { } + } + + goal { + forall { + if (FromEnv(HelloWorldStorage: HasQueryGroup); FromEnv(HelloWorldStorage: HasQueryGroup)) { + HelloWorldStorage: Database + } + } + } yields[SolverChoice::slg_default()] { + expect![["Unique"]] + } yields[SolverChoice::recursive_default()] { + expect![["Unique"]] + } + ); +} diff --git a/tests/test/arrays.rs b/tests/test/arrays.rs new file mode 100644 index 00000000000..ff5b8a3e045 --- /dev/null +++ b/tests/test/arrays.rs @@ -0,0 +1,138 @@ +use super::*; + +#[test] +fn arrays_are_sized() { + test! { + program { + #[lang(sized)] + trait Sized { } + } + + goal { + forall { + [u32; N]: Sized + } + } yields { + expect![["Unique"]] + } + + } +} + +#[test] +fn arrays_are_copy_if_element_copy() { + test! 
{ + program { + #[lang(copy)] + trait Copy { } + + struct Foo { } + impl Copy for Foo { } + } + + goal { + forall { + [Foo; N]: Copy + } + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn arrays_are_not_copy_if_element_not_copy() { + test! { + program { + #[lang(copy)] + trait Copy { } + + struct Foo { } + } + + goal { + forall { + [Foo; N]: Copy + } + } yields { + expect![["No possible solution"]] + } + } +} + +#[test] +fn arrays_are_clone_if_element_clone() { + test! { + program { + #[lang(clone)] + trait Clone { } + + struct Foo { } + impl Clone for Foo { } + } + + goal { + forall { + [Foo; N]: Clone + } + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn arrays_are_not_clone_if_element_not_clone() { + test! { + program { + #[lang(clone)] + trait Clone { } + + struct Foo { } + } + + goal { + forall { + [Foo; N]: Clone + } + } yields { + expect![["No possible solution"]] + } + } +} + +#[test] +fn arrays_are_well_formed_if_elem_sized() { + test! { + program { + #[lang(sized)] + trait Sized { } + } + + goal { + forall { + if (T: Sized) { + WellFormed([T; N]) + } + } + } yields { + expect![["Unique"]] + } + + goal { + forall { + WellFormed([T; N]) + } + } yields { + expect![["No possible solution"]] + } + + goal { + exists { + WellFormed([T; N]) + } + } yields { + expect![["Ambiguous; no inference guidance"]] + } + } +} diff --git a/tests/test/auto_traits.rs b/tests/test/auto_traits.rs new file mode 100644 index 00000000000..2a3f579fa7a --- /dev/null +++ b/tests/test/auto_traits.rs @@ -0,0 +1,370 @@ +//! Tests targeting auto traits specifically + +use super::*; + +#[test] +fn auto_semantics() { + test! 
{ + program { + trait Sized { } + #[auto] trait Send { } + + struct TypeA { } + + struct Ptr { } + impl Send for Ptr where T: Send { } + + struct List { + data: T, + next: Ptr> + } + } + + goal { + forall { + List: Send + } + } yields { + expect![["No possible solution"]] + } + goal { + forall { + if (T: Send) { + List: Send + } + } + } yields { + expect![["Unique"]] + } + + goal { + List: Send + } yields { + expect![["Unique"]] + } + + goal { + exists { + T: Send + } + } yields { + expect![["Ambiguous; no inference guidance"]] + } + } +} + +#[test] +fn auto_trait_without_impls() { + test! { + program { + #[auto] trait Send { } + + struct TypeA { } + + struct Useless { } + + struct Data { + data: T + } + } + + goal { + TypeA: Send + } yields { + expect![["Unique"]] + } + + // No fields so `Useless` is `Send`. + goal { + forall { + Useless: Send + } + } yields { + expect![["Unique"]] + } + + goal { + forall { + if (T: Send) { + Data: Send + } + } + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn auto_trait_with_impls() { + test! { + program { + #[auto] trait Send { } + + struct TypeA { } + struct TypeB { } + struct Vec { } + + impl Send for Vec where T: Send { } + impl !Send for TypeA { } + } + + goal { + TypeA: Send + } yields { + expect![["No possible solution"]] + } + + goal { + TypeB: Send + } yields { + expect![["Unique"]] + } + + goal { + Vec: Send + } yields { + expect![["No possible solution"]] + } + + goal { + Vec: Send + } yields { + expect![["Unique"]] + } + + goal { + forall { + Vec: Send + } + } yields { + expect![["No possible solution"]] + } + } +} + +/// This Flounders because auto traits can't be enumerated +#[test] +fn auto_traits_flounder() { + test! { + program { + struct Foo { } + struct Bar { } + + #[auto] + trait Send { } + } + + goal { + exists { A: Send } + } yields_first[SolverChoice::slg(3, None)] { + expect![["Floundered"]] + } + } +} + +#[test] +fn enum_auto_trait() { + test! 
{ + program { + #[auto] trait Send { } + struct Foo { } + struct Bar { } + impl Send for Foo { } + impl !Send for Bar { } + + enum A { + X, + Y(Foo), + Z { + z: Foo, + } + } + + enum B { + X, + Y(Foo), + Z { + z: Bar, + } + } + + enum C { + X, + Y(Bar), + Z { + z: Foo, + } + } + } + + goal { + A: Send + } yields { + expect![["Unique"]] + } + + goal { + B: Send + } yields { + expect![["No possible solution"]] + } + + goal { + C: Send + } yields { + expect![["No possible solution"]] + } + } +} + +#[test] +fn builtin_auto_trait() { + test! { + program { + #[auto] trait AutoTrait {} + struct Struct {} + enum Enum { Var1, Var2 } + fn func(); + + struct Marker {} + impl !AutoTrait for Marker {} + + closure good_closure(self, arg: Marker) -> Marker { i32 } + closure bad_closure(self, arg: i32) -> i32 { Marker } + + extern type Ext; + enum ExtEnum { GoodVariant, BadVariant(Ext) } + } + + // The following types only contain AutoTrait-types, and thus implement AutoTrait themselves. + goal { (i32, f32): AutoTrait } + yields { expect![["Unique"]] } + + goal { [(); 1]: AutoTrait } + yields { expect![["Unique"]] } + + goal { [()]: AutoTrait } + yields { expect![["Unique"]] } + + goal { u32: AutoTrait } + yields { expect![["Unique"]] } + + goal { *const (): AutoTrait } + yields { expect![["Unique"]] } + + goal { *mut (): AutoTrait } + yields { expect![["Unique"]] } + + goal { forall<'a> { &'a (): AutoTrait } } + yields { expect![["Unique"]] } + + goal { forall<'a> { &'a mut (): AutoTrait } } + yields { expect![["Unique"]] } + + goal { str: AutoTrait } + yields { expect![["Unique"]] } + + goal { !: AutoTrait } + yields { expect![["Unique"]] } + + goal { Enum: AutoTrait } + yields { expect![["Unique"]] } + + goal { func: AutoTrait } + yields { expect![["Unique"]] } + + goal { good_closure: AutoTrait } + yields { expect![["Unique"]] } + + goal { fn(Marker) -> Marker: AutoTrait } + yields { expect![["Unique"]] } + + + // foreign types do not implement AutoTraits automatically + goal 
{ Ext: AutoTrait } + yields { expect![["No possible solution"]] } + + // The following types do contain non-AutoTrait types, and thus do not implement AutoTrait. + goal { bad_closure: AutoTrait } + yields { expect![["No possible solution"]] } + + goal { ExtEnum: AutoTrait } + yields { expect![["No possible solution"]] } + + goal { (Struct, Marker): AutoTrait } + yields { expect![["No possible solution"]] } + } +} + +#[test] +fn adt_auto_trait() { + test! { + program { + #[auto] trait AutoTrait {} + struct Yes {} + struct No {} + impl !AutoTrait for No {} + + struct WrapperNo { t: T } + struct WrapperYes { t: T } + + struct X {} + impl !AutoTrait for WrapperNo {} + } + + goal { + Yes: AutoTrait + } + yields { + expect![["Unique"]] + } + + goal { + No: AutoTrait + } + yields { + expect![["No possible solution"]] + } + + goal { + X: AutoTrait + } + yields { + expect![["Unique"]] + } + + goal { + WrapperNo: AutoTrait + } + yields { + expect![["No possible solution"]] + } + + goal { + WrapperYes: AutoTrait + } + yields { + expect![["No possible solution"]] + } + } +} + +#[test] +fn phantom_auto_trait() { + test! { + program { + #[auto] trait AutoTrait {} + #[phantom_data] struct PhantomData {} + struct Bad {} + impl !AutoTrait for Bad {} + } + + goal { + PhantomData: AutoTrait + } + yields { + expect![["No possible solution"]] + } + } +} diff --git a/src/test/bench.rs b/tests/test/bench.rs similarity index 83% rename from src/test/bench.rs rename to tests/test/bench.rs index 9d80bf375d8..93e34f4ea41 100644 --- a/src/test/bench.rs +++ b/tests/test/bench.rs @@ -1,26 +1,21 @@ //! Benchmarking tests. 
-extern crate test; use self::test::Bencher; use chalk_ir; use chalk_solve::ext::*; -use chalk_solve::solve::SolverChoice; -use std::sync::Arc; +use chalk_solve::SolverChoice; -use super::{parse_and_lower_program, - parse_and_lower_goal, - assert_result}; +use super::{assert_result, parse_and_lower_goal, parse_and_lower_program_with_env}; fn run_bench( program_text: &str, solver_choice: SolverChoice, goal_text: &str, bencher: &mut Bencher, - expected: &str + expected: &str, ) { - let program = Arc::new(parse_and_lower_program(program_text, solver_choice).unwrap()); - let env = Arc::new(program.environment()); + let (program, env) = parse_and_lower_program_with_env(program_text, solver_choice).unwrap(); chalk_ir::tls::set_current_program(&program, || { let goal = parse_and_lower_goal(&program, goal_text).unwrap(); let peeled_goal = goal.into_peeled_goal(); @@ -102,11 +97,9 @@ forall { fn cycley_slg(b: &mut Bencher) { run_bench( CYCLEY, - SolverChoice::SLG { - max_size: 20, - }, + SolverChoice::SLG { max_size: 20 }, CYCLEY_GOAL, b, - "Unique" + "Unique", ); } diff --git a/tests/test/closures.rs b/tests/test/closures.rs new file mode 100644 index 00000000000..f0aa6ed13e1 --- /dev/null +++ b/tests/test/closures.rs @@ -0,0 +1,454 @@ +use super::*; + +#[test] +fn closure_is_well_formed() { + test! { + program { + closure foo(self,) {} + closure bar(&self,) {} + closure baz(&mut self,) {} + } + + goal { + WellFormed(foo) + } yields { + expect![["Unique"]] + } + goal { + WellFormed(bar) + } yields { + expect![["Unique"]] + } + goal { + WellFormed(baz) + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn closure_is_sized() { + test! { + program { + #[lang(sized)] + trait Sized { } + + closure foo(self,) {} + } + + goal { + foo: Sized + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn closure_is_copy() { + test! 
{ + program { + #[lang(copy)] + trait Copy { } + impl<'a, T> Copy for &'a T {} + impl Copy for u8 {} + impl Copy for u16 {} + impl Copy for u32 {} + + closure foo(self,) {} + closure bar(&self,) {} + closure baz(&mut self,) {} + + closure foobuzz<'a>(self, a: u8, b: f32) -> u32 { + u8; + &'a u16; + &'a mut u32 + } + closure foobar<'a>(self, a: u8, b: f32) -> u32 { + u8; + &'a u16 + } + closure with_ty(self,) { T } + } + + // A closure with no upvars is also copy, regardless of kind + goal { + foo: Copy + } yields { + expect![["Unique"]] + } + goal { + bar: Copy + } yields { + expect![["Unique"]] + } + goal { + baz: Copy + } yields { + expect![["Unique"]] + } + + // A closure with non-Copy upvars is not copy + goal { + forall<'a> { + foobuzz<'a>: Copy + } + } yields { + expect![["No possible solution"]] + } + // A closure with only Copy upvars is copy + goal { + forall<'a> { + foobar<'a>: Copy + } + } yields { + expect![["Unique"]] + } + goal { + forall { with_ty: Copy } + } yields { + expect![["No possible solution"]] + } + goal { + forall { if (T: Copy) { with_ty: Copy } } + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn closure_is_clone() { + test! { + program { + #[lang(clone)] + trait Clone { } + + closure foo(self,) {} + closure bar(&self,) {} + closure baz(&mut self,) {} + } + goal { + foo: Clone + } yields { + expect![["Unique"]] + } + goal { + bar: Clone + } yields { + expect![["Unique"]] + } + goal { + baz: Clone + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn closure_implements_fn_traits() { + test! 
{ + program { + #[lang(fn_once)] + trait FnOnce { + type Output; + } + + #[lang(fn_mut)] + trait FnMut where Self: FnOnce { } + + #[lang(fn)] + trait Fn where Self: FnMut { } + + #[lang(future)] + trait Future { + type Output; + } + + #[lang(async_fn_once)] + trait AsyncFnOnce { + type CallOnceFuture: Future>::Output>; + #[lang(async_fn_once_output)] + type Output; + } + + #[lang(async_fn_mut)] + trait AsyncFnMut where Self: AsyncFnOnce { } + + #[lang(async_fn)] + trait AsyncFn where Self: AsyncFnMut { } + + struct ConcreteFuture { } + + impl Future for ConcreteFuture { + type Output = T; + } + + closure foo(self,) {} + closure bar(&self,) {} + closure baz(&mut self,) {} + + closure foobuzz<'a>(self, a: u8, b: f32) -> u32 { + u8; + &'a u16; + &'a mut u32 + } + closure foobar<'a>(self, a: u8, b: f32) -> u32 { + u8; + &'a u16 + } + + closure foo_async(self,) -> ConcreteFuture<()> { } + closure bar_async(&self,) -> ConcreteFuture<()> { } + closure baz_async(&mut self,) -> ConcreteFuture<()> { } + + closure foobuzz_async<'a>(self, a: u8, b: f32) -> ConcreteFuture { + u8; + &'a u16; + &'a mut u32 + } + closure foobar_async<'a>(self, a: u8, b: f32) -> ConcreteFuture { + u8; + &'a u16 + } + } + + // A closure with kind `FnOnce` only implements `FnOnce` + goal { + foo: Fn<()> + } yields { + expect![["No possible solution"]] + } + goal { + foo: FnMut<()> + } yields { + expect![["No possible solution"]] + } + goal { + foo: FnOnce<()> + } yields { + expect![["Unique"]] + } + goal { + Normalize(>::Output -> ()) + } yields { + expect![["Unique"]] + } + + // A closure with kind `AsyncFnOnce` only implements `AsyncFnOnce` + goal { + foo_async: AsyncFn<()> + } yields { + expect![["No possible solution"]] + } + goal { + foo_async: AsyncFnMut<()> + } yields { + expect![["No possible solution"]] + } + goal { + foo_async: AsyncFnOnce<()> + } yields { + expect![["Unique"]] + } + goal { + Normalize(>::Output -> ()) + } yields { + expect![["Unique"]] + } + + // A closure with kind `Fn` 
implements all `Fn` traits + goal { + bar: Fn<()> + } yields { + expect![["Unique"]] + } + goal { + bar: FnMut<()> + } yields { + expect![["Unique"]] + } + goal { + bar: FnOnce<()> + } yields { + expect![["Unique"]] + } + goal { + Normalize(>::Output -> ()) + } yields { + expect![["Unique"]] + } + + // A closure with kind `AsyncFn` implements all `AsyncFn` traits + goal { + bar_async: AsyncFn<()> + } yields { + expect![["Unique"]] + } + goal { + bar_async: AsyncFnMut<()> + } yields { + expect![["Unique"]] + } + goal { + bar_async: AsyncFnOnce<()> + } yields { + expect![["Unique"]] + } + goal { + Normalize(>::Output -> ()) + } yields { + expect![["Unique"]] + } + + // A closure with kind `FnMut` implements `FnMut` and `FnOnce` + goal { + baz: Fn<()> + } yields { + expect![["No possible solution"]] + } + goal { + baz: FnMut<()> + } yields { + expect![["Unique"]] + } + goal { + baz: FnOnce<()> + } yields { + expect![["Unique"]] + } + goal { + Normalize(>::Output -> ()) + } yields { + expect![["Unique"]] + } + + // A closure with kind `AsyncFnMut` implements `AsyncFnMut` and `AsyncFnOnce` + goal { + baz_async: AsyncFn<()> + } yields { + expect![["No possible solution"]] + } + goal { + baz_async: AsyncFnMut<()> + } yields { + expect![["Unique"]] + } + goal { + baz_async: AsyncFnOnce<()> + } yields { + expect![["Unique"]] + } + goal { + Normalize(>::Output -> ()) + } yields { + expect![["Unique"]] + } + + // A closure also implements the `Fn/AsyncFn` traits regardless of upvars + goal { + forall<'a> { + foobar<'a>: FnOnce<(u8, f32)> + } + } yields { + expect![["Unique"]] + } + goal { + forall<'a> { + Normalize( as FnOnce<(u8, f32)>>::Output -> u32) + } + } yields { + expect![["Unique"]] + } + goal { + forall<'a> { + Normalize( as FnOnce<(u8, f32)>>::Output -> u32) + } + } yields { + expect![["Unique"]] + } + goal { + forall<'a> { + foobuzz<'a>: FnOnce<(u8, f32)> + } + } yields { + expect![["Unique"]] + } + goal { + forall<'a> { + Normalize( as FnOnce<(u8, f32)>>::Output 
-> u32) + } + } yields { + expect![["Unique"]] + } + goal { + forall<'a> { + foobar_async<'a>: AsyncFnOnce<(u8, f32)> + } + } yields { + expect![["Unique"]] + } + goal { + forall<'a> { + Normalize( as AsyncFnOnce<(u8, f32)>>::Output -> u32) + } + } yields { + expect![["Unique"]] + } + goal { + forall<'a> { + Normalize( as AsyncFnOnce<(u8, f32)>>::Output -> u32) + } + } yields { + expect![["Unique"]] + } + goal { + forall<'a> { + foobuzz_async<'a>: AsyncFnOnce<(u8, f32)> + } + } yields { + expect![["Unique"]] + } + goal { + forall<'a> { + Normalize( as AsyncFnOnce<(u8, f32)>>::Output -> u32) + } + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn closures_propagate_auto_traits() { + test! { + program { + #[auto] + trait Send { } + + closure foo(self,) {} + + closure with_ty(self,) { T } + } + + goal { + foo: Send + } yields { + expect![["Unique"]] + } + + goal { + forall { with_ty: Send } + } yields { + expect![["No possible solution"]] + } + + goal { + forall { if (T: Send) { with_ty: Send } } + } yields { + expect![["Unique"]] + } + } +} diff --git a/src/coherence/test.rs b/tests/test/coherence.rs similarity index 63% rename from src/coherence/test.rs rename to tests/test/coherence.rs index 10c3686507f..ba431e7be3f 100644 --- a/src/coherence/test.rs +++ b/tests/test/coherence.rs @@ -1,6 +1,4 @@ -#![cfg(test)] - -use test_util::*; +use chalk_integration::query::LoweringDatabase; #[test] fn two_impls_for_same_type() { @@ -12,7 +10,19 @@ fn two_impls_for_same_type() { impl Foo for Bar { } } error_msg { - "overlapping impls of trait \"Foo\"" + "overlapping impls of trait `Foo`" + } + } + + lowering_error! { + program { + trait Foo { } + struct Bar { } + impl Foo for Bar<3> { } + impl Foo for Bar<3> { } + } + error_msg { + "overlapping impls of trait `Foo`" } } } @@ -40,6 +50,16 @@ fn concrete_impl_and_blanket_impl() { impl Foo for T { } } } + + lowering_success! 
{ + program { + trait Foo { } + struct S {} + struct Bar { } + impl Foo for Bar<3> { } + impl Foo for Bar { } + } + } } #[test] @@ -56,7 +76,7 @@ fn two_blanket_impls() { impl Baz for Quux { } } error_msg { - "overlapping impls of trait \"Foo\"" + "overlapping impls of trait `Foo`" } } } @@ -72,7 +92,7 @@ fn two_blanket_impls_open_ended() { impl Foo for T where T: Baz { } } error_msg { - "overlapping impls of trait \"Foo\"" + "overlapping impls of trait `Foo`" } } } @@ -113,7 +133,7 @@ fn multiple_parameters() { impl Foo for T { } impl Foo for Baz { } } error_msg { - "overlapping impls of trait \"Foo\"" + "overlapping impls of trait `Foo`" } } } @@ -184,7 +204,43 @@ fn overlapping_assoc_types_error() { impl Foo for B where A: Bar { } } error_msg { - "overlapping impls of trait \"Foo\"" + "overlapping impls of trait `Foo`" + } + } +} + +/// See https://github.com/rust-lang/chalk/issues/515 +#[test] +fn overlapping_assoc_types_error_simple() { + lowering_error! { + program { + trait Iterator { type Item; } + trait Trait {} + + struct Foo {} + + impl Trait for T where T: Iterator {} + impl Trait for T where T: Iterator {} + } error_msg { + "overlapping impls of trait `Trait`" + } + } +} + +/// See https://github.com/rust-lang/chalk/issues/515 +#[test] +fn overlapping_assoc_types_error_generics() { + lowering_error! { + program { + trait Iterator { type Item; } + trait Trait {} + + struct Foo {} + + impl Trait for T where T: Iterator {} + impl Trait for T where T: Iterator {} + } error_msg { + "overlapping impls of trait `Trait`" } } } @@ -194,12 +250,12 @@ fn overlapping_negative_positive_impls() { lowering_error! 
{ program { trait Send { } - struct i32 { } + struct MyType { } - impl Send for i32 { } - impl !Send for i32 { } + impl Send for MyType { } + impl !Send for MyType { } } error_msg { - "overlapping impls of trait \"Send\"" + "overlapping impls of trait `Send`" } } } @@ -213,10 +269,10 @@ fn overlapping_negative_impls() { trait Bar { } struct Vec { } - struct i32 { } + struct MyType { } - impl Foo for i32 { } - impl Bar for i32 { } + impl Foo for MyType { } + impl Bar for MyType { } impl !Send for Vec where T: Foo { } impl !Send for Vec where T: Bar { } @@ -252,42 +308,40 @@ fn downstream_impl_of_fundamental_43355() { // This makes the first impl now apply to A, which means that both of these impls now // overlap for A even though they didn't overlap in the original crate where A is defined. } error_msg { - "overlapping impls of trait \"Trait1\"" + "overlapping impls of trait `Trait1`" } } } #[test] fn fundamental_traits() { - // We want to enable negative reasoning about some traits. For example, consider the str type. - // We know that str is never going to be Sized and we have made a decision to allow people to - // depend on that. The following two impls are rejected as overlapping despite the fact that we - // know that str will never be Sized. + // We want to enable negative reasoning about some traits. For example, assume we have some + // "Foo" type which we know is never going to be Sized (ex. str). The following two impls are + // rejected as overlapping despite the fact that we know that Foo will never be Sized. lowering_error! { program { #[upstream] trait Sized { } - #[upstream] struct str { } + #[upstream] struct Foo { } trait Bar { } - impl Bar for str { } + impl Bar for Foo { } impl Bar for T where T: Sized { } } error_msg { - "overlapping impls of trait \"Bar\"" + "overlapping impls of trait `Bar`" } } // If we make Sized fundamental, we're telling the Rust compiler that it can reason negatively - // about it. 
That means that `not { str: Sized }` is provable. With that change, these two - // impls are now valid. + // about it. That means that `not { Foo: Sized }` is provable. With that change, these two impls + // are now valid. lowering_success! { program { #[upstream] #[fundamental] trait Sized { } - #[upstream] struct str { } + #[upstream] struct Foo { } trait Bar { } - impl Bar for str { } + impl Bar for Foo { } impl Bar for T where T: Sized { } } } - } #[test] @@ -301,7 +355,7 @@ fn orphan_check() { impl Foo for Bar { } } error_msg { - "impl for trait \"Foo\" violates the orphan rules" + "impl for trait `Foo` violates the orphan rules" } } @@ -311,7 +365,7 @@ fn orphan_check() { impl Foo for T { } } error_msg { - "impl for trait \"Foo\" violates the orphan rules" + "impl for trait `Foo` violates the orphan rules" } } @@ -322,7 +376,7 @@ fn orphan_check() { impl Foo for T { } } error_msg { - "impl for trait \"Foo\" violates the orphan rules" + "impl for trait `Foo` violates the orphan rules" } } @@ -338,7 +392,7 @@ fn orphan_check() { impl Remote for Pair> { } } error_msg { - "impl for trait \"Remote\" violates the orphan rules" + "impl for trait `Remote` violates the orphan rules" } } lowering_error! { @@ -349,7 +403,7 @@ fn orphan_check() { impl Remote for Pair, T> { } } error_msg { - "impl for trait \"Remote\" violates the orphan rules" + "impl for trait `Remote` violates the orphan rules" } } lowering_error! 
{ @@ -360,7 +414,7 @@ fn orphan_check() { impl Remote for Pair, U> { } } error_msg { - "impl for trait \"Remote\" violates the orphan rules" + "impl for trait `Remote` violates the orphan rules" } } @@ -368,19 +422,19 @@ fn orphan_check() { program { #[auto] #[upstream] trait Send { } #[upstream] trait TheTrait { } - #[upstream] struct isize { } - #[upstream] struct usize { } + #[upstream] struct TypeA { } + #[upstream] struct TypeB { } struct TheType { } // These impls should be fine because they contain the local type - impl TheTrait for isize { } - impl TheTrait for TheType { } + impl TheTrait for TypeA { } + impl TheTrait for TheType { } // This impl should fail because it contains only upstream type - impl TheTrait for isize { } + impl TheTrait for TypeA { } } error_msg { - "impl for trait \"TheTrait\" violates the orphan rules" + "impl for trait `TheTrait` violates the orphan rules" } } @@ -388,11 +442,11 @@ fn orphan_check() { program { #[auto] #[upstream] trait Send { } #[upstream] struct Vec { } - #[upstream] struct isize { } + #[upstream] struct TypeA { } - impl !Send for Vec { } + impl !Send for Vec { } } error_msg { - "impl for trait \"Send\" violates the orphan rules" + "impl for trait `Send` violates the orphan rules" } } @@ -405,7 +459,7 @@ fn orphan_check() { impl Remote for Pair { } } error_msg { - "impl for trait \"Remote\" violates the orphan rules" + "impl for trait `Remote` violates the orphan rules" } } @@ -413,13 +467,13 @@ fn orphan_check() { program { #[upstream] trait Remote1 { } #[upstream] struct Pair { } - #[upstream] struct i32 { } + #[upstream] struct TypeA { } struct Local { } - impl Remote1>> for i32 { } + impl Remote1>> for TypeA { } } error_msg { - "impl for trait \"Remote1\" violates the orphan rules" + "impl for trait `Remote1` violates the orphan rules" } } @@ -432,7 +486,7 @@ fn orphan_check() { impl Remote for Pair> { } } error_msg { - "impl for trait \"Remote\" violates the orphan rules" + "impl for trait `Remote` violates 
the orphan rules" } } @@ -445,7 +499,7 @@ fn orphan_check() { impl Remote for Vec { } } error_msg { - "impl for trait \"Remote\" violates the orphan rules" + "impl for trait `Remote` violates the orphan rules" } } @@ -458,7 +512,78 @@ fn orphan_check() { impl Remote for Vec> { } } error_msg { - "impl for trait \"Remote\" violates the orphan rules" + "impl for trait `Remote` violates the orphan rules" + } + } +} + +#[test] +fn fundamental_type_multiple_parameters() { + // Test that implementing a local trait on a fundamental + // type with multiple parameters is allowed + lowering_success! { + program { + #[upstream] + #[fundamental] + struct Box { } + + trait Local { } + + impl Local for Box { } + } + } + + // Test that implementing a remote trait on a fundamental + // type with multiple parameters is rejected + lowering_error! { + program { + #[upstream] + #[fundamental] + struct Box { } + + #[upstream] + trait Remote { } + + impl Remote for Box { } + } error_msg { + "impl for trait `Remote` violates the orphan rules" + } + } + + // Test that implementing a remote trait on a fundamental type + // with one local type parameter is allowed + lowering_success! { + program { + #[upstream] + #[fundamental] + struct Box { } + + struct Local { } + + #[upstream] + trait Remote { } + + impl Remote for Box { } + } + } + + // Test that implementing a remote trait on a fundamental type + // with one concrete remote type parameter is rejected + lowering_error! { + program { + #[upstream] + #[fundamental] + struct Box { } + + #[upstream] + struct Up { } + + #[upstream] + trait Remote { } + + impl Remote for Box { } + } error_msg { + "impl for trait `Remote` violates the orphan rules" } } } diff --git a/tests/test/coherence_goals.rs b/tests/test/coherence_goals.rs new file mode 100644 index 00000000000..86cbdb6d868 --- /dev/null +++ b/tests/test/coherence_goals.rs @@ -0,0 +1,362 @@ +//! Tests related to "coherence goals", which are the special goals we use to reflect +//! 
the coherence logic. + +use super::*; + +#[test] +fn local_and_upstream_types() { + test! { + program { + #[upstream] struct Upstream { } + struct Local { } + } + + goal { IsLocal(Upstream) } yields { expect![["No possible solution"]] } + goal { IsUpstream(Upstream) } yields { expect![["Unique"]] } + + goal { IsLocal(Local) } yields { expect![["Unique"]] } + goal { IsUpstream(Local) } yields { expect![["No possible solution"]] } + } + + test! { + program { + trait Clone { } + #[upstream] struct Upstream where T: Clone { } + struct Local where T: Clone { } + + #[upstream] struct Upstream2 { } + struct Internal2 { } + } + + goal { forall { IsLocal(Upstream) } } yields { expect![["No possible solution"]] } + goal { forall { IsUpstream(Upstream) } } yields { expect![["Unique"]] } + + goal { forall { IsLocal(Local) } } yields { expect![["Unique"]] } + goal { forall { IsUpstream(Local) } } yields { expect![["No possible solution"]] } + } +} + +#[test] +fn is_fully_visible() { + // Should be visible regardless of local, fundamental, or upstream + test! { + program { + #[upstream] struct Upstream { } + struct Local { } + + #[upstream] + #[fundamental] + struct Box { } + } + + goal { IsFullyVisible(Upstream) } yields { expect![["Unique"]] } + goal { IsFullyVisible(Local) } yields { expect![["Unique"]] } + goal { IsFullyVisible(Box) } yields { expect![["Unique"]] } + goal { IsFullyVisible(Box) } yields { expect![["Unique"]] } + } + + // Should be visible regardless of local, fundamental, or upstream + test! 
{ + program { + #[upstream] struct Upstream { } + struct Local { } + + #[upstream] struct Upstream2 { } + struct Local2 { } + + #[upstream] + #[fundamental] + struct Box { } + } + + // Unknown type parameters are not fully visible + goal { forall { IsFullyVisible(Box) } } yields { expect![["No possible solution"]] } + goal { forall { IsFullyVisible(Upstream2) } } yields { expect![["No possible solution"]] } + goal { forall { IsFullyVisible(Local2) } } yields { expect![["No possible solution"]] } + + // Without any unknown type parameters, local and upstream should not matter + goal { forall { IsFullyVisible(Upstream2) } } yields { expect![["Unique"]] } + goal { forall { IsFullyVisible(Upstream2) } } yields { expect![["Unique"]] } + goal { forall { IsFullyVisible(Local2) } } yields { expect![["Unique"]] } + goal { forall { IsFullyVisible(Local2) } } yields { expect![["Unique"]] } + + // Fundamental anywhere should not change the outcome + goal { forall { IsFullyVisible(Box>) } } yields { expect![["Unique"]] } + goal { forall { IsFullyVisible(Box>) } } yields { expect![["Unique"]] } + goal { forall { IsFullyVisible(Box>) } } yields { expect![["Unique"]] } + goal { forall { IsFullyVisible(Box>) } } yields { expect![["Unique"]] } + goal { forall { IsFullyVisible(Upstream2>) } } yields { expect![["Unique"]] } + goal { forall { IsFullyVisible(Upstream2>) } } yields { expect![["Unique"]] } + goal { forall { IsFullyVisible(Local2>) } } yields { expect![["Unique"]] } + goal { forall { IsFullyVisible(Local2>) } } yields { expect![["Unique"]] } + } +} + +#[test] +fn fundamental_types() { + // NOTE: These tests need to have both Local and Upstream structs since chalk will attempt + // to enumerate all of them. + + // This first test is a sanity check to make sure `Box` isn't a special case. + // By testing this, we ensure that adding the #[fundamental] attribute does in fact + // change behaviour + test! 
{ + program { + #[upstream] struct Box { } + + #[upstream] struct Upstream { } + struct Local { } + } + + // Without fundamental, Box should behave like a regular upstream type + goal { forall { not { IsLocal(Box) } } } yields { expect![["Unique"]] } + goal { forall { IsLocal(Box) } } yields { expect![["No possible solution"]] } + goal { forall { IsUpstream(Box) } } yields { expect![["Unique"]] } + + // Without fundamental, Box is upstream regardless of its inner type + goal { IsLocal(Box) } yields { expect![["No possible solution"]] } + goal { IsLocal(Box) } yields { expect![["No possible solution"]] } + goal { IsUpstream(Box) } yields { expect![["Unique"]] } + goal { IsUpstream(Box) } yields { expect![["Unique"]] } + } + + test! { + program { + #[upstream] + #[fundamental] + struct Box { } + + #[upstream] struct Upstream { } + struct Local { } + } + + // With fundamental, Box can be local for certain types, so there is no unique solution + // anymore for any of these + goal { forall { not { IsLocal(Box) } } } yields { expect![["Ambiguous; no inference guidance"]] } + goal { forall { IsLocal(Box) } } yields { expect![["No possible solution"]] } + goal { forall { IsUpstream(Box) } } yields { expect![["No possible solution"]] } + + // With fundamental, some of these yield different results -- no longer depends on Box + // itself + goal { IsLocal(Box) } yields { expect![["No possible solution"]] } + goal { IsLocal(Box) } yields { expect![["Unique"]] } + goal { IsUpstream(Box) } yields { expect![["Unique"]] } + goal { IsUpstream(Box) } yields { expect![["No possible solution"]] } + } + + test! 
{ + program { + #[upstream] + #[fundamental] + struct Box { } + + trait Clone { } + #[upstream] struct Upstream where T: Clone { } + struct Local where T: Clone { } + + #[upstream] struct Upstream2 { } + struct Internal2 { } + } + + // Upstream is upstream no matter what, so this should not be local for any T + goal { forall { IsLocal(Box>) } } yields { expect![["No possible solution"]] } + goal { forall { IsUpstream(Box>) } } yields { expect![["Unique"]] } + + // A fundamental type inside an upstream type should not make a difference (i.e. the rules + // for the outer, non-fundamental type should apply) + goal { forall { IsLocal(Upstream>) } } yields { expect![["No possible solution"]] } + goal { forall { IsUpstream(Upstream>) } } yields { expect![["Unique"]] } + + // Make sure internal types within an upstream type do not make a difference + goal { forall { IsLocal(Box>>) } } yields { expect![["No possible solution"]] } + goal { forall { IsUpstream(Box>>) } } yields { expect![["Unique"]] } + + // Local is local no matter what, so this should be local for any T + goal { forall { IsLocal(Box>) } } yields { expect![["Unique"]] } + goal { forall { IsUpstream(Box>) } } yields { expect![["No possible solution"]] } + + // A fundamental type inside an internal type should not make a difference + goal { forall { IsLocal(Local>) } } yields { expect![["Unique"]] } + goal { forall { IsUpstream(Local>) } } yields { expect![["No possible solution"]] } + + // Make sure upstream types within an internal type and vice versa do not make a difference + goal { forall { IsLocal(Box>>) } } yields { expect![["Unique"]] } + goal { forall { IsUpstream(Box>>) } } yields { expect![["Unique"]] } + } + + // Nested fundamental types should still be local if they can be recursively proven to be local + test! 
{ + program { + #[upstream] + #[fundamental] + struct Box { } + // This type represents &T which is also fundamental + #[upstream] + #[fundamental] + struct Ref { } + + trait Clone { } + #[upstream] struct Upstream where T: Clone { } + struct Local where T: Clone { } + + #[upstream] struct Upstream2 { } + struct Internal2 { } + } + + goal { forall { IsLocal(Ref>) } } yields { expect![["No possible solution"]] } + goal { forall { IsUpstream(Ref>) } } yields { expect![["No possible solution"]] } + + goal { IsLocal(Ref>) } yields { expect![["No possible solution"]] } + goal { IsUpstream(Ref>) } yields { expect![["Unique"]] } + + goal { IsLocal(Ref>) } yields { expect![["Unique"]] } + goal { IsUpstream(Ref>) } yields { expect![["No possible solution"]] } + } + + // If a type is not upstream, it is always local regardless of its parameters or #[fundamental] + test! { + program { + // if we were compiling std, Box would never be upstream + #[fundamental] + struct Box { } + + #[upstream] struct Upstream { } + struct Local { } + } + + goal { forall { IsLocal(Box) } } yields { expect![["Unique"]] } + goal { IsLocal(Box) } yields { expect![["Unique"]] } + goal { IsLocal(Box) } yields { expect![["Unique"]] } + } +} + +#[test] +fn local_impl_allowed_for_traits() { + test! 
{ + program { + trait LocalTrait { } + trait LocalTrait2 { } + + #[upstream] struct Upstream { } + struct Local { } + } + + // Local traits are always implementable + goal { forall { LocalImplAllowed(T: LocalTrait) } } yields { expect![["Unique"]] } + goal { LocalImplAllowed(Local: LocalTrait) } yields { expect![["Unique"]] } + goal { LocalImplAllowed(Upstream: LocalTrait) } yields { expect![["Unique"]] } + goal { forall { LocalImplAllowed(T: LocalTrait2) } } yields { expect![["Unique"]] } + goal { forall { LocalImplAllowed(T: LocalTrait2) } } yields { expect![["Unique"]] } + goal { forall { LocalImplAllowed(Local: LocalTrait2) } } yields { expect![["Unique"]] } + goal { forall { LocalImplAllowed(Upstream: LocalTrait2) } } yields { expect![["Unique"]] } + } + + // Single-type parameter trait refs (Self only) + test! { + program { + #[upstream] trait UpstreamTrait { } + + #[upstream] struct Upstream { } + #[upstream] struct Upstream2 { } + struct Local { } + struct Local2 { } + } + + // No local type + goal { LocalImplAllowed(Upstream: UpstreamTrait) } yields { expect![["No possible solution"]] } + goal { forall { LocalImplAllowed(T: UpstreamTrait) } } yields { expect![["No possible solution"]] } + + // Local type, not preceded by anything + // Notice that the types after the first local type do not matter at all + goal { LocalImplAllowed(Local: UpstreamTrait) } yields { expect![["Unique"]] } + } + + // Multi-type parameter trait refs (Self, T) + test! 
{ + program { + trait Clone { } + #[upstream] trait UpstreamTrait2 where T: Clone { } + + #[upstream] struct Upstream { } + #[upstream] struct Upstream2 { } + struct Local { } + struct Local2 { } + } + + // No local type + goal { forall { LocalImplAllowed(T: UpstreamTrait2) } } yields { expect![["No possible solution"]] } + goal { forall { LocalImplAllowed(T: UpstreamTrait2) } } yields { expect![["No possible solution"]] } + goal { forall { LocalImplAllowed(Upstream: UpstreamTrait2) } } yields { expect![["No possible solution"]] } + + // Local type, but preceded by a type parameter + goal { forall { LocalImplAllowed(T: UpstreamTrait2) } } yields { expect![["No possible solution"]] } + + // Local type, not preceded by anything + // Notice that the types after the first local type do not matter at all + goal { forall { LocalImplAllowed(Local: UpstreamTrait2) } } yields { expect![["Unique"]] } + goal { LocalImplAllowed(Local: UpstreamTrait2) } yields { expect![["Unique"]] } + goal { LocalImplAllowed(Local: UpstreamTrait2) } yields { expect![["Unique"]] } + + // Local type, but preceded by a fully visible type (i.e. 
no placeholder types) + goal { LocalImplAllowed(Upstream: UpstreamTrait2) } yields { expect![["Unique"]] } + goal { LocalImplAllowed(Upstream2: UpstreamTrait2) } yields { expect![["Unique"]] } + goal { LocalImplAllowed(Upstream2: UpstreamTrait2) } yields { expect![["Unique"]] } + + // Type parameter covered by the local type + goal { forall { LocalImplAllowed(Upstream: UpstreamTrait2>) } } yields { expect![["Unique"]] } + goal { forall { LocalImplAllowed(Upstream2: UpstreamTrait2>) } } yields { expect![["Unique"]] } + goal { forall { LocalImplAllowed(Upstream2: UpstreamTrait2>) } } yields { expect![["Unique"]] } + + // Type parameter covered by a deeply nested upstream type + // Notice that it does not matter that the T is wrapped in a local type because the outer + // type is still upstream + goal { forall { LocalImplAllowed(Upstream2>: UpstreamTrait2>) } } yields { expect![["No possible solution"]] } + // Does not matter whether the covered type parameter is eventually covered or not by the + // first actually local type found + goal { forall { LocalImplAllowed(Upstream2>: UpstreamTrait2>) } } yields { expect![["No possible solution"]] } + } + + test! { + program { + trait Clone { } + trait Eq { } + // Lifetime is just included to show that it does not break anything. + // Where clauses do not change the results at all. 
+ #[upstream] trait UpstreamTrait<'a, T, U, V> where T: Clone, U: Eq, V: Clone, V: Eq { } + trait InternalTrait<'a, T, U, V> where T: Clone, U: Eq, V: Clone, V: Eq { } + + #[upstream] struct Upstream { } + #[upstream] struct Upstream2 { } + struct Local { } + } + + // Local traits can be implemented regardless of the types involved + goal { forall { LocalImplAllowed(Self: InternalTrait<'a, T, U, V>) } } yields { expect![["Unique"]] } + + // Upstream traits definitely cannot be implemented for all types + goal { forall { LocalImplAllowed(Self: UpstreamTrait<'a, T, U, V>) } } yields { expect![["No possible solution"]] } + + // No local types + goal { forall<'a> { LocalImplAllowed(Upstream2: UpstreamTrait<'a, Upstream, Upstream, Upstream>) } } yields { expect![["No possible solution"]] } + goal { forall<'a> { LocalImplAllowed(Upstream2: UpstreamTrait< + 'a, + Upstream2, + Upstream2>>, + Upstream2> + >) } } yields { expect![["No possible solution"]] } + + // Local type, not preceded by anything -- types after the first local type do not matter + goal { forall<'a, T, U, V> { LocalImplAllowed(Local: UpstreamTrait<'a, T, U, V>) } } yields { expect![["Unique"]] } + goal { forall<'a, U, V> { LocalImplAllowed(Local: UpstreamTrait<'a, Local, U, V>) } } yields { expect![["Unique"]] } + goal { forall<'a, U, V> { LocalImplAllowed(Local: UpstreamTrait<'a, Upstream, U, V>) } } yields { expect![["Unique"]] } + goal { forall<'a> { LocalImplAllowed(Local: UpstreamTrait<'a, Upstream, Local, Local>) } } yields { expect![["Unique"]] } + + // Local type preceded by a type that is not fully visible + goal { forall<'a, T> { LocalImplAllowed(T: UpstreamTrait<'a, Upstream, Upstream, Local>) } } yields { expect![["No possible solution"]] } + goal { forall<'a, T> { LocalImplAllowed(Upstream: UpstreamTrait<'a, T, Upstream, Local>) } } yields { expect![["No possible solution"]] } + goal { forall<'a, T> { LocalImplAllowed(Upstream: UpstreamTrait<'a, Upstream, T, Local>) } } yields { expect![["No 
possible solution"]] } + + // Once again, types after the first local do not matter + goal { forall<'a, T> { LocalImplAllowed(Upstream: UpstreamTrait<'a, Upstream, Local, T>) } } yields { expect![["Unique"]] } + } +} diff --git a/tests/test/coinduction.rs b/tests/test/coinduction.rs new file mode 100644 index 00000000000..65bf0f328f2 --- /dev/null +++ b/tests/test/coinduction.rs @@ -0,0 +1,640 @@ +//! Tests targeting coinduction specifically + +use super::*; + +#[test] +fn mixed_semantics() { + test! { + program { + #[coinductive] trait Send { } + trait Foo { } + + struct Bar { } + + impl Send for Bar where Bar: Foo { } + impl Foo for Bar where Bar: Send { } + } + + // We have a cycle `(T: Send) :- (T: Foo) :- (T: Send)` with a non-coinductive + // inner component `T: Foo` so we reject it. + goal { + Bar: Send + } yields { + expect![["No possible solution"]] + } + + goal { + Bar: Foo + } yields { + expect![["No possible solution"]] + } + } +} + +#[test] +fn coinductive_unification_forall() { + test! { + program { + #[coinductive] + trait C1 { } + #[coinductive] + trait C2 { } + #[coinductive] + trait C3 { } + + struct X { } + struct Y { } + + forall { T: C1 if T: C2, T = X } + forall { T: C2 if T: C3, T = Y } + forall { T: C3 if T: C1, T: C2 } + } + + goal { + forall { T: C1 } + } yields { + expect![["No possible solution"]] + } + } +} + +#[test] +fn coinductive_unification_exists() { + test! { + program { + #[coinductive] + trait C1 { } + #[coinductive] + trait C2 { } + #[coinductive] + trait C3 { } + + struct X { } + struct Y { } + + forall { T: C1 if T: C2, T = X } + forall { T: C2 if T: C3, T = Y } + forall { T: C3 if T: C1, T: C2 } + } + + goal { + exists { T: C1 } + } yields { + expect![["No possible solution"]] + } + } +} + +#[test] +fn coinductive_nontrivial() { + test! 
{ + program { + #[coinductive] + trait C1 { } + trait C2 { } + + struct X { } + struct Y { } + + forall { A: C1 if B: C1, B = X, A: C2 } + impl C2 for Y { } + } + + goal { + exists { T: C1 } + } yields { + expect![["No possible solution"]] + } + } +} + +#[test] +fn coinductive_trivial_variant1() { + test! { + program { + #[coinductive] + trait C1 { } + #[coinductive] + trait C2 { } + + struct X { } + + forall { A: C1 if A: C2, A = X, B = X } + forall { A: C2 if B: C1 } + } + + goal { + exists { T: C1 } + } yields { + expect![["Unique; substitution [?0 := X, ?1 := X]"]] + } + } +} + +#[test] +fn coinductive_trivial_variant2() { + test! { + program { + #[coinductive] + trait C1 { } + #[coinductive] + trait C2 { } + + struct X { } + struct Y { } + + forall { A: C1 if A: C2, A = X } + forall { A: C2 if B: C1 } + } + + goal { + exists { T: C1 } + } yields { + expect![["Unique; substitution [?0 := X, ?1 := X]"]] + } + } +} + +#[test] +fn coinductive_trivial_variant3() { + test! { + program { + #[coinductive] + trait C1 { } + + forall { A: C1 if B: C1 } + } + + goal { + exists { T: C1 } + } yields { + expect![["Unique; for { substitution [?0 := ^0.0, ?1 := ^0.1] }"]] + } + } +} + +/// Test a tricky case for coinductive handling: +/// +/// While proving C1, we try to prove C2, which recursively requires +/// proving C1. If you are naive, you will assume that C2 therefore +/// holds -- but this is wrong, because C1 later fails when proving +/// C3. +#[test] +fn coinductive_unsound1() { + test! 
{ + program { + trait C1orC2 { } + + #[coinductive] + trait C1 { } + + #[coinductive] + trait C2 { } + + #[coinductive] + trait C3 { } + + forall { + T: C1 if T: C2, T: C3 + } + + forall { + T: C2 if T: C1 + } + + forall { + T: C1orC2 if T: C1 + } + + forall { + T: C1orC2 if T: C2 + } + } + + goal { + forall { X: C1orC2 } + } yields { + expect![["No possible solution"]] + } + } +} + +/// The only difference between this test and `coinductive_unsound1` +/// is the order of the final `forall` clauses. +#[test] +fn coinductive_unsound2() { + test! { + program { + trait C1orC2 { } + + #[coinductive] + trait C1 { } + + #[coinductive] + trait C2 { } + + #[coinductive] + trait C3 { } + + forall { + T: C1 if T: C2, T: C3 + } + + forall { + T: C2 if T: C1 + } + + forall { + T: C1orC2 if T: C2 + } + + forall { + T: C1orC2 if T: C1 + } + } + + goal { + forall { X: C1orC2 } + } yields { + expect![["No possible solution"]] + } + } +} + +/// Tests whether a nested coinductive cycle +/// that is also unsound is handled correctly. +#[test] +fn coinductive_unsound_nested() { + test! { + program { + trait C1orC2 { } + + #[coinductive] + trait C1 { } + + #[coinductive] + trait C2 { } + + #[coinductive] + trait C3 { } + + #[coinductive] + trait C4 { } + + forall { + T: C4 if T:C2, T: C3 + } + + forall { + T: C1 if T: C2, T: C3 + } + + forall { + T: C2 if T: C1, T: C4 + } + + forall { + T: C1orC2 if T: C1 + } + + forall { + T: C1orC2 if T: C2 + } + } + + goal { + forall { X: C1orC2 } + } yields { + expect![["No possible solution"]] + } + } +} + +/// Test with two nested coinductive cycles where the inner fails +/// whereas the outer holds. No false positives should be kept from +/// the inner cycle. +#[test] +fn coinductive_unsound_nested2() { + test! 
{ + program { + trait C1andC2 { } + + #[coinductive] + trait C1 { } + + #[coinductive] + trait C2 { } + + #[coinductive] + trait C3 { } + + #[coinductive] + trait C4 { } + + #[coinductive] + trait C5 { } + + #[coinductive] + trait C6 { } + + #[coinductive] + trait C7 { } + + forall { + T: C2 if T: C5 + } + + forall { + T: C6 if T: C2, T: C7 + } + + forall { + T: C5 if T:C6 + } + + forall { + T: C4 if T: C1 + } + + forall { + T: C3 if T: C5 + } + + forall { + T: C3 if T: C4 + } + + forall { + T: C1 if T: C3 + } + + forall { + T: C1andC2 if T: C1, T: C2 + } + } + + goal { + forall { X: C1andC2 } + } yields { + expect![["No possible solution"]] + } + } +} + +/// Another test with two nested coinductive cycles. +/// Here the inner cycle is also dependent on the outer one. +#[test] +fn coinductive_unsound_inter_cycle_dependency() { + test! { + program { + trait C1andC2 { } + + #[coinductive] + trait C1 { } + + #[coinductive] + trait C2 { } + + #[coinductive] + trait C3 { } + + #[coinductive] + trait C4 { } + + #[coinductive] + trait C5 { } + + #[coinductive] + trait C6 { } + + #[coinductive] + trait C7 { } + + forall { + T: C2 if T: C5, T: C1 + } + + forall { + T: C6 if T: C2, T: C7 + } + + forall { + T: C5 if T:C6 + } + + forall { + T: C4 if T: C1 + } + + forall { + T: C3 if T: C5 + } + + forall { + T: C3 if T: C4 + } + + forall { + T: C1 if T: C3 + } + + forall { + T: C1andC2 if T: C1, T: C2 + } + } + + goal { + forall { X: C1andC2 } + } yields { + expect![["No possible solution"]] + } + } +} + +#[test] +fn coinductive_multicycle1() { + test! 
{ + program { + trait Any { } + + #[coinductive] + trait C1 { } + + #[coinductive] + trait C2 { } + + #[coinductive] + trait C3 { } + + forall { + T: C1 if T: C2 + } + + forall { + T: C2 if T: C3 + } + + forall { + T: C3 if T: C1 + } + + forall { + T: Any if T: C3 + } + + forall { + T: Any if T: C2 + } + + forall { + T: Any if T: C1 + } + } + + goal { + forall { X: Any } + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn coinductive_multicycle2() { + test! { + program { + trait Any { } + + #[coinductive] + trait C1 { } + + #[coinductive] + trait C2 { } + + #[coinductive] + trait C3 { } + + forall { + T: C1 if T: C2 + } + + forall { + T: C2 if T: C3 + } + + forall { + T: C3 if T: C1, T: C2 + } + + forall { + T: Any if T: C1 + } + } + + goal { + forall { X: Any } + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn coinductive_multicycle3() { + test! { + program { + trait Any { } + + #[coinductive] + trait C1 { } + + #[coinductive] + trait C2 { } + + #[coinductive] + trait C3 { } + + trait C4 { } + + forall { + T: C1 if T: C2 + } + + forall { + T: C2 if T: C3, T: C4 + } + + forall { + T: C3 if T: C1 + } + + forall { + T: Any if T: C3 + } + + forall { + T: Any if T: C2 + } + + forall { + T: Any if T: C1 + } + } + + goal { + forall { X: Any } + } yields { + expect![["No possible solution"]] + } + } +} + +#[test] +fn coinductive_multicycle4() { + test! 
{ + program { + trait Any { } + + #[coinductive] + trait C1 { } + + #[coinductive] + trait C2 { } + + #[coinductive] + trait C3 { } + + trait C4 { } + + forall { + T: C1 if T: C2 + } + + forall { + T: C2 if T: C3 + } + + forall { + T: C3 if T: C1, T: C4 + } + + forall { + T: Any if T: C3 + } + + forall { + T: Any if T: C2 + } + + forall { + T: Any if T: C1 + } + } + + goal { + forall { X: Any } + } yields { + expect![["No possible solution"]] + } + } +} diff --git a/tests/test/constants.rs b/tests/test/constants.rs new file mode 100644 index 00000000000..2189d14f8c6 --- /dev/null +++ b/tests/test/constants.rs @@ -0,0 +1,149 @@ +//! Tests related to const generics. + +use super::*; + +#[test] +fn single_impl() { + test! { + program { + struct S {} + + trait Trait {} + + impl Trait for S<3> {} + } + + goal { + exists { + S: Trait + } + } yields { + expect![["Unique; substitution [?0 := 3]"]] + } + + goal { + S<3>: Trait + } yields { + expect![["Unique"]] + } + + goal { + S<5>: Trait + } yields { + expect![["No possible solution"]] + } + + + goal { + forall { + S: Trait + } + } yields { + expect![["No possible solution"]] + } + + } +} + +#[test] +fn multi_impl() { + test! { + program { + struct S {} + + trait Trait {} + + impl Trait for S<3> {} + impl Trait for S<5> {} + } + + goal { + exists { + S: Trait + } + } yields { + expect![["Ambiguous; no inference guidance"]] + } + + goal { + forall { + S: Trait + } + } yields { + expect![["No possible solution"]] + } + + } +} + +#[test] +fn generic_impl() { + test! { + program { + struct S {} + + trait Trait {} + + impl Trait for S {} + } + + goal { + exists { + S: Trait + } + } yields { + expect![["Unique; for { substitution [?0 := ^0.0] }"]] + } + + goal { + forall { + S: Trait + } + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn placeholders_eq() { + test! 
{ + goal { + forall { + C = D + } + } yields { + expect![["No possible solution"]] + } + + goal { + exists { + forall { + C = D + } + } + } yields { + expect![["No possible solution"]] + } + + goal { + forall { + exists { + C = D + } + } + } yields { + expect![["Unique; substitution [?0 := !1_0]"]] + } + + goal { + forall { + exists { + C1 = D1, C2 = D2, D1 = D2 + } + } + } yields { + expect![["No possible solution"]] + } + } +} diff --git a/tests/test/coroutines.rs b/tests/test/coroutines.rs new file mode 100644 index 00000000000..85f2700b6b1 --- /dev/null +++ b/tests/test/coroutines.rs @@ -0,0 +1,135 @@ +use super::*; + +#[test] +fn coroutine_test() { + test! { + program { + #[auto] trait Send { } + + #[lang(coroutine)] + trait Coroutine { + type Yield; + type Return; + } + + struct StructOne {} + struct NotSend {} + struct SendSameLifetime<'a, 'b, T> { val: &'a T, other: &'b T } + impl<'a, T> Send for SendSameLifetime<'a, 'a, T> {} + + struct SendAnyLifetime<'a, 'b, T> { val: &'a u8, other: &'b u8, field: T } + + impl !Send for NotSend {} + struct StructThree<'a> { val: &'a () } + + coroutine empty_gen<>[resume = (), yield = ()] { + upvars [] + witnesses [] + } + + coroutine upvar_lifetime_restrict[resume = (), yield = ()] { + upvars [T; StructOne] + witnesses exists<'a, 'b> [SendSameLifetime<'a, 'b, T>] + } + + coroutine send_any_lifetime[resume = (), yield = ()] { + upvars [] + witnesses exists<'a, 'b> [SendAnyLifetime<'a, 'b, T>; u8] + } + + coroutine not_send_resume_yield<>[resume = NotSend, yield = NotSend] { + upvars [] + witnesses [] + } + + coroutine gen_with_types[resume = U, yield = StructOne] -> NotSend { + upvars [] + witnesses [] + } + } + + goal { + WellFormed(empty_gen) + } yields { + expect![["Unique"]] + } + + goal { + empty_gen: Send + } yields { + expect![["Unique"]] + } + + goal { + empty_gen: Coroutine<()> + } yields { + expect![["Unique"]] + } + + goal { + forall { + gen_with_types: Coroutine + } + } yields { + expect![["Unique"]] + } + + 
goal { + forall { + Normalize( as Coroutine>::Yield -> StructOne) + } + } yields { + expect![["Unique"]] + } + + goal { + forall { + Normalize( as Coroutine>::Return -> NotSend) + } + } yields { + expect![["Unique"]] + } + + goal { + forall { + upvar_lifetime_restrict: Send + } + } yields { + expect![["No possible solution"]] + } + + goal { + forall { + if (T: Send) { + upvar_lifetime_restrict: Send + } + } + } yields { + expect![["Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: '!2_0: '!2_1 }, InEnvironment { environment: Env([]), goal: '!2_1: '!2_0 }]"]] + } + + goal { + not_send_resume_yield: Send + } yields { + expect![["Unique"]] + } + + goal { + forall { + if (T: Send) { + send_any_lifetime: Send + } + } + } yields { + expect![["Unique"]] + } + + goal { + forall { + send_any_lifetime: Send + } + } yields { + expect![["No possible solution"]] + } + } +} diff --git a/tests/test/cycle.rs b/tests/test/cycle.rs new file mode 100644 index 00000000000..fab9e7de3fc --- /dev/null +++ b/tests/test/cycle.rs @@ -0,0 +1,384 @@ +//! Tests related to cycles amongst impls, which we try to handle with +//! grace. + +use super::*; + +#[test] +fn inner_cycle() { + // Interesting test that shows why recursive solver needs to run + // to an inner fixed point during iteration. Here, the first + // round, we get that `?T: A` has a unique sol'n `?T = i32`. On + // the second round, we ought to get ambiguous: but if we don't + // run the `?T: B` to a fixed point, it will terminate with `?T = + // i32`, leading to an (incorrect) unique solution. + test! { + program { + #[marker] + trait A { } + #[marker] + trait B { } + + struct Foo { } + struct Vec { } + + impl A for T where T: B { } + impl A for Foo { } + + impl B for T where T: A { } + impl B for Vec where T: B { } + } + + goal { + exists { T: A } + } yields { + expect![["Ambiguous; no inference guidance"]] + } + } +} + +#[test] +fn cycle_no_solution() { + test! 
{ + program { + trait Foo { } + struct S { } + impl Foo for S where T: Foo { } + } + + // only solution: infinite type S { + T: Foo + } + } yields { + expect![["No possible solution"]] + } + } +} + +#[test] +fn cycle_many_solutions() { + test! { + program { + trait Foo { } + struct S { } + struct Zero { } + impl Foo for S where T: Foo { } + impl Foo for Zero { } + } + + // infinite family of solutions: {Zero, S, S>, ... } + goal { + exists { + T: Foo + } + } yields { + expect![["Ambiguous; no inference guidance"]] + } + } +} + +#[test] +fn cycle_unique_solution() { + test! { + program { + trait Foo { } + trait Bar { } + struct S { } + struct Zero { } + impl Foo for S where T: Foo, T: Bar { } + impl Foo for Zero { } + } + + goal { + exists { + T: Foo + } + } yields { + expect![["Unique; substitution [?0 := Zero]"]] + } + } +} + +#[test] +fn multiple_ambiguous_cycles() { + test! { + program { + trait WF { } + trait Sized { } + + struct Vec { } + struct Int { } + + impl Sized for Int { } + impl WF for Int { } + + impl WF for Vec where T: Sized { } + impl Sized for Vec where T: WF, T: Sized { } + } + + // ?T: WF + // | + // | + // | + // Int: WF. <-----> (Vec: WF) :- (?T: Sized) + // | + // | + // | + // Int: Sized. <-------> (Vec: Sized) :- (?T: Sized), (?T: WF) + // | | + // | | + // | | + // cycle cycle + // + // Depending on the evaluation order of the above tree (which cycle we come upon first), + // we may fail to reach a fixed point if we loop continuously because `Ambig` does not perform + // any unification. We must stop looping as soon as we encounter `Ambig`. In fact without + // this strategy, the above program will not even be loaded because of the overlap check which + // will loop forever. + goal { + exists { + T: WF + } + } yields { + expect![["Ambiguous; no inference guidance"]] + } + } +} + +#[test] +fn overflow() { + test! 
{ + program { + trait Q { } + struct Z { } + struct G { } + struct S { } + + impl Q for Z { } + impl Q for G where X: Q { } + impl Q for S where X: Q, S>: Q { } + } + + // Will try to prove S>: Q then S>>: Q etc ad infinitum + goal { + S: Q + } yields[SolverChoice::slg(10, None)] { + expect![["Ambiguous; no inference guidance"]] + } yields[SolverChoice::recursive_default()] { + expect![["Ambiguous; no inference guidance"]] + } + } +} + +#[test] +fn overflow_universe() { + test! { + program { + struct Foo { } + + trait Bar { } + + // When asked to solve X: Bar, we will produce a + // requirement to solve !1_0: Bar. And then when asked to + // solve that, we'll produce a requirement to solve !1_1: + // Bar. And so forth. + forall { X: Bar if forall { Y: Bar } } + } + + goal { + Foo: Bar + } yields { + // The internal universe canonicalization in the on-demand/recursive + // solver means that when we are asked to solve (e.g.) + // `!1_1: Bar`, we rewrite that to `!1_0: Bar`, identifying a + // cycle. + expect![["No possible solution"]] + } + } +} + +#[test] +fn infinite_recursion() { + test! { + program { + trait A { } + trait B { } + trait C { } + trait D { } + + struct Vec { } + impl A for Vec where T: B { } + impl B for Vec where T: C { } + impl C for Vec where T: D { } + impl D for Vec where T: A { } + } + + goal { + exists { T: A } + } yields_all[SolverChoice::slg(10, None)] { + } + } +} + +// Regression test for chalk#571 +#[test] +fn cycle_with_ambiguity() { + test! 
{ + program { + #[lang(sized)] + trait Sized { } + trait From {} + trait ToOwned { + type Owned; + } + + impl ToOwned for [T] where T: Sized { + type Owned = Vec; + } + + struct Rc { } + + struct Vec {} + struct Cow {} + + impl From> for Rc<[T]> {} + impl From> for Rc + where + B: ToOwned, + Rc: From<::Owned> + { + } + } + + goal { + exists { + Rc: From + } + } yields[SolverChoice::slg_default()] { + expect![["Ambiguous; no inference guidance"]] + } + } +} + +#[test] +fn inductive_canonical_cycle() { + test! { + program { + trait Trait {} + + trait IsNotU32 {} + impl IsNotU32 for i32 {} + impl IsNotU32 for i16 {} + + impl Trait for () + where + (): Trait, + T: IsNotU32, + {} + impl Trait for () {} + } + + goal { + (): Trait + } yields { + expect![["Unique"]] + } + + goal { + (): Trait + } yields { + expect![["Unique"]] + } + + goal { + exists { + (): Trait + } + } yields[SolverChoice::slg(10, None)] { + expect![["Ambiguous; no inference guidance"]] + } yields[SolverChoice::recursive_default()] { + expect![["Ambiguous; no inference guidance"]] + } + } +} + +#[test] +fn mixed_cycle_detection_not_on_stack1() { + test! { + program { + #[coinductive] + trait A {} + #[coinductive] + trait B {} + trait C {} + + impl A for () + where + (): B, + (): C, + {} + + impl B for () + where + (): A, + {} + + impl C for () + where + (): B, + {} + } + + goal { + exists { + (): A + } + } yields[SolverChoice::slg(10, None)] { + expect![["No possible solution"]] + } yields[SolverChoice::recursive_default()] { + expect![["No possible solution"]] + } + } +} + +#[test] +fn mixed_cycle_detection_not_on_stack2() { + test! 
{ + program { + #[coinductive] + trait A {} + #[coinductive] + trait B {} + trait C {} + + impl A for () + where + (): C, + (): B, + {} + + impl B for () + where + (): A, + {} + + impl C for () + where + (): B, + {} + } + + goal { + exists { + (): A + } + } yields[SolverChoice::slg(10, None)] { + // FIXME: this should be no solution as `C` is inductive + expect![["Unique; for { substitution [?0 := ^0.0] }"]] + } yields[SolverChoice::recursive_default()] { + // FIXME: this should be no solution as `C` is inductive + expect![["Unique; for { substitution [?0 := ^0.0] }"]] + } + } +} diff --git a/tests/test/discriminant_kind.rs b/tests/test/discriminant_kind.rs new file mode 100644 index 00000000000..ca9c832337f --- /dev/null +++ b/tests/test/discriminant_kind.rs @@ -0,0 +1,188 @@ +use super::*; + +// Test that user-provided impls of `Discriminantkind` are prohibited +#[test] +fn no_discriminant_kind_impls() { + lowering_error! { + program { + #[lang(discriminant_kind)] + trait DiscriminantKind { + type Discriminant; + } + + impl DiscriminantKind for u32 { + type Discriminant = u32; + } + } error_msg { + "trait impl for `DiscriminantKind` does not meet well-formedness requirements" + } + } +} + +// Test that all types are implementing DiscriminantKind +#[test] +fn discriminant_kind_impl() { + test! { + program { + #[lang(discriminant_kind)] + trait DiscriminantKind { + type Discriminant; + } + + #[object_safe] + trait Principal {} + + struct A { } + } + + goal { + A: DiscriminantKind + } yields { + expect![["Unique"]] + } + + goal { + i32: DiscriminantKind + } yields { + expect![["Unique"]] + } + + goal { + (i32, A): DiscriminantKind + } yields { + expect![["Unique"]] + } + + goal { + forall<'a> { + dyn Principal + 'a: DiscriminantKind + } + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn discriminant_kind_assoc() { + test! 
{ + program { + #[lang(discriminant_kind)] + trait DiscriminantKind { + type Discriminant; + } + + #[object_safe] + trait Principal {} + + enum A { } + + #[repr(isize)] + enum B { } + + #[repr(i32)] + enum C {} + + #[repr(u32)] + enum D {} + + #[repr(usize)] + enum E {} + + coroutine empty_gen<>[resume = (), yield = ()] { + upvars [] + witnesses [] + } + } + + // Discriminant for types with no discriminant should be u8 + goal { + Normalize(::Discriminant -> u8) + } yields { + expect![["Unique"]] + } + + // Same as above + goal { + forall<'a> { + Normalize(::Discriminant -> u8) + } + } yields { + expect![["Unique"]] + } + + // Discriminant for enums with unspecified discriminant should be isize + goal { + Normalize(::Discriminant -> isize) + } yields { + expect![["Unique"]] + } + + // Discriminant should be the same as specified in `repr` + // ----- + goal { + Normalize(::Discriminant -> isize) + } yields { + expect![["Unique"]] + } + + goal { + Normalize(::Discriminant -> i32) + } yields { + expect![["Unique"]] + } + + goal { + Normalize(::Discriminant -> u32) + } yields { + expect![["Unique"]] + } + + goal { + Normalize(::Discriminant -> usize) + } yields { + expect![["Unique"]] + } + //-------- + + // Coroutines have u32 as the discriminant + goal { + Normalize(::Discriminant -> u32) + } yields { + expect![["Unique"]] + } + + // Placeholders don't have a determined discriminant + goal { + forall { + exists { + ::Discriminant = U + } + } + } yields { + expect![["Unique; substitution [?0 := (DiscriminantKind::Discriminant)]"]] + } + } +} + +#[test] +fn discriminant_kind_with_infer_var() { + test! 
{ + program { + #[lang(discriminant_kind)] + trait DiscriminantKind { + type Discriminant; + } + + enum Option {} + } + + goal { + exists { + Normalize( as DiscriminantKind>::Discriminant -> isize) + } + } yields { + expect![[r#"Unique; for { substitution [?0 := ^0.0] }"#]] + } + } +} diff --git a/tests/test/dispatch_from_dyn.rs b/tests/test/dispatch_from_dyn.rs new file mode 100644 index 00000000000..cb7ec43fcdb --- /dev/null +++ b/tests/test/dispatch_from_dyn.rs @@ -0,0 +1,198 @@ +use crate::test::*; + +#[test] +fn dispatch_from_dyn() { + test! { + program { + #[lang(dispatch_from_dyn)] + trait DispatchFromDyn {} + + impl<'a, T, U> DispatchFromDyn<&'a U> for &'a T {} + } + + // Smoke test that DispatchFromDyn works just like any other impl. + goal { + forall<'a> { + &'a u8: DispatchFromDyn<&'a u8> + } + } yields { + "Unique" + } + } +} + +#[test] +fn dispatch_from_dyn_wf() { + lowering_success! { + program { + #[lang(dispatch_from_dyn)] + trait DispatchFromDyn {} + + #[one_zst] + struct Zst {} + + struct Foo { + f: *mut T, + f2: Zst, + } + + // References and pointers + impl<'a, T, U> DispatchFromDyn<&'a U> for &'a T {} + impl<'a, T, U> DispatchFromDyn<&'a mut U> for &'a mut T {} + impl DispatchFromDyn<*const U> for *const T {} + impl DispatchFromDyn<*mut U> for *mut T {} + + // Struct + impl DispatchFromDyn> for Foo {} + } + } + + // Reference: mutability mismatch + lowering_error! { + program { + #[lang(dispatch_from_dyn)] + trait DispatchFromDyn {} + + impl<'a, T, U> DispatchFromDyn<&'a U> for &'a mut T {} + } error_msg { + "trait impl for `DispatchFromDyn` does not meet well-formedness requirements" + } + } + + // Raw pointer: mutability mismatch + lowering_error! { + program { + #[lang(dispatch_from_dyn)] + trait DispatchFromDyn {} + + impl<'a, T, U> DispatchFromDyn<*mut U> for *const T {} + } error_msg { + "trait impl for `DispatchFromDyn` does not meet well-formedness requirements" + } + } + + // No non-ZST fields + lowering_error! 
{ + program { + #[lang(dispatch_from_dyn)] + trait DispatchFromDyn {} + + #[one_zst] + struct Zst {} + + struct Foo { + f: Zst, + } + + impl DispatchFromDyn> for Foo {} + } error_msg { + "trait impl for `DispatchFromDyn` does not meet well-formedness requirements" + } + } + + // Too many fields + lowering_error! { + program { + #[lang(dispatch_from_dyn)] + trait DispatchFromDyn {} + + struct Foo { + f: *mut T, + f2: u8, + } + + impl DispatchFromDyn> for Foo {} + } error_msg { + "trait impl for `DispatchFromDyn` does not meet well-formedness requirements" + } + } + + // Field does not impl DispatchFromDyn + lowering_error! { + program { + #[lang(dispatch_from_dyn)] + trait DispatchFromDyn {} + + struct Foo { + f: T, + } + + impl DispatchFromDyn> for Foo {} + } error_msg { + "trait impl for `DispatchFromDyn` does not meet well-formedness requirements" + } + } + + // Field type does not change + lowering_error! { + program { + #[lang(dispatch_from_dyn)] + trait DispatchFromDyn {} + + #[one_zst] + struct Zst {} + + struct Foo { + f: *const u8, + f2: Zst, + } + + impl DispatchFromDyn> for Foo {} + } error_msg { + "trait impl for `DispatchFromDyn` does not meet well-formedness requirements" + } + } + + // Different definitions + lowering_error! { + program { + #[lang(dispatch_from_dyn)] + trait DispatchFromDyn {} + + struct Foo { + f: *const T, + } + + struct Bar { + f: *const T, + } + + impl DispatchFromDyn> for Foo {} + } error_msg { + "trait impl for `DispatchFromDyn` does not meet well-formedness requirements" + } + } + + // Not a struct + lowering_error! { + program { + #[lang(dispatch_from_dyn)] + trait DispatchFromDyn {} + + enum Foo { + Bar(*const T), + } + + impl DispatchFromDyn> for Foo {} + } error_msg { + "trait impl for `DispatchFromDyn` does not meet well-formedness requirements" + } + } + + // repr(C) + lowering_error! 
{ + program { + #[lang(dispatch_from_dyn)] + trait DispatchFromDyn {} + + #[repr(C)] + struct Foo { + f: *mut T, + } + + impl DispatchFromDyn> for Foo {} + } error_msg { + "trait impl for `DispatchFromDyn` does not meet well-formedness requirements" + } + } +} diff --git a/tests/test/existential_types.rs b/tests/test/existential_types.rs new file mode 100644 index 00000000000..cf9be75bbdc --- /dev/null +++ b/tests/test/existential_types.rs @@ -0,0 +1,449 @@ +//! Tests related to the implied bounds rules. + +use super::*; + +#[test] +fn dyn_Clone_is_Clone() { + test! { + program { + trait Clone { } + } + + goal { + forall<'s> { + dyn Clone + 's: Clone + } + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn dyn_Clone_is_not_Send() { + test! { + program { + trait Clone { } + #[auto] trait Send { } + } + + goal { + forall<'s> { + dyn Clone + 's: Send + } + } yields { + expect![["No possible solution"]] + } + } +} + +#[test] +fn dyn_Clone_Send_is_Send() { + test! { + program { + trait Clone { } + #[auto] trait Send { } + } + + goal { + forall<'s> { + (dyn Clone + Send + 's): Send + } + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn dyn_Foo_Bar() { + test! { + program { + trait Foo { } + + struct Bar { } + struct Baz { } + } + + goal { + forall<'s> { + dyn Foo + 's: Foo + } + } yields { + expect![["No possible solution"]] + } + + goal { + forall<'s> { + exists { + dyn Foo + 's: Foo + } + } + } yields { + expect![["Unique; substitution [?0 := Bar]"]] + } + } +} + +#[test] +fn dyn_super_trait_simple() { + test! 
{ + program { + trait Foo {} + trait Bar where Self: Foo {} + + struct A {} + struct B {} + } + + goal { + forall<'s> { + dyn Bar + 's: Bar + } + } yields { + expect![["Unique"]] + } + + goal { + forall<'s> { + dyn Bar + 's: Foo + } + } yields { + expect![["Unique"]] + } + + goal { + forall<'s> { + dyn Bar + 's: Foo + } + } yields { + expect![["No possible solution"]] + } + + goal { + forall<'s> { + exists { + dyn Bar + 's: Foo + } + } + } yields { + expect![["Unique; substitution [?0 := B]"]] + } + } +} + +#[test] +fn dyn_super_trait_cycle() { + test! { + program { + trait Foo where Self: Bar {} + trait Bar where Self: Foo {} + + struct A {} + struct B {} + } + + // We currently can't prove this because of the cyclic where clauses. + // But importantly, we don't crash or get into an infinite loop. + goal { + forall<'s> { + dyn Bar + 's: Bar + } + } yields { + expect![["No possible solution"]] + } + } +} + +#[test] +fn dyn_super_trait_not_a_cycle() { + test! { + program { + trait Thing {} + trait Foo where Self: Thing {} + trait Bar where Self: Foo, Self: Thing {} + + struct A {} + struct B {} + } + + goal { + forall<'s> { + dyn Bar + 's: Foo + } + } yields { + expect![["Unique"]] + } + + goal { + forall<'s> { + dyn Bar + 's: Thing + } + } yields { + expect![["Unique"]] + } + + goal { + forall<'s> { + dyn Bar + 's: Thing + } + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn dyn_super_trait_higher_ranked() { + test! 
{ + program { + trait Foo<'a> {} + trait Bar<'a> where forall<'b> Self: Foo<'b> {} + trait Baz where forall<'a> Self: Bar<'a> {} + + struct A {} + struct B {} + } + + goal { + forall<'s> { + forall<'x> { + dyn Baz + 's: Bar<'x> + } + } + } yields { + expect![["Unique"]] + } + + goal { + forall<'s> { + forall<'x> { + dyn Baz + 's: Foo<'x> + } + } + } yields { + expect![["Unique"]] + } + + goal { + forall<'x, 'y, 's> { + dyn Bar<'y> + 's: Foo<'x> + } + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn dyn_super_trait_non_super_trait_clause() { + test! { + program { + trait Bar {} + trait Foo where A: Bar {} + + struct A {} + impl Bar for A {} + } + + goal { + forall<'s> { + dyn Foo + 's: Foo + } + } yields { + expect![["Unique"]] + } + + goal { + forall<'s> { + dyn Foo + 's: Bar + } + } yields { + expect![["No possible solution"]] + } + } +} + +#[test] +fn dyn_higher_ranked_type_arguments() { + test! { + program { + trait Foo { } + trait Bar { } + + struct Ref<'a> { } + } + + goal { + forall<'s> { + dyn forall<'a> Foo> + 's: Foo> + } + } yields { + expect![["Unique"]] + } + + goal { + forall<'s> { + dyn forall<'a> Foo> + Bar + 's: Foo> + } + } yields { + expect![["Unique"]] + } + + goal { + forall<'s> { + dyn forall<'a> Foo> + Bar + 's: Bar + } + } yields { + expect![["Unique"]] + } + + goal { + forall<'s> { + forall<'a> { + dyn Foo> + 's: Foo> + } + } + } yields { + // Note that this requires 'a == 's, so it would be resolveable later on. + expect![["Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '!2_0 }, InEnvironment { environment: Env([]), goal: '!2_0: '!1_0 }]"]] + } + } +} + +#[test] +fn dyn_binders_reverse() { + test! 
{ + program { + trait Fn { } + + trait Eq { } + + struct Refs<'a, 'b> { } + + impl Eq for A { } + } + + // Note: these constraints are ultimately unresolveable (we + // have to show that 'a == 'b, basically) + goal { + forall<'s> { + dyn forall<'a, 'b> Fn> + 's: Eq< + dyn forall<'c> Fn> + 's + > + } + } yields { + expect![["Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: '!5_0: '!5_1 }, InEnvironment { environment: Env([]), goal: '!5_1: '!5_0 }]"]] + } + + // Note: these constraints are ultimately unresolveable (we + // have to show that 'a == 'b, basically) + goal { + forall<'s> { + dyn forall<'c> Fn> + 's: Eq< + dyn forall<'a, 'b> Fn> + 's + > + } + } yields { + expect![["Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: '!3_0: '!3_1 }, InEnvironment { environment: Env([]), goal: '!3_1: '!3_0 }]"]] + } + + // Note: ordering of parameters is reversed here, but that's no problem + goal { + forall<'s> { + dyn forall<'c, 'd> Fn> + 's: Eq< + dyn forall<'a, 'b> Fn> + 's + > + } + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn dyn_lifetime_bound() { + test! { + program { + trait Foo { } + + trait Eq { } + + impl Eq for A { } + } + + goal { + forall<'a> { + forall<'b> { + dyn Foo + 'a: Eq + } + } + } yields { + expect![["Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '!2_0 }, InEnvironment { environment: Env([]), goal: '!2_0: '!1_0 }]"]] + } + } +} + +#[test] +fn dyn_associated_type_binding() { + test! { + program { + trait FnOnce { type Output; } + } + + goal { + exists { + forall<'s> { + + 's as FnOnce<()>>::Output = T + } + } + } yields[SolverChoice::recursive_default()] { + expect![["Unique; substitution [?0 := Int(I32)]"]] + } yields[SolverChoice::slg_default()] { + // #234 + expect![["Ambiguous; no inference guidance"]] + } + } +} + +#[test] +fn dyn_assoc_in_super_trait_bounds() { + test! 
{ + program { + trait Base { type Output; } + trait Trait where Self: Base {} + } + + goal { + forall<'s> { + dyn Trait + 's: Trait + } + } yields { + expect![[r#"Unique"#]] + } + + goal { + forall<'s> { + dyn Trait + 's: Base + } + } yields { + expect![[r#"Unique"#]] + } + } +} +#[test] +fn dyn_well_formed() { + test! { + program { + trait MyTrait {} + } + + goal { + exists<'s> { + WellFormed(dyn MyTrait + 's) + } + } yields { + expect![["Unique; for { substitution [?0 := '^0.0] }"]] + } + } +} diff --git a/tests/test/fn_def.rs b/tests/test/fn_def.rs new file mode 100644 index 00000000000..193d47dc201 --- /dev/null +++ b/tests/test/fn_def.rs @@ -0,0 +1,341 @@ +use super::*; + +#[test] +fn fn_def_is_well_formed() { + test! { + program { + fn foo(); + } + goal { + WellFormed(foo) + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn fn_def_is_sized() { + test! { + program { + #[lang(sized)] + trait Sized { } + + fn foo(); + } + goal { + foo: Sized + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn fn_def_is_copy() { + test! { + program { + #[lang(copy)] + trait Copy { } + + fn foo(); + } + goal { + foo: Copy + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn fn_def_is_clone() { + test! { + program { + #[lang(clone)] + trait Clone { } + + fn foo(); + } + goal { + foo: Clone + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn fn_def_implements_fn_traits() { + test! 
{ + program { + #[lang(fn_once)] + trait FnOnce { + type Output; + } + + #[lang(fn_mut)] + trait FnMut where Self: FnOnce { } + + #[lang(fn)] + trait Fn where Self: FnMut { } + + #[lang(future)] + trait Future { + type Output; + } + + #[lang(async_fn_once)] + trait AsyncFnOnce { + type CallOnceFuture: Future>::Output>; + #[lang(async_fn_once_output)] + type Output; + } + + #[lang(async_fn_mut)] + trait AsyncFnMut where Self: AsyncFnOnce { } + + #[lang(async_fn)] + trait AsyncFn where Self: AsyncFnMut { } + + struct ConcreteFuture { } + + impl Future for ConcreteFuture { + type Output = T; + } + + fn foo(); + fn bar(one: i32); + fn baz(one: i32) -> u8; + + fn qux() -> ConcreteFuture<()>; + fn quux(one: i32) -> ConcreteFuture<()>; + fn quuz(one: i32) -> ConcreteFuture; + } + + goal { + foo: Fn<()> + } yields { + expect![["Unique"]] + } + + goal { + Normalize(>::Output -> ()) + } yields { + expect![["Unique"]] + } + + goal { + bar: Fn<(i32,)> + } yields { + expect![["Unique"]] + } + + goal { + Normalize(>::Output -> ()) + } yields { + expect![["Unique"]] + } + + goal { + baz: Fn<(i32,)> + } yields { + expect![["Unique"]] + } + + goal { + Normalize(>::Output -> u8) + } yields { + expect![["Unique"]] + } + + goal { + qux: AsyncFn<()> + } yields { + expect![["Unique"]] + } + + goal { + Normalize(>::Output -> ()) + } yields { + expect![["Unique"]] + } + + goal { + quux: AsyncFn<(i32,)> + } yields { + expect![["Unique"]] + } + + goal { + Normalize(>::Output -> ()) + } yields { + expect![["Unique"]] + } + + goal { + quuz: AsyncFn<(i32,)> + } yields { + expect![["Unique"]] + } + + goal { + Normalize(>::Output -> u8) + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn generic_fn_implements_fn_traits() { + test! 
{ + program { + #[lang(fn_once)] + trait FnOnce { + type Output; + } + + #[lang(fn_mut)] + trait FnMut where Self: FnOnce { } + + #[lang(fn)] + trait Fn where Self: FnMut { } + + #[lang(future)] + trait Future { + type Output; + } + + #[lang(async_fn_once)] + trait AsyncFnOnce { + type CallOnceFuture: Future>::Output>; + #[lang(async_fn_once_output)] + type Output; + } + + #[lang(async_fn_mut)] + trait AsyncFnMut where Self: AsyncFnOnce { } + + #[lang(async_fn)] + trait AsyncFn where Self: AsyncFnMut { } + + struct ConcreteFuture { } + + impl Future for ConcreteFuture { + type Output = T; + } + + fn foo(t: T) -> T; + + fn bar(t: T) -> ConcreteFuture; + } + + goal { + exists { foo: Fn<(T,)> } + } yields { + expect![["Unique; for { substitution [?0 := ^0.0] }"]] + } + + goal { + forall { foo: Fn<(T,)> } + } yields { + expect![["Unique"]] + } + + goal { + exists { Normalize( as FnOnce<(T,)>>::Output -> T) } + } yields { + expect![["Unique; for { substitution [?0 := ^0.0] }"]] + } + + goal { + forall { Normalize( as FnOnce<(T,)>>::Output -> T) } + } yields { + expect![["Unique"]] + } + + goal { + exists { bar: AsyncFn<(T,)> } + } yields { + expect![["Unique; for { substitution [?0 := ^0.0] }"]] + } + + goal { + forall { bar: AsyncFn<(T,)> } + } yields { + expect![["Unique"]] + } + + goal { + exists { Normalize( as AsyncFnOnce<(T,)>>::Output -> T) } + } yields { + expect![["Unique; for { substitution [?0 := ^0.0] }"]] + } + + goal { + forall { Normalize( as AsyncFnOnce<(T,)>>::Output -> T) } + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn fn_defs() { + test! 
{ + program { + trait Foo { } + + struct Bar { } + + struct Xyzzy { } + impl Foo for Xyzzy { } + + fn baz(quux: T) -> T + where T: Foo; + + fn garply(thud: i32) -> i32; + } + + goal { + WellFormed(baz) + } yields { + expect![["No possible solution"]] + } + + goal { + WellFormed(baz) + } yields { + expect![["Unique"]] + } + + goal { + WellFormed(garply) + } yields { + expect![["Unique"]] + } + + } +} + +#[test] +fn fn_def_implied_bounds_from_env() { + test! { + program { + trait Foo { } + + struct Bar { } + impl Foo for Bar { } + + fn baz() where T: Foo; + } + goal { + if (FromEnv(baz)) { + Bar: Foo + } + } yields { + expect![["Unique"]] + } + } +} diff --git a/tests/test/foreign_types.rs b/tests/test/foreign_types.rs new file mode 100644 index 00000000000..0133730c993 --- /dev/null +++ b/tests/test/foreign_types.rs @@ -0,0 +1,77 @@ +//! Tests for foreign types + +use super::*; + +// foreign types don't implement any builtin traits +#[test] +fn foreign_ty_trait_impl() { + test! { + program { + extern type A; + trait Foo {} + impl Foo for A {} + } + + goal { A: Foo } yields { expect![["Unique"]] } + } +} + +#[test] +fn foreign_ty_lowering() { + lowering_success! { + program { + extern type A; + } + } +} + +// foreign types are always well-formed +#[test] +fn foreign_ty_is_well_formed() { + test! { + program { + extern type A; + } + + goal { WellFormed(A) } yields { expect![["Unique"]] } + } +} + +// foreign types don't implement any builtin traits +#[test] +fn foreign_ty_is_not_sized() { + test! { + program { + #[lang(sized)] trait Sized {} + extern type A; + } + + goal { not { A: Sized } } yields { expect![["Unique"]] } + } +} + +// foreign types don't implement any builtin traits +#[test] +fn foreign_ty_is_not_copy() { + test! { + program { + #[lang(copy)] trait Copy {} + extern type A; + } + + goal { not { A: Copy } } yields { expect![["Unique"]] } + } +} + +// foreign types don't implement any builtin traits +#[test] +fn foreign_ty_is_not_clone() { + test! 
{ + program { + #[lang(clone)] trait Clone {} + extern type A; + } + + goal { not { A: Clone } } yields { expect![["Unique"]] } + } +} diff --git a/tests/test/functions.rs b/tests/test/functions.rs new file mode 100644 index 00000000000..b8ef1da5e44 --- /dev/null +++ b/tests/test/functions.rs @@ -0,0 +1,469 @@ +use super::*; + +#[test] +fn functions_are_sized() { + test! { + program { + #[lang(sized)] + trait Sized { } + } + + goal { + fn(()): Sized + } yields { + expect![["Unique"]] + } + + goal { + fn([u8]): Sized + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn functions_are_copy() { + test! { + program { + #[lang(copy)] + trait Copy { } + } + + goal { + fn(()): Copy + } yields { + expect![["Unique"]] + } + + goal { + fn([u8]): Copy + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn function_implement_fn_traits() { + test! { + program { + #[lang(fn_once)] + trait FnOnce { + type Output; + } + + #[lang(fn_mut)] + trait FnMut where Self: FnOnce { } + + #[lang(fn)] + trait Fn where Self: FnMut { } + + #[lang(future)] + trait Future { + type Output; + } + + #[lang(async_fn_once)] + trait AsyncFnOnce { + type CallOnceFuture: Future>::Output>; + #[lang(async_fn_once_output)] + type Output; + } + + #[lang(async_fn_mut)] + trait AsyncFnMut where Self: AsyncFnOnce { } + + #[lang(async_fn)] + trait AsyncFn where Self: AsyncFnMut { } + + struct Ty { } + + trait Clone { } + + impl Clone for Ty { } + + opaque type MyOpaque: Clone = Ty; + + struct ConcreteFuture { } + + impl Future for ConcreteFuture { + type Output = T; + } + } + + // Simple test: make sure a fully monomorphic type implements FnOnce + goal { + fn(u8): FnOnce<(u8,)> + } yields { + expect![["Unique"]] + } + + // Same as above, but for FnMut + goal { + fn(u8): FnMut<(u8,)> + } yields { + expect![["Unique"]] + } + + // Same as above, but for Fn + goal { + fn(u8): Fn<(u8,)> + } yields { + expect![["Unique"]] + } + + // Same as above, but for AsyncFnOnce + goal { + fn(u8) -> 
ConcreteFuture<()>: AsyncFnOnce<(u8,)> + } yields { + expect![["Unique"]] + } + + // Same as above, but for AsyncFnMut + goal { + fn(u8) -> ConcreteFuture<()>: AsyncFnMut<(u8,)> + } yields { + expect![["Unique"]] + } + + // Same as above, but for AsyncFn + goal { + fn(u8) -> ConcreteFuture<()>: AsyncFn<(u8,)> + } yields { + expect![["Unique"]] + } + + // Make sure unsafe function pointers don't implement FnOnce + goal { + unsafe fn(u8): FnOnce<(u8,)> + } yields { + expect![["No possible solution"]] + } + // Same as above but for FnMut + goal { + unsafe fn(u8): FnMut<(u8,)> + } yields { + expect![["No possible solution"]] + } + // Same as above but for Fn + goal { + unsafe fn(u8): Fn<(u8,)> + } yields { + expect![["No possible solution"]] + } + // Same as above but for AsyncFnOnce + goal { + unsafe fn(u8) -> ConcreteFuture<()>: AsyncFnOnce<(u8,)> + } yields { + expect![["No possible solution"]] + } + // Same as above but for AsyncFnMut + goal { + unsafe fn(u8) -> ConcreteFuture<()>: AsyncFnMut<(u8,)> + } yields { + expect![["No possible solution"]] + } + // Same as above but for AsyncFn + goal { + unsafe fn(u8) -> ConcreteFuture<()>: AsyncFn<(u8,)> + } yields { + expect![["No possible solution"]] + } + + // Function pointers implicit return `()` when no return + // type is specified - make sure that normalization understands + // this + goal { + Normalize(>::Output -> ()) + } yields { + expect![["Unique"]] + } + + // Tests normalizing when an explicit return type is used + goal { + Normalize( bool as FnOnce<(u8,)>>::Output -> bool) + } yields { + expect![["Unique"]] + } + + // Normalizing pointer which returns `Future` with `AsycFnOnce::Output` + goal { + Normalize( ConcreteFuture as AsyncFnOnce<(u8,)>>::Output -> bool) + } yields { + expect![["Unique"]] + } + + // Tests that we fail to normalize when there's a mismatch with + // fully monomorphic types. 
+ goal { + Normalize( bool as FnOnce<(u8,)>>::Output -> u8) + } yields { + expect![["No possible solution"]] + } + goal { + Normalize( ConcreteFuture as AsyncFnOnce<(u8,)>>::Output -> u8) + } yields { + expect![["No possible solution"]] + } + + // Ensures that we don't find a solution when doing so would + // require us to conclude that two different universally quantified + // types (T and V) are equal. + goal { + forall { + Normalize( T as FnOnce<(u8, V)>>::Output -> V) + } + } yields { + expect![["No possible solution"]] + } + goal { + forall { + Normalize( ConcreteFuture as AsyncFnOnce<(u8, V)>>::Output -> V) + } + } yields { + expect![["No possible solution"]] + } + + // Tests that we can normalize a generic function pointer type + goal { + forall { + exists { + Normalize( T as FnOnce<(u8, V)>>::Output -> U) + } + } + } yields { + expect![["Unique; substitution [?0 := !1_0]"]] + } + goal { + forall { + exists { + Normalize( ConcreteFuture as AsyncFnOnce<(u8, V)>>::Output -> U) + } + } + } yields { + expect![["Unique; substitution [?0 := !1_0]"]] + } + + // Tests that we properly tuple function arguments when constructing + // the `FnOnce` impl + goal { + fn(u8, u32): FnOnce<(u8,u32)> + } yields { + expect![["Unique"]] + } + + // Tests that we properly tuple function arguments when constructing + // the `AsyncFnOnce` impl + goal { + fn(u8, u32) -> ConcreteFuture<()>: AsyncFnOnce<(u8,u32)> + } yields { + expect![["Unique"]] + } + + // Tests that we don't find a solution when fully monomorphic + // types are mismatched + goal { + fn(i32): FnOnce<(bool,)> + } yields { + expect![["No possible solution"]] + } + goal { + fn(i32) -> ConcreteFuture<()>: AsyncFnOnce<(bool,)> + } yields { + expect![["No possible solution"]] + } + + // Tests function pointer types that use the function's binder + // Universally quantified lifetimes that differ only in their + // name ('a vs 'b) should be considered equivalent here + goal { + forall<'a> { + for<'b> fn(&'b u8): FnOnce<(&'a 
u8,)> + } + } yields { + expect![["Unique"]] + } + goal { + forall<'a> { + for<'b> fn(&'b u8) -> ConcreteFuture<()>: AsyncFnOnce<(&'a u8,)> + } + } yields { + expect![["Unique"]] + } + + // Tests that a 'stricter' function (requires lifetimes to be the same) + // can implement `FnOnce` for a 'less strict' signature (dose not require + // lifetimes to be the same), provided that the lifetimes are *actually* + // the same. + goal { + forall<'a, 'b> { + for<'c> fn(&'c u8, &'c i32): FnOnce<(&'a u8, &'b i32)> + } + } yields { + expect![["Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '!1_1 }, InEnvironment { environment: Env([]), goal: '!1_1: '!1_0 }]"]] + } + goal { + forall<'a, 'b> { + for<'c> fn(&'c u8, &'c i32) -> ConcreteFuture<()>: AsyncFnOnce<(&'a u8, &'b i32)> + } + } yields { + expect![["Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '!1_1 }, InEnvironment { environment: Env([]), goal: '!1_1: '!1_0 }]"]] + } + + // Tests the opposite case as the previous test: a 'less strict' function + // (does not require lifetimes to be the same) can implement `FnOnce/AsyncFnOnce` + // for a 'stricter' signature (requires lifetimes to be the same) without + // any additional requirements + goal { + forall<'a> { + for<'b, 'c> fn(&'b u8, &'c i32): FnOnce<(&'a u8, &'a i32)> + } + } yields { + expect![["Unique"]] + } + goal { + forall<'a> { + for<'b, 'c> fn(&'b u8, &'c i32) -> ConcreteFuture<()>: AsyncFnOnce<(&'a u8, &'a i32)> + } + } yields { + expect![["Unique"]] + } + + // Similar to the above test, but for types instead of lifetimes: + // a 'stricter' function (requires types to be the same) can never + // implement `FnOnce/AsyncFnOnce` for a 'less strict' signature (does + // not require types to be the same) + goal { + forall { + fn(T, T): FnOnce<(T, U)> + } + } yields { + expect![["No possible solution"]] + } + goal { + forall { + fn(T, T) -> ConcreteFuture<()>: AsyncFnOnce<(T, U)> + } + } yields { + 
expect![["No possible solution"]] + } + + // Tests the opposite case as a previous test: a 'less strict' + // function can never implement 'FnOnce/AsyncFnOnce' for a 'more + // strict' signature (does not require types to bthe same) + goal { + forall { + fn(T, U): FnOnce<(T, T)> + } + } yields { + expect![["No possible solution"]] + } + goal { + forall { + fn(T, U) -> ConcreteFuture<()>: AsyncFnOnce<(T, T)> + } + } yields { + expect![["No possible solution"]] + } + + // Tests that we flounder for inference variables + goal { + exists { + T: FnOnce<()> + } + } yields_first[SolverChoice::slg(3, None)] { + expect![["Floundered"]] + } + goal { + exists { + T: AsyncFnOnce<()> + } + } yields_first[SolverChoice::slg(3, None)] { + expect![["Floundered"]] + } + + // No solution for alias type + goal { + MyOpaque: FnOnce<()> + } yields { + expect![["No possible solution"]] + } + goal { + MyOpaque: AsyncFnOnce<()> + } yields { + expect![["No possible solution"]] + } + } +} + +#[test] +fn functions_implement_fn_ptr_trait() { + test! 
{ + program { + #[lang(fn_ptr_trait)] + trait FnPtr {} + + closure closure_ty(self,) {} + fn fn_def(); + } + + goal { + fn(()) -> (): FnPtr + } yields { + expect![[r#"Unique"#]] + } + + goal { + unsafe fn(): FnPtr + } yields { + expect![[r#"Unique"#]] + } + + goal { + extern "C" fn(u32, ...): FnPtr + } yields { + expect![[r#"Unique"#]] + } + + goal { + for<'a> fn(&'a ()): FnPtr + } yields { + expect![[r#"Unique"#]] + } + + goal { + forall { + fn(T) -> U: FnPtr + } + } yields { + expect![[r#"Unique"#]] + } + + goal { + i32: FnPtr + } yields { + expect![[r#"No possible solution"#]] + } + + goal { + closure_ty: FnPtr + } yields { + expect![[r#"No possible solution"#]] + } + + goal { + fn_def: FnPtr + } yields { + expect![[r#"No possible solution"#]] + } + + goal { + exists { + T: FnPtr + } + } yields { + expect![[r#"Ambiguous; no inference guidance"#]] + } + } +} diff --git a/tests/test/implied_bounds.rs b/tests/test/implied_bounds.rs new file mode 100644 index 00000000000..a9f9a90a628 --- /dev/null +++ b/tests/test/implied_bounds.rs @@ -0,0 +1,159 @@ +//! Tests related to the implied bounds rules. + +use super::*; + +#[test] +fn implied_bounds() { + test! { + program { + trait Clone { } + trait Iterator where Self: Clone { type Item; } + struct Struct { } + } + + goal { + forall { + if (T: Iterator) { + T: Clone + } + } + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn gat_implied_bounds() { + test! { + program { + trait Clone { } + trait Foo { type Item: Clone; } + struct Struct { } + } + + goal { + forall { + if (T: Foo = V>) { + V: Clone + } + } + } yields { + expect![["Unique"]] + } + } + + test! { + program { + trait Clone { } + trait Foo { type Item; } + struct Struct { } + } + + goal { + forall { + if (T: Foo = V>) { + // Without the bound Item: Clone, there is no way to infer this. + V: Clone + } + } + } yields { + expect![["No possible solution"]] + } + } + + test! 
{ + program { + trait Fn { } + struct Ref<'a, T> { } + trait Sized { } + + trait Foo { + type Item: forall<'a> Fn> + Sized; + } + } + + goal { + forall { + if (Type: Foo) { + forall<'a, T> { + ::Item: Fn> + } + } + } + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn implied_from_env() { + test! { + program { + trait Clone { } + trait Foo { type Item; } + } + + goal { + forall { + if (FromEnv(>::Item)) { + FromEnv(T: Foo) + } + } + } yields { + expect![["Unique"]] + } + + goal { + forall { + if (FromEnv(>::Item)) { + FromEnv(T: Clone) + } + } + } yields { + expect![["No possible solution"]] + } + } +} + +#[test] +fn higher_ranked_implied_bounds() { + test! { + program { + trait Foo<'a> { } + trait Bar where forall<'a> Self: Foo<'a> { } + } + + goal { + forall { + if (T: Bar) { + forall<'a> { + T: Foo<'a> + } + } + } + } yields { + expect![["Unique"]] + } + } + + test! { + program { + trait Foo { } + trait Bar where forall Self: Foo { } + } + + goal { + forall { + if (T: Bar) { + forall { + T: Foo + } + } + } + } yields { + expect![["Unique"]] + } + } +} diff --git a/tests/test/impls.rs b/tests/test/impls.rs new file mode 100644 index 00000000000..aa15879dd9e --- /dev/null +++ b/tests/test/impls.rs @@ -0,0 +1,678 @@ +//! Tests related to the basic conversion of impls into logical predicates +//! and other core logic functions. + +use super::*; + +#[test] +fn prove_clone() { + test! 
{ + program { + struct Foo { } + struct Bar { } + struct Vec { } + trait Clone { } + impl Clone for Vec where T: Clone { } + impl Clone for Foo { } + } + + goal { + Vec: Clone + } yields { + expect![["Unique"]] + } + + goal { + Foo: Clone + } yields { + expect![["Unique"]] + } + + goal { + Bar: Clone + } yields { + expect![["No possible solution"]] + } + + goal { + Vec: Clone + } yields { + expect![["No possible solution"]] + } + } +} + +/// Test that given `?0: Map` where *either* `?0` or `?1` is +/// known, we can infer the other (but if neither is known, we get an +/// ambiguous result). +/// +/// In rustc today, if `?0` is not known we will not attempt to match +/// impls. +#[test] +fn prove_infer() { + test! { + program { + struct Foo { } + struct Bar { } + trait Map { } + impl Map for Foo { } + impl Map for Bar { } + } + + goal { + exists { A: Map } + } yields { + expect![["Ambiguous; no inference guidance"]] + } + + goal { + exists { A: Map } + } yields { + expect![["Unique; substitution [?0 := Foo]"]] + } + + goal { + exists { Foo: Map } + } yields { + expect![["Unique; substitution [?0 := Bar]"]] + } + } +} + +/// Test the interaction of `forall` goals and impls. For example, +/// test that we can prove things like +/// +/// ```notrust +/// forall { Vec: Marker } +/// ``` +/// +/// given a suitably generic impl. +#[test] +fn prove_forall() { + test! { + program { + struct Foo { } + struct Vec { } + + trait Marker { } + impl Marker for Vec { } + + trait Clone { } + impl Clone for Foo { } + + impl Clone for Vec where T: Clone { } + } + + goal { + forall { T: Marker } + } yields { + expect![["No possible solution"]] + } + + goal { + forall { not { T: Marker } } + } yields { + expect![["No possible solution"]] + } + + goal { + not { forall { T: Marker } } + } yields { + expect![["Unique"]] + } + + // If we assume `T: Marker`, then obviously `T: Marker`. 
+ goal { + forall { if (T: Marker) { T: Marker } } + } yields { + expect![["Unique"]] + } + + // We don't have to know anything about `T` to know that + // `Vec: Marker`. + goal { + forall { Vec: Marker } + } yields { + expect![["Unique"]] + } + + // Here, we don't know that `T: Clone`, so we can't prove that + // `Vec: Clone`. + goal { + forall { Vec: Clone } + } yields { + expect![["No possible solution"]] + } + + // Here, we do know that `T: Clone`, so we can. + goal { + forall { + if (T: Clone) { + Vec: Clone + } + } + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn higher_ranked() { + test! { + program { + struct BestType { } + struct SomeType { } + trait Foo { } + impl Foo for SomeType { } + } + + goal { + exists { + forall { + SomeType: Foo + } + } + } yields { + expect![["Unique; substitution [?0 := BestType]"]] + } + } +} + +#[test] +fn ordering() { + test! { + program { + trait Foo { } + impl Foo for U { } + } + + goal { + exists { + forall { + U: Foo + } + } + } yields { + expect![["No possible solution"]] + } + } +} + +/// Demonstrates that, given the expected value of the associated +/// type, we can use that to narrow down the relevant impls. +#[test] +fn normalize_rev_infer() { + test! { + program { + trait Identity { type Item; } + struct A { } + struct B { } + impl Identity for A { type Item = A; } + impl Identity for B { type Item = B; } + } + + goal { + exists { + T: Identity + } + } yields { + expect![["Unique; substitution [?0 := A]"]] + } + } +} + +/// Demonstrates that, given the expected value of the associated +/// type, we can use that to narrow down the relevant impls. +#[test] +fn normalize_rev_infer_gat() { + test! 
{ + program { + trait Combine { type Item; } + struct A { } + struct B { } + struct Either { } + impl Combine for A { type Item = Either; } + impl Combine for B { type Item = Either; } + } + + goal { + exists { + T: Combine = Either> + } + } yields { + expect![["Unique; substitution [?0 := A, ?1 := B]"]] + } + } +} + +#[test] +fn generic_trait() { + test! { + program { + struct Int { } + struct Uint { } + + trait Eq { } + + impl Eq for Int { } + impl Eq for Uint { } + } + + goal { + Int: Eq + } yields { + expect![["Unique"]] + } + + goal { + Uint: Eq + } yields { + expect![["Unique"]] + } + + goal { + Int: Eq + } yields { + expect![["No possible solution"]] + } + } +} + +#[test] +// Test that we properly detect failure even if there are applicable impls at +// the top level, if we can't find anything to fill in those impls with +fn deep_failure() { + test! { + program { + struct Foo {} + trait Bar {} + trait Baz {} + + impl Bar for Foo where T: Baz {} + } + + goal { + exists { T: Baz } + } yields { + expect![["No possible solution"]] + } + + goal { + exists { Foo: Bar } + } yields { + expect![["No possible solution"]] + } + } +} + +#[test] +// Test that we infer a unique solution even if it requires multiple levels of +// search to do so +fn deep_success() { + test! { + program { + struct Foo {} + struct ImplsBaz {} + trait Bar {} + trait Baz {} + + impl Baz for ImplsBaz {} + impl Bar for Foo where T: Baz {} + } + + goal { + exists { Foo: Bar } + } yields { + expect![["Unique; substitution [?0 := ImplsBaz]"]] + } + } +} + +#[test] +fn definite_guidance() { + test! { + program { + trait Display {} + trait Debug {} + struct Foo {} + struct Bar {} + struct Baz {} + + impl Display for Bar {} + impl Display for Baz {} + + impl Debug for Foo where T: Display {} + } + + goal { + exists { + T: Debug + } + } yields { + expect![["Ambiguous; definite substitution for { [?0 := Foo<^0.0>] }"]] + } + } +} + +#[test] +fn suggested_subst() { + test! 
{ + program { + trait SomeTrait {} + struct Foo {} + struct Bar {} + struct Baz {} + struct Qux {} + impl SomeTrait for Foo {} + impl SomeTrait for Bar {} + impl SomeTrait for Bar {} + } + + goal { + exists { + Foo: SomeTrait + } + } yields { + expect![["Unique; substitution [?0 := Baz]"]] + } + + goal { + exists { + if (Baz: SomeTrait) { + Baz: SomeTrait + } + } + } yields { + expect![["Unique; substitution [?0 := Qux]"]] + } + + goal { + exists { + if (Baz: SomeTrait) { + Foo: SomeTrait + } + } + } yields { + expect![["Unique; substitution [?0 := Baz]"]] + } + + goal { + exists { + if (Foo: SomeTrait) { + Foo: SomeTrait + } + } + } yields { + expect![["Unique; substitution [?0 := Baz]"]] + } + + goal { + exists { + if (Foo: SomeTrait) { + Foo: SomeTrait + } + } + } yields { + // FIXME: we need to rework the "favor environment" heuristic. + // Should be: "Ambiguous; suggested substitution [?0 := bool]" + expect![["Ambiguous; no inference guidance"]] + } + + goal { + exists { + if (Foo: SomeTrait) { + if (Foo: SomeTrait) { + Foo: SomeTrait + } + } + } + } yields { + expect![["Ambiguous; no inference guidance"]] + } + + goal { + exists { + Bar: SomeTrait + } + } yields { + expect![["Ambiguous; no inference guidance"]] + } + + goal { + exists { + if (Bar: SomeTrait) { + Bar: SomeTrait + } + } + } yields { + // FIXME: same as above, should be: expect![["Ambiguous; suggested substitution [?0 := bool]"]] + expect![["Ambiguous; no inference guidance"]] + } + + goal { + exists { + if (Bar: SomeTrait) { + if (Bar: SomeTrait) { + Bar: SomeTrait + } + } + } + } yields { + expect![["Ambiguous; no inference guidance"]] + } + } +} + +#[test] +fn where_clause_trumps() { + test! { + program { + struct Foo { } + + trait Marker { } + impl Marker for Foo { } + } + + goal { + forall { + if (T: Marker) { + T: Marker + } + } + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn inapplicable_assumption_does_not_shadow() { + test! 
{ + program { + struct A { } + struct B { } + + trait Foo { } + + impl Foo for T { } + } + + goal { + forall { + exists { + if (A: Foo) { + T: Foo + } + } + } + } yields { + expect![["Unique; substitution [?0 := A]"]] + } + } +} + +#[test] +fn partial_overlap_2() { + test! { + program { + trait Marker {} + trait Foo {} + trait Bar {} + + struct TypeA {} + struct TypeB {} + + impl Marker for T where T: Foo {} + impl Marker for T where T: Bar {} + } + + goal { + forall { + if (T: Foo; T: Bar) { + exists { T: Marker } + } + } + } yields { + expect![["Ambiguous; no inference guidance"]] + } + + goal { + forall { + if (T: Foo; T: Bar) { + T: Marker + } + } + } yields { + expect![["Unique"]] + } + + goal { + forall { + if (T: Foo; T: Bar) { + T: Marker + } + } + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn partial_overlap_3() { + test! { + program { + #[marker] trait Marker {} + trait Foo {} + trait Bar {} + + impl Marker for T where T: Foo {} + impl Marker for T where T: Bar {} + + struct Struct {} + impl Foo for Struct {} + impl Bar for Struct {} + } + + goal { + forall { + if (T: Foo; T: Bar) { T: Marker } + } + } yields { + expect![["Unique"]] + } + + goal { + Struct: Marker + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn clauses_in_if_goals() { + test! { + program { + trait Foo { } + struct Vec { } + struct A { } + } + + goal { + if (forall { T: Foo }) { + forall { T: Foo } + } + } yields { + expect![["Unique"]] + } + + goal { + forall { + if (Vec: Foo :- T: Foo) { + if (T: Foo) { + Vec: Foo + } + } + } + } yields { + expect![["Unique"]] + } + + goal { + if (forall { Vec: Foo :- T: Foo }) { + if (A: Foo) { + Vec: Foo + } + } + } yields { + expect![["Unique"]] + } + + goal { + if (forall { Vec: Foo :- T: Foo }) { + Vec: Foo + } + } yields { + expect![["No possible solution"]] + } + } +} + +#[test] +fn unify_types_in_ambiguous_impl() { + test! 
{ + program { + #[non_enumerable] + trait Constraint {} + trait Trait {} + struct A {} + impl Trait for A where T: Constraint {} + } + + goal { + exists { A: Trait } + } yields { + expect![["Ambiguous; definite substitution for { [?0 := ^0.0, ?1 := ^0.0] }"]] + } + } +} + +#[test] +fn unify_types_in_impl() { + test! { + program { + #[non_enumerable] + trait Constraint {} + trait Trait {} + struct A {} + impl Trait for A {} + } + + goal { + exists { A: Trait } + } yields { + expect![["Unique; for { substitution [?0 := ^0.0, ?1 := ^0.0] }"]] + } + } +} diff --git a/tests/test/lifetimes.rs b/tests/test/lifetimes.rs new file mode 100644 index 00000000000..81095cb0438 --- /dev/null +++ b/tests/test/lifetimes.rs @@ -0,0 +1,127 @@ +//! Tests for various concrete lifetimes + +use super::*; + +#[test] +fn static_lowering() { + lowering_success! { + program { + struct A<'a> where 'a: 'static {} + trait B<'a> where 'a: 'static {} + fn foo(a: &'static ()); + } + } +} + +#[test] +fn erased_lowering() { + lowering_success! { + program { + struct A<'a> where 'a: 'erased {} + trait B<'a> where 'a: 'erased {} + fn foo(a: &'erased ()); + } + } +} + +#[test] +fn static_outlives() { + test! { + program { + trait Foo<'a> where 'a: 'static {} + struct Bar {} + + impl<'a> Foo<'a> for Bar where 'a: 'static {} + } + + goal { + exists<'a> { + Bar: Foo<'a> + } + } yields { + expect![["Unique; for { substitution [?0 := '^0.0], lifetime constraints [InEnvironment { environment: Env([]), goal: '^0.0: 'static }] }"]] + } + + goal { + forall<'a> { + Bar: Foo<'a> + } + } yields { + expect![["Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: 'static }]"]] + } + } +} + +#[test] +fn erased_outlives() { + test! 
{ + program { + trait Foo<'a> where 'a: 'erased {} + struct Bar {} + + impl<'a> Foo<'a> for Bar where 'a: 'erased {} + } + + goal { + exists<'a> { + Bar: Foo<'a> + } + } yields { + expect![["Unique; for { substitution [?0 := '^0.0], lifetime constraints [InEnvironment { environment: Env([]), goal: '^0.0: ' }] }"]] + } + + goal { + forall<'a> { + Bar: Foo<'a> + } + } yields { + expect![["Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: ' }]"]] + } + } +} + +#[test] +fn static_impls() { + test! { + program { + struct Foo {} + trait Bar {} + impl<'a> Bar for &'a Foo {} + } + + goal { + &'static Foo: Bar + } yields { + expect![["Unique"]] + } + + goal { + forall<'a> { &'a Foo: Bar } + } yields { + expect![["Unique"]] + } + + goal { + exists<'a> { &'a Foo: Bar } + } yields { + expect![["Unique; for { substitution [?0 := '^0.0] }"]] + } + } +} + +#[test] +fn erased_impls() { + test! { + program { + struct Foo {} + trait Bar {} + impl<'a> Bar for &'a Foo {} + } + + goal { + &'erased Foo: Bar + } yields { + expect![["Unique"]] + } + } +} diff --git a/tests/test/misc.rs b/tests/test/misc.rs new file mode 100644 index 00000000000..313daf87a1f --- /dev/null +++ b/tests/test/misc.rs @@ -0,0 +1,880 @@ +//! Tests that don't fit a single category + +use super::*; + +// Regression test for rust-lang/chalk#111 +#[test] +fn futures_ambiguity() { + test! { + program { + struct Result { } + + trait Future { + type Output; + } + + trait FutureResult + where + Self: Future::Item, + ::Error + >> + { + type Item; + type Error; + } + + impl FutureResult for T + where + T: Future> + { + type Item = I; + type Error = E; + } + } + + goal { + forall { if (T: FutureResult) { exists { T: Future> } } } + } yields { + expect![["Unique; substitution [?0 := (FutureResult::Item), ?1 := (FutureResult::Error)]"]] + } + } +} + +#[test] +fn basic() { + test! 
{ + program { + trait Sized { } + + struct Foo { } + impl Sized for Foo { } + } + + goal { + forall { if (T: Sized) { T: Sized } } + } yields_all[SolverChoice::slg(10, None)] { + expect![[""]] + } + } +} + +/// Make sure we don't get a stack overflow or other badness for this +/// test from scalexm. +#[test] +fn subgoal_abstraction() { + test! { + program { + trait Foo { } + struct Box { } + impl Foo for T where Box: Foo { } + } + + goal { + exists { T: Foo } + } yields_first[SolverChoice::slg(50, None)] { + expect![["Floundered"]] + } + } +} + +#[test] +fn flounder() { + test! { + program { + trait A { } + + struct Vec { } + impl A for Vec { } + } + + goal { + exists { not { T: A } } + } yields_first[SolverChoice::slg(10, None)] { + expect![["Floundered"]] + } + } +} + +// Test that, when solving `?T: Sized`, we only wind up pulling a few +// answers before we stop. +// Also tests that we search breadth-first. +#[test] +fn only_draw_so_many() { + test! { + program { + trait Sized { } + + struct Vec { } + impl Sized for Vec where T: Sized { } + + struct Foo { } + impl Sized for Foo { } + + + struct Slice { } + impl Sized for Slice where T: Sized { } + } + + goal { + exists { T: Sized } + } yields_first[SolverChoice::slg(10, None)] { + expect![["substitution [?0 := Foo]"]], + expect![["substitution [?0 := Slice]"]], + expect![["substitution [?0 := Vec]"]], + expect![["substitution [?0 := Slice>]"]], + expect![["substitution [?0 := Vec>]"]] + } + + goal { + exists { T: Sized } + } yields[SolverChoice::slg(10, Some(2))] { + expect![["Ambiguous; no inference guidance"]] + } yields[SolverChoice::recursive_default()] { + expect![["Ambiguous; no inference guidance"]] + } + } +} + +#[test] +fn only_draw_so_many_blow_up() { + test! 
{ + program { + trait Sized { } + trait Foo { } + + struct Vec { } + impl Sized for Vec where T: Sized { } + impl Foo for Vec where T: Sized { } + + struct Alice { } + impl Sized for Alice { } + + struct Slice { } + impl Sized for Slice where T: Sized { } + } + + goal { + exists { T: Foo } + } yields[SolverChoice::slg(10, Some(2))] { + expect![["Ambiguous; definite substitution for { [?0 := Vec<^0.0>] }"]] + } yields[SolverChoice::recursive_default()] { + expect![["Ambiguous; definite substitution for { [?0 := Vec<^0.0>] }"]] + } + } +} + +#[test] +fn subgoal_cycle_uninhabited() { + test! { + program { + trait Foo { } + struct Box { } + struct Vec { } + struct Alice { } + impl Foo for Box where Box>: Foo { } + } + + // Infinite recursion -> we flounder + // Still return the necessary substitution T = Box<..> + goal { + exists { T: Foo } + } yields_first[SolverChoice::slg(2, None)] { + expect![["Ambiguous(for { substitution [?0 := Box<^0.0>] })"]] + } + + // Unsurprisingly, applying negation also flounders. + goal { + not { exists { T: Foo } } + } yields_first[SolverChoice::slg(2, None)] { + expect![["Floundered"]] + } + + // Equivalent to the previous. + goal { + forall { not { T: Foo } } + } yields_first[SolverChoice::slg(2, None)] { + expect![["Floundered"]] + } + + // However, if we come across a negative goal that exceeds our + // size threshold, we have a problem. + goal { + exists { T = Vec, not { Vec>: Foo } } + } yields_first[SolverChoice::slg(2, None)] { + expect![["Ambiguous(substitution [?0 := Vec])"]] + } + + // Same query with larger threshold works fine, though. + goal { + exists { T = Vec, not { Vec>: Foo } } + } yields_all[SolverChoice::slg(4, None)] { + expect![["substitution [?0 := Vec]"]] + } + + // Here, due to the hypothesis, there does indeed exist a suitable T, `U`. 
+ goal { + forall { if (U: Foo) { exists { T: Foo } } } + } yields_first[SolverChoice::slg(2, None)] { + expect![["substitution [?0 := !1_0]"]], + expect![["Ambiguous(for { substitution [?0 := Box<^0.0>] })"]] + } + } +} + +#[test] +fn subgoal_cycle_inhabited() { + test! { + program { + trait Foo { } + struct Box { } + struct Vec { } + struct Alice { } + impl Foo for Box where Box>: Foo { } + impl Foo for Alice { } + } + + // Exceeds size threshold -> flounder + // Still return necessary substitution T = Box<..> + goal { + exists { T: Foo } + } yields_first[SolverChoice::slg(3, None)] { + expect![["substitution [?0 := Alice]"]], + expect![["Ambiguous(for { substitution [?0 := Box<^0.0>] })"]] + } + } +} + +#[test] +fn basic_region_constraint_from_positive_impl() { + test! { + program { + trait Foo { } + struct Ref<'a, 'b, T> { } + struct Bar { } + impl<'x, T> Foo for Ref<'x, 'x, T> { } + } + + goal { + forall<'a, 'b, T> { Ref<'a, 'b, T>: Foo } + } yields_all[SolverChoice::slg(3, None)] { + expect![["lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '!1_1 }, InEnvironment { environment: Env([]), goal: '!1_1: '!1_0 }]"]] + } + } +} + +#[test] +#[allow(non_snake_case)] +fn example_2_1_EWFS() { + test! { + program { + trait Edge { } + trait TransitiveClosure { } + struct a { } + struct b { } + struct c { } + + forall<> { a: Edge } + forall<> { b: Edge } + forall<> { b: Edge } + forall { X: TransitiveClosure if X: Edge } + forall { X: TransitiveClosure if X: Edge, Z: TransitiveClosure } + } + + goal { + exists { a: TransitiveClosure } + } yields_all[SolverChoice::slg(3, None)] { + expect![["substitution [?0 := b]"]], + expect![["substitution [?0 := c]"]], + expect![["substitution [?0 := a]"]] + } + } +} + +/// Test (along with the other `cached_answers` tests) that the +/// ordering in which we we encounter clauses doesn't affect the final +/// set of answers we get. 
In particular, all of them should get 5 +/// answers, but in Ye Olde Days Of Yore there were sometimes bugs +/// that came up when replaying tabled answers that led to fewer +/// answers being produced. +/// +/// This test is also a test for ANSWER ABSTRACTION: the only reason +/// we get 5 answers is because of the max size of 2. +#[test] +fn cached_answers_1() { + test! { + program { + trait Sour { } + struct Lemon { } + struct Vinegar { } + struct HotSauce { } + + // Use explicit program clauses here rather than traits + // and impls to avoid hashmaps and other things that + // sometimes alter the final order of the program clauses: + forall<> { Lemon: Sour } + forall<> { Vinegar: Sour } + forall { HotSauce: Sour if T: Sour } + } + + goal { + exists { T: Sour } + } yields_first[SolverChoice::slg(2, None)] { + expect![["substitution [?0 := Lemon]"]], + expect![["substitution [?0 := Vinegar]"]], + expect![["substitution [?0 := HotSauce]"]], + expect![["substitution [?0 := HotSauce]"]], + expect![["Floundered"]] + } + } +} + +/// See `cached_answers_1`. +#[test] +fn cached_answers_2() { + test! { + program { + trait Sour { } + struct Lemon { } + struct Vinegar { } + struct HotSauce { } + + forall { HotSauce: Sour if T: Sour } + forall<> { Lemon: Sour } + forall<> { Vinegar: Sour } + } + + goal { + exists { T: Sour } + } yields_first[SolverChoice::slg(2, None)] { + expect![["substitution [?0 := Lemon]"]], + expect![["substitution [?0 := Vinegar]"]], + expect![["substitution [?0 := HotSauce]"]], + expect![["substitution [?0 := HotSauce]"]], + expect![["Floundered"]] + } + } +} + +/// See `cached_answers_1`. +#[test] +fn cached_answers_3() { + test! 
{ + program { + trait Sour { } + struct Lemon { } + struct Vinegar { } + struct HotSauce { } + + forall<> { Lemon: Sour } + forall { HotSauce: Sour if T: Sour } + forall<> { Vinegar: Sour } + } + + goal { + exists { T: Sour } + } yields_first[SolverChoice::slg(2, None)] { + expect![["substitution [?0 := Lemon]"]], + expect![["substitution [?0 := HotSauce]"]], + expect![["substitution [?0 := Vinegar]"]], + expect![["Floundered"]] + } + } +} + +#[test] +fn non_enumerable_traits_direct() { + test! { + program { + struct Foo { } + struct Bar { } + + #[non_enumerable] + trait NonEnumerable { } + impl NonEnumerable for Foo { } + impl NonEnumerable for Bar { } + + trait Enumerable { } + impl Enumerable for Foo { } + impl Enumerable for Bar { } + } + + goal { + exists { A: NonEnumerable } + } yields_first[SolverChoice::slg(3, None)] { + expect![["Floundered"]] + } + + goal { + exists { A: Enumerable } + } yields_all[SolverChoice::slg(3, None)] { + expect![["substitution [?0 := Foo]"]], + expect![["substitution [?0 := Bar]"]] + } + + goal { + Foo: NonEnumerable + } yields_all[SolverChoice::slg(3, None)] { + expect![[""]] + } + } +} + +#[test] +fn non_enumerable_traits_indirect() { + test! { + program { + struct Foo { } + struct Bar { } + + #[non_enumerable] + trait NonEnumerable { } + impl NonEnumerable for Foo { } + impl NonEnumerable for Bar { } + + trait Debug { } + impl Debug for T where T: NonEnumerable { } + } + + goal { + exists { A: Debug } + } yields_first[SolverChoice::slg(3, None)] { + expect![["Floundered"]] + } + } +} + +#[test] +fn non_enumerable_traits_double() { + test! 
{ + program { + struct Foo { } + struct Bar { } + + #[non_enumerable] + trait NonEnumerable1 { } + impl NonEnumerable1 for Foo { } + impl NonEnumerable1 for Bar { } + + #[non_enumerable] + trait NonEnumerable2 { } + impl NonEnumerable2 for Foo { } + impl NonEnumerable2 for Bar { } + + trait Debug { } + impl Debug for T where T: NonEnumerable1, T: NonEnumerable2 { } + } + + goal { + exists { A: Debug } + } yields_first[SolverChoice::slg(3, None)] { + expect![["Floundered"]] + } + } +} + +#[test] +fn non_enumerable_traits_reorder() { + test! { + program { + struct Foo { } + struct Bar { } + + #[non_enumerable] + trait NonEnumerable { } + impl NonEnumerable for Foo { } + impl NonEnumerable for Bar { } + + trait Enumerable { } + impl Enumerable for Foo { } + + // In this test, we first try to solve to solve `T: + // NonEnumerable` but then we discover it's + // non-enumerable, and so we push it off for later. Then + // we try to solve the `T: Enumerable` trait. + + trait Debug1 { } + impl Debug1 for T where T: Enumerable, T: NonEnumerable { } + + trait Debug2 { } + impl Debug2 for T where T: NonEnumerable, T: Enumerable { } + } + + goal { + exists { A: Debug1 } + } yields_all[SolverChoice::slg(3, None)] { + expect![["substitution [?0 := Foo]"]] + } + + + goal { + exists { A: Debug2 } + } yields_all[SolverChoice::slg(3, None)] { + expect![["substitution [?0 := Foo]"]] + } + } +} + +#[test] +fn builtin_impl_enumeration() { + test! 
{ + program { + #[lang(copy)] + trait Copy { } + + #[lang(sized)] + trait Sized { } + + #[lang(clone)] + trait Clone { } + + impl Copy for u8 {} + impl Clone for u8 {} + } + + goal { + exists { T: Copy } + } yields { + expect![["Ambiguous; no inference guidance"]] + } + + goal { + exists { T: Clone } + } yields { + expect![["Ambiguous; no inference guidance"]] + } + + goal { + exists { T: Sized } + } yields { + expect![["Ambiguous; no inference guidance"]] + } + } +} + +/// Don't return definite guidance if we flounder after finding one solution. +#[test] +fn flounder_ambiguous() { + test! { + program { + trait IntoIterator { } + #[non_enumerable] + trait OtherTrait { } + + struct Ref { } + struct A { } + + impl IntoIterator for Ref { } + impl IntoIterator for Ref where T: OtherTrait { } + } + + goal { + exists { Ref: IntoIterator } + } yields { + expect![["Ambiguous; no inference guidance"]] + } + } +} + +/// Don't return definite guidance if we are able to merge two solutions and the +/// third one matches that as well (the fourth may not). +#[test] +fn normalize_ambiguous() { + test! { + program { + trait IntoIterator { type Item; } + + struct Ref { } + struct A { } + struct B { } + struct C { } + + struct D { } + + impl IntoIterator for Ref { type Item = Ref; } + impl IntoIterator for Ref { type Item = Ref; } + impl IntoIterator for Ref { type Item = Ref; } + impl IntoIterator for Ref { type Item = D; } + } + + goal { + exists { + Normalize( as IntoIterator>::Item -> U) + } + } yields { + expect![["Ambiguous; no inference guidance"]] + } + } +} + +#[test] +fn lifetime_outlives_constraints() { + test! 
{ + program { + trait Foo<'a, 'b> where 'a: 'b {} + struct Bar {} + + impl<'a, 'b> Foo<'a, 'b> for Bar where 'a: 'b {} + } + + goal { + exists<'a, 'b> { + Bar: Foo<'a, 'b> + } + } yields { + expect![["Unique; for { substitution [?0 := '^0.0, ?1 := '^0.1], lifetime constraints [InEnvironment { environment: Env([]), goal: '^0.0: '^0.1 }] }"]] + } + + goal { + forall<'a> { + exists<'b> { + Bar: Foo<'a, 'b> + } + } + } yields { + expect![["Unique; for { substitution [?0 := '^0.0], lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '^0.0 }] }"]] + } + } +} + +#[test] +fn type_outlives_constraints() { + test! { + program { + trait Foo<'a, T> where T: 'a {} + struct Bar {} + impl<'a, T> Foo<'a, T> for Bar where T: 'a {} + } + + goal { + exists<'a, T> { + Bar: Foo<'a, T> + } + } yields { + expect![["Unique; for { substitution [?0 := '^0.0, ?1 := ^0.1], lifetime constraints [InEnvironment { environment: Env([]), goal: ^0.1: '^0.0 }] }"]] + } + + goal { + forall { + exists<'a> { + Bar: Foo<'a, T> + } + } + } yields { + expect![["Unique; for { substitution [?0 := '^0.0], lifetime constraints [InEnvironment { environment: Env([]), goal: !1_0: '^0.0 }] }"]] + } + } +} + +/// Example of fundamental ambiguity in the recursive solver, used in the +/// recursive solver book documentation. +#[test] +fn not_really_ambig() { + test! { + program { + struct Vec { } + + trait A { } + trait B { } + + impl A for Vec where T: A, T: B { } + + impl A for u32 { } + impl B for u32 { } + + impl A for i32 { } + impl B for i8 { } + } + + goal { + exists { Vec: A } + } yields[SolverChoice::slg_default()] { + expect![["Unique; substitution [?0 := Uint(U32)]"]] + } yields[SolverChoice::recursive_default()] { + expect![["Ambiguous; no inference guidance"]] + } + } +} + +#[test] +fn canonicalization_regression() { + test! 
{ + program { + trait ForAny {} + trait ForSame {} + + impl ForAny for Y {} + impl ForSame for X {} + } + + goal { + forall { + forall { + exists { + A: ForAny, + B: ForSame + } + } + } + } yields { + expect![["Unique; substitution [?0 := !2_0]"]] + } + } +} + +#[test] +fn empty_definite_guidance() { + test! { + disable_coherence; + program { + trait Trait {} + + struct S<'a> {} + struct A {} + + impl<'a> Trait> for A {} + impl<'a> Trait> for A where A: 'a {} + + trait OtherTrait<'a> {} + impl<'a> OtherTrait<'a> for A where A: Trait> {} + } + + goal { + forall<'a> { + A: OtherTrait<'a> + } + // the program fails coherence, so which answer we get here exactly + // isn't that important -- this is mainly a regression test for a + // recursive solver infinite loop. + } yields[SolverChoice::slg_default()] { + expect![["Unique"]] + } yields[SolverChoice::recursive_default()] { + expect![[r#"Unique"#]] + } + } +} + +#[test] +fn ambiguous_unification_in_fn() { + test! { + program { + trait FnOnce { + type Output; + } + + struct MyClosure {} + impl FnOnce<(T,)> for MyClosure ()> { + type Output = (); + } + } + goal { + exists { + MyClosure ()>: FnOnce<(&'static T,)> + } + } yields[SolverChoice::slg_default()] { + expect![["Unique; for { substitution [?0 := ^0.0, ?1 := ^0.0], lifetime constraints [InEnvironment { environment: Env([]), goal: 'static: 'static }, InEnvironment { environment: Env([]), goal: 'static: 'static }] }"]] + } yields[SolverChoice::recursive_default()] { + expect![["Unique; for { substitution [?0 := ^0.0, ?1 := ^0.0], lifetime constraints [InEnvironment { environment: Env([]), goal: 'static: 'static }] }"]] + } + } +} + +#[test] +fn endless_loop() { + test! 
{ + disable_coherence; + program { + trait FnOnce { + type Output; + } + + struct MyClosure {} + impl FnOnce for MyClosure T> { + type Output = T; + } + } + goal { + exists { + T> as FnOnce>::Output = T + } + } yields { + expect![["Unique; for { substitution [?0 := ^0.0] }"]] + } + } +} + +#[test] +fn env_bound_vars() { + test! { + goal { + exists<'a> { + if (WellFormed(&'a ())) { + WellFormed(&'a ()) + } + } + } yields[SolverChoice::slg_default()] { + expect![["Ambiguous; definite substitution for { [?0 := '^0.0] }"]] + } yields[SolverChoice::recursive_default()] { + expect![[r#"Unique; for { substitution [?0 := '^0.0] }"#]] + } + goal { + exists<'a> { + if (FromEnv(&'a ())) { + WellFormed(&'a ()) + } + } + } yields { + expect![["Unique; for { substitution [?0 := '^0.0], lifetime constraints [InEnvironment { environment: Env([]), goal: 0: '^0.0 }] }"]] + } + } +} + +#[test] +fn recursive_hang() { + test! { + program {} + + goal { + exists<'a, T> { + if(T: 'a) { + WellFormed(&'a T) + } + } + } yields[SolverChoice::slg_default()] { + expect![["Ambiguous; definite substitution for { [?0 := ^0.0, ?1 := '^0.1] }"]] + } yields[SolverChoice::recursive_default()] { + expect![[r#"Ambiguous; no inference guidance"#]] + } + } +} + +#[test] +fn coinductive_wrapper() { + test! { + program { + #[coinductive] + trait Foo {} + struct Wrapper {} + + impl Foo for Wrapper> + where + Wrapper: Foo + {} + } + + goal { + exists { + Wrapper: Foo + } + } yields[SolverChoice::slg_default()] { + // FIXME: wrong?? 
+ expect![["Unique; for { substitution [?0 := Wrapper<^0.0>] }"]] + } + } + /* + FIXME: overflows stack + yields[SolverChoice::recursive_default()] { + expect![[r#"Ambiguous; no inference guidance"#]] + } + */ +} diff --git a/tests/test/mod.rs b/tests/test/mod.rs new file mode 100644 index 00000000000..a23ad1b69d9 --- /dev/null +++ b/tests/test/mod.rs @@ -0,0 +1,442 @@ +#![allow(non_snake_case)] + +use std::sync::Arc; + +use chalk_integration::program::Program; +use expect_test::{expect, Expect}; + +use crate::test_util::assert_same; +use chalk_integration::db::ChalkDatabase; +use chalk_integration::interner::ChalkIr; +use chalk_integration::lowering::lower_goal; +use chalk_integration::query::LoweringDatabase; +use chalk_integration::SolverChoice; +use chalk_ir::Constraints; +use chalk_solve::ext::*; +use chalk_solve::logging::with_tracing_logs; +use chalk_solve::RustIrDatabase; +use chalk_solve::Solution; + +#[cfg(feature = "bench")] +mod bench; +mod coherence; +mod wf_lowering; + +fn format_solution(mut result: Option>, interner: ChalkIr) -> String { + // sort constraints, since the different solvers may output them in different order + if let Some(Solution::Unique(solution)) = &mut result { + let mut sorted = solution.value.constraints.as_slice(interner).to_vec(); + sorted.sort_by_key(|c| format!("{:?}", c)); + solution.value.constraints = Constraints::from_iter(interner, sorted); + } + match result { + Some(v) => v.display(ChalkIr).to_string(), + None => "No possible solution".to_string(), + } +} + +pub fn assert_result(result: Option>, expected: &Expect, interner: ChalkIr) { + let result = format_solution(result, interner); + expected.assert_eq(&result); +} + +pub fn assert_result_str(result: Option>, expected: &str, interner: ChalkIr) { + let result = format_solution(result, interner); + assert_same(&result, expected); +} + +// different goals +#[derive(Clone)] +pub enum TestGoal { + // solver should produce same aggregated single solution + Aggregated(T), + 
// solver should produce exactly multiple solutions + All(Vec), + // solver should produce first same multiple solutions + First(Vec), +} + +macro_rules! test { + (program $program:tt $($goals:tt)*) => {{ + let (program, goals) = parse_test_data!(program $program $($goals)*); + solve_goal(program, goals, true) + }}; + (disable_coherence; program $program:tt $($goals:tt)*) => {{ + let (program, goals) = parse_test_data!(program $program $($goals)*); + solve_goal(program, goals, false) + }}; + + // If `program` is omitted, default to an empty one. + ($($goals:tt)*) => { + test!(program {} $($goals)*) + }; +} + +macro_rules! parse_test_data { + (program $program:tt $($goals:tt)*) => { + parse_test_data!(@program[$program] + @parsed_goals[] + @unparsed_goals[$($goals)*]) + }; + + (@program[$program:tt] @parsed_goals[$($parsed_goals:tt)*] @unparsed_goals[]) => { + (stringify!($program), vec![$($parsed_goals),*]) + }; + + // goal { G } yields { "Y" } -- test both solvers behave the same (the default) + (@program[$program:tt] @parsed_goals[$($parsed_goals:tt)*] @unparsed_goals[ + goal $goal:tt yields { $expected:expr } + $($unparsed_goals:tt)* + ]) => { + parse_test_data!(@program[$program] + @parsed_goals[ + $($parsed_goals)* + (stringify!($goal), vec![SolverChoice::slg_default(), SolverChoice::recursive_default()], TestGoal::Aggregated($expected)) + ] + @unparsed_goals[$($unparsed_goals)*]) + }; + + // goal { G } yields_all { "Y1", "Y2", ... 
, "YN" } -- test that the SLG + // solver gets exactly N answers in this order (the recursive solver can't + // return multiple answers) + (@program[$program:tt] @parsed_goals[$($parsed_goals:tt)*] @unparsed_goals[ + goal $goal:tt yields_all { $($expected:expr),* } + $($unparsed_goals:tt)* + ]) => { + parse_test_data!(@program[$program] + @parsed_goals[ + $($parsed_goals)* + (stringify!($goal), vec![SolverChoice::slg_default()], TestGoal::All(vec![$($expected),*])) + ] + @unparsed_goals[$($unparsed_goals)*]) + }; + + // goal { G } yields_first { "Y1", "Y2", ... , "YN" } -- test that the SLG + // solver gets at least N same first answers + (@program[$program:tt] @parsed_goals[$($parsed_goals:tt)*] @unparsed_goals[ + goal $goal:tt yields_first { $($expected:expr),* } + $($unparsed_goals:tt)* + ]) => { + parse_test_data!(@program[$program] + @parsed_goals[ + $($parsed_goals)* + (stringify!($goal), vec![SolverChoice::default()], TestGoal::First(vec![$($expected),*])) + ] + @unparsed_goals[$($unparsed_goals)*]) + }; + + // goal { G } yields[C1] { "Y1" } yields[C2] { "Y2" } -- test that solver C1 yields Y1 + // and C2 yields Y2 + // + // Annoyingly, to avoid getting a parsing ambiguity error, we have + // to distinguish the case where there are other goals to come + // (this rule) for the last goal in the list (next rule). There + // might be a more elegant fix than copy-and-paste but this works. 
+ (@program[$program:tt] @parsed_goals[$($parsed_goals:tt)*] @unparsed_goals[ + goal $goal:tt yields[$C:expr] { $expected:expr } + goal $($unparsed_goals:tt)* + ]) => { + parse_test_data!(@program[$program] + @parsed_goals[ + $($parsed_goals)* + (stringify!($goal), vec![$C], TestGoal::Aggregated($expected)) + ] + @unparsed_goals[goal $($unparsed_goals)*]) + }; + + // same as above, but there are multiple yields clauses => duplicate the goal + (@program[$program:tt] @parsed_goals[$($parsed_goals:tt)*] @unparsed_goals[ + goal $goal:tt + yields[$C:expr] { $expected:expr } + yields $($unparsed_tail:tt)* + ]) => { + parse_test_data!(@program[$program] + @parsed_goals[ + $($parsed_goals)* + (stringify!($goal), vec![$C], TestGoal::Aggregated($expected)) + ] + @unparsed_goals[goal $goal yields $($unparsed_tail)*]) + }; + + // same as above, but for the final goal in the list. + (@program[$program:tt] @parsed_goals[$($parsed_goals:tt)*] @unparsed_goals[ + goal $goal:tt yields[$C:expr] { $expected:expr } + ]) => { + parse_test_data!(@program[$program] + @parsed_goals[ + $($parsed_goals)* + (stringify!($goal), vec![$C], TestGoal::Aggregated($expected)) + ] + @unparsed_goals[]) + }; + + // goal { G } yields_all[C1] { "Y1" } yields_all[C2] { "Y2" } -- test that solver C1 yields Y1 + // and C2 yields Y2 + // + // Annoyingly, to avoid getting a parsing ambiguity error, we have + // to distinguish the case where there are other goals to come + // (this rule) for the last goal in the list (next rule). There + // might be a more elegant fix than copy-and-paste but this works. 
+ (@program[$program:tt] @parsed_goals[$($parsed_goals:tt)*] @unparsed_goals[ + goal $goal:tt yields_all[$C:expr] { $($expected:expr),* } + goal $($unparsed_goals:tt)* + ]) => { + parse_test_data!(@program[$program] + @parsed_goals[ + $($parsed_goals)* + (stringify!($goal), vec![$C], TestGoal::All(vec![$($expected),*])) + ] + @unparsed_goals[goal $($unparsed_goals)*]) + }; + + // same as above, but for the final goal in the list. + (@program[$program:tt] @parsed_goals[$($parsed_goals:tt)*] @unparsed_goals[ + goal $goal:tt yields_all[$C:expr] { $($expected:expr),* } + ]) => { + parse_test_data!(@program[$program] + @parsed_goals[ + $($parsed_goals)* + (stringify!($goal), vec![$C], TestGoal::All(vec![$($expected),*])) + ] + @unparsed_goals[]) + }; + + // goal { G } yields_first[C1] { "Y1" } yields_first[C2] { "Y2" } -- test that solver C1 yields Y1 + // and C2 yields Y2 + // + // Annoyingly, to avoid getting a parsing ambiguity error, we have + // to distinguish the case where there are other goals to come + // (this rule) for the last goal in the list (next rule). There + // might be a more elegant fix than copy-and-paste but this works. + (@program[$program:tt] @parsed_goals[$($parsed_goals:tt)*] @unparsed_goals[ + goal $goal:tt yields_first[$C:expr] { $($expected:expr),* } + goal $($unparsed_goals:tt)* + ]) => { + parse_test_data!(@program[$program] + @parsed_goals[ + $($parsed_goals)* + (stringify!($goal), vec![$C], TestGoal::First(vec![$($expected),*])) + ] + @unparsed_goals[goal $($unparsed_goals)*]) + }; + + // same as above, but for the final goal in the list. 
+ (@program[$program:tt] @parsed_goals[$($parsed_goals:tt)*] @unparsed_goals[ + goal $goal:tt yields_first[$C:expr] { $($expected:expr),* } + ]) => { + parse_test_data!(@program[$program] + @parsed_goals[ + $($parsed_goals)* + (stringify!($goal), vec![$C], TestGoal::First(vec![$($expected),*])) + ] + @unparsed_goals[]) + }; +} + +fn solve_goal( + program_text: &str, + goals: Vec<(&str, Vec, TestGoal)>, + coherence: bool, +) { + with_tracing_logs(|| { + println!("program {}", program_text); + assert!(program_text.starts_with('{')); + assert!(program_text.ends_with('}')); + + let mut db = ChalkDatabase::with( + &program_text[1..program_text.len() - 1], + SolverChoice::default(), + ); + + let program = if coherence { + db.checked_program().unwrap() + } else { + db.program_ir().unwrap() + }; + + for (goal_text, solver_choices, expected) in goals { + let solver_choices = &*solver_choices; + let solver_choice = match solver_choices { + [] => panic!("No solvers?"), + [x] => *x, + _ => { + let expected = match expected { + TestGoal::Aggregated(x) => x, + _ => todo!("solver comparison only supported for `Aggregated` goals"), + }; + + solve_aggregated( + &mut db, + program.clone(), + goal_text, + solver_choices, + expected, + ); + continue; + } + }; + + match (&solver_choice, &expected) { + (SolverChoice::Recursive { .. }, TestGoal::All(_)) + | (SolverChoice::Recursive { .. 
}, TestGoal::First(_)) => { + panic!("cannot test the recursive solver with yields_first or yields_all"); + } + _ => {} + }; + + if db.solver_choice() != solver_choice { + db.set_solver_choice(solver_choice); + } + + chalk_integration::tls::set_current_program(&program, || { + println!("----------------------------------------------------------------------"); + println!("goal {}", goal_text); + assert!(goal_text.starts_with('{')); + assert!(goal_text.ends_with('}')); + let goal = lower_goal( + &*chalk_parse::parse_goal(&goal_text[1..goal_text.len() - 1]).unwrap(), + &*program, + ) + .unwrap(); + + println!("using solver: {:?}", solver_choice); + let peeled_goal = goal.into_peeled_goal(db.interner()); + match &expected { + TestGoal::Aggregated(expected) => { + let result = db.solve(&peeled_goal); + assert_result(result, expected, db.interner()); + } + TestGoal::All(expected) => { + let mut expected = expected.iter(); + assert!( + db.solve_multiple(&peeled_goal, &mut |result, next_result| { + match expected.next() { + Some(expected) => { + let actual = format!( + "{}", + result.as_ref().map(|v| v.display(ChalkIr)) + ); + expected.assert_eq(&actual) + } + None => { + assert!(!next_result, "Unexpected next solution"); + } + } + true + }), + "Not all solutions processed" + ); + if expected.next().is_some() { + panic!("Not all solutions processed"); + } + } + TestGoal::First(expected) => { + let mut expected = expected.iter(); + db.solve_multiple(&peeled_goal, &mut |result, next_result| match expected + .next() + { + Some(solution) => { + let actual = + format!("{}", result.as_ref().map(|v| v.display(ChalkIr))); + solution.assert_eq(&actual); + if !next_result { + assert!( + expected.next().is_none(), + "Not enough solutions found" + ); + } + true + } + None => false, + }); + assert!(expected.next().is_none(), "Not enough solutions found"); + } + } + }); + } + }) +} + +fn solve_aggregated( + db: &mut ChalkDatabase, + program: Arc, + goal_text: &str, + choices: 
&[SolverChoice], + expected: Expect, +) { + let mut solutions = vec![]; + + for solver_choice in choices.iter().copied() { + if db.solver_choice() != solver_choice { + db.set_solver_choice(solver_choice); + } + + chalk_integration::tls::set_current_program(&program, || { + println!("----------------------------------------------------------------------"); + println!("goal {}", goal_text); + assert!(goal_text.starts_with('{')); + assert!(goal_text.ends_with('}')); + let goal = lower_goal( + &*chalk_parse::parse_goal(&goal_text[1..goal_text.len() - 1]).unwrap(), + &*program, + ) + .unwrap(); + + println!("using solver: {:?}", solver_choice); + let peeled_goal = goal.into_peeled_goal(db.interner()); + let result = db.solve(&peeled_goal); + solutions.push(format_solution(result, db.interner())); + }); + } + + let (head, tail) = solutions + .split_first() + .expect("Test requires at least one solver"); + for (i, other) in tail.iter().enumerate() { + println!( + "\ncomparing solvers:\n\texpected: {:?}\n\tactual: {:?}\n", + &choices[0], + &choices[i + 1] + ); + assert_same(other, head); + } + + expected.assert_eq(head); +} + +mod ambiguity_issue_727; +mod arrays; +mod auto_traits; +mod closures; +mod coherence_goals; +mod coinduction; +mod constants; +mod coroutines; +mod cycle; +mod discriminant_kind; +mod existential_types; +mod fn_def; +mod foreign_types; +mod functions; +mod implied_bounds; +mod impls; +mod lifetimes; +mod misc; +mod negation; +mod never; +mod numerics; +mod object_safe; +mod opaque_types; +mod pointee; +mod projection; +mod refs; +mod scalars; +mod slices; +mod string; +mod subtype; +mod tuples; +mod type_flags; +mod unify; +mod unpin; +mod unsize; +mod wf_goals; diff --git a/tests/test/negation.rs b/tests/test/negation.rs new file mode 100644 index 00000000000..708b959bcc7 --- /dev/null +++ b/tests/test/negation.rs @@ -0,0 +1,356 @@ +//! Tests related to `not { }` goals. + +use super::*; + +#[test] +fn simple_negation() { + test! 
{ + program { + struct Bar {} + trait Foo {} + } + + goal { + not { Bar: Foo } + } yields { + expect![["Unique"]] + } + + goal { + not { + not { Bar: Foo } + } + } yields { + expect![["No possible solution"]] + } + + goal { + not { + not { + not { Bar: Foo } + } + } + } yields { + expect![["Unique"]] + } + + goal { + exists { + not { T: Foo } + } + } yields { + expect![["Ambiguous; no inference guidance"]] + } + + goal { + forall { + not { T: Foo } + } + } yields { + expect![["Unique"]] + } + + goal { + not { + exists { T: Foo } + } + } yields { + expect![["Unique"]] + } + + goal { + not { + forall { T: Foo } + } + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn deep_negation() { + test! { + program { + struct Foo {} + trait Bar {} + trait Baz {} + + impl Bar for Foo where T: Baz {} + } + + goal { + not { + exists { T: Baz } + } + } yields { + expect![["Unique"]] + } + + goal { + not { + exists { Foo: Bar } + } + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn negation_quantifiers() { + test! { + program { + struct Alice {} + struct Bob {} + } + + goal { + not { + forall { + T = U + } + } + } yields { + expect![["Unique"]] + } + + goal { + not { + exists { + T = U + } + } + } yields { + expect![["No possible solution"]] + } + + goal { + forall { + not { + T = U + } + } + } yields { + expect![["No possible solution"]] + } + } +} + +#[test] +fn negation_free_vars() { + test! { + program { + struct Vec {} + struct Alice {} + struct Bob {} + trait Foo {} + impl Foo for Vec {} + } + + goal { + exists { + not { Vec: Foo } + } + } yields { + expect![["Ambiguous; no inference guidance"]] + } + } +} + +/// Here, P and Q depend on one another through a negative loop. +#[test] +#[should_panic(expected = "negative cycle")] +fn negative_loop() { + test! 
{ + program { + trait P { } + trait Q { } + struct Alice { } + + forall<> { Alice: P if not { Alice: Q } } + forall<> { Alice: Q if not { Alice: P } } + } + + goal { + Alice: P + } yields_all[SolverChoice::slg(10, None)] { + // Negative cycle -> panic + expect![[""]] + } + } +} + +#[test] +#[allow(non_snake_case)] +fn example_2_2_EWFS() { + test! { + program { + trait M { } + trait P { } + trait Q { } + + struct a { } + struct b { } + struct c { } + + forall { X: M if not { X: P } } + forall<> { a: P } + forall { X: P if X: Q } + forall<> { b: Q } + forall { X: Q if X: P } + } + + goal { + c: M + } yields_all[SolverChoice::slg(3, None)] { + expect![[""]] + } + } +} + +#[test] +#[should_panic(expected = "negative cycle")] +#[allow(non_snake_case)] +fn example_2_3_EWFS() { + test! { + program { + trait W { } + trait M { } + trait P { } + + struct a { } + struct b { } + struct c { } + + forall { X: W if X: M, not { Y: W }, Y: P } + forall<> { a: M } + forall<> { b: M } + forall<> { c: M } + forall<> { b: P } + } + + goal { + a: W + } yields_all[SolverChoice::slg(3, None)] { + // Negative cycle -> panic + expect![[""]] + } + } +} + +#[test] +#[should_panic(expected = "negative cycle")] +#[allow(non_snake_case)] +fn example_3_3_EWFS() { + test! { + program { + trait S { } + trait P { } + trait Q { } + + struct a { } + + forall<> { a: S if not { a: P }, not { a: Q } } + forall<> { a: P if not { a: S }, a: Q } + forall<> { a: Q if not { a: S }, a: P } + } + + goal { + a: S + } yields_all[SolverChoice::slg(3, None)] { + // Negative cycle -> panic + expect![[""]] + } + } +} + +/// Here, P is neither true nor false. If it were true, then it would +/// be false, and so forth. +#[test] +#[should_panic(expected = "negative cycle")] +fn contradiction() { + test! 
{ + program { + trait P { } + struct Alice { } + + forall<> { Alice: P if not { Alice: P } } + } + + goal { + Alice: P + } yields_all[SolverChoice::slg(3, None)] { + // Negative cycle -> panic + expect![[""]] + } + } +} + +/// Here, P depends on Q negatively, but Q depends only on itself. +#[test] +#[should_panic(expected = "negative cycle")] +fn negative_answer_ambiguous() { + test! { + program { + trait P { } + trait Q { } + struct Alice { } + + forall<> { Alice: P if not { Alice: Q } } + forall<> { Alice: Q if not { Alice: Q } } + } + + goal { + Alice: P + } yields_all[SolverChoice::slg(3, None)] { + // Negative cycle -> panic + expect![[""]] + } + } +} + +#[test] +fn negative_reorder() { + test! { + program { + struct Foo { } + struct Bar { } + + trait IsFoo { } + impl IsFoo for Foo { } + + trait Enumerable { } + impl Enumerable for Foo { } + impl Enumerable for Bar { } + + // In this test, we first try to solve to solve `not { T: + // IsFoo }` but then we discover it's + // non-enumerable, and so we push it off for later. Then + // we try to solve the `T: Enumerable` trait. + + trait Debug1 { } + forall { + T: Debug1 if T: Enumerable, not { T: IsFoo } + } + + trait Debug2 { } + forall { + T: Debug2 if not { T: IsFoo }, T: Enumerable + } + } + + goal { + exists { A: Debug1 } + } yields_all[SolverChoice::slg(3, None)] { + expect![["substitution [?0 := Bar]"]] + } + + + goal { + exists { A: Debug2 } + } yields_all[SolverChoice::slg(3, None)] { + expect![["substitution [?0 := Bar]"]] + } + } +} diff --git a/tests/test/never.rs b/tests/test/never.rs new file mode 100644 index 00000000000..ffd39b57243 --- /dev/null +++ b/tests/test/never.rs @@ -0,0 +1,27 @@ +use super::*; + +#[test] +fn never_is_well_formed() { + test! { + goal { + WellFormed(!) + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn never_is_sized() { + test! 
{ + program { + #[lang(sized)] + trait Sized { } + } + goal { + !: Sized + } yields { + expect![["Unique"]] + } + } +} diff --git a/tests/test/numerics.rs b/tests/test/numerics.rs new file mode 100644 index 00000000000..688cb47ba02 --- /dev/null +++ b/tests/test/numerics.rs @@ -0,0 +1,292 @@ +//! Tests related to integer/float variable kinds + +use super::*; + +/// If we know that the type is an integer, we can narrow down the possible +/// types. This test is based on the following example: +/// ```ignore +/// let x: &[u32]; +/// let i = 1; +/// x[i] +/// ``` +/// `i` must be `usize` because that is the only integer type used in `Index` +/// impls for slices. +#[test] +fn integer_index() { + test! { + program { + trait Index {} + struct Slice {} + struct Foo {} + + impl Index for Slice {} + impl Index for Slice {} + } + + goal { + exists { + Slice: Index + } + } yields { + expect![["Unique; substitution [?0 := Uint(Usize)]"]] + } + } +} + +/// A more straightforward version of the `integer_index` test where the +/// variable is on the impl side of the trait ref. +#[test] +fn integer_kind_trait() { + test! { + program { + // this should even work for non-enumerable traits, because we don't + // need to enumerate *all* impls for this! + #[non_enumerable] + trait Foo {} + struct Bar {} + + impl Foo for usize {} + impl Foo for Bar {} + } + + goal { + exists { + N: Foo + } + } yields { + expect![["Unique; substitution [?0 := Uint(Usize)]"]] + } + } +} + +/// The `integer_kind_trait` test, but for floats +#[test] +fn float_kind_trait() { + test! { + program { + #[non_enumerable] + trait Foo {} + struct Bar {} + + impl Foo for f32 {} + impl Foo for Bar {} + } + + goal { + exists { + N: Foo + } + } yields { + expect![["Unique; substitution [?0 := Float(F32)]"]] + } + } +} + +/// You can still get ambiguous results with integer variables +#[test] +fn integer_ambiguity() { + test! 
{ + program { + trait Foo {} + + impl Foo for usize {} + impl Foo for isize {} + } + + goal { + exists { + N: Foo + } + } yields { + expect![["Ambiguous; no inference guidance"]] + } + } +} + +/// You can still get ambiguous results with float variables +#[test] +fn float_ambiguity() { + test! { + program { + trait Foo {} + + impl Foo for f32 {} + impl Foo for f64 {} + } + + goal { + exists { + N: Foo + } + } yields { + expect![["Ambiguous; no inference guidance"]] + } + } +} + +/// Integer/float type kinds are just specialized type kinds, so they can unify +/// with general type kinds. +#[test] +fn integer_and_float_are_specialized_ty_kinds() { + test! { + goal { + exists { + T = N, N = usize + } + } yields { + expect![["Unique; substitution [?0 := Uint(Usize), ?1 := Uint(Usize)]"]] + } + + goal { + exists { + T = N, N = f32 + } + } yields { + expect![["Unique; substitution [?0 := Float(F32), ?1 := Float(F32)]"]] + } + } +} + +/// Once a general type kind is unified with a specific type kind, it cannot be +/// unified with an incompatible type (ex. integer type kind with char) +#[test] +fn general_ty_kind_becomes_specific() { + test! { + goal { + exists { + T = N, T = char + } + } yields { + expect![["No possible solution"]] + } + + goal { + exists { + T = N, T = char + } + } yields { + expect![["No possible solution"]] + } + } +} + +/// Integer and float type kinds can not be equated +#[test] +fn integers_are_not_floats() { + test! { + goal { + exists { + I = F + } + } yields { + expect![["No possible solution"]] + } + } +} + +#[test] +fn integers_are_copy() { + test! { + program { + #[lang(copy)] + trait Copy { } + } + + goal { + exists { + I: Copy + } + } yields { + expect![["Unique; for { substitution [?0 := ^0.0] }"]] + } + } +} + +#[test] +fn integers_are_sized() { + test! 
{ + program { + #[lang(sized)] + trait Sized { } + } + + goal { + exists { + I: Sized + } + } yields { + expect![["Unique; for { substitution [?0 := ^0.0] }"]] + } + } +} + +/// Simplified version of a goal that needs to be solved for type checking +/// `1 + 2`. +#[test] +fn ambiguous_add() { + test! { + program { + #[non_enumerable] + trait Add { type Output; } + + impl<'a> Add<&'a u32> for u32 { type Output = >::Output; } + impl Add for u32 { type Output = u32; } + } + + goal { + exists { + >::Output = V + } + } yields { + expect![["Ambiguous; no inference guidance"]] + } + } +} + +/// Simplified version of a goal that needs to be solved for type checking +/// `1 << &2`. +#[test] +fn shl_ice() { + test! { + program { + //#[non_enumerable] + trait Shl { } + + impl<'a> Shl<&'a u32> for u32 { } + impl<'a> Shl<&'a u16> for u32 { } + } + + goal { + exists { + u32: Shl + } + } yields[SolverChoice::slg_default()] { + expect![["Ambiguous; definite substitution for { [?0 := (&'^0.0 ^0.1)] }"]] + } yields[SolverChoice::recursive_default()] { + expect![["Ambiguous; no inference guidance"]] + } + } +} + +/// Regression test for rust-analyzer#5495 ("var_universe invoked on bound +/// variable" crash). +#[test] +fn unify_general_then_specific_ty() { + test! { + program { + #[non_enumerable] + trait Foo {} + struct Bar {} + + impl Foo for Bar<(T, T, i32, i32)> {} + } + + goal { + exists { + Bar<(N, T, T, T)>: Foo + } + } yields { + expect![["Unique; substitution [?0 := Int(I32), ?1 := Int(I32)]"]] + } + } +} diff --git a/tests/test/object_safe.rs b/tests/test/object_safe.rs new file mode 100644 index 00000000000..52839a7c2fb --- /dev/null +++ b/tests/test/object_safe.rs @@ -0,0 +1,15 @@ +use super::*; + +#[test] +fn object_safe_flag() { + test! 
{ + program { + #[object_safe] + trait Foo {} + trait Bar {} + } + + goal { ObjectSafe(Foo) } yields { expect![["Unique"]] } + goal { not { ObjectSafe(Bar) } } yields { expect![["Unique"]] } + } +} diff --git a/tests/test/opaque_types.rs b/tests/test/opaque_types.rs new file mode 100644 index 00000000000..6821ae53222 --- /dev/null +++ b/tests/test/opaque_types.rs @@ -0,0 +1,315 @@ +use super::*; + +#[test] +fn opaque_bounds() { + test! { + program { + struct Ty { } + + trait Clone { } + + impl Clone for Ty { } + + opaque type T: Clone = Ty; + } + + goal { + T: Clone + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn opaque_reveal() { + test! { + program { + struct Ty { } + trait Trait { } + impl Trait for Ty { } + + trait Clone { } + impl Clone for Ty { } + opaque type T: Clone = Ty; + } + + goal { + if (Reveal) { + T: Trait + } + } yields { + expect![["Unique"]] + } + + goal { + T: Trait + } yields { + expect![["No possible solution"]] + } + } +} + +#[test] +fn opaque_where_clause() { + test! { + program { + struct Ty { } + + trait Clone { } + impl Clone for Ty { } + + trait Trait { } + impl Trait for Ty { } + + opaque type T: Clone where T: Trait = Ty; + + struct Vec { } + + impl Clone for Vec { } + impl Trait for Vec { } + + opaque type S: Clone where U: Trait = Vec; + } + + goal { + if (T: Trait) { + WellFormed(T) + } + } yields { + expect![["Unique"]] + } + + goal { + WellFormed(T) + } yields { + expect![["No possible solution"]] + } + + goal { + forall { + if (U : Trait) { + WellFormed(S) + } + } + } yields { + expect![["Unique"]] + } + + goal { + forall { + WellFormed(S) + } + } yields { + expect![["No possible solution"]] + } + } +} + +#[test] +fn opaque_generics_simple() { + test! 
{ + program { + trait Iterator { type Item; } + + struct Vec { } + struct Bar { } + impl Iterator for Vec { + type Item = u32; + } + + opaque type Foo: Iterator = Vec; + } + + goal { + Foo: Iterator + } yields { + expect![["Unique"]] + } + + } +} + +#[test] +fn opaque_generics() { + test! { + program { + trait Iterator { type Item; } + + struct Vec { } + struct Bar { } + + impl Iterator for Vec { + type Item = T; + } + + opaque type Foo: Iterator = Vec; + } + + goal { + Foo: Iterator + } yields { + expect![["Unique"]] + } + + goal { + forall { + Foo: Iterator + } + } yields { + expect![["Unique"]] + } + + goal { + exists { + as Iterator>::Item = T + } + } yields[SolverChoice::slg_default()] { + expect![["Ambiguous; no inference guidance"]] // #234 + } yields[SolverChoice::recursive_default()] { + expect![["Unique; substitution [?0 := Bar]"]] + } + } +} + +#[test] +fn opaque_trait_generic() { + test! { + program { + trait Trait {} + struct Foo {} + impl Trait for Foo {} + + opaque type Bar: Trait = Foo; + } + + goal { + exists { + Bar: Trait + } + } yields { + expect![["Unique; substitution [?0 := Uint(U32)]"]] + } + } +} + +#[test] +fn opaque_auto_traits() { + test! { + program { + struct Bar { } + struct Baz { } + trait Trait { } + + impl Trait for Bar { } + impl Trait for Baz { } + + #[auto] + trait Send { } + + impl !Send for Baz { } + + opaque type Opaque1: Trait = Bar; + opaque type Opaque2: Trait = Baz; + } + + goal { + Opaque1: Send + } yields { + expect![["Unique"]] + } + + goal { + Opaque2: Send + } yields { + expect![["No possible solution"]] + } + } +} + +#[test] +fn opaque_auto_traits_indirect() { + test! 
{ + program { + struct Bar { } + struct Baz { } + trait Trait { } + + impl Trait for Bar { } + impl Trait for Baz { } + + #[auto] + trait Send { } + trait SendDerived where Self: Send { } + + impl SendDerived for T where T: Send { } + + impl !Send for Baz { } + + opaque type Opaque1: Trait = Bar; + opaque type Opaque2: Trait = Baz; + } + + goal { + Opaque1: SendDerived + } yields { + expect![["Unique"]] + } + + goal { + Opaque2: SendDerived + } yields { + expect![["No possible solution"]] + } + } +} + +#[test] +fn opaque_super_trait() { + test! { + program { + trait Base {} + trait Super where Self: Base {} + impl Base for () {} + impl Super for () {} + + opaque type Opaque: Super = (); + } + + goal { + Opaque: Base + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn opaque_assoc_in_super_trait_bounds() { + test! { + program { + trait Foo { + type A; + } + trait EmptyFoo where Self: Foo { } + impl Foo for i32 { + type A = (); + } + impl EmptyFoo for T where T: Foo { } + + opaque type T: EmptyFoo = i32; + } + + goal { + T: EmptyFoo + } yields { + expect![[r#"Unique"#]] + } + + goal { + T: Foo + } yields { + expect![[r#"Unique"#]] + } + } +} diff --git a/tests/test/pointee.rs b/tests/test/pointee.rs new file mode 100644 index 00000000000..b53a0cd6ebd --- /dev/null +++ b/tests/test/pointee.rs @@ -0,0 +1,152 @@ +use super::*; + +#[test] +fn last_field_metadata() { + test! 
{ + program { + #[lang(pointee_trait)] + trait Pointee { + type Metadata; + } + + struct S { + field1: i32, + field2: [i32], + } + } + + goal { + Normalize(<(i32, str) as Pointee>::Metadata -> usize) + } yields { + expect![["Unique"]] + } + + goal { + Normalize(<(u8, i64) as Pointee>::Metadata -> ()) + } yields { + expect![["Unique"]] + } + + goal { + Normalize(<() as Pointee>::Metadata -> ()) + } yields { + expect![["Unique"]] + } + + goal { + Normalize(::Metadata -> usize) + } yields { + expect![["Unique"]] + } + + goal { + Normalize(<((), S) as Pointee>::Metadata -> usize) + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn primitives() { + test! { + program { + #[lang(pointee_trait)] + trait Pointee { + type Metadata; + } + } + + goal { + Normalize(::Metadata -> usize) + } yields { + expect![["Unique"]] + } + + goal { + Normalize(::Metadata -> ()) + } yields { + expect![["No possible solution"]] + } + + goal { + Normalize(::Metadata -> ()) + } yields { + expect![["Unique"]] + } + + goal { + Normalize(::Metadata -> ()) + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn everything_is_pointee() { + test! { + program { + #[lang(pointee_trait)] + trait Pointee { + type Metadata; + } + } + + goal { + forall { + T: Pointee + } + } yields { + expect![["Unique"]] + } + + goal { + forall { + Normalize(::Metadata -> usize) + } + } yields { + expect![["No possible solution"]] + } + } +} + +#[test] +fn slices() { + test! 
{ + program { + #[lang(pointee_trait)] + trait Pointee { + type Metadata; + } + + struct S {} + } + + goal { + [S]: Pointee + } yields { + expect![["Unique"]] + } + + goal { + Normalize(<[S] as Pointee>::Metadata -> usize) + } yields { + expect![["Unique"]] + } + + goal { + Normalize(<[S] as Pointee>::Metadata -> ()) + } yields { + expect![["No possible solution"]] + } + + goal { + forall { + Normalize(<[T] as Pointee>::Metadata -> usize) + } + } yields { + expect![["Unique"]] + } + } +} diff --git a/tests/test/projection.rs b/tests/test/projection.rs new file mode 100644 index 00000000000..795ef2494ab --- /dev/null +++ b/tests/test/projection.rs @@ -0,0 +1,1302 @@ +//! Tests related to projection of associated types and normalization. + +use super::*; + +#[test] +fn normalize_basic() { + test! { + program { + trait Iterator { type Item; } + struct Vec { } + struct Foo { } + impl Iterator for Vec { + type Item = T; + } + } + + goal { + forall { + exists { + Normalize( as Iterator>::Item -> U) + } + } + } yields { + expect![["Unique; substitution [?0 := !1_0]"]] + } + + goal { + forall { + Vec: Iterator + } + } yields { + expect![["Unique"]] + } + + goal { + forall { + if (T: Iterator) { + ::Item = Foo + } + } + } yields { + expect![["Unique"]] + } + + goal { + forall { + if (T: Iterator) { + exists { + T: Iterator + } + } + } + } yields { + expect![["Unique; substitution [?0 := (Iterator::Item)]"]] + } + + goal { + forall { + if (T: Iterator) { + exists { + T: Iterator + } + } + } + } yields { + expect![["Unique; substitution [?0 := (Iterator::Item)]"]] + } + + goal { + forall { + if (T: Iterator) { + ::Item = ::Item + } + } + } yields { + expect![["Unique"]] + } + + goal { + forall { + if (T: Iterator) { + exists { + ::Item = ::Item + } + } + } + } yields { + // True for `U = T`, of course, but also true for `U = Vec<::Item>`. + expect![["Ambiguous; no inference guidance"]] + } + } +} + +#[test] +fn normalize_into_iterator() { + test! 
{ + program { + trait IntoIterator { type Item; } + trait Iterator { type Item; } + struct Vec { } + struct Foo { } + impl IntoIterator for Vec { + type Item = T; + } + impl IntoIterator for T where T: Iterator { + type Item = ::Item; + } + } + + goal { + forall { + exists { + Normalize( as IntoIterator>::Item -> U) + } + } + } yields { + expect![["Unique; substitution [?0 := !1_0]"]] + } + } +} + +#[test] +fn projection_equality() { + test! { + program { + trait Trait1 { + type Type; + } + trait Trait2 { } + impl Trait2 for U where U: Trait1 {} + + struct S {} + impl Trait1 for S { + type Type = u32; + } + } + + goal { + exists { + S: Trait1 + } + } yields[SolverChoice::slg_default()] { + // this is wrong, chalk#234 + expect![["Ambiguous; no inference guidance"]] + } yields[SolverChoice::recursive_default()] { + expect![["Unique; substitution [?0 := Uint(U32)]"]] + } + + goal { + exists { + S: Trait2 + } + } yields[SolverChoice::slg_default()] { + // this is wrong, chalk#234 + expect![["Ambiguous; no inference guidance"]] + } yields[SolverChoice::recursive_default()] { + expect![["Unique; substitution [?0 := Uint(U32)]"]] + } + } +} + +#[test] +fn projection_equality_priority1() { + test! { + program { + trait Trait1 { + type Type; + } + + struct S1 {} + struct S2 {} + struct S3 {} + + impl Trait1 for S1 { + type Type = u32; + } + } + + goal { + exists { + S1: Trait1 + } + } yields[SolverChoice::slg_default()] { + // this is wrong, chalk#234 + expect![["Ambiguous; definite substitution for { [?0 := S2, ?1 := ^0.0] }"]] + } yields[SolverChoice::recursive_default()] { + // This is.. interesting, but not necessarily wrong. + // It's certainly true that based on the impls we see + // the only possible value for `U` is `u32`. + // + // Can we come to any harm by inferring that `T = S2` + // here, even though we could've chosen to say that + // `U = !>::Type` and thus not + // constrained `T` at all? 
I can't come up with + // an example where that's the case, so maybe + // not. -Niko + expect![["Unique; substitution [?0 := S2, ?1 := Uint(U32)]"]] + } + } +} + +#[test] +fn projection_equality_priority2() { + test! { + program { + trait Trait1 { + type Type; + } + + struct S1 {} + struct S2 {} + struct S3 {} + + impl Trait1 for X { + type Type = u32; + } + } + + goal { + forall { + if (X: Trait1) { + exists { + X: Trait1 + } + } + } + } yields { + // Correct: Ambiguous because Out1 = Y and Out1 = S1 are both value. + expect![["Ambiguous; no inference guidance"]] + } + + goal { + forall { + if (X: Trait1) { + exists { + X: Trait1, + Out1 = Y + } + } + } + } yields { + // Constraining Out1 = Y gives us only one choice. + expect![["Unique; substitution [?0 := !1_1, ?1 := (Trait1::Type)]"]] + } + + goal { + forall { + if (X: Trait1) { + exists { + Out1 = Y, + X: Trait1 + } + } + } + } yields { + // Constraining Out1 = Y gives us only one choice. + expect![["Unique; substitution [?0 := !1_1, ?1 := (Trait1::Type)]"]] + } + + goal { + forall { + if (X: Trait1) { + exists { + Out1 = S1, + X: Trait1 + } + } + } + } yields[SolverChoice::slg_default()] { + // chalk#234: Constraining Out1 = S1 gives us only the choice to + // use the impl, but the SLG solver can't decide between + // the placeholder and the normalized form. + expect![["Ambiguous; definite substitution for { [?0 := S1, ?1 := ^0.0] }"]] + } yields[SolverChoice::recursive_default()] { + // Constraining Out1 = S1 gives us only one choice, use the impl, + // and the recursive solver prefers the normalized form. + expect![["Unique; substitution [?0 := S1, ?1 := Uint(U32)]"]] + } + } +} +#[test] +fn projection_equality_from_env() { + test! 
{ + program { + trait Trait1 { + type Type; + } + } + + goal { + forall { + if (T: Trait1) { + exists { + ::Type = U + } + } + } + } yields[SolverChoice::slg_default()] { + // this is wrong, chalk#234 + expect![["Ambiguous; no inference guidance"]] + } yields[SolverChoice::recursive_default()] { + expect![["Unique; substitution [?0 := Uint(U32)]"]] + } + } +} + +#[test] +fn projection_equality_nested() { + test! { + program { + trait Iterator { + type Item; + } + } + + goal { + forall { + if (I: Iterator) { + if (::Item: Iterator) { + exists { + <::Item as Iterator>::Item = U + } + } + } + } + } yields[SolverChoice::slg_default()] { + // this is wrong, chalk#234 + expect![["Ambiguous; no inference guidance"]] + } yields[SolverChoice::recursive_default()] { + expect![["Unique; substitution [?0 := Uint(U32)]"]] + } + } +} + +#[test] +fn iterator_flatten() { + test! { + program { + trait Iterator { + type Item; + } + #[non_enumerable] + trait IntoIterator { + type Item; + type IntoIter: Iterator::Item>; + } + struct Flatten {} + + impl Iterator for Flatten + where + I: Iterator, + ::Item: IntoIterator, + ::Item: IntoIterator::Item>, + U: Iterator + { + type Item = ::Item; + } + } + + goal { + forall { + if (I: Iterator; U: IntoIterator) { + exists { + as Iterator>::Item = T + } + } + } + } yields[SolverChoice::slg_default()] { + // this is wrong, chalk#234 + expect![["Ambiguous; no inference guidance"]] + } yields[SolverChoice::recursive_default()] { + expect![["Unique; substitution [?0 := Uint(U32)]"]] + } + } +} + +#[test] +fn normalize_gat1() { + test! 
{ + program { + struct Vec { } + + trait Iterable { + type Iter<'a>; + } + + impl Iterable for Vec { + type Iter<'a> = Iter<'a, T>; + } + + trait Iterator { + type Item; + } + + struct Iter<'a, T> { } + struct Ref<'a, T> { } + + impl<'a, T> Iterator for Iter<'a, T> { + type Item = Ref<'a, T>; + } + } + + goal { + forall { + forall<'a> { + exists { + Normalize( as Iterable>::Iter<'a> -> U) + } + } + } + } yields { + expect![["Unique; substitution [?0 := Iter<'!2_0, !1_0>]"]] + } + } +} + +#[test] +fn normalize_gat2() { + test! { + program { + trait StreamingIterator { type Item<'a>; } + struct Span<'a, T> { } + struct StreamIterMut { } + struct Foo { } + impl StreamingIterator for StreamIterMut { + type Item<'a> = Span<'a, T>; + } + } + + goal { + forall<'a, T> { + exists { + Normalize( as StreamingIterator>::Item<'a> -> U) + } + } + } yields { + expect![["Unique; substitution [?0 := Span<'!1_0, !1_1>]"]] + } + + goal { + forall<'a, T> { + as StreamingIterator>::Item<'a> = Span<'a, T> + } + } yields { + expect![["Unique"]] + } + + goal { + forall<'a, T, U> { + if (T: StreamingIterator = Span<'a, U>>) { + >::Item<'a> = Span<'a, U> + } + } + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn normalize_gat_const() { + test! { + program { + trait StreamingIterator { type Item; } + struct Span { } + struct StreamIterMut { } + impl StreamingIterator for StreamIterMut { + type Item = Span; + } + } + + goal { + forall { + exists { + Normalize( as StreamingIterator>::Item -> U) + } + } + } yields { + expect![["Unique; substitution [?0 := Span]"]] + } + + goal { + forall { + as StreamingIterator>::Item = Span + } + } yields { + expect![["Unique"]] + } + + goal { + forall { + if (T: StreamingIterator = Span>) { + >::Item = Span + } + } + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn normalize_gat_with_where_clause() { + test! 
{ + program { + trait Sized { } + trait Foo { + type Item where T: Sized; + } + + struct Value { } + struct Sometype { } + impl Foo for Sometype { + type Item = Value; + } + } + + goal { + forall { + exists { + Normalize(::Item -> U) + } + } + } yields { + expect![["No possible solution"]] + } + + goal { + forall { + exists { + if (T: Sized) { + Normalize(::Item -> U) + } + } + } + } yields { + expect![["Unique; substitution [?0 := Value]"]] + } + } +} + +#[test] +fn normalize_gat_with_where_clause2() { + test! { + program { + trait Bar { } + trait Foo { + type Item where U: Bar; + } + + struct Baz { } + impl Foo for Baz { + type Item = U; + } + } + + goal { + forall { + exists { + Normalize(>::Item -> V) + } + } + } yields { + expect![["No possible solution"]] + } + + goal { + forall { + exists { + if (U: Bar) { + Normalize(>::Item -> V) + } + } + } + } yields { + expect![["Unique; substitution [?0 := !1_1]"]] + } + } +} + +#[test] +fn normalize_gat_with_higher_ranked_trait_bound() { + test! { + program { + trait Foo<'a, T> { } + struct Baz { } + + trait Bar<'a, T> { + type Item: Foo<'a, T> where forall<'b> V: Foo<'b, T>; + } + + impl<'a, T> Foo<'a, T> for Baz { } + impl<'a, T> Bar<'a, T> for Baz { + type Item = Baz; + } + } + + goal { + forall<'a, T, V> { + if (forall<'b> { V: Foo<'b, T> }) { + exists { + Normalize(>::Item -> U) + } + } + } + } yields { + expect![["Unique; substitution [?0 := Baz]"]] + } + } +} + +#[test] +fn gat_in_alias_in_alias_eq() { + test! { + program { + trait Foo { + type Rebind: Foo; + } + + struct S { } + impl Foo for S { + type Rebind = S; + } + } + + goal { + exists { + < as Foo>::Rebind as Foo>::Rebind: Foo + } + } yields { + expect![[r#"Unique"#]] + } + } +} + +#[test] +fn gat_bound_for_self_type() { + test! 
{ + program { + struct I32 { } + trait Trait { + type Assoc: Another = usize>; + } + trait Another { + type Gat; + } + } + + goal { + forall { + exists { + if (T: Trait) { + <::Assoc as Another>::Gat = U + } + } + } + } yields[SolverChoice::recursive_default()] { + expect![[r#"Unique; substitution [?0 := Uint(Usize)]"#]] + } + } +} + +#[test] +fn forall_projection() { + test! { + program { + trait Eq { } + impl Eq for T { } + + trait DropLt<'a> { type Item; } + impl<'a, T> DropLt<'a> for T { type Item = T; } + + struct Unit { } + struct Ref<'a, T> { } + } + + goal { + for<'a> fn(>::Item): Eq + } yields { + expect![["Unique"]] + } + } +} + +/// Demonstrates that, given the expected value of the associated +/// type, we can use that to narrow down the relevant impls. +#[test] +fn forall_projection_gat() { + test! { + program { + trait Eq { } + impl Eq for T { } + + trait Sized { } + + trait DropOuter<'a> { type Item where U: Sized; } + impl<'a, T> DropOuter<'a> for T { type Item = T; } + + struct Unit { } + struct Ref<'a, T> { } + } + + goal { + forall { + for<'a> fn(>::Item): Eq + } + } yields { + expect![["No possible solution"]] + } + + goal { + forall { + if (T: Sized) { + for<'a> fn(>::Item): Eq + } + } + } yields { + expect![["Unique"]] + } + + goal { + forall<'a, T> { + WellFormed(>::Item) + } + } yields { + expect![["No possible solution"]] + } + + goal { + forall { + if (T: Sized) { + WellFormed(for<'a> fn(>::Item): Eq) + } + } + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn gat_in_non_enumerable_trait() { + test! { + program { + #[non_enumerable] + trait Deref { } + + #[non_enumerable] + trait PointerFamily { + type Pointer: Deref; + } + } + + goal { + forall { + forall { + if (T: PointerFamily) { + ::Pointer: Deref + } + } + } + } yields { + expect![[r#"Unique"#]] + } + } +} + +#[test] +fn normalize_under_binder() { + test! 
{ + program { + struct Ref<'a, T> { } + struct I32 { } + + trait Deref<'a> { + type Item; + } + + trait Id<'a> { + type Item; + } + + impl<'a, T> Deref<'a> for Ref<'a, T> { + type Item = T; + } + + impl<'a, T> Id<'a> for Ref<'a, T> { + type Item = Ref<'a, T>; + } + } + + goal { + exists { + forall<'a> { + Ref<'a, I32>: Deref<'a, Item = U> + } + } + } yields[SolverChoice::slg_default()] { + // chalk#234, I think + expect![["Ambiguous; no inference guidance"]] + } yields[SolverChoice::recursive_default()] { + expect![["Unique; substitution [?0 := I32]"]] + } + + goal { + exists { + forall<'a> { + Normalize( as Deref<'a>>::Item -> U) + } + } + } yields { + expect![["Unique; substitution [?0 := I32]"]] + } + + goal { + forall<'a> { + exists { + Ref<'a, I32>: Id<'a, Item = U> + } + } + } yields[SolverChoice::slg_default()] { + // chalk#234, I think + expect![["Ambiguous; no inference guidance"]] + } yields[SolverChoice::recursive_default()] { + expect![["Unique; substitution [?0 := Ref<'!1_0, I32>]"]] + } + + goal { + forall<'a> { + exists { + Normalize( as Id<'a>>::Item -> U) + } + } + } yields { + expect![["Unique; substitution [?0 := Ref<'!1_0, I32>]"]] + } + + goal { + exists { + forall<'a> { + Normalize( as Id<'a>>::Item -> U) + } + } + } yields { + expect![["Unique; for { \ + substitution [?0 := Ref<'^0.0, I32>], \ + lifetime constraints [\ + InEnvironment { environment: Env([]), goal: '!1_0: '^0.0 }, \ + InEnvironment { environment: Env([]), goal: '^0.0: '!1_0 }] \ + }"]] + } + } +} + +#[test] +fn normalize_under_binder_multi() { + test! 
{ + program { + struct Ref<'a, T> { } + struct I32 { } + + trait Deref<'a> { + type Item; + } + + impl<'a, T> Deref<'a> for Ref<'a, T> { + type Item = T; + } + } + + goal { + exists { + forall<'a> { + Ref<'a, I32>: Deref<'a, Item = U> + } + } + } yields_all { + expect![["substitution [?0 := I32]"]], + expect![["for { substitution [?0 := (Deref::Item), '^0.1>], lifetime constraints [\ + InEnvironment { environment: Env([]), goal: '!1_0: '^0.1 }, \ + InEnvironment { environment: Env([]), goal: '^0.1: '!1_0 }, \ + InEnvironment { environment: Env([]), goal: '!1_0: '^0.0 }, \ + InEnvironment { environment: Env([]), goal: '^0.0: '!1_0 }] }"]] + } + + goal { + exists { + forall<'a> { + Ref<'a, I32>: Deref<'a, Item = U> + } + } + } yields_first { + expect![["substitution [?0 := I32]"]] + } + } +} + +#[test] +fn projection_from_env_a() { + test! { + program { + trait Sized { } + + struct Slice where T: Sized { } + impl Sized for Slice { } + + trait SliceExt + { + type Item; + } + + impl SliceExt for Slice + { + type Item = T; + } + } + + goal { + forall { + if ( + as SliceExt>::Item: Sized + ) { + T: Sized + } + } + } yields { + expect![["Unique"]] + } + } +} + +// This variant of the above test used to be achingly slow on SLG +// solvers, before the "trivial answer" green cut was introduced. +// +// The problem was that we wound up enumerating a goal like +// +// ::Item = !1_0 +// +// which meant "find me the types that normalize to `!1_0`". We had no +// problem finding these types, but after the first such type, we had +// the only unique answer we would ever find, and we wanted to reach +// the point where we could say "no more answers", so we kept +// requesting more answers. +#[test] +fn projection_from_env_slow() { + test! 
{ + program { + trait Clone { } + trait Sized { } + + struct Slice where T: Sized { } + impl Sized for Slice { } + + struct Foo { } + impl Clone for Foo { } + impl Sized for Foo { } + + trait SliceExt + where ::Item: Clone + { + type Item; + } + + impl SliceExt for Slice + where T: Clone + { + type Item = T; + } + } + + goal { + forall { + if ( + as SliceExt>::Item: Clone; + as SliceExt>::Item: Sized; + T: Clone + ) { + T: Sized + } + } + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn gat_unify_with_implied_wc() { + test! { + program { + struct Slice { } + + trait Cast { } + trait CastingIter { + type Item: Cast where T: Cast; + } + + impl CastingIter for Slice { + type Item = Castable; + } + + struct Castable { } + impl Cast for Castable { } + } + + goal { + forall { + if ( + FromEnv( as CastingIter>::Item) + ) { + T: Cast + } + } + } yields { + expect![["Unique"]] + } + + goal { + forall { + T: Cast + } + } yields { + expect![["No possible solution"]] + } + } +} + +// See rust-lang/chalk#280 +#[test] +fn rust_analyzer_regression() { + test! { + program { + trait FnOnce { + type Output; + } + + trait Try { + type Ok; + type Error; + } + + struct Tuple { } + + trait ParallelIterator { + type Item; + } + } + + //fn try_reduce_with(pi: PI, reduce_op: R) -> Option + // where + // PI: ParallelIterator, + // R: FnOnce(T::Ok) -> T, + // T: Try, + // { + // pi.drive_unindexed() + // } + // + // where `drive_unindexed` is a method in `ParallelIterator`: + // + // fn drive_unindexed(self) -> (); + + goal { + forall { + if ( + PI: ParallelIterator; + R: FnOnce::Ok, ::Ok >>; + T: Try + ) { + PI: ParallelIterator + } + } + } yields_first[SolverChoice::slg(4, None)] { + expect![["Floundered"]] + } + } +} + +#[test] +fn issue_144_regression() { + test! 
{ + program { + trait Bar { } + trait Foo { type Item: Bar; } + } + + goal { + forall { + if (T: Foo) { + ::Item: Bar + } + } + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn guidance_for_projection_on_flounder() { + test! { + program { + trait Iterator { type Item; } + #[non_enumerable] + trait Step {} + + struct Range {} + + impl Iterator for Range where T: Step { + type Item = T; + } + } + + goal { + exists { + exists { + as Iterator>::Item = U + } + } + } yields[SolverChoice::recursive_default()] { + expect![["Ambiguous; definite substitution for { [?0 := ^0.0, ?1 := ^0.0] }"]] + } + } +} + +#[test] +fn projection_to_dyn() { + test! { + program { + trait AsDyn { + type Dyn; + } + + #[object_safe] + trait Debug {} + + impl AsDyn for () { + type Dyn = dyn Debug + 'static; + } + } + + goal { + <() as AsDyn>::Dyn: Debug + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn projection_to_opaque() { + test! { + program { + #[non_enumerable] + trait Debug { + type Output; + } + + impl Debug for () { + type Output = (); + } + + opaque type OpaqueDebug: Debug = (); + + struct A {} + + trait AsProj { + type Proj; + } + + impl AsProj for A { + type Proj = OpaqueDebug; + } + } + + goal { + ::Proj: Debug + } yields { + expect![["Unique"]] + } + + goal { + <::Proj as Debug>::Output = () + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn projection_from_super_trait_bounds() { + test! { + program { + trait Foo { + type A; + } + trait Bar where Self: Foo {} + impl Foo for i32 { + type A = (); + } + impl Bar for i32 {} + opaque type Opaque: Bar = i32; + } + + goal { + forall<'a> { + ::A = () + } + } yields { + expect![[r#"Unique"#]] + } + + goal { + ::A = () + } yields { + expect![[r#"Unique"#]] + } + } +} + +#[test] +fn nested_proj_eq_nested_proj_should_flounder() { + test! 
{ + program { + #[non_enumerable] + trait Trait1 { + type Assoc: Trait2; + } + #[non_enumerable] + trait Trait2 { + type Assoc; + } + + impl Trait1 for () { + type Assoc = (); + } + impl Trait1 for i32 { + type Assoc = (); + } + impl Trait2 for () { + type Assoc = (); + } + } + + goal { + exists { + <::Assoc as Trait2>::Assoc = <::Assoc as Trait2>::Assoc + } + } yields[SolverChoice::slg_default()] { + // FIXME + expect![[r#"Ambiguous; definite substitution for { [?0 := ^0.0, ?1 := ^0.0] }"#]] + } yields[SolverChoice::recursive_default()] { + expect![[r#"Ambiguous; no inference guidance"#]] + } + } +} + +#[test] +fn clauses_for_placeholder_projection_types() { + test! { + program { + trait Iterator { type Item; } + trait IntoIterator { + type Item; + type IntoIter: Iterator::Item>; + } + + struct Vec { } + impl IntoIterator for Vec { + type Item = T; + type IntoIter = Iter; + } + + struct Iter { } + impl Iterator for Iter { + type Item = T; + } + + opaque type Opaque: IntoIterator = Vec; + } + + goal { + forall { + as IntoIterator>::IntoIter: Iterator + } + } yields { + expect![[r#"Unique"#]] + } + + goal { + forall { + exists { + < as IntoIterator>::IntoIter as Iterator>::Item = U + } + } + } yields[SolverChoice::slg_default()] { + // FIXME: chalk#234? + expect![[r#"Ambiguous; no inference guidance"#]] + } yields[SolverChoice::recursive_default()] { + expect![[r#"Unique; substitution [?0 := !1_0]"#]] + } + } +} diff --git a/tests/test/refs.rs b/tests/test/refs.rs new file mode 100644 index 00000000000..c8933071890 --- /dev/null +++ b/tests/test/refs.rs @@ -0,0 +1,69 @@ +use super::*; + +#[test] +fn immut_refs_are_well_formed() { + test! 
{ + program { + struct A { } + } + + goal { + forall<'a, T> { + WellFormed(&'a T) + } + } yields { + expect![["Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: !1_1: '!1_0 }]"]] + } + + goal { + exists<'a> { + WellFormed(&'a A) + } + } yields { + expect![["Unique; for { substitution [?0 := '^0.0], lifetime constraints [InEnvironment { environment: Env([]), goal: A: '^0.0 }] }"]] + } + } +} + +#[test] +fn immut_refs_are_sized() { + test! { + program { + #[lang(sized)] + trait Sized { } + } + + goal { + forall<'a, T> { &'a T: Sized } + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn mut_refs_are_well_formed() { + test! { + goal { + forall<'a, T> { WellFormed(&'a mut T) } + } yields { + expect![["Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: !1_1: '!1_0 }]"]] + } + } +} + +#[test] +fn mut_refs_are_sized() { + test! { + program { + #[lang(sized)] + trait Sized { } + } + + goal { + forall<'a, T> { &'a mut T: Sized } + } yields { + expect![["Unique"]] + } + } +} diff --git a/tests/test/scalars.rs b/tests/test/scalars.rs new file mode 100644 index 00000000000..bdf265af7d6 --- /dev/null +++ b/tests/test/scalars.rs @@ -0,0 +1,172 @@ +use super::*; + +#[test] +fn scalar_in_tuple_trait_impl() { + test! { + program { + trait Foo { } + impl Foo for usize { } + impl Foo for isize { } + impl Foo for (T1, T2) where T1: Foo, T2: Foo { } + impl Foo for (T,T,T) where T: Foo { } + } + + goal { + (usize, usize): Foo + } yields { + expect![["Unique"]] + } + + goal { + (usize, isize): Foo + } yields { + expect![["Unique"]] + } + + goal { + (usize, bool): Foo + } yields { + expect![["No possible solution"]] + } + + goal { + (usize, usize, usize): Foo + } yields { + expect![["Unique"]] + } + + goal { + (char, u8, i8): Foo + } yields { + expect![["No possible solution"]] + } + } +} + +#[test] +fn scalar_trait_impl() { + test! 
{ + program { + trait Foo { } + trait UnsignedFoo { } + + impl Foo for i8 { } + impl Foo for i16 { } + impl Foo for i32 { } + impl Foo for i64 { } + impl Foo for i128 { } + impl Foo for isize { } + impl Foo for u8 { } + impl Foo for u16 { } + impl Foo for u32 { } + impl Foo for u64 { } + impl Foo for u128 { } + impl Foo for usize { } + impl Foo for f16 { } + impl Foo for f32 { } + impl Foo for f64 { } + impl Foo for f128 { } + impl Foo for bool { } + impl Foo for char { } + + impl UnsignedFoo for u8 { } + impl UnsignedFoo for u16 { } + impl UnsignedFoo for u32 { } + impl UnsignedFoo for u64 { } + impl UnsignedFoo for u128 { } + impl UnsignedFoo for usize { } + + } + + goal { i8: Foo } yields { expect![["Unique"]] } + goal { i16: Foo } yields { expect![["Unique"]] } + goal { i32: Foo } yields { expect![["Unique"]] } + goal { i64: Foo } yields { expect![["Unique"]] } + goal { i128: Foo } yields { expect![["Unique"]] } + goal { isize: Foo } yields { expect![["Unique"]] } + goal { u8: Foo } yields { expect![["Unique"]] } + goal { u16: Foo } yields { expect![["Unique"]] } + goal { u32: Foo } yields { expect![["Unique"]] } + goal { u64: Foo } yields { expect![["Unique"]] } + goal { u128: Foo } yields { expect![["Unique"]] } + goal { usize: Foo } yields { expect![["Unique"]] } + goal { f16: Foo } yields { expect![["Unique"]] } + goal { f32: Foo } yields { expect![["Unique"]] } + goal { f64: Foo } yields { expect![["Unique"]] } + goal { f128: Foo } yields { expect![["Unique"]] } + goal { bool: Foo } yields { expect![["Unique"]] } + goal { char: Foo } yields { expect![["Unique"]] } + + goal { i8: UnsignedFoo } yields { expect![["No possible solution"]] } + goal { i16: UnsignedFoo } yields { expect![["No possible solution"]] } + goal { i32: UnsignedFoo } yields { expect![["No possible solution"]] } + goal { i64: UnsignedFoo } yields { expect![["No possible solution"]] } + goal { i128: UnsignedFoo } yields { expect![["No possible solution"]] } + goal { isize: UnsignedFoo } 
yields { expect![["No possible solution"]] } + goal { u8: UnsignedFoo } yields { expect![["Unique"]] } + goal { u16: UnsignedFoo } yields { expect![["Unique"]] } + goal { u32: UnsignedFoo } yields { expect![["Unique"]] } + goal { u64: UnsignedFoo } yields { expect![["Unique"]] } + goal { u128: UnsignedFoo } yields { expect![["Unique"]] } + goal { usize: UnsignedFoo } yields { expect![["Unique"]] } + goal { f16: UnsignedFoo } yields { expect![["No possible solution"]] } + goal { f32: UnsignedFoo } yields { expect![["No possible solution"]] } + goal { f64: UnsignedFoo } yields { expect![["No possible solution"]] } + goal { f128: UnsignedFoo } yields { expect![["No possible solution"]] } + goal { bool: UnsignedFoo } yields { expect![["No possible solution"]] } + goal { char: UnsignedFoo } yields { expect![["No possible solution"]] } + + } +} + +#[test] +fn scalars_are_well_formed() { + test! { + goal { WellFormed(i8) } yields { expect![["Unique"]] } + goal { WellFormed(i16) } yields { expect![["Unique"]] } + goal { WellFormed(i32) } yields { expect![["Unique"]] } + goal { WellFormed(i64) } yields { expect![["Unique"]] } + goal { WellFormed(i128) } yields { expect![["Unique"]] } + goal { WellFormed(isize) } yields { expect![["Unique"]] } + goal { WellFormed(u8) } yields { expect![["Unique"]] } + goal { WellFormed(u16) } yields { expect![["Unique"]] } + goal { WellFormed(u32) } yields { expect![["Unique"]] } + goal { WellFormed(u64) } yields { expect![["Unique"]] } + goal { WellFormed(u128) } yields { expect![["Unique"]] } + goal { WellFormed(usize) } yields { expect![["Unique"]] } + goal { WellFormed(f16) } yields { expect![["Unique"]] } + goal { WellFormed(f32) } yields { expect![["Unique"]] } + goal { WellFormed(f64) } yields { expect![["Unique"]] } + goal { WellFormed(f128) } yields { expect![["Unique"]] } + goal { WellFormed(bool) } yields { expect![["Unique"]] } + goal { WellFormed(char) } yields { expect![["Unique"]] } + } +} + +#[test] +fn scalars_are_sized() { 
+ test! { + program { + #[lang(sized)] trait Sized { } + } + + goal { i8: Sized } yields { expect![["Unique"]] } + goal { i16: Sized } yields { expect![["Unique"]] } + goal { i32: Sized } yields { expect![["Unique"]] } + goal { i64: Sized } yields { expect![["Unique"]] } + goal { i128: Sized } yields { expect![["Unique"]] } + goal { isize: Sized } yields { expect![["Unique"]] } + goal { u8: Sized } yields { expect![["Unique"]] } + goal { u16: Sized } yields { expect![["Unique"]] } + goal { u32: Sized } yields { expect![["Unique"]] } + goal { u64: Sized } yields { expect![["Unique"]] } + goal { u128: Sized } yields { expect![["Unique"]] } + goal { usize: Sized } yields { expect![["Unique"]] } + goal { f16: Sized } yields { expect![["Unique"]] } + goal { f32: Sized } yields { expect![["Unique"]] } + goal { f64: Sized } yields { expect![["Unique"]] } + goal { f128: Sized } yields { expect![["Unique"]] } + goal { bool: Sized } yields { expect![["Unique"]] } + goal { char: Sized } yields { expect![["Unique"]] } + } +} diff --git a/tests/test/slices.rs b/tests/test/slices.rs new file mode 100644 index 00000000000..90d4b348ec0 --- /dev/null +++ b/tests/test/slices.rs @@ -0,0 +1,71 @@ +use super::*; + +#[test] +fn slices_are_not_sized() { + test! { + program { + #[lang(sized)] + trait Sized { } + } + + goal { + forall { not { [T]: Sized } } + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn slices_are_well_formed_if_elem_sized() { + test! { + program { + #[lang(sized)] + trait Sized { } + } + + goal { + forall { if (T: Sized) { WellFormed([T]) } } + } yields { + expect![["Unique"]] + } + + goal { + forall { WellFormed([T]) } + } yields { + expect![["No possible solution"]] + } + } +} + +#[test] +fn slices_are_not_copy() { + test! { + program { + #[lang(copy)] + trait Copy { } + } + + goal { + forall { not { [T]: Copy } } + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn slices_are_not_clone() { + test! 
{ + program { + #[lang(clone)] + trait Clone { } + } + + goal { + forall { not { [T]: Clone } } + } yields { + expect![["Unique"]] + } + } +} diff --git a/tests/test/string.rs b/tests/test/string.rs new file mode 100644 index 00000000000..355a0af60e0 --- /dev/null +++ b/tests/test/string.rs @@ -0,0 +1,53 @@ +use super::*; + +#[test] +fn str_trait_impl() { + test! { + program { + trait Foo {} + impl Foo for str {} + } + + goal { str: Foo } yields { expect![["Unique"]] } + } +} + +#[test] +fn str_is_well_formed() { + test! { + goal { WellFormed(str) } yields { expect![["Unique"]] } + } +} + +#[test] +fn str_is_not_sized() { + test! { + program { + #[lang(sized)] trait Sized {} + } + + goal { not { str: Sized } } yields { expect![["Unique"]] } + } +} + +#[test] +fn str_is_not_copy() { + test! { + program { + #[lang(copy)] trait Copy {} + } + + goal { not { str: Copy } } yields { expect![["Unique"]] } + } +} + +#[test] +fn str_is_not_clone() { + test! { + program { + #[lang(clone)] trait Clone {} + } + + goal { not { str: Clone } } yields { expect![["Unique"]] } + } +} diff --git a/tests/test/subtype.rs b/tests/test/subtype.rs new file mode 100644 index 00000000000..815fd111760 --- /dev/null +++ b/tests/test/subtype.rs @@ -0,0 +1,551 @@ +use super::*; + +#[test] +fn variance_lowering() { + lowering_success! { + program { + #[variance(Invariant, Covariant)] + struct Foo {} + struct Bar {} + #[variance(Invariant, Contravariant)] + fn foo(t: T, u: U); + fn bar(t: T, u: U); + } + } +} + +#[test] +fn subtype_simple() { + test! { + program { + struct Foo { } + } + + goal { + Subtype(Foo, Foo) + } yields { + expect![["Unique"]] + } + } +} + +/// Test that `Foo<'a>` and `Foo<'b>` can be subtypes +/// if we constrain the lifetimes `'a` and `'b` appropriately. +#[test] +fn struct_lifetime_variance() { + test! 
{ + program { + #[variance(Covariant)] + struct Foo<'a> { } + } + + goal { + forall<'a, 'b> { + Subtype(Foo<'a>, Foo<'b>) + } + } yields { + expect![["Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_1: '!1_0 }]"]] + } + } +} + +/// Test that `&'a u32 <: &'b u32` if `'a: 'b` +#[test] +fn ref_lifetime_variance() { + test! { + goal { + forall<'a, 'b> { + Subtype(&'a u32, &'b u32) + } + } yields { + // Seems good! + expect![["Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '!1_1 }]"]] + } + } +} + +#[test] +fn fn_lifetime_variance_args() { + test! { + goal { + for<'a, 'b> fn(&'a u32, &'b u32) = for<'a> fn(&'a u32, &'a u32) + } yields[SolverChoice::recursive_default()] { + expect![["Unique; for { lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '^0.0 }, InEnvironment { environment: Env([]), goal: '!1_1: '^0.0 }, InEnvironment { environment: Env([]), goal: '!2_0: '^0.1 }, InEnvironment { environment: Env([]), goal: '!2_0: '^0.2 }] }"]] + } yields[SolverChoice::slg_default()] { + expect![["Unique; for { lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '^0.2 }, InEnvironment { environment: Env([]), goal: '!1_1: '^0.2 }, InEnvironment { environment: Env([]), goal: '!2_0: '^0.0 }, InEnvironment { environment: Env([]), goal: '!2_0: '^0.1 }] }"]] + } + } +} + +#[test] +fn fn_lifetime_variance_with_return_type() { + test! { + goal { + Subtype(for<'a, 'b> fn(&'a u32, &'b u32) -> &'a u32, for<'a> fn(&'a u32, &'a u32) -> &'a u32) + } yields { + // TODO: are these results actually correct? 
+ expect![["Unique; for { lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '^0.0 }, InEnvironment { environment: Env([]), goal: '!1_0: '^0.1 }, InEnvironment { environment: Env([]), goal: '^0.0: '!1_0 }] }"]] + } + goal { + Subtype(for<'a> fn(&'a u32, &'a u32) -> &'a u32, for<'a, 'b> fn(&'a u32, &'b u32) -> &'a u32) + } yields { + expect![["Unique; for { lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '^0.0 }, InEnvironment { environment: Env([]), goal: '!1_1: '^0.0 }, InEnvironment { environment: Env([]), goal: '^0.0: '!1_0 }] }"]] + } + } +} + +#[test] +fn generalize() { + test! { + program { + struct Foo { } + } + + goal { + forall<'a> { + exists { + Subtype(Foo<&'a u32>, Foo) + } + } + } yields { + // If this is invariant, then the generalizer might be doing + // the right thing here by creating the general form of `&'a u32` equal to + // just `&'a u32` + expect![["Unique; substitution [?0 := (&'!1_0 Uint(U32))]"]] + } + } +} + +/// Tests that the generalizer correctly generalizes lifetimes. +#[test] +fn multi_lifetime() { + test! { + goal { + forall<'a, 'b> { + exists { + Subtype(&'a u32, U), + Subtype(&'b u32, U) + } + } + } yields { + // Without the generalizer, we would yield a result like this: + // + // expect![["Unique; substitution [?0 := (&'!1_1 Uint(U32))], lifetime + // constraints [InEnvironment { environment: Env([]), goal: '!1_1: '!1_0 + // }]"]] + // + // This is incorrect, as we shouldn't be requiring 'a and 'b to be + // related to eachother. Instead, U should be &'?1 u32, with constraints + // ?1 : 'a, ?1: 'b. + expect![["Unique; for { substitution [?0 := (&'^0.0 Uint(U32))], lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '^0.0 }, InEnvironment { environment: Env([]), goal: '!1_1: '^0.0 }] }"]] + } + } +} + +/// Tests that the generalizer correctly generalizes lifetimes when given an +/// inference var on the left hand side. 
+#[test] +fn multi_lifetime_inverted() { + test! { + goal { + forall<'a, 'b> { + exists { + Subtype(U, &'a u32), + Subtype(U, &'b u32) + } + } + } yields { + // Without the generalizer, we would yield a result like this: + // + // "Unique; substitution [?0 := (&'!1_1 Uint(U32))], lifetime + // constraints [InEnvironment { environment: Env([]), goal: '!1_1: '!1_0 + // }]" + // + // This is incorrect, as we shouldn't be requiring 'a and 'b to be + // related to eachother. Instead, U should be &'?1 u32, with constraints + // ?1 : 'a, ?1: 'b. + expect![["Unique; for { substitution [?0 := (&'^0.0 Uint(U32))], lifetime constraints [InEnvironment { environment: Env([]), goal: '^0.0: '!1_0 }, InEnvironment { environment: Env([]), goal: '^0.0: '!1_1 }] }"]] + } + } +} + +/// Tests that we handle variance for covariant structs correctly. +#[test] +fn multi_lifetime_covariant_struct() { + test! { + program { + #[variance(Covariant)] + struct Foo {} + } + + goal { + forall<'a, 'b> { + exists { + Subtype(Foo<&'a u32>, Foo), + Subtype(Foo<&'b u32>, Foo) + } + } + } yields { + // Result should be identical to multi_lifetime result. + expect![["Unique; for { substitution [?0 := (&'^0.0 Uint(U32))], lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '^0.0 }, InEnvironment { environment: Env([]), goal: '!1_1: '^0.0 }] }"]] + } + goal { + forall<'a, 'b> { + exists { + Subtype(Foo, Foo<&'a u32>), + Subtype(Foo, Foo<&'b u32>) + } + } + } yields { + // Result should be identical to multi_lifetime result. + expect![["Unique; for { substitution [?0 := (&'^0.0 Uint(U32))], lifetime constraints [InEnvironment { environment: Env([]), goal: '^0.0: '!1_0 }, InEnvironment { environment: Env([]), goal: '^0.0: '!1_1 }] }"]] + } + } +} + +/// Tests that we handle variance for contravariant structs correctly. +#[test] +fn multi_lifetime_contravariant_struct() { + test! 
{ + program { + #[variance(Contravariant)] + struct Foo {} + } + + goal { + forall<'a, 'b> { + exists { + Subtype(Foo<&'a u32>, Foo), + Subtype(Foo<&'b u32>, Foo) + } + } + } yields { + // Result should be opposite multi_lifetime result. + expect![["Unique; for { substitution [?0 := (&'^0.0 Uint(U32))], lifetime constraints [InEnvironment { environment: Env([]), goal: '^0.0: '!1_0 }, InEnvironment { environment: Env([]), goal: '^0.0: '!1_1 }] }"]] + } + goal { + forall<'a, 'b> { + exists { + Subtype(Foo, Foo<&'a u32>), + Subtype(Foo, Foo<&'b u32>) + } + } + } yields { + // Result should be opposite multi_lifetime result. + expect![["Unique; for { substitution [?0 := (&'^0.0 Uint(U32))], lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '^0.0 }, InEnvironment { environment: Env([]), goal: '!1_1: '^0.0 }] }"]] + } + } +} + +/// Tests that we handle variance for invariant structs correctly. +#[test] +fn multi_lifetime_invariant_struct() { + test! { + program { + #[variance(Invariant)] + struct Foo {} + } + + goal { + forall<'a, 'b> { + exists { + Subtype(Foo<&'a u32>, Foo), + Subtype(Foo<&'b u32>, Foo) + } + } + } yields[SolverChoice::recursive_default()] { + // Because A is invariant, we require the lifetimes to be equal + expect![["Unique; substitution [?0 := (&'!1_0 Uint(U32))], lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '!1_1 }, InEnvironment { environment: Env([]), goal: '!1_1: '!1_0 }]"]] + } yields[SolverChoice::slg_default()] { + // Because A is invariant, we require the lifetimes to be equal + expect![["Unique; substitution [?0 := (&'!1_1 Uint(U32))], lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '!1_1 }, InEnvironment { environment: Env([]), goal: '!1_1: '!1_0 }]"]] + } + + goal { + forall<'a, 'b> { + exists { + Subtype(Foo, Foo<&'a u32>), + Subtype(Foo, Foo<&'b u32>) + } + } + } yields[SolverChoice::recursive_default()] { + // Because A is invariant, we require the lifetimes to 
be equal + expect![["Unique; substitution [?0 := (&'!1_0 Uint(U32))], lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '!1_1 }, InEnvironment { environment: Env([]), goal: '!1_1: '!1_0 }]"]] + } yields[SolverChoice::slg_default()] { + // Because A is invariant, we require the lifetimes to be equal + expect![["Unique; substitution [?0 := (&'!1_1 Uint(U32))], lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '!1_1 }, InEnvironment { environment: Env([]), goal: '!1_1: '!1_0 }]"]] + } + } +} + +/// Tests that we handle variance for slices correctly. +#[test] +fn multi_lifetime_slice() { + test! { + goal { + forall<'a, 'b> { + exists { + Subtype([&'a u32], [U]), + Subtype([&'b u32], [U]) + } + } + } yields { + // Result should be identical to multi_lifetime result. + expect![["Unique; for { substitution [?0 := (&'^0.0 Uint(U32))], lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '^0.0 }, InEnvironment { environment: Env([]), goal: '!1_1: '^0.0 }] }"]] + } + goal { + forall<'a, 'b> { + exists { + Subtype([U], [&'a u32]), + Subtype([U], [&'b u32]) + } + } + } yields { + // Result should be identical to multi_lifetime result. + expect![["Unique; for { substitution [?0 := (&'^0.0 Uint(U32))], lifetime constraints [InEnvironment { environment: Env([]), goal: '^0.0: '!1_0 }, InEnvironment { environment: Env([]), goal: '^0.0: '!1_1 }] }"]] + } + } +} + +/// Tests that we handle variance for tuples correctly. +#[test] +fn multi_lifetime_tuple() { + test! { + goal { + forall<'a, 'b> { + exists { + Subtype((&'a u32,), (U,)), + Subtype((&'b u32,), (U,)) + } + } + } yields { + // Result should be identical to multi_lifetime result. 
+ expect![["Unique; for { substitution [?0 := (&'^0.0 Uint(U32))], lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '^0.0 }, InEnvironment { environment: Env([]), goal: '!1_1: '^0.0 }] }"]] + } + goal { + forall<'a, 'b> { + exists { + Subtype((U,), (&'a u32,)), + Subtype((U,), (&'b u32,)) + } + } + } yields { + // Result should be identical to multi_lifetime result. + expect![["Unique; for { substitution [?0 := (&'^0.0 Uint(U32))], lifetime constraints [InEnvironment { environment: Env([]), goal: '^0.0: '!1_0 }, InEnvironment { environment: Env([]), goal: '^0.0: '!1_1 }] }"]] + } + } +} + +/// Tests that we handle variance for arrays correctly. +#[test] +fn multi_lifetime_array() { + test! { + goal { + forall<'a, 'b> { + exists { + Subtype([&'a u32; 16], [U; 16]), + Subtype([&'b u32; 16], [U; 16]) + } + } + } yields { + // Result should be identical to multi_lifetime result. + expect![["Unique; for { substitution [?0 := (&'^0.0 Uint(U32))], lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '^0.0 }, InEnvironment { environment: Env([]), goal: '!1_1: '^0.0 }] }"]] + } + goal { + forall<'a, 'b> { + exists { + Subtype([U; 16], [&'a u32; 16]), + Subtype([U; 16], [&'b u32; 16]) + } + } + } yields { + // Result should be identical to multi_lifetime result. + expect![["Unique; for { substitution [?0 := (&'^0.0 Uint(U32))], lifetime constraints [InEnvironment { environment: Env([]), goal: '^0.0: '!1_0 }, InEnvironment { environment: Env([]), goal: '^0.0: '!1_1 }] }"]] + } + } +} + +/// Tests that the generalizer recurses into covariant structs correctly. +#[test] +fn generalize_covariant_struct() { + test! 
{ + program { + #[variance(Covariant)] + struct Foo {} + } + + goal { + forall<'a, 'b> { + exists { + Subtype(Foo<&'a u32>, U), + Subtype(Foo<&'b u32>, U) + } + } + } yields { + expect![["Unique; for { substitution [?0 := Foo<(&'^0.0 Uint(U32))>], lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '^0.0 }, InEnvironment { environment: Env([]), goal: '!1_1: '^0.0 }] }"]] + } + } +} + +/// Tests that the generalizer recurses into contravariant structs correctly. +#[test] +fn generalize_contravariant_struct() { + test! { + program { + #[variance(Contravariant)] + struct Foo {} + } + + goal { + forall<'a, 'b> { + exists { + Subtype(Foo<&'a u32>, U), + Subtype(Foo<&'b u32>, U) + } + } + } yields { + // Result should be opposite generalize_covariant_struct result. + expect![["Unique; for { substitution [?0 := Foo<(&'^0.0 Uint(U32))>], lifetime constraints [InEnvironment { environment: Env([]), goal: '^0.0: '!1_0 }, InEnvironment { environment: Env([]), goal: '^0.0: '!1_1 }] }"]] + } + } +} + +/// Tests that the generalizer recurses into invariant structs correctly. +#[test] +fn generalize_invariant_struct() { + test! { + program { + #[variance(Invariant)] + struct Foo {} + } + + goal { + forall<'a, 'b> { + exists { + Subtype(Foo<&'a u32>, U), + Subtype(Foo<&'b u32>, U) + } + } + } yields[SolverChoice::recursive_default()] { + // Because A is invariant, we require the lifetimes to be equal + expect![["Unique; substitution [?0 := Foo<(&'!1_0 Uint(U32))>], lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '!1_1 }, InEnvironment { environment: Env([]), goal: '!1_1: '!1_0 }]"]] + } yields[SolverChoice::slg_default()] { + expect![["Unique; substitution [?0 := Foo<(&'!1_1 Uint(U32))>], lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '!1_1 }, InEnvironment { environment: Env([]), goal: '!1_1: '!1_0 }]"]] + } + } +} + +/// Tests that the generalizer recurses into slices correctly. 
+#[test] +fn generalize_slice() { + test! { + goal { + forall<'a, 'b> { + exists { + Subtype([&'a u32], U), + Subtype([&'b u32], U) + } + } + } yields { + // Result should be identical to generalize_covariant_struct result. + expect![["Unique; for { substitution [?0 := [(&'^0.0 Uint(U32))]], lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '^0.0 }, InEnvironment { environment: Env([]), goal: '!1_1: '^0.0 }] }"]] + } + goal { + forall<'a, 'b> { + exists { + Subtype(U, [&'a u32]), + Subtype(U, [&'b u32]) + } + } + } yields { + // Result should be identical to generalize_covariant_struct result. + expect![["Unique; for { substitution [?0 := [(&'^0.0 Uint(U32))]], lifetime constraints [InEnvironment { environment: Env([]), goal: '^0.0: '!1_0 }, InEnvironment { environment: Env([]), goal: '^0.0: '!1_1 }] }"]] + } + } +} + +/// Tests that the generalizer recurses into tuples correctly. +#[test] +fn generalize_tuple() { + test! { + goal { + forall<'a, 'b> { + exists { + Subtype((&'a u32,), U), + Subtype((&'b u32,), U) + } + } + } yields { + // Result should be identical to generalize_covariant_struct result. + expect![["Unique; for { substitution [?0 := 1<(&'^0.0 Uint(U32))>], lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '^0.0 }, InEnvironment { environment: Env([]), goal: '!1_1: '^0.0 }] }"]] + } + goal { + forall<'a, 'b> { + exists { + Subtype(U, (&'a u32,)), + Subtype(U, (&'b u32,)) + } + } + } yields { + // Result should be identical to generalize_covariant_struct result. + expect![["Unique; for { substitution [?0 := 1<(&'^0.0 Uint(U32))>], lifetime constraints [InEnvironment { environment: Env([]), goal: '^0.0: '!1_0 }, InEnvironment { environment: Env([]), goal: '^0.0: '!1_1 }] }"]] + } + } +} + +/// Tests that the generalizer recurses into N-tuples correctly. +#[test] +fn generalize_2tuple() { + test! 
{ + goal { + forall<'a, 'b, 'c, 'd> { + exists { + Subtype((&'a u32, &'c u32), U), + Subtype((&'b u32, &'d u32), U) + } + } + } yields { + expect![["Unique; for { substitution [?0 := 2<(&'^0.0 Uint(U32)), (&'^0.1 Uint(U32))>], lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '^0.0 }, InEnvironment { environment: Env([]), goal: '!1_1: '^0.0 }, InEnvironment { environment: Env([]), goal: '!1_2: '^0.1 }, InEnvironment { environment: Env([]), goal: '!1_3: '^0.1 }] }"]] + } + goal { + forall<'a, 'b, 'c, 'd> { + exists { + Subtype(U, (&'a u32, &'c u32)), + Subtype(U, (&'b u32, &'d u32)) + } + } + } yields { + expect![["Unique; for { substitution [?0 := 2<(&'^0.0 Uint(U32)), (&'^0.1 Uint(U32))>], lifetime constraints [InEnvironment { environment: Env([]), goal: '^0.0: '!1_0 }, InEnvironment { environment: Env([]), goal: '^0.0: '!1_1 }, InEnvironment { environment: Env([]), goal: '^0.1: '!1_2 }, InEnvironment { environment: Env([]), goal: '^0.1: '!1_3 }] }"]] + } + } +} + +/// Tests that the generalizer recurses into arrays correctly. +#[test] +fn generalize_array() { + test! { + goal { + forall<'a, 'b> { + exists { + Subtype([&'a u32; 16], U), + Subtype([&'b u32; 16], U) + } + } + } yields { + // Result should be identical to generalize_covariant_struct result. + expect![["Unique; for { substitution [?0 := [(&'^0.0 Uint(U32)); 16]], lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '^0.0 }, InEnvironment { environment: Env([]), goal: '!1_1: '^0.0 }] }"]] + } + + goal { + forall<'a, 'b> { + exists { + Subtype(U, [&'a u32; 16]), + Subtype(U, [&'b u32; 16]) + } + } + } yields { + // Result should be identical to generalize_covariant_struct result. 
+ expect![["Unique; for { substitution [?0 := [(&'^0.0 Uint(U32)); 16]], lifetime constraints [InEnvironment { environment: Env([]), goal: '^0.0: '!1_0 }, InEnvironment { environment: Env([]), goal: '^0.0: '!1_1 }] }"]] + } + } +} diff --git a/tests/test/tuples.rs b/tests/test/tuples.rs new file mode 100644 index 00000000000..d295ba4b415 --- /dev/null +++ b/tests/test/tuples.rs @@ -0,0 +1,348 @@ +use super::*; + +#[test] +fn tuple_trait_impl() { + test! { + program { + trait Foo { } + struct S1 { } + impl Foo for (S1, S1) { } + impl Foo for () { } + } + goal { + (S1, S1): Foo + } yields { + expect![["Unique"]] + } + + goal { + (): Foo + } yields { + expect![["Unique"]] + } + } + test! { + program { + trait Foo { } + impl Foo for (i32, i32, (i32,)) { } + } + + goal { + (i32, i32, (i32, )): Foo + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn tuples_are_copy() { + test! { + program { + #[lang(copy)] + trait Copy { } + + struct S {} + + impl Copy for u8 {} + } + + goal { + ([u8],): Copy + } yields { + expect![["No possible solution"]] + } + + goal { + (u8, [u8]): Copy + } yields { + expect![["No possible solution"]] + } + + goal { + ([u8], u8): Copy + } yields { + expect![["No possible solution"]] + } + + goal { + (): Copy + } yields { + expect![["Unique"]] + } + + goal { + (u8,): Copy + } yields { + expect![["Unique"]] + } + + goal { + (u8, u8): Copy + } yields { + expect![["Unique"]] + } + + goal { + exists { (T, u8): Copy } + } yields { + expect![["Ambiguous; no inference guidance"]] + } + + goal { + forall { if (T: Copy) { (T, u8): Copy } } + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn tuples_are_sized() { + test! { + program { + #[lang(sized)] + trait Sized { } + } + + goal { + ([u8],): Sized + } yields { + expect![["No possible solution"]] + } + + goal { + (u8, [u8]): Sized + } yields { + expect![["No possible solution"]] + } + + // It should not be well-formed because for tuples, only + // the last element is allowed not to be Sized. 
+ goal { + ([u8], u8): Sized + } yields { + expect![["Unique"]] + } + + goal { + (): Sized + } yields { + expect![["Unique"]] + } + + goal { + (u8,): Sized + } yields { + expect![["Unique"]] + } + + goal { + (u8, u8): Sized + } yields { + expect![["Unique"]] + } + + goal { + exists { (T, u8): Sized } + } yields { + expect![["Unique; for { substitution [?0 := ^0.0] }"]] + } + + goal { + forall { (T, u8): Sized } + } yields { + expect![["Unique"]] + } + + goal { + forall { (u8, T): Sized } + } yields { + expect![["No possible solution"]] + } + + goal { + forall { if (T: Sized) { (u8, T): Sized } } + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn tuples_are_clone() { + test! { + program { + #[lang(clone)] + trait Clone { } + + struct S {} + + impl Clone for u8 {} + } + + goal { + ([u8],): Clone + } yields { + expect![["No possible solution"]] + } + + goal { + (u8, [u8]): Clone + } yields { + expect![["No possible solution"]] + } + + goal { + ([u8], u8): Clone + } yields { + expect![["No possible solution"]] + } + + goal { + (): Clone + } yields { + expect![["Unique"]] + } + + goal { + (u8,): Clone + } yields { + expect![["Unique"]] + } + + goal { + (u8, u8): Clone + } yields { + expect![["Unique"]] + } + + goal { + exists { (T, u8): Clone } + } yields { + expect![["Ambiguous; no inference guidance"]] + } + + goal { + forall { if (T: Clone) { (T, u8): Clone } } + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn tuples_are_wf() { + test! 
{ + program { + #[lang(sized)] + trait Sized { } + } + + goal { + WellFormed(()) + } yields { + expect![["Unique"]] + } + + goal { + WellFormed((u8,)) + } yields { + expect![["Unique"]] + } + + goal { + WellFormed((u8, u8)) + } yields { + expect![["Unique"]] + } + + goal { + WellFormed(([u8],)) + } yields { + expect![["Unique"]] + } + + goal { + WellFormed((u8, [u8])) + } yields { + expect![["Unique"]] + } + + goal { + WellFormed(([u8], u8)) + } yields { + expect![["No possible solution"]] + } + + goal { + exists { WellFormed((T, u8)) } + } yields { + expect![["Ambiguous; no inference guidance"]] + } + + goal { + forall { WellFormed((T, u8)) } + } yields { + expect![["No possible solution"]] + } + + goal { + forall { if (T: Sized) { WellFormed((T, u8)) } } + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn tuples_implement_tuple_trait() { + test! { + program { + #[lang(tuple_trait)] + trait Tuple { } + } + + goal { + (): Tuple + } yields { + expect![["Unique"]] + } + + goal { + (u8,): Tuple + } yields { + expect![["Unique"]] + } + + goal { + (i32, i32): Tuple + } yields { + expect![["Unique"]] + } + + goal { + ([u8],): Tuple + } yields { + expect![["Unique"]] + } + + goal { + forall { (T,): Tuple } + } yields { + expect![["Unique"]] + } + + goal { + i32: Tuple + } yields { + expect![["No possible solution"]] + } + + goal { + exists { T: Tuple } + } yields { + expect![["Ambiguous; no inference guidance"]] + } + } +} diff --git a/tests/test/type_flags.rs b/tests/test/type_flags.rs new file mode 100644 index 00000000000..5678781eab6 --- /dev/null +++ b/tests/test/type_flags.rs @@ -0,0 +1,118 @@ +use chalk_integration::interner::ChalkIr; +use chalk_integration::{empty_substitution, lifetime, ty}; +use chalk_ir::cast::Cast; +use chalk_ir::{PlaceholderIndex, TyKind, TypeFlags, UniverseIndex}; + +#[test] +fn placeholder_ty_flags_correct() { + let placeholder_ty = ty!(placeholder 0); + assert_eq!( + placeholder_ty.data(ChalkIr).flags, + 
TypeFlags::HAS_TY_PLACEHOLDER + ); +} + +#[test] +fn opaque_ty_flags_correct() { + let opaque_ty = TyKind::Alias(chalk_ir::AliasTy::Opaque(chalk_ir::OpaqueTy { + opaque_ty_id: chalk_ir::OpaqueTyId(chalk_integration::interner::RawId { index: 0 }), + substitution: chalk_ir::Substitution::from_iter( + ChalkIr, + Some( + chalk_ir::ConstData { + ty: TyKind::Placeholder(PlaceholderIndex { + ui: chalk_ir::UniverseIndex::ROOT, + idx: 0, + }) + .intern(ChalkIr), + value: chalk_ir::ConstValue::InferenceVar(chalk_ir::InferenceVar::from(0)), + } + .intern(ChalkIr) + .cast(ChalkIr), + ), + ), + })) + .intern(ChalkIr); + assert_eq!( + opaque_ty.data(ChalkIr).flags, + TypeFlags::HAS_TY_OPAQUE + | TypeFlags::HAS_CT_INFER + | TypeFlags::STILL_FURTHER_SPECIALIZABLE + | TypeFlags::HAS_TY_PLACEHOLDER + ); +} + +#[test] +fn dyn_ty_flags_correct() { + let internal_ty = TyKind::Scalar(chalk_ir::Scalar::Bool).intern(ChalkIr); + let projection_ty = chalk_ir::ProjectionTy { + associated_ty_id: chalk_ir::AssocTypeId(chalk_integration::interner::RawId { index: 0 }), + substitution: empty_substitution!(), + }; + let bounds = chalk_ir::Binders::>::empty( + ChalkIr, + chalk_ir::QuantifiedWhereClauses::from_iter( + ChalkIr, + vec![chalk_ir::Binders::>::empty( + ChalkIr, + chalk_ir::WhereClause::AliasEq(chalk_ir::AliasEq { + ty: internal_ty, + alias: chalk_ir::AliasTy::Projection(projection_ty), + }), + )], + ), + ); + let dyn_ty = chalk_ir::DynTy { + lifetime: lifetime!(placeholder 5), + bounds, + }; + let ty = TyKind::Dyn(dyn_ty).intern(ChalkIr); + assert_eq!( + ty.data(ChalkIr).flags, + TypeFlags::HAS_TY_PROJECTION + | TypeFlags::HAS_RE_PLACEHOLDER + | TypeFlags::HAS_FREE_LOCAL_REGIONS + | TypeFlags::HAS_FREE_REGIONS + ); +} + +#[test] +fn flagless_ty_has_no_flags() { + let ty = TyKind::Str.intern(ChalkIr); + assert_eq!(ty.data(ChalkIr).flags, TypeFlags::empty()); + + let fn_ty = TyKind::Function(chalk_ir::FnPointer { + num_binders: 0, + substitution: chalk_ir::FnSubst(empty_substitution!()), + 
sig: chalk_ir::FnSig { + abi: chalk_integration::interner::ChalkFnAbi::Rust, + safety: chalk_ir::Safety::Safe, + variadic: false, + }, + }) + .intern(ChalkIr); + assert_eq!(fn_ty.data(ChalkIr).flags, TypeFlags::empty()); +} + +#[test] +fn static_and_bound_lifetimes() { + let substitutions = chalk_ir::Substitution::from_iter( + ChalkIr, + vec![ + chalk_ir::GenericArgData::Lifetime(chalk_ir::LifetimeData::Static.intern(ChalkIr)) + .intern(ChalkIr), + chalk_ir::GenericArgData::Lifetime(lifetime!(bound 5)).intern(ChalkIr), + ], + ); + + let ty = TyKind::Adt( + chalk_ir::AdtId(chalk_integration::interner::RawId { index: 0 }), + substitutions, + ) + .intern(ChalkIr); + + assert_eq!( + ty.data(ChalkIr).flags, + TypeFlags::HAS_FREE_REGIONS | TypeFlags::HAS_RE_LATE_BOUND + ); +} diff --git a/tests/test/unify.rs b/tests/test/unify.rs new file mode 100644 index 00000000000..6de70054910 --- /dev/null +++ b/tests/test/unify.rs @@ -0,0 +1,404 @@ +//! Tests targeting the unification logic directly. Note that tests +//! related to *associated type normalization* are included in +//! `projection.rs`, however. + +use super::*; + +/// Basic tests of region equality: we generate constraints. +#[test] +fn region_equality() { + test! { + program { + trait Eq { } + impl Eq for T { } + + struct Unit { } + struct Ref<'a, T> { } + } + + goal { + forall<'a, 'b> { + Ref<'a, Unit>: Eq> + } + } yields { + expect![["Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '!1_1 }, InEnvironment { environment: Env([]), goal: '!1_1: '!1_0 }]"]] + } + + goal { + forall<'a> { + exists<'b> { + Ref<'a, Unit>: Eq> + } + } + } yields { + expect![["Unique; substitution [?0 := '!1_0]"]] + } + } +} + +/// Temporary test extracted from the first goal in forall_equality for the sake of independent investigation. +#[test] +fn forall_equality_solveable_simple() { + test! 
{ + program { + trait Eq { } + impl Eq for T { } + + struct Unit { } + struct Ref<'a, T> { } + } + + goal { + // A valid equality; we get back a series of solvable + // region constraints, since each region variable must + // refer to exactly one placeholder region, and they are + // all in a valid universe to do so (universe 4). + for<'a> fn(Ref<'a, Unit>): Eq fn(Ref<'c, Unit>)> + } yields { + expect![["Unique"]] + } + } +} + +/// Temporary test extracted from the second goal in forall_equality for the sake of independent investigation. +#[test] +fn forall_equality_unsolveable_simple() { + test! { + program { + trait Eq { } + impl Eq for T { } + + struct Unit { } + struct Ref<'a, T> { } + } + + goal { + // Note: this equality is false, but we get back successful; + // this is because the region constraints are unsolvable. + // + // Note that `?0` (in universe 2) must be equal to both + // `!1_0` and `!1_1`, which of course it cannot be. + for<'a, 'b> fn(Ref<'a, Ref<'b, Ref<'a, Unit>>>): Eq< + for<'c, 'd> fn(Ref<'c, Ref<'d, Ref<'d, Unit>>>)> + } yields { + expect![["Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '!1_1 }, InEnvironment { environment: Env([]), goal: '!1_1: '!1_0 }, InEnvironment { environment: Env([]), goal: '!2_0: '!2_1 }, InEnvironment { environment: Env([]), goal: '!2_1: '!2_0 }]"]] + } + } +} + +/// Tests of region equality and "foralls" -- we generate contraints that are sometimes +/// not solvable. +#[test] +fn forall_equality() { + test! { + program { + trait Eq { } + impl Eq for T { } + + struct Unit { } + struct Ref<'a, T> { } + } + + goal { + // A valid equality; we get back a series of solvable + // region constraints, since each region variable must + // refer to exactly one placeholder region, and they are + // all in a valid universe to do so (universe 4). 
+ for<'a, 'b> fn(Ref<'a, Ref<'b, Unit>>): Eq fn(Ref<'c, Ref<'d, Unit>>)> + } yields { + expect![["Unique"]] + } + + goal { + // Note: this equality is false, but we get back successful; + // this is because the region constraints are unsolvable. + // + // Note that `?0` (in universe 2) must be equal to both + // `!1_0` and `!1_1`, which of course it cannot be. + for<'a, 'b> fn(Ref<'a, Ref<'b, Ref<'a, Unit>>>): Eq< + for<'c, 'd> fn(Ref<'c, Ref<'d, Ref<'d, Unit>>>)> + } yields { + expect![["Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '!1_1 }, InEnvironment { environment: Env([]), goal: '!1_1: '!1_0 }, InEnvironment { environment: Env([]), goal: '!2_0: '!2_1 }, InEnvironment { environment: Env([]), goal: '!2_1: '!2_0 }]"]] + } + + goal { + // Function pointers with different ABIs should not be equal. + extern "Rust" fn(): Eq + } yields { + expect![["No possible solution"]] + } + + goal { + // Function pointers with identical ABIs should be equal. + extern "Rust" fn(): Eq + } yields { + expect![["Unique"]] + } + + goal { + // Function pointers with different safety should not be equal. + unsafe fn(): Eq + } yields { + expect![["No possible solution"]] + } + + goal { + // Function pointers with identical safety should be equal. + unsafe fn(): Eq + } yields { + expect![["Unique"]] + } + + goal { + // Function pointers with different parameters should not be equal. + fn(u32): Eq + } yields { + expect![["No possible solution"]] + } + + goal { + // Variadic function pointers should not be equal to non-variadic fn pointers. + fn(u8, ...): Eq + } yields { + expect![["No possible solution"]] + } + + goal { + // Variadic function pointers should be equal to variadic fn pointers. + fn(u8, ...): Eq + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn unify_quantified_lifetimes() { + test! { + // Check that `'a` (here, `'^0.0`) is not unified + // with `'!1_0`, because they belong to incompatible + // universes. 
+ goal { + exists<'a> { + forall<'b> { + 'a = 'b + } + } + } yields { + expect![["Unique; for { \ + substitution [?0 := '^0.0], \ + lifetime constraints [\ + InEnvironment { environment: Env([]), goal: '!1_0: '^0.0 }, \ + InEnvironment { environment: Env([]), goal: '^0.0: '!1_0 }\ + ] \ + }"]] + } + + // Similar to the previous test, but indirect. + goal { + exists<'a> { + forall<'b> { + exists<'c> { + 'a = 'c, + 'c = 'b + } + } + } + } yields[SolverChoice::slg(10, None)] { + expect![["Unique; for { \ + substitution [?0 := '^0.0, ?1 := '!1_0], \ + lifetime constraints [\ + InEnvironment { environment: Env([]), goal: '!1_0: '^0.0 }, \ + InEnvironment { environment: Env([]), goal: '^0.0: '!1_0 }\ + ] \ + }"]] + } yields[SolverChoice::recursive_default()] { + // only difference is in the value of ?1, which is equivalent + expect![["Unique; for { \ + substitution [?0 := '^0.0, ?1 := '^0.0], \ + lifetime constraints [\ + InEnvironment { environment: Env([]), goal: '!1_0: '^0.0 }, \ + InEnvironment { environment: Env([]), goal: '^0.0: '!1_0 }\ + ] \ + }"]] + } + } +} + +#[test] +fn equality_binder() { + test! { + program { + struct Ref<'a, T> { } + } + + // Check that `'a` (here, `'?0`) is not unified + // with `'!1_0`, because they belong to incompatible + // universes. + goal { + forall { + exists<'a> { + for<'c> fn(Ref<'c, T>) = fn(Ref<'a, T>) + } + } + } yields { + expect![["Unique; for { \ + substitution [?0 := '^0.0], \ + lifetime constraints [\ + InEnvironment { environment: Env([]), goal: '!2_0: '^0.0 }, \ + InEnvironment { environment: Env([]), goal: '^0.0: '!2_0 }\ + ] \ + }"]] + } + } +} + +#[test] +fn equality_binder2() { + test! 
{ + program { + struct Ref<'a, 'b> { } + } + + goal { + for<'b, 'c> fn(Ref<'b, 'c>) = for<'a> fn(Ref<'a, 'a>) + } yields { + expect![["Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '!1_1 }, InEnvironment { environment: Env([]), goal: '!1_1: '!1_0 }]"]] + } + + goal { + for<'a> fn(Ref<'a, 'a>) = for<'b, 'c> fn(Ref<'b, 'c>) + } yields { + expect![["Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: '!2_0: '!2_1 }, InEnvironment { environment: Env([]), goal: '!2_1: '!2_0 }]"]] + } + } +} + +#[test] +fn mixed_indices_unify() { + test! { + program { + struct Ref<'a, T> { } + } + + goal { + exists { + exists<'a> { + exists { + Ref<'a, T> = Ref<'a, U> + } + } + } + } yields { + expect![["Unique; for { substitution [?0 := '^0.0, ?1 := ^0.1, ?2 := ^0.1] }"]] + } + } +} + +#[test] +fn mixed_indices_match_program() { + test! { + program { + struct S { } + struct Bar<'a, T, U> { } + trait Foo {} + impl<'a> Foo for Bar<'a, S, S> {} + } + + goal { + exists { + exists<'a> { + exists { + Bar<'a, T, U>: Foo + } + } + } + } yields { + expect![["Unique; for { substitution [?0 := '^0.0, ?1 := S, ?2 := S] }"]] + } + } +} + +#[test] +fn mixed_indices_normalize_application() { + test! { + program { + struct Ref<'a, T> { } + trait Foo { + type T; + } + + impl Foo for Ref<'a, U> { + type T = U; + } + } + + goal { + exists { + exists<'a> { + exists { + Normalize( as Foo>::T -> U) + } + } + } + } yields { + expect![["Unique; for { substitution [?0 := '^0.0, ?1 := ^0.1, ?2 := ^0.1] }"]] + } + } +} + +#[test] +fn mixed_indices_normalize_gat_application() { + test! { + program { + struct Either { } + struct Ref<'a, T> { } + trait Foo { + type T; + } + + impl Foo for Ref<'a, U> { + type T = Either; + } + } + + goal { + exists { + Normalize( as Foo>::T -> Either) + } + } yields { + expect![["Unique; for { substitution [?0 := '^0.0, ?1 := ^0.1, ?2 := ^0.2, ?3 := ^0.2, ?4 := ^0.1] }"]] + } + } +} + +#[test] +fn quantified_types() { + test! 
{ + program { + trait Foo { } + struct fn1<'a> { } + struct fn2<'a, 'b> { } + impl Foo for for<'a> fn(fn1<'a>) { } + } + + goal { + for<'a> fn(fn1<'a>): Foo + } yields { + expect![["Unique"]] + } + + goal { + for<'a, 'b> fn(fn2<'a, 'b>) = for<'b, 'a> fn(fn2<'a, 'b>) + } yields { + expect![["Unique"]] + } + + goal { + forall<'a> { fn(fn1<'a>): Foo } + } yields { + // Lifetime constraints are unsatisfiable + expect![["Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '!2_0 }, InEnvironment { environment: Env([]), goal: '!2_0: '!1_0 }]"]] + } + } +} diff --git a/tests/test/unpin.rs b/tests/test/unpin.rs new file mode 100644 index 00000000000..648bffc9c73 --- /dev/null +++ b/tests/test/unpin.rs @@ -0,0 +1,130 @@ +//! Tests targeting the Unpin trait + +use super::*; + +#[test] +fn unpin_lowering() { + lowering_success! { + program { + #[auto] #[lang(unpin)] trait Unpin { } + enum A { Variant } + struct B { } + impl !Unpin for A {} + impl Unpin for B {} + } + } +} + +#[test] +fn unpin_auto_trait() { + test! { + program { + #[auto] #[lang(unpin)] trait Unpin { } + struct A { } + } + + goal { + A: Unpin + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn unpin_negative() { + test! { + program { + #[auto] #[lang(unpin)] trait Unpin { } + struct A { } + impl !Unpin for A {} + } + + goal { + A: Unpin + } yields { + expect![["No possible solution"]] + } + } +} + +#[test] +fn unpin_inherit_negative() { + test! { + program { + #[auto] #[lang(unpin)] trait Unpin { } + struct A { } + impl !Unpin for A {} + struct B { a: A } + } + + goal { + B: Unpin + } yields { + expect![["No possible solution"]] + } + } +} + +#[test] +fn unpin_overwrite() { + test! { + program { + #[auto] #[lang(unpin)] trait Unpin { } + struct A { } + impl !Unpin for A {} + struct B { a: A } + impl Unpin for B {} + } + + goal { + B: Unpin + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn coroutine_unpin() { + test! 
{ + program { + #[auto] #[lang(unpin)] trait Unpin { } + struct A { } + impl !Unpin for A {} + + coroutine static static_gen<>[resume = (), yield = ()] { + upvars [] + witnesses [] + } + + coroutine movable_gen<>[resume = (), yield = ()] { + upvars [] + witnesses [] + } + + coroutine movable_with_pin<>[resume = (), yield = ()] { + upvars [A] + witnesses [] + } + } + + goal { + static_gen: Unpin + } yields { + expect![["No possible solution"]] + } + + goal { + movable_gen: Unpin + } yields { + expect![["Unique"]] + } + + goal { + movable_with_pin: Unpin + } yields { + expect![["Unique"]] + } + } +} diff --git a/tests/test/unsize.rs b/tests/test/unsize.rs new file mode 100644 index 00000000000..e3b26f52e96 --- /dev/null +++ b/tests/test/unsize.rs @@ -0,0 +1,734 @@ +use crate::test::*; + +#[test] +fn dyn_to_dyn_unsizing() { + test! { + program { + #[lang(unsize)] + trait Unsize {} + + #[object_safe] + trait Principal {} + #[object_safe] + trait OtherPrincipal {} + #[object_safe] + trait GenericPrincipal { + type Item; + } + + #[auto] + #[object_safe] + trait Auto1 {} + + #[auto] + #[object_safe] + trait Auto2 {} + + #[auto] + #[object_safe] + trait Auto3 {} + } + + // Tests with the same principal and auto traits + goal { + forall<'a> { + forall<'b> { + dyn Principal + 'a: Unsize + } + } + } yields { + expect![["Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '!2_0 }]"]] + } + + goal { + forall<'a> { + forall<'b> { + dyn Principal + Auto1 + Auto2 + Auto3 + 'a: Unsize + } + } + } yields { + expect![["Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '!2_0 }]"]] + } + + // Target has a subset of source auto traits + goal { + forall<'a> { + dyn Principal + Auto1 + Auto2 + 'a: Unsize + } + } yields { + expect![["Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '!1_0 }]"]] + } + + // Both target and source don't have principal as their first trait + goal { + forall<'a> { + dyn 
Auto1 + Principal + 'a: Unsize + } + } yields { + expect![["Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '!1_0 }]"]] + } + + // Different order of traits in target and source + goal { + forall<'a> { + dyn Principal + Auto1 + 'a: Unsize + } + } yields { + expect![["Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '!1_0 }]"]] + } + + // See above + goal { + forall<'a> { + dyn Principal + Auto2 + Auto1 + 'a: Unsize + } + } yields { + expect![["Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '!1_0 }]"]] + } + + // Source has a subset of auto traits of target + goal { + forall<'a> { + dyn Principal + Auto2 + 'a: Unsize + } + } yields { + expect![["No possible solution"]] + } + + // Source and target have different set of auto traits + goal { + forall<'a> { + dyn Principal + Auto1 + Auto2 + 'a: Unsize + } + } yields { + expect![["No possible solution"]] + } + + // Source has a principal trait, while target doesnt, both have the same auto traits. + goal { + forall<'a> { + dyn Principal + Auto1 + 'a: Unsize + } + } yields { + expect!["Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '!1_0 }]"] + } + + // Non-matching principal traits + goal { + forall<'a> { + dyn Principal + 'a: Unsize + } + } yields { + expect![["No possible solution"]] + } + + // Matching generic principal traits + goal { + forall<'a> { + dyn GenericPrincipal + 'a: Unsize + 'a> + } + } yields { + expect![["Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '!1_0 }]"]] + } + + // Non-matching generic principal traits + goal { + forall<'a> { + dyn GenericPrincipal + 'a: Unsize + 'a> + } + } yields { + expect![["No possible solution"]] + } + } +} + +#[test] +fn super_auto_trait() { + test! 
{ + program { + #[lang(unsize)] + trait Unsize {} + + #[object_safe] + trait Principal where Self: SuperAuto {} + + #[auto] + #[object_safe] + trait SuperAuto {} + + #[auto] + #[object_safe] + trait Auto {} + } + + goal { + forall<'a> { + dyn Principal + 'a: Unsize + } + } yields { + expect!["Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '!1_0 }]"] + } + + goal { + forall<'a> { + dyn Principal + Auto + 'a: Unsize + } + } yields { + expect!["Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '!1_0 }]"] + } + } +} + +#[test] +fn dyn_upcasting() { + test! { + program { + #[lang(unsize)] + trait Unsize {} + + #[object_safe] + trait SuperSuper {} + #[object_safe] + trait GenericSuper {} + #[object_safe] + trait LifetimedSuper<'a> {} + #[object_safe] + trait Super + where + Self: SuperSuper, + Self: GenericSuper, + Self: GenericSuper, + forall<'a> Self: LifetimedSuper<'a>, + {} + #[object_safe] + trait Principal where Self: Super {} + + #[auto] + #[object_safe] + trait Auto1 {} + + #[auto] + #[object_safe] + trait Auto2 {} + } + + goal { + forall<'a> { + dyn Principal + 'a: Unsize + } + } yields { + expect![[r#"Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '!1_0 }]"#]] + } + + goal { + forall<'a> { + dyn Principal + Auto1 + 'a: Unsize + } + } yields { + expect![[r#"Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '!1_0 }]"#]] + } + + // Different set of auto traits + goal { + forall<'a> { + dyn Principal + Auto1 + 'a: Unsize + } + } yields { + expect![[r#"No possible solution"#]] + } + + // Dropping auto traits is allowed + goal { + forall<'a> { + dyn Principal + Auto1 + Auto2 + 'a: Unsize + } + } yields { + expect![[r#"Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '!1_0 }]"#]] + } + + // Upcasting to indirect super traits + goal { + forall<'a> { + dyn Principal + 'a: Unsize + } + } yields { + 
expect![[r#"Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '!1_0 }]"#]] + } + + goal { + forall<'a> { + dyn Principal + 'a: Unsize + 'a> + } + } yields { + expect![[r#"Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '!1_0 }]"#]] + } + + // Ambiguous if there are multiple super traits applicable + goal { + exists { + forall<'a> { + dyn Principal + 'a: Unsize + 'a> + } + } + } yields { + expect![[r#"Ambiguous; no inference guidance"#]] + } + + goal { + forall<'a> { + forall<'b> { + forall<'c> { + dyn Principal + 'a: Unsize + 'c> + } + } + } + } yields { + expect!["Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: '!1_0: '!3_0 }, InEnvironment { environment: Env([]), goal: '!2_0: '!5_0 }, InEnvironment { environment: Env([]), goal: '!5_0: '!2_0 }]"] + } + } +} + +#[test] +fn ty_to_dyn_unsizing() { + test! { + program { + #[lang(unsize)] + trait Unsize {} + #[lang(sized)] + trait Sized {} + + #[object_safe] + trait Principal {} + #[object_safe] + trait GenericPrincipal { + type Item; + } + + trait UnsafePrincipal {} + + #[auto] + #[object_safe] + trait Auto {} + + struct Foo {} + struct FooLifetime<'a> {} + struct Bar {} + struct Baz {} + struct FooNotSized { + t: T + } + + impl Principal for Foo {} + impl UnsafePrincipal for Foo {} + + impl<'a> Principal for FooLifetime<'a> {} + + impl Principal for Bar {} + impl !Auto for Bar {} + + impl Principal for FooNotSized {} + + impl GenericPrincipal for Foo { + type Item = u32; + } + } + + goal { + forall<'a> { + Foo: Unsize + } + } yields { + expect![["Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: Foo: '!1_0 }]"]] + } + + // Principal is not the first trait + goal { + forall<'a> { + Foo: Unsize + } + } yields { + expect![["Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: Foo: '!1_0 }]"]] + } + + // Auto-only trait object + goal { + forall<'a> { + Foo: Unsize + } + } yields { + 
expect![["Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: Foo: '!1_0 }]"]] + } + + // TypeOutlives test + goal { + forall<'a> { + FooLifetime<'a>: Unsize + } + } yields { + expect![["Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: FooLifetime<'!1_0>: '!1_0 }]"]] + } + + // See above + goal { + forall<'a> { + exists<'b> { + FooLifetime<'a>: Unsize + } + } + } yields { + expect![["Unique; for { substitution [?0 := '^0.0], lifetime constraints [InEnvironment { environment: Env([]), goal: FooLifetime<'!1_0>: '^0.0 }] }"]] + } + + // Source does not implement auto trait (with principal) + goal { + forall<'a> { + Bar: Unsize + } + } yields { + expect![["No possible solution"]] + } + + // Source does not implement auto trait (without principal) + goal { + forall<'a> { + Bar: Unsize + } + } yields { + expect![["No possible solution"]] + } + + // Source does not implement principal + goal { + forall<'a> { + Baz: Unsize + } + } yields { + expect![["No possible solution"]] + } + + // Implemeted generic principal + goal { + forall<'a> { + Foo: Unsize + 'a> + } + } yields { + expect![["Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: Foo: '!1_0 }]"]] + } + + + // Non-implemeted generic principal + goal { + forall<'a> { + Foo: Unsize + 'a> + } + } yields { + expect![["No possible solution"]] + } + + // Not object-safe principal trait + goal { + forall<'a> { + Foo: Unsize + } + } yields { + expect![["No possible solution"]] + } + + // Source ty is not Sized + goal { + forall<'a> { + forall { + FooNotSized: Unsize + } + } + } yields { + expect![["No possible solution"]] + } + + // Sized counterpart for the previous test + goal { + forall<'a> { + forall { + if (T: Sized) { + FooNotSized: Unsize + } + } + } + } yields { + expect![["Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: FooNotSized: '!1_0 }]"]] + } + } +} + +#[test] +fn tuple_unsizing() { + test! 
{ + program { + #[lang(unsize)] + trait Unsize {} + #[lang(sized)] + trait Sized {} + + struct Foo {} + + #[object_safe] + trait Principal {} + #[object_safe] + trait OtherPrincipal {} + + impl Principal for Foo {} + } + + goal { + (): Unsize<()> + } yields { + expect![["No possible solution"]] + } + + goal { + (u32, u32): Unsize<(u32, u32)> + } yields { + expect![["No possible solution"]] + } + + goal { + forall<'a> { + (u32, Foo): Unsize<(u32, dyn Principal + 'a)> + } + } yields { + expect![["Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: Foo: '!1_0 }]"]] + } + + // Last field does not implement `Unsize` + goal { + forall<'a> { + (u32, Foo): Unsize<(u32, dyn OtherPrincipal + 'a)> + } + } yields { + expect![["No possible solution"]] + } + + // Mismatch of head fields + goal { + forall<'a> { + (u32, Foo): Unsize<(u64, dyn Principal + 'a)> + } + } yields { + expect![["No possible solution"]] + } + + // Tuple length mismatch + goal { + forall<'a> { + (u32, u32, Foo): Unsize<(u32, dyn Principal + 'a)> + } + } yields { + expect![["No possible solution"]] + } + + // Multilevel tuple test + goal { + forall<'a> { + (u32, (u32, Foo)): Unsize<(u32, (u32, dyn Principal + 'a))> + } + } yields { + expect![["Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: Foo: '!1_0 }]"]] + } + } +} + +#[test] +fn array_unsizing() { + test! { + program { + #[lang(unsize)] + trait Unsize {} + + struct Foo {} + } + + goal { + [Foo; 2]: Unsize<[Foo]> + } yields { + expect![["Unique"]] + } + + goal { + [Foo; 5]: Unsize<[Foo]> + } yields { + expect![["No possible solution"]] + } + } +} + +#[test] +fn struct_unsizing() { + test! 
{ + program { + #[lang(unsize)] + trait Unsize {} + #[lang(sized)] + trait Sized {} + + struct Foo {} + struct Bar {} + struct Baz<'a> {} + + struct S1 { + t1: T1 + } + + struct S12 where T1: Sized { + t1: T1, + t2: T2 + } + + struct SParamsInMultipleFields { + t1: Bar, + t2: T + } + + struct SNested where T1: Sized, T2: Sized { + t1: T1, + t2: (T2, S1) + } + + struct SBad where T1: Sized { + t1: Bar>, + t2: (T1, S1) + } + + struct SWithBinders where T1: Sized { + t1: T1, + t2: for<'a> fn(dyn Principal + 'a), + t3: T3 + } + + struct SLifetime<'a, T> { + t1: Baz<'a>, + t2: S12, T> + } + + struct SConst { + t: T + } + + struct SGoodConst { + t1: u32, + t2: SConst + } + + struct SBadConst { + t1: [u32; N], + t2: SConst + } + + #[object_safe] + trait Principal {} + #[object_safe] + trait OtherPrincipal {} + + impl Principal for Foo {} + } + + // Single field struct tests + goal { + Foo: Unsize + } yields { + expect![["No possible solution"]] + } + + goal { + forall<'a> { + S1: Unsize> + } + } yields { + expect![["Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: Foo: '!1_0 }]"]] + } + + goal { + forall<'a> { + S1: Unsize> + } + } yields { + expect![["No possible solution"]] + } + + // Unsizing parameter is used in head fields + goal { + forall<'a> { + SParamsInMultipleFields: + Unsize> + } + } yields { + expect![["No possible solution"]] + } + + // Two-field struct test + goal { + forall<'a> { + S12: Unsize> + } + } yields { + expect![["Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: Foo: '!1_0 }]"]] + } + + // Test for the unsizing parameters collector + // (checking that it ignores the binder inside `SWithBinders`) + goal { + forall<'a> { + SWithBinders: Unsize> + } + } yields { + expect![["Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: Foo: '!1_0 }]"]] + } + + // Non-trivial unsizing of the last field + goal { + forall<'a> { + SNested, Foo>: Unsize, dyn Principal + 'a>> + } + } yields { + 
expect![["Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: Foo: '!1_0 }]"]] + } + + goal { + forall<'a> { + SBad: Unsize> + } + } yields { + expect![["No possible solution"]] + } + + // Check that lifetimes can't be used as unsizing parameters + goal { + forall<'a> { + SLifetime<'a, Foo>: Unsize> + } + } yields { + expect![["Unique; lifetime constraints [InEnvironment { environment: Env([]), goal: Foo: '!1_0 }]"]] + } + + // Tests with constant as an unsizing parameter + goal { + SGoodConst<5, [u32; 2]>: Unsize> + } yields { + expect![["Unique"]] + } + + + // Target does not match source + goal { + SGoodConst<4, [u32; 2]>: Unsize> + } yields { + expect![["No possible solution"]] + } + + // Unsizing parameter is used in head fields + goal { + SBadConst<5, [u32; 2]>: Unsize> + } yields { + expect![["No possible solution"]] + } + } +} diff --git a/tests/test/wf_goals.rs b/tests/test/wf_goals.rs new file mode 100644 index 00000000000..08436ed331a --- /dev/null +++ b/tests/test/wf_goals.rs @@ -0,0 +1,120 @@ +//! Tests for `WellFormed(_)` goals and clauses + +use super::*; + +#[test] +fn struct_wf() { + test! { + program { + struct Foo where T: Eq { } + struct Bar { } + struct Baz { } + + trait Eq { } + + impl Eq for Baz { } + impl Eq for Foo where T: Eq { } + } + + goal { + WellFormed(Foo) + } yields { + expect![["No possible solution"]] + } + + goal { + WellFormed(Foo) + } yields { + expect![["Unique"]] + } + + goal { + WellFormed(Foo>) + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn enum_wf() { + test! { + program { + enum Foo where T: Eq { } + enum Bar { } + enum Baz { } + + trait Eq { } + + impl Eq for Baz { } + impl Eq for Foo where T: Eq { } + } + + goal { + WellFormed(Foo) + } yields { + expect![["No possible solution"]] + } + + goal { + WellFormed(Foo) + } yields { + expect![["Unique"]] + } + + goal { + WellFormed(Foo>) + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn recursive_where_clause_on_type() { + test! 
{ + program { + trait Bar { } + trait Foo where Self: Bar { } + + struct S where S: Foo { } + + impl Foo for S { } + } + + goal { + WellFormed(S) + } yields { + expect![["No possible solution"]] + } + } +} + +#[test] +fn drop_compatible() { + test! { + program { + #[lang(drop)] + trait Drop { } + + struct S { } + } + + goal { + compatible { not { exists { S: Drop } } } + } yields { + expect![["Unique"]] + } + } +} + +#[test] +fn placeholder_wf() { + test! { + goal { + forall { WellFormed(T) } + } yields { + expect![["Unique"]] + } + } +} diff --git a/tests/test/wf_lowering.rs b/tests/test/wf_lowering.rs new file mode 100644 index 00000000000..29b5bad08b2 --- /dev/null +++ b/tests/test/wf_lowering.rs @@ -0,0 +1,1440 @@ +use chalk_integration::query::LoweringDatabase; + +#[test] +fn well_formed_trait_decl() { + lowering_success! { + program { + trait Clone { } + trait Copy where Self: Clone { } + + struct Foo { } + + impl Clone for Foo { } + impl Copy for Foo { } + } + } +} + +#[test] +fn ill_formed_trait_decl() { + lowering_error! { + program { + trait Clone { } + trait Copy where Self: Clone { } + + struct Foo { } + + impl Copy for Foo { } + } error_msg { + "trait impl for `Copy` does not meet well-formedness requirements" + } + } +} +#[test] +fn cyclic_traits() { + lowering_success! { + program { + trait A where Self: B { } + trait B where Self: A { } + + impl B for T { } + impl A for T { } + } + } + + lowering_error! { + program { + trait Copy { } + + trait A where Self: B, Self: Copy {} + trait B where Self: A { } + + // This impl won't be able to prove that `T: Copy` holds. + impl B for T {} + + impl A for T where T: B {} + } error_msg { + "trait impl for `B` does not meet well-formedness requirements" + } + } + + lowering_success! 
{ + program { + trait Copy { } + + trait A where Self: B, Self: Copy {} + trait B where Self: A { } + + impl B for T where T: Copy {} + impl A for T where T: B {} + } + } +} + +#[test] +fn cyclic_wf_requirements() { + lowering_success! { + program { + trait Foo where ::Value: Foo { + type Value; + } + + struct Unit { } + impl Foo for Unit { + type Value = Unit; + } + } + } +} + +#[test] +fn ill_formed_assoc_ty() { + lowering_error! { + program { + trait Foo { } + struct OnlyFoo where T: Foo { } + + struct MyType { } + + trait Bar { + type Value; + } + + impl Bar for MyType { + // `OnlyFoo` is ill-formed because `MyType: Foo` does not hold. + type Value = OnlyFoo; + } + } error_msg { + "trait impl for `Bar` does not meet well-formedness requirements" + } + } +} + +#[test] +fn implied_bounds() { + lowering_success! { + program { + trait Eq { } + trait Hash where Self: Eq { } + + struct Set where K: Hash { } + + struct OnlyEq where T: Eq { } + + trait Foo { + type Value; + } + + impl Foo for Set { + // Here, `WF(Set)` implies `K: Hash` and hence `OnlyEq` is WF. + type Value = OnlyEq; + } + } + } +} + +#[test] +fn ill_formed_ty_decl() { + lowering_error! { + program { + trait Hash { } + struct Set where K: Hash { } + + struct MyType { + value: Set + } + } error_msg { + "type declaration `MyType` does not meet well-formedness requirements" + } + } +} + +#[test] +fn implied_bounds_on_ty_decl() { + lowering_success! { + program { + trait Eq { } + trait Hash where Self: Eq { } + struct OnlyEq where T: Eq { } + + struct MyType where K: Hash { + value: OnlyEq + } + } + } +} + +#[test] +fn wf_requiremements_for_projection() { + lowering_error! { + program { + trait Foo { + type Value; + } + + trait Iterator { + type Item; + } + + impl Foo for T { + // The projection is well-formed if `T: Iterator` holds, which cannot + // be proved here. + type Value = ::Item; + } + } error_msg { + "trait impl for `Foo` does not meet well-formedness requirements" + } + } + + lowering_success! 
{ + program { + trait Foo { + type Value; + } + + trait Iterator { + type Item; + } + + impl Foo for T where T: Iterator { + type Value = ::Item; + } + } + } +} + +#[test] +fn ill_formed_type_in_header() { + lowering_error! { + program { + trait Foo { + type Value; + } + + trait Bar { } + + // Types in where clauses are not assumed to be well-formed, + // an explicit where clause would be needed (see below). + impl Bar for T where ::Value: Bar { } + } error_msg { + "trait impl for `Bar` does not meet well-formedness requirements" + } + } + + lowering_success! { + program { + trait Foo { + type Value; + } + + trait Bar { } + + impl Bar for T where T: Foo, ::Value: Bar { } + } + } +} + +#[test] +fn bound_in_header_from_env() { + lowering_success! { + program { + trait Foo { } + + trait Bar { + type Item: Foo; + } + + struct Stuff { } + + impl Bar for Stuff where T: Foo { + // Should have FromEnv(T: Foo) here. + type Item = T; + } + } + } + + lowering_error! { + program { + trait Foo { } + trait Baz { } + + trait Bar { + type Item: Baz; + } + + struct Stuff { } + + impl Bar for Stuff where T: Foo { + // No T: Baz here. + type Item = T; + } + } error_msg { + "trait impl for `Bar` does not meet well-formedness requirements" + } + } +} + +#[test] +fn mixed_indices_check_projection_bounds() { + lowering_success! { + program { + trait Foo { } + + trait Bar { + type Item: Foo; + } + + struct Stuff { } + + impl Bar for Stuff where U: Foo { + type Item = U; + } + } + } + + lowering_error! { + program { + trait Foo { } + trait Baz { } + + trait Bar { + type Item: Baz; + } + + struct Stuff { } + + impl Bar for Stuff where U: Foo { + type Item = U; + } + } error_msg { + "trait impl for `Bar` does not meet well-formedness requirements" + } + } +} + +#[test] +fn mixed_indices_check_generic_projection_bounds() { + lowering_success! { + program { + struct Stuff { } + + trait Foo { } + + // A type that impls Foo as long as U: Foo. 
+ struct Fooey { } + impl Foo for Fooey where U: Foo { } + + trait Bar { + type Item: Foo where V: Foo; + } + + impl Bar for Stuff where U: Foo { + type Item = Fooey; + } + } + } + + lowering_error! { + program { + struct Stuff { } + + trait Foo { } + trait Baz { } + + // A type that impls Foo as long as U: Foo. + struct Fooey { } + impl Foo for Fooey where U: Foo { } + + trait Bar { + type Item: Baz where V: Foo; + } + + impl Bar for Stuff where U: Foo { + type Item = Fooey; + } + } error_msg { + "trait impl for `Bar` does not meet well-formedness requirements" + } + } +} + +#[test] +fn generic_projection_where_clause() { + lowering_success! { + program { + trait PointerFamily { type Pointer; } + + struct Cow { } + struct CowFamily { } + impl PointerFamily for CowFamily { type Pointer = Cow; } + + struct String { } + struct Foo