aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSam Delmerico <delmerico@google.com>2023-05-10 09:14:19 -0400
committerSam Delmerico <delmerico@google.com>2023-05-10 09:14:19 -0400
commitdf3fbff25a502bf70fa494864a25a5e6b316f6e8 (patch)
treefb4ce8ff451007b17df3094f955fe6d341186bf0
parentb610a0ec58eeaf54e65f7ebf791e1a74a8718fc0 (diff)
parente817e2529246d99ced020ce9838b4b6edb04f024 (diff)
downloadbazelbuild-rules_testing-df3fbff25a502bf70fa494864a25a5e6b316f6e8.tar.gz
Merge remote-tracking branch 'aosp/upstream-master' into HEAD
Change-Id: I6499409d2887d4993c8920c613d6517df14c74c1
-rw-r--r--.bazelci/presubmit.yml29
-rw-r--r--.bazelignore3
-rw-r--r--.bazelrc2
-rw-r--r--.bcr/config.yml3
-rw-r--r--.bcr/metadata.template.json15
-rw-r--r--.bcr/presubmit.yml12
-rw-r--r--.bcr/source.template.json5
-rw-r--r--.github/release.yml5
-rw-r--r--.github/workflows/ci.bazelrc12
-rw-r--r--.github/workflows/release.yml51
-rwxr-xr-x.github/workflows/workspace_snippet.sh25
-rw-r--r--.gitignore5
-rw-r--r--.readthedocs.yaml23
-rw-r--r--AUTHORS13
-rw-r--r--BUILD29
-rw-r--r--CODEOWNERS1
-rw-r--r--CONTRIBUTING.md30
-rw-r--r--LICENSE202
-rw-r--r--MODULE.bazel64
-rw-r--r--README.md9
-rw-r--r--RELEASING.md11
-rw-r--r--WORKSPACE.bazel65
-rw-r--r--WORKSPACE.bzlmod0
-rwxr-xr-xaddlicense.sh23
-rw-r--r--dev_extension.bzl47
-rw-r--r--docgen/BUILD48
-rw-r--r--docgen/docgen.bzl70
-rw-r--r--docgen/func_template.vm56
-rw-r--r--docgen/header_template.vm1
-rw-r--r--docgen/provider_template.vm29
-rw-r--r--docgen/rule_template.vm48
-rw-r--r--docs/BUILD74
-rw-r--r--docs/README.md63
-rw-r--r--docs/crossrefs.md26
-rw-r--r--docs/requirements.in5
-rw-r--r--docs/requirements.txt288
-rwxr-xr-xdocs/run_sphinx_build.sh48
-rw-r--r--docs/source/_static/css/custom.css34
-rw-r--r--docs/source/analysis_tests.md251
-rw-r--r--docs/source/api/index.md7
-rw-r--r--docs/source/conf.py73
-rw-r--r--docs/source/guides.md14
-rw-r--r--docs/source/index.md49
-rw-r--r--docs/source/truth.md108
-rw-r--r--docs/sphinx_build.py4
-rw-r--r--e2e/bzlmod/BUILD.bazel9
-rw-r--r--e2e/bzlmod/MODULE.bazel11
-rw-r--r--e2e/bzlmod/WORKSPACE1
-rw-r--r--e2e/bzlmod/tests.bzl68
-rw-r--r--lib/BUILD82
-rw-r--r--lib/analysis_test.bzl253
-rw-r--r--lib/private/BUILD246
-rw-r--r--lib/private/action_subject.bzl377
-rw-r--r--lib/private/bool_subject.bzl78
-rw-r--r--lib/private/check_util.bzl339
-rw-r--r--lib/private/collection_subject.bzl350
-rw-r--r--lib/private/compare_util.bzl267
-rw-r--r--lib/private/depset_file_subject.bzl292
-rw-r--r--lib/private/dict_subject.bzl181
-rw-r--r--lib/private/execution_info_subject.bzl84
-rw-r--r--lib/private/expect.bzl271
-rw-r--r--lib/private/expect_meta.bzl278
-rw-r--r--lib/private/failure_messages.bzl311
-rw-r--r--lib/private/file_subject.bzl104
-rw-r--r--lib/private/instrumented_files_info_subject.bzl73
-rw-r--r--lib/private/int_subject.bzl102
-rw-r--r--lib/private/label_subject.bzl83
-rw-r--r--lib/private/matching.bzl200
-rw-r--r--lib/private/ordered.bzl64
-rw-r--r--lib/private/run_environment_info_subject.bzl80
-rw-r--r--lib/private/runfiles_subject.bzl266
-rw-r--r--lib/private/str_subject.bzl116
-rw-r--r--lib/private/target_subject.bzl419
-rw-r--r--lib/private/truth_common.bzl129
-rw-r--r--lib/truth.bzl70
-rw-r--r--lib/util.bzl275
-rw-r--r--lib/utils.bzl37
-rw-r--r--tests/BUILD51
-rw-r--r--tests/analysis_test_tests.bzl222
-rw-r--r--tests/testdata/file1.txt1
-rw-r--r--tests/truth_tests.bzl1481
81 files changed, 9251 insertions, 0 deletions
diff --git a/.bazelci/presubmit.yml b/.bazelci/presubmit.yml
new file mode 100644
index 0000000..2ea8c13
--- /dev/null
+++ b/.bazelci/presubmit.yml
@@ -0,0 +1,29 @@
+buildifier: latest
+validate_config: 1
+matrix:
+ platform: ["ubuntu2004", "windows", "macos"]
+ bazel: ["latest", "5.x"]
+tasks:
+ all_tests_workspace:
+ name: Workspace
+ platform: ${{platform}}
+ bazel: ${{bazel}}
+ test_targets:
+ - "..."
+ all_tests_bzlmod:
+ name: Bzlmod
+ platform: ${{platform}}
+ bazel: latest
+ test_flags:
+ - "--enable_bzlmod"
+ - "--test_tag_filters=-skip-bzlmod"
+ test_targets:
+ - "..."
+
+ e2e_bzlmod:
+ platform: ${{platform}}
+ working_directory: e2e/bzlmod
+ test_flags:
+ - "--enable_bzlmod"
+ test_targets:
+ - "..."
diff --git a/.bazelignore b/.bazelignore
new file mode 100644
index 0000000..91dfb74
--- /dev/null
+++ b/.bazelignore
@@ -0,0 +1,3 @@
+# Ignore directories that contain nested WORKSPACE or MODULE.bazel files.
+# This allows //... to Just Work
+e2e
diff --git a/.bazelrc b/.bazelrc
new file mode 100644
index 0000000..9589bd9
--- /dev/null
+++ b/.bazelrc
@@ -0,0 +1,2 @@
+# Project-specific bazelrc settings
+# NOTE: While empty, this is referenced by the release workflow
diff --git a/.bcr/config.yml b/.bcr/config.yml
new file mode 100644
index 0000000..ac951f6
--- /dev/null
+++ b/.bcr/config.yml
@@ -0,0 +1,3 @@
+fixedReleaser:
+ login: rickeylev
+ email: rlevasseur@google.com
diff --git a/.bcr/metadata.template.json b/.bcr/metadata.template.json
new file mode 100644
index 0000000..fd6d7d5
--- /dev/null
+++ b/.bcr/metadata.template.json
@@ -0,0 +1,15 @@
+{
+ "homepage": "https://github.com/bazelbuild/rules_testing",
+ "maintainers": [
+ {
+ "email": "rlevasseur@google.com",
+ "github": "rickeylev",
+ "name": "Richard Levasseur"
+ }
+ ],
+ "repository": [
+ "github:bazelbuild/rules_testing"
+ ],
+ "versions": [],
+ "yanked_versions": {}
+}
diff --git a/.bcr/presubmit.yml b/.bcr/presubmit.yml
new file mode 100644
index 0000000..65078a6
--- /dev/null
+++ b/.bcr/presubmit.yml
@@ -0,0 +1,12 @@
+# We recommend including a bcr test workspace that exercises your ruleset with bzlmod.
+# For an example, see https://github.com/aspect-build/bazel-lib/tree/main/e2e/bzlmod.
+bcr_test_module:
+ module_path: "e2e/bzlmod"
+ matrix:
+ platform: ["debian10", "macos", "ubuntu2004", "windows"]
+ tasks:
+ run_tests:
+ name: "Run test module"
+ platform: ${{ platform }}
+ test_targets:
+ - "//..."
diff --git a/.bcr/source.template.json b/.bcr/source.template.json
new file mode 100644
index 0000000..54d1e7b
--- /dev/null
+++ b/.bcr/source.template.json
@@ -0,0 +1,5 @@
+{
+ "integrity": "",
+ "strip_prefix": "{REPO}-{VERSION}",
+ "url": "https://github.com/{OWNER}/{REPO}/releases/download/{TAG}/rules_testing-{TAG}.tar.gz"
+}
diff --git a/.github/release.yml b/.github/release.yml
new file mode 100644
index 0000000..7b88d26
--- /dev/null
+++ b/.github/release.yml
@@ -0,0 +1,5 @@
+changelog:
+ categories:
+ - title: Other Changes
+ labels:
+ - "*"
diff --git a/.github/workflows/ci.bazelrc b/.github/workflows/ci.bazelrc
new file mode 100644
index 0000000..dc89744
--- /dev/null
+++ b/.github/workflows/ci.bazelrc
@@ -0,0 +1,12 @@
+# This file contains Bazel settings to apply on CI only.
+
+# Debug where options came from
+build --announce_rc
+# This directory is configured in GitHub actions to be persisted between runs.
+build --disk_cache=~/.cache/bazel
+build --repository_cache=~/.cache/bazel-repo
+# Don't rely on test logs being easily accessible from the test runner,
+# though it makes the log noisier.
+test --test_output=errors
+# Allows tests to run bazelisk-in-bazel, since this is the cache folder used
+test --test_env=XDG_CACHE_HOME
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
new file mode 100644
index 0000000..ea97575
--- /dev/null
+++ b/.github/workflows/release.yml
@@ -0,0 +1,51 @@
+# Copyright 2023 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Cut a release whenever a new tag is pushed to the repo.
+name: Release
+
+on:
+ push:
+ tags:
+ - "v*.*.*"
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v3
+ - name: Mount bazel caches
+ uses: actions/cache@v3
+ with:
+ path: |
+ ~/.cache/bazel
+ ~/.cache/bazel-repo
+ key: bazel-cache-${{ hashFiles('**/BUILD.bazel', '**/*.bzl', 'WORKSPACE') }}
+ restore-keys: bazel-cache-
+ # Skipping tests for now to test the release workflow
+ # - name: bazel test //...
+ # env:
+ # # Bazelisk will download bazel to here
+ # XDG_CACHE_HOME: ~/.cache/bazel-repo
+ # run: bazel --bazelrc=.github/workflows/ci.bazelrc --bazelrc=.bazelrc test --keep_going //...
+ - name: Prepare workspace snippet
+ run: .github/workflows/workspace_snippet.sh ${{ env.GITHUB_REF_NAME }} > release_notes.txt
+ - name: Release
+ uses: softprops/action-gh-release@v1
+ with:
+ # Use GH feature to populate the changelog automatically
+ generate_release_notes: true
+ body_path: release_notes.txt
+ fail_on_unmatched_files: true
+ files: rules_testing-*.tar.gz
diff --git a/.github/workflows/workspace_snippet.sh b/.github/workflows/workspace_snippet.sh
new file mode 100755
index 0000000..b4152a9
--- /dev/null
+++ b/.github/workflows/workspace_snippet.sh
@@ -0,0 +1,25 @@
+#!/usr/bin/env bash
+
+set -o errexit -o nounset -o pipefail
+
+# Set by GH actions, see
+# https://docs.github.com/en/actions/learn-github-actions/environment-variables#default-environment-variables
+TAG=${GITHUB_REF_NAME}
+PREFIX="rules_testing-${TAG:1}"
+ARCHIVE="rules_testing-$TAG.tar.gz"
+# The prefix is chosen to match what GitHub generates for source archives
+git archive --format=tar --prefix=${PREFIX}/ ${TAG} | gzip > $ARCHIVE
+SHA=$(shasum -a 256 $ARCHIVE | awk '{print $1}')
+
+cat << EOF
+WORKSPACE snippet:
+\`\`\`starlark
+load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
+http_archive(
+ name = "rules_testing",
+ sha256 = "${SHA}",
+ strip_prefix = "${PREFIX}",
+ url = "https://github.com/bazelbuild/rules_testing/releases/download/${TAG}/${ARCHIVE}",
+)
+\`\`\`
+EOF
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..4a39ee3
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,5 @@
+# Generated sphinx docs
+docs/_build/
+# Generated API docs
+/docs/source/api/
+!/docs/source/api/index.md
diff --git a/.readthedocs.yaml b/.readthedocs.yaml
new file mode 100644
index 0000000..cbb61ae
--- /dev/null
+++ b/.readthedocs.yaml
@@ -0,0 +1,23 @@
+
+version: 2
+
+formats:
+ - pdf
+ - htmlzip
+
+sphinx:
+ configuration: docs/source/conf.py
+
+build:
+ os: "ubuntu-22.04"
+ tools:
+ python: "3.11"
+ nodejs: "19"
+ jobs:
+ pre_build:
+ - npm install -g @bazel/bazelisk
+ - bazel run //docs:run_sphinx_build
+
+python:
+ install:
+ - requirements: docs/requirements.txt
diff --git a/AUTHORS b/AUTHORS
new file mode 100644
index 0000000..b0f617e
--- /dev/null
+++ b/AUTHORS
@@ -0,0 +1,13 @@
+# This is the official list of authors for copyright purposes.
+#
+# Names should be added to this file as:
+# Name or Organization <email address>
+# The email address is not required for organizations.
+#
+# Please keep the lists sorted.
+
+# Organizations
+
+Google LLC
+
+# Individuals \ No newline at end of file
diff --git a/BUILD b/BUILD
new file mode 100644
index 0000000..e023dab
--- /dev/null
+++ b/BUILD
@@ -0,0 +1,29 @@
+# Copyright 2023 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load("@rules_license//rules:license.bzl", "license")
+
+package(
+ default_applicable_licenses = [":package_license"],
+ default_visibility = ["//visibility:private"],
+)
+
+licenses(["notice"])
+
+exports_files(["LICENSE"])
+
+license(
+ name = "package_license",
+ package_name = "rules_testing",
+)
diff --git a/CODEOWNERS b/CODEOWNERS
new file mode 100644
index 0000000..6176c0e
--- /dev/null
+++ b/CODEOWNERS
@@ -0,0 +1 @@
+* @comius @hvadehra @rickeylev
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..c44c5fb
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,30 @@
+# How to Contribute
+
+We'd love to accept your patches and contributions to this project. There are
+just a few small guidelines you need to follow.
+
+## Contributor License Agreement
+
+Contributions to this project must be accompanied by a Contributor License
+Agreement. You (or your employer) retain the copyright to your contribution;
+this simply gives us permission to use and redistribute your contributions as
+part of the project. Head over to <https://cla.developers.google.com/> to see
+your current agreements on file or to sign a new one.
+
+You generally only need to submit a CLA once, so if you've already submitted one
+(even if it was for a different project), you probably don't need to do it
+again.
+
+## Code reviews
+
+All submissions, including submissions by project members, require review. We
+use GitHub pull requests for this purpose. Consult
+[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
+information on using pull requests.
+
+## Community Guidelines
+
+This project follows
+[Google's Open Source Community Guidelines](https://opensource.google.com/conduct/).
+
+
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/MODULE.bazel b/MODULE.bazel
new file mode 100644
index 0000000..515a444
--- /dev/null
+++ b/MODULE.bazel
@@ -0,0 +1,64 @@
+module(
+ name = "rules_testing",
+ version = "0.0.1",
+ compatibility_level = 1,
+)
+
+bazel_dep(name = "platforms", version = "0.0.6")
+bazel_dep(name = "bazel_skylib", version = "1.3.0")
+bazel_dep(name = "rules_license", version = "0.0.4")
+
+# ===== The rest of these are development dependencies =====
+
+# TODO(https://github.com/bazelbuild/stardoc/issues/117): stardoc doesn't yet
+# work with bzlmod enabled. This defines the repo so load() works.
+bazel_dep(
+ name = "stardoc",
+ version = "0.5.3",
+ dev_dependency = True,
+ repo_name = "io_bazel_stardoc",
+)
+bazel_dep(name = "rules_python", version = "0.20.0", dev_dependency = True)
+
+python = use_extension(
+ "@rules_python//python:extensions.bzl",
+ "python",
+ dev_dependency = True,
+)
+python.toolchain(
+ name = "python3_11",
+ python_version = "3.11",
+)
+
+# NOTE: use_repo() must be called for each platform that runs the docgen tools
+use_repo(
+ python,
+ "python3_11_toolchains",
+ "python3_11_x86_64-unknown-linux-gnu",
+)
+
+# NOTE: This is actually a dev dependency, but due to
+# https://github.com/bazelbuild/bazel/issues/18248 it has to be non-dev to
+# generate the repo name used in the subsequent register_toolchains() call.
+# Once 6.2 is the minimum supported version, the register_toolchains
+# call can use dev_dependency=True and this can go away entirely.
+dev = use_extension(
+ "//:dev_extension.bzl",
+ "dev",
+)
+use_repo(dev, "rules_testing_dev_toolchains")
+
+# NOTE: This call will be run by downstream users, so the
+# repos it mentions must exist.
+register_toolchains("@rules_testing_dev_toolchains//:all")
+
+pip = use_extension(
+ "@rules_python//python:extensions.bzl",
+ "pip",
+ dev_dependency = True,
+)
+pip.parse(
+ name = "docs-pypi",
+ requirements_lock = "//docs:requirements.txt",
+)
+use_repo(pip, "docs-pypi")
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..9582b1f
--- /dev/null
+++ b/README.md
@@ -0,0 +1,9 @@
+[![Build
+status](https://badge.buildkite.com/a82ebafd30ad56e0596dcd3a3a19f36985d064f7f7fb89e21e.svg?branch=master)](https://buildkite.com/bazel/rules-testing)
+
+# Frameworks and utilities for testing Bazel Starlark rules
+
+`rules_testing` provides frameworks and utilities to make testing Starlark rules
+easier and convenient.
+
+For detailed docs, see the [docs directory](docs/index.md).
diff --git a/RELEASING.md b/RELEASING.md
new file mode 100644
index 0000000..e2f3df0
--- /dev/null
+++ b/RELEASING.md
@@ -0,0 +1,11 @@
+# Releases
+
+Releases are mostly automated and triggered by adding a tag:
+
+Assuming you have a remote named `upstream` pointing to the repo:
+
+* `git tag v<VERSION> upstream/master && git push upstream --tags`
+
+After pushing, the release action will trigger. It will package it up, create a
+release on the GitHub release page, and trigger an update to the Bazel Central
+Registry (BCR).
diff --git a/WORKSPACE.bazel b/WORKSPACE.bazel
new file mode 100644
index 0000000..4f36512
--- /dev/null
+++ b/WORKSPACE.bazel
@@ -0,0 +1,65 @@
+workspace(name = "rules_testing")
+
+load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
+load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe")
+
+maybe(
+ http_archive,
+ name = "bazel_skylib",
+ sha256 = "74d544d96f4a5bb630d465ca8bbcfe231e3594e5aae57e1edbf17a6eb3ca2506",
+ urls = [
+ "https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.3.0/bazel-skylib-1.3.0.tar.gz",
+ "https://github.com/bazelbuild/bazel-skylib/releases/download/1.3.0/bazel-skylib-1.3.0.tar.gz",
+ ],
+)
+
+http_archive(
+ name = "io_bazel_stardoc",
+ sha256 = "3fd8fec4ddec3c670bd810904e2e33170bedfe12f90adf943508184be458c8bb",
+ urls = [
+ "https://mirror.bazel.build/github.com/bazelbuild/stardoc/releases/download/0.5.3/stardoc-0.5.3.tar.gz",
+ "https://github.com/bazelbuild/stardoc/releases/download/0.5.3/stardoc-0.5.3.tar.gz",
+ ],
+)
+
+http_archive(
+ name = "rules_license",
+ sha256 = "6157e1e68378532d0241ecd15d3c45f6e5cfd98fc10846045509fb2a7cc9e381",
+ urls = [
+ "https://github.com/bazelbuild/rules_license/releases/download/0.0.4/rules_license-0.0.4.tar.gz",
+ "https://mirror.bazel.build/github.com/bazelbuild/rules_license/releases/download/0.0.4/rules_license-0.0.4.tar.gz",
+ ],
+)
+
+load("@io_bazel_stardoc//:setup.bzl", "stardoc_repositories")
+
+stardoc_repositories()
+
+http_archive(
+ name = "rules_python",
+ sha256 = "a644da969b6824cc87f8fe7b18101a8a6c57da5db39caa6566ec6109f37d2141",
+ strip_prefix = "rules_python-0.20.0",
+ url = "https://github.com/bazelbuild/rules_python/releases/download/0.20.0/rules_python-0.20.0.tar.gz",
+)
+
+load("@rules_python//python:repositories.bzl", "python_register_toolchains")
+
+python_register_toolchains(
+ name = "python3_11",
+ # Available versions are listed in @rules_python//python:versions.bzl.
+ # We recommend using the same version your team is already standardized on.
+ python_version = "3.11",
+)
+
+load("@python3_11//:defs.bzl", "interpreter")
+load("@rules_python//python:pip.bzl", "pip_parse")
+
+pip_parse(
+ name = "docs-pypi",
+ python_interpreter_target = interpreter,
+ requirements_lock = "//docs:requirements.txt",
+)
+
+load("@docs-pypi//:requirements.bzl", "install_deps")
+
+install_deps()
diff --git a/WORKSPACE.bzlmod b/WORKSPACE.bzlmod
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/WORKSPACE.bzlmod
diff --git a/addlicense.sh b/addlicense.sh
new file mode 100755
index 0000000..8cc8fb3
--- /dev/null
+++ b/addlicense.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+# Copyright 2023 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+if ! command -v addlicense @>&1 >/dev/null; then
+ echo "ERROR: addlicense not installed."
+ echo "Install using https://github.com/google/addlicense#install"
+ exit 1
+fi
+
+addlicense -v -l apache -c 'The Bazel Authors. All rights reserved.' "$@"
diff --git a/dev_extension.bzl b/dev_extension.bzl
new file mode 100644
index 0000000..8be534e
--- /dev/null
+++ b/dev_extension.bzl
@@ -0,0 +1,47 @@
+# Copyright 2023 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Extension only used for development purposes."""
+
+def _dev_ext_impl(mctx):
+ module = mctx.modules[0]
+ _dev_toolchains_repo(
+ name = "rules_testing_dev_toolchains",
+ is_root = module.is_root,
+ )
+
+dev = module_extension(
+ implementation = _dev_ext_impl,
+)
+
+def _dev_toolchains_repo_impl(rctx):
+ # If its the root module, then we're in rules_testing and
+ # it's a dev dependency situation.
+ if rctx.attr.is_root:
+ toolchain_build = Label("@python3_11_toolchains//:BUILD.bazel")
+
+ # NOTE: This is brittle. It only works because, luckily,
+ # rules_python's toolchain BUILD file is essentially self-contained.
+ # It only uses absolute references and doesn't load anything,
+ # so we can copy it elsewhere and it still works.
+ rctx.symlink(toolchain_build, "BUILD.bazel")
+ else:
+ rctx.file("BUILD.bazel", "")
+
+_dev_toolchains_repo = repository_rule(
+ implementation = _dev_toolchains_repo_impl,
+ attrs = {
+ "is_root": attr.bool(),
+ },
+)
diff --git a/docgen/BUILD b/docgen/BUILD
new file mode 100644
index 0000000..3acaa53
--- /dev/null
+++ b/docgen/BUILD
@@ -0,0 +1,48 @@
+# Copyright 2023 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Generated documentation for the docs/ directory
+
+load(":docgen.bzl", "sphinx_stardocs")
+
+package(
+ default_applicable_licenses = ["//:package_license"],
+ default_visibility = ["//:__subpackages__"],
+)
+
+sphinx_stardocs(
+ name = "docs",
+ bzl_libraries = [
+ "//lib:analysis_test_bzl",
+ "//lib:truth_bzl",
+ "//lib:util_bzl",
+ "//lib/private:action_subject_bzl",
+ "//lib/private:bool_subject_bzl",
+ "//lib/private:collection_subject_bzl",
+ "//lib/private:depset_file_subject_bzl",
+ "//lib/private:dict_subject_bzl",
+ "//lib/private:execution_info_subject_bzl",
+ "//lib/private:expect_bzl",
+ "//lib/private:expect_meta_bzl",
+ "//lib/private:file_subject_bzl",
+ "//lib/private:instrumented_files_info_subject_bzl",
+ "//lib/private:int_subject_bzl",
+ "//lib/private:label_subject_bzl",
+ "//lib/private:ordered_bzl",
+ "//lib/private:run_environment_info_subject_bzl",
+ "//lib/private:runfiles_subject_bzl",
+ "//lib/private:str_subject_bzl",
+ "//lib/private:target_subject_bzl",
+ ],
+)
diff --git a/docgen/docgen.bzl b/docgen/docgen.bzl
new file mode 100644
index 0000000..f89328a
--- /dev/null
+++ b/docgen/docgen.bzl
@@ -0,0 +1,70 @@
+# Copyright 2023 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Rules to help generate rules_testing docs."""
+
+load("@io_bazel_stardoc//stardoc:stardoc.bzl", "stardoc")
+load("@bazel_skylib//rules:build_test.bzl", "build_test")
+
+def sphinx_stardocs(name, bzl_libraries, **kwargs):
+ """Generate Sphinx-friendly markdown docs using Stardoc for bzl libraries.
+
+ Args:
+ name: str, the name of the resulting file group with the generated docs.
+ bzl_libraries: list of targets, the libraries to generate docs for.
+        They must be in "//foo:{name}_bzl" format; the `{name}` portion
+ will become the output file name.
+ **kwargs: Additional kwargs to pass onto generated targets (e.g.
+ tags)
+ """
+
+ # Stardoc doesn't yet work with bzlmod; we can detect this by
+ # looking for "@@" vs "@" in labels.
+ if "@@" in str(Label("//:X")):
+ kwargs["target_compatible_with"] = ["@platforms//:incompatible"]
+
+ docs = []
+ for label in bzl_libraries:
+ lib_name = Label(label).name.replace("_bzl", "")
+
+ doc_rule_name = "_{}_{}".format(name, lib_name)
+ sphinx_stardoc(
+ name = "_{}_{}".format(name, lib_name),
+ out = lib_name + ".md",
+ input = label.replace("_bzl", ".bzl"),
+ deps = [label],
+ **kwargs
+ )
+ docs.append(doc_rule_name)
+
+ native.filegroup(
+ name = name,
+ srcs = docs,
+ **kwargs
+ )
+ build_test(
+ name = name + "_build_test",
+ targets = docs,
+ **kwargs
+ )
+
+def sphinx_stardoc(**kwargs):
+ stardoc(
+ # copybara-marker: stardoc format
+ func_template = "func_template.vm",
+ header_template = "header_template.vm",
+ rule_template = "rule_template.vm",
+ provider_template = "provider_template.vm",
+ **kwargs
+ )
diff --git a/docgen/func_template.vm b/docgen/func_template.vm
new file mode 100644
index 0000000..ee6a2bf
--- /dev/null
+++ b/docgen/func_template.vm
@@ -0,0 +1,56 @@
+#set( $nl = "
+" )
+#set( $fn = $funcInfo.functionName)
+#set( $fnl = $fn.replaceAll("[.]", "_").toLowerCase())
+{.starlark-object}
+#[[##]]# $fn
+
+#set( $hasParams = false)
+{.starlark-signature}
+${funcInfo.functionName}(## Comment to consume newline
+#foreach ($param in $funcInfo.getParameterList())
+#if($param.name != "self")
+#set( $hasParams = true)
+[${param.name}](#${fnl}_${param.name})## Comment to consume newline
+#if(!$param.getDefaultValue().isEmpty())
+=$param.getDefaultValue()#end#if($foreach.hasNext),
+#end
+#end
+#end
+)
+
+${funcInfo.docString}
+
+#if ($hasParams)
+{#${fnl}_parameters}
+**PARAMETERS** [¶](#${fnl}_parameters){.headerlink}
+
+#foreach ($param in $funcInfo.getParameterList())
+#if($param.name != "self")
+#set($link = $fnl + "_" + $param.name)
+#if($foreach.first)
+{.params-box}
+#end
+## The .span wrapper is necessary so the trailing colon doesn't wrap
+:[${param.name}[¶](#$link){.headerlink}]{.span}: []{#$link}
+#if(!$param.getDefaultValue().isEmpty())(_default `${param.getDefaultValue()}`_) #end
+#if(!$param.docString.isEmpty())
+ $param.docString.replaceAll("$nl", "$nl ")
+#else
+ _undocumented_
+#end
+#end
+#end
+#end
+#if (!$funcInfo.getReturn().docString.isEmpty())
+
+{#${fnl}_returns}
+RETURNS [¶](#${fnl}_returns){.headerlink}
+: ${funcInfo.getReturn().docString.replaceAll("$nl", "$nl ")}
+#end
+#if (!$funcInfo.getDeprecated().docString.isEmpty())
+
+**DEPRECATED**
+
+${funcInfo.getDeprecated().docString}
+#end
diff --git a/docgen/header_template.vm b/docgen/header_template.vm
new file mode 100644
index 0000000..fee7e2c
--- /dev/null
+++ b/docgen/header_template.vm
@@ -0,0 +1 @@
+$moduleDocstring
diff --git a/docgen/provider_template.vm b/docgen/provider_template.vm
new file mode 100644
index 0000000..55e6871
--- /dev/null
+++ b/docgen/provider_template.vm
@@ -0,0 +1,29 @@
+#set( $nl = "
+" )
+#set( $pn = $providerInfo.providerName)
+#set( $pnl = $pn.replaceAll("[.]", "_").toLowerCase())
+{.starlark-object}
+#[[##]]# ${providerName}
+
+#set( $hasFields = false)
+{.starlark-signature}
+${providerInfo.providerName}(## Comment to consume newline
+#foreach ($field in $providerInfo.getFieldInfoList())
+#set( $hasFields = true)
+[${field.name}](#${pnl}_${field.name})## Comment to consume newline
+#if($foreach.hasNext),
+#end
+#end
+)
+
+$providerInfo.docString
+
+#if ($hasFields)
+**FIELDS** [¶](#${pnl}_fields){.headerlink}
+
+#foreach ($field in $providerInfo.getFieldInfoList())
+#set($link = $pnl + "_" + $field.name)
+:[${field.name}[¶](#$link){.headerlink}]{.span}: []{#$link}
+ $field.docString.replaceAll("$nl", "$nl ")
+#end
+#end
diff --git a/docgen/rule_template.vm b/docgen/rule_template.vm
new file mode 100644
index 0000000..d91bad2
--- /dev/null
+++ b/docgen/rule_template.vm
@@ -0,0 +1,48 @@
+#set( $nl = "
+" )
+#set( $rn = $ruleInfo.ruleName)
+#set( $rnl = $rn.replaceAll("[.]", "_").toLowerCase())
+{.starlark-object}
+#[[##]]# $ruleName
+
+#set( $hasAttrs = false)
+{.starlark-signature}
+${ruleInfo.ruleName}(## Comment to consume newline
+#foreach ($attr in $ruleInfo.getAttributeList())
+#set( $hasAttrs = true)
+[${attr.name}](#${rnl}_${attr.name})## Comment to consume newline
+#if(!$attr.getDefaultValue().isEmpty())
+=$attr.getDefaultValue()#end#if($foreach.hasNext),
+#end
+#end
+)
+
+$ruleInfo.docString
+
+#if ($hasAttrs)
+{#${rnl}_attributes}
+**ATTRIBUTES** [¶](#${rnl}_attributes){.headerlink}
+
+#foreach ($attr in $ruleInfo.getAttributeList())
+#set($link = $rnl + "_" + $attr.name)
+#if($attr.mandatory)
+#set($opt = "required")
+#else
+#set($opt = "optional")
+#end
+#if($attr.type == "NAME")
+#set($type = "[Name][target-name]")
+#elseif($attr.type == "LABEL_LIST")
+#set($type = "list of [label][attr-label]s")
+#end
+#if(!$attr.getDefaultValue().isEmpty())
+#set($default = ", default `" + $attr.getDefaultValue() + "`")
+#else
+#set($default = "")
+#end
+:[${attr.name}[¶](#$link){.headerlink}]{.span}: []{#$link}
+ _($opt $type$default)_
+ $attr.docString.replaceAll("$nl", "$nl ")
+
+#end
+#end
diff --git a/docs/BUILD b/docs/BUILD
new file mode 100644
index 0000000..59d0295
--- /dev/null
+++ b/docs/BUILD
@@ -0,0 +1,74 @@
+# Copyright 2023 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load("@docs-pypi//:requirements.bzl", "requirement")
+load("@rules_python//python:pip.bzl", "compile_pip_requirements")
+load("@rules_python//python:py_binary.bzl", "py_binary")
+
+package(
+ default_applicable_licenses = ["//:package_license"],
+)
+
+sh_binary(
+ name = "run_sphinx_build",
+ srcs = ["run_sphinx_build.sh"],
+ args = [
+ "$(rootpath :sphinx_build)",
+ "$(rootpath :crossrefs.md)",
+ "$(rootpaths //docgen:docs)",
+ ],
+ data = [
+ "crossrefs.md",
+ ":sphinx_build",
+ ":sphinx_sources",
+ "//docgen:docs",
+ ],
+)
+
+py_binary(
+ name = "sphinx_build",
+ srcs = ["sphinx_build.py"],
+ deps = [
+ requirement("sphinx"),
+ requirement("sphinx_rtd_theme"),
+ requirement("myst_parser"),
+ ],
+)
+
+# Run bazel run //docs:requirements.update
+compile_pip_requirements(
+ name = "requirements",
+ requirements_in = "requirements.in",
+ requirements_txt = "requirements.txt",
+ # The requirements output differs on Windows, so just restrict it to Linux.
+ # The build process is only run on, and only works for, Linux anyways.
+ target_compatible_with = ["@platforms//os:linux"],
+)
+
+filegroup(
+ name = "sphinx_sources",
+ srcs = [
+ # This isn't generated like the other files under the api directory,
+ # but it can't go in the glob because the exclude param will ignore it.
+ "source/api/index.md",
+ ] + glob(
+ [
+ "**",
+ ],
+ exclude = [
+ "source/api/**", # These are all generated files
+ "_build/**",
+ ],
+ ),
+)
diff --git a/docs/README.md b/docs/README.md
new file mode 100644
index 0000000..893a1b0
--- /dev/null
+++ b/docs/README.md
@@ -0,0 +1,63 @@
+# rules_testing docs generation
+
+The docs for rules_testing are generated using a combination of Sphinx, Bazel,
+and Readthedocs.org. The Markdown files in source control are unlikely to render
+properly without the Sphinx processing step because they rely on Sphinx and
+MyST-specific Markdown functionality.
+
+The actual sources that Sphinx consumes are in the docs/source directory.
+
+Manually building the docs isn't necessary -- readthedocs.org will
+automatically build and deploy them when commits are pushed to the repo.
+
+## Generating docs for development
+
+To generate docs for development/preview purposes, install
+[ibazel](https://github.com/bazelbuild/bazel-watcher)[^ibazel] and run:
+
+```
+ibazel run //docs:run_sphinx_build
+```
+
+This will build the docs and start a local webserver at http://localhost:8000
+where you can view the output. As you edit files, ibazel will detect the file
+changes and re-run the build process, and you can simply refresh your browser to
+see the changes.
+
+## MyST Markdown flavor
+
+Sphinx is configured to parse Markdown files using MyST, which is a more
+advanced flavor of Markdown that supports most features of restructured text and
+integrates with Sphinx functionality such as automatic cross references,
+creating indexes, and using concise markup to generate rich documentation.
+
+MyST features and behaviors are controlled by the Sphinx configuration file,
+`docs/source/conf.py`. For more info, see https://myst-parser.readthedocs.io.
+
+## Sphinx configuration
+
+The Sphinx-specific configuration files and input doc files live in
+docs/source -- anything under this directory will be treated by Sphinx as
+something it should create documentation for.
+
+The Sphinx configuration is `docs/source/conf.py`. See
+https://www.sphinx-doc.org/ for details about the configuration file.
+
+## Readthedocs configuration
+
+There are two basic parts to the readthedocs configuration:
+
+* `.readthedocs.yaml`: This configuration file controls most settings, such as
+ the OS version used to build, Python version, dependencies, what Bazel
+ commands to run, etc.
+* https://readthedocs.org/projects/rules-testing: This is the project
+ administration page. While most settings come from the config file, this
+ controls additional settings such as permissions, what versions are
+ published, when to publish changes, etc.
+
+For more readthedocs configuration details, see docs.readthedocs.io.
+
+Of particular note, `//docs:requirements.txt` is used by readthedocs for
+specifying Python dependencies (including Sphinx version).
+
+[^ibazel]: Quick install: `npm install -g @bazel/ibazel`
diff --git a/docs/crossrefs.md b/docs/crossrefs.md
new file mode 100644
index 0000000..59d6be1
--- /dev/null
+++ b/docs/crossrefs.md
@@ -0,0 +1,26 @@
+[`Action`]: https://bazel.build/rules/lib/Action
+[`ActionSubject`]: /api/action_subject
+[`bool`]: https://bazel.build/rules/lib/bool
+[`BoolSubject`]: /api/bool_subject
+[`CollectionSubject`]: /api/collection_subject
+[`depset`]: https://bazel.build/rules/lib/depset
+[`DepsetFileSubject`]: /api/depset_file_subject
+[`dict`]: https://bazel.build/rules/lib/dict
+[`DictSubject`]: /api/dict_subject
+[`Expect`]: /api/expect
+[`ExpectMeta`]: /api/expect_meta
+[`File`]: https://bazel.build/rules/lib/File
+[`FileSubject`]: /api/file_subject
+[`format_str`]: /api/expect_meta.html#expectmeta-format-str
+[`IntSubject`]: /api/int_subject
+[`Label`]: https://bazel.build/rules/lib/Label
+[`LabelSubject`]: /api/label_subject
+[`list`]: https://bazel.build/rules/lib/list
+[`Ordered`]: /api/ordered
+[`RunfilesSubject`]: /api/runfiles_subject
+[`str`]: https://bazel.build/rules/lib/string
+[`StrSubject`]: /api/str_subject
+[`Target`]: https://bazel.build/rules/lib/Target
+[`TargetSubject`]: /api/target_subject
+[target-name]: https://bazel.build/concepts/labels#target-names
+[attr-label]: https://bazel.build/concepts/labels
diff --git a/docs/requirements.in b/docs/requirements.in
new file mode 100644
index 0000000..aa179a4
--- /dev/null
+++ b/docs/requirements.in
@@ -0,0 +1,5 @@
+# NOTE: This is only used as input to create the resolved requirements.txt file,
+# which is what both the Bazel and Readthedocs builds use.
+sphinx
+myst-parser
+sphinx_rtd_theme
diff --git a/docs/requirements.txt b/docs/requirements.txt
new file mode 100644
index 0000000..db2141e
--- /dev/null
+++ b/docs/requirements.txt
@@ -0,0 +1,288 @@
+#
+# This file is autogenerated by pip-compile with Python 3.11
+# by the following command:
+#
+# bazel run //docs:requirements.update
+#
+alabaster==0.7.13 \
+ --hash=sha256:1ee19aca801bbabb5ba3f5f258e4422dfa86f82f3e9cefb0859b283cdd7f62a3 \
+ --hash=sha256:a27a4a084d5e690e16e01e03ad2b2e552c61a65469419b907243193de1a84ae2
+ # via sphinx
+babel==2.12.1 \
+ --hash=sha256:b4246fb7677d3b98f501a39d43396d3cafdc8eadb045f4a31be01863f655c610 \
+ --hash=sha256:cc2d99999cd01d44420ae725a21c9e3711b3aadc7976d6147f622d8581963455
+ # via sphinx
+certifi==2022.12.7 \
+ --hash=sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3 \
+ --hash=sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18
+ # via requests
+charset-normalizer==3.1.0 \
+ --hash=sha256:04afa6387e2b282cf78ff3dbce20f0cc071c12dc8f685bd40960cc68644cfea6 \
+ --hash=sha256:04eefcee095f58eaabe6dc3cc2262f3bcd776d2c67005880894f447b3f2cb9c1 \
+ --hash=sha256:0be65ccf618c1e7ac9b849c315cc2e8a8751d9cfdaa43027d4f6624bd587ab7e \
+ --hash=sha256:0c95f12b74681e9ae127728f7e5409cbbef9cd914d5896ef238cc779b8152373 \
+ --hash=sha256:0ca564606d2caafb0abe6d1b5311c2649e8071eb241b2d64e75a0d0065107e62 \
+ --hash=sha256:10c93628d7497c81686e8e5e557aafa78f230cd9e77dd0c40032ef90c18f2230 \
+ --hash=sha256:11d117e6c63e8f495412d37e7dc2e2fff09c34b2d09dbe2bee3c6229577818be \
+ --hash=sha256:11d3bcb7be35e7b1bba2c23beedac81ee893ac9871d0ba79effc7fc01167db6c \
+ --hash=sha256:12a2b561af122e3d94cdb97fe6fb2bb2b82cef0cdca131646fdb940a1eda04f0 \
+ --hash=sha256:12d1a39aa6b8c6f6248bb54550efcc1c38ce0d8096a146638fd4738e42284448 \
+ --hash=sha256:1435ae15108b1cb6fffbcea2af3d468683b7afed0169ad718451f8db5d1aff6f \
+ --hash=sha256:1c60b9c202d00052183c9be85e5eaf18a4ada0a47d188a83c8f5c5b23252f649 \
+ --hash=sha256:1e8fcdd8f672a1c4fc8d0bd3a2b576b152d2a349782d1eb0f6b8e52e9954731d \
+ --hash=sha256:20064ead0717cf9a73a6d1e779b23d149b53daf971169289ed2ed43a71e8d3b0 \
+ --hash=sha256:21fa558996782fc226b529fdd2ed7866c2c6ec91cee82735c98a197fae39f706 \
+ --hash=sha256:22908891a380d50738e1f978667536f6c6b526a2064156203d418f4856d6e86a \
+ --hash=sha256:3160a0fd9754aab7d47f95a6b63ab355388d890163eb03b2d2b87ab0a30cfa59 \
+ --hash=sha256:322102cdf1ab682ecc7d9b1c5eed4ec59657a65e1c146a0da342b78f4112db23 \
+ --hash=sha256:34e0a2f9c370eb95597aae63bf85eb5e96826d81e3dcf88b8886012906f509b5 \
+ --hash=sha256:3573d376454d956553c356df45bb824262c397c6e26ce43e8203c4c540ee0acb \
+ --hash=sha256:3747443b6a904001473370d7810aa19c3a180ccd52a7157aacc264a5ac79265e \
+ --hash=sha256:38e812a197bf8e71a59fe55b757a84c1f946d0ac114acafaafaf21667a7e169e \
+ --hash=sha256:3a06f32c9634a8705f4ca9946d667609f52cf130d5548881401f1eb2c39b1e2c \
+ --hash=sha256:3a5fc78f9e3f501a1614a98f7c54d3969f3ad9bba8ba3d9b438c3bc5d047dd28 \
+ --hash=sha256:3d9098b479e78c85080c98e1e35ff40b4a31d8953102bb0fd7d1b6f8a2111a3d \
+ --hash=sha256:3dc5b6a8ecfdc5748a7e429782598e4f17ef378e3e272eeb1340ea57c9109f41 \
+ --hash=sha256:4155b51ae05ed47199dc5b2a4e62abccb274cee6b01da5b895099b61b1982974 \
+ --hash=sha256:49919f8400b5e49e961f320c735388ee686a62327e773fa5b3ce6721f7e785ce \
+ --hash=sha256:53d0a3fa5f8af98a1e261de6a3943ca631c526635eb5817a87a59d9a57ebf48f \
+ --hash=sha256:5f008525e02908b20e04707a4f704cd286d94718f48bb33edddc7d7b584dddc1 \
+ --hash=sha256:628c985afb2c7d27a4800bfb609e03985aaecb42f955049957814e0491d4006d \
+ --hash=sha256:65ed923f84a6844de5fd29726b888e58c62820e0769b76565480e1fdc3d062f8 \
+ --hash=sha256:6734e606355834f13445b6adc38b53c0fd45f1a56a9ba06c2058f86893ae8017 \
+ --hash=sha256:6baf0baf0d5d265fa7944feb9f7451cc316bfe30e8df1a61b1bb08577c554f31 \
+ --hash=sha256:6f4f4668e1831850ebcc2fd0b1cd11721947b6dc7c00bf1c6bd3c929ae14f2c7 \
+ --hash=sha256:6f5c2e7bc8a4bf7c426599765b1bd33217ec84023033672c1e9a8b35eaeaaaf8 \
+ --hash=sha256:6f6c7a8a57e9405cad7485f4c9d3172ae486cfef1344b5ddd8e5239582d7355e \
+ --hash=sha256:7381c66e0561c5757ffe616af869b916c8b4e42b367ab29fedc98481d1e74e14 \
+ --hash=sha256:73dc03a6a7e30b7edc5b01b601e53e7fc924b04e1835e8e407c12c037e81adbd \
+ --hash=sha256:74db0052d985cf37fa111828d0dd230776ac99c740e1a758ad99094be4f1803d \
+ --hash=sha256:75f2568b4189dda1c567339b48cba4ac7384accb9c2a7ed655cd86b04055c795 \
+ --hash=sha256:78cacd03e79d009d95635e7d6ff12c21eb89b894c354bd2b2ed0b4763373693b \
+ --hash=sha256:80d1543d58bd3d6c271b66abf454d437a438dff01c3e62fdbcd68f2a11310d4b \
+ --hash=sha256:830d2948a5ec37c386d3170c483063798d7879037492540f10a475e3fd6f244b \
+ --hash=sha256:891cf9b48776b5c61c700b55a598621fdb7b1e301a550365571e9624f270c203 \
+ --hash=sha256:8f25e17ab3039b05f762b0a55ae0b3632b2e073d9c8fc88e89aca31a6198e88f \
+ --hash=sha256:9a3267620866c9d17b959a84dd0bd2d45719b817245e49371ead79ed4f710d19 \
+ --hash=sha256:a04f86f41a8916fe45ac5024ec477f41f886b3c435da2d4e3d2709b22ab02af1 \
+ --hash=sha256:aaf53a6cebad0eae578f062c7d462155eada9c172bd8c4d250b8c1d8eb7f916a \
+ --hash=sha256:abc1185d79f47c0a7aaf7e2412a0eb2c03b724581139193d2d82b3ad8cbb00ac \
+ --hash=sha256:ac0aa6cd53ab9a31d397f8303f92c42f534693528fafbdb997c82bae6e477ad9 \
+ --hash=sha256:ac3775e3311661d4adace3697a52ac0bab17edd166087d493b52d4f4f553f9f0 \
+ --hash=sha256:b06f0d3bf045158d2fb8837c5785fe9ff9b8c93358be64461a1089f5da983137 \
+ --hash=sha256:b116502087ce8a6b7a5f1814568ccbd0e9f6cfd99948aa59b0e241dc57cf739f \
+ --hash=sha256:b82fab78e0b1329e183a65260581de4375f619167478dddab510c6c6fb04d9b6 \
+ --hash=sha256:bd7163182133c0c7701b25e604cf1611c0d87712e56e88e7ee5d72deab3e76b5 \
+ --hash=sha256:c36bcbc0d5174a80d6cccf43a0ecaca44e81d25be4b7f90f0ed7bcfbb5a00909 \
+ --hash=sha256:c3af8e0f07399d3176b179f2e2634c3ce9c1301379a6b8c9c9aeecd481da494f \
+ --hash=sha256:c84132a54c750fda57729d1e2599bb598f5fa0344085dbde5003ba429a4798c0 \
+ --hash=sha256:cb7b2ab0188829593b9de646545175547a70d9a6e2b63bf2cd87a0a391599324 \
+ --hash=sha256:cca4def576f47a09a943666b8f829606bcb17e2bc2d5911a46c8f8da45f56755 \
+ --hash=sha256:cf6511efa4801b9b38dc5546d7547d5b5c6ef4b081c60b23e4d941d0eba9cbeb \
+ --hash=sha256:d16fd5252f883eb074ca55cb622bc0bee49b979ae4e8639fff6ca3ff44f9f854 \
+ --hash=sha256:d2686f91611f9e17f4548dbf050e75b079bbc2a82be565832bc8ea9047b61c8c \
+ --hash=sha256:d7fc3fca01da18fbabe4625d64bb612b533533ed10045a2ac3dd194bfa656b60 \
+ --hash=sha256:dd5653e67b149503c68c4018bf07e42eeed6b4e956b24c00ccdf93ac79cdff84 \
+ --hash=sha256:de5695a6f1d8340b12a5d6d4484290ee74d61e467c39ff03b39e30df62cf83a0 \
+ --hash=sha256:e0ac8959c929593fee38da1c2b64ee9778733cdf03c482c9ff1d508b6b593b2b \
+ --hash=sha256:e1b25e3ad6c909f398df8921780d6a3d120d8c09466720226fc621605b6f92b1 \
+ --hash=sha256:e633940f28c1e913615fd624fcdd72fdba807bf53ea6925d6a588e84e1151531 \
+ --hash=sha256:e89df2958e5159b811af9ff0f92614dabf4ff617c03a4c1c6ff53bf1c399e0e1 \
+ --hash=sha256:ea9f9c6034ea2d93d9147818f17c2a0860d41b71c38b9ce4d55f21b6f9165a11 \
+ --hash=sha256:f645caaf0008bacf349875a974220f1f1da349c5dbe7c4ec93048cdc785a3326 \
+ --hash=sha256:f8303414c7b03f794347ad062c0516cee0e15f7a612abd0ce1e25caf6ceb47df \
+ --hash=sha256:fca62a8301b605b954ad2e9c3666f9d97f63872aa4efcae5492baca2056b74ab
+ # via requests
+docutils==0.18.1 \
+ --hash=sha256:23010f129180089fbcd3bc08cfefccb3b890b0050e1ca00c867036e9d161b98c \
+ --hash=sha256:679987caf361a7539d76e584cbeddc311e3aee937877c87346f31debc63e9d06
+ # via
+ # myst-parser
+ # sphinx
+ # sphinx-rtd-theme
+idna==3.4 \
+ --hash=sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4 \
+ --hash=sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2
+ # via requests
+imagesize==1.4.1 \
+ --hash=sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b \
+ --hash=sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a
+ # via sphinx
+jinja2==3.1.2 \
+ --hash=sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852 \
+ --hash=sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61
+ # via
+ # myst-parser
+ # sphinx
+markdown-it-py==2.2.0 \
+ --hash=sha256:5a35f8d1870171d9acc47b99612dc146129b631baf04970128b568f190d0cc30 \
+ --hash=sha256:7c9a5e412688bc771c67432cbfebcdd686c93ce6484913dccf06cb5a0bea35a1
+ # via
+ # mdit-py-plugins
+ # myst-parser
+markupsafe==2.1.2 \
+ --hash=sha256:0576fe974b40a400449768941d5d0858cc624e3249dfd1e0c33674e5c7ca7aed \
+ --hash=sha256:085fd3201e7b12809f9e6e9bc1e5c96a368c8523fad5afb02afe3c051ae4afcc \
+ --hash=sha256:090376d812fb6ac5f171e5938e82e7f2d7adc2b629101cec0db8b267815c85e2 \
+ --hash=sha256:0b462104ba25f1ac006fdab8b6a01ebbfbce9ed37fd37fd4acd70c67c973e460 \
+ --hash=sha256:137678c63c977754abe9086a3ec011e8fd985ab90631145dfb9294ad09c102a7 \
+ --hash=sha256:1bea30e9bf331f3fef67e0a3877b2288593c98a21ccb2cf29b74c581a4eb3af0 \
+ --hash=sha256:22152d00bf4a9c7c83960521fc558f55a1adbc0631fbb00a9471e097b19d72e1 \
+ --hash=sha256:22731d79ed2eb25059ae3df1dfc9cb1546691cc41f4e3130fe6bfbc3ecbbecfa \
+ --hash=sha256:2298c859cfc5463f1b64bd55cb3e602528db6fa0f3cfd568d3605c50678f8f03 \
+ --hash=sha256:28057e985dace2f478e042eaa15606c7efccb700797660629da387eb289b9323 \
+ --hash=sha256:2e7821bffe00aa6bd07a23913b7f4e01328c3d5cc0b40b36c0bd81d362faeb65 \
+ --hash=sha256:2ec4f2d48ae59bbb9d1f9d7efb9236ab81429a764dedca114f5fdabbc3788013 \
+ --hash=sha256:340bea174e9761308703ae988e982005aedf427de816d1afe98147668cc03036 \
+ --hash=sha256:40627dcf047dadb22cd25ea7ecfe9cbf3bbbad0482ee5920b582f3809c97654f \
+ --hash=sha256:40dfd3fefbef579ee058f139733ac336312663c6706d1163b82b3003fb1925c4 \
+ --hash=sha256:4cf06cdc1dda95223e9d2d3c58d3b178aa5dacb35ee7e3bbac10e4e1faacb419 \
+ --hash=sha256:50c42830a633fa0cf9e7d27664637532791bfc31c731a87b202d2d8ac40c3ea2 \
+ --hash=sha256:55f44b440d491028addb3b88f72207d71eeebfb7b5dbf0643f7c023ae1fba619 \
+ --hash=sha256:608e7073dfa9e38a85d38474c082d4281f4ce276ac0010224eaba11e929dd53a \
+ --hash=sha256:63ba06c9941e46fa389d389644e2d8225e0e3e5ebcc4ff1ea8506dce646f8c8a \
+ --hash=sha256:65608c35bfb8a76763f37036547f7adfd09270fbdbf96608be2bead319728fcd \
+ --hash=sha256:665a36ae6f8f20a4676b53224e33d456a6f5a72657d9c83c2aa00765072f31f7 \
+ --hash=sha256:6d6607f98fcf17e534162f0709aaad3ab7a96032723d8ac8750ffe17ae5a0666 \
+ --hash=sha256:7313ce6a199651c4ed9d7e4cfb4aa56fe923b1adf9af3b420ee14e6d9a73df65 \
+ --hash=sha256:7668b52e102d0ed87cb082380a7e2e1e78737ddecdde129acadb0eccc5423859 \
+ --hash=sha256:7df70907e00c970c60b9ef2938d894a9381f38e6b9db73c5be35e59d92e06625 \
+ --hash=sha256:7e007132af78ea9df29495dbf7b5824cb71648d7133cf7848a2a5dd00d36f9ff \
+ --hash=sha256:835fb5e38fd89328e9c81067fd642b3593c33e1e17e2fdbf77f5676abb14a156 \
+ --hash=sha256:8bca7e26c1dd751236cfb0c6c72d4ad61d986e9a41bbf76cb445f69488b2a2bd \
+ --hash=sha256:8db032bf0ce9022a8e41a22598eefc802314e81b879ae093f36ce9ddf39ab1ba \
+ --hash=sha256:99625a92da8229df6d44335e6fcc558a5037dd0a760e11d84be2260e6f37002f \
+ --hash=sha256:9cad97ab29dfc3f0249b483412c85c8ef4766d96cdf9dcf5a1e3caa3f3661cf1 \
+ --hash=sha256:a4abaec6ca3ad8660690236d11bfe28dfd707778e2442b45addd2f086d6ef094 \
+ --hash=sha256:a6e40afa7f45939ca356f348c8e23048e02cb109ced1eb8420961b2f40fb373a \
+ --hash=sha256:a6f2fcca746e8d5910e18782f976489939d54a91f9411c32051b4aab2bd7c513 \
+ --hash=sha256:a806db027852538d2ad7555b203300173dd1b77ba116de92da9afbc3a3be3eed \
+ --hash=sha256:abcabc8c2b26036d62d4c746381a6f7cf60aafcc653198ad678306986b09450d \
+ --hash=sha256:b8526c6d437855442cdd3d87eede9c425c4445ea011ca38d937db299382e6fa3 \
+ --hash=sha256:bb06feb762bade6bf3c8b844462274db0c76acc95c52abe8dbed28ae3d44a147 \
+ --hash=sha256:c0a33bc9f02c2b17c3ea382f91b4db0e6cde90b63b296422a939886a7a80de1c \
+ --hash=sha256:c4a549890a45f57f1ebf99c067a4ad0cb423a05544accaf2b065246827ed9603 \
+ --hash=sha256:ca244fa73f50a800cf8c3ebf7fd93149ec37f5cb9596aa8873ae2c1d23498601 \
+ --hash=sha256:cf877ab4ed6e302ec1d04952ca358b381a882fbd9d1b07cccbfd61783561f98a \
+ --hash=sha256:d9d971ec1e79906046aa3ca266de79eac42f1dbf3612a05dc9368125952bd1a1 \
+ --hash=sha256:da25303d91526aac3672ee6d49a2f3db2d9502a4a60b55519feb1a4c7714e07d \
+ --hash=sha256:e55e40ff0cc8cc5c07996915ad367fa47da6b3fc091fdadca7f5403239c5fec3 \
+ --hash=sha256:f03a532d7dee1bed20bc4884194a16160a2de9ffc6354b3878ec9682bb623c54 \
+ --hash=sha256:f1cd098434e83e656abf198f103a8207a8187c0fc110306691a2e94a78d0abb2 \
+ --hash=sha256:f2bfb563d0211ce16b63c7cb9395d2c682a23187f54c3d79bfec33e6705473c6 \
+ --hash=sha256:f8ffb705ffcf5ddd0e80b65ddf7bed7ee4f5a441ea7d3419e861a12eaf41af58
+ # via jinja2
+mdit-py-plugins==0.3.5 \
+ --hash=sha256:ca9a0714ea59a24b2b044a1831f48d817dd0c817e84339f20e7889f392d77c4e \
+ --hash=sha256:eee0adc7195e5827e17e02d2a258a2ba159944a0748f59c5099a4a27f78fcf6a
+ # via myst-parser
+mdurl==0.1.2 \
+ --hash=sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8 \
+ --hash=sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba
+ # via markdown-it-py
+myst-parser==1.0.0 \
+ --hash=sha256:502845659313099542bd38a2ae62f01360e7dd4b1310f025dd014dfc0439cdae \
+ --hash=sha256:69fb40a586c6fa68995e6521ac0a525793935db7e724ca9bac1d33be51be9a4c
+ # via -r docs/requirements.in
+packaging==23.0 \
+ --hash=sha256:714ac14496c3e68c99c29b00845f7a2b85f3bb6f1078fd9f72fd20f0570002b2 \
+ --hash=sha256:b6ad297f8907de0fa2fe1ccbd26fdaf387f5f47c7275fedf8cce89f99446cf97
+ # via sphinx
+pygments==2.15.0 \
+ --hash=sha256:77a3299119af881904cd5ecd1ac6a66214b6e9bed1f2db16993b54adede64094 \
+ --hash=sha256:f7e36cffc4c517fbc252861b9a6e4644ca0e5abadf9a113c72d1358ad09b9500
+ # via sphinx
+pyyaml==6.0 \
+ --hash=sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf \
+ --hash=sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293 \
+ --hash=sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b \
+ --hash=sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57 \
+ --hash=sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b \
+ --hash=sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4 \
+ --hash=sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07 \
+ --hash=sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba \
+ --hash=sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9 \
+ --hash=sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287 \
+ --hash=sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513 \
+ --hash=sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0 \
+ --hash=sha256:432557aa2c09802be39460360ddffd48156e30721f5e8d917f01d31694216782 \
+ --hash=sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0 \
+ --hash=sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92 \
+ --hash=sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f \
+ --hash=sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2 \
+ --hash=sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc \
+ --hash=sha256:81957921f441d50af23654aa6c5e5eaf9b06aba7f0a19c18a538dc7ef291c5a1 \
+ --hash=sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c \
+ --hash=sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86 \
+ --hash=sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4 \
+ --hash=sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c \
+ --hash=sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34 \
+ --hash=sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b \
+ --hash=sha256:afa17f5bc4d1b10afd4466fd3a44dc0e245382deca5b3c353d8b757f9e3ecb8d \
+ --hash=sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c \
+ --hash=sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb \
+ --hash=sha256:bfaef573a63ba8923503d27530362590ff4f576c626d86a9fed95822a8255fd7 \
+ --hash=sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737 \
+ --hash=sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3 \
+ --hash=sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d \
+ --hash=sha256:d4b0ba9512519522b118090257be113b9468d804b19d63c71dbcf4a48fa32358 \
+ --hash=sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53 \
+ --hash=sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78 \
+ --hash=sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803 \
+ --hash=sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a \
+ --hash=sha256:dbad0e9d368bb989f4515da330b88a057617d16b6a8245084f1b05400f24609f \
+ --hash=sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174 \
+ --hash=sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5
+ # via myst-parser
+requests==2.28.2 \
+ --hash=sha256:64299f4909223da747622c030b781c0d7811e359c37124b4bd368fb8c6518baa \
+ --hash=sha256:98b1b2782e3c6c4904938b84c0eb932721069dfdb9134313beff7c83c2df24bf
+ # via sphinx
+snowballstemmer==2.2.0 \
+ --hash=sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1 \
+ --hash=sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a
+ # via sphinx
+sphinx==6.1.3 \
+ --hash=sha256:0dac3b698538ffef41716cf97ba26c1c7788dba73ce6f150c1ff5b4720786dd2 \
+ --hash=sha256:807d1cb3d6be87eb78a381c3e70ebd8d346b9a25f3753e9947e866b2786865fc
+ # via
+ # -r docs/requirements.in
+ # myst-parser
+ # sphinx-rtd-theme
+ # sphinxcontrib-jquery
+sphinx-rtd-theme==1.2.0 \
+ --hash=sha256:a0d8bd1a2ed52e0b338cbe19c4b2eef3c5e7a048769753dac6a9f059c7b641b8 \
+ --hash=sha256:f823f7e71890abe0ac6aaa6013361ea2696fc8d3e1fa798f463e82bdb77eeff2
+ # via -r docs/requirements.in
+sphinxcontrib-applehelp==1.0.4 \
+ --hash=sha256:29d341f67fb0f6f586b23ad80e072c8e6ad0b48417db2bde114a4c9746feb228 \
+ --hash=sha256:828f867945bbe39817c210a1abfd1bc4895c8b73fcaade56d45357a348a07d7e
+ # via sphinx
+sphinxcontrib-devhelp==1.0.2 \
+ --hash=sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e \
+ --hash=sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4
+ # via sphinx
+sphinxcontrib-htmlhelp==2.0.1 \
+ --hash=sha256:0cbdd302815330058422b98a113195c9249825d681e18f11e8b1f78a2f11efff \
+ --hash=sha256:c38cb46dccf316c79de6e5515e1770414b797162b23cd3d06e67020e1d2a6903
+ # via sphinx
+sphinxcontrib-jquery==4.1 \
+ --hash=sha256:1620739f04e36a2c779f1a131a2dfd49b2fd07351bf1968ced074365933abc7a \
+ --hash=sha256:f936030d7d0147dd026a4f2b5a57343d233f1fc7b363f68b3d4f1cb0993878ae
+ # via sphinx-rtd-theme
+sphinxcontrib-jsmath==1.0.1 \
+ --hash=sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178 \
+ --hash=sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8
+ # via sphinx
+sphinxcontrib-qthelp==1.0.3 \
+ --hash=sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72 \
+ --hash=sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6
+ # via sphinx
+sphinxcontrib-serializinghtml==1.1.5 \
+ --hash=sha256:352a9a00ae864471d3a7ead8d7d79f5fc0b57e8b3f95e9867eb9eb28999b92fd \
+ --hash=sha256:aa5f6de5dfdf809ef505c4895e51ef5c9eac17d0f287933eb49ec495280b6952
+ # via sphinx
+urllib3==1.26.15 \
+ --hash=sha256:8a388717b9476f934a21484e8c8e61875ab60644d29b9b39e11e4b9dc1c6b305 \
+ --hash=sha256:aa751d169e23c7479ce47a0cb0da579e3ede798f994f5816a74e4f4500dcea42
+ # via requests
diff --git a/docs/run_sphinx_build.sh b/docs/run_sphinx_build.sh
new file mode 100755
index 0000000..dea8984
--- /dev/null
+++ b/docs/run_sphinx_build.sh
@@ -0,0 +1,48 @@
+#!/bin/bash
+#
+# NOTE: This is meant to be run using `bazel run`. Directly running it
+# won't work.
+#
+# Build docs for Sphinx. This is usually run by the readthedocs build process.
+#
+# It can also be run locally during development using Bazel, in which case,
+# it will run Sphinx and start a local webserver to serve HTML.
+#
+# To make the local devx nicer, run it using ibazel, and it will automatically
+# update docs:
+# ibazel run //docs:run_sphinx_build
+
+set -e
+
+if [[ -z "$BUILD_WORKSPACE_DIRECTORY" ]]; then
+ echo "ERROR: Must be run using bazel run"
+ exit 1
+fi
+
+sphinx=$(pwd)/$1
+shift
+
+crossrefs=$1
+shift
+
+dest_dir="$BUILD_WORKSPACE_DIRECTORY/docs/source/api"
+mkdir -p "$dest_dir"
+for path in "$@"; do
+ dest="$dest_dir/$(basename $path)"
+ if [[ -e $dest ]]; then
+ chmod +w $dest
+ fi
+ cat $path $crossrefs > $dest
+done
+
+if [[ -z "$READTHEDOCS" ]]; then
+ sourcedir="$BUILD_WORKSPACE_DIRECTORY/docs/source"
+ outdir="$BUILD_WORKSPACE_DIRECTORY/docs/_build"
+  # This prevents stale or since-deleted files from being processed.
+ rm -fr "$outdir"
+ "$sphinx" -T -b html "$sourcedir" "$outdir"
+
+ echo "HTML built, to view, run:"
+ echo "python3 -m http.server --directory $outdir"
+ python3 -m http.server --directory "$outdir"
+fi
diff --git a/docs/source/_static/css/custom.css b/docs/source/_static/css/custom.css
new file mode 100644
index 0000000..c97d2f5
--- /dev/null
+++ b/docs/source/_static/css/custom.css
@@ -0,0 +1,34 @@
+.wy-nav-content {
+ max-width: 70%;
+}
+
+.starlark-object {
+ border: thin solid grey;
+ margin-bottom: 1em;
+}
+
+.starlark-object h2 {
+ background-color: #e7f2fa;
+ border-bottom: thin solid grey;
+ padding-left: 0.5ex;
+}
+
+.starlark-object>p, .starlark-object>dl {
+ /* Prevent the words from touching the border line */
+ padding-left: 0.5ex;
+}
+
+.starlark-signature {
+ font-family: monospace;
+}
+
+/* Fixup the headerlinks in param names */
+.starlark-object dt a {
+ /* Offset the link icon to be outside the colon */
+ position: relative;
+ right: -1ex;
+ /* Remove the empty space between the param name and colon */
+ width: 0;
+ /* Override the .headerlink margin */
+ margin-left: 0 !important;
+}
diff --git a/docs/source/analysis_tests.md b/docs/source/analysis_tests.md
new file mode 100644
index 0000000..192b319
--- /dev/null
+++ b/docs/source/analysis_tests.md
@@ -0,0 +1,251 @@
+# Analysis Tests
+
+Analysis tests are the typical way to test rule behavior. They allow observing
+behavior about a rule that isn't visible to a regular test as well as modifying
+Bazel configuration state to test rule behavior for e.g. different platforms.
+
+If you've ever wanted to verify...
+ * A certain combination of flags
+ * Building for another OS
+ * That certain providers are returned
+ * That aspects behaved a certain way
+
+Or other observable information, then an analysis test does that.
+
+## Quick start
+
+For a quick copy/paste start, create a `.bzl` file with your test code, and a
+`BUILD.bazel` file to load your tests and declare them. Here's a skeleton:
+
+```
+# BUILD
+load(":my_tests.bzl", "my_test_suite")
+
+my_test_suite(name="my_test_suite")
+```
+
+```
+# my_tests.bzl
+
+load("@rules_testing//lib:analysis_test.bzl", "test_suite", "analysis_test")
+load("@rules_testing//lib:util.bzl", "util")
+
+def _test_hello(name):
+ util.helper_target(
+ native.filegroup,
+ name = name + "_subject",
+ srcs = ["hello_world.txt"],
+ )
+ analysis_test(
+ name = name,
+ impl = _test_hello_impl,
+ target = name + "_subject"
+ )
+
+def _test_hello_impl(env, target):
+ env.expect.that_target(target).default_outputs().contains(
+ "hello_world.txt"
+ )
+
+def my_test_suite(name):
+ test_suite(
+ name = name,
+ tests = [
+ _test_hello,
+ ]
+ )
+```
+
+## Arranging the test
+
+The arrange part of a test defines a target using the rule under test and sets
+up its dependencies. This is done by writing a macro, which runs during the
+loading phase, that instantiates the target under test and dependencies. All the
+targets taking part in the arrangement should be tagged with `manual` so that
+they are ignored by common build patterns (e.g. `//...` or `foo:all`).
+
+Example:
+
+```python
+load("@rules_proto/defs:proto_library.bzl", "proto_library")
+
+
+def _test_basic(name):
+ """Verifies basic behavior of a proto_library rule."""
+ # (1) Arrange
+ proto_library(name=name + '_foo', srcs=["foo.proto"], deps=[name + "_bar"], tags=["manual"])
+ proto_library(name=name + '_bar', srcs=["bar.proto"], tags=["manual"])
+
+ # (2) Act
+ ...
+```
+
+TIP: Source files aren't required to exist. This is because the analysis
+phase only records the path to source files; they aren't read until after the
+analysis phase. The macro function should be named after the behavior being
+tested (e.g. `_test_frob_compiler_passed_qux_flag`). The setup targets should
+follow the
+[macro naming conventions](https://bazel.build/rules/macros#conventions), that
+is all targets should include the name argument as a prefix -- this helps tests
+avoid creating conflicting names.
+
+<!-- TODO(ilist): Mocking implicit dependencies -->
+
+### Limitations
+
+Bazel limits the number of transitive dependencies that can be used in the
+setup. The limit is controlled by
+[`--analysis_testing_deps_limit`](https://bazel.build/reference/command-line-reference#flag--analysis_testing_deps_limit)
+flag.
+
+Mocking toolchains (adding a toolchain used only in the test) is not possible at
+the moment.
+
+## Running the analysis phase
+
+The act part runs the analysis phase for a specific target and calls a user
+supplied function. All of the work is done by Bazel and the framework. Use
+`analysis_test` macro to pass in the target to analyse and a function that will
+be called with the analysis results:
+
+```python
+load("@rules_testing//lib:analysis_test.bzl", "analysis_test")
+
+
+def _test_basic(name):
+ ...
+
+ # (2) Act
+ analysis_test(name, target=name + "_foo", impl=_test_basic)
+```
+
+<!-- TODO(ilist): Setting configuration flags -->
+
+## Assertions
+
+The assert function (in example `_test_basic`) gets `env` and `target` as
+parameters, where...
+ * `env` is information about the overall build and test
+ * `target` is the target under test (as specified in the `target` attribute
+ during the arrange step).
+
+The `env.expect` attribute provides a `truth.Expect` object, which allows
+writing fluent asserts:
+
+```python
+
+
+def _test_basic(env, target):
+ env.expect.assert_that(target).runfiles().contains_at_least("foo.txt")
+ env.expect.assert_that(target).action_generating("foo.txt").contains_flag_values("--a")
+
+```
+
+Note that you aren't _required_ to use `env.expect`. If you want to perform
+asserts another way, then `env.fail()` can be called to register any failures.
+
+<!-- TODO(ilist): ### Assertions on providers -->
+<!-- TODO(ilist): ### Assertions on actions -->
+<!-- TODO(ilist): ## testing aspects -->
+
+
+## Collecting the tests together
+
+Use the `test_suite` function to collect all tests together:
+
+```python
+load("@rules_testing//lib:analysis_test.bzl", "test_suite")
+
+
+def proto_library_test_suite(name):
+ test_suite(
+ name=name,
+ tests=[
+ _test_basic,
+ _test_advanced,
+ ]
+ )
+```
+
+In your `BUILD` file instantiate the suite:
+
+```
+load("//path/to/your/package:proto_library_tests.bzl", "proto_library_test_suite")
+proto_library_test_suite(name = "proto_library_test_suite")
+```
+
+The function instantiates all test macros and wraps them into a single target. This removes the need
+to load and call each test separately in the `BUILD` file.
+
+### Advanced test collection, reuse, and parameterizing
+
+If you have many tests and rules and need to re-use them between each other,
+then there are a couple tricks to make it easy:
+
+* Tests aren't required to all be in the same file. So long as you can load the
+ arrange function and pass it to `test_suite`, then you can split tests into
+ multiple files for reuse.
+* Similarly, arrange functions themselves aren't required to take only a `name`
+ argument -- only the functions passed to `test_suite.test` require this.
+
+By using lists and lambdas, we can define collections of tests and have multiple
+rules reuse them:
+
+```
+# base_tests.bzl
+
+_base_tests = []
+
+def _test_common(name, rule_under_test):
+ rule_under_test(...)
+ analysis_test(...)
+
+def _test_common_impl(env, target):
+ env.expect.that_target(target).contains(...)
+
+_base_tests.append(_test_common)
+
+def create_base_tests(rule_under_test):
+ return [
+ lambda name: test(name=name, rule_under_test=rule_under_test)
+ for test in _base_tests
+ ]
+
+# my_binary_tests.bzl
+load("//my/my_binary.bzl", "my_binary")
+load(":base_tests.bzl", "create_base_tests")
+load("@rules_testing//lib:analysis_test.bzl", "test_suite")
+
+def my_binary_suite(name):
+ test_suite(
+ name = name,
+ tests = create_base_tests(my_binary)
+ )
+
+# my_test_tests.bzl
+load("//my/my_test.bzl", "my_test")
+load(":base_tests.bzl", "create_base_tests")
+load("@rules_testing//lib:analysis_test.bzl", "test_suite")
+
+def my_test_suite(name):
+ test_suite(
+ name = name,
+ tests = create_base_tests(my_test)
+ )
+```
+
+## Tips and best practices
+
+* Use private names for your tests, `def _test_foo`. This allows buildifier to
+ detect when you've forgotten to put a test in the `tests` attribute. The
+  framework will strip leading underscores from the test name.
+* Tag the arranged inputs of your tests with `tags=["manual"]`; the
+ `util.helper_target` function helps with this. This prevents common build
+ patterns (e.g. `bazel test //...` or `bazel test :all`) from trying to
+ build them.
+* Put each rule's tests into their own directory with their own BUILD
+ file. This allows better isolation between the rules' test suites in several ways:
+ * When reusing tests, target names are less likely to collide.
+ * During the edit-run cycle, modifications to verify one rule that would
+ break another rule can be ignored until you're ready to test the other
+ rule.
diff --git a/docs/source/api/index.md b/docs/source/api/index.md
new file mode 100644
index 0000000..b180478
--- /dev/null
+++ b/docs/source/api/index.md
@@ -0,0 +1,7 @@
+# API Reference
+
+```{toctree}
+:glob:
+
+**
+```
diff --git a/docs/source/conf.py b/docs/source/conf.py
new file mode 100644
index 0000000..993d6f7
--- /dev/null
+++ b/docs/source/conf.py
@@ -0,0 +1,73 @@
+# Configuration file for the Sphinx documentation builder.
+
+# -- Project information
+project = 'rules_testing'
+copyright = '2023, The Bazel Authors'
+author = 'Bazel'
+
+# Readthedocs fills these in
+release = '0.0.0'
+version = release
+
+# -- General configuration
+
+# Any extensions here not built into Sphinx must also be added to
+# the dependencies of Bazel and Readthedocs.
+# * //docs:requirements.in
+# * Regenerate //docs:requirements.txt (used by readthedocs)
+# * Add the dependencies to //docs:sphinx_build
+extensions = [
+ 'sphinx.ext.duration',
+ 'sphinx.ext.doctest',
+ 'sphinx.ext.autodoc',
+ 'sphinx.ext.autosummary',
+ 'sphinx.ext.intersphinx',
+ 'sphinx.ext.autosectionlabel',
+ 'myst_parser',
+ 'sphinx_rtd_theme', # Necessary to get jquery to make flyout work
+]
+
+intersphinx_mapping = {
+}
+
+intersphinx_disabled_domains = ['std']
+
+# Prevent local refs from inadvertently linking elsewhere, per
+# https://docs.readthedocs.io/en/stable/guides/intersphinx.html#using-intersphinx
+intersphinx_disabled_reftypes = ["*"]
+
+templates_path = ['_templates']
+
+# -- Options for HTML output
+
+html_theme = 'sphinx_rtd_theme'
+
+# See https://sphinx-rtd-theme.readthedocs.io/en/stable/configuring.html
+# for options
+html_theme_options = {}
+
+# Keep this in sync with the stardoc templates
+html_permalinks_icon = '¶'
+
+# See https://myst-parser.readthedocs.io/en/latest/syntax/optional.html
+# for additional extensions.
+myst_enable_extensions = [
+ "fieldlist",
+ "attrs_block",
+ "attrs_inline",
+ "colon_fence",
+ "deflist",
+]
+
+# These folders are copied to the documentation's HTML output
+html_static_path = ['_static']
+
+# These paths are either relative to html_static_path
+# or fully qualified paths (eg. https://...)
+html_css_files = [
+ 'css/custom.css',
+]
+
+# -- Options for EPUB output
+epub_show_urls = 'footnote'
+
diff --git a/docs/source/guides.md b/docs/source/guides.md
new file mode 100644
index 0000000..94e2c7a
--- /dev/null
+++ b/docs/source/guides.md
@@ -0,0 +1,14 @@
+# Guides
+
+## Analysis tests
+
+Analysis testing means testing something during the analysis phase of Bazel
+execution -- this is when rule logic is run.
+
+See [Analysis testing](analysis_tests.md) for how to write analysis tests.
+
+## Fluent asserts
+
+Included in rules_testing is a fluent, truth-style asserts library.
+
+See [Truth docs](truth.md) for how to use it.
diff --git a/docs/source/index.md b/docs/source/index.md
new file mode 100644
index 0000000..38cf0c2
--- /dev/null
+++ b/docs/source/index.md
@@ -0,0 +1,49 @@
+# Bazel Rules Testing
+
+rules_testing is a collection of utilities, libraries, and frameworks to make
+testing Starlark and Bazel rules easy and pleasant.
+
+version |version|
+
+## Installation
+
+To use rules_testing, you need to modify `WORKSPACE` or `MODULE.bazel`
+to depend on rules_testing. We recommend using bzlmod because it's simpler.
+
+For bzlmod, add this to your `MODULE.bazel`:
+
+```
+bazel_dep(name = "rules_testing", version = "<VERSION>", dev_dependency=True)
+```
+
+See the [GitHub releases
+page](https://github.com/bazelbuild/rules_testing/releases) for available
+versions.
+
+For `WORKSPACE`, see the [GitHub releases
+page](https://github.com/bazelbuild/rules_testing/releases) for the necessary
+config to copy and paste.
+
+
+## Analysis tests
+
+Analysis testing means testing something during the analysis phase of Bazel
+execution -- this is when rule logic is run.
+
+See [Analysis tests](/analysis_tests.md) for how to write analysis tests.
+
+## Fluent asserts
+
+Included in rules_testing is a fluent, truth-style asserts library.
+
+See [Truth docs](/truth.md) for how to use it.
+
+
+```{toctree}
+:glob:
+:hidden:
+
+self
+*
+api/index
+```
diff --git a/docs/source/truth.md b/docs/source/truth.md
new file mode 100644
index 0000000..9ae1153
--- /dev/null
+++ b/docs/source/truth.md
@@ -0,0 +1,108 @@
+# Truth Guide
+
+Also see: [Truth API reference](api/truth.md)
+
+## What is Truth?
+
+Truth is a style of doing asserts that makes it easy to perform complex
+assertions that are easy to understand and give actionable error messages.
+
+The basic way it works is wrapping a value in a type-specific object that
+provides type-specific assertion methods. This style provides several benefits:
+
+* A fluent API that more directly expresses the assertion
+* More ergonomic assert functions
+* Error messages with more informative context
+* Promotes code reuse at the type level.
+
+## Example Usage
+
+Note that all examples assume usage of the rules_testing `analysis_test`
+framework, but truth itself does not require it.
+
+```
+def test_foo(env, target):
+ subject = env.expect.that_target(target)
+ subject.runfiles().contains_at_least(["foo.txt"])
+ subject.executable().equals("bar.exe")
+
+ subject = env.expect.that_action(...)
+ subject.contains_at_least_args(...)
+```
+
+## Subjects
+
+Subjects are wrappers around a value that provide ways to assert on the value,
+access sub-values of it, or otherwise augment interacting with the wrapped
+value. For example, `TargetSubject` wraps Bazel `Target` objects and
+`RunfilesSubject` wraps Bazel `runfiles` objects. Normally accessing a target's
+runfiles and verifying the runfiles contents would require the verbose
+`target[DefaultInfo].default_runfiles`, plus additional code to convert a
+`runfiles` object's `files`, `symlinks`, `root_symlinks`, and `empty_filenames`
+into a single list to verify. With subject classes, however, it can be concisely
+expressed as `expect.that_target(target).runfiles().contains(path)`.
+
+The Truth library provides subjects for types that are built into Bazel, but
+custom subjects can be implemented to handle custom providers or other objects.
+
+## Predicates
+
+Because Starlark's data model doesn't allow customizing equality checking, some
+subjects allow matching values by using a predicate function. This makes it
+easier to, for example, ignore a platform-specific file extension.
+
+This is implemented using the structural `Matcher` "interface". This is a struct
+that contains the predicate function and a description of what the function
+does, which allows for more intelligible error messages.
+
+A variety of matchers are in `truth.bzl#matching`, but custom matchers can be
+implemented using `matching.custom_matcher`.
+
+## Writing a new Subject
+
+Writing a new Subject involves two basic pieces:
+
+1. Creating a constructor function, e.g. `_foo_subject_new`, that takes the
+ actual value and an `ExpectMeta` object (see `_expect_meta_new()`).
+
+2. Adding a method to `expect` or another Subject class to pass along state and
+ instantiate the new subject; both may be modified if the actual object can
+   be independently created or obtained through another subject.
+
+ For top-level subjects, a method named `that_foo()` should be added to the
+ `expect` class.
+
+ For child-subjects, an appropriately named method should be added to the
+ parent subject, and the parent subject should call `ExpectMeta.derive()` to
+ create a new set of meta data for the child subject.
+
+The assert methods a subject provides are up to the subject, but try to follow
+the naming scheme of other subjects. The purpose of a custom subject is to make
+it easier to write tests that are correct and informative. It's common to have a
+combination of ergonomic asserts for common cases, and delegating to
+child-subjects for other cases.
+
+## Adding asserts to a subject
+
+Fundamentally, an assert method calls `ExpectMeta.add_failure()` to record when
+there is a failure. That method will wire together any surrounding context with
+the provided error message information. Otherwise an assertion is free to
+implement checks how it pleases.
+
+The naming of functions should mostly read naturally, but doesn't need to be
+perfect grammatically. Be aware of ambiguous words like "contains" or "matches".
+For example, `contains_flag("--foo")` -- does this check that "--flag" was
+specified at all (ignoring value), or that it was specified and has no value?
+
+Assertion functions can make use of a variety of helper methods in processing
+values, comparing them, and generating error messages. Helpers of particular
+note are:
+
+* `_check_*`: These functions implement comparison, error formatting, and
+ error reporting.
+* `_compare_*`: These functions implement comparison for different cases
+ and take care of various edge cases.
+* `_format_failure_*`: These functions create human-friendly messages
+ describing both the observed values and the problem with them.
+* `_format_problem_*`: These functions format only the problem identified.
+* `_format_actual_*`: These functions format only the observed values.
diff --git a/docs/sphinx_build.py b/docs/sphinx_build.py
new file mode 100644
index 0000000..a06f380
--- /dev/null
+++ b/docs/sphinx_build.py
@@ -0,0 +1,4 @@
+import sys
+from sphinx.cmd.build import main
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/e2e/bzlmod/BUILD.bazel b/e2e/bzlmod/BUILD.bazel
new file mode 100644
index 0000000..5b8c49a
--- /dev/null
+++ b/e2e/bzlmod/BUILD.bazel
@@ -0,0 +1,9 @@
+# Basic tests to verify things work under bzlmod
+
+load(":tests.bzl", "bzlmod_test_suite")
+
+bzlmod_test_suite(name = "bzlmod_tests")
+
+toolchain_type(
+ name = "fake",
+)
diff --git a/e2e/bzlmod/MODULE.bazel b/e2e/bzlmod/MODULE.bazel
new file mode 100644
index 0000000..3e47836
--- /dev/null
+++ b/e2e/bzlmod/MODULE.bazel
@@ -0,0 +1,11 @@
+module(
+ name = "e2e_bzlmod",
+ version = "0.0.0",
+ compatibility_level = 1,
+)
+
+bazel_dep(name = "rules_testing", version = "0.0.0")
+local_path_override(
+ module_name = "rules_testing",
+ path = "../..",
+)
diff --git a/e2e/bzlmod/WORKSPACE b/e2e/bzlmod/WORKSPACE
new file mode 100644
index 0000000..bb8ca81
--- /dev/null
+++ b/e2e/bzlmod/WORKSPACE
@@ -0,0 +1 @@
+# Empty marker
diff --git a/e2e/bzlmod/tests.bzl b/e2e/bzlmod/tests.bzl
new file mode 100644
index 0000000..12471fe
--- /dev/null
+++ b/e2e/bzlmod/tests.bzl
@@ -0,0 +1,68 @@
+# Copyright 2023 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests for basic bzlmod functionality."""
+
+load("@rules_testing//lib:analysis_test.bzl", "analysis_test", "test_suite")
+load("@rules_testing//lib:util.bzl", "util")
+
+def _simple_test(name):
+ util.helper_target(
+ native.filegroup,
+ name = name + "_subject",
+ srcs = ["src.txt"],
+ data = ["data.txt"],
+ )
+ analysis_test(
+ name = name,
+ impl = _simple_test_impl,
+ target = name + "_subject",
+ )
+
+def _simple_test_impl(env, target):
+ subject = env.expect.that_target(target)
+ subject.default_outputs().contains_exactly(["src.txt"])
+ subject.runfiles().contains_exactly(["{workspace}/data.txt"])
+
+def bzlmod_test_suite(name):
+ test_suite(name = name, tests = [
+ _simple_test,
+ _trigger_toolchains_test,
+ ])
+
+def _needs_toolchain_impl(ctx):
+ # We just need to trigger toolchain resolution, we don't
+ # care about the result.
+ _ = ctx.toolchains["//:fake"] # @unused
+
+_needs_toolchain = rule(
+ implementation = _needs_toolchain_impl,
+ toolchains = [config_common.toolchain_type("//:fake", mandatory = False)],
+)
+
+def _trigger_toolchains_test_impl(env, target):
+ # Building is sufficient evidence of success
+ _ = env, target # @unused
+
+# A regression test for https://github.com/bazelbuild/rules_testing/issues/33
+def _trigger_toolchains_test(name):
+ util.helper_target(
+ _needs_toolchain,
+ name = name + "_subject",
+ )
+ analysis_test(
+ name = name,
+ impl = _trigger_toolchains_test_impl,
+ target = name + "_subject",
+ )
diff --git a/lib/BUILD b/lib/BUILD
new file mode 100644
index 0000000..8d612b3
--- /dev/null
+++ b/lib/BUILD
@@ -0,0 +1,82 @@
+# Copyright 2023 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load("@bazel_skylib//:bzl_library.bzl", "bzl_library")
+
+licenses(["notice"])
+
+package(
+ default_visibility = ["//visibility:private"],
+)
+
+bzl_library(
+ name = "analysis_test_bzl",
+ srcs = ["analysis_test.bzl"],
+ visibility = ["//visibility:public"],
+ deps = [
+ "//lib:truth_bzl",
+ ],
+)
+
+bzl_library(
+ name = "truth_bzl",
+ srcs = ["truth.bzl"],
+ visibility = ["//visibility:public"],
+ deps = [
+ "//lib/private:bool_subject_bzl",
+ "//lib/private:collection_subject_bzl",
+ "//lib/private:depset_file_subject_bzl",
+ "//lib/private:expect_bzl",
+ "//lib/private:int_subject_bzl",
+ "//lib/private:label_subject_bzl",
+ "//lib/private:matching_bzl",
+ ],
+)
+
+bzl_library(
+ name = "util_bzl",
+ srcs = ["util.bzl"],
+ visibility = ["//visibility:public"],
+ deps = [
+ "@bazel_skylib//lib:paths",
+ "@bazel_skylib//lib:types",
+ "@bazel_skylib//lib:unittest",
+ "@bazel_skylib//rules:write_file",
+ ],
+)
+
+filegroup(
+ name = "test_deps",
+ testonly = True,
+ srcs = [
+ "BUILD",
+ ":analysis_test_bzl",
+ ":truth_bzl",
+ ":util_bzl",
+ ],
+ visibility = [
+ "//tools/build_defs/python/tests/base_rules:__pkg__",
+ ],
+)
+
+exports_files(
+ srcs = [
+ "analysis_test.bzl",
+ "truth.bzl",
+ "util.bzl",
+ ],
+ visibility = [
+ "//docgen:__pkg__",
+ ],
+)
diff --git a/lib/analysis_test.bzl b/lib/analysis_test.bzl
new file mode 100644
index 0000000..02164a4
--- /dev/null
+++ b/lib/analysis_test.bzl
@@ -0,0 +1,253 @@
+# Copyright 2022 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""# Analysis test
+
+Support for testing analysis phase logic, such as rules.
+"""
+
+load("//lib:truth.bzl", "truth")
+load("//lib:util.bzl", "recursive_testing_aspect", "testing_aspect")
+
+def _impl_function_name(impl):
+ """Derives the name of the given rule implementation function.
+
+ This can be used for better test feedback.
+
+ Args:
+ impl: the rule implementation function
+
+ Returns:
+ The name of the given function
+ """
+
+ # Starlark currently stringifies a function as "<function NAME>", so we use
+ # that knowledge to parse the "NAME" portion out. If this behavior ever
+ # changes, we'll need to update this.
+ # TODO(bazel-team): Expose a ._name field on functions to avoid this.
+ impl_name = str(impl)
+ impl_name = impl_name.partition("<function ")[-1]
+ impl_name = impl_name.rpartition(">")[0]
+ impl_name = impl_name.partition(" ")[0]
+
+ # Strip leading/trailing underscores so that test functions can
+ # have private names. This better allows unused tests to be flagged by
+ # buildifier (indicating a bug or code to delete)
+ return impl_name.strip("_")
+
+def _fail(env, msg):
+ """Unconditionally causes the current test to fail.
+
+ Args:
+        env: The test environment; a struct with `ctx` and `failures` fields.
+ msg: The message to log describing the failure.
+ """
+ full_msg = "In test %s: %s" % (env.ctx.attr._impl_name, msg)
+
+ # There isn't a better way to output the message in Starlark, so use print.
+ # buildifier: disable=print
+ print(full_msg)
+ env.failures.append(full_msg)
+
+def _begin_analysis_test(ctx):
+ """Begins a unit test.
+
+ This should be the first function called in a unit test implementation
+ function. It initializes a "test environment" that is used to collect
+ assertion failures so that they can be reported and logged at the end of the
+ test.
+
+ Args:
+ ctx: The Starlark context. Pass the implementation function's `ctx` argument
+ in verbatim.
+
+ Returns:
+ An analysis_test "environment" struct. The following fields are public:
+ * ctx: the underlying rule ctx
+ * expect: a truth Expect object (see truth.bzl).
+ * fail: A function to register failures for later reporting.
+
+ Other attributes are private, internal details and may change at any time. Do not rely
+ on internal details.
+ """
+ target = getattr(ctx.attr, "target")
+ target = target[0] if type(target) == type([]) else target
+ failures = []
+ failures_env = struct(
+ ctx = ctx,
+ failures = failures,
+ )
+ truth_env = struct(
+ ctx = ctx,
+ fail = lambda msg: _fail(failures_env, msg),
+ )
+ analysis_test_env = struct(
+ ctx = ctx,
+ # Visibility: package; only exposed so that our own tests can verify
+ # failure behavior.
+ _failures = failures,
+ fail = truth_env.fail,
+ expect = truth.expect(truth_env),
+ )
+ return analysis_test_env, target
+
+def _end_analysis_test(env):
+ """Ends an analysis test and logs the results.
+
+ This must be called and returned at the end of an analysis test implementation function so
+ that the results are reported.
+
+ Args:
+        env: The test environment returned by `_begin_analysis_test`.
+
+ Returns:
+ A list of providers needed to automatically register the analysis test result.
+ """
+ return [AnalysisTestResultInfo(
+ success = (len(env._failures) == 0),
+ message = "\n".join(env._failures),
+ )]
+
+def analysis_test(
+ name,
+ target,
+ impl,
+ expect_failure = False,
+ attrs = {},
+ fragments = [],
+ config_settings = {},
+ extra_target_under_test_aspects = [],
+ collect_actions_recursively = False):
+ """Creates an analysis test from its implementation function.
+
+ An analysis test verifies the behavior of a "real" rule target by examining
+ and asserting on the providers given by the real target.
+
+ Each analysis test is defined in an implementation function. This function handles
+ the boilerplate to create and return a test target and captures the
+ implementation function's name so that it can be printed in test feedback.
+
+ An example of an analysis test:
+
+ ```
+ def basic_test(name):
+ my_rule(name = name + "_subject", ...)
+
+        analysis_test(name = name, target = name + "_subject", impl = _your_test)
+
+ def _your_test(env, target, actions):
+ env.assert_that(target).runfiles().contains_at_least("foo.txt")
+ env.assert_that(find_action(actions, generating="foo.txt")).argv().contains("--a")
+ ```
+
+ Args:
+ name: Name of the target. It should be a Starlark identifier, matching pattern
+ '[A-Za-z_][A-Za-z0-9_]*'.
+ target: The target to test.
+ impl: The implementation function of the unit test.
+ expect_failure: If true, the analysis test will expect the target
+ to fail. Assertions can be made on the underlying failure using truth.expect_failure
+ attrs: An optional dictionary to supplement the attrs passed to the
+ unit test's `rule()` constructor.
+ fragments: An optional list of fragment names that can be used to give rules access to
+ language-specific parts of configuration.
+ config_settings: A dictionary of configuration settings to change for the target under
+ test and its dependencies. This may be used to essentially change 'build flags' for
+ the target under test, and may thus be utilized to test multiple targets with different
+            flags in a single build. NOTE: When passing values that are labels (e.g. for the
+ --platforms flag), it's suggested to always explicitly call `Label()`
+ on the value before passing it in. This ensures the label is resolved
+ in your repository's context, not rule_testing's.
+ extra_target_under_test_aspects: An optional list of aspects to apply to the target_under_test
+ in addition to those set up by default for the test harness itself.
+ collect_actions_recursively: If true, runs testing_aspect over all attributes, otherwise
+ it is only applied to the target under test.
+
+ Returns:
+ (None)
+ """
+
+ attrs = dict(attrs)
+ attrs["_impl_name"] = attr.string(default = _impl_function_name(impl))
+
+ changed_settings = dict(config_settings)
+ if expect_failure:
+ changed_settings["//command_line_option:allow_analysis_failures"] = "True"
+
+ target_attr_kwargs = {}
+ if changed_settings:
+ test_transition = analysis_test_transition(
+ settings = changed_settings,
+ )
+ target_attr_kwargs["cfg"] = test_transition
+
+ attrs["target"] = attr.label(
+ aspects = [recursive_testing_aspect if collect_actions_recursively else testing_aspect] + extra_target_under_test_aspects,
+ mandatory = True,
+ **target_attr_kwargs
+ )
+
+ def wrapped_impl(ctx):
+ env, target = _begin_analysis_test(ctx)
+ impl(env, target)
+ return _end_analysis_test(env)
+
+ return testing.analysis_test(
+ name,
+ wrapped_impl,
+ attrs = attrs,
+ fragments = fragments,
+ attr_values = {"target": target},
+ )
+
+def test_suite(name, tests, test_kwargs = {}):
+ """Instantiates given test macros and gathers their main targets into a `test_suite`.
+
+ Use this function to wrap all tests into a single target.
+
+ ```
+ def simple_test_suite(name):
+ test_suite(
+ name = name,
+ tests = [
+ your_test,
+ your_other_test,
+ ]
+ )
+ ```
+
+ Then, in your `BUILD` file, simply load the macro and invoke it to have all
+ of the targets created:
+
+ ```
+ load("//path/to/your/package:tests.bzl", "simple_test_suite")
+ simple_test_suite(name = "simple_test_suite")
+ ```
+
+ Args:
+ name: The name of the `test_suite` target.
+ tests: A list of test macros, each taking `name` as a parameter, which
+ will be passed the computed name of the test.
+ test_kwargs: Additional kwargs to pass onto each test function call.
+ """
+ test_targets = []
+ for call in tests:
+ test_name = _impl_function_name(call)
+ call(name = test_name, **test_kwargs)
+ test_targets.append(test_name)
+
+ native.test_suite(
+ name = name,
+ tests = test_targets,
+ )
diff --git a/lib/private/BUILD b/lib/private/BUILD
new file mode 100644
index 0000000..6372128
--- /dev/null
+++ b/lib/private/BUILD
@@ -0,0 +1,246 @@
+# Copyright 2023 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load("@bazel_skylib//:bzl_library.bzl", "bzl_library")
+
+licenses(["notice"])
+
+package(
+ default_applicable_licenses = ["//:package_license"],
+ default_visibility = ["//:__subpackages__"],
+)
+
+# Necessary for documentation generation
+exports_files(
+ glob(["*.bzl"]),
+)
+
+bzl_library(
+ name = "matching_bzl",
+ srcs = ["matching.bzl"],
+)
+
+bzl_library(
+ name = "int_subject_bzl",
+ srcs = ["int_subject.bzl"],
+ deps = [
+ ":check_util_bzl",
+ ":truth_common_bzl",
+ "@bazel_skylib//lib:types",
+ ],
+)
+
+bzl_library(
+ name = "bool_subject_bzl",
+ srcs = ["bool_subject.bzl"],
+ deps = [
+ ":check_util_bzl",
+ ],
+)
+
+bzl_library(
+ name = "collection_subject_bzl",
+ srcs = ["collection_subject.bzl"],
+ deps = [
+ ":check_util_bzl",
+ ":failure_messages_bzl",
+ ":int_subject_bzl",
+ ":matching_bzl",
+ ":truth_common_bzl",
+ ],
+)
+
+bzl_library(
+ name = "label_subject_bzl",
+ srcs = ["label_subject.bzl"],
+ deps = [
+ ":check_util_bzl",
+ ":truth_common_bzl",
+ "@bazel_skylib//lib:types",
+ ],
+)
+
+bzl_library(
+ name = "ordered_bzl",
+ srcs = ["ordered.bzl"],
+ deps = [
+ ],
+)
+
+bzl_library(
+ name = "truth_common_bzl",
+ srcs = [
+ "truth_common.bzl",
+ ],
+ deps = ["@bazel_skylib//lib:types"],
+)
+
+bzl_library(
+ name = "check_util_bzl",
+ srcs = [
+ "check_util.bzl",
+ ],
+ deps = [
+ ":compare_util_bzl",
+ ":failure_messages_bzl",
+ ":matching_bzl",
+ ":ordered_bzl",
+ ":truth_common_bzl",
+ "@bazel_skylib//lib:types",
+ ],
+)
+
+bzl_library(
+ name = "failure_messages_bzl",
+ srcs = ["failure_messages.bzl"],
+ deps = [":truth_common_bzl"],
+)
+
+bzl_library(
+ name = "compare_util_bzl",
+ srcs = ["compare_util.bzl"],
+ deps = [":truth_common_bzl"],
+)
+
+bzl_library(
+ name = "expect_meta_bzl",
+ srcs = ["expect_meta.bzl"],
+ deps = ["@bazel_skylib//lib:unittest"],
+)
+
+bzl_library(
+ name = "depset_file_subject_bzl",
+ srcs = ["depset_file_subject.bzl"],
+ deps = [
+ ":check_util_bzl",
+ ":collection_subject_bzl",
+ ":failure_messages_bzl",
+ ":matching_bzl",
+ ":truth_common_bzl",
+ "//lib:util_bzl",
+ ],
+)
+
+bzl_library(
+ name = "instrumented_files_info_subject_bzl",
+ srcs = ["instrumented_files_info_subject.bzl"],
+ deps = [":depset_file_subject_bzl"],
+)
+
+bzl_library(
+ name = "str_subject_bzl",
+ srcs = ["str_subject.bzl"],
+ deps = [
+ ":check_util_bzl",
+ ":collection_subject_bzl",
+ ],
+)
+
+bzl_library(
+ name = "file_subject_bzl",
+ srcs = ["file_subject.bzl"],
+ deps = [
+ ":str_subject_bzl",
+ ],
+)
+
+bzl_library(
+ name = "dict_subject_bzl",
+ srcs = ["dict_subject.bzl"],
+ deps = [
+ ":collection_subject_bzl",
+ ":compare_util_bzl",
+ ":failure_messages_bzl",
+ ],
+)
+
+bzl_library(
+ name = "action_subject_bzl",
+ srcs = ["action_subject.bzl"],
+ deps = [
+ ":collection_subject_bzl",
+ ":depset_file_subject_bzl",
+ ":dict_subject_bzl",
+ ":failure_messages_bzl",
+ ":str_subject_bzl",
+ ":truth_common_bzl",
+ ],
+)
+
+bzl_library(
+ name = "execution_info_subject_bzl",
+ srcs = ["execution_info_subject.bzl"],
+ deps = [
+ ":dict_subject_bzl",
+ ":str_subject_bzl",
+ ],
+)
+
+bzl_library(
+ name = "run_environment_info_subject_bzl",
+ srcs = ["run_environment_info_subject.bzl"],
+ deps = [
+ ":collection_subject_bzl",
+ ":dict_subject_bzl",
+ ],
+)
+
+bzl_library(
+ name = "runfiles_subject_bzl",
+ srcs = ["runfiles_subject.bzl"],
+ deps = [
+ ":check_util_bzl",
+ ":collection_subject_bzl",
+ ":failure_messages_bzl",
+ ":matching_bzl",
+ ":truth_common_bzl",
+ "//lib:util_bzl",
+ ],
+)
+
+bzl_library(
+ name = "target_subject_bzl",
+ srcs = ["target_subject.bzl"],
+ deps = [
+ ":action_subject_bzl",
+ ":bool_subject_bzl",
+ ":collection_subject_bzl",
+ ":depset_file_subject_bzl",
+ ":execution_info_subject_bzl",
+ ":file_subject_bzl",
+ ":instrumented_files_info_subject_bzl",
+ ":label_subject_bzl",
+ ":run_environment_info_subject_bzl",
+ ":runfiles_subject_bzl",
+ ":truth_common_bzl",
+ "//lib:util_bzl",
+ ],
+)
+
+bzl_library(
+ name = "expect_bzl",
+ srcs = ["expect.bzl"],
+ deps = [
+ ":action_subject_bzl",
+ ":bool_subject_bzl",
+ ":collection_subject_bzl",
+ ":depset_file_subject_bzl",
+ ":dict_subject_bzl",
+ ":expect_meta_bzl",
+ ":file_subject_bzl",
+ ":int_subject_bzl",
+ ":str_subject_bzl",
+ ":target_subject_bzl",
+ ],
+)
diff --git a/lib/private/action_subject.bzl b/lib/private/action_subject.bzl
new file mode 100644
index 0000000..7de79fc
--- /dev/null
+++ b/lib/private/action_subject.bzl
@@ -0,0 +1,377 @@
+# Copyright 2023 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""# ActionSubject"""
+
+load(":collection_subject.bzl", "CollectionSubject")
+load(":depset_file_subject.bzl", "DepsetFileSubject")
+load(":dict_subject.bzl", "DictSubject")
+load(
+ ":failure_messages.bzl",
+ "format_failure_missing_all_values",
+ "format_failure_unexpected_value",
+ "format_failure_unexpected_values",
+)
+load(":str_subject.bzl", "StrSubject")
+load(":truth_common.bzl", "enumerate_list_as_lines", "mkmethod")
+
+def _action_subject_new(action, meta):
+ """Creates an "ActionSubject" struct.
+
+ Method: ActionSubject.new
+
+ Example usage:
+
+ expect(env).that_action(action).not_contains_arg("foo")
+
+ Args:
+ action: ([`Action`]) value to check against.
+ meta: ([`ExpectMeta`]) of call chain information.
+
+ Returns:
+ [`ActionSubject`] object.
+ """
+
+ # buildifier: disable=uninitialized
+ self = struct(
+ action = action,
+ meta = meta,
+ # Dict[str, list[str]] of flags. The keys must be in the same order
+ # as found in argv to allow ordering asserts of them.
+ parsed_flags = _action_subject_parse_flags(action.argv),
+ )
+ public = struct(
+ # keep sorted start
+ actual = action,
+ argv = mkmethod(self, _action_subject_argv),
+ contains_at_least_args = mkmethod(self, _action_subject_contains_at_least_args),
+ contains_at_least_inputs = mkmethod(self, _action_subject_contains_at_least_inputs),
+ contains_flag_values = mkmethod(self, _action_subject_contains_flag_values),
+ contains_none_of_flag_values = mkmethod(self, _action_subject_contains_none_of_flag_values),
+ content = mkmethod(self, _action_subject_content),
+ env = mkmethod(self, _action_subject_env),
+ has_flags_specified = mkmethod(self, _action_subject_has_flags_specified),
+ inputs = mkmethod(self, _action_subject_inputs),
+ mnemonic = mkmethod(self, _action_subject_mnemonic),
+ not_contains_arg = mkmethod(self, _action_subject_not_contains_arg),
+ substitutions = mkmethod(self, _action_subject_substitutions),
+ # keep sorted end
+ )
+ return public
+
+def _action_subject_parse_flags(argv):
+ parsed_flags = {}
+
+ # argv might be none for e.g. builtin actions
+ if argv == None:
+ return parsed_flags
+ for i, arg in enumerate(argv):
+ if not arg.startswith("--"):
+ continue
+ if "=" in arg:
+ name, value = arg.split("=", 1)
+ else:
+ name = arg
+
+ # Handle a flag being the last arg in argv
+ if (i + 1) < len(argv):
+ value = argv[i + 1]
+ else:
+ value = None
+ parsed_flags.setdefault(name, []).append(value)
+ return parsed_flags
+
+def _action_subject_argv(self):
+ """Returns a CollectionSubject for the action's argv.
+
+ Method: ActionSubject.argv
+
+ Returns:
+ [`CollectionSubject`] object.
+ """
+ meta = self.meta.derive("argv()")
+ return CollectionSubject.new(
+ self.action.argv,
+ meta,
+ container_name = "argv",
+ sortable = False,
+ )
+
+def _action_subject_contains_at_least_args(self, args):
+ """Assert that an action contains at least the provided args.
+
+ Method: ActionSubject.contains_at_least_args
+
+ Example usage:
+ expect(env).that_action(action).contains_at_least_args(["foo", "bar"]).
+
+ Args:
+ self: implicitly added.
+ args: ([`list`] of [`str`]) all the args must be in the argv exactly
+ as provided. Multiplicity is respected.
+
+ Returns:
+ [`Ordered`] (see `_ordered_incorrectly_new`).
+ """
+ return CollectionSubject.new(
+ self.action.argv,
+ self.meta,
+ container_name = "argv",
+ element_plural_name = "args",
+ sortable = False, # Preserve argv ordering
+ ).contains_at_least(args)
+
+def _action_subject_not_contains_arg(self, arg):
+ """Assert that an action does not contain an arg.
+
+ Example usage:
+ expect(env).that_action(action).not_contains_arg("should-not-exist")
+
+ Args:
+ self: implicitly added.
+ arg: ([`str`]) the arg that cannot be present in the argv.
+ """
+ if arg in self.action.argv:
+ problem, actual = format_failure_unexpected_value(
+ container_name = "argv",
+ unexpected = arg,
+ actual = self.action.argv,
+ sort = False, # Preserve argv ordering
+ )
+ self.meta.add_failure(problem, actual)
+
+def _action_subject_substitutions(self):
+ """Creates a `DictSubject` to assert on the substitutions dict.
+
+ Method: ActionSubject.substitutions.
+
+ Args:
+ self: implicitly added
+
+ Returns:
+ `DictSubject` struct.
+ """
+ return DictSubject.new(
+ actual = self.action.substitutions,
+ meta = self.meta.derive("substitutions()"),
+ )
+
+def _action_subject_has_flags_specified(self, flags):
+ """Assert that an action has the given flags present (but ignore any value).
+
+ Method: ActionSubject.has_flags_specified
+
+ This parses the argv, assuming the typical formats (`--flag=value`,
+ `--flag value`, and `--flag`). Any of the formats will be matched.
+
+ Example usage, given `argv = ["--a", "--b=1", "--c", "2"]`:
+ expect(env).that_action(action).has_flags_specified([
+ "--a", "--b", "--c"])
+
+ Args:
+ self: implicitly added.
+ flags: ([`list`] of [`str`]) The flags to check for. Include the leading "--".
+ Multiplicity is respected. A flag is considered present if any of
+ these forms are detected: `--flag=value`, `--flag value`, or a lone
+ `--flag`.
+
+ Returns:
+ [`Ordered`] (see `_ordered_incorrectly_new`).
+ """
+ return CollectionSubject.new(
+ # Starlark dict keys maintain insertion order, so it's OK to
+ # pass keys directly and return Ordered.
+ self.parsed_flags.keys(),
+ meta = self.meta,
+ container_name = "argv",
+ element_plural_name = "specified flags",
+ sortable = False, # Preserve argv ordering
+ ).contains_at_least(flags)
+
+def _action_subject_mnemonic(self):
+ """Returns a `StrSubject` for the action's mnemonic.
+
+ Method: ActionSubject.mnemonic
+
+ Returns:
+ [`StrSubject`] object.
+ """
+ return StrSubject.new(
+ self.action.mnemonic,
+ meta = self.meta.derive("mnemonic()"),
+ )
+
+def _action_subject_inputs(self):
+ """Returns a DepsetFileSubject for the action's inputs.
+
+ Method: ActionSubject.inputs
+
+ Returns:
+ `DepsetFileSubject` of the action's inputs.
+ """
+ meta = self.meta.derive("inputs()")
+ return DepsetFileSubject.new(self.action.inputs, meta)
+
+def _action_subject_contains_flag_values(self, flag_values):
+ """Assert that an action's argv has the given ("--flag", "value") entries.
+
+ Method: ActionSubject.contains_flag_values
+
+ This parses the argv, assuming the typical formats (`--flag=value`,
+ `--flag value`, and `--flag`). Note, however, that for the `--flag value`
+ and `--flag` forms, the parsing can't know how many args, if any, a flag
+ actually consumes, so it simply takes the first following arg, if any, as
+ the matching value.
+
+ NOTE: This function can give misleading results checking flags that don't
+ consume any args (e.g. boolean flags). Use `has_flags_specified()` to test
+ for such flags. Such cases will either show the subsequent arg as the value,
+ or None if the flag was the last arg in argv.
+
+ Example usage, given `argv = ["--b=1", "--c", "2"]`:
+ expect(env).that_action(action).contains_flag_values([
+ ("--b", "1"),
+ ("--c", "2")
+ ])
+
+ Args:
+ self: implicitly added.
+ flag_values: ([`list`] of ([`str`] name, [`str`]) tuples) Include the
+ leading "--" in the flag name. Order and duplicates aren't checked.
+ Flags without a value found use `None` as their value.
+ """
+ missing = []
+ for flag, value in sorted(flag_values):
+ if flag not in self.parsed_flags:
+ missing.append("'{}' (not specified)".format(flag))
+ elif value not in self.parsed_flags[flag]:
+ missing.append("'{}' with value '{}'".format(flag, value))
+ if not missing:
+ return
+ problem, actual = format_failure_missing_all_values(
+ element_plural_name = "flags with values",
+ container_name = "argv",
+ missing = missing,
+ actual = self.action.argv,
+ sort = False, # Preserve argv ordering
+ )
+ self.meta.add_failure(problem, actual)
+
+def _action_subject_contains_none_of_flag_values(self, flag_values):
+ """Assert that an action's argv has none of the given ("--flag", "value") entries.
+
+ Method: ActionSubject.contains_none_of_flag_values
+
+ This parses the argv, assuming the typical formats (`--flag=value`,
+ `--flag value`, and `--flag`). Note, however, that for the `--flag value`
+ and `--flag` forms, the parsing can't know how many args, if any, a flag
+ actually consumes, so it simply takes the first following arg, if any, as
+ the matching value.
+
+ NOTE: This function can give misleading results checking flags that don't
+ consume any args (e.g. boolean flags). Use `has_flags_specified()` to test
+ for such flags.
+
+ Args:
+ self: implicitly added.
+ flag_values: ([`list`] of ([`str`] name, [`str`] value) tuples) Include
+ the leading "--" in the flag name. Order and duplicates aren't
+ checked.
+ """
+ unexpected = []
+ for flag, value in sorted(flag_values):
+ if flag not in self.parsed_flags:
+ continue
+ elif value in self.parsed_flags[flag]:
+ unexpected.append("'{}' with value '{}'".format(flag, value))
+ if not unexpected:
+ return
+
+ problem, actual = format_failure_unexpected_values(
+ none_of = "\n" + enumerate_list_as_lines(sorted(unexpected), prefix = " "),
+ unexpected = unexpected,
+ actual = self.action.argv,
+ sort = False, # Preserve argv ordering
+ )
+ self.meta.add_failure(problem, actual)
+
+def _action_subject_contains_at_least_inputs(self, inputs):
+ """Assert the action's inputs contains at least all of `inputs`.
+
+ Method: ActionSubject.contains_at_least_inputs
+
+ Example usage:
+ expect(env).that_action(action).contains_at_least_inputs([<some file>])
+
+ Args:
+ self: implicitly added.
+ inputs: (collection of [`File`]) All must be present. Multiplicity
+ is respected.
+
+ Returns:
+ [`Ordered`] (see `_ordered_incorrectly_new`).
+ """
+ return DepsetFileSubject.new(
+ self.action.inputs,
+ meta = self.meta,
+ container_name = "action inputs",
+ element_plural_name = "inputs",
+ ).contains_at_least(inputs)
+
+def _action_subject_content(self):
+ """Returns a `StrSubject` for `Action.content`.
+
+ Method: ActionSubject.content
+
+ Returns:
+ [`StrSubject`] object.
+ """
+ return StrSubject.new(
+ self.action.content,
+ self.meta.derive("content()"),
+ )
+
+def _action_subject_env(self):
+ """Returns a `DictSubject` for `Action.env`.
+
+ Method: ActionSubject.env
+
+ Args:
+ self: implicitly added.
+ """
+ return DictSubject.new(
+ self.action.env,
+ self.meta.derive("env()"),
+ container_name = "environment",
+ key_plural_name = "envvars",
+ )
+
+# We use this name so it shows up nice in docs.
+# buildifier: disable=name-conventions
+ActionSubject = struct(
+ new = _action_subject_new,
+ parse_flags = _action_subject_parse_flags,
+ argv = _action_subject_argv,
+ contains_at_least_args = _action_subject_contains_at_least_args,
+ not_contains_arg = _action_subject_not_contains_arg,
+ substitutions = _action_subject_substitutions,
+ has_flags_specified = _action_subject_has_flags_specified,
+ mnemonic = _action_subject_mnemonic,
+ inputs = _action_subject_inputs,
+ contains_flag_values = _action_subject_contains_flag_values,
+ contains_none_of_flag_values = _action_subject_contains_none_of_flag_values,
+ contains_at_least_inputs = _action_subject_contains_at_least_inputs,
+ content = _action_subject_content,
+ env = _action_subject_env,
+)
diff --git a/lib/private/bool_subject.bzl b/lib/private/bool_subject.bzl
new file mode 100644
index 0000000..d07f3c3
--- /dev/null
+++ b/lib/private/bool_subject.bzl
@@ -0,0 +1,78 @@
+# Copyright 2023 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""# BoolSubject"""
+
+load(":check_util.bzl", "check_not_equals", "common_subject_is_in")
+
+def _bool_subject_new(value, meta):
+ """Creates a "BoolSubject" struct.
+
+ Method: BoolSubject.new
+
+ Args:
+ value: ([`bool`]) the value to assert against.
+ meta: ([`ExpectMeta`]) the metadata about the call chain.
+
+ Returns:
+ A [`BoolSubject`].
+ """
+ self = struct(actual = value, meta = meta)
+ public = struct(
+ # keep sorted start
+ equals = lambda *a, **k: _bool_subject_equals(self, *a, **k),
+ is_in = lambda *a, **k: common_subject_is_in(self, *a, **k),
+ not_equals = lambda *a, **k: _bool_subject_not_equals(self, *a, **k),
+ # keep sorted end
+ )
+ return public
+
+def _bool_subject_equals(self, expected):
+ """Assert that the bool is equal to `expected`.
+
+ Method: BoolSubject.equals
+
+ Args:
+ self: implicitly added.
+ expected: ([`bool`]) the expected value.
+ """
+ if self.actual == expected:
+ return
+ self.meta.add_failure(
+ "expected: {}".format(expected),
+ "actual: {}".format(self.actual),
+ )
+
+def _bool_subject_not_equals(self, unexpected):
+ """Assert that the bool is not equal to `unexpected`.
+
+ Method: BoolSubject.not_equals
+
+ Args:
+ self: implicitly added.
+ unexpected: ([`bool`]) the value actual cannot equal.
+ """
+ return check_not_equals(
+ actual = self.actual,
+ unexpected = unexpected,
+ meta = self.meta,
+ )
+
+# We use this name so it shows up nice in docs.
+# buildifier: disable=name-conventions
+BoolSubject = struct(
+ new = _bool_subject_new,
+ equals = _bool_subject_equals,
+ not_equals = _bool_subject_not_equals,
+)
diff --git a/lib/private/check_util.bzl b/lib/private/check_util.bzl
new file mode 100644
index 0000000..f2b4813
--- /dev/null
+++ b/lib/private/check_util.bzl
@@ -0,0 +1,339 @@
+"""Helper functions to perform checks."""
+
+load("@bazel_skylib//lib:types.bzl", "types")
+load(":compare_util.bzl", "MatchResult", "compare_contains_exactly_predicates")
+load(":failure_messages.bzl", "format_failure_unexpected_values")
+load(":matching.bzl", "matching")
+load(":ordered.bzl", "IN_ORDER", "OrderedIncorrectly")
+load(":truth_common.bzl", "enumerate_list_as_lines", "maybe_sorted", "to_list")
+
+def check_contains_exactly(
+ *,
+ expect_contains,
+ actual_container,
+ format_actual,
+ format_expected,
+ format_missing,
+ format_unexpected,
+ format_out_of_order,
+ meta):
+ """Check that a collection contains exactly the given values and no more.
+
+ This checks that the collection contains exactly the given values. Extra
+ values are not allowed. Multiplicity of the expected values is respected.
+ Ordering is not checked; call `in_order()` to also check the order
+ of the actual values matches the order of the expected values.
+
+ Args:
+ expect_contains: the values that must exist (and no more).
+ actual_container: the values to check within.
+ format_actual: (callable) accepts no args and returns [`str`] (the
+ description of the actual values).
+ format_expected: (callable) accepts no args and returns [`str`] (
+ description of the expected values).
+ format_missing: (callable) accepts 1 position arg (list of values from
+ `expect_contains` that were missing), and returns [`str`] (description of
+ the missing values).
+ format_unexpected: (callable) accepts 1 positional arg (list of values from
+ `actual_container` that weren't expected), and returns [`str`] (description of
+ the unexpected values).
+ format_out_of_order: (callable) accepts 1 arg (a list of "MatchResult"
+ structs, see above) and returns a string (the problem message
+ reported on failure). The order of match results is the expected
+ order.
+ meta: ([`ExpectMeta`]) to record failures.
+
+ Returns:
+ [`Ordered`] object.
+ """
+ result = compare_contains_exactly_predicates(
+ expect_contains = [
+ matching.equals_wrapper(raw_expected)
+ for raw_expected in expect_contains
+ ],
+ actual_container = actual_container,
+ )
+ if not result.contains_exactly:
+ problems = []
+ if result.missing:
+ problems.append(format_missing([m.desc for m in result.missing]))
+ if result.unexpected:
+ problems.append(format_unexpected(result.unexpected))
+ problems.append(format_expected())
+
+ meta.add_failure("\n".join(problems), format_actual())
+
+ # We already recorded an error, so just pretend order is correct to
+ # avoid spamming another error.
+ return IN_ORDER
+ elif result.is_in_order:
+ return IN_ORDER
+ else:
+ return OrderedIncorrectly.new(
+ format_problem = lambda: format_out_of_order(result.matches),
+ format_actual = format_actual,
+ meta = meta,
+ )
+
+def check_contains_exactly_predicates(
+ *,
+ expect_contains,
+ actual_container,
+ format_actual,
+ format_expected,
+ format_missing,
+ format_unexpected,
+ format_out_of_order,
+ meta):
+ """Check that a collection contains values matching the given predicates and no more.
+
+ todo doc to describe behavior
+ This checks that the collection contains values that match the given exactly the given values.
+ Extra values that do not match a predicate are not allowed. Multiplicity of
+ the expected predicates is respected. Ordering is not checked; call
+ `in_order()` to also check the order of the actual values matches the order
+ of the expected predicates.
+
+ Args:
+ expect_contains: the predicates that must match (and no more).
+ actual_container: the values to check within.
+ format_actual: (callable) accepts no args and returns [`str`] (the
+ description of the actual values).
+ format_expected: (callable) accepts no args and returns [`str`] (
+ description of the expected values).
+ format_missing: (callable) accepts 1 position arg (list of values from
+ `expect_contains` that were missing), and returns [`str`] (description of
+ the missing values).
+ format_unexpected: (callable) accepts 1 positional arg (list of values from
+ `actual_container` that weren't expected), and returns [`str`] (description of
+ the unexpected values).
+ format_out_of_order: (callable) accepts 1 arg (a list of "MatchResult"
+ structs, see above) and returns a string (the problem message
+ reported on failure). The order of match results is the expected
+ order.
+ meta: ([`ExpectMeta`]) to record failures.
+
+ Returns:
+ [`Ordered`] object.
+ """
+ result = compare_contains_exactly_predicates(
+ expect_contains = expect_contains,
+ actual_container = actual_container,
+ )
+ if not result.contains_exactly:
+ problems = []
+ if result.missing:
+ problems.append(format_missing(result.missing))
+ if result.unexpected:
+ problems.append(format_unexpected(result.unexpected))
+ problems.append(format_expected())
+
+ meta.add_failure("\n".join(problems), format_actual())
+
+ # We already recorded an error, so just pretend order is correct to
+ # avoid spamming another error.
+ return IN_ORDER
+ elif result.is_in_order:
+ return IN_ORDER
+ else:
+ return OrderedIncorrectly.new(
+ format_problem = lambda: format_out_of_order(result.matches),
+ format_actual = format_actual,
+ meta = meta,
+ )
+
+def check_contains_predicate(collection, matcher, *, format_problem, format_actual, meta):
+ """Check that `matcher` matches any value in `collection`, and record an error if not.
+
+ Args:
+ collection: ([`collection`]) the collection whose values are compared against.
+ matcher: ([`Matcher`]) that must match.
+ format_problem: ([`str`] | callable) If a string, then the problem message
+ to use when failing. If a callable, a no-arg callable that returns
+ the problem string; see `_format_problem_*` for existing helpers.
+ format_actual: ([`str`] | callable) If a string, then the actual message
+ to use when failing. If a callable, a no-arg callable that returns
+ the actual string; see `_format_actual_*` for existing helpers.
+ meta: ([`ExpectMeta`]) to record failures
+ """
+ for value in collection:
+ if matcher.match(value):
+ return
+ meta.add_failure(
+ format_problem if types.is_string(format_problem) else format_problem(),
+ format_actual if types.is_string(format_actual) else format_actual(),
+ )
+
+def check_contains_at_least_predicates(
+ collection,
+ matchers,
+ *,
+ format_missing,
+ format_out_of_order,
+ format_actual,
+ meta):
+ """Check that the collection is a subset of the predicates.
+
+ The collection must match all the predicates. It can contain extra elements.
+ The multiplicity of matchers is respected. Checking that the relative order
+ of matches is the same as the passed-in matchers order can done by calling
+ `in_order()`.
+
+ Args:
+ collection: [`collection`] of values to check within.
+ matchers: [`collection`] of [`Matcher`] objects to match (see `matchers` struct)
+ format_missing: (callable) accepts 1 positional arg (a list of the
+ `matchers` that did not match) and returns a string (the problem
+ message reported on failure).
+ format_out_of_order: (callable) accepts 1 arg (a list of `MatchResult`s)
+ and returns a string (the problem message reported on failure). The
+ order of match results is the expected order.
+ format_actual: callable: accepts no args and returns a string (the
+ text describing the actual value reported on failure).
+ meta: ([`ExpectMeta`]) used for reporting errors.
+
+ Returns:
+ [`Ordered`] object to allow checking the order of matches.
+ """
+
+ # We'll later update this list in-place with results. We keep the order
+ # so that, on failure, the formatters receive the expected order of matches.
+ matches = [None for _ in matchers]
+
+ # A list of (original position, matcher) tuples. This allows
+ # mapping a matcher back to its original order and respecting
+ # the multiplicity of matchers.
+ remaining_matchers = enumerate(matchers)
+ ordered = True
+ for absolute_pos, value in enumerate(collection):
+ if not remaining_matchers:
+ break
+ found_i = -1
+ for cur_i, (_, matcher) in enumerate(remaining_matchers):
+ if matcher.match(value):
+ found_i = cur_i
+ break
+ if found_i > -1:
+ ordered = ordered and (found_i == 0)
+ orig_matcher_pos, matcher = remaining_matchers.pop(found_i)
+ matches[orig_matcher_pos] = MatchResult.new(
+ matched_value = value,
+ found_at = absolute_pos,
+ matcher = matcher,
+ )
+
+ if remaining_matchers:
+ meta.add_failure(
+ format_missing([v[1] for v in remaining_matchers]),
+ format_actual if types.is_string(format_actual) else format_actual(),
+ )
+
+ # We've added a failure, so no need to spam another error message, so
+ # just pretend things are in order.
+ return IN_ORDER
+ elif ordered:
+ return IN_ORDER
+ else:
+ return OrderedIncorrectly.new(
+ format_problem = lambda: format_out_of_order(matches),
+ format_actual = format_actual,
+ meta = meta,
+ )
+
+def check_contains_none_of(*, collection, none_of, meta, sort = True):
+ """Check that a collection does not have any of the `none_of` values.
+
+ Args:
+ collection: ([`collection`]) the values to check within.
+ none_of: the values that should not exist.
+ meta: ([`ExpectMeta`]) to record failures.
+ sort: ([`bool`]) If true, sort the values for display.
+ """
+ unexpected = []
+ for value in none_of:
+ if value in collection:
+ unexpected.append(value)
+ if not unexpected:
+ return
+
+ unexpected = maybe_sorted(unexpected, sort)
+ problem, actual = format_failure_unexpected_values(
+ none_of = "\n" + enumerate_list_as_lines(unexpected, prefix = " "),
+ unexpected = unexpected,
+ actual = collection,
+ sort = sort,
+ )
+ meta.add_failure(problem, actual)
+
+def check_not_contains_predicate(collection, matcher, *, meta, sort = True):
+ """Check that `matcher` matches no values in `collection`.
+
+ Args:
+ collection: ([`collection`]) the collection whose values are compared against.
+ matcher: ([`Matcher`]) that must not match.
+ meta: ([`ExpectMeta`]) to record failures
+ sort: ([`bool`]) If `True`, the collection will be sorted for display.
+ """
+ matches = maybe_sorted([v for v in collection if matcher.match(v)], sort)
+ if not matches:
+ return
+ problem, actual = format_failure_unexpected_values(
+ none_of = matcher.desc,
+ unexpected = matches,
+ actual = collection,
+ sort = sort,
+ )
+ meta.add_failure(problem, actual)
+
+def common_subject_is_in(self, any_of):
+ """Generic implementation of `Subject.is_in`
+
+ Args:
+ self: The subject object. It must provide `actual` and `meta`
+ attributes.
+ any_of: [`collection`] of values.
+ """
+ return _check_is_in(self.actual, to_list(any_of), self.meta)
+
+def _check_is_in(actual, any_of, meta):
+ """Check that `actual` is one of the values in `any_of`.
+
+ Args:
+ actual: value to check for in `any_of`
+ any_of: [`collection`] of values to check within.
+ meta: ([`ExpectMeta`]) to record failures
+ """
+ if actual in any_of:
+ return
+ meta.add_failure(
+ "expected any of:\n{}".format(
+ enumerate_list_as_lines(any_of, prefix = " "),
+ ),
+ "actual: {}".format(actual),
+ )
+
+def check_not_equals(*, unexpected, actual, meta):
+ """Check that the values are the same type and not equal (according to !=).
+
+ NOTE: This requires the same type for both values. This is to prevent
+ mistakes where different data types (usually) can never be equal.
+
+ Args:
+ unexpected: (object) the value that actual cannot equal
+ actual: (object) the observed value
+ meta: ([`ExpectMeta`]) to record failures
+ """
+ same_type = type(actual) == type(unexpected)
+ equal = not (actual != unexpected) # Use != to preserve semantics
+ if same_type and not equal:
+ return
+ if not same_type:
+ meta.add_failure(
+ "expected not to be: {} (type: {})".format(unexpected, type(unexpected)),
+ "actual: {} (type: {})".format(actual, type(actual)),
+ )
+ else:
+ meta.add_failure(
+ "expected not to be: {}".format(unexpected),
+ "actual: {}".format(actual),
+ )
diff --git a/lib/private/collection_subject.bzl b/lib/private/collection_subject.bzl
new file mode 100644
index 0000000..8b093eb
--- /dev/null
+++ b/lib/private/collection_subject.bzl
@@ -0,0 +1,350 @@
+# Copyright 2023 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""# CollectionSubject"""
+
+load(
+ ":check_util.bzl",
+ "check_contains_at_least_predicates",
+ "check_contains_exactly",
+ "check_contains_exactly_predicates",
+ "check_contains_none_of",
+ "check_contains_predicate",
+ "check_not_contains_predicate",
+)
+load(
+ ":failure_messages.bzl",
+ "format_actual_collection",
+ "format_problem_expected_exactly",
+ "format_problem_matched_out_of_order",
+ "format_problem_missing_required_values",
+ "format_problem_predicates_did_not_match",
+ "format_problem_unexpected_values",
+)
+load(":int_subject.bzl", "IntSubject")
+load(":matching.bzl", "matching")
+load(":truth_common.bzl", "to_list")
+
+def _collection_subject_new(
+ values,
+ meta,
+ container_name = "values",
+ sortable = True,
+ element_plural_name = "elements"):
+ """Creates a "CollectionSubject" struct.
+
+ Method: CollectionSubject.new
+
+ Public Attributes:
+ * `actual`: The wrapped collection.
+
+ Args:
+ values: ([`collection`]) the values to assert against.
+ meta: ([`ExpectMeta`]) the metadata about the call chain.
+ container_name: ([`str`]) conceptual name of the container.
+ sortable: ([`bool`]) True if output should be sorted for display, False if not.
+ element_plural_name: ([`str`]) the plural word for the values in the container.
+
+ Returns:
+ [`CollectionSubject`].
+ """
+
+ # buildifier: disable=uninitialized
+ public = struct(
+ # keep sorted start
+ actual = values,
+ has_size = lambda *a, **k: _collection_subject_has_size(self, *a, **k),
+ contains = lambda *a, **k: _collection_subject_contains(self, *a, **k),
+ contains_at_least = lambda *a, **k: _collection_subject_contains_at_least(self, *a, **k),
+ contains_at_least_predicates = lambda *a, **k: _collection_subject_contains_at_least_predicates(self, *a, **k),
+ contains_exactly = lambda *a, **k: _collection_subject_contains_exactly(self, *a, **k),
+ contains_exactly_predicates = lambda *a, **k: _collection_subject_contains_exactly_predicates(self, *a, **k),
+ contains_none_of = lambda *a, **k: _collection_subject_contains_none_of(self, *a, **k),
+ contains_predicate = lambda *a, **k: _collection_subject_contains_predicate(self, *a, **k),
+ not_contains = lambda *a, **k: _collection_subject_not_contains(self, *a, **k),
+ not_contains_predicate = lambda *a, **k: _collection_subject_not_contains_predicate(self, *a, **k),
+ # keep sorted end
+ )
+ self = struct(
+ actual = values,
+ meta = meta,
+ element_plural_name = element_plural_name,
+ container_name = container_name,
+ sortable = sortable,
+ contains_predicate = public.contains_predicate,
+ contains_at_least_predicates = public.contains_at_least_predicates,
+ )
+ return public
+
+def _collection_subject_has_size(self, expected):
+ """Asserts that `expected` is the size of the collection.
+
+ Method: CollectionSubject.has_size
+
+ Args:
+ self: implicitly added.
+ expected: ([`int`]) the expected size of the collection.
+ """
+ return IntSubject.new(
+ len(self.actual),
+ meta = self.meta.derive("size()"),
+ ).equals(expected)
+
+def _collection_subject_contains(self, expected):
+ """Asserts that `expected` is within the collection.
+
+ Method: CollectionSubject.contains
+
+ Args:
+ self: implicitly added.
+ expected: ([`str`]) the value that must be present.
+ """
+ matcher = matching.equals_wrapper(expected)
+ return self.contains_predicate(matcher)
+
+def _collection_subject_contains_exactly(self, expected):
+ """Check that a collection contains exactly the given elements.
+
+ Method: CollectionSubject.contains_exactly
+
+ * Multiplicity is respected.
+ * The collection must contain all the values, no more or less.
+ * Checking that the order of matches is the same as the passed-in matchers
+ order can be done by calling `in_order()`.
+
+ The collection must contain all the values and no more. Multiplicity of
+ values is respected. Checking that the order of matches is the same as the
+ passed-in matchers order can be done by calling `in_order()`.
+
+ Args:
+ self: implicitly added.
+ expected: ([`list`]) values that must exist.
+
+ Returns:
+ [`Ordered`] (see `_ordered_incorrectly_new`).
+ """
+ expected = to_list(expected)
+ return check_contains_exactly(
+ actual_container = self.actual,
+ expect_contains = expected,
+ meta = self.meta,
+ format_actual = lambda: format_actual_collection(
+ self.actual,
+ name = self.container_name,
+ sort = False, # Don't sort; this might be rendered by the in_order() error.
+ ),
+ format_expected = lambda: format_problem_expected_exactly(
+ expected,
+ sort = False, # Don't sort; this might be rendered by the in_order() error.
+ ),
+ format_missing = lambda missing: format_problem_missing_required_values(
+ missing,
+ sort = self.sortable,
+ ),
+ format_unexpected = lambda unexpected: format_problem_unexpected_values(
+ unexpected,
+ sort = self.sortable,
+ ),
+ format_out_of_order = format_problem_matched_out_of_order,
+ )
+
+def _collection_subject_contains_exactly_predicates(self, expected):
+ """Check that the values correspond 1:1 to the predicates.
+
+ Method: CollectionSubject.contains_exactly_predicates
+
+ * There must be a 1:1 correspondence between the container values and the
+ predicates.
+ * Multiplicity is respected (i.e., if the same predicate occurs twice, then
+ two distinct elements must match).
+ * Matching occurs in first-seen order. That is, a predicate will "consume"
+ the first value in `actual_container` it matches.
+ * The collection must match all the predicates, no more or less.
+ * Checking that the order of matches is the same as the passed-in matchers
+ order can be done by calling `in_order()`.
+
+ Note that confusing results may occur if predicates with overlapping
+ match conditions are used. For example, given:
+ actual=["a", "ab", "abc"],
+ predicates=[<contains a>, <contains b>, <equals a>]
+
+ Then the result will be they aren't equal: the first two predicates
+ consume "a" and "ab", leaving only "abc" for the <equals a> predicate
+ to match against, which fails.
+
+ Args:
+ self: implicitly added.
+ expected: ([`list`] of [`Matcher`]) that must match.
+
+ Returns:
+ [`Ordered`] (see `_ordered_incorrectly_new`).
+ """
+ expected = to_list(expected)
+ return check_contains_exactly_predicates(
+ actual_container = self.actual,
+ expect_contains = expected,
+ meta = self.meta,
+ format_actual = lambda: format_actual_collection(
+ self.actual,
+ name = self.container_name,
+ sort = False, # Don't sort; this might be rendered by the in_order() error.
+ ),
+ format_expected = lambda: format_problem_expected_exactly(
+ [e.desc for e in expected],
+ sort = False, # Don't sort; this might be rendered by the in_order() error.
+ ),
+ format_missing = lambda missing: format_problem_missing_required_values(
+ [m.desc for m in missing],
+ sort = self.sortable,
+ ),
+ format_unexpected = lambda unexpected: format_problem_unexpected_values(
+ unexpected,
+ sort = self.sortable,
+ ),
+ format_out_of_order = format_problem_matched_out_of_order,
+ )
+
+def _collection_subject_contains_none_of(self, values):
+ """Asserts the collection contains none of `values`.
+
+ Method: CollectionSubject.contains_none_of
+
+ Args:
+ self: implicitly added
+ values: ([`collection`]) values of which none of are allowed to exist.
+ """
+ check_contains_none_of(
+ collection = self.actual,
+ none_of = values,
+ meta = self.meta,
+ sort = self.sortable,
+ )
+
+def _collection_subject_contains_predicate(self, matcher):
+ """Asserts that `matcher` matches at least one value.
+
+ Method: CollectionSubject.contains_predicate
+
+ Args:
+ self: implicitly added.
+ matcher: ([`Matcher`]) (see `matchers` struct).
+ """
+ check_contains_predicate(
+ self.actual,
+ matcher = matcher,
+ format_problem = "expected to contain: {}".format(matcher.desc),
+ format_actual = lambda: format_actual_collection(
+ self.actual,
+ name = self.container_name,
+ sort = self.sortable,
+ ),
+ meta = self.meta,
+ )
+
+def _collection_subject_contains_at_least(self, expect_contains):
+ """Assert that the collection is a subset of the given predicates.
+
+ Method: CollectionSubject.contains_at_least
+
+ The collection must contain all the values. It can contain extra elements.
+ The multiplicity of values is respected. Checking that the relative order
+ of matches is the same as the passed-in expected values order can be done by
+ calling `in_order()`.
+
+ Args:
+ self: implicitly added.
+ expect_contains: ([`list`]) values that must be in the collection.
+
+ Returns:
+ [`Ordered`] (see `_ordered_incorrectly_new`).
+ """
+ matchers = [
+ matching.equals_wrapper(expected)
+ for expected in to_list(expect_contains)
+ ]
+ return self.contains_at_least_predicates(matchers)
+
+def _collection_subject_contains_at_least_predicates(self, matchers):
+ """Assert that the collection is a subset of the given predicates.
+
+ Method: CollectionSubject.contains_at_least_predicates
+
+ The collection must match all the predicates. It can contain extra elements.
+ The multiplicity of matchers is respected. Checking that the relative order
+ of matches is the same as the passed-in matchers order can be done by calling
+ `in_order()`.
+
+ Args:
+ self: implicitly added.
+ matchers: ([`list`] of [`Matcher`]) (see `matchers` struct).
+
+ Returns:
+ [`Ordered`] (see `_ordered_incorrectly_new`).
+ """
+ ordered = check_contains_at_least_predicates(
+ self.actual,
+ matchers,
+ format_missing = lambda missing: format_problem_predicates_did_not_match(
+ missing,
+ element_plural_name = self.element_plural_name,
+ container_name = self.container_name,
+ ),
+ format_out_of_order = format_problem_matched_out_of_order,
+ format_actual = lambda: format_actual_collection(
+ self.actual,
+ name = self.container_name,
+ sort = self.sortable,
+ ),
+ meta = self.meta,
+ )
+ return ordered
+
+def _collection_subject_not_contains(self, value):
+ check_not_contains_predicate(
+ self.actual,
+ matcher = matching.equals_wrapper(value),
+ meta = self.meta,
+ sort = self.sortable,
+ )
+
+def _collection_subject_not_contains_predicate(self, matcher):
+ """Asserts that `matcher` matches no values in the collection.
+
+ Method: CollectionSubject.not_contains_predicate
+
+ Args:
+ self: implicitly added.
+ matcher: [`Matcher`] object (see `matchers` struct).
+ """
+ check_not_contains_predicate(
+ self.actual,
+ matcher = matcher,
+ meta = self.meta,
+ sort = self.sortable,
+ )
+
+# We use this name so it shows up nice in docs.
+# buildifier: disable=name-conventions
+CollectionSubject = struct(
+ new = _collection_subject_new,
+ has_size = _collection_subject_has_size,
+ contains = _collection_subject_contains,
+ contains_exactly = _collection_subject_contains_exactly,
+ contains_exactly_predicates = _collection_subject_contains_exactly_predicates,
+ contains_none_of = _collection_subject_contains_none_of,
+ contains_predicate = _collection_subject_contains_predicate,
+ contains_at_least = _collection_subject_contains_at_least,
+ contains_at_least_predicates = _collection_subject_contains_at_least_predicates,
+ not_contains_predicate = _collection_subject_not_contains_predicate,
+)
diff --git a/lib/private/compare_util.bzl b/lib/private/compare_util.bzl
new file mode 100644
index 0000000..f84e2bb
--- /dev/null
+++ b/lib/private/compare_util.bzl
@@ -0,0 +1,267 @@
+# Copyright 2023 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utilities for performing comparisons for Truth."""
+
+load(":truth_common.bzl", "to_list")
+
+def _match_result_new(*, found_at, matched_value, matcher):
+ """Creates a "MatchResult" struct.
+
+ A `MatchResult` struct is information about how an expected value
+ matched to an actual value.
+
+ Args:
+ found_at: ([`int`]) the position in the actual container the match
+ occurred at.
+ matched_value: the actual value that caused the match
+ matcher: ([`Matcher`] | value) the value that matched
+ """
+ return struct(
+ found_at = found_at,
+ matched_value = matched_value,
+ matcher = matcher,
+ )
+
+# We use this name so it shows up nice in docs.
+# buildifier: disable=name-conventions
+MatchResult = struct(
+ new = _match_result_new,
+)
+
+def compare_contains_exactly_predicates(*, expect_contains, actual_container):
+ """Tells how and if values and predicates correspond 1:1 in the specified order.
+
+ * There must be a 1:1 correspondence between the container values and the
+ predicates.
+ * Multiplicity is respected (i.e., if the same predicate occurs twice, then
+ two distinct elements must match).
+ * Matching occurs in first-seen order. That is, a predicate will "consume"
+ the first value in `actual_container` it matches.
+ * The collection must match all the predicates, no more or less.
+ * Checking that the order of matches is the same as the passed-in matchers
+ order can be done by calling `in_order()`.
+
+ Note that confusing results may occur if predicates with overlapping
+ match conditions are used. For example, given:
+ actual=["a", "ab", "abc"],
+ predicates=[<contains a>, <contains b>, <equals a>]
+
+ Then the result will be they aren't equal: the first two predicates
+ consume "a" and "ab", leaving only "abc" for the <equals a> predicate
+ to match against, which fails.
+
+ Args:
+ expect_contains: ([`collection`] of `Matcher`s) the predicates that must match.
+ To perform simple equality, use `matching.equals_wrapper()`.
+ actual_container: ([`collection`]) The container to check within.
+
+ Returns:
+ struct with the following attributes:
+ * contains_exactly: ([`bool`]) True if all the predicates (and no others)
+ matched a distinct element; does not consider order.
+ * is_in_order: ([`bool`]) True if the actuals values matched in the same
+ order as the expected predicates. False if they were out of order.
+ If `contains_exactly=False`, this attribute is undefined.
+ * missing: [`list`] of [`Matcher`]s from `expect_contains` that did not find a
+ corresponding element in `actual_container`.
+ * unexpected: ([`list`]) values from `actual_container` that were not
+ present in `expect_contains`.
+ * matches: ([`list`] of [`MatchResult`]) information about which elements
+ in the two lists that matched each other. If
+ `contains_exactly=False`, this attribute is undefined.
+ """
+
+ # The basic idea is treating the expected and actual lists as queues of
+ # remaining values to search for. This allows the multiplicity of values
+ # to be respected and ordering correctness to be computed.
+ #
+ # Each iteration, we "pop" an element off each queue and...
+ # * If the elements are equal, then all is good: ordering is still
+ # possible, and the required element is present. Start a new iteration.
+ # * Otherwise, we know ordering isn't possible anymore and need to
+ # answer two questions:
+ # 1. Is the actual value extra, or elsewhere in the expected values?
+ # 2. Is the expected value missing, or elsewhere in the actual values?
+ # If a value exists elsewhere in the other queue, then we have to
+ # remove it to prevent it from being searched for again in a later
+ # iteration.
+ # As we go along, we keep track of where expected values matched; this
+ # allows for better error reporting.
+ expect_contains = to_list(expect_contains)
+ actual_container = to_list(actual_container)
+
+ actual_queue = [] # List of (original pos, value)
+ for i, value in enumerate(actual_container):
+ actual_queue.append([i, value])
+
+ expected_queue = [] # List of (original pos, value)
+ matches = [] # List of "MatchResult" structs
+ for i, value in enumerate(expect_contains):
+ expected_queue.append([i, value])
+ matches.append(None)
+
+ missing = [] # List of expected values missing
+ unexpected = [] # List of actual values that weren't expected
+ ordered = True
+
+ # Start at -1 because the first iteration adds 1
+ pos = -1
+ loop = range(max(len(actual_queue), len(expected_queue)))
+ for _ in loop:
+ # Advancing the position is equivalent to removing the queues' heads
+ pos += 1
+ if pos >= len(actual_queue) and pos >= len(expected_queue):
+ # Can occur when e.g. actual=[A, B], expected=[B]
+ break
+ if pos >= len(actual_queue):
+ # Fewer actual values than expected, so the rest are missing
+ missing.extend([v[1] for v in expected_queue[pos:]])
+ break
+ if pos >= len(expected_queue):
+ # More actual values than expected, so the rest are unexpected
+ unexpected.extend([v[1] for v in actual_queue[pos:]])
+ break
+
+ actual_entry = actual_queue[pos]
+ expected_entry = expected_queue[pos]
+
+ if expected_entry[1].match(actual_entry[1]):
+ # Happy path: both are equal and order is maintained.
+ matches[expected_entry[0]] = MatchResult.new(
+ found_at = actual_entry[0],
+ matched_value = actual_entry[1],
+ matcher = expected_entry[1],
+ )
+ continue
+ ordered = False
+ found_at, found_entry = _list_find(
+ actual_queue,
+ lambda v: expected_entry[1].match(v[1]),
+ start = pos,
+ end = len(actual_queue),
+ )
+ if found_at == -1:
+ missing.append(expected_entry[1])
+ else:
+ # Remove it from the queue so a subsequent iteration doesn't
+ # try to search for it again.
+ actual_queue.pop(found_at)
+ matches[expected_entry[0]] = MatchResult.new(
+ found_at = found_entry[0],
+ matched_value = found_entry[1],
+ matcher = expected_entry[1],
+ )
+
+ found_at, found_entry = _list_find(
+ expected_queue,
+ lambda entry: entry[1].match(actual_entry[1]),
+ start = pos,
+ end = len(expected_queue),
+ )
+ if found_at == -1:
+ unexpected.append(actual_entry[1])
+ else:
+ # Remove it from the queue so a subsequent iteration doesn't
+ # try to search for it again.
+ expected_queue.pop(found_at)
+ matches[found_entry[0]] = MatchResult.new(
+ found_at = actual_entry[0],
+ matched_value = actual_entry[1],
+ matcher = found_entry[1],
+ )
+
+ return struct(
+ contains_exactly = not (missing or unexpected),
+ is_in_order = ordered,
+ missing = missing,
+ unexpected = unexpected,
+ matches = matches,
+ )
+
+def _list_find(search_in, search_for, *, start = 0, end = None):
+ """Linear search a list for a value matching a predicate.
+
+ Args:
+ search_in: ([`list`]) the list to search within.
+ search_for: (callable) accepts 1 positional arg (the current value)
+ and returns `bool` (`True` if matched, `False` if not).
+ start: ([`int`]) the position within `search_in` to start at. Defaults
+ to `0` (start of list)
+ end: (optional [`int`]) the position within `search_in` to stop before
+ (i.e. the value is exclusive; given a list of length 5, specifying
+ `end=5` means it will search the whole list). Defaults to the length
+ of `search_in`.
+ Returns:
+ [`tuple`] of ([`int`] found_at, value).
+ * If the value was found, then `found_at` is the offset in `search_in`
+ it was found at, and matched value is the element at that offset.
+ * If the value was not found, then `found_at=-1`, and the matched
+ value is `None`.
+ """
+ end = len(search_in) if end == None else end
+ pos = start
+ for _ in search_in:
+ if pos >= end:
+ return -1, None
+ value = search_in[pos]
+ if search_for(value):
+ return pos, value
+ pos += 1
+ return -1, None
+
+def compare_dicts(*, expected, actual):
+ """Compares two dicts, reporting differences.
+
+ Args:
+ expected: ([`dict`]) the desired state of `actual`
+ actual: ([`dict`]) the observed dict
+ Returns:
+ Struct with the following attributes:
+ * missing_keys: [`list`] of keys that were missing in `actual`, but present
+ in `expected`
+ * unexpected_keys: [`list`] of keys that were present in `actual`, but not
+ present in `expected`
+ * incorrect_entries: ([`dict`] of key -> [`DictEntryMismatch`]) of keys that
+ were in both dicts, but whose values were not equal. The value is
+ a "DictEntryMismatch" struct, which is defined as a struct with
+ attributes:
+ * `actual`: the value from `actual[key]`
+ * `expected`: the value from `expected[key]`
+ """
+ all_keys = {key: None for key in actual.keys()}
+ all_keys.update({key: None for key in expected.keys()})
+ missing_keys = []
+ unexpected_keys = []
+ incorrect_entries = {}
+
+ for key in sorted(all_keys):
+ if key not in actual:
+ missing_keys.append(key)
+ elif key not in expected:
+ unexpected_keys.append(key)
+ else:
+ actual_value = actual[key]
+ expected_value = expected[key]
+ if actual_value != expected_value:
+ incorrect_entries[key] = struct(
+ actual = actual_value,
+ expected = expected_value,
+ )
+
+ return struct(
+ missing_keys = missing_keys,
+ unexpected_keys = unexpected_keys,
+ incorrect_entries = incorrect_entries,
+ )
diff --git a/lib/private/depset_file_subject.bzl b/lib/private/depset_file_subject.bzl
new file mode 100644
index 0000000..554e3a5
--- /dev/null
+++ b/lib/private/depset_file_subject.bzl
@@ -0,0 +1,292 @@
+# Copyright 2023 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""# DepsetFileSubject"""
+
+load("//lib:util.bzl", "is_file")
+load(
+ ":check_util.bzl",
+ "check_contains_at_least_predicates",
+ "check_contains_exactly",
+ "check_contains_predicate",
+ "check_not_contains_predicate",
+)
+load(":collection_subject.bzl", "CollectionSubject")
+load(
+ ":failure_messages.bzl",
+ "format_actual_collection",
+ "format_problem_expected_exactly",
+ "format_problem_matched_out_of_order",
+ "format_problem_missing_any_values",
+ "format_problem_missing_required_values",
+ "format_problem_predicates_did_not_match",
+ "format_problem_unexpected_values",
+)
+load(":matching.bzl", "matching")
+load(":truth_common.bzl", "to_list")
+
+def _depset_file_subject_new(files, meta, container_name = "depset", element_plural_name = "files"):
+ """Creates a DepsetFileSubject asserting on `files`.
+
+ Method: DepsetFileSubject.new
+
+ Args:
+ files: ([`depset`] of [`File`]) the values to assert on.
+ meta: ([`ExpectMeta`]) of call chain information.
+ container_name: ([`str`]) conceptual name of the container.
+ element_plural_name: ([`str`]) the plural word for the values in the container.
+
+ Returns:
+ [`DepsetFileSubject`] object.
+ """
+
+ # buildifier: disable=uninitialized
+ public = struct(
+ # keep sorted start
+ contains = lambda *a, **k: _depset_file_subject_contains(self, *a, **k),
+ contains_any_in = lambda *a, **k: _depset_file_subject_contains_any_in(self, *a, **k),
+ contains_at_least = lambda *a, **k: _depset_file_subject_contains_at_least(self, *a, **k),
+ contains_at_least_predicates = lambda *a, **k: _depset_file_subject_contains_at_least_predicates(self, *a, **k),
+ contains_exactly = lambda *a, **k: _depset_file_subject_contains_exactly(self, *a, **k),
+ contains_predicate = lambda *a, **k: _depset_file_subject_contains_predicate(self, *a, **k),
+ not_contains = lambda *a, **k: _depset_file_subject_not_contains(self, *a, **k),
+ not_contains_predicate = lambda *a, **k: _depset_file_subject_not_contains_predicate(self, *a, **k),
+ # keep sorted end
+ )
+ self = struct(
+ files = to_list(files),
+ meta = meta,
+ public = public,
+ actual_paths = sorted([f.short_path for f in to_list(files)]),
+ container_name = container_name,
+ element_plural_name = element_plural_name,
+ )
+ return public
+
+def _depset_file_subject_contains(self, expected):
+ """Asserts that the depset of files contains the provided path/file.
+
+ Method: DepsetFileSubject.contains
+
+ Args:
+ self: implicitly added
+ expected: ([`str`] | [`File`]) If a string path is provided, it is
+ compared to the short path of the files and are formatted using
+ [`ExpectMeta.format_str`] and its current contextual keywords. Note
+ that, when using `File` objects, two files' configurations must be
+ the same for them to be considered equal.
+ """
+ if is_file(expected):
+ actual = self.files
+ else:
+ expected = self.meta.format_str(expected)
+ actual = self.actual_paths
+
+ CollectionSubject.new(
+ actual,
+ meta = self.meta,
+ container_name = self.container_name,
+ element_plural_name = self.element_plural_name,
+ ).contains(expected)
+
+def _depset_file_subject_contains_at_least(self, expected):
+ """Asserts that the depset of files contains at least the provided paths.
+
+ Method: DepsetFileSubject.contains_at_least
+
+ Args:
+ self: implicitly added
+ expected: ([`collection`] of [`str`] | collection of [`File`]) multiplicity
+ is respected. If string paths are provided, they are compared to the
+ short path of the files and are formatted using
+ `ExpectMeta.format_str` and its current contextual keywords. Note
+ that, when using `File` objects, two files' configurations must be the
+ same for them to be considered equal.
+
+ Returns:
+ [`Ordered`] (see `_ordered_incorrectly_new`).
+ """
+ expected = to_list(expected)
+ if len(expected) < 1 or is_file(expected[0]):
+ actual = self.files
+ else:
+ expected = [self.meta.format_str(v) for v in expected]
+ actual = self.actual_paths
+
+ return CollectionSubject.new(
+ actual,
+ meta = self.meta,
+ container_name = self.container_name,
+ element_plural_name = self.element_plural_name,
+ ).contains_at_least(expected)
+
+def _depset_file_subject_contains_any_in(self, expected):
+ """Asserts that any of the values in `expected` exist.
+
+ Method: DepsetFileSubject.contains_any_in
+
+ Args:
+ self: implicitly added.
+ expected: ([`collection`] of [`str`] paths | [`collection`] of [`File`])
+ at least one of the values must exist. Note that, when using `File`
+ objects, two files' configurations must be the same for them to be
+ considered equal. When string paths are provided, they are compared
+ to `File.short_path`.
+ """
+ expected = to_list(expected)
+ if len(expected) < 1 or is_file(expected[0]):
+ actual = self.files
+ else:
+ actual = self.actual_paths
+
+ expected_map = {value: None for value in expected}
+
+ check_contains_predicate(
+ actual,
+ matcher = matching.is_in(expected_map),
+ format_problem = lambda: format_problem_missing_any_values(expected),
+ format_actual = lambda: format_actual_collection(
+ actual,
+ name = self.container_name,
+ ),
+ meta = self.meta,
+ )
+
+def _depset_file_subject_contains_at_least_predicates(self, matchers):
+ """Assert that the depset is a subset of the given predicates.
+
+ Method: DepsetFileSubject.contains_at_least_predicates
+
+ The depset must match all the predicates. It can contain extra elements.
+ The multiplicity of matchers is respected. Checking that the relative order
+ of matches is the same as the passed-in matchers order can be done by calling
+ `in_order()`.
+
+ Args:
+ self: implicitly added.
+ matchers: ([`list`] of [`Matcher`]) (see `matchers` struct) that
+ accept [`File`] objects.
+
+ Returns:
+ [`Ordered`] (see `_ordered_incorrectly_new`).
+ """
+ ordered = check_contains_at_least_predicates(
+ self.files,
+ matchers,
+ format_missing = lambda missing: format_problem_predicates_did_not_match(
+ missing,
+ element_plural_name = self.element_plural_name,
+ container_name = self.container_name,
+ ),
+ format_out_of_order = format_problem_matched_out_of_order,
+ format_actual = lambda: format_actual_collection(
+ self.files,
+ name = self.container_name,
+ ),
+ meta = self.meta,
+ )
+ return ordered
+
+def _depset_file_subject_contains_predicate(self, matcher):
+ """Asserts that `matcher` matches at least one value.
+
+ Method: DepsetFileSubject.contains_predicate
+
+ Args:
+ self: implicitly added.
+ matcher: [`Matcher`] (see `matching` struct) that accepts `File` objects.
+ """
+ check_contains_predicate(
+ self.files,
+ matcher = matcher,
+ format_problem = matcher.desc,
+ format_actual = lambda: format_actual_collection(
+ self.files,
+ name = self.container_name,
+ ),
+ meta = self.meta,
+ )
+
+def _depset_file_subject_contains_exactly(self, paths):
+ """Asserts the depset of files contains exactly the given paths.
+
+ Method: DepsetFileSubject.contains_exactly
+
+ Args:
+ self: implicitly added.
+ paths: ([`collection`] of [`str`]) the paths that must exist. These are
+ compared to the `short_path` values of the files in the depset.
+ All the paths, and no more, must exist.
+ """
+ paths = [self.meta.format_str(p) for p in to_list(paths)]
+ check_contains_exactly(
+ expect_contains = paths,
+ actual_container = self.actual_paths,
+ format_actual = lambda: format_actual_collection(
+ self.actual_paths,
+ name = self.container_name,
+ ),
+ format_expected = lambda: format_problem_expected_exactly(
+ paths,
+ sort = True,
+ ),
+ format_missing = lambda missing: format_problem_missing_required_values(
+ missing,
+ sort = True,
+ ),
+ format_unexpected = lambda unexpected: format_problem_unexpected_values(
+ unexpected,
+ sort = True,
+ ),
+ format_out_of_order = lambda matches: fail("Should not be called"),
+ meta = self.meta,
+ )
+
+def _depset_file_subject_not_contains(self, short_path):
+ """Asserts that `short_path` is not in the depset.
+
+ Method: DepsetFileSubject.not_contains
+
+ Args:
+ self: implicitly added.
+ short_path: ([`str`]) the short path that should not be present.
+ """
+ short_path = self.meta.format_str(short_path)
+ matcher = matching.custom(short_path, lambda f: f.short_path == short_path)
+ check_not_contains_predicate(self.files, matcher, meta = self.meta)
+
+def _depset_file_subject_not_contains_predicate(self, matcher):
+ """Asserts that nothing in the depset matches `matcher`.
+
+ Method: DepsetFileSubject.not_contains_predicate
+
+ Args:
+ self: implicitly added.
+ matcher: ([`Matcher`]) that must match. It operates on [`File`] objects.
+ """
+ check_not_contains_predicate(self.files, matcher, meta = self.meta)
+
+# We use this name so it shows up nice in docs.
+# buildifier: disable=name-conventions
+DepsetFileSubject = struct(
+ new = _depset_file_subject_new,
+ contains = _depset_file_subject_contains,
+ contains_at_least = _depset_file_subject_contains_at_least,
+ contains_any_in = _depset_file_subject_contains_any_in,
+ contains_at_least_predicates = _depset_file_subject_contains_at_least_predicates,
+ contains_predicate = _depset_file_subject_contains_predicate,
+ contains_exactly = _depset_file_subject_contains_exactly,
+ not_contains = _depset_file_subject_not_contains,
+ not_contains_predicate = _depset_file_subject_not_contains_predicate,
+)
diff --git a/lib/private/dict_subject.bzl b/lib/private/dict_subject.bzl
new file mode 100644
index 0000000..48d9463
--- /dev/null
+++ b/lib/private/dict_subject.bzl
@@ -0,0 +1,181 @@
+# Copyright 2023 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""# DictSubject"""
+
+load(":collection_subject.bzl", "CollectionSubject")
+load(":compare_util.bzl", "compare_dicts")
+load(
+ ":failure_messages.bzl",
+ "format_dict_as_lines",
+ "format_problem_dict_expected",
+)
+
+def _dict_subject_new(actual, meta, container_name = "dict", key_plural_name = "keys"):
+    """Creates a new `DictSubject`.
+
+    Method: DictSubject.new
+
+    Args:
+        actual: ([`dict`]) the dict to assert against.
+        meta: ([`ExpectMeta`]) of call chain information.
+        container_name: ([`str`]) conceptual name of the dict.
+        key_plural_name: ([`str`]) the plural word for the keys of the dict.
+
+    Returns:
+        New `DictSubject` struct.
+    """
+
+    # buildifier: disable=uninitialized
+    public = struct(
+        contains_exactly = lambda *a, **k: _dict_subject_contains_exactly(self, *a, **k),
+        contains_at_least = lambda *a, **k: _dict_subject_contains_at_least(self, *a, **k),
+        contains_none_of = lambda *a, **k: _dict_subject_contains_none_of(self, *a, **k),
+        keys = lambda *a, **k: _dict_subject_keys(self, *a, **k),
+    )
+    # The lambdas above capture `self` lazily, so defining it after `public`
+    # is safe: `self` is bound before any of the methods can be invoked.
+    self = struct(
+        actual = actual,
+        meta = meta,
+        container_name = container_name,
+        key_plural_name = key_plural_name,
+    )
+    return public
+
+def _dict_subject_contains_at_least(self, at_least):
+    """Assert the dict has at least the entries from `at_least`.
+
+    Method: DictSubject.contains_at_least
+
+    Args:
+        self: implicitly added.
+        at_least: ([`dict`]) the subset of keys/values that must exist. Extra
+            keys are allowed. Order is not checked.
+    """
+    result = compare_dicts(
+        expected = at_least,
+        actual = self.actual,
+    )
+    # Nothing missing and no wrong values: the assertion holds.
+    if not result.missing_keys and not result.incorrect_entries:
+        return
+
+    self.meta.add_failure(
+        problem = format_problem_dict_expected(
+            expected = at_least,
+            missing_keys = result.missing_keys,
+            # Extra keys are allowed by an "at least" check, so never report
+            # them as unexpected.
+            unexpected_keys = [],
+            incorrect_entries = result.incorrect_entries,
+            container_name = self.container_name,
+            key_plural_name = self.key_plural_name,
+        ),
+        actual = "actual: {{\n{}\n}}".format(format_dict_as_lines(self.actual)),
+    )
+
+def _dict_subject_contains_exactly(self, expected):
+    """Assert the dict has exactly the provided values.
+
+    Method: DictSubject.contains_exactly
+
+    Args:
+        self: implicitly added
+        expected: ([`dict`]) the values that must exist. Missing values or
+            extra values are not allowed. Order is not checked.
+    """
+    result = compare_dicts(
+        expected = expected,
+        actual = self.actual,
+    )
+
+    # An exact match requires no missing keys, no extra keys, and no entries
+    # mapped to the wrong value.
+    if (not result.missing_keys and not result.unexpected_keys and
+        not result.incorrect_entries):
+        return
+
+    self.meta.add_failure(
+        problem = format_problem_dict_expected(
+            expected = expected,
+            missing_keys = result.missing_keys,
+            unexpected_keys = result.unexpected_keys,
+            incorrect_entries = result.incorrect_entries,
+            container_name = self.container_name,
+            key_plural_name = self.key_plural_name,
+        ),
+        actual = "actual: {{\n{}\n}}".format(format_dict_as_lines(self.actual)),
+    )
+
+def _dict_subject_contains_none_of(self, none_of):
+    """Assert the dict contains none of `none_of` keys/values.
+
+    Method: DictSubject.contains_none_of
+
+    Args:
+        self: implicitly added
+        none_of: ([`dict`]) the keys/values that must not exist. Order is not
+            checked.
+    """
+    # A banned key/value pair is only violated when the key is present AND
+    # maps to exactly the banned value. Keys that are absent, or that map to
+    # a different value, satisfy the assertion.
+    #
+    # NOTE: The previous implementation early-returned only when *all* keys
+    # were missing or *all* entries differed; a mix of the two fell through
+    # and indexed `self.actual[key]` for an absent key (an eval error), or
+    # reported a spurious failure with no offending entries.
+    incorrect_entries = {}
+    for key, not_expected in none_of.items():
+        if key not in self.actual:
+            continue
+        actual = self.actual[key]
+        if actual == not_expected:
+            incorrect_entries[key] = struct(
+                actual = actual,
+                expected = "<not {}>".format(not_expected),
+            )
+    if not incorrect_entries:
+        return
+
+    self.meta.add_failure(
+        problem = format_problem_dict_expected(
+            expected = none_of,
+            missing_keys = [],
+            unexpected_keys = [],
+            incorrect_entries = incorrect_entries,
+            container_name = self.container_name + " to be missing",
+            key_plural_name = self.key_plural_name,
+        ),
+        actual = "actual: {{\n{}\n}}".format(format_dict_as_lines(self.actual)),
+    )
+
+def _dict_subject_keys(self):
+    """Returns a `CollectionSubject` for the dict's keys.
+
+    Method: DictSubject.keys
+
+    Args:
+        self: implicitly added
+
+    Returns:
+        [`CollectionSubject`] of the keys.
+    """
+    return CollectionSubject.new(
+        self.actual.keys(),
+        # Extend the call-chain expression so failures read `...keys()...`.
+        meta = self.meta.derive("keys()"),
+        container_name = "dict keys",
+        element_plural_name = "keys",
+    )
+
+# Public API struct. The PascalCase name (instead of a snake_case constant)
+# is deliberate so the symbol shows up nicely in the generated docs.
+# buildifier: disable=name-conventions
+DictSubject = struct(
+    new = _dict_subject_new,
+    contains_at_least = _dict_subject_contains_at_least,
+    contains_exactly = _dict_subject_contains_exactly,
+    contains_none_of = _dict_subject_contains_none_of,
+    keys = _dict_subject_keys,
+)
diff --git a/lib/private/execution_info_subject.bzl b/lib/private/execution_info_subject.bzl
new file mode 100644
index 0000000..35524f6
--- /dev/null
+++ b/lib/private/execution_info_subject.bzl
@@ -0,0 +1,84 @@
+# Copyright 2023 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""# ExecutionInfoSubject"""
+
+load(":dict_subject.bzl", "DictSubject")
+load(":str_subject.bzl", "StrSubject")
+
+def _execution_info_subject_new(info, *, meta):
+    """Create a new `ExecutionInfoSubject`.
+
+    Method: ExecutionInfoSubject.new
+
+    Args:
+        info: ([`testing.ExecutionInfo`]) provider instance.
+        meta: ([`ExpectMeta`]) of call chain information.
+
+    Returns:
+        `ExecutionInfoSubject` struct.
+    """
+
+    # buildifier: disable=uninitialized
+    public = struct(
+        # keep sorted start
+        exec_group = lambda *a, **k: _execution_info_subject_exec_group(self, *a, **k),
+        requirements = lambda *a, **k: _execution_info_subject_requirements(self, *a, **k),
+        # keep sorted end
+    )
+    # The lambdas above capture `self` lazily, so defining it after `public`
+    # is safe: `self` is bound before any of the methods can be invoked.
+    self = struct(
+        actual = info,
+        meta = meta,
+    )
+    return public
+
+def _execution_info_subject_requirements(self):
+    """Create a `DictSubject` for the requirements values.
+
+    Method: ExecutionInfoSubject.requirements
+
+    Args:
+        self: implicitly added
+
+    Returns:
+        `DictSubject` of the requirements.
+    """
+    return DictSubject.new(
+        self.actual.requirements,
+        # Extend the call chain so failures read `...requirements()...`.
+        meta = self.meta.derive("requirements()"),
+    )
+
+def _execution_info_subject_exec_group(self):
+    """Create a `StrSubject` for the `exec_group` value.
+
+    Method: ExecutionInfoSubject.exec_group
+
+    Args:
+        self: implicitly added
+
+    Returns:
+        A [`StrSubject`] for the exec group.
+    """
+    return StrSubject.new(
+        self.actual.exec_group,
+        # Extend the call chain so failures read `...exec_group()...`.
+        meta = self.meta.derive("exec_group()"),
+    )
+
+# Public API struct. The PascalCase name (instead of a snake_case constant)
+# is deliberate so the symbol shows up nicely in the generated docs.
+# buildifier: disable=name-conventions
+ExecutionInfoSubject = struct(
+    new = _execution_info_subject_new,
+    requirements = _execution_info_subject_requirements,
+    exec_group = _execution_info_subject_exec_group,
+)
diff --git a/lib/private/expect.bzl b/lib/private/expect.bzl
new file mode 100644
index 0000000..e568a54
--- /dev/null
+++ b/lib/private/expect.bzl
@@ -0,0 +1,271 @@
+# Copyright 2023 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""# Expect"""
+
+load(":action_subject.bzl", "ActionSubject")
+load(":bool_subject.bzl", "BoolSubject")
+load(":collection_subject.bzl", "CollectionSubject")
+load(":depset_file_subject.bzl", "DepsetFileSubject")
+load(":dict_subject.bzl", "DictSubject")
+load(":expect_meta.bzl", "ExpectMeta")
+load(":file_subject.bzl", "FileSubject")
+load(":int_subject.bzl", "IntSubject")
+load(":str_subject.bzl", "StrSubject")
+load(":target_subject.bzl", "TargetSubject")
+
+def _expect_new_from_env(env):
+    """Wrapper around `env`.
+
+    This is the entry point to the Truth-style assertions. Example usage:
+
+        expect = expect(env)
+        expect.that_action(action).contains_at_least_args(...)
+
+    The passed in `env` object allows optional attributes to be set to
+    customize behavior. Usually this is helpful for testing. See `_fake_env()`
+    in truth_tests.bzl for examples.
+      * `fail`: callable that takes a failure message. If present, it
+        will be called instead of the regular `Expect.add_failure` logic.
+      * `get_provider`: callable that takes 2 positional args (target and
+        provider) and returns the found provider or fails.
+      * `has_provider`: callable that takes 2 positional args (a [`Target`] and
+        a [`provider`]) and returns [`bool`] (`True` if present, `False` otherwise) or fails.
+
+    Args:
+        env: unittest env struct, or some approximation. There are several
+            attributes that override regular behavior; see above doc.
+
+    Returns:
+        [`Expect`] object
+    """
+    # No meta yet: this is the root of the call chain.
+    return _expect_new(env, None)
+
+def _expect_new(env, meta):
+    """Creates a new Expect object.
+
+    Internal; only other `Expect` methods should be calling this.
+
+    Args:
+        env: unittest env struct or some approximation.
+        meta: ([`ExpectMeta`]) metadata about call chain and state, or `None`
+            to start a fresh chain.
+
+    Returns:
+        [`Expect`] object
+    """
+
+    meta = meta or ExpectMeta.new(env)
+
+    # buildifier: disable=uninitialized
+    public = struct(
+        # keep sorted start
+        meta = meta,
+        that_action = lambda *a, **k: _expect_that_action(self, *a, **k),
+        that_bool = lambda *a, **k: _expect_that_bool(self, *a, **k),
+        that_collection = lambda *a, **k: _expect_that_collection(self, *a, **k),
+        that_depset_of_files = lambda *a, **k: _expect_that_depset_of_files(self, *a, **k),
+        that_dict = lambda *a, **k: _expect_that_dict(self, *a, **k),
+        that_file = lambda *a, **k: _expect_that_file(self, *a, **k),
+        that_int = lambda *a, **k: _expect_that_int(self, *a, **k),
+        that_str = lambda *a, **k: _expect_that_str(self, *a, **k),
+        that_target = lambda *a, **k: _expect_that_target(self, *a, **k),
+        where = lambda *a, **k: _expect_where(self, *a, **k),
+        # keep sorted end
+        # `meta` (above) is also read directly by Subject classes and
+        # internal helpers.
+    )
+    # The lambdas above capture `self` lazily, so defining it afterwards
+    # is safe.
+    self = struct(env = env, public = public, meta = meta)
+    return public
+
+def _expect_that_action(self, action):
+    """Creates a subject for asserting Actions.
+
+    Method: Expect.that_action
+
+    Args:
+        self: implicitly added.
+        action: ([`Action`]) the action to check.
+
+    Returns:
+        [`ActionSubject`] object.
+    """
+    return ActionSubject.new(
+        action,
+        self.meta.derive(
+            expr = "action",
+            details = ["action: [{}] {}".format(action.mnemonic, action)],
+        ),
+    )
+
+def _expect_that_bool(self, value, expr = "boolean"):
+    """Creates a subject for asserting a boolean.
+
+    Method: Expect.that_bool
+
+    Args:
+        self: implicitly added.
+        value: ([`bool`]) the bool to check.
+        expr: ([`str`]) the starting "value of" expression to report in errors.
+
+    Returns:
+        [`BoolSubject`] object.
+    """
+    return BoolSubject.new(
+        value,
+        meta = self.meta.derive(expr = expr),
+    )
+
+def _expect_that_collection(self, collection, expr = "collection"):
+    """Creates a subject for asserting collections.
+
+    Method: Expect.that_collection
+
+    Args:
+        self: implicitly added.
+        collection: The collection (list or depset) to assert.
+        expr: ([`str`]) the starting "value of" expression to report in errors.
+
+    Returns:
+        [`CollectionSubject`] object.
+    """
+    return CollectionSubject.new(collection, self.meta.derive(expr))
+
+def _expect_that_depset_of_files(self, depset_files):
+    """Creates a subject for asserting a depset of files.
+
+    Method: Expect.that_depset_of_files
+
+    Args:
+        self: implicitly added.
+        depset_files: ([`depset`] of [`File`]) the values to assert on.
+
+    Returns:
+        [`DepsetFileSubject`] object.
+    """
+    return DepsetFileSubject.new(depset_files, self.meta.derive("depset_files"))
+
+def _expect_that_dict(self, mapping, meta = None):
+    """Creates a subject for asserting a dict.
+
+    Method: Expect.that_dict
+
+    Args:
+        self: implicitly added
+        mapping: ([`dict`]) the values to assert on
+        meta: ([`ExpectMeta`]) optional custom call chain information to use
+            instead of deriving from `self`.
+
+    Returns:
+        [`DictSubject`] object.
+    """
+    meta = meta or self.meta.derive("dict")
+    return DictSubject.new(mapping, meta = meta)
+
+def _expect_that_file(self, file, meta = None):
+    """Creates a subject for asserting a file.
+
+    Method: Expect.that_file
+
+    Args:
+        self: implicitly added.
+        file: ([`File`]) the value to assert.
+        meta: ([`ExpectMeta`]) optional custom call chain information to use
+            instead of deriving from `self`.
+
+    Returns:
+        [`FileSubject`] object.
+    """
+    meta = meta or self.meta.derive("file")
+    return FileSubject.new(file, meta = meta)
+
+def _expect_that_int(self, value, expr = "integer"):
+    """Creates a subject for asserting an `int`.
+
+    Method: Expect.that_int
+
+    Args:
+        self: implicitly added.
+        value: ([`int`]) the value to check against.
+        expr: ([`str`]) the starting "value of" expression to report in errors.
+
+    Returns:
+        [`IntSubject`] object.
+    """
+    return IntSubject.new(value, self.meta.derive(expr))
+
+def _expect_that_str(self, value):
+    """Creates a subject for asserting a `str`.
+
+    Method: Expect.that_str
+
+    Args:
+        self: implicitly added.
+        value: ([`str`]) the value to check against.
+
+    Returns:
+        [`StrSubject`] object.
+    """
+    return StrSubject.new(value, self.meta.derive("string"))
+
+def _expect_that_target(self, target):
+    """Creates a subject for asserting a `Target`.
+
+    Method: Expect.that_target
+
+    This adds the following parameters to `ExpectMeta.format_str`:
+      {package}: The target's package, e.g. "foo/bar" from "//foo/bar:baz"
+      {name}: The target's base name, e.g., "baz" from "//foo/bar:baz"
+
+    Args:
+        self: implicitly added.
+        target: ([`Target`]) subject target to check against.
+
+    Returns:
+        [`TargetSubject`] object.
+    """
+    return TargetSubject.new(target, self.meta.derive(
+        expr = "target({})".format(target.label),
+        details = ["target: {}".format(target.label)],
+        format_str_kwargs = {
+            "name": target.label.name,
+            "package": target.label.package,
+        },
+    ))
+
+def _expect_where(self, **details):
+    """Add additional information about the assertion.
+
+    Method: Expect.where
+
+    This is useful for attaching information that isn't part of the call
+    chain for some reason. Example usage:
+
+        expect(env).where(platform=ctx.attr.platform).that_str(...)
+
+    Would include "platform: {ctx.attr.platform}" in failure messages.
+
+    Args:
+        self: implicitly added.
+        **details: ([`dict`] of [`str`] to value) Each named arg is added to
+            the metadata details with the provided string, which is printed as
+            part of displaying any failures.
+
+    Returns:
+        [`Expect`] object with separate metadata derived from the original self.
+    """
+    meta = self.meta.derive(
+        details = ["{}: {}".format(k, v) for k, v in details.items()],
+    )
+    return _expect_new(env = self.env, meta = meta)
+
+# Public API struct. The PascalCase name (instead of a snake_case constant)
+# is deliberate so the symbol shows up nicely in the generated docs.
+# buildifier: disable=name-conventions
+Expect = struct(
+    new_from_env = _expect_new_from_env,
+    new = _expect_new,
+    that_action = _expect_that_action,
+    that_bool = _expect_that_bool,
+    that_collection = _expect_that_collection,
+    that_depset_of_files = _expect_that_depset_of_files,
+    that_dict = _expect_that_dict,
+    that_file = _expect_that_file,
+    that_int = _expect_that_int,
+    that_str = _expect_that_str,
+    that_target = _expect_that_target,
+    where = _expect_where,
+)
diff --git a/lib/private/expect_meta.bzl b/lib/private/expect_meta.bzl
new file mode 100644
index 0000000..8ce9f1e
--- /dev/null
+++ b/lib/private/expect_meta.bzl
@@ -0,0 +1,278 @@
+# Copyright 2023 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""# ExpectMeta
+
+ExpectMeta object implementation.
+"""
+
+load("@bazel_skylib//lib:unittest.bzl", ut_asserts = "asserts")
+
+def _expect_meta_new(env, exprs = [], details = [], format_str_kwargs = None):
+    """Creates a new "ExpectMeta" struct.
+
+    Method: ExpectMeta.new
+
+    ExpectMeta objects are internal helpers for the Expect object and Subject
+    objects. They are used for Subjects to store and communicate state through a
+    series of call chains and asserts.
+
+    This constructor should only be directly called by `Expect` objects. When a
+    parent Subject is creating a child-Subject, then [`derive()`] should be
+    used.
+
+    ### Env objects
+
+    The `env` object basically provides a way to interact with things outside
+    of the truth assertions framework. This allows easier testing of the
+    framework itself and decouples it from a particular test framework (which
+    makes it usable by rules_testing's analysis_test and skylib's
+    analysistest)
+
+    The `env` object requires the following attribute:
+      * ctx: The test's ctx.
+
+    The `env` object allows the following attributes to customize behavior:
+      * fail: A callable that accepts a single string, which is the failure
+        message. Its return value is ignored. This is called when an assertion
+        fails. It's generally expected that it records a failure instead of
+        immediately failing.
+      * has_provider: (callable) it accepts two positional args, target and
+        provider and returns [`bool`]. This is used to implement `Provider in
+        target` operations.
+      * get_provider: (callable) it accepts two positional args, target and
+        provider and returns the provider value. This is used to implement
+        `target[Provider]`.
+
+    Args:
+        env: unittest env struct or some approximation.
+        exprs: ([`list`] of [`str`]) the expression strings of the call chain for
+            the subject.
+        details: ([`list`] of [`str`]) additional details to print on error. These
+            are usually informative details of the objects under test.
+        format_str_kwargs: optional dict of format() kwargs. These kwargs
+            are propagated through `derive()` calls and used when
+            `ExpectMeta.format_str()` is called. NOTE: a caller-provided dict
+            is mutated by the setdefault() calls below.
+
+    Returns:
+        [`ExpectMeta`] object.
+    """
+    if format_str_kwargs == None:
+        format_str_kwargs = {}
+    format_str_kwargs.setdefault("workspace", env.ctx.workspace_name)
+    format_str_kwargs.setdefault("test_name", env.ctx.label.name)
+
+    # buildifier: disable=uninitialized
+    self = struct(
+        ctx = env.ctx,
+        env = env,
+        add_failure = lambda *a, **k: _expect_meta_add_failure(self, *a, **k),
+        derive = lambda *a, **k: _expect_meta_derive(self, *a, **k),
+        format_str = lambda *a, **k: _expect_meta_format_str(self, *a, **k),
+        get_provider = lambda *a, **k: _expect_meta_get_provider(self, *a, **k),
+        has_provider = lambda *a, **k: _expect_meta_has_provider(self, *a, **k),
+        _exprs = exprs,
+        _details = details,
+        _format_str_kwargs = format_str_kwargs,
+    )
+    return self
+
+def _expect_meta_derive(self, expr = None, details = None, format_str_kwargs = {}):
+    """Create a derivation of the current meta object for a child-Subject.
+
+    Method: ExpectMeta.derive
+
+    When a Subject needs to create a child-Subject, it derives a new meta
+    object to pass to the child. This separates the parent's state from
+    the child's state and allows any failures generated by the child to
+    include the context of the parent creator.
+
+    Example usage:
+
+        def _foo_subject_action_named(self, name):
+            meta = self.meta.derive("action_named({})".format(name),
+                                    "action: {}".format(...))
+            return ActionSubject(..., meta)
+        def _foo_subject_name(self):
+            # No extra detail to include
+            meta = self.meta.derive("name()", None)
+
+    Args:
+        self: implicitly added.
+        expr: ([`str`]) human-friendly description of the call chain expression.
+            e.g., if `foo_subject.bar_named("baz")` returns a child-subject,
+            then `bar_named("baz")` would be the expression.
+        details: (optional [`list`] of [`str`]) human-friendly descriptions of additional
+            detail to include in errors. This is usually additional information
+            the child Subject wouldn't include itself. e.g. if
+            `foo.first_action_argv().contains(1)`, returned a ListSubject, then
+            including "first action: Action FooCompile" helps add context to the
+            error message. If there is no additional detail to include, pass
+            None.
+        format_str_kwargs: ([`dict`] of format()-kwargs) additional kwargs to
+            make available to [`format_str`] calls.
+
+    Returns:
+        [`ExpectMeta`] object.
+    """
+    if not details:
+        details = []
+    if expr:
+        exprs = [expr]
+    else:
+        exprs = []
+
+    if format_str_kwargs:
+        # Copy before merging so the parent's kwargs are never mutated.
+        final_format_kwargs = {k: v for k, v in self._format_str_kwargs.items()}
+        final_format_kwargs.update(format_str_kwargs)
+    else:
+        final_format_kwargs = self._format_str_kwargs
+
+    return _expect_meta_new(
+        env = self.env,
+        exprs = self._exprs + exprs,
+        details = self._details + details,
+        format_str_kwargs = final_format_kwargs,
+    )
+
+def _expect_meta_format_str(self, template):
+    """Interpolate contextual keywords into a string.
+
+    Method: ExpectMeta.format_str
+
+    This uses the normal `format()` style (i.e. using `{}`). Keywords
+    refer to parts of the call chain.
+
+    The particular keywords supported depend on the call chain (extra
+    keywords are added via `derive(format_str_kwargs = ...)`). The following
+    are always present:
+      {workspace}: The name of the workspace, e.g. "rules_proto".
+      {test_name}: The base name of the current test.
+
+    Args:
+        self: implicitly added.
+        template: ([`str`]) the format template string to use.
+
+    Returns:
+        [`str`]; the template with parameters replaced.
+    """
+    return template.format(**self._format_str_kwargs)
+
+def _expect_meta_get_provider(self, target, provider):
+    """Get a provider from a target.
+
+    This is equivalent to `target[provider]`; the extra level of indirection
+    is to aid testing.
+
+    Args:
+        self: implicitly added.
+        target: ([`Target`]) the target to get the provider from.
+        provider: The provider type to get.
+
+    Returns:
+        The found provider, or fails if not present.
+    """
+    # Tests can inject a fake lookup via `env.get_provider`.
+    if hasattr(self.env, "get_provider"):
+        return self.env.get_provider(target, provider)
+    else:
+        return target[provider]
+
+def _expect_meta_has_provider(self, target, provider):
+    """Tells if a target has a provider.
+
+    This is equivalent to `provider in target`; the extra level of indirection
+    is to aid testing.
+
+    Args:
+        self: implicitly added.
+        target: ([`Target`]) the target to check for the provider.
+        provider: the provider type to check for.
+
+    Returns:
+        True if the target has the provider, False if not.
+    """
+    # Tests can inject a fake check via `env.has_provider`.
+    if hasattr(self.env, "has_provider"):
+        return self.env.has_provider(target, provider)
+    else:
+        return provider in target
+
+def _expect_meta_add_failure(self, problem, actual):
+    """Adds a failure with context.
+
+    Method: ExpectMeta.add_failure
+
+    Adds the given error message. Context from the subject and prior call chains
+    is automatically added.
+
+    Args:
+        self: implicitly added.
+        problem: ([`str`]) a string describing the expected value or problem
+            detected, and the expected values that weren't satisfied. A colon
+            should be used to separate the description from the values.
+            The description should be brief and include the word "expected",
+            e.g. "expected: foo", or "expected values missing: <list of missing>",
+            the key point being the reader can easily take the values shown
+            and look for it in the actual values displayed below it.
+        actual: ([`str`]) a string describing the values observed. A colon should
+            be used to separate the description from the observed values.
+            The description should be brief and include the word "actual", e.g.,
+            "actual: bar". The values should include the actual, observed,
+            values and pertinent information about them.
+    """
+    # Skip empty detail strings so the "where..." section stays compact.
+    details = "\n".join([
+        "  {}".format(detail)
+        for detail in self._details
+        if detail
+    ])
+    if details:
+        details = "where...\n" + details
+    msg = """\
+in test: {test}
+value of: {expr}
+{problem}
+{actual}
+{details}
+""".format(
+        test = self.ctx.label,
+        expr = ".".join(self._exprs),
+        problem = problem,
+        actual = actual,
+        details = details,
+    )
+    _expect_meta_call_fail(self, msg)
+
+def _expect_meta_call_fail(self, msg):
+    """Adds a failure to the test run.
+
+    Uses `env.fail` when the env provides it; otherwise reports through
+    skylib's unittest asserts.
+
+    Args:
+        self: implicitly added.
+        msg: ([`str`]) the failure message.
+    """
+    fail_func = getattr(self.env, "fail", None)
+    if fail_func != None:
+        fail_func(msg)
+    else:
+        # Add a leading newline because unittest prepends the repr() of the
+        # function under test, which is often long and uninformative, making
+        # the first line of our message hard to see.
+        ut_asserts.true(self.env, False, "\n" + msg)
+
+# Public API struct. The PascalCase name (instead of a snake_case constant)
+# is deliberate so the symbol shows up nicely in the generated docs.
+# buildifier: disable=name-conventions
+ExpectMeta = struct(
+    new = _expect_meta_new,
+    derive = _expect_meta_derive,
+    format_str = _expect_meta_format_str,
+    get_provider = _expect_meta_get_provider,
+    has_provider = _expect_meta_has_provider,
+    add_failure = _expect_meta_add_failure,
+    call_fail = _expect_meta_call_fail,
+)
diff --git a/lib/private/failure_messages.bzl b/lib/private/failure_messages.bzl
new file mode 100644
index 0000000..006a0f2
--- /dev/null
+++ b/lib/private/failure_messages.bzl
@@ -0,0 +1,311 @@
+# Copyright 2023 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Functions to aid formatting Truth failure messages."""
+
+load(
+ ":truth_common.bzl",
+ "enumerate_list_as_lines",
+ "guess_format_value",
+ "maybe_sorted",
+)
+
+def format_actual_collection(actual, name = "values", sort = True):
+ """Creates an error message for the observed values of a collection.
+
+ Args:
+ actual: ([`collection`]) the values to show
+ name: ([`str`]) the conceptual name of the collection.
+ sort: ([`bool`]) If true, the collection will be sorted for display.
+ Returns:
+ ([`str`]) the formatted error message.
+ """
+ actual = maybe_sorted(actual, sort)
+ return "actual {name}:\n{actual}".format(
+ name = name,
+ actual = enumerate_list_as_lines(actual, prefix = " "),
+ )
+
+def format_failure_missing_all_values(
+ element_plural_name,
+ container_name,
+ *,
+ missing,
+ actual,
+ sort = True):
+ """Create error messages when a container is missing all the expected values.
+
+ Args:
+ element_plural_name: ([`str`]) the plural word for the values in the container.
+ container_name: ([`str`]) the conceptual name of the container.
+ missing: the collection of values that are missing.
+ actual: the collection of values observed.
+ sort: ([`bool`]) if True, then missing and actual are sorted. If False, they
+ are not sorted.
+
+ Returns:
+ [`tuple`] of ([`str`] problem, [`str`] actual), suitable for passing to ExpectMeta's
+ `add_failure()` method.
+ """
+ missing = maybe_sorted(missing, sort)
+ problem_msg = "{count} expected {name} missing from {container}:\n{missing}".format(
+ count = len(missing),
+ name = element_plural_name,
+ container = container_name,
+ missing = enumerate_list_as_lines(missing, prefix = " "),
+ )
+ actual_msg = format_actual_collection(actual, name = container_name, sort = sort)
+ return problem_msg, actual_msg
+
+def format_failure_unexpected_values(*, none_of, unexpected, actual, sort = True):
+ """Create error messages when a container has unexpected values.
+
+ Args:
+ none_of: ([`str`]) description of the values that were not expected to be
+ present.
+ unexpected: ([`collection`]) the values that were unexpectedly found.
+ actual: ([`collection`]) the observed values.
+ sort: ([`bool`]) True if the collections should be sorted for output.
+
+ Returns:
+ [`tuple`] of ([`str`] problem, [`str`] actual), suitable for passing to ExpectMeta's
+ `add_failure()` method.
+ """
+ unexpected = maybe_sorted(unexpected, sort)
+ problem_msg = "expected not to contain any of: {none_of}\nbut {count} found:\n{unexpected}".format(
+ none_of = none_of,
+ count = len(unexpected),
+ unexpected = enumerate_list_as_lines(unexpected, prefix = " "),
+ )
+ actual_msg = format_actual_collection(actual, sort = sort)
+ return problem_msg, actual_msg
+
+def format_failure_unexpected_value(container_name, unexpected, actual, sort = True):
+ """Create error messages when a container contains a specific unexpected value.
+
+ Args:
+ container_name: ([`str`]) conceptual name of the container.
+ unexpected: the value that shouldn't have been in `actual`.
+ actual: ([`collection`]) the observed values.
+ sort: ([`bool`]) True if the collections should be sorted for output.
+
+ Returns:
+ [`tuple`] of ([`str`] problem, [`str`] actual), suitable for passing to ExpectMeta's
+ `add_failure()` method.
+ """
+ problem_msg = "expected not to contain: {}".format(unexpected)
+ actual_msg = format_actual_collection(actual, name = container_name, sort = sort)
+ return problem_msg, actual_msg
+
+def format_problem_dict_expected(
+ *,
+ expected,
+ missing_keys,
+ unexpected_keys,
+ incorrect_entries,
+ container_name = "dict",
+ key_plural_name = "keys"):
+ """Formats an expected dict, describing what went wrong.
+
+ Args:
+ expected: ([`dict`]) the full expected value.
+ missing_keys: ([`list`]) the keys that were not found.
+ unexpected_keys: ([`list`]) the keys that should not have existed
+ incorrect_entries: ([`dict`] of key to [`DictEntryMismatch`]) (see [`_compare_dict`]).
+ container_name: ([`str`]) conceptual name of the `expected` dict.
+ key_plural_name: ([`str`]) the plural word for the keys of the `expected` dict.
+ Returns:
+ [`str`] that describes the problem.
+ """
+ problem_lines = ["expected {}: {{\n{}\n}}".format(
+ container_name,
+ format_dict_as_lines(expected),
+ )]
+ if missing_keys:
+ problem_lines.append("{count} missing {key_plural_name}:\n{keys}".format(
+ count = len(missing_keys),
+ key_plural_name = key_plural_name,
+ keys = enumerate_list_as_lines(sorted(missing_keys), prefix = " "),
+ ))
+ if unexpected_keys:
+ problem_lines.append("{count} unexpected {key_plural_name}:\n{keys}".format(
+ count = len(unexpected_keys),
+ key_plural_name = key_plural_name,
+ keys = enumerate_list_as_lines(sorted(unexpected_keys), prefix = " "),
+ ))
+ if incorrect_entries:
+ problem_lines.append("{} incorrect entries:".format(len(incorrect_entries)))
+ for key, mismatch in incorrect_entries.items():
+ problem_lines.append("key {}:".format(key))
+ problem_lines.append(" expected: {}".format(mismatch.expected))
+ problem_lines.append(" but was : {}".format(mismatch.actual))
+ return "\n".join(problem_lines)
+
+def format_problem_expected_exactly(expected, sort = True):
+ """Creates an error message describing the expected values.
+
+ This is for use when the observed value must have all the values and
+ no more.
+
+ Args:
+ expected: ([`collection`]) the expected values.
+ sort: ([`bool`]) True if to sort the values for display.
+ Returns:
+ ([`str`]) the formatted problem message
+ """
+ expected = maybe_sorted(expected, sort)
+ return "expected exactly:\n{}".format(
+ enumerate_list_as_lines(expected, prefix = " "),
+ )
+
+def format_problem_missing_any_values(any_of, sort = True):
+ """Create an error message for when any of a collection of values are missing.
+
+ Args:
+ any_of: ([`collection`]) the set of values, any of which were missing.
+ sort: ([`bool`]) True if the collection should be sorted for display.
+ Returns:
+ ([`str`]) the problem description string.
+ """
+ any_of = maybe_sorted(any_of, sort)
+ return "expected to contain any of:\n{}".format(
+ enumerate_list_as_lines(any_of, prefix = " "),
+ )
+
+def format_problem_missing_required_values(missing, sort = True):
+ """Create an error message for when the missing values must all be present.
+
+ Args:
+ missing: ([`collection`]) the values that must all be present.
+ sort: ([`bool`]) True if to sort the values for display
+ Returns:
+ ([`str`]) the problem description string.
+ """
+ missing = maybe_sorted(missing, sort)
+ return "{count} missing:\n{missing}".format(
+ count = len(missing),
+ missing = enumerate_list_as_lines(missing, prefix = " "),
+ )
+
+def format_problem_predicates_did_not_match(
+ missing,
+ *,
+ element_plural_name = "elements",
+ container_name = "values"):
+ """Create an error message for when a list of predicates didn't match.
+
+ Args:
+ missing: ([`list`] of [`Matcher`]) (see `_match_custom`).
+ element_plural_name: ([`str`]) the plural word for the values in the container.
+ container_name: ([`str`]) the conceptual name of the container.
+ Returns:
+ ([`str`]) the problem description string.
+ """
+
+ return "{count} expected {name} missing from {container}:\n{missing}".format(
+ count = len(missing),
+ name = element_plural_name,
+ container = container_name,
+ missing = enumerate_list_as_lines(
+ [m.desc for m in missing],
+ prefix = " ",
+ ),
+ )
+
+def format_problem_matched_out_of_order(matches):
+ """Create an error message for when expected values matched in the wrong order.
+
+ Args:
+ matches: ([`list`] of [`MatchResult`]) see `_check_contains_at_least_predicates()`.
+ Returns:
+ ([`str`]) the problem description string.
+ """
+ format_matched_value = guess_format_value([m.matched_value for m in matches])
+
+ def format_value(value):
+ # The matcher might be a Matcher object or a plain value.
+ # If the matcher description equals the matched value, then we omit
+ # the extra matcher text because (1) it'd be redundant, and (2) such
+ # matchers are usually wrappers around an underlying value, e.g.
+ # how contains_exactly uses matcher predicates.
+ if hasattr(value.matcher, "desc") and value.matcher.desc != value.matched_value:
+ match_desc = value.matcher.desc
+ match_info = " (matched: {})".format(
+ format_matched_value(value.matched_value),
+ )
+ verb = "matched"
+ else:
+ match_desc = format_matched_value(value.matched_value)
+ match_info = ""
+ verb = "found"
+
+ return "{match_desc} {verb} at offset {at}{match_info}".format(
+ at = value.found_at,
+ verb = verb,
+ match_desc = match_desc,
+ match_info = match_info,
+ )
+
+ return "expected values all found, but with incorrect order:\n{}".format(
+ enumerate_list_as_lines(matches, format_value = format_value, prefix = " "),
+ )
+
+def format_problem_unexpected_values(unexpected, sort = True):
+ """Create an error message for when there are unexpected values.
+
+ Args:
+ unexpected: ([`list`]) the unexpected values.
+ sort: ([`bool`]) true if the values should be sorted for output.
+
+ Returns:
+ ([`str`]) the problem description string.
+ """
+ unexpected = maybe_sorted(unexpected, sort)
+ return "{count} unexpected:\n{unexpected}".format(
+ count = len(unexpected),
+ unexpected = enumerate_list_as_lines(unexpected, prefix = " "),
+ )
+
+def format_dict_as_lines(mapping, prefix = "", format_value = None, sort = True):
+ """Format a dictionary as lines of key->value for easier reading.
+
+ Args:
+ mapping: [`dict`] to show
+ prefix: ([`str`]) prefix to prepend to every line.
+ format_value: (optional callable) takes a value from the dictionary
+ to show and returns the string that should be shown. If not
+ specified, one will be automatically determined from the
+ dictionary's values.
+ sort: ([`bool`]) `True` if the output should be sorted by dict key (if
+ the keys are sortable).
+
+ Returns:
+ ([`str`]) the dictionary formatted into lines
+ """
+ lines = []
+ if not mapping:
+ return " <empty dict>"
+ format_value = format_value or guess_format_value(mapping.values())
+ keys = maybe_sorted(mapping.keys(), sort)
+
+ max_key_width = max([len(str(key)) for key in keys])
+
+ for key in keys:
+ lines.append("{prefix} {key}{pad}: {value}".format(
+ prefix = prefix,
+ key = key,
+ pad = " " * (max_key_width - len(str(key))),
+ value = format_value(mapping[key]),
+ ))
+ return "\n".join(lines)
diff --git a/lib/private/file_subject.bzl b/lib/private/file_subject.bzl
new file mode 100644
index 0000000..e8fc825
--- /dev/null
+++ b/lib/private/file_subject.bzl
@@ -0,0 +1,104 @@
+# Copyright 2023 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""# FileSubject"""
+
+load(":str_subject.bzl", "StrSubject")
+
+def _file_subject_new(file, meta):
+ """Creates a FileSubject asserting against the given file.
+
+ Method: FileSubject.new
+
+ Args:
+ file: ([`File`]) the file to assert against.
+ meta: ([`ExpectMeta`])
+
+ Returns:
+ [`FileSubject`] object.
+ """
+
+ # buildifier: disable=uninitialized
+ public = struct(
+ # keep sorted start
+ equals = lambda *a, **k: _file_subject_equals(self, *a, **k),
+ path = lambda *a, **k: _file_subject_path(self, *a, **k),
+ short_path_equals = lambda *a, **k: _file_subject_short_path_equals(self, *a, **k),
+ # keep sorted end
+ )
+ self = struct(file = file, meta = meta, public = public)
+ return public
+
+def _file_subject_equals(self, expected):
+ """Asserts that `expected` references the same file as `self`.
+
+ This uses Bazel's notion of [`File`] equality, which usually includes
+ the configuration, owning action, internal hash, etc of a `File`. The
+ particulars of comparison depend on the actual Java type implementing
+ the `File` object (some ignore owner, for example).
+
+ NOTE: This does not compare file content. Starlark cannot read files.
+
+ NOTE: Same files generated by different owners are likely considered
+ not equal to each other. The alternative for this is to assert the
+ `File.path` paths are equal using [`FileSubject.path()`]
+
+ Method: FileSubject.equals
+ """
+
+ if self.file == expected:
+ return
+ self.meta.add_failure(
+ "expected: {}".format(expected),
+ "actual: {}".format(self.file),
+ )
+
+def _file_subject_path(self):
+ """Returns a `StrSubject` asserting on the file's `path` value.
+
+ Method: FileSubject.path
+
+ Returns:
+ [`StrSubject`] object.
+ """
+ return StrSubject.new(
+ self.file.path,
+ meta = self.meta.derive("path()"),
+ )
+
+def _file_subject_short_path_equals(self, path):
+ """Asserts the file's short path is equal to the given path.
+
+ Method: FileSubject.short_path_equals
+
+ Args:
+ self: implicitly added.
+ path: ([`str`]) the value the file's `short_path` must be equal to.
+ """
+ path = self.meta.format_str(path)
+ if path == self.file.short_path:
+ return
+ self.meta.add_failure(
+ "expected: {}".format(path),
+ "actual: {}".format(self.file.short_path),
+ )
+
+# We use this name so it shows up nice in docs.
+# buildifier: disable=name-conventions
+FileSubject = struct(
+ new = _file_subject_new,
+ equals = _file_subject_equals,
+ path = _file_subject_path,
+ short_path_equals = _file_subject_short_path_equals,
+)
diff --git a/lib/private/instrumented_files_info_subject.bzl b/lib/private/instrumented_files_info_subject.bzl
new file mode 100644
index 0000000..4ba0b3b
--- /dev/null
+++ b/lib/private/instrumented_files_info_subject.bzl
@@ -0,0 +1,73 @@
+# Copyright 2023 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""# InstrumentedFilesInfoSubject"""
+
+load(":depset_file_subject.bzl", "DepsetFileSubject")
+
+def _instrumented_files_info_subject_new(info, *, meta):
+ """Creates a subject to assert on `InstrumentedFilesInfo` providers.
+
+ Method: InstrumentedFilesInfoSubject.new
+
+ Args:
+ info: ([`InstrumentedFilesInfo`]) provider instance.
+ meta: ([`ExpectMeta`]) the meta data about the call chain.
+
+ Returns:
+ An `InstrumentedFilesInfoSubject` struct.
+ """
+ self = struct(
+ actual = info,
+ meta = meta,
+ )
+ public = struct(
+ actual = info,
+ instrumented_files = lambda *a, **k: _instrumented_files_info_subject_instrumented_files(self, *a, **k),
+ metadata_files = lambda *a, **k: _instrumented_files_info_subject_metadata_files(self, *a, **k),
+ )
+ return public
+
+def _instrumented_files_info_subject_instrumented_files(self):
+ """Returns a `DepsetFileSubject` of the instrumented files.
+
+ Method: InstrumentedFilesInfoSubject.instrumented_files
+
+ Args:
+ self: implicitly added
+ """
+ return DepsetFileSubject.new(
+ self.actual.instrumented_files,
+ meta = self.meta.derive("instrumented_files()"),
+ )
+
+def _instrumented_files_info_subject_metadata_files(self):
+ """Returns a `DepsetFileSubject` of the metadata files.
+
+ Method: InstrumentedFilesInfoSubject.metadata_files
+
+ Args:
+ self: implicitly added
+ """
+ return DepsetFileSubject.new(
+ self.actual.metadata_files,
+ meta = self.meta.derive("metadata_files()"),
+ )
+
+# We use this name so it shows up nice in docs.
+# buildifier: disable=name-conventions
+InstrumentedFilesInfoSubject = struct(
+ new = _instrumented_files_info_subject_new,
+ instrumented_files = _instrumented_files_info_subject_instrumented_files,
+ metadata_files = _instrumented_files_info_subject_metadata_files,
+)
diff --git a/lib/private/int_subject.bzl b/lib/private/int_subject.bzl
new file mode 100644
index 0000000..cb8dda3
--- /dev/null
+++ b/lib/private/int_subject.bzl
@@ -0,0 +1,102 @@
+# Copyright 2023 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""# IntSubject"""
+
+load("@bazel_skylib//lib:types.bzl", "types")
+load(":check_util.bzl", "check_not_equals", "common_subject_is_in")
+load(":truth_common.bzl", "repr_with_type")
+
+def _int_subject_new(value, meta):
+ """Create an "IntSubject" struct.
+
+ Method: IntSubject.new
+
+ Args:
+ value: (optional [`int`]) the value to perform asserts against may be None.
+ meta: ([`ExpectMeta`]) the meta data about the call chain.
+
+ Returns:
+ [`IntSubject`].
+ """
+ if not types.is_int(value) and value != None:
+ fail("int required, got: {}".format(repr_with_type(value)))
+
+ # buildifier: disable=uninitialized
+ public = struct(
+ # keep sorted start
+ equals = lambda *a, **k: _int_subject_equals(self, *a, **k),
+ is_greater_than = lambda *a, **k: _int_subject_is_greater_than(self, *a, **k),
+ is_in = lambda *a, **k: common_subject_is_in(self, *a, **k),
+ not_equals = lambda *a, **k: _int_subject_not_equals(self, *a, **k),
+ # keep sorted end
+ )
+ self = struct(actual = value, meta = meta)
+ return public
+
+def _int_subject_equals(self, other):
+ """Assert that the subject is equal to the given value.
+
+ Method: IntSubject.equals
+
+ Args:
+ self: implicitly added.
+ other: ([`int`]) value the subject must be equal to.
+ """
+ if self.actual == other:
+ return
+ self.meta.add_failure(
+ "expected: {}".format(other),
+ "actual: {}".format(self.actual),
+ )
+
+def _int_subject_is_greater_than(self, other):
+ """Asserts that the subject is greater than the given value.
+
+ Method: IntSubject.is_greater_than
+
+ Args:
+ self: implicitly added.
+ other: ([`int`]) value the subject must be greater than.
+ """
+ if self.actual != None and other != None and self.actual > other:
+ return
+ self.meta.add_failure(
+ "expected to be greater than: {}".format(other),
+ "actual: {}".format(self.actual),
+ )
+
+def _int_subject_not_equals(self, unexpected):
+ """Assert that the int is not equal to `unexpected`.
+
+ Method: IntSubject.not_equals
+
+ Args:
+ self: implicitly added
+ unexpected: ([`int`]) the value actual cannot equal.
+ """
+ return check_not_equals(
+ actual = self.actual,
+ unexpected = unexpected,
+ meta = self.meta,
+ )
+
+# We use this name so it shows up nice in docs.
+# buildifier: disable=name-conventions
+IntSubject = struct(
+ new = _int_subject_new,
+ equals = _int_subject_equals,
+ is_greater_than = _int_subject_is_greater_than,
+ not_equals = _int_subject_not_equals,
+)
diff --git a/lib/private/label_subject.bzl b/lib/private/label_subject.bzl
new file mode 100644
index 0000000..f801ef7
--- /dev/null
+++ b/lib/private/label_subject.bzl
@@ -0,0 +1,83 @@
+# Copyright 2023 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""# LabelSubject"""
+
+load("@bazel_skylib//lib:types.bzl", "types")
+load(":check_util.bzl", "common_subject_is_in")
+load(":truth_common.bzl", "to_list")
+
+def _label_subject_new(label, meta):
+ """Creates a new `LabelSubject` for asserting `Label` objects.
+
+ Method: LabelSubject.new
+
+ Args:
+ label: ([`Label`]) the label to check against.
+ meta: ([`ExpectMeta`]) the metadata about the call chain.
+
+ Returns:
+ [`LabelSubject`].
+ """
+
+ # buildifier: disable=uninitialized
+ public = struct(
+ # keep sorted start
+ equals = lambda *a, **k: _label_subject_equals(self, *a, **k),
+ is_in = lambda *a, **k: _label_subject_is_in(self, *a, **k),
+ # keep sorted end
+ )
+ self = struct(actual = label, meta = meta)
+ return public
+
+def _label_subject_equals(self, other):
+ """Asserts the label is equal to `other`.
+
+ Method: LabelSubject.equals
+
+ Args:
+ self: implicitly added.
+ other: ([`Label`] | [`str`]) the expected value. If a `str` is passed, it
+ will be converted to a `Label` using the `Label` function.
+ """
+ if types.is_string(other):
+ other = Label(other)
+ if self.actual == other:
+ return
+ self.meta.add_failure(
+ "expected: {}".format(other),
+ "actual: {}".format(self.actual),
+ )
+
+def _label_subject_is_in(self, any_of):
+ """Asserts that the label is any of the provided values.
+
+ Args:
+ self: implicitly added.
+ any_of: ([`collection`] of ([`Label`] | [`str`])) If strings are
+ provided, they must be parsable by `Label`.
+ """
+ any_of = [
+ Label(v) if types.is_string(v) else v
+ for v in to_list(any_of)
+ ]
+ common_subject_is_in(self, any_of)
+
+# We use this name so it shows up nice in docs.
+# buildifier: disable=name-conventions
+LabelSubject = struct(
+ new = _label_subject_new,
+ equals = _label_subject_equals,
+ is_in = _label_subject_is_in,
+)
diff --git a/lib/private/matching.bzl b/lib/private/matching.bzl
new file mode 100644
index 0000000..6093488
--- /dev/null
+++ b/lib/private/matching.bzl
@@ -0,0 +1,200 @@
+# Copyright 2023 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Implementation of matchers."""
+
+def _match_custom(desc, func):
+ """Wrap an arbitrary function up as a Matcher.
+
+ Method: Matcher.new
+
+ `Matcher` struct attributes:
+
+ * `desc`: ([`str`]) a human-friendly description
+ * `match`: (callable) accepts 1 positional arg (the value to match) and
+ returns [`bool`] (`True` if it matched, `False` if not).
+
+ Args:
+ desc: ([`str`]) a human-friendly string describing what is matched.
+ func: (callable) accepts 1 positional arg (the value to match) and
+ returns [`bool`] (`True` if it matched, `False` if not).
+
+ Returns:
+ [`Matcher`] (see above).
+ """
+ return struct(desc = desc, match = func)
+
+def _match_equals_wrapper(value):
+ """Match that a value equals `value`, but use `value` as the `desc`.
+
+ This is a helper so that simple equality comparisons can re-use predicate
+ based APIs.
+
+ Args:
+ value: object, the value that must be equal to.
+
+ Returns:
+ [`Matcher`] (see `_match_custom()`), whose description is `value`.
+ """
+ return _match_custom(value, lambda other: other == value)
+
+def _match_file_basename_contains(substr):
+ """Match that a `File.basename` string contains a substring.
+
+ Args:
+ substr: ([`str`]) the substring to match.
+
+ Returns:
+ [`Matcher`] (see `_match_custom()`).
+ """
+ return struct(
+ desc = "<basename contains '{}'>".format(substr),
+ match = lambda f: substr in f.basename,
+ )
+
+def _match_file_path_matches(pattern):
+ """Match that a `File.path` string matches a glob-style pattern.
+
+ Args:
+ pattern: ([`str`]) the pattern to match. "*" can be used to denote
+ "match anything".
+
+ Returns:
+ [`Matcher`] (see `_match_custom`).
+ """
+ parts = pattern.split("*")
+ return struct(
+ desc = "<path matches '{}'>".format(pattern),
+ match = lambda f: _match_parts_in_order(f.path, parts),
+ )
+
+def _match_is_in(values):
+ """Match that the to-be-matched value is in a collection of other values.
+
+ This is equivalent to: `to_be_matched in values`. See `_match_contains`
+ for the reversed operation.
+
+ Args:
+ values: The collection that the value must be within.
+
+ Returns:
+ [`Matcher`] (see `_match_custom()`).
+ """
+ return struct(
+ desc = "<is any of {}>".format(repr(values)),
+ match = lambda v: v in values,
+ )
+
+def _match_never(desc):
+ """A matcher that never matches.
+
+ This is mostly useful for testing, as it allows preventing any match
+ while providing a custom description.
+
+ Args:
+ desc: ([`str`]) human-friendly string.
+
+ Returns:
+ [`Matcher`] (see `_match_custom`).
+ """
+ return struct(
+ desc = desc,
+ match = lambda value: False,
+ )
+
+def _match_contains(contained):
+ """Match that `contained` is within the to-be-matched value.
+
+ This is equivalent to: `contained in to_be_matched`. See `_match_is_in`
+ for the reversed operation.
+
+ Args:
+ contained: the value that to-be-matched value must contain.
+
+ Returns:
+ [`Matcher`] (see `_match_custom`).
+ """
+ return struct(
+ desc = "<contains {}>".format(contained),
+ match = lambda value: contained in value,
+ )
+
+def _match_str_endswith(suffix):
+ """Match that a string ends with the given suffix.
+
+ Args:
+ suffix: ([`str`]) the suffix that must be present
+
+ Returns:
+ [`Matcher`] (see `_match_custom`).
+ """
+ return struct(
+ desc = "<endswith '{}'>".format(suffix),
+ match = lambda value: value.endswith(suffix),
+ )
+
+def _match_str_matches(pattern):
+ """Match that a string matches a glob-style pattern.
+
+ Args:
+ pattern: ([`str`]) the pattern to match. `*` can be used to denote
+ "match anything". There is an implicit `*` at the start and
+ end of the pattern.
+
+ Returns:
+ [`Matcher`] object.
+ """
+ parts = pattern.split("*")
+ return struct(
+ desc = "<matches '{}'>".format(pattern),
+ match = lambda value: _match_parts_in_order(value, parts),
+ )
+
+def _match_str_startswith(prefix):
+ """Match that a string starts with the given prefix.
+
+ Args:
+ prefix: ([`str`]) the prefix that must be present
+
+ Returns:
+ [`Matcher`] (see `_match_custom`).
+ """
+ return struct(
+ desc = "<startswith '{}'>".format(prefix),
+ match = lambda value: value.startswith(prefix),
+ )
+
+def _match_parts_in_order(string, parts):
+ start = 0
+ for part in parts:
+ start = string.find(part, start)
+ if start == -1:
+ return False
+ return True
+
+# For the definition of a `Matcher` object, see `_match_custom`.
+matching = struct(
+ # keep sorted start
+ contains = _match_contains,
+ custom = _match_custom,
+ equals_wrapper = _match_equals_wrapper,
+ file_basename_contains = _match_file_basename_contains,
+ file_path_matches = _match_file_path_matches,
+ is_in = _match_is_in,
+ never = _match_never,
+ str_endswith = _match_str_endswith,
+ str_matches = _match_str_matches,
+ str_startswith = _match_str_startswith,
+ # keep sorted end
+)
diff --git a/lib/private/ordered.bzl b/lib/private/ordered.bzl
new file mode 100644
index 0000000..c9a0ed9
--- /dev/null
+++ b/lib/private/ordered.bzl
@@ -0,0 +1,64 @@
+# Copyright 2023 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""# Ordered"""
+
+# This is just a stub so doc generation is nicer.
+def _ordered_in_order():
+ """Checks that the values were in order."""
+
+IN_ORDER = struct(
+ in_order = _ordered_in_order,
+)
+
+def _ordered_incorrectly_new(format_problem, format_actual, meta):
+ """Creates a new `Ordered` object that fails due to incorrectly ordered values.
+
+ This creates an [`Ordered`] object that always fails. If order is correct,
+ use the `IN_ORDER` constant.
+
+ Args:
+ format_problem: (callable) accepts no args and returns string (the
+ reported problem description).
+ format_actual: (callable) accepts no args and returns string (the
+ reported actual description).
+ meta: ([`ExpectMeta`]) used to report the failure.
+
+ Returns:
+ [`Ordered`] object.
+ """
+ self = struct(
+ meta = meta,
+ format_problem = format_problem,
+ format_actual = format_actual,
+ )
+ public = struct(
+ in_order = lambda *a, **k: _ordered_incorrectly_in_order(self, *a, **k),
+ )
+ return public
+
+def _ordered_incorrectly_in_order(self):
+ """Checks that the values were in order.
+
+ Args:
+ self: implicitly added.
+ """
+ self.meta.add_failure(self.format_problem(), self.format_actual())
+
+# We use this name so it shows up nice in docs.
+# buildifier: disable=name-conventions
+OrderedIncorrectly = struct(
+ new = _ordered_incorrectly_new,
+ in_order = _ordered_incorrectly_in_order,
+)
diff --git a/lib/private/run_environment_info_subject.bzl b/lib/private/run_environment_info_subject.bzl
new file mode 100644
index 0000000..43d9e67
--- /dev/null
+++ b/lib/private/run_environment_info_subject.bzl
@@ -0,0 +1,80 @@
+# Copyright 2023 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""# RunEnvironmentInfoSubject"""
+
+load(":collection_subject.bzl", "CollectionSubject")
+load(":dict_subject.bzl", "DictSubject")
+
+def _run_environment_info_subject_new(info, *, meta):
+ """Creates a new `RunEnvironmentInfoSubject`
+
+ Method: RunEnvironmentInfoSubject.new
+
+ Args:
+ info: ([`RunEnvironmentInfo`]) provider instance.
+ meta: ([`ExpectMeta`]) of call chain information.
+ """
+
+ # buildifier: disable=uninitialized
+ public = struct(
+ environment = lambda *a, **k: _run_environment_info_subject_environment(self, *a, **k),
+ inherited_environment = lambda *a, **k: _run_environment_info_subject_inherited_environment(self, *a, **k),
+ )
+ self = struct(
+ actual = info,
+ meta = meta,
+ )
+ return public
+
+def _run_environment_info_subject_environment(self):
+ """Creates a `DictSubject` to assert on the environment dict.
+
+ Method: RunEnvironmentInfoSubject.environment
+
+ Args:
+ self: implicitly added
+
+ Returns:
+ [`DictSubject`] of the str->str environment map.
+ """
+ return DictSubject.new(
+ self.actual.environment,
+ meta = self.meta.derive("environment()"),
+ )
+
+def _run_environment_info_subject_inherited_environment(self):
+ """Creates a `CollectionSubject` to assert on the inherited_environment list.
+
+ Method: RunEnvironmentInfoSubject.inherited_environment
+
+ Args:
+ self: implicitly added
+
+ Returns:
+ [`CollectionSubject`] of [`str`]; from the
+ [`RunEnvironmentInfo.inherited_environment`] list.
+ """
+ return CollectionSubject.new(
+ self.actual.inherited_environment,
+ meta = self.meta.derive("inherited_environment()"),
+ )
+
+# We use this name so it shows up nice in docs.
+# buildifier: disable=name-conventions
+RunEnvironmentInfoSubject = struct(
+ new = _run_environment_info_subject_new,
+ environment = _run_environment_info_subject_environment,
+ inherited_environment = _run_environment_info_subject_inherited_environment,
+)
diff --git a/lib/private/runfiles_subject.bzl b/lib/private/runfiles_subject.bzl
new file mode 100644
index 0000000..f517795
--- /dev/null
+++ b/lib/private/runfiles_subject.bzl
@@ -0,0 +1,266 @@
+# Copyright 2023 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""# RunfilesSubject"""
+
+load(
+ "//lib:util.bzl",
+ "is_runfiles",
+ "runfiles_paths",
+)
+load(
+ ":check_util.bzl",
+ "check_contains_exactly",
+ "check_contains_predicate",
+ "check_not_contains_predicate",
+)
+load(":collection_subject.bzl", "CollectionSubject")
+load(
+ ":failure_messages.bzl",
+ "format_actual_collection",
+ "format_failure_unexpected_value",
+ "format_problem_expected_exactly",
+ "format_problem_missing_required_values",
+ "format_problem_unexpected_values",
+)
+load(":matching.bzl", "matching")
+load(":truth_common.bzl", "to_list")
+
+def _runfiles_subject_new(runfiles, meta, kind = None):
+ """Creates a "RunfilesSubject" struct.
+
+ Method: RunfilesSubject.new
+
+ Args:
+ runfiles: ([`runfiles`]) the runfiles to check against.
+ meta: ([`ExpectMeta`]) the metadata about the call chain.
+ kind: (optional [`str`]) what type of runfiles they are, usually "data"
+ or "default". If not known or not applicable, use None.
+
+ Returns:
+ [`RunfilesSubject`] object.
+ """
+ self = struct(
+ runfiles = runfiles,
+ meta = meta,
+ kind = kind,
+ actual_paths = sorted(runfiles_paths(meta.ctx.workspace_name, runfiles)),
+ )
+ public = struct(
+ # keep sorted start
+ actual = runfiles,
+ contains = lambda *a, **k: _runfiles_subject_contains(self, *a, **k),
+ contains_at_least = lambda *a, **k: _runfiles_subject_contains_at_least(self, *a, **k),
+ contains_exactly = lambda *a, **k: _runfiles_subject_contains_exactly(self, *a, **k),
+ contains_none_of = lambda *a, **k: _runfiles_subject_contains_none_of(self, *a, **k),
+ contains_predicate = lambda *a, **k: _runfiles_subject_contains_predicate(self, *a, **k),
+ not_contains = lambda *a, **k: _runfiles_subject_not_contains(self, *a, **k),
+ not_contains_predicate = lambda *a, **k: _runfiles_subject_not_contains_predicate(self, *a, **k),
+ # keep sorted end
+ )
+ return public
+
+def _runfiles_subject_contains(self, expected):
+ """Assert that the runfiles contains the provided path.
+
+ Method: RunfilesSubject.contains
+
+ Args:
+ self: implicitly added.
+ expected: ([`str`]) the path to check is present. This will be formatted
+ using `ExpectMeta.format_str` and its current contextual
+ keywords. Note that paths are runfiles-root relative (i.e.
+ you likely need to include the workspace name.)
+ """
+ expected = self.meta.format_str(expected)
+ matcher = matching.equals_wrapper(expected)
+ return _runfiles_subject_contains_predicate(self, matcher)
+
+def _runfiles_subject_contains_at_least(self, paths):
+ """Assert that the runfiles contains at least all of the provided paths.
+
+ Method: RunfilesSubject.contains_at_least
+
+ All the paths must exist, but extra paths are allowed. Order is not checked.
+ Multiplicity is respected.
+
+ Args:
+ self: implicitly added.
+ paths: ((collection of [`str`]) | [`runfiles`]) the paths that must
+ exist. If a collection of strings is provided, they will be
+ formatted using [`ExpectMeta.format_str`], so its template keywords
+ can be directly passed. If a `runfiles` object is passed, it is
+ converted to a set of path strings.
+ """
+ if is_runfiles(paths):
+ paths = runfiles_paths(self.meta.ctx.workspace_name, paths)
+
+ paths = [self.meta.format_str(p) for p in to_list(paths)]
+
+ # NOTE: We don't return Ordered because there isn't a well-defined order
+ # between the different sub-objects within the runfiles.
+ CollectionSubject.new(
+ self.actual_paths,
+ meta = self.meta,
+ element_plural_name = "paths",
+ container_name = "{}runfiles".format(self.kind + " " if self.kind else ""),
+ ).contains_at_least(paths)
+
+def _runfiles_subject_contains_predicate(self, matcher):
+ """Asserts that `matcher` matches at least one value.
+
+ Method: RunfilesSubject.contains_predicate
+
+ Args:
+ self: implicitly added.
+ matcher: callable that takes 1 positional arg ([`str`] path) and returns
+ boolean.
+ """
+ check_contains_predicate(
+ self.actual_paths,
+ matcher = matcher,
+ format_problem = "expected to contain: {}".format(matcher.desc),
+ format_actual = lambda: format_actual_collection(
+ self.actual_paths,
+ name = "{}runfiles".format(self.kind + " " if self.kind else ""),
+ ),
+ meta = self.meta,
+ )
+
+def _runfiles_subject_contains_exactly(self, paths):
+ """Asserts that the runfiles contains_exactly the set of paths
+
+ Method: RunfilesSubject.contains_exactly
+
+ Args:
+ self: implicitly added.
+ paths: ([`collection`] of [`str`]) the paths to check. These will be
+ formatted using `meta.format_str`, so its template keywords can
+ be directly passed. All the paths must exist in the runfiles exactly
+ as provided, and no extra paths may exist.
+ """
+ paths = [self.meta.format_str(p) for p in to_list(paths)]
+ runfiles_name = "{}runfiles".format(self.kind + " " if self.kind else "")
+
+ check_contains_exactly(
+ expect_contains = paths,
+ actual_container = self.actual_paths,
+ format_actual = lambda: format_actual_collection(
+ self.actual_paths,
+ name = runfiles_name,
+ ),
+ format_expected = lambda: format_problem_expected_exactly(paths, sort = True),
+ format_missing = lambda missing: format_problem_missing_required_values(
+ missing,
+ sort = True,
+ ),
+ format_unexpected = lambda unexpected: format_problem_unexpected_values(
+ unexpected,
+ sort = True,
+ ),
+ format_out_of_order = lambda matches: fail("Should not be called"),
+ meta = self.meta,
+ )
+
+def _runfiles_subject_contains_none_of(self, paths, require_workspace_prefix = True):
+ """Asserts the runfiles contain none of `paths`.
+
+ Method: RunfilesSubject.contains_none_of
+
+ Args:
+ self: implicitly added.
+ paths: ([`collection`] of [`str`]) the paths that should not exist. They should
+ be runfiles root-relative paths (not workspace relative). The value
+ is formatted using `ExpectMeta.format_str` and the current
+ contextual keywords.
+ require_workspace_prefix: ([`bool`]) True to check that the path includes the
+            workspace prefix. This is to guard against accidentally passing a
+ workspace relative path, which will (almost) never exist, and cause
+ the test to always pass. Specify False if the file being checked for
+ is _actually_ a runfiles-root relative path that isn't under the
+ workspace itself.
+ """
+ formatted_paths = []
+ for path in paths:
+ path = self.meta.format_str(path)
+ formatted_paths.append(path)
+ if require_workspace_prefix:
+ _runfiles_subject_check_workspace_prefix(self, path)
+
+ CollectionSubject.new(
+ self.actual_paths,
+ meta = self.meta,
+ ).contains_none_of(formatted_paths)
+
+def _runfiles_subject_not_contains(self, path, require_workspace_prefix = True):
+ """Assert that the runfiles does not contain the given path.
+
+ Method: RunfilesSubject.not_contains
+
+ Args:
+ self: implicitly added.
+ path: ([`str`]) the path that should not exist. It should be a runfiles
+ root-relative path (not workspace relative). The value is formatted
+ using `format_str`, so its template keywords can be directly
+ passed.
+ require_workspace_prefix: ([`bool`]) True to check that the path includes the
+            workspace prefix. This is to guard against accidentally passing a
+ workspace relative path, which will (almost) never exist, and cause
+ the test to always pass. Specify False if the file being checked for
+ is _actually_ a runfiles-root relative path that isn't under the
+ workspace itself.
+ """
+ path = self.meta.format_str(path)
+ if require_workspace_prefix:
+ _runfiles_subject_check_workspace_prefix(self, path)
+
+ if path in self.actual_paths:
+ problem, actual = format_failure_unexpected_value(
+ container_name = "{}runfiles".format(self.kind + " " if self.kind else ""),
+ unexpected = path,
+ actual = self.actual_paths,
+ )
+ self.meta.add_failure(problem, actual)
+
+def _runfiles_subject_not_contains_predicate(self, matcher):
+ """Asserts that none of the runfiles match `matcher`.
+
+ Method: RunfilesSubject.not_contains_predicate
+
+ Args:
+ self: implicitly added.
+ matcher: [`Matcher`] that accepts a string (runfiles root-relative path).
+ """
+ check_not_contains_predicate(self.actual_paths, matcher, meta = self.meta)
+
+def _runfiles_subject_check_workspace_prefix(self, path):
+ if not path.startswith(self.meta.ctx.workspace_name + "/"):
+ fail("Rejecting path lacking workspace prefix: this often indicates " +
+ "a bug. Include the workspace name as part of the path, or pass " +
+ "require_workspace_prefix=False if the path is truly " +
+ "runfiles-root relative, not workspace relative.\npath=" + path)
+
+# We use this name so it shows up nice in docs.
+# buildifier: disable=name-conventions
+RunfilesSubject = struct(
+ new = _runfiles_subject_new,
+ contains = _runfiles_subject_contains,
+ contains_at_least = _runfiles_subject_contains_at_least,
+ contains_predicate = _runfiles_subject_contains_predicate,
+ contains_exactly = _runfiles_subject_contains_exactly,
+ contains_none_of = _runfiles_subject_contains_none_of,
+ not_contains = _runfiles_subject_not_contains,
+ not_contains_predicate = _runfiles_subject_not_contains_predicate,
+ check_workspace_prefix = _runfiles_subject_check_workspace_prefix,
+)
diff --git a/lib/private/str_subject.bzl b/lib/private/str_subject.bzl
new file mode 100644
index 0000000..c4655b1
--- /dev/null
+++ b/lib/private/str_subject.bzl
@@ -0,0 +1,116 @@
+# Copyright 2023 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""# StrSubject"""
+
+load(
+ ":check_util.bzl",
+ "check_not_equals",
+ "common_subject_is_in",
+)
+load(":collection_subject.bzl", "CollectionSubject")
+
+def _str_subject_new(actual, meta):
+ """Creates a subject for asserting strings.
+
+ Method: StrSubject.new
+
+ Args:
+ actual: ([`str`]) the string to check against.
+ meta: ([`ExpectMeta`]) of call chain information.
+
+ Returns:
+ [`StrSubject`] object.
+ """
+ self = struct(actual = actual, meta = meta)
+ public = struct(
+ # keep sorted start
+ contains = lambda *a, **k: _str_subject_contains(self, *a, **k),
+ equals = lambda *a, **k: _str_subject_equals(self, *a, **k),
+ is_in = lambda *a, **k: common_subject_is_in(self, *a, **k),
+ not_equals = lambda *a, **k: _str_subject_not_equals(self, *a, **k),
+ split = lambda *a, **k: _str_subject_split(self, *a, **k),
+ # keep sorted end
+ )
+ return public
+
+def _str_subject_contains(self, substr):
+ """Assert that the subject contains the substring `substr`.
+
+ Method: StrSubject.contains
+
+ Args:
+ self: implicitly added.
+ substr: ([`str`]) the substring to check for.
+ """
+ if substr in self.actual:
+ return
+ self.meta.add_failure(
+ "expected to contain: {}".format(substr),
+ "actual: {}".format(self.actual),
+ )
+
+def _str_subject_equals(self, other):
+ """Assert that the subject string equals the other string.
+
+ Method: StrSubject.equals
+
+ Args:
+ self: implicitly added.
+ other: ([`str`]) the expected value it should equal.
+ """
+ if self.actual == other:
+ return
+ self.meta.add_failure(
+ "expected: {}".format(other),
+ "actual: {}".format(self.actual),
+ )
+
+def _str_subject_not_equals(self, unexpected):
+ """Assert that the string is not equal to `unexpected`.
+
+    Method: StrSubject.not_equals
+
+ Args:
+ self: implicitly added.
+ unexpected: ([`str`]) the value actual cannot equal.
+ """
+ return check_not_equals(
+ actual = self.actual,
+ unexpected = unexpected,
+ meta = self.meta,
+ )
+
+def _str_subject_split(self, sep):
+ """Return a `CollectionSubject` for the actual string split by `sep`.
+
+ Method: StrSubject.split
+ """
+ return CollectionSubject.new(
+ self.actual.split(sep),
+ meta = self.meta.derive("split({})".format(repr(sep))),
+ container_name = "split string",
+ sortable = False,
+ element_plural_name = "parts",
+ )
+
+# We use this name so it shows up nice in docs.
+# buildifier: disable=name-conventions
+StrSubject = struct(
+ new = _str_subject_new,
+ contains = _str_subject_contains,
+ equals = _str_subject_equals,
+ not_equals = _str_subject_not_equals,
+ split = _str_subject_split,
+)
diff --git a/lib/private/target_subject.bzl b/lib/private/target_subject.bzl
new file mode 100644
index 0000000..47d8b94
--- /dev/null
+++ b/lib/private/target_subject.bzl
@@ -0,0 +1,419 @@
+# Copyright 2023 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""# TargetSubject
+
+`TargetSubject` wraps a [`Target`] object and provides methods for asserting
+its state.
+"""
+
+load(
+ "//lib:util.bzl",
+ "TestingAspectInfo",
+)
+load(":action_subject.bzl", "ActionSubject")
+load(":bool_subject.bzl", "BoolSubject")
+load(":collection_subject.bzl", "CollectionSubject")
+load(":depset_file_subject.bzl", "DepsetFileSubject")
+load(":execution_info_subject.bzl", "ExecutionInfoSubject")
+load(":file_subject.bzl", "FileSubject")
+load(":instrumented_files_info_subject.bzl", "InstrumentedFilesInfoSubject")
+load(":label_subject.bzl", "LabelSubject")
+load(":run_environment_info_subject.bzl", "RunEnvironmentInfoSubject")
+load(":runfiles_subject.bzl", "RunfilesSubject")
+load(":truth_common.bzl", "enumerate_list_as_lines")
+
+def _target_subject_new(target, meta):
+ """Creates a subject for asserting Targets.
+
+ Method: TargetSubject.new
+
+ **Public attributes**:
+ * `actual`: The wrapped [`Target`] object.
+
+ Args:
+ target: ([`Target`]) the target to check against.
+ meta: ([`ExpectMeta`]) metadata about the call chain.
+
+ Returns:
+ [`TargetSubject`] object
+ """
+ self = struct(target = target, meta = meta)
+ public = struct(
+ # keep sorted start
+ action_generating = lambda *a, **k: _target_subject_action_generating(self, *a, **k),
+ action_named = lambda *a, **k: _target_subject_action_named(self, *a, **k),
+ actual = target,
+ attr = lambda *a, **k: _target_subject_attr(self, *a, **k),
+ data_runfiles = lambda *a, **k: _target_subject_data_runfiles(self, *a, **k),
+ default_outputs = lambda *a, **k: _target_subject_default_outputs(self, *a, **k),
+ executable = lambda *a, **k: _target_subject_executable(self, *a, **k),
+ failures = lambda *a, **k: _target_subject_failures(self, *a, **k),
+ has_provider = lambda *a, **k: _target_subject_has_provider(self, *a, **k),
+ label = lambda *a, **k: _target_subject_label(self, *a, **k),
+ meta = meta,
+ output_group = lambda *a, **k: _target_subject_output_group(self, *a, **k),
+ provider = lambda *a, **k: _target_subject_provider(self, *a, **k),
+ runfiles = lambda *a, **k: _target_subject_runfiles(self, *a, **k),
+ tags = lambda *a, **k: _target_subject_tags(self, *a, **k),
+ # keep sorted end
+ )
+ return public
+
+def _target_subject_runfiles(self):
+ """Creates a subject asserting on the target's default runfiles.
+
+ Method: TargetSubject.runfiles
+
+ Args:
+ self: implicitly added.
+
+ Returns:
+ [`RunfilesSubject`] object.
+ """
+ meta = self.meta.derive("runfiles()")
+ return RunfilesSubject.new(self.target[DefaultInfo].default_runfiles, meta, "default")
+
+def _target_subject_tags(self):
+ """Gets the target's tags as a `CollectionSubject`
+
+ Method: TargetSubject.tags
+
+ Args:
+ self: implicitly added
+
+ Returns:
+ [`CollectionSubject`] asserting the target's tags.
+ """
+ return CollectionSubject.new(
+ _target_subject_get_attr(self, "tags"),
+ self.meta.derive("tags()"),
+ )
+
+def _target_subject_get_attr(self, name):
+ if TestingAspectInfo not in self.target:
+ fail("TestingAspectInfo provider missing: if this is a second order or higher " +
+ "dependency, the recursing testing aspect must be enabled.")
+
+ attrs = self.target[TestingAspectInfo].attrs
+ if not hasattr(attrs, name):
+ fail("Attr '{}' not present for target {}".format(name, self.target.label))
+ else:
+ return getattr(attrs, name)
+
+def _target_subject_data_runfiles(self):
+ """Creates a subject asserting on the target's data runfiles.
+
+ Method: TargetSubject.data_runfiles
+
+ Args:
+ self: implicitly added.
+
+ Returns:
+ [`RunfilesSubject`] object
+ """
+ meta = self.meta.derive("data_runfiles()")
+ return RunfilesSubject.new(self.target[DefaultInfo].data_runfiles, meta, "data")
+
+def _target_subject_default_outputs(self):
+ """Creates a subject asserting on the target's default outputs.
+
+ Method: TargetSubject.default_outputs
+
+ Args:
+ self: implicitly added.
+
+ Returns:
+ [`DepsetFileSubject`] object.
+ """
+ meta = self.meta.derive("default_outputs()")
+ return DepsetFileSubject.new(self.target[DefaultInfo].files, meta)
+
+def _target_subject_executable(self):
+    """Creates a subject asserting on the target's executable File.
+
+ Method: TargetSubject.executable
+
+ Args:
+ self: implicitly added.
+
+ Returns:
+ [`FileSubject`] object.
+ """
+ meta = self.meta.derive("executable()")
+ return FileSubject.new(self.target[DefaultInfo].files_to_run.executable, meta)
+
+def _target_subject_failures(self):
+ """Creates a subject asserting on the target's failure message strings.
+
+ Method: TargetSubject.failures
+
+ Args:
+ self: implicitly added
+
+ Returns:
+ [`CollectionSubject`] of [`str`].
+ """
+ meta = self.meta.derive("failures()")
+ if AnalysisFailureInfo in self.target:
+ failure_messages = sorted([
+ f.message
+ for f in self.target[AnalysisFailureInfo].causes.to_list()
+ ])
+ else:
+ failure_messages = []
+ return CollectionSubject.new(failure_messages, meta, container_name = "failure messages")
+
+def _target_subject_has_provider(self, provider):
+    """Asserts that the target has provider `provider`.
+
+ Method: TargetSubject.has_provider
+
+ Args:
+ self: implicitly added.
+ provider: The provider object to check for.
+ """
+ if self.meta.has_provider(self.target, provider):
+ return
+ self.meta.add_failure(
+ "expected to have provider: {}".format(_provider_name(provider)),
+ "but provider was not found",
+ )
+
+def _target_subject_label(self):
+ """Returns a `LabelSubject` for the target's label value.
+
+ Method: TargetSubject.label
+ """
+ return LabelSubject.new(
+ label = self.target.label,
+ meta = self.meta.derive(expr = "label()"),
+ )
+
+def _target_subject_output_group(self, name):
+ """Returns a DepsetFileSubject of the files in the named output group.
+
+ Method: TargetSubject.output_group
+
+ Args:
+ self: implicitly added.
+ name: ([`str`]) an output group name. If it isn't present, an error is raised.
+
+ Returns:
+ DepsetFileSubject of the named output group.
+ """
+ info = self.target[OutputGroupInfo]
+ if not hasattr(info, name):
+ fail("OutputGroupInfo.{} not present for target {}".format(name, self.target.label))
+ return DepsetFileSubject.new(
+ getattr(info, name),
+ meta = self.meta.derive("output_group({})".format(name)),
+ )
+
+def _target_subject_provider(self, provider_key, factory = None):
+ """Returns a subject for a provider in the target.
+
+ Method: TargetSubject.provider
+
+ Args:
+ self: implicitly added.
+ provider_key: The provider key to create a subject for
+ factory: optional callable. The factory function to use to create
+ the subject for the found provider. Required if the provider key is
+ not an inherently supported provider. It must have the following
+ signature: `def factory(value, /, *, meta)`.
+
+ Returns:
+ A subject wrapper of the provider value.
+ """
+ if not factory:
+ for key, value in _PROVIDER_SUBJECT_FACTORIES:
+ if key == provider_key:
+ factory = value
+ break
+
+ if not factory:
+ fail("Unsupported provider: {}".format(provider_key))
+ info = self.target[provider_key]
+
+ return factory(
+ info,
+ meta = self.meta.derive("provider({})".format(provider_key)),
+ )
+
+def _target_subject_action_generating(self, short_path):
+ """Get the single action generating the given path.
+
+ Method: TargetSubject.action_generating
+
+ NOTE: in order to use this method, the target must have the `TestingAspectInfo`
+ provider (added by the `testing_aspect` aspect.)
+
+ Args:
+ self: implicitly added.
+ short_path: ([`str`]) the output's short_path to match. The value is
+ formatted using [`format_str`], so its template keywords can be
+ directly passed.
+
+ Returns:
+ [`ActionSubject`] for the matching action. If no action is found, or
+ more than one action matches, then an error is raised.
+ """
+
+ if not self.meta.has_provider(self.target, TestingAspectInfo):
+ fail("TestingAspectInfo provider missing: if this is a second order or higher " +
+ "dependency, the recursing testing aspect must be enabled.")
+
+ short_path = self.meta.format_str(short_path)
+ actions = []
+ for action in self.meta.get_provider(self.target, TestingAspectInfo).actions:
+ for output in action.outputs.to_list():
+ if output.short_path == short_path:
+ actions.append(action)
+ break
+ if not actions:
+ fail("No action generating '{}'".format(short_path))
+ elif len(actions) > 1:
+ fail("Expected 1 action to generate '{output}', found {count}: {actions}".format(
+ output = short_path,
+ count = len(actions),
+ actions = "\n".join([str(a) for a in actions]),
+ ))
+ action = actions[0]
+ meta = self.meta.derive(
+ expr = "action_generating({})".format(short_path),
+ details = ["action: [{}] {}".format(action.mnemonic, action)],
+ )
+ return ActionSubject.new(action, meta)
+
+def _target_subject_action_named(self, mnemonic):
+ """Get the single action with the matching mnemonic.
+
+ Method: TargetSubject.action_named
+
+ NOTE: in order to use this method, the target must have the [`TestingAspectInfo`]
+ provider (added by the [`testing_aspect`] aspect.)
+
+ Args:
+ self: implicitly added.
+ mnemonic: ([`str`]) the mnemonic to match
+
+ Returns:
+ [`ActionSubject`]. If no action matches, or more than one action matches, an error
+ is raised.
+ """
+ if TestingAspectInfo not in self.target:
+ fail("TestingAspectInfo provider missing: if this is a second order or higher " +
+ "dependency, the recursing testing aspect must be enabled.")
+ actions = [a for a in self.target[TestingAspectInfo].actions if a.mnemonic == mnemonic]
+ if not actions:
+ fail(
+ "No action named '{name}' for target {target}.\nFound: {found}".format(
+ name = mnemonic,
+ target = self.target.label,
+ found = enumerate_list_as_lines([
+ a.mnemonic
+ for a in self.target[TestingAspectInfo].actions
+ ]),
+ ),
+ )
+ elif len(actions) > 1:
+ fail("Expected 1 action to match '{name}', found {count}: {actions}".format(
+ name = mnemonic,
+ count = len(actions),
+ actions = "\n".join([str(a) for a in actions]),
+ ))
+ action = actions[0]
+ meta = self.meta.derive(
+ expr = "action_named({})".format(mnemonic),
+ details = ["action: [{}] {}".format(action.mnemonic, action)],
+ )
+ return ActionSubject.new(action, meta)
+
+# NOTE: This map should only have attributes that are common to all target
+# types, otherwise we can't rely on an attribute having a specific type.
+_ATTR_NAME_TO_SUBJECT_FACTORY = {
+ "testonly": BoolSubject.new,
+}
+
+def _target_subject_attr(self, name, *, factory = None):
+ """Gets a subject-wrapped value for the named attribute.
+
+ Method: TargetSubject.attr
+
+ NOTE: in order to use this method, the target must have the `TestingAspectInfo`
+ provider (added by the `testing_aspect` aspect.)
+
+ Args:
+ self: implicitly added
+ name: ([`str`]) the attribute to get. If it's an unsupported attribute, and
+ no explicit factory was provided, an error will be raised.
+ factory: (callable) function to create the returned subject based on
+ the attribute value. If specified, it takes precedence over the
+ attributes that are inherently understood. It must have the
+ following signature: `def factory(value, *, meta)`, where `value` is
+ the value of the attribute, and `meta` is the call chain metadata.
+
+ Returns:
+ A Subject-like object for the given attribute. The particular subject
+        type returned depends on attribute and `factory` arg. If it isn't known
+ what type of subject to use for the attribute, an error is raised.
+ """
+ if TestingAspectInfo not in self.target:
+ fail("TestingAspectInfo provider missing: if this is a second order or higher " +
+ "dependency, the recursing testing aspect must be enabled.")
+
+ attr_value = getattr(self.target[TestingAspectInfo].attrs, name)
+ if not factory:
+ if name not in _ATTR_NAME_TO_SUBJECT_FACTORY:
+ fail("Unsupported attr: {}".format(name))
+ factory = _ATTR_NAME_TO_SUBJECT_FACTORY[name]
+
+ return factory(
+ attr_value,
+ meta = self.meta.derive("attr({})".format(name)),
+ )
+
+# Providers aren't hashable, so we have to use a list of (key, value)
+_PROVIDER_SUBJECT_FACTORIES = [
+ (InstrumentedFilesInfo, InstrumentedFilesInfoSubject.new),
+ (RunEnvironmentInfo, RunEnvironmentInfoSubject.new),
+ (testing.ExecutionInfo, ExecutionInfoSubject.new),
+]
+
+def _provider_name(provider):
+ # This relies on implementation details of how Starlark represents
+ # providers, and isn't entirely accurate, but works well enough
+ # for error messages.
+ return str(provider).split("<function ")[1].split(">")[0]
+
+# We use this name so it shows up nice in docs.
+# buildifier: disable=name-conventions
+TargetSubject = struct(
+ new = _target_subject_new,
+ runfiles = _target_subject_runfiles,
+ tags = _target_subject_tags,
+ get_attr = _target_subject_get_attr,
+ data_runfiles = _target_subject_data_runfiles,
+ default_outputs = _target_subject_default_outputs,
+ executable = _target_subject_executable,
+ failures = _target_subject_failures,
+ has_provider = _target_subject_has_provider,
+ label = _target_subject_label,
+ output_group = _target_subject_output_group,
+ provider = _target_subject_provider,
+ action_generating = _target_subject_action_generating,
+ action_named = _target_subject_action_named,
+ attr = _target_subject_attr,
+)
diff --git a/lib/private/truth_common.bzl b/lib/private/truth_common.bzl
new file mode 100644
index 0000000..c7e6b60
--- /dev/null
+++ b/lib/private/truth_common.bzl
@@ -0,0 +1,129 @@
+"""Common code used by truth."""
+
+load("@bazel_skylib//lib:types.bzl", "types")
+
+def mkmethod(self, method):
+ """Bind a struct as the first arg to a function.
+
+ This is loosely equivalent to creating a bound method of a class.
+ """
+ return lambda *args, **kwargs: method(self, *args, **kwargs)
+
+def repr_with_type(value):
+ return "<{} {}>".format(type(value), repr(value))
+
+def _informative_str(value):
+ value_str = str(value)
+ if not value_str:
+ return "<empty string ∅>"
+ elif value_str != value_str.strip():
+ return '"{}" <sans quotes; note whitespace within>'.format(value_str)
+ else:
+ return value_str
+
+def enumerate_list_as_lines(values, prefix = "", format_value = None):
+ """Format a list of values in a human-friendly list.
+
+ Args:
+ values: ([`list`]) the values to display, one per line.
+ prefix: ([`str`]) prefix to add before each line item.
+ format_value: optional callable to convert each value to a string.
+ If not specified, then an appropriate converter will be inferred
+ based on the values. If specified, then the callable must accept
+ 1 positional arg and return a string.
+
+ Returns:
+ [`str`]; the values formatted as a human-friendly list.
+ """
+ if not values:
+ return "{}<empty>".format(prefix)
+
+ if format_value == None:
+ format_value = guess_format_value(values)
+
+ # Subtract 1 because we start at 0; i.e. length 10 prints 0 to 9
+ max_i_width = len(str(len(values) - 1))
+
+ return "\n".join([
+ "{prefix}{ipad}{i}: {value}".format(
+ prefix = prefix,
+ ipad = " " * (max_i_width - len(str(i))),
+ i = i,
+ value = format_value(v),
+ )
+ for i, v in enumerate(values)
+ ])
+
+def guess_format_value(values):
+ """Guess an appropriate human-friendly formatter to use with the value.
+
+ Args:
+ values: The object to pick a formatter for.
+
+ Returns:
+ callable that accepts the value.
+ """
+ found_types = {}
+ for value in values:
+ found_types[type(value)] = None
+ if len(found_types) > 1:
+ return repr_with_type
+ found_types = found_types.keys()
+ if len(found_types) != 1:
+ return repr_with_type
+ elif found_types[0] in ("string", "File"):
+ # For strings: omit the extra quotes and escaping. Just noise.
+ # For Files: they include <TYPE path> already
+ return _informative_str
+ else:
+ return repr_with_type
+
+def maybe_sorted(container, allow_sorting = True):
+ """Attempts to return the values of `container` in sorted order, if possible.
+
+ Args:
+ container: ([`list`] | (or other object convertible to list))
+ allow_sorting: ([`bool`]) whether to sort even if it can be sorted. This
+        is primarily so that callers can avoid boilerplate when they have
+ a "should it be sorted" arg, but also always convert to a list.
+
+ Returns:
+ A list, in sorted order if possible, otherwise in the original order.
+ This *may* be the same object as given as input.
+ """
+ container = to_list(container)
+ if not allow_sorting:
+ return container
+
+ if all([_is_sortable(v) for v in container]):
+ return sorted(container)
+ else:
+ return container
+
+def _is_sortable(obj):
+ return (
+ types.is_string(obj) or types.is_int(obj) or types.is_none(obj) or
+ types.is_bool(obj)
+ )
+
+def to_list(obj):
+ """Attempt to convert the object to a list, else error.
+
+ NOTE: This only supports objects that are typically understood as
+ lists, not any iterable. Types like `dict` and `str` are iterable,
+ but will be rejected.
+
+ Args:
+ obj: ([`list`] | [`depset`]) The object to convert to a list.
+
+ Returns:
+ [`list`] of the object
+ """
+ if types.is_string(obj):
+ fail("Cannot pass string to to_list(): {}".format(obj))
+ elif types.is_list(obj):
+ return obj
+ elif types.is_depset(obj):
+ return obj.to_list()
+ else:
+ fail("Unable to convert to list: {}".format(repr_with_type(obj)))
diff --git a/lib/truth.bzl b/lib/truth.bzl
new file mode 100644
index 0000000..95f1fdd
--- /dev/null
+++ b/lib/truth.bzl
@@ -0,0 +1,70 @@
+# Copyright 2023 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""# Truth
+
+Truth-style asserts for Bazel's Starlark.
+
+These asserts follow the Truth-style way of performing assertions. This
+basically means the actual value is wrapped in a type-specific object that
+provides type-specific assertion methods. This style provides several benefits:
+ * A fluent API that more directly expresses the assertion
+ * More ergonomic assert functions
+ * Error messages with more informative context
+ * Promotes code reuse at the type level.
+
+For more detailed documentation, see the docs on GitHub.
+
+## Basic usage
+
+NOTE: This example assumes usage of [`rules_testing`]'s [`analysis_test`]
+framework, but that framework is not required.
+
+```
+def foo_test(env, target):
+ subject = env.expect.that_target(target)
+ subject.runfiles().contains_at_least(["foo.txt"])
+ subject.executable().equals("bar.exe")
+
+ subject = env.expect.that_action(...)
+ subject.contains_at_least_args(...)
+```
+"""
+
+load("//lib/private:bool_subject.bzl", "BoolSubject")
+load("//lib/private:collection_subject.bzl", "CollectionSubject")
+load("//lib/private:depset_file_subject.bzl", "DepsetFileSubject")
+load("//lib/private:expect.bzl", "Expect")
+load("//lib/private:int_subject.bzl", "IntSubject")
+load("//lib/private:label_subject.bzl", "LabelSubject")
+load("//lib/private:matching.bzl", _matching = "matching")
+
+# Rather than load many symbols, just load this symbol, and then all the
+# asserts will be available.
+truth = struct(
+ expect = Expect.new_from_env,
+)
+
+# For the definition of a `Matcher` object, see `_match_custom`.
+matching = _matching
+
+subjects = struct(
+ # keep sorted start
+ bool = BoolSubject.new,
+ collection = CollectionSubject.new,
+ depset_file = DepsetFileSubject.new,
+ int = IntSubject.new,
+ label = LabelSubject.new,
+ # keep sorted end
+)
diff --git a/lib/util.bzl b/lib/util.bzl
new file mode 100644
index 0000000..fba4e14
--- /dev/null
+++ b/lib/util.bzl
@@ -0,0 +1,275 @@
+# Copyright 2023 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""# Util
+
+Various utilities to aid with testing.
+"""
+
+load("@bazel_skylib//lib:paths.bzl", "paths")
+load("@bazel_skylib//lib:types.bzl", "types")
+
+# TODO(ilist): remove references to skylib analysistest
+load("@bazel_skylib//lib:unittest.bzl", "analysistest")
+load("@bazel_skylib//rules:write_file.bzl", "write_file")
+
+_SKIP_CI_TAGS = [
+ # copybara-marker: skip-ci-tag
+]
+
+# We add the manual tag to prevent implicitly building and running the subject
+# targets. When the rule-under-test is a test rule, it prevents trying to run
+# it. For binary rules, it prevents implicitly building it (and thus activating
+# more validation logic) when --build_tests_only is enabled.
+PREVENT_IMPLICIT_BUILDING_TAGS = [
+ "manual", # Prevent `bazel ...` from directly building them
+ # copybara-marker: skip-coverage-tag
+] + _SKIP_CI_TAGS
+PREVENT_IMPLICIT_BUILDING = {"tags": PREVENT_IMPLICIT_BUILDING_TAGS}
+
+def merge_kwargs(*kwargs):
+ """Merges multiple dicts of kwargs.
+
+ This is similar to dict.update except:
+ * If a key's value is a list, it'll be concatenated to any existing value.
+ * An error is raised when the same non-list key occurs more than once.
+
+ Args:
+ *kwargs: kwarg arg dicts to merge
+
+ Returns:
+ dict of the merged kwarg dicts.
+ """
+ final = {}
+ for kwarg in kwargs:
+ for key, value in kwarg.items():
+ if types.is_list(value):
+ final[key] = final.get(key, []) + value
+ elif key in final:
+ fail("Key already exists: {}: {}".format(key, final[key]))
+ else:
+ final[key] = value
+ return final
+
+def empty_file(name):
+ """Generates an empty file and returns the target name for it.
+
+ Args:
+ name: str, name of the generated output file.
+
+ Returns:
+ str, the name of the generated output.
+ """
+ write_file(
+ name = "write_" + name,
+ content = [],
+ out = name,
+ )
+ return name
+
+def helper_target(rule, **kwargs):
+ """Define a target only used as a Starlark test input.
+
+ This is useful for e.g. analysis tests, which have to set up a small
+ graph of targets that should only be built via the test (e.g. they
+ may require config settings the test sets). Tags are added to
+ hide the target from `:all`, `/...`, TAP, etc.
+
+ Args:
+ rule: rule-like function.
+ **kwargs: Any kwargs to pass to `rule`. Additional tags will
+ be added to hide the target.
+ """
+ kwargs = merge_kwargs(kwargs, PREVENT_IMPLICIT_BUILDING)
+ rule(**kwargs)
+
+def short_paths(files_depset):
+ """Returns the `short_path` paths for a depset of files."""
+ return [f.short_path for f in files_depset.to_list()]
+
+def runfiles_paths(workspace_name, runfiles):
+ """Returns the root-relative short paths for the files in runfiles.
+
+ Args:
+ workspace_name: str, the workspace name (`ctx.workspace_name`).
+ runfiles: runfiles, the runfiles to convert to short paths.
+
+ Returns:
+ list of short paths but runfiles root-relative. e.g.
+ 'myworkspace/foo/bar.py'.
+ """
+ paths = []
+ paths.extend(short_paths(runfiles.files))
+ paths.extend(runfiles.empty_filenames.to_list())
+ paths.extend(_runfiles_symlink_paths(runfiles.symlinks))
+ paths = _prepend_path(workspace_name, paths)
+
+ paths.extend(_runfiles_symlink_paths(runfiles.root_symlinks))
+ return paths
+
+def runfiles_map(workspace_name, runfiles):
+ """Convert runfiles to a path->file mapping.
+
+ This approximates how Bazel materializes the runfiles on the file
+ system.
+
+ Args:
+ workspace_name: str; the workspace the runfiles belong to.
+ runfiles: runfiles; the runfiles to convert to a map.
+
+ Returns:
+ `dict[str, optional File]` that maps the path under the runfiles root
+ to its backing file. The file may be None if the path came
+ from `runfiles.empty_filenames`.
+ """
+ path_map = {}
+ workspace_prefix = workspace_name + "/"
+ for file in runfiles.files.to_list():
+ path_map[workspace_prefix + file.short_path] = file
+ for path in runfiles.empty_filenames.to_list():
+ path_map[workspace_prefix + path] = None
+
+ # NOTE: What happens when different files have the same symlink isn't
+ # exactly clear. For lack of a better option, we'll just take the last seen
+ # value.
+ for entry in runfiles.symlinks.to_list():
+ path_map[workspace_prefix + entry.path] = entry.target_file
+ for entry in runfiles.root_symlinks.to_list():
+ path_map[entry.path] = entry.target_file
+ return path_map
+
+def _prepend_path(prefix, path_strs):
+ return [paths.join(prefix, p) for p in path_strs]
+
+def _runfiles_symlink_paths(symlinks_depset):
+ return [entry.path for entry in symlinks_depset.to_list()]
+
+TestingAspectInfo = provider(
+ "Details about a target-under-test useful for testing.",
+ fields = {
+ "attrs": "The raw attributes of the target under test.",
+ "actions": "The actions registered for the target under test.",
+ "vars": "The var dict (ctx.var) for the target under text.",
+ "bin_path": "str; the ctx.bin_dir.path value (aka execroot).",
+ },
+)
+
+def _testing_aspect_impl(target, ctx):
+ return [TestingAspectInfo(
+ attrs = ctx.rule.attr,
+ actions = target.actions,
+ vars = ctx.var,
+ bin_path = ctx.bin_dir.path,
+ )]
+
+# TODO(ilist): make private, after switching python tests to new testing framework
+testing_aspect = aspect(
+ implementation = _testing_aspect_impl,
+)
+
+# The same as `testing_aspect`, but recurses through all attributes in the
+# whole graph. This is useful if you need to extract information about
+# targets that aren't direct dependencies of the target under test, or to
+# reconstruct a more complete graph of inputs/outputs/generating-target.
+# TODO(ilist): make private, after switching python tests to new testing framework
+recursive_testing_aspect = aspect(
+ implementation = _testing_aspect_impl,
+ attr_aspects = ["*"],
+)
+
+def get_target_attrs(env):
+ return analysistest.target_under_test(env)[TestingAspectInfo].attrs
+
+# TODO(b/203567235): Remove this after cl/382467002 lands and the regular
+# `analysistest.target_actions()` can be used.
+def get_target_actions(env):
+ return analysistest.target_under_test(env)[TestingAspectInfo].actions
+
+def is_runfiles(obj):
+ """Tells if an object is a runfiles object."""
+ return type(obj) == "runfiles"
+
+def is_file(obj):
+ """Tells if an object is a File object."""
+ return type(obj) == "File"
+
+def skip_test(name):
+ """Defines a test target that is always skipped.
+
+ This is useful for tests that should be skipped if some condition,
+ determinable during the loading phase, isn't met. The resulting target will
+ show up as "SKIPPED" in the output.
+
+ If possible, prefer to use `target_compatible_with` to mark tests as
+ incompatible. This avoids confusing behavior where the type of a target
+ varies depending on loading-phase behavior.
+
+ Args:
+ name: The name of the target.
+ """
+ _skip_test(
+ name = name,
+ target_compatible_with = ["@platforms//:incompatible"],
+ tags = _SKIP_CI_TAGS,
+ )
+
+def _skip_test_impl(ctx):
+ _ = ctx # @unused
+ fail("Should have been skipped")
+
+_skip_test = rule(
+ implementation = _skip_test_impl,
+ test = True,
+)
+
+def _force_exec_config_impl(ctx):
+ return [DefaultInfo(
+ files = depset(ctx.files.tools),
+ default_runfiles = ctx.runfiles().merge_all([
+ t[DefaultInfo].default_runfiles
+ for t in ctx.attr.tools
+ ]),
+ data_runfiles = ctx.runfiles().merge_all([
+ t[DefaultInfo].data_runfiles
+ for t in ctx.attr.tools
+ ]),
+ )]
+
+force_exec_config = rule(
+ implementation = _force_exec_config_impl,
+ doc = "Rule to force arbitrary targets to `cfg=exec` so they can be " +
+ "tested when used as tools.",
+ attrs = {
+ "tools": attr.label_list(
+ cfg = "exec",
+ allow_files = True,
+ doc = "A list of tools to force into the exec config",
+ ),
+ },
+)
+
+util = struct(
+ # keep sorted start
+ empty_file = empty_file,
+ force_exec_config = force_exec_config,
+ helper_target = helper_target,
+ merge_kwargs = merge_kwargs,
+ recursive_testing_aspect = recursive_testing_aspect,
+ runfiles_map = runfiles_map,
+ runfiles_paths = runfiles_paths,
+ short_paths = short_paths,
+ skip_test = skip_test,
+ testing_aspect = testing_aspect,
+ # keep sorted end
+)
diff --git a/lib/utils.bzl b/lib/utils.bzl
new file mode 100644
index 0000000..ee41485
--- /dev/null
+++ b/lib/utils.bzl
@@ -0,0 +1,37 @@
+# Copyright 2022 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utility functions to use in analysis tests."""
+
+def find_action(env, artifact):
+ """Finds the action generating the artifact.
+
+ Args:
+ env: The testing environment
+ artifact: a File or a string
+ Returns:
+ The action"""
+
+ if type(artifact) == type(""):
+ basename = env.target.label.package + "/" + artifact.format(
+ name = env.target.label.name,
+ )
+ else:
+ basename = artifact.short_path
+
+ for action in env.actions:
+ for file in action.actual.outputs.to_list():
+ if file.short_path == basename:
+ return action
+ return None
diff --git a/tests/BUILD b/tests/BUILD
new file mode 100644
index 0000000..8049732
--- /dev/null
+++ b/tests/BUILD
@@ -0,0 +1,51 @@
+# Copyright 2023 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load("@bazel_skylib//:bzl_library.bzl", "bzl_library")
+load("@bazel_skylib//rules:build_test.bzl", "build_test")
+load(":analysis_test_tests.bzl", "analysis_test_test_suite")
+load(":truth_tests.bzl", "truth_test_suite")
+
+licenses(["notice"])
+
+bzl_library(
+ name = "analysis_test_tests_bzl",
+ srcs = ["analysis_test_tests.bzl"],
+ deps = [
+ "//lib:analysis_test_bzl",
+ "//lib:truth_bzl",
+ ],
+)
+
+bzl_library(
+ name = "truth_tests_bzl",
+ srcs = ["truth_tests.bzl"],
+ visibility = ["//visibility:private"],
+ deps = [
+ "//lib:truth_bzl",
+ "//lib:util_bzl",
+ "@bazel_skylib//lib:unittest",
+ ],
+)
+
+analysis_test_test_suite(name = "analysis_test_test_suite")
+
+truth_test_suite(name = "truth_tests")
+
+build_test(
+ name = "build_tests",
+ targets = [
+ "//lib:util_bzl",
+ ],
+)
diff --git a/tests/analysis_test_tests.bzl b/tests/analysis_test_tests.bzl
new file mode 100644
index 0000000..61350b0
--- /dev/null
+++ b/tests/analysis_test_tests.bzl
@@ -0,0 +1,222 @@
+# Copyright 2022 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unit tests for analysis_test.bzl."""
+
+load("//lib:analysis_test.bzl", "analysis_test", "test_suite")
+load("//lib:truth.bzl", "matching")
+load("//lib:util.bzl", "TestingAspectInfo")
+
+###################################
+####### change_setting_test #######
+###################################
+
+_ChangeSettingInfo = provider(
+ doc = "min_os_version for change_setting_test",
+ fields = ["min_os_version"],
+)
+
+def _change_setting_fake_rule(ctx):
+ return [_ChangeSettingInfo(min_os_version = ctx.fragments.cpp.minimum_os_version())]
+
+change_setting_fake_rule = rule(
+ implementation = _change_setting_fake_rule,
+ fragments = ["cpp"],
+)
+
+def test_change_setting(name):
+ """Test to verify that an analysis test may change configuration."""
+ change_setting_fake_rule(name = name + "_fake_target", tags = ["manual"])
+
+ analysis_test(
+ name = name,
+ target = name + "_fake_target",
+ impl = _test_change_setting,
+ config_settings = {
+ "//command_line_option:minimum_os_version": "1234.5678",
+ },
+ )
+
+def _test_change_setting(env, target):
+ dep_min_os_version = target[_ChangeSettingInfo].min_os_version
+ env.expect.that_str(dep_min_os_version).equals("1234.5678")
+
+####################################
+####### failure_testing_test #######
+####################################
+
+def _failure_testing_fake_rule(_ctx):
+ fail("This rule should never work")
+
+failure_testing_fake_rule = rule(
+ implementation = _failure_testing_fake_rule,
+)
+
+def test_failure_testing(name):
+ """Test to verify that an analysis test may verify a rule fails with fail()."""
+ failure_testing_fake_rule(name = name + "_fake_target", tags = ["manual"])
+
+ analysis_test(
+ name = name,
+ target = name + "_fake_target",
+ impl = _test_failure_testing,
+ expect_failure = True,
+ )
+
+def _test_failure_testing(env, target):
+ env.expect.that_target(target).failures().contains_predicate(matching.contains("This rule should never work"))
+
+############################################
+####### fail_unexpected_passing_test #######
+############################################
+
+def _fail_unexpected_passing_fake_rule(_ctx):
+ return []
+
+fail_unexpected_passing_fake_rule = rule(
+ implementation = _fail_unexpected_passing_fake_rule,
+)
+
+# @unused # TODO(ilist): add a shell test checking it fails
+def test_fail_unexpected_passing(name):
+ """Test that fails by expecting an error that never occurs."""
+ fail_unexpected_passing_fake_rule(name = name + "_fake_target", tags = ["manual"])
+
+ analysis_test(
+ name = name,
+ target = name + "_fake_target",
+ impl = _test_fail_unexpected_passing,
+ expect_failure = True,
+ )
+
+def _test_fail_unexpected_passing(env, target):
+ env.expect.that_target(target).failures().contains_predicate(matching.contains("Oh no, going to fail"))
+
+################################################
+####### change_setting_with_failure_test #######
+################################################
+def _change_setting_with_failure_fake_rule(ctx):
+ if ctx.fragments.cpp.minimum_os_version() == "error_error":
+ fail("unexpected minimum_os_version!!!")
+ return []
+
+change_setting_with_failure_fake_rule = rule(
+ implementation = _change_setting_with_failure_fake_rule,
+ fragments = ["cpp"],
+)
+
+def test_change_setting_with_failure(name):
+ change_setting_with_failure_fake_rule(name = name + "_fake_target", tags = ["manual"])
+
+ analysis_test(
+ name = name,
+ target = name + "_fake_target",
+ impl = _test_change_setting_with_failure,
+ expect_failure = True,
+ config_settings = {
+ "//command_line_option:minimum_os_version": "error_error",
+ },
+ )
+
+def _test_change_setting_with_failure(env, target):
+ """Test verifying failure while changing configuration."""
+ env.expect.that_target(target).failures().contains_predicate(
+ matching.contains("unexpected minimum_os_version!!!"),
+ )
+
+####################################
+####### inspect_actions_test #######
+####################################
+def _inspect_actions_fake_rule(ctx):
+ out_file = ctx.actions.declare_file("out.txt")
+ ctx.actions.run_shell(
+ command = "echo 'hello' > %s" % out_file.basename,
+ outputs = [out_file],
+ )
+ return [DefaultInfo(files = depset([out_file]))]
+
+inspect_actions_fake_rule = rule(implementation = _inspect_actions_fake_rule)
+
+def test_inspect_actions(name):
+ """Test verifying actions registered by a target."""
+ inspect_actions_fake_rule(name = name + "_fake_target", tags = ["manual"])
+
+ analysis_test(name = name, target = name + "_fake_target", impl = _test_inspect_actions)
+
+def _test_inspect_actions(env, target):
+ env.expect.that_int(len(target[TestingAspectInfo].actions)).equals(1)
+ action_output = target[TestingAspectInfo].actions[0].outputs.to_list()[0]
+ env.expect.that_str(action_output.basename).equals("out.txt")
+
+####################################
+####### inspect_aspect_test #######
+####################################
+_AddedByAspectInfo = provider(
+ doc = "Example provider added by example aspect",
+ fields = {"value": "(str)"},
+)
+
+def _example_aspect_impl(_target, _ctx):
+ return [_AddedByAspectInfo(value = "attached by aspect")]
+
+example_aspect = aspect(implementation = _example_aspect_impl)
+
+def _inspect_aspect_fake_rule(ctx):
+ out_file = ctx.actions.declare_file("out.txt")
+ ctx.actions.run_shell(
+ command = "echo 'hello' > %s" % out_file.basename,
+ outputs = [out_file],
+ )
+ return [DefaultInfo(files = depset([out_file]))]
+
+inspect_aspect_fake_rule = rule(implementation = _inspect_aspect_fake_rule)
+
+def test_inspect_aspect(name):
+ """Test verifying aspect run on a target."""
+ inspect_aspect_fake_rule(name = name + "_fake_target", tags = ["manual"])
+
+ analysis_test(
+ name = name,
+ target = name + "_fake_target",
+ impl = _test_inspect_aspect,
+ extra_target_under_test_aspects = [example_aspect],
+ )
+
+def _test_inspect_aspect(env, target):
+ env.expect.that_str(target[_AddedByAspectInfo].value).equals("attached by aspect")
+
+########################################
+####### inspect_output_dirs_test #######
+########################################
+_OutputDirInfo = provider(
+ doc = "bin_path for inspect_output_dirs_test",
+ fields = ["bin_path"],
+)
+
+def _inspect_output_dirs_fake_rule(ctx):
+ return [_OutputDirInfo(bin_path = ctx.bin_dir.path)]
+
+inspect_output_dirs_fake_rule = rule(implementation = _inspect_output_dirs_fake_rule)
+
+def analysis_test_test_suite(name):
+ test_suite(
+ name = name,
+ tests = [
+ test_change_setting,
+ test_failure_testing,
+ test_change_setting_with_failure,
+ test_inspect_actions,
+ test_inspect_aspect,
+ ],
+ )
diff --git a/tests/testdata/file1.txt b/tests/testdata/file1.txt
new file mode 100644
index 0000000..7c59155
--- /dev/null
+++ b/tests/testdata/file1.txt
@@ -0,0 +1 @@
+nothing to see here
diff --git a/tests/truth_tests.bzl b/tests/truth_tests.bzl
new file mode 100644
index 0000000..d5fce52
--- /dev/null
+++ b/tests/truth_tests.bzl
@@ -0,0 +1,1481 @@
+# Copyright 2023 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests for truth.bzl."""
+
+load("@bazel_skylib//lib:unittest.bzl", ut_asserts = "asserts")
+load("//lib:truth.bzl", "matching", "subjects", "truth")
+load("//lib:analysis_test.bzl", "analysis_test", "test_suite")
+
+# Bazel 5 has a bug where every access of testing.ExecutionInfo is a new
+# object that isn't equal to itself. This is fixed in Bazel 6.
+_IS_BAZEL_6_OR_HIGHER = (testing.ExecutionInfo == testing.ExecutionInfo)
+
+_suite = []
+
+def _fake_env(env):
+ failures = []
+ env1 = struct(
+ ctx = env.ctx,
+ failures = failures,
+ fail = lambda msg: failures.append(msg), # Silent fail
+ )
+ env2 = struct(
+ ctx = env.ctx,
+ failures = failures,
+ fail = lambda msg: failures.append(msg), # Silent fail
+ expect = truth.expect(env1),
+ reset = lambda: failures.clear(),
+ )
+ return env2
+
+def _end(env, fake_env):
+ _guard_against_stray_failures(env = env, fake_env = fake_env)
+
+def _guard_against_stray_failures(*, env, fake_env):
+ ut_asserts.true(
+ env,
+ len(fake_env.failures) == 0,
+ "failures remain: clear after each expected failure\n{}".format(
+ "\n".join(fake_env.failures),
+ ),
+ )
+
+def action_subject_test(name):
+ analysis_test(name, impl = _action_subject_test, target = "truth_tests_helper")
+
+def _action_subject_test(env, target):
+ fake_env = _fake_env(env)
+ subject = fake_env.expect.that_target(
+ target,
+ ).action_named("Action1")
+
+ subject.contains_flag_values([
+ ("--arg1flag", "arg1value"),
+ ("--arg2flag", "arg2value"),
+ ])
+ _assert_no_failures(
+ fake_env,
+ env = env,
+ msg = "check contains_flag_values success",
+ )
+
+ subject.contains_flag_values([
+ ("--missingflag", "whatever"),
+ ("--arg1flag", "wrongvalue"),
+ ])
+ _assert_failure(
+ fake_env,
+ [
+ "2 expected flags with values missing from argv",
+ "0: '--arg1flag' with value 'wrongvalue'",
+ "1: '--missingflag' (not specified)",
+ "actual argv",
+ "1: arg1",
+ "2: --boolflag",
+ "3: --arg1flag",
+ "4: arg1value",
+ "5: --arg2flag=arg2value",
+ ],
+ env = env,
+ msg = "check contains_flag_values failure",
+ )
+
+ subject.contains_none_of_flag_values([
+ ("--doesnotexist", "whatever"),
+ ("--arg1flag", "differentvalue"),
+ ])
+ _assert_no_failures(
+ fake_env,
+ env = env,
+ msg = "check contains_none_of_flag_values success",
+ )
+
+ subject.contains_none_of_flag_values([
+ ("--arg1flag", "arg1value"),
+ ])
+ _assert_failure(
+ fake_env,
+ [
+ ("expected not to contain any of: \n" + # note space after colon
+ " 0: '--arg1flag' with value 'arg1value'\n"),
+ ("but 1 found:\n" +
+ " 0: '--arg1flag' with value 'arg1value'\n"),
+ "actual values:\n",
+ # Element 0 of actual is omitted because it has build-config
+ # specific values within it.
+ (" 1: arg1\n" +
+ " 2: --boolflag\n" +
+ " 3: --arg1flag\n" +
+ " 4: arg1value\n" +
+ " 5: --arg2flag=arg2value\n"),
+ ],
+ env = env,
+ msg = "check contains_none_of_flag_values failure",
+ )
+ _end(env, fake_env)
+
+_suite.append(action_subject_test)
+
+def bool_subject_test(name):
+ analysis_test(name, impl = _bool_subject_test, target = "truth_tests_helper")
+
+def _bool_subject_test(env, _target):
+ fake_env = _fake_env(env)
+ fake_env.expect.that_bool(True).equals(True)
+ _assert_no_failures(fake_env, env = env)
+ fake_env.expect.that_bool(False).equals(False)
+ _assert_no_failures(fake_env, env = env)
+
+ fake_env.expect.that_bool(True).equals(False)
+ _assert_failure(fake_env, [
+ "expected: False",
+ "actual: True",
+ ], env = env)
+
+ fake_env.expect.that_bool(True, "MYEXPR").equals(False)
+ _assert_failure(fake_env, ["MYEXPR"], env = env)
+
+ subject = truth.expect(fake_env).that_bool(True)
+ subject.not_equals(True)
+ _assert_failure(
+ fake_env,
+ ["expected not to be: True", "actual: True"],
+ env = env,
+ msg = "check not_equals fails with same type",
+ )
+ subject.not_equals(None)
+ _assert_failure(
+ fake_env,
+ ["expected not to be: None (type: NoneType)", "actual: True (type: bool)"],
+ env = env,
+ msg = "check not_equals due to different type",
+ )
+ subject.not_equals(False)
+ _assert_no_failures(
+ fake_env,
+ env = env,
+ msg = "check BoolSubject.not_equals with unequal value of same type",
+ )
+
+ subject.is_in([True, False])
+ _assert_no_failures(
+ fake_env,
+ env = env,
+ msg = "check BoolSubject.is_in with matching values",
+ )
+ subject.is_in([None, 39])
+ _assert_failure(
+ fake_env,
+ ["expected any of:", "None", "39", "actual: True"],
+ env = env,
+ msg = "check is_in mismatchd values",
+ )
+
+ _end(env, fake_env)
+
+_suite.append(bool_subject_test)
+
+def collection_custom_expr_test(name):
+ analysis_test(name, impl = _collection_custom_expr_test, target = "truth_tests_helper")
+
+def _collection_custom_expr_test(env, _target):
+ fake_env = _fake_env(env)
+ subject = fake_env.expect.that_collection(["a"], "MYEXPR")
+ subject.contains_exactly([])
+ _assert_failure(fake_env, ["MYEXPR"], env = env)
+ _end(env, fake_env)
+
+_suite.append(collection_custom_expr_test)
+
+def collection_has_size_test(name):
+ analysis_test(name, impl = _collection_has_size_test, target = "truth_tests_helper")
+
+def _collection_has_size_test(env, _target):
+ fake_env = _fake_env(env)
+ subject = fake_env.expect.that_collection(["a", "b", "c", "d"])
+
+ subject.has_size(4)
+ _assert_no_failures(
+ fake_env,
+ env = env,
+ msg = "check actual has expected size",
+ )
+
+ subject.has_size(0)
+ _assert_failure(
+ fake_env,
+ ["value of: collection.size()"],
+ env = env,
+ msg = "check actual does not have expected size",
+ )
+
+ _end(env, fake_env)
+
+_suite.append(collection_has_size_test)
+
+def collection_contains_test(name):
+ analysis_test(name, impl = _collection_contains_test, target = "truth_tests_helper")
+
+def _collection_contains_test(env, _target):
+ fake_env = _fake_env(env)
+ subject = fake_env.expect.that_collection(["a", "b", "c", "d"])
+
+ subject.contains("a")
+ _assert_no_failures(
+ fake_env,
+ env = env,
+ msg = "check actual does contain expected",
+ )
+
+ subject.contains("never")
+ _assert_failure(
+ fake_env,
+ ["expected to contain: never", "actual values", "0: a"],
+ env = env,
+ msg = "check actual is missing expected",
+ )
+
+ _end(env, fake_env)
+
+_suite.append(collection_contains_test)
+
+def collection_contains_predicate_test(name):
+ analysis_test(name, impl = _collection_contains_predicate_test, target = "truth_tests_helper")
+
+def _collection_contains_predicate_test(env, _target):
+ fake_env = _fake_env(env)
+ subject = truth.expect(fake_env).that_collection(["a", "b", "c", "d"])
+
+ subject.contains_predicate(matching.contains("a"))
+ _assert_no_failures(
+ fake_env,
+ env = env,
+ msg = "check actual does contains expected",
+ )
+
+ subject.contains_predicate(matching.contains("never"))
+ _assert_failure(
+ fake_env,
+ ["expected to contain: <contains never>", "actual values", "0: a"],
+ env = env,
+ msg = "check actual is missing a value",
+ )
+ _end(env, fake_env)
+
+_suite.append(collection_contains_predicate_test)
+
+def collection_contains_at_least_test(name):
+ analysis_test(name, impl = _collection_contains_at_least_test, target = "truth_tests_helper")
+
+def _collection_contains_at_least_test(env, _target):
+ fake_env = _fake_env(env)
+ subject = truth.expect(fake_env).that_collection(["a", "b", "c", "d"])
+
+ subject.contains_at_least(["a", "b", "c"]).in_order()
+ _assert_no_failures(
+ fake_env,
+ env = env,
+ msg = "check expected and actual with same elements in same order",
+ )
+
+ subject.contains_at_least(["never"])
+ _assert_failure(
+ fake_env,
+ ["expected elements missing", "never", "actual values", "0: a"],
+ env = env,
+ msg = "check actual is missing a value",
+ )
+
+ subject.contains_at_least([
+ "b",
+ "a",
+ ]).in_order()
+ _assert_failure(
+ fake_env,
+ [
+ "incorrect order",
+ "0: b found at offset 1",
+ "1: a found at offset 0",
+ ],
+ env = env,
+ msg = "check expected values present in wrong order",
+ )
+
+ _end(env, fake_env)
+
+_suite.append(collection_contains_at_least_test)
+
+def collection_contains_at_least_predicates_test(name):
+ analysis_test(name, impl = _collection_contains_at_least_predicates_test, target = "truth_tests_helper")
+
+def _collection_contains_at_least_predicates_test(env, _target):
+ fake_env = _fake_env(env)
+ subject = truth.expect(fake_env).that_collection(["a", "b", "c", "d"])
+ subject.contains_at_least_predicates([
+ matching.contains("a"),
+ matching.contains("b"),
+ matching.contains("c"),
+ ]).in_order()
+
+ subject.contains_at_least_predicates([
+ matching.never("never"),
+ ])
+ _assert_failure(
+ fake_env,
+ ["expected elements missing", "never", "actual values", "0: a"],
+ env = env,
+ )
+
+ subject.contains_at_least_predicates([
+ matching.custom("<MATCHER-B>", lambda v: "b" in v),
+ matching.custom("<MATCHER-A>", lambda v: "a" in v),
+ ]).in_order()
+ _assert_failure(
+ fake_env,
+ [
+ "incorrect order",
+ "0: <MATCHER-B> matched at offset 1 (matched: b)",
+ "1: <MATCHER-A> matched at offset 0 (matched: a)",
+ ],
+ env = env,
+ )
+
+ _end(env, fake_env)
+
+_suite.append(collection_contains_at_least_predicates_test)
+
def collection_contains_exactly_test(name):
    analysis_test(name, impl = _collection_contains_exactly_test, target = "truth_tests_helper")

def _collection_contains_exactly_test(env, _target):
    """Verifies CollectionSubject.contains_exactly() including multiplicity and in_order()."""
    fake_env = _fake_env(env)

    # Empty actual vs non-empty expected.
    subject = truth.expect(fake_env).that_collection([])
    subject.contains_exactly(["a"])
    _assert_failure(
        fake_env,
        [
            "1 missing:\n 0: a",
            "expected exactly:\n 0: a",
            "actual values:\n <empty>",
        ],
        env = env,
        msg = "check empty actual vs non-empty expected",
    )

    # Non-empty actual vs empty expected.
    subject = truth.expect(fake_env).that_collection(["b"])
    subject.contains_exactly([])
    _assert_failure(
        fake_env,
        [
            "1 unexpected:\n 0: b",
            "expected exactly:\n <empty>",
            "actual values:\n 0: b",
        ],
        env = env,
        msg = "check non-empty actual vs empty expected",
    )

    # Exact match; the returned Ordered object must also accept in_order().
    subject = truth.expect(fake_env).that_collection(["c"])
    order = subject.contains_exactly(["c"])
    _assert_no_failures(
        fake_env,
        env = env,
        msg = "check expected and actual with same elements in same order",
    )
    order.in_order()
    _assert_no_failures(
        fake_env,
        env = env,
        msg = "check exact elements are in order",
    )

    subject = truth.expect(fake_env).that_collection(["d"])
    subject.contains_exactly(["e"])
    _assert_failure(
        fake_env,
        [
            "1 missing:\n 0: e",
            "1 unexpected:\n 0: d",
            "expected exactly:\n 0: e",
            "actual values:\n 0: d",
        ],
        env = env,
        msg = "check disjoint values; same length",
    )

    # Same elements, different order: contains_exactly passes, in_order fails.
    subject = truth.expect(fake_env).that_collection(["f", "g"])
    order = subject.contains_exactly(["g", "f"])
    _assert_no_failures(
        fake_env,
        env = env,
        msg = "check same elements with expected in different order",
    )
    order.in_order()
    _assert_failure(
        fake_env,
        [
            "expected values all found, but with incorrect order",
            "0: g found at offset 1",
            "1: f found at offset 0",
            "actual values:",
            "0: f",
            "1: g",
        ],
        env = env,
        msg = "check same elements out of order",
    )

    subject = truth.expect(fake_env).that_collection(["x", "y"])
    subject.contains_exactly(["y"])
    _assert_failure(
        fake_env,
        [
            "1 unexpected:\n 0: x",
            "expected exactly:\n 0: y",
            "actual values:\n 0: x\n 1: y",
        ],
        env = env,
        msg = "check expected subset of actual",
    )

    subject = truth.expect(fake_env).that_collection(["a", "b", "c", "d"])
    subject.contains_exactly(["a", "b", "c", "d"])
    _assert_no_failures(
        fake_env,
        env = env,
        msg = "check expected and actual with exact elements and order; 4 values",
    )

    subject.contains_exactly(["d", "b", "a", "c"])
    _assert_no_failures(
        fake_env,
        env = env,
        msg = "check expected and actual same elements and different order; 4 values",
    )

    # Multiplicity: duplicate elements must be matched one-for-one.
    subject = truth.expect(fake_env).that_collection(["a", "b", "a"])
    subject.contains_exactly(["a", "b", "a"])
    _assert_no_failures(
        fake_env,
        env = env,
        msg = "check multiplicity, same expected/actual order",
    )

    subject.contains_exactly(["b", "a", "a"])
    _assert_no_failures(
        fake_env,
        env = env,
        msg = "check multiplicity; different expected/actual order",
    )

    subject = truth.expect(fake_env).that_collection([
        "one",
        "two",
        "one",
        "three",
        "one",
        "four",
    ])

    subject.contains_exactly(["one", "two", "three", "five"])
    _assert_failure(
        fake_env,
        [
            ("1 missing:\n" +
             " 0: five"),
            ("3 unexpected:\n" +
             " 0: four\n" +
             " 1: one\n" +
             " 2: one\n"),
            ("expected exactly:\n" +
             " 0: one\n" +
             " 1: two\n" +
             " 2: three\n" +
             " 3: five\n"),
        ],
        env = env,
        msg = "check multiplicity; expected with multiple, expected with unique",
    )

    # in_order() failure must report every element's found-at offset.
    subject = truth.expect(fake_env).that_collection(["one", "four", "three", "two", "five"])
    order = subject.contains_exactly(["one", "two", "three", "four", "five"])
    _assert_no_failures(
        fake_env,
        env = env,
        msg = "check same elements with expected in different order",
    )
    order.in_order()
    _assert_failure(
        fake_env,
        [
            "expected values all found, but with incorrect order:",
            "0: one found at offset 0",
            "1: two found at offset 3",
            "2: three found at offset 2",
            "3: four found at offset 1",
            "4: five found at offset 4",
            "actual values:",
            "0: one",
            "1: four",
            "2: three",
            "3: two",
            "4: five",
        ],
        env = env,
        msg = "check same elements out of order",
    )

    _end(env, fake_env)

_suite.append(collection_contains_exactly_test)
+
def collection_contains_exactly_predicates_test(name):
    analysis_test(name, impl = _collection_contains_exactly_predicates_test, target = "truth_tests_helper")

def _collection_contains_exactly_predicates_test(env, _target):
    """Verifies CollectionSubject.contains_exactly_predicates(); mirrors the value-based test above."""
    fake_env = _fake_env(env)

    # Empty actual vs one expected predicate.
    subject = truth.expect(fake_env).that_collection([])
    subject.contains_exactly_predicates([matching.contains("a")])
    _assert_failure(
        fake_env,
        [
            "1 missing:\n 0: <contains a>",
            "expected exactly:\n 0: <contains a>",
            "actual values:\n <empty>",
        ],
        env = env,
        msg = "check empty actual vs non-empty expected",
    )

    # Non-empty actual vs no expected predicates.
    subject = truth.expect(fake_env).that_collection(["b"])
    subject.contains_exactly_predicates([])
    _assert_failure(
        fake_env,
        [
            "1 unexpected:\n 0: b",
            "expected exactly:\n <empty>",
            "actual values:\n 0: b",
        ],
        env = env,
        msg = "check non-empty actual vs empty expected",
    )

    # Exact match; the returned Ordered object must also accept in_order().
    subject = truth.expect(fake_env).that_collection(["c"])
    order = subject.contains_exactly_predicates([matching.contains("c")])
    _assert_no_failures(
        fake_env,
        env = env,
        msg = "check expected and actual with same elements in same order",
    )
    order.in_order()
    _assert_no_failures(
        fake_env,
        env = env,
        msg = "check exact elements are in order",
    )

    subject = truth.expect(fake_env).that_collection(["d"])
    subject.contains_exactly_predicates([matching.contains("e")])
    _assert_failure(
        fake_env,
        [
            "1 missing:\n 0: <contains e>",
            "1 unexpected:\n 0: d",
            "expected exactly:\n 0: <contains e>",
            "actual values:\n 0: d",
        ],
        env = env,
        msg = "check disjoint values; same length",
    )

    # Predicates match but out of order: in_order() must report offsets and
    # which actual value each matcher consumed.
    subject = truth.expect(fake_env).that_collection(["f", "g"])
    order = subject.contains_exactly_predicates([
        matching.contains("g"),
        matching.contains("f"),
    ])
    _assert_no_failures(
        fake_env,
        env = env,
        msg = "check same elements with expected in different order",
    )
    order.in_order()
    _assert_failure(
        fake_env,
        [
            "expected values all found, but with incorrect order",
            "0: <contains g> matched at offset 1 (matched: g)",
            "1: <contains f> matched at offset 0 (matched: f)",
            "actual values:",
            "0: f",
            "1: g",
        ],
        env = env,
        msg = "check same elements out of order",
    )

    subject = truth.expect(fake_env).that_collection(["x", "y"])
    subject.contains_exactly_predicates([matching.contains("y")])
    _assert_failure(
        fake_env,
        [
            "1 unexpected:\n 0: x",
            "expected exactly:\n 0: <contains y>",
            "actual values:\n 0: x\n 1: y",
        ],
        env = env,
        msg = "check expected subset of actual",
    )

    subject = truth.expect(fake_env).that_collection(["a", "b", "c", "d"])
    subject.contains_exactly_predicates([
        matching.contains("a"),
        matching.contains("b"),
        matching.contains("c"),
        matching.contains("d"),
    ])
    _assert_no_failures(
        fake_env,
        env = env,
        msg = "check expected and actual with exact elements and order; 4 values",
    )

    subject.contains_exactly_predicates([
        matching.contains("d"),
        matching.contains("b"),
        matching.contains("a"),
        matching.contains("c"),
    ])
    _assert_no_failures(
        fake_env,
        env = env,
        msg = "check expected and actual same elements and different order; 4 values",
    )

    # Multiplicity: duplicate matches must be consumed one-for-one.
    subject = truth.expect(fake_env).that_collection(["a", "b", "a"])
    subject.contains_exactly_predicates([
        matching.contains("a"),
        matching.contains("b"),
        matching.contains("a"),
    ])
    _assert_no_failures(
        fake_env,
        env = env,
        msg = "check multiplicity, same expected/actual order",
    )

    subject.contains_exactly_predicates([
        matching.contains("b"),
        matching.contains("a"),
        matching.contains("a"),
    ])
    _assert_no_failures(
        fake_env,
        env = env,
        msg = "check multiplicity; different expected/actual order",
    )

    subject = truth.expect(fake_env).that_collection([
        "one",
        "two",
        "one",
        "three",
        "one",
        "four",
    ])

    subject.contains_exactly_predicates([
        matching.contains("one"),
        matching.contains("two"),
        matching.contains("three"),
        matching.contains("five"),
    ])
    _assert_failure(
        fake_env,
        [
            ("1 missing:\n" +
             " 0: <contains five>"),
            ("3 unexpected:\n" +
             " 0: four\n" +
             " 1: one\n" +
             " 2: one\n"),
            ("expected exactly:\n" +
             " 0: <contains one>\n" +
             " 1: <contains two>\n" +
             " 2: <contains three>\n" +
             " 3: <contains five>\n"),
        ],
        env = env,
        msg = "check multiplicity; expected with multiple, expected with unique",
    )
    _end(env, fake_env)

_suite.append(collection_contains_exactly_predicates_test)
+
def collection_contains_none_of_test(name):
    analysis_test(name, impl = _collection_contains_none_of_test, target = "truth_tests_helper")

def _collection_contains_none_of_test(env, _target):
    """Checks CollectionSubject.contains_none_of() in the passing and failing case."""
    mock_env = _fake_env(env)
    collection = truth.expect(mock_env).that_collection(["a"])

    # Disjoint value: nothing should be recorded.
    collection.contains_none_of(["b"])
    _assert_no_failures(
        mock_env,
        env = env,
        msg = "check actual contains none of",
    )

    # Present value: a single failure naming the offending entry.
    collection.contains_none_of(["a"])
    expected_fragments = [
        "expected not to contain any of:",
        " 0: a",
        "but 1 found",
        "actual values:",
    ]
    _assert_failure(
        mock_env,
        expected_fragments,
        env = env,
        msg = "check actual contains an unexpected value",
    )
    _end(env, mock_env)

_suite.append(collection_contains_none_of_test)
+
def collection_not_contains_test(name):
    analysis_test(name, impl = _collection_not_contains_test, target = "truth_tests_helper")

def _collection_not_contains_test(env, _target):
    """Verifies CollectionSubject.not_contains() pass and failure reporting."""
    fake_env = _fake_env(env)
    subject = truth.expect(fake_env).that_collection(["a"])

    subject.not_contains("b")
    _assert_no_failures(
        fake_env,
        env = env,
        msg = "check not_contains passes",
    )
    subject.not_contains("a")
    _assert_failure(
        fake_env,
        [
            "expected not to contain",
            "0: a",
        ],
        env = env,
        msg = "check not_contains fails",
    )

    # BUG FIX: every other test in this suite tears down its fake env with
    # _end(env, fake_env); this one previously omitted the call, leaking the
    # fake environment's failure-capturing state into subsequent work.
    _end(env, fake_env)

_suite.append(collection_not_contains_test)
+
def collection_not_contains_predicate_test(name):
    analysis_test(name, impl = _collection_not_contains_predicate_test, target = "truth_tests_helper")

def _collection_not_contains_predicate_test(env, _target):
    """Verifies CollectionSubject.not_contains_predicate() pass and failure reporting."""
    fake_env = _fake_env(env)
    subject = truth.expect(fake_env).that_collection(["a"])

    # Non-matching predicate: no failure recorded.
    subject.not_contains_predicate(matching.contains("b"))
    _assert_no_failures(
        fake_env,
        env = env,
        msg = "check actual does not contain a value",
    )

    # Matching predicate: failure names the predicate and the matched value.
    subject.not_contains_predicate(matching.contains("a"))
    _assert_failure(
        fake_env,
        ["expected not to contain any of: <contains a>", "but 1 found:", "0: a"],
        env = env,
        msg = "check actual contains an unexpected value",
    )
    _end(env, fake_env)

_suite.append(collection_not_contains_predicate_test)
+
def execution_info_test(name):
    analysis_test(name, impl = _execution_info_test, target = "truth_tests_helper")

def _execution_info_test(env, target):
    """Verifies the testing.ExecutionInfo provider subject (requirements/exec_group)."""

    # TODO(rlevasseur): Remove this after cl/474597236 is released in Blaze
    # Skip entirely on builds where testing.ExecutionInfo is not exposed as a
    # constructor function (older Blaze releases).
    exec_info_is_ctor = str(testing.ExecutionInfo) == "<function ExecutionInfo>"
    if not exec_info_is_ctor:
        return
    fake_env = _fake_env(env)

    subject = truth.expect(fake_env).that_target(target).provider(testing.ExecutionInfo)
    subject.requirements().contains_exactly({"EIKEY1": "EIVALUE1"})
    _assert_no_failures(fake_env, env = env)

    # exec_group is only attached by _test_helper_impl on Bazel 6+.
    if _IS_BAZEL_6_OR_HIGHER:
        subject.exec_group().equals("THE_EXEC_GROUP")
        _assert_no_failures(fake_env, env = env)
    _end(env, fake_env)

_suite.append(execution_info_test)
+
def depset_file_subject_test(name):
    analysis_test(name, impl = _depset_file_subject_test, target = "truth_tests_data_files")

def _depset_file_subject_test(env, target):
    """Spot-checks DepsetFileSubject via a target's default outputs."""
    fake_env = _fake_env(env)

    # We go through a target so that the usual format_str kwargs are present.
    subject = truth.expect(fake_env).that_target(target).default_outputs()

    # The CollectionSubject tests cover contains_at_least_predicates in
    # more depth, so just do some basic tests here.
    subject.contains_at_least_predicates([
        matching.file_path_matches("txt"),
    ])
    _assert_no_failures(fake_env, env = env)

    subject.contains_at_least_predicates([
        matching.file_path_matches("NOT THERE"),
    ])
    _assert_failure(
        fake_env,
        ["NOT THERE", "file1.txt"],
        env = env,
    )

    subject.contains_predicate(matching.file_path_matches("txt"))
    _assert_no_failures(fake_env, env = env)
    subject.contains_predicate(matching.file_path_matches("NOT THERE"))
    _assert_failure(
        fake_env,
        ["NOT THERE", "file1.txt"],
        env = env,
    )

    # contains_exactly accepts format-string templates like {package}.
    subject.contains_exactly(["{package}/testdata/file1.txt"])
    _assert_no_failures(fake_env, env = env)
    subject.contains_exactly(["NOT THERE"])
    _assert_failure(
        fake_env,
        ["NOT THERE", "file1.txt"],
        env = env,
    )

    subject.not_contains("does-not-contain")
    _assert_no_failures(
        fake_env,
        env = env,
        msg = "DepsetFilesubject.not_contains success test",
    )
    subject.not_contains("{package}/testdata/file1.txt")
    _assert_failure(
        fake_env,
        ["expected not to contain any of", "file1.txt"],
        env = env,
        msg = "DepsetFilesubject.not_contains failure test",
    )

    _end(env, fake_env)

_suite.append(depset_file_subject_test)
+
def dict_subject_test(name):
    analysis_test(name, impl = _dict_subject_test, target = "truth_tests_helper")

def _dict_subject_test(env, _target):
    """Verifies DictSubject.contains_exactly/contains_at_least/keys()."""
    fake_env = _fake_env(env)
    subject = truth.expect(fake_env).that_dict({"a": 1, "b": 2, "c": 3})

    subject.contains_exactly({"a": 1, "b": 2, "c": 3})
    _assert_no_failures(fake_env, env = env)

    # Failure message must report missing keys, unexpected keys, and
    # incorrect entries, plus both the expected and actual dicts.
    subject.contains_exactly({"d": 4, "a": 99})
    _assert_failure(
        fake_env,
        [
            ("expected dict: {\n" +
             " a: <int 99>\n" +
             " d: <int 4>\n" +
             "}\n"),
            ("1 missing keys:\n" +
             " 0: d\n"),
            ("2 unexpected keys:\n" +
             " 0: b\n" +
             " 1: c\n"),
            ("1 incorrect entries:\n" +
             "key a:\n" +
             " expected: 99\n" +
             " but was : 1\n"),
            ("actual: {\n" +
             " a: <int 1>\n" +
             " b: <int 2>\n" +
             " c: <int 3>\n" +
             "}\n"),
        ],
        env = env,
    )

    subject.contains_at_least({"a": 1})
    _assert_no_failures(fake_env, env = env)

    # contains_at_least must not complain about extra keys, only about
    # missing keys and incorrect entries.
    subject.contains_at_least({"d": 91, "a": 74})
    _assert_failure(
        fake_env,
        [
            ("expected dict: {\n" +
             " a: <int 74>\n" +
             " d: <int 91>\n" +
             "}\n"),
            ("1 missing keys:\n" +
             " 0: d\n"),
            ("1 incorrect entries:\n" +
             "key a:\n" +
             " expected: 74\n" +
             " but was : 1\n"),
            ("actual: {\n" +
             " a: <int 1>\n" +
             " b: <int 2>\n" +
             " c: <int 3>\n" +
             "}\n"),
        ],
        env = env,
    )

    # NOTE: we use the real env for this, since we're doing a real assert
    truth.expect(env).that_collection(
        subject.keys().actual,
    ).contains_exactly(["a", "b", "c"])

    _end(env, fake_env)

_suite.append(dict_subject_test)
+
def expect_test(name):
    analysis_test(name, impl = _expect_test, target = "truth_tests_helper")

def _expect_test(env, target):
    """Checks the truth.expect() root factory and Expect.where() detail propagation."""
    mock_env = _fake_env(env)
    root = truth.expect(mock_env)

    ut_asserts.true(
        env,
        root.that_target(target) != None,
        msg = "expect.that_target",
    )
    _assert_no_failures(mock_env, env = env)

    # Details passed to where() must be echoed in the failure message.
    detailed = root.where(
        foo = "bar",
        baz = "qux",
    )
    detailed.that_bool(True).equals(False)
    _assert_failure(
        mock_env,
        ["foo: bar", "baz: qux"],
        env = env,
    )
    _end(env, mock_env)

_suite.append(expect_test)
+
def file_subject_test(name):
    analysis_test(name, impl = _file_subject_test, target = "truth_tests_data_files")

def _file_subject_test(env, target):
    """Verifies FileSubject.short_path_equals(), including custom format keywords."""
    fake_env = _fake_env(env)
    package = target.label.package
    expect = truth.expect(fake_env)
    subject = expect.that_file(target.files.to_list()[0])
    subject.short_path_equals(package + "/testdata/file1.txt")
    _assert_no_failures(fake_env, env = env)

    subject.short_path_equals("landon-and-hope-forever.txt")
    _assert_failure(
        fake_env,
        [
            "value of: file",
            "expected: landon-and-hope-forever.txt",
            "actual: {}/testdata/file1.txt".format(package),
        ],
        env = env,
    )

    # A derived meta can register extra format-string keywords for paths.
    subject = expect.that_file(
        target.files.to_list()[0],
        meta = expect.meta.derive(
            format_str_kwargs = {"custom": "file1.txt"},
        ),
    )

    # NOTE: We purposefully don't use `{package}` because we're just
    # testing the `{custom}` keyword
    subject.short_path_equals(package + "/testdata/{custom}")
    _assert_no_failures(fake_env, env = env)

    _end(env, fake_env)

_suite.append(file_subject_test)
+
def label_subject_test(name):
    analysis_test(name, impl = _label_subject_test, target = "truth_tests_helper")

def _label_subject_test(env, target):
    """Verifies LabelSubject.equals() and is_in() with both str and Label values."""
    fake_env = _fake_env(env)

    expect = truth.expect(fake_env)
    subject = expect.that_target(target).label()

    # equals() accepts a label string...
    subject.equals("//tests:truth_tests_helper")
    _assert_no_failures(fake_env, env = env)

    # ...and a Label object.
    subject.equals(Label("//tests:truth_tests_helper"))
    _assert_no_failures(fake_env, env = env)

    subject.equals("//nope")
    _assert_failure(
        fake_env,
        ["expected: " + str(Label("//nope")), "actual:", "_helper"],
        env = env,
    )

    # is_in() must normalize str and Label members before comparing.
    subject = subjects.label(Label("//some/pkg:label"), expect.meta)
    subject.is_in(["//foo:bar", "//some/pkg:label"])
    _assert_no_failures(fake_env, msg = "is_in with matched str values", env = env)
    subject.is_in([Label("//bar:baz"), Label("//some/pkg:label")])
    _assert_no_failures(fake_env, msg = "is_in with matched label values", env = env)
    subject.is_in(["//not:there", Label("//other:value")])
    _assert_failure(
        fake_env,
        [
            "expected any of:",
            "//not:there",
            "//other:value",
            "actual: " + str(Label("//some/pkg:label")),
        ],
        msg = "check is_in fails",
        env = env,
    )

    _end(env, fake_env)

_suite.append(label_subject_test)
+
def matchers_contains_test(name):
    analysis_test(name, impl = _matchers_contains_test, target = "truth_tests_helper")

def _matchers_contains_test(env, _target):
    """Checks the matching.contains() predicate against a hit and a miss."""
    mock_env = _fake_env(env)
    contains_x = matching.contains("x")
    ut_asserts.true(env, contains_x.match("YYYxZZZ"))
    ut_asserts.false(env, contains_x.match("zzzzz"))
    _end(env, mock_env)

_suite.append(matchers_contains_test)
+
def matchers_str_matchers_test(name):
    analysis_test(name, impl = _matchers_str_matchers_test, target = "truth_tests_helper")

def _matchers_str_matchers_test(env, _target):
    """Checks the matching.str_matches/str_endswith/str_startswith predicates."""
    fake_env = _fake_env(env)

    # str_matches: glob-style pattern (`*` wildcard).
    ut_asserts.true(env, matching.str_matches("f*b").match("foobar"))
    ut_asserts.false(env, matching.str_matches("f*b").match("nope"))

    ut_asserts.true(env, matching.str_endswith("123").match("abc123"))
    ut_asserts.false(env, matching.str_endswith("123").match("123xxx"))

    ut_asserts.true(env, matching.str_startswith("true").match("truechew"))
    ut_asserts.false(env, matching.str_startswith("buck").match("notbuck"))
    _end(env, fake_env)

_suite.append(matchers_str_matchers_test)
+
def matchers_is_in_test(name):
    analysis_test(name, impl = _matchers_is_in_test, target = "truth_tests_helper")

def _matchers_is_in_test(env, _target):
    """Checks the matching.is_in() predicate for membership and non-membership."""
    mock_env = _fake_env(env)
    member_matcher = matching.is_in(["a", "b"])
    ut_asserts.true(env, member_matcher.match("a"))
    non_member_matcher = matching.is_in(["x", "y"])
    ut_asserts.false(env, non_member_matcher.match("z"))
    _end(env, mock_env)

_suite.append(matchers_is_in_test)
+
def runfiles_subject_test(name):
    analysis_test(name, impl = _runfiles_subject_test, target = "truth_tests_helper")

def _runfiles_subject_test(env, target):
    """Verifies RunfilesSubject contains/contains_none_of/contains_exactly/contains_at_least.

    Args:
        env: the real test env, used to assert on the fake env's behavior.
        target: the helper target whose default runfiles are inspected.
    """
    fake_env = _fake_env(env)

    subject = truth.expect(fake_env).that_target(target).runfiles()
    subject.contains("{workspace}/{package}/default_runfile1.txt")
    _assert_no_failures(fake_env, env = env)

    subject.contains("does-not-exist")
    _assert_failure(
        fake_env,
        [
            "expected to contain: does-not-exist",
            "actual default runfiles:",
            "default_runfile1.txt",
            # BUG FIX: the format string was missing its `{}` placeholder
            # ('"target: ".format(target.label)' is a no-op that only checked
            # for the literal "target: "). Include the label so the assertion
            # actually verifies the failing target is named in the message.
            "target: {}".format(target.label),
        ],
        env = env,
        msg = "check contains",
    )

    subject.contains_none_of(["{workspace}/{package}/not-there.txt"])
    _assert_no_failures(fake_env, env = env)

    subject.contains_none_of(["{workspace}/{package}/default_runfile1.txt"])
    _assert_failure(
        fake_env,
        [
            "expected not to contain any of",
            "default_runfile1.txt",
            env.ctx.workspace_name,
        ],
        env = env,
        msg = "check contains none of",
    )

    subject.contains_exactly([
        "{workspace}/{package}/default_runfile1.txt",
        "{workspace}/{package}/truth_tests_helper.txt",
    ])
    _assert_no_failures(fake_env, env = env)
    subject.contains_exactly([
        "{workspace}/{package}/not-there.txt",
    ])
    _assert_failure(
        fake_env,
        [
            "1 missing",
            "not-there.txt",
            env.ctx.workspace_name,
        ],
        env = env,
        msg = "check contains_exactly fails",
    )

    subject.contains_at_least([
        "{workspace}/{package}/default_runfile1.txt",
    ])
    _assert_no_failures(fake_env, env = env)
    subject.contains_at_least([
        "not-there.txt",
    ])
    _assert_failure(
        fake_env,
        [
            "1 expected paths missing",
            "not-there.txt",
            env.ctx.workspace_name,
        ],
        env = env,
        msg = "check contains_at_least fails",
    )

    _end(env, fake_env)

_suite.append(runfiles_subject_test)
+
def str_subject_test(name):
    analysis_test(name, impl = _str_subject_test, target = "truth_tests_helper")

def _str_subject_test(env, _target):
    """Verifies StrSubject: contains/equals/split/not_equals/is_in."""
    fake_env = _fake_env(env)
    subject = truth.expect(fake_env).that_str("foobar")

    subject.contains("ob")
    _assert_no_failures(fake_env, env = env)

    subject.contains("nope")
    _assert_failure(
        fake_env,
        ["expected to contain: nope", "actual: foobar"],
        env = env,
        msg = "check contains",
    )

    subject.equals("foobar")
    _assert_no_failures(fake_env, env = env)

    subject.equals("not foobar")
    _assert_failure(
        fake_env,
        ["expected: not foobar", "actual: foobar"],
        env = env,
        msg = "check equals",
    )

    # split() returns a subject wrapping the list of pieces.
    result = subject.split("b")
    ut_asserts.true(env, result.actual == ["foo", "ar"], "incorrectly split")

    subject.not_equals("foobar")
    _assert_failure(
        fake_env,
        ["expected not to be: foobar", "actual: foobar"],
        env = env,
        msg = "check not_equals with equal value",
    )
    # Different type: the failure message must include both types.
    subject.not_equals(47)
    _assert_failure(
        fake_env,
        ["expected not to be: 47 (type: int)", "actual: foobar (type: string)"],
        env = env,
        msg = "check not_equals with different type",
    )
    subject.not_equals("not-foobar")
    _assert_no_failures(
        fake_env,
        env = env,
        msg = "check not_equals with unequal value of same type",
    )

    subject.is_in(["xxx", "yyy", "zzz"])
    _assert_failure(
        fake_env,
        ["expected any of:", "xxx", "yyy", "zzz", "actual: foobar"],
        env = env,
        msg = "check is_in with non-matching values",
    )
    subject.is_in(["foobar", "y", "z"])
    _assert_no_failures(
        fake_env,
        env = env,
        msg = "check is_in with matching values",
    )
    _end(env, fake_env)

_suite.append(str_subject_test)
+
def target_subject_test(name):
    # TODO: also cover a file target, not just a rule target.
    analysis_test(name, impl = _target_subject_test, target = "truth_tests_helper")

def _target_subject_test(env, target):
    """Verifies TargetSubject: action_generating/label/tags/attr/output_group."""
    fake_env = _fake_env(env)
    subject = truth.expect(fake_env).that_target(target)

    # First a static string, no formatting parameters
    result = subject.action_generating("{package}/default_runfile1.txt")
    ut_asserts.true(env, result != None, msg = "action_generating gave None")

    # Now try it with formatting parameters
    result = subject.action_generating("{package}/{name}.txt")
    ut_asserts.true(env, result != None, msg = "action_generating gave None")

    result = subject.label()
    ut_asserts.true(env, result != None, msg = "label gave None")

    subject = truth.expect(fake_env).that_target(target)

    tags = subject.tags()
    ut_asserts.true(env, tags != None, msg = "tags gave None")

    # Tags set by truth_test_suite() on the helper target.
    tags.contains_exactly(["tag1", "tag2"])
    _assert_no_failures(
        fake_env,
        env = env,
        msg = "check TargetSubject.tags()",
    )

    attr_subject = subject.attr("testonly")
    ut_asserts.true(env, attr_subject != None, msg = "attr(testonly) gave None")

    # attr() accepts a custom subject factory for arbitrary wrapping.
    custom_subject = subject.attr(
        "testonly",
        factory = lambda v, meta: struct(custom = True),
    )
    ut_asserts.true(
        env,
        custom_subject.custom == True,
        msg = "attr() with custom factory gave wrong value",
    )

    # "some_group" is provided by _test_helper_impl's OutputGroupInfo.
    output_group_subject = subject.output_group("some_group")
    output_group_subject.contains("{package}/output_group_file.txt")
    _assert_no_failures(
        fake_env,
        env = env,
        msg = "check TargetSubject.output_group()",
    )

    _end(env, fake_env)

_suite.append(target_subject_test)
+
def run_environment_info_subject_test(name):
    analysis_test(name, impl = _run_environment_info_subject_test, target = "truth_tests_helper")

def _run_environment_info_subject_test(env, target):
    """Verifies the RunEnvironmentInfo subject (environment/inherited_environment).

    The values asserted here are the ones _test_helper_impl puts into
    testing.TestEnvironment().
    """
    fake_env = _fake_env(env)

    subject = truth.expect(fake_env).that_target(target).provider(
        RunEnvironmentInfo,
    )

    subject.environment().contains_exactly({
        "EKEY1": "EVALUE1",
        "EKEY2": "EVALUE2",
    })
    _assert_no_failures(fake_env, env = env)

    subject.inherited_environment().contains_exactly(["INHERIT1", "INHERIT2"])
    _assert_no_failures(fake_env, env = env)

    _end(env, fake_env)

_suite.append(run_environment_info_subject_test)
+
def _add_failure_works_test(name):
    # expect_failure: the real add_failure() call below marks the test failed;
    # the framework inverts that so the suite still passes.
    analysis_test(
        name,
        impl = _add_failure_works_test_impl,
        target = "truth_tests_noop",
        expect_failure = True,
    )

def _add_failure_works_test_impl(env, target):
    """Test that the real add_failure() codepath works.

    All the other tests mock out the fail() call, this is the one test that doesn't.
    """
    _ = target  # @unused

    # NOTE: this prints a spurious message.
    env.expect.meta.add_failure("FAKE PROBLEM", "FAKE ACTUAL")

    # Reach into the env's private failure list to inspect the recorded
    # failure, then clear it so the recorded failure doesn't propagate.
    failures = list(env._failures)
    env._failures.clear()

    if len(failures) != 1:
        env.fail("Expected len(failures) == 1, got " + str(len(failures)))
    else:
        failure = failures[0]
        if "FAKE PROBLEM" not in failure:
            env.fail("Expected string 'FAKE PROBLEM' not found in failure message")
        if "FAKE ACTUAL" not in failure:
            env.fail("Expected string 'FAKE ACTUAL' not found in failure message")

_suite.append(_add_failure_works_test)
+
def _assert_no_failures(fake_env, *, env, msg = ""):
    """Asserts that fake_env captured zero failures, then resets it.

    Args:
        fake_env: the fake env whose captured failures are checked.
        env: the real test env used to report the assertion result.
        msg: optional context included in the error report.
    """
    header = [
        "expected no failures, but found failures",
        msg,
        "===== FAILURE MESSAGES =====",
    ]
    trailer = ["===== END FAILURE MESSAGES ===="]
    report = "\n".join(header + list(fake_env.failures) + trailer)
    ut_asserts.true(env, len(fake_env.failures) == 0, msg = report)
    fake_env.reset()
+
def _assert_failure(fake_env, expected_strs, *, env, msg = ""):
    """Asserts exactly one failure was captured and it contains every expected substring.

    Resets fake_env afterwards so the next check starts clean.

    Args:
        fake_env: the fake env whose captured failures are inspected.
        expected_strs: substrings that must all appear in the failure message.
        env: the real test env used to report assertion problems.
        msg: optional context included in the reported error.
    """
    if len(fake_env.failures) != 1:
        env.fail("expected exactly 1 failure, but found {}".format(len(fake_env.failures)))

    # Even on a count mismatch, diagnose the first failure's content if any exists.
    if len(fake_env.failures) > 0:
        failure = fake_env.failures[0]
        for expected in expected_strs:
            if expected not in failure:
                env.fail((
                    "\nFailure message incorrect:\n{}\n" +
                    "===== EXPECTED ERROR SUBSTRING =====\n{}\n" +
                    "===== END EXPECTED ERROR SUBSTRING =====\n" +
                    "===== ACTUAL FAILURE MESSAGE =====\n{}\n" +
                    "===== END ACTUAL FAILURE MESSAGE ====="
                ).format(
                    msg,
                    expected,
                    failure,
                ))

    fake_env.reset()
+
def _test_helper_impl(ctx):
    """Implements test_helper: the target the subject tests assert against.

    Registers one action with a mix of argument styles, and returns default
    runfiles, a test environment, execution requirements, and an output
    group — one concrete value for each subject exercised above.
    """
    action_output = ctx.actions.declare_file("action.txt")
    ctx.actions.run(
        outputs = [action_output],
        executable = ctx.executable.tool,
        arguments = [
            "arg1",
            "--boolflag",
            "--arg1flag",
            "arg1value",
            "--arg2flag=arg2value",
        ],
        mnemonic = "Action1",
    )
    # exec_group on testing.ExecutionInfo is only supported on Bazel 6+.
    if _IS_BAZEL_6_OR_HIGHER:
        exec_info_bazel_6_kwargs = {"exec_group": "THE_EXEC_GROUP"}
    else:
        exec_info_bazel_6_kwargs = {}

    return [
        DefaultInfo(
            default_runfiles = ctx.runfiles(
                files = [
                    _empty_file(ctx, "default_runfile1.txt"),
                    _empty_file(ctx, ctx.label.name + ".txt"),
                ],
            ),
        ),
        testing.TestEnvironment(
            environment = {"EKEY1": "EVALUE1", "EKEY2": "EVALUE2"},
            inherited_environment = ["INHERIT1", "INHERIT2"],
        ),
        testing.ExecutionInfo({"EIKEY1": "EIVALUE1"}, **exec_info_bazel_6_kwargs),
        OutputGroupInfo(
            some_group = depset([_empty_file(ctx, "output_group_file.txt")]),
        ),
    ]

# Helper rule instantiated by truth_test_suite(); the subject tests read the
# providers it returns.
test_helper = rule(
    implementation = _test_helper_impl,
    attrs = {
        "tool": attr.label(
            default = ":truth_tests_noop",
            executable = True,
            cfg = "exec",
        ),
    },
)
+
def _empty_file(ctx, name):
    """Declares an output file called `name`, writes empty content, and returns it."""
    out = ctx.actions.declare_file(name)
    ctx.actions.write(out, content = "")
    return out

def _noop_binary_impl(ctx):
    """Rule impl producing a do-nothing executable (an empty file)."""
    return DefaultInfo(executable = _empty_file(ctx, ctx.label.name))

# Placeholder executable used where a rule needs a tool but the tests never
# actually run it.
noop_binary = rule(
    implementation = _noop_binary_impl,
    executable = True,
)
+
def truth_test_suite(name):
    """Declares the helper targets and the test_suite aggregating all tests in _suite.

    Args:
        name: the name of the generated test_suite target.
    """

    # Unit tests can't directly create File objects, so we have a generic
    # collection of files they can put in custom attributes to use.
    native.filegroup(
        name = "truth_tests_data_files",
        srcs = native.glob(["testdata/**"]),
    )
    test_helper(
        name = "truth_tests_helper",
        tags = ["tag1", "tag2"],
    )
    noop_binary(name = "truth_tests_noop")

    test_suite(
        name = name,
        tests = _suite,
    )