From 7cf9d434da6c02c19b5d8502ef4e820ee0f97593 Mon Sep 17 00:00:00 2001 From: Kesha Hietala Date: Tue, 13 Feb 2024 15:18:56 +0000 Subject: [PATCH 1/6] first pass --- .github/ISSUE_TEMPLATE/feature_request.yml | 1 - .github/workflows/ci.yml | 39 - .gitignore | 12 - README.md | 21 +- build.sh | 10 - cedar-dafny-java-wrapper/README.md | 4 - cedar-dafny-java-wrapper/build.gradle | 120 -- .../gradle/wrapper/gradle-wrapper.jar | Bin 62076 -> 0 bytes .../gradle/wrapper/gradle-wrapper.properties | 6 - cedar-dafny-java-wrapper/gradlew | 245 --- cedar-dafny-java-wrapper/gradlew.bat | 92 - cedar-dafny-java-wrapper/settings.gradle | 4 - .../DafnyUtils.java | 105 -- .../DefinitionalEngine.java | 111 -- .../DefinitionalValidator.java | 86 - .../CedarDefinitionalImplementation/Main.java | 30 - .../log/LogTag.java | 45 - .../log/Logger.java | 59 - .../log/Timer.java | 42 - .../package-info.java | 21 - .../DefinitionalEngineTest.java | 32 - cedar-dafny/.config/dotnet-tools.json | 18 - cedar-dafny/.vscode/settings.json | 6 - cedar-dafny/Makefile | 86 - cedar-dafny/README.md | 33 - cedar-dafny/def/all.dfy | 27 - cedar-dafny/def/base.dfy | 100 -- cedar-dafny/def/core.dfy | 291 ---- cedar-dafny/def/engine.dfy | 284 --- cedar-dafny/def/ext.dfy | 57 - cedar-dafny/def/ext/decimal.dfy | 164 -- cedar-dafny/def/ext/fun.dfy | 87 - cedar-dafny/def/ext/ipaddr.dfy | 566 ------ cedar-dafny/def/ext/parser.dfy | 124 -- cedar-dafny/def/std.dfy | 73 - cedar-dafny/def/templates.dfy | 238 --- cedar-dafny/def/util.dfy | 233 --- cedar-dafny/def/wildcard.dfy | 145 -- cedar-dafny/difftest/helpers.dfy | 570 ------ cedar-dafny/difftest/main.dfy | 686 -------- cedar-dafny/test/decimal.dfy | 50 - cedar-dafny/test/ipaddr.dfy | 126 -- cedar-dafny/thm/basic.dfy | 58 - cedar-dafny/thm/eval/basic.dfy | 222 --- cedar-dafny/thm/pslicing.dfy | 207 --- cedar-dafny/thm/slicing.dfy | 60 - cedar-dafny/validation/all.dfy | 20 - cedar-dafny/validation/ext.dfy | 32 - cedar-dafny/validation/ext/decimal.dfy | 52 - 
cedar-dafny/validation/ext/ipaddr.dfy | 53 - cedar-dafny/validation/subtyping.dfy | 366 ---- cedar-dafny/validation/thm/base.dfy | 134 -- cedar-dafny/validation/thm/model.dfy | 1212 ------------- cedar-dafny/validation/thm/soundness.dfy | 1523 ----------------- .../validation/thm/strict_inf_strict.dfy | 173 -- .../validation/thm/strict_soundness.dfy | 393 ----- cedar-dafny/validation/thm/toplevel.dfy | 114 -- cedar-dafny/validation/typechecker.dfy | 718 -------- cedar-dafny/validation/types.dfy | 167 -- cedar-dafny/validation/util.dfy | 68 - cedar-dafny/validation/validator.dfy | 129 -- cedar-drt/README.md | 4 +- cedar-drt/fuzz/Cargo.toml | 50 +- cedar-drt/fuzz/fuzz_targets/abac-lean.rs | 26 - .../fuzz_targets/abac-type-directed-lean.rs | 26 - .../fuzz/fuzz_targets/abac-type-directed.rs | 157 +- cedar-drt/fuzz/fuzz_targets/abac.rs | 141 +- cedar-drt/fuzz/fuzz_targets/abac_shared.rs | 156 -- .../fuzz_targets/abac_type_directed_shared.rs | 172 -- .../fuzz_targets/eval-type-directed-lean.rs | 26 - .../fuzz/fuzz_targets/eval-type-directed.rs | 106 +- .../fuzz_targets/eval_type_directed_shared.rs | 121 -- .../fuzz/fuzz_targets/rbac-authorizer-lean.rs | 28 - .../fuzz/fuzz_targets/rbac-authorizer.rs | 123 +- cedar-drt/fuzz/fuzz_targets/rbac-lean.rs | 26 - cedar-drt/fuzz/fuzz_targets/rbac.rs | 195 ++- .../fuzz_targets/rbac_authorizer_shared.rs | 138 -- cedar-drt/fuzz/fuzz_targets/rbac_shared.rs | 210 --- ...trict-validation-drt-type-directed-lean.rs | 26 - .../strict-validation-drt-type-directed.rs | 26 - .../strict_validation_drt_shared.rs | 86 - .../validation-drt-type-directed.rs | 19 +- cedar-drt/fuzz/fuzz_targets/validation-drt.rs | 94 - cedar-drt/set_env_vars.sh | 27 - cedar-drt/src/dafny_java_impl.rs | 449 ----- cedar-drt/src/lib.rs | 2 - cedar-drt/tests/integration_tests.rs | 7 - 87 files changed, 707 insertions(+), 12484 deletions(-) delete mode 100644 cedar-dafny-java-wrapper/README.md delete mode 100644 cedar-dafny-java-wrapper/build.gradle delete mode 100644 
cedar-dafny-java-wrapper/gradle/wrapper/gradle-wrapper.jar delete mode 100644 cedar-dafny-java-wrapper/gradle/wrapper/gradle-wrapper.properties delete mode 100755 cedar-dafny-java-wrapper/gradlew delete mode 100644 cedar-dafny-java-wrapper/gradlew.bat delete mode 100644 cedar-dafny-java-wrapper/settings.gradle delete mode 100644 cedar-dafny-java-wrapper/src/main/java/com/CedarDefinitionalImplementation/DafnyUtils.java delete mode 100644 cedar-dafny-java-wrapper/src/main/java/com/CedarDefinitionalImplementation/DefinitionalEngine.java delete mode 100644 cedar-dafny-java-wrapper/src/main/java/com/CedarDefinitionalImplementation/DefinitionalValidator.java delete mode 100644 cedar-dafny-java-wrapper/src/main/java/com/CedarDefinitionalImplementation/Main.java delete mode 100644 cedar-dafny-java-wrapper/src/main/java/com/CedarDefinitionalImplementation/log/LogTag.java delete mode 100644 cedar-dafny-java-wrapper/src/main/java/com/CedarDefinitionalImplementation/log/Logger.java delete mode 100644 cedar-dafny-java-wrapper/src/main/java/com/CedarDefinitionalImplementation/log/Timer.java delete mode 100644 cedar-dafny-java-wrapper/src/main/java/com/CedarDefinitionalImplementation/package-info.java delete mode 100644 cedar-dafny-java-wrapper/src/test/java/com/CedarDefinitionalImplementation/DefinitionalEngineTest.java delete mode 100644 cedar-dafny/.config/dotnet-tools.json delete mode 100644 cedar-dafny/.vscode/settings.json delete mode 100644 cedar-dafny/Makefile delete mode 100644 cedar-dafny/README.md delete mode 100644 cedar-dafny/def/all.dfy delete mode 100644 cedar-dafny/def/base.dfy delete mode 100644 cedar-dafny/def/core.dfy delete mode 100644 cedar-dafny/def/engine.dfy delete mode 100644 cedar-dafny/def/ext.dfy delete mode 100644 cedar-dafny/def/ext/decimal.dfy delete mode 100644 cedar-dafny/def/ext/fun.dfy delete mode 100644 cedar-dafny/def/ext/ipaddr.dfy delete mode 100644 cedar-dafny/def/ext/parser.dfy delete mode 100644 cedar-dafny/def/std.dfy delete mode 100644 
cedar-dafny/def/templates.dfy delete mode 100644 cedar-dafny/def/util.dfy delete mode 100644 cedar-dafny/def/wildcard.dfy delete mode 100644 cedar-dafny/difftest/helpers.dfy delete mode 100644 cedar-dafny/difftest/main.dfy delete mode 100644 cedar-dafny/test/decimal.dfy delete mode 100644 cedar-dafny/test/ipaddr.dfy delete mode 100644 cedar-dafny/thm/basic.dfy delete mode 100644 cedar-dafny/thm/eval/basic.dfy delete mode 100644 cedar-dafny/thm/pslicing.dfy delete mode 100644 cedar-dafny/thm/slicing.dfy delete mode 100644 cedar-dafny/validation/all.dfy delete mode 100644 cedar-dafny/validation/ext.dfy delete mode 100644 cedar-dafny/validation/ext/decimal.dfy delete mode 100644 cedar-dafny/validation/ext/ipaddr.dfy delete mode 100644 cedar-dafny/validation/subtyping.dfy delete mode 100644 cedar-dafny/validation/thm/base.dfy delete mode 100644 cedar-dafny/validation/thm/model.dfy delete mode 100644 cedar-dafny/validation/thm/soundness.dfy delete mode 100644 cedar-dafny/validation/thm/strict_inf_strict.dfy delete mode 100644 cedar-dafny/validation/thm/strict_soundness.dfy delete mode 100644 cedar-dafny/validation/thm/toplevel.dfy delete mode 100644 cedar-dafny/validation/typechecker.dfy delete mode 100644 cedar-dafny/validation/types.dfy delete mode 100644 cedar-dafny/validation/util.dfy delete mode 100644 cedar-dafny/validation/validator.dfy delete mode 100644 cedar-drt/fuzz/fuzz_targets/abac-lean.rs delete mode 100644 cedar-drt/fuzz/fuzz_targets/abac-type-directed-lean.rs delete mode 100644 cedar-drt/fuzz/fuzz_targets/abac_shared.rs delete mode 100644 cedar-drt/fuzz/fuzz_targets/abac_type_directed_shared.rs delete mode 100644 cedar-drt/fuzz/fuzz_targets/eval-type-directed-lean.rs delete mode 100644 cedar-drt/fuzz/fuzz_targets/eval_type_directed_shared.rs delete mode 100644 cedar-drt/fuzz/fuzz_targets/rbac-authorizer-lean.rs delete mode 100644 cedar-drt/fuzz/fuzz_targets/rbac-lean.rs delete mode 100644 cedar-drt/fuzz/fuzz_targets/rbac_authorizer_shared.rs delete mode 
100644 cedar-drt/fuzz/fuzz_targets/rbac_shared.rs delete mode 100644 cedar-drt/fuzz/fuzz_targets/strict-validation-drt-type-directed-lean.rs delete mode 100644 cedar-drt/fuzz/fuzz_targets/strict-validation-drt-type-directed.rs delete mode 100644 cedar-drt/fuzz/fuzz_targets/strict_validation_drt_shared.rs delete mode 100644 cedar-drt/fuzz/fuzz_targets/validation-drt.rs delete mode 100644 cedar-drt/src/dafny_java_impl.rs diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml index 9df8a5b6b..a06db6a2a 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yml +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -14,7 +14,6 @@ body: description: What component of the Cedar specification does this feature relate to? multiple: true options: - - Dafny formalization - Lean formalization - DRT target(s) - DRT generator(s) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 79382ac4f..3a8c70a1a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -3,46 +3,7 @@ name: Build cedar-spec on: pull_request: -env: - dotnet-version: 6.0.x # SDK Version for building Dafny - jobs: - build_and_test_dafny: - name: Build and Test Dafny - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - toolchain: - - stable - steps: - - name: Checkout cedar-spec - uses: actions/checkout@v3 - - name: Install Z3 - shell: bash - run: | - wget -q --show-progress https://github.com/Z3Prover/z3/releases/download/z3-4.12.1/z3-4.12.1-x64-glibc-2.35.zip - unzip z3-4.12.1-x64-glibc-2.35.zip - echo "$(pwd)/z3-4.12.1-x64-glibc-2.35/bin" >> $GITHUB_PATH - - name: Audit Dafny files - shell: bash - working-directory: ./cedar-dafny - run : | - dotnet tool restore - sudo apt-get install ack - find . 
-path test -prune -o -name '*.dfy' | xargs -I {} sh -c "printf '%s: ' {} && (dotnet tool run dafny audit {} | ack --passthru --nocolor '0 findings')" - - name: verify dafny - working-directory: ./cedar-dafny - run: make GEN_STATS=1 verify - - name: test dafny - working-directory: ./cedar-dafny - run: make test - - name: log resource usage - working-directory: ./cedar-dafny - run: | - dotnet tool restore - dotnet tool run dafny-reportgenerator summarize-csv-results --max-resource-count 10000000 . || true - build_and_test_lean: name: Build and Test Lean runs-on: ubuntu-latest diff --git a/.gitignore b/.gitignore index 1b1cde849..ab6989efb 100644 --- a/.gitignore +++ b/.gitignore @@ -11,18 +11,6 @@ # Don't check IntelliJ module files *.iml - -# cedar-dafny build artifacts -cedar-dafny/.dotnet -cedar-dafny/.local -cedar-dafny/.nuget -cedar-dafny/build -cedar-dafny/TestResults - -# cedar-dafny-java-wrapper build artifacts -cedar-dafny-java-wrapper/build -cedar-dafny-java-wrapper/.gradle - # cedar-drt build artifacts cedar-drt/target cedar-drt/Cargo.lock diff --git a/README.md b/README.md index e06511bc1..d71774a9b 100644 --- a/README.md +++ b/README.md @@ -1,23 +1,16 @@ # Cedar Specification -This repository contains the Dafny formalization of Cedar and infrastructure for performing differential randomized testing (DRT) between the formalization and Rust production implementation available in [cedar](https://github.com/cedar-policy/cedar). +This repository contains the formalization of Cedar and infrastructure for performing differential randomized testing (DRT) between the formalization and Rust production implementation available in [cedar](https://github.com/cedar-policy/cedar). ## Repository Structure * [`cedar-lean`](./cedar-lean) contains the Lean formalization of, and proofs about, Cedar. -* [`cedar-dafny`](./cedar-dafny) contains the Dafny formalization of, and proofs about, Cedar. 
-* [`cedar-dafny-java-wrapper`](./cedar-dafny-java-wrapper) contains the Java interface for DRT. * [`cedar-drt`](./cedar-drt) contains code for fuzzing, property-based testing, and differential testing of Cedar. * [`cedar-policy-generators`](./cedar-policy-generators) contains code for generating schemas, entities, policies, and requests using the [arbitrary](https://docs.rs/arbitrary/latest/arbitrary/index.html#) crate. * `cedar` is a git submodule, pinned to the associated commit of [cedar](https://github.com/cedar-policy/cedar). ## Build -To build the Dafny formalization and proofs: - -* Install Dafny, following the instructions [here](https://github.com/dafny-lang/dafny/wiki/INSTALL). Our proofs expect Z3 version 4.12.1, so if you have another copy of Z3 installed locally, you may need to adjust your PATH. -* `cd cedar-dafny && make verify test` - To build the Lean formalization and proofs: * Install Lean, following the instructions [here](https://leanprover.github.io/lean4/doc/setup.html). @@ -25,7 +18,7 @@ To build the Lean formalization and proofs: To build the DRT framework: -* Install Dafny and Lean, following the instructions above. +* Install Lean, following the instructions above. * `./build.sh` Note that the build for DRT has only been tested on **Amazon Linux 2**. @@ -42,16 +35,6 @@ Available targets are described in the README in the `cedar-drt` directory. Additional commands available with `cargo fuzz help`. -## Checking Proof Stability - -You can measure the complexity of Dafny proofs using [dafny-reportgenerator](https://github.com/dafny-lang/dafny-reportgenerator/). -For example, the commands below check that all proofs have a [resource count](https://dafny.org/dafny/VerificationOptimization/VerificationOptimization#identifying-difficult-assertions) under 10M, which is our informal threshold for when a proof is "too expensive" and likely to break with future changes to Dafny and/or Z3. 
- -```bash -cd cedar-dafny && make verify GEN_STATS=1 -dotnet tool run dafny-reportgenerator summarize-csv-results --max-resource-count 10000000 . -``` - ## Security See [CONTRIBUTING](CONTRIBUTING.md#security-issue-notifications) for more information. diff --git a/build.sh b/build.sh index 54be91214..288f14327 100755 --- a/build.sh +++ b/build.sh @@ -10,16 +10,6 @@ cd cedar-drt source set_env_vars.sh cd .. -# Build the Dafny formalization and extract to Java code -cd cedar-dafny -make compile-difftest -cd .. - -# Build the Dafny Java wrapper -cd cedar-dafny-java-wrapper -./gradlew build dumpClasspath -cd .. - # Build the Lean formalization and extract to static C libraries cd cedar-lean lake build Cedar:static DiffTest:static Std:static diff --git a/cedar-dafny-java-wrapper/README.md b/cedar-dafny-java-wrapper/README.md deleted file mode 100644 index 47d3854f7..000000000 --- a/cedar-dafny-java-wrapper/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# Cedar Dafny Java Wrapper - -This folder provides the Java interface for differential testing, used in [`cedar-drt`](../cedar-drt). -It relies on the Java code extracted from the Dafny formalization in [`cedar-dafny`](../cedar-dafny). diff --git a/cedar-dafny-java-wrapper/build.gradle b/cedar-dafny-java-wrapper/build.gradle deleted file mode 100644 index 96ef3ab32..000000000 --- a/cedar-dafny-java-wrapper/build.gradle +++ /dev/null @@ -1,120 +0,0 @@ -/* - Applies core Gradle plugins, which are ones built into Gradle itself. -*/ -plugins { - // Java for compile and unit test of Java source files. Read more at: - // https://docs.gradle.org/current/userguide/java_plugin.html - id 'java' - - // necessary to run the main() of the jar? - id 'application' - - // JaCoCo for coverage metrics and reports of Java source files. Read more at: - // https://docs.gradle.org/current/userguide/jacoco_plugin.html - id 'jacoco' -} - -/* - Applies community Gradle plugins, usually added as build-tools in Config. 
-*/ - -// SpotBugs for quality checks and reports of source files. Read more at: -// https://spotbugs.readthedocs.io/en/stable/gradle.html -//apply plugin: 'com.github.spotbugs' - -/* - Java plugin default project layout is 'src/main/java' and 'src/test/java'. - Uncomment below if you instead want to use 'src' and 'tst'. - -sourceSets.main.java.srcDirs = ['src'] -sourceSets.test.java.srcDirs = ['tst'] -*/ - -mainClassName = 'com.cedar_definitional_engine.Main' -jar { - manifest { - attributes( - /*'Class-Path': configurations.runtimeClasspath.files.collect { it.getName() }.join(' '),*/ - 'Main-Class': 'com.cedar_definitional_engine.Main' - ) - } -} - -/* - Configures the Checkstyle "checkstyle" plugin. Remove this and the plugin if - you want to skip these checks and report generation. -check.dependsOn verify -formatter { - formatterStyle = "AOSP" - includeTests = true -} -*/ - -/* - Configures the JaCoCo "jacoco" plugin. Remove this if you want to skip - these checks and report generation. - - Set minimum code coverage to fail build, where 0.01 = 1%. -check.dependsOn jacocoTestCoverageVerification -jacocoTestCoverageVerification { - violationRules { - rule { - limit { - minimum = 0.01 - } - } - } -} -*/ - -/* - Configures the SpotBugs "com.github.spotbugs" plugin. Remove this and the - plugin to skip these checks and report generation. -spotbugs { - ignoreFailures.set(false) -} -*/ -/* - The SpotBugs Gradle Plugin generates a task for each sourceSet generated by - Gradle Java Plugin. For instance, if you have two sourceSets main and test, - this plugin will generates two tasks: spotbugsMain and spotbugsTest. - Uncomment below if you want to skip checks for test code. - -spotbugsTest { - ignoreFailures = true -} -*/ - -repositories { - mavenCentral() -} - -/* - Resolve build, test, tool, and runtime dependencies. 
-*/ -dependencies { - implementation 'com.fasterxml.jackson.core:jackson-databind:2.12.7.1' - implementation files( - '../cedar-dafny/build/lib/CedarDafny-difftest.jar', - '../cedar-dafny/build/lib/DafnyRuntime.jar' - ) - // https://junit.org/junit5/docs/current/user-guide/#dependency-metadata-junit-jupiter: OK? - testImplementation 'org.junit.jupiter:junit-jupiter:5.8.2' -} - -/* - Specifies that JUnit Platform (a.k.a. JUnit 5) should be used to execute tests. - - For mixed JUnit 4 and 5 tests, add 'JUnit-4-12-migration = 5.x;' to - test-dependencies in Config. -*/ -test { - useJUnitPlatform() -} - -tasks.register('dumpClasspath') { - doLast { - buildDir.mkdirs() - new File(buildDir, "runtimeClasspath.txt").text = configurations.runtimeClasspath.asPath - } -} diff --git a/cedar-dafny-java-wrapper/gradle/wrapper/gradle-wrapper.jar b/cedar-dafny-java-wrapper/gradle/wrapper/gradle-wrapper.jar deleted file mode 100644 index c1962a79e29d3e0ab67b14947c167a862655af9b..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 62076 zcmb5VV{~QRw)Y#`wrv{~+qP{x72B%VwzFc}c2cp;N~)5ZbDrJayPv(!dGEd-##*zr z)#n-$y^sH|_dchh3@8{H5D*j;5D<{i*8l5IFJ|DjL!e)upfGNX(kojugZ3I`oH1PvW`wFW_ske0j@lB9bX zO;2)`y+|!@X(fZ1<2n!Qx*)_^Ai@Cv-dF&(vnudG?0CsddG_&Wtae(n|K59ew)6St z#dj7_(Cfwzh$H$5M!$UDd8=4>IQsD3xV=lXUq($;(h*$0^yd+b{qq63f0r_de#!o_ zXDngc>zy`uor)4A^2M#U*DC~i+dc<)Tb1Tv&~Ev@oM)5iJ4Sn#8iRw16XXuV50BS7 zdBL5Mefch(&^{luE{*5qtCZk$oFr3RH=H!c3wGR=HJ(yKc_re_X9pD` zJ;uxPzUfVpgU>DSq?J;I@a+10l0ONXPcDkiYcihREt5~T5Gb}sT0+6Q;AWHl`S5dV>lv%-p9l#xNNy7ZCr%cyqHY%TZ8Q4 zbp&#ov1*$#grNG#1vgfFOLJCaNG@K|2!W&HSh@3@Y%T?3YI75bJp!VP*$*!< z;(ffNS_;@RJ`=c7yX04!u3JP*<8jeqLHVJu#WV&v6wA!OYJS4h<_}^QI&97-;=ojW zQ-1t)7wnxG*5I%U4)9$wlv5Fr;cIizft@&N+32O%B{R1POm$oap@&f| zh+5J{>U6ftv|vAeKGc|zC=kO(+l7_cLpV}-D#oUltScw})N>~JOZLU_0{Ka2e1evz z{^a*ZrLr+JUj;)K&u2CoCAXLC2=fVScI(m_p~0FmF>>&3DHziouln?;sxW`NB}cSX z8?IsJB)Z=aYRz!X=yJn$kyOWK%rCYf-YarNqKzmWu$ZvkP12b4qH zhS9Q>j<}(*frr?z<%9hl*i^#@*O2q(Z^CN)c2c 
z>1B~D;@YpG?G!Yk+*yn4vM4sO-_!&m6+`k|3zd;8DJnxsBYtI;W3We+FN@|tQ5EW= z!VU>jtim0Mw#iaT8t_<+qKIEB-WwE04lBd%Letbml9N!?SLrEG$nmn7&W(W`VB@5S zaY=sEw2}i@F_1P4OtEw?xj4@D6>_e=m=797#hg}f*l^`AB|Y0# z9=)o|%TZFCY$SzgSjS|8AI-%J4x}J)!IMxY3_KYze`_I=c1nmrk@E8c9?MVRu)7+Ue79|)rBX7tVB7U|w4*h(;Gi3D9le49B38`wuv zp7{4X^p+K4*$@gU(Tq3K1a#3SmYhvI42)GzG4f|u zwQFT1n_=n|jpi=70-yE9LA+d*T8u z`=VmmXJ_f6WmZveZPct$Cgu^~gFiyL>Lnpj*6ee>*0pz=t$IJ}+rE zsf@>jlcG%Wx;Cp5x)YSVvB1$yyY1l&o zvwX=D7k)Dn;ciX?Z)Pn8$flC8#m`nB&(8?RSdBvr?>T9?E$U3uIX7T?$v4dWCa46 z+&`ot8ZTEgp7G+c52oHJ8nw5}a^dwb_l%MOh(ebVj9>_koQP^$2B~eUfSbw9RY$_< z&DDWf2LW;b0ZDOaZ&2^i^g+5uTd;GwO(-bbo|P^;CNL-%?9mRmxEw~5&z=X^Rvbo^WJW=n_%*7974RY}JhFv46> zd}`2|qkd;89l}R;i~9T)V-Q%K)O=yfVKNM4Gbacc7AOd>#^&W&)Xx!Uy5!BHnp9kh z`a(7MO6+Ren#>R^D0K)1sE{Bv>}s6Rb9MT14u!(NpZOe-?4V=>qZ>}uS)!y~;jEUK z&!U7Fj&{WdgU#L0%bM}SYXRtM5z!6M+kgaMKt%3FkjWYh=#QUpt$XX1!*XkpSq-pl zhMe{muh#knk{9_V3%qdDcWDv}v)m4t9 zQhv{;} zc{}#V^N3H>9mFM8`i`0p+fN@GqX+kl|M94$BK3J-X`Hyj8r!#x6Vt(PXjn?N)qedP z=o1T^#?1^a{;bZ&x`U{f?}TMo8ToN zkHj5v|}r}wDEi7I@)Gj+S1aE-GdnLN+$hw!=DzglMaj#{qjXi_dwpr|HL(gcCXwGLEmi|{4&4#OZ4ChceA zKVd4K!D>_N=_X;{poT~4Q+!Le+ZV>=H7v1*l%w`|`Dx8{)McN@NDlQyln&N3@bFpV z_1w~O4EH3fF@IzJ9kDk@7@QctFq8FbkbaH7K$iX=bV~o#gfh?2JD6lZf(XP>~DACF)fGFt)X%-h1yY~MJU{nA5 ze2zxWMs{YdX3q5XU*9hOH0!_S24DOBA5usB+Ws$6{|AMe*joJ?RxfV}*7AKN9V*~J zK+OMcE@bTD>TG1*yc?*qGqjBN8mgg@h1cJLDv)0!WRPIkC` zZrWXrceVw;fB%3`6kq=a!pq|hFIsQ%ZSlo~)D z|64!aCnw-?>}AG|*iOl44KVf8@|joXi&|)1rB;EQWgm+iHfVbgllP$f!$Wf42%NO5b(j9Bw6L z;0dpUUK$5GX4QbMlTmLM_jJt!ur`_0~$b#BB7FL*%XFf<b__1o)Ao3rlobbN8-(T!1d-bR8D3S0@d zLI!*GMb5s~Q<&sjd}lBb8Nr0>PqE6_!3!2d(KAWFxa{hm`@u|a(%#i(#f8{BP2wbs zt+N_slWF4IF_O|{w`c~)Xvh&R{Au~CFmW#0+}MBd2~X}t9lz6*E7uAD`@EBDe$>7W zzPUkJx<`f$0VA$=>R57^(K^h86>09?>_@M(R4q($!Ck6GG@pnu-x*exAx1jOv|>KH zjNfG5pwm`E-=ydcb+3BJwuU;V&OS=6yM^4Jq{%AVqnTTLwV`AorIDD}T&jWr8pB&j28fVtk_y*JRP^t@l*($UZ z6(B^-PBNZ+z!p?+e8@$&jCv^EWLb$WO=}Scr$6SM*&~B95El~;W_0(Bvoha|uQ1T< zO$%_oLAwf1bW*rKWmlD+@CP&$ObiDy=nh1b2ejz%LO9937N{LDe7gle4i!{}I$;&Y 
zkexJ9Ybr+lrCmKWg&}p=`2&Gf10orS?4$VrzWidT=*6{KzOGMo?KI0>GL0{iFWc;C z+LPq%VH5g}6V@-tg2m{C!-$fapJ9y}c$U}aUmS{9#0CM*8pC|sfer!)nG7Ji>mfRh z+~6CxNb>6eWKMHBz-w2{mLLwdA7dA-qfTu^A2yG1+9s5k zcF=le_UPYG&q!t5Zd_*E_P3Cf5T6821bO`daa`;DODm8Ih8k89=RN;-asHIigj`n=ux>*f!OC5#;X5i;Q z+V!GUy0|&Y_*8k_QRUA8$lHP;GJ3UUD08P|ALknng|YY13)}!!HW@0z$q+kCH%xet zlWf@BXQ=b=4}QO5eNnN~CzWBbHGUivG=`&eWK}beuV*;?zt=P#pM*eTuy3 zP}c#}AXJ0OIaqXji78l;YrP4sQe#^pOqwZUiiN6^0RCd#D271XCbEKpk`HI0IsN^s zES7YtU#7=8gTn#lkrc~6)R9u&SX6*Jk4GFX7){E)WE?pT8a-%6P+zS6o&A#ml{$WX zABFz#i7`DDlo{34)oo?bOa4Z_lNH>n;f0nbt$JfAl~;4QY@}NH!X|A$KgMmEsd^&Y zt;pi=>AID7ROQfr;MsMtClr5b0)xo|fwhc=qk33wQ|}$@?{}qXcmECh>#kUQ-If0$ zseb{Wf4VFGLNc*Rax#P8ko*=`MwaR-DQ8L8V8r=2N{Gaips2_^cS|oC$+yScRo*uF zUO|5=?Q?{p$inDpx*t#Xyo6=s?bbN}y>NNVxj9NZCdtwRI70jxvm3!5R7yiWjREEd zDUjrsZhS|P&|Ng5r+f^kA6BNN#|Se}_GF>P6sy^e8kBrgMv3#vk%m}9PCwUWJg-AD zFnZ=}lbi*mN-AOm zCs)r=*YQAA!`e#1N>aHF=bb*z*hXH#Wl$z^o}x##ZrUc=kh%OHWhp=7;?8%Xj||@V?1c ziWoaC$^&04;A|T)!Zd9sUzE&$ODyJaBpvqsw19Uiuq{i#VK1!htkdRWBnb z`{rat=nHArT%^R>u#CjjCkw-7%g53|&7z-;X+ewb?OLWiV|#nuc8mp*LuGSi3IP<<*Wyo9GKV7l0Noa4Jr0g3p_$ z*R9{qn=?IXC#WU>48-k5V2Oc_>P;4_)J@bo1|pf=%Rcbgk=5m)CJZ`caHBTm3%!Z9 z_?7LHr_BXbKKr=JD!%?KhwdYSdu8XxPoA{n8^%_lh5cjRHuCY9Zlpz8g+$f@bw@0V z+6DRMT9c|>1^3D|$Vzc(C?M~iZurGH2pXPT%F!JSaAMdO%!5o0uc&iqHx?ImcX6fI zCApkzc~OOnfzAd_+-DcMp&AOQxE_EsMqKM{%dRMI5`5CT&%mQO?-@F6tE*xL?aEGZ z8^wH@wRl`Izx4sDmU>}Ym{ybUm@F83qqZPD6nFm?t?(7>h*?`fw)L3t*l%*iw0Qu#?$5eq!Qc zpQvqgSxrd83NsdO@lL6#{%lsYXWen~d3p4fGBb7&5xqNYJ)yn84!e1PmPo7ChVd%4 zHUsV0Mh?VpzZD=A6%)Qrd~i7 z96*RPbid;BN{Wh?adeD_p8YU``kOrGkNox3D9~!K?w>#kFz!4lzOWR}puS(DmfjJD z`x0z|qB33*^0mZdM&6$|+T>fq>M%yoy(BEjuh9L0>{P&XJ3enGpoQRx`v6$txXt#c z0#N?b5%srj(4xmPvJxrlF3H%OMB!jvfy z;wx8RzU~lb?h_}@V=bh6p8PSb-dG|-T#A?`c&H2`_!u+uenIZe`6f~A7r)`9m8atC zt(b|6Eg#!Q*DfRU=Ix`#B_dK)nnJ_+>Q<1d7W)eynaVn`FNuN~%B;uO2}vXr5^zi2 z!ifIF5@Zlo0^h~8+ixFBGqtweFc`C~JkSq}&*a3C}L?b5Mh-bW=e)({F_g4O3 zb@SFTK3VD9QuFgFnK4Ve_pXc3{S$=+Z;;4+;*{H}Rc;845rP?DLK6G5Y-xdUKkA6E3Dz&5f{F^FjJQ(NSpZ8q-_!L3LL@H* 
zxbDF{gd^U3uD;)a)sJwAVi}7@%pRM&?5IaUH%+m{E)DlA_$IA1=&jr{KrhD5q&lTC zAa3c)A(K!{#nOvenH6XrR-y>*4M#DpTTOGQEO5Jr6kni9pDW`rvY*fs|ItV;CVITh z=`rxcH2nEJpkQ^(;1c^hfb8vGN;{{oR=qNyKtR1;J>CByul*+=`NydWnSWJR#I2lN zTvgnR|MBx*XFsfdA&;tr^dYaqRZp*2NwkAZE6kV@1f{76e56eUmGrZ>MDId)oqSWw z7d&r3qfazg+W2?bT}F)4jD6sWaw`_fXZGY&wnGm$FRPFL$HzVTH^MYBHWGCOk-89y zA+n+Q6EVSSCpgC~%uHfvyg@ufE^#u?JH?<73A}jj5iILz4Qqk5$+^U(SX(-qv5agK znUkfpke(KDn~dU0>gdKqjTkVk`0`9^0n_wzXO7R!0Thd@S;U`y)VVP&mOd-2 z(hT(|$=>4FY;CBY9#_lB$;|Wd$aOMT5O_3}DYXEHn&Jrc3`2JiB`b6X@EUOD zVl0S{ijm65@n^19T3l%>*;F(?3r3s?zY{thc4%AD30CeL_4{8x6&cN}zN3fE+x<9; zt2j1RRVy5j22-8U8a6$pyT+<`f+x2l$fd_{qEp_bfxfzu>ORJsXaJn4>U6oNJ#|~p z`*ZC&NPXl&=vq2{Ne79AkQncuxvbOG+28*2wU$R=GOmns3W@HE%^r)Fu%Utj=r9t` zd;SVOnA(=MXgnOzI2@3SGKHz8HN~Vpx&!Ea+Df~`*n@8O=0!b4m?7cE^K*~@fqv9q zF*uk#1@6Re_<^9eElgJD!nTA@K9C732tV~;B`hzZ321Ph=^BH?zXddiu{Du5*IPg} zqDM=QxjT!Rp|#Bkp$(mL)aar)f(dOAXUiw81pX0DC|Y4;>Vz>>DMshoips^8Frdv} zlTD=cKa48M>dR<>(YlLPOW%rokJZNF2gp8fwc8b2sN+i6&-pHr?$rj|uFgktK@jg~ zIFS(%=r|QJ=$kvm_~@n=ai1lA{7Z}i+zj&yzY+!t$iGUy|9jH#&oTNJ;JW-3n>DF+ z3aCOzqn|$X-Olu_p7brzn`uk1F*N4@=b=m;S_C?#hy{&NE#3HkATrg?enaVGT^$qIjvgc61y!T$9<1B@?_ibtDZ{G zeXInVr5?OD_nS_O|CK3|RzzMmu+8!#Zb8Ik;rkIAR%6?$pN@d<0dKD2c@k2quB%s( zQL^<_EM6ow8F6^wJN1QcPOm|ehA+dP(!>IX=Euz5qqIq}Y3;ibQtJnkDmZ8c8=Cf3 zu`mJ!Q6wI7EblC5RvP*@)j?}W=WxwCvF3*5Up_`3*a~z$`wHwCy)2risye=1mSp%p zu+tD6NAK3o@)4VBsM!@);qgsjgB$kkCZhaimHg&+k69~drbvRTacWKH;YCK(!rC?8 zP#cK5JPHSw;V;{Yji=55X~S+)%(8fuz}O>*F3)hR;STU`z6T1aM#Wd+FP(M5*@T1P z^06O;I20Sk!bxW<-O;E081KRdHZrtsGJflFRRFS zdi5w9OVDGSL3 zNrC7GVsGN=b;YH9jp8Z2$^!K@h=r-xV(aEH@#JicPy;A0k1>g1g^XeR`YV2HfmqXY zYbRwaxHvf}OlCAwHoVI&QBLr5R|THf?nAevV-=~V8;gCsX>jndvNOcFA+DI+zbh~# zZ7`qNk&w+_+Yp!}j;OYxIfx_{f0-ONc?mHCiCUak=>j>~>YR4#w# zuKz~UhT!L~GfW^CPqG8Lg)&Rc6y^{%3H7iLa%^l}cw_8UuG;8nn9)kbPGXS}p3!L_ zd#9~5CrH8xtUd?{d2y^PJg+z(xIfRU;`}^=OlehGN2=?}9yH$4Rag}*+AWotyxfCJ zHx=r7ZH>j2kV?%7WTtp+-HMa0)_*DBBmC{sd$)np&GEJ__kEd`xB5a2A z*J+yx>4o#ZxwA{;NjhU*1KT~=ZK~GAA;KZHDyBNTaWQ1+;tOFFthnD)DrCn`DjBZ% 
zk$N5B4^$`n^jNSOr=t(zi8TN4fpaccsb`zOPD~iY=UEK$0Y70bG{idLx@IL)7^(pL z{??Bnu=lDeguDrd%qW1)H)H`9otsOL-f4bSu};o9OXybo6J!Lek`a4ff>*O)BDT_g z<6@SrI|C9klY(>_PfA^qai7A_)VNE4c^ZjFcE$Isp>`e5fLc)rg@8Q_d^Uk24$2bn z9#}6kZ2ZxS9sI(RqT7?El2@B+($>eBQrNi_k#CDJ8D9}8$mmm z4oSKO^F$i+NG)-HE$O6s1--6EzJa?C{x=QgK&c=)b(Q9OVoAXYEEH20G|q$}Hue%~ zO3B^bF=t7t48sN zWh_zA`w~|){-!^g?6Mqf6ieV zFx~aPUOJGR=4{KsW7I?<=J2|lY`NTU=lt=%JE9H1vBpkcn=uq(q~=?iBt_-r(PLBM zP-0dxljJO>4Wq-;stY)CLB4q`-r*T$!K2o}?E-w_i>3_aEbA^MB7P5piwt1dI-6o!qWCy0 ztYy!x9arGTS?kabkkyv*yxvsPQ7Vx)twkS6z2T@kZ|kb8yjm+^$|sEBmvACeqbz)RmxkkDQX-A*K!YFziuhwb|ym>C$}U|J)4y z$(z#)GH%uV6{ec%Zy~AhK|+GtG8u@c884Nq%w`O^wv2#A(&xH@c5M`Vjk*SR_tJnq z0trB#aY)!EKW_}{#L3lph5ow=@|D5LzJYUFD6 z7XnUeo_V0DVSIKMFD_T0AqAO|#VFDc7c?c-Q%#u00F%!_TW1@JVnsfvm@_9HKWflBOUD~)RL``-!P;(bCON_4eVdduMO>?IrQ__*zE@7(OX zUtfH@AX*53&xJW*Pu9zcqxGiM>xol0I~QL5B%Toog3Jlenc^WbVgeBvV8C8AX^Vj& z^I}H})B=VboO%q1;aU5ACMh{yK4J;xlMc`jCnZR^!~LDs_MP&8;dd@4LDWw~*>#OT zeZHwdQWS!tt5MJQI~cw|Ka^b4c|qyd_ly(+Ql2m&AAw^ zQeSXDOOH!!mAgzAp0z)DD>6Xo``b6QwzUV@w%h}Yo>)a|xRi$jGuHQhJVA%>)PUvK zBQ!l0hq<3VZ*RnrDODP)>&iS^wf64C;MGqDvx>|p;35%6(u+IHoNbK z;Gb;TneFo*`zUKS6kwF*&b!U8e5m4YAo03a_e^!5BP42+r)LFhEy?_7U1IR<; z^0v|DhCYMSj<-;MtY%R@Fg;9Kky^pz_t2nJfKWfh5Eu@_l{^ph%1z{jkg5jQrkvD< z#vdK!nku*RrH~TdN~`wDs;d>XY1PH?O<4^U4lmA|wUW{Crrv#r%N>7k#{Gc44Fr|t z@UZP}Y-TrAmnEZ39A*@6;ccsR>)$A)S>$-Cj!=x$rz7IvjHIPM(TB+JFf{ehuIvY$ zsDAwREg*%|=>Hw$`us~RP&3{QJg%}RjJKS^mC_!U;E5u>`X`jW$}P`Mf}?7G7FX#{ zE(9u1SO;3q@ZhDL9O({-RD+SqqPX)`0l5IQu4q)49TUTkxR(czeT}4`WV~pV*KY&i zAl3~X%D2cPVD^B43*~&f%+Op)wl<&|D{;=SZwImydWL6@_RJjxP2g)s=dH)u9Npki zs~z9A+3fj0l?yu4N0^4aC5x)Osnm0qrhz@?nwG_`h(71P znbIewljU%T*cC=~NJy|)#hT+lx#^5MuDDnkaMb*Efw9eThXo|*WOQzJ*#3dmRWm@! 
zfuSc@#kY{Um^gBc^_Xdxnl!n&y&}R4yAbK&RMc+P^Ti;YIUh|C+K1|=Z^{nZ}}rxH*v{xR!i%qO~o zTr`WDE@k$M9o0r4YUFFeQO7xCu_Zgy)==;fCJ94M_rLAv&~NhfvcLWCoaGg2ao~3e zBG?Ms9B+efMkp}7BhmISGWmJsKI@a8b}4lLI48oWKY|8?zuuNc$lt5Npr+p7a#sWu zh!@2nnLBVJK!$S~>r2-pN||^w|fY`CT{TFnJy`B|e5;=+_v4l8O-fkN&UQbA4NKTyntd zqK{xEKh}U{NHoQUf!M=2(&w+eef77VtYr;xs%^cPfKLObyOV_9q<(%76-J%vR>w9!us-0c-~Y?_EVS%v!* z15s2s3eTs$Osz$JayyH|5nPAIPEX=U;r&p;K14G<1)bvn@?bM5kC{am|C5%hyxv}a z(DeSKI5ZfZ1*%dl8frIX2?);R^^~LuDOpNpk-2R8U1w92HmG1m&|j&J{EK=|p$;f9 z7Rs5|jr4r8k5El&qcuM+YRlKny%t+1CgqEWO>3;BSRZi(LA3U%Jm{@{y+A+w(gzA< z7dBq6a1sEWa4cD0W7=Ld9z0H7RI^Z7vl(bfA;72j?SWCo`#5mVC$l1Q2--%V)-uN* z9ha*s-AdfbDZ8R8*fpwjzx=WvOtmSzGFjC#X)hD%Caeo^OWjS(3h|d9_*U)l%{Ab8 zfv$yoP{OuUl@$(-sEVNt{*=qi5P=lpxWVuz2?I7Dc%BRc+NGNw+323^ z5BXGfS71oP^%apUo(Y#xkxE)y?>BFzEBZ}UBbr~R4$%b7h3iZu3S(|A;&HqBR{nK& z$;GApNnz=kNO^FL&nYcfpB7Qg;hGJPsCW44CbkG1@l9pn0`~oKy5S777uH)l{irK!ru|X+;4&0D;VE*Ii|<3P zUx#xUqvZT5kVQxsF#~MwKnv7;1pR^0;PW@$@T7I?s`_rD1EGUdSA5Q(C<>5SzE!vw z;{L&kKFM-MO>hy#-8z`sdVx})^(Dc-dw;k-h*9O2_YZw}|9^y-|8RQ`BWJUJL(Cer zP5Z@fNc>pTXABbTRY-B5*MphpZv6#i802giwV&SkFCR zGMETyUm(KJbh+&$8X*RB#+{surjr;8^REEt`2&Dubw3$mx>|~B5IKZJ`s_6fw zKAZx9&PwBqW1Oz0r0A4GtnZd7XTKViX2%kPfv+^X3|_}RrQ2e3l=KG_VyY`H?I5&CS+lAX5HbA%TD9u6&s#v!G> zzW9n4J%d5ye7x0y`*{KZvqyXUfMEE^ZIffzI=Hh|3J}^yx7eL=s+TPH(Q2GT-sJ~3 zI463C{(ag7-hS1ETtU;_&+49ABt5!A7CwLwe z=SoA8mYZIQeU;9txI=zcQVbuO%q@E)JI+6Q!3lMc=Gbj(ASg-{V27u>z2e8n;Nc*pf}AqKz1D>p9G#QA+7mqqrEjGfw+85Uyh!=tTFTv3|O z+)-kFe_8FF_EkTw!YzwK^Hi^_dV5x-Ob*UWmD-})qKj9@aE8g240nUh=g|j28^?v7 zHRTBo{0KGaWBbyX2+lx$wgXW{3aUab6Bhm1G1{jTC7ota*JM6t+qy)c5<@ zpc&(jVdTJf(q3xB=JotgF$X>cxh7k*(T`-V~AR+`%e?YOeALQ2Qud( zz35YizXt(aW3qndR}fTw1p()Ol4t!D1pitGNL95{SX4ywzh0SF;=!wf=?Q?_h6!f* zh7<+GFi)q|XBsvXZ^qVCY$LUa{5?!CgwY?EG;*)0ceFe&=A;!~o`ae}Z+6me#^sv- z1F6=WNd6>M(~ z+092z>?Clrcp)lYNQl9jN-JF6n&Y0mp7|I0dpPx+4*RRK+VQI~>en0Dc;Zfl+x z_e_b7s`t1_A`RP3$H}y7F9_na%D7EM+**G_Z0l_nwE+&d_kc35n$Fxkd4r=ltRZhh zr9zER8>j(EdV&Jgh(+i}ltESBK62m0nGH6tCBr90!4)-`HeBmz54p~QP#dsu%nb~W 
z7sS|(Iydi>C@6ZM(Us!jyIiszMkd)^u<1D+R@~O>HqZIW&kearPWmT>63%_t2B{_G zX{&a(gOYJx!Hq=!T$RZ&<8LDnxsmx9+TBL0gTk$|vz9O5GkK_Yx+55^R=2g!K}NJ3 zW?C;XQCHZl7H`K5^BF!Q5X2^Mj93&0l_O3Ea3!Ave|ixx+~bS@Iv18v2ctpSt4zO{ zp#7pj!AtDmti$T`e9{s^jf(ku&E|83JIJO5Qo9weT6g?@vX!{7)cNwymo1+u(YQ94 zopuz-L@|5=h8A!(g-MXgLJC0MA|CgQF8qlonnu#j z;uCeq9ny9QSD|p)9sp3ebgY3rk#y0DA(SHdh$DUm^?GI<>%e1?&}w(b zdip1;P2Z=1wM+$q=TgLP$}svd!vk+BZ@h<^4R=GS2+sri7Z*2f`9 z5_?i)xj?m#pSVchk-SR!2&uNhzEi+#5t1Z$o0PoLGz*pT64%+|Wa+rd5Z}60(j?X= z{NLjtgRb|W?CUADqOS@(*MA-l|E342NxRaxLTDqsOyfWWe%N(jjBh}G zm7WPel6jXijaTiNita+z(5GCO0NM=Melxud57PP^d_U## zbA;9iVi<@wr0DGB8=T9Ab#2K_#zi=$igyK48@;V|W`fg~7;+!q8)aCOo{HA@vpSy-4`^!ze6-~8|QE||hC{ICKllG9fbg_Y7v z$jn{00!ob3!@~-Z%!rSZ0JO#@>|3k10mLK0JRKP-Cc8UYFu>z93=Ab-r^oL2 zl`-&VBh#=-?{l1TatC;VweM^=M7-DUE>m+xO7Xi6vTEsReyLs8KJ+2GZ&rxw$d4IT zPXy6pu^4#e;;ZTsgmG+ZPx>piodegkx2n0}SM77+Y*j^~ICvp#2wj^BuqRY*&cjmL zcKp78aZt>e{3YBb4!J_2|K~A`lN=u&5j!byw`1itV(+Q_?RvV7&Z5XS1HF)L2v6ji z&kOEPmv+k_lSXb{$)of~(BkO^py&7oOzpjdG>vI1kcm_oPFHy38%D4&A4h_CSo#lX z2#oqMCTEP7UvUR3mwkPxbl8AMW(e{ARi@HCYLPSHE^L<1I}OgZD{I#YH#GKnpRmW3 z2jkz~Sa(D)f?V?$gNi?6)Y;Sm{&?~2p=0&BUl_(@hYeX8YjaRO=IqO7neK0RsSNdYjD zaw$g2sG(>JR=8Iz1SK4`*kqd_3-?;_BIcaaMd^}<@MYbYisWZm2C2|Np_l|8r9yM|JkUngSo@?wci(7&O9a z%|V(4C1c9pps0xxzPbXH=}QTxc2rr7fXk$9`a6TbWKPCz&p=VsB8^W96W=BsB|7bc zf(QR8&Ktj*iz)wK&mW`#V%4XTM&jWNnDF56O+2bo<3|NyUhQ%#OZE8$Uv2a@J>D%t zMVMiHh?es!Ex19q&6eC&L=XDU_BA&uR^^w>fpz2_`U87q_?N2y;!Z!bjoeKrzfC)} z?m^PM=(z{%n9K`p|7Bz$LuC7!>tFOuN74MFELm}OD9?%jpT>38J;=1Y-VWtZAscaI z_8jUZ#GwWz{JqvGEUmL?G#l5E=*m>`cY?m*XOc*yOCNtpuIGD+Z|kn4Xww=BLrNYS zGO=wQh}Gtr|7DGXLF%|`G>J~l{k^*{;S-Zhq|&HO7rC_r;o`gTB7)uMZ|WWIn@e0( zX$MccUMv3ABg^$%_lNrgU{EVi8O^UyGHPNRt%R!1#MQJn41aD|_93NsBQhP80yP<9 zG4(&0u7AtJJXLPcqzjv`S~5;Q|5TVGccN=Uzm}K{v)?f7W!230C<``9(64}D2raRU zAW5bp%}VEo{4Rko`bD%Ehf=0voW?-4Mk#d3_pXTF!-TyIt6U+({6OXWVAa;s-`Ta5 zTqx&8msH3+DLrVmQOTBOAj=uoxKYT3DS1^zBXM?1W+7gI!aQNPYfUl{3;PzS9*F7g zWJN8x?KjBDx^V&6iCY8o_gslO16=kh(|Gp)kz8qlQ`dzxQv;)V&t+B}wwdi~uBs4? 
zu~G|}y!`3;8#vIMUdyC7YEx6bb^1o}G!Jky4cN?BV9ejBfN<&!4M)L&lRKiuMS#3} z_B}Nkv+zzxhy{dYCW$oGC&J(Ty&7%=5B$sD0bkuPmj7g>|962`(Q{ZZMDv%YMuT^KweiRDvYTEop3IgFv#)(w>1 zSzH>J`q!LK)c(AK>&Ib)A{g`Fdykxqd`Yq@yB}E{gnQV$K!}RsgMGWqC3DKE(=!{}ekB3+(1?g}xF>^icEJbc z5bdxAPkW90atZT+&*7qoLqL#p=>t-(-lsnl2XMpZcYeW|o|a322&)yO_8p(&Sw{|b zn(tY$xn5yS$DD)UYS%sP?c|z>1dp!QUD)l;aW#`%qMtQJjE!s2z`+bTSZmLK7SvCR z=@I4|U^sCwZLQSfd*ACw9B@`1c1|&i^W_OD(570SDLK`MD0wTiR8|$7+%{cF&){$G zU~|$^Ed?TIxyw{1$e|D$050n8AjJvvOWhLtLHbSB|HIfjMp+gu>DraHZJRrdO53(= z+o-f{+qNog+qSLB%KY;5>Av6X(>-qYk3IIEwZ5~6a+P9lMpC^ z8CJ0q>rEpjlsxCvJm=kms@tlN4+sv}He`xkr`S}bGih4t`+#VEIt{1veE z{ZLtb_pSbcfcYPf4=T1+|BtR!x5|X#x2TZEEkUB6kslKAE;x)*0x~ES0kl4Dex4e- zT2P~|lT^vUnMp{7e4OExfxak0EE$Hcw;D$ehTV4a6hqxru0$|Mo``>*a5=1Ym0u>BDJKO|=TEWJ5jZu!W}t$Kv{1!q`4Sn7 zrxRQOt>^6}Iz@%gA3&=5r;Lp=N@WKW;>O!eGIj#J;&>+3va^~GXRHCY2}*g#9ULab zitCJt-OV0*D_Q3Q`p1_+GbPxRtV_T`jyATjax<;zZ?;S+VD}a(aN7j?4<~>BkHK7bO8_Vqfdq1#W&p~2H z&w-gJB4?;Q&pG9%8P(oOGZ#`!m>qAeE)SeL*t8KL|1oe;#+uOK6w&PqSDhw^9-&Fa zuEzbi!!7|YhlWhqmiUm!muO(F8-F7|r#5lU8d0+=;<`{$mS=AnAo4Zb^{%p}*gZL! 
zeE!#-zg0FWsSnablw!9$<&K(#z!XOW z;*BVx2_+H#`1b@>RtY@=KqD)63brP+`Cm$L1@ArAddNS1oP8UE$p05R=bvZoYz+^6 z<)!v7pRvi!u_-V?!d}XWQR1~0q(H3{d^4JGa=W#^Z<@TvI6J*lk!A zZ*UIKj*hyO#5akL*Bx6iPKvR3_2-^2mw|Rh-3O_SGN3V9GRo52Q;JnW{iTGqb9W99 z7_+F(Op6>~3P-?Q8LTZ-lwB}xh*@J2Ni5HhUI3`ct|*W#pqb>8i*TXOLn~GlYECIj zhLaa_rBH|1jgi(S%~31Xm{NB!30*mcsF_wgOY2N0XjG_`kFB+uQuJbBm3bIM$qhUyE&$_u$gb zpK_r{99svp3N3p4yHHS=#csK@j9ql*>j0X=+cD2dj<^Wiu@i>c_v zK|ovi7}@4sVB#bzq$n3`EgI?~xDmkCW=2&^tD5RuaSNHf@Y!5C(Is$hd6cuyoK|;d zO}w2AqJPS`Zq+(mc*^%6qe>1d&(n&~()6-ZATASNPsJ|XnxelLkz8r1x@c2XS)R*H(_B=IN>JeQUR;T=i3<^~;$<+8W*eRKWGt7c#>N`@;#!`kZ!P!&{9J1>_g8Zj zXEXxmA=^{8A|3=Au+LfxIWra)4p<}1LYd_$1KI0r3o~s1N(x#QYgvL4#2{z8`=mXy zQD#iJ0itk1d@Iy*DtXw)Wz!H@G2St?QZFz zVPkM%H8Cd2EZS?teQN*Ecnu|PrC!a7F_XX}AzfZl3fXfhBtc2-)zaC2eKx*{XdM~QUo4IwcGgVdW69 z1UrSAqqMALf^2|(I}hgo38l|Ur=-SC*^Bo5ej`hb;C$@3%NFxx5{cxXUMnTyaX{>~ zjL~xm;*`d08bG_K3-E+TI>#oqIN2=An(C6aJ*MrKlxj?-;G zICL$hi>`F%{xd%V{$NhisHSL~R>f!F7AWR&7b~TgLu6!3s#~8|VKIX)KtqTH5aZ8j zY?wY)XH~1_a3&>#j7N}0az+HZ;is;Zw(Am{MX}YhDTe(t{ZZ;TG}2qWYO+hdX}vp9 z@uIRR8g#y~-^E`Qyem(31{H0&V?GLdq9LEOb2(ea#e-$_`5Q{T%E?W(6 z(XbX*Ck%TQM;9V2LL}*Tf`yzai{0@pYMwBu%(I@wTY!;kMrzcfq0w?X`+y@0ah510 zQX5SU(I!*Fag4U6a7Lw%LL;L*PQ}2v2WwYF(lHx_Uz2ceI$mnZ7*eZ?RFO8UvKI0H z9Pq-mB`mEqn6n_W9(s~Jt_D~j!Ln9HA)P;owD-l~9FYszs)oEKShF9Zzcmnb8kZ7% zQ`>}ki1kwUO3j~ zEmh140sOkA9v>j@#56ymn_RnSF`p@9cO1XkQy6_Kog?0ivZDb`QWOX@tjMd@^Qr(p z!sFN=A)QZm!sTh(#q%O{Ovl{IxkF!&+A)w2@50=?a-+VuZt6On1;d4YtUDW{YNDN_ zG@_jZi1IlW8cck{uHg^g=H58lPQ^HwnybWy@@8iw%G! 
zwB9qVGt_?~M*nFAKd|{cGg+8`+w{j_^;nD>IrPf-S%YjBslSEDxgKH{5p)3LNr!lD z4ii)^%d&cCXIU7UK?^ZQwmD(RCd=?OxmY(Ko#+#CsTLT;p#A%{;t5YpHFWgl+@)N1 zZ5VDyB;+TN+g@u~{UrWrv)&#u~k$S&GeW)G{M#&Di)LdYk?{($Cq zZGMKeYW)aMtjmKgvF0Tg>Mmkf9IB#2tYmH-s%D_9y3{tfFmX1BSMtbe<(yqAyWX60 zzkgSgKb3c{QPG2MalYp`7mIrYg|Y<4Jk?XvJK)?|Ecr+)oNf}XLPuTZK%W>;<|r+% zTNViRI|{sf1v7CsWHvFrkQ$F7+FbqPQ#Bj7XX=#M(a~9^80}~l-DueX#;b}Ajn3VE z{BWI}$q{XcQ3g{(p>IOzFcAMDG0xL)H%wA)<(gl3I-oVhK~u_m=hAr&oeo|4lZbf} z+pe)c34Am<=z@5!2;_lwya;l?xV5&kWe}*5uBvckm(d|7R>&(iJNa6Y05SvlZcWBlE{{%2- z`86)Y5?H!**?{QbzGG~|k2O%eA8q=gxx-3}&Csf6<9BsiXC)T;x4YmbBIkNf;0Nd5 z%whM^!K+9zH>on_<&>Ws?^v-EyNE)}4g$Fk?Z#748e+GFp)QrQQETx@u6(1fk2!(W zWiCF~MomG*y4@Zk;h#2H8S@&@xwBIs|82R*^K(i*0MTE%Rz4rgO&$R zo9Neb;}_ulaCcdn3i17MO3NxzyJ=l;LU*N9ztBJ30j=+?6>N4{9YXg$m=^9@Cl9VY zbo^{yS@gU=)EpQ#;UIQBpf&zfCA;00H-ee=1+TRw@(h%W=)7WYSb5a%$UqNS@oI@= zDrq|+Y9e&SmZrH^iA>Of8(9~Cf-G(P^5Xb%dDgMMIl8gk6zdyh`D3OGNVV4P9X|EvIhplXDld8d z^YWtYUz@tpg*38Xys2?zj$F8%ivA47cGSl;hjD23#*62w3+fwxNE7M7zVK?x_`dBSgPK zWY_~wF~OEZi9|~CSH8}Xi>#8G73!QLCAh58W+KMJJC81{60?&~BM_0t-u|VsPBxn* zW7viEKwBBTsn_A{g@1!wnJ8@&h&d>!qAe+j_$$Vk;OJq`hrjzEE8Wjtm)Z>h=*M25 zOgETOM9-8xuuZ&^@rLObtcz>%iWe%!uGV09nUZ*nxJAY%&KAYGY}U1WChFik7HIw% zZP$3Bx|TG_`~19XV7kfi2GaBEhKap&)Q<9`aPs#^!kMjtPb|+-fX66z3^E)iwyXK7 z8)_p<)O{|i&!qxtgBvWXx8*69WO$5zACl++1qa;)0zlXf`eKWl!0zV&I`8?sG)OD2Vy?reNN<{eK+_ za4M;Hh%&IszR%)&gpgRCP}yheQ+l#AS-GnY81M!kzhWxIR?PW`G3G?} z$d%J28uQIuK@QxzGMKU_;r8P0+oIjM+k)&lZ39i#(ntY)*B$fdJnQ3Hw3Lsi8z&V+ zZly2}(Uzpt2aOubRjttzqrvinBFH4jrN)f0hy)tj4__UTwN)#1fj3-&dC_Vh7}ri* zfJ=oqLMJ-_<#rwVyN}_a-rFBe2>U;;1(7UKH!$L??zTbbzP#bvyg7OQBGQklJ~DgP zd<1?RJ<}8lWwSL)`jM53iG+}y2`_yUvC!JkMpbZyb&50V3sR~u+lok zT0uFRS-yx@8q4fPRZ%KIpLp8R#;2%c&Ra4p(GWRT4)qLaPNxa&?8!LRVdOUZ)2vrh zBSx&kB%#Y4!+>~)<&c>D$O}!$o{<1AB$M7-^`h!eW;c(3J~ztoOgy6Ek8Pwu5Y`Xion zFl9fb!k2`3uHPAbd(D^IZmwR5d8D$495nN2`Ue&`W;M-nlb8T-OVKt|fHk zBpjX$a(IR6*-swdNk@#}G?k6F-~c{AE0EWoZ?H|ZpkBxqU<0NUtvubJtwJ1mHV%9v?GdDw; zAyXZiD}f0Zdt-cl9(P1la+vQ$Er0~v}gYJVwQazv 
zH#+Z%2CIfOf90fNMGos|{zf&N`c0@x0N`tkFv|_9af3~<0z@mnf*e;%r*Fbuwl-IW z{}B3=(mJ#iwLIPiUP`J3SoP~#)6v;aRXJ)A-pD2?_2_CZ#}SAZ<#v7&Vk6{*i(~|5 z9v^nC`T6o`CN*n%&9+bopj^r|E(|pul;|q6m7Tx+U|UMjWK8o-lBSgc3ZF=rP{|l9 zc&R$4+-UG6i}c==!;I#8aDIbAvgLuB66CQLRoTMu~jdw`fPlKy@AKYWS-xyZzPg&JRAa@m-H43*+ne!8B7)HkQY4 zIh}NL4Q79a-`x;I_^>s$Z4J4-Ngq=XNWQ>yAUCoe&SMAYowP>r_O}S=V+3=3&(O=h zNJDYNs*R3Y{WLmBHc?mFEeA4`0Y`_CN%?8qbDvG2m}kMAiqCv`_BK z_6a@n`$#w6Csr@e2YsMx8udNWtNt=kcqDZdWZ-lGA$?1PA*f4?X*)hjn{sSo8!bHz zb&lGdAgBx@iTNPK#T_wy`KvOIZvTWqSHb=gWUCKXAiB5ckQI`1KkPx{{%1R*F2)Oc z(9p@yG{fRSWE*M9cdbrO^)8vQ2U`H6M>V$gK*rz!&f%@3t*d-r3mSW>D;wYxOhUul zk~~&ip5B$mZ~-F1orsq<|1bc3Zpw6)Ws5;4)HilsN;1tx;N6)tuePw& z==OlmaN*ybM&-V`yt|;vDz(_+UZ0m&&9#{9O|?0I|4j1YCMW;fXm}YT$0%EZ5^YEI z4i9WV*JBmEU{qz5O{#bs`R1wU%W$qKx?bC|e-iS&d*Qm7S=l~bMT{~m3iZl+PIXq{ zn-c~|l)*|NWLM%ysfTV-oR0AJ3O>=uB-vpld{V|cWFhI~sx>ciV9sPkC*3i0Gg_9G!=4ar*-W?D9)?EFL1=;O+W8}WGdp8TT!Fgv z{HKD`W>t(`Cds_qliEzuE!r{ihwEv1l5o~iqlgjAyGBi)$%zNvl~fSlg@M=C{TE;V zQkH`zS8b&!ut(m)%4n2E6MB>p*4(oV>+PT51#I{OXs9j1vo>9I<4CL1kv1aurV*AFZ^w_qfVL*G2rG@D2 zrs87oV3#mf8^E5hd_b$IXfH6vHe&lm@7On~Nkcq~YtE!}ad~?5*?X*>y`o;6Q9lkk zmf%TYonZM`{vJg$`lt@MXsg%*&zZZ0uUSse8o=!=bfr&DV)9Y6$c!2$NHyYAQf*Rs zk{^?gl9E z5Im8wlAsvQ6C2?DyG@95gUXZ3?pPijug25g;#(esF_~3uCj3~94}b*L>N2GSk%Qst z=w|Z>UX$m!ZOd(xV*2xvWjN&c5BVEdVZ0wvmk)I+YxnyK%l~caR=7uNQ=+cnNTLZ@&M!I$Mj-r{!P=; z`C2)D=VmvK8@T5S9JZoRtN!S*D_oqOxyy!q6Zk|~4aT|*iRN)fL)c>-yycR>-is0X zKrko-iZw(f(!}dEa?hef5yl%p0-v-8#8CX8!W#n2KNyT--^3hq6r&`)5Y@>}e^4h- zlPiDT^zt}Ynk&x@F8R&=)k8j$=N{w9qUcIc&)Qo9u4Y(Ae@9tA`3oglxjj6c{^pN( zQH+Uds2=9WKjH#KBIwrQI%bbs`mP=7V>rs$KG4|}>dxl_k!}3ZSKeEen4Iswt96GGw`E6^5Ov)VyyY}@itlj&sao|>Sb5 zeY+#1EK(}iaYI~EaHQkh7Uh>DnzcfIKv8ygx1Dv`8N8a6m+AcTa-f;17RiEed>?RT zk=dAksmFYPMV1vIS(Qc6tUO+`1jRZ}tcDP? 
zt)=7B?yK2RcAd1+Y!$K5*ds=SD;EEqCMG6+OqPoj{&8Y5IqP(&@zq@=A7+X|JBRi4 zMv!czlMPz)gt-St2VZwDD=w_S>gRpc-g zUd*J3>bXeZ?Psjohe;z7k|d<*T21PA1i)AOi8iMRwTBSCd0ses{)Q`9o&p9rsKeLaiY zluBw{1r_IFKR76YCAfl&_S1*(yFW8HM^T()&p#6y%{(j7Qu56^ZJx1LnN`-RTwimdnuo*M8N1ISl+$C-%=HLG-s} zc99>IXRG#FEWqSV9@GFW$V8!{>=lSO%v@X*pz*7()xb>=yz{E$3VE;e)_Ok@A*~El zV$sYm=}uNlUxV~6e<6LtYli1!^X!Ii$L~j4e{sI$tq_A(OkGquC$+>Rw3NFObV2Z)3Rt~Jr{oYGnZaFZ^g5TDZlg;gaeIP} z!7;T{(9h7mv{s@piF{-35L=Ea%kOp;^j|b5ZC#xvD^^n#vPH=)lopYz1n?Kt;vZmJ z!FP>Gs7=W{sva+aO9S}jh0vBs+|(B6Jf7t4F^jO3su;M13I{2rd8PJjQe1JyBUJ5v zcT%>D?8^Kp-70bP8*rulxlm)SySQhG$Pz*bo@mb5bvpLAEp${?r^2!Wl*6d7+0Hs_ zGPaC~w0E!bf1qFLDM@}zso7i~(``)H)zRgcExT_2#!YOPtBVN5Hf5~Ll3f~rWZ(UsJtM?O*cA1_W0)&qz%{bDoA}{$S&-r;0iIkIjbY~ zaAqH45I&ALpP=9Vof4OapFB`+_PLDd-0hMqCQq08>6G+C;9R~}Ug_nm?hhdkK$xpI zgXl24{4jq(!gPr2bGtq+hyd3%Fg%nofK`psHMs}EFh@}sdWCd!5NMs)eZg`ZlS#O0 zru6b8#NClS(25tXqnl{|Ax@RvzEG!+esNW-VRxba(f`}hGoqci$U(g30i}2w9`&z= zb8XjQLGN!REzGx)mg~RSBaU{KCPvQx8)|TNf|Oi8KWgv{7^tu}pZq|BS&S<53fC2K4Fw6>M^s$R$}LD*sUxdy6Pf5YKDbVet;P!bw5Al-8I1Nr(`SAubX5^D9hk6$agWpF}T#Bdf{b9-F#2WVO*5N zp+5uGgADy7m!hAcFz{-sS0kM7O)qq*rC!>W@St~^OW@R1wr{ajyYZq5H!T?P0e+)a zaQ%IL@X_`hzp~vRH0yUblo`#g`LMC%9}P;TGt+I7qNcBSe&tLGL4zqZqB!Bfl%SUa z6-J_XLrnm*WA`34&mF+&e1sPCP9=deazrM=Pc4Bn(nV;X%HG^4%Afv4CI~&l!Sjzb z{rHZ3od0!Al{}oBO>F*mOFAJrz>gX-vs!7>+_G%BB(ljWh$252j1h;9p~xVA=9_`P z5KoFiz96_QsTK%B&>MSXEYh`|U5PjX1(+4b#1PufXRJ*uZ*KWdth1<0 zsAmgjT%bowLyNDv7bTUGy|g~N34I-?lqxOUtFpTLSV6?o?<7-UFy*`-BEUsrdANh} zBWkDt2SAcGHRiqz)x!iVoB~&t?$yn6b#T=SP6Ou8lW=B>=>@ik93LaBL56ub`>Uo!>0@O8?e)$t(sgy$I z6tk3nS@yFFBC#aFf?!d_3;%>wHR;A3f2SP?Na8~$r5C1N(>-ME@HOpv4B|Ty7%jAv zR}GJwsiJZ5@H+D$^Cwj#0XA_(m^COZl8y7Vv(k=iav1=%QgBOVzeAiw zaDzzdrxzj%sE^c9_uM5D;$A_7)Ln}BvBx^=)fO+${ou%B*u$(IzVr-gH3=zL6La;G zu0Kzy5CLyNGoKRtK=G0-w|tnwI)puPDOakRzG(}R9fl7#<|oQEX;E#yCWVg95 z;NzWbyF&wGg_k+_4x4=z1GUcn6JrdX4nOVGaAQ8#^Ga>aFvajQN{!+9rgO-dHP zIp@%&ebVg}IqnRWwZRTNxLds+gz2@~VU(HI=?Epw>?yiEdZ>MjajqlO>2KDxA>)cj 
z2|k%dhh%d8SijIo1~20*5YT1eZTDkN2rc^zWr!2`5}f<2f%M_$to*3?Ok>e9$X>AV z2jYmfAd)s|(h?|B(XYrIfl=Wa_lBvk9R1KaP{90-z{xKi+&8=dI$W0+qzX|ZovWGOotP+vvYR(o=jo?k1=oG?%;pSqxcU* zWVGVMw?z__XQ9mnP!hziHC`ChGD{k#SqEn*ph6l46PZVkm>JF^Q{p&0=MKy_6apts z`}%_y+Tl_dSP(;Ja&sih$>qBH;bG;4;75)jUoVqw^}ee=ciV;0#t09AOhB^Py7`NC z-m+ybq1>_OO+V*Z>dhk}QFKA8V?9Mc4WSpzj{6IWfFpF7l^au#r7&^BK2Ac7vCkCn{m0uuN93Ee&rXfl1NBY4NnO9lFUp zY++C1I;_{#OH#TeP2Dp?l4KOF8ub?m6zE@XOB5Aiu$E~QNBM@;r+A5mF2W1-c7>ex zHiB=WJ&|`6wDq*+xv8UNLVUy4uW1OT>ey~Xgj@MMpS@wQbHAh>ysYvdl-1YH@&+Q! z075(Qd4C!V`9Q9jI4 zSt{HJRvZec>vaL_brKhQQwbpQd4_Lmmr0@1GdUeU-QcC{{8o=@nwwf>+dIKFVzPriGNX4VjHCa zTbL9w{Y2V87c2ofX%`(48A+4~mYTiFFl!e{3K^C_k%{&QTsgOd0*95KmWN)P}m zTRr{`f7@=v#+z_&fKYkQT!mJn{*crj%ZJz#(+c?>cD&2Lo~FFAWy&UG*Op^pV`BR^I|g?T>4l5;b|5OQ@t*?_Slp`*~Y3`&RfKD^1uLezIW(cE-Dq2z%I zBi8bWsz0857`6e!ahet}1>`9cYyIa{pe53Kl?8|Qg2RGrx@AlvG3HAL-^9c^1GW;)vQt8IK+ zM>!IW*~682A~MDlyCukldMd;8P|JCZ&oNL(;HZgJ>ie1PlaInK7C@Jg{3kMKYui?e!b`(&?t6PTb5UPrW-6DVU%^@^E`*y-Fd(p|`+JH&MzfEq;kikdse ziFOiDWH(D< zyV7Rxt^D0_N{v?O53N$a2gu%1pxbeK;&ua`ZkgSic~$+zvt~|1Yb=UfKJW2F7wC^evlPf(*El+#}ZBy0d4kbVJsK- z05>;>?HZO(YBF&v5tNv_WcI@O@LKFl*VO?L(!BAd!KbkVzo;v@~3v`-816GG?P zY+H3ujC>5=Am3RIZDdT#0G5A6xe`vGCNq88ZC1aVXafJkUlcYmHE^+Z{*S->ol%-O znm9R0TYTr2w*N8Vs#s-5=^w*{Y}qp5GG)Yt1oLNsH7y~N@>Eghms|K*Sdt_u!&I}$ z+GSdFTpbz%KH+?B%Ncy;C`uW6oWI46(tk>r|5|-K6)?O0d_neghUUOa9BXHP*>vi; z={&jIGMn-92HvInCMJcyXwHTJ42FZp&Wxu+9Rx;1x(EcIQwPUQ@YEQQ`bbMy4q3hP zNFoq~Qd0=|xS-R}k1Im3;8s{BnS!iaHIMLx)aITl)+)?Yt#fov|Eh>}dv@o6R{tG>uHsy&jGmWN5+*wAik|78(b?jtysPHC#e+Bzz~V zS3eEXv7!Qn4uWi!FS3B?afdD*{fr9>B~&tc671fi--V}~E4un;Q|PzZRwk-azprM$4AesvUb5`S`(5x#5VJ~4%ET6&%GR$}muHV-5lTsCi_R|6KM(g2PCD@|yOpKluT zakH!1V7nKN)?6JmC-zJoA#ciFux8!)ajiY%K#RtEg$gm1#oKUKX_Ms^%hvKWi|B=~ zLbl-L)-=`bfhl`>m!^sRR{}cP`Oim-{7}oz4p@>Y(FF5FUEOfMwO!ft6YytF`iZRq zfFr{!&0Efqa{1k|bZ4KLox;&V@ZW$997;+Ld8Yle91he{BfjRhjFTFv&^YuBr^&Pe zswA|Bn$vtifycN8Lxr`D7!Kygd7CuQyWqf}Q_PM}cX~S1$-6xUD%-jrSi24sBTFNz(Fy{QL2AmNbaVggWOhP;UY4D>S 
zqKr!UggZ9Pl9Nh_H;qI`-WoH{ceXj?m8y==MGY`AOJ7l0Uu z)>M%?dtaz2rjn1SW3k+p`1vs&lwb%msw8R!5nLS;upDSxViY98IIbxnh{}mRfEp=9 zbrPl>HEJeN7J=KnB6?dwEA6YMs~chHNG?pJsEj#&iUubdf3JJwu=C(t?JpE6xMyhA3e}SRhunDC zn-~83*9=mADUsk^sCc%&&G1q5T^HR9$P#2DejaG`Ui*z1hI#h7dwpIXg)C{8s< z%^#@uQRAg-$z&fmnYc$Duw63_Zopx|n{Bv*9Xau{a)2%?H<6D>kYY7_)e>OFT<6TT z0A}MQLgXbC2uf`;67`mhlcUhtXd)Kbc$PMm=|V}h;*_%vCw4L6r>3Vi)lE5`8hkSg zNGmW-BAOO)(W((6*e_tW&I>Nt9B$xynx|sj^ux~?q?J@F$L4;rnm_xy8E*JYwO-02u9_@@W0_2@?B@1J{y~Q39N3NX^t7#`=34Wh)X~sU&uZWgS1Z09%_k|EjA4w_QqPdY`oIdv$dJZ;(!k)#U8L+|y~gCzn+6WmFt#d{OUuKHqh1-uX_p*Af8pFYkYvKPKBxyid4KHc}H` z*KcyY;=@wzXYR{`d{6RYPhapShXIV?0cg_?ahZ7do)Ot#mxgXYJYx}<%E1pX;zqHd zf!c(onm{~#!O$2`VIXezECAHVd|`vyP)Uyt^-075X@NZDBaQt<>trA3nY-Dayki4S zZ^j6CCmx1r46`4G9794j-WC0&R9(G7kskS>=y${j-2;(BuIZTLDmAyWTG~`0)Bxqk zd{NkDe9ug|ms@0A>JVmB-IDuse9h?z9nw!U6tr7t-Lri5H`?TjpV~8(gZWFq4Vru4 z!86bDB;3lpV%{rZ`3gtmcRH1hjj!loI9jN>6stN6A*ujt!~s!2Q+U1(EFQEQb(h4E z6VKuRouEH`G6+8Qv2C)K@^;ldIuMVXdDDu}-!7FS8~k^&+}e9EXgx~)4V4~o6P^52 z)a|`J-fOirL^oK}tqD@pqBZi_;7N43%{IQ{v&G9^Y^1?SesL`;Z(dt!nn9Oj5Odde%opv&t zxJ><~b#m+^KV&b?R#)fRi;eyqAJ_0(nL*61yPkJGt;gZxSHY#t>ATnEl-E%q$E16% zZdQfvhm5B((y4E3Hk6cBdwGdDy?i5CqBlCVHZr-rI$B#>Tbi4}Gcvyg_~2=6O9D-8 zY2|tKrNzbVR$h57R?Pe+gUU_il}ZaWu|Az#QO@};=|(L-RVf0AIW zq#pO+RfM7tdV`9lI6g;{qABNId`fG%U9Va^ravVT^)CklDcx)YJKeJdGpM{W1v8jg z@&N+mR?BPB=K1}kNwXk_pj44sd>&^;d!Z~P>O78emE@Qp@&8PyB^^4^2f7e)gekMv z2aZNvP@;%i{+_~>jK7*2wQc6nseT^n6St9KG#1~Y@$~zR_=AcO2hF5lCoH|M&c{vR zSp(GRVVl=T*m~dIA;HvYm8HOdCkW&&4M~UDd^H)`p__!4k+6b)yG0Zcek8OLw$C^K z3-BbLiG_%qX|ZYpXJ$(c@aa7b4-*IQkDF}=gZSV`*ljP|5mWuHSCcf$5qqhZTv&P?I$z^>}qP(q!Aku2yA5vu38d8x*q{6-1`%PrE_r0-9Qo?a#7Zbz#iGI7K<(@k^|i4QJ1H z4jx?{rZbgV!me2VT72@nBjucoT zUM9;Y%TCoDop?Q5fEQ35bCYk7!;gH*;t9t-QHLXGmUF;|vm365#X)6b2Njsyf1h9JW#x$;@x5Nx2$K$Z-O3txa%;OEbOn6xBzd4n4v)Va=sj5 z%rb#j7{_??Tjb8(Hac<^&s^V{yO-BL*uSUk2;X4xt%NC8SjO-3?;Lzld{gM5A=9AV z)DBu-Z8rRvXXwSVDH|dL-3FODWhfe1C_iF``F05e{dl(MmS|W%k-j)!7(ARkV?6r~ 
zF=o42y+VapxdZn;GnzZfGu<6oG-gQ7j7Zvgo7Am@jYxC2FpS@I;Jb%EyaJDBQC(q% zKlZ}TVu!>;i3t~OAgl@QYy1X|T~D{HOyaS*Bh}A}S#a9MYS{XV{R-|niEB*W%GPW! zP^NU(L<}>Uab<;)#H)rYbnqt|dOK(-DCnY==%d~y(1*{D{Eo1cqIV8*iMfx&J*%yh zx=+WHjt0q2m*pLx8=--UqfM6ZWjkev>W-*}_*$Y(bikH`#-Gn#!6_ zIA&kxn;XYI;eN9yvqztK-a113A%97in5CL5Z&#VsQ4=fyf&3MeKu70)(x^z_uw*RG zo2Pv&+81u*DjMO6>Mrr7vKE2CONqR6C0(*;@4FBM;jPIiuTuhQ-0&C)JIzo_k>TaS zN_hB;_G=JJJvGGpB?uGgSeKaix~AkNtYky4P7GDTW6{rW{}V9K)Cn^vBYKe*OmP!; zohJs=l-0sv5&phSCi&8JSrokrKP$LVa!LbtlN#T^cedgH@ijt5T-Acxd9{fQY z4qsg1O{|U5Rzh_j;9QD(g*j+*=xULyi-FY|-mUXl7-2O`TYQny<@jSQ%^ye*VW_N< z4mmvhrDYBJ;QSoPvwgi<`7g*Pwg5ANA8i%Kum;<=i|4lwEdN+`)U3f2%bcRZRK!P z70kd~`b0vX=j20UM5rBO#$V~+grM)WRhmzb15ya^Vba{SlSB4Kn}zf#EmEEhGruj| zBn0T2n9G2_GZXnyHcFkUlzdRZEZ0m&bP-MxNr zd;kl7=@l^9TVrg;Y6J(%!p#NV*Lo}xV^Nz0#B*~XRk0K2hgu5;7R9}O=t+R(r_U%j z$`CgPL|7CPH&1cK5vnBo<1$P{WFp8#YUP%W)rS*a_s8kKE@5zdiAh*cjmLiiKVoWD z!y$@Cc5=Wj^VDr$!04FI#%pu6(a9 zM_FAE+?2tp2<$Sqp5VtADB>yY*cRR+{OeZ5g2zW=`>(tA~*-T)X|ahF{xQmypWp%2X{385+=0S|Jyf`XA-c7wAx`#5n2b-s*R>m zP30qtS8aUXa1%8KT8p{=(yEvm2Gvux5z22;isLuY5kN{IIGwYE1Pj);?AS@ex~FEt zQ`Gc|)o-eOyCams!|F0_;YF$nxcMl^+z0sSs@ry01hpsy3p<|xOliR zr-dxK0`DlAydK!br?|Xi(>buASy4@C8)ccRCJ3w;v&tA1WOCaieifLl#(J% zODPi5fr~ASdz$Hln~PVE6xekE{Xb286t(UtYhDWo8JWN6sNyRVkIvC$unIl8QMe@^ z;1c<0RO5~Jv@@gtDGPDOdqnECOurq@l02NC#N98-suyq_)k(`G=O`dJU8I8LcP!4z z8fkgqViqFbR+3IkwLa)^>Z@O{qxTLU63~^lod{@${q;-l?S|4Tq0)As-Gz!D(*P)Vf6wm6B8GGWi7B)Q^~T?sseZeI+}LyBAG!LRZn_ktDlht1j2ok@ljteyuNUkG67 zipkCx-7k(FZQhYjZ%T9X7`tO99$Wj~K`9r0IkWhPul`Q_t1YnVK=YI1dMc_b!FEU4 zkv=PGf{5$P#w{|m92tfVnsnfd%%KW;1a*cLmga4bSYl^*49M4cs+Fe>P!n=$G6hL6 z>IM&0+c(Nvr0I!5CGx7WK*Z3V^w0+QcF=hU0B4=+;=tn*+XDxKa;NB-z4O~I zf}TSb^Z;L_Og>!D1`;w@zf@GCqCUNY%N?IPmEkTco^}bX~BWM_Hamu05>#B zBh%QfUeHPu`MsYVQQ3hOT;HmP_C|nOl zjluk7vaSICyQ01h`^c)DWp>cxPjGEc6D^~2L79hyK_J#<9H#8o`&XM4=aB`@< z<|1oR6Djf))P1l2C{qSwa4u-&LDG{FLz#ym_@I+vo}D}#%;vNN%& 
zW&9||THv_^B!1Fo+$3A6hEAed$I-{a^6FVvwMtT~e%*&RvY5mj<@(-{y^xn6ZCYqNK|#v^xbWpy15YL18z#Y&5YwOnd!A*@>k^7CaX0~4*6QB{Bgh$KJqesFc(lSQ{iQAKY%Ge}2CeuFJ{4YmgrP(gpcH zXJQjSH^cw`Z0tV^axT&RkOBP2A~#fvmMFrL&mwdDn<*l3;3A425_lzHL`+6sT9LeY zu@TH0u4tj199jQBzz*~Up5)7=4OP%Ok{rxQYNb!hphAoW-BFJn>O=%ov*$ir?dIx% z56Y`>?(1YQ8Fc(D7pq2`9swz@*RIoTAvMT%CPbt;$P%eG(P%*ZMjklLoXqTE*Jg^T zlEQbMi@_E|ll_>pTJ!(-x41R}4sY<5A2VVQ^#4eE{imHt#NEi+#p#EBC2C=9B4A|n zqe03T*czDqQ-VxZ+jPQG!}!M0SlFm^@wTW?otBZ+q~xkk29u1i7Q|kaJ(9{AiP1`p zbEe5&!>V;1wnQ1-Qpyn2B5!S(lh=38hl6IilCC6n4|yz~q94S9_5+Od*$c)%r|)f~ z;^-lf=6POs>Ur4i-F>-wm;3(v7Y_itzt)*M!b~&oK%;re(p^>zS#QZ+Rt$T#Y%q1{ zx+?@~+FjR1MkGr~N`OYBSsVr}lcBZ+ij!0SY{^w((2&U*M`AcfSV9apro+J{>F&tX zT~e zMvsv$Q)AQl_~);g8OOt4plYESr8}9?T!yO(Wb?b~1n0^xVG;gAP}d}#%^9wqN7~F5 z!jWIpqxZ28LyT|UFH!u?V>F6&Hd~H|<(3w*o{Ps>G|4=z`Ws9oX5~)V=uc?Wmg6y< zJKnB4Opz^9v>vAI)ZLf2$pJdm>ZwOzCX@Yw0;-fqB}Ow+u`wglzwznQAP(xbs`fA7 zylmol=ea)g}&;8;)q0h7>xCJA+01w+RY`x`RO% z9g1`ypy?w-lF8e5xJXS4(I^=k1zA46V)=lkCv?k-3hR9q?oZPzwJl$yOHWeMc9wFuE6;SObNsmC4L6;eWPuAcfHoxd59gD7^Xsb$lS_@xI|S-gb? 
z*;u@#_|4vo*IUEL2Fxci+@yQY6<&t=oNcWTVtfi1Ltveqijf``a!Do0s5e#BEhn5C zBXCHZJY-?lZAEx>nv3k1lE=AN10vz!hpeUY9gy4Xuy940j#Rq^yH`H0W2SgXtn=X1 zV6cY>fVbQhGwQIaEG!O#p)aE8&{gAS z^oVa-0M`bG`0DE;mV)ATVNrt;?j-o*?Tdl=M&+WrW12B{+5Um)qKHd_HIv@xPE+;& zPI|zXfrErYzDD2mOhtrZLAQ zP#f9e!vqBSyoKZ#{n6R1MAW$n8wH~)P3L~CSeBrk4T0dzIp&g9^(_5zY*7$@l%%nL zG$Z}u8pu^Mw}%{_KDBaDjp$NWes|DGAn~WKg{Msbp*uPiH9V|tJ_pLQROQY?T0Pmt zs4^NBZbn7B^L%o#q!-`*+cicZS9Ycu+m)rDb98CJ+m1u}e5ccKwbc0|q)ICBEnLN# zV)8P1s;r@hE3sG2wID0@`M9XIn~hm+W1(scCZr^Vs)w4PKIW_qasyjbOBC`ixG8K$ z9xu^v(xNy4HV{wu2z-B87XG#yWu~B6@|*X#BhR!_jeF*DG@n_RupAvc{DsC3VCHT# za6Z&9k#<*y?O0UoK3MLlSX6wRh`q&E>DOZTG=zRxj0pR0c3vskjPOqkh9;o>a1>!P zxD|LU0qw6S4~iN8EIM2^$k72(=a6-Tk?%1uSj@0;u$0f*LhC%|mC`m`w#%W)IK zN_UvJkmzdP84ZV7CP|@k>j^ zPa%;PDu1TLyNvLQdo!i1XA|49nN}DuTho6=z>Vfduv@}mpM({Jh289V%W@9opFELb z?R}D#CqVew1@W=XY-SoMNul(J)zX(BFP?#@9x<&R!D1X&d|-P;VS5Gmd?Nvu$eRNM zG;u~o*~9&A2k&w}IX}@x>LMHv`ith+t6`uQGZP8JyVimg>d}n$0dDw$Av{?qU=vRq zU@e2worL8vTFtK@%pdbaGdUK*BEe$XE=pYxE_q{(hUR_Gzkn=c#==}ZS^C6fKBIfG z@hc);p+atn`3yrTY^x+<y`F0>p02jUL8cgLa|&yknDj;g73m&Sm&@ju91?uG*w?^d%Yap&d2Bp3v7KlQmh z(N<38o-iRk9*UV?wFirV>|46JqxOZ_o8xv_eJ1dv} zw&zDHZOU%`U{9ckU8DS$lB6J!B`JuThCnwKphODv`3bd?_=~tjNHstM>xoA53-p#F zLCVB^E`@r_D>yHLr10Sm4NRX8FQ+&zw)wt)VsPmLK|vLwB-}}jwEIE!5fLE;(~|DA ztMr8D0w^FPKp{trPYHXI7-;UJf;2+DOpHt%*qRgdWawy1qdsj%#7|aRSfRmaT=a1> zJ8U>fcn-W$l-~R3oikH+W$kRR&a$L!*HdKD_g}2eu*3p)twz`D+NbtVCD|-IQdJlFnZ0%@=!g`nRA(f!)EnC0 zm+420FOSRm?OJ;~8D2w5HD2m8iH|diz%%gCWR|EjYI^n7vRN@vcBrsyQ;zha15{uh zJ^HJ`lo+k&C~bcjhccoiB77-5=SS%s7UC*H!clrU$4QY@aPf<9 z0JGDeI(6S%|K-f@U#%SP`{>6NKP~I#&rSHBTUUvHn#ul4*A@BcRR`#yL%yfZj*$_% zAa$P%`!8xJp+N-Zy|yRT$gj#4->h+eV)-R6l}+)9_3lq*A6)zZ)bnogF9`5o!)ub3 zxCx|7GPCqJlnRVPb&!227Ok@-5N2Y6^j#uF6ihXjTRfbf&ZOP zVc$!`$ns;pPW_=n|8Kw4*2&qx+WMb9!DQ7lC1f@DZyr|zeQcC|B6ma*0}X%BSmFJ6 zeDNWGf=Pmmw5b{1)OZ6^CMK$kw2z*fqN+oup2J8E^)mHj?>nWhBIN|hm#Km4eMyL= zXRqzro9k7(ulJi5J^<`KHJAh-(@W=5x>9+YMFcx$6A5dP-5i6u!k*o-zD z37IkyZqjlNh*%-)rAQrCjJo)u9Hf9Yb1f3-#a=nY&M%a{t0g7w6>{AybZ9IY46i4+%^u 
zwq}TCN@~S>i7_2T>GdvrCkf&=-OvQV9V3$RR_Gk7$t}63L}Y6d_4l{3b#f9vup-7s z3yKz5)54OVLzH~Ty=HwVC=c$Tl=cvi1L?R>*#ki4t6pgqdB$sx6O(IIvYO8Q>&kq;c3Y-T?b z*6XAc?orv>?V7#vxmD7geKjf%v~%yjbp%^`%e>dw96!JAm4ybAJLo0+4=TB% zShgMl)@@lgdotD?C1Ok^o&hFRYfMbmlbfk677k%%Qy-BG3V9txEjZmK+QY5nlL2D$Wq~04&rwN`-ujpp)wUm5YQc}&tK#zUR zW?HbbHFfSDsT{Xh&RoKiGp)7WPX4 zD^3(}^!TS|hm?YC16YV59v9ir>ypihBLmr?LAY87PIHgRv*SS>FqZwNJKgf6hy8?9 zaGTxa*_r`ZhE|U9S*pn5Mngb7&%!as3%^ifE@zDvX`GP+=oz@p)rAl2KL}ZO1!-us zY`+7ln`|c!2=?tVsO{C}=``aibcdc1N#;c^$BfJr84=5DCy+OT4AB1BUWkDw1R$=FneVh*ajD&(j2IcWH8stMShVcMe zAi6d7p)>hgPJbcb(=NMw$Bo;gQ}3=hCQsi{6{2s~=ZEOizY(j{zYY-W8RiNjycv00 z8(JpE{}=CHx0ib3(nZgo776X=wBUbfk$y2r*}aNG@A0_zOa4k3?1EeH7Z43{@IP>{^M+M`M)0w*@Go z>kg~UfgP1{vH+IU(0p(VRVlLNMHN1C&3cFnp*}4d1a*kwHJL)rjf`Fi5z)#RGTr7E zOhWfTtQyCo&8_N(zIYEugQI}_k|2X(=dMA43Nt*e93&otv`ha-i;ACB$tIK% zRDOtU^1CD5>7?&Vbh<+cz)(CBM}@a)qZ^ld?uYfp3OjiZOCP7u6~H# zMU;=U=1&DQ9Qp|7j4qpN5Dr7sH(p^&Sqy|{uH)lIv3wk?xoVuN`ILg}HUCLs1Bp2^ za8&M?ZQVWFX>Rg4_i$C$U`89i6O(RmWQ4&O=?B6@6`a8fI)Q6q0t{&o%)|n7jN)7V z{S;u+{UzXnUJN}bCE&4u5wBxaFv7De0huAjhy#o~6NH&1X{OA4Y>v0$F-G*gZqFym zhTZ7~nfaMdN8I&2ri;fk*`LhES$vkyq-dBuRF!BC)q%;lt0`Z(*=Sl>uvU`LAvbyt zL1|M@Jas<@1hK!prK}$@&fbf70o7>3&CovCKi815v$6T7R&1GOG~R4pEu2B z%bxG{n`u$7ps(}Tt(P608J@{+>X(?=-j8CkF!T79c`1@E%?vOL%TYrMe1ozi<##IsIC1YRojP!gD%|+7|z^-Vj$a85gbmtB#unyoy%gw9m1yB z|L^-wylT%}=pNpq!QYz9zoV7>zM2g2d9lm{Q zP|dx3=De3NSNGuMWRdO_ctQJUud?_96HbrHiSKmp;{MHZhX#*L+^I11#r;grJ8_21 zt6b*wmCaAw(>A`ftjlL@vi06Z7xF<&xNOrTHrDeMHk*$$+pGK0p+|}H=Kgl{=naBy zclyQsRTraO4!uo})OTSp_x`^0jj7>|H=FOGnAbKT_LuSUiSd3QuCMq>sEhB=V63Nm zZxrtB0)U@x2A#VHqo2ab=pn~tu>kJ;TVASb_&ePAgVcic@>^YM?^LYRLr^O12>~45 z-EE?-Z$xjxsN92EaBi)~D~1OzRVH`o!)kYv7IIx??(B)>R|xa&(wmlU2gdV0+N+3% z7r$w5(L<|?@46ITJZS5koAELgVV_&KHj(9KG??A);@gL`s1th*c#t5>U(*+nb0+H% zOhJG5tth59%*>S~JIi%<0VAi;k>}&(Ojg!fyH0(fza!1kA~a}Vt{|3z{`Pt@VuYyB zFUt(kR$<`X_J&UQ%;ui2zob1!H{PL8X>>wbpGn~@&h__AfBit)4`D^#->1+Qn^MH9 zYD?%)Pa)D-xQzVGm!g)N$^_z`9)(>)gyQ+(7N@k4GO?~43wcE-|77;CPwPXHQcfcJ^I&IOOah 
zzL|dhoR*#m5sw{b&L=@<-30s9F|{@V05;4Wf6Z_1gpZnJ*SVN}3O7)-=yYuj2)O0d zX=I9TzzTK%QG&ujvS!F*aJ8eqt4|#VE;``yKqCx7#8QC7AmVn+zW9km3L5TN=R>{5 zLcW`6NKkTz`c{`-w!X9zMG;JZP|skLGs7qBHaWj7Ew!VR=`>n30NX)7j~-RbDmQ6b zHr)zVcn^~e2xqFCBG4P$ZCcRDml-&1^5fqN=CHgBVu1yTg32_N>tZ;N%h*TwOf^1lE#w1$yF$kXaP|V$2XuZ+3wH4Ws6%U;^iP|c6`#etHogQ+E@+~PZ1zdGAty6qTmBM z>!)Wfgq~%lD)m>avXMm)ReN}s9!T_>ic6xA|m7$(&n(Z&j} zHC=}~I(^-*PS2pc7%>)6w}F1il&p*0jX1z)jSvG%S{I3d9w$A|5;TS)4w81yzq5f8 zZVfF~`74m1KXQg|`OS>;FCgZw!AL;2PV{&8%~rG!;`eD=g!luE0k40GjIgjD!JSDNf$eW zZtPMF)&EH_#?IwVLEx&Tosh9K8Ln4Pb$`j2=><6MAezsQvhP#YNnw&cL>12xf)dPz z1tk;{SH6HDcbV0x(+5=2n;A->&iYDa5Zr9$&j?2iAz-(l1;#Vc3-ULyqRV9d0*psG7QHE! z*J=*^sKK?iTO$g*+j~C?QzzIu`6Z{2N-ANrd5*?o%x& z&WMin)$Wq%G!?{EH(2}A?Wx@ zn8|q7xPad4Gu>l^&SBl|mhUxp;S+Cb125`h5aBz9pM34$7n-GHGx*=yqAphZKkds7 z$=5Jnt*6&8@y80jNXm|>2IR<$D5frk;c2f5zLS5xe*^W>kkZa5R1+Am34;mo{Gr=Z zD=z8fgTHwx%)7hzjOo9*Cogbru8GgDzrE;3y%TR+u`|zz%c0Tyd8;#EQXdr4Rgx(2LPRzVI2FwsbXwnF;DP^fg zdYOd|zU&AqgCJ;R+?oSgEgZM`ZX>7&$A-j2m|Tcz4ictXoQkz6Tr<2zhOudU16k<7 zLdk&FCL>=a^>0gV@m#9SnMd)R$5&1mh8p2McnUbk;1|C;`7pPkYjf|o>|a6`x`z1O zt>8~Q%zHX%C=D2!;_1eo3qfbB4QQK^{ON_f*7XhLk{6sr2(KIVmax}fUtF-zHZiUd zHPb9jidV`dE;lsw?1uQH!b%MvPE|lh9-8R_z4^PC8{XAf?S73(n*FvYPoMES+LfOx zcjm4ZZOmKY>M2e${QBVT+XnBQ(oC0fAYcXi7+=}_!hS9m>Y%G@zxn3z#Pb;bJ~-kI zAHNmWgQJp$e8L-uKQ|c4B;#0BTsfRB+}pl7xe=2_1U7pahx5S$TVbRnU0oi1?Wh|A zR7ebg9TK1GgKa4@ic#q_*<;c8?CkjX zMMyq`J()_&(j-FZY7q%z6CN^a0%V{UL)jmrvEg{doZd?qIjgJ^UPr(QUs`68;qkdI zzj_XBQ|#K2U!5?fmIEtXX6^rFY;h4=Vx<-C(d;W6Bi_Xsg{ZJPL*K;I?5U$=V-BNP zn9pKiMc=hZNe**GZBw1kVs#-8c2ZRjol}}^V@^}BqY7c0=!mA;v0`d|(d;R-iT|GK z>zt>Tt3oV09%Y;^RM6=p9C-ys_a``HB_D-pnyX(CeA(GiJqx7xxFE52Y`j~iMv;sP z%jPmx#8p%5`flAU(b!c9XBvV+fygn`BP-C#lyRa;9%>YyW6~A_g?@2J+oY0HAg{qO znT4%ViCgw&eE=W8yt-0{cw`tMieWOG3wyNX#3a^qPhE8TH1?QhwhR~}Ic zZ^q$TF8$p0b0=L8aw&qaTjuAYPmr-6x;U*k*vRnOaBwb_( z5+ls5b(E!(71*l)M&(7ZEgBCtB{6Kh#ArV4u0iNnK!ml!nK5=3;9e76yD9oU4xTAK zPGsGkjtFMMY3pRP5u07;#af?b0C7u) zD^=9X@DRasHaf#c>4rF5GAT!Ggj0!7!z?Q-1_X6ZP2g|+?nVutp|rp}eFlKc8}Q&_ 
z17$NpDQvQolMWZfj0W0|WKm`nd_KXYH_#wRRzs1aRBYqo#feM}a?joONn30Z4Z9PG zg1c!_<52-9D53Wq4z8pUzGkEFm1@Ws(kp4}CO7csZ-7+b)^)M)(xo}_IpTLl7}5BmbBCI{4>rw>4c_gBQHtRd5Z=SW&6Qp2qMOjr3W+ZRmP;S(U+h=^BHKohhRp6Zgf zwt&$zQXhMm@kh1@SB%dIE*kFDZym3Mky$NRljX?}&JGK`PIV1C;Pf!JV{hb4y;Ju- zlpfEPUd+mV5XQH<#BRFhZ}>b#IdF?a?x;rBg-v)@fZpA?+J{3WZjbl3E zv(a&1=pGYPxP@K!6Qg5Vx=-jwc=BA{xL3+QWb&9~DGS1EFkIC+>55{dvY4LV@s5$C zKJmCjigp7?m27*GN_GROz}y+y5%iIj=*JTYccaFjvD&VN%ewfSp=0P zspdFfDqj?gs!N64cEy5uR~wD>af!1PE*xo{^a^8BPIL2=U>B!m2AM0Jf<8qWLoHxi zxQfkbbwkRXgJgLW_j{ZkCxHLBU{@D6T5u90UNs5P769Zei|C$@nA5$L$4ZvxQl1i? z8vLHg17}e{zM$=&h%8Swbfz7yw~X^N|7Chp1bC(oV72l#R8&%Ne5>F=7wR(dB; zkDX!%&fxS19JBjP<6H7+!dO`nPLvB~xn{aDh#^iHKP|A5UQlCG%v%x9@q1w2fa#&% za^UwHu!~(qrv99G%9_e4OBbJ-CkB*1M_?t6UXZ#}4JFDzB|x(1Z}ckuiY}${zj`eVo})!rN8Je z%h2CVJG1$K$2deXx^h8trLs~Han^e>_-M6@0o4C7d548|#mKtm@DvdVAX5ZzA8=*! zKq5C+cM9u)qJ%YBJ1UAcG}6Ji4=$piaZ(K@>1BiD;$R9bR*QP`dH2T=)dgW#f7U)S zZ~i#VYLOnUZt^~Iu3x8QPJaHVUxtRyipQ+tbmWKl14iW1!f6JSDvT$xt8>~7-1ZlJ zU|)Ab*lhvz-JO!$a}RBH9u8$=R)*qeD@iS@(px~OVvML-qqO5&Ujnhw1>G~**Ld{W zE+7h|!{rDZ#;ipZx4^Tcr9vnO)0>WFPzpFu*MYST(`GFzCq*@Gqse6VwDH#x?-{rs z+=dqd$W0*AuAEhzM@GC&!oZa1*lRsx>>mP>DNYigdm^A~xzo}=uV$w#iadO+!&q_~ zT>AsHXOEGsNyfcJt2V$rhGxaIcTEvZr7CMVEu=>l30N~52^71U^<_uw6h@v@`BA2! z)ViU+wF#^$=5o44TpOj?#eyq*+A&c0ghrt8%}SiK)FgLk-;-^+ zXt|1}1vcKAAuR|?L*a8;04p%!M~U2~UC-OJK)DMtBQ#+ZttJgDFNA4zchA*T)cN(E zmpIMLU*c*NrCSV^qdLXD751DsO`#V#K1BVX4qI-B3Rg(zcvlg^mgY^V3Q*5RRQ4-8 z_kAlUisma2SNEx47euK5Y#eu_-gwRW0}M90hEI}eIJ9aU?t11^jSCn4>e~XLSF7Y3 z7JF)1ZbS_P<$<#y(*u@w!jF4FW_f~bxzi%cgP~B1K5N6GFYSAf=D_s5XomU0G9I%Y zPWc{&MItPR#^Le)?zsRkQMmHx^Cnn&;TrPzRVG`wyNH*U;|r3^2NY(z0lwikP}cWF z`p%R@?dy*7H~0&3ST>L9)b7#kwg+|n0#E&-FNf+Z_t7tpa711FogBPV`S3MW_FMGQ zJ@8Z}qXR4-l%p76mvcH`{Fu(^O;8H2@#LZUH#9p6!EX$AEYV$c`s zkPimL3kv>y=WQ+?KIAuim``%cAeBhA6g8}p_*FBH(#{vKi)CIz_D)DFXPql*ccC}O zRW;+Y6V@=&*d6QJUbRxPX+-_24tc-hYHEFaP-IAj*|-P5%xbWujQvu#TF>xigr_r! 
znuu7b(!PyYX=O#>;+0cGRx>Sy39(3y=TCf_BZ$<%m#inup$>o(3dA1Byfsip8S975-iVe7UklFm|$4&kaJ!n66_k-7-k}Z_?){LQe&wTeJ^CR{u6p+U#4_iSZZ1wjB-1gVGNQqnkk*-wFLj(eK8Ut{waU zb1jwb2I?Wg&98jSQWom8c?2>BWt*!3WQ?>fB$KguB9_sStno%x=JXPEFrT|hh~Po2 zSPzu3IL10O?9U(3{X8OLN-!l6DJVtgr$yYXeAPh~%(FECDe;$mIY7R4Miv1GEFk9x zpw`}E5M)qTr60D^;a#OCd0xP*w8y+my1^l8Qd*V`wLoj)GFFj;;esW2PMO=sbas{yX6asXIJ$|LW< zts$A+JaxoM({kv+2d@#bhl?#V#FZn_=8tTTvup?Vq!p!46W{be)EP=VlYE|UzAU}) zz})UzJVWi;9br0k&5>}sqwa_`TP*c}^$9+q)Dks#qEVg>p)71sqKF-YLP@UF{(>lp7;CHAWK;K0TZ_+?>EtZKprfU@;52a1IU8HNx-mnoZrb8| zP8FPb#T$0VE+G-l508;d{DSfC6#dbp(j|^i^I3z9?Qmkr+(dw^w??h}WTN{_ls-GuE~lF;1Urgbtq|Ud_r>wecb@?{{z? zX>X$&Ud+(I(5}5d^>&Z2m+qy=h#vR*lS084ATwUWZLg6PX1Ft+YI`0iI)ynij}{4X zrQE!Mr1m^-?kw<|VT0mG+5J{!;j;zJT`?_=P*09n+=e``CN|7rC$u~Ksg7LSMS(Q~ z51!n1htcK0q7*K-*u0?c8ZlvPXcNwXmFe0Or2}}R@?j@{ECCNZ6va1tZ>|ZOgGZ1j z9?mRkeSK%{X4O>J$@hyFsD)7s67Uldb>O93wQQiV%-FfbEY_@q>1VUstIJs|QgB`o1z**F#s z^joAYN~5{EQ_wZ~R6-nEV#HsQbNU59dT;G zovb$}pb=LdR^{W2Nh~8yWfq*vC_DvJxM=)2N`5x+N6Sl`3{Wl@$*BYol#0^idTuM` zJ=prt$REkxn6%dimg%99{(Dt6D67sTUR6l1F@9&Z9<)XgWK#x zVohUH6>_xRuw1^V**+BCZ@dZj97T*67OBO>6UUivH`<@ray~ym^E?bO=vKqFfK3Kv z`RKxs4raHacB<(XAeH`@0G*K2@ill_U@m=icT@F{k1PU3j4VBde`ThtW8%Z~A>)45ARjQCDXbH}_rS^IxHGp#utBEj3W3KSAU+$6I4s~9OWueETo!J-f~+DV8< z+VMtdcQ?M+?S}kl&uImYiIUJ-K0-te7W4sdWpS6Fqs-I!Tj{8Qp6lMn$Zm8uU)s{X z8|O}HN%8sEl4em&qv{VBq{}$@cCG{B z5~3DY$WRYSkO~z=sxRct5^G5bPZW;LF)(zY)HREgpRrkYV@H3^BTD6u+bJE~$cqr< zw@Gb3^|n*kHZ%Vnu6~B7pB4iM0C4kDuk8Q1R^<(x%>|sCOl%CTe^N)K?Tiepg?|#m z94!og0*38u|67h%*!)SJhUdvFimsktaqp#im9IpH-$fQc79gi259qPkEZ)XU?2uWW zRg?$8`vl;V%-Tk+rwpTGaxy)h%3AmF^78<#i+Q6~M4#>J4`NNEEzy~xZ&O*9q%}@7 zs9XBO#vSKSM<-OjPIDzO9JiAYFWrK14Am{uZT=S3zaCu~K%kZo&u*=k9L#xi6vyaG zQFD76MOE&=c1G;7Zivp<%%fRq+@3wgZg>k@AYQf|*Qyzy$tqc20m?F5nGbG@V#gW` z8RMb2oBxgiqa?)_G6&-;L#(HCoaJrs_ED{IUZ^$~)+e#0iZT!AJDb2V{Sen*70TO& zyI`*~#ZdLFhYP_#DTuoqQ0OS6j0o15r{}O&YoT5wCp|x_dD{#Y;Y}0P1ta?2VEh4* ztrRN5tL6UvoH@M9L z=%FKpf@iSp2P>C(*o<-Ng4qF#A?i!AxjXLG8%Gm`$rZxw;ZqSvv5@@sZ|N*~do5fb 
zKWR)T_>`kxaS|MHFh`-`fc`C%=i@EFk$O&)*_OVrgP4MWsZkE2RJB(WC>w}him zb3KV>1I&nHP9};o8Kw-K$wF8`(R?UMzNB22kSIn#dEe|V-CuMw8I7|#`qSB6dpYg$ zoaDHj%zV6*;`u`VVdsTBKv&g75Q`68rdQU6O>_wkMT9d!z@)q2E)R3(j$*C4jp$Fo z2pE>*ih{4Xzh}W+5!Qw)#M*^E(0X-6-!%wj@4*^)8F=N*0Y5Or+>d= zhMNs@R~>R9;KmyP@I@bpU3&w?)jj0rGrb@q)P>wLVbz1!TZY$#+H-mK6B^0{vdvt0 zaJ0~7p%I#1PpPm1DvBzh7*UsCl^I5^`@XzPzbg+v3T_WyKN?TJ9J=57v^IUO`aQN} z@>Y>WIj+gT@-sobU-tW%L5GP(qY?Eep&I;@osY}O*3i1Ar?Sv|EI6S-pK_!~*A$K| zs-hHESqd`vv;zIzgv2ho5-hsIL5Ke~siJ(v0`Qm7W_Rms2rB67=p&HGRhA-)$p-BS zvXSmgGIGgeJMBcsgp=L8U3Ep$VPBFhvJ!3M5{pocGBS~iZj0({9Jt9nbC{Z$LVb%= zGqzRBjlqkAU{#sOX56})^QjX;jQ26M`poAFIZ#H31td9sQlgBBrfIYgDC9+kO~}s{ zb1i*{#{5tPWhv4pecAZygXG>?5xKx7iPXd?nR;QaIfhlhqNBaLDy>9Yd1Sf3P!s4~ zhfHaFGsIFy&ZM=6^qc>>V>o!zk%5Lk5BtS7oU=YfjWUN;c zrh$6Cyr%KC@QNTzTZvb)QXQkV)01MEY+EzC%CJx)Q&6MM={paB}Dp=qCn^eJ}5LeXG9Gqynt0ir>DvSIZ=i?*_xR3=% zppf1w51ypF2KL6ug zCm}eCi>&>xT;Idzh^PmtDWrU(&eC2hAt(nmd#?;W)*&4lb2Z2Ykv*XLNDEm`_1n3C z`l!wZwiF9b?mN@z?s~>v%hT01C{E3md6M5_Xi3fKD6s26Tt~Z>8|~Ao9ds!cF_Y1| zRG>!=TD0k0`|T*)oX!SlSt8g4Uh@nc(QosCoen@i*ZCSyh|IliliuhEw$8?4ZL9N2 zMQ%%S=3Tj_QilhHW@cSr1UYTtDem{A-ZxyCa$K9A%(!`X_?ieJzXbfERST|JxqmbL zHe!hSqYk|!=!$8CJ5>q}Pj63@Q#PO{gpVb+0-qHFM`j5x_s#~dxvy5u62vywq8upP z_)N)3n9cn7YEf2D8L}x0#_B_~>HT8;;8JC5q+}1gEyd%XqYvY?deQzwD1Lx{ghI3; zv?f;&6CY$H&dDL$k#)hb)5lIqUZ~oU!z)hMI!B9THhw?9!}ykqpFJ|hB?JjV9uwqb z3_70pMV^C7I<3Cg&yMi8JJ3V2gYTOMV=IopfZ#1o>&+j-mB-V${Ok(f?I3{+vR~zE_RR$?9xI~^% z53~ z&bCl+6UeKkUWJ-%mnK{9K>?(3BM3C`@xi}v8)q#;YJhMr5dWvMtAL7X``!bHv~(%m zH8d#Q4N6G~lEW}aGn9ZZNT?v9bV$emf)dg#ASDV?(nu+wpu!_X;(vL<<1zBo-~X&N z>keyizVGaP&c65DbIyEwFn2%(L`P424ZI3nFBA%w{yJ?E} zlwSKF;jIhs(!TFOdMUW|(=qHjr#U-k>`>1u1_yL5Gyy;7@WTOt_)nfIp{D9kwR8f0 z;^Fq=iF(&yd|z30&+I`FBM-P6ouHQ@96TkIe@9=pDDL#_zgXos)-ri5lX-&2D~DsI z4R>xVM$c&aFLgFjwq{1I;jpODOx|n*#@e2+Wgdkm(E(Fad_)peD`1^CJ2TpglmgoC)F(Z)F7y2rzzDU^4wvO{bzw{mzSs4tF;*qabKkC?D!j!tbF 
z4D_6zbqFVI>n@2-Qmg1BiDdD}>E(72)aMv1Y9duOxwlG|E!L(QmQ#j5vmN@a7v{zIt3qQSP?96^$ITE=h~sLn|N|v8YqmA~-0HWgcPHZ@!3Dzm2X{Bozc{qm>J`Ehp}`FQ%Ecbw%+|H8f`pykvo-%&0a z?&ZtJF*{#AYs8Z|z(IFI8sBiZs)L!C9#1W@;hEInZZZdPz2ZnmhoSP9VHQt7mzZUZ zhM!!5IJbe4Z@zEoMjKaxH&Px8p}1<0YmtWwcG@ZPY@*oQSteU zRy+W=Rs>sJ##v^8EJJt0=5---o<@^?fOEp=N<~xXvcf?$gXD0zVHziRMMmC#Mp3o ze(eT!dvjmXp9_C%pV_>{H=nsqYO)n1J?Ihi zjy7f00`|S<;)I!ZyUO{~#+wXX)z(BWsN|$7n9s}H%ZzE8YQv#vRTHjq@D%tYyfe=3)|7jYxRT#E16nFk&1jFC6CH5d4kiJCVq+%r_$Rec7=G!GuZ-0*$5N2GqXB(dqWPS1Um4{xgi2k=;eO_LDy&GR=Q!)bjKY{f!0yoc0Rol&!E`2BkI$5y4U^*k0=GyL-m8XJL%8prM%;fwyX9M^ zs48n3Oh#a>FVWI7dsm~*l0$^J)lxnfTTw~1ceZ73yNvNurwd`;+^1XuucaFN85M8? z$fNl!D9g*O>6IE^POaoDq`86Sw0t4%jIi`&*EEZI?wwOiEvH8(qpfyDvAe`4pWf7k z3-pFgeT{qtj)B!1ZamZ5g3z6Nd40P(%^Kf@#!uzbIk~8w`9wbhWc~1E|sw6-FsOqrhb2DLDwlaq@)Y zAi$KoA=Vyn=Yxqxtf7wu*$47Ht>WZi{AdeN79#9ws~CtE;~gC$q7T>*5yKK3VT)Q=sllRR}lBIGd17+bOu| zeUeUrMgF=Gjk-{epAyUd_KNgwZK_Pz=H$+{4~E_ZRa3IJpU~IZ5U4Z3l%u3{Ls~`H z(iysmm+!HBJTC-$EpHM9yrXUM^_FZ(3sdmsyZ6=lU8bb3V(WK>P0$l~#QA&NMj@OA z*OQ>^-s_D-bda022~!G!bTh7@FR>t!1r`Js1;4$(^_*hH-_pUPf5C}K-v$%i#KBB! 
zU{~a7)R>ix z#LA|<6v#rwKkB1JBLWkWu#M0#8i1J0e4dFDP3jrlFfxhkDs%Q~)e6e7fR$U?e$<{x zfZb0?UMsB|E}Fk)@|^{)_^L7O%rp1GRNig@bUX(^6}6HoGi8IXoSKpI1A(GV)uA=7 zOXG&KjZYVjYn6}2YV0yfnKsnpDlF)h$Gv--|6$BsWFg|IWnp|#sk}zOAb6Bb?vb@t zs^7=4IdiKE_rUT@rG!D4Zy zcnas#XT77V&%igMXY(lQS|)lgO{pN9!P-94KeZH_+PK5jESYCSPMN)=D(JIAVeB%D zI_>_lvD;pylkZ#Ral0IzC6ei$J$4NnGw(pnVd`&aaNT5mfq-4)aPjj(v;`VvJ6Xxjm@3DX+Kju z@9-h++s7x>idTEL zd)ptYy?P2$S*_DI;eMR0ZdAuS)~fGEZEguO&+3AwW@Sw$&KvgJr6aGK*Ar;0wx`lr z7V&!+9C7`VcV^t+Wj~AweOGQL!)0)serr$8Fez7kC(VSVRdjqpQuq964RW^2euIre zh10&Tv)|dj*CoRozrW<4y_+5}3EGRok+G7ODl3-CF1r?JYDdw&NbcVT=7ljq_K+8bMeG3uRw@3=cof?j+v+WaKI`WqwByf#7aFK3 z0+R34xQ-6nxQ&9xJKl}`C9FlUe1-h^i?5fr5kjot#MA-$%k106t>*gM+yF3m2X#=1tt07`cK)37dA^A4d8%6R>@0U-UZ~wSvzMlK$tlm~aK`%e8|quXyH`aLM0#Dcu%sqEsKV%i zVn_*W-Qbnl)h?RP>)$rZ5JL!*H;Z{ zk7(FB`lo~h&zB|S6j-Na;y$QM*rn^tkO{>#DWZN@IwJps3*Nm&ox0{{;=J~hvPb-* zvAOEPImrdq()yl~`j`Q;R1Y%CdLKKw*;gtNaM~WDO95YXsTjKCOdRD2Is@aVRTYFD zpS=_EB!@Ub&c*JmNMF=F+)Bq)52|=83IEG;M5(Ol*97!W(S-5X-5w&7->`1Pw-0Ml zpA>jaofnyPQTCzoIG}OK9j^nn>F>jC#$iSnJY8y6ue4nxs@3HtfNx01XVK7NcX#Cu z34g-z=0!7ip&@wI>>6ynJYyFTEgH6DA?b>~V%2s_@NPDza5&6cno!S(|85*74}6_M z%s1c4`B{lqMu``(4~Jk#_`^=tu36TgXPv_}{lhhyi(rrSM_uoVVNuZOuxCXom9|wg zNf&BtzX=hVi*4dG&1J!^QW;O%fQ$jVH=W74B8WR)*tM1{(@cHRqiS_W6R^h8uxd@zV>KNI zR(-LNNkLqh>e=CmL|q9sRHm#15%q$o7_GQMp8FLX-HGnJ<+(;k{Q%+Sk+!^mM+2#1y9+gG2IDZGt%;Cfk{+ zT5}^x=!i2$tnH_se6eC zkn;kK>%ICpo=X&=cSsbxQ|AjJ;5Ff;AyIj>$YA8cw*?W^Nn}S|1jrbf@Bd zr82I8KlOh4#5C0sw3oVvuC0NFPKH4S0$~F$U4JM1Im$B%%oGm_5$Lnr{#Pv}eL1k& zMP(pG$MI^8&!nYffq#$zJ^3GF|cC%2d4V@qKV#fu6u2O

k)oKu82Fu=RODzQrHPEC+Mz{hW(G7VuCl8g1ou-Ot!41bp_>OC1&@A_6e*hc)1X zMuDvzEZyB*fW1^+7dL0%ofr;-xT6B@0~|VazatI{60!X=po^uOr6UB$1POKmuI_&b zOL&O+w*!>`k+y%?Z|wm4$@_1|WC|pKM(F{k8TR$-4hs?i|GBc9)qa{vYq)~5qa(2N zsR?s}0Pp^ufVGEB8oE9VCFa0K$x0HSpem!tIyR69y0rnjg8cqjmWyz7*Kx3~X> z|BZX}Y;oVB1HX@l9_-y7dI*WgruY@?rC&64`}3W`ECA>O@Y#Q@JS<4WBF(QbwJqHM zt)fE#6jTSyZ^E8y0INaIf!omWjvS=@15`O%V2CKg+}z=M9##kLKRN0uJuK250bXVU zwzT&n@30^dzKnlL^us;wClg?CKWEtiEb#zhPVx{PxFQiwEPp^C53zN21EdZAz?3D& zC6fK|_!S5Mq&0z;xWGLEv}!zjfpRg_orp7|fXMx=uP!@X`yT@5(N_Hza}p5fBk&|)J7fZ`NQ9Nz@5xT? zi?iV$q+bG!2LZUpF)>Yl!u;DEHV3!i{ipcJm_8Gj@Dac%N3|SQVGqRhrJ;WOR|CtrwzPTW^&$A6!A$E)h7xohm>hA8p{PUZ~ z_&zeg@OL3PxPtzkfsNZAqXCZ8Is7yQ+plm~8;}|~DEkv&f@?q5hB*OGQYXuwVQOp0 z?QQ`6qyp|-$47wjuV74IE_x2I17$+grwMBE^25d<5!lYhnszuh|5Yk;RB+Uk*hk=m zu73=E^7ul{40{A^?Rg^fq0ZfZO@C1HupR*_d;J>lkFv6&x&}4N;t}1T@2}~AC^<3b zA}RxFPPZe5R{_6dIN9N-GT29Oa}RzA2ekKuEVZbuMOB?Xf**`N5&m}?)TjigdY(rF z?~+a=`0);TlDa1j)1G`AfW? zRl883QPq=w zbB|bHEx%_u*$t@Yl#Vc;y*?2W^|^NJ)DmioQFr~1&>MSBL_b(YIpGWdDm3bT=Mgm1 e+h0K+-~H6qzyuy}`;+tYAZFmzUSVSYum1yJqxCBQ diff --git a/cedar-dafny-java-wrapper/gradle/wrapper/gradle-wrapper.properties b/cedar-dafny-java-wrapper/gradle/wrapper/gradle-wrapper.properties deleted file mode 100644 index 0c85a1f75..000000000 --- a/cedar-dafny-java-wrapper/gradle/wrapper/gradle-wrapper.properties +++ /dev/null @@ -1,6 +0,0 @@ -distributionBase=GRADLE_USER_HOME -distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-8.1-bin.zip -networkTimeout=10000 -zipStoreBase=GRADLE_USER_HOME -zipStorePath=wrapper/dists diff --git a/cedar-dafny-java-wrapper/gradlew b/cedar-dafny-java-wrapper/gradlew deleted file mode 100755 index aeb74cbb4..000000000 --- a/cedar-dafny-java-wrapper/gradlew +++ /dev/null @@ -1,245 +0,0 @@ -#!/bin/sh - -# -# Copyright © 2015-2021 the original authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -############################################################################## -# -# Gradle start up script for POSIX generated by Gradle. -# -# Important for running: -# -# (1) You need a POSIX-compliant shell to run this script. If your /bin/sh is -# noncompliant, but you have some other compliant shell such as ksh or -# bash, then to run this script, type that shell name before the whole -# command line, like: -# -# ksh Gradle -# -# Busybox and similar reduced shells will NOT work, because this script -# requires all of these POSIX shell features: -# * functions; -# * expansions «$var», «${var}», «${var:-default}», «${var+SET}», -# «${var#prefix}», «${var%suffix}», and «$( cmd )»; -# * compound commands having a testable exit status, especially «case»; -# * various built-in commands including «command», «set», and «ulimit». -# -# Important for patching: -# -# (2) This script targets any POSIX shell, so it avoids extensions provided -# by Bash, Ksh, etc; in particular arrays are avoided. -# -# The "traditional" practice of packing multiple parameters into a -# space-separated string is a well documented source of bugs and security -# problems, so this is (mostly) avoided, by progressively accumulating -# options in "$@", and eventually passing that to Java. -# -# Where the inherited environment variables (DEFAULT_JVM_OPTS, JAVA_OPTS, -# and GRADLE_OPTS) rely on word-splitting, this is performed explicitly; -# see the in-line comments for details. 
-# -# There are tweaks for specific operating systems such as AIX, CygWin, -# Darwin, MinGW, and NonStop. -# -# (3) This script is generated from the Groovy template -# https://github.com/gradle/gradle/blob/HEAD/subprojects/plugins/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt -# within the Gradle project. -# -# You can find Gradle at https://github.com/gradle/gradle/. -# -############################################################################## - -# Attempt to set APP_HOME - -# Resolve links: $0 may be a link -app_path=$0 - -# Need this for daisy-chained symlinks. -while - APP_HOME=${app_path%"${app_path##*/}"} # leaves a trailing /; empty if no leading path - [ -h "$app_path" ] -do - ls=$( ls -ld "$app_path" ) - link=${ls#*' -> '} - case $link in #( - /*) app_path=$link ;; #( - *) app_path=$APP_HOME$link ;; - esac -done - -# This is normally unused -# shellcheck disable=SC2034 -APP_BASE_NAME=${0##*/} -APP_HOME=$( cd "${APP_HOME:-./}" && pwd -P ) || exit - -# Use the maximum available, or set MAX_FD != -1 to use that value. -MAX_FD=maximum - -warn () { - echo "$*" -} >&2 - -die () { - echo - echo "$*" - echo - exit 1 -} >&2 - -# OS specific support (must be 'true' or 'false'). -cygwin=false -msys=false -darwin=false -nonstop=false -case "$( uname )" in #( - CYGWIN* ) cygwin=true ;; #( - Darwin* ) darwin=true ;; #( - MSYS* | MINGW* ) msys=true ;; #( - NONSTOP* ) nonstop=true ;; -esac - -CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar - - -# Determine the Java command to use to start the JVM. -if [ -n "$JAVA_HOME" ] ; then - if [ -x "$JAVA_HOME/jre/sh/java" ] ; then - # IBM's JDK on AIX uses strange locations for the executables - JAVACMD=$JAVA_HOME/jre/sh/java - else - JAVACMD=$JAVA_HOME/bin/java - fi - if [ ! -x "$JAVACMD" ] ; then - die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME - -Please set the JAVA_HOME variable in your environment to match the -location of your Java installation." 
- fi -else - JAVACMD=java - which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. - -Please set the JAVA_HOME variable in your environment to match the -location of your Java installation." -fi - -# Increase the maximum file descriptors if we can. -if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then - case $MAX_FD in #( - max*) - # In POSIX sh, ulimit -H is undefined. That's why the result is checked to see if it worked. - # shellcheck disable=SC3045 - MAX_FD=$( ulimit -H -n ) || - warn "Could not query maximum file descriptor limit" - esac - case $MAX_FD in #( - '' | soft) :;; #( - *) - # In POSIX sh, ulimit -n is undefined. That's why the result is checked to see if it worked. - # shellcheck disable=SC3045 - ulimit -n "$MAX_FD" || - warn "Could not set maximum file descriptor limit to $MAX_FD" - esac -fi - -# Collect all arguments for the java command, stacking in reverse order: -# * args from the command line -# * the main class name -# * -classpath -# * -D...appname settings -# * --module-path (only if needed) -# * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables. - -# For Cygwin or MSYS, switch paths to Windows format before running java -if "$cygwin" || "$msys" ; then - APP_HOME=$( cygpath --path --mixed "$APP_HOME" ) - CLASSPATH=$( cygpath --path --mixed "$CLASSPATH" ) - - JAVACMD=$( cygpath --unix "$JAVACMD" ) - - # Now convert the arguments - kludge to limit ourselves to /bin/sh - for arg do - if - case $arg in #( - -*) false ;; # don't mess with options #( - /?*) t=${arg#/} t=/${t%%/*} # looks like a POSIX filepath - [ -e "$t" ] ;; #( - *) false ;; - esac - then - arg=$( cygpath --path --ignore --mixed "$arg" ) - fi - # Roll the args list around exactly as many times as the number of - # args, so each arg winds up back in the position where it started, but - # possibly modified. 
- # - # NB: a `for` loop captures its iteration list before it begins, so - # changing the positional parameters here affects neither the number of - # iterations, nor the values presented in `arg`. - shift # remove old arg - set -- "$@" "$arg" # push replacement arg - done -fi - - -# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. -DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' - -# Collect all arguments for the java command; -# * $DEFAULT_JVM_OPTS, $JAVA_OPTS, and $GRADLE_OPTS can contain fragments of -# shell script including quotes and variable substitutions, so put them in -# double quotes to make sure that they get re-expanded; and -# * put everything else in single quotes, so that it's not re-expanded. - -set -- \ - "-Dorg.gradle.appname=$APP_BASE_NAME" \ - -classpath "$CLASSPATH" \ - org.gradle.wrapper.GradleWrapperMain \ - "$@" - -# Stop when "xargs" is not available. -if ! command -v xargs >/dev/null 2>&1 -then - die "xargs is not available" -fi - -# Use "xargs" to parse quoted args. -# -# With -n1 it outputs one arg per line, with the quotes and backslashes removed. -# -# In Bash we could simply go: -# -# readarray ARGS < <( xargs -n1 <<<"$var" ) && -# set -- "${ARGS[@]}" "$@" -# -# but POSIX shell has neither arrays nor command substitution, so instead we -# post-process each arg (as a line of input to sed) to backslash-escape any -# character that might be a shell metacharacter, then use eval to reverse -# that process (while maintaining the separation between arguments), and wrap -# the whole thing up as a single "set" statement. -# -# This will of course break if any of these variables contains a newline or -# an unmatched quote. 
-# - -eval "set -- $( - printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" | - xargs -n1 | - sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' | - tr '\n' ' ' - )" '"$@"' - -exec "$JAVACMD" "$@" diff --git a/cedar-dafny-java-wrapper/gradlew.bat b/cedar-dafny-java-wrapper/gradlew.bat deleted file mode 100644 index 6689b85be..000000000 --- a/cedar-dafny-java-wrapper/gradlew.bat +++ /dev/null @@ -1,92 +0,0 @@ -@rem -@rem Copyright 2015 the original author or authors. -@rem -@rem Licensed under the Apache License, Version 2.0 (the "License"); -@rem you may not use this file except in compliance with the License. -@rem You may obtain a copy of the License at -@rem -@rem https://www.apache.org/licenses/LICENSE-2.0 -@rem -@rem Unless required by applicable law or agreed to in writing, software -@rem distributed under the License is distributed on an "AS IS" BASIS, -@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -@rem See the License for the specific language governing permissions and -@rem limitations under the License. -@rem - -@if "%DEBUG%"=="" @echo off -@rem ########################################################################## -@rem -@rem Gradle startup script for Windows -@rem -@rem ########################################################################## - -@rem Set local scope for the variables with windows NT shell -if "%OS%"=="Windows_NT" setlocal - -set DIRNAME=%~dp0 -if "%DIRNAME%"=="" set DIRNAME=. -@rem This is normally unused -set APP_BASE_NAME=%~n0 -set APP_HOME=%DIRNAME% - -@rem Resolve any "." and ".." in APP_HOME to make it shorter. -for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi - -@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. -set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" - -@rem Find java.exe -if defined JAVA_HOME goto findJavaFromJavaHome - -set JAVA_EXE=java.exe -%JAVA_EXE% -version >NUL 2>&1 -if %ERRORLEVEL% equ 0 goto execute - -echo. 
-echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. -echo. -echo Please set the JAVA_HOME variable in your environment to match the -echo location of your Java installation. - -goto fail - -:findJavaFromJavaHome -set JAVA_HOME=%JAVA_HOME:"=% -set JAVA_EXE=%JAVA_HOME%/bin/java.exe - -if exist "%JAVA_EXE%" goto execute - -echo. -echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% -echo. -echo Please set the JAVA_HOME variable in your environment to match the -echo location of your Java installation. - -goto fail - -:execute -@rem Setup the command line - -set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar - - -@rem Execute Gradle -"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* - -:end -@rem End local scope for the variables with windows NT shell -if %ERRORLEVEL% equ 0 goto mainEnd - -:fail -rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of -rem the _cmd.exe /c_ return code! 
-set EXIT_CODE=%ERRORLEVEL% -if %EXIT_CODE% equ 0 set EXIT_CODE=1 -if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE% -exit /b %EXIT_CODE% - -:mainEnd -if "%OS%"=="Windows_NT" endlocal - -:omega diff --git a/cedar-dafny-java-wrapper/settings.gradle b/cedar-dafny-java-wrapper/settings.gradle deleted file mode 100644 index 9c3b64f7b..000000000 --- a/cedar-dafny-java-wrapper/settings.gradle +++ /dev/null @@ -1,4 +0,0 @@ -// Always define a settings file: -// https://docs.gradle.org/current/userguide/organizing_gradle_projects.html#always_define_a_settings_file - -rootProject.name = 'cedar-dafny-java-wrapper' diff --git a/cedar-dafny-java-wrapper/src/main/java/com/CedarDefinitionalImplementation/DafnyUtils.java b/cedar-dafny-java-wrapper/src/main/java/com/CedarDefinitionalImplementation/DafnyUtils.java deleted file mode 100644 index 33bd0b21f..000000000 --- a/cedar-dafny-java-wrapper/src/main/java/com/CedarDefinitionalImplementation/DafnyUtils.java +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.CedarDefinitionalImplementation; - -import java.util.HashMap; -import java.util.Iterator; -import java.util.Map; -import java.math.BigInteger; - -import com.fasterxml.jackson.databind.JsonNode; -import com.fasterxml.jackson.databind.node.ArrayNode; -import com.fasterxml.jackson.databind.node.BooleanNode; -import com.fasterxml.jackson.databind.node.JsonNodeFactory; -import com.fasterxml.jackson.databind.node.BigIntegerNode; -import com.fasterxml.jackson.databind.node.NullNode; -import com.fasterxml.jackson.databind.node.ObjectNode; -import com.fasterxml.jackson.databind.node.TextNode; - -public class DafnyUtils { - public static difftest_mhelpers.Json convertJsonJacksonToDafny(JsonNode node) { - switch (node.getNodeType()) { - case NULL: - return difftest_mhelpers.Json.create_JsonNull(); - case BOOLEAN: - return difftest_mhelpers.Json.create_JsonBool(node.asBoolean()); - case NUMBER: - if (!node.canConvertToExactIntegral()) - throw new UnsupportedOperationException("Non-integer Jackson number is not supported by Dafny yet"); - else if (!node.canConvertToLong()) - throw new UnsupportedOperationException("Jackson integer is too big for Java long"); - else - return difftest_mhelpers.Json.create_JsonInt(BigInteger.valueOf(node.asLong())); - case STRING: - // Dafny currently doesn't offer an official API to convert - // between a Java `String` and a `DafnySequence` - // representing a Dafny unicode `string`. The functions - // `asUnicodeString` and `verbatimString` (used in - // `convertJsonDafnyToJackson` below) work as of this writing - // but may break at any time. If and when they break, our tests - // will detect the problem and we'll just copy the code of the - // working versions then. See - // https://github.com/dafny-lang/libraries/issues/73#issuecomment-1503247487. 
- return difftest_mhelpers.Json.create_JsonString( - dafny.DafnySequence.asUnicodeString(node.asText())); - case ARRAY: - difftest_mhelpers.Json dafnyElements[] = new difftest_mhelpers.Json[node.size()]; - for (int i = 0; i < node.size(); i++) - dafnyElements[i] = convertJsonJacksonToDafny(node.get(i)); - return difftest_mhelpers.Json.create_JsonArray( - dafny.DafnySequence.of(difftest_mhelpers.Json._typeDescriptor(), dafnyElements)); - case OBJECT: - HashMap, difftest_mhelpers.Json> mapForDafny = new HashMap<>(); - Iterator> fieldsIter = node.fields(); - while (fieldsIter.hasNext()) { - Map.Entry entry = fieldsIter.next(); - mapForDafny.put(dafny.DafnySequence.asUnicodeString(entry.getKey()), convertJsonJacksonToDafny(entry.getValue())); - } - return difftest_mhelpers.Json.create_JsonObject(new dafny.DafnyMap<>(mapForDafny)); - default: - throw new UnsupportedOperationException("Unsupported Jackson JsonNode type: " + node.getNodeType()); - } - } - - public static JsonNode convertJsonDafnyToJackson(difftest_mhelpers.Json node) { - if (node.is_JsonNull()) { - return NullNode.instance; - } else if (node.is_JsonBool()) { - return BooleanNode.valueOf(node.dtor_b()); - } else if (node.is_JsonInt()) { - return BigIntegerNode.valueOf(node.dtor_i()); - } else if (node.is_JsonString()) { - return TextNode.valueOf(node.dtor_s().verbatimString()); - } else if (node.is_JsonArray()) { - ArrayNode jacksonNode = new ArrayNode(JsonNodeFactory.instance); - for (difftest_mhelpers.Json item : node.dtor_a()) { - jacksonNode.add(convertJsonDafnyToJackson(item)); - } - return jacksonNode; - } else if (node.is_JsonObject()) { - ObjectNode jacksonNode = new ObjectNode(JsonNodeFactory.instance); - for (dafny.Tuple2, difftest_mhelpers.Json> dafnyEntry : node.dtor_o(). 
- , difftest_mhelpers.Json>entrySet().Elements()) { - jacksonNode.set(dafnyEntry.dtor__0().verbatimString(), - convertJsonDafnyToJackson(dafnyEntry.dtor__1())); - } - return jacksonNode; - } else { - throw new AssertionError("Dafny Json is not one of the known types"); - } - } -} diff --git a/cedar-dafny-java-wrapper/src/main/java/com/CedarDefinitionalImplementation/DefinitionalEngine.java b/cedar-dafny-java-wrapper/src/main/java/com/CedarDefinitionalImplementation/DefinitionalEngine.java deleted file mode 100644 index dfc32b663..000000000 --- a/cedar-dafny-java-wrapper/src/main/java/com/CedarDefinitionalImplementation/DefinitionalEngine.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.CedarDefinitionalImplementation; - -import com.CedarDefinitionalImplementation.log.Timer; -import com.CedarDefinitionalImplementation.log.Logger; -import com.CedarDefinitionalImplementation.log.LogTag; -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.JsonNode; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.node.NullNode; -import com.fasterxml.jackson.databind.node.ObjectNode; -import java.util.Optional; - - -/** - * DefinitionalEngine - */ -public class DefinitionalEngine { - private ObjectMapper mapper; - - public DefinitionalEngine() { - this.mapper = new ObjectMapper(); - } - - /** - * Authorization query. - * - * @param json JSON string containing Query and Slice, using the serde - * serialization of the corresponding Rust objects. - * @return JSON string containing Response - */ - public String isAuthorized_str(String json) { - Timer> query = new Timer<>(() -> deserializeQuery(json)); - Logger.get().set(LogTag.Deserialization, query); - return query.get().map(x -> isAuthorized_json(x)).orElse("null"); - } - - public String eval_str(String json) { - return deserializeEvalQuery(json).map(x -> eval_json(x)).orElse("null"); - } - - private Optional deserializeEvalQuery(String json) { - try { - JsonNode js = mapper.readTree(json); - return Optional.of(DafnyUtils.convertJsonJacksonToDafny(js)); - } catch (JsonProcessingException e) { - return Optional.empty(); - } - } - - public String eval_json(difftest_mhelpers.Json json) { - try { - difftest_mhelpers.Json result = difftest_mmain.__default.evalJson(json); - JsonNode serialized = DafnyUtils.convertJsonDafnyToJackson(result); - return mapper.writeValueAsString(serialized); - } catch (JsonProcessingException e) { - return "null"; - } - } - - - private Optional deserializeQuery(String json) { - try { - JsonNode js = mapper.readTree(json); - return Optional.of(DafnyUtils.convertJsonJacksonToDafny(js)); - } 
catch (JsonProcessingException e) { - return Optional.empty(); - } - - } - - /** - * Authorization query. - * - * @param json JsonNode containing Query and Slice, using the Rust AST - * form of the JSON, not the official interchange format. - * @return JsonNode containing Response - */ - public String isAuthorized_json(difftest_mhelpers.Json json) { - try { - Timer authResult = new Timer<>(() -> difftest_mmain.__default.isAuthorizedJson(json)); - Logger.get().set(LogTag.Auth, authResult); - Timer serialResult = new Timer<>(() -> DafnyUtils.convertJsonDafnyToJackson(authResult.get())); - Logger.get().set(LogTag.Serialization, serialResult); - ObjectNode topLevel = mapper.createObjectNode(); - for (LogTag tag : LogTag.iter()) { - topLevel.put(tag.toString(), Logger.get().get(tag)); - } - topLevel.set("response", serialResult.get()); - return mapper.writeValueAsString(topLevel); - } catch (JsonProcessingException e) { - return "null"; - } - } - -} diff --git a/cedar-dafny-java-wrapper/src/main/java/com/CedarDefinitionalImplementation/DefinitionalValidator.java b/cedar-dafny-java-wrapper/src/main/java/com/CedarDefinitionalImplementation/DefinitionalValidator.java deleted file mode 100644 index 9ee44eee3..000000000 --- a/cedar-dafny-java-wrapper/src/main/java/com/CedarDefinitionalImplementation/DefinitionalValidator.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.CedarDefinitionalImplementation; - -import com.CedarDefinitionalImplementation.log.Timer; -import com.CedarDefinitionalImplementation.log.Logger; -import com.CedarDefinitionalImplementation.log.LogTag; -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.JsonNode; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.node.NullNode; -import com.fasterxml.jackson.databind.node.ObjectNode; -import java.util.Optional; - - -/** - * DefinitionalValidator - */ -public class DefinitionalValidator { - private ObjectMapper mapper; - - public DefinitionalValidator() { - this.mapper = new ObjectMapper(); - } - - /** - * Validation query. - * - * @param json JSON string containing Schema and Policy, using the serde - * serialization of the corresponding Rust objects. - * @return JSON string containing validation result - */ - public String validate_str(String json) { - Timer> query = new Timer<>(() -> deserializeQuery(json)); - Logger.get().set(LogTag.Deserialization, query); - return query.get().map(x -> validate_json(x)).orElse("null"); - } - - private Optional deserializeQuery(String json) { - try { - JsonNode js = mapper.readTree(json); - return Optional.of(DafnyUtils.convertJsonJacksonToDafny(js)); - } catch (JsonProcessingException e) { - return Optional.empty(); - } - - } - - /** - * Validation query. - * - * @param json JsonNode containing Schema and Policy, using the Rust AST - * form of the JSON, not the official interchange format. 
- * @return JsonNode containing validation result - */ - public String validate_json(difftest_mhelpers.Json json) { - try { - Timer valResult = new Timer<>(() -> difftest_mmain.__default.validateJson(json)); - Logger.get().set(LogTag.Validation, valResult); - Timer serialResult = new Timer<>(() -> DafnyUtils.convertJsonDafnyToJackson(valResult.get())); - Logger.get().set(LogTag.Serialization, serialResult); - ObjectNode topLevel = mapper.createObjectNode(); - for (LogTag tag : LogTag.iter()) { - topLevel.put(tag.toString(), Logger.get().get(tag)); - } - topLevel.set("response", serialResult.get()); - return mapper.writeValueAsString(topLevel); - } catch (JsonProcessingException e) { - return "null"; - } - } -} diff --git a/cedar-dafny-java-wrapper/src/main/java/com/CedarDefinitionalImplementation/Main.java b/cedar-dafny-java-wrapper/src/main/java/com/CedarDefinitionalImplementation/Main.java deleted file mode 100644 index 504310867..000000000 --- a/cedar-dafny-java-wrapper/src/main/java/com/CedarDefinitionalImplementation/Main.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.CedarDefinitionalImplementation; - -import com.CedarDefinitionalImplementation.DefinitionalEngine; -import com.CedarDefinitionalImplementation.DefinitionalValidator; -import com.fasterxml.jackson.core.JsonFactory; -import com.fasterxml.jackson.databind.JsonNode; -import com.fasterxml.jackson.core.JsonParser; -import com.fasterxml.jackson.core.JsonToken; -import com.fasterxml.jackson.databind.ObjectMapper; - -public class Main { - public static void main(String[] args) { - } -} diff --git a/cedar-dafny-java-wrapper/src/main/java/com/CedarDefinitionalImplementation/log/LogTag.java b/cedar-dafny-java-wrapper/src/main/java/com/CedarDefinitionalImplementation/log/LogTag.java deleted file mode 100644 index 2df25c47e..000000000 --- a/cedar-dafny-java-wrapper/src/main/java/com/CedarDefinitionalImplementation/log/LogTag.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.CedarDefinitionalImplementation.log; - -public enum LogTag { - Serialization, - Deserialization, - Auth, - Validation; - - public String toString() { - switch (this) { - case Serialization: - return "serialization_nanos"; - case Deserialization: - return "deserialization_nanos"; - case Auth: - return "auth_nanos"; - case Validation: - return "validation_nanos"; - } - throw new RuntimeException("Unreachable"); - } - - - public static LogTag[] iter() { - LogTag[] arr = {LogTag.Serialization, LogTag.Deserialization, LogTag.Auth, LogTag.Validation}; - return arr; - } - -} diff --git a/cedar-dafny-java-wrapper/src/main/java/com/CedarDefinitionalImplementation/log/Logger.java b/cedar-dafny-java-wrapper/src/main/java/com/CedarDefinitionalImplementation/log/Logger.java deleted file mode 100644 index fc6896131..000000000 --- a/cedar-dafny-java-wrapper/src/main/java/com/CedarDefinitionalImplementation/log/Logger.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.CedarDefinitionalImplementation.log; -import java.util.Optional; -import com.fasterxml.jackson.databind.JsonNode; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.node.ObjectNode; -import java.util.HashMap; - - - -public class Logger { - - /* Singleton Infrastructure */ - private static Logger globalSingleton; - - public static Logger get() { - if (globalSingleton == null) - globalSingleton = new Logger(); - return globalSingleton; - } - - - private HashMap durations; - - private Logger() { - durations = new HashMap<>(); - } - - - public void set(LogTag tag, Timer t) { - durations.put(tag, t.getDuration()); - } - - public long get(LogTag tag) { - if (durations.containsKey(tag)) - return durations.get(tag); - else - return 0; - } - - - -} - diff --git a/cedar-dafny-java-wrapper/src/main/java/com/CedarDefinitionalImplementation/log/Timer.java b/cedar-dafny-java-wrapper/src/main/java/com/CedarDefinitionalImplementation/log/Timer.java deleted file mode 100644 index 1f4323759..000000000 --- a/cedar-dafny-java-wrapper/src/main/java/com/CedarDefinitionalImplementation/log/Timer.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.CedarDefinitionalImplementation.log; -import java.util.function.Supplier; - -public class Timer implements Supplier { - - private final T result; - private final long duration; - - public Timer(Supplier s) { - long start = System.nanoTime(); - result = s.get(); - long end = System.nanoTime(); - duration = end - start; - } - - public T get() { - return result; - } - - public long getDuration() { - return duration; - } -} - - - diff --git a/cedar-dafny-java-wrapper/src/main/java/com/CedarDefinitionalImplementation/package-info.java b/cedar-dafny-java-wrapper/src/main/java/com/CedarDefinitionalImplementation/package-info.java deleted file mode 100644 index d8e8ed0f2..000000000 --- a/cedar-dafny-java-wrapper/src/main/java/com/CedarDefinitionalImplementation/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * This package contains the generated Java code from the Cedar definitional - * engine, and interfaces to it. See the README for more. 
- */ -package com.CedarDefinitionalImplementation; diff --git a/cedar-dafny-java-wrapper/src/test/java/com/CedarDefinitionalImplementation/DefinitionalEngineTest.java b/cedar-dafny-java-wrapper/src/test/java/com/CedarDefinitionalImplementation/DefinitionalEngineTest.java deleted file mode 100644 index 490e12330..000000000 --- a/cedar-dafny-java-wrapper/src/test/java/com/CedarDefinitionalImplementation/DefinitionalEngineTest.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.CedarDefinitionalImplementation; - -import static org.junit.jupiter.api.Assertions.assertEquals; - -import org.junit.jupiter.api.Test; - -/** - * DefinitionalEngineTest. 
- */ -public class DefinitionalEngineTest { - @Test - public void nullTest() { - DefinitionalEngine engine = new DefinitionalEngine(); - assertEquals("null", engine.isAuthorized_str("{")); - } -} diff --git a/cedar-dafny/.config/dotnet-tools.json b/cedar-dafny/.config/dotnet-tools.json deleted file mode 100644 index 039e08633..000000000 --- a/cedar-dafny/.config/dotnet-tools.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "version": 1, - "isRoot": true, - "tools": { - "dafny": { - "version": "4.3.0", - "commands": [ - "dafny" - ] - }, - "dafny-reportgenerator": { - "version": "1.2.0", - "commands": [ - "dafny-reportgenerator" - ] - } - } -} diff --git a/cedar-dafny/.vscode/settings.json b/cedar-dafny/.vscode/settings.json deleted file mode 100644 index 256ec15bf..000000000 --- a/cedar-dafny/.vscode/settings.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "files.trimFinalNewlines": true, - "files.trimTrailingWhitespace": true, - "diffEditor.ignoreTrimWhitespace": false, - "files.insertFinalNewline": true -} diff --git a/cedar-dafny/Makefile b/cedar-dafny/Makefile deleted file mode 100644 index cd4101788..000000000 --- a/cedar-dafny/Makefile +++ /dev/null @@ -1,86 +0,0 @@ -# For now, we assume that `dotnet` is on the PATH and Dafny is set up to use -# the correct Z3 (there are a few options for how that can be achieved). -# Maybe more similar assumptions. - -# To typecheck your code, but not run the verifier, do -# $ make DAFNY_VERIFY=0 -# This is much faster to iterate when you're just refactoring. - -# Make `all` the default target. -all: - -clean: - rm -rf build - -export DOTNET_CLI_TELEMETRY_OPTOUT := 1 - -export DOTNET_SYSTEM_GLOBALIZATION_INVARIANT := 1 - -# Make `dotnet` install Dafny into the current directory instead of the user's -# home directory to keep things self-contained. The files that get created are -# `.dotnet`, `.local/share/NuGet`, and `.nuget`. 
-export DOTNET_CLI_HOME := $(PWD) - -uname_S := $(shell sh -c 'uname -s 2>/dev/null || echo not') - -# Assumes that we have `dotnet` installed and on the PATH locally -DOTNET = dotnet - -restore-dafny: - dotnet tool restore - -# If GEN_STATS is set to 1, compilation will generate statistics in a `TestResults` directory. -GEN_STATS ?= 0 -ifeq ($(GEN_STATS), 1) -clean-test-results: - rm -rf TestResults/*.csv TestResults/*.trx -DAFNY_ARGS = /dafnyVerify:1 /compile:0 /warnShadowing /warningsAsErrors /vcsLoad:1 /definiteAssignment:3 /verificationLogger:csv -else -clean-test-results: -DAFNY_ARGS = /compile:0 /warnShadowing /warningsAsErrors /vcsLoad:1 /definiteAssignment:3 -endif - -SOURCEDIR := . -TEST_SOURCEDIR := ./test -SOURCES := $(shell find $(SOURCEDIR) -path $(TEST_SOURCEDIR) -prune -o -name '*.dfy' -print | grep -v flycheck) -PACKAGE_BUILD_ROOT := build - -DAFNY_VERIFY ?= 1 - -# Currently, the name of the target that verifies an individual Dafny file is -# just the path of that file. This design is awkward because the files already -# exist and hence the targets have to be marked `.PHONY` (below); it would be -# better to add a prefix or suffix to the file path. However, by now, all our -# users are used to passing plain Dafny file paths as arguments, and it doesn't -# seem worth the disruption to change this. -$(SOURCES): restore-dafny clean-test-results - $(DOTNET) tool run dafny /dafnyVerify:$(DAFNY_VERIFY) $(DAFNY_ARGS) $@ - -verify: $(SOURCES) - -TEST_SOURCES := $(shell find $(TEST_SOURCEDIR) -name '*.dfy' | grep -v flycheck) -DAFNY_TEST_ARGS := /compile:4 /runAllTests:1 /noVerify - -$(TEST_SOURCES): restore-dafny - $(DOTNET) tool run dafny $(DAFNY_TEST_ARGS) $@ - -test: $(TEST_SOURCES) - -# The actual path of the output directory consists of the `/out` path plus -# `-java`. Dafny also generates a jar at the `/out` path plus `.jar`. We set -# the `DIFFTEST_*` variables below accordingly. 
-DIFFTEST_COMPILE_OUT := $(PACKAGE_BUILD_ROOT)/private/compile-difftest -DIFFTEST_JAVA_DIR := $(DIFFTEST_COMPILE_OUT)-java -DIFFTEST_JAR_FROM_DAFNY := $(DIFFTEST_COMPILE_OUT).jar -DIFFTEST_JAR_EXPORTED := $(PACKAGE_BUILD_ROOT)/lib/CedarDafny-difftest.jar -compile-difftest: restore-dafny - mkdir -p $(PACKAGE_BUILD_ROOT)/private - rm -rf $(DIFFTEST_COMPILE_OUT)* $(DIFFTEST_JAR_EXPORTED) - $(DOTNET) tool run dafny /noVerify /compile:2 /compileTarget:java \ - /out:$(DIFFTEST_COMPILE_OUT) difftest/main.dfy - mkdir -p $(PACKAGE_BUILD_ROOT)/lib - cp $(DIFFTEST_JAR_FROM_DAFNY) $(DIFFTEST_JAR_EXPORTED) - -all: verify test compile-difftest - -.PHONY: $(SOURCES) $(TEST_SOURCES) verify compile-difftest all test diff --git a/cedar-dafny/README.md b/cedar-dafny/README.md deleted file mode 100644 index 0925b4bc3..000000000 --- a/cedar-dafny/README.md +++ /dev/null @@ -1,33 +0,0 @@ -# Cedar Dafny - -This folder contains the Dafny formalization of, and proofs about, Cedar. - -## Key definitions - -Definitional engine ([`def/*`](def/)) - -* [`Evaluator.interpret`](def/engine.dfy#L81) returns the result of evaluating an expression -* [`Authorizer.evaluate`](def/engine.dfy#L37) returns the result of evaluating a policy -* [`Authorizer.isAuthorized`](def/engine.dfy#L69) checks whether a request is allowed or denied given the current policy store - -Definitional validator ([`validation/*`](validation)) - -* [`Typechecker.typecheck`](validation/typechecker.dfy#L648) checks if an expression matches a particular type according to permissive typechecking -* [`StrictTypechecker.typecheck`](validation/strict.dfy#L129) checks if an expression matches a particular type according to strict typechecking -* [`Validator.validate`](validation/validator.dfy#L103) validates a set of policies - -## Verified properties - -Basic theorems ([`thm/basic.dfy`](thm/basic.dfy)) - -* If some forbid policy is satisfied, then the request is denied. 
-* A request is allowed only if it is explicitly permitted (i.e., there is at least one permit policy that is satisfied). -* If not explicitly permitted, a request is denied. - -Sound policy slicing ([`thm/slicing.dfy`](thm/slicing.dfy) and [`thm/pslicing.dfy`](thm/pslicing.dfy)) - -* Given a _sound slice_, the result of authorization is the same with the slice as it is with the full store. - -Type soundness ([`validation/thm/toplevel.dfy`](validation/thm/toplevel.dfy)) - -* If an expression is well-typed according to the typechecker, then either evaluation returns a value of that type or it returns an error of type `EntityDoesNotExist` or `ExtensionError`. All other errors are impossible. diff --git a/cedar-dafny/def/all.dfy b/cedar-dafny/def/all.dfy deleted file mode 100644 index 834881f0f..000000000 --- a/cedar-dafny/def/all.dfy +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -include "std.dfy" -include "base.dfy" -include "ext.dfy" -include "ext/fun.dfy" -include "ext/decimal.dfy" -include "ext/ipaddr.dfy" -include "core.dfy" -include "templates.dfy" -include "wildcard.dfy" -include "engine.dfy" -include "util.dfy" diff --git a/cedar-dafny/def/base.dfy b/cedar-dafny/def/base.dfy deleted file mode 100644 index e824407ed..000000000 --- a/cedar-dafny/def/base.dfy +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -include "std.dfy" - -module def.base { - import opened std - - // ----- Identifiers and names ----- // - - // Identifier for use in record fields, etc. - datatype Id = Id(id: string) - - // Name, which is an identifier along with optional namespaces. - // Valid names include `foo` (which has no namespace), `foo::bar`, - // `foo::bar::baz`, etc. - datatype Name = Name(id: Id, path: seq) { - static function fromId(id: Id): Name { - Name(id, []) - } - - static function fromStr(str: string): Name { - Name.fromId(Id(str)) - } - } - - // ----- Errors and Cedar-specific result type ----- // - - // There are four kinds of error values related to extension types: - // CallStyleError, ArityMismatchError, NoSuchFunctionError, and - // ExtensionError. The first three can be detected statically, through - // validation. 
The fourth is the abstract, catch-all error that represents all - // runtime errors thrown by extension functions, which cannot be prevented - // statically (e.g., string input parsing errors). - datatype Error = - EntityDoesNotExist | - AttrDoesNotExist | - TypeError | - ArityMismatchError | - NoSuchFunctionError | - ExtensionError - - // Customization of the standard Result type for concrete evaluation: the - // error type is fixed to Error in Result, and the value type is fixed to the - // unit type in UnitResult. We also introduce convenience functions Ok and Err - // that let us construct Result values without having to qualify the names. - type Result = std.Result - type UnitResult = Result<()> - - function Ok(v: T): Result { - Result.Ok(v) - } - - function Err(v: Error): Result { - Result.Err(v) - } - - // ----- Generic type coercions ----- // - - // A generic way to coerce a base type to a wrapper type and back. - datatype Coerce = - Coerce( - wrap: Base -> Wrapper, - unwrap: Wrapper -> Result) - { - ghost predicate wellFormed() { - (forall b: Base :: - unwrap(wrap(b)) == Ok(b)) && - (forall w: Wrapper | unwrap(w).Ok? :: - wrap(unwrap(w).Extract()) == w) && - (forall w: Wrapper | unwrap(w).Err? :: - forall b: Base :: wrap(b) != w) - } - - static function compose(c: Coerce, c': Coerce): (res: Coerce) - ensures (c.wellFormed() && c'.wellFormed()) ==> res.wellFormed() - { - var wrap := (t: T) => c'.wrap(c.wrap(t)); - var unwrap := - (w: Wrapper) => - var b :- c'.unwrap(w); - c.unwrap(b); - - Coerce(wrap, unwrap) - } - } -} diff --git a/cedar-dafny/def/core.dfy b/cedar-dafny/def/core.dfy deleted file mode 100644 index 56ba0fc6b..000000000 --- a/cedar-dafny/def/core.dfy +++ /dev/null @@ -1,291 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -include "base.dfy" -include "ext.dfy" - -// Datatypes used by the reference (definitional) Cedar authorization -// engine. - -module def.core { - import opened base - import ext - - // ----- Pattern literals ----- // - - // Represents an element in a pattern literal (i.e., the RHS of the `like` operator) - // A pattern element is either a char literal or a wildcard star. - datatype PatElem = Star | JustChar(char) - - type Pattern = seq - - // ----- Values ----- // - - datatype EntityType = EntityType(id: Name) { - // CedarCore has an EntityType with "concrete" and "unspecified" - // alternatives, but making that change right now would break too much of - // CedarDafny. Instead, in places where we need to handle "unspecified - // entities" specially, we represent them using this sentinel type name. In - // places where we can get away with treating unspecified entities the same - // as others, we do. - static const UNSPECIFIED := EntityType(Name.fromStr("")) - } - - datatype EntityUID = EntityUID(ty: EntityType, eid: string) - - // We specify field names in Primitive and Value for convenience of extracting - // a value after calling typeCheck with a single expected type. - datatype Primitive = - Bool(b: bool) | - Int(i: int) | - String(s: string) | - EntityUID(uid: EntityUID) - - datatype Value = - Primitive(primitive: Primitive) | - Set(s: set) | - Record(record: Record) | - Extension(ex: ext.Value) - { - // Conveniences to reduce boilerplate. 
- static function Bool(b: bool): Value { - Primitive(Primitive.Bool(b)) - } - static const TRUE := Bool(true) - static const FALSE := Bool(false) - - static function Int(i: int): Value { - Primitive(Primitive.Int(i)) - } - - static function String(s: string): Value { - Primitive(Primitive.String(s)) - } - - static function EntityUID(uid: EntityUID): Value { - Primitive(Primitive.EntityUID(uid)) - } - - static function Ext(v: ext.Value): Value { - Value.Extension(v) - } - - // Dynamic conversion from a Value to a wrapped type. - // We're making these conversions static so that we can - // use them as first-class functions. - static function asBool(v: Value): Result { - match v { - case Primitive(Bool(b)) => Ok(b) - case _ => Err(TypeError) - } - } - - static function asInt(v: Value): Result { - match v { - case Primitive(Int(i)) => Ok(i) - case _ => Err(TypeError) - } - } - - static function asString(v: Value): Result { - match v { - case Primitive(String(s)) => Ok(s) - case _ => Err(TypeError) - } - } - - static function asEntity(v: Value): Result { - match v { - case Primitive(EntityUID(e)) => Ok(e) - case _ => Err(TypeError) - } - } - - static function asSet(v: Value): Result> { - match v { - case Set(s) => Ok(s) - case _ => Err(TypeError) - } - } - - static function asRecord(v: Value): Result { - match v { - case Record(r) => Ok(r) - case _ => Err(TypeError) - } - } - - static function asExt(v: Value): Result { - match v { - case Extension(e) => Ok(e) - case _ => Err(TypeError) - } - } - } - - type Attr = string - type Record = map - - const coerce: ext.fun.Coercions := - ext.fun.Coercions( - Coerce(Value.Bool, Value.asBool), - Coerce(Value.Int, Value.asInt), - Coerce(Value.String, Value.asString), - Coerce(Value.Ext, Value.asExt)) - - const extFuns: map> := ext.register(coerce) - - // ----- Expressions ----- // - - datatype Var = Principal | Action | Resource | Context - - datatype UnaryOp = - Not | - Neg | MulBy(i: int) | - Like(p: Pattern) | - Is(ety: 
EntityType) - - datatype BinaryOp = - Eq | In | - Less | LessEq | Add | Sub | - Contains | ContainsAll | ContainsAny - - datatype Expr = - PrimitiveLit(Primitive) | - Var(Var) | - If(Expr, Expr, Expr) | - And(Expr, Expr) | // shortcircuiting && - Or(Expr, Expr) | // shortcircuiting || - UnaryApp(UnaryOp, Expr) | - BinaryApp(BinaryOp, Expr, Expr) | - GetAttr(Expr, Attr) | - HasAttr(Expr, Attr) | - Set(seq) | - Record(fvs: seq<(Attr, Expr)>) | - Call(name: Name, args: seq) - - // ----- Policies, requests, stores, and responses ----- // - - datatype Effect = Permit | Forbid - - datatype PolicyID = PolicyID(id: string) - - datatype Policy = Policy( - effect: Effect, - principalScope: PrincipalScope, - actionScope: ActionScope, - resourceScope: ResourceScope, - condition: Expr) - { - function toExpr(): Expr { - Expr.And( - principalScope.toExpr(), - Expr.And( - actionScope.toExpr(), - Expr.And( - resourceScope.toExpr(), - condition))) - } - } - - datatype PrincipalScope = PrincipalScope(scope: Scope) - { - function toExpr(): Expr { - scope.toExpr(Var.Principal) - } - } - - datatype ResourceScope = ResourceScope(scope: Scope) - { - function toExpr(): Expr { - scope.toExpr(Var.Resource) - } - } - - datatype ActionScope = ActionScope(Scope) | ActionInAny(seq) - { - function toExpr(): Expr { - match this { - case ActionScope(scope) => scope.toExpr(Var.Action) - case ActionInAny(es) => - var exprs := seq(|es|, - i requires 0 <= i < |es| => - PrimitiveLit(Primitive.EntityUID(es[i]))); - BinaryApp(BinaryOp.In, Var(Var.Action), Expr.Set(exprs)) - } - } - } - - datatype Scope = - Any | - Eq(entity: EntityUID) | - In(entity: EntityUID) | - Is(ety: EntityType) | - IsIn(ety:EntityType, entity: EntityUID) - { - function toExpr(v: Var): Expr { - match this { - case Any => PrimitiveLit(Primitive.Bool(true)) - case Eq(e) => BinaryApp(BinaryOp.Eq, Var(v), PrimitiveLit(Primitive.EntityUID(e))) - case In(e) => BinaryApp(BinaryOp.In, Var(v), PrimitiveLit(Primitive.EntityUID(e))) - case 
Is(ety) => UnaryApp(UnaryOp.Is(ety), Var(v)) - case IsIn(ety, e) => - And(UnaryApp(UnaryOp.Is(ety), Var(v)), - BinaryApp(BinaryOp.In, Var(v), PrimitiveLit(Primitive.EntityUID(e)))) - } - } - } - - datatype Request = - Request(principal: EntityUID, - action: EntityUID, - resource: EntityUID, - context: Record) - - datatype EntityData = EntityData(attrs: Record, ancestors: set) - - datatype EntityStore = EntityStore( - entities: map) - { - // Can also be used just to test whether an entity exists in the store. - function getEntityAttrs(uid: EntityUID): base.Result { - if uid in entities.Keys then - Ok(entities[uid].attrs) - else - Err(EntityDoesNotExist) - } - - predicate entityIn(child: EntityUID, ancestor: EntityUID) - requires child in entities.Keys - { - ancestor in entities[child].ancestors - } - } - - // Note: PolicyStore previously had an `overrides` field and might have it - // again in the future. To reduce code churn, we aren't collapsing the - // datatype to a type alias. - datatype PolicyStore = PolicyStore( - policies: map) - - datatype Store = Store(entities: EntityStore, policies: PolicyStore) - - // ----- Authorization response ----- // - - datatype Response = Response(decision: Decision, policies: set) - - datatype Decision = Allow | Deny -} diff --git a/cedar-dafny/def/engine.dfy b/cedar-dafny/def/engine.dfy deleted file mode 100644 index 4479eab88..000000000 --- a/cedar-dafny/def/engine.dfy +++ /dev/null @@ -1,284 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -include "base.dfy" -include "core.dfy" -include "wildcard.dfy" - -module def.engine { - import opened base - import opened core - import opened wildcard - - datatype Authorizer = Authorizer(request: Request, store: Store) { - - // Only isAuthorized is considered a public API, but we have to expose the - // intermediate steps in order to write intermediate tests so that we can - // write a test for isAuthorized without exceeding the limit on how much - // Dafny will evaluate in one step. - - function evaluator(): Evaluator { - Evaluator(request, store.entities) - } - - function evaluate(pid: PolicyID): Result - requires pid in store.policies.policies - { - evaluator().interpret(store.policies.policies[pid].toExpr()) - } - - predicate satisfied(pid: PolicyID) - requires pid in store.policies.policies - { - evaluate(pid) == Ok(Value.TRUE) - } - - function satisfiedPolicies(): set { - set pid | pid in store.policies.policies.Keys && - satisfied(pid) - } - - function forbids(): set { - set pid | pid in satisfiedPolicies() && - store.policies.policies[pid].effect == Forbid - } - - // NOTE: Now that overrides have been removed for the time being, `permits` - // has been redefined to return _all_ permit policies that are in force, not - // only those that override all forbid policies in force. This is - // consistent with the definitions in the version of the language - // specification without overrides. 
- function permits(): set { - set pid | pid in satisfiedPolicies() && - store.policies.policies[pid].effect == Permit - } - - function isAuthorized(): Response { - var f := forbids(); - var p := permits(); - if f == {} && p != {} then - Response(Allow, p) - else - Response(Deny, f) - } - } - - datatype Evaluator = Evaluator(request: Request, store: EntityStore) { - - function interpret(expr: Expr): Result { - match expr { - case PrimitiveLit(p) => Ok(Primitive(p)) - case Var(v) => - match v { - case Principal => Ok(Value.EntityUID(request.principal)) - case Action => Ok(Value.EntityUID(request.action)) - case Resource => Ok(Value.EntityUID(request.resource)) - case Context => Ok(Value.Record(request.context)) - } - case If(e_cond, e_true, e_false) => - var v_cond :- interpret(e_cond); - var b_cond :- Value.asBool(v_cond); - if b_cond then interpret(e_true) else interpret(e_false) - case And(left, right) => - interpretShortcircuit(expr, left, right, false) - case Or(left, right) => - interpretShortcircuit(expr, left, right, true) - case UnaryApp(op, e) => - var v :- interpret(e); - applyUnaryOp(op, v) - case BinaryApp(op, e1, e2) => - var v1 :- interpret(e1); - var v2 :- interpret(e2); - applyBinaryOp(op, v1, v2) - case GetAttr(e, a) => - var v :- interpret(e); - var r :- expectRecordDerefEntity(v, false); - if a in r.Keys then Ok(r[a]) else Err(AttrDoesNotExist) - case HasAttr(e, a) => - var v :- interpret(e); - var r :- expectRecordDerefEntity(v, true); - Ok(Value.Bool(a in r.Keys)) - case Set(es) => - var vs :- interpretSet(es); - Ok(Value.Set(vs)) - case Record(es) => - var fvs :- interpretRecord(es); - Ok(Value.Record(fvs)) - case Call(name, es) => - var args :- interpretList(es); - applyExtFun(name, args) - } - } - - static function applyExtFun(name: Name, args: seq): Result { - if name in extFuns.Keys - then - var fn := extFuns[name]; - fn.fun(args) - else Err(NoSuchFunctionError) - } - - // We allow repeated keys in Record literal definitions, and take the 
last - // value given for a field. For example, { a: 1, b: 2, a: 3 } evaluates to - // the record {b: 2, a: 3 }. We do not throw an error in the case of - // repeated field IDs, this allows for duplicate keys in map literals - function interpretRecord(es: seq<(Attr,Expr)>): Result> { - if es == [] then - Ok(map[]) - else - var k := es[0].0; - var v :- interpret(es[0].1); - var m :- interpretRecord(es[1..]); - if k in m.Keys then // If the same field is repeated later in the record, - Ok(m) // we give that occurrence priority and ignore this one. - else - Ok(m[k := v]) - } - - // We allow repeated elements in Set literal definitions, and drop - // duplicates. For example, { 1, 2, 3, 2 } evaluates to the Set {1, 2, 3}. - function interpretSet(es: seq): Result> { - if es == [] then - Ok({}) - else - var head_v :- interpret(es[0]); - var tail_vs :- interpretSet(es[1..]); - Ok({head_v} + tail_vs) - } - - function interpretList(es: seq): Result> { - if es == [] then - Ok([]) - else - var head_v :- interpret(es[0]); - var tail_vs :- interpretList(es[1..]); - Ok([head_v] + tail_vs) - } - - // Evaluates each expression in order. If left returns short or errors, then - // stop and return short or error. Otherwise, returns the result of right. - // This is used to implement && with short = false and || with short = true. - // The ghost variable expr is required to pass the termination check. - function interpretShortcircuit(ghost expr: Expr, left: Expr, right: Expr, short: bool): Result - requires left < expr && right < expr - { - var l :- interpret(left); - var b :- Value.asBool(l); - if b == short - then Ok(l) - else - var r :- interpret(right); - var _ :- Value.asBool(r); - Ok(r) - } - - function expectRecordDerefEntity(v: Value, treatMissingAsEmpty: bool): Result { - if v.Record? - then Ok(v.record) - else - var uid :- Value.asEntity(v); - var res := store.getEntityAttrs(uid); - if res.Err? 
&& treatMissingAsEmpty then Ok(map[]) else res - } - - function applyUnaryOp(uop: UnaryOp, x: Value): Result { - match uop { - case Not => - var b :- Value.asBool(x); - Ok(Value.Bool(!b)) - case Neg => - applyIntUnaryOp(a => -a, x) - case MulBy(factor) => - applyIntUnaryOp(a => factor * a, x) - case Like(p) => - var s :- Value.asString(x); - Ok(Value.Bool(wildcardMatch(s, p))) - case Is(ety) => - var e :- Value.asEntity(x); - Ok(Value.Bool(e.ty == ety)) - } - } - - function applyIntUnaryOp(f: int -> int, x: Value): Result { - var i :- Value.asInt(x); - Ok(Value.Int(f(i))) - } - - function applyIntBinaryOp(f: (int, int) -> int, x: Value, y: Value): Result { - var xi :- Value.asInt(x); - var yi :- Value.asInt(y); - Ok(Value.Int(f(xi, yi))) - } - - function applyIntBinaryPred(f: (int, int) -> bool, x: Value, y: Value): Result { - var xi :- Value.asInt(x); - var yi :- Value.asInt(y); - Ok(Value.Bool(f(xi, yi))) - } - - function applySetBinaryPred(f: (set, set) -> bool, x: Value, y: Value): Result { - var xs :- Value.asSet(x); - var ys :- Value.asSet(y); - Ok(Value.Bool(f(xs, ys))) - } - - predicate entityInEntity(x: EntityUID, y: EntityUID) { - x == y || - (store.getEntityAttrs(x).Ok? && - store.entityIn(x, y)) - } - - // Returns true iff ys contains at least one element y such that - // entityInEntity(x, y) holds. - predicate entityInSetOfEntities(x: EntityUID, ys: set) - { - exists y | y in ys :: entityInEntity(x, y) - } - - function checkEntitySet(ys: set): Result> - { - if forall y | y in ys :: Value.asEntity(y).Ok? 
then - Ok(set y | y in ys :: y.primitive.uid) - else - Err(TypeError) - } - - function applyBinaryOp(bop: BinaryOp, x: Value, y: Value): Result { - match bop { - case Eq => Ok(Value.Bool(x == y)) - case Less => applyIntBinaryPred((a, b) => a < b, x, y) - case LessEq => applyIntBinaryPred((a, b) => a <= b, x, y) - case Add => applyIntBinaryOp((a, b) => a + b, x, y) - case Sub => applyIntBinaryOp((a, b) => a - b, x, y) - case In => - var e :- Value.asEntity(x); - if y.Set? then - var uids :- checkEntitySet(y.s); - Ok(Value.Bool(entityInSetOfEntities(e, uids))) - else - var g :- Value.asEntity(y); - Ok(Value.Bool(entityInEntity(e, g))) - case Contains => - var s :- Value.asSet(x); - Ok(Value.Bool(y in s)) - case ContainsAll => - applySetBinaryPred((xs, ys) => xs >= ys, x, y) - case ContainsAny => - applySetBinaryPred((xs, ys) => xs * ys != {}, x, y) - } - } - } -} diff --git a/cedar-dafny/def/ext.dfy b/cedar-dafny/def/ext.dfy deleted file mode 100644 index 5fc8a0c0d..000000000 --- a/cedar-dafny/def/ext.dfy +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -include "base.dfy" -include "ext/fun.dfy" -include "ext/decimal.dfy" -include "ext/ipaddr.dfy" - -module def.ext { - import opened base - import opened fun - import dec = decimal - import ip = ipaddr - - datatype Value = - Decimal(d: dec.Decimal) | - IPAddr(ip: ip.IPAddr) - { - static function asDecimal(v: Value): Result { - if v.Decimal? then Ok(v.d) else Err(TypeError) - } - - static function asIPAddr(v: Value): Result { - if v.IPAddr? then Ok(v.ip) else Err(TypeError) - } - - } - - // Returns the map from extension function names to their implementations. - // Note that we're currently assuming that function names are unique. - // This might have to be revisited in the future if we allow different - // extension types to contain functions with the same name. In that case, - // we'll want to specify dispatching mechanisms for function and method-style - // invocations. - function register(coerce: Coercions): map> - { - var dec2val := Coerce((d: dec.Decimal) => Value.Decimal(d), Value.asDecimal); - var ip2val := Coerce((ip: ip.IPAddr) => Value.IPAddr(ip), Value.asIPAddr); - - dec.DecimalFunctions.register(Coercions.compose(dec2val, coerce)) + - ip.IPAddrFunctions.register(Coercions.compose(ip2val, coerce)) - } - -} diff --git a/cedar-dafny/def/ext/decimal.dfy b/cedar-dafny/def/ext/decimal.dfy deleted file mode 100644 index ffd410396..000000000 --- a/cedar-dafny/def/ext/decimal.dfy +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -include "../base.dfy" -include "fun.dfy" -include "parser.dfy" - -// Decimal extension type -module def.ext.decimal { - import opened base - import opened fun - import opened core - import opened parseDecimal - - type Decimal = core.Decimal - - type Coercions = fun.Coercions - - datatype DecimalFunctions = DecimalFunctions(coerce: Coercions) - { - - // Returns the map from Decimal extension function names to their implementations. - static function register(coerce: Coercions): map> - { - var fns := DecimalFunctions(coerce); - map[ - Name.fromStr("decimal") := ExtFun(fns.decimal), - Name.fromStr("lessThan") := ExtFun(fns.lt), - Name.fromStr("lessThanOrEqual") := ExtFun(fns.lte), - Name.fromStr("greaterThan") := ExtFun(fns.gt), - Name.fromStr("greaterThanOrEqual") := ExtFun(fns.gte) - ] - } - - function decimal(args: seq): Result { - var s :- checkUnary(args, coerce.String); - match Parse(s) { - case Some(d) => Ok(coerce.fromExt(d)) - case None => Err(ExtensionError) - } - } - - function lt(args: seq): Result { - var (d0, d1) :- checkBinary(args, coerce.Ext); - Ok(coerce.fromBool(d0.i < d1.i)) - } - - function lte(args: seq): Result { - var (d0, d1) :- checkBinary(args, coerce.Ext); - Ok(coerce.fromBool(d0.i <= d1.i)) - } - - function gt(args: seq): Result { - var (d0, d1) :- checkBinary(args, coerce.Ext); - Ok(coerce.fromBool(d0.i > d1.i)) - } - - function gte(args: seq): Result { - var (d0, d1) :- checkBinary(args, coerce.Ext); - Ok(coerce.fromBool(d0.i >= d1.i)) - } - } -} - -module def.ext.decimal.core { - // A decimal number consists of an integer part and a fractional part. - // The former is the integer number before the decimal point. - // The latter is the decimal number minus its integer part. - // For instance, 10.234 is a decimal number. 
Its integer part is 10 and its fractional part is 0.234 - // We restrict the number of the digits after the decimal point to 4. - - const DIGITS := 4 - const i64_MIN := -0x8000_0000_0000_0000 - const i64_MAX := 0x7fff_ffff_ffff_ffff - // The internal representation of a decimal - newtype i64 = i: int | i64_MIN <= i <= i64_MAX - datatype Decimal = Decimal(i: i64) -} - -module def.ext.decimal.parseDecimal { - import opened utils.parser - import opened std - import opened core - export - provides - Parse, - std, - core - - function ParseComponents(s: string): Option<(string, string)> - { - var (ls, r) :- ParseDecStr(s); - var rr :- ParseChar('.', r); - var (rs, rrr) :- ParseDecStr(rr); - var _ :- EoS(rrr); - if 0 < |rs| <= DIGITS - then Some((ls, rs)) - else None - } - - function FillZeros(n: nat, zs: nat): nat { - if zs == 0 then n else 10 * FillZeros(n, zs - 1) - } - - function ParseNat(s: string): Option { - var (ls, rs) :- ParseComponents(s); - var i := FillZeros(DecStrToNat(ls+rs), DIGITS - |rs|); - Some(i) - } - - function Parse(s: string): Option { - match ParseChar('-', s) { - case Some(s') => - var n :- ParseNat(s'); - var i := -(n as int); - if i < i64_MIN then None else Some(Decimal.Decimal(i as i64)) - case None => - var n :- ParseNat(s); - var i := n as int; - if i > i64_MAX then None else Some(Decimal.Decimal(i as i64)) - } - } - - lemma ParseDigitsAndDot(s1: string, s2: string) - requires |s1| > 0 - requires forall i | 0 <= i < |s1| :: '0' <= s1[i] <= '9' - ensures ParseDecStr(s1+"."+s2).Some? && ParseDecStr(s1+"."+s2).value.0 == s1 && ParseDecStr(s1+"."+s2).value.1 == "."+s2 - { - if |s1| == 1 { - assert (s1+"."+s2)[1..] 
== "."+s2; - assert ParseDecStr("."+s2).None?; - } else { - ParseDecAll(s1); - ParseDigitsAndDot(s1[1..],s2); - assert s1+"."+s2 == [s1[0]]+(s1[1..]+"."+s2); - } - } - - lemma ParsePosNumStr(l: string, r: string) - requires |l| > 0 - requires forall i | 0 <= i < |l| :: '0' <= l[i] <= '9' - requires |r| > 0 - requires forall i | 0 <= i < |r| :: '0' <= r[i] <= '9' - requires 0 < |r| <= DIGITS - ensures ParseComponents(l+"."+r).Some? && ParseComponents(l+"."+r).value.0 == l && ParseComponents(l+"."+r).value.1 == r - { - ParseDecAll(l); - ParseDecAll(r); - ParseDigitsAndDot(l,r); - } -} diff --git a/cedar-dafny/def/ext/fun.dfy b/cedar-dafny/def/ext/fun.dfy deleted file mode 100644 index 62ce67783..000000000 --- a/cedar-dafny/def/ext/fun.dfy +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -include "../base.dfy" - -module def.ext.fun { - - import opened base - - datatype ExtFun = ExtFun(fun: seq -> Result) - - datatype Coercions = - Coercions( - Bool: Coerce, - Int: Coerce, - String: Coerce, - Ext: Coerce) - { - ghost predicate wellFormed() { - Bool.wellFormed() && - Int.wellFormed() && - String.wellFormed() && - Ext.wellFormed() - } - - static function compose(c: Coerce, cs: Coercions): (res: Coercions) - ensures (c.wellFormed() && cs.wellFormed()) ==> res.wellFormed() - { - Coercions(cs.Bool, cs.Int, cs.String, Coerce.compose(c, cs.Ext)) - } - - // Convenience functions for calling coercion wrapper. - function fromBool(arg: bool): T { Bool.wrap(arg) } - function fromInt(arg: int): T { Int.wrap(arg) } - function fromString(arg: string): T { String.wrap(arg) } - function fromExt(arg: E): T { Ext.wrap(arg)} - - function toBool(arg: T): Result { Bool.unwrap(arg) } - function toInt(arg: T): Result { Int.unwrap(arg) } - function toString(arg: T): Result { String.unwrap(arg) } - function toExt(arg: T): Result { Ext.unwrap(arg) } - } - - function checkArity(args: seq, expected: nat): (res: UnitResult) - ensures res.Ok? ==> |args| == expected - { - if |args| == expected - then Ok(()) - else Err(ArityMismatchError) - } - - function checkUnary(args: seq, expected: Coerce): (res: Result) - ensures - res.Ok? <==> - (|args| == 1 && expected.unwrap(args[0]).Ok?) - { - var _ :- checkArity(args, 1); - var a0 :- expected.unwrap(args[0]); - Ok(a0) - } - - function checkBinary(args: seq, expected: Coerce): (res: Result<(B, B)>) - ensures - res.Ok? <==> - (|args| == 2 && expected.unwrap(args[0]).Ok? && expected.unwrap(args[1]).Ok?) 
- { - var _ :- checkArity(args, 2); - var a0 :- expected.unwrap(args[0]); - var a1 :- expected.unwrap(args[1]); - Ok((a0, a1)) - } - - -} diff --git a/cedar-dafny/def/ext/ipaddr.dfy b/cedar-dafny/def/ext/ipaddr.dfy deleted file mode 100644 index 234a166cc..000000000 --- a/cedar-dafny/def/ext/ipaddr.dfy +++ /dev/null @@ -1,566 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -include "../base.dfy" -include "fun.dfy" -include "parser.dfy" - -// IpAdddr extension type -module def.ext.ipaddr { - import opened base - import opened fun - import opened core - import opened parseIPAddr - - type IPAddr = core.IPNet - - type Coercions = fun.Coercions - - datatype IPAddrFunctions = IPAddrFunctions(coerce: Coercions) - { - - // Returns the map from IPAddr extension function names to their implementations. 
- static function register(coerce: Coercions): map> - { - var fns := IPAddrFunctions(coerce); - map[ - Name.fromStr("ip") := ExtFun(fns.ip), - Name.fromStr("isIpv4") := ExtFun(fns.isIpv4), - Name.fromStr("isIpv6") := ExtFun(fns.isIpv6), - Name.fromStr("isLoopback") := ExtFun(fns.isLoopback), - Name.fromStr("isMulticast") := ExtFun(fns.isMulticast), - Name.fromStr("isInRange") := ExtFun(fns.isInRange) - ] - } - - function ip(args: seq): Result { - var s :- checkUnary(args, coerce.String); - match parse(s) { - case Some(ip) => Ok(coerce.fromExt(ip)) - case None => Err(ExtensionError) - } - } - - function isIpv4(args: seq): Result { - var ip :- checkUnary(args, coerce.Ext); - Ok(coerce.fromBool(ip.isV4())) - } - - function isIpv6(args: seq): Result { - var ip :- checkUnary(args, coerce.Ext); - Ok(coerce.fromBool(ip.isV6())) - } - - function isLoopback(args: seq): Result { - var ip :- checkUnary(args, coerce.Ext); - Ok(coerce.fromBool(ip.isLoopback())) - } - - function isMulticast(args: seq): Result { - var ip :- checkUnary(args, coerce.Ext); - Ok(coerce.fromBool(ip.isMulticast())) - } - - function isInRange(args: seq): Result { - var (ip0, ip1) :- checkBinary(args, coerce.Ext); - Ok(coerce.fromBool(ip0.inRange(ip1))) - } - } -} - -module def.ext.ipaddr.core { - // The 16 bit number in each group of IPv6 addresses - newtype numV6 = x: nat | x <= 0xffff - // The 8 bit number in each group of IPv4 addresses - newtype numV4 = x: nat | x <= 0xff - - // The network address bit width - const V6_SIZE := 128 - const V4_SIZE := 32 - newtype prefixV6 = x: nat | x <= V6_SIZE - newtype prefixV4 = x: nat | x <= V4_SIZE - - // Normalized address space because we support IPv4 and IPv6 address comparison - // newtype u128 = i: nat | i < 0x1_0000_0000_0000_0000_0000_0000_0000_0000 - type u128 = nat - type Value = u128 - - // Constants - const LOOP_BACK_ADDRESS_V4 := IPv4Addr.Addr(127, 0, 0, 0) - const LOOP_BACK_NET_V4 := V4(LOOP_BACK_ADDRESS_V4, 8) - const LOOP_BACK_ADDRESS_V6 := 
IPv6Addr.Addr(0, 0, 0, 0, 0, 0, 0, 1) - const LOOP_BACK_NET_V6 := V6(LOOP_BACK_ADDRESS_V6, 128) - const MULTICAST_ADDRESS_V4 := IPv4Addr.Addr(224, 0, 0, 0) - const MULTICAST_NET_V4 := V4(MULTICAST_ADDRESS_V4, 4) - const MULTICAST_ADDRESS_V6 := IPv6Addr.Addr(65280, 0, 0, 0, 0, 0, 0, 0) - const MULTICAST_NET_V6 := V6(MULTICAST_ADDRESS_V6, 8) - - // IPv4 address: a 32 bit number partitioned into 4 groups. - // a0 is most signifcant and a3 is the least significant. - // In other words, the number represented is a0++a1++a2++a3 - datatype IPv4Addr = - Addr( - a0: numV4, - a1: numV4, - a2: numV4, - a3: numV4 - ) { - function getAddrValue(): Value { - ((a0 as nat)*0x100_0000 + - (a1 as nat)*0x1_0000 + - (a2 as nat)*0x100 + - (a3 as nat)) as u128 - } - } - - // TODO: prove getaddrValue . valueToIPv4Addr == id - function valueToIPv4Addr(v: Value): IPv4Addr { - var a3:= (v % 0x100) as numV4; - var a2 := ((v / 0x100) % 0x100) as numV4; - var a1 := ((v / 0x1_0000) % 0x100) as numV4; - var a0 := ((v / 0x100_0000) % 0x100) as numV4; - IPv4Addr.Addr(a0, a1, a2, a3) - } - - // IPv6 address: a 128 bit number partitioned into 8 groups - // a0 is most signifcant and a7 is the least significant. - // In other words, the number represented is a0++a1++a2++a3++a4++a5++a6++a7 - datatype IPv6Addr = - Addr( - a0: numV6, - a1: numV6, - a2: numV6, - a3: numV6, - a4: numV6, - a5: numV6, - a6: numV6, - a7: numV6 - ) { - function getAddrValue(): Value { - ((a0 as nat)*0x1_0000_0000_0000_0000_0000_0000_0000 + - (a1 as nat)*0x1_0000_0000_0000_0000_0000_0000 + - (a2 as nat)*0x1_0000_0000_0000_0000_0000 + - (a3 as nat)*0x1_0000_0000_0000_0000 + - (a4 as nat)*0x1_0000_0000_0000 + - (a5 as nat)*0x1_0000_0000 + - (a6 as nat)*0x1_0000 + - (a7 as nat)) as u128 - } - } - - // TODO: prove getaddrValue . 
valueToIPv4Addr == id - function valueToIPv6Addr(v: Value): IPv6Addr { - var a7 := (v % 0x1_0000) as numV6; - var a6 := ((v / 0x1_0000) % 0x1_0000) as numV6; - var a5 := ((v / 0x1_0000_0000) % 0x1_0000) as numV6; - var a4 := ((v / 0x1_0000_0000_0000) % 0x1_0000) as numV6; - var a3 := ((v / 0x1_0000_0000_0000_0000) % 0x1_0000) as numV6; - var a2 := ((v / 0x1_0000_0000_0000_0000_0000) % 0x1_0000) as numV6; - var a1 := ((v / 0x1_0000_0000_0000_0000_0000_0000) % 0x1_0000) as numV6; - var a0 := ((v / 0x1_0000_0000_0000_0000_0000_0000_0000) % 0x1_0000) as numV6; - IPv6Addr.Addr(a0, a1, a2, a3, a4, a5, a6, a7) - } - - datatype IPNet = - V6(addrV6: IPv6Addr, v6Prefix: prefixV6) | - V4(addrV4: IPv4Addr, v4Prefix: prefixV4) { - function normalize(): IPNet { - var v := this.applySubnetMask(); - match this { - case V6(addr, prefix) => V6(valueToIPv6Addr(v), prefix) - case V4(addr, prefix) => V4(valueToIPv4Addr(v), prefix) - } - } - - predicate isV4() { - V4? - } - - predicate isV6() { - V6? - } - - function getAddrValue(): Value { - match this { - case V6(addr, _) => addr.getAddrValue() - case V4(addr, _) => addr.getAddrValue() - } - } - - function getSubnetBitWidth(): nat { - if V6? then V6_SIZE - v6Prefix as nat else V4_SIZE - v4Prefix as nat - } - - // Getting the last n bits of a number x amounts to x >> n << n. 
- // (>> n) amounts to (/Powerof2(n)) and - // (<< n) amounts to (*Powerof2(n)) - function applySubnetMask(): Value { - var offset := powerOf2(getSubnetBitWidth()) as u128; - ((getAddrValue()/ offset) * offset) as u128 - } - - // i.e., 1 << n - function getSubnetSize(): Value { - powerOf2(getSubnetBitWidth()) as u128 - } - - predicate inRange(other: IPNet) { - match (this, other) { - case (V4(_, _), V6(_, _)) => false - case (V6(_, _), V4(_, _)) => false - case _ => - // lower value of the range - var tl := applySubnetMask(); - // higher value of the range - var th := tl + getSubnetSize() - 1; - // Likewise, values of the range other represents - var ol := other.applySubnetMask(); - var oh := ol + other.getSubnetSize() - 1; - // Interval inclusion test - oh >= th && tl >= ol - } - } - - predicate isLoopback() { - if V6? then inRange(LOOP_BACK_NET_V6) else inRange(LOOP_BACK_NET_V4) - } - - predicate isMulticast() { - if V6? then inRange(MULTICAST_NET_V6) else inRange(MULTICAST_NET_V4) - } - } - - function {:opaque} powerOf2(x: nat): (res: nat) - ensures res >= 1 - { - if x == 0 then 1 else 2*powerOf2(x - 1) - } -} - -module def.ext.ipaddr.parseIPAddr { - import opened utils.parser - import opened std - import opened core - - export - provides - parse, - std, - core - - // A segment of IPv6 address is either a 16 bit number or a "::" - datatype SegV6 = Num(n: numV6) | DC - - function parseDot(s: string): (res: Option) - { - ParseChar('.', s) - } - - function parseColon(s: string): (res: Option) - { - ParseChar(':', s) - } - - function parseSlash(s: string): (res: Option) - { - ParseChar('/', s) - } - - function parseCIDRV4(s: string): (res: Option<(nat, string)>) - ensures res.Some? 
==> res.value.0 <= V4_SIZE - { - var ns :- parseSlash(s); - match ParseDecStr(ns) { - case Some((ds, ns')) => - if 0 < |ds| <= 2 - then - var n := DecStrToNat(ds); - if n <= V4_SIZE && (ds[0] == '0' ==> (|ds| == 1 && n == 0)) then - Some((n, ns')) - else None - else None - case None => None - } - } - - // Parse a group of number in strict dotted decimal format - function parseNumV4(s: string): (res: Option<(numV4, string)>) - { - match ParseDecStr(s) { - case Some((ds, ns)) => - if 0 < |ds| <= 3 - then - var n := DecStrToNat(ds); - if n <= 0xff then - // Reference: https://github.com/rust-lang/rust/pull/86984 - if ds[0] == '0' ==> (|ds| == 1 && n == 0) then - Some((n as numV4, ns)) - else None - else None - else None - case None => None - } - } - - function parseSegsV4(s: string): (res: Option<(seq, string)>) - ensures res.Some? ==> |res.value.0| == 4 - { - var (n0, s1_) :- parseNumV4(s); - var s1 :- parseDot(s1_); - var (n1, s2_) :- parseNumV4(s1); - var s2 :- parseDot(s2_); - var (n2, s3_) :- parseNumV4(s2); - var s3 :- parseDot(s3_); - var (n3, s4_) :- parseNumV4(s3); - Some(([n0, n1, n2, n3], s4_)) - } - - function parseIPv4Net(s: string): (res: Option) - { - var (ds, ns) :- parseSegsV4(s); - match EoS(ns) { - case Some(_) => Some(V4(IPv4Addr.Addr(ds[0], ds[1], ds[2], ds[3]), V4_SIZE as prefixV4)) - case None => - var (sn, ns') :- parseCIDRV4(ns); - var _ :- EoS(ns'); - Some(V4(IPv4Addr.Addr(ds[0], ds[1], ds[2], ds[3]), sn as prefixV4)) - } - } - - function parseNumV6(s: string): (res: Option<(numV6, string)>) - { - match ParseHexStr(s) { - case Some((ds, ns)) => - if 0 < |ds| <= 4 then - HexPowerMonotonic(|ds|, 4); - Some((HexStrToNat(ds) as numV6, ns)) - else None - case None => None - } - } - - // Parse (":" digits)+ - function parseNumSegsV6'(s: string): Option<(seq, string)> - decreases |s| - { - if |s| == 0 then None - else - match parseColon(s) { - case Some(s') => - var (d, ns) :- parseNumV6(s'); - match parseNumSegsV6'(ns) { - case Some((ds, ns')) => 
Some(([d]+ds, ns')) - case None => Some(([d], ns)) - } - case None => None - } - } - - // Parse digits (":" digits)+ - function parseNumSegsV6(s: string): Option<(seq, string)> - { - var (d, ns) :- parseNumV6(s); - match parseNumSegsV6'(ns) { - case Some((ds, ns')) => Some(([d]+ds, ns')) - case None => Some(([d], ns)) - } - } - - function wrapNumSegs(ds: seq): (res: seq) - ensures |res| == |ds| && forall i | 0 <= i < |ds| :: res[i].Num? && res[i].n == ds[i] - { - if |ds| == 0 then [] - else [SegV6.Num(ds[0])] + wrapNumSegs(ds[1..]) - } - - ghost function countDC(s: seq): nat - { - if |s| == 0 then 0 else (if s[0].DC? then 1 else 0) + countDC(s[1..]) - } - - lemma WrapNumSegsNoDC(ds: seq) - ensures countDC(wrapNumSegs(ds)) == 0 - {} - - lemma Count0MeansNone(s: seq) - ensures countDC(s) == 0 ==> forall i | 0 <= i < |s| :: s[i].Num? - { - if |s| == 0 { - - } else { - Count0MeansNone(s[1..]); - } - } - - lemma CountDCLast(s: seq) - ensures countDC(wrapNumSegs(s)+[DC]) == 1 - { - if |s| == 0 { - - } else { - assert wrapNumSegs(s)+[DC] == wrapNumSegs([s[0]]) + (wrapNumSegs(s[1..]) + [DC]); - } - } - - lemma CountDCComp(l: seq, r: seq) - ensures countDC(l+r) == countDC(l)+countDC(r) - { - if |l| == 0 { - assert l + r == r; - } else { - assert l + r == [l[0]] + (l[1..] + r); - } - } - - lemma CountDCMid(l: seq, r: seq) - ensures countDC(wrapNumSegs(l)+[DC]+wrapNumSegs(r)) == 1 - { - CountDCLast(l); - WrapNumSegsNoDC(r); - CountDCComp(wrapNumSegs(l)+[DC], wrapNumSegs(r)); - } - - function parseSegsV6(s: string): (res: Option<(seq, string)>) - ensures res.Some? ==> countDC(res.value.0) <= 1 - ensures res.Some? 
==> |res.value.0| >= 1 - { - if |s| >= 2 && s[..2] == "::" then - match parseNumSegsV6(s[2..]) { - case Some((ds, ns)) => - WrapNumSegsNoDC(ds); - Some(([DC]+wrapNumSegs(ds), ns)) - case None => - Some(([DC], s[2..])) - } - else - match parseNumSegsV6(s) { - case Some((ds, ns)) => - if |ns| >= 2 && ns[..2] == "::" then - match parseNumSegsV6(ns[2..]) { - case Some((ds', ns')) => - CountDCMid(ds, ds'); - Some((wrapNumSegs(ds) + [DC] + wrapNumSegs(ds'), ns')) - case None => - CountDCLast(ds); - Some((wrapNumSegs(ds) + [DC], ns[2..])) - } - else - WrapNumSegsNoDC(ds); - Some((wrapNumSegs(ds), ns)) - case None => None - } - } - - function findDCIdx(segs: seq): (res: Option) - ensures res.Some? ==> res.value < |segs| - ensures res.Some? ==> segs[res.value].DC? - ensures res.None? ==> forall i | 0 <= i < |segs| :: segs[i].Num? - { - if |segs| == 0 - then None - else - if segs[0].DC? then Some(0) - else - var idx :- findDCIdx(segs[1..]); - Some(idx + 1) - } - - lemma CountDC1SepMeansNoDc(segs: seq) - requires countDC(segs) <= 1 - ensures findDCIdx(segs).Some? ==> countDC(segs[0..findDCIdx(segs).value]) == 0 && countDC(segs[findDCIdx(segs).value+1..]) == 0 - { - var idx := findDCIdx(segs); - if idx.None? { - - } else { - assert segs == segs[0..idx.value+1] + segs[idx.value+1..]; - CountDCComp(segs[0..idx.value+1], segs[idx.value+1..]); - assert countDC(segs[0..idx.value+1]) + countDC(segs[idx.value+1..]) == countDC(segs); - assert segs[0..idx.value+1] == segs[0..idx.value] + [segs[idx.value]]; - CountDCComp(segs[0..idx.value], [segs[idx.value]]); - assert countDC(segs[0..idx.value]) + countDC([segs[idx.value]]) == countDC(segs[0..idx.value+1]); - } - } - - function zeroSegs(n: nat): (res: seq) - decreases n - ensures |res| == n - ensures forall i | 0 <= i < n :: res[i].Num? && res[i].n == 0 - { - if n == 0 then [] else [SegV6.Num(0)] + zeroSegs(n - 1) - } - - function tryExpandSegs(segs: seq): (res: Option>) - requires countDC(segs) <= 1 - ensures res.Some? 
==> |res.value| == 8 - ensures res.Some? ==> forall i | 0 <= i < 8 :: res.value[i].Num? - { - if |segs| > 8 - then None - else - CountDC1SepMeansNoDc(segs); - var dcIdx := findDCIdx(segs); - match dcIdx { - case Some(idx) => - Count0MeansNone(segs[..idx]); - Count0MeansNone(segs[idx+1..]); - Some(segs[..idx] + zeroSegs(8 - |segs| + 1) + segs[idx+1..]) - case None => if |segs| == 8 then Some(segs) else None - } - } - - function parseCIDRV6(s: string): (res: Option<(nat, string)>) - ensures res.Some? ==> res.value.0 <= V6_SIZE - { - var ns :- parseSlash(s); - match ParseDecStr(ns) { - case Some((ds, ns')) => - if 0 < |ds| <= 3 then - var n := DecStrToNat(ds); - if n <= V6_SIZE && (ds[0] == '0' ==> (|ds| == 1 && n == 0)) then - Some((n, ns')) - else None - else None - case None => None - } - } - - function parseIPv6Net(s: string): Option - { - var (ds, ns) :- parseSegsV6(s); - match(EoS(ns)) { - case Some(_) => - var ds' :- tryExpandSegs(ds); - Some(V6(IPv6Addr.Addr(ds'[0].n, ds'[1].n, ds'[2].n, ds'[3].n, ds'[4].n, ds'[5].n, ds'[6].n, ds'[7].n), V6_SIZE as prefixV6)) - case None => - var (sn, ns') :- parseCIDRV6(ns); - var _ :- EoS(ns'); - var ds' :- tryExpandSegs(ds); - Some(V6(IPv6Addr.Addr(ds'[0].n, ds'[1].n, ds'[2].n, ds'[3].n, ds'[4].n, ds'[5].n, ds'[6].n, ds'[7].n), sn as prefixV6)) - } - } - - function parse(s: string): Option - { - // Is it V4? - match parseIPv4Net(s) { - case Some(ip) => Some(ip) - case None => - // No. Is it V6? - parseIPv6Net(s) - } - } -} diff --git a/cedar-dafny/def/ext/parser.dfy b/cedar-dafny/def/ext/parser.dfy deleted file mode 100644 index 2ed059a63..000000000 --- a/cedar-dafny/def/ext/parser.dfy +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -include "../std.dfy" - -module def.ext.utils.parser { - import opened std - - function ParseChar(c: char, s: string): (res: Option<(string)>) - { - if |s| == 0 then None - else if s[0] == c - then Some(s[1..]) - else None - } - - function EoS(s: string): (res: Option<()>) - { - if |s| == 0 then Some(()) else None - } - - function ParseDigits(s: string, pred: char -> bool): (res: Option<(string, string)>) - ensures res.Some? ==> |res.value.1| <= |s| - ensures res.Some? ==> |res.value.0| > 0 && forall i | 0 <= i < |res.value. 0| :: pred(res.value.0[i]) - { - if |s| == 0 then None - else if pred(s[0]) - then - match ParseDigits(s[1..], pred) { - case Some((ns, r)) => Some(([s[0]]+ns, r)) - case None => Some(([s[0]], s[1..])) - } - else None - } - - predicate IsHexDigit(x: char) - { - '0' <= x <= '9' || 'a' <= x <= 'f' || 'A' <= x <= 'F' - } - - predicate IsDecDigit(x: char) - { - '0' <= x <= '9' - } - - function ParseHexStr(s: string): (res: Option<(string, string)>) - ensures res.Some? ==> |res.value.1| <= |s| - ensures res.Some? ==> |res.value.0| > 0 && forall i | 0 <= i < |res.value.0| :: IsHexDigit(res.value.0[i]) - { - ParseDigits(s, IsHexDigit) - } - - function ParseDecStr(s: string): (res: Option<(string, string)>) - ensures res.Some? ==> |res.value.1| <= |s| - ensures res.Some? 
==> |res.value.0| > 0 && forall i | 0 <= i < |res.value.0| :: IsDecDigit(res.value.0[i]) - { - ParseDigits(s, IsDecDigit) - } - - ghost function HexPowerOf(n: nat): nat - decreases n - { - match n { - case 0 => 1 - case _ => 16*HexPowerOf(n-1) - } - } - - lemma HexPowerMonotonic(n1: nat, n2: nat) - ensures n1 < n2 ==> HexPowerOf(n1) < HexPowerOf(n2) - {} - - function HexStrToNat(s: string): (res: nat) - requires |s| > 0 && forall i | 0 <= i < |s| :: IsHexDigit(s[i]) - ensures res < HexPowerOf(|s|) - { - var ld := s[|s| - 1]; - var lo := - if 'a' <= ld <= 'f' - then (ld - 'a') as nat + 10 - else if 'A' <= ld <= 'F' - then (ld - 'A') as nat + 10 - else (ld - '0') as nat; - if |s| == 1 - then lo - else lo + 16 * HexStrToNat(s[0..|s| - 1]) - } - - function DecStrToNat(s: string): nat - requires |s| > 0 && forall i | 0 <= i < |s| :: IsDecDigit(s[i]) - { - var lo := (s[|s| - 1] - '0') as nat; - if |s| == 1 - then lo - else lo + 10 * DecStrToNat(s[0..|s| - 1]) - } - - lemma ParseDigitsAll(s: string, pred: char -> bool) - requires |s| > 0 - requires forall i | 0 <= i < |s| :: pred(s[i]) - ensures ParseDigits(s, pred).Some? && ParseDigits(s, pred).value.0 == s && |ParseDigits(s, pred).value.1| == 0 - {} - - lemma ParseDecAll(s: string) - requires |s| > 0 - requires forall i | 0 <= i < |s| :: IsDecDigit(s[i]) - ensures ParseDecStr(s).Some? && ParseDecStr(s).value.0 == s && |ParseDecStr(s).value.1| == 0 - { - ParseDigitsAll(s, IsDecDigit); - } -} diff --git a/cedar-dafny/def/std.dfy b/cedar-dafny/def/std.dfy deleted file mode 100644 index d722a4438..000000000 --- a/cedar-dafny/def/std.dfy +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// ----------------- std.dfy ----------------- // -// The "std" module holds generic code that we're adopting from the Dafny -// standard library. - -module def.std { - - datatype Option<+T> = Some(value: T) | None { - predicate IsFailure() { - None? - } - - function PropagateFailure(): Option - requires None? - { - None - } - - function Extract(): T - requires Some? { - value - } - - function UnwrapOr(default: T): T { - match this - case Some(v) => v - case None() => default - } - } - - datatype Result<+T, +E> = Ok(value: T) | Err(error: E) { - predicate IsFailure() { - Err? - } - - function PropagateFailure(): Result - requires Err? - { - Err(error) - } - - function Extract(): T - requires Ok? - { - value - } - - function Map(f: T -> U): Result - { - if Ok? then Ok(f(value)) else PropagateFailure() - } - - function MapErr(f: E -> F): Result - { - if Ok? then Ok(value) else Err(f(error)) - } - } -} diff --git a/cedar-dafny/def/templates.dfy b/cedar-dafny/def/templates.dfy deleted file mode 100644 index 9a47d8cc0..000000000 --- a/cedar-dafny/def/templates.dfy +++ /dev/null @@ -1,238 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -include "core.dfy" - -// Code for policy templates - -module def.templates { - import opened core - - // ----- Common definitions for policy templating ----- // - - // In the production engine as of this writing (2023-01-13), SlotId is an enum - // of `Principal` and `Resource`, but doing that in the definitional engine - // would complicate serialization for no benefit. - type SlotId = string - - // Currently, slots are only ever filled with entity UIDs. - type SlotEnv = map - - // A datatype that gives the "slot requirements" of a data structure, i.e., - // the information we need in order to determine whether a given SlotEnv is - // valid to instantiate the data structure. - // - // Currently, this is just the set of slot IDs, but in the future, if we - // support slots in `when` clauses, we might need to distinguish between slots - // in the policy head (which can only be filled with entity UIDs if we want - // the template-linked policy to be syntactically valid) and slots in `when` - // clauses (which can be filled with any Cedar value as far as the runtime - // semantics is concerned). - type SlotReqs = set - const emptySlotReqs: SlotReqs := {} - - predicate slotEnvSatisfiesReqs(se: SlotEnv, sr: SlotReqs) { - se.Keys >= sr - } - - // Compute the SlotReqs of a composite data structure from the SlotReqs of its - // parts. Currently just set union; might become more complicated if we have - // different kinds of slots in the future. 
- function combineSlotReqs(sr1: SlotReqs, sr2: SlotReqs): SlotReqs { - sr1 + sr2 - } - - // ----- Definitions of specific templated data structures ----- // - - // For certain Cedar data structures (e.g., `Policy`), there is some code in - // CedarDafny that wants a version that allows slots and other code that wants - // a version with no slots. As of this writing (2023-02-01), the - // production engine has only one version of each data structure, which allows - // slots, and code that doesn't expect a slot just raises an assertion error - // if it runs into one. That approach isn't viable for CedarDafny, where - // functions need to be defined on all inputs; we need two different - // datatypes. We name them according to the scheme `Foo` and `FooTemplate`. - // This differs from production, in which the single datatype that allows - // slots is named `Foo` (except for the top-level `Template`, which is handled - // specially); the naming difference seems sensible given our needs. - // - // Currently, we just write out separate definitions of `Foo` and - // `FooTemplate`. This leads to some code duplication, both in the definitions - // themselves and in any code that needs to accept both `Foo` and - // `FooTemplate`. In the hope of avoiding this code duplication, we explored - // an alternative design in which `Foo` is a subset type of `FooTemplate` with - // empty SlotReqs. In principle, this subset type should work just like a - // handwritten datatype without slots, assuming that the verifier - // automatically propagates the "empty SlotReqs" condition down to subterms - // and uses it to rule out "slot" cases during pattern matches. Unfortunately, - // we found that the verifier had trouble with this reasoning and we believe - // the problems would get worse in the future, so for now (2023-02-01), we - // consider the code duplication to be the lesser evil. 
- - datatype PolicyTemplateID = PolicyTemplateID(id: string) - - datatype PolicyTemplate = PolicyTemplate( - effect: Effect, - principalScope: PrincipalScopeTemplate, - actionScope: ActionScope, - resourceScope: ResourceScopeTemplate, - condition: Expr) - { - function slotReqs(): SlotReqs { - combineSlotReqs(principalScope.slotReqs(), resourceScope.slotReqs()) - } - } - - datatype PrincipalScopeTemplate = PrincipalScopeTemplate(scope: ScopeTemplate) - { - function slotReqs(): SlotReqs { - scope.slotReqs() - } - } - - datatype ResourceScopeTemplate = ResourceScopeTemplate(scope: ScopeTemplate) - { - function slotReqs(): SlotReqs { - scope.slotReqs() - } - } - - // Note: This differs from the production `EntityReference` by having a - // `slotId` field. The alternative (as seen in the production engine) is to - // pass an extra `slotId` parameter through several functions. I (Matt) find - // this design somewhat easier to understand (which is a design goal of the - // definitional engine) and believe that justifies the difference from - // production. - datatype EntityUIDOrSlot = EntityUID(entity: EntityUID) | Slot(slotId: SlotId) - { - function slotReqs(): SlotReqs { - match this { - case EntityUID(_) => emptySlotReqs - case Slot(slotId) => {slotId} - } - } - } - - datatype ScopeTemplate = - Any | - Eq(entityOrSlot: EntityUIDOrSlot) | - In(entityOrSlot: EntityUIDOrSlot) | - Is(ety: EntityType) | - IsIn(ety: EntityType, entityOrSlot: EntityUIDOrSlot) - { - function slotReqs(): SlotReqs { - match this { - case Any | Is(_) => emptySlotReqs - case _ => entityOrSlot.slotReqs() - } - } - } - - // Corresponds to production `Policy`. In the definitional engine, the - // datatype for a non-template policy body has a much stronger claim to the - // `Policy` name. 
- datatype TemplateLinkedPolicy = - TemplateLinkedPolicy(tid: PolicyTemplateID, slotEnv: SlotEnv) - - datatype TemplatedPolicyStoreUnvalidated = TemplatedPolicyStore( - templates: map, - linkedPolicies: map) { - predicate isValid() { - // Note: The production engine requires that each zero-slot template has - // exactly one instance because a violation of that property is almost - // certainly a mistake, but we don't enforce this in the definitional - // engine because it would add complexity for no benefit. - forall iid <- linkedPolicies.Keys :: - linkedPolicies[iid].tid in templates.Keys && - // Note: As in the production engine, this is a stronger condition than - // `slotEnvSatisfiesReqs(linkedPolicies[iid].slotEnv, templates[linkedPolicies[iid].tid].slotReqs())`: - // for uniformity, we require all linked policies of a given template to - // define exactly the slots actually referenced in the template and no - // more. - linkedPolicies[iid].slotEnv.Keys == templates[linkedPolicies[iid].tid].slotReqs() - } - } - type TemplatedPolicyStore = tps: TemplatedPolicyStoreUnvalidated | tps.isValid() - witness * - - datatype TemplatedStore = TemplatedStore(entities: EntityStore, policies: TemplatedPolicyStore) - - // ----- Code to link templated data structures ----- // - - // Group all the functions that take a `slotEnv` parameter into a single - // datatype to save us the boilerplate of passing the parameter along - // explicitly. 
- datatype Linker = Linker(slotEnv: SlotEnv) { - predicate reqsSatisfied(sr: SlotReqs) { - slotEnvSatisfiesReqs(slotEnv, sr) - } - - function linkEntityUIDOrSlot(es: EntityUIDOrSlot): EntityUID - requires reqsSatisfied(es.slotReqs()) - { - match es { - case EntityUID(e) => e - case Slot(slotId) => slotEnv[slotId] - } - } - - function linkScope(st: ScopeTemplate): Scope - requires reqsSatisfied(st.slotReqs()) - { - match st { - case Any => Scope.Any - case In(e) => Scope.In(linkEntityUIDOrSlot(e)) - case Eq(e) => Scope.Eq(linkEntityUIDOrSlot(e)) - case Is(ety) => Scope.Is(ety) - case IsIn(ety,e) => Scope.IsIn(ety,linkEntityUIDOrSlot(e)) - } - } - - function linkPrincipalScope(pst: PrincipalScopeTemplate): PrincipalScope - requires reqsSatisfied(pst.slotReqs()) - { - PrincipalScope(linkScope(pst.scope)) - } - - function linkResourceScope(rst: ResourceScopeTemplate): ResourceScope - requires reqsSatisfied(rst.slotReqs()) - { - ResourceScope(linkScope(rst.scope)) - } - - function linkPolicy(pt: PolicyTemplate): Policy - requires reqsSatisfied(pt.slotReqs()) - { - Policy( - pt.effect, - linkPrincipalScope(pt.principalScope), - pt.actionScope, - linkResourceScope(pt.resourceScope), - pt.condition) - } - } - - function linkPolicyStore(tps: TemplatedPolicyStore): PolicyStore { - PolicyStore( - map iid <- tps.linkedPolicies.Keys :: - (var inst := tps.linkedPolicies[iid]; - Linker(inst.slotEnv).linkPolicy(tps.templates[inst.tid]))) - } - - function linkStore(ts: TemplatedStore): Store { - Store(ts.entities, linkPolicyStore(ts.policies)) - } -} diff --git a/cedar-dafny/def/util.dfy b/cedar-dafny/def/util.dfy deleted file mode 100644 index 2605bb28c..000000000 --- a/cedar-dafny/def/util.dfy +++ /dev/null @@ -1,233 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -include "base.dfy" -include "core.dfy" - -// The "util" module holds generic code that we're adopting from the Dafny -// standard library. -module def.util { - import opened base - import opened core - - // --------- Convert a set to a (sorted) sequence --------- // - // Adapted from http://leino.science/papers/krml275.html - - ghost predicate IsTotalOrder(R: (A, A) -> bool) { - // connexity - && (forall a, b :: R(a, b) || R(b, a)) - // antisymmetry - && (forall a, b | R(a, b) && R(b, a) :: a == b) - // transitivity - && (forall a, b, c | R(a, b) && R(b, c) :: R(a, c)) - } - - lemma ThereIsAMinimum(s: set, R: (A, A) -> bool) - requires s != {} && IsTotalOrder(R) - ensures exists x :: x in s && forall y | y in s :: R(x, y) - { - var x :| x in s; - if s == {x} { - assert forall y | y in s :: x == y; - } else { - var s' := s - {x}; - assert s == s' + {x}; - ThereIsAMinimum(s', R); - var z :| z in s' && forall y | y in s' :: R(z, y); - if - case R(z, x) => - assert s == s' + {x}; - assert z in s; - forall y | y in s ensures R(z, y) { if y == x { assert R(z, x); } else { assert y in s'; } } - case R(x, z) => - assert x in s; - forall y | y in s ensures R(x, y) { if y == x { assert R(x, x); } else { assert y in s'; } } - } - } - - // Converts the given set to a sequence that is sorted according to the total - // ordering R. Note that this is a naive way to sort a set, as it works by - // repeatedly picking the smallest element (n^2 algorithm). 
- function {: opaque } SetToSortedSeq(s: set, R: (A, A) -> bool): (ret: seq) - requires IsTotalOrder(R) - ensures |s| == |ret| - ensures forall i | 0 <= i < |ret| :: ret[i] in s - ensures forall e | e in s :: e in ret - ensures forall i, j | 0 <= i < j < |ret| :: R(ret[i], ret[j]) - { - if s == {} then [] else - ThereIsAMinimum(s, R); - var x :| x in s && forall y | y in s :: R(x, y); - [x] + SetToSortedSeq(s - {x}, R) - } - - function {: opaque } MapToSortedSeq(s: map): (ret: seq<(string, A)>) - ensures |s| == |ret| - { - StringLeqIsTotalOrder(); - var sortedKeys := SetToSortedSeq(s.Keys, StringLeq); - seq(|s|, i requires 0 <= i < |s| => (sortedKeys[i], s[sortedKeys[i]])) - } - - // --------- Sequence and string ordering --------- // - - predicate SeqLeq(s1: seq, s2: seq, lte: (T, T) -> bool) - { - s1 == [] || - (s2 != [] && - if s1[0] == s2[0] then SeqLeq(s1[1..], s2[1..], lte) else lte(s1[0], s2[0])) - } - - lemma SeqLeqIsTotalOrder(seqLeq: (seq, seq) -> bool, lte: (T, T) -> bool) - requires IsTotalOrder(lte) - requires forall s1, s2 :: seqLeq(s1, s2) == SeqLeq(s1, s2, lte) - ensures IsTotalOrder(seqLeq) - { - forall a, b - ensures seqLeq(a, b) || seqLeq(b, a) - { SeqLeqConnexity(a, b, lte); } - forall a, b - ensures seqLeq(a, b) && seqLeq(b, a) ==> a == b - { SeqLeqAntisymmetry(a, b, lte); } - forall a, b, c - ensures seqLeq(a, b) && seqLeq(b, c) ==> seqLeq(a, c) - { SeqLeqTransitivity(a, b, c, lte); } - } - - lemma SeqLeqConnexity(s1: seq, s2: seq, lte: (T, T) -> bool) - requires IsTotalOrder(lte) - ensures SeqLeq(s1, s2, lte) || SeqLeq(s2, s1, lte) - {} - - lemma SeqLeqAntisymmetry(s1: seq, s2: seq, lte: (T, T) -> bool) - requires IsTotalOrder(lte) - ensures SeqLeq(s1, s2, lte) && SeqLeq(s2, s1, lte) ==> s1 == s2 - { - if s1 != [] && s2 != [] { - var h1, h2 := s1[0], s2[0]; - if h1 == h2 { - SeqLeqAntisymmetry(s1[1..], s2[1..], lte); - } - } - } - - lemma SeqLeqTransitivity(s1: seq, s2: seq, s3: seq, lte: (T, T) -> bool) - requires IsTotalOrder(lte) - ensures 
SeqLeq(s1, s2, lte) && SeqLeq(s2, s3, lte) ==> SeqLeq(s1, s3, lte) - { - if SeqLeq(s1, s2, lte) && SeqLeq(s2, s3, lte) { - if s1 != [] && s2 != [] && s3 != [] { - var h1, h2, h3 := s1[0], s2[0], s3[0]; - if h1 == h2 == h3 { - SeqLeqTransitivity(s1[1..], s2[1..], s3[1..], lte); - } else { - assert lte(h1, h2) && lte(h2, h3) && lte(h1, h3); - } - } - } - } - - predicate StringLeq(s1: string, s2: string) - { - SeqLeq(s1, s2, (c1: char, c2: char) => c1 <= c2) - } - - lemma StringLeqIsTotalOrder() - ensures IsTotalOrder(StringLeq) - { - SeqLeqIsTotalOrder(StringLeq, (c1: char, c2: char) => c1 <= c2); - } - - lemma SeqAddInequality(s1: seq, t1: T, s2: seq, t2: T) - requires s1 != s2 || t1 != t2 - ensures s1 + [t1] != s2 + [t2] - { - if s1 == s2 { - assert t1 != t2; - var len := |s1|; - assert (s1 + [t1])[len] != (s2 + [t2])[len]; - } else if |s1| == |s2| { - var i :| 0 <= i < |s1| && s1[i] != s2[i]; - assert (s1 + [t1])[i] != (s2 + [t2])[i]; - } else { - assert |s1 + [t1]| != |s2 + [t2]|; - } - } - - // --------- Name and entity type ordering --------- // - - predicate IdLeq(id1: Id, id2: Id) { - StringLeq(id1.id, id2.id) - } - - predicate PathLeq(p1: seq, p2: seq) - { - SeqLeq(p1, p2, IdLeq) - } - - predicate NameLeq(n1: Name, n2: Name) - { - PathLeq(n1.path + [n1.id], n2.path + [n2.id]) - } - - predicate EntityTypeLeq(ety1: EntityType, ety2: EntityType) - { - NameLeq(ety1.id, ety2.id) - } - - lemma EntityTypeLeqIsTotalOrder() - ensures IsTotalOrder(EntityTypeLeq) - { - NameLeqIsTotalOrder(); - } - - lemma NameLeqIsTotalOrder() - ensures IsTotalOrder(NameLeq) - { - forall n1, n2: Name | n1 != n2 - ensures n1.path + [n1.id] != n2.path + [n2.id] - { - assert n1.path != n2.path || n1.id != n2.id; - SeqAddInequality(n1.path, n1.id, n2.path, n2.id); - } - PathLeqIsTotalOrder(); - } - - lemma PathLeqIsTotalOrder() - ensures IsTotalOrder(PathLeq) - { - StringLeqIsTotalOrder(); - assert IsTotalOrder(PathLeq) by { - SeqLeqIsTotalOrder(PathLeq, IdLeq); - } - } - - // KeyExists 
and LastOfKey are helpers about association lists that are used in - // validation.dfy, so we lift them here. - // We use these as an abbreviation for the quantifier alternation: - // exists i :: 0 <= i < |es| && (forall j :: i < j < |es| => ...) - // This helps dafny prove some of our lemmas about record evaluation and validation. - ghost predicate KeyExists(k: K, es: seq<(K,V)>) { - exists i :: 0 <= i < |es| && es[i].0 == k - } - - opaque ghost function LastOfKey(k: K, es: seq<(K,V)>): (res: V) - requires KeyExists(k,es) - ensures exists i :: 0 <= i < |es| && es[i].0 == k && es[i].1 == res && (forall j | i < j < |es| :: es[j].0 != k) - { - if (es[0].0 == k && (forall j | 0 < j < |es| :: es[j].0 != k)) then es[0].1 else LastOfKey(k,es[1..]) - } -} diff --git a/cedar-dafny/def/wildcard.dfy b/cedar-dafny/def/wildcard.dfy deleted file mode 100644 index 86fe76e45..000000000 --- a/cedar-dafny/def/wildcard.dfy +++ /dev/null @@ -1,145 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -include "std.dfy" -include "core.dfy" - - -// Defines the wildcard matching functions in a separate module because they -// don't depend on the Evaluator state, and we want to reuse them to reason -// about other components of Cedar (e.g., policy analysis). 
-module def.engine.wildcard { - - import opened core - import opened std - - import opened lib = core - - export - provides lib, wildcardMatch - - predicate charMatch(textChar: char, patternChar: PatElem) - requires patternChar.JustChar? - { - match patternChar { - case JustChar(c) => textChar == c - } - } - - predicate wildcard(patternChar: PatElem) { - patternChar.Star? - } - - predicate wildcardMatch(text: string, pattern: Pattern) { - if pattern == [] && text == [] then - true - else if pattern == [] then - false - else if text == [] then - wildcard(pattern[0]) && wildcardMatch(text, pattern[1..]) - else if wildcard(pattern[0]) then - wildcardMatch(text, pattern[1..]) || - wildcardMatch(text[1..], pattern) - else - charMatch(textChar := text[0], patternChar := pattern[0]) && - wildcardMatch(text[1..], pattern[1..]) - } by method { - wildcardMatchEq(text, pattern); - return wildcardMatchIdx(text, pattern, 0, 0); - } - - predicate wildcardMatchIdx(text: string, pattern: Pattern, i: nat, j: nat) - requires i <= |text| && j <= |pattern| - decreases |text| - i - decreases |pattern| - j - { - if j == |pattern| && i == |text| then - true - else if j == |pattern| then - false - else if i == |text| then - wildcard(pattern[j]) && wildcardMatchIdx(text, pattern, i, j + 1) - else if wildcard(pattern[j]) then - wildcardMatchIdx(text, pattern, i, j + 1) || - wildcardMatchIdx(text, pattern, i + 1, j) - else - charMatch(textChar := text[i], patternChar := pattern[j]) && - wildcardMatchIdx(text, pattern, i + 1, j + 1) - } by method { - var cache: array2>; - cache := new std.Option[|text|+1, |pattern|+1]((_,_) => None); - var r := wildcardMatchIdxMem(text, pattern, i, j, cache); - return r; - } - - lemma wildcardMatchEqIdx(text: string, pattern: Pattern, i: nat, j: nat) - requires i <= |text| && j <= |pattern| - ensures wildcardMatch(text[i..], pattern[j..]) == wildcardMatchIdx(text, pattern, i, j) - decreases |text| - i - decreases |pattern| - j - { - } - - lemma 
wildcardMatchEq(text: string, pattern: Pattern) - ensures wildcardMatch(text, pattern) == wildcardMatchIdx(text, pattern, 0, 0) { - wildcardMatchEqIdx(text, pattern, 0, 0); - } - - method wildcardMatchIdxMem(text: string, pattern: Pattern, i: nat, j: nat, cache: array2>) returns(r: bool) - requires i <= |text| && j <= |pattern| - requires cache.Length0 == |text| + 1 && cache.Length1 == |pattern| + 1 - requires forall p: nat, q: nat | p < cache.Length0 && q < cache.Length1 && cache[p, q].Some? :: cache[p, q].value == wildcardMatchIdx(text, pattern, p, q) - modifies cache - decreases |text| - i - decreases |pattern| - j - ensures r == wildcardMatchIdx(text, pattern, i, j) - ensures cache[i, j] == Some(wildcardMatchIdx(text, pattern, i, j)) - ensures forall p: nat, q: nat | p < cache.Length0 && q < cache.Length1 && cache[p, q].Some? :: cache[p, q].value == wildcardMatchIdx(text, pattern, p, q) - { - if cache[i, j].Some? { - r := cache[i, j].value; - return; - } - if j == |pattern| && i == |text| { - r := true; - } - else if j == |pattern| { - r := false; - } - else if i == |text| { - if wildcard(pattern[j]) { - r := wildcardMatchIdxMem(text, pattern, i, j + 1, cache); - } else { - r := false; - } - } - else if wildcard(pattern[j]) { - r := wildcardMatchIdxMem(text, pattern, i, j + 1, cache); - if !r { - r := wildcardMatchIdxMem(text, pattern, i + 1, j, cache); - } - } - else { - if charMatch(textChar := text[i], patternChar := pattern[j]) { - r := wildcardMatchIdxMem(text, pattern, i + 1, j + 1, cache); - } else { - r := false; - } - } - cache[i, j] := Some(r); - return; - } -} diff --git a/cedar-dafny/difftest/helpers.dfy b/cedar-dafny/difftest/helpers.dfy deleted file mode 100644 index c4e3379dc..000000000 --- a/cedar-dafny/difftest/helpers.dfy +++ /dev/null @@ -1,570 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -include "../def/std.dfy" - -module difftest.helpers { - // This module contains helper code for differential testing that isn't - // specific to the part of the definitional implementation being tested. - - import opened def.std - - // ----- Dafny utilities not specific to Cedar ----- - - function mapSeq(f: A --> B, s: seq): seq - requires forall i | 0 <= i < |s| :: f.requires(s[i]) { - seq(|s|, i requires 0 <= i < |s| => f(s[i])) - } - - function mapMapKeys( - keyFn: K1 -> K2, m: map): map - requires forall x1, x2 | x1 in m.Keys && x2 in m.Keys :: keyFn(x1) == keyFn(x2) ==> x1 == x2 { - var newKeys := set k | k in m.Keys :: keyFn(k); - map nk | nk in newKeys :: - (var k :| k in (set k1 | k1 in m.Keys && keyFn(k1) == nk); m[k]) - } - - method setToSequenceUnordered(st: set) returns (sq: seq) { - var st1 := st; - sq := []; - while st1 != {} { - var x :| x in st1; - sq := sq + [x]; - st1 := st1 - {x}; - } - } - - function singleElementOfSet(s: set): (x: T) - requires |s| == 1 - ensures s == {x} - { - var x :| x in s; - assert |s - {x}| == 0; - x - } - - method flattenSet_method(s: set>) returns (r: set) { - var s1 := s; - r := {}; - while s1 != {} { - var x :| x in s1; - r := r + x; - s1 := s1 - {x}; - } - } - - function flattenSet(s: set>): (r: set) - ensures forall x | x in r :: exists y :: x in y && y in s - ensures forall x, y | x in y && y in s :: x in r - { - set x, y | x in y && y in s :: x - } - 
- function printFromFunction(x: T): () { - () - } by method { - print x; - return (); - } - - // ----- Idealized JSON data structure ----- - // - // This is used as a narrow interface to marshal data - // between Dafny-generated target code and handwritten target code. - - datatype Json = - JsonNull | - JsonBool(b: bool) | - JsonInt(i: int) | - JsonString(s: string) | - JsonArray(a: seq) | - JsonObject(o: map) - { - function JsonAsString(): string { - match this { - case JsonNull => "null" - case JsonBool(true) => "true" - case JsonBool(false) => "false" - case JsonInt(_) => "integer" - case JsonString(s) => "\"" + s + "\"" - case JsonArray(_) => "array" - case JsonObject(o) => if |o.Keys| == 0 then - "empty object" - else if |o.Keys| == 1 then - "object with single field " + singleElementOfSet(o.Keys) - else - "object with 2 or more fields" - } - } - } - - // ----- Err and Result types for data structure conversion ----- - - /* Here and below, "FromProd" refers to conversion from a JSON dump of the - * production data structures to the definitional data structures. That is, we - * are both deserializing the JSON and handling the the differences between - * the production and definitional data structures, and either of those tasks - * can raise a FromProdErr. - * - * We choose to interleave the code to perform the two tasks rather than - * performing all the deserialization before all the production/definitional - * conversion. This saves us the work of maintaining a separate set of Dafny - * data structures that just mirror the production data structures. The - * downside is that it's less obvious where the production/definitional - * conversions are occurring, and understanding these conversions is helpful - * in understanding what kind of assurance we're getting from differential - * testing. 
- */ - - /* An UnexpectedFromProdErr indicates a problem that should never occur if DRT - * is working correctly (and in particular, input generation is limited to - * features supported by both the definitional and production - * implementations). - */ - - datatype FromProdErr = UnexpectedFromProdErr(desc: string) | InvalidAttrVal - type FromProdResult = std.Result> - - // ----- Helpers to extract pieces of data from JSON ----- - - function getJsonBool(j: Json): FromProdResult { - match j { - case JsonBool(b) => Ok(b) - case _ => Err({UnexpectedFromProdErr("expected bool, got " + j.JsonAsString())}) - } - } - function getJsonInt(j: Json): FromProdResult { - match j { - case JsonInt(i) => Ok(i) - case _ => Err({UnexpectedFromProdErr("expected int, got " + j.JsonAsString())}) - } - } - function getJsonString(j: Json): FromProdResult { - match j { - case JsonString(s) => Ok(s) - case _ => Err({UnexpectedFromProdErr("expected string, got " + j.JsonAsString())}) - } - } - function getJsonArray(j: Json): FromProdResult> { - match j { - case JsonArray(a) => Ok(a) - case _ => Err({UnexpectedFromProdErr("expected array, got " + j.JsonAsString())}) - } - } - function getJsonObject(j: Json): FromProdResult> { - match j { - case JsonObject(o) => Ok(o) - case _ => Err({UnexpectedFromProdErr("expected object, got " + j.JsonAsString())}) - } - } - function getJsonField(j: Json, f: string): (r: FromProdResult) - ensures r.Ok? ==> r.value < j - { - var o :- getJsonObject(j); - if f in o.Keys then - Ok(o[f]) - else - Err({UnexpectedFromProdErr("getJsonField: requested key " + f + " doesn't exist")}) - } - - /* Unpack the default Serde serialization of a Rust "enum" value into a tuple: - * (variant tag, body). - * - * If the Rust enum constructor has no fields, Serde just outputs the tag as a - * string. 
If the constructor has at least one field, Serde generates a JSON - * object with a single field, named after the tag, whose value is a JSON - * object containing the original fields. unpackJsonSum accepts both formats, - * and in the first case, it returns jsonEmptyObject as the body for - * consistency. - * - * When `j` is a JsonString, no matter what value we return as the body, it - * complicates the termination proof for callers that recurse on the body - * because we won't have `body < j`. For exprFromProdJson, we're OK because - * exprFromProdJson never recurses directly on `body` but only on a proper - * subterm of it, and jsonEmptyObject has no proper subterms that are Json - * values. But Dafny can't seem to prove the latter fact directly, so we give - * it a little help by including a `jsonEmptyObject` special case in - * `deserializerAcceptsSubterms`. Two alternative designs that would avoid the - * problem would be to (1) put the body in an option type or (2) take - * advantage of the fact that none of our recursive datatypes currently - * have any constructors that take no fields and have them use a separate - * version of unpackJsonSum that doesn't accept JsonStrings. But the current - * design seems nicer as long as proving termination doesn't become too much - * of a hassle. - */ - function unpackJsonSum(j: Json): (r: FromProdResult<(string, Json)>) - ensures r.Ok? 
==> r.value.1 == jsonEmptyObject || r.value.1 < j - { - match j { - case JsonString(tag) => - Ok((tag, jsonEmptyObject)) - case JsonObject(o) => - if |o.Keys| == 1 then - var k := singleElementOfSet(o.Keys); - Ok((k, o[k])) - else - var _ := printFromFunction(j); - var _ := printFromFunction("\n"); - Err({UnexpectedFromProdErr("unpackJsonSum: expected exactly one key, got either zero or multiple")}) - case _ => Err({UnexpectedFromProdErr("unpackJsonSum: expected an object or a string")}) - } - } - - // ----- Helpers to convert composite data structures ----- - // - // These do not interact directly with the `Json` datatype but are often used - // with callbacks that do. - - function mapMapValuesFromProd( - valueFn: V1 -> FromProdResult, m: map): (res: FromProdResult>) - ensures (forall k | k in m.Keys :: valueFn(m[k]).Ok?) ==> res.Ok? - ensures res.Ok? ==> res.value.Keys == m.Keys - { - var m1 := map k | k in m.Keys :: valueFn(m[k]); - if forall k | k in m1.Keys :: m1[k].Ok? then - Ok(map k | k in m1.Keys :: m1[k].value) - else - Err(flattenSet(set k | k in m1.Keys && m1[k].Err? :: m1[k].error)) - } - - function mapMapKeysFromProd( - keyFn: K1 -> FromProdResult, m: map): FromProdResult> - requires forall x1, x2 | x1 in m.Keys && x2 in m.Keys :: keyFn(x1) == keyFn(x2) ==> x1 == x2 - { - var newKeys := set k | k in m.Keys :: keyFn(k); - if forall k | k in newKeys :: k.Ok? then - var newKeysOks := set k | k in newKeys :: k.value; - Ok(map nk | nk in newKeysOks :: - (var k :| k in (set k1 | k1 in m.Keys && keyFn(k1) == Ok(nk)); m[k])) - else - Err(flattenSet(set k | k in newKeys && k.Err? :: k.error)) - } - - function mapSeqFromProd(f: A --> FromProdResult, s: seq): FromProdResult> - requires forall i | 0 <= i < |s| :: f.requires(s[i]) { - var s1 := mapSeq(f, s); - if forall i | 0 <= i < |s1| :: s1[i].Ok? then - Ok(mapSeq((rb: FromProdResult) requires rb.Ok? => rb.value, s1)) - else - Err(flattenSet(set i | 0 <= i < |s1| && s1[i].Err? 
:: s1[i].error)) - } - - function mapSetFromProd(f: A --> FromProdResult, s: seq): FromProdResult> - requires forall i | 0 <= i < |s| :: f.requires(s[i]) { - var s1 :- mapSeqFromProd(f, s); - Ok(set i | 0 <= i < |s1| :: s1[i]) - } - - function mapFromEntriesProd(entries: seq<(K, V)>): FromProdResult> { - if (forall i, j | 0 <= i < |entries| && 0 <= j < |entries| :: - entries[i].0 == entries[j].0 ==> i == j) then - var keys := set i | 0 <= i < |entries| :: entries[i].0; - Ok(map k | k in keys :: - (var i :| i in (set i1 | 0 <= i1 < |entries| && entries[i1].0 == k); entries[i].1)) - else - Err({UnexpectedFromProdErr("duplicate key")}) - } - - // ----- "Serialization combinator" library ----- - // - // These functions factor out common patterns of deserialization logic so they - // can be performed with less boilerplate than by using lower-level functions - // such as getJson* directly. - // - // This library is agnostic to the meaning of the Dafny datatypes and hence is - // agnostic to whether callers are performing production/definitional - // conversion under the guise of deserialization (see the comment on - // FromProdErr above), as they sometimes are. - // - // The sets of Dafny datatypes that we need to convert _to_ and _from_ JSON - // are currently disjoint. For conversion to JSON, handwritten code is already - // reasonably clean and a library wouldn't help very much. Thus, this library - // currently only supports conversion from JSON, which is the direction that - // has all the messy error checks that we want to factor out. In the future, - // if we need to convert the same datatypes in both directions, then it might - // be beneficial to enhance the library to support sharing per-datatype code - // between the two directions. 
- // - // API overview: - // - // - `deserializeFoo(j, ...)` extracts the top-level structure of a "foo" from - // the JSON `j` and then uses one or more caller-provided callbacks to - // finish the conversion of the retrieved sub-items, as applicable. For - // example, `deserializeSeq(j, ed)` converts a JSON array to a Dafny `seq`, - // using the callback `ed` ("element deserializer") to convert each element - // of the array. - // - // - `fooDeserializer(...)` is a shortcut for `j => deserializeFoo(j, ...)`. - // `fooDeserializer` typically results in less boilerplate than - // `deserializeFoo` because it can be passed directly as a callback to - // another `deserializeBar` or `barDeserializer` function. The exception is - // when the data structure is recursive: `deserializeFoo(j, ...)` supports - // partial callbacks that only accept inputs smaller than `j` (see - // `deserializerAcceptsSubterms`), while it's unworkable to implement an - // analogous thing for `fooDeserializer`. - - type Deserializer = Json -> FromProdResult - - // A deserializer with a precondition. Preconditions should be used only to - // verify termination with recursive data structures and not to rule out - // invalid input, which should still be reported by returning a FromProdErr. - type PartialDeserializer = Json --> FromProdResult - - const jsonEmptyObject := JsonObject(map[]) - - ghost predicate deserializerAcceptsSubterms(d: PartialDeserializer, j: Json) { - // See the explanation in unpackJsonSum regarding jsonEmptyObject. 
- j == jsonEmptyObject || forall jr | jr < j :: d.requires(jr) - } - - function boolDeserializer(cons: bool -> FromProdResult): Deserializer { - j => (var b :- getJsonBool(j); cons(b)) - } - function intDeserializer(cons: int -> FromProdResult): Deserializer { - j => (var i :- getJsonInt(j); cons(i)) - } - function stringDeserializer(cons: string -> FromProdResult): Deserializer { - j => (var s :- getJsonString(j); cons(s)) - } - - function deserializeField( - j: Json, fn: string, fd: PartialDeserializer): FromProdResult - requires deserializerAcceptsSubterms(fd, j) - { - var jf :- getJsonField(j, fn); fd(jf) - } - - function deserializeObj1Field( - j: Json, - fn1: string, fd1: Deserializer, - cons: F1 -> FromProdResult): FromProdResult - { - var f1 :- deserializeField(j, fn1, fd1); - cons(f1) - } - function objDeserializer1Field( - fn1: string, fd1: Deserializer, - cons: F1 -> FromProdResult): Deserializer - { - j => deserializeObj1Field(j, fn1, fd1, cons) - } - function deserializeObj2Fields( - j: Json, - fn1: string, fd1: PartialDeserializer, - fn2: string, fd2: PartialDeserializer, - cons: (F1, F2) -> FromProdResult): FromProdResult - requires deserializerAcceptsSubterms(fd1, j) && deserializerAcceptsSubterms(fd2, j) - { - // If we wanted, we could restructure this code (and other code like it) to - // attempt deserialization of f2 even if f1 fails in order to report as many - // errors as possible in a single pass. Currently, we're satisfied to get - // one error at a time and don't see a need to make this code any more - // complex. 
- var f1 :- deserializeField(j, fn1, fd1); - var f2 :- deserializeField(j, fn2, fd2); - cons(f1, f2) - } - function objDeserializer2Fields( - fn1: string, fd1: Deserializer, - fn2: string, fd2: Deserializer, - cons: (F1, F2) -> FromProdResult): Deserializer - { - j => deserializeObj2Fields(j, fn1, fd1, fn2, fd2, cons) - } - function deserializeObj3Fields( - j: Json, - fn1: string, fd1: PartialDeserializer, - fn2: string, fd2: PartialDeserializer, - fn3: string, fd3: PartialDeserializer, - cons: (F1, F2, F3) -> FromProdResult): FromProdResult - requires deserializerAcceptsSubterms(fd1, j) && deserializerAcceptsSubterms(fd2, j) - && deserializerAcceptsSubterms(fd3, j) - { - var f1 :- deserializeField(j, fn1, fd1); - var f2 :- deserializeField(j, fn2, fd2); - var f3 :- deserializeField(j, fn3, fd3); - cons(f1, f2, f3) - } - function objDeserializer3Fields( - fn1: string, fd1: Deserializer, - fn2: string, fd2: Deserializer, - fn3: string, fd3: Deserializer, - cons: (F1, F2, F3) -> FromProdResult): Deserializer - { - j => deserializeObj3Fields(j, fn1, fd1, fn2, fd2, fn3, fd3, cons) - } - - - function deserializeObj4Fields( - j: Json, - fn1: string, fd1: PartialDeserializer, - fn2: string, fd2: PartialDeserializer, - fn3: string, fd3: PartialDeserializer, - fn4: string, fd4: PartialDeserializer, - cons: (F1, F2, F3, F4) -> FromProdResult): FromProdResult - requires deserializerAcceptsSubterms(fd1, j) && deserializerAcceptsSubterms(fd2, j) - && deserializerAcceptsSubterms(fd3, j) && deserializerAcceptsSubterms(fd4, j) - { - var f1 :- deserializeField(j, fn1, fd1); - var f2 :- deserializeField(j, fn2, fd2); - var f3 :- deserializeField(j, fn3, fd3); - var f4 :- deserializeField(j, fn4, fd4); - cons(f1, f2, f3, f4) - } - function objDeserializer4Fields( - fn1: string, fd1: Deserializer, - fn2: string, fd2: Deserializer, - fn3: string, fd3: Deserializer, - fn4 : string, fd4 : Deserializer, - cons: (F1, F2, F3, F4) -> FromProdResult): Deserializer - { - j => 
deserializeObj4Fields(j, fn1, fd1, fn2, fd2, fn3, fd3, fn4, fd4, cons) - } - function deserializeObj5Fields( - j: Json, - fn1: string, fd1: PartialDeserializer, - fn2: string, fd2: PartialDeserializer, - fn3: string, fd3: PartialDeserializer, - fn4: string, fd4: PartialDeserializer, - fn5: string, fd5: PartialDeserializer, - cons: (F1, F2, F3, F4, F5) -> FromProdResult): FromProdResult - requires deserializerAcceptsSubterms(fd1, j) && deserializerAcceptsSubterms(fd2, j) - && deserializerAcceptsSubterms(fd3, j) && deserializerAcceptsSubterms(fd4, j) - && deserializerAcceptsSubterms(fd5, j) - { - var f1 :- deserializeField(j, fn1, fd1); - var f2 :- deserializeField(j, fn2, fd2); - var f3 :- deserializeField(j, fn3, fd3); - var f4 :- deserializeField(j, fn4, fd4); - var f5 :- deserializeField(j, fn5, fd5); - cons(f1, f2, f3, f4, f5) - } - function objDeserializer5Fields( - fn1: string, fd1: Deserializer, - fn2: string, fd2: Deserializer, - fn3: string, fd3: Deserializer, - fn4: string, fd4: Deserializer, - fn5: string, fd5: Deserializer, - cons: (F1, F2, F3, F4, F5) -> FromProdResult): Deserializer - { - j => deserializeObj5Fields(j, fn1, fd1, fn2, fd2, fn3, fd3, fn4, fd4, fn5, fd5, cons) - } - - function deserializeTuple2Elts( - j: Json, - ed1: PartialDeserializer, - ed2: PartialDeserializer, - cons: (E1, E2) -> FromProdResult): FromProdResult - requires deserializerAcceptsSubterms(ed1, j) && deserializerAcceptsSubterms(ed2, j) - { - var tuple :- getJsonArray(j); - var (j1, j2) :- - if |tuple| == 2 - then Ok((tuple[0], tuple[1])) - else Err({UnexpectedFromProdErr("expected tuple of size 2")}); - var e1 :- ed1(j1); - var e2 :- ed2(j2); - cons(e1, e2) - } - function tupleDeserializer2Elts( - ed1: Deserializer, - ed2: Deserializer, - cons: (E1, E2) -> FromProdResult): Deserializer { - j => deserializeTuple2Elts(j, ed1, ed2, cons) - } - - function deserializeSeq(j: Json, ed: PartialDeserializer): FromProdResult> - requires deserializerAcceptsSubterms(ed, j) - { - var ja 
:- getJsonArray(j); - var sr := mapSeq(ed, ja); - if forall i | 0 <= i < |sr| :: sr[i].Ok? - then Ok(mapSeq((er: FromProdResult) requires er.Ok? => er.value, sr)) - else - // Work around a bug in the Dafny to Java compiler - // (https://github.com/dafny-lang/dafny/issues/3320). - //Err(set i, err | 0 <= i < |sr| && sr[i].Err? && err in sr[i].error :: err) - var serrs := mapSeq((r: FromProdResult) => if r.Err? then r.error else {}, sr); - Err(set i, err | 0 <= i < |sr| && err in serrs[i] :: err) - } - function seqDeserializer(ed: Deserializer): Deserializer> { - j => deserializeSeq(j, ed) - } - - function deserializeSet(j: Json, ed: PartialDeserializer): FromProdResult> - requires deserializerAcceptsSubterms(ed, j) - { - var sq :- deserializeSeq(j, ed); - Ok(set e | e in sq) - } - function setDeserializer(ed: Deserializer): Deserializer> { - j => deserializeSet(j, ed) - } - - function deserializeMap(j: Json, ed: PartialDeserializer): FromProdResult> - requires deserializerAcceptsSubterms(ed, j) - { - var o :- getJsonObject(j); - // copied from mapMapValuesFromProd, which is not defined over partial funcs - var m := map k | k in o.Keys :: ed(o[k]); - if forall k | k in m.Keys :: m[k].Ok? then - Ok(map k | k in m.Keys :: m[k].value) - else - Err(flattenSet(set k | k in m.Keys && m[k].Err? 
:: m[k].error)) - } - function mapDeserializer(ed: Deserializer): Deserializer> { - j => deserializeMap(j, ed) - } - - function deserializeSum(j: Json, consMap: map>): FromProdResult - requires forall t | t in consMap :: - deserializerAcceptsSubterms(consMap[t], j) && consMap[t].requires(jsonEmptyObject) - { - var (tag, body) :- unpackJsonSum(j); - if tag in consMap then - consMap[tag](body) - else - Err({UnexpectedFromProdErr("deserializeSum case: " + tag)}) - } - function sumDeserializer(consMap: map>): Deserializer { - j => deserializeSum(j, consMap) - } - - // `bodyDeserializer` can be convenient for a variant of a sum type where you - // have an existing deserializer for the body and the only follow-up you need - // is to call the constructor of the sum type. `bodyDeserializer` saves only - // a small amount of code but helps maintain the declarative style. We don't - // provide a `deserializeBody` counterpart because its benefit would be even - // smaller and it wouldn't be worth using. - function bodyDeserializer(bd: Deserializer, cons: B -> FromProdResult): Deserializer { - j => (var b :- bd(j); cons(b)) - } - - // Specialization of `deserializeSum` where none of the constructors have any - // fields, so we can just specify a T for each constructor and save some - // boilerplate. - function deserializeEnum(j: Json, valueMap: map): FromProdResult { - var tag :- getJsonString(j); - if tag in valueMap then - Ok(valueMap[tag]) - else - Err({UnexpectedFromProdErr("deserializeEnum case: " + tag)}) - } - function enumDeserializer(valueMap: map): Deserializer { - j => deserializeEnum(j, valueMap) - } - -} diff --git a/cedar-dafny/difftest/main.dfy b/cedar-dafny/difftest/main.dfy deleted file mode 100644 index 59ac64e2c..000000000 --- a/cedar-dafny/difftest/main.dfy +++ /dev/null @@ -1,686 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -include "../def/all.dfy" -include "../validation/all.dfy" -include "helpers.dfy" - -module difftest.main { - // This module contains the entry points for differential testing for the - // definitional engine (`isAuthorized`) and validator (`Validate`). - - import opened def.base - import opened def.core - import opened def.engine - import opened def.std - import opened def.templates - import opened def.ext.fun - import opened def.util - import opened restrictedExpr - import opened validation.types - import opened validation.typechecker - import opened validation.validator - import opened helpers - - method responseToProdJson(r: Response, errs: set) returns (ja: Json) { - var errsSeq := setToSequenceUnordered(errs); - var reasonSeq := setToSequenceUnordered(r.policies); - return JsonObject( - map[ - "decision" := JsonString( - match r.decision { - case Allow => "Allow" - case Deny => "Deny" - }), - "diagnostics" := JsonObject( - map[ - // The order is nondeterministic, but errors are currently ignored in - // the differential-testing comparison - "errors" := JsonArray(mapSeq((e: string) => JsonString(e), errsSeq)), - // The order is nondeterministic, so we'll have to ignore order in the - // differential-testing comparison - "reason" := JsonArray(mapSeq((p: PolicyID) => JsonString(p.id), reasonSeq)) - ]) - ]); - } - - const idFromProdJson := stringDeserializer(s => Ok(Id(s))) - - const nameFromProdJson 
:= - objDeserializer2Fields( - "path", seqDeserializer(idFromProdJson), - "id", idFromProdJson, - (tyPathIds, tyId) => Ok(Name(tyId, tyPathIds))) - - const entitytypeFromProdJson := - sumDeserializer( - map[ - "Specified" := j => var n :- nameFromProdJson(j); Ok(EntityType(n)), - "Unspecified" := _ => Ok(EntityType.UNSPECIFIED) - ]) - - const entityUIDFromProdJson := - objDeserializer2Fields( - "ty", entitytypeFromProdJson, - "eid", getJsonString, - (et, eid) => Ok(EntityUID.EntityUID(et, eid))) - - const binaryOpFromProdJson := - enumDeserializer( - map[ - "Eq" := BinaryOp.Eq, - "Less" := Less, - "LessEq" := LessEq, - "In" := BinaryOp.In, - "Contains" := Contains, - "ContainsAll" := ContainsAll, - "ContainsAny" := ContainsAny, - "Add" := Add, - "Sub" := Sub - ]) - - const unaryOpFromProdJson := - enumDeserializer( - map[ - "Not" := Not, - "Neg" := Neg - ]) - - function maybeValueFromProdJson(j : Json) : FromProdResult> { - maybeFromJson(j, valueFromProdJson) - } - - function maybeFromJson(j : Json, f : Json -> FromProdResult) : FromProdResult> { - match j { - case JsonNull => Ok(None) - case _ => - var v :- f(j); - Ok(Some(v)) - } - } - - function valueFromProdJson(j : Json) : FromProdResult { - var sourceExpr :- exprFromProdJson(j); - var euid := EntityUID.EntityUID(EntityType(Name.fromStr("test")), "test"); - var request := Request(euid, euid, euid, map[]); - var store := EntityStore(map[]); - var eval := Evaluator(request, store); - match eval.interpret(sourceExpr) { - case Ok(v) => Ok(v) - case _ => Err({UnexpectedFromProdErr("expr did not evaluate to a value")}) - } - } - - function exprFromProdJson(j: Json): FromProdResult { - var jkind :- getJsonField(j, "expr_kind"); - var exprFromProdJsonRec := jr requires jr < jkind => exprFromProdJson(jr); - var (tag, body) :- unpackJsonSum(jkind); - match tag { - case "Lit" => - var prim :- deserializeSum( - body, - map[ - "Bool" := boolDeserializer(b => Ok(Primitive.Bool(b))), - "Long" := intDeserializer(i => 
Ok(Primitive.Int(i))), - "String" := stringDeserializer(s => Ok(Primitive.String(s))), - "EntityUID" := bodyDeserializer(entityUIDFromProdJson, - uid => Ok(Primitive.EntityUID(uid))) - ]); - Ok(PrimitiveLit(prim)) - case "Var" => - var theVar :- deserializeEnum( - body, - map[ - "principal" := Var.Principal, - "action" := Action, - "resource" := Var.Resource, - "context" := Context - ]); - Ok(Var(theVar)) - case "If" => - var cond :- deserializeField(body, "test_expr", exprFromProdJsonRec); - var ethen :- deserializeField(body, "then_expr", exprFromProdJsonRec); - var eelse :- deserializeField(body, "else_expr", exprFromProdJsonRec); - Ok(If(cond, ethen, eelse)) - case "And" => - var left :- deserializeField(body, "left", exprFromProdJsonRec); - var right :- deserializeField(body, "right", exprFromProdJsonRec); - Ok(And(left, right)) - case "Or" => - var left :- deserializeField(body, "left", exprFromProdJsonRec); - var right :- deserializeField(body, "right", exprFromProdJsonRec); - Ok(Or(left, right)) - case "UnaryApp" => - var op :- deserializeField(body, "op", unaryOpFromProdJson); - var arg :- deserializeField(body, "arg", exprFromProdJsonRec); - Ok(UnaryApp(op, arg)) - case "BinaryApp" => - var op :- deserializeField(body, "op", binaryOpFromProdJson); - var arg1 :- deserializeField(body, "arg1", exprFromProdJsonRec); - var arg2 :- deserializeField(body, "arg2", exprFromProdJsonRec); - Ok(BinaryApp(op, arg1, arg2)) - case "MulByConst" => - var arg :- deserializeField(body, "arg", exprFromProdJsonRec); - var cons :- deserializeField(body, "constant", getJsonInt); - Ok(UnaryApp(MulBy(cons), arg)) - case "ExtensionFunctionApp" => - var name :- deserializeField(body, "fn_name", nameFromProdJson); - var jargs :- getJsonField(body, "args"); - var args :- deserializeSeq(jargs, exprFromProdJsonRec); - Ok(Expr.Call(name, args)) - case "GetAttr" => - var expr :- deserializeField(body, "expr", exprFromProdJsonRec); - var attr :- deserializeField(body, "attr", 
getJsonString); - Ok(GetAttr(expr, attr)) - case "HasAttr" => - var expr :- deserializeField(body, "expr", exprFromProdJsonRec); - var attr :- deserializeField(body, "attr", getJsonString); - Ok(HasAttr(expr, attr)) - case "Like" => - var expr :- deserializeField(body, "expr", exprFromProdJsonRec); - var pat :- deserializeField(body, "pattern", patternFromProdJson); - Ok(UnaryApp(Like(pat), expr)) - case "Is" => - var expr :- deserializeField(body, "expr", exprFromProdJsonRec); - var ety :- deserializeField(body, "entity_type", nameFromProdJson); - Ok(UnaryApp(UnaryOp.Is(EntityType(ety)), expr)) - case "Set" => - var exprs :- deserializeSeq(body, exprFromProdJsonRec); - Ok(Expr.Set(exprs)) - case "Record" => - var pairs :- deserializeMap( - body, - exprFromProdJsonRec); - Ok(Expr.Record(MapToSortedSeq(pairs))) - case _ => Err({UnexpectedFromProdErr("expr case " + tag)}) - } - } - - // https://github.com/dafny-lang/dafny/issues/3814 would let us write `u is char` instead. - predicate isChar(u: int) { - 0 <= u < 0xD800 || 0xE000 <= u <= 0x10_FFFF - } - - const patElemFromProdJson := - sumDeserializer( - map[ - "Char" := intDeserializer( - u => - if isChar(u) - then - Ok(JustChar(u as char)) - else - Err({UnexpectedFromProdErr("Unicode value out of valid range")})), - "Wildcard" := _ => Ok(Star) - ]) - - const patternFromProdJson := seqDeserializer(patElemFromProdJson) - - // Deserializers for datatypes where the definitional version contains the - // SlotId and the production one doesn't, so we need outside knowledge of the - // SlotId to use. Group them in a datatype to save the boilerplate of passing - // along the `slotId` parameter explicitly. 
- datatype ScopeDeserializers = ScopeDeserializers(slotId: SlotId) { - const entityUIDOrSlotFromProdJson := - sumDeserializer( - map[ - "EUID" := bodyDeserializer(entityUIDFromProdJson, e => Ok(EntityUIDOrSlot.EntityUID(e))), - // The temporary variable is needed to work around a verification issue, - // probably https://github.com/dafny-lang/dafny/issues/2083. - "Slot" := (var d := _ => Ok(EntityUIDOrSlot.Slot(slotId)); d) - ]) - - // Corresponds to production `PrincipalOrResourceConstraint`. - const scopeTemplateFromProdJson := - sumDeserializer( - map[ - "Any" := _ => Ok(ScopeTemplate.Any), - "In" := bodyDeserializer(entityUIDOrSlotFromProdJson, e => Ok(ScopeTemplate.In(e))), - "Eq" := bodyDeserializer(entityUIDOrSlotFromProdJson, e => Ok(ScopeTemplate.Eq(e))), - "Is" := bodyDeserializer(nameFromProdJson, ety => Ok(ScopeTemplate.Is(EntityType(ety)))), - "IsIn" := tupleDeserializer2Elts( - nameFromProdJson, - entityUIDOrSlotFromProdJson, - (ety, e) => Ok(ScopeTemplate.IsIn(EntityType(ety),e)) - ) - ]) - } - - // Corresponds to production `ActionConstraint`. 
- const actionScopeFromProdJson := - sumDeserializer( - map[ - "Any" := _ => Ok(ActionScope(Scope.Any)), - "In" := bodyDeserializer(seqDeserializer(entityUIDFromProdJson), es => Ok(ActionInAny(es))), - "Eq" := bodyDeserializer(entityUIDFromProdJson, e => Ok(ActionScope(Scope.Eq(e)))) - ]) - - const policyTemplateFromProdJson := - objDeserializer5Fields( - "effect", enumDeserializer(map[ - "permit" := Permit, - "forbid" := Forbid - ]), - "principal_constraint", objDeserializer1Field( - "constraint", ScopeDeserializers("?principal").scopeTemplateFromProdJson, - s => Ok(PrincipalScopeTemplate(s))), - "action_constraint", actionScopeFromProdJson, - "resource_constraint", objDeserializer1Field( - "constraint", ScopeDeserializers("?resource").scopeTemplateFromProdJson, - s => Ok(ResourceScopeTemplate(s))), - "non_head_constraints", exprFromProdJson, - (effect, pScope, aScope, rScope, cond) => Ok(PolicyTemplate(effect, pScope, aScope, rScope, cond)) - ) - - function attrsFromProdJsonObject(j: Json): FromProdResult> { - var attr_keys :- getJsonObject(j); - var expr_vals :- mapMapValuesFromProd(exprFromProdJson, attr_keys); - var value_vals :- mapMapValuesFromProd(exprToValue, expr_vals); - Ok(value_vals) - } - - // In the production engine, `EntityUIDEntry` is the data type for a request - // field that is either a "known" EntityUID or "unknown" (for partial - // evaluation). We currently don't support partial evaluation, so we just - // translate the "known" variant to an EntityUID. 
- function getEntityUIDEntryField(request: Json, f: string): FromProdResult { - var json :- getJsonField(request, f); - var known :- getJsonField(json, "Known"); - var euid :- getJsonField(known, "euid"); - entityUIDFromProdJson(euid) - } - - const entityEntryFromProdJson := - tupleDeserializer2Elts( - entityUIDFromProdJson, - objDeserializer2Fields( - "attrs", attrsFromProdJsonObject, - "ancestors", setDeserializer(entityUIDFromProdJson), - (attrs, ancestors) => Ok(EntityData(attrs, ancestors)) - ), - (uid, edata) => Ok((uid, edata)) - ) - - function exprToValue(expr: Expr): FromProdResult { - match evaluate(expr) { - case Some(v) => Ok(v) - case None => Err({InvalidAttrVal}) - } - } - - function buildContext(context_field: Json): FromProdResult { - var as_expr :- exprFromProdJson(context_field); - var value :- exprToValue(as_expr); - match value { - case Record(rcd) => Ok(rcd) - case _ => Err({UnexpectedFromProdErr("Context must be a record")}) - } - } - - const templateLinkedPolicyFromProdJson := - objDeserializer2Fields( - "template_id", stringDeserializer(s => Ok(PolicyTemplateID(s))), - "values", mapDeserializer(entityUIDFromProdJson), - (tid, slotEnv) => Ok(TemplateLinkedPolicy(tid, slotEnv)) - ) - - const policyStoreFromProdJson := - objDeserializer2Fields( - "templates", jtemplates => ( - var templates :- getJsonObject(jtemplates); - var templates1 :- mapMapValuesFromProd(policyTemplateFromProdJson, templates); - Ok(mapMapKeys(s => PolicyTemplateID(s), templates1)) - ), - "links", jlinkedPolicies => ( - var linkedPolicies :- getJsonObject(jlinkedPolicies); - var linkedPolicies1 :- mapMapValuesFromProd(templateLinkedPolicyFromProdJson, linkedPolicies); - Ok(mapMapKeys(s => PolicyID(s), linkedPolicies1)) - ), - (templates, linkedPolicies) => ( - var policyStore := TemplatedPolicyStore(templates, linkedPolicies); - if policyStore.isValid() - then Ok(linkPolicyStore(policyStore)) - else Err({UnexpectedFromProdErr("Invalid policy template link(s)")}) - ) - ) - 
- const evaluatorFromProdJson := - objDeserializer4Fields( - "request", jrequest => ( - var principal :- getEntityUIDEntryField(jrequest, "principal"); - var action :- getEntityUIDEntryField(jrequest, "action"); - var resource :- getEntityUIDEntryField(jrequest, "resource"); - var context :- deserializeField(jrequest, "context", buildContext); - Ok(Request(principal, action, resource, context)) - ), - - "entities", jentities => ( - var entities :- deserializeField(jentities, "entities", seqDeserializer(entityEntryFromProdJson)); - var entitiesMap :- mapFromEntriesProd(entities); - Ok(EntityStore(entitiesMap)) - ), - "expr", jexpr => exprFromProdJson(jexpr), - "expected", jv => maybeValueFromProdJson(jv), - (request, entities, expr, maybe_value) => Ok((Evaluator(request, entities), expr, maybe_value)) - ) - - const authorizerFromProdJson := - objDeserializer3Fields( - "request", jrequest => ( - var principal :- getEntityUIDEntryField(jrequest, "principal"); - var action :- getEntityUIDEntryField(jrequest, "action"); - var resource :- getEntityUIDEntryField(jrequest, "resource"); - // Note: In the production engine, the `context` field is wrapped in an - // `Option` that can be `None` for partial evaluation. But currently, for - // differential testing, the `context` is always `Some`, and the default - // Serde JSON serialization of `Some(x)` is just that of `x` without an - // explicit representation of the `Option` layer, so we don't have to do - // anything additional here. 
- var context :- deserializeField(jrequest, "context", buildContext); - Ok(Request(principal, action, resource, context)) - ), - "entities", jentities => ( - var entities :- deserializeField(jentities, "entities", seqDeserializer(entityEntryFromProdJson)); - var entitiesMap :- mapFromEntriesProd(entities); - Ok(EntityStore(entitiesMap)) - ), - "policies", jpolicySet => policyStoreFromProdJson(jpolicySet), - (request, entityStore, policyStore) => - Ok(Authorizer(request, Store(entityStore, policyStore))) - ) - - function isAuthorizedJson1(request: Json): FromProdResult { - var authorizer :- authorizerFromProdJson(request); - Ok(authorizer.isAuthorized()) - } - - function evalJson1(request : Json) : FromProdResult { - var (eval, expr, maybe_v) :- evaluatorFromProdJson(request); - var r := eval.interpret(expr); - match (r, maybe_v) { - case (Ok(def_answer), Some(prod_answer)) => - if def_answer == prod_answer then - Ok(true) - else - var _ := printMismatch(expr, def_answer, prod_answer); - Ok(false) - case (Err(_), None) => Ok(true) - case (Err(_), Some(v)) => - var _ := printFromFunction("Evaluation errored but prod engine got result: "); - var _ := printFromFunction(v); - Ok(false) - case (Ok(v1), None) => - var _ := printFromFunction("Evaluation return result but prod engine errored: "); - var _ := printFromFunction(v1); - Ok(false) - } - } - - function printMismatch(expr : Expr, def_answer : Value, prod_answer : Value) : () { - var _ := printFromFunction("EVALUATION MISMATCH\n"); - var _ := printFromFunction("Expression: "); - var _ := printFromFunction(expr); - var _ := printFromFunction("Evaluated to: "); - var _ := printFromFunction(def_answer); - var _ := printFromFunction("Production engine got: "); - var _ := printFromFunction(prod_answer); - () - } - - function evalResponseToProdJson(r : FromProdResult) : Json { - JsonObject(match r { - case Ok(b) => map["matches" := JsonBool(b)] - case Err(e) => map["error" := JsonString("JSON Decoding error encountered")] 
- }) - } - - method evalJson(request : Json) returns (response : Json) { - var answer := evalJson1(request); - response := evalResponseToProdJson(answer); - } - - // Main differential-testing entry point: receives Json and responds in Json - method isAuthorizedJson(request: Json) returns (response: Json) { - var answer := isAuthorizedJson1(request); - var ansAndErrors := match answer { - case Ok(ans) => (ans, {}) - case Err(errs) => - (Response(Deny, {}), set e | e in errs && e.UnexpectedFromProdErr? :: e.desc) - }; - response := responseToProdJson(ansAndErrors.0, ansAndErrors.1); - } - - // Note: the types we have to support here are limited to those allowed in - // the Rust SchemaFileFormat, which is more restrictive than our Schema type - function typeFromProdJson(j: Json): FromProdResult { - var typeFromProdJsonRec := jr requires jr < j => typeFromProdJson(jr); - var attrTypesFromProdJsonObjectRec := jr requires jr < j => attrTypesFromProdJsonObject(jr); - var (tag, body) :- unpackJsonSum(j); - match tag { - case "Primitive" => - var ty1 :- getJsonField(body, "primitiveType"); - var ty :- deserializeEnum( - ty1, - map[ - "Bool" := Type.Bool(AnyBool), - "Long" := Type.Int, - "String" := Type.String - ]); - Ok(ty) - case "Set" => - var inner :- deserializeField(body, "elementType", typeFromProdJsonRec); - Ok(Type.Set(inner)) - case "EntityOrRecord" => - var (tag1, body1) :- unpackJsonSum(body); - match tag1 { - case "Record" => - var attrs :- getJsonField(body1, "attrs"); - var attrs1 :- deserializeField(attrs, "attrs", attrTypesFromProdJsonObjectRec); - Ok(Type.Record(RecordType(attrs1, ClosedAttributes))) - case "Entity" => - var lub :- deserializeField(body1, "lub_elements", setDeserializer(nameFromProdJson)); - Ok(Type.Entity(EntityLUB(set e <- lub :: EntityType(e)))) - case _ => Err({UnexpectedFromProdErr("EntityOrRecord case " + tag)}) - } - case "ExtensionType" => - var name :- deserializeField(body, "name", nameFromProdJson); - Ok(Type.Extension(name)) - 
case _ => Err({UnexpectedFromProdErr("Type case " + tag)}) - } - } - - function attrtypeFromProdJson(j: Json): FromProdResult { - var typeFromProdJsonRec := jr requires jr < j => typeFromProdJson(jr); - var attrType :- deserializeField(j, "attrType", typeFromProdJsonRec); - var isRequired :- deserializeField(j, "isRequired", getJsonBool); - Ok(AttrType(attrType,isRequired)) - } - - function attrTypesFromProdJsonObject(j: Json): FromProdResult> { - var attrtypeFromProdJsonRec := jr requires jr < j => attrtypeFromProdJson(jr); - deserializeMap(j, attrtypeFromProdJsonRec) - } - - function entityTypePairFromProdJson(j: Json): FromProdResult<(EntityType, TypecheckerEntityType)> { - deserializeTuple2Elts( - j, - nameFromProdJson, - data => ( - var descendants :- deserializeField(data, "descendants", setDeserializer(nameFromProdJson)); - var descendants1 := set e <- descendants :: EntityType(e); - var attrs :- getJsonField(data, "attributes"); - var attrs1 :- deserializeField(attrs, "attrs", attrTypesFromProdJsonObject); - Ok(TypecheckerEntityType(descendants1, attrs1)) - ), - (ty, et) => Ok((EntityType(ty), et)) - ) - } - - const entitytypeFromProdJsonOption := - sumDeserializer( - map[ - "Specified" := j => var n :- nameFromProdJson(j); Ok(Some(EntityType(n))), - "Unspecified" := _ => Ok(None) - ]) - - function applySpecFromProdJson(j: Json): FromProdResult { - var pas :- deserializeField(j, "principalApplySpec", setDeserializer(entitytypeFromProdJsonOption)); - var ras :- deserializeField(j, "resourceApplySpec", setDeserializer(entitytypeFromProdJsonOption)); - Ok(TypecheckerApplySpec(pas,ras)) - } - - function actionIdPairFromProdJson(j: Json): FromProdResult<(EntityUID, TypecheckerActionId)> { - deserializeTuple2Elts( - j, - entityUIDFromProdJson, - data => ( - var appliesTo :- deserializeField(data, "appliesTo", applySpecFromProdJson); - var descendants :- deserializeField(data, "descendants", setDeserializer(entityUIDFromProdJson)); - var context :- 
deserializeField(data, "context", typeFromProdJson); - match context { - case Record(rty) => Ok(TypecheckerActionId(appliesTo, descendants, rty.attrs)) - case _ => Err({UnexpectedFromProdErr("context should be record-typed")}) - } - ), - (uid, act) => Ok((uid, act)) - ) - } - - const validatorFromProdJson := - objDeserializer3Fields( - "policies", jpolicies => policyStoreFromProdJson(jpolicies), - "schema", jschema => ( - var entityTypes :- deserializeField(jschema, "entityTypes", seqDeserializer(entityTypePairFromProdJson)); - var entityTypesMap :- mapFromEntriesProd(entityTypes); - var actionIds :- deserializeField(jschema, "actionIds", seqDeserializer(actionIdPairFromProdJson)); - var actionIdsMap :- mapFromEntriesProd(actionIds); - Ok(Schema(entityTypesMap, actionIdsMap)) - ), - "mode", jmode => - deserializeEnum(jmode, map[ "Strict" := Strict, "Permissive" := Permissive ]), - (policyStore, schema, mode) => Ok((policyStore,Validator(schema,mode))) - ) - - method validateJson1(request: Json) returns (res: FromProdResult>) { - var policyStoreAndValidator :- validatorFromProdJson(request); - var errs := policyStoreAndValidator.1.Validate(policyStoreAndValidator.0); - return Ok(errs); - } - - function typeErrorToString(e: TypeError): string { - match e { - case LubErr(_,_) => "LubErr" - case SubtyErr(_,_) => "SubtyErr" - case UnexpectedType(_) => "UnexpectedType" - case AttrNotFound(_,_) => "AttrNotFound" - case UnknownEntities(_) => "UnknownEntities" - case ExtensionErr(_) => "ExtensionErr" - case EmptyLUB => "EmptyLUB" - case EmptySetForbidden => "EmptySetForbidden" - case NonLitExtConstructor => "NonLitExtConstructor" - case NonSingletonLub => "NonSingletonLub" - case HierarchyNotRespected => "HierarchyNotRespected" - } - } - - function validationErrorToString(e: ValidationError): string { - match e { - case AllFalse => "AllFalse" - case TypeError(e1) => typeErrorToString(e1) - } - } - - - method validationResToProdJson(errs: seq, parseErrs: set) returns (ja: 
Json) { - var parseErrsSeq := setToSequenceUnordered(parseErrs); - return JsonObject( - map[ - "validationErrors" := JsonArray(mapSeq((e: ValidationError) => JsonString(validationErrorToString(e)), errs)), - "parseErrors" := JsonArray(mapSeq((e: string) => JsonString(e), parseErrsSeq)) - ]); - } - - // Differential testing entry point for validation - method validateJson(request: Json) returns (response: Json) { - var res := validateJson1(request); - var resAndErrors := match res { - case Ok(res1) => (res1, {}) - case Err(errs) => ([], set e | e in errs && e.UnexpectedFromProdErr? :: e.desc)}; - response := validationResToProdJson(resAndErrors.0, resAndErrors.1); - } -} - -module difftest.restrictedExpr { - import opened def.core - import opened def.base - import ext = def.ext.fun - import opened def.engine - import opened def.std - - export - provides - evaluate, - std, - core - - function evaluate(e: Expr): Option - { - match e { - case PrimitiveLit(l) => Some(Primitive(l)) - case Record(r) => - var r' :- evaluateRecord(r); - Some(Value.Record(r')) - case Set(s) => - var s' :- evaluateSet(s); - Some(Value.Set(s')) - case Call(name, args) => - var vs :- evaluateSeq(args); - match Evaluator.applyExtFun(name, vs) { - case Ok(v) => Some(v) - case Err(_) => None - } - case _ => None - } - } - - function evaluateRecord(m: seq<(Attr, Expr)>): Option> - { - if |m| == 0 then Some(map[]) - else - var attr := m[0].0; - var v :- evaluate(m[0].1); - var vs :- evaluateRecord(m[1..]); - Some(vs[attr := v]) - } - - function evaluateSet(es: seq): Option> - { - if |es| == 0 then Some({}) - else - var v :- evaluate(es[0]); - var vs :- evaluateSet(es[1..]); - Some({v}+vs) - } - - function evaluateSeq(es: seq): Option> - { - if |es| == 0 then Some([]) - else - var v :- evaluate(es[0]); - var vs :- evaluateSeq(es[1..]); - Some([v]+vs) - } -} diff --git a/cedar-dafny/test/decimal.dfy b/cedar-dafny/test/decimal.dfy deleted file mode 100644 index dae1b4c4e..000000000 --- 
a/cedar-dafny/test/decimal.dfy +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -include "../def/ext/decimal.dfy" - -module test.decimal { - import opened def.ext.decimal.parseDecimal - method ParseValidStr() { - var validStr := ["-12.34", "1.2345"]; - var i := |validStr| - 1; - while 0 <= i - { - expect Parse(validStr[i]).Some?; - i := i - 1; - } - } - - method ParseInvalidStr() { - var invalidStr := - [ - "-12", // no decimal point - "1.23456", // too many fractional digits - "922337203685477.5808", // overflow - "-922337203685477.5809" // underflow - ]; - var i := |invalidStr| - 1; - while 0 <= i { - expect Parse(invalidStr[i]).None?; - i := i - 1; - } - } - - method {:test} Test() { - ParseValidStr(); - ParseInvalidStr(); - } -} diff --git a/cedar-dafny/test/ipaddr.dfy b/cedar-dafny/test/ipaddr.dfy deleted file mode 100644 index bf2738eb8..000000000 --- a/cedar-dafny/test/ipaddr.dfy +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -include "../def/ext/ipaddr.dfy" - -module test.ipaddr { - import opened def.ext.ipaddr.parseIPAddr - - method parseValidStr() { - var validStr := - [ - "127.0.0.1", - "::", - "::/5", - "F:AE::F:5:F:F:0" // :: represents just one group of zeros - ]; - var i := |validStr| - 1; - while 0 <= i - { - expect parse(validStr[i]).Some?; - i := i - 1; - } - } - - method parseInvalidStr() { - var invalidStr := - [ - "127.0.0.1.", // one more dot - "::::", // too many double colons - "F:AE::F:5:F:F:0:0", // too many groups - "F:A:F:5:F:F:0:0:1", // too many groups - "F:A", // too few groups - "::/01", // leading zeros - "::/001", // leading zeros - "127.0.0.1/01" // leading zeros - ]; - var i := |invalidStr| - 1; - while 0 <= i { - expect parse(invalidStr[i]).None?; - i := i - 1; - } - } - - method loopback() { - var ipv4 := parse("127.0.0.1"); - expect ipv4.Some?; - expect ipv4.value.isLoopback(); - var ipv6 := parse("::B"); - expect ipv6.Some?; - expect !ipv6.value.isLoopback(); - - var ipv6_loopback := parse("::1"); - expect ipv6_loopback.Some?; - expect ipv6_loopback.value.isLoopback(); - - //Currently don't support parsing IPv4 embedded in IPv6 - var ipv4_embeded_in_ipv6 := parse("::ffff:127.0.0.1"); - expect ipv4_embeded_in_ipv6.None?; - - //As in Rust, IPv4 embedded in IPv6 only uses IPv6 loopback - var hex_ipv4_embeded_in_ipv6 := parse("::ffff:ff00:0001"); - expect hex_ipv4_embeded_in_ipv6.Some?; - expect !hex_ipv4_embeded_in_ipv6.value.isLoopback(); - } - - method getAddrValue() { - var ipv4 := parse("192.0.2.235"); - expect ipv4.Some?; - 
expect ipv4.value.getAddrValue() == 3221226219; - var ipv6 := parse("::1:2"); - expect ipv6.Some?; - expect ipv6.value.getAddrValue() == 0x1_0000 + 0x2; - } - - method normalization() { - var ipv4 := parse("127.0.0.1/16"); - expect ipv4.Some?; - var ipv4' := parse("127.0.255.255/16"); - expect ipv4'.Some?; - expect ipv4.value.normalize() == ipv4'.value.normalize(); - var ipv4'' := parse("255.255.255.255/0"); - expect ipv4''.Some?; - expect ipv4''.value.normalize().getAddrValue() == 0; - - var ipv6 := parse("::1/112"); - expect ipv6.Some?; - var ipv6' := parse("::2/112"); - expect ipv6'.Some?; - expect ipv6.value.normalize() == ipv6'.value.normalize(); - var ipv6'' := parse("::1:2/0"); - expect ipv6''.Some?; - expect ipv6''.value.normalize().getAddrValue() == 0; - } - - method testInRange() { - var ip1 := parse("238.238.238.238"); - expect ip1.Some?; - var ip2 := parse("238.238.238.41/12"); - expect ip2.Some?; - print ip1.value, "\n"; - print ip1.value.normalize(); - expect ip1.value.normalize().inRange(ip2.value.normalize()); - } - - method {:test} test() { - parseValidStr(); - parseInvalidStr(); - loopback(); - getAddrValue(); - normalization(); - testInRange(); - } -} diff --git a/cedar-dafny/thm/basic.dfy b/cedar-dafny/thm/basic.dfy deleted file mode 100644 index e5e5765ef..000000000 --- a/cedar-dafny/thm/basic.dfy +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -include "../def/all.dfy" - -module basic { - import opened def.base - import opened def.core - import opened def.engine - - lemma ForbidTrumpsPermit(request: Request, store: Store) - requires // If some forbid policy is satisfied, then - exists f :: - f in store.policies.policies.Keys && - store.policies.policies[f].effect == Forbid && - Authorizer(request, store).satisfied(f) - ensures // the request is denied. - Authorizer(request, store).isAuthorized().decision == Deny - { - var f :| f in Authorizer(request, store).forbids(); - } - - // A request is explicitly permitted when there is at least one permit policy - // that is satisfied. - predicate IsExplicitlyPermitted(request: Request, store: Store) { - exists p :: - p in store.policies.policies.Keys && - store.policies.policies[p].effect == Permit && - Authorizer(request, store).satisfied(p) - } - - lemma AllowedIfExplicitlyPermitted(request: Request, store: Store) - ensures // A request is allowed only if it is explicitly permitted. - Authorizer(request, store).isAuthorized().decision == Allow ==> - IsExplicitlyPermitted(request, store) - { } - - // DefaultDeny is the contrapositive of AllowedIfExplicitlyPermitted - lemma DefaultDeny(request: Request, store: Store) - ensures // If not explicitly permitted, a request is denied - !IsExplicitlyPermitted(request,store) ==> - Authorizer(request, store).isAuthorized().decision == Deny - { } - -} diff --git a/cedar-dafny/thm/eval/basic.dfy b/cedar-dafny/thm/eval/basic.dfy deleted file mode 100644 index 043625f3f..000000000 --- a/cedar-dafny/thm/eval/basic.dfy +++ /dev/null @@ -1,222 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -include "../../def/all.dfy" - -// This module contains lemmas stating the behaviors of Cedar evaluator. -module eval.basic { - import opened def.base - import opened def.core - import opened def.engine - import opened def.util - - lemma EntityInOrEqEntitySemantics(x1: Expr, e1: EntityUID, x2: Expr, e2: EntityUID, E: Evaluator) - requires E.interpret(x1) == Ok(Value.EntityUID(e1)) - requires E.interpret(x2) == Ok(Value.EntityUID(e2)) - requires - E.interpret(BinaryApp(BinaryOp.In, x1, x2)) == Ok(Value.TRUE) || - E.interpret(BinaryApp(BinaryOp.Eq, x1, x2)) == Ok(Value.TRUE) - ensures E.entityInEntity(e1, e2) - { } - - lemma AndSemantics(e1: Expr, e2: Expr, E: Evaluator) - requires E.interpret(And(e1, e2)) == Ok(Value.TRUE) - ensures E.interpret(e1) == Ok(Value.TRUE) - ensures E.interpret(e2) == Ok(Value.TRUE) - { } - - lemma RecordSemanticsOkImpliesAllOk(es: seq<(Attr, Expr)>, E: Evaluator) - requires E.interpretRecord(es).Ok? - ensures forall i :: 0 <= i < |es| ==> es[i].0 in E.interpretRecord(es).value.Keys && E.interpret(es[i].1).Ok? - { - if es == [] { - - } else if E.interpret(es[0].1).Ok? && E.interpretRecord(es[1..]).Ok? { - - } else { - assert E.interpretRecord(es).Err?; - } - } - - lemma RecordSemanticsOkAttrs(es: seq<(Attr, Expr)>, E: Evaluator) - requires E.interpretRecord(es).Ok? 
- ensures E.interpretRecord(es).value.Keys == set e | e in es :: e.0 - { - if es != [] { - RecordSemanticsOkAttrs(es[1..], E); - var rec' := E.interpretRecord(es[1..]).value; - if es[0].0 in rec'.Keys { - assert E.interpretRecord(es).value == rec'; - assert (set e | e in es :: e.0) == (set e | e in es[1..] :: e.0); - } else { - assert E.interpretRecord(es).value == rec'[es[0].0 := E.interpret(es[0].1).value]; - assert (set e | e in es :: e.0) == (set e | e in es[1..] :: e.0) + {es[0].0}; - } - } - } - - lemma RecordSemanticsOkLastofKey(es: seq<(Attr, Expr)>, E: Evaluator) - requires E.interpretRecord(es).Ok? - ensures forall k | k in E.interpretRecord(es).value.Keys :: KeyExists(k,es) && E.interpret(LastOfKey(k,es)) == base.Ok(E.interpretRecord(es).value[k]) - { - if |es| == 0 { - } else { - if E.interpret(es[0].1).Ok? && E.interpretRecord(es[1..]).Ok? { - var rec' := E.interpretRecord(es[1..]).value; - RecordSemanticsOkLastofKey(es[1..], E); - if es[0].0 in rec'.Keys { - assert E.interpretRecord(es).value == rec'; - } else { - RecordSemanticsOkAttrs(es[1..], E); - assert LastOfKey(es[0].0, es) == es[0].1; - } - } else { - assert E.interpretRecord(es).Err?; - } - } - } - - // Surprisingly, this lemma costs 4M RUs. - lemma RecordSemanticsValue(es: seq<(Attr,Expr)>, E: Evaluator) - requires E.interpretRecord(es).Ok? - ensures E.interpret(Expr.Record(es)) == base.Ok(Value.Record(E.interpretRecord(es).value)) - {} - - lemma RecordSemanticsOk(es: seq<(Attr,Expr)>, E: Evaluator) - requires E.interpretRecord(es).Ok? - ensures forall i :: 0 <= i < |es| ==> es[i].0 in E.interpretRecord(es).value.Keys && E.interpret(es[i].1).Ok? 
- ensures forall k | k in E.interpretRecord(es).value.Keys :: KeyExists(k,es) && E.interpret(LastOfKey(k,es)) == base.Ok(E.interpretRecord(es).value[k]) - ensures E.interpret(Expr.Record(es)) == base.Ok(Value.Record(E.interpretRecord(es).value)) - { - RecordSemanticsOkImpliesAllOk(es, E); - RecordSemanticsOkLastofKey(es, E); - RecordSemanticsValue(es, E); - } - - lemma RecordSemanticsErr(es: seq<(Attr,Expr)>, E: Evaluator) - requires E.interpretRecord(es).Err? - ensures exists i: nat :: i < |es| && E.interpret(es[i].1) == base.Err(E.interpretRecord(es).error) && (forall j | 0 <= j < i :: E.interpret(es[j].1).Ok?) - ensures E.interpret(Expr.Record(es)).Err? && E.interpret(Expr.Record(es)).error == E.interpretRecord(es).error - {} - - lemma RecordSemanticsErrRet(es: seq<(Attr,Expr)>, E: Evaluator) returns (i: nat) - requires E.interpretRecord(es).Err? - ensures i < |es| - ensures E.interpret(es[i].1).Err? && E.interpret(es[i].1).error == E.interpretRecord(es).error - ensures forall j | 0 <= j < i :: E.interpret(es[j].1).Ok? - { - RecordSemanticsErr(es, E); - var i': nat :| i' < |es| && E.interpret(es[i'].1) == base.Err(E.interpretRecord(es).error) && (forall j | 0 <= j < i' :: E.interpret(es[j].1).Ok?); - i := i'; - } - - lemma InSetSemantics(e: Expr, es: seq, E: Evaluator) - requires E.interpret(e).Ok? && Value.asEntity(E.interpret(e).value).Ok? - requires forall i: nat | i < |es| :: E.interpret(es[i]).Ok? && Value.asEntity(E.interpret(es[i]).value).Ok? 
- requires forall i: nat | i < |es| :: E.interpret(BinaryApp(BinaryOp.In, e, es[i])) == base.Ok(Value.Bool(false)) - ensures E.interpret(BinaryApp(BinaryOp.In, e, Expr.Set(es))) == base.Ok(Value.Bool(false)) - { - SetSemantics(es, E); - calc == { - E.interpret(BinaryApp(BinaryOp.In, e, Expr.Set(es))); - E.applyBinaryOp(BinaryOp.In, E.interpret(e).value, E.interpret(Expr.Set(es)).value); - E.applyBinaryOp(BinaryOp.In, E.interpret(e).value, E.interpretSet(es).Map(v => Value.Set(v)).value); - } - } - - lemma SetSemanticsOk(es: seq, E: Evaluator) - requires E.interpretSet(es).Ok? - ensures forall i: nat | i < |es| :: E.interpret(es[i]).Ok? - {} - - lemma SetSemantics(es: seq, E: Evaluator) - ensures E.interpretSet(es).Ok? ==> forall v | v in E.interpretSet(es).value :: exists i :: 0 <= i < |es| && E.interpret(es[i]) == base.Ok(v) - ensures (forall e | e in es :: E.interpret(e).Ok?) ==> E.interpretSet(es).Ok? - ensures (exists i :: 0 <= i < |es| && E.interpret(es[i]).Err?) ==> E.interpretSet(es).Err? - ensures E.interpretSet(es).Err? <==> exists i :: 0 <= i < |es| && E.interpret(es[i]).Err? && (forall j | 0 <= j < i :: E.interpret(es[j]).Ok?) - ensures E.interpretSet(es).Err? ==> exists i :: 0 <= i < |es| && E.interpret(es[i]).Err? && E.interpret(es[i]).error == E.interpretSet(es).error && (forall j | 0 <= j < i :: E.interpret(es[j]).Ok?) - {} - - lemma ListSemanticsOk(es: seq, E: Evaluator) - requires forall i | 0 <= i < |es| :: E.interpret(es[i]).Ok? - ensures E.interpretList(es).Ok? - ensures |E.interpretList(es).value| == |es| - ensures forall i | 0 <= i < |es| :: E.interpret(es[i]) == base.Ok(E.interpretList(es).value[i]) - { - ListSemantics(es, E); - } - - lemma ListSemanticsErrRet(es: seq, E: Evaluator) returns(i: nat) - requires exists i: nat | i < |es| :: E.interpret(es[i]).Err? - ensures i < |es| - ensures E.interpret(es[i]).Err? - ensures E.interpretList(es).Err? 
- ensures E.interpretList(es).error == E.interpret(es[i]).error - { - ListSemanticsErr(es, E); - var i':nat :| i' < |es| && E.interpret(es[i']).Err? && E.interpret(es[i']).error == E.interpretList(es).error; - i := i'; - } - - lemma ListSemanticsErr(es: seq, E: Evaluator) - requires exists i | 0 <= i < |es| :: E.interpret(es[i]).Err? - ensures E.interpretList(es).Err? - ensures exists i :: 0 <= i < |es| && E.interpret(es[i]).Err? && (forall j | 0 <= j < i :: E.interpret(es[j]).Ok?) && E.interpret(es[i]).error == E.interpretList(es).error - { - ListSemantics(es, E); - } - - lemma ListSemantics(es: seq, E: Evaluator) - ensures E.interpretList(es).Ok? ==> - (|E.interpretList(es).value| == |es| && - forall i | 0 <= i < |es| :: E.interpret(es[i]) == base.Ok(E.interpretList(es).value[i])) - ensures (forall e | e in es :: E.interpret(e).Ok?) ==> E.interpretList(es).Ok? - ensures (exists i :: 0 <= i < |es| && E.interpret(es[i]).Err?) ==> E.interpretList(es).Err? - ensures E.interpretList(es).Err? <==> exists i :: 0 <= i < |es| && E.interpret(es[i]).Err? && (forall j | 0 <= j < i :: E.interpret(es[j]).Ok?) - ensures E.interpretList(es).Err? ==> exists i :: 0 <= i < |es| && E.interpret(es[i]).Err? && E.interpret(es[i]).error == E.interpretList(es).error && (forall j | 0 <= j < i :: E.interpret(es[j]).Ok?) - {} - - lemma BinaryAppSemanticsOk(e1: Expr, e2: Expr, op: BinaryOp, E: Evaluator) - requires E.interpret(e1).Ok? - requires E.interpret(e2).Ok? - ensures E.interpret(BinaryApp(op, e1, e2)) == E.applyBinaryOp(op, E.interpret(e1).value, E.interpret(e2).value) - {} - - lemma BinaryAppSemanticsErrLeft(e1: Expr, e2: Expr, op: BinaryOp, E: Evaluator) - requires E.interpret(e1).Err? - ensures E.interpret(BinaryApp(op, e1, e2)).Err? - ensures E.interpret(e1).error == E.interpret(BinaryApp(op, e1, e2)).error - {} - - lemma BinaryAppSemanticsErrRight(e1: Expr, e2: Expr, op: BinaryOp, E: Evaluator) - requires E.interpret(e1).Ok? - requires E.interpret(e2).Err? 
- ensures E.interpret(BinaryApp(op, e1, e2)).Err? - ensures E.interpret(e2).error == E.interpret(BinaryApp(op, e1, e2)).error - {} - - lemma CallWithOkArgs(name: Name, args: seq, E: Evaluator) - requires E.interpretList(args).Ok? - ensures E.interpret(Call(name, args)) == E.applyExtFun(name, E.interpretList(args).value) - {} - - lemma CallWithErrArgs(name: Name, args: seq, E: Evaluator) - requires E.interpretList(args).Err? - ensures E.interpret(Call(name, args)).Err? - ensures E.interpret(Call(name, args)).error == E.interpretList(args).error - {} - -} diff --git a/cedar-dafny/thm/pslicing.dfy b/cedar-dafny/thm/pslicing.dfy deleted file mode 100644 index 7a6d71733..000000000 --- a/cedar-dafny/thm/pslicing.dfy +++ /dev/null @@ -1,207 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -include "../def/all.dfy" -include "slicing.dfy" -include "eval/basic.dfy" - -// This module proves it is sound to slice policies based on head constraints -// (see AuthorizationIsCorrectForHeadBasedPolicySlicing and -// HeadBasedSlicingIsSound). 
The proof is based on a more general lemma -// (TargetBasedSlicingIsSound) that covers all forms of slicing that are based -// on identifying "target" principal and resource entities (if any) for a -// policy, such that the policy evaluates to true on an input only if the -// request principal and resource are descendents of the corresponding target -// entities. Currently, we are extracting targets only from the head -// constraints. The general lemma also covers more sophisticated analyses that -// can extract targets from policy conditions as well. -module pslicing { - import opened def.base - import opened def.core - import opened def.engine - import opened def.std - import opened slicing - import opened eval.basic - - // Optional target principal and resource entities for a policy. - datatype Target = - Target( - principal: Option, - resource: Option) - { - predicate satisfiedBy(request: Request, store: EntityStore) - { - var eval := Evaluator(request, store); - (principal.None? || - eval.entityInEntity(request.principal, principal.value)) && - (resource.None? || - eval.entityInEntity(request.resource, resource.value)) - } - } - - // A target analysis takes as input a policy and returns Target. - type TargetAnalysis = Policy -> Target - - // A slicing function takes as input a policy and returns true iff - // the policy should be included in a slice. - type Slicer = Policy -> bool - - // Defines what it means for a target to be sound for a policy. Note that it's - // always okay to specify no target entities. When specified, it must be the - // case that the policy implies the membership of the principal or resource - // variable in the corresponding target entities. - ghost predicate isSoundTarget(tgt: Target, p: Policy) { - forall request: Request, store: EntityStore | - Evaluator(request, store).interpret(p.toExpr()) == Ok(Value.TRUE) :: - tgt.satisfiedBy(request, store) - } - - // A target analysis is sound if it produces sound targets for all policies. 
- ghost predicate isSoundTargetAnalysis(ta: TargetAnalysis) { - forall p: Policy :: isSoundTarget(ta(p), p) - } - - // Takes a target analysis, request, and principal / resource ancestors, and - // returns a slicer that can be passed as input to slicePolicies. - function targetSlicer( - ta: TargetAnalysis, - request: Request, - store: EntityStore): - Slicer - { - (p: Policy) => ta(p).satisfiedBy(request, store) - } - - function slicePolicies( - store: PolicyStore, - slicer: Slicer): (slice: PolicyStore) - ensures isSliceOfPolicyStore(slice, store) - { - PolicyStore( - map pid | - pid in store.policies.Keys && - slicer(store.policies[pid]) :: - store.policies[pid] - ) - } - - // ----- Soundness of policy slicing using a sound target analysis ----- // - - // When based on a sound target analysis, policy slicing returns a - // sound policy slice. - lemma TargetBasedSlicingIsSound(ta: TargetAnalysis, request: Request, slice: Store, store: Store) - requires isSoundTargetAnalysis(ta) - requires slice.entities == store.entities - requires slice.policies == slicePolicies(store.policies, targetSlicer(ta, request, store.entities)) - ensures isSoundSliceForRequest(request, slice, store) - { - forall pid | pid in store.policies.policies.Keys && pid !in slice.policies.policies.Keys - { - TargetBasedSlicingIsSoundAux(pid, ta(store.policies.policies[pid]), request, store); - } - } - - lemma TargetBasedSlicingIsSoundAux(pid: PolicyID, tgt: Target, request: Request, store: Store) - requires pid in store.policies.policies.Keys - requires isSoundTarget(tgt, store.policies.policies[pid]) - requires !tgt.satisfiedBy(request, store.entities) - ensures !Authorizer(request, store).satisfied(pid) - { - var eval := Evaluator(request, store.entities); - var p := store.policies.policies[pid]; - assert - (tgt.principal.Some? && - !eval.entityInEntity(request.principal, tgt.principal.value)) || - (tgt.resource.Some? 
&& - !eval.entityInEntity(request.resource, tgt.resource.value)); - assert eval.interpret(p.toExpr()) != Ok(Value.TRUE); - } - - // ----- Soundness of policy slicing based on policy head targets ----- // - - function headBasedTarget(p: Policy): Target { - Target( - if p.principalScope.scope.Any? || p.principalScope.scope.Is? then None else Some(p.principalScope.scope.entity), - if p.resourceScope.scope.Any? || p.resourceScope.scope.Is? then None else Some(p.resourceScope.scope.entity)) - } - - function headBasedPolicySlice(request: Request, store: Store): PolicyStore { - slicePolicies(store.policies, targetSlicer(headBasedTarget, request, store.entities)) - } - - lemma AuthorizationIsCorrectForHeadBasedPolicySlicing(request: Request, slice: Store, store: Store) - requires slice.entities == store.entities - requires slice.policies == headBasedPolicySlice(request, store) - ensures Authorizer(request, slice).isAuthorized() == Authorizer(request, store).isAuthorized() - { - HeadBasedSlicingIsSound(request, slice, store); - AuthorizationIsCorrectForSoundSlicing(request, slice, store); - } - - lemma HeadBasedSlicingIsSound(request: Request, slice: Store, store: Store) - requires slice.entities == store.entities - requires slice.policies == headBasedPolicySlice(request, store) - ensures isSoundSliceForRequest(request, slice, store) - { - forall p: Policy, q: Request, s: EntityStore | - Evaluator(q, s).interpret(p.toExpr()) == Ok(Value.TRUE) - { - HeadBasedTargetIsSound(p, q, s); - } - TargetBasedSlicingIsSound(headBasedTarget, request, slice, store); - } - - lemma HeadBasedTargetIsSound(p: Policy, request: Request, store: EntityStore) - requires Evaluator(request, store).interpret(p.toExpr()) == Ok(Value.TRUE) - ensures - var tgt := headBasedTarget(p); - tgt.satisfiedBy(request, store) - { - var tgt := headBasedTarget(p); - var eval := Evaluator(request, store); - PolicyConditionImpliesHead(p, eval); - if tgt.principal.Some? 
{ - EntityInOrEqEntitySemantics( - Var(Var.Principal), - eval.request.principal, - PrimitiveLit(Primitive.EntityUID(p.principalScope.scope.entity)), - p.principalScope.scope.entity, - eval); - } - if tgt.resource.Some? { - EntityInOrEqEntitySemantics( - Var(Var.Resource), - eval.request.resource, - PrimitiveLit(Primitive.EntityUID(p.resourceScope.scope.entity)), - p.resourceScope.scope.entity, - eval); - } - } - - lemma PolicyConditionImpliesHead(p: Policy, eval: Evaluator) - requires eval.interpret(p.toExpr()) == Ok(Value.TRUE) - ensures eval.interpret(p.principalScope.toExpr()) == Ok(Value.TRUE) - ensures eval.interpret(p.resourceScope.toExpr()) == Ok(Value.TRUE) - { - var e1 := And(p.resourceScope.toExpr(), p.condition); - var e2 := And(p.actionScope.toExpr(), e1); - AndSemantics(p.principalScope.toExpr(), e2, eval); - AndSemantics(p.actionScope.toExpr(), e1, eval); - AndSemantics(p.resourceScope.toExpr(), p.condition, eval); - } - - -} diff --git a/cedar-dafny/thm/slicing.dfy b/cedar-dafny/thm/slicing.dfy deleted file mode 100644 index e1dbe8284..000000000 --- a/cedar-dafny/thm/slicing.dfy +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -include "../def/all.dfy" - -module slicing { - import opened def.base - import opened def.core - import opened def.engine - - - ghost predicate isSliceOfPolicyStore(slice: PolicyStore, store: PolicyStore) { - slice.policies.Keys <= store.policies.Keys && - (forall pid | - pid in slice.policies.Keys :: - slice.policies[pid] == store.policies[pid]) - } - - ghost predicate isSoundSliceForRequest(request: Request, slice: Store, store: Store) { - isSliceOfPolicyStore(slice.policies, store.policies) && - (forall pid | - (pid in store.policies.policies.Keys && pid !in slice.policies.policies.Keys) :: - !Authorizer(request, store).satisfied(pid)) && - (forall pid | - pid in slice.policies.policies.Keys :: - Authorizer(request, slice).satisfied(pid) == Authorizer(request, store).satisfied(pid)) - } - - lemma AuthorizationIsCorrectForSoundSlicing(request: Request, slice: Store, store: Store) - requires isSoundSliceForRequest(request, slice, store) - ensures Authorizer(request, slice).isAuthorized() == Authorizer(request, store).isAuthorized() - { - ForbidsEqv(request, slice, store); - PermitsEqv(request, slice, store); - } - - lemma ForbidsEqv(request: Request, slice: Store, store: Store) - requires isSoundSliceForRequest(request, slice, store) - ensures Authorizer(request, slice).forbids() == Authorizer(request, store).forbids() - { } - - lemma PermitsEqv(request: Request, slice: Store, store: Store) - requires isSoundSliceForRequest(request, slice, store) - ensures Authorizer(request, slice).permits() == Authorizer(request, store).permits() - { } - -} diff --git a/cedar-dafny/validation/all.dfy b/cedar-dafny/validation/all.dfy deleted file mode 100644 index c8c63b4b9..000000000 --- a/cedar-dafny/validation/all.dfy +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -include "subtyping.dfy" -include "typechecker.dfy" -include "types.dfy" -include "validator.dfy" diff --git a/cedar-dafny/validation/ext.dfy b/cedar-dafny/validation/ext.dfy deleted file mode 100644 index 9782f947a..000000000 --- a/cedar-dafny/validation/ext.dfy +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -include "../def/all.dfy" -include "ext/decimal.dfy" -include "ext/ipaddr.dfy" - -module validation.ext { - import opened def.base - import opened types - - // Returns the map from extension function names to their types. 
- function register(): map - { - decimal.register() + ipaddr.register() - } - - const extFunTypes: map := register() -} diff --git a/cedar-dafny/validation/ext/decimal.dfy b/cedar-dafny/validation/ext/decimal.dfy deleted file mode 100644 index bc30ba389..000000000 --- a/cedar-dafny/validation/ext/decimal.dfy +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -include "../../def/all.dfy" -include "../types.dfy" - -module validation.ext.decimal { - import opened def.std - import opened def.base - import opened def.core - import opened def.ext.decimal.parseDecimal - import opened types - - // Returns the map from Decimal extension function names to their types. 
- function register(): map - { - var D := Type.Extension(Name.fromStr("decimal")); - map[ - Name.fromStr("decimal") := ExtFunType([Type.String],D,Some(checkDecimalArgs)), - Name.fromStr("lessThan") := ExtFunType([D,D],Type.Bool(AnyBool),None), - Name.fromStr("lessThanOrEqual") := ExtFunType([D,D],Type.Bool(AnyBool),None), - Name.fromStr("greaterThan") := ExtFunType([D,D],Type.Bool(AnyBool),None), - Name.fromStr("greaterThanOrEqual") := ExtFunType([D,D],Type.Bool(AnyBool),None) - ] - } - - function checkDecimalArgs(args: seq): types.Result<()> - { - if |args| != 1 then Ok(()) - else match args[0] { - case PrimitiveLit(String(s)) => - match Parse(s) { - case None => Err(ExtensionErr(Call(Name.fromStr("decimal"),args))) - case Some(_) => Ok(()) - } - case _ => Ok(()) - } - } -} diff --git a/cedar-dafny/validation/ext/ipaddr.dfy b/cedar-dafny/validation/ext/ipaddr.dfy deleted file mode 100644 index b222c1475..000000000 --- a/cedar-dafny/validation/ext/ipaddr.dfy +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -include "../../def/all.dfy" -include "../types.dfy" - -module validation.ext.ipaddr { - import opened def.std - import opened def.base - import opened def.core - import opened def.ext.ipaddr.parseIPAddr - import opened types - - // Returns the map from IPAddr extension function names to their types. 
- function register(): map - { - var I := Type.Extension(Name.fromStr("ipaddr")); - map[ - Name.fromStr("ip") := ExtFunType([Type.String],I,Some(checkIpArgs)), - Name.fromStr("isIpv4") := ExtFunType([I],Type.Bool(AnyBool),None), - Name.fromStr("isIpv6") := ExtFunType([I],Type.Bool(AnyBool),None), - Name.fromStr("isLoopback") := ExtFunType([I],Type.Bool(AnyBool),None), - Name.fromStr("isMulticast") := ExtFunType([I],Type.Bool(AnyBool),None), - Name.fromStr("isInRange") := ExtFunType([I,I],Type.Bool(AnyBool),None) - ] - } - - function checkIpArgs(args: seq): types.Result<()> - { - if |args| != 1 then Ok(()) - else match args[0] { - case PrimitiveLit(String(s)) => - match parse(s) { - case None => Err(ExtensionErr(Call(Name.fromStr("ip"),args))) - case Some(_) => Ok(()) - } - case _ => Ok(()) - } - } -} diff --git a/cedar-dafny/validation/subtyping.dfy b/cedar-dafny/validation/subtyping.dfy deleted file mode 100644 index 43354035a..000000000 --- a/cedar-dafny/validation/subtyping.dfy +++ /dev/null @@ -1,366 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -include "types.dfy" - -module validation.subtyping { - import opened types - - predicate subtyBool(b1: BoolType, b2: BoolType) { - match (b1,b2) { - case (_,AnyBool) => true - case (True,True) => true - case (False,False) => true - case _ => false - } - } - - predicate subtyAttrType(a1: AttrType, a2: AttrType, m: ValidationMode) { - subty(a1.ty, a2.ty, m) && (a2.isRequired ==> a1.isRequired) - } - - predicate subtyRecordType(rt1: RecordType, rt2: RecordType, m: ValidationMode) - decreases Type.Record(rt1) , Type.Record(rt2) , 0 - { - (rt1.isOpen() ==> rt2.isOpen()) && - // width subtyping - rt2.attrs.Keys <= rt1.attrs.Keys && - // depth subtyping - (forall k | k in rt2.attrs.Keys :: - subtyAttrType(rt1.attrs[k], rt2.attrs[k], m)) && - // disable width subtyping if `rt2` is closed or we are in strict mode. - ((!rt2.isOpen() || m.isStrict()) ==> rt1.attrs.Keys == rt2.attrs.Keys) - } - - predicate subtyEntity(lub1: EntityLUB, lub2: EntityLUB, m:ValidationMode) { - if m.isStrict() - then lub1 == lub2 - else lub1.subset(lub2) - } - - predicate subty(t1: Type, t2: Type, m: ValidationMode) - decreases t1, t2 - { - match (t1,t2) { - case (Never,_) => true - case (String,String) => true - case (Int,Int) => true - case (Bool(b1),Bool(b2)) => subtyBool(b1,b2) - case (Set(t11),Set(t21)) => subty(t11,t21,m) - case (Record(rt1),Record(rt2)) => subtyRecordType(rt1,rt2,m) - case (Entity(lub1),Entity(lub2)) => subtyEntity(lub1,lub2,m) - case (Extension(e1),Extension(e2)) => e1 == e2 - case _ => false - } - } - - function lubBool(b1: BoolType, b2: BoolType): BoolType { - match (b1,b2) { - case (True,True) => True - case (False,False) => False - case _ => AnyBool - } - } - - function lubEntity(lub1: EntityLUB, lub2: EntityLUB, m: ValidationMode): Result { - if m.isStrict() && lub1 != lub2 - then Err(LubErr(Type.Entity(lub1), Type.Entity(lub2))) - else Ok(lub1.union(lub2)) - } - - function lubAttrType(a1: AttrType, a2: AttrType, m: ValidationMode): AttrType - requires 
lubOpt(a1.ty, a2.ty, m).Ok? - { - AttrType(lubOpt(a1.ty, a2.ty, m).value, a1.isRequired && a2.isRequired) - } - - // In permissive mode, this function produces a valid lub for any two maps, including ones that - // are inconsistent. For example: the upper bound of { foo: Int } and - // { foo: String } is the empty map type {}. This decision was made for the - // sake of consistency with the Rust production implementation. - // In strict mode, a lub does not exist for the case described above. A lub - // will also not exist if any field exists in one record type without existing - // in the other. - function lubRecordType(rt1: RecordType, rt2: RecordType, m: ValidationMode): Result - decreases Type.Record(rt1) , Type.Record(rt2) , 0 - { - var attrs := - map k | k in rt1.attrs.Keys && k in rt2.attrs.Keys && lubOpt(rt1.attrs[k].ty, rt2.attrs[k].ty, m).Ok? :: - lubAttrType(rt1.attrs[k], rt2.attrs[k], m); - var lubDropsAttr := attrs.Keys != (rt1.attrs.Keys + rt2.attrs.Keys); - if m.isStrict() && lubDropsAttr - then Err(LubErr(Type.Record(rt1), Type.Record(rt2))) - else - var openTag := if rt1.isOpen() || rt2.isOpen() || lubDropsAttr then OpenAttributes else ClosedAttributes; - Ok(RecordType(attrs, openTag)) - } - - function lubRecordTypeSeq(rts: seq, m: ValidationMode): Result - { - if rts == [] then Err(EmptyLUB) - else if |rts| == 1 then Ok(rts[0]) - else - var res :- lubRecordTypeSeq(rts[1..],m); - lubRecordType(rts[0],res,m) - } - - function lubOpt(t1: Type, t2: Type, m: ValidationMode): Result - decreases t1, t2 , 1 - { - match (t1,t2) { - case (Never,_) => Ok(t2) - case (_,Never) => Ok(t1) - case (String,String) => Ok(Type.String) - case (Int,Int) => Ok(Type.Int) - case (Bool(b1),Bool(b2)) => Ok(Type.Bool(lubBool(b1,b2))) - case (Entity(lub1),Entity(lub2)) => - var elub :- lubEntity(lub1,lub2, m); - Ok(Type.Entity(elub)) - case (Set(t11),Set(t12)) => - var t :- lubOpt(t11,t12, m); - Ok(Type.Set(t)) - case(Record(rt1),Record(rt2)) => - var rtlub :- 
lubRecordType(rt1,rt2,m); - Ok(Type.Record(rtlub)) - case (Extension(e1),Extension(e2)) => - if e1 == e2 then Ok(Extension(e1)) - else Err(LubErr(t1,t2)) - case _ => Err(LubErr(t1,t2)) - } - } - - ghost predicate LubDefined(t1: Type,t2: Type, m: ValidationMode) { - match lubOpt(t1,t2, m) { - case Ok(_) => true - case _ => false - } - } - - function lub(t1: Type, t2: Type, m: ValidationMode): Type - requires LubDefined(t1,t2,m) - { - match lubOpt(t1,t2,m) { - case Ok(t) => t - } - } - - lemma SubtyRefl(t: Type) - ensures forall m :: subty(t,t, m) - { - match t { - case Record(rt) => SubtyRecordTypeRefl(rt); - case _ => - } - } - - lemma SubtyRecordTypeRefl(rt: RecordType) - ensures forall m :: subtyRecordType(rt, rt, m) - { - forall m, k | k in rt.attrs.Keys ensures subtyAttrType(rt.attrs[k], rt.attrs[k], m) { - SubtyRefl(rt.attrs[k].ty); - } - } - - lemma SubtyRecordTypeTrans(rt1: RecordType, rt2: RecordType, rt3: RecordType, m: ValidationMode) - requires subtyRecordType(rt1,rt2,m) - requires subtyRecordType(rt2,rt3,m) - ensures subtyRecordType(rt1,rt3,m) - decreases Type.Record(rt1) , Type.Record(rt2) , Type.Record(rt3) , 0 - { - assert rt3.attrs.Keys <= rt1.attrs.Keys; - forall k | k in rt3.attrs.Keys - ensures subty(rt1.attrs[k].ty, rt3.attrs[k].ty, m) - ensures rt3.attrs[k].isRequired ==> rt1.attrs[k].isRequired - { - assert subty(rt1.attrs[k].ty, rt2.attrs[k].ty, m); - assert subty(rt2.attrs[k].ty, rt3.attrs[k].ty, m); - SubtyTrans(rt1.attrs[k].ty, rt2.attrs[k].ty, rt3.attrs[k].ty, m); - } - } - - lemma SubtyTrans(t1: Type, t2: Type, t3: Type, m: ValidationMode) - requires subty(t1,t2,m) - requires subty(t2,t3,m) - ensures subty(t1,t3,m) - decreases t1, t2, t3 - { - match (t1,t2,t3) { - case (Record(rt1),Record(rt2),Record(rt3)) => SubtyRecordTypeTrans(rt1,rt2,rt3,m); - case _ => - } - } - - lemma LubIsUB(t1: Type, t2: Type, t: Type, m: ValidationMode) - requires lubOpt(t1,t2,m) == Ok(t) - ensures subty(t1,t,m) - ensures subty(t2,t,m) - { - match (t1,t2,t) { - 
case (Never,_,_) => assert t2 == t; SubtyRefl(t); - case (_,Never,_) => assert t1 == t; SubtyRefl(t); - case (Int,Int,Int) => - case (String,String,String) => - case(Bool(b1),Bool(b2),Bool(bt)) => - case (Entity(e1),Entity(e2),Entity(e)) => - case (Set(t1'),Set(t2'),Set(t')) => LubIsUB(t1',t2',t',m); - case(Record(rt1'),Record(rt2'),Record(rt')) => - assert rt'.attrs.Keys <= rt1'.attrs.Keys; - assert rt'.attrs.Keys <= rt2'.attrs.Keys; - assert subty(Type.Record(rt1'),Type.Record(rt'),m) by { - forall k | k in rt'.attrs.Keys - ensures subtyAttrType(rt1'.attrs[k],rt'.attrs[k],m) - { - assert rt'.attrs[k] == lubAttrType(rt1'.attrs[k],rt2'.attrs[k],m); - assert lubOpt(rt1'.attrs[k].ty,rt2'.attrs[k].ty,m) == Ok(rt'.attrs[k].ty); - LubIsUB(rt1'.attrs[k].ty,rt2'.attrs[k].ty,rt'.attrs[k].ty,m); - } - } - assert subty(Type.Record(rt2'),Type.Record(rt'),m) by { - forall k | k in rt'.attrs.Keys - ensures subtyAttrType(rt2'.attrs[k],rt'.attrs[k],m) - { - assert rt'.attrs[k] == lubAttrType(rt1'.attrs[k],rt2'.attrs[k],m); - assert lubOpt(rt1'.attrs[k].ty,rt2'.attrs[k].ty,m) == Ok(rt'.attrs[k].ty); - LubIsUB(rt1'.attrs[k].ty,rt2'.attrs[k].ty,rt'.attrs[k].ty,m); - } - } - case (Extension(n1),Extension(n2),Extension(n)) => - } - } - - lemma LubUndefUbUndef(t1 : Type, t2 : Type, t : Type, m: ValidationMode) - requires !LubDefined(t1, t2, m) - ensures !subty(t1, t, m) || !subty(t2, t, m) - { - match t { - case Never => - case Int => - case String => - case Bool(b) => - case Entity(e) => - case Set(e) => { - if t1.Set? && t2.Set? 
{ - LubUndefUbUndef(t1.ty, t2.ty, e, m); - } - } - case Record(rt) => { - match (t1, t2) { - case (Record(rt1), Record(rt2)) => { - if m.isStrict() { - assert lubRecordType(rt1, rt2, ValidationMode.Strict).Err?; - - if exists k | k in rt1.attrs.Keys && k in rt2.attrs.Keys :: !LubDefined(rt1.attrs[k].ty, rt2.attrs[k].ty, m) { - var k :| k in rt1.attrs.Keys && k in rt2.attrs.Keys && !LubDefined(rt1.attrs[k].ty, rt2.attrs[k].ty, m); - if k in rt.attrs.Keys { - assert (exists k' | k' in rt.attrs.Keys && k' in rt1.attrs.Keys :: !subtyAttrType(rt1.attrs[k'], rt.attrs[k'], m)) || - (exists k' | k' in rt.attrs.Keys && k' in rt2.attrs.Keys :: !subtyAttrType(rt2.attrs[k'], rt.attrs[k'], m)); - } - } - } else { - assert lubRecordType(rt1, rt2, ValidationMode.Permissive).Ok?; - } - } - case _ => - } - } - case Extension(e) => - } - } - - lemma StrictSubtyIsStrict(t1: Type, t2: Type) - requires subty(t1, t2, ValidationMode.Strict) - ensures subty(t1, t2, ValidationMode.Permissive) - { - match (t1,t2) { - case (Record(rt1),Record(rt2)) => { - if(rt2.attrs.Keys <= rt1.attrs.Keys) { - if ! 
(forall k | k in rt2.attrs.Keys :: subtyAttrType(rt1.attrs[k], rt2.attrs[k], ValidationMode.Permissive)) { - assert exists k | k in rt2.attrs.Keys :: !subtyAttrType(rt1.attrs[k], rt2.attrs[k], ValidationMode.Permissive); - assert exists k | k in rt2.attrs.Keys :: !subtyAttrType(rt1.attrs[k], rt2.attrs[k], ValidationMode.Strict); - } - } - } - case _ => - } - } - - lemma StrictLubIsStrict(t1: Type, t2: Type) - requires LubDefined(t1, t2, ValidationMode.Strict) - ensures lubOpt(t1, t2, ValidationMode.Permissive) == lubOpt(t1, t2, ValidationMode.Strict) - { - match (t1,t2) { - case (Never,_) => - case (_,Never) => - case (Int,Int) => - case (String,String) => - case (Bool(b1),Bool(b2)) => - case (Entity(e1),Entity(e2)) => - case (Set(t1'),Set(t2')) => - case (Record(rt1),Record(rt2)) => { - assert lubRecordType(rt1, rt2, ValidationMode.Strict).Ok?; - assert lubRecordType(rt1, rt2, ValidationMode.Permissive).Ok?; - - var strict_attrs := - map k | k in rt1.attrs.Keys && k in rt2.attrs.Keys && lubOpt(rt1.attrs[k].ty, rt2.attrs[k].ty, ValidationMode.Strict).Ok? :: - lubAttrType(rt1.attrs[k], rt2.attrs[k], ValidationMode.Strict); - assert strict_attrs == lubRecordType(rt1, rt2, ValidationMode.Strict).value.attrs; - - var permissive_attrs := - map k | k in rt1.attrs.Keys && k in rt2.attrs.Keys && lubOpt(rt1.attrs[k].ty, rt2.attrs[k].ty, ValidationMode.Permissive).Ok? :: - lubAttrType(rt1.attrs[k], rt2.attrs[k], ValidationMode.Permissive); - assert permissive_attrs == lubRecordType(rt1, rt2, ValidationMode.Permissive).value.attrs; - - assert permissive_attrs == strict_attrs; - } - case (Extension(n1),Extension(n2)) => - } - } - - // Proof that the LUB of two strict types is also strict. Additionally proves - // that the LUB is still strict if one of `t1` or `t2` is `Never` (not a - // strict type) which is required for proving that strict typechecking infers - // a strict type for sets. 
- lemma StrictTypeLub(t1: Type, t2: Type) - requires t1.isStrictType() || t1 == Never - requires t2.isStrictType() || t2 == Never - requires t1 != Never || t2 != Never - requires LubDefined(t1, t2, ValidationMode.Strict) - ensures lub(t1, t2, ValidationMode.Strict).isStrictType() - { - match (t1,t2) { - case (Never,_) => - case (_,Never) => - case (Int,Int) => - case (String,String) => - case (Bool(b1),Bool(b2)) => - case (Entity(e1),Entity(e2)) => assert e1 == e1.union(e2); - case (Set(t1'),Set(t2')) => StrictTypeLub(t1', t2'); - case (Record(rt1),Record(rt2)) => { - var strict_attrs := - map k | k in rt1.attrs.Keys && k in rt2.attrs.Keys && lubOpt(rt1.attrs[k].ty, rt2.attrs[k].ty, ValidationMode.Strict).Ok? :: - lubAttrType(rt1.attrs[k], rt2.attrs[k], ValidationMode.Strict); - assert strict_attrs == lubRecordType(rt1, rt2, ValidationMode.Strict).value.attrs; - - forall k | k in strict_attrs.Keys - ensures strict_attrs[k].ty.isStrictType() - { - StrictTypeLub(rt1.attrs[k].ty, rt2.attrs[k].ty); - } - } - case (Extension(n1),Extension(n2)) => - } - } -} diff --git a/cedar-dafny/validation/thm/base.dfy b/cedar-dafny/validation/thm/base.dfy deleted file mode 100644 index ce2a2dfeb..000000000 --- a/cedar-dafny/validation/thm/base.dfy +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -include "../../def/all.dfy" -include "../all.dfy" - -// This module contains the basic definitions used to state type soundness. -module validation.thm.base { - import opened def.base - import opened def.core - import opened def.engine - import opened types - import opened subtyping - import opened typechecker - - function Evaluate(e: Expr, r: Request, s: EntityStore): base.Result - { - Evaluator(r, s).interpret(e) - } - - const unspecifiedPrincipalEuid := EntityUID.EntityUID(EntityType.UNSPECIFIED, "principal") - const unspecifiedResourceEuid := EntityUID.EntityUID(EntityType.UNSPECIFIED, "resource") - - ghost predicate InstanceOfRequestType(r: Request, reqty: RequestType) { - match reqty.principal { - case None => r.principal == unspecifiedPrincipalEuid - case Some(pt) => InstanceOfEntityType(r.principal, pt) - } && - r.action == reqty.action && - match reqty.resource { - case None => r.resource == unspecifiedResourceEuid - case Some(rt) => InstanceOfEntityType(r.resource, rt) - } && - InstanceOfRecordType(r.context, reqty.context) - } - - ghost predicate InstanceOfEntityType(e: EntityUID, ety: EntityType) { - ety == e.ty - } - - // Note that this is stronger than the alternative - // `InstanceOfType(Value.Record(r), Type.Record(rt))` - // when `rt` has an open attributes record because it always enforces that the - // record type `rt` exactly describes the fields in `r` - ghost predicate InstanceOfRecordType(r: Record, rt: RecordType) { - // all attributes are declared and well typed - (forall k | k in r :: k in rt.attrs && InstanceOfType(r[k], rt.attrs[k].ty)) && - // required attributes are present - (forall k | k in rt.attrs && rt.attrs[k].isRequired :: k in r) - } - - ghost predicate InstanceOfEntityTypeStore(s: EntityStore, ets: EntityTypeStore) - { - forall e | e in s.entities.Keys :: - var ety := e.ty; - var edata := s.entities[e]; - // The EntityStore cannot contain unspecified entities. 
In particular, - // they cannot have ancestors, so they cannot be `in` other entities. - ety != EntityType.UNSPECIFIED && - ety in ets.types && - InstanceOfRecordType(edata.attrs, ets.types[ety]) && - forall p | p in edata.ancestors :: - p.ty in ets.descendants && ety in ets.descendants[p.ty] - } - - ghost predicate InstanceOfActionStore(s: EntityStore, acts: ActionStore) - { - forall e | e in s.entities.Keys && isAction(e.ty) :: - var edata := s.entities[e]; - forall p | p in edata.ancestors :: - p in acts.descendants && e in acts.descendants[p] - } - - function typeOfPrim(p: Primitive): Type { - match p { - case Bool(true) => Type.Bool(True) - case Bool(false) => Type.Bool(False) - case Int(_) => Type.Int - case String(_) => Type.String - case EntityUID(u) => Type.Entity(EntityLUB({u.ty})) - } - } - - ghost predicate InstanceOfBoolType(b: bool, bt: BoolType) { - match (b,bt) { - case (true,True) => true - case (false,False) => true - case (_,AnyBool) => true - case _ => false - } - } - - ghost predicate InstanceOfEntityLUB(e: EntityUID, ty: EntityLUB) { - match ty { - case AnyEntity => true - case EntityLUB(lub) => exists et | et in lub :: InstanceOfEntityType(e, et) - } - } - - ghost predicate InstanceOfType(v: Value, ty: Type) - decreases ty - { - match (v,ty) { - case (Primitive(Bool(b)),Bool(bt)) => InstanceOfBoolType(b,bt) - case (Primitive(Int(_)),Int) => true - case (Primitive(String(_)),String) => true - case (Primitive(EntityUID(e)),Entity(lub)) => InstanceOfEntityLUB(e,lub) - case (Set(s),Set(ty1)) => - forall v1 | v1 in s :: InstanceOfType(v1,ty1) - case (Record(r),Record(rt)) => - (!rt.isOpen() ==> (forall k | k in r :: k in rt.attrs)) && - // if an attribute is present, then it has the expected type - (forall k | k in rt.attrs && k in r :: InstanceOfType(r[k],rt.attrs[k].ty)) && - // required attributes are present - (forall k | k in rt.attrs && rt.attrs[k].isRequired :: k in r) - case (Extension(Decimal(_)),_) => ty == 
Type.Extension(Name.fromStr("decimal")) - case (Extension(IPAddr(_)),_) => ty == Type.Extension(Name.fromStr("ipaddr")) - case _ => false - } - } -} diff --git a/cedar-dafny/validation/thm/model.dfy b/cedar-dafny/validation/thm/model.dfy deleted file mode 100644 index e06ab7d28..000000000 --- a/cedar-dafny/validation/thm/model.dfy +++ /dev/null @@ -1,1212 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -include "../../def/all.dfy" -include "../all.dfy" -include "../../thm/eval/basic.dfy" -include "base.dfy" - -// This module contains an abstract model for the Cedar evaluator semantics. -module validation.thm.model { - import opened def - import opened def.core - import opened def.engine - import opened def.util - import opened eval.basic - import opened types - import opened subtyping - import opened base - import opened ext - - // ----- Semantic model of Cedar ----- // - - // The semantic model construction can be thought of as a way of axiomatizing - // the behavior of the evaluator that's necessary to prove soundness. When we - // prove soundness, hiding these properties behind the axiomatic interface - // of a trait improves the performance of the Dafny verifier significantly. 
- - ghost predicate IsTrue (r: Request, s: EntityStore, e: Expr) { - IsSafe(r,s,e,Type.Bool(True)) - } - - ghost predicate IsFalse (r: Request, s: EntityStore, e: Expr) { - IsSafe(r,s,e,Type.Bool(False)) - } - - ghost predicate GetAttrSafe (r: Request, s: EntityStore, e: Expr, k: Attr) { - IsTrue(r,s,HasAttr(e,k)) - } - - ghost predicate IsTrueStrong (r: Request, s: EntityStore, e: Expr) { - IsSafeStrong(r,s,e,Type.Bool(True)) - } - - ghost predicate IsFalseStrong (r: Request, s: EntityStore, e: Expr) { - IsSafeStrong(r,s,e,Type.Bool(False)) - } - - ghost predicate SemanticSubty(t1: Type, t2: Type) { - forall v | InstanceOfType(v,t1) :: InstanceOfType(v,t2) - } - - ghost predicate SemanticUB(t1: Type, t2: Type, ub: Type) { - SemanticSubty(t1,ub) && SemanticSubty(t2,ub) - } - - lemma SemSubtyTransportVal(t: Type, t': Type, v: Value) - requires SemanticSubty(t,t') - requires InstanceOfType(v,t) - ensures InstanceOfType(v,t') - {} - - ghost predicate ExistingEntityInLub(s: EntityStore, ev: EntityUID, lub: EntityLUB) { - InstanceOfType(Value.Primitive(Primitive.EntityUID(ev)),Type.Entity(lub)) && ev in s.entities - } - - ghost predicate EntityProjStoreCondition(s: EntityStore, l: Attr, lub: EntityLUB, t': Type, isRequired: bool) { - forall ev: EntityUID | ExistingEntityInLub(s, ev, lub) :: - (isRequired ==> l in s.entities[ev].attrs) && - (l in s.entities[ev].attrs ==> InstanceOfType(s.entities[ev].attrs[l],t')) - } - - // Duplicate Evaluator.EntityInEntity here so that SemanticModel and - // soundness.dfy don't have to depend on engine.dfy. - ghost predicate EntityInEntity(s: EntityStore, u1: EntityUID, u2: EntityUID) { - u1 == u2 || (s.getEntityAttrs(u1).Ok? && s.entityIn(u1, u2)) - } - - // An expression is safe if it evaluates to a value of the expected type - // or produces an error of type EntityDoesNotExist or ExtensionError. 
- // - // The validator cannot protect against errors where an entity literal is - // not defined in the entity store or extension errors, but it can protect - // against all other types of errors, namely: AttrDoesNotExist, TypeError, - // ArityMismatchError, NoSuchFunctionError - opaque ghost predicate IsSafe(r: Request, s: EntityStore, e: Expr, t: Type) { - Evaluate(e,r,s) == base.Err(base.EntityDoesNotExist) || - Evaluate(e,r,s) == base.Err(base.ExtensionError) || - (Evaluate(e,r,s).Ok? && InstanceOfType(Evaluate(e,r,s).value,t)) - } - - lemma IsSafeSemanticsOk(r: Request, s: EntityStore, e: Expr, t: Type) - requires Evaluate(e,r,s).Ok? && InstanceOfType(Evaluate(e,r,s).value,t) - ensures IsSafe(r, s, e, t) - { - reveal IsSafe(); - } - - lemma IsSafeSemanticsErr(r: Request, s: EntityStore, e: Expr, t: Type) - requires Evaluate(e, r, s) == base.Err(base.EntityDoesNotExist) || Evaluate(e,r,s) == base.Err(base.ExtensionError) - ensures IsSafe(r, s, e, t) - { - reveal IsSafe(); - } - - lemma IsSafeSemanticsOkRev(r: Request, s: EntityStore, e: Expr, t: Type) - requires IsSafe(r, s, e, t) - requires Evaluate(e, r, s).Ok? - ensures InstanceOfType(Evaluate(e,r,s).value,t) - { - reveal IsSafe(); - } - - lemma ExtensionFunSafeEnsuresSafe(r: Request, s: EntityStore, name: base.Name, es: seq, args: seq) - requires name in extFunTypes - requires |es| == |args| - requires Evaluator(r, s).interpretList(es).Ok? && Evaluator(r, s).interpretList(es).value == args - requires ExtensionFunSafeEnsures(name, args) - ensures IsSafe(r, s, Call(name, es), extFunTypes[name].ret) - { - var eft := extFunTypes[name]; - var res := extFuns[name].fun(args); - assert res == base.Err(base.ExtensionError) || (res.Ok? 
&& InstanceOfType(res.value, eft.ret)); - var E := Evaluator(r, s); - assert E.interpretList(es).Ok?; - CallWithOkArgs(name, es, E); - if res == base.Err(base.ExtensionError) { - IsSafeSemanticsErr(r, s, Call(name, es), extFunTypes[name].ret); - } else { - IsSafeSemanticsOk(r, s, Call(name, es), extFunTypes[name].ret); - } - } - - lemma IsSafeSemanticsErrRev(r: Request, s: EntityStore, e: Expr, t: Type) - requires IsSafe(r, s, e, t) - requires Evaluate(e, r, s).Err? - ensures Evaluate(e, r, s) == base.Err(base.EntityDoesNotExist) || Evaluate(e,r,s) == base.Err(base.ExtensionError) - { - reveal IsSafe(); - } - - opaque ghost predicate IsSafeStrong (r: Request, s: EntityStore, e: Expr, t: Type) { - IsSafe(r,s,e,t) && Evaluate(e,r,s).Ok? - } - - lemma IsTrueStrongImpliesIsTrue(r: Request, s: EntityStore, e: Expr) - requires IsTrueStrong(r,s,e) - ensures IsTrue(r,s,e) - { - reveal IsSafeStrong(); - } - - lemma IsTrueImpliesIsTrueStrong(r: Request, s: EntityStore, e: Expr, t: Type) - requires IsSafeStrong(r,s,e,t) - requires IsTrue(r,s,e) - ensures IsTrueStrong(r,s,e) - { - reveal IsSafeStrong(); - } - - lemma NotTrueImpliesFalse(r: Request, s: EntityStore, e: Expr, bt: BoolType) - requires IsSafe(r,s,e,Type.Bool(bt)) - requires !IsTrue(r,s,e) - ensures IsFalse(r,s,e) - { - reveal IsSafe(); - } - - lemma NotSafeImpliesNotSafeStrong(r: Request, s: EntityStore, e: Expr, t: Type) - requires !IsSafe(r,s,e,t) - ensures !IsSafeStrong(r,s,e,t) - { - reveal IsSafeStrong(); - reveal IsSafe(); - } - - lemma FalseImpliesNotTrueStrong(r: Request, s: EntityStore, e: Expr) - requires IsFalse(r,s,e) - ensures !IsTrueStrong(r,s,e) - { - reveal IsSafeStrong(); - reveal IsSafe(); - } - - lemma SubtyCompat(t: Type, t': Type) - requires subty(t,t',ValidationMode.Permissive) - ensures SemanticSubty(t,t') - { - assert subty(t,t',ValidationMode.Permissive); - assert SemanticSubty(t,t') by { - forall v: Value | InstanceOfType(v,t) - ensures InstanceOfType(v,t') - { - 
SubtyCompatPointwise(t,t',v); - } - } - } - - lemma SubtyCompatMatchPointwise(t: Type, t': Type, v: Value) - requires subty(t,t',ValidationMode.Permissive) - requires InstanceOfType(v,t) - ensures InstanceOfType(v,t') - decreases t - { - match (t,t',v) { - case (Never,_,_) => - case (String,String,_) => - case (Int,Int,_) => - case (Bool(b1),Bool(b2),_) => - case (Set(t1),Set(t2),Set(s)) => - assert forall v' | v' in s :: InstanceOfType(v',t2) by { - forall v': Value | v' in s - ensures InstanceOfType(v',t2) - { - assert InstanceOfType(v',t1); - SubtyCompatMatchPointwise(t1,t2,v'); - } - } - case (Record(rt1),Record(rt2),Record(rv)) => - assert forall k | k in rt2.attrs && k in rv :: InstanceOfType(rv[k],rt2.attrs[k].ty) by { - forall k: Attr | k in rt2.attrs && k in rv - ensures InstanceOfType(rv[k],rt2.attrs[k].ty) - { - assert InstanceOfType(rv[k],rt1.attrs[k].ty); - assert subtyAttrType(rt1.attrs[k],rt2.attrs[k],ValidationMode.Permissive); - SubtyCompatMatchPointwise(rt1.attrs[k].ty,rt2.attrs[k].ty,rv[k]); - } - } - assert forall k | k in rt2.attrs && rt2.attrs[k].isRequired :: k in rv by { - forall k | k in rt2.attrs && rt2.attrs[k].isRequired - ensures k in rv - { - assert subtyAttrType(rt1.attrs[k],rt2.attrs[k],ValidationMode.Permissive); - } - } - case (Entity(e1),Entity(e2),_) => - case (Extension(e1),Extension(e2),_) => - } - } - - lemma SubtyCompatPointwise(t: Type, t': Type, v: Value) - requires subty(t,t',ValidationMode.Permissive) - requires InstanceOfType(v,t) - ensures InstanceOfType(v,t') - { - SubtyCompatMatchPointwise(t,t',v); - } - - lemma SemSubtyTransport(r: Request, s: EntityStore, e: Expr, t: Type, t': Type) - requires SemanticSubty(t,t') - requires IsSafe(r,s,e,t) - ensures IsSafe(r,s,e,t') - { - reveal IsSafe(); - if (exists v :: Evaluate(e,r,s) == base.Ok(v) && InstanceOfType(v,t)) { - var v :| Evaluate(e,r,s) == base.Ok(v) && InstanceOfType(v,t); - assert InstanceOfType(v,t') by { - SemSubtyTransportVal(t,t',v); - } - } - } - - lemma 
PrincipalIsSafe(r: Request, s: EntityStore, t: Type) - requires InstanceOfType(Value.EntityUID(r.principal),t) - ensures IsSafe(r,s,Var(Principal),t) - { - reveal IsSafe(); - } - - lemma ActionIsSafe(r: Request, s: EntityStore, t: Type) - requires InstanceOfType(Value.EntityUID(r.action),t) - ensures IsSafe(r,s,Var(Action),t) - { - reveal IsSafe(); - } - - lemma ResourceIsSafe(r: Request, s: EntityStore, t: Type) - requires InstanceOfType(Value.EntityUID(r.resource),t) - ensures IsSafe(r,s,Var(Resource),t) - { - reveal IsSafe(); - } - - lemma ContextIsSafe(r: Request, s: EntityStore, t: Type) - requires InstanceOfType(Value.Record(r.context),t) - ensures IsSafe(r,s,Var(Context),t) - { - reveal IsSafe(); - } - - lemma PrimSafeLift(r: Request, s: EntityStore, p: Primitive, t: Type) - requires InstanceOfType(Value.Primitive(p),t) - ensures IsSafe(r,s,Expr.PrimitiveLit(p),t) - { - reveal IsSafe(); - } - - lemma PrimSafeAtInferredType(p: Primitive) - ensures InstanceOfType(Value.Primitive(p),typeOfPrim(p)) - {} - - lemma EqIsSafe(r: Request, s: EntityStore, e: Expr, e': Expr, t: Type, t': Type) - requires IsSafe(r,s,e,t) - requires IsSafe(r,s,e',t') - ensures IsSafe(r,s,BinaryApp(BinaryOp.Eq,e,e'),Type.Bool(AnyBool)) - { - reveal IsSafe(); - } - - lemma EqFalseIsSafe(r: Request, s: EntityStore, e: Expr, e': Expr, lub: EntityLUB, lub': EntityLUB) - requires IsSafe(r,s,e,Type.Entity(lub)) - requires IsSafe(r,s,e',Type.Entity(lub')) - requires lub.disjoint(lub') - ensures IsFalse(r,s,BinaryApp(BinaryOp.Eq,e,e')) - { - reveal IsSafe(); - } - - lemma EqEntitySameSafe(r: Request, s: EntityStore, E: EntityUID) - ensures IsTrue(r,s,Expr.BinaryApp(BinaryOp.Eq,PrimitiveLit(Primitive.EntityUID(E)),PrimitiveLit(Primitive.EntityUID(E)))) - { - reveal IsSafe(); - var e := Expr.BinaryApp(BinaryOp.Eq,PrimitiveLit(Primitive.EntityUID(E)),PrimitiveLit(Primitive.EntityUID(E))); - assert Evaluator(r,s).interpret(e) == base.Ok(Value.Primitive(Primitive.Bool(true))); - } - - lemma 
EqEntityDiffSafe(r: Request, s: EntityStore, E: EntityUID, E': EntityUID) - requires E != E' - ensures IsFalse(r,s,Expr.BinaryApp(BinaryOp.Eq,PrimitiveLit(Primitive.EntityUID(E)),PrimitiveLit(Primitive.EntityUID(E')))) - { - reveal IsSafe(); - var e := Expr.BinaryApp(BinaryOp.Eq,PrimitiveLit(Primitive.EntityUID(E)),PrimitiveLit(Primitive.EntityUID(E'))); - assert Evaluator(r,s).interpret(e) == base.Ok(Value.Primitive(Primitive.Bool(false))); - } - - lemma AndLShortSafe(r: Request, s: EntityStore, e: Expr, e': Expr) - requires IsFalse(r,s,e) - ensures IsFalse(r,s,And(e,e')) - { - reveal IsSafe(); - if Evaluate(e,r,s).Ok? { - assert Evaluate(e,r,s) == base.Ok(Value.Primitive(Primitive.Bool(false))); - assert Evaluator(r,s).interpretShortcircuit(And(e,e'),e,e',false) == base.Ok(Value.Primitive(Primitive.Bool(false))); - assert Evaluate(And(e,e'),r,s) == base.Ok(Value.Primitive(Primitive.Bool(false))); - } else { - assert Evaluator(r,s).interpretShortcircuit(And(e,e'),e,e',false) == Evaluate(e,r,s); - assert Evaluate(And(e,e'),r,s) == Evaluate(e,r,s); - } - } - - lemma AndRShortSafe(r: Request, s: EntityStore, e: Expr, e': Expr) - requires IsSafe(r,s,e,Type.Bool(AnyBool)) - requires IsFalse(r,s,e') - ensures IsFalse(r,s,And(e,e')) - { - reveal IsSafe(); - if Evaluate(e,r,s).Ok? && Evaluate(e',r,s).Ok? { - assert Evaluate(e',r,s) == base.Ok(Value.Primitive(Primitive.Bool(false))); - assert Evaluator(r,s).interpretShortcircuit(And(e,e'),e,e',false) == base.Ok(Value.Primitive(Primitive.Bool(false))); - assert Evaluate(And(e,e'),r,s) == base.Ok(Value.Primitive(Primitive.Bool(false))); - } else { - if Evaluate(e,r,s).Err? 
{ - assert Evaluator(r,s).interpretShortcircuit(And(e,e'),e,e',false) == Evaluate(e,r,s); - assert Evaluate(And(e,e'),r,s) == Evaluate(e,r,s); - } else { - assert Evaluate(e',r,s).Err?; - var b :| Evaluate(e,r,s) == base.Ok(Value.Primitive(Primitive.Bool(b))); - if b { - assert Evaluator(r,s).interpretShortcircuit(And(e,e'),e,e',false) == Evaluate(e',r,s); - assert Evaluate(And(e,e'),r,s) == Evaluate(e',r,s); - } else { - assert Evaluator(r,s).interpretShortcircuit(And(e,e'),e,e',false) == base.Ok(Value.Primitive(Primitive.Bool(false))); - assert Evaluate(And(e,e'),r,s) == base.Ok(Value.Primitive(Primitive.Bool(false))); - } - } - } - } - - lemma AndLRetSafe(r: Request, s: EntityStore, e: Expr, e': Expr, t: Type) - requires IsSafe(r,s,e,t) - requires IsTrue(r,s,e') - requires SemanticSubty(t,Type.Bool(AnyBool)) - ensures IsSafe(r,s,And(e,e'),t) - { - reveal IsSafe(); - if Evaluate(e,r,s).Ok? && Evaluate(e',r,s).Ok? { - assert Evaluate(e',r,s) == base.Ok(Value.Primitive(Primitive.Bool(true))); - var v :| Evaluate(e,r,s) == base.Ok(v) && InstanceOfType(v,t); - assert InstanceOfType(v,Type.Bool(AnyBool)) by { - SemSubtyTransportVal(t,Type.Bool(AnyBool),v); - } - var b :| v == Value.Primitive(Primitive.Bool(b)); - assert Evaluator(r,s).interpretShortcircuit(And(e,e'),e,e',false) == base.Ok(Value.Primitive(Primitive.Bool(b))); - assert Evaluate(And(e,e'),r,s) == base.Ok(Value.Primitive(Primitive.Bool(b))); - } else { - if Evaluate(e,r,s).Err? 
{ - assert Evaluator(r,s).interpretShortcircuit(And(e,e'),e,e',false) == Evaluate(e,r,s); - assert Evaluate(And(e,e'),r,s) == Evaluate(e,r,s); - } else { - assert Evaluate(e',r,s).Err?; - var b :| Evaluate(e,r,s) == base.Ok(Value.Primitive(Primitive.Bool(b))); - if b { - assert Evaluator(r,s).interpretShortcircuit(And(e,e'),e,e',false) == Evaluate(e',r,s); - assert Evaluate(And(e,e'),r,s) == Evaluate(e',r,s); - } else { - assert Evaluator(r,s).interpretShortcircuit(And(e,e'),e,e',false) == base.Ok(Value.Primitive(Primitive.Bool(false))); - assert Evaluate(And(e,e'),r,s) == base.Ok(Value.Primitive(Primitive.Bool(false))); - } - } - } - } - - lemma AndSafe(r: Request, s: EntityStore, e: Expr, e': Expr) - requires IsSafe(r,s,e,Type.Bool(AnyBool)) - requires IsSafe(r,s,e',Type.Bool(AnyBool)) - ensures IsSafe(r,s,And(e,e'),Type.Bool(AnyBool)) - { - reveal IsSafe(); - if Evaluate(e,r,s).Ok? && Evaluate(e',r,s).Ok? { - assert Evaluator(r,s).interpretShortcircuit(And(e,e'),e,e',false).Ok?; - assert Evaluate(And(e,e'),r,s).Ok?; - } else { - if Evaluate(e,r,s).Err? 
{ - assert Evaluator(r,s).interpretShortcircuit(And(e,e'),e,e',false) == Evaluate(e,r,s); - assert Evaluate(And(e,e'),r,s) == Evaluate(e,r,s); - } else { - assert Evaluate(e',r,s).Err?; - var b :| Evaluate(e,r,s) == base.Ok(Value.Primitive(Primitive.Bool(b))); - if b { - assert Evaluator(r,s).interpretShortcircuit(And(e,e'),e,e',false) == Evaluate(e',r,s); - assert Evaluate(And(e,e'),r,s) == Evaluate(e',r,s); - } else { - assert Evaluator(r,s).interpretShortcircuit(And(e,e'),e,e',false) == base.Ok(Value.Primitive(Primitive.Bool(false))); - assert Evaluate(And(e,e'),r,s) == base.Ok(Value.Primitive(Primitive.Bool(false))); - } - } - } - } - - lemma AndTrueStrong(r: Request, s: EntityStore, e1: Expr, e2: Expr) - requires IsTrue(r,s,e1) - requires IsTrueStrong(r,s,And(e1,e2)) - ensures IsTrueStrong(r,s,e2) - { - reveal IsSafeStrong(); - reveal IsSafe(); - assert Evaluator(r,s).interpretShortcircuit(And(e1,e2),e1,e2,false) == base.Ok(Value.Bool(true)); - } - - lemma AndError(r: Request, s: EntityStore, e1: Expr, e2: Expr, t: Type, tnew: Type) - requires IsSafe(r,s,e1,t) - requires !IsSafeStrong(r,s,e1,t) - ensures IsSafe(r,s,And(e1,e2),tnew) - ensures !IsSafeStrong(r,s,And(e1,e2),tnew) - { - reveal IsSafeStrong(); - reveal IsSafe(); - assert Evaluator(r,s).interpretShortcircuit(And(e1,e2),e1,e2,false).Err?; - } - - lemma OrLShortSafe(r: Request, s: EntityStore, e: Expr, e': Expr) - requires IsTrue(r,s,e) - ensures IsTrue(r,s,Or(e,e')) - { - reveal IsSafe(); - if Evaluate(e,r,s).Ok? 
{ - assert Evaluate(e,r,s) == base.Ok(Value.Primitive(Primitive.Bool(true))); - assert Evaluator(r,s).interpretShortcircuit(Or(e,e'),e,e',true) == base.Ok(Value.Primitive(Primitive.Bool(true))); - assert Evaluate(Or(e,e'),r,s) == base.Ok(Value.Primitive(Primitive.Bool(true))); - } else { - assert Evaluator(r,s).interpretShortcircuit(Or(e,e'),e,e',true) == Evaluate(e,r,s); - assert Evaluate(Or(e,e'),r,s) == Evaluate(e,r,s); - } - } - - lemma OrRShortSafe(r: Request, s: EntityStore, e: Expr, e': Expr) - requires IsSafe(r,s,e,Type.Bool(AnyBool)) - requires IsTrue(r,s,e') - ensures IsTrue(r,s,Or(e,e')) - { - reveal IsSafe(); - if Evaluate(e,r,s).Ok? && Evaluate(e',r,s).Ok? { - assert Evaluate(e',r,s) == base.Ok(Value.Primitive(Primitive.Bool(true))); - assert Evaluator(r,s).interpretShortcircuit(Or(e,e'),e,e',true) == base.Ok(Value.Primitive(Primitive.Bool(true))); - assert Evaluate(Or(e,e'),r,s) == base.Ok(Value.Primitive(Primitive.Bool(true))); - } else { - if Evaluate(e,r,s).Err? { - assert Evaluator(r,s).interpretShortcircuit(Or(e,e'),e,e',true) == Evaluate(e,r,s); - assert Evaluate(Or(e,e'),r,s) == Evaluate(e,r,s); - } else { - assert Evaluate(e',r,s).Err?; - var b :| Evaluate(e,r,s) == base.Ok(Value.Primitive(Primitive.Bool(b))); - if b { - assert Evaluator(r,s).interpretShortcircuit(Or(e,e'),e,e',true) == base.Ok(Value.Primitive(Primitive.Bool(true))); - assert Evaluate(Or(e,e'),r,s) == base.Ok(Value.Primitive(Primitive.Bool(true))); - } else { - assert Evaluator(r,s).interpretShortcircuit(Or(e,e'),e,e',true) == Evaluate(e',r,s); - assert Evaluate(Or(e,e'),r,s) == Evaluate(e',r,s); - } - } - } - } - - lemma OrLRetSafe(r: Request, s: EntityStore, e: Expr, e': Expr, t: Type) - requires IsSafe(r,s,e,t) - requires IsFalse(r,s,e') - requires SemanticSubty(t,Type.Bool(AnyBool)) - ensures IsSafe(r,s,Or(e,e'),t) - { - reveal IsSafe(); - if Evaluate(e,r,s).Ok? && Evaluate(e',r,s).Ok? 
{ - assert Evaluate(e',r,s) == base.Ok(Value.Primitive(Primitive.Bool(false))); - var v :| Evaluate(e,r,s) == base.Ok(v) && InstanceOfType(v,t); - assert InstanceOfType(v,Type.Bool(AnyBool)) by { - SemSubtyTransportVal(t,Type.Bool(AnyBool),v); - } - var b :| v == Value.Primitive(Primitive.Bool(b)); - assert Evaluator(r,s).interpretShortcircuit(Or(e,e'),e,e',true) == base.Ok(Value.Primitive(Primitive.Bool(b))); - assert Evaluate(Or(e,e'),r,s) == base.Ok(Value.Primitive(Primitive.Bool(b))); - assert IsSafe(r,s,e,Type.Bool(AnyBool)) by { - SemSubtyTransport(r,s,e,t,Type.Bool(AnyBool)); - } - } else { - if Evaluate(e,r,s).Err? { - assert Evaluator(r,s).interpretShortcircuit(Or(e,e'),e,e',true) == Evaluate(e,r,s); - assert Evaluate(Or(e,e'),r,s) == Evaluate(e,r,s); - } else { - assert Evaluate(e',r,s).Err?; - var b :| Evaluate(e,r,s) == base.Ok(Value.Primitive(Primitive.Bool(b))); - if b { - assert Evaluator(r,s).interpretShortcircuit(Or(e,e'),e,e',true) == base.Ok(Value.Primitive(Primitive.Bool(true))); - assert Evaluate(Or(e,e'),r,s) == base.Ok(Value.Primitive(Primitive.Bool(true))); - } else { - assert Evaluator(r,s).interpretShortcircuit(Or(e,e'),e,e',true) == Evaluate(e',r,s); - assert Evaluate(Or(e,e'),r,s) == Evaluate(e',r,s); - } - } - } - } - - lemma OrRRetSafe(r: Request, s: EntityStore, e: Expr, e': Expr, t: Type) - requires IsFalse(r,s,e) - requires IsSafe(r,s,e',t) - requires SemanticSubty(t,Type.Bool(AnyBool)) - ensures IsSafe(r,s,Or(e,e'),t) - { - reveal IsSafe(); - if Evaluate(e,r,s).Ok? && Evaluate(e',r,s).Ok? 
{ - assert Evaluate(e,r,s) == base.Ok(Value.Primitive(Primitive.Bool(false))); - var v :| Evaluate(e',r,s) == base.Ok(v) && InstanceOfType(v,t); - assert InstanceOfType(v,Type.Bool(AnyBool)) by { - SemSubtyTransportVal(t,Type.Bool(AnyBool),v); - } - var b :| v == Value.Primitive(Primitive.Bool(b)); - assert Evaluator(r,s).interpretShortcircuit(Or(e,e'),e,e',true) == base.Ok(Value.Primitive(Primitive.Bool(b))); - assert Evaluate(Or(e,e'),r,s) == base.Ok(Value.Primitive(Primitive.Bool(b))); - assert IsSafe(r,s,e',Type.Bool(AnyBool)) by { - SemSubtyTransport(r,s,e',t,Type.Bool(AnyBool)); - } - } else { - if Evaluate(e,r,s).Err? { - assert Evaluator(r,s).interpretShortcircuit(Or(e,e'),e,e',true) == Evaluate(e,r,s); - assert Evaluate(Or(e,e'),r,s) == Evaluate(e,r,s); - } else { - assert Evaluate(e',r,s).Err?; - var b :| Evaluate(e,r,s) == base.Ok(Value.Primitive(Primitive.Bool(b))); - if b { - assert Evaluator(r,s).interpretShortcircuit(Or(e,e'),e,e',true) == base.Ok(Value.Primitive(Primitive.Bool(true))); - assert Evaluate(Or(e,e'),r,s) == base.Ok(Value.Primitive(Primitive.Bool(true))); - } else { - assert Evaluator(r,s).interpretShortcircuit(Or(e,e'),e,e',true) == Evaluate(e',r,s); - assert Evaluate(Or(e,e'),r,s) == Evaluate(e',r,s); - } - } - } - } - - lemma OrSafe(r: Request, s: EntityStore, e: Expr, e': Expr) - requires IsSafe(r,s,e,Type.Bool(AnyBool)) - requires IsSafe(r,s,e',Type.Bool(AnyBool)) - ensures IsSafe(r,s,Or(e,e'),Type.Bool(AnyBool)) - { - reveal IsSafe(); - if Evaluate(e,r,s).Ok? && Evaluate(e',r,s).Ok? { - assert Evaluator(r,s).interpretShortcircuit(Or(e,e'),e,e',true).Ok?; - assert Evaluate(Or(e,e'),r,s).Ok?; - } else { - if Evaluate(e,r,s).Err? 
{ - assert Evaluator(r,s).interpretShortcircuit(Or(e,e'),e,e',true) == Evaluate(e,r,s); - assert Evaluate(Or(e,e'),r,s) == Evaluate(e,r,s); - } else { - assert Evaluate(e',r,s).Err?; - var b :| Evaluate(e,r,s) == base.Ok(Value.Primitive(Primitive.Bool(b))); - if b { - assert Evaluator(r,s).interpretShortcircuit(Or(e,e'),e,e',true) == base.Ok(Value.Primitive(Primitive.Bool(true))); - assert Evaluate(Or(e,e'),r,s) == base.Ok(Value.Primitive(Primitive.Bool(true))); - } else { - assert Evaluator(r,s).interpretShortcircuit(Or(e,e'),e,e',true) == Evaluate(e',r,s); - assert Evaluate(Or(e,e'),r,s) == Evaluate(e',r,s); - } - } - } - } - - lemma OrTrueStrong(r: Request, s: EntityStore, e1: Expr, e2: Expr) - requires IsTrueStrong(r,s,Or(e1,e2)) - ensures IsTrueStrong(r,s,e1) || IsTrueStrong(r,s,e2) - { - reveal IsSafeStrong(); - reveal IsSafe(); - assert Evaluator(r,s).interpretShortcircuit(Or(e1,e2),e1,e2,true) == base.Ok(Value.Bool(true)); - } - - lemma NotTrueSafe(r: Request, s: EntityStore, e: Expr) - requires IsTrue(r,s,e) - ensures IsFalse(r,s,UnaryApp(Not,e)) - { - reveal IsSafe(); - } - - lemma NotFalseSafe(r: Request, s: EntityStore, e: Expr) - requires IsFalse(r,s,e) - ensures IsTrue(r,s,UnaryApp(Not,e)) - { - reveal IsSafe(); - } - - lemma NotSafe(r: Request, s: EntityStore, e: Expr) - requires IsSafe(r,s,e,Type.Bool(AnyBool)) - ensures IsSafe(r,s,UnaryApp(Not,e),Type.Bool(AnyBool)) - { - reveal IsSafe(); - } - - lemma NegSafe(r: Request, s: EntityStore, e: Expr) - requires IsSafe(r,s,e,Type.Int) - ensures IsSafe(r,s,UnaryApp(Neg,e),Type.Int) - { - reveal IsSafe(); - } - - lemma MulBySafe(r: Request, s: EntityStore, e: Expr, i: int) - requires IsSafe(r,s,e,Type.Int) - ensures IsSafe(r,s,UnaryApp(MulBy(i),e),Type.Int) - { - reveal IsSafe(); - } - - lemma IteTrueSafe(r: Request, s: EntityStore, e: Expr, e1: Expr, e2: Expr, t: Type) - requires IsTrue(r,s,e) - requires IsSafe(r,s,e1,t) - ensures IsSafe(r,s,If(e,e1,e2),t) - { - reveal IsSafe(); - } - - lemma 
IteFalseSafe(r: Request, s: EntityStore, e: Expr, e1: Expr, e2: Expr, t: Type) - requires IsFalse(r,s,e) - requires IsSafe(r,s,e2,t) - ensures IsSafe(r,s,If(e,e1,e2),t) - { - reveal IsSafe(); - } - - lemma IteTrueStrongTrue(r: Request, s: EntityStore, e1: Expr, e2: Expr, e3: Expr) - requires IsTrue(r,s,e1) - requires IsTrueStrong(r,s,If(e1,e2,e3)) - ensures IsTrueStrong(r,s,e2) - { - reveal IsSafeStrong(); - reveal IsSafe(); - } - - lemma IteTrueStrongFalse(r: Request, s: EntityStore, e1: Expr, e2: Expr, e3: Expr) - requires IsFalse(r,s,e1) - requires IsTrueStrong(r,s,If(e1,e2,e3)) - ensures IsTrueStrong(r,s,e3) - { - reveal IsSafeStrong(); - reveal IsSafe(); - } - - lemma IteError(r: Request, s: EntityStore, e1: Expr, e2: Expr, e3: Expr, t: Type, tnew: Type) - requires IsSafe(r,s,e1,t) - requires !IsSafeStrong(r,s,e1,t) - ensures IsSafe(r,s,If(e1,e2,e3),tnew) - ensures !IsSafeStrong(r,s,If(e1,e2,e3),tnew) - { - reveal IsSafeStrong(); - reveal IsSafe(); - } - - lemma ContainsSetSafe(r: Request, s: EntityStore, e: Expr, e': Expr, t1: Type, t2: Type) - requires IsSafe(r,s,e,Type.Set(t1)) - requires IsSafe(r,s,e',t2) - ensures IsSafe(r,s,BinaryApp(Contains,e,e'),Type.Bool(AnyBool)) - { - reveal IsSafe(); - } - - lemma LikeSafe(r: Request, s: EntityStore, e: Expr, p: Pattern) - requires IsSafe(r,s,e,Type.String) - ensures IsSafe(r,s,UnaryApp(Like(p),e),Type.Bool(AnyBool)) - { - reveal IsSafe(); - } - - lemma IsOpSafe(r: Request, s: EntityStore, e: Expr, ety: EntityType) - requires IsSafe(r,s,e,Type.Entity(AnyEntity)) - ensures IsSafe(r,s,UnaryApp(UnaryOp.Is(ety),e),Type.Bool(AnyBool)) - { - reveal IsSafe(); - } - - lemma IsOpSafeTrue(r: Request, s: EntityStore, e: Expr, ety: EntityType, lub: EntityLUB) - requires IsSafe(r,s,e,Type.Entity(lub)) - requires lub.EntityLUB? 
- requires ety in lub.tys && |lub.tys| == 1 - ensures IsSafe(r,s,UnaryApp(UnaryOp.Is(ety),e),Type.Bool(True)) - { - assert lub.tys == {ety} by { - def.util.EntityTypeLeqIsTotalOrder(); - var _ := def.util.SetToSortedSeq(lub.tys,def.util.EntityTypeLeq); - } - reveal IsSafe(); - } - - lemma IsOpSafeFalse(r: Request, s: EntityStore, e: Expr, ety: EntityType, lub: EntityLUB) - requires IsSafe(r,s,e,Type.Entity(lub)) - requires lub.EntityLUB? - requires ety !in lub.tys - ensures IsSafe(r,s,UnaryApp(UnaryOp.Is(ety),e),Type.Bool(False)) - { - reveal IsSafe(); - } - - lemma SetConstrSafe(r: Request, s: EntityStore, es: seq, t: Type) - requires forall i | 0 <= i < |es| :: IsSafe(r,s,es[i],t) - ensures IsSafe(r,s,Expr.Set(es),Type.Set(t)) - { - reveal IsSafe(); - var E := Evaluator(r,s); - SetSemantics(es,E); - if(forall i | 0 <= i < |es| :: exists v :: Evaluate(es[i],r,s) == base.Ok(v) && InstanceOfType(v,t)){ - assert forall e | e in es :: Evaluate(e,r,s).Ok?; - assert Evaluate(Expr.Set(es),r,s).Ok?; - var vs :| E.interpretSet(es) == base.Ok(vs); - assert InstanceOfType(Value.Set(vs),Type.Set(t)) by { - forall v | v in vs ensures InstanceOfType(v,t) {} - } - } - } - - lemma ContainsAnyAllSafe(r: Request, s: EntityStore, op: BinaryOp, e1: Expr, e2: Expr, t1: Type, t2: Type) - requires op == ContainsAll || op == ContainsAny - requires IsSafe(r,s,e1,Type.Set(t1)) - requires IsSafe(r,s,e2,Type.Set(t2)) - ensures IsSafe(r,s,BinaryApp(op,e1,e2), Type.Bool(AnyBool)) - { - reveal IsSafe(); - } - - lemma IneqSafe(r: Request, s: EntityStore, op: BinaryOp, e1: Expr, e2: Expr) - requires op == Less || op == BinaryOp.LessEq - requires IsSafe(r,s,e1,Type.Int) - requires IsSafe(r,s,e2,Type.Int) - ensures IsSafe(r,s,BinaryApp(op,e1,e2),Type.Bool(AnyBool)) - { - reveal IsSafe(); - } - - lemma ArithSafe(r: Request, s: EntityStore, op: BinaryOp, e1: Expr, e2: Expr) - requires op == Add || op == Sub - requires IsSafe(r,s,e1,Type.Int) - requires IsSafe(r,s,e2,Type.Int) - ensures 
IsSafe(r,s,BinaryApp(op,e1,e2),Type.Int) - { - reveal IsSafe(); - } - - // We prove that every extension function is safe with respect to the - // ExtFunType assigned to it by the validator. In particular, we show that - // the argument types of the ExtFunType match the argument type checks - // actually performed by the function at runtime, the return value has the - // correct type on success, and the function doesn't raise any error other - // than ExtensionError. - // - // Writing one lemma per extension function would be a lot of boilerplate. - // Instead, we put them in groups that have the same ExtFunType. - - ghost predicate ExtensionFunSafeRequires(name: base.Name, args: seq) - requires name in extFunTypes - { - var eft := extFunTypes[name]; - |args| == |eft.args| && - forall i | 0 <= i < |args| :: InstanceOfType(args[i], eft.args[i]) - } - - ghost predicate ExtensionFunSafeEnsures(name: base.Name, args: seq) - requires name in extFunTypes - { - var eft := extFunTypes[name]; - var res := extFuns[name].fun(args); - res == base.Err(base.ExtensionError) || (res.Ok? && InstanceOfType(res.value, eft.ret)) - } - - ghost predicate IsDecimalConstructorName(name: base.Name) { - name == base.Name.fromStr("decimal") - } - - lemma DecimalConstructorSafe(name: base.Name, args: seq) - requires IsDecimalConstructorName(name) - requires ExtensionFunSafeRequires(name, args) - ensures ExtensionFunSafeEnsures(name, args) - {} - - ghost predicate IsDecimalComparisonName(name: base.Name) { - name == base.Name.fromStr("lessThan") || - name == base.Name.fromStr("lessThanOrEqual") || - name == base.Name.fromStr("greaterThan") || - name == base.Name.fromStr("greaterThanOrEqual") - } - - lemma DecimalComparisonSafe(name: base.Name, args: seq) - requires IsDecimalComparisonName(name) - requires ExtensionFunSafeRequires(name, args) - ensures ExtensionFunSafeEnsures(name, args) - { - assert |args| == 2 && args[0].Extension? && args[0].ex.Decimal? && args[1].Extension? 
&& args[1].ex.Decimal?; - assert extFunTypes[name].ret == Type.Bool(AnyBool); - var res := extFuns[name].fun(args); - assert res.Ok? && InstanceOfType(res.value, Type.Bool(AnyBool)) by { - match res.value { - case Primitive(Bool(b)) => - } - } - } - - ghost predicate IsIpConstructorName(name: base.Name) { - name == base.Name.fromStr("ip") - } - - lemma IpConstructorSafe(name: base.Name, args: seq) - requires IsIpConstructorName(name) - requires ExtensionFunSafeRequires(name, args) - ensures ExtensionFunSafeEnsures(name, args) - {} - - ghost predicate IsIpUnaryName(name: base.Name) { - name == base.Name.fromStr("isIpv4") || - name == base.Name.fromStr("isIpv6") || - name == base.Name.fromStr("isLoopback") || - name == base.Name.fromStr("isMulticast") - } - - lemma IpUnarySafe(name: base.Name, args: seq) - requires IsIpUnaryName(name) - requires ExtensionFunSafeRequires(name, args) - ensures ExtensionFunSafeEnsures(name, args) - { - assert |args| == 1 && args[0].Extension? && args[0].ex.IPAddr?; - assert extFunTypes[name].ret == Type.Bool(AnyBool); - var res := extFuns[name].fun(args); - assert res.Ok? && InstanceOfType(res.value, Type.Bool(AnyBool)) by { - match res.value { - case Primitive(Bool(b)) => - } - } - } - - ghost predicate IsIpBinaryName(name: base.Name) { - name == base.Name.fromStr("isInRange") - } - - lemma IpBinarySafe(name: base.Name, args: seq) - requires IsIpBinaryName(name) - requires ExtensionFunSafeRequires(name, args) - ensures ExtensionFunSafeEnsures(name, args) - {} - - lemma CallSafe(r: Request, s: EntityStore, name: base.Name, args: seq) - requires name in extFunTypes - requires |args| == |extFunTypes[name].args| - requires forall i | 0 <= i < |args| :: IsSafe(r,s,args[i],extFunTypes[name].args[i]) - ensures IsSafe(r,s,Call(name,args),extFunTypes[name].ret) - { - var eft := extFunTypes[name]; - var E := Evaluator(r, s); - if (forall i | 0 <= i < |args| :: E.interpret(args[i]).Ok?) 
{ - ListSemanticsOk(args, E); - - var argVals := E.interpretList(args).value; - var res := E.applyExtFun(name, argVals); - assert forall i:nat | i < |args| :: InstanceOfType(argVals[i], eft.args[i]) by { - forall i: nat | i < |args| ensures InstanceOfType(argVals[i], eft.args[i]) { - assert E.interpret(args[i]) == base.Ok(argVals[i]); - IsSafeSemanticsOkRev(r, s, args[i], eft.args[i]); - } - } - if IsDecimalConstructorName(name) { - DecimalConstructorSafe(name, argVals); - ExtensionFunSafeEnsuresSafe(r, s, name, args, argVals); - } else if IsDecimalComparisonName(name) { - DecimalComparisonSafe(name, argVals); - ExtensionFunSafeEnsuresSafe(r, s, name, args, argVals); - } else if IsIpConstructorName(name) { - IpConstructorSafe(name, argVals); - ExtensionFunSafeEnsuresSafe(r, s, name, args, argVals); - } else if IsIpUnaryName(name) { - IpUnarySafe(name, argVals); - ExtensionFunSafeEnsuresSafe(r, s, name, args, argVals); - } else if IsIpBinaryName(name) { - IpBinarySafe(name, argVals); - ExtensionFunSafeEnsuresSafe(r, s, name, args, argVals); - } - - } else { - var i := ListSemanticsErrRet(args, E); - IsSafeSemanticsErrRev(r, s, args[i], extFunTypes[name].args[i]); - CallWithErrArgs(name, args, E); - IsSafeSemanticsErr(r,s,Call(name,args),extFunTypes[name].ret); - } - } - - ghost predicate ExistsSafeType(r: Request, s: EntityStore, e: Expr) { - exists t :: IsSafe(r,s,e,t) - } - - lemma RecordSafe(r: Request, s: EntityStore, es: seq<(Attr,Expr)>, rt: RecordType) - // every entry has some type - requires forall ae :: ae in es ==> ExistsSafeType(r,s,ae.1) - // and the last instance of every required key is safe at the correct type. 
- requires forall k :: k in rt.attrs ==> KeyExists(k,es) && IsSafe(r,s,LastOfKey(k,es),rt.attrs[k].ty) - requires !rt.isOpen() ==> forall ae :: ae in es ==> ae.0 in rt.attrs.Keys - ensures IsSafe(r,s,Expr.Record(es),Type.Record(rt)) - { - var E := Evaluator(r,s); - var res := E.interpretRecord(es); - match res { - case Ok(rv) => - assert E.interpret(Expr.Record(es)) == base.Ok(Value.Record(rv)); - RecordSemanticsOk(es, E); - forall k | k in rt.attrs - ensures InstanceOfType(rv[k],rt.attrs[k].ty) - { - assert KeyExists(k,es) && IsSafe(r,s,LastOfKey(k,es),rt.attrs[k].ty); - IsSafeSemanticsOkRev(r, s, LastOfKey(k,es),rt.attrs[k].ty); - } - assert InstanceOfType(Value.Record(rv),Type.Record(rt)); - IsSafeSemanticsOk(r, s, Expr.Record(es), Type.Record(rt)); - case Err(err) => - var i := RecordSemanticsErrRet(es, E); - var e := es[i].1; - var t :| IsSafe(r,s,e,t); - IsSafeSemanticsErrRev(r, s, e, t); - RecordSemanticsErr(es, E); - IsSafeSemanticsErr(r, s, Expr.Record(es), Type.Record(rt)); - } - } - - lemma ObjectProjSafeRequired(r: Request, s: EntityStore, e: Expr, t: Type, l: Attr, t': AttrType) - requires IsSafe(r,s,e,t) - requires t'.isRequired - requires SemanticSubty(t,Type.Record(RecordType(map[l := t'], OpenAttributes))) - ensures IsSafe(r,s,GetAttr(e,l),t'.ty) - { - reveal IsSafe(); - } - - lemma ObjectProjSafeGetAttrSafe(r: Request, s: EntityStore, e: Expr, t: Type, l: Attr, t': AttrType) - requires IsSafe(r,s,e,t) - requires SemanticSubty(t,Type.Record(RecordType(map[l := t'], OpenAttributes))) - requires GetAttrSafe(r,s,e,l) - ensures IsSafe(r,s,GetAttr(e,l),t'.ty) - { - reveal IsSafe(); - } - - lemma EntityProjSafe(r: Request, s: EntityStore, e: Expr, l: Attr, lub: EntityLUB, t': Type, isRequired: bool) - requires IsSafe(r,s,e,Type.Entity(lub)) - requires EntityProjStoreCondition(s, l, lub, t', isRequired) - requires isRequired || GetAttrSafe(r,s,e,l) - ensures IsSafe(r,s,GetAttr(e,l),t') - { - reveal IsSafe(); - } - - lemma RecordHasRequiredTrueSafe(r: 
Request, s: EntityStore, e: Expr, l: Attr, t: AttrType) - requires IsSafe(r,s,e,Type.Record(RecordType(map[l := t], OpenAttributes))) - requires t.isRequired - ensures IsTrue(r,s,HasAttr(e,l)) - { - reveal IsSafe(); - } - - lemma RecordHasOpenRecSafe(r: Request, s: EntityStore, e: Expr, l: Attr) - requires IsSafe(r,s,e,Type.Record(RecordType(map[], OpenAttributes))) - ensures IsSafe(r,s,HasAttr(e,l),Type.Bool(AnyBool)) - { - reveal IsSafe(); - } - - lemma RecordHasClosedRecFalseSafe(r: Request, s: EntityStore, e: Expr, l: Attr, rt: RecordType) - requires IsSafe(r,s,e,Type.Record(rt)) - requires l !in rt.attrs.Keys - requires !rt.isOpen() - ensures IsFalse(r,s,HasAttr(e,l)) - { - reveal IsSafe(); - var evaluator := Evaluator(r,s); - var v := evaluator.interpret(e); - if v.Ok? { - var rv :- assert Value.asRecord(v.value); - assert l !in rv.Keys; - } - } - - lemma EntityHasImpossibleFalseSafe(r: Request, s: EntityStore, e: Expr, l: Attr, lub: EntityLUB) - requires IsSafe(r,s,e,Type.Entity(lub)) - requires forall ev: EntityUID | ExistingEntityInLub(s, ev, lub) :: - l !in s.entities[ev].attrs - ensures IsFalse(r,s,HasAttr(e,l)) - { - reveal IsSafe(); - } - - lemma EntityHasOpenSafe(r: Request, s: EntityStore, e: Expr, l: Attr) - requires IsSafe(r,s,e,Type.Entity(AnyEntity)) - ensures IsSafe(r,s,HasAttr(e,l),Type.Bool(AnyBool)) - { - reveal IsSafe(); - } - - lemma InSingleSafe(r: Request, s: EntityStore, e1: Expr, e2: Expr) - requires IsSafe(r,s,e1,Type.Entity(AnyEntity)) - requires IsSafe(r,s,e2,Type.Entity(AnyEntity)) - ensures IsSafe(r,s,BinaryApp(BinaryOp.In,e1,e2),Type.Bool(AnyBool)) - { - reveal IsSafe(); - } - - lemma EntityInEntityMatchesEngine(r: Request, s: EntityStore, u1: EntityUID, u2: EntityUID) - ensures EntityInEntity(s,u1,u2) == Evaluator(r,s).entityInEntity(u1,u2) - {} - - lemma InSingleFalseLiterals(r: Request, s: EntityStore, u1: EntityUID, u2: EntityUID) - requires !EntityInEntity(s,u1,u2) - ensures 
IsFalse(r,s,BinaryApp(BinaryOp.In,PrimitiveLit(Primitive.EntityUID(u1)),PrimitiveLit(Primitive.EntityUID(u2)))) - { - reveal IsSafe(); - var evaluator := Evaluator(r,s); - calc == { - evaluator.interpret(BinaryApp(BinaryOp.In,PrimitiveLit(Primitive.EntityUID(u1)),PrimitiveLit(Primitive.EntityUID(u2)))); - evaluator.applyBinaryOp(BinaryOp.In,Value.EntityUID(u1),Value.EntityUID(u2)); - base.Ok(Value.Bool(evaluator.entityInEntity(u1, u2))); - } - } - - lemma InSingleFalseEntityTypeAndLiteral(r: Request, s: EntityStore, e1: Expr, et1: EntityType, u2: EntityUID) - requires IsSafe(r,s,e1,Type.Entity(EntityLUB({et1}))) - requires forall u1: EntityUID | u1.ty == et1 :: !EntityInEntity(s,u1,u2) - ensures IsFalse(r,s,BinaryApp(BinaryOp.In,e1,PrimitiveLit(Primitive.EntityUID(u2)))) - { - reveal IsSafe(); - var evaluator := Evaluator(r,s); - var r1 := evaluator.interpret(e1); - if r1.Ok? { - var u1 :- assert Value.asEntity(r1.value); - assert u1.ty == et1; - assert !EntityInEntity(s,u1,u2); - assert evaluator.interpret(BinaryApp(BinaryOp.In,e1,PrimitiveLit(Primitive.EntityUID(u2)))) == base.Ok(Value.FALSE); - } - } - - lemma InSingleFalseTypes(r: Request, s: EntityStore, e1: Expr, e2: Expr, t1: Type, t2: Type) - requires subty(t1,Type.Entity(AnyEntity),ValidationMode.Permissive) - requires subty(t2,Type.Entity(AnyEntity),ValidationMode.Permissive) - requires IsSafe(r,s,e1,t1) - requires IsSafe(r,s,e2,t2) - requires forall u1, u2: EntityUID | - InstanceOfType(Value.EntityUID(u1), t1) && InstanceOfType(Value.EntityUID(u2), t2) :: - !EntityInEntity(s,u1,u2) - ensures IsFalse(r,s,BinaryApp(BinaryOp.In,e1,e2)) - { - var E := Evaluator(r,s); - var r1 := E.interpret(e1); - var r2 := E.interpret(e2); - var res := E.interpret(BinaryApp(BinaryOp.In,e1,e2)); - - if r1.Err? { - BinaryAppSemanticsErrLeft(e1, e2, BinaryOp.In, E); - assert res == r1; - IsSafeSemanticsErrRev(r, s, e1, t1); - IsSafeSemanticsErr(r, s, BinaryApp(BinaryOp.In, e1, e2), Type.Bool(False)); - } else if r2.Err? 
{ - BinaryAppSemanticsErrRight(e1, e2, BinaryOp.In, E); - assert res == r2; - IsSafeSemanticsErrRev(r, s, e2, t2); - IsSafeSemanticsErr(r, s, BinaryApp(BinaryOp.In, e1, e2), Type.Bool(False)); - } else { - IsSafeSemanticsOkRev(r, s, e1, t1); - IsSafeSemanticsOkRev(r, s, e2, t2); - assert InstanceOfType(r1.value,t1); - assert InstanceOfType(r2.value,t2); - assert r1.value.Primitive? && r1.value.primitive.EntityUID?; - assert r2.value.Primitive? && r2.value.primitive.EntityUID?; - var u1 := r1.value.primitive.uid; - var u2 := r2.value.primitive.uid; - assert !EntityInEntity(s,u1,u2); - BinaryAppSemanticsOk(e1, e2, BinaryOp.In, E); - assert res == E.applyBinaryOp(BinaryOp.In,r1.value,r2.value); - assert res.value == Value.FALSE; - assert InstanceOfType(res.value, Type.Bool(False)); - IsSafeSemanticsOk(r, s, BinaryApp(BinaryOp.In,e1,e2), Type.Bool(False)); - } - } - - lemma InSetSafe(r: Request, s: EntityStore, e1: Expr, e2: Expr) - requires IsSafe(r,s,e1,Type.Entity(AnyEntity)) - requires IsSafe(r,s,e2,Type.Set(Type.Entity(AnyEntity))) - ensures IsSafe(r,s,BinaryApp(BinaryOp.In,e1,e2),Type.Bool(AnyBool)) - { - reveal IsSafe(); - } - - lemma InSetFalseIfAllFalse(r: Request, s: EntityStore, e1: Expr, e2s: seq) - requires IsSafe(r,s,e1,Type.Entity(AnyEntity)) - requires forall i | 0 <= i < |e2s| :: - IsSafe(r,s,e2s[i],Type.Entity(AnyEntity)) && - IsFalse(r,s,BinaryApp(BinaryOp.In,e1,e2s[i])) - ensures IsFalse(r,s,BinaryApp(BinaryOp.In,e1,Expr.Set(e2s))) - { - var E := Evaluator(r,s); - var res := E.interpret(BinaryApp(BinaryOp.In,e1,Expr.Set(e2s))); - var r1 := E.interpret(e1); - var r2 := E.interpret(Expr.Set(e2s)); - assert r2.Ok? ==> E.interpretSet(e2s).Ok? by { - assert r2 == E.interpretSet(e2s).Map(v => core.Value.Set(v)); - } - match (r1, r2) { - case (Ok(v1), Ok(v2)) => - IsSafeSemanticsOkRev(r, s, e1, Type.Entity(AnyEntity)); - assert core.Value.asEntity(v1).Ok?; - SetSemanticsOk(e2s, E); - forall i: nat | i < |e2s| - ensures E.interpret(e2s[i]).Ok? 
- ensures core.Value.asEntity(E.interpret(e2s[i]).value).Ok? - ensures E.interpret(BinaryApp(BinaryOp.In, e1, e2s[i])) == base.Ok(Value.Bool(false)) { - assert E.interpret(e2s[i]).Ok?; - IsSafeSemanticsOkRev(r, s, e2s[i], Type.Entity(AnyEntity)); - assert core.Value.asEntity(E.interpret(e2s[i]).value).Ok?; - IsSafeSemanticsOkRev(r, s, BinaryApp(BinaryOp.In, e1, e2s[i]), Type.Bool(False)); - assert E.interpret(BinaryApp(BinaryOp.In, e1, e2s[i])) == base.Ok(Value.Bool(false)); - } - InSetSemantics(e1, e2s, E); - IsSafeSemanticsOk(r, s, BinaryApp(BinaryOp.In,e1,Expr.Set(e2s)), Type.Bool(False)); - case (Err(err1), _) => - IsSafeSemanticsErrRev(r, s, e1, Type.Entity(AnyEntity)); - IsSafeSemanticsErr(r, s, BinaryApp(BinaryOp.In,e1,Expr.Set(e2s)), Type.Bool(False)); - case (_, Err(err2)) => - // Probably we're gonna pay for my laziness here in the future. - reveal IsSafe(); - SetSemantics(e2s, E); - } - } - - lemma InSetFalseTypes(r: Request, s: EntityStore, e1: Expr, e2: Expr, t1: Type, t2: Type) - requires subty(t1,Type.Entity(AnyEntity),ValidationMode.Permissive) - requires subty(t2,Type.Entity(AnyEntity),ValidationMode.Permissive) - requires IsSafe(r,s,e1,t1) - requires IsSafe(r,s,e2,Type.Set(t2)) - requires forall u1, u2: EntityUID | - InstanceOfType(Value.EntityUID(u1), t1) && InstanceOfType(Value.EntityUID(u2), t2) :: - !EntityInEntity(s,u1,u2) - ensures IsFalse(r,s,BinaryApp(BinaryOp.In,e1,e2)) - { - reveal IsSafe(); - var evaluator := Evaluator(r,s); - var r1 := evaluator.interpret(e1); - var r2 := evaluator.interpret(e2); - if r1.Ok? && r2.Ok? 
{ - var u1 := Value.asEntity(r1.value).value; - var s2 := Value.asSet(r2.value).value; - assert forall us2 <- s2 :: InstanceOfType(us2,t2); - var us2 :- assert evaluator.checkEntitySet(s2); - forall u2 <- us2 ensures !EntityInEntity(s,u1,u2) { - assert InstanceOfType(Value.EntityUID(u1), t1); - assert InstanceOfType(Value.EntityUID(u2), t2); - } - - var res := Evaluate(BinaryApp(BinaryOp.In, e1, e2), r, s); - assert res.Ok?; - assert InstanceOfType(res.value,Type.Bool(BoolType.False)); - } else if r1.Ok? { - BinaryAppSemanticsErrRight(e1, e2, BinaryOp.In, evaluator); - } else if r2.Ok? { - BinaryAppSemanticsErrLeft(e1, e2, BinaryOp.In, evaluator); - } - } -} diff --git a/cedar-dafny/validation/thm/soundness.dfy b/cedar-dafny/validation/thm/soundness.dfy deleted file mode 100644 index 970943032..000000000 --- a/cedar-dafny/validation/thm/soundness.dfy +++ /dev/null @@ -1,1523 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -include "../../def/all.dfy" -include "../all.dfy" -include "../../thm/eval/basic.dfy" -include "model.dfy" -include "base.dfy" - -// This module contains the core type soundness proof. Rather than importing -// the definitional evaluator, it relies on the abstract model. -// The final lemma is `SoundToplevel`, at the bottom of the file. 
-module validation.thm.soundness { - import opened def - import opened def.core - import opened def.engine - import opened def.util - import opened eval.basic - import opened types - import opened subtyping - import opened typechecker - import opened model - import opened base - import opened ext - - type Result = types.Result - - // A value of type SemanticSoundnessProof(reqty,ets,acts,q,s) contains a - // proof (`lemma SoundToplevel` at the bottom of the file) that any - // expression `e` typed in context (reqty, ets, acts) and evaluated under (r,s) - // is safe according to the model, assuming the context assigns correct - // types to the fields in query `q`, and the entities in store `s`. - // - // The proofs in this file are robust to changes in the evaluator, since they - // only depend on the abstract `model`. - datatype SemanticSoundnessProof = SSP( - reqty: RequestType, - ets: EntityTypeStore, - acts: ActionStore, - r: Request, - s: EntityStore) - { - - const TC := Typechecker(ets,acts,reqty, ValidationMode.Permissive) - - ghost predicate WellTyped(e: Expr, effs: Effects) - { - TC.infer(e,effs).Ok? - } - - function getType(e: Expr, effs: Effects): Type - requires WellTyped(e,effs) - { - TC.infer(e,effs).value.0 - } - - function getEffects(e: Expr, effs: Effects): Effects - requires WellTyped(e,effs) - { - TC.infer(e,effs).value.1 - } - - ghost predicate Typesafe(e: Expr, effs: Effects, t: Type) - { - WellTyped(e,effs) && subty(getType(e, effs), t, ValidationMode.Permissive) - } - - ghost predicate {:opaque} WellFormedRequestAndStore() { - InstanceOfRequestType(r,reqty) && InstanceOfEntityTypeStore(s,ets) && InstanceOfActionStore(s,acts) - } - - // On input to the typechecking function, for any (e,k) in the Effects, - // e is a record- or entity-typed expression that has key k. 
- ghost predicate {:opaque} EffectsInvariant (effs: Effects) { - forall e, k | (e, k) in effs.effs :: GetAttrSafe(r,s,e,k) - } - - // The Effects output by the typechecking function, will satisfy - // `EffectsInvariant` provided that the input expression is true. - ghost predicate GuardedEffectsInvariant (e: Expr, effs: Effects) - { - IsTrueStrong(r,s,e) ==> EffectsInvariant(effs) - } - - lemma EmptyEffectsInvariant () - ensures EffectsInvariant(Effects.empty()) - { - reveal EffectsInvariant(); - } - - lemma SoundLit(p: Primitive, t: Type, effs: Effects) - decreases PrimitiveLit(p) , 0 - requires Typesafe(PrimitiveLit(p),effs,t) - ensures IsSafe(r,s,PrimitiveLit(p),t) - ensures getEffects(PrimitiveLit(p),effs) == Effects.empty() - { - assert InstanceOfType(Primitive(p),typeOfPrim(p)) by { - PrimSafeAtInferredType(p); - } - - assert SemanticSubty(typeOfPrim(p),t) by { - SubtyCompat(typeOfPrim(p),t); - } - - assert InstanceOfType(Primitive(p),t) by { - SemSubtyTransportVal(typeOfPrim(p),t,Primitive(p)); - } - - assert IsSafe(r,s,PrimitiveLit(p),t) by{ - PrimSafeLift(r,s,p,t); - } - } - - lemma SoundVar(x: Var, t: Type, effs: Effects) - decreases Var(x) , 0 - requires WellFormedRequestAndStore() - requires Typesafe(Var(x),effs,t) - ensures IsSafe(r,s,Var(x),t) - ensures getEffects(Var(x),effs) == Effects.empty() - { - assert InstanceOfRequestType(r, reqty) by { reveal WellFormedRequestAndStore(); } - var t' :| getType(Var(x),effs) == t' && subty(t',t,ValidationMode.Permissive); - assert TC.inferVar(x) == types.Ok(t'); - match x { - case Principal => - assert IsSafe(r,s,Var(Principal),t') by { PrincipalIsSafe(r,s,t'); } - assert IsSafe(r,s,Var(Principal),t) by { - SubtyCompat(t',t); - SemSubtyTransport(r,s,Var(Principal),t',t); - } - case Action => - assert IsSafe(r,s,Var(Action),t') by { ActionIsSafe(r,s,t'); } - assert IsSafe(r,s,Var(Action),t) by { - SubtyCompat(t',t); - SemSubtyTransport(r,s,Var(Action),t',t); - } - case Resource => - assert 
IsSafe(r,s,Var(Resource),t') by { ResourceIsSafe(r,s,t'); } - assert IsSafe(r,s,Var(Resource),t) by { - SubtyCompat(t',t); - SemSubtyTransport(r,s,Var(Resource),t',t); - } - case Context => - assert IsSafe(r,s,Var(Context),Type.Record(reqty.context)) by { - ContextIsSafe(r,s,Type.Record(reqty.context)); - } - assert IsSafe(r,s,Var(Context),t) by { - SubtyCompat(Type.Record(reqty.context),t); - SemSubtyTransport(r,s,Var(Context),Type.Record(reqty.context),t); - } - } - } - - lemma EffectsInvariantUnion(effs1: Effects, effs2: Effects) - requires EffectsInvariant(effs1) - requires EffectsInvariant(effs2) - ensures EffectsInvariant(effs1.union(effs2)) - { - reveal EffectsInvariant(); - } - - lemma EffectsInvariantIntersectL(effs1: Effects, effs2: Effects) - requires EffectsInvariant(effs1) - ensures EffectsInvariant(effs1.intersect(effs2)) - { - assert effs1.intersect(effs2) == effs2.intersect(effs1) by { - reveal EffectsInvariant(); - } - EffectsInvariantIntersectR(effs2,effs1); - } - - lemma EffectsInvariantIntersectR(effs1: Effects, effs2: Effects) - requires EffectsInvariant(effs2) - ensures EffectsInvariant(effs1.intersect(effs2)) - { - reveal EffectsInvariant(); - } - - lemma SoundIf(e: Expr, e1: Expr, e2: Expr, t: Type, effs: Effects) - decreases If(e,e1,e2) , 0 - requires WellFormedRequestAndStore() - requires EffectsInvariant(effs) - requires Typesafe(If(e,e1,e2),effs,t) - ensures IsSafe(r,s,If(e,e1,e2),t) - ensures GuardedEffectsInvariant(If(e,e1,e2),getEffects(If(e,e1,e2),effs)) - { - var t' :| getType(If(e,e1,e2),effs) == t' && subty(t',t,ValidationMode.Permissive); - assert TC.inferIf(e,e1,e2,effs).Ok?; - var (bt, effs1) := TC.inferBoolType(e,effs).value; - assert IsSafe(r,s,e,Type.Bool(bt)) && GuardedEffectsInvariant(e,effs1) by { - assert getType(e,effs) == Type.Bool(bt); - assert subty(Type.Bool(bt),Type.Bool(bt),ValidationMode.Permissive); - assert Typesafe(e,effs,Type.Bool(bt)); - Sound(e,Type.Bool(bt),effs); - } - match bt { - case True => - assert 
IsTrue(r,s,e); - var (t1,effs2) := TC.infer(e1,effs.union(effs1)).value; - assert Typesafe(e1,effs.union(effs1),t1) by { SubtyRefl(t1); } - if IsTrueStrong(r,s,e) { - assert EffectsInvariant(effs1); - assert IsSafe(r,s,e1,t1) && GuardedEffectsInvariant(e1,effs2) by { - EffectsInvariantUnion(effs,effs1); - Sound(e1,t1,effs.union(effs1)); - } - assert IsSafe(r,s,If(e,e1,e2),t') by { IteTrueSafe(r,s,e,e1,e2,t'); } - assert IsSafe(r,s,If(e,e1,e2),t) by { - SubtyCompat(t',t); - SemSubtyTransport(r,s,If(e,e1,e2),t',t); - } - assert GuardedEffectsInvariant(If(e,e1,e2),effs1.union(effs2)) by { - if IsTrueStrong(r,s,If(e,e1,e2)) { - IteTrueStrongTrue(r,s,e,e1,e2); - assert EffectsInvariant(effs2); - EffectsInvariantUnion(effs1,effs2); - } - } - } else { - assert IsSafe(r,s,If(e,e1,e2),t) by { - IteError(r,s,e,e1,e2,Type.Bool(True),t); - } - assert GuardedEffectsInvariant(If(e,e1,e2),effs1.union(effs2)) by { - IteError(r,s,e,e1,e2,Type.Bool(True),Type.Bool(True)); - assert !IsTrueStrong(r,s,If(e,e1,e2)); - } - } - case False => - assert IsFalse(r,s,e); - var (t2,effs2) := TC.infer(e2,effs).value; - assert Typesafe(e2,effs,t2) by { SubtyRefl(t2); } - assert IsSafe(r,s,e2,t2) && GuardedEffectsInvariant(e2,effs2) by { - Sound(e2,t2,effs); - } - assert IsSafe(r,s,If(e,e1,e2),t') by { IteFalseSafe(r,s,e,e1,e2,t'); } - assert IsSafe(r,s,If(e,e1,e2),t) by { - SubtyCompat(t',t); - SemSubtyTransport(r,s,If(e,e1,e2),t',t); - } - assert GuardedEffectsInvariant(If(e,e1,e2),effs2) by { - if IsTrueStrong(r,s,If(e,e1,e2)) { - IteTrueStrongFalse(r,s,e,e1,e2); - assert EffectsInvariant(effs2); - } - } - case AnyBool => - var (t1,effs2) := TC.infer(e1,effs.union(effs1)).value; - var (t2,effs3) := TC.infer(e2,effs).value; - assert Typesafe(e1,effs.union(effs1),t1) by { SubtyRefl(t1); } - assert Typesafe(e2,effs,t2) by { SubtyRefl(t2); } - assert t' == lubOpt(t1,t2,ValidationMode.Permissive).value; - assert subty(t1,t',ValidationMode.Permissive) && subty(t2,t',ValidationMode.Permissive) by { 
LubIsUB(t1,t2,t',ValidationMode.Permissive); } - if IsSafeStrong(r,s,e,Type.Bool(bt)) { - if IsTrue(r,s,e) { - // `e` evaluates to true - IsTrueImpliesIsTrueStrong(r,s,e,Type.Bool(bt)); - assert IsTrueStrong(r,s,e); - assert EffectsInvariant(effs1); - assert IsSafe(r,s,e1,t1) && GuardedEffectsInvariant(e1,effs2) by { - EffectsInvariantUnion(effs,effs1); - Sound(e1,t1,effs.union(effs1)); - } - assert IsSafe(r,s,If(e,e1,e2),t1) by { IteTrueSafe(r,s,e,e1,e2,t1); } - assert IsSafe(r,s,If(e,e1,e2),t) by { - SubtyCompat(t1,t'); - SubtyCompat(t',t); - SemSubtyTransport(r,s,If(e,e1,e2),t1,t); - } - assert GuardedEffectsInvariant(If(e,e1,e2),effs1.union(effs2)) by { - if IsTrueStrong(r,s,If(e,e1,e2)) { - IteTrueStrongTrue(r,s,e,e1,e2); - EffectsInvariantUnion(effs1,effs2); - } - } - assert GuardedEffectsInvariant(If(e,e1,e2),effs1.union(effs2).intersect(effs3)) by { - if IsTrueStrong(r,s,If(e,e1,e2)) { - EffectsInvariantIntersectL(effs1.union(effs2),effs3); - } - } - } else { - // `e` evaluates to false - NotTrueImpliesFalse(r,s,e,bt); - assert IsFalse(r,s,e); - assert IsSafe(r,s,e2,t2) && GuardedEffectsInvariant(e2,effs3) by { - Sound(e2,t2,effs); - } - assert IsSafe(r,s,If(e,e1,e2),t2) by { IteFalseSafe(r,s,e,e1,e2,t2); } - assert IsSafe(r,s,If(e,e1,e2),t) by { - SubtyCompat(t2,t'); - SubtyCompat(t',t); - SemSubtyTransport(r,s,If(e,e1,e2),t2,t); - } - assert GuardedEffectsInvariant(If(e,e1,e2),effs3) by { - if IsTrueStrong(r,s,If(e,e1,e2)) { - IteTrueStrongFalse(r,s,e,e1,e2); - } - } - assert GuardedEffectsInvariant(If(e,e1,e2),effs1.union(effs2).intersect(effs3)) by { - if IsTrueStrong(r,s,If(e,e1,e2)) { - EffectsInvariantIntersectR(effs1.union(effs2),effs3); - } - } - } - } else { - // `e` produces an error - assert IsSafe(r,s,If(e,e1,e2),t) by { - IteError(r,s,e,e1,e2,Type.Bool(bt),t); - } - assert GuardedEffectsInvariant(If(e,e1,e2),effs1.union(effs2).intersect(effs3)) by { - IteError(r,s,e,e1,e2,Type.Bool(bt),Type.Bool(True)); - assert !IsTrueStrong(r,s,If(e,e1,e2)); 
- } - } - } - } - - lemma SoundAnd(e1: Expr, e2: Expr, t: Type, effs: Effects) - decreases And(e1,e2) , 0 - requires WellFormedRequestAndStore() - requires EffectsInvariant(effs) - requires Typesafe(And(e1,e2),effs,t) - ensures IsSafe(r,s,And(e1,e2),t) - ensures GuardedEffectsInvariant(And(e1,e2),getEffects(And(e1,e2),effs)) - { - var t' :| getType(And(e1,e2),effs) == t' && subty(t',t,ValidationMode.Permissive); - assert TC.inferAnd(e1,e2,effs).Ok?; - var (bt1, effs1) := TC.inferBoolType(e1,effs).value; - assert Typesafe(e1,effs,Type.Bool(bt1)); - assert IsSafe(r,s,e1,Type.Bool(bt1)) && GuardedEffectsInvariant(e1,effs1) by { - Sound(e1,Type.Bool(bt1),effs); - } - assert GuardedEffectsInvariant(And(e1,e2),Effects.empty()) by { - EmptyEffectsInvariant(); - } - match bt1 { - case False => - assert IsSafe(r,s,And(e1,e2),t') by { AndLShortSafe(r,s,e1,e2); } - assert IsSafe(r,s,And(e1,e2),t) by { - SubtyCompat(t',t); - SemSubtyTransport(r,s,And(e1,e2),t',t); - } - case _ => - var res := TC.inferBoolType(e2,effs.union(effs1)).value; - var bt2 := res.0; - var effs2 := res.1; - assert Typesafe(e2,effs.union(effs1),Type.Bool(bt2)); - if IsSafeStrong(r,s,e1,Type.Bool(bt1)) { - if IsTrue(r,s,e1) { - // `e1` evaluates to true - IsTrueImpliesIsTrueStrong(r,s,e1,Type.Bool(bt1)); - assert IsTrueStrong(r,s,e1); - assert EffectsInvariant(effs1); - assert IsSafe(r,s,e2,Type.Bool(bt2)) && GuardedEffectsInvariant(e2,effs2) by { - EffectsInvariantUnion(effs,effs1); - Sound(e2,Type.Bool(bt2),effs.union(effs1)); - } - match bt2 { - case False => - assert IsFalse(r,s,e2); - assert IsSafe(r,s,e1,Type.Bool(AnyBool)) by { - assert subty(Type.Bool(bt1),Type.Bool(AnyBool),ValidationMode.Permissive); - SubtyCompat(Type.Bool(bt1),Type.Bool(AnyBool)); - SemSubtyTransport(r,s,e1,Type.Bool(bt1),Type.Bool(AnyBool)); - } - assert IsSafe(r,s,And(e1,e2),t') by { - AndRShortSafe(r,s,e1,e2); - } - assert IsSafe(r,s,And(e1,e2),t) by { - SubtyCompat(t',t); - SemSubtyTransport(r,s,And(e1,e2),t',t); - } - 
case True => - assert IsTrue(r,s,e2); - assert SemanticSubty(Type.Bool(bt1),Type.Bool(AnyBool)) by { - assert subty(Type.Bool(bt1),Type.Bool(AnyBool),ValidationMode.Permissive); - SubtyCompat(Type.Bool(bt1),Type.Bool(AnyBool)); - } - assert IsSafe(r,s,And(e1,e2),Type.Bool(bt1)) by { - AndLRetSafe(r,s,e1,e2,Type.Bool(bt1)); - } - assert IsSafe(r,s,And(e1,e2),t) by { - SubtyCompat(t',t); - SemSubtyTransport(r,s,And(e1,e2),t',t); - } - assert GuardedEffectsInvariant(And(e1,e2),effs1.union(effs2)) by { - if IsTrueStrong(r,s,And(e1,e2)) { - AndTrueStrong(r,s,e1,e2); - assert EffectsInvariant(effs2); - EffectsInvariantUnion(effs1,effs2); - } - } - case _ => - assert IsSafe(r,s,e1,Type.Bool(AnyBool)) by { - SubtyCompat(Type.Bool(bt1),Type.Bool(AnyBool)); - SemSubtyTransport(r,s,e1,Type.Bool(bt1),Type.Bool(AnyBool)); - } - assert IsSafe(r,s,e2,Type.Bool(AnyBool)) by { - SubtyCompat(Type.Bool(bt2),Type.Bool(AnyBool)); - SemSubtyTransport(r,s,e2,Type.Bool(bt2),Type.Bool(AnyBool)); - } - assert IsSafe(r,s,And(e1,e2),Type.Bool(AnyBool)) by { AndSafe(r,s,e1,e2); } - assert IsSafe(r,s,And(e1,e2),t) by { - SubtyCompat(t',t); - SemSubtyTransport(r,s,And(e1,e2),t',t); - } - assert GuardedEffectsInvariant(And(e1,e2),effs1.union(effs2)) by { - if IsTrueStrong(r,s,And(e1,e2)) { - AndTrueStrong(r,s,e1,e2); - assert EffectsInvariant(effs2); - EffectsInvariantUnion(effs1,effs2); - } - } - } - } else { - // `e1` evaluates to false - NotTrueImpliesFalse(r,s,e1,bt1); - assert IsFalse(r,s,e1); - assert IsFalse(r,s,And(e1,e2)) by { AndLShortSafe(r,s,e1,e2); } - assert IsSafe(r,s,And(e1,e2),t) by { - SubtyCompat(Type.Bool(False),t); - SemSubtyTransport(r,s,And(e1,e2),Type.Bool(False),t); - } - match bt2 { - case False => - case True => - assert GuardedEffectsInvariant(And(e1,e2),effs1.union(effs2)) by { - assert IsFalse(r,s,And(e1,e2)); - FalseImpliesNotTrueStrong(r,s,And(e1,e2)); - assert !IsTrueStrong(r,s,And(e1,e2)); - } - case AnyBool => - assert 
GuardedEffectsInvariant(And(e1,e2),effs1.union(effs2)) by { - assert IsFalse(r,s,And(e1,e2)); - FalseImpliesNotTrueStrong(r,s,And(e1,e2)); - assert !IsTrueStrong(r,s,And(e1,e2)); - } - } - } - } else { - // `e1` produces an error - assert IsSafe(r,s,And(e1,e2),t) by { - AndError(r,s,e1,e2,Type.Bool(bt1),t); - } - assert GuardedEffectsInvariant(And(e1,e2),effs1.union(effs2)) by { - AndError(r,s,e1,e2,Type.Bool(bt1),Type.Bool(True)); - assert !IsTrueStrong(r,s,And(e1,e2)); - } - } - } - } - - lemma SoundOr(e1: Expr, e2: Expr, t: Type, effs: Effects) - decreases Or(e1,e2) , 0 - requires WellFormedRequestAndStore() - requires EffectsInvariant(effs) - requires Typesafe(Or(e1,e2),effs,t) - ensures IsSafe(r,s,Or(e1,e2),t) - ensures GuardedEffectsInvariant(Or(e1,e2),getEffects(Or(e1,e2),effs)) - { - var t' :| getType(Or(e1,e2),effs) == t' && subty(t',t,ValidationMode.Permissive); - assert TC.inferOr(e1,e2,effs).Ok?; - var (bt1, effs1) := TC.inferBoolType(e1,effs).value; - assert Typesafe(e1,effs,Type.Bool(bt1)); - assert IsSafe(r,s,e1,Type.Bool(bt1)) && GuardedEffectsInvariant(e1,effs1) by { - Sound(e1,Type.Bool(bt1),effs); - } - assert GuardedEffectsInvariant(Or(e1,e2),Effects.empty()) by { - EmptyEffectsInvariant(); - } - match bt1 { - case True => - assert IsTrue(r,s,e1); - assert IsSafe(r,s,Or(e1,e2),t') by { OrLShortSafe(r,s,e1,e2); } - assert IsSafe(r,s,Or(e1,e2),t) by { - SubtyCompat(t',t); - SemSubtyTransport(r,s,Or(e1,e2),t',t); - } - case False => - assert IsFalse(r,s,e1); - var (bt2, effs2) := TC.inferBoolType(e2,effs).value; - assert Typesafe(e2,effs,Type.Bool(bt2)); - assert IsSafe(r,s,e2,Type.Bool(bt2)) && GuardedEffectsInvariant(e2,effs2) by { - Sound(e2,Type.Bool(bt2),effs); - } - assert SemanticSubty(Type.Bool(bt2),Type.Bool(AnyBool)) by { - assert subty(Type.Bool(bt2),Type.Bool(AnyBool),ValidationMode.Permissive); - SubtyCompat(Type.Bool(bt2),Type.Bool(AnyBool)); - } - assert IsSafe(r,s,Or(e1,e2),Type.Bool(bt2)) by { OrRRetSafe(r,s,e1,e2,Type.Bool(bt2)); 
} - assert IsSafe(r,s,Or(e1,e2),t) by { - SubtyCompat(t',t); - SemSubtyTransport(r,s,Or(e1,e2),t',t); - } - assert GuardedEffectsInvariant(Or(e1,e2),effs2) by { - if IsTrueStrong(r,s,Or(e1,e2)) { - OrTrueStrong(r,s,e1,e2); - FalseImpliesNotTrueStrong(r,s,e1); - assert IsTrueStrong(r,s,e2); - assert EffectsInvariant(effs2); - } - } - case _ => - var (bt2, effs2) := TC.inferBoolType(e2,effs).value; - assert Typesafe(e2,effs,Type.Bool(bt2)); - assert IsSafe(r,s,e2,Type.Bool(bt2)) && GuardedEffectsInvariant(e2,effs2) by { - Sound(e2,Type.Bool(bt2),effs); - } - match bt2 { - case True => - assert IsTrue(r,s,e2); - assert IsSafe(r,s,e1,Type.Bool(AnyBool)) by { - SubtyCompat(Type.Bool(bt1),Type.Bool(AnyBool)); - SemSubtyTransport(r,s,e1,Type.Bool(bt1),Type.Bool(AnyBool)); - } - assert IsTrue(r,s,Or(e1,e2)) by { OrRShortSafe(r,s,e1,e2); } - assert IsSafe(r,s,Or(e1,e2),t) by { - SubtyCompat(Type.Bool(True),t); - SemSubtyTransport(r,s,Or(e1,e2),Type.Bool(True),t); - } - case False => - assert IsFalse(r,s,e2); - assert IsSafe(r,s,Or(e1,e2),t) by { - OrLRetSafe(r,s,e1,e2,Type.Bool(bt1)); - SubtyCompat(Type.Bool(bt1),t); - SemSubtyTransport(r,s,Or(e1,e2),Type.Bool(bt1),t); - } - assert GuardedEffectsInvariant(Or(e1,e2),effs1) by { - if IsTrueStrong(r,s,Or(e1,e2)) { - OrTrueStrong(r,s,e1,e2); - FalseImpliesNotTrueStrong(r,s,e2); - assert IsTrueStrong(r,s,e1); - assert EffectsInvariant(effs1); - } - } - case _ => - assert IsSafe(r,s,e1,Type.Bool(AnyBool)) by { - SubtyCompat(Type.Bool(bt1),Type.Bool(AnyBool)); - SemSubtyTransport(r,s,e1,Type.Bool(bt1),Type.Bool(AnyBool)); - } - assert IsSafe(r,s,e2,Type.Bool(AnyBool)) by { - SubtyCompat(Type.Bool(bt2),Type.Bool(AnyBool)); - SemSubtyTransport(r,s,e2,Type.Bool(bt2),Type.Bool(AnyBool)); - } - assert IsSafe(r,s,Or(e1,e2),Type.Bool(AnyBool)) by { OrSafe(r,s,e1,e2); } - assert IsSafe(r,s,Or(e1,e2),t) by { - SubtyCompat(t',t); - SemSubtyTransport(r,s,Or(e1,e2),t',t); - } - assert GuardedEffectsInvariant(Or(e1,e2),effs1.intersect(effs2)) 
by { - if IsTrueStrong(r,s,Or(e1,e2)) { - OrTrueStrong(r,s,e1,e2); - if IsTrueStrong(r,s,e1) { - assert EffectsInvariant(effs1); - EffectsInvariantIntersectL(effs1,effs2); - } else { - assert IsTrueStrong(r,s,e2); - assert EffectsInvariant(effs2); - EffectsInvariantIntersectR(effs1,effs2); - } - } - } - } - } - } - - lemma SoundNot(e: Expr, t: Type, effs: Effects) - decreases UnaryApp(Not,e) , 0 - requires WellFormedRequestAndStore() - requires EffectsInvariant(effs) - requires Typesafe(UnaryApp(Not,e),effs,t) - ensures IsSafe(r,s,UnaryApp(Not,e),t) - ensures getEffects(UnaryApp(Not,e),effs) == Effects.empty() - { - var t' :| getType(UnaryApp(Not,e),effs) == t' && subty(t',t,ValidationMode.Permissive); - assert TC.inferNot(e,effs).Ok?; - var (bt,_) := TC.inferBoolType(e,effs).value; - assert t' == Type.Bool(bt.not()); - assert Typesafe(e,effs,Type.Bool(bt)) by { SubtyRefl(Type.Bool(bt)); } - assert IsSafe(r,s,e,Type.Bool(bt)) by { Sound(e,Type.Bool(bt),effs); } - assert IsSafe(r,s,UnaryApp(Not,e),t') by { - match bt { - case AnyBool => NotSafe(r,s,e); - case True => NotTrueSafe(r,s,e); - case False => NotFalseSafe(r,s,e); - } - } - assert IsSafe(r,s,UnaryApp(Not,e),t) by { - SubtyCompat(t',t); - SemSubtyTransport(r,s,UnaryApp(Not,e),t',t); - } - } - - lemma SoundNeg(e: Expr, t: Type, effs: Effects) - decreases UnaryApp(Neg,e) , 0 - requires WellFormedRequestAndStore() - requires EffectsInvariant(effs) - requires Typesafe(UnaryApp(Neg,e),effs,t) - ensures IsSafe(r,s,UnaryApp(Neg,e),t) - ensures getEffects(UnaryApp(Neg,e),effs) == Effects.empty() - { - var t' :| getType(UnaryApp(Neg,e),effs) == t' && subty(t',t,ValidationMode.Permissive); - assert TC.inferArith1(Neg,e,effs) == types.Ok(Type.Int); - assert TC.ensureIntType(e,effs).Ok?; - assert Typesafe(e,effs,Type.Int); - assert IsSafe(r,s,e,Type.Int) by { Sound(e,Type.Int,effs); } - assert IsSafe(r,s,UnaryApp(Neg,e),t') by { NegSafe(r,s,e); } - assert IsSafe(r,s,UnaryApp(Neg,e),t) by { - SubtyCompat(t',t); - 
SemSubtyTransport(r,s,UnaryApp(Neg,e),t',t); - } - } - - lemma SoundMulBy(i: int, e: Expr, t: Type, effs: Effects) - decreases UnaryApp(MulBy(i),e) , 0 - requires WellFormedRequestAndStore() - requires EffectsInvariant(effs) - requires Typesafe(UnaryApp(MulBy(i),e),effs,t) - ensures IsSafe(r,s,UnaryApp(MulBy(i),e),t) - ensures getEffects(UnaryApp(MulBy(i),e),effs) == Effects.empty() - { - var t' :| getType(UnaryApp(MulBy(i),e),effs) == t' && subty(t',t,ValidationMode.Permissive); - assert TC.inferArith1(MulBy(i),e,effs) == types.Ok(Type.Int); - assert TC.ensureIntType(e,effs).Ok?; - assert Typesafe(e,effs,Type.Int); - assert IsSafe(r,s,e,Type.Int) by { Sound(e,Type.Int,effs); } - assert IsSafe(r,s,UnaryApp(MulBy(i),e),t') by { MulBySafe(r,s,e,i); } - assert IsSafe(r,s,UnaryApp(MulBy(i),e),t) by { - SubtyCompat(t',t); - SemSubtyTransport(r,s,UnaryApp(MulBy(i),e),t',t); - } - } - - lemma SoundLike(e: Expr, p: Pattern, t: Type, effs: Effects) - decreases UnaryApp(Like(p),e) , 0 - requires WellFormedRequestAndStore() - requires EffectsInvariant(effs) - requires Typesafe(UnaryApp(Like(p),e),effs,t) - ensures IsSafe(r,s,UnaryApp(Like(p),e),t) - ensures getEffects(UnaryApp(Like(p),e),effs) == Effects.empty() - { - var t' :| getType(UnaryApp(Like(p),e),effs) == t' && subty(t',t,ValidationMode.Permissive); - assert TC.inferLike(p,e,effs) == types.Ok(Type.Bool(AnyBool)); - assert TC.ensureStringType(e,effs).Ok?; - assert Typesafe(e,effs,Type.String); - assert IsSafe(r,s,e,Type.String) by { Sound(e,Type.String,effs); } - assert IsSafe(r,s,UnaryApp(Like(p),e),t') by { LikeSafe(r,s,e,p); } - assert IsSafe(r,s,UnaryApp(Like(p),e),t) by { - SubtyCompat(t',t); - SemSubtyTransport(r,s,UnaryApp(Like(p),e),t',t); - } - } - - lemma SoundIs(e: Expr, ety: EntityType, t: Type, effs: Effects) - decreases UnaryApp(UnaryOp.Is(ety),e) , 0 - requires WellFormedRequestAndStore() - requires EffectsInvariant(effs) - requires Typesafe(UnaryApp(UnaryOp.Is(ety),e),effs,t) - ensures 
IsSafe(r,s,UnaryApp(UnaryOp.Is(ety),e),t) - ensures getEffects(UnaryApp(UnaryOp.Is(ety),e),effs) == Effects.empty() - { - var t' :| getType(UnaryApp(UnaryOp.Is(ety),e),effs) == t' && subty(t',t,ValidationMode.Permissive); - assert TC.inferIs(ety,e,effs) == types.Ok(t'); - - assert TC.ensureEntityType(e,effs).Ok?; - var t1 := getType(e,effs); - assert t1.Entity?; - - assert IsSafe(r,s,e,t1) by { Sound(e,t1,effs); } - match t1 { - case Entity(AnyEntity) => - assert t' == Type.Bool(AnyBool); - assert IsSafe(r,s,UnaryApp(UnaryOp.Is(ety),e),Type.Bool(AnyBool)) by { - IsOpSafe(r,s,e,ety); - } - case Entity(EntityLUB(tys)) => - if ety !in tys { - assert t' == Type.Bool(False); - assert IsSafe(r,s,UnaryApp(UnaryOp.Is(ety),e),t') by { - IsOpSafeFalse(r,s,e,ety,EntityLUB(tys)); - } - assert IsSafe(r,s,UnaryApp(UnaryOp.Is(ety),e),t) by { - SubtyCompat(t',t); - SemSubtyTransport(r,s,UnaryApp(UnaryOp.Is(ety),e),t',t); - } - } else if ety in tys && |tys| == 1 { - assert t' == Type.Bool(True); - assert IsSafe(r,s,UnaryApp(UnaryOp.Is(ety),e),t') by { - IsOpSafeTrue(r,s,e,ety,EntityLUB(tys)); - } - assert IsSafe(r,s,UnaryApp(UnaryOp.Is(ety),e),t) by { - SubtyCompat(t',t); - SemSubtyTransport(r,s,UnaryApp(UnaryOp.Is(ety),e),t',t); - } - } else { - assert IsSafe(r,s,UnaryApp(UnaryOp.Is(ety),e),Type.Bool(AnyBool)) by { - assert subty(t1,Type.Entity(AnyEntity),ValidationMode.Permissive); - assert IsSafe(r,s,e,Type.Entity(AnyEntity)) by { Sound(e,Type.Entity(AnyEntity),effs); } - IsOpSafe(r,s,e,ety); - } - } - } - } - - const unspecifiedEntityType := Type.Entity(EntityLUB({EntityType.UNSPECIFIED})) - - // Take advantage of the fact that in the current implementation, an - // unspecified entity belongs to unspecifiedEntityType, and we can reuse - // our logic about LUBs to show that it is different from any "specified" - // entity. This might not work in the future if we restructure EntityType to - // have a separate alternative for unspecified entities like in production. 
- lemma UnspecifiedVarHasUnspecifiedEntityType(e: Expr) - requires reqty.isUnspecifiedVar(e) - requires InstanceOfRequestType(r,reqty) - ensures IsSafe(r,s,e,unspecifiedEntityType) - { - match e { - case Var(Principal) => - assert r.principal == unspecifiedPrincipalEuid; - PrincipalIsSafe(r,s,unspecifiedEntityType); - case Var(Resource) => - assert r.resource == unspecifiedResourceEuid; - ResourceIsSafe(r,s,unspecifiedEntityType); - } - } - - lemma SoundEqAuxEqUids(u1: EntityUID, u2: EntityUID, t: Type, effs: Effects) - requires Typesafe(BinaryApp(BinaryOp.Eq,PrimitiveLit(Primitive.EntityUID(u1)),PrimitiveLit(Primitive.EntityUID(u2))),effs,t) - requires u1 == u2 - ensures IsSafe(r,s,BinaryApp(BinaryOp.Eq,PrimitiveLit(Primitive.EntityUID(u1)),PrimitiveLit(Primitive.EntityUID(u2))),t) { - var e1: Expr := PrimitiveLit(Primitive.EntityUID(u1)); - var e2: Expr := PrimitiveLit(Primitive.EntityUID(u2)); - var t' :| getType(BinaryApp(BinaryOp.Eq,e1,e2),effs) == t' && subty(t',t,ValidationMode.Permissive); - assert TC.inferEq(e1,e2,effs) == types.Ok(t'); - // Somehow, these unused variables help nudge Dafny to complete the proof. 
- var t1 := getType(e1,effs); - var t2 := getType(e2,effs); - assert t' == Type.Bool(True); - assert IsSafe(r,s,BinaryApp(BinaryOp.Eq,e1,e2),t') by { EqEntitySameSafe(r,s,u1); } - assert IsSafe(r,s,BinaryApp(BinaryOp.Eq,e1,e2),t) by { - SubtyCompat(t',t); - SemSubtyTransport(r,s,BinaryApp(BinaryOp.Eq,e1,e2),t',t); } - } - - lemma SoundEqAuxDiffUids(u1: EntityUID, u2: EntityUID, t: Type, effs: Effects) - requires Typesafe(BinaryApp(BinaryOp.Eq,PrimitiveLit(Primitive.EntityUID(u1)),PrimitiveLit(Primitive.EntityUID(u2))),effs,t) - requires u1 != u2 - ensures IsSafe(r,s,BinaryApp(BinaryOp.Eq,PrimitiveLit(Primitive.EntityUID(u1)),PrimitiveLit(Primitive.EntityUID(u2))),t) { - var e1: Expr := PrimitiveLit(Primitive.EntityUID(u1)); - var e2: Expr := PrimitiveLit(Primitive.EntityUID(u2)); - var t' :| getType(BinaryApp(BinaryOp.Eq,e1,e2),effs) == t' && subty(t',t,ValidationMode.Permissive); - assert TC.inferEq(e1,e2,effs) == types.Ok(t'); - // Somehow, these unused variables help nudge Dafny to complete the proof. 
- var t1 := getType(e1,effs); - var t2 := getType(e2,effs); - assert t' == Type.Bool(False); - assert IsSafe(r,s,BinaryApp(BinaryOp.Eq,e1,e2),t') by { EqEntityDiffSafe(r,s,u1,u2); } - assert IsSafe(r,s,BinaryApp(BinaryOp.Eq,e1,e2),t) by { - SubtyCompat(t',t); - SemSubtyTransport(r,s,BinaryApp(BinaryOp.Eq,e1,e2),t',t); } - } - - lemma SoundEqAux(u1: EntityUID, u2: EntityUID, t: Type, effs: Effects) - requires Typesafe(BinaryApp(BinaryOp.Eq,PrimitiveLit(Primitive.EntityUID(u1)),PrimitiveLit(Primitive.EntityUID(u2))),effs,t) - ensures IsSafe(r,s,BinaryApp(BinaryOp.Eq,PrimitiveLit(Primitive.EntityUID(u1)),PrimitiveLit(Primitive.EntityUID(u2))),t) - { - if u1 == u2 { - SoundEqAuxEqUids(u1, u2, t, effs); - } else { - SoundEqAuxDiffUids(u1, u2, t, effs); - } - } - - lemma TypesafeEqSemantics(e1: Expr, e2: Expr, t: Type, effs: Effects) returns (t': Type) - requires EffectsInvariant(effs) - requires Typesafe(BinaryApp(BinaryOp.Eq, e1, e2), effs, t) - ensures getType(BinaryApp(BinaryOp.Eq,e1,e2),effs) == t' && subty(t',t,ValidationMode.Permissive) - ensures TC.inferEq(e1,e2,effs) == types.Ok(t') - ensures Typesafe(e1,effs,getType(e1,effs)) - ensures Typesafe(e2,effs,getType(e2,effs)) - { - var tt' :| getType(BinaryApp(BinaryOp.Eq,e1,e2),effs) == tt' && subty(tt',t,ValidationMode.Permissive); - assert TC.inferEq(e1,e2,effs) == types.Ok(tt'); - t' := tt'; - SubtyRefl(getType(e1,effs)); - SubtyRefl(getType(e2,effs)); - } - - lemma SoundEq(e1: Expr, e2: Expr, t: Type, effs: Effects) - decreases BinaryApp(BinaryOp.Eq,e1,e2) , 0 - requires WellFormedRequestAndStore() - requires EffectsInvariant(effs) - requires Typesafe(BinaryApp(BinaryOp.Eq,e1,e2),effs,t) - ensures IsSafe(r,s,BinaryApp(BinaryOp.Eq,e1,e2),t) - ensures getEffects(BinaryApp(BinaryOp.Eq,e1,e2),effs) == Effects.empty() - { - var t' := TypesafeEqSemantics(e1,e2,t,effs); - assert TC.inferEq(e1,e2,effs) == types.Ok(t'); - var t1 := getType(e1,effs); - var t2 := getType(e2,effs); - assert IsSafe(r,s,e1,t1) by { 
Sound(e1,t1,effs); } - assert IsSafe(r,s,e2,t2) by { Sound(e2,t2,effs); } - match (e1,e2,t1,t2) { - case (PrimitiveLit(EntityUID(u1)),PrimitiveLit(EntityUID(u2)),_,_) => - SoundEqAux(u1,u2,t,effs); - case _ => - if t1.Entity? && t2.Entity? && t1.lub.disjoint(t2.lub) { - assert t' == Type.Bool(False); - EqFalseIsSafe(r,s,e1,e2,t1.lub,t2.lub); - } else if reqty.isUnspecifiedVar(e1) && t2.Entity? && t2.lub.specified() { - assert t' == Type.Bool(False); - reveal WellFormedRequestAndStore(); - UnspecifiedVarHasUnspecifiedEntityType(e1); - EqFalseIsSafe(r,s,e1,e2,unspecifiedEntityType.lub,t2.lub); - } else { - assert t' == Type.Bool(AnyBool); - EqIsSafe(r,s,e1,e2,t1,t2); - } - assert IsSafe(r,s,BinaryApp(BinaryOp.Eq,e1,e2),t) by { - SubtyCompat(t',t); - SemSubtyTransport(r,s,BinaryApp(BinaryOp.Eq,e1,e2),t',t); - } - } - } - - lemma SoundIneq(op: BinaryOp, e1: Expr, e2: Expr, t: Type, effs: Effects) - decreases BinaryApp(op,e1,e2) , 0 - requires op == Less || op == BinaryOp.LessEq - requires WellFormedRequestAndStore() - requires EffectsInvariant(effs) - requires Typesafe(BinaryApp(op,e1,e2),effs,t) - ensures IsSafe(r,s,BinaryApp(op,e1,e2),t) - ensures getEffects(BinaryApp(op,e1,e2),effs) == Effects.empty() - { - var t' :| getType(BinaryApp(op,e1,e2),effs) == t' && subty(t',t,ValidationMode.Permissive); - assert TC.inferIneq(op,e1,e2,effs) == types.Ok(Type.Bool(AnyBool)); - assert TC.ensureIntType(e1,effs).Ok?; - assert Typesafe(e1,effs,Type.Int); - assert TC.ensureIntType(e2,effs).Ok?; - assert Typesafe(e2,effs,Type.Int); - assert IsSafe(r,s,e1,Type.Int) by { Sound(e1,Type.Int,effs); } - assert IsSafe(r,s,e2,Type.Int) by { Sound(e2,Type.Int,effs); } - assert IsSafe(r,s,BinaryApp(op,e1,e2),t') by { IneqSafe(r,s,op,e1,e2); } - assert IsSafe(r,s,BinaryApp(op,e1,e2),t) by { - SubtyCompat(t',t); - SemSubtyTransport(r,s,BinaryApp(op,e1,e2),t',t); - } - } - - lemma SoundArith(op: BinaryOp, e1: Expr, e2: Expr, t: Type, effs: Effects) - decreases BinaryApp(op,e1,e2) , 0 - 
requires op == Add || op == Sub - requires WellFormedRequestAndStore() - requires EffectsInvariant(effs) - requires Typesafe(BinaryApp(op,e1,e2),effs,t) - ensures IsSafe(r,s,BinaryApp(op,e1,e2),t) - ensures getEffects(BinaryApp(op,e1,e2),effs) == Effects.empty() - { - var t' :| getType(BinaryApp(op,e1,e2),effs) == t' && subty(t',t,ValidationMode.Permissive); - assert TC.inferArith2(op,e1,e2,effs) == types.Ok(Type.Int); - assert TC.ensureIntType(e1,effs).Ok?; - assert Typesafe(e1,effs,Type.Int); - assert TC.ensureIntType(e2,effs).Ok?; - assert Typesafe(e2,effs,Type.Int); - assert IsSafe(r,s,e1,Type.Int) by { Sound(e1,Type.Int,effs); } - assert IsSafe(r,s,e2,Type.Int) by { Sound(e2,Type.Int,effs); } - assert IsSafe(r,s,BinaryApp(op,e1,e2),t') by { ArithSafe(r,s,op,e1,e2); } - assert IsSafe(r,s,BinaryApp(op,e1,e2),t) by { - SubtyCompat(t',t); - SemSubtyTransport(r,s,BinaryApp(op,e1,e2),t',t); - } - } - - lemma SoundContainsAnyAll(op: BinaryOp, e1: Expr, e2: Expr, t: Type, effs: Effects) - decreases BinaryApp(op,e1,e2) , 0 - requires op == ContainsAny || op == ContainsAll - requires WellFormedRequestAndStore() - requires EffectsInvariant(effs) - requires Typesafe(BinaryApp(op,e1,e2),effs,t) - ensures IsSafe(r,s,BinaryApp(op,e1,e2),t) - ensures getEffects(BinaryApp(op,e1,e2),effs) == Effects.empty() - { - var t' :| getType(BinaryApp(op,e1,e2),effs) == t' && subty(t',t,ValidationMode.Permissive); - assert TC.inferContainsAnyAll(op,e1,e2,effs) == types.Ok(t'); - var t1 := TC.inferSetType(e1,effs).value; - var t2 := TC.inferSetType(e2,effs).value; - assert Typesafe(e1,effs,Type.Set(t1)) by { SubtyRefl(Type.Set(t1)); } - assert Typesafe(e2,effs,Type.Set(t2)) by { SubtyRefl(Type.Set(t2)); } - assert IsSafe(r,s,e1,Type.Set(t1)) by { Sound(e1,Type.Set(t1),effs); } - assert IsSafe(r,s,e2,Type.Set(t2)) by { Sound(e2,Type.Set(t2),effs); } - assert IsSafe(r,s,BinaryApp(op,e1,e2),t') by { ContainsAnyAllSafe(r,s,op,e1,e2,t1,t2); } - assert IsSafe(r,s,BinaryApp(op,e1,e2),t) by { - 
SubtyCompat(t',t); - SemSubtyTransport(r,s,BinaryApp(op,e1,e2),t',t); - } - } - - lemma SoundContains(e1: Expr, e2: Expr, t: Type, effs: Effects) - decreases BinaryApp(Contains,e1,e2) , 0 - requires WellFormedRequestAndStore() - requires EffectsInvariant(effs) - requires Typesafe(BinaryApp(Contains,e1,e2),effs,t) - ensures IsSafe(r,s,BinaryApp(Contains,e1,e2),t) - ensures getEffects(BinaryApp(Contains,e1,e2),effs) == Effects.empty() - { - var t' :| getType(BinaryApp(Contains,e1,e2),effs) == t' && subty(t',t,ValidationMode.Permissive); - assert TC.inferContains(e1,e2,effs) == types.Ok(t'); - var t1 := TC.inferSetType(e1,effs).value; - assert Typesafe(e1,effs,Type.Set(t1)) by { SubtyRefl(Type.Set(t1)); } - var (t2,_) := TC.infer(e2,effs).value; - assert Typesafe(e2,effs,t2) by { SubtyRefl(t2); } - assert IsSafe(r,s,e1,Type.Set(t1)) by { Sound(e1,Type.Set(t1),effs); } - assert IsSafe(r,s,e2,t2) by { Sound(e2,t2,effs); } - assert IsSafe(r,s,BinaryApp(Contains,e1,e2),t') by { ContainsSetSafe(r,s,e1,e2,t1,t2); } - assert IsSafe(r,s,BinaryApp(Contains,e1,e2),t) by { SemSubtyTransport(r,s,BinaryApp(Contains,e1,e2),t',t); } - } - - lemma InferRecordLemma(e: Expr, es: seq<(Attr,Expr)>, effs: Effects) - requires forall i | 0 <= i < |es| :: es[i] < e - requires TC.inferRecord(e,es,effs).Ok? - ensures forall i | 0 <= i < |es| :: es[i].0 in TC.inferRecord(e,es,effs).value.attrs.Keys && TC.infer(es[i].1,effs).Ok? 
- ensures forall k | k in TC.inferRecord(e,es,effs).value.attrs.Keys :: KeyExists(k,es) && TC.infer(LastOfKey(k,es),effs).value.0 == TC.inferRecord(e,es,effs).value.attrs[k].ty - ensures forall k | !(k in TC.inferRecord(e,es,effs).value.attrs.Keys) :: !KeyExists(k,es) - { - reveal TC.inferRecord(); - } - - lemma SoundRecord(es: seq<(Attr,Expr)>, t: Type, effs: Effects) - decreases Expr.Record(es) , 0 - requires WellFormedRequestAndStore() - requires EffectsInvariant(effs) - requires Typesafe(Expr.Record(es),effs,t) - ensures IsSafe(r,s,Expr.Record(es),t) - ensures getEffects(Expr.Record(es),effs) == Effects.empty() - { - var t' :| getType(Expr.Record(es),effs) == t' && subty(t',t,ValidationMode.Permissive); - var rt := TC.inferRecord(Expr.Record(es),es,effs).value; - InferRecordLemma(Expr.Record(es),es,effs); - assert t' == Type.Record(rt); - assert forall i | 0 <= i < |es| :: WellTyped(es[i].1,effs); - assert forall k | k in rt.attrs :: KeyExists(k,es) && getType(LastOfKey(k,es),effs) == rt.attrs[k].ty by { - assert TC.inferRecord(Expr.Record(es),es,effs).Ok?; - } - forall k | k in rt.attrs - ensures KeyExists(k,es) && IsSafe(r,s,LastOfKey(k,es),rt.attrs[k].ty) - { - assert getType(LastOfKey(k,es),effs) == rt.attrs[k].ty; - assert Typesafe(LastOfKey(k,es),effs,rt.attrs[k].ty) by { SubtyRefl(rt.attrs[k].ty); } - assert IsSafe(r,s,LastOfKey(k,es),rt.attrs[k].ty) by { Sound(LastOfKey(k,es),rt.attrs[k].ty,effs); } - } - assert IsSafe(r,s,Expr.Record(es),t') by { - assert forall ae | ae in es :: ExistsSafeType(r,s,ae.1) by { - forall ae | ae in es - ensures ExistsSafeType(r,s,ae.1) - { - assert WellTyped(ae.1,effs); - var t_ae := getType(ae.1,effs); - assert Typesafe(ae.1,effs,t_ae) by { SubtyRefl(t_ae); } - assert IsSafe(r,s,ae.1,t_ae) by { Sound(ae.1,t_ae,effs); } - } - } - RecordSafe(r,s,es,rt); - } - assert IsSafe(r,s,Expr.Record(es),t) by { - SubtyCompat(t',t); - SemSubtyTransport(r,s,Expr.Record(es),t',t); - } - } - - lemma InferSetLemma(e: Expr, es: seq, effs: 
Effects) - requires forall i | 0 <= i < |es| :: es[i] < e - requires TC.inferSet(e,es,effs).Ok? - ensures forall i | 0 <= i < |es| :: TC.infer(es[i],effs).Ok? && subty(TC.infer(es[i],effs).value.0,TC.inferSet(e,es,effs).value,ValidationMode.Permissive) - { - if es == [] { - } else { - var (t,_) := TC.infer(es[0],effs).value; - var t1 := TC.inferSet(e,es[1..],effs).value; - var t2 := lubOpt(t,t1,ValidationMode.Permissive).value; - assert forall i | 0 <= i < |es| :: TC.infer(es[i],effs).Ok? && subty(TC.infer(es[i],effs).value.0,t2,ValidationMode.Permissive) by { - forall i | 0 <= i < |es| - ensures TC.infer(es[i],effs).Ok? && subty(TC.infer(es[i],effs).value.0,t2,ValidationMode.Permissive) - { - if i == 0 { - assert subty(t,t2,ValidationMode.Permissive) by { LubIsUB(t,t1,t2,ValidationMode.Permissive); } - } else { - assert TC.infer(es[i],effs).Ok?; - assert subty(TC.infer(es[i],effs).value.0,t2,ValidationMode.Permissive) by { - LubIsUB(t,t1,t2,ValidationMode.Permissive); - SubtyTrans(TC.infer(es[i],effs).value.0,t1,t2,ValidationMode.Permissive); - } - } - } - } - } - } - - lemma SoundSet(es: seq, t: Type, effs: Effects) - decreases Expr.Set(es) , 0 - requires WellFormedRequestAndStore() - requires Typesafe(Expr.Set(es),effs,t) - requires EffectsInvariant(effs) - ensures IsSafe(r,s,Expr.Set(es),t) - ensures getEffects(Expr.Set(es),effs) == Effects.empty() - { - var t' :| getType(Expr.Set(es),effs) == t' && subty(t',t,ValidationMode.Permissive); - var st := TC.inferSet(Expr.Set(es),es,effs).value; - InferSetLemma(Expr.Set(es),es,effs); - assert t' == Type.Set(st); - forall i | 0 <= i < |es| - ensures IsSafe(r,s,es[i],st) - { - SetSemantics(es, Evaluator(r,s)); - assert Typesafe(es[i],effs,st); - Sound(es[i],st,effs); - } - assert IsSafe(r,s,Expr.Set(es),t') by { - SetConstrSafe(r,s,es,st); - } - assert IsSafe(r,s,Expr.Set(es),t) by { - SubtyCompat(t',t); - SemSubtyTransport(r,s,Expr.Set(es),t',t); - } - } - - lemma SoundGetAttr(e: Expr, k: Attr, t: Type, effs: Effects) 
- decreases GetAttr(e,k) , 0 - requires WellFormedRequestAndStore() - requires EffectsInvariant(effs) - requires Typesafe(GetAttr(e,k),effs,t) - ensures IsSafe(r,s,GetAttr(e,k),t) - ensures getEffects(GetAttr(e,k),effs) == Effects.empty() - { - var t' :| getType(GetAttr(e,k),effs) == t' && subty(t',t,ValidationMode.Permissive); - assert TC.inferGetAttr(e,k,effs).Ok?; - var ret := TC.inferRecordEntityType(e,effs).value; - match ret { - case Record(rt) => { - assert t' == rt.attrs[k].ty; - assert Typesafe(e,effs,Type.Record(rt)) by { SubtyRefl(Type.Record(rt)); } - assert IsSafe(r,s,e,Type.Record(rt)) by { Sound(e,Type.Record(rt),effs); } - assert IsSafe(r,s,GetAttr(e,k),t') by { - assert k in rt.attrs; - assert rt.attrs[k].isRequired || effs.contains(e,k); - if rt.attrs[k].isRequired { - ObjectProjSafeRequired(r,s,e,Type.Record(rt),k,rt.attrs[k]); - } else { - reveal EffectsInvariant(); - assert GetAttrSafe(r,s,e,k); - ObjectProjSafeGetAttrSafe(r,s,e,Type.Record(rt),k,rt.attrs[k]); - } - } - assert IsSafe(r,s,GetAttr(e,k),t) by { - SubtyCompat(t',t); - SemSubtyTransport(r,s,GetAttr(e,k),t',t); - } - } - case Entity(lub) => { - var rt := ets.getLubRecordType(lub,ValidationMode.Permissive).value; - assert t' == rt.attrs[k].ty; - assert IsSafe(r,s,e,Type.Entity(lub)) by { Sound(e,Type.Entity(lub),effs); } - assert IsSafe(r,s,GetAttr(e,k),t') by { - assert k in rt.attrs; - assert rt.attrs[k].isRequired || effs.contains(e,k); - if !rt.attrs[k].isRequired { - reveal EffectsInvariant(); - assert GetAttrSafe(r,s,e,k); - } - forall euid: EntityUID | InstanceOfType(Primitive(Primitive.EntityUID(euid)),Type.Entity(lub)) && euid in s.entities - ensures rt.attrs[k].isRequired ==> k in s.entities[euid].attrs - ensures k in s.entities[euid].attrs ==> InstanceOfType(s.entities[euid].attrs[k],t') - { - reveal WellFormedRequestAndStore(); - GetLubRecordTypeSubty(lub, euid.ty); - SubtyCompat(ets.types[euid.ty].attrs[k].ty, t'); - } - 
EntityProjSafe(r,s,e,k,lub,t',rt.attrs[k].isRequired); - } - assert IsSafe(r,s,GetAttr(e,k),t) by { - SubtyCompat(t',t); - SemSubtyTransport(r,s,GetAttr(e,k),t',t); - } - } - } - } - - lemma LubRecordType(rt1: RecordType, rt2: RecordType) - ensures var rtl := lubRecordType(rt1, rt2, ValidationMode.Permissive).value; - forall k | k in rtl.attrs.Keys :: - lubOpt(rt1.attrs[k].ty, rt2.attrs[k].ty, ValidationMode.Permissive) == Ok(rtl.attrs[k].ty) - {} - - lemma LubRecordTypeSubty(rt1: RecordType, rt2: RecordType) - ensures subtyRecordType(rt1, lubRecordType(rt1, rt2,ValidationMode.Permissive).value,ValidationMode.Permissive) - ensures subtyRecordType(rt2, lubRecordType(rt1, rt2,ValidationMode.Permissive).value,ValidationMode.Permissive) - { - var rtl := lubRecordType(rt1, rt2, ValidationMode.Permissive).value; - - assert rt1.isOpen() ==> rtl.isOpen(); - assert rt2.isOpen() ==> rtl.isOpen(); - assert !rtl.isOpen() ==> rt1.attrs.Keys == rt2.attrs.Keys; - - LubRecordType(rt1, rt2); - reveal WellFormedRequestAndStore(); - - forall k | k in rtl.attrs.Keys - ensures subtyAttrType(rt1.attrs[k], rtl.attrs[k],ValidationMode.Permissive) && subtyAttrType(rt2.attrs[k], rtl.attrs[k],ValidationMode.Permissive) { - var al := rtl.attrs[k]; - var a1 := rt1.attrs[k]; - var a2 := rt2.attrs[k]; - LubIsUB(a1.ty, a2.ty, al.ty, ValidationMode.Permissive); - } - } - - lemma LubRecordTypeSeqSubty(rts: seq, i: nat) - requires lubRecordTypeSeq(rts,ValidationMode.Permissive).Ok? 
- requires 0 <= i < |rts| - ensures subtyRecordType(rts[i], lubRecordTypeSeq(rts,ValidationMode.Permissive).value, ValidationMode.Permissive) - { - var res := lubRecordTypeSeq(rts,ValidationMode.Permissive).value; - if |rts| == 1 { - SubtyRecordTypeRefl(rts[0]); - } else { - var tailRes := lubRecordTypeSeq(rts[1..],ValidationMode.Permissive).value; - LubRecordTypeSubty(rts[0], tailRes); - if i > 0 { - LubRecordTypeSeqSubty(rts[1..], i - 1); - SubtyRecordTypeTrans(rts[i], tailRes, res,ValidationMode.Permissive); - } - } - } - - lemma GetLubRecordTypeSubty(lub: EntityLUB, ety: EntityType) - requires lub.EntityLUB? - requires ety in lub.tys - requires ets.getLubRecordType(lub,ValidationMode.Permissive).Ok? - requires ety in ets.types - ensures subtyRecordType(ets.types[ety], ets.getLubRecordType(lub, ValidationMode.Permissive).value,ValidationMode.Permissive) - { - var lub_ty := ets.getLubRecordType(lub,ValidationMode.Permissive) ; - if lub_ty != Ok(RecordType(map[], OpenAttributes)) { - def.util.EntityTypeLeqIsTotalOrder(); - var lubSeq := def.util.SetToSortedSeq(lub.tys,def.util.EntityTypeLeq); - var etyI :| 0 <= etyI < |lubSeq| && lubSeq[etyI] == ety; - var RecordTypeSeq := seq (|lubSeq|, i requires 0 <= i < |lubSeq| => ets.types[lubSeq[i]]); - LubRecordTypeSeqSubty(RecordTypeSeq, etyI); - } - } - - lemma SoundHasAttr(e: Expr, k: Attr, t: Type, effs: Effects) - decreases HasAttr(e,k) , 0 - requires WellFormedRequestAndStore() - requires EffectsInvariant(effs) - requires Typesafe(HasAttr(e,k),effs,t) - ensures IsSafe(r,s,HasAttr(e,k),t) - ensures GuardedEffectsInvariant(HasAttr(e,k),getEffects(HasAttr(e,k),effs)) - { - var t' :| getType(HasAttr(e,k),effs) == t' && subty(t',t,ValidationMode.Permissive); - assert TC.inferHasAttr(e,k,effs).Ok?; - var ret := TC.inferRecordEntityType(e,effs).value; - assert GuardedEffectsInvariant(HasAttr(e,k),Effects.empty()) by { - EmptyEffectsInvariant(); - } - match ret { - case Record(rt) => { - assert 
Typesafe(e,effs,Type.Record(rt)) by { SubtyRefl(Type.Record(rt)); } - assert IsSafe(r,s,e,Type.Record(rt)) by { Sound(e,Type.Record(rt),effs); } - if k in rt.attrs { - if rt.attrs[k].isRequired { - assert IsSafe(r,s,e,Type.Record(RecordType(map[k := rt.attrs[k]], OpenAttributes))) by { - SubtyRefl(rt.attrs[k].ty); - assert subtyRecordType(rt,RecordType(map[k := rt.attrs[k]], OpenAttributes),ValidationMode.Permissive); - assert subty(Type.Record(rt),Type.Record(RecordType(map[k := rt.attrs[k]], OpenAttributes)),ValidationMode.Permissive); - SubtyCompat(Type.Record(rt),Type.Record(RecordType(map[k := rt.attrs[k]], OpenAttributes))); - SemSubtyTransport(r,s,e,Type.Record(rt),Type.Record(RecordType(map[k := rt.attrs[k]], OpenAttributes))); - } - assert IsSafe(r,s,HasAttr(e,k),t') by { RecordHasRequiredTrueSafe(r,s,e,k,rt.attrs[k]); } - } else if effs.contains(e,k) { - assert IsSafe(r,s,HasAttr(e,k),t') by { - reveal EffectsInvariant(); - } - } else { - assert IsSafe(r,s,e,Type.Record(RecordType(map[], OpenAttributes))) by { - assert subty(Type.Record(rt),Type.Record(RecordType(map[], OpenAttributes)),ValidationMode.Permissive); - SubtyCompat(Type.Record(rt),Type.Record(RecordType(map[], OpenAttributes))); - SemSubtyTransport(r,s,e,Type.Record(rt),Type.Record(RecordType(map[], OpenAttributes))); - } - assert IsSafe(r,s,HasAttr(e,k),t') by { RecordHasOpenRecSafe(r,s,e,k); } - assert GuardedEffectsInvariant(HasAttr(e,k),Effects.singleton(e,k)) by { - if IsTrueStrong(r,s,HasAttr(e,k)) { - IsTrueStrongImpliesIsTrue(r,s,HasAttr(e,k)); - reveal EffectsInvariant(); - } - } - } - } else if rt.isOpen() { - assert IsSafe(r,s,e,Type.Record(RecordType(map[], OpenAttributes))) by { - assert subty(Type.Record(rt),Type.Record(RecordType(map[], OpenAttributes)),ValidationMode.Permissive); - SubtyCompat(Type.Record(rt),Type.Record(RecordType(map[], OpenAttributes))); - SemSubtyTransport(r,s,e,Type.Record(rt),Type.Record(RecordType(map[], OpenAttributes))); - } - assert 
IsSafe(r,s,HasAttr(e,k),t') by { RecordHasOpenRecSafe(r,s,e,k); } - } else { - assert IsSafe(r,s,HasAttr(e,k),t') by { RecordHasClosedRecFalseSafe(r,s,e,k, rt); } - } - } - case Entity(et) => { - assert Typesafe(e,effs,Type.Entity(et)) by { SubtyRefl(Type.Entity(et)); } - assert IsSafe(r,s,e,Type.Entity(et)) by { Sound(e,Type.Entity(et),effs); } - if !ets.isAttrPossible(et,k) { - reveal WellFormedRequestAndStore(); - EntityHasImpossibleFalseSafe(r,s,e,k,et); - } else { - var m := ets.getLubRecordType(et,ValidationMode.Permissive).value; - if k in m.attrs { - if effs.contains(e,k) { - assert IsSafe(r,s,HasAttr(e,k),t') by { - reveal EffectsInvariant(); - } - } else { - assert IsSafe(r,s,e,Type.Entity(AnyEntity)) by { - SubtyCompat(Type.Entity(et),Type.Entity(AnyEntity)); - SemSubtyTransport(r,s,e,Type.Entity(et),Type.Entity(AnyEntity)); - } - assert IsSafe(r,s,HasAttr(e,k),t') by { EntityHasOpenSafe(r,s,e,k); } - assert GuardedEffectsInvariant(HasAttr(e,k),Effects.singleton(e,k)) by { - if IsTrueStrong(r,s,HasAttr(e,k)) { - IsTrueStrongImpliesIsTrue(r,s,HasAttr(e,k)); - reveal EffectsInvariant(); - } - } - } - } else { - PossibleAttrNotInLubAttrImpliesOpen(et, k, m); - assert IsSafe(r,s,e,Type.Entity(AnyEntity)) by { - SubtyCompat(Type.Entity(et),Type.Entity(AnyEntity)); - SemSubtyTransport(r,s,e,Type.Entity(et),Type.Entity(AnyEntity)); - } - assert IsSafe(r,s,HasAttr(e,k),t') by { EntityHasOpenSafe(r,s,e,k); } - } - } - } - } - assert IsSafe(r,s,HasAttr(e,k),t) by { - SubtyCompat(t',t); - SemSubtyTransport(r,s,HasAttr(e,k),t',t); - } - } - - lemma PossibleAttrNotInLubAttrImpliesOpen(lub: EntityLUB, k: Attr, lubR: RecordType) - requires ets.getLubRecordType(lub, ValidationMode.Permissive) == Ok(lubR) - requires ets.isAttrPossible(lub, k) - requires k !in lubR.attrs.Keys - ensures lubR.isOpen() - { - if lub.AnyEntity? 
|| exists et <- lub.tys :: isAction(et) { - assert ets.getLubRecordType(AnyEntity,ValidationMode.Permissive) == Ok(RecordType(map[], OpenAttributes)); - } else { - assert forall et <- lub.tys :: et in ets.types; - assert exists et <- lub.tys :: et in ets.types && (ets.types[et].isOpen() || k in ets.types[et].attrs); - var et :| et in lub.tys && et in ets.types && (ets.types[et].isOpen() || k in ets.types[et].attrs); - GetLubRecordTypeSubty(lub, et); - assert lubR.isOpen(); - } - } - - lemma SoundInSetMemberFalse(e1: Expr, ei2s: seq, i: nat, effs: Effects) - decreases BinaryApp(BinaryOp.In,e1,Expr.Set(ei2s)) , 0 , Expr.Set(ei2s) , 0 - requires WellFormedRequestAndStore() - requires EffectsInvariant(effs) - requires WellTyped(BinaryApp(BinaryOp.In,e1,Expr.Set(ei2s)),effs) - requires getType(BinaryApp(BinaryOp.In,e1,Expr.Set(ei2s)),effs) == Type.Bool(False) - requires !reqty.isUnspecifiedVar(e1) - requires 0 <= i < |ei2s| - ensures IsFalse(r,s,BinaryApp(BinaryOp.In,e1,ei2s[i])) - { - // Reestablishing things we had at the call site in `SoundIn`. - var e2 := Expr.Set(ei2s); - - assert TC.inferIn(BinaryApp(BinaryOp.In,e1,e2),e1,e2,effs) == types.Ok(Type.Bool(False)); - - assert TC.ensureEntityType(e1,effs).Ok?; - var t1 := getType(e1,effs); - - var euids2 :- assert TC.tryGetEUIDs(e2); - var ets2 := set u <- euids2 :: u.ty; - - // New proof. 
- var u2 :- assert TC.tryGetEUID(ei2s[i]); - assert u2 in euids2; - match e1 { - case Var(v1) => - var et1 :- assert TC.getPrincipalOrResource(v1); - assert t1 == Type.Entity(EntityLUB({et1})); - assert IsSafe(r,s,Var(v1),t1) by { Sound(e1,t1,effs); } - assert !ets.possibleDescendantOf(et1,u2.ty); - reveal WellFormedRequestAndStore(); - InSingleFalseEntityTypeAndLiteral(r,s,e1,et1,u2); - case PrimitiveLit(EntityUID(u1)) => - if isAction(u1.ty) { - assert !acts.descendantOfSet(u1,euids2); - assert !acts.descendantOf(u1,u2); - } else { - assert !ets.possibleDescendantOfSet(u1.ty,ets2); - assert !ets.possibleDescendantOf(u1.ty,u2.ty); - } - reveal WellFormedRequestAndStore(); - InSingleFalseLiterals(r,s,u1,u2); - } - } - - lemma SoundIn(e1: Expr, e2: Expr, t: Type, effs: Effects) - decreases BinaryApp(BinaryOp.In,e1,e2) , 0 , e2 - requires WellFormedRequestAndStore() - requires EffectsInvariant(effs) - requires Typesafe(BinaryApp(BinaryOp.In,e1,e2),effs,t) - ensures IsSafe(r,s,BinaryApp(BinaryOp.In,e1,e2),t) - ensures getEffects(BinaryApp(BinaryOp.In,e1,e2),effs) == Effects.empty() - { - var t' :| getType(BinaryApp(BinaryOp.In,e1,e2),effs) == t' && subty(t',t,ValidationMode.Permissive); - - assert TC.inferIn(BinaryApp(BinaryOp.In,e1,e2),e1,e2,effs) == types.Ok(t'); - - assert TC.ensureEntityType(e1,effs).Ok?; - var t1 := getType(e1,effs); - assert t1.Entity?; - assert subty(t1,Type.Entity(AnyEntity),ValidationMode.Permissive); - assert IsSafe(r,s,e1,Type.Entity(AnyEntity)) by { Sound(e1,Type.Entity(AnyEntity),effs); } - - assert TC.ensureEntitySetType(e2,effs).Ok?; - var t2 := getType(e2,effs); - var e2IsSet := match t2 { - case Entity(_) => false - case Set(Entity(_)) => true - case Set(Never) => true - }; - var e2IsSpecified := match t2 { - case Entity(lub) => lub.specified() - case Set(Entity(lub)) => lub.specified() - case Set(Never) => true - }; - - match t' { - // Easy case - case Bool(AnyBool) => - reveal WellFormedRequestAndStore(); - assert 
IsSafe(r,s,BinaryApp(BinaryOp.In,e1,e2),Type.Bool(AnyBool)) by { - if e2IsSet { - assert IsSafe(r,s,e2,Type.Set(Type.Entity(AnyEntity))) by { Sound(e2,Type.Set(Type.Entity(AnyEntity)),effs); } - InSetSafe(r,s,e1,e2); - } else { - assert IsSafe(r,s,e2,Type.Entity(AnyEntity)) by { Sound(e2,Type.Entity(AnyEntity),effs); } - InSingleSafe(r,s,e1,e2); - } - } - // Harder case: we have to prove that the result is false. - case Bool(False) => - if reqty.isUnspecifiedVar(e1) && e2IsSpecified { - reveal WellFormedRequestAndStore(); - UnspecifiedVarHasUnspecifiedEntityType(e1); - assert IsSafe(r,s,e2,t2) by { Sound(e2,t2,effs); } - if e2IsSet { - var Set(t2e) := t2; - InSetFalseTypes(r,s,e1,e2,unspecifiedEntityType,t2e); - } else { - InSingleFalseTypes(r,s,e1,e2,unspecifiedEntityType,t2); - } - } else { - match e2 { - case PrimitiveLit(EntityUID(u2)) => match e1 { - case Var(v1) => - var et1 :- assert TC.getPrincipalOrResource(v1); - assert t1 == Type.Entity(EntityLUB({et1})); - assert IsSafe(r,s,Var(v1),t1) by { Sound(e1,t1,effs); } - assert !ets.possibleDescendantOf(et1,u2.ty); - reveal WellFormedRequestAndStore(); - InSingleFalseEntityTypeAndLiteral(r,s,e1,et1,u2); - case PrimitiveLit(EntityUID(u1)) => - if isAction(u1.ty) { - assert !acts.descendantOf(u1,u2); - } else { - assert !ets.possibleDescendantOf(u1.ty,u2.ty); - } - reveal WellFormedRequestAndStore(); - InSingleFalseLiterals(r,s,u1,u2); - } - case Set(ei2s) => - var euids2 :- assert TC.tryGetEUIDs(e2); - var ets2 := set u <- euids2 :: u.ty; - // Argument that is the same any time e2 is a set. 
- assert e2IsSet; - var eltType :| t2 == Type.Set(eltType); - InferSetLemma(e2, ei2s, effs); - forall i | 0 <= i < |ei2s| - ensures IsSafe(r,s,ei2s[i],Type.Entity(AnyEntity)) - { - assert subty(getType(ei2s[i],effs), eltType,ValidationMode.Permissive); - SubtyTrans(getType(ei2s[i],effs), eltType, Type.Entity(AnyEntity),ValidationMode.Permissive); - assert IsSafe(r,s,ei2s[i],Type.Entity(AnyEntity)) by { Sound(ei2s[i], Type.Entity(AnyEntity), effs); } - } - // Argument depending on e1 - forall i | 0 <= i < |ei2s| - ensures IsFalse(r,s,BinaryApp(BinaryOp.In,e1,ei2s[i])) - { - // Since this is the most expensive part of the proof, we move - // it to a separate lemma to help keep each lemma under the - // verification limits. - SoundInSetMemberFalse(e1, ei2s, i, effs); - } - InSetFalseIfAllFalse(r,s,e1,ei2s); - } - } - } - - assert IsSafe(r,s,BinaryApp(BinaryOp.In,e1,e2),t) by { - SubtyCompat(t',t); - SemSubtyTransport(r,s,BinaryApp(BinaryOp.In,e1,e2),t',t); - } - } - - lemma InferCallArgsSound(e: Expr, name: base.Name, args: seq, tys: seq, effs: Effects) - requires |args| == |tys| - requires forall i | 0 <= i < |args| :: args[i] < e - requires TC.inferCallArgs(e,args,tys,effs).Ok? - ensures forall i | 0 <= i < |args| :: Typesafe(args[i],effs,tys[i]) - {} - - lemma TypesafeCallSemantics(name: base.Name, es: seq, effs: Effects, t: Type) - requires Typesafe(Call(name,es),effs,t) - ensures name in extFunTypes - ensures |extFunTypes[name].args| == |es| - ensures forall i | 0 <= i < |es| :: Typesafe(es[i],effs,extFunTypes[name].args[i]) - ensures extFunTypes[name].ret == t - { - assert TC.inferCall(Call(name,es),name,es,effs).Ok?; - InferCallArgsSound(Call(name,es),name,es,extFunTypes[name].args,effs); - - assert extFunTypes[name].check.Some? 
==> extFunTypes[name].check.value(es).Ok?; - assert forall i | 0 <= i < |es| :: Typesafe(es[i],effs,extFunTypes[name].args[i]); - assert extFunTypes[name].ret == t; - } - - lemma SoundCall(name: base.Name, es: seq, t: Type, effs: Effects) - decreases Call(name,es) , 0 - requires WellFormedRequestAndStore() - requires EffectsInvariant(effs) - requires Typesafe(Call(name,es),effs,t) - ensures IsSafe(r,s,Call(name,es),t) - ensures getEffects(Call(name,es),effs) == Effects.empty() - { - assert TC.inferCall(Call(name,es),name,es,effs).Ok?; - TypesafeCallSemantics(name, es, effs, t); - var eft := extFunTypes[name]; - forall i | 0 <= i < |es| ensures IsSafe(r,s,es[i],eft.args[i]) { - Sound(es[i],eft.args[i],effs); - } - CallSafe(r,s,name,es); - } - - lemma Sound(e: Expr, t: Type, effs: Effects) - decreases e , 1 - requires WellFormedRequestAndStore() - requires EffectsInvariant(effs) - requires Typesafe(e,effs,t) - ensures IsSafe(r,s,e,t) - ensures GuardedEffectsInvariant(e,getEffects(e,effs)) - { - assert GuardedEffectsInvariant(e,Effects.empty()) by { - EmptyEffectsInvariant(); - } - match e { - case PrimitiveLit(p) => SoundLit(p,t,effs); - case Var(x) => SoundVar(x,t,effs); - case If(e',e1,e2) => SoundIf(e',e1,e2,t,effs); - case And(e1,e2) => SoundAnd(e1,e2,t,effs); - case Or(e1,e2) => SoundOr(e1,e2,t,effs); - case UnaryApp(Not,e') => SoundNot(e',t,effs); - case UnaryApp(Neg,e') => SoundNeg(e',t,effs); - case UnaryApp(MulBy(i),e') => SoundMulBy(i,e',t,effs); - case UnaryApp(Like(p),e') => SoundLike(e',p,t,effs); - case UnaryApp(Is(ety),e') => SoundIs(e',ety,t,effs); - case BinaryApp(Eq,e1,e2) => SoundEq(e1,e2,t,effs); - case BinaryApp(Less,e1,e2) => SoundIneq(Less,e1,e2,t,effs); - case BinaryApp(LessEq,e1,e2) => SoundIneq(BinaryOp.LessEq,e1,e2,t,effs); - case BinaryApp(Add,e1,e2) => SoundArith(Add,e1,e2,t,effs); - case BinaryApp(Sub,e1,e2) => SoundArith(Sub,e1,e2,t,effs); - case BinaryApp(In,e1,e2) => SoundIn(e1,e2,t,effs); - case BinaryApp(ContainsAny,e1,e2) => 
SoundContainsAnyAll(ContainsAny,e1,e2,t,effs); - case BinaryApp(ContainsAll,e1,e2) => SoundContainsAnyAll(ContainsAll,e1,e2,t,effs); - case BinaryApp(Contains,e1,e2) => SoundContains(e1,e2,t,effs); - case Record(es) => SoundRecord(es,t,effs); - case Set(es) => SoundSet(es,t,effs); - case GetAttr(e',l) => SoundGetAttr(e',l,t,effs); - case HasAttr(e',l) => SoundHasAttr(e',l,t,effs); - case Call(name,es) => SoundCall(name,es,t,effs); - } - } - - lemma SoundToplevel(e: Expr, t: Type) - requires InstanceOfRequestType(r,reqty) - requires InstanceOfEntityTypeStore(s,ets) - requires InstanceOfActionStore(s,acts) - requires TC.typecheck(e,t).Ok? - ensures IsSafe(r,s,e,t) - { - EmptyEffectsInvariant(); - reveal WellFormedRequestAndStore(); - Sound(e,t,Effects.empty()); - } - } -} diff --git a/cedar-dafny/validation/thm/strict_inf_strict.dfy b/cedar-dafny/validation/thm/strict_inf_strict.dfy deleted file mode 100644 index 3fa654dd9..000000000 --- a/cedar-dafny/validation/thm/strict_inf_strict.dfy +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -include "../../def/all.dfy" -include "../all.dfy" -include "base.dfy" -include "model.dfy" -include "soundness.dfy" - -// A quick proof checking that strict validation is reasonable. 
Given a -// strictly-type environment (see predicate `strictEnvironment`), the strict -// typechecker should always infer a strict type (see `Type::isStrict()`) for -// any expression assuming the expression is well typed. -module validation.thm.strict_inf_strict { - import opened typechecker - import opened types - import opened subtyping - import opened base - import opened model - import opened soundness - import opened def.core - import opened def.engine - import opened ext - - datatype StrictInfProof = StrictInfProof( - reqty: RequestType, - ets: EntityTypeStore, - acts: ActionStore - ) { - - predicate strictEnvironment() { - reqty.principal.Some? && - reqty.resource.Some? && - reqty.context.isStrictType() && - forall k | k in ets.types.Keys :: ets.types[k].isStrictType() - } - - const S_TC := Typechecker(ets,acts,reqty, ValidationMode.Strict) - - lemma StrictRecord(e: Expr, r: seq<(Attr,Expr)>, effs: Effects) - decreases r - requires forall i | 0 <= i < |r| :: r[i] < e - requires strictEnvironment() - requires S_TC.inferRecord(e, r, effs).Ok? - ensures S_TC.inferRecord(e, r, effs).value.isStrictType() - { - reveal S_TC.inferRecord(); - if r != [] { - assert S_TC.infer(r[0].1,effs).value.0.isStrictType() by {StrictTypeInf(r[0].1, effs); } - assert S_TC.inferRecord(e, r[1..], effs).value.isStrictType() by { StrictRecord(e, r[1..], effs); } - } - } - - lemma StrictSetElems(e: Expr, es: seq, effs: Effects) - decreases es - requires forall i | 0 <= i < |es| :: es[i] < e - requires strictEnvironment() - requires S_TC.inferSet(e, es, effs).Ok? 
- ensures es == [] ==> S_TC.inferSet(e, es, effs).value == Never - ensures es != [] ==> S_TC.inferSet(e, es, effs).value.isStrictType() - { - if es != [] { - var (t,_) := S_TC.infer(es[0],effs).value; - assert t.isStrictType() by { StrictTypeInf(es[0], effs); } - var t1 := S_TC.inferSet(e,es[1..],effs).value; - assert t1.isStrictType() || t1 == Never by { StrictSetElems(e, es[1..], effs); } - var t2 := lubOpt(t,t1,ValidationMode.Strict).value; - assert t2.isStrictType() by { StrictTypeLub(t, t1); } - } - } - - lemma StrictIf(g: Expr, t: Expr, e: Expr, effs: Effects) - decreases If(g, t, e), 0 - requires strictEnvironment() - requires S_TC.infer(If(g, t, e), effs).Ok? - ensures S_TC.infer(If(g, t, e), effs).value.0.isStrictType() - { - var (bt,effs1) := S_TC.inferBoolType(g,effs).value; - match bt { - case True => - var (t1,effs2) := S_TC.infer(t,effs.union(effs1)).value; - StrictTypeInf(t, effs.union(effs1)); - case False => - var (t2,effs2) := S_TC.infer(e,effs).value; - StrictTypeInf(e, effs); - case Bool => - var (t1,effs2) := S_TC.infer(t,effs.union(effs1)).value; - var (t2,effs3) := S_TC.infer(e,effs).value; - StrictTypeInf(t, effs.union(effs1)); - StrictTypeInf(e, effs); - StrictTypeLub(t1, t2); - } - } - - lemma StrictGetAttr(e: Expr, l: Attr, effs:Effects) - decreases GetAttr(e, l), 0 - requires strictEnvironment() - requires S_TC.infer(GetAttr(e, l), effs).Ok? - ensures S_TC.infer(GetAttr(e, l), effs).value.0.isStrictType() - { - assert S_TC.inferRecordEntityType(e,effs).Ok?; - StrictTypeInf(e, effs); - } - - lemma StrictHasAttr(e: Expr, l: Attr, effs: Effects) - decreases HasAttr(e, l), 0 - requires strictEnvironment() - requires S_TC.infer(HasAttr(e, l), effs).Ok? - ensures S_TC.infer(HasAttr(e, l), effs).value.0.isStrictType() - { } - - lemma StrictIn(e1: Expr, e2: Expr, effs: Effects) - decreases BinaryApp(BinaryOp.In, e1, e2), 0 - requires strictEnvironment() - requires S_TC.infer(BinaryApp(BinaryOp.In, e1, e2), effs).Ok? 
- ensures S_TC.infer(BinaryApp(BinaryOp.In, e1, e2), effs).value.0.isStrictType() - { } - - lemma StrictCall(name: base.Name, args: seq, effs: Effects) - decreases args - requires strictEnvironment() - requires S_TC.infer(Call(name, args), effs).Ok? - ensures S_TC.infer(Call(name, args), effs).value.0.isStrictType() - { } - - lemma StrictTypeInf(e: Expr, effs: Effects) - decreases e, 1 - requires strictEnvironment() - requires S_TC.infer(e, effs).Ok? - ensures S_TC.infer(e, effs).value.0.isStrictType() - { - match e { - case PrimitiveLit(p) => - case Var(x) => - case If(e',e1,e2) => StrictIf(e', e1, e2, effs); - case And(e1,e2) => - case Or(e1,e2) => - case UnaryApp(Not,e') => - case UnaryApp(Neg,e') => - case UnaryApp(MulBy(i),e') => - case UnaryApp(Like(p),e') => - case UnaryApp(Is(ety),e') => - case BinaryApp(Eq,e1,e2) => - case BinaryApp(Less,e1,e2) => - case BinaryApp(LessEq,e1,e2) => - case BinaryApp(Add,e1,e2) => - case BinaryApp(Sub,e1,e2) => - case BinaryApp(In,e1,e2) => StrictIn(e1, e2, effs); - case BinaryApp(ContainsAny,e1,e2) => - case BinaryApp(ContainsAll,e1,e2) => - case BinaryApp(Contains,e1,e2) => - case Record(es) => StrictRecord(e, es, effs); - case Set(es) => StrictSetElems(e, es, effs); - case GetAttr(e',l) => StrictGetAttr(e', l, effs); - case HasAttr(e',l) => StrictHasAttr(e', l, effs); - case Call(name,es) => StrictCall(name, es, effs); - } - } - } -} diff --git a/cedar-dafny/validation/thm/strict_soundness.dfy b/cedar-dafny/validation/thm/strict_soundness.dfy deleted file mode 100644 index 43547e9b9..000000000 --- a/cedar-dafny/validation/thm/strict_soundness.dfy +++ /dev/null @@ -1,393 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -include "../../def/all.dfy" -include "../all.dfy" -include "base.dfy" -include "model.dfy" -include "soundness.dfy" - -// This module contains the proof of soundness for strict typechecking. -module validation.thm.strict_soundness { - import opened typechecker - import opened types - import opened subtyping - import opened base - import opened model - import opened soundness - import opened def.core - import opened def.engine - import opened ext - - datatype StrictProof = StrictProof( - reqty: RequestType, - ets: EntityTypeStore, - acts: ActionStore - ) { - - const P_TC := Typechecker(ets,acts,reqty, ValidationMode.Permissive) - const S_TC := Typechecker(ets,acts,reqty, ValidationMode.Strict) - - lemma StrictCallArgs(e: Expr, args: seq, tys: seq, effs: Effects) - decreases args, tys - requires |args| == |tys| - requires forall i | 0 <= i < |args| :: args[i] < e - requires S_TC.inferCallArgs(e, args, tys, effs).Ok? - ensures P_TC.inferCallArgs(e, args, tys, effs).Ok? - { - if args != [] { - var (t,effs') := S_TC.infer(args[0], effs).value; - assert P_TC.infer(args[0], effs) == Ok((t, effs')) by { StrictTypecheckingIsStrict(args[0], effs); } - - assert subty(t, tys[0], ValidationMode.Strict); - assert subty(t, tys[0], ValidationMode.Permissive) by { StrictSubtyIsStrict(t, tys[0]); } - - assert S_TC.inferCallArgs(e, args[1..], tys[1..], effs).Ok?; - assert P_TC.inferCallArgs(e, args[1..], tys[1..], effs).Ok? 
by { StrictCallArgs(e, args[1..], tys[1..], effs); } - } - } - - lemma StrictCall(name: base.Name, args: seq, effs: Effects) - decreases args - requires S_TC.infer(Call(name, args), effs).Ok? - ensures P_TC.infer(Call(name, args), effs) == S_TC.infer(Call(name, args), effs) - { - var ty := extFunTypes[name]; - assert S_TC.inferCallArgs(Call(name, args), args, ty.args, effs).Ok?; - assert P_TC.inferCallArgs(Call(name, args), args, ty.args, effs).Ok? by { StrictCallArgs(Call(name, args), args, ty.args, effs); } - } - - lemma StrictSetElems(e: Expr, es: seq, effs: Effects) - decreases es - requires forall i | 0 <= i < |es| :: es[i] < e - requires S_TC.inferSet(e, es, effs).Ok? - ensures P_TC.inferSet(e, es, effs) == S_TC.inferSet(e, es, effs) - { - if es != [] { - var (t, effs') := S_TC.infer(es[0], effs).value; - assert P_TC.infer(es[0], effs) == Ok((t, effs')) by { StrictTypecheckingIsStrict(es[0], effs); } - - var t1 := S_TC.inferSet(e, es[1..], effs).value; - assert P_TC.inferSet(e, es[1..], effs) == Ok(t1) by { StrictSetElems(e, es[1..], effs); } - - var lub := lub(t, t1, ValidationMode.Strict); - assert lubOpt(t, t1, ValidationMode.Permissive) == Ok(lub) by { StrictLubIsStrict(t, t1); } - } - } - - lemma StrictNot(e: Expr, effs: Effects) - decreases UnaryApp(Not, e), 0 - requires S_TC.infer(UnaryApp(Not, e), effs).Ok? - ensures P_TC.infer(UnaryApp(Not, e), effs) == S_TC.infer(UnaryApp(Not, e), effs) - { - assert P_TC.inferBoolType(e, effs) == S_TC.inferBoolType(e, effs) by { - StrictTypecheckingIsStrict(e, effs); - } - } - - lemma StrictArith1(o: UnaryOp, e: Expr, effs: Effects) - decreases UnaryApp(o, e), 0 - requires o.Neg? || o.MulBy? - requires S_TC.infer(UnaryApp(o, e), effs).Ok? - ensures P_TC.infer(UnaryApp(o, e), effs) == S_TC.infer(UnaryApp(o, e), effs) - { - assert P_TC.ensureIntType(e, effs).Ok? 
by { - assert S_TC.ensureIntType(e, effs).Ok?; - StrictTypecheckingIsStrict(e, effs); - } - } - - lemma StrictLike(p: Pattern, e: Expr, effs: Effects) - decreases UnaryApp(Like(p), e), 0 - requires S_TC.infer(UnaryApp(Like(p), e), effs).Ok? - ensures P_TC.infer(UnaryApp(Like(p), e), effs) == S_TC.infer(UnaryApp(Like(p), e), effs) - { - assert P_TC.ensureStringType(e, effs).Ok? by { - assert S_TC.ensureStringType(e, effs).Ok?; - StrictTypecheckingIsStrict(e, effs); - } - } - - lemma StrictIs(ety: EntityType, e: Expr, effs: Effects) - decreases UnaryApp(UnaryOp.Is(ety), e), 0 - requires S_TC.infer(UnaryApp(UnaryOp.Is(ety), e), effs).Ok? - ensures P_TC.infer(UnaryApp(UnaryOp.Is(ety), e), effs) == S_TC.infer(UnaryApp(UnaryOp.Is(ety), e), effs) - { - assert S_TC.ensureEntityType(e, effs).Ok?; - assert P_TC.ensureEntityType(e, effs) == S_TC.ensureEntityType(e, effs) by { - StrictTypecheckingIsStrict(e, effs); - } - } - - lemma StrictArith2Ineq(o: BinaryOp, e1: Expr, e2: Expr, effs: Effects) - decreases BinaryApp(o, e1, e2), 0 - requires o.Add? || o.Sub? || o.Less? || o.LessEq? - requires S_TC.infer(BinaryApp(o, e1, e2), effs).Ok? - ensures P_TC.infer(BinaryApp(o, e1, e2), effs) == S_TC.infer(BinaryApp(o, e1, e2), effs) - { - assert P_TC.ensureIntType(e1, effs).Ok? by { - assert S_TC.ensureIntType(e1, effs).Ok?; - StrictTypecheckingIsStrict(e1, effs); - } - assert P_TC.ensureIntType(e2, effs).Ok? by { - assert S_TC.ensureIntType(e2, effs).Ok?; - StrictTypecheckingIsStrict(e2, effs); - } - } - - lemma StrictLubRecordTypeSeq(rts: seq) - requires lubRecordTypeSeq(rts, ValidationMode.Strict).Ok? 
- ensures lubRecordTypeSeq(rts, ValidationMode.Permissive) == lubRecordTypeSeq(rts, ValidationMode.Strict) - { - assert rts != []; - if |rts| != 1 { - var tailLub := lubRecordTypeSeq(rts[1..], ValidationMode.Strict).value; - assert lubRecordTypeSeq(rts[1..], ValidationMode.Permissive) == Ok(tailLub) by { StrictLubRecordTypeSeq(rts[1..]); } - - assert lubRecordType(rts[0], tailLub, ValidationMode.Strict) == lubRecordType(rts[0], tailLub, ValidationMode.Permissive) by { - StrictLubIsStrict(Type.Record(rts[0]), Type.Record(tailLub)); - } - } - } - - lemma StrictGetLubRecordType(lub: EntityLUB) - requires ets.getLubRecordType(lub, ValidationMode.Strict).Ok? - ensures ets.getLubRecordType(lub, ValidationMode.Permissive) == ets.getLubRecordType(lub, ValidationMode.Strict) - { - if lub.AnyEntity? || exists et <- lub.tys :: isAction(et) { - assert ets.getLubRecordType(lub, ValidationMode.Permissive).Ok?; - } else { - assert forall et <- lub.tys :: et in ets.types; - def.util.EntityTypeLeqIsTotalOrder(); - var lubSeq := def.util.SetToSortedSeq(lub.tys,def.util.EntityTypeLeq); - var s := seq (|lubSeq|, i requires 0 <= i < |lubSeq| => ets.types[lubSeq[i]]); - assert s != []; - assert lubRecordTypeSeq(s, ValidationMode.Strict) == lubRecordTypeSeq(s, ValidationMode.Permissive) by { - assert ets.getLubRecordType(lub, ValidationMode.Strict).Ok?; - StrictLubRecordTypeSeq(s); - } - } - } - - lemma StrictHasAttr(e: Expr, l: Attr, effs: Effects) - decreases HasAttr(e, l), 0 - requires S_TC.infer(HasAttr(e, l), effs).Ok? 
- ensures P_TC.infer(HasAttr(e, l), effs) == S_TC.infer(HasAttr(e, l), effs) - { - var ret := S_TC.inferRecordEntityType(e, effs).value; - assert P_TC.inferRecordEntityType(e, effs) == Ok(ret) by { StrictTypecheckingIsStrict(e, effs); } - - match ret { - case Record(rt) => - case Entity(lub) => - if ets.isAttrPossible(lub,l) { - var rt := ets.getLubRecordType(lub, ValidationMode.Strict).value; - assert ets.getLubRecordType(lub, ValidationMode.Permissive) == Ok(rt) by { StrictGetLubRecordType(lub); } - } - } - } - - lemma StrictGetAttr(e: Expr, l: Attr, effs:Effects) - decreases GetAttr(e, l), 0 - requires S_TC.infer(GetAttr(e, l), effs).Ok? - ensures P_TC.infer(GetAttr(e, l), effs) == S_TC.infer(GetAttr(e, l), effs) - { - var ret := S_TC.inferRecordEntityType(e, effs).value; - assert P_TC.inferRecordEntityType(e, effs) == Ok(ret) by { StrictTypecheckingIsStrict(e, effs); } - - match ret { - case Record(rt) => - case Entity(lub) => { - var rt := ets.getLubRecordType(lub, ValidationMode.Strict).value; - assert ets.getLubRecordType(lub, ValidationMode.Permissive) == Ok(rt) by { StrictGetLubRecordType(lub); } - } - } - } - - lemma StrictIf(g: Expr, t: Expr, e: Expr, effs: Effects) - decreases If(g, t, e), 0 - requires S_TC.infer(If(g, t, e), effs).Ok? 
- ensures P_TC.infer(If(g, t, e), effs) == S_TC.infer(If(g, t, e), effs) - { - var (gt, ge) := S_TC.inferBoolType(g, effs).value; - assert P_TC.inferBoolType(g, effs) == Ok((gt, ge)) by { StrictTypecheckingIsStrict(g, effs); } - - match gt { - case True => { - var (tt, te) := S_TC.infer(t, effs.union(ge)).value; - assert P_TC.infer(t, effs.union(ge)) == Ok((tt, te)) by { StrictTypecheckingIsStrict(t, effs.union(ge)); } - } - case False => { - var (et, ee) := S_TC.infer(e, effs).value; - assert P_TC.infer(e, effs) == Ok((et, ee)) by { StrictTypecheckingIsStrict(e, effs); } - } - case Bool => { - var (tt, te) := S_TC.infer(t, effs.union(ge)).value; - assert P_TC.infer(t, effs.union(ge)) == Ok((tt, te)) by { StrictTypecheckingIsStrict(t, effs.union(ge)); } - - var (et, ee) := S_TC.infer(e, effs).value; - assert P_TC.infer(e, effs) == Ok((et, ee)) by { StrictTypecheckingIsStrict(e, effs); } - - var lub := lub(tt, et, ValidationMode.Strict); - assert lubOpt(tt, et, ValidationMode.Permissive) == Ok(lub) by { StrictLubIsStrict(tt, et); } - } - } - } - - lemma StrictEq(e1: Expr, e2: Expr, effs: Effects) - decreases BinaryApp(BinaryOp.Eq, e1, e2), 0 - requires S_TC.infer(BinaryApp(BinaryOp.Eq, e1, e2), effs).Ok? - ensures P_TC.infer(BinaryApp(BinaryOp.Eq, e1, e2), effs) == S_TC.infer(BinaryApp(BinaryOp.Eq, e1, e2), effs) - { - var (t1, effs1) := S_TC.infer(e1, effs).value; - assert P_TC.infer(e1, effs) == Ok((t1, effs1)) by { StrictTypecheckingIsStrict(e1, effs); } - - var (t2, effs2) := S_TC.infer(e2, effs).value; - assert P_TC.infer(e2, effs) == Ok((t2, effs2)) by { StrictTypecheckingIsStrict(e2, effs); } - } - - lemma StrictContainsAnyAll(o: BinaryOp, e1: Expr, e2: Expr, effs: Effects) - decreases BinaryApp(o, e1, e2), 0 - requires o.ContainsAny? || o.ContainsAll? - requires S_TC.infer(BinaryApp(o, e1, e2), effs).Ok? 
- ensures P_TC.infer(BinaryApp(o, e1, e2), effs) == S_TC.infer(BinaryApp(o, e1, e2), effs) - { - var t1 := S_TC.inferSetType(e1, effs).value; - assert P_TC.inferSetType(e1, effs) == Ok(t1) by { StrictTypecheckingIsStrict(e1, effs); } - - var t2 := S_TC.inferSetType(e2, effs).value; - assert P_TC.inferSetType(e2, effs) == Ok(t2) by { StrictTypecheckingIsStrict(e2, effs); } - } - - lemma StrictContains(e1: Expr, e2: Expr, effs: Effects) - decreases BinaryApp(BinaryOp.Contains, e1, e2), 0 - requires S_TC.infer(BinaryApp(BinaryOp.Contains, e1, e2), effs).Ok? - ensures P_TC.infer(BinaryApp(BinaryOp.Contains, e1, e2), effs) == S_TC.infer(BinaryApp(BinaryOp.Contains, e1, e2), effs) - { - var t1 := S_TC.inferSetType(e1, effs).value; - assert P_TC.inferSetType(e1, effs) == Ok(t1) by { StrictTypecheckingIsStrict(e1, effs); } - - var (t2, effs2) := S_TC.infer(e2, effs).value; - assert P_TC.infer(e2, effs) == Ok((t2, effs2)) by { StrictTypecheckingIsStrict(e2, effs); } - } - - lemma StrictIn(e1: Expr, e2: Expr, effs: Effects) - decreases BinaryApp(BinaryOp.In, e1, e2), 0 - requires S_TC.infer(BinaryApp(BinaryOp.In, e1, e2), effs).Ok? - ensures P_TC.infer(BinaryApp(BinaryOp.In, e1, e2), effs) == S_TC.infer(BinaryApp(BinaryOp.In, e1, e2), effs) - { - assert S_TC.ensureEntityType(e1,effs).Ok?; - assert P_TC.ensureEntityType(e1,effs).Ok? by { StrictTypecheckingIsStrict(e1, effs); } - - assert S_TC.ensureEntitySetType(e2,effs).Ok?; - assert P_TC.ensureEntitySetType(e2,effs).Ok? by { StrictTypecheckingIsStrict(e2, effs); } - - var (t2, effs') := S_TC.infer(e2,effs).value; - assert P_TC.infer(e2, effs) == Ok((t2, effs')) by { StrictTypecheckingIsStrict(e2, effs); } - } - - lemma StrictAnd(e1: Expr, e2: Expr, effs: Effects) - decreases And(e1, e2), 0 - requires S_TC.infer(And(e1, e2), effs).Ok? 
- ensures P_TC.infer(And(e1, e2), effs) == S_TC.infer(And(e1, e2), effs) - { - var (bt1,effs1) := S_TC.inferBoolType(e1,effs).value; - assert P_TC.inferBoolType(e1,effs) == Ok((bt1, effs1)) by { StrictTypecheckingIsStrict(e1, effs); } - - match bt1 { - case True => { - var (bt2,effs2) := S_TC.inferBoolType(e2,effs.union(effs1)).value; - assert P_TC.inferBoolType(e2,effs.union(effs1)) == Ok((bt2, effs2)) by { StrictTypecheckingIsStrict(e2, effs.union(effs1)); } - } - case False => - case Bool => { - var (bt2,effs2) := S_TC.inferBoolType(e2,effs.union(effs1)).value; - assert P_TC.inferBoolType(e2,effs.union(effs1)) == Ok((bt2, effs2)) by { StrictTypecheckingIsStrict(e2, effs.union(effs1)); } - } - } - } - - lemma StrictOr(e1: Expr, e2: Expr, effs: Effects) - decreases Or(e1, e2), 0 - requires S_TC.infer(Or(e1, e2), effs).Ok? - ensures P_TC.infer(Or(e1, e2), effs) == S_TC.infer(Or(e1, e2), effs) - { - var (bt1,effs1) := S_TC.inferBoolType(e1,effs).value; - assert P_TC.inferBoolType(e1,effs) == Ok((bt1, effs1)) by { StrictTypecheckingIsStrict(e1, effs); } - - match bt1 { - case True => - case False => { - var (bt2,effs2) := S_TC.inferBoolType(e2,effs).value; - assert P_TC.inferBoolType(e2,effs) == Ok((bt2, effs2)) by { StrictTypecheckingIsStrict(e2, effs); } - } - case Bool => { - var (bt2,effs2) := S_TC.inferBoolType(e2,effs).value; - assert P_TC.inferBoolType(e2,effs) == Ok((bt2, effs2)) by { StrictTypecheckingIsStrict(e2, effs); } - } - } - } - - lemma StrictRecord(e: Expr, r: seq<(Attr,Expr)>, effs: Effects) - decreases r - requires forall i | 0 <= i < |r| :: r[i] < e - requires S_TC.inferRecord(e, r, effs).Ok? 
- ensures P_TC.inferRecord(e, r, effs) == S_TC.inferRecord(e, r, effs) - { - reveal S_TC.inferRecord(); - if r != [] { - var (t,effs') := S_TC.infer(r[0].1,effs).value; - assert P_TC.infer(r[0].1, effs) == Ok((t, effs')) by {StrictTypecheckingIsStrict(r[0].1, effs); } - - var m := S_TC.inferRecord(e, r[1..], effs).value; - assert P_TC.inferRecord(e, r[1..], effs) == Ok(m) by { StrictRecord(e, r[1..], effs); } - } - } - - lemma StrictTypecheckingIsStrict(e: Expr, effs: Effects) - decreases e, 1 - requires S_TC.infer(e, effs).Ok? - ensures P_TC.infer(e, effs) == S_TC.infer(e, effs) - { - match e { - case PrimitiveLit(p) => - case Var(x) => - case If(e',e1,e2) => StrictIf(e', e1, e2, effs); - case And(e1,e2) => StrictAnd(e1, e2, effs); - case Or(e1,e2) => StrictOr(e1, e2, effs); - case UnaryApp(Not,e') => StrictNot(e', effs); - case UnaryApp(Neg,e') => StrictArith1(Neg, e', effs); - case UnaryApp(MulBy(i),e') => StrictArith1(MulBy(i), e', effs); - case UnaryApp(Like(p),e') => StrictLike(p, e', effs); - case UnaryApp(Is(ety),e') => StrictIs(ety, e', effs); - case BinaryApp(Eq,e1,e2) => StrictEq(e1, e2, effs); - case BinaryApp(Less,e1,e2) => StrictArith2Ineq(Less, e1, e2, effs); - case BinaryApp(LessEq,e1,e2) => StrictArith2Ineq(LessEq, e1, e2, effs); - case BinaryApp(Add,e1,e2) => StrictArith2Ineq(Add, e1, e2, effs); - case BinaryApp(Sub,e1,e2) => StrictArith2Ineq(Sub, e1, e2, effs); - case BinaryApp(In,e1,e2) => StrictIn(e1, e2, effs); - case BinaryApp(ContainsAny,e1,e2) => StrictContainsAnyAll(ContainsAny, e1, e2, effs); - case BinaryApp(ContainsAll,e1,e2) => StrictContainsAnyAll(ContainsAll, e1, e2, effs); - case BinaryApp(Contains,e1,e2) => StrictContains(e1, e2, effs); - case Record(es) => StrictRecord(e, es, effs); - case Set(es) => StrictSetElems(e, es, effs); - case GetAttr(e',l) => StrictGetAttr(e', l, effs); - case HasAttr(e',l) => StrictHasAttr(e', l, effs); - case Call(name,es) => StrictCall(name, es, effs); - } - } - } -} diff --git 
a/cedar-dafny/validation/thm/toplevel.dfy b/cedar-dafny/validation/thm/toplevel.dfy deleted file mode 100644 index 11930c90a..000000000 --- a/cedar-dafny/validation/thm/toplevel.dfy +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -include "../../def/all.dfy" -include "../all.dfy" -include "base.dfy" -include "model.dfy" -include "soundness.dfy" -include "strict_soundness.dfy" - -// This module contains the high-level statement of type soundness. 
-module validation.thm.toplevel { - import opened typechecker - import opened types - import opened subtyping - import opened base - import opened model - import opened soundness - import opened strict_soundness - import opened def.core - import opened def.engine - import opened ext - - datatype Schema = Schema( - reqty: RequestType, - ets: EntityTypeStore, - acts: ActionStore - ) - - ghost predicate SatisfiesSchema(request: Request, entities: EntityStore, schema: Schema) { - InstanceOfRequestType(request, schema.reqty) && - InstanceOfEntityTypeStore(entities, schema.ets) && - InstanceOfActionStore(entities, schema.acts) - } - - function permissiveTypecheck(pid: PolicyID, policies: PolicyStore, schema: Schema): types.Result - requires pid in policies.policies.Keys - { - var typechecker := Typechecker(schema.ets, schema.acts, schema.reqty, ValidationMode.Permissive); - typechecker.typecheck(policies.policies[pid].toExpr(), Type.Bool(AnyBool)) - } - - // If an expression is well-typed according to the permissive typechecker, - // then either evaluation returns a value of that type or it returns an error - // of type EntityDoesNotExist or ExtensionError. All other errors (i.e., - // AttrDoesNotExist, TypeError, ArityMismatchError, NoSuchFunctionError) are - // impossible. - lemma PermissiveTypecheckingIsSound( - pid: PolicyID, - request: Request, - store: Store, - schema: Schema, - res: base.Result) - requires pid in store.policies.policies.Keys - requires SatisfiesSchema(request, store.entities, schema) - requires permissiveTypecheck(pid, store.policies, schema).Ok? - requires res == Evaluator(request, store.entities).interpret(store.policies.policies[pid].toExpr()) - ensures res.Ok? ==> InstanceOfType(res.value, Type.Bool(AnyBool)) - ensures res.Err? ==> res.error.EntityDoesNotExist? || res.error.ExtensionError? 
- { - reveal IsSafe(); - var policies := store.policies; - var entities := store.entities; - var expr := policies.policies[pid].toExpr(); - assert IsSafe(request, entities, expr, Type.Bool(AnyBool)) by { - SSP(schema.reqty, schema.ets, schema.acts, request, entities).SoundToplevel(expr, Type.Bool(AnyBool)); - } - } - - function strictTypecheck(pid: PolicyID, policies: PolicyStore, schema: Schema): types.Result - requires pid in policies.policies.Keys - { - var typechecker := Typechecker(schema.ets, schema.acts, schema.reqty, ValidationMode.Strict); - typechecker.typecheck(policies.policies[pid].toExpr(), Type.Bool(AnyBool)) - } - - // If an expression is well-typed according to the strict typechecker, - // then either evaluation returns a value of that type or it returns an error - // of type EntityDoesNotExist or ExtensionError. All other errors (i.e., - // AttrDoesNotExist, TypeError, ArityMismatchError, NoSuchFunctionError) are - // impossible. - lemma StrictTypecheckingIsSound( - pid: PolicyID, - request: Request, - store: Store, - schema: Schema, - res: base.Result) - requires pid in store.policies.policies.Keys - requires SatisfiesSchema(request, store.entities, schema) - requires strictTypecheck(pid, store.policies, schema).Ok? - requires res == Evaluator(request, store.entities).interpret(store.policies.policies[pid].toExpr()) - ensures res.Ok? ==> InstanceOfType(res.value, Type.Bool(AnyBool)) - ensures res.Err? ==> res.error.EntityDoesNotExist? || res.error.ExtensionError? - { - assert permissiveTypecheck(pid, store.policies, schema).Ok? 
by { - var expr := store.policies.policies[pid].toExpr(); - StrictProof(schema.reqty, schema.ets, schema.acts).StrictTypecheckingIsStrict(expr, Effects.empty()); - } - PermissiveTypecheckingIsSound(pid, request, store, schema, res); - } -} diff --git a/cedar-dafny/validation/typechecker.dfy b/cedar-dafny/validation/typechecker.dfy deleted file mode 100644 index d10ccc75b..000000000 --- a/cedar-dafny/validation/typechecker.dfy +++ /dev/null @@ -1,718 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -include "../def/all.dfy" -include "types.dfy" -include "ext.dfy" -include "subtyping.dfy" - -// This module contains the specification of Cedar's permissive typechecker, -// which is the core of the Cedar validator (see validator.dfy). -module validation.typechecker { - import def - import opened def.core - import opened types - import opened ext - import opened subtyping - - // --------- Entity Type Store --------- // - - // The Entity Type Store records the attributes associated with each - // entity type, and the hierarchy between entity types. Note that we do not - // enforce that the possibleDescendantOf relationship is transitive. 
- datatype EntityTypeStore = EntityTypeStore( - types: map, - descendants: map> - ) { - // Check if an entity of type et1 can be a descendent of an entity of type et2 - predicate possibleDescendantOf(et1: EntityType, et2: EntityType) - { - if et1 == et2 then true - else if et2 in descendants - then et1 in descendants[et2] - else false - } - - predicate possibleDescendantOfSet(et: EntityType, ets: set) - { - exists et1 <- ets :: possibleDescendantOf(et,et1) - } - - // Get the RecordType common to all entity types in the LUB - function getLubRecordType(lub: EntityLUB, m: ValidationMode): Result - { - if lub.AnyEntity? || exists et <- lub.tys :: isAction(et) - then Ok(RecordType(map[], OpenAttributes)) - else - if forall et <- lub.tys :: et in types - then - def.util.EntityTypeLeqIsTotalOrder(); - var lubSeq := def.util.SetToSortedSeq(lub.tys,def.util.EntityTypeLeq); - lubRecordTypeSeq(seq (|lubSeq|, i requires 0 <= i < |lubSeq| => types[lubSeq[i]]), m) - else Err(UnknownEntities(set et <- lub.tys | et !in types :: et)) - } - - // Check if an Attr is allowed by any entity type in the LUB - predicate isAttrPossible(lub: EntityLUB, k: Attr) - { - lub.AnyEntity? || exists e <- lub.tys :: e in types && (types[e].isOpen() || k in types[e].attrs) - } - } - - // --------- Action Store --------- // - - // The Action Store records the hierarchy between actions. This is different - // from the Entity Type Store because it stores the literal EUIDs of actions in - // the hierarchy, rather than their types. Like the Entity Type Store, we do - // not enforce that the descendantOf relationship is transitive. - datatype ActionStore = ActionStore( - descendants: map> - ) { - // Check if an euid1 is a descendent of euid2. 
- predicate descendantOf(euid1: EntityUID, euid2: EntityUID) - { - if euid1 == euid2 then true - else if euid2 in descendants - then euid1 in descendants[euid2] - else false - } - - predicate descendantOfSet(euid: EntityUID, euids: set) - { - exists euid1 <- euids :: descendantOf(euid, euid1) - } - } - - // --------- Request Type --------- // - - // Types for the four variables bound in the request, - // generated from the schema using a cross-product. - // If a field is None, then it is an "Unspecified" entity. - datatype RequestType = RequestType ( - principal: Option, - action: EntityUID, - resource: Option, - context: RecordType - ) - { - function isUnspecifiedVar(e: Expr): bool { - match e { - case Var(Principal) => this.principal.None? - case Var(Resource) => this.resource.None? - case _ => false - } - } - } - - // --------- Effects --------- // - - // Effects are used for occurrence typing. An (e,a) pair represents that - // attribute a is known to exist for expression e. - datatype Effects = Effects(effs: set<(Expr,Attr)>) - { - function union(other: Effects): Effects { - Effects(this.effs + other.effs) - } - - function intersect(other: Effects): Effects { - Effects(this.effs * other.effs) - } - - predicate contains(e: Expr, a: Attr) { - (e,a) in this.effs - } - - static function empty(): Effects { - Effects({}) - } - - static function singleton(e: Expr, a: Attr): Effects { - Effects({(e,a)}) - } - } - - // --------- Typechecker --------- // - - // A Typechecker is a standard bidirectional typechecker for Cedar. - // It expects an EntityTypeStore, ActionStore, and RequestType as input. - // The two main functions (typecheck and infer) are at the bottom of the - // datatype, with helpers at the top. 
- datatype Typechecker = Typechecker(ets: EntityTypeStore, acts: ActionStore, reqty: RequestType, mode: ValidationMode){ - - function ensureSubty(t1: Type, t2: Type): (res: Result<()>) - { - if subty(t1,t2,mode) then Ok(()) - else Err(SubtyErr(t1,t2)) - } - - function ensureStringType(e: Expr, effs: Effects): Result<()> - decreases e , 2 - { - var (t,_) :- infer(e,effs); - match t { - case String => Ok(()) - case _ => Err(UnexpectedType(t)) - } - } - - function ensureIntType(e: Expr, effs: Effects): Result<()> - decreases e , 2 - { - var (t,_) :- infer(e,effs); - match t { - case Int => Ok(()) - case _ => Err(UnexpectedType(t)) - } - } - - function ensureEntityType(e: Expr, effs: Effects): Result - decreases e , 2 - { - var (t,_) :- infer(e,effs); - match t { - case Entity(elub) => Ok(elub) - case _ => Err(UnexpectedType(t)) - } - } - - function ensureEntitySetType(e: Expr, effs: Effects): Result> - decreases e , 2 - { - var (t,_) :- infer(e,effs); - match t { - case Entity(elub) => Ok(Option.Some(elub)) - case Set(Entity(elub)) => Ok(Option.Some(elub)) - case Set(Never) => - if this.mode.isStrict() - then Err(EmptySetForbidden) - else Ok(Option.None) // empty set is also valid if we're not in strict mode - case _ => Err(UnexpectedType(t)) - } - } - - function inferPrim(p: Primitive): Result { - match p { - case Bool(true) => Ok(Type.Bool(True)) - case Bool(false) => Ok(Type.Bool(False)) - case Int(_) => Ok(Type.Int) - case String(_) => Ok(Type.String) - case EntityUID(u) => - if u.ty in ets.types || isAction(u.ty) - then Ok(Type.Entity(EntityLUB({u.ty}))) - else Err(UnknownEntities({u.ty})) - } - } - - function inferVar(x: Var): Result { - match x { - case Principal => - if reqty.principal.None? - then Ok(Type.Entity(AnyEntity)) - else Ok(Type.Entity(EntityLUB({reqty.principal.value}))) - case Context => Ok(Type.Record(reqty.context)) - case Action => Ok(Type.Entity(EntityLUB({reqty.action.ty}))) - case Resource => - if reqty.resource.None? 
- then Ok(Type.Entity(AnyEntity)) - else Ok(Type.Entity(EntityLUB({reqty.resource.value}))) - } - } - - function inferBoolType(e: Expr, effs: Effects): Result<(BoolType,Effects)> - decreases e , 2 - { - var (t,effs1) :- infer(e,effs); - match t { - case Bool(bt) => Ok((bt,effs1)) - case _ => Err(UnexpectedType(t)) - } - } - - function inferSetType(e: Expr, effs: Effects): Result - decreases e , 2 - { - var (t,_) :- infer(e,effs); - match t { - case Set(t1) => Ok(t1) - case _ => Err(UnexpectedType(t)) - } - } - - function inferRecordEntityType(e: Expr, effs: Effects): Result - decreases e , 2 - { - var (t,_) :- infer(e,effs); - match t { - case Record(rt) => Ok(RecordEntityType.Record(rt)) - case Entity(lub) => Ok(RecordEntityType.Entity(lub)) - case _ => Err(UnexpectedType(t)) - } - } - - // If e1 is known to be true, then return the union of the effects of e1 and e2. - // If e1 is known to be false, then return the effects of e3. - // Otherwise, return the union of the effects of e1 and e2 (the "true" case) - // intersected with the effects of e3 (the "false" case). - function inferIf(e1: Expr, e2: Expr, e3: Expr, effs: Effects): Result<(Type,Effects)> - decreases If(e1,e2,e3) , 0 - { - var (bt,effs1) :- inferBoolType(e1,effs); - match bt { - case True => - var (t,effs2) :- infer(e2,effs.union(effs1)); - Ok((t,effs1.union(effs2))) - case False => infer(e3,effs) - case Bool => - var (t1,effs2) :- infer(e2,effs.union(effs1)); - var (t2,effs3) :- infer(e3,effs); - var t :- lubOpt(t1,t2,mode); - Ok((t,effs1.union(effs2).intersect(effs3))) - } - } - - // If e1 or e2 is known to be false, then return the empty set of effects. - // Otherwise, return the union of the effects of e1 and e2. - // The returned effects will hold iff the runtime value of And(e1,e2) is true. 
- function inferAnd(e1: Expr, e2: Expr, effs: Effects): Result<(Type,Effects)> - decreases And(e1,e2) , 0 - { - var (bt1,effs1) :- inferBoolType(e1,effs); - match bt1 { - case False => wrap(Ok(Type.Bool(False))) - case _ => - var (bt2,effs2) :- inferBoolType(e2,effs.union(effs1)); - match bt2 { - case False => wrap(Ok(Type.Bool(False))) - case True => Ok((Type.Bool(bt1),effs1.union(effs2))) - case _ => Ok((Type.Bool(AnyBool),effs1.union(effs2))) - } - } - } - - // If e1 is known to be true, then return the effects of e1. - // If e1 is known to be false, then return the effects of e2. - // If e1 is unknown and e2 is known to be true, then return the effects of e2. - // If e1 is unknown and e2 is known to be false, then return the effects of e1. - // Otherwise, return the intersection of the effects of e1 and e2. - // The returned effects will hold iff the runtime value of Or(e1,e2) is true. - function inferOr(e1: Expr, e2: Expr, effs: Effects): Result<(Type,Effects)> - decreases Or(e1,e2) , 0 - { - var (bt1,effs1) :- inferBoolType(e1,effs); - match bt1 { - case True => wrap(Ok(Type.Bool(True))) - case False => - var (bt2,effs2) :- inferBoolType(e2,effs); - Ok((Type.Bool(bt2),effs2)) - case _ => - var (bt2,effs2) :- inferBoolType(e2,effs); - match bt2 { - case True => wrap(Ok(Type.Bool(True))) - case False => Ok((Type.Bool(bt1),effs1)) - case _ => Ok((Type.Bool(AnyBool),effs1.intersect(effs2))) - } - } - } - - function inferNot(e: Expr, effs: Effects): Result - decreases UnaryApp(Not,e) , 0 - { - var (bt,_) :- inferBoolType(e,effs); - Ok(Type.Bool(bt.not())) - } - - - function inferEq(e1: Expr, e2: Expr, effs: Effects): (res: Result) - decreases BinaryApp(BinaryOp.Eq,e1,e2) , 0 - { - var (t1,_) :- infer(e1,effs); - var (t2,_) :- infer(e2,effs); - if t1.Entity? && t2.Entity? && t1.lub.disjoint(t2.lub) - then Ok(Type.Bool(False)) - else if reqty.isUnspecifiedVar(e1) && t2.Entity? 
&& t2.lub.specified() - then Ok(Type.Bool(False)) - else match (e1,e2) { - case (PrimitiveLit(EntityUID(u1)),PrimitiveLit(EntityUID(u2))) => - if u1 == u2 then Ok(Type.Bool(True)) else Ok(Type.Bool(False)) - case _ => - if mode.isStrict() && lubOpt(t1, t2, mode).Err? - then Err(LubErr(t1, t2)) - else Ok(Type.Bool(AnyBool)) - } - } - - function inferIneq(ghost op: BinaryOp, e1: Expr, e2: Expr, effs: Effects): Result - requires op == Less || op == LessEq - decreases BinaryApp(op,e1,e2) , 0 - { - var _ :- ensureIntType(e1,effs); - var _ :- ensureIntType(e2,effs); - Ok(Type.Bool(AnyBool)) - } - - function tryGetEUID(e: Expr): Option { - match e { - case PrimitiveLit(EntityUID(euid)) => Option.Some(euid) - case _ => Option.None - } - } - - function tryGetEUIDs(e: Expr): Option> { - match e { - case Set(es) => - if forall e1 <- es :: tryGetEUID(e1).Some? - then Option.Some(set e1 <- es :: tryGetEUID(e1).value) - else Option.None - case _ => Option.None - } - } - - function getPrincipalOrResource(v: Var): Option - requires v == Var.Principal || v == Var.Resource - { - match v { - case Principal => reqty.principal - case Resource => reqty.resource - } - } - - function extractEntityType(lub: EntityLUB): Result { - match lub { - case AnyEntity => Result.Err(NonSingletonLub) - case EntityLUB(tys) => - def.util.EntityTypeLeqIsTotalOrder(); - var tySeq := def.util.SetToSortedSeq(lub.tys,def.util.EntityTypeLeq); - if |tySeq| == 1 - then Result.Ok(tySeq[0]) - else Result.Err(NonSingletonLub) - } - } - - function inferIn(ghost parent: Expr, e1: Expr, e2: Expr, effs: Effects): Result - requires e1 < parent - requires e2 < parent - decreases parent , 0 , e2 - { - // check that LHS is an entity - var elub1 :- ensureEntityType(e1,effs); - // check that RHS is an entity or a set of entities - var elub2 :- ensureEntitySetType(e2,effs); - - var (t2, _) := infer(e2,effs).value; - var outTy :- - if reqty.isUnspecifiedVar(e1) && match t2 { - case Entity(lub) => lub.specified() - case 
Set(Entity(lub)) => lub.specified() - // `Set(Never)` is the type of the empty set. It would also be safe to - // return true in this case, but false matches the Rust implementation. - case Set(Never) => false - } - then Ok(Type.Bool(False)) - else match (e1,e2) { - // We substitute `Var::Action` for its literal EntityUID prior to - // validation, so the real logic for handling Actions is in the entity - // literal case below. We return an imprecise default answer here. - case (Var(Action),_) => Ok(Type.Bool(AnyBool)) - // LHS is Principal or Resource - case (Var(v),PrimitiveLit(EntityUID(u))) => - var et := getPrincipalOrResource(v); - // Note: When `et.None?`, typing `e1 in e2` as false would be - // unsound without some additional hypothesis that the literal(s) in - // e2 are not unspecified entities. We expect that case to be - // handled by the `isUnspecifiedVar` code above, so we don't handle - // it again here. - var b := et.None? || ets.possibleDescendantOf(et.value,u.ty); - if b then Ok(Type.Bool(AnyBool)) else Ok(Type.Bool(False)) - case (Var(v),Set(_)) => - var et := getPrincipalOrResource(v); - match tryGetEUIDs(e2) { - case Some(euids) => - var es := set euid <- euids :: euid.ty; - var b := et.None? || ets.possibleDescendantOfSet(et.value,es); - if b then Ok(Type.Bool(AnyBool)) else Ok(Type.Bool(False)) - case None => Ok(Type.Bool(AnyBool)) - } - // LHS is entity literal (or action, per above) - case (PrimitiveLit(EntityUID(u1)),PrimitiveLit(EntityUID(u2))) => - // If the entity literal is an action, then use acts.descendantOf. - // Otherwise, use ets.possibleDescendantOf. 
- if isAction(u1.ty) - then - if acts.descendantOf(u1,u2) then Ok(Type.Bool(AnyBool)) else Ok(Type.Bool(False)) - else - var b := ets.possibleDescendantOf(u1.ty,u2.ty); - if b then Ok(Type.Bool(AnyBool)) else Ok(Type.Bool(False)) - case (PrimitiveLit(EntityUID(u)),Set(_)) => - match tryGetEUIDs(e2) { - case Some(euids) => - // If the entity literal is an action, then use acts.descendantOfSet. - // Otherwise, use ets.possibleDescendantOfSet. - if isAction(u.ty) - then - if acts.descendantOfSet(u,euids) then Ok(Type.Bool(AnyBool)) else Ok(Type.Bool(False)) - else - var es := set euid <- euids :: euid.ty; - var b := ets.possibleDescendantOfSet(u.ty,es); - if b then Ok(Type.Bool(AnyBool)) else Ok(Type.Bool(False)) - case None => Ok(Type.Bool(AnyBool)) - } - // otherwise, the result is unknown so return Bool - case _ => Ok(Type.Bool(AnyBool)) - }; - - if this.mode.isStrict() && outTy == Type.Bool(AnyBool) - then - var ety1 :- extractEntityType(elub1); - var ety2 :- extractEntityType(elub2.value); - // If LHS cannot be a member of RHS, but we did not assign this - // expression type `False`, then this is a strict type checking error. - // Note that is it should be sound to just return type `False`, but I'm - // putting off that code and proof update for later. - if ets.possibleDescendantOf(ety1, ety2) - then Ok(outTy) - else Err(HierarchyNotRespected) - else Ok(outTy) - } - - function inferContainsAnyAll(b: BinaryOp, e1: Expr, e2: Expr, effs: Effects): Result - requires b == ContainsAny || b == ContainsAll - decreases BinaryApp(b,e1,e2), 0 - { - var s1 :- inferSetType(e1,effs); - var s2 :- inferSetType(e2,effs); - if mode.isStrict() && lubOpt(s1, s2, mode).Err? - then Err(LubErr(s1, s2)) - else Ok(Type.Bool(AnyBool)) - } - - function inferContains(e1: Expr, e2: Expr, effs: Effects): Result - decreases BinaryApp(Contains,e1,e2) , 0 - { - var s :- inferSetType(e1,effs); - var (t, _) :- infer(e2,effs); - if mode.isStrict() && lubOpt(s, t, mode).Err? 
- then Err(LubErr(s, t)) - else Ok(Type.Bool(AnyBool)) - } - - function {:opaque true} inferRecord(ghost e: Expr, r: seq<(Attr,Expr)>, effs: Effects): (res: Result) - requires forall i | 0 <= i < |r| :: r[i] < e - decreases e , 0 , r - { - if r == [] then - Ok(RecordType(map[], ClosedAttributes)) - else - var k := r[0].0; - var (t,_) :- infer(r[0].1,effs); - assert r[0] < e; - var m :- inferRecord(e,r[1..],effs); - Ok(RecordType(if k in m.attrs.Keys then m.attrs else m.attrs[k := AttrType(t,true)], ClosedAttributes)) - } - - function inferHasAttrHelper(e: Expr, k: Attr, rt: RecordType, effs: Effects, knownToExist: bool): Result<(Type,Effects)> - { - if k in rt.attrs - then - if rt.attrs[k].isRequired && knownToExist then wrap(Ok(Type.Bool(True))) - else if effs.contains(e,k) - then wrap(Ok(Type.Bool(True))) - else Ok((Type.Bool(AnyBool),Effects.singleton(e,k))) - else if rt.isOpen() - then wrap(Ok(Type.Bool(AnyBool))) - else wrap(Ok(Type.Bool(False))) - } - - function inferHasAttr(e: Expr, k: Attr, effs: Effects): Result<(Type,Effects)> - decreases HasAttr(e,k) , 0 - { - var ret :- inferRecordEntityType(e,effs); - match ret { - case Record(rt) => inferHasAttrHelper(e,k,rt,effs,true) - case Entity(lub) => - if !ets.isAttrPossible(lub,k) then wrap(Ok(Type.Bool(False))) - else - (var rt :- ets.getLubRecordType(lub, mode); - inferHasAttrHelper(e,k,rt,effs,false)) - - } - } - - function inferLike(p: Pattern, e: Expr, effs: Effects): Result - decreases UnaryApp(Like(p),e) , 0 - { - var _ :- ensureStringType(e,effs); - Ok(Type.Bool(AnyBool)) - } - - function inferIs(ety: EntityType, e: Expr, effs: Effects): Result - decreases UnaryApp(UnaryOp.Is(ety),e) , 0 - { - var elub :- ensureEntityType(e,effs); - match elub { - case AnyEntity => Ok(Type.Bool(AnyBool)) - case EntityLUB(tys) => - if ety !in tys - then Ok(Type.Bool(False)) - else if ety in tys && |tys| == 1 - then Ok(Type.Bool(True)) - else Ok(Type.Bool(AnyBool)) - } - } - - function inferArith1(ghost op: UnaryOp, e: 
Expr, effs: Effects): Result - requires op.Neg? || op.MulBy? - decreases UnaryApp(op,e) , 0 - { - var _ :- ensureIntType(e,effs); - Ok(Type.Int) - } - - function inferArith2(ghost op: BinaryOp, e1: Expr, e2: Expr, effs: Effects): Result - requires op == Add || op == Sub - decreases BinaryApp(op,e1,e2) , 0 - { - var _ :- ensureIntType(e1,effs); - var _ :- ensureIntType(e2,effs); - Ok(Type.Int) - } - - function inferGetAttr(e: Expr, k: Attr, effs: Effects): Result - decreases GetAttr(e,k) , 0 - { - var ret :- inferRecordEntityType(e,effs); - match ret { - case Record(rt) => - if k in rt.attrs && (rt.attrs[k].isRequired || effs.contains(e,k)) - then Ok(rt.attrs[k].ty) - else Err(AttrNotFound(Type.Record(rt),k)) - case Entity(lub) => - var rt :- ets.getLubRecordType(lub, mode); - if k in rt.attrs && (rt.attrs[k].isRequired || effs.contains(e,k)) - then Ok(rt.attrs[k].ty) - else Err(AttrNotFound(Type.Entity(lub),k)) - } - } - - function inferSet(ghost e: Expr, r: seq, effs: Effects): (res: Result) - requires forall i | 0 <= i < |r| :: r[i] < e - decreases e , 0 , r - { - if r == [] then - Ok(Type.Never) - else - var (t,_) :- infer(r[0],effs); - var t1 :- inferSet(e,r[1..],effs); - var t2 :- lubOpt(t,t1,mode); - Ok(t2) - } - - // Utility to convert `Ok(T)` to `Ok(T,Effects.empty())` - function wrap(t: Result): Result<(Type,Effects)> { - t.Map(t0 => (t0,Effects.empty())) - } - - function inferCallArgs(ghost e: Expr, args: seq, tys: seq, effs: Effects): Result<()> - requires |args| == |tys| - requires forall i | 0 <= i < |args| :: args[i] < e - decreases e , 0 , args - { - if args == [] then - Ok(()) - else - var (t,_) :- infer(args[0],effs); - var _ :- ensureSubty(t,tys[0]); - inferCallArgs(e,args[1..],tys[1..],effs) - } - - function inferCall(ghost e: Expr, name: base.Name, args: seq, effs: Effects): Result - requires forall i | 0 <= i < |args| :: args[i] < e - decreases e , 0 - { - if name in extFunTypes.Keys - then - var ty := extFunTypes[name]; - // check that the 
function uses the expected number of arguments - var _ :- if |args| == |ty.args| then Ok(()) else Err(ExtensionErr(Call(name,args))); - // check that all args are a subtype of the expected type - var _ :- inferCallArgs(e,args,ty.args,effs); - // run the optional argument check - var _ :- match ty.check - case Some(f) => - if mode.isStrict() && exists i | 0 <= i < |args| :: ! args[i].PrimitiveLit? - then Err(NonLitExtConstructor) - else f(args) - case None => Ok(()); - // if we reach this point, then type checking was successful - Ok(ty.ret) - else Err(ExtensionErr(Call(name,args))) - } - - // Inference is fully syntax directed: we simply crawl over the term and - // read off the type. This is only possible without annotation because - // Cedar has no binders. If in the future Cedar gets functions, they will - // have to have type signatures for this bidirectional system to continue - // working. - // - // `effs` tracks the attributes that are known to exist for prior (enclosing) - // expressions. The returned effects are new effects introduced by - // typing the current expression. 
- function infer(e: Expr, effs: Effects): Result<(Type,Effects)> - decreases e , 1 - { - match e { - case PrimitiveLit(p) => wrap(inferPrim(p)) - case Var(x) => wrap(inferVar(x)) - case If(e1,e2,e3) => inferIf(e1,e2,e3,effs) - case And(e1,e2) => inferAnd(e1,e2,effs) - case Or(e1,e2) => inferOr(e1,e2,effs) - case UnaryApp(Not,e1) => wrap(inferNot(e1,effs)) - case UnaryApp(Neg,e1) => wrap(inferArith1(Neg,e1,effs)) - case UnaryApp(MulBy(i),e1) => wrap(inferArith1(MulBy(i),e1,effs)) - case UnaryApp(Like(p),e1) => wrap(inferLike(p,e1,effs)) - case UnaryApp(Is(ety),e1) => wrap(inferIs(ety,e1,effs)) - case BinaryApp(Eq,e1,e2) => wrap(inferEq(e1,e2,effs)) - case BinaryApp(Less,e1,e2) => wrap(inferIneq(Less,e1,e2,effs)) - case BinaryApp(LessEq,e1,e2) => wrap(inferIneq(LessEq,e1,e2,effs)) - case BinaryApp(In,e1,e2) => wrap(inferIn(e,e1,e2,effs)) - case BinaryApp(Add,e1,e2) => wrap(inferArith2(Add,e1,e2,effs)) - case BinaryApp(Sub,e1,e2) => wrap(inferArith2(Sub,e1,e2,effs)) - case BinaryApp(ContainsAny,e1,e2) => wrap(inferContainsAnyAll(ContainsAny,e1,e2,effs)) - case BinaryApp(ContainsAll,e1,e2) => wrap(inferContainsAnyAll(ContainsAll,e1,e2,effs)) - case BinaryApp(Contains,e1,e2) => wrap(inferContains(e1,e2,effs)) - case Record(r) => var rt :- inferRecord(Expr.Record(r),r,effs); wrap(Ok(Type.Record(rt))) - case Set(es) => - if mode.isStrict() && es == [] - then Err(EmptySetForbidden) - else var st :- inferSet(e,es,effs); wrap(Ok(Type.Set(st))) - case HasAttr(e1,k) => inferHasAttr(e1,k,effs) - case GetAttr(e1,k) => wrap(inferGetAttr(e1,k,effs)) - case Call(name,args) => wrap(inferCall(e,name,args,effs)) - } - } - - // The standard "turn-around" rule of a bidirectional type system forms the - // top-level interface for the expression-level part of the validator. 
- function typecheck(e: Expr, t: Type): Result { - // call infer with an empty effect set - var (t1,_) :- infer(e,Effects.empty()); - // check that the result of inference is a subtype of t - var _ :- ensureSubty(t1,t); - Ok(t1) - } - - } - -} diff --git a/cedar-dafny/validation/types.dfy b/cedar-dafny/validation/types.dfy deleted file mode 100644 index e0050a918..000000000 --- a/cedar-dafny/validation/types.dfy +++ /dev/null @@ -1,167 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -include "../def/all.dfy" - -module validation.types { - import opened def.base - import opened def.core - - // The ValidationMode determines whether to use permissive or strict typechecking - datatype ValidationMode = Permissive | Strict { - predicate isStrict() { - match this { - case Permissive => false - case Strict => true - } - } - } - - // --------- Types --------- // - - datatype BoolType = AnyBool | True | False { - function not(): BoolType { - match this { - case AnyBool => AnyBool - case True => False - case False => True - } - } - } - - type EntityType = core.EntityType - - predicate isAction(ety: EntityType) - { - ety.id.id == Id("Action") - } - - datatype EntityLUB = AnyEntity | EntityLUB(tys: set) { - predicate subset(other: EntityLUB) { - match (this, other) { - case (_, AnyEntity) => true - case (EntityLUB(tys1),EntityLUB(tys2)) => tys1 <= tys2 - case _ => false - } - } - predicate disjoint(other: EntityLUB) { - match (this, other) { - case (EntityLUB(tys1),EntityLUB(tys2)) => tys1 !! tys2 - case _ => false - } - } - function union(other: EntityLUB): EntityLUB { - match (this, other) { - case (EntityLUB(tys1),EntityLUB(tys2)) => - // Check if either LUB contains an Action entity type. We do not - // permit any LUBs that contain different action entity types, so - // their union is `AnyEntity` if they are not the same action entity - // type. This also gives `AnyEntity` if they are the same action - // entity type, but some other entity types in the LUB differ. We - // never construct non-singleton action EntityLUBs, so this cannot - // occur. - if (exists ty1 <- tys1 :: isAction(ty1) || exists ty2 <- tys2 :: isAction(ty2)) && tys1 != tys2 - then AnyEntity - else EntityLUB(tys1 + tys2) - case _ => AnyEntity - } - } - predicate specified() { - EntityLUB? 
&& EntityType.UNSPECIFIED !in tys - } - } - - datatype AttrType = AttrType(ty: Type, isRequired: bool) - datatype AttrsTag = OpenAttributes | ClosedAttributes - datatype RecordType = RecordType( - attrs: map, - // Indicates whether a value having this record type may have attributes - // beyond those lists in `attrs` (open), or if it must match `attrs` - // exactly (closed). In principal, any record type, including an entity - // attributes record type, may be closed or open, but the type of all record - // literals and any record type written in the schema is always closed. A - // least upper bound between record types will tend to be open, but it may - // be closed if the constituent record types are closed and have exactly the - // same attributes with a least upper bound existing between corresponding - // attributes. - attrsTag: AttrsTag - ) { - predicate isOpen() { - attrsTag.OpenAttributes? - } - - predicate isStrictType() { - forall k | k in attrs.Keys :: attrs[k].ty.isStrictType() - } - } - - // Each extension function is associated with argument types, a return type, - // and an optional method that checks input well-formedness. - datatype ExtFunType = ExtFunType(args: seq, ret: Type, check: Option -> Result<()>>) - - datatype Type = - Never | // used to type the empty set - String | - Int | - Bool(BoolType) | - Set(ty: Type) | - Record(RecordType) | - Entity(lub: EntityLUB) | - Extension(Name) - { - predicate isStrictType() { - match this { - case Never => false - case String | Int | Bool(_) | Extension(_) => true - case Set(t) => t.isStrictType() - case Record(rt) => rt.isStrictType() - case Entity(lub) => lub.EntityLUB? 
&& |lub.tys| == 1 - } - } - } - - datatype SetStringType = SetType(Type) | StringType - datatype RecordEntityType = Record(RecordType) | Entity(EntityLUB) - - // --------- Typing Errors --------- // - - datatype TypeError = - LubErr(Type,Type) | - SubtyErr(Type,Type) | - UnexpectedType(Type) | - AttrNotFound(Type,Attr) | - UnknownEntities(set) | - ExtensionErr(Expr) | - EmptyLUB | - EmptySetForbidden | - NonLitExtConstructor | - NonSingletonLub | - HierarchyNotRespected - - // --------- Local Names for Useful Types --------- // - - type Result = std.Result - - function Ok(v: T): Result { - Result.Ok(v) - } - - function Err(v: TypeError): Result { - Result.Err(v) - } - - type Option = std.Option -} diff --git a/cedar-dafny/validation/util.dfy b/cedar-dafny/validation/util.dfy deleted file mode 100644 index 555b6b0ec..000000000 --- a/cedar-dafny/validation/util.dfy +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -include "../def/all.dfy" - -module validation.util { - import opened def.base - import opened def.core - import opened def.engine - - // --------- Replace a variable with a literal entity UID --------- // - - function substitute(e: Expr, v: Var, euid: EntityUID): Expr { - match e { - case PrimitiveLit(_) => e - case Var(v') => - if v == v' then PrimitiveLit(Primitive.EntityUID(euid)) else e - case If(e1, e2, e3) => - var e1' := substitute(e1, v, euid); - var e2' := substitute(e2, v, euid); - var e3' := substitute(e3, v, euid); - If(e1', e2', e3') - case And(e1, e2) => - var e1' := substitute(e1, v, euid); - var e2' := substitute(e2, v, euid); - And(e1', e2') - case Or(e1, e2) => - var e1' := substitute(e1, v, euid); - var e2' := substitute(e2, v, euid); - Or(e1', e2') - case UnaryApp(op, e1) => - var e1' := substitute(e1, v, euid); - UnaryApp(op, e1') - case BinaryApp(op, e1, e2) => - var e1' := substitute(e1, v, euid); - var e2' := substitute(e2, v, euid); - BinaryApp(op, e1', e2') - case GetAttr(e1, a) => - var e1' := substitute(e1, v, euid); - GetAttr(e1', a) - case HasAttr(e1, a) => - var e1' := substitute(e1, v, euid); - HasAttr(e1', a) - case Set(es) => - var es' := seq (|es|, i requires 0 <= i < |es| => substitute(es[i], v, euid)); - Expr.Set(es') - case Record(er) => - var er' := seq (|er|, i requires 0 <= i < |er| => (er[i].0, substitute(er[i].1, v, euid))); - Expr.Record(er') - case Call(name, args) => - var args' := seq (|args|, i requires 0 <= i < |args| => substitute(args[i], v, euid)); - Call(name, args') - } - } -} diff --git a/cedar-dafny/validation/validator.dfy b/cedar-dafny/validation/validator.dfy deleted file mode 100644 index a59c2ad94..000000000 --- a/cedar-dafny/validation/validator.dfy +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -include "../def/all.dfy" -include "subtyping.dfy" -include "typechecker.dfy" -include "types.dfy" -include "util.dfy" - -// This module contains the specification of Cedar's validator. -module validation.validator { - import opened def.base - import opened def.core - import opened typechecker - import opened types - import opened util - - // The Schema file records various information useful for validation. Its - // structure matches the Rust implementation to facilitate DRT. 
- datatype Schema = Schema( - entityTypes: map, - actionIds: map - ) { - - // Return every schema-defined request type - function allRequestTypes(): set { - set a,p,r | a in actionIds.Keys && - p in actionIds[a].appliesTo.principalApplySpec && - r in actionIds[a].appliesTo.resourceApplySpec :: - RequestType(p, a, r, RecordType(actionIds[a].context, ClosedAttributes)) - } - - // Generate an EntityTypeStore - function makeEntityTypeStore(): EntityTypeStore { - var types := map et | et in entityTypes :: RecordType(entityTypes[et].attributes, ClosedAttributes); - var descendants := map et | et in entityTypes :: entityTypes[et].descendants; - EntityTypeStore(types, descendants) - } - - // Generate an ActionStore - function makeActionStore(): ActionStore { - var descendants := map act | act in actionIds :: actionIds[act].descendants; - ActionStore(descendants) - } - } - - datatype TypecheckerEntityType = TypecheckerEntityType( - descendants: set, - attributes: map - ) - - datatype TypecheckerActionId = TypecheckerActionId( - appliesTo: TypecheckerApplySpec, - descendants: set, - context: map - ) - - datatype TypecheckerApplySpec = TypecheckerApplySpec( - principalApplySpec: set>, - resourceApplySpec: set> - ) - - datatype ValidationError = - // Error when typechecking a policy - TypeError(TypeError) | - // A policy returns False under all query types - AllFalse - - // A Validator typechecks a set of policies. - datatype Validator = Validator(schema: Schema, mode: ValidationMode) { - - // check that e is a bool-typed expression for the input entity store type, - // action store, and request type - function Typecheck (e: Expr, ets: EntityTypeStore, acts: ActionStore, reqty: RequestType): std.Result { - Typechecker(ets, acts, reqty, mode).typecheck(e, Type.Bool(AnyBool)) - } - - // Returns a list of type errors for easier debugging, - // but DRT currently only checks whether the output is empty. 
- method Validate (policyStore: PolicyStore) returns (errs:seq) - { - var pset := set p | p in policyStore.policies.Values; - errs := []; - // for every policy p - while pset != {} { - var p :| p in pset; - var reqtys := schema.allRequestTypes(); - var ets := schema.makeEntityTypeStore(); - var acts := schema.makeActionStore(); - // for every possible request type - var allFalse := true; - while reqtys != {} { - var reqty :| reqty in reqtys; - // substitute Action variable for a literal EUID - var condition := substitute(p.toExpr(), Action, reqty.action); - // typecheck p - var answer := Typecheck(condition, ets, acts, reqty); - match answer { - case Ok(Bool(False)) => {} - case Ok(_) => allFalse := false; - case Err(e) => - allFalse := false; - errs := errs + [ValidationError.TypeError(e)]; - } - reqtys := reqtys - { reqty }; - } - // is the policy False under all envs? - if allFalse { - errs := errs + [AllFalse]; - } - pset := pset - { p }; - } - return errs; - } - } -} diff --git a/cedar-drt/README.md b/cedar-drt/README.md index c53ad7225..1b3f7a069 100644 --- a/cedar-drt/README.md +++ b/cedar-drt/README.md @@ -5,7 +5,7 @@ See the README in the toplevel directory `..` for instructions on how to run. ## Available fuzz targets -The table below lists all available fuzz targets, including which component of the code they test and whether they perform property-based testing of the Rust code (PBT) or differential testing of the Rust code against the Dafny spec (DRT). +The table below lists all available fuzz targets, including which component of the code they test and whether they perform property-based testing of the Rust code (PBT) or differential testing of the Rust code against the Lean spec (DRT). 
| Name | Component(s) tested | Type | Description | | ----------- | ----------- | ----------- | ----------- | @@ -42,7 +42,7 @@ You can adjust the script's behavior using the following environment variables: ## Debugging build failures If you run into weird build issues, -1. Make sure you have run `source set_env_vars.sh`, which sets all the environment variables needed to run the Dafny and Lean definitional code. +1. Make sure you have run `source set_env_vars.sh`, which sets all the environment variables needed to run the Lean definitional code. 2. Try a `cargo clean` and rebuild. 3. If the steps above don't help, then file [an issue](https://github.com/cedar-policy/cedar-spec/issues). diff --git a/cedar-drt/fuzz/Cargo.toml b/cedar-drt/fuzz/Cargo.toml index 3f6878751..c28e81054 100644 --- a/cedar-drt/fuzz/Cargo.toml +++ b/cedar-drt/fuzz/Cargo.toml @@ -43,48 +43,24 @@ path = "fuzz_targets/rbac.rs" test = false doc = false -[[bin]] -name = "rbac-lean" -path = "fuzz_targets/rbac-lean.rs" -test = false -doc = false - [[bin]] name = "rbac-authorizer" path = "fuzz_targets/rbac-authorizer.rs" test = false doc = false -[[bin]] -name = "rbac-authorizer-lean" -path = "fuzz_targets/rbac-authorizer-lean.rs" -test = false -doc = false - [[bin]] name = "abac" path = "fuzz_targets/abac.rs" test = false doc = false -[[bin]] -name = "abac-lean" -path = "fuzz_targets/abac-lean.rs" -test = false -doc = false - [[bin]] name = "abac-type-directed" path = "fuzz_targets/abac-type-directed.rs" test = false doc = false -[[bin]] -name = "abac-type-directed-lean" -path = "fuzz_targets/abac-type-directed-lean.rs" -test = false -doc = false - [[bin]] name = "partial-eval" path = "fuzz_targets/partial-eval.rs" @@ -109,12 +85,6 @@ path = "fuzz_targets/validation-pbt.rs" test = false doc = false -[[bin]] -name = "validation-drt" -path = "fuzz_targets/validation-drt.rs" -test = false -doc = false - [[bin]] name = "validation-drt-type-directed" path = 
"fuzz_targets/validation-drt-type-directed.rs" @@ -127,18 +97,6 @@ path = "fuzz_targets/simple-parser.rs" test = false doc = false -[[bin]] -name = "strict-validation-drt-type-directed" -path = "fuzz_targets/strict-validation-drt-type-directed.rs" -test = false -doc = false - -[[bin]] -name = "strict-validation-drt-type-directed-lean" -path = "fuzz_targets/strict-validation-drt-type-directed-lean.rs" -test = false -doc = false - [[bin]] name = "wildcard-matching" path = "fuzz_targets/wildcard-matching.rs" @@ -149,10 +107,4 @@ doc = false name = "eval-type-directed" path = "fuzz_targets/eval-type-directed.rs" test = false -doc = false - -[[bin]] -name = "eval-type-directed-lean" -path = "fuzz_targets/eval-type-directed-lean.rs" -test = false -doc = false +doc = false \ No newline at end of file diff --git a/cedar-drt/fuzz/fuzz_targets/abac-lean.rs b/cedar-drt/fuzz/fuzz_targets/abac-lean.rs deleted file mode 100644 index 75e884ff8..000000000 --- a/cedar-drt/fuzz/fuzz_targets/abac-lean.rs +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#![no_main] -mod abac_shared; - -use cedar_drt::*; -use libfuzzer_sys::fuzz_target; - -fuzz_target!(|input: abac_shared::FuzzTargetInput| { - let def_engine = LeanDefinitionalEngine::new(); - abac_shared::fuzz(input, &def_engine); -}); diff --git a/cedar-drt/fuzz/fuzz_targets/abac-type-directed-lean.rs b/cedar-drt/fuzz/fuzz_targets/abac-type-directed-lean.rs deleted file mode 100644 index 7a99a1011..000000000 --- a/cedar-drt/fuzz/fuzz_targets/abac-type-directed-lean.rs +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#![no_main] -mod abac_type_directed_shared; - -use cedar_drt::*; -use libfuzzer_sys::fuzz_target; - -fuzz_target!(|input: abac_type_directed_shared::FuzzTargetInput| { - let def_engine = LeanDefinitionalEngine::new(); - abac_type_directed_shared::fuzz(input, &def_engine); -}); diff --git a/cedar-drt/fuzz/fuzz_targets/abac-type-directed.rs b/cedar-drt/fuzz/fuzz_targets/abac-type-directed.rs index 3ed7dd972..fc0a1e5d7 100644 --- a/cedar-drt/fuzz/fuzz_targets/abac-type-directed.rs +++ b/cedar-drt/fuzz/fuzz_targets/abac-type-directed.rs @@ -15,12 +15,159 @@ */ #![no_main] -mod abac_type_directed_shared; - use cedar_drt::*; -use cedar_drt_inner::fuzz_target; +use cedar_drt_inner::*; +use cedar_policy_core::ast; +use cedar_policy_core::authorizer::Authorizer; +use cedar_policy_core::entities::Entities; +use cedar_policy_generators::{ + abac::{ABACPolicy, ABACRequest}, + err::Error, + hierarchy::HierarchyGenerator, + schema::Schema, + settings::ABACSettings, +}; +use libfuzzer_sys::arbitrary::{self, Arbitrary, Unstructured}; +use log::{debug, info}; +use serde::Serialize; +use std::convert::TryFrom; + +/// Input expected by this fuzz target: +/// An ABAC hierarchy, policy, and 8 associated requests +#[derive(Debug, Clone, Serialize)] +pub struct FuzzTargetInput { + /// generated schema + #[serde(skip)] + pub schema: Schema, + /// generated entity slice + #[serde(skip)] + pub entities: Entities, + /// generated policy + pub policy: ABACPolicy, + /// the requests to try for this hierarchy and policy. 
We try 8 requests per + /// policy/hierarchy + #[serde(skip)] + pub requests: [ABACRequest; 8], +} + +/// settings for this fuzz target +const SETTINGS: ABACSettings = ABACSettings { + match_types: true, + enable_extensions: true, + max_depth: 3, + max_width: 3, + enable_additional_attributes: false, + enable_like: true, + enable_action_groups_and_attrs: true, + enable_arbitrary_func_call: true, + enable_unknowns: false, + enable_action_in_constraints: true, + enable_unspecified_apply_spec: true, +}; + +impl<'a> Arbitrary<'a> for FuzzTargetInput { + fn arbitrary(u: &mut Unstructured<'a>) -> arbitrary::Result { + let schema = Schema::arbitrary(SETTINGS.clone(), u)?; + let hierarchy = schema.arbitrary_hierarchy(u)?; + let policy = schema.arbitrary_policy(&hierarchy, u)?; + + let requests = [ + schema.arbitrary_request(&hierarchy, u)?, + schema.arbitrary_request(&hierarchy, u)?, + schema.arbitrary_request(&hierarchy, u)?, + schema.arbitrary_request(&hierarchy, u)?, + schema.arbitrary_request(&hierarchy, u)?, + schema.arbitrary_request(&hierarchy, u)?, + schema.arbitrary_request(&hierarchy, u)?, + schema.arbitrary_request(&hierarchy, u)?, + ]; + let all_entities = Entities::try_from(hierarchy).map_err(|_| Error::NotEnoughData)?; + let entities = drop_some_entities(all_entities, u)?; + Ok(Self { + schema, + entities, + policy, + requests, + }) + } + + fn size_hint(depth: usize) -> (usize, Option) { + arbitrary::size_hint::and_all(&[ + Schema::arbitrary_size_hint(depth), + HierarchyGenerator::size_hint(depth), + Schema::arbitrary_policy_size_hint(&SETTINGS, depth), + Schema::arbitrary_request_size_hint(depth), + Schema::arbitrary_request_size_hint(depth), + Schema::arbitrary_request_size_hint(depth), + Schema::arbitrary_request_size_hint(depth), + Schema::arbitrary_request_size_hint(depth), + Schema::arbitrary_request_size_hint(depth), + Schema::arbitrary_request_size_hint(depth), + Schema::arbitrary_request_size_hint(depth), + ]) + } +} +// Type-directed fuzzing of ABAC 
hierarchy/policy/requests. fuzz_target!(|input: abac_type_directed_shared::FuzzTargetInput| { - let def_engine = JavaDefinitionalEngine::new().expect("failed to create definitional engine"); - abac_type_directed_shared::fuzz(input, &def_engine); + initialize_log(); + let def_engine = LeanDefinitionalEngine::new(); + let mut policyset = ast::PolicySet::new(); + let policy: ast::StaticPolicy = input.policy.into(); + policyset.add_static(policy.clone()).unwrap(); + debug!("Schema: {}\n", input.schema.schemafile_string()); + debug!("Policies: {policyset}\n"); + debug!("Entities: {}\n", input.entities); + + let requests = input + .requests + .into_iter() + .map(Into::into) + .collect::>(); + + for request in requests.iter().cloned() { + debug!("Request : {request}"); + let (rust_res, total_dur) = + time_function(|| run_auth_test(def_impl, request, &policyset, &input.entities)); + + info!("{}{}", TOTAL_MSG, total_dur.as_nanos()); + + // additional invariant: + // type-directed fuzzing should never produce wrong-number-of-arguments errors + assert_eq!( + rust_res + .diagnostics + .errors + .iter() + .map(ToString::to_string) + .filter(|err| err.contains("wrong number of arguments")) + .collect::>(), + Vec::::new() + ); + } + + if let Ok(test_name) = std::env::var("DUMP_TEST_NAME") { + // When the corpus is re-parsed, the policy will be given id "policy0". + // Recreate the policy set and compute responses here to account for this. 
+ let mut policyset = ast::PolicySet::new(); + let policy = policy.new_id(ast::PolicyID::from_string("policy0")); + policyset.add_static(policy).unwrap(); + let responses = requests + .iter() + .map(|request| { + let authorizer = Authorizer::new(); + authorizer.is_authorized(request.clone(), &policyset, &input.entities) + }) + .collect::>(); + let dump_dir = std::env::var("DUMP_TEST_DIR").unwrap_or_else(|_| ".".to_string()); + dump( + dump_dir, + &test_name, + &input.schema.into(), + &policyset, + &input.entities, + std::iter::zip(requests, responses), + ) + .expect("failed to dump test case"); + } }); diff --git a/cedar-drt/fuzz/fuzz_targets/abac.rs b/cedar-drt/fuzz/fuzz_targets/abac.rs index 9a27d9cf0..7d199feb7 100644 --- a/cedar-drt/fuzz/fuzz_targets/abac.rs +++ b/cedar-drt/fuzz/fuzz_targets/abac.rs @@ -15,12 +15,143 @@ */ #![no_main] -mod abac_shared; - use cedar_drt::*; -use cedar_drt_inner::fuzz_target; +use cedar_drt_inner::*; +use cedar_policy_core::ast; +use cedar_policy_core::authorizer::Authorizer; +use cedar_policy_core::entities::Entities; +use cedar_policy_generators::{ + abac::{ABACPolicy, ABACRequest}, + hierarchy::{Hierarchy, HierarchyGenerator}, + schema::Schema, + settings::ABACSettings, +}; +use libfuzzer_sys::arbitrary::{self, Arbitrary, Unstructured}; +use log::{debug, info}; +use serde::Serialize; +use std::convert::TryFrom; + +/// Input expected by this fuzz target: +/// An ABAC hierarchy, policy, and 8 associated requests +#[derive(Debug, Clone, Serialize)] +pub struct FuzzTargetInput { + /// generated schema + #[serde(skip)] + pub schema: Schema, + /// generated hierarchy + #[serde(skip)] + pub hierarchy: Hierarchy, + /// generated policy + pub policy: ABACPolicy, + /// the requests to try for this hierarchy and policy. 
We try 8 requests per + /// policy/hierarchy + #[serde(skip)] + pub requests: [ABACRequest; 8], +} + +/// settings for this fuzz target +const SETTINGS: ABACSettings = ABACSettings { + match_types: false, + enable_extensions: true, + max_depth: 3, + max_width: 7, + enable_additional_attributes: false, + enable_like: true, + // ABAC fuzzing restricts the use of action because it is used to generate + // the corpus tests which will be run on Cedar and CedarCLI. + // These packages only expose the restricted action behavior. + enable_action_groups_and_attrs: false, + enable_arbitrary_func_call: true, + enable_unknowns: false, + enable_action_in_constraints: true, + enable_unspecified_apply_spec: true, +}; +impl<'a> Arbitrary<'a> for FuzzTargetInput { + fn arbitrary(u: &mut Unstructured<'a>) -> arbitrary::Result { + let schema = Schema::arbitrary(SETTINGS.clone(), u)?; + let hierarchy = schema.arbitrary_hierarchy(u)?; + let policy = schema.arbitrary_policy(&hierarchy, u)?; + let requests = [ + schema.arbitrary_request(&hierarchy, u)?, + schema.arbitrary_request(&hierarchy, u)?, + schema.arbitrary_request(&hierarchy, u)?, + schema.arbitrary_request(&hierarchy, u)?, + schema.arbitrary_request(&hierarchy, u)?, + schema.arbitrary_request(&hierarchy, u)?, + schema.arbitrary_request(&hierarchy, u)?, + schema.arbitrary_request(&hierarchy, u)?, + ]; + Ok(Self { + schema, + hierarchy, + policy, + requests, + }) + } + + fn size_hint(depth: usize) -> (usize, Option) { + arbitrary::size_hint::and_all(&[ + Schema::arbitrary_size_hint(depth), + HierarchyGenerator::size_hint(depth), + Schema::arbitrary_policy_size_hint(&SETTINGS, depth), + Schema::arbitrary_request_size_hint(depth), + Schema::arbitrary_request_size_hint(depth), + Schema::arbitrary_request_size_hint(depth), + Schema::arbitrary_request_size_hint(depth), + Schema::arbitrary_request_size_hint(depth), + Schema::arbitrary_request_size_hint(depth), + Schema::arbitrary_request_size_hint(depth), + 
Schema::arbitrary_request_size_hint(depth), + ]) + } +} + +// Simple fuzzing of ABAC hierarchy/policy/requests without respect to types. fuzz_target!(|input: abac_shared::FuzzTargetInput| { - let def_engine = JavaDefinitionalEngine::new().expect("failed to create definitional engine"); - abac_shared::fuzz(input, &def_engine); + initialize_log(); + let def_engine = LeanDefinitionalEngine::new(); + if let Ok(entities) = Entities::try_from(input.hierarchy) { + let mut policyset = ast::PolicySet::new(); + let policy: ast::StaticPolicy = input.policy.into(); + policyset.add_static(policy.clone()).unwrap(); + debug!("Policies: {policyset}"); + debug!("Entities: {entities}"); + let requests = input + .requests + .into_iter() + .map(Into::into) + .collect::>(); + + for request in requests.iter().cloned() { + debug!("Request: {request}"); + let (_, total_dur) = + time_function(|| run_auth_test(def_impl, request, &policyset, &entities)); + info!("{}{}", TOTAL_MSG, total_dur.as_nanos()); + } + if let Ok(test_name) = std::env::var("DUMP_TEST_NAME") { + // When the corpus is re-parsed, the policy will be given id "policy0". + // Recreate the policy set and compute responses here to account for this. 
+ let mut policyset = ast::PolicySet::new(); + let policy = policy.new_id(ast::PolicyID::from_string("policy0")); + policyset.add_static(policy).unwrap(); + let responses = requests + .iter() + .map(|request| { + let authorizer = Authorizer::new(); + authorizer.is_authorized(request.clone(), &policyset, &entities) + }) + .collect::>(); + let dump_dir = std::env::var("DUMP_TEST_DIR").unwrap_or_else(|_| ".".to_string()); + dump( + dump_dir, + &test_name, + &input.schema.into(), + &policyset, + &entities, + std::iter::zip(requests, responses), + ) + .expect("failed to dump test case"); + } + } }); diff --git a/cedar-drt/fuzz/fuzz_targets/abac_shared.rs b/cedar-drt/fuzz/fuzz_targets/abac_shared.rs deleted file mode 100644 index 0c80232d1..000000000 --- a/cedar-drt/fuzz/fuzz_targets/abac_shared.rs +++ /dev/null @@ -1,156 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -use cedar_drt::*; -use cedar_drt_inner::*; -use cedar_policy_core::ast; -use cedar_policy_core::authorizer::Authorizer; -use cedar_policy_core::entities::Entities; -use cedar_policy_generators::{ - abac::{ABACPolicy, ABACRequest}, - hierarchy::{Hierarchy, HierarchyGenerator}, - schema::Schema, - settings::ABACSettings, -}; -use libfuzzer_sys::arbitrary::{self, Arbitrary, Unstructured}; -use log::{debug, info}; -use serde::Serialize; -use std::convert::TryFrom; - -/// Input expected by this fuzz target: -/// An ABAC hierarchy, policy, and 8 associated requests -#[derive(Debug, Clone, Serialize)] -pub struct FuzzTargetInput { - /// generated schema - #[serde(skip)] - pub schema: Schema, - /// generated hierarchy - #[serde(skip)] - pub hierarchy: Hierarchy, - /// generated policy - pub policy: ABACPolicy, - /// the requests to try for this hierarchy and policy. We try 8 requests per - /// policy/hierarchy - #[serde(skip)] - pub requests: [ABACRequest; 8], -} - -/// settings for this fuzz target -const SETTINGS: ABACSettings = ABACSettings { - match_types: false, - enable_extensions: true, - max_depth: 3, - max_width: 7, - enable_additional_attributes: false, - enable_like: true, - // ABAC fuzzing restricts the use of action because it is used to generate - // the corpus tests which will be run on Cedar and CedarCLI. - // These packages only expose the restricted action behavior. 
- enable_action_groups_and_attrs: false, - enable_arbitrary_func_call: true, - enable_unknowns: false, - enable_action_in_constraints: true, - enable_unspecified_apply_spec: true, -}; - -impl<'a> Arbitrary<'a> for FuzzTargetInput { - fn arbitrary(u: &mut Unstructured<'a>) -> arbitrary::Result { - let schema = Schema::arbitrary(SETTINGS.clone(), u)?; - let hierarchy = schema.arbitrary_hierarchy(u)?; - let policy = schema.arbitrary_policy(&hierarchy, u)?; - let requests = [ - schema.arbitrary_request(&hierarchy, u)?, - schema.arbitrary_request(&hierarchy, u)?, - schema.arbitrary_request(&hierarchy, u)?, - schema.arbitrary_request(&hierarchy, u)?, - schema.arbitrary_request(&hierarchy, u)?, - schema.arbitrary_request(&hierarchy, u)?, - schema.arbitrary_request(&hierarchy, u)?, - schema.arbitrary_request(&hierarchy, u)?, - ]; - Ok(Self { - schema, - hierarchy, - policy, - requests, - }) - } - - fn size_hint(depth: usize) -> (usize, Option) { - arbitrary::size_hint::and_all(&[ - Schema::arbitrary_size_hint(depth), - HierarchyGenerator::size_hint(depth), - Schema::arbitrary_policy_size_hint(&SETTINGS, depth), - Schema::arbitrary_request_size_hint(depth), - Schema::arbitrary_request_size_hint(depth), - Schema::arbitrary_request_size_hint(depth), - Schema::arbitrary_request_size_hint(depth), - Schema::arbitrary_request_size_hint(depth), - Schema::arbitrary_request_size_hint(depth), - Schema::arbitrary_request_size_hint(depth), - Schema::arbitrary_request_size_hint(depth), - ]) - } -} - -// Simple fuzzing of ABAC hierarchy/policy/requests without respect to types. -// `def_impl` is a custom implementation to test against `cedar-policy`. 
-pub fn fuzz(input: FuzzTargetInput, def_impl: &impl CedarTestImplementation) { - initialize_log(); - if let Ok(entities) = Entities::try_from(input.hierarchy) { - let mut policyset = ast::PolicySet::new(); - let policy: ast::StaticPolicy = input.policy.into(); - policyset.add_static(policy.clone()).unwrap(); - debug!("Policies: {policyset}"); - debug!("Entities: {entities}"); - let requests = input - .requests - .into_iter() - .map(Into::into) - .collect::>(); - - for request in requests.iter().cloned() { - debug!("Request: {request}"); - let (_, total_dur) = - time_function(|| run_auth_test(def_impl, request, &policyset, &entities)); - info!("{}{}", TOTAL_MSG, total_dur.as_nanos()); - } - if let Ok(test_name) = std::env::var("DUMP_TEST_NAME") { - // When the corpus is re-parsed, the policy will be given id "policy0". - // Recreate the policy set and compute responses here to account for this. - let mut policyset = ast::PolicySet::new(); - let policy = policy.new_id(ast::PolicyID::from_string("policy0")); - policyset.add_static(policy).unwrap(); - let responses = requests - .iter() - .map(|request| { - let authorizer = Authorizer::new(); - authorizer.is_authorized(request.clone(), &policyset, &entities) - }) - .collect::>(); - let dump_dir = std::env::var("DUMP_TEST_DIR").unwrap_or_else(|_| ".".to_string()); - dump( - dump_dir, - &test_name, - &input.schema.into(), - &policyset, - &entities, - std::iter::zip(requests, responses), - ) - .expect("failed to dump test case"); - } - } -} diff --git a/cedar-drt/fuzz/fuzz_targets/abac_type_directed_shared.rs b/cedar-drt/fuzz/fuzz_targets/abac_type_directed_shared.rs deleted file mode 100644 index f1a58735e..000000000 --- a/cedar-drt/fuzz/fuzz_targets/abac_type_directed_shared.rs +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -use cedar_drt::*; -use cedar_drt_inner::*; -use cedar_policy_core::ast; -use cedar_policy_core::authorizer::Authorizer; -use cedar_policy_core::entities::Entities; -use cedar_policy_generators::{ - abac::{ABACPolicy, ABACRequest}, - err::Error, - hierarchy::HierarchyGenerator, - schema::Schema, - settings::ABACSettings, -}; -use libfuzzer_sys::arbitrary::{self, Arbitrary, Unstructured}; -use log::{debug, info}; -use serde::Serialize; -use std::convert::TryFrom; - -/// Input expected by this fuzz target: -/// An ABAC hierarchy, policy, and 8 associated requests -#[derive(Debug, Clone, Serialize)] -pub struct FuzzTargetInput { - /// generated schema - #[serde(skip)] - pub schema: Schema, - /// generated entity slice - #[serde(skip)] - pub entities: Entities, - /// generated policy - pub policy: ABACPolicy, - /// the requests to try for this hierarchy and policy. 
We try 8 requests per - /// policy/hierarchy - #[serde(skip)] - pub requests: [ABACRequest; 8], -} - -/// settings for this fuzz target -const SETTINGS: ABACSettings = ABACSettings { - match_types: true, - enable_extensions: true, - max_depth: 3, - max_width: 3, - enable_additional_attributes: false, - enable_like: true, - enable_action_groups_and_attrs: true, - enable_arbitrary_func_call: true, - enable_unknowns: false, - enable_action_in_constraints: true, - enable_unspecified_apply_spec: true, -}; - -impl<'a> Arbitrary<'a> for FuzzTargetInput { - fn arbitrary(u: &mut Unstructured<'a>) -> arbitrary::Result { - let schema = Schema::arbitrary(SETTINGS.clone(), u)?; - let hierarchy = schema.arbitrary_hierarchy(u)?; - let policy = schema.arbitrary_policy(&hierarchy, u)?; - - let requests = [ - schema.arbitrary_request(&hierarchy, u)?, - schema.arbitrary_request(&hierarchy, u)?, - schema.arbitrary_request(&hierarchy, u)?, - schema.arbitrary_request(&hierarchy, u)?, - schema.arbitrary_request(&hierarchy, u)?, - schema.arbitrary_request(&hierarchy, u)?, - schema.arbitrary_request(&hierarchy, u)?, - schema.arbitrary_request(&hierarchy, u)?, - ]; - let all_entities = Entities::try_from(hierarchy).map_err(|_| Error::NotEnoughData)?; - let entities = drop_some_entities(all_entities, u)?; - Ok(Self { - schema, - entities, - policy, - requests, - }) - } - - fn size_hint(depth: usize) -> (usize, Option) { - arbitrary::size_hint::and_all(&[ - Schema::arbitrary_size_hint(depth), - HierarchyGenerator::size_hint(depth), - Schema::arbitrary_policy_size_hint(&SETTINGS, depth), - Schema::arbitrary_request_size_hint(depth), - Schema::arbitrary_request_size_hint(depth), - Schema::arbitrary_request_size_hint(depth), - Schema::arbitrary_request_size_hint(depth), - Schema::arbitrary_request_size_hint(depth), - Schema::arbitrary_request_size_hint(depth), - Schema::arbitrary_request_size_hint(depth), - Schema::arbitrary_request_size_hint(depth), - ]) - } -} - -// Type-directed fuzzing of 
ABAC hierarchy/policy/requests. -// `def_impl` is a custom implementation to test against `cedar-policy`. -pub fn fuzz(input: FuzzTargetInput, def_impl: &impl CedarTestImplementation) { - initialize_log(); - let mut policyset = ast::PolicySet::new(); - let policy: ast::StaticPolicy = input.policy.into(); - policyset.add_static(policy.clone()).unwrap(); - debug!("Schema: {}\n", input.schema.schemafile_string()); - debug!("Policies: {policyset}\n"); - debug!("Entities: {}\n", input.entities); - - let requests = input - .requests - .into_iter() - .map(Into::into) - .collect::>(); - - for request in requests.iter().cloned() { - debug!("Request : {request}"); - let (rust_res, total_dur) = - time_function(|| run_auth_test(def_impl, request, &policyset, &input.entities)); - - info!("{}{}", TOTAL_MSG, total_dur.as_nanos()); - - // additional invariant: - // type-directed fuzzing should never produce wrong-number-of-arguments errors - assert_eq!( - rust_res - .diagnostics - .errors - .iter() - .map(ToString::to_string) - .filter(|err| err.contains("wrong number of arguments")) - .collect::>(), - Vec::::new() - ); - } - - if let Ok(test_name) = std::env::var("DUMP_TEST_NAME") { - // When the corpus is re-parsed, the policy will be given id "policy0". - // Recreate the policy set and compute responses here to account for this. 
- let mut policyset = ast::PolicySet::new(); - let policy = policy.new_id(ast::PolicyID::from_string("policy0")); - policyset.add_static(policy).unwrap(); - let responses = requests - .iter() - .map(|request| { - let authorizer = Authorizer::new(); - authorizer.is_authorized(request.clone(), &policyset, &input.entities) - }) - .collect::>(); - let dump_dir = std::env::var("DUMP_TEST_DIR").unwrap_or_else(|_| ".".to_string()); - dump( - dump_dir, - &test_name, - &input.schema.into(), - &policyset, - &input.entities, - std::iter::zip(requests, responses), - ) - .expect("failed to dump test case"); - } -} diff --git a/cedar-drt/fuzz/fuzz_targets/eval-type-directed-lean.rs b/cedar-drt/fuzz/fuzz_targets/eval-type-directed-lean.rs deleted file mode 100644 index 98cae77f7..000000000 --- a/cedar-drt/fuzz/fuzz_targets/eval-type-directed-lean.rs +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#![no_main] -mod eval_type_directed_shared; - -use cedar_drt::*; -use libfuzzer_sys::fuzz_target; - -fuzz_target!(|input: eval_type_directed_shared::FuzzTargetInput| { - let def_engine = LeanDefinitionalEngine::new(); - eval_type_directed_shared::fuzz(input, &def_engine); -}); diff --git a/cedar-drt/fuzz/fuzz_targets/eval-type-directed.rs b/cedar-drt/fuzz/fuzz_targets/eval-type-directed.rs index 74dece80c..89756e772 100644 --- a/cedar-drt/fuzz/fuzz_targets/eval-type-directed.rs +++ b/cedar-drt/fuzz/fuzz_targets/eval-type-directed.rs @@ -15,12 +15,108 @@ */ #![no_main] -mod eval_type_directed_shared; - +use cedar_drt::utils::expr_to_est; use cedar_drt::*; -use cedar_drt_inner::fuzz_target; +use cedar_drt_inner::*; +use cedar_policy_core::{ast::Expr, entities::Entities}; +use cedar_policy_generators::abac::ABACRequest; +use cedar_policy_generators::err::Error; +use cedar_policy_generators::hierarchy::HierarchyGenerator; +use cedar_policy_generators::schema::{arbitrary_schematype_with_bounded_depth, Schema}; +use cedar_policy_generators::settings::ABACSettings; +use libfuzzer_sys::arbitrary::{self, Arbitrary, Unstructured}; +use log::debug; +use serde::Serialize; +use std::convert::TryFrom; + +/// Input expected by this fuzz target: +/// An ABAC hierarchy, policy, and 8 associated requests +#[derive(Debug, Clone, Serialize)] +pub struct FuzzTargetInput { + /// generated schema + #[serde(skip)] + pub schema: Schema, + /// generated entity slice + #[serde(skip)] + pub entities: Entities, + /// generated expression + #[serde(serialize_with = "expr_to_est")] + pub expression: Expr, + /// the requests to try for this hierarchy and policy. 
We try 8 requests per + /// policy/hierarchy + #[serde(skip)] + pub request: ABACRequest, +} + +/// settings for this fuzz target +const SETTINGS: ABACSettings = ABACSettings { + match_types: true, + enable_extensions: true, + max_depth: 3, + max_width: 3, + enable_additional_attributes: false, + enable_like: true, + enable_action_groups_and_attrs: true, + enable_arbitrary_func_call: true, + enable_unknowns: false, + enable_action_in_constraints: true, + enable_unspecified_apply_spec: true, +}; + +impl<'a> Arbitrary<'a> for FuzzTargetInput { + fn arbitrary(u: &mut Unstructured<'a>) -> arbitrary::Result { + let schema = Schema::arbitrary(SETTINGS.clone(), u)?; + let hierarchy = schema.arbitrary_hierarchy(u)?; + let toplevel_type = arbitrary_schematype_with_bounded_depth( + &SETTINGS, + schema.entity_types(), + SETTINGS.max_depth, + u, + )?; + let expr_gen = schema.exprgenerator(Some(&hierarchy)); + let expression = + expr_gen.generate_expr_for_schematype(&toplevel_type, SETTINGS.max_depth, u)?; + + let request = schema.arbitrary_request(&hierarchy, u)?; + let all_entities = Entities::try_from(hierarchy).map_err(Error::EntitiesError)?; + let entities = drop_some_entities(all_entities, u)?; + Ok(Self { + schema, + entities, + expression, + request, + }) + } + + fn size_hint(depth: usize) -> (usize, Option) { + arbitrary::size_hint::and_all(&[ + Schema::arbitrary_size_hint(depth), + HierarchyGenerator::size_hint(depth), + Schema::arbitrary_policy_size_hint(&SETTINGS, depth), + Schema::arbitrary_request_size_hint(depth), + Schema::arbitrary_request_size_hint(depth), + Schema::arbitrary_request_size_hint(depth), + Schema::arbitrary_request_size_hint(depth), + Schema::arbitrary_request_size_hint(depth), + Schema::arbitrary_request_size_hint(depth), + Schema::arbitrary_request_size_hint(depth), + Schema::arbitrary_request_size_hint(depth), + ]) + } +} +// Type-directed fuzzing of expression evaluation. 
-fuzz_target!(|input: eval_type_directed_shared::FuzzTargetInput| { +fuzz_target!(|input: FuzzTargetInput| { - let def_engine = JavaDefinitionalEngine::new().expect("failed to create definitional engine"); - eval_type_directed_shared::fuzz(input, &def_engine); + initialize_log(); + let def_engine = LeanDefinitionalEngine::new(); + debug!("Schema: {}\n", input.schema.schemafile_string()); + debug!("expr: {}\n", input.expression); + debug!("Entities: {}\n", input.entities); + run_eval_test( + &def_engine, + input.request.into(), + &input.expression, + &input.entities, + SETTINGS.enable_extensions, + ) }); diff --git a/cedar-drt/fuzz/fuzz_targets/eval_type_directed_shared.rs b/cedar-drt/fuzz/fuzz_targets/eval_type_directed_shared.rs deleted file mode 100644 index b94b4a9ba..000000000 --- a/cedar-drt/fuzz/fuzz_targets/eval_type_directed_shared.rs +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -use cedar_drt::utils::expr_to_est; -use cedar_drt::*; -use cedar_drt_inner::*; -use cedar_policy_core::{ast::Expr, entities::Entities}; -use cedar_policy_generators::abac::ABACRequest; -use cedar_policy_generators::err::Error; -use cedar_policy_generators::hierarchy::HierarchyGenerator; -use cedar_policy_generators::schema::{arbitrary_schematype_with_bounded_depth, Schema}; -use cedar_policy_generators::settings::ABACSettings; -use libfuzzer_sys::arbitrary::{self, Arbitrary, Unstructured}; -use log::debug; -use serde::Serialize; -use std::convert::TryFrom; - -/// Input expected by this fuzz target: -/// An ABAC hierarchy, policy, and 8 associated requests -#[derive(Debug, Clone, Serialize)] -pub struct FuzzTargetInput { - /// generated schema - #[serde(skip)] - pub schema: Schema, - /// generated entity slice - #[serde(skip)] - pub entities: Entities, - /// generated expression - #[serde(serialize_with = "expr_to_est")] - pub expression: Expr, - /// the requests to try for this hierarchy and policy. 
We try 8 requests per - /// policy/hierarchy - #[serde(skip)] - pub request: ABACRequest, -} - -/// settings for this fuzz target -const SETTINGS: ABACSettings = ABACSettings { - match_types: true, - enable_extensions: true, - max_depth: 3, - max_width: 3, - enable_additional_attributes: false, - enable_like: true, - enable_action_groups_and_attrs: true, - enable_arbitrary_func_call: true, - enable_unknowns: false, - enable_action_in_constraints: true, - enable_unspecified_apply_spec: true, -}; - -impl<'a> Arbitrary<'a> for FuzzTargetInput { - fn arbitrary(u: &mut Unstructured<'a>) -> arbitrary::Result { - let schema = Schema::arbitrary(SETTINGS.clone(), u)?; - let hierarchy = schema.arbitrary_hierarchy(u)?; - let toplevel_type = arbitrary_schematype_with_bounded_depth( - &SETTINGS, - schema.entity_types(), - SETTINGS.max_depth, - u, - )?; - let expr_gen = schema.exprgenerator(Some(&hierarchy)); - let expression = - expr_gen.generate_expr_for_schematype(&toplevel_type, SETTINGS.max_depth, u)?; - - let request = schema.arbitrary_request(&hierarchy, u)?; - let all_entities = Entities::try_from(hierarchy).map_err(Error::EntitiesError)?; - let entities = drop_some_entities(all_entities, u)?; - Ok(Self { - schema, - entities, - expression, - request, - }) - } - - fn size_hint(depth: usize) -> (usize, Option) { - arbitrary::size_hint::and_all(&[ - Schema::arbitrary_size_hint(depth), - HierarchyGenerator::size_hint(depth), - Schema::arbitrary_policy_size_hint(&SETTINGS, depth), - Schema::arbitrary_request_size_hint(depth), - Schema::arbitrary_request_size_hint(depth), - Schema::arbitrary_request_size_hint(depth), - Schema::arbitrary_request_size_hint(depth), - Schema::arbitrary_request_size_hint(depth), - Schema::arbitrary_request_size_hint(depth), - Schema::arbitrary_request_size_hint(depth), - Schema::arbitrary_request_size_hint(depth), - ]) - } -} - -// Type-directed fuzzing of expression evaluation. 
-// `def_impl` is a custom implementation to test against `cedar-policy`. -pub fn fuzz(input: FuzzTargetInput, def_impl: &impl CedarTestImplementation) { - initialize_log(); - debug!("Schema: {}\n", input.schema.schemafile_string()); - debug!("expr: {}\n", input.expression); - debug!("Entities: {}\n", input.entities); - run_eval_test( - def_impl, - input.request.into(), - &input.expression, - &input.entities, - SETTINGS.enable_extensions, - ) -} diff --git a/cedar-drt/fuzz/fuzz_targets/rbac-authorizer-lean.rs b/cedar-drt/fuzz/fuzz_targets/rbac-authorizer-lean.rs deleted file mode 100644 index 4c7b001e4..000000000 --- a/cedar-drt/fuzz/fuzz_targets/rbac-authorizer-lean.rs +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#![no_main] -mod rbac_authorizer_shared; - -use cedar_drt::*; -use libfuzzer_sys::fuzz_target; - -fuzz_target!( - |input: rbac_authorizer_shared::AuthorizerInputAbstractEvaluator| { - let def_engine = LeanDefinitionalEngine::new(); - rbac_authorizer_shared::fuzz(input, &def_engine); - } -); diff --git a/cedar-drt/fuzz/fuzz_targets/rbac-authorizer.rs b/cedar-drt/fuzz/fuzz_targets/rbac-authorizer.rs index dbd02b588..2aee44164 100644 --- a/cedar-drt/fuzz/fuzz_targets/rbac-authorizer.rs +++ b/cedar-drt/fuzz/fuzz_targets/rbac-authorizer.rs @@ -15,15 +15,126 @@ */ #![no_main] -mod rbac_authorizer_shared; - use cedar_drt::*; -use libfuzzer_sys::fuzz_target; +use cedar_drt_inner::*; +use cedar_policy_core::ast; +use cedar_policy_core::entities::Entities; +use cedar_policy_core::extensions::Extensions; +use cedar_policy_core::parser; +use libfuzzer_sys::arbitrary::{self, Arbitrary}; +use serde::Serialize; + +#[derive(Arbitrary, Debug, Serialize)] +pub struct AuthorizerInputAbstractEvaluator { + /// Set of AbstractPolicy objects + policies: Vec, +} +#[derive(Arbitrary, Debug, PartialEq, Eq, Clone, Serialize)] +enum AbstractPolicy { + /// Permit policy that evaluates 'true' + PermitTrue, + /// Permit policy that evaluates 'false' + PermitFalse, + /// Permit policy that errors + PermitError, + /// Forbid policy that evaluates 'true' + ForbidTrue, + /// Forbid policy that evaluates 'false' + ForbidFalse, + /// Forbid policy that evaluates 'error' + ForbidError, +} + +impl AbstractPolicy { + /// Convert the `AbstractPolicy` into a `Policy` with the given `id` + fn into_policy(self, id: String) -> ast::StaticPolicy { + match self { + AbstractPolicy::PermitTrue => { + parser::parse_policy(Some(id), "permit(principal, action, resource);") + .expect("should be a valid policy") + } + AbstractPolicy::PermitFalse => parser::parse_policy( + Some(id), + "permit(principal, action, resource) when { 1 == 0 };", + ) + .expect("should be a valid policy"), + AbstractPolicy::PermitError 
=> parser::parse_policy( + Some(id), + "permit(principal, action, resource) when { 1 < \"hello\" };", + ) + .expect("should be a valid policy"), + AbstractPolicy::ForbidTrue => { + parser::parse_policy(Some(id), "forbid(principal, action, resource);") + .expect("should be a valid policy") + } + AbstractPolicy::ForbidFalse => parser::parse_policy( + Some(id), + "forbid(principal, action, resource) when { 1 == 0 };", + ) + .expect("should be a valid policy"), + AbstractPolicy::ForbidError => parser::parse_policy( + Some(id), + "forbid(principal, action, resource) when { 1 < \"hello\" };", + ) + .expect("should be a valid policy"), + } + } +} + +// This fuzz target is for differential-testing the `is_authorized()` +// functionality _assuming the correctness of the evaluator_. We use only +// trivial policies and requests, and focus on how the authorizer combines the +// results. fuzz_target!( - |input: rbac_authorizer_shared::AuthorizerInputAbstractEvaluator| { + |input: AuthorizerInputAbstractEvaluator| { - let def_engine = - JavaDefinitionalEngine::new().expect("failed to create definitional engine"); - rbac_authorizer_shared::fuzz(input, &def_engine); + let def_engine = LeanDefinitionalEngine::new(); + let policies = input + .policies + .iter() + .cloned() + .enumerate() + .map(|(i, p)| p.into_policy(format!("policy{i}"))); + let mut policyset = ast::PolicySet::new(); + for policy in policies { + policyset.add_static(policy).unwrap(); + } + assert_eq!(policyset.policies().count(), input.policies.len()); + let entities = Entities::new(); + let request = ast::Request::new( + ("User::\"alice\"".parse().expect("should be valid"), None), + ("Action::\"read\"".parse().expect("should be valid"), None), + ("Resource::\"foo\"".parse().expect("should be valid"), None), + ast::Context::empty(), + None::<&ast::RequestSchemaAllPass>, + Extensions::none(), + ) + .expect("we aren't doing request validation here, so new() can't fail"); + + // Check agreement with definitional engine.
Note that run_auth_test returns + // the result of the call to is_authorized. + let res = run_auth_test(&def_engine, request, &policyset, &entities); + + // Check the following property: there should be an error reported iff we + // had either PermitError or ForbidError + let should_error = input + .policies + .iter() + .any(|p| p == &AbstractPolicy::PermitError || p == &AbstractPolicy::ForbidError); + if should_error { + assert!(!res.diagnostics.errors.is_empty()); + } else { + // doing the assertion this way, rather than assert!(.is_empty()), gives + // us a better assertion-failure message (showing what items were + // present on the LHS) + assert_eq!( + res.diagnostics + .errors + .iter() + .map(ToString::to_string) + .collect::>(), + Vec::::new() + ); + } } ); diff --git a/cedar-drt/fuzz/fuzz_targets/rbac-lean.rs b/cedar-drt/fuzz/fuzz_targets/rbac-lean.rs deleted file mode 100644 index 1d26e801d..000000000 --- a/cedar-drt/fuzz/fuzz_targets/rbac-lean.rs +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -#![no_main] -mod rbac_shared; - -use cedar_drt::*; -use libfuzzer_sys::fuzz_target; - -fuzz_target!(|input: rbac_shared::FuzzTargetInput| { - let def_engine = LeanDefinitionalEngine::new(); - rbac_shared::fuzz(input, &def_engine); -}); diff --git a/cedar-drt/fuzz/fuzz_targets/rbac.rs b/cedar-drt/fuzz/fuzz_targets/rbac.rs index f902d601f..e3a0b64c6 100644 --- a/cedar-drt/fuzz/fuzz_targets/rbac.rs +++ b/cedar-drt/fuzz/fuzz_targets/rbac.rs @@ -15,12 +15,197 @@ */ #![no_main] -mod rbac_shared; - use cedar_drt::*; -use libfuzzer_sys::fuzz_target; +use cedar_drt_inner::*; +use cedar_policy_core::ast; +use cedar_policy_core::entities::Entities; +use cedar_policy_core::extensions::Extensions; +use cedar_policy_generators::err::Result; +use cedar_policy_generators::hierarchy::{ + AttributesMode, EntityUIDGenMode, HierarchyGenerator, HierarchyGeneratorMode, +}; +use cedar_policy_generators::policy::GeneratedLinkedPolicy; +use cedar_policy_generators::rbac::{RBACHierarchy, RBACPolicy, RBACRequest}; +use libfuzzer_sys::arbitrary::{self, Arbitrary, Unstructured}; +use log::info; +use serde::Serialize; +use std::convert::TryFrom; + +/// Input expected by this fuzz target: +/// An RBAC hierarchy, policy set, and 8 associated requests +#[derive(Debug, Clone, Serialize)] +pub struct FuzzTargetInput { + /// the hierarchy + #[serde(skip)] + pub hierarchy: RBACHierarchy, + /// The policy set is made up of groups, each of which consists of either a + /// single static policy or a template with one or more linked policies. + /// + /// We generate up to 2 groups with up to 4 linked policies each. We think + /// the engine is unlikely to have bugs that are only triggered by policy + /// sets larger than that. + pub policy_groups: Vec, + /// the requests to try for this hierarchy and policy set. 
We try 8 requests + /// per policy set / hierarchy + #[serde(skip)] + pub requests: [RBACRequest; 8], +} + +#[derive(Debug, Clone, Serialize)] +pub enum PolicyGroup { + StaticPolicy(RBACPolicy), + TemplateWithLinks { + template: RBACPolicy, + links: Vec, + }, +} + +impl std::fmt::Display for FuzzTargetInput { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + writeln!(f, "policy groups: {:?}", &self.policy_groups)?; + writeln!(f, "hierarchy: {}", &self.hierarchy)?; + writeln!(f, "request: {}", &self.requests[0])?; + writeln!(f, "request: {}", &self.requests[1])?; + writeln!(f, "request: {}", &self.requests[2])?; + writeln!(f, "request: {}", &self.requests[3])?; + writeln!(f, "request: {}", &self.requests[4])?; + writeln!(f, "request: {}", &self.requests[5])?; + writeln!(f, "request: {}", &self.requests[6])?; + writeln!(f, "request: {}", &self.requests[7])?; + Ok(()) + } +} + +fn arbitrary_vec<'a, T>( + u: &mut Unstructured<'a>, + min: Option, + max: Option, + mut f: impl FnMut(usize, &mut Unstructured<'a>) -> Result, +) -> Result> { + let mut v: Vec = vec![]; + u.arbitrary_loop(min, max, |u| { + v.push(f(v.len(), u)?); + Ok(std::ops::ControlFlow::Continue(())) + })?; + Ok(v) +} +fn arbitrary_vec_size_hint(_depth: usize) -> (usize, Option) { + (0, None) +} + +impl PolicyGroup { + fn arbitrary_for_hierarchy( + pg_idx: usize, + hierarchy: &RBACHierarchy, + u: &mut Unstructured<'_>, + ) -> arbitrary::Result { + // A policy ID collision would cause a DRT failure. The easiest way to + // prevent that is to generate the policy IDs following a fixed pattern + // rather than arbitrarily. We don't think the authorizer is likely to + // have bugs triggered by specific policy IDs, so the loss of coverage + // is unimportant. 
+ let policy = RBACPolicy::arbitrary_for_hierarchy( + Some(ast::PolicyID::from_string(format!("p{}", pg_idx))), + hierarchy, + true, + u, + )?; + if policy.has_slots() { + let links = arbitrary_vec(u, Some(1), Some(4), |l_idx, u| { + GeneratedLinkedPolicy::arbitrary( + ast::PolicyID::from_string(format!("t{}_l{}", pg_idx, l_idx)), + &policy, + hierarchy, + u, + ) + })?; + Ok(Self::TemplateWithLinks { + template: policy, + links, + }) + } else { + Ok(Self::StaticPolicy(policy)) + } + } +} + +impl<'a> Arbitrary<'a> for FuzzTargetInput { + fn arbitrary(u: &mut Unstructured<'a>) -> arbitrary::Result { + let hierarchy = RBACHierarchy( + HierarchyGenerator { + mode: HierarchyGeneratorMode::Arbitrary { + attributes_mode: AttributesMode::NoAttributes, + }, + uid_gen_mode: EntityUIDGenMode::default(), + num_entities: cedar_policy_generators::hierarchy::NumEntities::RangePerEntityType( + 0..=4, + ), + u, + extensions: Extensions::all_available(), + } + .generate()?, + ); + let policy_groups: Vec = arbitrary_vec(u, Some(1), Some(2), |idx, u| { + Ok(PolicyGroup::arbitrary_for_hierarchy(idx, &hierarchy, u)?) 
+ })?; + let requests = [ + RBACRequest::arbitrary_for_hierarchy(&hierarchy, u)?, + RBACRequest::arbitrary_for_hierarchy(&hierarchy, u)?, + RBACRequest::arbitrary_for_hierarchy(&hierarchy, u)?, + RBACRequest::arbitrary_for_hierarchy(&hierarchy, u)?, + RBACRequest::arbitrary_for_hierarchy(&hierarchy, u)?, + RBACRequest::arbitrary_for_hierarchy(&hierarchy, u)?, + RBACRequest::arbitrary_for_hierarchy(&hierarchy, u)?, + RBACRequest::arbitrary_for_hierarchy(&hierarchy, u)?, + ]; + Ok(Self { + hierarchy, + policy_groups, + requests, + }) + } + + fn size_hint(depth: usize) -> (usize, Option) { + arbitrary::size_hint::and_all(&[ + HierarchyGenerator::size_hint(depth), + arbitrary_vec_size_hint(depth), + RBACRequest::arbitrary_size_hint(depth), + RBACRequest::arbitrary_size_hint(depth), + RBACRequest::arbitrary_size_hint(depth), + RBACRequest::arbitrary_size_hint(depth), + RBACRequest::arbitrary_size_hint(depth), + RBACRequest::arbitrary_size_hint(depth), + RBACRequest::arbitrary_size_hint(depth), + RBACRequest::arbitrary_size_hint(depth), + ]) + } +} +// Fuzzing a single, pure-RBAC policy, with associated pure-RBAC hierarchy and +// pure-RBAC requests. 
-fuzz_target!(|input: rbac_shared::FuzzTargetInput| { +fuzz_target!(|input: FuzzTargetInput| { - let def_engine = JavaDefinitionalEngine::new().expect("failed to create definitional engine"); - rbac_shared::fuzz(input, &def_engine); + initialize_log(); + let def_engine = LeanDefinitionalEngine::new(); + if let Ok(entities) = Entities::try_from(input.hierarchy) { + let mut policyset = ast::PolicySet::new(); + for pg in input.policy_groups { + match pg { + PolicyGroup::StaticPolicy(p) => { + p.0.add_to_policyset(&mut policyset); + } + PolicyGroup::TemplateWithLinks { template, links } => { + template.0.add_to_policyset(&mut policyset); + for link in links { + link.add_to_policyset(&mut policyset); + } + } + }; + } + for rbac_request in input.requests.into_iter() { + let request = ast::Request::from(rbac_request); + let (_, dur) = + time_function(|| run_auth_test(&def_engine, request, &policyset, &entities)); + info!("{}{}", TOTAL_MSG, dur.as_nanos()); + } + } }); diff --git a/cedar-drt/fuzz/fuzz_targets/rbac_authorizer_shared.rs b/cedar-drt/fuzz/fuzz_targets/rbac_authorizer_shared.rs deleted file mode 100644 index 324f14117..000000000 --- a/cedar-drt/fuzz/fuzz_targets/rbac_authorizer_shared.rs +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -use cedar_drt::*; -use cedar_drt_inner::*; -use cedar_policy_core::ast; -use cedar_policy_core::entities::Entities; -use cedar_policy_core::extensions::Extensions; -use cedar_policy_core::parser; -use libfuzzer_sys::arbitrary::{self, Arbitrary}; -use serde::Serialize; - -#[derive(Arbitrary, Debug, Serialize)] -pub struct AuthorizerInputAbstractEvaluator { - /// Set of AbstractPolicy objects - policies: Vec, -} - -#[derive(Arbitrary, Debug, PartialEq, Eq, Clone, Serialize)] -enum AbstractPolicy { - /// Permit policy that evaluates 'true' - PermitTrue, - /// Permit policy that evaluates 'false' - PermitFalse, - /// Permit policy that errors - PermitError, - /// Forbid policy that evaluates 'true' - ForbidTrue, - /// Forbid policy that evaluates 'false' - ForbidFalse, - /// Forbid policy that evaluates 'error' - ForbidError, -} - -impl AbstractPolicy { - /// Convert the `AbstractPolicy` into a `Policy` with the given `id` - fn into_policy(self, id: String) -> ast::StaticPolicy { - match self { - AbstractPolicy::PermitTrue => { - parser::parse_policy(Some(id), "permit(principal, action, resource);") - .expect("should be a valid policy") - } - AbstractPolicy::PermitFalse => parser::parse_policy( - Some(id), - "permit(principal, action, resource) when { 1 == 0 };", - ) - .expect("should be a valid policy"), - AbstractPolicy::PermitError => parser::parse_policy( - Some(id), - "permit(principal, action, resource) when { 1 < \"hello\" };", - ) - .expect("should be a valid policy"), - AbstractPolicy::ForbidTrue => { - parser::parse_policy(Some(id), "forbid(principal, action, resource);") - .expect("should be a valid policy") - } - AbstractPolicy::ForbidFalse => parser::parse_policy( - Some(id), - "forbid(principal, action, resource) when { 1 == 0 };", - ) - .expect("should be a valid policy"), - AbstractPolicy::ForbidError => parser::parse_policy( - Some(id), - "forbid(principal, action, resource) when { 1 < \"hello\" };", - ) - .expect("should be a valid policy"), - 
} - } -} - -// This fuzz target is for differential-testing the `is_authorized()` -// functionality _assuming the correctness of the evaluator_. We use only -// trivial policies and requests, and focus on how the authorizer combines the -// results. -// -// `def_impl` is a custom implementation to test against `cedar-policy`. -pub fn fuzz(input: AuthorizerInputAbstractEvaluator, def_impl: &impl CedarTestImplementation) { - let policies = input - .policies - .iter() - .cloned() - .enumerate() - .map(|(i, p)| p.into_policy(format!("policy{i}"))); - let mut policyset = ast::PolicySet::new(); - for policy in policies { - policyset.add_static(policy).unwrap(); - } - assert_eq!(policyset.policies().count(), input.policies.len()); - let entities = Entities::new(); - let request = ast::Request::new( - ("User::\"alice\"".parse().expect("should be valid"), None), - ("Action::\"read\"".parse().expect("should be valid"), None), - ("Resource::\"foo\"".parse().expect("should be valid"), None), - ast::Context::empty(), - None::<&ast::RequestSchemaAllPass>, - Extensions::none(), - ) - .expect("we aren't doing request validation here, so new() can't fail"); - - // Check agreement with definitional engine. Note that run_auth_test returns - // the result of the call to is_authorized. 
- let res = run_auth_test(def_impl, request, &policyset, &entities); - - // Check the following property: there should be an error reported iff we - // had either PermitError or ForbidError - let should_error = input - .policies - .iter() - .any(|p| p == &AbstractPolicy::PermitError || p == &AbstractPolicy::ForbidError); - if should_error { - assert!(!res.diagnostics.errors.is_empty()); - } else { - // doing the assertion this way, rather than assert!(.is_empty()), gives - // us a better assertion-failure message (showing what items were - // present on the LHS) - assert_eq!( - res.diagnostics - .errors - .iter() - .map(ToString::to_string) - .collect::>(), - Vec::::new() - ); - } -} diff --git a/cedar-drt/fuzz/fuzz_targets/rbac_shared.rs b/cedar-drt/fuzz/fuzz_targets/rbac_shared.rs deleted file mode 100644 index 57edaf3fb..000000000 --- a/cedar-drt/fuzz/fuzz_targets/rbac_shared.rs +++ /dev/null @@ -1,210 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -use cedar_drt::*; -use cedar_drt_inner::*; -use cedar_policy_core::ast; -use cedar_policy_core::entities::Entities; -use cedar_policy_core::extensions::Extensions; -use cedar_policy_generators::err::Result; -use cedar_policy_generators::hierarchy::{ - AttributesMode, EntityUIDGenMode, HierarchyGenerator, HierarchyGeneratorMode, -}; -use cedar_policy_generators::policy::GeneratedLinkedPolicy; -use cedar_policy_generators::rbac::{RBACHierarchy, RBACPolicy, RBACRequest}; -use libfuzzer_sys::arbitrary::{self, Arbitrary, Unstructured}; -use log::info; -use serde::Serialize; -use std::convert::TryFrom; - -/// Input expected by this fuzz target: -/// An RBAC hierarchy, policy set, and 8 associated requests -#[derive(Debug, Clone, Serialize)] -pub struct FuzzTargetInput { - /// the hierarchy - #[serde(skip)] - pub hierarchy: RBACHierarchy, - /// The policy set is made up of groups, each of which consists of either a - /// single static policy or a template with one or more linked policies. - /// - /// We generate up to 2 groups with up to 4 linked policies each. We think - /// the engine is unlikely to have bugs that are only triggered by policy - /// sets larger than that. - pub policy_groups: Vec, - /// the requests to try for this hierarchy and policy set. 
We try 8 requests - /// per policy set / hierarchy - #[serde(skip)] - pub requests: [RBACRequest; 8], -} - -#[derive(Debug, Clone, Serialize)] -pub enum PolicyGroup { - StaticPolicy(RBACPolicy), - TemplateWithLinks { - template: RBACPolicy, - links: Vec, - }, -} - -impl std::fmt::Display for FuzzTargetInput { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - writeln!(f, "policy groups: {:?}", &self.policy_groups)?; - writeln!(f, "hierarchy: {}", &self.hierarchy)?; - writeln!(f, "request: {}", &self.requests[0])?; - writeln!(f, "request: {}", &self.requests[1])?; - writeln!(f, "request: {}", &self.requests[2])?; - writeln!(f, "request: {}", &self.requests[3])?; - writeln!(f, "request: {}", &self.requests[4])?; - writeln!(f, "request: {}", &self.requests[5])?; - writeln!(f, "request: {}", &self.requests[6])?; - writeln!(f, "request: {}", &self.requests[7])?; - Ok(()) - } -} - -fn arbitrary_vec<'a, T>( - u: &mut Unstructured<'a>, - min: Option, - max: Option, - mut f: impl FnMut(usize, &mut Unstructured<'a>) -> Result, -) -> Result> { - let mut v: Vec = vec![]; - u.arbitrary_loop(min, max, |u| { - v.push(f(v.len(), u)?); - Ok(std::ops::ControlFlow::Continue(())) - })?; - Ok(v) -} -fn arbitrary_vec_size_hint(_depth: usize) -> (usize, Option) { - (0, None) -} - -impl PolicyGroup { - fn arbitrary_for_hierarchy( - pg_idx: usize, - hierarchy: &RBACHierarchy, - u: &mut Unstructured<'_>, - ) -> arbitrary::Result { - // A policy ID collision would cause a DRT failure. The easiest way to - // prevent that is to generate the policy IDs following a fixed pattern - // rather than arbitrarily. We don't think the authorizer is likely to - // have bugs triggered by specific policy IDs, so the loss of coverage - // is unimportant. 
- let policy = RBACPolicy::arbitrary_for_hierarchy( - Some(ast::PolicyID::from_string(format!("p{}", pg_idx))), - hierarchy, - true, - u, - )?; - if policy.has_slots() { - let links = arbitrary_vec(u, Some(1), Some(4), |l_idx, u| { - GeneratedLinkedPolicy::arbitrary( - ast::PolicyID::from_string(format!("t{}_l{}", pg_idx, l_idx)), - &policy, - hierarchy, - u, - ) - })?; - Ok(Self::TemplateWithLinks { - template: policy, - links, - }) - } else { - Ok(Self::StaticPolicy(policy)) - } - } -} - -impl<'a> Arbitrary<'a> for FuzzTargetInput { - fn arbitrary(u: &mut Unstructured<'a>) -> arbitrary::Result { - let hierarchy = RBACHierarchy( - HierarchyGenerator { - mode: HierarchyGeneratorMode::Arbitrary { - attributes_mode: AttributesMode::NoAttributes, - }, - uid_gen_mode: EntityUIDGenMode::default(), - num_entities: cedar_policy_generators::hierarchy::NumEntities::RangePerEntityType( - 0..=4, - ), - u, - extensions: Extensions::all_available(), - } - .generate()?, - ); - let policy_groups: Vec = arbitrary_vec(u, Some(1), Some(2), |idx, u| { - Ok(PolicyGroup::arbitrary_for_hierarchy(idx, &hierarchy, u)?) 
- })?; - let requests = [ - RBACRequest::arbitrary_for_hierarchy(&hierarchy, u)?, - RBACRequest::arbitrary_for_hierarchy(&hierarchy, u)?, - RBACRequest::arbitrary_for_hierarchy(&hierarchy, u)?, - RBACRequest::arbitrary_for_hierarchy(&hierarchy, u)?, - RBACRequest::arbitrary_for_hierarchy(&hierarchy, u)?, - RBACRequest::arbitrary_for_hierarchy(&hierarchy, u)?, - RBACRequest::arbitrary_for_hierarchy(&hierarchy, u)?, - RBACRequest::arbitrary_for_hierarchy(&hierarchy, u)?, - ]; - Ok(Self { - hierarchy, - policy_groups, - requests, - }) - } - - fn size_hint(depth: usize) -> (usize, Option) { - arbitrary::size_hint::and_all(&[ - HierarchyGenerator::size_hint(depth), - arbitrary_vec_size_hint(depth), - RBACRequest::arbitrary_size_hint(depth), - RBACRequest::arbitrary_size_hint(depth), - RBACRequest::arbitrary_size_hint(depth), - RBACRequest::arbitrary_size_hint(depth), - RBACRequest::arbitrary_size_hint(depth), - RBACRequest::arbitrary_size_hint(depth), - RBACRequest::arbitrary_size_hint(depth), - RBACRequest::arbitrary_size_hint(depth), - ]) - } -} - -// Fuzzing a single, pure-RBAC policy, with associated pure-RBAC hierarchy and -// pure-RBAC requests. `def_impl` is a custom implementation to test against -// `cedar-policy`. 
-pub fn fuzz(input: FuzzTargetInput, def_impl: &impl CedarTestImplementation) { - initialize_log(); - if let Ok(entities) = Entities::try_from(input.hierarchy) { - let mut policyset = ast::PolicySet::new(); - for pg in input.policy_groups { - match pg { - PolicyGroup::StaticPolicy(p) => { - p.0.add_to_policyset(&mut policyset); - } - PolicyGroup::TemplateWithLinks { template, links } => { - template.0.add_to_policyset(&mut policyset); - for link in links { - link.add_to_policyset(&mut policyset); - } - } - }; - } - for rbac_request in input.requests.into_iter() { - let request = ast::Request::from(rbac_request); - let (_, dur) = - time_function(|| run_auth_test(def_impl, request, &policyset, &entities)); - info!("{}{}", TOTAL_MSG, dur.as_nanos()); - } - } -} diff --git a/cedar-drt/fuzz/fuzz_targets/strict-validation-drt-type-directed-lean.rs b/cedar-drt/fuzz/fuzz_targets/strict-validation-drt-type-directed-lean.rs deleted file mode 100644 index d4d924a26..000000000 --- a/cedar-drt/fuzz/fuzz_targets/strict-validation-drt-type-directed-lean.rs +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#![no_main] -mod strict_validation_drt_shared; - -use cedar_drt::*; -use libfuzzer_sys::fuzz_target; - -fuzz_target!(|input: strict_validation_drt_shared::FuzzTargetInput| { - let def_engine = LeanDefinitionalEngine::new(); - strict_validation_drt_shared::fuzz(input, &def_engine); -}); diff --git a/cedar-drt/fuzz/fuzz_targets/strict-validation-drt-type-directed.rs b/cedar-drt/fuzz/fuzz_targets/strict-validation-drt-type-directed.rs deleted file mode 100644 index 885ac4ca5..000000000 --- a/cedar-drt/fuzz/fuzz_targets/strict-validation-drt-type-directed.rs +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#![no_main] -mod strict_validation_drt_shared; - -use cedar_drt::*; -use libfuzzer_sys::fuzz_target; - -fuzz_target!(|input: strict_validation_drt_shared::FuzzTargetInput| { - let def_engine = JavaDefinitionalEngine::new().expect("failed to create definitional engine"); - strict_validation_drt_shared::fuzz(input, &def_engine); -}); diff --git a/cedar-drt/fuzz/fuzz_targets/strict_validation_drt_shared.rs b/cedar-drt/fuzz/fuzz_targets/strict_validation_drt_shared.rs deleted file mode 100644 index e10e16b91..000000000 --- a/cedar-drt/fuzz/fuzz_targets/strict_validation_drt_shared.rs +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -use cedar_drt::*; -use cedar_drt_inner::*; -use cedar_policy_core::ast; -use cedar_policy_generators::{abac::ABACPolicy, schema::Schema, settings::ABACSettings}; -use libfuzzer_sys::arbitrary::{self, Arbitrary, Unstructured}; -use log::{debug, info}; -use serde::Serialize; - -/// Input expected by this fuzz target -#[derive(Debug, Clone, Serialize)] -pub struct FuzzTargetInput { - /// generated schema - #[serde(skip)] - pub schema: Schema, - /// generated policy - pub policy: ABACPolicy, -} - -/// settings for this fuzz target -const SETTINGS: ABACSettings = ABACSettings { - match_types: true, - enable_extensions: true, - max_depth: 7, - max_width: 7, - enable_additional_attributes: true, - enable_like: true, - enable_action_groups_and_attrs: true, - enable_arbitrary_func_call: true, - enable_unknowns: false, - enable_action_in_constraints: true, - enable_unspecified_apply_spec: true, -}; - -impl<'a> Arbitrary<'a> for FuzzTargetInput { - fn arbitrary(u: &mut Unstructured<'a>) -> arbitrary::Result { - let schema: Schema = Schema::arbitrary(SETTINGS.clone(), u)?; - let hierarchy = schema.arbitrary_hierarchy(u)?; - let policy = schema.arbitrary_policy(&hierarchy, u)?; - Ok(Self { schema, policy }) - } - - fn size_hint(depth: usize) -> (usize, Option) { - arbitrary::size_hint::and_all(&[ - Schema::arbitrary_size_hint(depth), - Schema::arbitrary_policy_size_hint(&SETTINGS, depth), - ]) - } -} - -// 
Type-directed fuzzing of strict validation. -// `def_impl` is a custom implementation to test against `cedar-policy`. -pub fn fuzz(input: FuzzTargetInput, def_impl: &impl CedarTestImplementation) { - initialize_log(); - - // generate a schema - if let Ok(schema) = ValidatorSchema::try_from(input.schema) { - debug!("Schema: {:?}", schema); - - // generate a policy - let mut policyset = ast::PolicySet::new(); - let policy: ast::StaticPolicy = input.policy.into(); - policyset.add_static(policy).unwrap(); - debug!("Policies: {policyset}"); - - // run the policy through both validators and compare the result - let (_, total_dur) = - time_function(|| run_val_test(def_impl, schema, &policyset, ValidationMode::Strict)); - info!("{}{}", TOTAL_MSG, total_dur.as_nanos()); - } -} diff --git a/cedar-drt/fuzz/fuzz_targets/validation-drt-type-directed.rs b/cedar-drt/fuzz/fuzz_targets/validation-drt-type-directed.rs index 2d8dd71db..b705c6ecf 100644 --- a/cedar-drt/fuzz/fuzz_targets/validation-drt-type-directed.rs +++ b/cedar-drt/fuzz/fuzz_targets/validation-drt-type-directed.rs @@ -25,7 +25,7 @@ use serde::Serialize; /// Input expected by this fuzz target #[derive(Debug, Clone, Serialize)] -struct FuzzTargetInput { +pub struct FuzzTargetInput { /// generated schema #[serde(skip)] pub schema: Schema, @@ -64,9 +64,10 @@ impl<'a> Arbitrary<'a> for FuzzTargetInput { } } -// The main fuzz target -fuzz_target!(|input: FuzzTargetInput| { +// Type-directed fuzzing of (strict) validation. 
+fuzz_target!(|input: strict_validation_drt_shared::FuzzTargetInput| { initialize_log(); + let def_engine = LeanDefinitionalEngine::new(); // generate a schema if let Ok(schema) = ValidatorSchema::try_from(input.schema) { @@ -79,16 +80,8 @@ fuzz_target!(|input: FuzzTargetInput| { debug!("Policies: {policyset}"); // run the policy through both validators and compare the result - let java_def_engine = - JavaDefinitionalEngine::new().expect("failed to create definitional engine"); - let (_, total_dur) = time_function(|| { - run_val_test( - &java_def_engine, - schema, - &policyset, - ValidationMode::Permissive, - ) - }); + let (_, total_dur) = + time_function(|| run_val_test(def_impl, schema, &policyset, ValidationMode::Strict)); info!("{}{}", TOTAL_MSG, total_dur.as_nanos()); } }); diff --git a/cedar-drt/fuzz/fuzz_targets/validation-drt.rs b/cedar-drt/fuzz/fuzz_targets/validation-drt.rs deleted file mode 100644 index 2e9598f2f..000000000 --- a/cedar-drt/fuzz/fuzz_targets/validation-drt.rs +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#![no_main] -use cedar_drt::*; -use cedar_drt_inner::*; -use cedar_policy_core::ast; -use cedar_policy_generators::{abac::ABACPolicy, schema::Schema, settings::ABACSettings}; -use libfuzzer_sys::arbitrary::{self, Arbitrary, Unstructured}; -use log::{debug, info}; -use serde::Serialize; - -/// Input expected by this fuzz target -#[derive(Debug, Clone, Serialize)] -struct FuzzTargetInput { - /// generated schema - #[serde(skip)] - pub schema: Schema, - /// generated policy - pub policy: ABACPolicy, -} - -/// settings for this fuzz target -const SETTINGS: ABACSettings = ABACSettings { - match_types: false, - enable_extensions: true, - max_depth: 7, - max_width: 7, - enable_additional_attributes: true, - enable_like: true, - enable_action_groups_and_attrs: true, - enable_arbitrary_func_call: true, - enable_unknowns: false, - enable_action_in_constraints: true, - enable_unspecified_apply_spec: true, -}; - -impl<'a> Arbitrary<'a> for FuzzTargetInput { - fn arbitrary(u: &mut Unstructured<'a>) -> arbitrary::Result { - let schema: Schema = Schema::arbitrary(SETTINGS.clone(), u)?; - let hierarchy = schema.arbitrary_hierarchy(u)?; - let policy = schema.arbitrary_policy(&hierarchy, u)?; - Ok(Self { schema, policy }) - } - - fn size_hint(depth: usize) -> (usize, Option) { - arbitrary::size_hint::and_all(&[ - Schema::arbitrary_size_hint(depth), - Schema::arbitrary_policy_size_hint(&SETTINGS, depth), - ]) - } -} - -// The main fuzz target -fuzz_target!(|input: FuzzTargetInput| { - initialize_log(); - - // generate a schema - if let Ok(schema) = ValidatorSchema::try_from(input.schema) { - debug!("Schema: {:?}", schema); - - // generate a policy - let mut policyset = ast::PolicySet::new(); - let policy: ast::StaticPolicy = input.policy.into(); - policyset.add_static(policy).unwrap(); - debug!("Policies: {policyset}"); - - // run the policy through both validators and compare the result - let java_def_engine = - JavaDefinitionalEngine::new().expect("failed to create 
definitional engine"); - let (_, total_dur) = time_function(|| { - run_val_test( - &java_def_engine, - schema, - &policyset, - ValidationMode::Permissive, - ) - }); - info!("{}{}", TOTAL_MSG, total_dur.as_nanos()); - } -}); diff --git a/cedar-drt/set_env_vars.sh b/cedar-drt/set_env_vars.sh index 6b8e6a194..8cfa9f022 100644 --- a/cedar-drt/set_env_vars.sh +++ b/cedar-drt/set_env_vars.sh @@ -1,32 +1,5 @@ #!/bin/bash -# Set JAVA_HOME -if [ -z "${JAVA_HOME-}" ]; then - # Idea from https://www.baeldung.com/find-java-home. - export JAVA_HOME="$(java -XshowSettings:properties -version 2>&1 | sed -ne 's,^ *java\.home = ,,p')" -fi - -# Set LD_LIBRARY_PATH and DYLD_LIBRARY_PATH (for macOS) -function add_lib_to_path { - lib_dirs=($(find "$JAVA_HOME" -name "lib${1}.*" -exec dirname {} \;)) - if [ ${#lib_dirs[@]} = 1 ]; then - # Accessing an array element in both bash and zsh: https://stackoverflow.com/a/56311706 - lib_dir=${lib_dirs[@]:0:1} - export LD_LIBRARY_PATH=${LD_LIBRARY_PATH+$LD_LIBRARY_PATH:}${lib_dir} - export DYLD_LIBRARY_PATH=${DYLD_LIBRARY_PATH+$DYLD_LIBRARY_PATH:}${lib_dir} - else - echo >&2 "Error: Failed to find lib${1}" - fi -} -add_lib_to_path jvm -add_lib_to_path jli -unset -f add_lib_to_path - -# Set CLASSPATH -if [ -f "$(pwd)/../cedar-dafny-java-wrapper/build/libs/cedar-dafny-java-wrapper.jar" ]; then - export CLASSPATH="$(< ../cedar-dafny-java-wrapper/build/runtimeClasspath.txt):$(pwd)/../cedar-dafny-java-wrapper/build/libs/cedar-dafny-java-wrapper.jar" -fi - # Set environment variables for Lean if ! command -v lean &> /dev/null; then echo "lean executable could not be found. is lean installed?" diff --git a/cedar-drt/src/dafny_java_impl.rs b/cedar-drt/src/dafny_java_impl.rs deleted file mode 100644 index 9426d5208..000000000 --- a/cedar-drt/src/dafny_java_impl.rs +++ /dev/null @@ -1,449 +0,0 @@ -/* - * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -//! Implementation of the [`CedarTestImplementation`] trait for the Cedar Java -//! implementation extracted from the Dafny specification. - -use crate::cedar_test_impl::*; -use crate::definitional_request_types::*; -use crate::logger::*; -use cedar_policy::frontend::is_authorized::InterfaceResponse; -use cedar_policy::integration_testing::{CustomCedarImpl, IntegrationTestValidationResult}; -use cedar_policy_core::ast::{Expr, Value}; -pub use cedar_policy_core::*; -pub use cedar_policy_validator::{ValidationMode, ValidatorSchema}; -pub use entities::Entities; -use jni::objects::{JObject, JString, JValue}; -use jni::{JNIVersion, JavaVM}; -use lazy_static::lazy_static; -use log::info; -use serde::Deserialize; - -/// Times to (de)serialize JSON content sent to / received from the Dafny-Java -/// implementation. -pub const RUST_SERIALIZATION_MSG: &str = "rust_serialization (ns) : "; -pub const RUST_DESERIALIZATION_MSG: &str = "rust_deserialization (ns) : "; - -/// Times for cedar-policy authorization and validation. -pub const RUST_AUTH_MSG: &str = "rust_auth (ns) : "; -pub const RUST_VALIDATION_MSG: &str = "rust_validation (ns) : "; - -/// Times for JSON (de)serialization, authorization, and validation as reported -/// by the Dafny-Java implementation. 
-pub const JAVA_SERIALIZATION_MSG: &str = "java_serialization (ns) : "; -pub const JAVA_DESERIALIZATION_MSG: &str = "java_deserialization (ns) : "; -pub const JAVA_AUTH_MSG: &str = "java_auth (ns) : "; -pub const JAVA_VALIDATION_MSG: &str = "java_validation (ns) : "; - -lazy_static! { - /// The JVM instance - static ref JVM: JavaVM = { - let classpath_opt = match std::env::var("CLASSPATH") { - Ok(val) => format!("-Djava.class.path={val}"), - Err(std::env::VarError::NotPresent) => String::new(), - Err(std::env::VarError::NotUnicode(_)) => panic!("classpath not unicode"), - }; - let jvm_args = jni::InitArgsBuilder::new() - .version(JNIVersion::V8) - .option("-Xcheck:jni") - //.option("-verbose:class") - .option(&classpath_opt) - .build() - .expect("failed to create JVM args"); - JavaVM::new(jvm_args).expect("failed to create JVM instance") - }; -} - -#[derive(Debug, Deserialize)] -pub struct AuthorizationResponse { - pub serialization_nanos: i64, - pub deserialization_nanos: i64, - pub auth_nanos: i64, - pub response: InterfaceResponse, -} - -#[derive(Debug, Deserialize)] -#[repr(transparent)] -pub struct EvaluationResponse { - pub matches: bool, -} - -#[derive(Debug, Deserialize)] -pub struct ValidationResponseInner { - #[serde(rename = "validationErrors")] - pub validation_errors: Vec, - #[serde(rename = "parseErrors")] - pub parse_errors: Vec, -} - -#[derive(Debug, Deserialize)] -pub struct ValidationResponse { - pub serialization_nanos: i64, - pub deserialization_nanos: i64, - pub validation_nanos: i64, - pub response: ValidationResponseInner, -} - -/// The lifetime parameter 'j is the lifetime of the JVM instance -pub struct JavaDefinitionalEngine<'j> { - /// Thread attached to the JVM - thread: jni::AttachGuard<'j>, - /// Definitional authorizer instance - def_authorizer: JObject<'j>, - /// Definitional validator instance - def_validator: JObject<'j>, -} - -impl<'j> JavaDefinitionalEngine<'j> { - /// Create a new `JavaDefinitionalEngine` instance. 
- /// - /// This is a relatively expensive operation, so avoid calling it frequently. - pub fn new() -> Result { - let thread = JVM - .attach_current_thread() - .map_err(|e| format!("failed to attach current thread: {e}"))?; - let def_authorizer_class = thread - .find_class("com/CedarDefinitionalImplementation/DefinitionalEngine") - .map_err(|e| format!("failed to find class: {e}"))?; - let def_authorizer = thread - .new_object(def_authorizer_class, "()V", &[]) - .map_err(|e| format!("failed to construct DefinitionalEngine instance: {e}"))?; - let def_validator_class = thread - .find_class("com/CedarDefinitionalImplementation/DefinitionalValidator") - .map_err(|e| format!("failed to find class: {e}"))?; - let def_validator = thread - .new_object(def_validator_class, "()V", &[]) - .map_err(|e| format!("failed to construct DefinitionalValidator instance: {e}"))?; - Ok(Self { - thread, - def_authorizer, - def_validator, - }) - } - - fn serialize_eval_request( - &self, - request: &ast::Request, - entities: &Entities, - expr: &Expr, - expected: Option<&Expr>, - ) -> JString { - let request: String = serde_json::to_string(&EvaluationRequest { - request, - entities, - expr, - expected, - }) - .expect("Failed to serialize request"); - self.thread - .new_string(request) - .expect("failed to create Java object for eval request string") - } - - fn deserialize_eval_response(&self, response: JValue) -> bool { - let jstr = response - .l() - .unwrap_or_else(|_| { - panic!( - "expected eval_str to return an Object (String), but it returned {:?}", - response - ) - }) - .into(); - let response: String = self - .thread - .get_string(jstr) - .expect("Failed to get JavaStr") - .into(); - self.thread - .delete_local_ref(*jstr) - .expect("Deletion failed"); - let r: EvaluationResponse = serde_json::from_str(&response).unwrap_or_else(|_| { - panic!( - "JSON response received from the definitional engine was malformed: \n{response}" - ) - }); - r.matches - } - - pub fn eval( - &self, - 
request: &ast::Request, - entities: &Entities, - expr: &Expr, - expected: Option, - ) -> bool { - let expected_as_expr = expected.map(|v| v.into()); - let jstr = self.serialize_eval_request(request, entities, expr, expected_as_expr.as_ref()); - let response = self.thread.call_method( - self.def_authorizer, - "eval_str", - "(Ljava/lang/String;)Ljava/lang/String;", - &[jstr.into()], - ); - match response { - Ok(v) => self.deserialize_eval_response(v), - Err(e) => { - self.thread - .exception_describe() - .expect("Failed to print exception information"); - panic!("JVM Exception Occurred!: {:?}", e); - } - } - } - - fn serialize_auth_request( - &self, - request: &ast::Request, - policies: &ast::PolicySet, - entities: &Entities, - ) -> JString { - let request: String = serde_json::to_string(&AuthorizationRequest { - request, - policies, - entities, - }) - .expect("Failed to serialize request, policies, or entities"); - self.thread - .new_string(request) - .expect("failed to create Java object for authorization request string") - } - - fn deserialize_auth_response(&self, response: JValue) -> InterfaceResponse { - let jresponse: JString = response - .l() - .unwrap_or_else(|_| { - panic!( - "expected isAuthorized_str to return an Object (String), but it returned {:?}", - response - ) - }) - .into(); - let response: String = self - .thread - .get_string(jresponse) - .expect("failed to get JavaStr") - .into(); - self.thread - .delete_local_ref(*jresponse) - .expect("Deletion failed"); - let d_response: AuthorizationResponse = serde_json::from_str(&response).unwrap_or_else(|_| { - panic!( - "JSON response received from the definitional engine was the wrong format:\n{response}", - ) - }); - - info!( - "{}{}", - JAVA_SERIALIZATION_MSG, d_response.serialization_nanos - ); - info!( - "{}{}", - JAVA_DESERIALIZATION_MSG, d_response.deserialization_nanos - ); - info!("{}{}", JAVA_AUTH_MSG, d_response.auth_nanos); - - d_response.response - } - - /// Ask the definitional engine 
whether `isAuthorized` for the given `request`, - /// `policies`, and `entities` - pub fn is_authorized( - &self, - request: &ast::Request, - policies: &ast::PolicySet, - entities: &Entities, - ) -> InterfaceResponse { - let (jstring, dur) = - time_function(|| self.serialize_auth_request(request, policies, entities)); - info!("{}{}", RUST_SERIALIZATION_MSG, dur.as_nanos()); - let response = self.thread.call_method( - self.def_authorizer, - "isAuthorized_str", - // https://stackoverflow.com/questions/8066253/compute-a-java-functions-signature - "(Ljava/lang/String;)Ljava/lang/String;", - &[jstring.into()], - ); - if response.is_err() { - self.thread - .exception_describe() - .expect("Failed to print exception information"); - panic!("JVM Exception Occurred!"); - } - let response: JValue = response.expect("failed to call Java isAuthorized_str"); - let (response, dur) = time_function(|| self.deserialize_auth_response(response)); - info!("{}{}", RUST_DESERIALIZATION_MSG, dur.as_nanos()); - self.thread - .delete_local_ref(*jstring) - .expect("Deletion failed"); - response - } - - fn serialize_val_request( - &self, - schema: &ValidatorSchema, - policies: &ast::PolicySet, - mode: ValidationMode, - ) -> JString { - let request: String = serde_json::to_string(&ValidationRequest { - schema, - policies, - mode, - }) - .expect("Failed to serialize schema or policies"); - self.thread - .new_string(request) - .expect("failed to create Java object for validation request string") - } - - fn deserialize_val_response(&self, response: JValue) -> InterfaceValidationResult { - let jresponse: JString = response - .l() - .unwrap_or_else(|_| { - panic!( - "expected validate_str to return an Object (String), but it returned {:?}", - response - ) - }) - .into(); - let response: String = self - .thread - .get_string(jresponse) - .expect("failed to get JavaStr") - .into(); - self.thread - .delete_local_ref(*jresponse) - .expect("Deletion failed"); - let d_response: ValidationResponse = - 
serde_json::from_str(&response).unwrap_or_else(|_| { - panic!( - "JSON response received from the definitional validator was the wrong format:\n{response}", - ) - }); - - info!( - "{}{}", - JAVA_SERIALIZATION_MSG, d_response.serialization_nanos - ); - info!( - "{}{}", - JAVA_DESERIALIZATION_MSG, d_response.deserialization_nanos - ); - info!("{}{}", JAVA_VALIDATION_MSG, d_response.validation_nanos); - - assert_eq!( - d_response.response.parse_errors, - Vec::::new(), - "Dafny json parsing failed", - ); - InterfaceValidationResult { - validation_errors: d_response.response.validation_errors, - } - } - - /// Use the definitional validator to validate the given `policies` given a `schema` - pub fn validate( - &self, - schema: &ValidatorSchema, - policies: &ast::PolicySet, - mode: ValidationMode, - ) -> InterfaceValidationResult { - let (jstring, dur) = time_function(|| self.serialize_val_request(schema, policies, mode)); - info!("{}{}", RUST_SERIALIZATION_MSG, dur.as_nanos()); - let response = self.thread.call_method( - self.def_validator, - "validate_str", - // https://stackoverflow.com/questions/8066253/compute-a-java-functions-signature - "(Ljava/lang/String;)Ljava/lang/String;", - &[jstring.into()], - ); - if response.is_err() { - self.thread - .exception_describe() - .expect("Failed to print exception information"); - panic!("JVM Exception Occurred!"); - } - let response: JValue = response.expect("failed to call Java validate_str"); - let (response, dur) = time_function(|| self.deserialize_val_response(response)); - info!("{}{}", RUST_DESERIALIZATION_MSG, dur.as_nanos()); - self.thread - .delete_local_ref(*jstring) - .expect("Deletion failed"); - response - } -} - -impl<'j> CedarTestImplementation for JavaDefinitionalEngine<'j> { - fn is_authorized( - &self, - request: ast::Request, - policies: &ast::PolicySet, - entities: &Entities, - ) -> InterfaceResult { - Ok(self.is_authorized(&request, policies, entities)) - } - - fn interpret( - &self, - request: 
ast::Request, - entities: &Entities, - expr: &Expr, - expected: Option, - ) -> InterfaceResult { - Ok(self.eval(&request, entities, expr, expected)) - } - - fn validate( - &self, - schema: &cedar_policy_validator::ValidatorSchema, - policies: &ast::PolicySet, - mode: ValidationMode, - ) -> InterfaceResult { - Ok(self.validate(schema, policies, mode)) - } - - fn error_comparison_mode(&self) -> ErrorComparisonMode { - ErrorComparisonMode::Ignore - } -} - -/// Implementation of the trait used for integration testing. -impl<'j> CustomCedarImpl for JavaDefinitionalEngine<'j> { - fn is_authorized( - &self, - request: &ast::Request, - policies: &ast::PolicySet, - entities: &Entities, - ) -> InterfaceResponse { - self.is_authorized(request, policies, entities) - } - - fn validate( - &self, - schema: cedar_policy_validator::ValidatorSchema, - policies: &ast::PolicySet, - ) -> cedar_policy::integration_testing::IntegrationTestValidationResult { - let definitional_res = self.validate( - &schema, - policies, - cedar_policy_validator::ValidationMode::default(), - ); - IntegrationTestValidationResult { - validation_passed: definitional_res.validation_passed(), - validation_errors_debug: format!("{:?}", definitional_res.validation_errors), - } - } -} diff --git a/cedar-drt/src/lib.rs b/cedar-drt/src/lib.rs index a1383dd82..45ab1b3eb 100644 --- a/cedar-drt/src/lib.rs +++ b/cedar-drt/src/lib.rs @@ -15,14 +15,12 @@ */ mod cedar_test_impl; -mod dafny_java_impl; mod definitional_request_types; mod lean_impl; mod logger; pub mod utils; pub use cedar_test_impl::*; -pub use dafny_java_impl::*; pub use definitional_request_types::*; pub use lean_impl::*; pub use logger::*; diff --git a/cedar-drt/tests/integration_tests.rs b/cedar-drt/tests/integration_tests.rs index 06d754f8d..95aa58569 100644 --- a/cedar-drt/tests/integration_tests.rs +++ b/cedar-drt/tests/integration_tests.rs @@ -54,13 +54,6 @@ fn run_integration_tests(custom_impl: &dyn CustomCedarImpl) { #[test] fn 
integration_tests_on_def_impl() { - //WARNING: We need to create lean def engine first so the JVM signal handlers are aware of it. - //If this needs to change at some point in the future, you'll need to add libjsig.so to LD_PRELOAD - //WARNING: Different tests run in new threads by default, so don't separate these. let lean_def_impl = LeanDefinitionalEngine::new(); run_integration_tests(&lean_def_impl); - - let java_def_impl = - JavaDefinitionalEngine::new().expect("failed to create Dafny definitional engine"); - run_integration_tests(&java_def_impl); } From 46ee9b1ff25a9d4c77157fc861358aa0baad03c4 Mon Sep 17 00:00:00 2001 From: Kesha Hietala Date: Tue, 13 Feb 2024 15:28:07 +0000 Subject: [PATCH 2/6] update README --- cedar-drt/README.md | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/cedar-drt/README.md b/cedar-drt/README.md index 1b3f7a069..7847e180c 100644 --- a/cedar-drt/README.md +++ b/cedar-drt/README.md @@ -9,17 +9,16 @@ The table below lists all available fuzz targets, including which component of t | Name | Component(s) tested | Type | Description | | ----------- | ----------- | ----------- | ----------- | -| [`abac-type-directed`](fuzz/fuzz_targets/abac-type-directed.rs) | Evaluator | DRT | Diff test evaluator on ABAC policies using (mostly) well-typed inputs | -| [`abac`](fuzz/fuzz_targets/abac.rs) | Evaluator | DRT | Diff test evaluator on ABAC policies | +| [`abac-type-directed`](fuzz/fuzz_targets/abac-type-directed.rs) | Authorizer | DRT | Diff test authorizer on ABAC policies using (mostly) well-typed inputs | +| [`abac`](fuzz/fuzz_targets/abac.rs) | Authorizer | DRT | Diff test evaluator on ABAC policies | +| [`eval-type-directed`](fuzz/fuzz_targets/eval-type-directed.rs) | Evaluator | DRT | Diff test evaluator on (mostly) well-typed expressions | | [`formatter`](fuzz/fuzz_targets/formatter.rs) | Policy formatter, Pretty printer, Parser | PBT | Test round trip property: parse ∘ format ∘ pretty-print == id for ASTs | | 
[`partial-eval`](fuzz/fuzz_targets/partial-eval.rs) | Partial evaluator | PBT | Test that residual policies with unknowns substituted are equivalent to original policies with unknowns replaced | | [`pp`](fuzz/fuzz_targets/pp.rs) | Pretty printer, Parser | PBT | Test round trip property: parse ∘ pretty-print == id for ASTs | | [`rbac-authorizer`](fuzz/fuzz_targets/rbac-authorizer.rs) | Authorizer | PBT + DRT | Test for correct authorization responses over a set of simple policies | | [`rbac`](fuzz/fuzz_targets/rbac.rs) | Authorizer | DRT | Diff test authorizer on sets of RBAC policies, including template instantiations | | [`simple-parser`](fuzz/fuzz_targets/simple-parser.rs) | Parser | PBT | Test that parsing doesn't crash with random input strings | -| [`strict-validation-drt-type-directed`](fuzz/fuzz_targets/strict-validation-drt-type-directed.rs) | Validator | DRT | Diff test strict validation using (mostly) well-typed inputs | -| [`validation-drt-type-directed`](fuzz/fuzz_targets/validation-drt-type-directed.rs) | Validator | DRT | Diff test permissive validation using (mostly) well-typed inputs | -| [`validation-drt`](fuzz/fuzz_targets/validation-drt.rs) | Validator | DRT | Diff test permissive validation | +| [`validation-drt-type-directed`](fuzz/fuzz_targets/validation-drt-type-directed.rs) | Validator | DRT | Diff test (strict) validation using (mostly) well-typed inputs | | [`validation-pbt`](fuzz/fuzz_targets/validation-pbt.rs) | Validator | PBT | Test that validated policies do not result in type errors | | [`wildcard-matching`](fuzz/fuzz_targets/wildcard-matching.rs) | String matching algorithm used for the `like` operator | DRT | Diff test wildcard matching using a regex-based implementation | From 0007fdda48fa755506d26b1736160786fdc809bf Mon Sep 17 00:00:00 2001 From: Kesha Hietala Date: Tue, 13 Feb 2024 17:40:04 +0000 Subject: [PATCH 3/6] fixes --- .github/workflows/ci.yml | 6 -- Dockerfile | 15 +-- .../fuzz/fuzz_targets/abac-type-directed.rs | 6 +- 
cedar-drt/fuzz/fuzz_targets/abac.rs | 6 +- .../fuzz/fuzz_targets/eval-type-directed.rs | 6 +- .../fuzz/fuzz_targets/rbac-authorizer.rs | 96 +++++++++---------- cedar-drt/fuzz/fuzz_targets/rbac.rs | 6 +- .../validation-drt-type-directed.rs | 6 +- cedar-drt/fuzz/src/lib.rs | 15 +-- 9 files changed, 71 insertions(+), 91 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3a8c70a1a..0466c0144 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -47,12 +47,6 @@ jobs: run: | wget https://raw.githubusercontent.com/leanprover/elan/master/elan-init.sh bash elan-init.sh -y - - name: Get Java 17 - uses: actions/setup-java@v3 - with: - distribution: 'corretto' - java-version: '17' - cache: 'gradle' - name: rustup run: rustup update ${{ matrix.toolchain }} && rustup default ${{ matrix.toolchain }} - name: cargo fmt (cedar-policy-generators) diff --git a/Dockerfile b/Dockerfile index 9b7c5193a..9bff57dba 100644 --- a/Dockerfile +++ b/Dockerfile @@ -3,7 +3,7 @@ FROM amazonlinux:2 AS prepare RUN yum update -y \ && yum install -y \ - curl clang tar zip unzip python3 git xz java-1.8.0-openjdk-devel.x86_64 \ + curl clang tar zip unzip python3 git xz \ make wget \ && yum clean all @@ -17,19 +17,6 @@ RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs > /tmp/rustup.sh \ # Install cargo-fuzz RUN . 
~/.profile; cargo install cargo-fuzz -# Setup DOTNET toolchain -RUN mkdir /opt/dotnet \ - && wget -q https://dot.net/v1/dotnet-install.sh -O /opt/dotnet/dotnet-install.sh \ - && chmod +x /opt/dotnet/dotnet-install.sh \ - && /opt/dotnet/dotnet-install.sh --channel 6.0 -ENV PATH="/root/.dotnet/:$PATH" - -# Setup Java/Gradle toolchain -RUN mkdir /opt/gradle \ - && wget -q "https://services.gradle.org/distributions/gradle-8.1.1-bin.zip" -O /opt/gradle/gradle.zip \ - && unzip /opt/gradle/gradle.zip -d /opt/gradle/ -ENV PATH="/opt/gradle/gradle-8.1.1/bin/:$PATH" - # Install Lean RUN wget https://raw.githubusercontent.com/leanprover/elan/master/elan-init.sh && sh elan-init.sh -y diff --git a/cedar-drt/fuzz/fuzz_targets/abac-type-directed.rs b/cedar-drt/fuzz/fuzz_targets/abac-type-directed.rs index fc0a1e5d7..2e6e1c7c6 100644 --- a/cedar-drt/fuzz/fuzz_targets/abac-type-directed.rs +++ b/cedar-drt/fuzz/fuzz_targets/abac-type-directed.rs @@ -109,9 +109,9 @@ impl<'a> Arbitrary<'a> for FuzzTargetInput { } // Type-directed fuzzing of ABAC hierarchy/policy/requests. 
-fuzz_target!(|input: abac_type_directed_shared::FuzzTargetInput| { +fuzz_target!(|input: FuzzTargetInput| { initialize_log(); - let def_engine = LeanDefinitionalEngine::new(); + let def_impl = LeanDefinitionalEngine::new(); let mut policyset = ast::PolicySet::new(); let policy: ast::StaticPolicy = input.policy.into(); policyset.add_static(policy.clone()).unwrap(); @@ -128,7 +128,7 @@ fuzz_target!(|input: abac_type_directed_shared::FuzzTargetInput| { for request in requests.iter().cloned() { debug!("Request : {request}"); let (rust_res, total_dur) = - time_function(|| run_auth_test(def_impl, request, &policyset, &input.entities)); + time_function(|| run_auth_test(&def_impl, request, &policyset, &input.entities)); info!("{}{}", TOTAL_MSG, total_dur.as_nanos()); diff --git a/cedar-drt/fuzz/fuzz_targets/abac.rs b/cedar-drt/fuzz/fuzz_targets/abac.rs index 7d199feb7..5cdc38312 100644 --- a/cedar-drt/fuzz/fuzz_targets/abac.rs +++ b/cedar-drt/fuzz/fuzz_targets/abac.rs @@ -108,9 +108,9 @@ impl<'a> Arbitrary<'a> for FuzzTargetInput { } // Simple fuzzing of ABAC hierarchy/policy/requests without respect to types. 
-fuzz_target!(|input: abac_shared::FuzzTargetInput| { +fuzz_target!(|input: FuzzTargetInput| { initialize_log(); - let def_engine = LeanDefinitionalEngine::new(); + let def_impl = LeanDefinitionalEngine::new(); if let Ok(entities) = Entities::try_from(input.hierarchy) { let mut policyset = ast::PolicySet::new(); let policy: ast::StaticPolicy = input.policy.into(); @@ -126,7 +126,7 @@ fuzz_target!(|input: abac_shared::FuzzTargetInput| { for request in requests.iter().cloned() { debug!("Request: {request}"); let (_, total_dur) = - time_function(|| run_auth_test(def_impl, request, &policyset, &entities)); + time_function(|| run_auth_test(&def_impl, request, &policyset, &entities)); info!("{}{}", TOTAL_MSG, total_dur.as_nanos()); } if let Ok(test_name) = std::env::var("DUMP_TEST_NAME") { diff --git a/cedar-drt/fuzz/fuzz_targets/eval-type-directed.rs b/cedar-drt/fuzz/fuzz_targets/eval-type-directed.rs index 89756e772..ad3b41a37 100644 --- a/cedar-drt/fuzz/fuzz_targets/eval-type-directed.rs +++ b/cedar-drt/fuzz/fuzz_targets/eval-type-directed.rs @@ -106,14 +106,14 @@ impl<'a> Arbitrary<'a> for FuzzTargetInput { } // Type-directed fuzzing of expression evaluation. -fuzz_target!(|input: eval_type_directed_shared::FuzzTargetInput| { +fuzz_target!(|input: FuzzTargetInput| { initialize_log(); - let def_engine = LeanDefinitionalEngine::new(); + let def_impl = LeanDefinitionalEngine::new(); debug!("Schema: {}\n", input.schema.schemafile_string()); debug!("expr: {}\n", input.expression); debug!("Entities: {}\n", input.entities); run_eval_test( - def_impl, + &def_impl, input.request.into(), &input.expression, &input.entities, diff --git a/cedar-drt/fuzz/fuzz_targets/rbac-authorizer.rs b/cedar-drt/fuzz/fuzz_targets/rbac-authorizer.rs index 2aee44164..15955bf94 100644 --- a/cedar-drt/fuzz/fuzz_targets/rbac-authorizer.rs +++ b/cedar-drt/fuzz/fuzz_targets/rbac-authorizer.rs @@ -86,55 +86,53 @@ impl AbstractPolicy { // functionality _assuming the correctness of the evaluator_. 
We use only // trivial policies and requests, and focus on how the authorizer combines the // results. -fuzz_target!( - |input: rbac_authorizer_shared::AuthorizerInputAbstractEvaluator| { - let def_engine = LeanDefinitionalEngine::new(); - let policies = input - .policies - .iter() - .cloned() - .enumerate() - .map(|(i, p)| p.into_policy(format!("policy{i}"))); - let mut policyset = ast::PolicySet::new(); - for policy in policies { - policyset.add_static(policy).unwrap(); - } - assert_eq!(policyset.policies().count(), input.policies.len()); - let entities = Entities::new(); - let request = ast::Request::new( - ("User::\"alice\"".parse().expect("should be valid"), None), - ("Action::\"read\"".parse().expect("should be valid"), None), - ("Resource::\"foo\"".parse().expect("should be valid"), None), - ast::Context::empty(), - None::<&ast::RequestSchemaAllPass>, - Extensions::none(), - ) - .expect("we aren't doing request validation here, so new() can't fail"); +fuzz_target!(|input: AuthorizerInputAbstractEvaluator| { + let def_impl = LeanDefinitionalEngine::new(); + let policies = input + .policies + .iter() + .cloned() + .enumerate() + .map(|(i, p)| p.into_policy(format!("policy{i}"))); + let mut policyset = ast::PolicySet::new(); + for policy in policies { + policyset.add_static(policy).unwrap(); + } + assert_eq!(policyset.policies().count(), input.policies.len()); + let entities = Entities::new(); + let request = ast::Request::new( + ("User::\"alice\"".parse().expect("should be valid"), None), + ("Action::\"read\"".parse().expect("should be valid"), None), + ("Resource::\"foo\"".parse().expect("should be valid"), None), + ast::Context::empty(), + None::<&ast::RequestSchemaAllPass>, + Extensions::none(), + ) + .expect("we aren't doing request validation here, so new() can't fail"); - // Check agreement with definitional engine. Note that run_auth_test returns - // the result of the call to is_authorized. 
- let res = run_auth_test(def_impl, request, &policyset, &entities); + // Check agreement with definitional engine. Note that run_auth_test returns + // the result of the call to is_authorized. + let res = run_auth_test(&def_impl, request, &policyset, &entities); - // Check the following property: there should be an error reported iff we - // had either PermitError or ForbidError - let should_error = input - .policies - .iter() - .any(|p| p == &AbstractPolicy::PermitError || p == &AbstractPolicy::ForbidError); - if should_error { - assert!(!res.diagnostics.errors.is_empty()); - } else { - // doing the assertion this way, rather than assert!(.is_empty()), gives - // us a better assertion-failure message (showing what items were - // present on the LHS) - assert_eq!( - res.diagnostics - .errors - .iter() - .map(ToString::to_string) - .collect::>(), - Vec::::new() - ); - } + // Check the following property: there should be an error reported iff we + // had either PermitError or ForbidError + let should_error = input + .policies + .iter() + .any(|p| p == &AbstractPolicy::PermitError || p == &AbstractPolicy::ForbidError); + if should_error { + assert!(!res.diagnostics.errors.is_empty()); + } else { + // doing the assertion this way, rather than assert!(.is_empty()), gives + // us a better assertion-failure message (showing what items were + // present on the LHS) + assert_eq!( + res.diagnostics + .errors + .iter() + .map(ToString::to_string) + .collect::>(), + Vec::::new() + ); } -); +}); diff --git a/cedar-drt/fuzz/fuzz_targets/rbac.rs b/cedar-drt/fuzz/fuzz_targets/rbac.rs index e3a0b64c6..9a00a9177 100644 --- a/cedar-drt/fuzz/fuzz_targets/rbac.rs +++ b/cedar-drt/fuzz/fuzz_targets/rbac.rs @@ -183,9 +183,9 @@ impl<'a> Arbitrary<'a> for FuzzTargetInput { // Fuzzing a single, pure-RBAC policy, with associated pure-RBAC hierarchy and // pure-RBAC requests. 
-fuzz_target!(|input: rbac_shared::FuzzTargetInput| { +fuzz_target!(|input: FuzzTargetInput| { initialize_log(); - let def_engine = LeanDefinitionalEngine::new(); + let def_impl = LeanDefinitionalEngine::new(); if let Ok(entities) = Entities::try_from(input.hierarchy) { let mut policyset = ast::PolicySet::new(); for pg in input.policy_groups { @@ -204,7 +204,7 @@ fuzz_target!(|input: rbac_shared::FuzzTargetInput| { for rbac_request in input.requests.into_iter() { let request = ast::Request::from(rbac_request); let (_, dur) = - time_function(|| run_auth_test(def_impl, request, &policyset, &entities)); + time_function(|| run_auth_test(&def_impl, request, &policyset, &entities)); info!("{}{}", TOTAL_MSG, dur.as_nanos()); } } diff --git a/cedar-drt/fuzz/fuzz_targets/validation-drt-type-directed.rs b/cedar-drt/fuzz/fuzz_targets/validation-drt-type-directed.rs index b705c6ecf..bfd11e314 100644 --- a/cedar-drt/fuzz/fuzz_targets/validation-drt-type-directed.rs +++ b/cedar-drt/fuzz/fuzz_targets/validation-drt-type-directed.rs @@ -65,9 +65,9 @@ impl<'a> Arbitrary<'a> for FuzzTargetInput { } // Type-directed fuzzing of (strict) validation. 
-fuzz_target!(|input: strict_validation_drt_shared::FuzzTargetInput| { +fuzz_target!(|input: FuzzTargetInput| { initialize_log(); - let def_engine = LeanDefinitionalEngine::new(); + let def_impl = LeanDefinitionalEngine::new(); // generate a schema if let Ok(schema) = ValidatorSchema::try_from(input.schema) { @@ -81,7 +81,7 @@ fuzz_target!(|input: strict_validation_drt_shared::FuzzTargetInput| { // run the policy through both validators and compare the result let (_, total_dur) = - time_function(|| run_val_test(def_impl, schema, &policyset, ValidationMode::Strict)); + time_function(|| run_val_test(&def_impl, schema, &policyset, ValidationMode::Strict)); info!("{}{}", TOTAL_MSG, total_dur.as_nanos()); } }); diff --git a/cedar-drt/fuzz/src/lib.rs b/cedar-drt/fuzz/src/lib.rs index 044cde3e7..b0c4774c1 100644 --- a/cedar-drt/fuzz/src/lib.rs +++ b/cedar-drt/fuzz/src/lib.rs @@ -20,9 +20,7 @@ mod prt; pub use dump::*; pub use prt::*; -use cedar_drt::{ - time_function, CedarTestImplementation, ErrorComparisonMode, RUST_AUTH_MSG, RUST_VALIDATION_MSG, -}; +use cedar_drt::{time_function, CedarTestImplementation, ErrorComparisonMode}; use cedar_policy::{frontend::is_authorized::InterfaceResponse, PolicyId}; use cedar_policy_core::ast; use cedar_policy_core::authorizer::{AuthorizationError, Authorizer, Response}; @@ -34,6 +32,10 @@ use libfuzzer_sys::arbitrary::{self, Unstructured}; use log::info; use std::collections::HashSet; +/// Times for cedar-policy authorization and validation. +pub const RUST_AUTH_MSG: &str = "rust_auth (ns) : "; +pub const RUST_VALIDATION_MSG: &str = "rust_validation (ns) : "; + /// Compare the behavior of the evaluator in `cedar-policy` against a custom Cedar /// implementation. Panics if the two do not agree. `expr` is the expression to /// evaluate and `request` and `entities` are used to populate the evaluator. 
@@ -227,13 +229,12 @@ pub fn run_val_test( #[test] fn test_run_auth_test() { - use cedar_drt::JavaDefinitionalEngine; + use cedar_drt::LeanDefinitionalEngine; use cedar_policy_core::ast::{Entity, EntityUID, RequestSchemaAllPass, RestrictedExpr}; use cedar_policy_core::entities::{NoEntitiesSchema, TCComputation}; use smol_str::SmolStr; - let java_def_engine = - JavaDefinitionalEngine::new().expect("failed to create definitional engine"); + let def_engine = LeanDefinitionalEngine::new(); let principal = ast::EntityUIDEntry::Known { euid: std::sync::Arc::new(EntityUID::with_eid_and_type("User", "alice").unwrap()), loc: None, @@ -303,7 +304,7 @@ fn test_run_auth_test() { Extensions::all_available(), ) .unwrap(); - run_auth_test(&java_def_engine, query, &policies, &entities); + run_auth_test(&def_engine, query, &policies, &entities); } /// Randomly drop some of the entities from the list so the generator can produce From 613c191cff97c7d4cad8f582964cb8597b479f1e Mon Sep 17 00:00:00 2001 From: Kesha Hietala Date: Tue, 13 Feb 2024 13:56:28 -0500 Subject: [PATCH 4/6] Update cedar-drt/README.md Co-authored-by: Craig Disselkoen --- cedar-drt/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cedar-drt/README.md b/cedar-drt/README.md index 7847e180c..dd00028ec 100644 --- a/cedar-drt/README.md +++ b/cedar-drt/README.md @@ -10,7 +10,7 @@ The table below lists all available fuzz targets, including which component of t | Name | Component(s) tested | Type | Description | | ----------- | ----------- | ----------- | ----------- | | [`abac-type-directed`](fuzz/fuzz_targets/abac-type-directed.rs) | Authorizer | DRT | Diff test authorizer on ABAC policies using (mostly) well-typed inputs | -| [`abac`](fuzz/fuzz_targets/abac.rs) | Authorizer | DRT | Diff test evaluator on ABAC policies | +| [`abac`](fuzz/fuzz_targets/abac.rs) | Authorizer | DRT | Diff test authorizer on ABAC policies | | [`eval-type-directed`](fuzz/fuzz_targets/eval-type-directed.rs) | Evaluator 
| DRT | Diff test evaluator on (mostly) well-typed expressions | | [`formatter`](fuzz/fuzz_targets/formatter.rs) | Policy formatter, Pretty printer, Parser | PBT | Test round trip property: parse ∘ format ∘ pretty-print == id for ASTs | | [`partial-eval`](fuzz/fuzz_targets/partial-eval.rs) | Partial evaluator | PBT | Test that residual policies with unknowns substituted are equivalent to original policies with unknowns replaced | From 2f7755d99926306980b44099a0c0d63273bbfa8e Mon Sep 17 00:00:00 2001 From: Kesha Hietala Date: Fri, 16 Feb 2024 16:33:42 +0000 Subject: [PATCH 5/6] add additional validation targets --- cedar-drt/README.md | 4 +- cedar-drt/fuzz/Cargo.toml | 12 ++ cedar-drt/fuzz/fuzz_targets/validation-drt.rs | 87 +++++++++ .../validation-pbt-type-directed.rs | 184 ++++++++++++++++++ 4 files changed, 286 insertions(+), 1 deletion(-) create mode 100644 cedar-drt/fuzz/fuzz_targets/validation-drt.rs create mode 100644 cedar-drt/fuzz/fuzz_targets/validation-pbt-type-directed.rs diff --git a/cedar-drt/README.md b/cedar-drt/README.md index dd00028ec..745a7641f 100644 --- a/cedar-drt/README.md +++ b/cedar-drt/README.md @@ -18,7 +18,9 @@ The table below lists all available fuzz targets, including which component of t | [`rbac-authorizer`](fuzz/fuzz_targets/rbac-authorizer.rs) | Authorizer | PBT + DRT | Test for correct authorization responses over a set of simple policies | | [`rbac`](fuzz/fuzz_targets/rbac.rs) | Authorizer | DRT | Diff test authorizer on sets of RBAC policies, including template instantiations | | [`simple-parser`](fuzz/fuzz_targets/simple-parser.rs) | Parser | PBT | Test that parsing doesn't crash with random input strings | -| [`validation-drt-type-directed`](fuzz/fuzz_targets/validation-drt-type-directed.rs) | Validator | DRT | Diff test (strict) validation using (mostly) well-typed inputs | +| [`validation-drt-type-directed`](fuzz/fuzz_targets/validation-drt-type-directed.rs) | Validator | DRT | Diff test validation using (mostly) well-typed 
inputs | +| [`validation-drt`](fuzz/fuzz_targets/validation-drt.rs) | Validator | DRT | Diff test validation | +| [`validation-pbt-type-directed`](fuzz/fuzz_targets/validation-pbt-type-directed.rs) | Validator | PBT | Test that validated policies do not result in type errors using (mostly) well-typed inputs | | [`validation-pbt`](fuzz/fuzz_targets/validation-pbt.rs) | Validator | PBT | Test that validated policies do not result in type errors | | [`wildcard-matching`](fuzz/fuzz_targets/wildcard-matching.rs) | String matching algorithm used for the `like` operator | DRT | Diff test wildcard matching using a regex-based implementation | diff --git a/cedar-drt/fuzz/Cargo.toml b/cedar-drt/fuzz/Cargo.toml index c28e81054..56224d8cf 100644 --- a/cedar-drt/fuzz/Cargo.toml +++ b/cedar-drt/fuzz/Cargo.toml @@ -85,6 +85,18 @@ path = "fuzz_targets/validation-pbt.rs" test = false doc = false +[[bin]] +name = "validation-pbt-type-directed" +path = "fuzz_targets/validation-pbt-type-directed.rs" +test = false +doc = false + +[[bin]] +name = "validation-drt" +path = "fuzz_targets/validation-drt.rs" +test = false +doc = false + [[bin]] name = "validation-drt-type-directed" path = "fuzz_targets/validation-drt-type-directed.rs" diff --git a/cedar-drt/fuzz/fuzz_targets/validation-drt.rs b/cedar-drt/fuzz/fuzz_targets/validation-drt.rs new file mode 100644 index 000000000..4a04ab03e --- /dev/null +++ b/cedar-drt/fuzz/fuzz_targets/validation-drt.rs @@ -0,0 +1,87 @@ +/* + * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#![no_main] +use cedar_drt::*; +use cedar_drt_inner::*; +use cedar_policy_core::ast; +use cedar_policy_generators::{abac::ABACPolicy, schema::Schema, settings::ABACSettings}; +use libfuzzer_sys::arbitrary::{self, Arbitrary, Unstructured}; +use log::{debug, info}; +use serde::Serialize; + +/// Input expected by this fuzz target +#[derive(Debug, Clone, Serialize)] +pub struct FuzzTargetInput { + /// generated schema + #[serde(skip)] + pub schema: Schema, + /// generated policy + pub policy: ABACPolicy, +} + +/// settings for this fuzz target +const SETTINGS: ABACSettings = ABACSettings { + match_types: false, + enable_extensions: true, + max_depth: 7, + max_width: 7, + enable_additional_attributes: true, + enable_like: true, + enable_action_groups_and_attrs: true, + enable_arbitrary_func_call: true, + enable_unknowns: false, + enable_action_in_constraints: true, + enable_unspecified_apply_spec: true, +}; + +impl<'a> Arbitrary<'a> for FuzzTargetInput { + fn arbitrary(u: &mut Unstructured<'a>) -> arbitrary::Result { + let schema: Schema = Schema::arbitrary(SETTINGS.clone(), u)?; + let hierarchy = schema.arbitrary_hierarchy(u)?; + let policy = schema.arbitrary_policy(&hierarchy, u)?; + Ok(Self { schema, policy }) + } + + fn size_hint(depth: usize) -> (usize, Option) { + arbitrary::size_hint::and_all(&[ + Schema::arbitrary_size_hint(depth), + Schema::arbitrary_policy_size_hint(&SETTINGS, depth), + ]) + } +} + +// Non-type-directed fuzzing of (strict) validation. 
+fuzz_target!(|input: FuzzTargetInput| { + initialize_log(); + let def_impl = LeanDefinitionalEngine::new(); + + // generate a schema + if let Ok(schema) = ValidatorSchema::try_from(input.schema) { + debug!("Schema: {:?}", schema); + + // generate a policy + let mut policyset = ast::PolicySet::new(); + let policy: ast::StaticPolicy = input.policy.into(); + policyset.add_static(policy).unwrap(); + debug!("Policies: {policyset}"); + + // run the policy through both validators and compare the result + let (_, total_dur) = + time_function(|| run_val_test(&def_impl, schema, &policyset, ValidationMode::Strict)); + info!("{}{}", TOTAL_MSG, total_dur.as_nanos()); + } +}); diff --git a/cedar-drt/fuzz/fuzz_targets/validation-pbt-type-directed.rs b/cedar-drt/fuzz/fuzz_targets/validation-pbt-type-directed.rs new file mode 100644 index 000000000..a602a82a9 --- /dev/null +++ b/cedar-drt/fuzz/fuzz_targets/validation-pbt-type-directed.rs @@ -0,0 +1,184 @@ +/* + * Copyright 2022-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#![no_main] +use cedar_drt::initialize_log; +use cedar_drt_inner::*; +use cedar_policy_core::ast; +use cedar_policy_core::authorizer::{AuthorizationError, Authorizer}; +use cedar_policy_core::entities::Entities; +use cedar_policy_core::evaluator::EvaluationErrorKind; +use cedar_policy_generators::{ + abac::{ABACPolicy, ABACRequest}, + hierarchy::{Hierarchy, HierarchyGenerator}, + schema::Schema, + settings::ABACSettings, +}; +use cedar_policy_validator::{ + ValidationMode, Validator, ValidatorSchema, +}; +use libfuzzer_sys::arbitrary::{self, Arbitrary, Unstructured}; +use log::debug; +use serde::Serialize; +use std::convert::TryFrom; + +/// Input expected by this fuzz target: +/// An ABAC hierarchy, schema, and 8 associated policies +#[derive(Debug, Clone, Serialize)] +struct FuzzTargetInput { + /// generated schema + #[serde(skip)] + pub schema: Schema, + /// generated hierarchy + #[serde(skip)] + pub hierarchy: Hierarchy, + /// the policy which we will see if it validates + pub policy: ABACPolicy, + /// the requests to try, if the policy validates. + /// We try 8 requests per validated policy. 
+ #[serde(skip)] + pub requests: [ABACRequest; 8], +} + +/// settings for this fuzz target +const SETTINGS: ABACSettings = ABACSettings { + match_types: true, + enable_extensions: true, + max_depth: 7, + max_width: 7, + enable_additional_attributes: true, + enable_like: true, + enable_action_groups_and_attrs: true, + enable_arbitrary_func_call: true, + enable_unknowns: false, + enable_action_in_constraints: true, + enable_unspecified_apply_spec: true, +}; + +impl<'a> Arbitrary<'a> for FuzzTargetInput { + fn arbitrary(u: &mut Unstructured<'a>) -> arbitrary::Result { + let schema: Schema = Schema::arbitrary(SETTINGS.clone(), u)?; + let hierarchy = schema.arbitrary_hierarchy(u)?; + let policy = schema.arbitrary_policy(&hierarchy, u)?; + let requests = [ + schema.arbitrary_request(&hierarchy, u)?, + schema.arbitrary_request(&hierarchy, u)?, + schema.arbitrary_request(&hierarchy, u)?, + schema.arbitrary_request(&hierarchy, u)?, + schema.arbitrary_request(&hierarchy, u)?, + schema.arbitrary_request(&hierarchy, u)?, + schema.arbitrary_request(&hierarchy, u)?, + schema.arbitrary_request(&hierarchy, u)?, + ]; + Ok(Self { + schema, + hierarchy, + policy, + requests, + }) + } + + fn size_hint(depth: usize) -> (usize, Option) { + arbitrary::size_hint::and_all(&[ + Schema::arbitrary_size_hint(depth), + HierarchyGenerator::size_hint(depth), + Schema::arbitrary_policy_size_hint(&SETTINGS, depth), + Schema::arbitrary_request_size_hint(depth), + Schema::arbitrary_request_size_hint(depth), + Schema::arbitrary_request_size_hint(depth), + Schema::arbitrary_request_size_hint(depth), + Schema::arbitrary_request_size_hint(depth), + Schema::arbitrary_request_size_hint(depth), + Schema::arbitrary_request_size_hint(depth), + Schema::arbitrary_request_size_hint(depth), + ]) + } +} + +/// helper function that just tells us whether a policyset passes validation +fn passes_validation(validator: &Validator, policyset: &ast::PolicySet) -> bool { + validator + .validate(policyset, 
ValidationMode::default()) + .validation_passed() +} + +// The main fuzz target. This is for PBT on the validator +fuzz_target!(|input: FuzzTargetInput| { + initialize_log(); + // preserve the schema in string format, which may be needed for error messages later + let schemafile_string = input.schema.schemafile_string(); + if let Ok(schema) = ValidatorSchema::try_from(input.schema) { + debug!("Schema: {:?}", schema); + if let Ok(entities) = Entities::try_from(input.hierarchy.clone()) { + let validator = Validator::new(schema); + let mut policyset = ast::PolicySet::new(); + let policy: ast::StaticPolicy = input.policy.into(); + policyset.add_static(policy.clone()).unwrap(); + if passes_validation(&validator, &policyset) { + // policy successfully validated, let's make sure we don't get any + // dynamic type errors + let authorizer = Authorizer::new(); + debug!("Policies: {policyset}"); + debug!("Entities: {entities}"); + for r in input.requests.into_iter() { + let q = ast::Request::from(r); + debug!("Request: {q}"); + let ans = authorizer.is_authorized(q.clone(), &policyset, &entities); + + let unexpected_errs = ans + .diagnostics + .errors + .iter() + .filter_map(|error| match error { + AuthorizationError::PolicyEvaluationError { error, .. } => { + match error.error_kind() { + // Evaluation errors the validator should prevent. + EvaluationErrorKind::UnspecifiedEntityAccess(_) + | EvaluationErrorKind::RecordAttrDoesNotExist(_, _) + | EvaluationErrorKind::EntityAttrDoesNotExist { .. } + | EvaluationErrorKind::FailedExtensionFunctionLookup(_) + | EvaluationErrorKind::TypeError { .. } + | EvaluationErrorKind::WrongNumArguments { .. } => { + Some(error.to_string()) + } + // Evaluation errors it shouldn't prevent. Not + // written with a catch all so that we must + // consider if a new error type should cause + // this target to fail. 
+ EvaluationErrorKind::EntityDoesNotExist(_) + | EvaluationErrorKind::IntegerOverflow(_) + | EvaluationErrorKind::InvalidRestrictedExpression(_) + | EvaluationErrorKind::UnlinkedSlot(_) + | EvaluationErrorKind::FailedExtensionFunctionApplication { + .. + } + | EvaluationErrorKind::NonValue(_) + | EvaluationErrorKind::RecursionLimit => None, + } + } + }) + .collect::>(); + + assert_eq!( + unexpected_errs, + Vec::::new(), + "validated policy produced unexpected errors {unexpected_errs:?}!\npolicies:\n{policyset}\nentities:\n{entities}\nschema:\n{schemafile_string}\nrequest:\n{q}\n", + ) + } + } + } + } +}); From e33eab4c292cbb43bcad064f36262973499c96d3 Mon Sep 17 00:00:00 2001 From: Kesha Hietala Date: Fri, 16 Feb 2024 16:37:56 +0000 Subject: [PATCH 6/6] cargo fmt --- cedar-drt/fuzz/fuzz_targets/validation-pbt-type-directed.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/cedar-drt/fuzz/fuzz_targets/validation-pbt-type-directed.rs b/cedar-drt/fuzz/fuzz_targets/validation-pbt-type-directed.rs index a602a82a9..96929b0af 100644 --- a/cedar-drt/fuzz/fuzz_targets/validation-pbt-type-directed.rs +++ b/cedar-drt/fuzz/fuzz_targets/validation-pbt-type-directed.rs @@ -27,9 +27,7 @@ use cedar_policy_generators::{ schema::Schema, settings::ABACSettings, }; -use cedar_policy_validator::{ - ValidationMode, Validator, ValidatorSchema, -}; +use cedar_policy_validator::{ValidationMode, Validator, ValidatorSchema}; use libfuzzer_sys::arbitrary::{self, Arbitrary, Unstructured}; use log::debug; use serde::Serialize;