diff --git a/Source/DafnyCore/Verifier/BoogieGenerator.ExpressionTranslator.cs b/Source/DafnyCore/Verifier/BoogieGenerator.ExpressionTranslator.cs index e48f467c2f4..774c86a7b3d 100644 --- a/Source/DafnyCore/Verifier/BoogieGenerator.ExpressionTranslator.cs +++ b/Source/DafnyCore/Verifier/BoogieGenerator.ExpressionTranslator.cs @@ -1927,6 +1927,8 @@ public static bool RewriteInExpr(Expression s, bool aggressive) { return false; } + private static readonly Dictionary<string, string> NullaryAttributesToTranslate; + private static readonly HashSet<string> NullaryAttributesToCopy = new(new[] { "focus", "ignore", @@ -1958,6 +1960,15 @@ public static bool RewriteInExpr(Expression s, bool aggressive) { "error" }); + static ExpressionTranslator() { + NullaryAttributesToTranslate = new() { + { + "isolate_assertions", + "vcs_split_on_every_assert" + } + }; + } + private QKeyValue TrBooleanAttribute(string name, Expression arg, QKeyValue rest) { var boolArg = RemoveLit(TrExpr(arg)); return boolArg is Boogie.LiteralExpr { IsTrue: true } or Boogie.LiteralExpr { IsFalse: true } @@ -1995,7 +2006,9 @@ public QKeyValue TrAttributes(Attributes attrs, string skipThisAttribute) { continue; } - if (NullaryAttributesToCopy.Contains(name) && attr.Args.Count == 0) { + if (NullaryAttributesToTranslate.ContainsKey(name) && attr.Args.Count == 0) { + kv = new QKeyValue(attr.tok, NullaryAttributesToTranslate[name], new List<object>(), kv); + } else if (NullaryAttributesToCopy.Contains(name) && attr.Args.Count == 0) { kv = new QKeyValue(attr.tok, name, new List<object>(), kv); } else if (BooleanAttributesToCopy.Contains(name) && attr.Args.Count == 1) { kv = TrBooleanAttribute(name, attr.Args[0], kv); diff --git a/Source/DafnyLanguageServer.Test/Diagnostics/DiagnosticsTest.cs b/Source/DafnyLanguageServer.Test/Diagnostics/DiagnosticsTest.cs index 1622b88cd95..9922a7938d0 100644 --- a/Source/DafnyLanguageServer.Test/Diagnostics/DiagnosticsTest.cs +++ b/Source/DafnyLanguageServer.Test/Diagnostics/DiagnosticsTest.cs @@ -134,7 +134,7 @@ public async Task DiagnosticsForVerificationTimeoutHasNameAsRange() { [Fact] public async Task NoFlickeringWhenMixingCorrectAndErrorBatches() { var source = @" -method {:vcs_split_on_every_assert} Foo(x: int) { +method {:isolate_assertions} Foo(x: int) { if (x == 0) { assert true; } else if (x == 1) { @@ -158,7 +158,7 @@ public async Task NoFlickeringWhenMixingCorrectAndErrorBatches() { [Fact] public async Task IncrementalBatchDiagnostics() { var source = @" -method {:vcs_split_on_every_assert} Foo(x: int) { +method {:isolate_assertions} Foo(x: int) { if (x == 0) { assert false; } else { diff --git a/Source/DafnyLanguageServer.Test/Lookup/HoverVerificationTest.cs b/Source/DafnyLanguageServer.Test/Lookup/HoverVerificationTest.cs index bf9f348e4ae..c10d2dbaa4d 100644 --- a/Source/DafnyLanguageServer.Test/Lookup/HoverVerificationTest.cs +++ b/Source/DafnyLanguageServer.Test/Lookup/HoverVerificationTest.cs @@ -26,7 +26,7 @@ public class HoverVerificationTest : ClientBasedLanguageServerTest { [Fact(Timeout = MaxTestExecutionTimeMs)] public async Task HoverPutsErrorsFirst() { var documentItem = await GetDocumentItem(@" -method {:vcs_split_on_every_assert} Test(x: int, y: int) +method {:isolate_assertions} Test(x: int, y: int) requires x < y { var callIt := giveIt(x, y); @@ -118,7 +118,7 @@ await SetUp(o => { // LineVerificationStatusOption.Instance.Set(o, true); }); var documentItem = await GetDocumentItem(@" -method {:vcs_split_on_every_assert} f(x: int) { +method {:isolate_assertions} f(x: int) { assert x >= 2; // Hover #1 assert x >= 
1; // Hover #2 } diff --git a/Source/DafnyRuntime/DafnyRuntimeDafny/src/dafnyRuntime.dfy b/Source/DafnyRuntime/DafnyRuntimeDafny/src/dafnyRuntime.dfy index 5310094f952..5cf543b6efb 100644 --- a/Source/DafnyRuntime/DafnyRuntimeDafny/src/dafnyRuntime.dfy +++ b/Source/DafnyRuntime/DafnyRuntimeDafny/src/dafnyRuntime.dfy @@ -746,7 +746,7 @@ abstract module {:options "/functionSyntax:4"} Dafny { } } - method {:tailrecursion} {:vcs_split_on_every_assert} AppendOptimized(builder: Vector, e: Sequence, stack: Vector>) + method {:tailrecursion} {:isolate_assertions} AppendOptimized(builder: Vector, e: Sequence, stack: Vector>) requires e.Valid() requires builder.Valid() requires stack.Valid() diff --git a/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-cs.doo b/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-cs.doo index fd2be3e33cd..ce9c372d986 100644 Binary files a/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-cs.doo and b/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-cs.doo differ diff --git a/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-go.doo b/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-go.doo index 275aeec2dfe..9507cb0304a 100644 Binary files a/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-go.doo and b/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-go.doo differ diff --git a/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-java.doo b/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-java.doo index 507725946bb..9ffddef005a 100644 Binary files a/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-java.doo and b/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-java.doo differ diff --git a/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-js.doo b/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-js.doo index 6bb204b0e31..33410558bdc 100644 Binary files a/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-js.doo and b/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-js.doo differ diff --git a/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-notarget.doo b/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-notarget.doo index 338a7f27793..90e5da48c9a 100644 Binary files a/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-notarget.doo and b/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-notarget.doo differ diff --git a/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-py.doo b/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-py.doo index 5b3c4f75d72..3512c737159 100644 Binary files a/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-py.doo and b/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries-py.doo differ diff --git a/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries.doo b/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries.doo index 9e0bfd281d9..b72268096b9 100644 Binary files a/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries.doo and b/Source/DafnyStandardLibraries/binaries/DafnyStandardLibraries.doo differ diff --git a/Source/DafnyStandardLibraries/src/Std/Arithmetic/DivMod.dfy b/Source/DafnyStandardLibraries/src/Std/Arithmetic/DivMod.dfy index 8603fc12a96..2bed5472fa2 100644 --- a/Source/DafnyStandardLibraries/src/Std/Arithmetic/DivMod.dfy +++ b/Source/DafnyStandardLibraries/src/Std/Arithmetic/DivMod.dfy @@ -1281,7 +1281,7 @@ module 
{:disableNonlinearArithmetic} Std.Arithmetic.DivMod { } } - lemma {:vcs_split_on_every_assert} LemmaModNegNeg(x: int, d: int) + lemma {:isolate_assertions} LemmaModNegNeg(x: int, d: int) requires 0 < d ensures x % d == (x * (1 - d)) % d { diff --git a/Source/DafnyStandardLibraries/src/Std/Arithmetic/Internal/DivInternals.dfy b/Source/DafnyStandardLibraries/src/Std/Arithmetic/Internal/DivInternals.dfy index 6568910cfbc..72e5d7ebb88 100644 --- a/Source/DafnyStandardLibraries/src/Std/Arithmetic/Internal/DivInternals.dfy +++ b/Source/DafnyStandardLibraries/src/Std/Arithmetic/Internal/DivInternals.dfy @@ -108,7 +108,7 @@ module {:disableNonlinearArithmetic} Std.Arithmetic.DivInternals { (-n <= z < 0 && (x - y) / n == x / n - y / n - 1) } - lemma {:vcs_split_on_every_assert} LemmaDivAutoAuxPlus(n: int) + lemma {:isolate_assertions} LemmaDivAutoAuxPlus(n: int) requires n > 0 && ModAuto(n) ensures DivAutoPlus(n) { @@ -137,7 +137,7 @@ module {:disableNonlinearArithmetic} Std.Arithmetic.DivInternals { } } - lemma {:vcs_split_on_every_assert} LemmaDivAutoAuxMinusHelper(n: int) + lemma {:isolate_assertions} LemmaDivAutoAuxMinusHelper(n: int) requires n > 0 && ModAuto(n) ensures forall i, j :: && (j >= 0 && DivMinus(n, i, j) ==> DivMinus(n, i, j + n)) diff --git a/Source/DafnyStandardLibraries/src/Std/Arithmetic/Internal/ModInternals.dfy b/Source/DafnyStandardLibraries/src/Std/Arithmetic/Internal/ModInternals.dfy index 452347eb3b5..e995581699c 100644 --- a/Source/DafnyStandardLibraries/src/Std/Arithmetic/Internal/ModInternals.dfy +++ b/Source/DafnyStandardLibraries/src/Std/Arithmetic/Internal/ModInternals.dfy @@ -76,7 +76,7 @@ module {:disableNonlinearArithmetic} Std.Arithmetic.ModInternals { } } - lemma {:vcs_split_on_every_assert} LemmaDivAddDenominator(n: int, x: int) + lemma {:isolate_assertions} LemmaDivAddDenominator(n: int, x: int) requires n > 0 ensures (x + n) / n == x / n + 1 { @@ -96,7 +96,7 @@ module {:disableNonlinearArithmetic} Std.Arithmetic.ModInternals { } } - lemma {:vcs_split_on_every_assert} LemmaDivSubDenominator(n: int, x: int) + lemma {:isolate_assertions} LemmaDivSubDenominator(n: int, x: int) requires n > 0 ensures (x - n) / n == x / n - 1 { @@ -116,7 +116,7 @@ module {:disableNonlinearArithmetic} Std.Arithmetic.ModInternals { } } - lemma {:vcs_split_on_every_assert} LemmaModAddDenominator(n: int, x: int) + lemma {:isolate_assertions} LemmaModAddDenominator(n: int, x: int) requires n > 0 ensures (x + n) % n == x % n { @@ -136,7 +136,7 @@ module {:disableNonlinearArithmetic} Std.Arithmetic.ModInternals { } } - lemma {:vcs_split_on_every_assert} LemmaModSubDenominator(n: int, x: int) + lemma {:isolate_assertions} LemmaModSubDenominator(n: int, x: int) requires n > 0 ensures (x - n) % n == x % n { @@ -193,7 +193,7 @@ module {:disableNonlinearArithmetic} Std.Arithmetic.ModInternals { } /* proves the quotient remainder theorem */ - lemma {:vcs_split_on_every_assert} LemmaQuotientAndRemainder(x: int, q: int, r: int, n: int) + lemma {:isolate_assertions} LemmaQuotientAndRemainder(x: int, q: int, r: int, n: int) requires n > 0 requires 0 <= r < n requires x == q * n + r diff --git a/Source/DafnyStandardLibraries/src/Std/Arithmetic/LittleEndianNat.dfy b/Source/DafnyStandardLibraries/src/Std/Arithmetic/LittleEndianNat.dfy index 55ee9d2bf98..df45d6cb3a4 100644 --- a/Source/DafnyStandardLibraries/src/Std/Arithmetic/LittleEndianNat.dfy +++ b/Source/DafnyStandardLibraries/src/Std/Arithmetic/LittleEndianNat.dfy @@ -50,7 +50,7 @@ abstract module {:disableNonlinearArithmetic} 
Std.Arithmetic.LittleEndianNat { } /* Given the same sequence, ToNatRight and ToNatLeft return the same nat. */ - lemma {:vcs_split_on_every_assert} LemmaToNatLeftEqToNatRight(xs: seq) + lemma {:isolate_assertions} LemmaToNatLeftEqToNatRight(xs: seq) ensures ToNatRight(xs) == ToNatLeft(xs) { reveal ToNatRight(); @@ -177,7 +177,7 @@ abstract module {:disableNonlinearArithmetic} Std.Arithmetic.LittleEndianNat { /* The nat representation of a sequence can be calculated using the nat representation of its prefix. */ - lemma {:vcs_split_on_every_assert} LemmaSeqPrefix(xs: seq, i: nat) + lemma {:isolate_assertions} LemmaSeqPrefix(xs: seq, i: nat) requires 0 <= i <= |xs| ensures ToNatRight(xs[..i]) + ToNatRight(xs[i..]) * Pow(BASE(), i) == ToNatRight(xs) { @@ -228,7 +228,7 @@ abstract module {:disableNonlinearArithmetic} Std.Arithmetic.LittleEndianNat { /* Two sequences do not have the same nat representations if their prefixes do not have the same nat representations. Helper lemma for LemmaSeqNeq. */ - lemma {:vcs_split_on_every_assert} LemmaSeqPrefixNeq(xs: seq, ys: seq, i: nat) + lemma {:isolate_assertions} LemmaSeqPrefixNeq(xs: seq, ys: seq, i: nat) requires 0 <= i <= |xs| == |ys| requires ToNatRight(xs[..i]) != ToNatRight(ys[..i]) ensures ToNatRight(xs) != ToNatRight(ys) @@ -524,7 +524,7 @@ abstract module {:disableNonlinearArithmetic} Std.Arithmetic.LittleEndianNat { /* SeqAdd returns the same value as converting the sequences to nats, then adding them. */ - lemma {:vcs_split_on_every_assert} LemmaSeqAdd(xs: seq, ys: seq, zs: seq, cout: nat) + lemma {:isolate_assertions} LemmaSeqAdd(xs: seq, ys: seq, zs: seq, cout: nat) requires |xs| == |ys| requires SeqAdd(xs, ys) == (zs, cout) ensures ToNatRight(xs) + ToNatRight(ys) == ToNatRight(zs) + cout * Pow(BASE(), |xs|) @@ -581,7 +581,7 @@ abstract module {:disableNonlinearArithmetic} Std.Arithmetic.LittleEndianNat { /* SeqSub returns the same value as converting the sequences to nats, then subtracting them. 
*/ - lemma {:vcs_split_on_every_assert} LemmaSeqSub(xs: seq, ys: seq, zs: seq, cout: nat) + lemma {:isolate_assertions} LemmaSeqSub(xs: seq, ys: seq, zs: seq, cout: nat) requires |xs| == |ys| requires SeqSub(xs, ys) == (zs, cout) ensures ToNatRight(xs) - ToNatRight(ys) + cout * Pow(BASE(), |xs|) == ToNatRight(zs) diff --git a/Source/DafnyStandardLibraries/src/Std/Base64.dfy b/Source/DafnyStandardLibraries/src/Std/Base64.dfy index 02bd16eaa06..1bb33df8ad1 100644 --- a/Source/DafnyStandardLibraries/src/Std/Base64.dfy +++ b/Source/DafnyStandardLibraries/src/Std/Base64.dfy @@ -98,7 +98,7 @@ module Std.Base64 { else (c - 65 as char) as index } - lemma {:resource_limit 2000000} {:vcs_split_on_every_assert} CharToIndexToChar(c: char) + lemma {:resource_limit 2000000} {:isolate_assertions} CharToIndexToChar(c: char) requires IsBase64Char(c) ensures IndexToChar(CharToIndex(c)) == c { @@ -122,7 +122,7 @@ module Std.Base64 { } } - lemma {:vcs_split_on_every_assert} IndexToCharToIndex(i: index) + lemma {:isolate_assertions} IndexToCharToIndex(i: index) ensures (IndexToCharIsBase64(i); CharToIndex(IndexToChar(i)) == i) { // TODO: reduce resource use, brittleness @@ -263,7 +263,7 @@ module Std.Base64 { IndexSeqToBV24ToIndexSeq(s); } - opaque function {:vcs_split_on_every_assert} DecodeRecursively(s: seq): (b: seq) + opaque function {:isolate_assertions} DecodeRecursively(s: seq): (b: seq) requires |s| % 4 == 0 decreases |s| { @@ -326,7 +326,7 @@ module Std.Base64 { } } - opaque function {:vcs_split_on_every_assert} EncodeRecursively(b: seq): (s: seq) + opaque function {:isolate_assertions} EncodeRecursively(b: seq): (s: seq) requires |b| % 3 == 0 { if |b| == 0 then [] @@ -677,7 +677,7 @@ module Std.Base64 { } } - lemma {:vcs_split_on_every_assert} DecodeEncode1Padding(s: seq) + lemma {:isolate_assertions} DecodeEncode1Padding(s: seq) requires Is1Padding(s) ensures Encode1Padding(Decode1Padding(s)) == s { @@ -1449,7 +1449,7 @@ module Std.Base64 { seq(|b|, i requires 0 <= i < |b| => b[i] as uint8) } - lemma {:vcs_split_on_every_assert} {:resource_limit 1000000000} UInt8sToBVsToUInt8s(u: seq) + lemma {:isolate_assertions} {:resource_limit 1000000000} UInt8sToBVsToUInt8s(u: seq) ensures BVsToUInt8s(UInt8sToBVs(u)) == u { // TODO: reduce resource use diff --git a/Source/DafnyStandardLibraries/src/Std/Collections/Seq.dfy b/Source/DafnyStandardLibraries/src/Std/Collections/Seq.dfy index 1d8deec010e..dd7095061ff 100644 --- a/Source/DafnyStandardLibraries/src/Std/Collections/Seq.dfy +++ b/Source/DafnyStandardLibraries/src/Std/Collections/Seq.dfy @@ -530,7 +530,7 @@ module Std.Collections.Seq { /* Flattening sequences of sequences is distributive over concatenation. That is, concatenating the flattening of two sequences of sequences is the same as flattening the concatenation of two sequences of sequences. */ - lemma {:vcs_split_on_every_assert} LemmaFlattenConcat(xs: seq>, ys: seq>) + lemma {:isolate_assertions} LemmaFlattenConcat(xs: seq>, ys: seq>) ensures Flatten(xs + ys) == Flatten(xs) + Flatten(ys) { if |xs| == 0 { @@ -781,7 +781,7 @@ module Std.Collections.Seq { /* Filtering a sequence is distributive over concatenation. That is, concatenating two sequences and then using "Filter" is the same as using "Filter" on each sequence separately, and then concatenating the two resulting sequences. 
*/ - lemma {:vcs_split_on_every_assert} + lemma {:isolate_assertions} LemmaFilterDistributesOverConcat(f: (T ~> bool), xs: seq, ys: seq) requires forall i {:trigger xs[i]}:: 0 <= i < |xs| ==> f.requires(xs[i]) requires forall j {:trigger ys[j]}:: 0 <= j < |ys| ==> f.requires(ys[j]) diff --git a/Source/DafnyStandardLibraries/src/Std/JSON/Deserializer.dfy b/Source/DafnyStandardLibraries/src/Std/JSON/Deserializer.dfy index dd3641eb7f9..6a1a247cbc1 100644 --- a/Source/DafnyStandardLibraries/src/Std/JSON/Deserializer.dfy +++ b/Source/DafnyStandardLibraries/src/Std/JSON/Deserializer.dfy @@ -69,7 +69,7 @@ module Std.JSON.Deserializer { hd as uint16 } - function {:tailrecursion} {:vcs_split_on_every_assert} Unescape(str: seq, start: nat := 0, prefix: seq := []): DeserializationResult> + function {:tailrecursion} {:isolate_assertions} Unescape(str: seq, start: nat := 0, prefix: seq := []): DeserializationResult> decreases |str| - start { // Assumes UTF-16 strings if start >= |str| then Success(prefix) diff --git a/Source/DafnyStandardLibraries/src/Std/JSON/Serializer.dfy b/Source/DafnyStandardLibraries/src/Std/JSON/Serializer.dfy index cad694e4b58..a503096f5be 100644 --- a/Source/DafnyStandardLibraries/src/Std/JSON/Serializer.dfy +++ b/Source/DafnyStandardLibraries/src/Std/JSON/Serializer.dfy @@ -64,7 +64,7 @@ module Std.JSON.Serializer { Failure(o.error) } - function {:vcs_split_on_every_assert} {:resource_limit 1000000} Number(dec: Values.Decimal): Result { + function {:isolate_assertions} {:resource_limit 1000000} Number(dec: Values.Decimal): Result { var minus: jminus := Sign(dec.n); var num: jnum :- Int(Math.Abs(dec.n)); var frac: Maybe := Empty(); diff --git a/Source/DafnyStandardLibraries/src/Std/JSON/ZeroCopy/Deserializer.dfy b/Source/DafnyStandardLibraries/src/Std/JSON/ZeroCopy/Deserializer.dfy index 2d1bb740332..3ba4d60e779 100644 --- a/Source/DafnyStandardLibraries/src/Std/JSON/ZeroCopy/Deserializer.dfy +++ b/Source/DafnyStandardLibraries/src/Std/JSON/ZeroCopy/Deserializer.dfy @@ -59,7 +59,7 @@ module Std.JSON.ZeroCopy.Deserializer { return Cursor(cs.s, cs.beg, point', cs.end).Split(); } - opaque function {:vcs_split_on_every_assert} {:resource_limit 1000000000} Structural(cs: FreshCursor, parser: Parser) + opaque function {:isolate_assertions} {:resource_limit 1000000000} Structural(cs: FreshCursor, parser: Parser) : (pr: ParseResult>) requires forall cs :: parser.fn.requires(cs) ensures pr.Success? 
==> pr.value.StrictlySplitFrom?(cs, st => Spec.Structural(st, parser.spec)) @@ -244,11 +244,11 @@ module Std.JSON.ZeroCopy.Deserializer { elems' } - opaque function {:resource_limit 10000000} {:vcs_split_on_every_assert} AppendLast(ghost cs0: FreshCursor, - ghost json: ValueParser, - elems: Split>, - elem: Split, - sep: Split>) + opaque function {:resource_limit 10000000} {:isolate_assertions} AppendLast(ghost cs0: FreshCursor, + ghost json: ValueParser, + elems: Split>, + elem: Split, + sep: Split>) : (elems': Split>) requires elems.cs.StrictlySplitFrom?(json.cs) requires elems.SplitFrom?(cs0, SuffixedElementsSpec) @@ -280,7 +280,7 @@ module Std.JSON.ZeroCopy.Deserializer { elems' } - lemma {:resource_limit "10e6"} {:vcs_split_on_every_assert} AboutTryStructural(cs: FreshCursor) + lemma {:resource_limit "10e6"} {:isolate_assertions} AboutTryStructural(cs: FreshCursor) ensures var sp := Core.TryStructural(cs); var s0 := sp.t.t.Peek(); @@ -291,14 +291,14 @@ module Std.JSON.ZeroCopy.Deserializer { { } - lemma {:vcs_split_on_every_assert} AboutLists(xs: seq, i: uint32) + lemma {:isolate_assertions} AboutLists(xs: seq, i: uint32) requires 0 <= (i as int) < |xs| ensures xs[(i as int)..(i as int)+1] == [xs[i as int]] {} // The implementation and proof of this function is more painful than // expected due to the tail recursion. - opaque function {:vcs_split_on_every_assert} {:tailrecursion} Elements( + opaque function {:isolate_assertions} {:tailrecursion} Elements( ghost cs0: FreshCursor, json: ValueParser, open: Split>, @@ -419,7 +419,7 @@ module Std.JSON.ZeroCopy.Deserializer { } } - opaque function {:vcs_split_on_every_assert} Bracketed(cs: FreshCursor, json: ValueParser) + opaque function {:isolate_assertions} Bracketed(cs: FreshCursor, json: ValueParser) : (pr: ParseResult) requires cs.SplitFrom?(json.cs) ensures pr.Success? ==> pr.value.StrictlySplitFrom?(cs, BracketedSpec) @@ -476,7 +476,7 @@ module Std.JSON.ZeroCopy.Deserializer { case OtherError(err) => err } - opaque function {:vcs_split_on_every_assert} {:resource_limit 10000000} JSON(cs: Cursors.FreshCursor) : (pr: DeserializationResult>) + opaque function {:isolate_assertions} {:resource_limit 10000000} JSON(cs: Cursors.FreshCursor) : (pr: DeserializationResult>) ensures pr.Success? ==> pr.value.StrictlySplitFrom?(cs, Spec.JSON) { Core.Structural(cs, Parsers.Parser(Values.Value, Spec.Value)).MapFailure(LiftCursorError) @@ -514,7 +514,7 @@ module Std.JSON.ZeroCopy.Deserializer { import opened Utils.Cursors import opened Core - opaque function {:vcs_split_on_every_assert} Value(cs: FreshCursor) : (pr: ParseResult) + opaque function {:isolate_assertions} Value(cs: FreshCursor) : (pr: ParseResult) decreases cs.Length(), 1 ensures pr.Success? ==> pr.value.StrictlySplitFrom?(cs, Spec.Value) { @@ -755,7 +755,7 @@ module Std.JSON.ZeroCopy.Deserializer { else Success(sp) } - opaque function {:vcs_split_on_every_assert} {:resource_limit 100000000} Exp(cs: FreshCursor) : (pr: ParseResult>) + opaque function {:isolate_assertions} {:resource_limit 100000000} Exp(cs: FreshCursor) : (pr: ParseResult>) ensures pr.Success? 
==> pr.value.SplitFrom?(cs, exp => Spec.Maybe(exp, Spec.Exp)) { var SP(e, cs) := @@ -842,7 +842,7 @@ module Std.JSON.ZeroCopy.Deserializer { module Arrays refines Sequences { import opened Params = ArrayParams - lemma {:vcs_split_on_every_assert} BracketedToArray(arr: jarray) + lemma {:isolate_assertions} BracketedToArray(arr: jarray) ensures Spec.Bracketed(arr, SuffixedElementSpec) == Spec.Array(arr) { var rItem := (d: jitem) requires d < arr => Spec.Item(d); @@ -857,7 +857,7 @@ module Std.JSON.ZeroCopy.Deserializer { } } - opaque function {:vcs_split_on_every_assert} Array(cs: FreshCursor, json: ValueParser) + opaque function {:isolate_assertions} Array(cs: FreshCursor, json: ValueParser) : (pr: ParseResult) requires cs.SplitFrom?(json.cs) ensures pr.Success? ==> pr.value.StrictlySplitFrom?(cs, Spec.Array) @@ -911,7 +911,7 @@ module Std.JSON.ZeroCopy.Deserializer { Spec.KeyValue(t) } - opaque function {:vcs_split_on_every_assert} Element(cs: FreshCursor, json: ValueParser) + opaque function {:isolate_assertions} Element(cs: FreshCursor, json: ValueParser) : (pr: ParseResult) { var k :- Strings.String(cs); @@ -957,7 +957,7 @@ module Std.JSON.ZeroCopy.Deserializer { module Objects refines Sequences { import opened Params = ObjectParams - lemma {:vcs_split_on_every_assert} BracketedToObject(obj: jobject) + lemma {:isolate_assertions} BracketedToObject(obj: jobject) ensures Spec.Bracketed(obj, SuffixedElementSpec) == Spec.Object(obj) { var rMember := (d: jmember) requires d < obj => Spec.Member(d); @@ -974,7 +974,7 @@ module Std.JSON.ZeroCopy.Deserializer { } } - opaque function {:vcs_split_on_every_assert} {:resource_limit 10000000} Object(cs: FreshCursor, json: ValueParser) + opaque function {:isolate_assertions} {:resource_limit 10000000} Object(cs: FreshCursor, json: ValueParser) : (pr: ParseResult) requires cs.SplitFrom?(json.cs) ensures pr.Success? ==> pr.value.StrictlySplitFrom?(cs, Spec.Object) diff --git a/Source/DafnyStandardLibraries/src/Std/JSON/ZeroCopy/Serializer.dfy b/Source/DafnyStandardLibraries/src/Std/JSON/ZeroCopy/Serializer.dfy index 044b8e64a0e..b58b9074ec5 100644 --- a/Source/DafnyStandardLibraries/src/Std/JSON/ZeroCopy/Serializer.dfy +++ b/Source/DafnyStandardLibraries/src/Std/JSON/ZeroCopy/Serializer.dfy @@ -59,7 +59,7 @@ module Std.JSON.ZeroCopy.Serializer { .Append(js.after) } - opaque function {:vcs_split_on_every_assert} Value(v: Grammar.Value, writer: Writer) : (wr: Writer) + opaque function {:isolate_assertions} Value(v: Grammar.Value, writer: Writer) : (wr: Writer) decreases v, 4 ensures wr.Bytes() == writer.Bytes() + Spec.Value(v) { @@ -122,7 +122,7 @@ module Std.JSON.ZeroCopy.Serializer { .Append(str.rq) } - lemma {:vcs_split_on_every_assert} NumberHelper1(num: jnumber, writer: Writer) + lemma {:isolate_assertions} NumberHelper1(num: jnumber, writer: Writer) ensures if num.exp.NonEmpty? then ( if num.frac.NonEmpty? then @@ -151,11 +151,11 @@ module Std.JSON.ZeroCopy.Serializer { } } - lemma {:vcs_split_on_every_assert} NumberHelper2a(num: jnumber, writer: Writer) + lemma {:isolate_assertions} NumberHelper2a(num: jnumber, writer: Writer) ensures Spec.Number(num) == num.minus.Bytes() + num.num.Bytes() + Spec.Maybe(num.frac, Spec.Frac) + Spec.Maybe(num.exp, Spec.Exp) {} - lemma {:vcs_split_on_every_assert} {:resource_limit 10000000} NumberHelper2(num: jnumber, writer: Writer) + lemma {:isolate_assertions} {:resource_limit 10000000} NumberHelper2(num: jnumber, writer: Writer) ensures if num.exp.NonEmpty? then ( if num.frac.NonEmpty? 
then writer.Bytes() + Spec.Number(num) == writer.Bytes() + num.minus.Bytes() + num.num.Bytes() + num.frac.t.period.Bytes() + num.frac.t.num.Bytes() + num.exp.t.e.Bytes() + num.exp.t.sign.Bytes() + num.exp.t.num.Bytes() else writer.Bytes() + Spec.Number(num) == writer.Bytes() + num.minus.Bytes() + num.num.Bytes() + num.exp.t.e.Bytes() + num.exp.t.sign.Bytes() + num.exp.t.num.Bytes() @@ -217,7 +217,7 @@ module Std.JSON.ZeroCopy.Serializer { } } - opaque function {:vcs_split_on_every_assert} Number(num: jnumber, writer: Writer) : (wr: Writer) + opaque function {:isolate_assertions} Number(num: jnumber, writer: Writer) : (wr: Writer) decreases num, 0 ensures wr.Bytes() == writer.Bytes() + Spec.Number(num) { @@ -266,7 +266,7 @@ module Std.JSON.ZeroCopy.Serializer { } // DISCUSS: Can't be opaque, due to the lambda - function {:vcs_split_on_every_assert} StructuralView(st: Structural, writer: Writer) : (wr: Writer) + function {:isolate_assertions} StructuralView(st: Structural, writer: Writer) : (wr: Writer) ensures wr.Bytes() == writer.Bytes() + Spec.Structural(st, Spec.View) { writer.Append(st.before).Append(st.t).Append(st.after) @@ -397,9 +397,9 @@ module Std.JSON.ZeroCopy.Serializer { } - ghost function {:vcs_split_on_every_assert} SequenceSpec(v: Value, items: seq, - spec: T -> bytes, impl: (Value, T, Writer) --> Writer, - writer: Writer) + ghost function {:isolate_assertions} SequenceSpec(v: Value, items: seq, + spec: T -> bytes, impl: (Value, T, Writer) --> Writer, + writer: Writer) : (wr: Writer) requires SequenceSpecRequires(v, items, spec, impl, writer) decreases v, 1, items @@ -485,7 +485,7 @@ module Std.JSON.ZeroCopy.Serializer { assert wr == MembersSpec(obj, members, writer); } - method {:vcs_split_on_every_assert} ItemsImpl(arr: jarray, writer: Writer) returns (wr: Writer) + method {:isolate_assertions} ItemsImpl(arr: jarray, writer: Writer) returns (wr: Writer) decreases arr, 1 ensures wr == ItemsSpec(arr, arr.data, writer) { diff --git a/Source/DafnyStandardLibraries/src/Std/Strings.dfy b/Source/DafnyStandardLibraries/src/Std/Strings.dfy index a9acc6b46f0..4ff388923e0 100644 --- a/Source/DafnyStandardLibraries/src/Std/Strings.dfy +++ b/Source/DafnyStandardLibraries/src/Std/Strings.dfy @@ -81,7 +81,7 @@ module {:disableNonlinearArithmetic} Std.Strings { /** Convert a String that represents a natural number, back into that number. 
*/ - function {:vcs_split_on_every_assert} ToNat(str: String) : (n: nat) + function {:isolate_assertions} ToNat(str: String) : (n: nat) requires forall c <- str :: IsDigitChar(c) { if str == [] then 0 diff --git a/Source/DafnyStandardLibraries/src/Std/Unicode/UnicodeStringsWithUnicodeChar.dfy b/Source/DafnyStandardLibraries/src/Std/Unicode/UnicodeStringsWithUnicodeChar.dfy index 7b73339301d..e0ea3c7871c 100644 --- a/Source/DafnyStandardLibraries/src/Std/Unicode/UnicodeStringsWithUnicodeChar.dfy +++ b/Source/DafnyStandardLibraries/src/Std/Unicode/UnicodeStringsWithUnicodeChar.dfy @@ -14,7 +14,7 @@ module Std.Unicode.UnicodeStringsWithUnicodeChar refines AbstractUnicodeStrings import Utf8EncodingForm import Utf16EncodingForm - lemma {:vcs_split_on_every_assert} CharIsUnicodeScalarValue(c: char) + lemma {:isolate_assertions} CharIsUnicodeScalarValue(c: char) ensures && var asBits := c as bv24; && asBits <= 0x10_FFFF diff --git a/Source/IntegrationTests/TestFiles/LitTests/LitTest/VSI-Benchmarks/b4.dfy b/Source/IntegrationTests/TestFiles/LitTests/LitTest/VSI-Benchmarks/b4.dfy index 6142067e8a1..82f3a11b505 100644 --- a/Source/IntegrationTests/TestFiles/LitTests/LitTest/VSI-Benchmarks/b4.dfy +++ b/Source/IntegrationTests/TestFiles/LitTests/LitTest/VSI-Benchmarks/b4.dfy @@ -136,7 +136,7 @@ class Map { } // Removes key from the domain of M (and does nothing if key wasn't in M to begin with) - method {:resource_limit "3e6"} {:vcs_split_on_every_assert} Remove(key: Key) + method {:resource_limit "3e6"} {:isolate_assertions} Remove(key: Key) requires Valid() modifies Repr ensures Valid() && fresh(Repr - old(Repr)) diff --git a/Source/IntegrationTests/TestFiles/LitTests/LitTest/VerifyThis2015/Problem1.dfy b/Source/IntegrationTests/TestFiles/LitTests/LitTest/VerifyThis2015/Problem1.dfy index c8be9fa25d1..47d330ab1b8 100644 --- a/Source/IntegrationTests/TestFiles/LitTests/LitTest/VerifyThis2015/Problem1.dfy +++ b/Source/IntegrationTests/TestFiles/LitTests/LitTest/VerifyThis2015/Problem1.dfy @@ -156,7 +156,7 @@ lemma Prefix(pat: seq, a: seq) ensures IsRelaxedPrefixAux(pat, a, 0) { } -lemma {:vcs_split_on_every_assert} Same2(pat: seq, a: seq) +lemma {:isolate_assertions} Same2(pat: seq, a: seq) requires IsRelaxedPrefixAux(pat, a, 1) ensures IRP_Alt(pat, a) { diff --git a/Source/IntegrationTests/TestFiles/LitTests/LitTest/concurrency/09-CounterNoStateMachine.dfy b/Source/IntegrationTests/TestFiles/LitTests/LitTest/concurrency/09-CounterNoStateMachine.dfy index d9842b142f2..f8d015415ab 100644 --- a/Source/IntegrationTests/TestFiles/LitTests/LitTest/concurrency/09-CounterNoStateMachine.dfy +++ b/Source/IntegrationTests/TestFiles/LitTests/LitTest/concurrency/09-CounterNoStateMachine.dfy @@ -690,7 +690,7 @@ class ClaimIncreasingCounterGreaterThanConstant extends OwnedObject { // assert!(i == 10); // } -method {:vcs_split_on_every_assert} Incrementer(universe: Universe, running: Thread, counter: IncreasingCounter, remaining: Integer) +method {:isolate_assertions} Incrementer(universe: Universe, running: Thread, counter: IncreasingCounter, remaining: Integer) requires universe.globalInv() && running in universe.content && counter in universe.content && remaining in universe.content requires remaining.owner == running && remaining.value == 10 // USER precondition modifies universe, universe.content diff --git a/Source/IntegrationTests/TestFiles/LitTests/LitTest/concurrency/10-SequenceInvariant.dfy b/Source/IntegrationTests/TestFiles/LitTests/LitTest/concurrency/10-SequenceInvariant.dfy index 
79206c776d2..098d30d3d4b 100644 --- a/Source/IntegrationTests/TestFiles/LitTests/LitTest/concurrency/10-SequenceInvariant.dfy +++ b/Source/IntegrationTests/TestFiles/LitTests/LitTest/concurrency/10-SequenceInvariant.dfy @@ -614,7 +614,7 @@ class ClaimIncreasingCounterGreaterThanConstant extends OwnedObject { // assert!(i == 10); // } -method {:vcs_split_on_every_assert} Incrementer(universe: Universe, running: Thread, counter: IncreasingCounter, remaining: Integer) +method {:isolate_assertions} Incrementer(universe: Universe, running: Thread, counter: IncreasingCounter, remaining: Integer) requires universe.globalInv() && running in universe.content && counter in universe.content && remaining in universe.content requires remaining.owner == running && remaining.value == 10 // USER precondition modifies universe, universe.content diff --git a/Source/IntegrationTests/TestFiles/LitTests/LitTest/concurrency/11-MutexGuard2.legacy.dfy b/Source/IntegrationTests/TestFiles/LitTests/LitTest/concurrency/11-MutexGuard2.legacy.dfy index a70667f5843..88ba54711bc 100644 --- a/Source/IntegrationTests/TestFiles/LitTests/LitTest/concurrency/11-MutexGuard2.legacy.dfy +++ b/Source/IntegrationTests/TestFiles/LitTests/LitTest/concurrency/11-MutexGuard2.legacy.dfy @@ -650,7 +650,7 @@ method Acquire(universe: Universe, running: Thread, mutex: Mutex) // drop(l); // deallocate a guard == release mutex // } -method {:vcs_split_on_every_assert} SetData(universe: Universe, running: Thread, mutex: Mutex) +method {:isolate_assertions} SetData(universe: Universe, running: Thread, mutex: Mutex) requires universe.globalInv() && running in universe.content && mutex in universe.content modifies universe, universe.content decreases * diff --git a/Source/IntegrationTests/TestFiles/LitTests/LitTest/concurrency/12-MutexLifetime-short.dfy b/Source/IntegrationTests/TestFiles/LitTests/LitTest/concurrency/12-MutexLifetime-short.dfy index 35e3fcd2ab0..c213afc2470 100644 --- a/Source/IntegrationTests/TestFiles/LitTests/LitTest/concurrency/12-MutexLifetime-short.dfy +++ b/Source/IntegrationTests/TestFiles/LitTests/LitTest/concurrency/12-MutexLifetime-short.dfy @@ -886,7 +886,7 @@ class MutexGuardU32 extends OwnedObject { } // MutexGuardU32 - constructor {:vcs_split_on_every_assert} (ghost universe: Universe, ghost running: Thread, ghost scope: Lifetime, mutex: Mutex, ghost mutexScope: Lifetime) + constructor {:isolate_assertions} (ghost universe: Universe, ghost running: Thread, ghost scope: Lifetime, mutex: Mutex, ghost mutexScope: Lifetime) requires universe.globalInv() && { running, scope, mutex, mutexScope } <= universe.content requires scope.owner == running && mutexScope.owner == running && scope != mutexScope requires universe.outlives(mutex.lifetime, mutexScope) && universe.outlives(mutexScope, scope) && scope.unused() diff --git a/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny0/Termination.dfy b/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny0/Termination.dfy index fc00d0ae492..331c1252809 100644 --- a/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny0/Termination.dfy +++ b/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny0/Termination.dfy @@ -487,7 +487,7 @@ datatype Tree = Empty | Node(root: int, left: Tree, right: Tree) } } - lemma {:induction this} {:vcs_split_on_every_assert} EvensSumToEven() // explicitly use "this" as quantified over by induction hypothesis + lemma {:induction this} {:isolate_assertions} EvensSumToEven() // explicitly use "this" as quantified over by induction hypothesis 
requires forall u :: u in Elements() ==> u % 2 == 0 ensures Sum() % 2 == 0 // auto: decreases this @@ -500,7 +500,7 @@ datatype Tree = Empty | Node(root: int, left: Tree, right: Tree) right.EvensSumToEven(); } - lemma {:vcs_split_on_every_assert} EvensSumToEvenAutoInduction() // {:induction this} is the default + lemma {:isolate_assertions} EvensSumToEvenAutoInduction() // {:induction this} is the default requires forall u :: u in Elements() ==> u % 2 == 0 ensures Sum() % 2 == 0 // auto: decreases this diff --git a/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny1/MoreInduction.dfy b/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny1/MoreInduction.dfy index 8bcae1a537b..6c69c586f82 100644 --- a/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny1/MoreInduction.dfy +++ b/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny1/MoreInduction.dfy @@ -48,7 +48,7 @@ lemma Theorem(list: List) Lemma(list, Nil); } -lemma {:vcs_split_on_every_assert} Lemma(list: List, ext: List) +lemma {:isolate_assertions} Lemma(list: List, ext: List) requires IsFlat(ext) ensures ToSeq(list) + ToSeq(ext) == ToSeq(Flatten(list, ext)) { diff --git a/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny1/Rippling.legacy.dfy b/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny1/Rippling.legacy.dfy index 544f10d2f8f..8fffa7ceed4 100644 --- a/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny1/Rippling.legacy.dfy +++ b/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny1/Rippling.legacy.dfy @@ -594,7 +594,7 @@ lemma P2_alt(n: Nat, xs: List, ys: List) // --------- -lemma {:vcs_split_on_every_assert} Lemma_RevConcat(xs: List, ys: List) +lemma {:isolate_assertions} Lemma_RevConcat(xs: List, ys: List) ensures reverse(concat(xs, ys)) == concat(reverse(ys), reverse(xs)); { match (xs) { diff --git a/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny2/MinWindowMax.dfy b/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny2/MinWindowMax.dfy index dccc5f9d5ef..460d7bce2c7 100644 --- a/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny2/MinWindowMax.dfy +++ b/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny2/MinWindowMax.dfy @@ -25,7 +25,7 @@ method Main() { print "Window size 5: min window-max is ", m, "\n"; // 3 } -method {:resource_limit "100e6"} {:vcs_split_on_every_assert} MinimumWindowMax(a: array, W: int) returns (m: int, start: int) +method {:resource_limit "100e6"} {:isolate_assertions} MinimumWindowMax(a: array, W: int) returns (m: int, start: int) requires 1 <= W <= a.Length ensures 0 <= start <= a.Length - W ensures m == Max(a, start, W) diff --git a/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny2/SmallestMissingNumber-functional.dfy b/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny2/SmallestMissingNumber-functional.dfy index 6198c83753d..e6b4d5b59b0 100644 --- a/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny2/SmallestMissingNumber-functional.dfy +++ b/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny2/SmallestMissingNumber-functional.dfy @@ -216,7 +216,7 @@ ghost function IntRange(lo: nat, len: nat): set // ----- Proofs of alternative versions -lemma {:vcs_split_on_every_assert} SMN'_Correct(xs: List, n: nat, len: nat) +lemma {:isolate_assertions} SMN'_Correct(xs: List, n: nat, len: nat) requires NoDuplicates(xs) requires forall x :: x in Elements(xs) ==> n <= x requires len == Length(xs) @@ -251,7 +251,7 @@ lemma {:vcs_split_on_every_assert} SMN'_Correct(xs: List, n: nat, len: nat) } } -lemma 
{:vcs_split_on_every_assert} SMN''_Correct(xs: List, n: nat, len: nat) +lemma {:isolate_assertions} SMN''_Correct(xs: List, n: nat, len: nat) requires NoDuplicates(xs) requires forall x :: x in Elements(xs) ==> n <= x requires len == Length(xs) diff --git a/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny2/SnapshotableTrees.dfy b/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny2/SnapshotableTrees.dfy index a9d5459420b..6e30255ef1b 100644 --- a/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny2/SnapshotableTrees.dfy +++ b/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny2/SnapshotableTrees.dfy @@ -580,7 +580,7 @@ module SnapTree { } // private - static method {:vcs_split_on_every_assert} Push(stIn: List, ghost n: int, p: Node, ghost C: seq, ghost Nodes: set) returns (st: List) + static method {:isolate_assertions} Push(stIn: List, ghost n: int, p: Node, ghost C: seq, ghost Nodes: set) returns (st: List) requires p in Nodes && p.Repr <= Nodes && p.NodeValid() requires 0 <= n <= |C| requires p.Contents <= C[n..] @@ -607,7 +607,7 @@ module SnapTree { case Cons(y, rest) => x := y.data; } - method {:vcs_split_on_every_assert} MoveNext() returns (hasCurrent: bool) + method {:isolate_assertions} MoveNext() returns (hasCurrent: bool) requires Valid() && N <= |Contents| modifies IterRepr ensures Valid() && fresh(IterRepr - old(IterRepr)) && T.Repr == old(T.Repr) diff --git a/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny4/ACL2-extractor.dfy b/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny4/ACL2-extractor.dfy index 4cbfca1bbd9..0d3eca21e51 100644 --- a/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny4/ACL2-extractor.dfy +++ b/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny4/ACL2-extractor.dfy @@ -141,7 +141,7 @@ lemma EqualElementsMakeEqualLists(xs: List, ys: List) // here is the theorem, but applied to the ith element -lemma {:vcs_split_on_every_assert} ExtractorLemma(i: int, xs: List) +lemma {:isolate_assertions} ExtractorLemma(i: int, xs: List) requires 0 <= i < length(xs); ensures nth(i, xtr(nats(length(xs)), xs)) == nth(i, rev(xs)); { diff --git a/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny4/SoftwareFoundations-Basics.dfy b/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny4/SoftwareFoundations-Basics.dfy index a93c348f0b7..41baa0682b7 100644 --- a/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny4/SoftwareFoundations-Basics.dfy +++ b/Source/IntegrationTests/TestFiles/LitTests/LitTest/dafny4/SoftwareFoundations-Basics.dfy @@ -269,7 +269,7 @@ lemma plus_lemma(a: Nat, b: Nat, c: Nat) {} // This lemma expresses: m*(2*n) == (2*m)*n -lemma {:vcs_split_on_every_assert} mult_lemma(m: Nat, n: Nat) +lemma {:isolate_assertions} mult_lemma(m: Nat, n: Nat) ensures mult(m, plus(n, n)) == mult(plus(m, m), n) { match m { diff --git a/Source/IntegrationTests/TestFiles/LitTests/LitTest/git-issues/git-issue-3855.dfy b/Source/IntegrationTests/TestFiles/LitTests/LitTest/git-issues/git-issue-3855.dfy index c064909c154..ae5046374ba 100644 --- a/Source/IntegrationTests/TestFiles/LitTests/LitTest/git-issues/git-issue-3855.dfy +++ b/Source/IntegrationTests/TestFiles/LitTests/LitTest/git-issues/git-issue-3855.dfy @@ -637,7 +637,7 @@ function partitionOfJustHeapRegions(os : set) : (partition : map) assert r % 2 == 0; } -method {:vcs_split_on_every_assert} FoldL_Use_Direct_lambda(xs: List) +method {:isolate_assertions} FoldL_Use_Direct_lambda(xs: List) { var f := (b,a) => 3*b + 2*a; var r := foldl(f, 0, xs); diff --git 
a/Source/IntegrationTests/TestFiles/LitTests/LitTest/lambdas/MatrixAssoc.dfy b/Source/IntegrationTests/TestFiles/LitTests/LitTest/lambdas/MatrixAssoc.dfy index bd326b4fed9..0c777579f63 100644 --- a/Source/IntegrationTests/TestFiles/LitTests/LitTest/lambdas/MatrixAssoc.dfy +++ b/Source/IntegrationTests/TestFiles/LitTests/LitTest/lambdas/MatrixAssoc.dfy @@ -170,7 +170,7 @@ lemma sum_assoc_mult(a: Matrix, b: Matrix, c: Matrix, i: Index, j: Index) } /** (a * (b * c))(i, j) == ((a * b) * c)(i, j) */ -lemma {:vcs_split_on_every_assert} mult_assoc_ij(a: Matrix, b: Matrix, c: Matrix, i: Index, j: Index) +lemma {:isolate_assertions} mult_assoc_ij(a: Matrix, b: Matrix, c: Matrix, i: Index, j: Index) ensures mult(mult(a, b), c)(i)(j) == mult(a, mult(b, c))(i)(j) { calc { diff --git a/Source/IntegrationTests/TestFiles/LitTests/LitTest/verification/isolate-assertions.dfy b/Source/IntegrationTests/TestFiles/LitTests/LitTest/verification/isolate-assertions.dfy new file mode 100644 index 00000000000..327bc1ea5b0 --- /dev/null +++ b/Source/IntegrationTests/TestFiles/LitTests/LitTest/verification/isolate-assertions.dfy @@ -0,0 +1,13 @@ +// RUN: %verify --progress --cores=1 %s &> %t.raw +// RUN: %sed 's/time: \d*ms/redacted/g' "%t".raw > %t +// RUN: %diff "%s.expect" "%t" + +method {:isolate_assertions} Foo() { + assert true; + assert true; +} + +method Bar() { + assert true; + assert true; +} \ No newline at end of file diff --git a/Source/IntegrationTests/TestFiles/LitTests/LitTest/verification/isolate-assertions.dfy.expect b/Source/IntegrationTests/TestFiles/LitTests/LitTest/verification/isolate-assertions.dfy.expect new file mode 100644 index 00000000000..524e0c4e9e8 --- /dev/null +++ b/Source/IntegrationTests/TestFiles/LitTests/LitTest/verification/isolate-assertions.dfy.expect @@ -0,0 +1,8 @@ +Verified 0/2 symbols. Waiting for Foo to verify. +Verified part 1/3 of Foo, on line 5 (redacted, resource count: 8.7E+002) +Verified part 2/3 of Foo, on line 6 (redacted, resource count: 3.1E+003) +Verified part 3/3 of Foo, on line 7 (redacted, resource count: 2.8E+003) +Verified 1/2 symbols. Waiting for Bar to verify. +Verified part 1/1 of Bar, on line 10 (redacted, resource count: 3.1E+003) + +Dafny program verifier finished with 4 verified, 0 errors diff --git a/Source/IntegrationTests/TestFiles/LitTests/LitTest/vstte2012/BreadthFirstSearch.dfy b/Source/IntegrationTests/TestFiles/LitTests/LitTest/vstte2012/BreadthFirstSearch.dfy index a7707a5ecef..79489ae4c5e 100644 --- a/Source/IntegrationTests/TestFiles/LitTests/LitTest/vstte2012/BreadthFirstSearch.dfy +++ b/Source/IntegrationTests/TestFiles/LitTests/LitTest/vstte2012/BreadthFirstSearch.dfy @@ -28,7 +28,7 @@ class BreadthFirstSearch // existential quantifier and no ghost out-parameter. 
method {:resource_limit "8e6"} - {:vcs_split_on_every_assert} + {:isolate_assertions} BFS(source: Vertex, dest: Vertex, ghost AllVertices: set) returns (d: int, ghost path: List) // source and dest are among AllVertices @@ -199,7 +199,7 @@ class BreadthFirstSearch } } - lemma {:vcs_split_on_every_assert} IsReachFixpoint(source: Vertex, m: nat, n: nat, AllVertices: set) + lemma {:isolate_assertions} IsReachFixpoint(source: Vertex, m: nat, n: nat, AllVertices: set) requires R(source, m, AllVertices) == R(source, m+1, AllVertices); requires m <= n; ensures R(source, m, AllVertices) == R(source, n, AllVertices); diff --git a/Source/IntegrationTests/TestFiles/LitTests/LitTest/vstte2012/Tree.dfy b/Source/IntegrationTests/TestFiles/LitTests/LitTest/vstte2012/Tree.dfy index 0232e4ed78c..37bb623175e 100644 --- a/Source/IntegrationTests/TestFiles/LitTests/LitTest/vstte2012/Tree.dfy +++ b/Source/IntegrationTests/TestFiles/LitTests/LitTest/vstte2012/Tree.dfy @@ -40,7 +40,7 @@ ghost function toList(d: int, t: Tree): seq // property; converting the resulting tree back into a // sequence yields exactly that portion of the input // sequence that has been consumed. -function {:vcs_split_on_every_assert} build_rec(d: int, s: seq): Result +function {:isolate_assertions} build_rec(d: int, s: seq): Result ensures build_rec(d, s).Res? ==> |build_rec(d, s).sOut| < |s| && build_rec(d, s).sOut == s[|s|-|build_rec(d, s).sOut|..] diff --git a/docs/DafnyRef/Attributes.md b/docs/DafnyRef/Attributes.md index bfc7fb70f36..60748d37ae3 100644 --- a/docs/DafnyRef/Attributes.md +++ b/docs/DafnyRef/Attributes.md @@ -398,7 +398,7 @@ in which methods or functions are verified (default: N = 1). `{:resource_limit N}` limits the verifier resource usage to verify the method or function to `N`. This is the per-method equivalent of the command-line flag `/rlimit:N` or `--resource-limit N`. -If using [`{:vcs_split_on_every_assert}`](#sec-vcs_split_on_every_assert) as well, the limit will be set for each assertion. +If using [`{:isolate_assertions}`](#sec-isolate_assertions) as well, the limit will be set for each assertion. The attribute `{:rlimit N}` is also available, and limits the verifier resource usage to verify the method or function to `N * 1000`. This version is deprecated, however. @@ -422,10 +422,10 @@ To give orders of magnitude about resource usage, here is a list of examples ind } ``` -* 40K total resource usage using [`{:vcs_split_on_every_assert}`](#sec-vcs_split_on_every_assert) +* 40K total resource usage using [`{:isolate_assertions}`](#sec-isolate_assertions) ```dafny - method {:vcs_split_on_every_assert} f(a: bool, b: bool) { + method {:isolate_assertions} f(a: bool, b: bool) { assert a: (a ==> b) <==> (!b ==> !a); assert b: (a ==> b) <==> (!b ==> !a); assert c: (a ==> b) <==> (!b ==> !a); @@ -608,7 +608,7 @@ The [assertion batch](#sec-assertion-batches) of a method will not be split unless the cost of an [assertion batch](#sec-assertion-batches) exceeds this number, defaults to 2000.0. In [keep-going mode](#sec-vcs_max_keep_going_splits), only applies to the first round. -If [`{:vcs_split_on_every_assert}`](#sec-vcs_split_on_every_assert) is set, then this parameter is useless. +If [`{:isolate_assertions}`](#sec-isolate_assertions) is set, then this parameter is useless. ### 11.2.23. `{:vcs_max_keep_going_splits N}` {#sec-vcs_max_keep_going_splits} @@ -619,7 +619,7 @@ until we succeed proving them, or there is only one single assertion that it timeouts (in which case an error is reported for that assertion). 
Defaults to 1. -If [`{:vcs_split_on_every_assert}`](#sec-vcs_split_on_every_assert) is set, then this parameter is useless. +If [`{:isolate_assertions}`](#sec-isolate_assertions) is set, then this parameter is useless. ### 11.2.24. `{:vcs_max_splits N}` {#sec-vcs_max_splits} Per-method version of the command-line option `/vcsMaxSplits`. Maximal number of [assertion batches](#sec-assertion-batches) generated for this method. In [keep-going mode](#sec-vcs_max_keep_going_splits), only applies to the first round. Defaults to 1. -If [`{:vcs_split_on_every_assert}`](#sec-vcs_split_on_every_assert) is set, then this parameter is useless. +If [`{:isolate_assertions}`](#sec-isolate_assertions) is set, then this parameter is useless. -### 11.2.25. `{:vcs_split_on_every_assert}` {#sec-vcs_split_on_every_assert} -Per-method version of the command-line option `/vcsSplitOnEveryAssert`. +### 11.2.25. `{:isolate_assertions}` {#sec-isolate_assertions} +Per-method version of the command-line option `/vcsSplitOnEveryAssert`. In the first and only verification round, this option will split the original [assertion batch](#sec-assertion-batches) into one assertion batch per assertion. diff --git a/docs/DafnyRef/UserGuide.md b/docs/DafnyRef/UserGuide.md index 4c1f2d49f38..6e779312294 100644 --- a/docs/DafnyRef/UserGuide.md +++ b/docs/DafnyRef/UserGuide.md @@ -1434,7 +1434,7 @@ The fundamental unit of verification in `dafny` is an _assertion batch_, which c * If the verifier says it is correct,[^smt-encoding] it means that all the assertions hold. * If the verifier returns a counterexample, this counterexample is used to determine both the failing assertion and the failing path. In order to retrieve additional failing assertions, `dafny` will again query the verifier after turning previously failed assertions into assumptions.[^example-assertion-turned-into-assumption] [^caveat-about-assertion-and-assumption] -* If the verifier returns `unknown` or times out, or even preemptively for difficult assertions or to reduce the chance that the verifier will ‘be confused’ by the many assertions in a large batch, `dafny` may partition the assertions into smaller batches[^smaller-batches]. An extreme case is the use of the `/vcsSplitOnEveryAssert` command-line option or the [`{:vcs_split_on_every_assert}` attribute](#sec-vcs_split_on_every_assert), which causes `dafny` to make one batch for each assertion. +* If the verifier returns `unknown` or times out, or even preemptively for difficult assertions or to reduce the chance that the verifier will ‘be confused’ by the many assertions in a large batch, `dafny` may partition the assertions into smaller batches[^smaller-batches]. An extreme case is the use of the `/vcsSplitOnEveryAssert` command-line option or the [`{:isolate_assertions}` attribute](#sec-isolate_assertions), which causes `dafny` to make one batch for each assertion. [^smt-encoding]: The formula sent to the underlying SMT solver is the negation of the formula that the verifier wants to prove - also called a VC or verification condition. Hence, if the SMT solver returns "unsat", it means that the SMT formula is always false, meaning the verifier's formula is always true. On the other side, if the SMT solver returns "sat", it means that the SMT formula can be made true with a special variable assignment, which means that the verifier's formula is false under that same variable assignment, meaning it's a counter-example for the verifier. 
In practice and because of quantifiers, the SMT solver will usually return "unknown" instead of "sat", but will still provide a variable assignment that it couldn't prove that it does not make the formula true. `dafny` reports it as a "counter-example" but it might not be a real counter-example, only provide hints about what `dafny` knows. @@ -1450,7 +1450,7 @@ Here is how you can control how `dafny` partitions assertions into batches. * [`{:focus}`](#sec-focus) on an assert generates a separate assertion batch for the assertions of the enclosing block. * [`{:split_here}`](#sec-split_here) on an assert generates a separate assertion batch for assertions after this point. -* [`{:vcs_split_on_every_assert}`](#sec-vcs_split_on_every_assert) on a function or a method generates one assertion batch per assertion +* [`{:isolate_assertions}`](#sec-isolate_assertions) on a function or a method generates one assertion batch per assertion We discourage the use of the following _heuristics attributes_ to partition assertions into batches. The effect of these attributes may vary, because they are low-level attributes and tune low-level heuristics, and will result in splits that could be manually controlled anyway. @@ -2679,7 +2679,7 @@ Legacy options: * `-vcsSplitOnEveryAssert` - prove each (explicit or implicit) assertion in each procedure separately. See also the attribute - [`{:vcs_split_on_every_assert}`](#sec-vcs_split_on_every_assert) for + [`{:isolate_assertions}`](#sec-isolate_assertions) for restricting this option on specific procedures. By default, Boogie attempts to prove that every assertion in a given procedure holds all at once, in a single query to an SMT solver. This usually performs diff --git a/docs/VerificationOptimization/VerificationOptimization.md b/docs/VerificationOptimization/VerificationOptimization.md index d7052b7befa..aa83919a5ce 100644 --- a/docs/VerificationOptimization/VerificationOptimization.md +++ b/docs/VerificationOptimization/VerificationOptimization.md @@ -78,9 +78,9 @@ Dafny provides several attributes that tell it to verify certain assertions sepa * The [`{:focus}`](../DafnyRef/DafnyRef#sec-focus) attribute on an `assert` or `assume` statement tells Dafny to verify all assertions in the containing block, and everything that _always_ follows it, in one batch, and the rest of the definition in another batch (potentially subject to further splitting due to other occurrences of `{:focus}`). -* The [`{:vcs_split_on_every_assert}`](../DafnyRef/DafnyRef#sec-vcs_split_on_every_assert) attribute on a definition tells Dafny to verify each assertion in that definition in its own batch. You can think of this as being similar to having many `{:focus}` or `{:split_here}` occurrences, including on assertions that arise implicitly, such as preconditions of calls or side conditions on partial operations. +* The [`{:isolate_assertions}`](../DafnyRef/DafnyRef#sec-isolate_assertions) attribute on a definition tells Dafny to verify each assertion in that definition in its own batch. You can think of this as being similar to having many `{:focus}` or `{:split_here}` occurrences, including on assertions that arise implicitly, such as preconditions of calls or side conditions on partial operations. -We recommend using `{:vcs_split_on_every_assert}` over either of the other attributes under most circumstances. It can also be specified globally, applying to all definitions, with `dafny verify --isolate-assertions`. 
If you're using the legacy options without top-level commands, use the `/vcsSplitOnEveryAssert` flag, instead. When Dafny verifies a definition in smaller batches, the VS Code plugin will display performance statistics for each batch when you hover over a particular definition. @@ -91,7 +91,7 @@ Consider the following method, which proves a few simple arithmetic facts. const TWO_TO_THE_32: int := 0x1_00000000 newtype uint32 = x: int | 0 <= x < TWO_TO_THE_32 -method {:vcs_split_on_every_assert} ProveSomeArithmetic(x: uint32) { +method {:isolate_assertions} ProveSomeArithmetic(x: uint32) { assert forall y :: y * y != 115249; // 115249 is prime assert (x as bv32) as uint32 <= x; assert x < 65535 ==> x * 2 == x + x; @@ -104,7 +104,7 @@ Hovering over the name of the definition will show you performance statistics fo Although the time taken to complete verification is ultimately the factor that most impacts the development cycle, this time can depend on CPU frequency scaling, other processes, etc., so Z3 provides a separate measurement of verification difficulty known as a _resource count_. When running the same build of Z3 on the same platform multiple times, the resource count is deterministic, unlike time. Therefore, the IDE displays performance information in terms of Resource Units (RU). -Hovering over a specific assertion will show you performance statistics for the specific batch containing that assertion (and, in the case of `{:vcs_split_on_every_assert}`, only that assertion). +Hovering over a specific assertion will show you performance statistics for the specific batch containing that assertion (and, in the case of `{:isolate_assertions}`, only that assertion). ![image](hover-assertion.png) diff --git a/docs/dev/news/5247.feat b/docs/dev/news/5247.feat new file mode 100644 index 00000000000..b203266de28 --- /dev/null +++ b/docs/dev/news/5247.feat @@ -0,0 +1 @@ +Add the attribute `{:isolate_assertions}`, which does the same as `{:vcs_split_on_every_assert}`, and deprecate `{:vcs_split_on_every_assert}`. \ No newline at end of file
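
For reference, a minimal usage sketch of the new attribute (not taken from this patch; the method name, bound, and resource figure are illustrative). As described in the Attributes.md change above, `{:isolate_assertions}` puts each assertion in its own assertion batch, and a `{:resource_limit}` on the same declaration then applies to each assertion separately:

```dafny
// Each assert below is verified in its own assertion batch because of
// {:isolate_assertions}; the "1e6" resource budget then applies to each
// batch rather than to the method as a whole.
// (Method name and asserted facts are illustrative, not from the patch.)
method {:isolate_assertions} {:resource_limit "1e6"} SplitExample(x: int)
  requires 0 <= x < 100
{
  assert x + 1 > x;      // checked in its own batch
  assert 2 * x == x + x; // checked in its own batch
  assert x < 200;        // follows from the precondition, own batch
}
```

The same splitting can be applied globally with `dafny verify --isolate-assertions`, or with the legacy `/vcsSplitOnEveryAssert` flag, as noted in the VerificationOptimization.md change.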