diff --git a/.circleci/config.yml b/.circleci/config.yml index 2fd81038..b17ac198 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,7 +1,7 @@ version: 2.1 orbs: - go: circleci/go@1.7.1 + go: circleci/go@1.7.3 workflows: circleci_build_and_test: @@ -15,7 +15,7 @@ workflows: jobs: test: machine: - image: "ubuntu-2004:202104-01" + image: "ubuntu-2204:2022.04.2" parameters: go_version: type: string diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 00000000..8858d55b --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,9 @@ +# .git-blame-ignore-revs +# Formatting fixes for transaction/ and abi/ +09351b56fff3459b5d4701ae681ec45b4b62d945 +# Formatting fixes in test/ +23c3faf0701b0d5c07c57dd7fca4ed77449f3209 +# Formatting fixes in logic/ and crypto/ +ebccd4939d84d3233e6fbb5fddb2332549a0f323 +# Formatting fixes in types/ +f99ffb34b9955fdaa366326fe97da49e761e9367 diff --git a/.github/workflows/create-release-pr.yml b/.github/workflows/create-release-pr.yml new file mode 100644 index 00000000..640c8159 --- /dev/null +++ b/.github/workflows/create-release-pr.yml @@ -0,0 +1,224 @@ +name: Create Release PR + +on: + workflow_dispatch: + inputs: + release_version: + description: 'The release_version used for the release branch name, e.g. release/vx.x.x' + default: 'vx.x.x' + required: true + type: string + pre_release_version: + description: "Pre-Release version, e.g. 'beta.1', will be added behind the release_version as the tag." + required: false + type: string + +env: + RELEASE_VERSION: ${{ inputs.release_version }} + PRE_RELEASE_VERSION: ${{ inputs.pre_release_version }} + RELEASE_BRANCH: release/${{ inputs.release_version }} + +jobs: + create-release-pr: + runs-on: ubuntu-latest + + steps: + - name: Set Release Version and Branch to Check Out + id: set-release + run: | + if [[ $RELEASE_VERSION =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then + if [[ $PRE_RELEASE_VERSION =~ ^[a-z.0-9]+$ ]]; then + echo "release-tag: $RELEASE_VERSION-$PRE_RELEASE_VERSION" + echo "release-tag=$RELEASE_VERSION-$PRE_RELEASE_VERSION" >> $GITHUB_OUTPUT + elif [[ -n $PRE_RELEASE_VERSION ]]; then + echo "Input pre_release_version is not empty, but does not match the regex pattern ^[a-z.0-9]+$" + exit 1 + else + echo "release-tag: $RELEASE_VERSION" + echo "release-tag=$RELEASE_VERSION" >> $GITHUB_OUTPUT + fi + else + echo "Version input doesn't match the regex pattern ^v[0-9]+\.[0-9]+\.[0-9]+$" + exit 1 + fi + + - name: Checkout + uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Create Release Branch if it does not exist + run: | + if ! git show-ref --verify --quiet "refs/remotes/origin/$RELEASE_BRANCH"; then + git checkout -b $RELEASE_BRANCH + git push --set-upstream origin $RELEASE_BRANCH + elif [[ $(git rev-parse --abbrev-ref HEAD) != "$RELEASE_BRANCH" ]]; then + echo "Current Branch: $(git rev-parse --abbrev-ref HEAD)" + echo "Release branch exists, make sure you're using the workflow from the release branch or delete the existing release branch." + exit 1 + else + echo "Release branch exists and used as workflow ref." 
+ fi + + - name: Get Latest Release + id: get-release + run: | + if [[ -n $PRE_RELEASE_VERSION ]]; then + echo "Get the latest release" + tag=$(curl -L \ + --header "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/releases" | jq -r '.[0].tag_name') + echo "latest-tag=$tag" >> $GITHUB_OUTPUT + else + echo "Get the latest stable release" + tag=$(curl -L \ + --header "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/releases/latest" | jq -r '.tag_name') + echo "latest-tag=$tag" >> $GITHUB_OUTPUT + fi + + - name: Build Changelog + uses: mikepenz/release-changelog-builder-action@v3.7.2 + id: build-changelog + env: + PREVIOUS_VERSION: ${{ steps.get-release.outputs.latest-tag }} + with: + fromTag: ${{ env.PREVIOUS_VERSION }} + toTag: ${{ env.RELEASE_BRANCH }} + failOnError: true + configurationJson: | + { + "categories": [ + { + "title": "## New Features", + "labels": [ + "New Feature" + ] + }, + { + "title": "## Enhancements", + "labels": [ + "Enhancement" + ] + }, + { + "title": "## Bug Fixes", + "labels": [ + "Bug-Fix" + ] + }, + { + "title": "## Not Yet Enabled", + "labels": [ + "Not-Yet-Enabled" + ] + } + ], + "ignore_labels": [ + "Skip-Release-Notes" + ], + "sort": { + "order": "ASC", + "on_property": "mergedAt" + }, + "template": "#{{CHANGELOG}}", + "pr_template": "- #{{TITLE}} by @#{{AUTHOR}} in ##{{NUMBER}}" + } + + - name: Update Changelog + if: ${{ env.PRE_RELEASE_VERSION == '' }} + env: + CHANGELOG_CONTENT: ${{ steps.build-changelog.outputs.changelog }} + PREVIOUS_VERSION: ${{ steps.get-release.outputs.latest-tag }} + run: | + echo -e "# ${RELEASE_VERSION}\n\n${CHANGELOG_CONTENT}**Full Changelog**: https://github.com/${{ github.repository }}/compare/${PREVIOUS_VERSION}...${RELEASE_VERSION}\n" | cat - CHANGELOG.md > temp && mv temp CHANGELOG.md + + - name: Commit Changes + uses: EndBug/add-and-commit@v9.1.3 + env: + RELEASE_TAG: ${{ steps.set-release.outputs.release-tag }} + with: + message: "bump up version to ${{ env.RELEASE_TAG }}" + + - name: Create Pull Request to Master + env: + CHANGELOG_CONTENT: ${{ steps.build-changelog.outputs.changelog }} + PREVIOUS_VERSION: ${{ steps.get-release.outputs.latest-tag }} + GH_TOKEN: ${{ github.token }} + RELEASE_TAG: ${{ steps.set-release.outputs.release-tag }} + run: | + echo -e "# What's Changed\n\n${CHANGELOG_CONTENT}**Full Changelog**: https://github.com/${{ github.repository }}/compare/${PREVIOUS_VERSION}...${RELEASE_TAG}" > tmp_msg_body.txt + export msg_body=$(cat tmp_msg_body.txt) + rm tmp_msg_body.txt + # Note: There's an issue adding teams as reviewers, see https://github.com/cli/cli/issues/6395 + PULL_REQUEST_URL=$(gh pr create --base "master" \ + --title "FOR REVIEW ONLY: ${{ github.event.repository.name }} $RELEASE_TAG" \ + --label "Skip-Release-Notes" \ + --label "Team Hyper Flow" \ + --body "$msg_body" | tail -n 1) + if [[ $PULL_REQUEST_URL =~ ^https://github.com/${{ github.repository }}/pull/[0-9]+$ ]]; then + PULL_REQUEST_NUM=$(echo $PULL_REQUEST_URL | sed 's:.*/::') + echo "pull-request-master=$PULL_REQUEST_URL" >> $GITHUB_ENV + echo "pull-request-master-num=$PULL_REQUEST_NUM" >> $GITHUB_ENV + echo "Pull request to Master created: $PULL_REQUEST_URL" + else + echo "There was an issue creating the pull request to master branch." 
+ exit 1 + fi + + - name: Create Pull Request to Develop + if: ${{ env.PRE_RELEASE_VERSION == '' }} + env: + GH_TOKEN: ${{ github.token }} + RELEASE_TAG: ${{ steps.set-release.outputs.release-tag }} + run: | + # Note: There's an issue adding teams as reviewers, see https://github.com/cli/cli/issues/6395 + PULL_REQUEST_URL=$(gh pr create --base "develop" \ + --title "FOR REVIEW ONLY: Merge back ${{ github.event.repository.name }} $RELEASE_TAG to develop" \ + --label "Skip-Release-Notes" \ + --label "Team Hyper Flow" \ + --body "Merge back version changes to develop." | tail -n 1) + if [[ $PULL_REQUEST_URL =~ ^https://github.com/${{ github.repository }}/pull/[0-9]+$ ]]; then + echo "Pull request to Develop created: $PULL_REQUEST_URL" + DEVELOP_PR_MESSAGE="\nPull Request to develop: $PULL_REQUEST_URL" + echo "pull-request-develop-message=$DEVELOP_PR_MESSAGE" >> $GITHUB_ENV + else + echo "There was an issue creating the pull request to develop branch." + exit 1 + fi + + - name: Send Slack Message + id: slack + uses: slackapi/slack-github-action@v1.24.0 + env: + RELEASE_TAG: ${{ steps.set-release.outputs.release-tag }} + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} + SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK + SDK_DEPLOYMENT_URL: ${{ secrets.SDK_DEPLOYMENT_URL }} + with: + payload: | + { + "blocks": [ + { + "type": "header", + "text": { + "type": "plain_text", + "text": "${{ github.event.repository.name }} Release PR for ${{ env.RELEASE_TAG }}" + } + }, + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "*Approvals needed for*:\nPull Request to master: ${{ env.pull-request-master}}${{ env.pull-request-develop-message }}" + } + }, + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "*After approvals*\nDeploy SDK using the <${{ env.SDK_DEPLOYMENT_URL }}|Deployment Pipeline> with the following parameters:\n*SDK*: ${{ github.event.repository.name }}\n*RELEASE_PR_NUM*: ${{ env.pull-request-master-num }}\n*RELEASE_VERSION*: ${{ env.RELEASE_VERSION }}\n*PRE_RELEASE_VERSION*: ${{ env.PRE_RELEASE_VERSION }}" + } + } + ] + } diff --git a/.github/workflows/reviewdog.yml b/.github/workflows/reviewdog.yml new file mode 100644 index 00000000..afe5fb36 --- /dev/null +++ b/.github/workflows/reviewdog.yml @@ -0,0 +1,30 @@ +name: "Lint Checks" +on: + pull_request: +jobs: + lint: + runs-on: ubuntu-latest + steps: + - name: Check out code into the Go module directory + uses: actions/checkout@v2 + with: + fetch-depth: 0 # required for new-from-rev option in .golangci.yml + - name: Install specific golang + uses: actions/setup-go@v2 + with: + go-version: '1.17.13' + - name: Check format + run: test -z `go fmt ./...` + - name: Vet + run: go vet ./... 
+ - name: reviewdog-golangci-lint + uses: reviewdog/action-golangci-lint@v2 + with: + golangci_lint_version: "v1.47.3" + golangci_lint_flags: "-c .golangci.yml --allow-parallel-runners" + go_version: "1.17.13" + reporter: "github-pr-review" + tool_name: "Lint Errors" + level: "error" + fail_on_error: true + filter_mode: "nofilter" \ No newline at end of file diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 00000000..30061ae5 --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,79 @@ +run: + timeout: 5m + tests: false + skip-dirs: + # Don't run linter on generated files + - client/v2 + +linters: + disable-all: true + enable: + - deadcode + - errcheck + - exportloopref + - gci + - gofmt + - gosimple + - govet + - ineffassign + - misspell + - nilerr + - nolintlint + - revive + - staticcheck + - structcheck + - typecheck + - unused + - varcheck + +linters-settings: + gci: + sections: + - standard + - default + - prefix(github.com/algorand) + - prefix(github.com/algorand/go-algorand-sdk) + section-separators: + - newLine + nolintlint: + # require naming a specific linter X using //nolint:X + require-specific: true + # require comments like "//nolint:errcheck // Explanation of why we are ignoring linter here..." + require-explanation: true + +severity: + default-severity: error + +issues: + # Disable default exclude rules listed in `golangci-lint run --help` (selectively re-enable some below) + exclude-use-default: false + + # Maximum issues count per one linter. Set to 0 to disable. Default is 50. + max-issues-per-linter: 0 + + # Maximum count of issues with the same text. Set to 0 to disable. Default is 3. + max-same-issues: 0 + + exclude: + # ignore govet false positive fixed in https://github.com/golang/go/issues/45043 + - "sigchanyzer: misuse of unbuffered os.Signal channel as argument to signal.Notify" + # ignore golint false positive fixed in https://github.com/golang/lint/pull/487 + - "exported method (.*).Unwrap` should have comment or be unexported" + # ignore issues about the way we use _struct fields to define encoding settings + - "`_struct` is unused" + + # Enable some golangci-lint default exception rules: + # "EXC0001 errcheck: Almost all programs ignore errors on these functions and in most cases it's ok" + - Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). is not checked + # "EXC0005 staticcheck: Developers tend to write in C-style with an explicit 'break' in a 'switch', so it's ok to ignore" + - ineffective break statement. Did you mean to break out of the outer loop + + exclude-rules: + # Test utilities and helpers may have code that look unused, but is being used in another file + - path: test/helpers.go + text: "is unused" + - path: test/utilities.go + text: "is unused" + # Ignore unused fields in types that are copied from go-algorand + - path: types/ + text: "is unused" diff --git a/.test-env b/.test-env index df783a4f..049289d7 100644 --- a/.test-env +++ b/.test-env @@ -5,7 +5,7 @@ SDK_TESTING_HARNESS="test-harness" INSTALL_ONLY=0 -VERBOSE_HARNESS=0 +VERBOSE_HARNESS=1 # WARNING: If set to 1, new features will be LOST when downloading the test harness. # REGARDLESS: modified features are ALWAYS overwritten. 
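With the `nolintlint` settings above, a bare `//nolint` comment is itself a lint error: every suppression must name the linter it silences (require-specific) and carry a trailing justification (require-explanation). A minimal sketch of the expected directive shape (illustrative only; this function and file are hypothetical, not part of this diff):

```go
package example

import "os"

// flush demonstrates the suppression form the config above enforces: the
// directive names the specific linter (":errcheck") and ends with a
// "// ..." explanation.
func flush(f *os.File, data []byte) {
	f.Write(data) //nolint:errcheck // best-effort write; errors surface when the caller closes f
}
```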
diff --git a/CHANGELOG.md b/CHANGELOG.md index 777858fe..069559a6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,24 @@ +# v2.2.0 + +## Enhancements + +- DevOps: Update CODEOWNERS to only refer to the devops group by @onetechnical in #533 +- lint: Add `golangci-lint` to CI process by @algochoi in #534 +- algod: State delta endpoints by @algochoi in #538 +- enhancement: Verbose Test Harness by @tzaffi in #540 +- types: add consensus protocol and types. by @winder in #543 +- lint: Fix lint errors by @algochoi in #535 +- clients: Regenerate client. by @winder in #553 +- API: Regenerate code with the latest specification file (d559cb2f) by @github-actions[bot] in #556 +- docs: Readme fixes by @algochoi in #559 +- encoder: Update go-codec version. by @winder in #560 + +## Bug Fixes + +- bugfix: adding EvalDelta.SharedAccts by @tzaffi in #531 + +**Full Changelog**: https://github.com/algorand/go-algorand-sdk/compare/v2.1.0...v2.2.0 + # 2.1.0 ## What's Changed diff --git a/CODEOWNERS b/CODEOWNERS index aa26c82a..3c88c6e7 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,2 +1,2 @@ -.github/ @algorand/dev -.circleci/ @algorand/dev +.github/ @algorand/devops +.circleci/ @algorand/devops diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..966aa2cf --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,128 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, religion, or sexual identity +and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. + +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the + overall community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or + advances of any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email + address, without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. 
+ +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +conduct@algorand.com. +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series +of actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or +permanent ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. + +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within +the community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.0, available at +https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. + +Community Impact Guidelines were inspired by [Mozilla's code of conduct +enforcement ladder](https://github.com/mozilla/diversity). + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see the FAQ at +https://www.contributor-covenant.org/faq. Translations are available at +https://www.contributor-covenant.org/translations. diff --git a/Makefile b/Makefile index 239a3a85..15ee842c 100644 --- a/Makefile +++ b/Makefile @@ -6,7 +6,8 @@ INTEGRATIONS_TAGS := "$(shell awk '{print $2}' test/integration.tags | paste -s GO_IMAGE := golang:$(subst go,,$(shell go version | cut -d' ' -f 3 | cut -d'.' -f 1,2))-stretch lint: - golint `go list ./... 
| grep -v /vendor/` + golangci-lint run -c .golangci.yml + go vet ./... fmt: go fmt ./... diff --git a/README.md b/README.md index 7ffd6765..675f4569 100644 --- a/README.md +++ b/README.md @@ -1,25 +1,24 @@ # go-algorand-sdk -[![Build Status](https://travis-ci.com/algorand/go-algorand-sdk.svg?branch=master)](https://travis-ci.com/algorand/go-algorand-sdk) -[![Go Report Card](https://goreportcard.com/badge/github.com/algorand/go-algorand-sdk)](https://goreportcard.com/report/github.com/algorand/go-algorand-sdk) -[![GoDoc](https://godoc.org/github.com/algorand/go-algorand-sdk?status.svg)](https://godoc.org/github.com/algorand/go-algorand-sdk) +[![Go Report Card](https://goreportcard.com/badge/github.com/algorand/go-algorand-sdk)](https://goreportcard.com/report/github.com/algorand/go-algorand-sdk/v2) +[![GoDoc](https://godoc.org/github.com/algorand/go-algorand-sdk?status.svg)](https://godoc.org/github.com/algorand/go-algorand-sdk/v2) The Algorand golang SDK provides: - HTTP clients for the algod (agreement) and kmd (key management) APIs - Standalone functionality for interacting with the Algorand protocol, including transaction signing, message encoding, etc. -# Documentation +## Documentation -Full documentation is available [on pkg.go.dev](https://pkg.go.dev/github.com/algorand/go-algorand-sdk/v2). You can also self-host the documentation by running `godoc -http=:8099` and visiting `http://localhost:8099/pkg/github.com/algorand/go-algorand-sdk` in your web browser. +Full documentation is available [on pkg.go.dev](https://pkg.go.dev/github.com/algorand/go-algorand-sdk/v2). You can also self-host the documentation by running `godoc -http=:8099` and visiting `http://localhost:8099/pkg/github.com/algorand/go-algorand-sdk/v2` in your web browser. Additional developer documentation and examples can be found on [developer.algorand.org](https://developer.algorand.org/docs/sdks/go/) -# Package overview +## Package Overview In `client/`, the `kmd` packages provide HTTP clients for the Key Management Daemon. It is responsible for managing spending key material, signing transactions, and managing wallets. In `client/v2` the `algod` package contains a client for the Algorand protocol daemon HTTP API. You can use it to check the status of the blockchain, read a block, look at transactions, or submit a signed transaction. -In `client/v2` the `indexer` package contains a client for the Algorand Indexer API. You can use it to query historical transactions or make queries about the current state of the chain. +In `client/v2` the `indexer` package contains a client for the Algorand Indexer API. You can use it to query historical transactions or make queries about the current state of the chain. `transaction` package contains Transaction building utility functions. @@ -29,14 +28,18 @@ In `client/v2` the `indexer` package contains a client for the Algorand Indexer `mnemonic` contains support for turning 32-byte keys into checksummed, human-readable mnemonics (and going from mnemonics back to keys). -# SDK Development +## SDK Development Run tests with `make docker-test`. To set up the sandbox-based test harness without standing up the go-algorand docker image use `make harness`. -# Quick Start +We use golangci-lint to run linters on our codebase. Please run `make lint` before you submit a PR to make sure it conforms to linter standards. + +We use cucumber testing for all of our SDKs, including this one. 
Please refer to [algorand-sdk-testing](https://github.com/algorand/algorand-sdk-testing#readme) for guidance and existing tests that you may need to update. Depending on the type of update you wish to contribute, you may also need to have corresponding updates in the other SDKs (Java, JS, and Python). Feel welcome to ask for collaboration on that front. + +## Quick Start To download the SDK, open a terminal and use the `go get` command. -```command +```sh go get -u github.com/algorand/go-algorand-sdk/... ``` diff --git a/abi/abi.go b/abi/abi.go index e0a7f296..d841a25d 100644 --- a/abi/abi.go +++ b/abi/abi.go @@ -4,8 +4,10 @@ import ( avm_abi "github.com/algorand/avm-abi/abi" ) +// Type is an ABI type type Type = avm_abi.Type +// TypeOf returns the ABI type of a string representation of a type. func TypeOf(str string) (Type, error) { return avm_abi.TypeOf(str) } diff --git a/abi/interactions.go b/abi/interactions.go index 3d6c6b23..ec6cd5d0 100644 --- a/abi/interactions.go +++ b/abi/interactions.go @@ -169,6 +169,8 @@ func (method *Method) GetTxCount() int { return cnt } +// GetMethodByName returns the method with the given name from the given list. +// Returns an error if there are multiple or no methods with the same name. func GetMethodByName(methods []Method, name string) (Method, error) { var filteredMethods []Method for _, method := range methods { @@ -204,6 +206,7 @@ type Interface struct { Methods []Method `json:"methods"` } +// GetMethodByName returns the method with the given name func (i *Interface) GetMethodByName(name string) (Method, error) { return GetMethodByName(i.Methods, name) } @@ -228,6 +231,7 @@ type Contract struct { Methods []Method `json:"methods"` } +// GetMethodByName returns the method with the given name func (c *Contract) GetMethodByName(name string) (Method, error) { return GetMethodByName(c.Methods, name) } diff --git a/client/v2/algod/algod.go b/client/v2/algod/algod.go index 00c27afb..2914f2f9 100644 --- a/client/v2/algod/algod.go +++ b/client/v2/algod/algod.go @@ -138,6 +138,18 @@ func (c *Client) PendingTransactionInformation(txid string) *PendingTransactionI return &PendingTransactionInformation{c: c, txid: txid} } +func (c *Client) GetLedgerStateDelta(round uint64) *GetLedgerStateDelta { + return &GetLedgerStateDelta{c: c, round: round} +} + +func (c *Client) GetTransactionGroupLedgerStateDeltasForRound(round uint64) *GetTransactionGroupLedgerStateDeltasForRound { + return &GetTransactionGroupLedgerStateDeltasForRound{c: c, round: round} +} + +func (c *Client) GetLedgerStateDeltaForTransactionGroup(id string) *GetLedgerStateDeltaForTransactionGroup { + return &GetLedgerStateDeltaForTransactionGroup{c: c, id: id} +} + func (c *Client) GetStateProof(round uint64) *GetStateProof { return &GetStateProof{c: c, round: round} } diff --git a/client/v2/algod/getLedgerStateDelta.go b/client/v2/algod/getLedgerStateDelta.go new file mode 100644 index 00000000..866b5d03 --- /dev/null +++ b/client/v2/algod/getLedgerStateDelta.go @@ -0,0 +1,33 @@ +package algod + +import ( + "context" + "fmt" + + "github.com/algorand/go-algorand-sdk/v2/client/v2/common" + "github.com/algorand/go-algorand-sdk/v2/types" +) + +// GetLedgerStateDeltaParams contains all of the query parameters for url serialization. +type GetLedgerStateDeltaParams struct { + + // Format configures whether the response object is JSON or MessagePack encoded. If + // not provided, defaults to JSON. + Format string `url:"format,omitempty"` +} + +// GetLedgerStateDelta get ledger deltas for a round.
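+//
+// A minimal usage sketch (the address, token, and round below are
+// placeholder values, not part of this diff):
+//
+//	client, err := algod.MakeClient("http://localhost:4001", apiToken)
+//	if err != nil {
+//		// handle error
+//	}
+//	delta, err := client.GetLedgerStateDelta(round).Do(context.Background())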
+type GetLedgerStateDelta struct { + c *Client + + round uint64 + + p GetLedgerStateDeltaParams +} + +// Do performs the HTTP request +func (s *GetLedgerStateDelta) Do(ctx context.Context, headers ...*common.Header) (response types.LedgerStateDelta, err error) { + s.p.Format = "msgpack" + err = s.c.getMsgpack(ctx, &response, fmt.Sprintf("/v2/deltas/%s", common.EscapeParams(s.round)...), s.p, headers) + return +} diff --git a/client/v2/algod/getLedgerStateDeltaForTransactionGroup.go b/client/v2/algod/getLedgerStateDeltaForTransactionGroup.go new file mode 100644 index 00000000..e81536ae --- /dev/null +++ b/client/v2/algod/getLedgerStateDeltaForTransactionGroup.go @@ -0,0 +1,34 @@ +package algod + +import ( + "context" + "fmt" + + "github.com/algorand/go-algorand-sdk/v2/client/v2/common" + "github.com/algorand/go-algorand-sdk/v2/types" +) + +// GetLedgerStateDeltaForTransactionGroupParams contains all of the query parameters for url serialization. +type GetLedgerStateDeltaForTransactionGroupParams struct { + + // Format configures whether the response object is JSON or MessagePack encoded. If + // not provided, defaults to JSON. + Format string `url:"format,omitempty"` +} + +// GetLedgerStateDeltaForTransactionGroup get a ledger delta for a given +// transaction group. +type GetLedgerStateDeltaForTransactionGroup struct { + c *Client + + id string + + p GetLedgerStateDeltaForTransactionGroupParams +} + +// Do performs the HTTP request +func (s *GetLedgerStateDeltaForTransactionGroup) Do(ctx context.Context, headers ...*common.Header) (response types.LedgerStateDelta, err error) { + s.p.Format = "msgpack" + err = s.c.getMsgpack(ctx, &response, fmt.Sprintf("/v2/deltas/txn/group/%s", common.EscapeParams(s.id)...), s.p, headers) + return +} diff --git a/client/v2/algod/getTransactionGroupLedgerStateDeltasForRound.go b/client/v2/algod/getTransactionGroupLedgerStateDeltasForRound.go new file mode 100644 index 00000000..1bb028fe --- /dev/null +++ b/client/v2/algod/getTransactionGroupLedgerStateDeltasForRound.go @@ -0,0 +1,34 @@ +package algod + +import ( + "context" + "fmt" + + "github.com/algorand/go-algorand-sdk/v2/client/v2/common" + "github.com/algorand/go-algorand-sdk/v2/client/v2/common/models" +) + +// GetTransactionGroupLedgerStateDeltasForRoundParams contains all of the query parameters for url serialization. +type GetTransactionGroupLedgerStateDeltasForRoundParams struct { + + // Format configures whether the response object is JSON or MessagePack encoded. If + // not provided, defaults to JSON. + Format string `url:"format,omitempty"` +} + +// GetTransactionGroupLedgerStateDeltasForRound get ledger deltas for transaction +// groups in a given round. 
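+//
+// Usage follows the same pattern as GetLedgerStateDelta above (sketch,
+// assuming a client built the same way):
+//
+//	resp, err := client.GetTransactionGroupLedgerStateDeltasForRound(round).Do(ctx)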
+type GetTransactionGroupLedgerStateDeltasForRound struct { + c *Client + + round uint64 + + p GetTransactionGroupLedgerStateDeltasForRoundParams +} + +// Do performs the HTTP request +func (s *GetTransactionGroupLedgerStateDeltasForRound) Do(ctx context.Context, headers ...*common.Header) (response models.TransactionGroupLedgerStateDeltasForRoundResponse, err error) { + s.p.Format = "msgpack" + err = s.c.getMsgpack(ctx, &response, fmt.Sprintf("/v2/deltas/%s/txn/group", common.EscapeParams(s.round)...), s.p, headers) + return +} diff --git a/client/v2/algod/rawTransaction.go b/client/v2/algod/rawTransaction.go index bbdc3435..8bac7d93 100644 --- a/client/v2/algod/rawTransaction.go +++ b/client/v2/algod/rawTransaction.go @@ -28,7 +28,7 @@ func (s *SendRawTransaction) Do(ctx context.Context, headers ...*common.Header) } } if addContentType { - headers = append(headers, &common.Header{"Content-Type", "application/x-binary"}) + headers = append(headers, &common.Header{Key: "Content-Type", Value: "application/x-binary"}) } err = s.c.post(ctx, &response, "/v2/transactions", nil, headers, s.rawtxn) txid = response.Txid diff --git a/client/v2/common/models/box.go b/client/v2/common/models/box.go index 5bc552cb..5094495b 100644 --- a/client/v2/common/models/box.go +++ b/client/v2/common/models/box.go @@ -5,6 +5,9 @@ type Box struct { // Name (name) box name, base64 encoded Name []byte `json:"name"` + // Round the round for which this information is relevant + Round uint64 `json:"round"` + // Value (value) box value, base64 encoded. Value []byte `json:"value"` } diff --git a/client/v2/common/models/ledger_state_delta_for_transaction_group.go b/client/v2/common/models/ledger_state_delta_for_transaction_group.go new file mode 100644 index 00000000..fcc749c5 --- /dev/null +++ b/client/v2/common/models/ledger_state_delta_for_transaction_group.go @@ -0,0 +1,11 @@ +package models + +// LedgerStateDeltaForTransactionGroup contains a ledger delta for a single +// transaction group +type LedgerStateDeltaForTransactionGroup struct { + // Delta ledger StateDelta object + Delta *map[string]interface{} `json:"Delta"` + + // Ids + Ids []string `json:"Ids"` +} diff --git a/client/v2/common/models/simulate_request.go b/client/v2/common/models/simulate_request.go index 048ed202..a212ad8a 100644 --- a/client/v2/common/models/simulate_request.go +++ b/client/v2/common/models/simulate_request.go @@ -9,6 +9,13 @@ type SimulateRequest struct { // AllowMoreLogging lifts limits on log opcode usage during simulation. AllowMoreLogging bool `json:"allow-more-logging,omitempty"` + // ExecTraceConfig an object that configures simulation execution trace. + ExecTraceConfig SimulateTraceConfig `json:"exec-trace-config,omitempty"` + + // ExtraOpcodeBudget applies extra opcode budget during simulation for each + // transaction group. + ExtraOpcodeBudget uint64 `json:"extra-opcode-budget,omitempty"` + // TxnGroups the transaction groups to simulate. TxnGroups []SimulateRequestTransactionGroup `json:"txn-groups"` } diff --git a/client/v2/common/models/simulate_response.go b/client/v2/common/models/simulate_response.go index 51cdd10c..98e4034a 100644 --- a/client/v2/common/models/simulate_response.go +++ b/client/v2/common/models/simulate_response.go @@ -7,6 +7,9 @@ type SimulateResponse struct { // standard evaluation in certain ways. EvalOverrides SimulationEvalOverrides `json:"eval-overrides,omitempty"` + // ExecTraceConfig an object that configures simulation execution trace. 
+ ExecTraceConfig SimulateTraceConfig `json:"exec-trace-config,omitempty"` + // LastRound the round immediately preceding this simulation. State changes through // this round were used to run this simulation. LastRound uint64 `json:"last-round"` diff --git a/client/v2/common/models/simulate_trace_config.go b/client/v2/common/models/simulate_trace_config.go new file mode 100644 index 00000000..27e4166b --- /dev/null +++ b/client/v2/common/models/simulate_trace_config.go @@ -0,0 +1,8 @@ +package models + +// SimulateTraceConfig an object that configures simulation execution trace. +type SimulateTraceConfig struct { + // Enable a boolean option for opting in execution trace features simulation + // endpoint. + Enable bool `json:"enable,omitempty"` +} diff --git a/client/v2/common/models/simulate_transaction_result.go b/client/v2/common/models/simulate_transaction_result.go index 3dab4b68..56cf5118 100644 --- a/client/v2/common/models/simulate_transaction_result.go +++ b/client/v2/common/models/simulate_transaction_result.go @@ -6,6 +6,10 @@ type SimulateTransactionResult struct { // value includes budged used by inner app calls spawned by this transaction. AppBudgetConsumed uint64 `json:"app-budget-consumed,omitempty"` + // ExecTrace the execution trace of calling an app or a logic sig, containing the + // inner app call trace in a recursive way. + ExecTrace SimulationTransactionExecTrace `json:"exec-trace,omitempty"` + // LogicSigBudgetConsumed budget used during execution of a logic sig transaction. LogicSigBudgetConsumed uint64 `json:"logic-sig-budget-consumed,omitempty"` diff --git a/client/v2/common/models/simulation_eval_overrides.go b/client/v2/common/models/simulation_eval_overrides.go index 59510676..dfa9c3d9 100644 --- a/client/v2/common/models/simulation_eval_overrides.go +++ b/client/v2/common/models/simulation_eval_overrides.go @@ -8,6 +8,10 @@ type SimulationEvalOverrides struct { // simulated as if they were properly signed. AllowEmptySignatures bool `json:"allow-empty-signatures,omitempty"` + // ExtraOpcodeBudget the extra opcode budget added to each transaction group during + // simulation + ExtraOpcodeBudget uint64 `json:"extra-opcode-budget,omitempty"` + // MaxLogCalls the maximum log calls one can make during simulation MaxLogCalls uint64 `json:"max-log-calls,omitempty"` diff --git a/client/v2/common/models/simulation_opcode_trace_unit.go b/client/v2/common/models/simulation_opcode_trace_unit.go new file mode 100644 index 00000000..1b256458 --- /dev/null +++ b/client/v2/common/models/simulation_opcode_trace_unit.go @@ -0,0 +1,12 @@ +package models + +// SimulationOpcodeTraceUnit the set of trace information and effect from +// evaluating a single opcode. +type SimulationOpcodeTraceUnit struct { + // Pc the program counter of the current opcode being evaluated. + Pc uint64 `json:"pc"` + + // SpawnedInners the indexes of the traces for inner transactions spawned by this + // opcode, if any. + SpawnedInners []uint64 `json:"spawned-inners,omitempty"` +} diff --git a/client/v2/common/models/simulation_transaction_exec_trace.go b/client/v2/common/models/simulation_transaction_exec_trace.go new file mode 100644 index 00000000..c37196bb --- /dev/null +++ b/client/v2/common/models/simulation_transaction_exec_trace.go @@ -0,0 +1,21 @@ +package models + +// SimulationTransactionExecTrace the execution trace of calling an app or a logic +// sig, containing the inner app call trace in a recursive way. 
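+//
+// InnerTrace nests further SimulationTransactionExecTrace values, so a
+// consumer typically walks the trace recursively (sketch, not part of this
+// diff):
+//
+//	var visit func(tr SimulationTransactionExecTrace)
+//	visit = func(tr SimulationTransactionExecTrace) {
+//		// inspect tr.ApprovalProgramTrace, tr.ClearStateProgramTrace,
+//		// and tr.LogicSigTrace here
+//		for _, inner := range tr.InnerTrace {
+//			visit(inner)
+//		}
+//	}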
+type SimulationTransactionExecTrace struct { + // ApprovalProgramTrace program trace that contains a trace of opcode effects in an + // approval program. + ApprovalProgramTrace []SimulationOpcodeTraceUnit `json:"approval-program-trace,omitempty"` + + // ClearStateProgramTrace program trace that contains a trace of opcode effects in + // a clear state program. + ClearStateProgramTrace []SimulationOpcodeTraceUnit `json:"clear-state-program-trace,omitempty"` + + // InnerTrace an array of SimulationTransactionExecTrace representing the execution + // trace of any inner transactions executed. + InnerTrace []SimulationTransactionExecTrace `json:"inner-trace,omitempty"` + + // LogicSigTrace program trace that contains a trace of opcode effects in a logic + // sig. + LogicSigTrace []SimulationOpcodeTraceUnit `json:"logic-sig-trace,omitempty"` +} diff --git a/client/v2/common/models/transaction_asset_transfer.go b/client/v2/common/models/transaction_asset_transfer.go index 27e17a15..c5cadcb8 100644 --- a/client/v2/common/models/transaction_asset_transfer.go +++ b/client/v2/common/models/transaction_asset_transfer.go @@ -11,7 +11,7 @@ type TransactionAssetTransfer struct { // AssetId (xaid) ID of the asset being transferred. AssetId uint64 `json:"asset-id"` - // CloseAmount number of assets transfered to the close-to account as part of the + // CloseAmount number of assets transferred to the close-to account as part of the // transaction. CloseAmount uint64 `json:"close-amount,omitempty"` diff --git a/client/v2/common/models/transaction_group_ledger_state_deltas_for_round_response.go b/client/v2/common/models/transaction_group_ledger_state_deltas_for_round_response.go new file mode 100644 index 00000000..00f0fee8 --- /dev/null +++ b/client/v2/common/models/transaction_group_ledger_state_deltas_for_round_response.go @@ -0,0 +1,9 @@ +package models + +// TransactionGroupLedgerStateDeltasForRoundResponse response containing all ledger +// state deltas for transaction groups, with their associated Ids, in a single +// round. 
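+//
+// Locating the delta for a particular transaction id is a scan over Deltas
+// and each group's Ids (sketch; resp and txid are placeholders, not part of
+// this diff):
+//
+//	for _, d := range resp.Deltas {
+//		for _, id := range d.Ids {
+//			if id == txid {
+//				// d.Delta holds the group's raw StateDelta
+//			}
+//		}
+//	}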
+type TransactionGroupLedgerStateDeltasForRoundResponse struct { + // Deltas + Deltas []LedgerStateDeltaForTransactionGroup `json:"Deltas"` +} diff --git a/crypto/crypto.go b/crypto/crypto.go index 3bf84658..156ea4bc 100644 --- a/crypto/crypto.go +++ b/crypto/crypto.go @@ -65,6 +65,7 @@ func GenerateAddressFromSK(sk []byte) (types.Address, error) { return a, nil } +// GetTxID returns the txid of a transaction func GetTxID(tx types.Transaction) string { rawTx := rawTransactionBytesToSign(tx) return txIDFromRawTxnBytesToSign(rawTx) @@ -427,11 +428,7 @@ func VerifyMultisig(addr types.Address, message []byte, msig types.MultisigSig) } } - if verifiedCount < msig.Threshold { - return false - } - - return true + return verifiedCount >= msig.Threshold } // ComputeGroupID returns group ID for a group of transactions @@ -461,15 +458,15 @@ func ComputeGroupID(txgroup []types.Transaction) (gid types.Digest, err error) { /* LogicSig support */ -func isAsciiPrintableByte(symbol byte) bool { +func isASCIIPrintableByte(symbol byte) bool { isBreakLine := symbol == '\n' isStdPrintable := symbol >= ' ' && symbol <= '~' return isBreakLine || isStdPrintable } -func isAsciiPrintable(program []byte) bool { +func isASCIIPrintable(program []byte) bool { for _, b := range program { - if !isAsciiPrintableByte(b) { + if !isASCIIPrintableByte(b) { return false } } @@ -482,7 +479,7 @@ func sanityCheckProgram(program []byte) error { if len(program) == 0 { return fmt.Errorf("empty program") } - if isAsciiPrintable(program) { + if isASCIIPrintable(program) { if _, err := types.DecodeAddress(string(program)); err == nil { return fmt.Errorf("requesting program bytes, get Algorand address") } @@ -759,6 +756,7 @@ func GetApplicationAddress(appID uint64) types.Address { return types.Address(hash) } +// HashStateProofMessage returns the hash of a state proof message. func HashStateProofMessage(stateProofMessage *types.Message) types.MessageHash { msgPackedStateProofMessage := msgpack.Encode(stateProofMessage) @@ -769,6 +767,7 @@ func HashStateProofMessage(stateProofMessage *types.Message) types.MessageHash { return sha256.Sum256(stateProofMessageData) } +// HashLightBlockHeader returns the hash of a light block header. 
func HashLightBlockHeader(lightBlockHeader types.LightBlockHeader) types.Digest { msgPackedLightBlockHeader := msgpack.Encode(lightBlockHeader) diff --git a/crypto/errors.go b/crypto/errors.go index 0e20d8b4..7d67e2db 100644 --- a/crypto/errors.go +++ b/crypto/errors.go @@ -14,10 +14,8 @@ var errMsigMergeKeysMismatch = errors.New("multisig parameters do not match") var errMsigMergeInvalidDups = errors.New("mismatched duplicate signatures") var errMsigMergeAuthAddrMismatch = errors.New("mismatched AuthAddrs") var errLsigTooManySignatures = errors.New("logicsig has too many signatures, at most one of Sig or Msig may be defined") -var errLsigNoSignature = errors.New("logicsig is not delegated") var errLsigInvalidSignature = errors.New("invalid logicsig signature") var errLsigNoPublicKey = errors.New("missing public key of delegated logicsig") var errLsigInvalidPublicKey = errors.New("public key does not match logicsig signature") -var errLsigInvalidProgram = errors.New("invalid logicsig program") var errLsigEmptyMsig = errors.New("empty multisig in logicsig") var errLsigAccountPublicKeyNotNeeded = errors.New("a public key for the signer was provided when none was expected") diff --git a/encoding/json/json_test.go b/encoding/json/json_test.go index 750408d4..f7c93676 100644 --- a/encoding/json/json_test.go +++ b/encoding/json/json_test.go @@ -2,6 +2,7 @@ package json import ( "bytes" + "encoding/json" "testing" "github.com/stretchr/testify/assert" @@ -17,70 +18,100 @@ type subsetObject struct { Data string `codec:"data"` } -func TestDecode(t *testing.T) { - obj := object{ +var obj object +var encodedOb []byte + +func init() { + obj = object{ subsetObject: subsetObject{Data: "data"}, Name: "name", } - encodedOb := Encode(obj) - - t.Run("basic encode/decode test", func(t *testing.T) { - // basic encode/decode test. 
- var decoded object - err := Decode(encodedOb, &decoded) - require.NoError(t, err) - assert.Equal(t, obj, decoded) - }) - - t.Run("strict decode, pass", func(t *testing.T) { - // strict decode test - decoder := NewDecoder(bytes.NewReader(encodedOb)) - var decoded object - err := decoder.Decode(&decoded) - require.NoError(t, err) - assert.Equal(t, obj, decoded) - }) - - t.Run("strict decode subset, fail", func(t *testing.T) { - // strict decode test - decoder := NewDecoder(bytes.NewReader(encodedOb)) - var decoded subsetObject - err := decoder.Decode(&decoded) - require.Error(t, err) - assert.Contains(t, err.Error(), "no matching struct field found when decoding stream map with key name") - }) - - t.Run("lenient decode subset, pass", func(t *testing.T) { - // strict decode test - decoder := NewLenientDecoder(bytes.NewReader(encodedOb)) - var decoded subsetObject - err := decoder.Decode(&decoded) - require.NoError(t, err) - assert.Equal(t, obj.subsetObject, decoded) - }) - - t.Run("original encode map key as string", func(t *testing.T) { - intMap := map[int]string{ - 0: "int key", - } - data := string(Encode(intMap)) - assert.NotContains(t, data, "\"0\":") - }) - - t.Run("strict encode map key as string", func(t *testing.T) { - intMap := map[int]string{ - 0: "int key", - } - data := string(EncodeStrict(intMap)) - assert.NotContains(t, data, "0:") - }) - - t.Run("strict encode map interface key as string", func(t *testing.T) { - t.Skip("There is a bug in go-codec with MapKeyAsString = true and Canonical = true") - intMap := map[interface{}]interface{}{ - 0: "int key", - } - data := string(EncodeStrict(intMap)) - assert.NotContains(t, data, "0:") - }) + encodedOb = Encode(obj) +} + +func TestBasicEncodeDecode(t *testing.T) { + // basic encode/decode test. 
+ var decoded object + err := Decode(encodedOb, &decoded) + require.NoError(t, err) + assert.Equal(t, obj, decoded) +} + +func TestDecode(t *testing.T) { + decoder := NewDecoder(bytes.NewReader(encodedOb)) + var decoded object + err := decoder.Decode(&decoded) + require.NoError(t, err) + assert.Equal(t, obj, decoded) +} + +func TestSubsetDecode(t *testing.T) { + decoder := NewDecoder(bytes.NewReader(encodedOb)) + var decoded subsetObject + err := decoder.Decode(&decoded) + require.Error(t, err) + assert.Contains(t, err.Error(), "no matching struct field found when decoding stream map with key name") +} + +func TestLenientDecode(t *testing.T) { + decoder := NewLenientDecoder(bytes.NewReader(encodedOb)) + var decoded subsetObject + err := decoder.Decode(&decoded) + require.NoError(t, err) + assert.Equal(t, obj.subsetObject, decoded) +} + +func TestEncodeMapKeyAsString(t *testing.T) { + intMap := map[int]string{ + 0: "int key", + } + data := string(Encode(intMap)) + assert.NotContains(t, data, `"0"`) +} + +func TestStrictEncodeMapIntKeyAsString(t *testing.T) { + intMap := map[int]string{ + 0: "int key", + } + data := string(EncodeStrict(intMap)) + assert.NotContains(t, data, "0:") +} + +func TestStrictEncodeMapInterfaceKeyAsString(t *testing.T) { + intMap := map[interface{}]interface{}{ + 0: "int key", + } + data := string(EncodeStrict(intMap)) + assert.Contains(t, data, `"0"`) +} + +func TestStructKeyEncode(t *testing.T) { + type KeyStruct struct { + Key1 string `json:"key1"` + Key2 string `json:"key2"` + } + type TestStruct struct { + Complex map[KeyStruct]string `json:"complex"` + } + + data := TestStruct{ + Complex: map[KeyStruct]string{ + { + Key1: "key1", + Key2: "key2", + }: "value", + }, + } + + encoded := Encode(data) + + var data2 TestStruct + err := Decode(encoded, &data2) + assert.NoError(t, err) + assert.Equal(t, data, data2) + + // Unfortunately, still an error + var data3 TestStruct + err = json.NewDecoder(bytes.NewReader(encoded)).Decode(&data3) + assert.Error(t, err) } diff --git a/go.mod b/go.mod index 65a611ac..98f11d2a 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.17 require ( github.com/algorand/avm-abi v0.1.1 - github.com/algorand/go-codec/codec v1.1.8 + github.com/algorand/go-codec/codec v1.1.10 github.com/cucumber/godog v0.8.1 github.com/google/go-querystring v1.0.0 github.com/stretchr/testify v1.7.1 diff --git a/go.sum b/go.sum index 0f3bc5b1..e8b1fb2b 100644 --- a/go.sum +++ b/go.sum @@ -1,9 +1,7 @@ github.com/algorand/avm-abi v0.1.1 h1:dbyQKzXiyaEbzpmqXFB30yAhyqseBsyqXTyZbNbkh2Y= github.com/algorand/avm-abi v0.1.1/go.mod h1:+CgwM46dithy850bpTeHh9MC99zpn2Snirb3QTl2O/g= -github.com/algorand/go-codec v1.1.8 h1:XDSreeeZY8gMst6Edz4RBkl08/DGMJOeHYkoXL2B7wI= -github.com/algorand/go-codec v1.1.8/go.mod h1:XhzVs6VVyWMLu6cApb9/192gBjGRVGm5cX5j203Heg4= -github.com/algorand/go-codec/codec v1.1.8 h1:lsFuhcOH2LiEhpBH3BVUUkdevVmwCRyvb7FCAAPeY6U= -github.com/algorand/go-codec/codec v1.1.8/go.mod h1:tQ3zAJ6ijTps6V+wp8KsGDnPC2uhHVC7ANyrtkIY0bA= +github.com/algorand/go-codec/codec v1.1.10 h1:zmWYU1cp64jQVTOG8Tw8wa+k0VfwgXIPbnDfiVa+5QA= +github.com/algorand/go-codec/codec v1.1.10/go.mod h1:YkEx5nmr/zuCeaDYOIhlDg92Lxju8tj2d2NrYqP7g7k= github.com/chrismcguire/gobberish v0.0.0-20150821175641-1d8adb509a0e h1:CHPYEbz71w8DqJ7DRIq+MXyCQsdibK08vdcQTY4ufas= github.com/chrismcguire/gobberish v0.0.0-20150821175641-1d8adb509a0e/go.mod h1:6Xhs0ZlsRjXLIiSMLKafbZxML/j30pg9Z1priLuha5s= github.com/cucumber/godog v0.8.1 h1:lVb+X41I4YDreE+ibZ50bdXmySxgRviYFgKY6Aw4XE8= diff --git 
a/logic/source_map.go b/logic/source_map.go index 0d798971..dbec0aef 100644 --- a/logic/source_map.go +++ b/logic/source_map.go @@ -19,6 +19,7 @@ type SourceMap struct { PcToLine map[int]int } +// DecodeSourceMap decodes a source map func DecodeSourceMap(ism map[string]interface{}) (SourceMap, error) { var sm SourceMap @@ -62,11 +63,13 @@ func DecodeSourceMap(ism map[string]interface{}) (SourceMap, error) { return sm, nil } +// GetLineForPc returns the line number for the given pc func (s *SourceMap) GetLineForPc(pc int) (int, bool) { line, ok := s.PcToLine[pc] return line, ok } +// GetPcsForLine returns the program counters for the given line func (s *SourceMap) GetPcsForLine(line int) []int { return s.LineToPc[line] } diff --git a/protocol/config/consensus.go b/protocol/config/consensus.go new file mode 100644 index 00000000..80cc8652 --- /dev/null +++ b/protocol/config/consensus.go @@ -0,0 +1,1291 @@ +package config + +import ( + "time" + + "github.com/algorand/go-algorand-sdk/v2/protocol" +) + +// ConsensusParams specifies settings that might vary based on the +// particular version of the consensus protocol. +type ConsensusParams struct { + // Consensus protocol upgrades. Votes for upgrades are collected for + // UpgradeVoteRounds. If the number of positive votes is over + // UpgradeThreshold, the proposal is accepted. + // + // UpgradeVoteRounds needs to be long enough to collect an + // accurate sample of participants, and UpgradeThreshold needs + // to be high enough to ensure that there are sufficient participants + // after the upgrade. + // + // A consensus protocol upgrade may specify the delay between its + // acceptance and its execution. This gives clients time to notify + // users. This delay is specified by the upgrade proposer and must + // be between MinUpgradeWaitRounds and MaxUpgradeWaitRounds (inclusive) + // in the old protocol's parameters. Note that these parameters refer + // to the representation of the delay in a block rather than the actual + // delay: if the specified delay is zero, it is equivalent to + // DefaultUpgradeWaitRounds. + // + // The maximum length of a consensus version string is + // MaxVersionStringLen. + UpgradeVoteRounds uint64 + UpgradeThreshold uint64 + DefaultUpgradeWaitRounds uint64 + MinUpgradeWaitRounds uint64 + MaxUpgradeWaitRounds uint64 + MaxVersionStringLen int + + // MaxTxnBytesPerBlock determines the maximum number of bytes + // that transactions can take up in a block. Specifically, + // the sum of the lengths of encodings of each transaction + // in a block must not exceed MaxTxnBytesPerBlock. + MaxTxnBytesPerBlock int + + // MaxTxnNoteBytes is the maximum size of a transaction's Note field. + MaxTxnNoteBytes int + + // MaxTxnLife is how long a transaction can be live for: + // the maximum difference between LastValid and FirstValid. + // + // Note that in a protocol upgrade, the ledger must first be upgraded + // to hold more past blocks for this value to be raised. + MaxTxnLife uint64 + + // ApprovedUpgrades describes the upgrade proposals that this protocol + // implementation will vote for, along with their delay value + // (in rounds). A delay value of zero is the same as a delay of + // DefaultUpgradeWaitRounds. + ApprovedUpgrades map[protocol.ConsensusVersion]uint64 + + // SupportGenesisHash indicates support for the GenesisHash + // fields in transactions (and requires them in blocks). + SupportGenesisHash bool + + // RequireGenesisHash indicates that GenesisHash must be present + // in every transaction.
+ RequireGenesisHash bool + + // DefaultKeyDilution specifies the granularity of top-level ephemeral + // keys. KeyDilution is the number of second-level keys in each batch, + // signed by a top-level "batch" key. The default value can be + // overridden in the account state. + DefaultKeyDilution uint64 + + // MinBalance specifies the minimum balance that can appear in + // an account. To spend money below MinBalance requires issuing + // an account-closing transaction, which transfers all of the + // money from the account, and deletes the account state. + MinBalance uint64 + + // MinTxnFee specifies the minimum fee allowed on a transaction. + // A minimum fee is necessary to prevent DoS. In some sense this is + // a way of making the spender subsidize the cost of storing this transaction. + MinTxnFee uint64 + + // EnableFeePooling specifies that the sum of the fees in a + // group must exceed one MinTxnFee per Txn, rather than check that + // each Txn has a MinFee. + EnableFeePooling bool + + // EnableAppCostPooling specifies that the sum of fees for application calls + // in a group is checked against the sum of the budget for application calls, + // rather than check each individual app call is within the budget. + EnableAppCostPooling bool + + // RewardUnit specifies the number of MicroAlgos corresponding to one reward + // unit. + // + // Rewards are received by whole reward units. Fractions of + // RewardUnits do not receive rewards. + // + // Ensure both considerations below are taken into account if RewardUnit is planned for change: + // 1. RewardUnits should not be changed without touching all accounts to apply their rewards + // based on the old RewardUnits and then use the new RewardUnits for all subsequent calculations. + // 2. Having a consistent RewardUnit is also important for preserving + // a constant amount of total algos in the system: + // the block header tracks how many reward units worth of algos are in existence + // and have logically received rewards. + RewardUnit uint64 + + // RewardsRateRefreshInterval is the number of rounds after which the + // rewards level is recomputed for the next RewardsRateRefreshInterval rounds. + RewardsRateRefreshInterval uint64 + + // seed-related parameters + SeedLookback uint64 // how many blocks back we use seeds from in sortition. delta_s in the spec + SeedRefreshInterval uint64 // how often an old block hash is mixed into the seed. 
delta_r in the spec + + // ledger retention policy + MaxBalLookback uint64 // (current round - MaxBalLookback) is the oldest round the ledger must answer balance queries for + + // sortition threshold factors + NumProposers uint64 + SoftCommitteeSize uint64 + SoftCommitteeThreshold uint64 + CertCommitteeSize uint64 + CertCommitteeThreshold uint64 + NextCommitteeSize uint64 // for any non-FPR votes >= deadline step, committee sizes and thresholds are constant + NextCommitteeThreshold uint64 + LateCommitteeSize uint64 + LateCommitteeThreshold uint64 + RedoCommitteeSize uint64 + RedoCommitteeThreshold uint64 + DownCommitteeSize uint64 + DownCommitteeThreshold uint64 + + // time for nodes to wait for block proposal headers for period > 0, value should be set to 2 * SmallLambda + AgreementFilterTimeout time.Duration + // time for nodes to wait for block proposal headers for period = 0, value should be configured to suit best case + // critical path + AgreementFilterTimeoutPeriod0 time.Duration + + FastRecoveryLambda time.Duration // time between fast recovery attempts + + // how to commit to the payset: flat or merkle tree + PaysetCommit PaysetCommitType + + MaxTimestampIncrement int64 // maximum time between timestamps on successive blocks + + // support for the efficient encoding in SignedTxnInBlock + SupportSignedTxnInBlock bool + + // force the FeeSink address to be non-participating in the genesis balances. + ForceNonParticipatingFeeSink bool + + // support for ApplyData in SignedTxnInBlock + ApplyData bool + + // track reward distributions in ApplyData + RewardsInApplyData bool + + // domain-separated credentials + CredentialDomainSeparationEnabled bool + + // support for transactions that mark an account non-participating + SupportBecomeNonParticipatingTransactions bool + + // fix the rewards calculation by avoiding subtracting too much from the rewards pool + PendingResidueRewards bool + + // asset support + Asset bool + + // max number of assets per account + MaxAssetsPerAccount int + + // max length of asset name + MaxAssetNameBytes int + + // max length of asset unit name + MaxAssetUnitNameBytes int + + // max length of asset url + MaxAssetURLBytes int + + // support sequential transaction counter TxnCounter + TxnCounter bool + + // transaction groups + SupportTxGroups bool + + // max group size + MaxTxGroupSize int + + // support for transaction leases + // note: if FixTransactionLeases is not set, the transaction + // leases supported are faulty; specifically, they do not + // enforce exclusion correctly when the FirstValid of + // transactions do not match. + SupportTransactionLeases bool + FixTransactionLeases bool + + // 0 for no support, otherwise highest version supported + LogicSigVersion uint64 + + // len(LogicSig.Logic) + len(LogicSig.Args[*]) must be less than this + LogicSigMaxSize uint64 + + // sum of estimated op cost must be less than this + LogicSigMaxCost uint64 + + // max decimal precision for assets + MaxAssetDecimals uint32 + + // SupportRekeying indicates support for account rekeying (the RekeyTo and AuthAddr fields) + SupportRekeying bool + + // application support + Application bool + + // max number of ApplicationArgs for an ApplicationCall transaction + MaxAppArgs int + + // max sum([len(arg) for arg in txn.ApplicationArgs]) + MaxAppTotalArgLen int + + // maximum byte len of application approval program or clear state + // When MaxExtraAppProgramPages > 0, this is the size of those pages. 
+ // So two "extra pages" would mean 3*MaxAppProgramLen bytes are available. + MaxAppProgramLen int + + // maximum total length of an application's programs (approval + clear state) + // When MaxExtraAppProgramPages > 0, this is the size of those pages. + // So two "extra pages" would mean 3*MaxAppTotalProgramLen bytes are available. + MaxAppTotalProgramLen int + + // extra length for application program in pages. A page is MaxAppProgramLen bytes + MaxExtraAppProgramPages int + + // maximum number of accounts in the ApplicationCall Accounts field. + // this determines, in part, the maximum number of balance records + // accessed by a single transaction + MaxAppTxnAccounts int + + // maximum number of app ids in the ApplicationCall ForeignApps field. + // these are the only applications besides the called application for + // which global state may be read in the transaction + MaxAppTxnForeignApps int + + // maximum number of asset ids in the ApplicationCall ForeignAssets + // field. these are the only assets for which the asset parameters may + // be read in the transaction + MaxAppTxnForeignAssets int + + // maximum number of "foreign references" (accounts, asa, app, boxes) + // that can be attached to a single app call. + MaxAppTotalTxnReferences int + + // maximum cost of application approval program or clear state program + MaxAppProgramCost int + + // maximum length of a key used in an application's global or local + // key/value store + MaxAppKeyLen int + + // maximum length of a bytes value used in an application's global or + // local key/value store + MaxAppBytesValueLen int + + // maximum sum of the lengths of the key and value of one app state entry + MaxAppSumKeyValueLens int + + // maximum number of inner transactions that can be created by an app call. + // with EnableInnerTransactionPooling, limit is multiplied by MaxTxGroupSize + // and enforced over the whole group. + MaxInnerTransactions int + + // should the number of inner transactions be pooled across group? + EnableInnerTransactionPooling bool + + // provide greater isolation for clear state programs + IsolateClearState bool + + // The minimum app version that can be called in an inner transaction + MinInnerApplVersion uint64 + + // maximum number of applications a single account can create and store + // AppParams for at once + MaxAppsCreated int + + // maximum number of applications a single account can opt in to and + // store AppLocalState for at once + MaxAppsOptedIn int + + // flat MinBalance requirement for creating a single application and + // storing its AppParams + AppFlatParamsMinBalance uint64 + + // flat MinBalance requirement for opting in to a single application + // and storing its AppLocalState + AppFlatOptInMinBalance uint64 + + // MinBalance requirement per key/value entry in LocalState or + // GlobalState key/value stores, regardless of value type + SchemaMinBalancePerEntry uint64 + + // MinBalance requirement (in addition to SchemaMinBalancePerEntry) for + // integer values stored in LocalState or GlobalState key/value stores + SchemaUintMinBalance uint64 + + // MinBalance requirement (in addition to SchemaMinBalancePerEntry) for + // []byte values stored in LocalState or GlobalState key/value stores + SchemaBytesMinBalance uint64 + + // Maximum length of a box (Does not include name/key length. 
That is capped by MaxAppKeyLen) + MaxBoxSize uint64 + + // Minimum Balance Requirement (MBR) per box created (this accounts for a + // bit of overhead used to store the box bytes) + BoxFlatMinBalance uint64 + + // MBR per byte of box storage. MBR is incremented by BoxByteMinBalance * (len(name)+len(value)) + BoxByteMinBalance uint64 + + // Number of box references allowed + MaxAppBoxReferences int + + // Amount added to a txgroup's box I/O budget per box ref supplied. + // For reads: the sum of the sizes of all boxes in the group must be less than I/O budget + // For writes: the sum of the sizes of all boxes created or written must be less than I/O budget + // In both cases, what matters is the sizes of the boxes touched, not the + // number of times they are touched, or the size of the touches. + BytesPerBoxReference uint64 + + // maximum number of total key/value pairs allowed by a given + // LocalStateSchema (and therefore allowed in LocalState) + MaxLocalSchemaEntries uint64 + + // maximum number of total key/value pairs allowed by a given + // GlobalStateSchema (and therefore allowed in GlobalState) + MaxGlobalSchemaEntries uint64 + + // maximum total minimum balance requirement for an account, used + // to limit the maximum size of a single balance record + MaximumMinimumBalance uint64 + + // StateProofInterval defines the frequency with which state + // proofs are generated. Every round that is a multiple + // of StateProofInterval, the block header will include a vector + // commitment to the set of online accounts (that can vote after + // another StateProofInterval rounds), and that block will be signed + // (forming a state proof) by the voters from the previous + // such vector commitment. A value of zero means no state proof. + StateProofInterval uint64 + + // StateProofTopVoters is a bound on how many online accounts get to + // participate in forming the state proof, by including the + // top StateProofTopVoters accounts (by normalized balance) into the + // vector commitment. + StateProofTopVoters uint64 + + // StateProofVotersLookback is the number of blocks we skip before + // publishing a vector commitment to the online accounts. Namely, + // if block number N contains a vector commitment to the online + // accounts (which, incidentally, means N%StateProofInterval=0), + // then the balances reflected in that commitment must come from + // block N-StateProofVotersLookback. This gives each node some + // time (StateProofVotersLookback blocks worth of time) to + // construct this vector commitment, so as to avoid placing the + // construction of this vector commitment (and obtaining the requisite + // accounts and balances) in the critical path. + StateProofVotersLookback uint64 + + // StateProofWeightThreshold specifies the fraction of top voters weight + // that must sign the message (block header) for security. The state + // proof ensures this threshold holds; however, forming a valid + // state proof requires a somewhat higher number of signatures, + // and the more signatures are collected, the smaller the state proof + // can be. + // + // This threshold can be thought of as the maximum fraction of + // malicious weight that state proofs defend against. + // + // The threshold is computed as StateProofWeightThreshold/(1<<32). 
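+ //
+ // For example (editor's illustration, not part of the original comment):
+ // a 30% threshold is encoded as (1 << 32) * 30 / 100 = 1288490188,
+ // which is exactly the value assigned for v34 later in this file.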
+ StateProofWeightThreshold uint32
+
+ // StateProofStrengthTarget represents either k+q (for pre-quantum security) or k+2q (for post-quantum security)
+ StateProofStrengthTarget uint64
+
+ // StateProofMaxRecoveryIntervals represents the number of state proof intervals that the network will try to catch up with.
+ // When the difference between the latest state proof and the current round is greater than this value, nodes will
+ // release the resources allocated for creating state proofs.
+ StateProofMaxRecoveryIntervals uint64
+
+ // StateProofExcludeTotalWeightWithRewards specifies whether to subtract rewards from excluded online accounts along with
+ // their account balances.
+ StateProofExcludeTotalWeightWithRewards bool
+
+ // EnableAssetCloseAmount adds an extra field to the ApplyData. The field contains the amount of the remaining
+ // asset that was sent to the close-to address.
+ EnableAssetCloseAmount bool
+
+ // update the initial rewards rate calculation to take the reward pool minimum balance into account
+ InitialRewardsRateCalculation bool
+
+ // NoEmptyLocalDeltas updates how ApplyDelta.EvalDelta.LocalDeltas are stored
+ NoEmptyLocalDeltas bool
+
+ // EnableKeyregCoherencyCheck enables the following extra checks on key registration transactions:
+ // 1. checking that [VotePK/SelectionPK/VoteKeyDilution] are all set or all clear.
+ // 2. checking that the VoteFirst is less than or equal to VoteLast.
+ // 3. checking that in the case of going offline, both the VoteFirst and VoteLast are clear.
+ // 4. checking that in the case of going online the VoteLast is non-zero and greater than the current network round.
+ // 5. checking that in the case of going online the VoteFirst is less than or equal to the LastValid+1.
+ // 6. checking that in the case of going online the VoteFirst is less than or equal to the next network round.
+ EnableKeyregCoherencyCheck bool
+
+ EnableExtraPagesOnAppUpdate bool
+
+ // MaxProposedExpiredOnlineAccounts is the maximum number of online accounts, which need
+ // to be taken offline, that would be proposed to be taken offline.
+ MaxProposedExpiredOnlineAccounts int
+
+ // EnableAccountDataResourceSeparation enables the support for extended application and asset storage
+ // in a separate table.
+ EnableAccountDataResourceSeparation bool
+
+ // When rewards rate changes, use the new value immediately.
+ RewardsCalculationFix bool
+
+ // EnableStateProofKeyregCheck enables the check for the StateProof key on key registration
+ EnableStateProofKeyregCheck bool
+
+ // MaxKeyregValidPeriod defines the longest period (in rounds) allowed for a keyreg transaction.
+ // This number sets a limit to prevent the number of StateProof keys generated by the user from being too large, and is also checked by the WellFormed method.
+ // The hard limit on the number of StateProof keys is derived from the maximum depth allowed for the merkle signature scheme's tree - 2^16.
+ // More keys => deeper merkle tree => longer proof required => infeasible for our SNARK.
+ MaxKeyregValidPeriod uint64
+
+ // UnifyInnerTxIDs enables a consistent, unified way of computing inner transaction IDs
+ UnifyInnerTxIDs bool
+
+ // EnableSHA256TxnCommitmentHeader enables the creation of a transaction vector commitment tree using the SHA256 hash function. (A vector commitment extends a Merkle tree by having a position binding property.)
+ // This new header is in addition to the existing SHA512_256 merkle root.
+ // It is useful for verifying transactions on different blockchains, as some may not support the SHA512_256 opcode natively but SHA256 is common.
+ EnableSHA256TxnCommitmentHeader bool
+
+ // CatchpointLookback specifies a round lookback to take catchpoints at.
+ // The accounts snapshot for round X will be taken at X-CatchpointLookback
+ CatchpointLookback uint64
+
+ // DeeperBlockHeaderHistory defines the number of rounds, in addition to MaxTxnLife,
+ // available for lookup for smart contracts and smart signatures.
+ // Setting it to 1, for example, allows querying data up to MaxTxnLife + 1 rounds back from the latest round.
+ DeeperBlockHeaderHistory uint64
+
+ // EnableOnlineAccountCatchpoints specifies when to re-enable catchpoints after the online account table migration has occurred.
+ EnableOnlineAccountCatchpoints bool
+
+ // UnfundedSenders ensures that accounts with no balance (so they don't even
+ // "exist") can still be a transaction sender by avoiding updates to rewards
+ // state for accounts with no algos. The actual change implemented to allow
+ // this is to avoid updating an account if the only change would have been
+ // the rewardsLevel, but the rewardsLevel has no meaning because the account
+ // has fewer than RewardUnit algos.
+ UnfundedSenders bool
+
+ // EnablePrecheckECDSACurve means that the ecdsa_verify opcode will bail early,
+ // returning false, if the pubkey is not on the curve.
+ EnablePrecheckECDSACurve bool
+
+ // EnableBareBudgetError specifies that I/O budget overruns should not be considered an EvalError
+ EnableBareBudgetError bool
+
+ // StateProofUseTrackerVerification specifies whether the node will use data from the state proof verification tracker
+ // in order to verify state proofs.
+ StateProofUseTrackerVerification bool
+
+ // EnableCatchpointsWithSPContexts specifies when to re-enable version 7 catchpoints.
+ // Version 7 includes state proof verification contexts
+ EnableCatchpointsWithSPContexts bool
+
+ // AppForbidLowResources enforces a rule that prevents apps from accessing
+ // ASAs and apps below 256, in an effort to decrease the ambiguity of
+ // opcodes that accept IDs or slot indexes. Simultaneously, the first ID
+ // allocated in new chains is raised to 1001.
+ AppForbidLowResources bool
+
+ // EnableBoxRefNameError specifies that box ref names should be validated early
+ EnableBoxRefNameError bool
+
+ // ExcludeExpiredCirculation excludes expired stake from the total online stake
+ // used by agreement for Circulation, and updates the calculation of StateProofOnlineTotalWeight used
+ // by state proofs to use the same method (rather than excluding stake from the top N stakeholders as before).
+ ExcludeExpiredCirculation bool
+}
+
+// PaysetCommitType enumerates possible ways for the block header to commit to
+// the set of transactions in the block.
+type PaysetCommitType int
+
+const (
+ // PaysetCommitUnsupported is the zero value, reflecting the fact
+ // that some early protocols used a Merkle tree to commit to the
+ // transactions in a way that we no longer support.
+ PaysetCommitUnsupported PaysetCommitType = iota
+
+ // PaysetCommitFlat hashes the entire payset array.
+ PaysetCommitFlat
+
+ // PaysetCommitMerkle uses a merkle array to commit to the payset.
+ PaysetCommitMerkle
+)
+
+// ConsensusProtocols defines a set of supported protocol versions and their
+// corresponding parameters.
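+//
+// A minimal usage sketch (editor's addition, assuming this package is
+// imported as config):
+//
+//	params, ok := config.Consensus[protocol.ConsensusV38]
+//	if ok {
+//		fmt.Println(params.MinTxnFee) // prints 1000
+//	}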
+type ConsensusProtocols map[protocol.ConsensusVersion]ConsensusParams
+
+// Consensus tracks the protocol-level settings for different versions of the
+// consensus protocol.
+var Consensus ConsensusProtocols
+
+// MaxVoteThreshold is the largest threshold for a bundle over all supported
+// consensus protocols, used for decoding purposes.
+var MaxVoteThreshold int
+
+// MaxEvalDeltaAccounts is the largest number of accounts that may appear in
+// an eval delta, used for decoding purposes.
+var MaxEvalDeltaAccounts int
+
+// MaxStateDeltaKeys is the largest number of key/value pairs that may appear
+// in a StateDelta, used for decoding purposes.
+var MaxStateDeltaKeys int
+
+// MaxLogCalls is the highest allowable number of log messages that may appear in
+// any version, used only for decoding purposes. Never decrease this value.
+var MaxLogCalls int
+
+// MaxInnerTransactionsPerDelta is the maximum number of inner transactions in one EvalDelta
+var MaxInnerTransactionsPerDelta int
+
+// MaxLogicSigMaxSize is the largest logic signature size that may appear in any of the supported
+// protocols, used for decoding purposes.
+var MaxLogicSigMaxSize int
+
+// MaxTxnNoteBytes is the largest transaction note field size supported by any
+// of the consensus protocols, used for decoding purposes.
+var MaxTxnNoteBytes int
+
+// MaxTxGroupSize is the largest number of transactions per transaction group supported by any
+// of the consensus protocols, used for decoding purposes.
+var MaxTxGroupSize int
+
+// MaxAppProgramLen is the largest app program size supported by any
+// of the consensus protocols, used for decoding purposes.
+var MaxAppProgramLen int
+
+// MaxBytesKeyValueLen is the maximum length of a key or value across all protocols,
+// used for decoding purposes.
+var MaxBytesKeyValueLen int
+
+// MaxExtraAppProgramLen is the maximum extra app program length supported by any
+// of the consensus protocols, used for decoding purposes.
+var MaxExtraAppProgramLen int
+
+// MaxAvailableAppProgramLen is the largest supported app program size, including the
+// extra pages, supported by any of the consensus protocols, used for decoding purposes.
+var MaxAvailableAppProgramLen int
+
+// MaxProposedExpiredOnlineAccounts is the maximum number of online accounts, which need
+// to be taken offline, that would be proposed to be taken offline.
+var MaxProposedExpiredOnlineAccounts int
+
+func checkSetMax(value int, curMax *int) {
+ if value > *curMax {
+ *curMax = value
+ }
+}
+
+// checkSetAllocBounds sets some global variables used during msgpack decoding
+// to enforce memory allocation limits.
+// The values should be generous to prevent correctness bugs,
+// but not so large that DoS attacks are trivial.
+func checkSetAllocBounds(p ConsensusParams) {
+ checkSetMax(int(p.SoftCommitteeThreshold), &MaxVoteThreshold)
+ checkSetMax(int(p.CertCommitteeThreshold), &MaxVoteThreshold)
+ checkSetMax(int(p.NextCommitteeThreshold), &MaxVoteThreshold)
+ checkSetMax(int(p.LateCommitteeThreshold), &MaxVoteThreshold)
+ checkSetMax(int(p.RedoCommitteeThreshold), &MaxVoteThreshold)
+ checkSetMax(int(p.DownCommitteeThreshold), &MaxVoteThreshold)
+
+ // These bounds could be tighter, but since these values are just to
+ // prevent DoS, setting them to be the maximum number of allowed
+ // executed TEAL instructions should be fine (order of ~1000)
+ checkSetMax(p.MaxAppProgramLen, &MaxStateDeltaKeys)
+ checkSetMax(p.MaxAppProgramLen, &MaxEvalDeltaAccounts)
+ checkSetMax(p.MaxAppProgramLen, &MaxAppProgramLen)
+ checkSetMax(int(p.LogicSigMaxSize), &MaxLogicSigMaxSize)
+ checkSetMax(p.MaxTxnNoteBytes, &MaxTxnNoteBytes)
+ checkSetMax(p.MaxTxGroupSize, &MaxTxGroupSize)
+ // MaxBytesKeyValueLen is the max of MaxAppKeyLen and MaxAppBytesValueLen
+ checkSetMax(p.MaxAppKeyLen, &MaxBytesKeyValueLen)
+ checkSetMax(p.MaxAppBytesValueLen, &MaxBytesKeyValueLen)
+ checkSetMax(p.MaxExtraAppProgramPages, &MaxExtraAppProgramLen)
+ // MaxAvailableAppProgramLen is the max supported app program size
+ MaxAvailableAppProgramLen = MaxAppProgramLen * (1 + MaxExtraAppProgramLen)
+ // There is no consensus parameter for MaxLogCalls; p.MaxAppProgramLen is used as an approximation.
+ // Its value is much larger than any reasonable future MaxLogCalls value.
+ checkSetMax(p.MaxAppProgramLen, &MaxLogCalls)
+ checkSetMax(p.MaxInnerTransactions*p.MaxTxGroupSize, &MaxInnerTransactionsPerDelta)
+ checkSetMax(p.MaxProposedExpiredOnlineAccounts, &MaxProposedExpiredOnlineAccounts)
+}
+
+// DeepCopy creates a deep copy of a consensus protocols map.
+func (cp ConsensusProtocols) DeepCopy() ConsensusProtocols {
+ staticConsensus := make(ConsensusProtocols)
+ for consensusVersion, consensusParams := range cp {
+ // recreate the ApprovedUpgrades map since we don't want to modify the original one.
+ if consensusParams.ApprovedUpgrades != nil {
+ newApprovedUpgrades := make(map[protocol.ConsensusVersion]uint64)
+ for ver, when := range consensusParams.ApprovedUpgrades {
+ newApprovedUpgrades[ver] = when
+ }
+ consensusParams.ApprovedUpgrades = newApprovedUpgrades
+ }
+ staticConsensus[consensusVersion] = consensusParams
+ }
+ return staticConsensus
+}
+
+// Merge merges a configurable consensus on top of the existing consensus protocol and returns
+// a new consensus protocol without modifying any of the incoming structures.
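+//
+// A hypothetical caller (editor's sketch; myParams is an assumed variable)
+// could override a single version like this:
+//
+//	custom := config.ConsensusProtocols{
+//		protocol.ConsensusFuture: myParams, // ApprovedUpgrades must be non-nil
+//	}
+//	merged := config.Consensus.Merge(custom)
+//
+// Passing an entry whose ApprovedUpgrades map is nil instead deletes that
+// version, along with any upgrade paths that point to it.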
+func (cp ConsensusProtocols) Merge(configurableConsensus ConsensusProtocols) ConsensusProtocols { + staticConsensus := cp.DeepCopy() + + for consensusVersion, consensusParams := range configurableConsensus { + if consensusParams.ApprovedUpgrades == nil { + // if we were provided with an empty ConsensusParams, delete the existing reference to this consensus version + for cVer, cParam := range staticConsensus { + if cVer == consensusVersion { + delete(staticConsensus, cVer) + } else { + // delete upgrade to deleted version + delete(cParam.ApprovedUpgrades, consensusVersion) + } + } + } else { + // need to add/update entry + staticConsensus[consensusVersion] = consensusParams + } + } + + return staticConsensus +} + +func initConsensusProtocols() { + // WARNING: copying a ConsensusParams by value into a new variable + // does not copy the ApprovedUpgrades map. Make sure that each new + // ConsensusParams structure gets a fresh ApprovedUpgrades map. + + // Base consensus protocol version, v7. + v7 := ConsensusParams{ + UpgradeVoteRounds: 10000, + UpgradeThreshold: 9000, + DefaultUpgradeWaitRounds: 10000, + MaxVersionStringLen: 64, + + MinBalance: 10000, + MinTxnFee: 1000, + MaxTxnLife: 1000, + MaxTxnNoteBytes: 1024, + MaxTxnBytesPerBlock: 1000000, + DefaultKeyDilution: 10000, + + MaxTimestampIncrement: 25, + + RewardUnit: 1e6, + RewardsRateRefreshInterval: 5e5, + + ApprovedUpgrades: map[protocol.ConsensusVersion]uint64{}, + + NumProposers: 30, + SoftCommitteeSize: 2500, + SoftCommitteeThreshold: 1870, + CertCommitteeSize: 1000, + CertCommitteeThreshold: 720, + NextCommitteeSize: 10000, + NextCommitteeThreshold: 7750, + LateCommitteeSize: 10000, + LateCommitteeThreshold: 7750, + RedoCommitteeSize: 10000, + RedoCommitteeThreshold: 7750, + DownCommitteeSize: 10000, + DownCommitteeThreshold: 7750, + + AgreementFilterTimeout: 4 * time.Second, + AgreementFilterTimeoutPeriod0: 4 * time.Second, + + FastRecoveryLambda: 5 * time.Minute, + + SeedLookback: 2, + SeedRefreshInterval: 100, + + MaxBalLookback: 320, + + MaxTxGroupSize: 1, + } + + v7.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} + Consensus[protocol.ConsensusV7] = v7 + + // v8 uses parameters and a seed derivation policy (the "twin seeds") from Georgios' new analysis + v8 := v7 + + v8.SeedRefreshInterval = 80 + v8.NumProposers = 9 + v8.SoftCommitteeSize = 2990 + v8.SoftCommitteeThreshold = 2267 + v8.CertCommitteeSize = 1500 + v8.CertCommitteeThreshold = 1112 + v8.NextCommitteeSize = 5000 + v8.NextCommitteeThreshold = 3838 + v8.LateCommitteeSize = 5000 + v8.LateCommitteeThreshold = 3838 + v8.RedoCommitteeSize = 5000 + v8.RedoCommitteeThreshold = 3838 + v8.DownCommitteeSize = 5000 + v8.DownCommitteeThreshold = 3838 + + v8.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} + Consensus[protocol.ConsensusV8] = v8 + + // v7 can be upgraded to v8. + v7.ApprovedUpgrades[protocol.ConsensusV8] = 0 + + // v9 increases the minimum balance to 100,000 microAlgos. + v9 := v8 + v9.MinBalance = 100000 + v9.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} + Consensus[protocol.ConsensusV9] = v9 + + // v8 can be upgraded to v9. + v8.ApprovedUpgrades[protocol.ConsensusV9] = 0 + + // v10 introduces fast partition recovery (and also raises NumProposers). 
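+ // Editor's note: every version below is derived by copying the previous
+ // ConsensusParams by value and overriding fields. Because a value copy
+ // shares the old ApprovedUpgrades map (see the WARNING at the top of this
+ // function), each derived version immediately replaces it, e.g.:
+ //
+ //	v10 := v9 // value copy; still shares v9's ApprovedUpgrades map
+ //	v10.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} // fresh map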
+ v10 := v9 + v10.NumProposers = 20 + v10.LateCommitteeSize = 500 + v10.LateCommitteeThreshold = 320 + v10.RedoCommitteeSize = 2400 + v10.RedoCommitteeThreshold = 1768 + v10.DownCommitteeSize = 6000 + v10.DownCommitteeThreshold = 4560 + v10.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} + Consensus[protocol.ConsensusV10] = v10 + + // v9 can be upgraded to v10. + v9.ApprovedUpgrades[protocol.ConsensusV10] = 0 + + // v11 introduces SignedTxnInBlock. + v11 := v10 + v11.SupportSignedTxnInBlock = true + v11.PaysetCommit = PaysetCommitFlat + v11.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} + Consensus[protocol.ConsensusV11] = v11 + + // v10 can be upgraded to v11. + v10.ApprovedUpgrades[protocol.ConsensusV11] = 0 + + // v12 increases the maximum length of a version string. + v12 := v11 + v12.MaxVersionStringLen = 128 + v12.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} + Consensus[protocol.ConsensusV12] = v12 + + // v11 can be upgraded to v12. + v11.ApprovedUpgrades[protocol.ConsensusV12] = 0 + + // v13 makes the consensus version a meaningful string. + v13 := v12 + v13.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} + Consensus[protocol.ConsensusV13] = v13 + + // v12 can be upgraded to v13. + v12.ApprovedUpgrades[protocol.ConsensusV13] = 0 + + // v14 introduces tracking of closing amounts in ApplyData, and enables + // GenesisHash in transactions. + v14 := v13 + v14.ApplyData = true + v14.SupportGenesisHash = true + v14.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} + Consensus[protocol.ConsensusV14] = v14 + + // v13 can be upgraded to v14. + v13.ApprovedUpgrades[protocol.ConsensusV14] = 0 + + // v15 introduces tracking of reward distributions in ApplyData. + v15 := v14 + v15.RewardsInApplyData = true + v15.ForceNonParticipatingFeeSink = true + v15.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} + Consensus[protocol.ConsensusV15] = v15 + + // v14 can be upgraded to v15. + v14.ApprovedUpgrades[protocol.ConsensusV15] = 0 + + // v16 fixes domain separation in credentials. + v16 := v15 + v16.CredentialDomainSeparationEnabled = true + v16.RequireGenesisHash = true + v16.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} + Consensus[protocol.ConsensusV16] = v16 + + // v15 can be upgraded to v16. + v15.ApprovedUpgrades[protocol.ConsensusV16] = 0 + + // ConsensusV17 points to 'final' spec commit + v17 := v16 + v17.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} + Consensus[protocol.ConsensusV17] = v17 + + // v16 can be upgraded to v17. + v16.ApprovedUpgrades[protocol.ConsensusV17] = 0 + + // ConsensusV18 points to reward calculation spec commit + v18 := v17 + v18.PendingResidueRewards = true + v18.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} + v18.TxnCounter = true + v18.Asset = true + v18.LogicSigVersion = 1 + v18.LogicSigMaxSize = 1000 + v18.LogicSigMaxCost = 20000 + v18.MaxAssetsPerAccount = 1000 + v18.SupportTxGroups = true + v18.MaxTxGroupSize = 16 + v18.SupportTransactionLeases = true + v18.SupportBecomeNonParticipatingTransactions = true + v18.MaxAssetNameBytes = 32 + v18.MaxAssetUnitNameBytes = 8 + v18.MaxAssetURLBytes = 32 + Consensus[protocol.ConsensusV18] = v18 + + // ConsensusV19 is the official spec commit ( teal, assets, group tx ) + v19 := v18 + v19.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} + + Consensus[protocol.ConsensusV19] = v19 + + // v18 can be upgraded to v19. + v18.ApprovedUpgrades[protocol.ConsensusV19] = 0 + // v17 can be upgraded to v19. 
+ v17.ApprovedUpgrades[protocol.ConsensusV19] = 0
+
+ // v20 points to adding decimal precision to assets.
+ v20 := v19
+ v20.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+ v20.MaxAssetDecimals = 19
+ // we want to adjust the upgrade time to be roughly one week.
+ // one week, in terms of rounds, would be:
+ // 140651 = (7 * 24 * 60 * 60 / 4.3)
+ // for the sake of future manual calculations, we'll round that down
+ // a bit:
+ v20.DefaultUpgradeWaitRounds = 140000
+ Consensus[protocol.ConsensusV20] = v20
+
+ // v19 can be upgraded to v20.
+ v19.ApprovedUpgrades[protocol.ConsensusV20] = 0
+
+ // v21 fixes a bug in Credential.lowestOutput that would cause larger accounts to be selected to propose disproportionately more often than small accounts.
+ v21 := v20
+ v21.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+ Consensus[protocol.ConsensusV21] = v21
+ // v20 can be upgraded to v21.
+ v20.ApprovedUpgrades[protocol.ConsensusV21] = 0
+
+ // v22 is an upgrade which allows tuning the number of rounds to wait to execute upgrades.
+ v22 := v21
+ v22.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+ v22.MinUpgradeWaitRounds = 10000
+ v22.MaxUpgradeWaitRounds = 150000
+ Consensus[protocol.ConsensusV22] = v22
+
+ // v23 is an upgrade which fixes the behavior of leases so that
+ // it conforms with the intended spec.
+ v23 := v22
+ v23.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+ v23.FixTransactionLeases = true
+ Consensus[protocol.ConsensusV23] = v23
+ // v22 can be upgraded to v23.
+ v22.ApprovedUpgrades[protocol.ConsensusV23] = 10000
+ // v21 can be upgraded to v23.
+ v21.ApprovedUpgrades[protocol.ConsensusV23] = 0
+
+ // v24 is the stateful teal and rekeying upgrade
+ v24 := v23
+ v24.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+ v24.LogicSigVersion = 2
+
+ // Enable application support
+ v24.Application = true
+
+ // Although Inners were not allowed yet, this gates downgrade checks, which must be allowed.
+ v24.MinInnerApplVersion = 6
+
+ // Enable rekeying
+ v24.SupportRekeying = true
+
+ // 100.1 Algos (MinBalance for creating 1,000 assets)
+ v24.MaximumMinimumBalance = 100100000
+
+ v24.MaxAppArgs = 16
+ v24.MaxAppTotalArgLen = 2048
+ v24.MaxAppProgramLen = 1024
+ v24.MaxAppTotalProgramLen = 2048 // No effect until v28, when MaxAppProgramLen increased
+ v24.MaxAppKeyLen = 64
+ v24.MaxAppBytesValueLen = 64
+ v24.MaxAppSumKeyValueLens = 128 // Set here to have no effect until MaxAppBytesValueLen increases
+
+ // 0.1 Algos (Same min balance cost as an Asset)
+ v24.AppFlatParamsMinBalance = 100000
+ v24.AppFlatOptInMinBalance = 100000
+
+ // Can look up Sender + 4 other balance records per Application txn
+ v24.MaxAppTxnAccounts = 4
+
+ // Can look up 2 other app creator balance records to see global state
+ v24.MaxAppTxnForeignApps = 2
+
+ // Can look up 2 assets to see asset parameters
+ v24.MaxAppTxnForeignAssets = 2
+
+ // Intended to have no effect in v24 (it's set to accounts +
+ // ASAs + apps). In later versions, it allows increasing the
+ // individual limits while maintaining the same max references.
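+ // (Editor's illustration: 4 accounts + 2 foreign apps + 2 foreign
+ // assets = 8, matching the individual limits set above.)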
+ v24.MaxAppTotalTxnReferences = 8 + + // 64 byte keys @ ~333 microAlgos/byte + delta + v24.SchemaMinBalancePerEntry = 25000 + + // 9 bytes @ ~333 microAlgos/byte + delta + v24.SchemaUintMinBalance = 3500 + + // 64 byte values @ ~333 microAlgos/byte + delta + v24.SchemaBytesMinBalance = 25000 + + // Maximum number of key/value pairs per local key/value store + v24.MaxLocalSchemaEntries = 16 + + // Maximum number of key/value pairs per global key/value store + v24.MaxGlobalSchemaEntries = 64 + + // Maximum cost of ApprovalProgram/ClearStateProgram + v24.MaxAppProgramCost = 700 + + // Maximum number of apps a single account can create + v24.MaxAppsCreated = 10 + + // Maximum number of apps a single account can opt into + v24.MaxAppsOptedIn = 10 + Consensus[protocol.ConsensusV24] = v24 + + // v23 can be upgraded to v24, with an update delay of 7 days ( see calculation above ) + v23.ApprovedUpgrades[protocol.ConsensusV24] = 140000 + + // v25 enables AssetCloseAmount in the ApplyData + v25 := v24 + v25.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} + + // Enable AssetCloseAmount field + v25.EnableAssetCloseAmount = true + Consensus[protocol.ConsensusV25] = v25 + + // v26 adds support for teal3 + v26 := v25 + v26.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} + + // Enable the InitialRewardsRateCalculation fix + v26.InitialRewardsRateCalculation = true + + // Enable transaction Merkle tree. + v26.PaysetCommit = PaysetCommitMerkle + + // Enable teal3 + v26.LogicSigVersion = 3 + + Consensus[protocol.ConsensusV26] = v26 + + // v25 or v24 can be upgraded to v26, with an update delay of 7 days ( see calculation above ) + v25.ApprovedUpgrades[protocol.ConsensusV26] = 140000 + v24.ApprovedUpgrades[protocol.ConsensusV26] = 140000 + + // v27 updates ApplyDelta.EvalDelta.LocalDeltas format + v27 := v26 + v27.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} + + // Enable the ApplyDelta.EvalDelta.LocalDeltas fix + v27.NoEmptyLocalDeltas = true + + Consensus[protocol.ConsensusV27] = v27 + + // v26 can be upgraded to v27, with an update delay of 3 days + // 60279 = (3 * 24 * 60 * 60 / 4.3) + // for the sake of future manual calculations, we'll round that down + // a bit : + v26.ApprovedUpgrades[protocol.ConsensusV27] = 60000 + + // v28 introduces new TEAL features, larger program size, fee pooling and longer asset max URL + v28 := v27 + v28.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} + + // Enable TEAL 4 / AVM 0.9 + v28.LogicSigVersion = 4 + // Enable support for larger app program size + v28.MaxExtraAppProgramPages = 3 + v28.MaxAppProgramLen = 2048 + // Increase asset URL length to allow for IPFS URLs + v28.MaxAssetURLBytes = 96 + // Let the bytes value take more space. Key+Value is still limited to 128 + v28.MaxAppBytesValueLen = 128 + + // Individual limits raised + v28.MaxAppTxnForeignApps = 8 + v28.MaxAppTxnForeignAssets = 8 + + // MaxAppTxnAccounts has not been raised yet. It is already + // higher (4) and there is a multiplicative effect in + // "reachability" between accounts and creatables, so we + // retain 4 x 4 as worst case. 
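+
+ // Editor's illustration of the fee pooling enabled just below: with
+ // MinTxnFee = 1000, a group of 3 transactions is valid as long as its
+ // fees sum to at least 3000 microAlgos (e.g. 3000 + 0 + 0), rather than
+ // each transaction needing to pay 1000 on its own.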
+
+ v28.EnableFeePooling = true
+ v28.EnableKeyregCoherencyCheck = true
+
+ Consensus[protocol.ConsensusV28] = v28
+
+ // v27 can be upgraded to v28, with an update delay of 7 days ( see calculation above )
+ v27.ApprovedUpgrades[protocol.ConsensusV28] = 140000
+
+ // v29 fixes application update by using ExtraProgramPages in size calculations
+ v29 := v28
+ v29.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+
+ // Enable ExtraProgramPages for application update
+ v29.EnableExtraPagesOnAppUpdate = true
+
+ Consensus[protocol.ConsensusV29] = v29
+
+ // v28 can be upgraded to v29, with an update delay of 3 days ( see calculation above )
+ v28.ApprovedUpgrades[protocol.ConsensusV29] = 60000
+
+ // v30 introduces AVM 1.0 and TEAL 5, increases the app opt in limit to 50,
+ // and allows costs to be pooled in grouped stateful transactions.
+ v30 := v29
+ v30.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+
+ // Enable TEAL 5 / AVM 1.0
+ v30.LogicSigVersion = 5
+
+ // Enable App calls to pool budget in grouped transactions
+ v30.EnableAppCostPooling = true
+
+ // Enable Inner Transactions, and set maximum number. 0 value is
+ // disabled. Value > 0 also activates storage of creatable IDs in
+ // ApplyData, as that is required to support REST API when inner
+ // transactions are activated.
+ v30.MaxInnerTransactions = 16
+
+ // Allow 50 app opt ins
+ v30.MaxAppsOptedIn = 50
+
+ Consensus[protocol.ConsensusV30] = v30
+
+ // v29 can be upgraded to v30, with an update delay of 7 days ( see calculation above )
+ v29.ApprovedUpgrades[protocol.ConsensusV30] = 140000
+
+ v31 := v30
+ v31.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+ v31.RewardsCalculationFix = true
+ v31.MaxProposedExpiredOnlineAccounts = 32
+
+ // Enable TEAL 6 / AVM 1.1
+ v31.LogicSigVersion = 6
+ v31.EnableInnerTransactionPooling = true
+ v31.IsolateClearState = true
+
+ // state proof key registration
+ v31.EnableStateProofKeyregCheck = true
+
+ // Maximum validity period for key registration, to prevent generating too many StateProof keys
+ v31.MaxKeyregValidPeriod = 256*(1<<16) - 1
+
+ Consensus[protocol.ConsensusV31] = v31
+
+ // v30 can be upgraded to v31, with an update delay of 7 days ( see calculation above )
+ v30.ApprovedUpgrades[protocol.ConsensusV31] = 140000
+
+ v32 := v31
+ v32.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+
+ // Enable extended application storage; binaries that contain support for this
+ // flag would already be restructuring their internal storage for extended
+ // application storage, and therefore would not produce catchpoints and/or
+ // catchpoint labels prior to this feature being enabled.
+ v32.EnableAccountDataResourceSeparation = true
+
+ // Remove limits on MinimumBalance
+ v32.MaximumMinimumBalance = 0
+
+ // Remove limits on assets / account.
+ v32.MaxAssetsPerAccount = 0
+
+ // Remove limits on maximum number of apps a single account can create
+ v32.MaxAppsCreated = 0
+
+ // Remove limits on maximum number of apps a single account can opt into
+ v32.MaxAppsOptedIn = 0
+
+ Consensus[protocol.ConsensusV32] = v32
+
+ // v31 can be upgraded to v32, with an update delay of 7 days ( see calculation above )
+ v31.ApprovedUpgrades[protocol.ConsensusV32] = 140000
+
+ v33 := v32
+ v33.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+
+ // Make the accounts snapshot for round X at X-CatchpointLookback in
+ // order to guarantee all nodes produce catchpoints at the same round.
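+ // (Editor's illustration: with CatchpointLookback = 320, the accounts
+ // snapshot for round 1000 is taken at round 680.)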
+ v33.CatchpointLookback = 320 + + // Require MaxTxnLife + X blocks and headers preserved by a node + v33.DeeperBlockHeaderHistory = 1 + + v33.MaxTxnBytesPerBlock = 5 * 1024 * 1024 + + Consensus[protocol.ConsensusV33] = v33 + + // v32 can be upgraded to v33, with an update delay of 7 days ( see calculation above ) + v32.ApprovedUpgrades[protocol.ConsensusV33] = 140000 + + v34 := v33 + v34.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} + + // Enable state proofs. + v34.StateProofInterval = 256 + v34.StateProofTopVoters = 1024 + v34.StateProofVotersLookback = 16 + v34.StateProofWeightThreshold = (1 << 32) * 30 / 100 + v34.StateProofStrengthTarget = 256 + v34.StateProofMaxRecoveryIntervals = 10 + + v34.LogicSigVersion = 7 + v34.MinInnerApplVersion = 4 + + v34.UnifyInnerTxIDs = true + + v34.EnableSHA256TxnCommitmentHeader = true + v34.EnableOnlineAccountCatchpoints = true + + v34.UnfundedSenders = true + + v34.AgreementFilterTimeoutPeriod0 = 3400 * time.Millisecond + + Consensus[protocol.ConsensusV34] = v34 + + v35 := v34 + v35.StateProofExcludeTotalWeightWithRewards = true + + v35.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} + + Consensus[protocol.ConsensusV35] = v35 + + // v33 and v34 can be upgraded to v35, with an update delay of 12h: + // 10046 = (12 * 60 * 60 / 4.3) + // for the sake of future manual calculations, we'll round that down a bit : + v33.ApprovedUpgrades[protocol.ConsensusV35] = 10000 + v34.ApprovedUpgrades[protocol.ConsensusV35] = 10000 + + v36 := v35 + v36.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} + + // Boxes (unlimited global storage) + v36.LogicSigVersion = 8 + v36.MaxBoxSize = 32768 + v36.BoxFlatMinBalance = 2500 + v36.BoxByteMinBalance = 400 + v36.MaxAppBoxReferences = 8 + v36.BytesPerBoxReference = 1024 + + Consensus[protocol.ConsensusV36] = v36 + + v35.ApprovedUpgrades[protocol.ConsensusV36] = 140000 + + v37 := v36 + v37.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} + + Consensus[protocol.ConsensusV37] = v37 + + // v36 can be upgraded to v37, with an update delay of 7 days ( see calculation above ) + v36.ApprovedUpgrades[protocol.ConsensusV37] = 140000 + + v38 := v37 + v38.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} + + // enables state proof recoverability + v38.StateProofUseTrackerVerification = true + v38.EnableCatchpointsWithSPContexts = true + + // online circulation on-demand expiration + v38.ExcludeExpiredCirculation = true + + // TEAL resources sharing and other features + v38.LogicSigVersion = 9 + v38.EnablePrecheckECDSACurve = true + v38.AppForbidLowResources = true + v38.EnableBareBudgetError = true + v38.EnableBoxRefNameError = true + + v38.AgreementFilterTimeoutPeriod0 = 3000 * time.Millisecond + + Consensus[protocol.ConsensusV38] = v38 + + // v37 can be upgraded to v38, with an update delay of 12h: + // 10046 = (12 * 60 * 60 / 4.3) + // for the sake of future manual calculations, we'll round that down a bit : + v37.ApprovedUpgrades[protocol.ConsensusV38] = 10000 + + // ConsensusFuture is used to test features that are implemented + // but not yet released in a production protocol version. 
+ vFuture := v38
+ vFuture.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+
+ vFuture.LogicSigVersion = 10 // When moving this to a release, put a new higher LogicSigVersion here
+
+ Consensus[protocol.ConsensusFuture] = vFuture
+
+ // vAlphaX versions are a separate series of consensus parameters and versions for AlphaNet
+ vAlpha1 := v32
+ vAlpha1.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+ vAlpha1.AgreementFilterTimeoutPeriod0 = 2 * time.Second
+ vAlpha1.MaxTxnBytesPerBlock = 5000000
+ Consensus[protocol.ConsensusVAlpha1] = vAlpha1
+
+ vAlpha2 := vAlpha1
+ vAlpha2.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+ vAlpha2.AgreementFilterTimeoutPeriod0 = 3500 * time.Millisecond
+ vAlpha2.MaxTxnBytesPerBlock = 5 * 1024 * 1024
+ Consensus[protocol.ConsensusVAlpha2] = vAlpha2
+ vAlpha1.ApprovedUpgrades[protocol.ConsensusVAlpha2] = 10000
+
+ // vAlpha3 and vAlpha4 use the same parameters as v33 and v34
+ vAlpha3 := v33
+ vAlpha3.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+ Consensus[protocol.ConsensusVAlpha3] = vAlpha3
+ vAlpha2.ApprovedUpgrades[protocol.ConsensusVAlpha3] = 10000
+
+ vAlpha4 := v34
+ vAlpha4.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+ Consensus[protocol.ConsensusVAlpha4] = vAlpha4
+ vAlpha3.ApprovedUpgrades[protocol.ConsensusVAlpha4] = 10000
+
+ // vAlpha5 uses the same parameters as v36
+ vAlpha5 := v36
+ vAlpha5.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+ Consensus[protocol.ConsensusVAlpha5] = vAlpha5
+ vAlpha4.ApprovedUpgrades[protocol.ConsensusVAlpha5] = 10000
+}
+
+// Global defines global Algorand protocol parameters which should not be overridden.
+type Global struct {
+ SmallLambda time.Duration // min amount of time to wait for leader's credential (i.e., time to propagate one credential)
+ BigLambda time.Duration // max amount of time to wait for leader's proposal (i.e., time to propagate one block)
+}
+
+// Protocol holds the global configuration settings for the agreement protocol,
+// initialized with our current defaults. This is used across all nodes we create.
+var Protocol = Global{
+ SmallLambda: 2000 * time.Millisecond,
+ BigLambda: 15000 * time.Millisecond,
+}
+
+func init() {
+ Consensus = make(ConsensusProtocols)
+
+ initConsensusProtocols()
+
+ // Set allocation limits
+ for _, p := range Consensus {
+ checkSetAllocBounds(p)
+ }
+}
diff --git a/protocol/consensus.go b/protocol/consensus.go
new file mode 100644
index 00000000..3a9e525d
--- /dev/null
+++ b/protocol/consensus.go
@@ -0,0 +1,236 @@
+package protocol
+
+import (
+ "fmt"
+)
+
+// ConsensusVersion is a string that identifies a version of the
+// consensus protocol.
+type ConsensusVersion string
+
+// DEPRECATEDConsensusV0 is a baseline version of the Algorand consensus protocol
+// at the time versioning was introduced.
+// It is now deprecated.
+const DEPRECATEDConsensusV0 = ConsensusVersion("v0")
+
+// DEPRECATEDConsensusV1 adds support for Genesis ID in transactions, but does not
+// require it (transactions missing a GenesisID value are still allowed).
+// It is now deprecated.
+const DEPRECATEDConsensusV1 = ConsensusVersion("v1")
+
+// DEPRECATEDConsensusV2 fixes a bug in the agreement protocol where proposalValues
+// fail to commit to the original period and sender of a block.
+const DEPRECATEDConsensusV2 = ConsensusVersion("v2")
+
+// DEPRECATEDConsensusV3 adds support for fine-grained ephemeral keys.
+const DEPRECATEDConsensusV3 = ConsensusVersion("v3")
+
+// DEPRECATEDConsensusV4 adds support for a min balance and a transaction that
+// closes out an account.
+const DEPRECATEDConsensusV4 = ConsensusVersion("v4")
+
+// DEPRECATEDConsensusV5 sets MinTxnFee to 1000 and fixes a balance lookback bug.
+const DEPRECATEDConsensusV5 = ConsensusVersion("v5")
+
+// DEPRECATEDConsensusV6 adds support for explicit ephemeral-key parameters.
+const DEPRECATEDConsensusV6 = ConsensusVersion("v6")
+
+// ConsensusV7 increases MaxBalLookback to 320 in preparation for
+// the twin seeds change.
+const ConsensusV7 = ConsensusVersion("v7")
+
+// ConsensusV8 uses the new parameters and seed derivation policy
+// from the agreement protocol's security analysis.
+const ConsensusV8 = ConsensusVersion("v8")
+
+// ConsensusV9 increases min balance to 100,000 microAlgos.
+const ConsensusV9 = ConsensusVersion("v9")
+
+// ConsensusV10 introduces fast partition recovery.
+const ConsensusV10 = ConsensusVersion("v10")
+
+// ConsensusV11 introduces efficient encoding of SignedTxn using SignedTxnInBlock.
+const ConsensusV11 = ConsensusVersion("v11")
+
+// ConsensusV12 increases the maximum length of a version string.
+const ConsensusV12 = ConsensusVersion("v12")
+
+// ConsensusV13 makes the consensus version a meaningful string.
+const ConsensusV13 = ConsensusVersion(
+ // Points to the version of the Algorand spec as of May 21, 2019.
+ "https://github.com/algorand/spec/tree/0c8a9dc44d7368cc266d5407b79fb3311f4fc795",
+)
+
+// ConsensusV14 adds tracking of closing amounts in ApplyData,
+// and enables genesis hash in transactions.
+const ConsensusV14 = ConsensusVersion(
+ "https://github.com/algorand/spec/tree/2526b6ae062b4fe5e163e06e41e1d9b9219135a9",
+)
+
+// ConsensusV15 adds tracking of reward distributions in ApplyData.
+const ConsensusV15 = ConsensusVersion(
+ "https://github.com/algorand/spec/tree/a26ed78ed8f834e2b9ccb6eb7d3ee9f629a6e622",
+)
+
+// ConsensusV16 fixes domain separation in Credentials and requires GenesisHash.
+const ConsensusV16 = ConsensusVersion(
+ "https://github.com/algorand/spec/tree/22726c9dcd12d9cddce4a8bd7e8ccaa707f74101",
+)
+
+// ConsensusV17 points to the 'final' spec commit for the June 2019 release.
+const ConsensusV17 = ConsensusVersion(
+ "https://github.com/algorandfoundation/specs/tree/5615adc36bad610c7f165fa2967f4ecfa75125f0",
+)
+
+// ConsensusV18 points to the reward calculation spec commit.
+const ConsensusV18 = ConsensusVersion(
+ "https://github.com/algorandfoundation/specs/tree/6c6bd668be0ab14098e51b37e806c509f7b7e31f",
+)
+
+// ConsensusV19 points to the 'final' spec commit for the November 2019 release.
+const ConsensusV19 = ConsensusVersion(
+ "https://github.com/algorandfoundation/specs/tree/0e196e82bfd6e327994bec373c4cc81bc878ef5c",
+)
+
+// ConsensusV20 points to adding the decimals field to assets.
+const ConsensusV20 = ConsensusVersion(
+ "https://github.com/algorandfoundation/specs/tree/4a9db6a25595c6fd097cf9cc137cc83027787eaa",
+)
+
+// ConsensusV21 fixes a bug in credential.lowestOutput.
+const ConsensusV21 = ConsensusVersion(
+ "https://github.com/algorandfoundation/specs/tree/8096e2df2da75c3339986317f9abe69d4fa86b4b",
+)
+
+// ConsensusV22 allows tuning the upgrade delay.
+const ConsensusV22 = ConsensusVersion(
+ "https://github.com/algorandfoundation/specs/tree/57016b942f6d97e6d4c0688b373bb0a2fc85a1a2",
+)
+
+// ConsensusV23 fixes lease behavior.
+const ConsensusV23 = ConsensusVersion(
+ "https://github.com/algorandfoundation/specs/tree/e5f565421d720c6f75cdd186f7098495caf9101f",
+)
+
+// ConsensusV24 includes applications, rekeying and AVM v2.
+const ConsensusV24 = ConsensusVersion(
+ "https://github.com/algorandfoundation/specs/tree/3a83c4c743f8b17adfd73944b4319c25722a6782",
+)
+
+// ConsensusV25 adds support for AssetCloseAmount in the ApplyData.
+const ConsensusV25 = ConsensusVersion(
+ "https://github.com/algorandfoundation/specs/tree/bea19289bf41217d2c0af30522fa222ef1366466",
+)
+
+// ConsensusV26 adds support for TEAL 3, the initial rewards calculation fix and merkle tree hash commitments.
+const ConsensusV26 = ConsensusVersion(
+ "https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff",
+)
+
+// ConsensusV27 updates the ApplyDelta.EvalDelta.LocalDeltas format.
+const ConsensusV27 = ConsensusVersion(
+ "https://github.com/algorandfoundation/specs/tree/d050b3cade6d5c664df8bd729bf219f179812595",
+)
+
+// ConsensusV28 introduces new TEAL features, a larger program size, fee pooling and a longer asset max URL.
+const ConsensusV28 = ConsensusVersion(
+ "https://github.com/algorandfoundation/specs/tree/65b4ab3266c52c56a0fa7d591754887d68faad0a",
+)
+
+// ConsensusV29 fixes application update by using ExtraProgramPages in size calculations.
+const ConsensusV29 = ConsensusVersion(
+ "https://github.com/algorandfoundation/specs/tree/abc54f79f9ad679d2d22f0fb9909fb005c16f8a1",
+)
+
+// ConsensusV30 introduces AVM 1.0 and TEAL 5, increases the app opt in limit to 50,
+// and allows costs to be pooled in grouped stateful transactions.
+const ConsensusV30 = ConsensusVersion(
+ "https://github.com/algorandfoundation/specs/tree/bc36005dbd776e6d1eaf0c560619bb183215645c",
+)
+
+// ConsensusV31 enables batch verification for ed25519 signatures, fixes a reward calculation issue, introduces the ability
+// to force an expired participation offline, enables TEAL 6 (AVM 1.1) and adds support for creating
+// state proof keys.
+const ConsensusV31 = ConsensusVersion(
+ "https://github.com/algorandfoundation/specs/tree/85e6db1fdbdef00aa232c75199e10dc5fe9498f6",
+)
+
+// ConsensusV32 enables unlimited assets.
+const ConsensusV32 = ConsensusVersion(
+ "https://github.com/algorandfoundation/specs/tree/d5ac876d7ede07367dbaa26e149aa42589aac1f7",
+)
+
+// ConsensusV33 enables large blocks, a deeper block history for TEAL,
+// and changes the catchpoint generation round after lowering the in-memory deltas size (320 -> 4).
+const ConsensusV33 = ConsensusVersion(
+ "https://github.com/algorandfoundation/specs/tree/830a4e673148498cc7230a0d1ba1ed0a5471acc6",
+)
+
+// ConsensusV34 enables the TEAL v7 opcodes and state proofs, and shortens lambda to 1.7s.
+const ConsensusV34 = ConsensusVersion(
+ "https://github.com/algorandfoundation/specs/tree/2dd5435993f6f6d65691140f592ebca5ef19ffbd",
+)
+
+// ConsensusV35 updates the calculation of total stake in state proofs.
+const ConsensusV35 = ConsensusVersion(
+ "https://github.com/algorandfoundation/specs/tree/433d8e9a7274b6fca703d91213e05c7e6a589e69",
+)
+
+// ConsensusV36 adds box storage in TEAL v8.
+const ConsensusV36 = ConsensusVersion(
+ "https://github.com/algorandfoundation/specs/tree/44fa607d6051730f5264526bf3c108d51f0eadb6",
+)
+
+// ConsensusV37 is a technical upgrade, released at the same time as ConsensusV38.
+// It is needed to allow nodes to build up the state necessary to support the state proof related
+// options in ConsensusV38.
+const ConsensusV37 = ConsensusVersion(
+ "https://github.com/algorandfoundation/specs/tree/1ac4dd1f85470e1fb36c8a65520e1313d7dfed5e",
+)
+
+// ConsensusV38 enables state proof verification using a special tracker,
+// TEAL v9 resource sharing, pre-checking the ECDSA curve and extra features, and
+// shortens lambda to 1.5s.
+const ConsensusV38 = ConsensusVersion(
+ "https://github.com/algorandfoundation/specs/tree/abd3d4823c6f77349fc04c3af7b1e99fe4df699f",
+)
+
+// ConsensusFuture is a protocol that should not appear in any production
+// network, but is used to test features before they are released.
+const ConsensusFuture = ConsensusVersion(
+ "future",
+)
+
+// ConsensusVAlpha1 is the first consensus protocol for AlphaNet, which is the same as
+// v32, but with a 2-second filter timeout and a 5 MB block size.
+const ConsensusVAlpha1 = ConsensusVersion("alpha1")
+
+// ConsensusVAlpha2 is the second consensus protocol for AlphaNet, which increases the
+// filter timeout to 3.5 seconds and uses 5MiB blocks.
+const ConsensusVAlpha2 = ConsensusVersion("alpha2")
+
+// ConsensusVAlpha3 uses the same parameters as ConsensusV33.
+const ConsensusVAlpha3 = ConsensusVersion("alpha3")
+
+// ConsensusVAlpha4 uses the same parameters as ConsensusV34.
+const ConsensusVAlpha4 = ConsensusVersion("alpha4")
+
+// ConsensusVAlpha5 uses the same parameters as ConsensusV36.
+const ConsensusVAlpha5 = ConsensusVersion("alpha5")
+
+// !!! ********************* !!!
+// !!! *** Please update ConsensusCurrentVersion when adding new protocol versions *** !!!
+// !!! ********************* !!!
+
+// ConsensusCurrentVersion is the latest version and should be used
+// when a specific version is not provided.
+const ConsensusCurrentVersion = ConsensusV38
+
+// Error is used to indicate that an unsupported protocol has been detected.
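+//
+// A caller-side sketch (editor's addition; unknownVersion is an assumed
+// variable of type ConsensusVersion):
+//
+//	if _, ok := config.Consensus[unknownVersion]; !ok {
+//		return protocol.Error(unknownVersion)
+//	}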
+type Error ConsensusVersion + +// Error satisfies builtin interface `error` +func (err Error) Error() string { + proto := ConsensusVersion(err) + return fmt.Sprintf("protocol not supported: %s", proto) +} diff --git a/test/algodclientv2_test.go b/test/algodclientv2_test.go index def58c4c..910d935a 100644 --- a/test/algodclientv2_test.go +++ b/test/algodclientv2_test.go @@ -61,6 +61,9 @@ func AlgodClientV2Context(s *godog.Suite) { s.Step(`^we make a UnsetSyncRound call$`, weMakeAUnsetSyncRoundCall) s.Step(`^we make a SetBlockTimeStampOffset call against offset (\d+)$`, weMakeASetBlockTimeStampOffsetCallAgainstOffset) s.Step(`^we make a GetBlockTimeStampOffset call$`, weMakeAGetBlockTimeStampOffsetCall) + s.Step(`^we make a GetLedgerStateDelta call against round (\d+)$`, weMakeAGetLedgerStateDeltaCallAgainstRound) + s.Step(`^we make a LedgerStateDeltaForTransactionGroupResponse call for ID "([^"]*)"$`, weMakeALedgerStateDeltaForTransactionGroupResponseCallForID) + s.Step(`^we make a TransactionGroupLedgerStateDeltaForRoundResponse call for round (\d+)$`, weMakeATransactionGroupLedgerStateDeltaForRoundResponseCallForRound) s.BeforeScenario(func(interface{}) { globalErrForExamination = nil @@ -364,3 +367,30 @@ func weMakeAGetBlockTimeStampOffsetCall() error { algodClient.GetBlockTimeStampOffset().Do(context.Background()) return nil } + +func weMakeAGetLedgerStateDeltaCallAgainstRound(round int) error { + algodClient, err := algod.MakeClient(mockServer.URL, "") + if err != nil { + return err + } + algodClient.GetLedgerStateDelta(uint64(round)).Do(context.Background()) + return nil +} + +func weMakeALedgerStateDeltaForTransactionGroupResponseCallForID(id string) error { + algodClient, err := algod.MakeClient(mockServer.URL, "") + if err != nil { + return err + } + algodClient.GetLedgerStateDeltaForTransactionGroup(id).Do(context.Background()) + return nil +} + +func weMakeATransactionGroupLedgerStateDeltaForRoundResponseCallForRound(round int) error { + algodClient, err := algod.MakeClient(mockServer.URL, "") + if err != nil { + return err + } + algodClient.GetTransactionGroupLedgerStateDeltasForRound(uint64(round)).Do(context.Background()) + return nil +} diff --git a/test/docker/Dockerfile b/test/docker/Dockerfile index 3e2b33ee..610e5c7e 100644 --- a/test/docker/Dockerfile +++ b/test/docker/Dockerfile @@ -1,4 +1,4 @@ -ARG GO_IMAGE=golang:1.17.9 +ARG GO_IMAGE=golang:1.20.5 FROM $GO_IMAGE # Copy SDK code into the container diff --git a/test/helpers.go b/test/helpers.go index 3618b503..6b465445 100644 --- a/test/helpers.go +++ b/test/helpers.go @@ -42,7 +42,7 @@ func loadMockJsons(commaDelimitedFilenames, pathToJsons string) ([][]byte, error var mockServer *httptest.Server var responseRing *ring.Ring -func mockHttpResponsesInLoadedFromHelper(jsonfiles, directory string, status int) error { +func mockHTTPResponsesInLoadedFromHelper(jsonfiles, directory string, status int) error { jsons, err := loadMockJsons(jsonfiles, directory) if err != nil { return err @@ -75,7 +75,7 @@ func mockServerRecordingRequestPaths() error { } func expectTheRequestToBe(expectedMethod, expectedPath string) error { - if strings.ToLower(expectedMethod) != strings.ToLower(receivedMethod) { + if !strings.EqualFold(expectedMethod, receivedMethod) { return fmt.Errorf("method used to access mock server was %s but expected %s", receivedMethod, expectedMethod) } return expectThePathUsedToBe(expectedPath) diff --git a/test/responses_unit_test.go b/test/responses_unit_test.go index 7b9a6d28..0d838f5b 100644 --- 
a/test/responses_unit_test.go +++ b/test/responses_unit_test.go @@ -29,7 +29,7 @@ func mockHttpResponsesInLoadedFromWithStatus(jsonfile, loadedFrom string, status baselinePath = path.Join(directory, jsonfile) var err error expectedStatus = status - err = mockHttpResponsesInLoadedFromHelper(jsonfile, directory, status) + err = mockHTTPResponsesInLoadedFromHelper(jsonfile, directory, status) if err != nil { return err } @@ -184,6 +184,15 @@ func weMakeAnyCallTo(client /* algod/indexer */, endpoint string) (err error) { case "GetBlockTimeStampOffset": response, err = algodC.GetBlockTimeStampOffset().Do(context.Background()) + case "GetLedgerStateDelta": + response, err = + algodC.GetLedgerStateDelta(123).Do(context.Background()) + case "GetTransactionGroupLedgerStateDeltaForRound": + response, err = + algodC.GetTransactionGroupLedgerStateDeltasForRound(123).Do(context.Background()) + case "GetLedgerStateDeltaForTransactionGroup": + response, err = + algodC.GetLedgerStateDeltaForTransactionGroup("someID").Do(context.Background()) case "any": // This is an error case // pickup the error as the response diff --git a/test/steps_test.go b/test/steps_test.go index 072943b1..2badecf2 100644 --- a/test/steps_test.go +++ b/test/steps_test.go @@ -2398,7 +2398,7 @@ func aDryrunResponseFileAndATransactionAtIndex(arg1, arg2 string) error { if err != nil { return err } - dr, err := transaction.NewDryrunResponseFromJson(data) + dr, err := transaction.NewDryrunResponseFromJSON(data) if err != nil { return err } diff --git a/test/unit.tags b/test/unit.tags index 24c201b2..6b27ad93 100644 --- a/test/unit.tags +++ b/test/unit.tags @@ -28,11 +28,13 @@ @unit.responses.timestamp @unit.responses.unlimited_assets @unit.sourcemap +@unit.statedelta @unit.stateproof.paths @unit.stateproof.responses @unit.sync @unit.tealsign -@unit.timestamp +@unit.responses.timestamp @unit.transactions @unit.transactions.keyreg @unit.transactions.payment +@unit.txngroupdeltas diff --git a/test/utilities.go b/test/utilities.go index 26fda1b9..8d9ffd89 100644 --- a/test/utilities.go +++ b/test/utilities.go @@ -18,6 +18,7 @@ import ( "github.com/algorand/go-algorand-sdk/v2/types" ) +// VerifyResponse compares the actual response to the expected response. func VerifyResponse(expectedFile string, actual string) error { jsonfile, err := os.Open(expectedFile) if err != nil { @@ -47,7 +48,16 @@ func VerifyResponse(expectedFile string, actual string) error { expectedString = string(sdk_json.EncodeStrict(generic)) } - err = EqualJson2(expectedString, actual) + if strings.HasSuffix(expectedFile, ".msgp") { + generic := make(map[string]interface{}) + err = msgpack.Decode(fileBytes, generic) + if err != nil { + return fmt.Errorf("failed to decode '%s' from message pack: %v", expectedFile, err) + } + expectedString = string(sdk_json.EncodeStrict(generic)) + } + + err = EqualJSON2(expectedString, actual) if err != nil { fmt.Printf("EXPECTED:\n%v\n", expectedString) fmt.Printf("ACTUAL:\n%v\n", actual) @@ -55,11 +65,11 @@ func VerifyResponse(expectedFile string, actual string) error { return err } -// EqualJson2 compares two json strings. +// EqualJSON2 compares two json strings. // returns true if considered equal, false otherwise. // The error returns the difference. 
diff --git a/test/responses_unit_test.go b/test/responses_unit_test.go
index 7b9a6d28..0d838f5b 100644
--- a/test/responses_unit_test.go
+++ b/test/responses_unit_test.go
@@ -29,7 +29,7 @@ func mockHttpResponsesInLoadedFromWithStatus(jsonfile, loadedFrom string, status
     baselinePath = path.Join(directory, jsonfile)
     var err error
     expectedStatus = status
-    err = mockHttpResponsesInLoadedFromHelper(jsonfile, directory, status)
+    err = mockHTTPResponsesInLoadedFromHelper(jsonfile, directory, status)
     if err != nil {
         return err
     }
@@ -184,6 +184,15 @@ func weMakeAnyCallTo(client /* algod/indexer */, endpoint string) (err error) {
     case "GetBlockTimeStampOffset":
         response, err =
             algodC.GetBlockTimeStampOffset().Do(context.Background())
+    case "GetLedgerStateDelta":
+        response, err =
+            algodC.GetLedgerStateDelta(123).Do(context.Background())
+    case "GetTransactionGroupLedgerStateDeltaForRound":
+        response, err =
+            algodC.GetTransactionGroupLedgerStateDeltasForRound(123).Do(context.Background())
+    case "GetLedgerStateDeltaForTransactionGroup":
+        response, err =
+            algodC.GetLedgerStateDeltaForTransactionGroup("someID").Do(context.Background())
     case "any":
         // This is an error case
         // pickup the error as the response
diff --git a/test/steps_test.go b/test/steps_test.go
index 072943b1..2badecf2 100644
--- a/test/steps_test.go
+++ b/test/steps_test.go
@@ -2398,7 +2398,7 @@ func aDryrunResponseFileAndATransactionAtIndex(arg1, arg2 string) error {
     if err != nil {
         return err
     }
-    dr, err := transaction.NewDryrunResponseFromJson(data)
+    dr, err := transaction.NewDryrunResponseFromJSON(data)
     if err != nil {
         return err
     }
diff --git a/test/unit.tags b/test/unit.tags
index 24c201b2..6b27ad93 100644
--- a/test/unit.tags
+++ b/test/unit.tags
@@ -28,11 +28,13 @@
 @unit.responses.timestamp
 @unit.responses.unlimited_assets
 @unit.sourcemap
+@unit.statedelta
 @unit.stateproof.paths
 @unit.stateproof.responses
 @unit.sync
 @unit.tealsign
-@unit.timestamp
+@unit.responses.timestamp
 @unit.transactions
 @unit.transactions.keyreg
 @unit.transactions.payment
+@unit.txngroupdeltas
diff --git a/test/utilities.go b/test/utilities.go
index 26fda1b9..8d9ffd89 100644
--- a/test/utilities.go
+++ b/test/utilities.go
@@ -18,6 +18,7 @@ import (
     "github.com/algorand/go-algorand-sdk/v2/types"
 )

+// VerifyResponse compares the actual response to the expected response.
 func VerifyResponse(expectedFile string, actual string) error {
     jsonfile, err := os.Open(expectedFile)
     if err != nil {
@@ -47,7 +48,16 @@ func VerifyResponse(expectedFile string, actual string) error {
         expectedString = string(sdk_json.EncodeStrict(generic))
     }

-    err = EqualJson2(expectedString, actual)
+    if strings.HasSuffix(expectedFile, ".msgp") {
+        generic := make(map[string]interface{})
+        err = msgpack.Decode(fileBytes, generic)
+        if err != nil {
+            return fmt.Errorf("failed to decode '%s' from message pack: %v", expectedFile, err)
+        }
+        expectedString = string(sdk_json.EncodeStrict(generic))
+    }
+
+    err = EqualJSON2(expectedString, actual)
     if err != nil {
         fmt.Printf("EXPECTED:\n%v\n", expectedString)
         fmt.Printf("ACTUAL:\n%v\n", actual)
@@ -55,11 +65,11 @@ func VerifyResponse(expectedFile string, actual string) error {
     return err
 }

-// EqualJson2 compares two json strings.
+// EqualJSON2 compares two json strings.
 // returns true if considered equal, false otherwise.
 // The error returns the difference.
 // For reference: j1 is the baseline, j2 is the test
-func EqualJson2(j1, j2 string) (err error) {
+func EqualJSON2(j1, j2 string) (err error) {
     var expected map[string]interface{}
     err = json.Unmarshal([]byte(j1), &expected)
     if err != nil {
@@ -81,37 +91,38 @@ func EqualJson2(j1, j2 string) (err error) {
     return err
 }

+// ValueType is the type of the value as an enum.
 type ValueType int

 const (
-    OBJECT ValueType = iota
-    ARRAY
-    VALUE
-    NUMBER
-    BOOL
-    STRING
-    MISSING
+    objectType ValueType = iota
+    arrayType
+    valueType
+    numberType
+    boolType
+    stringType
+    missingType
 )

 func getType(val interface{}) ValueType {
     if val == nil {
-        return MISSING
+        return missingType
     }
     switch val.(type) {
     case map[string]interface{}:
-        return OBJECT
+        return objectType
     case []interface{}:
-        return ARRAY
+        return arrayType
     case string:
-        return STRING
+        return stringType
     case bool:
-        return BOOL
+        return boolType
     case float64:
-        return NUMBER
+        return numberType
     case nil:
-        return MISSING
+        return missingType
     default:
-        return VALUE
+        return valueType
     }
 }
@@ -194,10 +205,10 @@ func sortArray(arr []interface{}, field string) {
 }

 func getFirstField(ob interface{}) string {
-    if ob == nil || getType(ob) != OBJECT {
+    if ob == nil || getType(ob) != objectType {
         return ""
     }
-    for k, _ := range ob.(map[string]interface{}) {
+    for k := range ob.(map[string]interface{}) {
         return k
     }
     return ""
@@ -208,17 +219,17 @@ func recursiveCompare(field string, expected, actual interface{}) error {
     actualType := getType(actual)

     // If both were nil, just return
-    if expectedType == MISSING && actualType == MISSING {
+    if expectedType == missingType && actualType == missingType {
         return nil
     }

     var keyType ValueType
-    if expectedType == MISSING || actualType == MISSING {
-        if expectedType == MISSING {
+    if expectedType == missingType || actualType == missingType {
+        if expectedType == missingType {
             keyType = actualType
         }
-        if actualType == MISSING {
+        if actualType == missingType {
             keyType = expectedType
         }
     } else {
@@ -230,17 +241,17 @@ func recursiveCompare(field string, expected, actual interface{}) error {
     }

     switch keyType {
-    case ARRAY:
+    case arrayType:
         var expectedArr []interface{}
         var actualArr []interface{}

         expectedSize := 0
-        if expectedType != MISSING {
+        if expectedType != missingType {
             expectedArr = expected.([]interface{})
             expectedSize = len(expectedArr)
         }

         actualSize := 0
-        if actualType != MISSING {
+        if actualType != missingType {
             actualArr = actual.([]interface{})
             actualSize = len(actualArr)
         }
@@ -269,7 +280,7 @@ func recursiveCompare(field string, expected, actual interface{}) error {
         }

         return err
-    case OBJECT:
+    case objectType:
         //log.Printf("%s{...} - object\n", field)

         // Recursively compare each key value
@@ -278,7 +289,7 @@ func recursiveCompare(field string, expected, actual interface{}) error {
         // Go happily creates complex zero value objects, so go ahead and recursively compare nil against defaults
         // If they are both missing what are we even doing here. Return with no error.
-        if expectedType == MISSING && actualType == MISSING {
+        if expectedType == missingType && actualType == missingType {
             return nil
         }
@@ -286,34 +297,33 @@ func recursiveCompare(field string, expected, actual interface{}) error {
         var actualObject map[string]interface{}

         keys := make(map[string]bool)
-        if expectedType != MISSING {
+        if expectedType != missingType {
             expectedObject = expected.(map[string]interface{})
-            for k, _ := range expectedObject {
+            for k := range expectedObject {
                 keys[k] = true
             }
         }
-        if actualType != MISSING {
+        if actualType != missingType {
             actualObject = actual.(map[string]interface{})
-            for k, _ := range actualObject {
+            for k := range actualObject {
                 keys[k] = true
             }
         }

-        for k, _ := range keys {
-            var err error
-            err = recursiveCompare(fmt.Sprintf("%s.%s", field, k), expectedObject[k], actualObject[k])
+        for k := range keys {
+            err := recursiveCompare(fmt.Sprintf("%s.%s", field, k), expectedObject[k], actualObject[k])
             if err != nil {
                 return err
             }
         }
-    case NUMBER:
+    case numberType:
         // Compare numbers, if missing treat as zero
         expectedNum := float64(0)
-        if expectedType != MISSING {
+        if expectedType != missingType {
             expectedNum = expected.(float64)
         }
         actualNum := float64(0)
-        if actualType != MISSING {
+        if actualType != missingType {
             actualNum = actual.(float64)
         }
         //log.Printf("%s - number %f == %f\n", field, expectedNum, actualNum)
@@ -321,14 +331,14 @@ func recursiveCompare(field string, expected, actual interface{}) error {
             return fmt.Errorf("failed to match field %s, %f != %f", field, expectedNum, actualNum)
         }
-    case BOOL:
+    case boolType:
         // Compare bools, if missing treat as false
         expectedBool := false
-        if expectedType != MISSING {
+        if expectedType != missingType {
             expectedBool = expected.(bool)
         }
         actualBool := false
-        if actualType != MISSING {
+        if actualType != missingType {
             actualBool = actual.(bool)
         }
         //log.Printf("%s - bool %t == %t\n", field, expectedBool, actualBool)
@@ -336,15 +346,15 @@ func recursiveCompare(field string, expected, actual interface{}) error {
             return fmt.Errorf("failed to match field %s, %t != %t", field, expectedBool, actualBool)
         }
-    case STRING:
+    case stringType:
         // Compare strings, if missing treat as an empty string.
         // Note: I think binary ends up in here, it may need some special handling.
         expectedStr := ""
-        if expectedType != MISSING {
+        if expectedType != missingType {
             expectedStr = expected.(string)
         }
         actualStr := ""
-        if actualType != MISSING {
+        if actualType != missingType {
             actualStr = actual.(string)
         }
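The VerifyResponse change above normalizes msgpack fixtures the same way JSON fixtures are handled: decode into a generic map, then re-encode with the SDK's strict JSON encoder so one comparator (EqualJSON2) covers both formats. A sketch of that pattern in isolation (normalizeMsgpack is a hypothetical helper name):

package main

import (
    "fmt"
    "log"

    sdkjson "github.com/algorand/go-algorand-sdk/v2/encoding/json"
    "github.com/algorand/go-algorand-sdk/v2/encoding/msgpack"
)

// normalizeMsgpack decodes msgpack bytes into a generic map and re-encodes
// them as canonical JSON, so msgpack and JSON fixtures compare identically.
func normalizeMsgpack(raw []byte) (string, error) {
    generic := make(map[string]interface{})
    if err := msgpack.Decode(raw, &generic); err != nil {
        return "", fmt.Errorf("failed to decode message pack: %v", err)
    }
    return string(sdkjson.EncodeStrict(generic)), nil
}

func main() {
    raw := msgpack.Encode(map[string]interface{}{"round": uint64(1234)})
    normalized, err := normalizeMsgpack(raw)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(normalized) // {"round":1234}
}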
diff --git a/transaction/atomicTransactionComposer.go b/transaction/atomicTransactionComposer.go
index beab0a53..8565d33b 100644
--- a/transaction/atomicTransactionComposer.go
+++ b/transaction/atomicTransactionComposer.go
@@ -26,7 +26,7 @@ const methodArgsTupleThreshold = maxAppArgs - 2

 // TransactionWithSigner represents an unsigned transactions and a signer that can authorize that
 // transaction.
-type TransactionWithSigner struct {
+type TransactionWithSigner struct { //nolint:revive // Ignore stuttering for backwards compatibility
     // An unsigned transaction
     Txn types.Transaction
     // A transaction signer that can authorize the transaction
@@ -118,19 +118,19 @@ type ExecuteResult struct {
 type AtomicTransactionComposerStatus = int

 const (
-    // The atomic group is still under construction.
+    // BUILDING means the atomic group is still under construction.
     BUILDING AtomicTransactionComposerStatus = iota

-    // The atomic group has been finalized, but not yet signed.
+    // BUILT means the atomic group has been finalized, but not yet signed.
     BUILT

-    // The atomic group has been finalized and signed, but not yet submitted to the network.
+    // SIGNED means the atomic group has been finalized and signed, but not yet submitted to the network.
     SIGNED

-    // The atomic group has been finalized, signed, and submitted to the network.
+    // SUBMITTED means the atomic group has been finalized, signed, and submitted to the network.
     SUBMITTED

-    // The atomic group has been finalized, signed, submitted, and successfully committed to a block.
+    // COMMITTED means the atomic group has been finalized, signed, submitted, and successfully committed to a block.
     COMMITTED
 )
@@ -162,7 +162,7 @@ func (txContext *transactionContext) isMethodCallTx() bool {
     return txContext.method != nil
 }

-// The maximum size of an atomic transaction group.
+// MaxAtomicGroupSize is the maximum size of an atomic transaction group.
 const MaxAtomicGroupSize = 16

 // AtomicTransactionComposer is a helper class used to construct and execute atomic transaction groups
@@ -549,7 +549,7 @@ func (atc *AtomicTransactionComposer) getTxIDs() []string {
 // Note: a group can only be submitted again if it fails.
 //
 // Returns a list of TxIDs of the submitted transactions.
-func (atc *AtomicTransactionComposer) Submit(client *algod.Client, ctx context.Context) ([]string, error) {
+func (atc *AtomicTransactionComposer) Submit(client *algod.Client, ctx context.Context) ([]string, error) { //nolint:revive // Ignore Context order for backwards compatibility
     if atc.status > SUBMITTED {
         return nil, errors.New("status must be SUBMITTED or lower in order to call Submit()")
     }
@@ -584,7 +584,7 @@ func (atc *AtomicTransactionComposer) Submit(client *algod.Client, ctx context.C
 //
 // Returns the confirmed round for this transaction, the txIDs of the submitted transactions, and an
 // ABIResult for each method call in this group.
-func (atc *AtomicTransactionComposer) Execute(client *algod.Client, ctx context.Context, waitRounds uint64) (ExecuteResult, error) {
+func (atc *AtomicTransactionComposer) Execute(client *algod.Client, ctx context.Context, waitRounds uint64) (ExecuteResult, error) { //nolint:revive // Ignore Context order for backwards compatibility
     if atc.status == COMMITTED {
         return ExecuteResult{}, errors.New("status is already committed")
     }
@@ -605,7 +605,7 @@ func (atc *AtomicTransactionComposer) Execute(client *algod.Client, ctx context.
         if numMethodCalls == 0 {
             indexToWaitFor = i
         }
-        numMethodCalls += 1
+        numMethodCalls++
     }
 }
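The renamed constants document the composer's lifecycle: BUILDING to BUILT to SIGNED to SUBMITTED to COMMITTED, advanced by BuildGroup, GatherSignatures, Submit, and Execute. A minimal end-to-end sketch, assuming a reachable node at a placeholder URL and a funded account (the freshly generated account here is a stand-in):

package main

import (
    "context"
    "log"

    "github.com/algorand/go-algorand-sdk/v2/client/v2/algod"
    "github.com/algorand/go-algorand-sdk/v2/crypto"
    "github.com/algorand/go-algorand-sdk/v2/transaction"
)

func main() {
    client, err := algod.MakeClient("http://localhost:4001", "your-api-token")
    if err != nil {
        log.Fatal(err)
    }
    acct := crypto.GenerateAccount() // placeholder; use a funded account

    sp, err := client.SuggestedParams().Do(context.Background())
    if err != nil {
        log.Fatal(err)
    }
    txn, err := transaction.MakePaymentTxn(acct.Address.String(), acct.Address.String(), 0, nil, "", sp)
    if err != nil {
        log.Fatal(err)
    }

    // Status starts at BUILDING; Execute builds, signs, submits, and waits,
    // moving the composer through BUILT, SIGNED, SUBMITTED, and COMMITTED.
    var atc transaction.AtomicTransactionComposer
    signer := transaction.BasicAccountTransactionSigner{Account: acct}
    if err := atc.AddTransaction(transaction.TransactionWithSigner{Txn: txn, Signer: signer}); err != nil {
        log.Fatal(err)
    }
    res, err := atc.Execute(client, context.Background(), 4)
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("group committed in round %d", res.ConfirmedRound)
}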
diff --git a/transaction/dryrun.go b/transaction/dryrun.go
index 606d6715..d0a92308 100644
--- a/transaction/dryrun.go
+++ b/transaction/dryrun.go
@@ -16,7 +16,7 @@ import (
 )

 const (
-    defaultAppId uint64 = 1380011588
+    defaultAppID uint64 = 1380011588

     rejectMsg       = "REJECT"
     defaultMaxWidth = 30
@@ -24,7 +24,7 @@ const (

 // CreateDryrun creates a DryrunRequest object from a client and slice of SignedTxn objects and a default configuration
 // Passed in as a pointer to a DryrunRequest object to use for extra parameters
-func CreateDryrun(client *algod.Client, txns []types.SignedTxn, dr *models.DryrunRequest, ctx context.Context) (drr models.DryrunRequest, err error) {
+func CreateDryrun(client *algod.Client, txns []types.SignedTxn, dr *models.DryrunRequest, ctx context.Context) (drr models.DryrunRequest, err error) { //nolint:revive // Ignore Context order for backwards compatibility
     var (
         apps   []types.AppIndex
         assets []types.AssetIndex
@@ -58,7 +58,7 @@ func CreateDryrun(client *algod.Client, txns []types.SignedTxn, dr *models.Dryru

         if t.Txn.ApplicationID == 0 {
             drr.Apps = append(drr.Apps, models.Application{
-                Id: defaultAppId,
+                Id: defaultAppID,
                 Params: models.ApplicationParams{
                     Creator:         t.Txn.Sender.String(),
                     ApprovalProgram: t.Txn.ApprovalProgram,
@@ -80,14 +80,14 @@ func CreateDryrun(client *algod.Client, txns []types.SignedTxn, dr *models.Dryru
     }

     seenAssets := map[types.AssetIndex]bool{}
-    for _, assetId := range assets {
-        if _, ok := seenAssets[assetId]; ok {
+    for _, assetID := range assets {
+        if _, ok := seenAssets[assetID]; ok {
             continue
         }

-        assetInfo, err := client.GetAssetByID(uint64(assetId)).Do(ctx)
+        assetInfo, err := client.GetAssetByID(uint64(assetID)).Do(ctx)
         if err != nil {
-            return drr, fmt.Errorf("failed to get asset %d: %+v", assetId, err)
+            return drr, fmt.Errorf("failed to get asset %d: %+v", assetID, err)
         }

         addr, err := types.DecodeAddress(assetInfo.Params.Creator)
@@ -96,18 +96,18 @@ func CreateDryrun(client *algod.Client, txns []types.SignedTxn, dr *models.Dryru
         }
         accts = append(accts, addr)

-        seenAssets[assetId] = true
+        seenAssets[assetID] = true
     }

     seenApps := map[types.AppIndex]bool{}
-    for _, appId := range apps {
-        if _, ok := seenApps[appId]; ok {
+    for _, appID := range apps {
+        if _, ok := seenApps[appID]; ok {
             continue
         }

-        appInfo, err := client.GetApplicationByID(uint64(appId)).Do(ctx)
+        appInfo, err := client.GetApplicationByID(uint64(appID)).Do(ctx)
         if err != nil {
-            return drr, fmt.Errorf("failed to get application %d: %+v", appId, err)
+            return drr, fmt.Errorf("failed to get application %d: %+v", appID, err)
         }
         drr.Apps = append(drr.Apps, appInfo)
@@ -117,7 +117,7 @@ func CreateDryrun(client *algod.Client, txns []types.SignedTxn, dr *models.Dryru
         }
         accts = append(accts, creator)

-        seenApps[appId] = true
+        seenApps[appID] = true
     }

     seenAccts := map[types.Address]bool{}
@@ -147,27 +147,31 @@ func DefaultStackPrinterConfig() StackPrinterConfig {
     return StackPrinterConfig{MaxValueWidth: defaultMaxWidth, TopOfStackFirst: true}
 }

+// DryrunResponse represents the response from a dryrun call
 type DryrunResponse struct {
     Error           string            `json:"error"`
     ProtocolVersion string            `json:"protocol-version"`
     Txns            []DryrunTxnResult `json:"txns"`
 }

+// NewDryrunResponse creates a new DryrunResponse from a models.DryrunResponse
 func NewDryrunResponse(d models.DryrunResponse) (DryrunResponse, error) {
     // Marshal and unmarshal to fix integer types.
     b, err := json.Marshal(d)
     if err != nil {
         return DryrunResponse{}, err
     }
-    return NewDryrunResponseFromJson(b)
+    return NewDryrunResponseFromJSON(b)
 }

-func NewDryrunResponseFromJson(js []byte) (DryrunResponse, error) {
+// NewDryrunResponseFromJSON creates a new DryrunResponse from a JSON byte array
+func NewDryrunResponseFromJSON(js []byte) (DryrunResponse, error) {
     dr := DryrunResponse{}
     err := json.Unmarshal(js, &dr)
     return dr, err
 }

+// DryrunTxnResult is a wrapper around models.DryrunTxnResult
 type DryrunTxnResult struct {
     models.DryrunTxnResult
 }
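CreateDryrun, as shown above, walks the transactions and pulls in the application, asset, and account state the dryrun will need. A sketch of how it might be combined with the node's TealDryrun endpoint and the renamed NewDryrunResponse helpers (runDryrun is a hypothetical wrapper; the signed app-call transactions are assumed to exist):

package main

import (
    "context"
    "log"

    "github.com/algorand/go-algorand-sdk/v2/client/v2/algod"
    "github.com/algorand/go-algorand-sdk/v2/client/v2/common/models"
    "github.com/algorand/go-algorand-sdk/v2/transaction"
    "github.com/algorand/go-algorand-sdk/v2/types"
)

// runDryrun fills in app/asset/account state for the signed transactions,
// executes the dryrun on the node, and decodes the typed response.
func runDryrun(client *algod.Client, stxns []types.SignedTxn) (transaction.DryrunResponse, error) {
    drr, err := transaction.CreateDryrun(client, stxns, &models.DryrunRequest{}, context.Background())
    if err != nil {
        return transaction.DryrunResponse{}, err
    }
    resp, err := client.TealDryrun(drr).Do(context.Background())
    if err != nil {
        return transaction.DryrunResponse{}, err
    }
    return transaction.NewDryrunResponse(resp)
}

func main() {
    client, err := algod.MakeClient("http://localhost:4001", "your-api-token")
    if err != nil {
        log.Fatal(err)
    }
    var stxns []types.SignedTxn // placeholder: signed app-call transactions
    dr, err := runDryrun(client, stxns)
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("dryrun ran under protocol %s", dr.ProtocolVersion)
}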
diff --git a/transaction/transaction.go b/transaction/transaction.go
index fe437ce0..b79aa548 100644
--- a/transaction/transaction.go
+++ b/transaction/transaction.go
@@ -1294,7 +1294,7 @@ func AssignGroupID(txns []types.Transaction, account string) (result []types.Tra
         }
     }
     for _, tx := range txns {
-        if account == "" || bytes.Compare(tx.Sender[:], decoded[:]) == 0 {
+        if account == "" || bytes.Equal(tx.Sender[:], decoded[:]) {
             tx.Group = gid
             result = append(result, tx)
         }
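The AssignGroupID hunk swaps bytes.Compare(a, b) == 0 for bytes.Equal(a, b); the two are equivalent for equality tests, but Equal states the intent and leaves Compare for ordering. For example:

package main

import (
    "bytes"
    "fmt"
)

func main() {
    a := []byte{0xde, 0xad}
    b := []byte{0xde, 0xad}
    fmt.Println(bytes.Equal(a, b))        // true: idiomatic equality test
    fmt.Println(bytes.Compare(a, b) == 0) // same result; Compare is for ordering
}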
diff --git a/transaction/transactionSigner.go b/transaction/transactionSigner.go
index 62843a46..d23e79b7 100644
--- a/transaction/transactionSigner.go
+++ b/transaction/transactionSigner.go
@@ -7,26 +7,23 @@ import (
     "github.com/algorand/go-algorand-sdk/v2/types"
 )

-/**
- * This type represents a function which can sign transactions from an atomic transaction group.
- * @param txnGroup - The atomic group containing transactions to be signed
- * @param indexesToSign - An array of indexes in the atomic transaction group that should be signed
- * @returns An array of encoded signed transactions. The length of the
- *   array will be the same as the length of indexesToSign, and each index i in the array
- *   corresponds to the signed transaction from txnGroup[indexesToSign[i]]
- */
-type TransactionSigner interface {
+// TransactionSigner represents a function which can sign transactions from an atomic transaction group.
+// @param txnGroup - The atomic group containing transactions to be signed
+// @param indexesToSign - An array of indexes in the atomic transaction group that should be signed
+// @returns An array of encoded signed transactions. The length of the
+// array will be the same as the length of indexesToSign, and each index i in the array
+// corresponds to the signed transaction from txnGroup[indexesToSign[i]]
+type TransactionSigner interface { //nolint:revive // Ignore stuttering for backwards compatibility
     SignTransactions(txGroup []types.Transaction, indexesToSign []int) ([][]byte, error)
     Equals(other TransactionSigner) bool
 }

-/**
- * TransactionSigner that can sign transactions for the provided basic Account.
- */
+// BasicAccountTransactionSigner is a TransactionSigner that can
+// sign transactions for the provided basic Account.
 type BasicAccountTransactionSigner struct {
     Account crypto.Account
 }

+// SignTransactions signs the provided transactions with the private key of the account.
 func (txSigner BasicAccountTransactionSigner) SignTransactions(txGroup []types.Transaction, indexesToSign []int) ([][]byte, error) {
     stxs := make([][]byte, len(indexesToSign))
     for i, pos := range indexesToSign {
@@ -41,30 +38,31 @@ func (txSigner BasicAccountTransactionSigner) SignTransactions(txGroup []types.T
     return stxs, nil
 }

+// Equals returns true if the other TransactionSigner equals this one.
 func (txSigner BasicAccountTransactionSigner) Equals(other TransactionSigner) bool {
     if castedSigner, ok := other.(BasicAccountTransactionSigner); ok {
-        otherJson, err := json.Marshal(castedSigner)
+        otherJSON, err := json.Marshal(castedSigner)
         if err != nil {
             return false
         }

-        selfJson, err := json.Marshal(txSigner)
+        selfJSON, err := json.Marshal(txSigner)
         if err != nil {
             return false
         }

-        return string(otherJson) == string(selfJson)
+        return string(otherJSON) == string(selfJSON)
     }
     return false
 }

-/**
- * TransactionSigner that can sign transactions for the provided LogicSigAccount.
- */
+// LogicSigAccountTransactionSigner is a TransactionSigner that can
+// sign transactions for the provided LogicSigAccount.
 type LogicSigAccountTransactionSigner struct {
     LogicSigAccount crypto.LogicSigAccount
 }

+// SignTransactions signs the provided transactions with the private key of the account.
 func (txSigner LogicSigAccountTransactionSigner) SignTransactions(txGroup []types.Transaction, indexesToSign []int) ([][]byte, error) {
     stxs := make([][]byte, len(indexesToSign))
     for i, pos := range indexesToSign {
@@ -79,31 +77,32 @@ func (txSigner LogicSigAccountTransactionSigner) SignTransactions(txGroup []type
     return stxs, nil
 }

+// Equals returns true if the other TransactionSigner equals this one.
 func (txSigner LogicSigAccountTransactionSigner) Equals(other TransactionSigner) bool {
     if castedSigner, ok := other.(LogicSigAccountTransactionSigner); ok {
-        otherJson, err := json.Marshal(castedSigner)
+        otherJSON, err := json.Marshal(castedSigner)
         if err != nil {
             return false
         }

-        selfJson, err := json.Marshal(txSigner)
+        selfJSON, err := json.Marshal(txSigner)
         if err != nil {
             return false
         }

-        return string(otherJson) == string(selfJson)
+        return string(otherJSON) == string(selfJSON)
     }
     return false
 }

-/**
- * TransactionSigner that can sign transactions for the provided MultiSig Account
- */
+// MultiSigAccountTransactionSigner is a TransactionSigner that can
+// sign transactions for the provided MultiSig Account
 type MultiSigAccountTransactionSigner struct {
     Msig crypto.MultisigAccount
     Sks  [][]byte
 }

+// SignTransactions signs the provided transactions with the private keys of the account.
 func (txSigner MultiSigAccountTransactionSigner) SignTransactions(txGroup []types.Transaction, indexesToSign []int) ([][]byte, error) {
     stxs := make([][]byte, len(indexesToSign))
     for i, pos := range indexesToSign {
@@ -132,19 +131,20 @@ func (txSigner MultiSigAccountTransactionSigner) SignTransactions(txGroup []type
     return stxs, nil
 }

+// Equals returns true if the other TransactionSigner equals this one.
 func (txSigner MultiSigAccountTransactionSigner) Equals(other TransactionSigner) bool {
     if castedSigner, ok := other.(MultiSigAccountTransactionSigner); ok {
-        otherJson, err := json.Marshal(castedSigner)
+        otherJSON, err := json.Marshal(castedSigner)
         if err != nil {
             return false
         }

-        selfJson, err := json.Marshal(txSigner)
+        selfJSON, err := json.Marshal(txSigner)
         if err != nil {
             return false
         }

-        return string(otherJson) == string(selfJson)
+        return string(otherJSON) == string(selfJSON)
     }
     return false
 }
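Any of the three signers above can stand behind the TransactionSigner interface, so group-signing code does not need to know whether a basic account, logicsig, or multisig does the signing. A minimal sketch (signGroup is a hypothetical helper; the zero-value transaction is a placeholder):

package main

import (
    "log"

    "github.com/algorand/go-algorand-sdk/v2/crypto"
    "github.com/algorand/go-algorand-sdk/v2/transaction"
    "github.com/algorand/go-algorand-sdk/v2/types"
)

// signGroup signs every transaction in the group with one signer; any
// TransactionSigner implementation can be swapped in behind the interface.
func signGroup(signer transaction.TransactionSigner, group []types.Transaction) ([][]byte, error) {
    indexes := make([]int, len(group))
    for i := range group {
        indexes[i] = i
    }
    return signer.SignTransactions(group, indexes)
}

func main() {
    acct := crypto.GenerateAccount()
    signer := transaction.BasicAccountTransactionSigner{Account: acct}
    var txn types.Transaction // placeholder; build a real transaction here
    stxs, err := signGroup(signer, []types.Transaction{txn})
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("signed %d transaction(s)", len(stxs))
}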
diff --git a/transaction/transaction_test.go b/transaction/transaction_test.go
index 298d1a55..d7e1d5e9 100644
--- a/transaction/transaction_test.go
+++ b/transaction/transaction_test.go
@@ -976,11 +976,11 @@ func TestFee(t *testing.T) {

 func TestParseBoxReferences(t *testing.T) {

     genWithAppId := func(appId uint64) types.AppBoxReference {
-        return types.AppBoxReference{appId, []byte("example")}
+        return types.AppBoxReference{AppID: appId, Name: []byte("example")}
     }

     genWithNewAppId := func() types.AppBoxReference {
-        return types.AppBoxReference{0, []byte("example")}
+        return types.AppBoxReference{AppID: 0, Name: []byte("example")}
     }

     t.Run("appIndexExists", func(t *testing.T) {
diff --git a/transaction/waitForConfirmation.go b/transaction/waitForConfirmation.go
index 0150c769..9725a6cc 100644
--- a/transaction/waitForConfirmation.go
+++ b/transaction/waitForConfirmation.go
@@ -9,10 +9,10 @@ import (
     "github.com/algorand/go-algorand-sdk/v2/client/v2/common/models"
 )

-// `WaitForConfirmation` waits for a pending transaction to be accepted by the network
-// `txid`: The ID of the pending transaction to wait for
-// `waitRounds`: The number of rounds to block before exiting with an error.
-func WaitForConfirmation(c *algod.Client, txid string, waitRounds uint64, ctx context.Context, headers ...*common.Header) (txInfo models.PendingTransactionInfoResponse, err error) {
+// WaitForConfirmation waits for a pending transaction to be accepted by the network
+// txid: The ID of the pending transaction to wait for
+// waitRounds: The number of rounds to block before exiting with an error.
+func WaitForConfirmation(c *algod.Client, txid string, waitRounds uint64, ctx context.Context, headers ...*common.Header) (txInfo models.PendingTransactionInfoResponse, err error) { //nolint:revive // Ignore Context order for backwards compatibility
     response, err := c.Status().Do(ctx, headers...)
     if err != nil {
         return
@@ -53,6 +53,6 @@ func WaitForConfirmation(c *algod.Client, txid string, waitRounds uint64, ctx co
     }

     // Increment the `currentRound`
-    currentRound += 1
+    currentRound++
     }
 }
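The reworked WaitForConfirmation doc comment now follows godoc conventions; usage is unchanged. A sketch, assuming a transaction ID already returned by SendRawTransaction and a placeholder node URL:

package main

import (
    "context"
    "log"

    "github.com/algorand/go-algorand-sdk/v2/client/v2/algod"
    "github.com/algorand/go-algorand-sdk/v2/transaction"
)

func main() {
    client, err := algod.MakeClient("http://localhost:4001", "your-api-token")
    if err != nil {
        log.Fatal(err)
    }
    txid := "YOUR-TXID" // placeholder: ID returned by SendRawTransaction

    // Block until the transaction is confirmed, giving up after 4 rounds.
    txInfo, err := transaction.WaitForConfirmation(client, txid, 4, context.Background())
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("confirmed in round %d", txInfo.ConfirmedRound)
}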
diff --git a/types/address.go b/types/address.go
index 1ac5a88c..f2d33019 100644
--- a/types/address.go
+++ b/types/address.go
@@ -36,17 +36,17 @@ func (a Address) IsZero() bool {
 }

 // MarshalText returns the address string as an array of bytes
-func (addr *Address) MarshalText() ([]byte, error) {
-    result := base64.StdEncoding.EncodeToString(addr[:])
+func (a *Address) MarshalText() ([]byte, error) {
+    result := base64.StdEncoding.EncodeToString(a[:])
     return []byte(result), nil
 }

 // UnmarshalText initializes the Address from an array of bytes.
 // The bytes may be in the base32 checksum format, or the raw bytes base64 encoded.
-func (addr *Address) UnmarshalText(text []byte) error {
+func (a *Address) UnmarshalText(text []byte) error {
     address, err := DecodeAddress(string(text))
     if err == nil {
-        *addr = address
+        *a = address
         return nil
     }
     // ignore the DecodeAddress error because it isn't the native MarshalText format.
@@ -54,10 +54,10 @@ func (addr *Address) UnmarshalText(text []byte) error {
     // Check if its b64 encoded
     data, err := base64.StdEncoding.DecodeString(string(text))
     if err == nil {
-        if len(data) != len(addr[:]) {
+        if len(data) != len(a[:]) {
             return errWrongAddressLen
         }
-        copy(addr[:], data[:])
+        copy(a[:], data[:])
         return nil
     }
     return err
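The receiver rename in types/address.go is purely cosmetic; the text round-trip behavior is unchanged. As the sketch below shows, UnmarshalText accepts either the base32 checksum form produced by String or the base64 raw-bytes form produced by MarshalText:

package main

import (
    "fmt"
    "log"

    "github.com/algorand/go-algorand-sdk/v2/types"
)

func main() {
    var a types.Address // zero address; any valid address works

    b64, err := a.MarshalText() // base64 of the raw 32 bytes
    if err != nil {
        log.Fatal(err)
    }

    var fromBase32, fromBase64 types.Address
    if err := fromBase32.UnmarshalText([]byte(a.String())); err != nil {
        log.Fatal(err) // base32 checksum form
    }
    if err := fromBase64.UnmarshalText(b64); err != nil {
        log.Fatal(err) // base64 raw-bytes form
    }
    fmt.Println(fromBase32 == fromBase64) // true
}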
diff --git a/types/applications.go b/types/applications.go
index aaa90f5b..984dc326 100644
--- a/types/applications.go
+++ b/types/applications.go
@@ -2,6 +2,7 @@ package types

 // This file has the applications specific structures

+// ApplicationFields are the fields that are common to all applications
 type ApplicationFields struct {
     ApplicationCallTxnFields
 }
@@ -11,6 +12,7 @@ type ApplicationFields struct {
 // AppParams
 type AppIndex uint64

+// AppBoxReference names a box by the app ID
 type AppBoxReference struct {
     // The ID of the app that owns the box. Must be converted to BoxReference during transaction submission.
     AppID uint64
@@ -19,6 +21,7 @@ type AppBoxReference struct {
     Name []byte
 }

+// BoxReference names a box by the index in the foreign app array
 type BoxReference struct {
     _struct struct{} `codec:",omitempty,omitemptyarray"`
diff --git a/types/basics.go b/types/basics.go
index 33c40eb1..2ac21430 100644
--- a/types/basics.go
+++ b/types/basics.go
@@ -7,9 +7,9 @@ import (
     "fmt"
     "math"

-    "github.com/algorand/go-algorand-sdk/v2/encoding/msgpack"
-
     "golang.org/x/crypto/ed25519"
+
+    "github.com/algorand/go-algorand-sdk/v2/encoding/msgpack"
 )

 // TxType identifies the type of the transaction
@@ -79,6 +79,7 @@ func ToMicroAlgos(algos float64) MicroAlgos {
     return MicroAlgos(math.Round(algos * microAlgoConversionFactor))
 }

+// FromBase64String converts a base64 string to a SignedTxn
 func (signedTxn *SignedTxn) FromBase64String(b64string string) error {
     txnBytes, err := base64.StdEncoding.DecodeString(b64string)
     if err != nil {
@@ -91,6 +92,7 @@ func (signedTxn *SignedTxn) FromBase64String(b64string string) error {
     return nil
 }

+// FromBase64String converts a base64 string to a Block
 func (block *Block) FromBase64String(b64string string) error {
     txnBytes, err := base64.StdEncoding.DecodeString(b64string)
     if err != nil {
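The newly documented FromBase64String helpers decode a base64 msgpack blob straight into a SignedTxn or Block. A self-contained round trip (the zero-value transaction is a stand-in for real wire data):

package main

import (
    "encoding/base64"
    "fmt"
    "log"

    "github.com/algorand/go-algorand-sdk/v2/encoding/msgpack"
    "github.com/algorand/go-algorand-sdk/v2/types"
)

func main() {
    // Encode a signed transaction to base64 msgpack, as a node or file would.
    var src types.SignedTxn
    b64 := base64.StdEncoding.EncodeToString(msgpack.Encode(src))

    // Decode it back through the helper.
    var stx types.SignedTxn
    if err := stx.FromBase64String(b64); err != nil {
        log.Fatal(err)
    }
    fmt.Printf("decoded sender: %s\n", stx.Txn.Sender)
}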
diff --git a/types/block.go b/types/block.go
index d68baa5f..e15cfc9f 100644
--- a/types/block.go
+++ b/types/block.go
@@ -250,6 +250,9 @@ type (
     }
 )

+// EvalDelta stores StateDeltas for an application's global key/value store, as
+// well as StateDeltas for some number of accounts holding local state for that
+// application
 type EvalDelta struct {
     _struct struct{} `codec:",omitempty,omitemptyarray"`

@@ -257,7 +260,14 @@ type EvalDelta struct {

     // When decoding EvalDeltas, the integer key represents an offset into
     // [txn.Sender, txn.Accounts[0], txn.Accounts[1], ...]
-    LocalDeltas map[uint64]StateDelta `codec:"ld,allocbound=config.MaxEvalDeltaAccounts"`
+    //msgp:allocbound LocalDeltas config.MaxEvalDeltaAccounts
+    LocalDeltas map[uint64]StateDelta `codec:"ld"`
+
+    // If a program modifies the local state of an account that is not the
+    // Sender, or in txn.Accounts, it must be recorded here, so that the key
+    // in LocalDeltas can refer to it.
+    //msgp:allocbound SharedAccts config.MaxEvalDeltaAccounts
+    SharedAccts []Address `codec:"sa"`

     Logs []string `codec:"lg"`
diff --git a/types/genesis.go b/types/genesis.go
index f7f95cd6..7939b644 100644
--- a/types/genesis.go
+++ b/types/genesis.go
@@ -48,6 +48,11 @@ type Genesis struct {
     DevMode bool `codec:"devmode"`
 }

+// GenesisAllocation object represents an allocation of algos to
+// an address in the genesis block. Address is the checksummed
+// short address. Comment is a note about what this address is
+// representing, and is purely informational. State is the initial
+// account state.
 type GenesisAllocation struct {
     _struct struct{} `codec:""`

@@ -56,6 +61,9 @@ type GenesisAllocation struct {
     State Account `codec:"state"`
 }

+// Account contains the data associated with a given address.
+// This includes the account balance, cryptographic public keys,
+// and consensus delegation status.
 type Account struct {
     _struct struct{} `codec:",omitempty,omitemptyarray"`
diff --git a/types/stateproof.go b/types/stateproof.go
index 1aa78240..808ae1f5 100644
--- a/types/stateproof.go
+++ b/types/stateproof.go
@@ -53,7 +53,7 @@ func (d GenericDigest) IsEmpty() bool {

 // Sumhash512DigestSize The size in bytes of the sumhash checksum
 const Sumhash512DigestSize = 64

-//size of each hash
+// Sizes of each hash
 const (
     Sha512_256Size    = sha512.Size256
     SumhashDigestSize = Sumhash512DigestSize
@@ -95,6 +95,7 @@ type Proof struct {
     TreeDepth uint8 `codec:"td"`
 }

+// MerkleSignatureSchemeRootSize is the size of the root of the merkle tree.
 const MerkleSignatureSchemeRootSize = SumhashDigestSize

 // Commitment represents the root of the vector commitment tree built upon the MSS keys.
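One note on the EvalDelta change above: LocalDeltas keys are offsets into the list [txn.Sender, txn.Accounts...], and SharedAccts extends that list so a delta can name an account outside txn.Accounts. A hypothetical literal (field values are made up; StateDelta is assumed to be the key/value delta map type referenced in the hunk above):

package main

import (
    "fmt"

    "github.com/algorand/go-algorand-sdk/v2/types"
)

func main() {
    var shared types.Address // placeholder for an account outside txn.Accounts

    delta := types.EvalDelta{
        // Key 0 is txn.Sender; keys past the sender and txn.Accounts
        // index into SharedAccts.
        LocalDeltas: map[uint64]types.StateDelta{0: {}},
        SharedAccts: []types.Address{shared},
    }
    fmt.Printf("%d local delta(s), %d shared account(s)\n",
        len(delta.LocalDeltas), len(delta.SharedAccts))
}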