From f9b1fe26fc4c99348a5404f7604b9fa2141d615d Mon Sep 17 00:00:00 2001 From: AurelienFT <32803821+AurelienFT@users.noreply.github.com> Date: Mon, 18 Nov 2024 13:09:44 +0100 Subject: [PATCH] Resolve some flaky tests and improve CI times (#2401) ## Linked Issues/PRs Fix https://github.com/FuelLabs/fuel-core/issues/2408 Fix https://github.com/FuelLabs/fuel-core/issues/2407 Fix https://github.com/FuelLabs/fuel-core/issues/2406 Fix https://github.com/FuelLabs/fuel-core/issues/2351 Fix https://github.com/FuelLabs/fuel-core/issues/2393 Fix https://github.com/FuelLabs/fuel-core/issues/2394 Fix https://github.com/FuelLabs/fuel-core/issues/2395 ## Description This PR fixes an issue in the P2P heartbeat. The problem was that the P2P heartbeat was updated only if new blocks were received or produced. This means that if we start the node from an existing db but it doesn't produce blocks and isn't connected to anyone, it will send block height 0 to the peers that connect to it. We believe that this fix resolves #2408, #2407, #2406 and #2351. For #2394 we just increased the timeouts. For #2393 we removed the panic in the test and just let p2p reconnect. For #2395 we launch this test using the multi-threaded mode of Tokio to follow the convention of all the other tests that launch a node using `FuelCoreDriver`. We also added a kill of the driver to try to shut the node down more gracefully in all of these tests; it should fix a lot of the flakiness in them. This PR also changes the CI workflow by removing all docker-related jobs and the codecov job. These two sets of jobs have been moved to separate workflows that are not triggered automatically but can be triggered manually on the "Actions" tab of this repository (after the merge of this PR). The tests launched by the CI job now use `nextest`, which allows us to add a timeout for each test and provides more detailed output. 
The timeout is currently 5 min (and 8 for two really big tests) because we have tests that take a long time, but we should lower it in the future. The steps in the matrix are no longer cancelled when one fails, to allow the other steps to possibly succeed and cache their success for a relaunch of the tests. There is still more improvement to make on our tests, especially on timeouts and fast execution, but this should improve our workflow a lot. ## Checklist - [x] Breaking changes are clearly marked as such in the PR description and changelog - [x] New behavior is reflected in tests - [x] [The specification](https://github.com/FuelLabs/fuel-specs/) matches the implemented behavior (link update PR if changes are needed) ### Before requesting review - [x] I have reviewed the code myself - [x] I have created follow-up issues caused by this PR and linked them here --------- Co-authored-by: green --- .config/nextest.toml | 2 + .github/workflows/ci.yml | 481 +----------------- .github/workflows/docker-images.yml | 419 +++++++++++++++ .github/workflows/publish-codecov.yml | 61 +++ CONTRIBUTING.md | 4 + Cargo.lock | 34 -- Makefile.toml | 2 +- benches/src/db_lookup_times_utils/mod.rs | 12 +- bin/e2e-test-client/src/lib.rs | 4 +- ...onfig__tests__default_config_snapshot.snap | 2 +- bin/e2e-test-client/src/tests/script.rs | 13 +- .../test_data/large_state/state_config.json | 3 +- .../tests/integration_tests.rs | 11 +- ci_checks.sh | 13 +- crates/fuel-core/src/executor.rs | 75 +-- crates/fuel-core/src/service/sub_services.rs | 1 + crates/fuel-core/src/state/rocks_db.rs | 52 +- .../src/v0/tests/algorithm_v0_tests.rs | 1 - .../src/v1/tests/algorithm_v1_tests.rs | 1 - .../poa/src/service_test/trigger_tests.rs | 1 - crates/services/executor/src/executor.rs | 13 +- .../src/v1/da_source_service/service.rs | 3 +- crates/services/p2p/src/discovery.rs | 3 +- crates/services/p2p/src/service.rs | 10 + .../sync/src/import/back_pressure_tests.rs | 10 +- crates/services/sync/src/state.rs | 6 + 
.../services/upgradable-executor/Cargo.toml | 1 - .../upgradable-executor/src/executor.rs | 16 +- tests/Cargo.toml | 12 +- tests/tests/aws_kms.rs | 80 +++ tests/tests/blocks.rs | 8 +- tests/tests/dos.rs | 1 - tests/tests/gas_price.rs | 6 +- tests/tests/lib.rs | 41 +- tests/tests/local_node.rs | 1 + tests/tests/poa.rs | 80 +-- tests/tests/recovery.rs | 24 +- tests/tests/regenesis.rs | 11 +- tests/tests/state_rewind.rs | 4 +- 39 files changed, 825 insertions(+), 697 deletions(-) create mode 100644 .config/nextest.toml create mode 100644 .github/workflows/docker-images.yml create mode 100644 .github/workflows/publish-codecov.yml create mode 100644 tests/tests/aws_kms.rs diff --git a/.config/nextest.toml b/.config/nextest.toml new file mode 100644 index 00000000000..47e5bf53d0f --- /dev/null +++ b/.config/nextest.toml @@ -0,0 +1,2 @@ +[profile.default] +slow-timeout = { period = "60s", terminate-after = 5 } diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 943f36c810d..28a8634d14f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -118,20 +118,20 @@ jobs: args: --all-features --workspace --no-deps - command: make args: check --locked - - command: test - args: --workspace - - command: test - args: --all-features --workspace - - command: test - args: -p fuel-core --no-default-features - - command: test - args: -p fuel-core --lib executor --features wasm-executor + - command: nextest + args: run --workspace + - command: nextest + args: run --all-features --workspace + - command: nextest + args: run -p fuel-core --no-default-features + - command: nextest + args: run -p fuel-core --lib executor --features wasm-executor env: FUEL_ALWAYS_USE_WASM=true - - command: test - args: -p fuel-core-client --no-default-features - - command: test - args: -p fuel-core-chain-config --no-default-features + - command: nextest + args: run -p fuel-core-client --no-default-features + - command: nextest + args: run -p fuel-core-chain-config 
--no-default-features # Don't split this command; this is a workaround. # We need to run `cargo check` first to fetch the locked dependencies # for `fuel-core 0.26.0`(because of the bug with `--offline` @@ -152,6 +152,7 @@ jobs: args: -p fuel-core-chain-config --target wasm32-unknown-unknown --no-default-features - command: check args: -p fuel-core-executor --target wasm32-unknown-unknown --no-default-features --features alloc + fail-fast: false # disallow any job that takes longer than 45 minutes timeout-minutes: 45 @@ -177,6 +178,12 @@ jobs: ~/.cargo/git/db/ target/ key: ${{ matrix.command }}-${{ matrix.args }}-${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + - uses: actions-rs/cargo@v1 + if: ${{ matrix.command == 'nextest' }} + with: + command: install + args: cargo-nextest --locked + continue-on-error: true - name: ${{ matrix.command }} ${{ matrix.args }} run: ${{ matrix.env }} cargo ${{ matrix.command }} ${{ matrix.args }} - uses: FuelLabs/.github/.github/actions/slack-notify-template@master @@ -237,48 +244,6 @@ jobs: - name: Run integration tests for kms only run: cargo test -p fuel-core-tests --features aws-kms -- kms - publish-codecov: - name: Publish code coverage report on GitHub pages branch - runs-on: buildjet-4vcpu-ubuntu-2204 - needs: - - cargo-verifications - permissions: # Write access to push changes to pages - contents: write - steps: - - uses: actions/checkout@v4 - - name: Install latest Rust - uses: dtolnay/rust-toolchain@master - with: - toolchain: ${{ env.RUST_VERSION_COV }} - targets: wasm32-unknown-unknown - - - name: Install cargo-llvm-codecov - uses: taiki-e/install-action@cargo-llvm-cov - - - name: Code coverage report - run: cargo +${{ env.RUST_VERSION_COV }} llvm-cov --all-features --html --branch - - - name: Checkout the repo again for pushing pages revision - uses: actions/checkout@v4 - with: - ref: 'codecov-pages' - path: 'pages-branch' - - - name: Push codecov report to pages branch - working-directory: ./pages-branch - 
run: | - export BRANCH_B64=$(echo -n "${{ env.GIT_BRANCH }}" | basenc --base64url) - git config user.email "2204863+Dentosal@users.noreply.github.com" - git config user.name "Dentosal" - cp -r ../target/llvm-cov/html "$BRANCH_B64" - python3 ../.github/workflows/scripts/generate_pages_index.py > index.html - git add . - git commit -m "Update codecov for ${{ env.GIT_BRANCH }}" - git push - export PAGES_URL="https://${{ github.repository_owner }}.github.io/${{ github.event.repository.name }}/$BRANCH_B64/index.html" - echo "$PAGES_URL" - echo "Codecov report $PAGES_URL" >> $GITHUB_STEP_SUMMARY - verifications-complete: needs: - cargo-verifications @@ -334,385 +299,6 @@ jobs: github_token: ${{ secrets.GITHUB_TOKEN }} slack_webhook: ${{ secrets.SLACK_WEBHOOK_NOTIFY_BUILD }} - build-docker-images: - needs: - - publish-crates-check - strategy: - matrix: - arch: [ - # build on native runners instead of using emulation - { platform: linux/amd64, runner: buildjet-8vcpu-ubuntu-2204 }, - { platform: linux/arm64, runner: buildjet-16vcpu-ubuntu-2204-arm } - ] - runs-on: ${{ matrix.arch.runner }} - permissions: - contents: read - packages: write - steps: - - name: Setup environment - run: | - echo "REGISTRY_URL=${REGISTRY@L}/${GIT_REPO@L}" >>${GITHUB_ENV} - platform=${{ matrix.arch.platform }} - echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV - - - name: Checkout repository - uses: actions/checkout@v4 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - - name: Log in to the ghcr.io registry - uses: docker/login-action@v3 - with: - registry: ${{ env.REGISTRY }} - username: ${{ github.repository_owner }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Log in to the docker.io registry - uses: docker/login-action@v3 - with: - username: fuellabs - password: ${{ secrets.DOCKER_IO_READ_ONLY_TOKEN }} - - - name: Docker meta - id: meta - uses: docker/metadata-action@v5 - with: - images: ${{ env.REGISTRY_URL }} - - - name: Setup Rust build cache - id: cache 
- uses: buildjet/cache@v3 - with: - path: | - home-cargo-bin - home-cargo-registry-index - home-cargo-registry-cache - home-cargo-git-db - target - key: ${{ env.PLATFORM_PAIR }}-${{ hashFiles('**/Cargo.lock') }} - - - name: Inject cache into docker - uses: reproducible-containers/buildkit-cache-dance@v3.1.2 - with: - cache-map: | - { - "home-cargo-bin": "/usr/local/cargo/bin", - "home-cargo-registry-index": "/usr/local/cargo/registry/index", - "home-cargo-registry-cache": "/usr/local/cargo/registry/cache", - "home-cargo-git-db": "/usr/local/cargo/git/db", - "target": "/build/target" - } - skip-extraction: ${{ steps.cache.outputs.cache-hit }} - - - name: Build Docker image - id: build - uses: docker/build-push-action@v6 - with: - context: . - platforms: ${{ matrix.arch.platform }} - file: deployment/Dockerfile - labels: ${{ steps.meta.outputs.labels }} - cache-from: type=registry,ref=${{ env.REGISTRY_URL }}-build-cache:latest-${{ matrix.arch.runner }} - cache-to: type=registry,ref=${{ env.REGISTRY_URL }}-build-cache:latest-${{ matrix.arch.runner }},mode=max,image-manifest=true,oci-mediatypes=true - outputs: | - type=image,name=${{ env.REGISTRY_URL }},push-by-digest=true,name-canonical=true,push=true - - - name: Export digest - run: | - mkdir -p /tmp/digests - digest="${{ steps.build.outputs.digest }}" - touch "/tmp/digests/${digest#sha256:}" - - - name: Upload digest - uses: actions/upload-artifact@v4 - with: - name: digests-${{ env.PLATFORM_PAIR }} - path: /tmp/digests/* - if-no-files-found: error - retention-days: 1 - - publish-docker-image: - needs: - - build-docker-images - - verifications-complete - runs-on: ubuntu-latest - permissions: - contents: read - packages: write - id-token: write - steps: - - name: Setup environment - run: | - echo "REGISTRY_URL=${REGISTRY@L}/${GIT_REPO@L}" >>${GITHUB_ENV} - - - name: Download digests - uses: actions/download-artifact@v4 - with: - path: /tmp/digests - pattern: digests-* - merge-multiple: true - - - name: Set up Docker 
Buildx - uses: docker/setup-buildx-action@v3 - - - name: Configure AWS credentials for ECR publishing - uses: aws-actions/configure-aws-credentials@v4 - with: - role-to-assume: ${{ env.AWS_ROLE_ARN }} - aws-region: us-east-1 # ecr public is only in us-east-1 - - - name: Login to Amazon ECR Public - id: login-ecr-public - uses: aws-actions/amazon-ecr-login@v2 - with: - registry-type: public - - - name: Log in to the ghcr.io registry - uses: docker/login-action@v3 - with: - registry: ${{ env.REGISTRY }} - username: ${{ github.repository_owner }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Log in to the docker.io registry - uses: docker/login-action@v3 - with: - username: fuellabs - password: ${{ secrets.DOCKER_IO_READ_ONLY_TOKEN }} - - - name: Docker metadata - id: meta - uses: docker/metadata-action@v5 - with: - images: | - ${{ env.REGISTRY_URL }} - ${{ steps.login-ecr-public.outputs.registry }}/${{ env.AWS_ECR_ORG }}/${{ env.GIT_REPO_NAME }} - tags: | - type=sha - type=ref,event=branch - type=ref,event=tag - type=semver,pattern={{raw}} - type=raw,value=sha-{{sha}}-{{date 'YYYYMMDDhhmmss'}} - type=raw,value=latest,enable={{is_default_branch}} - - - name: Create manifest list and push to all registries - working-directory: /tmp/digests - run: | - docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) 
| join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \ - $(printf '${{ env.REGISTRY_URL }}@sha256:%s ' *) - - - name: Inspect image - run: | - docker buildx imagetools inspect ${{ env.REGISTRY_URL }}:${{ steps.meta.outputs.version }} - - # duplicate of publish-docker-image, but with profiling features enabled - # this is split into a separate action since it takes longer to build - publish-docker-image-profiling: - needs: - - verifications-complete - runs-on: buildjet-16vcpu-ubuntu-2204 - permissions: - contents: read - packages: write - id-token: write - steps: - - name: Setup environment - run: | - echo "REGISTRY_URL=${REGISTRY@L}/${GIT_REPO@L}" >>${GITHUB_ENV} - - - name: Checkout repository - uses: actions/checkout@v4 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - - name: Configure AWS credentials for ECR publishing - uses: aws-actions/configure-aws-credentials@v4 - with: - role-to-assume: ${{ env.AWS_ROLE_ARN }} - aws-region: us-east-1 # ecr public is only in us-east-1 - - - name: Login to Amazon ECR Public - id: login-ecr-public - uses: aws-actions/amazon-ecr-login@v2 - with: - registry-type: public - - - name: Log in to the ghcr.io registry - uses: docker/login-action@v1 - with: - registry: ${{ env.REGISTRY }} - username: ${{ github.repository_owner }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Log in to the docker.io registry - uses: docker/login-action@v2 - with: - username: fuellabs - password: ${{ secrets.DOCKER_IO_READ_ONLY_TOKEN }} - - - name: Docker meta - id: meta - uses: docker/metadata-action@v5 - with: - images: | - ${{ env.REGISTRY_URL }}-debug - ${{ steps.login-ecr-public.outputs.registry }}/${{ env.AWS_ECR_ORG }}/${{ env.GIT_REPO_NAME }}-debug - tags: | - type=sha - type=ref,event=branch - type=ref,event=tag - type=semver,pattern={{raw}} - type=raw,value=sha-{{sha}}-{{date 'YYYYMMDDhhmmss'}} - type=raw,value=latest,enable={{is_default_branch}} - - - name: Setup Rust build cache - id: cache - uses: 
buildjet/cache@v3 - with: - path: | - home-cargo-bin - home-cargo-registry-index - home-cargo-registry-cache - home-cargo-git-db - target - key: publish-docker-image-profiling-${{ hashFiles('**/Cargo.lock') }} - - - name: Inject cache into docker - uses: reproducible-containers/buildkit-cache-dance@v3.1.2 - with: - cache-map: | - { - "home-cargo-bin": "/usr/local/cargo/bin", - "home-cargo-registry-index": "/usr/local/cargo/registry/index", - "home-cargo-registry-cache": "/usr/local/cargo/registry/cache", - "home-cargo-git-db": "/usr/local/cargo/git/db", - "target": "/build/target" - } - skip-extraction: ${{ steps.cache.outputs.cache-hit }} - - - name: Build & push Docker image - id: build - uses: docker/build-push-action@v6 - with: - context: . - file: deployment/Dockerfile - build-args: "DEBUG_SYMBOLS=true" - push: true - tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} - cache-from: type=registry,ref=${{ env.REGISTRY_URL }}-build-cache-debug:latest - cache-to: type=registry,ref=${{ env.REGISTRY_URL }}-build-cache-debug:latest,mode=max,image-manifest=true,oci-mediatypes=true - - - uses: FuelLabs/.github/.github/actions/slack-notify-template@master - if: always() && (github.ref == 'refs/heads/master' || github.ref_type == 'tag') - with: - github_token: ${{ secrets.GITHUB_TOKEN }} - slack_webhook: ${{ secrets.SLACK_WEBHOOK_NOTIFY_BUILD }} - - publish-e2e-client-docker-image: - needs: - - verifications-complete - runs-on: buildjet-4vcpu-ubuntu-2204 - permissions: - contents: read - packages: write - id-token: write - steps: - - name: Setup environment - run: | - echo "REGISTRY_URL=${REGISTRY@L}/${GIT_REPO@L}" >>${GITHUB_ENV} - - - name: Checkout repository - uses: actions/checkout@v4 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - - name: Configure AWS credentials for ECR publishing - uses: aws-actions/configure-aws-credentials@v4 - with: - role-to-assume: ${{ env.AWS_ROLE_ARN }} - aws-region: us-east-1 # ecr 
public is only in us-east-1 - - - name: Login to Amazon ECR Public - id: login-ecr-public - uses: aws-actions/amazon-ecr-login@v2 - with: - registry-type: public - - - name: Log in to the ghcr.io registry - uses: docker/login-action@v1 - with: - registry: ${{ env.REGISTRY }} - username: ${{ github.repository_owner }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Log in to the docker.io registry - uses: docker/login-action@v2 - with: - username: fuellabs - password: ${{ secrets.DOCKER_IO_READ_ONLY_TOKEN }} - - - name: Docker meta - id: meta - uses: docker/metadata-action@v5 - with: - images: | - ${{ env.REGISTRY_URL }}-e2e-client - ${{ steps.login-ecr-public.outputs.registry }}/${{ env.AWS_ECR_ORG }}/${{ env.GIT_REPO_NAME }}-e2e-client - tags: | - type=sha - type=ref,event=branch - type=ref,event=tag - type=semver,pattern={{raw}} - type=raw,value=sha-{{sha}}-{{date 'YYYYMMDDhhmmss'}} - type=raw,value=latest,enable={{is_default_branch}} - - - name: Setup Rust build cache - id: cache - uses: buildjet/cache@v3 - with: - path: | - home-cargo-bin - home-cargo-registry-index - home-cargo-registry-cache - home-cargo-git-db - target - key: publish-e2e-client-docker-image-${{ hashFiles('**/Cargo.lock') }} - - - name: Inject cache into docker - uses: reproducible-containers/buildkit-cache-dance@v3.1.2 - with: - cache-map: | - { - "home-cargo-bin": "/usr/local/cargo/bin", - "home-cargo-registry-index": "/usr/local/cargo/registry/index", - "home-cargo-registry-cache": "/usr/local/cargo/registry/cache", - "home-cargo-git-db": "/usr/local/cargo/git/db", - "target": "/build/target" - } - skip-extraction: ${{ steps.cache.outputs.cache-hit }} - - - name: Build & push Docker image - id: build - uses: docker/build-push-action@v6 - with: - context: . 
- file: deployment/e2e-client.Dockerfile - push: true - tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} - cache-from: type=registry,ref=${{ env.REGISTRY_URL }}-build-cache-e2e:latest - cache-to: type=registry,ref=${{ env.REGISTRY_URL }}-build-cache-e2e:latest,mode=max,image-manifest=true,oci-mediatypes=true - - - uses: FuelLabs/.github/.github/actions/slack-notify-template@master - if: always() && (github.ref == 'refs/heads/master' || github.ref_type == 'tag') - with: - github_token: ${{ secrets.GITHUB_TOKEN }} - slack_webhook: ${{ secrets.SLACK_WEBHOOK_NOTIFY_BUILD }} - publish-fuel-core-binary: name: Release fuel-core binaries runs-on: ${{ matrix.job.os }} @@ -858,35 +444,6 @@ jobs: with: github_token: ${{ secrets.GITHUB_TOKEN }} slack_webhook: ${{ secrets.SLACK_WEBHOOK_NOTIFY_BUILD }} - - # Deploy Fuel Core Ephemeral Developer Environment - deploy-eph-env: - if: startsWith(github.head_ref, 'preview/') - needs: - - publish-docker-image - runs-on: buildjet-4vcpu-ubuntu-2204 - steps: - - name: Set Environment Variables - run: | - tag=(`echo $GITHUB_SHA | cut -c1-7`) - echo "IMAGE_TAG=`echo sha-$tag`" >> $GITHUB_ENV - echo "DEPLOYMENT_VERSION=$(echo $GITHUB_SHA)" >> $GITHUB_ENV - echo "NAMESPACE=$(echo ${GITHUB_HEAD_REF} | cut -c 9-)" >> $GITHUB_ENV - - - name: Deploy Fuel Core Ephemeral Developer Environment - uses: benc-uk/workflow-dispatch@v1 - with: - workflow: Deploy Fuel-Core on k8s - repo: FuelLabs/fuel-deployment - ref: refs/heads/master - token: ${{ secrets.REPO_TOKEN }} - inputs: '{ "k8s-type": "${{ env.K8S }}", "config-directory": "${{ env.CONFIG }}", "config-env": "${{ env.ENV }}", "deployment-version": "${{ env.DEPLOYMENT_VERSION }}", "image-tag": "${{ env.IMAGE_TAG }}", "namespace": "${{ env.NAMESPACE }}", "delete-infra": "${{ env.DELETE_INFRA }}" }' - env: - K8S: 'eks' - CONFIG: 'fuel-dev1' - ENV: 'fueldevsway.env' - DELETE_INFRA: true - cargo-audit: runs-on: ubuntu-latest continue-on-error: true diff --git 
a/.github/workflows/docker-images.yml b/.github/workflows/docker-images.yml new file mode 100644 index 00000000000..31ceffc464a --- /dev/null +++ b/.github/workflows/docker-images.yml @@ -0,0 +1,419 @@ +name: Docker Images + +on: + workflow_dispatch: + +env: + GIT_BRANCH: ${{ github.head_ref || github.ref_name }} + GIT_REPO_OWNER: ${{ github.repository_owner }} + GIT_REPO: ${{ github.repository }} + GIT_REPO_NAME: ${{ github.event.repository.name }} + AWS_ROLE_ARN: arn:aws:iam::024848458133:role/github_oidc_FuelLabs_fuel-core + AWS_ECR_ORG: fuellabs + CARGO_TERM_COLOR: always + RUST_VERSION: 1.79.0 + RUST_VERSION_FMT: nightly-2023-10-29 + RUST_VERSION_COV: nightly-2024-06-05 + RUSTFLAGS: -D warnings + REGISTRY: ghcr.io + SEGMENT_DOWNLOAD_TIMEOUT_MINS: 2 + +jobs: + build-docker-images: + strategy: + matrix: + arch: [ + # build on native runners instead of using emulation + { platform: linux/amd64, runner: buildjet-8vcpu-ubuntu-2204 }, + { platform: linux/arm64, runner: buildjet-16vcpu-ubuntu-2204-arm } + ] + runs-on: ${{ matrix.arch.runner }} + permissions: + contents: read + packages: write + steps: + - name: Setup environment + run: | + echo "REGISTRY_URL=${REGISTRY@L}/${GIT_REPO@L}" >>${GITHUB_ENV} + platform=${{ matrix.arch.platform }} + echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV + + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Log in to the ghcr.io registry + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.repository_owner }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Log in to the docker.io registry + uses: docker/login-action@v3 + with: + username: fuellabs + password: ${{ secrets.DOCKER_IO_READ_ONLY_TOKEN }} + + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.REGISTRY_URL }} + + - name: Setup Rust build cache + id: cache + uses: buildjet/cache@v3 + with: + 
path: | + home-cargo-bin + home-cargo-registry-index + home-cargo-registry-cache + home-cargo-git-db + target + key: ${{ env.PLATFORM_PAIR }}-${{ hashFiles('**/Cargo.lock') }} + + - name: Inject cache into docker + uses: reproducible-containers/buildkit-cache-dance@v3.1.2 + with: + cache-map: | + { + "home-cargo-bin": "/usr/local/cargo/bin", + "home-cargo-registry-index": "/usr/local/cargo/registry/index", + "home-cargo-registry-cache": "/usr/local/cargo/registry/cache", + "home-cargo-git-db": "/usr/local/cargo/git/db", + "target": "/build/target" + } + skip-extraction: ${{ steps.cache.outputs.cache-hit }} + + - name: Build Docker image + id: build + uses: docker/build-push-action@v6 + with: + context: . + platforms: ${{ matrix.arch.platform }} + file: deployment/Dockerfile + labels: ${{ steps.meta.outputs.labels }} + cache-from: type=registry,ref=${{ env.REGISTRY_URL }}-build-cache:latest-${{ matrix.arch.runner }} + cache-to: type=registry,ref=${{ env.REGISTRY_URL }}-build-cache:latest-${{ matrix.arch.runner }},mode=max,image-manifest=true,oci-mediatypes=true + outputs: | + type=image,name=${{ env.REGISTRY_URL }},push-by-digest=true,name-canonical=true,push=true + + - name: Export digest + run: | + mkdir -p /tmp/digests + digest="${{ steps.build.outputs.digest }}" + touch "/tmp/digests/${digest#sha256:}" + + - name: Upload digest + uses: actions/upload-artifact@v4 + with: + name: digests-${{ env.PLATFORM_PAIR }} + path: /tmp/digests/* + if-no-files-found: error + retention-days: 1 + + publish-docker-image: + needs: + - build-docker-images + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + id-token: write + steps: + - name: Setup environment + run: | + echo "REGISTRY_URL=${REGISTRY@L}/${GIT_REPO@L}" >>${GITHUB_ENV} + + - name: Download digests + uses: actions/download-artifact@v4 + with: + path: /tmp/digests + pattern: digests-* + merge-multiple: true + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: 
Configure AWS credentials for ECR publishing + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ env.AWS_ROLE_ARN }} + aws-region: us-east-1 # ecr public is only in us-east-1 + + - name: Login to Amazon ECR Public + id: login-ecr-public + uses: aws-actions/amazon-ecr-login@v2 + with: + registry-type: public + + - name: Log in to the ghcr.io registry + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.repository_owner }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Log in to the docker.io registry + uses: docker/login-action@v3 + with: + username: fuellabs + password: ${{ secrets.DOCKER_IO_READ_ONLY_TOKEN }} + + - name: Docker metadata + id: meta + uses: docker/metadata-action@v5 + with: + images: | + ${{ env.REGISTRY_URL }} + ${{ steps.login-ecr-public.outputs.registry }}/${{ env.AWS_ECR_ORG }}/${{ env.GIT_REPO_NAME }} + tags: | + type=sha + type=ref,event=branch + type=ref,event=tag + type=semver,pattern={{raw}} + type=raw,value=sha-{{sha}}-{{date 'YYYYMMDDhhmmss'}} + type=raw,value=latest,enable={{is_default_branch}} + + - name: Create manifest list and push to all registries + working-directory: /tmp/digests + run: | + docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) 
| join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \ + $(printf '${{ env.REGISTRY_URL }}@sha256:%s ' *) + + - name: Inspect image + run: | + docker buildx imagetools inspect ${{ env.REGISTRY_URL }}:${{ steps.meta.outputs.version }} + + # duplicate of publish-docker-image, but with profiling features enabled + # this is split into a separate action since it takes longer to build + publish-docker-image-profiling: + runs-on: buildjet-16vcpu-ubuntu-2204 + permissions: + contents: read + packages: write + id-token: write + steps: + - name: Setup environment + run: | + echo "REGISTRY_URL=${REGISTRY@L}/${GIT_REPO@L}" >>${GITHUB_ENV} + + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Configure AWS credentials for ECR publishing + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ env.AWS_ROLE_ARN }} + aws-region: us-east-1 # ecr public is only in us-east-1 + + - name: Login to Amazon ECR Public + id: login-ecr-public + uses: aws-actions/amazon-ecr-login@v2 + with: + registry-type: public + + - name: Log in to the ghcr.io registry + uses: docker/login-action@v1 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.repository_owner }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Log in to the docker.io registry + uses: docker/login-action@v2 + with: + username: fuellabs + password: ${{ secrets.DOCKER_IO_READ_ONLY_TOKEN }} + + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + images: | + ${{ env.REGISTRY_URL }}-debug + ${{ steps.login-ecr-public.outputs.registry }}/${{ env.AWS_ECR_ORG }}/${{ env.GIT_REPO_NAME }}-debug + tags: | + type=sha + type=ref,event=branch + type=ref,event=tag + type=semver,pattern={{raw}} + type=raw,value=sha-{{sha}}-{{date 'YYYYMMDDhhmmss'}} + type=raw,value=latest,enable={{is_default_branch}} + + - name: Setup Rust build cache + id: cache + uses: buildjet/cache@v3 + with: + path: | + 
home-cargo-bin + home-cargo-registry-index + home-cargo-registry-cache + home-cargo-git-db + target + key: publish-docker-image-profiling-${{ hashFiles('**/Cargo.lock') }} + + - name: Inject cache into docker + uses: reproducible-containers/buildkit-cache-dance@v3.1.2 + with: + cache-map: | + { + "home-cargo-bin": "/usr/local/cargo/bin", + "home-cargo-registry-index": "/usr/local/cargo/registry/index", + "home-cargo-registry-cache": "/usr/local/cargo/registry/cache", + "home-cargo-git-db": "/usr/local/cargo/git/db", + "target": "/build/target" + } + skip-extraction: ${{ steps.cache.outputs.cache-hit }} + + - name: Build & push Docker image + id: build + uses: docker/build-push-action@v6 + with: + context: . + file: deployment/Dockerfile + build-args: "DEBUG_SYMBOLS=true" + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + cache-from: type=registry,ref=${{ env.REGISTRY_URL }}-build-cache-debug:latest + cache-to: type=registry,ref=${{ env.REGISTRY_URL }}-build-cache-debug:latest,mode=max,image-manifest=true,oci-mediatypes=true + + - uses: FuelLabs/.github/.github/actions/slack-notify-template@master + if: always() && (github.ref == 'refs/heads/master' || github.ref_type == 'tag') + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + slack_webhook: ${{ secrets.SLACK_WEBHOOK_NOTIFY_BUILD }} + + publish-e2e-client-docker-image: + runs-on: buildjet-4vcpu-ubuntu-2204 + permissions: + contents: read + packages: write + id-token: write + steps: + - name: Setup environment + run: | + echo "REGISTRY_URL=${REGISTRY@L}/${GIT_REPO@L}" >>${GITHUB_ENV} + + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Configure AWS credentials for ECR publishing + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ env.AWS_ROLE_ARN }} + aws-region: us-east-1 # ecr public is only in us-east-1 + + - name: Login to Amazon ECR Public + id: 
login-ecr-public + uses: aws-actions/amazon-ecr-login@v2 + with: + registry-type: public + + - name: Log in to the ghcr.io registry + uses: docker/login-action@v1 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.repository_owner }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Log in to the docker.io registry + uses: docker/login-action@v2 + with: + username: fuellabs + password: ${{ secrets.DOCKER_IO_READ_ONLY_TOKEN }} + + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + images: | + ${{ env.REGISTRY_URL }}-e2e-client + ${{ steps.login-ecr-public.outputs.registry }}/${{ env.AWS_ECR_ORG }}/${{ env.GIT_REPO_NAME }}-e2e-client + tags: | + type=sha + type=ref,event=branch + type=ref,event=tag + type=semver,pattern={{raw}} + type=raw,value=sha-{{sha}}-{{date 'YYYYMMDDhhmmss'}} + type=raw,value=latest,enable={{is_default_branch}} + + - name: Setup Rust build cache + id: cache + uses: buildjet/cache@v3 + with: + path: | + home-cargo-bin + home-cargo-registry-index + home-cargo-registry-cache + home-cargo-git-db + target + key: publish-e2e-client-docker-image-${{ hashFiles('**/Cargo.lock') }} + + - name: Inject cache into docker + uses: reproducible-containers/buildkit-cache-dance@v3.1.2 + with: + cache-map: | + { + "home-cargo-bin": "/usr/local/cargo/bin", + "home-cargo-registry-index": "/usr/local/cargo/registry/index", + "home-cargo-registry-cache": "/usr/local/cargo/registry/cache", + "home-cargo-git-db": "/usr/local/cargo/git/db", + "target": "/build/target" + } + skip-extraction: ${{ steps.cache.outputs.cache-hit }} + + - name: Build & push Docker image + id: build + uses: docker/build-push-action@v6 + with: + context: . 
+ file: deployment/e2e-client.Dockerfile + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + cache-from: type=registry,ref=${{ env.REGISTRY_URL }}-build-cache-e2e:latest + cache-to: type=registry,ref=${{ env.REGISTRY_URL }}-build-cache-e2e:latest,mode=max,image-manifest=true,oci-mediatypes=true + + - uses: FuelLabs/.github/.github/actions/slack-notify-template@master + if: always() && (github.ref == 'refs/heads/master' || github.ref_type == 'tag') + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + slack_webhook: ${{ secrets.SLACK_WEBHOOK_NOTIFY_BUILD }} + # Deploy Fuel Core Ephemeral Developer Environment + deploy-eph-env: + if: startsWith(github.head_ref, 'preview/') + needs: + - publish-docker-image + runs-on: buildjet-4vcpu-ubuntu-2204 + steps: + - name: Set Environment Variables + run: | + tag=(`echo $GITHUB_SHA | cut -c1-7`) + echo "IMAGE_TAG=`echo sha-$tag`" >> $GITHUB_ENV + echo "DEPLOYMENT_VERSION=$(echo $GITHUB_SHA)" >> $GITHUB_ENV + echo "NAMESPACE=$(echo ${GITHUB_HEAD_REF} | cut -c 9-)" >> $GITHUB_ENV + + - name: Deploy Fuel Core Ephemeral Developer Environment + uses: benc-uk/workflow-dispatch@v1 + with: + workflow: Deploy Fuel-Core on k8s + repo: FuelLabs/fuel-deployment + ref: refs/heads/master + token: ${{ secrets.REPO_TOKEN }} + inputs: '{ "k8s-type": "${{ env.K8S }}", "config-directory": "${{ env.CONFIG }}", "config-env": "${{ env.ENV }}", "deployment-version": "${{ env.DEPLOYMENT_VERSION }}", "image-tag": "${{ env.IMAGE_TAG }}", "namespace": "${{ env.NAMESPACE }}", "delete-infra": "${{ env.DELETE_INFRA }}" }' + env: + K8S: 'eks' + CONFIG: 'fuel-dev1' + ENV: 'fueldevsway.env' + DELETE_INFRA: true diff --git a/.github/workflows/publish-codecov.yml b/.github/workflows/publish-codecov.yml new file mode 100644 index 00000000000..42230a15b4d --- /dev/null +++ b/.github/workflows/publish-codecov.yml @@ -0,0 +1,61 @@ +name: Publish Codecov Report + +on: + workflow_dispatch: + +env: + GIT_BRANCH: ${{ 
github.head_ref || github.ref_name }} + GIT_REPO_OWNER: ${{ github.repository_owner }} + GIT_REPO: ${{ github.repository }} + GIT_REPO_NAME: ${{ github.event.repository.name }} + AWS_ROLE_ARN: arn:aws:iam::024848458133:role/github_oidc_FuelLabs_fuel-core + AWS_ECR_ORG: fuellabs + CARGO_TERM_COLOR: always + RUST_VERSION: 1.79.0 + RUST_VERSION_FMT: nightly-2023-10-29 + RUST_VERSION_COV: nightly-2024-06-05 + RUSTFLAGS: -D warnings + REGISTRY: ghcr.io + SEGMENT_DOWNLOAD_TIMEOUT_MINS: 2 + +jobs: + publish-codecov: + name: Publish code coverage report on GitHub pages branch + runs-on: buildjet-4vcpu-ubuntu-2204 + permissions: # Write access to push changes to pages + contents: write + steps: + - uses: actions/checkout@v4 + - name: Install latest Rust + uses: dtolnay/rust-toolchain@master + with: + toolchain: ${{ env.RUST_VERSION_COV }} + targets: wasm32-unknown-unknown + + - name: Install cargo-llvm-codecov + uses: taiki-e/install-action@cargo-llvm-cov + + - name: Code coverage report + run: cargo +${{ env.RUST_VERSION_COV }} llvm-cov --all-features --html --branch + + - name: Checkout the repo again for pushing pages revision + uses: actions/checkout@v4 + with: + ref: 'codecov-pages' + path: 'pages-branch' + + - name: Push codecov report to pages branch + working-directory: ./pages-branch + run: | + export BRANCH_B64=$(echo -n "${{ env.GIT_BRANCH }}" | basenc --base64url) + git config user.email "2204863+Dentosal@users.noreply.github.com" + git config user.name "Dentosal" + cp -r ../target/llvm-cov/html "$BRANCH_B64" + python3 ../.github/workflows/scripts/generate_pages_index.py > index.html + git add . 
+ git commit -m "Update codecov for ${{ env.GIT_BRANCH }}" + git push + export PAGES_URL="https://${{ github.repository_owner }}.github.io/${{ github.event.repository.name }}/$BRANCH_B64/index.html" + echo "$PAGES_URL" + echo "Codecov report $PAGES_URL" >> $GITHUB_STEP_SUMMARY + \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index ce3d6df8cd2..f379e61081a 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -153,6 +153,10 @@ Multiple issues should use full syntax for each issue and separate by a comma, l close #123, ref #456 ``` +### Long tests issues + +If your test is taking more than 5 minutes on the CI, you need to edit the `.config/nextest.toml` file to allow your test to have a bigger timeout. See the [nextest documentation](https://nexte.st/docs/configuration/?h=config) for the format. + ### Releasing Each release should have its own new version of the `fuel_core_upgradable_executor::Executor` regardless of minor or major release. The version of the executor should grow without gaps. 
diff --git a/Cargo.lock b/Cargo.lock index 683e5b02a69..1903c106fde 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3852,7 +3852,6 @@ dependencies = [ "fuel-core-storage", "fuel-core-types 0.40.0", "fuel-core-wasm-executor", - "ntest", "parking_lot", "postcard", "tracing", @@ -6449,39 +6448,6 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" -[[package]] -name = "ntest" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb183f0a1da7a937f672e5ee7b7edb727bf52b8a52d531374ba8ebb9345c0330" -dependencies = [ - "ntest_test_cases", - "ntest_timeout", -] - -[[package]] -name = "ntest_test_cases" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16d0d3f2a488592e5368ebbe996e7f1d44aa13156efad201f5b4d84e150eaa93" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "ntest_timeout" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcc7c92f190c97f79b4a332f5e81dcf68c8420af2045c936c9be0bc9de6f63b5" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "nu-ansi-term" version = "0.46.0" diff --git a/Makefile.toml b/Makefile.toml index 9494fce12af..4c7e5aa3ba2 100644 --- a/Makefile.toml +++ b/Makefile.toml @@ -35,4 +35,4 @@ env = { "CARGO_MAKE_WORKSPACE_SKIP_MEMBERS" = ["fuel-core-tests"] } description = "Run tests on each workspace member" category = "Test" command = "cargo" -args = ["test", "${@}"] +args = ["nextest", "run", "${@}"] diff --git a/benches/src/db_lookup_times_utils/mod.rs b/benches/src/db_lookup_times_utils/mod.rs index 0d001a3bc3e..b446a52d00f 100644 --- a/benches/src/db_lookup_times_utils/mod.rs +++ b/benches/src/db_lookup_times_utils/mod.rs @@ -14,20 +14,16 @@ mod tests { }; use fuel_core::state::rocks_db::RocksDb; - use 
crate::{ - db_lookup_times_utils::seed::{ - insert_compressed_block, - insert_full_block, - }, - utils::ShallowTempDir, + use crate::db_lookup_times_utils::seed::{ + insert_compressed_block, + insert_full_block, }; const TEST_HEIGHT: u32 = 1; const TEST_TX_COUNT: u32 = 10; fn setup_test_db() -> RocksDb { - let temp_dir = ShallowTempDir::new(); - RocksDb::default_open(temp_dir.path(), None, -1).unwrap() + RocksDb::default_open_temp(None).unwrap() } #[test] diff --git a/bin/e2e-test-client/src/lib.rs b/bin/e2e-test-client/src/lib.rs index 36ee3ea5413..b8bf49b92ac 100644 --- a/bin/e2e-test-client/src/lib.rs +++ b/bin/e2e-test-client/src/lib.rs @@ -15,7 +15,7 @@ use std::{ }; pub const CONFIG_FILE_KEY: &str = "FUEL_CORE_E2E_CONFIG"; -pub const SYNC_TIMEOUT: Duration = Duration::from_secs(10); +pub const SYNC_TIMEOUT: Duration = Duration::from_secs(20); pub mod config; pub mod test_context; @@ -115,7 +115,7 @@ pub fn main_body(config: SuiteConfig, mut args: Arguments) { with_cloned(&config, |config| { async_execute(async { let ctx = TestContext::new(config).await; - tests::transfers::transfer_back(&ctx).await + tests::contracts::deploy_large_contract(&ctx).await }) }), ), diff --git a/bin/e2e-test-client/src/snapshots/fuel_core_e2e_client__config__tests__default_config_snapshot.snap b/bin/e2e-test-client/src/snapshots/fuel_core_e2e_client__config__tests__default_config_snapshot.snap index e56ef483f66..4a38c16f0c4 100644 --- a/bin/e2e-test-client/src/snapshots/fuel_core_e2e_client__config__tests__default_config_snapshot.snap +++ b/bin/e2e-test-client/src/snapshots/fuel_core_e2e_client__config__tests__default_config_snapshot.snap @@ -3,7 +3,7 @@ source: bin/e2e-test-client/src/config.rs expression: serialized --- endpoint = "http://localhost:4000" -wallet_sync_timeout = "10s" +wallet_sync_timeout = "20s" full_test = false coinbase_contract_id = "7777777777777777777777777777777777777777777777777777777777777777" diff --git a/bin/e2e-test-client/src/tests/script.rs 
b/bin/e2e-test-client/src/tests/script.rs index a14268627e0..d0bed59d178 100644 --- a/bin/e2e-test-client/src/tests/script.rs +++ b/bin/e2e-test-client/src/tests/script.rs @@ -23,6 +23,7 @@ use fuel_core_types::{ }, services::executor::TransactionExecutionResult, }; +use futures::StreamExt; use libtest_mimic::Failed; use std::{ path::Path, @@ -137,7 +138,7 @@ pub async fn run_contract_large_state(ctx: &TestContext) -> Result<(), Failed> { if result?.is_none() { let deployment_request = ctx.bob.deploy_contract(contract_config, salt); - timeout(Duration::from_secs(20), deployment_request).await??; + timeout(Duration::from_secs(90), deployment_request).await??; } _dry_runs(ctx, &[dry_run], 100, DryRunResult::MayFail).await @@ -199,10 +200,12 @@ async fn _dry_runs( }); } - // All queries should be resolved for 60 seconds. - let queries = - tokio::time::timeout(Duration::from_secs(60), futures::future::join_all(queries)) - .await?; + let stream = futures::stream::iter(queries.into_iter()) + .buffered(10) + .collect::>(); + + // All queries should be resolved for 90 seconds. 
+ let queries = tokio::time::timeout(Duration::from_secs(90), stream).await?; let chain_info = ctx.alice.client.chain_info().await?; for query in queries { diff --git a/bin/e2e-test-client/src/tests/test_data/large_state/state_config.json b/bin/e2e-test-client/src/tests/test_data/large_state/state_config.json index 0be6bf6e091..1cf35d39097 100644 --- a/bin/e2e-test-client/src/tests/test_data/large_state/state_config.json +++ b/bin/e2e-test-client/src/tests/test_data/large_state/state_config.json @@ -118150,6 +118150,5 @@ } ] } - ], - "last_block": null + ] } diff --git a/bin/e2e-test-client/tests/integration_tests.rs b/bin/e2e-test-client/tests/integration_tests.rs index a6a13f75f2a..bb62d4e8dc3 100644 --- a/bin/e2e-test-client/tests/integration_tests.rs +++ b/bin/e2e-test-client/tests/integration_tests.rs @@ -4,15 +4,20 @@ use fuel_core::service::{ }; // Add methods on commands +use fuel_core::service::config::Trigger; use fuel_core_chain_config::{ SnapshotMetadata, SnapshotReader, }; use fuel_core_e2e_client::config::SuiteConfig; -use fuel_core_types::fuel_tx::ContractId; +use fuel_core_types::{ + blockchain::header::LATEST_STATE_TRANSITION_VERSION, + fuel_tx::ContractId, +}; use std::{ fs, str::FromStr, + time::Duration, }; use tempfile::TempDir; // Used for writing assertions // Run programs @@ -127,6 +132,7 @@ fn dev_config() -> Config { } chain_config.state_transition_bytecode = fuel_core::upgradable_executor::WASM_BYTECODE.to_vec(); + chain_config.genesis_state_transition_version = Some(LATEST_STATE_TRANSITION_VERSION); let reader = reader.with_chain_config(chain_config); let mut config = Config::local_node_with_reader(reader); @@ -137,6 +143,9 @@ fn dev_config() -> Config { ) .unwrap(), ); + config.block_production = Trigger::Interval { + block_time: Duration::from_secs(1), + }; config } diff --git a/ci_checks.sh b/ci_checks.sh index b948a32ce09..fa5454366dd 100755 --- a/ci_checks.sh +++ b/ci_checks.sh @@ -11,6 +11,7 @@ # - `cargo install cargo-sort` # - 
`cargo install cargo-make` # - `cargo install cargo-insta` +# - `cargo install cargo-nextest` # - `npm install prettier prettier-plugin-toml` npx prettier --write "**/Cargo.toml" && @@ -29,9 +30,9 @@ cargo check -p fuel-core-executor --target wasm32-unknown-unknown --no-default-f cargo make check --all-features --locked && cargo make check --locked && OVERRIDE_CHAIN_CONFIGS=true cargo test --test integration_tests local_node && -cargo test --workspace && -FUEL_ALWAYS_USE_WASM=true cargo test --all-features --workspace && -cargo test -p fuel-core --no-default-features && -cargo test -p fuel-core-client --no-default-features && -cargo test -p fuel-core-chain-config --no-default-features && -cargo test --manifest-path version-compatibility/Cargo.toml --workspace +cargo nextest run --workspace && +FUEL_ALWAYS_USE_WASM=true cargo nextest run --all-features --workspace && +cargo nextest run -p fuel-core --no-default-features && +cargo nextest run -p fuel-core-client --no-default-features && +cargo nextest run -p fuel-core-chain-config --no-default-features && +cargo nextest run --manifest-path version-compatibility/Cargo.toml --workspace diff --git a/crates/fuel-core/src/executor.rs b/crates/fuel-core/src/executor.rs index 412d13c18cf..186b9fd44b7 100644 --- a/crates/fuel-core/src/executor.rs +++ b/crates/fuel-core/src/executor.rs @@ -6,14 +6,10 @@ mod tests { use crate as fuel_core; use fuel_core::database::Database; use fuel_core_executor::{ - executor::{ - OnceTransactionsSource, - MAX_TX_COUNT, - }, + executor::OnceTransactionsSource, ports::{ MaybeCheckedTransaction, RelayerPort, - TransactionsSource, }, refs::ContractRef, }; @@ -151,7 +147,6 @@ mod tests { Rng, SeedableRng, }; - use std::sync::Mutex; #[derive(Clone, Debug, Default)] struct Config { @@ -186,32 +181,6 @@ mod tests { } } - /// Bad transaction source: ignores the limit of `u16::MAX -1` transactions - /// that should be returned by [`TransactionsSource::next()`]. 
- /// It is used only for testing purposes - pub struct BadTransactionsSource { - transactions: Mutex>, - } - - impl BadTransactionsSource { - pub fn new(transactions: Vec) -> Self { - Self { - transactions: Mutex::new( - transactions - .into_iter() - .map(MaybeCheckedTransaction::Transaction) - .collect(), - ), - } - } - } - - impl TransactionsSource for BadTransactionsSource { - fn next(&self, _: u64, _: u16, _: u32) -> Vec { - std::mem::take(&mut *self.transactions.lock().unwrap()) - } - } - fn add_consensus_parameters( mut database: Database, consensus_parameters: &ConsensusParameters, @@ -2599,7 +2568,6 @@ mod tests { // One of two transactions is skipped. assert_eq!(skipped_transactions.len(), 1); let err = &skipped_transactions[0].1; - dbg!(err); assert!(matches!( err, &ExecutorError::TransactionValidity( @@ -3026,14 +2994,17 @@ mod tests { } #[test] + #[cfg(not(feature = "wasm-executor"))] fn block_producer_never_includes_more_than_max_tx_count_transactions() { + use fuel_core_executor::executor::max_tx_count; + let block_height = 1u32; let block_da_height = 2u64; let mut consensus_parameters = ConsensusParameters::default(); // Given - let transactions_in_tx_source = (MAX_TX_COUNT as usize) + 10; + let transactions_in_tx_source = (max_tx_count() as usize) + 10; consensus_parameters.set_block_gas_limit(u64::MAX); let config = Config { consensus_parameters, @@ -3057,20 +3028,50 @@ mod tests { // Then assert_eq!( result.block.transactions().len(), - (MAX_TX_COUNT as usize + 1) + (max_tx_count() as usize + 1) ); } #[test] + #[cfg(not(feature = "wasm-executor"))] fn block_producer_never_includes_more_than_max_tx_count_transactions_with_bad_tx_source( ) { + use fuel_core_executor::executor::max_tx_count; + use std::sync::Mutex; + + /// Bad transaction source: ignores the limit of `u16::MAX -1` transactions + /// that should be returned by [`TransactionsSource::next()`]. 
+ /// It is used only for testing purposes + pub struct BadTransactionsSource { + transactions: Mutex>, + } + + impl BadTransactionsSource { + pub fn new(transactions: Vec) -> Self { + Self { + transactions: Mutex::new( + transactions + .into_iter() + .map(MaybeCheckedTransaction::Transaction) + .collect(), + ), + } + } + } + + impl fuel_core_executor::ports::TransactionsSource for BadTransactionsSource { + fn next(&self, _: u64, _: u16, _: u32) -> Vec { + std::mem::take(&mut *self.transactions.lock().unwrap()) + } + } + let block_height = 1u32; let block_da_height = 2u64; let mut consensus_parameters = ConsensusParameters::default(); // Given - let transactions_in_tx_source = (MAX_TX_COUNT as usize) + 10; + let transactions_in_tx_source = (max_tx_count() as usize) + 10; consensus_parameters.set_block_gas_limit(u64::MAX); let config = Config { consensus_parameters, @@ -3102,7 +3103,7 @@ mod tests { // Then assert_eq!( result.block.transactions().len(), - (MAX_TX_COUNT as usize + 1) + (max_tx_count() as usize + 1) ); } diff --git a/crates/fuel-core/src/service/sub_services.rs b/crates/fuel-core/src/service/sub_services.rs index 342783c2bae..4fd45534b24 100644 --- a/crates/fuel-core/src/service/sub_services.rs +++ b/crates/fuel-core/src/service/sub_services.rs @@ -214,6 +214,7 @@ pub fn init_sub_services( |(p2p_config, (shared_state, request_receiver))| { fuel_core_p2p::service::new_service( chain_id, + last_height, p2p_config, shared_state, request_receiver, diff --git a/crates/fuel-core/src/state/rocks_db.rs b/crates/fuel-core/src/state/rocks_db.rs index d2a9f7bfc9c..513a7feaf2c 100644 --- a/crates/fuel-core/src/state/rocks_db.rs +++ b/crates/fuel-core/src/state/rocks_db.rs @@ -57,7 +57,10 @@ use std::{ Path, PathBuf, }, - sync::Arc, + sync::{ + Arc, + Mutex, + }, }; use tempfile::TempDir; @@ -95,6 +98,7 @@ impl Drop for DropResources { pub struct RocksDb { read_options: ReadOptions, db: Arc, + create_family: Arc>>, snapshot: Option>, metrics: Arc, // used for RAII 
@@ -270,7 +274,6 @@ where block_opts.set_block_size(16 * 1024); let mut opts = Options::default(); - opts.create_if_missing(true); opts.set_compression_type(DBCompressionType::Lz4); // TODO: Make it customizable https://github.com/FuelLabs/fuel-core/issues/1666 opts.set_max_total_wal_size(64 * 1024 * 1024); @@ -304,6 +307,10 @@ where } } + if cf_descriptors_to_open.is_empty() { + opts.create_if_missing(true); + } + let unknown_columns_to_open: BTreeMap<_, _> = existing_column_families .iter() .filter(|column_name| { @@ -341,22 +348,19 @@ where } .map_err(|e| DatabaseError::Other(e.into()))?; - // Setup cfs - for (name, opt) in cf_descriptors_to_create { - db.create_cf(name, &opt) - .map_err(|e| DatabaseError::Other(e.into()))?; - } - let db = Arc::new(db); + let create_family = Arc::new(Mutex::new(cf_descriptors_to_create)); let rocks_db = RocksDb { read_options: Self::generate_read_options(&None), snapshot: None, db, metrics, + create_family, _drop: Default::default(), _marker: Default::default(), }; + Ok(rocks_db) } @@ -383,6 +387,7 @@ where &self, ) -> RocksDb { let db = self.db.clone(); + let create_family = self.create_family.clone(); let metrics = self.metrics.clone(); let _drop = self._drop.clone(); @@ -402,6 +407,7 @@ where read_options: Self::generate_read_options(&snapshot), snapshot, db, + create_family, metrics, _drop, _marker: Default::default(), @@ -413,9 +419,33 @@ where } fn cf_u32(&self, column: u32) -> Arc { - self.db - .cf_handle(&Self::col_name(column)) - .expect("invalid column state") + let family = self.db.cf_handle(&Self::col_name(column)); + + match family { + None => { + let mut lock = self + .create_family + .lock() + .expect("The create family lock should be available"); + + let name = Self::col_name(column); + let Some(family) = lock.remove(&name) else { + return self + .db + .cf_handle(&Self::col_name(column)) + .expect("No column family found"); + }; + + self.db + .create_cf(&name, &family) + .expect("Couldn't create column family"); 
+ + let family = self.db.cf_handle(&name).expect("invalid column state"); + + family + } + Some(family) => family, + } } fn col_name(column: u32) -> String { diff --git a/crates/fuel-gas-price-algorithm/src/v0/tests/algorithm_v0_tests.rs b/crates/fuel-gas-price-algorithm/src/v0/tests/algorithm_v0_tests.rs index 0fad86ed2d6..a903a8efe6b 100644 --- a/crates/fuel-gas-price-algorithm/src/v0/tests/algorithm_v0_tests.rs +++ b/crates/fuel-gas-price-algorithm/src/v0/tests/algorithm_v0_tests.rs @@ -42,7 +42,6 @@ fn _worst_case__correctly_calculates_value( let change_amount = expected.saturating_mul(percentage).saturating_div(100); expected = expected.saturating_add(change_amount); } - dbg!(actual, expected); assert!(actual >= expected); } diff --git a/crates/fuel-gas-price-algorithm/src/v1/tests/algorithm_v1_tests.rs b/crates/fuel-gas-price-algorithm/src/v1/tests/algorithm_v1_tests.rs index 5578af6bdc2..9e6be41073b 100644 --- a/crates/fuel-gas-price-algorithm/src/v1/tests/algorithm_v1_tests.rs +++ b/crates/fuel-gas-price-algorithm/src/v1/tests/algorithm_v1_tests.rs @@ -72,7 +72,6 @@ fn _worst_case__correctly_calculates_value( let expected = expected_exec_price.saturating_add(expected_da_gas_price); - dbg!(actual, expected); assert!(actual >= expected); } diff --git a/crates/services/consensus_module/poa/src/service_test/trigger_tests.rs b/crates/services/consensus_module/poa/src/service_test/trigger_tests.rs index 8b403e97fc1..b1fa82ac2c3 100644 --- a/crates/services/consensus_module/poa/src/service_test/trigger_tests.rs +++ b/crates/services/consensus_module/poa/src/service_test/trigger_tests.rs @@ -502,7 +502,6 @@ async fn interval_trigger_even_if_queued_tx_events() { let block_creation_waiter = block_creation_notifier.clone(); tokio::task::spawn(async move { ctx.block_import.recv().await.unwrap(); - dbg!("First block produced"); block_creation_notifier.notify_waiters(); }); block_creation_waiter.notified().await; diff --git a/crates/services/executor/src/executor.rs 
b/crates/services/executor/src/executor.rs index 2e9b6ac6e0d..bc690c52d3d 100644 --- a/crates/services/executor/src/executor.rs +++ b/crates/services/executor/src/executor.rs @@ -164,7 +164,14 @@ use alloc::{ /// The maximum amount of transactions that can be included in a block, /// excluding the mint transaction. -pub const MAX_TX_COUNT: u16 = u16::MAX.saturating_sub(1); +#[cfg(not(feature = "test-helpers"))] +pub const fn max_tx_count() -> u16 { + u16::MAX.saturating_sub(1) +} +#[cfg(feature = "test-helpers")] +pub const fn max_tx_count() -> u16 { + 1024 +} pub struct OnceTransactionsSource { transactions: ParkingMutex>, @@ -593,7 +600,7 @@ where // When processing l2 transactions, we must take into account transactions from the l1 // that have been included in the block already (stored in `data.tx_count`), as well // as the final mint transaction. - let mut remaining_tx_count = MAX_TX_COUNT.saturating_sub(data.tx_count); + let mut remaining_tx_count = max_tx_count().saturating_sub(data.tx_count); let mut regular_tx_iter = l2_tx_source .next( @@ -636,7 +643,7 @@ where remaining_gas_limit = block_gas_limit.saturating_sub(data.used_gas); remaining_block_transaction_size_limit = block_transaction_size_limit.saturating_sub(data.used_size); - remaining_tx_count = MAX_TX_COUNT.saturating_sub(data.tx_count); + remaining_tx_count = max_tx_count().saturating_sub(data.tx_count); } regular_tx_iter = l2_tx_source diff --git a/crates/services/gas_price_service/src/v1/da_source_service/service.rs b/crates/services/gas_price_service/src/v1/da_source_service/service.rs index e20a506a097..74f1fcafb42 100644 --- a/crates/services/gas_price_service/src/v1/da_source_service/service.rs +++ b/crates/services/gas_price_service/src/v1/da_source_service/service.rs @@ -24,6 +24,7 @@ impl SharedState { fn new(sender: Sender) -> Self { Self(sender) } + pub fn subscribe(&self) -> tokio::sync::broadcast::Receiver { self.0.subscribe() } @@ -40,7 +41,7 @@ where shared_state: SharedState, } 
-const DA_BLOCK_COSTS_CHANNEL_SIZE: usize = 10; +const DA_BLOCK_COSTS_CHANNEL_SIZE: usize = 16 * 1024; const POLLING_INTERVAL_MS: u64 = 10_000; impl DaSourceService diff --git a/crates/services/p2p/src/discovery.rs b/crates/services/p2p/src/discovery.rs index 725024c4939..e41d228883d 100644 --- a/crates/services/p2p/src/discovery.rs +++ b/crates/services/p2p/src/discovery.rs @@ -309,7 +309,6 @@ mod tests { for _ in 1..num_of_swarms { let (swarm, peer_addr, peer_id) = build_fuel_discovery(vec![bootstrap_addr.clone()]); - discovery_swarms.push((swarm, peer_addr, peer_id)); } @@ -369,7 +368,7 @@ mod tests { .add_address(&peer_id, unroutable_peer_addr.clone()); } SwarmEvent::ConnectionClosed { peer_id, .. } => { - panic!("PeerId {peer_id:?} disconnected"); + dbg!(peer_id); } _ => {} } diff --git a/crates/services/p2p/src/service.rs b/crates/services/p2p/src/service.rs index 0004a15ea63..3a33ef5f4e1 100644 --- a/crates/services/p2p/src/service.rs +++ b/crates/services/p2p/src/service.rs @@ -401,6 +401,7 @@ impl Broadcast for SharedState { /// Uninitialized task for the p2p that can be upgraded later into [`Task`]. 
pub struct UninitializedTask { chain_id: ChainId, + last_height: BlockHeight, view_provider: V, next_block_height: BoxStream, /// Receive internal Task Requests @@ -442,8 +443,10 @@ pub struct HeartbeatPeerReputationConfig { } impl UninitializedTask { + #[allow(clippy::too_many_arguments)] pub fn new( chain_id: ChainId, + last_height: BlockHeight, config: Config, shared_state: SharedState, request_receiver: Receiver, @@ -455,6 +458,7 @@ impl UninitializedTask { Self { chain_id, + last_height, view_provider, tx_pool, next_block_height, @@ -762,6 +766,7 @@ where ) -> anyhow::Result { let Self { chain_id, + last_height, view_provider, next_block_height, request_receiver, @@ -799,6 +804,7 @@ where PostcardCodec::new(max_block_size), ) .await?; + p2p_service.update_block_height(last_height); p2p_service.start().await?; let next_check_time = @@ -1309,8 +1315,10 @@ pub fn build_shared_state( ) } +#[allow(clippy::too_many_arguments)] pub fn new_service( chain_id: ChainId, + last_height: BlockHeight, p2p_config: Config, shared_state: SharedState, request_receiver: Receiver, @@ -1326,6 +1334,7 @@ where { let task = UninitializedTask::new( chain_id, + last_height, p2p_config, shared_state, request_receiver, @@ -1455,6 +1464,7 @@ pub mod tests { let (shared_state, request_receiver) = build_shared_state(p2p_config.clone()); let service = new_service( ChainId::default(), + 0.into(), p2p_config, shared_state, request_receiver, diff --git a/crates/services/sync/src/import/back_pressure_tests.rs b/crates/services/sync/src/import/back_pressure_tests.rs index 7974890015f..9610d552dba 100644 --- a/crates/services/sync/src/import/back_pressure_tests.rs +++ b/crates/services/sync/src/import/back_pressure_tests.rs @@ -29,7 +29,7 @@ struct Input { )] #[test_case( Input { - headers: Duration::from_millis(10), + headers: Duration::from_millis(100), ..Default::default() }, State::new(None, 0), @@ -42,7 +42,7 @@ struct Input { )] #[test_case( Input { - headers: Duration::from_millis(10), + 
headers: Duration::from_millis(100), ..Default::default() }, State::new(None, 1000), @@ -55,7 +55,7 @@ struct Input { )] #[test_case( Input { - transactions: Duration::from_millis(10), + transactions: Duration::from_millis(100), ..Default::default() }, State::new(None, 1000), @@ -68,7 +68,7 @@ struct Input { )] #[test_case( Input { - consensus: Duration::from_millis(10), + consensus: Duration::from_millis(100), ..Default::default() }, State::new(None, 1000), @@ -81,7 +81,7 @@ struct Input { )] #[test_case( Input { - executes: Duration::from_millis(10), + executes: Duration::from_millis(20), ..Default::default() }, State::new(None, 1000), diff --git a/crates/services/sync/src/state.rs b/crates/services/sync/src/state.rs index 7bf704c2b66..0cfbfe8e8f5 100644 --- a/crates/services/sync/src/state.rs +++ b/crates/services/sync/src/state.rs @@ -218,4 +218,10 @@ impl State { _ => None, } } + + #[cfg(test)] + /// Get the current status. + pub fn status(&self) -> &Status { + &self.status + } } diff --git a/crates/services/upgradable-executor/Cargo.toml b/crates/services/upgradable-executor/Cargo.toml index 4b340df0a25..16eeb0fc391 100644 --- a/crates/services/upgradable-executor/Cargo.toml +++ b/crates/services/upgradable-executor/Cargo.toml @@ -34,7 +34,6 @@ wasmtime = { version = "23.0.2", default-features = false, features = [ anyhow = { workspace = true } fuel-core-storage = { workspace = true, features = ["test-helpers"] } fuel-core-types = { workspace = true, features = ["test-helpers"] } -ntest = "0.9.2" [build-dependencies] fuel-core-wasm-executor = { workspace = true, optional = true, default-features = false } diff --git a/crates/services/upgradable-executor/src/executor.rs b/crates/services/upgradable-executor/src/executor.rs index 38ebabe0283..2b8c72fa5af 100644 --- a/crates/services/upgradable-executor/src/executor.rs +++ b/crates/services/upgradable-executor/src/executor.rs @@ -739,9 +739,6 @@ where #[allow(unexpected_cfgs)] // for cfg(coverage) #[cfg(test)] 
mod test { - #[cfg(coverage)] - use ntest as _; // Only used outside cdg(coverage) - use super::*; use fuel_core_storage::{ kv_store::Value, @@ -946,7 +943,6 @@ mod test { mod native { use super::*; use crate::executor::Executor; - use ntest as _; #[test] fn can_validate_block() { @@ -1111,7 +1107,6 @@ mod test { // If it doesn't cache the modules, the test will fail with a timeout. #[test] #[cfg(not(coverage))] // Too slow for coverage - #[ntest::timeout(60_000)] fn reuse_cached_compiled_module__native_strategy() { // Given let next_version = Executor::::VERSION + 1; @@ -1119,10 +1114,15 @@ mod test { let executor = Executor::native(storage, DisabledRelayer, Config::default()); let block = valid_block(next_version); + executor.validate(&block).map(|_| ()).unwrap(); // When for _ in 0..1000 { + let start = std::time::Instant::now(); let result = executor.validate(&block).map(|_| ()); + if start.elapsed().as_secs() > 1 { + panic!("The test is too slow"); + } // Then assert_eq!(Ok(()), result); } @@ -1132,7 +1132,6 @@ mod test { // If it doesn't cache the modules, the test will fail with a timeout. 
#[test] #[cfg(not(coverage))] // Too slow for coverage - #[ntest::timeout(60_000)] fn reuse_cached_compiled_module__wasm_strategy() { // Given let next_version = Executor::::VERSION + 1; @@ -1140,10 +1139,15 @@ mod test { let executor = Executor::wasm(storage, DisabledRelayer, Config::default()); let block = valid_block(next_version); + executor.validate(&block).map(|_| ()).unwrap(); // When for _ in 0..1000 { + let start = std::time::Instant::now(); let result = executor.validate(&block).map(|_| ()); + if start.elapsed().as_secs() > 1 { + panic!("The test is too slow"); + } // Then assert_eq!(Ok(()), result); } diff --git a/tests/Cargo.toml b/tests/Cargo.toml index 3c4ad33502e..291f41ab7d7 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -28,6 +28,8 @@ cynic = { workspace = true } ethers = "2" fuel-core = { path = "../crates/fuel-core", default-features = false, features = [ "smt", + "p2p", + "relayer", "wasm-executor", "test-helpers", ] } @@ -35,7 +37,7 @@ fuel-core-benches = { path = "../benches" } fuel-core-bin = { path = "../bin/fuel-core", features = ["parquet", "p2p"] } fuel-core-client = { path = "../crates/client", features = ["test-helpers"] } fuel-core-compression = { path = "../crates/compression" } -fuel-core-executor = { workspace = true } +fuel-core-executor = { workspace = true, features = ["test-helpers"] } fuel-core-gas-price-service = { path = "../crates/services/gas_price_service" } fuel-core-p2p = { path = "../crates/services/p2p", features = [ "test-helpers", @@ -43,7 +45,7 @@ fuel-core-p2p = { path = "../crates/services/p2p", features = [ fuel-core-poa = { path = "../crates/services/consensus_module/poa" } fuel-core-relayer = { path = "../crates/services/relayer", features = [ "test-helpers", -], optional = true } +] } fuel-core-storage = { path = "../crates/storage", features = ["test-helpers"] } fuel-core-trace = { path = "../crates/trace" } fuel-core-txpool = { path = "../crates/services/txpool_v2", features = [ @@ -79,8 +81,6 @@ 
proptest = { workspace = true } tracing = { workspace = true } [features] -default = ["fuel-core/default", "relayer"] -p2p = ["fuel-core/p2p", "fuel-core-p2p"] -relayer = ["fuel-core/relayer", "fuel-core-relayer"] -wasm-executor = ["fuel-core/wasm-executor"] +default = ["fuel-core/default"] +only-p2p = ["fuel-core-p2p"] aws-kms = ["dep:aws-config", "dep:aws-sdk-kms", "fuel-core-bin/aws-kms"] diff --git a/tests/tests/aws_kms.rs b/tests/tests/aws_kms.rs new file mode 100644 index 00000000000..31dcb8e0448 --- /dev/null +++ b/tests/tests/aws_kms.rs @@ -0,0 +1,80 @@ +use fuel_core::combined_database::CombinedDatabase; +use fuel_core_storage::transactional::AtomicView; +use fuel_core_types::blockchain::consensus::Consensus; +use test_helpers::fuel_core_driver::FuelCoreDriver; + +#[tokio::test] +async fn can_get_sealed_block_from_poa_produced_block_when_signing_with_kms() { + use fuel_core_types::fuel_crypto::PublicKey; + use k256::pkcs8::DecodePublicKey; + + // This test is only enabled if the environment variable is set + let Some(kms_arn) = option_env!("FUEL_CORE_TEST_AWS_KMS_ARN") else { + return; + }; + + // Get the public key for the KMS key + let config = aws_config::load_from_env().await; + let kms_client = aws_sdk_kms::Client::new(&config); + let poa_public_der = kms_client + .get_public_key() + .key_id(kms_arn) + .send() + .await + .expect("Unable to fetch public key from KMS") + .public_key + .unwrap() + .into_inner(); + let poa_public = k256::PublicKey::from_public_key_der(&poa_public_der) + .expect("invalid DER public key from AWS KMS"); + let poa_public = PublicKey::from(poa_public); + + // start node with the kms enabled and produce some blocks + let num_blocks = 100; + let args = vec![ + "--debug", + "--poa-instant", + "true", + "--consensus-aws-kms", + kms_arn, + ]; + let driver = FuelCoreDriver::spawn(&args).await.unwrap(); + let _ = driver + .client + .produce_blocks(num_blocks, None) + .await + .unwrap(); + + // stop the node and just grab the database 
+ let db_path = driver.kill().await; + let db = CombinedDatabase::open(db_path.path(), 1024 * 1024, Default::default(), 512) + .unwrap(); + + let view = db.on_chain().latest_view().unwrap(); + + // verify signatures and ensure that the block producer wont change + let mut block_producer = None; + for height in 1..=num_blocks { + let sealed_block = view + .get_sealed_block_by_height(&height.into()) + .unwrap() + .expect("expected sealed block to be available"); + let block_id = sealed_block.entity.id(); + let signature = match sealed_block.consensus { + Consensus::PoA(ref poa) => poa.signature, + _ => panic!("Not expected consensus"), + }; + signature + .verify(&poa_public, &block_id.into_message()) + .expect("failed to verify signature"); + let this_bp = sealed_block + .consensus + .block_producer(&block_id) + .expect("Block should have a block producer"); + if let Some(bp) = block_producer { + assert_eq!(bp, this_bp, "Block producer changed"); + } else { + block_producer = Some(this_bp); + } + } +} diff --git a/tests/tests/blocks.rs b/tests/tests/blocks.rs index 7a5ba4688d5..35e40130373 100644 --- a/tests/tests/blocks.rs +++ b/tests/tests/blocks.rs @@ -365,7 +365,7 @@ mod full_block { }, FuelClient, }; - use fuel_core_executor::executor; + use fuel_core_executor::executor::max_tx_count; use fuel_core_txpool::config::{ HeavyWorkConfig, PoolLimits, @@ -479,7 +479,7 @@ mod full_block { let srv = FuelService::new_node(patched_node_config).await.unwrap(); let client = FuelClient::from(srv.bound_address); - let tx_count: u64 = 66_000; + let tx_count: u64 = max_tx_count() as u64 + 100; let txs = (1..=tx_count) .map(|i| test_helpers::make_tx(&mut rng, i, max_gas_limit)) .collect_vec(); @@ -505,11 +505,11 @@ mod full_block { assert_eq!( second_last_block.transactions.len(), - executor::MAX_TX_COUNT as usize + 1 // Mint transaction for one block + max_tx_count() as usize + 1 // Mint transaction for one block ); assert_eq!( last_block.transactions.len(), - (tx_count as usize 
- (executor::MAX_TX_COUNT as usize)) + 1 /* Mint transaction for second block */ + (tx_count as usize - (max_tx_count() as usize)) + 1 /* Mint transaction for second block */ ); } } diff --git a/tests/tests/dos.rs b/tests/tests/dos.rs index 05c0e0fc030..03092699ef4 100644 --- a/tests/tests/dos.rs +++ b/tests/tests/dos.rs @@ -321,7 +321,6 @@ async fn complex_queries__100_block_headers__works() { let url = format!("http://{}/v1/graphql", node.bound_address); let result = send_graph_ql_query(&url, query).await; - dbg!(&result); assert!(result.contains("transactions")); } diff --git a/tests/tests/gas_price.rs b/tests/tests/gas_price.rs index acd8b7d2ad4..5b24470ca34 100644 --- a/tests/tests/gas_price.rs +++ b/tests/tests/gas_price.rs @@ -260,7 +260,7 @@ async fn estimate_gas_price__returns_min_gas_price_if_starting_gas_price_is_zero assert_eq!(MIN_GAS_PRICE, actual) } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn latest_gas_price__if_node_restarts_gets_latest_value() { // given let args = vec![ @@ -297,6 +297,7 @@ async fn latest_gas_price__if_node_restarts_gets_latest_value() { let LatestGasPrice { gas_price, .. } = new_latest_gas_price; let actual = gas_price; assert_eq!(expected, actual); + recovered_driver.kill().await; } #[tokio::test] @@ -357,7 +358,7 @@ async fn dry_run_opt__zero_gas_price_equal_to_none_gas_price() { assert_eq!(total_gas, total_gas_zero_gas_price); } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn startup__can_override_gas_price_values_by_changing_config() { // given let args = vec![ @@ -413,4 +414,5 @@ async fn startup__can_override_gas_price_values_by_changing_config() { l2_block_height, .. 
} = new_metadata.try_into().unwrap(); assert_eq!(l2_block_height, new_height); + recovered_driver.kill().await; } diff --git a/tests/tests/lib.rs b/tests/tests/lib.rs index 5337e134358..c42218b045f 100644 --- a/tests/tests/lib.rs +++ b/tests/tests/lib.rs @@ -1,38 +1,69 @@ #![deny(unused_must_use)] #![deny(warnings)] +#[cfg(not(feature = "only-p2p"))] mod balances; +#[cfg(not(feature = "only-p2p"))] mod blob; +#[cfg(not(feature = "only-p2p"))] mod blocks; +#[cfg(not(feature = "only-p2p"))] mod chain; +#[cfg(not(feature = "only-p2p"))] mod coin; +#[cfg(not(feature = "only-p2p"))] mod coins; +#[cfg(not(feature = "only-p2p"))] mod contract; +#[cfg(not(feature = "only-p2p"))] mod da_compression; +#[cfg(not(feature = "only-p2p"))] mod dap; +#[cfg(not(feature = "only-p2p"))] mod debugger; +#[cfg(not(feature = "only-p2p"))] mod dos; +#[cfg(not(feature = "only-p2p"))] mod fee_collection_contract; +#[cfg(not(feature = "only-p2p"))] mod gas_price; +#[cfg(not(feature = "only-p2p"))] mod health; +#[cfg(not(feature = "only-p2p"))] mod helpers; +#[cfg(not(feature = "only-p2p"))] mod local_node; +#[cfg(not(feature = "only-p2p"))] mod messages; +#[cfg(not(feature = "only-p2p"))] mod metrics; +#[cfg(not(feature = "only-p2p"))] mod node_info; +#[cfg(not(feature = "only-p2p"))] mod poa; +#[cfg(not(feature = "only-p2p"))] mod recovery; +#[cfg(not(feature = "only-p2p"))] mod regenesis; -#[cfg(feature = "relayer")] +#[cfg(not(feature = "only-p2p"))] mod relayer; +#[cfg(not(feature = "only-p2p"))] mod snapshot; +#[cfg(not(feature = "only-p2p"))] mod state_rewind; -#[cfg(feature = "p2p")] -mod sync; +#[cfg(not(feature = "only-p2p"))] mod trigger_integration; +#[cfg(not(feature = "only-p2p"))] mod tx; -#[cfg(feature = "p2p")] -mod tx_gossip; +#[cfg(not(feature = "only-p2p"))] mod vm_storage; +#[cfg(feature = "only-p2p")] +mod sync; +#[cfg(feature = "only-p2p")] +mod tx_gossip; + +#[cfg(feature = "aws-kms")] +mod aws_kms; + fuel_core_trace::enable_tracing!(); diff --git 
a/tests/tests/local_node.rs b/tests/tests/local_node.rs index 6176ada75c5..88467b57ff0 100644 --- a/tests/tests/local_node.rs +++ b/tests/tests/local_node.rs @@ -104,4 +104,5 @@ async fn start_local_node_without_any_arguments() { block.header.state_transition_bytecode_version, LATEST_STATE_TRANSITION_VERSION ); + service.kill().await; } diff --git a/tests/tests/poa.rs b/tests/tests/poa.rs index 0f4f5a4fb97..2de2ee4e82c 100644 --- a/tests/tests/poa.rs +++ b/tests/tests/poa.rs @@ -92,83 +92,6 @@ async fn can_get_sealed_block_from_poa_produced_block() { .expect("failed to verify signature"); } -#[tokio::test] -#[cfg(feature = "aws-kms")] -async fn can_get_sealed_block_from_poa_produced_block_when_signing_with_kms() { - use fuel_core_types::fuel_crypto::PublicKey; - use k256::pkcs8::DecodePublicKey; - - // This test is only enabled if the environment variable is set - let Some(kms_arn) = option_env!("FUEL_CORE_TEST_AWS_KMS_ARN") else { - return; - }; - - // Get the public key for the KMS key - let config = aws_config::load_from_env().await; - let kms_client = aws_sdk_kms::Client::new(&config); - let poa_public_der = kms_client - .get_public_key() - .key_id(kms_arn) - .send() - .await - .expect("Unable to fetch public key from KMS") - .public_key - .unwrap() - .into_inner(); - let poa_public = k256::PublicKey::from_public_key_der(&poa_public_der) - .expect("invalid DER public key from AWS KMS"); - let poa_public = PublicKey::from(poa_public); - - // start node with the kms enabled and produce some blocks - let num_blocks = 100; - let args = vec![ - "--debug", - "--poa-instant", - "true", - "--consensus-aws-kms", - kms_arn, - ]; - let driver = FuelCoreDriver::spawn(&args).await.unwrap(); - let _ = driver - .client - .produce_blocks(num_blocks, None) - .await - .unwrap(); - - // stop the node and just grab the database - let db_path = driver.kill().await; - let db = CombinedDatabase::open(db_path.path(), 1024 * 1024, Default::default(), 512) - .unwrap(); - - let view = 
db.on_chain().latest_view().unwrap(); - - // verify signatures and ensure that the block producer wont change - let mut block_producer = None; - for height in 1..=num_blocks { - let sealed_block = view - .get_sealed_block_by_height(&height.into()) - .unwrap() - .expect("expected sealed block to be available"); - let block_id = sealed_block.entity.id(); - let signature = match sealed_block.consensus { - Consensus::PoA(ref poa) => poa.signature, - _ => panic!("Not expected consensus"), - }; - signature - .verify(&poa_public, &block_id.into_message()) - .expect("failed to verify signature"); - let this_bp = sealed_block - .consensus - .block_producer(&block_id) - .expect("Block should have a block producer"); - if let Some(bp) = block_producer { - assert_eq!(bp, this_bp, "Block producer changed"); - } else { - block_producer = Some(this_bp); - } - } -} - #[tokio::test(flavor = "multi_thread")] async fn starting_node_with_predefined_nodes_produces_these_predefined_blocks( ) -> anyhow::Result<()> { @@ -229,7 +152,7 @@ async fn starting_node_with_predefined_nodes_produces_these_predefined_blocks( }) .collect(); assert_eq!(predefined_blocks, blocks_from_new_node); - + new_core.kill().await; Ok(()) } @@ -259,7 +182,6 @@ mod p2p { // Then starts second_producer that uses the first one as a reserved peer. 
// second_producer should not produce blocks while the first one is producing // after the first_producer stops, second_producer should start producing blocks - #[ignore = "seems to be flaky, issue: https://github.com/FuelLabs/fuel-core/issues/2351"] #[tokio::test(flavor = "multi_thread")] async fn test_poa_multiple_producers() { const SYNC_TIMEOUT: u64 = 30; diff --git a/tests/tests/recovery.rs b/tests/tests/recovery.rs index d1f9cb9d7b9..60806b58f05 100644 --- a/tests/tests/recovery.rs +++ b/tests/tests/recovery.rs @@ -1,5 +1,6 @@ #![allow(non_snake_case)] +use clap::Parser; use fuel_core_storage::transactional::HistoricalView; use fuel_core_types::fuel_types::BlockHeight; use proptest::{ @@ -61,11 +62,12 @@ async fn off_chain_worker_can_recover_on_start_up_when_is_behind() -> anyhow::Re Some(BlockHeight::new(HEIGHTS)) ); + recovered_driver.kill().await; Ok(()) } prop_compose! { - fn height_and_lower_height()(height in 2..100u32)(height in Just(height), lower_height in 1..height) -> (u32, u32) { + fn height_and_lower_height()(height in 2..15u32)(height in Just(height), lower_height in 1..height) -> (u32, u32) { (height, lower_height) } } @@ -90,13 +92,18 @@ async fn _gas_price_updater__can_recover_on_startup_when_gas_price_db_is_ahead( database.on_chain().latest_height(), Some(BlockHeight::new(height)) ); - let diff = height - lower_height; - for _ in 0..diff { - database.on_chain().rollback_last_block()?; - database.off_chain().rollback_last_block()?; - } - assert!(database.on_chain().latest_height() < database.gas_price().latest_height()); let temp_dir = driver.kill().await; + let target_block_height = lower_height.to_string(); + let args = [ + "_IGNORED_", + "--db-path", + temp_dir.path().to_str().unwrap(), + "--target-block-height", + target_block_height.as_str(), + ]; + let command = fuel_core_bin::cli::rollback::Command::parse_from(args); + tracing::info!("Rolling back to block {}", target_block_height); + fuel_core_bin::cli::rollback::exec(command).await?; 
// When let recovered_driver = FuelCoreDriver::spawn_with_directory( @@ -125,6 +132,7 @@ async fn _gas_price_updater__can_recover_on_startup_when_gas_price_db_is_ahead( assert_eq!(actual_onchain_height, expected_onchain_height); assert_eq!(actual_gas_price_height, expected_gas_price_height); + recovered_driver.kill().await; Ok(()) } @@ -190,6 +198,7 @@ async fn _gas_price_updater__can_recover_on_startup_when_gas_price_db_is_behind( Some(BlockHeight::new(height)) ); + recovered_driver.kill().await; Ok(()) } @@ -263,6 +272,7 @@ async fn gas_price_updater__if_no_metadata_history_start_from_current_block( Some(BlockHeight::new(next_height)) ); + recovered_driver.kill().await; Ok(()) } diff --git a/tests/tests/regenesis.rs b/tests/tests/regenesis.rs index 6e92f3dad40..67e62fda911 100644 --- a/tests/tests/regenesis.rs +++ b/tests/tests/regenesis.rs @@ -176,6 +176,7 @@ async fn test_regenesis_old_blocks_are_preserved() -> anyhow::Result<()> { .expect("The block and all related data should migrate"); } + core.kill().await; Ok(()) } @@ -267,6 +268,7 @@ async fn test_regenesis_spent_messages_are_preserved() -> anyhow::Result<()> { .expect("Failed to get message status"); assert_eq!(status, MessageStatus::Spent); + core.kill().await; Ok(()) } @@ -334,6 +336,7 @@ async fn test_regenesis_processed_transactions_are_preserved() -> anyhow::Result "Unexpected message {reason:?}" ); + core.kill().await; Ok(()) } @@ -479,6 +482,7 @@ async fn test_regenesis_message_proofs_are_preserved() -> anyhow::Result<()> { )); } + core.kill().await; Ok(()) } @@ -540,6 +544,7 @@ async fn starting_node_with_same_chain_config_keeps_genesis() -> anyhow::Result< .consensus; assert_eq!(original_consensus, non_modified_consensus); + core.kill().await; Ok(()) } @@ -601,6 +606,7 @@ async fn starting_node_with_new_chain_config_updates_genesis() -> anyhow::Result .consensus; assert_ne!(original_consensus, modified_consensus); + core.kill().await; Ok(()) } @@ -689,6 +695,7 @@ async fn 
starting_node_with_overwritten_old_poa_key_doesnt_rollback_the_state( .height; assert_eq!(original_block_height, block_height_after_override); + core.kill().await; Ok(()) } @@ -730,6 +737,7 @@ async fn starting_empty_node_with_overwritten_poa_works() -> anyhow::Result<()> let core = result.expect("Failed to start the node"); produce_block_with_tx(&mut rng, &core.client).await; + core.kill().await; Ok(()) } @@ -812,7 +820,7 @@ async fn starting_node_with_overwritten_new_poa_key_rollbacks_the_state( .height; assert_ne!(original_block_height, block_height_after_override); assert_eq!(override_height - 1, block_height_after_override); - + core.kill().await; Ok(()) } @@ -895,5 +903,6 @@ async fn starting_node_with_overwritten_new_poa_key_from_the_future_doesnt_rollb .height; assert_eq!(original_block_height, block_height_after_override); + core.kill().await; Ok(()) } diff --git a/tests/tests/state_rewind.rs b/tests/tests/state_rewind.rs index 3c8a7af810b..3aea2950050 100644 --- a/tests/tests/state_rewind.rs +++ b/tests/tests/state_rewind.rs @@ -77,7 +77,7 @@ async fn validate_block_at_any_height__only_transfers() -> anyhow::Result<()> { let node = &driver.node; // Given - const TOTAL_BLOCKS: u64 = 5000; + const TOTAL_BLOCKS: u64 = 1000; const MIN_AMOUNT: u64 = 123456; let mut last_block_height = 0u32; let mut database_modifications = std::collections::HashMap::new(); @@ -114,6 +114,7 @@ async fn validate_block_at_any_height__only_transfers() -> anyhow::Result<()> { assert_eq!(&actual_changes, expected_changes); } + driver.kill().await; Ok(()) } @@ -211,6 +212,7 @@ async fn rollback_existing_chain_to_target_height_and_verify( .latest_height_from_metadata(); assert_eq!(Ok(Some(BlockHeight::new(target_height))), latest_height); + driver.kill().await; Ok(()) }