diff --git a/.env.dev b/.env.dev
index 390e34774..f48e6b915 100644
--- a/.env.dev
+++ b/.env.dev
@@ -16,7 +16,7 @@ GROVE_PORTAL_STAGING_ETH_MAINNET=https://eth-mainnet.rpc.grove.town
# The "protocol" field here instructs the Grove gateway which network to use
JSON_RPC_DATA_ETH_BLOCK_HEIGHT='{"protocol": "shannon-testnet","jsonrpc":"2.0","id":"0","method":"eth_blockNumber", "params": []}'
-# On-chain module account addresses. Search for `func TestModuleAddress` in the
+# Onchain module account addresses. Search for `func TestModuleAddress` in the
# codebase to get an understanding of how we got these values.
APPLICATION_MODULE_ADDRESS=pokt1rl3gjgzexmplmds3tq3r3yk84zlwdl6djzgsvm
SUPPLIER_MODULE_ADDRESS=pokt1j40dzzmn6cn9kxku7a5tjnud6hv37vesr5ccaa
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
index dba8e6fea..f387c62d7 100644
--- a/.github/pull_request_template.md
+++ b/.github/pull_request_template.md
@@ -1,26 +1,15 @@
-
-
## Summary
-
+< One line summary >
-## Issue
+Changes:
+- < Change 1 >
+- < Change 2 >
-
+## Issue
-- #{ISSUE_NUMBER}
+- Description: < Description >
+- Issue: #{ISSUE_NUMBER}
## Type of change
@@ -33,28 +22,11 @@ Select one or more from the following:
- [ ] Documentation
- [ ] Other (specify)
-## Testing
-
-
-
-- [ ] **Documentation**: `make docusaurus_start`; only needed if you make doc changes
-- [ ] **Unit Tests**: `make go_develop_and_test`
-- [ ] **LocalNet E2E Tests**: `make test_e2e`
-- [ ] **DevNet E2E Tests**: Add the `devnet-test-e2e` label to the PR.
-
## Sanity Checklist
-- [ ] I have tested my changes using the available tooling
-- [ ] I have commented my code
-- [ ] I have performed a self-review of my own code; both comments & source code
-- [ ] I create and reference any new tickets, if applicable
-- [ ] I have left TODOs throughout the codebase, if applicable
+- [ ] I have updated the GitHub Issue `assignees`, `reviewers`, `labels`, `project`, `iteration` and `milestone`
+- [ ] For docs, I have run `make docusaurus_start`
+- [ ] For code, I have run `make go_develop_and_test` and `make test_e2e`
+- [ ] For code, I have added the `devnet-test-e2e` label to run E2E tests in CI
+- [ ] For configurations, I have updated the documentation
+- [ ] I added TODOs where applicable
diff --git a/.github/workflows-helpers/run-e2e-test-job-template.yaml b/.github/workflows-helpers/run-e2e-test-job-template.yaml
index 9db41ed01..ca03ca867 100644
--- a/.github/workflows-helpers/run-e2e-test-job-template.yaml
+++ b/.github/workflows-helpers/run-e2e-test-job-template.yaml
@@ -37,7 +37,7 @@ spec:
ls -l /root/.poktroll/keyring-test/ && \
poktrolld q gateway list-gateway --node=$POCKET_NODE && \
poktrolld q application list-application --node=$POCKET_NODE && \
- poktrolld q supplier list-supplier --node=$POCKET_NODE && \
+ poktrolld q supplier list-suppliers --node=$POCKET_NODE && \
make acc_initialize_pubkeys && \
go test -v ./e2e/tests/... -tags=e2e
env:
@@ -50,13 +50,13 @@ spec:
- name: POKTROLLD_HOME
value: /root/.poktroll
- name: PATH_URL
- value: http://${NAMESPACE}-path:3000/v1
+ value: http://${NAMESPACE}-path:3069/v1
# PATH relies on subdomains to get the requested service but our DevNet infra is not
# built to expose arbitrary subdomains and supporting it would be a significant effort.
# As a workaround, PATH_HOST_OVERRIDE is used as the host:port to connect to PATH while
# the subdomain is passed as a Host header in the request.
- name: PATH_HOST_OVERRIDE
- value: ${NAMESPACE}-path:3000
+ value: ${NAMESPACE}-path:3069
volumeMounts:
- mountPath: /root/.poktroll/keyring-test/
name: writable-keys-volume
diff --git a/.github/workflows/main-build.yml b/.github/workflows/main-build.yml
index 8530c3d23..f9b3d03d7 100644
--- a/.github/workflows/main-build.yml
+++ b/.github/workflows/main-build.yml
@@ -13,7 +13,7 @@ jobs:
build-push-container:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
with:
fetch-depth: "0" # Per https://github.com/ignite/cli/issues/1674#issuecomment-1144619147
diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml
index 06ffee0fc..5fb703abc 100644
--- a/.github/workflows/release-artifacts.yml
+++ b/.github/workflows/release-artifacts.yml
@@ -13,7 +13,7 @@ jobs:
release-artifacts:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
with:
fetch-depth: "0" # Per https://github.com/ignite/cli/issues/1674#issuecomment-1144619147
@@ -62,7 +62,7 @@ jobs:
type=sha,format=long,suffix=-prod
- name: Login to GitHub Container Registry
- uses: docker/login-action@v3
+ uses: docker/login-action@v4
with:
registry: ghcr.io
username: ${{ github.actor }}
@@ -82,7 +82,7 @@ jobs:
# TODO_TECHDEBT(@okdas): use for releases (also change the "on" part at the top so it only tgirrered for tags/releases)
- name: Add release and publish binaries
- uses: softprops/action-gh-release@v1
+ uses: softprops/action-gh-release@v2
with:
files: |
release/*
diff --git a/.github/workflows/reviewdog.yml b/.github/workflows/reviewdog.yml
index 4295cfb1d..9dbd1750c 100644
--- a/.github/workflows/reviewdog.yml
+++ b/.github/workflows/reviewdog.yml
@@ -67,7 +67,7 @@ jobs:
# Ensure that we are using polylog (via logger.) instead of the golang's stdlib
# log package.
check_stdlog_in_off_chain_package:
- name: Check stdlog in off-chain source code
+ name: Check stdlog in offchain source code
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml
index 70d66edb9..454e59202 100644
--- a/.github/workflows/run-tests.yml
+++ b/.github/workflows/run-tests.yml
@@ -17,7 +17,7 @@ jobs:
go-test:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
with:
fetch-depth: "0" # Per https://github.com/ignite/cli/issues/1674#issuecomment-1144619147
diff --git a/.github/workflows/upload-pages-artifact.yml b/.github/workflows/upload-pages-artifact.yml
index 6caa01401..503b42123 100644
--- a/.github/workflows/upload-pages-artifact.yml
+++ b/.github/workflows/upload-pages-artifact.yml
@@ -16,7 +16,7 @@ jobs:
update-docs:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
with:
fetch-depth: "0" # Per https://github.com/ignite/cli/issues/1674#issuecomment-1144619147
@@ -32,7 +32,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
- name: Set up Node.js
uses: actions/setup-node@v4
@@ -63,7 +63,7 @@ jobs:
pages: write
steps:
- name: Checkout
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
- name: Set up Node.js
uses: actions/setup-node@v4
@@ -83,15 +83,15 @@ jobs:
yarn build
- name: Setup Pages
- uses: actions/configure-pages@v4
+ uses: actions/configure-pages@v5
with:
enablement: true
- name: Upload artifact
- uses: actions/upload-pages-artifact@v2
+ uses: actions/upload-pages-artifact@v3
with:
path: docusaurus/build
- name: Deploy to GitHub Pages
id: deployment
- uses: actions/deploy-pages@v2
+ uses: actions/deploy-pages@v4
diff --git a/Dockerfile.release b/Dockerfile.release
index 35d2a659c..efd5d2f44 100644
--- a/Dockerfile.release
+++ b/Dockerfile.release
@@ -8,7 +8,6 @@ RUN apt-get update && \
apt-get install -y --no-install-recommends ca-certificates && \
rm -rf /var/lib/apt/lists/*
-
# Use `1025` G/UID so users can switch between this and `heighliner` image without a need to chown the files.
RUN groupadd -g 1025 pocket && useradd -u 1025 -g pocket -m -s /sbin/nologin pocket
diff --git a/Makefile b/Makefile
index 200d37f06..6930de829 100644
--- a/Makefile
+++ b/Makefile
@@ -14,7 +14,7 @@ GROVE_PORTAL_STAGING_ETH_MAINNET = https://eth-mainnet.rpc.grove.town
# JSON RPC data for a test relay request
JSON_RPC_DATA_ETH_BLOCK_HEIGHT = '{"jsonrpc":"2.0","id":"0","method":"eth_blockNumber", "params": []}'
-# On-chain module account addresses. Search for `func TestModuleAddress` in the
+# Onchain module account addresses. Search for `func TestModuleAddress` in the
# codebase to get an understanding of how we got these values.
APPLICATION_MODULE_ADDRESS = pokt1rl3gjgzexmplmds3tq3r3yk84zlwdl6djzgsvm
SUPPLIER_MODULE_ADDRESS = pokt1j40dzzmn6cn9kxku7a5tjnud6hv37vesr5ccaa
diff --git a/Tiltfile b/Tiltfile
index 4c1d74b1c..7a9af482d 100644
--- a/Tiltfile
+++ b/Tiltfile
@@ -320,6 +320,9 @@ for x in range(localnet_config["path_gateways"]["count"]):
"--set=metrics.serviceMonitor.enabled=" + str(localnet_config["observability"]["enabled"]),
"--set=path.mountConfigMaps[0].name=path-config-" + str(actor_number),
"--set=path.mountConfigMaps[0].mountPath=/app/config/",
+ "--set=fullnameOverride=path" + str(actor_number),
+ "--set=nameOverride=path" + str(actor_number),
+ "--set=global.serviceAccount.name=path" + str(actor_number),
]
if localnet_config["path_local_repo"]["enabled"]:
@@ -360,7 +363,9 @@ for x in range(localnet_config["path_gateways"]["count"]):
# ],
# TODO_IMPROVE(@okdas): Add port forwards to grafana, pprof, like the other resources
port_forwards=[
- str(2999 + actor_number) + ":3000"
+ # See PATH for the default port used by the gateway. As of PR #1026, it is :3069.
+ # https://github.com/buildwithgrove/path/blob/main/config/router.go
+ str(2999 + actor_number) + ":3069"
],
)
diff --git a/api/poktroll/application/types.pulsar.go b/api/poktroll/application/types.pulsar.go
index f482e77bb..21eb04ac4 100644
--- a/api/poktroll/application/types.pulsar.go
+++ b/api/poktroll/application/types.pulsar.go
@@ -2182,35 +2182,37 @@ const (
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
-// Application defines the type used to store an on-chain definition and state for an application
+// Application represents the on-chain definition and state of an application
type Application struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` // The Bech32 address of the application.
- Stake *v1beta1.Coin `protobuf:"bytes,2,opt,name=stake,proto3" json:"stake,omitempty"` // The total amount of uPOKT the application has staked
- // CRITICAL_DEV_NOTE: The number of service_configs must be EXACTLY ONE.
+ // Bech32 address of the application
+ Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"`
+ // Total amount of staked uPOKT
+ Stake *v1beta1.Coin `protobuf:"bytes,2,opt,name=stake,proto3" json:"stake,omitempty"`
+ // CRITICAL: Must contain EXACTLY ONE service config
// This prevents applications from over-servicing.
- // The field is kept repeated (a list) for both legacy and future logic reaosns.
- // References:
+ // Kept as repeated field for legacy and future compatibility
+ // Refs:
// - https://github.com/pokt-network/poktroll/pull/750#discussion_r1735025033
// - https://www.notion.so/buildwithgrove/Off-chain-Application-Stake-Tracking-6a8bebb107db4f7f9dc62cbe7ba555f7
- ServiceConfigs []*shared.ApplicationServiceConfig `protobuf:"bytes,3,rep,name=service_configs,json=serviceConfigs,proto3" json:"service_configs,omitempty"` // The list of services this appliccation is configured to request service for
+ ServiceConfigs []*shared.ApplicationServiceConfig `protobuf:"bytes,3,rep,name=service_configs,json=serviceConfigs,proto3" json:"service_configs,omitempty"`
// TODO_BETA(@bryanchriswhite): Rename `delegatee_gateway_addresses` to `gateway_addresses_delegated_to`.
// Ensure to rename all relevant configs, comments, variables, function names, etc as well.
- DelegateeGatewayAddresses []string `protobuf:"bytes,4,rep,name=delegatee_gateway_addresses,json=delegateeGatewayAddresses,proto3" json:"delegatee_gateway_addresses,omitempty"` // The Bech32 encoded addresses for all delegatee Gateways, in a non-nullable slice
- // A map from sessionEndHeights to a list of Gateways.
- // The key is the height of the last block of the session during which the
- // respective undelegation was committed.
- // The value is a list of gateways being undelegated from.
+ // Non-nullable list of Bech32 encoded delegatee Gateway addresses
+ DelegateeGatewayAddresses []string `protobuf:"bytes,4,rep,name=delegatee_gateway_addresses,json=delegateeGatewayAddresses,proto3" json:"delegatee_gateway_addresses,omitempty"`
+ // Mapping of session end heights to gateways being undelegated from
+ // - Key: Height of the last block of the session when undelegation tx was committed
+ // - Value: List of gateways being undelegated from
// TODO_DOCUMENT(@red-0ne): Need to document the flow from this comment
// so its clear to everyone why this is necessary; https://github.com/pokt-network/poktroll/issues/476#issuecomment-2052639906.
PendingUndelegations map[uint64]*UndelegatingGatewayList `protobuf:"bytes,5,rep,name=pending_undelegations,json=pendingUndelegations,proto3" json:"pending_undelegations,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- // The end height of the session at which an application initiated its unstaking process.
- // If the application did not unstake, this value will be 0.
- UnstakeSessionEndHeight uint64 `protobuf:"varint,6,opt,name=unstake_session_end_height,json=unstakeSessionEndHeight,proto3" json:"unstake_session_end_height,omitempty"`
- PendingTransfer *PendingApplicationTransfer `protobuf:"bytes,7,opt,name=pending_transfer,json=pendingTransfer,proto3" json:"pending_transfer,omitempty"`
+ // Session end height when application initiated unstaking (0 if not unstaking)
+ UnstakeSessionEndHeight uint64 `protobuf:"varint,6,opt,name=unstake_session_end_height,json=unstakeSessionEndHeight,proto3" json:"unstake_session_end_height,omitempty"`
+ // Information about pending application transfers
+ PendingTransfer *PendingApplicationTransfer `protobuf:"bytes,7,opt,name=pending_transfer,json=pendingTransfer,proto3" json:"pending_transfer,omitempty"`
}
func (x *Application) Reset() {
diff --git a/api/poktroll/proof/params.pulsar.go b/api/poktroll/proof/params.pulsar.go
index 47fb7a9c7..896c46242 100644
--- a/api/poktroll/proof/params.pulsar.go
+++ b/api/poktroll/proof/params.pulsar.go
@@ -687,7 +687,7 @@ type Params struct {
ProofMissingPenalty *v1beta1.Coin `protobuf:"bytes,4,opt,name=proof_missing_penalty,json=proofMissingPenalty,proto3" json:"proof_missing_penalty,omitempty"`
// proof_submission_fee is the number of tokens (uPOKT) which should be paid by
// the supplier operator when submitting a proof.
- // This is needed to account for the cost of storing proofs on-chain and prevent
+ // This is needed to account for the cost of storing proofs onchain and prevent
// spamming (i.e. sybil bloat attacks) the network with non-required proofs.
// TODO_MAINNET: Consider renaming this to `proof_submission_fee_upokt`.
ProofSubmissionFee *v1beta1.Coin `protobuf:"bytes,5,opt,name=proof_submission_fee,json=proofSubmissionFee,proto3" json:"proof_submission_fee,omitempty"`
diff --git a/api/poktroll/proof/types.pulsar.go b/api/poktroll/proof/types.pulsar.go
index cace1d1e4..ac6d5b248 100644
--- a/api/poktroll/proof/types.pulsar.go
+++ b/api/poktroll/proof/types.pulsar.go
@@ -1317,7 +1317,7 @@ func (x *Proof) GetClosestMerkleProof() []byte {
return nil
}
-// Claim is the serialized object stored on-chain for claims pending to be proven
+// Claim is the serialized object stored onchain for claims pending to be proven
type Claim struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
diff --git a/api/poktroll/service/params.pulsar.go b/api/poktroll/service/params.pulsar.go
index 6b180a66a..c7f354952 100644
--- a/api/poktroll/service/params.pulsar.go
+++ b/api/poktroll/service/params.pulsar.go
@@ -522,7 +522,7 @@ type Params struct {
// and transferred to the pocket network foundation.
AddServiceFee *v1beta1.Coin `protobuf:"bytes,1,opt,name=add_service_fee,json=addServiceFee,proto3" json:"add_service_fee,omitempty"`
// target_num_relays is the target for the EMA of the number of relays per session.
- // Per service, on-chain relay mining difficulty will be adjusted to maintain this target.
+ // Per service, onchain relay mining difficulty will be adjusted to maintain this target.
TargetNumRelays uint64 `protobuf:"varint,2,opt,name=target_num_relays,json=targetNumRelays,proto3" json:"target_num_relays,omitempty"`
}
diff --git a/api/poktroll/service/relay.pulsar.go b/api/poktroll/service/relay.pulsar.go
index dca789df5..46491af29 100644
--- a/api/poktroll/service/relay.pulsar.go
+++ b/api/poktroll/service/relay.pulsar.go
@@ -2667,7 +2667,7 @@ type RelayRequestMetadata struct {
// application has delegated to. The signature is made using the ring of the
// application in both cases.
Signature []byte `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
- // TODO_MAINNET: make sure we're checking/verifying this address on-chain (if needed).
+ // TODO_MAINNET: make sure we're checking/verifying this address onchain (if needed).
// Relevant conversation: https://github.com/pokt-network/poktroll/pull/567#discussion_r1628722168
//
// The supplier operator address the relay is sent to. It is being used on the
diff --git a/api/poktroll/service/relay_mining_difficulty.pulsar.go b/api/poktroll/service/relay_mining_difficulty.pulsar.go
index 10e3920a7..71cecc89f 100644
--- a/api/poktroll/service/relay_mining_difficulty.pulsar.go
+++ b/api/poktroll/service/relay_mining_difficulty.pulsar.go
@@ -608,7 +608,7 @@ const (
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
-// RelayMiningDifficulty is a message used to store the on-chain Relay Mining
+// RelayMiningDifficulty is a message used to store the onchain Relay Mining
// difficulty associated with a specific service ID.
// TODO_TECHDEBT: Embed this message in the Service message.
type RelayMiningDifficulty struct {
diff --git a/api/poktroll/session/types.pulsar.go b/api/poktroll/session/types.pulsar.go
index 0a9c92f0b..b7e886b44 100644
--- a/api/poktroll/session/types.pulsar.go
+++ b/api/poktroll/session/types.pulsar.go
@@ -1498,11 +1498,11 @@ type SessionHeader struct {
ApplicationAddress string `protobuf:"bytes,1,opt,name=application_address,json=applicationAddress,proto3" json:"application_address,omitempty"` // The Bech32 address of the application.
ServiceId string `protobuf:"bytes,2,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` // The service id this session is for
- // NOTE: session_id can be derived from the above values using on-chain but is included in the header for convenience
+ // NOTE: session_id can be derived from the above values using onchain but is included in the header for convenience
SessionId string `protobuf:"bytes,3,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` // A unique pseudoranom ID for this session
SessionStartBlockHeight int64 `protobuf:"varint,4,opt,name=session_start_block_height,json=sessionStartBlockHeight,proto3" json:"session_start_block_height,omitempty"` // The height at which this session started
// Note that`session_end_block_height` is a derivative of (`start` + `num_blocks_per_session`)
- // as goverened by on-chain params at the time of the session start.
+ // as goverened by onchain params at the time of the session start.
// It is stored as an additional field to simplofy business logic in case
// the number of blocks_per_session changes during the session.
SessionEndBlockHeight int64 `protobuf:"varint,5,opt,name=session_end_block_height,json=sessionEndBlockHeight,proto3" json:"session_end_block_height,omitempty"` // The height at which this session ended, this is the last block of the session
diff --git a/api/poktroll/shared/params.pulsar.go b/api/poktroll/shared/params.pulsar.go
index c567709c1..e0f788169 100644
--- a/api/poktroll/shared/params.pulsar.go
+++ b/api/poktroll/shared/params.pulsar.go
@@ -842,12 +842,12 @@ type Params struct {
ProofWindowCloseOffsetBlocks uint64 `protobuf:"varint,6,opt,name=proof_window_close_offset_blocks,json=proofWindowCloseOffsetBlocks,proto3" json:"proof_window_close_offset_blocks,omitempty"`
// supplier_unbonding_period_sessions is the number of sessions that a supplier must wait after
// unstaking before their staked assets are moved to their account balance.
- // On-chain business logic requires, and ensures, that the corresponding block count of the unbonding
+ // Onchain business logic requires, and ensures, that the corresponding block count of the unbonding
// period will exceed the end of any active claim & proof lifecycles.
SupplierUnbondingPeriodSessions uint64 `protobuf:"varint,7,opt,name=supplier_unbonding_period_sessions,json=supplierUnbondingPeriodSessions,proto3" json:"supplier_unbonding_period_sessions,omitempty"`
// application_unbonding_period_sessions is the number of sessions that an application must wait after
// unstaking before their staked assets are moved to their account balance.
- // On-chain business logic requires, and ensures, that the corresponding block count of the
+ // Onchain business logic requires, and ensures, that the corresponding block count of the
// application unbonding period will exceed the end of its corresponding proof window close height.
ApplicationUnbondingPeriodSessions uint64 `protobuf:"varint,8,opt,name=application_unbonding_period_sessions,json=applicationUnbondingPeriodSessions,proto3" json:"application_unbonding_period_sessions,omitempty"`
// The amount of upokt that a compute unit should translate to when settling a session.
diff --git a/api/poktroll/shared/service.pulsar.go b/api/poktroll/shared/service.pulsar.go
index 60a1e6c85..a923eda0b 100644
--- a/api/poktroll/shared/service.pulsar.go
+++ b/api/poktroll/shared/service.pulsar.go
@@ -2,7 +2,6 @@
package shared
import (
- binary "encoding/binary"
fmt "fmt"
_ "github.com/cosmos/cosmos-proto"
runtime "github.com/cosmos/cosmos-proto/runtime"
@@ -11,7 +10,6 @@ import (
protoiface "google.golang.org/protobuf/runtime/protoiface"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
io "io"
- math "math"
reflect "reflect"
sync "sync"
)
@@ -2418,8 +2416,8 @@ func (x *fastReflection_ServiceRevenueShare) Range(f func(protoreflect.FieldDesc
return
}
}
- if x.RevSharePercentage != float32(0) || math.Signbit(float64(x.RevSharePercentage)) {
- value := protoreflect.ValueOfFloat32(x.RevSharePercentage)
+ if x.RevSharePercentage != uint64(0) {
+ value := protoreflect.ValueOfUint64(x.RevSharePercentage)
if !f(fd_ServiceRevenueShare_rev_share_percentage, value) {
return
}
@@ -2442,7 +2440,7 @@ func (x *fastReflection_ServiceRevenueShare) Has(fd protoreflect.FieldDescriptor
case "poktroll.shared.ServiceRevenueShare.address":
return x.Address != ""
case "poktroll.shared.ServiceRevenueShare.rev_share_percentage":
- return x.RevSharePercentage != float32(0) || math.Signbit(float64(x.RevSharePercentage))
+ return x.RevSharePercentage != uint64(0)
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.shared.ServiceRevenueShare"))
@@ -2462,7 +2460,7 @@ func (x *fastReflection_ServiceRevenueShare) Clear(fd protoreflect.FieldDescript
case "poktroll.shared.ServiceRevenueShare.address":
x.Address = ""
case "poktroll.shared.ServiceRevenueShare.rev_share_percentage":
- x.RevSharePercentage = float32(0)
+ x.RevSharePercentage = uint64(0)
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.shared.ServiceRevenueShare"))
@@ -2484,7 +2482,7 @@ func (x *fastReflection_ServiceRevenueShare) Get(descriptor protoreflect.FieldDe
return protoreflect.ValueOfString(value)
case "poktroll.shared.ServiceRevenueShare.rev_share_percentage":
value := x.RevSharePercentage
- return protoreflect.ValueOfFloat32(value)
+ return protoreflect.ValueOfUint64(value)
default:
if descriptor.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.shared.ServiceRevenueShare"))
@@ -2508,7 +2506,7 @@ func (x *fastReflection_ServiceRevenueShare) Set(fd protoreflect.FieldDescriptor
case "poktroll.shared.ServiceRevenueShare.address":
x.Address = value.Interface().(string)
case "poktroll.shared.ServiceRevenueShare.rev_share_percentage":
- x.RevSharePercentage = float32(value.Float())
+ x.RevSharePercentage = value.Uint()
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.shared.ServiceRevenueShare"))
@@ -2549,7 +2547,7 @@ func (x *fastReflection_ServiceRevenueShare) NewField(fd protoreflect.FieldDescr
case "poktroll.shared.ServiceRevenueShare.address":
return protoreflect.ValueOfString("")
case "poktroll.shared.ServiceRevenueShare.rev_share_percentage":
- return protoreflect.ValueOfFloat32(float32(0))
+ return protoreflect.ValueOfUint64(uint64(0))
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.shared.ServiceRevenueShare"))
@@ -2623,8 +2621,8 @@ func (x *fastReflection_ServiceRevenueShare) ProtoMethods() *protoiface.Methods
if l > 0 {
n += 1 + l + runtime.Sov(uint64(l))
}
- if x.RevSharePercentage != 0 || math.Signbit(float64(x.RevSharePercentage)) {
- n += 5
+ if x.RevSharePercentage != 0 {
+ n += 1 + runtime.Sov(uint64(x.RevSharePercentage))
}
if x.unknownFields != nil {
n += len(x.unknownFields)
@@ -2655,11 +2653,10 @@ func (x *fastReflection_ServiceRevenueShare) ProtoMethods() *protoiface.Methods
i -= len(x.unknownFields)
copy(dAtA[i:], x.unknownFields)
}
- if x.RevSharePercentage != 0 || math.Signbit(float64(x.RevSharePercentage)) {
- i -= 4
- binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(x.RevSharePercentage))))
+ if x.RevSharePercentage != 0 {
+ i = runtime.EncodeVarint(dAtA, i, uint64(x.RevSharePercentage))
i--
- dAtA[i] = 0x15
+ dAtA[i] = 0x18
}
if len(x.Address) > 0 {
i -= len(x.Address)
@@ -2749,17 +2746,25 @@ func (x *fastReflection_ServiceRevenueShare) ProtoMethods() *protoiface.Methods
}
x.Address = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
- case 2:
- if wireType != 5 {
+ case 3:
+ if wireType != 0 {
return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field RevSharePercentage", wireType)
}
- var v uint32
- if (iNdEx + 4) > l {
- return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF
+ x.RevSharePercentage = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ x.RevSharePercentage |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
- v = uint32(binary.LittleEndian.Uint32(dAtA[iNdEx:]))
- iNdEx += 4
- x.RevSharePercentage = float32(math.Float32frombits(v))
default:
iNdEx = preIndex
skippy, err := runtime.Skip(dAtA[iNdEx:])
@@ -3336,7 +3341,7 @@ func (RPCType) EnumDescriptor() ([]byte, []int) {
}
// Enum to define configuration options
-// TODO_RESEARCH: Should these be configs, SLAs or something else? There will be more discussion once we get closer to implementing on-chain QoS.
+// TODO_RESEARCH: Should these be configs, SLAs or something else? There will be more discussion once we get closer to implementing onchain QoS.
type ConfigOptions int32
const (
@@ -3398,7 +3403,7 @@ type Service struct {
// cost_per_relay_for_specific_service = compute_units_per_relay_for_specific_service * compute_units_to_tokens_multipler_global_value
ComputeUnitsPerRelay uint64 `protobuf:"varint,3,opt,name=compute_units_per_relay,json=computeUnitsPerRelay,proto3" json:"compute_units_per_relay,omitempty"` // Compute units required per relay for this service
// The owner address that created the service.
- // It is the address that receives rewards based on the Service's on-chain usage
+ // It is the address that receives rewards based on the Service's onchain usage
// It is the only address that can update the service configuration (e.g. compute_units_per_relay),
// or make other updates to it.
OwnerAddress string `protobuf:"bytes,4,opt,name=owner_address,json=ownerAddress,proto3" json:"owner_address,omitempty"` // The Bech32 address of the service owner / creator
@@ -3598,8 +3603,8 @@ type ServiceRevenueShare struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` // The Bech32 address of the revenue share recipient
- RevSharePercentage float32 `protobuf:"fixed32,2,opt,name=rev_share_percentage,json=revSharePercentage,proto3" json:"rev_share_percentage,omitempty"` // The percentage of revenue share the recipient will receive
+ Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` // The Bech32 address of the revenue share recipient
+ RevSharePercentage uint64 `protobuf:"varint,3,opt,name=rev_share_percentage,json=revSharePercentage,proto3" json:"rev_share_percentage,omitempty"` // The percentage of revenue share the recipient will receive
}
func (x *ServiceRevenueShare) Reset() {
@@ -3629,7 +3634,7 @@ func (x *ServiceRevenueShare) GetAddress() string {
return ""
}
-func (x *ServiceRevenueShare) GetRevSharePercentage() float32 {
+func (x *ServiceRevenueShare) GetRevSharePercentage() uint64 {
if x != nil {
return x.RevSharePercentage
}
@@ -3724,39 +3729,39 @@ var file_poktroll_shared_service_proto_rawDesc = []byte{
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x70,
0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x2e, 0x43,
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x63, 0x6f, 0x6e,
- 0x66, 0x69, 0x67, 0x73, 0x22, 0x7b, 0x0a, 0x13, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52,
- 0x65, 0x76, 0x65, 0x6e, 0x75, 0x65, 0x53, 0x68, 0x61, 0x72, 0x65, 0x12, 0x32, 0x0a, 0x07, 0x61,
- 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x18, 0xd2, 0xb4,
- 0x2d, 0x14, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73,
- 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12,
- 0x30, 0x0a, 0x14, 0x72, 0x65, 0x76, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x5f, 0x70, 0x65, 0x72,
- 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x12, 0x72,
- 0x65, 0x76, 0x53, 0x68, 0x61, 0x72, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67,
- 0x65, 0x22, 0x56, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x12, 0x30, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e,
- 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64,
- 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x03,
- 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2a, 0x4b, 0x0a, 0x07, 0x52, 0x50, 0x43,
- 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f,
- 0x52, 0x50, 0x43, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x47, 0x52, 0x50, 0x43, 0x10, 0x01, 0x12,
- 0x0d, 0x0a, 0x09, 0x57, 0x45, 0x42, 0x53, 0x4f, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x02, 0x12, 0x0c,
- 0x0a, 0x08, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x52, 0x50, 0x43, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04,
- 0x52, 0x45, 0x53, 0x54, 0x10, 0x04, 0x2a, 0x30, 0x0a, 0x0d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
- 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x12, 0x0a, 0x0e, 0x55, 0x4e, 0x4b, 0x4e, 0x4f,
- 0x57, 0x4e, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x54,
- 0x49, 0x4d, 0x45, 0x4f, 0x55, 0x54, 0x10, 0x01, 0x42, 0xa6, 0x01, 0xd8, 0xe2, 0x1e, 0x01, 0x0a,
- 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x73, 0x68,
- 0x61, 0x72, 0x65, 0x64, 0x42, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f,
- 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x20, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x73, 0x64, 0x6b, 0x2e,
- 0x69, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2f,
- 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0xa2, 0x02, 0x03, 0x50, 0x53, 0x58, 0xaa, 0x02, 0x0f, 0x50,
- 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0xca, 0x02,
- 0x0f, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x5c, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64,
- 0xe2, 0x02, 0x1b, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x5c, 0x53, 0x68, 0x61, 0x72,
- 0x65, 0x64, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02,
- 0x10, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x3a, 0x3a, 0x53, 0x68, 0x61, 0x72, 0x65,
- 0x64, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x66, 0x69, 0x67, 0x73, 0x22, 0x81, 0x01, 0x0a, 0x13, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x52, 0x65, 0x76, 0x65, 0x6e, 0x75, 0x65, 0x53, 0x68, 0x61, 0x72, 0x65, 0x12, 0x32, 0x0a, 0x07,
+ 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x18, 0xd2,
+ 0xb4, 0x2d, 0x14, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73,
+ 0x73, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73,
+ 0x12, 0x30, 0x0a, 0x14, 0x72, 0x65, 0x76, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x5f, 0x70, 0x65,
+ 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x12,
+ 0x72, 0x65, 0x76, 0x53, 0x68, 0x61, 0x72, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61,
+ 0x67, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x22, 0x56, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c,
+ 0x2e, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x2a, 0x4b, 0x0a, 0x07, 0x52, 0x50, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55,
+ 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x52, 0x50, 0x43, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04,
+ 0x47, 0x52, 0x50, 0x43, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x57, 0x45, 0x42, 0x53, 0x4f, 0x43,
+ 0x4b, 0x45, 0x54, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x52, 0x50,
+ 0x43, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x52, 0x45, 0x53, 0x54, 0x10, 0x04, 0x2a, 0x30, 0x0a,
+ 0x0d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x12,
+ 0x0a, 0x0e, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47,
+ 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x54, 0x49, 0x4d, 0x45, 0x4f, 0x55, 0x54, 0x10, 0x01, 0x42,
+ 0xa6, 0x01, 0xd8, 0xe2, 0x1e, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x70, 0x6f, 0x6b, 0x74,
+ 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x42, 0x0c, 0x53, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x20, 0x63, 0x6f, 0x73,
+ 0x6d, 0x6f, 0x73, 0x73, 0x64, 0x6b, 0x2e, 0x69, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x6f,
+ 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0xa2, 0x02, 0x03,
+ 0x50, 0x53, 0x58, 0xaa, 0x02, 0x0f, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x53,
+ 0x68, 0x61, 0x72, 0x65, 0x64, 0xca, 0x02, 0x0f, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c,
+ 0x5c, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0xe2, 0x02, 0x1b, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f,
+ 0x6c, 0x6c, 0x5c, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74,
+ 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x10, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c,
+ 0x3a, 0x3a, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
diff --git a/api/poktroll/shared/supplier.pulsar.go b/api/poktroll/shared/supplier.pulsar.go
index 367169513..8ac084889 100644
--- a/api/poktroll/shared/supplier.pulsar.go
+++ b/api/poktroll/shared/supplier.pulsar.go
@@ -1066,29 +1066,30 @@ const (
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
-// Supplier is the type defining the actor in Pocket Network that provides RPC services.
+// Supplier represents an actor in Pocket Network that provides RPC services
type Supplier struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // The address of the owner (i.e. staker, custodial) that owns the funds for staking.
- // By default, this address is the one that receives all the rewards unless owtherwise specified.
- // This property cannot be updated by the operator.
- OwnerAddress string `protobuf:"bytes,1,opt,name=owner_address,json=ownerAddress,proto3" json:"owner_address,omitempty"` // Bech32 cosmos address
- // The operator address of the supplier operator (i.e. the one managing the off-chain server).
- // The operator address can update the supplier's configurations excluding the owner address.
- // This property does not change over the supplier's lifespan, the supplier must be unstaked
- // and re-staked to effectively update this value.
- OperatorAddress string `protobuf:"bytes,2,opt,name=operator_address,json=operatorAddress,proto3" json:"operator_address,omitempty"` // Bech32 cosmos address
- Stake *v1beta1.Coin `protobuf:"bytes,3,opt,name=stake,proto3" json:"stake,omitempty"` // The total amount of uPOKT the supplier has staked
- Services []*SupplierServiceConfig `protobuf:"bytes,4,rep,name=services,proto3" json:"services,omitempty"` // The service configs this supplier can support
- // The session end height at which an actively unbonding supplier unbonds its stake.
- // If the supplier did not unstake, this value will be 0.
+ // Owner address that controls the staked funds and receives rewards by default
+ // Cannot be updated by the operator
+ OwnerAddress string `protobuf:"bytes,1,opt,name=owner_address,json=ownerAddress,proto3" json:"owner_address,omitempty"`
+ // Operator address managing the offchain server
+ // Immutable for supplier's lifespan - requires unstake/re-stake to change.
+ // Can update supplier configs except for owner address.
+ OperatorAddress string `protobuf:"bytes,2,opt,name=operator_address,json=operatorAddress,proto3" json:"operator_address,omitempty"`
+ // Total amount of staked uPOKT
+ Stake *v1beta1.Coin `protobuf:"bytes,3,opt,name=stake,proto3" json:"stake,omitempty"`
+ // List of service configurations supported by this supplier
+ Services []*SupplierServiceConfig `protobuf:"bytes,4,rep,name=services,proto3" json:"services,omitempty"`
+ // Session end height when supplier initiated unstaking (0 if not unstaking)
UnstakeSessionEndHeight uint64 `protobuf:"varint,5,opt,name=unstake_session_end_height,json=unstakeSessionEndHeight,proto3" json:"unstake_session_end_height,omitempty"`
- // services_activation_heights_map is a map of serviceIds to the height at
- // which the staked supplier will become active for that service.
- // Activation heights are session start heights.
+ // Mapping of serviceIds to their activation heights
+ // - Key: serviceId
+ // - Value: Session start height when supplier becomes active for the service
+ // TODO_MAINNET(@olshansk, #1033): Look into moving this to an external repeated protobuf
+ // because maps are no longer supported for serialized types in the CosmoSDK.
ServicesActivationHeightsMap map[string]uint64 `protobuf:"bytes,6,rep,name=services_activation_heights_map,json=servicesActivationHeightsMap,proto3" json:"services_activation_heights_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
}
diff --git a/api/poktroll/supplier/event.pulsar.go b/api/poktroll/supplier/event.pulsar.go
index 22dcc4989..41941710f 100644
--- a/api/poktroll/supplier/event.pulsar.go
+++ b/api/poktroll/supplier/event.pulsar.go
@@ -2250,7 +2250,7 @@ func (SupplierUnbondingReason) EnumDescriptor() ([]byte, []int) {
return file_poktroll_supplier_event_proto_rawDescGZIP(), []int{0}
}
-// EventSupplierStaked is emitted when a supplier stake message is committed on-chain.
+// EventSupplierStaked is emitted when a supplier stake message is committed onchain.
type EventSupplierStaked struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -2296,7 +2296,7 @@ func (x *EventSupplierStaked) GetSessionEndHeight() int64 {
}
// EventSupplierUnbondingBegin is emitted when an application unstake message
-// is committed on-chain, indicating that the supplier will now begin unbonding.
+// is committed onchain, indicating that the supplier will now begin unbonding.
type EventSupplierUnbondingBegin struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
diff --git a/api/poktroll/supplier/query.pulsar.go b/api/poktroll/supplier/query.pulsar.go
index d1f087c62..392f4aa39 100644
--- a/api/poktroll/supplier/query.pulsar.go
+++ b/api/poktroll/supplier/query.pulsar.go
@@ -1668,12 +1668,14 @@ func (x *fastReflection_QueryGetSupplierResponse) ProtoMethods() *protoiface.Met
var (
md_QueryAllSuppliersRequest protoreflect.MessageDescriptor
fd_QueryAllSuppliersRequest_pagination protoreflect.FieldDescriptor
+ fd_QueryAllSuppliersRequest_service_id protoreflect.FieldDescriptor
)
func init() {
file_poktroll_supplier_query_proto_init()
md_QueryAllSuppliersRequest = File_poktroll_supplier_query_proto.Messages().ByName("QueryAllSuppliersRequest")
fd_QueryAllSuppliersRequest_pagination = md_QueryAllSuppliersRequest.Fields().ByName("pagination")
+ fd_QueryAllSuppliersRequest_service_id = md_QueryAllSuppliersRequest.Fields().ByName("service_id")
}
var _ protoreflect.Message = (*fastReflection_QueryAllSuppliersRequest)(nil)
@@ -1747,6 +1749,16 @@ func (x *fastReflection_QueryAllSuppliersRequest) Range(f func(protoreflect.Fiel
return
}
}
+ if x.Filter != nil {
+ switch o := x.Filter.(type) {
+ case *QueryAllSuppliersRequest_ServiceId:
+ v := o.ServiceId
+ value := protoreflect.ValueOfString(v)
+ if !f(fd_QueryAllSuppliersRequest_service_id, value) {
+ return
+ }
+ }
+ }
}
// Has reports whether a field is populated.
@@ -1764,6 +1776,14 @@ func (x *fastReflection_QueryAllSuppliersRequest) Has(fd protoreflect.FieldDescr
switch fd.FullName() {
case "poktroll.supplier.QueryAllSuppliersRequest.pagination":
return x.Pagination != nil
+ case "poktroll.supplier.QueryAllSuppliersRequest.service_id":
+ if x.Filter == nil {
+ return false
+ } else if _, ok := x.Filter.(*QueryAllSuppliersRequest_ServiceId); ok {
+ return true
+ } else {
+ return false
+ }
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.supplier.QueryAllSuppliersRequest"))
@@ -1782,6 +1802,8 @@ func (x *fastReflection_QueryAllSuppliersRequest) Clear(fd protoreflect.FieldDes
switch fd.FullName() {
case "poktroll.supplier.QueryAllSuppliersRequest.pagination":
x.Pagination = nil
+ case "poktroll.supplier.QueryAllSuppliersRequest.service_id":
+ x.Filter = nil
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.supplier.QueryAllSuppliersRequest"))
@@ -1801,6 +1823,14 @@ func (x *fastReflection_QueryAllSuppliersRequest) Get(descriptor protoreflect.Fi
case "poktroll.supplier.QueryAllSuppliersRequest.pagination":
value := x.Pagination
return protoreflect.ValueOfMessage(value.ProtoReflect())
+ case "poktroll.supplier.QueryAllSuppliersRequest.service_id":
+ if x.Filter == nil {
+ return protoreflect.ValueOfString("")
+ } else if v, ok := x.Filter.(*QueryAllSuppliersRequest_ServiceId); ok {
+ return protoreflect.ValueOfString(v.ServiceId)
+ } else {
+ return protoreflect.ValueOfString("")
+ }
default:
if descriptor.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.supplier.QueryAllSuppliersRequest"))
@@ -1823,6 +1853,9 @@ func (x *fastReflection_QueryAllSuppliersRequest) Set(fd protoreflect.FieldDescr
switch fd.FullName() {
case "poktroll.supplier.QueryAllSuppliersRequest.pagination":
x.Pagination = value.Message().Interface().(*v1beta1.PageRequest)
+ case "poktroll.supplier.QueryAllSuppliersRequest.service_id":
+ cv := value.Interface().(string)
+ x.Filter = &QueryAllSuppliersRequest_ServiceId{ServiceId: cv}
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.supplier.QueryAllSuppliersRequest"))
@@ -1848,6 +1881,8 @@ func (x *fastReflection_QueryAllSuppliersRequest) Mutable(fd protoreflect.FieldD
x.Pagination = new(v1beta1.PageRequest)
}
return protoreflect.ValueOfMessage(x.Pagination.ProtoReflect())
+ case "poktroll.supplier.QueryAllSuppliersRequest.service_id":
+ panic(fmt.Errorf("field service_id of message poktroll.supplier.QueryAllSuppliersRequest is not mutable"))
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.supplier.QueryAllSuppliersRequest"))
@@ -1864,6 +1899,8 @@ func (x *fastReflection_QueryAllSuppliersRequest) NewField(fd protoreflect.Field
case "poktroll.supplier.QueryAllSuppliersRequest.pagination":
m := new(v1beta1.PageRequest)
return protoreflect.ValueOfMessage(m.ProtoReflect())
+ case "poktroll.supplier.QueryAllSuppliersRequest.service_id":
+ return protoreflect.ValueOfString("")
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.supplier.QueryAllSuppliersRequest"))
@@ -1877,6 +1914,14 @@ func (x *fastReflection_QueryAllSuppliersRequest) NewField(fd protoreflect.Field
// It panics if the oneof descriptor does not belong to this message.
func (x *fastReflection_QueryAllSuppliersRequest) WhichOneof(d protoreflect.OneofDescriptor) protoreflect.FieldDescriptor {
switch d.FullName() {
+ case "poktroll.supplier.QueryAllSuppliersRequest.filter":
+ if x.Filter == nil {
+ return nil
+ }
+ switch x.Filter.(type) {
+ case *QueryAllSuppliersRequest_ServiceId:
+ return x.Descriptor().Fields().ByName("service_id")
+ }
default:
panic(fmt.Errorf("%s is not a oneof field in poktroll.supplier.QueryAllSuppliersRequest", d.FullName()))
}
@@ -1937,6 +1982,14 @@ func (x *fastReflection_QueryAllSuppliersRequest) ProtoMethods() *protoiface.Met
l = options.Size(x.Pagination)
n += 1 + l + runtime.Sov(uint64(l))
}
+ switch x := x.Filter.(type) {
+ case *QueryAllSuppliersRequest_ServiceId:
+ if x == nil {
+ break
+ }
+ l = len(x.ServiceId)
+ n += 1 + l + runtime.Sov(uint64(l))
+ }
if x.unknownFields != nil {
n += len(x.unknownFields)
}
@@ -1966,6 +2019,14 @@ func (x *fastReflection_QueryAllSuppliersRequest) ProtoMethods() *protoiface.Met
i -= len(x.unknownFields)
copy(dAtA[i:], x.unknownFields)
}
+ switch x := x.Filter.(type) {
+ case *QueryAllSuppliersRequest_ServiceId:
+ i -= len(x.ServiceId)
+ copy(dAtA[i:], x.ServiceId)
+ i = runtime.EncodeVarint(dAtA, i, uint64(len(x.ServiceId)))
+ i--
+ dAtA[i] = 0x12
+ }
if x.Pagination != nil {
encoded, err := options.Marshal(x.Pagination)
if err != nil {
@@ -2065,6 +2126,38 @@ func (x *fastReflection_QueryAllSuppliersRequest) ProtoMethods() *protoiface.Met
return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err
}
iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field ServiceId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength
+ }
+ if postIndex > l {
+ return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF
+ }
+ x.Filter = &QueryAllSuppliersRequest_ServiceId{string(dAtA[iNdEx:postIndex])}
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := runtime.Skip(dAtA[iNdEx:])
@@ -2755,7 +2848,7 @@ type QueryGetSupplierRequest struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- OperatorAddress string `protobuf:"bytes,1,opt,name=operator_address,json=operatorAddress,proto3" json:"operator_address,omitempty"`
+ OperatorAddress string `protobuf:"bytes,1,opt,name=operator_address,json=operatorAddress,proto3" json:"operator_address,omitempty"` // TODO_TECHDEBT: Add the ability to query for a supplier by owner_id
}
func (x *QueryGetSupplierRequest) Reset() {
@@ -2826,6 +2919,10 @@ type QueryAllSuppliersRequest struct {
unknownFields protoimpl.UnknownFields
Pagination *v1beta1.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"`
+ // Types that are assignable to Filter:
+ //
+ // *QueryAllSuppliersRequest_ServiceId
+ Filter isQueryAllSuppliersRequest_Filter `protobuf_oneof:"filter"`
}
func (x *QueryAllSuppliersRequest) Reset() {
@@ -2855,6 +2952,30 @@ func (x *QueryAllSuppliersRequest) GetPagination() *v1beta1.PageRequest {
return nil
}
+func (x *QueryAllSuppliersRequest) GetFilter() isQueryAllSuppliersRequest_Filter {
+ if x != nil {
+ return x.Filter
+ }
+ return nil
+}
+
+func (x *QueryAllSuppliersRequest) GetServiceId() string {
+ if x, ok := x.GetFilter().(*QueryAllSuppliersRequest_ServiceId); ok {
+ return x.ServiceId
+ }
+ return ""
+}
+
+type isQueryAllSuppliersRequest_Filter interface {
+ isQueryAllSuppliersRequest_Filter()
+}
+
+type QueryAllSuppliersRequest_ServiceId struct {
+ ServiceId string `protobuf:"bytes,2,opt,name=service_id,json=serviceId,proto3,oneof"` // unique service identifier to filter by
+}
+
+func (*QueryAllSuppliersRequest_ServiceId) isQueryAllSuppliersRequest_Filter() {}
+
type QueryAllSuppliersResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -2937,65 +3058,67 @@ var file_poktroll_supplier_query_proto_rawDesc = []byte{
0x69, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x6f, 0x6b, 0x74,
0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x2e, 0x53, 0x75, 0x70, 0x70,
0x6c, 0x69, 0x65, 0x72, 0x42, 0x04, 0xc8, 0xde, 0x1f, 0x00, 0x52, 0x08, 0x73, 0x75, 0x70, 0x70,
- 0x6c, 0x69, 0x65, 0x72, 0x22, 0x62, 0x0a, 0x18, 0x51, 0x75, 0x65, 0x72, 0x79, 0x41, 0x6c, 0x6c,
- 0x53, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x12, 0x46, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x62, 0x61,
- 0x73, 0x65, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31,
- 0x2e, 0x50, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0a, 0x70, 0x61,
- 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa1, 0x01, 0x0a, 0x19, 0x51, 0x75, 0x65,
- 0x72, 0x79, 0x41, 0x6c, 0x6c, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x73, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x08, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x69,
- 0x65, 0x72, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72,
- 0x6f, 0x6c, 0x6c, 0x2e, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x2e, 0x53, 0x75, 0x70, 0x70, 0x6c,
- 0x69, 0x65, 0x72, 0x42, 0x04, 0xc8, 0xde, 0x1f, 0x00, 0x52, 0x08, 0x73, 0x75, 0x70, 0x70, 0x6c,
- 0x69, 0x65, 0x72, 0x12, 0x47, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73,
- 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65,
- 0x74, 0x61, 0x31, 0x2e, 0x50, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x52, 0x0a, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x32, 0xda, 0x03, 0x0a,
- 0x05, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x87, 0x01, 0x0a, 0x06, 0x50, 0x61, 0x72, 0x61, 0x6d,
- 0x73, 0x12, 0x25, 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x73, 0x75, 0x70,
- 0x70, 0x6c, 0x69, 0x65, 0x72, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d,
- 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72,
- 0x6f, 0x6c, 0x6c, 0x2e, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x2e, 0x51, 0x75, 0x65,
- 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x22, 0x2e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x28, 0x12, 0x26, 0x2f, 0x70, 0x6f, 0x6b, 0x74, 0x2d,
- 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2f, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c,
- 0x2f, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x2f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73,
- 0x12, 0xa8, 0x01, 0x0a, 0x08, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x2a, 0x2e,
+ 0x6c, 0x69, 0x65, 0x72, 0x22, 0x8d, 0x01, 0x0a, 0x18, 0x51, 0x75, 0x65, 0x72, 0x79, 0x41, 0x6c,
+ 0x6c, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x46, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x62,
+ 0x61, 0x73, 0x65, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61,
+ 0x31, 0x2e, 0x50, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0a, 0x70,
+ 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0a, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52,
+ 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x64, 0x42, 0x08, 0x0a, 0x06, 0x66, 0x69,
+ 0x6c, 0x74, 0x65, 0x72, 0x22, 0xa1, 0x01, 0x0a, 0x19, 0x51, 0x75, 0x65, 0x72, 0x79, 0x41, 0x6c,
+ 0x6c, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x08, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e,
+ 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x2e, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x42,
+ 0x04, 0xc8, 0xde, 0x1f, 0x00, 0x52, 0x08, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12,
+ 0x47, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x62, 0x61, 0x73,
+ 0x65, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e,
+ 0x50, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x0a, 0x70, 0x61,
+ 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x32, 0xda, 0x03, 0x0a, 0x05, 0x51, 0x75, 0x65,
+ 0x72, 0x79, 0x12, 0x87, 0x01, 0x0a, 0x06, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x25, 0x2e,
0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65,
- 0x72, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x47, 0x65, 0x74, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x69,
- 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x70, 0x6f, 0x6b, 0x74,
+ 0x72, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e,
+ 0x73, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61,
+ 0x72, 0x61, 0x6d, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2e, 0x82, 0xd3,
+ 0xe4, 0x93, 0x02, 0x28, 0x12, 0x26, 0x2f, 0x70, 0x6f, 0x6b, 0x74, 0x2d, 0x6e, 0x65, 0x74, 0x77,
+ 0x6f, 0x72, 0x6b, 0x2f, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2f, 0x73, 0x75, 0x70,
+ 0x70, 0x6c, 0x69, 0x65, 0x72, 0x2f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0xa8, 0x01, 0x0a,
+ 0x08, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x2a, 0x2e, 0x70, 0x6f, 0x6b, 0x74,
0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x2e, 0x51, 0x75,
0x65, 0x72, 0x79, 0x47, 0x65, 0x74, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x43, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3d, 0x12, 0x3b,
- 0x2f, 0x70, 0x6f, 0x6b, 0x74, 0x2d, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2f, 0x70, 0x6f,
- 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2f, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x2f,
- 0x73, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x2f, 0x7b, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74,
- 0x6f, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x7d, 0x12, 0x9b, 0x01, 0x0a, 0x0c,
- 0x41, 0x6c, 0x6c, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x73, 0x12, 0x2b, 0x2e, 0x70,
- 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72,
- 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x41, 0x6c, 0x6c, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65,
- 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x70, 0x6f, 0x6b, 0x74,
- 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x2e, 0x51, 0x75,
- 0x65, 0x72, 0x79, 0x41, 0x6c, 0x6c, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x73, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x30, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12,
- 0x28, 0x2f, 0x70, 0x6f, 0x6b, 0x74, 0x2d, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2f, 0x70,
- 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2f, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72,
- 0x2f, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x42, 0xb0, 0x01, 0xd8, 0xe2, 0x1e, 0x01,
- 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x73,
- 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x42, 0x0a, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x72,
- 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x73, 0x64, 0x6b,
- 0x2e, 0x69, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c,
- 0x2f, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0xa2, 0x02, 0x03, 0x50, 0x53, 0x58, 0xaa,
- 0x02, 0x11, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x53, 0x75, 0x70, 0x70, 0x6c,
- 0x69, 0x65, 0x72, 0xca, 0x02, 0x11, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x5c, 0x53,
- 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0xe2, 0x02, 0x1d, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f,
- 0x6c, 0x6c, 0x5c, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x5c, 0x47, 0x50, 0x42, 0x4d,
- 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x12, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f,
- 0x6c, 0x6c, 0x3a, 0x3a, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x33,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c,
+ 0x2e, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x47,
+ 0x65, 0x74, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x22, 0x43, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3d, 0x12, 0x3b, 0x2f, 0x70, 0x6f, 0x6b,
+ 0x74, 0x2d, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2f, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f,
+ 0x6c, 0x6c, 0x2f, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x2f, 0x73, 0x75, 0x70, 0x70,
+ 0x6c, 0x69, 0x65, 0x72, 0x2f, 0x7b, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x61,
+ 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x7d, 0x12, 0x9b, 0x01, 0x0a, 0x0c, 0x41, 0x6c, 0x6c, 0x53,
+ 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x73, 0x12, 0x2b, 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72,
+ 0x6f, 0x6c, 0x6c, 0x2e, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x2e, 0x51, 0x75, 0x65,
+ 0x72, 0x79, 0x41, 0x6c, 0x6c, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x73, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c,
+ 0x2e, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x41,
+ 0x6c, 0x6c, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x22, 0x30, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, 0x28, 0x2f, 0x70, 0x6f,
+ 0x6b, 0x74, 0x2d, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2f, 0x70, 0x6f, 0x6b, 0x74, 0x72,
+ 0x6f, 0x6c, 0x6c, 0x2f, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x2f, 0x73, 0x75, 0x70,
+ 0x70, 0x6c, 0x69, 0x65, 0x72, 0x42, 0xb0, 0x01, 0xd8, 0xe2, 0x1e, 0x01, 0x0a, 0x15, 0x63, 0x6f,
+ 0x6d, 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x73, 0x75, 0x70, 0x70, 0x6c,
+ 0x69, 0x65, 0x72, 0x42, 0x0a, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
+ 0x01, 0x5a, 0x22, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x73, 0x64, 0x6b, 0x2e, 0x69, 0x6f, 0x2f,
+ 0x61, 0x70, 0x69, 0x2f, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2f, 0x73, 0x75, 0x70,
+ 0x70, 0x6c, 0x69, 0x65, 0x72, 0xa2, 0x02, 0x03, 0x50, 0x53, 0x58, 0xaa, 0x02, 0x11, 0x50, 0x6f,
+ 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0xca,
+ 0x02, 0x11, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x5c, 0x53, 0x75, 0x70, 0x70, 0x6c,
+ 0x69, 0x65, 0x72, 0xe2, 0x02, 0x1d, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x5c, 0x53,
+ 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64,
+ 0x61, 0x74, 0x61, 0xea, 0x02, 0x12, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x3a, 0x3a,
+ 0x53, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -3122,6 +3245,9 @@ func file_poktroll_supplier_query_proto_init() {
}
}
}
+ file_poktroll_supplier_query_proto_msgTypes[4].OneofWrappers = []interface{}{
+ (*QueryAllSuppliersRequest_ServiceId)(nil),
+ }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
diff --git a/api/poktroll/tokenomics/event.pulsar.go b/api/poktroll/tokenomics/event.pulsar.go
index af0c412dc..54b8df12e 100644
--- a/api/poktroll/tokenomics/event.pulsar.go
+++ b/api/poktroll/tokenomics/event.pulsar.go
@@ -3480,7 +3480,7 @@ func (ClaimExpirationReason) EnumDescriptor() ([]byte, []int) {
}
// EventClaimExpired is an event emitted during settlement whenever a claim requiring
-// an on-chain proof doesn't have one. The claim cannot be settled, leading to that work
+// an onchain proof doesn't have one. The claim cannot be settled, leading to that work
// never being rewarded.
type EventClaimExpired struct {
state protoimpl.MessageState
diff --git a/app/upgrades.go b/app/upgrades.go
index 0978f4322..df4543c99 100644
--- a/app/upgrades.go
+++ b/app/upgrades.go
@@ -13,6 +13,7 @@ import (
var allUpgrades = []upgrades.Upgrade{
upgrades.Upgrade_0_0_4,
upgrades.Upgrade_0_0_10,
+ upgrades.Upgrade_0_0_11,
}
// setUpgrades sets upgrade handlers for all upgrades and executes KVStore migration if an upgrade plan file exists.
@@ -27,7 +28,7 @@ func (app *App) setUpgrades() error {
}
// Reads the upgrade info from disk.
- // The previous binary is expected to have read the plan from on-chain and saved it locally.
+ // The previous binary is expected to have read the plan from onchain and saved it locally.
upgradePlan, err := app.Keepers.UpgradeKeeper.ReadUpgradeInfoFromDisk()
if err != nil {
return err
diff --git a/app/upgrades/historical.go b/app/upgrades/historical.go
index 2c0740652..35393ad02 100644
--- a/app/upgrades/historical.go
+++ b/app/upgrades/historical.go
@@ -17,6 +17,7 @@ import (
"github.com/cosmos/cosmos-sdk/types/module"
consensusparamtypes "github.com/cosmos/cosmos-sdk/x/consensus/types"
+ cosmostypes "github.com/cosmos/cosmos-sdk/types"
"github.com/pokt-network/poktroll/app/keepers"
)
@@ -29,6 +30,8 @@ func defaultUpgradeHandler(
configurator module.Configurator,
) upgradetypes.UpgradeHandler {
return func(ctx context.Context, plan upgradetypes.Plan, vm module.VersionMap) (module.VersionMap, error) {
+ logger := cosmostypes.UnwrapSDKContext(ctx).Logger()
+ logger.Info("Starting the migration in defaultUpgradeHandler")
return mm.RunMigrations(ctx, configurator, vm)
}
}
@@ -87,3 +90,10 @@ var Upgrade_0_0_4 = Upgrade{
// No changes to the KVStore in this upgrade.
StoreUpgrades: storetypes.StoreUpgrades{},
}
+
+// Upgrade_0_0_9 is a small upgrade on TestNet.
+var Upgrade_0_0_9 = Upgrade{
+ PlanName: "v0.0.9",
+ CreateUpgradeHandler: defaultUpgradeHandler,
+ StoreUpgrades: storetypes.StoreUpgrades{},
+}
diff --git a/app/upgrades/types.go b/app/upgrades/types.go
index 637544cee..3bba73629 100644
--- a/app/upgrades/types.go
+++ b/app/upgrades/types.go
@@ -21,10 +21,10 @@ const (
)
// Upgrade represents a protocol upgrade in code.
-// Once a `MsgSoftwareUpgrade` is submitted on-chain, and `Upgrade.PlanName` matches the `Plan.Name`,
+// Once a `MsgSoftwareUpgrade` is submitted onchain, and `Upgrade.PlanName` matches the `Plan.Name`,
// the upgrade will be scheduled for execution at the corresponding height.
type Upgrade struct {
- // PlanName is a name an upgrade is matched to from the on-chain `upgradetypes.Plan`.
+ // PlanName is a name an upgrade is matched to from the onchain `upgradetypes.Plan`.
PlanName string
// CreateUpgradeHandler returns an upgrade handler that will be executed at the time of the upgrade.
diff --git a/app/upgrades/v0.0.11.go b/app/upgrades/v0.0.11.go
new file mode 100644
index 000000000..615feecef
--- /dev/null
+++ b/app/upgrades/v0.0.11.go
@@ -0,0 +1,98 @@
+package upgrades
+
+import (
+ "context"
+
+ storetypes "cosmossdk.io/store/types"
+ upgradetypes "cosmossdk.io/x/upgrade/types"
+ cosmosTypes "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ "github.com/pokt-network/poktroll/app/keepers"
+ sessiontypes "github.com/pokt-network/poktroll/x/session/types"
+ tokenomicstypes "github.com/pokt-network/poktroll/x/tokenomics/types"
+)
+
+// Upgrade_0_0_11 is the upgrade handler for v0.0.11 Alpha TestNet upgrade
+// Beta TestNet was launched with v0.0.11, so this upgrade is exclusively for Alpha TestNet.
+// - Before: v0.0.10
+// - After: v0.0.11
+var Upgrade_0_0_11 = Upgrade{
+ PlanName: "v0.0.11",
+ CreateUpgradeHandler: func(mm *module.Manager,
+ keepers *keepers.Keepers,
+ configurator module.Configurator,
+ ) upgradetypes.UpgradeHandler {
+ // Adds new parameters using ignite's config.yml as a reference. Assuming we don't need any other parameters.
+ // https://github.com/pokt-network/poktroll/compare/v0.0.10...v0.0.11-rc
+ applyNewParameters := func(ctx context.Context) (err error) {
+ logger := cosmosTypes.UnwrapSDKContext(ctx).Logger()
+ logger.Info("Starting parameter updates for v0.0.11")
+
+ // Set num_suppliers_per_session to 15
+ // Validate with: `poktrolld q session params --node=https://testnet-validated-validator-rpc.poktroll.com/`
+ sessionParams := sessiontypes.Params{
+ NumSuppliersPerSession: uint64(15),
+ }
+
+ // ALL parameters must be present when setting params.
+ err = keepers.SessionKeeper.SetParams(ctx, sessionParams)
+ if err != nil {
+ logger.Error("Failed to set session params", "error", err)
+ return err
+ }
+ logger.Info("Successfully updated session params", "new_params", sessionParams)
+
+ // Set tokenomics params. The values are based on default values for LocalNet/Beta TestNet.
+ // Validate with: `poktrolld q tokenomics params --node=https://testnet-validated-validator-rpc.poktroll.com/`
+ tokenomicsParams := tokenomicstypes.Params{
+ MintAllocationPercentages: tokenomicstypes.MintAllocationPercentages{
+ Dao: 0.1,
+ Proposer: 0.05,
+ Supplier: 0.7,
+ SourceOwner: 0.15,
+ Application: 0.0,
+ },
+ DaoRewardAddress: AlphaTestNetPnfAddress,
+ }
+
+ // ALL parameters must be present when setting params.
+ err = keepers.TokenomicsKeeper.SetParams(ctx, tokenomicsParams)
+ if err != nil {
+ logger.Error("Failed to set tokenomics params", "error", err)
+ return err
+ }
+ logger.Info("Successfully updated tokenomics params", "new_params", tokenomicsParams)
+
+ return
+ }
+
+ // The diff shows that the only new authz authorization is for the `poktroll.session.MsgUpdateParam` message.
+ // However, this message is already authorized for the `pokt10d07y265gmmuvt4z0w9aw880jnsr700j8yv32t` address.
+ // See here: poktrolld q authz grants-by-granter pokt10d07y265gmmuvt4z0w9aw880jnsr700j8yv32t --node=https://shannon-testnet-grove-seed-rpc.alpha.poktroll.com
+		// If this upgrade were to be applied to other networks, we would add a separate upgrade handler for each network.
+
+ // Returns the upgrade handler for v0.0.11
+ return func(ctx context.Context, plan upgradetypes.Plan, vm module.VersionMap) (module.VersionMap, error) {
+ logger := cosmosTypes.UnwrapSDKContext(ctx).Logger()
+ logger.Info("Starting v0.0.11 upgrade handler")
+
+ err := applyNewParameters(ctx)
+ if err != nil {
+ logger.Error("Failed to apply new parameters", "error", err)
+ return vm, err
+ }
+
+ logger.Info("Running module migrations")
+ vm, err = mm.RunMigrations(ctx, configurator, vm)
+ if err != nil {
+ logger.Error("Failed to run migrations", "error", err)
+ return vm, err
+ }
+
+ logger.Info("Successfully completed v0.0.11 upgrade handler")
+ return vm, nil
+ }
+ },
+ // No changes to the KVStore in this upgrade.
+ StoreUpgrades: storetypes.StoreUpgrades{},
+}
diff --git a/cmd/poktrolld/cmd/config.go b/cmd/poktrolld/cmd/config.go
index ae9520a41..89536a6a1 100644
--- a/cmd/poktrolld/cmd/config.go
+++ b/cmd/poktrolld/cmd/config.go
@@ -21,12 +21,10 @@ type PoktrollAppConfig struct {
}
// poktrollAppConfigDefaults sets default values to render in `app.toml`.
-// Checkout `customAppConfigTemplate()` for additional information about each setting.
+// Check out `customAppConfigTemplate()` for additional information about each config parameter.
func poktrollAppConfigDefaults() PoktrollAppConfig {
return PoktrollAppConfig{
- Telemetry: telemetry.PoktrollTelemetryConfig{
- CardinalityLevel: "medium",
- },
+ Telemetry: telemetry.DefaultConfig(),
}
}
@@ -104,7 +102,6 @@ func initCometBFTConfig() *cmtcfg.Config {
// return "", nil if no custom configuration is required for the application.
// TODO_MAINNET: Reconsider values - check `app.toml` for possible options.
func initAppConfig() (string, interface{}) {
- // The following code snippet is just for reference.
type CustomAppConfig struct {
serverconfig.Config `mapstructure:",squash"`
Poktroll PoktrollAppConfig `mapstructure:"poktroll"`
@@ -140,6 +137,7 @@ func initAppConfig() (string, interface{}) {
srvCfg.GRPC.Enable = true
srvCfg.GRPCWeb.Enable = true
+ // Create the custom config with both server and poktroll configs
customAppConfig := CustomAppConfig{
Config: *srvCfg,
Poktroll: poktrollAppConfigDefaults(),
diff --git a/config.yml b/config.yml
index 0554fcf5d..cbd3fbe25 100644
--- a/config.yml
+++ b/config.yml
@@ -104,6 +104,20 @@ validators:
instrumentation:
prometheus: true
log_level: "info"
+ # Increase the rpc and mempool max bytes to support large transactions.
+ # DEV_NOTE: These values were selected arbitrarily, but chosen to be large,
+ # as a result of load testing and seeing large proof messages during the
+ # Claim & Proof lifecycle.
+ rpc:
+ # Controls how large any single RPC request accepted by the CometBFT
+ # server (offchain) can be.
+ max_body_bytes: "100000000"
+ mempool:
+ # Control how big any single transaction accepted by the CometBFT server
+ # (offchain) can be.
+ # Since multiple messages are bundled into a single transaction,
+ # max_tx_bytes needs to be increased alongside max_txs_bytes as well.
+ max_tx_bytes: "100000000"
client:
chain-id: poktroll
@@ -215,7 +229,7 @@ genesis:
url: http://relayminer1:8545
rev_share:
- address: pokt19a3t4yunp0dlpfjrp7qwnzwlrzd5fzs2gjaaaj
- rev_share_percentage: "100"
+ rev_share_percentage: 100
- service_id: rest
endpoints:
- configs: []
@@ -223,7 +237,7 @@ genesis:
url: http://relayminer1:8545
rev_share:
- address: pokt19a3t4yunp0dlpfjrp7qwnzwlrzd5fzs2gjaaaj
- rev_share_percentage: "100"
+ rev_share_percentage: 100
- service_id: ollama
endpoints:
- configs: []
@@ -231,7 +245,7 @@ genesis:
url: http://relayminer1:8545
rev_share:
- address: pokt19a3t4yunp0dlpfjrp7qwnzwlrzd5fzs2gjaaaj
- rev_share_percentage: "100"
+ rev_share_percentage: 100
stake:
# NB: This value should be exactly 1upokt smaller than the value in
# `supplier1_stake_config.yaml` so that the stake command causes a state change.
diff --git a/docusaurus/docs/README.md b/docusaurus/docs/README.md
index f226dc1d4..1de7ee4d7 100644
--- a/docusaurus/docs/README.md
+++ b/docusaurus/docs/README.md
@@ -57,7 +57,7 @@ You can view the Shannon Roadmap on [Github](https://github.com/orgs/pokt-networ
## Quickstart
-The best way to get involved is by following the [quickstart instructions](./develop/developer_guide/quickstart.md).
+The best way to get involved is by following the [quickstart instructions](develop/developer_guide/walkthrough.md).
## Godoc
diff --git a/docusaurus/docs/develop/contributing/code_review_guidelines.md b/docusaurus/docs/develop/contributing/code_review_guidelines.md
index 287b5d5d7..41f892a97 100644
--- a/docusaurus/docs/develop/contributing/code_review_guidelines.md
+++ b/docusaurus/docs/develop/contributing/code_review_guidelines.md
@@ -3,7 +3,7 @@ sidebar_position: 3
title: Code Review Guidelines
---
-# Code Review Guidelines
+## Code Review Guidelines
:::note
This is a living document and will be updated as the ecosystem matures & grows.
diff --git a/docusaurus/docs/develop/developer_guide/adding_params.md b/docusaurus/docs/develop/developer_guide/adding_params.md
index df8bf2c7d..529d68db6 100644
--- a/docusaurus/docs/develop/developer_guide/adding_params.md
+++ b/docusaurus/docs/develop/developer_guide/adding_params.md
@@ -1,9 +1,9 @@
---
sidebar_position: 5
-title: Adding On-Chain Module Parameters
+title: Adding Onchain Module Parameters
---
-# Adding On-Chain Module Parameters
+# Adding Onchain Module Parameters
- [Step-by-Step Instructions](#step-by-step-instructions)
- [0. If the Module Doesn't Already Support a `MsgUpdateParam` Message](#0-if-the-module-doesnt-already-support-a-msgupdateparam-message)
@@ -37,7 +37,7 @@ title: Adding On-Chain Module Parameters
- [7.2 Create a new JSON File for the Individual Parameter Update](#72-create-a-new-json-file-for-the-individual-parameter-update)
- [7.3 Update the JSON File for Updating All Parameters for the Module](#73-update-the-json-file-for-updating-all-parameters-for-the-module)
-Adding a new on-chain module parameter involves multiple steps to ensure that the
+Adding a new onchain module parameter involves multiple steps to ensure that the
parameter is properly integrated into the system. This guide will walk you through
the process using a generic approach, illustrated by adding a parameter to the `proof` module.
diff --git a/docusaurus/docs/develop/developer_guide/chain_halt_troubleshooting.md b/docusaurus/docs/develop/developer_guide/chain_halt_troubleshooting.md
index 72da1f4f3..5b32f5cda 100644
--- a/docusaurus/docs/develop/developer_guide/chain_halt_troubleshooting.md
+++ b/docusaurus/docs/develop/developer_guide/chain_halt_troubleshooting.md
@@ -8,13 +8,15 @@ title: Chain Halt Troubleshooting
- [Understanding Chain Halts](#understanding-chain-halts)
- [Definition and Causes](#definition-and-causes)
- [Impact on Network](#impact-on-network)
-- [Troubleshooting Process](#troubleshooting-process)
+- [Troubleshooting `wrong Block.Header.AppHash`](#troubleshooting-wrong-blockheaderapphash)
- [Step 1: Identifying the Issue](#step-1-identifying-the-issue)
- [Step 2: Collecting Node Data](#step-2-collecting-node-data)
- [Step 3: Analyzing Discrepancies](#step-3-analyzing-discrepancies)
- [Step 4: Decoding and Interpreting Data](#step-4-decoding-and-interpreting-data)
- [Step 5: Comparing Records](#step-5-comparing-records)
- [Step 6: Investigation and Resolution](#step-6-investigation-and-resolution)
+- [Troubleshooting `wrong Block.Header.LastResultsHash`](#troubleshooting-wrong-blockheaderlastresultshash)
+- [Syncing from genesis](#syncing-from-genesis)
## Understanding Chain Halts
@@ -40,7 +42,7 @@ Chain halts can have severe consequences for the network:
Given these impacts, swift and effective troubleshooting is crucial to maintain network health and user trust.
-## Troubleshooting Process
+## Troubleshooting `wrong Block.Header.AppHash`
### Step 1: Identifying the Issue
@@ -94,3 +96,20 @@ Based on the identified discrepancies:
2. Develop a fix or patch to address the issue.
3. If necessary, initiate discussions with the validator community to reach social consensus on how to proceed.
4. Implement the agreed-upon solution and monitor the network closely during and after the fix.
+
+## Troubleshooting `wrong Block.Header.LastResultsHash`
+
+Errors like the following can occur from using the incorrect binary version at a certain height.
+
+```bash
+reactor validation error: wrong Block.Header.LastResultsHash.
+```
+
+The solution is to use the correct binary version to sync the full node at the correct height.
+
+Tools like [cosmosvisor](https://docs.cosmos.network/v0.45/run-node/cosmovisor.html) make it easier
+to sync a node from genesis by automatically using the appropriate binary for each range of block heights.
+
+## Syncing from genesis
+
+If you're encountering any of the errors mentioned above while trying to sync the historical blocks, make sure you're running the correct version of the binary in accordance with the [Upgrade List](../../protocol/upgrades/upgrade_list.md) table.
diff --git a/docusaurus/docs/develop/developer_guide/recovery_from_chain_halt.md b/docusaurus/docs/develop/developer_guide/recovery_from_chain_halt.md
new file mode 100644
index 000000000..d1ca5a069
--- /dev/null
+++ b/docusaurus/docs/develop/developer_guide/recovery_from_chain_halt.md
@@ -0,0 +1,196 @@
+---
+sidebar_position: 7
+title: Chain Halt Recovery
+---
+
+## Chain Halt Recovery
+
+This document describes how to recover from a chain halt.
+
+It assumes that the cause of the chain halt has been identified, and that the
+new release has been created and verified to function correctly.
+
+:::tip
+
+See [Chain Halt Troubleshooting](./chain_halt_troubleshooting.md) for more information on identifying the cause of a chain halt.
+
+:::
+
+- [Background](#background)
+- [Resolving halts during a network upgrade](#resolving-halts-during-a-network-upgrade)
+ - [Manual binary replacement (preferred)](#manual-binary-replacement-preferred)
+ - [Rollback, fork and upgrade](#rollback-fork-and-upgrade)
+ - [Troubleshooting](#troubleshooting)
+ - [Data rollback - retrieving snapshot at a specific height (step 5)](#data-rollback---retrieving-snapshot-at-a-specific-height-step-5)
+ - [Validator Isolation - risks (step 6)](#validator-isolation---risks-step-6)
+
+## Background
+
+Pocket Network is built on top of `cosmos-sdk`, which utilizes the CometBFT consensus engine.
+Comet's Byzantine Fault Tolerant (BFT) consensus algorithm requires that **at least** 2/3 of Validators
+are online and voting for the same block to reach a consensus. In order to maintain liveness
+and avoid a chain-halt, we need the majority (> 2/3) of Validators to participate
+and use the same version of the software.
+
+## Resolving halts during a network upgrade
+
+If the halt is caused by the network upgrade, it is possible the solution can be as simple as
+skipping an upgrade (i.e. `unsafe-skip-upgrade`) and creating a new (fixed) upgrade.
+
+Read more about [upgrade contingency plans](../../protocol/upgrades/contigency_plans.md).
+
+### Manual binary replacement (preferred)
+
+:::note
+
+This is the preferred way of resolving consensus-breaking issues.
+
+**Significant side effect**: this breaks the ability to sync from genesis **without manual intervention**.
+For example, when a consensus-breaking issue occurs on a node that is syncing from the first block, node operators need
+to manually replace the binary with the new one. There are efforts underway to mitigate this issue, including
+configuration for `cosmovisor` that could automate the process.
+
+
+
+:::
+
+Since the chain is not moving, **it is impossible** to issue an automatic upgrade with an upgrade plan. Instead,
+we need **social consensus** to manually replace the binary and get the chain moving.
+
+The steps to doing so are:
+
+1. Prepare and verify a new binary that addresses the consensus-breaking issue.
+2. Reach out to the community and validators so they can upgrade the binary manually.
+3. Update [the documentation](../../protocol/upgrades/upgrade_list.md) to include the range of heights at which the binary needs
+ to be replaced.
+
+:::warning
+
+TODO_MAINNET(@okdas):
+
+1. **For step 2**: Investigate if the CometBFT rounds/steps need to be aligned as in Morse chain halts. See [this ref](https://docs.cometbft.com/v1.0/spec/consensus/consensus).
+2. **For step 3**: Add `cosmovisor` documentation so it's configured to automatically replace the binary when syncing from genesis.
+
+:::
+
+```mermaid
+sequenceDiagram
+ participant DevTeam
+ participant Community
+ participant Validators
+ participant Documentation
+ participant Network
+
+ DevTeam->>DevTeam: 1. Prepare and verify new binary
+ DevTeam->>Community: 2. Announce new binary and instructions
+ DevTeam->>Validators: 2. Notify validators to upgrade manually
+ Validators->>Validators: 2. Manually replace the binary
+ Validators->>Network: 2. Restart nodes with new binary
+ DevTeam->>Documentation: 3. Update documentation (GitHub Release and Upgrade List to include instructions)
+ Validators-->>Network: Network resumes operation
+
+```
+
+### Rollback, fork and upgrade
+
+:::info
+
+These instructions are only relevant to Pocket Network's Shannon release.
+
+We do not currently use `x/gov` or onchain voting for upgrades.
+Instead, all participants in our DAO vote on upgrades offchain, and the Foundation
+executes transactions on their behalf.
+
+:::
+
+:::warning
+
+This should be avoided or more testing is required. In our tests, the full nodes were
+propagating the existing blocks signed by the Validators, making it hard to roll back.
+
+:::
+
+**Performing a rollback is analogous to forking the network at the older height.**
+
+However, if necessary, the instructions to follow are:
+
+1. Prepare & verify a new binary that addresses the consensus-breaking issue.
+2. [Create a release](../../protocol/upgrades/release_process.md).
+3. [Prepare an upgrade transaction](../../protocol/upgrades/upgrade_procedure.md#writing-an-upgrade-transaction) to the new version.
+4. Disconnect the `Validator set` from the rest of the network **3 blocks** prior to the height of the chain halt. For example:
+ - Assume an issue at height `103`.
+ - Revert the `validator set` to height `100`.
+ - Submit an upgrade transaction at `101`.
+ - Upgrade the chain at height `102`.
+ - Avoid the issue at height `103`.
+5. Ensure all validators have rolled back to the same height and use the same snapshot ([how to get a snapshot](#data-rollback---retrieving-snapshot-at-a-specific-height-step-5))
+ - The snapshot should be imported into each Validator's data directory.
+ - This is necessary to ensure data continuity and prevent forks.
+6. Isolate the `validator set` from full nodes - ([why this is necessary](#validator-isolation---risks-step-6)).
+ - This is necessary to avoid full nodes from gossiping blocks that have been rolled back.
+ - This may require using a firewall or a private network.
+ - Validators should only be permitted to gossip blocks amongst themselves.
+7. Start the `validator set` and perform the upgrade. For example, reiterating the process above:
+ - Start all Validators at height `100`.
+ - On block `101`, submit the `MsgSoftwareUpgrade` transaction with a `Plan.height` set to `102`.
+ - `x/upgrade` will perform the upgrade in the `EndBlocker` of block `102`.
+ - The node will halt with an error, waiting for the upgrade to be performed.
+ - Cosmovisor deployments automatically replace the binary.
+ - Manual deployments will require a manual replacement at this point.
+ - Start the node back up.
+8. Wait for the network to reach the height of the previous ledger (`104`+).
+9. Allow validators to open their network to full nodes again.
+ - **Note**: full nodes will need to perform the rollback or use a snapshot as well.
+
+```mermaid
+sequenceDiagram
+ participant DevTeam
+ participant Foundation
+ participant Validators
+ participant FullNodes
+ %% participant Network
+
+ DevTeam->>DevTeam: 1. Prepare & verify new binary
+ DevTeam->>DevTeam: 2 & 3. Create a release & prepare upgrade transaction
+ Validators->>Validators: 4 & 5. Roll back to height before issue or import snapshot
+ Validators->>Validators: 6. Isolate from Full Nodes
+ Foundation->>Validators: 7. Distribute upgrade transaction
+ Validators->>Validators: 7. Start network and perform upgrade
+
+ break
+ Validators->>Validators: 8. Wait until previously problematic height elapses
+ end
+
+ Validators-->FullNodes: 9. Open network connections
+ FullNodes-->>Validators: 9. Sync with updated network
+ note over Validators,FullNodes: Network resumes operation
+```
+
+### Troubleshooting
+
+#### Data rollback - retrieving snapshot at a specific height (step 5)
+
+There are two ways to get a snapshot from a prior height:
+
+1. Execute
+
+ ```bash
+ poktrolld rollback --hard
+ ```
+
+ repeatedly, until the command responds with the desired block number.
+
+2. Use a snapshot from below the halt height (e.g. `100`) and start the node with `--halt-height=100` parameter so it only syncs up to certain height and then
+ gracefully shuts down. Add this argument to `poktrolld start` like this:
+
+ ```bash
+ poktrolld start --halt-height=100
+ ```
+
+#### Validator Isolation - risks (step 6)
+
+Having at least one node that has knowledge of the forking ledger can jeopardize the whole process. In particular, the
+following errors in logs are a sign that nodes are syncing blocks from the wrong fork:
+
+- `found conflicting vote from ourselves; did you unsafe_reset a validator?`
+- `conflicting votes from validator`
diff --git a/docusaurus/docs/develop/developer_guide/testing/integration_suites.md b/docusaurus/docs/develop/developer_guide/testing/integration_suites.md
index 652857de4..655117cb1 100644
--- a/docusaurus/docs/develop/developer_guide/testing/integration_suites.md
+++ b/docusaurus/docs/develop/developer_guide/testing/integration_suites.md
@@ -35,7 +35,7 @@ The `testutil/integration/suites` package contains multiple **app integration su
### Example (`ParamsSuite`)
-The following example shows a test suite which embeds `suites.ParamsSuite`, in order to set on-chain module params as part of its `SetupTest()` method:
+The following example shows a test suite which embeds `suites.ParamsSuite`, in order to set onchain module params as part of its `SetupTest()` method:
```go
package suites
diff --git a/docusaurus/docs/develop/developer_guide/testing/testing_levels.md b/docusaurus/docs/develop/developer_guide/testing/testing_levels.md
index 49ccee9d5..88d9dc75f 100644
--- a/docusaurus/docs/develop/developer_guide/testing/testing_levels.md
+++ b/docusaurus/docs/develop/developer_guide/testing/testing_levels.md
@@ -123,7 +123,7 @@ This level of testing ensures that the appchain behaves as expected in a multi-v
## [End-to-End Tests](e2e)
-**End-to-end tests** focus on testing the behavior of a network containing both on- and off-chain actors; typically exercising "localnet".
+**End-to-end tests** focus on testing the behavior of a network containing both on- and offchain actors; typically exercising "localnet".
### E2E Test Example
@@ -133,9 +133,9 @@ This level of testing ensures that the appchain behaves as expected in a multi-v
### E2E Test - Good Fit
-- Asserts or dependent on off-chain assertions
-- Asserts or dependent on off-chain actors
-- Asserts or dependent on off-chain behavior
+- Asserts or dependent on offchain assertions
+- Asserts or dependent on offchain actors
+- Asserts or dependent on offchain behavior
### E2E Test - Bad Fit
@@ -147,6 +147,6 @@ This level of testing ensures that the appchain behaves as expected in a multi-v
- Depends on LocalNet to be running and healthy
- Depends on other environments (DevNet/TestNet) to be running and healthy
-- Shared mutable network state on-chain
-- Shared mutable network state off-chain
+- Shared mutable network state onchain
+- Shared mutable network state offchain
- Intolerant of non-idempotent operations (CI re-runnability).
diff --git a/docusaurus/docs/develop/developer_guide/quickstart.md b/docusaurus/docs/develop/developer_guide/walkthrough.md
similarity index 97%
rename from docusaurus/docs/develop/developer_guide/quickstart.md
rename to docusaurus/docs/develop/developer_guide/walkthrough.md
index de78ec45a..da5f30ed3 100644
--- a/docusaurus/docs/develop/developer_guide/quickstart.md
+++ b/docusaurus/docs/develop/developer_guide/walkthrough.md
@@ -1,11 +1,11 @@
---
sidebar_position: 1
-title: Quickstart
+title: Walkthrough
---
import ReactPlayer from "react-player";
-# Quickstart
+## Walkthrough
:::info
The goal of this document is to get you up and running with a LocalNet, some
@@ -105,7 +105,7 @@ This section will help you deploy a POKT LocalNet in a k8s cluster on your machi
and inspect it so you have an idea of what's going on!
We'll be manually configuring a few actors to run in your shell for the sake of
-the tutorial so you have visibility into the types of on-chain and off-chain
+the tutorial so you have visibility into the types of onchain and offchain
actors. In practice, you should be using [localnet](../../operate/infrastructure/localnet.md)
to dynamically scale your actors.
@@ -339,8 +339,8 @@ make acc_balance_query ACC=$SHANNON_APPLICATION
## 3. Manually Stake a Supplier & Deploy a RelayMiner
-As we mentioned earlier, if you want to understand the different on-chain actors
-and off-chain operators in POKT Network, look at the docs [here](../../protocol/actors/actors.md).
+As we mentioned earlier, if you want to understand the different onchain actors
+and offchain operators in POKT Network, look at the docs [here](../../protocol/actors/actors.md).
If you just want to follow instructions to make things work and get your hands
dirty, keep reading.
@@ -404,7 +404,7 @@ EOF
### 3.4 Stake the new Supplier
-Stake the `shannon_supplier` on-chain:
+Stake the `shannon_supplier` onchain:
```bash
poktrolld \
@@ -511,7 +511,7 @@ EOF
### 4.3 Stake the new Application
-Stake the application on-chain:
+Stake the application onchain:
```bash
poktrolld --home=./localnet/poktrolld \
@@ -548,7 +548,7 @@ You must run `make acc_initialize_pubkeys` before sending a relay in order for
the public keys to be initialized correctly.
See the [x/auth](https://docs.cosmos.network/main/build/modules/auth) for more
-information on how public keys are stored and accessible on-chain.
+information on how public keys are stored and accessible onchain.
:::
@@ -578,8 +578,8 @@ The Relay Request/Response from is captured in the sequence diagram below.
sequenceDiagram
actor U as User
(curl Client)
- participant PG as PATH Gateway
(off-chain Application Operator)
- participant RM as RelayMiner
(off-chain Supplier Operator)
+ participant PG as PATH Gateway
(offchain Application Operator)
+ participant RM as RelayMiner
(offchain Supplier Operator)
participant anvil as ETH Node
(Anvil)
U ->> +PG: eth_blockNumber
(JSON-RPC Request)
@@ -600,8 +600,8 @@ provide some intuition:
sequenceDiagram
participant RM as RelayMiner
(Supplier Operator)
participant P as Pocket Node
- actor A as Application
(on-chain Record)
- actor S as Supplier
(on-chain Record)
+ actor A as Application
(onchain Record)
+ actor S as Supplier
(onchain Record)
RM -->> P: CreateClaim
(Relays Served)
RM -->> P: SubmitProof
(Proof of Work)
diff --git a/docusaurus/docs/operate/configs/relayminer_config.md b/docusaurus/docs/operate/configs/relayminer_config.md
index 8931d03c2..3b1272b98 100644
--- a/docusaurus/docs/operate/configs/relayminer_config.md
+++ b/docusaurus/docs/operate/configs/relayminer_config.md
@@ -47,19 +47,19 @@ You can find a fully featured example configuration at [relayminer_config_full_e
## Introduction
-The following diagram illustrates how the _off-chain_ `RelayMiner` operator
-config (yaml) MUST match the _on-chain_ `Supplier` actor service endpoints
+The following diagram illustrates how the _offchain_ `RelayMiner` operator
+config (yaml) MUST match the _onchain_ `Supplier` actor service endpoints
for correct and deterministic behavior.
If these do not match, the behavior is non-deterministic and could result in
a variety of errors such as bad QoS, incorrect proxying, burning of the actor, etc...
-_Assuming that the on-chain endpoints 1 and 2 have different hosts_
+_Assuming that the onchain endpoints 1 and 2 have different hosts_
```mermaid
flowchart LR
-subgraph "Supplier Actor (On-Chain)"
+subgraph "Supplier Actor (Onchain)"
subgraph "SupplierServiceConfig (protobuf)"
subgraph svc1["Service1 (protobuf)"]
svc1Id[Service1.Id]
@@ -74,7 +74,7 @@ subgraph "Supplier Actor (On-Chain)"
end
end
-subgraph "RelayMiner Operator (Off-Chain)"
+subgraph "RelayMiner Operator (Offchain)"
subgraph "DevOps Operator Configs (yaml)"
subgraph svc1Config ["Service1 Config (yaml)"]
svc1IdConfig[service_id=Service1.Id]-->svc1Id
@@ -214,9 +214,9 @@ The `suppliers` section configures the services that the `RelayMiner` will offer
to Pocket Network. It specifies exactly where those requests will be forwarded
to by the Supplier's infrastructure.
-Each suppliers entry's `service_id` MUST reflect the on-chain `Service.Id` the
+Each suppliers entry's `service_id` MUST reflect the onchain `Service.Id` the
supplier staked for. In addition, the `publicly_exposed_endpoints` list MUST
-contain the same endpoints that the Supplier advertised on-chain when staking for
+contain the same endpoints that the Supplier advertised onchain when staking for
that service.
At least one supplier is required for the `RelayMiner` to be functional.
@@ -306,11 +306,11 @@ _`Required`_, _`Unique` within the supplier's `publicly_exposed_endpoints` list_
The `publicly_exposed_endpoints` section of the supplier configuration is a list
of hosts that the `RelayMiner` will accept requests from. It MUST be a valid host
-that reflects the on-chain supplier staking service endpoints.
+that reflects the onchain supplier staking service endpoints.
It is used to determine if the incoming request is allowed to be processed by
the server listening on `listen_url` host address as well as to check if the
-request's RPC-Type matches the on-chain endpoint's RPC-Type.
+request's RPC-Type matches the onchain endpoint's RPC-Type.
:::note
@@ -325,14 +325,14 @@ and/or send requests internally from a k8s cluster for example.
There are various reasons to having multiple `publicly_exposed_endpoints`
for the same supplier service.
-- The on-chain Supplier may provide the same Service on multiple domains
+- The onchain Supplier may provide the same Service on multiple domains
(e.g. for different regions).
- The operator may want to route requests of different RPC types to
the same server
- Migrating from one domain to another. Where the operator could still
accept requests on the old domain while the new domain is being propagated.
- The operator may want to have a different domain for internal requests.
-- The on-chain Service configuration accepts multiple endpoints.
+- The onchain Service configuration accepts multiple endpoints.
## Configuring Signing Keys
diff --git a/docusaurus/docs/operate/configs/supplier_staking_config.md b/docusaurus/docs/operate/configs/supplier_staking_config.md
index 5f5f8b305..41ba22758 100644
--- a/docusaurus/docs/operate/configs/supplier_staking_config.md
+++ b/docusaurus/docs/operate/configs/supplier_staking_config.md
@@ -92,7 +92,7 @@ flowchart TD
NCS ---> |owner_address or operator_address| US
US -- funds --> OWA{{owner_address}}
- US -- remove on-chain record --> OPA{{owner_address or operator_address}}
+ US -- remove onchain record --> OPA{{owner_address or operator_address}}
classDef owner fill:#f9f, stroke:#333, stroke-width:2px, color:#222;
classDef operator fill:#eba69a, color: #333, stroke:#333, stroke-width:2px;
@@ -214,8 +214,8 @@ _`Optional`_, _`Non-empty`_
```yaml
default_rev_share_percent:
- :
- :
+ :
+ :
```
`default_rev_share_percent` is an optional map that defines the default the revenue
diff --git a/docusaurus/docs/operate/infrastructure/devnet.md b/docusaurus/docs/operate/infrastructure/devnet.md
index 48166bc00..3e79a0209 100644
--- a/docusaurus/docs/operate/infrastructure/devnet.md
+++ b/docusaurus/docs/operate/infrastructure/devnet.md
@@ -60,7 +60,7 @@ The following is a list of details to know how our DevNet infrastructure is prov
Each DevNet ArgoCD App (following the App of Apps pattern) provisions a Helm chart called [full-network](https://github.com/pokt-network/protocol-infra/tree/main/charts/full-network).
-Each `full-network` includes other ArgoCD applications that deploy Validators and off-chain actors.
+Each `full-network` includes other ArgoCD applications that deploy Validators and offchain actors.
Each Helm chart receives a list of configuration files. For example, see the [relayminer configuration](https://github.com/pokt-network/protocol-infra/blob/main/charts/full-network/templates/Application-Relayminer.yaml#L37). All possible values can be found in the `values.yaml` of the Helm chart, such as the [relayminer Helm chart](https://github.com/pokt-network/helm-charts/blob/main/charts/relayminer/values.yaml).
diff --git a/docusaurus/docs/operate/infrastructure/localnet.md b/docusaurus/docs/operate/infrastructure/localnet.md
index 76260c51c..582c2ca19 100644
--- a/docusaurus/docs/operate/infrastructure/localnet.md
+++ b/docusaurus/docs/operate/infrastructure/localnet.md
@@ -17,7 +17,7 @@ needed to send an end-to-end relay.
- [Developing with LocalNet](#developing-with-localnet)
- [localnet_config.yaml](#localnet_configyaml)
- [Scaling network actors](#scaling-network-actors)
- - [Off-chain actors configuration](#off-chain-actors-configuration)
+ - [Offchain actors configuration](#offchain-actors-configuration)
- [Modify Kubernetes workloads](#modify-kubernetes-workloads)
- [Observability](#observability)
- [Access dashboards with graphs and logs](#access-dashboards-with-graphs-and-logs)
@@ -75,7 +75,7 @@ relayers:
_NOTE: You may need to wait up to 1 minute for the new actors to be registered and deployed locally._
-### Off-chain actors configuration
+### Offchain actors configuration
We heavily use Helm charts for configuring LocalNet. The goal is to maximize the tooling involved in deploying production
workloads and local development.
diff --git a/docusaurus/docs/operate/quickstart/docker_compose_walkthrough.md b/docusaurus/docs/operate/quickstart/docker_compose_walkthrough.md
index e79bf73af..6123e5ce8 100644
--- a/docusaurus/docs/operate/quickstart/docker_compose_walkthrough.md
+++ b/docusaurus/docs/operate/quickstart/docker_compose_walkthrough.md
@@ -60,11 +60,11 @@ This is a text heavy walkthrough, but if all goes well, you should have somethin
- `Node` - A `Morse` actor that stakes to provide Relay services.
- In `Morse` - All `Validator` are nodes but only the top 1000 stakes `Node`s are `Validator`s
- This actor is not present in `Shannon` and decoupled into `Supplier` and a `RelayMiner`.
-- `Supplier` - The on-chain actor that stakes to provide Relay services.
+- `Supplier` - The onchain actor that stakes to provide Relay services.
- In `Shannon` - This actor needs access to a Full Node (sovereign or node).
-- `RelayMiner` - The off-chain service that provides Relay services on behalf of a `Supplier`.
+- `RelayMiner` - The offchain service that provides Relay services on behalf of a `Supplier`.
- In `Shannon` - This actor is responsible for providing the Relay services.
-- `PATH Gateway` - The off-chain service that provides Relay services on behalf of an `Application` or `Gateway`.
+- `PATH Gateway` - The offchain service that provides Relay services on behalf of an `Application` or `Gateway`.
For more details, please refer to the [Shannon actors documentation](https://dev.poktroll.com/actors).
@@ -76,7 +76,7 @@ multiple node types in the upcoming `Shannon` requires some explanation.
In `Shannon`, the `Supplier` role is separated from the `Full node` role.
In `Morse`, a `Validator` or a staked `Node` was responsible for both holding
-a copy of the on-chain data, as well as performing relays. With `Shannon`, the
+a copy of the onchain data, as well as performing relays. With `Shannon`, the
`RelayMiner` software, which runs the supplier logic, is distinct from the full-node/validator.
Furthermore, `Shannon` uses [`PATH Gateway`](https://github.com/buildwithgrove/path),
@@ -84,7 +84,7 @@ a software component that acts on behalf of either `Applications` or `Gateways`
to access services provided by Pocket Network `Supplier`s via `RelayMiners`.
The following diagram from the [actors](../../protocol/actors/) page captures the relationship
-between on-chain records (actors) and off-chain operators (servers).
+between onchain records (actors) and offchain operators (servers).
```mermaid
---
@@ -92,13 +92,13 @@ title: Actors
---
flowchart TB
- subgraph on-chain
+ subgraph onchain
A([Application])
G([Gateway])
S([Supplier])
end
- subgraph off-chain
+ subgraph offchain
PG[PATH Gateway]
RM[Relay Miner]
end
@@ -132,7 +132,7 @@ Make sure to replace `olshansky` with your username.
You can generally do everything as the `root` user, but it's recommended to
create a new user and give it sudo permissions.
-This is necessary, in particular, if you want to use [homebrew](https://brew.sh/) [to install `poktrolld`](../user_guide/install.md).
+This is necessary, in particular, if you want to use [homebrew](https://brew.sh/) [to install `poktrolld`](../user_guide/poktrolld_cli.md).
```bash
adduser poktroll
@@ -190,7 +190,7 @@ sed -i -e s/NODE_HOSTNAME=/NODE_HOSTNAME=69.42.690.420/g .env
You can generally do everything as the `root` user, but it's recommended to
create a new user and give it sudo permissions.
-This is necessary, in particular, if you want to use [homebrew](https://brew.sh/) [to install `poktrolld`](../user_guide/install.md).
+This is necessary, in particular, if you want to use [homebrew](https://brew.sh/) [to install `poktrolld`](../user_guide/poktrolld_cli.md).
You can create a new user (e.g. poktroll), provide sudo permissions and switch users like so:
@@ -325,7 +325,7 @@ which can be updated based on the `PATH Gateway` images available at
## B. Creating a Supplier and Deploying a RelayMiner
-A Supplier is an on-chain record that advertises services it'll provide.
+A Supplier is an onchain record that advertises services it'll provide.
A RelayMiner is an operator / service that provides services to offer on the Pocket Network.
@@ -702,5 +702,5 @@ done
Why?
- Suppliers may have been staked, but the RelayMiner is no longer running.
-- Pocket does not currently have on-chain quality-of-service
+- Pocket does not currently have onchain quality-of-service
- Pocket does not currently have supplier jailing
diff --git a/docusaurus/docs/operate/quickstart/gateway_cheatsheet.md b/docusaurus/docs/operate/quickstart/gateway_cheatsheet.md
index 03ee87cdd..2d3ab0937 100644
--- a/docusaurus/docs/operate/quickstart/gateway_cheatsheet.md
+++ b/docusaurus/docs/operate/quickstart/gateway_cheatsheet.md
@@ -39,7 +39,7 @@ streamline development and reduce friction for any new potential contributor.
## Pre-Requisites
-1. Make sure to [install the `poktrolld` CLI](../user_guide/install.md).
+1. Make sure to [install the `poktrolld` CLI](../user_guide/poktrolld_cli.md).
2. Make sure you know how to [create and fund a new account](../user_guide/create-new-wallet.md).
:::warning
diff --git a/docusaurus/docs/operate/quickstart/service_cheatsheet.md b/docusaurus/docs/operate/quickstart/service_cheatsheet.md
index 4e3304db5..eda5d4af5 100644
--- a/docusaurus/docs/operate/quickstart/service_cheatsheet.md
+++ b/docusaurus/docs/operate/quickstart/service_cheatsheet.md
@@ -14,7 +14,7 @@ title: Service Cheat Sheet
### Pre-Requisites
-1. Make sure to [install the `poktrolld` CLI](../user_guide/install.md).
+1. Make sure to [install the `poktrolld` CLI](../user_guide/poktrolld_cli.md).
2. Make sure you know how to [create and fund a new account](../user_guide/create-new-wallet.md).
### How do I query for all existing onchain Services?
diff --git a/docusaurus/docs/operate/quickstart/supplier_cheatsheet.md b/docusaurus/docs/operate/quickstart/supplier_cheatsheet.md
index 77a0f69e9..3f40c410e 100644
--- a/docusaurus/docs/operate/quickstart/supplier_cheatsheet.md
+++ b/docusaurus/docs/operate/quickstart/supplier_cheatsheet.md
@@ -39,7 +39,7 @@ streamline development and reduce friction for any new potential contributor.
## Pre-Requisites
-1. Make sure to [install the `poktrolld` CLI](../user_guide/install.md).
+1. Make sure to [install the `poktrolld` CLI](../user_guide/poktrolld_cli.md).
2. Make sure you know how to [create and fund a new account](../user_guide/create-new-wallet.md).
3. You have either [staked a new `service` or found an existing one](./service_cheatsheet.md).
4. `[Optional]` You can run things locally or have dedicated long-running hardware. See the [Docker Compose Cheat Sheet](./docker_compose_debian_cheatsheet#deploy-your-server) if you're interested in the latter.
@@ -62,7 +62,7 @@ This document is a cheat sheet to get you quickly started with two things:
1. Staking an onchain `Supplier`
2. Deploying an offchain `RelayMiner`
-By the end of it, you should be able to serve Relays off-chain, and claim on-chain rewards.
+By the end of it, you should be able to serve Relays offchain, and claim onchain rewards.
## Account Setup
@@ -277,5 +277,5 @@ poktrolld query supplier -h
Then, you can query for all services like so:
```bash
-poktrolld query supplier list-supplier --node https://shannon-testnet-grove-rpc.beta.poktroll.com --output json | jq
+poktrolld query supplier list-suppliers --node https://shannon-testnet-grove-rpc.beta.poktroll.com --output json | jq
```
diff --git a/docusaurus/docs/operate/run_a_node/full_node_docker.md b/docusaurus/docs/operate/run_a_node/full_node_docker.md
index 7fb3005da..5f70de6ce 100644
--- a/docusaurus/docs/operate/run_a_node/full_node_docker.md
+++ b/docusaurus/docs/operate/run_a_node/full_node_docker.md
@@ -44,7 +44,7 @@ There are two types of Full Nodes:
## Pocket Network Full Nodes
Within Pocket Network, the role of Full Nodes is pivotal for Node Runners. These
-nodes needed for off-chain entities like [RelayMiners (Suppliers)](./supplier_walkthrough.md) and
+nodes are needed for offchain entities like [RelayMiners (Suppliers)](./supplier_walkthrough.md) and
[Gateways](./gateway_walkthrough.md), which rely on interaction with the Pocket Network
blockchain for full functionality.
diff --git a/docusaurus/docs/operate/run_a_node/full_node_walkthrough.md b/docusaurus/docs/operate/run_a_node/full_node_walkthrough.md
index 2a19269fb..44ab1e221 100644
--- a/docusaurus/docs/operate/run_a_node/full_node_walkthrough.md
+++ b/docusaurus/docs/operate/run_a_node/full_node_walkthrough.md
@@ -130,7 +130,7 @@ source ~/.profile
### 5. Install `poktrolld`
-Follow the instructions in the [CLI Installation Guide](../user_guide/install.md) page to install `poktrolld`.
+Follow the instructions in the [CLI Installation Guide](../user_guide/poktrolld_cli.md) page to install `poktrolld`.
Create a symlink of the binary so Cosmovisor knows where to find it:
diff --git a/docusaurus/docs/operate/run_a_node/gateway_walkthrough.md b/docusaurus/docs/operate/run_a_node/gateway_walkthrough.md
index 5a9f0f6c6..209feb137 100644
--- a/docusaurus/docs/operate/run_a_node/gateway_walkthrough.md
+++ b/docusaurus/docs/operate/run_a_node/gateway_walkthrough.md
@@ -21,8 +21,8 @@ details on how to deploy and operate it.
A PATH Gateway requires the following:
-1. A staked on-chain [Application](../../protocol/actors/application.md) to pay for services.
-2. An optional on-chain [Gateway](../../protocol/actors/gateway.md) to optionally proxy services.
+1. A staked onchain [Application](../../protocol/actors/application.md) to pay for services.
+2. An optional onchain [Gateway](../../protocol/actors/gateway.md) to optionally proxy services.
3. A connection to a [Full Node](./full_node_docker.md) to interact with the blockchain.
:::tip
diff --git a/docusaurus/docs/operate/run_a_node/supplier_walkthrough.md b/docusaurus/docs/operate/run_a_node/supplier_walkthrough.md
index b119b641f..ea46f5749 100644
--- a/docusaurus/docs/operate/run_a_node/supplier_walkthrough.md
+++ b/docusaurus/docs/operate/run_a_node/supplier_walkthrough.md
@@ -23,7 +23,7 @@ details on how to deploy and operate it.
A RelayMiner requires the following:
-1. A staked on-chain [Supplier](../../protocol/actors/supplier.md) to provide services.
+1. A staked onchain [Supplier](../../protocol/actors/supplier.md) to provide services.
2. A connection to a [Full Node](./full_node_docker.md) to interact with the blockchain.
:::tip
diff --git a/docusaurus/docs/operate/testing/load_testing.md b/docusaurus/docs/operate/testing/load_testing.md
index f1b802c8c..4063e0314 100644
--- a/docusaurus/docs/operate/testing/load_testing.md
+++ b/docusaurus/docs/operate/testing/load_testing.md
@@ -71,7 +71,7 @@ To execute tests on LocalNet:
#### Interpreting Results
- The CLI output displays standard Go test results. Successful tests are indicated by `PASS`, while failures are denoted by `FAIL` with accompanying error messages.
-- During test execution, the observability stack continuously collects metric data from off-chain actors. On LocalNet, [Grafana is accessible on port 3003](http://localhost:3003/?orgId=1). The
+- During test execution, the observability stack continuously collects metric data from offchain actors. On LocalNet, [Grafana is accessible on port 3003](http://localhost:3003/?orgId=1). The
[Stress test](http://localhost:3003/d/ddkakqetrti4gb/protocol-stress-test?orgId=1&refresh=5s)
and [Load Testing](http://localhost:3003/d/fdjwb9u9t9ts0e/protocol-load-testing?orgId=1) dashboards provide valuable
insights into system status.
@@ -81,7 +81,7 @@ To execute tests on LocalNet:
These networks are generated with random addresses, necessitating modifications to the load test manifest to reflect network-specific accounts.
:::info
-Note: Such networks typically involve other participants, allowing load testing against off-chain actors deployed by third parties. Consequently, metrics and logs may not be available when testing against uncontrolled software. For comprehensive observability, consider creating a new service with custom gateways and suppliers, and conduct tests against this controlled environment.
+Note: Such networks typically involve other participants, allowing load testing against offchain actors deployed by third parties. Consequently, metrics and logs may not be available when testing against uncontrolled software. For comprehensive observability, consider creating a new service with custom gateways and suppliers, and conduct tests against this controlled environment.
:::
#### Prerequisites
diff --git a/docusaurus/docs/operate/testing/load_testing_devnet.md b/docusaurus/docs/operate/testing/load_testing_devnet.md
index 581c91b6a..dedae1936 100644
--- a/docusaurus/docs/operate/testing/load_testing_devnet.md
+++ b/docusaurus/docs/operate/testing/load_testing_devnet.md
@@ -20,7 +20,7 @@ We can create DevNets that are suitable for running load tests.
:::warning
DevNets created with GitHub PRs using `devnet-test-e2e` tags are not suitable for load testing, as they only provision a
-single instance of each off-chain actor. We can create custom DevNets with multiple instances of each off-chain actor for load testing purposes.
+single instance of each offchain actor. We can create custom DevNets with multiple instances of each offchain actor for load testing purposes.
:::
## Prerequisites
diff --git a/docusaurus/docs/operate/testing/load_testing_plan_1.md b/docusaurus/docs/operate/testing/load_testing_plan_1.md
index d497e82eb..9ad8e77ec 100644
--- a/docusaurus/docs/operate/testing/load_testing_plan_1.md
+++ b/docusaurus/docs/operate/testing/load_testing_plan_1.md
@@ -39,7 +39,7 @@ _This document outlines the first load test for the Shannon upgrade. IT **IS NOT
2. `Stress test` the SMT (Sparse Merkle Trie) and how it is being used
3. `Build intuition` into the cost of operating the network for all of the stakeholders involved, both on & off chain
4. `Gain visibility` into basic metrics (disk, RAM, CPU, ingress/egress traffic, etc.…) for our network actors
-5. `Uncover` potential bugs, bottlenecks or concurrency issues in the on-chain & off-chain code
+5. `Uncover` potential bugs, bottlenecks or concurrency issues in the onchain & offchain code
6. `Document and design` a process that’ll act as the foundation for future load-testing efforts
## Non-Goals
diff --git a/docusaurus/docs/operate/user_guide/check-balance.md b/docusaurus/docs/operate/user_guide/check-balance.md
index 98557295a..60897ca22 100644
--- a/docusaurus/docs/operate/user_guide/check-balance.md
+++ b/docusaurus/docs/operate/user_guide/check-balance.md
@@ -26,7 +26,7 @@ balance using the `poktrolld` command-line interface (CLI).
## Pre-requisites
-1. `poktrolld` is installed on your system; see the [installation guide](./install) for more details
+1. `poktrolld` is installed on your system; see the [installation guide](./poktrolld_cli.md) for more details
2. You have the address of the wallet you wish to check
3. You know the token denomination you wish to check; `upokt` for POKT tokens
diff --git a/docusaurus/docs/operate/user_guide/create-new-wallet.md b/docusaurus/docs/operate/user_guide/create-new-wallet.md
index 8df204d14..d5444dce7 100644
--- a/docusaurus/docs/operate/user_guide/create-new-wallet.md
+++ b/docusaurus/docs/operate/user_guide/create-new-wallet.md
@@ -51,7 +51,7 @@ refer to the [Cosmos SDK Keyring documentation](https://docs.cosmos.network/main
Ensure you have `poktrolld` installed on your system.
-Follow the [installation guide](./install) specific to your operating system.
+Follow the [installation guide](./poktrolld_cli.md) specific to your operating system.
## Step 2: Creating the Wallet
diff --git a/docusaurus/docs/operate/user_guide/install.md b/docusaurus/docs/operate/user_guide/poktrolld_cli.md
similarity index 98%
rename from docusaurus/docs/operate/user_guide/install.md
rename to docusaurus/docs/operate/user_guide/poktrolld_cli.md
index dab4f98b2..9ca5431b9 100644
--- a/docusaurus/docs/operate/user_guide/install.md
+++ b/docusaurus/docs/operate/user_guide/poktrolld_cli.md
@@ -1,5 +1,5 @@
---
-title: CLI Installation
+title: poktrolld CLI Installation
sidebar_position: 0
---
diff --git a/docusaurus/docs/operate/user_guide/recover-with-mnemonic.md b/docusaurus/docs/operate/user_guide/recover-with-mnemonic.md
index ca561ec80..6343667f9 100644
--- a/docusaurus/docs/operate/user_guide/recover-with-mnemonic.md
+++ b/docusaurus/docs/operate/user_guide/recover-with-mnemonic.md
@@ -24,7 +24,7 @@ seed phrase, recovering your account is straightforward!
## Pre-requisites
- You have the mnemonic seed phrase of the wallet you wish to recover
-- `poktrolld` is installed on your system; see the [installation guide](./install) for more details
+- `poktrolld` is installed on your system; see the [installation guide](./poktrolld_cli.md) for more details
## Step 1: Prepare to Recover Your Wallet
diff --git a/docusaurus/docs/operate/user_guide/send-tokens.md b/docusaurus/docs/operate/user_guide/send-tokens.md
index dbff2914d..6d3c5676c 100644
--- a/docusaurus/docs/operate/user_guide/send-tokens.md
+++ b/docusaurus/docs/operate/user_guide/send-tokens.md
@@ -17,7 +17,7 @@ Pocket Network using the `poktrolld` command-line interface (CLI).
## Pre-requisites
-1. `poktrolld` is installed on your system; see the [installation guide](./install) for more details
+1. `poktrolld` is installed on your system; see the [installation guide](./poktrolld_cli.md) for more details
2. You have access to your wallet with sufficient tokens for the transaction and fees
3. You have the recipient's address
diff --git a/docusaurus/docs/protocol/actors/actors.md b/docusaurus/docs/protocol/actors/actors.md
index d69371ce0..4b99768c1 100644
--- a/docusaurus/docs/protocol/actors/actors.md
+++ b/docusaurus/docs/protocol/actors/actors.md
@@ -6,21 +6,21 @@ sidebar_position: 1
# Pocket Network Actors
- [Overview](#overview)
-- [On-Chain Actors](#on-chain-actors)
+- [Onchain Actors](#onchain-actors)
- [Risks \& Misbehavior](#risks--misbehavior)
-- [Off-Chain Actors](#off-chain-actors)
+- [Offchain Actors](#offchain-actors)
## Overview
-Pocket Network protocol is composed of both on-chain and off-chain actors.
+Pocket Network protocol is composed of both onchain and offchain actors.
-There are 3 on-chain actors:
+There are 3 onchain actors:
- [Applications](./application.md)
- [Suppliers](./supplier.md)
- [Gateways](./gateway.md)
-There are 2 off-chain actors:
+There are 2 offchain actors:
- [RelayMiners](./relay_miner.md)
- [PATH Gateways](./path_gateway.md)
@@ -31,13 +31,13 @@ title: Actors
---
flowchart TB
- subgraph on-chain
+ subgraph onchain
A([Application])
G([Gateway])
S([Supplier])
end
- subgraph off-chain
+ subgraph offchain
PG[PATH Gateway]
RM[Relay Miner]
end
@@ -47,9 +47,9 @@ flowchart TB
S -..- RM
```
-## On-Chain Actors
+## Onchain Actors
-On-Chain actors are part of the Pocket Network distributed ledger. They are the
+Onchain actors are part of the Pocket Network distributed ledger. They are the
_"Web3"_ part of Pocket.
They can be thought of as a `record`, a `registration` or a piece of `state` at a
@@ -64,15 +64,15 @@ This is an open work in progress and an active area of research.
```mermaid
mindmap
- (On-Chain Actors)
+ (Onchain Actors)
Gateway
Risks
Intentional overservicing
- Off-chain only?
+ Offchain only?
Misbehavior
Low volume exploit
- On-chain, there are few/any? expectations of gateway actors; basically a registry to track gateways and application delegations
- On-chain, we cannot robustly distinguish requests sent by gateways from those sent by applications acting sovereignly
+ Onchain, there are few/any? expectations of gateway actors; basically a registry to track gateways and application delegations
+ Onchain, we cannot robustly distinguish requests sent by gateways from those sent by applications acting sovereignly
Application
Risks
Insufficient funds to pay for services received
@@ -87,12 +87,12 @@ mindmap
Invalid/missing proofs
```
-## Off-Chain Actors
+## Offchain Actors
-Off-Chain actors are all the operators that make up Pocket Network. They are the
+Offchain actors are all the operators that make up Pocket Network. They are the
_"Web2"_ part of Pocket.
They can be thought of as `servers`, `processes` or `clients`.
-Off-chain actors play a key role in executing off-chain business logic that is
-verified on-chain and drives on-chain state transitions.
+Offchain actors play a key role in executing offchain business logic that is
+verified onchain and drives onchain state transitions.
diff --git a/docusaurus/docs/protocol/actors/application.md b/docusaurus/docs/protocol/actors/application.md
index 56c174eff..94fff7f21 100644
--- a/docusaurus/docs/protocol/actors/application.md
+++ b/docusaurus/docs/protocol/actors/application.md
@@ -17,7 +17,7 @@ services available on Pocket Network as a function of volume and time.
## Schema
-The on-chain representation of an `Application` can be found at [application.proto](https://github.com/pokt-network/poktroll/blob/main/proto/poktroll/application/application.proto).
+The onchain representation of an `Application` can be found at [application.proto](https://github.com/pokt-network/poktroll/blob/main/proto/poktroll/application/application.proto).
## Configuration
diff --git a/docusaurus/docs/protocol/actors/gateway.md b/docusaurus/docs/protocol/actors/gateway.md
index 1cfbe4ebb..97286f448 100644
--- a/docusaurus/docs/protocol/actors/gateway.md
+++ b/docusaurus/docs/protocol/actors/gateway.md
@@ -17,7 +17,7 @@ on behalf of an [Application](./application.md).
## Schema
-The on-chain representation of a `Gateway` can be found at [gateway.proto](https://github.com/pokt-network/poktroll/blob/main/proto/poktroll/gateway/gateway.proto).
+The onchain representation of a `Gateway` can be found at [gateway.proto](https://github.com/pokt-network/poktroll/blob/main/proto/poktroll/gateway/gateway.proto).
## Configuration
diff --git a/docusaurus/docs/protocol/actors/relay_miner.md b/docusaurus/docs/protocol/actors/relay_miner.md
index 31121f362..7616a081c 100644
--- a/docusaurus/docs/protocol/actors/relay_miner.md
+++ b/docusaurus/docs/protocol/actors/relay_miner.md
@@ -11,7 +11,7 @@ sidebar_position: 5
## Overview
-A `RelayMiner` is a specialized operation node (not an on-chain actor) designed
+A `RelayMiner` is a specialized operation node (not an onchain actor) designed
for individuals to **offer services** through Pocket Network alongside a staked
`Supplier`. It is responsible for proxying `RelayRequests` between a `PATH Gateway`
and the supplied `Service`.
diff --git a/docusaurus/docs/protocol/actors/supplier.md b/docusaurus/docs/protocol/actors/supplier.md
index 4e9f500ea..5d84e11a8 100644
--- a/docusaurus/docs/protocol/actors/supplier.md
+++ b/docusaurus/docs/protocol/actors/supplier.md
@@ -27,7 +27,7 @@ providing services as a function of volume and time.
## Schema
-The on-chain representation of a `Supplier` can be found at [supplier.proto](https://github.com/pokt-network/poktroll/blob/main/proto/poktroll/shared/supplier.proto).
+The onchain representation of a `Supplier` can be found at [supplier.proto](https://github.com/pokt-network/poktroll/blob/main/proto/poktroll/shared/supplier.proto).
## Configuration
@@ -36,7 +36,7 @@ Configurations to stake an `Supplier` can be found at [supplier_staking_config.m
## Modules
The `Supplier` actor depends on both the [`supplier`](https://github.com/pokt-network/poktroll/tree/main/x/supplier)
-and [`proof`](https://github.com/pokt-network/poktroll/tree/main/x/proof) on-chain modules.
+and [`proof`](https://github.com/pokt-network/poktroll/tree/main/x/proof) onchain modules.
These two modules' concerns are separated as follows:
### Supplier Module
diff --git a/docusaurus/docs/protocol/governance/params.md b/docusaurus/docs/protocol/governance/params.md
index a98a0546e..1b51f5147 100644
--- a/docusaurus/docs/protocol/governance/params.md
+++ b/docusaurus/docs/protocol/governance/params.md
@@ -9,8 +9,6 @@ sidebar_position: 1
DO NOT EDIT: this file was generated by make docs_update_gov_params_page
:::
-- [Access Control](#access-control)
-- [Updating governance parameter values](#updating-governance-parameter-values)
- [Updating this page](#updating-this-page)
- [Adding a new parameter](#adding-a-new-parameter)
- [Parameters](#parameters)
@@ -36,10 +34,10 @@ Please follow the instructions in [this guide](../../develop/developer_guide/add
| `proof` | `cosmos.base.v1beta1.Coin` | `proof_missing_penalty` | proof_missing_penalty is the number of tokens (uPOKT) which should be slashed from a supplier when a proof is required (either via proof_requirement_threshold or proof_missing_penalty) but is not provided. TODO_MAINNET: Consider renaming this to `proof_missing_penalty_upokt`. |
| `proof` | `double` | `proof_request_probability` | proof_request_probability is the probability of a session requiring a proof if it's cost (i.e. compute unit consumption) is below the ProofRequirementThreshold. |
| `proof` | `cosmos.base.v1beta1.Coin` | `proof_requirement_threshold` | proof_requirement_threshold is the session cost (i.e. compute unit consumption) threshold which asserts that a session MUST have a corresponding proof when its cost is equal to or above the threshold. This is in contrast to the this requirement being determined probabilistically via ProofRequestProbability. TODO_MAINNET: Consider renaming this to `proof_requirement_threshold_upokt`. |
-| `proof` | `cosmos.base.v1beta1.Coin` | `proof_submission_fee` | proof_submission_fee is the number of tokens (uPOKT) which should be paid by the supplier operator when submitting a proof. This is needed to account for the cost of storing proofs on-chain and prevent spamming (i.e. sybil bloat attacks) the network with non-required proofs. TODO_MAINNET: Consider renaming this to `proof_submission_fee_upokt`. |
+| `proof` | `cosmos.base.v1beta1.Coin` | `proof_submission_fee` | proof_submission_fee is the number of tokens (uPOKT) which should be paid by the supplier operator when submitting a proof. This is needed to account for the cost of storing proofs onchain and prevent spamming (i.e. sybil bloat attacks) the network with non-required proofs. TODO_MAINNET: Consider renaming this to `proof_submission_fee_upokt`. |
| `service` | `cosmos.base.v1beta1.Coin` | `add_service_fee` | The amount of uPOKT required to add a new service. This will be deducted from the signer's account balance, and transferred to the pocket network foundation. |
| `session` | `uint64` | `num_suppliers_per_session` | num_suppliers_per_session is the maximum number of suppliers per session (application:supplier pair for a given session number). |
-| `shared` | `uint64` | `application_unbonding_period_sessions` | application_unbonding_period_sessions is the number of sessions that an application must wait after unstaking before their staked assets are moved to their account balance. On-chain business logic requires, and ensures, that the corresponding block count of the application unbonding period will exceed the end of its corresponding proof window close height. |
+| `shared` | `uint64` | `application_unbonding_period_sessions` | application_unbonding_period_sessions is the number of sessions that an application must wait after unstaking before their staked assets are moved to their account balance. Onchain business logic requires, and ensures, that the corresponding block count of the application unbonding period will exceed the end of its corresponding proof window close height. |
| `shared` | `uint64` | `claim_window_close_offset_blocks` | claim_window_close_offset_blocks is the number of blocks after the claim window open height, at which the claim window closes. |
| `shared` | `uint64` | `claim_window_open_offset_blocks` | claim_window_open_offset_blocks is the number of blocks after the session grace period height, at which the claim window opens. |
| `shared` | `uint64` | `compute_units_to_tokens_multiplier` | The amount of upokt that a compute unit should translate to when settling a session. DEV_NOTE: This used to be under x/tokenomics but has been moved here to avoid cyclic dependencies. |
@@ -47,7 +45,7 @@ Please follow the instructions in [this guide](../../develop/developer_guide/add
| `shared` | `uint64` | `num_blocks_per_session` | num_blocks_per_session is the number of blocks between the session start & end heights. |
| `shared` | `uint64` | `proof_window_close_offset_blocks` | proof_window_close_offset_blocks is the number of blocks after the proof window open height, at which the proof window closes. |
| `shared` | `uint64` | `proof_window_open_offset_blocks` | proof_window_open_offset_blocks is the number of blocks after the claim window close height, at which the proof window opens. |
-| `shared` | `uint64` | `supplier_unbonding_period_sessions` | supplier_unbonding_period_sessions is the number of sessions that a supplier must wait after unstaking before their staked assets are moved to their account balance. On-chain business logic requires, and ensures, that the corresponding block count of the unbonding period will exceed the end of any active claim & proof lifecycles. |
+| `shared` | `uint64` | `supplier_unbonding_period_sessions` | supplier_unbonding_period_sessions is the number of sessions that a supplier must wait after unstaking before their staked assets are moved to their account balance. Onchain business logic requires, and ensures, that the corresponding block count of the unbonding period will exceed the end of any active claim & proof lifecycles. |
| `supplier` | `cosmos.base.v1beta1.Coin` | `min_stake` | min_stake is the minimum amount of uPOKT that a supplier must stake to be included in network sessions and remain staked. |
| `tokenomics` | `string` | `dao_reward_address` | dao_reward_address is the address to which mint_allocation_dao percentage of the minted tokens are sent at the end of claim settlement. |
| `tokenomics` | `MintAllocationPercentages` | `mint_allocation_percentages` | mint_allocation_percentages represents the distribution of newly minted tokens, at the end of claim settlement, as a result of the Global Mint TLM. |
diff --git a/docusaurus/docs/protocol/primitives/claim_and_proof_lifecycle.md b/docusaurus/docs/protocol/primitives/claim_and_proof_lifecycle.md
index 7617b5ce7..131afef18 100644
--- a/docusaurus/docs/protocol/primitives/claim_and_proof_lifecycle.md
+++ b/docusaurus/docs/protocol/primitives/claim_and_proof_lifecycle.md
@@ -15,8 +15,8 @@ to all readers.
:::
- [Introduction](#introduction)
-- [Session Windows \& On-Chain Parameters](#session-windows--on-chain-parameters)
- - [References:](#references)
+- [Session Windows \& Onchain Parameters](#session-windows--onchain-parameters)
+ - [References:](#references)
- [Claim Expiration](#claim-expiration)
- [Session](#session)
- [Session Duration](#session-duration)
@@ -72,7 +72,7 @@ sequenceDiagram
participant PN as Pocket Network
(Distributed Ledger)
loop Session Duration
- note over A,S: off-chain
+ note over A,S: offchain
A ->> +S: Relay Request
S ->> S: Insert Leaf into
Sparse Merkle Sum Trie
S ->> -A: Relay Response
@@ -93,7 +93,7 @@ sequenceDiagram
end
```
-## Session Windows & On-Chain Parameters
+## Session Windows & Onchain Parameters
_TODO(@bryanchriswhite): Add message distribution offsets/windows to this picture._
@@ -159,7 +159,7 @@ See [Session](./session.md) for more details.
### Session Duration
-After a session is initiated, the majority of it is handled `off-chain`,
+After a session is initiated, the majority of it is handled `offchain`,
as `Applications` make RPC requests (`relays`) to the `Supplier`.
### Session End
@@ -176,16 +176,16 @@ timeline
CreateClaim
(Supplier)
: Wait for Claim Window to open
: Submit CreateClaim Transaction
(root, sum, session, app, supplier, service, etc...)
- : Claim stored on-chain
+ : Claim stored onchain
SubmitProof
(Supplier)
: Wait for Proof Window to open
- : Retrieve seed (entropy) from on-chain data (block hash)
+ : Retrieve seed (entropy) from onchain data (block hash)
: Generate Merkle Proof for path in SMST based on seed
: Submit SubmitProof Transaction
(session, merkle proof, leaf, etc...)
- : Proof stored on-chain
+ : Proof stored onchain
Proof Validation
(Protocol)
- : Retrieve on-chain Claims that need to be settled
- : Retrieve corresponding on-chain Proof for every Claim
+ : Retrieve onchain Claims that need to be settled
+ : Retrieve corresponding onchain Proof for every Claim
: Validate leaf difficulty
: Validate Merkle Proof
: Validate Leaf Signature
@@ -195,7 +195,7 @@ timeline
## Claim
-A `Claim` is a structure submitted on-chain by a `Supplier` claiming to have done
+A `Claim` is a structure submitted onchain by a `Supplier` claiming to have done
some amount of work in servicing `relays` for `Application`.
Exactly one claim exists for every `(Application, Supplier, Session)`.
@@ -209,13 +209,13 @@ that were necessary to service that request.
| Type | Description |
| ---------------------------------------------------------------------------------------------------- | ------------------------------------------------------- |
-| [`Claim`](https://github.com/pokt-network/poktroll/blob/main/proto/poktroll/proof/claim.proto) | A serialized version of the `Claim` is stored on-chain. |
-| [`MsgCreateClaim`](https://github.com/pokt-network/poktroll/blob/main/proto/poktroll/proof/tx.proto) | Submitted by a `Supplier` to store a claim `on-chain`. |
+| [`Claim`](https://github.com/pokt-network/poktroll/blob/main/proto/poktroll/proof/claim.proto) | A serialized version of the `Claim` is stored onchain. |
+| [`MsgCreateClaim`](https://github.com/pokt-network/poktroll/blob/main/proto/poktroll/proof/tx.proto) | Submitted by a `Supplier` to store a claim `onchain`. |
### CreateClaim Validation
When the network receives a [`MsgCreateClaim`](#TODO_link_to_MsgCreateClaim) message, before the claim is persisted
-on-chain, it MUST be validated:
+onchain, it MUST be validated:
```mermaid
stateDiagram-v2
@@ -255,7 +255,7 @@ Validate_Claim --> [*]
validation ([`MsgCreateClaim#ValidateBasic()`](https://github.com/pokt-network/poktroll/blob/main/x/proof/types/message_create_claim.go))
- Session header
validation ([diagram](#session-header-validation) / [`msgServer#queryAndValidateSessionHeader()`](https://github.com/pokt-network/poktroll/blob/main/x/proof/keeper/session.go))
-- On-chain claim window
+- Onchain claim window
validation ([diagram](#TODO) / [`msgServer#validateClaimWindow()`](https://github.com/pokt-network/poktroll/blob/main/x/proof/keeper/session.go))
### Claim Window
@@ -267,11 +267,11 @@ or too late, it will be rejected by the protocol.
If a `Supplier` fails to submit a `Claim` during the Claim Window, it will forfeit
any potential rewards it could earn in exchange for the work done.
-See [Session Windows & On-Chain Parameters](#session-windows--on-chain-parameters) for more details.
+See [Session Windows & Onchain Parameters](#session-windows--onchain-parameters) for more details.
## Proof
-A `Proof` is a structure submitted on-chain by a `Supplier` containing a Merkle
+A `Proof` is a structure submitted onchain by a `Supplier` containing a Merkle
Proof to a single pseudo-randomly selected leaf from the corresponding `Claim`.
At most one `Proof` exists for every `Claim`.
@@ -283,13 +283,13 @@ rewarded for the work done.
| Type | Description |
| ---------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| [`Proof`](https://github.com/pokt-network/poktroll/blob/main/proto/poktroll/proof/proof.proto) | A serialized version of the `Proof` is stored on-chain. |
-| [`MsgSubmitProof`](https://github.com/pokt-network/poktroll/blob/main/proto/poktroll/proof/tx.proto) | Submitted by a `Supplier` to store a proof `on-chain`. If the `Proof` is invalid, or if there is no corresponding `Claim` for the `Proof`, the transaction will be rejected. |
+| [`Proof`](https://github.com/pokt-network/poktroll/blob/main/proto/poktroll/proof/proof.proto) | A serialized version of the `Proof` is stored onchain. |
+| [`MsgSubmitProof`](https://github.com/pokt-network/poktroll/blob/main/proto/poktroll/proof/tx.proto) | Submitted by a `Supplier` to store a proof `onchain`. If the `Proof` is invalid, or if there is no corresponding `Claim` for the `Proof`, the transaction will be rejected. |
### SubmitProof Validation
When the network receives a [`MsgSubmitProof`](#TODO_link_to_MsgSubmitProof) message, before the proof is accepted
-on-chain, it MUST be validated:
+onchain, it MUST be validated:
```mermaid
stateDiagram-v2
@@ -361,15 +361,15 @@ If a proof is required (as determined by [Probabilistic Proofs](probabilistic_pr
submit a `Proof` during the Proof Window, the Claim will expire, and the supplier will forfeit rewards for the claimed
work done. See [Claim Expiration](#claim-expiration) for more.
-See [Session Windows & On-Chain Parameters](#session-windows--on-chain-parameters) for more details.
+See [Session Windows & Onchain Parameters](#session-windows--onchain-parameters) for more details.
## Proof Security
In addition to basic validation as part of processing `SubmitProof` to determine
-whether or not the `Proof` should be stored on-chain, there are several additional
+whether or not the `Proof` should be stored onchain, there are several additional
deep cryptographic validations needed:
-1. `Merkle Leaf Validation`: Proof of the off-chain `Supplier`/`Application` interaction during the Relay request & response.
+1. `Merkle Leaf Validation`: Proof of the offchain `Supplier`/`Application` interaction during the Relay request & response.
2. `Merkle Proof Selection`: Proof of the amount of work done by the `Supplier` during the `Session`.
:::note
@@ -387,7 +387,7 @@ After the leaf is validated, two things happen:
1. The stake of `Application` signing the `Relay Request` is decreased through burn
2. The account balance of the `Supplier` owner is increased through mint
-The validation on these signatures is done on-chain as part of `Proof Validation`.
+The validation on these signatures is done onchain as part of `Proof Validation`.
```mermaid
graph LR
@@ -415,9 +415,9 @@ graph LR
### Merkle Proof Selection
Before the leaf itself is validated, we need to make sure if there is a valid
-Merkle Proof for the associated pseudo-random path computed on-chain.
+Merkle Proof for the associated pseudo-random path computed onchain.
-Since the path that needs to be proven uses an on-chain seed after the `Claim`
+Since the path that needs to be proven uses an onchain seed after the `Claim`
has been submitted, it is impossible to know the path in advance.
Assume a collision resistant hash function `H` that takes a the `block header hash`
@@ -684,7 +684,7 @@ state Validate_Session_Header {
Get_Session --> if_get_session_error
if_get_session_error --> Session_Header_Validation_Error: get session error
if_get_session_error --> if_session_id_mismatch
- if_session_id_mismatch --> Session_Header_Validation_Error: claim & on-chain session ID mismatch
+ if_session_id_mismatch --> Session_Header_Validation_Error: claim & onchain session ID mismatch
if_session_id_mismatch --> if_supplier_found
if_supplier_found --> Session_Header_Validation_Error: claim supplier not in session
if_supplier_found --> [*]
@@ -825,7 +825,7 @@ state Validate_Relay_Response {
state if_supplier_sig_malformed <>
[*] --> if_supplier_pubkey_exists
- if_supplier_pubkey_exists --> Relay_Response_Signature_Error: no supplier public key on-chain
+ if_supplier_pubkey_exists --> Relay_Response_Signature_Error: no supplier public key onchain
if_supplier_pubkey_exists --> if_supplier_sig_malformed
if_supplier_sig_malformed --> Relay_Response_Signature_Error: cannot unmarshal supplier (response) signature
}
diff --git a/docusaurus/docs/protocol/primitives/gateways.md b/docusaurus/docs/protocol/primitives/gateways.md
index b44d0a5da..4d666ddea 100644
--- a/docusaurus/docs/protocol/primitives/gateways.md
+++ b/docusaurus/docs/protocol/primitives/gateways.md
@@ -22,13 +22,13 @@ to all readers.
- [Application -\> Gateway Delegation](#application---gateway-delegation)
- [Relay Signatures](#relay-signatures)
- [Delegating Application Example](#delegating-application-example)
-- [\[WIP\] Gateway Off-Chain Operations](#wip-gateway-off-chain-operations)
+- [\[WIP\] Gateway Offchain Operations](#wip-gateway-offchain-operations)
## Introduction
The [Gateway Actor](../../protocol/actors/gateway.md) section covers what a Gateway is.
Recall that it is a permissionless protocol actor to whom the Application can
-**optionally** delegate on-chain trust in order to perform off-chain operations.
+**optionally** delegate onchain trust in order to perform offchain operations.
This section aims to cover the cryptographic aspects of Gateway interactions,
trust delegation, and how they fit into the Pocket Network protocol.
@@ -42,22 +42,22 @@ There are three modes of operation to interact with the Suppliers on the network
3. **Gateway Application**: Client trusts Gateway to sign relays on behalf of its Application
For the purposes of this discussion, it is important to note that an `Application`
-and `Gateway` are on-chain actors/records that stake POKT to participate in the
+and `Gateway` are onchain actors/records that stake POKT to participate in the
network. The term `Client` is used to represent an application running on a user's
device, such as a smartphone or a web browser.
-The goal of Gateways is to enable free-market off-chain economics tie into
-on-chain interactions.
+The goal of Gateways is to enable free-market offchain economics to tie into
+onchain interactions.
### Sovereign Application
-A Sovereign Application is one where the `Client` manages its own on-chain `Application`
+A Sovereign Application is one where the `Client` manages its own onchain `Application`
and interacts with the Pocket Supplier Network directly.
The Application is responsible for:
- Protecting it's own `Application` private key on the `Client`
+- Maintaining and updating its own onchain stake to pay for `Supplier` services
+- Maintaining and updating it's own onchain stake to pay for `Supplier` services
- Determining which `Supplier` to use from the available list in the session
```mermaid
@@ -88,13 +88,13 @@ sequenceDiagram
A Delegated Application is one where an `Application` delegates to one or more
`Gateways`. Agreements (authentication, payments, etc) between the `Client` and
-`Gateway` are then managed off-chain, but payment for the on-chain `Supplier`
+`Gateway` are then managed offchain, but payment for the onchain `Supplier`
services still comes from the `Application`s stake.
The Application is responsible for:
- Protecting it's own `Application` private key somewhere in hot/cold storage
+- Maintaining and updating its own onchain stake to pay for `Supplier` services
+- Maintaining and updating it's own onchain stake to pay for `Supplier` services
- Managing, through (un)delegation, which Gateway(s) can sign requests on its behalf
The Gateway is responsible for:
@@ -138,10 +138,10 @@ sequenceDiagram
### Gateway Application
A Gateway Application is one where the `Gateway` takes full onus, on behalf of
-`Client`s to manage all on-chain `Application` interactions to access the
+`Client`s to manage all onchain `Application` interactions to access the
Pocket `Supplier` Network. Agreements (authentication, payments, etc) between
-the `Client` and `Gateway` are then managed off-chain, and payment for the
-on-chain `Supplier` services will comes from the `Application`s stake, which
+the `Client` and `Gateway` are then managed offchain, and payment for the
+onchain `Supplier` services will come from the `Application`s stake, which
is now maintained by the `Gateway`.
It is responsible for:
@@ -149,7 +149,7 @@ It is responsible for:
The Gateway is responsible for:
- Protecting it's own `Application` private key somewhere in hot/cold storage
-- Maintaining and updating it's own on-chain stake to pay for `Supplier` services
+- Maintaining and updating its own onchain stake to pay for `Supplier` services
- Providing tooling and infrastructure to coordinate with the `Client`
- Determining which `Supplier` to use from the available list in the session
@@ -188,7 +188,7 @@ sequenceDiagram
An Application that chooses to delegate trust to a gateway by submitting a
one-time `DelegateMsg` transaction. Once this is done, the `Gateway` will be
able to sign relay requests on behalf of the `Application` that'll use the
-`Application`s on-chain stake to pay for service to access the Pocket `Supplier` Network.
+`Application`s onchain stake to pay for service to access the Pocket `Supplier` Network.
This can be done any number of times, so an `Application` can delegate to multiple
`Gateways` simultaneously.
@@ -329,9 +329,9 @@ stateDiagram-v2
sigCheck --> Invalid: No
```
-## [WIP] Gateway Off-Chain Operations
+## [WIP] Gateway Offchain Operations
-Gateways can design and manage off-chain operations to coordinate with the `Client`
+Gateways can design and manage offchain operations to coordinate with the `Client`
including but not limited to:
- Dashboards & user management
@@ -340,7 +340,7 @@ including by not limited to:
- Providing altruist backups
- QoS (SLA, SLO) guarantees
- Prove & validate data integrity
-- Provide additional off-chain services
+- Provide additional offchain services
- Guarantee certain SLAs and SLOs
-- Manage on-chain Pocket logic (account top-ups, etc...)
+- Manage onchain Pocket logic (account top-ups, etc...)
- Etc...
diff --git a/docusaurus/docs/protocol/primitives/probabilistic_proofs.md b/docusaurus/docs/protocol/primitives/probabilistic_proofs.md
index 5b7b8d169..6fedbffa0 100644
--- a/docusaurus/docs/protocol/primitives/probabilistic_proofs.md
+++ b/docusaurus/docs/protocol/primitives/probabilistic_proofs.md
@@ -17,7 +17,7 @@ document as a reference for writing this.
## Introduction
Probabilistic Proofs is the solution to solving for the long tail of low relay
-sessions that can cause on-chain bloat.
+sessions that can cause onchain bloat.
This complements the design of [Relay Mining](./relay_mining.md)
to solve for all scenarios.
diff --git a/docusaurus/docs/protocol/primitives/relay_mining.md b/docusaurus/docs/protocol/primitives/relay_mining.md
index e15edf50a..6bf9c2f6e 100644
--- a/docusaurus/docs/protocol/primitives/relay_mining.md
+++ b/docusaurus/docs/protocol/primitives/relay_mining.md
@@ -16,7 +16,7 @@ the [relay mining paper](https://arxiv.org/abs/2305.10672) as a reference for wr
## Introduction
-tl;dr Modulate on-chain difficulty up (similar to Bitcoin) so we can accommodate
+tl;dr Modulate onchain difficulty up (similar to Bitcoin) so we can accommodate
surges in relays and have no upper limit on the number of relays per session.
Relay Mining is the only solution in Web3 to incentivize read-only requests
@@ -154,8 +154,8 @@ sequenceDiagram
Note over proto_part,proto_actor: ref: Interaction (other seq. diagram)
- proto_part--xproto_actor: An action of protocol participant updates on-chain state of protocol actor
- proto_part-xproto_actor: An action of protocol participant references on-chain state of protocol actor
+ proto_part--xproto_actor: An action of protocol participant updates onchain state of protocol actor
+ proto_part-xproto_actor: An action of protocol participant references onchain state of protocol actor
break Time gap
proto_actor->>proto_actor: Protocol actor performs some independent action
diff --git a/docusaurus/docs/protocol/primitives/session.md b/docusaurus/docs/protocol/primitives/session.md
index 1dedb2cc8..ac0b1525c 100644
--- a/docusaurus/docs/protocol/primitives/session.md
+++ b/docusaurus/docs/protocol/primitives/session.md
@@ -15,5 +15,5 @@ TODO_DOCUMENT(@Olshansk): This is just a placeholder. Use the session part of th
## Introduction
-Sessions are an on-chain mechanism to pair `Application`s to `Supplier`s for a
-period of time to service relays off-chain.
+Sessions are an onchain mechanism to pair `Application`s to `Supplier`s for a
+period of time to service relays offchain.
diff --git a/docusaurus/docs/protocol/tokenomics/token_logic_modules.md b/docusaurus/docs/protocol/tokenomics/token_logic_modules.md
index 917d808c9..3dcb8e2e7 100644
--- a/docusaurus/docs/protocol/tokenomics/token_logic_modules.md
+++ b/docusaurus/docs/protocol/tokenomics/token_logic_modules.md
@@ -66,13 +66,13 @@ by the supplier adheres to the optimistic maxIA set per the limits of the Relay
Pocket Network can be seen as a probabilistic, optimistic permissionless multi-tenant rate limiter.
This works by putting funds in escrow, burning it after work is done, and putting optimistic limits
-in place whose work volume is proven on-chain.
+in place whose work volume is proven onchain.
:::
Suppliers always have the option to over-service an Application (**i.e. do free work**),
in order to ensure high quality service in the network. This may lead to offchain
-reputation benefits (e.g. Gateways favoring them), but suppliers' on-chain rewards
+reputation benefits (e.g. Gateways favoring them), but suppliers' onchain rewards
are always limited by the cumulative amounts of Applications' stakes (at session start; per service)
and the number of Suppliers in the session.
@@ -131,7 +131,7 @@ to only be able to stake for EXACTLY ONE service.
:::note
TODO_POST_MAINNET: After the Shannon upgrade, the team at Grove has a lot of ideas
-related to on-chain reputation, [supplier overlay networks](https://www.notion.so/buildwithgrove/Off-chain-Application-Stake-Tracking-6a8bebb107db4f7f9dc62cbe7ba555f7?pvs=4), and
+related to onchain reputation, [supplier overlay networks](https://www.notion.so/buildwithgrove/Off-chain-Application-Stake-Tracking-6a8bebb107db4f7f9dc62cbe7ba555f7?pvs=4), and
much more, all of which is out of scope for the initial implementation.
:::
@@ -418,7 +418,7 @@ Later, PNF, on behalf of the DAO, will review the reimbursement requests and app
```mermaid
---
-title: "Off-Chain Reimbursement Request Flow"
+title: "Offchain Reimbursement Request Flow"
---
sequenceDiagram
participant PNF as Pocket Network Foundation
@@ -449,10 +449,10 @@ The application `PAYS` the supplier for work done (i.e. Mint=Burn).
The application `GETS REIMBURSED` for the inflation (i.e. Global Mint).
This will require staked Applications (sovereign or those managed by Gateways) to periodically
-"top up" their balances to cover not only the on-chain costs/burn, but also the inflation
+"top up" their balances to cover not only the onchain costs/burn, but also the inflation
until it is reimbursed by the DAO/PNF.
-#### Will there be on-chain enforcement of how Applications get reimbursed?
+#### Will there be onchain enforcement of how Applications get reimbursed?
_tl;dr Unfortunately, no._
@@ -460,11 +460,11 @@ The Applications will indeed have to trust the DAO/PNF to reimburse them.
The following is an example of the approach PNF could take.
1. Assume Application staking by Gateways is permissionless and done.
-2. Applications pay on-chain for costs and inflation
+2. Applications pay onchain for costs and inflation
3. PNF KYCs Gateways who seek reimbursement.
4. Gateways that don't go through the KYC process cover the cost of inflation
out of pocket.
-5. A script that retrieves on-chain reimbursement requests will be written that
+5. A script that retrieves onchain reimbursement requests will be written that
automatically sends funds to previously KYCed gateways
6. The script above, and the trust that it'll be maintained, updated and executed
relies on the Gateways' trust in the PNF.
@@ -473,7 +473,7 @@ This is similar, in spirit, but still an improvement on top of the trust
between Gateways and PNF in Morse today in order to:
- Get access to the limited supply of Gateway keys
-- Gateways paying the on-chain burn manually
+- Gateways paying the onchain burn manually
#### How does this solution scale for Sovereign Applications?
@@ -485,7 +485,7 @@ _Read more about about their differences and similarities [here](./../../protoco
#### What kind of resources are needed to scale and automate reimbursement?
-This will be a combination of on-chain and off-chain resources (EventReader, TxSubmission, Accounting, etc...). In particular:
+This will be a combination of onchain and offchain resources (EventReader, TxSubmission, Accounting, etc...). In particular:
-- **On-chain**: load testing will show if events take up too much on-chain space. This is unlikely to be an issue relative to proofs.
-- **Off-chain**: PNF Directors are aware and approve of the operational overhead this will require. This will require some off-chain scripting to automate the process.
+- **Onchain**: load testing will show if events take up too much onchain space. This is unlikely to be an issue relative to proofs.
+- **Offchain**: PNF Directors are aware and approve of the operational overhead this will require. This will require some offchain scripting to automate the process.
diff --git a/docusaurus/docs/protocol/upgrades/contigency_plans.md b/docusaurus/docs/protocol/upgrades/contigency_plans.md
new file mode 100644
index 000000000..260f37823
--- /dev/null
+++ b/docusaurus/docs/protocol/upgrades/contigency_plans.md
@@ -0,0 +1,100 @@
+---
+title: Failed upgrade contingency plan
+sidebar_position: 5
+---
+
+:::tip
+
+This documentation covers failed upgrade contingency for `poktroll` - a `cosmos-sdk` based chain.
+
+While this can be helpful for other blockchain networks, it is not guaranteed to work for other chains.
+
+:::
+
+## Contingency plans
+
+There's always a chance the upgrade will fail.
+
+This document is intended to help you recover without significant downtime.
+
+- [Option 0: The bug is discovered before the upgrade height is reached](#option-0-the-bug-is-discovered-before-the-upgrade-height-is-reached)
+- [Option 1: The migration didn't start (i.e. migration halt)](#option-1-the-migration-didnt-start-ie-migration-halt)
+- [Option 2: The migration is stuck (i.e. incomplete/partial migration)](#option-2-the-migration-is-stuck-ie-incompletepartial-migration)
+- [Option 3: The migration succeeded but the network is stuck (i.e. migration had a bug)](#option-3-the-migration-succeeded-but-the-network-is-stuck-ie-migration-had-a-bug)
+- [MANDATORY Checklist of Documentation \& Scripts to Update](#mandatory-checklist-of-documentation--scripts-to-update)
+
+### Option 0: The bug is discovered before the upgrade height is reached
+
+**Cancel the upgrade plan!**
+
+See the instructions of [how to do that here](./upgrade_procedure.md#cancelling-the-upgrade-plan).
+
+### Option 1: The migration didn't start (i.e. migration halt)
+
+**This is unlikely to happen.**
+
+Possible reasons for this are if the name of the upgrade handler is different
+from the one specified in the upgrade plan, or if the binary suggested by the
+upgrade plan is wrong.
+
+If the nodes on the network stopped at the upgrade height and the migration did not
+start yet (i.e. there are no logs indicating the upgrade handler and store migrations are being executed),
+we **MUST** gather social consensus to restart validators with the `--unsafe-skip-upgrade=$upgradeHeightNumber` flag.
+
+This will skip the upgrade process, allowing the chain to continue and the protocol team to plan another release.
+
+`--unsafe-skip-upgrade` simply skips the upgrade handler and store migrations.
+The chain continues as if the upgrade plan was never set.
+The upgrade needs to be fixed, and then a new plan needs to be submitted to the network.
+
+:::caution
+
+`--unsafe-skip-upgrade` needs to be documented in the list of upgrades and added
+to the scripts so the next time somebody tries to sync the network from genesis,
+they will automatically skip the failed upgrade.
+[MANDATORY Checklist of Documentation & Scripts to Update](#mandatory-checklist-of-documentation--scripts-to-update)
+
+
+
+:::
+
+### Option 2: The migration is stuck (i.e. incomplete/partial migration)
+
+If the migration is stuck, there's always a chance the upgrade handler was executed onchain as scheduled, but the migration didn't complete.
+
+In such a case, we need:
+
+- **All full nodes and validators**: Roll back validators to the backup
+
+ - A snapshot is taken by `cosmovisor` automatically prior to upgrade when `UNSAFE_SKIP_BACKUP` is set to `false` (the default recommended value;
+ [more information](https://docs.cosmos.network/main/build/tooling/cosmovisor#command-line-arguments-and-environment-variables))
+
+- **All full nodes and validators**: skip the upgrade
+
+ - Add the `--unsafe-skip-upgrade=$upgradeHeightNumber` argument to the `poktrolld start` command like so:
+
+ ```bash
+ poktrolld start --unsafe-skip-upgrade=$upgradeHeightNumber # ... the rest of the arguments
+ ```
+
+- **Protocol team**: Resolve the issue with an upgrade and schedule a new plan.
+
+ - The upgrade needs to be fixed, and then a new plan needs to be submitted to the network.
+
+- **Protocol team**: document the failed upgrade
+
+ - Document and add `--unsafe-skip-upgrade=$upgradeHeightNumber` to the scripts (such as docker-compose and cosmovisor installer)
+ - The next time somebody tries to sync the network from genesis they will automatically skip the failed upgrade; see the [MANDATORY checklist of documentation & scripts to update](#mandatory-checklist-of-documentation--scripts-to-update)
+
+
+
+### Option 3: The migration succeeded but the network is stuck (i.e. migration had a bug)
+
+This should be treated as a consensus or non-determinism bug that is unrelated to the upgrade. See [Recovery From Chain Halt](../../develop/developer_guide/recovery_from_chain_halt.md) for more information on how to handle such issues.
+
+### MANDATORY Checklist of Documentation & Scripts to Update
+
+- [ ] The [upgrade list](./upgrade_list.md) should reflect a failed upgrade and provide a range of heights served by each version.
+- [ ] Systemd service should include the `--unsafe-skip-upgrade=$upgradeHeightNumber` argument in its start command [here](https://github.com/pokt-network/poktroll/blob/main/tools/installer/full-node.sh).
+- [ ] The [Helm chart](https://github.com/pokt-network/helm-charts/blob/main/charts/poktrolld/templates/StatefulSet.yaml) should point to the latest version; consider exposing it via a `values.yaml` file
+- [ ] The [docker-compose](https://github.com/pokt-network/poktroll-docker-compose-example/tree/main/scripts) examples should point to the latest version
diff --git a/docusaurus/docs/protocol/upgrades/module_params.md b/docusaurus/docs/protocol/upgrades/module_params.md
index 690fabf26..bd64fa5bb 100644
--- a/docusaurus/docs/protocol/upgrades/module_params.md
+++ b/docusaurus/docs/protocol/upgrades/module_params.md
@@ -7,7 +7,7 @@ sidebar_position: 3
## Parameters and the DAO
-Pocket Network utilizes an off-chain governance mechanism that enables the community to vote on proposals. Once a proposal passes, the DAO can adjust the parameters necessary for the protocol's operation.
+Pocket Network utilizes an offchain governance mechanism that enables the community to vote on proposals. Once a proposal passes, the DAO can adjust the parameters necessary for the protocol's operation.
- [Parameters and the DAO](#parameters-and-the-dao)
- [Examples](#examples)
diff --git a/docusaurus/docs/protocol/upgrades/release_process.md b/docusaurus/docs/protocol/upgrades/release_process.md
index 2845f4c84..398d56c05 100644
--- a/docusaurus/docs/protocol/upgrades/release_process.md
+++ b/docusaurus/docs/protocol/upgrades/release_process.md
@@ -16,13 +16,6 @@ sidebar_position: 4
This document is for the Pocket Network protocol team's internal use only.
:::
-- [1. Determine if the Release is Consensus-Breaking](#1-determine-if-the-release-is-consensus-breaking)
-- [2. Create a GitHub Release](#2-create-a-github-release)
- - [Legend](#legend)
-- [3. Write an Upgrade Plan](#3-write-an-upgrade-plan)
-- [4. Issue Upgrade on TestNet](#4-issue-upgrade-on-testnet)
-- [5. Issue Upgrade on MainNet](#5-issue-upgrade-on-mainnet)
-
### 1. Determine if the Release is Consensus-Breaking
:::note
@@ -59,12 +52,18 @@ You can find an example [here](https://github.com/pokt-network/poktroll/releases
```text
## Protocol Upgrades
+
+
- **Planned Upgrade:** ❌ Not applicable for this release.
- **Breaking Change:** ❌ Not applicable for this release.
- **Manual Intervention Required:** ✅ Yes, but only for Alpha TestNet participants. If you are participating, please follow the [instructions provided here](https://dev.poktroll.com/operate/quickstart/docker_compose_walkthrough#restarting-a-full-node-after-re-genesis-) for restarting your full node after re-genesis.
- **Upgrade Height:** ❌ Not applicable for this release.
## What's Changed
+
```
diff --git a/docusaurus/docs/protocol/upgrades/upgrade_procedure.md b/docusaurus/docs/protocol/upgrades/upgrade_procedure.md
index 2e0998251..91dfc12bf 100644
--- a/docusaurus/docs/protocol/upgrades/upgrade_procedure.md
+++ b/docusaurus/docs/protocol/upgrades/upgrade_procedure.md
@@ -6,24 +6,34 @@ sidebar_position: 2
# Upgrade procedure
:::warning
-This page describes the protocol upgrade process, which is internal to the protocol team. If you're interested in upgrading your Pocket Network node, please check our [releases page](https://github.com/pokt-network/poktroll/releases) for upgrade instructions and changelogs.
+
+This page describes the protocol upgrade process, intended for the protocol team's internal use.
+
+If you're interested in upgrading your Pocket Network node, please check our [releases page](https://github.com/pokt-network/poktroll/releases) for upgrade instructions and changelogs.
+
:::
- [When is an Upgrade Warranted?](#when-is-an-upgrade-warranted)
- [Implementing the Upgrade](#implementing-the-upgrade)
- [Writing an Upgrade Transaction](#writing-an-upgrade-transaction)
-- [Submitting the upgrade on-chain](#submitting-the-upgrade-on-chain)
+ - [Validate the URLs (live network only)](#validate-the-urls-live-network-only)
+- [Submitting the upgrade onchain](#submitting-the-upgrade-onchain)
+- [Cancelling the upgrade plan](#cancelling-the-upgrade-plan)
- [Testing the Upgrade](#testing-the-upgrade)
- - [LocalNet](#localnet)
- - [DevNet](#devnet)
- - [TestNet](#testnet)
- - [Mainnet](#mainnet)
+ - [LocalNet Upgrades](#localnet-upgrades)
+ - [LocalNet Upgrade Cheat Sheet](#localnet-upgrade-cheat-sheet)
+ - [DevNet Upgrades](#devnet-upgrades)
+ - [TestNet Upgrades](#testnet-upgrades)
+ - [Mainnet Upgrades](#mainnet-upgrades)
## Overview
-When a consensus-breaking change is made to the protocol, we must carefully evaluate and implement an upgrade path that allows existing nodes to transition safely from one software version to another without disruption. This process involves several key steps:
+When a consensus-breaking change is made to the protocol, we must carefully evaluate and implement an upgrade path that
+allows existing nodes to transition safely from one software version to another without disruption.
-1. **Proposal**: The DAO drafts an upgrade proposal using our off-chain governance system.
+This process involves several key steps:
+
+1. **Proposal**: The DAO drafts an upgrade proposal using our offchain governance system.
2. **Implementation**: The proposed changes are implemented in the codebase.
3. **Testing**: Thorough testing of the proposed changes is conducted in devnet and testnet environments before mainnet deployment.
4. **Announcement**: Upon successful testing, we announce the upgrade through our social media channels and community forums.
@@ -36,16 +46,34 @@ An upgrade is necessary whenever there's an API, State Machine, or other Consens
## Implementing the Upgrade
-1. When a new version includes a consensus-breaking change, plan for the next protocol upgrade:
- - If there's a change to a specific module, bump that module's consensus version.
+1. When a new version includes a `consensus-breaking` change, plan for the next protocol upgrade:
+
+ - If there's a change to a specific module -> bump that module's consensus version.
- Note any potential parameter changes to include in the upgrade.
+
2. Create a new upgrade in `app/upgrades`:
- Refer to `historical.go` for past upgrades and examples.
- - Consult Cosmos-sdk documentation on upgrades for additional guidance [here](https://docs.cosmos.network/main/build/building-apps/app-upgrade) and [here](https://docs.cosmos.network/main/build/modules/upgrade).
+ - Consult Cosmos-sdk documentation on upgrades for additional guidance on [building-apps/app-upgrade](https://docs.cosmos.network/main/build/building-apps/app-upgrade) and [modules/upgrade](https://docs.cosmos.network/main/build/modules/upgrade).
+
+:::info
+
+Creating a new upgrade plan **MUST BE DONE** even if there are no state changes.
+
+:::
## Writing an Upgrade Transaction
-An upgrade transaction includes a [Plan](https://github.com/cosmos/cosmos-sdk/blob/0fda53f265de4bcf4be1a13ea9fad450fc2e66d4/x/upgrade/proto/cosmos/upgrade/v1beta1/upgrade.proto#L14) with specific details about the upgrade. This information helps schedule the upgrade on the network and provides necessary data for automatic upgrades via `Cosmovisor`. A typical upgrade transaction will look like the following:
+An upgrade transaction includes a [Plan](https://github.com/cosmos/cosmos-sdk/blob/0fda53f265de4bcf4be1a13ea9fad450fc2e66d4/x/upgrade/proto/cosmos/upgrade/v1beta1/upgrade.proto#L14) with specific details about the upgrade.
+
+This information helps schedule the upgrade on the network and provides necessary data for automatic upgrades via `Cosmovisor`.
+
+A typical upgrade transaction includes:
+
+- `name`: Name of the upgrade. It should match the `VersionName` of `upgrades.Upgrade`.
+- `height`: The height at which an upgrade should be executed and the node will be restarted.
+- `info`: Can be empty. **Only needed for live networks where we want cosmovisor to upgrade nodes automatically**.
+
+And looks like the following as an example:
```json
{
@@ -65,52 +93,171 @@ An upgrade transaction includes a [Plan](https://github.com/cosmos/cosmos-sdk/bl
}
```
-- `name`: Name of the upgrade. It should match the `VersionName` of `upgrades.Upgrade`.
-- `height`: The height at which an upgrade should be executed and the node will be restarted.
-- `info`: While this field can theoretically contain any information about the upgrade, in practice, `cosmovisor`uses it to obtain information about the binaries. When`cosmovisor` is configured to automatically download binaries, it will pull the binary from the link provided in this field and perform a hash verification (which is optional).
+:::tip
+
+When `cosmovisor` is configured to automatically download binaries, it will pull the binary from the link provided in
+the object above and perform a hash verification (which is also optional).
+
+**NOTE THAT** we only know the hashes **AFTER** the release has been cut and CI created artifacts for this version.
+
+:::
+
+### Validate the URLs (live network only)
+
+The URLs of the binaries contain checksums. It is critical to ensure they are correct.
+Otherwise Cosmovisor won't be able to download the binaries and go through the upgrade.
+
+The command below (using tools built by the authors of Cosmovisor) can be used to achieve the above:
+
+```bash
+jq -r '.body.messages[0].plan.info | fromjson | .binaries[]' $PATH_TO_UPGRADE_TRANSACTION_JSON | while IFS= read -r url; do
+ go-getter "$url" .
+done
+```
+
+The output should look like this:
+
+```text
+2024/09/24 12:40:40 success!
+2024/09/24 12:40:42 success!
+2024/09/24 12:40:44 success!
+2024/09/24 12:40:46 success!
+```
+
+:::tip
+
+`go-getter` can be installed using the following command:
+
+```bash
+go install github.com/hashicorp/go-getter/cmd/go-getter@latest
+```
-## Submitting the upgrade on-chain
+:::
+
+## Submitting the upgrade onchain
The `MsgSoftwareUpgrade` can be submitted using the following command:
```bash
-poktrolld tx authz exec PATH_TO_TRANSACTION_JSON --from pnf
+poktrolld tx authz exec $PATH_TO_UPGRADE_TRANSACTION_JSON --from=pnf
```
-If the transaction has been accepted, upgrade plan can be viewed with this command:
+If the transaction has been accepted, the upgrade plan can be viewed with this command:
```bash
poktrolld query upgrade plan
```
+## Cancelling the upgrade plan
+
+It is possible to cancel the upgrade before the upgrade plan height is reached. To do so, execute the following make target:
+
+```bash
+make localnet_cancel_upgrade
+```
+
## Testing the Upgrade
:::warning
-Note that for local testing, `cosmovisor` won't pull the binary from the info field.
+Note that for local testing, `cosmovisor` won't pull the binary from the upgrade Plan's info field.
:::
-### LocalNet
+### LocalNet Upgrades
+
+LocalNet **DOES NOT** support `cosmovisor` and automatic upgrades at the moment.
+
+However, `cosmovisor` **IS NOT NEEDED** to simulate and test the upgrade procedure.
+
+#### LocalNet Upgrade Cheat Sheet
+
+For a hypothetical scenario to upgrade from `0.1` to `0.2`:
+
+1. **Stop LocalNet** to prevent interference. Pull the `poktroll` repo into two separate directories. Let's name them `old` and `new`. It is recommended to open at least two tabs/shell panels in each directory for easier switching between directories.
+
+2. **(`old` repo)** - Check out the old version. For the test to be accurate, we need to upgrade from the correct version.
+
+ ```bash
+ git checkout v0.1
+ ```
+
+3. **(`new` repo)**
+
+ ```bash
+ git checkout -b branch_to_test
+ ```
-LocalNet currently does not support `cosmovisor` and automatic upgrades. However, we have provided scripts to facilitate local testing in the `tools/scripts/upgrades` directory:
+ Replace `branch_to_test` with the actual branch you want to test.
-1. Modify `tools/scripts/upgrades/authz_upgrade_tx_example_v0.0.4_height_30.json` to reflect the name of the upgrade and the height at which it should be scheduled.
+ :::note
+ This branch should have an upgrade implemented per the docs in [Implementing the Upgrade](#implementing-the-upgrade).
+ Here, the upgrade should be named `v0.2`.
+ :::
-2. Check and update the `tools/scripts/upgrades/cosmovisor-start-node.sh` to point to the correct binaries:
+4. **(BOTH repos)** - We'll use binaries from both versions - old and new.
- - The old binary should be compiled to work before the upgrade.
- - The new binary should contain the upgrade logic to be executed immediately after the node is started using the new binary.
+ ```bash
+ make go_develop ignite_release ignite_release_extract_binaries
+ ```
-3. Run `bash tools/scripts/upgrades/cosmovisor-start-node.sh` to wipe the `~/.poktroll` directory and place binaries in the correct locations.
+ :::note
+ The binary produced by these commands in the old repo should result in the same binary as it was downloaded from [production releases](https://github.com/pokt-network/poktroll/releases). You can use them as an alternative to building the binary from source.
+ :::
-4. Execute the transaction as shown in [Submitting the upgrade on-chain](#submitting-the-upgrade-on-chain) section above.
+5. **(`old` repo)** - Clean up and generate an empty genesis using the old version.
-### DevNet
+ ```bash
+ rm -rf ~/.poktroll && ./release_binaries/poktroll_darwin_arm64 comet unsafe-reset-all && make localnet_regenesis
+ ```
+
+6. **(`old` repo)** Write and save [an upgrade transaction](#writing-an-upgrade-transaction) for `v0.2`. The upgrade plan should be named after the version to which you're upgrading.
+
+7. **(`old` repo)** Start the node:
+
+ ```bash
+ ./release_binaries/poktroll_darwin_arm64 start
+ ```
+
+ The validator node should run and produce blocks as expected.
+
+8. **(`old` repo)** Submit the upgrade transaction. **NOTE THAT** the upgrade height in the transaction should be higher than the current block height. Adjust and submit if necessary:
+
+ ```bash
+ ./release_binaries/poktroll_darwin_arm64 tx authz exec tools/scripts/upgrades/local_test_v0.2.json --from=pnf
+ ```
+
+ Replace the path to the JSON transaction with your prepared upgrade transaction. Verify the upgrade plan was submitted and accepted:
+
+ ```bash
+ ./release_binaries/poktroll_darwin_arm64 query upgrade plan
+ ```
+
+9. Wait for the upgrade height to be reached on the old version. The old version should stop working since it has no knowledge of the `v0.2` upgrade. This simulates a real-world scenario. Stop the old node, and switch to the new version.
+
+10. **(`new` repo)**
+
+ ```bash
+ ./release_binaries/poktroll_darwin_arm64 start
+ ```
+
+11. **Observe the output:**
+
+ - A successful upgrade should output `applying upgrade "v0.2" at height: 20 module=x/upgrade`.
+ - The node on the new version should continue producing blocks.
+ - If there were errors during the upgrade, investigate and address them.
+
+12. **(`new` repo, optional)** - If parameters were changed during the upgrade, test if these changes were applied. For example:
+
+ ```bash
+ ./release_binaries/poktroll_darwin_arm64 q application params
+ ```
+
+### DevNet Upgrades
DevNets currently do not support `cosmovisor`.
We use Kubernetes to manage software versions, including validators. Introducing another component to manage versions would be complex, requiring a re-architecture of our current solution to accommodate this change.
-### TestNet
+### TestNet Upgrades
We currently deploy TestNet validators using Kubernetes with helm charts, which prevents us from managing the validator with `cosmovisor`. We do not control what other TestNet participants are running. However, if participants have deployed their nodes using the [cosmovisor guide](../../operate/run_a_node/full_node_walkthrough.md), their nodes will upgrade automatically.
@@ -121,9 +268,11 @@ Until we transition to [cosmos-operator](https://github.com/strangelove-ventures
3. Monitor validator node(s) as they start and begin producing blocks.
:::tip
-If you are a member of Grove, you can find the instructions to access the infrastructure [here](https://www.notion.so/buildwithgrove/How-to-re-genesis-a-Shannon-TestNet-a6230dd8869149c3a4c21613e3cfad15?pvs=4).
+
+If you are a member of Grove, you can find the instructions to access the infrastructure [on notion](https://www.notion.so/buildwithgrove/How-to-re-genesis-a-Shannon-TestNet-a6230dd8869149c3a4c21613e3cfad15?pvs=4).
+
:::
-### Mainnet
+### Mainnet Upgrades
The Mainnet upgrade process is to be determined. We aim to develop and implement improved tooling for this environment.
diff --git a/e2e/tests/init_test.go b/e2e/tests/init_test.go
index 680806c87..08804373d 100644
--- a/e2e/tests/init_test.go
+++ b/e2e/tests/init_test.go
@@ -171,7 +171,7 @@ func (s *suite) ThePocketdBinaryShouldExitWithoutError() {
func (s *suite) TheUserRunsTheCommand(cmd string) {
cmds := strings.Split(cmd, " ")
res, err := s.pocketd.RunCommand(cmds...)
- require.NoError(s, err, "error running command %s", cmd)
+ require.NoError(s, err, "error running command %s due to: %v", cmd, err)
s.pocketd.result = res
}
@@ -192,7 +192,7 @@ func (s *suite) TheUserSendsUpoktFromAccountToAccount(amount int64, accName1, ac
"-y",
}
res, err := s.pocketd.RunCommandOnHost("", args...)
- require.NoError(s, err, "error sending upokt from %q to %q", accName1, accName2)
+ require.NoError(s, err, "error sending upokt from %q to %q due to: %v", accName1, accName2, err)
s.pocketd.result = res
}
@@ -267,6 +267,7 @@ func (s *suite) TheUserStakesAWithUpoktFromTheAccount(actorType string, amount i
"-y",
}
res, err := s.pocketd.RunCommandOnHost("", args...)
+ require.NoError(s, err, "error staking %s due to: %v", actorType, err)
// Remove the temporary config file
err = os.Remove(configFile.Name())
@@ -301,7 +302,7 @@ func (s *suite) TheUserStakesAWithUpoktForServiceFromTheAccount(actorType string
"-y",
}
res, err := s.pocketd.RunCommandOnHost("", args...)
- require.NoError(s, err, "error staking %s for service %s", actorType, serviceId)
+ require.NoError(s, err, "error staking %s for service %s due to: %v", actorType, serviceId, err)
// Remove the temporary config file
err = os.Remove(configFile.Name())
@@ -372,7 +373,7 @@ func (s *suite) TheUserUnstakesAFromTheAccount(actorType string, accName string)
}
res, err := s.pocketd.RunCommandOnHost("", args...)
- require.NoError(s, err, "error unstaking %s", actorType)
+ require.NoError(s, err, "error unstaking %s due to: %v", actorType, err)
// Get current balance
balanceKey := accBalanceKey(accName)
@@ -463,7 +464,7 @@ func (s *suite) TheApplicationSendsTheSupplierASuccessfulRequestForServiceWithPa
appAddr := accNameToAddrMap[appName]
res, err := s.pocketd.RunCurlWithRetry(pathUrl, serviceId, method, path, appAddr, requestData, 5)
- require.NoError(s, err, "error sending relay request from app %q to supplier %q for service %q", appName, supplierOperatorName, serviceId)
+ require.NoError(s, err, "error sending relay request from app %q to supplier %q for service %q due to: %v", appName, supplierOperatorName, serviceId, err)
var jsonContent json.RawMessage
err = json.Unmarshal([]byte(res.Stdout), &jsonContent)
@@ -569,11 +570,22 @@ func (s *suite) TheUserWaitsForTheApplicationForAccountPeriodToFinish(accName, p
func (s *suite) getStakedAmount(actorType, accName string) (int, bool) {
s.Helper()
+
+ listCommand := fmt.Sprintf("list-%s", actorType)
+ // TODO_TECHDEBT(@olshansky): As of #1028, we started migrating some parts
+ // of the CLI to use AutoCLI which made list commands pluralized.
+ // E.g. "list-suppliers" instead of "list-supplier".
+ // Over time, all actor commands will be updated like so and this if can
+ // be removed.
+ if actorType == suppliertypes.ModuleName {
+ listCommand = fmt.Sprintf("%ss", listCommand)
+ }
args := []string{
"query",
actorType,
- fmt.Sprintf("list-%s", actorType),
+ listCommand,
}
+
res, err := s.pocketd.RunCommandOnHostWithRetry("", numQueryRetries, args...)
require.NoError(s, err, "error getting %s", actorType)
s.pocketd.result = res
@@ -662,7 +674,7 @@ func (s *suite) buildSupplierMap() {
argsAndFlags := []string{
"query",
"supplier",
- "list-supplier",
+ "list-suppliers",
fmt.Sprintf("--%s=json", cometcli.OutputFlag),
}
res, err := s.pocketd.RunCommandOnHostWithRetry("", numQueryRetries, argsAndFlags...)
@@ -752,7 +764,7 @@ func (s *suite) getSupplierInfo(supplierOperatorName string) *sharedtypes.Suppli
}
res, err := s.pocketd.RunCommandOnHostWithRetry("", numQueryRetries, args...)
- require.NoError(s, err, "error getting supplier %s", supplierOperatorAddr)
+ require.NoError(s, err, "error getting supplier %s due to error: %v", supplierOperatorAddr, err)
s.pocketd.result = res
var resp suppliertypes.QueryGetSupplierResponse
diff --git a/e2e/tests/node.go b/e2e/tests/node.go
index 826b3ef71..d49970d8c 100644
--- a/e2e/tests/node.go
+++ b/e2e/tests/node.go
@@ -5,7 +5,6 @@ package e2e
import (
"bytes"
"fmt"
- "net"
"net/url"
"os"
"os/exec"
@@ -26,11 +25,6 @@ var (
defaultHome = os.Getenv("POKTROLLD_HOME")
// defaultPathURL used by curl commands to send relay requests
defaultPathURL = os.Getenv("PATH_URL")
- // defaultPathHostOverride overrides the host in the URL used to send requests
- // Since the current DevNet infrastructure does not support arbitrary subdomains,
- // this is used to specify the host to connect to and the full host (with the service as a subdomain)
- // will be sent in the "Host" request header.
- defaultPathHostOverride = os.Getenv("PATH_HOST_OVERRIDE")
// defaultDebugOutput provides verbose output on manipulations with binaries (cli command, stdout, stderr)
defaultDebugOutput = os.Getenv("E2E_DEBUG_OUTPUT")
)
@@ -99,7 +93,18 @@ func (p *pocketdBin) RunCommandOnHostWithRetry(rpcUrl string, numRetries uint8,
if err == nil {
return res, nil
}
- // TODO_HACK: Figure out a better solution for retries. A parameter? Exponential backoff? What else?
+ // DEV_NOTE: Intentionally keeping a print statement here so errors are
+ // very visible even though the output may be noisy.
+ fmt.Printf(`
+----------------------------------------
+Retrying command due to error:
+ - RPC URL: %s
+ - Arguments: %v
+ - Response: %v
+ - Error: %v
+----------------------------------------
+`, rpcUrl, args, res, err)
+ // TODO_TECHDEBT(@bryanchriswhite): Figure out a better solution for retries. A parameter? Exponential backoff? What else?
time.Sleep(5 * time.Second)
return p.RunCommandOnHostWithRetry(rpcUrl, numRetries-1, args...)
}
@@ -182,29 +187,13 @@ func (p *pocketdBin) runPocketCmd(args ...string) (*commandResult, error) {
return r, err
}
-// runCurlPostCmd is a helper to run a command using the local pocketd binary with the flags provided
+// runCurlCmd is a helper to run a command using the local pocketd binary with the flags provided
func (p *pocketdBin) runCurlCmd(rpcBaseURL, service, method, path, appAddr, data string, args ...string) (*commandResult, error) {
rpcUrl, err := url.Parse(rpcBaseURL)
if err != nil {
return nil, err
}
- // Get the virtual host that will be sent in the "Host" request header
- virtualHost := getVirtualHostFromUrlForService(rpcUrl, service)
-
- // TODO_HACK: As of PR #879, the DevNet infrastructure does not support routing
- // requests to arbitrary subdomains due to TLS certificate-related complexities.
- // In such environment, defaultPathHostOverride (which contains no subdomain)
- // is used as:
- // 1. The gateway's 'host:port' to connect to
- // 2. A base to which the service is added as a subdomain then set as the "Host" request header.
- // (i.e. Host: .)
- //
- // Override the actual connection address if the environment requires it.
- if defaultPathHostOverride != "" {
- rpcUrl.Host = defaultPathHostOverride
- }
-
// Ensure that if a path is provided, it starts with a "/".
// This is required for RESTful APIs that use a path to identify resources.
// For JSON-RPC APIs, the resource path should be empty, so empty paths are allowed.
@@ -225,8 +214,9 @@ func (p *pocketdBin) runCurlCmd(rpcBaseURL, service, method, path, appAddr, data
"-v", // verbose output
"-sS", // silent with error
"-H", `Content-Type: application/json`, // HTTP headers
- "-H", fmt.Sprintf("Host: %s", virtualHost), // Add virtual host header
+ "-H", fmt.Sprintf("Host: %s", rpcUrl.Host), // Add virtual host header
"-H", fmt.Sprintf("X-App-Address: %s", appAddr),
+ "-H", fmt.Sprintf("target-service-id: %s", service),
rpcUrl.String(),
}
@@ -262,40 +252,3 @@ func (p *pocketdBin) runCurlCmd(rpcBaseURL, service, method, path, appAddr, data
return r, err
}
-
-// formatURLString returns RESTful or JSON-RPC API endpoint URL depending
-// on the parameters provided.
-func formatURLString(serviceAlias, rpcUrl, path string) string {
- // For JSON-RPC APIs, the path should be empty
- if len(path) == 0 {
- return fmt.Sprintf("http://%s.%s/v1", serviceAlias, rpcUrl)
- }
-
- // For RESTful APIs, the path should not be empty.
- // We remove the leading / to make the format string below easier to read.
- if path[0] == '/' {
- path = path[1:]
- }
- return fmt.Sprintf("http://%s.%s/v1/%s", serviceAlias, rpcUrl, path)
-}
-
-// getVirtualHostFromUrlForService returns a virtual host taking into consideration
-// the URL's host and the service if it's non-empty.
-// Specifically, it:
-// 1. Extract's the host from the rpcURL
-// 2. Prefixes the service as a subdomain to (1) the given rpcUrl host stripped of the port
-//
-// TODO_HACK: This is needed as of PR #879 because the DevNet infrastructure does
-// not support arbitrary subdomains due to TLS certificate-related complexities.
-func getVirtualHostFromUrlForService(rpcUrl *url.URL, service string) string {
- // Strip port if it exists and add service prefix
- host, _, err := net.SplitHostPort(rpcUrl.Host)
- if err != nil {
- // err is non-nil if rpcUrl.Host does not have a port.
- // Use the entire host as is
- host = rpcUrl.Host
- }
- virtualHost := fmt.Sprintf("%s.%s", service, host)
-
- return virtualHost
-}
diff --git a/e2e/tests/reset_params_test.go b/e2e/tests/reset_params_test.go
index 404ecd176..3b0c0ab1d 100644
--- a/e2e/tests/reset_params_test.go
+++ b/e2e/tests/reset_params_test.go
@@ -28,7 +28,7 @@ func (s *suite) resetAllModuleParamsToDefaults() {
s.sendAuthzExecTx(s.granteeName, resetTxJSONFile.Name())
}
-// allMoudlesMsgUpdateParamsToDefaultsAny returns a slice of Any messages, each corresponding
+// allModulesMsgUpdateParamsToDefaultsAny returns a slice of Any messages, each corresponding
// to a MsgUpdateParams for a module, populated with the respective default values.
func (s *suite) allModulesMsgUpdateParamsToDefaultsAny() []*codectypes.Any {
s.Helper()
diff --git a/e2e/tests/session.feature b/e2e/tests/session.feature
index df5f3f66a..12a230d17 100644
--- a/e2e/tests/session.feature
+++ b/e2e/tests/session.feature
@@ -17,7 +17,7 @@ Feature: Session Namespace
When the supplier "supplier1" has serviced a session with "5" relays for service "anvil" for application "app1"
And the user should wait for the "proof" module "CreateClaim" Message to be submitted
And the user should wait for the "proof" module "ClaimCreated" tx event to be broadcast
- Then the claim created by supplier "supplier1" for service "svc1" for application "app1" should be persisted on-chain
+ Then the claim created by supplier "supplier1" for service "svc1" for application "app1" should be persisted onchain
And the user should wait for the "proof" module "SubmitProof" Message to be submitted
And the user should wait for the "proof" module "ProofSubmitted" tx event to be broadcast
Then the claim created by supplier "supplier1" for service "anvil" for application "app1" should be successfully settled
@@ -37,7 +37,7 @@ Feature: Session Namespace
# Then the supplier "supplier1" replys with a relay response for service "svc1" for application "app1" with session number "1"
# And the application "app1" receives a successful relay response signed by "supplier1" for session number "1"
# And after the supplier "supplier1" updates a claim for session number "1" for service "svc1" for application "app1"
- # Then the claim created by supplier "supplier1" for service "svc1" for application "app1" should be persisted on-chain
+ # Then the claim created by supplier "supplier1" for service "svc1" for application "app1" should be persisted onchain
# Scenario: A late Relay outside the SessionGracePeriod is rejected
# Given the user has the pocketd binary installed
diff --git a/e2e/tests/session_steps_test.go b/e2e/tests/session_steps_test.go
index b5e35aedd..e035a5616 100644
--- a/e2e/tests/session_steps_test.go
+++ b/e2e/tests/session_steps_test.go
@@ -52,8 +52,10 @@ func (s *suite) TheUserShouldWaitForTheModuleMessageToBeSubmitted(module, msgTyp
// so that next steps that assert on supplier rewards can do it without having
// the proof submission fee skewing the results.
switch msgType {
+ case "CreateClaim":
+ fallthrough
case "SubmitProof":
- supplierOperatorAddress := getMsgSubmitProofSenderAddress(event)
+ supplierOperatorAddress := getMsgSenderAddress(event)
require.NotEmpty(s, supplierOperatorAddress)
supplierAccName := accAddrToNameMap[supplierOperatorAddress]
@@ -146,13 +148,13 @@ func (s *suite) TheSupplierHasServicedASessionWithRelaysForServiceForApplication
require.NoError(s, err)
// Query for any existing claims so that we can compare against them in
- // future assertions about changes in on-chain claims.
+ // future assertions about changes in onchain claims.
allClaimsRes, err := s.proofQueryClient.AllClaims(ctx, &prooftypes.QueryAllClaimsRequest{})
require.NoError(s, err)
s.scenarioState[preExistingClaimsKey] = allClaimsRes.Claims
// Query for any existing proofs so that we can compare against them in
- // future assertions about changes in on-chain proofs.
+ // future assertions about changes in onchain proofs.
allProofsRes, err := s.proofQueryClient.AllProofs(ctx, &prooftypes.QueryAllProofsRequest{})
require.NoError(s, err)
s.scenarioState[preExistingProofsKey] = allProofsRes.Proofs
@@ -417,8 +419,8 @@ func combineEventMatchFns(fns ...func(*abci.Event) bool) func(*abci.Event) bool
}
}
-// getMsgSubmitProofSenderAddress returns the sender address from the given event.
-func getMsgSubmitProofSenderAddress(event *abci.Event) string {
+// getMsgSenderAddress returns the sender address from the given event.
+func getMsgSenderAddress(event *abci.Event) string {
senderAttrIdx := slices.IndexFunc(event.Attributes, func(attr abci.EventAttribute) bool {
return attr.Key == "sender"
})
diff --git a/e2e/tests/stake_supplier.feature b/e2e/tests/stake_supplier.feature
index b24a579d9..3d9607fdc 100644
--- a/e2e/tests/stake_supplier.feature
+++ b/e2e/tests/stake_supplier.feature
@@ -30,19 +30,23 @@ Feature: Stake Supplier Namespace
And the user verifies the "supplier" for account "supplier2" is not staked
And the account balance of "supplier2" should be "1000070" uPOKT "more" than before
- Scenario: User can restake a Supplier waiting for it to become active again
- Given the user has the pocketd binary installed
- # Reduce the application unbonding period to avoid timeouts and speed up scenarios.
- And the "supplier" unbonding period param is successfully set to "1" sessions of "2" blocks
- And the user verifies the "supplier" for account "supplier2" is not staked
- Then the user stakes a "supplier" with "1000070" uPOKT for "anvil" service from the account "supplier2"
- And the user should wait for the "supplier" module "StakeSupplier" message to be submitted
- Then the user should see that the supplier for account "supplier2" is staked
- But the session for application "app1" and service "anvil" does not contain "supplier2"
- When the user waits for supplier "supplier2" to become active for service "anvil"
- Then the session for application "app1" and service "anvil" contains the supplier "supplier2"
- # Cleanup to make this feature idempotent.
- And the user unstakes a "supplier" from the account "supplier2"
- And the supplier for account "supplier2" is unbonding
- And the user should wait for the "supplier" module "SupplierUnbondingBegin" tx event to be broadcast
- And a "supplier" module "SupplierUnbondingEnd" end block event is broadcast
+ # TODO_MAINNET(@olshansk, #1033): Since the "to become active for service" step
+ # requires reading "ServicesActivationHeightsMap", which is temporarily set to nil,
+ # this test has been commented out. See #1033 for details and re-enable this test
+ # once that data is retrievable through a different method.
+ # Scenario: User can restake a Supplier waiting for it to become active again
+ # Given the user has the pocketd binary installed
+ # # Reduce the application unbonding period to avoid timeouts and speed up scenarios.
+ # And the "supplier" unbonding period param is successfully set to "1" sessions of "2" blocks
+ # And the user verifies the "supplier" for account "supplier2" is not staked
+ # Then the user stakes a "supplier" with "1000070" uPOKT for "anvil" service from the account "supplier2"
+ # And the user should wait for the "supplier" module "StakeSupplier" message to be submitted
+ # Then the user should see that the supplier for account "supplier2" is staked
+ # But the session for application "app1" and service "anvil" does not contain "supplier2"
+ # When the user waits for supplier "supplier2" to become active for service "anvil"
+ # Then the session for application "app1" and service "anvil" contains the supplier "supplier2"
+ # # Cleanup to make this feature idempotent.
+ # And the user unstakes a "supplier" from the account "supplier2"
+ # And the supplier for account "supplier2" is unbonding
+ # And the user should wait for the "supplier" module "SupplierUnbondingBegin" tx event to be broadcast
+ # And a "supplier" module "SupplierUnbondingEnd" end block event is broadcast
diff --git a/go.mod b/go.mod
index 70303d360..162807307 100644
--- a/go.mod
+++ b/go.mod
@@ -66,10 +66,10 @@ require (
github.com/spf13/viper v1.18.2
github.com/stretchr/testify v1.9.0
go.uber.org/multierr v1.11.0
- golang.org/x/crypto v0.25.0
+ golang.org/x/crypto v0.31.0
golang.org/x/exp v0.0.0-20240707233637-46b078467d37
- golang.org/x/sync v0.7.0
- golang.org/x/text v0.16.0
+ golang.org/x/sync v0.10.0
+ golang.org/x/text v0.21.0
golang.org/x/tools v0.23.0
google.golang.org/genproto/googleapis/api v0.0.0-20240709173604-40e1e62336c5
google.golang.org/grpc v1.65.0
@@ -280,8 +280,8 @@ require (
golang.org/x/mod v0.19.0 // indirect
golang.org/x/net v0.27.0 // indirect
golang.org/x/oauth2 v0.20.0 // indirect
- golang.org/x/sys v0.22.0 // indirect
- golang.org/x/term v0.22.0 // indirect
+ golang.org/x/sys v0.28.0 // indirect
+ golang.org/x/term v0.27.0 // indirect
golang.org/x/time v0.5.0 // indirect
google.golang.org/api v0.169.0 // indirect
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect
diff --git a/go.sum b/go.sum
index eddbf91bd..761f5909e 100644
--- a/go.sum
+++ b/go.sum
@@ -1235,8 +1235,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30=
-golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
+golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
+golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -1380,8 +1380,8 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
-golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
+golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1481,13 +1481,13 @@ golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
-golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
+golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk=
-golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4=
+golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
+golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1498,8 +1498,8 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
-golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
+golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
diff --git a/load-testing/config/load_test_manifest_reader.go b/load-testing/config/load_test_manifest_reader.go
index db20b21fc..81b94a630 100644
--- a/load-testing/config/load_test_manifest_reader.go
+++ b/load-testing/config/load_test_manifest_reader.go
@@ -22,7 +22,7 @@ type LoadTestManifestYAML struct {
// IsEphemeralChain is a flag that indicates whether the test is expected to be
// run on LocalNet or long-living remote chain (i.e. TestNet/DevNet).
IsEphemeralChain bool `yaml:"is_ephemeral_chain"`
- TestNetNode string `yaml:"testnet_node"`
+ RPCNode string `yaml:"rpc_node"`
ServiceId string `yaml:"service_id"`
Suppliers []ProvisionedActorConfig `yaml:"suppliers"`
Gateways []ProvisionedActorConfig `yaml:"gateways"`
@@ -67,6 +67,10 @@ func validatedEphemeralChainManifest(manifest *LoadTestManifestYAML) (*LoadTestM
return nil, ErrEphemeralChainLoadTestInvalidManifest.Wrap("empty funding account address")
}
+ if len(manifest.RPCNode) == 0 {
+ return nil, ErrEphemeralChainLoadTestInvalidManifest.Wrap("empty rpc node url")
+ }
+
for _, gateway := range manifest.Gateways {
if len(gateway.Address) == 0 {
return nil, ErrEphemeralChainLoadTestInvalidManifest.Wrap("empty gateway address")
@@ -107,8 +111,8 @@ func validatedNonEphemeralChainManifest(manifest *LoadTestManifestYAML) (*LoadTe
return nil, ErrNonEphemeralChainLoadTestInvalidManifest.Wrap("suppliers entry forbidden")
}
- if len(manifest.TestNetNode) == 0 {
- return nil, ErrNonEphemeralChainLoadTestInvalidManifest.Wrap("empty testnet node url")
+ if len(manifest.RPCNode) == 0 {
+ return nil, ErrNonEphemeralChainLoadTestInvalidManifest.Wrap("empty rpc node url")
}
if len(manifest.ServiceId) == 0 {
diff --git a/load-testing/loadtest_manifest_example.yaml b/load-testing/loadtest_manifest_example.yaml
index 840fafdab..47c33f07c 100644
--- a/load-testing/loadtest_manifest_example.yaml
+++ b/load-testing/loadtest_manifest_example.yaml
@@ -2,16 +2,16 @@
# It is intended to target a remote environment, such as a devnet or testnet.
is_ephemeral_chain: false
-# testnet_node is the URL of the node that the load test will use to query the
+# rpc_node is the URL of the RPC node that the load test will use to query the
# chain and submit transactions.
-testnet_node: https://devnet-sophon-validator-rpc.poktroll.com
+rpc_node: https://devnet-sophon-validator-rpc.poktroll.com
# The service ID to request relays from.
service_id: "anvil"
# The address of the account that will be used to fund the application accounts
-# so that they can stake on the network.
-funding_account_address: pokt1awtlw5sjmw2f5lgj8ekdkaqezphgz88rdk93sk # address for faucet account
+# so that they can stake on the local network.
+funding_account_address: pokt1eeeksh2tvkh7wzmfrljnhw4wrhs55lcuvmekkw # address for faucet account
# In non-ephemeral chains, the gateways are identified by their address.
gateways:
diff --git a/load-testing/loadtest_manifest_localnet.yaml b/load-testing/loadtest_manifest_localnet.yaml
index 763771576..da7d729ba 100644
--- a/load-testing/loadtest_manifest_localnet.yaml
+++ b/load-testing/loadtest_manifest_localnet.yaml
@@ -3,12 +3,16 @@
is_ephemeral_chain: true # This should be `true` for LocalNet as it is an ephemeral network
+# rpc_node is the URL of the RPC node that the load test will use to query the
+# chain and submit transactions.
+rpc_node: http://localhost:26657
+
# The service ID to use for the load test.
service_id: anvil
# The address of the account that will be used to fund the application,
-# gateway and supplier accounts so that they can stake on the network.
-funding_account_address: pokt1awtlw5sjmw2f5lgj8ekdkaqezphgz88rdk93sk # address for faucet account
+# gateway and supplier accounts so that they can stake on the local network.
+funding_account_address: pokt1eeeksh2tvkh7wzmfrljnhw4wrhs55lcuvmekkw # address for faucet account
# List of pre-provisioned suppliers used for load testing.
# These suppliers will be progressively staked during the load test, according
@@ -24,7 +28,7 @@ suppliers:
# RelayMiner 1; http://localhost:10350/r/relayminer1/overview
- address: pokt19a3t4yunp0dlpfjrp7qwnzwlrzd5fzs2gjaaaj
- # The advertised URL used by the supplier when it submits a stake message on-chain.
+ # The advertised URL used by the supplier when it submits a stake message onchain.
exposed_url: http://relayminer1:8545
# RelayMiner 2; http://localhost:10350/r/relayminer2/overview
@@ -48,12 +52,12 @@ gateways:
# Gateway 1; http://localhost:10350/r/gateway1/overview
- address: pokt15vzxjqklzjtlz7lahe8z2dfe9nm5vxwwmscne4
- exposed_url: http://anvil.localhost/v1:3000 # The gateway url that the user sends relays to (e.g. curl)
+ exposed_url: http://localhost:3000/v1/ # The gateway url that the user sends relays to (e.g. curl)
# Gateway 2; http://localhost:10350/r/gateway2/overview
- address: pokt15w3fhfyc0lttv7r585e2ncpf6t2kl9uh8rsnyz
- exposed_url: http://anvil.localhost/v1:3001
+ exposed_url: http://localhost:3001/v1/
# Gateway 3; http://localhost:10350/r/gateway3/overview
- address: pokt1zhmkkd0rh788mc9prfq0m2h88t9ge0j83gnxya
- exposed_url: http://anvil.localhost/v1:3002
+ exposed_url: http://localhost:3002/v1/
diff --git a/load-testing/loadtest_manifest_localnet_single_supplier.yaml b/load-testing/loadtest_manifest_localnet_single_supplier.yaml
index c455eaa8f..e2a87e5d7 100644
--- a/load-testing/loadtest_manifest_localnet_single_supplier.yaml
+++ b/load-testing/loadtest_manifest_localnet_single_supplier.yaml
@@ -3,12 +3,16 @@
is_ephemeral_chain: true # This should be `true` for LocalNet as it is an ephemeral network
+# rpc_node is the URL of the RPC node that the load test will use to query the
+# chain and submit transactions.
+rpc_node: http://localhost:26657
+
# The service ID to use for the load test.
service_id: anvil
# The address of the account that will be used to fund the application,
-# gateway and supplier accounts so that they can stake on the network.
-funding_account_address: pokt1awtlw5sjmw2f5lgj8ekdkaqezphgz88rdk93sk # address for faucet account
+# gateway and supplier accounts so that they can stake on the local network.
+funding_account_address: pokt1eeeksh2tvkh7wzmfrljnhw4wrhs55lcuvmekkw # address for faucet account
# List of pre-provisioned suppliers used for load testing.
# These suppliers will be progressively staked during the load test, according
@@ -24,7 +28,7 @@ suppliers:
# RelayMiner 1; http://localhost:10350/r/relayminer1/overview
- address: pokt19a3t4yunp0dlpfjrp7qwnzwlrzd5fzs2gjaaaj
- # The advertised URL used by the supplier when it submits a stake message on-chain.
+ # The advertised URL used by the supplier when it submits a stake message onchain.
exposed_url: http://relayminer1:8545
# List of pre-provisioned gateways used for load testing.
@@ -40,12 +44,12 @@ gateways:
# Gateway 1; http://localhost:10350/r/gateway1/overview
- address: pokt15vzxjqklzjtlz7lahe8z2dfe9nm5vxwwmscne4
- exposed_url: http://anvil.localhost/v1:3000 # The gateway url that the user sends relays to (e.g. curl)
+ exposed_url: http://localhost:3000/v1/ # The gateway url that the user sends relays to (e.g. curl)
# Gateway 2; http://localhost:10350/r/gateway2/overview
- address: pokt15w3fhfyc0lttv7r585e2ncpf6t2kl9uh8rsnyz
- exposed_url: http://anvil.localhost/v1:3001
+ exposed_url: http://localhost:3001/v1/
# Gateway 3; http://localhost:10350/r/gateway3/overview
- address: pokt1zhmkkd0rh788mc9prfq0m2h88t9ge0j83gnxya
- exposed_url: http://anvil.localhost/v1:3002
+ exposed_url: http://localhost:3002/v1/
diff --git a/load-testing/tests/relays_stress.feature b/load-testing/tests/relays_stress.feature
index 5aa3f63a0..d6981c95e 100644
--- a/load-testing/tests/relays_stress.feature
+++ b/load-testing/tests/relays_stress.feature
@@ -14,4 +14,12 @@ Feature: Loading gateway server with relays
| gateway | 1 | 10 | 3 |
| supplier | 1 | 10 | 3 |
When a load of concurrent relay requests are sent from the applications
- Then the correct pairs count of claim and proof messages should be committed on-chain
\ No newline at end of file
+ Then the number of failed relay requests is "0"
+ # TODO_FOLLOWUP(@red-0ne): Implement the following steps
+ # Then "0" over servicing events are observed
+ # And "0" slashing events are observed
+ # And "0" expired claim events are observed
+ # And there is as many reimbursement requests as the number of settled claims
+ # And the number of claims submitted and claims settled is the same
+ # And the number of proofs submitted and proofs required is the same
+ # And the actors onchain balances are as expected
\ No newline at end of file
diff --git a/load-testing/tests/relays_stress_helpers_test.go b/load-testing/tests/relays_stress_helpers_test.go
index aa55dcf49..5eea3fd03 100644
--- a/load-testing/tests/relays_stress_helpers_test.go
+++ b/load-testing/tests/relays_stress_helpers_test.go
@@ -5,10 +5,11 @@ package tests
import (
"context"
"fmt"
+ "io"
"net/http"
- "net/url"
"os"
"path/filepath"
+ "slices"
"strings"
"sync"
"testing"
@@ -17,6 +18,9 @@ import (
"cosmossdk.io/depinject"
"cosmossdk.io/math"
"github.com/cometbft/cometbft/abci/types"
+ "github.com/cometbft/cometbft/libs/json"
+ cmtcoretypes "github.com/cometbft/cometbft/rpc/core/types"
+ rpctypes "github.com/cometbft/cometbft/rpc/jsonrpc/types"
sdkclient "github.com/cosmos/cosmos-sdk/client"
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
"github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1"
@@ -25,23 +29,26 @@ import (
"github.com/cosmos/cosmos-sdk/x/authz"
banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
+ "github.com/cosmos/gogoproto/proto"
"github.com/regen-network/gocuke"
"github.com/stretchr/testify/require"
+ "google.golang.org/grpc"
"github.com/pokt-network/poktroll/load-testing/config"
"github.com/pokt-network/poktroll/pkg/client"
- "github.com/pokt-network/poktroll/pkg/client/events"
"github.com/pokt-network/poktroll/pkg/client/query"
- "github.com/pokt-network/poktroll/pkg/client/tx"
"github.com/pokt-network/poktroll/pkg/observable/channel"
"github.com/pokt-network/poktroll/pkg/sync2"
- testsession "github.com/pokt-network/poktroll/testutil/session"
+ testdelays "github.com/pokt-network/poktroll/testutil/delays"
+ "github.com/pokt-network/poktroll/testutil/events"
"github.com/pokt-network/poktroll/testutil/testclient"
- "github.com/pokt-network/poktroll/testutil/testclient/testeventsquery"
+ "github.com/pokt-network/poktroll/testutil/testclient/testblock"
apptypes "github.com/pokt-network/poktroll/x/application/types"
gatewaytypes "github.com/pokt-network/poktroll/x/gateway/types"
+ prooftypes "github.com/pokt-network/poktroll/x/proof/types"
sharedtypes "github.com/pokt-network/poktroll/x/shared/types"
suppliertypes "github.com/pokt-network/poktroll/x/supplier/types"
+ tokenomicstypes "github.com/pokt-network/poktroll/x/tokenomics/types"
)
// actorLoadTestIncrementPlans is a struct that holds the parameters for incrementing
@@ -75,29 +82,53 @@ type actorLoadTestIncrementPlan struct {
maxActorCount int64
}
-// setupTxEventListeners sets up the transaction event listeners to observe the
-// transactions committed on-chain.
-func (s *relaysSuite) setupTxEventListeners() {
- eventsQueryClient := testeventsquery.NewLocalnetClient(s.TestingT.(*testing.T))
+// setupEventListeners sets up the event listeners for the relays suite.
+// It listens to both tx and block events to keep track of the events that are happening
+// onchain.
+func (s *relaysSuite) setupEventListeners(rpcNode string) {
+ // Set up the blockClient that will be notifying the suite about the committed blocks.
+ eventsObs, eventsObsCh := channel.NewObservable[[]types.Event]()
+ s.committedEventsObs = eventsObs
+
+ extractBlockEvents := func(ctx context.Context, block client.Block) {
+ // Query the block results endpoint for each observed block to get the tx and block events.
+ // Ref: https://docs.cometbft.com/main/rpc/#/Info/block_results
+ blockResultsUrl := fmt.Sprintf("%s/block_results?height=%d", rpcNode, block.Height())
+ blockResultsResp, err := http.DefaultClient.Get(blockResultsUrl)
+ require.NoError(s, err)
- deps := depinject.Supply(eventsQueryClient)
- eventsReplayClient, err := events.NewEventsReplayClient(
- s.ctx,
- deps,
- newTxEventSubscriptionQuery,
- tx.UnmarshalTxResult,
- eventsReplayClientBufferSize,
- )
- require.NoError(s, err)
+ defer blockResultsResp.Body.Close()
+
+ blockResultsRespBz, err := io.ReadAll(blockResultsResp.Body)
+ require.NoError(s, err)
+
+ var rpcResponse rpctypes.RPCResponse
+ err = json.Unmarshal(blockResultsRespBz, &rpcResponse)
+ require.NoError(s, err)
+
+ var blockResults cmtcoretypes.ResultBlockResults
+ err = json.Unmarshal(rpcResponse.Result, &blockResults)
+ require.NoError(s, err)
- // Map the eventsReplayClient.EventsSequence which is a replay observable
- // to a regular observable to avoid replaying txResults from old blocks.
- s.newTxEventsObs = channel.Map(
+ numEvents := len(blockResults.TxsResults) + len(blockResults.FinalizeBlockEvents)
+ events := make([]types.Event, 0, numEvents)
+
+ // Flatten all tx result events and block event results into one slice.
+ for _, txResult := range blockResults.TxsResults {
+ events = append(events, txResult.Events...)
+ }
+
+ events = append(events, blockResults.FinalizeBlockEvents...)
+
+ s.latestBlock = block
+ eventsObsCh <- events
+ }
+
+ s.blockClient = testblock.NewLocalnetClient(s.ctx, s.TestingT.(*testing.T))
+ channel.ForEach(
s.ctx,
- eventsReplayClient.EventsSequence(s.ctx),
- func(ctx context.Context, txResult *types.TxResult) (*types.TxResult, bool) {
- return txResult, false
- },
+ s.blockClient.CommittedBlocksSequence(s.ctx),
+ extractBlockEvents,
)
}
@@ -173,9 +204,9 @@ func (s *relaysSuite) mapSessionInfoForLoadTestDurationFn(
sessionInfo := &sessionInfoNotif{
blockHeight: blockHeight,
- sessionNumber: testsession.GetSessionNumberWithDefaultParams(blockHeight),
- sessionStartBlockHeight: testsession.GetSessionStartHeightWithDefaultParams(blockHeight),
- sessionEndBlockHeight: testsession.GetSessionEndHeightWithDefaultParams(blockHeight),
+ sessionNumber: sharedtypes.GetSessionNumber(s.sharedParams, blockHeight),
+ sessionStartBlockHeight: sharedtypes.GetSessionStartHeight(s.sharedParams, blockHeight),
+ sessionEndBlockHeight: sharedtypes.GetSessionEndHeight(s.sharedParams, blockHeight),
}
infoLogger := logger.Info().
@@ -231,10 +262,12 @@ func (s *relaysSuite) mapSessionInfoForLoadTestDurationFn(
testProgressBlocksRelativeToTestStartHeight, s.relayLoadDurationBlocks,
)
- if sessionInfo.blockHeight == sessionInfo.sessionEndBlockHeight {
- newSessionsCount := len(s.activeApplications) * len(s.activeSuppliers)
- s.expectedClaimsAndProofsCount = s.expectedClaimsAndProofsCount + newSessionsCount
- }
+ logger.Info().Msgf(
+ "Relays sent: %d; Success: %d; Failed: %d",
+ s.numRelaysSent.Load(),
+ s.successfulRelays.Load(),
+ s.failedRelays.Load(),
+ )
// If the current block is the start of any new session, activate the prepared
// actors to be used in the current session.
@@ -457,16 +490,17 @@ func (s *relaysSuite) mapSessionInfoWhenStakingNewSuppliersAndGatewaysFn() chann
// For each notification received, it waits for the new actors' staking/funding
// txs to be committed before sending staking & delegation txs for new applications.
func (s *relaysSuite) mapStakingInfoWhenStakingAndDelegatingNewApps(
- _ context.Context,
+ ctx context.Context,
notif *stakingInfoNotif,
) (*stakingInfoNotif, bool) {
// Ensure that new gateways and suppliers are staked.
- // Ensure that new applications are funded and have an account entry on-chain
+ // Ensure that new applications are funded and have an account entry onchain
// so that they can stake and delegate in the next block.
- txResults := s.waitForTxsToBeCommitted()
- s.ensureFundedActors(txResults, notif.newApps)
- s.ensureStakedActors(txResults, EventActionMsgStakeGateway, notif.newGateways)
- s.ensureStakedActors(txResults, EventActionMsgStakeSupplier, notif.newSuppliers)
+ testdelays.WaitAll(
+ func() { s.ensureStakedActors(ctx, notif.newSuppliers) },
+ func() { s.ensureStakedActors(ctx, notif.newGateways) },
+ func() { s.ensureFundedActors(ctx, notif.newApps) },
+ )
// Update the list of staked suppliers.
s.activeSuppliers = append(s.activeSuppliers, notif.newSuppliers...)
@@ -627,11 +661,13 @@ func (s *relaysSuite) createApplicationAccount(
// cost, and the block duration.
func (s *relaysSuite) getAppFundingAmount(currentBlockHeight int64) sdk.Coin {
currentTestDuration := s.testStartHeight + s.relayLoadDurationBlocks - currentBlockHeight
+ // Compute the cost of all relays throughout the test duration.
+ totalRelayCostDuringTestUPOKT := s.relayRatePerApp * s.relayCoinAmountCost * currentTestDuration * blockDurationSec
// Multiply by 2 to make sure the application does not run out of funds
// based on the number of relays it needs to send. Theoretically, `+1` should
// be enough, but probabilistic and time based mechanisms make it hard
// to predict exactly.
- appFundingAmount := s.relayRatePerApp * s.relayCoinAmountCost * currentTestDuration * blockDuration * 2
+ appFundingAmount := math.Max(totalRelayCostDuringTestUPOKT, s.appParams.MinStake.Amount.Int64()*2)
return sdk.NewCoin("upokt", math.NewInt(appFundingAmount))
}
@@ -724,7 +760,7 @@ func (plan *actorLoadTestIncrementPlan) shouldIncrementActorCount(
return false
}
- initialSessionNumber := testsession.GetSessionNumberWithDefaultParams(startBlockHeight)
+ initialSessionNumber := sharedtypes.GetSessionNumber(sharedParams, startBlockHeight)
actorSessionIncRate := plan.blocksPerIncrement / int64(sharedParams.GetNumBlocksPerSession())
nextSessionNumber := sessionInfo.sessionNumber + 1 - initialSessionNumber
isSessionStartHeight := sessionInfo.blockHeight == sessionInfo.sessionStartBlockHeight
@@ -750,7 +786,7 @@ func (plan *actorLoadTestIncrementPlan) shouldIncrementSupplierCount(
return false
}
- initialSessionNumber := testsession.GetSessionNumberWithDefaultParams(startBlockHeight)
+ initialSessionNumber := sharedtypes.GetSessionNumber(sharedParams, startBlockHeight)
supplierSessionIncRate := plan.blocksPerIncrement / int64(sharedParams.GetNumBlocksPerSession())
nextSessionNumber := sessionInfo.sessionNumber + 1 - initialSessionNumber
isSessionEndHeight := sessionInfo.blockHeight == sessionInfo.sessionEndBlockHeight
@@ -798,6 +834,9 @@ func (s *relaysSuite) addPendingStakeSupplierMsg(supplier *accountInfo) {
RpcType: sharedtypes.RPCType_JSON_RPC,
},
},
+ RevShare: []*sharedtypes.ServiceRevenueShare{
+ {Address: supplier.address, RevSharePercentage: 100},
+ },
},
},
))
@@ -943,7 +982,9 @@ func (s *relaysSuite) sendPendingMsgsTx(actor *accountInfo) {
err := txBuilder.SetMsgs(actor.pendingMsgs...)
require.NoError(s, err)
- txBuilder.SetTimeoutHeight(uint64(s.latestBlock.Height() + 1))
+ // Set the transaction timeout height to 2 blocks beyond the current block height.
+ // This ensures the transaction won't be rejected if the next block commit is imminent.
+ txBuilder.SetTimeoutHeight(uint64(s.latestBlock.Height() + 2))
txBuilder.SetGasLimit(690000042)
accAddress := sdk.MustAccAddressFromBech32(actor.address)
@@ -973,33 +1014,6 @@ func (s *relaysSuite) sendPendingMsgsTx(actor *accountInfo) {
}()
}
-// waitForTxsToBeCommitted waits for transactions to be observed on-chain.
-// It is used to ensure that the transactions are committed before taking
-// dependent actions.
-func (s *relaysSuite) waitForTxsToBeCommitted() (txResults []*types.TxResult) {
- ctx, cancel := context.WithCancel(s.ctx)
- defer cancel()
-
- ch := s.newTxEventsObs.Subscribe(ctx).Ch()
- for {
- txResult := <-ch
- txResults = append(txResults, txResult)
-
- // The number of transactions to be observed is not available in the TxResult
- // event, so this number is taken from the last block event.
- // The block received from s.latestBlock may be the previous one, it is
- // necessary to wait until the block matching the txResult height is received
- // in order to get the right number of transaction events to collect.
- numTxs := s.waitUntilLatestBlockHeightEquals(txResult.Height)
-
- // If all transactions are observed, break the loop.
- if len(txResults) == numTxs {
- break
- }
- }
- return txResults
-}
-
// waitUntilLatestBlockHeightEquals blocks until s.latestBlock.Height() equals the targetHeight.
// NB: s.latestBlock is updated asynchronously via a subscription to the block client observable.
func (s *relaysSuite) waitUntilLatestBlockHeightEquals(targetHeight int64) int {
@@ -1034,161 +1048,215 @@ func (s *relaysSuite) sendRelay(iteration uint64, relayPayload string) (appAddre
gateway := s.activeGateways[iteration%uint64(len(s.activeGateways))]
application := s.activeApplications[iteration%uint64(len(s.activeApplications))]
- gatewayUrl, err := url.Parse(s.gatewayUrls[gateway.address])
- require.NoError(s, err)
-
- // Include the application address in the query to the gateway.
- query := gatewayUrl.Query()
- query.Add("applicationAddr", application.address)
- query.Add("relayCount", fmt.Sprintf("%d", iteration))
- gatewayUrl.RawQuery = query.Encode()
-
- // Use the pre-defined service ID that all application and suppliers are staking for.
- gatewayUrl.Path = testedServiceId
-
// TODO_MAINNET: Capture the relay response to check for failing relays.
// Send the relay request within a goroutine to avoid blocking the test batches
// when suppliers or gateways are unresponsive.
- go func(gwURL, payload string) {
- _, err = http.DefaultClient.Post(
- gwURL,
- "application/json",
- strings.NewReader(payload),
- )
+ sendRelayRequest := func(gatewayURL, appAddr, payload string) {
+ req, err := http.NewRequest("POST", gatewayURL, strings.NewReader(payload))
+
+ // TODO_TECHDEBT(red-0ne): Use 'app-address' instead of 'X-App-Address' once PATH Gateway
+ // deprecates the X-App-Address header.
+ // This is needed by the PATH Gateway's trusted mode to identify the application
+ // that is sending the relay request.
+ req.Header.Add("X-App-Address", appAddr)
+ req.Header.Add("target-service-id", "anvil")
+ res, err := http.DefaultClient.Do(req)
require.NoError(s, err)
- }(gatewayUrl.String(), relayPayload)
+
+ if res.StatusCode == http.StatusOK {
+ s.successfulRelays.Add(1)
+ } else {
+ s.failedRelays.Add(1)
+ }
+ }
+
+ gatewayURL := s.gatewayUrls[gateway.address]
+ go sendRelayRequest(gatewayURL, application.address, relayPayload)
return application.address, gateway.address
}
// ensureFundedActors checks if the actors are funded by observing the transfer events
// in the transactions results.
-func (s *relaysSuite) ensureFundedActors(
- txResults []*types.TxResult,
- actors []*accountInfo,
-) {
- for _, actor := range actors {
- actorFunded := false
- for _, txResult := range txResults {
- for _, event := range txResult.Result.Events {
- // Skip non-relevant events.
- if event.Type != "transfer" {
- continue
- }
-
- attrs := event.Attributes
- // Check if the actor is the recipient of the transfer event.
- if actorFunded = hasEventAttr(attrs, "recipient", actor.address); actorFunded {
- break
- }
+func (s *relaysSuite) ensureFundedActors(ctx context.Context, actors []*accountInfo) {
+ if len(actors) == 0 {
+ s.Logf("No actors to fund")
+ return
+ }
+
+ fundedActors := make(map[string]struct{})
+ actorsAddrs := make([]string, len(actors))
+ for i, actor := range actors {
+ actorsAddrs[i] = actor.address
+ }
+
+ // Add 1 second to the block duration to make sure the deadline is after the next block.
+ deadline := time.Now().Add(time.Second * time.Duration(blockDurationSec+1))
+ ctx, cancel := context.WithDeadline(ctx, deadline)
+ channel.ForEach(ctx, s.committedEventsObs, func(ctx context.Context, events []types.Event) {
+ for _, event := range events {
+ // In the context of ensuring the actors are funded, only the transfer events
+ // are relevant; filtering out the other events.
+ if event.GetType() != "transfer" {
+ continue
+ }
+
+ attrs := event.GetAttributes()
+ // Check if the actor is the recipient of the transfer event.
+ fundedActorAddr, ok := getEventAttr(attrs, "recipient")
+ if !ok {
+ continue
}
- // If the actor is funded, no need to check the other transactions.
- if actorFunded {
- break
+ if !slices.Contains(actorsAddrs, fundedActorAddr) {
+ continue
}
+
+ fundedActors[fundedActorAddr] = struct{}{}
}
- // If no transfer event is found for the actor, the test is canceled.
- if !actorFunded {
- s.logAndAbortTest(txResults, "actor not funded")
- return
+ // Cancel this scope once all expected actors are successfully funded before
+ // the deadline was reached.
+ if allActorsFunded(actors, fundedActors) {
+ cancel()
}
+ })
+
+ <-ctx.Done()
+ if !allActorsFunded(actors, fundedActors) {
+ s.logAndAbortTest("at least one actor was not funded successfully")
}
}
+// allActorsFunded checks if all the expected actors are funded.
+// It returns false if any (at least one) of the expected actors was not funded.
+func allActorsFunded(expectedActors []*accountInfo, fundedActors map[string]struct{}) bool {
+ for _, actor := range expectedActors {
+ if _, ok := fundedActors[actor.address]; !ok {
+ return false
+ }
+ }
+
+ return true
+}
+
// ensureStakedActors checks if the actors are staked by observing the message events
// in the transactions results.
func (s *relaysSuite) ensureStakedActors(
- txResults []*types.TxResult,
- msg string,
+ ctx context.Context,
actors []*accountInfo,
) {
- for _, actor := range actors {
- actorStaked := false
- for _, txResult := range txResults {
- for _, event := range txResult.Result.Events {
- // Skip non-relevant events.
- if event.Type != "message" {
- continue
- }
-
- attrs := event.Attributes
- // Check if the actor is the sender of the message event.
- if hasEventAttr(attrs, "action", msg) && hasEventAttr(attrs, "sender", actor.address) {
- actorStaked = true
- break
- }
- }
+ if len(actors) == 0 {
+ return
+ }
- // If the actor is staked, no need to check the other transactions.
- if actorStaked {
- break
+ stakedActors := make(map[string]struct{})
+
+ // Add 1 second to the block duration to make sure the deadline is after the next block.
+ deadline := time.Now().Add(time.Second * time.Duration(blockDurationSec+1))
+ ctx, cancel := context.WithDeadline(ctx, deadline)
+ typedEventsObs := events.AbciEventsToTypedEvents(ctx, s.committedEventsObs)
+ channel.ForEach(ctx, typedEventsObs, func(ctx context.Context, blockEvents []proto.Message) {
+ for _, event := range blockEvents {
+ switch e := event.(type) {
+ case *suppliertypes.EventSupplierStaked:
+ stakedActors[e.Supplier.GetOperatorAddress()] = struct{}{}
+ case *gatewaytypes.EventGatewayStaked:
+ stakedActors[e.Gateway.GetAddress()] = struct{}{}
+ case *apptypes.EventApplicationStaked:
+ stakedActors[e.Application.GetAddress()] = struct{}{}
}
}
- // If no message event is found for the actor, log the transaction results
- // and cancel the test.
- if !actorStaked {
- s.logAndAbortTest(txResults, fmt.Sprintf("actor not staked: %s", actor.address))
- return
+ // Cancel this scope once all expected actors are successfully staked before
+ // the deadline was reached.
+ if allActorsStaked(actors, stakedActors) {
+ cancel()
+ }
+ })
+
+ <-ctx.Done()
+ if !allActorsStaked(actors, stakedActors) {
+ s.logAndAbortTest("at least one actor was not staked successfully")
+ return
+ }
+}
+
+// allActorsStaked checks if all the expected actors are staked.
+// It returns false if any of the expected actors was not staked.
+func allActorsStaked(expectedActors []*accountInfo, stakedActors map[string]struct{}) bool {
+ for _, actor := range expectedActors {
+ if _, ok := stakedActors[actor.address]; !ok {
+ return false
}
}
+
+ return true
}
// ensureDelegatedActors checks if the actors are delegated by observing the
// delegation events in the transactions results.
func (s *relaysSuite) ensureDelegatedApps(
- txResults []*types.TxResult,
+ ctx context.Context,
applications, gateways []*accountInfo,
) {
- for _, application := range applications {
- numDelegatees := 0
- for _, txResult := range txResults {
- for _, event := range txResult.Result.Events {
- // Skip non-EventDelegation events.
- if event.Type != EventTypeRedelegation {
- continue
- }
-
- attrs := event.Attributes
- appAddr := fmt.Sprintf("%q", application.address)
- // Skip the event if the application is not the delegator.
- if !hasEventAttr(attrs, "app_address", appAddr) {
- break
- }
-
- // Check if the application is delegated to each of the gateways.
- for _, gateway := range gateways {
- gwAddr := fmt.Sprintf("%q", gateway.address)
- if hasEventAttr(attrs, "gateway_address", gwAddr) {
- numDelegatees++
- break
- }
- }
+ if len(applications) == 0 || len(gateways) == 0 {
+ return
+ }
+
+ appsToGateways := make(map[string][]string)
+
+ deadline := time.Now().Add(time.Second * time.Duration(blockDurationSec+1))
+ ctx, cancel := context.WithDeadline(ctx, deadline)
+ typedEventsObs := events.AbciEventsToTypedEvents(ctx, s.committedEventsObs)
+ channel.ForEach(ctx, typedEventsObs, func(ctx context.Context, blockEvents []proto.Message) {
+ for _, event := range blockEvents {
+ redelegationEvent, ok := event.(*apptypes.EventRedelegation)
+ if ok {
+ app := redelegationEvent.GetApplication()
+ appsToGateways[app.GetAddress()] = app.GetDelegateeGatewayAddresses()
}
}
- // If the number of delegatees is not equal to the number of gateways,
- // the test is canceled.
- if numDelegatees != len(gateways) {
- s.logAndAbortTest(txResults, "applications not delegated to all gateways")
- return
+ // Cancel this scope once all expected applications are successfully delegated
+ // to the expected gateways before the deadline is reached.
+ if allAppsDelegatedToAllGateways(applications, gateways, appsToGateways) {
+ cancel()
}
+ })
+
+ <-ctx.Done()
+ if !allAppsDelegatedToAllGateways(applications, gateways, appsToGateways) {
+ s.logAndAbortTest("applications not delegated to all gateways")
+ return
}
}
+// allAppsDelegatedToAllGateways checks if all applications are delegated to all gateways.
+func allAppsDelegatedToAllGateways(
+ applications, gateways []*accountInfo,
+ appsToGateways map[string][]string,
+) bool {
+ for _, app := range applications {
+ if _, ok := appsToGateways[app.address]; !ok {
+ return false
+ }
+
+ for _, gateway := range gateways {
+ if !slices.Contains(appsToGateways[app.address], gateway.address) {
+ return false
+ }
+ }
+ }
+
+ return true
+}
+
// getRelayCost fetches the relay cost from the tokenomics module.
func (s *relaysSuite) getRelayCost() int64 {
- // Set up the tokenomics client.
- flagSet := testclient.NewLocalnetFlagSet(s)
- clientCtx := testclient.NewLocalnetClientCtx(s, flagSet)
- sharedClient := sharedtypes.NewQueryClient(clientCtx)
-
- res, err := sharedClient.Params(s.ctx, &sharedtypes.QueryParamsRequest{})
- require.NoError(s, err)
+ relayCost := s.testedService.ComputeUnitsPerRelay * s.sharedParams.ComputeUnitsToTokensMultiplier
- return int64(res.Params.ComputeUnitsToTokensMultiplier)
+ return int64(relayCost)
}
// getProvisionedActorsCurrentStakedAmount fetches the current stake amount of
@@ -1242,15 +1310,15 @@ func (s *relaysSuite) activatePreparedActors(notif *sessionInfoNotif) {
}
}
-// hasEventAttr checks if the event attributes contain a given key-value pair.
-func hasEventAttr(attributes []types.EventAttribute, key, value string) bool {
+// getEventAttr returns the event attribute value corresponding to the provided key.
+func getEventAttr(attributes []types.EventAttribute, key string) (value string, found bool) {
for _, attribute := range attributes {
- if attribute.Key == key && attribute.Value == value {
- return true
+ if attribute.Key == key {
+ return attribute.Value, true
}
}
- return false
+ return "", false
}
// sendAdjustMaxDelegationsParamTx sends a transaction to adjust the max_delegated_gateways
@@ -1258,20 +1326,17 @@ func hasEventAttr(attributes []types.EventAttribute, key, value string) bool {
func (s *relaysSuite) sendAdjustMaxDelegationsParamTx(maxGateways int64) {
authority := authtypes.NewModuleAddress(govtypes.ModuleName).String()
- appMsgUpdateParams := &apptypes.MsgUpdateParams{
+ appMsgUpdateMaxDelegatedGatewaysParam := &apptypes.MsgUpdateParam{
Authority: authority,
- Params: apptypes.Params{
- // Set the max_delegated_gateways parameter to the number of gateways
- // that are currently used in the test.
- MaxDelegatedGateways: uint64(maxGateways),
- },
+ Name: "max_delegated_gateways",
+ AsType: &apptypes.MsgUpdateParam_AsUint64{AsUint64: uint64(maxGateways)},
}
- appMsgUpdateParamsAny, err := codectypes.NewAnyWithValue(appMsgUpdateParams)
+ appMsgUpdateParamAny, err := codectypes.NewAnyWithValue(appMsgUpdateMaxDelegatedGatewaysParam)
require.NoError(s, err)
authzExecMsg := &authz.MsgExec{
Grantee: s.fundingAccountInfo.address,
- Msgs: []*codectypes.Any{appMsgUpdateParamsAny},
+ Msgs: []*codectypes.Any{appMsgUpdateParamAny},
}
s.fundingAccountInfo.addPendingMsg(authzExecMsg)
@@ -1334,32 +1399,20 @@ func (s *relaysSuite) parseActorLoadTestIncrementPlans(
return actorPlans
}
-// countClaimAndProofs asynchronously counts the number of claim and proof messages
-// in the observed transaction events.
-func (s *relaysSuite) countClaimAndProofs() {
+// forEachSettlement asynchronously captures the settlement events and processes them.
+func (s *relaysSuite) forEachSettlement(ctx context.Context) {
+ typedEventsObs := events.AbciEventsToTypedEvents(ctx, s.committedEventsObs)
channel.ForEach(
s.ctx,
- s.newTxEventsObs,
- func(ctx context.Context, txEvent *types.TxResult) {
- for _, event := range txEvent.Result.Events {
- if event.Type != "message" {
- continue
- }
-
- if hasEventAttr(event.Attributes, "action", EventActionMsgCreateClaim) {
- s.currentClaimCount++
- }
-
- if hasEventAttr(event.Attributes, "action", EventActionMsgSubmitProof) {
- s.currentProofCount++
- }
-
- }
+ typedEventsObs,
+ func(_ context.Context, _ []proto.Message) {
+ // TODO_FOLLOWUP(@red-0ne): Capture all settlement related events and use
+ // them to calculate the expected actor balances.
},
)
}
-// querySharedParams queries the current on-chain shared module parameters for use
+// querySharedParams queries the current onchain shared module parameters for use
// over the duration of the test.
func (s *relaysSuite) querySharedParams(queryNodeRPCURL string) {
s.Helper()
@@ -1379,19 +1432,107 @@ func (s *relaysSuite) querySharedParams(queryNodeRPCURL string) {
s.sharedParams = sharedParams
}
+// queryAppParams queries the current onchain application module parameters for use
+// over the duration of the test.
+func (s *relaysSuite) queryAppParams(queryNodeRPCURL string) {
+ s.Helper()
+
+ deps := depinject.Supply(s.txContext.GetClientCtx())
+
+ blockQueryClient, err := sdkclient.NewClientFromNode(queryNodeRPCURL)
+ require.NoError(s, err)
+ deps = depinject.Configs(deps, depinject.Supply(blockQueryClient))
+
+ appQueryclient, err := query.NewApplicationQuerier(deps)
+ require.NoError(s, err)
+
+ appParams, err := appQueryclient.GetParams(s.ctx)
+ require.NoError(s, err)
+
+ s.appParams = appParams
+}
+
+// queryProofParams queries the current onchain proof module parameters for use
+// over the duration of the test.
+func (s *relaysSuite) queryProofParams(queryNodeRPCURL string) {
+ s.Helper()
+
+ deps := depinject.Supply(s.txContext.GetClientCtx())
+
+ blockQueryClient, err := sdkclient.NewClientFromNode(queryNodeRPCURL)
+ require.NoError(s, err)
+ deps = depinject.Configs(deps, depinject.Supply(blockQueryClient))
+
+ proofQueryclient, err := query.NewProofQuerier(deps)
+ require.NoError(s, err)
+
+ params, err := proofQueryclient.GetParams(s.ctx)
+ require.NoError(s, err)
+
+ // The proofQueryclient#GetParams returns a Params interface to avoid a circular
+ // dependency between the proof module and the query module, so it needs to be cast
+ // to the actual prooftypes.Params type.
+ proofParams, ok := params.(*prooftypes.Params)
+ require.True(s, ok)
+
+ s.proofParams = proofParams
+}
+
+// queryTokenomicsParams queries the current onchain tokenomics module parameters for use
+// over the duration of the test.
+func (s *relaysSuite) queryTokenomicsParams(queryNodeRPCURL string) {
+ s.Helper()
+
+ deps := depinject.Supply(s.txContext.GetClientCtx())
+
+ blockQueryClient, err := sdkclient.NewClientFromNode(queryNodeRPCURL)
+ require.NoError(s, err)
+ deps = depinject.Configs(deps, depinject.Supply(blockQueryClient))
+
+ // TODO_TECHDEBT(red-0ne): Use tokenomics client querier instead of the grpc client
+ // once implemented.
+ var clientConn *grpc.ClientConn
+ err = depinject.Inject(deps, &clientConn)
+ require.NoError(s, err)
+
+ tokenomicsQuerier := tokenomicstypes.NewQueryClient(clientConn)
+ res, err := tokenomicsQuerier.Params(s.ctx, &tokenomicstypes.QueryParamsRequest{})
+ require.NoError(s, err)
+
+ s.tokenomicsParams = &res.Params
+}
+
+// queryTestedService queries the current service being tested.
+func (s *relaysSuite) queryTestedService(queryNodeRPCURL string) {
+ s.Helper()
+
+ deps := depinject.Supply(s.txContext.GetClientCtx())
+
+ blockQueryClient, err := sdkclient.NewClientFromNode(queryNodeRPCURL)
+ require.NoError(s, err)
+ deps = depinject.Configs(deps, depinject.Supply(blockQueryClient))
+
+ serviceQueryclient, err := query.NewServiceQuerier(deps)
+ require.NoError(s, err)
+
+ service, err := serviceQueryclient.GetService(s.ctx, "anvil")
+ require.NoError(s, err)
+
+ s.testedService = &service
+}
+
// forEachStakedAndDelegatedAppPrepareApp is a ForEachFn that waits for txs which
// were broadcast in previous pipeline stages have been committed. It ensures that
// new applications were successfully staked and all application actors are delegated
// to all gateways. Then it adds the new application actors to the prepared set, to
// be activated & used in the next session.
-func (s *relaysSuite) forEachStakedAndDelegatedAppPrepareApp(_ context.Context, notif *stakingInfoNotif) {
- // Wait for the next block to commit staking and delegation transactions
- // and be able to send relay requests evenly distributed across all gateways.
- txResults := s.waitForTxsToBeCommitted()
- s.ensureStakedActors(txResults, EventActionMsgStakeApplication, notif.newApps)
- s.ensureDelegatedApps(txResults, s.activeApplications, notif.newGateways)
- s.ensureDelegatedApps(txResults, notif.newApps, notif.newGateways)
- s.ensureDelegatedApps(txResults, notif.newApps, s.activeGateways)
+func (s *relaysSuite) forEachStakedAndDelegatedAppPrepareApp(ctx context.Context, notif *stakingInfoNotif) {
+ testdelays.WaitAll(
+ func() { s.ensureStakedActors(ctx, notif.newApps) },
+ func() { s.ensureDelegatedApps(ctx, s.activeApplications, notif.newGateways) },
+ func() { s.ensureDelegatedApps(ctx, notif.newApps, notif.newGateways) },
+ func() { s.ensureDelegatedApps(ctx, notif.newApps, s.activeGateways) },
+ )
// Add the new applications to the list of prepared applications to be activated in
// the next session.
@@ -1415,9 +1556,9 @@ func (s *relaysSuite) forEachRelayBatchSendBatch(_ context.Context, relayBatchIn
relayInterval := time.Second / time.Duration(relaysPerSec)
batchWaitGroup := new(sync.WaitGroup)
- batchWaitGroup.Add(relaysPerSec * int(blockDuration))
+ batchWaitGroup.Add(relaysPerSec * int(blockDurationSec))
- for i := 0; i < relaysPerSec*int(blockDuration); i++ {
+ for i := 0; i < relaysPerSec*int(blockDurationSec); i++ {
iterationTime := relayBatchInfo.nextBatchTime.Add(time.Duration(i+1) * relayInterval)
batchLimiter.Go(s.ctx, func() {
@@ -1453,17 +1594,12 @@ func (s *relaysSuite) forEachRelayBatchSendBatch(_ context.Context, relayBatchIn
batchWaitGroup.Wait()
}
-func (s *relaysSuite) logAndAbortTest(txResults []*types.TxResult, errorMsg string) {
- for _, txResult := range txResults {
- if txResult.Result.Log != "" {
- logger.Error().Msgf("tx result log: %s", txResult.Result.Log)
- }
- }
+func (s *relaysSuite) logAndAbortTest(errorMsg string) {
s.cancelCtx()
s.Fatal(errorMsg)
}
-// populateWithKnownApplications creates a list of gateways based on the gatewayUrls
+// populateWithKnownGateways creates a list of gateways based on the gatewayUrls
// provided in the test manifest. It is used in non-ephemeral chain tests where the
// gateways are not under the test's control and are expected to be already staked.
func (s *relaysSuite) populateWithKnownGateways() (gateways []*accountInfo) {
diff --git a/load-testing/tests/relays_stress_single_suppier.feature b/load-testing/tests/relays_stress_single_supplier.feature
similarity index 51%
rename from load-testing/tests/relays_stress_single_suppier.feature
rename to load-testing/tests/relays_stress_single_supplier.feature
index 253b82615..34d51a6dd 100644
--- a/load-testing/tests/relays_stress_single_suppier.feature
+++ b/load-testing/tests/relays_stress_single_supplier.feature
@@ -11,7 +11,16 @@ Feature: Loading gateway server with relays
And more actors are staked as follows:
| actor | actor inc amount | blocks per inc | max actors |
| application | 4 | 10 | 12 |
- | gateway | 1 | 10 | 3 |
+ | gateway | 1 | 10 | 1 |
| supplier | 1 | 10 | 1 |
When a load of concurrent relay requests are sent from the applications
- Then the correct pairs count of claim and proof messages should be committed on-chain
\ No newline at end of file
+ Then the number of failed relay requests is "0"
+ # TODO_FOLLOWUP(@red-0ne): Implement the following steps
+ # Then "0" over servicing events are observed
+ # And "0" slashing events are observed
+ # And "0" expired claim events are observed
+ # And there are as many reimbursement requests as the number of settled claims
+ # And the number of claims submitted and claims settled is the same
+ # And the number of proofs submitted and proofs required is the same
+ # And the actors onchain balances are as expected
+ # TODO_CONSIDERATION: Revisit for additional interesting test cases.
\ No newline at end of file
diff --git a/load-testing/tests/relays_stress_test.go b/load-testing/tests/relays_stress_test.go
index ae7f07a12..8557bfa15 100644
--- a/load-testing/tests/relays_stress_test.go
+++ b/load-testing/tests/relays_stress_test.go
@@ -23,23 +23,11 @@ import (
"github.com/pokt-network/poktroll/pkg/observable"
"github.com/pokt-network/poktroll/pkg/observable/channel"
"github.com/pokt-network/poktroll/testutil/testclient"
- "github.com/pokt-network/poktroll/testutil/testclient/testblock"
"github.com/pokt-network/poktroll/testutil/testclient/testtx"
+ apptypes "github.com/pokt-network/poktroll/x/application/types"
+ prooftypes "github.com/pokt-network/poktroll/x/proof/types"
sharedtypes "github.com/pokt-network/poktroll/x/shared/types"
-)
-
-// The following constants are used to identify the different types of transactions,
-// once committed, which are expected to be observed on-chain during the test.
-// NB: The TxResult Events' #Type values are not prefixed with a slash,
-// unlike TxResult Events' "action" attribute value.
-const (
- EventActionMsgStakeApplication = "/poktroll.application.MsgStakeApplication"
- EventActionMsgStakeGateway = "/poktroll.gateway.MsgStakeGateway"
- EventActionMsgStakeSupplier = "/poktroll.supplier.MsgStakeSupplier"
- EventActionMsgCreateClaim = "/poktroll.proof.MsgCreateClaim"
- EventActionMsgSubmitProof = "/poktroll.proof.MsgSubmitProof"
- EventActionAppMsgUpdateParams = "/poktroll.application.MsgUpdateParams"
- EventTypeRedelegation = "poktroll.application.EventRedelegation"
+ tokenomicstypes "github.com/pokt-network/poktroll/x/tokenomics/types"
)
// The following constants define the expected ordering of the actors when
@@ -77,8 +65,6 @@ var (
// maxConcurrentRequestLimit is the maximum number of concurrent requests that can be made.
// By default, it is set to the number of logical CPUs available to the process.
maxConcurrentRequestLimit = runtime.GOMAXPROCS(0)
- // fundingAccountAddress is the address of the account used to fund other accounts.
- fundingAccountAddress string
// supplierStakeAmount is the amount of tokens to stake by suppliers.
supplierStakeAmount sdk.Coin
// gatewayStakeAmount is the amount of tokens to stake by gateways.
@@ -86,15 +72,9 @@ var (
// testedServiceId is the service ID for that all applications and suppliers will
// be using in this test.
testedServiceId string
- // blockDuration is the duration of a block in seconds.
+ // blockDurationSec is the duration of a block in seconds.
// NB: This value SHOULD be equal to `timeout_propose` in `config.yml`.
- blockDuration = int64(2)
- // newTxEventSubscriptionQuery is the format string which yields a subscription
- // query to listen for on-chain Tx events.
- newTxEventSubscriptionQuery = "tm.event='Tx'"
- // eventsReplayClientBufferSize is the buffer size for the events replay client
- // for the subscriptions above.
- eventsReplayClientBufferSize = 100
+ blockDurationSec = int64(2)
// relayPayloadFmt is the JSON-RPC request relayPayloadFmt to send a relay request.
relayPayloadFmt = `{"jsonrpc":"2.0","method":"%s","params":[],"id":%d}`
// relayRequestMethod is the method of the JSON-RPC request to be relayed.
@@ -127,15 +107,17 @@ type relaysSuite struct {
// batchInfoObs is the observable mapping session information to batch information.
// It is used to determine when to send a batch of relay requests to the network.
batchInfoObs observable.Observable[*relayBatchInfoNotif]
- // newTxEventsObs is the observable that notifies the test suite of new
- // transactions committed on-chain.
- // It is used to check the results of the transactions sent by the test suite.
- newTxEventsObs observable.Observable[*types.TxResult]
// txContext is the transaction context used to sign and send transactions.
txContext client.TxContext
- // sharedParams is the shared on-chain parameters used in the test.
+
+ // Protocol governance params used in the test.
// It is queried at the beginning of the test.
- sharedParams *sharedtypes.Params
+ sharedParams *sharedtypes.Params
+ appParams *apptypes.Params
+ proofParams *prooftypes.Params
+ tokenomicsParams *tokenomicstypes.Params
+
+ testedService *sharedtypes.Service
// numRelaysSent is the number of relay requests sent during the test.
numRelaysSent atomic.Uint64
@@ -168,7 +150,7 @@ type relaysSuite struct {
plans *actorLoadTestIncrementPlans
// gatewayUrls is a map of gatewayAddress->URL representing the provisioned gateways.
- // These gateways are not staked yet but have their off-chain instance running
+ // These gateways are not staked yet but have their offchain instance running
// and ready to be staked and used in the test.
// Since Gateways are pre-provisioned, and already assigned a signingAddress
// and an URL to send relays to, the test suite does not create new ones but picks
@@ -181,7 +163,7 @@ type relaysSuite struct {
// It is used to ensure that the gateways are staked in the order they are provisioned.
availableGatewayAddresses []string
// suppliersUrls is a map of supplierOperatorAddress->URL representing the provisioned suppliers.
- // These suppliers are not staked yet but have their off-chain instance running
+ // These suppliers are not staked yet but have their offchain instance running
// and ready to be staked and used in the test.
// Since RelayMiners are pre-provisioned, and already assigned a signingAddress
// and an URL, the test suite does not create new ones but picks from this list.
@@ -213,17 +195,17 @@ type relaysSuite struct {
// ready to handle relay requests.
activeSuppliers []*accountInfo
- // Number of claims and proofs observed on-chain during the test.
- currentProofCount int
- currentClaimCount int
-
- // expectedClaimsAndProofsCount is the expected number of claims and proofs
- // to be committed on-chain during the test.
- expectedClaimsAndProofsCount int
-
// isEphemeralChain is a flag that indicates whether the test is expected to be
// run on ephemeral chain setups like localnet or long living ones (i.e. Test/DevNet).
isEphemeralChain bool
+
+ // committedEventsObs is the observable that maps committed blocks to onchain events.
+ committedEventsObs observable.Observable[[]types.Event]
+
+ // successfulRelays is the number of relay requests that returned 200 status code.
+ successfulRelays atomic.Uint64
+ // failedRelays is the number of relay requests that returned non-200 status code.
+ failedRelays atomic.Uint64
}
// accountInfo contains the account info needed to build and send transactions.
@@ -270,8 +252,8 @@ func TestLoadRelays(t *testing.T) {
gocuke.NewRunner(t, &relaysSuite{}).Path(filepath.Join(".", "relays_stress.feature")).Run()
}
-func TestLoadRelaysSingleSupplier(t *testing.T) {
- gocuke.NewRunner(t, &relaysSuite{}).Path(filepath.Join(".", "relays_stress_single_suppier.feature")).Run()
+func TestSingleSupplierLoadRelays(t *testing.T) {
+ gocuke.NewRunner(t, &relaysSuite{}).Path(filepath.Join(".", "relays_stress_single_supplier.feature")).Run()
}
func (s *relaysSuite) LocalnetIsRunning() {
@@ -318,9 +300,9 @@ func (s *relaysSuite) LocalnetIsRunning() {
// CometLocalWebsocketURL to the TestNetNode URL. These variables are used
// by the testtx txClient to send transactions to the network.
if !s.isEphemeralChain {
- testclient.CometLocalTCPURL = loadTestParams.TestNetNode
+ testclient.CometLocalTCPURL = loadTestParams.RPCNode
- webSocketURL, err := url.Parse(loadTestParams.TestNetNode)
+ webSocketURL, err := url.Parse(loadTestParams.RPCNode)
require.NoError(s, err)
// TestNet nodes may be exposed over HTTPS, so adjust the scheme accordingly.
@@ -332,37 +314,31 @@ func (s *relaysSuite) LocalnetIsRunning() {
testclient.CometLocalWebsocketURL = webSocketURL.String() + "/websocket"
// Update the block duration when running the test on a non-ephemeral chain.
- // TODO_TECHDEBT: Get the block duration value from the chain or the manifest.
- blockDuration = 60
+ // TODO_TECHDEBT: Get the block duration value from the chain.
+ blockDurationSec = 60
}
- // Set up the blockClient that will be notifying the suite about the committed blocks.
- s.blockClient = testblock.NewLocalnetClient(s.ctx, s.TestingT.(*testing.T))
- channel.ForEach(
- s.ctx,
- s.blockClient.CommittedBlocksSequence(s.ctx),
- func(ctx context.Context, block client.Block) {
- s.latestBlock = block
- },
- )
-
// Setup the txContext that will be used to send transactions to the network.
s.txContext = testtx.NewLocalnetContext(s.TestingT.(*testing.T))
- // Get the relay cost from the tokenomics module.
- s.relayCoinAmountCost = s.getRelayCost()
-
- // Setup the tx listener for on-chain events to check and assert on transactions results.
- s.setupTxEventListeners()
+ // Setup the event listener for onchain events to check and assert on transactions
+ // and finalized blocks results.
+ s.setupEventListeners(loadTestParams.RPCNode)
// Initialize the funding account.
s.initFundingAccount(loadTestParams.FundingAccountAddress)
- // Initialize the on-chain claims and proofs counter.
- s.countClaimAndProofs()
+ // Initialize the onchain settlement events listener.
+ s.forEachSettlement(s.ctx)
- // Query for the current shared on-chain params.
- s.querySharedParams(loadTestParams.TestNetNode)
+ // Query for the current network onchain params.
+ s.querySharedParams(loadTestParams.RPCNode)
+ s.queryAppParams(loadTestParams.RPCNode)
+ s.queryProofParams(loadTestParams.RPCNode)
+ s.queryTestedService(loadTestParams.RPCNode)
+
+ // Get the relay cost from the tokenomics module.
+ s.relayCoinAmountCost = s.getRelayCost()
// Some suppliers may already be staked at genesis, ensure that staking during
// this test succeeds by increasing the sake amount.
@@ -402,62 +378,50 @@ func (s *relaysSuite) MoreActorsAreStakedAsFollows(table gocuke.DataTable) {
// increment the actor count to the maximum.
s.relayLoadDurationBlocks = s.plans.maxActorBlocksToFinalIncrementEnd()
- if s.isEphemeralChain {
- // Adjust the max delegations parameter to the max gateways to permit all
- // applications to delegate to all gateways.
- // This is to ensure that requests are distributed evenly across all gateways
- // at any given time.
- s.sendAdjustMaxDelegationsParamTx(s.plans.gateways.maxActorCount)
- s.waitForTxsToBeCommitted()
- s.ensureUpdatedMaxDelegations(s.plans.gateways.maxActorCount)
- }
-
// Fund all the provisioned suppliers and gateways since their addresses are
// known and they are not created on the fly, while funding only the initially
// created applications.
fundedSuppliers, fundedGateways, fundedApplications := s.sendFundAvailableActorsTx()
// Funding messages are sent in a single transaction by the funding account,
// only one transaction is expected to be committed.
- txResults := s.waitForTxsToBeCommitted()
- s.ensureFundedActors(txResults, fundedSuppliers)
- s.ensureFundedActors(txResults, fundedGateways)
- s.ensureFundedActors(txResults, fundedApplications)
+ fundedActors := append(fundedSuppliers, fundedGateways...)
+ fundedActors = append(fundedActors, fundedApplications...)
+ s.ensureFundedActors(s.ctx, fundedActors)
logger.Info().Msg("Actors funded")
// The initial actors are the first actors to stake.
- suppliers := fundedSuppliers[:s.supplierInitialCount]
- gateways := fundedGateways[:s.gatewayInitialCount]
- applications := fundedApplications[:s.appInitialCount]
+ stakedSuppliers := fundedSuppliers[:s.supplierInitialCount]
+ stakedGateways := fundedGateways[:s.gatewayInitialCount]
+ stakedApplications := fundedApplications[:s.appInitialCount]
+
+ stakedActors := append(stakedSuppliers, stakedGateways...)
+ stakedActors = append(stakedActors, stakedApplications...)
- s.sendInitialActorsStakeMsgs(suppliers, gateways, applications)
- txResults = s.waitForTxsToBeCommitted()
- s.ensureStakedActors(txResults, EventActionMsgStakeSupplier, suppliers)
- s.ensureStakedActors(txResults, EventActionMsgStakeGateway, gateways)
- s.ensureStakedActors(txResults, EventActionMsgStakeApplication, applications)
+ s.sendInitialActorsStakeMsgs(stakedSuppliers, stakedGateways, stakedApplications)
+ s.ensureStakedActors(s.ctx, stakedActors)
logger.Info().Msg("Actors staked")
// Update the list of staked suppliers.
- s.activeSuppliers = append(s.activeSuppliers, suppliers...)
+ s.activeSuppliers = append(s.activeSuppliers, stakedSuppliers...)
// In the case of non-ephemeral chain load tests, the available gateways are
// not incrementally staked, but are already staked and delegated to, add all
// of them to the list of active gateways at the beginning of the test.
if !s.isEphemeralChain {
- gateways = s.populateWithKnownGateways()
+ stakedGateways = s.populateWithKnownGateways()
}
// Delegate the initial applications to the initial gateways
- s.sendDelegateInitialAppsTxs(applications, gateways)
- txResults = s.waitForTxsToBeCommitted()
- s.ensureDelegatedApps(txResults, applications, gateways)
+ s.sendDelegateInitialAppsTxs(stakedApplications, stakedGateways)
+ s.ensureDelegatedApps(s.ctx, stakedApplications, stakedGateways)
logger.Info().Msg("Apps delegated")
// Applications and gateways are now ready and will be active in the next session.
- s.preparedApplications = append(s.preparedApplications, applications...)
- s.preparedGateways = append(s.preparedGateways, gateways...)
+ s.preparedApplications = append(s.preparedApplications, stakedApplications...)
+ s.preparedGateways = append(s.preparedGateways, stakedGateways...)
// relayBatchInfoObs maps session information to batch information used to schedule
// the relay requests to be sent on the current block.
@@ -508,29 +472,11 @@ func (s *relaysSuite) ALoadOfConcurrentRelayRequestsAreSentFromTheApplications()
// Block the feature step until the test is done.
<-s.ctx.Done()
}
-func (s *relaysSuite) TheCorrectPairsCountOfClaimAndProofMessagesShouldBeCommittedOnchain() {
- logger.Info().
- Int("claims", s.currentClaimCount).
- Int("proofs", s.currentProofCount).
- Msg("Claims and proofs count")
-
- require.Equal(s,
- s.currentClaimCount,
- s.currentProofCount,
- "claims and proofs count mismatch",
- )
- // TODO_TECHDEBT: The current counting mechanism for the expected claims and proofs
- // is not accurate. The expected claims and proofs count should be calculated based
- // on a function of(time_per_block, num_blocks_per_session) -> num_claims_and_proofs.
- // The reason (time_per_block) is one of the parameters is because claims and proofs
- // are removed from the on-chain state after sessions are settled, only leaving
- // events behind. The final solution needs to either account for this timing
- // carefully (based on sessions that have passed), or be event driven using
- // a replay client of on-chain messages and/or events.
- //require.Equal(s,
- // s.expectedClaimsAndProofsCount,
- // s.currentProofCount,
- // "unexpected claims and proofs count",
- //)
+func (s *relaysSuite) TheNumberOfFailedRelayRequestsIs(expectedFailedRelays string) {
+ expectedFailedRelaysCount, err := strconv.ParseUint(expectedFailedRelays, 10, 64)
+ require.NoError(s, err)
+
+ require.EqualValues(s, expectedFailedRelaysCount, s.failedRelays.Load())
+ require.EqualValues(s, s.numRelaysSent.Load(), s.successfulRelays.Load())
}
diff --git a/localnet/grafana-dashboards/cosmos_sdk_insights.json b/localnet/grafana-dashboards/cosmos_sdk_insights.json
index 157314cdc..72e2abbd5 100644
--- a/localnet/grafana-dashboards/cosmos_sdk_insights.json
+++ b/localnet/grafana-dashboards/cosmos_sdk_insights.json
@@ -794,7 +794,7 @@
"refId": "A"
}
],
- "title": "On-Chain Relays",
+ "title": "Onchain Relays",
"type": "timeseries"
},
{
@@ -1311,7 +1311,7 @@
},
"timepicker": {},
"timezone": "browser",
- "title": "Protocol / Insights and On-Chain data",
+ "title": "Protocol / Insights and Onchain data",
"uid": "adzickiu028lcb",
"version": 1,
"weekStart": ""
diff --git a/localnet/grafana-dashboards/tokenomics_relays.json b/localnet/grafana-dashboards/tokenomics_relays.json
index f6228f499..eff5a3ce2 100644
--- a/localnet/grafana-dashboards/tokenomics_relays.json
+++ b/localnet/grafana-dashboards/tokenomics_relays.json
@@ -38,7 +38,7 @@
"showLineNumbers": false,
"showMiniMap": false
},
- "content": "## What does this dashboard show?\n\nAs relays flow through the network\nAppGate -> RelayMiner and RelayMiner creates proofs and claims, we can capture the whole relay cycle.\n\n1. shows the actual amount of relays on AppGate;\n2. shows the actual amount of relays processed by RelayMiner;\n3. the amount of relays from the on-chain information using `EventClaimCreated`;\n4. relays from `EventProofSubmitted`;",
+ "content": "## What does this dashboard show?\n\nAs relays flow through the network\nAppGate -> RelayMiner and RelayMiner creates proofs and claims, we can capture the whole relay cycle.\n\n1. shows the actual amount of relays on AppGate;\n2. shows the actual amount of relays processed by RelayMiner;\n3. the amount of relays from the onchain information using `EventClaimCreated`;\n4. relays from `EventProofSubmitted`;",
"mode": "markdown"
},
"pluginVersion": "11.2.2+security-01",
@@ -349,7 +349,7 @@
}
}
],
- "title": "Relays (from on-chain claims)",
+ "title": "Relays (from onchain claims)",
"type": "timeseries"
},
{
@@ -462,7 +462,7 @@
}
}
],
- "title": "Relays (from on-chain proofs)",
+ "title": "Relays (from onchain proofs)",
"type": "timeseries"
}
],
diff --git a/localnet/poktrolld/config/config.toml b/localnet/poktrolld/config/config.toml
index bbfa0366f..35dcda640 100644
--- a/localnet/poktrolld/config/config.toml
+++ b/localnet/poktrolld/config/config.toml
@@ -174,7 +174,7 @@ timeout_broadcast_tx_commit = "10s"
max_request_batch_size = 10
# Maximum size of request body, in bytes
-max_body_bytes = 1000000
+max_body_bytes = 100000000
# Maximum size of request header, in bytes
max_header_bytes = 1048576
@@ -330,7 +330,7 @@ keep-invalid-txs-in-cache = false
# Maximum size of a single transaction.
# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}.
-max_tx_bytes = 1048576
+max_tx_bytes = 100000000
# Maximum size of a batch of transactions to send to a peer
# Including space needed by encoding (one varint per transaction).
diff --git a/localnet/poktrolld/config/supplier1_stake_config.yaml b/localnet/poktrolld/config/supplier1_stake_config.yaml
index e3d475939..046fddb4c 100644
--- a/localnet/poktrolld/config/supplier1_stake_config.yaml
+++ b/localnet/poktrolld/config/supplier1_stake_config.yaml
@@ -2,8 +2,8 @@ owner_address: pokt1mrqt5f7qh8uxs27cjm9t7v9e74a9vvdnq5jva4
operator_address: pokt1mrqt5f7qh8uxs27cjm9t7v9e74a9vvdnq5jva4
stake_amount: 1000069upokt
default_rev_share_percent:
- pokt1mrqt5f7qh8uxs27cjm9t7v9e74a9vvdnq5jva4: 80.5
- pokt1eeeksh2tvkh7wzmfrljnhw4wrhs55lcuvmekkw: 19.5
+ pokt1mrqt5f7qh8uxs27cjm9t7v9e74a9vvdnq5jva4: 80
+ pokt1eeeksh2tvkh7wzmfrljnhw4wrhs55lcuvmekkw: 20
services:
- service_id: anvil
endpoints:
diff --git a/localnet/poktrolld/config/supplier_stake_config_example.yaml b/localnet/poktrolld/config/supplier_stake_config_example.yaml
index ce14f4a08..2fef94f0b 100644
--- a/localnet/poktrolld/config/supplier_stake_config_example.yaml
+++ b/localnet/poktrolld/config/supplier_stake_config_example.yaml
@@ -30,8 +30,8 @@ stake_amount: 1000069upokt
# or include at least one item.
default_rev_share_percent:
# The sum of all shares MUST equal 100%. Staking will fail otherwise.
- pokt1mrqt5f7qh8uxs27cjm9t7v9e74a9vvdnq5jva4: 80.5
- pokt1eeeksh2tvkh7wzmfrljnhw4wrhs55lcuvmekkw: 19.5
+ pokt1mrqt5f7qh8uxs27cjm9t7v9e74a9vvdnq5jva4: 80
+ pokt1eeeksh2tvkh7wzmfrljnhw4wrhs55lcuvmekkw: 20
services:
# The endpoint URL for the Anvil service is provided via the RelayMiner.
# The RelayMiner acts as a proxy, forwarding requests to the actual Anvil data node behind it.
diff --git a/makefiles/localnet.mk b/makefiles/localnet.mk
index d43cdccb6..9278457e6 100644
--- a/makefiles/localnet.mk
+++ b/makefiles/localnet.mk
@@ -29,3 +29,11 @@ localnet_regenesis: check_yq warn_message_acc_initialize_pubkeys ## Regenerate t
.PHONY: cosmovisor_start_node
cosmovisor_start_node: ## Starts the node using cosmovisor that waits for an upgrade plan
bash tools/scripts/upgrades/cosmovisor-start-node.sh
+
+.PHONY: localnet_cancel_upgrade
+localnet_cancel_upgrade: ## Cancels the planned upgrade on the local node
+ poktrolld tx authz exec tools/scripts/upgrades/authz_cancel_upgrade_tx.json --gas=auto --from=pnf
+
+.PHONY: localnet_show_upgrade_plan
+localnet_show_upgrade_plan: ## Shows the upgrade plan on the local node
+ poktrolld query upgrade plan
diff --git a/makefiles/relay.mk b/makefiles/relay.mk
index 38c41bfcd..202dba1d7 100644
--- a/makefiles/relay.mk
+++ b/makefiles/relay.mk
@@ -6,9 +6,9 @@
send_relay_path_JSONRPC: test_e2e_env ## Send a JSONRPC relay through PATH to a local anvil (test ETH) node
curl -X POST -H "Content-Type: application/json" \
-H "X-App-Address: pokt1mrqt5f7qh8uxs27cjm9t7v9e74a9vvdnq5jva4" \
+ -H "target-service-id: anvil" \
--data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' \
- http://anvil.localhost:3000/v1
-# $(subst http://,http://anvil.,$(PATH_URL))/v1
+ http://localhost:3000/v1/
# TODO_MAINNET(@red-0ne): Re-enable this once PATH Gateway supports REST.
# See https://github.com/buildwithgrove/path/issues/87
diff --git a/makefiles/suppliers.mk b/makefiles/suppliers.mk
index 2cceb2d8f..2e0f885e8 100644
--- a/makefiles/suppliers.mk
+++ b/makefiles/suppliers.mk
@@ -4,7 +4,15 @@
.PHONY: supplier_list
supplier_list: ## List all the staked supplier
- poktrolld --home=$(POKTROLLD_HOME) q supplier list-supplier --node $(POCKET_NODE)
+ poktrolld --home=$(POKTROLLD_HOME) q supplier list-suppliers --node $(POCKET_NODE)
+
+.PHONY: supplier_list_anvil
+supplier_list_anvil: ## List all the suppliers staked for the anvil service
+ poktrolld --home=$(POKTROLLD_HOME) q supplier list-suppliers --service-id anvil --node $(POCKET_NODE)
+
+.PHONY: supplier_show_supplier1
+supplier_show_supplier1: ## Show supplier1 details
+ poktrolld --home=$(POKTROLLD_HOME) q supplier show-supplier supplier1 --node $(POCKET_NODE)
.PHONY: supplier_stake
supplier_stake: ## Stake tokens for the supplier specified (must specify the SUPPLIER and SUPPLIER_CONFIG env vars)
diff --git a/makefiles/testnet.mk b/makefiles/testnet.mk
index 0a13c056c..cd974a8d1 100644
--- a/makefiles/testnet.mk
+++ b/makefiles/testnet.mk
@@ -4,7 +4,7 @@
.PHONY: testnet_supplier_list
testnet_supplier_list: ## List all the staked supplier on TestNet
- poktrolld q supplier list-supplier --node=$(TESTNET_RPC)
+ poktrolld q supplier list-suppliers --node=$(TESTNET_RPC)
.PHONY: testnet_gateway_list
testnet_gateway_list: ## List all the staked gateways on TestNet
diff --git a/makefiles/tests.mk b/makefiles/tests.mk
index 7bb292e3d..b1962c845 100644
--- a/makefiles/tests.mk
+++ b/makefiles/tests.mk
@@ -53,13 +53,13 @@ test_load_relays_stress_custom: ## Run the stress test for E2E relays using cust
.PHONY: test_load_relays_stress_localnet
test_load_relays_stress_localnet: test_e2e_env warn_message_local_stress_test ## Run the stress test for E2E relays on LocalNet.
go test -v -count=1 ./load-testing/tests/... \
- -tags=load,test -run LoadRelays --log-level=debug --timeout=30m \
+ -tags=load,test -run TestLoadRelays --log-level=debug --timeout=30m \
--manifest ./load-testing/loadtest_manifest_localnet.yaml
.PHONY: test_load_relays_stress_localnet_single_supplier
test_load_relays_stress_localnet_single_supplier: test_e2e_env warn_message_local_stress_test ## Run the stress test for E2E relays on LocalNet using exclusively one supplier.
go test -v -count=1 ./load-testing/tests/... \
- -tags=load,test -run TestLoadRelaysSingleSupplier --log-level=debug --timeout=30m \
+ -tags=load,test -run TestSingleSupplierLoadRelays --log-level=debug --timeout=30m \
--manifest ./load-testing/loadtest_manifest_localnet_single_supplier.yaml
.PHONY: test_verbose
diff --git a/pkg/client/block/client.go b/pkg/client/block/client.go
index b76ecbac2..bbb184496 100644
--- a/pkg/client/block/client.go
+++ b/pkg/client/block/client.go
@@ -144,7 +144,7 @@ func (b *blockReplayClient) asyncForwardBlockEvent(
)
}
-// getInitialBlock fetches the latest committed on-chain block at the time the
+// getInitialBlock fetches the latest committed onchain block at the time the
// client starts up, while concurrently waiting for the next block event,
// publishing whichever occurs first to latestBlockPublishCh.
// This is necessary to ensure that the most recent block is available to the
diff --git a/pkg/client/delegation/godoc.go b/pkg/client/delegation/godoc.go
index 1d5b00139..f156662f6 100644
--- a/pkg/client/delegation/godoc.go
+++ b/pkg/client/delegation/godoc.go
@@ -1,5 +1,5 @@
// Package delegation contains a light wrapper of the EventsReplayClient[Redelegation]
// generic which listens for redelegation events on chain and emits them
-// through a ReplayObservable. This enables consumers to listen for on-chain
+// through a ReplayObservable. This enables consumers to listen for onchain
// application redelegation events and react to them asynchronously.
package delegation
diff --git a/pkg/client/events/godoc.go b/pkg/client/events/godoc.go
index b5bbf685f..bd67f94b6 100644
--- a/pkg/client/events/godoc.go
+++ b/pkg/client/events/godoc.go
@@ -1,4 +1,4 @@
-// Package events provides a generic client for subscribing to on-chain events
+// Package events provides a generic client for subscribing to onchain events
// via an EventsQueryClient and transforming the received events into the type
// defined by the EventsReplayClient's generic type parameter.
//
diff --git a/pkg/client/events/query_client.go b/pkg/client/events/query_client.go
index e04731f8e..ed0475520 100644
--- a/pkg/client/events/query_client.go
+++ b/pkg/client/events/query_client.go
@@ -63,7 +63,7 @@ func (ebc *eventsBytesAndConn) Close() {
}
// NewEventsQueryClient returns a new events query client which is used to
-// subscribe to on-chain events matching the given query.
+// subscribe to onchain events matching the given query.
//
// Available options:
// - WithDialer
diff --git a/pkg/client/interface.go b/pkg/client/interface.go
index 365c24b74..cae49f6d8 100644
--- a/pkg/client/interface.go
+++ b/pkg/client/interface.go
@@ -58,7 +58,7 @@ type MsgSubmitProof interface {
// able to construct blockchain transactions from pocket protocol-specific messages
// related to its role.
type SupplierClient interface {
- // CreateClaims sends claim messages which creates an on-chain commitment by
+ // CreateClaims sends claim messages which creates an onchain commitment by
// calling supplier to the given smt.SparseMerkleSumTree root hash of the given
// session's mined relays.
CreateClaims(
@@ -67,7 +67,7 @@ type SupplierClient interface {
) error
// SubmitProof sends proof messages which contain the smt.SparseCompactMerkleClosestProof,
// corresponding to some previously created claim for the same session.
- // The proof is validated on-chain as part of the pocket protocol.
+ // The proof is validated onchain as part of the pocket protocol.
SubmitProofs(
ctx context.Context,
sessionProofs ...MsgSubmitProof,
@@ -126,6 +126,13 @@ type TxContext interface {
// GetClientCtx returns the cosmos-sdk client context associated with the transaction context.
GetClientCtx() cosmosclient.Context
+
+ // GetSimulatedTxGas returns the estimated gas for the given messages.
+ GetSimulatedTxGas(
+ ctx context.Context,
+ signingKeyName string,
+ msgs ...cosmostypes.Msg,
+ ) (uint64, error)
}
// Block is an interface which abstracts the details of a block to its minimal
@@ -162,7 +169,7 @@ type BlockClient interface {
// CommittedBlocksSequence returns a BlockObservable that emits the
// latest blocks that have been committed to the chain.
CommittedBlocksSequence(context.Context) BlockReplayObservable
- // LastBlock returns the latest block that has been committed on-chain.
+ // LastBlock returns the latest block that has been committed onchain.
LastBlock(context.Context) Block
// Close unsubscribes all observers of the committed block sequence
// observable and closes the events query client.
@@ -255,7 +262,7 @@ type BlockClientOption func(BlockClient)
type EventsReplayClientOption[T any] func(EventsReplayClient[T])
// AccountQueryClient defines an interface that enables the querying of the
-// on-chain account information
+// onchain account information
type AccountQueryClient interface {
// GetAccount queries the chain for the details of the account provided
GetAccount(ctx context.Context, address string) (cosmostypes.AccountI, error)
@@ -265,24 +272,27 @@ type AccountQueryClient interface {
}
// ApplicationQueryClient defines an interface that enables the querying of the
-// on-chain application information
+// onchain application information
type ApplicationQueryClient interface {
// GetApplication queries the chain for the details of the application provided
GetApplication(ctx context.Context, appAddress string) (apptypes.Application, error)
- // GetAllApplications queries all on-chain applications
+ // GetAllApplications queries all onchain applications
GetAllApplications(ctx context.Context) ([]apptypes.Application, error)
+
+ // GetParams queries the chain for the application module parameters.
+ GetParams(ctx context.Context) (*apptypes.Params, error)
}
// SupplierQueryClient defines an interface that enables the querying of the
-// on-chain supplier information
+// onchain supplier information
type SupplierQueryClient interface {
// GetSupplier queries the chain for the details of the supplier provided
GetSupplier(ctx context.Context, supplierOperatorAddress string) (sharedtypes.Supplier, error)
}
// SessionQueryClient defines an interface that enables the querying of the
-// on-chain session information
+// onchain session information
type SessionQueryClient interface {
// GetSession queries the chain for the details of the session provided
GetSession(
@@ -297,7 +307,7 @@ type SessionQueryClient interface {
}
// SharedQueryClient defines an interface that enables the querying of the
-// on-chain shared module params.
+// onchain shared module params.
type SharedQueryClient interface {
// GetParams queries the chain for the current shared module parameters.
GetParams(ctx context.Context) (*sharedtypes.Params, error)
@@ -323,7 +333,7 @@ type SharedQueryClient interface {
}
// BlockQueryClient defines an interface that enables the querying of
-// on-chain block information for a given height. If height is nil, the
+// onchain block information for a given height. If height is nil, the
// latest block is returned.
type BlockQueryClient interface {
Block(ctx context.Context, height *int64) (*cometrpctypes.ResultBlock, error)
@@ -340,14 +350,14 @@ type ProofParams interface {
}
// ProofQueryClient defines an interface that enables the querying of the
-// on-chain proof module params.
+// onchain proof module params.
type ProofQueryClient interface {
- // GetParams queries the chain for the current shared module parameters.
+ // GetParams queries the chain for the current proof module parameters.
GetParams(ctx context.Context) (ProofParams, error)
}
// ServiceQueryClient defines an interface that enables the querying of the
-// on-chain service information
+// onchain service information
type ServiceQueryClient interface {
// GetService queries the chain for the details of the service provided
GetService(ctx context.Context, serviceId string) (sharedtypes.Service, error)
@@ -355,7 +365,7 @@ type ServiceQueryClient interface {
}
// BankQueryClient defines an interface that enables the querying of the
-// on-chain bank information
+// onchain bank information
type BankQueryClient interface {
// GetBalance queries the chain for the uPOKT balance of the account provided
GetBalance(ctx context.Context, address string) (*cosmostypes.Coin, error)
diff --git a/pkg/client/query/accquerier.go b/pkg/client/query/accquerier.go
index 2b4d9c2bc..932db5836 100644
--- a/pkg/client/query/accquerier.go
+++ b/pkg/client/query/accquerier.go
@@ -16,7 +16,7 @@ import (
var _ client.AccountQueryClient = (*accQuerier)(nil)
// accQuerier is a wrapper around the accounttypes.QueryClient that enables the
-// querying of on-chain account information through a single exposed method
+// querying of onchain account information through a single exposed method
// which returns an accounttypes.AccountI interface
type accQuerier struct {
clientConn grpc.ClientConn
@@ -76,7 +76,7 @@ func (aq *accQuerier) GetAccount(
// Fetched accounts must have their public key set. Do not cache accounts
// that do not have a public key set, such as the ones resulting from genesis
// as they may continue failing due to the caching mechanism, even after they
- // got their public key recorded on-chain.
+ // got their public key recorded onchain.
if fetchedAccount.GetPubKey() == nil {
return nil, ErrQueryPubKeyNotFound
}
diff --git a/pkg/client/query/appquerier.go b/pkg/client/query/appquerier.go
index 9477c35f9..356ce674c 100644
--- a/pkg/client/query/appquerier.go
+++ b/pkg/client/query/appquerier.go
@@ -13,7 +13,7 @@ import (
var _ client.ApplicationQueryClient = (*appQuerier)(nil)
// appQuerier is a wrapper around the apptypes.QueryClient that enables the
-// querying of on-chain application information through a single exposed method
+// querying of onchain application information through a single exposed method
// which returns an apptypes.Application interface
type appQuerier struct {
clientConn grpc.ClientConn
@@ -62,3 +62,13 @@ func (aq *appQuerier) GetAllApplications(ctx context.Context) ([]apptypes.Applic
}
return res.Applications, nil
}
+
+// GetParams returns the application module parameters
+func (aq *appQuerier) GetParams(ctx context.Context) (*apptypes.Params, error) {
+ req := apptypes.QueryParamsRequest{}
+ res, err := aq.applicationQuerier.Params(ctx, &req)
+ if err != nil {
+ return nil, err
+ }
+ return &res.Params, nil
+}
diff --git a/pkg/client/query/bankquerier.go b/pkg/client/query/bankquerier.go
index 198e5a3d5..ca28a4998 100644
--- a/pkg/client/query/bankquerier.go
+++ b/pkg/client/query/bankquerier.go
@@ -15,7 +15,7 @@ import (
var _ client.BankQueryClient = (*bankQuerier)(nil)
// bankQuerier is a wrapper around the banktypes.QueryClient that enables the
-// querying of on-chain balance information.
+// querying of onchain balance information.
type bankQuerier struct {
clientConn grpc.ClientConn
bankQuerier banktypes.QueryClient
diff --git a/pkg/client/query/proofquerier.go b/pkg/client/query/proofquerier.go
index 30c2984cd..6751dc995 100644
--- a/pkg/client/query/proofquerier.go
+++ b/pkg/client/query/proofquerier.go
@@ -11,7 +11,7 @@ import (
)
// proofQuerier is a wrapper around the prooftypes.QueryClient that enables the
-// querying of on-chain proof module params.
+// querying of onchain proof module params.
type proofQuerier struct {
clientConn grpc.ClientConn
proofQuerier prooftypes.QueryClient
diff --git a/pkg/client/query/servicequerier.go b/pkg/client/query/servicequerier.go
index cb0629681..1f5ef2d2a 100644
--- a/pkg/client/query/servicequerier.go
+++ b/pkg/client/query/servicequerier.go
@@ -14,7 +14,7 @@ import (
var _ client.ServiceQueryClient = (*serviceQuerier)(nil)
// serviceQuerier is a wrapper around the servicetypes.QueryClient that enables the
-// querying of on-chain service information through a single exposed method
+// querying of onchain service information through a single exposed method
// which returns a sharedtypes.Service struct
type serviceQuerier struct {
clientConn grpc.ClientConn
diff --git a/pkg/client/query/sessionquerier.go b/pkg/client/query/sessionquerier.go
index 8553e3313..fdf6c42e9 100644
--- a/pkg/client/query/sessionquerier.go
+++ b/pkg/client/query/sessionquerier.go
@@ -13,7 +13,7 @@ import (
var _ client.SessionQueryClient = (*sessionQuerier)(nil)
// sessionQuerier is a wrapper around the sessiontypes.QueryClient that enables the
-// querying of on-chain session information through a single exposed method
+// querying of onchain session information through a single exposed method
// which returns an sessiontypes.Session struct
type sessionQuerier struct {
clientConn grpc.ClientConn
@@ -63,7 +63,7 @@ func (sessq *sessionQuerier) GetSession(
return res.Session, nil
}
-// GetParams queries & returns the session module on-chain parameters.
+// GetParams queries & returns the session module onchain parameters.
func (sessq *sessionQuerier) GetParams(ctx context.Context) (*sessiontypes.Params, error) {
req := &sessiontypes.QueryParamsRequest{}
res, err := sessq.sessionQuerier.Params(ctx, req)
diff --git a/pkg/client/query/sharedquerier.go b/pkg/client/query/sharedquerier.go
index 06e0ed90a..bbe67b0de 100644
--- a/pkg/client/query/sharedquerier.go
+++ b/pkg/client/query/sharedquerier.go
@@ -13,7 +13,7 @@ import (
var _ client.SharedQueryClient = (*sharedQuerier)(nil)
// sharedQuerier is a wrapper around the sharedtypes.QueryClient that enables the
-// querying of on-chain shared information through a single exposed method
+// querying of onchain shared information through a single exposed method
// which returns an sharedtypes.Session struct
type sharedQuerier struct {
clientConn grpc.ClientConn
@@ -43,7 +43,7 @@ func NewSharedQuerier(deps depinject.Config) (client.SharedQueryClient, error) {
return querier, nil
}
-// GetParams queries & returns the shared module on-chain parameters.
+// GetParams queries & returns the shared module onchain parameters.
//
// TODO_TECHDEBT(#543): We don't really want to have to query the params for every method call.
// Once `ModuleParamsClient` is implemented, use its replay observable's `#Last()` method
diff --git a/pkg/client/query/supplierquerier.go b/pkg/client/query/supplierquerier.go
index 040a4303f..927f2b335 100644
--- a/pkg/client/query/supplierquerier.go
+++ b/pkg/client/query/supplierquerier.go
@@ -12,7 +12,7 @@ import (
)
// supplierQuerier is a wrapper around the suppliertypes.QueryClient that enables the
-// querying of on-chain supplier information through a single exposed method
+// querying of onchain supplier information through a single exposed method
// which returns an sharedtypes.Supplier struct
type supplierQuerier struct {
clientConn grpc.ClientConn
diff --git a/pkg/client/supplier/client.go b/pkg/client/supplier/client.go
index 018b751c8..f7d889f1b 100644
--- a/pkg/client/supplier/client.go
+++ b/pkg/client/supplier/client.go
@@ -81,7 +81,7 @@ func (sClient *supplierClient) SubmitProofs(
}
// TODO(@bryanchriswhite): reconcile splitting of supplier & proof modules
- // with off-chain pkgs/nomenclature.
+ // with offchain pkgs/nomenclature.
eitherErr := sClient.txClient.SignAndBroadcast(ctx, msgs...)
err, errCh := eitherErr.SyncOrAsyncError()
if err != nil {
@@ -127,7 +127,7 @@ func (sClient *supplierClient) CreateClaims(
}
// TODO(@bryanchriswhite): reconcile splitting of supplier & proof modules
- // with off-chain pkgs/nomenclature.
+ // with offchain pkgs/nomenclature.
eitherErr := sClient.txClient.SignAndBroadcast(ctx, msgs...)
err, errCh := eitherErr.SyncOrAsyncError()
if err != nil {
diff --git a/pkg/client/tx/client.go b/pkg/client/tx/client.go
index 506db3d3e..a1f311dfc 100644
--- a/pkg/client/tx/client.go
+++ b/pkg/client/tx/client.go
@@ -8,6 +8,7 @@ import (
"sync"
"cosmossdk.io/depinject"
+ "cosmossdk.io/math"
abci "github.com/cometbft/cometbft/abci/types"
"github.com/cometbft/cometbft/libs/json"
rpctypes "github.com/cometbft/cometbft/rpc/jsonrpc/types"
@@ -15,6 +16,7 @@ import (
cosmostypes "github.com/cosmos/cosmos-sdk/types"
"go.uber.org/multierr"
+ "github.com/pokt-network/poktroll/app/volatile"
"github.com/pokt-network/poktroll/pkg/client"
"github.com/pokt-network/poktroll/pkg/client/events"
"github.com/pokt-network/poktroll/pkg/client/keyring"
@@ -103,6 +105,9 @@ type txClient struct {
// that they have not already by the given timeout height.
txTimeoutPool txTimeoutPool
+ // gasPrices is the gas unit prices used for sending transactions.
+ gasPrices cosmostypes.DecCoins
+
// connRetryLimit is the number of times the underlying replay client
// should retry in the event that it encounters an error or its connection is interrupted.
// If connRetryLimit is < 0, it will retry indefinitely.
@@ -229,6 +234,12 @@ func (txnClient *txClient) SignAndBroadcast(
return either.SyncErr(validationErrs)
}
+ // Simulate the transaction to calculate the gas limit.
+ gasLimit, simErr := txnClient.txCtx.GetSimulatedTxGas(ctx, txnClient.signingKeyName, msgs...)
+ if simErr != nil {
+ return either.SyncErr(simErr)
+ }
+
// Construct the transactions using cosmos' transactions builder.
txBuilder := txnClient.txCtx.NewTxBuilder()
if err := txBuilder.SetMsgs(msgs...); err != nil {
@@ -240,8 +251,21 @@ func (txnClient *txClient) SignAndBroadcast(
timeoutHeight := txnClient.blockClient.LastBlock(ctx).
Height() + txnClient.commitTimeoutHeightOffset
- // TODO_TECHDEBT: this should be configurable
- txBuilder.SetGasLimit(690000042)
+ txBuilder.SetGasLimit(gasLimit)
+
+ gasLimitDec := math.LegacyNewDec(int64(gasLimit))
+ feeAmountDec := txnClient.gasPrices.MulDec(gasLimitDec)
+
+ feeCoins, changeCoins := feeAmountDec.TruncateDecimal()
+ // Ensure that any decimal remainder is added to the corresponding coin as a
+ // whole number.
+ // Since changeCoins is the result of DecCoins#TruncateDecimal, it will always
+ // be less than 1 unit of the feeCoins.
+ if !changeCoins.IsZero() {
+ feeCoins = feeCoins.Add(cosmostypes.NewInt64Coin(volatile.DenomuPOKT, 1))
+ }
+ txBuilder.SetFeeAmount(feeCoins)
+
txBuilder.SetTimeoutHeight(uint64(timeoutHeight))
// sign transactions
diff --git a/pkg/client/tx/client_test.go b/pkg/client/tx/client_test.go
index e39985641..8b24e6677 100644
--- a/pkg/client/tx/client_test.go
+++ b/pkg/client/tx/client_test.go
@@ -272,7 +272,7 @@ func TestTxClient_SignAndBroadcast_SyncError(t *testing.T) {
time.Sleep(10 * time.Millisecond)
}
-// TODO_INCOMPLETE: add coverage for async error; i.e. insufficient gas or on-chain error
+// TODO_INCOMPLETE: add coverage for async error; i.e. insufficient gas or onchain error
func TestTxClient_SignAndBroadcast_CheckTxError(t *testing.T) {
var (
// expectedErrMsg is the expected error message that will be returned
diff --git a/pkg/client/tx/context.go b/pkg/client/tx/context.go
index 5f9c694ea..2c1d8fcde 100644
--- a/pkg/client/tx/context.go
+++ b/pkg/client/tx/context.go
@@ -9,12 +9,23 @@ import (
cosmostx "github.com/cosmos/cosmos-sdk/client/tx"
cosmoskeyring "github.com/cosmos/cosmos-sdk/crypto/keyring"
cosmostypes "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/tx"
authclient "github.com/cosmos/cosmos-sdk/x/auth/client"
+ grpc "google.golang.org/grpc"
"github.com/pokt-network/poktroll/pkg/client"
txtypes "github.com/pokt-network/poktroll/pkg/client/tx/types"
)
+// maxGRPCMsgSize is the maximum message size the gRPC client can send and receive.
+// The current value has been set arbitrarily to a large value after empirically
+// observing multiple Proof messages bundled within a single transaction exceeding
+// the default 4MB limit.
+// TODO_MAINNET: Adjust the max message size to a more sensible value.
+// DEV_NOTE: This value should be adjusted in concert with CometBFT's rpc
+// max_body_bytes, mempool max_tx_bytes and max_txs_bytes.
+const maxGRPCMsgSize = 100 * 1024 * 1024 // 100MB
+
var _ client.TxContext = (*cosmosTxContext)(nil)
// cosmosTxContext is an internal implementation of the client.TxContext interface.
@@ -86,7 +97,9 @@ func (txCtx cosmosTxContext) EncodeTx(txBuilder cosmosclient.TxBuilder) ([]byte,
// ABCI operation completes and returns a TxResponse of the transaction status at that point in time.
func (txCtx cosmosTxContext) BroadcastTx(txBytes []byte) (*cosmostypes.TxResponse, error) {
clientCtx := cosmosclient.Context(txCtx.clientCtx)
- return clientCtx.BroadcastTxAsync(txBytes)
+ // BroadcastTxSync is used to capture any transaction error that occurs during
+ // the check-tx ABCI operation, otherwise errors would not be returned.
+ return clientCtx.BroadcastTxSync(txBytes)
}
// QueryTx queries the transaction based on its hash and optionally provides proof
@@ -103,3 +116,53 @@ func (txCtx cosmosTxContext) QueryTx(
func (txCtx cosmosTxContext) GetClientCtx() cosmosclient.Context {
return cosmosclient.Context(txCtx.clientCtx)
}
+
+// GetSimulatedTxGas calculates the gas for the given messages using the simulation mode.
+func (txCtx cosmosTxContext) GetSimulatedTxGas(
+ ctx context.Context,
+ signingKeyName string,
+ msgs ...cosmostypes.Msg,
+) (uint64, error) {
+ clientCtx := cosmosclient.Context(txCtx.clientCtx)
+ keyRecord, err := txCtx.GetKeyring().Key(signingKeyName)
+ if err != nil {
+ return 0, err
+ }
+
+ accAddress, err := keyRecord.GetAddress()
+ if err != nil {
+ return 0, err
+ }
+
+ accountRetriever := txCtx.clientCtx.AccountRetriever
+ _, seq, err := accountRetriever.GetAccountNumberSequence(clientCtx, accAddress)
+ if err != nil {
+ return 0, err
+ }
+
+ txf := txCtx.txFactory.
+ WithSimulateAndExecute(true).
+ WithFromName(signingKeyName).
+ WithSequence(seq)
+
+ txBytes, err := txf.BuildSimTx(msgs...)
+ if err != nil {
+ return 0, err
+ }
+
+ txSvcClient := tx.NewServiceClient(clientCtx)
+
+ simRequest := &tx.SimulateRequest{TxBytes: txBytes}
+ // Set the maximum message size for the gRPC client to allow large transactions
+ // (e.g. transactions with multiple proof messages) to be simulated.
+ gRPCOpts := []grpc.CallOption{
+ grpc.MaxCallSendMsgSize(maxGRPCMsgSize),
+ grpc.MaxCallRecvMsgSize(maxGRPCMsgSize),
+ }
+ simRes, err := txSvcClient.Simulate(context.Background(), simRequest, gRPCOpts...)
+ if err != nil {
+ return 0, err
+ }
+
+ return uint64(txf.GasAdjustment() * float64(simRes.GasInfo.GasUsed)), nil
+}
diff --git a/pkg/client/tx/options.go b/pkg/client/tx/options.go
index 46e3dbcd2..f31736bcc 100644
--- a/pkg/client/tx/options.go
+++ b/pkg/client/tx/options.go
@@ -1,6 +1,10 @@
package tx
-import "github.com/pokt-network/poktroll/pkg/client"
+import (
+ cosmostypes "github.com/cosmos/cosmos-sdk/types"
+
+ "github.com/pokt-network/poktroll/pkg/client"
+)
// WithCommitTimeoutBlocks sets the timeout duration in terms of number of blocks
// for the client to wait for broadcast transactions to be committed before
@@ -28,3 +32,10 @@ func WithConnRetryLimit(limit int) client.TxClientOption {
client.(*txClient).connRetryLimit = limit
}
}
+
+// WithGasPrices sets the gas price to be used when constructing transactions.
+func WithGasPrices(gasPrices cosmostypes.DecCoins) client.TxClientOption {
+ return func(client client.TxClient) {
+ client.(*txClient).gasPrices = gasPrices
+ }
+}
diff --git a/pkg/crypto/interface.go b/pkg/crypto/interface.go
index 6ed511da8..1a08dc009 100644
--- a/pkg/crypto/interface.go
+++ b/pkg/crypto/interface.go
@@ -21,10 +21,10 @@ type RingCache interface {
// currently cached in the ring cache.
GetCachedAddresses() []string
// Start starts the ring cache, it takes a cancellable context and, in a
- // separate goroutine, listens for on-chain delegation events and invalidates
+ // separate goroutine, listens for onchain delegation events and invalidates
// the cache if the redelegation event's AppAddress is stored in the cache.
Start(ctx context.Context)
- // Stop stops the ring cache by unsubscribing from on-chain delegation events.
+ // Stop stops the ring cache by unsubscribing from onchain delegation events.
// And clears the cache, so that it no longer contains any rings,
Stop()
}
@@ -47,7 +47,7 @@ type RingClient interface {
}
// PubKeyClient is used to get the public key given an address.
-// On-chain and off-chain implementations should take care of retrieving the
+// Onchain and offchain implementations should take care of retrieving the
// address' account and returning its public key.
type PubKeyClient interface {
// GetPubKeyFromAddress returns the public key of the given account address
diff --git a/pkg/crypto/protocol/relay_difficulty.go b/pkg/crypto/protocol/relay_difficulty.go
index ff15bc6f2..385485e3b 100644
--- a/pkg/crypto/protocol/relay_difficulty.go
+++ b/pkg/crypto/protocol/relay_difficulty.go
@@ -90,7 +90,7 @@ func GetRelayDifficultyProbability(relayDifficultyHash []byte) *big.Rat {
}
// GetRelayDifficultyMultiplier returns the inverse of GetRelayDifficultyProbability
-// to scale on-chain volume applicable relays to estimated serviced off-chain relays.
+// to scale onchain volume applicable relays to estimated serviced offchain relays.
func GetRelayDifficultyMultiplier(relayDifficultyHash []byte) *big.Rat {
probability := GetRelayDifficultyProbability(relayDifficultyHash)
return new(big.Rat).Inv(probability)
diff --git a/pkg/crypto/protocol/relay_difficulty_test.go b/pkg/crypto/protocol/relay_difficulty_test.go
index de7c4ebb0..846fc34cb 100644
--- a/pkg/crypto/protocol/relay_difficulty_test.go
+++ b/pkg/crypto/protocol/relay_difficulty_test.go
@@ -362,7 +362,7 @@ func TestRelayDifficulty_EnsureRelayMiningProbabilityIsProportional(t *testing.T
// a session tree should have.
const targetNumRelays = uint64(10e4)
- // numEstimatedRelays aims to simulate the actual (i.e. off-chain) number of relays
+ // numEstimatedRelays aims to simulate the actual (i.e. offchain) number of relays
// a RelayMiner would service successfully.
for numEstimatedRelays := uint64(1); numEstimatedRelays < 1e18; numEstimatedRelays *= 10 {
// Compute the relay mining difficulty corresponding to the actual number of relays
@@ -410,7 +410,7 @@ func TestRelayDifficulty_TruncateRelayDifficultyHashToBaseSizeDoesNotChangeItsVa
}
// scaleRelaysFromActualToTarget scales the number of relays (i.e. estimated offchain serviced relays)
-// down to the number of expected on-chain volume applicable relays
+// down to the number of expected onchain volume applicable relays
func scaleRelaysFromActualToTarget(t *testing.T, relayDifficultyProbability *big.Rat, numRelays uint64) uint64 {
numRelaysRat := new(big.Rat).SetUint64(numRelays)
volumeApplicableRelaysRat := new(big.Rat).Mul(relayDifficultyProbability, numRelaysRat)
@@ -428,8 +428,8 @@ func TestRelayDifficulty_EnsureRelayMiningMultiplierIsProportional(t *testing.T)
// Target Num Relays is the target number of volume applicable relays a session tree should have.
const (
targetNumRelays = uint64(10e3) // Target number of volume applicable relays
- lowVolumeService = 1e5 // Number of actual off-chain relays serviced by a RelayMiner
- highVolumeService = 1e7 // Number of actual off-chain relays serviced by a RelayMiner
+ lowVolumeService = 1e5 // Number of actual offchain relays serviced by a RelayMiner
+ highVolumeService = 1e7 // Number of actual offchain relays serviced by a RelayMiner
allowableDelta = 0.05 // Allow a 5% error margin between estimated probabilities and results
)
diff --git a/pkg/crypto/rings/cache.go b/pkg/crypto/rings/cache.go
index 8a401c386..4dfa2f52b 100644
--- a/pkg/crypto/rings/cache.go
+++ b/pkg/crypto/rings/cache.go
@@ -26,7 +26,7 @@ type ringCache struct {
ringsByAddr map[string]*ring.Ring
ringsByAddrMu *sync.RWMutex
- // delegationClient is used to listen for on-chain delegation events and
+ // delegationClient is used to listen for onchain delegation events and
// invalidate entries in ringsByAddr if an associated updated has been made.
delegationClient client.DelegationClient
@@ -66,7 +66,7 @@ func NewRingCache(deps depinject.Config) (_ crypto.RingCache, err error) {
return rc, nil
}
-// Start starts the ring cache by subscribing to on-chain redelegation events.
+// Start starts the ring cache by subscribing to onchain redelegation events.
func (rc *ringCache) Start(ctx context.Context) {
rc.logger.Info().Msg("starting ring cache")
// Stop the ringCache when the context is cancelled.
@@ -106,7 +106,7 @@ func (rc *ringCache) goInvalidateCache(ctx context.Context) {
})
}
-// Stop stops the ring cache by unsubscribing from on-chain redelegation events
+// Stop stops the ring cache by unsubscribing from onchain redelegation events
// and clears any existing entries.
func (rc *ringCache) Stop() {
// Clear the cache.
diff --git a/pkg/deps/config/suppliers.go b/pkg/deps/config/suppliers.go
index 1d964caee..26f04043e 100644
--- a/pkg/deps/config/suppliers.go
+++ b/pkg/deps/config/suppliers.go
@@ -2,14 +2,17 @@ package config
import (
"context"
+ "fmt"
"net/url"
"cosmossdk.io/depinject"
sdkclient "github.com/cosmos/cosmos-sdk/client"
cosmosflags "github.com/cosmos/cosmos-sdk/client/flags"
+ cosmostypes "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/gogoproto/grpc"
"github.com/spf13/cobra"
+ "github.com/pokt-network/poktroll/app/volatile"
"github.com/pokt-network/poktroll/pkg/client/block"
"github.com/pokt-network/poktroll/pkg/client/delegation"
"github.com/pokt-network/poktroll/pkg/client/events"
@@ -350,11 +353,21 @@ func NewSupplySupplierClientsFn(signingKeyNames []string) SupplierFn {
return func(
ctx context.Context,
deps depinject.Config,
- _ *cobra.Command,
+ cmd *cobra.Command,
) (depinject.Config, error) {
+ gasPriceStr, err := cmd.Flags().GetString(cosmosflags.FlagGasPrices)
+ if err != nil {
+ return nil, err
+ }
+
+ gasPrices, err := cosmostypes.ParseDecCoins(gasPriceStr)
+ if err != nil {
+ return nil, err
+ }
+
suppliers := supplier.NewSupplierClientMap()
for _, signingKeyName := range signingKeyNames {
- txClientDepinjectConfig, err := newSupplyTxClientsFn(ctx, deps, signingKeyName)
+ txClientDepinjectConfig, err := newSupplyTxClientsFn(ctx, deps, signingKeyName, gasPrices)
if err != nil {
return nil, err
}
@@ -466,12 +479,27 @@ func NewSupplyBankQuerierFn() SupplierFn {
// newSupplyTxClientFn returns a new depinject.Config which is supplied with
// the given deps and the new TxClient.
-func newSupplyTxClientsFn(ctx context.Context, deps depinject.Config, signingKeyName string) (depinject.Config, error) {
+func newSupplyTxClientsFn(
+ ctx context.Context,
+ deps depinject.Config,
+ signingKeyName string,
+ gasPrices cosmostypes.DecCoins,
+) (depinject.Config, error) {
+ // Ensure that the gas prices include upokt
+ for _, gasPrice := range gasPrices {
+ if gasPrice.Denom != volatile.DenomuPOKT {
+ // TODO_TECHDEBT(red-0ne): Allow other gas price denominations once supported (e.g. mPOKT, POKT)
+ // See https://docs.cosmos.network/main/build/architecture/adr-024-coin-metadata#decision
+ return nil, fmt.Errorf("only gas prices with %s denom are supported", volatile.DenomuPOKT)
+ }
+ }
+
txClient, err := tx.NewTxClient(
ctx,
deps,
tx.WithSigningKeyName(signingKeyName),
tx.WithCommitTimeoutBlocks(tx.DefaultCommitTimeoutHeightOffset),
+ tx.WithGasPrices(gasPrices),
)
if err != nil {
return nil, err
diff --git a/pkg/polylog/LICENSE b/pkg/polylog/LICENSE
index 3914913fc..4819fb18c 100644
--- a/pkg/polylog/LICENSE
+++ b/pkg/polylog/LICENSE
@@ -2,7 +2,7 @@ The `Logger` and `Event` interfaces follow the `zerolog` package's API 1:1
(possibly partially) to make zerolog implementation/integration become the
thinnest wrapper possible.
-This is the API intended for all off-chain (i.e. under `/pkg`) logging.
+This is the API intended for all offchain (i.e. under `/pkg`) logging.
The following is the MIT LICENSE from `zerolog` package at the time of writing:
(see: https://github.com/rs/zerolog)
diff --git a/pkg/relayer/cmd/cmd.go b/pkg/relayer/cmd/cmd.go
index 18cd07d42..574f405b4 100644
--- a/pkg/relayer/cmd/cmd.go
+++ b/pkg/relayer/cmd/cmd.go
@@ -48,16 +48,16 @@ func RelayerCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "relayminer",
Short: "Start a RelayMiner",
- Long: `Run a RelayMiner. A RelayMiner is the off-chain complementary
+ Long: `Run a RelayMiner. A RelayMiner is the offchain complementary
middleware that handles incoming requests for all the services a Supplier staked
-for on-chain.
+for onchain.
Relay requests received by the relay servers are validated and proxied to their
-respective service endpoints, maintained by the relayer off-chain. The responses
+respective service endpoints, maintained by the relayer offchain. The responses
are then signed and sent back to the requesting application.
For each successfully served relay, the miner will hash and compare its difficulty
-against an on-chain threshold. If the difficulty is sufficient, it is applicable
+against an onchain threshold. If the difficulty is sufficient, it is applicable
to relay volume and therefore rewards. Such relays are inserted into and persisted
via an SMT KV store. The miner will monitor the current block height and periodically
submit claim and proof messages according to the protocol as sessions become eligible
@@ -75,6 +75,8 @@ for such operations.`,
cmd.Flags().Bool(cosmosflags.FlagGRPCInsecure, true, "Used to initialize the Cosmos query context with grpc security options. It can be used to override the `QueryNodeGRPCInsecure` field in the config file if specified.")
cmd.Flags().String(cosmosflags.FlagChainID, "poktroll", "The network chain ID")
cmd.Flags().StringVar(&flagLogLevel, cosmosflags.FlagLogLevel, "debug", "The logging level (debug|info|warn|error)")
+ cmd.Flags().Float64(cosmosflags.FlagGasAdjustment, 1.5, "The adjustment factor to be multiplied by the gas estimate returned by the tx simulation")
+ cmd.Flags().String(cosmosflags.FlagGasPrices, "1upokt", "Set the gas unit price in upokt")
return cmd
}
diff --git a/pkg/relayer/config/types.go b/pkg/relayer/config/types.go
index 8f72b0ff0..8c6ece4ab 100644
--- a/pkg/relayer/config/types.go
+++ b/pkg/relayer/config/types.go
@@ -126,7 +126,7 @@ type RelayMinerSupplierConfig struct {
// ServerType is the transport protocol used by the supplier, it must match the
// type of the relay miner server it is associated with.
ServerType RelayMinerServerType
- // PubliclyExposedEndpoints is a list of hosts advertised on-chain by the supplier,
+ // PubliclyExposedEndpoints is a list of hosts advertised onchain by the supplier,
// the corresponding relay miner server will accept relay requests for these hosts.
PubliclyExposedEndpoints []string
// ServiceConfig is the config of the service that relays will be proxied to.
diff --git a/pkg/relayer/interface.go b/pkg/relayer/interface.go
index 766dcb5ce..ee9c8e484 100644
--- a/pkg/relayer/interface.go
+++ b/pkg/relayer/interface.go
@@ -140,7 +140,7 @@ type SessionTree interface {
// Flush gets the root hash of the SMST needed for submitting the claim;
// then commits the entire tree to disk and stops the KVStore.
- // It should be called before submitting the claim on-chain. This function frees up
+ // It should be called before submitting the claim onchain. This function frees up
// the in-memory resources used by the SMST that are no longer needed while waiting
// for the proof submission window to open.
Flush() (SMSTRoot []byte, err error)
@@ -149,7 +149,7 @@ type SessionTree interface {
// aiming to free up KVStore resources after the proof is no longer needed.
// Delete deletes the SMST from the KVStore.
// WARNING: This function should be called only after the proof has been successfully
- // submitted on-chain and the servicer has confirmed that it has been rewarded.
+ // submitted onchain and the servicer has confirmed that it has been rewarded.
Delete() error
// StartClaiming marks the session tree as being picked up for claiming,
diff --git a/pkg/relayer/proxy/proxy.go b/pkg/relayer/proxy/proxy.go
index ad69a587b..9a76eb953 100644
--- a/pkg/relayer/proxy/proxy.go
+++ b/pkg/relayer/proxy/proxy.go
@@ -144,7 +144,7 @@ func NewRelayerProxy(
// if any of them errors.
// NB: This method IS BLOCKING until all RelayServers are stopped.
func (rp *relayerProxy) Start(ctx context.Context) error {
- // The provided services map is built from the supplier's on-chain advertised information,
+ // The provided services map is built from the supplier's onchain advertised information,
// which is a runtime parameter that can be changed by the supplier.
// NOTE: We build the provided services map at Start instead of NewRelayerProxy to avoid having to
// return an error from the constructor.
@@ -155,7 +155,7 @@ func (rp *relayerProxy) Start(ctx context.Context) error {
// Start the ring cache.
rp.ringCache.Start(ctx)
- // Start the relay meter by subscribing to the on-chain events.
+ // Start the relay meter by subscribing to the onchain events.
// This function is non-blocking and the subscription cancellation is handled
// by the context passed to the Start method.
if err := rp.relayMeter.Start(ctx); err != nil {
diff --git a/pkg/relayer/proxy/proxy_test.go b/pkg/relayer/proxy/proxy_test.go
index 3a022bc25..b9cb0b642 100644
--- a/pkg/relayer/proxy/proxy_test.go
+++ b/pkg/relayer/proxy/proxy_test.go
@@ -36,7 +36,7 @@ var (
// supplierEndpoints is the map of serviceName -> []SupplierEndpoint
// where serviceName is the name of the service the supplier staked for
- // and SupplierEndpoint is the endpoint of the service advertised on-chain
+ // and SupplierEndpoint is the endpoint of the service advertised onchain
// by the supplier
supplierEndpoints map[string][]*sharedtypes.SupplierEndpoint
@@ -213,7 +213,7 @@ func TestRelayerProxy_EmptyServicesConfigMap(t *testing.T) {
}
// RelayerProxy should fail to start if it cannot spawn a server for the
-// services it advertized on-chain.
+// services it advertised onchain.
func TestRelayerProxy_UnsupportedRpcType(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
@@ -232,7 +232,7 @@ func TestRelayerProxy_UnsupportedRpcType(t *testing.T) {
testproxy.WithRelayerProxyDependenciesForBlockHeight(supplierOperatorKeyName, blockHeight),
testproxy.WithServicesConfigMap(servicesConfigMap),
- // The supplier is staked on-chain but the service it provides is not supported by the proxy
+ // The supplier is staked onchain but the service it provides is not supported by the proxy
testproxy.WithDefaultSupplier(supplierOperatorKeyName, unsupportedSupplierEndpoint),
testproxy.WithDefaultApplication(appPrivateKey),
testproxy.WithDefaultSessionSupplier(supplierOperatorKeyName, defaultService, appPrivateKey),
diff --git a/pkg/relayer/proxy/server_builder.go b/pkg/relayer/proxy/server_builder.go
index e386aa68d..9cf4d7864 100644
--- a/pkg/relayer/proxy/server_builder.go
+++ b/pkg/relayer/proxy/server_builder.go
@@ -16,7 +16,7 @@ import (
const (
// supplierStakeWaitTime is the time to wait for the supplier to be staked before
- // attempting to (try again to) retrieve the supplier's on-chain record.
+ // attempting to (try again to) retrieve the supplier's onchain record.
// This is useful for testing and development purposes, where the supplier
// may not be staked before the relay miner starts.
supplierStakeWaitTime = 1 * time.Second
@@ -25,13 +25,13 @@ const (
// if the supplier is still not staked when the time elapses.
//
// This is intentionally a larger number because if a RelayMiner is provisioned
- // for this long (either in testing or in prod) without an associated on-chain
+ // for this long (either in testing or in prod) without an associated onchain
// supplier being stake, we need to communicate it either to the operator or
// to the developer.
supplierMaxStakeWaitTimeMinutes = 20 * time.Minute
)
-// BuildProvidedServices builds the advertised relay servers from the supplier's on-chain advertised services.
+// BuildProvidedServices builds the advertised relay servers from the supplier's onchain advertised services.
// It populates the relayerProxy's `advertisedRelayServers` map of servers for each service, where each server
// is responsible for listening for incoming relay requests and relaying them to the supported proxied service.
func (rp *relayerProxy) BuildProvidedServices(ctx context.Context) error {
@@ -57,7 +57,7 @@ func (rp *relayerProxy) BuildProvidedServices(ctx context.Context) error {
// MainNet it might not be that big of a deal, though.
// Prevent the RelayMiner from stopping by waiting until its associated supplier
- // is staked and its on-chain record retrieved.
+ // is staked and its onchain record retrieved.
supplier, err := rp.waitForSupplierToStake(ctx, supplierOperatorAddress.String())
if err != nil {
return err
@@ -138,7 +138,7 @@ func (rp *relayerProxy) initializeProxyServers() (proxyServerMap map[string]rela
return servers, nil
}
-// waitForSupplierToStake waits in a loop until it gets the on-chain supplier's
+// waitForSupplierToStake waits in a loop until it gets the onchain supplier's
// information back.
// This is useful for testing and development purposes, in production the supplier
// is most likely staked before the relay miner starts.
@@ -148,14 +148,14 @@ func (rp *relayerProxy) waitForSupplierToStake(
) (supplier sharedtypes.Supplier, err error) {
startTime := time.Now()
for {
- // Get the supplier's on-chain record
+ // Get the supplier's onchain record
supplier, err = rp.supplierQuerier.GetSupplier(ctx, supplierOperatorAddress)
// If the supplier is not found, wait for the supplier to be staked.
// This enables provisioning and deploying a RelayMiner without staking a
- // supplier on-chain. For testing purposes, this is particularly useful
+ // supplier onchain. For testing purposes, this is particularly useful
// to eliminate the needed of additional communication & coordination
- // between on-chain staking and off-chain provisioning.
+ // between onchain staking and offchain provisioning.
if err != nil && suppliertypes.ErrSupplierNotFound.Is(err) {
rp.logger.Info().Msgf(
"Waiting %d seconds for the supplier with address %s to stake",
diff --git a/pkg/relayer/proxy/synchronous.go b/pkg/relayer/proxy/synchronous.go
index 6d4d69266..a94af25dc 100644
--- a/pkg/relayer/proxy/synchronous.go
+++ b/pkg/relayer/proxy/synchronous.go
@@ -127,7 +127,7 @@ func (sync *synchronousRPCServer) ServeHTTP(writer http.ResponseWriter, request
originHost := request.Host
// When the proxy is behind a reverse proxy, or is getting its requests from
- // a CDN or a load balancer, the host header may not contain the on-chain
+ // a CDN or a load balancer, the host header may not contain the onchain
// advertized address needed to determine the service that the relay request is for.
// These CDNs and reverse proxies usually set the X-Forwarded-Host header
// to the original host.
diff --git a/pkg/relayer/relayminer.go b/pkg/relayer/relayminer.go
index a81e2d982..505d5dbd5 100644
--- a/pkg/relayer/relayminer.go
+++ b/pkg/relayer/relayminer.go
@@ -63,7 +63,7 @@ func (rel *relayMiner) Start(ctx context.Context) error {
// This is a blocking call as it waits for the waitgroup in relayerProxy.Start()
// that starts all the relay servers to be done.
rel.logger.Info().Msg("starting relayer proxy")
- // TODO_TECHDEBT: Listen for on-chain and local configuration changes, stop
+ // TODO_TECHDEBT: Listen for onchain and local configuration changes, stop
// the relayerProxy if they do not match, then wait until they match again
// before starting the relayerProxy with the new config.
// Session manager should continue to run during this time, submitting
diff --git a/pkg/relayer/session/claim.go b/pkg/relayer/session/claim.go
index 61892f24c..4d09fb2a7 100644
--- a/pkg/relayer/session/claim.go
+++ b/pkg/relayer/session/claim.go
@@ -5,8 +5,10 @@ import (
"fmt"
"slices"
+ sdktypes "github.com/cosmos/cosmos-sdk/types"
"github.com/pokt-network/smt"
+ "github.com/pokt-network/poktroll/app/volatile"
"github.com/pokt-network/poktroll/pkg/client"
"github.com/pokt-network/poktroll/pkg/either"
"github.com/pokt-network/poktroll/pkg/observable"
@@ -18,9 +20,16 @@ import (
sharedtypes "github.com/pokt-network/poktroll/x/shared/types"
)
+// The cumulative fees of creating a single claim, followed by submitting a single proof.
+// The value was obtained empirically by observing logs during load testing and observing
+// the claim & proof lifecycle.
+// The gas price at the time of observation was 0.01uPOKT.
+// The value is subject to change as the network parameters change.
+var ClamAndProofGasCost = sdktypes.NewInt64Coin(volatile.DenomuPOKT, 50000)
+
// createClaims maps over the sessionsToClaimObs observable. For each claim batch, it:
// 1. Calculates the earliest block height at which it is safe to CreateClaims
-// 2. Waits for said block and creates the claims on-chain
+// 2. Waits for said block and creates the claims onchain
// 3. Maps errors to a new observable and logs them
// 4. Returns an observable of the successfully claimed sessions
// It DOES NOT BLOCK as map operations run in their own goroutines.
@@ -91,7 +100,7 @@ func (rs *relayerSessionsManager) mapWaitForEarliestCreateClaimsHeight(
// waitForEarliestCreateClaimsHeight calculates and waits for (blocking until) the
// earliest block height, allowed by the protocol, at which claims can be created
// for a session with the given sessionEndHeight. It is calculated relative to
-// sessionEndHeight using on-chain governance parameters and randomized input.
+// sessionEndHeight using onchain governance parameters and randomized input.
// It IS A BLOCKING function.
func (rs *relayerSessionsManager) waitForEarliestCreateClaimsHeight(
ctx context.Context,
@@ -124,7 +133,7 @@ func (rs *relayerSessionsManager) waitForEarliestCreateClaimsHeight(
logger.Info().Msg("waiting & blocking until the earliest claim commit height offset seed block height")
// The block that'll be used as a source of entropy for which branch(es) to
- // prove should be deterministic and use on-chain governance params.
+ // prove should be deterministic and use onchain governance params.
claimsWindowOpenBlock := rs.waitForBlock(ctx, claimWindowOpenHeight)
// TODO_MAINNET: If a relayminer is cold-started with persisted but unclaimed ("late")
// sessions, the claimsWindowOpenBlock will never be observed. In this case, we should
@@ -260,7 +269,10 @@ func (rs *relayerSessionsManager) payableProofsSessionTrees(
if err != nil {
return nil, err
}
- proofSubmissionFeeCoin := proofParams.GetProofSubmissionFee()
+
+ // Account for the gas cost of creating a claim and submitting a proof in addition
+ // to the ProofSubmissionFee.
+ claimAndProofSubmissionCost := proofParams.GetProofSubmissionFee().Add(ClamAndProofGasCost)
supplierOperatorBalanceCoin, err := rs.bankQueryClient.GetBalance(
ctx,
@@ -301,19 +313,30 @@ func (rs *relayerSessionsManager) payableProofsSessionTrees(
for _, sessionTree := range sessionTrees {
// If the supplier operator can afford to claim the session, add it to the
// claimableSessionTrees slice.
- if supplierOperatorBalanceCoin.IsGTE(*proofSubmissionFeeCoin) {
+ supplierCanAffordClaimAndProofFees := supplierOperatorBalanceCoin.IsGTE(claimAndProofSubmissionCost)
+ if supplierCanAffordClaimAndProofFees {
claimableSessionTrees = append(claimableSessionTrees, sessionTree)
- newSupplierOperatorBalanceCoin := supplierOperatorBalanceCoin.Sub(*proofSubmissionFeeCoin)
+ newSupplierOperatorBalanceCoin := supplierOperatorBalanceCoin.Sub(claimAndProofSubmissionCost)
supplierOperatorBalanceCoin = &newSupplierOperatorBalanceCoin
continue
}
+ // At this point supplierCanAffordClaimAndProofFees is false.
+ // Delete the session tree from the relayer sessions and the KVStore since
+ // it won't be claimed due to insufficient funds.
+ rs.removeFromRelayerSessions(sessionTree)
+ if err := sessionTree.Delete(); err != nil {
+ logger.With(
+ "session_id", sessionTree.GetSessionHeader().GetSessionId(),
+ ).Error().Err(err).Msg("failed to delete session tree")
+ }
+
// Log a warning of any session that the supplier operator cannot afford to claim.
logger.With(
"session_id", sessionTree.GetSessionHeader().GetSessionId(),
"supplier_operator_balance", supplierOperatorBalanceCoin,
- "proof_submission_fee", proofSubmissionFeeCoin,
- ).Warn().Msg("supplier operator cannot afford to submit proof for claim, skipping")
+ "proof_submission_fee", claimAndProofSubmissionCost,
+ ).Warn().Msg("supplier operator cannot afford to submit proof for claim, deleting session tree")
}
if len(claimableSessionTrees) < len(sessionTrees) {
diff --git a/pkg/relayer/session/proof.go b/pkg/relayer/session/proof.go
index 9e80c0da4..728615cfb 100644
--- a/pkg/relayer/session/proof.go
+++ b/pkg/relayer/session/proof.go
@@ -19,7 +19,7 @@ import (
// submitProofs maps over the given claimedSessions observable.
// For each session batch, it:
// 1. Calculates the earliest block height at which to submit proofs
-// 2. Waits for said height and submits the proofs on-chain
+// 2. Waits for said height and submits the proofs onchain
// 3. Maps errors to a new observable and logs them
// It DOES NOT BLOCK as map operations run in their own goroutines.
func (rs *relayerSessionsManager) submitProofs(
@@ -76,7 +76,7 @@ func (rs *relayerSessionsManager) mapWaitForEarliestSubmitProofsHeight(
// waitForEarliestSubmitProofsHeightAndGenerateProofs calculates and waits for
// (blocking until) the earliest block height, allowed by the protocol, at which
// proofs can be submitted for a session number which were claimed at createClaimHeight.
-// It is calculated relative to createClaimHeight using on-chain governance parameters
+// It is calculated relative to createClaimHeight using onchain governance parameters
// and randomized input.
func (rs *relayerSessionsManager) waitForEarliestSubmitProofsHeightAndGenerateProofs(
ctx context.Context,
@@ -256,8 +256,8 @@ func (rs *relayerSessionsManager) proveClaims(
// isProofRequired determines whether a proof is required for the given session's
// claim based on the current proof module governance parameters.
-// TODO_TECHDEBT: Refactor the method to be static and used both on-chain and off-chain.
-// TODO_INVESTIGATE: Passing a polylog.Logger should allow for on-chain/off-chain
+// TODO_TECHDEBT: Refactor the method to be static and used both onchain and offchain.
+// TODO_INVESTIGATE: Passing a polylog.Logger should allow for onchain/offchain
// usage of this function but it is currently raising a type error.
func (rs *relayerSessionsManager) isProofRequired(
ctx context.Context,
diff --git a/pkg/relayer/session/service.go b/pkg/relayer/session/service.go
index 33d8b57de..f5b84c6ec 100644
--- a/pkg/relayer/session/service.go
+++ b/pkg/relayer/session/service.go
@@ -17,7 +17,7 @@ func (rs *relayerSessionsManager) getServiceComputeUnitsPerRelay(
service, err := rs.serviceQueryClient.GetService(ctx, sessionHeader.ServiceId)
if err != nil {
return 0, ErrSessionRelayMetaHasInvalidServiceID.Wrapf(
- "getServiceComputeUnitsPerRelay: could not get on-chain service %s: %v",
+ "getServiceComputeUnitsPerRelay: could not get onchain service %s: %v",
sessionHeader.ServiceId,
err,
)
diff --git a/pkg/relayer/session/session_test.go b/pkg/relayer/session/session_test.go
index 5b5757040..a9017bcad 100644
--- a/pkg/relayer/session/session_test.go
+++ b/pkg/relayer/session/session_test.go
@@ -207,8 +207,11 @@ func TestRelayerSessionsManager_InsufficientBalanceForProofSubmission(t *testing
supplierOperatorAddress := sample.AccAddress()
supplierOperatorAccAddress := sdktypes.MustAccAddressFromBech32(supplierOperatorAddress)
+
+ proofSubmissionFee := prooftypes.DefaultParams().ProofSubmissionFee.Amount.Int64()
+ claimAndProofGasCost := session.ClamAndProofGasCost.Amount.Int64()
// Set the supplier operator balance to be able to submit only a single proof.
- supplierOperatorBalance := prooftypes.DefaultParams().ProofSubmissionFee.Amount.Int64() + 1
+ supplierOperatorBalance := proofSubmissionFee + claimAndProofGasCost + 1
supplierClientMock.EXPECT().
OperatorAddress().
Return(&supplierOperatorAccAddress).
diff --git a/pkg/relayer/session/sessiontree.go b/pkg/relayer/session/sessiontree.go
index df1bfde79..42269c973 100644
--- a/pkg/relayer/session/sessiontree.go
+++ b/pkg/relayer/session/sessiontree.go
@@ -93,7 +93,7 @@ func NewSessionTree(
}
// Create the SMST from the KVStore and a nil value hasher so the proof would
- // contain a non-hashed Relay that could be used to validate the proof on-chain.
+ // contain a non-hashed Relay that could be used to validate the proof onchain.
trie := smt.NewSparseMerkleSumTrie(treeStore, protocol.NewTrieHasher(), smt.WithValueHasher(nil))
logger = logger.With(
@@ -221,7 +221,7 @@ func (st *sessionTree) GetProof() *smt.SparseCompactMerkleClosestProof {
// Flush gets the root hash of the SMST needed for submitting the claim;
// then commits the entire tree to disk and stops the KVStore.
-// It should be called before submitting the claim on-chain. This function frees up the KVStore resources.
+// It should be called before submitting the claim onchain. This function frees up the KVStore resources.
// If the SMST has already been flushed to disk, it returns the cached root hash.
func (st *sessionTree) Flush() (SMSTRoot []byte, err error) {
st.sessionMu.Lock()
@@ -257,7 +257,7 @@ func (st *sessionTree) GetClaimRoot() []byte {
// Delete deletes the SMST from the KVStore and removes the sessionTree from the RelayerSessionsManager.
// WARNING: This function deletes the KVStore associated to the session and should be
-// called only after the proof has been successfully submitted on-chain and the servicer
+// called only after the proof has been successfully submitted onchain and the servicer
// has confirmed that it has been rewarded.
func (st *sessionTree) Delete() error {
st.sessionMu.Lock()
diff --git a/proto/poktroll/application/types.proto b/proto/poktroll/application/types.proto
index 951359be2..81e7e893e 100644
--- a/proto/poktroll/application/types.proto
+++ b/proto/poktroll/application/types.proto
@@ -14,30 +14,38 @@ import "cosmos_proto/cosmos.proto";
import "poktroll/shared/service.proto";
-// Application defines the type used to store an on-chain definition and state for an application
+// Application represents the onchain definition and state of an application
message Application {
- string address = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; // The Bech32 address of the application.
- cosmos.base.v1beta1.Coin stake = 2; // The total amount of uPOKT the application has staked
- // CRITICAL_DEV_NOTE: The number of service_configs must be EXACTLY ONE.
+ // Bech32 address of the application
+ string address = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"];
+
+ // Total amount of staked uPOKT
+ cosmos.base.v1beta1.Coin stake = 2;
+
+ // CRITICAL: Must contain EXACTLY ONE service config
// This prevents applications from over-servicing.
- // The field is kept repeated (a list) for both legacy and future logic reaosns.
- // References:
- // - https://github.com/pokt-network/poktroll/pull/750#discussion_r1735025033
- // - https://www.notion.so/buildwithgrove/Off-chain-Application-Stake-Tracking-6a8bebb107db4f7f9dc62cbe7ba555f7
- repeated poktroll.shared.ApplicationServiceConfig service_configs = 3; // The list of services this appliccation is configured to request service for
+ // Kept as repeated field for legacy and future compatibility
+ // Refs:
+ // - https://github.com/pokt-network/poktroll/pull/750#discussion_r1735025033
+ // - https://www.notion.so/buildwithgrove/Off-chain-Application-Stake-Tracking-6a8bebb107db4f7f9dc62cbe7ba555f7
+ repeated poktroll.shared.ApplicationServiceConfig service_configs = 3;
+
// TODO_BETA(@bryanchriswhite): Rename `delegatee_gateway_addresses` to `gateway_addresses_delegated_to`.
// Ensure to rename all relevant configs, comments, variables, function names, etc as well.
- repeated string delegatee_gateway_addresses = 4 [(cosmos_proto.scalar) = "cosmos.AddressString", (gogoproto.nullable) = false]; // The Bech32 encoded addresses for all delegatee Gateways, in a non-nullable slice
- // A map from sessionEndHeights to a list of Gateways.
- // The key is the height of the last block of the session during which the
- // respective undelegation was committed.
- // The value is a list of gateways being undelegated from.
+ // Non-nullable list of Bech32 encoded delegatee Gateway addresses
+ repeated string delegatee_gateway_addresses = 4 [(cosmos_proto.scalar) = "cosmos.AddressString", (gogoproto.nullable) = false];
+
+ // Mapping of session end heights to gateways being undelegated from
+ // - Key: Height of the last block of the session when undelegation tx was committed
+ // - Value: List of gateways being undelegated from
// TODO_DOCUMENT(@red-0ne): Need to document the flow from this comment
// so its clear to everyone why this is necessary; https://github.com/pokt-network/poktroll/issues/476#issuecomment-2052639906.
map<uint64, UndelegatingGatewayList> pending_undelegations = 5 [(gogoproto.nullable) = false];
- // The end height of the session at which an application initiated its unstaking process.
- // If the application did not unstake, this value will be 0.
+
+ // Session end height when application initiated unstaking (0 if not unstaking)
uint64 unstake_session_end_height = 6;
+
+ // Information about pending application transfers
PendingApplicationTransfer pending_transfer = 7;
}
diff --git a/proto/poktroll/proof/params.proto b/proto/poktroll/proof/params.proto
index d107e1b99..e4b86a63f 100644
--- a/proto/poktroll/proof/params.proto
+++ b/proto/poktroll/proof/params.proto
@@ -33,7 +33,7 @@ message Params {
// proof_submission_fee is the number of tokens (uPOKT) which should be paid by
// the supplier operator when submitting a proof.
- // This is needed to account for the cost of storing proofs on-chain and prevent
+ // This is needed to account for the cost of storing proofs onchain and prevent
// spamming (i.e. sybil bloat attacks) the network with non-required proofs.
// TODO_MAINNET: Consider renaming this to `proof_submission_fee_upokt`.
cosmos.base.v1beta1.Coin proof_submission_fee = 5 [(gogoproto.jsontag) = "proof_submission_fee"];
diff --git a/proto/poktroll/proof/types.proto b/proto/poktroll/proof/types.proto
index 5a67772e2..d131adf90 100644
--- a/proto/poktroll/proof/types.proto
+++ b/proto/poktroll/proof/types.proto
@@ -22,7 +22,7 @@ message Proof {
bytes closest_merkle_proof = 3;
}
-// Claim is the serialized object stored on-chain for claims pending to be proven
+// Claim is the serialized object stored onchain for claims pending to be proven
message Claim {
string supplier_operator_address = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; // the address of the supplier's operator that submitted this claim
// The session header of the session that this claim is for.
diff --git a/proto/poktroll/service/params.proto b/proto/poktroll/service/params.proto
index 78c5da623..21089ff77 100644
--- a/proto/poktroll/service/params.proto
+++ b/proto/poktroll/service/params.proto
@@ -20,6 +20,6 @@ message Params {
cosmos.base.v1beta1.Coin add_service_fee = 1 [(gogoproto.jsontag) = "add_service_fee", (gogoproto.moretags) = "yaml:\"add_service_fee\""];
// target_num_relays is the target for the EMA of the number of relays per session.
- // Per service, on-chain relay mining difficulty will be adjusted to maintain this target.
+ // Per service, onchain relay mining difficulty will be adjusted to maintain this target.
uint64 target_num_relays = 2 [(gogoproto.jsontag) = "target_num_relays", (gogoproto.moretags) = "yaml:\"target_num_relays\""];
}
diff --git a/proto/poktroll/service/relay.proto b/proto/poktroll/service/relay.proto
index c840c331d..89d4b7a12 100644
--- a/proto/poktroll/service/relay.proto
+++ b/proto/poktroll/service/relay.proto
@@ -25,7 +25,7 @@ message RelayRequestMetadata {
// application in both cases.
bytes signature = 2;
- // TODO_MAINNET: make sure we're checking/verifying this address on-chain (if needed).
+ // TODO_MAINNET: make sure we're checking/verifying this address onchain (if needed).
// Relevant conversation: https://github.com/pokt-network/poktroll/pull/567#discussion_r1628722168
//
// The supplier operator address the relay is sent to. It is being used on the
diff --git a/proto/poktroll/service/relay_mining_difficulty.proto b/proto/poktroll/service/relay_mining_difficulty.proto
index 5c669c4f0..d3392c230 100644
--- a/proto/poktroll/service/relay_mining_difficulty.proto
+++ b/proto/poktroll/service/relay_mining_difficulty.proto
@@ -6,7 +6,7 @@ option (gogoproto.stable_marshaler_all) = true;
import "gogoproto/gogo.proto";
-// RelayMiningDifficulty is a message used to store the on-chain Relay Mining
+// RelayMiningDifficulty is a message used to store the onchain Relay Mining
// difficulty associated with a specific service ID.
// TODO_TECHDEBT: Embed this message in the Service message.
message RelayMiningDifficulty {
diff --git a/proto/poktroll/session/types.proto b/proto/poktroll/session/types.proto
index afbff35a4..18f2375ad 100644
--- a/proto/poktroll/session/types.proto
+++ b/proto/poktroll/session/types.proto
@@ -22,11 +22,11 @@ import "gogoproto/gogo.proto";
message SessionHeader {
string application_address = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; // The Bech32 address of the application.
string service_id = 2; // The service id this session is for
- // NOTE: session_id can be derived from the above values using on-chain but is included in the header for convenience
+ // NOTE: session_id can be derived onchain from the above values but is included in the header for convenience
string session_id = 3; // A unique pseudoranom ID for this session
int64 session_start_block_height = 4; // The height at which this session started
// Note that`session_end_block_height` is a derivative of (`start` + `num_blocks_per_session`)
- // as goverened by on-chain params at the time of the session start.
+ // as governed by onchain params at the time of the session start.
// It is stored as an additional field to simplofy business logic in case
// the number of blocks_per_session changes during the session.
int64 session_end_block_height = 5; // The height at which this session ended, this is the last block of the session
diff --git a/proto/poktroll/shared/params.proto b/proto/poktroll/shared/params.proto
index 327491922..215fcb302 100644
--- a/proto/poktroll/shared/params.proto
+++ b/proto/poktroll/shared/params.proto
@@ -33,12 +33,12 @@ message Params {
uint64 proof_window_close_offset_blocks = 6 [(gogoproto.jsontag) = "proof_window_close_offset_blocks"];
// supplier_unbonding_period_sessions is the number of sessions that a supplier must wait after
// unstaking before their staked assets are moved to their account balance.
- // On-chain business logic requires, and ensures, that the corresponding block count of the unbonding
+ // Onchain business logic requires, and ensures, that the corresponding block count of the unbonding
// period will exceed the end of any active claim & proof lifecycles.
uint64 supplier_unbonding_period_sessions = 7 [(gogoproto.jsontag) = "supplier_unbonding_period_sessions"];
// application_unbonding_period_sessions is the number of sessions that an application must wait after
// unstaking before their staked assets are moved to their account balance.
- // On-chain business logic requires, and ensures, that the corresponding block count of the
+ // Onchain business logic requires, and ensures, that the corresponding block count of the
// application unbonding period will exceed the end of its corresponding proof window close height.
uint64 application_unbonding_period_sessions = 8 [(gogoproto.jsontag) = "application_unbonding_period_sessions"];
// The amount of upokt that a compute unit should translate to when settling a session.
diff --git a/proto/poktroll/shared/service.proto b/proto/poktroll/shared/service.proto
index 33d8ca1b7..340c89abb 100644
--- a/proto/poktroll/shared/service.proto
+++ b/proto/poktroll/shared/service.proto
@@ -24,7 +24,7 @@ message Service {
uint64 compute_units_per_relay = 3; // Compute units required per relay for this service
// The owner address that created the service.
- // It is the address that receives rewards based on the Service's on-chain usage
+ // It is the address that receives rewards based on the Service's onchain usage
// It is the only address that can update the service configuration (e.g. compute_units_per_relay),
// or make other updates to it.
string owner_address = 4 [(cosmos_proto.scalar) = "cosmos.AddressString"]; // The Bech32 address of the service owner / creator
@@ -57,8 +57,12 @@ message SupplierEndpoint {
// ServiceRevenueShare message to hold revenue share configuration details
message ServiceRevenueShare {
+ // 2 was reserved in #1028 during the change of rev_share_percentage from float to uint64
+ // TODO_TECHDEBT(#1033): Investigate if we can use a double instead.
+ reserved 2;
+
string address = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; // The Bech32 address of the revenue share recipient
- float rev_share_percentage = 2; // The percentage of revenue share the recipient will receive
+ uint64 rev_share_percentage = 3; // The percentage of revenue share the recipient will receive
}
// Enum to define RPC types
@@ -72,7 +76,7 @@ enum RPCType {
}
// Enum to define configuration options
-// TODO_RESEARCH: Should these be configs, SLAs or something else? There will be more discussion once we get closer to implementing on-chain QoS.
+// TODO_RESEARCH: Should these be configs, SLAs or something else? There will be more discussion once we get closer to implementing onchain QoS.
enum ConfigOptions {
UNKNOWN_CONFIG = 0; // Undefined config option
TIMEOUT = 1; // Timeout setting
diff --git a/proto/poktroll/shared/supplier.proto b/proto/poktroll/shared/supplier.proto
index be58d1a14..7b88dff05 100644
--- a/proto/poktroll/shared/supplier.proto
+++ b/proto/poktroll/shared/supplier.proto
@@ -10,24 +10,30 @@ import "cosmos/base/v1beta1/coin.proto";
import "poktroll/shared/service.proto";
import "gogoproto/gogo.proto";
-// Supplier is the type defining the actor in Pocket Network that provides RPC services.
+// Supplier represents an actor in Pocket Network that provides RPC services
message Supplier {
- // The address of the owner (i.e. staker, custodial) that owns the funds for staking.
- // By default, this address is the one that receives all the rewards unless owtherwise specified.
- // This property cannot be updated by the operator.
- string owner_address = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; // Bech32 cosmos address
- // The operator address of the supplier operator (i.e. the one managing the off-chain server).
- // The operator address can update the supplier's configurations excluding the owner address.
- // This property does not change over the supplier's lifespan, the supplier must be unstaked
- // and re-staked to effectively update this value.
- string operator_address = 2 [(cosmos_proto.scalar) = "cosmos.AddressString"]; // Bech32 cosmos address
- cosmos.base.v1beta1.Coin stake = 3; // The total amount of uPOKT the supplier has staked
- repeated SupplierServiceConfig services = 4; // The service configs this supplier can support
- // The session end height at which an actively unbonding supplier unbonds its stake.
- // If the supplier did not unstake, this value will be 0.
+ // Owner address that controls the staked funds and receives rewards by default
+ // Cannot be updated by the operator
+ string owner_address = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"];
+
+ // Operator address managing the offchain server
+ // Immutable for supplier's lifespan - requires unstake/re-stake to change.
+ // Can update supplier configs except for owner address.
+ string operator_address = 2 [(cosmos_proto.scalar) = "cosmos.AddressString"];
+
+ // Total amount of staked uPOKT
+ cosmos.base.v1beta1.Coin stake = 3;
+
+ // List of service configurations supported by this supplier
+ repeated SupplierServiceConfig services = 4;
+
+ // Session end height when supplier initiated unstaking (0 if not unstaking)
uint64 unstake_session_end_height = 5;
- // services_activation_heights_map is a map of serviceIds to the height at
- // which the staked supplier will become active for that service.
- // Activation heights are session start heights.
+
+ // Mapping of serviceIds to their activation heights
+ // - Key: serviceId
+ // - Value: Session start height when supplier becomes active for the service
+ // TODO_MAINNET(@olshansk, #1033): Look into moving this to an external repeated protobuf
+ // because maps are no longer supported for serialized types in the Cosmos SDK.
map services_activation_heights_map = 6;
}
diff --git a/proto/poktroll/supplier/event.proto b/proto/poktroll/supplier/event.proto
index 422f41529..26daded8c 100644
--- a/proto/poktroll/supplier/event.proto
+++ b/proto/poktroll/supplier/event.proto
@@ -16,7 +16,7 @@ enum SupplierUnbondingReason {
SUPPLIER_UNBONDING_REASON_BELOW_MIN_STAKE = 2;
}
-// EventSupplierStaked is emitted when a supplier stake message is committed on-chain.
+// EventSupplierStaked is emitted when a supplier stake message is committed onchain.
message EventSupplierStaked {
poktroll.shared.Supplier supplier = 1 [(gogoproto.jsontag) = "supplier"];
// The session end height of the last session in which the supplier was staked.
@@ -24,7 +24,7 @@ message EventSupplierStaked {
}
// EventSupplierUnbondingBegin is emitted when an application unstake message
-// is committed on-chain, indicating that the supplier will now begin unbonding.
+// is committed onchain, indicating that the supplier will now begin unbonding.
message EventSupplierUnbondingBegin {
poktroll.shared.Supplier supplier = 1 [(gogoproto.jsontag) = "supplier"];
SupplierUnbondingReason reason = 2 [(gogoproto.jsontag) = "reason"];
diff --git a/proto/poktroll/supplier/query.proto b/proto/poktroll/supplier/query.proto
index cb7a4d31a..71de22f24 100644
--- a/proto/poktroll/supplier/query.proto
+++ b/proto/poktroll/supplier/query.proto
@@ -45,6 +45,7 @@ message QueryParamsResponse {
message QueryGetSupplierRequest {
string operator_address = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"];
+ // TODO_TECHDEBT: Add the ability to query for a supplier by owner_id
}
message QueryGetSupplierResponse {
@@ -53,6 +54,10 @@ message QueryGetSupplierResponse {
message QueryAllSuppliersRequest {
cosmos.base.query.v1beta1.PageRequest pagination = 1;
+
+ oneof filter {
+ string service_id = 2; // unique service identifier to filter by
+ }
}
message QueryAllSuppliersResponse {
diff --git a/proto/poktroll/tokenomics/event.proto b/proto/poktroll/tokenomics/event.proto
index c0043e3f9..6a525211b 100644
--- a/proto/poktroll/tokenomics/event.proto
+++ b/proto/poktroll/tokenomics/event.proto
@@ -17,7 +17,7 @@ enum ClaimExpirationReason {
}
// EventClaimExpired is an event emitted during settlement whenever a claim requiring
-// an on-chain proof doesn't have one. The claim cannot be settled, leading to that work
+// an onchain proof doesn't have one. The claim cannot be settled, leading to that work
// never being rewarded.
message EventClaimExpired {
poktroll.proof.Claim claim = 1 [(gogoproto.jsontag) = "claim"];
diff --git a/telemetry/defaults.go b/telemetry/defaults.go
new file mode 100644
index 000000000..e059e2c93
--- /dev/null
+++ b/telemetry/defaults.go
@@ -0,0 +1,14 @@
+package telemetry
+
+// Default configuration values for telemetry
+const (
+ // DefaultCardinalityLevel represents the default cardinality level for metrics collection
+ DefaultCardinalityLevel = "medium"
+)
+
+// DefaultConfig returns the default telemetry configuration
+func DefaultConfig() PoktrollTelemetryConfig {
+ return PoktrollTelemetryConfig{
+ CardinalityLevel: DefaultCardinalityLevel,
+ }
+}
diff --git a/telemetry/event_counters.go b/telemetry/event_counters.go
index 2a36580be..1518445f7 100644
--- a/telemetry/event_counters.go
+++ b/telemetry/event_counters.go
@@ -89,7 +89,7 @@ func ProofRequirementCounter(
}
// ClaimComputeUnitsCounter increments a counter which tracks the number of compute units
-// which are represented by on-chain claims at the given ClaimProofStage.
+// which are represented by onchain claims at the given ClaimProofStage.
// If err is not nil, the counter is not incremented but Prometheus will ingest this event.
func ClaimComputeUnitsCounter(
claimProofStage string,
@@ -127,7 +127,7 @@ func ClaimComputeUnitsCounter(
}
// ClaimRelaysCounter increments a counter which tracks the number of relays
-// represented by on-chain claims at the given ClaimProofStage.
+// represented by onchain claims at the given ClaimProofStage.
// If err is not nil, the counter is not incremented and an "error" label is added
// with the error's message. I.e., Prometheus will ingest this event.
func ClaimRelaysCounter(
diff --git a/telemetry/telemetry.go b/telemetry/telemetry.go
index 348b73898..d7b93a879 100644
--- a/telemetry/telemetry.go
+++ b/telemetry/telemetry.go
@@ -11,16 +11,31 @@ import (
// Set once on initialization and remains constant during runtime.
var globalTelemetryConfig PoktrollTelemetryConfig
-// PoktrollTelemetryConfig represents the telemetry protion of the custom poktroll config section in `app.toml`.
+// PoktrollTelemetryConfig represents the telemetry portion of the custom poktroll config section in `app.toml`.
type PoktrollTelemetryConfig struct {
CardinalityLevel string `mapstructure:"cardinality-level"`
}
// New sets the globalTelemetryConfig for telemetry package.
func New(appOpts servertypes.AppOptions) error {
- // Extract the map from appOpts.
- // `poktroll.telemetry` comes from `app.toml` which is parsed into a map.
- telemetryMap := appOpts.Get("poktroll.telemetry").(map[string]interface{})
+ // Get the poktroll config section. If it doesn't exist, use defaults
+ poktrollConfig := appOpts.Get("poktroll")
+ if poktrollConfig == nil {
+ globalTelemetryConfig = DefaultConfig()
+ return nil
+ }
+
+ // Try to get the telemetry subsection
+ poktrollMap, ok := poktrollConfig.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("invalid poktroll config format: expected map[string]interface{}, got %T", poktrollConfig)
+ }
+
+ telemetryMap, ok := poktrollMap["telemetry"].(map[string]interface{})
+ if !ok {
+ globalTelemetryConfig = DefaultConfig()
+ return nil
+ }
// Use mapstructure to decode the map into the struct
if err := mapstructure.Decode(telemetryMap, &globalTelemetryConfig); err != nil {
diff --git a/telemetry/tokens.go b/telemetry/tokens.go
index a62b3355f..deecbac12 100644
--- a/telemetry/tokens.go
+++ b/telemetry/tokens.go
@@ -9,7 +9,7 @@ import (
// complies with the new hardened settlement approach.
// TODO_MAINNET(@red-0ne, #897): Minted, burnt and slashd tokens values might not be completely accurate.
-// While we're keeping this metric for now consider removing in favor of utilizing the `cosmos-exporter` which uses on-chain data.
+// While we're keeping this metric for now consider removing in favor of utilizing the `cosmos-exporter` which uses onchain data.
// Context: https://github.com/cosmos/cosmos-sdk/issues/21614, https://github.com/pokt-network/poktroll/pull/832
// MintedTokensFromModule is a function to track token minting from a specific module.
diff --git a/tests/integration/application/application_transfer_test.go b/tests/integration/application/application_transfer_test.go
index a4c76d4ba..ad0b49cd7 100644
--- a/tests/integration/application/application_transfer_test.go
+++ b/tests/integration/application/application_transfer_test.go
@@ -80,7 +80,7 @@ func (s *appTransferTestSuite) SetupTest() {
s.app2: {s.gateway1, s.gateway5},
})
- // Assert the on-chain state shows the application 3 as NOT staked.
+ // Assert the onchain state shows the application 3 as NOT staked.
_, queryErr := s.GetAppQueryClient().GetApplication(s.SdkCtx(), s.app3)
require.ErrorContains(s.T(), queryErr, "application not found")
require.ErrorContains(s.T(), queryErr, s.app3)
@@ -390,7 +390,7 @@ func (s *appTransferTestSuite) setupStakeApps(appBech32ToServiceIdsMap map[strin
require.Equal(s.T(), appBech32, stakeAppRes.GetApplication().GetAddress())
require.Equal(s.T(), stakeAmount, stakeAppRes.GetApplication().GetStake().Amount.Int64())
- // Assert the on-chain state shows the application as staked.
+ // Assert the onchain state shows the application as staked.
foundApp, queryErr := s.GetAppQueryClient().GetApplication(s.SdkCtx(), appBech32)
require.NoError(s.T(), queryErr)
require.Equal(s.T(), appBech32, foundApp.GetAddress())
diff --git a/tests/integration/service/relay_mining_difficulty_test.go b/tests/integration/service/relay_mining_difficulty_test.go
index 8e84787fb..050b8328c 100644
--- a/tests/integration/service/relay_mining_difficulty_test.go
+++ b/tests/integration/service/relay_mining_difficulty_test.go
@@ -205,7 +205,7 @@ func prepareSMST(
for i := uint64(0); i < numRelays; i++ {
// DEV_NOTE: A signed mined relay is a MinedRelay type with the appropriate
// payload, signatures and metadata populated.
- // It does not (as of writing) adhere to the actual on-chain difficulty (i.e.
+ // It does not (as of writing) adhere to the actual onchain difficulty (i.e.
// hash check) of the test service surrounding the scope of this test.
minedRelay := testrelayer.NewSignedMinedRelay(t, ctx,
session,
diff --git a/tests/integration/tokenomics/relay_mining_integration_test.go b/tests/integration/tokenomics/relay_mining_integration_test.go
index 0b6ea4a9f..3a9231c4a 100644
--- a/tests/integration/tokenomics/relay_mining_integration_test.go
+++ b/tests/integration/tokenomics/relay_mining_integration_test.go
@@ -198,7 +198,7 @@ func TestComputeNewDifficultyHash_RewardsReflectWorkCompleted(t *testing.T) {
}
// prepareRealClaim prepares a claim by creating a real SMST with the given number
-// of mined relays that adhere to the actual on-chain difficulty of the test service.
+// of mined relays that adhere to the actual onchain difficulty of the test service.
func prepareRealClaim(
t *testing.T,
numRelays uint64,
diff --git a/tests/integration/tokenomics/token_logic_modules/unhaltable_test.go b/tests/integration/tokenomics/token_logic_modules/unhaltable_test.go
index 7726dfdad..3096c29d5 100644
--- a/tests/integration/tokenomics/token_logic_modules/unhaltable_test.go
+++ b/tests/integration/tokenomics/token_logic_modules/unhaltable_test.go
@@ -48,7 +48,7 @@ func (s *tokenLogicModuleTestSuite) TestSettlePendingClaims_NonHaltingError() {
desc string
setup func(*testing.T)
}{
- {desc: "supplier operator pubkey not on-chain"},
+ {desc: "supplier operator pubkey not onchain"},
{desc: "closest merkle proof is invalid (mangled)"},
{desc: "closest merkle proof is invalid (non-compact)"},
{desc: "closest merkle proof leaf is not a relay"},
diff --git a/testutil/delays/waitall.go b/testutil/delays/waitall.go
new file mode 100644
index 000000000..30ae68287
--- /dev/null
+++ b/testutil/delays/waitall.go
@@ -0,0 +1,23 @@
+package testdelays
+
+import "sync"
+
+// WaitAll waits for all the provided functions to complete.
+// It is used to wait for multiple goroutines to complete before proceeding.
+func WaitAll(waitFuncs ...func()) {
+ if len(waitFuncs) == 0 {
+ return
+ }
+
+ var wg sync.WaitGroup
+ wg.Add(len(waitFuncs))
+
+ for _, fn := range waitFuncs {
+ go func(f func()) {
+ defer wg.Done()
+ f()
+ }(fn)
+ }
+
+ wg.Wait()
+}
diff --git a/testutil/events/filter.go b/testutil/events/filter.go
index ced4617af..5f16aa1ea 100644
--- a/testutil/events/filter.go
+++ b/testutil/events/filter.go
@@ -1,6 +1,7 @@
package events
import (
+ "context"
"strconv"
"strings"
"testing"
@@ -8,6 +9,8 @@ import (
abci "github.com/cometbft/cometbft/abci/types"
cosmostypes "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/gogoproto/proto"
+ "github.com/pokt-network/poktroll/pkg/observable"
+ "github.com/pokt-network/poktroll/pkg/observable/channel"
"github.com/stretchr/testify/require"
)
@@ -74,3 +77,24 @@ func NewEventTypeMatchFn(matchEventType string) func(*cosmostypes.Event) bool {
return strings.Trim(event.Type, "/") == strings.Trim(matchEventType, "/")
}
}
+
+// AbciEventsToTypedEvents converts the abci events to typed events.
+func AbciEventsToTypedEvents(
+ ctx context.Context,
+ abciEventObs observable.Observable[[]abci.Event],
+) observable.Observable[[]proto.Message] {
+ return channel.Map(ctx, abciEventObs, func(ctx context.Context, events []abci.Event) ([]proto.Message, bool) {
+ var typedEvents []proto.Message
+ for _, event := range events {
+ // TODO_TECHDEBT: Filter out events by event.Type before parsing them.
+ typedEvent, err := cosmostypes.ParseTypedEvent(event)
+ if err != nil {
+ continue
+ }
+
+ typedEvents = append(typedEvents, typedEvent)
+ }
+
+ return typedEvents, false
+ })
+}
diff --git a/testutil/integration/app.go b/testutil/integration/app.go
index 715c0135b..04305089a 100644
--- a/testutil/integration/app.go
+++ b/testutil/integration/app.go
@@ -862,7 +862,7 @@ func (app *App) nextBlockUpdateCtx() {
}
// setupDefaultActorsState uses the integration app keepers to stake "default"
-// on-chain actors for use in tests. In creates a service, and stakes a supplier
+// onchain actors for use in tests. It creates a service, and stakes a supplier
// and application as well as funding the bank balance of the default supplier.
//
// TODO_TECHDEBT(@bryanchriswhite): Eliminate usage of and remove this function in favor of
@@ -897,7 +897,7 @@ func (app *App) setupDefaultActorsState(
app.preGeneratedAccts,
)
- // Prepare the on-chain supplier
+ // Prepare the onchain supplier
supplierStake := types.NewCoin("upokt", math.NewInt(1000000))
defaultSupplier := sharedtypes.Supplier{
OwnerAddress: supplierOperatorAddr.String(),
@@ -908,7 +908,7 @@ func (app *App) setupDefaultActorsState(
RevShare: []*sharedtypes.ServiceRevenueShare{
{
Address: sample.AccAddress(),
- RevSharePercentage: 100,
+ RevSharePercentage: uint64(100),
},
},
ServiceId: defaultService.Id,
@@ -928,7 +928,7 @@ func (app *App) setupDefaultActorsState(
app.preGeneratedAccts,
)
- // Prepare the on-chain application
+ // Prepare the onchain application
appStake := types.NewCoin("upokt", math.NewInt(1000000))
defaultApplication := apptypes.Application{
Address: applicationAddr.String(),
diff --git a/testutil/integration/suites/authz.go b/testutil/integration/suites/authz.go
index 62b44c89a..c83b94376 100644
--- a/testutil/integration/suites/authz.go
+++ b/testutil/integration/suites/authz.go
@@ -25,7 +25,7 @@ type AuthzIntegrationSuite struct {
BaseIntegrationSuite
}
-// RunAuthzGrantMsgForPoktrollModules creates an on-chain authz grant for the given
+// RunAuthzGrantMsgForPoktrollModules creates an onchain authz grant for the given
// granter and grantee addresses for the specified message name in each of the poktroll
// modules present in the integration app.
func (s *AuthzIntegrationSuite) RunAuthzGrantMsgForPoktrollModules(
@@ -65,7 +65,7 @@ func (s *AuthzIntegrationSuite) RunAuthzGrantMsgForPoktrollModules(
}
}
-// RunAuthzGrantMsg creates an on-chain authz grant from the given granter to the
+// RunAuthzGrantMsg creates an onchain authz grant from the given granter to the
// grantee addresses for the authorization object provided.
func (s *AuthzIntegrationSuite) RunAuthzGrantMsg(
t *testing.T,
diff --git a/testutil/integration/suites/update_params.go b/testutil/integration/suites/update_params.go
index 4a26ffa80..edc5ccabb 100644
--- a/testutil/integration/suites/update_params.go
+++ b/testutil/integration/suites/update_params.go
@@ -80,7 +80,7 @@ type ParamsSuite struct {
// AuthorityAddr is the cosmos account address of the authority for the integration
// app. It is used as the **granter** of authz grants for parameter update messages.
- // In practice, is an address sourced by an on-chain string and no one has the private key.
+ // In practice, is an address sourced by an onchain string and no one has the private key.
AuthorityAddr cosmostypes.AccAddress
// AuthorizedAddr is the cosmos account address which is the **grantee** of authz
// grants for parameter update messages.
@@ -121,7 +121,7 @@ func (s *ParamsSuite) SetupTestAuthzAccounts(t *testing.T) {
s.AuthorizedAddr = nextAcct.Address
}
-// SetupTestAuthzGrants creates on-chain authz grants for the MsgUpdateUpdateParam and
+// SetupTestAuthzGrants creates onchain authz grants for the MsgUpdateUpdateParam and
// MsgUpdateParams message for each module. It is expected to be called after s.NewApp()
// as it depends on the authority and authorized addresses having been set.
func (s *ParamsSuite) SetupTestAuthzGrants(t *testing.T) {
@@ -147,7 +147,7 @@ func (s *ParamsSuite) SetupTestAuthzGrants(t *testing.T) {
// RunUpdateParams runs the given MsgUpdateParams message via an authz exec as the
// AuthorizedAddr and returns the response bytes and error. It is expected to be called
-// after s.SetupTestAuthzGrants() as it depends on an on-chain authz grant to AuthorizedAddr
+// after s.SetupTestAuthzGrants() as it depends on an onchain authz grant to AuthorizedAddr
// for MsgUpdateParams for the given module.
func (s *ParamsSuite) RunUpdateParams(
t *testing.T,
@@ -159,7 +159,7 @@ func (s *ParamsSuite) RunUpdateParams(
}
// RunUpdateParamsAsSigner runs the given MsgUpdateParams message via an authz exec
-// as signerAddr and returns the response bytes and error. It depends on an on-chain
+// as signerAddr and returns the response bytes and error. It depends on an onchain
// authz grant to signerAddr for MsgUpdateParams for the given module.
func (s *ParamsSuite) RunUpdateParamsAsSigner(
t *testing.T,
@@ -182,7 +182,7 @@ func (s *ParamsSuite) RunUpdateParamsAsSigner(
// RunUpdateParam constructs and runs an MsgUpdateParam message via an authz exec
// as the AuthorizedAddr for the given module, parameter name, and value. It returns
// the response bytes and error. It is expected to be called after s.SetupTestAuthzGrants()
-// as it depends on an on-chain authz grant to AuthorizedAddr for MsgUpdateParam for the given module.
+// as it depends on an onchain authz grant to AuthorizedAddr for MsgUpdateParam for the given module.
func (s *ParamsSuite) RunUpdateParam(
t *testing.T,
moduleName string,
@@ -201,7 +201,7 @@ func (s *ParamsSuite) RunUpdateParam(
// RunUpdateParamAsSigner constructs and runs an MsgUpdateParam message via an authz exec
// as the given signerAddr for the given module, parameter name, and value. It returns
-// the response bytes and error. It depends on an on-chain authz grant to signerAddr for
+// the response bytes and error. It depends on an onchain authz grant to signerAddr for
// MsgUpdateParam for the given module.
func (s *ParamsSuite) RunUpdateParamAsSigner(
t *testing.T,
diff --git a/testutil/keeper/tokenomics.go b/testutil/keeper/tokenomics.go
index e4cef7328..31f5dc53a 100644
--- a/testutil/keeper/tokenomics.go
+++ b/testutil/keeper/tokenomics.go
@@ -124,7 +124,7 @@ func TokenomicsKeeperWithActorAddrs(t testing.TB) (
registry := codectypes.NewInterfaceRegistry()
cdc := codec.NewProtoCodec(registry)
- // The on-chain governance address.
+ // The onchain governance address.
authority := authtypes.NewModuleAddress(govtypes.ModuleName)
// Prepare the test application.
@@ -146,7 +146,7 @@ func TokenomicsKeeperWithActorAddrs(t testing.TB) (
RevShare: []*sharedtypes.ServiceRevenueShare{
{
Address: supplierOwnerAddr,
- RevSharePercentage: 100,
+ RevSharePercentage: uint64(100),
},
},
},
@@ -182,6 +182,11 @@ func TokenomicsKeeperWithActorAddrs(t testing.TB) (
Return(nil).
AnyTimes()
+ mockApplicationKeeper.EXPECT().
+ GetParams(gomock.Any()).
+ Return(apptypes.Params{}).
+ AnyTimes()
+
// Mock the supplier keeper.
mockSupplierKeeper := mocks.NewMockSupplierKeeper(ctrl)
// Mock SetSupplier.
diff --git a/testutil/session/session.go b/testutil/session/session.go
index 84f9c4495..3128465c2 100644
--- a/testutil/session/session.go
+++ b/testutil/session/session.go
@@ -6,7 +6,7 @@ import (
)
// GetSessionIdWithDefaultParams returns the string and bytes representation of the
-// sessionId for the session containing blockHeight, given the default shared on-chain
+// sessionId for the session containing blockHeight, given the default shared onchain
// parameters, application public key, service ID, and block hash.
func GetSessionIdWithDefaultParams(
appPubKey,
@@ -19,7 +19,7 @@ func GetSessionIdWithDefaultParams(
}
// GetSessionStartHeightWithDefaultParams returns the block height at which the
-// session containing queryHeight starts, given the default shared on-chain
+// session containing queryHeight starts, given the default shared onchain
// parameters.
// See shared.GetSessionStartHeight for more details.
func GetSessionStartHeightWithDefaultParams(queryHeight int64) int64 {
@@ -28,7 +28,7 @@ func GetSessionStartHeightWithDefaultParams(queryHeight int64) int64 {
}
// GetSessionEndHeightWithDefaultParams returns the block height at which the session
-// containing queryHeight ends, given the default shared on-chain parameters.
+// containing queryHeight ends, given the default shared onchain parameters.
// See shared.GetSessionEndHeight for more details.
func GetSessionEndHeightWithDefaultParams(queryHeight int64) int64 {
sharedParams := sharedtypes.DefaultParams()
@@ -36,7 +36,7 @@ func GetSessionEndHeightWithDefaultParams(queryHeight int64) int64 {
}
// GetSessionNumberWithDefaultParams returns the session number of the session
-// containing queryHeight, given the default on-chain shared parameters.
+// containing queryHeight, given the default onchain shared parameters.
// See shared.GetSessionNumber for more details.
func GetSessionNumberWithDefaultParams(queryHeight int64) int64 {
sharedParams := sharedtypes.DefaultParams()
diff --git a/testutil/testclient/testtx/context.go b/testutil/testclient/testtx/context.go
index 449e88b8f..f8769bd4d 100644
--- a/testutil/testclient/testtx/context.go
+++ b/testutil/testclient/testtx/context.go
@@ -95,6 +95,10 @@ func NewOneTimeErrTxTimeoutTxContext(
},
).Times(1)
+ txCtxMock.EXPECT().GetSimulatedTxGas(gomock.Any(), gomock.Any(), gomock.Any()).
+ Return(uint64(1), nil).
+ Times(1)
+
txCtxMock.EXPECT().QueryTx(
gomock.AssignableToTypeOf(context.Background()),
gomock.AssignableToTypeOf([]byte{}),
diff --git a/testutil/testproxy/relayerproxy.go b/testutil/testproxy/relayerproxy.go
index 613c5a6be..e3d0981a6 100644
--- a/testutil/testproxy/relayerproxy.go
+++ b/testutil/testproxy/relayerproxy.go
@@ -293,7 +293,7 @@ func MarshalAndSend(
require.FailNow(test.t, "unsupported server type")
}
- // originHost is the endpoint that the client will retrieve from the on-chain supplier record.
+ // originHost is the endpoint that the client will retrieve from the onchain supplier record.
// The supplier may have multiple endpoints (e.g. for load geo-balancing, host failover, etc.).
// In the current test setup, we only have one endpoint per supplier, which is why we are accessing `[0]`.
// In a real-world scenario, the publicly exposed endpoint would reach a load balancer
diff --git a/tools/iavl-tree-diff/go.mod b/tools/iavl-tree-diff/go.mod
index e0f4c5a83..a067e3bec 100644
--- a/tools/iavl-tree-diff/go.mod
+++ b/tools/iavl-tree-diff/go.mod
@@ -14,7 +14,6 @@ require (
github.com/google/btree v1.1.3 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
- golang.org/x/crypto v0.26.0 // indirect
- golang.org/x/sys v0.24.0 // indirect
+ golang.org/x/crypto v0.31.0 // indirect
google.golang.org/protobuf v1.34.2 // indirect
)
diff --git a/tools/iavl-tree-diff/go.sum b/tools/iavl-tree-diff/go.sum
index f2bbbac4d..4021bb7d4 100644
--- a/tools/iavl-tree-diff/go.sum
+++ b/tools/iavl-tree-diff/go.sum
@@ -60,8 +60,8 @@ go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
-golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
+golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
+golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -87,13 +87,13 @@ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg=
-golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
+golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
-golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
diff --git a/tools/scripts/upgrades/authz_cancel_upgrade_tx.json b/tools/scripts/upgrades/authz_cancel_upgrade_tx.json
new file mode 100644
index 000000000..014eaac60
--- /dev/null
+++ b/tools/scripts/upgrades/authz_cancel_upgrade_tx.json
@@ -0,0 +1,10 @@
+{
+ "body": {
+ "messages": [
+ {
+ "@type": "/cosmos.upgrade.v1beta1.MsgCancelUpgrade",
+ "authority": "pokt10d07y265gmmuvt4z0w9aw880jnsr700j8yv32t"
+ }
+ ]
+ }
+}
\ No newline at end of file
diff --git a/tools/scripts/upgrades/cosmovisor-start-node.sh b/tools/scripts/upgrades/cosmovisor-start-node.sh
index bce8b0c94..354eaecee 100644
--- a/tools/scripts/upgrades/cosmovisor-start-node.sh
+++ b/tools/scripts/upgrades/cosmovisor-start-node.sh
@@ -45,7 +45,7 @@ rm -rf $DAEMON_HOME
# Runs regenesis.
make localnet_regenesis
-# Setups cosmovisor directories and poktroll binaries. On real network cosmovisor can download the binaries using on-chain
+# Sets up cosmovisor directories and poktroll binaries. On a real network, cosmovisor can download the binaries using onchain
# data when `DAEMON_ALLOW_DOWNLOAD_BINARIES=true`.
mkdir -p $DAEMON_HOME/cosmovisor/genesis/bin/ $DAEMON_HOME/cosmovisor/upgrades/$POKTROLLD_UPGRADE_PLAN_NAME/bin/
cp -r $POKTROLLD_OLD_BINARY_PATH $DAEMON_HOME/cosmovisor/genesis/bin/poktrolld
diff --git a/tools/scripts/upgrades/upgrade_tx_v0.0.9.json b/tools/scripts/upgrades/upgrade_tx_v0.0.9.json
new file mode 100644
index 000000000..c945229d9
--- /dev/null
+++ b/tools/scripts/upgrades/upgrade_tx_v0.0.9.json
@@ -0,0 +1,15 @@
+{
+ "body": {
+ "messages": [
+ {
+ "@type": "/cosmos.upgrade.v1beta1.MsgSoftwareUpgrade",
+ "authority": "pokt10d07y265gmmuvt4z0w9aw880jnsr700j8yv32t",
+ "plan": {
+ "name": "v0.0.9",
+ "height": "15510",
+ "info": "{\"binaries\":{\"linux/amd64\":\"https://github.com/pokt-network/poktroll/releases/download/v0.0.9/poktroll_linux_amd64.tar.gz?checksum=sha256:ab5b99ca0bc4bfbdd7031378d5a01c2a9f040ff310b745866a4dee7e62321c94\",\"linux/arm64\":\"https://github.com/pokt-network/poktroll/releases/download/v0.0.9/poktroll_linux_arm64.tar.gz?checksum=sha256:4b68c2ad326da055d43af1ad1a580158cec0f229d2ec6d9e18280d065260b622\",\"darwin/amd64\":\"https://github.com/pokt-network/poktroll/releases/download/v0.0.9/poktroll_darwin_amd64.tar.gz?checksum=sha256:c81aabddeb190044b979412e5a518bbf5c88305272f72a47e32e13aa765c3330\",\"darwin/arm64\":\"https://github.com/pokt-network/poktroll/releases/download/v0.0.9/poktroll_darwin_arm64.tar.gz?checksum=sha256:e683c55ac13902d107d7a726ed4a5c5affb2af1be3c67dd131ec2072a2cfbcb2\"}}"
+ }
+ }
+ ]
+ }
+}
\ No newline at end of file
diff --git a/x/application/keeper/prune_undelegations.go b/x/application/keeper/prune_undelegations.go
index 9f0b42afc..a526029a1 100644
--- a/x/application/keeper/prune_undelegations.go
+++ b/x/application/keeper/prune_undelegations.go
@@ -45,7 +45,7 @@ func (k Keeper) EndBlockerPruneAppToGatewayPendingUndelegation(ctx sdk.Context)
}
// GetNumBlocksUndelegationRetention returns the number of blocks for which
-// undelegations should be kept before being pruned, given the current on-chain
+// undelegations should be kept before being pruned, given the current onchain
// shared module parameters.
func (k Keeper) GetNumBlocksUndelegationRetention(ctx context.Context) int64 {
sharedParams := k.sharedKeeper.GetParams(ctx)
diff --git a/x/application/types/errors.go b/x/application/types/errors.go
index 4e8d7fb32..f4de58c6d 100644
--- a/x/application/types/errors.go
+++ b/x/application/types/errors.go
@@ -21,6 +21,6 @@ var (
ErrAppDuplicateAddress = sdkerrors.Register(ModuleName, 1113, "duplicate application address")
ErrAppHasPendingTransfer = sdkerrors.Register(ModuleName, 1114, "application is in transfer period")
ErrAppParamInvalid = sdkerrors.Register(ModuleName, 1115, "the provided param is invalid")
- ErrAppEmitEvent = sdkerrors.Register(ModuleName, 1116, "unable to emit on-chain event")
+ ErrAppEmitEvent = sdkerrors.Register(ModuleName, 1116, "unable to emit onchain event")
ErrQueryAppsInvalidGatewayAddress = sdkerrors.Register(ModuleName, 1117, "invalid gateway address querying for apps with delegatee gateway address")
)
diff --git a/x/application/types/types.pb.go b/x/application/types/types.pb.go
index 7d154dd9f..ec942dee4 100644
--- a/x/application/types/types.pb.go
+++ b/x/application/types/types.pb.go
@@ -27,31 +27,33 @@ var _ = math.Inf
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-// Application defines the type used to store an on-chain definition and state for an application
+// Application represents the onchain definition and state of an application
type Application struct {
- Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"`
- Stake *types.Coin `protobuf:"bytes,2,opt,name=stake,proto3" json:"stake,omitempty"`
- // CRITICAL_DEV_NOTE: The number of service_configs must be EXACTLY ONE.
+ // Bech32 address of the application
+ Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"`
+ // Total amount of staked uPOKT
+ Stake *types.Coin `protobuf:"bytes,2,opt,name=stake,proto3" json:"stake,omitempty"`
+ // CRITICAL: Must contain EXACTLY ONE service config
// This prevents applications from over-servicing.
- // The field is kept repeated (a list) for both legacy and future logic reaosns.
- // References:
- // - https://github.com/pokt-network/poktroll/pull/750#discussion_r1735025033
- // - https://www.notion.so/buildwithgrove/Off-chain-Application-Stake-Tracking-6a8bebb107db4f7f9dc62cbe7ba555f7
+ // Kept as repeated field for legacy and future compatibility
+ // Refs:
+ // - https://github.com/pokt-network/poktroll/pull/750#discussion_r1735025033
+ // - https://www.notion.so/buildwithgrove/Off-chain-Application-Stake-Tracking-6a8bebb107db4f7f9dc62cbe7ba555f7
ServiceConfigs []*types1.ApplicationServiceConfig `protobuf:"bytes,3,rep,name=service_configs,json=serviceConfigs,proto3" json:"service_configs,omitempty"`
// TODO_BETA(@bryanchriswhite): Rename `delegatee_gateway_addresses` to `gateway_addresses_delegated_to`.
// Ensure to rename all relevant configs, comments, variables, function names, etc as well.
+ // Non-nullable list of Bech32 encoded delegatee Gateway addresses
DelegateeGatewayAddresses []string `protobuf:"bytes,4,rep,name=delegatee_gateway_addresses,json=delegateeGatewayAddresses,proto3" json:"delegatee_gateway_addresses,omitempty"`
- // A map from sessionEndHeights to a list of Gateways.
- // The key is the height of the last block of the session during which the
- // respective undelegation was committed.
- // The value is a list of gateways being undelegated from.
+ // Mapping of session end heights to gateways being undelegated from
+ // - Key: Height of the last block of the session when undelegation tx was committed
+ // - Value: List of gateways being undelegated from
// TODO_DOCUMENT(@red-0ne): Need to document the flow from this comment
// so its clear to everyone why this is necessary; https://github.com/pokt-network/poktroll/issues/476#issuecomment-2052639906.
PendingUndelegations map[uint64]UndelegatingGatewayList `protobuf:"bytes,5,rep,name=pending_undelegations,json=pendingUndelegations,proto3" json:"pending_undelegations" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- // The end height of the session at which an application initiated its unstaking process.
- // If the application did not unstake, this value will be 0.
- UnstakeSessionEndHeight uint64 `protobuf:"varint,6,opt,name=unstake_session_end_height,json=unstakeSessionEndHeight,proto3" json:"unstake_session_end_height,omitempty"`
- PendingTransfer *PendingApplicationTransfer `protobuf:"bytes,7,opt,name=pending_transfer,json=pendingTransfer,proto3" json:"pending_transfer,omitempty"`
+ // Session end height when application initiated unstaking (0 if not unstaking)
+ UnstakeSessionEndHeight uint64 `protobuf:"varint,6,opt,name=unstake_session_end_height,json=unstakeSessionEndHeight,proto3" json:"unstake_session_end_height,omitempty"`
+ // Information about pending application transfers
+ PendingTransfer *PendingApplicationTransfer `protobuf:"bytes,7,opt,name=pending_transfer,json=pendingTransfer,proto3" json:"pending_transfer,omitempty"`
}
func (m *Application) Reset() { *m = Application{} }
diff --git a/x/gateway/types/errors.go b/x/gateway/types/errors.go
index 641f9d335..c704d08bd 100644
--- a/x/gateway/types/errors.go
+++ b/x/gateway/types/errors.go
@@ -12,5 +12,5 @@ var (
ErrGatewayUnauthorized = sdkerrors.Register(ModuleName, 1103, "unauthorized signer")
ErrGatewayNotFound = sdkerrors.Register(ModuleName, 1104, "gateway not found")
ErrGatewayParamInvalid = sdkerrors.Register(ModuleName, 1105, "the provided param is invalid")
- ErrGatewayEmitEvent = sdkerrors.Register(ModuleName, 1106, "unable to emit on-chain event")
+ ErrGatewayEmitEvent = sdkerrors.Register(ModuleName, 1106, "unable to emit onchain event")
)
diff --git a/x/proof/keeper/keeper.go b/x/proof/keeper/keeper.go
index 5c67ca8d4..cd1b8ae7a 100644
--- a/x/proof/keeper/keeper.go
+++ b/x/proof/keeper/keeper.go
@@ -65,19 +65,19 @@ func NewKeeper(
sharedQuerier := types.NewSharedKeeperQueryClient(sharedKeeper, sessionKeeper)
// RingKeeperClient holds the logic of verifying RelayRequests ring signatures
- // for both on-chain and off-chain actors.
+ // for both onchain and offchain actors.
//
// ApplicationQueriers & AccountQuerier are compatible with the environment
// they're used in and may or may not make an actual network request.
//
- // When used in an on-chain context, the ProofKeeper supplies AppKeeperQueryClient
+ // When used in an onchain context, the ProofKeeper supplies AppKeeperQueryClient
// and AccountKeeperQueryClient that are thin wrappers around the Application and
// Account keepers respectively to satisfy the RingClient needs.
//
// TODO_MAINNET(@red-0ne): Make ring signature verification a stateless
// function and get rid of the RingClient and its dependencies by moving
// application ring retrieval to the application keeper, and making it
- // retrievable using the application query client for off-chain actors. Signature
+ // retrievable using the application query client for offchain actors. Signature
// verification code will still be shared across off/on chain environments.
ringKeeperClientDeps := depinject.Supply(polylogger, applicationQuerier, accountQuerier, sharedQuerier)
ringKeeperClient, err := rings.NewRingClient(ringKeeperClientDeps)
diff --git a/x/proof/keeper/msg_server_create_claim.go b/x/proof/keeper/msg_server_create_claim.go
index f9e06055c..679897f34 100644
--- a/x/proof/keeper/msg_server_create_claim.go
+++ b/x/proof/keeper/msg_server_create_claim.go
@@ -37,7 +37,7 @@ func (k msgServer) CreateClaim(
}
logger.Info("validated the createClaim message")
- // Compare msg session header w/ on-chain session header.
+ // Compare msg session header w/ onchain session header.
session, err := k.queryAndValidateSessionHeader(ctx, msg.GetSessionHeader(), msg.GetSupplierOperatorAddress())
if err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
@@ -62,7 +62,7 @@ func (k msgServer) CreateClaim(
)
// Validate claim message commit height is within the respective session's
- // claim creation window using the on-chain session header.
+ // claim creation window using the onchain session header.
if err = k.validateClaimWindow(ctx, claim.SessionHeader, claim.SupplierOperatorAddress); err != nil {
return nil, status.Error(codes.FailedPrecondition, err.Error())
}
diff --git a/x/proof/keeper/msg_server_create_claim_test.go b/x/proof/keeper/msg_server_create_claim_test.go
index f97982cfd..14fab452b 100644
--- a/x/proof/keeper/msg_server_create_claim_test.go
+++ b/x/proof/keeper/msg_server_create_claim_test.go
@@ -86,7 +86,7 @@ func TestMsgServer_CreateClaim_Success(t *testing.T) {
for _, test := range tests {
t.Run(test.desc, func(t *testing.T) {
- // Set block height to 1 so there is a valid session on-chain.
+ // Set block height to 1 so there is a valid session onchain.
blockHeight := int64(1)
blockHeightOpt := keepertest.WithBlockHeight(blockHeight)
@@ -201,7 +201,7 @@ func TestMsgServer_CreateClaim_Success(t *testing.T) {
func TestMsgServer_CreateClaim_Error_OutsideOfWindow(t *testing.T) {
var claimWindowOpenBlockHash []byte
- // Set block height to 1 so there is a valid session on-chain.
+ // Set block height to 1 so there is a valid session onchain.
blockHeightOpt := keepertest.WithBlockHeight(1)
keepers, ctx := keepertest.NewProofModuleKeepers(t, blockHeightOpt)
sdkCtx := cosmostypes.UnwrapSDKContext(ctx)
@@ -326,7 +326,7 @@ func TestMsgServer_CreateClaim_Error_OutsideOfWindow(t *testing.T) {
}
func TestMsgServer_CreateClaim_Error(t *testing.T) {
- // Set block height to 1 so there is a valid session on-chain.
+ // Set block height to 1 so there is a valid session onchain.
blockHeightOpt := keepertest.WithBlockHeight(1)
keepers, ctx := keepertest.NewProofModuleKeepers(t, blockHeightOpt)
srv := keeper.NewMsgServerImpl(*keepers.Keeper)
@@ -412,7 +412,7 @@ func TestMsgServer_CreateClaim_Error(t *testing.T) {
expectedErr error
}{
{
- desc: "on-chain session ID must match claim msg session ID",
+ desc: "onchain session ID must match claim msg session ID",
claimMsgFn: func(t *testing.T) *types.MsgCreateClaim {
return newTestClaimMsg(t,
sessionStartHeight,
@@ -427,7 +427,7 @@ func TestMsgServer_CreateClaim_Error(t *testing.T) {
expectedErr: status.Error(
codes.InvalidArgument,
types.ErrProofInvalidSessionId.Wrapf(
- "session ID does not match on-chain session ID; expected %q, got %q",
+ "session ID does not match onchain session ID; expected %q, got %q",
sessionRes.GetSession().GetSessionId(),
"invalid_session_id",
).Error(),
@@ -456,12 +456,12 @@ func TestMsgServer_CreateClaim_Error(t *testing.T) {
),
},
{
- desc: "claim msg supplier operator address must exist on-chain",
+ desc: "claim msg supplier operator address must exist onchain",
claimMsgFn: func(t *testing.T) *types.MsgCreateClaim {
return newTestClaimMsg(t,
sessionStartHeight,
sessionRes.GetSession().GetSessionId(),
- // Use a supplier operat address that's nonexistent on-chain.
+ // Use a supplier operator address that's nonexistent onchain.
randSupplierOperatorAddr,
appAddr,
service,
@@ -500,13 +500,13 @@ func TestMsgServer_CreateClaim_Error(t *testing.T) {
),
},
{
- desc: "claim msg application address must exist on-chain",
+ desc: "claim msg application address must exist onchain",
claimMsgFn: func(t *testing.T) *types.MsgCreateClaim {
return newTestClaimMsg(t,
sessionStartHeight,
sessionRes.GetSession().GetSessionId(),
supplierOperatorAddr,
- // Use an application address that's nonexistent on-chain.
+ // Use an application address that's nonexistent onchain.
randAppAddr,
service,
defaultMerkleRoot,
@@ -539,7 +539,7 @@ func TestMsgServer_CreateClaim_Error(t *testing.T) {
}
func TestMsgServer_CreateClaim_Error_ComputeUnitsMismatch(t *testing.T) {
- // Set block height to 1 so there is a valid session on-chain.
+ // Set block height to 1 so there is a valid session onchain.
blockHeightOpt := keepertest.WithBlockHeight(1)
keepers, ctx := keepertest.NewProofModuleKeepers(t, blockHeightOpt)
sdkCtx := cosmostypes.UnwrapSDKContext(ctx)
@@ -555,7 +555,7 @@ func TestMsgServer_CreateClaim_Error_ComputeUnitsMismatch(t *testing.T) {
ComputeUnitsPerRelay: nonDefaultComputeUnitsPerRelay,
OwnerAddress: sample.AccAddress(),
}
- // Add the service that is expected to be on-chain.
+ // Add the service that is expected to be onchain.
keepers.SetService(ctx, *service)
// Add a supplier that is expected to be in the session.
diff --git a/x/proof/keeper/msg_server_submit_proof.go b/x/proof/keeper/msg_server_submit_proof.go
index 3e2870289..5952eb94b 100644
--- a/x/proof/keeper/msg_server_submit_proof.go
+++ b/x/proof/keeper/msg_server_submit_proof.go
@@ -19,8 +19,8 @@ import (
sharedtypes "github.com/pokt-network/poktroll/x/shared/types"
)
-// SubmitProof is the server handler to submit and store a proof on-chain.
-// A proof that's stored on-chain is what leads to rewards (i.e. inflation)
+// SubmitProof is the server handler to submit and store a proof onchain.
+// A proof that's stored onchain is what leads to rewards (i.e. inflation)
// downstream, making this a critical part of the protocol.
//
// Note that the validation of the proof is done in `EnsureValidProof`. However,
@@ -57,7 +57,7 @@ func (k msgServer) SubmitProof(
}
logger.Info("validated the submitProof message")
- // Compare msg session header w/ on-chain session header.
+ // Compare msg session header w/ onchain session header.
session, err := k.queryAndValidateSessionHeader(ctx, msg.GetSessionHeader(), msg.GetSupplierOperatorAddress())
if err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
@@ -85,7 +85,7 @@ func (k msgServer) SubmitProof(
"supplier_operator_address", proof.SupplierOperatorAddress)
// Validate proof message commit height is within the respective session's
- // proof submission window using the on-chain session header.
+ // proof submission window using the onchain session header.
if err = k.validateProofWindow(ctx, proof.SessionHeader, proof.SupplierOperatorAddress); err != nil {
return nil, status.Error(codes.FailedPrecondition, err.Error())
}
diff --git a/x/proof/keeper/msg_server_submit_proof_test.go b/x/proof/keeper/msg_server_submit_proof_test.go
index 35185f798..191fdff5d 100644
--- a/x/proof/keeper/msg_server_submit_proof_test.go
+++ b/x/proof/keeper/msg_server_submit_proof_test.go
@@ -86,9 +86,9 @@ func TestMsgServer_SubmitProof_Success(t *testing.T) {
for _, test := range tests {
t.Run(test.desc, func(t *testing.T) {
opts := []keepertest.ProofKeepersOpt{
- // Set block hash so we can have a deterministic expected on-chain proof requested by the protocol.
+ // Set block hash so we can have a deterministic expected onchain proof requested by the protocol.
keepertest.WithBlockHash(blockHeaderHash),
- // Set block height to 1 so there is a valid session on-chain.
+ // Set block height to 1 so there is a valid session onchain.
keepertest.WithBlockHeight(1),
}
keepers, ctx := keepertest.NewProofModuleKeepers(t, opts...)
@@ -264,9 +264,9 @@ func TestMsgServer_SubmitProof_Error_OutsideOfWindow(t *testing.T) {
var claimWindowOpenHeightBlockHash, proofWindowOpenHeightBlockHash []byte
opts := []keepertest.ProofKeepersOpt{
- // Set block hash so we can have a deterministic expected on-chain proof requested by the protocol.
+ // Set block hash so we can have a deterministic expected onchain proof requested by the protocol.
keepertest.WithBlockHash(blockHeaderHash),
- // Set block height to 1 so there is a valid session on-chain.
+ // Set block height to 1 so there is a valid session onchain.
keepertest.WithBlockHeight(1),
}
keepers, ctx := keepertest.NewProofModuleKeepers(t, opts...)
@@ -438,10 +438,10 @@ func TestMsgServer_SubmitProof_Error_OutsideOfWindow(t *testing.T) {
func TestMsgServer_SubmitProof_Error(t *testing.T) {
opts := []keepertest.ProofKeepersOpt{
- // Set block hash such that on-chain closest merkle proof validation
+ // Set block hash such that onchain closest merkle proof validation
// uses the expected path.
keepertest.WithBlockHash(blockHeaderHash),
- // Set block height to 1 so there is a valid session on-chain.
+ // Set block height to 1 so there is a valid session onchain.
keepertest.WithBlockHeight(1),
}
keepers, ctx := keepertest.NewProofModuleKeepers(t, opts...)
@@ -618,7 +618,7 @@ func TestMsgServer_SubmitProof_Error(t *testing.T) {
},
},
{
- desc: "proof session ID must match on-chain session ID",
+ desc: "proof session ID must match onchain session ID",
newProofMsg: func(t *testing.T) *prooftypes.MsgSubmitProof {
// Construct new proof message using the wrong session ID.
return newTestProofMsg(t,
@@ -632,7 +632,7 @@ func TestMsgServer_SubmitProof_Error(t *testing.T) {
return status.Error(
codes.InvalidArgument,
prooftypes.ErrProofInvalidSessionId.Wrapf(
- "session ID does not match on-chain session ID; expected %q, got %q",
+ "session ID does not match onchain session ID; expected %q, got %q",
validSessionHeader.GetSessionId(),
msgSubmitProof.GetSessionHeader().GetSessionId(),
).Error(),
@@ -640,7 +640,7 @@ func TestMsgServer_SubmitProof_Error(t *testing.T) {
},
},
{
- desc: "proof supplier must be in on-chain session",
+ desc: "proof supplier must be in onchain session",
newProofMsg: func(t *testing.T) *prooftypes.MsgSubmitProof {
// Construct a proof message with a supplier that does not belong in the session.
return newTestProofMsg(t,
@@ -708,9 +708,9 @@ func TestMsgServer_SubmitProof_Error(t *testing.T) {
func TestMsgServer_SubmitProof_FailSubmittingNonRequiredProof(t *testing.T) {
opts := []keepertest.ProofKeepersOpt{
- // Set block hash so we can have a deterministic expected on-chain proof requested by the protocol.
+ // Set block hash so we can have a deterministic expected onchain proof requested by the protocol.
keepertest.WithBlockHash(blockHeaderHash),
- // Set block height to 1 so there is a valid session on-chain.
+ // Set block height to 1 so there is a valid session onchain.
keepertest.WithBlockHeight(1),
}
keepers, ctx := keepertest.NewProofModuleKeepers(t, opts...)
@@ -843,7 +843,7 @@ func TestMsgServer_SubmitProof_FailSubmittingNonRequiredProof(t *testing.T) {
}
// newTestProofMsg creates a new submit proof message that can be submitted
-// to be validated and stored on-chain.
+// to be validated and stored onchain.
func newTestProofMsg(
t *testing.T,
supplierOperatorAddr string,
@@ -869,7 +869,7 @@ func newTestProofMsg(
}
}
-// createClaimAndStoreBlockHash creates a valid claim, submits it on-chain,
+// createClaimAndStoreBlockHash creates a valid claim, submits it onchain,
// and on success, stores the block hash for retrieval at future heights.
// TODO_TECHDEBT(@bryanchriswhite): Consider if we could/should split
// this into two functions.
diff --git a/x/proof/keeper/proof_validation.go b/x/proof/keeper/proof_validation.go
index c9f683cf1..bb45be1aa 100644
--- a/x/proof/keeper/proof_validation.go
+++ b/x/proof/keeper/proof_validation.go
@@ -4,8 +4,8 @@ package keeper
TODO_MAINNET: Document these steps in the docs and link here.
## Actions (error if anything fails)
- 1. Retrieve a fully hydrated `session` from on-chain store using `msg` metadata
- 2. Retrieve a fully hydrated `claim` from on-chain store using `msg` metadata
+ 1. Retrieve a fully hydrated `session` from onchain store using `msg` metadata
+ 2. Retrieve a fully hydrated `claim` from onchain store using `msg` metadata
3. Retrieve `relay.Req` and `relay.Res` from deserializing `proof.ClosestValueHash`
## Basic Validations (metadata only)
@@ -23,7 +23,7 @@ package keeper
2. verify(relay.Res.Signature, supplier.pubKey)
## Relay Mining validation
- 1. verify(proof.path) is the expected path; pseudo-random variation using on-chain data
+ 1. verify(proof.path) is the expected path; pseudo-random variation using onchain data
2. verify(proof.ValueHash, expectedDifficulty); governance based
3. verify(claim.Root, proof.ClosestProof); verify the closest proof is correct
*/
@@ -43,7 +43,7 @@ import (
)
// EnsureValidProof validates the proof submitted by the supplier is correct with
-// respect to an on-chain claim.
+// respect to an onchain claim.
//
// This function should be called during session settlement (i.e. EndBlocker)
// rather than during proof submission (i.e. SubmitProof) because:
@@ -80,13 +80,13 @@ func (k Keeper) EnsureValidProof(
}
logger.Info("queried and validated the session header")
- // Re-hydrate message session header with the on-chain session header.
+ // Re-hydrate message session header with the onchain session header.
// This corrects for discrepancies between unvalidated fields in the session
// header which can be derived from known values (e.g. session end height).
sessionHeader := onChainSession.GetHeader()
// Validate proof message commit height is within the respective session's
- // proof submission window using the on-chain session header.
+ // proof submission window using the onchain session header.
if err = k.validateProofWindow(ctx, sessionHeader, supplierOperatorAddr); err != nil {
return err
}
@@ -180,7 +180,7 @@ func (k Keeper) EnsureValidProof(
logger.Debug("successfully validated relay mining difficulty")
// Validate that path the proof is submitted for matches the expected one
- // based on the pseudo-random on-chain data associated with the header.
+ // based on the pseudo-random onchain data associated with the header.
if err = k.validateClosestPath(
ctx,
sparseMerkleClosestProof,
@@ -249,7 +249,7 @@ func (k Keeper) validateClosestPath(
expectedProofPath := protocol.GetPathForProof(proofPathSeedBlockHash, sessionHeader.GetSessionId())
if !bytes.Equal(proof.Path, expectedProofPath) {
return types.ErrProofInvalidProof.Wrapf(
- "the path of the proof provided (%x) does not match one expected by the on-chain protocol (%x)",
+ "the path of the proof provided (%x) does not match one expected by the onchain protocol (%x)",
proof.Path,
expectedProofPath,
)
diff --git a/x/proof/keeper/proof_validation_test.go b/x/proof/keeper/proof_validation_test.go
index a3e6133ab..f6c3fa970 100644
--- a/x/proof/keeper/proof_validation_test.go
+++ b/x/proof/keeper/proof_validation_test.go
@@ -31,10 +31,10 @@ import (
func TestEnsureValidProof_Error(t *testing.T) {
opts := []keepertest.ProofKeepersOpt{
- // Set block hash such that on-chain closest merkle proof validation
+ // Set block hash such that onchain closest merkle proof validation
// uses the expected path.
keepertest.WithBlockHash(blockHeaderHash),
- // Set block height to 1 so there is a valid session on-chain.
+ // Set block height to 1 so there is a valid session onchain.
keepertest.WithBlockHeight(1),
}
keepers, ctx := keepertest.NewProofModuleKeepers(t, opts...)
@@ -210,7 +210,7 @@ func TestEnsureValidProof_Error(t *testing.T) {
expectedMerkleProofPath)
},
expectedErr: prooftypes.ErrProofInvalidSessionId.Wrapf(
- "session ID does not match on-chain session ID; expected %q, got %q",
+ "session ID does not match onchain session ID; expected %q, got %q",
validSessionHeader.GetSessionId(),
"",
),
@@ -235,7 +235,7 @@ func TestEnsureValidProof_Error(t *testing.T) {
),
},
{
- desc: "proof session ID must match on-chain session ID",
+ desc: "proof session ID must match onchain session ID",
newProof: func(t *testing.T) *prooftypes.Proof {
// Construct new proof message using the wrong session ID.
return testtree.NewProof(t,
@@ -246,13 +246,13 @@ func TestEnsureValidProof_Error(t *testing.T) {
)
},
expectedErr: prooftypes.ErrProofInvalidSessionId.Wrapf(
- "session ID does not match on-chain session ID; expected %q, got %q",
+ "session ID does not match onchain session ID; expected %q, got %q",
validSessionHeader.GetSessionId(),
wrongSessionIdHeader.GetSessionId(),
),
},
{
- desc: "proof supplier must be in on-chain session",
+ desc: "proof supplier must be in onchain session",
newProof: func(t *testing.T) *prooftypes.Proof {
// Construct a proof message with a supplier that does not belong in the session.
return testtree.NewProof(t,
@@ -582,7 +582,7 @@ func TestEnsureValidProof_Error(t *testing.T) {
return testtree.NewProof(t, supplierOperatorAddr, validSessionHeader, wrongPathSessionTree, wrongClosestProofPath)
},
expectedErr: prooftypes.ErrProofInvalidProof.Wrapf(
- "the path of the proof provided (%x) does not match one expected by the on-chain protocol (%x)",
+ "the path of the proof provided (%x) does not match one expected by the onchain protocol (%x)",
wrongClosestProofPath,
protocol.GetPathForProof(sdkCtx.HeaderHash(), validSessionHeader.GetSessionId()),
),
@@ -729,21 +729,21 @@ func TestEnsureValidProof_Error(t *testing.T) {
{
desc: "claim and proof application addresses must match",
newProof: func(t *testing.T) *prooftypes.Proof {
- t.Skip("this test case reduces to either the 'claim must exist for proof message' or 'proof session ID must match on-chain session ID cases")
+ t.Skip("this test case reduces to either the 'claim must exist for proof message' or 'proof session ID must match onchain session ID' cases")
return nil
},
},
{
desc: "claim and proof service IDs must match",
newProof: func(t *testing.T) *prooftypes.Proof {
- t.Skip("this test case reduces to either the 'claim must exist for proof message' or 'proof session ID must match on-chain session ID cases")
+ t.Skip("this test case reduces to either the 'claim must exist for proof message' or 'proof session ID must match onchain session ID' cases")
return nil
},
},
{
desc: "claim and proof supplier operator addresses must match",
newProof: func(t *testing.T) *prooftypes.Proof {
- t.Skip("this test case reduces to either the 'claim must exist for proof message' or 'proof session ID must match on-chain session ID cases")
+ t.Skip("this test case reduces to either the 'claim must exist for proof message' or 'proof session ID must match onchain session ID' cases")
return nil
},
},
diff --git a/x/proof/keeper/query_proof.go b/x/proof/keeper/query_proof.go
index f2a84ba42..4d596f948 100644
--- a/x/proof/keeper/query_proof.go
+++ b/x/proof/keeper/query_proof.go
@@ -13,7 +13,10 @@ import (
"github.com/pokt-network/poktroll/x/proof/types"
)
-func (k Keeper) AllProofs(ctx context.Context, req *types.QueryAllProofsRequest) (*types.QueryAllProofsResponse, error) {
+func (k Keeper) AllProofs(
+ ctx context.Context,
+ req *types.QueryAllProofsRequest,
+) (*types.QueryAllProofsResponse, error) {
logger := k.Logger().With("method", "AllProofs")
if req == nil {
diff --git a/x/proof/keeper/session.go b/x/proof/keeper/session.go
index 68a72cf10..a07d0d897 100644
--- a/x/proof/keeper/session.go
+++ b/x/proof/keeper/session.go
@@ -14,7 +14,7 @@ import (
// queryAndValidateSessionHeader ensures that a session with the sessionID of the given session
// header exists and that this session includes the supplier with the given operator address.
-// It returns a session which is hydrated with the on-chain session data.
+// It returns a session which is hydrated with the onchain session data.
func (k Keeper) queryAndValidateSessionHeader(
ctx context.Context,
sessionHeader *sessiontypes.SessionHeader,
@@ -28,7 +28,7 @@ func (k Keeper) queryAndValidateSessionHeader(
BlockHeight: sessionHeader.GetSessionStartBlockHeight(),
}
- // Get the on-chain session for the ground-truth against which the given
+ // Get the onchain session for the ground-truth against which the given
// session header is to be validated.
sessionRes, err := k.sessionKeeper.GetSession(ctx, sessionReq)
if err != nil {
@@ -46,17 +46,17 @@ func (k Keeper) queryAndValidateSessionHeader(
).
Debug("got sessionId for proof")
- // Ensure that the given session header's session ID matches the on-chain onChainSession ID.
+ // Ensure that the given session header's session ID matches the onchain onChainSession ID.
if sessionHeader.GetSessionId() != onChainSession.GetSessionId() {
return nil, types.ErrProofInvalidSessionId.Wrapf(
- "session ID does not match on-chain session ID; expected %q, got %q",
+ "session ID does not match onchain session ID; expected %q, got %q",
onChainSession.GetSessionId(),
sessionHeader.GetSessionId(),
)
}
// NB: it is redundant to assert that the service ID in the request matches the
- // on-chain session service ID because the session is queried using the service
+ // onchain session service ID because the session is queried using the service
// ID as a parameter. Either a different session (i.e. different session ID)
// or an error would be returned depending on whether an application/supplier
// pair exists for the given service ID or not, respectively.
@@ -77,7 +77,7 @@ func (k Keeper) queryAndValidateSessionHeader(
}
// validateClaimWindow returns an error if the given session is not eligible for claiming.
-// It *assumes* that the msg's session header is a valid on-chain session with correct
+// It *assumes* that the msg's session header is a valid onchain session with correct
// height fields. First call #queryAndValidateSessionHeader to ensure any user-provided
// session header is valid and correctly hydrated.
func (k Keeper) validateClaimWindow(
@@ -145,7 +145,7 @@ func (k Keeper) validateClaimWindow(
}
// validateProofWindow returns an error if the given session is not eligible for proving.
-// It *assumes* that the msg's session header is a valid on-chain session with correct
+// It *assumes* that the msg's session header is a valid onchain session with correct
// height fields. First call #queryAndValidateSessionHeader to ensure any user-provided
// session header is valid and correctly hydrated.
func (k Keeper) validateProofWindow(
diff --git a/x/proof/types/account_query_client.go b/x/proof/types/account_query_client.go
index 86ec77a6e..3f2a97374 100644
--- a/x/proof/types/account_query_client.go
+++ b/x/proof/types/account_query_client.go
@@ -13,7 +13,7 @@ var _ client.AccountQueryClient = (*AccountKeeperQueryClient)(nil)
// AccountKeeperQueryClient is a thin wrapper around the AccountKeeper.
// It does not rely on the QueryClient, and therefore does not make any
-// network requests as in the off-chain implementation.
+// network requests as in the offchain implementation.
type AccountKeeperQueryClient struct {
keeper AccountKeeper
}
diff --git a/x/proof/types/application_query_client.go b/x/proof/types/application_query_client.go
index 1cd887314..1cd526604 100644
--- a/x/proof/types/application_query_client.go
+++ b/x/proof/types/application_query_client.go
@@ -11,7 +11,7 @@ var _ client.ApplicationQueryClient = (*AppKeeperQueryClient)(nil)
// AppKeeperQueryClient is a thin wrapper around the AccountKeeper.
// It does not rely on the QueryClient, and therefore does not make any
-// network requests as in the off-chain implementation.
+// network requests as in the offchain implementation.
type AppKeeperQueryClient struct {
keeper ApplicationKeeper
}
@@ -42,3 +42,9 @@ func (appQueryClient *AppKeeperQueryClient) GetApplication(
func (appQueryClient *AppKeeperQueryClient) GetAllApplications(ctx context.Context) ([]apptypes.Application, error) {
return appQueryClient.keeper.GetAllApplications(ctx), nil
}
+
+// GetParams returns the application module parameters.
+func (appQueryClient *AppKeeperQueryClient) GetParams(ctx context.Context) (*apptypes.Params, error) {
+ params := appQueryClient.keeper.GetParams(ctx)
+ return &params, nil
+}
diff --git a/x/proof/types/expected_keepers.go b/x/proof/types/expected_keepers.go
index 9d1fd765e..e2981ce1a 100644
--- a/x/proof/types/expected_keepers.go
+++ b/x/proof/types/expected_keepers.go
@@ -49,6 +49,7 @@ type ApplicationKeeper interface {
GetApplication(ctx context.Context, address string) (app apptypes.Application, found bool)
GetAllApplications(ctx context.Context) []apptypes.Application
SetApplication(context.Context, apptypes.Application)
+ GetParams(ctx context.Context) (params apptypes.Params)
}
// SharedKeeper defines the expected interface needed to retrieve shared information.
diff --git a/x/proof/types/params.pb.go b/x/proof/types/params.pb.go
index 8427231fa..9fb07ce08 100644
--- a/x/proof/types/params.pb.go
+++ b/x/proof/types/params.pb.go
@@ -45,7 +45,7 @@ type Params struct {
ProofMissingPenalty *types.Coin `protobuf:"bytes,4,opt,name=proof_missing_penalty,json=proofMissingPenalty,proto3" json:"proof_missing_penalty"`
// proof_submission_fee is the number of tokens (uPOKT) which should be paid by
// the supplier operator when submitting a proof.
- // This is needed to account for the cost of storing proofs on-chain and prevent
+ // This is needed to account for the cost of storing proofs onchain and prevent
// spamming (i.e. sybil bloat attacks) the network with non-required proofs.
// TODO_MAINNET: Consider renaming this to `proof_submission_fee_upokt`.
ProofSubmissionFee *types.Coin `protobuf:"bytes,5,opt,name=proof_submission_fee,json=proofSubmissionFee,proto3" json:"proof_submission_fee"`
diff --git a/x/proof/types/query_validation.go b/x/proof/types/query_validation.go
index e1742dd99..50e3cbb7f 100644
--- a/x/proof/types/query_validation.go
+++ b/x/proof/types/query_validation.go
@@ -59,6 +59,7 @@ func (query *QueryGetProofRequest) ValidateBasic() error {
return nil
}
+// ValidateBasic performs basic (non-state-dependent) validation on a QueryAllProofsRequest.
func (query *QueryAllProofsRequest) ValidateBasic() error {
// TODO_TECHDEBT: update function signature to receive a context.
logger := polylog.Ctx(context.TODO())
diff --git a/x/proof/types/shared_query_client.go b/x/proof/types/shared_query_client.go
index 574735e7e..ead2cf97f 100644
--- a/x/proof/types/shared_query_client.go
+++ b/x/proof/types/shared_query_client.go
@@ -11,7 +11,7 @@ var _ client.SharedQueryClient = (*SharedKeeperQueryClient)(nil)
// SharedKeeperQueryClient is a thin wrapper around the SharedKeeper.
// It does not rely on the QueryClient, and therefore does not make any
-// network requests as in the off-chain implementation.
+// network requests as in the offchain implementation.
type SharedKeeperQueryClient struct {
sharedKeeper SharedKeeper
sessionKeeper SessionKeeper
@@ -29,7 +29,7 @@ func NewSharedKeeperQueryClient(
}
}
-// GetParams queries & returns the shared module on-chain parameters.
+// GetParams queries & returns the shared module onchain parameters.
func (sqc *SharedKeeperQueryClient) GetParams(
ctx context.Context,
) (params *sharedtypes.Params, err error) {
diff --git a/x/proof/types/types.pb.go b/x/proof/types/types.pb.go
index a316197dc..088ac987e 100644
--- a/x/proof/types/types.pb.go
+++ b/x/proof/types/types.pb.go
@@ -143,7 +143,7 @@ func (m *Proof) GetClosestMerkleProof() []byte {
return nil
}
-// Claim is the serialized object stored on-chain for claims pending to be proven
+// Claim is the serialized object stored onchain for claims pending to be proven
type Claim struct {
SupplierOperatorAddress string `protobuf:"bytes,1,opt,name=supplier_operator_address,json=supplierOperatorAddress,proto3" json:"supplier_operator_address,omitempty"`
// The session header of the session that this claim is for.
diff --git a/x/service/keeper/msg_server_add_service.go b/x/service/keeper/msg_server_add_service.go
index 4596883f9..8a1fea482 100644
--- a/x/service/keeper/msg_server_add_service.go
+++ b/x/service/keeper/msg_server_add_service.go
@@ -15,7 +15,7 @@ import (
// AddService adds a service to the network.
// The operation checks if the signer has enough funds (upokt) to pay the AddServiceFee.
// If funds are insufficient, the service won't be added. Otherwise, the fee is transferred from
-// the signer to the service module's account, afterwards the service will be present on-chain.
+// the signer to the service module's account, afterwards the service will be present onchain.
func (k msgServer) AddService(
goCtx context.Context,
msg *types.MsgAddService,
diff --git a/x/service/keeper/query_service.go b/x/service/keeper/query_service.go
index 73f420edc..999962eba 100644
--- a/x/service/keeper/query_service.go
+++ b/x/service/keeper/query_service.go
@@ -54,7 +54,8 @@ func (k Keeper) Service(ctx context.Context, req *types.QueryGetServiceRequest)
service, found := k.GetService(ctx, req.Id)
if !found {
- return nil, status.Error(codes.NotFound, "service ID not found")
+ msg := fmt.Sprintf("service ID not found: %q", req.GetId())
+ return nil, status.Error(codes.NotFound, msg)
}
return &types.QueryGetServiceResponse{Service: service}, nil
diff --git a/x/service/keeper/query_service_test.go b/x/service/keeper/query_service_test.go
index 7f2ab0584..02d123f36 100644
--- a/x/service/keeper/query_service_test.go
+++ b/x/service/keeper/query_service_test.go
@@ -43,9 +43,9 @@ func TestServiceQuerySingle(t *testing.T) {
{
desc: "KeyNotFound",
request: &types.QueryGetServiceRequest{
- Id: strconv.Itoa(100000),
+ Id: "service",
},
- expectedErr: status.Error(codes.NotFound, "service ID not found"),
+ expectedErr: status.Error(codes.NotFound, "service ID not found: \"service\""),
},
{
desc: "InvalidRequest",
diff --git a/x/service/keeper/update_relay_mining_difficulty.go b/x/service/keeper/update_relay_mining_difficulty.go
index b73e79131..d7e7512a4 100644
--- a/x/service/keeper/update_relay_mining_difficulty.go
+++ b/x/service/keeper/update_relay_mining_difficulty.go
@@ -30,8 +30,8 @@ var (
emaSmoothingFactor = new(big.Float).SetFloat64(0.1)
)
-// UpdateRelayMiningDifficulty updates the on-chain relay mining difficulty
-// based on the amount of on-chain relays for each service, given a map of serviceId->numRelays.
+// UpdateRelayMiningDifficulty updates the onchain relay mining difficulty
+// based on the amount of onchain relays for each service, given a map of serviceId->numRelays.
func (k Keeper) UpdateRelayMiningDifficulty(
ctx context.Context,
relaysPerServiceMap map[string]uint64,
diff --git a/x/service/types/params.pb.go b/x/service/types/params.pb.go
index 3fd9295e8..5f7693d39 100644
--- a/x/service/types/params.pb.go
+++ b/x/service/types/params.pb.go
@@ -32,7 +32,7 @@ type Params struct {
// and transferred to the pocket network foundation.
AddServiceFee *types.Coin `protobuf:"bytes,1,opt,name=add_service_fee,json=addServiceFee,proto3" json:"add_service_fee" yaml:"add_service_fee"`
// target_num_relays is the target for the EMA of the number of relays per session.
- // Per service, on-chain relay mining difficulty will be adjusted to maintain this target.
+ // Per service, onchain relay mining difficulty will be adjusted to maintain this target.
TargetNumRelays uint64 `protobuf:"varint,2,opt,name=target_num_relays,json=targetNumRelays,proto3" json:"target_num_relays" yaml:"target_num_relays"`
}
diff --git a/x/service/types/relay.go b/x/service/types/relay.go
index 57264aafc..bb21139b8 100644
--- a/x/service/types/relay.go
+++ b/x/service/types/relay.go
@@ -83,7 +83,7 @@ func (res RelayResponse) GetSignableBytesHash() ([protocol.RelayHasherSize]byte,
// TODO_TEST: Add tests for RelayResponse validation
func (res *RelayResponse) ValidateBasic() error {
// TODO_POST_MAINNET: if a client gets a response with an invalid/incomplete
- // SessionHeader, consider sending an on-chain challenge, lowering their
+ // SessionHeader, consider sending an onchain challenge, lowering their
// QoS, or other future work.
meta := res.GetMeta()
diff --git a/x/service/types/relay.pb.go b/x/service/types/relay.pb.go
index 305ff1de7..9a1129ac5 100644
--- a/x/service/types/relay.pb.go
+++ b/x/service/types/relay.pb.go
@@ -83,7 +83,7 @@ type RelayRequestMetadata struct {
// application has delegated to. The signature is made using the ring of the
// application in both cases.
Signature []byte `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
- // TODO_MAINNET: make sure we're checking/verifying this address on-chain (if needed).
+ // TODO_MAINNET: make sure we're checking/verifying this address onchain (if needed).
// Relevant conversation: https://github.com/pokt-network/poktroll/pull/567#discussion_r1628722168
//
// The supplier operator address the relay is sent to. It is being used on the
diff --git a/x/service/types/relay_mining_difficulty.pb.go b/x/service/types/relay_mining_difficulty.pb.go
index 751c4b433..988d3ad48 100644
--- a/x/service/types/relay_mining_difficulty.pb.go
+++ b/x/service/types/relay_mining_difficulty.pb.go
@@ -23,7 +23,7 @@ var _ = math.Inf
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-// RelayMiningDifficulty is a message used to store the on-chain Relay Mining
+// RelayMiningDifficulty is a message used to store the onchain Relay Mining
// difficulty associated with a specific service ID.
// TODO_TECHDEBT: Embed this message in the Service message.
type RelayMiningDifficulty struct {
diff --git a/x/session/keeper/session_hydrator.go b/x/session/keeper/session_hydrator.go
index 10d2c11e9..eaf5661bb 100644
--- a/x/session/keeper/session_hydrator.go
+++ b/x/session/keeper/session_hydrator.go
@@ -108,7 +108,7 @@ func (k Keeper) hydrateSessionMetadata(ctx context.Context, sh *sessionHydrator)
return nil
}
-// hydrateSessionID use both session and on-chain data to determine a unique session ID
+// hydrateSessionID use both session and onchain data to determine a unique session ID
func (k Keeper) hydrateSessionID(ctx context.Context, sh *sessionHydrator) error {
prevHashBz := k.GetBlockHash(ctx, sh.sessionHeader.SessionStartBlockHeight)
@@ -290,7 +290,7 @@ func (k Keeper) GetSessionId(
}
// GetSessionId returns the string and bytes representation of the sessionId for the
-// session containing blockHeight, given the shared on-chain parameters, application
+// session containing blockHeight, given the shared onchain parameters, application
// address, service ID, and block hash.
func GetSessionId(
sharedParams *sharedtypes.Params,
@@ -316,7 +316,7 @@ func GetSessionId(
}
// getSessionStartBlockHeightBz returns the bytes representation of the session
-// start height for the session containing blockHeight, given the shared on-chain
+// start height for the session containing blockHeight, given the shared onchain
// parameters.
func getSessionStartBlockHeightBz(sharedParams *sharedtypes.Params, blockHeight int64) []byte {
sessionStartBlockHeight := sharedtypes.GetSessionStartHeight(sharedParams, blockHeight)
diff --git a/x/session/types/types.pb.go b/x/session/types/types.pb.go
index 2d12d8f47..43295f939 100644
--- a/x/session/types/types.pb.go
+++ b/x/session/types/types.pb.go
@@ -31,11 +31,11 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type SessionHeader struct {
ApplicationAddress string `protobuf:"bytes,1,opt,name=application_address,json=applicationAddress,proto3" json:"application_address,omitempty"`
ServiceId string `protobuf:"bytes,2,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
- // NOTE: session_id can be derived from the above values using on-chain but is included in the header for convenience
+ // NOTE: session_id can be derived from the above values using onchain but is included in the header for convenience
SessionId string `protobuf:"bytes,3,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"`
SessionStartBlockHeight int64 `protobuf:"varint,4,opt,name=session_start_block_height,json=sessionStartBlockHeight,proto3" json:"session_start_block_height,omitempty"`
// Note that`session_end_block_height` is a derivative of (`start` + `num_blocks_per_session`)
- // as goverened by on-chain params at the time of the session start.
+ // as governed by onchain params at the time of the session start.
// It is stored as an additional field to simplofy business logic in case
// the number of blocks_per_session changes during the session.
SessionEndBlockHeight int64 `protobuf:"varint,5,opt,name=session_end_block_height,json=sessionEndBlockHeight,proto3" json:"session_end_block_height,omitempty"`
diff --git a/x/shared/keeper/session.go b/x/shared/keeper/session.go
index 3eaebb044..f97c2a72f 100644
--- a/x/shared/keeper/session.go
+++ b/x/shared/keeper/session.go
@@ -7,7 +7,7 @@ import (
)
// GetSessionStartHeight returns the block height at which the session containing
-// queryHeight starts, given the current shared on-chain parameters.
+// queryHeight starts, given the current shared onchain parameters.
// Returns 0 if the block height is not a consensus produced block.
// Example: If NumBlocksPerSession == 4, sessions start at blocks 1, 5, 9, etc.
func (k Keeper) GetSessionStartHeight(ctx context.Context, queryHeight int64) int64 {
@@ -16,7 +16,7 @@ func (k Keeper) GetSessionStartHeight(ctx context.Context, queryHeight int64) in
}
// GetSessionEndHeight returns the block height at which the session containing
-// queryHeight ends, given the current shared on-chain parameters.
+// queryHeight ends, given the current shared onchain parameters.
// Returns 0 if the block height is not a consensus produced block.
// Example: If NumBlocksPerSession == 4, sessions end at blocks 4, 8, 11, etc.
func (k Keeper) GetSessionEndHeight(ctx context.Context, queryHeight int64) int64 {
@@ -25,7 +25,7 @@ func (k Keeper) GetSessionEndHeight(ctx context.Context, queryHeight int64) int6
}
// GetSessionNumber returns the session number for the session containing queryHeight,
-// given the current shared on-chain parameters.
+// given the current shared onchain parameters.
// Returns session number 0 if the block height is not a consensus produced block.
// Returns session number 1 for block 1 to block NumBlocksPerSession - 1 (inclusive).
// i.e. If NubBlocksPerSession == 4, session == 1 for [1, 4], session == 2 for [5, 8], etc.
diff --git a/x/shared/types/params.pb.go b/x/shared/types/params.pb.go
index 7b5863516..d4447f61f 100644
--- a/x/shared/types/params.pb.go
+++ b/x/shared/types/params.pb.go
@@ -47,12 +47,12 @@ type Params struct {
ProofWindowCloseOffsetBlocks uint64 `protobuf:"varint,6,opt,name=proof_window_close_offset_blocks,json=proofWindowCloseOffsetBlocks,proto3" json:"proof_window_close_offset_blocks"`
// supplier_unbonding_period_sessions is the number of sessions that a supplier must wait after
// unstaking before their staked assets are moved to their account balance.
- // On-chain business logic requires, and ensures, that the corresponding block count of the unbonding
+ // Onchain business logic requires, and ensures, that the corresponding block count of the unbonding
// period will exceed the end of any active claim & proof lifecycles.
SupplierUnbondingPeriodSessions uint64 `protobuf:"varint,7,opt,name=supplier_unbonding_period_sessions,json=supplierUnbondingPeriodSessions,proto3" json:"supplier_unbonding_period_sessions"`
// application_unbonding_period_sessions is the number of sessions that an application must wait after
// unstaking before their staked assets are moved to their account balance.
- // On-chain business logic requires, and ensures, that the corresponding block count of the
+ // Onchain business logic requires, and ensures, that the corresponding block count of the
// application unbonding period will exceed the end of its corresponding proof window close height.
ApplicationUnbondingPeriodSessions uint64 `protobuf:"varint,8,opt,name=application_unbonding_period_sessions,json=applicationUnbondingPeriodSessions,proto3" json:"application_unbonding_period_sessions"`
// The amount of upokt that a compute unit should translate to when settling a session.
diff --git a/x/shared/types/service.pb.go b/x/shared/types/service.pb.go
index d5998637f..6da21cd5f 100644
--- a/x/shared/types/service.pb.go
+++ b/x/shared/types/service.pb.go
@@ -7,7 +7,6 @@
package types
import (
- encoding_binary "encoding/binary"
fmt "fmt"
_ "github.com/cosmos/cosmos-proto"
_ "github.com/cosmos/gogoproto/gogoproto"
@@ -64,7 +63,7 @@ func (RPCType) EnumDescriptor() ([]byte, []int) {
}
// Enum to define configuration options
-// TODO_RESEARCH: Should these be configs, SLAs or something else? There will be more discussion once we get closer to implementing on-chain QoS.
+// TODO_RESEARCH: Should these be configs, SLAs or something else? There will be more discussion once we get closer to implementing onchain QoS.
type ConfigOptions int32
const (
@@ -101,7 +100,7 @@ type Service struct {
// cost_per_relay_for_specific_service = compute_units_per_relay_for_specific_service * compute_units_to_tokens_multipler_global_value
ComputeUnitsPerRelay uint64 `protobuf:"varint,3,opt,name=compute_units_per_relay,json=computeUnitsPerRelay,proto3" json:"compute_units_per_relay,omitempty"`
// The owner address that created the service.
- // It is the address that receives rewards based on the Service's on-chain usage
+ // It is the address that receives rewards based on the Service's onchain usage
// It is the only address that can update the service configuration (e.g. compute_units_per_relay),
// or make other updates to it.
OwnerAddress string `protobuf:"bytes,4,opt,name=owner_address,json=ownerAddress,proto3" json:"owner_address,omitempty"`
@@ -321,8 +320,8 @@ func (m *SupplierEndpoint) GetConfigs() []*ConfigOption {
// ServiceRevenueShare message to hold revenue share configuration details
type ServiceRevenueShare struct {
- Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"`
- RevSharePercentage float32 `protobuf:"fixed32,2,opt,name=rev_share_percentage,json=revSharePercentage,proto3" json:"rev_share_percentage,omitempty"`
+ Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"`
+ RevSharePercentage uint64 `protobuf:"varint,3,opt,name=rev_share_percentage,json=revSharePercentage,proto3" json:"rev_share_percentage,omitempty"`
}
func (m *ServiceRevenueShare) Reset() { *m = ServiceRevenueShare{} }
@@ -361,7 +360,7 @@ func (m *ServiceRevenueShare) GetAddress() string {
return ""
}
-func (m *ServiceRevenueShare) GetRevSharePercentage() float32 {
+func (m *ServiceRevenueShare) GetRevSharePercentage() uint64 {
if m != nil {
return m.RevSharePercentage
}
@@ -431,46 +430,47 @@ func init() {
func init() { proto.RegisterFile("poktroll/shared/service.proto", fileDescriptor_302c2f793a11ae1e) }
var fileDescriptor_302c2f793a11ae1e = []byte{
- // 621 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xc1, 0x6e, 0xd3, 0x40,
- 0x10, 0xcd, 0x26, 0x81, 0x24, 0xd3, 0x36, 0xb5, 0x86, 0x20, 0x4c, 0xa5, 0x5a, 0x25, 0xe2, 0x50,
- 0x55, 0x6a, 0x52, 0xa5, 0x42, 0x88, 0x03, 0x42, 0x6d, 0x14, 0xaa, 0x52, 0x35, 0x89, 0xd6, 0x29,
- 0x95, 0xb8, 0x58, 0xae, 0xbd, 0xa4, 0x56, 0x13, 0xaf, 0xb5, 0xb6, 0x53, 0x22, 0x7e, 0x02, 0xf1,
- 0x0b, 0xfc, 0x02, 0x27, 0xbe, 0x80, 0x63, 0xc5, 0xa9, 0x47, 0x94, 0xfe, 0x08, 0x5a, 0xaf, 0x1d,
- 0xa0, 0x41, 0x20, 0x6e, 0xe3, 0x79, 0x6f, 0x66, 0xde, 0xbe, 0x1d, 0x2f, 0xac, 0x07, 0xfc, 0x22,
- 0x12, 0x7c, 0x34, 0x6a, 0x86, 0xe7, 0xb6, 0x60, 0x6e, 0x33, 0x64, 0x62, 0xe2, 0x39, 0xac, 0x11,
- 0x08, 0x1e, 0x71, 0x5c, 0xcd, 0xe0, 0x86, 0x82, 0xd7, 0x1e, 0x3a, 0x3c, 0x1c, 0xf3, 0xd0, 0x4a,
- 0xe0, 0xa6, 0xfa, 0x50, 0xdc, 0xb5, 0xda, 0x90, 0x0f, 0xb9, 0xca, 0xcb, 0x48, 0x65, 0xeb, 0x9f,
- 0x08, 0x94, 0x4c, 0xd5, 0x13, 0xab, 0x90, 0xf7, 0x5c, 0x9d, 0x6c, 0x90, 0xcd, 0x0a, 0xcd, 0x7b,
- 0x2e, 0x22, 0x14, 0x7d, 0x7b, 0xcc, 0xf4, 0x7c, 0x92, 0x49, 0x62, 0x7c, 0x02, 0x0f, 0x1c, 0x3e,
- 0x0e, 0xe2, 0x88, 0x59, 0xb1, 0xef, 0x45, 0xa1, 0x15, 0x30, 0x61, 0x09, 0x36, 0xb2, 0xa7, 0x7a,
- 0x61, 0x83, 0x6c, 0x16, 0x69, 0x2d, 0x85, 0x4f, 0x24, 0xda, 0x67, 0x82, 0x4a, 0x0c, 0x9f, 0xc3,
- 0x0a, 0xbf, 0xf4, 0x99, 0xb0, 0x6c, 0xd7, 0x15, 0x2c, 0x0c, 0xf5, 0xa2, 0xec, 0xb9, 0xaf, 0x7f,
- 0xfb, 0xbc, 0x5d, 0x4b, 0x55, 0xee, 0x29, 0xc4, 0x8c, 0x84, 0xe7, 0x0f, 0xe9, 0x72, 0x42, 0x4f,
- 0x73, 0xf5, 0x67, 0xa0, 0xef, 0x05, 0xc1, 0xc8, 0x73, 0xec, 0xc8, 0xe3, 0x7e, 0xaa, 0xb7, 0xcd,
- 0xfd, 0xb7, 0xde, 0x10, 0xd7, 0x01, 0x52, 0x53, 0xac, 0xb9, 0xfa, 0x4a, 0x9a, 0x39, 0x74, 0xeb,
- 0x5f, 0x08, 0xdc, 0x37, 0x63, 0x59, 0xcc, 0xc4, 0xff, 0x14, 0xe2, 0x0b, 0xa8, 0x30, 0xdf, 0x0d,
- 0xb8, 0xe7, 0x47, 0xa1, 0x9e, 0xdf, 0x28, 0x6c, 0x2e, 0xb5, 0x1e, 0x35, 0x6e, 0xf9, 0xdd, 0xc8,
- 0x3a, 0x77, 0x52, 0x26, 0xfd, 0x59, 0x83, 0x7b, 0x50, 0x11, 0x6c, 0x62, 0x25, 0x4c, 0xbd, 0x90,
- 0x34, 0x78, 0xbc, 0xd8, 0x40, 0xcd, 0xa3, 0x6c, 0xc2, 0xfc, 0x98, 0x99, 0x32, 0x49, 0xcb, 0x82,
- 0x4d, 0x92, 0xa8, 0xfe, 0x91, 0x80, 0x76, 0x7b, 0x04, 0x6a, 0x50, 0x88, 0xc5, 0x28, 0x15, 0x2c,
- 0x43, 0xdc, 0x85, 0xb2, 0x08, 0x1c, 0x2b, 0x9a, 0x06, 0xea, 0xb2, 0xaa, 0x2d, 0x7d, 0x61, 0x10,
- 0xed, 0xb7, 0x07, 0xd3, 0x80, 0xd1, 0x92, 0x08, 0x1c, 0x19, 0xe0, 0x53, 0x28, 0x39, 0x89, 0x11,
- 0x61, 0x2a, 0x6e, 0x7d, 0xa1, 0x46, 0x19, 0xd5, 0x0b, 0xa4, 0xe9, 0x34, 0x63, 0xd7, 0xdf, 0xc3,
- 0xbd, 0x3f, 0xa8, 0xc6, 0x16, 0x94, 0xb2, 0xcb, 0x25, 0xff, 0xb8, 0xdc, 0x8c, 0x88, 0x3b, 0x50,
- 0x9b, 0x5b, 0x24, 0x37, 0xc9, 0x61, 0x7e, 0x64, 0x0f, 0xd5, 0x21, 0xf2, 0x14, 0x33, 0x1f, 0xfa,
- 0x73, 0xa4, 0xfe, 0x1a, 0x96, 0x7f, 0x55, 0x85, 0x3b, 0x50, 0xb8, 0x60, 0xd3, 0x64, 0x62, 0xb5,
- 0x65, 0xfc, 0xf5, 0x04, 0x21, 0x95, 0x54, 0xac, 0xc1, 0x9d, 0x89, 0x3d, 0x8a, 0xb3, 0xb5, 0x56,
- 0x1f, 0x5b, 0x47, 0x50, 0x4a, 0x1d, 0xc2, 0x55, 0x58, 0x3a, 0xe9, 0x1e, 0x75, 0x7b, 0xa7, 0x5d,
- 0x8b, 0xf6, 0xdb, 0x5a, 0x0e, 0xcb, 0x50, 0x3c, 0x90, 0x11, 0xc1, 0x15, 0xa8, 0x9c, 0x76, 0xf6,
- 0xcd, 0x5e, 0xfb, 0xa8, 0x33, 0xd0, 0xf2, 0xb8, 0x0c, 0xe5, 0x57, 0x66, 0x4f, 0xd1, 0x0a, 0x92,
- 0x46, 0x3b, 0xe6, 0x40, 0x2b, 0x6e, 0xed, 0xc0, 0xca, 0x6f, 0x83, 0x11, 0xa1, 0x9a, 0xb5, 0x6c,
- 0xf7, 0xba, 0x2f, 0x0f, 0x0f, 0xb4, 0x1c, 0x2e, 0x41, 0x69, 0x70, 0x78, 0xdc, 0xe9, 0x9d, 0x0c,
- 0x34, 0xb2, 0x7f, 0xfc, 0x75, 0x66, 0x90, 0xab, 0x99, 0x41, 0xae, 0x67, 0x06, 0xf9, 0x3e, 0x33,
- 0xc8, 0x87, 0x1b, 0x23, 0x77, 0x75, 0x63, 0xe4, 0xae, 0x6f, 0x8c, 0xdc, 0x9b, 0xe6, 0xd0, 0x8b,
- 0xce, 0xe3, 0xb3, 0x86, 0xc3, 0xc7, 0x4d, 0x79, 0xc2, 0x6d, 0x9f, 0x45, 0x97, 0x5c, 0x5c, 0x34,
- 0xe7, 0xaf, 0xc3, 0xbb, 0xec, 0x7d, 0x90, 0x3b, 0x10, 0x9e, 0xdd, 0x4d, 0x7e, 0xee, 0xdd, 0x1f,
- 0x01, 0x00, 0x00, 0xff, 0xff, 0x3f, 0x19, 0xf1, 0x60, 0x3f, 0x04, 0x00, 0x00,
+ // 628 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xcf, 0x6e, 0xd3, 0x4e,
+ 0x10, 0xce, 0xc6, 0xf9, 0xfd, 0x92, 0x4c, 0xdb, 0xd4, 0x1a, 0x82, 0x30, 0x95, 0x6a, 0x95, 0x88,
+ 0x43, 0x55, 0xa9, 0x49, 0x95, 0x0a, 0x21, 0x0e, 0x08, 0xb5, 0x51, 0xa8, 0xda, 0xaa, 0x49, 0xb4,
+ 0x4e, 0xa9, 0xc4, 0xc5, 0x72, 0xed, 0x25, 0xb5, 0xea, 0x78, 0xad, 0xb5, 0x9d, 0x92, 0x23, 0x6f,
+ 0x80, 0x78, 0x05, 0x5e, 0x81, 0x13, 0x4f, 0xc0, 0xb1, 0xe2, 0xd4, 0x23, 0x4a, 0x5f, 0x04, 0xad,
+ 0xff, 0x04, 0x68, 0x10, 0x88, 0xdb, 0x78, 0xbe, 0x6f, 0x66, 0xbe, 0xfd, 0x76, 0xbc, 0xb0, 0x1e,
+ 0xf0, 0xcb, 0x48, 0x70, 0xcf, 0x6b, 0x85, 0x17, 0x96, 0x60, 0x4e, 0x2b, 0x64, 0x62, 0xe2, 0xda,
+ 0xac, 0x19, 0x08, 0x1e, 0x71, 0x5c, 0xcd, 0xe1, 0x66, 0x0a, 0xaf, 0x3d, 0xb4, 0x79, 0x38, 0xe6,
+ 0xa1, 0x99, 0xc0, 0xad, 0xf4, 0x23, 0xe5, 0xae, 0xd5, 0x47, 0x7c, 0xc4, 0xd3, 0xbc, 0x8c, 0xd2,
+ 0x6c, 0xe3, 0x23, 0x81, 0xb2, 0x91, 0xf6, 0xc4, 0x1a, 0x14, 0x5d, 0x47, 0x23, 0x1b, 0x64, 0xb3,
+ 0x4a, 0x8b, 0xae, 0x83, 0x08, 0x25, 0xdf, 0x1a, 0x33, 0xad, 0x98, 0x64, 0x92, 0x18, 0x9f, 0xc0,
+ 0x03, 0x9b, 0x8f, 0x83, 0x38, 0x62, 0x66, 0xec, 0xbb, 0x51, 0x68, 0x06, 0x4c, 0x98, 0x82, 0x79,
+ 0xd6, 0x54, 0x53, 0x36, 0xc8, 0x66, 0x89, 0xd6, 0x33, 0xf8, 0x54, 0xa2, 0x03, 0x26, 0xa8, 0xc4,
+ 0xf0, 0x39, 0xac, 0xf0, 0x2b, 0x9f, 0x09, 0xd3, 0x72, 0x1c, 0xc1, 0xc2, 0x50, 0x2b, 0xc9, 0x9e,
+ 0xfb, 0xda, 0xd7, 0x4f, 0xdb, 0xf5, 0x4c, 0xe5, 0x5e, 0x8a, 0x18, 0x91, 0x70, 0xfd, 0x11, 0x5d,
+ 0x4e, 0xe8, 0x59, 0xae, 0xf1, 0x0c, 0xb4, 0xbd, 0x20, 0xf0, 0x5c, 0xdb, 0x8a, 0x5c, 0xee, 0x67,
+ 0x7a, 0x3b, 0xdc, 0x7f, 0xe3, 0x8e, 0x70, 0x1d, 0x20, 0x33, 0xc5, 0x9c, 0xab, 0xaf, 0x66, 0x99,
+ 0x43, 0xa7, 0xf1, 0x99, 0xc0, 0x7d, 0x23, 0x96, 0xc5, 0x4c, 0xfc, 0x4b, 0x21, 0xbe, 0x80, 0x2a,
+ 0xf3, 0x9d, 0x80, 0xbb, 0x7e, 0x14, 0x6a, 0xc5, 0x0d, 0x65, 0x73, 0xa9, 0xfd, 0xa8, 0x79, 0xc7,
+ 0xef, 0x66, 0xde, 0xb9, 0x9b, 0x31, 0xe9, 0x8f, 0x1a, 0xdc, 0x83, 0xaa, 0x60, 0x13, 0x33, 0x61,
+ 0x6a, 0x4a, 0xd2, 0xe0, 0xf1, 0x62, 0x83, 0x74, 0x1e, 0x65, 0x13, 0xe6, 0xc7, 0xcc, 0x90, 0x49,
+ 0x5a, 0x11, 0x6c, 0x92, 0x44, 0x8d, 0x0f, 0x04, 0xd4, 0xbb, 0x23, 0x50, 0x05, 0x25, 0x16, 0x5e,
+ 0x26, 0x58, 0x86, 0xb8, 0x0b, 0x15, 0x11, 0xd8, 0x66, 0x34, 0x0d, 0xd2, 0xcb, 0xaa, 0xb5, 0xb5,
+ 0x85, 0x41, 0x74, 0xd0, 0x19, 0x4e, 0x03, 0x46, 0xcb, 0x22, 0xb0, 0x65, 0x80, 0x4f, 0xa1, 0x6c,
+ 0x27, 0x46, 0x84, 0x99, 0xb8, 0xf5, 0x85, 0x9a, 0xd4, 0xa8, 0x7e, 0x20, 0x4d, 0xa7, 0x39, 0xbb,
+ 0xf1, 0x8e, 0xc0, 0xbd, 0xdf, 0xc8, 0xc6, 0x36, 0x94, 0xf3, 0xdb, 0x25, 0x7f, 0xb9, 0xdd, 0x9c,
+ 0x88, 0x3b, 0x50, 0x9f, 0x7b, 0x24, 0x57, 0xc9, 0x66, 0x7e, 0x64, 0x8d, 0x58, 0xb6, 0x4b, 0x98,
+ 0x1b, 0x31, 0x98, 0x23, 0x47, 0xa5, 0x4a, 0x51, 0x55, 0x1a, 0xaf, 0x60, 0xf9, 0x67, 0x71, 0xb8,
+ 0x03, 0xca, 0x25, 0x9b, 0x26, 0x73, 0x6b, 0x6d, 0xfd, 0x8f, 0x07, 0x09, 0xa9, 0xa4, 0x62, 0x1d,
+ 0xfe, 0x9b, 0x58, 0x5e, 0x9c, 0x6f, 0x77, 0xfa, 0xb1, 0x75, 0x0c, 0xe5, 0xcc, 0x28, 0x5c, 0x85,
+ 0xa5, 0xd3, 0xde, 0x71, 0xaf, 0x7f, 0xd6, 0x33, 0xe9, 0xa0, 0xa3, 0x16, 0xb0, 0x02, 0xa5, 0x03,
+ 0x19, 0x11, 0x5c, 0x81, 0xea, 0x59, 0x77, 0xdf, 0xe8, 0x77, 0x8e, 0xbb, 0x43, 0xb5, 0x88, 0xcb,
+ 0x50, 0x39, 0x32, 0xfa, 0x29, 0x4d, 0x91, 0x34, 0xda, 0x35, 0x86, 0x6a, 0x69, 0x6b, 0x07, 0x56,
+ 0x7e, 0x19, 0x8c, 0x08, 0xb5, 0xbc, 0x65, 0xa7, 0xdf, 0x7b, 0x79, 0x78, 0xa0, 0x16, 0x70, 0x09,
+ 0xca, 0xc3, 0xc3, 0x93, 0x6e, 0xff, 0x74, 0xa8, 0x92, 0xfd, 0x93, 0x2f, 0x33, 0x9d, 0x5c, 0xcf,
+ 0x74, 0x72, 0x33, 0xd3, 0xc9, 0xb7, 0x99, 0x4e, 0xde, 0xdf, 0xea, 0x85, 0xeb, 0x5b, 0xbd, 0x70,
+ 0x73, 0xab, 0x17, 0x5e, 0xb7, 0x46, 0x6e, 0x74, 0x11, 0x9f, 0x37, 0x6d, 0x3e, 0x6e, 0xc9, 0x13,
+ 0x6e, 0xfb, 0x2c, 0xba, 0xe2, 0xe2, 0xb2, 0x35, 0x7f, 0x24, 0xde, 0xe6, 0xcf, 0x84, 0x5c, 0x85,
+ 0xf0, 0xfc, 0xff, 0xe4, 0x1f, 0xdf, 0xfd, 0x1e, 0x00, 0x00, 0xff, 0xff, 0x46, 0xaf, 0x53, 0xa2,
+ 0x46, 0x04, 0x00, 0x00,
}
func (m *Service) Marshal() (dAtA []byte, err error) {
@@ -680,10 +680,9 @@ func (m *ServiceRevenueShare) MarshalToSizedBuffer(dAtA []byte) (int, error) {
var l int
_ = l
if m.RevSharePercentage != 0 {
- i -= 4
- encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.RevSharePercentage))))
+ i = encodeVarintService(dAtA, i, uint64(m.RevSharePercentage))
i--
- dAtA[i] = 0x15
+ dAtA[i] = 0x18
}
if len(m.Address) > 0 {
i -= len(m.Address)
@@ -836,7 +835,7 @@ func (m *ServiceRevenueShare) Size() (n int) {
n += 1 + l + sovService(uint64(l))
}
if m.RevSharePercentage != 0 {
- n += 5
+ n += 1 + sovService(uint64(m.RevSharePercentage))
}
return n
}
@@ -1456,17 +1455,25 @@ func (m *ServiceRevenueShare) Unmarshal(dAtA []byte) error {
}
m.Address = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
- case 2:
- if wireType != 5 {
+ case 3:
+ if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field RevSharePercentage", wireType)
}
- var v uint32
- if (iNdEx + 4) > l {
- return io.ErrUnexpectedEOF
+ m.RevSharePercentage = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowService
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.RevSharePercentage |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
- v = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:]))
- iNdEx += 4
- m.RevSharePercentage = float32(math.Float32frombits(v))
default:
iNdEx = preIndex
skippy, err := skipService(dAtA[iNdEx:])
diff --git a/x/shared/types/service_configs.go b/x/shared/types/service_configs.go
index 122a10241..bcfb3935e 100644
--- a/x/shared/types/service_configs.go
+++ b/x/shared/types/service_configs.go
@@ -7,7 +7,7 @@ import (
)
const (
- requiredRevSharePercentageSum = 100
+ requiredRevSharePercentageSum = uint64(100)
)
// ValidateAppServiceConfigs returns an error if any of the application service configs are invalid
@@ -95,7 +95,7 @@ func ValidateSupplierServiceConfigs(services []*SupplierServiceConfig) error {
// ensuring that the sum of the revenue share percentages is 100.
// NB: This function is unit tested via the supplier staking config tests.
func ValidateServiceRevShare(revShareList []*ServiceRevenueShare) error {
- revSharePercentageSum := float32(0)
+ revSharePercentageSum := uint64(0)
if len(revShareList) == 0 {
return ErrSharedInvalidRevShare.Wrap("no rev share configurations")
@@ -106,7 +106,7 @@ func ValidateServiceRevShare(revShareList []*ServiceRevenueShare) error {
return ErrSharedInvalidRevShare.Wrap("rev share cannot be nil")
}
- // Validate the revshare address
+ // Validate the revenue share address
if revShare.Address == "" {
return ErrSharedInvalidRevShare.Wrapf("rev share address cannot be empty: %v", revShare)
}
diff --git a/x/shared/types/session.go b/x/shared/types/session.go
index 355d83c1e..a697c70aa 100644
--- a/x/shared/types/session.go
+++ b/x/shared/types/session.go
@@ -1,7 +1,7 @@
package types
// GetSessionStartHeight returns the block height at which the session containing
-// queryHeight starts, given the passed shared on-chain parameters.
+// queryHeight starts, given the passed shared onchain parameters.
// Returns 0 if the block height is not a consensus produced block.
// Example: If NumBlocksPerSession == 4, sessions start at blocks 1, 5, 9, etc.
func GetSessionStartHeight(sharedParams *Params, queryHeight int64) int64 {
@@ -17,7 +17,7 @@ func GetSessionStartHeight(sharedParams *Params, queryHeight int64) int64 {
}
// GetSessionEndHeight returns the block height at which the session containing
-// queryHeight ends, given the passed shared on-chain parameters.
+// queryHeight ends, given the passed shared onchain parameters.
// Returns 0 if the block height is not a consensus produced block.
// Example: If NumBlocksPerSession == 4, sessions end at blocks 4, 8, 11, etc.
func GetSessionEndHeight(sharedParams *Params, queryHeight int64) int64 {
@@ -32,8 +32,8 @@ func GetSessionEndHeight(sharedParams *Params, queryHeight int64) int64 {
}
// GetSessionNumber returns the session number of the session containing queryHeight,
-// given the passed on-chain shared parameters.
-// shared on-chain parameters.
+// given the passed onchain shared parameters.
+// shared onchain parameters.
// Returns session number 0 if the block height is not a consensus produced block.
// Returns session number 1 for block 1 to block NumBlocksPerSession - 1 (inclusive).
// i.e. If NubBlocksPerSession == 4, session == 1 for [1, 4], session == 2 for [5, 8], etc.
@@ -171,7 +171,7 @@ func GetSessionEndToProofWindowCloseBlocks(params *Params) int64 {
}
// GetSettlementSessionEndHeight returns the end height of the session in which the
-// session that includes queryHeight is settled, given the passed shared on-chain parameters.
+// session that includes queryHeight is settled, given the passed shared onchain parameters.
func GetSettlementSessionEndHeight(sharedParams *Params, queryHeight int64) int64 {
return GetSessionEndToProofWindowCloseBlocks(sharedParams) +
GetSessionEndHeight(sharedParams, queryHeight) + 1
diff --git a/x/shared/types/supplier.pb.go b/x/shared/types/supplier.pb.go
index 2a062c371..dee27dc69 100644
--- a/x/shared/types/supplier.pb.go
+++ b/x/shared/types/supplier.pb.go
@@ -26,25 +26,26 @@ var _ = math.Inf
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-// Supplier is the type defining the actor in Pocket Network that provides RPC services.
+// Supplier represents an actor in Pocket Network that provides RPC services
type Supplier struct {
- // The address of the owner (i.e. staker, custodial) that owns the funds for staking.
- // By default, this address is the one that receives all the rewards unless owtherwise specified.
- // This property cannot be updated by the operator.
+ // Owner address that controls the staked funds and receives rewards by default
+ // Cannot be updated by the operator
OwnerAddress string `protobuf:"bytes,1,opt,name=owner_address,json=ownerAddress,proto3" json:"owner_address,omitempty"`
- // The operator address of the supplier operator (i.e. the one managing the off-chain server).
- // The operator address can update the supplier's configurations excluding the owner address.
- // This property does not change over the supplier's lifespan, the supplier must be unstaked
- // and re-staked to effectively update this value.
- OperatorAddress string `protobuf:"bytes,2,opt,name=operator_address,json=operatorAddress,proto3" json:"operator_address,omitempty"`
- Stake *types.Coin `protobuf:"bytes,3,opt,name=stake,proto3" json:"stake,omitempty"`
- Services []*SupplierServiceConfig `protobuf:"bytes,4,rep,name=services,proto3" json:"services,omitempty"`
- // The session end height at which an actively unbonding supplier unbonds its stake.
- // If the supplier did not unstake, this value will be 0.
+ // Operator address managing the offchain server
+ // Immutable for supplier's lifespan - requires unstake/re-stake to change.
+ // Can update supplier configs except for owner address.
+ OperatorAddress string `protobuf:"bytes,2,opt,name=operator_address,json=operatorAddress,proto3" json:"operator_address,omitempty"`
+ // Total amount of staked uPOKT
+ Stake *types.Coin `protobuf:"bytes,3,opt,name=stake,proto3" json:"stake,omitempty"`
+ // List of service configurations supported by this supplier
+ Services []*SupplierServiceConfig `protobuf:"bytes,4,rep,name=services,proto3" json:"services,omitempty"`
+ // Session end height when supplier initiated unstaking (0 if not unstaking)
UnstakeSessionEndHeight uint64 `protobuf:"varint,5,opt,name=unstake_session_end_height,json=unstakeSessionEndHeight,proto3" json:"unstake_session_end_height,omitempty"`
- // services_activation_heights_map is a map of serviceIds to the height at
- // which the staked supplier will become active for that service.
- // Activation heights are session start heights.
+ // Mapping of serviceIds to their activation heights
+ // - Key: serviceId
+ // - Value: Session start height when supplier becomes active for the service
+ // TODO_MAINNET(@olshansk, #1033): Look into moving this to an external repeated protobuf
+	// because maps are no longer supported for serialized types in the CosmosSDK.
ServicesActivationHeightsMap map[string]uint64 `protobuf:"bytes,6,rep,name=services_activation_heights_map,json=servicesActivationHeightsMap,proto3" json:"services_activation_heights_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
}
diff --git a/x/supplier/config/supplier_configs_reader.go b/x/supplier/config/supplier_configs_reader.go
index c2f009c2e..6c604677e 100644
--- a/x/supplier/config/supplier_configs_reader.go
+++ b/x/supplier/config/supplier_configs_reader.go
@@ -19,14 +19,14 @@ type YAMLStakeConfig struct {
OperatorAddress string `yaml:"operator_address"`
StakeAmount string `yaml:"stake_amount"`
Services []*YAMLStakeService `yaml:"services"`
- DefaultRevSharePercent map[string]float32 `yaml:"default_rev_share_percent"`
+ DefaultRevSharePercent map[string]uint64 `yaml:"default_rev_share_percent"`
}
// YAMLStakeService is the structure describing a single service entry in the
// stake config file.
type YAMLStakeService struct {
ServiceId string `yaml:"service_id"`
- RevSharePercent map[string]float32 `yaml:"rev_share_percent"`
+ RevSharePercent map[string]uint64 `yaml:"rev_share_percent"`
Endpoints []YAMLServiceEndpoint `yaml:"endpoints"`
}
@@ -102,7 +102,7 @@ func ParseSupplierConfigs(ctx context.Context, configContent []byte) (*SupplierS
)
}
- defaultRevSharePercent := map[string]float32{}
+ defaultRevSharePercent := map[string]uint64{}
if len(stakeConfig.DefaultRevSharePercent) == 0 {
// Ensure that if no default rev share is provided, the owner address is set
// to 100% rev share.
diff --git a/x/supplier/config/supplier_configs_reader_test.go b/x/supplier/config/supplier_configs_reader_test.go
index f5d557345..5b0f149bc 100644
--- a/x/supplier/config/supplier_configs_reader_test.go
+++ b/x/supplier/config/supplier_configs_reader_test.go
@@ -287,8 +287,8 @@ func Test_ParseSupplierConfigs_Services(t *testing.T) {
owner_address: %s
operator_address: %s
default_rev_share_percent:
- %s: 50.5
- %s: 49.5
+ %s: 51
+ %s: 49
stake_amount: 1000upokt
services:
# Service with default rev share
@@ -322,11 +322,11 @@ func Test_ParseSupplierConfigs_Services(t *testing.T) {
RevShare: []*types.ServiceRevenueShare{
{
Address: firstShareHolderAddress,
- RevSharePercentage: 50.5,
+ RevSharePercentage: 51,
},
{
Address: secondShareHolderAddress,
- RevSharePercentage: 49.5,
+ RevSharePercentage: 49,
},
},
},
@@ -728,24 +728,6 @@ func Test_ParseSupplierConfigs_Services(t *testing.T) {
`, ownerAddress, operatorAddress, firstShareHolderAddress, ""),
expectedError: config.ErrSupplierConfigUnmarshalYAML,
},
- {
- desc: "negative revenue share allocation is disallowed",
- inputConfig: fmt.Sprintf(`
- owner_address: %s
- operator_address: %s
- stake_amount: 1000upokt
- services:
- - service_id: svc
- endpoints:
- - publicly_exposed_url: http://pokt.network:8081
- rpc_type: json_rpc
- rev_share_percent:
- %s: 90
- %s: 11
- %s: -1
- `, ownerAddress, operatorAddress, ownerAddress, firstShareHolderAddress, secondShareHolderAddress),
- expectedError: sharedtypes.ErrSharedInvalidRevShare,
- },
{
desc: "errors when the rev share config is empty",
inputConfig: fmt.Sprintf(`
diff --git a/x/supplier/keeper/msg_server_unstake_supplier.go b/x/supplier/keeper/msg_server_unstake_supplier.go
index 3757a2224..47240db86 100644
--- a/x/supplier/keeper/msg_server_unstake_supplier.go
+++ b/x/supplier/keeper/msg_server_unstake_supplier.go
@@ -76,10 +76,10 @@ func (k msgServer) UnstakeSupplier(
// Mark the supplier as unstaking by recording the height at which it should stop
// providing service.
// The supplier MUST continue to provide service until the end of the current
- // session. I.e., on-chain sessions' suppliers list MUST NOT change mid-session.
+ // session. I.e., onchain sessions' suppliers list MUST NOT change mid-session.
// Removing it right away could have undesired effects on the network
// (e.g. a session with less than the minimum or 0 number of suppliers,
- // off-chain actors that need to listen to session supplier's change mid-session, etc).
+ // offchain actors that need to listen to session supplier's change mid-session, etc).
supplier.UnstakeSessionEndHeight = uint64(sharedtypes.GetSessionEndHeight(&sharedParams, currentHeight))
k.SetSupplier(ctx, supplier)
diff --git a/x/supplier/keeper/query_supplier.go b/x/supplier/keeper/query_supplier.go
index 3d370a8fa..bdecfd975 100644
--- a/x/supplier/keeper/query_supplier.go
+++ b/x/supplier/keeper/query_supplier.go
@@ -24,11 +24,18 @@ func (k Keeper) AllSuppliers(
return nil, status.Error(codes.InvalidArgument, "invalid request")
}
- var suppliers []sharedtypes.Supplier
+ if err := req.ValidateBasic(); err != nil {
+ return nil, status.Error(codes.InvalidArgument, err.Error())
+ }
+ // TODO_IMPROVE: Consider adding a custom onchain index (similar to proofs)
+ // based on other parameters (e.g. serviceId) if/when the performance of the
+ // flags used to filter the response becomes an issue.
store := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx))
supplierStore := prefix.NewStore(store, types.KeyPrefix(types.SupplierKeyOperatorPrefix))
+ var suppliers []sharedtypes.Supplier
+
pageRes, err := query.Paginate(
supplierStore,
req.Pagination,
@@ -40,6 +47,25 @@ func (k Keeper) AllSuppliers(
return status.Error(codes.Internal, err.Error())
}
+ serviceIdFilter := req.GetServiceId()
+ if serviceIdFilter != "" {
+ hasService := false
+ for _, supplierServiceConfig := range supplier.Services {
+ if supplierServiceConfig.ServiceId == serviceIdFilter {
+ hasService = true
+ break
+ }
+ }
+			// Exclude suppliers that do not provide the filtered service from the response.
+ if !hasService {
+ return nil
+ }
+ }
+
+ // TODO_MAINNET(@olshansk, #1033): Newer version of the CosmosSDK doesn't support maps.
+	// Decide on a direction w.r.t maps in protos based on feedback from the CosmosSDK team.
+ supplier.ServicesActivationHeightsMap = nil
+
suppliers = append(suppliers, supplier)
return nil
},
@@ -62,10 +88,13 @@ func (k Keeper) Supplier(
supplier, found := k.GetSupplier(ctx, req.OperatorAddress)
if !found {
- // TODO_TECHDEBT(@bryanchriswhite, #384): conform to logging conventions once established
- msg := fmt.Sprintf("supplier with address %q", req.GetOperatorAddress())
+ msg := fmt.Sprintf("supplier with address: %q", req.GetOperatorAddress())
return nil, status.Error(codes.NotFound, msg)
}
+ // TODO_MAINNET(@olshansk, #1033): Newer version of the CosmosSDK doesn't support maps.
+	// Decide on a direction w.r.t maps in protos based on feedback from the CosmosSDK team.
+ supplier.ServicesActivationHeightsMap = nil
+
return &types.QueryGetSupplierResponse{Supplier: supplier}, nil
}
diff --git a/x/supplier/keeper/query_supplier_test.go b/x/supplier/keeper/query_supplier_test.go
index b0d448ec4..880f815f9 100644
--- a/x/supplier/keeper/query_supplier_test.go
+++ b/x/supplier/keeper/query_supplier_test.go
@@ -1,6 +1,7 @@
package keeper_test
import (
+ "fmt"
"strconv"
"testing"
@@ -11,6 +12,7 @@ import (
keepertest "github.com/pokt-network/poktroll/testutil/keeper"
"github.com/pokt-network/poktroll/testutil/nullify"
+ "github.com/pokt-network/poktroll/testutil/sample"
"github.com/pokt-network/poktroll/x/supplier/types"
)
@@ -20,6 +22,8 @@ var _ = strconv.IntSize
func TestSupplierQuerySingle(t *testing.T) {
supplierModuleKeepers, ctx := keepertest.SupplierKeeper(t)
suppliers := createNSuppliers(*supplierModuleKeepers.Keeper, ctx, 2)
+ supplierAddr := sample.AccAddress()
+
tests := []struct {
desc string
request *types.QueryGetSupplierRequest
@@ -43,9 +47,9 @@ func TestSupplierQuerySingle(t *testing.T) {
{
desc: "KeyNotFound",
request: &types.QueryGetSupplierRequest{
- OperatorAddress: strconv.Itoa(100000),
+ OperatorAddress: supplierAddr,
},
- expectedErr: status.Error(codes.NotFound, "supplier with address \"100000\""),
+ expectedErr: status.Error(codes.NotFound, fmt.Sprintf("supplier with address: \"%s\"", supplierAddr)),
},
{
desc: "InvalidRequest",
@@ -70,7 +74,13 @@ func TestSupplierQuerySingle(t *testing.T) {
func TestSupplierQueryPaginated(t *testing.T) {
supplierModuleKeepers, ctx := keepertest.SupplierKeeper(t)
- msgs := createNSuppliers(*supplierModuleKeepers.Keeper, ctx, 5)
+ suppliers := createNSuppliers(*supplierModuleKeepers.Keeper, ctx, 5)
+
+ // TODO_MAINNET(@olshansk, #1033): Newer version of the CosmosSDK doesn't support maps.
+	// Decide on a direction w.r.t maps in protos based on feedback from the CosmosSDK team.
+ for _, supplier := range suppliers {
+ supplier.ServicesActivationHeightsMap = nil
+ }
request := func(next []byte, offset, limit uint64, total bool) *types.QueryAllSuppliersRequest {
return &types.QueryAllSuppliersRequest{
@@ -84,12 +94,12 @@ func TestSupplierQueryPaginated(t *testing.T) {
}
t.Run("ByOffset", func(t *testing.T) {
step := 2
- for i := 0; i < len(msgs); i += step {
+ for i := 0; i < len(suppliers); i += step {
resp, err := supplierModuleKeepers.AllSuppliers(ctx, request(nil, uint64(i), uint64(step), false))
require.NoError(t, err)
require.LessOrEqual(t, len(resp.Supplier), step)
require.Subset(t,
- nullify.Fill(msgs),
+ nullify.Fill(suppliers),
nullify.Fill(resp.Supplier),
)
}
@@ -97,12 +107,12 @@ func TestSupplierQueryPaginated(t *testing.T) {
t.Run("ByKey", func(t *testing.T) {
step := 2
var next []byte
- for i := 0; i < len(msgs); i += step {
+ for i := 0; i < len(suppliers); i += step {
resp, err := supplierModuleKeepers.AllSuppliers(ctx, request(next, 0, uint64(step), false))
require.NoError(t, err)
require.LessOrEqual(t, len(resp.Supplier), step)
require.Subset(t,
- nullify.Fill(msgs),
+ nullify.Fill(suppliers),
nullify.Fill(resp.Supplier),
)
next = resp.Pagination.NextKey
@@ -111,9 +121,9 @@ func TestSupplierQueryPaginated(t *testing.T) {
t.Run("Total", func(t *testing.T) {
resp, err := supplierModuleKeepers.AllSuppliers(ctx, request(nil, 0, 0, true))
require.NoError(t, err)
- require.Equal(t, len(msgs), int(resp.Pagination.Total))
+ require.Equal(t, len(suppliers), int(resp.Pagination.Total))
require.ElementsMatch(t,
- nullify.Fill(msgs),
+ nullify.Fill(suppliers),
nullify.Fill(resp.Supplier),
)
})
@@ -122,3 +132,39 @@ func TestSupplierQueryPaginated(t *testing.T) {
require.ErrorIs(t, err, status.Error(codes.InvalidArgument, "invalid request"))
})
}
+
+func TestSupplierQueryFilterByServiceId(t *testing.T) {
+ supplierModuleKeepers, ctx := keepertest.SupplierKeeper(t)
+ suppliers := createNSuppliers(*supplierModuleKeepers.Keeper, ctx, 5)
+
+ // Get the first service ID from the first supplier to use as filter
+ firstServiceId := suppliers[0].Services[0].ServiceId
+
+ request := &types.QueryAllSuppliersRequest{
+ Filter: &types.QueryAllSuppliersRequest_ServiceId{
+ ServiceId: firstServiceId,
+ },
+ Pagination: &query.PageRequest{
+ Limit: uint64(len(suppliers)),
+ },
+ }
+
+ resp, err := supplierModuleKeepers.AllSuppliers(ctx, request)
+ require.NoError(t, err)
+
+ // createNSuppliers assigns a separate service to each supplier
+ // so we can only expect one supplier to have the filtered service.
+ require.Len(t, resp.Supplier, 1)
+
+ // Verify each returned supplier has the filtered service
+ for _, supplier := range resp.Supplier {
+ hasService := false
+ for _, service := range supplier.Services {
+ if service.ServiceId == firstServiceId {
+ hasService = true
+ break
+ }
+ }
+ require.True(t, hasService, "Supplier should have the filtered service")
+ }
+}
diff --git a/x/supplier/keeper/supplier_test.go b/x/supplier/keeper/supplier_test.go
index 8cbddd555..cde7952c5 100644
--- a/x/supplier/keeper/supplier_test.go
+++ b/x/supplier/keeper/supplier_test.go
@@ -8,8 +8,11 @@ import (
"cosmossdk.io/math"
sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/query"
authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+ "github.com/gogo/status"
"github.com/stretchr/testify/require"
+ "google.golang.org/grpc/codes"
"github.com/pokt-network/poktroll/cmd/poktrolld/cmd"
keepertest "github.com/pokt-network/poktroll/testutil/keeper"
@@ -27,13 +30,7 @@ func init() {
cmd.InitSDKConfig()
}
-// The module address is derived off of its semantic name.
-// This test is a helper for us to easily identify the underlying address.
-func TestModuleAddressSupplier(t *testing.T) {
- moduleAddress := authtypes.NewModuleAddress(types.ModuleName)
- require.Equal(t, "pokt1j40dzzmn6cn9kxku7a5tjnud6hv37vesr5ccaa", moduleAddress.String())
-}
-
+// createNSuppliers creates n suppliers and stores them in the keeper
func createNSuppliers(keeper keeper.Keeper, ctx context.Context, n int) []sharedtypes.Supplier {
suppliers := make([]sharedtypes.Supplier, n)
for i := range suppliers {
@@ -59,7 +56,15 @@ func createNSuppliers(keeper keeper.Keeper, ctx context.Context, n int) []shared
return suppliers
}
-func TestSupplierGet(t *testing.T) {
+// DEV_NOTE: The account address is derived off of the module's semantic name (supplier).
+// This test is a helper for us to easily identify the underlying address.
+// See Module Accounts for more details: https://docs.cosmos.network/main/learn/beginner/accounts#module-accounts
+func TestModuleAddressSupplier(t *testing.T) {
+ moduleAddress := authtypes.NewModuleAddress(types.ModuleName)
+ require.Equal(t, "pokt1j40dzzmn6cn9kxku7a5tjnud6hv37vesr5ccaa", moduleAddress.String())
+}
+
+func TestSupplier_Get(t *testing.T) {
supplierModuleKeepers, ctx := keepertest.SupplierKeeper(t)
suppliers := createNSuppliers(*supplierModuleKeepers.Keeper, ctx, 10)
for _, supplier := range suppliers {
@@ -74,7 +79,7 @@ func TestSupplierGet(t *testing.T) {
}
}
-func TestSupplierRemove(t *testing.T) {
+func TestSupplier_Remove(t *testing.T) {
supplierModuleKeepers, ctx := keepertest.SupplierKeeper(t)
suppliers := createNSuppliers(*supplierModuleKeepers.Keeper, ctx, 10)
for _, supplier := range suppliers {
@@ -86,7 +91,7 @@ func TestSupplierRemove(t *testing.T) {
}
}
-func TestSupplierGetAll(t *testing.T) {
+func TestSupplier_GetAll(t *testing.T) {
supplierModuleKeepers, ctx := keepertest.SupplierKeeper(t)
suppliers := createNSuppliers(*supplierModuleKeepers.Keeper, ctx, 10)
require.ElementsMatch(t,
@@ -94,3 +99,145 @@ func TestSupplierGetAll(t *testing.T) {
nullify.Fill(supplierModuleKeepers.GetAllSuppliers(ctx)),
)
}
+
+func TestSupplier_Query(t *testing.T) {
+ keeper, ctx := keepertest.SupplierKeeper(t)
+ suppliers := createNSuppliers(*keeper.Keeper, ctx, 2)
+
+ tests := []struct {
+ desc string
+ request *types.QueryGetSupplierRequest
+ response *types.QueryGetSupplierResponse
+ expectedErr error
+ }{
+ {
+ desc: "supplier found",
+ request: &types.QueryGetSupplierRequest{
+ OperatorAddress: suppliers[0].OperatorAddress,
+ },
+ response: &types.QueryGetSupplierResponse{
+ Supplier: suppliers[0],
+ },
+ },
+ {
+ desc: "supplier not found",
+ request: &types.QueryGetSupplierRequest{
+ OperatorAddress: "non_existent_address",
+ },
+ expectedErr: status.Error(codes.NotFound, fmt.Sprintf("supplier with address: %q", "non_existent_address")),
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.desc, func(t *testing.T) {
+ response, err := keeper.Supplier(ctx, test.request)
+ if test.expectedErr != nil {
+ stat, ok := status.FromError(test.expectedErr)
+ require.True(t, ok)
+ require.ErrorContains(t, stat.Err(), test.expectedErr.Error())
+ } else {
+ require.NoError(t, err)
+ require.NotNil(t, response)
+ require.Equal(t,
+ nullify.Fill(test.response),
+ nullify.Fill(response),
+ )
+ }
+ })
+ }
+}
+
+func TestSuppliers_QueryAll_Pagination(t *testing.T) {
+ keeper, ctx := keepertest.SupplierKeeper(t)
+ suppliers := createNSuppliers(*keeper.Keeper, ctx, 5)
+
+ t.Run("ByOffset", func(t *testing.T) {
+ step := 2
+ for i := 0; i < len(suppliers); i += step {
+ req := &types.QueryAllSuppliersRequest{
+ Pagination: &query.PageRequest{
+ Offset: uint64(i),
+ Limit: uint64(step),
+ },
+ }
+ resp, err := keeper.AllSuppliers(ctx, req)
+ require.NoError(t, err)
+ require.LessOrEqual(t, len(resp.Supplier), step)
+ require.Subset(t,
+ nullify.Fill(suppliers),
+ nullify.Fill(resp.Supplier),
+ )
+ }
+ })
+
+ t.Run("ByKey", func(t *testing.T) {
+ step := 2
+ var nextKey []byte
+ for i := 0; i < len(suppliers); i += step {
+ req := &types.QueryAllSuppliersRequest{
+ Pagination: &query.PageRequest{
+ Key: nextKey,
+ Limit: uint64(step),
+ },
+ }
+ resp, err := keeper.AllSuppliers(ctx, req)
+ require.NoError(t, err)
+ require.LessOrEqual(t, len(resp.Supplier), step)
+ require.Subset(t,
+ nullify.Fill(suppliers),
+ nullify.Fill(resp.Supplier),
+ )
+ nextKey = resp.Pagination.NextKey
+ }
+ })
+
+ t.Run("Total", func(t *testing.T) {
+ req := &types.QueryAllSuppliersRequest{
+ Pagination: &query.PageRequest{
+ Offset: 0,
+ Limit: uint64(len(suppliers)),
+ CountTotal: true,
+ },
+ }
+ resp, err := keeper.AllSuppliers(ctx, req)
+ require.NoError(t, err)
+ require.Equal(t, len(suppliers), int(resp.Pagination.Total))
+ require.ElementsMatch(t,
+ nullify.Fill(suppliers),
+ nullify.Fill(resp.Supplier),
+ )
+ })
+}
+
+func TestSuppliers_QueryAll_Filters(t *testing.T) {
+ keeper, ctx := keepertest.SupplierKeeper(t)
+ suppliers := createNSuppliers(*keeper.Keeper, ctx, 5)
+
+ t.Run("Filter By ServiceId", func(t *testing.T) {
+ // Assuming the first supplier has at least one service
+ serviceId := suppliers[0].Services[0].ServiceId
+ req := &types.QueryAllSuppliersRequest{
+ Pagination: &query.PageRequest{
+ Offset: 0,
+ Limit: uint64(len(suppliers)),
+ },
+ Filter: &types.QueryAllSuppliersRequest_ServiceId{
+ ServiceId: serviceId,
+ },
+ }
+ resp, err := keeper.AllSuppliers(ctx, req)
+ require.NoError(t, err)
+
+ // Verify each returned supplier has the specified service
+ for _, s := range resp.Supplier {
+ hasService := false
+ for _, service := range s.Services {
+ if service.ServiceId == serviceId {
+ hasService = true
+ break
+ }
+ }
+ require.True(t, hasService, "Returned supplier does not have the specified service")
+ }
+ })
+}
diff --git a/x/supplier/module/autocli.go b/x/supplier/module/autocli.go
index 716e3774f..a64176dd8 100644
--- a/x/supplier/module/autocli.go
+++ b/x/supplier/module/autocli.go
@@ -10,30 +10,58 @@ import (
func (am AppModule) AutoCLIOptions() *autocliv1.ModuleOptions {
return &autocliv1.ModuleOptions{
Query: &autocliv1.ServiceCommandDescriptor{
- Service: modulev1.Query_ServiceDesc.ServiceName,
+ Service: modulev1.Query_ServiceDesc.ServiceName,
+ EnhanceCustomCommand: true, // only required if you want to use the custom command (for backwards compatibility)
RpcCommandOptions: []*autocliv1.RpcCommandOptions{
- //{
- // RpcMethod: "Params",
- // Use: "params",
- // Short: "Shows the parameters of the module",
- //},
- //{
- // RpcMethod: "AllSuppliers",
- // Use: "list-supplier",
- // Short: "List all supplier",
- //},
- //{
- // RpcMethod: "Supplier",
- // Use: "show-supplier [id]",
- // Short: "Shows a supplier",
- // PositionalArgs: []*autocliv1.PositionalArgDescriptor{{ProtoField: "index"}},
- //},
+ // {
+ // RpcMethod: "Params",
+ // Use: "params",
+ // Short: "Shows the parameters of the module",
+ // },
+ {
+ Alias: []string{"suppliers", "ls"},
+ RpcMethod: "AllSuppliers",
+ Use: "list-suppliers",
+ Short: "List all suppliers on Pocket Network",
+ Long: `Retrieves a paginated list of all suppliers currently registered on Pocket Network, including all their details.
+
+The command supports optional filtering by service ID and pagination parameters.
+Returns supplier addresses, staked amounts, service details, and current status.`,
+
+ Example: ` poktrolld query supplier list-suppliers
+ poktrolld query supplier list-suppliers --service-id anvil
+ poktrolld query supplier list-suppliers --page 2 --limit 50
+ poktrolld query supplier list-suppliers --service-id anvil --page 1 --limit 100`,
+ FlagOptions: map[string]*autocliv1.FlagOptions{
+ "service_id": {Name: "service-id", Shorthand: "s", Usage: "service id to filter by", Hidden: false},
+ },
+ },
+ {
+ Alias: []string{"supplier", "s"},
+ RpcMethod: "Supplier",
+ Use: "show-supplier [operator_address]",
+ Short: "Shows detailed information about a specific supplier",
+ Long: `Retrieves comprehensive information about a supplier identified by their address.
+
+Returned details include things like:
+- Supplier's staked amount and status
+- List of services they provide`,
+
+ Example: ` poktrolld query supplier show-supplier pokt1abc...xyz
+ poktrolld query supplier show-supplier pokt1abc...xyz --output json
+ poktrolld query supplier show-supplier pokt1abc...xyz --height 100`,
+ PositionalArgs: []*autocliv1.PositionalArgDescriptor{
+ {
+ ProtoField: "operator_address",
+ },
+ },
+ },
// this line is used by ignite scaffolding # autocli/query
},
},
Tx: &autocliv1.ServiceCommandDescriptor{
Service: modulev1.Msg_ServiceDesc.ServiceName,
- EnhanceCustomCommand: true, // only required if you want to use the custom command
+ EnhanceCustomCommand: true, // only required if you want to use the custom command (for backwards compatibility)
RpcCommandOptions: []*autocliv1.RpcCommandOptions{
//{
// RpcMethod: "UpdateParams",
diff --git a/x/supplier/module/flags.go b/x/supplier/module/flags.go
new file mode 100644
index 000000000..e4b1cbd9e
--- /dev/null
+++ b/x/supplier/module/flags.go
@@ -0,0 +1,5 @@
+package supplier
+
+const (
+ FlagServiceId = "service-id"
+)
diff --git a/x/supplier/module/query.go b/x/supplier/module/query.go
index b49ebf142..bb81daf33 100644
--- a/x/supplier/module/query.go
+++ b/x/supplier/module/query.go
@@ -22,8 +22,6 @@ func (am AppModule) GetQueryCmd() *cobra.Command {
}
cmd.AddCommand(CmdQueryParams())
- cmd.AddCommand(CmdListSuppliers())
- cmd.AddCommand(CmdShowSupplier())
// this line is used by starport scaffolding # 1
return cmd
diff --git a/x/supplier/module/query_supplier.go b/x/supplier/module/query_supplier.go
deleted file mode 100644
index 81a18454e..000000000
--- a/x/supplier/module/query_supplier.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package supplier
-
-import (
- "github.com/cosmos/cosmos-sdk/client"
- "github.com/cosmos/cosmos-sdk/client/flags"
- "github.com/spf13/cobra"
-
- "github.com/pokt-network/poktroll/x/supplier/types"
-)
-
-func CmdListSuppliers() *cobra.Command {
- cmd := &cobra.Command{
- Use: "list-supplier",
- Short: "list all supplier",
- RunE: func(cmd *cobra.Command, args []string) error {
- clientCtx, err := client.GetClientQueryContext(cmd)
- if err != nil {
- return err
- }
-
- pageReq, err := client.ReadPageRequest(cmd.Flags())
- if err != nil {
- return err
- }
-
- queryClient := types.NewQueryClient(clientCtx)
-
- params := &types.QueryAllSuppliersRequest{
- Pagination: pageReq,
- }
-
- res, err := queryClient.AllSuppliers(cmd.Context(), params)
- if err != nil {
- return err
- }
-
- return clientCtx.PrintProto(res)
- },
- }
-
- flags.AddPaginationFlagsToCmd(cmd, cmd.Use)
- flags.AddQueryFlagsToCmd(cmd)
-
- return cmd
-}
-
-func CmdShowSupplier() *cobra.Command {
- cmd := &cobra.Command{
- Use: "show-supplier ",
- Short: "shows a supplier",
- Args: cobra.ExactArgs(1),
- RunE: func(cmd *cobra.Command, args []string) (err error) {
- clientCtx, err := client.GetClientQueryContext(cmd)
- if err != nil {
- return err
- }
-
- queryClient := types.NewQueryClient(clientCtx)
-
- argAddress := args[0]
-
- params := &types.QueryGetSupplierRequest{
- OperatorAddress: argAddress,
- }
-
- res, err := queryClient.Supplier(cmd.Context(), params)
- if err != nil {
- return err
- }
-
- return clientCtx.PrintProto(res)
- },
- }
-
- flags.AddQueryFlagsToCmd(cmd)
-
- return cmd
-}
diff --git a/x/supplier/module/query_supplier_test.go b/x/supplier/module/query_supplier_test.go
deleted file mode 100644
index 78c529d90..000000000
--- a/x/supplier/module/query_supplier_test.go
+++ /dev/null
@@ -1,140 +0,0 @@
-package supplier_test
-
-import (
- "fmt"
- "strconv"
- "testing"
-
- cometcli "github.com/cometbft/cometbft/libs/cli"
- "github.com/cosmos/cosmos-sdk/client/flags"
- clitestutil "github.com/cosmos/cosmos-sdk/testutil/cli"
- "github.com/stretchr/testify/require"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-
- "github.com/pokt-network/poktroll/testutil/nullify"
- sharedtypes "github.com/pokt-network/poktroll/x/shared/types"
- supplier "github.com/pokt-network/poktroll/x/supplier/module"
- "github.com/pokt-network/poktroll/x/supplier/types"
-)
-
-func TestShowSupplier(t *testing.T) {
- net, suppliers := networkWithSupplierObjects(t, 2)
-
- ctx := net.Validators[0].ClientCtx
- common := []string{
- fmt.Sprintf("--%s=json", cometcli.OutputFlag),
- }
- tests := []struct {
- desc string
- idAddress string
-
- args []string
- expectedErr error
- supplier sharedtypes.Supplier
- }{
- {
- desc: "supplier found",
- idAddress: suppliers[0].OperatorAddress,
-
- args: common,
- supplier: suppliers[0],
- },
- {
- desc: "supplier not found",
- idAddress: strconv.Itoa(100000),
-
- args: common,
- expectedErr: status.Error(codes.NotFound, "not found"),
- },
- }
- for _, test := range tests {
- t.Run(test.desc, func(t *testing.T) {
- args := []string{
- test.idAddress,
- }
- args = append(args, test.args...)
- out, err := clitestutil.ExecTestCLICmd(ctx, supplier.CmdShowSupplier(), args)
- if test.expectedErr != nil {
- stat, ok := status.FromError(test.expectedErr)
- require.True(t, ok)
- require.ErrorIs(t, stat.Err(), test.expectedErr)
- } else {
- require.NoError(t, err)
- var resp types.QueryGetSupplierResponse
- require.NoError(t, net.Config.Codec.UnmarshalJSON(out.Bytes(), &resp))
- require.NotNil(t, resp.Supplier)
- require.Equal(t,
- nullify.Fill(&test.supplier),
- nullify.Fill(&resp.Supplier),
- )
- }
- })
- }
-}
-
-func TestListSuppliers(t *testing.T) {
- net, suppliers := networkWithSupplierObjects(t, 5)
-
- ctx := net.Validators[0].ClientCtx
- request := func(next []byte, offset, limit uint64, total bool) []string {
- args := []string{
- fmt.Sprintf("--%s=json", cometcli.OutputFlag),
- }
- if next == nil {
- args = append(args, fmt.Sprintf("--%s=%d", flags.FlagOffset, offset))
- } else {
- args = append(args, fmt.Sprintf("--%s=%s", flags.FlagPageKey, next))
- }
- args = append(args, fmt.Sprintf("--%s=%d", flags.FlagLimit, limit))
- if total {
- args = append(args, fmt.Sprintf("--%s", flags.FlagCountTotal))
- }
- return args
- }
- t.Run("ByOffset", func(t *testing.T) {
- step := 2
- for i := 0; i < len(suppliers); i += step {
- args := request(nil, uint64(i), uint64(step), false)
- out, err := clitestutil.ExecTestCLICmd(ctx, supplier.CmdListSuppliers(), args)
- require.NoError(t, err)
- var resp types.QueryAllSuppliersResponse
- require.NoError(t, net.Config.Codec.UnmarshalJSON(out.Bytes(), &resp))
- require.LessOrEqual(t, len(resp.Supplier), step)
- require.Subset(t,
- nullify.Fill(suppliers),
- nullify.Fill(resp.Supplier),
- )
- }
- })
- t.Run("ByKey", func(t *testing.T) {
- step := 2
- var next []byte
- for i := 0; i < len(suppliers); i += step {
- args := request(next, 0, uint64(step), false)
- out, err := clitestutil.ExecTestCLICmd(ctx, supplier.CmdListSuppliers(), args)
- require.NoError(t, err)
- var resp types.QueryAllSuppliersResponse
- require.NoError(t, net.Config.Codec.UnmarshalJSON(out.Bytes(), &resp))
- require.LessOrEqual(t, len(resp.Supplier), step)
- require.Subset(t,
- nullify.Fill(suppliers),
- nullify.Fill(resp.Supplier),
- )
- next = resp.Pagination.NextKey
- }
- })
- t.Run("Total", func(t *testing.T) {
- args := request(nil, 0, uint64(len(suppliers)), true)
- out, err := clitestutil.ExecTestCLICmd(ctx, supplier.CmdListSuppliers(), args)
- require.NoError(t, err)
- var resp types.QueryAllSuppliersResponse
- require.NoError(t, net.Config.Codec.UnmarshalJSON(out.Bytes(), &resp))
- require.NoError(t, err)
- require.Equal(t, len(suppliers), int(resp.Pagination.Total))
- require.ElementsMatch(t,
- nullify.Fill(suppliers),
- nullify.Fill(resp.Supplier),
- )
- })
-}
diff --git a/x/supplier/types/errors.go b/x/supplier/types/errors.go
index c6188aade..d3a615a3b 100644
--- a/x/supplier/types/errors.go
+++ b/x/supplier/types/errors.go
@@ -15,4 +15,5 @@ var (
ErrSupplierServiceNotFound = sdkerrors.Register(ModuleName, 1106, "service not found")
ErrSupplierParamInvalid = sdkerrors.Register(ModuleName, 1107, "the provided param is invalid")
ErrSupplierEmitEvent = sdkerrors.Register(ModuleName, 1108, "failed to emit event")
+ ErrSupplierInvalidServiceId = sdkerrors.Register(ModuleName, 1109, "invalid service ID")
)
diff --git a/x/supplier/types/event.pb.go b/x/supplier/types/event.pb.go
index 49c1cc28b..a797d281b 100644
--- a/x/supplier/types/event.pb.go
+++ b/x/supplier/types/event.pb.go
@@ -54,7 +54,7 @@ func (SupplierUnbondingReason) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_22d2d1a82853ce0a, []int{0}
}
-// EventSupplierStaked is emitted when a supplier stake message is committed on-chain.
+// EventSupplierStaked is emitted when a supplier stake message is committed onchain.
type EventSupplierStaked struct {
Supplier *types.Supplier `protobuf:"bytes,1,opt,name=supplier,proto3" json:"supplier"`
// The session end height of the last session in which the supplier was staked.
@@ -105,7 +105,7 @@ func (m *EventSupplierStaked) GetSessionEndHeight() int64 {
}
// EventSupplierUnbondingBegin is emitted when an application unstake message
-// is committed on-chain, indicating that the supplier will now begin unbonding.
+// is committed onchain, indicating that the supplier will now begin unbonding.
type EventSupplierUnbondingBegin struct {
Supplier *types.Supplier `protobuf:"bytes,1,opt,name=supplier,proto3" json:"supplier"`
Reason SupplierUnbondingReason `protobuf:"varint,2,opt,name=reason,proto3,enum=poktroll.supplier.SupplierUnbondingReason" json:"reason"`
diff --git a/x/supplier/types/query.pb.go b/x/supplier/types/query.pb.go
index fc4da1c09..739fb0ef7 100644
--- a/x/supplier/types/query.pb.go
+++ b/x/supplier/types/query.pb.go
@@ -191,6 +191,9 @@ func (m *QueryGetSupplierResponse) GetSupplier() types.Supplier {
type QueryAllSuppliersRequest struct {
Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"`
+ // Types that are valid to be assigned to Filter:
+ // *QueryAllSuppliersRequest_ServiceId
+ Filter isQueryAllSuppliersRequest_Filter `protobuf_oneof:"filter"`
}
func (m *QueryAllSuppliersRequest) Reset() { *m = QueryAllSuppliersRequest{} }
@@ -222,6 +225,25 @@ func (m *QueryAllSuppliersRequest) XXX_DiscardUnknown() {
var xxx_messageInfo_QueryAllSuppliersRequest proto.InternalMessageInfo
+type isQueryAllSuppliersRequest_Filter interface {
+ isQueryAllSuppliersRequest_Filter()
+ MarshalTo([]byte) (int, error)
+ Size() int
+}
+
+type QueryAllSuppliersRequest_ServiceId struct {
+ ServiceId string `protobuf:"bytes,2,opt,name=service_id,json=serviceId,proto3,oneof" json:"service_id,omitempty"`
+}
+
+func (*QueryAllSuppliersRequest_ServiceId) isQueryAllSuppliersRequest_Filter() {}
+
+func (m *QueryAllSuppliersRequest) GetFilter() isQueryAllSuppliersRequest_Filter {
+ if m != nil {
+ return m.Filter
+ }
+ return nil
+}
+
func (m *QueryAllSuppliersRequest) GetPagination() *query.PageRequest {
if m != nil {
return m.Pagination
@@ -229,6 +251,20 @@ func (m *QueryAllSuppliersRequest) GetPagination() *query.PageRequest {
return nil
}
+func (m *QueryAllSuppliersRequest) GetServiceId() string {
+ if x, ok := m.GetFilter().(*QueryAllSuppliersRequest_ServiceId); ok {
+ return x.ServiceId
+ }
+ return ""
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*QueryAllSuppliersRequest) XXX_OneofWrappers() []interface{} {
+ return []interface{}{
+ (*QueryAllSuppliersRequest_ServiceId)(nil),
+ }
+}
+
type QueryAllSuppliersResponse struct {
Supplier []types.Supplier `protobuf:"bytes,1,rep,name=supplier,proto3" json:"supplier"`
Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"`
@@ -289,43 +325,45 @@ func init() {
func init() { proto.RegisterFile("poktroll/supplier/query.proto", fileDescriptor_7a8c18c53656bd0d) }
var fileDescriptor_7a8c18c53656bd0d = []byte{
- // 567 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x94, 0x31, 0x6f, 0x13, 0x31,
- 0x14, 0xc7, 0xe3, 0x16, 0xa2, 0xd6, 0x20, 0x41, 0x4d, 0x24, 0x92, 0x08, 0x0e, 0x74, 0x12, 0x21,
- 0x0a, 0xd4, 0x26, 0x65, 0x2c, 0x0c, 0x4d, 0x25, 0x3a, 0x52, 0x92, 0x01, 0x89, 0x81, 0xca, 0x49,
- 0xac, 0xeb, 0xa9, 0x97, 0xb3, 0x6b, 0x3b, 0x40, 0x85, 0x58, 0x58, 0x58, 0x91, 0x18, 0x99, 0xd8,
- 0x3a, 0x32, 0xf0, 0x21, 0x3a, 0x56, 0xb0, 0x54, 0x0c, 0x08, 0x25, 0x48, 0x7c, 0x0d, 0x14, 0xdb,
- 0x97, 0xa6, 0xdc, 0x45, 0x49, 0x97, 0xc8, 0xe7, 0xf7, 0xff, 0xbf, 0xf7, 0xf3, 0x7b, 0x4f, 0x81,
- 0x37, 0x05, 0xdf, 0xd3, 0x92, 0x47, 0x11, 0x51, 0x7d, 0x21, 0xa2, 0x90, 0x49, 0xb2, 0xdf, 0x67,
- 0xf2, 0x00, 0x0b, 0xc9, 0x35, 0x47, 0x2b, 0x49, 0x18, 0x27, 0xe1, 0xf2, 0x0a, 0xed, 0x85, 0x31,
- 0x27, 0xe6, 0xd7, 0xaa, 0xca, 0x85, 0x80, 0x07, 0xdc, 0x1c, 0xc9, 0xe8, 0xe4, 0x6e, 0x6f, 0x04,
- 0x9c, 0x07, 0x11, 0x23, 0x54, 0x84, 0x84, 0xc6, 0x31, 0xd7, 0x54, 0x87, 0x3c, 0x56, 0x2e, 0x5a,
- 0xea, 0x70, 0xd5, 0xe3, 0x6a, 0xc7, 0xda, 0xec, 0x87, 0x0b, 0xd5, 0xec, 0x17, 0x69, 0x53, 0xc5,
- 0x2c, 0x0d, 0x79, 0x55, 0x6f, 0x33, 0x4d, 0xeb, 0x44, 0xd0, 0x20, 0x8c, 0x4d, 0x1e, 0xa7, 0xf5,
- 0x26, 0xb5, 0x89, 0xaa, 0xc3, 0xc3, 0x71, 0x3c, 0xfd, 0x3e, 0x41, 0x25, 0xed, 0xa9, 0x74, 0x7c,
- 0x97, 0x4a, 0xd6, 0x1d, 0xcb, 0x6c, 0xdc, 0x2f, 0x40, 0xf4, 0x6c, 0x44, 0xb0, 0x6d, 0x4c, 0x4d,
- 0xb6, 0xdf, 0x67, 0x4a, 0xfb, 0x2d, 0x78, 0xed, 0xcc, 0xad, 0x12, 0x3c, 0x56, 0x0c, 0x3d, 0x82,
- 0x79, 0x9b, 0xbc, 0x08, 0x6e, 0x83, 0xea, 0xa5, 0xb5, 0x12, 0x4e, 0xb5, 0x0f, 0x5b, 0x4b, 0x63,
- 0xf9, 0xe8, 0xd7, 0xad, 0xdc, 0xe1, 0xdf, 0xaf, 0x35, 0xd0, 0x74, 0x1e, 0xff, 0x25, 0xbc, 0x6e,
- 0x92, 0x6e, 0x31, 0xdd, 0x72, 0x6a, 0x57, 0x0f, 0x6d, 0xc2, 0xab, 0x5c, 0x30, 0x49, 0x35, 0x97,
- 0x3b, 0xb4, 0xdb, 0x95, 0x4c, 0xd9, 0x12, 0xcb, 0x8d, 0xe2, 0xf7, 0x6f, 0xab, 0x05, 0xd7, 0xbd,
- 0x0d, 0x1b, 0x69, 0x69, 0x19, 0xc6, 0x41, 0xf3, 0x4a, 0xe2, 0x70, 0xd7, 0xfe, 0x73, 0x58, 0x4c,
- 0xe7, 0x77, 0xe4, 0xeb, 0x70, 0x29, 0x21, 0xcc, 0x60, 0x37, 0x9d, 0xc1, 0x89, 0xa9, 0x71, 0x61,
- 0xc4, 0xde, 0x1c, 0x1b, 0xfc, 0xb6, 0x4b, 0xbc, 0x11, 0x45, 0x89, 0x26, 0xe9, 0x14, 0x7a, 0x02,
- 0xe1, 0xe9, 0xcc, 0x5c, 0xea, 0x0a, 0x76, 0xc0, 0xa3, 0xa1, 0x61, 0xbb, 0x6e, 0x6e, 0x74, 0x78,
- 0x9b, 0x06, 0xcc, 0x79, 0x9b, 0x13, 0x4e, 0xff, 0x0b, 0x80, 0xa5, 0x8c, 0x22, 0x99, 0xf8, 0x8b,
- 0xe7, 0xc2, 0x47, 0x5b, 0x67, 0x10, 0x17, 0x0c, 0xe2, 0xdd, 0x99, 0x88, 0xb6, 0xf2, 0x24, 0xe3,
- 0xda, 0xcf, 0x45, 0x78, 0xd1, 0x30, 0xa2, 0x0f, 0x00, 0xe6, 0xed, 0xa0, 0xd1, 0x9d, 0x8c, 0x1d,
- 0x48, 0x6f, 0x54, 0xb9, 0x32, 0x4b, 0x66, 0xeb, 0xf9, 0xf8, 0xfd, 0x8f, 0x3f, 0x9f, 0x16, 0xaa,
- 0xa8, 0x42, 0x46, 0xfa, 0xd5, 0x98, 0xe9, 0xd7, 0x5c, 0xee, 0x91, 0x69, 0x5b, 0x8e, 0x0e, 0x01,
- 0x5c, 0x4a, 0x5e, 0x8e, 0x6a, 0xd3, 0x8a, 0xa4, 0x57, 0xae, 0x7c, 0x6f, 0x2e, 0xad, 0xa3, 0xda,
- 0x34, 0x54, 0x8f, 0xd1, 0xfa, 0x2c, 0xaa, 0xf1, 0xe1, 0xed, 0xff, 0xfb, 0xfc, 0x0e, 0x7d, 0x06,
- 0xf0, 0xf2, 0xe4, 0x74, 0xd1, 0x54, 0x84, 0x8c, 0x45, 0x2b, 0xdf, 0x9f, 0x4f, 0xec, 0x80, 0x1f,
- 0x18, 0xe0, 0x1a, 0xaa, 0xce, 0x0b, 0xdc, 0x78, 0x7a, 0x34, 0xf0, 0xc0, 0xf1, 0xc0, 0x03, 0x27,
- 0x03, 0x0f, 0xfc, 0x1e, 0x78, 0xe0, 0xe3, 0xd0, 0xcb, 0x1d, 0x0f, 0xbd, 0xdc, 0xc9, 0xd0, 0xcb,
- 0xbd, 0xa8, 0x07, 0xa1, 0xde, 0xed, 0xb7, 0x71, 0x87, 0xf7, 0xa6, 0x64, 0x7c, 0x73, 0x9a, 0x53,
- 0x1f, 0x08, 0xa6, 0xda, 0x79, 0xf3, 0x07, 0xf3, 0xf0, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x07,
- 0x67, 0x7e, 0xca, 0x82, 0x05, 0x00, 0x00,
+ // 602 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xb1, 0x6f, 0x13, 0x3f,
+ 0x18, 0x8d, 0xd3, 0xdf, 0x2f, 0x4a, 0x0c, 0x12, 0xd4, 0x44, 0x22, 0x89, 0xe0, 0x8a, 0x4e, 0x22,
+ 0x44, 0x81, 0xde, 0x91, 0x32, 0x16, 0x86, 0xa6, 0x12, 0x85, 0x89, 0x72, 0x19, 0x90, 0x18, 0x88,
+ 0x9c, 0xc4, 0x5c, 0xad, 0x5e, 0xce, 0x57, 0xdb, 0x29, 0x54, 0x88, 0x85, 0x85, 0x09, 0x09, 0x89,
+ 0x91, 0x89, 0xad, 0x23, 0x03, 0x7f, 0x44, 0xc7, 0x0a, 0x96, 0x8a, 0x01, 0xa1, 0x04, 0x89, 0x7f,
+ 0x03, 0x9d, 0xed, 0x4b, 0x53, 0x2e, 0x51, 0xc2, 0x12, 0xd9, 0xfe, 0xde, 0x7b, 0xdf, 0x7b, 0xf6,
+ 0x97, 0x83, 0x57, 0x23, 0xb6, 0x2b, 0x39, 0x0b, 0x02, 0x57, 0x0c, 0xa2, 0x28, 0xa0, 0x84, 0xbb,
+ 0x7b, 0x03, 0xc2, 0x0f, 0x9c, 0x88, 0x33, 0xc9, 0xd0, 0x72, 0x52, 0x76, 0x92, 0x72, 0x65, 0x19,
+ 0xf7, 0x69, 0xc8, 0x5c, 0xf5, 0xab, 0x51, 0x95, 0xa2, 0xcf, 0x7c, 0xa6, 0x96, 0x6e, 0xbc, 0x32,
+ 0xa7, 0x57, 0x7c, 0xc6, 0xfc, 0x80, 0xb8, 0x38, 0xa2, 0x2e, 0x0e, 0x43, 0x26, 0xb1, 0xa4, 0x2c,
+ 0x14, 0xa6, 0x5a, 0xee, 0x32, 0xd1, 0x67, 0xa2, 0xad, 0x69, 0x7a, 0x63, 0x4a, 0x75, 0xbd, 0x73,
+ 0x3b, 0x58, 0x10, 0xed, 0xc6, 0xdd, 0x6f, 0x74, 0x88, 0xc4, 0x0d, 0x37, 0xc2, 0x3e, 0x0d, 0x95,
+ 0x8e, 0xc1, 0x5a, 0x93, 0xd8, 0x04, 0xd5, 0x65, 0x74, 0x5c, 0x4f, 0xe7, 0x8b, 0x30, 0xc7, 0x7d,
+ 0x91, 0xae, 0xef, 0x60, 0x4e, 0x7a, 0x63, 0x98, 0xae, 0xdb, 0x45, 0x88, 0x1e, 0xc7, 0x0e, 0xb6,
+ 0x15, 0xc9, 0x23, 0x7b, 0x03, 0x22, 0xa4, 0xdd, 0x82, 0x97, 0xce, 0x9c, 0x8a, 0x88, 0x85, 0x82,
+ 0xa0, 0xbb, 0x30, 0xa7, 0xc5, 0x4b, 0xe0, 0x1a, 0xa8, 0x9d, 0x5b, 0x2b, 0x3b, 0xa9, 0xeb, 0x73,
+ 0x34, 0xa5, 0x59, 0x38, 0xfa, 0xb1, 0x92, 0x39, 0xfc, 0xfd, 0xb9, 0x0e, 0x3c, 0xc3, 0xb1, 0x9f,
+ 0xc1, 0xcb, 0x4a, 0x74, 0x8b, 0xc8, 0x96, 0x41, 0x9b, 0x7e, 0x68, 0x13, 0x5e, 0x64, 0x11, 0xe1,
+ 0x58, 0x32, 0xde, 0xc6, 0xbd, 0x1e, 0x27, 0x42, 0xb7, 0x28, 0x34, 0x4b, 0x5f, 0xbf, 0xac, 0x16,
+ 0xcd, 0xed, 0x6d, 0xe8, 0x4a, 0x4b, 0x72, 0x1a, 0xfa, 0xde, 0x85, 0x84, 0x61, 0x8e, 0xed, 0x27,
+ 0xb0, 0x94, 0xd6, 0x37, 0xce, 0xd7, 0x61, 0x3e, 0x71, 0x38, 0xc5, 0xbb, 0xba, 0x19, 0x27, 0x21,
+ 0x35, 0xff, 0x8b, 0xbd, 0x7b, 0x63, 0x82, 0xfd, 0x0e, 0x18, 0xe5, 0x8d, 0x20, 0x48, 0x40, 0xc9,
+ 0x55, 0xa1, 0xfb, 0x10, 0x9e, 0x3e, 0x9a, 0xd1, 0xae, 0x3a, 0xc6, 0x71, 0xfc, 0x6a, 0x8e, 0x9e,
+ 0x37, 0xf3, 0x76, 0xce, 0x36, 0xf6, 0x89, 0xe1, 0x7a, 0x13, 0x4c, 0xb4, 0x02, 0xa1, 0x20, 0x7c,
+ 0x9f, 0x76, 0x49, 0x9b, 0xf6, 0x4a, 0xd9, 0x38, 0xfc, 0x83, 0x8c, 0x57, 0x30, 0x67, 0x0f, 0x7b,
+ 0xcd, 0x3c, 0xcc, 0x3d, 0xa7, 0x81, 0x24, 0xdc, 0xfe, 0x04, 0x60, 0x79, 0x8a, 0x9f, 0xa9, 0x51,
+ 0x97, 0xfe, 0x29, 0x2a, 0xda, 0x3a, 0x93, 0x26, 0xab, 0xd2, 0xdc, 0x98, 0x9b, 0x46, 0x77, 0x9e,
+ 0x8c, 0xb3, 0xf6, 0x7d, 0x09, 0xfe, 0xaf, 0x3c, 0xa2, 0xb7, 0x00, 0xe6, 0xf4, 0x50, 0xa0, 0xeb,
+ 0x53, 0xe6, 0x25, 0x3d, 0x7d, 0x95, 0xea, 0x3c, 0x98, 0xee, 0x67, 0x3b, 0x6f, 0xbe, 0xfd, 0xfa,
+ 0x90, 0xad, 0xa1, 0xaa, 0x1b, 0xe3, 0x57, 0x43, 0x22, 0x5f, 0x30, 0xbe, 0xeb, 0xce, 0xfa, 0x47,
+ 0xa0, 0x43, 0x00, 0xf3, 0x49, 0x72, 0x54, 0x9f, 0xd5, 0x24, 0x3d, 0x9e, 0x95, 0x9b, 0x0b, 0x61,
+ 0x8d, 0xab, 0x4d, 0xe5, 0xea, 0x1e, 0x5a, 0x9f, 0xe7, 0x6a, 0xbc, 0x78, 0xf5, 0xf7, 0xec, 0xbf,
+ 0x46, 0x1f, 0x01, 0x3c, 0x3f, 0xf9, 0xba, 0x68, 0xa6, 0x85, 0x29, 0x33, 0x59, 0xb9, 0xb5, 0x18,
+ 0xd8, 0x18, 0xbe, 0xad, 0x0c, 0xd7, 0x51, 0x6d, 0x51, 0xc3, 0xcd, 0x47, 0x47, 0x43, 0x0b, 0x1c,
+ 0x0f, 0x2d, 0x70, 0x32, 0xb4, 0xc0, 0xcf, 0xa1, 0x05, 0xde, 0x8f, 0xac, 0xcc, 0xf1, 0xc8, 0xca,
+ 0x9c, 0x8c, 0xac, 0xcc, 0xd3, 0x86, 0x4f, 0xe5, 0xce, 0xa0, 0xe3, 0x74, 0x59, 0x7f, 0x86, 0xe2,
+ 0xcb, 0x53, 0x4d, 0x79, 0x10, 0x11, 0xd1, 0xc9, 0xa9, 0x8f, 0xd1, 0x9d, 0x3f, 0x01, 0x00, 0x00,
+ 0xff, 0xff, 0xc0, 0xc9, 0xa7, 0x97, 0xae, 0x05, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
@@ -623,6 +661,15 @@ func (m *QueryAllSuppliersRequest) MarshalToSizedBuffer(dAtA []byte) (int, error
_ = i
var l int
_ = l
+ if m.Filter != nil {
+ {
+ size := m.Filter.Size()
+ i -= size
+ if _, err := m.Filter.MarshalTo(dAtA[i:]); err != nil {
+ return 0, err
+ }
+ }
+ }
if m.Pagination != nil {
{
size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i])
@@ -638,6 +685,20 @@ func (m *QueryAllSuppliersRequest) MarshalToSizedBuffer(dAtA []byte) (int, error
return len(dAtA) - i, nil
}
+func (m *QueryAllSuppliersRequest_ServiceId) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryAllSuppliersRequest_ServiceId) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ i -= len(m.ServiceId)
+ copy(dAtA[i:], m.ServiceId)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.ServiceId)))
+ i--
+ dAtA[i] = 0x12
+ return len(dAtA) - i, nil
+}
func (m *QueryAllSuppliersResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -752,9 +813,22 @@ func (m *QueryAllSuppliersRequest) Size() (n int) {
l = m.Pagination.Size()
n += 1 + l + sovQuery(uint64(l))
}
+ if m.Filter != nil {
+ n += m.Filter.Size()
+ }
return n
}
+func (m *QueryAllSuppliersRequest_ServiceId) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ServiceId)
+ n += 1 + l + sovQuery(uint64(l))
+ return n
+}
func (m *QueryAllSuppliersResponse) Size() (n int) {
if m == nil {
return 0
@@ -1143,6 +1217,38 @@ func (m *QueryAllSuppliersRequest) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ServiceId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Filter = &QueryAllSuppliersRequest_ServiceId{string(dAtA[iNdEx:postIndex])}
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipQuery(dAtA[iNdEx:])
diff --git a/x/supplier/types/query_validation.go b/x/supplier/types/query_validation.go
new file mode 100644
index 000000000..ccab23065
--- /dev/null
+++ b/x/supplier/types/query_validation.go
@@ -0,0 +1,44 @@
+package types
+
+import (
+	"context"
+
+	sdk "github.com/cosmos/cosmos-sdk/types"
+
+	"github.com/pokt-network/poktroll/pkg/polylog"
+	sharedtypes "github.com/pokt-network/poktroll/x/shared/types"
+)
+
+// NOTE: Please note that these messages are not of type `sdk.Msg`, and are therefore not a message/request
+// that will be signable or invoke a state transition. However, following a similar `ValidateBasic` pattern
+// allows us to localize & reuse validation logic.
+
+// ValidateBasic performs basic (non-state-dependent) validation on a QueryGetSupplierRequest.
+func (query *QueryGetSupplierRequest) ValidateBasic() error {
+	// Validate the supplier operator address
+	if _, err := sdk.AccAddressFromBech32(query.OperatorAddress); err != nil {
+		return ErrSupplierInvalidAddress.Wrapf("invalid supplier operator address %s; (%v)", query.OperatorAddress, err)
+	}
+
+	return nil
+}
+
+// ValidateBasic performs basic (non-state-dependent) validation on a QueryAllSuppliersRequest.
+func (query *QueryAllSuppliersRequest) ValidateBasic() error {
+	// TODO_TECHDEBT: update function signature to receive a context.
+	logger := polylog.Ctx(context.TODO())
+
+	switch filter := query.Filter.(type) {
+	case *QueryAllSuppliersRequest_ServiceId:
+		// If the service ID filter is set, ensure it conforms to the shared service ID format.
+		if filter.ServiceId != "" && !sharedtypes.IsValidServiceId(filter.ServiceId) {
+			return ErrSupplierInvalidServiceId.Wrapf("invalid service ID %q for suppliers being retrieved", filter.ServiceId)
+		}
+
+	default:
+		// No filter is set
+		logger.Info().Msg("No specific filter set when listing suppliers")
+	}
+
+	return nil
+}
diff --git a/x/tokenomics/keeper/msg_server_update_param_test.go b/x/tokenomics/keeper/msg_server_update_param_test.go
index 6c358482a..b0da8a230 100644
--- a/x/tokenomics/keeper/msg_server_update_param_test.go
+++ b/x/tokenomics/keeper/msg_server_update_param_test.go
@@ -42,7 +42,7 @@ func TestMsgUpdateParam_UpdateMintAllocationPercentagesOnly(t *testing.T) {
require.NotEqual(t, defaultParams.MintAllocationPercentages, res.Params.MintAllocationPercentages)
require.Equal(t, expectedMintAllocationPercentages, res.Params.MintAllocationPercentages)
- // Assert that the on-chain mint allocation percentages is updated.
+ // Assert that the onchain mint allocation percentages is updated.
params := k.GetParams(ctx)
require.Equal(t, expectedMintAllocationPercentages, params.MintAllocationPercentages)
@@ -74,7 +74,7 @@ func TestMsgUpdateParam_UpdateDaoRewardAddressOnly(t *testing.T) {
require.NotEqual(t, defaultParams.DaoRewardAddress, res.Params.DaoRewardAddress)
require.Equal(t, expectedDaoRewardAddress, res.Params.DaoRewardAddress)
- // Assert that the on-chain dao reward address is updated.
+ // Assert that the onchain dao reward address is updated.
params := k.GetParams(ctx)
require.Equal(t, expectedDaoRewardAddress, params.DaoRewardAddress)
@@ -106,7 +106,7 @@ func TestMsgUpdateParam_UpdateGlobalInflationPerClaimOnly(t *testing.T) {
require.NotEqual(t, defaultParams.GlobalInflationPerClaim, res.Params.GlobalInflationPerClaim)
require.Equal(t, expectedGlobalInflationPerClaim, res.Params.GlobalInflationPerClaim)
- // Assert that the on-chain dao reward address is updated.
+ // Assert that the onchain global inflation per claim is updated.
params := k.GetParams(ctx)
require.Equal(t, expectedGlobalInflationPerClaim, params.GlobalInflationPerClaim)
diff --git a/x/tokenomics/keeper/msg_update_params_test.go b/x/tokenomics/keeper/msg_update_params_test.go
index 71375b179..dbc10b231 100644
--- a/x/tokenomics/keeper/msg_update_params_test.go
+++ b/x/tokenomics/keeper/msg_update_params_test.go
@@ -44,7 +44,7 @@ func TestMsgUpdateParams(t *testing.T) {
},
shouldError: true,
- expectedErrMsg: "the provided authority address does not match the on-chain governance address",
+ expectedErrMsg: "the provided authority address does not match the onchain governance address",
},
{
desc: "invalid: dao reward address missing",
diff --git a/x/tokenomics/keeper/settle_pending_claims.go b/x/tokenomics/keeper/settle_pending_claims.go
index fa1995de2..c6ecd397a 100644
--- a/x/tokenomics/keeper/settle_pending_claims.go
+++ b/x/tokenomics/keeper/settle_pending_claims.go
@@ -25,7 +25,7 @@ import (
// If a claim is expired and requires a proof and a proof IS NOT available -> it's deleted.
// If a claim is expired and does NOT require a proof -> it's settled.
// Events are emitted for each claim that is settled or removed.
-// On-chain Claims & Proofs are deleted after they're settled or expired to free up space.
+// Onchain Claims & Proofs are deleted after they're settled or expired to free up space.
func (k Keeper) SettlePendingClaims(ctx cosmostypes.Context) (
settledResults tlm.ClaimSettlementResults,
expiredResults tlm.ClaimSettlementResults,
@@ -95,7 +95,7 @@ func (k Keeper) SettlePendingClaims(ctx cosmostypes.Context) (
targetNumRelays,
)
}
- // numEstimatedComputeUnits is the probabilistic estimation of the off-chain
+ // numEstimatedComputeUnits is the probabilistic estimation of the offchain
// work done by the relay miner in this session. It is derived from the claimed
// work and the relay mining difficulty.
numEstimatedComputeUnits, err = claim.GetNumEstimatedComputeUnits(relayMiningDifficulty)
@@ -114,7 +114,7 @@ func (k Keeper) SettlePendingClaims(ctx cosmostypes.Context) (
proof, isProofFound := k.proofKeeper.GetProof(ctx, sessionId, claim.SupplierOperatorAddress)
// Using the probabilistic proofs approach, determine if this expiring
- // claim required an on-chain proof
+ // claim required an onchain proof
proofRequirement, err = k.proofKeeper.ProofRequirementForClaim(ctx, &claim)
if err != nil {
return settledResults, expiredResults, err
@@ -178,7 +178,7 @@ func (k Keeper) SettlePendingClaims(ctx cosmostypes.Context) (
// owner or operator balances if the stake is negative.
// The claim & proof are no longer necessary, so there's no need for them
- // to take up on-chain space.
+ // to take up onchain space.
k.proofKeeper.RemoveClaim(ctx, sessionId, claim.SupplierOperatorAddress)
if isProofFound {
k.proofKeeper.RemoveProof(ctx, sessionId, claim.SupplierOperatorAddress)
@@ -233,6 +233,7 @@ func (k Keeper) SettlePendingClaims(ctx cosmostypes.Context) (
NumEstimatedComputeUnits: numEstimatedComputeUnits,
ClaimedUpokt: &claimeduPOKT,
ProofRequirement: proofRequirement,
+ SettlementResult: *ClaimSettlementResult,
}
if err = ctx.EventManager().EmitTypedEvent(&claimSettledEvent); err != nil {
@@ -242,7 +243,7 @@ func (k Keeper) SettlePendingClaims(ctx cosmostypes.Context) (
logger.Info("claim settled")
// The claim & proof are no longer necessary, so there's no need for them
- // to take up on-chain space.
+ // to take up onchain space.
k.proofKeeper.RemoveClaim(ctx, sessionId, claim.SupplierOperatorAddress)
// Whether or not the proof is required, the supplier may have submitted one
// so we need to delete it either way. If we don't have the if structure,
@@ -329,36 +330,36 @@ func (k Keeper) ExecutePendingSettledResults(ctx cosmostypes.Context, settledRes
logger.Info(fmt.Sprintf("begin executing %d pending settlement results", len(settledResults)))
for _, settledResult := range settledResults {
- logger = logger.With("session_id", settledResult.GetSessionId())
- logger.Info("begin executing pending settlement result")
+ sessionLogger := logger.With("session_id", settledResult.GetSessionId())
+ sessionLogger.Info("begin executing pending settlement result")
- logger.Info(fmt.Sprintf("begin executing %d pending mints", len(settledResult.GetMints())))
- if err := k.executePendingModuleMints(ctx, logger, settledResult.GetMints()); err != nil {
+ sessionLogger.Info(fmt.Sprintf("begin executing %d pending mints", len(settledResult.GetMints())))
+ if err := k.executePendingModuleMints(ctx, sessionLogger, settledResult.GetMints()); err != nil {
return err
}
- logger.Info("done executing pending mints")
+ sessionLogger.Info("done executing pending mints")
- logger.Info(fmt.Sprintf("begin executing %d pending module to module transfers", len(settledResult.GetModToModTransfers())))
- if err := k.executePendingModToModTransfers(ctx, logger, settledResult.GetModToModTransfers()); err != nil {
+ sessionLogger.Info(fmt.Sprintf("begin executing %d pending module to module transfers", len(settledResult.GetModToModTransfers())))
+ if err := k.executePendingModToModTransfers(ctx, sessionLogger, settledResult.GetModToModTransfers()); err != nil {
return err
}
- logger.Info("done executing pending module account to module account transfers")
+ sessionLogger.Info("done executing pending module account to module account transfers")
- logger.Info(fmt.Sprintf("begin executing %d pending module to account transfers", len(settledResult.GetModToAcctTransfers())))
- if err := k.executePendingModToAcctTransfers(ctx, logger, settledResult.GetModToAcctTransfers()); err != nil {
+ sessionLogger.Info(fmt.Sprintf("begin executing %d pending module to account transfers", len(settledResult.GetModToAcctTransfers())))
+ if err := k.executePendingModToAcctTransfers(ctx, sessionLogger, settledResult.GetModToAcctTransfers()); err != nil {
return err
}
- logger.Info("done executing pending module to account transfers")
+ sessionLogger.Info("done executing pending module to account transfers")
- logger.Info(fmt.Sprintf("begin executing %d pending burns", len(settledResult.GetBurns())))
- if err := k.executePendingModuleBurns(ctx, logger, settledResult.GetBurns()); err != nil {
+ sessionLogger.Info(fmt.Sprintf("begin executing %d pending burns", len(settledResult.GetBurns())))
+ if err := k.executePendingModuleBurns(ctx, sessionLogger, settledResult.GetBurns()); err != nil {
return err
}
- logger.Info("done executing pending burns")
+ sessionLogger.Info("done executing pending burns")
- logger.Info("done executing pending settlement result")
+ sessionLogger.Info("done executing pending settlement result")
- logger.Info(fmt.Sprintf(
+ sessionLogger.Info(fmt.Sprintf(
"done applying settled results for session %q",
settledResult.Claim.GetSessionHeader().GetSessionId(),
))
@@ -510,7 +511,7 @@ func (k Keeper) GetExpiringClaims(ctx cosmostypes.Context) (expiringClaims []pro
// 2a. This likely also requires adding validation to the shared module params.
blockHeight := ctx.BlockHeight()
- // NB: This error can be safely ignored as on-chain SharedQueryClient implementation cannot return an error.
+ // NB: This error can be safely ignored as onchain SharedQueryClient implementation cannot return an error.
sharedParams, _ := k.sharedQuerier.GetParams(ctx)
// expiringSessionEndHeight is the session end height of the session whose proof
@@ -656,7 +657,7 @@ func (k Keeper) slashSupplierStake(
))
// TODO_MAINNET: Should we just remove the supplier if the stake is
- // below the minimum, at the risk of making the off-chain actors have an
+ // below the minimum, at the risk of making the offchain actors have an
// inconsistent session supplier list? See the comment above for more details.
supplierToSlash.UnstakeSessionEndHeight = uint64(unstakeSessionEndHeight)
diff --git a/x/tokenomics/keeper/token_logic_modules.go b/x/tokenomics/keeper/token_logic_modules.go
index 876dddb28..3564322ca 100644
--- a/x/tokenomics/keeper/token_logic_modules.go
+++ b/x/tokenomics/keeper/token_logic_modules.go
@@ -109,7 +109,7 @@ func (k Keeper) ProcessTokenLogicModules(
return tokenomicstypes.ErrTokenomicsApplicationAddressInvalid.Wrapf("address (%q)", sessionHeader.GetApplicationAddress())
}
- // Retrieve the on-chain staked application record
+ // Retrieve the onchain staked application record
application, isAppFound := k.applicationKeeper.GetApplication(ctx, applicationAddress.String())
if !isAppFound {
logger.Warn(fmt.Sprintf("application for claim with address %q not found", applicationAddress))
@@ -124,7 +124,7 @@ func (k Keeper) ProcessTokenLogicModules(
)
}
- // Retrieve the on-chain staked supplier record
+ // Retrieve the onchain staked supplier record
supplier, isSupplierFound := k.supplierKeeper.GetSupplier(ctx, supplierOperatorAddr.String())
if !isSupplierFound {
logger.Warn(fmt.Sprintf("supplier for claim with address %q not found", supplierOperatorAddr))
@@ -246,9 +246,9 @@ func (k Keeper) ProcessTokenLogicModules(
}
}
- // State mutation: update the application's on-chain record.
+ // State mutation: update the application's onchain record.
k.applicationKeeper.SetApplication(ctx, application)
- logger.Info(fmt.Sprintf("updated on-chain application record with address %q", application.Address))
+ logger.Info(fmt.Sprintf("updated onchain application record with address %q", application.Address))
// TODO_MAINNET(@bryanchriswhite): If the application stake has dropped to (near?) zero:
// - Unstake it
@@ -256,9 +256,9 @@ func (k Keeper) ProcessTokenLogicModules(
// - Ensure this doesn't happen
// - Document the decision
- // State mutation: Update the suppliers's on-chain record
+ // State mutation: Update the supplier's onchain record
k.supplierKeeper.SetSupplier(ctx, supplier)
- logger.Info(fmt.Sprintf("updated on-chain supplier record with address %q", supplier.OperatorAddress))
+ logger.Info(fmt.Sprintf("updated onchain supplier record with address %q", supplier.OperatorAddress))
// Update isSuccessful to true for telemetry
isSuccessful = true
diff --git a/x/tokenomics/keeper/token_logic_modules_test.go b/x/tokenomics/keeper/token_logic_modules_test.go
index b70d32608..eae0d4628 100644
--- a/x/tokenomics/keeper/token_logic_modules_test.go
+++ b/x/tokenomics/keeper/token_logic_modules_test.go
@@ -45,7 +45,7 @@ func TestProcessTokenLogicModules_TLMBurnEqualsMint_Valid(t *testing.T) {
// Test Parameters
appInitialStake := apptypes.DefaultMinStake.Amount.Mul(cosmosmath.NewInt(2))
supplierInitialStake := cosmosmath.NewInt(1000000)
- supplierRevShareRatios := []float32{12.5, 37.5, 50}
+ supplierRevShareRatios := []uint64{12, 38, 50}
globalComputeUnitsToTokensMultiplier := uint64(1)
serviceComputeUnitsPerRelay := uint64(1)
service := prepareTestService(serviceComputeUnitsPerRelay)
@@ -185,7 +185,7 @@ func TestProcessTokenLogicModules_TLMBurnEqualsMint_Valid_SupplierExceedsMaxClai
service := prepareTestService(serviceComputeUnitsPerRelay)
numRelays := uint64(1000) // By a single supplier for application in this session
supplierInitialStake := cosmosmath.NewInt(1000000)
- supplierRevShareRatios := []float32{12.5, 37.5, 50}
+ supplierRevShareRatios := []uint64{12, 38, 50}
// Prepare the keepers
keepers, ctx := testkeeper.NewTokenomicsModuleKeepers(t,
@@ -338,7 +338,7 @@ func TestProcessTokenLogicModules_TLMGlobalMint_Valid_MintDistributionCorrect(t
// Test Parameters
appInitialStake := apptypes.DefaultMinStake.Amount.Mul(cosmosmath.NewInt(2))
supplierInitialStake := cosmosmath.NewInt(1000000)
- supplierRevShareRatios := []float32{12.5, 37.5, 50}
+ supplierRevShareRatios := []uint64{12, 38, 50}
globalComputeUnitsToTokensMultiplier := uint64(1)
serviceComputeUnitsPerRelay := uint64(1)
service := prepareTestService(serviceComputeUnitsPerRelay)
@@ -447,7 +447,7 @@ func TestProcessTokenLogicModules_TLMGlobalMint_Valid_MintDistributionCorrect(t
propMint := cosmosmath.NewInt(int64(numTokensMinted * tokenomicsParams.MintAllocationPercentages.Proposer))
serviceOwnerMint := cosmosmath.NewInt(int64(numTokensMinted * tokenomicsParams.MintAllocationPercentages.SourceOwner))
appMint := cosmosmath.NewInt(int64(numTokensMinted * tokenomicsParams.MintAllocationPercentages.Application))
- supplierMint := float32(numTokensMinted * tokenomicsParams.MintAllocationPercentages.Supplier)
+ supplierMint := float64(numTokensMinted * tokenomicsParams.MintAllocationPercentages.Supplier)
// Ensure the balance was increased to the appropriate amount.
require.Equal(t, daoBalanceBefore.Amount.Add(daoMint).Add(numTokensMintedInt), daoBalanceAfter.Amount)
@@ -458,8 +458,8 @@ func TestProcessTokenLogicModules_TLMGlobalMint_Valid_MintDistributionCorrect(t
addr := revShare.Address
balanceBefore := supplierShareholderBalancesBefore[addr]
balanceAfter := supplierShareholderBalancesAfter[addr].Amount.Int64()
- mintShare := int64(supplierMint * revShare.RevSharePercentage / 100)
- rewardShare := int64(float32(numTokensClaimed) * revShare.RevSharePercentage / 100)
+ mintShare := int64(supplierMint * float64(revShare.RevSharePercentage) / 100.0)
+ rewardShare := int64(float64(numTokensClaimed) * float64(revShare.RevSharePercentage) / 100.0)
balanceIncrease := cosmosmath.NewInt(mintShare + rewardShare)
expectedBalanceAfter := balanceBefore.Amount.Add(balanceIncrease).Int64()
// TODO_MAINNET(@red-0ne): Remove the InDelta check and use the exact amount once the floating point arithmetic is fixed
diff --git a/x/tokenomics/keeper/update_relay_mining_difficulty.go b/x/tokenomics/keeper/update_relay_mining_difficulty.go
index 298578e4c..ca8a5641b 100644
--- a/x/tokenomics/keeper/update_relay_mining_difficulty.go
+++ b/x/tokenomics/keeper/update_relay_mining_difficulty.go
@@ -6,8 +6,8 @@ import (
"github.com/pokt-network/poktroll/x/service/types"
)
-// UpdateRelayMiningDifficulty updates the on-chain relay mining difficulty
-// based on the amount of on-chain relays for each service, given a map of serviceId->numRelays.
+// UpdateRelayMiningDifficulty updates the onchain relay mining difficulty
+// based on the number of onchain relays for each service, given a map of serviceId->numRelays.
// This is a wrapper around the service keeper's UpdateRelayMiningDifficulty method
// to allow the tokenomics EndBlocker to update the relay mining difficulty after
// all claims have settled.
diff --git a/x/tokenomics/token_logic_module/distribution.go b/x/tokenomics/token_logic_module/distribution.go
index bdd2b5c38..f7071562d 100644
--- a/x/tokenomics/token_logic_module/distribution.go
+++ b/x/tokenomics/token_logic_module/distribution.go
@@ -94,7 +94,7 @@ func GetShareAmountMap(
shareAmountMap = make(map[string]uint64, len(serviceRevShare))
for _, revShare := range serviceRevShare {
// TODO_MAINNET(@red-0ne): Use big.Rat for deterministic results.
- sharePercentageFloat := big.NewFloat(float64(revShare.RevSharePercentage) / 100)
+ sharePercentageFloat := big.NewFloat(float64(revShare.RevSharePercentage) / float64(100.0))
amountToDistributeFloat := big.NewFloat(float64(amountToDistribute))
shareAmount, _ := big.NewFloat(0).Mul(amountToDistributeFloat, sharePercentageFloat).Uint64()
shareAmountMap[revShare.Address] = shareAmount
diff --git a/x/tokenomics/token_logic_module/relay_burn_equals_mint.go b/x/tokenomics/token_logic_module/relay_burn_equals_mint.go
index b89f79f4c..90d02eb80 100644
--- a/x/tokenomics/token_logic_module/relay_burn_equals_mint.go
+++ b/x/tokenomics/token_logic_module/relay_burn_equals_mint.go
@@ -88,7 +88,7 @@ func (tlm tlmRelayBurnEqualsMint) Process(
defer telemetry.BurnedTokensFromModule(suppliertypes.ModuleName, float32(tlmCtx.SettlementCoin.Amount.Int64()))
}
- // Update the application's on-chain stake
+ // Update the application's onchain stake
newAppStake, err := tlmCtx.Application.Stake.SafeSub(tlmCtx.SettlementCoin)
// DEV_NOTE: This should never occur because:
// 1. Application overservicing SHOULD be mitigated by the protocol.
diff --git a/x/tokenomics/token_logic_module/settlement_results.go b/x/tokenomics/token_logic_module/settlement_results.go
index ffc71ff5e..81f7484fd 100644
--- a/x/tokenomics/token_logic_module/settlement_results.go
+++ b/x/tokenomics/token_logic_module/settlement_results.go
@@ -97,7 +97,7 @@ func (rs ClaimSettlementResults) GetServiceIds() (serviceIds []string) {
// GetRelaysPerServiceMap returns a map of service IDs to the total number of relays
// claimed for that service in the combined results.
-// IMPORTANT: **DO NOT** directly iterate over returned map in on-chain code to avoid
+// IMPORTANT: **DO NOT** directly iterate over returned map in onchain code to avoid
// the possibility of introducing non-determinism. Instead, iterate over the service ID
// slice returned by OR a sorted slice of the service ID keys.
func (rs ClaimSettlementResults) GetRelaysPerServiceMap() (_ map[string]uint64, errs error) {
diff --git a/x/tokenomics/types/errors.go b/x/tokenomics/types/errors.go
index 642a2801c..fe433d7f6 100644
--- a/x/tokenomics/types/errors.go
+++ b/x/tokenomics/types/errors.go
@@ -6,7 +6,7 @@ import sdkerrors "cosmossdk.io/errors"
// x/tokenomics module sentinel errors
var (
- ErrTokenomicsInvalidSigner = sdkerrors.Register(ModuleName, 1100, "the provided authority address does not match the on-chain governance address")
+ ErrTokenomicsInvalidSigner = sdkerrors.Register(ModuleName, 1100, "the provided authority address does not match the onchain governance address")
ErrTokenomicsAddressInvalid = sdkerrors.Register(ModuleName, 1101, "the provided authority address is not a valid bech32 address")
ErrTokenomicsSessionHeaderNil = sdkerrors.Register(ModuleName, 1102, "provided claim's session header is nil")
ErrTokenomicsSessionHeaderInvalid = sdkerrors.Register(ModuleName, 1103, "provided claim's session header is invalid")
diff --git a/x/tokenomics/types/event.pb.go b/x/tokenomics/types/event.pb.go
index 5912e7206..3b486d01f 100644
--- a/x/tokenomics/types/event.pb.go
+++ b/x/tokenomics/types/event.pb.go
@@ -54,7 +54,7 @@ func (ClaimExpirationReason) EnumDescriptor() ([]byte, []int) {
}
// EventClaimExpired is an event emitted during settlement whenever a claim requiring
-// an on-chain proof doesn't have one. The claim cannot be settled, leading to that work
+// an onchain proof doesn't have one. The claim cannot be settled, leading to that work
// never being rewarded.
type EventClaimExpired struct {
Claim *types.Claim `protobuf:"bytes,1,opt,name=claim,proto3" json:"claim"`
diff --git a/x/tokenomics/types/expected_keepers.go b/x/tokenomics/types/expected_keepers.go
index c7c0de0c5..9d555f4e2 100644
--- a/x/tokenomics/types/expected_keepers.go
+++ b/x/tokenomics/types/expected_keepers.go
@@ -46,6 +46,7 @@ type ApplicationKeeper interface {
GetAllApplications(ctx context.Context) []apptypes.Application
UnbondApplication(ctx context.Context, app *apptypes.Application) error
EndBlockerUnbondApplications(ctx context.Context) error
+ GetParams(ctx context.Context) (params apptypes.Params)
}
type ProofKeeper interface {