From bb631a2c34bf426f7ca6ee0182dfbe979e0e8c3e Mon Sep 17 00:00:00 2001 From: Daniel Olshansky Date: Mon, 20 Jan 2025 13:37:27 -0500 Subject: [PATCH 01/24] [CLI] Filter Suppliers by ServiceID (#1028) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Summary Update the supplier query endpoint to use **AutoCLI** and add a flag to filter by `ServiceID` ### Primary Changes: - Add `service_id` filter to `list-suppliers` query to support filtering suppliers by service ID - Update CLI command from `list-supplier` to `list-suppliers` for improved clarity - Change revenue share percentage field type from `float32` to `uint64` to fix autoCLI issues ### Secondary changes: - Update documentation and config examples to reflect the new revenue share percentage type - Remove legacy CLI commands in favor of autocli implementation Screenshot 2025-01-16 at 12 58 06 PM Screenshot 2025-01-16 at 12 57 52 PM ## Type of change Select one or more from the following: - [x] New feature, functionality or library - [x] Consensus breaking; add the `consensus-breaking` label if so. See #791 for details - [ ] Bug fix - [ ] Code health or cleanup - [ ] Documentation - [ ] Other (specify) ## Testing - [x] **Documentation**: `make docusaurus_start`; only needed if you make doc changes - [x] **Unit Tests**: `make go_develop_and_test` - [ ] **LocalNet E2E Tests**: `make test_e2e` - [ ] **DevNet E2E Tests**: Add the `devnet-test-e2e` label to the PR. 
## Sanity Checklist - [ ] I have tested my changes using the available tooling - [x] I have commented my code - [x] I have performed a self-review of my own code; both comments & source code - [x] I create and reference any new tickets, if applicable - [x] I have left TODOs throughout the codebase, if applicable --- .../run-e2e-test-job-template.yaml | 2 +- api/poktroll/application/types.pulsar.go | 34 +-- api/poktroll/shared/service.pulsar.go | 123 ++++----- api/poktroll/shared/supplier.pulsar.go | 35 +-- api/poktroll/supplier/query.pulsar.go | 240 +++++++++++++----- config.yml | 6 +- docusaurus/docs/README.md | 2 +- .../contributing/code_review_guidelines.md | 2 +- .../{quickstart.md => walkthrough.md} | 4 +- .../configs/supplier_staking_config.md | 4 +- .../quickstart/docker_compose_walkthrough.md | 4 +- .../operate/quickstart/gateway_cheatsheet.md | 2 +- .../operate/quickstart/service_cheatsheet.md | 2 +- .../operate/quickstart/supplier_cheatsheet.md | 4 +- .../run_a_node/full_node_walkthrough.md | 2 +- .../docs/operate/user_guide/check-balance.md | 2 +- .../operate/user_guide/create-new-wallet.md | 2 +- .../{install.md => poktrolld_cli.md} | 2 +- .../user_guide/recover-with-mnemonic.md | 2 +- .../docs/operate/user_guide/send-tokens.md | 2 +- e2e/tests/init_test.go | 28 +- e2e/tests/node.go | 13 +- e2e/tests/stake_supplier.feature | 36 +-- localnet/poktrolld/config/config.toml | 4 +- .../config/supplier1_stake_config.yaml | 4 +- .../config/supplier_stake_config_example.yaml | 4 +- makefiles/suppliers.mk | 10 +- makefiles/testnet.mk | 2 +- proto/poktroll/application/types.proto | 40 +-- proto/poktroll/shared/service.proto | 6 +- proto/poktroll/shared/supplier.proto | 40 +-- proto/poktroll/supplier/query.proto | 5 + testutil/integration/app.go | 2 +- testutil/keeper/tokenomics.go | 2 +- x/application/types/types.pb.go | 34 +-- x/proof/keeper/query_proof.go | 5 +- x/proof/types/query_validation.go | 1 + x/service/keeper/query_service.go | 3 +- 
x/service/keeper/query_service_test.go | 4 +- x/shared/types/service.pb.go | 119 +++++---- x/shared/types/service_configs.go | 6 +- x/shared/types/supplier.pb.go | 33 +-- x/supplier/config/supplier_configs_reader.go | 6 +- .../config/supplier_configs_reader_test.go | 26 +- x/supplier/keeper/query_supplier.go | 35 ++- x/supplier/keeper/query_supplier_test.go | 64 ++++- x/supplier/keeper/supplier_test.go | 167 +++++++++++- x/supplier/module/autocli.go | 64 +++-- x/supplier/module/flags.go | 5 + x/supplier/module/query.go | 2 - x/supplier/module/query_supplier.go | 78 ------ x/supplier/module/query_supplier_test.go | 140 ---------- x/supplier/types/errors.go | 1 + x/supplier/types/query.pb.go | 180 ++++++++++--- x/supplier/types/query_validation.go | 44 ++++ .../keeper/token_logic_modules_test.go | 12 +- .../token_logic_module/distribution.go | 2 +- 57 files changed, 1039 insertions(+), 664 deletions(-) rename docusaurus/docs/develop/developer_guide/{quickstart.md => walkthrough.md} (99%) rename docusaurus/docs/operate/user_guide/{install.md => poktrolld_cli.md} (98%) create mode 100644 x/supplier/module/flags.go delete mode 100644 x/supplier/module/query_supplier.go delete mode 100644 x/supplier/module/query_supplier_test.go create mode 100644 x/supplier/types/query_validation.go diff --git a/.github/workflows-helpers/run-e2e-test-job-template.yaml b/.github/workflows-helpers/run-e2e-test-job-template.yaml index 21d3f0e1a..ca03ca867 100644 --- a/.github/workflows-helpers/run-e2e-test-job-template.yaml +++ b/.github/workflows-helpers/run-e2e-test-job-template.yaml @@ -37,7 +37,7 @@ spec: ls -l /root/.poktroll/keyring-test/ && \ poktrolld q gateway list-gateway --node=$POCKET_NODE && \ poktrolld q application list-application --node=$POCKET_NODE && \ - poktrolld q supplier list-supplier --node=$POCKET_NODE && \ + poktrolld q supplier list-suppliers --node=$POCKET_NODE && \ make acc_initialize_pubkeys && \ go test -v ./e2e/tests/... 
-tags=e2e env: diff --git a/api/poktroll/application/types.pulsar.go b/api/poktroll/application/types.pulsar.go index 78bcffe29..21eb04ac4 100644 --- a/api/poktroll/application/types.pulsar.go +++ b/api/poktroll/application/types.pulsar.go @@ -2182,35 +2182,37 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// Application defines the type used to store an onchain definition and state for an application +// Application represents the on-chain definition and state of an application type Application struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` // The Bech32 address of the application. - Stake *v1beta1.Coin `protobuf:"bytes,2,opt,name=stake,proto3" json:"stake,omitempty"` // The total amount of uPOKT the application has staked - // CRITICAL_DEV_NOTE: The number of service_configs must be EXACTLY ONE. + // Bech32 address of the application + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + // Total amount of staked uPOKT + Stake *v1beta1.Coin `protobuf:"bytes,2,opt,name=stake,proto3" json:"stake,omitempty"` + // CRITICAL: Must contain EXACTLY ONE service config // This prevents applications from over-servicing. - // The field is kept repeated (a list) for both legacy and future logic reaosns. 
- // References: + // Kept as repeated field for legacy and future compatibility + // Refs: // - https://github.com/pokt-network/poktroll/pull/750#discussion_r1735025033 // - https://www.notion.so/buildwithgrove/Off-chain-Application-Stake-Tracking-6a8bebb107db4f7f9dc62cbe7ba555f7 - ServiceConfigs []*shared.ApplicationServiceConfig `protobuf:"bytes,3,rep,name=service_configs,json=serviceConfigs,proto3" json:"service_configs,omitempty"` // The list of services this appliccation is configured to request service for + ServiceConfigs []*shared.ApplicationServiceConfig `protobuf:"bytes,3,rep,name=service_configs,json=serviceConfigs,proto3" json:"service_configs,omitempty"` // TODO_BETA(@bryanchriswhite): Rename `delegatee_gateway_addresses` to `gateway_addresses_delegated_to`. // Ensure to rename all relevant configs, comments, variables, function names, etc as well. - DelegateeGatewayAddresses []string `protobuf:"bytes,4,rep,name=delegatee_gateway_addresses,json=delegateeGatewayAddresses,proto3" json:"delegatee_gateway_addresses,omitempty"` // The Bech32 encoded addresses for all delegatee Gateways, in a non-nullable slice - // A map from sessionEndHeights to a list of Gateways. - // The key is the height of the last block of the session during which the - // respective undelegation was committed. - // The value is a list of gateways being undelegated from. 
+ // Non-nullable list of Bech32 encoded delegatee Gateway addresses + DelegateeGatewayAddresses []string `protobuf:"bytes,4,rep,name=delegatee_gateway_addresses,json=delegateeGatewayAddresses,proto3" json:"delegatee_gateway_addresses,omitempty"` + // Mapping of session end heights to gateways being undelegated from + // - Key: Height of the last block of the session when undelegation tx was committed + // - Value: List of gateways being undelegated from // TODO_DOCUMENT(@red-0ne): Need to document the flow from this comment // so its clear to everyone why this is necessary; https://github.com/pokt-network/poktroll/issues/476#issuecomment-2052639906. PendingUndelegations map[uint64]*UndelegatingGatewayList `protobuf:"bytes,5,rep,name=pending_undelegations,json=pendingUndelegations,proto3" json:"pending_undelegations,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // The end height of the session at which an application initiated its unstaking process. - // If the application did not unstake, this value will be 0. 
- UnstakeSessionEndHeight uint64 `protobuf:"varint,6,opt,name=unstake_session_end_height,json=unstakeSessionEndHeight,proto3" json:"unstake_session_end_height,omitempty"` - PendingTransfer *PendingApplicationTransfer `protobuf:"bytes,7,opt,name=pending_transfer,json=pendingTransfer,proto3" json:"pending_transfer,omitempty"` + // Session end height when application initiated unstaking (0 if not unstaking) + UnstakeSessionEndHeight uint64 `protobuf:"varint,6,opt,name=unstake_session_end_height,json=unstakeSessionEndHeight,proto3" json:"unstake_session_end_height,omitempty"` + // Information about pending application transfers + PendingTransfer *PendingApplicationTransfer `protobuf:"bytes,7,opt,name=pending_transfer,json=pendingTransfer,proto3" json:"pending_transfer,omitempty"` } func (x *Application) Reset() { diff --git a/api/poktroll/shared/service.pulsar.go b/api/poktroll/shared/service.pulsar.go index 92dcfb12b..a923eda0b 100644 --- a/api/poktroll/shared/service.pulsar.go +++ b/api/poktroll/shared/service.pulsar.go @@ -2,7 +2,6 @@ package shared import ( - binary "encoding/binary" fmt "fmt" _ "github.com/cosmos/cosmos-proto" runtime "github.com/cosmos/cosmos-proto/runtime" @@ -11,7 +10,6 @@ import ( protoiface "google.golang.org/protobuf/runtime/protoiface" protoimpl "google.golang.org/protobuf/runtime/protoimpl" io "io" - math "math" reflect "reflect" sync "sync" ) @@ -2418,8 +2416,8 @@ func (x *fastReflection_ServiceRevenueShare) Range(f func(protoreflect.FieldDesc return } } - if x.RevSharePercentage != float32(0) || math.Signbit(float64(x.RevSharePercentage)) { - value := protoreflect.ValueOfFloat32(x.RevSharePercentage) + if x.RevSharePercentage != uint64(0) { + value := protoreflect.ValueOfUint64(x.RevSharePercentage) if !f(fd_ServiceRevenueShare_rev_share_percentage, value) { return } @@ -2442,7 +2440,7 @@ func (x *fastReflection_ServiceRevenueShare) Has(fd protoreflect.FieldDescriptor case "poktroll.shared.ServiceRevenueShare.address": return x.Address 
!= "" case "poktroll.shared.ServiceRevenueShare.rev_share_percentage": - return x.RevSharePercentage != float32(0) || math.Signbit(float64(x.RevSharePercentage)) + return x.RevSharePercentage != uint64(0) default: if fd.IsExtension() { panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.shared.ServiceRevenueShare")) @@ -2462,7 +2460,7 @@ func (x *fastReflection_ServiceRevenueShare) Clear(fd protoreflect.FieldDescript case "poktroll.shared.ServiceRevenueShare.address": x.Address = "" case "poktroll.shared.ServiceRevenueShare.rev_share_percentage": - x.RevSharePercentage = float32(0) + x.RevSharePercentage = uint64(0) default: if fd.IsExtension() { panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.shared.ServiceRevenueShare")) @@ -2484,7 +2482,7 @@ func (x *fastReflection_ServiceRevenueShare) Get(descriptor protoreflect.FieldDe return protoreflect.ValueOfString(value) case "poktroll.shared.ServiceRevenueShare.rev_share_percentage": value := x.RevSharePercentage - return protoreflect.ValueOfFloat32(value) + return protoreflect.ValueOfUint64(value) default: if descriptor.IsExtension() { panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.shared.ServiceRevenueShare")) @@ -2508,7 +2506,7 @@ func (x *fastReflection_ServiceRevenueShare) Set(fd protoreflect.FieldDescriptor case "poktroll.shared.ServiceRevenueShare.address": x.Address = value.Interface().(string) case "poktroll.shared.ServiceRevenueShare.rev_share_percentage": - x.RevSharePercentage = float32(value.Float()) + x.RevSharePercentage = value.Uint() default: if fd.IsExtension() { panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.shared.ServiceRevenueShare")) @@ -2549,7 +2547,7 @@ func (x *fastReflection_ServiceRevenueShare) NewField(fd protoreflect.FieldDescr case "poktroll.shared.ServiceRevenueShare.address": return protoreflect.ValueOfString("") case 
"poktroll.shared.ServiceRevenueShare.rev_share_percentage": - return protoreflect.ValueOfFloat32(float32(0)) + return protoreflect.ValueOfUint64(uint64(0)) default: if fd.IsExtension() { panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.shared.ServiceRevenueShare")) @@ -2623,8 +2621,8 @@ func (x *fastReflection_ServiceRevenueShare) ProtoMethods() *protoiface.Methods if l > 0 { n += 1 + l + runtime.Sov(uint64(l)) } - if x.RevSharePercentage != 0 || math.Signbit(float64(x.RevSharePercentage)) { - n += 5 + if x.RevSharePercentage != 0 { + n += 1 + runtime.Sov(uint64(x.RevSharePercentage)) } if x.unknownFields != nil { n += len(x.unknownFields) @@ -2655,11 +2653,10 @@ func (x *fastReflection_ServiceRevenueShare) ProtoMethods() *protoiface.Methods i -= len(x.unknownFields) copy(dAtA[i:], x.unknownFields) } - if x.RevSharePercentage != 0 || math.Signbit(float64(x.RevSharePercentage)) { - i -= 4 - binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(x.RevSharePercentage)))) + if x.RevSharePercentage != 0 { + i = runtime.EncodeVarint(dAtA, i, uint64(x.RevSharePercentage)) i-- - dAtA[i] = 0x15 + dAtA[i] = 0x18 } if len(x.Address) > 0 { i -= len(x.Address) @@ -2749,17 +2746,25 @@ func (x *fastReflection_ServiceRevenueShare) ProtoMethods() *protoiface.Methods } x.Address = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: - if wireType != 5 { + case 3: + if wireType != 0 { return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field RevSharePercentage", wireType) } - var v uint32 - if (iNdEx + 4) > l { - return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + x.RevSharePercentage = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if 
iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + x.RevSharePercentage |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - v = uint32(binary.LittleEndian.Uint32(dAtA[iNdEx:])) - iNdEx += 4 - x.RevSharePercentage = float32(math.Float32frombits(v)) default: iNdEx = preIndex skippy, err := runtime.Skip(dAtA[iNdEx:]) @@ -3598,8 +3603,8 @@ type ServiceRevenueShare struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` // The Bech32 address of the revenue share recipient - RevSharePercentage float32 `protobuf:"fixed32,2,opt,name=rev_share_percentage,json=revSharePercentage,proto3" json:"rev_share_percentage,omitempty"` // The percentage of revenue share the recipient will receive + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` // The Bech32 address of the revenue share recipient + RevSharePercentage uint64 `protobuf:"varint,3,opt,name=rev_share_percentage,json=revSharePercentage,proto3" json:"rev_share_percentage,omitempty"` // The percentage of revenue share the recipient will receive } func (x *ServiceRevenueShare) Reset() { @@ -3629,7 +3634,7 @@ func (x *ServiceRevenueShare) GetAddress() string { return "" } -func (x *ServiceRevenueShare) GetRevSharePercentage() float32 { +func (x *ServiceRevenueShare) GetRevSharePercentage() uint64 { if x != nil { return x.RevSharePercentage } @@ -3724,39 +3729,39 @@ var file_poktroll_shared_service_proto_rawDesc = []byte{ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x73, 0x22, 0x7b, 0x0a, 0x13, 0x53, 
0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, - 0x65, 0x76, 0x65, 0x6e, 0x75, 0x65, 0x53, 0x68, 0x61, 0x72, 0x65, 0x12, 0x32, 0x0a, 0x07, 0x61, - 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x18, 0xd2, 0xb4, - 0x2d, 0x14, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, - 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, - 0x30, 0x0a, 0x14, 0x72, 0x65, 0x76, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x5f, 0x70, 0x65, 0x72, - 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x12, 0x72, - 0x65, 0x76, 0x53, 0x68, 0x61, 0x72, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, - 0x65, 0x22, 0x56, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x30, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, - 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, - 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2a, 0x4b, 0x0a, 0x07, 0x52, 0x50, 0x43, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, - 0x52, 0x50, 0x43, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x47, 0x52, 0x50, 0x43, 0x10, 0x01, 0x12, - 0x0d, 0x0a, 0x09, 0x57, 0x45, 0x42, 0x53, 0x4f, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x02, 0x12, 0x0c, - 0x0a, 0x08, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x52, 0x50, 0x43, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, - 0x52, 0x45, 0x53, 0x54, 0x10, 0x04, 0x2a, 0x30, 0x0a, 0x0d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x12, 0x0a, 0x0e, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, - 0x57, 0x4e, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 
0x54, - 0x49, 0x4d, 0x45, 0x4f, 0x55, 0x54, 0x10, 0x01, 0x42, 0xa6, 0x01, 0xd8, 0xe2, 0x1e, 0x01, 0x0a, - 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x73, 0x68, - 0x61, 0x72, 0x65, 0x64, 0x42, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x20, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x73, 0x64, 0x6b, 0x2e, - 0x69, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2f, - 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0xa2, 0x02, 0x03, 0x50, 0x53, 0x58, 0xaa, 0x02, 0x0f, 0x50, - 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0xca, 0x02, - 0x0f, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x5c, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, - 0xe2, 0x02, 0x1b, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x5c, 0x53, 0x68, 0x61, 0x72, - 0x65, 0x64, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, - 0x10, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x3a, 0x3a, 0x53, 0x68, 0x61, 0x72, 0x65, - 0x64, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x66, 0x69, 0x67, 0x73, 0x22, 0x81, 0x01, 0x0a, 0x13, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x52, 0x65, 0x76, 0x65, 0x6e, 0x75, 0x65, 0x53, 0x68, 0x61, 0x72, 0x65, 0x12, 0x32, 0x0a, 0x07, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x18, 0xd2, + 0xb4, 0x2d, 0x14, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x12, 0x30, 0x0a, 0x14, 0x72, 0x65, 0x76, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x5f, 0x70, 0x65, + 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x12, + 0x72, 0x65, 0x76, 0x53, 0x68, 0x61, 0x72, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, + 0x67, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x22, 0x56, 0x0a, 0x0c, 
0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, + 0x2e, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x2a, 0x4b, 0x0a, 0x07, 0x52, 0x50, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x52, 0x50, 0x43, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, + 0x47, 0x52, 0x50, 0x43, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x57, 0x45, 0x42, 0x53, 0x4f, 0x43, + 0x4b, 0x45, 0x54, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x52, 0x50, + 0x43, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x52, 0x45, 0x53, 0x54, 0x10, 0x04, 0x2a, 0x30, 0x0a, + 0x0d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x12, + 0x0a, 0x0e, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, + 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x54, 0x49, 0x4d, 0x45, 0x4f, 0x55, 0x54, 0x10, 0x01, 0x42, + 0xa6, 0x01, 0xd8, 0xe2, 0x1e, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x70, 0x6f, 0x6b, 0x74, + 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x42, 0x0c, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x20, 0x63, 0x6f, 0x73, + 0x6d, 0x6f, 0x73, 0x73, 0x64, 0x6b, 0x2e, 0x69, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x6f, + 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0xa2, 0x02, 0x03, + 0x50, 0x53, 0x58, 0xaa, 0x02, 0x0f, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x53, + 0x68, 0x61, 0x72, 0x65, 0x64, 0xca, 0x02, 0x0f, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, + 0x5c, 0x53, 
0x68, 0x61, 0x72, 0x65, 0x64, 0xe2, 0x02, 0x1b, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, + 0x6c, 0x6c, 0x5c, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x10, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, + 0x3a, 0x3a, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/api/poktroll/shared/supplier.pulsar.go b/api/poktroll/shared/supplier.pulsar.go index be0da6a07..8ac084889 100644 --- a/api/poktroll/shared/supplier.pulsar.go +++ b/api/poktroll/shared/supplier.pulsar.go @@ -1066,29 +1066,30 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// Supplier is the type defining the actor in Pocket Network that provides RPC services. +// Supplier represents an actor in Pocket Network that provides RPC services type Supplier struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // The address of the owner (i.e. staker, custodial) that owns the funds for staking. - // By default, this address is the one that receives all the rewards unless owtherwise specified. - // This property cannot be updated by the operator. - OwnerAddress string `protobuf:"bytes,1,opt,name=owner_address,json=ownerAddress,proto3" json:"owner_address,omitempty"` // Bech32 cosmos address - // The operator address of the supplier operator (i.e. the one managing the offchain server). - // The operator address can update the supplier's configurations excluding the owner address. - // This property does not change over the supplier's lifespan, the supplier must be unstaked - // and re-staked to effectively update this value. 
- OperatorAddress string `protobuf:"bytes,2,opt,name=operator_address,json=operatorAddress,proto3" json:"operator_address,omitempty"` // Bech32 cosmos address - Stake *v1beta1.Coin `protobuf:"bytes,3,opt,name=stake,proto3" json:"stake,omitempty"` // The total amount of uPOKT the supplier has staked - Services []*SupplierServiceConfig `protobuf:"bytes,4,rep,name=services,proto3" json:"services,omitempty"` // The service configs this supplier can support - // The session end height at which an actively unbonding supplier unbonds its stake. - // If the supplier did not unstake, this value will be 0. + // Owner address that controls the staked funds and receives rewards by default + // Cannot be updated by the operator + OwnerAddress string `protobuf:"bytes,1,opt,name=owner_address,json=ownerAddress,proto3" json:"owner_address,omitempty"` + // Operator address managing the offchain server + // Immutable for supplier's lifespan - requires unstake/re-stake to change. + // Can update supplier configs except for owner address. + OperatorAddress string `protobuf:"bytes,2,opt,name=operator_address,json=operatorAddress,proto3" json:"operator_address,omitempty"` + // Total amount of staked uPOKT + Stake *v1beta1.Coin `protobuf:"bytes,3,opt,name=stake,proto3" json:"stake,omitempty"` + // List of service configurations supported by this supplier + Services []*SupplierServiceConfig `protobuf:"bytes,4,rep,name=services,proto3" json:"services,omitempty"` + // Session end height when supplier initiated unstaking (0 if not unstaking) UnstakeSessionEndHeight uint64 `protobuf:"varint,5,opt,name=unstake_session_end_height,json=unstakeSessionEndHeight,proto3" json:"unstake_session_end_height,omitempty"` - // services_activation_heights_map is a map of serviceIds to the height at - // which the staked supplier will become active for that service. - // Activation heights are session start heights. 
+ // Mapping of serviceIds to their activation heights + // - Key: serviceId + // - Value: Session start height when supplier becomes active for the service + // TODO_MAINNET(@olshansk, #1033): Look into moving this to an external repeated protobuf + // because maps are no longer supported for serialized types in the CosmoSDK. ServicesActivationHeightsMap map[string]uint64 `protobuf:"bytes,6,rep,name=services_activation_heights_map,json=servicesActivationHeightsMap,proto3" json:"services_activation_heights_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` } diff --git a/api/poktroll/supplier/query.pulsar.go b/api/poktroll/supplier/query.pulsar.go index d1f087c62..392f4aa39 100644 --- a/api/poktroll/supplier/query.pulsar.go +++ b/api/poktroll/supplier/query.pulsar.go @@ -1668,12 +1668,14 @@ func (x *fastReflection_QueryGetSupplierResponse) ProtoMethods() *protoiface.Met var ( md_QueryAllSuppliersRequest protoreflect.MessageDescriptor fd_QueryAllSuppliersRequest_pagination protoreflect.FieldDescriptor + fd_QueryAllSuppliersRequest_service_id protoreflect.FieldDescriptor ) func init() { file_poktroll_supplier_query_proto_init() md_QueryAllSuppliersRequest = File_poktroll_supplier_query_proto.Messages().ByName("QueryAllSuppliersRequest") fd_QueryAllSuppliersRequest_pagination = md_QueryAllSuppliersRequest.Fields().ByName("pagination") + fd_QueryAllSuppliersRequest_service_id = md_QueryAllSuppliersRequest.Fields().ByName("service_id") } var _ protoreflect.Message = (*fastReflection_QueryAllSuppliersRequest)(nil) @@ -1747,6 +1749,16 @@ func (x *fastReflection_QueryAllSuppliersRequest) Range(f func(protoreflect.Fiel return } } + if x.Filter != nil { + switch o := x.Filter.(type) { + case *QueryAllSuppliersRequest_ServiceId: + v := o.ServiceId + value := protoreflect.ValueOfString(v) + if !f(fd_QueryAllSuppliersRequest_service_id, value) { + return + } + } + } } // Has reports whether a field is populated. 
@@ -1764,6 +1776,14 @@ func (x *fastReflection_QueryAllSuppliersRequest) Has(fd protoreflect.FieldDescr switch fd.FullName() { case "poktroll.supplier.QueryAllSuppliersRequest.pagination": return x.Pagination != nil + case "poktroll.supplier.QueryAllSuppliersRequest.service_id": + if x.Filter == nil { + return false + } else if _, ok := x.Filter.(*QueryAllSuppliersRequest_ServiceId); ok { + return true + } else { + return false + } default: if fd.IsExtension() { panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.supplier.QueryAllSuppliersRequest")) @@ -1782,6 +1802,8 @@ func (x *fastReflection_QueryAllSuppliersRequest) Clear(fd protoreflect.FieldDes switch fd.FullName() { case "poktroll.supplier.QueryAllSuppliersRequest.pagination": x.Pagination = nil + case "poktroll.supplier.QueryAllSuppliersRequest.service_id": + x.Filter = nil default: if fd.IsExtension() { panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.supplier.QueryAllSuppliersRequest")) @@ -1801,6 +1823,14 @@ func (x *fastReflection_QueryAllSuppliersRequest) Get(descriptor protoreflect.Fi case "poktroll.supplier.QueryAllSuppliersRequest.pagination": value := x.Pagination return protoreflect.ValueOfMessage(value.ProtoReflect()) + case "poktroll.supplier.QueryAllSuppliersRequest.service_id": + if x.Filter == nil { + return protoreflect.ValueOfString("") + } else if v, ok := x.Filter.(*QueryAllSuppliersRequest_ServiceId); ok { + return protoreflect.ValueOfString(v.ServiceId) + } else { + return protoreflect.ValueOfString("") + } default: if descriptor.IsExtension() { panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.supplier.QueryAllSuppliersRequest")) @@ -1823,6 +1853,9 @@ func (x *fastReflection_QueryAllSuppliersRequest) Set(fd protoreflect.FieldDescr switch fd.FullName() { case "poktroll.supplier.QueryAllSuppliersRequest.pagination": x.Pagination = value.Message().Interface().(*v1beta1.PageRequest) + case 
"poktroll.supplier.QueryAllSuppliersRequest.service_id": + cv := value.Interface().(string) + x.Filter = &QueryAllSuppliersRequest_ServiceId{ServiceId: cv} default: if fd.IsExtension() { panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.supplier.QueryAllSuppliersRequest")) @@ -1848,6 +1881,8 @@ func (x *fastReflection_QueryAllSuppliersRequest) Mutable(fd protoreflect.FieldD x.Pagination = new(v1beta1.PageRequest) } return protoreflect.ValueOfMessage(x.Pagination.ProtoReflect()) + case "poktroll.supplier.QueryAllSuppliersRequest.service_id": + panic(fmt.Errorf("field service_id of message poktroll.supplier.QueryAllSuppliersRequest is not mutable")) default: if fd.IsExtension() { panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.supplier.QueryAllSuppliersRequest")) @@ -1864,6 +1899,8 @@ func (x *fastReflection_QueryAllSuppliersRequest) NewField(fd protoreflect.Field case "poktroll.supplier.QueryAllSuppliersRequest.pagination": m := new(v1beta1.PageRequest) return protoreflect.ValueOfMessage(m.ProtoReflect()) + case "poktroll.supplier.QueryAllSuppliersRequest.service_id": + return protoreflect.ValueOfString("") default: if fd.IsExtension() { panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.supplier.QueryAllSuppliersRequest")) @@ -1877,6 +1914,14 @@ func (x *fastReflection_QueryAllSuppliersRequest) NewField(fd protoreflect.Field // It panics if the oneof descriptor does not belong to this message. 
func (x *fastReflection_QueryAllSuppliersRequest) WhichOneof(d protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { switch d.FullName() { + case "poktroll.supplier.QueryAllSuppliersRequest.filter": + if x.Filter == nil { + return nil + } + switch x.Filter.(type) { + case *QueryAllSuppliersRequest_ServiceId: + return x.Descriptor().Fields().ByName("service_id") + } default: panic(fmt.Errorf("%s is not a oneof field in poktroll.supplier.QueryAllSuppliersRequest", d.FullName())) } @@ -1937,6 +1982,14 @@ func (x *fastReflection_QueryAllSuppliersRequest) ProtoMethods() *protoiface.Met l = options.Size(x.Pagination) n += 1 + l + runtime.Sov(uint64(l)) } + switch x := x.Filter.(type) { + case *QueryAllSuppliersRequest_ServiceId: + if x == nil { + break + } + l = len(x.ServiceId) + n += 1 + l + runtime.Sov(uint64(l)) + } if x.unknownFields != nil { n += len(x.unknownFields) } @@ -1966,6 +2019,14 @@ func (x *fastReflection_QueryAllSuppliersRequest) ProtoMethods() *protoiface.Met i -= len(x.unknownFields) copy(dAtA[i:], x.unknownFields) } + switch x := x.Filter.(type) { + case *QueryAllSuppliersRequest_ServiceId: + i -= len(x.ServiceId) + copy(dAtA[i:], x.ServiceId) + i = runtime.EncodeVarint(dAtA, i, uint64(len(x.ServiceId))) + i-- + dAtA[i] = 0x12 + } if x.Pagination != nil { encoded, err := options.Marshal(x.Pagination) if err != nil { @@ -2065,6 +2126,38 @@ func (x *fastReflection_QueryAllSuppliersRequest) ProtoMethods() *protoiface.Met return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err } iNdEx = postIndex + case 2: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field ServiceId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + 
} + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + x.Filter = &QueryAllSuppliersRequest_ServiceId{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex default: iNdEx = preIndex skippy, err := runtime.Skip(dAtA[iNdEx:]) @@ -2755,7 +2848,7 @@ type QueryGetSupplierRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - OperatorAddress string `protobuf:"bytes,1,opt,name=operator_address,json=operatorAddress,proto3" json:"operator_address,omitempty"` + OperatorAddress string `protobuf:"bytes,1,opt,name=operator_address,json=operatorAddress,proto3" json:"operator_address,omitempty"` // TODO_TECHDEBT: Add the ability to query for a supplier by owner_id } func (x *QueryGetSupplierRequest) Reset() { @@ -2826,6 +2919,10 @@ type QueryAllSuppliersRequest struct { unknownFields protoimpl.UnknownFields Pagination *v1beta1.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"` + // Types that are assignable to Filter: + // + // *QueryAllSuppliersRequest_ServiceId + Filter isQueryAllSuppliersRequest_Filter `protobuf_oneof:"filter"` } func (x *QueryAllSuppliersRequest) Reset() { @@ -2855,6 +2952,30 @@ func (x *QueryAllSuppliersRequest) GetPagination() *v1beta1.PageRequest { return nil } +func (x 
*QueryAllSuppliersRequest) GetFilter() isQueryAllSuppliersRequest_Filter { + if x != nil { + return x.Filter + } + return nil +} + +func (x *QueryAllSuppliersRequest) GetServiceId() string { + if x, ok := x.GetFilter().(*QueryAllSuppliersRequest_ServiceId); ok { + return x.ServiceId + } + return "" +} + +type isQueryAllSuppliersRequest_Filter interface { + isQueryAllSuppliersRequest_Filter() +} + +type QueryAllSuppliersRequest_ServiceId struct { + ServiceId string `protobuf:"bytes,2,opt,name=service_id,json=serviceId,proto3,oneof"` // unique service identifier to filter by +} + +func (*QueryAllSuppliersRequest_ServiceId) isQueryAllSuppliersRequest_Filter() {} + type QueryAllSuppliersResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2937,65 +3058,67 @@ var file_poktroll_supplier_query_proto_rawDesc = []byte{ 0x69, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x2e, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x42, 0x04, 0xc8, 0xde, 0x1f, 0x00, 0x52, 0x08, 0x73, 0x75, 0x70, 0x70, - 0x6c, 0x69, 0x65, 0x72, 0x22, 0x62, 0x0a, 0x18, 0x51, 0x75, 0x65, 0x72, 0x79, 0x41, 0x6c, 0x6c, - 0x53, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x46, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x62, 0x61, - 0x73, 0x65, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, - 0x2e, 0x50, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0a, 0x70, 0x61, - 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa1, 0x01, 0x0a, 0x19, 0x51, 0x75, 0x65, - 0x72, 0x79, 0x41, 0x6c, 0x6c, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x08, 0x73, 0x75, 
0x70, 0x70, 0x6c, 0x69, - 0x65, 0x72, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72, - 0x6f, 0x6c, 0x6c, 0x2e, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x2e, 0x53, 0x75, 0x70, 0x70, 0x6c, - 0x69, 0x65, 0x72, 0x42, 0x04, 0xc8, 0xde, 0x1f, 0x00, 0x52, 0x08, 0x73, 0x75, 0x70, 0x70, 0x6c, - 0x69, 0x65, 0x72, 0x12, 0x47, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, - 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, - 0x74, 0x61, 0x31, 0x2e, 0x50, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x52, 0x0a, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x32, 0xda, 0x03, 0x0a, - 0x05, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x87, 0x01, 0x0a, 0x06, 0x50, 0x61, 0x72, 0x61, 0x6d, - 0x73, 0x12, 0x25, 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x73, 0x75, 0x70, - 0x70, 0x6c, 0x69, 0x65, 0x72, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72, - 0x6f, 0x6c, 0x6c, 0x2e, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x2e, 0x51, 0x75, 0x65, - 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x2e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x28, 0x12, 0x26, 0x2f, 0x70, 0x6f, 0x6b, 0x74, 0x2d, - 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2f, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, - 0x2f, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x2f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x12, 0xa8, 0x01, 0x0a, 0x08, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x2a, 0x2e, + 0x6c, 0x69, 0x65, 0x72, 0x22, 0x8d, 0x01, 0x0a, 0x18, 0x51, 0x75, 0x65, 0x72, 0x79, 0x41, 0x6c, + 0x6c, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 
0x46, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x62, + 0x61, 0x73, 0x65, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, + 0x31, 0x2e, 0x50, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0a, 0x70, + 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0a, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x64, 0x42, 0x08, 0x0a, 0x06, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x22, 0xa1, 0x01, 0x0a, 0x19, 0x51, 0x75, 0x65, 0x72, 0x79, 0x41, 0x6c, + 0x6c, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x08, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, + 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x2e, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x42, + 0x04, 0xc8, 0xde, 0x1f, 0x00, 0x52, 0x08, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, + 0x47, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x62, 0x61, 0x73, + 0x65, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, + 0x50, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x0a, 0x70, 0x61, + 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x32, 0xda, 0x03, 0x0a, 0x05, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x12, 0x87, 0x01, 0x0a, 0x06, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x25, 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, - 0x72, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x47, 0x65, 
0x74, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x69, - 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x70, 0x6f, 0x6b, 0x74, + 0x72, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, + 0x73, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2e, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x28, 0x12, 0x26, 0x2f, 0x70, 0x6f, 0x6b, 0x74, 0x2d, 0x6e, 0x65, 0x74, 0x77, + 0x6f, 0x72, 0x6b, 0x2f, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2f, 0x73, 0x75, 0x70, + 0x70, 0x6c, 0x69, 0x65, 0x72, 0x2f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0xa8, 0x01, 0x0a, + 0x08, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x2a, 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x47, 0x65, 0x74, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x43, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3d, 0x12, 0x3b, - 0x2f, 0x70, 0x6f, 0x6b, 0x74, 0x2d, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2f, 0x70, 0x6f, - 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2f, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x2f, - 0x73, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x2f, 0x7b, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, - 0x6f, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x7d, 0x12, 0x9b, 0x01, 0x0a, 0x0c, - 0x41, 0x6c, 0x6c, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x73, 0x12, 0x2b, 0x2e, 0x70, - 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, - 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x41, 0x6c, 0x6c, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, - 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x70, 0x6f, 0x6b, 0x74, - 
0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x2e, 0x51, 0x75, - 0x65, 0x72, 0x79, 0x41, 0x6c, 0x6c, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x30, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, - 0x28, 0x2f, 0x70, 0x6f, 0x6b, 0x74, 0x2d, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2f, 0x70, - 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2f, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, - 0x2f, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x42, 0xb0, 0x01, 0xd8, 0xe2, 0x1e, 0x01, - 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x73, - 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x42, 0x0a, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x73, 0x64, 0x6b, - 0x2e, 0x69, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, - 0x2f, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0xa2, 0x02, 0x03, 0x50, 0x53, 0x58, 0xaa, - 0x02, 0x11, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x53, 0x75, 0x70, 0x70, 0x6c, - 0x69, 0x65, 0x72, 0xca, 0x02, 0x11, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x5c, 0x53, - 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0xe2, 0x02, 0x1d, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, - 0x6c, 0x6c, 0x5c, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x5c, 0x47, 0x50, 0x42, 0x4d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x12, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, - 0x6c, 0x6c, 0x3a, 0x3a, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, + 0x2e, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x47, + 0x65, 0x74, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 
0x22, 0x43, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3d, 0x12, 0x3b, 0x2f, 0x70, 0x6f, 0x6b, + 0x74, 0x2d, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2f, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, + 0x6c, 0x6c, 0x2f, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x2f, 0x73, 0x75, 0x70, 0x70, + 0x6c, 0x69, 0x65, 0x72, 0x2f, 0x7b, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x61, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x7d, 0x12, 0x9b, 0x01, 0x0a, 0x0c, 0x41, 0x6c, 0x6c, 0x53, + 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x73, 0x12, 0x2b, 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72, + 0x6f, 0x6c, 0x6c, 0x2e, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x2e, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x41, 0x6c, 0x6c, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, + 0x2e, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x41, + 0x6c, 0x6c, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x30, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, 0x28, 0x2f, 0x70, 0x6f, + 0x6b, 0x74, 0x2d, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2f, 0x70, 0x6f, 0x6b, 0x74, 0x72, + 0x6f, 0x6c, 0x6c, 0x2f, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x2f, 0x73, 0x75, 0x70, + 0x70, 0x6c, 0x69, 0x65, 0x72, 0x42, 0xb0, 0x01, 0xd8, 0xe2, 0x1e, 0x01, 0x0a, 0x15, 0x63, 0x6f, + 0x6d, 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x73, 0x75, 0x70, 0x70, 0x6c, + 0x69, 0x65, 0x72, 0x42, 0x0a, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x22, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x73, 0x64, 0x6b, 0x2e, 0x69, 0x6f, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2f, 0x73, 0x75, 0x70, + 0x70, 0x6c, 0x69, 0x65, 0x72, 0xa2, 0x02, 0x03, 0x50, 0x53, 0x58, 0xaa, 0x02, 0x11, 0x50, 0x6f, + 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x53, 
0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0xca, + 0x02, 0x11, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x5c, 0x53, 0x75, 0x70, 0x70, 0x6c, + 0x69, 0x65, 0x72, 0xe2, 0x02, 0x1d, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x5c, 0x53, + 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0xea, 0x02, 0x12, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x3a, 0x3a, + 0x53, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -3122,6 +3245,9 @@ func file_poktroll_supplier_query_proto_init() { } } } + file_poktroll_supplier_query_proto_msgTypes[4].OneofWrappers = []interface{}{ + (*QueryAllSuppliersRequest_ServiceId)(nil), + } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/config.yml b/config.yml index 5be451816..cbd3fbe25 100644 --- a/config.yml +++ b/config.yml @@ -229,7 +229,7 @@ genesis: url: http://relayminer1:8545 rev_share: - address: pokt19a3t4yunp0dlpfjrp7qwnzwlrzd5fzs2gjaaaj - rev_share_percentage: "100" + rev_share_percentage: 100 - service_id: rest endpoints: - configs: [] @@ -237,7 +237,7 @@ genesis: url: http://relayminer1:8545 rev_share: - address: pokt19a3t4yunp0dlpfjrp7qwnzwlrzd5fzs2gjaaaj - rev_share_percentage: "100" + rev_share_percentage: 100 - service_id: ollama endpoints: - configs: [] @@ -245,7 +245,7 @@ genesis: url: http://relayminer1:8545 rev_share: - address: pokt19a3t4yunp0dlpfjrp7qwnzwlrzd5fzs2gjaaaj - rev_share_percentage: "100" + rev_share_percentage: 100 stake: # NB: This value should be exactly 1upokt smaller than the value in # `supplier1_stake_config.yaml` so that the stake command causes a state change. 
diff --git a/docusaurus/docs/README.md b/docusaurus/docs/README.md index f226dc1d4..1de7ee4d7 100644 --- a/docusaurus/docs/README.md +++ b/docusaurus/docs/README.md @@ -57,7 +57,7 @@ You can view the Shannon Roadmap on [Github](https://github.com/orgs/pokt-networ ## Quickstart -The best way to get involved is by following the [quickstart instructions](./develop/developer_guide/quickstart.md). +The best way to get involved is by following the [quickstart instructions](develop/developer_guide/walkthrough.md). ## Godoc diff --git a/docusaurus/docs/develop/contributing/code_review_guidelines.md b/docusaurus/docs/develop/contributing/code_review_guidelines.md index 287b5d5d7..41f892a97 100644 --- a/docusaurus/docs/develop/contributing/code_review_guidelines.md +++ b/docusaurus/docs/develop/contributing/code_review_guidelines.md @@ -3,7 +3,7 @@ sidebar_position: 3 title: Code Review Guidelines --- -# Code Review Guidelines +## Code Review Guidelines :::note This is a living document and will be updated as the ecosystem matures & grows. 
diff --git a/docusaurus/docs/develop/developer_guide/quickstart.md b/docusaurus/docs/develop/developer_guide/walkthrough.md similarity index 99% rename from docusaurus/docs/develop/developer_guide/quickstart.md rename to docusaurus/docs/develop/developer_guide/walkthrough.md index a899fba64..da5f30ed3 100644 --- a/docusaurus/docs/develop/developer_guide/quickstart.md +++ b/docusaurus/docs/develop/developer_guide/walkthrough.md @@ -1,11 +1,11 @@ --- sidebar_position: 1 -title: Quickstart +title: Walkthrough --- import ReactPlayer from "react-player"; -# Quickstart +## Walkthrough :::info The goal of this document is to get you up and running with a LocalNet, some diff --git a/docusaurus/docs/operate/configs/supplier_staking_config.md b/docusaurus/docs/operate/configs/supplier_staking_config.md index 169221639..41ba22758 100644 --- a/docusaurus/docs/operate/configs/supplier_staking_config.md +++ b/docusaurus/docs/operate/configs/supplier_staking_config.md @@ -214,8 +214,8 @@ _`Optional`_, _`Non-empty`_ ```yaml default_rev_share_percent: - : - : + : + : ``` `default_rev_share_percent` is an optional map that defines the default the revenue diff --git a/docusaurus/docs/operate/quickstart/docker_compose_walkthrough.md b/docusaurus/docs/operate/quickstart/docker_compose_walkthrough.md index 40e008fd1..6123e5ce8 100644 --- a/docusaurus/docs/operate/quickstart/docker_compose_walkthrough.md +++ b/docusaurus/docs/operate/quickstart/docker_compose_walkthrough.md @@ -132,7 +132,7 @@ Make sure to replace `olshansky` with your username. You can generally do everything as the `root` user, but it's recommended to create a new user and give it sudo permissions. -This is necessary, in particular, if you want to use [homebrew](https://brew.sh/) [to install `poktrolld`](../user_guide/install.md). +This is necessary, in particular, if you want to use [homebrew](https://brew.sh/) [to install `poktrolld`](../user_guide/poktrolld_cli.md). 
```bash adduser poktroll @@ -190,7 +190,7 @@ sed -i -e s/NODE_HOSTNAME=/NODE_HOSTNAME=69.42.690.420/g .env You can generally do everything as the `root` user, but it's recommended to create a new user and give it sudo permissions. -This is necessary, in particular, if you want to use [homebrew](https://brew.sh/) [to install `poktrolld`](../user_guide/install.md). +This is necessary, in particular, if you want to use [homebrew](https://brew.sh/) [to install `poktrolld`](../user_guide/poktrolld_cli.md). You can create a new user (e.g. poktroll), provide sudo permissions and switch users like so: diff --git a/docusaurus/docs/operate/quickstart/gateway_cheatsheet.md b/docusaurus/docs/operate/quickstart/gateway_cheatsheet.md index 741966d13..861c2b06a 100644 --- a/docusaurus/docs/operate/quickstart/gateway_cheatsheet.md +++ b/docusaurus/docs/operate/quickstart/gateway_cheatsheet.md @@ -38,7 +38,7 @@ streamline development and reduce friction for any new potential contributor. ## Pre-Requisites -1. Make sure to [install the `poktrolld` CLI](../user_guide/install.md). +1. Make sure to [install the `poktrolld` CLI](../user_guide/poktrolld_cli.md). 2. Make sure you know how to [create and fund a new account](../user_guide/create-new-wallet.md). :::warning diff --git a/docusaurus/docs/operate/quickstart/service_cheatsheet.md b/docusaurus/docs/operate/quickstart/service_cheatsheet.md index 4e3304db5..eda5d4af5 100644 --- a/docusaurus/docs/operate/quickstart/service_cheatsheet.md +++ b/docusaurus/docs/operate/quickstart/service_cheatsheet.md @@ -14,7 +14,7 @@ title: Service Cheat Sheet ### Pre-Requisites -1. Make sure to [install the `poktrolld` CLI](../user_guide/install.md). +1. Make sure to [install the `poktrolld` CLI](../user_guide/poktrolld_cli.md). 2. Make sure you know how to [create and fund a new account](../user_guide/create-new-wallet.md). ### How do I query for all existing onchain Services? 
diff --git a/docusaurus/docs/operate/quickstart/supplier_cheatsheet.md b/docusaurus/docs/operate/quickstart/supplier_cheatsheet.md index 1bc9eed64..32446be7a 100644 --- a/docusaurus/docs/operate/quickstart/supplier_cheatsheet.md +++ b/docusaurus/docs/operate/quickstart/supplier_cheatsheet.md @@ -39,7 +39,7 @@ streamline development and reduce friction for any new potential contributor. ## Pre-Requisites -1. Make sure to [install the `poktrolld` CLI](../user_guide/install.md). +1. Make sure to [install the `poktrolld` CLI](../user_guide/poktrolld_cli.md). 2. Make sure you know how to [create and fund a new account](../user_guide/create-new-wallet.md). 3. You have either [staked a new `service` or found an existing one](./service_cheatsheet.md). 4. `[Optional]` You can run things locally or have dedicated long-running hardware. See the [Docker Compose Cheat Sheet](./docker_compose_debian_cheatsheet#deploy-your-server) if you're interested in the latter. @@ -277,5 +277,5 @@ poktrolld query supplier -h Then, you can query for all services like so: ```bash -poktrolld query supplier list-supplier --node https://shannon-testnet-grove-rpc.beta.poktroll.com --output json | jq +poktrolld query supplier list-suppliers --node https://shannon-testnet-grove-rpc.beta.poktroll.com --output json | jq ``` diff --git a/docusaurus/docs/operate/run_a_node/full_node_walkthrough.md b/docusaurus/docs/operate/run_a_node/full_node_walkthrough.md index 2a19269fb..44ab1e221 100644 --- a/docusaurus/docs/operate/run_a_node/full_node_walkthrough.md +++ b/docusaurus/docs/operate/run_a_node/full_node_walkthrough.md @@ -130,7 +130,7 @@ source ~/.profile ### 5. Install `poktrolld` -Follow the instructions in the [CLI Installation Guide](../user_guide/install.md) page to install `poktrolld`. +Follow the instructions in the [CLI Installation Guide](../user_guide/poktrolld_cli.md) page to install `poktrolld`. 
Create a symlink of the binary so Comosvisor knows where to find it: diff --git a/docusaurus/docs/operate/user_guide/check-balance.md b/docusaurus/docs/operate/user_guide/check-balance.md index 98557295a..60897ca22 100644 --- a/docusaurus/docs/operate/user_guide/check-balance.md +++ b/docusaurus/docs/operate/user_guide/check-balance.md @@ -26,7 +26,7 @@ balance using the `poktrolld` command-line interface (CLI). ## Pre-requisites -1. `poktrolld` is installed on your system; see the [installation guide](./install) for more details +1. `poktrolld` is installed on your system; see the [installation guide](./poktrolld_cli.md) for more details 2. You have the address of the wallet you wish to check 3. You know the token denomination you wish to check; `upokt` for POKT tokens diff --git a/docusaurus/docs/operate/user_guide/create-new-wallet.md b/docusaurus/docs/operate/user_guide/create-new-wallet.md index 8df204d14..d5444dce7 100644 --- a/docusaurus/docs/operate/user_guide/create-new-wallet.md +++ b/docusaurus/docs/operate/user_guide/create-new-wallet.md @@ -51,7 +51,7 @@ refer to the [Cosmos SDK Keyring documentation](https://docs.cosmos.network/main Ensure you have `poktrolld` installed on your system. -Follow the [installation guide](./install) specific to your operating system. +Follow the [installation guide](./poktrolld_cli.md) specific to your operating system. 
## Step 2: Creating the Wallet diff --git a/docusaurus/docs/operate/user_guide/install.md b/docusaurus/docs/operate/user_guide/poktrolld_cli.md similarity index 98% rename from docusaurus/docs/operate/user_guide/install.md rename to docusaurus/docs/operate/user_guide/poktrolld_cli.md index dab4f98b2..9ca5431b9 100644 --- a/docusaurus/docs/operate/user_guide/install.md +++ b/docusaurus/docs/operate/user_guide/poktrolld_cli.md @@ -1,5 +1,5 @@ --- -title: CLI Installation +title: poktrolld CLI Installation sidebar_position: 0 --- diff --git a/docusaurus/docs/operate/user_guide/recover-with-mnemonic.md b/docusaurus/docs/operate/user_guide/recover-with-mnemonic.md index ca561ec80..6343667f9 100644 --- a/docusaurus/docs/operate/user_guide/recover-with-mnemonic.md +++ b/docusaurus/docs/operate/user_guide/recover-with-mnemonic.md @@ -24,7 +24,7 @@ seed phrase, recovering your account is straightforward! ## Pre-requisites - You have the mnemonic seed phrase of the wallet you wish to recover -- `poktrolld` is installed on your system; see the [installation guide](./install) for more details +- `poktrolld` is installed on your system; see the [installation guide](./poktrolld_cli.md) for more details ## Step 1: Prepare to Recover Your Wallet diff --git a/docusaurus/docs/operate/user_guide/send-tokens.md b/docusaurus/docs/operate/user_guide/send-tokens.md index dbff2914d..6d3c5676c 100644 --- a/docusaurus/docs/operate/user_guide/send-tokens.md +++ b/docusaurus/docs/operate/user_guide/send-tokens.md @@ -17,7 +17,7 @@ Pocket Network using the `poktrolld` command-line interface (CLI). ## Pre-requisites -1. `poktrolld` is installed on your system; see the [installation guide](./install) for more details +1. `poktrolld` is installed on your system; see the [installation guide](./poktrolld_cli.md) for more details 2. You have access to your wallet with sufficient tokens for the transaction and fees 3. 
You have the recipient's address diff --git a/e2e/tests/init_test.go b/e2e/tests/init_test.go index 680806c87..08804373d 100644 --- a/e2e/tests/init_test.go +++ b/e2e/tests/init_test.go @@ -171,7 +171,7 @@ func (s *suite) ThePocketdBinaryShouldExitWithoutError() { func (s *suite) TheUserRunsTheCommand(cmd string) { cmds := strings.Split(cmd, " ") res, err := s.pocketd.RunCommand(cmds...) - require.NoError(s, err, "error running command %s", cmd) + require.NoError(s, err, "error running command %s due to: %v", cmd, err) s.pocketd.result = res } @@ -192,7 +192,7 @@ func (s *suite) TheUserSendsUpoktFromAccountToAccount(amount int64, accName1, ac "-y", } res, err := s.pocketd.RunCommandOnHost("", args...) - require.NoError(s, err, "error sending upokt from %q to %q", accName1, accName2) + require.NoError(s, err, "error sending upokt from %q to %q due to: %v", accName1, accName2, err) s.pocketd.result = res } @@ -267,6 +267,7 @@ func (s *suite) TheUserStakesAWithUpoktFromTheAccount(actorType string, amount i "-y", } res, err := s.pocketd.RunCommandOnHost("", args...) + require.NoError(s, err, "error staking %s due to: %v", actorType, err) // Remove the temporary config file err = os.Remove(configFile.Name()) @@ -301,7 +302,7 @@ func (s *suite) TheUserStakesAWithUpoktForServiceFromTheAccount(actorType string "-y", } res, err := s.pocketd.RunCommandOnHost("", args...) - require.NoError(s, err, "error staking %s for service %s", actorType, serviceId) + require.NoError(s, err, "error staking %s for service %s due to: %v", actorType, serviceId, err) // Remove the temporary config file err = os.Remove(configFile.Name()) @@ -372,7 +373,7 @@ func (s *suite) TheUserUnstakesAFromTheAccount(actorType string, accName string) } res, err := s.pocketd.RunCommandOnHost("", args...) 
- require.NoError(s, err, "error unstaking %s", actorType) + require.NoError(s, err, "error unstaking %s due to: %v", actorType, err) // Get current balance balanceKey := accBalanceKey(accName) @@ -463,7 +464,7 @@ func (s *suite) TheApplicationSendsTheSupplierASuccessfulRequestForServiceWithPa appAddr := accNameToAddrMap[appName] res, err := s.pocketd.RunCurlWithRetry(pathUrl, serviceId, method, path, appAddr, requestData, 5) - require.NoError(s, err, "error sending relay request from app %q to supplier %q for service %q", appName, supplierOperatorName, serviceId) + require.NoError(s, err, "error sending relay request from app %q to supplier %q for service %q due to: %v", appName, supplierOperatorName, serviceId, err) var jsonContent json.RawMessage err = json.Unmarshal([]byte(res.Stdout), &jsonContent) @@ -569,11 +570,22 @@ func (s *suite) TheUserWaitsForTheApplicationForAccountPeriodToFinish(accName, p func (s *suite) getStakedAmount(actorType, accName string) (int, bool) { s.Helper() + + listCommand := fmt.Sprintf("list-%s", actorType) + // TODO_TECHDEBT(@olshansky): As of #1028, we started migrating some parts + // of the CLI to use AutoCLI which made list commands pluralized. + // E.g. "list-suppliers" instead of "list-supplier". + // Over time, all actor commands will be updated like so and this if can + // be removed. + if actorType == suppliertypes.ModuleName { + listCommand = fmt.Sprintf("%ss", listCommand) + } args := []string{ "query", actorType, - fmt.Sprintf("list-%s", actorType), + listCommand, } + res, err := s.pocketd.RunCommandOnHostWithRetry("", numQueryRetries, args...) require.NoError(s, err, "error getting %s", actorType) s.pocketd.result = res @@ -662,7 +674,7 @@ func (s *suite) buildSupplierMap() { argsAndFlags := []string{ "query", "supplier", - "list-supplier", + "list-suppliers", fmt.Sprintf("--%s=json", cometcli.OutputFlag), } res, err := s.pocketd.RunCommandOnHostWithRetry("", numQueryRetries, argsAndFlags...) 
@@ -752,7 +764,7 @@ func (s *suite) getSupplierInfo(supplierOperatorName string) *sharedtypes.Suppli } res, err := s.pocketd.RunCommandOnHostWithRetry("", numQueryRetries, args...) - require.NoError(s, err, "error getting supplier %s", supplierOperatorAddr) + require.NoError(s, err, "error getting supplier %s due to error: %v", supplierOperatorAddr, err) s.pocketd.result = res var resp suppliertypes.QueryGetSupplierResponse diff --git a/e2e/tests/node.go b/e2e/tests/node.go index 8d0f8710d..6620c995e 100644 --- a/e2e/tests/node.go +++ b/e2e/tests/node.go @@ -93,7 +93,18 @@ func (p *pocketdBin) RunCommandOnHostWithRetry(rpcUrl string, numRetries uint8, if err == nil { return res, nil } - // TODO_HACK: Figure out a better solution for retries. A parameter? Exponential backoff? What else? + // DEV_NOTE: Intentionally keeping a print statement here so errors are + // very visible even though the output may be noisy. + fmt.Printf(` +---------------------------------------- +Retrying command due to error: + - RPC URL: %s + - Arguments: %v + - Response: %v + - Error: %v +---------------------------------------- +`, rpcUrl, args, res, err) + // TODO_TECHDEBT(@bryanchriswhite): Figure out a better solution for retries. A parameter? Exponential backoff? What else? time.Sleep(5 * time.Second) return p.RunCommandOnHostWithRetry(rpcUrl, numRetries-1, args...) } diff --git a/e2e/tests/stake_supplier.feature b/e2e/tests/stake_supplier.feature index b24a579d9..3d9607fdc 100644 --- a/e2e/tests/stake_supplier.feature +++ b/e2e/tests/stake_supplier.feature @@ -30,19 +30,23 @@ Feature: Stake Supplier Namespace And the user verifies the "supplier" for account "supplier2" is not staked And the account balance of "supplier2" should be "1000070" uPOKT "more" than before - Scenario: User can restake a Supplier waiting for it to become active again - Given the user has the pocketd binary installed - # Reduce the application unbonding period to avoid timeouts and speed up scenarios. 
- And the "supplier" unbonding period param is successfully set to "1" sessions of "2" blocks - And the user verifies the "supplier" for account "supplier2" is not staked - Then the user stakes a "supplier" with "1000070" uPOKT for "anvil" service from the account "supplier2" - And the user should wait for the "supplier" module "StakeSupplier" message to be submitted - Then the user should see that the supplier for account "supplier2" is staked - But the session for application "app1" and service "anvil" does not contain "supplier2" - When the user waits for supplier "supplier2" to become active for service "anvil" - Then the session for application "app1" and service "anvil" contains the supplier "supplier2" - # Cleanup to make this feature idempotent. - And the user unstakes a "supplier" from the account "supplier2" - And the supplier for account "supplier2" is unbonding - And the user should wait for the "supplier" module "SupplierUnbondingBegin" tx event to be broadcast - And a "supplier" module "SupplierUnbondingEnd" end block event is broadcast + # TODO_MAINNET(@olshansk, #1033): Since the "to become active for service" step + # requires reading "ServicesActivationHeightsMap", which is temporarily set to nil, + # this test has been commented out. See #1033 for details and re-enable this test + # once that data is retrievable through a different method. + # Scenario: User can restake a Supplier waiting for it to become active again + # Given the user has the pocketd binary installed + # # Reduce the application unbonding period to avoid timeouts and speed up scenarios. 
+ # And the "supplier" unbonding period param is successfully set to "1" sessions of "2" blocks + # And the user verifies the "supplier" for account "supplier2" is not staked + # Then the user stakes a "supplier" with "1000070" uPOKT for "anvil" service from the account "supplier2" + # And the user should wait for the "supplier" module "StakeSupplier" message to be submitted + # Then the user should see that the supplier for account "supplier2" is staked + # But the session for application "app1" and service "anvil" does not contain "supplier2" + # When the user waits for supplier "supplier2" to become active for service "anvil" + # Then the session for application "app1" and service "anvil" contains the supplier "supplier2" + # # Cleanup to make this feature idempotent. + # And the user unstakes a "supplier" from the account "supplier2" + # And the supplier for account "supplier2" is unbonding + # And the user should wait for the "supplier" module "SupplierUnbondingBegin" tx event to be broadcast + # And a "supplier" module "SupplierUnbondingEnd" end block event is broadcast diff --git a/localnet/poktrolld/config/config.toml b/localnet/poktrolld/config/config.toml index bbfa0366f..35dcda640 100644 --- a/localnet/poktrolld/config/config.toml +++ b/localnet/poktrolld/config/config.toml @@ -174,7 +174,7 @@ timeout_broadcast_tx_commit = "10s" max_request_batch_size = 10 # Maximum size of request body, in bytes -max_body_bytes = 1000000 +max_body_bytes = 100000000 # Maximum size of request header, in bytes max_header_bytes = 1048576 @@ -330,7 +330,7 @@ keep-invalid-txs-in-cache = false # Maximum size of a single transaction. # NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. -max_tx_bytes = 1048576 +max_tx_bytes = 100000000 # Maximum size of a batch of transactions to send to a peer # Including space needed by encoding (one varint per transaction). 
diff --git a/localnet/poktrolld/config/supplier1_stake_config.yaml b/localnet/poktrolld/config/supplier1_stake_config.yaml index e3d475939..046fddb4c 100644 --- a/localnet/poktrolld/config/supplier1_stake_config.yaml +++ b/localnet/poktrolld/config/supplier1_stake_config.yaml @@ -2,8 +2,8 @@ owner_address: pokt1mrqt5f7qh8uxs27cjm9t7v9e74a9vvdnq5jva4 operator_address: pokt1mrqt5f7qh8uxs27cjm9t7v9e74a9vvdnq5jva4 stake_amount: 1000069upokt default_rev_share_percent: - pokt1mrqt5f7qh8uxs27cjm9t7v9e74a9vvdnq5jva4: 80.5 - pokt1eeeksh2tvkh7wzmfrljnhw4wrhs55lcuvmekkw: 19.5 + pokt1mrqt5f7qh8uxs27cjm9t7v9e74a9vvdnq5jva4: 80 + pokt1eeeksh2tvkh7wzmfrljnhw4wrhs55lcuvmekkw: 20 services: - service_id: anvil endpoints: diff --git a/localnet/poktrolld/config/supplier_stake_config_example.yaml b/localnet/poktrolld/config/supplier_stake_config_example.yaml index ce14f4a08..2fef94f0b 100644 --- a/localnet/poktrolld/config/supplier_stake_config_example.yaml +++ b/localnet/poktrolld/config/supplier_stake_config_example.yaml @@ -30,8 +30,8 @@ stake_amount: 1000069upokt # or include at least one item. default_rev_share_percent: # The sum of all shares MUST equal 100%. Staking will fail otherwise. - pokt1mrqt5f7qh8uxs27cjm9t7v9e74a9vvdnq5jva4: 80.5 - pokt1eeeksh2tvkh7wzmfrljnhw4wrhs55lcuvmekkw: 19.5 + pokt1mrqt5f7qh8uxs27cjm9t7v9e74a9vvdnq5jva4: 80 + pokt1eeeksh2tvkh7wzmfrljnhw4wrhs55lcuvmekkw: 20 services: # The endpoint URL for the Anvil service is provided via the RelayMiner. # The RelayMiner acts as a proxy, forwarding requests to the actual Anvil data node behind it. 
diff --git a/makefiles/suppliers.mk b/makefiles/suppliers.mk index 2cceb2d8f..2e0f885e8 100644 --- a/makefiles/suppliers.mk +++ b/makefiles/suppliers.mk @@ -4,7 +4,15 @@ .PHONY: supplier_list supplier_list: ## List all the staked supplier - poktrolld --home=$(POKTROLLD_HOME) q supplier list-supplier --node $(POCKET_NODE) + poktrolld --home=$(POKTROLLD_HOME) q supplier list-suppliers --node $(POCKET_NODE) + +.PHONY: supplier_list_anvil +supplier_list_anvil: ## List all the staked supplier staked for the anvil service + poktrolld --home=$(POKTROLLD_HOME) q supplier list-suppliers --service-id anvil --node $(POCKET_NODE) + +.PHONY: supplier_show_supplier1 +supplier_show_supplier1: ## Show supplier1 details + poktrolld --home=$(POKTROLLD_HOME) q supplier show-supplier supplier1 --node $(POCKET_NODE) .PHONY: supplier_stake supplier_stake: ## Stake tokens for the supplier specified (must specify the SUPPLIER and SUPPLIER_CONFIG env vars) diff --git a/makefiles/testnet.mk b/makefiles/testnet.mk index 0a13c056c..cd974a8d1 100644 --- a/makefiles/testnet.mk +++ b/makefiles/testnet.mk @@ -4,7 +4,7 @@ .PHONY: testnet_supplier_list testnet_supplier_list: ## List all the staked supplier on TestNet - poktrolld q supplier list-supplier --node=$(TESTNET_RPC) + poktrolld q supplier list-suppliers --node=$(TESTNET_RPC) .PHONY: testnet_gateway_list testnet_gateway_list: ## List all the staked gateways on TestNet diff --git a/proto/poktroll/application/types.proto b/proto/poktroll/application/types.proto index 8982cecda..81e7e893e 100644 --- a/proto/poktroll/application/types.proto +++ b/proto/poktroll/application/types.proto @@ -14,30 +14,38 @@ import "cosmos_proto/cosmos.proto"; import "poktroll/shared/service.proto"; -// Application defines the type used to store an onchain definition and state for an application +// Application represents the on-chain definition and state of an application message Application { - string address = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; 
// The Bech32 address of the application. - cosmos.base.v1beta1.Coin stake = 2; // The total amount of uPOKT the application has staked - // CRITICAL_DEV_NOTE: The number of service_configs must be EXACTLY ONE. + // Bech32 address of the application + string address = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; + + // Total amount of staked uPOKT + cosmos.base.v1beta1.Coin stake = 2; + + // CRITICAL: Must contain EXACTLY ONE service config // This prevents applications from over-servicing. - // The field is kept repeated (a list) for both legacy and future logic reaosns. - // References: - // - https://github.com/pokt-network/poktroll/pull/750#discussion_r1735025033 - // - https://www.notion.so/buildwithgrove/Off-chain-Application-Stake-Tracking-6a8bebb107db4f7f9dc62cbe7ba555f7 - repeated poktroll.shared.ApplicationServiceConfig service_configs = 3; // The list of services this appliccation is configured to request service for + // Kept as repeated field for legacy and future compatibility + // Refs: + // - https://github.com/pokt-network/poktroll/pull/750#discussion_r1735025033 + // - https://www.notion.so/buildwithgrove/Off-chain-Application-Stake-Tracking-6a8bebb107db4f7f9dc62cbe7ba555f7 + repeated poktroll.shared.ApplicationServiceConfig service_configs = 3; + // TODO_BETA(@bryanchriswhite): Rename `delegatee_gateway_addresses` to `gateway_addresses_delegated_to`. // Ensure to rename all relevant configs, comments, variables, function names, etc as well. - repeated string delegatee_gateway_addresses = 4 [(cosmos_proto.scalar) = "cosmos.AddressString", (gogoproto.nullable) = false]; // The Bech32 encoded addresses for all delegatee Gateways, in a non-nullable slice - // A map from sessionEndHeights to a list of Gateways. - // The key is the height of the last block of the session during which the - // respective undelegation was committed. - // The value is a list of gateways being undelegated from. 
+ // Non-nullable list of Bech32 encoded delegatee Gateway addresses + repeated string delegatee_gateway_addresses = 4 [(cosmos_proto.scalar) = "cosmos.AddressString", (gogoproto.nullable) = false]; + + // Mapping of session end heights to gateways being undelegated from + // - Key: Height of the last block of the session when undelegation tx was committed + // - Value: List of gateways being undelegated from // TODO_DOCUMENT(@red-0ne): Need to document the flow from this comment // so its clear to everyone why this is necessary; https://github.com/pokt-network/poktroll/issues/476#issuecomment-2052639906. map pending_undelegations = 5 [(gogoproto.nullable) = false]; - // The end height of the session at which an application initiated its unstaking process. - // If the application did not unstake, this value will be 0. + + // Session end height when application initiated unstaking (0 if not unstaking) uint64 unstake_session_end_height = 6; + + // Information about pending application transfers PendingApplicationTransfer pending_transfer = 7; } diff --git a/proto/poktroll/shared/service.proto b/proto/poktroll/shared/service.proto index 00dac4300..340c89abb 100644 --- a/proto/poktroll/shared/service.proto +++ b/proto/poktroll/shared/service.proto @@ -57,8 +57,12 @@ message SupplierEndpoint { // ServiceRevenueShare message to hold revenue share configuration details message ServiceRevenueShare { + // 2 was reserved in #1028 during the change of rev_share_percentage from float to uint64 + // TODO_TECHDEBT(#1033): Investigate if we can use a double instead. 
+ reserved 2; + string address = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; // The Bech32 address of the revenue share recipient - float rev_share_percentage = 2; // The percentage of revenue share the recipient will receive + uint64 rev_share_percentage = 3; // The percentage of revenue share the recipient will receive } // Enum to define RPC types diff --git a/proto/poktroll/shared/supplier.proto b/proto/poktroll/shared/supplier.proto index dfa6689ae..7b88dff05 100644 --- a/proto/poktroll/shared/supplier.proto +++ b/proto/poktroll/shared/supplier.proto @@ -10,24 +10,30 @@ import "cosmos/base/v1beta1/coin.proto"; import "poktroll/shared/service.proto"; import "gogoproto/gogo.proto"; -// Supplier is the type defining the actor in Pocket Network that provides RPC services. +// Supplier represents an actor in Pocket Network that provides RPC services message Supplier { - // The address of the owner (i.e. staker, custodial) that owns the funds for staking. - // By default, this address is the one that receives all the rewards unless owtherwise specified. - // This property cannot be updated by the operator. - string owner_address = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; // Bech32 cosmos address - // The operator address of the supplier operator (i.e. the one managing the offchain server). - // The operator address can update the supplier's configurations excluding the owner address. - // This property does not change over the supplier's lifespan, the supplier must be unstaked - // and re-staked to effectively update this value. - string operator_address = 2 [(cosmos_proto.scalar) = "cosmos.AddressString"]; // Bech32 cosmos address - cosmos.base.v1beta1.Coin stake = 3; // The total amount of uPOKT the supplier has staked - repeated SupplierServiceConfig services = 4; // The service configs this supplier can support - // The session end height at which an actively unbonding supplier unbonds its stake. 
- // If the supplier did not unstake, this value will be 0. + // Owner address that controls the staked funds and receives rewards by default + // Cannot be updated by the operator + string owner_address = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; + + // Operator address managing the offchain server + // Immutable for supplier's lifespan - requires unstake/re-stake to change. + // Can update supplier configs except for owner address. + string operator_address = 2 [(cosmos_proto.scalar) = "cosmos.AddressString"]; + + // Total amount of staked uPOKT + cosmos.base.v1beta1.Coin stake = 3; + + // List of service configurations supported by this supplier + repeated SupplierServiceConfig services = 4; + + // Session end height when supplier initiated unstaking (0 if not unstaking) uint64 unstake_session_end_height = 5; - // services_activation_heights_map is a map of serviceIds to the height at - // which the staked supplier will become active for that service. - // Activation heights are session start heights. + + // Mapping of serviceIds to their activation heights + // - Key: serviceId + // - Value: Session start height when supplier becomes active for the service + // TODO_MAINNET(@olshansk, #1033): Look into moving this to an external repeated protobuf + // because maps are no longer supported for serialized types in the CosmoSDK. 
map services_activation_heights_map = 6; } diff --git a/proto/poktroll/supplier/query.proto b/proto/poktroll/supplier/query.proto index cb7a4d31a..71de22f24 100644 --- a/proto/poktroll/supplier/query.proto +++ b/proto/poktroll/supplier/query.proto @@ -45,6 +45,7 @@ message QueryParamsResponse { message QueryGetSupplierRequest { string operator_address = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; + // TODO_TECHDEBT: Add the ability to query for a supplier by owner_id } message QueryGetSupplierResponse { @@ -53,6 +54,10 @@ message QueryGetSupplierResponse { message QueryAllSuppliersRequest { cosmos.base.query.v1beta1.PageRequest pagination = 1; + + oneof filter { + string service_id = 2; // unique service identifier to filter by + } } message QueryAllSuppliersResponse { diff --git a/testutil/integration/app.go b/testutil/integration/app.go index 25d0a0c6e..04305089a 100644 --- a/testutil/integration/app.go +++ b/testutil/integration/app.go @@ -908,7 +908,7 @@ func (app *App) setupDefaultActorsState( RevShare: []*sharedtypes.ServiceRevenueShare{ { Address: sample.AccAddress(), - RevSharePercentage: 100, + RevSharePercentage: uint64(100), }, }, ServiceId: defaultService.Id, diff --git a/testutil/keeper/tokenomics.go b/testutil/keeper/tokenomics.go index 626aa5b11..31f5dc53a 100644 --- a/testutil/keeper/tokenomics.go +++ b/testutil/keeper/tokenomics.go @@ -146,7 +146,7 @@ func TokenomicsKeeperWithActorAddrs(t testing.TB) ( RevShare: []*sharedtypes.ServiceRevenueShare{ { Address: supplierOwnerAddr, - RevSharePercentage: 100, + RevSharePercentage: uint64(100), }, }, }, diff --git a/x/application/types/types.pb.go b/x/application/types/types.pb.go index 4a7e43bba..ec942dee4 100644 --- a/x/application/types/types.pb.go +++ b/x/application/types/types.pb.go @@ -27,31 +27,33 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -// Application defines the type used to store an onchain definition and state for an application +// Application represents the on-chain definition and state of an application type Application struct { - Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` - Stake *types.Coin `protobuf:"bytes,2,opt,name=stake,proto3" json:"stake,omitempty"` - // CRITICAL_DEV_NOTE: The number of service_configs must be EXACTLY ONE. + // Bech32 address of the application + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + // Total amount of staked uPOKT + Stake *types.Coin `protobuf:"bytes,2,opt,name=stake,proto3" json:"stake,omitempty"` + // CRITICAL: Must contain EXACTLY ONE service config // This prevents applications from over-servicing. - // The field is kept repeated (a list) for both legacy and future logic reaosns. - // References: - // - https://github.com/pokt-network/poktroll/pull/750#discussion_r1735025033 - // - https://www.notion.so/buildwithgrove/Off-chain-Application-Stake-Tracking-6a8bebb107db4f7f9dc62cbe7ba555f7 + // Kept as repeated field for legacy and future compatibility + // Refs: + // - https://github.com/pokt-network/poktroll/pull/750#discussion_r1735025033 + // - https://www.notion.so/buildwithgrove/Off-chain-Application-Stake-Tracking-6a8bebb107db4f7f9dc62cbe7ba555f7 ServiceConfigs []*types1.ApplicationServiceConfig `protobuf:"bytes,3,rep,name=service_configs,json=serviceConfigs,proto3" json:"service_configs,omitempty"` // TODO_BETA(@bryanchriswhite): Rename `delegatee_gateway_addresses` to `gateway_addresses_delegated_to`. // Ensure to rename all relevant configs, comments, variables, function names, etc as well. 
+ // Non-nullable list of Bech32 encoded delegatee Gateway addresses DelegateeGatewayAddresses []string `protobuf:"bytes,4,rep,name=delegatee_gateway_addresses,json=delegateeGatewayAddresses,proto3" json:"delegatee_gateway_addresses,omitempty"` - // A map from sessionEndHeights to a list of Gateways. - // The key is the height of the last block of the session during which the - // respective undelegation was committed. - // The value is a list of gateways being undelegated from. + // Mapping of session end heights to gateways being undelegated from + // - Key: Height of the last block of the session when undelegation tx was committed + // - Value: List of gateways being undelegated from // TODO_DOCUMENT(@red-0ne): Need to document the flow from this comment // so its clear to everyone why this is necessary; https://github.com/pokt-network/poktroll/issues/476#issuecomment-2052639906. PendingUndelegations map[uint64]UndelegatingGatewayList `protobuf:"bytes,5,rep,name=pending_undelegations,json=pendingUndelegations,proto3" json:"pending_undelegations" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // The end height of the session at which an application initiated its unstaking process. - // If the application did not unstake, this value will be 0. 
- UnstakeSessionEndHeight uint64 `protobuf:"varint,6,opt,name=unstake_session_end_height,json=unstakeSessionEndHeight,proto3" json:"unstake_session_end_height,omitempty"` - PendingTransfer *PendingApplicationTransfer `protobuf:"bytes,7,opt,name=pending_transfer,json=pendingTransfer,proto3" json:"pending_transfer,omitempty"` + // Session end height when application initiated unstaking (0 if not unstaking) + UnstakeSessionEndHeight uint64 `protobuf:"varint,6,opt,name=unstake_session_end_height,json=unstakeSessionEndHeight,proto3" json:"unstake_session_end_height,omitempty"` + // Information about pending application transfers + PendingTransfer *PendingApplicationTransfer `protobuf:"bytes,7,opt,name=pending_transfer,json=pendingTransfer,proto3" json:"pending_transfer,omitempty"` } func (m *Application) Reset() { *m = Application{} } diff --git a/x/proof/keeper/query_proof.go b/x/proof/keeper/query_proof.go index f2a84ba42..4d596f948 100644 --- a/x/proof/keeper/query_proof.go +++ b/x/proof/keeper/query_proof.go @@ -13,7 +13,10 @@ import ( "github.com/pokt-network/poktroll/x/proof/types" ) -func (k Keeper) AllProofs(ctx context.Context, req *types.QueryAllProofsRequest) (*types.QueryAllProofsResponse, error) { +func (k Keeper) AllProofs( + ctx context.Context, + req *types.QueryAllProofsRequest, +) (*types.QueryAllProofsResponse, error) { logger := k.Logger().With("method", "AllProofs") if req == nil { diff --git a/x/proof/types/query_validation.go b/x/proof/types/query_validation.go index e1742dd99..50e3cbb7f 100644 --- a/x/proof/types/query_validation.go +++ b/x/proof/types/query_validation.go @@ -59,6 +59,7 @@ func (query *QueryGetProofRequest) ValidateBasic() error { return nil } +// ValidateBasic performs basic (non-state-dependant) validation on a QueryAllProofsRequest. func (query *QueryAllProofsRequest) ValidateBasic() error { // TODO_TECHDEBT: update function signature to receive a context. 
logger := polylog.Ctx(context.TODO()) diff --git a/x/service/keeper/query_service.go b/x/service/keeper/query_service.go index 73f420edc..999962eba 100644 --- a/x/service/keeper/query_service.go +++ b/x/service/keeper/query_service.go @@ -54,7 +54,8 @@ func (k Keeper) Service(ctx context.Context, req *types.QueryGetServiceRequest) service, found := k.GetService(ctx, req.Id) if !found { - return nil, status.Error(codes.NotFound, "service ID not found") + msg := fmt.Sprintf("service ID not found: %q", req.GetId()) + return nil, status.Error(codes.NotFound, msg) } return &types.QueryGetServiceResponse{Service: service}, nil diff --git a/x/service/keeper/query_service_test.go b/x/service/keeper/query_service_test.go index 7f2ab0584..02d123f36 100644 --- a/x/service/keeper/query_service_test.go +++ b/x/service/keeper/query_service_test.go @@ -43,9 +43,9 @@ func TestServiceQuerySingle(t *testing.T) { { desc: "KeyNotFound", request: &types.QueryGetServiceRequest{ - Id: strconv.Itoa(100000), + Id: "service", }, - expectedErr: status.Error(codes.NotFound, "service ID not found"), + expectedErr: status.Error(codes.NotFound, "service ID not found: \"service\""), }, { desc: "InvalidRequest", diff --git a/x/shared/types/service.pb.go b/x/shared/types/service.pb.go index 358154b9f..6da21cd5f 100644 --- a/x/shared/types/service.pb.go +++ b/x/shared/types/service.pb.go @@ -7,7 +7,6 @@ package types import ( - encoding_binary "encoding/binary" fmt "fmt" _ "github.com/cosmos/cosmos-proto" _ "github.com/cosmos/gogoproto/gogoproto" @@ -321,8 +320,8 @@ func (m *SupplierEndpoint) GetConfigs() []*ConfigOption { // ServiceRevenueShare message to hold revenue share configuration details type ServiceRevenueShare struct { - Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` - RevSharePercentage float32 `protobuf:"fixed32,2,opt,name=rev_share_percentage,json=revSharePercentage,proto3" json:"rev_share_percentage,omitempty"` + Address string 
`protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + RevSharePercentage uint64 `protobuf:"varint,3,opt,name=rev_share_percentage,json=revSharePercentage,proto3" json:"rev_share_percentage,omitempty"` } func (m *ServiceRevenueShare) Reset() { *m = ServiceRevenueShare{} } @@ -361,7 +360,7 @@ func (m *ServiceRevenueShare) GetAddress() string { return "" } -func (m *ServiceRevenueShare) GetRevSharePercentage() float32 { +func (m *ServiceRevenueShare) GetRevSharePercentage() uint64 { if m != nil { return m.RevSharePercentage } @@ -431,46 +430,47 @@ func init() { func init() { proto.RegisterFile("poktroll/shared/service.proto", fileDescriptor_302c2f793a11ae1e) } var fileDescriptor_302c2f793a11ae1e = []byte{ - // 621 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xc1, 0x6e, 0xd3, 0x40, - 0x10, 0xcd, 0x26, 0x81, 0x24, 0xd3, 0x36, 0xb5, 0x86, 0x20, 0x4c, 0xa5, 0x5a, 0x25, 0xe2, 0x50, - 0x55, 0x6a, 0x52, 0xa5, 0x42, 0x88, 0x03, 0x42, 0x6d, 0x14, 0xaa, 0x52, 0x35, 0x89, 0xd6, 0x29, - 0x95, 0xb8, 0x58, 0xae, 0xbd, 0xa4, 0x56, 0x13, 0xaf, 0xb5, 0xb6, 0x53, 0x22, 0x7e, 0x02, 0xf1, - 0x0b, 0xfc, 0x02, 0x27, 0xbe, 0x80, 0x63, 0xc5, 0xa9, 0x47, 0x94, 0xfe, 0x08, 0x5a, 0xaf, 0x1d, - 0xa0, 0x41, 0x20, 0x6e, 0xe3, 0x79, 0x6f, 0x66, 0xde, 0xbe, 0x1d, 0x2f, 0xac, 0x07, 0xfc, 0x22, - 0x12, 0x7c, 0x34, 0x6a, 0x86, 0xe7, 0xb6, 0x60, 0x6e, 0x33, 0x64, 0x62, 0xe2, 0x39, 0xac, 0x11, - 0x08, 0x1e, 0x71, 0x5c, 0xcd, 0xe0, 0x86, 0x82, 0xd7, 0x1e, 0x3a, 0x3c, 0x1c, 0xf3, 0xd0, 0x4a, - 0xe0, 0xa6, 0xfa, 0x50, 0xdc, 0xb5, 0xda, 0x90, 0x0f, 0xb9, 0xca, 0xcb, 0x48, 0x65, 0xeb, 0x9f, - 0x08, 0x94, 0x4c, 0xd5, 0x13, 0xab, 0x90, 0xf7, 0x5c, 0x9d, 0x6c, 0x90, 0xcd, 0x0a, 0xcd, 0x7b, - 0x2e, 0x22, 0x14, 0x7d, 0x7b, 0xcc, 0xf4, 0x7c, 0x92, 0x49, 0x62, 0x7c, 0x02, 0x0f, 0x1c, 0x3e, - 0x0e, 0xe2, 0x88, 0x59, 0xb1, 0xef, 0x45, 0xa1, 0x15, 0x30, 0x61, 0x09, 0x36, 0xb2, 0xa7, 0x7a, - 0x61, 0x83, 0x6c, 0x16, 0x69, 0x2d, 0x85, 
0x4f, 0x24, 0xda, 0x67, 0x82, 0x4a, 0x0c, 0x9f, 0xc3, - 0x0a, 0xbf, 0xf4, 0x99, 0xb0, 0x6c, 0xd7, 0x15, 0x2c, 0x0c, 0xf5, 0xa2, 0xec, 0xb9, 0xaf, 0x7f, - 0xfb, 0xbc, 0x5d, 0x4b, 0x55, 0xee, 0x29, 0xc4, 0x8c, 0x84, 0xe7, 0x0f, 0xe9, 0x72, 0x42, 0x4f, - 0x73, 0xf5, 0x67, 0xa0, 0xef, 0x05, 0xc1, 0xc8, 0x73, 0xec, 0xc8, 0xe3, 0x7e, 0xaa, 0xb7, 0xcd, - 0xfd, 0xb7, 0xde, 0x10, 0xd7, 0x01, 0x52, 0x53, 0xac, 0xb9, 0xfa, 0x4a, 0x9a, 0x39, 0x74, 0xeb, - 0x5f, 0x08, 0xdc, 0x37, 0x63, 0x59, 0xcc, 0xc4, 0xff, 0x14, 0xe2, 0x0b, 0xa8, 0x30, 0xdf, 0x0d, - 0xb8, 0xe7, 0x47, 0xa1, 0x9e, 0xdf, 0x28, 0x6c, 0x2e, 0xb5, 0x1e, 0x35, 0x6e, 0xf9, 0xdd, 0xc8, - 0x3a, 0x77, 0x52, 0x26, 0xfd, 0x59, 0x83, 0x7b, 0x50, 0x11, 0x6c, 0x62, 0x25, 0x4c, 0xbd, 0x90, - 0x34, 0x78, 0xbc, 0xd8, 0x40, 0xcd, 0xa3, 0x6c, 0xc2, 0xfc, 0x98, 0x99, 0x32, 0x49, 0xcb, 0x82, - 0x4d, 0x92, 0xa8, 0xfe, 0x91, 0x80, 0x76, 0x7b, 0x04, 0x6a, 0x50, 0x88, 0xc5, 0x28, 0x15, 0x2c, - 0x43, 0xdc, 0x85, 0xb2, 0x08, 0x1c, 0x2b, 0x9a, 0x06, 0xea, 0xb2, 0xaa, 0x2d, 0x7d, 0x61, 0x10, - 0xed, 0xb7, 0x07, 0xd3, 0x80, 0xd1, 0x92, 0x08, 0x1c, 0x19, 0xe0, 0x53, 0x28, 0x39, 0x89, 0x11, - 0x61, 0x2a, 0x6e, 0x7d, 0xa1, 0x46, 0x19, 0xd5, 0x0b, 0xa4, 0xe9, 0x34, 0x63, 0xd7, 0xdf, 0xc3, - 0xbd, 0x3f, 0xa8, 0xc6, 0x16, 0x94, 0xb2, 0xcb, 0x25, 0xff, 0xb8, 0xdc, 0x8c, 0x88, 0x3b, 0x50, - 0x9b, 0x5b, 0x24, 0x37, 0xc9, 0x61, 0x7e, 0x64, 0x0f, 0xd5, 0x21, 0xf2, 0x14, 0x33, 0x1f, 0xfa, - 0x73, 0xa4, 0xfe, 0x1a, 0x96, 0x7f, 0x55, 0x85, 0x3b, 0x50, 0xb8, 0x60, 0xd3, 0x64, 0x62, 0xb5, - 0x65, 0xfc, 0xf5, 0x04, 0x21, 0x95, 0x54, 0xac, 0xc1, 0x9d, 0x89, 0x3d, 0x8a, 0xb3, 0xb5, 0x56, - 0x1f, 0x5b, 0x47, 0x50, 0x4a, 0x1d, 0xc2, 0x55, 0x58, 0x3a, 0xe9, 0x1e, 0x75, 0x7b, 0xa7, 0x5d, - 0x8b, 0xf6, 0xdb, 0x5a, 0x0e, 0xcb, 0x50, 0x3c, 0x90, 0x11, 0xc1, 0x15, 0xa8, 0x9c, 0x76, 0xf6, - 0xcd, 0x5e, 0xfb, 0xa8, 0x33, 0xd0, 0xf2, 0xb8, 0x0c, 0xe5, 0x57, 0x66, 0x4f, 0xd1, 0x0a, 0x92, - 0x46, 0x3b, 0xe6, 0x40, 0x2b, 0x6e, 0xed, 0xc0, 0xca, 0x6f, 0x83, 0x11, 0xa1, 
0x9a, 0xb5, 0x6c, - 0xf7, 0xba, 0x2f, 0x0f, 0x0f, 0xb4, 0x1c, 0x2e, 0x41, 0x69, 0x70, 0x78, 0xdc, 0xe9, 0x9d, 0x0c, - 0x34, 0xb2, 0x7f, 0xfc, 0x75, 0x66, 0x90, 0xab, 0x99, 0x41, 0xae, 0x67, 0x06, 0xf9, 0x3e, 0x33, - 0xc8, 0x87, 0x1b, 0x23, 0x77, 0x75, 0x63, 0xe4, 0xae, 0x6f, 0x8c, 0xdc, 0x9b, 0xe6, 0xd0, 0x8b, - 0xce, 0xe3, 0xb3, 0x86, 0xc3, 0xc7, 0x4d, 0x79, 0xc2, 0x6d, 0x9f, 0x45, 0x97, 0x5c, 0x5c, 0x34, - 0xe7, 0xaf, 0xc3, 0xbb, 0xec, 0x7d, 0x90, 0x3b, 0x10, 0x9e, 0xdd, 0x4d, 0x7e, 0xee, 0xdd, 0x1f, - 0x01, 0x00, 0x00, 0xff, 0xff, 0x3f, 0x19, 0xf1, 0x60, 0x3f, 0x04, 0x00, 0x00, + // 628 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xcf, 0x6e, 0xd3, 0x4e, + 0x10, 0xce, 0xc6, 0xf9, 0xfd, 0x92, 0x4c, 0xdb, 0xd4, 0x1a, 0x82, 0x30, 0x95, 0x6a, 0x95, 0x88, + 0x43, 0x55, 0xa9, 0x49, 0x95, 0x0a, 0x21, 0x0e, 0x08, 0xb5, 0x51, 0xa8, 0xda, 0xaa, 0x49, 0xb4, + 0x4e, 0xa9, 0xc4, 0xc5, 0x72, 0xed, 0x25, 0xb5, 0xea, 0x78, 0xad, 0xb5, 0x9d, 0x92, 0x23, 0x6f, + 0x80, 0x78, 0x05, 0x5e, 0x81, 0x13, 0x4f, 0xc0, 0xb1, 0xe2, 0xd4, 0x23, 0x4a, 0x5f, 0x04, 0xad, + 0xff, 0x04, 0x68, 0x10, 0x88, 0xdb, 0x78, 0xbe, 0x6f, 0x66, 0xbe, 0xfd, 0x76, 0xbc, 0xb0, 0x1e, + 0xf0, 0xcb, 0x48, 0x70, 0xcf, 0x6b, 0x85, 0x17, 0x96, 0x60, 0x4e, 0x2b, 0x64, 0x62, 0xe2, 0xda, + 0xac, 0x19, 0x08, 0x1e, 0x71, 0x5c, 0xcd, 0xe1, 0x66, 0x0a, 0xaf, 0x3d, 0xb4, 0x79, 0x38, 0xe6, + 0xa1, 0x99, 0xc0, 0xad, 0xf4, 0x23, 0xe5, 0xae, 0xd5, 0x47, 0x7c, 0xc4, 0xd3, 0xbc, 0x8c, 0xd2, + 0x6c, 0xe3, 0x23, 0x81, 0xb2, 0x91, 0xf6, 0xc4, 0x1a, 0x14, 0x5d, 0x47, 0x23, 0x1b, 0x64, 0xb3, + 0x4a, 0x8b, 0xae, 0x83, 0x08, 0x25, 0xdf, 0x1a, 0x33, 0xad, 0x98, 0x64, 0x92, 0x18, 0x9f, 0xc0, + 0x03, 0x9b, 0x8f, 0x83, 0x38, 0x62, 0x66, 0xec, 0xbb, 0x51, 0x68, 0x06, 0x4c, 0x98, 0x82, 0x79, + 0xd6, 0x54, 0x53, 0x36, 0xc8, 0x66, 0x89, 0xd6, 0x33, 0xf8, 0x54, 0xa2, 0x03, 0x26, 0xa8, 0xc4, + 0xf0, 0x39, 0xac, 0xf0, 0x2b, 0x9f, 0x09, 0xd3, 0x72, 0x1c, 0xc1, 0xc2, 0x50, 0x2b, 
0xc9, 0x9e, + 0xfb, 0xda, 0xd7, 0x4f, 0xdb, 0xf5, 0x4c, 0xe5, 0x5e, 0x8a, 0x18, 0x91, 0x70, 0xfd, 0x11, 0x5d, + 0x4e, 0xe8, 0x59, 0xae, 0xf1, 0x0c, 0xb4, 0xbd, 0x20, 0xf0, 0x5c, 0xdb, 0x8a, 0x5c, 0xee, 0x67, + 0x7a, 0x3b, 0xdc, 0x7f, 0xe3, 0x8e, 0x70, 0x1d, 0x20, 0x33, 0xc5, 0x9c, 0xab, 0xaf, 0x66, 0x99, + 0x43, 0xa7, 0xf1, 0x99, 0xc0, 0x7d, 0x23, 0x96, 0xc5, 0x4c, 0xfc, 0x4b, 0x21, 0xbe, 0x80, 0x2a, + 0xf3, 0x9d, 0x80, 0xbb, 0x7e, 0x14, 0x6a, 0xc5, 0x0d, 0x65, 0x73, 0xa9, 0xfd, 0xa8, 0x79, 0xc7, + 0xef, 0x66, 0xde, 0xb9, 0x9b, 0x31, 0xe9, 0x8f, 0x1a, 0xdc, 0x83, 0xaa, 0x60, 0x13, 0x33, 0x61, + 0x6a, 0x4a, 0xd2, 0xe0, 0xf1, 0x62, 0x83, 0x74, 0x1e, 0x65, 0x13, 0xe6, 0xc7, 0xcc, 0x90, 0x49, + 0x5a, 0x11, 0x6c, 0x92, 0x44, 0x8d, 0x0f, 0x04, 0xd4, 0xbb, 0x23, 0x50, 0x05, 0x25, 0x16, 0x5e, + 0x26, 0x58, 0x86, 0xb8, 0x0b, 0x15, 0x11, 0xd8, 0x66, 0x34, 0x0d, 0xd2, 0xcb, 0xaa, 0xb5, 0xb5, + 0x85, 0x41, 0x74, 0xd0, 0x19, 0x4e, 0x03, 0x46, 0xcb, 0x22, 0xb0, 0x65, 0x80, 0x4f, 0xa1, 0x6c, + 0x27, 0x46, 0x84, 0x99, 0xb8, 0xf5, 0x85, 0x9a, 0xd4, 0xa8, 0x7e, 0x20, 0x4d, 0xa7, 0x39, 0xbb, + 0xf1, 0x8e, 0xc0, 0xbd, 0xdf, 0xc8, 0xc6, 0x36, 0x94, 0xf3, 0xdb, 0x25, 0x7f, 0xb9, 0xdd, 0x9c, + 0x88, 0x3b, 0x50, 0x9f, 0x7b, 0x24, 0x57, 0xc9, 0x66, 0x7e, 0x64, 0x8d, 0x58, 0xb6, 0x4b, 0x98, + 0x1b, 0x31, 0x98, 0x23, 0x47, 0xa5, 0x4a, 0x51, 0x55, 0x1a, 0xaf, 0x60, 0xf9, 0x67, 0x71, 0xb8, + 0x03, 0xca, 0x25, 0x9b, 0x26, 0x73, 0x6b, 0x6d, 0xfd, 0x8f, 0x07, 0x09, 0xa9, 0xa4, 0x62, 0x1d, + 0xfe, 0x9b, 0x58, 0x5e, 0x9c, 0x6f, 0x77, 0xfa, 0xb1, 0x75, 0x0c, 0xe5, 0xcc, 0x28, 0x5c, 0x85, + 0xa5, 0xd3, 0xde, 0x71, 0xaf, 0x7f, 0xd6, 0x33, 0xe9, 0xa0, 0xa3, 0x16, 0xb0, 0x02, 0xa5, 0x03, + 0x19, 0x11, 0x5c, 0x81, 0xea, 0x59, 0x77, 0xdf, 0xe8, 0x77, 0x8e, 0xbb, 0x43, 0xb5, 0x88, 0xcb, + 0x50, 0x39, 0x32, 0xfa, 0x29, 0x4d, 0x91, 0x34, 0xda, 0x35, 0x86, 0x6a, 0x69, 0x6b, 0x07, 0x56, + 0x7e, 0x19, 0x8c, 0x08, 0xb5, 0xbc, 0x65, 0xa7, 0xdf, 0x7b, 0x79, 0x78, 0xa0, 0x16, 0x70, 0x09, + 0xca, 0xc3, 0xc3, 0x93, 
0x6e, 0xff, 0x74, 0xa8, 0x92, 0xfd, 0x93, 0x2f, 0x33, 0x9d, 0x5c, 0xcf, + 0x74, 0x72, 0x33, 0xd3, 0xc9, 0xb7, 0x99, 0x4e, 0xde, 0xdf, 0xea, 0x85, 0xeb, 0x5b, 0xbd, 0x70, + 0x73, 0xab, 0x17, 0x5e, 0xb7, 0x46, 0x6e, 0x74, 0x11, 0x9f, 0x37, 0x6d, 0x3e, 0x6e, 0xc9, 0x13, + 0x6e, 0xfb, 0x2c, 0xba, 0xe2, 0xe2, 0xb2, 0x35, 0x7f, 0x24, 0xde, 0xe6, 0xcf, 0x84, 0x5c, 0x85, + 0xf0, 0xfc, 0xff, 0xe4, 0x1f, 0xdf, 0xfd, 0x1e, 0x00, 0x00, 0xff, 0xff, 0x46, 0xaf, 0x53, 0xa2, + 0x46, 0x04, 0x00, 0x00, } func (m *Service) Marshal() (dAtA []byte, err error) { @@ -680,10 +680,9 @@ func (m *ServiceRevenueShare) MarshalToSizedBuffer(dAtA []byte) (int, error) { var l int _ = l if m.RevSharePercentage != 0 { - i -= 4 - encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.RevSharePercentage)))) + i = encodeVarintService(dAtA, i, uint64(m.RevSharePercentage)) i-- - dAtA[i] = 0x15 + dAtA[i] = 0x18 } if len(m.Address) > 0 { i -= len(m.Address) @@ -836,7 +835,7 @@ func (m *ServiceRevenueShare) Size() (n int) { n += 1 + l + sovService(uint64(l)) } if m.RevSharePercentage != 0 { - n += 5 + n += 1 + sovService(uint64(m.RevSharePercentage)) } return n } @@ -1456,17 +1455,25 @@ func (m *ServiceRevenueShare) Unmarshal(dAtA []byte) error { } m.Address = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: - if wireType != 5 { + case 3: + if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field RevSharePercentage", wireType) } - var v uint32 - if (iNdEx + 4) > l { - return io.ErrUnexpectedEOF + m.RevSharePercentage = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RevSharePercentage |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - v = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:])) - iNdEx += 4 - m.RevSharePercentage = float32(math.Float32frombits(v)) default: iNdEx = preIndex skippy, err := 
skipService(dAtA[iNdEx:]) diff --git a/x/shared/types/service_configs.go b/x/shared/types/service_configs.go index 122a10241..bcfb3935e 100644 --- a/x/shared/types/service_configs.go +++ b/x/shared/types/service_configs.go @@ -7,7 +7,7 @@ import ( ) const ( - requiredRevSharePercentageSum = 100 + requiredRevSharePercentageSum = uint64(100) ) // ValidateAppServiceConfigs returns an error if any of the application service configs are invalid @@ -95,7 +95,7 @@ func ValidateSupplierServiceConfigs(services []*SupplierServiceConfig) error { // ensuring that the sum of the revenue share percentages is 100. // NB: This function is unit tested via the supplier staking config tests. func ValidateServiceRevShare(revShareList []*ServiceRevenueShare) error { - revSharePercentageSum := float32(0) + revSharePercentageSum := uint64(0) if len(revShareList) == 0 { return ErrSharedInvalidRevShare.Wrap("no rev share configurations") @@ -106,7 +106,7 @@ func ValidateServiceRevShare(revShareList []*ServiceRevenueShare) error { return ErrSharedInvalidRevShare.Wrap("rev share cannot be nil") } - // Validate the revshare address + // Validate the revenue share address if revShare.Address == "" { return ErrSharedInvalidRevShare.Wrapf("rev share address cannot be empty: %v", revShare) } diff --git a/x/shared/types/supplier.pb.go b/x/shared/types/supplier.pb.go index 9c0287112..dee27dc69 100644 --- a/x/shared/types/supplier.pb.go +++ b/x/shared/types/supplier.pb.go @@ -26,25 +26,26 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -// Supplier is the type defining the actor in Pocket Network that provides RPC services. +// Supplier represents an actor in Pocket Network that provides RPC services type Supplier struct { - // The address of the owner (i.e. staker, custodial) that owns the funds for staking. - // By default, this address is the one that receives all the rewards unless owtherwise specified. 
- // This property cannot be updated by the operator. + // Owner address that controls the staked funds and receives rewards by default + // Cannot be updated by the operator OwnerAddress string `protobuf:"bytes,1,opt,name=owner_address,json=ownerAddress,proto3" json:"owner_address,omitempty"` - // The operator address of the supplier operator (i.e. the one managing the offchain server). - // The operator address can update the supplier's configurations excluding the owner address. - // This property does not change over the supplier's lifespan, the supplier must be unstaked - // and re-staked to effectively update this value. - OperatorAddress string `protobuf:"bytes,2,opt,name=operator_address,json=operatorAddress,proto3" json:"operator_address,omitempty"` - Stake *types.Coin `protobuf:"bytes,3,opt,name=stake,proto3" json:"stake,omitempty"` - Services []*SupplierServiceConfig `protobuf:"bytes,4,rep,name=services,proto3" json:"services,omitempty"` - // The session end height at which an actively unbonding supplier unbonds its stake. - // If the supplier did not unstake, this value will be 0. + // Operator address managing the offchain server + // Immutable for supplier's lifespan - requires unstake/re-stake to change. + // Can update supplier configs except for owner address. 
+ OperatorAddress string `protobuf:"bytes,2,opt,name=operator_address,json=operatorAddress,proto3" json:"operator_address,omitempty"` + // Total amount of staked uPOKT + Stake *types.Coin `protobuf:"bytes,3,opt,name=stake,proto3" json:"stake,omitempty"` + // List of service configurations supported by this supplier + Services []*SupplierServiceConfig `protobuf:"bytes,4,rep,name=services,proto3" json:"services,omitempty"` + // Session end height when supplier initiated unstaking (0 if not unstaking) UnstakeSessionEndHeight uint64 `protobuf:"varint,5,opt,name=unstake_session_end_height,json=unstakeSessionEndHeight,proto3" json:"unstake_session_end_height,omitempty"` - // services_activation_heights_map is a map of serviceIds to the height at - // which the staked supplier will become active for that service. - // Activation heights are session start heights. + // Mapping of serviceIds to their activation heights + // - Key: serviceId + // - Value: Session start height when supplier becomes active for the service + // TODO_MAINNET(@olshansk, #1033): Look into moving this to an external repeated protobuf + // because maps are no longer supported for serialized types in the CosmoSDK. 
ServicesActivationHeightsMap map[string]uint64 `protobuf:"bytes,6,rep,name=services_activation_heights_map,json=servicesActivationHeightsMap,proto3" json:"services_activation_heights_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` } diff --git a/x/supplier/config/supplier_configs_reader.go b/x/supplier/config/supplier_configs_reader.go index c2f009c2e..6c604677e 100644 --- a/x/supplier/config/supplier_configs_reader.go +++ b/x/supplier/config/supplier_configs_reader.go @@ -19,14 +19,14 @@ type YAMLStakeConfig struct { OperatorAddress string `yaml:"operator_address"` StakeAmount string `yaml:"stake_amount"` Services []*YAMLStakeService `yaml:"services"` - DefaultRevSharePercent map[string]float32 `yaml:"default_rev_share_percent"` + DefaultRevSharePercent map[string]uint64 `yaml:"default_rev_share_percent"` } // YAMLStakeService is the structure describing a single service entry in the // stake config file. type YAMLStakeService struct { ServiceId string `yaml:"service_id"` - RevSharePercent map[string]float32 `yaml:"rev_share_percent"` + RevSharePercent map[string]uint64 `yaml:"rev_share_percent"` Endpoints []YAMLServiceEndpoint `yaml:"endpoints"` } @@ -102,7 +102,7 @@ func ParseSupplierConfigs(ctx context.Context, configContent []byte) (*SupplierS ) } - defaultRevSharePercent := map[string]float32{} + defaultRevSharePercent := map[string]uint64{} if len(stakeConfig.DefaultRevSharePercent) == 0 { // Ensure that if no default rev share is provided, the owner address is set // to 100% rev share. 
diff --git a/x/supplier/config/supplier_configs_reader_test.go b/x/supplier/config/supplier_configs_reader_test.go index f5d557345..5b0f149bc 100644 --- a/x/supplier/config/supplier_configs_reader_test.go +++ b/x/supplier/config/supplier_configs_reader_test.go @@ -287,8 +287,8 @@ func Test_ParseSupplierConfigs_Services(t *testing.T) { owner_address: %s operator_address: %s default_rev_share_percent: - %s: 50.5 - %s: 49.5 + %s: 51 + %s: 49 stake_amount: 1000upokt services: # Service with default rev share @@ -322,11 +322,11 @@ func Test_ParseSupplierConfigs_Services(t *testing.T) { RevShare: []*types.ServiceRevenueShare{ { Address: firstShareHolderAddress, - RevSharePercentage: 50.5, + RevSharePercentage: 51, }, { Address: secondShareHolderAddress, - RevSharePercentage: 49.5, + RevSharePercentage: 49, }, }, }, @@ -728,24 +728,6 @@ func Test_ParseSupplierConfigs_Services(t *testing.T) { `, ownerAddress, operatorAddress, firstShareHolderAddress, ""), expectedError: config.ErrSupplierConfigUnmarshalYAML, }, - { - desc: "negative revenue share allocation is disallowed", - inputConfig: fmt.Sprintf(` - owner_address: %s - operator_address: %s - stake_amount: 1000upokt - services: - - service_id: svc - endpoints: - - publicly_exposed_url: http://pokt.network:8081 - rpc_type: json_rpc - rev_share_percent: - %s: 90 - %s: 11 - %s: -1 - `, ownerAddress, operatorAddress, ownerAddress, firstShareHolderAddress, secondShareHolderAddress), - expectedError: sharedtypes.ErrSharedInvalidRevShare, - }, { desc: "errors when the rev share config is empty", inputConfig: fmt.Sprintf(` diff --git a/x/supplier/keeper/query_supplier.go b/x/supplier/keeper/query_supplier.go index 3d370a8fa..bdecfd975 100644 --- a/x/supplier/keeper/query_supplier.go +++ b/x/supplier/keeper/query_supplier.go @@ -24,11 +24,18 @@ func (k Keeper) AllSuppliers( return nil, status.Error(codes.InvalidArgument, "invalid request") } - var suppliers []sharedtypes.Supplier + if err := req.ValidateBasic(); err != nil { + 
return nil, status.Error(codes.InvalidArgument, err.Error()) + } + // TODO_IMPROVE: Consider adding a custom onchain index (similar to proofs) + // based on other parameters (e.g. serviceId) if/when the performance of the + // flags used to filter the response becomes an issue. store := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) supplierStore := prefix.NewStore(store, types.KeyPrefix(types.SupplierKeyOperatorPrefix)) + var suppliers []sharedtypes.Supplier + pageRes, err := query.Paginate( supplierStore, req.Pagination, @@ -40,6 +47,25 @@ func (k Keeper) AllSuppliers( return status.Error(codes.Internal, err.Error()) } + serviceIdFilter := req.GetServiceId() + if serviceIdFilter != "" { + hasService := false + for _, supplierServiceConfig := range supplier.Services { + if supplierServiceConfig.ServiceId == serviceIdFilter { + hasService = true + break + } + } + // Do not include the current supplier in the list returned. + if !hasService { + return nil + } + } + + // TODO_MAINNET(@olshansk, #1033): Newer version of the CosmosSDK doesn't support maps. + // Decide on a direction w.r.t maps in protos based on feedback from the CosmoSDK team. + supplier.ServicesActivationHeightsMap = nil + suppliers = append(suppliers, supplier) return nil }, @@ -62,10 +88,13 @@ func (k Keeper) Supplier( supplier, found := k.GetSupplier(ctx, req.OperatorAddress) if !found { - // TODO_TECHDEBT(@bryanchriswhite, #384): conform to logging conventions once established - msg := fmt.Sprintf("supplier with address %q", req.GetOperatorAddress()) + msg := fmt.Sprintf("supplier with address: %q", req.GetOperatorAddress()) return nil, status.Error(codes.NotFound, msg) } + // TODO_MAINNET(@olshansk, #1033): Newer version of the CosmosSDK doesn't support maps. + // Decide on a direction w.r.t maps in protos based on feedback from the CosmoSDK team. 
+ supplier.ServicesActivationHeightsMap = nil + return &types.QueryGetSupplierResponse{Supplier: supplier}, nil } diff --git a/x/supplier/keeper/query_supplier_test.go b/x/supplier/keeper/query_supplier_test.go index b0d448ec4..880f815f9 100644 --- a/x/supplier/keeper/query_supplier_test.go +++ b/x/supplier/keeper/query_supplier_test.go @@ -1,6 +1,7 @@ package keeper_test import ( + "fmt" "strconv" "testing" @@ -11,6 +12,7 @@ import ( keepertest "github.com/pokt-network/poktroll/testutil/keeper" "github.com/pokt-network/poktroll/testutil/nullify" + "github.com/pokt-network/poktroll/testutil/sample" "github.com/pokt-network/poktroll/x/supplier/types" ) @@ -20,6 +22,8 @@ var _ = strconv.IntSize func TestSupplierQuerySingle(t *testing.T) { supplierModuleKeepers, ctx := keepertest.SupplierKeeper(t) suppliers := createNSuppliers(*supplierModuleKeepers.Keeper, ctx, 2) + supplierAddr := sample.AccAddress() + tests := []struct { desc string request *types.QueryGetSupplierRequest @@ -43,9 +47,9 @@ func TestSupplierQuerySingle(t *testing.T) { { desc: "KeyNotFound", request: &types.QueryGetSupplierRequest{ - OperatorAddress: strconv.Itoa(100000), + OperatorAddress: supplierAddr, }, - expectedErr: status.Error(codes.NotFound, "supplier with address \"100000\""), + expectedErr: status.Error(codes.NotFound, fmt.Sprintf("supplier with address: \"%s\"", supplierAddr)), }, { desc: "InvalidRequest", @@ -70,7 +74,13 @@ func TestSupplierQuerySingle(t *testing.T) { func TestSupplierQueryPaginated(t *testing.T) { supplierModuleKeepers, ctx := keepertest.SupplierKeeper(t) - msgs := createNSuppliers(*supplierModuleKeepers.Keeper, ctx, 5) + suppliers := createNSuppliers(*supplierModuleKeepers.Keeper, ctx, 5) + + // TODO_MAINNET(@olshansk, #1033): Newer version of the CosmosSDK doesn't support maps. + // Decide on a direction w.r.t maps in protos based on feedback from the CosmoSDK team. 
+ for _, supplier := range suppliers { + supplier.ServicesActivationHeightsMap = nil + } request := func(next []byte, offset, limit uint64, total bool) *types.QueryAllSuppliersRequest { return &types.QueryAllSuppliersRequest{ @@ -84,12 +94,12 @@ func TestSupplierQueryPaginated(t *testing.T) { } t.Run("ByOffset", func(t *testing.T) { step := 2 - for i := 0; i < len(msgs); i += step { + for i := 0; i < len(suppliers); i += step { resp, err := supplierModuleKeepers.AllSuppliers(ctx, request(nil, uint64(i), uint64(step), false)) require.NoError(t, err) require.LessOrEqual(t, len(resp.Supplier), step) require.Subset(t, - nullify.Fill(msgs), + nullify.Fill(suppliers), nullify.Fill(resp.Supplier), ) } @@ -97,12 +107,12 @@ func TestSupplierQueryPaginated(t *testing.T) { t.Run("ByKey", func(t *testing.T) { step := 2 var next []byte - for i := 0; i < len(msgs); i += step { + for i := 0; i < len(suppliers); i += step { resp, err := supplierModuleKeepers.AllSuppliers(ctx, request(next, 0, uint64(step), false)) require.NoError(t, err) require.LessOrEqual(t, len(resp.Supplier), step) require.Subset(t, - nullify.Fill(msgs), + nullify.Fill(suppliers), nullify.Fill(resp.Supplier), ) next = resp.Pagination.NextKey @@ -111,9 +121,9 @@ func TestSupplierQueryPaginated(t *testing.T) { t.Run("Total", func(t *testing.T) { resp, err := supplierModuleKeepers.AllSuppliers(ctx, request(nil, 0, 0, true)) require.NoError(t, err) - require.Equal(t, len(msgs), int(resp.Pagination.Total)) + require.Equal(t, len(suppliers), int(resp.Pagination.Total)) require.ElementsMatch(t, - nullify.Fill(msgs), + nullify.Fill(suppliers), nullify.Fill(resp.Supplier), ) }) @@ -122,3 +132,39 @@ func TestSupplierQueryPaginated(t *testing.T) { require.ErrorIs(t, err, status.Error(codes.InvalidArgument, "invalid request")) }) } + +func TestSupplierQueryFilterByServiceId(t *testing.T) { + supplierModuleKeepers, ctx := keepertest.SupplierKeeper(t) + suppliers := createNSuppliers(*supplierModuleKeepers.Keeper, ctx, 5) + 
+ // Get the first service ID from the first supplier to use as filter + firstServiceId := suppliers[0].Services[0].ServiceId + + request := &types.QueryAllSuppliersRequest{ + Filter: &types.QueryAllSuppliersRequest_ServiceId{ + ServiceId: firstServiceId, + }, + Pagination: &query.PageRequest{ + Limit: uint64(len(suppliers)), + }, + } + + resp, err := supplierModuleKeepers.AllSuppliers(ctx, request) + require.NoError(t, err) + + // createNSuppliers assigns a separate service to each supplier + // so we can only expect one supplier to have the filtered service. + require.Len(t, resp.Supplier, 1) + + // Verify each returned supplier has the filtered service + for _, supplier := range resp.Supplier { + hasService := false + for _, service := range supplier.Services { + if service.ServiceId == firstServiceId { + hasService = true + break + } + } + require.True(t, hasService, "Supplier should have the filtered service") + } +} diff --git a/x/supplier/keeper/supplier_test.go b/x/supplier/keeper/supplier_test.go index 8cbddd555..cde7952c5 100644 --- a/x/supplier/keeper/supplier_test.go +++ b/x/supplier/keeper/supplier_test.go @@ -8,8 +8,11 @@ import ( "cosmossdk.io/math" sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/query" authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + "github.com/gogo/status" "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" "github.com/pokt-network/poktroll/cmd/poktrolld/cmd" keepertest "github.com/pokt-network/poktroll/testutil/keeper" @@ -27,13 +30,7 @@ func init() { cmd.InitSDKConfig() } -// The module address is derived off of its semantic name. -// This test is a helper for us to easily identify the underlying address. 
-func TestModuleAddressSupplier(t *testing.T) { - moduleAddress := authtypes.NewModuleAddress(types.ModuleName) - require.Equal(t, "pokt1j40dzzmn6cn9kxku7a5tjnud6hv37vesr5ccaa", moduleAddress.String()) -} - +// createNSuppliers creates n suppliers and stores them in the keeper func createNSuppliers(keeper keeper.Keeper, ctx context.Context, n int) []sharedtypes.Supplier { suppliers := make([]sharedtypes.Supplier, n) for i := range suppliers { @@ -59,7 +56,15 @@ func createNSuppliers(keeper keeper.Keeper, ctx context.Context, n int) []shared return suppliers } -func TestSupplierGet(t *testing.T) { +// DEV_NOTE: The account address is derived off of the module's semantic name (supplier). +// This test is a helper for us to easily identify the underlying address. +// See Module Accounts for more details: https://docs.cosmos.network/main/learn/beginner/accounts#module-accounts +func TestModuleAddressSupplier(t *testing.T) { + moduleAddress := authtypes.NewModuleAddress(types.ModuleName) + require.Equal(t, "pokt1j40dzzmn6cn9kxku7a5tjnud6hv37vesr5ccaa", moduleAddress.String()) +} + +func TestSupplier_Get(t *testing.T) { supplierModuleKeepers, ctx := keepertest.SupplierKeeper(t) suppliers := createNSuppliers(*supplierModuleKeepers.Keeper, ctx, 10) for _, supplier := range suppliers { @@ -74,7 +79,7 @@ func TestSupplierGet(t *testing.T) { } } -func TestSupplierRemove(t *testing.T) { +func TestSupplier_Remove(t *testing.T) { supplierModuleKeepers, ctx := keepertest.SupplierKeeper(t) suppliers := createNSuppliers(*supplierModuleKeepers.Keeper, ctx, 10) for _, supplier := range suppliers { @@ -86,7 +91,7 @@ func TestSupplierRemove(t *testing.T) { } } -func TestSupplierGetAll(t *testing.T) { +func TestSupplier_GetAll(t *testing.T) { supplierModuleKeepers, ctx := keepertest.SupplierKeeper(t) suppliers := createNSuppliers(*supplierModuleKeepers.Keeper, ctx, 10) require.ElementsMatch(t, @@ -94,3 +99,145 @@ func TestSupplierGetAll(t *testing.T) { 
nullify.Fill(supplierModuleKeepers.GetAllSuppliers(ctx)), ) } + +func TestSupplier_Query(t *testing.T) { + keeper, ctx := keepertest.SupplierKeeper(t) + suppliers := createNSuppliers(*keeper.Keeper, ctx, 2) + + tests := []struct { + desc string + request *types.QueryGetSupplierRequest + response *types.QueryGetSupplierResponse + expectedErr error + }{ + { + desc: "supplier found", + request: &types.QueryGetSupplierRequest{ + OperatorAddress: suppliers[0].OperatorAddress, + }, + response: &types.QueryGetSupplierResponse{ + Supplier: suppliers[0], + }, + }, + { + desc: "supplier not found", + request: &types.QueryGetSupplierRequest{ + OperatorAddress: "non_existent_address", + }, + expectedErr: status.Error(codes.NotFound, fmt.Sprintf("supplier with address: %q", "non_existent_address")), + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + response, err := keeper.Supplier(ctx, test.request) + if test.expectedErr != nil { + stat, ok := status.FromError(test.expectedErr) + require.True(t, ok) + require.ErrorContains(t, stat.Err(), test.expectedErr.Error()) + } else { + require.NoError(t, err) + require.NotNil(t, response) + require.Equal(t, + nullify.Fill(test.response), + nullify.Fill(response), + ) + } + }) + } +} + +func TestSuppliers_QueryAll_Pagination(t *testing.T) { + keeper, ctx := keepertest.SupplierKeeper(t) + suppliers := createNSuppliers(*keeper.Keeper, ctx, 5) + + t.Run("ByOffset", func(t *testing.T) { + step := 2 + for i := 0; i < len(suppliers); i += step { + req := &types.QueryAllSuppliersRequest{ + Pagination: &query.PageRequest{ + Offset: uint64(i), + Limit: uint64(step), + }, + } + resp, err := keeper.AllSuppliers(ctx, req) + require.NoError(t, err) + require.LessOrEqual(t, len(resp.Supplier), step) + require.Subset(t, + nullify.Fill(suppliers), + nullify.Fill(resp.Supplier), + ) + } + }) + + t.Run("ByKey", func(t *testing.T) { + step := 2 + var nextKey []byte + for i := 0; i < len(suppliers); i += step { + req := 
&types.QueryAllSuppliersRequest{ + Pagination: &query.PageRequest{ + Key: nextKey, + Limit: uint64(step), + }, + } + resp, err := keeper.AllSuppliers(ctx, req) + require.NoError(t, err) + require.LessOrEqual(t, len(resp.Supplier), step) + require.Subset(t, + nullify.Fill(suppliers), + nullify.Fill(resp.Supplier), + ) + nextKey = resp.Pagination.NextKey + } + }) + + t.Run("Total", func(t *testing.T) { + req := &types.QueryAllSuppliersRequest{ + Pagination: &query.PageRequest{ + Offset: 0, + Limit: uint64(len(suppliers)), + CountTotal: true, + }, + } + resp, err := keeper.AllSuppliers(ctx, req) + require.NoError(t, err) + require.Equal(t, len(suppliers), int(resp.Pagination.Total)) + require.ElementsMatch(t, + nullify.Fill(suppliers), + nullify.Fill(resp.Supplier), + ) + }) +} + +func TestSuppliers_QueryAll_Filters(t *testing.T) { + keeper, ctx := keepertest.SupplierKeeper(t) + suppliers := createNSuppliers(*keeper.Keeper, ctx, 5) + + t.Run("Filter By ServiceId", func(t *testing.T) { + // Assuming the first supplier has at least one service + serviceId := suppliers[0].Services[0].ServiceId + req := &types.QueryAllSuppliersRequest{ + Pagination: &query.PageRequest{ + Offset: 0, + Limit: uint64(len(suppliers)), + }, + Filter: &types.QueryAllSuppliersRequest_ServiceId{ + ServiceId: serviceId, + }, + } + resp, err := keeper.AllSuppliers(ctx, req) + require.NoError(t, err) + + // Verify each returned supplier has the specified service + for _, s := range resp.Supplier { + hasService := false + for _, service := range s.Services { + if service.ServiceId == serviceId { + hasService = true + break + } + } + require.True(t, hasService, "Returned supplier does not have the specified service") + } + }) +} diff --git a/x/supplier/module/autocli.go b/x/supplier/module/autocli.go index 716e3774f..a64176dd8 100644 --- a/x/supplier/module/autocli.go +++ b/x/supplier/module/autocli.go @@ -10,30 +10,58 @@ import ( func (am AppModule) AutoCLIOptions() *autocliv1.ModuleOptions { return 
&autocliv1.ModuleOptions{ Query: &autocliv1.ServiceCommandDescriptor{ - Service: modulev1.Query_ServiceDesc.ServiceName, + Service: modulev1.Query_ServiceDesc.ServiceName, + EnhanceCustomCommand: true, // only required if you want to use the custom command (for backwards compatibility) RpcCommandOptions: []*autocliv1.RpcCommandOptions{ - //{ - // RpcMethod: "Params", - // Use: "params", - // Short: "Shows the parameters of the module", - //}, - //{ - // RpcMethod: "AllSuppliers", - // Use: "list-supplier", - // Short: "List all supplier", - //}, - //{ - // RpcMethod: "Supplier", - // Use: "show-supplier [id]", - // Short: "Shows a supplier", - // PositionalArgs: []*autocliv1.PositionalArgDescriptor{{ProtoField: "index"}}, - //}, + // { + // RpcMethod: "Params", + // Use: "params", + // Short: "Shows the parameters of the module", + // }, + { + Alias: []string{"suppliers", "ls"}, + RpcMethod: "AllSuppliers", + Use: "list-suppliers", + Short: "List all suppliers on Pocket Network", + Long: `Retrieves a paginated list of all suppliers currently registered on Pocket Network, including all their details. + +The command supports optional filtering by service ID and pagination parameters. +Returns supplier addresses, staked amounts, service details, and current status.`, + + Example: ` poktrolld query supplier list-suppliers + poktrolld query supplier list-suppliers --service-id anvil + poktrolld query supplier list-suppliers --page 2 --limit 50 + poktrolld query supplier list-suppliers --service-id anvil --page 1 --limit 100`, + FlagOptions: map[string]*autocliv1.FlagOptions{ + "service_id": {Name: "service-id", Shorthand: "s", Usage: "service id to filter by", Hidden: false}, + }, + }, + { + Alias: []string{"supplier", "s"}, + RpcMethod: "Supplier", + Use: "show-supplier [operator_address]", + Short: "Shows detailed information about a specific supplier", + Long: `Retrieves comprehensive information about a supplier identified by their address. 
+ +Returns details include things like: +- Supplier's staked amount and status +- List of services they provide`, + + Example: ` poktrolld query supplier show-supplier pokt1abc...xyz + poktrolld query supplier show-supplier pokt1abc...xyz --output json + poktrolld query supplier show-supplier pokt1abc...xyz --height 100`, + PositionalArgs: []*autocliv1.PositionalArgDescriptor{ + { + ProtoField: "operator_address", + }, + }, + }, // this line is used by ignite scaffolding # autocli/query }, }, Tx: &autocliv1.ServiceCommandDescriptor{ Service: modulev1.Msg_ServiceDesc.ServiceName, - EnhanceCustomCommand: true, // only required if you want to use the custom command + EnhanceCustomCommand: true, // only required if you want to use the custom command (for backwards compatibility) RpcCommandOptions: []*autocliv1.RpcCommandOptions{ //{ // RpcMethod: "UpdateParams", diff --git a/x/supplier/module/flags.go b/x/supplier/module/flags.go new file mode 100644 index 000000000..e4b1cbd9e --- /dev/null +++ b/x/supplier/module/flags.go @@ -0,0 +1,5 @@ +package supplier + +const ( + FlagServiceId = "service-id" +) diff --git a/x/supplier/module/query.go b/x/supplier/module/query.go index b49ebf142..bb81daf33 100644 --- a/x/supplier/module/query.go +++ b/x/supplier/module/query.go @@ -22,8 +22,6 @@ func (am AppModule) GetQueryCmd() *cobra.Command { } cmd.AddCommand(CmdQueryParams()) - cmd.AddCommand(CmdListSuppliers()) - cmd.AddCommand(CmdShowSupplier()) // this line is used by starport scaffolding # 1 return cmd diff --git a/x/supplier/module/query_supplier.go b/x/supplier/module/query_supplier.go deleted file mode 100644 index 81a18454e..000000000 --- a/x/supplier/module/query_supplier.go +++ /dev/null @@ -1,78 +0,0 @@ -package supplier - -import ( - "github.com/cosmos/cosmos-sdk/client" - "github.com/cosmos/cosmos-sdk/client/flags" - "github.com/spf13/cobra" - - "github.com/pokt-network/poktroll/x/supplier/types" -) - -func CmdListSuppliers() *cobra.Command { - cmd := 
&cobra.Command{ - Use: "list-supplier", - Short: "list all supplier", - RunE: func(cmd *cobra.Command, args []string) error { - clientCtx, err := client.GetClientQueryContext(cmd) - if err != nil { - return err - } - - pageReq, err := client.ReadPageRequest(cmd.Flags()) - if err != nil { - return err - } - - queryClient := types.NewQueryClient(clientCtx) - - params := &types.QueryAllSuppliersRequest{ - Pagination: pageReq, - } - - res, err := queryClient.AllSuppliers(cmd.Context(), params) - if err != nil { - return err - } - - return clientCtx.PrintProto(res) - }, - } - - flags.AddPaginationFlagsToCmd(cmd, cmd.Use) - flags.AddQueryFlagsToCmd(cmd) - - return cmd -} - -func CmdShowSupplier() *cobra.Command { - cmd := &cobra.Command{ - Use: "show-supplier ", - Short: "shows a supplier", - Args: cobra.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) (err error) { - clientCtx, err := client.GetClientQueryContext(cmd) - if err != nil { - return err - } - - queryClient := types.NewQueryClient(clientCtx) - - argAddress := args[0] - - params := &types.QueryGetSupplierRequest{ - OperatorAddress: argAddress, - } - - res, err := queryClient.Supplier(cmd.Context(), params) - if err != nil { - return err - } - - return clientCtx.PrintProto(res) - }, - } - - flags.AddQueryFlagsToCmd(cmd) - - return cmd -} diff --git a/x/supplier/module/query_supplier_test.go b/x/supplier/module/query_supplier_test.go deleted file mode 100644 index 78c529d90..000000000 --- a/x/supplier/module/query_supplier_test.go +++ /dev/null @@ -1,140 +0,0 @@ -package supplier_test - -import ( - "fmt" - "strconv" - "testing" - - cometcli "github.com/cometbft/cometbft/libs/cli" - "github.com/cosmos/cosmos-sdk/client/flags" - clitestutil "github.com/cosmos/cosmos-sdk/testutil/cli" - "github.com/stretchr/testify/require" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "github.com/pokt-network/poktroll/testutil/nullify" - sharedtypes 
"github.com/pokt-network/poktroll/x/shared/types" - supplier "github.com/pokt-network/poktroll/x/supplier/module" - "github.com/pokt-network/poktroll/x/supplier/types" -) - -func TestShowSupplier(t *testing.T) { - net, suppliers := networkWithSupplierObjects(t, 2) - - ctx := net.Validators[0].ClientCtx - common := []string{ - fmt.Sprintf("--%s=json", cometcli.OutputFlag), - } - tests := []struct { - desc string - idAddress string - - args []string - expectedErr error - supplier sharedtypes.Supplier - }{ - { - desc: "supplier found", - idAddress: suppliers[0].OperatorAddress, - - args: common, - supplier: suppliers[0], - }, - { - desc: "supplier not found", - idAddress: strconv.Itoa(100000), - - args: common, - expectedErr: status.Error(codes.NotFound, "not found"), - }, - } - for _, test := range tests { - t.Run(test.desc, func(t *testing.T) { - args := []string{ - test.idAddress, - } - args = append(args, test.args...) - out, err := clitestutil.ExecTestCLICmd(ctx, supplier.CmdShowSupplier(), args) - if test.expectedErr != nil { - stat, ok := status.FromError(test.expectedErr) - require.True(t, ok) - require.ErrorIs(t, stat.Err(), test.expectedErr) - } else { - require.NoError(t, err) - var resp types.QueryGetSupplierResponse - require.NoError(t, net.Config.Codec.UnmarshalJSON(out.Bytes(), &resp)) - require.NotNil(t, resp.Supplier) - require.Equal(t, - nullify.Fill(&test.supplier), - nullify.Fill(&resp.Supplier), - ) - } - }) - } -} - -func TestListSuppliers(t *testing.T) { - net, suppliers := networkWithSupplierObjects(t, 5) - - ctx := net.Validators[0].ClientCtx - request := func(next []byte, offset, limit uint64, total bool) []string { - args := []string{ - fmt.Sprintf("--%s=json", cometcli.OutputFlag), - } - if next == nil { - args = append(args, fmt.Sprintf("--%s=%d", flags.FlagOffset, offset)) - } else { - args = append(args, fmt.Sprintf("--%s=%s", flags.FlagPageKey, next)) - } - args = append(args, fmt.Sprintf("--%s=%d", flags.FlagLimit, limit)) - if total { 
- args = append(args, fmt.Sprintf("--%s", flags.FlagCountTotal)) - } - return args - } - t.Run("ByOffset", func(t *testing.T) { - step := 2 - for i := 0; i < len(suppliers); i += step { - args := request(nil, uint64(i), uint64(step), false) - out, err := clitestutil.ExecTestCLICmd(ctx, supplier.CmdListSuppliers(), args) - require.NoError(t, err) - var resp types.QueryAllSuppliersResponse - require.NoError(t, net.Config.Codec.UnmarshalJSON(out.Bytes(), &resp)) - require.LessOrEqual(t, len(resp.Supplier), step) - require.Subset(t, - nullify.Fill(suppliers), - nullify.Fill(resp.Supplier), - ) - } - }) - t.Run("ByKey", func(t *testing.T) { - step := 2 - var next []byte - for i := 0; i < len(suppliers); i += step { - args := request(next, 0, uint64(step), false) - out, err := clitestutil.ExecTestCLICmd(ctx, supplier.CmdListSuppliers(), args) - require.NoError(t, err) - var resp types.QueryAllSuppliersResponse - require.NoError(t, net.Config.Codec.UnmarshalJSON(out.Bytes(), &resp)) - require.LessOrEqual(t, len(resp.Supplier), step) - require.Subset(t, - nullify.Fill(suppliers), - nullify.Fill(resp.Supplier), - ) - next = resp.Pagination.NextKey - } - }) - t.Run("Total", func(t *testing.T) { - args := request(nil, 0, uint64(len(suppliers)), true) - out, err := clitestutil.ExecTestCLICmd(ctx, supplier.CmdListSuppliers(), args) - require.NoError(t, err) - var resp types.QueryAllSuppliersResponse - require.NoError(t, net.Config.Codec.UnmarshalJSON(out.Bytes(), &resp)) - require.NoError(t, err) - require.Equal(t, len(suppliers), int(resp.Pagination.Total)) - require.ElementsMatch(t, - nullify.Fill(suppliers), - nullify.Fill(resp.Supplier), - ) - }) -} diff --git a/x/supplier/types/errors.go b/x/supplier/types/errors.go index c6188aade..d3a615a3b 100644 --- a/x/supplier/types/errors.go +++ b/x/supplier/types/errors.go @@ -15,4 +15,5 @@ var ( ErrSupplierServiceNotFound = sdkerrors.Register(ModuleName, 1106, "service not found") ErrSupplierParamInvalid = 
sdkerrors.Register(ModuleName, 1107, "the provided param is invalid") ErrSupplierEmitEvent = sdkerrors.Register(ModuleName, 1108, "failed to emit event") + ErrSupplierInvalidServiceId = sdkerrors.Register(ModuleName, 1109, "invalid service ID") ) diff --git a/x/supplier/types/query.pb.go b/x/supplier/types/query.pb.go index fc4da1c09..739fb0ef7 100644 --- a/x/supplier/types/query.pb.go +++ b/x/supplier/types/query.pb.go @@ -191,6 +191,9 @@ func (m *QueryGetSupplierResponse) GetSupplier() types.Supplier { type QueryAllSuppliersRequest struct { Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"` + // Types that are valid to be assigned to Filter: + // *QueryAllSuppliersRequest_ServiceId + Filter isQueryAllSuppliersRequest_Filter `protobuf_oneof:"filter"` } func (m *QueryAllSuppliersRequest) Reset() { *m = QueryAllSuppliersRequest{} } @@ -222,6 +225,25 @@ func (m *QueryAllSuppliersRequest) XXX_DiscardUnknown() { var xxx_messageInfo_QueryAllSuppliersRequest proto.InternalMessageInfo +type isQueryAllSuppliersRequest_Filter interface { + isQueryAllSuppliersRequest_Filter() + MarshalTo([]byte) (int, error) + Size() int +} + +type QueryAllSuppliersRequest_ServiceId struct { + ServiceId string `protobuf:"bytes,2,opt,name=service_id,json=serviceId,proto3,oneof" json:"service_id,omitempty"` +} + +func (*QueryAllSuppliersRequest_ServiceId) isQueryAllSuppliersRequest_Filter() {} + +func (m *QueryAllSuppliersRequest) GetFilter() isQueryAllSuppliersRequest_Filter { + if m != nil { + return m.Filter + } + return nil +} + func (m *QueryAllSuppliersRequest) GetPagination() *query.PageRequest { if m != nil { return m.Pagination @@ -229,6 +251,20 @@ func (m *QueryAllSuppliersRequest) GetPagination() *query.PageRequest { return nil } +func (m *QueryAllSuppliersRequest) GetServiceId() string { + if x, ok := m.GetFilter().(*QueryAllSuppliersRequest_ServiceId); ok { + return x.ServiceId + } + return "" +} + +// XXX_OneofWrappers is for 
the internal use of the proto package. +func (*QueryAllSuppliersRequest) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*QueryAllSuppliersRequest_ServiceId)(nil), + } +} + type QueryAllSuppliersResponse struct { Supplier []types.Supplier `protobuf:"bytes,1,rep,name=supplier,proto3" json:"supplier"` Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` @@ -289,43 +325,45 @@ func init() { func init() { proto.RegisterFile("poktroll/supplier/query.proto", fileDescriptor_7a8c18c53656bd0d) } var fileDescriptor_7a8c18c53656bd0d = []byte{ - // 567 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x94, 0x31, 0x6f, 0x13, 0x31, - 0x14, 0xc7, 0xe3, 0x16, 0xa2, 0xd6, 0x20, 0x41, 0x4d, 0x24, 0x92, 0x08, 0x0e, 0x74, 0x12, 0x21, - 0x0a, 0xd4, 0x26, 0x65, 0x2c, 0x0c, 0x4d, 0x25, 0x3a, 0x52, 0x92, 0x01, 0x89, 0x81, 0xca, 0x49, - 0xac, 0xeb, 0xa9, 0x97, 0xb3, 0x6b, 0x3b, 0x40, 0x85, 0x58, 0x58, 0x58, 0x91, 0x18, 0x99, 0xd8, - 0x3a, 0x32, 0xf0, 0x21, 0x3a, 0x56, 0xb0, 0x54, 0x0c, 0x08, 0x25, 0x48, 0x7c, 0x0d, 0x14, 0xdb, - 0x97, 0xa6, 0xdc, 0x45, 0x49, 0x97, 0xc8, 0xe7, 0xf7, 0xff, 0xbf, 0xf7, 0xf3, 0x7b, 0x4f, 0x81, - 0x37, 0x05, 0xdf, 0xd3, 0x92, 0x47, 0x11, 0x51, 0x7d, 0x21, 0xa2, 0x90, 0x49, 0xb2, 0xdf, 0x67, - 0xf2, 0x00, 0x0b, 0xc9, 0x35, 0x47, 0x2b, 0x49, 0x18, 0x27, 0xe1, 0xf2, 0x0a, 0xed, 0x85, 0x31, - 0x27, 0xe6, 0xd7, 0xaa, 0xca, 0x85, 0x80, 0x07, 0xdc, 0x1c, 0xc9, 0xe8, 0xe4, 0x6e, 0x6f, 0x04, - 0x9c, 0x07, 0x11, 0x23, 0x54, 0x84, 0x84, 0xc6, 0x31, 0xd7, 0x54, 0x87, 0x3c, 0x56, 0x2e, 0x5a, - 0xea, 0x70, 0xd5, 0xe3, 0x6a, 0xc7, 0xda, 0xec, 0x87, 0x0b, 0xd5, 0xec, 0x17, 0x69, 0x53, 0xc5, - 0x2c, 0x0d, 0x79, 0x55, 0x6f, 0x33, 0x4d, 0xeb, 0x44, 0xd0, 0x20, 0x8c, 0x4d, 0x1e, 0xa7, 0xf5, - 0x26, 0xb5, 0x89, 0xaa, 0xc3, 0xc3, 0x71, 0x3c, 0xfd, 0x3e, 0x41, 0x25, 0xed, 0xa9, 0x74, 0x7c, - 0x97, 0x4a, 0xd6, 0x1d, 0xcb, 0x6c, 0xdc, 0x2f, 0x40, 0xf4, 0x6c, 
0x44, 0xb0, 0x6d, 0x4c, 0x4d, - 0xb6, 0xdf, 0x67, 0x4a, 0xfb, 0x2d, 0x78, 0xed, 0xcc, 0xad, 0x12, 0x3c, 0x56, 0x0c, 0x3d, 0x82, - 0x79, 0x9b, 0xbc, 0x08, 0x6e, 0x83, 0xea, 0xa5, 0xb5, 0x12, 0x4e, 0xb5, 0x0f, 0x5b, 0x4b, 0x63, - 0xf9, 0xe8, 0xd7, 0xad, 0xdc, 0xe1, 0xdf, 0xaf, 0x35, 0xd0, 0x74, 0x1e, 0xff, 0x25, 0xbc, 0x6e, - 0x92, 0x6e, 0x31, 0xdd, 0x72, 0x6a, 0x57, 0x0f, 0x6d, 0xc2, 0xab, 0x5c, 0x30, 0x49, 0x35, 0x97, - 0x3b, 0xb4, 0xdb, 0x95, 0x4c, 0xd9, 0x12, 0xcb, 0x8d, 0xe2, 0xf7, 0x6f, 0xab, 0x05, 0xd7, 0xbd, - 0x0d, 0x1b, 0x69, 0x69, 0x19, 0xc6, 0x41, 0xf3, 0x4a, 0xe2, 0x70, 0xd7, 0xfe, 0x73, 0x58, 0x4c, - 0xe7, 0x77, 0xe4, 0xeb, 0x70, 0x29, 0x21, 0xcc, 0x60, 0x37, 0x9d, 0xc1, 0x89, 0xa9, 0x71, 0x61, - 0xc4, 0xde, 0x1c, 0x1b, 0xfc, 0xb6, 0x4b, 0xbc, 0x11, 0x45, 0x89, 0x26, 0xe9, 0x14, 0x7a, 0x02, - 0xe1, 0xe9, 0xcc, 0x5c, 0xea, 0x0a, 0x76, 0xc0, 0xa3, 0xa1, 0x61, 0xbb, 0x6e, 0x6e, 0x74, 0x78, - 0x9b, 0x06, 0xcc, 0x79, 0x9b, 0x13, 0x4e, 0xff, 0x0b, 0x80, 0xa5, 0x8c, 0x22, 0x99, 0xf8, 0x8b, - 0xe7, 0xc2, 0x47, 0x5b, 0x67, 0x10, 0x17, 0x0c, 0xe2, 0xdd, 0x99, 0x88, 0xb6, 0xf2, 0x24, 0xe3, - 0xda, 0xcf, 0x45, 0x78, 0xd1, 0x30, 0xa2, 0x0f, 0x00, 0xe6, 0xed, 0xa0, 0xd1, 0x9d, 0x8c, 0x1d, - 0x48, 0x6f, 0x54, 0xb9, 0x32, 0x4b, 0x66, 0xeb, 0xf9, 0xf8, 0xfd, 0x8f, 0x3f, 0x9f, 0x16, 0xaa, - 0xa8, 0x42, 0x46, 0xfa, 0xd5, 0x98, 0xe9, 0xd7, 0x5c, 0xee, 0x91, 0x69, 0x5b, 0x8e, 0x0e, 0x01, - 0x5c, 0x4a, 0x5e, 0x8e, 0x6a, 0xd3, 0x8a, 0xa4, 0x57, 0xae, 0x7c, 0x6f, 0x2e, 0xad, 0xa3, 0xda, - 0x34, 0x54, 0x8f, 0xd1, 0xfa, 0x2c, 0xaa, 0xf1, 0xe1, 0xed, 0xff, 0xfb, 0xfc, 0x0e, 0x7d, 0x06, - 0xf0, 0xf2, 0xe4, 0x74, 0xd1, 0x54, 0x84, 0x8c, 0x45, 0x2b, 0xdf, 0x9f, 0x4f, 0xec, 0x80, 0x1f, - 0x18, 0xe0, 0x1a, 0xaa, 0xce, 0x0b, 0xdc, 0x78, 0x7a, 0x34, 0xf0, 0xc0, 0xf1, 0xc0, 0x03, 0x27, - 0x03, 0x0f, 0xfc, 0x1e, 0x78, 0xe0, 0xe3, 0xd0, 0xcb, 0x1d, 0x0f, 0xbd, 0xdc, 0xc9, 0xd0, 0xcb, - 0xbd, 0xa8, 0x07, 0xa1, 0xde, 0xed, 0xb7, 0x71, 0x87, 0xf7, 0xa6, 0x64, 0x7c, 0x73, 0x9a, 0x53, - 0x1f, 
0x08, 0xa6, 0xda, 0x79, 0xf3, 0x07, 0xf3, 0xf0, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x07, - 0x67, 0x7e, 0xca, 0x82, 0x05, 0x00, 0x00, + // 602 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xb1, 0x6f, 0x13, 0x3f, + 0x18, 0x8d, 0xd3, 0xdf, 0x2f, 0x4a, 0x0c, 0x12, 0xd4, 0x44, 0x22, 0x89, 0xe0, 0x8a, 0x4e, 0x22, + 0x44, 0x81, 0xde, 0x91, 0x32, 0x16, 0x86, 0xa6, 0x12, 0x85, 0x89, 0x72, 0x19, 0x90, 0x18, 0x88, + 0x9c, 0xc4, 0x5c, 0xad, 0x5e, 0xce, 0x57, 0xdb, 0x29, 0x54, 0x88, 0x85, 0x85, 0x09, 0x09, 0x89, + 0x91, 0x89, 0xad, 0x23, 0x03, 0x7f, 0x44, 0xc7, 0x0a, 0x96, 0x8a, 0x01, 0xa1, 0x04, 0x89, 0x7f, + 0x03, 0x9d, 0xed, 0x4b, 0x53, 0x2e, 0x51, 0xc2, 0x12, 0xd9, 0xfe, 0xde, 0x7b, 0xdf, 0x7b, 0xf6, + 0x97, 0x83, 0x57, 0x23, 0xb6, 0x2b, 0x39, 0x0b, 0x02, 0x57, 0x0c, 0xa2, 0x28, 0xa0, 0x84, 0xbb, + 0x7b, 0x03, 0xc2, 0x0f, 0x9c, 0x88, 0x33, 0xc9, 0xd0, 0x72, 0x52, 0x76, 0x92, 0x72, 0x65, 0x19, + 0xf7, 0x69, 0xc8, 0x5c, 0xf5, 0xab, 0x51, 0x95, 0xa2, 0xcf, 0x7c, 0xa6, 0x96, 0x6e, 0xbc, 0x32, + 0xa7, 0x57, 0x7c, 0xc6, 0xfc, 0x80, 0xb8, 0x38, 0xa2, 0x2e, 0x0e, 0x43, 0x26, 0xb1, 0xa4, 0x2c, + 0x14, 0xa6, 0x5a, 0xee, 0x32, 0xd1, 0x67, 0xa2, 0xad, 0x69, 0x7a, 0x63, 0x4a, 0x75, 0xbd, 0x73, + 0x3b, 0x58, 0x10, 0xed, 0xc6, 0xdd, 0x6f, 0x74, 0x88, 0xc4, 0x0d, 0x37, 0xc2, 0x3e, 0x0d, 0x95, + 0x8e, 0xc1, 0x5a, 0x93, 0xd8, 0x04, 0xd5, 0x65, 0x74, 0x5c, 0x4f, 0xe7, 0x8b, 0x30, 0xc7, 0x7d, + 0x91, 0xae, 0xef, 0x60, 0x4e, 0x7a, 0x63, 0x98, 0xae, 0xdb, 0x45, 0x88, 0x1e, 0xc7, 0x0e, 0xb6, + 0x15, 0xc9, 0x23, 0x7b, 0x03, 0x22, 0xa4, 0xdd, 0x82, 0x97, 0xce, 0x9c, 0x8a, 0x88, 0x85, 0x82, + 0xa0, 0xbb, 0x30, 0xa7, 0xc5, 0x4b, 0xe0, 0x1a, 0xa8, 0x9d, 0x5b, 0x2b, 0x3b, 0xa9, 0xeb, 0x73, + 0x34, 0xa5, 0x59, 0x38, 0xfa, 0xb1, 0x92, 0x39, 0xfc, 0xfd, 0xb9, 0x0e, 0x3c, 0xc3, 0xb1, 0x9f, + 0xc1, 0xcb, 0x4a, 0x74, 0x8b, 0xc8, 0x96, 0x41, 0x9b, 0x7e, 0x68, 0x13, 0x5e, 0x64, 0x11, 0xe1, + 0x58, 0x32, 0xde, 0xc6, 0xbd, 0x1e, 0x27, 0x42, 
0xb7, 0x28, 0x34, 0x4b, 0x5f, 0xbf, 0xac, 0x16, + 0xcd, 0xed, 0x6d, 0xe8, 0x4a, 0x4b, 0x72, 0x1a, 0xfa, 0xde, 0x85, 0x84, 0x61, 0x8e, 0xed, 0x27, + 0xb0, 0x94, 0xd6, 0x37, 0xce, 0xd7, 0x61, 0x3e, 0x71, 0x38, 0xc5, 0xbb, 0xba, 0x19, 0x27, 0x21, + 0x35, 0xff, 0x8b, 0xbd, 0x7b, 0x63, 0x82, 0xfd, 0x0e, 0x18, 0xe5, 0x8d, 0x20, 0x48, 0x40, 0xc9, + 0x55, 0xa1, 0xfb, 0x10, 0x9e, 0x3e, 0x9a, 0xd1, 0xae, 0x3a, 0xc6, 0x71, 0xfc, 0x6a, 0x8e, 0x9e, + 0x37, 0xf3, 0x76, 0xce, 0x36, 0xf6, 0x89, 0xe1, 0x7a, 0x13, 0x4c, 0xb4, 0x02, 0xa1, 0x20, 0x7c, + 0x9f, 0x76, 0x49, 0x9b, 0xf6, 0x4a, 0xd9, 0x38, 0xfc, 0x83, 0x8c, 0x57, 0x30, 0x67, 0x0f, 0x7b, + 0xcd, 0x3c, 0xcc, 0x3d, 0xa7, 0x81, 0x24, 0xdc, 0xfe, 0x04, 0x60, 0x79, 0x8a, 0x9f, 0xa9, 0x51, + 0x97, 0xfe, 0x29, 0x2a, 0xda, 0x3a, 0x93, 0x26, 0xab, 0xd2, 0xdc, 0x98, 0x9b, 0x46, 0x77, 0x9e, + 0x8c, 0xb3, 0xf6, 0x7d, 0x09, 0xfe, 0xaf, 0x3c, 0xa2, 0xb7, 0x00, 0xe6, 0xf4, 0x50, 0xa0, 0xeb, + 0x53, 0xe6, 0x25, 0x3d, 0x7d, 0x95, 0xea, 0x3c, 0x98, 0xee, 0x67, 0x3b, 0x6f, 0xbe, 0xfd, 0xfa, + 0x90, 0xad, 0xa1, 0xaa, 0x1b, 0xe3, 0x57, 0x43, 0x22, 0x5f, 0x30, 0xbe, 0xeb, 0xce, 0xfa, 0x47, + 0xa0, 0x43, 0x00, 0xf3, 0x49, 0x72, 0x54, 0x9f, 0xd5, 0x24, 0x3d, 0x9e, 0x95, 0x9b, 0x0b, 0x61, + 0x8d, 0xab, 0x4d, 0xe5, 0xea, 0x1e, 0x5a, 0x9f, 0xe7, 0x6a, 0xbc, 0x78, 0xf5, 0xf7, 0xec, 0xbf, + 0x46, 0x1f, 0x01, 0x3c, 0x3f, 0xf9, 0xba, 0x68, 0xa6, 0x85, 0x29, 0x33, 0x59, 0xb9, 0xb5, 0x18, + 0xd8, 0x18, 0xbe, 0xad, 0x0c, 0xd7, 0x51, 0x6d, 0x51, 0xc3, 0xcd, 0x47, 0x47, 0x43, 0x0b, 0x1c, + 0x0f, 0x2d, 0x70, 0x32, 0xb4, 0xc0, 0xcf, 0xa1, 0x05, 0xde, 0x8f, 0xac, 0xcc, 0xf1, 0xc8, 0xca, + 0x9c, 0x8c, 0xac, 0xcc, 0xd3, 0x86, 0x4f, 0xe5, 0xce, 0xa0, 0xe3, 0x74, 0x59, 0x7f, 0x86, 0xe2, + 0xcb, 0x53, 0x4d, 0x79, 0x10, 0x11, 0xd1, 0xc9, 0xa9, 0x8f, 0xd1, 0x9d, 0x3f, 0x01, 0x00, 0x00, + 0xff, 0xff, 0xc0, 0xc9, 0xa7, 0x97, 0xae, 0x05, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
@@ -623,6 +661,15 @@ func (m *QueryAllSuppliersRequest) MarshalToSizedBuffer(dAtA []byte) (int, error _ = i var l int _ = l + if m.Filter != nil { + { + size := m.Filter.Size() + i -= size + if _, err := m.Filter.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } if m.Pagination != nil { { size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) @@ -638,6 +685,20 @@ func (m *QueryAllSuppliersRequest) MarshalToSizedBuffer(dAtA []byte) (int, error return len(dAtA) - i, nil } +func (m *QueryAllSuppliersRequest_ServiceId) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAllSuppliersRequest_ServiceId) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.ServiceId) + copy(dAtA[i:], m.ServiceId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ServiceId))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} func (m *QueryAllSuppliersResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -752,9 +813,22 @@ func (m *QueryAllSuppliersRequest) Size() (n int) { l = m.Pagination.Size() n += 1 + l + sovQuery(uint64(l)) } + if m.Filter != nil { + n += m.Filter.Size() + } return n } +func (m *QueryAllSuppliersRequest_ServiceId) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ServiceId) + n += 1 + l + sovQuery(uint64(l)) + return n +} func (m *QueryAllSuppliersResponse) Size() (n int) { if m == nil { return 0 @@ -1143,6 +1217,38 @@ func (m *QueryAllSuppliersRequest) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Filter = &QueryAllSuppliersRequest_ServiceId{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipQuery(dAtA[iNdEx:]) diff --git a/x/supplier/types/query_validation.go b/x/supplier/types/query_validation.go new file mode 100644 index 000000000..ccab23065 --- /dev/null +++ b/x/supplier/types/query_validation.go @@ -0,0 +1,44 @@ +package types + +import ( + "context" + + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/pokt-network/poktroll/pkg/polylog" + sharedtypes "github.com/pokt-network/poktroll/x/shared/types" +) + +// NOTE: Please note that these messages are not of type `sdk.Msg`, and are therefore not a message/request +// that will be signable or invoke a state transition. However, following a similar `ValidateBasic` pattern +// allows us to localize & reuse validation logic. + +// ValidateBasic performs basic (non-state-dependant) validation on a QueryGetSupplierRequest. +func (query *QueryGetSupplierRequest) ValidateBasic() error { + // Validate the supplier operator address + if _, err := sdk.AccAddressFromBech32(query.OperatorAddress); err != nil { + return ErrSupplierInvalidAddress.Wrapf("invalid supplier operator address %s; (%v)", query.OperatorAddress, err) + } + + return nil +} + +// ValidateBasic performs basic (non-state-dependant) validation on a QueryAllSuppliersRequest. +func (query *QueryAllSuppliersRequest) ValidateBasic() error { + // TODO_TECHDEBT: update function signature to receive a context. 
+ logger := polylog.Ctx(context.TODO()) + + switch filter := query.Filter.(type) { + case *QueryAllSuppliersRequest_ServiceId: + // If the service ID is set, check if it's valid + if filter.ServiceId != "" && !sharedtypes.IsValidServiceId(filter.ServiceId) { + return ErrSupplierInvalidServiceId.Wrap("invalid empty service ID for suppliers being retrieved") + } + + default: + // No filter is set + logger.Info().Msg("No specific filter set when listing suppliers") + } + + return nil +} diff --git a/x/tokenomics/keeper/token_logic_modules_test.go b/x/tokenomics/keeper/token_logic_modules_test.go index b70d32608..eae0d4628 100644 --- a/x/tokenomics/keeper/token_logic_modules_test.go +++ b/x/tokenomics/keeper/token_logic_modules_test.go @@ -45,7 +45,7 @@ func TestProcessTokenLogicModules_TLMBurnEqualsMint_Valid(t *testing.T) { // Test Parameters appInitialStake := apptypes.DefaultMinStake.Amount.Mul(cosmosmath.NewInt(2)) supplierInitialStake := cosmosmath.NewInt(1000000) - supplierRevShareRatios := []float32{12.5, 37.5, 50} + supplierRevShareRatios := []uint64{12, 38, 50} globalComputeUnitsToTokensMultiplier := uint64(1) serviceComputeUnitsPerRelay := uint64(1) service := prepareTestService(serviceComputeUnitsPerRelay) @@ -185,7 +185,7 @@ func TestProcessTokenLogicModules_TLMBurnEqualsMint_Valid_SupplierExceedsMaxClai service := prepareTestService(serviceComputeUnitsPerRelay) numRelays := uint64(1000) // By a single supplier for application in this session supplierInitialStake := cosmosmath.NewInt(1000000) - supplierRevShareRatios := []float32{12.5, 37.5, 50} + supplierRevShareRatios := []uint64{12, 38, 50} // Prepare the keepers keepers, ctx := testkeeper.NewTokenomicsModuleKeepers(t, @@ -338,7 +338,7 @@ func TestProcessTokenLogicModules_TLMGlobalMint_Valid_MintDistributionCorrect(t // Test Parameters appInitialStake := apptypes.DefaultMinStake.Amount.Mul(cosmosmath.NewInt(2)) supplierInitialStake := cosmosmath.NewInt(1000000) - supplierRevShareRatios := 
[]float32{12.5, 37.5, 50} + supplierRevShareRatios := []uint64{12, 38, 50} globalComputeUnitsToTokensMultiplier := uint64(1) serviceComputeUnitsPerRelay := uint64(1) service := prepareTestService(serviceComputeUnitsPerRelay) @@ -447,7 +447,7 @@ func TestProcessTokenLogicModules_TLMGlobalMint_Valid_MintDistributionCorrect(t propMint := cosmosmath.NewInt(int64(numTokensMinted * tokenomicsParams.MintAllocationPercentages.Proposer)) serviceOwnerMint := cosmosmath.NewInt(int64(numTokensMinted * tokenomicsParams.MintAllocationPercentages.SourceOwner)) appMint := cosmosmath.NewInt(int64(numTokensMinted * tokenomicsParams.MintAllocationPercentages.Application)) - supplierMint := float32(numTokensMinted * tokenomicsParams.MintAllocationPercentages.Supplier) + supplierMint := float64(numTokensMinted * tokenomicsParams.MintAllocationPercentages.Supplier) // Ensure the balance was increased to the appropriate amount. require.Equal(t, daoBalanceBefore.Amount.Add(daoMint).Add(numTokensMintedInt), daoBalanceAfter.Amount) @@ -458,8 +458,8 @@ func TestProcessTokenLogicModules_TLMGlobalMint_Valid_MintDistributionCorrect(t addr := revShare.Address balanceBefore := supplierShareholderBalancesBefore[addr] balanceAfter := supplierShareholderBalancesAfter[addr].Amount.Int64() - mintShare := int64(supplierMint * revShare.RevSharePercentage / 100) - rewardShare := int64(float32(numTokensClaimed) * revShare.RevSharePercentage / 100) + mintShare := int64(supplierMint * float64(revShare.RevSharePercentage) / 100.0) + rewardShare := int64(float64(numTokensClaimed) * float64(revShare.RevSharePercentage) / 100.0) balanceIncrease := cosmosmath.NewInt(mintShare + rewardShare) expectedBalanceAfter := balanceBefore.Amount.Add(balanceIncrease).Int64() // TODO_MAINNET(@red-0ne): Remove the InDelta check and use the exact amount once the floating point arithmetic is fixed diff --git a/x/tokenomics/token_logic_module/distribution.go b/x/tokenomics/token_logic_module/distribution.go index 
bdd2b5c38..f7071562d 100644 --- a/x/tokenomics/token_logic_module/distribution.go +++ b/x/tokenomics/token_logic_module/distribution.go @@ -94,7 +94,7 @@ func GetShareAmountMap( shareAmountMap = make(map[string]uint64, len(serviceRevShare)) for _, revShare := range serviceRevShare { // TODO_MAINNET(@red-0ne): Use big.Rat for deterministic results. - sharePercentageFloat := big.NewFloat(float64(revShare.RevSharePercentage) / 100) + sharePercentageFloat := big.NewFloat(float64(revShare.RevSharePercentage) / float64(100.0)) amountToDistributeFloat := big.NewFloat(float64(amountToDistribute)) shareAmount, _ := big.NewFloat(0).Mul(amountToDistributeFloat, sharePercentageFloat).Uint64() shareAmountMap[revShare.Address] = shareAmount From d87e1b307cb882a48c94116cee62529b1cf56eab Mon Sep 17 00:00:00 2001 From: Dmitry K Date: Tue, 21 Jan 2025 17:06:29 -0800 Subject: [PATCH 02/24] Empty commit From ac41632478beefe5c4df832b7249cd97d360fd8a Mon Sep 17 00:00:00 2001 From: Redouane Lakrache Date: Thu, 23 Jan 2025 04:26:01 +0100 Subject: [PATCH 03/24] wip: queriers caching --- .env.dev | 2 +- Tiltfile | 2 +- create-accounts.sh | 41 +++++++++++ delegate_apps.sh | 37 ++++++++++ .../develop/developer_guide/quickstart.md | 4 +- .../docker_compose_debian_cheatsheet.md | 4 +- .../quickstart/docker_compose_walkthrough.md | 4 +- .../operate/quickstart/gateway_cheatsheet.md | 4 +- e2e/tests/init_test.go | 2 +- fund_apps.sh | 59 +++++++++++++++ load-testing/loadtest_manifest_localnet.yaml | 6 +- ...est_manifest_localnet_single_supplier.yaml | 22 +++--- .../tests/relays_stress_helpers_test.go | 39 +++++----- .../relays_stress_single_supplier.feature | 4 +- load-testing/tests/relays_stress_test.go | 12 ++-- .../config/application_stake_config.yaml | 3 + localnet/poktrolld/config/config.toml | 4 +- makefiles/relay.mk | 2 +- pkg/client/query/accquerier.go | 17 ++++- pkg/client/query/appquerier.go | 38 +++++++++- pkg/client/query/bankquerier.go | 28 +++++++- pkg/client/query/proofquerier.go | 
30 +++++++- pkg/client/query/servicequerier.go | 38 +++++++++- pkg/client/query/sessionquerier.go | 42 ++++++++++- pkg/client/query/sharedquerier.go | 71 ++++++++++++++++--- pkg/client/query/supplierquerier.go | 29 +++++++- pkg/deps/config/suppliers.go | 32 ++++----- pkg/relayer/cmd/cmd.go | 2 +- stake_apps.sh | 71 +++++++++++++++++++ store_addresses.sh | 52 ++++++++++++++ testutil/integration/suites/application.go | 2 +- .../proof_proof_requirement_threshold.json | 2 +- 32 files changed, 613 insertions(+), 92 deletions(-) create mode 100755 create-accounts.sh create mode 100755 delegate_apps.sh create mode 100755 fund_apps.sh create mode 100644 localnet/poktrolld/config/application_stake_config.yaml create mode 100755 stake_apps.sh create mode 100755 store_addresses.sh diff --git a/.env.dev b/.env.dev index f48e6b915..7784b0994 100644 --- a/.env.dev +++ b/.env.dev @@ -7,7 +7,7 @@ POKTROLLD_HOME=./localnet/poktrolld POCKET_NODE=tcp://127.0.0.1:26657 # TestNet RPC endpoint for validator maintained by Grove. Needs to be updated if there's another "primary" testnet. TESTNET_RPC=https://testnet-validated-validator-rpc.poktroll.com/ -PATH_URL=http://localhost:3000 +PATH_URL=http://localhost:3069 POCKET_ADDR_PREFIX=pokt CHAIN_ID=poktroll diff --git a/Tiltfile b/Tiltfile index 7a9af482d..f5366e943 100644 --- a/Tiltfile +++ b/Tiltfile @@ -365,7 +365,7 @@ for x in range(localnet_config["path_gateways"]["count"]): port_forwards=[ # See PATH for the default port used by the gateway. As of PR #1026, it is :3069. 
# https://github.com/buildwithgrove/path/blob/main/config/router.go - str(2999 + actor_number) + ":3069" + str(3068 + actor_number) + ":3069" ], ) diff --git a/create-accounts.sh b/create-accounts.sh new file mode 100755 index 000000000..60d611bd6 --- /dev/null +++ b/create-accounts.sh @@ -0,0 +1,41 @@ +#!/bin/bash + +TOTAL_ACCOUNTS=50000 +PARALLEL_JOBS=8 +ACCOUNTS_PER_JOB=$((TOTAL_ACCOUNTS / PARALLEL_JOBS)) + +create_accounts() { + local start=$1 + local end=$2 + local job_id=$3 + + for i in $(seq $start $end); do + if ! poktrolld keys add "app-$i" > /dev/null 2>&1; then + echo "Job $job_id: Error creating account app-$i" + continue + fi + + if [ $((i % 100)) -eq 0 ]; then + echo "Job $job_id: Progress $i/$end accounts created" + fi + done +} + +echo "Starting parallel account creation with $PARALLEL_JOBS jobs..." + +# Launch parallel jobs +for job in $(seq 0 $((PARALLEL_JOBS-1))); do + start=$((job * ACCOUNTS_PER_JOB + 1)) + if [ $job -eq $((PARALLEL_JOBS-1)) ]; then + end=$TOTAL_ACCOUNTS + else + end=$((start + ACCOUNTS_PER_JOB - 1)) + fi + + create_accounts $start $end $job & +done + +# Wait for all background jobs to complete +wait + +echo "All account creation jobs completed!" 
\ No newline at end of file diff --git a/delegate_apps.sh b/delegate_apps.sh new file mode 100755 index 000000000..0d8efe707 --- /dev/null +++ b/delegate_apps.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +TOTAL_APPS=50000 +PARALLEL_JOBS=8 +SEGMENT_SIZE=$((TOTAL_APPS / PARALLEL_JOBS)) + +# Function to process a segment of apps +process_segment() { + local start=$1 + local end=$2 + local job_id=$3 + + echo "Job $job_id delegating apps to gateway $start to $end" + for i in $(seq $start $end); do + local app_name="app-$i" + poktrolld tx application delegate-to-gateway pokt15vzxjqklzjtlz7lahe8z2dfe9nm5vxwwmscne4 -y \ + --keyring-backend test \ + --from "$app_name" > /dev/null 2>&1 + done +} + +export -f process_segment + +# Launch parallel jobs +for job_id in $(seq 0 $((PARALLEL_JOBS - 1))); do + start=$((job_id * SEGMENT_SIZE + 1)) + end=$((start + SEGMENT_SIZE - 1)) + # Adjust last segment to include remainder + if [ $job_id -eq $((PARALLEL_JOBS - 1)) ]; then + end=$TOTAL_APPS + fi + process_segment $start $end $job_id & +done + +wait + +echo "Delegation complete!" \ No newline at end of file diff --git a/docusaurus/docs/develop/developer_guide/quickstart.md b/docusaurus/docs/develop/developer_guide/quickstart.md index a899fba64..744f8c7e8 100644 --- a/docusaurus/docs/develop/developer_guide/quickstart.md +++ b/docusaurus/docs/develop/developer_guide/quickstart.md @@ -559,7 +559,7 @@ You can use `curl` ```bash curl -X POST -H "Content-Type: application/json" \ --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' \ - http://anvil.localhost:3000/v1 + http://anvil.localhost:3069/v1/ ``` If everything worked as expected, you should see output similar to the following: @@ -632,7 +632,7 @@ Give it a shot by running the following multiple times: ```bash curl -X POST -H "Content-Type: application/json" \ --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' \ - http://anvil.localhost:3000/v1 + http://anvil.localhost:3069/v1/ ``` ### 5.5. 
Inspect the logs diff --git a/docusaurus/docs/operate/quickstart/docker_compose_debian_cheatsheet.md b/docusaurus/docs/operate/quickstart/docker_compose_debian_cheatsheet.md index eb8fd7f6c..fc9058f23 100644 --- a/docusaurus/docs/operate/quickstart/docker_compose_debian_cheatsheet.md +++ b/docusaurus/docs/operate/quickstart/docker_compose_debian_cheatsheet.md @@ -277,7 +277,7 @@ docker logs -f --tail 100 gateway ## Send a Relay ```bash -curl http://eth.localhost:3000/v1 \ +curl http://eth.localhost:3069/v1/ \ -X POST \ -H "Content-Type: application/json" \ --data '{"method":"eth_blockNumber","params":[],"id":1,"jsonrpc":"2.0"}' @@ -289,7 +289,7 @@ To ensure you get a response, run the request a few times. ```bash for i in {1..10}; do - curl http://eth.localhost:3000/v1 \ + curl http://eth.localhost:3069/v1/ \ -X POST \ -H "Content-Type: application/json" \ --data '{"method":"eth_blockNumber","params":[],"id":1,"jsonrpc":"2.0"}' \ diff --git a/docusaurus/docs/operate/quickstart/docker_compose_walkthrough.md b/docusaurus/docs/operate/quickstart/docker_compose_walkthrough.md index 40e008fd1..b884f0844 100644 --- a/docusaurus/docs/operate/quickstart/docker_compose_walkthrough.md +++ b/docusaurus/docs/operate/quickstart/docker_compose_walkthrough.md @@ -672,7 +672,7 @@ you need to have a domain name that resolves to the IP address of your node. 
::: ```bash -curl http://eth.localhost:3000/v1 \ +curl http://eth.localhost:3069/v1/ \ -X POST \ -H "Content-Type: application/json" \ --data '{"method":"eth_blockNumber","params":[],"id":1,"jsonrpc":"2.0"}' @@ -690,7 +690,7 @@ To ensure you get a response, you may need to run the request a few times: ```bash for i in {1..10}; do - curl http://eth.localhost:3000/v1 \ + curl http://eth.localhost:3069/v1/ \ -X POST \ -H "Content-Type: application/json" \ --data '{"method":"eth_blockNumber","params":[],"id":1,"jsonrpc":"2.0"}' \ diff --git a/docusaurus/docs/operate/quickstart/gateway_cheatsheet.md b/docusaurus/docs/operate/quickstart/gateway_cheatsheet.md index 741966d13..36b0bd170 100644 --- a/docusaurus/docs/operate/quickstart/gateway_cheatsheet.md +++ b/docusaurus/docs/operate/quickstart/gateway_cheatsheet.md @@ -238,7 +238,7 @@ You should see the following output: ```json {"level":"info","message":"Starting the cache update process."} {"level":"warn","message":"endpoint hydrator is disabled: no service QoS generators are specified"} -{"level":"info","package":"router","message":"PATH gateway running on port 3000"} +{"level":"info","package":"router","message":"PATH gateway running on port 3069"} ``` #### [TODO] Run the `PATH` Gateway using Docker @@ -250,7 +250,7 @@ _TODO_IMPROVE(@olshansk): Add instructions for running the `PATH` Gateway using Check that the `PATH Gateway` is serving relays by running the following command yourself: ```bash -curl http://eth.localhost:3000/v1 \ +curl http://eth.localhost:3069/v1/ \ -X POST \ -H "Content-Type: application/json" \ -d '{"jsonrpc": "2.0", "id": 1, "method": "eth_blockNumber" }' diff --git a/e2e/tests/init_test.go b/e2e/tests/init_test.go index 680806c87..390559915 100644 --- a/e2e/tests/init_test.go +++ b/e2e/tests/init_test.go @@ -62,7 +62,7 @@ var ( keyRingFlag = "--keyring-backend=test" chainIdFlag = "--chain-id=poktroll" // pathUrl points to a local gateway using the PATH framework in centralized mode. 
- pathUrl = "http://localhost:3000/v1" // localhost is kept as the default to streamline local development & testing. + pathUrl = "http://localhost:3069/v1" // localhost is kept as the default to streamline local development & testing. ) func init() { diff --git a/fund_apps.sh b/fund_apps.sh new file mode 100755 index 000000000..b9de6a8a8 --- /dev/null +++ b/fund_apps.sh @@ -0,0 +1,59 @@ +#!/bin/bash + +TOTAL_APPS=50000 +AMOUNT="100500000upokt" +FAUCET_ACCOUNT="faucet" +MSGS_PER_TX=5000 +ADDRESSES_FILE="app_addresses.txt" + +echo "Starting funding of $TOTAL_APPS applications (Messages per TX: $MSGS_PER_TX)" + +# Function to wait for next block +wait_for_next_block() { + current_height=$(poktrolld query block --output json | sed '1d' | jq -r .header.height) + target_height=$((current_height + 1)) + + echo "Waiting for block $target_height..." + while true; do + new_height=$(poktrolld query block --output json | sed '1d' | jq -r .header.height) + if [ "$new_height" -ge "$target_height" ]; then + break + fi + sleep 1 + done +} + +# Process in batches +for ((i=1; i<=TOTAL_APPS; i+=MSGS_PER_TX)); do + batch_end=$((i + MSGS_PER_TX - 1)) + [ $batch_end -gt $TOTAL_APPS ] && batch_end=$TOTAL_APPS + + echo "Processing batch $i to $batch_end" + + # Generate messages for this batch + ACCOUNTS="" + for j in $(seq $i $batch_end); do + APP_ADDRESS=$(sed -n "${j}p" $ADDRESSES_FILE) + if [ $j -eq $i ]; then + ACCOUNTS="$APP_ADDRESS" + else + ACCOUNTS="$ACCOUNTS $APP_ADDRESS" + fi + done + + + # Create and broadcast multi-msg transaction + poktrolld tx bank multi-send $FAUCET_ACCOUNT $ACCOUNTS $AMOUNT \ + --from $FAUCET_ACCOUNT \ + --chain-id poktroll \ + --keyring-backend test \ + --gas auto \ + --gas-adjustment 1.5 \ + --gas-prices 0.025upokt \ + -y + + echo "Batch $((i / MSGS_PER_TX + 1)) complete" + wait_for_next_block +done + +echo "All transactions submitted!" 
\ No newline at end of file diff --git a/load-testing/loadtest_manifest_localnet.yaml b/load-testing/loadtest_manifest_localnet.yaml index da7d729ba..4d51b6634 100644 --- a/load-testing/loadtest_manifest_localnet.yaml +++ b/load-testing/loadtest_manifest_localnet.yaml @@ -52,12 +52,12 @@ gateways: # Gateway 1; http://localhost:10350/r/gateway1/overview - address: pokt15vzxjqklzjtlz7lahe8z2dfe9nm5vxwwmscne4 - exposed_url: http://localhost:3000/v1/ # The gateway url that the user sends relays to (e.g. curl) + exposed_url: http://localhost:3069/v1/ # The gateway url that the user sends relays to (e.g. curl) # Gateway 2; http://localhost:10350/r/gateway2/overview - address: pokt15w3fhfyc0lttv7r585e2ncpf6t2kl9uh8rsnyz - exposed_url: http://localhost:3001/v1/ + exposed_url: http://localhost:3070/v1/ # Gateway 3; http://localhost:10350/r/gateway3/overview - address: pokt1zhmkkd0rh788mc9prfq0m2h88t9ge0j83gnxya - exposed_url: http://localhost:3002/v1/ + exposed_url: http://localhost:3071/v1/ diff --git a/load-testing/loadtest_manifest_localnet_single_supplier.yaml b/load-testing/loadtest_manifest_localnet_single_supplier.yaml index e2a87e5d7..2b14858a9 100644 --- a/load-testing/loadtest_manifest_localnet_single_supplier.yaml +++ b/load-testing/loadtest_manifest_localnet_single_supplier.yaml @@ -1,7 +1,7 @@ # NB: The number of pre-provisioned **LocalNet** actors are managed in # 'localnet_config.yaml' by the respective actors `count` property. -is_ephemeral_chain: true # This should be `true` for LocalNet as it is an ephemeral network +is_ephemeral_chain: false # This should be `true` for LocalNet as it is an ephemeral network # rpc_node is the URL of the RPC node that the load test will use to query the # chain and submit transactions. @@ -27,9 +27,9 @@ suppliers: # `relay_miner_config.yaml` file. 
# RelayMiner 1; http://localhost:10350/r/relayminer1/overview - - address: pokt19a3t4yunp0dlpfjrp7qwnzwlrzd5fzs2gjaaaj - # The advertised URL used by the supplier when it submits a stake message onchain. - exposed_url: http://relayminer1:8545 + #- address: pokt19a3t4yunp0dlpfjrp7qwnzwlrzd5fzs2gjaaaj + # # The advertised URL used by the supplier when it submits a stake message onchain. + # exposed_url: http://relayminer1:8545 # List of pre-provisioned gateways used for load testing. # These gateways will be progressively staked and delegated to during the load test. @@ -44,12 +44,12 @@ gateways: # Gateway 1; http://localhost:10350/r/gateway1/overview - address: pokt15vzxjqklzjtlz7lahe8z2dfe9nm5vxwwmscne4 - exposed_url: http://localhost:3000/v1/ # The gateway url that the user sends relays to (e.g. curl) + exposed_url: http://localhost:3069/v1/ # The gateway url that the user sends relays to (e.g. curl) - # Gateway 2; http://localhost:10350/r/gateway2/overview - - address: pokt15w3fhfyc0lttv7r585e2ncpf6t2kl9uh8rsnyz - exposed_url: http://localhost:3001/v1/ + ## Gateway 2; http://localhost:10350/r/gateway2/overview + #- address: pokt15w3fhfyc0lttv7r585e2ncpf6t2kl9uh8rsnyz + # exposed_url: http://localhost:3070/v1/ - # Gateway 3; http://localhost:10350/r/gateway3/overview - - address: pokt1zhmkkd0rh788mc9prfq0m2h88t9ge0j83gnxya - exposed_url: http://localhost:3002/v1/ + ## Gateway 3; http://localhost:10350/r/gateway3/overview + #- address: pokt1zhmkkd0rh788mc9prfq0m2h88t9ge0j83gnxya + # exposed_url: http://localhost:3071/v1/ diff --git a/load-testing/tests/relays_stress_helpers_test.go b/load-testing/tests/relays_stress_helpers_test.go index 5eea3fd03..28b0b7163 100644 --- a/load-testing/tests/relays_stress_helpers_test.go +++ b/load-testing/tests/relays_stress_helpers_test.go @@ -23,7 +23,6 @@ import ( rpctypes "github.com/cometbft/cometbft/rpc/jsonrpc/types" sdkclient "github.com/cosmos/cosmos-sdk/client" codectypes "github.com/cosmos/cosmos-sdk/codec/types" - 
"github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1" sdk "github.com/cosmos/cosmos-sdk/types" authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" "github.com/cosmos/cosmos-sdk/x/authz" @@ -525,7 +524,7 @@ func (s *relaysSuite) sendFundAvailableActorsTx() (suppliers, gateways, applicat // Send all the funding account's pending messages in a single transaction. // This is done to avoid sending multiple transactions to fund the initial actors. // pendingMsgs is reset after the transaction is sent. - defer s.sendPendingMsgsTx(s.fundingAccountInfo) + //defer s.sendPendingMsgsTx(s.fundingAccountInfo) // Fund accounts for **initial** applications only. // Additional applications are generated and funded as they're incremented. for i := int64(0); i < s.appInitialCount; i++ { @@ -536,7 +535,7 @@ func (s *relaysSuite) sendFundAvailableActorsTx() (suppliers, gateways, applicat // The application is created with the keyName formatted as "app-%d" starting from 1. application := s.createApplicationAccount(i+1, appFundingAmount) // Add a bank.MsgSend message to fund the application. 
- s.addPendingFundMsg(application.address, sdk.NewCoins(application.amountToStake)) + //s.addPendingFundMsg(application.address, sdk.NewCoins(application.amountToStake)) applications = append(applications, application) } @@ -637,11 +636,11 @@ func (s *relaysSuite) createApplicationAccount( amountToStake sdk.Coin, ) *accountInfo { keyName := fmt.Sprintf("app-%d", appIdx) - privKey := secp256k1.GenPrivKey() - privKeyHex := fmt.Sprintf("%x", privKey) + //privKey := secp256k1.GenPrivKey() + //privKeyHex := fmt.Sprintf("%x", privKey) - err := s.txContext.GetKeyring().ImportPrivKeyHex(keyName, privKeyHex, "secp256k1") - require.NoError(s, err) + //err := s.txContext.GetKeyring().ImportPrivKeyHex(keyName, privKeyHex, "secp256k1") + //require.NoError(s, err) keyRecord, err := s.txContext.GetKeyring().Key(keyName) require.NoError(s, err) @@ -649,6 +648,8 @@ func (s *relaysSuite) createApplicationAccount( accAddress, err := keyRecord.GetAddress() require.NoError(s, err) + logger.Debug().Msgf("Application added %s", keyName) + return &accountInfo{ address: accAddress.String(), pendingMsgs: []sdk.Msg{}, @@ -1421,9 +1422,9 @@ func (s *relaysSuite) querySharedParams(queryNodeRPCURL string) { blockQueryClient, err := sdkclient.NewClientFromNode(queryNodeRPCURL) require.NoError(s, err) - deps = depinject.Configs(deps, depinject.Supply(blockQueryClient)) + deps = depinject.Configs(deps, depinject.Supply(blockQueryClient, s.blockClient)) - sharedQueryClient, err := query.NewSharedQuerier(deps) + sharedQueryClient, err := query.NewSharedQuerier(s.ctx, deps) require.NoError(s, err) sharedParams, err := sharedQueryClient.GetParams(s.ctx) @@ -1441,9 +1442,9 @@ func (s *relaysSuite) queryAppParams(queryNodeRPCURL string) { blockQueryClient, err := sdkclient.NewClientFromNode(queryNodeRPCURL) require.NoError(s, err) - deps = depinject.Configs(deps, depinject.Supply(blockQueryClient)) + deps = depinject.Configs(deps, depinject.Supply(blockQueryClient, s.blockClient)) - appQueryclient, err 
:= query.NewApplicationQuerier(deps) + appQueryclient, err := query.NewApplicationQuerier(s.ctx, deps) require.NoError(s, err) appParams, err := appQueryclient.GetParams(s.ctx) @@ -1457,13 +1458,13 @@ func (s *relaysSuite) queryAppParams(queryNodeRPCURL string) { func (s *relaysSuite) queryProofParams(queryNodeRPCURL string) { s.Helper() - deps := depinject.Supply(s.txContext.GetClientCtx()) + deps := depinject.Supply(s.txContext.GetClientCtx(), s.blockClient) blockQueryClient, err := sdkclient.NewClientFromNode(queryNodeRPCURL) require.NoError(s, err) deps = depinject.Configs(deps, depinject.Supply(blockQueryClient)) - proofQueryclient, err := query.NewProofQuerier(deps) + proofQueryclient, err := query.NewProofQuerier(s.ctx, deps) require.NoError(s, err) params, err := proofQueryclient.GetParams(s.ctx) @@ -1506,13 +1507,13 @@ func (s *relaysSuite) queryTokenomicsParams(queryNodeRPCURL string) { func (s *relaysSuite) queryTestedService(queryNodeRPCURL string) { s.Helper() - deps := depinject.Supply(s.txContext.GetClientCtx()) + deps := depinject.Supply(s.txContext.GetClientCtx(), s.blockClient) blockQueryClient, err := sdkclient.NewClientFromNode(queryNodeRPCURL) require.NoError(s, err) deps = depinject.Configs(deps, depinject.Supply(blockQueryClient)) - serviceQueryclient, err := query.NewServiceQuerier(deps) + serviceQueryclient, err := query.NewServiceQuerier(s.ctx, deps) require.NoError(s, err) service, err := serviceQueryclient.GetService(s.ctx, "anvil") @@ -1553,13 +1554,15 @@ func (s *relaysSuite) forEachRelayBatchSendBatch(_ context.Context, relayBatchIn // each sending relayRatePerApp relays per second. relaysPerSec := len(relayBatchInfo.appAccounts) * int(s.relayRatePerApp) // Determine the interval between each relay request. 
- relayInterval := time.Second / time.Duration(relaysPerSec) + relayInterval := time.Second / time.Duration(85) batchWaitGroup := new(sync.WaitGroup) batchWaitGroup.Add(relaysPerSec * int(blockDurationSec)) - for i := 0; i < relaysPerSec*int(blockDurationSec); i++ { - iterationTime := relayBatchInfo.nextBatchTime.Add(time.Duration(i+1) * relayInterval) + now := time.Now() + + for i := 0; i < 50000; i++ { + iterationTime := now.Add(time.Duration(i+1) * relayInterval) batchLimiter.Go(s.ctx, func() { relaysSent := s.numRelaysSent.Add(1) - 1 diff --git a/load-testing/tests/relays_stress_single_supplier.feature b/load-testing/tests/relays_stress_single_supplier.feature index 34d51a6dd..08a77f88d 100644 --- a/load-testing/tests/relays_stress_single_supplier.feature +++ b/load-testing/tests/relays_stress_single_supplier.feature @@ -5,12 +5,12 @@ Feature: Loading gateway server with relays And a rate of "1" relay requests per second is sent per application And the following initial actors are staked: | actor | count | - | application | 4 | + | application | 50000 | | gateway | 1 | | supplier | 1 | And more actors are staked as follows: | actor | actor inc amount | blocks per inc | max actors | - | application | 4 | 10 | 12 | + | application | 1 | 10 | 50000 | | gateway | 1 | 10 | 1 | | supplier | 1 | 10 | 1 | When a load of concurrent relay requests are sent from the applications diff --git a/load-testing/tests/relays_stress_test.go b/load-testing/tests/relays_stress_test.go index 8557bfa15..3bc24b96c 100644 --- a/load-testing/tests/relays_stress_test.go +++ b/load-testing/tests/relays_stress_test.go @@ -74,7 +74,7 @@ var ( testedServiceId string // blockDurationSec is the duration of a block in seconds. // NB: This value SHOULD be equal to `timeout_propose` in `config.yml`. - blockDurationSec = int64(2) + blockDurationSec = int64(60) // relayPayloadFmt is the JSON-RPC request relayPayloadFmt to send a relay request. 
relayPayloadFmt = `{"jsonrpc":"2.0","method":"%s","params":[],"id":%d}` // relayRequestMethod is the method of the JSON-RPC request to be relayed. @@ -386,7 +386,7 @@ func (s *relaysSuite) MoreActorsAreStakedAsFollows(table gocuke.DataTable) { // only one transaction is expected to be committed. fundedActors := append(fundedSuppliers, fundedGateways...) fundedActors = append(fundedActors, fundedApplications...) - s.ensureFundedActors(s.ctx, fundedActors) + //s.ensureFundedActors(s.ctx, fundedActors) logger.Info().Msg("Actors funded") @@ -398,8 +398,8 @@ func (s *relaysSuite) MoreActorsAreStakedAsFollows(table gocuke.DataTable) { stakedActors := append(stakedSuppliers, stakedGateways...) stakedActors = append(stakedActors, stakedApplications...) - s.sendInitialActorsStakeMsgs(stakedSuppliers, stakedGateways, stakedApplications) - s.ensureStakedActors(s.ctx, stakedActors) + //s.sendInitialActorsStakeMsgs(stakedSuppliers, stakedGateways, stakedApplications) + //s.ensureStakedActors(s.ctx, stakedActors) logger.Info().Msg("Actors staked") @@ -414,8 +414,8 @@ func (s *relaysSuite) MoreActorsAreStakedAsFollows(table gocuke.DataTable) { } // Delegate the initial applications to the initial gateways - s.sendDelegateInitialAppsTxs(stakedApplications, stakedGateways) - s.ensureDelegatedApps(s.ctx, stakedApplications, stakedGateways) + //s.sendDelegateInitialAppsTxs(stakedApplications, stakedGateways) + //s.ensureDelegatedApps(s.ctx, stakedApplications, stakedGateways) logger.Info().Msg("Apps delegated") diff --git a/localnet/poktrolld/config/application_stake_config.yaml b/localnet/poktrolld/config/application_stake_config.yaml new file mode 100644 index 000000000..8d507a44a --- /dev/null +++ b/localnet/poktrolld/config/application_stake_config.yaml @@ -0,0 +1,3 @@ +stake_amount: 100000069upokt +service_ids: + - anvil diff --git a/localnet/poktrolld/config/config.toml b/localnet/poktrolld/config/config.toml index bbfa0366f..35dcda640 100644 --- 
a/localnet/poktrolld/config/config.toml +++ b/localnet/poktrolld/config/config.toml @@ -174,7 +174,7 @@ timeout_broadcast_tx_commit = "10s" max_request_batch_size = 10 # Maximum size of request body, in bytes -max_body_bytes = 1000000 +max_body_bytes = 100000000 # Maximum size of request header, in bytes max_header_bytes = 1048576 @@ -330,7 +330,7 @@ keep-invalid-txs-in-cache = false # Maximum size of a single transaction. # NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. -max_tx_bytes = 1048576 +max_tx_bytes = 100000000 # Maximum size of a batch of transactions to send to a peer # Including space needed by encoding (one varint per transaction). diff --git a/makefiles/relay.mk b/makefiles/relay.mk index 202dba1d7..d45d75780 100644 --- a/makefiles/relay.mk +++ b/makefiles/relay.mk @@ -8,7 +8,7 @@ send_relay_path_JSONRPC: test_e2e_env ## Send a JSONRPC relay through PATH to a -H "X-App-Address: pokt1mrqt5f7qh8uxs27cjm9t7v9e74a9vvdnq5jva4" \ -H "target-service-id: anvil" \ --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' \ - http://localhost:3000/v1/ + http://localhost:3069/v1/ # TODO_MAINNET(@red-0ne): Re-enable this once PATH Gateway supports REST. # See https://github.com/buildwithgrove/path/issues/87 diff --git a/pkg/client/query/accquerier.go b/pkg/client/query/accquerier.go index 468d89657..80b4c3b48 100644 --- a/pkg/client/query/accquerier.go +++ b/pkg/client/query/accquerier.go @@ -11,6 +11,7 @@ import ( grpc "github.com/cosmos/gogoproto/grpc" "github.com/pokt-network/poktroll/pkg/client" + "github.com/pokt-network/poktroll/pkg/observable/channel" ) var _ client.AccountQueryClient = (*accQuerier)(nil) @@ -21,6 +22,7 @@ var _ client.AccountQueryClient = (*accQuerier)(nil) type accQuerier struct { clientConn grpc.ClientConn accountQuerier accounttypes.QueryClient + blockClient client.BlockClient // accountCache is a cache of accounts that have already been queried. 
// TODO_TECHDEBT: Add a size limit to the cache and consider an LRU cache. @@ -33,16 +35,25 @@ type accQuerier struct { // // Required dependencies: // - clientCtx -func NewAccountQuerier(deps depinject.Config) (client.AccountQueryClient, error) { +func NewAccountQuerier(ctx context.Context, deps depinject.Config) (client.AccountQueryClient, error) { aq := &accQuerier{accountCache: make(map[string]types.AccountI)} if err := depinject.Inject( deps, + &aq.blockClient, &aq.clientConn, ); err != nil { return nil, err } + channel.ForEach( + ctx, + aq.blockClient.CommittedBlocksSequence(ctx), + func(ctx context.Context, block client.Block) { + aq.ClearCache() + }, + ) + aq.accountQuerier = accounttypes.NewQueryClient(aq.clientConn) return aq, nil @@ -107,4 +118,8 @@ func (aq *accQuerier) GetPubKeyFromAddress(ctx context.Context, address string) } func (aq *accQuerier) ClearCache() { + aq.accountCacheMu.Lock() + defer aq.accountCacheMu.Unlock() + + aq.accountCache = make(map[string]types.AccountI) } diff --git a/pkg/client/query/appquerier.go b/pkg/client/query/appquerier.go index 356ce674c..aba0f956e 100644 --- a/pkg/client/query/appquerier.go +++ b/pkg/client/query/appquerier.go @@ -2,11 +2,13 @@ package query import ( "context" + "sync" "cosmossdk.io/depinject" grpc "github.com/cosmos/gogoproto/grpc" "github.com/pokt-network/poktroll/pkg/client" + "github.com/pokt-network/poktroll/pkg/observable/channel" apptypes "github.com/pokt-network/poktroll/x/application/types" ) @@ -18,6 +20,11 @@ var _ client.ApplicationQueryClient = (*appQuerier)(nil) type appQuerier struct { clientConn grpc.ClientConn applicationQuerier apptypes.QueryClient + + blockClient client.BlockClient + appCache map[string]*apptypes.Application + appParamsCache *apptypes.Params + appCacheMu sync.Mutex } // NewApplicationQuerier returns a new instance of a client.ApplicationQueryClient @@ -25,11 +32,12 @@ type appQuerier struct { // // Required dependencies: // - clientCtx -func 
NewApplicationQuerier(deps depinject.Config) (client.ApplicationQueryClient, error) { +func NewApplicationQuerier(ctx context.Context, deps depinject.Config) (client.ApplicationQueryClient, error) { aq := &appQuerier{} if err := depinject.Inject( deps, + &aq.blockClient, &aq.clientConn, ); err != nil { return nil, err @@ -37,6 +45,18 @@ func NewApplicationQuerier(deps depinject.Config) (client.ApplicationQueryClient aq.applicationQuerier = apptypes.NewQueryClient(aq.clientConn) + channel.ForEach( + ctx, + aq.blockClient.CommittedBlocksSequence(ctx), + func(ctx context.Context, block client.Block) { + aq.appCacheMu.Lock() + defer aq.appCacheMu.Unlock() + + aq.appCache = make(map[string]*apptypes.Application) + aq.appParamsCache = nil + }, + ) + return aq, nil } @@ -45,11 +65,19 @@ func (aq *appQuerier) GetApplication( ctx context.Context, appAddress string, ) (apptypes.Application, error) { + aq.appCacheMu.Lock() + defer aq.appCacheMu.Unlock() + + if foundApp, isAppFound := aq.appCache[appAddress]; isAppFound { + return *foundApp, nil + } + req := apptypes.QueryGetApplicationRequest{Address: appAddress} res, err := aq.applicationQuerier.Application(ctx, &req) if err != nil { return apptypes.Application{}, apptypes.ErrAppNotFound.Wrapf("app address: %s [%v]", appAddress, err) } + aq.appCache[appAddress] = &res.Application return res.Application, nil } @@ -65,10 +93,18 @@ func (aq *appQuerier) GetAllApplications(ctx context.Context) ([]apptypes.Applic // GetParams returns the application module parameters func (aq *appQuerier) GetParams(ctx context.Context) (*apptypes.Params, error) { + aq.appCacheMu.Lock() + defer aq.appCacheMu.Unlock() + + if aq.appParamsCache != nil { + return aq.appParamsCache, nil + } + req := apptypes.QueryParamsRequest{} res, err := aq.applicationQuerier.Params(ctx, &req) if err != nil { return nil, err } + aq.appParamsCache = &res.Params return &res.Params, nil } diff --git a/pkg/client/query/bankquerier.go b/pkg/client/query/bankquerier.go 
index ca28a4998..7f4c3cecf 100644 --- a/pkg/client/query/bankquerier.go +++ b/pkg/client/query/bankquerier.go @@ -2,6 +2,7 @@ package query import ( "context" + "sync" "cosmossdk.io/depinject" sdk "github.com/cosmos/cosmos-sdk/types" @@ -10,6 +11,7 @@ import ( "github.com/pokt-network/poktroll/app/volatile" "github.com/pokt-network/poktroll/pkg/client" + "github.com/pokt-network/poktroll/pkg/observable/channel" ) var _ client.BankQueryClient = (*bankQuerier)(nil) @@ -19,6 +21,10 @@ var _ client.BankQueryClient = (*bankQuerier)(nil) type bankQuerier struct { clientConn grpc.ClientConn bankQuerier banktypes.QueryClient + + blockClient client.BlockClient + bankCache map[string]*sdk.Coin + bankCacheMu sync.Mutex } // NewBankQuerier returns a new instance of a client.BankQueryClient by @@ -26,11 +32,12 @@ type bankQuerier struct { // // Required dependencies: // - clientCtx -func NewBankQuerier(deps depinject.Config) (client.BankQueryClient, error) { +func NewBankQuerier(ctx context.Context, deps depinject.Config) (client.BankQueryClient, error) { bq := &bankQuerier{} if err := depinject.Inject( deps, + &bq.blockClient, &bq.clientConn, ); err != nil { return nil, err @@ -38,6 +45,17 @@ func NewBankQuerier(deps depinject.Config) (client.BankQueryClient, error) { bq.bankQuerier = banktypes.NewQueryClient(bq.clientConn) + channel.ForEach( + ctx, + bq.blockClient.CommittedBlocksSequence(ctx), + func(ctx context.Context, block client.Block) { + bq.bankCacheMu.Lock() + defer bq.bankCacheMu.Unlock() + + bq.bankCache = make(map[string]*sdk.Coin) + }, + ) + return bq, nil } @@ -46,6 +64,12 @@ func (bq *bankQuerier) GetBalance( ctx context.Context, address string, ) (*sdk.Coin, error) { + bq.bankCacheMu.Lock() + defer bq.bankCacheMu.Unlock() + + if foundBalance, isBalanceFound := bq.bankCache[address]; isBalanceFound { + return foundBalance, nil + } // Query the blockchain for the balance record req := &banktypes.QueryBalanceRequest{Address: address, Denom: volatile.DenomuPOKT} 
res, err := bq.bankQuerier.Balance(ctx, req) @@ -53,5 +77,7 @@ func (bq *bankQuerier) GetBalance( return nil, ErrQueryBalanceNotFound.Wrapf("address: %s [%s]", address, err) } + bq.bankCache[address] = res.Balance + return res.Balance, nil } diff --git a/pkg/client/query/proofquerier.go b/pkg/client/query/proofquerier.go index 6751dc995..11eec1f25 100644 --- a/pkg/client/query/proofquerier.go +++ b/pkg/client/query/proofquerier.go @@ -2,11 +2,13 @@ package query import ( "context" + "sync" "cosmossdk.io/depinject" "github.com/cosmos/gogoproto/grpc" "github.com/pokt-network/poktroll/pkg/client" + "github.com/pokt-network/poktroll/pkg/observable/channel" prooftypes "github.com/pokt-network/poktroll/x/proof/types" ) @@ -15,6 +17,10 @@ import ( type proofQuerier struct { clientConn grpc.ClientConn proofQuerier prooftypes.QueryClient + + blockClient client.BlockClient + proofParamsCache client.ProofParams + bankCacheMu sync.Mutex } // NewProofQuerier returns a new instance of a client.ProofQueryClient by @@ -22,11 +28,12 @@ type proofQuerier struct { // // Required dependencies: // - grpc.ClientConn -func NewProofQuerier(deps depinject.Config) (client.ProofQueryClient, error) { +func NewProofQuerier(ctx context.Context, deps depinject.Config) (client.ProofQueryClient, error) { querier := &proofQuerier{} if err := depinject.Inject( deps, + &querier.blockClient, &querier.clientConn, ); err != nil { return nil, err @@ -34,6 +41,17 @@ func NewProofQuerier(deps depinject.Config) (client.ProofQueryClient, error) { querier.proofQuerier = prooftypes.NewQueryClient(querier.clientConn) + channel.ForEach( + ctx, + querier.blockClient.CommittedBlocksSequence(ctx), + func(ctx context.Context, block client.Block) { + querier.bankCacheMu.Lock() + defer querier.bankCacheMu.Unlock() + + querier.proofParamsCache = nil + }, + ) + return querier, nil } @@ -41,10 +59,18 @@ func NewProofQuerier(deps depinject.Config) (client.ProofQueryClient, error) { func (pq *proofQuerier) GetParams( ctx 
context.Context, ) (client.ProofParams, error) { + pq.bankCacheMu.Lock() + defer pq.bankCacheMu.Unlock() + + if pq.proofParamsCache != nil { + return pq.proofParamsCache, nil + } req := &prooftypes.QueryParamsRequest{} res, err := pq.proofQuerier.Params(ctx, req) if err != nil { return nil, err } - return &res.Params, nil + + pq.proofParamsCache = &res.Params + return pq.proofParamsCache, nil } diff --git a/pkg/client/query/servicequerier.go b/pkg/client/query/servicequerier.go index 1f5ef2d2a..20bf6c2d1 100644 --- a/pkg/client/query/servicequerier.go +++ b/pkg/client/query/servicequerier.go @@ -2,11 +2,13 @@ package query import ( "context" + "sync" "cosmossdk.io/depinject" "github.com/cosmos/gogoproto/grpc" "github.com/pokt-network/poktroll/pkg/client" + "github.com/pokt-network/poktroll/pkg/observable/channel" servicetypes "github.com/pokt-network/poktroll/x/service/types" sharedtypes "github.com/pokt-network/poktroll/x/shared/types" ) @@ -19,6 +21,11 @@ var _ client.ServiceQueryClient = (*serviceQuerier)(nil) type serviceQuerier struct { clientConn grpc.ClientConn serviceQuerier servicetypes.QueryClient + + blockClient client.BlockClient + serviceCache map[string]*sharedtypes.Service + relayMiningDifficultyCache map[string]servicetypes.RelayMiningDifficulty + serviceCacheMu sync.Mutex } // NewServiceQuerier returns a new instance of a client.ServiceQueryClient by @@ -26,11 +33,12 @@ type serviceQuerier struct { // // Required dependencies: // - clientCtx (grpc.ClientConn) -func NewServiceQuerier(deps depinject.Config) (client.ServiceQueryClient, error) { +func NewServiceQuerier(ctx context.Context, deps depinject.Config) (client.ServiceQueryClient, error) { servq := &serviceQuerier{} if err := depinject.Inject( deps, + &servq.blockClient, &servq.clientConn, ); err != nil { return nil, err @@ -38,6 +46,18 @@ func NewServiceQuerier(deps depinject.Config) (client.ServiceQueryClient, error) servq.serviceQuerier = servicetypes.NewQueryClient(servq.clientConn) + 
channel.ForEach( + ctx, + servq.blockClient.CommittedBlocksSequence(ctx), + func(ctx context.Context, block client.Block) { + servq.serviceCacheMu.Lock() + defer servq.serviceCacheMu.Unlock() + + servq.serviceCache = make(map[string]*sharedtypes.Service) + servq.relayMiningDifficultyCache = make(map[string]servicetypes.RelayMiningDifficulty) + }, + ) + return servq, nil } @@ -47,6 +67,13 @@ func (servq *serviceQuerier) GetService( ctx context.Context, serviceId string, ) (sharedtypes.Service, error) { + servq.serviceCacheMu.Lock() + defer servq.serviceCacheMu.Unlock() + + if foundService, isServiceFound := servq.serviceCache[serviceId]; isServiceFound { + return *foundService, nil + } + req := &servicetypes.QueryGetServiceRequest{ Id: serviceId, } @@ -58,6 +85,8 @@ func (servq *serviceQuerier) GetService( serviceId, err, ) } + + servq.serviceCache[serviceId] = &res.Service return res.Service, nil } @@ -67,6 +96,12 @@ func (servq *serviceQuerier) GetServiceRelayDifficulty( ctx context.Context, serviceId string, ) (servicetypes.RelayMiningDifficulty, error) { + servq.serviceCacheMu.Lock() + defer servq.serviceCacheMu.Unlock() + + if foundRelayMiningDifficulty, isRelayMiningDifficultyFound := servq.relayMiningDifficultyCache[serviceId]; isRelayMiningDifficultyFound { + return foundRelayMiningDifficulty, nil + } req := &servicetypes.QueryGetRelayMiningDifficultyRequest{ ServiceId: serviceId, } @@ -76,5 +111,6 @@ func (servq *serviceQuerier) GetServiceRelayDifficulty( return servicetypes.RelayMiningDifficulty{}, err } + servq.relayMiningDifficultyCache[serviceId] = res.RelayMiningDifficulty return res.RelayMiningDifficulty, nil } diff --git a/pkg/client/query/sessionquerier.go b/pkg/client/query/sessionquerier.go index fdf6c42e9..aac074a73 100644 --- a/pkg/client/query/sessionquerier.go +++ b/pkg/client/query/sessionquerier.go @@ -2,11 +2,14 @@ package query import ( "context" + "fmt" + "sync" "cosmossdk.io/depinject" "github.com/cosmos/gogoproto/grpc" 
"github.com/pokt-network/poktroll/pkg/client" + "github.com/pokt-network/poktroll/pkg/observable/channel" sessiontypes "github.com/pokt-network/poktroll/x/session/types" ) @@ -18,6 +21,11 @@ var _ client.SessionQueryClient = (*sessionQuerier)(nil) type sessionQuerier struct { clientConn grpc.ClientConn sessionQuerier sessiontypes.QueryClient + + blockClient client.BlockClient + sessionCache map[string]*sessiontypes.Session + sessionParamsCache *sessiontypes.Params + sessionCacheMu sync.Mutex } // NewSessionQuerier returns a new instance of a client.SessionQueryClient by @@ -25,11 +33,12 @@ type sessionQuerier struct { // // Required dependencies: // - clientCtx (grpc.ClientConn) -func NewSessionQuerier(deps depinject.Config) (client.SessionQueryClient, error) { +func NewSessionQuerier(ctx context.Context, deps depinject.Config) (client.SessionQueryClient, error) { sessq := &sessionQuerier{} if err := depinject.Inject( deps, + &sessq.blockClient, &sessq.clientConn, ); err != nil { return nil, err @@ -37,6 +46,18 @@ func NewSessionQuerier(deps depinject.Config) (client.SessionQueryClient, error) sessq.sessionQuerier = sessiontypes.NewQueryClient(sessq.clientConn) + channel.ForEach( + ctx, + sessq.blockClient.CommittedBlocksSequence(ctx), + func(ctx context.Context, block client.Block) { + sessq.sessionCacheMu.Lock() + defer sessq.sessionCacheMu.Unlock() + + sessq.sessionCache = make(map[string]*sessiontypes.Session) + sessq.sessionParamsCache = nil + }, + ) + return sessq, nil } @@ -48,6 +69,15 @@ func (sessq *sessionQuerier) GetSession( serviceId string, blockHeight int64, ) (*sessiontypes.Session, error) { + sessq.sessionCacheMu.Lock() + defer sessq.sessionCacheMu.Unlock() + + sessionCacheKey := fmt.Sprintf("%s-%s", appAddress, serviceId) + + if foundSession, isSessionFound := sessq.sessionCache[sessionCacheKey]; isSessionFound { + return foundSession, nil + } + req := &sessiontypes.QueryGetSessionRequest{ ApplicationAddress: appAddress, ServiceId: serviceId, @@ 
-60,15 +90,25 @@ func (sessq *sessionQuerier) GetSession( appAddress, serviceId, blockHeight, err, ) } + + sessq.sessionCache[sessionCacheKey] = res.Session return res.Session, nil } // GetParams queries & returns the session module onchain parameters. func (sessq *sessionQuerier) GetParams(ctx context.Context) (*sessiontypes.Params, error) { + sessq.sessionCacheMu.Lock() + defer sessq.sessionCacheMu.Unlock() + + if sessq.sessionParamsCache != nil { + return sessq.sessionParamsCache, nil + } req := &sessiontypes.QueryParamsRequest{} res, err := sessq.sessionQuerier.Params(ctx, req) if err != nil { return nil, ErrQuerySessionParams.Wrapf("[%v]", err) } + + sessq.sessionParamsCache = &res.Params return &res.Params, nil } diff --git a/pkg/client/query/sharedquerier.go b/pkg/client/query/sharedquerier.go index bbe67b0de..ff3868117 100644 --- a/pkg/client/query/sharedquerier.go +++ b/pkg/client/query/sharedquerier.go @@ -2,11 +2,13 @@ package query import ( "context" + "sync" "cosmossdk.io/depinject" "github.com/cosmos/gogoproto/grpc" "github.com/pokt-network/poktroll/pkg/client" + "github.com/pokt-network/poktroll/pkg/observable/channel" sharedtypes "github.com/pokt-network/poktroll/x/shared/types" ) @@ -19,6 +21,11 @@ type sharedQuerier struct { clientConn grpc.ClientConn sharedQuerier sharedtypes.QueryClient blockQuerier client.BlockQueryClient + + blockClient client.BlockClient + sharedParamsCache *sharedtypes.Params + blockCache map[int64][]byte + sessionCacheMu sync.Mutex } // NewSharedQuerier returns a new instance of a client.SharedQueryClient by @@ -27,12 +34,13 @@ type sharedQuerier struct { // Required dependencies: // - clientCtx (grpc.ClientConn) // - client.BlockQueryClient -func NewSharedQuerier(deps depinject.Config) (client.SharedQueryClient, error) { +func NewSharedQuerier(ctx context.Context, deps depinject.Config) (client.SharedQueryClient, error) { querier := &sharedQuerier{} if err := depinject.Inject( deps, &querier.clientConn, + 
&querier.blockClient, &querier.blockQuerier, ); err != nil { return nil, err @@ -40,6 +48,18 @@ func NewSharedQuerier(deps depinject.Config) (client.SharedQueryClient, error) { querier.sharedQuerier = sharedtypes.NewQueryClient(querier.clientConn) + channel.ForEach( + ctx, + querier.blockClient.CommittedBlocksSequence(ctx), + func(ctx context.Context, block client.Block) { + querier.sessionCacheMu.Lock() + defer querier.sessionCacheMu.Unlock() + + querier.blockCache = make(map[int64][]byte) + querier.sharedParamsCache = nil + }, + ) + return querier, nil } @@ -49,11 +69,20 @@ func NewSharedQuerier(deps depinject.Config) (client.SharedQueryClient, error) { // Once `ModuleParamsClient` is implemented, use its replay observable's `#Last()` method // to get the most recently (asynchronously) observed (and cached) value. func (sq *sharedQuerier) GetParams(ctx context.Context) (*sharedtypes.Params, error) { + sq.sessionCacheMu.Lock() + defer sq.sessionCacheMu.Unlock() + + if sq.sharedParamsCache != nil { + return sq.sharedParamsCache, nil + } + req := &sharedtypes.QueryParamsRequest{} res, err := sq.sharedQuerier.Params(ctx, req) if err != nil { return nil, ErrQuerySessionParams.Wrapf("[%v]", err) } + + sq.sharedParamsCache = &res.Params return &res.Params, nil } @@ -127,14 +156,23 @@ func (sq *sharedQuerier) GetEarliestSupplierClaimCommitHeight(ctx context.Contex // Fetch the block at the proof window open height. Its hash is used as part // of the seed to the pseudo-random number generator. 
claimWindowOpenHeight := sharedtypes.GetClaimWindowOpenHeight(sharedParams, queryHeight) - claimWindowOpenBlock, err := sq.blockQuerier.Block(ctx, &claimWindowOpenHeight) - if err != nil { - return 0, err + sq.sessionCacheMu.Lock() + defer sq.sessionCacheMu.Unlock() + + var claimWindowOpenBlockHash []byte + if hash, ok := sq.blockCache[claimWindowOpenHeight]; ok { + claimWindowOpenBlockHash = hash + } else { + claimWindowOpenBlock, err := sq.blockQuerier.Block(ctx, &claimWindowOpenHeight) + if err != nil { + return 0, err + } + + // NB: Byte slice representation of block hashes don't need to be normalized. + claimWindowOpenBlockHash = claimWindowOpenBlock.BlockID.Hash.Bytes() + sq.blockCache[claimWindowOpenHeight] = claimWindowOpenBlockHash } - // NB: Byte slice representation of block hashes don't need to be normalized. - claimWindowOpenBlockHash := claimWindowOpenBlock.BlockID.Hash.Bytes() - return sharedtypes.GetEarliestSupplierClaimCommitHeight( sharedParams, queryHeight, @@ -160,15 +198,26 @@ func (sq *sharedQuerier) GetEarliestSupplierProofCommitHeight(ctx context.Contex // Fetch the block at the proof window open height. Its hash is used as part // of the seed to the pseudo-random number generator. 
proofWindowOpenHeight := sharedtypes.GetProofWindowOpenHeight(sharedParams, queryHeight) - proofWindowOpenBlock, err := sq.blockQuerier.Block(ctx, &proofWindowOpenHeight) - if err != nil { - return 0, err + sq.sessionCacheMu.Lock() + defer sq.sessionCacheMu.Unlock() + + var proofWindowOpenBlockHash []byte + if hash, ok := sq.blockCache[proofWindowOpenHeight]; ok { + proofWindowOpenBlockHash = hash + } else { + proofWindowOpenBlock, err := sq.blockQuerier.Block(ctx, &proofWindowOpenHeight) + if err != nil { + return 0, err + } + + proofWindowOpenBlockHash = proofWindowOpenBlock.BlockID.Hash + sq.blockCache[proofWindowOpenHeight] = proofWindowOpenBlockHash } return sharedtypes.GetEarliestSupplierProofCommitHeight( sharedParams, queryHeight, - proofWindowOpenBlock.BlockID.Hash, + proofWindowOpenBlockHash, supplierOperatorAddr, ), nil } diff --git a/pkg/client/query/supplierquerier.go b/pkg/client/query/supplierquerier.go index 927f2b335..f1c2c9d3d 100644 --- a/pkg/client/query/supplierquerier.go +++ b/pkg/client/query/supplierquerier.go @@ -2,11 +2,13 @@ package query import ( "context" + "sync" "cosmossdk.io/depinject" "github.com/cosmos/gogoproto/grpc" "github.com/pokt-network/poktroll/pkg/client" + "github.com/pokt-network/poktroll/pkg/observable/channel" sharedtypes "github.com/pokt-network/poktroll/x/shared/types" suppliertypes "github.com/pokt-network/poktroll/x/supplier/types" ) @@ -17,6 +19,10 @@ import ( type supplierQuerier struct { clientConn grpc.ClientConn supplierQuerier suppliertypes.QueryClient + + blockClient client.BlockClient + supplierCache map[string]*sharedtypes.Supplier + supplierCacheMu sync.Mutex } // NewSupplierQuerier returns a new instance of a client.SupplierQueryClient by @@ -24,11 +30,12 @@ type supplierQuerier struct { // // Required dependencies: // - grpc.ClientConn -func NewSupplierQuerier(deps depinject.Config) (client.SupplierQueryClient, error) { +func NewSupplierQuerier(ctx context.Context, deps depinject.Config) 
(client.SupplierQueryClient, error) { supq := &supplierQuerier{} if err := depinject.Inject( deps, + &supq.blockClient, &supq.clientConn, ); err != nil { return nil, err @@ -36,6 +43,17 @@ func NewSupplierQuerier(deps depinject.Config) (client.SupplierQueryClient, erro supq.supplierQuerier = suppliertypes.NewQueryClient(supq.clientConn) + channel.ForEach( + ctx, + supq.blockClient.CommittedBlocksSequence(ctx), + func(ctx context.Context, block client.Block) { + supq.supplierCacheMu.Lock() + defer supq.supplierCacheMu.Unlock() + + supq.supplierCache = make(map[string]*sharedtypes.Supplier) + }, + ) + return supq, nil } @@ -44,6 +62,13 @@ func (supq *supplierQuerier) GetSupplier( ctx context.Context, operatorAddress string, ) (sharedtypes.Supplier, error) { + supq.supplierCacheMu.Lock() + defer supq.supplierCacheMu.Unlock() + + if supplier, ok := supq.supplierCache[operatorAddress]; ok { + return *supplier, nil + } + req := &suppliertypes.QueryGetSupplierRequest{OperatorAddress: operatorAddress} res, err := supq.supplierQuerier.Supplier(ctx, req) if err != nil { @@ -52,5 +77,7 @@ func (supq *supplierQuerier) GetSupplier( operatorAddress, err, ) } + + supq.supplierCache[operatorAddress] = &res.Supplier return res.Supplier, nil } diff --git a/pkg/deps/config/suppliers.go b/pkg/deps/config/suppliers.go index 26f04043e..49b00d029 100644 --- a/pkg/deps/config/suppliers.go +++ b/pkg/deps/config/suppliers.go @@ -257,12 +257,12 @@ func NewSupplyTxClientContextFn( // NewSupplyAccountQuerierFn supplies a depinject config with an AccountQuerier. func NewSupplyAccountQuerierFn() SupplierFn { return func( - _ context.Context, + ctx context.Context, deps depinject.Config, _ *cobra.Command, ) (depinject.Config, error) { // Create the account querier. 
- accountQuerier, err := query.NewAccountQuerier(deps) + accountQuerier, err := query.NewAccountQuerier(ctx, deps) if err != nil { return nil, err } @@ -275,12 +275,12 @@ func NewSupplyAccountQuerierFn() SupplierFn { // NewSupplyApplicationQuerierFn supplies a depinject config with an ApplicationQuerier. func NewSupplyApplicationQuerierFn() SupplierFn { return func( - _ context.Context, + ctx context.Context, deps depinject.Config, _ *cobra.Command, ) (depinject.Config, error) { // Create the application querier. - applicationQuerier, err := query.NewApplicationQuerier(deps) + applicationQuerier, err := query.NewApplicationQuerier(ctx, deps) if err != nil { return nil, err } @@ -293,12 +293,12 @@ func NewSupplyApplicationQuerierFn() SupplierFn { // NewSupplySessionQuerierFn supplies a depinject config with a SessionQuerier. func NewSupplySessionQuerierFn() SupplierFn { return func( - _ context.Context, + ctx context.Context, deps depinject.Config, _ *cobra.Command, ) (depinject.Config, error) { // Create the session querier. - sessionQuerier, err := query.NewSessionQuerier(deps) + sessionQuerier, err := query.NewSessionQuerier(ctx, deps) if err != nil { return nil, err } @@ -311,12 +311,12 @@ func NewSupplySessionQuerierFn() SupplierFn { // NewSupplySupplierQuerierFn supplies a depinject config with a SupplierQuerier. func NewSupplySupplierQuerierFn() SupplierFn { return func( - _ context.Context, + ctx context.Context, deps depinject.Config, _ *cobra.Command, ) (depinject.Config, error) { // Create the supplier querier. - supplierQuerier, err := query.NewSupplierQuerier(deps) + supplierQuerier, err := query.NewSupplierQuerier(ctx, deps) if err != nil { return nil, err } @@ -410,11 +410,11 @@ func NewSupplyBlockQueryClientFn(queryNodeRPCUrl *url.URL) SupplierFn { // is supplied with the given deps and the new SharedQueryClient. 
func NewSupplySharedQueryClientFn() SupplierFn { return func( - _ context.Context, + ctx context.Context, deps depinject.Config, _ *cobra.Command, ) (depinject.Config, error) { - sharedQuerier, err := query.NewSharedQuerier(deps) + sharedQuerier, err := query.NewSharedQuerier(ctx, deps) if err != nil { return nil, err } @@ -428,11 +428,11 @@ func NewSupplySharedQueryClientFn() SupplierFn { // is supplied with the given deps and the new ProofQueryClient. func NewSupplyProofQueryClientFn() SupplierFn { return func( - _ context.Context, + ctx context.Context, deps depinject.Config, _ *cobra.Command, ) (depinject.Config, error) { - proofQuerier, err := query.NewProofQuerier(deps) + proofQuerier, err := query.NewProofQuerier(ctx, deps) if err != nil { return nil, err } @@ -446,11 +446,11 @@ func NewSupplyProofQueryClientFn() SupplierFn { // is supplied with the given deps and the new ServiceQueryClient. func NewSupplyServiceQueryClientFn() SupplierFn { return func( - _ context.Context, + ctx context.Context, deps depinject.Config, _ *cobra.Command, ) (depinject.Config, error) { - serviceQuerier, err := query.NewServiceQuerier(deps) + serviceQuerier, err := query.NewServiceQuerier(ctx, deps) if err != nil { return nil, err } @@ -462,12 +462,12 @@ func NewSupplyServiceQueryClientFn() SupplierFn { // NewSupplyBankQuerierFn supplies a depinject config with an BankQuerier. func NewSupplyBankQuerierFn() SupplierFn { return func( - _ context.Context, + ctx context.Context, deps depinject.Config, _ *cobra.Command, ) (depinject.Config, error) { // Create the bank querier. 
- bankQuerier, err := query.NewBankQuerier(deps) + bankQuerier, err := query.NewBankQuerier(ctx, deps) if err != nil { return nil, err } diff --git a/pkg/relayer/cmd/cmd.go b/pkg/relayer/cmd/cmd.go index 574f405b4..812bc5eb3 100644 --- a/pkg/relayer/cmd/cmd.go +++ b/pkg/relayer/cmd/cmd.go @@ -198,7 +198,7 @@ func setupRelayerDependencies( config.NewSupplyQueryClientContextFn(queryNodeGRPCUrl), // leaf config.NewSupplyTxClientContextFn(queryNodeGRPCUrl, txNodeRPCUrl), // leaf config.NewSupplyDelegationClientFn(), // leaf - config.NewSupplySharedQueryClientFn(), // leaf + config.NewSupplySharedQueryClientFn(), config.NewSupplyServiceQueryClientFn(), config.NewSupplyApplicationQuerierFn(), config.NewSupplySessionQuerierFn(), diff --git a/stake_apps.sh b/stake_apps.sh new file mode 100755 index 000000000..292f9d9c8 --- /dev/null +++ b/stake_apps.sh @@ -0,0 +1,71 @@ +#!/bin/bash + +TOTAL_APPS=50000 +PARALLEL_JOBS=8 +CONFIG_DIR="localnet/poktrolld/config" +TEMP_DIR=/tmp/stake_apps +SEGMENT_SIZE=$((TOTAL_APPS / PARALLEL_JOBS)) + +# Create and setup temp directory +rm -rf $TEMP_DIR +mkdir -p $TEMP_DIR +chmod 777 $TEMP_DIR +trap 'rm -rf $TEMP_DIR' EXIT + +# Function to process a segment of apps +process_segment() { + local start=$1 + local end=$2 + local job_id=$3 + local output="$TEMP_DIR/segment_$job_id.txt" + local config_file="${CONFIG_DIR}/application_stake_config.yaml" + + echo "Job $job_id staking apps $start to $end" + for i in $(seq $start $end); do + local app_name="app-$i" + if poktrolld tx application stake-application -y \ + --config "$config_file" \ + --keyring-backend test \ + --from "$app_name" > /dev/null 2>&1; then + echo "$app_name" >> "$output.success" + else + echo "$app_name" >> "$output.failed" + fi + done +} + +export -f process_segment +export CONFIG_DIR TEMP_DIR + +# Launch parallel jobs +for job_id in $(seq 0 $((PARALLEL_JOBS - 1))); do + start=$((job_id * SEGMENT_SIZE + 1)) + end=$((start + SEGMENT_SIZE - 1)) + # Adjust last segment to include 
remainder + if [ $job_id -eq $((PARALLEL_JOBS - 1)) ]; then + end=$TOTAL_APPS + fi + process_segment $start $end $job_id & +done + +wait + +# Report results +total_success=0 +total_failed=0 +for job_id in $(seq 0 $((PARALLEL_JOBS - 1))); do + if [ -f "$TEMP_DIR/segment_$job_id.txt.success" ]; then + success=$(wc -l < "$TEMP_DIR/segment_$job_id.txt.success") + total_success=$((total_success + success)) + fi + if [ -f "$TEMP_DIR/segment_$job_id.txt.failed" ]; then + failed=$(wc -l < "$TEMP_DIR/segment_$job_id.txt.failed") + total_failed=$((total_failed + failed)) + echo "Failed apps in job $job_id:" + cat "$TEMP_DIR/segment_$job_id.txt.failed" + fi +done + +echo "Staking complete!" +echo "Successfully staked: $total_success applications" +echo "Failed: $total_failed applications" \ No newline at end of file diff --git a/store_addresses.sh b/store_addresses.sh new file mode 100755 index 000000000..944ffa07a --- /dev/null +++ b/store_addresses.sh @@ -0,0 +1,52 @@ +#!/bin/bash + +TOTAL_APPS=50000 +PARALLEL_JOBS=32 +OUTPUT_FILE="app_addresses.txt" +TEMP_DIR=/tmp/addrs +SEGMENT_SIZE=$((TOTAL_APPS / PARALLEL_JOBS)) + +# Create and setup temp directory +rm -rf $TEMP_DIR +mkdir -p $TEMP_DIR +chmod 777 $TEMP_DIR +trap 'rm -rf $TEMP_DIR' EXIT + +# Function to process a segment of apps +process_segment() { + local start=$1 + local end=$2 + local job_id=$3 + local output="$TEMP_DIR/segment_$job_id.txt" + + echo "Job $job_id processing apps $start to $end" + for i in $(seq $start $end); do + local app_name="app-$i" + local addr=$(poktrolld keys show $app_name -a --keyring-backend test) + echo "$addr" >> "$output" + done +} + +export -f process_segment +export TEMP_DIR + +# Launch parallel jobs +for job_id in $(seq 0 $((PARALLEL_JOBS - 1))); do + start=$((job_id * SEGMENT_SIZE + 1)) + end=$((start + SEGMENT_SIZE - 1)) + # Adjust last segment to include remainder + if [ $job_id -eq $((PARALLEL_JOBS - 1)) ]; then + end=$TOTAL_APPS + fi + echo "Launching job $job_id for apps $start 
to $end" + parallel -j1 process_segment ::: $start ::: $end ::: $job_id & +done + +wait + +# Combine results in order +for job_id in $(seq 0 $((PARALLEL_JOBS - 1))); do + cat "$TEMP_DIR/segment_$job_id.txt" +done > $OUTPUT_FILE + +echo "Generated addresses for $TOTAL_APPS apps in $OUTPUT_FILE" \ No newline at end of file diff --git a/testutil/integration/suites/application.go b/testutil/integration/suites/application.go index 86b22fccf..f78f78520 100644 --- a/testutil/integration/suites/application.go +++ b/testutil/integration/suites/application.go @@ -26,7 +26,7 @@ type ApplicationModuleSuite struct { // module of the integration app. func (s *ApplicationModuleSuite) GetAppQueryClient() client.ApplicationQueryClient { deps := depinject.Supply(s.GetApp().QueryHelper()) - appQueryClient, err := query.NewApplicationQuerier(deps) + appQueryClient, err := query.NewApplicationQuerier(s.app.QueryHelper().Ctx, deps) require.NoError(s.T(), err) return appQueryClient diff --git a/tools/scripts/params/proof_proof_requirement_threshold.json b/tools/scripts/params/proof_proof_requirement_threshold.json index 94155c57f..7a2c5487e 100644 --- a/tools/scripts/params/proof_proof_requirement_threshold.json +++ b/tools/scripts/params/proof_proof_requirement_threshold.json @@ -7,7 +7,7 @@ "name": "proof_requirement_threshold", "as_coin": { "denom": "upokt", - "amount": "20000000" + "amount": "1" } } ] From c6fc68f6279315184f32bba1fbcbf0b67a5c490f Mon Sep 17 00:00:00 2001 From: Redouane Lakrache Date: Thu, 23 Jan 2025 04:34:19 +0100 Subject: [PATCH 04/24] Empty commit From c7698f8c48762667e219aec0f05b5f1b63564034 Mon Sep 17 00:00:00 2001 From: Redouane Lakrache Date: Thu, 23 Jan 2025 15:21:31 +0100 Subject: [PATCH 05/24] fix: init cache maps --- pkg/client/query/appquerier.go | 2 ++ pkg/client/query/bankquerier.go | 2 ++ pkg/client/query/servicequerier.go | 3 +++ pkg/client/query/sessionquerier.go | 2 ++ pkg/client/query/sharedquerier.go | 2 ++ pkg/client/query/supplierquerier.go | 2 
++ 6 files changed, 13 insertions(+) diff --git a/pkg/client/query/appquerier.go b/pkg/client/query/appquerier.go index aba0f956e..46b0f8188 100644 --- a/pkg/client/query/appquerier.go +++ b/pkg/client/query/appquerier.go @@ -43,6 +43,8 @@ func NewApplicationQuerier(ctx context.Context, deps depinject.Config) (client.A return nil, err } + aq.appCache = make(map[string]*apptypes.Application) + aq.applicationQuerier = apptypes.NewQueryClient(aq.clientConn) channel.ForEach( diff --git a/pkg/client/query/bankquerier.go b/pkg/client/query/bankquerier.go index 7f4c3cecf..2c4634b0e 100644 --- a/pkg/client/query/bankquerier.go +++ b/pkg/client/query/bankquerier.go @@ -45,6 +45,8 @@ func NewBankQuerier(ctx context.Context, deps depinject.Config) (client.BankQuer bq.bankQuerier = banktypes.NewQueryClient(bq.clientConn) + bq.bankCache = make(map[string]*sdk.Coin) + channel.ForEach( ctx, bq.blockClient.CommittedBlocksSequence(ctx), diff --git a/pkg/client/query/servicequerier.go b/pkg/client/query/servicequerier.go index 20bf6c2d1..566b7bf83 100644 --- a/pkg/client/query/servicequerier.go +++ b/pkg/client/query/servicequerier.go @@ -44,6 +44,9 @@ func NewServiceQuerier(ctx context.Context, deps depinject.Config) (client.Servi return nil, err } + servq.serviceCache = make(map[string]*sharedtypes.Service) + servq.relayMiningDifficultyCache = make(map[string]servicetypes.RelayMiningDifficulty) + servq.serviceQuerier = servicetypes.NewQueryClient(servq.clientConn) channel.ForEach( diff --git a/pkg/client/query/sessionquerier.go b/pkg/client/query/sessionquerier.go index aac074a73..5c65126de 100644 --- a/pkg/client/query/sessionquerier.go +++ b/pkg/client/query/sessionquerier.go @@ -44,6 +44,8 @@ func NewSessionQuerier(ctx context.Context, deps depinject.Config) (client.Sessi return nil, err } + sessq.sessionCache = make(map[string]*sessiontypes.Session) + sessq.sessionQuerier = sessiontypes.NewQueryClient(sessq.clientConn) channel.ForEach( diff --git 
a/pkg/client/query/sharedquerier.go b/pkg/client/query/sharedquerier.go index ff3868117..f0431be8d 100644 --- a/pkg/client/query/sharedquerier.go +++ b/pkg/client/query/sharedquerier.go @@ -48,6 +48,8 @@ func NewSharedQuerier(ctx context.Context, deps depinject.Config) (client.Shared querier.sharedQuerier = sharedtypes.NewQueryClient(querier.clientConn) + querier.blockCache = make(map[int64][]byte) + channel.ForEach( ctx, querier.blockClient.CommittedBlocksSequence(ctx), diff --git a/pkg/client/query/supplierquerier.go b/pkg/client/query/supplierquerier.go index f1c2c9d3d..18eba14cd 100644 --- a/pkg/client/query/supplierquerier.go +++ b/pkg/client/query/supplierquerier.go @@ -43,6 +43,8 @@ func NewSupplierQuerier(ctx context.Context, deps depinject.Config) (client.Supp supq.supplierQuerier = suppliertypes.NewQueryClient(supq.clientConn) + supq.supplierCache = make(map[string]*sharedtypes.Supplier) + channel.ForEach( ctx, supq.blockClient.CommittedBlocksSequence(ctx), From ddf1371d0209da86379c04a0e17aecc49207727f Mon Sep 17 00:00:00 2001 From: Daniel Olshansky Date: Thu, 23 Jan 2025 13:11:39 -0500 Subject: [PATCH 06/24] Update pull_request_template.md --- .github/pull_request_template.md | 54 ++++++++------------------------ 1 file changed, 13 insertions(+), 41 deletions(-) diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index dba8e6fea..f387c62d7 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -1,26 +1,15 @@ - - ## Summary - +< One line summary> -## Issue +Changes: +- < Change 1 > +- < Change 2 > - +## Issue -- #{ISSUE_NUMBER} +- Description: < Description > +- Issue: #{ISSUE_NUMBER} ## Type of change @@ -33,28 +22,11 @@ Select one or more from the following: - [ ] Documentation - [ ] Other (specify) -## Testing - - - -- [ ] **Documentation**: `make docusaurus_start`; only needed if you make doc changes -- [ ] **Unit Tests**: `make go_develop_and_test` -- [ ] **LocalNet E2E Tests**: `make 
test_e2e` -- [ ] **DevNet E2E Tests**: Add the `devnet-test-e2e` label to the PR. - ## Sanity Checklist -- [ ] I have tested my changes using the available tooling -- [ ] I have commented my code -- [ ] I have performed a self-review of my own code; both comments & source code -- [ ] I create and reference any new tickets, if applicable -- [ ] I have left TODOs throughout the codebase, if applicable +- [ ] I have updated the GitHub Issue `assignees`, `reviewers`, `labels`, `project`, `iteration` and `milestone` +- [ ] For docs, I have run `make docusaurus_start` +- [ ] For code, I have run `make go_develop_and_test` and `make test_e2e` +- [ ] For code, I have added the `devnet-test-e2e` label to run E2E tests in CI +- [ ] For configurations, I have update the documentation +- [ ] I added TODOs where applicable From f17ea06db602185e28d374aebd5ad62a518fb9dd Mon Sep 17 00:00:00 2001 From: Daniel Olshansky Date: Thu, 23 Jan 2025 19:03:56 -0500 Subject: [PATCH 07/24] [Workflows] Update GitHub actions from v3 to v4 (#1041) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Summary Upgrade github ations from `v3` to `v4`. ## Issue ### Origin Document https://github.blog/changelog/2024-04-16-deprecation-notice-v3-of-the-artifact-actions/ Screenshot 2025-01-23 at 2 32 22 PM ### Example in action Screenshot 2025-01-23 at 2 32 40 PM https://github.com/buildwithgrove/path/actions/runs/12935368023/job/36078630477 --------- Co-authored-by: Dmitry K Co-authored-by: Dima K. 
--- .github/workflows/main-build.yml | 2 +- .github/workflows/release-artifacts.yml | 6 +++--- .github/workflows/run-tests.yml | 2 +- .github/workflows/upload-pages-artifact.yml | 12 ++++++------ 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/.github/workflows/main-build.yml b/.github/workflows/main-build.yml index 8530c3d23..f9b3d03d7 100644 --- a/.github/workflows/main-build.yml +++ b/.github/workflows/main-build.yml @@ -13,7 +13,7 @@ jobs: build-push-container: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: fetch-depth: "0" # Per https://github.com/ignite/cli/issues/1674#issuecomment-1144619147 diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index 06ffee0fc..5fb703abc 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -13,7 +13,7 @@ jobs: release-artifacts: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: fetch-depth: "0" # Per https://github.com/ignite/cli/issues/1674#issuecomment-1144619147 @@ -62,7 +62,7 @@ jobs: type=sha,format=long,suffix=-prod - name: Login to GitHub Container Registry - uses: docker/login-action@v3 + uses: docker/login-action@v4 with: registry: ghcr.io username: ${{ github.actor }} @@ -82,7 +82,7 @@ jobs: # TODO_TECHDEBT(@okdas): use for releases (also change the "on" part at the top so it only tgirrered for tags/releases) - name: Add release and publish binaries - uses: softprops/action-gh-release@v1 + uses: softprops/action-gh-release@v2 with: files: | release/* diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml index 70d66edb9..454e59202 100644 --- a/.github/workflows/run-tests.yml +++ b/.github/workflows/run-tests.yml @@ -17,7 +17,7 @@ jobs: go-test: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: fetch-depth: "0" # Per 
https://github.com/ignite/cli/issues/1674#issuecomment-1144619147 diff --git a/.github/workflows/upload-pages-artifact.yml b/.github/workflows/upload-pages-artifact.yml index 6caa01401..503b42123 100644 --- a/.github/workflows/upload-pages-artifact.yml +++ b/.github/workflows/upload-pages-artifact.yml @@ -16,7 +16,7 @@ jobs: update-docs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: fetch-depth: "0" # Per https://github.com/ignite/cli/issues/1674#issuecomment-1144619147 @@ -32,7 +32,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set up Node.js uses: actions/setup-node@v4 @@ -63,7 +63,7 @@ jobs: pages: write steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set up Node.js uses: actions/setup-node@v4 @@ -83,15 +83,15 @@ jobs: yarn build - name: Setup Pages - uses: actions/configure-pages@v4 + uses: actions/configure-pages@v5 with: enablement: true - name: Upload artifact - uses: actions/upload-pages-artifact@v2 + uses: actions/upload-pages-artifact@v3 with: path: docusaurus/build - name: Deploy to GitHub Pages id: deployment - uses: actions/deploy-pages@v2 + uses: actions/deploy-pages@v4 From ea89904c943c2ef837a4df04762384d90a70cfc9 Mon Sep 17 00:00:00 2001 From: "Dima K." Date: Thu, 23 Jan 2025 16:43:39 -0800 Subject: [PATCH 08/24] [Docs] upgrade/chain halt recovery (#837) ## Summary Performed the first upgrade on the Alpha TestNet. Add some documentation changes to prevent some issues in the future. ## Issue N/A ## Type of change Select one or more from the following: - [ ] New feature, functionality or library - [ ] Consensus breaking; add the `consensus-breaking` label if so. 
See #791 for details - [ ] Bug fix - [x] Code health or cleanup - [x] Documentation - [ ] Other (specify) ## Testing - [x] **Documentation**: `make docusaurus_start`; only needed if you make doc changes - [ ] **Unit Tests**: `make go_develop_and_test` - [ ] **LocalNet E2E Tests**: `make test_e2e` - [ ] **DevNet E2E Tests**: Add the `devnet-test-e2e` label to the PR. ## Sanity Checklist - [ ] I have tested my changes using the available tooling - [ ] I have commented my code - [ ] I have performed a self-review of my own code; both comments & source code - [ ] I create and reference any new tickets, if applicable - [ ] I have left TODOs throughout the codebase, if applicable --------- Co-authored-by: DK Co-authored-by: Daniel Olshansky Co-authored-by: Bryan White --- Dockerfile.release | 1 - app/upgrades/historical.go | 10 + .../chain_halt_troubleshooting.md | 23 +- .../recovery_from_chain_halt.md | 196 +++++++++++++++++ .../protocol/upgrades/contigency_plans.md | 100 +++++++++ .../docs/protocol/upgrades/release_process.md | 13 +- .../protocol/upgrades/upgrade_procedure.md | 205 +++++++++++++++--- makefiles/localnet.mk | 8 + .../upgrades/authz_cancel_upgrade_tx.json | 10 + tools/scripts/upgrades/upgrade_tx_v0.0.9.json | 15 ++ 10 files changed, 543 insertions(+), 38 deletions(-) create mode 100644 docusaurus/docs/develop/developer_guide/recovery_from_chain_halt.md create mode 100644 docusaurus/docs/protocol/upgrades/contigency_plans.md create mode 100644 tools/scripts/upgrades/authz_cancel_upgrade_tx.json create mode 100644 tools/scripts/upgrades/upgrade_tx_v0.0.9.json diff --git a/Dockerfile.release b/Dockerfile.release index 35d2a659c..efd5d2f44 100644 --- a/Dockerfile.release +++ b/Dockerfile.release @@ -8,7 +8,6 @@ RUN apt-get update && \ apt-get install -y --no-install-recommends ca-certificates && \ rm -rf /var/lib/apt/lists/* - # Use `1025` G/UID so users can switch between this and `heighliner` image without a need to chown the files. 
RUN groupadd -g 1025 pocket && useradd -u 1025 -g pocket -m -s /sbin/nologin pocket diff --git a/app/upgrades/historical.go b/app/upgrades/historical.go index 2c0740652..35393ad02 100644 --- a/app/upgrades/historical.go +++ b/app/upgrades/historical.go @@ -17,6 +17,7 @@ import ( "github.com/cosmos/cosmos-sdk/types/module" consensusparamtypes "github.com/cosmos/cosmos-sdk/x/consensus/types" + cosmostypes "github.com/cosmos/cosmos-sdk/types" "github.com/pokt-network/poktroll/app/keepers" ) @@ -29,6 +30,8 @@ func defaultUpgradeHandler( configurator module.Configurator, ) upgradetypes.UpgradeHandler { return func(ctx context.Context, plan upgradetypes.Plan, vm module.VersionMap) (module.VersionMap, error) { + logger := cosmostypes.UnwrapSDKContext(ctx).Logger() + logger.Info("Starting the migration in defaultUpgradeHandler") return mm.RunMigrations(ctx, configurator, vm) } } @@ -87,3 +90,10 @@ var Upgrade_0_0_4 = Upgrade{ // No changes to the KVStore in this upgrade. StoreUpgrades: storetypes.StoreUpgrades{}, } + +// Upgrade_0_0_9 is a small upgrade on TestNet. 
+var Upgrade_0_0_9 = Upgrade{ + PlanName: "v0.0.9", + CreateUpgradeHandler: defaultUpgradeHandler, + StoreUpgrades: storetypes.StoreUpgrades{}, +} diff --git a/docusaurus/docs/develop/developer_guide/chain_halt_troubleshooting.md b/docusaurus/docs/develop/developer_guide/chain_halt_troubleshooting.md index 72da1f4f3..5b32f5cda 100644 --- a/docusaurus/docs/develop/developer_guide/chain_halt_troubleshooting.md +++ b/docusaurus/docs/develop/developer_guide/chain_halt_troubleshooting.md @@ -8,13 +8,15 @@ title: Chain Halt Troubleshooting - [Understanding Chain Halts](#understanding-chain-halts) - [Definition and Causes](#definition-and-causes) - [Impact on Network](#impact-on-network) -- [Troubleshooting Process](#troubleshooting-process) +- [Troubleshooting `wrong Block.Header.AppHash`](#troubleshooting-wrong-blockheaderapphash) - [Step 1: Identifying the Issue](#step-1-identifying-the-issue) - [Step 2: Collecting Node Data](#step-2-collecting-node-data) - [Step 3: Analyzing Discrepancies](#step-3-analyzing-discrepancies) - [Step 4: Decoding and Interpreting Data](#step-4-decoding-and-interpreting-data) - [Step 5: Comparing Records](#step-5-comparing-records) - [Step 6: Investigation and Resolution](#step-6-investigation-and-resolution) +- [Troubleshooting `wrong Block.Header.LastResultsHash`](#troubleshooting-wrong-blockheaderlastresultshash) +- [Syncing from genesis](#syncing-from-genesis) ## Understanding Chain Halts @@ -40,7 +42,7 @@ Chain halts can have severe consequences for the network: Given these impacts, swift and effective troubleshooting is crucial to maintain network health and user trust. -## Troubleshooting Process +## Troubleshooting `wrong Block.Header.AppHash` ### Step 1: Identifying the Issue @@ -94,3 +96,20 @@ Based on the identified discrepancies: 2. Develop a fix or patch to address the issue. 3. If necessary, initiate discussions with the validator community to reach social consensus on how to proceed. 4. 
Implement the agreed-upon solution and monitor the network closely during and after the fix. + +## Troubleshooting `wrong Block.Header.LastResultsHash` + +Errors like the following can occur from using the incorrect binary version at a certain height. + +```bash +reactor validation error: wrong Block.Header.LastResultsHash. +``` + +The solution is to use the correct binary version to sync the full node at the correct height. + +Tools like [cosmosvisor](https://docs.cosmos.network/v0.45/run-node/cosmovisor.html) make it easier +to sync a node from genesis by automatically using the appropriate binary for each range of block heights. + +## Syncing from genesis + +If you're encountering any of the errors mentioned above while trying to sync the historical blocks - make sure you're running the correct version of the binary in accordance with this table [Upgrade List](../../protocol/upgrades/upgrade_list.md). diff --git a/docusaurus/docs/develop/developer_guide/recovery_from_chain_halt.md b/docusaurus/docs/develop/developer_guide/recovery_from_chain_halt.md new file mode 100644 index 000000000..d1ca5a069 --- /dev/null +++ b/docusaurus/docs/develop/developer_guide/recovery_from_chain_halt.md @@ -0,0 +1,196 @@ +--- +sidebar_position: 7 +title: Chain Halt Recovery +--- + +## Chain Halt Recovery + +This document describes how to recover from a chain halt. + +It assumes that the cause of the chain halt has been identified, and that the +new release has been created and verified to function correctly. + +:::tip + +See [Chain Halt Troubleshooting](./chain_halt_troubleshooting.md) for more information on identifying the cause of a chain halt. 
+ +::: + +- [Background](#background) +- [Resolving halts during a network upgrade](#resolving-halts-during-a-network-upgrade) + - [Manual binary replacement (preferred)](#manual-binary-replacement-preferred) + - [Rollback, fork and upgrade](#rollback-fork-and-upgrade) + - [Troubleshooting](#troubleshooting) + - [Data rollback - retrieving snapshot at a specific height (step 5)](#data-rollback---retrieving-snapshot-at-a-specific-height-step-5) + - [Validator Isolation - risks (step 6)](#validator-isolation---risks-step-6) + +## Background + +Pocket network is built on top of `cosmos-sdk`, which utilizes the CometBFT consensus engine. +Comet's Byzantine Fault Tolerant (BFT) consensus algorithm requires that **at least** 2/3 of Validators +are online and voting for the same block to reach a consensus. In order to maintain liveness +and avoid a chain-halt, we need the majority (> 2/3) of Validators to participate +and use the same version of the software. + +## Resolving halts during a network upgrade + +If the halt is caused by the network upgrade, it is possible the solution can be as simple as +skipping an upgrade (i.e. `unsafe-skip-upgrade`) and creating a new (fixed) upgrade. + +Read more about [upgrade contingency plans](../../protocol/upgrades/contigency_plans.md). + +### Manual binary replacement (preferred) + +:::note + +This is the preferred way of resolving consensus-breaking issues. + +**Significant side effect**: this breaks an ability to sync from genesis **without manual interventions**. +For example, when a consensus-breaking issue occurs on a node that is synching from the first block, node operators need +to manually replace the binary with the new one. There are efforts underway to mitigate this issue, including +configuration for `cosmovisor` that could automate the process. + + + +::: + +Since the chain is not moving, **it is impossible** to issue an automatic upgrade with an upgrade plan. 
Instead, +we need **social consensus** to manually replace the binary and get the chain moving. + +The steps to doing so are: + +1. Prepare and verify a new binary that addresses the consensus-breaking issue. +2. Reach out to the community and validators so they can upgrade the binary manually. +3. Update [the documentation](../../protocol/upgrades/upgrade_list.md) to include a range a height when the binary needs + to be replaced. + +:::warning + +TODO_MAINNET(@okdas): + +1. **For step 2**: Investigate if the CometBFT rounds/steps need to be aligned as in Morse chain halts. See [this ref](https://docs.cometbft.com/v1.0/spec/consensus/consensus). +2. **For step 3**: Add `cosmovisor` documentation so its configured to automatically replace the binary when synching from genesis. + +::: + +```mermaid +sequenceDiagram + participant DevTeam + participant Community + participant Validators + participant Documentation + participant Network + + DevTeam->>DevTeam: 1. Prepare and verify new binary + DevTeam->>Community: 2. Announce new binary and instructions + DevTeam->>Validators: 2. Notify validators to upgrade manually + Validators->>Validators: 2. Manually replace the binary + Validators->>Network: 2. Restart nodes with new binary + DevTeam->>Documentation: 3. Update documentation (GitHub Release and Upgrade List to include instructions) + Validators-->>Network: Network resumes operation + +``` + +### Rollback, fork and upgrade + +:::info + +These instructions are only relevant to Pocket Network's Shannon release. + +We do not currently use `x/gov` or on-chain voting for upgrades. +Instead, all participants in our DAO vote on upgrades off-chain, and the Foundation +executes transactions on their behalf. + +::: + +:::warning + +This should be avoided or more testing is required. In our tests, the full nodes were +propagating the existing blocks signed by the Validators, making it hard to rollback. 
+ +::: + +**Performing a rollback is analogous to forking the network at the older height.** + +However, if necessary, the instructions to follow are: + +1. Prepare & verify a new binary that addresses the consensus-breaking issue. +2. [Create a release](../../protocol/upgrades/release_process.md). +3. [Prepare an upgrade transaction](../../protocol/upgrades/upgrade_procedure.md#writing-an-upgrade-transaction) to the new version. +4. Disconnect the `Validator set` from the rest of the network **3 blocks** prior to the height of the chain halt. For example: + - Assume an issue at height `103`. + - Revert the `validator set` to height `100`. + - Submit an upgrade transaction at `101`. + - Upgrade the chain at height `102`. + - Avoid the issue at height `103`. +5. Ensure all validators rolled back to the same height and use the same snapshot ([how to get a snapshot](#data-rollback---retrieving-snapshot-at-a-specific-height-step-5)) + - The snapshot should be imported into each Validator's data directory. + - This is necessary to ensure data continuity and prevent forks. +6. Isolate the `validator set` from full nodes - ([why this is necessary](#validator-isolation---risks-step-6)). + - This is necessary to avoid full nodes from gossiping blocks that have been rolled back. + - This may require using a firewall or a private network. + - Validators should only be permitted to gossip blocks amongst themselves. +7. Start the `validator set` and perform the upgrade. For example, reiterating the process above: + - Start all Validators at height `100`. + - On block `101`, submit the `MsgSoftwareUpgrade` transaction with a `Plan.height` set to `102`. + - `x/upgrade` will perform the upgrade in the `EndBlocker` of block `102`. + - The node will stop climbing with an error waiting for the upgrade to be performed. + - Cosmovisor deployments automatically replace the binary. + - Manual deployments will require a manual replacement at this point. + - Start the node back up. +8. 
Wait for the network to reach the height of the previous ledger (`104`+). +9. Allow validators to open their network to full nodes again. + - **Note**: full nodes will need to perform the rollback or use a snapshot as well. + +```mermaid +sequenceDiagram + participant DevTeam + participant Foundation + participant Validators + participant FullNodes + %% participant Network + + DevTeam->>DevTeam: 1. Prepare & verify new binary + DevTeam->>DevTeam: 2 & 3. Create a release & prepare upgrade transaction + Validators->>Validators: 4 & 5. Roll back to height before issue or import snapshot + Validators->>Validators: 6. Isolate from Full Nodes + Foundation->>Validators: 7. Distribute upgrade transaction + Validators->>Validators: 7. Start network and perform upgrade + + break + Validators->>Validators: 8. Wait until previously problematic height elapses + end + + Validators-->FullNodes: 9. Open network connections + FullNodes-->>Validators: 9. Sync with updated network + note over Validators,FullNodes: Network resumes operation +``` + +### Troubleshooting + +#### Data rollback - retrieving snapshot at a specific height (step 5) + +There are two ways to get a snapshot from a prior height: + +1. Execute + + ```bash + poktrolld rollback --hard + ``` + + repeately, until the command responds with the desired block number. + +2. Use a snapshot from below the halt height (e.g. `100`) and start the node with `--halt-height=100` parameter so it only syncs up to certain height and then + gracefully shuts down. Add this argument to `poktrolld start` like this: + + ```bash + poktrolld start --halt-height=100 + ``` + +#### Validator Isolation - risks (step 6) + +Having at least one node that has knowledge of the forking ledger can jeopardize the whole process. 
In particular, the +following errors in logs are the sign of the nodes syncing blocks from the wrong fork: + +- `found conflicting vote from ourselves; did you unsafe_reset a validator?` +- `conflicting votes from validator` diff --git a/docusaurus/docs/protocol/upgrades/contigency_plans.md b/docusaurus/docs/protocol/upgrades/contigency_plans.md new file mode 100644 index 000000000..260f37823 --- /dev/null +++ b/docusaurus/docs/protocol/upgrades/contigency_plans.md @@ -0,0 +1,100 @@ +--- +title: Failed upgrade contingency plan +sidebar_position: 5 +--- + +:::tip + +This documentation covers failed upgrade contingency for `poktroll` - a `cosmos-sdk` based chain. + +While this can be helpful for other blockchain networks, it is not guaranteed to work for other chains. + +::: + +## Contingency plans + +There's always a chance the upgrade will fail. + +This document is intended to help you recover without significant downtime. + +- [Option 0: The bug is discovered before the upgrade height is reached](#option-0-the-bug-is-discovered-before-the-upgrade-height-is-reached) +- [Option 1: The migration didn't start (i.e. migration halt)](#option-1-the-migration-didnt-start-ie-migration-halt) +- [Option 2: The migration is stuck (i.e. incomplete/partial migration)](#option-2-the-migration-is-stuck-ie-incompletepartial-migration) +- [Option 3: The migration succeed but the network is stuck (i.e. migration had a bug)](#option-3-the-migration-succeed-but-the-network-is-stuck-ie-migration-had-a-bug) +- [MANDATORY Checklist of Documentation \& Scripts to Update](#mandatory-checklist-of-documentation--scripts-to-update) + +### Option 0: The bug is discovered before the upgrade height is reached + +**Cancel the upgrade plan!** + +See the instructions of [how to do that here](./upgrade_procedure.md#cancelling-the-upgrade-plan). + +### Option 1: The migration didn't start (i.e. 
migration halt) + +**This is unlikely to happen.** + +Possible reasons for this are if the name of the upgrade handler is different +from the one specified in the upgrade plan, or if the binary suggested by the +upgrade plan is wrong. + +If the nodes on the network stopped at the upgrade height and the migration did not +start yet (i.e. there are no logs indicating the upgrade handler and store migrations are being executed), +we **MUST** gather social consensus to restart validators with the `--unsafe-skip-upgrade=$upgradeHeightNumber` flag. + +This will skip the upgrade process, allowing the chain to continue and the protocol team to plan another release. + +`--unsafe-skip-upgrade` simply skips the upgrade handler and store migrations. +The chain continues as if the upgrade plan was never set. +The upgrade needs to be fixed, and then a new plan needs to be submitted to the network. + +:::caution + +`--unsafe-skip-upgrade` needs to be documented in the list of upgrades and added +to the scripts so the next time somebody tries to sync the network from genesis, +they will automatically skip the failed upgrade. +[Documentation and scripts to update](#documentation-and-scripts-to-update) + + + +::: + +### Option 2: The migration is stuck (i.e. incomplete/partial migration) + +If the migration is stuck, there's always a chance the upgrade handler was executed on-chain as scheduled, but the migration didn't complete. 
+ +In such a case, we need: + +- **All full nodes and validators**: Roll back validators to the backup + + - A snapshot is taken by `cosmovisor` automatically prior to upgrade when `UNSAFE_SKIP_BACKUP` is set to `false` (the default recommended value; + [more information](https://docs.cosmos.network/main/build/tooling/cosmovisor#command-line-arguments-and-environment-variables)) + +- **All full nodes and validators**: skip the upgrade + + - Add the `--unsafe-skip-upgrade=$upgradeHeightNumber` argument to the `poktrolld start` command like so: + + ```bash + poktrolld start --unsafe-skip-upgrade=$upgradeHeightNumber # ... the rest of the arguments + ``` + +- **Protocol team**: Resolve the issue with an upgrade and schedule a new plan. + + - The upgrade needs to be fixed, and then a new plan needs to be submitted to the network. + +- **Protocol team**: document the failed upgrade + + - Document and add `--unsafe-skip-upgrade=$upgradeHeightNumber` to the scripts (such as docker-compose and cosmovisor installer) + - The next time somebody tries to sync the network from genesis they will automatically skip the failed upgrade; see [documentation and scripts to update](#mandatory-checklist-of-documentation--scripts-to-update) + + + +### Option 3: The migration succeed but the network is stuck (i.e. migration had a bug) + +This should be treated as a consensus or non-determinism bug that is unrelated to the upgrade. See [Recovery From Chain Halt](../../develop/developer_guide/recovery_from_chain_halt.md) for more information on how to handle such issues. + +### MANDATORY Checklist of Documentation & Scripts to Update + +- [ ] The [upgrade list](./upgrade_list.md) should reflect a failed upgrade and provide a range of heights that are served by each version. +- [ ] Systemd service should include the `--unsafe-skip-upgrade=$upgradeHeightNumber` argument in its start command [here](https://github.com/pokt-network/poktroll/blob/main/tools/installer/full-node.sh).
+- [ ] The [Helm chart](https://github.com/pokt-network/helm-charts/blob/main/charts/poktrolld/templates/StatefulSet.yaml) should point to the latest version;consider exposing via a `values.yaml` file +- [ ] The [docker-compose](https://github.com/pokt-network/poktroll-docker-compose-example/tree/main/scripts) examples should point to the latest version diff --git a/docusaurus/docs/protocol/upgrades/release_process.md b/docusaurus/docs/protocol/upgrades/release_process.md index 2845f4c84..398d56c05 100644 --- a/docusaurus/docs/protocol/upgrades/release_process.md +++ b/docusaurus/docs/protocol/upgrades/release_process.md @@ -16,13 +16,6 @@ sidebar_position: 4 This document is for the Pocket Network protocol team's internal use only. ::: -- [1. Determine if the Release is Consensus-Breaking](#1-determine-if-the-release-is-consensus-breaking) -- [2. Create a GitHub Release](#2-create-a-github-release) - - [Legend](#legend) -- [3. Write an Upgrade Plan](#3-write-an-upgrade-plan) -- [4. Issue Upgrade on TestNet](#4-issue-upgrade-on-testnet) -- [5. Issue Upgrade on MainNet](#5-issue-upgrade-on-mainnet) - ### 1. Determine if the Release is Consensus-Breaking :::note @@ -59,12 +52,18 @@ You can find an example [here](https://github.com/pokt-network/poktroll/releases ```text ## Protocol Upgrades + + - **Planned Upgrade:** ❌ Not applicable for this release. - **Breaking Change:** ❌ Not applicable for this release. - **Manual Intervention Required:** ✅ Yes, but only for Alpha TestNet participants. If you are participating, please follow the [instructions provided here](https://dev.poktroll.com/operate/quickstart/docker_compose_walkthrough#restarting-a-full-node-after-re-genesis-) for restarting your full node after re-genesis. - **Upgrade Height:** ❌ Not applicable for this release. 
## What's Changed + ``` diff --git a/docusaurus/docs/protocol/upgrades/upgrade_procedure.md b/docusaurus/docs/protocol/upgrades/upgrade_procedure.md index 8dd572ece..91dfc12bf 100644 --- a/docusaurus/docs/protocol/upgrades/upgrade_procedure.md +++ b/docusaurus/docs/protocol/upgrades/upgrade_procedure.md @@ -6,22 +6,32 @@ sidebar_position: 2 # Upgrade procedure :::warning -This page describes the protocol upgrade process, which is internal to the protocol team. If you're interested in upgrading your Pocket Network node, please check our [releases page](https://github.com/pokt-network/poktroll/releases) for upgrade instructions and changelogs. + +This page describes the protocol upgrade process, intended for the protocol team's internal use. + +If you're interested in upgrading your Pocket Network node, please check our [releases page](https://github.com/pokt-network/poktroll/releases) for upgrade instructions and changelogs. + ::: - [When is an Upgrade Warranted?](#when-is-an-upgrade-warranted) - [Implementing the Upgrade](#implementing-the-upgrade) - [Writing an Upgrade Transaction](#writing-an-upgrade-transaction) + - [Validate the URLs (live network only)](#validate-the-urls-live-network-only) - [Submitting the upgrade onchain](#submitting-the-upgrade-onchain) +- [Cancelling the upgrade plan](#cancelling-the-upgrade-plan) - [Testing the Upgrade](#testing-the-upgrade) - - [LocalNet](#localnet) - - [DevNet](#devnet) - - [TestNet](#testnet) - - [Mainnet](#mainnet) + - [LocalNet Upgrades](#localnet-upgrades) + - [LocalNet Upgrade Cheat Sheet](#localnet-upgrade-cheat-sheet) + - [DevNet Upgrades](#devnet-upgrades) + - [TestNet Upgrades](#testnet-upgrades) + - [Mainnet Upgrades](#mainnet-upgrades) ## Overview -When a consensus-breaking change is made to the protocol, we must carefully evaluate and implement an upgrade path that allows existing nodes to transition safely from one software version to another without disruption. 
This process involves several key steps: +When a consensus-breaking change is made to the protocol, we must carefully evaluate and implement an upgrade path that +allows existing nodes to transition safely from one software version to another without disruption. + +This process involves several key steps: 1. **Proposal**: The DAO drafts an upgrade proposal using our offchain governance system. 2. **Implementation**: The proposed changes are implemented in the codebase. @@ -36,16 +46,34 @@ An upgrade is necessary whenever there's an API, State Machine, or other Consens ## Implementing the Upgrade -1. When a new version includes a consensus-breaking change, plan for the next protocol upgrade: - - If there's a change to a specific module, bump that module's consensus version. +1. When a new version includes a `consensus-breaking` change, plan for the next protocol upgrade: + + - If there's a change to a specific module -> bump that module's consensus version. - Note any potential parameter changes to include in the upgrade. + 2. Create a new upgrade in `app/upgrades`: - Refer to `historical.go` for past upgrades and examples. - - Consult Cosmos-sdk documentation on upgrades for additional guidance [here](https://docs.cosmos.network/main/build/building-apps/app-upgrade) and [here](https://docs.cosmos.network/main/build/modules/upgrade). + - Consult Cosmos-sdk documentation on upgrades for additional guidance on [building-apps/app-upgrade](https://docs.cosmos.network/main/build/building-apps/app-upgrade) and [modules/upgrade](https://docs.cosmos.network/main/build/modules/upgrade). + +:::info + +Creating a new upgrade plan **MUST BE DONE** even if there are no state changes. + +::: ## Writing an Upgrade Transaction -An upgrade transaction includes a [Plan](https://github.com/cosmos/cosmos-sdk/blob/0fda53f265de4bcf4be1a13ea9fad450fc2e66d4/x/upgrade/proto/cosmos/upgrade/v1beta1/upgrade.proto#L14) with specific details about the upgrade. 
This information helps schedule the upgrade on the network and provides necessary data for automatic upgrades via `Cosmovisor`. A typical upgrade transaction will look like the following: +An upgrade transaction includes a [Plan](https://github.com/cosmos/cosmos-sdk/blob/0fda53f265de4bcf4be1a13ea9fad450fc2e66d4/x/upgrade/proto/cosmos/upgrade/v1beta1/upgrade.proto#L14) with specific details about the upgrade. + +This information helps schedule the upgrade on the network and provides necessary data for automatic upgrades via `Cosmovisor`. + +A typical upgrade transaction includes: + +- `name`: Name of the upgrade. It should match the `VersionName` of `upgrades.Upgrade`. +- `height`: The height at which an upgrade should be executed and the node will be restarted. +- `info`: Can be empty. **Only needed for live networks where we want cosmovisor to upgrade nodes automatically**. + +And looks like the following as an example: ```json { @@ -65,52 +93,171 @@ An upgrade transaction includes a [Plan](https://github.com/cosmos/cosmos-sdk/bl } ``` -- `name`: Name of the upgrade. It should match the `VersionName` of `upgrades.Upgrade`. -- `height`: The height at which an upgrade should be executed and the node will be restarted. -- `info`: While this field can theoretically contain any information about the upgrade, in practice, `cosmovisor`uses it to obtain information about the binaries. When`cosmovisor` is configured to automatically download binaries, it will pull the binary from the link provided in this field and perform a hash verification (which is optional). +:::tip + +When `cosmovisor` is configured to automatically download binaries, it will pull the binary from the link provided in +the object about and perform a hash verification (which is also optional). + +**NOTE THAT** we only know the hashes **AFTER** the release has been cut and CI created artifacts for this version. 
+ +::: + +### Validate the URLs (live network only) + +The URLs of the binaries contain checksums. It is critical to ensure they are correct. +Otherwise Cosmovisor won't be able to download the binaries and go through the upgrade. + +The command below (using tools built by the authors of Cosmovisor) can be used to achieve the above: + +```bash +jq -r '.body.messages[0].plan.info | fromjson | .binaries[]' $PATH_TO_UPGRADE_TRANSACTION_JSON | while IFS= read -r url; do + go-getter "$url" . +done +``` + +The output should look like this: + +```text +2024/09/24 12:40:40 success! +2024/09/24 12:40:42 success! +2024/09/24 12:40:44 success! +2024/09/24 12:40:46 success! +``` + +:::tip + +`go-getter` can be installed using the following command: + +```bash +go install github.com/hashicorp/go-getter/cmd/go-getter@latest +``` + +::: ## Submitting the upgrade onchain The `MsgSoftwareUpgrade` can be submitted using the following command: ```bash -poktrolld tx authz exec PATH_TO_TRANSACTION_JSON --from pnf +poktrolld tx authz exec $PATH_TO_UPGRADE_TRANSACTION_JSON --from=pnf ``` -If the transaction has been accepted, upgrade plan can be viewed with this command: +If the transaction has been accepted, the upgrade plan can be viewed with this command: ```bash poktrolld query upgrade plan ``` +## Cancelling the upgrade plan + +It is possible to cancel the upgrade before the upgrade plan height is reached. To do so, execute the following make target: + +```bash +make localnet_cancel_upgrade +``` + ## Testing the Upgrade :::warning -Note that for local testing, `cosmovisor` won't pull the binary from the info field. +Note that for local testing, `cosmovisor` won't pull the binary from the upgrade Plan's info field. ::: -### LocalNet +### LocalNet Upgrades + +LocalNet **DOES NOT** support `cosmovisor` and automatic upgrades at the moment. + +However, **IT IS NOT NEEDED** to simulate and test the upgrade procedure.
+ +#### LocalNet Upgrade Cheat Sheet + +For a hypothetical scenario to upgrade from `0.1` to `0.2`: + +1. **Stop LocalNet** to prevent interference. Pull the `poktroll` repo into two separate directories. Let's name them `old` and `new`. It is recommended to open at least two tabs/shell panels in each directory for easier switching between directories. + +2. **(`old` repo)** - Check out the old version. For the test to be accurate, we need to upgrade from the correct version. + + ```bash + git checkout v0.1 + ``` + +3. **(`new` repo)** + + ```bash + git checkout -b branch_to_test + ``` -LocalNet currently does not support `cosmovisor` and automatic upgrades. However, we have provided scripts to facilitate local testing in the `tools/scripts/upgrades` directory: + Replace `branch_to_test` with the actual branch you want to test. -1. Modify `tools/scripts/upgrades/authz_upgrade_tx_example_v0.0.4_height_30.json` to reflect the name of the upgrade and the height at which it should be scheduled. + :::note + This branch should have an upgrade implemented per the docs in [Implementing the Upgrade](#implementing-the-upgrade). + Here, the upgrade should be named `v0.2`. + ::: -2. Check and update the `tools/scripts/upgrades/cosmovisor-start-node.sh` to point to the correct binaries: +4. **(BOTH repos)** - We'll use binaries from both versions - old and new. - - The old binary should be compiled to work before the upgrade. - - The new binary should contain the upgrade logic to be executed immediately after the node is started using the new binary. + ```bash + make go_develop ignite_release ignite_release_extract_binaries + ``` -3. Run `bash tools/scripts/upgrades/cosmovisor-start-node.sh` to wipe the `~/.poktroll` directory and place binaries in the correct locations. + :::note + The binary produced by these commands in the old repo should result in the same binary as it was downloaded from [production releases](https://github.com/pokt-network/poktroll/releases). 
You can use them as an alternative to building the binary from source. + ::: -4. Execute the transaction as shown in [Submitting the upgrade onchain](#submitting-the-upgrade-onchain) section above. +5. **(`old` repo)** - Clean up and generate an empty genesis using the old version. -### DevNet + ```bash + rm -rf ~/.poktroll && ./release_binaries/poktroll_darwin_arm64 comet unsafe-reset-all && make localnet_regenesis + ``` + +6. **(`old` repo)** Write and save [an upgrade transaction](#writing-an-upgrade-transaction) for `v0.2`. The upgrade plan should be named after the version to which you're upgrading. + +7. **(`old` repo)** Start the node: + + ```bash + ./release_binaries/poktroll_darwin_arm64 start + ``` + + The validator node should run and produce blocks as expected. + +8. **(`old` repo)** Submit the upgrade transaction. **NOTE THAT** the upgrade height in the transaction should be higher than the current block height. Adjust and submit if necessary: + + ```bash + ./release_binaries/poktroll_darwin_arm64 tx authz exec tools/scripts/upgrades/local_test_v0.2.json --from=pnf + ``` + + Replace the path to the JSON transaction with your prepared upgrade transaction. Verify the upgrade plan was submitted and accepted: + + ```bash + ./release_binaries/poktroll_darwin_arm64 query upgrade plan + ``` + +9. Wait for the upgrade height to be reached on the old version. The old version should stop working since it has no knowledge of the `v0.2` upgrade. This simulates a real-world scenario. Stop the old node, and switch to the new version. + +10. **(`new` repo)** + + ```bash + ./release_binaries/poktroll_darwin_arm64 start + ``` + +11. **Observe the output:** + + - A successful upgrade should output `applying upgrade "v0.2" at height: 20 module=x/upgrade`. + - The node on the new version should continue producing blocks. + - If there were errors during the upgrade, investigate and address them. + +12. 
**(`new` repo, optional**) - If parameters were changed during the upgrade, test if these changes were applied. For example: + + ```bash + ./release_binaries/poktroll_darwin_arm64 q application params + ``` + +### DevNet Upgrades DevNets currently do not support `cosmovisor`. We use Kubernetes to manage software versions, including validators. Introducing another component to manage versions would be complex, requiring a re-architecture of our current solution to accommodate this change. -### TestNet +### TestNet Upgrades We currently deploy TestNet validators using Kubernetes with helm charts, which prevents us from managing the validator with `cosmovisor`. We do not control what other TestNet participants are running. However, if participants have deployed their nodes using the [cosmovisor guide](../../operate/run_a_node/full_node_walkthrough.md), their nodes will upgrade automatically. @@ -121,9 +268,11 @@ Until we transition to [cosmos-operator](https://github.com/strangelove-ventures 3. Monitor validator node(s) as they start and begin producing blocks. :::tip -If you are a member of Grove, you can find the instructions to access the infrastructure [here](https://www.notion.so/buildwithgrove/How-to-re-genesis-a-Shannon-TestNet-a6230dd8869149c3a4c21613e3cfad15?pvs=4). + +If you are a member of Grove, you can find the instructions to access the infrastructure [on notion](https://www.notion.so/buildwithgrove/How-to-re-genesis-a-Shannon-TestNet-a6230dd8869149c3a4c21613e3cfad15?pvs=4). + ::: -### Mainnet +### Mainnet Upgrades The Mainnet upgrade process is to be determined. We aim to develop and implement improved tooling for this environment. 
diff --git a/makefiles/localnet.mk b/makefiles/localnet.mk index d43cdccb6..9278457e6 100644 --- a/makefiles/localnet.mk +++ b/makefiles/localnet.mk @@ -29,3 +29,11 @@ localnet_regenesis: check_yq warn_message_acc_initialize_pubkeys ## Regenerate t .PHONY: cosmovisor_start_node cosmovisor_start_node: ## Starts the node using cosmovisor that waits for an upgrade plan bash tools/scripts/upgrades/cosmovisor-start-node.sh + +.PHONY: localnet_cancel_upgrade +localnet_cancel_upgrade: ## Cancels the planned upgrade on local node + poktrolld tx authz exec tools/scripts/upgrades/authz_cancel_upgrade_tx.json --gas=auto --from=pnf + +.PHONY: localnet_show_upgrade_plan +localnet_show_upgrade_plan: ## Shows the upgrade plan on local node + poktrolld query upgrade plan diff --git a/tools/scripts/upgrades/authz_cancel_upgrade_tx.json b/tools/scripts/upgrades/authz_cancel_upgrade_tx.json new file mode 100644 index 000000000..014eaac60 --- /dev/null +++ b/tools/scripts/upgrades/authz_cancel_upgrade_tx.json @@ -0,0 +1,10 @@ +{ + "body": { + "messages": [ + { + "@type": "/cosmos.upgrade.v1beta1.MsgCancelUpgrade", + "authority": "pokt10d07y265gmmuvt4z0w9aw880jnsr700j8yv32t" + } + ] + } +} \ No newline at end of file diff --git a/tools/scripts/upgrades/upgrade_tx_v0.0.9.json b/tools/scripts/upgrades/upgrade_tx_v0.0.9.json new file mode 100644 index 000000000..c945229d9 --- /dev/null +++ b/tools/scripts/upgrades/upgrade_tx_v0.0.9.json @@ -0,0 +1,15 @@ +{ + "body": { + "messages": [ + { + "@type": "/cosmos.upgrade.v1beta1.MsgSoftwareUpgrade", + "authority": "pokt10d07y265gmmuvt4z0w9aw880jnsr700j8yv32t", + "plan": { + "name": "v0.0.9", + "height": "15510", + "info": 
"{\"binaries\":{\"linux/amd64\":\"https://github.com/pokt-network/poktroll/releases/download/v0.0.9/poktroll_linux_amd64.tar.gz?checksum=sha256:ab5b99ca0bc4bfbdd7031378d5a01c2a9f040ff310b745866a4dee7e62321c94\",\"linux/arm64\":\"https://github.com/pokt-network/poktroll/releases/download/v0.0.9/poktroll_linux_arm64.tar.gz?checksum=sha256:4b68c2ad326da055d43af1ad1a580158cec0f229d2ec6d9e18280d065260b622\",\"darwin/amd64\":\"https://github.com/pokt-network/poktroll/releases/download/v0.0.9/poktroll_darwin_amd64.tar.gz?checksum=sha256:c81aabddeb190044b979412e5a518bbf5c88305272f72a47e32e13aa765c3330\",\"darwin/arm64\":\"https://github.com/pokt-network/poktroll/releases/download/v0.0.9/poktroll_darwin_arm64.tar.gz?checksum=sha256:e683c55ac13902d107d7a726ed4a5c5affb2af1be3c67dd131ec2072a2cfbcb2\"}}" + } + } + ] + } +} \ No newline at end of file From 4ed42b364cb97ba7f5e860a503284c87bede9758 Mon Sep 17 00:00:00 2001 From: Redouane Lakrache Date: Fri, 24 Jan 2025 06:28:36 +0100 Subject: [PATCH 09/24] chore: address review change requests --- api/poktroll/proof/event.pulsar.go | 759 +++++++++++++++++- api/poktroll/proof/types.pulsar.go | 165 +++- proto/poktroll/proof/event.proto | 8 + proto/poktroll/proof/types.proto | 11 + testutil/testtree/tree.go | 1 + x/proof/keeper/msg_server_submit_proof.go | 65 +- x/proof/keeper/proof_validation.go | 189 +++-- x/proof/keeper/proof_validation_test.go | 17 +- x/proof/keeper/validate_proofs.go | 253 ++++-- x/proof/module/abci.go | 22 +- x/proof/module/module.go | 5 +- x/proof/types/event.pb.go | 363 ++++++++- x/proof/types/query.pb.go | 1 - x/proof/types/types.pb.go | 131 ++- .../keeper_settle_pending_claims_test.go | 29 +- x/tokenomics/keeper/settle_pending_claims.go | 37 +- x/tokenomics/types/expected_keepers.go | 2 +- 17 files changed, 1724 insertions(+), 334 deletions(-) diff --git a/api/poktroll/proof/event.pulsar.go b/api/poktroll/proof/event.pulsar.go index 2954b58b7..dd7b048a8 100644 --- a/api/poktroll/proof/event.pulsar.go +++ 
b/api/poktroll/proof/event.pulsar.go @@ -2804,6 +2804,601 @@ func (x *fastReflection_EventProofUpdated) ProtoMethods() *protoiface.Methods { } } +var ( + md_EventProofValidityChecked protoreflect.MessageDescriptor + fd_EventProofValidityChecked_proof protoreflect.FieldDescriptor + fd_EventProofValidityChecked_block_height protoreflect.FieldDescriptor + fd_EventProofValidityChecked_proof_status protoreflect.FieldDescriptor + fd_EventProofValidityChecked_reason protoreflect.FieldDescriptor +) + +func init() { + file_poktroll_proof_event_proto_init() + md_EventProofValidityChecked = File_poktroll_proof_event_proto.Messages().ByName("EventProofValidityChecked") + fd_EventProofValidityChecked_proof = md_EventProofValidityChecked.Fields().ByName("proof") + fd_EventProofValidityChecked_block_height = md_EventProofValidityChecked.Fields().ByName("block_height") + fd_EventProofValidityChecked_proof_status = md_EventProofValidityChecked.Fields().ByName("proof_status") + fd_EventProofValidityChecked_reason = md_EventProofValidityChecked.Fields().ByName("reason") +} + +var _ protoreflect.Message = (*fastReflection_EventProofValidityChecked)(nil) + +type fastReflection_EventProofValidityChecked EventProofValidityChecked + +func (x *EventProofValidityChecked) ProtoReflect() protoreflect.Message { + return (*fastReflection_EventProofValidityChecked)(x) +} + +func (x *EventProofValidityChecked) slowProtoReflect() protoreflect.Message { + mi := &file_poktroll_proof_event_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +var _fastReflection_EventProofValidityChecked_messageType fastReflection_EventProofValidityChecked_messageType +var _ protoreflect.MessageType = fastReflection_EventProofValidityChecked_messageType{} + +type fastReflection_EventProofValidityChecked_messageType struct{} + +func (x 
fastReflection_EventProofValidityChecked_messageType) Zero() protoreflect.Message { + return (*fastReflection_EventProofValidityChecked)(nil) +} +func (x fastReflection_EventProofValidityChecked_messageType) New() protoreflect.Message { + return new(fastReflection_EventProofValidityChecked) +} +func (x fastReflection_EventProofValidityChecked_messageType) Descriptor() protoreflect.MessageDescriptor { + return md_EventProofValidityChecked +} + +// Descriptor returns message descriptor, which contains only the protobuf +// type information for the message. +func (x *fastReflection_EventProofValidityChecked) Descriptor() protoreflect.MessageDescriptor { + return md_EventProofValidityChecked +} + +// Type returns the message type, which encapsulates both Go and protobuf +// type information. If the Go type information is not needed, +// it is recommended that the message descriptor be used instead. +func (x *fastReflection_EventProofValidityChecked) Type() protoreflect.MessageType { + return _fastReflection_EventProofValidityChecked_messageType +} + +// New returns a newly allocated and mutable empty message. +func (x *fastReflection_EventProofValidityChecked) New() protoreflect.Message { + return new(fastReflection_EventProofValidityChecked) +} + +// Interface unwraps the message reflection interface and +// returns the underlying ProtoMessage interface. +func (x *fastReflection_EventProofValidityChecked) Interface() protoreflect.ProtoMessage { + return (*EventProofValidityChecked)(x) +} + +// Range iterates over every populated field in an undefined order, +// calling f for each field descriptor and value encountered. +// Range returns immediately if f returns false. +// While iterating, mutating operations may only be performed +// on the current field descriptor. 
+func (x *fastReflection_EventProofValidityChecked) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + if x.Proof != nil { + value := protoreflect.ValueOfMessage(x.Proof.ProtoReflect()) + if !f(fd_EventProofValidityChecked_proof, value) { + return + } + } + if x.BlockHeight != uint64(0) { + value := protoreflect.ValueOfUint64(x.BlockHeight) + if !f(fd_EventProofValidityChecked_block_height, value) { + return + } + } + if x.ProofStatus != 0 { + value := protoreflect.ValueOfEnum((protoreflect.EnumNumber)(x.ProofStatus)) + if !f(fd_EventProofValidityChecked_proof_status, value) { + return + } + } + if x.Reason != "" { + value := protoreflect.ValueOfString(x.Reason) + if !f(fd_EventProofValidityChecked_reason, value) { + return + } + } +} + +// Has reports whether a field is populated. +// +// Some fields have the property of nullability where it is possible to +// distinguish between the default value of a field and whether the field +// was explicitly populated with the default value. Singular message fields, +// member fields of a oneof, and proto2 scalar fields are nullable. Such +// fields are populated only if explicitly set. +// +// In other cases (aside from the nullable cases above), +// a proto3 scalar field is populated if it contains a non-zero value, and +// a repeated field is populated if it is non-empty. 
+func (x *fastReflection_EventProofValidityChecked) Has(fd protoreflect.FieldDescriptor) bool { + switch fd.FullName() { + case "poktroll.proof.EventProofValidityChecked.proof": + return x.Proof != nil + case "poktroll.proof.EventProofValidityChecked.block_height": + return x.BlockHeight != uint64(0) + case "poktroll.proof.EventProofValidityChecked.proof_status": + return x.ProofStatus != 0 + case "poktroll.proof.EventProofValidityChecked.reason": + return x.Reason != "" + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.proof.EventProofValidityChecked")) + } + panic(fmt.Errorf("message poktroll.proof.EventProofValidityChecked does not contain field %s", fd.FullName())) + } +} + +// Clear clears the field such that a subsequent Has call reports false. +// +// Clearing an extension field clears both the extension type and value +// associated with the given field number. +// +// Clear is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_EventProofValidityChecked) Clear(fd protoreflect.FieldDescriptor) { + switch fd.FullName() { + case "poktroll.proof.EventProofValidityChecked.proof": + x.Proof = nil + case "poktroll.proof.EventProofValidityChecked.block_height": + x.BlockHeight = uint64(0) + case "poktroll.proof.EventProofValidityChecked.proof_status": + x.ProofStatus = 0 + case "poktroll.proof.EventProofValidityChecked.reason": + x.Reason = "" + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.proof.EventProofValidityChecked")) + } + panic(fmt.Errorf("message poktroll.proof.EventProofValidityChecked does not contain field %s", fd.FullName())) + } +} + +// Get retrieves the value for a field. +// +// For unpopulated scalars, it returns the default value, where +// the default value of a bytes scalar is guaranteed to be a copy. 
+// For unpopulated composite types, it returns an empty, read-only view +// of the value; to obtain a mutable reference, use Mutable. +func (x *fastReflection_EventProofValidityChecked) Get(descriptor protoreflect.FieldDescriptor) protoreflect.Value { + switch descriptor.FullName() { + case "poktroll.proof.EventProofValidityChecked.proof": + value := x.Proof + return protoreflect.ValueOfMessage(value.ProtoReflect()) + case "poktroll.proof.EventProofValidityChecked.block_height": + value := x.BlockHeight + return protoreflect.ValueOfUint64(value) + case "poktroll.proof.EventProofValidityChecked.proof_status": + value := x.ProofStatus + return protoreflect.ValueOfEnum((protoreflect.EnumNumber)(value)) + case "poktroll.proof.EventProofValidityChecked.reason": + value := x.Reason + return protoreflect.ValueOfString(value) + default: + if descriptor.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.proof.EventProofValidityChecked")) + } + panic(fmt.Errorf("message poktroll.proof.EventProofValidityChecked does not contain field %s", descriptor.FullName())) + } +} + +// Set stores the value for a field. +// +// For a field belonging to a oneof, it implicitly clears any other field +// that may be currently set within the same oneof. +// For extension fields, it implicitly stores the provided ExtensionType. +// When setting a composite type, it is unspecified whether the stored value +// aliases the source's memory in any way. If the composite value is an +// empty, read-only value, then it panics. +// +// Set is a mutating operation and unsafe for concurrent use. 
+func (x *fastReflection_EventProofValidityChecked) Set(fd protoreflect.FieldDescriptor, value protoreflect.Value) { + switch fd.FullName() { + case "poktroll.proof.EventProofValidityChecked.proof": + x.Proof = value.Message().Interface().(*Proof) + case "poktroll.proof.EventProofValidityChecked.block_height": + x.BlockHeight = value.Uint() + case "poktroll.proof.EventProofValidityChecked.proof_status": + x.ProofStatus = (ClaimProofStatus)(value.Enum()) + case "poktroll.proof.EventProofValidityChecked.reason": + x.Reason = value.Interface().(string) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.proof.EventProofValidityChecked")) + } + panic(fmt.Errorf("message poktroll.proof.EventProofValidityChecked does not contain field %s", fd.FullName())) + } +} + +// Mutable returns a mutable reference to a composite type. +// +// If the field is unpopulated, it may allocate a composite value. +// For a field belonging to a oneof, it implicitly clears any other field +// that may be currently set within the same oneof. +// For extension fields, it implicitly stores the provided ExtensionType +// if not already stored. +// It panics if the field does not contain a composite type. +// +// Mutable is a mutating operation and unsafe for concurrent use. 
+func (x *fastReflection_EventProofValidityChecked) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.FullName() { + case "poktroll.proof.EventProofValidityChecked.proof": + if x.Proof == nil { + x.Proof = new(Proof) + } + return protoreflect.ValueOfMessage(x.Proof.ProtoReflect()) + case "poktroll.proof.EventProofValidityChecked.block_height": + panic(fmt.Errorf("field block_height of message poktroll.proof.EventProofValidityChecked is not mutable")) + case "poktroll.proof.EventProofValidityChecked.proof_status": + panic(fmt.Errorf("field proof_status of message poktroll.proof.EventProofValidityChecked is not mutable")) + case "poktroll.proof.EventProofValidityChecked.reason": + panic(fmt.Errorf("field reason of message poktroll.proof.EventProofValidityChecked is not mutable")) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.proof.EventProofValidityChecked")) + } + panic(fmt.Errorf("message poktroll.proof.EventProofValidityChecked does not contain field %s", fd.FullName())) + } +} + +// NewField returns a new value that is assignable to the field +// for the given descriptor. For scalars, this returns the default value. +// For lists, maps, and messages, this returns a new, empty, mutable value. 
+func (x *fastReflection_EventProofValidityChecked) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.FullName() { + case "poktroll.proof.EventProofValidityChecked.proof": + m := new(Proof) + return protoreflect.ValueOfMessage(m.ProtoReflect()) + case "poktroll.proof.EventProofValidityChecked.block_height": + return protoreflect.ValueOfUint64(uint64(0)) + case "poktroll.proof.EventProofValidityChecked.proof_status": + return protoreflect.ValueOfEnum(0) + case "poktroll.proof.EventProofValidityChecked.reason": + return protoreflect.ValueOfString("") + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.proof.EventProofValidityChecked")) + } + panic(fmt.Errorf("message poktroll.proof.EventProofValidityChecked does not contain field %s", fd.FullName())) + } +} + +// WhichOneof reports which field within the oneof is populated, +// returning nil if none are populated. +// It panics if the oneof descriptor does not belong to this message. +func (x *fastReflection_EventProofValidityChecked) WhichOneof(d protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { + switch d.FullName() { + default: + panic(fmt.Errorf("%s is not a oneof field in poktroll.proof.EventProofValidityChecked", d.FullName())) + } + panic("unreachable") +} + +// GetUnknown retrieves the entire list of unknown fields. +// The caller may only mutate the contents of the RawFields +// if the mutated bytes are stored back into the message with SetUnknown. +func (x *fastReflection_EventProofValidityChecked) GetUnknown() protoreflect.RawFields { + return x.unknownFields +} + +// SetUnknown stores an entire list of unknown fields. +// The raw fields must be syntactically valid according to the wire format. +// An implementation may panic if this is not the case. +// Once stored, the caller must not mutate the content of the RawFields. +// An empty RawFields may be passed to clear the fields. 
+// +// SetUnknown is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_EventProofValidityChecked) SetUnknown(fields protoreflect.RawFields) { + x.unknownFields = fields +} + +// IsValid reports whether the message is valid. +// +// An invalid message is an empty, read-only value. +// +// An invalid message often corresponds to a nil pointer of the concrete +// message type, but the details are implementation dependent. +// Validity is not part of the protobuf data model, and may not +// be preserved in marshaling or other operations. +func (x *fastReflection_EventProofValidityChecked) IsValid() bool { + return x != nil +} + +// ProtoMethods returns optional fastReflectionFeature-path implementations of various operations. +// This method may return nil. +// +// The returned methods type is identical to +// "google.golang.org/protobuf/runtime/protoiface".Methods. +// Consult the protoiface package documentation for details. +func (x *fastReflection_EventProofValidityChecked) ProtoMethods() *protoiface.Methods { + size := func(input protoiface.SizeInput) protoiface.SizeOutput { + x := input.Message.Interface().(*EventProofValidityChecked) + if x == nil { + return protoiface.SizeOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Size: 0, + } + } + options := runtime.SizeInputToOptions(input) + _ = options + var n int + var l int + _ = l + if x.Proof != nil { + l = options.Size(x.Proof) + n += 1 + l + runtime.Sov(uint64(l)) + } + if x.BlockHeight != 0 { + n += 1 + runtime.Sov(uint64(x.BlockHeight)) + } + if x.ProofStatus != 0 { + n += 1 + runtime.Sov(uint64(x.ProofStatus)) + } + l = len(x.Reason) + if l > 0 { + n += 1 + l + runtime.Sov(uint64(l)) + } + if x.unknownFields != nil { + n += len(x.unknownFields) + } + return protoiface.SizeOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Size: n, + } + } + + marshal := func(input protoiface.MarshalInput) (protoiface.MarshalOutput, error) { + x := 
input.Message.Interface().(*EventProofValidityChecked) + if x == nil { + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, nil + } + options := runtime.MarshalInputToOptions(input) + _ = options + size := options.Size(x) + dAtA := make([]byte, size) + i := len(dAtA) + _ = i + var l int + _ = l + if x.unknownFields != nil { + i -= len(x.unknownFields) + copy(dAtA[i:], x.unknownFields) + } + if len(x.Reason) > 0 { + i -= len(x.Reason) + copy(dAtA[i:], x.Reason) + i = runtime.EncodeVarint(dAtA, i, uint64(len(x.Reason))) + i-- + dAtA[i] = 0x22 + } + if x.ProofStatus != 0 { + i = runtime.EncodeVarint(dAtA, i, uint64(x.ProofStatus)) + i-- + dAtA[i] = 0x18 + } + if x.BlockHeight != 0 { + i = runtime.EncodeVarint(dAtA, i, uint64(x.BlockHeight)) + i-- + dAtA[i] = 0x10 + } + if x.Proof != nil { + encoded, err := options.Marshal(x.Proof) + if err != nil { + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = runtime.EncodeVarint(dAtA, i, uint64(len(encoded))) + i-- + dAtA[i] = 0xa + } + if input.Buf != nil { + input.Buf = append(input.Buf, dAtA...) 
+ } else { + input.Buf = dAtA + } + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, nil + } + unmarshal := func(input protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { + x := input.Message.Interface().(*EventProofValidityChecked) + if x == nil { + return protoiface.UnmarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Flags: input.Flags, + }, nil + } + options := runtime.UnmarshalInputToOptions(input) + _ = options + dAtA := input.Buf + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: EventProofValidityChecked: wiretype end group for non-group") + } + if fieldNum <= 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: EventProofValidityChecked: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return 
protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + if x.Proof == nil { + x.Proof = &Proof{} + } + if err := options.Unmarshal(dAtA[iNdEx:postIndex], x.Proof); err != nil { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field BlockHeight", wireType) + } + x.BlockHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + x.BlockHeight |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field ProofStatus", wireType) + } + x.ProofStatus = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, 
runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + x.ProofStatus |= ClaimProofStatus(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + x.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := runtime.Skip(dAtA[iNdEx:]) + if err != nil { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: 
input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + if !options.DiscardUnknown { + x.unknownFields = append(x.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + } + iNdEx += skippy + } + } + + if iNdEx > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, nil + } + return &protoiface.Methods{ + NoUnkeyedLiterals: struct{}{}, + Flags: protoiface.SupportMarshalDeterministic | protoiface.SupportUnmarshalDiscardUnknown, + Size: size, + Marshal: marshal, + Unmarshal: unmarshal, + Merge: nil, + CheckInitialized: nil, + } +} + // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.27.0 @@ -3103,6 +3698,66 @@ func (x *EventProofUpdated) GetClaimedUpokt() *v1beta1.Coin { return nil } +// Event emitted after a proof has been checked for validity. +type EventProofValidityChecked struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Proof *Proof `protobuf:"bytes,1,opt,name=proof,proto3" json:"proof,omitempty"` + BlockHeight uint64 `protobuf:"varint,2,opt,name=block_height,json=blockHeight,proto3" json:"block_height,omitempty"` + ProofStatus ClaimProofStatus `protobuf:"varint,3,opt,name=proof_status,json=proofStatus,proto3,enum=poktroll.proof.ClaimProofStatus" json:"proof_status,omitempty"` + Reason string `protobuf:"bytes,4,opt,name=reason,proto3" json:"reason,omitempty"` +} + +func (x *EventProofValidityChecked) Reset() { + *x = EventProofValidityChecked{} + if protoimpl.UnsafeEnabled { + mi := &file_poktroll_proof_event_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EventProofValidityChecked) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EventProofValidityChecked) ProtoMessage() {} + +// Deprecated: 
Use EventProofValidityChecked.ProtoReflect.Descriptor instead. +func (*EventProofValidityChecked) Descriptor() ([]byte, []int) { + return file_poktroll_proof_event_proto_rawDescGZIP(), []int{4} +} + +func (x *EventProofValidityChecked) GetProof() *Proof { + if x != nil { + return x.Proof + } + return nil +} + +func (x *EventProofValidityChecked) GetBlockHeight() uint64 { + if x != nil { + return x.BlockHeight + } + return 0 +} + +func (x *EventProofValidityChecked) GetProofStatus() ClaimProofStatus { + if x != nil { + return x.ProofStatus + } + return ClaimProofStatus_NOT_FOUND +} + +func (x *EventProofValidityChecked) GetReason() string { + if x != nil { + return x.Reason + } + return "" +} + var File_poktroll_proof_event_proto protoreflect.FileDescriptor var file_poktroll_proof_event_proto_rawDesc = []byte{ @@ -3219,18 +3874,34 @@ var file_poktroll_proof_event_proto_rawDesc = []byte{ 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x43, 0x6f, 0x69, 0x6e, 0x42, 0x11, 0xea, 0xde, 0x1f, 0x0d, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x65, 0x64, 0x5f, 0x75, 0x70, 0x6f, 0x6b, 0x74, 0x52, 0x0c, 0x63, 0x6c, 0x61, 0x69, - 0x6d, 0x65, 0x64, 0x55, 0x70, 0x6f, 0x6b, 0x74, 0x42, 0x9e, 0x01, 0xd8, 0xe2, 0x1e, 0x01, 0x0a, - 0x12, 0x63, 0x6f, 0x6d, 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x70, 0x72, - 0x6f, 0x6f, 0x66, 0x42, 0x0a, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, - 0x01, 0x5a, 0x1f, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x73, 0x64, 0x6b, 0x2e, 0x69, 0x6f, 0x2f, - 0x61, 0x70, 0x69, 0x2f, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2f, 0x70, 0x72, 0x6f, - 0x6f, 0x66, 0xa2, 0x02, 0x03, 0x50, 0x50, 0x58, 0xaa, 0x02, 0x0e, 0x50, 0x6f, 0x6b, 0x74, 0x72, - 0x6f, 0x6c, 0x6c, 0x2e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0xca, 0x02, 0x0e, 0x50, 0x6f, 0x6b, 0x74, - 0x72, 0x6f, 0x6c, 0x6c, 0x5c, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0xe2, 0x02, 0x1a, 0x50, 0x6f, 0x6b, - 0x74, 0x72, 0x6f, 0x6c, 
0x6c, 0x5c, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x5c, 0x47, 0x50, 0x42, 0x4d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0f, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, - 0x6c, 0x6c, 0x3a, 0x3a, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x6d, 0x65, 0x64, 0x55, 0x70, 0x6f, 0x6b, 0x74, 0x22, 0x83, 0x02, 0x0a, 0x19, 0x45, 0x76, 0x65, + 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x69, 0x74, 0x79, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x36, 0x0a, 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, + 0x2e, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x2e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x42, 0x09, 0xea, 0xde, + 0x1f, 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x33, + 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x04, 0x42, 0x10, 0xea, 0xde, 0x1f, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, + 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x69, + 0x67, 0x68, 0x74, 0x12, 0x55, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x70, 0x6f, 0x6b, 0x74, + 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x2e, 0x43, 0x6c, 0x61, 0x69, 0x6d, + 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x10, 0xea, 0xde, 0x1f, + 0x0c, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0b, 0x70, + 0x72, 0x6f, 0x6f, 0x66, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x22, 0x0a, 0x06, 0x72, 0x65, + 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0a, 0xea, 0xde, 0x1f, 0x06, + 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x42, 0x9e, + 0x01, 0xd8, 0xe2, 0x1e, 0x01, 0x0a, 0x12, 0x63, 0x6f, 
0x6d, 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72, + 0x6f, 0x6c, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x42, 0x0a, 0x45, 0x76, 0x65, 0x6e, 0x74, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x1f, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x73, + 0x64, 0x6b, 0x2e, 0x69, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, + 0x6c, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0xa2, 0x02, 0x03, 0x50, 0x50, 0x58, 0xaa, 0x02, + 0x0e, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0xca, + 0x02, 0x0e, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x5c, 0x50, 0x72, 0x6f, 0x6f, 0x66, + 0xe2, 0x02, 0x1a, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x5c, 0x50, 0x72, 0x6f, 0x6f, + 0x66, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0f, + 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x3a, 0x3a, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -3245,32 +3916,36 @@ func file_poktroll_proof_event_proto_rawDescGZIP() []byte { return file_poktroll_proof_event_proto_rawDescData } -var file_poktroll_proof_event_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_poktroll_proof_event_proto_msgTypes = make([]protoimpl.MessageInfo, 5) var file_poktroll_proof_event_proto_goTypes = []interface{}{ - (*EventClaimCreated)(nil), // 0: poktroll.proof.EventClaimCreated - (*EventClaimUpdated)(nil), // 1: poktroll.proof.EventClaimUpdated - (*EventProofSubmitted)(nil), // 2: poktroll.proof.EventProofSubmitted - (*EventProofUpdated)(nil), // 3: poktroll.proof.EventProofUpdated - (*Claim)(nil), // 4: poktroll.proof.Claim - (*v1beta1.Coin)(nil), // 5: cosmos.base.v1beta1.Coin - (*Proof)(nil), // 6: poktroll.proof.Proof + (*EventClaimCreated)(nil), // 0: poktroll.proof.EventClaimCreated + (*EventClaimUpdated)(nil), // 1: poktroll.proof.EventClaimUpdated + (*EventProofSubmitted)(nil), // 2: poktroll.proof.EventProofSubmitted + (*EventProofUpdated)(nil), // 3: 
poktroll.proof.EventProofUpdated + (*EventProofValidityChecked)(nil), // 4: poktroll.proof.EventProofValidityChecked + (*Claim)(nil), // 5: poktroll.proof.Claim + (*v1beta1.Coin)(nil), // 6: cosmos.base.v1beta1.Coin + (*Proof)(nil), // 7: poktroll.proof.Proof + (ClaimProofStatus)(0), // 8: poktroll.proof.ClaimProofStatus } var file_poktroll_proof_event_proto_depIdxs = []int32{ - 4, // 0: poktroll.proof.EventClaimCreated.claim:type_name -> poktroll.proof.Claim - 5, // 1: poktroll.proof.EventClaimCreated.claimed_upokt:type_name -> cosmos.base.v1beta1.Coin - 4, // 2: poktroll.proof.EventClaimUpdated.claim:type_name -> poktroll.proof.Claim - 5, // 3: poktroll.proof.EventClaimUpdated.claimed_upokt:type_name -> cosmos.base.v1beta1.Coin - 4, // 4: poktroll.proof.EventProofSubmitted.claim:type_name -> poktroll.proof.Claim - 6, // 5: poktroll.proof.EventProofSubmitted.proof:type_name -> poktroll.proof.Proof - 5, // 6: poktroll.proof.EventProofSubmitted.claimed_upokt:type_name -> cosmos.base.v1beta1.Coin - 4, // 7: poktroll.proof.EventProofUpdated.claim:type_name -> poktroll.proof.Claim - 6, // 8: poktroll.proof.EventProofUpdated.proof:type_name -> poktroll.proof.Proof - 5, // 9: poktroll.proof.EventProofUpdated.claimed_upokt:type_name -> cosmos.base.v1beta1.Coin - 10, // [10:10] is the sub-list for method output_type - 10, // [10:10] is the sub-list for method input_type - 10, // [10:10] is the sub-list for extension type_name - 10, // [10:10] is the sub-list for extension extendee - 0, // [0:10] is the sub-list for field type_name + 5, // 0: poktroll.proof.EventClaimCreated.claim:type_name -> poktroll.proof.Claim + 6, // 1: poktroll.proof.EventClaimCreated.claimed_upokt:type_name -> cosmos.base.v1beta1.Coin + 5, // 2: poktroll.proof.EventClaimUpdated.claim:type_name -> poktroll.proof.Claim + 6, // 3: poktroll.proof.EventClaimUpdated.claimed_upokt:type_name -> cosmos.base.v1beta1.Coin + 5, // 4: poktroll.proof.EventProofSubmitted.claim:type_name -> poktroll.proof.Claim + 7, 
// 5: poktroll.proof.EventProofSubmitted.proof:type_name -> poktroll.proof.Proof + 6, // 6: poktroll.proof.EventProofSubmitted.claimed_upokt:type_name -> cosmos.base.v1beta1.Coin + 5, // 7: poktroll.proof.EventProofUpdated.claim:type_name -> poktroll.proof.Claim + 7, // 8: poktroll.proof.EventProofUpdated.proof:type_name -> poktroll.proof.Proof + 6, // 9: poktroll.proof.EventProofUpdated.claimed_upokt:type_name -> cosmos.base.v1beta1.Coin + 7, // 10: poktroll.proof.EventProofValidityChecked.proof:type_name -> poktroll.proof.Proof + 8, // 11: poktroll.proof.EventProofValidityChecked.proof_status:type_name -> poktroll.proof.ClaimProofStatus + 12, // [12:12] is the sub-list for method output_type + 12, // [12:12] is the sub-list for method input_type + 12, // [12:12] is the sub-list for extension type_name + 12, // [12:12] is the sub-list for extension extendee + 0, // [0:12] is the sub-list for field type_name } func init() { file_poktroll_proof_event_proto_init() } @@ -3328,6 +4003,18 @@ func file_poktroll_proof_event_proto_init() { return nil } } + file_poktroll_proof_event_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EventProofValidityChecked); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ @@ -3335,7 +4022,7 @@ func file_poktroll_proof_event_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_poktroll_proof_event_proto_rawDesc, NumEnums: 0, - NumMessages: 4, + NumMessages: 5, NumExtensions: 0, NumServices: 0, }, diff --git a/api/poktroll/proof/types.pulsar.go b/api/poktroll/proof/types.pulsar.go index ac6d5b248..2498a8038 100644 --- a/api/poktroll/proof/types.pulsar.go +++ b/api/poktroll/proof/types.pulsar.go @@ -585,6 +585,7 @@ var ( fd_Claim_supplier_operator_address protoreflect.FieldDescriptor fd_Claim_session_header protoreflect.FieldDescriptor 
fd_Claim_root_hash protoreflect.FieldDescriptor + fd_Claim_proof_status protoreflect.FieldDescriptor ) func init() { @@ -593,6 +594,7 @@ func init() { fd_Claim_supplier_operator_address = md_Claim.Fields().ByName("supplier_operator_address") fd_Claim_session_header = md_Claim.Fields().ByName("session_header") fd_Claim_root_hash = md_Claim.Fields().ByName("root_hash") + fd_Claim_proof_status = md_Claim.Fields().ByName("proof_status") } var _ protoreflect.Message = (*fastReflection_Claim)(nil) @@ -678,6 +680,12 @@ func (x *fastReflection_Claim) Range(f func(protoreflect.FieldDescriptor, protor return } } + if x.ProofStatus != 0 { + value := protoreflect.ValueOfEnum((protoreflect.EnumNumber)(x.ProofStatus)) + if !f(fd_Claim_proof_status, value) { + return + } + } } // Has reports whether a field is populated. @@ -699,6 +707,8 @@ func (x *fastReflection_Claim) Has(fd protoreflect.FieldDescriptor) bool { return x.SessionHeader != nil case "poktroll.proof.Claim.root_hash": return len(x.RootHash) != 0 + case "poktroll.proof.Claim.proof_status": + return x.ProofStatus != 0 default: if fd.IsExtension() { panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.proof.Claim")) @@ -721,6 +731,8 @@ func (x *fastReflection_Claim) Clear(fd protoreflect.FieldDescriptor) { x.SessionHeader = nil case "poktroll.proof.Claim.root_hash": x.RootHash = nil + case "poktroll.proof.Claim.proof_status": + x.ProofStatus = 0 default: if fd.IsExtension() { panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.proof.Claim")) @@ -746,6 +758,9 @@ func (x *fastReflection_Claim) Get(descriptor protoreflect.FieldDescriptor) prot case "poktroll.proof.Claim.root_hash": value := x.RootHash return protoreflect.ValueOfBytes(value) + case "poktroll.proof.Claim.proof_status": + value := x.ProofStatus + return protoreflect.ValueOfEnum((protoreflect.EnumNumber)(value)) default: if descriptor.IsExtension() { panic(fmt.Errorf("proto3 declared messages do not support 
extensions: poktroll.proof.Claim")) @@ -772,6 +787,8 @@ func (x *fastReflection_Claim) Set(fd protoreflect.FieldDescriptor, value protor x.SessionHeader = value.Message().Interface().(*session.SessionHeader) case "poktroll.proof.Claim.root_hash": x.RootHash = value.Bytes() + case "poktroll.proof.Claim.proof_status": + x.ProofStatus = (ClaimProofStatus)(value.Enum()) default: if fd.IsExtension() { panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.proof.Claim")) @@ -801,6 +818,8 @@ func (x *fastReflection_Claim) Mutable(fd protoreflect.FieldDescriptor) protoref panic(fmt.Errorf("field supplier_operator_address of message poktroll.proof.Claim is not mutable")) case "poktroll.proof.Claim.root_hash": panic(fmt.Errorf("field root_hash of message poktroll.proof.Claim is not mutable")) + case "poktroll.proof.Claim.proof_status": + panic(fmt.Errorf("field proof_status of message poktroll.proof.Claim is not mutable")) default: if fd.IsExtension() { panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.proof.Claim")) @@ -821,6 +840,8 @@ func (x *fastReflection_Claim) NewField(fd protoreflect.FieldDescriptor) protore return protoreflect.ValueOfMessage(m.ProtoReflect()) case "poktroll.proof.Claim.root_hash": return protoreflect.ValueOfBytes(nil) + case "poktroll.proof.Claim.proof_status": + return protoreflect.ValueOfEnum(0) default: if fd.IsExtension() { panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.proof.Claim")) @@ -902,6 +923,9 @@ func (x *fastReflection_Claim) ProtoMethods() *protoiface.Methods { if l > 0 { n += 1 + l + runtime.Sov(uint64(l)) } + if x.ProofStatus != 0 { + n += 1 + runtime.Sov(uint64(x.ProofStatus)) + } if x.unknownFields != nil { n += len(x.unknownFields) } @@ -931,6 +955,11 @@ func (x *fastReflection_Claim) ProtoMethods() *protoiface.Methods { i -= len(x.unknownFields) copy(dAtA[i:], x.unknownFields) } + if x.ProofStatus != 0 { + i = runtime.EncodeVarint(dAtA, i, 
uint64(x.ProofStatus)) + i-- + dAtA[i] = 0x20 + } if len(x.RootHash) > 0 { i -= len(x.RootHash) copy(dAtA[i:], x.RootHash) @@ -1110,6 +1139,25 @@ func (x *fastReflection_Claim) ProtoMethods() *protoiface.Methods { x.RootHash = []byte{} } iNdEx = postIndex + case 4: + if wireType != 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field ProofStatus", wireType) + } + x.ProofStatus = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + x.ProofStatus |= ClaimProofStatus(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := runtime.Skip(dAtA[iNdEx:]) @@ -1263,6 +1311,57 @@ func (ClaimProofStage) EnumDescriptor() ([]byte, []int) { return file_poktroll_proof_types_proto_rawDescGZIP(), []int{1} } +// ClaimProofStatus defines the status of the proof for a claim. +// The default value is NOT_FOUND, whether the proof is required or not. +type ClaimProofStatus int32 + +const ( + ClaimProofStatus_NOT_FOUND ClaimProofStatus = 0 + ClaimProofStatus_VALID ClaimProofStatus = 1 + ClaimProofStatus_INVALID ClaimProofStatus = 2 +) + +// Enum value maps for ClaimProofStatus. 
+var ( + ClaimProofStatus_name = map[int32]string{ + 0: "NOT_FOUND", + 1: "VALID", + 2: "INVALID", + } + ClaimProofStatus_value = map[string]int32{ + "NOT_FOUND": 0, + "VALID": 1, + "INVALID": 2, + } +) + +func (x ClaimProofStatus) Enum() *ClaimProofStatus { + p := new(ClaimProofStatus) + *p = x + return p +} + +func (x ClaimProofStatus) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ClaimProofStatus) Descriptor() protoreflect.EnumDescriptor { + return file_poktroll_proof_types_proto_enumTypes[2].Descriptor() +} + +func (ClaimProofStatus) Type() protoreflect.EnumType { + return &file_poktroll_proof_types_proto_enumTypes[2] +} + +func (x ClaimProofStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ClaimProofStatus.Descriptor instead. +func (ClaimProofStatus) EnumDescriptor() ([]byte, []int) { + return file_poktroll_proof_types_proto_rawDescGZIP(), []int{2} +} + type Proof struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1328,6 +1427,9 @@ type Claim struct { SessionHeader *session.SessionHeader `protobuf:"bytes,2,opt,name=session_header,json=sessionHeader,proto3" json:"session_header,omitempty"` // Root hash returned from smt.SMST#Root(). RootHash []byte `protobuf:"bytes,3,opt,name=root_hash,json=rootHash,proto3" json:"root_hash,omitempty"` + // Claim proof status captures the status of the proof for this claim. 
+ // WARNING: This field MUST only be set by proofKeeper#EnsureValidProofSignaturesAndClosestPath + ProofStatus ClaimProofStatus `protobuf:"varint,4,opt,name=proof_status,json=proofStatus,proto3,enum=poktroll.proof.ClaimProofStatus" json:"proof_status,omitempty"` } func (x *Claim) Reset() { @@ -1371,6 +1473,13 @@ func (x *Claim) GetRootHash() []byte { return nil } +func (x *Claim) GetProofStatus() ClaimProofStatus { + if x != nil { + return x.ProofStatus + } + return ClaimProofStatus_NOT_FOUND +} + var File_poktroll_proof_types_proto protoreflect.FileDescriptor var file_poktroll_proof_types_proto_rawDesc = []byte{ @@ -1395,7 +1504,7 @@ var file_poktroll_proof_types_proto_rawDesc = []byte{ 0x64, 0x65, 0x72, 0x12, 0x30, 0x0a, 0x14, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x72, 0x6b, 0x6c, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x72, 0x6b, 0x6c, 0x65, - 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x22, 0xc2, 0x01, 0x0a, 0x05, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x12, + 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x22, 0x87, 0x02, 0x0a, 0x05, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x12, 0x54, 0x0a, 0x19, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x18, 0xd2, 0xb4, 0x2d, 0x14, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x41, @@ -1407,16 +1516,24 @@ var file_poktroll_proof_types_proto_rawDesc = []byte{ 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0d, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x08, 0x72, 0x6f, 0x6f, 0x74, 0x48, 0x61, 0x73, 0x68, 0x2a, 0x4c, 0x0a, 0x16, 0x50, 0x72, - 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 
0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, - 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x0c, 0x4e, 0x4f, 0x54, 0x5f, 0x52, 0x45, 0x51, 0x55, - 0x49, 0x52, 0x45, 0x44, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x50, 0x52, 0x4f, 0x42, 0x41, 0x42, - 0x49, 0x4c, 0x49, 0x53, 0x54, 0x49, 0x43, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x48, 0x52, - 0x45, 0x53, 0x48, 0x4f, 0x4c, 0x44, 0x10, 0x02, 0x2a, 0x44, 0x0a, 0x0f, 0x43, 0x6c, 0x61, 0x69, - 0x6d, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43, - 0x4c, 0x41, 0x49, 0x4d, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x52, 0x4f, 0x56, - 0x45, 0x4e, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x45, 0x54, 0x54, 0x4c, 0x45, 0x44, 0x10, - 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x45, 0x58, 0x50, 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x42, 0x9e, + 0x52, 0x08, 0x72, 0x6f, 0x6f, 0x74, 0x48, 0x61, 0x73, 0x68, 0x12, 0x43, 0x0a, 0x0c, 0x70, 0x72, + 0x6f, 0x6f, 0x66, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x20, 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x6f, + 0x66, 0x2e, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2a, + 0x4c, 0x0a, 0x16, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x0c, 0x4e, 0x4f, 0x54, + 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x50, + 0x52, 0x4f, 0x42, 0x41, 0x42, 0x49, 0x4c, 0x49, 0x53, 0x54, 0x49, 0x43, 0x10, 0x01, 0x12, 0x0d, + 0x0a, 0x09, 0x54, 0x48, 0x52, 0x45, 0x53, 0x48, 0x4f, 0x4c, 0x44, 0x10, 0x02, 0x2a, 0x44, 0x0a, + 0x0f, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x53, 0x74, 0x61, 0x67, 0x65, + 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4c, 0x41, 0x49, 0x4d, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, + 
0x06, 0x50, 0x52, 0x4f, 0x56, 0x45, 0x4e, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x45, 0x54, + 0x54, 0x4c, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x45, 0x58, 0x50, 0x49, 0x52, 0x45, + 0x44, 0x10, 0x03, 0x2a, 0x39, 0x0a, 0x10, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x50, 0x72, 0x6f, 0x6f, + 0x66, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x4f, 0x54, 0x5f, 0x46, + 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, + 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x02, 0x42, 0x9e, 0x01, 0xd8, 0xe2, 0x1e, 0x01, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x42, 0x0a, 0x54, 0x79, 0x70, 0x65, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x1f, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x73, @@ -1442,23 +1559,25 @@ func file_poktroll_proof_types_proto_rawDescGZIP() []byte { return file_poktroll_proof_types_proto_rawDescData } -var file_poktroll_proof_types_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_poktroll_proof_types_proto_enumTypes = make([]protoimpl.EnumInfo, 3) var file_poktroll_proof_types_proto_msgTypes = make([]protoimpl.MessageInfo, 2) var file_poktroll_proof_types_proto_goTypes = []interface{}{ (ProofRequirementReason)(0), // 0: poktroll.proof.ProofRequirementReason (ClaimProofStage)(0), // 1: poktroll.proof.ClaimProofStage - (*Proof)(nil), // 2: poktroll.proof.Proof - (*Claim)(nil), // 3: poktroll.proof.Claim - (*session.SessionHeader)(nil), // 4: poktroll.session.SessionHeader + (ClaimProofStatus)(0), // 2: poktroll.proof.ClaimProofStatus + (*Proof)(nil), // 3: poktroll.proof.Proof + (*Claim)(nil), // 4: poktroll.proof.Claim + (*session.SessionHeader)(nil), // 5: poktroll.session.SessionHeader } var file_poktroll_proof_types_proto_depIdxs = []int32{ - 4, // 0: poktroll.proof.Proof.session_header:type_name -> poktroll.session.SessionHeader - 4, // 1: 
poktroll.proof.Claim.session_header:type_name -> poktroll.session.SessionHeader - 2, // [2:2] is the sub-list for method output_type - 2, // [2:2] is the sub-list for method input_type - 2, // [2:2] is the sub-list for extension type_name - 2, // [2:2] is the sub-list for extension extendee - 0, // [0:2] is the sub-list for field type_name + 5, // 0: poktroll.proof.Proof.session_header:type_name -> poktroll.session.SessionHeader + 5, // 1: poktroll.proof.Claim.session_header:type_name -> poktroll.session.SessionHeader + 2, // 2: poktroll.proof.Claim.proof_status:type_name -> poktroll.proof.ClaimProofStatus + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name } func init() { file_poktroll_proof_types_proto_init() } @@ -1497,7 +1616,7 @@ func file_poktroll_proof_types_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_poktroll_proof_types_proto_rawDesc, - NumEnums: 2, + NumEnums: 3, NumMessages: 2, NumExtensions: 0, NumServices: 0, diff --git a/proto/poktroll/proof/event.proto b/proto/poktroll/proof/event.proto index b4e99aad9..f54231121 100644 --- a/proto/poktroll/proof/event.proto +++ b/proto/poktroll/proof/event.proto @@ -43,3 +43,11 @@ message EventProofUpdated { uint64 num_estimated_compute_units = 5 [(gogoproto.jsontag) = "num_estimated_compute_units"]; cosmos.base.v1beta1.Coin claimed_upokt = 6 [(gogoproto.jsontag) = "claimed_upokt"]; } + +// Event emitted after a proof has been checked for validity. 
+message EventProofValidityChecked { + poktroll.proof.Proof proof = 1 [(gogoproto.jsontag) = "proof"]; + uint64 block_height = 2 [(gogoproto.jsontag) = "block_height"]; + poktroll.proof.ClaimProofStatus proof_status = 3 [(gogoproto.jsontag) = "proof_status"]; + string reason = 4 [(gogoproto.jsontag) = "reason"]; +} diff --git a/proto/poktroll/proof/types.proto b/proto/poktroll/proof/types.proto index d131adf90..e979621b6 100644 --- a/proto/poktroll/proof/types.proto +++ b/proto/poktroll/proof/types.proto @@ -29,6 +29,9 @@ message Claim { poktroll.session.SessionHeader session_header = 2; // Root hash returned from smt.SMST#Root(). bytes root_hash = 3; + // Claim proof status captures the status of the proof for this claim. + // WARNING: This field MUST only be set by proofKeeper#EnsureValidProofSignaturesAndClosestPath + ClaimProofStatus proof_status = 4; } enum ProofRequirementReason { @@ -43,3 +46,11 @@ enum ClaimProofStage { SETTLED = 2; EXPIRED = 3; } + +// ClaimProofStatus defines the status of the proof for a claim. +// The default value is NOT_FOUND, whether the proof is required or not. 
+enum ClaimProofStatus { + NOT_FOUND = 0; + VALID = 1; + INVALID = 2; +} \ No newline at end of file diff --git a/testutil/testtree/tree.go b/testutil/testtree/tree.go index 737e710d0..680a95011 100644 --- a/testutil/testtree/tree.go +++ b/testutil/testtree/tree.go @@ -152,5 +152,6 @@ func NewClaim( SupplierOperatorAddress: supplierOperatorAddr, SessionHeader: sessionHeader, RootHash: rootHash, + ProofStatus: prooftypes.ClaimProofStatus_NOT_FOUND, } } diff --git a/x/proof/keeper/msg_server_submit_proof.go b/x/proof/keeper/msg_server_submit_proof.go index 768527096..dadd4e977 100644 --- a/x/proof/keeper/msg_server_submit_proof.go +++ b/x/proof/keeper/msg_server_submit_proof.go @@ -38,9 +38,11 @@ func (k msgServer) SubmitProof( ctx context.Context, msg *types.MsgSubmitProof, ) (_ *types.MsgSubmitProofResponse, err error) { + sdkCtx := cosmostypes.UnwrapSDKContext(ctx) + // Declare claim to reference in telemetry. var ( - claim = new(types.Claim) + claim *types.Claim isExistingProof bool numRelays uint64 numClaimComputeUnits uint64 @@ -52,42 +54,55 @@ func (k msgServer) SubmitProof( // Basic validation of the SubmitProof message. if err = msg.ValidateBasic(); err != nil { + logger.Error("failed to validate the submitProof message") return nil, status.Error(codes.InvalidArgument, err.Error()) } - logger.Info("validated the submitProof message") sessionHeader = msg.GetSessionHeader() + supplierOperatorAddress := msg.GetSupplierOperatorAddress() + + logger = logger.With( + "session_id", sessionHeader.GetSessionId(), + "application_address", sessionHeader.GetApplicationAddress(), + "service_id", sessionHeader.GetServiceId(), + "session_end_height", sessionHeader.GetSessionEndBlockHeight(), + "supplier_operator_address", supplierOperatorAddress, + ) + logger.Info("validated the submitProof message") // Defer telemetry calls so that they reference the final values the relevant variables. 
defer k.finalizeSubmitProofTelemetry(sessionHeader, msg, isExistingProof, numRelays, numClaimComputeUnits, err) // Construct the proof from the message. - proof := &types.Proof{ - SupplierOperatorAddress: msg.GetSupplierOperatorAddress(), - SessionHeader: msg.GetSessionHeader(), - ClosestMerkleProof: msg.GetProof(), - } - - // Ensure the proof is well-formed by checking the proof, its corresponding - // claim and relay session headers was well as the proof's submission timing - // (i.e. it is submitted within the proof submission window). - claim, err = k.EnsureWellFormedProof(ctx, proof) - if err != nil { + proof := newProofFromMsg(msg) + + // EnsureWellFormedProof ensures proper proof formation by verifying: + // - Proof structure + // - Associated claim + // - Relay session headers + // - Submission timing within required window + if err = k.EnsureWellFormedProof(ctx, proof); err != nil { + logger.Error(fmt.Sprintf("failed to ensure well-formed proof: %v", err)) return nil, status.Error(codes.FailedPrecondition, err.Error()) } logger.Info("checked the proof is well-formed") - if err = k.deductProofSubmissionFee(ctx, msg.GetSupplierOperatorAddress()); err != nil { + // Retrieve the claim associated with the proof. + // The claim should ALWAYS exist since the proof validation in EnsureWellFormedProof + // retrieves and validates the associated claim. 
+ foundClaim, claimFound := k.GetClaim(ctx, sessionHeader.GetSessionId(), supplierOperatorAddress) + if !claimFound { + logger.Error("failed to find the claim associated with the proof") + return nil, status.Error(codes.FailedPrecondition, types.ErrProofClaimNotFound.Error()) + } + + claim = &foundClaim + + if err = k.deductProofSubmissionFee(ctx, supplierOperatorAddress); err != nil { logger.Error(fmt.Sprintf("failed to deduct proof submission fee: %v", err)) return nil, status.Error(codes.FailedPrecondition, err.Error()) } - // Helpers for logging the same metadata throughout this function calls - logger = logger.With( - "session_id", proof.SessionHeader.SessionId, - "session_end_height", proof.SessionHeader.SessionEndBlockHeight, - "supplier_operator_address", proof.SupplierOperatorAddress) - // Check if a proof is required for the claim. proofRequirement, err := k.ProofRequirementForClaim(ctx, claim) if err != nil { @@ -152,7 +167,6 @@ func (k msgServer) SubmitProof( ) } - sdkCtx := cosmostypes.UnwrapSDKContext(ctx) if err = sdkCtx.EventManager().EmitTypedEvent(proofUpsertEvent); err != nil { return nil, status.Error( codes.Internal, @@ -350,3 +364,12 @@ func (k Keeper) finalizeProofRequirementTelemetry( err, ) } + +// newProofFromMsg creates a new proof from a MsgSubmitProof message. 
+func newProofFromMsg(msg *types.MsgSubmitProof) *types.Proof { + return &types.Proof{ + SupplierOperatorAddress: msg.GetSupplierOperatorAddress(), + SessionHeader: msg.GetSessionHeader(), + ClosestMerkleProof: msg.GetProof(), + } +} diff --git a/x/proof/keeper/proof_validation.go b/x/proof/keeper/proof_validation.go index 0892f071b..897e3ed16 100644 --- a/x/proof/keeper/proof_validation.go +++ b/x/proof/keeper/proof_validation.go @@ -31,6 +31,7 @@ package keeper import ( "bytes" "context" + "fmt" cosmostelemetry "github.com/cosmos/cosmos-sdk/telemetry" "github.com/pokt-network/smt" @@ -42,23 +43,24 @@ import ( sessiontypes "github.com/pokt-network/poktroll/x/session/types" ) -// EnsureWellFormedProof ensures that the proof submitted by the supplier is valid w.r.t its -// 1. Session header, -// 2. Submission block height is within the proof submission window, -// 3. Corresponding relay request and response pass basic validation and their -// session headers match the proof session header, -// 4. Relay mining difficulty is above the minimum required to earn rewards. +// EnsureWellFormedProof validates a supplier's proof for: +// 1. Valid session header +// 2. Submission height within window +// 3. Matching relay request/response headers +// 4. Relay Mining difficulty above reward threshold // -// It does not validate the proof's relay signatures or ClosestMerkleProof as these are -// computationally expensive and should be done in the EndBlocker corresponding -// to the block height at which the proof is submitted. +// EnsureWellFormedProof does not validate computationally expensive operations like: +// 1. Proof relay signatures +// 2. ClosestMerkleProof // -// This function should be called in the handler corresponding to the message type -// that submits the proof (i.e. SubmitProof). 
+// Additional developer context as of #1031: +// - This function is expected to be called from the SubmitProof messages handler +// - Computationally expensive operations are left to the block's EndBlocker // -// NOTE: A fully valid proof must pass both EnsureWellFormedProof and -// EnsureValidProofSignaturesAndClosestPath. -func (k Keeper) EnsureWellFormedProof(ctx context.Context, proof *types.Proof) (*types.Claim, error) { +// NOTE: Full validation requires passing both: +// 1. EnsureWellFormedProof (this function) +// 2. EnsureValidProofSignaturesAndClosestPath +func (k Keeper) EnsureWellFormedProof(ctx context.Context, proof *types.Proof) error { logger := k.Logger().With("method", "EnsureWellFormedProof") supplierOperatorAddr := proof.SupplierOperatorAddress @@ -67,7 +69,7 @@ func (k Keeper) EnsureWellFormedProof(ctx context.Context, proof *types.Proof) ( var onChainSession *sessiontypes.Session onChainSession, err := k.queryAndValidateSessionHeader(ctx, proof.SessionHeader, supplierOperatorAddr) if err != nil { - return nil, err + return err } logger.Info("queried and validated the session header") @@ -76,73 +78,86 @@ func (k Keeper) EnsureWellFormedProof(ctx context.Context, proof *types.Proof) ( // header which can be derived from known values (e.g. session end height). sessionHeader := onChainSession.GetHeader() + logger = logger.With( + "session_id", sessionHeader.GetSessionId(), + "application_address", sessionHeader.GetApplicationAddress(), + "service_id", sessionHeader.GetServiceId(), + "session_end_height", sessionHeader.GetSessionEndBlockHeight(), + "supplier_operator_address", supplierOperatorAddr, + ) + // Validate proof message commit height is within the respective session's // proof submission window using the onchain session header. 
 	if err = k.validateProofWindow(ctx, sessionHeader, supplierOperatorAddr); err != nil {
-		return nil, err
+		logger.Error(fmt.Sprintf("failed to validate proof window due to error: %v", err))
+		return err
 	}
 
 	if len(proof.ClosestMerkleProof) == 0 {
-		return nil, types.ErrProofInvalidProof.Wrap("proof cannot be empty")
+		logger.Error("closest merkle proof cannot be empty")
+		return types.ErrProofInvalidProof.Wrap("closest merkle proof cannot be empty")
 	}
 
-	// Unmarshal the closest merkle proof from the message.
+	// Unmarshal the sparse compact closest merkle proof from the message.
 	sparseCompactMerkleClosestProof := &smt.SparseCompactMerkleClosestProof{}
 	if err = sparseCompactMerkleClosestProof.Unmarshal(proof.ClosestMerkleProof); err != nil {
-		return nil, types.ErrProofInvalidProof.Wrapf(
-			"failed to unmarshal closest merkle proof: %s",
-			err,
-		)
+		logger.Error(fmt.Sprintf("failed to unmarshal sparse compact merkle closest proof due to error: %v", err))
+		return types.ErrProofInvalidProof.Wrapf("failed to unmarshal sparse compact merkle closest proof: %s", err)
 	}
 
 	// SparseCompactMerkeClosestProof does not implement GetValueHash, so we need to decompact it.
 	sparseMerkleClosestProof, err := smt.DecompactClosestProof(sparseCompactMerkleClosestProof, &protocol.SmtSpec)
 	if err != nil {
-		return nil, types.ErrProofInvalidProof.Wrapf(
-			"failed to decompact closest merkle proof: %s",
-			err,
-		)
+		logger.Error(fmt.Sprintf("failed to decompact sparse merkle closest proof due to error: %v", err))
+		return types.ErrProofInvalidProof.Wrapf("failed to decompact sparse merkle closest proof: %s", err)
 	}
 
 	// Get the relay request and response from the proof.GetClosestMerkleProof.
relayBz := sparseMerkleClosestProof.GetValueHash(&protocol.SmtSpec) relay := &servicetypes.Relay{} if err = k.cdc.Unmarshal(relayBz, relay); err != nil { - return nil, types.ErrProofInvalidRelay.Wrapf( - "failed to unmarshal relay: %s", - err, - ) + logger.Error(fmt.Sprintf("failed to unmarshal relay due to error: %v", err)) + return types.ErrProofInvalidRelay.Wrapf("failed to unmarshal relay: %s", err) } // Basic validation of the relay request. relayReq := relay.GetReq() if err = relayReq.ValidateBasic(); err != nil { - return nil, err + logger.Error(fmt.Sprintf("failed to validate relay request due to error: %v", err)) + return err } logger.Debug("successfully validated relay request") // Make sure that the supplier operator address in the proof matches the one in the relay request. if supplierOperatorAddr != relayReq.Meta.SupplierOperatorAddress { - return nil, types.ErrProofSupplierMismatch.Wrapf("supplier type mismatch") + logger.Error(fmt.Sprintf( + "supplier operator address mismatch; proof: %s, relay request: %s", + supplierOperatorAddr, + relayReq.Meta.SupplierOperatorAddress, + )) + return types.ErrProofSupplierMismatch.Wrapf("supplier type mismatch") } logger.Debug("the proof supplier operator address matches the relay request supplier operator address") // Basic validation of the relay response. relayRes := relay.GetRes() if err = relayRes.ValidateBasic(); err != nil { - return nil, err + logger.Error(fmt.Sprintf("failed to validate relay response due to error: %v", err)) + return err } logger.Debug("successfully validated relay response") // Verify that the relay request session header matches the proof session header. 
if err = compareSessionHeaders(sessionHeader, relayReq.Meta.GetSessionHeader()); err != nil { - return nil, err + logger.Error(fmt.Sprintf("relay request and proof session header mismatch: %v", err)) + return err } logger.Debug("successfully compared relay request session header") // Verify that the relay response session header matches the proof session header. if err = compareSessionHeaders(sessionHeader, relayRes.Meta.GetSessionHeader()); err != nil { - return nil, err + logger.Error(fmt.Sprintf("relay response and proof session header mismatch: %v", err)) + return err } logger.Debug("successfully compared relay response session header") @@ -154,83 +169,92 @@ func (k Keeper) EnsureWellFormedProof(ctx context.Context, proof *types.Proof) ( relayBz, serviceRelayDifficulty.GetTargetHash(), ); err != nil { - return nil, types.ErrProofInvalidRelayDifficulty.Wrapf("failed to validate relay difficulty for service %s due to: %v", sessionHeader.ServiceId, err) + logger.Error(fmt.Sprintf("failed to validate relay difficulty due to error: %v", err)) + return types.ErrProofInvalidRelayDifficulty.Wrapf("failed to validate relay difficulty for service %s due to: %v", sessionHeader.ServiceId, err) } logger.Debug("successfully validated relay mining difficulty") // Retrieve the corresponding claim for the proof submitted - claim, err := k.queryAndValidateClaimForProof(ctx, sessionHeader, supplierOperatorAddr) - if err != nil { - return nil, err + if err := k.validateClaimForProof(ctx, sessionHeader, supplierOperatorAddr); err != nil { + return err } logger.Debug("successfully retrieved and validated claim") - return claim, nil + return nil } -// EnsureValidProofSignaturesAndClosestPath ensures that the proof submitted by -// the supplier has a valid relay request/response signatures and closest path -// with respect to an onchain claim. +// EnsureValidProofSignaturesAndClosestPath validates: +// 1. Proof signatures from the supplier +// 2. 
Valid relay request/response signatures from the application/supplier respectively +// 3. Closest path validation against onchain claim // -// This function should be called in the EndBlocker corresponding to the block height -// at which the proof is submitted rather than during proof submission (i.e. SubmitProof). +// Execution requirements: +// 1. Must run in the EndBlocker of the proof submission height +// 2. Cannot run during SubmitProof due to computational cost // -// NOTE: A fully valid proof must pass both EnsureWellFormedProof and -// EnsureValidProofSignaturesAndClosestPath. +// NOTE: Full validation requires passing both: +// 1. EnsureWellFormedProof +// 2. EnsureValidProofSignaturesAndClosestPath (this function) func (k Keeper) EnsureValidProofSignaturesAndClosestPath( ctx context.Context, + claim *types.Claim, proof *types.Proof, ) error { // Telemetry: measure execution time. defer cosmostelemetry.MeasureSince(cosmostelemetry.Now(), telemetry.MetricNameKeys("proof", "validation")...) - logger := k.Logger().With("method", "EnsureValidProofSignaturesAndClosestPath") + sessionHeader := proof.GetSessionHeader() + supplierOperatorAddr := proof.SupplierOperatorAddress + + logger := k.Logger().With( + "method", "EnsureValidProofSignaturesAndClosestPath", + "session_id", sessionHeader.GetSessionId(), + "application_address", sessionHeader.GetApplicationAddress(), + "service_id", sessionHeader.GetServiceId(), + "session_end_height", sessionHeader.GetSessionEndBlockHeight(), + "supplier_operator_address", supplierOperatorAddr, + ) // Retrieve the supplier operator's public key. - supplierOperatorAddr := proof.SupplierOperatorAddress supplierOperatorPubKey, err := k.accountQuerier.GetPubKeyFromAddress(ctx, supplierOperatorAddr) if err != nil { + logger.Error(fmt.Sprintf("failed to retrieve supplier operator public key due to error: %v", err)) return err } - sessionHeader := proof.GetSessionHeader() - - // Unmarshal the closest merkle proof from the message. 
+ // Unmarshal the sparse compact merkle closest proof from the message. sparseCompactMerkleClosestProof := &smt.SparseCompactMerkleClosestProof{} if err = sparseCompactMerkleClosestProof.Unmarshal(proof.ClosestMerkleProof); err != nil { - return types.ErrProofInvalidProof.Wrapf( - "failed to unmarshal closest merkle proof: %s", - err, - ) + logger.Error(fmt.Sprintf("failed to unmarshal sparse compact merkle closest proof due to error: %v", err)) + return types.ErrProofInvalidProof.Wrapf("failed to unmarshal sparse compact merkle closest proof: %s", err) } - // SparseCompactMerkeClosestProof does not implement GetValueHash, so we need to decompact it. + // SparseCompactMerkeClosestProof was intentionally compacted to reduce its onchain state size + // so it must be decompacted rather than just retrieving the value via GetValueHash (not implemented). sparseMerkleClosestProof, err := smt.DecompactClosestProof(sparseCompactMerkleClosestProof, &protocol.SmtSpec) if err != nil { - return types.ErrProofInvalidProof.Wrapf( - "failed to decompact closest merkle proof: %s", - err, - ) + logger.Error(fmt.Sprintf("failed to decompact sparse merkle closest proof due to error: %v", err)) + return types.ErrProofInvalidProof.Wrapf("failed to decompact sparse merkle closest proof: %s", err) } // Get the relay request and response from the proof.GetClosestMerkleProof. relayBz := sparseMerkleClosestProof.GetValueHash(&protocol.SmtSpec) relay := &servicetypes.Relay{} if err = k.cdc.Unmarshal(relayBz, relay); err != nil { - return types.ErrProofInvalidRelay.Wrapf( - "failed to unmarshal relay: %s", - err, - ) + logger.Error(fmt.Sprintf("failed to unmarshal relay due to error: %v", err)) + return types.ErrProofInvalidRelay.Wrapf("failed to unmarshal relay: %s", err) } // Verify the relay request's signature. 
if err = k.ringClient.VerifyRelayRequestSignature(ctx, relay.GetReq()); err != nil { + logger.Error(fmt.Sprintf("failed to verify relay request signature due to error: %v", err)) return err } logger.Debug("successfully verified relay request signature") // Verify the relay response's signature. if err = relay.GetRes().VerifySupplierOperatorSignature(supplierOperatorPubKey); err != nil { + logger.Error(fmt.Sprintf("failed to verify relay response signature due to error: %v", err)) return err } logger.Debug("successfully verified relay response signature") @@ -243,23 +267,17 @@ func (k Keeper) EnsureValidProofSignaturesAndClosestPath( sessionHeader, supplierOperatorAddr, ); err != nil { + logger.Error(fmt.Sprintf("failed to validate closest path due to error: %v", err)) return err } logger.Debug("successfully validated proof path") - // Retrieve the corresponding claim for the proof submitted so it can be - // used in the proof validation below. - // EnsureWellFormedProof has already validated that the claim referenced by the - // proof exists and has a matching session header. - claim, _ := k.GetClaim(ctx, sessionHeader.GetSessionId(), supplierOperatorAddr) - - logger.Debug("successfully retrieved claim") - - // Verify the proof's closest merkle proof. + // Verify the proof's sparse merkle closest proof. if err = verifyClosestProof(sparseMerkleClosestProof, claim.GetRootHash()); err != nil { + logger.Error(fmt.Sprintf("failed to verify sparse merkle closest proof due to error: %v", err)) return err } - logger.Debug("successfully verified closest merkle proof") + logger.Debug("successfully verified sparse merkle closest proof") return nil } @@ -313,21 +331,20 @@ func (k Keeper) validateClosestPath( return nil } -// queryAndValidateClaimForProof ensures that a claim corresponding to the given -// proof's session exists & has a matching supplier operator address and session header, -// it then returns the corresponding claim if the validation is successful. 
-func (k Keeper) queryAndValidateClaimForProof( +// validateClaimForProof ensures that a claim corresponding to the given proof's +// session exists & has a matching supplier operator address and session header. +func (k Keeper) validateClaimForProof( ctx context.Context, sessionHeader *sessiontypes.SessionHeader, supplierOperatorAddr string, -) (*types.Claim, error) { +) error { sessionId := sessionHeader.SessionId // NB: no need to assert the testSessionId or supplier operator address as it is retrieved // by respective values of the given proof. I.e., if the claim exists, then these // values are guaranteed to match. foundClaim, found := k.GetClaim(ctx, sessionId, supplierOperatorAddr) if !found { - return nil, types.ErrProofClaimNotFound.Wrapf( + return types.ErrProofClaimNotFound.Wrapf( "no claim found for session ID %q and supplier %q", sessionId, supplierOperatorAddr, @@ -339,7 +356,7 @@ func (k Keeper) queryAndValidateClaimForProof( // Ensure session start heights match. if claimSessionHeader.GetSessionStartBlockHeight() != proofSessionHeader.GetSessionStartBlockHeight() { - return nil, types.ErrProofInvalidSessionStartHeight.Wrapf( + return types.ErrProofInvalidSessionStartHeight.Wrapf( "claim session start height %d does not match proof session start height %d", claimSessionHeader.GetSessionStartBlockHeight(), proofSessionHeader.GetSessionStartBlockHeight(), @@ -348,7 +365,7 @@ func (k Keeper) queryAndValidateClaimForProof( // Ensure session end heights match. if claimSessionHeader.GetSessionEndBlockHeight() != proofSessionHeader.GetSessionEndBlockHeight() { - return nil, types.ErrProofInvalidSessionEndHeight.Wrapf( + return types.ErrProofInvalidSessionEndHeight.Wrapf( "claim session end height %d does not match proof session end height %d", claimSessionHeader.GetSessionEndBlockHeight(), proofSessionHeader.GetSessionEndBlockHeight(), @@ -357,7 +374,7 @@ func (k Keeper) queryAndValidateClaimForProof( // Ensure application addresses match. 
if claimSessionHeader.GetApplicationAddress() != proofSessionHeader.GetApplicationAddress() { - return nil, types.ErrProofInvalidAddress.Wrapf( + return types.ErrProofInvalidAddress.Wrapf( "claim application address %q does not match proof application address %q", claimSessionHeader.GetApplicationAddress(), proofSessionHeader.GetApplicationAddress(), @@ -366,14 +383,14 @@ func (k Keeper) queryAndValidateClaimForProof( // Ensure service IDs match. if claimSessionHeader.GetServiceId() != proofSessionHeader.GetServiceId() { - return nil, types.ErrProofInvalidService.Wrapf( + return types.ErrProofInvalidService.Wrapf( "claim service ID %q does not match proof service ID %q", claimSessionHeader.GetServiceId(), proofSessionHeader.GetServiceId(), ) } - return &foundClaim, nil + return nil } // compareSessionHeaders compares a session header against an expected session header. diff --git a/x/proof/keeper/proof_validation_test.go b/x/proof/keeper/proof_validation_test.go index 7f34b38bf..349dcd59c 100644 --- a/x/proof/keeper/proof_validation_test.go +++ b/x/proof/keeper/proof_validation_test.go @@ -285,7 +285,7 @@ func TestEnsureValidProof_Error(t *testing.T) { return proof }, expectedErr: prooftypes.ErrProofInvalidProof.Wrapf( - "failed to unmarshal closest merkle proof: %s", + "failed to unmarshal sparse compact merkle closest proof: %s", expectedInvalidProofUnmarshalErr, ), }, @@ -753,6 +753,9 @@ func TestEnsureValidProof_Error(t *testing.T) { for _, test := range tests { t.Run(test.desc, func(t *testing.T) { proof := test.newProof(t) + sessionId := proof.GetSessionHeader().GetSessionId() + supplierOperatorAddr := proof.GetSupplierOperatorAddress() + foundClaim, _ := keepers.GetClaim(ctx, sessionId, supplierOperatorAddr) // Advance the block height to the proof path seed height. 
earliestSupplierProofCommitHeight := sharedtypes.GetEarliestSupplierProofCommitHeight( @@ -770,15 +773,19 @@ func TestEnsureValidProof_Error(t *testing.T) { // Advance the block height to the earliest proof commit height. ctx = keepertest.SetBlockHeight(ctx, earliestSupplierProofCommitHeight) - // An invalid proof is either one that is not well-formed or one that - // has invalid signatures or closest path. + // A proof is valid IFF it is: + // 1. Well-formed; session header and other metadata + // 2. Has valid relay signatures + // 3. Satisfies the closest merkle path - if _, err := keepers.EnsureWellFormedProof(ctx, proof); err != nil { + // Ensure the proof is well-formed. + if err := keepers.EnsureWellFormedProof(ctx, proof); err != nil { require.ErrorContains(t, err, test.expectedErr.Error()) return } - if err := keepers.EnsureValidProofSignaturesAndClosestPath(ctx, proof); err != nil { + // Ensure the proof satisfies the closest merkle path and has valid relay signatures. + if err := keepers.EnsureValidProofSignaturesAndClosestPath(ctx, &foundClaim, proof); err != nil { require.ErrorContains(t, err, test.expectedErr.Error()) return } diff --git a/x/proof/keeper/validate_proofs.go b/x/proof/keeper/validate_proofs.go index 6f02f6afa..6ebce5af9 100644 --- a/x/proof/keeper/validate_proofs.go +++ b/x/proof/keeper/validate_proofs.go @@ -1,6 +1,8 @@ package keeper import ( + "context" + "fmt" "runtime" "sync" @@ -9,87 +11,196 @@ import ( "github.com/pokt-network/poktroll/x/proof/types" ) -// ValidateSubmittedProofs validates all proofs submitted in the block and removes -// any invalid proof from the store so that it is not included in the block. -func (k Keeper) ValidateSubmittedProofs(ctx sdk.Context) { +// numCPU is the number of CPU cores available on the machine. +// It is initialized in the init function to prevent runtime.NumCPU from being called +// multiple times in the ValidateSubmittedProofs function. 
+var numCPU int
+
+func init() {
+	// Initialize the number of CPU cores available on the machine.
+	numCPU = runtime.NumCPU()
+}
+
+// ValidateSubmittedProofs concurrently validates block proofs.
+// It marks their corresponding claims as valid or invalid based on the proof validation.
+// It removes them from the store once they are processed.
+func (k Keeper) ValidateSubmittedProofs(ctx sdk.Context) (numValidProofs, numInvalidProofs uint64, err error) {
 	logger := k.Logger().With("method", "ValidateSubmittedProofs")
 
-	// Use an iterator to iterate over all proofs instead of fetching them all
-	// at once to avoid memory issues.
-	iterator := k.GetAllProofsIterator(ctx)
-	defer iterator.Close()
+	logger.Info(fmt.Sprintf("Number of CPU cores used for parallel proof validation: %d\n", numCPU))
 
-	// Since the proofs are independent of each other, we can validate them in parallel
-	// across all CPU cores to speed up the process.
+	// Iterate over proofs using a proofIterator to prevent memory issues from bulk fetching.
+	proofIterator := k.GetAllProofsIterator(ctx)
 
-	// Use a semaphore to limit the number of goroutines to the number of CPU cores.
-	// This is to avoid creating too many goroutines which can lead to memory issues.
-	sem := make(chan struct{}, numCPU)
+	coordinator := &proofValidationTaskCoordinator{
+		// Parallelize proof validation across CPU cores since they are independent from one another.
+		// Use semaphores to limit concurrent goroutines and prevent memory issues.
+		sem: make(chan struct{}, numCPU),
+		// Use a wait group to wait for all goroutines to finish before returning.
+		wg: &sync.WaitGroup{},
 
-	// Use a wait group to wait for all goroutines to finish before returning.
- wg := sync.WaitGroup{} + processedProofs: make(map[string][]string), + coordinatorMu: &sync.Mutex{}, + } - for ; iterator.Valid(); iterator.Next() { - proofBz := iterator.Value() + for ; proofIterator.Valid(); proofIterator.Next() { + proofBz := proofIterator.Value() // Acquire a semaphore to limit the number of goroutines. // This will block if the sem channel is full. - sem <- struct{}{} - // Increment the wait group to wait for validation to finish. - wg.Add(1) - - go func(proofBz []byte) { - // Decrement the wait group when the goroutine finishes. - defer wg.Done() - // Release the semaphore after the goroutine finishes which unblocks another - // iteration to run its goroutine. - defer func() { <-sem }() - - var proof types.Proof - // proofBz is not expected to fail unmarshalling since it is should have - // passed EnsureWellFormedProof validation in MsgSubmitProof handler. - // Panic if it fails unmarshalling. - k.cdc.MustUnmarshal(proofBz, &proof) - - // Already validated proofs will have their ClosestMerkleProof cleared. - // Skip already validated proofs submitted at earlier block heights of - // the proof submission window. - if len(proof.ClosestMerkleProof) == 0 { - return - } - - // Try to validate the proof and remove it if it is invalid. - if err := k.EnsureValidProofSignaturesAndClosestPath(ctx, &proof); err != nil { - // Remove the proof if it is invalid to save block space and trigger the - // supplier slashing code path in the SettlePendingClaims flow. - k.RemoveProof(ctx, proof.GetSessionHeader().GetSessionId(), proof.GetSupplierOperatorAddress()) - - // TODO_MAINNET(red-0ne): Emit an invalid proof event to signal that a proof was - // removed due to bad signatures or ClosestMerkleProof. - // For now this could be inferred from the EventProofSubmitted+EventClaimExpired events. 
- - logger.Info("Removed invalid proof", - "session_id", proof.GetSessionHeader().GetSessionId(), - "supplier_operator_address", proof.GetSupplierOperatorAddress(), - "error", err, - ) - - return - } - - // Clear the ClosestMerkleProof for successfully validated proofs to: - // 1. Save block space as the ClosestMerkleProof embeds the entire relay request and - // response bytes which account for the majority of the proof size. - // 2. Mark the proof as validated to avoid re-validating it in subsequent blocks - // within the same proof submission window. - proof.ClosestMerkleProof = make([]byte, 0) - - // Update the proof in the store to clear the ClosestMerkleProof which makes the - // committed block to never store the potentially large ClosestMerkleProof. - k.UpsertProof(ctx, proof) - }(proofBz) + coordinator.sem <- struct{}{} + + // Increment the wait group to wait for proof validation to finish. + coordinator.wg.Add(1) + + go k.validateProof(ctx, proofBz, coordinator) } // Wait for all goroutines to finish before returning. - wg.Wait() + coordinator.wg.Wait() + + // Close the proof iterator before deleting the processed proofs. + proofIterator.Close() + + // Delete all the processed proofs from the store since they are no longer needed. + logger.Info("removing processed proofs from the store") + for supplierOperatorAddr, processedProofs := range coordinator.processedProofs { + for _, sessionId := range processedProofs { + k.RemoveProof(ctx, sessionId, supplierOperatorAddr) + logger.Info(fmt.Sprintf( + "removing proof for supplier %s with session ID %s", + supplierOperatorAddr, + sessionId, + )) + } + } + + return coordinator.numValidProofs, coordinator.numInvalidProofs, nil +} + +// validateProof validates a proof before removing it from the store. +// It marks the corresponding claim as valid or invalid based on the proof validation. +// It is meant to be called concurrently by multiple goroutines to parallelize +// proof validation. 
+func (k Keeper) validateProof( + ctx context.Context, + proofBz []byte, + coordinator *proofValidationTaskCoordinator, +) { + sdkCtx := sdk.UnwrapSDKContext(ctx) + + logger := k.Logger().With("method", "validateProof") + + // Decrement the wait group when the goroutine finishes. + defer coordinator.wg.Done() + + // Release the semaphore after the goroutine finishes which unblocks another one. + defer func() { <-coordinator.sem }() + + var proof types.Proof + // proofBz is not expected to fail unmarshalling since it is should have + // passed EnsureWellFormedProof validation in MsgSubmitProof handler. + // Panic if it fails unmarshalling. + k.cdc.MustUnmarshal(proofBz, &proof) + + sessionHeader := proof.GetSessionHeader() + supplierOperatorAddr := proof.GetSupplierOperatorAddress() + + logger = logger.With( + "session_id", sessionHeader.GetSessionId(), + "application_address", sessionHeader.GetApplicationAddress(), + "service_id", sessionHeader.GetServiceId(), + "session_end_height", sessionHeader.GetSessionEndBlockHeight(), + "supplier_operator_address", supplierOperatorAddr, + ) + + // Retrieve the corresponding claim for the proof submitted so it can be + // used in the proof validation below. + // EnsureWellFormedProof has already validated that the claim referenced by the + // proof exists and has a matching session header. + claim, claimFound := k.GetClaim(ctx, sessionHeader.GetSessionId(), supplierOperatorAddr) + if !claimFound { + // DEV_NOTE: This should never happen since EnsureWellFormedProof has already checked + // that the proof has a corresponding claim. + logger.Error("no claim found for the corresponding proof") + return + } + logger.Debug("successfully retrieved claim") + + // Set the proof status to valid by default. + proofStatus := types.ClaimProofStatus_VALID + // Set the invalidity reason to an empty string by default. 
+ invalidProofCause := "" + + if err := k.EnsureValidProofSignaturesAndClosestPath(ctx, &claim, &proof); err != nil { + // Set the proof status to invalid. + proofStatus = types.ClaimProofStatus_INVALID + + // Set the invalidity reason to the error message. + invalidProofCause = err.Error() + + logger.Info(fmt.Sprintf("invalid proof due to error: %v", err)) + } + logger.Info(fmt.Sprintf("proof checked, validation result: %s", proofStatus)) + + // Create and emit an event for the proof validation result. + eventProofValidityChecked := types.EventProofValidityChecked{ + Proof: &proof, + BlockHeight: uint64(sdkCtx.BlockHeight()), + ProofStatus: proofStatus, + Reason: invalidProofCause, + } + + if err := sdkCtx.EventManager().EmitTypedEvent(&eventProofValidityChecked); err != nil { + logger.Error(fmt.Sprintf("failed to emit proof validity check event due to: %v", err)) + return + } + + // Protect the subsequent operations from concurrent access. + coordinator.coordinatorMu.Lock() + defer coordinator.coordinatorMu.Unlock() + + // Update the claim to reflect its corresponding the proof validation result. + // + // It will be used later by the SettlePendingClaims routine to determine whether: + // 1. The claim should be settled or not + // 2. The corresponding supplier should be slashed or not + claim.ProofStatus = proofStatus + k.UpsertClaim(ctx, claim) + + // Collect the processed proofs info to delete them after the proofIterator is closed + // to prevent iterator invalidation. + coordinator.processedProofs[supplierOperatorAddr] = append( + coordinator.processedProofs[supplierOperatorAddr], + sessionHeader.GetSessionId(), + ) + + if proofStatus == types.ClaimProofStatus_INVALID { + // Increment the number of invalid proofs. + coordinator.numInvalidProofs++ + } else { + // Increment the number of valid proofs. + coordinator.numValidProofs++ + } +} + +// proofValidationTaskCoordinator is a helper struct to coordinate parallel proof +// validation tasks. 
+type proofValidationTaskCoordinator struct { + // sem is a semaphore to limit the number of concurrent goroutines. + sem chan struct{} + + // wg is a wait group to wait for all goroutines to finish before returning. + wg *sync.WaitGroup + + // processedProofs is a map of supplier operator addresses to the session IDs + // of proofs that have been processed. + processedProofs map[string][]string + + // numValidProofs and numInvalidProofs are counters for the number of valid and invalid proofs. + numValidProofs, + numInvalidProofs uint64 + + // coordinatorMu protects the coordinator fields. + coordinatorMu *sync.Mutex } diff --git a/x/proof/module/abci.go b/x/proof/module/abci.go index b986fe803..88fc0762d 100644 --- a/x/proof/module/abci.go +++ b/x/proof/module/abci.go @@ -1,6 +1,8 @@ package proof import ( + "fmt" + cosmostelemetry "github.com/cosmos/cosmos-sdk/telemetry" sdk "github.com/cosmos/cosmos-sdk/types" @@ -8,14 +10,26 @@ import ( "github.com/pokt-network/poktroll/x/proof/types" ) -// EndBlocker called at every block and validates all proofs submitted at the block -// height and removes any invalid proofs. +// EndBlocker is called at every block and handles proof-related operations. func EndBlocker(ctx sdk.Context, k keeper.Keeper) (err error) { // Telemetry: measure the end-block execution time following standard cosmos-sdk practices. defer cosmostelemetry.ModuleMeasureSince(types.ModuleName, cosmostelemetry.Now(), cosmostelemetry.MetricKeyEndBlocker) - // ValidateSubmittedProofs does not return an error as it is a best-effort function. - k.ValidateSubmittedProofs(ctx) + logger := k.Logger().With("method", "EndBlocker") + + // Iterates through all proofs submitted in this block and removes invalid ones. 
+ numValidProofs, numInvalidProofs, err := k.ValidateSubmittedProofs(ctx) + if err != nil { + logger.Error(fmt.Sprintf("could not validate submitted proofs due to error %v", err)) + return err + } + + logger.Info(fmt.Sprintf( + "validated %d proofs: %d valid, %d invalid", + numValidProofs+numInvalidProofs, + numValidProofs, + numInvalidProofs, + )) return nil } diff --git a/x/proof/module/module.go b/x/proof/module/module.go index 2cc495194..82d9b04d0 100644 --- a/x/proof/module/module.go +++ b/x/proof/module/module.go @@ -149,8 +149,9 @@ func (am AppModule) BeginBlock(_ context.Context) error { // EndBlock contains the logic that is automatically triggered at the end of each block. // The end block implementation is optional. -func (am AppModule) EndBlock(_ context.Context) error { - return nil +func (am AppModule) EndBlock(goCtx context.Context) error { + ctx := sdk.UnwrapSDKContext(goCtx) + return EndBlocker(ctx, am.keeper) } // IsOnePerModuleType implements the depinject.OnePerModuleType interface. diff --git a/x/proof/types/event.pb.go b/x/proof/types/event.pb.go index 8e467c0c5..d71cea0bf 100644 --- a/x/proof/types/event.pb.go +++ b/x/proof/types/event.pb.go @@ -330,46 +330,118 @@ func (m *EventProofUpdated) GetClaimedUpokt() *types.Coin { return nil } +// Event emitted after a proof has been checked for validity. 
+type EventProofValidityChecked struct { + Proof *Proof `protobuf:"bytes,1,opt,name=proof,proto3" json:"proof"` + BlockHeight uint64 `protobuf:"varint,2,opt,name=block_height,json=blockHeight,proto3" json:"block_height"` + ProofStatus ClaimProofStatus `protobuf:"varint,3,opt,name=proof_status,json=proofStatus,proto3,enum=poktroll.proof.ClaimProofStatus" json:"proof_status"` + Reason string `protobuf:"bytes,4,opt,name=reason,proto3" json:"reason"` +} + +func (m *EventProofValidityChecked) Reset() { *m = EventProofValidityChecked{} } +func (m *EventProofValidityChecked) String() string { return proto.CompactTextString(m) } +func (*EventProofValidityChecked) ProtoMessage() {} +func (*EventProofValidityChecked) Descriptor() ([]byte, []int) { + return fileDescriptor_dd4c19e04487fbec, []int{4} +} +func (m *EventProofValidityChecked) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventProofValidityChecked) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EventProofValidityChecked) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventProofValidityChecked.Merge(m, src) +} +func (m *EventProofValidityChecked) XXX_Size() int { + return m.Size() +} +func (m *EventProofValidityChecked) XXX_DiscardUnknown() { + xxx_messageInfo_EventProofValidityChecked.DiscardUnknown(m) +} + +var xxx_messageInfo_EventProofValidityChecked proto.InternalMessageInfo + +func (m *EventProofValidityChecked) GetProof() *Proof { + if m != nil { + return m.Proof + } + return nil +} + +func (m *EventProofValidityChecked) GetBlockHeight() uint64 { + if m != nil { + return m.BlockHeight + } + return 0 +} + +func (m *EventProofValidityChecked) GetProofStatus() ClaimProofStatus { + if m != nil { + return m.ProofStatus + } + return ClaimProofStatus_NOT_FOUND +} + +func (m *EventProofValidityChecked) GetReason() string { + if m != nil { + return 
m.Reason + } + return "" +} + func init() { proto.RegisterType((*EventClaimCreated)(nil), "poktroll.proof.EventClaimCreated") proto.RegisterType((*EventClaimUpdated)(nil), "poktroll.proof.EventClaimUpdated") proto.RegisterType((*EventProofSubmitted)(nil), "poktroll.proof.EventProofSubmitted") proto.RegisterType((*EventProofUpdated)(nil), "poktroll.proof.EventProofUpdated") + proto.RegisterType((*EventProofValidityChecked)(nil), "poktroll.proof.EventProofValidityChecked") } func init() { proto.RegisterFile("poktroll/proof/event.proto", fileDescriptor_dd4c19e04487fbec) } var fileDescriptor_dd4c19e04487fbec = []byte{ - // 451 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x95, 0x3f, 0x8f, 0xd3, 0x30, - 0x18, 0xc6, 0x1b, 0x7a, 0x3d, 0xe9, 0x0c, 0x9c, 0x74, 0xe1, 0x8f, 0x72, 0x45, 0x38, 0x27, 0xa6, - 0x5b, 0xce, 0x56, 0x41, 0xea, 0x07, 0x48, 0xd4, 0x8d, 0x01, 0x82, 0x2a, 0x21, 0x06, 0xaa, 0x24, - 0x35, 0x25, 0x6a, 0x6c, 0x47, 0x89, 0x5d, 0xe8, 0x27, 0x60, 0xe5, 0x1b, 0x21, 0x36, 0xc6, 0x8e, - 0x9d, 0x22, 0x94, 0x6e, 0xf9, 0x14, 0xc8, 0x76, 0x83, 0xda, 0x08, 0x90, 0x50, 0x25, 0x58, 0x3a, - 0xc5, 0x7e, 0x9f, 0xe7, 0xb1, 0x9d, 0xf7, 0x27, 0xcb, 0xa0, 0x9f, 0xf1, 0xb9, 0xc8, 0x79, 0x9a, - 0xe2, 0x2c, 0xe7, 0xfc, 0x1d, 0x26, 0x0b, 0xc2, 0x04, 0xca, 0x72, 0x2e, 0xb8, 0x7d, 0xde, 0x68, - 0x48, 0x6b, 0x7d, 0x18, 0xf3, 0x82, 0xf2, 0x02, 0x47, 0x61, 0x41, 0xf0, 0x62, 0x10, 0x11, 0x11, - 0x0e, 0x70, 0xcc, 0x13, 0x66, 0xfc, 0xfd, 0xfb, 0x33, 0x3e, 0xe3, 0x7a, 0x88, 0xd5, 0x68, 0x5b, - 0x6d, 0xef, 0x20, 0x96, 0x19, 0x29, 0x8c, 0xf6, 0xe4, 0x53, 0x17, 0x5c, 0x8c, 0xd4, 0x8e, 0x7e, - 0x1a, 0x26, 0xd4, 0xcf, 0x49, 0x28, 0xc8, 0xd4, 0x1e, 0x82, 0x5e, 0xac, 0xe6, 0x8e, 0x75, 0x65, - 0x5d, 0xdf, 0x7e, 0xfa, 0x00, 0xed, 0x9f, 0x03, 0x69, 0xb3, 0x77, 0x56, 0x97, 0xae, 0xf1, 0x05, - 0xe6, 0x63, 0xdf, 0x00, 0xc0, 0x24, 0x9d, 0xe4, 0x24, 0x0d, 0x97, 0x85, 0x73, 0xeb, 0xca, 0xba, - 0x3e, 0xf1, 0xce, 0xeb, 0xd2, 0xdd, 0xa9, 0x06, 0x67, 
0x4c, 0xd2, 0x40, 0x0f, 0xed, 0xd7, 0xe0, - 0x52, 0x09, 0x3a, 0x4b, 0xa6, 0x93, 0x98, 0xd3, 0x4c, 0x0a, 0x32, 0x91, 0x2c, 0x11, 0x85, 0x73, - 0xa2, 0xd3, 0x8f, 0xeb, 0xd2, 0xfd, 0xbd, 0x29, 0x78, 0xc8, 0x24, 0xf5, 0x8d, 0xe2, 0x1b, 0x61, - 0xac, 0xea, 0xf6, 0x5b, 0xf0, 0x48, 0x85, 0x48, 0x21, 0x12, 0xaa, 0xfe, 0xa8, 0xb5, 0x76, 0x4f, - 0xaf, 0xed, 0xd6, 0xa5, 0xfb, 0x27, 0x5b, 0xe0, 0x30, 0x49, 0x47, 0x8d, 0xb6, 0xb7, 0xfe, 0x4b, - 0x70, 0xb7, 0x39, 0x90, 0x54, 0xbd, 0x71, 0x4e, 0x75, 0xa3, 0x2e, 0x91, 0x01, 0x84, 0x14, 0x20, - 0xb4, 0x05, 0x84, 0x7c, 0x9e, 0x30, 0xef, 0xa2, 0x2e, 0xdd, 0xfd, 0x4c, 0x70, 0x67, 0x3b, 0x1d, - 0xab, 0x59, 0x8b, 0xc4, 0x38, 0x9b, 0x1e, 0x49, 0xfc, 0x27, 0x12, 0x5f, 0xbb, 0xe0, 0x9e, 0x26, - 0xf1, 0x42, 0xb5, 0xf8, 0x95, 0x8c, 0x68, 0x22, 0x0e, 0x61, 0x31, 0x04, 0x3d, 0x6d, 0xd0, 0x18, - 0x7e, 0x91, 0xd3, 0xdb, 0x98, 0x9c, 0x2e, 0x04, 0xe6, 0xd3, 0x62, 0xd8, 0x3d, 0x32, 0xfc, 0x0b, - 0x86, 0x5f, 0x9a, 0xdb, 0xa4, 0x9b, 0x7b, 0xe8, 0x6d, 0x3a, 0x12, 0xfc, 0xe7, 0x04, 0xbd, 0xe7, - 0xdf, 0x2a, 0x68, 0xad, 0x2a, 0x68, 0xad, 0x2b, 0x68, 0x7d, 0xaf, 0xa0, 0xf5, 0x79, 0x03, 0x3b, - 0xab, 0x0d, 0xec, 0xac, 0x37, 0xb0, 0xf3, 0x06, 0xcd, 0x12, 0xf1, 0x5e, 0x46, 0x28, 0xe6, 0x14, - 0x2b, 0xfb, 0x0d, 0x23, 0xe2, 0x03, 0xcf, 0xe7, 0xf8, 0xe7, 0x5b, 0xf7, 0x71, 0xf7, 0xb5, 0x8b, - 0x4e, 0xf5, 0x73, 0xf7, 0xec, 0x47, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa1, 0x12, 0xa3, 0x30, 0x6e, - 0x07, 0x00, 0x00, + // 551 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x95, 0x4f, 0x6f, 0xda, 0x3e, + 0x18, 0xc7, 0x49, 0x29, 0x48, 0x18, 0x7e, 0xe8, 0xd7, 0xec, 0x8f, 0x80, 0x69, 0x09, 0xea, 0xa9, + 0x97, 0x26, 0x6a, 0x2b, 0xf5, 0x05, 0x10, 0x55, 0xda, 0x61, 0x87, 0x2d, 0x13, 0xd3, 0xb4, 0xc3, + 0x50, 0x12, 0x3c, 0xb0, 0x88, 0xed, 0x28, 0xb1, 0xbb, 0x71, 0xde, 0x61, 0xd7, 0xbd, 0xa3, 0x69, + 0xb7, 0x1d, 0x7b, 0xec, 0x29, 0x9a, 0xe0, 0x96, 0x57, 0x31, 0xf9, 0x71, 0x58, 0x01, 0x75, 0x93, + 0xaa, 0x4a, 0xdb, 0x85, 
0x93, 0xfd, 0x3c, 0xdf, 0xe7, 0x6b, 0x9b, 0xe7, 0x43, 0x6c, 0xd4, 0x4b, + 0xf8, 0x4c, 0xa4, 0x3c, 0x8e, 0xdd, 0x24, 0xe5, 0xfc, 0xbd, 0x8b, 0x2f, 0x31, 0x13, 0x4e, 0x92, + 0x72, 0xc1, 0xcd, 0xf6, 0x4a, 0x73, 0x40, 0xeb, 0x59, 0x11, 0xcf, 0x28, 0xcf, 0xdc, 0x30, 0xc8, + 0xb0, 0x7b, 0x79, 0x12, 0x62, 0x11, 0x9c, 0xb8, 0x11, 0x27, 0x4c, 0xd7, 0xf7, 0x1e, 0x4e, 0xf8, + 0x84, 0xc3, 0xd4, 0x55, 0xb3, 0x32, 0xbb, 0xbd, 0x83, 0x98, 0x27, 0x38, 0xd3, 0xda, 0xe1, 0xe7, + 0x2a, 0x3a, 0xb8, 0x50, 0x3b, 0x7a, 0x71, 0x40, 0xa8, 0x97, 0xe2, 0x40, 0xe0, 0xb1, 0x79, 0x8e, + 0x6a, 0x91, 0x8a, 0x3b, 0x46, 0xdf, 0x38, 0x6a, 0x9e, 0x3e, 0x72, 0x36, 0xcf, 0xe1, 0x40, 0xf1, + 0xa0, 0x51, 0xe4, 0xb6, 0xae, 0xf3, 0xf5, 0x60, 0x1e, 0x23, 0xc4, 0x24, 0x1d, 0xa5, 0x38, 0x0e, + 0xe6, 0x59, 0x67, 0xaf, 0x6f, 0x1c, 0xed, 0x0f, 0xda, 0x45, 0x6e, 0xaf, 0x65, 0xfd, 0x06, 0x93, + 0xd4, 0x87, 0xa9, 0xf9, 0x06, 0x75, 0x95, 0x00, 0x5e, 0x3c, 0x1e, 0x45, 0x9c, 0x26, 0x52, 0xe0, + 0x91, 0x64, 0x44, 0x64, 0x9d, 0x7d, 0x70, 0x3f, 0x2d, 0x72, 0xfb, 0xf7, 0x45, 0xfe, 0x63, 0x26, + 0xa9, 0xa7, 0x15, 0x4f, 0x0b, 0x43, 0x95, 0x37, 0xdf, 0xa1, 0x27, 0xca, 0x84, 0x33, 0x41, 0xa8, + 0xfa, 0x45, 0x5b, 0x6b, 0xd7, 0x60, 0x6d, 0xbb, 0xc8, 0xed, 0x3f, 0x95, 0xf9, 0x1d, 0x26, 0xe9, + 0xc5, 0x4a, 0xdb, 0x58, 0xff, 0x25, 0xfa, 0x6f, 0x75, 0x20, 0xa9, 0x7a, 0xd3, 0xa9, 0x43, 0xa3, + 0xba, 0x8e, 0x06, 0xe4, 0x28, 0x40, 0x4e, 0x09, 0xc8, 0xf1, 0x38, 0x61, 0x83, 0x83, 0x22, 0xb7, + 0x37, 0x3d, 0x7e, 0xab, 0x0c, 0x87, 0x2a, 0xda, 0x22, 0x31, 0x4c, 0xc6, 0x3b, 0x12, 0xff, 0x88, + 0xc4, 0xb7, 0x2a, 0x7a, 0x00, 0x24, 0x5e, 0xa8, 0x16, 0xbf, 0x92, 0x21, 0x25, 0xe2, 0x3e, 0x2c, + 0xce, 0x51, 0x0d, 0x0a, 0x00, 0xc3, 0x2d, 0x3e, 0xd8, 0x46, 0xfb, 0x20, 0xe1, 0xeb, 0x61, 0x8b, + 0x61, 0x75, 0xc7, 0xf0, 0x0e, 0x0c, 0xbf, 0xae, 0xbe, 0x26, 0x68, 0xee, 0x7d, 0xbf, 0xa6, 0x1d, + 0xc1, 0xbf, 0x4f, 0xf0, 0xd3, 0x1e, 0xea, 0xde, 0x10, 0x7c, 0x1d, 0xc4, 0x64, 0x4c, 0xc4, 0xdc, + 0x9b, 0xe2, 0x68, 0xa6, 0x49, 0x6a, 0x22, 0xc6, 0xdd, 0x88, 
0x9c, 0xa1, 0x56, 0x18, 0xf3, 0x68, + 0x36, 0x9a, 0x62, 0x32, 0x99, 0x8a, 0xf2, 0x66, 0xfc, 0xbf, 0xc8, 0xed, 0x8d, 0xbc, 0xdf, 0x84, + 0xe8, 0x19, 0x04, 0xe6, 0x10, 0xb5, 0xc0, 0x3d, 0xca, 0x44, 0x20, 0xa4, 0x06, 0xd9, 0x3e, 0xed, + 0xdf, 0xfa, 0xef, 0xd1, 0x77, 0x06, 0xd4, 0xe9, 0x65, 0xd7, 0x9d, 0x7e, 0x33, 0xb9, 0x91, 0xcd, + 0x43, 0x54, 0x4f, 0x71, 0x90, 0x71, 0x06, 0x6c, 0x1b, 0x03, 0x54, 0xe4, 0x76, 0x99, 0xf1, 0xcb, + 0x71, 0xf0, 0xfc, 0xfb, 0xc2, 0x32, 0xae, 0x16, 0x96, 0x71, 0xbd, 0xb0, 0x8c, 0x1f, 0x0b, 0xcb, + 0xf8, 0xb2, 0xb4, 0x2a, 0x57, 0x4b, 0xab, 0x72, 0xbd, 0xb4, 0x2a, 0x6f, 0x9d, 0x09, 0x11, 0x53, + 0x19, 0x3a, 0x11, 0xa7, 0xae, 0x3a, 0xcc, 0x31, 0xc3, 0xe2, 0x03, 0x4f, 0x67, 0xee, 0xaf, 0x17, + 0xff, 0xe3, 0xfa, 0x9b, 0x1f, 0xd6, 0xe1, 0xd1, 0x3f, 0xfb, 0x19, 0x00, 0x00, 0xff, 0xff, 0x68, + 0x6f, 0xcf, 0x81, 0x74, 0x08, 0x00, 0x00, } func (m *EventClaimCreated) Marshal() (dAtA []byte, err error) { @@ -644,6 +716,58 @@ func (m *EventProofUpdated) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *EventProofValidityChecked) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventProofValidityChecked) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventProofValidityChecked) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Reason) > 0 { + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintEvent(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + } + if m.ProofStatus != 0 { + i = encodeVarintEvent(dAtA, i, uint64(m.ProofStatus)) + i-- + dAtA[i] = 0x18 + } + if m.BlockHeight != 0 { + i = encodeVarintEvent(dAtA, i, uint64(m.BlockHeight)) + i-- + dAtA[i] = 0x10 + } + if m.Proof != nil { + { + size, err := 
m.Proof.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvent(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func encodeVarintEvent(dAtA []byte, offset int, v uint64) int { offset -= sovEvent(v) base := offset @@ -767,6 +891,29 @@ func (m *EventProofUpdated) Size() (n int) { return n } +func (m *EventProofValidityChecked) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Proof != nil { + l = m.Proof.Size() + n += 1 + l + sovEvent(uint64(l)) + } + if m.BlockHeight != 0 { + n += 1 + sovEvent(uint64(m.BlockHeight)) + } + if m.ProofStatus != 0 { + n += 1 + sovEvent(uint64(m.ProofStatus)) + } + l = len(m.Reason) + if l > 0 { + n += 1 + l + sovEvent(uint64(l)) + } + return n +} + func sovEvent(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -1561,6 +1708,162 @@ func (m *EventProofUpdated) Unmarshal(dAtA []byte) error { } return nil } +func (m *EventProofValidityChecked) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventProofValidityChecked: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventProofValidityChecked: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvent + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Proof == nil { + m.Proof = &Proof{} + } + if err := m.Proof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockHeight", wireType) + } + m.BlockHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BlockHeight |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofStatus", wireType) + } + m.ProofStatus = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ProofStatus |= ClaimProofStatus(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvent + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvent(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { 
+ return ErrInvalidLengthEvent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipEvent(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/x/proof/types/query.pb.go b/x/proof/types/query.pb.go index ebe21a473..c27b6d9c2 100644 --- a/x/proof/types/query.pb.go +++ b/x/proof/types/query.pb.go @@ -436,7 +436,6 @@ func (m *QueryGetProofResponse) GetProof() Proof { type QueryAllProofsRequest struct { Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"` // Types that are valid to be assigned to Filter: - // // *QueryAllProofsRequest_SupplierOperatorAddress // *QueryAllProofsRequest_SessionId // *QueryAllProofsRequest_SessionEndHeight diff --git a/x/proof/types/types.pb.go b/x/proof/types/types.pb.go index 088ac987e..f96db744a 100644 --- a/x/proof/types/types.pb.go +++ b/x/proof/types/types.pb.go @@ -84,6 +84,36 @@ func (ClaimProofStage) EnumDescriptor() ([]byte, []int) { return fileDescriptor_b75ef15dfd4d6998, []int{1} } +// ClaimProofStatus defines the status of the proof for a claim. +// The default value is NOT_FOUND, whether the proof is required or not. +type ClaimProofStatus int32 + +const ( + ClaimProofStatus_NOT_FOUND ClaimProofStatus = 0 + ClaimProofStatus_VALID ClaimProofStatus = 1 + ClaimProofStatus_INVALID ClaimProofStatus = 2 +) + +var ClaimProofStatus_name = map[int32]string{ + 0: "NOT_FOUND", + 1: "VALID", + 2: "INVALID", +} + +var ClaimProofStatus_value = map[string]int32{ + "NOT_FOUND": 0, + "VALID": 1, + "INVALID": 2, +} + +func (x ClaimProofStatus) String() string { + return proto.EnumName(ClaimProofStatus_name, int32(x)) +} + +func (ClaimProofStatus) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_b75ef15dfd4d6998, []int{2} +} + type Proof struct { // Address of the supplier's operator that submitted this proof. 
SupplierOperatorAddress string `protobuf:"bytes,1,opt,name=supplier_operator_address,json=supplierOperatorAddress,proto3" json:"supplier_operator_address,omitempty"` @@ -150,6 +180,9 @@ type Claim struct { SessionHeader *types.SessionHeader `protobuf:"bytes,2,opt,name=session_header,json=sessionHeader,proto3" json:"session_header,omitempty"` // Root hash returned from smt.SMST#Root(). RootHash []byte `protobuf:"bytes,3,opt,name=root_hash,json=rootHash,proto3" json:"root_hash,omitempty"` + // Claim proof status captures the status of the proof for this claim. + // WARNING: This field MUST only be set by proofKeeper#EnsureValidProofSignaturesAndClosestPath + ProofStatus ClaimProofStatus `protobuf:"varint,4,opt,name=proof_status,json=proofStatus,proto3,enum=poktroll.proof.ClaimProofStatus" json:"proof_status,omitempty"` } func (m *Claim) Reset() { *m = Claim{} } @@ -202,9 +235,17 @@ func (m *Claim) GetRootHash() []byte { return nil } +func (m *Claim) GetProofStatus() ClaimProofStatus { + if m != nil { + return m.ProofStatus + } + return ClaimProofStatus_NOT_FOUND +} + func init() { proto.RegisterEnum("poktroll.proof.ProofRequirementReason", ProofRequirementReason_name, ProofRequirementReason_value) proto.RegisterEnum("poktroll.proof.ClaimProofStage", ClaimProofStage_name, ClaimProofStage_value) + proto.RegisterEnum("poktroll.proof.ClaimProofStatus", ClaimProofStatus_name, ClaimProofStatus_value) proto.RegisterType((*Proof)(nil), "poktroll.proof.Proof") proto.RegisterType((*Claim)(nil), "poktroll.proof.Claim") } @@ -212,36 +253,39 @@ func init() { func init() { proto.RegisterFile("poktroll/proof/types.proto", fileDescriptor_b75ef15dfd4d6998) } var fileDescriptor_b75ef15dfd4d6998 = []byte{ - // 452 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x92, 0xcf, 0x6e, 0xd3, 0x40, - 0x10, 0xc6, 0xb3, 0xa9, 0x5a, 0xc8, 0xb6, 0x29, 0x66, 0x15, 0x41, 0x1a, 0x90, 0x89, 0x7a, 0x8a, - 0x2a, 0xd5, 0x46, 0xf0, 0x04, 0xf9, 
0x63, 0x14, 0x4b, 0x6e, 0x1d, 0xd6, 0x06, 0x21, 0x2e, 0x96, - 0x9b, 0x2c, 0xb6, 0x15, 0xdb, 0x63, 0x76, 0x37, 0x02, 0xde, 0x82, 0x87, 0xe1, 0x09, 0x38, 0x71, - 0xac, 0xb8, 0xd0, 0x23, 0x72, 0x5e, 0x04, 0x79, 0xed, 0x46, 0x79, 0x04, 0x4e, 0xf6, 0xcc, 0x6f, - 0xe6, 0x9b, 0xfd, 0x46, 0x83, 0x07, 0x05, 0xac, 0x25, 0x87, 0x34, 0x35, 0x0b, 0x0e, 0xf0, 0xc9, - 0x94, 0xdf, 0x0a, 0x26, 0x8c, 0x82, 0x83, 0x04, 0x72, 0x7a, 0xcf, 0x0c, 0xc5, 0x06, 0x67, 0x4b, - 0x10, 0x19, 0x88, 0x40, 0x51, 0xb3, 0x0e, 0xea, 0xd2, 0xc1, 0xf3, 0x9d, 0x8c, 0x60, 0x42, 0x24, - 0x90, 0xef, 0x0b, 0x0d, 0x7a, 0x11, 0x44, 0x50, 0x77, 0x55, 0x7f, 0x75, 0xf6, 0xfc, 0x0f, 0xc2, - 0x87, 0x8b, 0x4a, 0x98, 0xf8, 0xf8, 0x4c, 0x6c, 0x8a, 0x22, 0x4d, 0x18, 0x0f, 0xa0, 0x60, 0x3c, - 0x94, 0xc0, 0x83, 0x70, 0xb5, 0xe2, 0x4c, 0x88, 0x3e, 0x1a, 0xa2, 0x51, 0x67, 0xd2, 0xff, 0xfd, - 0xe3, 0xb2, 0xd7, 0x8c, 0x1c, 0xd7, 0xc4, 0x93, 0x3c, 0xc9, 0x23, 0xfa, 0xf4, 0xbe, 0xd5, 0x6d, - 0x3a, 0x1b, 0x4c, 0xde, 0xe0, 0xd3, 0xe6, 0x31, 0x41, 0xcc, 0xc2, 0x15, 0xe3, 0xfd, 0xf6, 0x10, - 0x8d, 0x8e, 0x5f, 0xbd, 0x30, 0x76, 0xbe, 0x1a, 0x6e, 0x78, 0xf5, 0x77, 0xae, 0xca, 0x68, 0x57, - 0xec, 0x87, 0xe4, 0x25, 0xee, 0x2d, 0x53, 0x10, 0x4c, 0xc8, 0x20, 0x63, 0x7c, 0x9d, 0xb2, 0x40, - 0xad, 0xa3, 0x7f, 0x30, 0x44, 0xa3, 0x13, 0x4a, 0x1a, 0x76, 0xa5, 0x90, 0xf2, 0x73, 0xfe, 0x13, - 0xe1, 0xc3, 0x69, 0x1a, 0x26, 0xd9, 0x7f, 0xee, 0xec, 0x19, 0xee, 0x70, 0x00, 0x19, 0xc4, 0xa1, - 0x88, 0x1b, 0x3b, 0x0f, 0xab, 0xc4, 0x3c, 0x14, 0xf1, 0x85, 0x83, 0x9f, 0x28, 0x37, 0x94, 0x7d, - 0xde, 0x24, 0x9c, 0x65, 0x2c, 0x97, 0x94, 0x85, 0x02, 0x72, 0xa2, 0xe1, 0x93, 0x6b, 0xd7, 0x0f, - 0xa8, 0xf5, 0xf6, 0x9d, 0x4d, 0xad, 0x99, 0xd6, 0x22, 0x8f, 0x71, 0x77, 0x41, 0xdd, 0xc9, 0x78, - 0x62, 0x3b, 0xb6, 0xe7, 0xdb, 0x53, 0x0d, 0x91, 0x2e, 0xee, 0xf8, 0x73, 0x6a, 0x79, 0x73, 0xd7, - 0x99, 0x69, 0xed, 0x8b, 0x19, 0x7e, 0xa4, 0x36, 0xa2, 0x24, 0x3d, 0x19, 0x46, 0x8c, 0x1c, 0xe3, - 0x07, 0x53, 0x67, 0x6c, 0x5f, 0x29, 0x05, 0x8c, 0x8f, 0x16, 0xd4, 0x7d, 
0x6f, 0x5d, 0x6b, 0xa8, - 0x02, 0x9e, 0xe5, 0xfb, 0x8e, 0x35, 0xd3, 0xda, 0x55, 0x60, 0x7d, 0x58, 0xa8, 0x39, 0x07, 0x13, - 0xe7, 0x57, 0xa9, 0xa3, 0xdb, 0x52, 0x47, 0x77, 0xa5, 0x8e, 0xfe, 0x96, 0x3a, 0xfa, 0xbe, 0xd5, - 0x5b, 0xb7, 0x5b, 0xbd, 0x75, 0xb7, 0xd5, 0x5b, 0x1f, 0x8d, 0x28, 0x91, 0xf1, 0xe6, 0xc6, 0x58, - 0x42, 0x66, 0x56, 0x8b, 0xb8, 0xcc, 0x99, 0xfc, 0x02, 0x7c, 0x6d, 0xee, 0x8e, 0xf3, 0xeb, 0xfe, - 0x95, 0xdf, 0x1c, 0xa9, 0x3b, 0x7c, 0xfd, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x74, 0x9b, 0x7a, 0x80, - 0x04, 0x03, 0x00, 0x00, + // 511 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x93, 0xcf, 0x6e, 0xd3, 0x40, + 0x10, 0xc6, 0xb3, 0x29, 0x2d, 0x64, 0xf3, 0x07, 0xb3, 0x8a, 0x20, 0x0d, 0xc8, 0x44, 0x3d, 0x45, + 0x91, 0xea, 0xa0, 0x72, 0xe2, 0x98, 0xc4, 0xae, 0x62, 0xc9, 0x8d, 0xc3, 0xda, 0xad, 0x10, 0x17, + 0xcb, 0x4d, 0x16, 0xdb, 0x8a, 0xed, 0x35, 0xbb, 0x1b, 0x01, 0x4f, 0xc0, 0x95, 0x87, 0xe1, 0x21, + 0x38, 0x56, 0x5c, 0xe8, 0x11, 0x25, 0x2f, 0x82, 0xbc, 0x76, 0x43, 0xca, 0x13, 0x70, 0xb2, 0x67, + 0x7f, 0x33, 0xdf, 0xcc, 0xb7, 0xda, 0x81, 0xdd, 0x8c, 0xae, 0x04, 0xa3, 0x71, 0x3c, 0xcc, 0x18, + 0xa5, 0x1f, 0x86, 0xe2, 0x4b, 0x46, 0xb8, 0x96, 0x31, 0x2a, 0x28, 0x6a, 0xdd, 0x31, 0x4d, 0xb2, + 0xee, 0xf1, 0x82, 0xf2, 0x84, 0x72, 0x4f, 0xd2, 0x61, 0x11, 0x14, 0xa9, 0xdd, 0x17, 0x3b, 0x19, + 0x4e, 0x38, 0x8f, 0x68, 0xba, 0x2f, 0xd4, 0x6d, 0x07, 0x34, 0xa0, 0x45, 0x55, 0xfe, 0x57, 0x9c, + 0x9e, 0xfc, 0x02, 0xf0, 0x70, 0x9e, 0x0b, 0x23, 0x17, 0x1e, 0xf3, 0x75, 0x96, 0xc5, 0x11, 0x61, + 0x1e, 0xcd, 0x08, 0xf3, 0x05, 0x65, 0x9e, 0xbf, 0x5c, 0x32, 0xc2, 0x79, 0x07, 0xf4, 0x40, 0xbf, + 0x36, 0xee, 0xfc, 0xfc, 0x7e, 0xda, 0x2e, 0x5b, 0x8e, 0x0a, 0xe2, 0x08, 0x16, 0xa5, 0x01, 0x7e, + 0x76, 0x57, 0x6a, 0x97, 0x95, 0x25, 0x46, 0xe7, 0xb0, 0x55, 0x0e, 0xe3, 0x85, 0xc4, 0x5f, 0x12, + 0xd6, 0xa9, 0xf6, 0x40, 0xbf, 0x7e, 0xf6, 0x52, 0xdb, 0xf9, 0x2a, 0xb9, 0xe6, 0x14, 0xdf, 0xa9, + 0x4c, 0xc3, 0x4d, 0xbe, 0x1f, 0xa2, 
0x57, 0xb0, 0xbd, 0x88, 0x29, 0x27, 0x5c, 0x78, 0x09, 0x61, + 0xab, 0x98, 0x78, 0xf2, 0x3a, 0x3a, 0x07, 0x3d, 0xd0, 0x6f, 0x60, 0x54, 0xb2, 0x0b, 0x89, 0xa4, + 0x9f, 0x93, 0xaf, 0x55, 0x78, 0x38, 0x89, 0xfd, 0x28, 0xf9, 0xcf, 0x9d, 0x3d, 0x87, 0x35, 0x46, + 0xa9, 0xf0, 0x42, 0x9f, 0x87, 0xa5, 0x9d, 0x47, 0xf9, 0xc1, 0xd4, 0xe7, 0x21, 0x9a, 0xc0, 0x86, + 0xf4, 0xe9, 0x71, 0xe1, 0x8b, 0x35, 0xef, 0x3c, 0xe8, 0x81, 0x7e, 0xeb, 0xac, 0xa7, 0xdd, 0x7f, + 0x14, 0x9a, 0xf4, 0x29, 0x6d, 0x3b, 0x32, 0x0f, 0xd7, 0xb3, 0xbf, 0xc1, 0xc0, 0x82, 0x4f, 0x25, + 0xc3, 0xe4, 0xe3, 0x3a, 0x62, 0x24, 0x21, 0xa9, 0xc0, 0xc4, 0xe7, 0x34, 0x45, 0x0a, 0x6c, 0xcc, + 0x6c, 0xd7, 0xc3, 0xc6, 0xdb, 0x4b, 0x13, 0x1b, 0xba, 0x52, 0x41, 0x4f, 0x60, 0x73, 0x8e, 0xed, + 0xf1, 0x68, 0x6c, 0x5a, 0xa6, 0xe3, 0x9a, 0x13, 0x05, 0xa0, 0x26, 0xac, 0xb9, 0x53, 0x6c, 0x38, + 0x53, 0xdb, 0xd2, 0x95, 0xea, 0x40, 0x87, 0x8f, 0xef, 0xb5, 0x0b, 0x08, 0xaa, 0xc3, 0x87, 0x13, + 0x6b, 0x64, 0x5e, 0x48, 0x05, 0x08, 0x8f, 0xe6, 0xd8, 0xbe, 0x32, 0x66, 0x0a, 0xc8, 0x81, 0x63, + 0xb8, 0xae, 0x65, 0xe8, 0x4a, 0x35, 0x0f, 0x8c, 0x77, 0x73, 0xd9, 0xe7, 0x60, 0xf0, 0x06, 0x2a, + 0xff, 0x0e, 0x9d, 0x37, 0xca, 0xa7, 0x39, 0xb7, 0x2f, 0x67, 0xb9, 0x50, 0x0d, 0x1e, 0x5e, 0x8d, + 0x2c, 0x53, 0x2f, 0x74, 0xcc, 0x59, 0x11, 0x54, 0xc7, 0xd6, 0x8f, 0x8d, 0x0a, 0x6e, 0x36, 0x2a, + 0xb8, 0xdd, 0xa8, 0xe0, 0xf7, 0x46, 0x05, 0xdf, 0xb6, 0x6a, 0xe5, 0x66, 0xab, 0x56, 0x6e, 0xb7, + 0x6a, 0xe5, 0xbd, 0x16, 0x44, 0x22, 0x5c, 0x5f, 0x6b, 0x0b, 0x9a, 0x0c, 0xf3, 0x5b, 0x3a, 0x4d, + 0x89, 0xf8, 0x44, 0xd9, 0x6a, 0xb8, 0x5b, 0x8e, 0xcf, 0xfb, 0x5b, 0x76, 0x7d, 0x24, 0xf7, 0xe0, + 0xf5, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd8, 0x7f, 0xdb, 0x75, 0x84, 0x03, 0x00, 0x00, } func (m *Proof) Marshal() (dAtA []byte, err error) { @@ -313,6 +357,11 @@ func (m *Claim) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.ProofStatus != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.ProofStatus)) + i-- + dAtA[i] = 0x20 + } if 
len(m.RootHash) > 0 { i -= len(m.RootHash) copy(dAtA[i:], m.RootHash) @@ -392,6 +441,9 @@ func (m *Claim) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } + if m.ProofStatus != 0 { + n += 1 + sovTypes(uint64(m.ProofStatus)) + } return n } @@ -684,6 +736,25 @@ func (m *Claim) Unmarshal(dAtA []byte) error { m.RootHash = []byte{} } iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofStatus", wireType) + } + m.ProofStatus = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ProofStatus |= ClaimProofStatus(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) diff --git a/x/tokenomics/keeper/keeper_settle_pending_claims_test.go b/x/tokenomics/keeper/keeper_settle_pending_claims_test.go index 138c61a4b..46daae103 100644 --- a/x/tokenomics/keeper/keeper_settle_pending_claims_test.go +++ b/x/tokenomics/keeper/keeper_settle_pending_claims_test.go @@ -384,12 +384,15 @@ func (s *TestSuite) TestSettlePendingClaims_ClaimSettled_ProofRequiredAndProvide s.keepers.UpsertClaim(ctx, s.claim) s.keepers.UpsertProof(ctx, s.proof) + sdkCtx := cosmostypes.UnwrapSDKContext(ctx) + s.keepers.ValidateSubmittedProofs(sdkCtx) + // Settle pending claims after proof window closes // Expectation: All (1) claims should be claimed. // NB: proofs should be rejected when the current height equals the proof window close height. 
sessionEndHeight := s.claim.SessionHeader.SessionEndBlockHeight blockHeight := sharedtypes.GetProofWindowCloseHeight(&sharedParams, sessionEndHeight) - sdkCtx := cosmostypes.UnwrapSDKContext(ctx).WithBlockHeight(blockHeight) + sdkCtx = cosmostypes.UnwrapSDKContext(ctx).WithBlockHeight(blockHeight) settledResult, expiredResult, err := s.keepers.SettlePendingClaims(sdkCtx) require.NoError(t, err) @@ -472,18 +475,26 @@ func (s *TestSuite) TestSettlePendingClaims_ClaimExpired_ProofRequired_InvalidOn // Confirm an expiration event was emitted events := sdkCtx.EventManager().Events() - require.Equal(t, 12, len(events)) // minting, burning, settling, etc.. + require.Equal(t, 13, len(events)) // minting, burning, settling, etc.. + expectedClaimExpiredEvents := testutilevents.FilterEvents[*tokenomicstypes.EventClaimExpired](t, events) require.Equal(t, 1, len(expectedClaimExpiredEvents)) + // Confirm an invalid proof removed event was emitted + expectedProofValidityCheckedEvents := testutilevents.FilterEvents[*prooftypes.EventProofValidityChecked](t, events) + require.Equal(t, 1, len(expectedProofValidityCheckedEvents)) + // Validate the event expectedClaimExpiredEvent := expectedClaimExpiredEvents[0] - require.Equal(t, tokenomicstypes.ClaimExpirationReason_PROOF_MISSING, expectedClaimExpiredEvent.GetExpirationReason()) + require.Equal(t, tokenomicstypes.ClaimExpirationReason_PROOF_INVALID, expectedClaimExpiredEvent.GetExpirationReason()) require.Equal(t, s.numRelays, expectedClaimExpiredEvent.GetNumRelays()) require.Equal(t, s.numClaimedComputeUnits, expectedClaimExpiredEvent.GetNumClaimedComputeUnits()) require.Equal(t, s.numEstimatedComputeUnits, expectedClaimExpiredEvent.GetNumEstimatedComputeUnits()) require.Equal(t, s.claimedUpokt, *expectedClaimExpiredEvent.GetClaimedUpokt()) + expectedProofValidityCheckedEvent := expectedProofValidityCheckedEvents[0] + require.Equal(t, prooftypes.ClaimProofStatus_INVALID, expectedProofValidityCheckedEvent.GetProofStatus()) + // 
Confirm that a slashing event was emitted expectedSlashingEvents := testutilevents.FilterEvents[*tokenomicstypes.EventSupplierSlashed](t, events) require.Equal(t, 1, len(expectedSlashingEvents)) @@ -519,12 +530,15 @@ func (s *TestSuite) TestClaimSettlement_ClaimSettled_ProofRequiredAndProvided_Vi s.keepers.UpsertClaim(ctx, s.claim) s.keepers.UpsertProof(ctx, s.proof) + sdkCtx := cosmostypes.UnwrapSDKContext(ctx) + s.keepers.ValidateSubmittedProofs(sdkCtx) + // Settle pending claims after proof window closes // Expectation: All (1) claims should be claimed. // NB: proof window has definitely closed at this point sessionEndHeight := s.claim.SessionHeader.SessionEndBlockHeight blockHeight := sharedtypes.GetProofWindowCloseHeight(&sharedParams, sessionEndHeight) - sdkCtx := cosmostypes.UnwrapSDKContext(ctx).WithBlockHeight(blockHeight) + sdkCtx = cosmostypes.UnwrapSDKContext(ctx).WithBlockHeight(blockHeight) settledResults, expiredResults, err := s.keepers.SettlePendingClaims(sdkCtx) require.NoError(t, err) @@ -574,12 +588,15 @@ func (s *TestSuite) TestSettlePendingClaims_Settles_WhenAProofIsNotRequired() { // Upsert the claim only (not the proof) s.keepers.UpsertClaim(ctx, s.claim) + sdkCtx := cosmostypes.UnwrapSDKContext(ctx) + s.keepers.ValidateSubmittedProofs(sdkCtx) + // Settle pending claims after proof window closes // Expectation: All (1) claims should be claimed. // NB: proofs should be rejected when the current height equals the proof window close height. 
sessionEndHeight := s.claim.SessionHeader.SessionEndBlockHeight blockHeight := sharedtypes.GetProofWindowCloseHeight(&sharedParams, sessionEndHeight) - sdkCtx := cosmostypes.UnwrapSDKContext(ctx).WithBlockHeight(blockHeight) + sdkCtx = cosmostypes.UnwrapSDKContext(ctx).WithBlockHeight(blockHeight) settledResults, expiredResults, err := s.keepers.SettlePendingClaims(sdkCtx) require.NoError(t, err) @@ -774,7 +791,7 @@ func (s *TestSuite) TestSettlePendingClaims_ClaimExpired_SupplierUnstaked() { // Validate the EventSupplierUnbondingBegin event. unbondingEndHeight := sharedtypes.GetSupplierUnbondingEndHeight(&sharedParams, &slashedSupplier) slashedSupplier.ServicesActivationHeightsMap = make(map[string]uint64) - for i, _ := range slashedSupplier.GetServices() { + for i := range slashedSupplier.GetServices() { slashedSupplier.Services[i].Endpoints = make([]*sharedtypes.SupplierEndpoint, 0) } expectedUnbondingBeginEvent := &suppliertypes.EventSupplierUnbondingBegin{ diff --git a/x/tokenomics/keeper/settle_pending_claims.go b/x/tokenomics/keeper/settle_pending_claims.go index b422b468d..15ea37146 100644 --- a/x/tokenomics/keeper/settle_pending_claims.go +++ b/x/tokenomics/keeper/settle_pending_claims.go @@ -112,7 +112,6 @@ func (k Keeper) SettlePendingClaims(ctx cosmostypes.Context) ( return settledResults, expiredResults, err } - _, isProofFound := k.proofKeeper.GetProof(ctx, sessionId, claim.SupplierOperatorAddress) // Using the probabilistic proofs approach, determine if this expiring // claim required an onchain proof proofRequirement, err = k.proofKeeper.ProofRequirementForClaim(ctx, &claim) @@ -135,14 +134,27 @@ func (k Keeper) SettlePendingClaims(ctx cosmostypes.Context) ( proofIsRequired := proofRequirement != prooftypes.ProofRequirementReason_NOT_REQUIRED if proofIsRequired { - expirationReason := tokenomicstypes.ClaimExpirationReason_EXPIRATION_REASON_UNSPECIFIED // EXPIRATION_REASON_UNSPECIFIED is the default - - if !isProofFound { + // The tokenomics end 
blocker, which calls SettlePendingClaims, is ALWAYS executed + // AFTER the proof submission window closes. In contrast, the proof end blocker, + // which handles proof validation, is ALWAYS executed WITHIN the proof submission + // window of the same session number. + // This ensures that proof validation is completed before claims settlement, + // as they occur at different block heights. + + var expirationReason tokenomicstypes.ClaimExpirationReason + switch claim.ProofStatus { + // If the proof is required and not found, the claim is expired. + case prooftypes.ClaimProofStatus_NOT_FOUND: expirationReason = tokenomicstypes.ClaimExpirationReason_PROOF_MISSING + // If the proof is required and invalid, the claim is expired. + case prooftypes.ClaimProofStatus_INVALID: + expirationReason = tokenomicstypes.ClaimExpirationReason_PROOF_INVALID + // If the proof is required and valid, the claim is settled. + case prooftypes.ClaimProofStatus_VALID: + expirationReason = tokenomicstypes.ClaimExpirationReason_EXPIRATION_REASON_UNSPECIFIED } - // If the proof is missing or invalid -> expire it - if expirationReason != tokenomicstypes.ClaimExpirationReason_EXPIRATION_REASON_UNSPECIFIED { + if claim.ProofStatus != prooftypes.ClaimProofStatus_VALID { // TODO_BETA(@red-0ne): Slash the supplier in proportion to their stake. // TODO_POST_MAINNET: Consider allowing suppliers to RemoveClaim via a new // message in case it was sent by accident @@ -172,12 +184,8 @@ func (k Keeper) SettlePendingClaims(ctx cosmostypes.Context) ( // have other valid claims and the protocol might want to touch the supplier // owner or operator balances if the stake is negative. - // The claim & proof are no longer necessary, so there's no need for them - // to take up onchain space. + // The claim is no longer necessary, so there's no need for it to take up onchain space. 
k.proofKeeper.RemoveClaim(ctx, sessionId, claim.SupplierOperatorAddress) - if isProofFound { - k.proofKeeper.RemoveProof(ctx, sessionId, claim.SupplierOperatorAddress) - } // Append the settlement result to the expired results. expiredResults.Append(ClaimSettlementResult) @@ -240,13 +248,6 @@ func (k Keeper) SettlePendingClaims(ctx cosmostypes.Context) ( // The claim & proof are no longer necessary, so there's no need for them // to take up onchain space. k.proofKeeper.RemoveClaim(ctx, sessionId, claim.SupplierOperatorAddress) - // Whether or not the proof is required, the supplier may have submitted one - // so we need to delete it either way. If we don't have the if structure, - // a safe error will be printed, but it can be confusing to the operator - // or developer. - if isProofFound { - k.proofKeeper.RemoveProof(ctx, sessionId, claim.SupplierOperatorAddress) - } logger.Debug(fmt.Sprintf("Successfully settled claim for session ID %q at block height %d", claim.SessionHeader.SessionId, blockHeight)) diff --git a/x/tokenomics/types/expected_keepers.go b/x/tokenomics/types/expected_keepers.go index b09de7983..dac960fa4 100644 --- a/x/tokenomics/types/expected_keepers.go +++ b/x/tokenomics/types/expected_keepers.go @@ -59,7 +59,7 @@ type ProofKeeper interface { ProofRequirementForClaim(ctx context.Context, claim *prooftypes.Claim) (prooftypes.ProofRequirementReason, error) // Only used for testing & simulation - ValidateSubmittedProofs(ctx cosmostypes.Context) + ValidateSubmittedProofs(ctx cosmostypes.Context) (numValidProofs, numInvalidProofs uint64, err error) GetAllProofs(ctx context.Context) []prooftypes.Proof UpsertClaim(ctx context.Context, claim prooftypes.Claim) UpsertProof(ctx context.Context, claim prooftypes.Proof) From 7784a5bce140ccb2e82f0ff892cbe46111ab2d99 Mon Sep 17 00:00:00 2001 From: Bryan White Date: Fri, 24 Jan 2025 07:07:35 +0100 Subject: [PATCH 10/24] [Off-chain] fix: duplicate log fields (#1040) ## Summary Construct a new logger instance for 
each session during settlement to avoid field duplication. ## Issue - https://discord.com/channels/824324475256438814/1316109400917934201/1330007345882988665 ## Type of change Select one or more from the following: - [ ] New feature, functionality or library - [ ] Consensus breaking; add the `consensus-breaking` label if so. See #791 for details - [x] Bug fix - [ ] Code health or cleanup - [ ] Documentation - [ ] Other (specify) ## Testing - [ ] **Documentation**: `make docusaurus_start`; only needed if you make doc changes - [ ] **Unit Tests**: `make go_develop_and_test` - [ ] **LocalNet E2E Tests**: `make test_e2e` - [ ] **DevNet E2E Tests**: Add the `devnet-test-e2e` label to the PR. ## Sanity Checklist - [ ] I have tested my changes using the available tooling - [ ] I have commented my code - [ ] I have performed a self-review of my own code; both comments & source code - [ ] I create and reference any new tickets, if applicable - [ ] I have left TODOs throughout the codebase, if applicable --- x/tokenomics/keeper/settle_pending_claims.go | 32 ++++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/x/tokenomics/keeper/settle_pending_claims.go b/x/tokenomics/keeper/settle_pending_claims.go index 2d30a0e6b..c6ecd397a 100644 --- a/x/tokenomics/keeper/settle_pending_claims.go +++ b/x/tokenomics/keeper/settle_pending_claims.go @@ -330,36 +330,36 @@ func (k Keeper) ExecutePendingSettledResults(ctx cosmostypes.Context, settledRes logger.Info(fmt.Sprintf("begin executing %d pending settlement results", len(settledResults))) for _, settledResult := range settledResults { - logger = logger.With("session_id", settledResult.GetSessionId()) - logger.Info("begin executing pending settlement result") + sessionLogger := logger.With("session_id", settledResult.GetSessionId()) + sessionLogger.Info("begin executing pending settlement result") - logger.Info(fmt.Sprintf("begin executing %d pending mints", len(settledResult.GetMints()))) - if err := 
k.executePendingModuleMints(ctx, logger, settledResult.GetMints()); err != nil { + sessionLogger.Info(fmt.Sprintf("begin executing %d pending mints", len(settledResult.GetMints()))) + if err := k.executePendingModuleMints(ctx, sessionLogger, settledResult.GetMints()); err != nil { return err } - logger.Info("done executing pending mints") + sessionLogger.Info("done executing pending mints") - logger.Info(fmt.Sprintf("begin executing %d pending module to module transfers", len(settledResult.GetModToModTransfers()))) - if err := k.executePendingModToModTransfers(ctx, logger, settledResult.GetModToModTransfers()); err != nil { + sessionLogger.Info(fmt.Sprintf("begin executing %d pending module to module transfers", len(settledResult.GetModToModTransfers()))) + if err := k.executePendingModToModTransfers(ctx, sessionLogger, settledResult.GetModToModTransfers()); err != nil { return err } - logger.Info("done executing pending module account to module account transfers") + sessionLogger.Info("done executing pending module account to module account transfers") - logger.Info(fmt.Sprintf("begin executing %d pending module to account transfers", len(settledResult.GetModToAcctTransfers()))) - if err := k.executePendingModToAcctTransfers(ctx, logger, settledResult.GetModToAcctTransfers()); err != nil { + sessionLogger.Info(fmt.Sprintf("begin executing %d pending module to account transfers", len(settledResult.GetModToAcctTransfers()))) + if err := k.executePendingModToAcctTransfers(ctx, sessionLogger, settledResult.GetModToAcctTransfers()); err != nil { return err } - logger.Info("done executing pending module to account transfers") + sessionLogger.Info("done executing pending module to account transfers") - logger.Info(fmt.Sprintf("begin executing %d pending burns", len(settledResult.GetBurns()))) - if err := k.executePendingModuleBurns(ctx, logger, settledResult.GetBurns()); err != nil { + sessionLogger.Info(fmt.Sprintf("begin executing %d pending burns", 
len(settledResult.GetBurns()))) + if err := k.executePendingModuleBurns(ctx, sessionLogger, settledResult.GetBurns()); err != nil { return err } - logger.Info("done executing pending burns") + sessionLogger.Info("done executing pending burns") - logger.Info("done executing pending settlement result") + sessionLogger.Info("done executing pending settlement result") - logger.Info(fmt.Sprintf( + sessionLogger.Info(fmt.Sprintf( "done applying settled results for session %q", settledResult.Claim.GetSessionHeader().GetSessionId(), )) From 980a63d2a4a17ce41535106b99f67c442d0c0b8f Mon Sep 17 00:00:00 2001 From: "Dima K." Date: Fri, 24 Jan 2025 12:34:13 -0800 Subject: [PATCH 11/24] [Upgrade] Alpha TestNet v0.0.11 (#967) ## Summary An upgrade for Alpha TestNet from v0.0.10 to v0.0.11. ## Issue Beta TestNet has been launched using v0.0.11 (release candidate), and we need to upgrade alpha so both networks use the same version. ## Type of change Chain upgrade. ## Testing - [] An upgrade is tested by upgrading a network provisioned by the old version (v0.0.10) to the new version (v0.0.11) ## Sanity Checklist - [ ] I have tested my changes using the available tooling - [ ] I have commented my code - [ ] I have performed a self-review of my own code; both comments & source code - [ ] I create and reference any new tickets, if applicable - [ ] I have left TODOs throughout the codebase, if applicable --------- Co-authored-by: Daniel Olshansky --- app/upgrades.go | 1 + app/upgrades/v0.0.11.go | 98 +++++++++++++++++++++++++++++++++++++ cmd/poktrolld/cmd/config.go | 8 ++- telemetry/defaults.go | 14 ++++++ telemetry/telemetry.go | 23 +++++++-- 5 files changed, 135 insertions(+), 9 deletions(-) create mode 100644 app/upgrades/v0.0.11.go create mode 100644 telemetry/defaults.go diff --git a/app/upgrades.go b/app/upgrades.go index 42582dc2b..df4543c99 100644 --- a/app/upgrades.go +++ b/app/upgrades.go @@ -13,6 +13,7 @@ import ( var allUpgrades = []upgrades.Upgrade{ upgrades.Upgrade_0_0_4, 
upgrades.Upgrade_0_0_10, + upgrades.Upgrade_0_0_11, } // setUpgrades sets upgrade handlers for all upgrades and executes KVStore migration if an upgrade plan file exists. diff --git a/app/upgrades/v0.0.11.go b/app/upgrades/v0.0.11.go new file mode 100644 index 000000000..615feecef --- /dev/null +++ b/app/upgrades/v0.0.11.go @@ -0,0 +1,98 @@ +package upgrades + +import ( + "context" + + storetypes "cosmossdk.io/store/types" + upgradetypes "cosmossdk.io/x/upgrade/types" + cosmosTypes "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/module" + "github.com/pokt-network/poktroll/app/keepers" + sessiontypes "github.com/pokt-network/poktroll/x/session/types" + tokenomicstypes "github.com/pokt-network/poktroll/x/tokenomics/types" +) + +// Upgrade_0_0_11 is the upgrade handler for v0.0.11 Alpha TestNet upgrade +// Beta TestNet was launched with v0.0.11, so this upgrade is exclusively for Alpha TestNet. +// - Before: v0.0.10 +// - After: v0.0.11 +var Upgrade_0_0_11 = Upgrade{ + PlanName: "v0.0.11", + CreateUpgradeHandler: func(mm *module.Manager, + keepers *keepers.Keepers, + configurator module.Configurator, + ) upgradetypes.UpgradeHandler { + // Adds new parameters using ignite's config.yml as a reference. Assuming we don't need any other parameters. + // https://github.com/pokt-network/poktroll/compare/v0.0.10...v0.0.11-rc + applyNewParameters := func(ctx context.Context) (err error) { + logger := cosmosTypes.UnwrapSDKContext(ctx).Logger() + logger.Info("Starting parameter updates for v0.0.11") + + // Set num_suppliers_per_session to 15 + // Validate with: `poktrolld q session params --node=https://testnet-validated-validator-rpc.poktroll.com/` + sessionParams := sessiontypes.Params{ + NumSuppliersPerSession: uint64(15), + } + + // ALL parameters must be present when setting params. 
+ err = keepers.SessionKeeper.SetParams(ctx, sessionParams) + if err != nil { + logger.Error("Failed to set session params", "error", err) + return err + } + logger.Info("Successfully updated session params", "new_params", sessionParams) + + // Set tokenomics params. The values are based on default values for LocalNet/Beta TestNet. + // Validate with: `poktrolld q tokenomics params --node=https://testnet-validated-validator-rpc.poktroll.com/` + tokenomicsParams := tokenomicstypes.Params{ + MintAllocationPercentages: tokenomicstypes.MintAllocationPercentages{ + Dao: 0.1, + Proposer: 0.05, + Supplier: 0.7, + SourceOwner: 0.15, + Application: 0.0, + }, + DaoRewardAddress: AlphaTestNetPnfAddress, + } + + // ALL parameters must be present when setting params. + err = keepers.TokenomicsKeeper.SetParams(ctx, tokenomicsParams) + if err != nil { + logger.Error("Failed to set tokenomics params", "error", err) + return err + } + logger.Info("Successfully updated tokenomics params", "new_params", tokenomicsParams) + + return + } + + // The diff shows that the only new authz authorization is for the `poktroll.session.MsgUpdateParam` message. + // However, this message is already authorized for the `pokt10d07y265gmmuvt4z0w9aw880jnsr700j8yv32t` address. + // See here: poktrolld q authz grants-by-granter pokt10d07y265gmmuvt4z0w9aw880jnsr700j8yv32t --node=https://shannon-testnet-grove-seed-rpc.alpha.poktroll.com + // If this upgrade would have been applied to other networks, we could have added a separate upgrade handler for each network. 
+ + // Returns the upgrade handler for v0.0.11 + return func(ctx context.Context, plan upgradetypes.Plan, vm module.VersionMap) (module.VersionMap, error) { + logger := cosmosTypes.UnwrapSDKContext(ctx).Logger() + logger.Info("Starting v0.0.11 upgrade handler") + + err := applyNewParameters(ctx) + if err != nil { + logger.Error("Failed to apply new parameters", "error", err) + return vm, err + } + + logger.Info("Running module migrations") + vm, err = mm.RunMigrations(ctx, configurator, vm) + if err != nil { + logger.Error("Failed to run migrations", "error", err) + return vm, err + } + + logger.Info("Successfully completed v0.0.11 upgrade handler") + return vm, nil + } + }, + // No changes to the KVStore in this upgrade. + StoreUpgrades: storetypes.StoreUpgrades{}, +} diff --git a/cmd/poktrolld/cmd/config.go b/cmd/poktrolld/cmd/config.go index ae9520a41..89536a6a1 100644 --- a/cmd/poktrolld/cmd/config.go +++ b/cmd/poktrolld/cmd/config.go @@ -21,12 +21,10 @@ type PoktrollAppConfig struct { } // poktrollAppConfigDefaults sets default values to render in `app.toml`. -// Checkout `customAppConfigTemplate()` for additional information about each setting. +// Checkout `customAppConfigTemplate()` for additional information about each config parameter. func poktrollAppConfigDefaults() PoktrollAppConfig { return PoktrollAppConfig{ - Telemetry: telemetry.PoktrollTelemetryConfig{ - CardinalityLevel: "medium", - }, + Telemetry: telemetry.DefaultConfig(), } } @@ -104,7 +102,6 @@ func initCometBFTConfig() *cmtcfg.Config { // return "", nil if no custom configuration is required for the application. // TODO_MAINNET: Reconsider values - check `app.toml` for possible options. func initAppConfig() (string, interface{}) { - // The following code snippet is just for reference. 
type CustomAppConfig struct { serverconfig.Config `mapstructure:",squash"` Poktroll PoktrollAppConfig `mapstructure:"poktroll"` @@ -140,6 +137,7 @@ func initAppConfig() (string, interface{}) { srvCfg.GRPC.Enable = true srvCfg.GRPCWeb.Enable = true + // Create the custom config with both server and poktroll configs customAppConfig := CustomAppConfig{ Config: *srvCfg, Poktroll: poktrollAppConfigDefaults(), diff --git a/telemetry/defaults.go b/telemetry/defaults.go new file mode 100644 index 000000000..e059e2c93 --- /dev/null +++ b/telemetry/defaults.go @@ -0,0 +1,14 @@ +package telemetry + +// Default configuration values for telemetry +const ( + // DefaultCardinalityLevel represents the default cardinality level for metrics collection + DefaultCardinalityLevel = "medium" +) + +// DefaultConfig returns the default telemetry configuration +func DefaultConfig() PoktrollTelemetryConfig { + return PoktrollTelemetryConfig{ + CardinalityLevel: DefaultCardinalityLevel, + } +} diff --git a/telemetry/telemetry.go b/telemetry/telemetry.go index 348b73898..d7b93a879 100644 --- a/telemetry/telemetry.go +++ b/telemetry/telemetry.go @@ -11,16 +11,31 @@ import ( // Set once on initialization and remains constant during runtime. var globalTelemetryConfig PoktrollTelemetryConfig -// PoktrollTelemetryConfig represents the telemetry protion of the custom poktroll config section in `app.toml`. +// PoktrollTelemetryConfig represents the telemetry portion of the custom poktroll config section in `app.toml`. type PoktrollTelemetryConfig struct { CardinalityLevel string `mapstructure:"cardinality-level"` } // New sets the globalTelemetryConfig for telemetry package. func New(appOpts servertypes.AppOptions) error { - // Extract the map from appOpts. - // `poktroll.telemetry` comes from `app.toml` which is parsed into a map. - telemetryMap := appOpts.Get("poktroll.telemetry").(map[string]interface{}) + // Get the poktroll config section. 
If it doesn't exist, use defaults + poktrollConfig := appOpts.Get("poktroll") + if poktrollConfig == nil { + globalTelemetryConfig = DefaultConfig() + return nil + } + + // Try to get the telemetry subsection + poktrollMap, ok := poktrollConfig.(map[string]interface{}) + if !ok { + return fmt.Errorf("invalid poktroll config format: expected map[string]interface{}, got %T", poktrollConfig) + } + + telemetryMap, ok := poktrollMap["telemetry"].(map[string]interface{}) + if !ok { + globalTelemetryConfig = DefaultConfig() + return nil + } // Use mapstructure to decode the map into the struct if err := mapstructure.Decode(telemetryMap, &globalTelemetryConfig); err != nil { From 1e34b01fbc327a7a19096e16db21b249c24b1f5d Mon Sep 17 00:00:00 2001 From: Redouane Lakrache Date: Sat, 25 Jan 2025 10:24:11 +0100 Subject: [PATCH 12/24] fix caching concurrency --- create-accounts.sh | 71 +++++++++++-------- .../tests/relays_stress_helpers_test.go | 4 +- stake_apps.sh | 36 +--------- x/application/keeper/application.go | 16 ++--- x/application/keeper/keeper.go | 12 ++-- x/application/keeper/params.go | 9 ++- x/application/types/cache.go | 11 --- x/proof/keeper/claim.go | 16 ++--- x/proof/keeper/keeper.go | 16 +++-- x/proof/keeper/params.go | 9 ++- x/proof/keeper/proof.go | 17 ++--- x/proof/types/account_query_client.go | 7 ++ x/proof/types/cache.go | 13 ---- x/service/keeper/keeper.go | 15 ++-- x/service/keeper/params.go | 9 ++- x/service/keeper/relay_mining_difficulty.go | 13 ++-- x/service/keeper/service.go | 12 ++-- x/service/types/cache.go | 15 ---- x/session/keeper/block_hash.go | 7 +- x/session/keeper/keeper.go | 11 +-- x/session/keeper/params.go | 9 ++- x/session/types/cache.go | 11 --- x/shared/keeper/keeper.go | 4 +- x/shared/keeper/params.go | 11 ++- x/shared/types/cache.go | 39 ++++++++-- x/supplier/keeper/keeper.go | 8 +-- x/supplier/keeper/params.go | 8 +-- x/supplier/keeper/supplier.go | 14 ++-- x/supplier/types/cache.go | 13 ---- x/tokenomics/keeper/keeper.go | 5 +- 
x/tokenomics/keeper/params.go | 10 +-- x/tokenomics/types/cache.go | 9 --- 32 files changed, 206 insertions(+), 254 deletions(-) delete mode 100644 x/application/types/cache.go delete mode 100644 x/proof/types/cache.go delete mode 100644 x/service/types/cache.go delete mode 100644 x/session/types/cache.go delete mode 100644 x/supplier/types/cache.go delete mode 100644 x/tokenomics/types/cache.go diff --git a/create-accounts.sh b/create-accounts.sh index 60d611bd6..b693207db 100755 --- a/create-accounts.sh +++ b/create-accounts.sh @@ -1,41 +1,52 @@ #!/bin/bash -TOTAL_ACCOUNTS=50000 -PARALLEL_JOBS=8 -ACCOUNTS_PER_JOB=$((TOTAL_ACCOUNTS / PARALLEL_JOBS)) - -create_accounts() { - local start=$1 - local end=$2 - local job_id=$3 - - for i in $(seq $start $end); do - if ! poktrolld keys add "app-$i" > /dev/null 2>&1; then - echo "Job $job_id: Error creating account app-$i" - continue - fi +TOTAL_ADDRESSES=${1:-50000} +PARALLEL_JOBS=${2:-8} +SEGMENTS_DIR="./segments" +OUTPUT_FILE="app_addresses.txt" + +create_segment() { + local job_id=$1 + local start_idx=$2 + local num_addresses=$3 + local segment_file="$SEGMENTS_DIR/segment_$job_id.txt" + + > "$segment_file" + for i in $(seq 0 $(($num_addresses-1))); do + addr_idx=$(($start_idx + $i + 1)) + output=$(poktrolld keys add "app-$addr_idx" --output json | jq -r .address 2>&1) + echo "$output" >> "$segment_file" + done +} - if [ $((i % 100)) -eq 0 ]; then - echo "Job $job_id: Progress $i/$end accounts created" +merge_segments() { + > "$OUTPUT_FILE" + for i in $(seq 0 $((PARALLEL_JOBS-1))); do + if [ -f "$SEGMENTS_DIR/segment_$i.txt" ]; then + cat "$SEGMENTS_DIR/segment_$i.txt" >> "$OUTPUT_FILE" + else + echo "Missing segment file: segment_$i.txt" >&2 + return 1 fi done } -echo "Starting parallel account creation with $PARALLEL_JOBS jobs..." 
+main() { + rm -rf $SEGMENTS_DIR + mkdir -p $SEGMENTS_DIR -# Launch parallel jobs -for job in $(seq 0 $((PARALLEL_JOBS-1))); do - start=$((job * ACCOUNTS_PER_JOB + 1)) - if [ $job -eq $((PARALLEL_JOBS-1)) ]; then - end=$TOTAL_ACCOUNTS - else - end=$((start + ACCOUNTS_PER_JOB - 1)) - fi + ADDRS_PER_JOB=$(( (TOTAL_ADDRESSES + PARALLEL_JOBS - 1) / PARALLEL_JOBS )) + echo "Creating $TOTAL_ADDRESSES addresses using $PARALLEL_JOBS parallel jobs" - create_accounts $start $end $job & -done + for job_id in $(seq 0 $((PARALLEL_JOBS-1))); do + start_idx=$((job_id * ADDRS_PER_JOB)) + create_segment "$job_id" "$start_idx" "$ADDRS_PER_JOB" & + done -# Wait for all background jobs to complete -wait + wait + merge_segments + rm -rf $SEGMENTS_DIR + echo "Complete - addresses written to $OUTPUT_FILE" +} -echo "All account creation jobs completed!" \ No newline at end of file +main \ No newline at end of file diff --git a/load-testing/tests/relays_stress_helpers_test.go b/load-testing/tests/relays_stress_helpers_test.go index 28b0b7163..48d8c352b 100644 --- a/load-testing/tests/relays_stress_helpers_test.go +++ b/load-testing/tests/relays_stress_helpers_test.go @@ -648,7 +648,9 @@ func (s *relaysSuite) createApplicationAccount( accAddress, err := keyRecord.GetAddress() require.NoError(s, err) - logger.Debug().Msgf("Application added %s", keyName) + if appIdx%5000 == 0 { + logger.Debug().Msgf("Application added %s", keyName) + } return &accountInfo{ address: accAddress.String(), diff --git a/stake_apps.sh b/stake_apps.sh index 292f9d9c8..637bd5af9 100755 --- a/stake_apps.sh +++ b/stake_apps.sh @@ -3,34 +3,22 @@ TOTAL_APPS=50000 PARALLEL_JOBS=8 CONFIG_DIR="localnet/poktrolld/config" -TEMP_DIR=/tmp/stake_apps SEGMENT_SIZE=$((TOTAL_APPS / PARALLEL_JOBS)) -# Create and setup temp directory -rm -rf $TEMP_DIR -mkdir -p $TEMP_DIR -chmod 777 $TEMP_DIR -trap 'rm -rf $TEMP_DIR' EXIT - # Function to process a segment of apps process_segment() { local start=$1 local end=$2 local job_id=$3 - local 
output="$TEMP_DIR/segment_$job_id.txt" local config_file="${CONFIG_DIR}/application_stake_config.yaml" echo "Job $job_id staking apps $start to $end" for i in $(seq $start $end); do local app_name="app-$i" - if poktrolld tx application stake-application -y \ + poktrolld tx application stake-application -y \ --config "$config_file" \ --keyring-backend test \ - --from "$app_name" > /dev/null 2>&1; then - echo "$app_name" >> "$output.success" - else - echo "$app_name" >> "$output.failed" - fi + --from "$app_name" > /dev/null 2>&1; done } @@ -50,22 +38,4 @@ done wait -# Report results -total_success=0 -total_failed=0 -for job_id in $(seq 0 $((PARALLEL_JOBS - 1))); do - if [ -f "$TEMP_DIR/segment_$job_id.txt.success" ]; then - success=$(wc -l < "$TEMP_DIR/segment_$job_id.txt.success") - total_success=$((total_success + success)) - fi - if [ -f "$TEMP_DIR/segment_$job_id.txt.failed" ]; then - failed=$(wc -l < "$TEMP_DIR/segment_$job_id.txt.failed") - total_failed=$((total_failed + failed)) - echo "Failed apps in job $job_id:" - cat "$TEMP_DIR/segment_$job_id.txt.failed" - fi -done - -echo "Staking complete!" -echo "Successfully staked: $total_success applications" -echo "Failed: $total_failed applications" \ No newline at end of file +echo "Staking complete!" 
\ No newline at end of file diff --git a/x/application/keeper/application.go b/x/application/keeper/application.go index e63ba8d18..6135d2c5c 100644 --- a/x/application/keeper/application.go +++ b/x/application/keeper/application.go @@ -12,14 +12,11 @@ import ( // SetApplication set a specific application in the store from its index func (k Keeper) SetApplication(ctx context.Context, application types.Application) { - if k.cache.Applications[application.Address] != nil { - k.cache.Applications[application.Address] = &application - } - storeAdapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) store := prefix.NewStore(storeAdapter, types.KeyPrefix(types.ApplicationKeyPrefix)) appBz := k.cdc.MustMarshal(&application) store.Set(types.ApplicationKey(application.Address), appBz) + k.applicationsCache.Set(application.Address, application) } // GetApplication returns a application from its index @@ -27,9 +24,9 @@ func (k Keeper) GetApplication( ctx context.Context, appAddr string, ) (app types.Application, found bool) { - if app, found := k.cache.Applications[appAddr]; found { + if app, found := k.applicationsCache.Get(appAddr); found { k.logger.Info("-----Application cache hit-----") - return *app, true + return app, true } storeAdapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) @@ -54,18 +51,17 @@ func (k Keeper) GetApplication( app.DelegateeGatewayAddresses = make([]string, 0) } - k.cache.Applications[appAddr] = &app + k.applicationsCache.Set(appAddr, app) return app, true } // RemoveApplication removes a application from the store func (k Keeper) RemoveApplication(ctx context.Context, appAddr string) { - delete(k.cache.Applications, appAddr) - storeAdapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) store := prefix.NewStore(storeAdapter, types.KeyPrefix(types.ApplicationKeyPrefix)) store.Delete(types.ApplicationKey(appAddr)) + k.applicationsCache.Delete(appAddr) } // GetAllApplications returns all application @@ -86,7 +82,7 
@@ func (k Keeper) GetAllApplications(ctx context.Context) (apps []types.Applicatio app.PendingUndelegations = make(map[uint64]types.UndelegatingGatewayList) } - k.cache.Applications[app.Address] = &app + k.applicationsCache.Set(app.Address, app) apps = append(apps, app) } diff --git a/x/application/keeper/keeper.go b/x/application/keeper/keeper.go index 7a55fbbe4..6432a89cf 100644 --- a/x/application/keeper/keeper.go +++ b/x/application/keeper/keeper.go @@ -9,6 +9,7 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" "github.com/pokt-network/poktroll/x/application/types" + sharedtypes "github.com/pokt-network/poktroll/x/shared/types" ) type ( @@ -26,7 +27,8 @@ type ( gatewayKeeper types.GatewayKeeper sharedKeeper types.SharedKeeper - cache *types.Cache + applicationsCache *sharedtypes.Cache[string, types.Application] + paramsCache *sharedtypes.Cache[string, types.Params] } ) @@ -56,14 +58,14 @@ func NewKeeper( gatewayKeeper: gatewayKeeper, sharedKeeper: sharedKeeper, - cache: &types.Cache{ - Applications: make(map[string]*types.Application), - }, + applicationsCache: sharedtypes.NewCache[string, types.Application](), + paramsCache: sharedtypes.NewCache[string, types.Params](), } } func (k Keeper) ClearCache() { - k.cache.Clear() + k.applicationsCache.Clear() + k.paramsCache.Clear() } // GetAuthority returns the module's authority. 
diff --git a/x/application/keeper/params.go b/x/application/keeper/params.go index ecc58aacb..eece5c1d2 100644 --- a/x/application/keeper/params.go +++ b/x/application/keeper/params.go @@ -10,9 +10,9 @@ import ( // GetParams get all parameters as types.Params func (k Keeper) GetParams(ctx context.Context) (params types.Params) { - if k.cache.Params != nil { + if params, found := k.paramsCache.Get(""); found { k.logger.Info("-----Application params cache hit-----") - return *k.cache.Params + return params } store := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) @@ -22,21 +22,20 @@ func (k Keeper) GetParams(ctx context.Context) (params types.Params) { } k.cdc.MustUnmarshal(paramsBz, ¶ms) + k.paramsCache.Set("", params) - k.cache.Params = ¶ms return params } // SetParams set the params func (k Keeper) SetParams(ctx context.Context, params types.Params) error { - k.cache.Params = ¶ms - store := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) paramsBz, err := k.cdc.Marshal(¶ms) if err != nil { return err } store.Set(types.ParamsKey, paramsBz) + k.paramsCache.Set("", params) return nil } diff --git a/x/application/types/cache.go b/x/application/types/cache.go deleted file mode 100644 index c87030175..000000000 --- a/x/application/types/cache.go +++ /dev/null @@ -1,11 +0,0 @@ -package types - -type Cache struct { - Params *Params - Applications map[string]*Application -} - -func (c *Cache) Clear() { - c.Params = nil - clear(c.Applications) -} diff --git a/x/proof/keeper/claim.go b/x/proof/keeper/claim.go index c1a305197..600bdb0fd 100644 --- a/x/proof/keeper/claim.go +++ b/x/proof/keeper/claim.go @@ -22,8 +22,7 @@ func (k Keeper) UpsertClaim(ctx context.Context, claim types.Claim) { sessionId := claim.GetSessionHeader().GetSessionId() primaryKey := types.ClaimPrimaryKey(sessionId, claim.SupplierOperatorAddress) primaryStore.Set(primaryKey, claimBz) - - k.cache.Claims[sessionId] = &claim + k.claimsCache.Set(string(primaryKey), claim) 
logger.Info(fmt.Sprintf("upserted claim for supplier %s with primaryKey %s", claim.SupplierOperatorAddress, primaryKey)) @@ -43,14 +42,15 @@ func (k Keeper) UpsertClaim(ctx context.Context, claim types.Claim) { // GetClaim returns a claim from its index func (k Keeper) GetClaim(ctx context.Context, sessionId, supplierOperatorAddr string) (_ types.Claim, isClaimFound bool) { - if claim, found := k.cache.Claims[sessionId]; found { + primaryKey := types.ClaimPrimaryKey(sessionId, supplierOperatorAddr) + if claim, found := k.claimsCache.Get(string(primaryKey)); found { k.logger.Info("-----Supplier cache hit-----") - return *claim, true + return claim, true } - claim, found := k.getClaimByPrimaryKey(ctx, types.ClaimPrimaryKey(sessionId, supplierOperatorAddr)) + claim, found := k.getClaimByPrimaryKey(ctx, primaryKey) if found { - k.cache.Claims[sessionId] = &claim + k.claimsCache.Set(string(primaryKey), claim) } return claim, found @@ -65,7 +65,6 @@ func (k Keeper) RemoveClaim(ctx context.Context, sessionId, supplierOperatorAddr // Check if the claim exists primaryKey := types.ClaimPrimaryKey(sessionId, supplierOperatorAddr) - delete(k.cache.Claims, sessionId) foundClaim, isClaimFound := k.getClaimByPrimaryKey(ctx, primaryKey) if !isClaimFound { logger.Error(fmt.Sprintf("trying to delete non-existent claim with primary key %s for supplier %s and session %s", primaryKey, supplierOperatorAddr, sessionId)) @@ -84,6 +83,7 @@ func (k Keeper) RemoveClaim(ctx context.Context, sessionId, supplierOperatorAddr primaryStore.Delete(primaryKey) supplierOperatorAddrStore.Delete(supplierOperatorAddrKey) sessionEndHeightStore.Delete(sessionEndHeightKey) + k.claimsCache.Delete(string(primaryKey)) logger.Info(fmt.Sprintf("deleted claim with primary key %s for supplier %s and session %s", primaryKey, supplierOperatorAddr, sessionId)) } @@ -99,7 +99,7 @@ func (k Keeper) GetAllClaims(ctx context.Context) (claims []types.Claim) { for ; iterator.Valid(); iterator.Next() { var claim types.Claim 
k.cdc.MustUnmarshal(iterator.Value(), &claim) - k.cache.Claims[claim.GetSessionHeader().GetSessionId()] = &claim + k.claimsCache.Set(string(iterator.Key()), claim) claims = append(claims, claim) } diff --git a/x/proof/keeper/keeper.go b/x/proof/keeper/keeper.go index 7a802cc95..e5d2920ee 100644 --- a/x/proof/keeper/keeper.go +++ b/x/proof/keeper/keeper.go @@ -16,6 +16,7 @@ import ( "github.com/pokt-network/poktroll/pkg/polylog" _ "github.com/pokt-network/poktroll/pkg/polylog/polyzero" "github.com/pokt-network/poktroll/x/proof/types" + sharedtypes "github.com/pokt-network/poktroll/x/shared/types" ) type ( @@ -39,7 +40,9 @@ type ( accountQuerier client.AccountQueryClient sharedQuerier client.SharedQueryClient - cache *types.Cache + claimsCache *sharedtypes.Cache[string, types.Claim] + proofsCache *sharedtypes.Cache[string, types.Proof] + paramsCache *sharedtypes.Cache[string, types.Params] } ) @@ -104,15 +107,16 @@ func NewKeeper( accountQuerier: accountQuerier, sharedQuerier: sharedQuerier, - cache: &types.Cache{ - Proofs: make(map[string]*types.Proof), - Claims: make(map[string]*types.Claim), - }, + claimsCache: sharedtypes.NewCache[string, types.Claim](), + proofsCache: sharedtypes.NewCache[string, types.Proof](), + paramsCache: sharedtypes.NewCache[string, types.Params](), } } func (k Keeper) ClearCache() { - k.cache.Clear() + k.claimsCache.Clear() + k.proofsCache.Clear() + k.paramsCache.Clear() k.accountQuerier.ClearCache() } diff --git a/x/proof/keeper/params.go b/x/proof/keeper/params.go index 521fefe96..9c416c97f 100644 --- a/x/proof/keeper/params.go +++ b/x/proof/keeper/params.go @@ -10,9 +10,9 @@ import ( // GetParams get all parameters as types.Params func (k Keeper) GetParams(ctx context.Context) (params types.Params) { - if k.cache.Params != nil { + if params, found := k.paramsCache.Get(""); found { k.logger.Info("-----Proof params cache hit-----") - return *k.cache.Params + return params } store := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) 
@@ -22,20 +22,19 @@ func (k Keeper) GetParams(ctx context.Context) (params types.Params) { } k.cdc.MustUnmarshal(paramsBz, ¶ms) - k.cache.Params = ¶ms + k.paramsCache.Set("", params) return params } // SetParams set the params func (k Keeper) SetParams(ctx context.Context, params types.Params) error { - k.cache.Params = ¶ms - store := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) paramsBz, err := k.cdc.Marshal(¶ms) if err != nil { return err } store.Set(types.ParamsKey, paramsBz) + k.paramsCache.Set("", params) return nil } diff --git a/x/proof/keeper/proof.go b/x/proof/keeper/proof.go index 0bcaf60b6..207fb1416 100644 --- a/x/proof/keeper/proof.go +++ b/x/proof/keeper/proof.go @@ -23,8 +23,6 @@ func (k Keeper) UpsertProof(ctx context.Context, proof types.Proof) { primaryKey := types.ProofPrimaryKey(sessionId, proof.GetSupplierOperatorAddress()) primaryStore.Set(primaryKey, proofBz) - k.cache.Proofs[sessionId] = &proof - logger.Info( fmt.Sprintf("upserted proof for supplier %s with primaryKey %s", proof.GetSupplierOperatorAddress(), primaryKey), ) @@ -41,18 +39,20 @@ func (k Keeper) UpsertProof(ctx context.Context, proof types.Proof) { sessionEndHeight := proof.GetSessionHeader().GetSessionEndBlockHeight() sessionEndHeightKey := types.ProofSupplierEndSessionHeightKey(sessionEndHeight, primaryKey) sessionEndHeightStore.Set(sessionEndHeightKey, primaryKey) + k.proofsCache.Set(string(primaryKey), proof) } // GetProof returns a proof from its index func (k Keeper) GetProof(ctx context.Context, sessionId, supplierOperatorAddr string) (_ types.Proof, isProofFound bool) { - if proof, found := k.cache.Proofs[sessionId]; found { + primaryKey := types.ProofPrimaryKey(sessionId, supplierOperatorAddr) + if proof, found := k.proofsCache.Get(string(primaryKey)); found { k.logger.Info("-----Proof cache hit-----") - return *proof, true + return proof, true } - proof, found := k.getProofByPrimaryKey(ctx, types.ProofPrimaryKey(sessionId, supplierOperatorAddr)) + proof, 
found := k.getProofByPrimaryKey(ctx, primaryKey) if found { - k.cache.Proofs[sessionId] = &proof + k.proofsCache.Set(string(primaryKey), proof) } return proof, found @@ -67,7 +67,6 @@ func (k Keeper) RemoveProof(ctx context.Context, sessionId, supplierOperatorAddr // Check if the proof exists primaryKey := types.ProofPrimaryKey(sessionId, supplierOperatorAddr) - delete(k.cache.Proofs, sessionId) foundProof, isProofFound := k.getProofByPrimaryKey(ctx, primaryKey) if !isProofFound { logger.Error( @@ -78,6 +77,7 @@ func (k Keeper) RemoveProof(ctx context.Context, sessionId, supplierOperatorAddr sessionId, ), ) + k.proofsCache.Delete(string(primaryKey)) return } @@ -93,6 +93,7 @@ func (k Keeper) RemoveProof(ctx context.Context, sessionId, supplierOperatorAddr primaryStore.Delete(primaryKey) supplierOperatorAddrStore.Delete(supplierOperatorAddrKey) sessionEndHeightStore.Delete(sessionEndHeightKey) + k.proofsCache.Delete(string(primaryKey)) logger.Info( fmt.Sprintf( @@ -120,7 +121,7 @@ func (k Keeper) GetAllProofs(ctx context.Context) (proofs []types.Proof) { for ; iterator.Valid(); iterator.Next() { var proof types.Proof k.cdc.MustUnmarshal(iterator.Value(), &proof) - k.cache.Proofs[proof.GetSessionHeader().GetSessionId()] = &proof + k.proofsCache.Set(string(iterator.Key()), proof) proofs = append(proofs, proof) } diff --git a/x/proof/types/account_query_client.go b/x/proof/types/account_query_client.go index 65da7061c..2ce4fa58c 100644 --- a/x/proof/types/account_query_client.go +++ b/x/proof/types/account_query_client.go @@ -3,6 +3,7 @@ package types import ( "context" fmt "fmt" + "sync" cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" "github.com/cosmos/cosmos-sdk/types" @@ -18,6 +19,7 @@ var _ client.AccountQueryClient = (*AccountKeeperQueryClient)(nil) type AccountKeeperQueryClient struct { keeper AccountKeeper accountPubKeyCache map[string]cryptotypes.PubKey + CacheMu *sync.RWMutex } // NewAccountKeeperQueryClient returns a new AccountQueryClient that is 
backed @@ -29,6 +31,7 @@ func NewAccountKeeperQueryClient(accountKeeper AccountKeeper) client.AccountQuer return &AccountKeeperQueryClient{ keeper: accountKeeper, accountPubKeyCache: make(map[string]cryptotypes.PubKey), + CacheMu: &sync.RWMutex{}, } } @@ -63,6 +66,8 @@ func (accountQueryClient *AccountKeeperQueryClient) GetPubKeyFromAddress( ctx context.Context, address string, ) (cryptotypes.PubKey, error) { + accountQueryClient.CacheMu.RLock() + defer accountQueryClient.CacheMu.RUnlock() if acc, found := accountQueryClient.accountPubKeyCache[address]; found { fmt.Println("-----PubKey cache hit-----") return acc, nil @@ -88,5 +93,7 @@ func (accountQueryClient *AccountKeeperQueryClient) GetPubKeyFromAddress( } func (accountQueryClient *AccountKeeperQueryClient) ClearCache() { + accountQueryClient.CacheMu.Lock() + defer accountQueryClient.CacheMu.Unlock() clear(accountQueryClient.accountPubKeyCache) } diff --git a/x/proof/types/cache.go b/x/proof/types/cache.go deleted file mode 100644 index 42c2bc010..000000000 --- a/x/proof/types/cache.go +++ /dev/null @@ -1,13 +0,0 @@ -package types - -type Cache struct { - Params *Params - Claims map[string]*Claim - Proofs map[string]*Proof -} - -func (c *Cache) Clear() { - c.Params = nil - clear(c.Claims) - clear(c.Proofs) -} diff --git a/x/service/keeper/keeper.go b/x/service/keeper/keeper.go index b7ec31bfd..56174c976 100644 --- a/x/service/keeper/keeper.go +++ b/x/service/keeper/keeper.go @@ -24,7 +24,9 @@ type ( bankKeeper types.BankKeeper - cache *types.Cache + servicesCache *sharedtypes.Cache[string, sharedtypes.Service] + relayMiningDifficultyCache *sharedtypes.Cache[string, types.RelayMiningDifficulty] + paramsCache *sharedtypes.Cache[string, types.Params] } ) @@ -48,15 +50,16 @@ func NewKeeper( bankKeeper: bankKeeper, - cache: &types.Cache{ - Services: make(map[string]*sharedtypes.Service), - RelayMiningDifficulty: make(map[string]*types.RelayMiningDifficulty), - }, + servicesCache: sharedtypes.NewCache[string, 
sharedtypes.Service](), + relayMiningDifficultyCache: sharedtypes.NewCache[string, types.RelayMiningDifficulty](), + paramsCache: sharedtypes.NewCache[string, types.Params](), } } func (k Keeper) ClearCache() { - k.cache.Clear() + k.servicesCache.Clear() + k.relayMiningDifficultyCache.Clear() + k.paramsCache.Clear() } // GetAuthority returns the module's authority. diff --git a/x/service/keeper/params.go b/x/service/keeper/params.go index 840c34f4c..d2e366853 100644 --- a/x/service/keeper/params.go +++ b/x/service/keeper/params.go @@ -10,9 +10,9 @@ import ( // GetParams get all parameters as types.Params func (k Keeper) GetParams(ctx context.Context) (params types.Params) { - if k.cache.Params != nil { + if params, found := k.paramsCache.Get(""); found { k.logger.Info("-----Service params cache hit-----") - return *k.cache.Params + return params } store := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) @@ -22,20 +22,19 @@ func (k Keeper) GetParams(ctx context.Context) (params types.Params) { } k.cdc.MustUnmarshal(paramsBz, ¶ms) - k.cache.Params = ¶ms + k.paramsCache.Set("", params) return params } // SetParams set the params func (k Keeper) SetParams(ctx context.Context, params types.Params) error { - k.cache.Params = ¶ms - store := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) paramsBz, err := k.cdc.Marshal(¶ms) if err != nil { return err } store.Set(types.ParamsKey, paramsBz) + k.paramsCache.Set("", params) return nil } diff --git a/x/service/keeper/relay_mining_difficulty.go b/x/service/keeper/relay_mining_difficulty.go index 9da817acc..70ae47701 100644 --- a/x/service/keeper/relay_mining_difficulty.go +++ b/x/service/keeper/relay_mining_difficulty.go @@ -13,13 +13,13 @@ import ( // SetRelayMiningDifficulty set a specific relayMiningDifficulty in the store from its index func (k Keeper) SetRelayMiningDifficulty(ctx context.Context, relayMiningDifficulty types.RelayMiningDifficulty) { - 
k.cache.RelayMiningDifficulty[relayMiningDifficulty.ServiceId] = &relayMiningDifficulty storeAdapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) store := prefix.NewStore(storeAdapter, types.KeyPrefix(types.RelayMiningDifficultyKeyPrefix)) difficultyBz := k.cdc.MustMarshal(&relayMiningDifficulty) store.Set(types.RelayMiningDifficultyKey( relayMiningDifficulty.ServiceId, ), difficultyBz) + k.relayMiningDifficultyCache.Set(relayMiningDifficulty.ServiceId, relayMiningDifficulty) } // GetRelayMiningDifficulty returns a relayMiningDifficulty from its index @@ -27,9 +27,9 @@ func (k Keeper) GetRelayMiningDifficulty( ctx context.Context, serviceId string, ) (difficulty types.RelayMiningDifficulty, found bool) { - if difficulty, found := k.cache.RelayMiningDifficulty[serviceId]; found { + if difficulty, found := k.relayMiningDifficultyCache.Get(serviceId); found { k.logger.Info("-----Difficulty cache hit-----") - return *difficulty, true + return difficulty, true } storeAdapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) store := prefix.NewStore(storeAdapter, types.KeyPrefix(types.RelayMiningDifficultyKeyPrefix)) @@ -52,7 +52,7 @@ func (k Keeper) GetRelayMiningDifficulty( } k.cdc.MustUnmarshal(difficultyBz, &difficulty) - k.cache.RelayMiningDifficulty[serviceId] = &difficulty + k.relayMiningDifficultyCache.Set(serviceId, difficulty) return difficulty, true } @@ -63,8 +63,6 @@ func (k Keeper) RemoveRelayMiningDifficulty( ) { logger := k.Logger().With("method", "RemoveRelayMiningDifficulty") - delete(k.cache.RelayMiningDifficulty, serviceId) - storeAdapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) store := prefix.NewStore(storeAdapter, types.KeyPrefix(types.RelayMiningDifficultyKeyPrefix)) difficultyKey := types.RelayMiningDifficultyKey( @@ -79,6 +77,7 @@ func (k Keeper) RemoveRelayMiningDifficulty( store.Delete(types.RelayMiningDifficultyKey( serviceId, )) + k.relayMiningDifficultyCache.Delete(serviceId) } // 
GetAllRelayMiningDifficulty returns all relayMiningDifficulty @@ -92,7 +91,7 @@ func (k Keeper) GetAllRelayMiningDifficulty(ctx context.Context) (list []types.R for ; iterator.Valid(); iterator.Next() { var difficulty types.RelayMiningDifficulty k.cdc.MustUnmarshal(iterator.Value(), &difficulty) - k.cache.RelayMiningDifficulty[difficulty.ServiceId] = &difficulty + k.relayMiningDifficultyCache.Set(difficulty.ServiceId, difficulty) list = append(list, difficulty) } diff --git a/x/service/keeper/service.go b/x/service/keeper/service.go index 2ccee3dc2..94bf9ecd4 100644 --- a/x/service/keeper/service.go +++ b/x/service/keeper/service.go @@ -13,11 +13,11 @@ import ( // SetService set a specific service in the store from its index func (k Keeper) SetService(ctx context.Context, service sharedtypes.Service) { - k.cache.Services[service.Id] = &service storeAdapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) store := prefix.NewStore(storeAdapter, types.KeyPrefix(types.ServiceKeyPrefix)) serviceBz := k.cdc.MustMarshal(&service) store.Set(types.ServiceKey(service.Id), serviceBz) + k.servicesCache.Set(service.Id, service) } // GetService returns a service from its index @@ -25,9 +25,9 @@ func (k Keeper) GetService( ctx context.Context, serviceId string, ) (service sharedtypes.Service, found bool) { - if service, found := k.cache.Services[service.Id]; found { + if service, found := k.servicesCache.Get(serviceId); found { k.logger.Info("-----Service cache hit-----") - return *service, true + return service, true } storeAdapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) store := prefix.NewStore(storeAdapter, types.KeyPrefix(types.ServiceKeyPrefix)) @@ -38,7 +38,7 @@ func (k Keeper) GetService( } k.cdc.MustUnmarshal(serviceBz, &service) - k.cache.Services[service.Id] = &service + k.servicesCache.Set(serviceId, service) return service, true } @@ -47,10 +47,10 @@ func (k Keeper) RemoveService( ctx context.Context, serviceId string, ) { - 
delete(k.cache.Services, serviceId) storeAdapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) store := prefix.NewStore(storeAdapter, types.KeyPrefix(types.ServiceKeyPrefix)) store.Delete(types.ServiceKey(serviceId)) + k.servicesCache.Delete(serviceId) } // GetAllServices returns all services @@ -64,7 +64,7 @@ func (k Keeper) GetAllServices(ctx context.Context) (services []sharedtypes.Serv for ; iterator.Valid(); iterator.Next() { var service sharedtypes.Service k.cdc.MustUnmarshal(iterator.Value(), &service) - k.cache.Services[service.Id] = &service + k.servicesCache.Set(service.Id, service) services = append(services, service) } diff --git a/x/service/types/cache.go b/x/service/types/cache.go deleted file mode 100644 index 0819e9038..000000000 --- a/x/service/types/cache.go +++ /dev/null @@ -1,15 +0,0 @@ -package types - -import sharedtypes "github.com/pokt-network/poktroll/x/shared/types" - -type Cache struct { - Params *Params - Services map[string]*sharedtypes.Service - RelayMiningDifficulty map[string]*RelayMiningDifficulty -} - -func (c *Cache) Clear() { - c.Params = nil - clear(c.Services) - clear(c.RelayMiningDifficulty) -} diff --git a/x/session/keeper/block_hash.go b/x/session/keeper/block_hash.go index 974773131..f0ad1e711 100644 --- a/x/session/keeper/block_hash.go +++ b/x/session/keeper/block_hash.go @@ -11,7 +11,7 @@ import ( // GetBlockHash returns the hash of the block at the given height. 
func (k Keeper) GetBlockHash(ctx context.Context, height int64) []byte { - if hash, found := k.cache.BlockHashes[height]; found { + if hash, found := k.blockHashesCache.Get(height); found { k.logger.Info("-----Blockhash cache hit-----") return hash } @@ -24,10 +24,11 @@ func (k Keeper) GetBlockHash(ctx context.Context, height int64) []byte { storeAdapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) store := prefix.NewStore(storeAdapter, types.KeyPrefix(types.BlockHashKeyPrefix)) blockHash := store.Get(types.BlockHashKey(height)) - k.cache.BlockHashes[height] = blockHash + k.blockHashesCache.Set(height, blockHash) return blockHash } func (k Keeper) ClearCache() { - k.cache.Clear() + k.blockHashesCache.Clear() + k.paramsCache.Clear() } diff --git a/x/session/keeper/keeper.go b/x/session/keeper/keeper.go index 7399fcec3..9a2884197 100644 --- a/x/session/keeper/keeper.go +++ b/x/session/keeper/keeper.go @@ -12,6 +12,7 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" "github.com/pokt-network/poktroll/x/session/types" + sharedtypes "github.com/pokt-network/poktroll/x/shared/types" ) type ( @@ -30,7 +31,8 @@ type ( supplierKeeper types.SupplierKeeper sharedKeeper types.SharedKeeper - cache *types.Cache + blockHashesCache *sharedtypes.Cache[int64, []byte] + paramsCache *sharedtypes.Cache[string, types.Params] } ) @@ -62,9 +64,8 @@ func NewKeeper( supplierKeeper: supplierKeeper, sharedKeeper: sharedKeeper, - cache: &types.Cache{ - BlockHashes: make(map[int64][]byte), - }, + blockHashesCache: sharedtypes.NewCache[int64, []byte](), + paramsCache: sharedtypes.NewCache[string, types.Params](), } } @@ -90,7 +91,7 @@ func (k Keeper) StoreBlockHash(goCtx context.Context) { // ctx.BlocHeight() is the height of the block being validated. 
height := ctx.BlockHeight() - k.cache.BlockHashes[height] = hash + k.blockHashesCache.Set(height, hash) storeAdapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(goCtx)) store := prefix.NewStore(storeAdapter, types.KeyPrefix(types.BlockHashKeyPrefix)) diff --git a/x/session/keeper/params.go b/x/session/keeper/params.go index f2c3b0a23..5ec9da118 100644 --- a/x/session/keeper/params.go +++ b/x/session/keeper/params.go @@ -10,9 +10,9 @@ import ( // GetParams get all parameters as types.Params func (k Keeper) GetParams(ctx context.Context) (params types.Params) { - if k.cache.Params != nil { + if params, found := k.paramsCache.Get(""); found { k.logger.Info("-----Session params cache hit-----") - return *k.cache.Params + return params } store := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) @@ -22,20 +22,19 @@ func (k Keeper) GetParams(ctx context.Context) (params types.Params) { } k.cdc.MustUnmarshal(paramsBz, ¶ms) - k.cache.Params = ¶ms + k.paramsCache.Set("", params) return params } // SetParams set the params func (k Keeper) SetParams(ctx context.Context, params types.Params) error { - k.cache.Params = ¶ms - store := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) paramsBz, err := k.cdc.Marshal(¶ms) if err != nil { return err } store.Set(types.ParamsKey, paramsBz) + k.paramsCache.Set("", params) return nil } diff --git a/x/session/types/cache.go b/x/session/types/cache.go deleted file mode 100644 index 24c19be58..000000000 --- a/x/session/types/cache.go +++ /dev/null @@ -1,11 +0,0 @@ -package types - -type Cache struct { - BlockHashes map[int64][]byte - Params *Params -} - -func (c *Cache) Clear() { - c.Params = nil - clear(c.BlockHashes) -} diff --git a/x/shared/keeper/keeper.go b/x/shared/keeper/keeper.go index 13dbf3a9b..ef5441a7c 100644 --- a/x/shared/keeper/keeper.go +++ b/x/shared/keeper/keeper.go @@ -21,7 +21,7 @@ type ( // should be the x/gov module account. 
authority string - cache *types.Cache + paramsCache *types.Cache[string, types.Params] } ) @@ -42,7 +42,7 @@ func NewKeeper( authority: authority, logger: logger, - cache: &types.Cache{}, + paramsCache: types.NewCache[string, types.Params](), } } diff --git a/x/shared/keeper/params.go b/x/shared/keeper/params.go index 70e31a121..8233eabc0 100644 --- a/x/shared/keeper/params.go +++ b/x/shared/keeper/params.go @@ -10,9 +10,9 @@ import ( // GetParams get all parameters as types.Params func (k Keeper) GetParams(ctx context.Context) (params types.Params) { - if k.cache.Params != nil { + if params, found := k.paramsCache.Get(""); found { k.logger.Info("-----Shared params cache hit-----") - return *k.cache.Params + return params } store := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) @@ -22,24 +22,23 @@ func (k Keeper) GetParams(ctx context.Context) (params types.Params) { } k.cdc.MustUnmarshal(bz, ¶ms) - k.cache.Params = ¶ms + k.paramsCache.Set("", params) return params } // SetParams set the params func (k Keeper) SetParams(ctx context.Context, params types.Params) error { - k.cache.Params = ¶ms - store := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) bz, err := k.cdc.Marshal(¶ms) if err != nil { return err } store.Set(types.ParamsKey, bz) + k.paramsCache.Set("", params) return nil } func (k Keeper) ClearCache() { - k.cache.Clear() + k.paramsCache.Clear() } diff --git a/x/shared/types/cache.go b/x/shared/types/cache.go index cee8c58b8..e1db4eebc 100644 --- a/x/shared/types/cache.go +++ b/x/shared/types/cache.go @@ -1,9 +1,40 @@ package types -type Cache struct { - Params *Params +import "sync" + +type Cache[K comparable, V any] struct { + store map[K]V + cacheMu *sync.RWMutex +} + +func (c *Cache[K, V]) Get(key K) (V, bool) { + c.cacheMu.RLock() + defer c.cacheMu.RUnlock() + val, ok := c.store[key] + return val, ok +} + +func (c *Cache[K, V]) Set(key K, val V) { + c.cacheMu.Lock() + defer c.cacheMu.Unlock() + c.store[key] = val +} + +func (c 
*Cache[K, V]) Delete(key K) { + c.cacheMu.Lock() + defer c.cacheMu.Unlock() + delete(c.store, key) +} + +func (c *Cache[K, V]) Clear() { + c.cacheMu.Lock() + defer c.cacheMu.Unlock() + clear(c.store) } -func (c *Cache) Clear() { - c.Params = nil +func NewCache[K comparable, V any]() *Cache[K, V] { + return &Cache[K, V]{ + store: make(map[K]V), + cacheMu: &sync.RWMutex{}, + } } diff --git a/x/supplier/keeper/keeper.go b/x/supplier/keeper/keeper.go index 073e57664..9825fb41f 100644 --- a/x/supplier/keeper/keeper.go +++ b/x/supplier/keeper/keeper.go @@ -26,7 +26,8 @@ type ( sharedKeeper types.SharedKeeper serviceKeeper types.ServiceKeeper - cache types.Cache + suppliersCache *sharedtypes.Cache[string, sharedtypes.Supplier] + paramsCache *sharedtypes.Cache[string, types.Params] } ) @@ -54,9 +55,8 @@ func NewKeeper( sharedKeeper: sharedKeeper, serviceKeeper: serviceKeeper, - cache: types.Cache{ - Suppliers: make(map[string]*sharedtypes.Supplier), - }, + suppliersCache: sharedtypes.NewCache[string, sharedtypes.Supplier](), + paramsCache: sharedtypes.NewCache[string, types.Params](), } } diff --git a/x/supplier/keeper/params.go b/x/supplier/keeper/params.go index a64416dd7..c6489c2c3 100644 --- a/x/supplier/keeper/params.go +++ b/x/supplier/keeper/params.go @@ -10,9 +10,9 @@ import ( // GetParams get all parameters as types.Params func (k Keeper) GetParams(ctx context.Context) (params types.Params) { - if k.cache.Params != nil { + if params, found := k.paramsCache.Get(""); found { k.logger.Info("-----Supplier params cache hit-----") - return *k.cache.Params + return params } store := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) @@ -22,19 +22,19 @@ func (k Keeper) GetParams(ctx context.Context) (params types.Params) { } k.cdc.MustUnmarshal(paramsBz, ¶ms) - k.cache.Params = ¶ms + k.paramsCache.Set("", params) return params } // SetParams set the params func (k Keeper) SetParams(ctx context.Context, params types.Params) error { - k.cache.Params = ¶ms store := 
runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) paramsBz, err := k.cdc.Marshal(¶ms) if err != nil { return err } store.Set(types.ParamsKey, paramsBz) + k.paramsCache.Set("", params) return nil } diff --git a/x/supplier/keeper/supplier.go b/x/supplier/keeper/supplier.go index 8b9aa8ff6..70ab976ab 100644 --- a/x/supplier/keeper/supplier.go +++ b/x/supplier/keeper/supplier.go @@ -13,13 +13,13 @@ import ( // SetSupplier set a specific supplier in the store from its index func (k Keeper) SetSupplier(ctx context.Context, supplier sharedtypes.Supplier) { - k.cache.Suppliers[supplier.OperatorAddress] = &supplier storeAdapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) store := prefix.NewStore(storeAdapter, types.KeyPrefix(types.SupplierKeyOperatorPrefix)) supplierBz := k.cdc.MustMarshal(&supplier) store.Set(types.SupplierOperatorKey( supplier.OperatorAddress, ), supplierBz) + k.suppliersCache.Set(supplier.OperatorAddress, supplier) } // GetSupplier returns a supplier from its index @@ -27,9 +27,9 @@ func (k Keeper) GetSupplier( ctx context.Context, supplierOperatorAddr string, ) (supplier sharedtypes.Supplier, found bool) { - if supplier, found := k.cache.Suppliers[supplierOperatorAddr]; found { + if supplier, found := k.suppliersCache.Get(supplierOperatorAddr); found { k.logger.Info("-----Supplier cache hit-----") - return *supplier, true + return supplier, true } storeAdapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) @@ -41,16 +41,16 @@ func (k Keeper) GetSupplier( } k.cdc.MustUnmarshal(supplierBz, &supplier) - k.cache.Suppliers[supplier.OperatorAddress] = &supplier + k.suppliersCache.Set(supplier.OperatorAddress, supplier) return supplier, true } // RemoveSupplier removes a supplier from the store func (k Keeper) RemoveSupplier(ctx context.Context, supplierOperatorAddress string) { - delete(k.cache.Suppliers, supplierOperatorAddress) storeAdapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) store := 
prefix.NewStore(storeAdapter, types.KeyPrefix(types.SupplierKeyOperatorPrefix)) store.Delete(types.SupplierOperatorKey(supplierOperatorAddress)) + k.suppliersCache.Delete(supplierOperatorAddress) } // GetAllSuppliers returns all supplier @@ -64,7 +64,7 @@ func (k Keeper) GetAllSuppliers(ctx context.Context) (suppliers []sharedtypes.Su for ; iterator.Valid(); iterator.Next() { var supplier sharedtypes.Supplier k.cdc.MustUnmarshal(iterator.Value(), &supplier) - k.cache.Suppliers[supplier.OperatorAddress] = &supplier + k.suppliersCache.Set(supplier.OperatorAddress, supplier) suppliers = append(suppliers, supplier) } @@ -72,7 +72,7 @@ func (k Keeper) GetAllSuppliers(ctx context.Context) (suppliers []sharedtypes.Su } func (k Keeper) ClearCache() { - k.cache.Clear() + k.suppliersCache.Clear() } // TODO_OPTIMIZE: Index suppliers by service ID diff --git a/x/supplier/types/cache.go b/x/supplier/types/cache.go deleted file mode 100644 index dcbd40fa2..000000000 --- a/x/supplier/types/cache.go +++ /dev/null @@ -1,13 +0,0 @@ -package types - -import sharedtypes "github.com/pokt-network/poktroll/x/shared/types" - -type Cache struct { - Params *Params - Suppliers map[string]*sharedtypes.Supplier -} - -func (c *Cache) Clear() { - c.Params = nil - clear(c.Suppliers) -} diff --git a/x/tokenomics/keeper/keeper.go b/x/tokenomics/keeper/keeper.go index c7ba36789..435caf8af 100644 --- a/x/tokenomics/keeper/keeper.go +++ b/x/tokenomics/keeper/keeper.go @@ -10,6 +10,7 @@ import ( "github.com/pokt-network/poktroll/pkg/client" prooftypes "github.com/pokt-network/poktroll/x/proof/types" + sharedtypes "github.com/pokt-network/poktroll/x/shared/types" tlm "github.com/pokt-network/poktroll/x/tokenomics/token_logic_module" "github.com/pokt-network/poktroll/x/tokenomics/types" ) @@ -37,7 +38,7 @@ type Keeper struct { tokenLogicModules []tlm.TokenLogicModule - cache *types.Cache + paramsCache *sharedtypes.Cache[string, types.Params] } func NewKeeper( @@ -84,7 +85,7 @@ func NewKeeper( 
sharedQuerier: sharedQuerier, tokenLogicModules: tokenLogicModules, - cache: &types.Cache{}, + paramsCache: sharedtypes.NewCache[string, types.Params](), } } diff --git a/x/tokenomics/keeper/params.go b/x/tokenomics/keeper/params.go index ad77b4c8f..968b5fbd6 100644 --- a/x/tokenomics/keeper/params.go +++ b/x/tokenomics/keeper/params.go @@ -10,9 +10,9 @@ import ( // GetParams get all parameters as types.Params func (k Keeper) GetParams(ctx context.Context) (params types.Params) { - if k.cache.Params != nil { + if params, found := k.paramsCache.Get(""); found { k.logger.Info("-----Tokenomics params cache hit-----") - return *k.cache.Params + return params } store := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) paramsBz := store.Get(types.ParamsKey) @@ -21,25 +21,25 @@ func (k Keeper) GetParams(ctx context.Context) (params types.Params) { } k.cdc.MustUnmarshal(paramsBz, ¶ms) - k.cache.Params = ¶ms + k.paramsCache.Set("", params) return params } // SetParams set the params func (k Keeper) SetParams(ctx context.Context, params types.Params) error { - k.cache.Params = ¶ms store := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) paramsBz, err := k.cdc.Marshal(¶ms) if err != nil { return err } store.Set(types.ParamsKey, paramsBz) + k.paramsCache.Set("", params) return nil } func (k Keeper) ClearCache() { - k.cache.Clear() + k.paramsCache.Clear() k.applicationKeeper.ClearCache() k.supplierKeeper.ClearCache() k.sharedKeeper.ClearCache() diff --git a/x/tokenomics/types/cache.go b/x/tokenomics/types/cache.go deleted file mode 100644 index cee8c58b8..000000000 --- a/x/tokenomics/types/cache.go +++ /dev/null @@ -1,9 +0,0 @@ -package types - -type Cache struct { - Params *Params -} - -func (c *Cache) Clear() { - c.Params = nil -} From 49e013eb445d4b72e11eab1577871bfb1a52db99 Mon Sep 17 00:00:00 2001 From: Redouane Lakrache Date: Sat, 25 Jan 2025 20:44:37 +0100 Subject: [PATCH 13/24] increase http request bytes --- config.yml | 4 ++-- 
load-testing/tests/relays_stress_helpers_test.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/config.yml b/config.yml index cbd3fbe25..76eacf858 100644 --- a/config.yml +++ b/config.yml @@ -111,13 +111,13 @@ validators: rpc: # Controls how large any single RPC request accepted by the CometBFT # server (offchain) can be. - max_body_bytes: "100000000" + max_body_bytes: "10000000000" mempool: # Control how big any single transaction accepted by the CometBFT server # (offchain) can be. # Since multiple messages are bundled into a single transaction, # max_tx_bytes needs to be increased alongside max_txs_bytes as well. - max_tx_bytes: "100000000" + max_tx_bytes: "100000000000" client: chain-id: poktroll diff --git a/load-testing/tests/relays_stress_helpers_test.go b/load-testing/tests/relays_stress_helpers_test.go index 48d8c352b..6796002f1 100644 --- a/load-testing/tests/relays_stress_helpers_test.go +++ b/load-testing/tests/relays_stress_helpers_test.go @@ -1556,14 +1556,14 @@ func (s *relaysSuite) forEachRelayBatchSendBatch(_ context.Context, relayBatchIn // each sending relayRatePerApp relays per second. relaysPerSec := len(relayBatchInfo.appAccounts) * int(s.relayRatePerApp) // Determine the interval between each relay request. 
- relayInterval := time.Second / time.Duration(85) + relayInterval := time.Second / time.Duration(20) batchWaitGroup := new(sync.WaitGroup) batchWaitGroup.Add(relaysPerSec * int(blockDurationSec)) now := time.Now() - for i := 0; i < 50000; i++ { + for i := 0; i < 10000; i++ { iterationTime := now.Add(time.Duration(i+1) * relayInterval) batchLimiter.Go(s.ctx, func() { From 80145cce180fffe619c29e934908584ec84b1e40 Mon Sep 17 00:00:00 2001 From: Redouane Lakrache Date: Sat, 25 Jan 2025 21:57:35 +0100 Subject: [PATCH 14/24] increase and log tx size --- config.yml | 7 +++++-- pkg/client/tx/client.go | 1 + 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/config.yml b/config.yml index 76eacf858..162746345 100644 --- a/config.yml +++ b/config.yml @@ -96,6 +96,8 @@ validators: # "high" produces a lot of timeseries. # ONLY suitable for small networks such as LocalNet. cardinality-level: high + max-recv-msg-size: "1000000000000" + max-send-msg-size: "1000000000000" config: moniker: "validator1" consensus: @@ -111,13 +113,14 @@ validators: rpc: # Controls how large any single RPC request accepted by the CometBFT # server (offchain) can be. - max_body_bytes: "10000000000" + max_body_bytes: "1000000000000" mempool: # Control how big any single transaction accepted by the CometBFT server # (offchain) can be. # Since multiple messages are bundled into a single transaction, # max_tx_bytes needs to be increased alongside max_txs_bytes as well. 
- max_tx_bytes: "100000000000" + max_tx_bytes: "10000000000000" + max_txs_bytes: "100000000000000" client: chain-id: poktroll diff --git a/pkg/client/tx/client.go b/pkg/client/tx/client.go index a1f311dfc..f2e1bca36 100644 --- a/pkg/client/tx/client.go +++ b/pkg/client/tx/client.go @@ -290,6 +290,7 @@ func (txnClient *txClient) SignAndBroadcast( return either.SyncErr(err) } + fmt.Printf("----Tx size: %d\n", len(txBz)) txResponse, err := txnClient.txCtx.BroadcastTx(txBz) if err != nil { return either.SyncErr(err) From 345c378f017ad455851b04dc4113758fb9ca6cec Mon Sep 17 00:00:00 2001 From: Redouane Lakrache Date: Sun, 26 Jan 2025 00:02:30 +0100 Subject: [PATCH 15/24] change send rate --- load-testing/tests/relays_stress_helpers_test.go | 2 +- pkg/client/tx/client.go | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/load-testing/tests/relays_stress_helpers_test.go b/load-testing/tests/relays_stress_helpers_test.go index 6796002f1..0cb0a7fac 100644 --- a/load-testing/tests/relays_stress_helpers_test.go +++ b/load-testing/tests/relays_stress_helpers_test.go @@ -1563,7 +1563,7 @@ func (s *relaysSuite) forEachRelayBatchSendBatch(_ context.Context, relayBatchIn now := time.Now() - for i := 0; i < 10000; i++ { + for i := 0; i < 11000; i++ { iterationTime := now.Add(time.Duration(i+1) * relayInterval) batchLimiter.Go(s.ctx, func() { diff --git a/pkg/client/tx/client.go b/pkg/client/tx/client.go index f2e1bca36..a1f311dfc 100644 --- a/pkg/client/tx/client.go +++ b/pkg/client/tx/client.go @@ -290,7 +290,6 @@ func (txnClient *txClient) SignAndBroadcast( return either.SyncErr(err) } - fmt.Printf("----Tx size: %d\n", len(txBz)) txResponse, err := txnClient.txCtx.BroadcastTx(txBz) if err != nil { return either.SyncErr(err) From ef9c8b739d08f07ff21d78e822e2fe84f1088c5a Mon Sep 17 00:00:00 2001 From: Redouane Lakrache Date: Sun, 26 Jan 2025 13:47:01 +0100 Subject: [PATCH 16/24] pipline c&p creation --- pkg/relayer/session/claim.go | 28 ++++++--- 
pkg/relayer/session/proof.go | 112 ++++++++++++++++++++--------------- 2 files changed, 85 insertions(+), 55 deletions(-) diff --git a/pkg/relayer/session/claim.go b/pkg/relayer/session/claim.go index 4d09fb2a7..2278c9cd8 100644 --- a/pkg/relayer/session/claim.go +++ b/pkg/relayer/session/claim.go @@ -3,7 +3,9 @@ package session import ( "context" "fmt" + "runtime" "slices" + "sync" sdktypes "github.com/cosmos/cosmos-sdk/types" "github.com/pokt-network/smt" @@ -231,22 +233,34 @@ func (rs *relayerSessionsManager) goCreateClaimRoots( ) { failedClaims := []relayer.SessionTree{} flushedClaims := []relayer.SessionTree{} + wg := sync.WaitGroup{} + sem := make(chan struct{}, runtime.NumCPU()) for _, sessionTree := range sessionTrees { select { case <-ctx.Done(): return default: } - // This session should no longer be updated - if _, err := sessionTree.Flush(); err != nil { - rs.logger.Error().Err(err).Msg("failed to flush session") - failedClaims = append(failedClaims, sessionTree) - continue - } - flushedClaims = append(flushedClaims, sessionTree) + sem <- struct{}{} + wg.Add(1) + + go func(tree relayer.SessionTree) { + defer wg.Done() + defer func() { <-sem }() + // This session should no longer be updated + if _, err := tree.Flush(); err != nil { + rs.logger.Error().Err(err).Msg("failed to flush session") + failedClaims = append(failedClaims, tree) + return + } + + flushedClaims = append(flushedClaims, tree) + }(sessionTree) } + wg.Wait() + failSubmitProofsSessionsCh <- failedClaims claimsFlushedCh <- flushedClaims } diff --git a/pkg/relayer/session/proof.go b/pkg/relayer/session/proof.go index 728615cfb..075019771 100644 --- a/pkg/relayer/session/proof.go +++ b/pkg/relayer/session/proof.go @@ -3,6 +3,8 @@ package session import ( "context" "fmt" + "runtime" + "sync" "github.com/pokt-network/poktroll/pkg/client" "github.com/pokt-network/poktroll/pkg/crypto/protocol" @@ -183,19 +185,24 @@ func (rs *relayerSessionsManager) newMapProveSessionsFn( return 
either.Error[[]relayer.SessionTree](err), false } - for _, sessionTree := range sessionTrees { - rs.removeFromRelayerSessions(sessionTree) - if err := sessionTree.Delete(); err != nil { - // Do not fail the entire operation if a session tree cannot be deleted - // as this does not affect the C&P lifecycle. - rs.logger.Error().Err(err).Msg("failed to delete session tree") - } - } + go rs.removeSessions(sessionTrees) return either.Success(sessionTrees), false } } +// removeSessions deletes the session trees from the KVStore. +func (rs *relayerSessionsManager) removeSessions(sessionTrees []relayer.SessionTree) { + for _, sessionTree := range sessionTrees { + rs.removeFromRelayerSessions(sessionTree) + if err := sessionTree.Delete(); err != nil { + // Do not fail the entire operation if a session tree cannot be deleted + // as this does not affect the C&P lifecycle. + rs.logger.Error().Err(err).Msg("failed to delete session tree") + } + } +} + // proveClaims generates the proofs corresponding to the given sessionTrees, // then sends the successful and failed proofs to their respective channels. func (rs *relayerSessionsManager) proveClaims( @@ -205,50 +212,59 @@ func (rs *relayerSessionsManager) proveClaims( // should be generated for. proofPathSeedBlock client.Block, ) (successProofs []relayer.SessionTree, failedProofs []relayer.SessionTree) { - logger := rs.logger.With("method", "goProveClaims") + logger := rs.logger.With("method", "proveClaims") - // sessionTreesWithProofRequired will accumulate all the sessionTrees that - // will require a proof to be submitted. - sessionTreesWithProofRequired := make([]relayer.SessionTree, 0) - for _, sessionTree := range sessionTrees { - isProofRequired, err := rs.isProofRequired(ctx, sessionTree, proofPathSeedBlock) - - // If an error is encountered while determining if a proof is required, - // do not create the claim since the proof requirement is unknown. 
- // WARNING: Creating a claim and not submitting a proof (if necessary) could lead to a stake burn!! - if err != nil { - failedProofs = append(failedProofs, sessionTree) - rs.logger.Error().Err(err).Msg("failed to determine if proof is required, skipping claim creation") - continue - } + proofsMu := sync.Mutex{} + wg := sync.WaitGroup{} + sem := make(chan struct{}, runtime.NumCPU()) - // If a proof is required, add the session to the list of sessions that require a proof. - if isProofRequired { - sessionTreesWithProofRequired = append(sessionTreesWithProofRequired, sessionTree) - } - } + for _, sessionTree := range sessionTrees { + sem <- struct{}{} + wg.Add(1) + go func(tree relayer.SessionTree) { + defer wg.Done() + defer func() { <-sem }() + + isProofRequired, err := rs.isProofRequired(ctx, tree, proofPathSeedBlock) + + // If an error is encountered while determining if a proof is required, + // do not create the claim since the proof requirement is unknown. + // WARNING: Creating a claim and not submitting a proof (if necessary) could lead to a stake burn!! + if err != nil { + proofsMu.Lock() + failedProofs = append(failedProofs, tree) + proofsMu.Unlock() + rs.logger.Error().Err(err).Msg("failed to determine if proof is required, skipping claim creation") + return + } - // Separate the sessionTrees into those that failed to generate a proof - // and those that succeeded, before returning each of them. - for _, sessionTree := range sessionTreesWithProofRequired { - // Generate the proof path for the sessionTree using the previously committed - // sessionPathBlock hash. - path := protocol.GetPathForProof( - proofPathSeedBlock.Hash(), - sessionTree.GetSessionHeader().GetSessionId(), - ) - - // If the proof cannot be generated, add the sessionTree to the failedProofs. 
- if _, err := sessionTree.ProveClosest(path); err != nil { - logger.Error().Err(err).Msg("failed to generate proof") - - failedProofs = append(failedProofs, sessionTree) - continue - } + // If a proof is required, add the session to the list of sessions that require a proof. + if isProofRequired { + // Generate the proof path for the sessionTree using the previously committed + // sessionPathBlock hash. + path := protocol.GetPathForProof( + proofPathSeedBlock.Hash(), + tree.GetSessionHeader().GetSessionId(), + ) + + // If the proof cannot be generated, add the sessionTree to the failedProofs. + if _, err := tree.ProveClosest(path); err != nil { + proofsMu.Lock() + failedProofs = append(failedProofs, tree) + proofsMu.Unlock() + logger.Error().Err(err).Msg("failed to generate proof") + return + } + + // If the proof was generated successfully, add the sessionTree to the + // successProofs slice that will be sent to the proof submission step. + proofsMu.Lock() + successProofs = append(successProofs, tree) + proofsMu.Unlock() + } + }(sessionTree) - // If the proof was generated successfully, add the sessionTree to the - // successProofs slice that will be sent to the proof submission step. 
- successProofs = append(successProofs, sessionTree) + wg.Wait() } return successProofs, failedProofs From 21cd1d56c9002fd87bc1d011a12ea203ddade5ab Mon Sep 17 00:00:00 2001 From: Redouane Lakrache Date: Sun, 26 Jan 2025 19:52:36 +0100 Subject: [PATCH 17/24] disable accounts deletion --- .../tests/relays_stress_helpers_test.go | 2 +- load-testing/tests/relays_stress_test.go | 28 +++++++++---------- localnet/poktrolld/config/app.toml | 2 ++ localnet/poktrolld/config/config.toml | 6 ++-- supplier_stake_config.yaml | 10 +++++++ 5 files changed, 30 insertions(+), 18 deletions(-) create mode 100644 supplier_stake_config.yaml diff --git a/load-testing/tests/relays_stress_helpers_test.go b/load-testing/tests/relays_stress_helpers_test.go index 0cb0a7fac..6796002f1 100644 --- a/load-testing/tests/relays_stress_helpers_test.go +++ b/load-testing/tests/relays_stress_helpers_test.go @@ -1563,7 +1563,7 @@ func (s *relaysSuite) forEachRelayBatchSendBatch(_ context.Context, relayBatchIn now := time.Now() - for i := 0; i < 11000; i++ { + for i := 0; i < 10000; i++ { iterationTime := now.Add(time.Duration(i+1) * relayInterval) batchLimiter.Go(s.ctx, func() { diff --git a/load-testing/tests/relays_stress_test.go b/load-testing/tests/relays_stress_test.go index 3bc24b96c..5255031bf 100644 --- a/load-testing/tests/relays_stress_test.go +++ b/load-testing/tests/relays_stress_test.go @@ -263,26 +263,26 @@ func (s *relaysSuite) LocalnetIsRunning() { // Delete the keyring entries for the application accounts since they are // not persisted across test runs. signals.GoOnExitSignal(func() { - for _, app := range append(s.activeApplications, s.preparedApplications...) { - accAddress := sdk.MustAccAddressFromBech32(app.address) + //for _, app := range append(s.activeApplications, s.preparedApplications...) 
{ + // accAddress := sdk.MustAccAddressFromBech32(app.address) - _ = s.txContext.GetKeyring().DeleteByAddress(accAddress) - } - s.cancelCtx() + // _ = s.txContext.GetKeyring().DeleteByAddress(accAddress) + //} + //s.cancelCtx() }) s.Cleanup(func() { - for _, app := range s.activeApplications { - accAddress := sdk.MustAccAddressFromBech32(app.address) + //for _, app := range s.activeApplications { + // accAddress := sdk.MustAccAddressFromBech32(app.address) - s.txContext.GetKeyring().DeleteByAddress(accAddress) - } - for _, app := range s.preparedApplications { - accAddress, err := sdk.AccAddressFromBech32(app.address) - require.NoError(s, err) + // s.txContext.GetKeyring().DeleteByAddress(accAddress) + //} + //for _, app := range s.preparedApplications { + // accAddress, err := sdk.AccAddressFromBech32(app.address) + // require.NoError(s, err) - s.txContext.GetKeyring().DeleteByAddress(accAddress) - } + // s.txContext.GetKeyring().DeleteByAddress(accAddress) + //} }) // Initialize the provisioned gateway and suppliers address->URL map that will diff --git a/localnet/poktrolld/config/app.toml b/localnet/poktrolld/config/app.toml index d5135735e..60a87da0d 100644 --- a/localnet/poktrolld/config/app.toml +++ b/localnet/poktrolld/config/app.toml @@ -5,6 +5,8 @@ iavl-cache-size = 781250 iavl-disable-fastnode = false index-events = [] inter-block-cache = true +max-recv-msg-size = "1000000000000" +max-send-msg-size = "1000000000000" min-retain-blocks = 0 minimum-gas-prices = "0upokt" pruning = "nothing" diff --git a/localnet/poktrolld/config/config.toml b/localnet/poktrolld/config/config.toml index 35dcda640..378b7529a 100644 --- a/localnet/poktrolld/config/config.toml +++ b/localnet/poktrolld/config/config.toml @@ -174,7 +174,7 @@ timeout_broadcast_tx_commit = "10s" max_request_batch_size = 10 # Maximum size of request body, in bytes -max_body_bytes = 100000000 +max_body_bytes = 1000000000000 # Maximum size of request header, in bytes max_header_bytes = 1048576 @@ -318,7 
+318,7 @@ size = 5000 # Limit the total size of all txs in the mempool. # This only accounts for raw transactions (e.g. given 1MB transactions and # max_txs_bytes=5MB, mempool will only accept 5 transactions). -max_txs_bytes = 1073741824 +max_txs_bytes = 100000000000000 # Size of the cache (used to filter transactions we saw earlier) in transactions cache_size = 10000 @@ -330,7 +330,7 @@ keep-invalid-txs-in-cache = false # Maximum size of a single transaction. # NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. -max_tx_bytes = 100000000 +max_tx_bytes = 10000000000000 # Maximum size of a batch of transactions to send to a peer # Including space needed by encoding (one varint per transaction). diff --git a/supplier_stake_config.yaml b/supplier_stake_config.yaml new file mode 100644 index 000000000..9cd27c3dd --- /dev/null +++ b/supplier_stake_config.yaml @@ -0,0 +1,10 @@ +owner_address: pokt19a3t4yunp0dlpfjrp7qwnzwlrzd5fzs2gjaaaj +operator_address: pokt19a3t4yunp0dlpfjrp7qwnzwlrzd5fzs2gjaaaj +stake_amount: 1000069upokt +default_rev_share_percent: + pokt19a3t4yunp0dlpfjrp7qwnzwlrzd5fzs2gjaaaj: 100 +services: + - service_id: anvil + endpoints: + - publicly_exposed_url: http://relayminer1:8545 + rpc_type: JSON_RPC From abef59ca2a807feef5aba41c849eb75b50f9dc6a Mon Sep 17 00:00:00 2001 From: Redouane Lakrache Date: Mon, 27 Jan 2025 01:16:40 +0100 Subject: [PATCH 18/24] increase send relay rate --- load-testing/tests/relays_stress_helpers_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/load-testing/tests/relays_stress_helpers_test.go b/load-testing/tests/relays_stress_helpers_test.go index 6796002f1..a3a74a21a 100644 --- a/load-testing/tests/relays_stress_helpers_test.go +++ b/load-testing/tests/relays_stress_helpers_test.go @@ -1556,7 +1556,7 @@ func (s *relaysSuite) forEachRelayBatchSendBatch(_ context.Context, relayBatchIn // each sending relayRatePerApp relays per second. 
relaysPerSec := len(relayBatchInfo.appAccounts) * int(s.relayRatePerApp) // Determine the interval between each relay request. - relayInterval := time.Second / time.Duration(20) + relayInterval := time.Second / time.Duration(25) batchWaitGroup := new(sync.WaitGroup) batchWaitGroup.Add(relaysPerSec * int(blockDurationSec)) From 43a9a3d4a3dfebb57a134458c1ac2b005d621aca Mon Sep 17 00:00:00 2001 From: Redouane Lakrache Date: Wed, 29 Jan 2025 12:57:41 +0100 Subject: [PATCH 19/24] chore: Address reivew change requests --- api/poktroll/proof/event.pulsar.go | 9 +- api/poktroll/proof/types.pulsar.go | 146 ++++++++++--------- proto/poktroll/proof/event.proto | 5 +- proto/poktroll/proof/types.proto | 21 +-- testutil/testtree/tree.go | 2 +- x/proof/keeper/msg_server_submit_proof.go | 23 ++- x/proof/keeper/proof_validation.go | 30 ++-- x/proof/keeper/validate_proofs.go | 82 +++++------ x/proof/module/abci.go | 6 +- x/proof/types/event.pb.go | 9 +- x/proof/types/types.pb.go | 118 +++++++-------- x/tokenomics/keeper/settle_pending_claims.go | 18 +-- 12 files changed, 240 insertions(+), 229 deletions(-) diff --git a/api/poktroll/proof/event.pulsar.go b/api/poktroll/proof/event.pulsar.go index dd7b048a8..fa9a83ea1 100644 --- a/api/poktroll/proof/event.pulsar.go +++ b/api/poktroll/proof/event.pulsar.go @@ -3698,7 +3698,8 @@ func (x *EventProofUpdated) GetClaimedUpokt() *v1beta1.Coin { return nil } -// Event emitted after a proof has been checked for validity. +// Event emitted after a proof has been checked for validity in the proof module's +// EndBlocker. 
type EventProofValidityChecked struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3707,7 +3708,9 @@ type EventProofValidityChecked struct { Proof *Proof `protobuf:"bytes,1,opt,name=proof,proto3" json:"proof,omitempty"` BlockHeight uint64 `protobuf:"varint,2,opt,name=block_height,json=blockHeight,proto3" json:"block_height,omitempty"` ProofStatus ClaimProofStatus `protobuf:"varint,3,opt,name=proof_status,json=proofStatus,proto3,enum=poktroll.proof.ClaimProofStatus" json:"proof_status,omitempty"` - Reason string `protobuf:"bytes,4,opt,name=reason,proto3" json:"reason,omitempty"` + // reason is the string representation of the error that led to the proof being + // marked as invalid (e.g. "invalid closest merkle proof", "invalid relay request signature") + Reason string `protobuf:"bytes,4,opt,name=reason,proto3" json:"reason,omitempty"` } func (x *EventProofValidityChecked) Reset() { @@ -3748,7 +3751,7 @@ func (x *EventProofValidityChecked) GetProofStatus() ClaimProofStatus { if x != nil { return x.ProofStatus } - return ClaimProofStatus_NOT_FOUND + return ClaimProofStatus_PENDING_VALIDATION } func (x *EventProofValidityChecked) GetReason() string { diff --git a/api/poktroll/proof/types.pulsar.go b/api/poktroll/proof/types.pulsar.go index 2498a8038..a49834e88 100644 --- a/api/poktroll/proof/types.pulsar.go +++ b/api/poktroll/proof/types.pulsar.go @@ -585,7 +585,7 @@ var ( fd_Claim_supplier_operator_address protoreflect.FieldDescriptor fd_Claim_session_header protoreflect.FieldDescriptor fd_Claim_root_hash protoreflect.FieldDescriptor - fd_Claim_proof_status protoreflect.FieldDescriptor + fd_Claim_proof_validation_status protoreflect.FieldDescriptor ) func init() { @@ -594,7 +594,7 @@ func init() { fd_Claim_supplier_operator_address = md_Claim.Fields().ByName("supplier_operator_address") fd_Claim_session_header = md_Claim.Fields().ByName("session_header") fd_Claim_root_hash = md_Claim.Fields().ByName("root_hash") - fd_Claim_proof_status = 
md_Claim.Fields().ByName("proof_status") + fd_Claim_proof_validation_status = md_Claim.Fields().ByName("proof_validation_status") } var _ protoreflect.Message = (*fastReflection_Claim)(nil) @@ -680,9 +680,9 @@ func (x *fastReflection_Claim) Range(f func(protoreflect.FieldDescriptor, protor return } } - if x.ProofStatus != 0 { - value := protoreflect.ValueOfEnum((protoreflect.EnumNumber)(x.ProofStatus)) - if !f(fd_Claim_proof_status, value) { + if x.ProofValidationStatus != 0 { + value := protoreflect.ValueOfEnum((protoreflect.EnumNumber)(x.ProofValidationStatus)) + if !f(fd_Claim_proof_validation_status, value) { return } } @@ -707,8 +707,8 @@ func (x *fastReflection_Claim) Has(fd protoreflect.FieldDescriptor) bool { return x.SessionHeader != nil case "poktroll.proof.Claim.root_hash": return len(x.RootHash) != 0 - case "poktroll.proof.Claim.proof_status": - return x.ProofStatus != 0 + case "poktroll.proof.Claim.proof_validation_status": + return x.ProofValidationStatus != 0 default: if fd.IsExtension() { panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.proof.Claim")) @@ -731,8 +731,8 @@ func (x *fastReflection_Claim) Clear(fd protoreflect.FieldDescriptor) { x.SessionHeader = nil case "poktroll.proof.Claim.root_hash": x.RootHash = nil - case "poktroll.proof.Claim.proof_status": - x.ProofStatus = 0 + case "poktroll.proof.Claim.proof_validation_status": + x.ProofValidationStatus = 0 default: if fd.IsExtension() { panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.proof.Claim")) @@ -758,8 +758,8 @@ func (x *fastReflection_Claim) Get(descriptor protoreflect.FieldDescriptor) prot case "poktroll.proof.Claim.root_hash": value := x.RootHash return protoreflect.ValueOfBytes(value) - case "poktroll.proof.Claim.proof_status": - value := x.ProofStatus + case "poktroll.proof.Claim.proof_validation_status": + value := x.ProofValidationStatus return protoreflect.ValueOfEnum((protoreflect.EnumNumber)(value)) default: if 
descriptor.IsExtension() { @@ -787,8 +787,8 @@ func (x *fastReflection_Claim) Set(fd protoreflect.FieldDescriptor, value protor x.SessionHeader = value.Message().Interface().(*session.SessionHeader) case "poktroll.proof.Claim.root_hash": x.RootHash = value.Bytes() - case "poktroll.proof.Claim.proof_status": - x.ProofStatus = (ClaimProofStatus)(value.Enum()) + case "poktroll.proof.Claim.proof_validation_status": + x.ProofValidationStatus = (ClaimProofStatus)(value.Enum()) default: if fd.IsExtension() { panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.proof.Claim")) @@ -818,8 +818,8 @@ func (x *fastReflection_Claim) Mutable(fd protoreflect.FieldDescriptor) protoref panic(fmt.Errorf("field supplier_operator_address of message poktroll.proof.Claim is not mutable")) case "poktroll.proof.Claim.root_hash": panic(fmt.Errorf("field root_hash of message poktroll.proof.Claim is not mutable")) - case "poktroll.proof.Claim.proof_status": - panic(fmt.Errorf("field proof_status of message poktroll.proof.Claim is not mutable")) + case "poktroll.proof.Claim.proof_validation_status": + panic(fmt.Errorf("field proof_validation_status of message poktroll.proof.Claim is not mutable")) default: if fd.IsExtension() { panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.proof.Claim")) @@ -840,7 +840,7 @@ func (x *fastReflection_Claim) NewField(fd protoreflect.FieldDescriptor) protore return protoreflect.ValueOfMessage(m.ProtoReflect()) case "poktroll.proof.Claim.root_hash": return protoreflect.ValueOfBytes(nil) - case "poktroll.proof.Claim.proof_status": + case "poktroll.proof.Claim.proof_validation_status": return protoreflect.ValueOfEnum(0) default: if fd.IsExtension() { @@ -923,8 +923,8 @@ func (x *fastReflection_Claim) ProtoMethods() *protoiface.Methods { if l > 0 { n += 1 + l + runtime.Sov(uint64(l)) } - if x.ProofStatus != 0 { - n += 1 + runtime.Sov(uint64(x.ProofStatus)) + if x.ProofValidationStatus != 0 { + n += 1 + 
runtime.Sov(uint64(x.ProofValidationStatus)) } if x.unknownFields != nil { n += len(x.unknownFields) @@ -955,8 +955,8 @@ func (x *fastReflection_Claim) ProtoMethods() *protoiface.Methods { i -= len(x.unknownFields) copy(dAtA[i:], x.unknownFields) } - if x.ProofStatus != 0 { - i = runtime.EncodeVarint(dAtA, i, uint64(x.ProofStatus)) + if x.ProofValidationStatus != 0 { + i = runtime.EncodeVarint(dAtA, i, uint64(x.ProofValidationStatus)) i-- dAtA[i] = 0x20 } @@ -1141,9 +1141,9 @@ func (x *fastReflection_Claim) ProtoMethods() *protoiface.Methods { iNdEx = postIndex case 4: if wireType != 0 { - return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field ProofStatus", wireType) + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field ProofValidationStatus", wireType) } - x.ProofStatus = 0 + x.ProofValidationStatus = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow @@ -1153,7 +1153,7 @@ func (x *fastReflection_Claim) ProtoMethods() *protoiface.Methods { } b := dAtA[iNdEx] iNdEx++ - x.ProofStatus |= ClaimProofStatus(b&0x7F) << shift + x.ProofValidationStatus |= ClaimProofStatus(b&0x7F) << shift if b < 0x80 { break } @@ -1311,27 +1311,27 @@ func (ClaimProofStage) EnumDescriptor() ([]byte, []int) { return file_poktroll_proof_types_proto_rawDescGZIP(), []int{1} } -// ClaimProofStatus defines the status of the proof for a claim. -// The default value is NOT_FOUND, whether the proof is required or not. 
+// Status of proof validation for a claim +// Default is PENDING_VALIDATION regardless of proof requirement type ClaimProofStatus int32 const ( - ClaimProofStatus_NOT_FOUND ClaimProofStatus = 0 - ClaimProofStatus_VALID ClaimProofStatus = 1 - ClaimProofStatus_INVALID ClaimProofStatus = 2 + ClaimProofStatus_PENDING_VALIDATION ClaimProofStatus = 0 + ClaimProofStatus_VALIDATED ClaimProofStatus = 1 + ClaimProofStatus_INVALID ClaimProofStatus = 2 ) // Enum value maps for ClaimProofStatus. var ( ClaimProofStatus_name = map[int32]string{ - 0: "NOT_FOUND", - 1: "VALID", + 0: "PENDING_VALIDATION", + 1: "VALIDATED", 2: "INVALID", } ClaimProofStatus_value = map[string]int32{ - "NOT_FOUND": 0, - "VALID": 1, - "INVALID": 2, + "PENDING_VALIDATION": 0, + "VALIDATED": 1, + "INVALID": 2, } ) @@ -1422,14 +1422,14 @@ type Claim struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // Address of the supplier's operator that submitted this claim. SupplierOperatorAddress string `protobuf:"bytes,1,opt,name=supplier_operator_address,json=supplierOperatorAddress,proto3" json:"supplier_operator_address,omitempty"` // the address of the supplier's operator that submitted this claim - // The session header of the session that this claim is for. + // Session header this claim is for. SessionHeader *session.SessionHeader `protobuf:"bytes,2,opt,name=session_header,json=sessionHeader,proto3" json:"session_header,omitempty"` - // Root hash returned from smt.SMST#Root(). + // Root hash from smt.SMST#Root(). RootHash []byte `protobuf:"bytes,3,opt,name=root_hash,json=rootHash,proto3" json:"root_hash,omitempty"` - // Claim proof status captures the status of the proof for this claim. 
- // WARNING: This field MUST only be set by proofKeeper#EnsureValidProofSignaturesAndClosestPath - ProofStatus ClaimProofStatus `protobuf:"varint,4,opt,name=proof_status,json=proofStatus,proto3,enum=poktroll.proof.ClaimProofStatus" json:"proof_status,omitempty"` + // Important: This field MUST only be set by proofKeeper#EnsureValidProofSignaturesAndClosestPath + ProofValidationStatus ClaimProofStatus `protobuf:"varint,4,opt,name=proof_validation_status,json=proofValidationStatus,proto3,enum=poktroll.proof.ClaimProofStatus" json:"proof_validation_status,omitempty"` } func (x *Claim) Reset() { @@ -1473,11 +1473,11 @@ func (x *Claim) GetRootHash() []byte { return nil } -func (x *Claim) GetProofStatus() ClaimProofStatus { +func (x *Claim) GetProofValidationStatus() ClaimProofStatus { if x != nil { - return x.ProofStatus + return x.ProofValidationStatus } - return ClaimProofStatus_NOT_FOUND + return ClaimProofStatus_PENDING_VALIDATION } var File_poktroll_proof_types_proto protoreflect.FileDescriptor @@ -1504,7 +1504,7 @@ var file_poktroll_proof_types_proto_rawDesc = []byte{ 0x64, 0x65, 0x72, 0x12, 0x30, 0x0a, 0x14, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x72, 0x6b, 0x6c, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x72, 0x6b, 0x6c, 0x65, - 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x22, 0x87, 0x02, 0x0a, 0x05, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x12, + 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x22, 0x9c, 0x02, 0x0a, 0x05, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x12, 0x54, 0x0a, 0x19, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x18, 0xd2, 0xb4, 0x2d, 0x14, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x41, @@ -1516,35 +1516,37 @@ var file_poktroll_proof_types_proto_rawDesc = []byte{ 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x65, 
0x61, 0x64, 0x65, 0x72, 0x52, 0x0d, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x08, 0x72, 0x6f, 0x6f, 0x74, 0x48, 0x61, 0x73, 0x68, 0x12, 0x43, 0x0a, 0x0c, 0x70, 0x72, - 0x6f, 0x6f, 0x66, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x20, 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x6f, - 0x66, 0x2e, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2a, - 0x4c, 0x0a, 0x16, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x0c, 0x4e, 0x4f, 0x54, - 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x50, - 0x52, 0x4f, 0x42, 0x41, 0x42, 0x49, 0x4c, 0x49, 0x53, 0x54, 0x49, 0x43, 0x10, 0x01, 0x12, 0x0d, - 0x0a, 0x09, 0x54, 0x48, 0x52, 0x45, 0x53, 0x48, 0x4f, 0x4c, 0x44, 0x10, 0x02, 0x2a, 0x44, 0x0a, - 0x0f, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x53, 0x74, 0x61, 0x67, 0x65, - 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4c, 0x41, 0x49, 0x4d, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, - 0x06, 0x50, 0x52, 0x4f, 0x56, 0x45, 0x4e, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x45, 0x54, - 0x54, 0x4c, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x45, 0x58, 0x50, 0x49, 0x52, 0x45, - 0x44, 0x10, 0x03, 0x2a, 0x39, 0x0a, 0x10, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x50, 0x72, 0x6f, 0x6f, - 0x66, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x4f, 0x54, 0x5f, 0x46, - 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, - 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x02, 0x42, 0x9e, - 0x01, 
0xd8, 0xe2, 0x1e, 0x01, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72, - 0x6f, 0x6c, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x42, 0x0a, 0x54, 0x79, 0x70, 0x65, 0x73, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x1f, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x73, - 0x64, 0x6b, 0x2e, 0x69, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, - 0x6c, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0xa2, 0x02, 0x03, 0x50, 0x50, 0x58, 0xaa, 0x02, - 0x0e, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0xca, - 0x02, 0x0e, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x5c, 0x50, 0x72, 0x6f, 0x6f, 0x66, - 0xe2, 0x02, 0x1a, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x5c, 0x50, 0x72, 0x6f, 0x6f, - 0x66, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0f, - 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x3a, 0x3a, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x52, 0x08, 0x72, 0x6f, 0x6f, 0x74, 0x48, 0x61, 0x73, 0x68, 0x12, 0x58, 0x0a, 0x17, 0x70, 0x72, + 0x6f, 0x6f, 0x66, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x70, 0x6f, + 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x2e, 0x43, 0x6c, 0x61, + 0x69, 0x6d, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x15, 0x70, + 0x72, 0x6f, 0x6f, 0x66, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x2a, 0x4c, 0x0a, 0x16, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, + 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x10, + 0x0a, 0x0c, 0x4e, 0x4f, 0x54, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x11, 0x0a, 0x0d, 0x50, 0x52, 0x4f, 0x42, 0x41, 0x42, 0x49, 0x4c, 0x49, 0x53, 0x54, 0x49, + 
0x43, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x48, 0x52, 0x45, 0x53, 0x48, 0x4f, 0x4c, 0x44, + 0x10, 0x02, 0x2a, 0x44, 0x0a, 0x0f, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x50, 0x72, 0x6f, 0x6f, 0x66, + 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4c, 0x41, 0x49, 0x4d, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x52, 0x4f, 0x56, 0x45, 0x4e, 0x10, 0x01, 0x12, 0x0b, + 0x0a, 0x07, 0x53, 0x45, 0x54, 0x54, 0x4c, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x45, + 0x58, 0x50, 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x2a, 0x46, 0x0a, 0x10, 0x43, 0x6c, 0x61, 0x69, + 0x6d, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x12, + 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, + 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x45, + 0x44, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x02, + 0x42, 0x9e, 0x01, 0xd8, 0xe2, 0x1e, 0x01, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x2e, 0x70, 0x6f, 0x6b, + 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x42, 0x0a, 0x54, 0x79, 0x70, + 0x65, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x1f, 0x63, 0x6f, 0x73, 0x6d, 0x6f, + 0x73, 0x73, 0x64, 0x6b, 0x2e, 0x69, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x6f, 0x6b, 0x74, + 0x72, 0x6f, 0x6c, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0xa2, 0x02, 0x03, 0x50, 0x50, 0x58, + 0xaa, 0x02, 0x0e, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x50, 0x72, 0x6f, 0x6f, + 0x66, 0xca, 0x02, 0x0e, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x5c, 0x50, 0x72, 0x6f, + 0x6f, 0x66, 0xe2, 0x02, 0x1a, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x5c, 0x50, 0x72, + 0x6f, 0x6f, 0x66, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, + 0x02, 0x0f, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x3a, 0x3a, 0x50, 0x72, 0x6f, 0x6f, + 0x66, 0x62, 0x06, 0x70, 0x72, 0x6f, 
0x74, 0x6f, 0x33, } var ( @@ -1572,7 +1574,7 @@ var file_poktroll_proof_types_proto_goTypes = []interface{}{ var file_poktroll_proof_types_proto_depIdxs = []int32{ 5, // 0: poktroll.proof.Proof.session_header:type_name -> poktroll.session.SessionHeader 5, // 1: poktroll.proof.Claim.session_header:type_name -> poktroll.session.SessionHeader - 2, // 2: poktroll.proof.Claim.proof_status:type_name -> poktroll.proof.ClaimProofStatus + 2, // 2: poktroll.proof.Claim.proof_validation_status:type_name -> poktroll.proof.ClaimProofStatus 3, // [3:3] is the sub-list for method output_type 3, // [3:3] is the sub-list for method input_type 3, // [3:3] is the sub-list for extension type_name diff --git a/proto/poktroll/proof/event.proto b/proto/poktroll/proof/event.proto index f54231121..271a6c355 100644 --- a/proto/poktroll/proof/event.proto +++ b/proto/poktroll/proof/event.proto @@ -44,10 +44,13 @@ message EventProofUpdated { cosmos.base.v1beta1.Coin claimed_upokt = 6 [(gogoproto.jsontag) = "claimed_upokt"]; } -// Event emitted after a proof has been checked for validity. +// Event emitted after a proof has been checked for validity in the proof module's +// EndBlocker. message EventProofValidityChecked { poktroll.proof.Proof proof = 1 [(gogoproto.jsontag) = "proof"]; uint64 block_height = 2 [(gogoproto.jsontag) = "block_height"]; poktroll.proof.ClaimProofStatus proof_status = 3 [(gogoproto.jsontag) = "proof_status"]; + // reason is the string representation of the error that led to the proof being + // marked as invalid (e.g. 
"invalid closest merkle proof", "invalid relay request signature") string reason = 4 [(gogoproto.jsontag) = "reason"]; } diff --git a/proto/poktroll/proof/types.proto b/proto/poktroll/proof/types.proto index e979621b6..c35b08fb8 100644 --- a/proto/poktroll/proof/types.proto +++ b/proto/poktroll/proof/types.proto @@ -24,14 +24,17 @@ message Proof { // Claim is the serialized object stored onchain for claims pending to be proven message Claim { + // Address of the supplier's operator that submitted this claim. string supplier_operator_address = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; // the address of the supplier's operator that submitted this claim - // The session header of the session that this claim is for. + + // Session header this claim is for. poktroll.session.SessionHeader session_header = 2; - // Root hash returned from smt.SMST#Root(). + + // Root hash from smt.SMST#Root(). bytes root_hash = 3; - // Claim proof status captures the status of the proof for this claim. - // WARNING: This field MUST only be set by proofKeeper#EnsureValidProofSignaturesAndClosestPath - ClaimProofStatus proof_status = 4; + + // Important: This field MUST only be set by proofKeeper#EnsureValidProofSignaturesAndClosestPath + ClaimProofStatus proof_validation_status = 4; } enum ProofRequirementReason { @@ -47,10 +50,10 @@ enum ClaimProofStage { EXPIRED = 3; } -// ClaimProofStatus defines the status of the proof for a claim. -// The default value is NOT_FOUND, whether the proof is required or not. 
+// Status of proof validation for a claim +// Default is PENDING_VALIDATION regardless of proof requirement enum ClaimProofStatus { - NOT_FOUND = 0; - VALID = 1; + PENDING_VALIDATION = 0; + VALIDATED = 1; INVALID = 2; } \ No newline at end of file diff --git a/testutil/testtree/tree.go b/testutil/testtree/tree.go index 680a95011..6252926e2 100644 --- a/testutil/testtree/tree.go +++ b/testutil/testtree/tree.go @@ -152,6 +152,6 @@ func NewClaim( SupplierOperatorAddress: supplierOperatorAddr, SessionHeader: sessionHeader, RootHash: rootHash, - ProofStatus: prooftypes.ClaimProofStatus_NOT_FOUND, + ProofValidationStatus: prooftypes.ClaimProofStatus_PENDING_VALIDATION, } } diff --git a/x/proof/keeper/msg_server_submit_proof.go b/x/proof/keeper/msg_server_submit_proof.go index dadd4e977..30030a818 100644 --- a/x/proof/keeper/msg_server_submit_proof.go +++ b/x/proof/keeper/msg_server_submit_proof.go @@ -19,21 +19,18 @@ import ( sharedtypes "github.com/pokt-network/poktroll/x/shared/types" ) -// SubmitProof is the server handler to submit and store a proof onchain. -// A proof that's stored onchain is what leads to rewards (i.e. inflation) -// downstream, making this a critical part of the protocol. +// SubmitProof is the server message handler that stores a valid +// proof onchain, enabling downstream reward distribution. // -// Note that the validation of the proof is done in `EnsureValidProofSignaturesAndClosestPath`. -// However, preliminary checks are done in the handler to prevent sybil or DoS attacks on -// full nodes by submitting malformed proofs. +// IMPORTANT: Full proof validation occurs in EnsureValidProofSignaturesAndClosestPath. +// This handler performs preliminary validation to prevent sybil/DoS attacks. // -// We are playing a balance of security and efficiency here, where enough validation -// is done on proof submission, and exhaustive validation is done during the endblocker. 
+// There is a security & performance balance and tradeoff between the handler and end blocker: +// - Basic validation on submission (here) +// - Exhaustive validation in endblocker (EnsureValidProofSignaturesAndClosestPath) // -// The entity sending the SubmitProof messages does not necessarily need -// to correspond to the supplier signing the proof. For example, a single entity -// could (theoretically) batch multiple proofs (signed by the corresponding supplier) -// into one transaction to save on transaction fees. +// Note: Proof submitter may differ from supplier signer, allowing batched submissions +// to optimize transaction fees. func (k msgServer) SubmitProof( ctx context.Context, msg *types.MsgSubmitProof, @@ -85,7 +82,7 @@ func (k msgServer) SubmitProof( logger.Error(fmt.Sprintf("failed to ensure well-formed proof: %v", err)) return nil, status.Error(codes.FailedPrecondition, err.Error()) } - logger.Info("checked the proof is well-formed") + logger.Info("ensured the proof is well-formed") // Retrieve the claim associated with the proof. 
// The claim should ALWAYS exist since the proof validation in EnsureWellFormedProof diff --git a/x/proof/keeper/proof_validation.go b/x/proof/keeper/proof_validation.go index 897e3ed16..e4d53be2e 100644 --- a/x/proof/keeper/proof_validation.go +++ b/x/proof/keeper/proof_validation.go @@ -175,7 +175,7 @@ func (k Keeper) EnsureWellFormedProof(ctx context.Context, proof *types.Proof) e logger.Debug("successfully validated relay mining difficulty") // Retrieve the corresponding claim for the proof submitted - if err := k.validateClaimForProof(ctx, sessionHeader, supplierOperatorAddr); err != nil { + if err := k.validateSessionClaim(ctx, sessionHeader, supplierOperatorAddr); err != nil { return err } logger.Debug("successfully retrieved and validated claim") @@ -331,17 +331,16 @@ func (k Keeper) validateClosestPath( return nil } -// validateClaimForProof ensures that a claim corresponding to the given proof's -// session exists & has a matching supplier operator address and session header. -func (k Keeper) validateClaimForProof( +// validateSessionClaim ensures that the given session header and supplierOperatorAddress +// have a corresponding claim. +func (k Keeper) validateSessionClaim( ctx context.Context, sessionHeader *sessiontypes.SessionHeader, supplierOperatorAddr string, ) error { sessionId := sessionHeader.SessionId - // NB: no need to assert the testSessionId or supplier operator address as it is retrieved - // by respective values of the given proof. I.e., if the claim exists, then these - // values are guaranteed to match. + + // Retrieve the claim corresponding to the session ID and supplier operator address. foundClaim, found := k.GetClaim(ctx, sessionId, supplierOperatorAddr) if !found { return types.ErrProofClaimNotFound.Wrapf( @@ -352,41 +351,40 @@ func (k Keeper) validateClaimForProof( } claimSessionHeader := foundClaim.GetSessionHeader() - proofSessionHeader := sessionHeader // Ensure session start heights match. 
- if claimSessionHeader.GetSessionStartBlockHeight() != proofSessionHeader.GetSessionStartBlockHeight() { + if claimSessionHeader.GetSessionStartBlockHeight() != sessionHeader.GetSessionStartBlockHeight() { return types.ErrProofInvalidSessionStartHeight.Wrapf( "claim session start height %d does not match proof session start height %d", claimSessionHeader.GetSessionStartBlockHeight(), - proofSessionHeader.GetSessionStartBlockHeight(), + sessionHeader.GetSessionStartBlockHeight(), ) } // Ensure session end heights match. - if claimSessionHeader.GetSessionEndBlockHeight() != proofSessionHeader.GetSessionEndBlockHeight() { + if claimSessionHeader.GetSessionEndBlockHeight() != sessionHeader.GetSessionEndBlockHeight() { return types.ErrProofInvalidSessionEndHeight.Wrapf( "claim session end height %d does not match proof session end height %d", claimSessionHeader.GetSessionEndBlockHeight(), - proofSessionHeader.GetSessionEndBlockHeight(), + sessionHeader.GetSessionEndBlockHeight(), ) } // Ensure application addresses match. - if claimSessionHeader.GetApplicationAddress() != proofSessionHeader.GetApplicationAddress() { + if claimSessionHeader.GetApplicationAddress() != sessionHeader.GetApplicationAddress() { return types.ErrProofInvalidAddress.Wrapf( "claim application address %q does not match proof application address %q", claimSessionHeader.GetApplicationAddress(), - proofSessionHeader.GetApplicationAddress(), + sessionHeader.GetApplicationAddress(), ) } // Ensure service IDs match. 
- if claimSessionHeader.GetServiceId() != proofSessionHeader.GetServiceId() { + if claimSessionHeader.GetServiceId() != sessionHeader.GetServiceId() { return types.ErrProofInvalidService.Wrapf( "claim service ID %q does not match proof service ID %q", claimSessionHeader.GetServiceId(), - proofSessionHeader.GetServiceId(), + sessionHeader.GetServiceId(), ) } diff --git a/x/proof/keeper/validate_proofs.go b/x/proof/keeper/validate_proofs.go index 6ebce5af9..0e085aafe 100644 --- a/x/proof/keeper/validate_proofs.go +++ b/x/proof/keeper/validate_proofs.go @@ -11,9 +11,28 @@ import ( "github.com/pokt-network/poktroll/x/proof/types" ) -// numCPU is the number of CPU cores available on the machine. -// It is initialized in the init function to prevent runtime.NumCPU from being called -// multiple times in the ValidateSubmittedProofs function. +// proofValidationTaskCoordinator is a helper struct to coordinate parallel proof +// validation tasks. +type proofValidationTaskCoordinator struct { + // sem is a semaphore to limit the number of concurrent goroutines. + sem chan struct{} + + // wg is a wait group to wait for all goroutines to finish before returning. + wg *sync.WaitGroup + + // processedProofs is a map of supplier operator addresses to the session IDs + // whose proofs that have been processed. + processedProofs map[string][]string + + // numValidProofs and numInvalidProofs are counters to keep track of proof validation results. + numValidProofs, + numInvalidProofs uint64 + + // coordinatorMu protects the coordinator fields. + coordinatorMu *sync.Mutex +} + +// numCPU caches runtime.NumCPU() to avoid being retrieved on every ValidateSubmittedProofs call. var numCPU int func init() { @@ -21,18 +40,17 @@ func init() { numCPU = runtime.NumCPU() } -// ValidateSubmittedProofs concurrently validates block proofs. -// It marks their corresponding claims as valid or invalid based on the proof validation. -// It removes them from the store once they are processed. 
+// ValidateSubmittedProofs performs concurrent proof validation, updating claims' +// proof validation states and removing processed proofs from storage. func (k Keeper) ValidateSubmittedProofs(ctx sdk.Context) (numValidProofs, numInvalidProofs uint64, err error) { logger := k.Logger().With("method", "ValidateSubmittedProofs") logger.Info(fmt.Sprintf("Number of CPU cores used for parallel proof validation: %d\n", numCPU)) - // Iterate over proofs using an proofIterator to prevent memory issues from bulk fetching. + // Iterate over proofs using an iterator to prevent OOM issues caused by bulk fetching. proofIterator := k.GetAllProofsIterator(ctx) - coordinator := &proofValidationTaskCoordinator{ + proofValidationCoordinator := &proofValidationTaskCoordinator{ // Parallelize proof validation across CPU cores since they are independent from one another. // Use semaphores to limit concurrent goroutines and prevent memory issues. sem: make(chan struct{}, numCPU), @@ -48,23 +66,23 @@ func (k Keeper) ValidateSubmittedProofs(ctx sdk.Context) (numValidProofs, numInv // Acquire a semaphore to limit the number of goroutines. // This will block if the sem channel is full. - coordinator.sem <- struct{}{} + proofValidationCoordinator.sem <- struct{}{} // Increment the wait group to wait for proof validation to finish. - coordinator.wg.Add(1) + proofValidationCoordinator.wg.Add(1) - go k.validateProof(ctx, proofBz, coordinator) + go k.validateProof(ctx, proofBz, proofValidationCoordinator) } // Wait for all goroutines to finish before returning. - coordinator.wg.Wait() + proofValidationCoordinator.wg.Wait() // Close the proof iterator before deleting the processed proofs. proofIterator.Close() // Delete all the processed proofs from the store since they are no longer needed. 
logger.Info("removing processed proofs from the store") - for supplierOperatorAddr, processedProofs := range coordinator.processedProofs { + for supplierOperatorAddr, processedProofs := range proofValidationCoordinator.processedProofs { for _, sessionId := range processedProofs { k.RemoveProof(ctx, sessionId, supplierOperatorAddr) logger.Info(fmt.Sprintf( @@ -75,10 +93,10 @@ func (k Keeper) ValidateSubmittedProofs(ctx sdk.Context) (numValidProofs, numInv } } - return coordinator.numValidProofs, coordinator.numInvalidProofs, nil + return proofValidationCoordinator.numValidProofs, proofValidationCoordinator.numInvalidProofs, nil } -// validateProof validates a proof before removing it from the store. +// validateProof validates a proof submitted by a supplier. // It marks the corresponding claim as valid or invalid based on the proof validation. // It is meant to be called concurrently by multiple goroutines to parallelize // proof validation. @@ -101,6 +119,9 @@ func (k Keeper) validateProof( // proofBz is not expected to fail unmarshalling since it is should have // passed EnsureWellFormedProof validation in MsgSubmitProof handler. // Panic if it fails unmarshalling. + // If a failure occurs, it indicates either a bug in the code or data corruption. + // In either case, panicking is an appropriate response since both panics and + // returning an error would halt block production. k.cdc.MustUnmarshal(proofBz, &proof) sessionHeader := proof.GetSessionHeader() @@ -116,8 +137,8 @@ func (k Keeper) validateProof( // Retrieve the corresponding claim for the proof submitted so it can be // used in the proof validation below. - // EnsureWellFormedProof has already validated that the claim referenced by the - // proof exists and has a matching session header. + // EnsureWellFormedProof which is called in MsgSubmitProof handler has already validated + // that the claim referenced by the proof exists and has a matching session header. 
claim, claimFound := k.GetClaim(ctx, sessionHeader.GetSessionId(), supplierOperatorAddr) if !claimFound { // DEV_NOTE: This should never happen since EnsureWellFormedProof has already checked @@ -128,7 +149,7 @@ func (k Keeper) validateProof( logger.Debug("successfully retrieved claim") // Set the proof status to valid by default. - proofStatus := types.ClaimProofStatus_VALID + proofStatus := types.ClaimProofStatus_VALIDATED // Set the invalidity reason to an empty string by default. invalidProofCause := "" @@ -160,12 +181,12 @@ func (k Keeper) validateProof( coordinator.coordinatorMu.Lock() defer coordinator.coordinatorMu.Unlock() - // Update the claim to reflect its corresponding the proof validation result. + // Update the claim to reflect the validation result of the associated proof. // // It will be used later by the SettlePendingClaims routine to determine whether: // 1. The claim should be settled or not // 2. The corresponding supplier should be slashed or not - claim.ProofStatus = proofStatus + claim.ProofValidationStatus = proofStatus k.UpsertClaim(ctx, claim) // Collect the processed proofs info to delete them after the proofIterator is closed @@ -183,24 +204,3 @@ func (k Keeper) validateProof( coordinator.numValidProofs++ } } - -// proofValidationTaskCoordinator is a helper struct to coordinate parallel proof -// validation tasks. -type proofValidationTaskCoordinator struct { - // sem is a semaphore to limit the number of concurrent goroutines. - sem chan struct{} - - // wg is a wait group to wait for all goroutines to finish before returning. - wg *sync.WaitGroup - - // processedProofs is a map of supplier operator addresses to the session IDs - // of proofs that have been processed. - processedProofs map[string][]string - - // numValidProofs and numInvalidProofs are counters for the number of valid and invalid proofs. - numValidProofs, - numInvalidProofs uint64 - - // coordinatorMu protects the coordinator fields. 
- coordinatorMu *sync.Mutex -} diff --git a/x/proof/module/abci.go b/x/proof/module/abci.go index 88fc0762d..5c4f02d4b 100644 --- a/x/proof/module/abci.go +++ b/x/proof/module/abci.go @@ -17,7 +17,9 @@ func EndBlocker(ctx sdk.Context, k keeper.Keeper) (err error) { logger := k.Logger().With("method", "EndBlocker") - // Iterates through all proofs submitted in this block and removes invalid ones. + // Iterates through all proofs submitted in this block and: + // 1. Updates the proof validation status in the associated claim + // 2. Removes all processed proofs from onchain state numValidProofs, numInvalidProofs, err := k.ValidateSubmittedProofs(ctx) if err != nil { logger.Error(fmt.Sprintf("could not validate submitted proofs due to error %v", err)) @@ -25,7 +27,7 @@ func EndBlocker(ctx sdk.Context, k keeper.Keeper) (err error) { } logger.Info(fmt.Sprintf( - "validated %d proofs: %d valid, %d invalid", + "checked %d proofs: %d valid, %d invalid", numValidProofs+numInvalidProofs, numValidProofs, numInvalidProofs, diff --git a/x/proof/types/event.pb.go b/x/proof/types/event.pb.go index d71cea0bf..45873d3b6 100644 --- a/x/proof/types/event.pb.go +++ b/x/proof/types/event.pb.go @@ -330,12 +330,15 @@ func (m *EventProofUpdated) GetClaimedUpokt() *types.Coin { return nil } -// Event emitted after a proof has been checked for validity. +// Event emitted after a proof has been checked for validity in the proof module's +// EndBlocker. 
type EventProofValidityChecked struct { Proof *Proof `protobuf:"bytes,1,opt,name=proof,proto3" json:"proof"` BlockHeight uint64 `protobuf:"varint,2,opt,name=block_height,json=blockHeight,proto3" json:"block_height"` ProofStatus ClaimProofStatus `protobuf:"varint,3,opt,name=proof_status,json=proofStatus,proto3,enum=poktroll.proof.ClaimProofStatus" json:"proof_status"` - Reason string `protobuf:"bytes,4,opt,name=reason,proto3" json:"reason"` + // reason is the string representation of the error that led to the proof being + // marked as invalid (e.g. "invalid closest merkle proof", "invalid relay request signature") + Reason string `protobuf:"bytes,4,opt,name=reason,proto3" json:"reason"` } func (m *EventProofValidityChecked) Reset() { *m = EventProofValidityChecked{} } @@ -385,7 +388,7 @@ func (m *EventProofValidityChecked) GetProofStatus() ClaimProofStatus { if m != nil { return m.ProofStatus } - return ClaimProofStatus_NOT_FOUND + return ClaimProofStatus_PENDING_VALIDATION } func (m *EventProofValidityChecked) GetReason() string { diff --git a/x/proof/types/types.pb.go b/x/proof/types/types.pb.go index f96db744a..dc594d9e0 100644 --- a/x/proof/types/types.pb.go +++ b/x/proof/types/types.pb.go @@ -84,26 +84,26 @@ func (ClaimProofStage) EnumDescriptor() ([]byte, []int) { return fileDescriptor_b75ef15dfd4d6998, []int{1} } -// ClaimProofStatus defines the status of the proof for a claim. -// The default value is NOT_FOUND, whether the proof is required or not. 
+// Status of proof validation for a claim +// Default is PENDING_VALIDATION regardless of proof requirement type ClaimProofStatus int32 const ( - ClaimProofStatus_NOT_FOUND ClaimProofStatus = 0 - ClaimProofStatus_VALID ClaimProofStatus = 1 - ClaimProofStatus_INVALID ClaimProofStatus = 2 + ClaimProofStatus_PENDING_VALIDATION ClaimProofStatus = 0 + ClaimProofStatus_VALIDATED ClaimProofStatus = 1 + ClaimProofStatus_INVALID ClaimProofStatus = 2 ) var ClaimProofStatus_name = map[int32]string{ - 0: "NOT_FOUND", - 1: "VALID", + 0: "PENDING_VALIDATION", + 1: "VALIDATED", 2: "INVALID", } var ClaimProofStatus_value = map[string]int32{ - "NOT_FOUND": 0, - "VALID": 1, - "INVALID": 2, + "PENDING_VALIDATION": 0, + "VALIDATED": 1, + "INVALID": 2, } func (x ClaimProofStatus) String() string { @@ -175,14 +175,14 @@ func (m *Proof) GetClosestMerkleProof() []byte { // Claim is the serialized object stored onchain for claims pending to be proven type Claim struct { + // Address of the supplier's operator that submitted this claim. SupplierOperatorAddress string `protobuf:"bytes,1,opt,name=supplier_operator_address,json=supplierOperatorAddress,proto3" json:"supplier_operator_address,omitempty"` - // The session header of the session that this claim is for. + // Session header this claim is for. SessionHeader *types.SessionHeader `protobuf:"bytes,2,opt,name=session_header,json=sessionHeader,proto3" json:"session_header,omitempty"` - // Root hash returned from smt.SMST#Root(). + // Root hash from smt.SMST#Root(). RootHash []byte `protobuf:"bytes,3,opt,name=root_hash,json=rootHash,proto3" json:"root_hash,omitempty"` - // Claim proof status captures the status of the proof for this claim. 
- // WARNING: This field MUST only be set by proofKeeper#EnsureValidProofSignaturesAndClosestPath - ProofStatus ClaimProofStatus `protobuf:"varint,4,opt,name=proof_status,json=proofStatus,proto3,enum=poktroll.proof.ClaimProofStatus" json:"proof_status,omitempty"` + // Important: This field MUST only be set by proofKeeper#EnsureValidProofSignaturesAndClosestPath + ProofValidationStatus ClaimProofStatus `protobuf:"varint,4,opt,name=proof_validation_status,json=proofValidationStatus,proto3,enum=poktroll.proof.ClaimProofStatus" json:"proof_validation_status,omitempty"` } func (m *Claim) Reset() { *m = Claim{} } @@ -235,11 +235,11 @@ func (m *Claim) GetRootHash() []byte { return nil } -func (m *Claim) GetProofStatus() ClaimProofStatus { +func (m *Claim) GetProofValidationStatus() ClaimProofStatus { if m != nil { - return m.ProofStatus + return m.ProofValidationStatus } - return ClaimProofStatus_NOT_FOUND + return ClaimProofStatus_PENDING_VALIDATION } func init() { @@ -253,39 +253,41 @@ func init() { func init() { proto.RegisterFile("poktroll/proof/types.proto", fileDescriptor_b75ef15dfd4d6998) } var fileDescriptor_b75ef15dfd4d6998 = []byte{ - // 511 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x93, 0xcf, 0x6e, 0xd3, 0x40, - 0x10, 0xc6, 0xb3, 0x29, 0x2d, 0x64, 0xf3, 0x07, 0xb3, 0x8a, 0x20, 0x0d, 0xc8, 0x44, 0x3d, 0x45, - 0x91, 0xea, 0xa0, 0x72, 0xe2, 0x98, 0xc4, 0xae, 0x62, 0xc9, 0x8d, 0xc3, 0xda, 0xad, 0x10, 0x17, - 0xcb, 0x4d, 0x16, 0xdb, 0x8a, 0xed, 0x35, 0xbb, 0x1b, 0x01, 0x4f, 0xc0, 0x95, 0x87, 0xe1, 0x21, - 0x38, 0x56, 0x5c, 0xe8, 0x11, 0x25, 0x2f, 0x82, 0xbc, 0x76, 0x43, 0xca, 0x13, 0x70, 0xb2, 0x67, - 0x7f, 0x33, 0xdf, 0xcc, 0xb7, 0xda, 0x81, 0xdd, 0x8c, 0xae, 0x04, 0xa3, 0x71, 0x3c, 0xcc, 0x18, - 0xa5, 0x1f, 0x86, 0xe2, 0x4b, 0x46, 0xb8, 0x96, 0x31, 0x2a, 0x28, 0x6a, 0xdd, 0x31, 0x4d, 0xb2, - 0xee, 0xf1, 0x82, 0xf2, 0x84, 0x72, 0x4f, 0xd2, 0x61, 0x11, 0x14, 0xa9, 0xdd, 0x17, 0x3b, 0x19, - 0x4e, 0x38, 
0x8f, 0x68, 0xba, 0x2f, 0xd4, 0x6d, 0x07, 0x34, 0xa0, 0x45, 0x55, 0xfe, 0x57, 0x9c, - 0x9e, 0xfc, 0x02, 0xf0, 0x70, 0x9e, 0x0b, 0x23, 0x17, 0x1e, 0xf3, 0x75, 0x96, 0xc5, 0x11, 0x61, - 0x1e, 0xcd, 0x08, 0xf3, 0x05, 0x65, 0x9e, 0xbf, 0x5c, 0x32, 0xc2, 0x79, 0x07, 0xf4, 0x40, 0xbf, - 0x36, 0xee, 0xfc, 0xfc, 0x7e, 0xda, 0x2e, 0x5b, 0x8e, 0x0a, 0xe2, 0x08, 0x16, 0xa5, 0x01, 0x7e, - 0x76, 0x57, 0x6a, 0x97, 0x95, 0x25, 0x46, 0xe7, 0xb0, 0x55, 0x0e, 0xe3, 0x85, 0xc4, 0x5f, 0x12, - 0xd6, 0xa9, 0xf6, 0x40, 0xbf, 0x7e, 0xf6, 0x52, 0xdb, 0xf9, 0x2a, 0xb9, 0xe6, 0x14, 0xdf, 0xa9, - 0x4c, 0xc3, 0x4d, 0xbe, 0x1f, 0xa2, 0x57, 0xb0, 0xbd, 0x88, 0x29, 0x27, 0x5c, 0x78, 0x09, 0x61, - 0xab, 0x98, 0x78, 0xf2, 0x3a, 0x3a, 0x07, 0x3d, 0xd0, 0x6f, 0x60, 0x54, 0xb2, 0x0b, 0x89, 0xa4, - 0x9f, 0x93, 0xaf, 0x55, 0x78, 0x38, 0x89, 0xfd, 0x28, 0xf9, 0xcf, 0x9d, 0x3d, 0x87, 0x35, 0x46, - 0xa9, 0xf0, 0x42, 0x9f, 0x87, 0xa5, 0x9d, 0x47, 0xf9, 0xc1, 0xd4, 0xe7, 0x21, 0x9a, 0xc0, 0x86, - 0xf4, 0xe9, 0x71, 0xe1, 0x8b, 0x35, 0xef, 0x3c, 0xe8, 0x81, 0x7e, 0xeb, 0xac, 0xa7, 0xdd, 0x7f, - 0x14, 0x9a, 0xf4, 0x29, 0x6d, 0x3b, 0x32, 0x0f, 0xd7, 0xb3, 0xbf, 0xc1, 0xc0, 0x82, 0x4f, 0x25, - 0xc3, 0xe4, 0xe3, 0x3a, 0x62, 0x24, 0x21, 0xa9, 0xc0, 0xc4, 0xe7, 0x34, 0x45, 0x0a, 0x6c, 0xcc, - 0x6c, 0xd7, 0xc3, 0xc6, 0xdb, 0x4b, 0x13, 0x1b, 0xba, 0x52, 0x41, 0x4f, 0x60, 0x73, 0x8e, 0xed, - 0xf1, 0x68, 0x6c, 0x5a, 0xa6, 0xe3, 0x9a, 0x13, 0x05, 0xa0, 0x26, 0xac, 0xb9, 0x53, 0x6c, 0x38, - 0x53, 0xdb, 0xd2, 0x95, 0xea, 0x40, 0x87, 0x8f, 0xef, 0xb5, 0x0b, 0x08, 0xaa, 0xc3, 0x87, 0x13, - 0x6b, 0x64, 0x5e, 0x48, 0x05, 0x08, 0x8f, 0xe6, 0xd8, 0xbe, 0x32, 0x66, 0x0a, 0xc8, 0x81, 0x63, - 0xb8, 0xae, 0x65, 0xe8, 0x4a, 0x35, 0x0f, 0x8c, 0x77, 0x73, 0xd9, 0xe7, 0x60, 0xf0, 0x06, 0x2a, - 0xff, 0x0e, 0x9d, 0x37, 0xca, 0xa7, 0x39, 0xb7, 0x2f, 0x67, 0xb9, 0x50, 0x0d, 0x1e, 0x5e, 0x8d, - 0x2c, 0x53, 0x2f, 0x74, 0xcc, 0x59, 0x11, 0x54, 0xc7, 0xd6, 0x8f, 0x8d, 0x0a, 0x6e, 0x36, 0x2a, - 0xb8, 0xdd, 0xa8, 0xe0, 0xf7, 0x46, 0x05, 0xdf, 
0xb6, 0x6a, 0xe5, 0x66, 0xab, 0x56, 0x6e, 0xb7, - 0x6a, 0xe5, 0xbd, 0x16, 0x44, 0x22, 0x5c, 0x5f, 0x6b, 0x0b, 0x9a, 0x0c, 0xf3, 0x5b, 0x3a, 0x4d, - 0x89, 0xf8, 0x44, 0xd9, 0x6a, 0xb8, 0x5b, 0x8e, 0xcf, 0xfb, 0x5b, 0x76, 0x7d, 0x24, 0xf7, 0xe0, - 0xf5, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd8, 0x7f, 0xdb, 0x75, 0x84, 0x03, 0x00, 0x00, + // 533 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x53, 0x5f, 0x6e, 0xda, 0x4e, + 0x10, 0x66, 0xc9, 0x2f, 0xf9, 0x95, 0x4d, 0xa0, 0xee, 0x8a, 0x26, 0x84, 0x56, 0x2e, 0xca, 0x13, + 0x42, 0x8a, 0xa9, 0xd2, 0x13, 0x00, 0x76, 0x8a, 0x25, 0xc7, 0xa6, 0x6b, 0x17, 0x45, 0x7d, 0xb1, + 0x36, 0xb0, 0x05, 0x0b, 0xe3, 0x75, 0x77, 0x97, 0xfe, 0xb9, 0x45, 0x0f, 0xd0, 0x63, 0xf4, 0x10, + 0x7d, 0x8c, 0xfa, 0xd2, 0x3c, 0x56, 0x70, 0x91, 0xca, 0x6b, 0x07, 0x91, 0x9e, 0xa0, 0x4f, 0xf6, + 0x7c, 0xdf, 0xcc, 0x37, 0xf3, 0x8d, 0x76, 0x60, 0x33, 0x65, 0x0b, 0xc9, 0x59, 0x1c, 0x77, 0x53, + 0xce, 0xd8, 0xfb, 0xae, 0xfc, 0x92, 0x52, 0x61, 0xa4, 0x9c, 0x49, 0x86, 0x6a, 0xf7, 0x9c, 0xa1, + 0xb8, 0xe6, 0xe9, 0x84, 0x89, 0x25, 0x13, 0xa1, 0x62, 0xbb, 0x79, 0x90, 0xa7, 0x36, 0x9f, 0x6f, + 0x65, 0x04, 0x15, 0x22, 0x62, 0xc9, 0xae, 0x50, 0xb3, 0x3e, 0x63, 0x33, 0x96, 0x57, 0x65, 0x7f, + 0x39, 0x7a, 0xf6, 0x0b, 0xc0, 0xfd, 0x51, 0x26, 0x8c, 0x02, 0x78, 0x2a, 0x56, 0x69, 0x1a, 0x47, + 0x94, 0x87, 0x2c, 0xa5, 0x9c, 0x48, 0xc6, 0x43, 0x32, 0x9d, 0x72, 0x2a, 0x44, 0x03, 0xb4, 0x40, + 0xbb, 0xd2, 0x6f, 0xfc, 0xfc, 0x7e, 0x5e, 0x2f, 0x5a, 0xf6, 0x72, 0xc6, 0x97, 0x3c, 0x4a, 0x66, + 0xf8, 0xe4, 0xbe, 0xd4, 0x2b, 0x2a, 0x0b, 0x1a, 0x5d, 0xc2, 0x5a, 0x31, 0x4c, 0x38, 0xa7, 0x64, + 0x4a, 0x79, 0xa3, 0xdc, 0x02, 0xed, 0xc3, 0x8b, 0x17, 0xc6, 0xd6, 0x57, 0xc1, 0x1b, 0x7e, 0xfe, + 0x1d, 0xaa, 0x34, 0x5c, 0x15, 0xbb, 0x21, 0x7a, 0x09, 0xeb, 0x93, 0x98, 0x09, 0x2a, 0x64, 0xb8, + 0xa4, 0x7c, 0x11, 0xd3, 0x50, 0xad, 0xa3, 0xb1, 0xd7, 0x02, 0xed, 0x23, 0x8c, 0x0a, 0xee, 0x4a, + 0x51, 0xca, 0xcf, 0xd9, 0xb7, 0x32, 0xdc, 
0x1f, 0xc4, 0x24, 0x5a, 0xfe, 0xe3, 0xce, 0x9e, 0xc1, + 0x0a, 0x67, 0x4c, 0x86, 0x73, 0x22, 0xe6, 0x85, 0x9d, 0x47, 0x19, 0x30, 0x24, 0x62, 0x8e, 0xae, + 0xe1, 0x89, 0xf2, 0x19, 0x7e, 0x24, 0x71, 0x34, 0x25, 0x32, 0xeb, 0x26, 0x24, 0x91, 0x2b, 0xd1, + 0xf8, 0xaf, 0x05, 0xda, 0xb5, 0x8b, 0x96, 0xf1, 0xf0, 0x7d, 0x18, 0xca, 0xb2, 0xda, 0x80, 0xaf, + 0xf2, 0xf0, 0x53, 0x85, 0x8f, 0xb7, 0xf5, 0x39, 0xdc, 0x71, 0xe0, 0xb1, 0xca, 0xc2, 0xf4, 0xc3, + 0x2a, 0xe2, 0x74, 0x49, 0x13, 0x89, 0x29, 0x11, 0x2c, 0x41, 0x1a, 0x3c, 0x72, 0xbd, 0x20, 0xc4, + 0xd6, 0x9b, 0xb7, 0x36, 0xb6, 0x4c, 0xad, 0x84, 0x9e, 0xc0, 0xea, 0x08, 0x7b, 0xfd, 0x5e, 0xdf, + 0x76, 0x6c, 0x3f, 0xb0, 0x07, 0x1a, 0x40, 0x55, 0x58, 0x09, 0x86, 0xd8, 0xf2, 0x87, 0x9e, 0x63, + 0x6a, 0xe5, 0x8e, 0x09, 0x1f, 0x3f, 0x68, 0x3c, 0xa3, 0xe8, 0x10, 0xfe, 0x3f, 0x70, 0x7a, 0xf6, + 0x95, 0x52, 0x80, 0xf0, 0x60, 0x84, 0xbd, 0xb1, 0xe5, 0x6a, 0x20, 0x23, 0x7c, 0x2b, 0x08, 0x1c, + 0xcb, 0xd4, 0xca, 0x59, 0x60, 0x5d, 0x8f, 0x54, 0x9f, 0xbd, 0xce, 0x25, 0xd4, 0xfe, 0x1e, 0x1f, + 0x1d, 0x43, 0x34, 0xb2, 0x5c, 0xd3, 0x76, 0x5f, 0x87, 0xe3, 0x9e, 0x63, 0x9b, 0xbd, 0xc0, 0xf6, + 0x5c, 0xad, 0x94, 0x0d, 0x50, 0xc4, 0x96, 0x99, 0x8b, 0xda, 0xae, 0x02, 0xb4, 0x72, 0xdf, 0xf9, + 0xb1, 0xd6, 0xc1, 0xed, 0x5a, 0x07, 0x77, 0x6b, 0x1d, 0xfc, 0x5e, 0xeb, 0xe0, 0xeb, 0x46, 0x2f, + 0xdd, 0x6e, 0xf4, 0xd2, 0xdd, 0x46, 0x2f, 0xbd, 0x33, 0x66, 0x91, 0x9c, 0xaf, 0x6e, 0x8c, 0x09, + 0x5b, 0x76, 0xb3, 0xe5, 0x9d, 0x27, 0x54, 0x7e, 0x62, 0x7c, 0xd1, 0xdd, 0x9e, 0xcf, 0xe7, 0xdd, + 0x3b, 0xbc, 0x39, 0x50, 0x97, 0xf2, 0xea, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x03, 0x4b, 0xbc, + 0x6b, 0xa6, 0x03, 0x00, 0x00, } func (m *Proof) Marshal() (dAtA []byte, err error) { @@ -357,8 +359,8 @@ func (m *Claim) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.ProofStatus != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.ProofStatus)) + if m.ProofValidationStatus != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.ProofValidationStatus)) 
i-- dAtA[i] = 0x20 } @@ -441,8 +443,8 @@ func (m *Claim) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } - if m.ProofStatus != 0 { - n += 1 + sovTypes(uint64(m.ProofStatus)) + if m.ProofValidationStatus != 0 { + n += 1 + sovTypes(uint64(m.ProofValidationStatus)) } return n } @@ -738,9 +740,9 @@ func (m *Claim) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 4: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ProofStatus", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ProofValidationStatus", wireType) } - m.ProofStatus = 0 + m.ProofValidationStatus = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -750,7 +752,7 @@ func (m *Claim) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ProofStatus |= ClaimProofStatus(b&0x7F) << shift + m.ProofValidationStatus |= ClaimProofStatus(b&0x7F) << shift if b < 0x80 { break } diff --git a/x/tokenomics/keeper/settle_pending_claims.go b/x/tokenomics/keeper/settle_pending_claims.go index c8a8fae25..722d328d6 100644 --- a/x/tokenomics/keeper/settle_pending_claims.go +++ b/x/tokenomics/keeper/settle_pending_claims.go @@ -134,27 +134,25 @@ func (k Keeper) SettlePendingClaims(ctx cosmostypes.Context) ( proofIsRequired := proofRequirement != prooftypes.ProofRequirementReason_NOT_REQUIRED if proofIsRequired { - // The tokenomics end blocker, which calls SettlePendingClaims, is ALWAYS executed - // AFTER the proof submission window closes. In contrast, the proof end blocker, - // which handles proof validation, is ALWAYS executed WITHIN the proof submission - // window of the same session number. - // This ensures that proof validation is completed before claims settlement, - // as they occur at different block heights. 
+ // IMPORTANT: Proof validation and claims settlement timing: + // - Proof validation (proof end blocker): Executes WITHIN proof submission window + // - Claims settlement (tokenomics end blocker): Executes AFTER window closes + // This ensures proofs are validated before claims are settled var expirationReason tokenomicstypes.ClaimExpirationReason - switch claim.ProofStatus { + switch claim.ProofValidationStatus { // If the proof is required and not found, the claim is expired. - case prooftypes.ClaimProofStatus_NOT_FOUND: + case prooftypes.ClaimProofStatus_PENDING_VALIDATION: expirationReason = tokenomicstypes.ClaimExpirationReason_PROOF_MISSING // If the proof is required and invalid, the claim is expired. case prooftypes.ClaimProofStatus_INVALID: expirationReason = tokenomicstypes.ClaimExpirationReason_PROOF_INVALID // If the proof is required and valid, the claim is settled. - case prooftypes.ClaimProofStatus_VALID: + case prooftypes.ClaimProofStatus_VALIDATED: expirationReason = tokenomicstypes.ClaimExpirationReason_EXPIRATION_REASON_UNSPECIFIED } - if claim.ProofStatus != prooftypes.ClaimProofStatus_VALID { + if claim.ProofValidationStatus != prooftypes.ClaimProofStatus_VALIDATED { // TODO_BETA(@red-0ne): Slash the supplier in proportion to their stake. 
// TODO_POST_MAINNET: Consider allowing suppliers to RemoveClaim via a new // message in case it was sent by accident From 307ff89562ac28fd7e382913faf94687b20c5c80 Mon Sep 17 00:00:00 2001 From: Redouane Lakrache Date: Wed, 29 Jan 2025 13:24:09 +0100 Subject: [PATCH 20/24] fix: Use fix smt verification concurrency --- pkg/crypto/protocol/proof_path.go | 28 ++++++++++++------------- x/proof/keeper/proof_validation.go | 10 ++++----- x/proof/keeper/proof_validation_test.go | 4 ++-- 3 files changed, 21 insertions(+), 21 deletions(-) diff --git a/pkg/crypto/protocol/proof_path.go b/pkg/crypto/protocol/proof_path.go index 61f7e23ce..1a48b40ee 100644 --- a/pkg/crypto/protocol/proof_path.go +++ b/pkg/crypto/protocol/proof_path.go @@ -6,20 +6,8 @@ import ( "github.com/pokt-network/smt" ) -// SMT specification used for the proof verification. -var ( - newHasher = sha256.New - SmtSpec smt.TrieSpec -) - -func init() { - // Use a spec that does not prehash values in the smst. This returns a nil value - // hasher for the proof verification in order to avoid hashing the value twice. - SmtSpec = smt.NewTrieSpec( - newHasher(), true, - smt.WithValueHasher(nil), - ) -} +// newHasher is the hash function used by the SMT specification. +var newHasher = sha256.New // GetPathForProof computes the path to be used for proof validation by hashing // the block hash and session id. @@ -31,3 +19,15 @@ func GetPathForProof(blockHash []byte, sessionId string) []byte { return hasher.Sum(nil) } + +// NewSMTSpec returns the SMT specification used for the proof verification. +// It uses a new hasher at every call to avoid concurrency issues that could be +// caused by a shared hasher. 
+func NewSMTSpec() *smt.TrieSpec { + trieSpec := smt.NewTrieSpec( + newHasher(), true, + smt.WithValueHasher(nil), + ) + + return &trieSpec +} diff --git a/x/proof/keeper/proof_validation.go b/x/proof/keeper/proof_validation.go index e4d53be2e..bf55d2e3c 100644 --- a/x/proof/keeper/proof_validation.go +++ b/x/proof/keeper/proof_validation.go @@ -106,14 +106,14 @@ func (k Keeper) EnsureWellFormedProof(ctx context.Context, proof *types.Proof) e } // SparseCompactMerkeClosestProof does not implement GetValueHash, so we need to decompact it. - sparseMerkleClosestProof, err := smt.DecompactClosestProof(sparseCompactMerkleClosestProof, &protocol.SmtSpec) + sparseMerkleClosestProof, err := smt.DecompactClosestProof(sparseCompactMerkleClosestProof, protocol.NewSMTSpec()) if err != nil { logger.Error(fmt.Sprintf("failed to decompact sparse merkle closest proof due to error: %v", err)) return types.ErrProofInvalidProof.Wrapf("failed to decompact sparse erkle closest proof: %s", err) } // Get the relay request and response from the proof.GetClosestMerkleProof. - relayBz := sparseMerkleClosestProof.GetValueHash(&protocol.SmtSpec) + relayBz := sparseMerkleClosestProof.GetValueHash(protocol.NewSMTSpec()) relay := &servicetypes.Relay{} if err = k.cdc.Unmarshal(relayBz, relay); err != nil { logger.Error(fmt.Sprintf("failed to unmarshal relay due to error: %v", err)) @@ -231,14 +231,14 @@ func (k Keeper) EnsureValidProofSignaturesAndClosestPath( // SparseCompactMerkeClosestProof was intentionally compacted to reduce its onchain state size // so it must be decompacted rather than just retrieving the value via GetValueHash (not implemented). 
- sparseMerkleClosestProof, err := smt.DecompactClosestProof(sparseCompactMerkleClosestProof, &protocol.SmtSpec) + sparseMerkleClosestProof, err := smt.DecompactClosestProof(sparseCompactMerkleClosestProof, protocol.NewSMTSpec()) if err != nil { logger.Error(fmt.Sprintf("failed to decompact sparse merkle closest proof due to error: %v", err)) return types.ErrProofInvalidProof.Wrapf("failed to decompact sparse merkle closest proof: %s", err) } // Get the relay request and response from the proof.GetClosestMerkleProof. - relayBz := sparseMerkleClosestProof.GetValueHash(&protocol.SmtSpec) + relayBz := sparseMerkleClosestProof.GetValueHash(protocol.NewSMTSpec()) relay := &servicetypes.Relay{} if err = k.cdc.Unmarshal(relayBz, relay); err != nil { logger.Error(fmt.Sprintf("failed to unmarshal relay due to error: %v", err)) @@ -449,7 +449,7 @@ func verifyClosestProof( proof *smt.SparseMerkleClosestProof, claimRootHash []byte, ) error { - valid, err := smt.VerifyClosestProof(proof, claimRootHash, &protocol.SmtSpec) + valid, err := smt.VerifyClosestProof(proof, claimRootHash, protocol.NewSMTSpec()) if err != nil { return err } diff --git a/x/proof/keeper/proof_validation_test.go b/x/proof/keeper/proof_validation_test.go index 349dcd59c..f2701db84 100644 --- a/x/proof/keeper/proof_validation_test.go +++ b/x/proof/keeper/proof_validation_test.go @@ -611,10 +611,10 @@ func TestEnsureValidProof_Error(t *testing.T) { err = sparseCompactMerkleClosestProof.Unmarshal(proof.ClosestMerkleProof) require.NoError(t, err) var sparseMerkleClosestProof *smt.SparseMerkleClosestProof - sparseMerkleClosestProof, err = smt.DecompactClosestProof(sparseCompactMerkleClosestProof, &protocol.SmtSpec) + sparseMerkleClosestProof, err = smt.DecompactClosestProof(sparseCompactMerkleClosestProof, protocol.NewSMTSpec()) require.NoError(t, err) - relayBz := sparseMerkleClosestProof.GetValueHash(&protocol.SmtSpec) + relayBz := sparseMerkleClosestProof.GetValueHash(protocol.NewSMTSpec()) relayHashArr := 
protocol.GetRelayHashFromBytes(relayBz) relayHash := relayHashArr[:] From ba4df79b9117dec092e59feeb7e03146be4c7afe Mon Sep 17 00:00:00 2001 From: Redouane Lakrache Date: Wed, 29 Jan 2025 13:59:49 +0100 Subject: [PATCH 21/24] Empty commit From 3345bd2901c058c12b38b4c11259c7a358958cc3 Mon Sep 17 00:00:00 2001 From: forcedebug <167591285+forcedebug@users.noreply.github.com> Date: Thu, 30 Jan 2025 01:49:11 +0800 Subject: [PATCH 22/24] chore: fix some function names in comment (#1019) fix some function names in comment --- e2e/tests/node.go | 2 +- e2e/tests/reset_params_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/e2e/tests/node.go b/e2e/tests/node.go index 6620c995e..d49970d8c 100644 --- a/e2e/tests/node.go +++ b/e2e/tests/node.go @@ -187,7 +187,7 @@ func (p *pocketdBin) runPocketCmd(args ...string) (*commandResult, error) { return r, err } -// runCurlPostCmd is a helper to run a command using the local pocketd binary with the flags provided +// runCurlCmd is a helper to run a command using the local pocketd binary with the flags provided func (p *pocketdBin) runCurlCmd(rpcBaseURL, service, method, path, appAddr, data string, args ...string) (*commandResult, error) { rpcUrl, err := url.Parse(rpcBaseURL) if err != nil { diff --git a/e2e/tests/reset_params_test.go b/e2e/tests/reset_params_test.go index 404ecd176..3b0c0ab1d 100644 --- a/e2e/tests/reset_params_test.go +++ b/e2e/tests/reset_params_test.go @@ -28,7 +28,7 @@ func (s *suite) resetAllModuleParamsToDefaults() { s.sendAuthzExecTx(s.granteeName, resetTxJSONFile.Name()) } -// allMoudlesMsgUpdateParamsToDefaultsAny returns a slice of Any messages, each corresponding +// allModulesMsgUpdateParamsToDefaultsAny returns a slice of Any messages, each corresponding // to a MsgUpdateParams for a module, populated with the respective default values. 
func (s *suite) allModulesMsgUpdateParamsToDefaultsAny() []*codectypes.Any { s.Helper() From 77c1cb6b0faa79985cd07d77d1d466475b8cc7f5 Mon Sep 17 00:00:00 2001 From: Redouane Lakrache Date: Thu, 30 Jan 2025 00:24:00 +0100 Subject: [PATCH 23/24] revert to a working state with concurrency --- api/poktroll/proof/event.pulsar.go | 7 ++++-- ...est_manifest_localnet_single_supplier.yaml | 2 +- makefiles/tests.mk | 2 +- pkg/client/query/appquerier.go | 4 ++-- pkg/crypto/protocol/proof_path.go | 16 ++++++------- pkg/crypto/rings/client.go | 4 +--- proto/poktroll/proof/event.proto | 5 +++- x/proof/keeper/proof_validation.go | 24 +++++++++++++++---- x/proof/types/account_query_client.go | 20 ++++++---------- x/proof/types/event.pb.go | 7 ++++-- 10 files changed, 52 insertions(+), 39 deletions(-) diff --git a/api/poktroll/proof/event.pulsar.go b/api/poktroll/proof/event.pulsar.go index dd7b048a8..b14844e1c 100644 --- a/api/poktroll/proof/event.pulsar.go +++ b/api/poktroll/proof/event.pulsar.go @@ -3698,7 +3698,8 @@ func (x *EventProofUpdated) GetClaimedUpokt() *v1beta1.Coin { return nil } -// Event emitted after a proof has been checked for validity. +// Event emitted after a proof has been checked for validity in the proof module's +// EndBlocker. type EventProofValidityChecked struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3707,7 +3708,9 @@ type EventProofValidityChecked struct { Proof *Proof `protobuf:"bytes,1,opt,name=proof,proto3" json:"proof,omitempty"` BlockHeight uint64 `protobuf:"varint,2,opt,name=block_height,json=blockHeight,proto3" json:"block_height,omitempty"` ProofStatus ClaimProofStatus `protobuf:"varint,3,opt,name=proof_status,json=proofStatus,proto3,enum=poktroll.proof.ClaimProofStatus" json:"proof_status,omitempty"` - Reason string `protobuf:"bytes,4,opt,name=reason,proto3" json:"reason,omitempty"` + // reason is the string representation of the error that led to the proof being + // marked as invalid (e.g. 
"invalid closest merkle proof", "invalid relay request signature") + Reason string `protobuf:"bytes,4,opt,name=reason,proto3" json:"reason,omitempty"` } func (x *EventProofValidityChecked) Reset() { diff --git a/load-testing/loadtest_manifest_localnet_single_supplier.yaml b/load-testing/loadtest_manifest_localnet_single_supplier.yaml index 2b14858a9..a072b68d7 100644 --- a/load-testing/loadtest_manifest_localnet_single_supplier.yaml +++ b/load-testing/loadtest_manifest_localnet_single_supplier.yaml @@ -44,7 +44,7 @@ gateways: # Gateway 1; http://localhost:10350/r/gateway1/overview - address: pokt15vzxjqklzjtlz7lahe8z2dfe9nm5vxwwmscne4 - exposed_url: http://localhost:3069/v1/ # The gateway url that the user sends relays to (e.g. curl) + exposed_url: https://devnet-red0ne-path-1.poktroll.com/v1/ # The gateway url that the user sends relays to (e.g. curl) ## Gateway 2; http://localhost:10350/r/gateway2/overview #- address: pokt15w3fhfyc0lttv7r585e2ncpf6t2kl9uh8rsnyz diff --git a/makefiles/tests.mk b/makefiles/tests.mk index b1962c845..676889a73 100644 --- a/makefiles/tests.mk +++ b/makefiles/tests.mk @@ -59,7 +59,7 @@ test_load_relays_stress_localnet: test_e2e_env warn_message_local_stress_test ## .PHONY: test_load_relays_stress_localnet_single_supplier test_load_relays_stress_localnet_single_supplier: test_e2e_env warn_message_local_stress_test ## Run the stress test for E2E relays on LocalNet using exclusively one supplier. go test -v -count=1 ./load-testing/tests/... 
\ - -tags=load,test -run TestSingleSupplierLoadRelays --log-level=debug --timeout=30m \ + -tags=load,test -run TestSingleSupplierLoadRelays --log-level=debug --timeout=50m \ --manifest ./load-testing/loadtest_manifest_localnet_single_supplier.yaml .PHONY: test_verbose diff --git a/pkg/client/query/appquerier.go b/pkg/client/query/appquerier.go index 46b0f8188..459d55ea8 100644 --- a/pkg/client/query/appquerier.go +++ b/pkg/client/query/appquerier.go @@ -24,7 +24,7 @@ type appQuerier struct { blockClient client.BlockClient appCache map[string]*apptypes.Application appParamsCache *apptypes.Params - appCacheMu sync.Mutex + appCacheMu *sync.Mutex } // NewApplicationQuerier returns a new instance of a client.ApplicationQueryClient @@ -33,7 +33,7 @@ type appQuerier struct { // Required dependencies: // - clientCtx func NewApplicationQuerier(ctx context.Context, deps depinject.Config) (client.ApplicationQueryClient, error) { - aq := &appQuerier{} + aq := &appQuerier{appCacheMu: &sync.Mutex{}} if err := depinject.Inject( deps, diff --git a/pkg/crypto/protocol/proof_path.go b/pkg/crypto/protocol/proof_path.go index 61f7e23ce..c74f18184 100644 --- a/pkg/crypto/protocol/proof_path.go +++ b/pkg/crypto/protocol/proof_path.go @@ -2,29 +2,27 @@ package protocol import ( "crypto/sha256" - - "github.com/pokt-network/smt" ) // SMT specification used for the proof verification. var ( - newHasher = sha256.New - SmtSpec smt.TrieSpec + NewHasher = sha256.New + //SmtSpec smt.TrieSpec ) func init() { // Use a spec that does not prehash values in the smst. This returns a nil value // hasher for the proof verification in order to avoid hashing the value twice. - SmtSpec = smt.NewTrieSpec( - newHasher(), true, - smt.WithValueHasher(nil), - ) + //SmtSpec = smt.NewTrieSpec( + // newHasher(), true, + // smt.WithValueHasher(nil), + //) } // GetPathForProof computes the path to be used for proof validation by hashing // the block hash and session id. 
func GetPathForProof(blockHash []byte, sessionId string) []byte { - hasher := newHasher() + hasher := NewHasher() if _, err := hasher.Write(append(blockHash, []byte(sessionId)...)); err != nil { panic(err) } diff --git a/pkg/crypto/rings/client.go b/pkg/crypto/rings/client.go index 8373be876..87dfcb6cb 100644 --- a/pkg/crypto/rings/client.go +++ b/pkg/crypto/rings/client.go @@ -21,9 +21,7 @@ import ( var _ crypto.RingClient = (*ringClient)(nil) -// ringClient is an implementation of the RingClient interface that uses the -// client.ApplicationQueryClient to get application's delegation information -// needed to construct the ring for signing relay requests. +// ringClient implements the RingClient interface. type ringClient struct { logger polylog.Logger diff --git a/proto/poktroll/proof/event.proto b/proto/poktroll/proof/event.proto index f54231121..271a6c355 100644 --- a/proto/poktroll/proof/event.proto +++ b/proto/poktroll/proof/event.proto @@ -44,10 +44,13 @@ message EventProofUpdated { cosmos.base.v1beta1.Coin claimed_upokt = 6 [(gogoproto.jsontag) = "claimed_upokt"]; } -// Event emitted after a proof has been checked for validity. +// Event emitted after a proof has been checked for validity in the proof module's +// EndBlocker. message EventProofValidityChecked { poktroll.proof.Proof proof = 1 [(gogoproto.jsontag) = "proof"]; uint64 block_height = 2 [(gogoproto.jsontag) = "block_height"]; poktroll.proof.ClaimProofStatus proof_status = 3 [(gogoproto.jsontag) = "proof_status"]; + // reason is the string representation of the error that led to the proof being + // marked as invalid (e.g. 
"invalid closest merkle proof", "invalid relay request signature") string reason = 4 [(gogoproto.jsontag) = "reason"]; } diff --git a/x/proof/keeper/proof_validation.go b/x/proof/keeper/proof_validation.go index 897e3ed16..a4fb59113 100644 --- a/x/proof/keeper/proof_validation.go +++ b/x/proof/keeper/proof_validation.go @@ -105,15 +105,20 @@ func (k Keeper) EnsureWellFormedProof(ctx context.Context, proof *types.Proof) e return types.ErrProofInvalidProof.Wrapf("failed to unmarshal sparse compact merkle closest proof: %s", err) } + smtSpec := smt.NewTrieSpec( + protocol.NewHasher(), true, + smt.WithValueHasher(nil), + ) + // SparseCompactMerkeClosestProof does not implement GetValueHash, so we need to decompact it. - sparseMerkleClosestProof, err := smt.DecompactClosestProof(sparseCompactMerkleClosestProof, &protocol.SmtSpec) + sparseMerkleClosestProof, err := smt.DecompactClosestProof(sparseCompactMerkleClosestProof, &smtSpec) if err != nil { logger.Error(fmt.Sprintf("failed to decompact sparse merkle closest proof due to error: %v", err)) return types.ErrProofInvalidProof.Wrapf("failed to decompact sparse erkle closest proof: %s", err) } // Get the relay request and response from the proof.GetClosestMerkleProof. 
- relayBz := sparseMerkleClosestProof.GetValueHash(&protocol.SmtSpec) + relayBz := sparseMerkleClosestProof.GetValueHash(&smtSpec) relay := &servicetypes.Relay{} if err = k.cdc.Unmarshal(relayBz, relay); err != nil { logger.Error(fmt.Sprintf("failed to unmarshal relay due to error: %v", err)) @@ -229,16 +234,21 @@ func (k Keeper) EnsureValidProofSignaturesAndClosestPath( return types.ErrProofInvalidProof.Wrapf("failed to unmarshal sparse compact merkle closest proof: %s", err) } + smtSpec := smt.NewTrieSpec( + protocol.NewHasher(), true, + smt.WithValueHasher(nil), + ) + // SparseCompactMerkeClosestProof was intentionally compacted to reduce its onchain state size // so it must be decompacted rather than just retrieving the value via GetValueHash (not implemented). - sparseMerkleClosestProof, err := smt.DecompactClosestProof(sparseCompactMerkleClosestProof, &protocol.SmtSpec) + sparseMerkleClosestProof, err := smt.DecompactClosestProof(sparseCompactMerkleClosestProof, &smtSpec) if err != nil { logger.Error(fmt.Sprintf("failed to decompact sparse merkle closest proof due to error: %v", err)) return types.ErrProofInvalidProof.Wrapf("failed to decompact sparse merkle closest proof: %s", err) } // Get the relay request and response from the proof.GetClosestMerkleProof. 
- relayBz := sparseMerkleClosestProof.GetValueHash(&protocol.SmtSpec) + relayBz := sparseMerkleClosestProof.GetValueHash(&smtSpec) relay := &servicetypes.Relay{} if err = k.cdc.Unmarshal(relayBz, relay); err != nil { logger.Error(fmt.Sprintf("failed to unmarshal relay due to error: %v", err)) @@ -451,7 +461,11 @@ func verifyClosestProof( proof *smt.SparseMerkleClosestProof, claimRootHash []byte, ) error { - valid, err := smt.VerifyClosestProof(proof, claimRootHash, &protocol.SmtSpec) + smtSpec := smt.NewTrieSpec( + protocol.NewHasher(), true, + smt.WithValueHasher(nil), + ) + valid, err := smt.VerifyClosestProof(proof, claimRootHash, &smtSpec) if err != nil { return err } diff --git a/x/proof/types/account_query_client.go b/x/proof/types/account_query_client.go index 2ce4fa58c..61f8faff8 100644 --- a/x/proof/types/account_query_client.go +++ b/x/proof/types/account_query_client.go @@ -3,12 +3,12 @@ package types import ( "context" fmt "fmt" - "sync" cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" "github.com/cosmos/cosmos-sdk/types" "github.com/pokt-network/poktroll/pkg/client" + sharedtypes "github.com/pokt-network/poktroll/x/shared/types" ) var _ client.AccountQueryClient = (*AccountKeeperQueryClient)(nil) @@ -18,8 +18,7 @@ var _ client.AccountQueryClient = (*AccountKeeperQueryClient)(nil) // network requests as in the offchain implementation. 
type AccountKeeperQueryClient struct { keeper AccountKeeper - accountPubKeyCache map[string]cryptotypes.PubKey - CacheMu *sync.RWMutex + accountPubKeyCache *sharedtypes.Cache[string, cryptotypes.PubKey] } // NewAccountKeeperQueryClient returns a new AccountQueryClient that is backed @@ -30,8 +29,7 @@ type AccountKeeperQueryClient struct { func NewAccountKeeperQueryClient(accountKeeper AccountKeeper) client.AccountQueryClient { return &AccountKeeperQueryClient{ keeper: accountKeeper, - accountPubKeyCache: make(map[string]cryptotypes.PubKey), - CacheMu: &sync.RWMutex{}, + accountPubKeyCache: sharedtypes.NewCache[string, cryptotypes.PubKey](), } } @@ -66,11 +64,9 @@ func (accountQueryClient *AccountKeeperQueryClient) GetPubKeyFromAddress( ctx context.Context, address string, ) (cryptotypes.PubKey, error) { - accountQueryClient.CacheMu.RLock() - defer accountQueryClient.CacheMu.RUnlock() - if acc, found := accountQueryClient.accountPubKeyCache[address]; found { + if pubkey, found := accountQueryClient.accountPubKeyCache.Get(address); found { fmt.Println("-----PubKey cache hit-----") - return acc, nil + return pubkey, nil } acc, err := accountQueryClient.GetAccount(ctx, address) @@ -87,13 +83,11 @@ func (accountQueryClient *AccountKeeperQueryClient) GetPubKeyFromAddress( return nil, ErrProofPubKeyNotFound } - accountQueryClient.accountPubKeyCache[address] = pubKey + accountQueryClient.accountPubKeyCache.Set(address, pubKey) return pubKey, nil } func (accountQueryClient *AccountKeeperQueryClient) ClearCache() { - accountQueryClient.CacheMu.Lock() - defer accountQueryClient.CacheMu.Unlock() - clear(accountQueryClient.accountPubKeyCache) + accountQueryClient.accountPubKeyCache.Clear() } diff --git a/x/proof/types/event.pb.go b/x/proof/types/event.pb.go index d71cea0bf..b3ddcb191 100644 --- a/x/proof/types/event.pb.go +++ b/x/proof/types/event.pb.go @@ -330,12 +330,15 @@ func (m *EventProofUpdated) GetClaimedUpokt() *types.Coin { return nil } -// Event emitted after a proof 
has been checked for validity. +// Event emitted after a proof has been checked for validity in the proof module's +// EndBlocker. type EventProofValidityChecked struct { Proof *Proof `protobuf:"bytes,1,opt,name=proof,proto3" json:"proof"` BlockHeight uint64 `protobuf:"varint,2,opt,name=block_height,json=blockHeight,proto3" json:"block_height"` ProofStatus ClaimProofStatus `protobuf:"varint,3,opt,name=proof_status,json=proofStatus,proto3,enum=poktroll.proof.ClaimProofStatus" json:"proof_status"` - Reason string `protobuf:"bytes,4,opt,name=reason,proto3" json:"reason"` + // reason is the string representation of the error that led to the proof being + // marked as invalid (e.g. "invalid closest merkle proof", "invalid relay request signature") + Reason string `protobuf:"bytes,4,opt,name=reason,proto3" json:"reason"` } func (m *EventProofValidityChecked) Reset() { *m = EventProofValidityChecked{} } From 1486459a6399086e476c5ce27a1f02d09bee74d8 Mon Sep 17 00:00:00 2001 From: Redouane Lakrache Date: Thu, 30 Jan 2025 04:38:12 +0100 Subject: [PATCH 24/24] better concurrency --- pkg/relayer/session/claim.go | 18 ++++++++++++++++-- pkg/relayer/session/proof.go | 31 +++++++++++++++++++------------ 2 files changed, 35 insertions(+), 14 deletions(-) diff --git a/pkg/relayer/session/claim.go b/pkg/relayer/session/claim.go index 2278c9cd8..22f12d6ce 100644 --- a/pkg/relayer/session/claim.go +++ b/pkg/relayer/session/claim.go @@ -233,6 +233,10 @@ func (rs *relayerSessionsManager) goCreateClaimRoots( ) { failedClaims := []relayer.SessionTree{} flushedClaims := []relayer.SessionTree{} + + failedClaimsCh := make(chan relayer.SessionTree, len(sessionTrees)) + flushedClaimsCh := make(chan relayer.SessionTree, len(sessionTrees)) + wg := sync.WaitGroup{} sem := make(chan struct{}, runtime.NumCPU()) for _, sessionTree := range sessionTrees { @@ -251,15 +255,25 @@ func (rs *relayerSessionsManager) goCreateClaimRoots( // This session should no longer be updated if _, err := tree.Flush(); 
err != nil { rs.logger.Error().Err(err).Msg("failed to flush session") - failedClaims = append(failedClaims, tree) + failedClaimsCh <- tree return } - flushedClaims = append(flushedClaims, tree) + flushedClaimsCh <- tree }(sessionTree) } wg.Wait() + close(failedClaimsCh) + close(flushedClaimsCh) + + for failedClaim := range failedClaimsCh { + failedClaims = append(failedClaims, failedClaim) + } + + for flushedClaim := range flushedClaimsCh { + flushedClaims = append(flushedClaims, flushedClaim) + } failSubmitProofsSessionsCh <- failedClaims claimsFlushedCh <- flushedClaims diff --git a/pkg/relayer/session/proof.go b/pkg/relayer/session/proof.go index 075019771..3e9dfbef4 100644 --- a/pkg/relayer/session/proof.go +++ b/pkg/relayer/session/proof.go @@ -214,10 +214,13 @@ func (rs *relayerSessionsManager) proveClaims( ) (successProofs []relayer.SessionTree, failedProofs []relayer.SessionTree) { logger := rs.logger.With("method", "proveClaims") - proofsMu := sync.Mutex{} - wg := sync.WaitGroup{} + wg := &sync.WaitGroup{} sem := make(chan struct{}, runtime.NumCPU()) + // Create buffered channels for collecting results + successProofsCh := make(chan relayer.SessionTree, len(sessionTrees)) + failedProofsCh := make(chan relayer.SessionTree, len(sessionTrees)) + for _, sessionTree := range sessionTrees { sem <- struct{}{} wg.Add(1) @@ -231,9 +234,7 @@ func (rs *relayerSessionsManager) proveClaims( // do not create the claim since the proof requirement is unknown. // WARNING: Creating a claim and not submitting a proof (if necessary) could lead to a stake burn!! if err != nil { - proofsMu.Lock() - failedProofs = append(failedProofs, tree) - proofsMu.Unlock() + failedProofsCh <- tree rs.logger.Error().Err(err).Msg("failed to determine if proof is required, skipping claim creation") return } @@ -249,22 +250,28 @@ func (rs *relayerSessionsManager) proveClaims( // If the proof cannot be generated, add the sessionTree to the failedProofs. 
if _, err := tree.ProveClosest(path); err != nil { - proofsMu.Lock() - failedProofs = append(failedProofs, tree) - proofsMu.Unlock() + failedProofsCh <- tree logger.Error().Err(err).Msg("failed to generate proof") return } // If the proof was generated successfully, add the sessionTree to the // successProofs slice that will be sent to the proof submission step. - proofsMu.Lock() - successProofs = append(successProofs, tree) - proofsMu.Unlock() + successProofsCh <- tree } }(sessionTree) + } - wg.Wait() + wg.Wait() + close(successProofsCh) + close(failedProofsCh) + + // Convert channels to slices + for tree := range successProofsCh { + successProofs = append(successProofs, tree) + } + for tree := range failedProofsCh { + failedProofs = append(failedProofs, tree) } return successProofs, failedProofs