diff --git a/api/poktroll/tokenomics/event.pulsar.go b/api/poktroll/tokenomics/event.pulsar.go index 29cb934ef..9d3c567dd 100644 --- a/api/poktroll/tokenomics/event.pulsar.go +++ b/api/poktroll/tokenomics/event.pulsar.go @@ -1033,6 +1033,650 @@ func (x *fastReflection_EventClaimSettled) ProtoMethods() *protoiface.Methods { } } +var ( + md_EventRelayMiningDifficultyUpdated protoreflect.MessageDescriptor + fd_EventRelayMiningDifficultyUpdated_service_id protoreflect.FieldDescriptor + fd_EventRelayMiningDifficultyUpdated_prev_target_hash_hex_encoded protoreflect.FieldDescriptor + fd_EventRelayMiningDifficultyUpdated_new_target_hash_hex_encoded protoreflect.FieldDescriptor + fd_EventRelayMiningDifficultyUpdated_prev_num_relays_ema protoreflect.FieldDescriptor + fd_EventRelayMiningDifficultyUpdated_new_num_relays_ema protoreflect.FieldDescriptor +) + +func init() { + file_poktroll_tokenomics_event_proto_init() + md_EventRelayMiningDifficultyUpdated = File_poktroll_tokenomics_event_proto.Messages().ByName("EventRelayMiningDifficultyUpdated") + fd_EventRelayMiningDifficultyUpdated_service_id = md_EventRelayMiningDifficultyUpdated.Fields().ByName("service_id") + fd_EventRelayMiningDifficultyUpdated_prev_target_hash_hex_encoded = md_EventRelayMiningDifficultyUpdated.Fields().ByName("prev_target_hash_hex_encoded") + fd_EventRelayMiningDifficultyUpdated_new_target_hash_hex_encoded = md_EventRelayMiningDifficultyUpdated.Fields().ByName("new_target_hash_hex_encoded") + fd_EventRelayMiningDifficultyUpdated_prev_num_relays_ema = md_EventRelayMiningDifficultyUpdated.Fields().ByName("prev_num_relays_ema") + fd_EventRelayMiningDifficultyUpdated_new_num_relays_ema = md_EventRelayMiningDifficultyUpdated.Fields().ByName("new_num_relays_ema") +} + +var _ protoreflect.Message = (*fastReflection_EventRelayMiningDifficultyUpdated)(nil) + +type fastReflection_EventRelayMiningDifficultyUpdated EventRelayMiningDifficultyUpdated + +func (x *EventRelayMiningDifficultyUpdated) ProtoReflect() protoreflect.Message { + return (*fastReflection_EventRelayMiningDifficultyUpdated)(x) +} + +func (x *EventRelayMiningDifficultyUpdated) slowProtoReflect() protoreflect.Message { + mi := &file_poktroll_tokenomics_event_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +var _fastReflection_EventRelayMiningDifficultyUpdated_messageType fastReflection_EventRelayMiningDifficultyUpdated_messageType +var _ protoreflect.MessageType = fastReflection_EventRelayMiningDifficultyUpdated_messageType{} + +type fastReflection_EventRelayMiningDifficultyUpdated_messageType struct{} + +func (x fastReflection_EventRelayMiningDifficultyUpdated_messageType) Zero() protoreflect.Message { + return (*fastReflection_EventRelayMiningDifficultyUpdated)(nil) +} +func (x fastReflection_EventRelayMiningDifficultyUpdated_messageType) New() protoreflect.Message { + return new(fastReflection_EventRelayMiningDifficultyUpdated) +} +func (x fastReflection_EventRelayMiningDifficultyUpdated_messageType) Descriptor() protoreflect.MessageDescriptor { + return md_EventRelayMiningDifficultyUpdated +} + +// Descriptor returns message descriptor, which contains only the protobuf +// type information for the message. 
+func (x *fastReflection_EventRelayMiningDifficultyUpdated) Descriptor() protoreflect.MessageDescriptor { + return md_EventRelayMiningDifficultyUpdated +} + +// Type returns the message type, which encapsulates both Go and protobuf +// type information. If the Go type information is not needed, +// it is recommended that the message descriptor be used instead. +func (x *fastReflection_EventRelayMiningDifficultyUpdated) Type() protoreflect.MessageType { + return _fastReflection_EventRelayMiningDifficultyUpdated_messageType +} + +// New returns a newly allocated and mutable empty message. +func (x *fastReflection_EventRelayMiningDifficultyUpdated) New() protoreflect.Message { + return new(fastReflection_EventRelayMiningDifficultyUpdated) +} + +// Interface unwraps the message reflection interface and +// returns the underlying ProtoMessage interface. +func (x *fastReflection_EventRelayMiningDifficultyUpdated) Interface() protoreflect.ProtoMessage { + return (*EventRelayMiningDifficultyUpdated)(x) +} + +// Range iterates over every populated field in an undefined order, +// calling f for each field descriptor and value encountered. +// Range returns immediately if f returns false. +// While iterating, mutating operations may only be performed +// on the current field descriptor. +func (x *fastReflection_EventRelayMiningDifficultyUpdated) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + if x.ServiceId != "" { + value := protoreflect.ValueOfString(x.ServiceId) + if !f(fd_EventRelayMiningDifficultyUpdated_service_id, value) { + return + } + } + if x.PrevTargetHashHexEncoded != "" { + value := protoreflect.ValueOfString(x.PrevTargetHashHexEncoded) + if !f(fd_EventRelayMiningDifficultyUpdated_prev_target_hash_hex_encoded, value) { + return + } + } + if x.NewTargetHashHexEncoded != "" { + value := protoreflect.ValueOfString(x.NewTargetHashHexEncoded) + if !f(fd_EventRelayMiningDifficultyUpdated_new_target_hash_hex_encoded, value) { + return + } + } + if x.PrevNumRelaysEma != uint64(0) { + value := protoreflect.ValueOfUint64(x.PrevNumRelaysEma) + if !f(fd_EventRelayMiningDifficultyUpdated_prev_num_relays_ema, value) { + return + } + } + if x.NewNumRelaysEma != uint64(0) { + value := protoreflect.ValueOfUint64(x.NewNumRelaysEma) + if !f(fd_EventRelayMiningDifficultyUpdated_new_num_relays_ema, value) { + return + } + } +} + +// Has reports whether a field is populated. +// +// Some fields have the property of nullability where it is possible to +// distinguish between the default value of a field and whether the field +// was explicitly populated with the default value. Singular message fields, +// member fields of a oneof, and proto2 scalar fields are nullable. Such +// fields are populated only if explicitly set. +// +// In other cases (aside from the nullable cases above), +// a proto3 scalar field is populated if it contains a non-zero value, and +// a repeated field is populated if it is non-empty. 
+func (x *fastReflection_EventRelayMiningDifficultyUpdated) Has(fd protoreflect.FieldDescriptor) bool { + switch fd.FullName() { + case "poktroll.tokenomics.EventRelayMiningDifficultyUpdated.service_id": + return x.ServiceId != "" + case "poktroll.tokenomics.EventRelayMiningDifficultyUpdated.prev_target_hash_hex_encoded": + return x.PrevTargetHashHexEncoded != "" + case "poktroll.tokenomics.EventRelayMiningDifficultyUpdated.new_target_hash_hex_encoded": + return x.NewTargetHashHexEncoded != "" + case "poktroll.tokenomics.EventRelayMiningDifficultyUpdated.prev_num_relays_ema": + return x.PrevNumRelaysEma != uint64(0) + case "poktroll.tokenomics.EventRelayMiningDifficultyUpdated.new_num_relays_ema": + return x.NewNumRelaysEma != uint64(0) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.tokenomics.EventRelayMiningDifficultyUpdated")) + } + panic(fmt.Errorf("message poktroll.tokenomics.EventRelayMiningDifficultyUpdated does not contain field %s", fd.FullName())) + } +} + +// Clear clears the field such that a subsequent Has call reports false. +// +// Clearing an extension field clears both the extension type and value +// associated with the given field number. +// +// Clear is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_EventRelayMiningDifficultyUpdated) Clear(fd protoreflect.FieldDescriptor) { + switch fd.FullName() { + case "poktroll.tokenomics.EventRelayMiningDifficultyUpdated.service_id": + x.ServiceId = "" + case "poktroll.tokenomics.EventRelayMiningDifficultyUpdated.prev_target_hash_hex_encoded": + x.PrevTargetHashHexEncoded = "" + case "poktroll.tokenomics.EventRelayMiningDifficultyUpdated.new_target_hash_hex_encoded": + x.NewTargetHashHexEncoded = "" + case "poktroll.tokenomics.EventRelayMiningDifficultyUpdated.prev_num_relays_ema": + x.PrevNumRelaysEma = uint64(0) + case "poktroll.tokenomics.EventRelayMiningDifficultyUpdated.new_num_relays_ema": + x.NewNumRelaysEma = uint64(0) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.tokenomics.EventRelayMiningDifficultyUpdated")) + } + panic(fmt.Errorf("message poktroll.tokenomics.EventRelayMiningDifficultyUpdated does not contain field %s", fd.FullName())) + } +} + +// Get retrieves the value for a field. +// +// For unpopulated scalars, it returns the default value, where +// the default value of a bytes scalar is guaranteed to be a copy. +// For unpopulated composite types, it returns an empty, read-only view +// of the value; to obtain a mutable reference, use Mutable. 
+func (x *fastReflection_EventRelayMiningDifficultyUpdated) Get(descriptor protoreflect.FieldDescriptor) protoreflect.Value { + switch descriptor.FullName() { + case "poktroll.tokenomics.EventRelayMiningDifficultyUpdated.service_id": + value := x.ServiceId + return protoreflect.ValueOfString(value) + case "poktroll.tokenomics.EventRelayMiningDifficultyUpdated.prev_target_hash_hex_encoded": + value := x.PrevTargetHashHexEncoded + return protoreflect.ValueOfString(value) + case "poktroll.tokenomics.EventRelayMiningDifficultyUpdated.new_target_hash_hex_encoded": + value := x.NewTargetHashHexEncoded + return protoreflect.ValueOfString(value) + case "poktroll.tokenomics.EventRelayMiningDifficultyUpdated.prev_num_relays_ema": + value := x.PrevNumRelaysEma + return protoreflect.ValueOfUint64(value) + case "poktroll.tokenomics.EventRelayMiningDifficultyUpdated.new_num_relays_ema": + value := x.NewNumRelaysEma + return protoreflect.ValueOfUint64(value) + default: + if descriptor.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.tokenomics.EventRelayMiningDifficultyUpdated")) + } + panic(fmt.Errorf("message poktroll.tokenomics.EventRelayMiningDifficultyUpdated does not contain field %s", descriptor.FullName())) + } +} + +// Set stores the value for a field. +// +// For a field belonging to a oneof, it implicitly clears any other field +// that may be currently set within the same oneof. +// For extension fields, it implicitly stores the provided ExtensionType. +// When setting a composite type, it is unspecified whether the stored value +// aliases the source's memory in any way. If the composite value is an +// empty, read-only value, then it panics. +// +// Set is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_EventRelayMiningDifficultyUpdated) Set(fd protoreflect.FieldDescriptor, value protoreflect.Value) { + switch fd.FullName() { + case "poktroll.tokenomics.EventRelayMiningDifficultyUpdated.service_id": + x.ServiceId = value.Interface().(string) + case "poktroll.tokenomics.EventRelayMiningDifficultyUpdated.prev_target_hash_hex_encoded": + x.PrevTargetHashHexEncoded = value.Interface().(string) + case "poktroll.tokenomics.EventRelayMiningDifficultyUpdated.new_target_hash_hex_encoded": + x.NewTargetHashHexEncoded = value.Interface().(string) + case "poktroll.tokenomics.EventRelayMiningDifficultyUpdated.prev_num_relays_ema": + x.PrevNumRelaysEma = value.Uint() + case "poktroll.tokenomics.EventRelayMiningDifficultyUpdated.new_num_relays_ema": + x.NewNumRelaysEma = value.Uint() + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.tokenomics.EventRelayMiningDifficultyUpdated")) + } + panic(fmt.Errorf("message poktroll.tokenomics.EventRelayMiningDifficultyUpdated does not contain field %s", fd.FullName())) + } +} + +// Mutable returns a mutable reference to a composite type. +// +// If the field is unpopulated, it may allocate a composite value. +// For a field belonging to a oneof, it implicitly clears any other field +// that may be currently set within the same oneof. +// For extension fields, it implicitly stores the provided ExtensionType +// if not already stored. +// It panics if the field does not contain a composite type. +// +// Mutable is a mutating operation and unsafe for concurrent use. 
+func (x *fastReflection_EventRelayMiningDifficultyUpdated) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.FullName() { + case "poktroll.tokenomics.EventRelayMiningDifficultyUpdated.service_id": + panic(fmt.Errorf("field service_id of message poktroll.tokenomics.EventRelayMiningDifficultyUpdated is not mutable")) + case "poktroll.tokenomics.EventRelayMiningDifficultyUpdated.prev_target_hash_hex_encoded": + panic(fmt.Errorf("field prev_target_hash_hex_encoded of message poktroll.tokenomics.EventRelayMiningDifficultyUpdated is not mutable")) + case "poktroll.tokenomics.EventRelayMiningDifficultyUpdated.new_target_hash_hex_encoded": + panic(fmt.Errorf("field new_target_hash_hex_encoded of message poktroll.tokenomics.EventRelayMiningDifficultyUpdated is not mutable")) + case "poktroll.tokenomics.EventRelayMiningDifficultyUpdated.prev_num_relays_ema": + panic(fmt.Errorf("field prev_num_relays_ema of message poktroll.tokenomics.EventRelayMiningDifficultyUpdated is not mutable")) + case "poktroll.tokenomics.EventRelayMiningDifficultyUpdated.new_num_relays_ema": + panic(fmt.Errorf("field new_num_relays_ema of message poktroll.tokenomics.EventRelayMiningDifficultyUpdated is not mutable")) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.tokenomics.EventRelayMiningDifficultyUpdated")) + } + panic(fmt.Errorf("message poktroll.tokenomics.EventRelayMiningDifficultyUpdated does not contain field %s", fd.FullName())) + } +} + +// NewField returns a new value that is assignable to the field +// for the given descriptor. For scalars, this returns the default value. +// For lists, maps, and messages, this returns a new, empty, mutable value. +func (x *fastReflection_EventRelayMiningDifficultyUpdated) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.FullName() { + case "poktroll.tokenomics.EventRelayMiningDifficultyUpdated.service_id": + return protoreflect.ValueOfString("") + case "poktroll.tokenomics.EventRelayMiningDifficultyUpdated.prev_target_hash_hex_encoded": + return protoreflect.ValueOfString("") + case "poktroll.tokenomics.EventRelayMiningDifficultyUpdated.new_target_hash_hex_encoded": + return protoreflect.ValueOfString("") + case "poktroll.tokenomics.EventRelayMiningDifficultyUpdated.prev_num_relays_ema": + return protoreflect.ValueOfUint64(uint64(0)) + case "poktroll.tokenomics.EventRelayMiningDifficultyUpdated.new_num_relays_ema": + return protoreflect.ValueOfUint64(uint64(0)) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.tokenomics.EventRelayMiningDifficultyUpdated")) + } + panic(fmt.Errorf("message poktroll.tokenomics.EventRelayMiningDifficultyUpdated does not contain field %s", fd.FullName())) + } +} + +// WhichOneof reports which field within the oneof is populated, +// returning nil if none are populated. +// It panics if the oneof descriptor does not belong to this message. +func (x *fastReflection_EventRelayMiningDifficultyUpdated) WhichOneof(d protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { + switch d.FullName() { + default: + panic(fmt.Errorf("%s is not a oneof field in poktroll.tokenomics.EventRelayMiningDifficultyUpdated", d.FullName())) + } + panic("unreachable") +} + +// GetUnknown retrieves the entire list of unknown fields. +// The caller may only mutate the contents of the RawFields +// if the mutated bytes are stored back into the message with SetUnknown. 
+func (x *fastReflection_EventRelayMiningDifficultyUpdated) GetUnknown() protoreflect.RawFields { + return x.unknownFields +} + +// SetUnknown stores an entire list of unknown fields. +// The raw fields must be syntactically valid according to the wire format. +// An implementation may panic if this is not the case. +// Once stored, the caller must not mutate the content of the RawFields. +// An empty RawFields may be passed to clear the fields. +// +// SetUnknown is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_EventRelayMiningDifficultyUpdated) SetUnknown(fields protoreflect.RawFields) { + x.unknownFields = fields +} + +// IsValid reports whether the message is valid. +// +// An invalid message is an empty, read-only value. +// +// An invalid message often corresponds to a nil pointer of the concrete +// message type, but the details are implementation dependent. +// Validity is not part of the protobuf data model, and may not +// be preserved in marshaling or other operations. +func (x *fastReflection_EventRelayMiningDifficultyUpdated) IsValid() bool { + return x != nil +} + +// ProtoMethods returns optional fastReflectionFeature-path implementations of various operations. +// This method may return nil. +// +// The returned methods type is identical to +// "google.golang.org/protobuf/runtime/protoiface".Methods. +// Consult the protoiface package documentation for details. +func (x *fastReflection_EventRelayMiningDifficultyUpdated) ProtoMethods() *protoiface.Methods { + size := func(input protoiface.SizeInput) protoiface.SizeOutput { + x := input.Message.Interface().(*EventRelayMiningDifficultyUpdated) + if x == nil { + return protoiface.SizeOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Size: 0, + } + } + options := runtime.SizeInputToOptions(input) + _ = options + var n int + var l int + _ = l + l = len(x.ServiceId) + if l > 0 { + n += 1 + l + runtime.Sov(uint64(l)) + } + l = len(x.PrevTargetHashHexEncoded) + if l > 0 { + n += 1 + l + runtime.Sov(uint64(l)) + } + l = len(x.NewTargetHashHexEncoded) + if l > 0 { + n += 1 + l + runtime.Sov(uint64(l)) + } + if x.PrevNumRelaysEma != 0 { + n += 1 + runtime.Sov(uint64(x.PrevNumRelaysEma)) + } + if x.NewNumRelaysEma != 0 { + n += 1 + runtime.Sov(uint64(x.NewNumRelaysEma)) + } + if x.unknownFields != nil { + n += len(x.unknownFields) + } + return protoiface.SizeOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Size: n, + } + } + + marshal := func(input protoiface.MarshalInput) (protoiface.MarshalOutput, error) { + x := input.Message.Interface().(*EventRelayMiningDifficultyUpdated) + if x == nil { + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, nil + } + options := runtime.MarshalInputToOptions(input) + _ = options + size := options.Size(x) + dAtA := make([]byte, size) + i := len(dAtA) + _ = i + var l int + _ = l + if x.unknownFields != nil { + i -= len(x.unknownFields) + copy(dAtA[i:], x.unknownFields) + } + if x.NewNumRelaysEma != 0 { + i = runtime.EncodeVarint(dAtA, i, uint64(x.NewNumRelaysEma)) + i-- + dAtA[i] = 0x28 + } + if x.PrevNumRelaysEma != 0 { + i = runtime.EncodeVarint(dAtA, i, uint64(x.PrevNumRelaysEma)) + i-- + dAtA[i] = 0x20 + } + if len(x.NewTargetHashHexEncoded) > 0 { + i -= len(x.NewTargetHashHexEncoded) + copy(dAtA[i:], x.NewTargetHashHexEncoded) + i = runtime.EncodeVarint(dAtA, i, uint64(len(x.NewTargetHashHexEncoded))) + i-- + dAtA[i] = 0x1a + } + if len(x.PrevTargetHashHexEncoded) > 0 { + i -= 
len(x.PrevTargetHashHexEncoded) + copy(dAtA[i:], x.PrevTargetHashHexEncoded) + i = runtime.EncodeVarint(dAtA, i, uint64(len(x.PrevTargetHashHexEncoded))) + i-- + dAtA[i] = 0x12 + } + if len(x.ServiceId) > 0 { + i -= len(x.ServiceId) + copy(dAtA[i:], x.ServiceId) + i = runtime.EncodeVarint(dAtA, i, uint64(len(x.ServiceId))) + i-- + dAtA[i] = 0xa + } + if input.Buf != nil { + input.Buf = append(input.Buf, dAtA...) + } else { + input.Buf = dAtA + } + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, nil + } + unmarshal := func(input protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { + x := input.Message.Interface().(*EventRelayMiningDifficultyUpdated) + if x == nil { + return protoiface.UnmarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Flags: input.Flags, + }, nil + } + options := runtime.UnmarshalInputToOptions(input) + _ = options + dAtA := input.Buf + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: EventRelayMiningDifficultyUpdated: wiretype end group for non-group") + } + if fieldNum <= 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: EventRelayMiningDifficultyUpdated: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field ServiceId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + x.ServiceId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field PrevTargetHashHexEncoded", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + x.PrevTargetHashHexEncoded = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field NewTargetHashHexEncoded", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + x.NewTargetHashHexEncoded = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field PrevNumRelaysEma", wireType) + } + x.PrevNumRelaysEma = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + x.PrevNumRelaysEma |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field NewNumRelaysEma", wireType) + } + x.NewNumRelaysEma = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + x.NewNumRelaysEma |= uint64(b&0x7F) << shift 
+ if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := runtime.Skip(dAtA[iNdEx:]) + if err != nil { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + if !options.DiscardUnknown { + x.unknownFields = append(x.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + } + iNdEx += skippy + } + } + + if iNdEx > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, nil + } + return &protoiface.Methods{ + NoUnkeyedLiterals: struct{}{}, + Flags: protoiface.SupportMarshalDeterministic | protoiface.SupportUnmarshalDiscardUnknown, + Size: size, + Marshal: marshal, + Unmarshal: unmarshal, + Merge: nil, + CheckInitialized: nil, + } +} + // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.27.0 @@ -1145,6 +1789,75 @@ func (x *EventClaimSettled) GetProofRequired() bool { return false } +// EventRelayMiningDifficultyUpdated is an event emitted whenever the relay mining difficulty is updated +// for a given service. +type EventRelayMiningDifficultyUpdated struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ServiceId string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + PrevTargetHashHexEncoded string `protobuf:"bytes,2,opt,name=prev_target_hash_hex_encoded,json=prevTargetHashHexEncoded,proto3" json:"prev_target_hash_hex_encoded,omitempty"` + NewTargetHashHexEncoded string `protobuf:"bytes,3,opt,name=new_target_hash_hex_encoded,json=newTargetHashHexEncoded,proto3" json:"new_target_hash_hex_encoded,omitempty"` + PrevNumRelaysEma uint64 `protobuf:"varint,4,opt,name=prev_num_relays_ema,json=prevNumRelaysEma,proto3" json:"prev_num_relays_ema,omitempty"` + NewNumRelaysEma uint64 `protobuf:"varint,5,opt,name=new_num_relays_ema,json=newNumRelaysEma,proto3" json:"new_num_relays_ema,omitempty"` +} + +func (x *EventRelayMiningDifficultyUpdated) Reset() { + *x = EventRelayMiningDifficultyUpdated{} + if protoimpl.UnsafeEnabled { + mi := &file_poktroll_tokenomics_event_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EventRelayMiningDifficultyUpdated) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EventRelayMiningDifficultyUpdated) ProtoMessage() {} + +// Deprecated: Use EventRelayMiningDifficultyUpdated.ProtoReflect.Descriptor instead. 
+func (*EventRelayMiningDifficultyUpdated) Descriptor() ([]byte, []int) { + return file_poktroll_tokenomics_event_proto_rawDescGZIP(), []int{2} +} + +func (x *EventRelayMiningDifficultyUpdated) GetServiceId() string { + if x != nil { + return x.ServiceId + } + return "" +} + +func (x *EventRelayMiningDifficultyUpdated) GetPrevTargetHashHexEncoded() string { + if x != nil { + return x.PrevTargetHashHexEncoded + } + return "" +} + +func (x *EventRelayMiningDifficultyUpdated) GetNewTargetHashHexEncoded() string { + if x != nil { + return x.NewTargetHashHexEncoded + } + return "" +} + +func (x *EventRelayMiningDifficultyUpdated) GetPrevNumRelaysEma() uint64 { + if x != nil { + return x.PrevNumRelaysEma + } + return 0 +} + +func (x *EventRelayMiningDifficultyUpdated) GetNewNumRelaysEma() uint64 { + if x != nil { + return x.NewNumRelaysEma + } + return 0 +} + var File_poktroll_tokenomics_event_proto protoreflect.FileDescriptor var file_poktroll_tokenomics_event_proto_rawDesc = []byte{ @@ -1168,19 +1881,37 @@ var file_poktroll_tokenomics_event_proto_rawDesc = []byte{ 0x01, 0x28, 0x04, 0x52, 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x6f, 0x66, - 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x42, 0xb8, 0x01, 0x0a, 0x17, 0x63, 0x6f, 0x6d, - 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x6f, - 0x6d, 0x69, 0x63, 0x73, 0x42, 0x0a, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x50, 0x01, 0x5a, 0x24, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x73, 0x64, 0x6b, 0x2e, 0x69, 0x6f, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2f, 0x74, 0x6f, - 0x6b, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x63, 0x73, 0xa2, 0x02, 0x03, 0x50, 0x54, 0x58, 0xaa, 0x02, - 0x13, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x6f, - 0x6d, 0x69, 0x63, 0x73, 0xca, 0x02, 0x13, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x5c, - 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x63, 0x73, 0xe2, 0x02, 0x1f, 0x50, 0x6f, 0x6b, - 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x5c, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x63, 0x73, - 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x14, 0x50, - 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x3a, 0x3a, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x6f, 0x6d, - 0x69, 0x63, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x22, 0x9c, 0x02, 0x0a, 0x21, 0x45, 0x76, 0x65, + 0x6e, 0x74, 0x52, 0x65, 0x6c, 0x61, 0x79, 0x4d, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x44, 0x69, 0x66, + 0x66, 0x69, 0x63, 0x75, 0x6c, 0x74, 0x79, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1d, + 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x64, 0x12, 0x3e, 0x0a, + 0x1c, 0x70, 0x72, 0x65, 0x76, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x68, 0x61, 0x73, + 0x68, 0x5f, 0x68, 0x65, 0x78, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x18, 0x70, 0x72, 0x65, 0x76, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x48, + 0x61, 0x73, 0x68, 0x48, 0x65, 0x78, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x64, 0x12, 0x3c, 0x0a, + 0x1b, 0x6e, 0x65, 0x77, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 
0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, + 0x5f, 0x68, 0x65, 0x78, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x17, 0x6e, 0x65, 0x77, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x48, 0x61, 0x73, + 0x68, 0x48, 0x65, 0x78, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x64, 0x12, 0x2d, 0x0a, 0x13, 0x70, + 0x72, 0x65, 0x76, 0x5f, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x73, 0x5f, 0x65, + 0x6d, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x72, 0x65, 0x76, 0x4e, 0x75, + 0x6d, 0x52, 0x65, 0x6c, 0x61, 0x79, 0x73, 0x45, 0x6d, 0x61, 0x12, 0x2b, 0x0a, 0x12, 0x6e, 0x65, + 0x77, 0x5f, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x73, 0x5f, 0x65, 0x6d, 0x61, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x6e, 0x65, 0x77, 0x4e, 0x75, 0x6d, 0x52, 0x65, + 0x6c, 0x61, 0x79, 0x73, 0x45, 0x6d, 0x61, 0x42, 0xb8, 0x01, 0x0a, 0x17, 0x63, 0x6f, 0x6d, 0x2e, + 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x6f, 0x6d, + 0x69, 0x63, 0x73, 0x42, 0x0a, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x24, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x73, 0x64, 0x6b, 0x2e, 0x69, 0x6f, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2f, 0x74, 0x6f, 0x6b, + 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x63, 0x73, 0xa2, 0x02, 0x03, 0x50, 0x54, 0x58, 0xaa, 0x02, 0x13, + 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x6f, 0x6d, + 0x69, 0x63, 0x73, 0xca, 0x02, 0x13, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x5c, 0x54, + 0x6f, 0x6b, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x63, 0x73, 0xe2, 0x02, 0x1f, 0x50, 0x6f, 0x6b, 0x74, + 0x72, 0x6f, 0x6c, 0x6c, 0x5c, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x63, 0x73, 0x5c, + 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x14, 0x50, 0x6f, + 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x3a, 0x3a, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x6f, 0x6d, 0x69, + 0x63, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1195,15 +1926,16 @@ func file_poktroll_tokenomics_event_proto_rawDescGZIP() []byte { return file_poktroll_tokenomics_event_proto_rawDescData } -var file_poktroll_tokenomics_event_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_poktroll_tokenomics_event_proto_msgTypes = make([]protoimpl.MessageInfo, 3) var file_poktroll_tokenomics_event_proto_goTypes = []interface{}{ - (*EventClaimExpired)(nil), // 0: poktroll.tokenomics.EventClaimExpired - (*EventClaimSettled)(nil), // 1: poktroll.tokenomics.EventClaimSettled - (*proof.Claim)(nil), // 2: poktroll.proof.Claim + (*EventClaimExpired)(nil), // 0: poktroll.tokenomics.EventClaimExpired + (*EventClaimSettled)(nil), // 1: poktroll.tokenomics.EventClaimSettled + (*EventRelayMiningDifficultyUpdated)(nil), // 2: poktroll.tokenomics.EventRelayMiningDifficultyUpdated + (*proof.Claim)(nil), // 3: poktroll.proof.Claim } var file_poktroll_tokenomics_event_proto_depIdxs = []int32{ - 2, // 0: poktroll.tokenomics.EventClaimExpired.claim:type_name -> poktroll.proof.Claim - 2, // 1: poktroll.tokenomics.EventClaimSettled.claim:type_name -> poktroll.proof.Claim + 3, // 0: poktroll.tokenomics.EventClaimExpired.claim:type_name -> poktroll.proof.Claim + 3, // 1: poktroll.tokenomics.EventClaimSettled.claim:type_name -> poktroll.proof.Claim 2, // [2:2] is the sub-list for method output_type 2, // [2:2] is the sub-list for method input_type 2, // [2:2] is the sub-list for extension type_name @@ -1241,6 +1973,18 @@ func 
file_poktroll_tokenomics_event_proto_init() { return nil } } + file_poktroll_tokenomics_event_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EventRelayMiningDifficultyUpdated); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ @@ -1248,7 +1992,7 @@ func file_poktroll_tokenomics_event_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_poktroll_tokenomics_event_proto_rawDesc, NumEnums: 0, - NumMessages: 2, + NumMessages: 3, NumExtensions: 0, NumServices: 0, }, diff --git a/docs/static/openapi.yml b/docs/static/openapi.yml index d5ccf8304..225bf8660 100644 --- a/docs/static/openapi.yml +++ b/docs/static/openapi.yml @@ -16291,9 +16291,9 @@ paths: (Optional) Semantic human readable name for the service title: >- - TODO_TECHDEBT: Name is currently unused but acts as - a reminder that an optional onchain representation - of the service is necessary + TODO_BETA: Name is currently unused but acts as a + reminder that an optional onchain representation of + the service is necessary title: >- ApplicationServiceConfig holds the service configuration the application stakes for @@ -16657,7 +16657,7 @@ paths: (Optional) Semantic human readable name for the service title: >- - TODO_TECHDEBT: Name is currently unused but acts as a + TODO_BETA: Name is currently unused but acts as a reminder that an optional onchain representation of the service is necessary session_id: @@ -16762,7 +16762,7 @@ paths: (Optional) Semantic human readable name for the service title: >- - TODO_TECHDEBT: Name is currently unused but acts as a + TODO_BETA: Name is currently unused but acts as a reminder that an optional onchain representation of the service is necessary session_id: @@ -16823,8 +16823,53 @@ paths: type: string format: uint64 description: >- - The minimum difficulty in bits for a relay to be included - in a Merkle proof. + min_relay_difficulty_bits is the minimum difficulty in + bits for a relay to + + be included in a Merkle proof. + proof_request_probability: + type: number + format: float + description: >- + proof_request_probability is the probability of a session + requiring a proof + + if it's cost (i.e. compute unit consumption) is below the + ProofRequirementThreshold. + proof_requirement_threshold: + type: string + format: uint64 + description: >- + proof_requirement_threshold is the session cost (i.e. + compute unit consumption) + + threshold which asserts that a session MUST have a + corresponding proof when its cost + + is equal to or above the threshold. This is in contrast to + the this requirement + + being determined probabilistically via + ProofRequestProbability. + + + TODO_MAINNET: Consider renaming this to + `proof_requirement_threshold_compute_units`. + proof_missing_penalty: + description: >- + proof_missing_penalty is the number of tokens (uPOKT) + which should be slashed from a supplier + + when a proof is required (either via + proof_requirement_threshold or proof_missing_penalty) + + but is not provided. + type: object + properties: + denom: + type: string + amount: + type: string description: Params defines the parameters for the module. 
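The proof-governance params added in the hunk above interact exactly as their descriptions state: a claim whose compute-unit cost meets or exceeds proof_requirement_threshold always requires a proof, a cheaper claim requires one only with probability proof_request_probability, and proof_missing_penalty is the uPOKT amount slashed when a required proof is missing. The following is a minimal Go sketch of that decision rule, not the module's actual keeper code; the proofParams struct, the isProofRequired helper, and the use of math/rand in place of the chain's deterministic randomness are illustrative assumptions.

package main

import (
	"fmt"
	"math/rand"
)

// proofParams mirrors the two decision-relevant fields from the schema above.
type proofParams struct {
	ProofRequestProbability   float32 // chance a below-threshold claim still needs a proof
	ProofRequirementThreshold uint64  // compute-unit cost at/above which a proof is mandatory
}

// isProofRequired is a hypothetical helper: at or above the threshold a proof
// is always required; below it, the requirement is probabilistic. On-chain the
// coin flip would be driven by deterministic, consensus-derived randomness,
// not math/rand — the explicit rng parameter stands in for that source.
func isProofRequired(p proofParams, claimComputeUnits uint64, rng *rand.Rand) bool {
	if claimComputeUnits >= p.ProofRequirementThreshold {
		return true
	}
	return rng.Float32() < p.ProofRequestProbability
}

func main() {
	params := proofParams{ProofRequestProbability: 0.25, ProofRequirementThreshold: 20}
	rng := rand.New(rand.NewSource(1))
	fmt.Println(isProofRequired(params, 25, rng)) // true: cost >= threshold
	fmt.Println(isProofRequired(params, 5, rng))  // true roughly 25% of the time
}

The below-threshold branch is isolated behind an explicit randomness parameter precisely because, in a consensus setting, every validator must reach the same answer for the same claim.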
description: >- MsgUpdateParamResponse defines the response structure for @@ -16879,6 +16924,24 @@ paths: as_bytes: type: string format: byte + as_float: + type: number + format: float + as_coin: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and an amount. + + + NOTE: The amount field is an Int which implements the custom + method + + signatures required by gogoproto. description: >- MsgUpdateParam is the Msg/UpdateParam request type to update a single param. @@ -16940,250 +17003,59 @@ paths: type: string format: uint64 description: >- - The minimum difficulty in bits for a relay to be included - in a Merkle proof. - description: Params defines the parameters for the module. - description: >- - MsgUpdateParams is the Msg/UpdateParams request type to update all - params at once. - tags: - - Msg - /pokt-network/poktroll/service/params: - get: - summary: Parameters queries the parameters of the module. - operationId: PoktrollServiceQuery_Params - responses: - '200': - description: A successful response. - schema: - type: object - properties: - params: - description: params holds all the parameters of this module. - type: object - properties: - add_service_fee: - type: string - format: uint64 - description: |- - The amount of uPOKT required to add a new service. - This will be deducted from the signer's account balance, - and transferred to the pocket network foundation. - description: >- - QueryParamsResponse is response type for the Query/Params RPC - method. - default: - description: An unexpected error response. - schema: - type: object - properties: - code: - type: integer - format: int32 - message: - type: string - details: - type: array - items: - type: object - properties: - '@type': - type: string - additionalProperties: {} - tags: - - Query - /pokt-network/poktroll/service/service: - get: - operationId: PoktrollServiceQuery_AllServices - responses: - '200': - description: A successful response. - schema: - type: object - properties: - service: - type: array - items: - type: object - properties: - id: - type: string - description: Unique identifier for the service - title: >- - For example, what if we want to request a session for a - certain service but with some additional configs that - identify it? - name: - type: string - description: (Optional) Semantic human readable name for the service - title: >- - TODO_TECHDEBT: Name is currently unused but acts as a - reminder that an optional onchain representation of the - service is necessary - title: >- - Service message to encapsulate unique and semantic - identifiers for a service on the network - pagination: - type: object - properties: - next_key: - type: string - format: byte - description: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently. It will be empty if - there are no more results. - total: - type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total - - was set, its value is undefined otherwise - description: >- - PageResponse is to be embedded in gRPC response messages where - the - - corresponding request message has used PageRequest. + min_relay_difficulty_bits is the minimum difficulty in + bits for a relay to - message SomeResponse { - repeated Bar results = 1; - PageResponse page = 2; - } - default: - description: An unexpected error response. 
- schema: - type: object - properties: - code: - type: integer - format: int32 - message: - type: string - details: - type: array - items: - type: object - properties: - '@type': - type: string - additionalProperties: {} - parameters: - - name: pagination.key - description: |- - key is a value returned in PageResponse.next_key to begin - querying the next page most efficiently. Only one of offset or key - should be set. - in: query - required: false - type: string - format: byte - - name: pagination.offset - description: >- - offset is a numeric offset that can be used when key is unavailable. + be included in a Merkle proof. + proof_request_probability: + type: number + format: float + description: >- + proof_request_probability is the probability of a session + requiring a proof - It is less efficient than using key. Only one of offset or key - should + if it's cost (i.e. compute unit consumption) is below the + ProofRequirementThreshold. + proof_requirement_threshold: + type: string + format: uint64 + description: >- + proof_requirement_threshold is the session cost (i.e. + compute unit consumption) - be set. - in: query - required: false - type: string - format: uint64 - - name: pagination.limit - description: >- - limit is the total number of results to be returned in the result - page. + threshold which asserts that a session MUST have a + corresponding proof when its cost - If left empty it will default to a value to be set by each app. - in: query - required: false - type: string - format: uint64 - - name: pagination.count_total - description: >- - count_total is set to true to indicate that the result set should - include + is equal to or above the threshold. This is in contrast to + the this requirement - a count of the total number of items available for pagination in - UIs. + being determined probabilistically via + ProofRequestProbability. - count_total is only respected when offset is used. It is ignored - when key - is set. - in: query - required: false - type: boolean - - name: pagination.reverse - description: >- - reverse is set to true if results are to be returned in the - descending order. + TODO_MAINNET: Consider renaming this to + `proof_requirement_threshold_compute_units`. + proof_missing_penalty: + description: >- + proof_missing_penalty is the number of tokens (uPOKT) + which should be slashed from a supplier + when a proof is required (either via + proof_requirement_threshold or proof_missing_penalty) - Since: cosmos-sdk 0.43 - in: query - required: false - type: boolean - tags: - - Query - /pokt-network/poktroll/service/service/{id}: - get: - summary: Queries a list of Service items. - operationId: PoktrollServiceQuery_Service - responses: - '200': - description: A successful response. - schema: - type: object - properties: - service: - type: object - properties: - id: - type: string - description: Unique identifier for the service - title: >- - For example, what if we want to request a session for a - certain service but with some additional configs that - identify it? - name: - type: string - description: (Optional) Semantic human readable name for the service - title: >- - TODO_TECHDEBT: Name is currently unused but acts as a - reminder that an optional onchain representation of the - service is necessary - title: >- - Service message to encapsulate unique and semantic identifiers - for a service on the network - default: - description: An unexpected error response. 
- schema: - type: object - properties: - code: - type: integer - format: int32 - message: - type: string - details: - type: array - items: - type: object - properties: - '@type': - type: string - additionalProperties: {} - parameters: - - name: id - description: 'TODO: We could support getting services by name.' - in: path - required: true - type: string + but is not provided. + type: object + properties: + denom: + type: string + amount: + type: string + description: Params defines the parameters for the module. + description: >- + MsgUpdateParams is the Msg/UpdateParams request type to update all + params at once. tags: - - Query + - Msg /poktroll.service.Msg/AddService: post: operationId: PoktrollServiceMsg_AddService @@ -17220,7 +17092,7 @@ paths: permissionless. - TODO_IMPLEMENT: Add Champions / Sources once its fully defined. + TODO_BETA: Add Champions / Sources once its fully defined. in: body required: true schema: @@ -17246,9 +17118,9 @@ paths: type: string description: (Optional) Semantic human readable name for the service title: >- - TODO_TECHDEBT: Name is currently unused but acts as a - reminder that an optional onchain representation of the - service is necessary + TODO_BETA: Name is currently unused but acts as a reminder + that an optional onchain representation of the service is + necessary description: >- MsgAddService defines a message for adding a new message to the network. @@ -17258,7 +17130,7 @@ paths: permissionless. - TODO_IMPLEMENT: Add Champions / Sources once its fully defined. + TODO_BETA: Add Champions / Sources once its fully defined. tags: - Msg /poktroll.service.Msg/UpdateParams: @@ -17379,421 +17251,9 @@ paths: description: MsgUpdateParams is the Msg/UpdateParams request type. tags: - Msg - /pokt-network/poktroll/session/get_session: - get: - summary: Queries the session given app_address, service and block_height. - operationId: PoktrollSessionQuery_GetSession - responses: - '200': - description: A successful response. - schema: - type: object - properties: - session: - type: object - properties: - header: - title: The header of the session containing lightweight data - type: object - properties: - application_address: - type: string - title: >- - The Bech32 address of the application using cosmos' - ScalarDescriptor to ensure deterministic encoding - service: - title: The service this session is for - type: object - properties: - id: - type: string - description: Unique identifier for the service - title: >- - For example, what if we want to request a session - for a certain service but with some additional - configs that identify it? - name: - type: string - description: >- - (Optional) Semantic human readable name for the - service - title: >- - TODO_TECHDEBT: Name is currently unused but acts - as a reminder that an optional onchain - representation of the service is necessary - session_id: - type: string - description: A unique pseudoranom ID for this session - title: >- - NOTE: session_id can be derived from the above values - using on-chain but is included in the header for - convenience - session_start_block_height: - type: string - format: int64 - title: The height at which this session started - session_end_block_height: - type: string - format: int64 - description: >- - Note that`session_end_block_height` is a derivative of - (`start` + `num_blocks_per_session`) - - as goverened by on-chain params at the time of the - session start. 
- - It is stored as an additional field to simplofy - business logic in case - - the number of blocks_per_session changes during the - session. - - - The height at which this session ended, this is the - last block of the session - description: >- - SessionHeader is a lightweight header for a session that - can be passed around. - - It is the minimal amount of data required to hydrate & - retrieve all data relevant to the session. - session_id: - type: string - title: A unique pseudoranom ID for this session - session_number: - type: string - format: int64 - title: The session number since genesis - num_blocks_per_session: - type: string - format: int64 - title: The number of blocks per session when this session started - application: - title: A fully hydrated application object this session is for - type: object - properties: - address: - type: string - title: >- - The Bech32 address of the application using cosmos' - ScalarDescriptor to ensure deterministic encoding - stake: - title: The total amount of uPOKT the application has staked - type: object - properties: - denom: - type: string - amount: - type: string - description: >- - Coin defines a token with a denomination and an - amount. - - - NOTE: The amount field is an Int which implements the - custom method - - signatures required by gogoproto. - service_configs: - type: array - items: - type: object - properties: - service: - title: >- - The Service for which the application is - configured - type: object - properties: - id: - type: string - description: Unique identifier for the service - title: >- - For example, what if we want to request a - session for a certain service but with some - additional configs that identify it? - name: - type: string - description: >- - (Optional) Semantic human readable name for - the service - title: >- - TODO_TECHDEBT: Name is currently unused but - acts as a reminder that an optional onchain - representation of the service is necessary - title: >- - ApplicationServiceConfig holds the service - configuration the application stakes for - title: >- - The list of services this appliccation is configured - to request service for - delegatee_gateway_addresses: - type: array - items: - type: string - description: >- - TODO_TECHDEBT: Rename `delegatee_gateway_addresses` to - `gateway_addresses_delegated_to`. - - Ensure to rename all relevant configs, comments, - variables, function names, etc as well. - - - The Bech32 encoded addresses for all delegatee - Gateways, in a non-nullable slice - pending_undelegations: - type: object - additionalProperties: - type: object - properties: - gateway_addresses: - type: array - items: - type: string - description: >- - UndelegatingGatewayList is used as the Value of - `pending_undelegations`. - - It is required to store a repeated list of strings - as a map value. - description: >- - A map from sessionEndHeights to a list of Gateways. - - The key is the height of the last block of the session - during which the - - respective undelegation was committed. - - The value is a list of gateways being undelegated - from. - - TODO_DOCUMENT(@red-0ne): Need to document the flow - from this comment - - so its clear to everyone why this is necessary; - https://github.com/pokt-network/poktroll/issues/476#issuecomment-2052639906. 
- suppliers: - type: array - items: - type: object - properties: - address: - type: string - title: >- - The Bech32 address of the supplier using cosmos' - ScalarDescriptor to ensure deterministic encoding - stake: - title: The total amount of uPOKT the supplier has staked - type: object - properties: - denom: - type: string - amount: - type: string - description: >- - Coin defines a token with a denomination and an - amount. - - - NOTE: The amount field is an Int which implements - the custom method - - signatures required by gogoproto. - services: - type: array - items: - type: object - properties: - service: - title: >- - The Service for which the supplier is - configured - type: object - properties: - id: - type: string - description: Unique identifier for the service - title: >- - For example, what if we want to request a - session for a certain service but with - some additional configs that identify it? - name: - type: string - description: >- - (Optional) Semantic human readable name - for the service - title: >- - TODO_TECHDEBT: Name is currently unused - but acts as a reminder that an optional - onchain representation of the service is - necessary - endpoints: - type: array - items: - type: object - properties: - url: - type: string - title: URL of the endpoint - rpc_type: - title: Type of RPC exposed on the url above - type: string - enum: - - UNKNOWN_RPC - - GRPC - - WEBSOCKET - - JSON_RPC - - REST - default: UNKNOWN_RPC - description: |- - - UNKNOWN_RPC: Undefined RPC type - - GRPC: gRPC - - WEBSOCKET: WebSocket - - JSON_RPC: JSON-RPC - - REST: REST - configs: - type: array - items: - type: object - properties: - key: - title: Config option key - type: string - enum: - - UNKNOWN_CONFIG - - TIMEOUT - default: UNKNOWN_CONFIG - description: >- - Enum to define configuration options - - TODO_RESEARCH: Should these be configs, - SLAs or something else? There will be - more discussion once we get closer to - implementing on-chain QoS. - - - UNKNOWN_CONFIG: Undefined config option - - TIMEOUT: Timeout setting - value: - type: string - title: Config option value - title: >- - Key-value wrapper for config options, as - proto maps can't be keyed by enums - title: >- - Additional configuration options for the - endpoint - title: >- - SupplierEndpoint message to hold service - configuration details - title: List of endpoints for the service - title: >- - SupplierServiceConfig holds the service - configuration the supplier stakes for - title: The service configs this supplier can support - description: >- - Supplier is the type defining the actor in Pocket - Network that provides RPC services. - title: >- - A fully hydrated set of servicers that are serving the - application - description: >- - Session is a fully hydrated session object that contains all - the information for the Session - - and its parcipants. - default: - description: An unexpected error response. - schema: - type: object - properties: - code: - type: integer - format: int32 - message: - type: string - details: - type: array - items: - type: object - properties: - '@type': - type: string - additionalProperties: {} - parameters: - - name: application_address - description: >- - The Bech32 address of the application using cosmos' ScalarDescriptor - to ensure deterministic encoding - in: query - required: false - type: string - - name: service.id - description: >- - For example, what if we want to request a session for a certain - service but with some additional configs that identify it? 
- - - Unique identifier for the service - in: query - required: false - type: string - - name: service.name - description: >- - TODO_TECHDEBT: Name is currently unused but acts as a reminder that - an optional onchain representation of the service is necessary - - - (Optional) Semantic human readable name for the service - in: query - required: false - type: string - - name: block_height - description: The block height to query the session for - in: query - required: false - type: string - format: int64 - tags: - - Query - /pokt-network/poktroll/session/params: - get: - summary: Parameters queries the parameters of the module. - operationId: PoktrollSessionQuery_Params - responses: - '200': - description: A successful response. - schema: - type: object - properties: - params: - description: params holds all the parameters of this module. - type: object - description: >- - QueryParamsResponse is response type for the Query/Params RPC - method. - default: - description: An unexpected error response. - schema: - type: object - properties: - code: - type: integer - format: int32 - message: - type: string - details: - type: array - items: - type: object - properties: - '@type': - type: string - additionalProperties: {} - tags: - - Query - /poktroll.shared.Msg/UpdateParam: - post: - operationId: PoktrollSharedMsg_UpdateParam + /poktroll.shared.Msg/UpdateParam: + post: + operationId: PoktrollSharedMsg_UpdateParam responses: '200': description: A successful response. @@ -17825,6 +17285,22 @@ paths: after the claim window open height, at which the claim window closes. + proof_window_open_offset_blocks: + type: string + format: uint64 + description: >- + proof_window_open_offset_blocks is the number of blocks + after the claim window + + close height, at which the proof window opens. + proof_window_close_offset_blocks: + type: string + format: uint64 + description: >- + proof_window_close_offset_blocks is the number of blocks + after the proof window + + open height, at which the proof window closes. description: Params defines the parameters for the module. description: >- MsgUpdateParamResponse defines the response structure for @@ -17951,69 +17427,26 @@ paths: after the claim window open height, at which the claim window closes. - description: Params defines the parameters for the module. - description: MsgUpdateParams is the Msg/UpdateParams request type. - tags: - - Msg - /pokt-network/poktroll/shared/params: - get: - summary: Parameters queries the parameters of the module. - operationId: PoktrollSharedQuery_Params - responses: - '200': - description: A successful response. - schema: - type: object - properties: - params: - description: params holds all the parameters of this module. - type: object - properties: - num_blocks_per_session: - type: string - format: uint64 - description: >- - num_blocks_per_session is the number of blocks between the - session start & end heights. - claim_window_open_offset_blocks: + proof_window_open_offset_blocks: type: string format: uint64 description: >- - claim_window_open_offset_blocks is the number of blocks - after the session grace + proof_window_open_offset_blocks is the number of blocks + after the claim window - period height, at which the claim window opens. - claim_window_close_offset_blocks: + close height, at which the proof window opens. 
+ proof_window_close_offset_blocks: type: string format: uint64 description: >- - claim_window_close_offset_blocks is the number of blocks - after the claim window + proof_window_close_offset_blocks is the number of blocks + after the proof window - open height, at which the claim window closes. - description: >- - QueryParamsResponse is response type for the Query/Params RPC - method. - default: - description: An unexpected error response. - schema: - type: object - properties: - code: - type: integer - format: int32 - message: - type: string - details: - type: array - items: - type: object - properties: - '@type': - type: string - additionalProperties: {} + open height, at which the proof window closes. + description: Params defines the parameters for the module. + description: MsgUpdateParams is the Msg/UpdateParams request type. tags: - - Query + - Msg /poktroll.supplier.Msg/StakeSupplier: post: operationId: PoktrollSupplierMsg_StakeSupplier @@ -18094,9 +17527,9 @@ paths: (Optional) Semantic human readable name for the service title: >- - TODO_TECHDEBT: Name is currently unused but acts as - a reminder that an optional onchain representation - of the service is necessary + TODO_BETA: Name is currently unused but acts as a + reminder that an optional onchain representation of + the service is necessary endpoints: type: array items: @@ -25372,9 +24805,9 @@ definitions: type: string description: (Optional) Semantic human readable name for the service title: >- - TODO_TECHDEBT: Name is currently unused but acts as a - reminder that an optional onchain representation of the - service is necessary + TODO_BETA: Name is currently unused but acts as a reminder + that an optional onchain representation of the service is + necessary title: >- ApplicationServiceConfig holds the service configuration the application stakes for @@ -25453,9 +24886,8 @@ definitions: type: string description: (Optional) Semantic human readable name for the service title: >- - TODO_TECHDEBT: Name is currently unused but acts as a reminder - that an optional onchain representation of the service is - necessary + TODO_BETA: Name is currently unused but acts as a reminder that an + optional onchain representation of the service is necessary title: >- ApplicationServiceConfig holds the service configuration the application stakes for @@ -25472,7 +24904,7 @@ definitions: type: string description: (Optional) Semantic human readable name for the service title: >- - TODO_TECHDEBT: Name is currently unused but acts as a reminder that an + TODO_BETA: Name is currently unused but acts as a reminder that an optional onchain representation of the service is necessary title: >- Service message to encapsulate unique and semantic identifiers for a @@ -25558,7 +24990,7 @@ definitions: type: string description: (Optional) Semantic human readable name for the service title: >- - TODO_TECHDEBT: Name is currently unused but acts as a reminder + TODO_BETA: Name is currently unused but acts as a reminder that an optional onchain representation of the service is necessary session_id: @@ -25628,7 +25060,7 @@ definitions: type: string description: (Optional) Semantic human readable name for the service title: >- - TODO_TECHDEBT: Name is currently unused but acts as a reminder + TODO_BETA: Name is currently unused but acts as a reminder that an optional onchain representation of the service is necessary session_id: @@ -25693,11 +25125,26 @@ definitions: as_bytes: type: string format: byte - description: >- - MsgUpdateParam is the Msg/UpdateParam 
request type to update a single - param. - poktroll.proof.MsgUpdateParamResponse: - type: object + as_float: + type: number + format: float + as_coin: + type: object + properties: + denom: + type: string + amount: + type: string + description: |- + Coin defines a token with a denomination and an amount. + + NOTE: The amount field is an Int which implements the custom method + signatures required by gogoproto. + description: >- + MsgUpdateParam is the Msg/UpdateParam request type to update a single + param. + poktroll.proof.MsgUpdateParamResponse: + type: object properties: params: type: object @@ -25706,8 +25153,52 @@ definitions: type: string format: uint64 description: >- - The minimum difficulty in bits for a relay to be included in a - Merkle proof. + min_relay_difficulty_bits is the minimum difficulty in bits for a + relay to + + be included in a Merkle proof. + proof_request_probability: + type: number + format: float + description: >- + proof_request_probability is the probability of a session + requiring a proof + + if it's cost (i.e. compute unit consumption) is below the + ProofRequirementThreshold. + proof_requirement_threshold: + type: string + format: uint64 + description: >- + proof_requirement_threshold is the session cost (i.e. compute unit + consumption) + + threshold which asserts that a session MUST have a corresponding + proof when its cost + + is equal to or above the threshold. This is in contrast to the + this requirement + + being determined probabilistically via ProofRequestProbability. + + + TODO_MAINNET: Consider renaming this to + `proof_requirement_threshold_compute_units`. + proof_missing_penalty: + description: >- + proof_missing_penalty is the number of tokens (uPOKT) which should + be slashed from a supplier + + when a proof is required (either via proof_requirement_threshold + or proof_missing_penalty) + + but is not provided. + type: object + properties: + denom: + type: string + amount: + type: string description: Params defines the parameters for the module. description: |- MsgUpdateParamResponse defines the response structure for executing a @@ -25727,8 +25218,52 @@ definitions: type: string format: uint64 description: >- - The minimum difficulty in bits for a relay to be included in a - Merkle proof. + min_relay_difficulty_bits is the minimum difficulty in bits for a + relay to + + be included in a Merkle proof. + proof_request_probability: + type: number + format: float + description: >- + proof_request_probability is the probability of a session + requiring a proof + + if it's cost (i.e. compute unit consumption) is below the + ProofRequirementThreshold. + proof_requirement_threshold: + type: string + format: uint64 + description: >- + proof_requirement_threshold is the session cost (i.e. compute unit + consumption) + + threshold which asserts that a session MUST have a corresponding + proof when its cost + + is equal to or above the threshold. This is in contrast to the + this requirement + + being determined probabilistically via ProofRequestProbability. + + + TODO_MAINNET: Consider renaming this to + `proof_requirement_threshold_compute_units`. + proof_missing_penalty: + description: >- + proof_missing_penalty is the number of tokens (uPOKT) which should + be slashed from a supplier + + when a proof is required (either via proof_requirement_threshold + or proof_missing_penalty) + + but is not provided. + type: object + properties: + denom: + type: string + amount: + type: string description: Params defines the parameters for the module. 
description: >- MsgUpdateParams is the Msg/UpdateParams request type to update all params @@ -25745,8 +25280,52 @@ definitions: type: string format: uint64 description: >- - The minimum difficulty in bits for a relay to be included in a Merkle - proof. + min_relay_difficulty_bits is the minimum difficulty in bits for a + relay to + + be included in a Merkle proof. + proof_request_probability: + type: number + format: float + description: >- + proof_request_probability is the probability of a session requiring a + proof + + if it's cost (i.e. compute unit consumption) is below the + ProofRequirementThreshold. + proof_requirement_threshold: + type: string + format: uint64 + description: >- + proof_requirement_threshold is the session cost (i.e. compute unit + consumption) + + threshold which asserts that a session MUST have a corresponding proof + when its cost + + is equal to or above the threshold. This is in contrast to the this + requirement + + being determined probabilistically via ProofRequestProbability. + + + TODO_MAINNET: Consider renaming this to + `proof_requirement_threshold_compute_units`. + proof_missing_penalty: + description: >- + proof_missing_penalty is the number of tokens (uPOKT) which should be + slashed from a supplier + + when a proof is required (either via proof_requirement_threshold or + proof_missing_penalty) + + but is not provided. + type: object + properties: + denom: + type: string + amount: + type: string description: Params defines the parameters for the module. poktroll.session.SessionHeader: type: object @@ -25770,9 +25349,8 @@ definitions: type: string description: (Optional) Semantic human readable name for the service title: >- - TODO_TECHDEBT: Name is currently unused but acts as a reminder - that an optional onchain representation of the service is - necessary + TODO_BETA: Name is currently unused but acts as a reminder that an + optional onchain representation of the service is necessary session_id: type: string description: A unique pseudoranom ID for this session @@ -25805,181 +25383,6 @@ definitions: It is the minimal amount of data required to hydrate & retrieve all data relevant to the session. - cosmos.base.query.v1beta1.PageRequest: - type: object - properties: - key: - type: string - format: byte - description: |- - key is a value returned in PageResponse.next_key to begin - querying the next page most efficiently. Only one of offset or key - should be set. - offset: - type: string - format: uint64 - description: |- - offset is a numeric offset that can be used when key is unavailable. - It is less efficient than using key. Only one of offset or key should - be set. - limit: - type: string - format: uint64 - description: >- - limit is the total number of results to be returned in the result - page. - - If left empty it will default to a value to be set by each app. - count_total: - type: boolean - description: >- - count_total is set to true to indicate that the result set should - include - - a count of the total number of items available for pagination in UIs. - - count_total is only respected when offset is used. It is ignored when - key - - is set. - reverse: - type: boolean - description: >- - reverse is set to true if results are to be returned in the descending - order. - - - Since: cosmos-sdk 0.43 - description: |- - message SomeRequest { - Foo some_parameter = 1; - PageRequest pagination = 2; - } - title: |- - PageRequest is to be embedded in gRPC request messages for efficient - pagination. 
Ex: - cosmos.base.query.v1beta1.PageResponse: - type: object - properties: - next_key: - type: string - format: byte - description: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently. It will be empty if - there are no more results. - total: - type: string - format: uint64 - title: |- - total is total number of results available if PageRequest.count_total - was set, its value is undefined otherwise - description: |- - PageResponse is to be embedded in gRPC response messages where the - corresponding request message has used PageRequest. - - message SomeResponse { - repeated Bar results = 1; - PageResponse page = 2; - } - poktroll.service.Params: - type: object - properties: - add_service_fee: - type: string - format: uint64 - description: |- - The amount of uPOKT required to add a new service. - This will be deducted from the signer's account balance, - and transferred to the pocket network foundation. - description: Params defines the parameters for the module. - poktroll.service.QueryAllServicesResponse: - type: object - properties: - service: - type: array - items: - type: object - properties: - id: - type: string - description: Unique identifier for the service - title: >- - For example, what if we want to request a session for a certain - service but with some additional configs that identify it? - name: - type: string - description: (Optional) Semantic human readable name for the service - title: >- - TODO_TECHDEBT: Name is currently unused but acts as a reminder - that an optional onchain representation of the service is - necessary - title: >- - Service message to encapsulate unique and semantic identifiers for a - service on the network - pagination: - type: object - properties: - next_key: - type: string - format: byte - description: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently. It will be empty if - there are no more results. - total: - type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total - - was set, its value is undefined otherwise - description: |- - PageResponse is to be embedded in gRPC response messages where the - corresponding request message has used PageRequest. - - message SomeResponse { - repeated Bar results = 1; - PageResponse page = 2; - } - poktroll.service.QueryGetServiceResponse: - type: object - properties: - service: - type: object - properties: - id: - type: string - description: Unique identifier for the service - title: >- - For example, what if we want to request a session for a certain - service but with some additional configs that identify it? - name: - type: string - description: (Optional) Semantic human readable name for the service - title: >- - TODO_TECHDEBT: Name is currently unused but acts as a reminder - that an optional onchain representation of the service is - necessary - title: >- - Service message to encapsulate unique and semantic identifiers for a - service on the network - poktroll.service.QueryParamsResponse: - type: object - properties: - params: - description: params holds all the parameters of this module. - type: object - properties: - add_service_fee: - type: string - format: uint64 - description: |- - The amount of uPOKT required to add a new service. - This will be deducted from the signer's account balance, - and transferred to the pocket network foundation. - description: QueryParamsResponse is response type for the Query/Params RPC method. 
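Editor's note: the proof-module params introduced in the hunks above (proof_request_probability, proof_requirement_threshold, proof_missing_penalty) jointly describe when a settled claim must carry a Merkle proof. The Go sketch below only restates that rule for readability, under the semantics spelled out in the param descriptions; the package, struct, field, and function names are illustrative and are not the module's generated types or its on-chain logic.

package proofsketch

import "math/rand"

// Params mirrors the proof-module fields described in the spec above.
// Field names are illustrative Go equivalents, not the generated types.
type Params struct {
	ProofRequestProbability   float32 // probability a below-threshold session still requires a proof
	ProofRequirementThreshold uint64  // compute-unit threshold at or above which a proof is always required
	ProofMissingPenaltyUpokt  uint64  // uPOKT slashed when a required proof is never provided
}

// IsProofRequired sketches the documented rule: at or above the threshold a
// proof is mandatory; below it, a proof is required probabilistically with
// ProofRequestProbability.
func IsProofRequired(params Params, sessionComputeUnits uint64, rng *rand.Rand) bool {
	if sessionComputeUnits >= params.ProofRequirementThreshold {
		return true
	}
	return rng.Float32() < params.ProofRequestProbability
}

The penalty amount only comes into play when this check requires a proof and none is later submitted.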
poktroll.service.MsgAddService: type: object properties: @@ -26002,14 +25405,13 @@ definitions: type: string description: (Optional) Semantic human readable name for the service title: >- - TODO_TECHDEBT: Name is currently unused but acts as a reminder - that an optional onchain representation of the service is - necessary + TODO_BETA: Name is currently unused but acts as a reminder that an + optional onchain representation of the service is necessary description: |- MsgAddService defines a message for adding a new message to the network. Services can be added by any actor in the network making them truly permissionless. - TODO_IMPLEMENT: Add Champions / Sources once its fully defined. + TODO_BETA: Add Champions / Sources once its fully defined. poktroll.service.MsgAddServiceResponse: type: object poktroll.service.MsgUpdateParams: @@ -26039,6 +25441,17 @@ definitions: description: |- MsgUpdateParamsResponse defines the response structure for executing a MsgUpdateParams message. + poktroll.service.Params: + type: object + properties: + add_service_fee: + type: string + format: uint64 + description: |- + The amount of uPOKT required to add a new service. + This will be deducted from the signer's account balance, + and transferred to the pocket network foundation. + description: Params defines the parameters for the module. poktroll.session.MsgUpdateParams: type: object properties: @@ -26061,712 +25474,173 @@ definitions: poktroll.session.Params: type: object description: Params defines the parameters for the module. - poktroll.application.Application: + poktroll.shared.MsgUpdateParam: type: object properties: - address: + authority: type: string - title: >- - The Bech32 address of the application using cosmos' ScalarDescriptor - to ensure deterministic encoding - stake: - title: The total amount of uPOKT the application has staked - type: object - properties: - denom: - type: string - amount: - type: string - description: |- - Coin defines a token with a denomination and an amount. - - NOTE: The amount field is an Int which implements the custom method - signatures required by gogoproto. - service_configs: - type: array - items: - type: object - properties: - service: - title: The Service for which the application is configured - type: object - properties: - id: - type: string - description: Unique identifier for the service - title: >- - For example, what if we want to request a session for a - certain service but with some additional configs that - identify it? - name: - type: string - description: (Optional) Semantic human readable name for the service - title: >- - TODO_TECHDEBT: Name is currently unused but acts as a - reminder that an optional onchain representation of the - service is necessary - title: >- - ApplicationServiceConfig holds the service configuration the - application stakes for - title: >- - The list of services this appliccation is configured to request - service for - delegatee_gateway_addresses: - type: array - items: - type: string - description: >- - TODO_TECHDEBT: Rename `delegatee_gateway_addresses` to - `gateway_addresses_delegated_to`. - - Ensure to rename all relevant configs, comments, variables, function - names, etc as well. - - - The Bech32 encoded addresses for all delegatee Gateways, in a - non-nullable slice - pending_undelegations: - type: object - additionalProperties: - type: object - properties: - gateway_addresses: - type: array - items: - type: string - description: >- - UndelegatingGatewayList is used as the Value of - `pending_undelegations`. 
- - It is required to store a repeated list of strings as a map value. - description: >- - A map from sessionEndHeights to a list of Gateways. - - The key is the height of the last block of the session during which - the - - respective undelegation was committed. - - The value is a list of gateways being undelegated from. - - TODO_DOCUMENT(@red-0ne): Need to document the flow from this comment - - so its clear to everyone why this is necessary; - https://github.com/pokt-network/poktroll/issues/476#issuecomment-2052639906. - title: >- - Application defines the type used to store an on-chain definition and - state for an application - poktroll.application.UndelegatingGatewayList: - type: object - properties: - gateway_addresses: - type: array - items: - type: string - description: |- - UndelegatingGatewayList is used as the Value of `pending_undelegations`. - It is required to store a repeated list of strings as a map value. - poktroll.session.QueryGetSessionResponse: - type: object - properties: - session: - type: object - properties: - header: - title: The header of the session containing lightweight data - type: object - properties: - application_address: - type: string - title: >- - The Bech32 address of the application using cosmos' - ScalarDescriptor to ensure deterministic encoding - service: - title: The service this session is for - type: object - properties: - id: - type: string - description: Unique identifier for the service - title: >- - For example, what if we want to request a session for a - certain service but with some additional configs that - identify it? - name: - type: string - description: (Optional) Semantic human readable name for the service - title: >- - TODO_TECHDEBT: Name is currently unused but acts as a - reminder that an optional onchain representation of the - service is necessary - session_id: - type: string - description: A unique pseudoranom ID for this session - title: >- - NOTE: session_id can be derived from the above values using - on-chain but is included in the header for convenience - session_start_block_height: - type: string - format: int64 - title: The height at which this session started - session_end_block_height: - type: string - format: int64 - description: >- - Note that`session_end_block_height` is a derivative of - (`start` + `num_blocks_per_session`) - - as goverened by on-chain params at the time of the session - start. - - It is stored as an additional field to simplofy business logic - in case - - the number of blocks_per_session changes during the session. - - - The height at which this session ended, this is the last block - of the session - description: >- - SessionHeader is a lightweight header for a session that can be - passed around. - - It is the minimal amount of data required to hydrate & retrieve - all data relevant to the session. 
- session_id: - type: string - title: A unique pseudoranom ID for this session - session_number: - type: string - format: int64 - title: The session number since genesis - num_blocks_per_session: - type: string - format: int64 - title: The number of blocks per session when this session started - application: - title: A fully hydrated application object this session is for - type: object - properties: - address: - type: string - title: >- - The Bech32 address of the application using cosmos' - ScalarDescriptor to ensure deterministic encoding - stake: - title: The total amount of uPOKT the application has staked - type: object - properties: - denom: - type: string - amount: - type: string - description: >- - Coin defines a token with a denomination and an amount. - - - NOTE: The amount field is an Int which implements the custom - method - - signatures required by gogoproto. - service_configs: - type: array - items: - type: object - properties: - service: - title: The Service for which the application is configured - type: object - properties: - id: - type: string - description: Unique identifier for the service - title: >- - For example, what if we want to request a session - for a certain service but with some additional - configs that identify it? - name: - type: string - description: >- - (Optional) Semantic human readable name for the - service - title: >- - TODO_TECHDEBT: Name is currently unused but acts as - a reminder that an optional onchain representation - of the service is necessary - title: >- - ApplicationServiceConfig holds the service configuration the - application stakes for - title: >- - The list of services this appliccation is configured to - request service for - delegatee_gateway_addresses: - type: array - items: - type: string - description: >- - TODO_TECHDEBT: Rename `delegatee_gateway_addresses` to - `gateway_addresses_delegated_to`. - - Ensure to rename all relevant configs, comments, variables, - function names, etc as well. - - - The Bech32 encoded addresses for all delegatee Gateways, in a - non-nullable slice - pending_undelegations: - type: object - additionalProperties: - type: object - properties: - gateway_addresses: - type: array - items: - type: string - description: >- - UndelegatingGatewayList is used as the Value of - `pending_undelegations`. - - It is required to store a repeated list of strings as a map - value. - description: >- - A map from sessionEndHeights to a list of Gateways. - - The key is the height of the last block of the session during - which the - - respective undelegation was committed. - - The value is a list of gateways being undelegated from. - - TODO_DOCUMENT(@red-0ne): Need to document the flow from this - comment - - so its clear to everyone why this is necessary; - https://github.com/pokt-network/poktroll/issues/476#issuecomment-2052639906. - suppliers: - type: array - items: - type: object - properties: - address: - type: string - title: >- - The Bech32 address of the supplier using cosmos' - ScalarDescriptor to ensure deterministic encoding - stake: - title: The total amount of uPOKT the supplier has staked - type: object - properties: - denom: - type: string - amount: - type: string - description: >- - Coin defines a token with a denomination and an amount. - - - NOTE: The amount field is an Int which implements the custom - method - - signatures required by gogoproto. 
- services: - type: array - items: - type: object - properties: - service: - title: The Service for which the supplier is configured - type: object - properties: - id: - type: string - description: Unique identifier for the service - title: >- - For example, what if we want to request a session - for a certain service but with some additional - configs that identify it? - name: - type: string - description: >- - (Optional) Semantic human readable name for the - service - title: >- - TODO_TECHDEBT: Name is currently unused but acts - as a reminder that an optional onchain - representation of the service is necessary - endpoints: - type: array - items: - type: object - properties: - url: - type: string - title: URL of the endpoint - rpc_type: - title: Type of RPC exposed on the url above - type: string - enum: - - UNKNOWN_RPC - - GRPC - - WEBSOCKET - - JSON_RPC - - REST - default: UNKNOWN_RPC - description: |- - - UNKNOWN_RPC: Undefined RPC type - - GRPC: gRPC - - WEBSOCKET: WebSocket - - JSON_RPC: JSON-RPC - - REST: REST - configs: - type: array - items: - type: object - properties: - key: - title: Config option key - type: string - enum: - - UNKNOWN_CONFIG - - TIMEOUT - default: UNKNOWN_CONFIG - description: >- - Enum to define configuration options - - TODO_RESEARCH: Should these be configs, - SLAs or something else? There will be more - discussion once we get closer to - implementing on-chain QoS. - - - UNKNOWN_CONFIG: Undefined config option - - TIMEOUT: Timeout setting - value: - type: string - title: Config option value - title: >- - Key-value wrapper for config options, as proto - maps can't be keyed by enums - title: >- - Additional configuration options for the - endpoint - title: >- - SupplierEndpoint message to hold service - configuration details - title: List of endpoints for the service - title: >- - SupplierServiceConfig holds the service configuration the - supplier stakes for - title: The service configs this supplier can support - description: >- - Supplier is the type defining the actor in Pocket Network that - provides RPC services. - title: A fully hydrated set of servicers that are serving the application description: >- - Session is a fully hydrated session object that contains all the - information for the Session - - and its parcipants. - poktroll.session.QueryParamsResponse: + authority is the address that controls the module (defaults to x/gov + unless overwritten). + name: + type: string + as_string: + type: string + as_int64: + type: string + format: int64 + as_bytes: + type: string + format: byte + description: >- + MsgUpdateParam is the Msg/UpdateParam request type to update a single + param. + poktroll.shared.MsgUpdateParamResponse: type: object properties: params: - description: params holds all the parameters of this module. - type: object - description: QueryParamsResponse is response type for the Query/Params RPC method. - poktroll.session.Session: - type: object - properties: - header: - title: The header of the session containing lightweight data type: object properties: - application_address: - type: string - title: >- - The Bech32 address of the application using cosmos' - ScalarDescriptor to ensure deterministic encoding - service: - title: The service this session is for - type: object - properties: - id: - type: string - description: Unique identifier for the service - title: >- - For example, what if we want to request a session for a - certain service but with some additional configs that identify - it? 
- name: - type: string - description: (Optional) Semantic human readable name for the service - title: >- - TODO_TECHDEBT: Name is currently unused but acts as a reminder - that an optional onchain representation of the service is - necessary - session_id: - type: string - description: A unique pseudoranom ID for this session - title: >- - NOTE: session_id can be derived from the above values using - on-chain but is included in the header for convenience - session_start_block_height: + num_blocks_per_session: type: string - format: int64 - title: The height at which this session started - session_end_block_height: + format: uint64 + description: >- + num_blocks_per_session is the number of blocks between the session + start & end heights. + claim_window_open_offset_blocks: type: string - format: int64 + format: uint64 description: >- - Note that`session_end_block_height` is a derivative of (`start` + - `num_blocks_per_session`) - - as goverened by on-chain params at the time of the session start. - - It is stored as an additional field to simplofy business logic in - case - - the number of blocks_per_session changes during the session. - - - The height at which this session ended, this is the last block of - the session - description: >- - SessionHeader is a lightweight header for a session that can be passed - around. + claim_window_open_offset_blocks is the number of blocks after the + session grace - It is the minimal amount of data required to hydrate & retrieve all - data relevant to the session. - session_id: - type: string - title: A unique pseudoranom ID for this session - session_number: - type: string - format: int64 - title: The session number since genesis - num_blocks_per_session: - type: string - format: int64 - title: The number of blocks per session when this session started - application: - title: A fully hydrated application object this session is for - type: object - properties: - address: + period height, at which the claim window opens. + claim_window_close_offset_blocks: type: string - title: >- - The Bech32 address of the application using cosmos' - ScalarDescriptor to ensure deterministic encoding - stake: - title: The total amount of uPOKT the application has staked - type: object - properties: - denom: - type: string - amount: - type: string + format: uint64 description: >- - Coin defines a token with a denomination and an amount. + claim_window_close_offset_blocks is the number of blocks after the + claim window + open height, at which the claim window closes. + proof_window_open_offset_blocks: + type: string + format: uint64 + description: >- + proof_window_open_offset_blocks is the number of blocks after the + claim window - NOTE: The amount field is an Int which implements the custom - method + close height, at which the proof window opens. + proof_window_close_offset_blocks: + type: string + format: uint64 + description: >- + proof_window_close_offset_blocks is the number of blocks after the + proof window - signatures required by gogoproto. - service_configs: - type: array - items: - type: object - properties: - service: - title: The Service for which the application is configured - type: object - properties: - id: - type: string - description: Unique identifier for the service - title: >- - For example, what if we want to request a session for a - certain service but with some additional configs that - identify it? 
- name: - type: string - description: (Optional) Semantic human readable name for the service - title: >- - TODO_TECHDEBT: Name is currently unused but acts as a - reminder that an optional onchain representation of the - service is necessary - title: >- - ApplicationServiceConfig holds the service configuration the - application stakes for - title: >- - The list of services this appliccation is configured to request - service for - delegatee_gateway_addresses: - type: array - items: - type: string + open height, at which the proof window closes. + description: Params defines the parameters for the module. + description: |- + MsgUpdateParamResponse defines the response structure for executing a + MsgUpdateParam message after a single param update. + poktroll.shared.MsgUpdateParams: + type: object + properties: + authority: + type: string + description: >- + authority is the address that controls the module (defaults to x/gov + unless overwritten). + params: + type: object + properties: + num_blocks_per_session: + type: string + format: uint64 description: >- - TODO_TECHDEBT: Rename `delegatee_gateway_addresses` to - `gateway_addresses_delegated_to`. - - Ensure to rename all relevant configs, comments, variables, - function names, etc as well. - - - The Bech32 encoded addresses for all delegatee Gateways, in a - non-nullable slice - pending_undelegations: - type: object - additionalProperties: - type: object - properties: - gateway_addresses: - type: array - items: - type: string - description: >- - UndelegatingGatewayList is used as the Value of - `pending_undelegations`. - - It is required to store a repeated list of strings as a map - value. + num_blocks_per_session is the number of blocks between the session + start & end heights. + claim_window_open_offset_blocks: + type: string + format: uint64 description: >- - A map from sessionEndHeights to a list of Gateways. - - The key is the height of the last block of the session during - which the + claim_window_open_offset_blocks is the number of blocks after the + session grace - respective undelegation was committed. + period height, at which the claim window opens. + claim_window_close_offset_blocks: + type: string + format: uint64 + description: >- + claim_window_close_offset_blocks is the number of blocks after the + claim window - The value is a list of gateways being undelegated from. + open height, at which the claim window closes. + proof_window_open_offset_blocks: + type: string + format: uint64 + description: >- + proof_window_open_offset_blocks is the number of blocks after the + claim window - TODO_DOCUMENT(@red-0ne): Need to document the flow from this - comment + close height, at which the proof window opens. + proof_window_close_offset_blocks: + type: string + format: uint64 + description: >- + proof_window_close_offset_blocks is the number of blocks after the + proof window - so its clear to everyone why this is necessary; - https://github.com/pokt-network/poktroll/issues/476#issuecomment-2052639906. - suppliers: - type: array - items: - type: object - properties: - address: - type: string - title: >- - The Bech32 address of the supplier using cosmos' - ScalarDescriptor to ensure deterministic encoding - stake: - title: The total amount of uPOKT the supplier has staked - type: object - properties: - denom: - type: string - amount: - type: string - description: >- - Coin defines a token with a denomination and an amount. + open height, at which the proof window closes. 
+ description: Params defines the parameters for the module. + description: MsgUpdateParams is the Msg/UpdateParams request type. + poktroll.shared.MsgUpdateParamsResponse: + type: object + description: |- + MsgUpdateParamsResponse defines the response structure for executing a + MsgUpdateParams message. + poktroll.shared.Params: + type: object + properties: + num_blocks_per_session: + type: string + format: uint64 + description: >- + num_blocks_per_session is the number of blocks between the session + start & end heights. + claim_window_open_offset_blocks: + type: string + format: uint64 + description: >- + claim_window_open_offset_blocks is the number of blocks after the + session grace + period height, at which the claim window opens. + claim_window_close_offset_blocks: + type: string + format: uint64 + description: >- + claim_window_close_offset_blocks is the number of blocks after the + claim window - NOTE: The amount field is an Int which implements the custom - method + open height, at which the claim window closes. + proof_window_open_offset_blocks: + type: string + format: uint64 + description: >- + proof_window_open_offset_blocks is the number of blocks after the + claim window - signatures required by gogoproto. - services: - type: array - items: - type: object - properties: - service: - title: The Service for which the supplier is configured - type: object - properties: - id: - type: string - description: Unique identifier for the service - title: >- - For example, what if we want to request a session for - a certain service but with some additional configs - that identify it? - name: - type: string - description: >- - (Optional) Semantic human readable name for the - service - title: >- - TODO_TECHDEBT: Name is currently unused but acts as a - reminder that an optional onchain representation of - the service is necessary - endpoints: - type: array - items: - type: object - properties: - url: - type: string - title: URL of the endpoint - rpc_type: - title: Type of RPC exposed on the url above - type: string - enum: - - UNKNOWN_RPC - - GRPC - - WEBSOCKET - - JSON_RPC - - REST - default: UNKNOWN_RPC - description: |- - - UNKNOWN_RPC: Undefined RPC type - - GRPC: gRPC - - WEBSOCKET: WebSocket - - JSON_RPC: JSON-RPC - - REST: REST - configs: - type: array - items: - type: object - properties: - key: - title: Config option key - type: string - enum: - - UNKNOWN_CONFIG - - TIMEOUT - default: UNKNOWN_CONFIG - description: >- - Enum to define configuration options - - TODO_RESEARCH: Should these be configs, SLAs - or something else? There will be more - discussion once we get closer to implementing - on-chain QoS. - - - UNKNOWN_CONFIG: Undefined config option - - TIMEOUT: Timeout setting - value: - type: string - title: Config option value - title: >- - Key-value wrapper for config options, as proto - maps can't be keyed by enums - title: Additional configuration options for the endpoint - title: >- - SupplierEndpoint message to hold service configuration - details - title: List of endpoints for the service - title: >- - SupplierServiceConfig holds the service configuration the - supplier stakes for - title: The service configs this supplier can support - description: >- - Supplier is the type defining the actor in Pocket Network that - provides RPC services. 
- title: A fully hydrated set of servicers that are serving the application - description: >- - Session is a fully hydrated session object that contains all the - information for the Session + close height, at which the proof window opens. + proof_window_close_offset_blocks: + type: string + format: uint64 + description: >- + proof_window_close_offset_blocks is the number of blocks after the + proof window - and its parcipants. + open height, at which the proof window closes. + description: Params defines the parameters for the module. poktroll.shared.ConfigOption: type: object properties: @@ -26790,143 +25664,38 @@ definitions: type: string title: Config option value title: >- - Key-value wrapper for config options, as proto maps can't be keyed by - enums - poktroll.shared.ConfigOptions: - type: string - enum: - - UNKNOWN_CONFIG - - TIMEOUT - default: UNKNOWN_CONFIG - description: >- - Enum to define configuration options - - TODO_RESEARCH: Should these be configs, SLAs or something else? There will - be more discussion once we get closer to implementing on-chain QoS. - - - UNKNOWN_CONFIG: Undefined config option - - TIMEOUT: Timeout setting - poktroll.shared.RPCType: - type: string - enum: - - UNKNOWN_RPC - - GRPC - - WEBSOCKET - - JSON_RPC - - REST - default: UNKNOWN_RPC - description: |- - - UNKNOWN_RPC: Undefined RPC type - - GRPC: gRPC - - WEBSOCKET: WebSocket - - JSON_RPC: JSON-RPC - - REST: REST - title: Enum to define RPC types - poktroll.shared.Supplier: - type: object - properties: - address: - type: string - title: >- - The Bech32 address of the supplier using cosmos' ScalarDescriptor to - ensure deterministic encoding - stake: - title: The total amount of uPOKT the supplier has staked - type: object - properties: - denom: - type: string - amount: - type: string - description: |- - Coin defines a token with a denomination and an amount. - - NOTE: The amount field is an Int which implements the custom method - signatures required by gogoproto. - services: - type: array - items: - type: object - properties: - service: - title: The Service for which the supplier is configured - type: object - properties: - id: - type: string - description: Unique identifier for the service - title: >- - For example, what if we want to request a session for a - certain service but with some additional configs that - identify it? - name: - type: string - description: (Optional) Semantic human readable name for the service - title: >- - TODO_TECHDEBT: Name is currently unused but acts as a - reminder that an optional onchain representation of the - service is necessary - endpoints: - type: array - items: - type: object - properties: - url: - type: string - title: URL of the endpoint - rpc_type: - title: Type of RPC exposed on the url above - type: string - enum: - - UNKNOWN_RPC - - GRPC - - WEBSOCKET - - JSON_RPC - - REST - default: UNKNOWN_RPC - description: |- - - UNKNOWN_RPC: Undefined RPC type - - GRPC: gRPC - - WEBSOCKET: WebSocket - - JSON_RPC: JSON-RPC - - REST: REST - configs: - type: array - items: - type: object - properties: - key: - title: Config option key - type: string - enum: - - UNKNOWN_CONFIG - - TIMEOUT - default: UNKNOWN_CONFIG - description: >- - Enum to define configuration options - - TODO_RESEARCH: Should these be configs, SLAs or - something else? There will be more discussion once - we get closer to implementing on-chain QoS. 
- - - UNKNOWN_CONFIG: Undefined config option - - TIMEOUT: Timeout setting - value: - type: string - title: Config option value - title: >- - Key-value wrapper for config options, as proto maps - can't be keyed by enums - title: Additional configuration options for the endpoint - title: SupplierEndpoint message to hold service configuration details - title: List of endpoints for the service - title: >- - SupplierServiceConfig holds the service configuration the supplier - stakes for - title: The service configs this supplier can support + Key-value wrapper for config options, as proto maps can't be keyed by + enums + poktroll.shared.ConfigOptions: + type: string + enum: + - UNKNOWN_CONFIG + - TIMEOUT + default: UNKNOWN_CONFIG description: >- - Supplier is the type defining the actor in Pocket Network that provides - RPC services. + Enum to define configuration options + + TODO_RESEARCH: Should these be configs, SLAs or something else? There will + be more discussion once we get closer to implementing on-chain QoS. + + - UNKNOWN_CONFIG: Undefined config option + - TIMEOUT: Timeout setting + poktroll.shared.RPCType: + type: string + enum: + - UNKNOWN_RPC + - GRPC + - WEBSOCKET + - JSON_RPC + - REST + default: UNKNOWN_RPC + description: |- + - UNKNOWN_RPC: Undefined RPC type + - GRPC: gRPC + - WEBSOCKET: WebSocket + - JSON_RPC: JSON-RPC + - REST: REST + title: Enum to define RPC types poktroll.shared.SupplierEndpoint: type: object properties: @@ -26995,9 +25764,8 @@ definitions: type: string description: (Optional) Semantic human readable name for the service title: >- - TODO_TECHDEBT: Name is currently unused but acts as a reminder - that an optional onchain representation of the service is - necessary + TODO_BETA: Name is currently unused but acts as a reminder that an + optional onchain representation of the service is necessary endpoints: type: array items: @@ -27055,155 +25823,6 @@ definitions: title: >- SupplierServiceConfig holds the service configuration the supplier stakes for - poktroll.shared.MsgUpdateParam: - type: object - properties: - authority: - type: string - description: >- - authority is the address that controls the module (defaults to x/gov - unless overwritten). - name: - type: string - as_string: - type: string - as_int64: - type: string - format: int64 - as_bytes: - type: string - format: byte - description: >- - MsgUpdateParam is the Msg/UpdateParam request type to update a single - param. - poktroll.shared.MsgUpdateParamResponse: - type: object - properties: - params: - type: object - properties: - num_blocks_per_session: - type: string - format: uint64 - description: >- - num_blocks_per_session is the number of blocks between the session - start & end heights. - claim_window_open_offset_blocks: - type: string - format: uint64 - description: >- - claim_window_open_offset_blocks is the number of blocks after the - session grace - - period height, at which the claim window opens. - claim_window_close_offset_blocks: - type: string - format: uint64 - description: >- - claim_window_close_offset_blocks is the number of blocks after the - claim window - - open height, at which the claim window closes. - description: Params defines the parameters for the module. - description: |- - MsgUpdateParamResponse defines the response structure for executing a - MsgUpdateParam message after a single param update. 
- poktroll.shared.MsgUpdateParams: - type: object - properties: - authority: - type: string - description: >- - authority is the address that controls the module (defaults to x/gov - unless overwritten). - params: - type: object - properties: - num_blocks_per_session: - type: string - format: uint64 - description: >- - num_blocks_per_session is the number of blocks between the session - start & end heights. - claim_window_open_offset_blocks: - type: string - format: uint64 - description: >- - claim_window_open_offset_blocks is the number of blocks after the - session grace - - period height, at which the claim window opens. - claim_window_close_offset_blocks: - type: string - format: uint64 - description: >- - claim_window_close_offset_blocks is the number of blocks after the - claim window - - open height, at which the claim window closes. - description: Params defines the parameters for the module. - description: MsgUpdateParams is the Msg/UpdateParams request type. - poktroll.shared.MsgUpdateParamsResponse: - type: object - description: |- - MsgUpdateParamsResponse defines the response structure for executing a - MsgUpdateParams message. - poktroll.shared.Params: - type: object - properties: - num_blocks_per_session: - type: string - format: uint64 - description: >- - num_blocks_per_session is the number of blocks between the session - start & end heights. - claim_window_open_offset_blocks: - type: string - format: uint64 - description: >- - claim_window_open_offset_blocks is the number of blocks after the - session grace - - period height, at which the claim window opens. - claim_window_close_offset_blocks: - type: string - format: uint64 - description: >- - claim_window_close_offset_blocks is the number of blocks after the - claim window - - open height, at which the claim window closes. - description: Params defines the parameters for the module. - poktroll.shared.QueryParamsResponse: - type: object - properties: - params: - description: params holds all the parameters of this module. - type: object - properties: - num_blocks_per_session: - type: string - format: uint64 - description: >- - num_blocks_per_session is the number of blocks between the session - start & end heights. - claim_window_open_offset_blocks: - type: string - format: uint64 - description: >- - claim_window_open_offset_blocks is the number of blocks after the - session grace - - period height, at which the claim window opens. - claim_window_close_offset_blocks: - type: string - format: uint64 - description: >- - claim_window_close_offset_blocks is the number of blocks after the - claim window - - open height, at which the claim window closes. - description: QueryParamsResponse is response type for the Query/Params RPC method. poktroll.supplier.MsgStakeSupplier: type: object properties: @@ -27248,9 +25867,9 @@ definitions: type: string description: (Optional) Semantic human readable name for the service title: >- - TODO_TECHDEBT: Name is currently unused but acts as a - reminder that an optional onchain representation of the - service is necessary + TODO_BETA: Name is currently unused but acts as a reminder + that an optional onchain representation of the service is + necessary endpoints: type: array items: @@ -27422,6 +26041,83 @@ definitions: The amount of upokt that a compute unit should translate to when settling a session. description: Params defines the parameters for the tokenomics module. 
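Editor's note: the shared-module params now chain two additional offsets after the claim window, proof_window_open_offset_blocks and proof_window_close_offset_blocks. The sketch below composes the four window heights exactly as the descriptions above state (claim window opens after the session grace period height, and each subsequent boundary is offset from the previous one). shared.GetClaimWindowOpenHeight is the only helper this diff actually references (in session_test.go further down), so the names below are assumed purely for illustration.

package sharedsketch

// WindowOffsets mirrors the shared-module offset params in this spec;
// the struct and function names are illustrative, not the module's API.
type WindowOffsets struct {
	ClaimWindowOpenOffsetBlocks  uint64
	ClaimWindowCloseOffsetBlocks uint64
	ProofWindowOpenOffsetBlocks  uint64
	ProofWindowCloseOffsetBlocks uint64
}

// WindowHeights chains the offsets as described: claim window opens after the
// session grace period end height, the claim window closes after a further
// offset, the proof window opens after the claim window close height, and the
// proof window closes after a final offset.
func WindowHeights(o WindowOffsets, sessionGracePeriodEndHeight uint64) (claimOpen, claimClose, proofOpen, proofClose uint64) {
	claimOpen = sessionGracePeriodEndHeight + o.ClaimWindowOpenOffsetBlocks
	claimClose = claimOpen + o.ClaimWindowCloseOffsetBlocks
	proofOpen = claimClose + o.ProofWindowOpenOffsetBlocks
	proofClose = proofOpen + o.ProofWindowCloseOffsetBlocks
	return claimOpen, claimClose, proofOpen, proofClose
}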
+ cosmos.base.query.v1beta1.PageRequest: + type: object + properties: + key: + type: string + format: byte + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + offset: + type: string + format: uint64 + description: |- + offset is a numeric offset that can be used when key is unavailable. + It is less efficient than using key. Only one of offset or key should + be set. + limit: + type: string + format: uint64 + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. + count_total: + type: boolean + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in UIs. + + count_total is only respected when offset is used. It is ignored when + key + + is set. + reverse: + type: boolean + description: >- + reverse is set to true if results are to be returned in the descending + order. + + + Since: cosmos-sdk 0.43 + description: |- + message SomeRequest { + Foo some_parameter = 1; + PageRequest pagination = 2; + } + title: |- + PageRequest is to be embedded in gRPC request messages for efficient + pagination. Ex: + cosmos.base.query.v1beta1.PageResponse: + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: |- + total is total number of results available if PageRequest.count_total + was set, its value is undefined otherwise + description: |- + PageResponse is to be embedded in gRPC response messages where the + corresponding request message has used PageRequest. 
+ + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } poktroll.tokenomics.QueryAllRelayMiningDifficultyResponse: type: object properties: diff --git a/e2e/tests/init_test.go b/e2e/tests/init_test.go index d03af3b21..859799c47 100644 --- a/e2e/tests/init_test.go +++ b/e2e/tests/init_test.go @@ -291,7 +291,7 @@ func (s *suite) getConfigFileContent(amount int64, actorType, serviceId string) rpc_type: json_rpc`, amount, serviceId) default: - s.Fatalf("unknown actor type %s", actorType) + s.Fatalf("ERROR: unknown actor type %s", actorType) } fmt.Println(yaml.NormalizeYAMLIndentation(configContent)) return yaml.NormalizeYAMLIndentation(configContent) @@ -339,7 +339,7 @@ func (s *suite) TheApplicationIsStakedForService(appName string, serviceId strin return } } - s.Fatalf("application %s is not staked for service %s", appName, serviceId) + s.Fatalf("ERROR: application %s is not staked for service %s", appName, serviceId) } func (s *suite) TheSupplierIsStakedForService(supplierName string, serviceId string) { @@ -348,7 +348,7 @@ func (s *suite) TheSupplierIsStakedForService(supplierName string, serviceId str return } } - s.Fatalf("supplier %s is not staked for service %s", supplierName, serviceId) + s.Fatalf("ERROR: supplier %s is not staked for service %s", supplierName, serviceId) } func (s *suite) TheSessionForApplicationAndServiceContainsTheSupplier(appName string, serviceId string, supplierName string) { @@ -377,7 +377,7 @@ func (s *suite) TheSessionForApplicationAndServiceContainsTheSupplier(appName st return } } - s.Fatalf("session for app %s and service %s does not contain supplier %s", appName, serviceId, supplierName) + s.Fatalf("ERROR: session for app %s and service %s does not contain supplier %s", appName, serviceId, supplierName) } func (s *suite) TheApplicationSendsTheSupplierARequestForServiceWithPathAndData(appName, supplierName, serviceId, path, requestData string) { @@ -572,7 +572,7 @@ func (s *suite) validateAmountChange(prevAmount, currAmount int, expectedAmountC require.LessOrEqual(s, currAmount, prevAmount, "%s %s expected to have less upokt but actually had more", accName, balanceType) require.Equal(s, expectedAmountChange, deltaAmount, "%s %s expected) decrease in upokt was incorrect", accName, balanceType) default: - s.Fatalf("unknown condition %s", condition) + s.Fatalf("ERROR: unknown condition %s", condition) } } diff --git a/e2e/tests/parse_params_test.go b/e2e/tests/parse_params_test.go index 84cbfe2df..b01df8c0f 100644 --- a/e2e/tests/parse_params_test.go +++ b/e2e/tests/parse_params_test.go @@ -67,7 +67,7 @@ func (s *suite) parseParam(table gocuke.DataTable, rowIdx int) paramAny { coinValue := cosmostypes.NewCoin(volatile.DenomuPOKT, math.NewInt(coinAmount)) paramValue = &coinValue default: - s.Fatalf("unexpected param type %q", paramType) + s.Fatalf("ERROR: unexpected param type %q", paramType) } return paramAny{ @@ -116,7 +116,7 @@ func (s *suite) newTokenomicsMsgUpdateParams(params paramsMap) cosmostypes.Msg { case tokenomicstypes.ParamComputeUnitsToTokensMultiplier: msgUpdateParams.Params.ComputeUnitsToTokensMultiplier = uint64(paramValue.value.(int64)) default: - s.Fatalf("unexpected %q type param name %q", paramValue.typeStr, paramName) + s.Fatalf("ERROR: unexpected %q type param name %q", paramValue.typeStr, paramName) } } return proto.Message(msgUpdateParams) @@ -141,7 +141,7 @@ func (s *suite) newProofMsgUpdateParams(params paramsMap) cosmostypes.Msg { case prooftypes.ParamProofMissingPenalty: 
msgUpdateParams.Params.ProofMissingPenalty = paramValue.value.(*cosmostypes.Coin) default: - s.Fatalf("unexpected %q type param name %q", paramValue.typeStr, paramName) + s.Fatalf("ERROR: unexpected %q type param name %q", paramValue.typeStr, paramName) } } return proto.Message(msgUpdateParams) @@ -168,7 +168,7 @@ func (s *suite) newSharedMsgUpdateParams(params paramsMap) cosmostypes.Msg { case sharedtypes.ParamProofWindowCloseOffsetBlocks: msgUpdateParams.Params.ProofWindowCloseOffsetBlocks = uint64(paramValue.value.(int64)) default: - s.Fatalf("unexpected %q type param name %q", paramValue.typeStr, paramName) + s.Fatalf("ERROR: unexpected %q type param name %q", paramValue.typeStr, paramName) } } return proto.Message(msgUpdateParams) @@ -188,7 +188,7 @@ func (s *suite) newAppMsgUpdateParams(params paramsMap) cosmostypes.Msg { case apptypes.ParamMaxDelegatedGateways: msgUpdateParams.Params.MaxDelegatedGateways = uint64(paramValue.value.(int64)) default: - s.Fatalf("unexpected %q type param name %q", paramValue.typeStr, paramName) + s.Fatalf("ERROR: unexpected %q type param name %q", paramValue.typeStr, paramName) } } return proto.Message(msgUpdateParams) @@ -208,7 +208,7 @@ func (s *suite) newServiceMsgUpdateParams(params paramsMap) cosmostypes.Msg { case servicetypes.ParamAddServiceFee: msgUpdateParams.Params.AddServiceFee = uint64(paramValue.value.(int64)) default: - s.Fatalf("unexpected %q type param name %q", paramValue.typeStr, paramName) + s.Fatalf("ERROR: unexpected %q type param name %q", paramValue.typeStr, paramName) } } return proto.Message(msgUpdateParams) diff --git a/e2e/tests/reset_params_test.go b/e2e/tests/reset_params_test.go index 88083a177..80c9376e8 100644 --- a/e2e/tests/reset_params_test.go +++ b/e2e/tests/reset_params_test.go @@ -96,7 +96,7 @@ func (s *suite) msgUpdateParamsToDefaultsAny(moduleName string) *codectypes.Any }, ) default: - s.Fatalf("unknown module name: %s", moduleName) + s.Fatalf("ERROR: unknown module name: %s", moduleName) } require.NoError(s, err) diff --git a/e2e/tests/session_steps_test.go b/e2e/tests/session_steps_test.go index 29afad430..47124c308 100644 --- a/e2e/tests/session_steps_test.go +++ b/e2e/tests/session_steps_test.go @@ -10,6 +10,7 @@ import ( "cosmossdk.io/depinject" abci "github.com/cometbft/cometbft/abci/types" + cosmostypes "github.com/cosmos/cosmos-sdk/types" "github.com/stretchr/testify/require" "github.com/pokt-network/poktroll/pkg/client" @@ -17,6 +18,7 @@ import ( "github.com/pokt-network/poktroll/pkg/client/events" "github.com/pokt-network/poktroll/pkg/client/tx" "github.com/pokt-network/poktroll/pkg/observable/channel" + testutilevents "github.com/pokt-network/poktroll/testutil/events" "github.com/pokt-network/poktroll/testutil/testclient" prooftypes "github.com/pokt-network/poktroll/x/proof/types" tokenomicstypes "github.com/pokt-network/poktroll/x/tokenomics/types" @@ -25,7 +27,7 @@ import ( const ( // eventTimeout is the duration of time to wait after sending a valid tx // before the test should time out (fail). - eventTimeout = 60 * time.Second + eventTimeout = 100 * time.Second // testServiceId is the service ID used for testing purposes that is // expected to be available in LocalNet. 
testServiceId = "anvil" @@ -166,13 +168,23 @@ func (s *suite) TheClaimCreatedBySupplierForServiceForApplicationShouldBeSuccess if event.Type != "poktroll.tokenomics.EventClaimSettled" { return false } - claimSettledEvent := s.abciToClaimSettledEvent(event) + + // Parse the event + testutilevents.QuoteEventMode(event) + typedEvent, err := cosmostypes.ParseTypedEvent(*event) + require.NoError(s, err) + require.NotNil(s, typedEvent) + claimSettledEvent, ok := typedEvent.(*tokenomicstypes.EventClaimSettled) + require.True(s, ok) + + // Assert that the claim was settled for the correct application, supplier, and service. claim := claimSettledEvent.Claim require.Equal(s, app.Address, claim.SessionHeader.ApplicationAddress) require.Equal(s, supplier.Address, claim.SupplierAddress) require.Equal(s, serviceId, claim.SessionHeader.Service.Id) require.Greater(s, claimSettledEvent.ComputeUnits, uint64(0), "compute units should be greater than 0") s.Logf("Claim settled for %d compute units w/ proof requirement: %t\n", claimSettledEvent.ComputeUnits, claimSettledEvent.ProofRequired) + return true } @@ -235,7 +247,7 @@ func (s *suite) waitForTxResultEvent(targetAction string) { select { case <-time.After(eventTimeout): - s.Fatalf("timed out waiting for message with action %q", targetAction) + s.Fatalf("ERROR: timed out waiting for message with action %q", targetAction) case <-ctx.Done(): s.Log("Success; message detected before timeout.") } @@ -280,47 +292,8 @@ func (s *suite) waitForNewBlockEvent( select { case <-time.After(eventTimeout): - s.Fatalf("timed out waiting for NewBlock event") + s.Fatalf("ERROR: timed out waiting for NewBlock event") case <-ctx.Done(): s.Log("Success; message detected before timeout.") } } - -// abciToClaimSettledEvent converts an abci.Event to a tokenomics.EventClaimSettled -// - -func (s *suite) abciToClaimSettledEvent(event *abci.Event) *tokenomicstypes.EventClaimSettled { - var claimSettledEvent tokenomicstypes.EventClaimSettled - - // TODO_TECHDEBT: Investigate why `cosmostypes.ParseTypedEvent(*event)` throws - // an error where cosmostypes is imported from "github.com/cosmos/cosmos-sdk/types" - // resulting in the following error: - // 'json: error calling MarshalJSON for type json.RawMessage: invalid character 'E' looking for beginning of value' - // typedEvent, err := cosmostypes.ParseTypedEvent(*event) - - for _, attr := range event.Attributes { - switch string(attr.Key) { - case "claim": - var claim prooftypes.Claim - if err := s.cdc.UnmarshalJSON([]byte(attr.Value), &claim); err != nil { - s.Fatalf("Failed to unmarshal claim: %v", err) - } - claimSettledEvent.Claim = &claim - case "compute_units": - value := string(attr.Value) - value = value[1 : len(value)-1] // Remove surrounding quotes - computeUnits, err := strconv.ParseUint(value, 10, 64) - if err != nil { - s.Fatalf("Failed to parse compute_units: %v", err) - } - claimSettledEvent.ComputeUnits = computeUnits - case "proof_required": - proofRequired, err := strconv.ParseBool(string(attr.Value)) - if err != nil { - s.Fatalf("Failed to parse proof_required: %v", err) - } - claimSettledEvent.ProofRequired = proofRequired - } - } - return &claimSettledEvent -} diff --git a/pkg/relayer/miner/miner_test.go b/pkg/relayer/miner/miner_test.go index e878b8475..fbebf8ac6 100644 --- a/pkg/relayer/miner/miner_test.go +++ b/pkg/relayer/miner/miner_test.go @@ -70,7 +70,7 @@ func TestMiner_MinedRelays(t *testing.T) { // Assert that all minable relay fixtures were published to minedRelays. 
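The inline typed-event parsing added above (quote the injected "mode" attribute, call cosmostypes.ParseTypedEvent, then type-assert) is the pattern that replaces the hand-rolled abciToClaimSettledEvent helper; a minimal sketch of it as a reusable test helper, using only packages already imported in this diff (the helper name is illustrative):

import (
	"testing"

	abci "github.com/cometbft/cometbft/abci/types"
	cosmostypes "github.com/cosmos/cosmos-sdk/types"
	"github.com/stretchr/testify/require"

	testutilevents "github.com/pokt-network/poktroll/testutil/events"
	tokenomicstypes "github.com/pokt-network/poktroll/x/tokenomics/types"
)

// parseClaimSettledEvent converts an ABCI event emitted by the tokenomics module into
// its typed form.
func parseClaimSettledEvent(t *testing.T, event *abci.Event) *tokenomicstypes.EventClaimSettled {
	t.Helper()

	// The "mode" attribute injected by the event emitter is not valid JSON, so it must
	// be quoted before ParseTypedEvent can unmarshal the event.
	testutilevents.QuoteEventMode(event)

	typedEvent, err := cosmostypes.ParseTypedEvent(*event)
	require.NoError(t, err)

	claimSettledEvent, ok := typedEvent.(*tokenomicstypes.EventClaimSettled)
	require.True(t, ok, "unexpected event type %T", typedEvent)
	return claimSettledEvent
}

The generic testutilevents.FilterEvents helper introduced later in this diff generalizes the same steps over a whole slice of events.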
actualMinedRelaysMu.Lock() - require.EqualValues(t, expectedMinedRelays, actualMinedRelays) + require.EqualValues(t, expectedMinedRelays, actualMinedRelays, "TODO_FLAKY: Try re-running with 'go test -v -count=1 -run TestMiner_MinedRelays ./pkg/relayer/miner/...'") actualMinedRelaysMu.Unlock() } diff --git a/pkg/relayer/session/session_test.go b/pkg/relayer/session/session_test.go index 81f3a4b49..8615e64d4 100644 --- a/pkg/relayer/session/session_test.go +++ b/pkg/relayer/session/session_test.go @@ -25,23 +25,28 @@ import ( "github.com/pokt-network/poktroll/testutil/testclient/testsupplier" "github.com/pokt-network/poktroll/testutil/testpolylog" "github.com/pokt-network/poktroll/testutil/testrelayer" + sessiontypes "github.com/pokt-network/poktroll/x/session/types" "github.com/pokt-network/poktroll/x/shared" sharedtypes "github.com/pokt-network/poktroll/x/shared/types" ) func TestRelayerSessionsManager_Start(t *testing.T) { - const ( - sessionStartHeight = 1 - sessionEndHeight = 2 - ) - // TODO_TECHDEBT(#446): Centralize the configuration for the SMT spec. var ( _, ctx = testpolylog.NewLoggerWithCtx(context.Background(), polyzero.DebugLevel) spec = smt.NewTrieSpec(sha256.New(), true) emptyBlockHash = make([]byte, spec.PathHasherSize()) + activeSession *sessiontypes.Session ) + activeSession = &sessiontypes.Session{ + Header: &sessiontypes.SessionHeader{ + SessionStartBlockHeight: 1, + SessionEndBlockHeight: 2, + }, + } + sessionHeader := activeSession.GetHeader() + // Set up dependencies. blocksObs, blockPublishCh := channel.NewReplayObservable[client.Block](ctx, 1) blockClient := testblock.NewAnyTimesCommittedBlocksSequenceBlockClient(t, emptyBlockHash, blocksObs) @@ -94,7 +99,7 @@ func TestRelayerSessionsManager_Start(t *testing.T) { relayerSessionsManager.Start(ctx) // Publish a mined relay to the minedRelaysPublishCh to insert into the session tree. - minedRelay := testrelayer.NewMinedRelay(t, sessionStartHeight, sessionEndHeight, supplierAddress) + minedRelay := testrelayer.NewUnsignedMinedRelay(t, activeSession, supplierAddress) minedRelaysPublishCh <- minedRelay // Wait a tick to allow the relayer sessions manager to process asynchronously. @@ -102,12 +107,14 @@ func TestRelayerSessionsManager_Start(t *testing.T) { time.Sleep(10 * time.Millisecond) // Publish a block to the blockPublishCh to simulate non-actionable blocks. + sessionStartHeight := sessionHeader.GetSessionStartBlockHeight() noopBlock := testblock.NewAnyTimesBlock(t, emptyBlockHash, sessionStartHeight) blockPublishCh <- noopBlock // Calculate the session grace period end block height to emit that block height // to the blockPublishCh to trigger claim creation for the session. sharedParams := sharedtypes.DefaultParams() + sessionEndHeight := sessionHeader.GetSessionEndBlockHeight() sessionClaimWindowOpenHeight := shared.GetClaimWindowOpenHeight(&sharedParams, sessionEndHeight) // Publish a block to the blockPublishCh to trigger claim creation for the session. diff --git a/proto/poktroll/tokenomics/event.proto b/proto/poktroll/tokenomics/event.proto index cf3ff45ba..5fe5c8906 100644 --- a/proto/poktroll/tokenomics/event.proto +++ b/proto/poktroll/tokenomics/event.proto @@ -20,3 +20,13 @@ message EventClaimSettled { uint64 compute_units = 2; bool proof_required = 3; } + +// EventRelayMiningDifficultyUpdated is an event emitted whenever the relay mining difficulty is updated +// for a given service. 
+message EventRelayMiningDifficultyUpdated { + string service_id = 1; + string prev_target_hash_hex_encoded = 2; + string new_target_hash_hex_encoded = 3; + uint64 prev_num_relays_ema = 4; + uint64 new_num_relays_ema = 5; +} diff --git a/tests/integration/tokenomics/relay_mining_difficulty_test.go b/tests/integration/tokenomics/relay_mining_difficulty_test.go index 35daaecec..af0b9a9b7 100644 --- a/tests/integration/tokenomics/relay_mining_difficulty_test.go +++ b/tests/integration/tokenomics/relay_mining_difficulty_test.go @@ -1,9 +1,24 @@ package integration_test import ( + "context" + "crypto/sha256" "testing" + "github.com/pokt-network/smt" + "github.com/pokt-network/smt/kvstore/badger" + "github.com/stretchr/testify/require" + "github.com/pokt-network/poktroll/cmd/poktrolld/cmd" + testutilevents "github.com/pokt-network/poktroll/testutil/events" + integration "github.com/pokt-network/poktroll/testutil/integration" + testutil "github.com/pokt-network/poktroll/testutil/integration" + "github.com/pokt-network/poktroll/testutil/testrelayer" + prooftypes "github.com/pokt-network/poktroll/x/proof/types" + sessiontypes "github.com/pokt-network/poktroll/x/session/types" + "github.com/pokt-network/poktroll/x/shared" + sharedtypes "github.com/pokt-network/poktroll/x/shared/types" + tokenomicstypes "github.com/pokt-network/poktroll/x/tokenomics/types" ) // TODO_UPNEXT(@Olshansk, #571): Implement these tests @@ -12,7 +27,92 @@ func init() { cmd.InitSDKConfig() } -func TestUpdateRelayMiningDifficulty_NewServiceSeenForTheFirstTime(t *testing.T) {} +func TestUpdateRelayMiningDifficulty_NewServiceSeenForTheFirstTime(t *testing.T) { + // Create a new integration app + integrationApp := integration.NewCompleteIntegrationApp(t) + + // Move forward a few blocks to move away from the genesis block + integrationApp.NextBlocks(t, 3) + + // Get the current session and shared params + session := getSession(t, integrationApp) + sharedParams := getSharedParams(t, integrationApp) + + // Prepare the trie with a single mined relay + trie := prepareSMST(t, integrationApp.GetSdkCtx(), integrationApp, session) + + // Compute the number of blocks to wait between different events + // TODO_BLOCKER(@bryanchriswhite): See this comment: https://github.com/pokt-network/poktroll/pull/610#discussion_r1645777322 + sessionEndHeight := session.Header.SessionEndBlockHeight + claimWindowOpenHeight := shared.GetClaimWindowOpenHeight(&sharedParams, sessionEndHeight) + proofWindowOpenHeight := shared.GetProofWindowOpenHeight(&sharedParams, sessionEndHeight) + proofWindowCloseHeight := shared.GetProofWindowCloseHeight(&sharedParams, sessionEndHeight) + + // Wait until the earliest claim commit height. + currentBlockHeight := integrationApp.GetSdkCtx().BlockHeight() + numBlocksUntilClaimWindowOpenHeight := claimWindowOpenHeight - currentBlockHeight + require.Greater(t, numBlocksUntilClaimWindowOpenHeight, int64(0), "unexpected non-positive number of blocks until the earliest claim commit height") + integrationApp.NextBlocks(t, int(numBlocksUntilClaimWindowOpenHeight)) + + // Construct a new create claim message and commit it. 
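For context on the EventRelayMiningDifficultyUpdated message added above: the keeper code that emits it is not part of this section, but given the generated Go type it would typically be emitted through the SDK event manager. A hedged sketch only; the function name and call site are assumptions:

import (
	cosmostypes "github.com/cosmos/cosmos-sdk/types"

	tokenomicstypes "github.com/pokt-network/poktroll/x/tokenomics/types"
)

// emitRelayMiningDifficultyUpdated emits the typed event so tests can recover it via
// cosmostypes.ParseTypedEvent or testutilevents.FilterEvents.
func emitRelayMiningDifficultyUpdated(
	ctx cosmostypes.Context,
	serviceId, prevTargetHashHex, newTargetHashHex string,
	prevNumRelaysEma, newNumRelaysEma uint64,
) error {
	return ctx.EventManager().EmitTypedEvent(&tokenomicstypes.EventRelayMiningDifficultyUpdated{
		ServiceId:                serviceId,
		PrevTargetHashHexEncoded: prevTargetHashHex,
		NewTargetHashHexEncoded:  newTargetHashHex,
		PrevNumRelaysEma:         prevNumRelaysEma,
		NewNumRelaysEma:          newNumRelaysEma,
	})
}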
+ createClaimMsg := prooftypes.MsgCreateClaim{ + SupplierAddress: integrationApp.DefaultSupplier.Address, + SessionHeader: session.Header, + RootHash: trie.Root(), + } + result := integrationApp.RunMsg(t, + &createClaimMsg, + integration.WithAutomaticFinalizeBlock(), + integration.WithAutomaticCommit(), + ) + require.NotNil(t, result, "unexpected nil result when submitting a MsgCreateClaim tx") + + // Wait until the proof window is open + currentBlockHeight = integrationApp.GetSdkCtx().BlockHeight() + numBlocksUntilProofWindowOpenHeight := proofWindowOpenHeight - currentBlockHeight + require.Greater(t, numBlocksUntilProofWindowOpenHeight, int64(0), "unexpected non-positive number of blocks until the earliest proof commit height") + integrationApp.NextBlocks(t, int(numBlocksUntilProofWindowOpenHeight)) + + // Construct a new proof message and commit it + createProofMsg := prooftypes.MsgSubmitProof{ + SupplierAddress: integrationApp.DefaultSupplier.Address, + SessionHeader: session.Header, + Proof: getProof(t, trie), + } + result = integrationApp.RunMsg(t, + &createProofMsg, + integration.WithAutomaticFinalizeBlock(), + integration.WithAutomaticCommit(), + ) + require.NotNil(t, result, "unexpected nil result when submitting a MsgSubmitProof tx") + + // Wait until the proof window is closed + currentBlockHeight = integrationApp.GetSdkCtx().BlockHeight() + numBlocksUntilProofWindowCloseHeight := proofWindowCloseHeight - currentBlockHeight + require.Greater(t, numBlocksUntilProofWindowCloseHeight, int64(0), "unexpected non-positive number of blocks until the proof window close height") + // TODO_TECHDEBT(@bryanchriswhite): Olshansky is unsure why the +1 is necessary here + // but it was required to pass the test. + integrationApp.NextBlocks(t, int(numBlocksUntilProofWindowCloseHeight)+1) + + // The number 14 was determined empirically by running the tests and will need + // to be updated if they are changed. + expectedNumEvents := 14 + // Check the number of events is consistent. + events := integrationApp.GetSdkCtx().EventManager().Events() + require.Len(t, events, expectedNumEvents, "unexpected number of total events") + + relayMiningEvents := testutilevents.FilterEvents[*tokenomicstypes.EventRelayMiningDifficultyUpdated](t, + events, "poktroll.tokenomics.EventRelayMiningDifficultyUpdated") + require.Len(t, relayMiningEvents, 1, "unexpected number of relay mining difficulty updated events") + relayMiningEvent := relayMiningEvents[0] + require.Equal(t, "svc1", relayMiningEvent.ServiceId) + // The default difficulty target hash. + require.Equal(t, "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", relayMiningEvent.PrevTargetHashHexEncoded) + require.Equal(t, "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", relayMiningEvent.NewTargetHashHexEncoded) + // The previous EMA is the same as the current one if the service is new + require.Equal(t, uint64(1), relayMiningEvent.PrevNumRelaysEma) + require.Equal(t, uint64(1), relayMiningEvent.NewNumRelaysEma) +} func UpdateRelayMiningDifficulty_UpdatingMultipleServicesAtOnce(t *testing.T) {} @@ -21,3 +121,84 @@ func UpdateRelayMiningDifficulty_UpdateServiceIsNotSeenForAWhile(t *testing.T) { func UpdateRelayMiningDifficulty_UpdateServiceIsIncreasing(t *testing.T) {} func UpdateRelayMiningDifficulty_UpdateServiceIsDecreasing(t *testing.T) {} + +// getSharedParams returns the shared parameters for the current block height.
+func getSharedParams(t *testing.T, integrationApp *testutil.App) sharedtypes.Params { + t.Helper() + + sharedQueryClient := sharedtypes.NewQueryClient(integrationApp.QueryHelper()) + sharedParamsReq := sharedtypes.QueryParamsRequest{} + + sharedQueryRes, err := sharedQueryClient.Params(integrationApp.GetSdkCtx(), &sharedParamsReq) + require.NoError(t, err) + + return sharedQueryRes.Params +} + +// getSession returns the current session for the default application and service. +func getSession(t *testing.T, integrationApp *testutil.App) *sessiontypes.Session { + t.Helper() + + sessionQueryClient := sessiontypes.NewQueryClient(integrationApp.QueryHelper()) + getSessionReq := sessiontypes.QueryGetSessionRequest{ + ApplicationAddress: integrationApp.DefaultApplication.Address, + Service: integrationApp.DefaultService, + BlockHeight: integrationApp.GetSdkCtx().BlockHeight(), + } + + getSessionRes, err := sessionQueryClient.GetSession(integrationApp.GetSdkCtx(), &getSessionReq) + require.NoError(t, err) + require.NotNil(t, getSessionRes, "unexpected nil queryResponse") + return getSessionRes.Session +} + +// prepareSMST prepares an SMST with a single mined relay for the given session. +func prepareSMST( + t *testing.T, ctx context.Context, + integrationApp *testutil.App, + session *sessiontypes.Session, +) *smt.SMST { + t.Helper() + + // Generating an ephemeral tree & spec just so we can submit + // a proof of the right size. + // TODO_TECHDEBT(#446): Centralize the configuration for the SMT spec. + kvStore, err := badger.NewKVStore("") + require.NoError(t, err) + + // NB: A signed mined relay is a MinedRelay type with the appropriate + // payload, signatures and metadata populated. + // + // It does not (as of writing) adhere to the actual on-chain difficulty (i.e. + // hash check) of the test service surrounding the scope of this test. + minedRelay := testrelayer.NewSignedMinedRelay(t, ctx, + session, + integrationApp.DefaultApplication.Address, + integrationApp.DefaultSupplier.Address, + integrationApp.DefaultSupplierKeyringKeyringUid, + integrationApp.GetKeyRing(), + integrationApp.GetRingClient(), + ) + + trie := smt.NewSparseMerkleSumTrie(kvStore, sha256.New(), smt.WithValueHasher(nil)) + err = trie.Update(minedRelay.Hash, minedRelay.Bytes, 1) + require.NoError(t, err) + + return trie +} + +// getProof returns a proof for the given session for the empty path. +// If there is only one relay in the trie, the proof will be for that single +// relay since it is "closest" to any path provided, empty or not. 
+func getProof(t *testing.T, trie *smt.SMST) []byte { + t.Helper() + + emptyPath := make([]byte, trie.PathHasherSize()) + proof, err := trie.ProveClosest(emptyPath) + require.NoError(t, err) + + proofBz, err := proof.Marshal() + require.NoError(t, err) + + return proofBz +} diff --git a/tests/integration/tokenomics/tokenomics_example_test.go b/tests/integration/tokenomics/tokenomics_example_test.go index 3385670e2..0a428dcb6 100644 --- a/tests/integration/tokenomics/tokenomics_example_test.go +++ b/tests/integration/tokenomics/tokenomics_example_test.go @@ -30,14 +30,14 @@ func TestTokenomicsIntegrationExample(t *testing.T) { // Query and validate the default shared params sharedQueryClient := sharedtypes.NewQueryClient(integrationApp.QueryHelper()) sharedParamsReq := sharedtypes.QueryParamsRequest{} - sharedQueryRes, err := sharedQueryClient.Params(integrationApp.SdkCtx(), &sharedParamsReq) + sharedQueryRes, err := sharedQueryClient.Params(integrationApp.GetSdkCtx(), &sharedParamsReq) require.NoError(t, err) require.NotNil(t, sharedQueryRes, "unexpected nil params query response") require.EqualValues(t, sharedtypes.DefaultParams(), sharedQueryRes.GetParams()) // Prepare a request to update the compute_units_to_tokens_multiplier updateTokenomicsParamMsg := &tokenomicstypes.MsgUpdateParam{ - Authority: integrationApp.Authority(), + Authority: integrationApp.GetAuthority(), Name: tokenomicstypes.ParamComputeUnitsToTokensMultiplier, AsType: &tokenomicstypes.MsgUpdateParam_AsInt64{AsInt64: 11}, } @@ -52,7 +52,7 @@ func TestTokenomicsIntegrationExample(t *testing.T) { // Validate the response is correct and that the value was updated updateTokenomicsParamRes := tokenomicstypes.MsgUpdateParamResponse{} - err = integrationApp.Codec().Unmarshal(result.Value, &updateTokenomicsParamRes) + err = integrationApp.GetCodec().Unmarshal(result.Value, &updateTokenomicsParamRes) require.NoError(t, err) require.EqualValues(t, uint64(11), uint64(updateTokenomicsParamRes.Params.ComputeUnitsToTokensMultiplier)) @@ -61,11 +61,11 @@ func TestTokenomicsIntegrationExample(t *testing.T) { getSessionReq := sessiontypes.QueryGetSessionRequest{ ApplicationAddress: integrationApp.DefaultApplication.Address, Service: integrationApp.DefaultService, - BlockHeight: integrationApp.SdkCtx().BlockHeight(), + BlockHeight: integrationApp.GetSdkCtx().BlockHeight(), } // Query the session - getSessionRes, err := sessionQueryClient.GetSession(integrationApp.SdkCtx(), &getSessionReq) + getSessionRes, err := sessionQueryClient.GetSession(integrationApp.GetSdkCtx(), &getSessionReq) require.NoError(t, err) require.NotNil(t, getSessionRes, "unexpected nil queryResponse") sessionEndHeight := int(getSessionRes.Session.Header.SessionEndBlockHeight) @@ -74,12 +74,12 @@ func TestTokenomicsIntegrationExample(t *testing.T) { // Query and validate the default shared params sharedQueryClient = sharedtypes.NewQueryClient(integrationApp.QueryHelper()) sharedParamsReq = sharedtypes.QueryParamsRequest{} - sharedQueryRes, err = sharedQueryClient.Params(integrationApp.SdkCtx(), &sharedParamsReq) + sharedQueryRes, err = sharedQueryClient.Params(integrationApp.GetSdkCtx(), &sharedParamsReq) require.NoError(t, err) claimOpenWindowNumBlocks := int(sharedQueryRes.Params.ClaimWindowOpenOffsetBlocks) // Need to wait until the claim window is open - currentBlockHeight := int(integrationApp.SdkCtx().BlockHeight()) + currentBlockHeight := int(integrationApp.GetSdkCtx().BlockHeight()) numBlocksUntilClaimWindowIsOpen := int(sessionEndHeight + 
claimOpenWindowNumBlocks - currentBlockHeight + 1) for i := 0; i < numBlocksUntilClaimWindowIsOpen; i++ { integrationApp.NextBlock(t) diff --git a/testutil/events/filter.go b/testutil/events/filter.go new file mode 100644 index 000000000..bb03237f0 --- /dev/null +++ b/testutil/events/filter.go @@ -0,0 +1,49 @@ +package events + +import ( + "strconv" + "testing" + + abci "github.com/cometbft/cometbft/abci/types" + cosmostypes "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/gogoproto/proto" + "github.com/stretchr/testify/require" +) + +// FilterEvents filters allEvents, returning list of T type events whose protobuf message type string matches protoType. +func FilterEvents[T proto.Message]( + t *testing.T, + allEvents cosmostypes.Events, + protoType string, +) (parsedEvents []T) { + t.Helper() + + for _, event := range allEvents.ToABCIEvents() { + if event.Type != protoType { + continue + } + QuoteEventMode(&event) + parsedEvent, err := cosmostypes.ParseTypedEvent(event) + require.NoError(t, err) + require.NotNil(t, parsedEvent) + + castedEvent, ok := parsedEvent.(T) + require.True(t, ok) + + parsedEvents = append(parsedEvents, castedEvent) + } + + return parsedEvents +} + +// QuoteEventMode quotes (i.e. URL escape) the value associated with the 'mode' +// key in the event. This is injected by the caller that emits the event and +// causes issues in calling 'ParseTypedEvent'. +func QuoteEventMode(event *abci.Event) { + for i, attr := range event.Attributes { + if attr.Key == "mode" { + event.Attributes[i].Value = strconv.Quote(attr.Value) + return + } + } +} diff --git a/testutil/integration/app.go b/testutil/integration/app.go index 59cbde78d..e85519819 100644 --- a/testutil/integration/app.go +++ b/testutil/integration/app.go @@ -6,6 +6,7 @@ import ( "cosmossdk.io/core/appmodule" coreheader "cosmossdk.io/core/header" + "cosmossdk.io/depinject" "cosmossdk.io/log" "cosmossdk.io/math" "cosmossdk.io/store" @@ -19,8 +20,11 @@ import ( "github.com/cosmos/cosmos-sdk/codec" addresscodec "github.com/cosmos/cosmos-sdk/codec/address" codectypes "github.com/cosmos/cosmos-sdk/codec/types" + cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" + "github.com/cosmos/cosmos-sdk/crypto/keyring" "github.com/cosmos/cosmos-sdk/runtime" "github.com/cosmos/cosmos-sdk/types" + cosmostypes "github.com/cosmos/cosmos-sdk/types" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/types/module" "github.com/cosmos/cosmos-sdk/x/auth" @@ -34,7 +38,11 @@ import ( "github.com/stretchr/testify/require" "github.com/pokt-network/poktroll/app" - "github.com/pokt-network/poktroll/testutil/sample" + "github.com/pokt-network/poktroll/pkg/crypto" + "github.com/pokt-network/poktroll/pkg/crypto/rings" + "github.com/pokt-network/poktroll/pkg/polylog/polyzero" + testutilevents "github.com/pokt-network/poktroll/testutil/events" + "github.com/pokt-network/poktroll/testutil/testkeyring" appkeeper "github.com/pokt-network/poktroll/x/application/keeper" application "github.com/pokt-network/poktroll/x/application/module" apptypes "github.com/pokt-network/poktroll/x/application/types" @@ -70,19 +78,23 @@ type App struct { *baseapp.BaseApp // Internal state of the App needed for properly configuring the blockchain. - sdkCtx sdk.Context + sdkCtx *sdk.Context cdc codec.Codec logger log.Logger authority sdk.AccAddress moduleManager module.Manager queryHelper *baseapp.QueryServiceTestHelper + keyRing keyring.Keyring + ringClient crypto.RingClient // Some default helper fixtures for general testing. 
// They're publically exposed and should/could be improve and expand on // over time. - DefaultService *sharedtypes.Service - DefaultApplication *apptypes.Application - DefaultSupplier *sharedtypes.Supplier + DefaultService *sharedtypes.Service + DefaultApplication *apptypes.Application + DefaultApplicationKeyringUid string + DefaultSupplier *sharedtypes.Supplier + DefaultSupplierKeyringKeyringUid string } // NewIntegrationApp creates a new instance of the App with the provided details @@ -91,6 +103,7 @@ func NewIntegrationApp( t *testing.T, sdkCtx sdk.Context, cdc codec.Codec, + registry codectypes.InterfaceRegistry, logger log.Logger, authority sdk.AccAddress, modules map[string]appmodule.AppModule, @@ -102,12 +115,23 @@ func NewIntegrationApp( db := dbm.NewMemDB() - interfaceRegistry := codectypes.NewInterfaceRegistry() moduleManager := module.NewManagerFromMap(modules) basicModuleManager := module.NewBasicManagerFromManager(moduleManager, nil) - basicModuleManager.RegisterInterfaces(interfaceRegistry) + basicModuleManager.RegisterInterfaces(registry) - txConfig := authtx.NewTxConfig(codec.NewProtoCodec(interfaceRegistry), authtx.DefaultSignModes) + // TODO_HACK(@Olshansk): I needed to set the height to 2 so downstream logic + // works. I'm not 100% sure why, but believe it's a result of genesis and the + // first block being special and iterated over during the setup process. + cometHeader := cmtproto.Header{ + ChainID: appName, + Height: 2, + } + sdkCtx = sdkCtx. + WithBlockHeader(cometHeader). + WithIsCheckTx(true). + WithEventManager(cosmostypes.NewEventManager()) + + txConfig := authtx.NewTxConfig(cdc, authtx.DefaultSignModes) bApp := baseapp.NewBaseApp(appName, logger, db, txConfig.TxDecoder(), baseapp.SetChainID(appName)) bApp.MountKVStores(keys) @@ -129,7 +153,7 @@ func NewIntegrationApp( return moduleManager.EndBlock(sdkCtx) }) - msgRouter.SetInterfaceRegistry(interfaceRegistry) + msgRouter.SetInterfaceRegistry(registry) bApp.SetMsgServiceRouter(msgRouter) err := bApp.LoadLatestVersion() @@ -141,20 +165,11 @@ func NewIntegrationApp( _, err = bApp.Commit() require.NoError(t, err, "failed to commit") - // TODO_HACK(@Olshansk): I needed to set the height to 2 so downstream logic - // works. I'm not 100% sure why, but believe it's a result of genesis and the - // first block being special and iterated over during the setup process. 
- cometHeader := cmtproto.Header{ - ChainID: appName, - Height: 2, - } - ctx := sdkCtx.WithBlockHeader(cometHeader).WithIsCheckTx(true) - return &App{ BaseApp: bApp, logger: logger, authority: authority, - sdkCtx: ctx, + sdkCtx: &sdkCtx, cdc: cdc, moduleManager: *moduleManager, queryHelper: queryHelper, @@ -182,6 +197,9 @@ func NewCompleteIntegrationApp(t *testing.T) *App { prooftypes.RegisterInterfaces(registry) servicetypes.RegisterInterfaces(registry) authtypes.RegisterInterfaces(registry) + cosmostypes.RegisterInterfaces(registry) + cryptocodec.RegisterInterfaces(registry) + banktypes.RegisterInterfaces(registry) // Prepare the codec cdc := codec.NewProtoCodec(registry) @@ -405,6 +423,7 @@ func NewCompleteIntegrationApp(t *testing.T) *App { t, sdkCtx, cdc, + registry, logger, authority, modules, @@ -440,20 +459,27 @@ func NewCompleteIntegrationApp(t *testing.T) *App { integrationApp.NextBlock(t) // Set the default params for all the modules - err := sharedKeeper.SetParams(integrationApp.SdkCtx(), sharedtypes.DefaultParams()) + err := sharedKeeper.SetParams(integrationApp.GetSdkCtx(), sharedtypes.DefaultParams()) require.NoError(t, err) - err = tokenomicsKeeper.SetParams(integrationApp.SdkCtx(), tokenomicstypes.DefaultParams()) + err = tokenomicsKeeper.SetParams(integrationApp.GetSdkCtx(), tokenomicstypes.DefaultParams()) require.NoError(t, err) - err = proofKeeper.SetParams(integrationApp.SdkCtx(), prooftypes.DefaultParams()) + err = proofKeeper.SetParams(integrationApp.GetSdkCtx(), prooftypes.DefaultParams()) require.NoError(t, err) - err = sessionKeeper.SetParams(integrationApp.SdkCtx(), sessiontypes.DefaultParams()) + err = sessionKeeper.SetParams(integrationApp.GetSdkCtx(), sessiontypes.DefaultParams()) require.NoError(t, err) - err = gatewayKeeper.SetParams(integrationApp.SdkCtx(), gatewaytypes.DefaultParams()) + err = gatewayKeeper.SetParams(integrationApp.GetSdkCtx(), gatewaytypes.DefaultParams()) require.NoError(t, err) - err = applicationKeeper.SetParams(integrationApp.SdkCtx(), apptypes.DefaultParams()) + err = applicationKeeper.SetParams(integrationApp.GetSdkCtx(), apptypes.DefaultParams()) require.NoError(t, err) - // Prepare default testing fixtures + // Prepare default testing fixtures // + + // Construct a keyring to hold the keypairs for the accounts used in the test. + keyRing := keyring.NewInMemory(integrationApp.cdc) + integrationApp.keyRing = keyRing + + // Create a pre-generated account iterator to create accounts for the test. + preGeneratedAccts := testkeyring.PreGeneratedAccounts() // Prepare a new default service defaultService := sharedtypes.Service{ @@ -463,10 +489,20 @@ func NewCompleteIntegrationApp(t *testing.T) *App { serviceKeeper.SetService(integrationApp.sdkCtx, defaultService) integrationApp.DefaultService = &defaultService - // Prepare a new default supplier + // Create a supplier account with the corresponding keys in the keyring for the supplier. 
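The in-memory keyring constructed above is what backs the on-chain test accounts created next. A minimal standalone sketch of that keyring setup, assuming current cosmos-sdk keyring APIs (the helper name, uid handling, and HD path are illustrative):

import (
	"github.com/cosmos/cosmos-sdk/codec"
	codectypes "github.com/cosmos/cosmos-sdk/codec/types"
	cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec"
	"github.com/cosmos/cosmos-sdk/crypto/hd"
	"github.com/cosmos/cosmos-sdk/crypto/keyring"
)

// newInMemoryTestKey creates an in-memory keyring, adds a fresh secp256k1 key under the
// given uid, and returns the keyring along with the key's bech32 address.
func newInMemoryTestKey(uid string) (keyring.Keyring, string, error) {
	registry := codectypes.NewInterfaceRegistry()
	cryptocodec.RegisterInterfaces(registry)
	cdc := codec.NewProtoCodec(registry)

	kr := keyring.NewInMemory(cdc)
	record, _, err := kr.NewMnemonic(uid, keyring.English, hd.CreateHDPath(118, 0, 0).String(), "", hd.Secp256k1)
	if err != nil {
		return nil, "", err
	}

	addr, err := record.GetAddress()
	if err != nil {
		return nil, "", err
	}
	return kr, addr.String(), nil
}

The integration app instead draws from testkeyring.PreGeneratedAccounts, so test accounts come from a fixed set rather than being newly generated on every run.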
+ integrationApp.DefaultSupplierKeyringKeyringUid = "supplier" + supplierAddr := testkeyring.CreateOnChainAccount( + integrationApp.sdkCtx, t, + integrationApp.DefaultSupplierKeyringKeyringUid, + keyRing, + accountKeeper, + preGeneratedAccts, + ) + + // Prepare the on-chain supplier supplierStake := types.NewCoin("upokt", math.NewInt(1000000)) defaultSupplier := sharedtypes.Supplier{ - Address: sample.AccAddress(), + Address: supplierAddr.String(), Stake: &supplierStake, Services: []*sharedtypes.SupplierServiceConfig{ { @@ -477,10 +513,20 @@ func NewCompleteIntegrationApp(t *testing.T) *App { supplierKeeper.SetSupplier(integrationApp.sdkCtx, defaultSupplier) integrationApp.DefaultSupplier = &defaultSupplier - // Prepare a new default application + // Create an application account with the corresponding keys in the keyring for the application. + integrationApp.DefaultApplicationKeyringUid = "application" + applicationAddr := testkeyring.CreateOnChainAccount( + integrationApp.sdkCtx, t, + integrationApp.DefaultApplicationKeyringUid, + keyRing, + accountKeeper, + preGeneratedAccts, + ) + + // Prepare the on-chain supplier appStake := types.NewCoin("upokt", math.NewInt(1000000)) defaultApplication := apptypes.Application{ - Address: sample.AccAddress(), + Address: applicationAddr.String(), Stake: &appStake, ServiceConfigs: []*sharedtypes.ApplicationServiceConfig{ { @@ -491,6 +537,24 @@ func NewCompleteIntegrationApp(t *testing.T) *App { applicationKeeper.SetApplication(integrationApp.sdkCtx, defaultApplication) integrationApp.DefaultApplication = &defaultApplication + // Construct a ringClient to get the application's ring & verify the relay + // request signature. + ringClient, err := rings.NewRingClient(depinject.Supply( + polyzero.NewLogger(), + prooftypes.NewAppKeeperQueryClient(applicationKeeper), + prooftypes.NewAccountKeeperQueryClient(accountKeeper), + prooftypes.NewSharedKeeperQueryClient(sharedKeeper), + )) + require.NoError(t, err) + integrationApp.ringClient = ringClient + + // TODO_IMPROVE: The setup above does not to proper "staking" of the suppliers and applications. + // This can result in the module accounts balance going negative. Giving them a baseline balance + // to start with to avoid this issue. There is opportunity to improve this in the future. + moduleBaseMint := types.NewCoins(sdk.NewCoin("upokt", math.NewInt(690000000000000042))) + bankKeeper.MintCoins(integrationApp.sdkCtx, suppliertypes.ModuleName, moduleBaseMint) + bankKeeper.MintCoins(integrationApp.sdkCtx, apptypes.ModuleName, moduleBaseMint) + // Commit all the changes above by committing, finalizing and moving // to the next block. integrationApp.NextBlock(t) @@ -498,25 +562,35 @@ func NewCompleteIntegrationApp(t *testing.T) *App { return integrationApp } -// Codec returns the codec used by the application. -func (app *App) Codec() codec.Codec { +// GetRingClient returns the ring client used by the application. +func (app *App) GetRingClient() crypto.RingClient { + return app.ringClient +} + +// GetKeyRing returns the keyring used by the application. +func (app *App) GetKeyRing() keyring.Keyring { + return app.keyRing +} + +// GetCodec returns the codec used by the application. +func (app *App) GetCodec() codec.Codec { return app.cdc } -// SdkCtx returns the context used by the application. -func (app *App) SdkCtx() sdk.Context { +// GetSdkCtx returns the context used by the application. 
+func (app *App) GetSdkCtx() *sdk.Context { return app.sdkCtx } -// Authority returns the authority address used by the application. -func (app *App) Authority() string { +// GetAuthority returns the authority address used by the application. +func (app *App) GetAuthority() string { return app.authority.String() } // QueryHelper returns the query helper used by the application that can be // used to submit queries to the application. func (app *App) QueryHelper() *baseapp.QueryServiceTestHelper { - app.queryHelper.Ctx = app.sdkCtx + app.queryHelper.Ctx = *app.sdkCtx return app.queryHelper } @@ -546,14 +620,14 @@ func (app *App) RunMsg(t *testing.T, msg sdk.Msg, option ...RunOption) *codectyp // If configured, finalize the block after the message is executed. if cfg.AutomaticFinalizeBlock { - height := app.LastBlockHeight() + 1 - _, err := app.FinalizeBlock(&cmtabcitypes.RequestFinalizeBlock{ - Height: height, + finalizedBlockResponse, err := app.FinalizeBlock(&cmtabcitypes.RequestFinalizeBlock{ + Height: app.LastBlockHeight() + 1, DecidedLastCommit: cmtabcitypes.CommitInfo{ Votes: []cmtabcitypes.VoteInfo{{}}, }, }) require.NoError(t, err, "failed to finalize block") + app.emitEvents(t, finalizedBlockResponse) } app.logger.Info("Running msg", "msg", msg.String()) @@ -561,7 +635,7 @@ func (app *App) RunMsg(t *testing.T, msg sdk.Msg, option ...RunOption) *codectyp handler := app.MsgServiceRouter().Handler(msg) require.NotNil(t, handler, "handler not found for message %s", sdk.MsgTypeURL(msg)) - msgResult, err := handler(app.sdkCtx, msg) + msgResult, err := handler(*app.sdkCtx, msg) require.NoError(t, err, "failed to execute message %s", sdk.MsgTypeURL(msg)) var response *codectypes.Any @@ -574,15 +648,36 @@ func (app *App) RunMsg(t *testing.T, msg sdk.Msg, option ...RunOption) *codectyp return response } +// NextBlocks calls NextBlock numBlocks times +func (app *App) NextBlocks(t *testing.T, numBlocks int) { + t.Helper() + + for i := 0; i < numBlocks; i++ { + app.NextBlock(t) + } +} + +// emitEvents emits the events from the finalized block to the event manager +// of the context in the active app. +func (app *App) emitEvents(t *testing.T, res *abci.ResponseFinalizeBlock) { + t.Helper() + for _, event := range res.Events { + testutilevents.QuoteEventMode(&event) + abciEvent := cosmostypes.Event(event) + app.sdkCtx.EventManager().EmitEvent(abciEvent) + } +} + // NextBlock commits and finalizes all existing transactions. It then updates // and advances the context of the App. func (app *App) NextBlock(t *testing.T) { t.Helper() - _, err := app.FinalizeBlock(&abci.RequestFinalizeBlock{ + finalizedBlockResponse, err := app.FinalizeBlock(&abci.RequestFinalizeBlock{ Height: app.sdkCtx.BlockHeight(), Time: app.sdkCtx.BlockTime()}) require.NoError(t, err) + app.emitEvents(t, finalizedBlockResponse) _, err = app.Commit() require.NoError(t, err) @@ -601,14 +696,17 @@ func (app *App) nextBlockUpdateCtx() { header.Time = prevCtx.BlockTime().Add(time.Duration(1) * time.Second) header.Height++ - app.sdkCtx = app.BaseApp.NewUncachedContext(true, header). - WithHeaderInfo(coreheader.Info{ - Height: header.Height, - // Hash: ?? // TODO_TECHDEBT: Do we have to set it here? If so, What should this be? - Time: header.Time, - ChainID: appName, - // AppHash: ?? // TODO_TECHDEBT: Do we have to set it here? If so, What should this be? - }) + headerInfo := coreheader.Info{ + ChainID: appName, + Height: header.Height, + Time: header.Time, + } + + newContext := app.BaseApp.NewUncachedContext(true, header). 
+ WithBlockHeader(header). + WithHeaderInfo(headerInfo). + WithEventManager(prevCtx.EventManager()) + *app.sdkCtx = newContext } // CreateMultiStore is a helper for setting up multiple stores for provided modules. diff --git a/testutil/testclient/testsupplier/client.go b/testutil/testclient/testsupplier/client.go index b650891c0..0622e9065 100644 --- a/testutil/testclient/testsupplier/client.go +++ b/testutil/testclient/testsupplier/client.go @@ -5,11 +5,10 @@ import ( "testing" "cosmossdk.io/depinject" + cosmostypes "github.com/cosmos/cosmos-sdk/types" "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" - cosmostypes "github.com/cosmos/cosmos-sdk/types" - "github.com/pokt-network/poktroll/pkg/client" "github.com/pokt-network/poktroll/pkg/client/supplier" "github.com/pokt-network/poktroll/pkg/client/tx" diff --git a/testutil/testkeyring/keyring.go b/testutil/testkeyring/keyring.go index 39c2828c5..28b104a2d 100644 --- a/testutil/testkeyring/keyring.go +++ b/testutil/testkeyring/keyring.go @@ -6,10 +6,15 @@ import ( "fmt" "testing" + ring_secp256k1 "github.com/athanorlabs/go-dleq/secp256k1" + ringtypes "github.com/athanorlabs/go-dleq/types" + cosmoscrypto "github.com/cosmos/cosmos-sdk/crypto" "github.com/cosmos/cosmos-sdk/crypto/hd" "github.com/cosmos/cosmos-sdk/crypto/keyring" "github.com/cosmos/cosmos-sdk/types" + cosmostypes "github.com/cosmos/cosmos-sdk/types" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) // CreatePreGeneratedKeyringAccounts uses the mnemonic from limit number of @@ -45,3 +50,24 @@ func CreatePreGeneratedKeyringAccounts( return accounts[:limit] } + +// GetSigningKeyFromAddress retrieves the signing key associated with the given +// bech32 address from the provided keyring. +func GetSigningKeyFromAddress(t *testing.T, bech32 string, keyRing keyring.Keyring) ringtypes.Scalar { + t.Helper() + + addr, err := cosmostypes.AccAddressFromBech32(bech32) + require.NoError(t, err) + + armorPrivKey, err := keyRing.ExportPrivKeyArmorByAddress(addr, "") + require.NoError(t, err) + + privKey, _, err := cosmoscrypto.UnarmorDecryptPrivKey(armorPrivKey, "") + require.NoError(t, err) + + curve := ring_secp256k1.NewCurve() + signingKey, err := curve.DecodeToScalar(privKey.Bytes()) + require.NoError(t, err) + + return signingKey +} diff --git a/testutil/testrelayer/relays.go b/testutil/testrelayer/relays.go index d16dada92..ff2dfb54f 100644 --- a/testutil/testrelayer/relays.go +++ b/testutil/testrelayer/relays.go @@ -1,40 +1,61 @@ package testrelayer import ( + "context" + "fmt" + "strings" "testing" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + cosmostypes "github.com/cosmos/cosmos-sdk/types" + signingtypes "github.com/cosmos/cosmos-sdk/types/tx/signing" "github.com/stretchr/testify/require" + "github.com/pokt-network/poktroll/pkg/crypto" "github.com/pokt-network/poktroll/pkg/relayer" + testutilkeyring "github.com/pokt-network/poktroll/testutil/testkeyring" servicetypes "github.com/pokt-network/poktroll/x/service/types" sessiontypes "github.com/pokt-network/poktroll/x/session/types" ) -// NewMinedRelay returns a new mined relay with the given session start and end -// heights on the session header, and the bytes and hash fields populated. -func NewMinedRelay( +// NewUnsignedMinedRelay returns a new mined relay with the given session data, +// as well as the bytes and the hash fields populated. 
+// +// It DOES NOT populate the signature fields and should only be used in contexts +// where a partial mined relay is enough for testing purposes. +// +// TODO_IMPROVE: It does not (yet) verify against and adhere to the actual +// relay mining difficulty of the service at hand. +// +// TODO_TECHDEBT(@bryanchriswhite): Move the pre-mind relays in 'pkg/relayer/miner/relay_fixtures_test.go' +// to 'testutil', making any necessary adjustments the utils or docs as well. +func NewUnsignedMinedRelay( t *testing.T, - sessionStartHeight int64, - sessionEndHeight int64, + session *sessiontypes.Session, supplierAddress string, ) *relayer.MinedRelay { + t.Helper() + relay := servicetypes.Relay{ Req: &servicetypes.RelayRequest{ Meta: servicetypes.RelayRequestMetadata{ - SessionHeader: &sessiontypes.SessionHeader{ - SessionStartBlockHeight: sessionStartHeight, - SessionEndBlockHeight: sessionEndHeight, - }, + SessionHeader: session.Header, SupplierAddress: supplierAddress, }, + Payload: []byte("request_payload"), + }, + Res: &servicetypes.RelayResponse{ + Meta: servicetypes.RelayResponseMetadata{ + SessionHeader: session.Header, + }, + Payload: []byte("response_payload"), }, - Res: &servicetypes.RelayResponse{}, } - // TODO_TECHDEBT(@red-0ne, #446): Centralize the configuration for the SMT spec. // TODO_TECHDEBT(@red-0ne): marshal using canonical codec. relayBz, err := relay.Marshal() require.NoError(t, err) + relayHashArr := servicetypes.GetHashFromBytes(relayBz) relayHash := relayHashArr[:] @@ -44,3 +65,172 @@ func NewMinedRelay( Hash: relayHash, } } + +// NewSignedMinedRelay returns a new "mined relay" with the given session data, +// as well as the bytes and the hash fields populated. +// +// IT DOES populate the signature fields and should only be used in contexts +// where a fully signed mined relay is needed for testing purposes. +// +// TODO_IMPROVE: It does not (yet) verify against and adhere to the actual +// relay mining difficulty of the service at hand. +// +// TODO_TECHDEBT(@bryanchriswhite): Move the pre-mind relays in 'pkg/relayer/miner/relay_fixtures_test.go' +// to 'testutil', making any necessary adjustments the utils or docs as well. +func NewSignedMinedRelay( + t *testing.T, + ctx context.Context, + session *sessiontypes.Session, + appAddr, supplierAddr, supplierKeyUid string, + keyRing keyring.Keyring, + ringClient crypto.RingClient, +) *relayer.MinedRelay { + t.Helper() + + relay := servicetypes.Relay{ + Req: &servicetypes.RelayRequest{ + Meta: servicetypes.RelayRequestMetadata{ + SessionHeader: session.Header, + SupplierAddress: supplierAddr, + }, + Payload: []byte("request_payload"), + }, + Res: &servicetypes.RelayResponse{ + Meta: servicetypes.RelayResponseMetadata{ + SessionHeader: session.Header, + }, + Payload: []byte("response_payload"), + }, + } + + SignRelayRequest(ctx, t, &relay, appAddr, keyRing, ringClient) + SignRelayResponse(ctx, t, &relay, supplierKeyUid, supplierAddr, keyRing) + + // TODO_TECHDEBT(@red-0ne): marshal using canonical codec. + relayBz, err := relay.Marshal() + require.NoError(t, err) + + relayHashArr := servicetypes.GetHashFromBytes(relayBz) + relayHash := relayHashArr[:] + + return &relayer.MinedRelay{ + Relay: relay, + Bytes: relayBz, + Hash: relayHash, + } +} + +// TODO_TECHDEBT(@red-0ne): Centralize this logic in the relayer package. +// SignRelayRequest signs the relay request (updates relay.Req.Meta.Signature) +// on behalf of appAddr using the clients provided. 
+func SignRelayRequest( + ctx context.Context, + t *testing.T, + relay *servicetypes.Relay, + appAddr string, + keyRing keyring.Keyring, + ringClient crypto.RingClient, +) { + t.Helper() + + relayReqMeta := relay.GetReq().GetMeta() + sessionEndHeight := relayReqMeta.GetSessionHeader().GetSessionEndBlockHeight() + + // Retrieve the signing ring associated with the application address at the session end height. + appRing, err := ringClient.GetRingForAddressAtHeight(ctx, appAddr, sessionEndHeight) + require.NoError(t, err) + + // Retrieve the signing key associated with the application address. + signingKey := testutilkeyring.GetSigningKeyFromAddress(t, + appAddr, + keyRing, + ) + + // Retrieve the signable bytes for the relay request. + relayReqSignableBz, err := relay.GetReq().GetSignableBytesHash() + require.NoError(t, err) + + // Sign the relay request. + signature, err := appRing.Sign(relayReqSignableBz, signingKey) + require.NoError(t, err) + + // Serialize the signature. + signatureBz, err := signature.Serialize() + require.NoError(t, err) + + // Update the relay request signature. + relay.Req.Meta.Signature = signatureBz +} + +// TODO_TECHDEBT(@red-0ne): Centralize this logic in the relayer package. +// in the relayer package? +// SignRelayResponse signs the relay response (updates relay.Res.Meta.SupplierSignature) +// on behalf of supplierAddr using the clients provided. +func SignRelayResponse( + _ context.Context, + t *testing.T, + relay *servicetypes.Relay, + supplierKeyUid, supplierAddr string, + keyRing keyring.Keyring, +) { + t.Helper() + + // Retrieve ths signable bytes for the relay response. + relayResSignableBz, err := relay.GetRes().GetSignableBytesHash() + require.NoError(t, err) + + // Sign the relay response. + signatureBz, signerPubKey, err := keyRing.Sign(supplierKeyUid, relayResSignableBz[:], signingtypes.SignMode_SIGN_MODE_DIRECT) + require.NoError(t, err) + + // Verify the signer address matches the expected supplier address. + addr, err := cosmostypes.AccAddressFromBech32(supplierAddr) + require.NoError(t, err) + addrHexBz := strings.ToUpper(fmt.Sprintf("%x", addr.Bytes())) + require.Equal(t, addrHexBz, signerPubKey.Address().String()) + + // Update the relay response signature. + relay.Res.Meta.SupplierSignature = signatureBz +} + +// NewSignedEmptyRelay creates a new relay structure for the given req & res headers. +// It signs the relay request on behalf of application in the reqHeader. +// It signs the relay response on behalf of supplier provided.. +func NewSignedEmptyRelay( + ctx context.Context, + t *testing.T, + supplierKeyUid, supplierAddr string, + reqHeader, resHeader *sessiontypes.SessionHeader, + keyRing keyring.Keyring, + ringClient crypto.RingClient, +) *servicetypes.Relay { + t.Helper() + + relay := NewEmptyRelay(reqHeader, resHeader, supplierAddr) + SignRelayRequest(ctx, t, relay, reqHeader.GetApplicationAddress(), keyRing, ringClient) + SignRelayResponse(ctx, t, relay, supplierKeyUid, supplierAddr, keyRing) + + return relay +} + +// NewEmptyRelay creates a new relay structure for the given req & res headers +// WITHOUT any payload or signatures. +func NewEmptyRelay(reqHeader, resHeader *sessiontypes.SessionHeader, supplierAddr string) *servicetypes.Relay { + return &servicetypes.Relay{ + Req: &servicetypes.RelayRequest{ + Meta: servicetypes.RelayRequestMetadata{ + SessionHeader: reqHeader, + Signature: nil, // Signature added elsewhere. 
+ SupplierAddress: supplierAddr, + }, + Payload: nil, + }, + Res: &servicetypes.RelayResponse{ + Meta: servicetypes.RelayResponseMetadata{ + SessionHeader: resHeader, + SupplierSignature: nil, // Signature added elsewhere. + }, + Payload: nil, + }, + } +} diff --git a/x/proof/keeper/msg_server_submit_proof.go b/x/proof/keeper/msg_server_submit_proof.go index 8cccc80dd..c13abf4fb 100644 --- a/x/proof/keeper/msg_server_submit_proof.go +++ b/x/proof/keeper/msg_server_submit_proof.go @@ -469,12 +469,9 @@ func (k msgServer) validateClosestPath( } blockHash := k.sessionKeeper.GetBlockHash(ctx, sessionGracePeriodEndHeight) - // TODO_BETA: Investigate "proof for the path provided does not match one expected by the on-chain protocol" - // error that may occur due to block height differing from the off-chain part. - fmt.Println("E2E_DEBUG: height for block hash when verifying the proof", sessionGracePeriodEndHeight, sessionHeader.GetSessionId()) - expectedProofPath := GetPathForProof(blockHash, sessionHeader.GetSessionId()) if !bytes.Equal(proof.Path, expectedProofPath) { + fmt.Println("TODO_BETA: Investigate why 'ErrProofInvalidProof' may occur when the block height used for the block hash differs from the one used off-chain.") return types.ErrProofInvalidProof.Wrapf( "the proof for the path provided (%x) does not match one expected by the on-chain protocol (%x)", proof.Path, diff --git a/x/proof/keeper/msg_server_submit_proof_test.go b/x/proof/keeper/msg_server_submit_proof_test.go index 52edfd0f2..547d3d87a 100644 --- a/x/proof/keeper/msg_server_submit_proof_test.go +++ b/x/proof/keeper/msg_server_submit_proof_test.go @@ -2,18 +2,13 @@ package keeper_test import ( "context" - "fmt" "os" - "strings" "testing" "cosmossdk.io/depinject" ring_secp256k1 "github.com/athanorlabs/go-dleq/secp256k1" - ringtypes "github.com/athanorlabs/go-dleq/types" - cosmoscrypto "github.com/cosmos/cosmos-sdk/crypto" "github.com/cosmos/cosmos-sdk/crypto/keyring" cosmostypes "github.com/cosmos/cosmos-sdk/types" - signingtypes "github.com/cosmos/cosmos-sdk/types/tx/signing" "github.com/pokt-network/ring-go" "github.com/pokt-network/smt" "github.com/stretchr/testify/require" @@ -28,6 +23,7 @@ import ( "github.com/pokt-network/poktroll/pkg/relayer/session" keepertest "github.com/pokt-network/poktroll/testutil/keeper" "github.com/pokt-network/poktroll/testutil/testkeyring" + "github.com/pokt-network/poktroll/testutil/testrelayer" "github.com/pokt-network/poktroll/x/proof/keeper" "github.com/pokt-network/poktroll/x/proof/types" servicetypes "github.com/pokt-network/poktroll/x/service/types" @@ -499,11 +495,11 @@ func TestMsgServer_SubmitProof_Error(t *testing.T) { // Construct a relay to be mangled such that it fails to deserialize in order // to set the error expectation for the relevant test case. - mangledRelay := newEmptyRelay(validSessionHeader, validSessionHeader, supplierAddr) + mangledRelay := testrelayer.NewEmptyRelay(validSessionHeader, validSessionHeader, supplierAddr) // Ensure valid relay request and response signatures. - signRelayRequest(ctx, t, mangledRelay, appAddr, keyRing, ringClient) - signRelayResponse(ctx, t, mangledRelay, supplierUid, supplierAddr, keyRing) + testrelayer.SignRelayRequest(ctx, t, mangledRelay, appAddr, keyRing, ringClient) + testrelayer.SignRelayResponse(ctx, t, mangledRelay, supplierUid, supplierAddr, keyRing) // Serialize the relay so that it can be mangled.
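The validateClosestPath change above keeps the same guard: the submitted proof path must equal the path the protocol derives from the session's block hash via GetPathForProof(blockHash, sessionId). A minimal sketch of just that comparison (the wrapper name is illustrative; the error text mirrors the keeper's):

import (
	"bytes"
	"fmt"
)

// checkClosestProofPath compares the path a proof was generated against with the path
// expected by the on-chain protocol.
func checkClosestProofPath(submittedPath, expectedPath []byte) error {
	if !bytes.Equal(submittedPath, expectedPath) {
		return fmt.Errorf(
			"the proof for the path provided (%x) does not match one expected by the on-chain protocol (%x)",
			submittedPath, expectedPath,
		)
	}
	return nil
}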
mangledRelayBz, err := mangledRelay.Marshal() @@ -795,11 +791,11 @@ func TestMsgServer_SubmitProof_Error(t *testing.T) { desc: "relay request signature must be valid", newProofMsg: func(t *testing.T) *types.MsgSubmitProof { // Set the relay request signature to an invalid byte slice. - invalidRequestSignatureRelay := newEmptyRelay(validSessionHeader, validSessionHeader, supplierAddr) + invalidRequestSignatureRelay := testrelayer.NewEmptyRelay(validSessionHeader, validSessionHeader, supplierAddr) invalidRequestSignatureRelay.Req.Meta.Signature = invalidSignatureBz // Ensure a valid relay response signature. - signRelayResponse(ctx, t, invalidRequestSignatureRelay, supplierUid, supplierAddr, keyRing) + testrelayer.SignRelayResponse(ctx, t, invalidRequestSignatureRelay, supplierUid, supplierAddr, keyRing) invalidRequestSignatureRelayBz, err := invalidRequestSignatureRelay.Marshal() require.NoError(t, err) @@ -857,11 +853,11 @@ func TestMsgServer_SubmitProof_Error(t *testing.T) { desc: "relay response signature must be valid", newProofMsg: func(t *testing.T) *types.MsgSubmitProof { // Set the relay response signature to an invalid byte slice. - relay := newEmptyRelay(validSessionHeader, validSessionHeader, supplierAddr) + relay := testrelayer.NewEmptyRelay(validSessionHeader, validSessionHeader, supplierAddr) relay.Res.Meta.SupplierSignature = invalidSignatureBz // Ensure a valid relay request signature - signRelayRequest(ctx, t, relay, appAddr, keyRing, ringClient) + testrelayer.SignRelayRequest(ctx, t, relay, appAddr, keyRing, ringClient) relayBz, err := relay.Marshal() require.NoError(t, err) @@ -1203,7 +1199,7 @@ func fillSessionTree( t.Helper() for i := 0; i < int(numRelays); i++ { - relay := newSignedEmptyRelay( + relay := testrelayer.NewSignedEmptyRelay( ctx, t, supplierKeyUid, supplierAddr, reqHeader, resHeader, @@ -1322,142 +1318,6 @@ func getClosestRelayDifficultyBits( return uint64(relayDifficultyBits) } -// newSignedEmptyRelay creates a new relay structure for the given req & res headers. -// It signs the relay request on behalf of application in the reqHeader. -// It signs the relay response on behalf of supplier provided.. -func newSignedEmptyRelay( - ctx context.Context, - t *testing.T, - supplierKeyUid, supplierAddr string, - reqHeader, resHeader *sessiontypes.SessionHeader, - keyRing keyring.Keyring, - ringClient crypto.RingClient, -) *servicetypes.Relay { - t.Helper() - - relay := newEmptyRelay(reqHeader, resHeader, supplierAddr) - signRelayRequest(ctx, t, relay, reqHeader.GetApplicationAddress(), keyRing, ringClient) - signRelayResponse(ctx, t, relay, supplierKeyUid, supplierAddr, keyRing) - - return relay -} - -// newEmptyRelay creates a new relay structure for the given req & res headers -// WITHOUT any payload or signatures. -func newEmptyRelay(reqHeader, resHeader *sessiontypes.SessionHeader, supplierAddr string) *servicetypes.Relay { - return &servicetypes.Relay{ - Req: &servicetypes.RelayRequest{ - Meta: servicetypes.RelayRequestMetadata{ - SessionHeader: reqHeader, - Signature: nil, // Signature added elsewhere. - SupplierAddress: supplierAddr, - }, - Payload: nil, - }, - Res: &servicetypes.RelayResponse{ - Meta: servicetypes.RelayResponseMetadata{ - SessionHeader: resHeader, - SupplierSignature: nil, // Signature added elsewhere. - }, - Payload: nil, - }, - } -} - -// TODO_TECHDEBT(@red-0ne): Centralize this logic in the relayer package. 
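The test changes above swap the file-local relay helpers for the shared testrelayer package; composed together they look like the following usage sketch (the wrapper name is illustrative, and this is essentially what testrelayer.NewSignedEmptyRelay does):

import (
	"context"
	"testing"

	"github.com/cosmos/cosmos-sdk/crypto/keyring"

	"github.com/pokt-network/poktroll/pkg/crypto"
	"github.com/pokt-network/poktroll/testutil/testrelayer"
	servicetypes "github.com/pokt-network/poktroll/x/service/types"
	sessiontypes "github.com/pokt-network/poktroll/x/session/types"
)

// newSignedTestRelay builds an empty relay for the given session header, signs the
// request on behalf of the application (via its ring) and the response on behalf of
// the supplier (via the keyring).
func newSignedTestRelay(
	ctx context.Context,
	t *testing.T,
	header *sessiontypes.SessionHeader,
	supplierKeyUid, supplierAddr string,
	keyRing keyring.Keyring,
	ringClient crypto.RingClient,
) *servicetypes.Relay {
	t.Helper()

	relay := testrelayer.NewEmptyRelay(header, header, supplierAddr)
	testrelayer.SignRelayRequest(ctx, t, relay, header.GetApplicationAddress(), keyRing, ringClient)
	testrelayer.SignRelayResponse(ctx, t, relay, supplierKeyUid, supplierAddr, keyRing)
	return relay
}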
-// signRelayRequest signs the relay request (updates relay.Req.Meta.Signature) -// on behalf of appAddr using the clients provided. -func signRelayRequest( - ctx context.Context, - t *testing.T, - relay *servicetypes.Relay, - appAddr string, - keyRing keyring.Keyring, - ringClient crypto.RingClient, -) { - t.Helper() - - relayReqMeta := relay.GetReq().GetMeta() - sessionEndHeight := relayReqMeta.GetSessionHeader().GetSessionEndBlockHeight() - - // Retrieve the signing ring associated with the application address at the session end height. - appRing, err := ringClient.GetRingForAddressAtHeight(ctx, appAddr, sessionEndHeight) - require.NoError(t, err) - - // Retrieve the signing key associated with the application address. - signingKey := getSigningKeyFromAddress(t, - appAddr, - keyRing, - ) - - // Retrieve the signable bytes for the relay request. - relayReqSignableBz, err := relay.GetReq().GetSignableBytesHash() - require.NoError(t, err) - - // Sign the relay request. - signature, err := appRing.Sign(relayReqSignableBz, signingKey) - require.NoError(t, err) - - // Serialize the signature. - signatureBz, err := signature.Serialize() - require.NoError(t, err) - - // Update the relay request signature. - relay.Req.Meta.Signature = signatureBz -} - -// TODO_TECHDEBT(@red-0ne): Centralize this logic in the relayer package. -// in the relayer package? -// signRelayResponse signs the relay response (updates relay.Res.Meta.SupplierSignature) -// on behalf of supplierAddr using the clients provided. -func signRelayResponse( - _ context.Context, - t *testing.T, - relay *servicetypes.Relay, - supplierKeyUid, supplierAddr string, - keyRing keyring.Keyring, -) { - t.Helper() - - // Retrieve ths signable bytes for the relay response. - relayResSignableBz, err := relay.GetRes().GetSignableBytesHash() - require.NoError(t, err) - - // Sign the relay response. - signatureBz, signerPubKey, err := keyRing.Sign(supplierKeyUid, relayResSignableBz[:], signingtypes.SignMode_SIGN_MODE_DIRECT) - require.NoError(t, err) - - // Verify the signer address matches the expected supplier address. - addr, err := cosmostypes.AccAddressFromBech32(supplierAddr) - require.NoError(t, err) - addrHexBz := strings.ToUpper(fmt.Sprintf("%x", addr.Bytes())) - require.Equal(t, addrHexBz, signerPubKey.Address().String()) - - // Update the relay response signature. - relay.Res.Meta.SupplierSignature = signatureBz -} - -// getSigningKeyFromAddress retrieves the signing key associated with the given -// bech32 address from the provided keyring. -func getSigningKeyFromAddress(t *testing.T, bech32 string, keyRing keyring.Keyring) ringtypes.Scalar { - t.Helper() - - addr, err := cosmostypes.AccAddressFromBech32(bech32) - require.NoError(t, err) - - armorPrivKey, err := keyRing.ExportPrivKeyArmorByAddress(addr, "") - require.NoError(t, err) - - privKey, _, err := cosmoscrypto.UnarmorDecryptPrivKey(armorPrivKey, "") - require.NoError(t, err) - - curve := ring_secp256k1.NewCurve() - signingKey, err := curve.DecodeToScalar(privKey.Bytes()) - require.NoError(t, err) - - return signingKey -} - // resetBlockHeightFn returns a function that resets the block height of the // given context to one; the first valid session block height. 
func resetBlockHeightFn(ctx *context.Context) func() { diff --git a/x/session/module/query_get_session_test.go b/x/session/module/query_get_session_test.go index 8c695432c..36ae2fd1a 100644 --- a/x/session/module/query_get_session_test.go +++ b/x/session/module/query_get_session_test.go @@ -179,7 +179,7 @@ func TestCLI_GetSession(t *testing.T) { require.Contains(t, stat.Message(), test.expectedErr.Error()) return } - require.NoError(t, err) + require.NoError(t, err, "TODO_FLAKY: Try re-running with 'go test -v -count=1 -run TestCLI_GetSession/valid_-_block_height_specified_and_is_greater_than_zero ./x/session/module/...'") var getSessionRes sessiontypes.QueryGetSessionResponse err = net.Config.Codec.UnmarshalJSON(getSessionOut.Bytes(), &getSessionRes) diff --git a/x/tokenomics/keeper/keeper_settle_pending_claims_test.go b/x/tokenomics/keeper/keeper_settle_pending_claims_test.go index 1b2d2cc69..acb550b38 100644 --- a/x/tokenomics/keeper/keeper_settle_pending_claims_test.go +++ b/x/tokenomics/keeper/keeper_settle_pending_claims_test.go @@ -6,14 +6,13 @@ import ( "time" "cosmossdk.io/math" - abci "github.com/cometbft/cometbft/abci/types" "github.com/cosmos/cosmos-sdk/types" cosmostypes "github.com/cosmos/cosmos-sdk/types" - "github.com/golang/protobuf/proto" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "github.com/pokt-network/poktroll/cmd/poktrolld/cmd" + testutilevents "github.com/pokt-network/poktroll/testutil/events" keepertest "github.com/pokt-network/poktroll/testutil/keeper" testutilproof "github.com/pokt-network/poktroll/testutil/proof" "github.com/pokt-network/poktroll/testutil/sample" @@ -123,10 +122,8 @@ func (s *TestSuite) TestSettlePendingClaims_ClaimPendingBeforeSettlement() { numClaimsSettled, numClaimsExpired, _, _, err := s.keepers.SettlePendingClaims(sdkCtx) require.NoError(t, err) - // Check that no claims were settled. + // Check that no claims were settled or expired. require.Equal(t, uint64(0), numClaimsSettled) - - // Validate that no claims expired. require.Equal(t, uint64(0), numClaimsExpired) // Validate that one claim still remains. @@ -146,10 +143,12 @@ func (s *TestSuite) TestSettlePendingClaims_ClaimPendingBeforeSettlement() { // Expectations: Claims should not be settled because the proof window hasn't closed yet. sdkCtx = sdkCtx.WithBlockHeight(blockHeight) numClaimsSettled, numClaimsExpired, _, _, err = s.keepers.SettlePendingClaims(sdkCtx) - // Check that no claims were settled require.NoError(t, err) + + // Check that no claims were settled or expired. require.Equal(t, uint64(0), numClaimsSettled) require.Equal(t, uint64(0), numClaimsExpired) + // Validate that the claim still exists claims = s.keepers.GetAllClaims(ctx) require.Len(t, claims, 1) @@ -170,18 +169,18 @@ func (s *TestSuite) TestSettlePendingClaims_ClaimExpired_ProofRequiredAndNotProv claims := s.keepers.GetAllClaims(ctx) s.Require().Len(claims, 1) - // 1. Settle pending claims after proof window closes + // Settle pending claims after proof window closes // Expectation: All (1) claims should be expired. // NB: proofs should be rejected when the current height equals the proof window close height. 
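The settlement assertions below use the new generic event filter instead of the bespoke getClaimEvent lookup; a usage sketch of that pattern (the helper name is illustrative):

import (
	"testing"

	cosmostypes "github.com/cosmos/cosmos-sdk/types"
	"github.com/stretchr/testify/require"

	testutilevents "github.com/pokt-network/poktroll/testutil/events"
	tokenomicstypes "github.com/pokt-network/poktroll/x/tokenomics/types"
)

// requireSingleClaimExpiredEvent filters the typed EventClaimExpired events out of the
// context's emitted events and asserts exactly one was emitted.
func requireSingleClaimExpiredEvent(t *testing.T, events cosmostypes.Events) *tokenomicstypes.EventClaimExpired {
	t.Helper()

	expiredEvents := testutilevents.FilterEvents[*tokenomicstypes.EventClaimExpired](t,
		events, "poktroll.tokenomics.EventClaimExpired")
	require.Len(t, expiredEvents, 1, "expected exactly one claim expiration event")
	return expiredEvents[0]
}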
-	blockHeight := shared.GetProofWindowCloseHeight(&sharedParams, claim.SessionHeader.SessionEndBlockHeight)
+	sessionEndHeight := claim.SessionHeader.SessionEndBlockHeight
+	blockHeight := shared.GetProofWindowCloseHeight(&sharedParams, sessionEndHeight)
 	sdkCtx = sdkCtx.WithBlockHeight(blockHeight)
 	numClaimsSettled, numClaimsExpired, _, _, err := s.keepers.SettlePendingClaims(sdkCtx)
 	require.NoError(t, err)
 
 	// Check that no claims were settled.
 	require.Equal(t, uint64(0), numClaimsSettled)
-
-	// Validate that one claims expired
+	// Validate that exactly one claim expired
 	require.Equal(t, uint64(1), numClaimsExpired)
 
 	// Validate that no claims remain.
@@ -191,9 +190,12 @@ func (s *TestSuite) TestSettlePendingClaims_ClaimExpired_ProofRequiredAndNotProv
 	// Confirm an expiration event was emitted
 	events := sdkCtx.EventManager().Events()
 	require.Len(t, events, 5) // minting, burning, settling, etc..
-	// Validate the expiration event
-	expectedEvent, ok := s.getClaimEvent(events, "poktroll.tokenomics.EventClaimExpired").(*tokenomicstypes.EventClaimExpired)
-	require.True(t, ok)
+	expectedEvents := testutilevents.FilterEvents[*tokenomicstypes.EventClaimExpired](t,
+		events, "poktroll.tokenomics.EventClaimExpired")
+	require.Len(t, expectedEvents, 1)
+
+	// Validate the event
+	expectedEvent := expectedEvents[0]
 	require.Equal(t, s.expectedComputeUnits, expectedEvent.ComputeUnits)
 }
 
@@ -207,7 +209,7 @@ func (s *TestSuite) TestSettlePendingClaims_ClaimSettled_ProofRequiredAndProvide
 	// Create a claim that requires a proof
 	claim := s.claim
 
-	// 0. Add the claim & verify it exists
+	// Add the claim & verify it exists
 	s.keepers.UpsertClaim(ctx, claim)
 	claims := s.keepers.GetAllClaims(ctx)
 	s.Require().Len(claims, 1)
@@ -215,10 +217,11 @@
 	// Upsert the proof
 	s.keepers.UpsertProof(ctx, s.proof)
 
-	// 1. Settle pending claims after proof window closes
+	// Settle pending claims after proof window closes
 	// Expectation: All (1) claims should be claimed.
 	// NB: proofs should be rejected when the current height equals the proof window close height.
-	blockHeight := shared.GetProofWindowCloseHeight(&sharedParams, claim.SessionHeader.SessionEndBlockHeight)
+	sessionEndHeight := claim.SessionHeader.SessionEndBlockHeight
+	blockHeight := shared.GetProofWindowCloseHeight(&sharedParams, sessionEndHeight)
 	sdkCtx = sdkCtx.WithBlockHeight(blockHeight)
 	numClaimsSettled, numClaimsExpired, _, _, err := s.keepers.SettlePendingClaims(sdkCtx)
 	require.NoError(t, err)
@@ -235,8 +238,12 @@ func (s *TestSuite) TestSettlePendingClaims_ClaimSettled_ProofRequiredAndProvide
 
 	// Confirm an settlement event was emitted
 	events := sdkCtx.EventManager().Events()
-	expectedEvent, ok := s.getClaimEvent(events, "poktroll.tokenomics.EventClaimSettled").(*tokenomicstypes.EventClaimSettled)
-	require.True(t, ok)
+	expectedEvents := testutilevents.FilterEvents[*tokenomicstypes.EventClaimSettled](t,
+		events, "poktroll.tokenomics.EventClaimSettled")
+	require.Len(t, expectedEvents, 1)
+
+	// Validate the event
+	expectedEvent := expectedEvents[0]
 	require.True(t, expectedEvent.ProofRequired)
 	require.Equal(t, s.expectedComputeUnits, expectedEvent.ComputeUnits)
 }
@@ -270,17 +277,17 @@ func (s *TestSuite) TestClaimSettlement_ClaimSettled_ProofRequiredAndProvided_Vi
 	// Upsert the proof
 	s.keepers.UpsertProof(ctx, s.proof)
 
-	// 1. Settle pending claims after proof window closes
+	// Settle pending claims after proof window closes
 	// Expectation: All (1) claims should be claimed.
 	// NB: proof window has definitely closed at this point
-	blockHeight := shared.GetProofWindowCloseHeight(&sharedParams, claim.SessionHeader.SessionEndBlockHeight)
+	sessionEndHeight := claim.SessionHeader.SessionEndBlockHeight
+	blockHeight := shared.GetProofWindowCloseHeight(&sharedParams, sessionEndHeight)
 	sdkCtx = sdkCtx.WithBlockHeight(blockHeight)
 	numClaimsSettled, numClaimsExpired, _, _, err := s.keepers.SettlePendingClaims(sdkCtx)
 	require.NoError(t, err)
 
 	// Check that one claim was settled.
 	require.Equal(t, uint64(1), numClaimsSettled)
-
 	// Validate that no claims expired.
 	require.Equal(t, uint64(0), numClaimsExpired)
@@ -290,8 +297,10 @@ func (s *TestSuite) TestClaimSettlement_ClaimSettled_ProofRequiredAndProvided_Vi
 
 	// Confirm an settlement event was emitted
 	events := sdkCtx.EventManager().Events()
-	expectedEvent, ok := s.getClaimEvent(events, "poktroll.tokenomics.EventClaimSettled").(*tokenomicstypes.EventClaimSettled)
-	require.True(t, ok)
+	expectedEvents := testutilevents.FilterEvents[*tokenomicstypes.EventClaimSettled](t,
+		events, "poktroll.tokenomics.EventClaimSettled")
+	require.Len(t, expectedEvents, 1)
+	expectedEvent := expectedEvents[0]
 	require.True(t, expectedEvent.ProofRequired)
 	require.Equal(t, s.expectedComputeUnits, expectedEvent.ComputeUnits)
 }
@@ -316,22 +325,22 @@ func (s *TestSuite) TestSettlePendingClaims_Settles_WhenAProofIsNotRequired() {
 	})
 	require.NoError(t, err)
 
-	// 0. Add the claim & verify it exists
+	// Add the claim & verify it exists
 	s.keepers.UpsertClaim(ctx, claim)
 	claims := s.keepers.GetAllClaims(ctx)
 	s.Require().Len(claims, 1)
 
-	// 1. Settle pending claims after proof window closes
+	// Settle pending claims after proof window closes
 	// Expectation: All (1) claims should be claimed.
 	// NB: proofs should be rejected when the current height equals the proof window close height.
-	blockHeight := shared.GetProofWindowCloseHeight(&sharedParams, claim.SessionHeader.SessionEndBlockHeight)
+	sessionEndHeight := claim.SessionHeader.SessionEndBlockHeight
+	blockHeight := shared.GetProofWindowCloseHeight(&sharedParams, sessionEndHeight)
 	sdkCtx = sdkCtx.WithBlockHeight(blockHeight)
 	numClaimsSettled, numClaimsExpired, _, _, err := s.keepers.SettlePendingClaims(sdkCtx)
 	require.NoError(t, err)
 
 	// Check that one claim was settled.
 	require.Equal(t, uint64(1), numClaimsSettled)
-
 	// Validate that no claims expired.
 	require.Equal(t, uint64(0), numClaimsExpired)
@@ -341,8 +350,12 @@ func (s *TestSuite) TestSettlePendingClaims_Settles_WhenAProofIsNotRequired() {
 
 	// Confirm an expiration event was emitted
 	events := sdkCtx.EventManager().Events()
-	expectedEvent, ok := s.getClaimEvent(events, "poktroll.tokenomics.EventClaimSettled").(*tokenomicstypes.EventClaimSettled)
-	require.True(t, ok)
+	expectedEvents := testutilevents.FilterEvents[*tokenomicstypes.EventClaimSettled](t,
+		events, "poktroll.tokenomics.EventClaimSettled")
+	require.Len(t, expectedEvents, 1)
+
+	// Validate the event
+	expectedEvent := expectedEvents[0]
 	require.False(t, expectedEvent.ProofRequired)
 	require.Equal(t, s.expectedComputeUnits, expectedEvent.ComputeUnits)
 }
@@ -363,30 +376,6 @@ func (s *TestSuite) TestSettlePendingClaims_MultipleClaimsSettle_WithMultipleApp
 	s.T().Skip("TODO_TEST: Implement that multiple claims settle at once when different sessions have overlapping applications and suppliers")
 }
 
-// getClaimEvent verifies that there is exactly one event of type protoType in
-// the given events and returns it. If there are 0 or more than 1 events of the
-// given type, it fails the test.
-func (s *TestSuite) getClaimEvent(events cosmostypes.Events, protoType string) proto.Message {
-	var parsedEvent proto.Message
-	numExpectedEvents := 0
-	for _, event := range events {
-		switch event.Type {
-		case protoType:
-			var err error
-			parsedEvent, err = cosmostypes.ParseTypedEvent(abci.Event(event))
-			s.Require().NoError(err)
-			numExpectedEvents++
-		default:
-			continue
-		}
-	}
-	if numExpectedEvents == 1 {
-		return parsedEvent
-	}
-	require.NotEqual(s.T(), 1, numExpectedEvents, "Expected exactly one claim event")
-	return nil
-}
-
 func (s *TestSuite) TestSettlePendingClaims_ClaimPendingAfterSettlement() {
 	// Retrieve default values
 	t := s.T()
@@ -408,7 +397,8 @@ func (s *TestSuite) TestSettlePendingClaims_ClaimPendingAfterSettlement() {
 	sessionOneClaim := s.claim
 	s.keepers.UpsertClaim(ctx, sessionOneClaim)
 
-	sessionOneStartHeight := sessionOneClaim.GetSessionHeader().GetSessionEndBlockHeight()
+	sessionOneEndHeight := sessionOneClaim.GetSessionHeader().GetSessionEndBlockHeight()
+	// Add a second claim with a session header corresponding to the next session.
 	sessionTwoClaim := testutilproof.BaseClaim(
 		sessionOneClaim.GetSessionHeader().GetApplicationAddress(),
@@ -416,7 +406,7 @@
 		s.expectedComputeUnits,
 	)
 
-	sessionOneProofWindowCloseHeight := shared.GetProofWindowCloseHeight(&sharedParams, sessionOneStartHeight)
+	sessionOneProofWindowCloseHeight := shared.GetProofWindowCloseHeight(&sharedParams, sessionOneEndHeight)
 	sessionTwoStartHeight := shared.GetSessionStartHeight(&sharedParams, sessionOneProofWindowCloseHeight+1)
 	sessionTwoProofWindowCloseHeight := shared.GetProofWindowCloseHeight(&sharedParams, sessionTwoStartHeight)
@@ -434,7 +424,7 @@
 	// 1. Settle pending claims while the session is still active.
 	// Expectations: No claims should be settled because the session is still ongoing
-	blockHeight := shared.GetProofWindowCloseHeight(&sharedParams, sessionOneStartHeight)
+	blockHeight := shared.GetProofWindowCloseHeight(&sharedParams, sessionOneEndHeight)
 	sdkCtx = sdkCtx.WithBlockHeight(blockHeight)
 	numClaimsSettled, numClaimsExpired, _, _, err := s.keepers.SettlePendingClaims(sdkCtx)
 	require.NoError(t, err)
@@ -456,10 +446,12 @@ func (s *TestSuite) TestSettlePendingClaims_ClaimPendingAfterSettlement() {
 	// Expectations: Claims should not be settled because the proof window hasn't closed yet.
 	sdkCtx = sdkCtx.WithBlockHeight(blockHeight)
 	numClaimsSettled, numClaimsExpired, _, _, err = s.keepers.SettlePendingClaims(sdkCtx)
-	// Check that no claims were settled
 	require.NoError(t, err)
+
+	// Check that no claims were settled or expired.
 	require.Equal(t, uint64(0), numClaimsSettled)
 	require.Equal(t, uint64(0), numClaimsExpired)
+
 	// Validate that the claim still exists
 	claims = s.keepers.GetAllClaims(ctx)
 	require.Len(t, claims, 1)
diff --git a/x/tokenomics/keeper/update_relay_mining_difficulty.go b/x/tokenomics/keeper/update_relay_mining_difficulty.go
index 1224e72fd..aa166533f 100644
--- a/x/tokenomics/keeper/update_relay_mining_difficulty.go
+++ b/x/tokenomics/keeper/update_relay_mining_difficulty.go
@@ -3,6 +3,7 @@ package keeper
 import (
 	"bytes"
 	"context"
+	"encoding/hex"
 	"fmt"
 	"math"
 
@@ -13,6 +14,10 @@ import (
 	"github.com/pokt-network/poktroll/x/tokenomics/types"
 )
 
+// TODO_UPNET(@Olshansk, #542): Add telemetry that will enable:
+// 1. Visualizing a multi-line chart of "Relays EMA per Service" (title) of "Relay EMA" (y-axis) vs block/time (x-axis) and being able to select each service.
+// 2. Visualizing a multi-line chart of "Relay Mining Difficulty per Service" (title) of "Relay Mining Difficulty" (y-axis) vs block/time (x-axis) and being able to select each service.
+
 const (
 	// Exponential moving average (ema) smoothing factor, commonly known as alpha.
 	// Usually, alpha = 2 / (N+1), where N is the number of periods.
@@ -72,17 +77,31 @@ func (k Keeper) UpdateRelayMiningDifficulty(
 		}
 		k.SetRelayMiningDifficulty(ctx, newDifficulty)
 
-		// Output the appropriate log message based on whether the difficulty was
-		// initialized, updated or unchanged.
-		if !found {
-			logger.Info(fmt.Sprintf("Initialized RelayMiningDifficulty for service %s at height %d with difficulty %x", serviceId, sdkCtx.BlockHeight(), newDifficulty.TargetHash))
-			continue
-		} else if !bytes.Equal(prevDifficulty.TargetHash, newDifficulty.TargetHash) {
-			// TODO_BLOCKER(@Olshansk, #542): Emit an event for the updated difficulty.
-			logger.Info(fmt.Sprintf("Updated RelayMiningDifficulty for service %s at height %d from %x to %x", serviceId, sdkCtx.BlockHeight(), prevDifficulty.TargetHash, newDifficulty.TargetHash))
-		} else {
-			logger.Info(fmt.Sprintf("No change in RelayMiningDifficulty for service %s at height %d. Current difficulty: %x", serviceId, sdkCtx.BlockHeight(), newDifficulty.TargetHash))
+		// Emit an event for the updated relay mining difficulty regardless of
+		// whether the difficulty changed or not.
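For context on the emaSmoothingFactor comment in the const block above (alpha = 2 / (N+1)): an exponential moving average of the relay count is typically updated as sketched below. The helper that UpdateRelayMiningDifficulty actually uses, and its exact integer arithmetic, are not shown in this diff, so this is illustrative only.

// Illustrative sketch only; not the helper used by UpdateRelayMiningDifficulty.
// newEma = alpha*currentCount + (1-alpha)*prevEma, with alpha = 2 / (N+1),
// e.g. N = 10 periods gives alpha = 2.0 / 11 ≈ 0.18.
func computeEmaSketch(alpha float64, prevEma, currentCount uint64) uint64 {
	return uint64(alpha*float64(currentCount) + (1-alpha)*float64(prevEma))
}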
+
+		relayMiningDifficultyUpdateEvent := types.EventRelayMiningDifficultyUpdated{
+			ServiceId:                serviceId,
+			PrevTargetHashHexEncoded: hex.EncodeToString(prevDifficulty.TargetHash),
+			NewTargetHashHexEncoded:  hex.EncodeToString(newDifficulty.TargetHash),
+			PrevNumRelaysEma:         prevDifficulty.NumRelaysEma,
+			NewNumRelaysEma:          newDifficulty.NumRelaysEma,
+		}
+		if err := sdkCtx.EventManager().EmitTypedEvent(&relayMiningDifficultyUpdateEvent); err != nil {
+			return err
+		}
+
+		// Output the appropriate log message based on whether the difficulty was initialized, updated or unchanged.
+		var logMessage string
+		switch {
+		case !found:
+			logMessage = fmt.Sprintf("Initialized RelayMiningDifficulty for service %s at height %d with difficulty %x", serviceId, sdkCtx.BlockHeight(), newDifficulty.TargetHash)
+		case !bytes.Equal(prevDifficulty.TargetHash, newDifficulty.TargetHash):
+			logMessage = fmt.Sprintf("Updated RelayMiningDifficulty for service %s at height %d from %x to %x", serviceId, sdkCtx.BlockHeight(), prevDifficulty.TargetHash, newDifficulty.TargetHash)
+		default:
+			logMessage = fmt.Sprintf("No change in RelayMiningDifficulty for service %s at height %d. Current difficulty: %x", serviceId, sdkCtx.BlockHeight(), newDifficulty.TargetHash)
 		}
+		logger.Info(logMessage)
 	}
 
 	return nil
diff --git a/x/tokenomics/keeper/update_relay_mining_difficulty_test.go b/x/tokenomics/keeper/update_relay_mining_difficulty_test.go
index 1c89e2b74..5e5a1b1e1 100644
--- a/x/tokenomics/keeper/update_relay_mining_difficulty_test.go
+++ b/x/tokenomics/keeper/update_relay_mining_difficulty_test.go
@@ -4,16 +4,23 @@ import (
 	"bytes"
 	"testing"
 
+	cosmostypes "github.com/cosmos/cosmos-sdk/types"
 	"github.com/stretchr/testify/require"
 
+	testutilevents "github.com/pokt-network/poktroll/testutil/events"
 	keepertest "github.com/pokt-network/poktroll/testutil/keeper"
 	"github.com/pokt-network/poktroll/x/tokenomics/keeper"
 	tokenomicskeeper "github.com/pokt-network/poktroll/x/tokenomics/keeper"
 	"github.com/pokt-network/poktroll/x/tokenomics/types"
+	tokenomicstypes "github.com/pokt-network/poktroll/x/tokenomics/types"
 )
 
-func TestUpdateRelayMiningDifficulty_General(t *testing.T) {
+// This is a "base" test for updating relay mining difficulty: it walks through a
+// few different scenarios, but it does not cover the full range of edge or use
+// cases.
+func TestUpdateRelayMiningDifficulty_Base(t *testing.T) { keeper, ctx := keepertest.TokenomicsKeeper(t) + sdkCtx := cosmostypes.UnwrapSDKContext(ctx) // Introduce svc1 for the first time relaysPerServiceMap := map[string]uint64{ @@ -80,6 +87,12 @@ func TestUpdateRelayMiningDifficulty_General(t *testing.T) { difficultySvc31, found := keeper.GetRelayMiningDifficulty(ctx, "svc3") require.True(t, found) require.Equal(t, uint64(1e10), difficultySvc31.NumRelaysEma) + + // Confirm a relay mining difficulty update event was emitted + events := sdkCtx.EventManager().Events() + expectedEvents := testutilevents.FilterEvents[*tokenomicstypes.EventRelayMiningDifficultyUpdated](t, + events, "poktroll.tokenomics.EventRelayMiningDifficultyUpdated") + require.Len(t, expectedEvents, 6) // 3 for svc1, 2 for svc2, 1 for svc3 } func TestUpdateRelayMiningDifficulty_FirstDifficulty(t *testing.T) { diff --git a/x/tokenomics/types/event.pb.go b/x/tokenomics/types/event.pb.go index bbb44e37e..c50abed3d 100644 --- a/x/tokenomics/types/event.pb.go +++ b/x/tokenomics/types/event.pb.go @@ -140,32 +140,121 @@ func (m *EventClaimSettled) GetProofRequired() bool { return false } +// EventRelayMiningDifficultyUpdated is an event emitted whenever the relay mining difficulty is updated +// for a given service. +type EventRelayMiningDifficultyUpdated struct { + ServiceId string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + PrevTargetHashHexEncoded string `protobuf:"bytes,2,opt,name=prev_target_hash_hex_encoded,json=prevTargetHashHexEncoded,proto3" json:"prev_target_hash_hex_encoded,omitempty"` + NewTargetHashHexEncoded string `protobuf:"bytes,3,opt,name=new_target_hash_hex_encoded,json=newTargetHashHexEncoded,proto3" json:"new_target_hash_hex_encoded,omitempty"` + PrevNumRelaysEma uint64 `protobuf:"varint,4,opt,name=prev_num_relays_ema,json=prevNumRelaysEma,proto3" json:"prev_num_relays_ema,omitempty"` + NewNumRelaysEma uint64 `protobuf:"varint,5,opt,name=new_num_relays_ema,json=newNumRelaysEma,proto3" json:"new_num_relays_ema,omitempty"` +} + +func (m *EventRelayMiningDifficultyUpdated) Reset() { *m = EventRelayMiningDifficultyUpdated{} } +func (m *EventRelayMiningDifficultyUpdated) String() string { return proto.CompactTextString(m) } +func (*EventRelayMiningDifficultyUpdated) ProtoMessage() {} +func (*EventRelayMiningDifficultyUpdated) Descriptor() ([]byte, []int) { + return fileDescriptor_a78874bbf91a58c7, []int{2} +} +func (m *EventRelayMiningDifficultyUpdated) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventRelayMiningDifficultyUpdated) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventRelayMiningDifficultyUpdated.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventRelayMiningDifficultyUpdated) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventRelayMiningDifficultyUpdated.Merge(m, src) +} +func (m *EventRelayMiningDifficultyUpdated) XXX_Size() int { + return m.Size() +} +func (m *EventRelayMiningDifficultyUpdated) XXX_DiscardUnknown() { + xxx_messageInfo_EventRelayMiningDifficultyUpdated.DiscardUnknown(m) +} + +var xxx_messageInfo_EventRelayMiningDifficultyUpdated proto.InternalMessageInfo + +func (m *EventRelayMiningDifficultyUpdated) GetServiceId() string { + if m != nil { + return m.ServiceId + } + return "" +} + +func (m *EventRelayMiningDifficultyUpdated) 
GetPrevTargetHashHexEncoded() string { + if m != nil { + return m.PrevTargetHashHexEncoded + } + return "" +} + +func (m *EventRelayMiningDifficultyUpdated) GetNewTargetHashHexEncoded() string { + if m != nil { + return m.NewTargetHashHexEncoded + } + return "" +} + +func (m *EventRelayMiningDifficultyUpdated) GetPrevNumRelaysEma() uint64 { + if m != nil { + return m.PrevNumRelaysEma + } + return 0 +} + +func (m *EventRelayMiningDifficultyUpdated) GetNewNumRelaysEma() uint64 { + if m != nil { + return m.NewNumRelaysEma + } + return 0 +} + func init() { proto.RegisterType((*EventClaimExpired)(nil), "poktroll.tokenomics.EventClaimExpired") proto.RegisterType((*EventClaimSettled)(nil), "poktroll.tokenomics.EventClaimSettled") + proto.RegisterType((*EventRelayMiningDifficultyUpdated)(nil), "poktroll.tokenomics.EventRelayMiningDifficultyUpdated") } func init() { proto.RegisterFile("poktroll/tokenomics/event.proto", fileDescriptor_a78874bbf91a58c7) } var fileDescriptor_a78874bbf91a58c7 = []byte{ - // 260 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x2f, 0xc8, 0xcf, 0x2e, - 0x29, 0xca, 0xcf, 0xc9, 0xd1, 0x2f, 0xc9, 0xcf, 0x4e, 0xcd, 0xcb, 0xcf, 0xcd, 0x4c, 0x2e, 0xd6, - 0x4f, 0x2d, 0x4b, 0xcd, 0x2b, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x86, 0x29, 0xd0, - 0x43, 0x28, 0x90, 0x92, 0x82, 0xeb, 0x2a, 0x28, 0xca, 0xcf, 0x4f, 0xd3, 0x4f, 0xce, 0x49, 0xcc, - 0xcc, 0x85, 0x68, 0x50, 0x4a, 0xe5, 0x12, 0x74, 0x05, 0xe9, 0x77, 0x06, 0x89, 0xb9, 0x56, 0x14, - 0x64, 0x16, 0xa5, 0xa6, 0x08, 0x69, 0x73, 0xb1, 0x82, 0xd5, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0x70, - 0x1b, 0x89, 0xea, 0xc1, 0x4d, 0x05, 0x1b, 0xa0, 0x07, 0x56, 0x1c, 0x04, 0x51, 0x23, 0xa4, 0xcc, - 0xc5, 0x9b, 0x9c, 0x9f, 0x5b, 0x50, 0x5a, 0x92, 0x1a, 0x5f, 0x9a, 0x97, 0x59, 0x52, 0x2c, 0xc1, - 0xa4, 0xc0, 0xa8, 0xc1, 0x12, 0xc4, 0x03, 0x15, 0x0c, 0x05, 0x89, 0x29, 0xf5, 0x30, 0x22, 0xdb, - 0x13, 0x9c, 0x5a, 0x52, 0x92, 0x43, 0x0b, 0x7b, 0x84, 0x54, 0xb9, 0xf8, 0xc0, 0x5a, 0xe3, 0x8b, - 0x52, 0x0b, 0x4b, 0x41, 0x7e, 0x91, 0x60, 0x56, 0x60, 0xd4, 0xe0, 0x08, 0xe2, 0x05, 0x8b, 0x06, - 0x41, 0x05, 0x9d, 0x7c, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, - 0xc6, 0x09, 0x8f, 0xe5, 0x18, 0x2e, 0x3c, 0x96, 0x63, 0xb8, 0xf1, 0x58, 0x8e, 0x21, 0xca, 0x38, - 0x3d, 0xb3, 0x24, 0xa3, 0x34, 0x49, 0x2f, 0x39, 0x3f, 0x57, 0x1f, 0xe4, 0x1a, 0xdd, 0xbc, 0xd4, - 0x92, 0xf2, 0xfc, 0xa2, 0x6c, 0x7d, 0x78, 0x18, 0x56, 0x20, 0x87, 0x7d, 0x49, 0x65, 0x41, 0x6a, - 0x71, 0x12, 0x1b, 0x38, 0x2c, 0x8d, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x4a, 0x16, 0xe1, 0xcd, - 0x9f, 0x01, 0x00, 0x00, + // 420 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x92, 0xcb, 0x8e, 0xd3, 0x30, + 0x14, 0x86, 0xeb, 0xb9, 0x20, 0x6a, 0x18, 0x2e, 0x1e, 0x21, 0xa2, 0x01, 0x42, 0x29, 0x42, 0xaa, + 0x34, 0x9a, 0x44, 0x62, 0xb6, 0x88, 0x05, 0x10, 0x69, 0x58, 0x0c, 0x8b, 0xc0, 0x6c, 0xd8, 0x58, + 0x9e, 0xe4, 0xb4, 0xb1, 0x1a, 0x5f, 0xb0, 0x9d, 0x26, 0x7d, 0x07, 0x16, 0x3c, 0x00, 0x0f, 0xc4, + 0x72, 0x96, 0x2c, 0x51, 0xfb, 0x22, 0xc8, 0x6e, 0x29, 0x15, 0x82, 0x25, 0xdb, 0xff, 0x7c, 0xff, + 0xf9, 0xcf, 0xb1, 0x0f, 0x7e, 0xac, 0xd5, 0xd4, 0x19, 0x55, 0xd7, 0xa9, 0x53, 0x53, 0x90, 0x4a, + 0xf0, 0xc2, 0xa6, 0x30, 0x03, 0xe9, 0x12, 0x6d, 0x94, 0x53, 0xe4, 0xf0, 0x17, 0x90, 0xfc, 0x06, + 0x8e, 0x8e, 0x36, 0x2e, 0x6d, 0x94, 0x1a, 0xa7, 0x45, 0xcd, 0xb8, 0x58, 0x19, 0x86, 0x80, 0xef, + 0x66, 0xde, 0xff, 0xda, 0x6b, 0x59, 0xa7, 0xb9, 0x81, 0x92, 0x1c, 0xe3, 
0xfd, 0xc0, 0x44, 0x68, + 0x80, 0x46, 0x37, 0x9e, 0xdf, 0x4b, 0x36, 0x5d, 0x43, 0x83, 0x24, 0xc0, 0xf9, 0x8a, 0x21, 0x4f, + 0xf1, 0x41, 0xa1, 0x84, 0x6e, 0x1c, 0xd0, 0x46, 0x72, 0x67, 0xa3, 0x9d, 0x01, 0x1a, 0xed, 0xe5, + 0x37, 0xd7, 0xe2, 0x85, 0xd7, 0x86, 0x9f, 0xd1, 0x76, 0xce, 0x7b, 0x70, 0xae, 0xfe, 0x1f, 0x39, + 0xe4, 0x19, 0xbe, 0x15, 0xac, 0xd4, 0xc0, 0xa7, 0xc6, 0xef, 0x12, 0xed, 0x0e, 0xd0, 0xe8, 0x7a, + 0x7e, 0x10, 0xd4, 0x7c, 0x2d, 0x0e, 0xbf, 0xee, 0xe0, 0x27, 0x61, 0x9c, 0x1c, 0x6a, 0x36, 0x3f, + 0xe7, 0x92, 0xcb, 0xc9, 0x1b, 0x3e, 0x1e, 0xf3, 0xa2, 0xa9, 0xdd, 0xfc, 0x42, 0x97, 0xcc, 0x41, + 0x49, 0x1e, 0x61, 0x6c, 0xc1, 0xcc, 0x78, 0x01, 0x94, 0x97, 0x61, 0xc6, 0x7e, 0xde, 0x5f, 0x2b, + 0x6f, 0x4b, 0xf2, 0x12, 0x3f, 0xd4, 0x06, 0x66, 0xd4, 0x31, 0x33, 0x01, 0x47, 0x2b, 0x66, 0x2b, + 0x5a, 0x41, 0x47, 0x41, 0x16, 0xaa, 0x84, 0x32, 0xcc, 0xd7, 0xcf, 0x23, 0xcf, 0x7c, 0x08, 0xc8, + 0x19, 0xb3, 0xd5, 0x19, 0x74, 0xd9, 0xaa, 0x4e, 0x5e, 0xe0, 0x07, 0x12, 0xda, 0x7f, 0xda, 0x77, + 0x83, 0xfd, 0xbe, 0x84, 0xf6, 0xaf, 0xee, 0x13, 0x7c, 0x18, 0xd2, 0x65, 0x23, 0xa8, 0xf1, 0x5b, + 0x58, 0x0a, 0x82, 0x45, 0x7b, 0xe1, 0x51, 0xee, 0xf8, 0xd2, 0xbb, 0x46, 0x84, 0xf5, 0x6c, 0x26, + 0x18, 0x39, 0xc6, 0xc4, 0x87, 0xfd, 0x41, 0xef, 0x07, 0xfa, 0xb6, 0x84, 0x76, 0x1b, 0x7e, 0x75, + 0xfe, 0x6d, 0x11, 0xa3, 0xab, 0x45, 0x8c, 0x7e, 0x2c, 0x62, 0xf4, 0x65, 0x19, 0xf7, 0xae, 0x96, + 0x71, 0xef, 0xfb, 0x32, 0xee, 0x7d, 0x3c, 0x9d, 0x70, 0x57, 0x35, 0x97, 0x49, 0xa1, 0x44, 0xea, + 0x3f, 0xeb, 0x44, 0x82, 0x6b, 0x95, 0x99, 0xa6, 0x9b, 0x13, 0xeb, 0xb6, 0x4f, 0xd3, 0xcd, 0x35, + 0xd8, 0xcb, 0x6b, 0xe1, 0xd4, 0x4e, 0x7f, 0x06, 0x00, 0x00, 0xff, 0xff, 0xee, 0x9a, 0x02, 0xb4, + 0xbe, 0x02, 0x00, 0x00, } func (m *EventClaimExpired) Marshal() (dAtA []byte, err error) { @@ -258,6 +347,60 @@ func (m *EventClaimSettled) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *EventRelayMiningDifficultyUpdated) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventRelayMiningDifficultyUpdated) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventRelayMiningDifficultyUpdated) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.NewNumRelaysEma != 0 { + i = encodeVarintEvent(dAtA, i, uint64(m.NewNumRelaysEma)) + i-- + dAtA[i] = 0x28 + } + if m.PrevNumRelaysEma != 0 { + i = encodeVarintEvent(dAtA, i, uint64(m.PrevNumRelaysEma)) + i-- + dAtA[i] = 0x20 + } + if len(m.NewTargetHashHexEncoded) > 0 { + i -= len(m.NewTargetHashHexEncoded) + copy(dAtA[i:], m.NewTargetHashHexEncoded) + i = encodeVarintEvent(dAtA, i, uint64(len(m.NewTargetHashHexEncoded))) + i-- + dAtA[i] = 0x1a + } + if len(m.PrevTargetHashHexEncoded) > 0 { + i -= len(m.PrevTargetHashHexEncoded) + copy(dAtA[i:], m.PrevTargetHashHexEncoded) + i = encodeVarintEvent(dAtA, i, uint64(len(m.PrevTargetHashHexEncoded))) + i-- + dAtA[i] = 0x12 + } + if len(m.ServiceId) > 0 { + i -= len(m.ServiceId) + copy(dAtA[i:], m.ServiceId) + i = encodeVarintEvent(dAtA, i, uint64(len(m.ServiceId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func encodeVarintEvent(dAtA []byte, offset int, v uint64) int { offset -= sovEvent(v) base := offset @@ -304,6 +447,33 @@ func (m *EventClaimSettled) Size() (n int) { return n } +func (m 
*EventRelayMiningDifficultyUpdated) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ServiceId) + if l > 0 { + n += 1 + l + sovEvent(uint64(l)) + } + l = len(m.PrevTargetHashHexEncoded) + if l > 0 { + n += 1 + l + sovEvent(uint64(l)) + } + l = len(m.NewTargetHashHexEncoded) + if l > 0 { + n += 1 + l + sovEvent(uint64(l)) + } + if m.PrevNumRelaysEma != 0 { + n += 1 + sovEvent(uint64(m.PrevNumRelaysEma)) + } + if m.NewNumRelaysEma != 0 { + n += 1 + sovEvent(uint64(m.NewNumRelaysEma)) + } + return n +} + func sovEvent(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -540,6 +710,190 @@ func (m *EventClaimSettled) Unmarshal(dAtA []byte) error { } return nil } +func (m *EventRelayMiningDifficultyUpdated) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventRelayMiningDifficultyUpdated: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventRelayMiningDifficultyUpdated: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvent + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevTargetHashHexEncoded", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvent + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PrevTargetHashHexEncoded = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NewTargetHashHexEncoded", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvent + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NewTargetHashHexEncoded = string(dAtA[iNdEx:postIndex]) + iNdEx = 
postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevNumRelaysEma", wireType) + } + m.PrevNumRelaysEma = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PrevNumRelaysEma |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NewNumRelaysEma", wireType) + } + m.NewNumRelaysEma = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NewNumRelaysEma |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipEvent(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipEvent(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0