diff --git a/README.md b/README.md
index a6105f5d51..975b61224f 100644
--- a/README.md
+++ b/README.md
@@ -57,6 +57,22 @@ The project comes with several executables found in the `build/bin` directory.
| **`platon`** | Our main PlatON CLI client. It is the entry point into the PlatON network |
| `platonkey` | A key-related tool |
+
+### Hardware Requirements
+
+Minimum:
+
+* CPU with 2+ cores
+* 8GB RAM
+* 500GB free storage space to sync the Mainnet
+* 8 MBit/sec download Internet service
+
+Recommended:
+
+* Fast CPU with 4+ cores
+* 16GB+ RAM
+* High Performance SSD with at least 500GB free space
+* 25+ MBit/sec download Internet service
+
### Generate the keys
Each node requires two public/private key pairs. The first, the node keypair, is generated on the secp256k1 curve and is used to identify the node and sign blocks; the second, the node blskeypair, is generated on the BLS12-381 curve and is used for consensus verification. Both key pairs are generated with the platonkey tool, as sketched below.
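+
+A minimal sketch of the key-generation flow (the subcommand names below are assumptions, not verified against this release; run `platonkey --help` for the authoritative usage):
+
+```bash
+# Generate the node keypair (secp256k1), used for node identity and block signing.
+# NOTE: "genkeypair" is an assumed subcommand name.
+platonkey genkeypair
+
+# Generate the node BLS keypair (BLS12-381), used for consensus verification.
+# NOTE: "genblskeypair" is an assumed subcommand name.
+platonkey genblskeypair
+```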
diff --git a/accounts/abi/selector_parser.go b/accounts/abi/selector_parser.go
new file mode 100644
index 0000000000..75609b28a6
--- /dev/null
+++ b/accounts/abi/selector_parser.go
@@ -0,0 +1,152 @@
+package abi
+
+import (
+ "fmt"
+)
+
+type SelectorMarshaling struct {
+ Name string `json:"name"`
+ Type string `json:"type"`
+ Inputs []ArgumentMarshaling `json:"inputs"`
+}
+
+func isDigit(c byte) bool {
+ return c >= '0' && c <= '9'
+}
+
+func isAlpha(c byte) bool {
+ return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
+}
+
+func isIdentifierSymbol(c byte) bool {
+ return c == '$' || c == '_'
+}
+
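+// parseToken scans a single identifier or type token from the front of
+// unescapedSelector and returns the token together with the unconsumed
+// remainder, e.g. parseToken("balanceOf(address)", true) yields
+// ("balanceOf", "(address)").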
+func parseToken(unescapedSelector string, isIdent bool) (string, string, error) {
+ if len(unescapedSelector) == 0 {
+ return "", "", fmt.Errorf("empty token")
+ }
+ firstChar := unescapedSelector[0]
+ position := 1
+ if !(isAlpha(firstChar) || (isIdent && isIdentifierSymbol(firstChar))) {
+ return "", "", fmt.Errorf("invalid token start: %c", firstChar)
+ }
+ for position < len(unescapedSelector) {
+ char := unescapedSelector[position]
+ if !(isAlpha(char) || isDigit(char) || (isIdent && isIdentifierSymbol(char))) {
+ break
+ }
+ position++
+ }
+ return unescapedSelector[:position], unescapedSelector[position:], nil
+}
+
+func parseIdentifier(unescapedSelector string) (string, string, error) {
+ return parseToken(unescapedSelector, true)
+}
+
+func parseElementaryType(unescapedSelector string) (string, string, error) {
+ parsedType, rest, err := parseToken(unescapedSelector, false)
+ if err != nil {
+ return "", "", fmt.Errorf("failed to parse elementary type: %v", err)
+ }
+ // handle arrays
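+	// e.g. "uint8[4][][5]" is consumed one bracket group at a time:
+	// "[4]", then "[]", then "[5]".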
+ for len(rest) > 0 && rest[0] == '[' {
+ parsedType = parsedType + string(rest[0])
+ rest = rest[1:]
+ for len(rest) > 0 && isDigit(rest[0]) {
+ parsedType = parsedType + string(rest[0])
+ rest = rest[1:]
+ }
+ if len(rest) == 0 || rest[0] != ']' {
+ return "", "", fmt.Errorf("failed to parse array: expected ']', got %c", unescapedSelector[0])
+ }
+ parsedType = parsedType + string(rest[0])
+ rest = rest[1:]
+ }
+ return parsedType, rest, nil
+}
+
+func parseCompositeType(unescapedSelector string) ([]interface{}, string, error) {
+ if len(unescapedSelector) == 0 || unescapedSelector[0] != '(' {
+ return nil, "", fmt.Errorf("expected '(', got %c", unescapedSelector[0])
+ }
+ parsedType, rest, err := parseType(unescapedSelector[1:])
+ if err != nil {
+ return nil, "", fmt.Errorf("failed to parse type: %v", err)
+ }
+ result := []interface{}{parsedType}
+ for len(rest) > 0 && rest[0] != ')' {
+ parsedType, rest, err = parseType(rest[1:])
+ if err != nil {
+ return nil, "", fmt.Errorf("failed to parse type: %v", err)
+ }
+ result = append(result, parsedType)
+ }
+ if len(rest) == 0 || rest[0] != ')' {
+ return nil, "", fmt.Errorf("expected ')', got '%s'", rest)
+ }
+ return result, rest[1:], nil
+}
+
+func parseType(unescapedSelector string) (interface{}, string, error) {
+ if len(unescapedSelector) == 0 {
+ return nil, "", fmt.Errorf("empty type")
+ }
+ if unescapedSelector[0] == '(' {
+ return parseCompositeType(unescapedSelector)
+ } else {
+ return parseElementaryType(unescapedSelector)
+ }
+}
+
+func assembleArgs(args []interface{}) ([]ArgumentMarshaling, error) {
+ arguments := make([]ArgumentMarshaling, 0)
+ for i, arg := range args {
+ // generate dummy name to avoid unmarshal issues
+ name := fmt.Sprintf("name%d", i)
+ if s, ok := arg.(string); ok {
+ arguments = append(arguments, ArgumentMarshaling{name, s, s, nil, false})
+ } else if components, ok := arg.([]interface{}); ok {
+ subArgs, err := assembleArgs(components)
+ if err != nil {
+ return nil, fmt.Errorf("failed to assemble components: %v", err)
+ }
+ arguments = append(arguments, ArgumentMarshaling{name, "tuple", "tuple", subArgs, false})
+ } else {
+ return nil, fmt.Errorf("failed to assemble args: unexpected type %T", arg)
+ }
+ }
+ return arguments, nil
+}
+
+// ParseSelector converts a method selector into a struct that can be JSON encoded
+// and consumed by other functions in this package.
+// Note: although uppercase letters are not part of the ABI spec, this function
+// still accepts them since the general format is valid.
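+// For example, parsing "singleNest(bytes32,uint8,(uint256,uint256),address)" yields
+// Name "singleNest", Type "function" and four inputs, the third being a "tuple"
+// with two "uint256" components; input names are auto-generated as name0, name1, ...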
+func ParseSelector(unescapedSelector string) (SelectorMarshaling, error) {
+ name, rest, err := parseIdentifier(unescapedSelector)
+ if err != nil {
+ return SelectorMarshaling{}, fmt.Errorf("failed to parse selector '%s': %v", unescapedSelector, err)
+ }
+ args := []interface{}{}
+ if len(rest) >= 2 && rest[0] == '(' && rest[1] == ')' {
+ rest = rest[2:]
+ } else {
+ args, rest, err = parseCompositeType(rest)
+ if err != nil {
+ return SelectorMarshaling{}, fmt.Errorf("failed to parse selector '%s': %v", unescapedSelector, err)
+ }
+ }
+ if len(rest) > 0 {
+ return SelectorMarshaling{}, fmt.Errorf("failed to parse selector '%s': unexpected string '%s'", unescapedSelector, rest)
+ }
+
+	// Reassemble the fake ABI and construct the JSON
+ fakeArgs, err := assembleArgs(args)
+ if err != nil {
+ return SelectorMarshaling{}, fmt.Errorf("failed to parse selector: %v", err)
+ }
+
+ return SelectorMarshaling{name, "function", fakeArgs}, nil
+}
diff --git a/accounts/abi/selector_parser_test.go b/accounts/abi/selector_parser_test.go
new file mode 100644
index 0000000000..9720c9d530
--- /dev/null
+++ b/accounts/abi/selector_parser_test.go
@@ -0,0 +1,54 @@
+package abi
+
+import (
+ "fmt"
+ "log"
+ "reflect"
+ "testing"
+)
+
+func TestParseSelector(t *testing.T) {
+ mkType := func(types ...interface{}) []ArgumentMarshaling {
+ var result []ArgumentMarshaling
+ for i, typeOrComponents := range types {
+ name := fmt.Sprintf("name%d", i)
+ if typeName, ok := typeOrComponents.(string); ok {
+ result = append(result, ArgumentMarshaling{name, typeName, typeName, nil, false})
+ } else if components, ok := typeOrComponents.([]ArgumentMarshaling); ok {
+ result = append(result, ArgumentMarshaling{name, "tuple", "tuple", components, false})
+ } else {
+ log.Fatalf("unexpected type %T", typeOrComponents)
+ }
+ }
+ return result
+ }
+ tests := []struct {
+ input string
+ name string
+ args []ArgumentMarshaling
+ }{
+ {"noargs()", "noargs", []ArgumentMarshaling{}},
+ {"simple(uint256,uint256,uint256)", "simple", mkType("uint256", "uint256", "uint256")},
+ {"other(uint256,address)", "other", mkType("uint256", "address")},
+ {"withArray(uint256[],address[2],uint8[4][][5])", "withArray", mkType("uint256[]", "address[2]", "uint8[4][][5]")},
+ {"singleNest(bytes32,uint8,(uint256,uint256),address)", "singleNest", mkType("bytes32", "uint8", mkType("uint256", "uint256"), "address")},
+ {"multiNest(address,(uint256[],uint256),((address,bytes32),uint256))", "multiNest",
+ mkType("address", mkType("uint256[]", "uint256"), mkType(mkType("address", "bytes32"), "uint256"))},
+ }
+ for i, tt := range tests {
+ selector, err := ParseSelector(tt.input)
+ if err != nil {
+ t.Errorf("test %d: failed to parse selector '%v': %v", i, tt.input, err)
+ }
+ if selector.Name != tt.name {
+ t.Errorf("test %d: unexpected function name: '%s' != '%s'", i, selector.Name, tt.name)
+ }
+
+ if selector.Type != "function" {
+ t.Errorf("test %d: unexpected type: '%s' != '%s'", i, selector.Type, "function")
+ }
+ if !reflect.DeepEqual(selector.Inputs, tt.args) {
+ t.Errorf("test %d: unexpected args: '%v' != '%v'", i, selector.Inputs, tt.args)
+ }
+ }
+}
diff --git a/build/checksums.txt b/build/checksums.txt
index ed29b8e05e..7b21faef20 100644
--- a/build/checksums.txt
+++ b/build/checksums.txt
@@ -13,23 +13,44 @@ d7d6c70b05a7c2f68b48aab5ab8cb5116b8444c9ddad131673b152e7cff7c726 go1.16.freebsd
27a1aaa988e930b7932ce459c8a63ad5b3333b3a06b016d87ff289f2a11aacd6 go1.16.linux-ppc64le.tar.gz
be4c9e4e2cf058efc4e3eb013a760cb989ddc4362f111950c990d1c63b27ccbe go1.16.linux-s390x.tar.gz
-d4bd25b9814eeaa2134197dd2c7671bb791eae786d42010d9d788af20dee4bfa golangci-lint-1.42.0-darwin-amd64.tar.gz
-e56859c04a2ad5390c6a497b1acb1cc9329ecb1010260c6faae9b5a4c35b35ea golangci-lint-1.42.0-darwin-arm64.tar.gz
-14d912a3fa856830339472fc4dc341933adf15f37bdb7130bbbfcf960ecf4809 golangci-lint-1.42.0-freebsd-386.tar.gz
-337257fccc9baeb5ee1cd7e70c153e9d9f59d3afde46d631659500048afbdf80 golangci-lint-1.42.0-freebsd-amd64.tar.gz
-6debcc266b629359fdd8eef4f4abb05a621604079d27016265afb5b4593b0eff golangci-lint-1.42.0-freebsd-armv6.tar.gz
-878f0e190169db2ce9dde8cefbd99adc4fe28b90b68686bbfcfcc2085e6d693e golangci-lint-1.42.0-freebsd-armv7.tar.gz
-42c78e31faf62b225363eff1b1d2aa74f9dbcb75686c8914aa3e90d6af65cece golangci-lint-1.42.0-linux-386.tar.gz
-6937f62f8e2329e94822dc11c10b871ace5557ae1fcc4ee2f9980cd6aecbc159 golangci-lint-1.42.0-linux-amd64.tar.gz
-2cf8d23d96cd854a537b355dab2962b960b88a06b615232599f066afd233f246 golangci-lint-1.42.0-linux-arm64.tar.gz
-08b003d1ed61367473886defc957af5301066e62338e5d96a319c34dadc4c1d1 golangci-lint-1.42.0-linux-armv6.tar.gz
-c7c00ec4845e806a1f32685f5b150219e180bd6d6a9d584be8d27f0c41d7a1bf golangci-lint-1.42.0-linux-armv7.tar.gz
-3650fcf29eb3d8ee326d77791a896b15259eb2d5bf77437dc72e7efe5af6bd40 golangci-lint-1.42.0-linux-mips64.tar.gz
-f51ae003fdbca4fef78ba73e2eb736a939c8eaa178cd452234213b489da5a420 golangci-lint-1.42.0-linux-mips64le.tar.gz
-1b0bb7b8b22cc4ea7da44fd5ad5faaf6111d0677e01cc6f961b62a96537de2c6 golangci-lint-1.42.0-linux-ppc64le.tar.gz
-8cb56927eb75e572450efbe0ff0f9cf3f56dc9faa81d9e8d30d6559fc1d06e6d golangci-lint-1.42.0-linux-riscv64.tar.gz
-5ac41cd31825a176b21505a371a7b307cd9cdf17df0f35bbb3bf1466f9356ccc golangci-lint-1.42.0-linux-s390x.tar.gz
-e1cebd2af621ac4b64c20937df92c3819264f2174c92f51e196db1e64ae097e0 golangci-lint-1.42.0-windows-386.zip
-7e70fcde8e87a17cae0455df07d257ebc86669f3968d568e12727fa24bbe9883 golangci-lint-1.42.0-windows-amd64.zip
-59da7ce1bda432616bfc28ae663e52c3675adee8d9bf5959fafd657c159576ab golangci-lint-1.42.0-windows-armv6.zip
-65f62dda937bfcede0326ac77abe947ce1548931e6e13298ca036cb31f224db5 golangci-lint-1.42.0-windows-armv7.zip
+03c181fc1bb29ea3e73cbb23399c43b081063833a7cf7554b94e5a98308df53e golangci-lint-1.45.2-linux-riscv64.deb
+08a50bbbf451ede6d5354179eb3e14a5634e156dfa92cb9a2606f855a637e35b golangci-lint-1.45.2-linux-ppc64le.rpm
+0d12f6ec1296b5a70e392aa88cd2295cceef266165eb7028e675f455515dd1c9 golangci-lint-1.45.2-linux-armv7.deb
+10f2846e2e50e4ea8ae426ee62dcd2227b23adddd8e991aa3c065927ac948735 golangci-lint-1.45.2-linux-ppc64le.deb
+1463049b744871168095e3e8f687247d6040eeb895955b869889ea151e0603ab golangci-lint-1.45.2-linux-arm64.tar.gz
+15720f9c4c6f9324af695f081dc189adc7751b255759e78d7b2df1d7e9192533 golangci-lint-1.45.2-linux-amd64.deb
+166d922e4d3cfe3d47786c590154a9c8ea689dff0aa92b73d2f5fc74fc570c29 golangci-lint-1.45.2-linux-arm64.rpm
+1a3754c69f7cc19ab89cbdcc2550da4cf9abb3120383c6b3bd440c1ec22da2e6 golangci-lint-1.45.2-freebsd-386.tar.gz
+1dec0aa46d4f0d241863b573f70129bdf1de9c595cf51172a840a588a4cd9fc5 golangci-lint-1.45.2-windows-amd64.zip
+3198453806517c1ad988229f5e758ef850e671203f46d6905509df5bdf4dc24b golangci-lint-1.45.2-freebsd-armv7.tar.gz
+46a3cd1749d7b98adc2dc01510ddbe21abe42689c8a53fb0e81662713629f215 golangci-lint-1.45.2-linux-386.deb
+4e28bfb593d464b9e160f2acd5b71993836a183270bf8299b78ad31f7a168c0d golangci-lint-1.45.2-linux-arm64.deb
+5157a58c8f9ab85c33af2e46f0d7c57a3b1e8953b81d61130e292e09f545cfab golangci-lint-1.45.2-linux-mips64le.tar.gz
+518cd027644129fbf8ec4f02bd6f9ad7278aae826f92b63c80d4d0819ddde49a golangci-lint-1.45.2-linux-armv6.rpm
+595ad6c6dade4c064351bc309f411703e457f8ffbb7a1806b3d8ee713333427f golangci-lint-1.45.2-linux-amd64.tar.gz
+6994d6c80f0730751090986184a3481b4be2e6b6e84416238a2b857910045a4f golangci-lint-1.45.2-windows-arm64.zip
+6c81652fc340118811b487f713c441fc6f527800bf5fd11b8929d08124efa015 golangci-lint-1.45.2-linux-armv7.tar.gz
+726cb045559b7518bafdd3459de70a0647c087eb1b4634627a4b2e95b1258580 golangci-lint-1.45.2-freebsd-amd64.tar.gz
+77df3774cdfda49b956d4a0e676da9a9b883f496ee37293c530770fef6b1d24e golangci-lint-1.45.2-linux-mips64.deb
+7a9840f279a7d5d405bb434e101c2290964b3729630ac2add29280b962b7b9a5 golangci-lint-1.45.2-windows-armv6.zip
+7d4bf9a5d80ec467aaaf66e78dbdcab567bbc6ba8151334c714eee58766aae32 golangci-lint-1.45.2-windows-armv7.zip
+7e5f8821d39bb11d273b0841b34355f56bd5a45a2d5179f0d09e614e0efc0482 golangci-lint-1.45.2-linux-s390x.rpm
+828de1bde796b23d8656b17a8885fbd879ef612795d62d1e4618126b419728b5 golangci-lint-1.45.2-linux-mips64.rpm
+879a52107a797678a03c175cc7cf441411a14a01f66dc87f70bdfa304a4129a6 golangci-lint-1.45.2-windows-386.zip
+87b6c7e3a3769f7d9abeb3bb82119b3c91e3c975300f6834fdeef8b2e37c98ff golangci-lint-1.45.2-linux-amd64.rpm
+8b605c6d686c8af53ecc4ef39544541eeb1644d34cc10f9ffc5087808210c4ff golangci-lint-1.45.2-linux-s390x.deb
+9427dbf51d0ac6f73a0f992838bd40c817470cc5bf6c8e2e2bea6fac46d7af6e golangci-lint-1.45.2-linux-ppc64le.tar.gz
+995e509e895ca6a64ffc7395ac884d5961bdec98423cb896b17f345a9b4a19cf golangci-lint-1.45.2-darwin-amd64.tar.gz
+a3f36278f2ea5516341e9071a2df6e65df272be80230b5406a12b72c6d425bee golangci-lint-1.45.2-linux-armv7.rpm
+a5e12c50c23e87ac1deffc872f92ae85427b1198604969399805ae47cfe43f08 golangci-lint-1.45.2-linux-riscv64.tar.gz
+aa8fa1be0729dbc2fbc4e01e82027097613eee74bd686ebef20f860b01fff8b3 golangci-lint-1.45.2-freebsd-armv6.tar.gz
+c2b9669decc1b638cf2ee9060571af4e255f6dfcbb225c293e3a7ee4bb2c7217 golangci-lint-1.45.2-darwin-arm64.tar.gz
+dfa8bdaf0387aec1cd5c1aa8857f67b2bbdfc2e42efce540c8fb9bbe3e8af302 golangci-lint-1.45.2-linux-armv6.tar.gz
+eb8b8539dd017eee5c131ea9b875893ab2cebeeca41e8c6624907fb02224d643 golangci-lint-1.45.2-linux-386.rpm
+ed6c7e17a857f30d715c5302fa250d95936936b277024bffea201187a257d7a7 golangci-lint-1.45.2-linux-armv6.deb
+ef4d0154ace4001f01b288baeb118176242efb4fd163e178763e3213b77ef30b golangci-lint-1.45.2-linux-mips64le.deb
+ef7002a2229f5ff5ba201a715fcf877664ea88decbe58e69d163293913024955 golangci-lint-1.45.2-linux-s390x.tar.gz
+f13ecbd09228632e6bbe91a8324bd675c406eed22eb6d2c1e8192eed9ec4f914 golangci-lint-1.45.2-linux-386.tar.gz
+f4cd9cfb09252f51699407277512263cae8409b665dd764f55a34738d0e89edc golangci-lint-1.45.2-linux-riscv64.rpm
+fb1945dc59d37c9d14bf0a4aea11ea8651fa0e1d582ea80c4c44d0a536c08893 golangci-lint-1.45.2-linux-mips64.tar.gz
+fe542c22738010f453c735a3c410decfd3784d1bd394b395c298ee298fc4c606 golangci-lint-1.45.2-linux-mips64le.rpm
diff --git a/build/ci.go b/build/ci.go
index d474b97504..f27ca90489 100644
--- a/build/ci.go
+++ b/build/ci.go
@@ -119,11 +119,10 @@ var (
// Note: the following Ubuntu releases have been officially deprecated on Launchpad:
// wily, yakkety, zesty, artful, cosmic, disco, eoan, groovy, hirsute
debDistroGoBoots = map[string]string{
- "trusty": "golang-1.11",
- "xenial": "golang-go",
- "bionic": "golang-go",
- "focal": "golang-go",
- "hirsute": "golang-go",
+ "trusty": "golang-1.11",
+ "xenial": "golang-go",
+ "bionic": "golang-go",
+ "focal": "golang-go",
}
debGoBootPaths = map[string]string{
@@ -388,7 +387,7 @@ func doLint(cmdline []string) {
// downloadLinter downloads and unpacks golangci-lint.
func downloadLinter(cachedir string) string {
- const version = "1.42.0"
+ const version = "1.45.2"
csdb := build.MustLoadChecksums("build/checksums.txt")
arch := runtime.GOARCH
@@ -1120,21 +1119,21 @@ func doPurge(cmdline []string) {
// Iterate over the blobs, collect and sort all unstable builds
for i := 0; i < len(blobs); i++ {
- if !strings.Contains(blobs[i].Name, "unstable") {
+ if !strings.Contains(*blobs[i].Name, "unstable") {
blobs = append(blobs[:i], blobs[i+1:]...)
i--
}
}
for i := 0; i < len(blobs); i++ {
for j := i + 1; j < len(blobs); j++ {
- if blobs[i].Properties.LastModified.After(blobs[j].Properties.LastModified) {
+ if blobs[i].Properties.LastModified.After(*blobs[j].Properties.LastModified) {
blobs[i], blobs[j] = blobs[j], blobs[i]
}
}
}
	// Filter out all archives more recent than the given threshold
for i, blob := range blobs {
- if time.Since(blob.Properties.LastModified) < time.Duration(*limit)*24*time.Hour {
+ if time.Since(*blob.Properties.LastModified) < time.Duration(*limit)*24*time.Hour {
blobs = blobs[:i]
break
}
diff --git a/cmd/platon/config.go b/cmd/platon/config.go
index 053e875858..c8f86d22be 100644
--- a/cmd/platon/config.go
+++ b/cmd/platon/config.go
@@ -21,6 +21,7 @@ import (
"encoding/json"
"errors"
"fmt"
+ "github.com/PlatONnetwork/PlatON-Go/core/rawdb"
"github.com/PlatONnetwork/PlatON-Go/log"
"io"
"os"
@@ -192,7 +193,24 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
snapshotdb.SetDBPathWithNode(stack.ResolvePath(snapshotdb.DBPath))
- backend := utils.RegisterEthService(stack, &cfg.Eth)
+ backend, eth := utils.RegisterEthService(stack, &cfg.Eth)
+ // Warn users to migrate if they have a legacy freezer format.
+ if eth != nil {
+ firstIdx := uint64(0)
+ // Hack to speed up check for mainnet because we know
+ // the first non-empty block.
+ ghash := rawdb.ReadCanonicalHash(eth.ChainDb(), 0)
+ if cfg.Eth.NetworkId == 1 && ghash == params.MainnetGenesisHash {
+ firstIdx = 1
+ }
+ isLegacy, _, err := dbHasLegacyReceipts(eth.ChainDb(), firstIdx)
+ if err != nil {
+ utils.Fatalf("Failed to check db for legacy receipts: %v", err)
+ }
+ if isLegacy {
+ log.Warn("Database has receipts with a legacy format. Please run `geth db freezer-migrate`.")
+ }
+ }
// Configure GraphQL if requested
if ctx.GlobalIsSet(utils.GraphQLEnabledFlag.Name) {
diff --git a/cmd/platon/dbcmd.go b/cmd/platon/dbcmd.go
index 90ab469bb0..8724613b7f 100644
--- a/cmd/platon/dbcmd.go
+++ b/cmd/platon/dbcmd.go
@@ -21,6 +21,7 @@ import (
"errors"
"fmt"
"github.com/PlatONnetwork/PlatON-Go/core/state/snapshot"
+ "github.com/PlatONnetwork/PlatON-Go/core/types"
"github.com/olekukonko/tablewriter"
"os"
"os/signal"
@@ -73,6 +74,7 @@ Remove blockchain and state databases`,
dbImportCmd,
dbExportCmd,
dbMetadataCmd,
+ dbMigrateFreezerCmd,
},
}
dbInspectCmd = cli.Command{
@@ -217,6 +219,18 @@ WARNING: This is a low-level operation which may cause database corruption!`,
},
Description: "Shows metadata about the chain status.",
}
+ dbMigrateFreezerCmd = cli.Command{
+ Action: utils.MigrateFlags(freezerMigrate),
+ Name: "freezer-migrate",
+ Usage: "Migrate legacy parts of the freezer. (WARNING: may take a long time)",
+ ArgsUsage: "",
+ Flags: []cli.Flag{
+ utils.DataDirFlag,
+ utils.SyncModeFlag,
+ },
+ Description: `The freezer-migrate command checks your database for receipts in a legacy format and updates those.
+WARNING: please back-up the receipt files in your ancients before running this command.`,
+ }
)
func removeDB(ctx *cli.Context) error {
@@ -692,6 +706,7 @@ func showMetaData(ctx *cli.Context) error {
data = append(data, []string{"headBlock.Root", fmt.Sprintf("%v", b.Root())})
data = append(data, []string{"headBlock.Number", fmt.Sprintf("%d (0x%x)", b.Number(), b.Number())})
}
+
if h := rawdb.ReadHeadHeader(db); h != nil {
data = append(data, []string{"headHeader.Hash", fmt.Sprintf("%v", h.Hash())})
data = append(data, []string{"headHeader.Root", fmt.Sprintf("%v", h.Root)})
@@ -714,3 +729,88 @@ func showMetaData(ctx *cli.Context) error {
table.Render()
return nil
}
+
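+// freezerMigrate migrates the freezer's receipts table from the legacy storage
+// format to the current one, if any legacy entries are detected.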
+func freezerMigrate(ctx *cli.Context) error {
+ stack, _ := makeConfigNode(ctx)
+ defer stack.Close()
+
+ db := utils.MakeChainDatabase(ctx, stack, false)
+ defer db.Close()
+
+ // Check first block for legacy receipt format
+ numAncients, err := db.Ancients()
+ if err != nil {
+ return err
+ }
+ if numAncients < 1 {
+ log.Info("No receipts in freezer to migrate")
+ return nil
+ }
+
+ isFirstLegacy, firstIdx, err := dbHasLegacyReceipts(db, 0)
+ if err != nil {
+ return err
+ }
+ if !isFirstLegacy {
+ log.Info("No legacy receipts to migrate")
+ return nil
+ }
+
+ log.Info("Starting migration", "ancients", numAncients, "firstLegacy", firstIdx)
+ start := time.Now()
+ if err := db.MigrateTable("receipts", types.ConvertLegacyStoredReceipts); err != nil {
+ return err
+ }
+ if err := db.Close(); err != nil {
+ return err
+ }
+ log.Info("Migration finished", "duration", time.Since(start))
+
+ return nil
+}
+
+// dbHasLegacyReceipts checks freezer entries for legacy receipts. It stops at the first
+// non-empty receipt and checks its format. The index of this first non-empty element is
+// the second return parameter.
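+// If a non-zero firstIdx is passed in, the scan for the first non-empty receipt
+// is skipped and the entry at that index is checked directly.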
+func dbHasLegacyReceipts(db ethdb.Database, firstIdx uint64) (bool, uint64, error) {
+ // Check first block for legacy receipt format
+ numAncients, err := db.Ancients()
+ if err != nil {
+ return false, 0, err
+ }
+ if numAncients < 1 {
+ return false, 0, nil
+ }
+ if firstIdx >= numAncients {
+ return false, firstIdx, nil
+ }
+ var (
+ legacy bool
+ blob []byte
+		emptyRLPList = []byte{192} // 0xc0, the RLP encoding of an empty list
+ )
+ // Find first block with non-empty receipt, only if
+ // the index is not already provided.
+ if firstIdx == 0 {
+ for i := uint64(0); i < numAncients; i++ {
+ blob, err = db.Ancient("receipts", i)
+ if err != nil {
+ return false, 0, err
+ }
+ if len(blob) == 0 {
+ continue
+ }
+ if !bytes.Equal(blob, emptyRLPList) {
+ firstIdx = i
+ break
+ }
+ }
+ }
+ // Is first non-empty receipt legacy?
+ first, err := db.Ancient("receipts", firstIdx)
+ if err != nil {
+ return false, 0, err
+ }
+ legacy, err = types.IsLegacyStoredReceipts(first)
+ return legacy, firstIdx, err
+}
diff --git a/cmd/platon/main.go b/cmd/platon/main.go
index 4dcc714ecd..3e0482654b 100644
--- a/cmd/platon/main.go
+++ b/cmd/platon/main.go
@@ -96,6 +96,7 @@ var (
utils.CacheSnapshotFlag,
utils.CacheTrieDBFlag,
utils.CachePreimagesFlag,
+ utils.FDLimitFlag,
utils.ListenPortFlag,
utils.MaxPeersFlag,
utils.MaxConsensusPeersFlag,
@@ -127,6 +128,9 @@ var (
utils.HTTPListenAddrFlag,
utils.HTTPPortFlag,
utils.HTTPCORSDomainFlag,
+ utils.AuthHostFlag,
+ utils.AuthPortFlag,
+ utils.JWTSecretFlag,
utils.HTTPVirtualHostsFlag,
utils.GraphQLEnabledFlag,
utils.GraphQLCORSDomainFlag,
diff --git a/cmd/platon/snapshot.go b/cmd/platon/snapshot.go
index bde7773e7c..d575803d05 100644
--- a/cmd/platon/snapshot.go
+++ b/cmd/platon/snapshot.go
@@ -333,8 +333,7 @@ func traverseState(ctx *cli.Context) error {
}
}
if !bytes.Equal(acc.CodeHash, emptyCode) {
- code := rawdb.ReadCode(chaindb, common.BytesToHash(acc.CodeHash))
- if len(code) == 0 {
+ if !rawdb.HasCode(chaindb, common.BytesToHash(acc.CodeHash)) {
log.Error("Code is missing", "hash", common.BytesToHash(acc.CodeHash))
return errors.New("missing code")
}
@@ -405,11 +404,10 @@ func traverseRawState(ctx *cli.Context) error {
nodes += 1
node := accIter.Hash()
+	// Check the presence of the non-empty hash node (embedded nodes don't
+	// have their own hash).
if node != (common.Hash{}) {
- // Check the present for non-empty hash node(embedded node doesn't
- // have their own hash).
- blob := rawdb.ReadTrieNode(chaindb, node)
- if len(blob) == 0 {
+ if !rawdb.HasTrieNode(chaindb, node) {
log.Error("Missing trie node(account)", "hash", node)
return errors.New("missing account")
}
@@ -453,8 +451,7 @@ func traverseRawState(ctx *cli.Context) error {
}
}
if !bytes.Equal(acc.CodeHash, emptyCode) {
- code := rawdb.ReadCode(chaindb, common.BytesToHash(acc.CodeHash))
- if len(code) == 0 {
+ if !rawdb.HasCode(chaindb, common.BytesToHash(acc.CodeHash)) {
log.Error("Code is missing", "account", common.BytesToHash(accIter.LeafKey()))
return errors.New("missing code")
}
diff --git a/cmd/platon/usage.go b/cmd/platon/usage.go
index 1f4033e6cd..65484cbb22 100644
--- a/cmd/platon/usage.go
+++ b/cmd/platon/usage.go
@@ -78,6 +78,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{
utils.CacheSnapshotFlag,
utils.CacheTrieDBFlag,
utils.CachePreimagesFlag,
+ utils.FDLimitFlag,
},
},
{
@@ -107,6 +108,9 @@ var AppHelpFlagGroups = []flags.FlagGroup{
utils.WSApiFlag,
utils.WSPathPrefixFlag,
utils.WSAllowedOriginsFlag,
+ utils.JWTSecretFlag,
+ utils.AuthHostFlag,
+ utils.AuthPortFlag,
utils.GraphQLEnabledFlag,
utils.GraphQLCORSDomainFlag,
utils.GraphQLVirtualHostsFlag,
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index d8a81ba536..570da5c262 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -310,6 +310,10 @@ var (
Name: "cache.preimages",
Usage: "Enable recording the SHA3/keccak preimages of trie keys",
}
+ FDLimitFlag = cli.IntFlag{
+ Name: "fdlimit",
+ Usage: "Raise the open file descriptor resource limit (default = system fd limit)",
+ }
MinerGasPriceFlag = BigFlag{
Name: "miner.gasprice",
Usage: "Minimum gas price for mining a transaction",
@@ -345,6 +349,21 @@ var (
Usage: "Sets a cap on transaction fee (in ether) that can be sent via the RPC APIs (0 = no cap)",
Value: ethconfig.Defaults.RPCTxFeeCap,
}
+ // Authenticated RPC HTTP settings
+ AuthHostFlag = cli.StringFlag{
+ Name: "authrpc.host",
+ Usage: "Listening address for authenticated APIs",
+ Value: node.DefaultConfig.AuthHost,
+ }
+ AuthPortFlag = cli.IntFlag{
+ Name: "authrpc.port",
+ Usage: "Listening port for authenticated APIs",
+ Value: node.DefaultConfig.AuthPort,
+ }
+ JWTSecretFlag = cli.StringFlag{
+ Name: "authrpc.jwtsecret",
+ Usage: "Path to a JWT secret to use for authenticated RPC endpoints",
+ }
// Logging and debug settings
EthStatsURLFlag = cli.StringFlag{
Name: "ethstats",
@@ -855,6 +874,13 @@ func setHTTP(ctx *cli.Context, cfg *node.Config) {
if ctx.GlobalIsSet(HTTPPortFlag.Name) {
cfg.HTTPPort = ctx.GlobalInt(HTTPPortFlag.Name)
}
+ if ctx.GlobalIsSet(AuthHostFlag.Name) {
+ cfg.AuthHost = ctx.GlobalString(AuthHostFlag.Name)
+ }
+ if ctx.GlobalIsSet(AuthPortFlag.Name) {
+ cfg.AuthPort = ctx.GlobalInt(AuthPortFlag.Name)
+ }
+
if ctx.GlobalIsSet(HTTPCORSDomainFlag.Name) {
cfg.HTTPCors = SplitAndTrim(ctx.GlobalString(HTTPCORSDomainFlag.Name))
}
@@ -943,11 +969,24 @@ func setIPC(ctx *cli.Context, cfg *node.Config) {
// MakeDatabaseHandles raises the number of allowed file handles per process
// and returns half of the allowance to assign to the database.
-func MakeDatabaseHandles() int {
+func MakeDatabaseHandles(max int) int {
limit, err := fdlimit.Maximum()
if err != nil {
Fatalf("Failed to retrieve file descriptor allowance: %v", err)
}
+ switch {
+ case max == 0:
+ // User didn't specify a meaningful value, use system limits
+ case max < 128:
+ // User specified something unhealthy, just use system defaults
+ log.Error("File descriptor limit invalid (<128)", "had", max, "updated", limit)
+ case max > limit:
+ // User requested more than the OS allows, notify that we can't allocate it
+ log.Warn("Requested file descriptors denied by OS", "req", max, "limit", limit)
+ default:
+ // User limit is meaningful and within allowed range, use that
+ limit = max
+ }
raised, err := fdlimit.Raise(uint64(limit))
if err != nil {
Fatalf("Failed to raise file descriptor allowance: %v", err)
@@ -1067,6 +1106,10 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) {
cfg.DataDir = filepath.Join(node.DefaultDataDir(), "testnet")
}
+ if ctx.GlobalIsSet(JWTSecretFlag.Name) {
+ cfg.JWTSecret = ctx.GlobalString(JWTSecretFlag.Name)
+ }
+
if ctx.GlobalIsSet(KeyStoreDirFlag.Name) {
cfg.KeyStoreDir = ctx.GlobalString(KeyStoreDirFlag.Name)
}
@@ -1191,7 +1234,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
if ctx.GlobalIsSet(CacheTrieRejournalFlag.Name) {
cfg.TrieCleanCacheRejournal = ctx.GlobalDuration(CacheTrieRejournalFlag.Name)
}
- cfg.DatabaseHandles = MakeDatabaseHandles()
+ cfg.DatabaseHandles = MakeDatabaseHandles(ctx.GlobalInt(FDLimitFlag.Name))
if ctx.GlobalIsSet(AncientFlag.Name) {
cfg.DatabaseFreezer = ctx.GlobalString(AncientFlag.Name)
}
@@ -1346,17 +1389,17 @@ func SetCbft(ctx *cli.Context, cfg *types.OptionsConfig, nodeCfg *node.Config) {
}
// RegisterEthService adds an Ethereum client to the stack.
-func RegisterEthService(stack *node.Node, cfg *ethconfig.Config) ethapi.Backend {
+func RegisterEthService(stack *node.Node, cfg *ethconfig.Config) (ethapi.Backend, *eth.Ethereum) {
if cfg.SyncMode == downloader.LightSync {
Fatalf("Failed to register the Platon service: not les")
- return nil
+ return nil, nil
} else {
backend, err := eth.New(stack, cfg)
if err != nil {
Fatalf("Failed to register the PlatON service: %v", err)
}
stack.RegisterAPIs(tracers.APIs(backend.APIBackend))
- return backend.APIBackend
+ return backend.APIBackend, backend
}
}
@@ -1464,7 +1507,7 @@ func SplitTagsFlag(tagsFlag string) map[string]string {
func MakeChainDatabase(ctx *cli.Context, stack *node.Node, readonly bool) ethdb.Database {
var (
cache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheDatabaseFlag.Name) / 100
- handles = MakeDatabaseHandles()
+ handles = MakeDatabaseHandles(ctx.GlobalInt(FDLimitFlag.Name))
)
name := "chaindata"
if ctx.GlobalString(SyncModeFlag.Name) == "light" {
diff --git a/core/blockchain.go b/core/blockchain.go
index 2f8f2cf999..e91eec584c 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -633,7 +633,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bo
if num+1 <= frozen {
		// Truncate all related data (header, total difficulty, body, receipt
		// and canonical hash) from the ancient store.
- if err := bc.db.TruncateAncients(num); err != nil {
+ if err := bc.db.TruncateHead(num); err != nil {
log.Crit("Failed to truncate ancient data", "number", num, "err", err)
}
// Remove the hash <-> number mapping from the active store.
@@ -1083,7 +1083,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
size += int64(batch.ValueSize())
if err = batch.Write(); err != nil {
fastBlock := bc.CurrentFastBlock().NumberU64()
- if err := bc.db.TruncateAncients(fastBlock + 1); err != nil {
+ if err := bc.db.TruncateHead(fastBlock + 1); err != nil {
log.Error("Can't truncate ancient store after failed insert", "err", err)
}
return 0, err
@@ -1101,7 +1101,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
if !updateHead(blockChain[len(blockChain)-1]) {
// We end up here if the header chain has reorg'ed, and the blocks/receipts
// don't match the canonical chain.
- if err := bc.db.TruncateAncients(previousFastBlock + 1); err != nil {
+ if err := bc.db.TruncateHead(previousFastBlock + 1); err != nil {
log.Error("Can't truncate ancient store after failed insert", "err", err)
}
return 0, errSideChainReceipts
@@ -1968,6 +1968,9 @@ Error: %v
// of the header retrieval mechanisms already need to verify nonces, as well as
// because nonces can be verified sparsely, not needing to check each.
func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
+ if len(chain) == 0 {
+ return 0, nil
+ }
start := time.Now()
if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil {
return i, err
diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go
index 0382e62d64..34dafed051 100644
--- a/core/rawdb/accessors_chain.go
+++ b/core/rawdb/accessors_chain.go
@@ -86,8 +86,8 @@ type NumberHash struct {
Hash common.Hash
}
-// ReadAllHashes retrieves all the hashes assigned to blocks at a certain heights,
-// both canonical and reorged forks included.
+// ReadAllHashesInRange retrieves all the hashes assigned to blocks at certain
+// heights, both canonical and reorged forks included.
// This method considers both limits to be _inclusive_.
func ReadAllHashesInRange(db ethdb.Iteratee, first, last uint64) []*NumberHash {
var (
@@ -699,7 +699,7 @@ func WriteBlock(db ethdb.KeyValueWriter, block *types.Block) {
WriteHeader(db, block.Header())
}
-// WriteAncientBlock writes entire block data into ancient store and returns the total written size.
+// WriteAncientBlocks writes entire block data into ancient store and returns the total written size.
func WriteAncientBlocks(db ethdb.AncientWriter, blocks []*types.Block, receipts []types.Receipts) (int64, error) {
var (
stReceipts []*types.ReceiptForStorage
diff --git a/core/rawdb/accessors_state.go b/core/rawdb/accessors_state.go
index 8ea7597f66..2faf0f8d65 100644
--- a/core/rawdb/accessors_state.go
+++ b/core/rawdb/accessors_state.go
@@ -28,17 +28,6 @@ func ReadPreimage(db ethdb.KeyValueReader, hash common.Hash) []byte {
return data
}
-// WritePreimages writes the provided set of preimages to the database.
-func WritePreimages(db ethdb.KeyValueWriter, preimages map[common.Hash][]byte) {
- for hash, preimage := range preimages {
- if err := db.Put(preimageKey(hash), preimage); err != nil {
- log.Crit("Failed to store trie preimage", "err", err)
- }
- }
- preimageCounter.Inc(int64(len(preimages)))
- preimageHitCounter.Inc(int64(len(preimages)))
-}
-
// ReadCode retrieves the contract code of the provided code hash.
func ReadCode(db ethdb.KeyValueReader, hash common.Hash) []byte {
// Try with the prefixed code scheme first, if not then try with legacy
@@ -47,8 +36,8 @@ func ReadCode(db ethdb.KeyValueReader, hash common.Hash) []byte {
if len(data) != 0 {
return data
}
- data2, _ := db.Get(hash[:])
- return data2
+ data, _ = db.Get(hash.Bytes())
+ return data
}
// ReadCodeWithPrefix retrieves the contract code of the provided code hash.
@@ -59,6 +48,24 @@ func ReadCodeWithPrefix(db ethdb.KeyValueReader, hash common.Hash) []byte {
return data
}
+// ReadTrieNode retrieves the trie node of the provided hash.
+func ReadTrieNode(db ethdb.KeyValueReader, hash common.Hash) []byte {
+ data, _ := db.Get(hash.Bytes())
+ return data
+}
+
+// HasCode checks if the contract code corresponding to the
+// provided code hash is present in the db.
+func HasCode(db ethdb.KeyValueReader, hash common.Hash) bool {
+ // Try with the prefixed code scheme first, if not then try with legacy
+ // scheme.
+ if ok := HasCodeWithPrefix(db, hash); ok {
+ return true
+ }
+ ok, _ := db.Has(hash.Bytes())
+ return ok
+}
+
// HasCodeWithPrefix checks if the contract code corresponding to the
// provided code hash is present in the db. This function will only check
// presence using the prefix-scheme.
@@ -67,30 +74,28 @@ func HasCodeWithPrefix(db ethdb.KeyValueReader, hash common.Hash) bool {
return ok
}
-// WriteCode writes the provided contract code database.
-func WriteCode(db ethdb.KeyValueWriter, hash common.Hash, code []byte) {
- if err := db.Put(codeKey(hash), code); err != nil {
- log.Crit("Failed to store contract code", "err", err)
- }
+// HasTrieNode checks if the trie node with the provided hash is present in db.
+func HasTrieNode(db ethdb.KeyValueReader, hash common.Hash) bool {
+ ok, _ := db.Has(hash.Bytes())
+ return ok
}
-// DeleteCode deletes the specified contract code from the database.
-func DeleteCode(db ethdb.KeyValueWriter, hash common.Hash) {
- if err := db.Delete(codeKey(hash)); err != nil {
- log.Crit("Failed to delete contract code", "err", err)
+// WritePreimages writes the provided set of preimages to the database.
+func WritePreimages(db ethdb.KeyValueWriter, preimages map[common.Hash][]byte) {
+ for hash, preimage := range preimages {
+ if err := db.Put(preimageKey(hash), preimage); err != nil {
+ log.Crit("Failed to store trie preimage", "err", err)
+ }
}
+ preimageCounter.Inc(int64(len(preimages)))
+ preimageHitCounter.Inc(int64(len(preimages)))
}
-// ReadTrieNode retrieves the trie node of the provided hash.
-func ReadTrieNode(db ethdb.KeyValueReader, hash common.Hash) []byte {
- data, _ := db.Get(hash.Bytes())
- return data
-}
-
-// HasTrieNode checks if the trie node with the provided hash is present in db.
-func HasTrieNode(db ethdb.KeyValueReader, hash common.Hash) bool {
- ok, _ := db.Has(hash.Bytes())
- return ok
+// WriteCode writes the provided contract code into the database.
+func WriteCode(db ethdb.KeyValueWriter, hash common.Hash, code []byte) {
+ if err := db.Put(codeKey(hash), code); err != nil {
+ log.Crit("Failed to store contract code", "err", err)
+ }
}
// WriteTrieNode writes the provided trie node into the database.
@@ -100,6 +105,13 @@ func WriteTrieNode(db ethdb.KeyValueWriter, hash common.Hash, node []byte) {
}
}
+// DeleteCode deletes the specified contract code from the database.
+func DeleteCode(db ethdb.KeyValueWriter, hash common.Hash) {
+ if err := db.Delete(codeKey(hash)); err != nil {
+ log.Crit("Failed to delete contract code", "err", err)
+ }
+}
+
// DeleteTrieNode deletes the specified trie node from the database.
func DeleteTrieNode(db ethdb.KeyValueWriter, hash common.Hash) {
if err := db.Delete(hash.Bytes()); err != nil {
diff --git a/core/rawdb/database.go b/core/rawdb/database.go
index 3f016f0afb..5176e4ad0f 100644
--- a/core/rawdb/database.go
+++ b/core/rawdb/database.go
@@ -99,6 +99,11 @@ func (db *nofreezedb) Ancients() (uint64, error) {
return 0, errNotSupported
}
+// Tail returns an error as we don't have a backing chain freezer.
+func (db *nofreezedb) Tail() (uint64, error) {
+ return 0, errNotSupported
+}
+
// AncientSize returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) AncientSize(kind string) (uint64, error) {
return 0, errNotSupported
@@ -109,8 +114,13 @@ func (db *nofreezedb) ModifyAncients(func(ethdb.AncientWriteOp) error) (int64, e
return 0, errNotSupported
}
-// TruncateAncients returns an error as we don't have a backing chain freezer.
-func (db *nofreezedb) TruncateAncients(items uint64) error {
+// TruncateHead returns an error as we don't have a backing chain freezer.
+func (db *nofreezedb) TruncateHead(items uint64) error {
+ return errNotSupported
+}
+
+// TruncateTail returns an error as we don't have a backing chain freezer.
+func (db *nofreezedb) TruncateTail(items uint64) error {
return errNotSupported
}
@@ -135,6 +145,12 @@ func (db *nofreezedb) ReadAncients(fn func(reader ethdb.AncientReader) error) (e
return fn(db)
}
+// MigrateTable processes the entries in a given table in sequence
+// converting them to a new format if they're of an old format.
+func (db *nofreezedb) MigrateTable(kind string, convert convertLegacyFn) error {
+ return errNotSupported
+}
+
// NewDatabase creates a high level database on top of a given key-value data
// store without a freezer moving immutable chain segments into cold storage.
func NewDatabase(db ethdb.KeyValueStore) ethdb.Database {
@@ -211,7 +227,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace st
		// Block #1 is still in the database, we're allowed to init a new freezer
}
// Otherwise, the head header is still the genesis, we're allowed to init a new
- // feezer.
+ // freezer.
}
}
// Freezer is consistent with the key-value database, permit combining the two
diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go
index d8dbce4a22..7ce696f41e 100644
--- a/core/rawdb/freezer.go
+++ b/core/rawdb/freezer.go
@@ -19,6 +19,7 @@ package rawdb
import (
"errors"
"fmt"
+ "io/ioutil"
"math"
"os"
"path/filepath"
@@ -67,7 +68,7 @@ const (
freezerTableSize = 2 * 1000 * 1000 * 1000
)
-// freezer is an memory mapped append-only database to store immutable chain data
+// freezer is a memory mapped append-only database to store immutable chain data
// into flat files:
//
// - The append only nature ensures that disk writes are minimized.
@@ -79,6 +80,7 @@ type freezer struct {
// 64-bit aligned fields can be atomic. The struct is guaranteed to be so aligned,
// so take advantage of that (https://golang.org/pkg/sync/atomic/#pkg-note-BUG).
frozen uint64 // Number of blocks already frozen
+ tail uint64 // Number of the first stored item in the freezer
threshold uint64 // Number of recent blocks not to freeze (params.FullImmutabilityThreshold apart from tests)
// This lock synchronizes writers and the truncate operation, as well as
@@ -227,6 +229,11 @@ func (f *freezer) Ancients() (uint64, error) {
return atomic.LoadUint64(&f.frozen), nil
}
+// Tail returns the number of the first stored item in the freezer.
+func (f *freezer) Tail() (uint64, error) {
+ return atomic.LoadUint64(&f.tail), nil
+}
+
// AncientSize returns the ancient size of the specified category.
func (f *freezer) AncientSize(kind string) (uint64, error) {
// This needs the write lock to avoid data races on table fields.
@@ -262,7 +269,7 @@ func (f *freezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize
if err != nil {
// The write operation has failed. Go back to the previous item position.
for name, table := range f.tables {
- err := table.truncate(prevItem)
+ err := table.truncateHead(prevItem)
if err != nil {
log.Error("Freezer table roll-back failed", "table", name, "index", prevItem, "err", err)
}
@@ -282,8 +289,8 @@ func (f *freezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize
return writeSize, nil
}
-// TruncateAncients discards any recent data above the provided threshold number.
-func (f *freezer) TruncateAncients(items uint64) error {
+// TruncateHead discards any recent data above the provided threshold number.
+func (f *freezer) TruncateHead(items uint64) error {
if f.readonly {
return errReadOnly
}
@@ -294,7 +301,7 @@ func (f *freezer) TruncateAncients(items uint64) error {
return nil
}
for _, table := range f.tables {
- if err := table.truncate(items); err != nil {
+ if err := table.truncateHead(items); err != nil {
return err
}
}
@@ -302,6 +309,26 @@ func (f *freezer) TruncateAncients(items uint64) error {
return nil
}
+// TruncateTail discards any ancient data below the provided threshold number.
+func (f *freezer) TruncateTail(tail uint64) error {
+ if f.readonly {
+ return errReadOnly
+ }
+ f.writeLock.Lock()
+ defer f.writeLock.Unlock()
+
+ if atomic.LoadUint64(&f.tail) >= tail {
+ return nil
+ }
+ for _, table := range f.tables {
+ if err := table.truncateTail(tail); err != nil {
+ return err
+ }
+ }
+ atomic.StoreUint64(&f.tail, tail)
+ return nil
+}
+
// Sync flushes all data tables to disk.
func (f *freezer) Sync() error {
var errs []error
@@ -345,19 +372,30 @@ func (f *freezer) validate() error {
// repair truncates all data tables to the same length.
func (f *freezer) repair() error {
- min := uint64(math.MaxUint64)
+ var (
+ head = uint64(math.MaxUint64)
+ tail = uint64(0)
+ )
for _, table := range f.tables {
items := atomic.LoadUint64(&table.items)
- if min > items {
- min = items
+ if head > items {
+ head = items
+ }
+ hidden := atomic.LoadUint64(&table.itemHidden)
+ if hidden > tail {
+ tail = hidden
}
}
for _, table := range f.tables {
- if err := table.truncate(min); err != nil {
+ if err := table.truncateHead(head); err != nil {
+ return err
+ }
+ if err := table.truncateTail(tail); err != nil {
return err
}
}
- atomic.StoreUint64(&f.frozen, min)
+ atomic.StoreUint64(&f.frozen, head)
+ atomic.StoreUint64(&f.tail, tail)
return nil
}
@@ -575,3 +613,116 @@ func (f *freezer) freezeRange(nfdb *nofreezedb, number, limit uint64) (hashes []
return hashes, err
}
+
+// convertLegacyFn takes a raw freezer entry in an older format and
+// returns it in the new format.
+type convertLegacyFn = func([]byte) ([]byte, error)
+
+// MigrateTable processes the entries in a given table in sequence
+// converting them to a new format if they're of an old format.
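+// The convert callback receives the raw blob of a single entry and returns its
+// re-encoded replacement, e.g. types.ConvertLegacyStoredReceipts for the
+// "receipts" table.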
+func (f *freezer) MigrateTable(kind string, convert convertLegacyFn) error {
+ if f.readonly {
+ return errReadOnly
+ }
+ f.writeLock.Lock()
+ defer f.writeLock.Unlock()
+
+ table, ok := f.tables[kind]
+ if !ok {
+ return errUnknownTable
+ }
+ // forEach iterates every entry in the table serially and in order, calling `fn`
+ // with the item as argument. If `fn` returns an error the iteration stops
+ // and that error will be returned.
+ forEach := func(t *freezerTable, offset uint64, fn func(uint64, []byte) error) error {
+ var (
+ items = atomic.LoadUint64(&t.items)
+ batchSize = uint64(1024)
+ maxBytes = uint64(1024 * 1024)
+ )
+ for i := offset; i < items; {
+ if i+batchSize > items {
+ batchSize = items - i
+ }
+ data, err := t.RetrieveItems(i, batchSize, maxBytes)
+ if err != nil {
+ return err
+ }
+ for j, item := range data {
+ if err := fn(i+uint64(j), item); err != nil {
+ return err
+ }
+ }
+ i += uint64(len(data))
+ }
+ return nil
+ }
+ // TODO(s1na): This is a sanity-check since as of now no process does tail-deletion. But the migration
+ // process assumes no deletion at tail and needs to be modified to account for that.
+ if table.itemOffset > 0 || table.itemHidden > 0 {
+ return fmt.Errorf("migration not supported for tail-deleted freezers")
+ }
+ ancientsPath := filepath.Dir(table.index.Name())
+ // Set up new dir for the migrated table, the content of which
+ // we'll at the end move over to the ancients dir.
+ migrationPath := filepath.Join(ancientsPath, "migration")
+ newTable, err := NewFreezerTable(migrationPath, kind, FreezerNoSnappy[kind], false)
+ if err != nil {
+ return err
+ }
+ var (
+ batch = newTable.newBatch()
+ out []byte
+ start = time.Now()
+ logged = time.Now()
+ offset = newTable.items
+ )
+ if offset > 0 {
+ log.Info("found previous migration attempt", "migrated", offset)
+ }
+ // Iterate through entries and transform them
+ if err := forEach(table, offset, func(i uint64, blob []byte) error {
+ if i%10000 == 0 && time.Since(logged) > 16*time.Second {
+ log.Info("Processing legacy elements", "count", i, "elapsed", common.PrettyDuration(time.Since(start)))
+ logged = time.Now()
+ }
+ out, err = convert(blob)
+ if err != nil {
+ return err
+ }
+ if err := batch.AppendRaw(i, out); err != nil {
+ return err
+ }
+ return nil
+ }); err != nil {
+ return err
+ }
+ if err := batch.commit(); err != nil {
+ return err
+ }
+ log.Info("Replacing old table files with migrated ones", "elapsed", common.PrettyDuration(time.Since(start)))
+ // Release and delete old table files. Note this won't
+ // delete the index file.
+ table.releaseFilesAfter(0, true)
+
+ if err := newTable.Close(); err != nil {
+ return err
+ }
+ files, err := ioutil.ReadDir(migrationPath)
+ if err != nil {
+ return err
+ }
+ // Move migrated files to ancients dir.
+ for _, f := range files {
+ // This will replace the old index file as a side-effect.
+ if err := os.Rename(filepath.Join(migrationPath, f.Name()), filepath.Join(ancientsPath, f.Name())); err != nil {
+ return err
+ }
+ }
+ // Delete by now empty dir.
+ if err := os.Remove(migrationPath); err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/core/rawdb/freezer_batch.go b/core/rawdb/freezer_batch.go
index 7cb83401a5..ed483e39e6 100644
--- a/core/rawdb/freezer_batch.go
+++ b/core/rawdb/freezer_batch.go
@@ -191,7 +191,7 @@ func (batch *freezerTableBatch) commit() error {
dataSize := int64(len(batch.dataBuffer))
batch.dataBuffer = batch.dataBuffer[:0]
- // Write index.
+ // Write indices.
_, err = batch.t.index.Write(batch.indexBuffer)
if err != nil {
return err
diff --git a/core/rawdb/freezer_meta.go b/core/rawdb/freezer_meta.go
new file mode 100644
index 0000000000..97409e33ca
--- /dev/null
+++ b/core/rawdb/freezer_meta.go
@@ -0,0 +1,109 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see
+
+package rawdb
+
+import (
+ "io"
+ "os"
+
+ "github.com/PlatONnetwork/PlatON-Go/log"
+ "github.com/PlatONnetwork/PlatON-Go/rlp"
+)
+
+const freezerVersion = 1 // The initial version tag of freezer table metadata
+
+// freezerTableMeta wraps all the metadata of the freezer table.
+type freezerTableMeta struct {
+ // Version is the versioning descriptor of the freezer table.
+ Version uint16
+
+ // VirtualTail indicates how many items have been marked as deleted.
+ // Its value is equal to the number of items removed from the table
+ // plus the number of items hidden in the table, so it should never
+ // be lower than the "actual tail".
+ VirtualTail uint64
+}
+
+// newMetadata initializes the metadata object with the given virtual tail.
+func newMetadata(tail uint64) *freezerTableMeta {
+ return &freezerTableMeta{
+ Version: freezerVersion,
+ VirtualTail: tail,
+ }
+}
+
+// readMetadata reads the metadata of the freezer table from the
+// given metadata file.
+func readMetadata(file *os.File) (*freezerTableMeta, error) {
+ _, err := file.Seek(0, io.SeekStart)
+ if err != nil {
+ return nil, err
+ }
+ var meta freezerTableMeta
+ if err := rlp.Decode(file, &meta); err != nil {
+ return nil, err
+ }
+ return &meta, nil
+}
+
+// writeMetadata writes the metadata of the freezer table into the
+// given metadata file.
+func writeMetadata(file *os.File, meta *freezerTableMeta) error {
+ _, err := file.Seek(0, io.SeekStart)
+ if err != nil {
+ return err
+ }
+ return rlp.Encode(file, meta)
+}
+
+// loadMetadata loads the metadata from the given metadata file.
+// Initializes the metadata file with the given "actual tail" if
+// it's empty.
+func loadMetadata(file *os.File, tail uint64) (*freezerTableMeta, error) {
+ stat, err := file.Stat()
+ if err != nil {
+ return nil, err
+ }
+ // Write the metadata with the given actual tail into metadata file
+ // if it's non-existent. There are two possible scenarios here:
+ // - the freezer table is empty
+ // - the freezer table is legacy
+ // In both cases, write the meta into the file with the actual tail
+ // as the virtual tail.
+ if stat.Size() == 0 {
+ m := newMetadata(tail)
+ if err := writeMetadata(file, m); err != nil {
+ return nil, err
+ }
+ return m, nil
+ }
+ m, err := readMetadata(file)
+ if err != nil {
+ return nil, err
+ }
+ // Update the virtual tail with the given actual tail if it's even
+ // lower than it. Theoretically it shouldn't happen at all, print
+ // a warning here.
+ if m.VirtualTail < tail {
+ log.Warn("Updated virtual tail", "have", m.VirtualTail, "now", tail)
+ m.VirtualTail = tail
+ if err := writeMetadata(file, m); err != nil {
+ return nil, err
+ }
+ }
+ return m, nil
+}
diff --git a/core/rawdb/freezer_meta_test.go b/core/rawdb/freezer_meta_test.go
new file mode 100644
index 0000000000..191744a754
--- /dev/null
+++ b/core/rawdb/freezer_meta_test.go
@@ -0,0 +1,61 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+ "io/ioutil"
+ "os"
+ "testing"
+)
+
+func TestReadWriteFreezerTableMeta(t *testing.T) {
+ f, err := ioutil.TempFile(os.TempDir(), "*")
+ if err != nil {
+ t.Fatalf("Failed to create file %v", err)
+ }
+ err = writeMetadata(f, newMetadata(100))
+ if err != nil {
+ t.Fatalf("Failed to write metadata %v", err)
+ }
+ meta, err := readMetadata(f)
+ if err != nil {
+ t.Fatalf("Failed to read metadata %v", err)
+ }
+ if meta.Version != freezerVersion {
+ t.Fatalf("Unexpected version field")
+ }
+ if meta.VirtualTail != uint64(100) {
+ t.Fatalf("Unexpected virtual tail field")
+ }
+}
+
+func TestInitializeFreezerTableMeta(t *testing.T) {
+ f, err := ioutil.TempFile(os.TempDir(), "*")
+ if err != nil {
+ t.Fatalf("Failed to create file %v", err)
+ }
+ meta, err := loadMetadata(f, uint64(100))
+ if err != nil {
+ t.Fatalf("Failed to read metadata %v", err)
+ }
+ if meta.Version != freezerVersion {
+ t.Fatalf("Unexpected version field")
+ }
+ if meta.VirtualTail != uint64(100) {
+ t.Fatalf("Unexpected virtual tail field")
+ }
+}
diff --git a/core/rawdb/freezer_table.go b/core/rawdb/freezer_table.go
index d2038b8fc9..ecbf7a391f 100644
--- a/core/rawdb/freezer_table.go
+++ b/core/rawdb/freezer_table.go
@@ -48,7 +48,7 @@ var (
)
// indexEntry contains the number/id of the file that the data resides in, as well as the
-// offset within the file to the end of the data
+// offset within the file to the end of the data.
// In serialized form, the filenum is stored as uint16.
type indexEntry struct {
filenum uint32 // stored as uint16 ( 2 bytes)
@@ -58,10 +58,9 @@ type indexEntry struct {
const indexEntrySize = 6
// unmarshalBinary deserializes binary b into the rawIndex entry.
-func (i *indexEntry) unmarshalBinary(b []byte) error {
+func (i *indexEntry) unmarshalBinary(b []byte) {
i.filenum = uint32(binary.BigEndian.Uint16(b[:2]))
i.offset = binary.BigEndian.Uint32(b[2:6])
- return nil
}
// append adds the encoded entry to the end of b.
@@ -76,14 +75,14 @@ func (i *indexEntry) append(b []byte) []byte {
// bounds returns the start- and end- offsets, and the file number of where to
// read the data item marked by the two index entries. The two entries are
// assumed to be sequential.
-func (start *indexEntry) bounds(end *indexEntry) (startOffset, endOffset, fileId uint32) {
- if start.filenum != end.filenum {
+func (i *indexEntry) bounds(end *indexEntry) (startOffset, endOffset, fileId uint32) {
+ if i.filenum != end.filenum {
// If a piece of data 'crosses' a data-file,
// it's actually in one piece on the second data-file.
// We return a zero-indexEntry for the second file as start
return 0, end.offset, end.filenum
}
- return start.offset, end.offset, end.filenum
+ return i.offset, end.offset, end.filenum
}
// freezerTable represents a single chained data table within the freezer (e.g. blocks).
@@ -93,7 +92,15 @@ type freezerTable struct {
// WARNING: The `items` field is accessed atomically. On 32 bit platforms, only
// 64-bit aligned fields can be atomic. The struct is guaranteed to be so aligned,
// so take advantage of that (https://golang.org/pkg/sync/atomic/#pkg-note-BUG).
- items uint64 // Number of items stored in the table (including items removed from tail)
+ items uint64 // Number of items stored in the table (including items removed from tail)
+ itemOffset uint64 // Number of items removed from the table
+
+ // itemHidden is the number of items marked as deleted. Tail deletion is
+ // only supported at file level which means the actual deletion will be
+ // delayed until the entire data file is marked as deleted. Before that
+ // these items will be hidden to prevent being visited again. The value
+ // should never be lower than itemOffset.
+ itemHidden uint64
noCompression bool // if true, disables snappy compression. Note: does not work retroactively
readonly bool
@@ -102,14 +109,11 @@ type freezerTable struct {
path string
head *os.File // File descriptor for the data head of the table
+ index *os.File // File descriptor for the indexEntry file of the table
+ meta *os.File // File descriptor for metadata of the table
files map[uint32]*os.File // open files
headId uint32 // number of the currently active head file
tailId uint32 // number of the earliest file
- index *os.File // File descriptor for the indexEntry file of the table
-
- // In the case that old items are deleted (from the tail), we use itemOffset
- // to count how many historic items have gone missing.
- itemOffset uint32 // Offset (number of discarded items)
headBytes int64 // Number of bytes written to the head file
readMeter metrics.Meter // Meter for measuring the effective amount of data read
@@ -125,46 +129,8 @@ func NewFreezerTable(path, name string, disableSnappy, readonly bool) (*freezerT
return newTable(path, name, metrics.NilMeter{}, metrics.NilMeter{}, metrics.NilGauge{}, freezerTableSize, disableSnappy, readonly)
}
-// openFreezerFileForAppend opens a freezer table file and seeks to the end
-func openFreezerFileForAppend(filename string) (*os.File, error) {
- // Open the file without the O_APPEND flag
- // because it has differing behaviour during Truncate operations
- // on different OS's
- file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0644)
- if err != nil {
- return nil, err
- }
- // Seek to end for append
- if _, err = file.Seek(0, io.SeekEnd); err != nil {
- return nil, err
- }
- return file, nil
-}
-
-// openFreezerFileForReadOnly opens a freezer table file for read only access
-func openFreezerFileForReadOnly(filename string) (*os.File, error) {
- return os.OpenFile(filename, os.O_RDONLY, 0644)
-}
-
-// openFreezerFileTruncated opens a freezer table making sure it is truncated
-func openFreezerFileTruncated(filename string) (*os.File, error) {
- return os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
-}
-
-// truncateFreezerFile resizes a freezer table file and seeks to the end
-func truncateFreezerFile(file *os.File, size int64) error {
- if err := file.Truncate(size); err != nil {
- return err
- }
- // Seek to end for append
- if _, err := file.Seek(0, io.SeekEnd); err != nil {
- return err
- }
- return nil
-}
-
// newTable opens a freezer table, creating the data and index files if they are
-// non existent. Both files are truncated to the shortest common length to ensure
+// non-existent. Both files are truncated to the shortest common length to ensure
// they don't go out of sync.
func newTable(path string, name string, readMeter metrics.Meter, writeMeter metrics.Meter, sizeGauge metrics.Gauge, maxFilesize uint32, noCompression, readonly bool) (*freezerTable, error) {
// Ensure the containing directory exists and open the indexEntry file
@@ -173,28 +139,47 @@ func newTable(path string, name string, readMeter metrics.Meter, writeMeter metr
}
var idxName string
if noCompression {
- // Raw idx
- idxName = fmt.Sprintf("%s.ridx", name)
+ idxName = fmt.Sprintf("%s.ridx", name) // raw index file
} else {
- // Compressed idx
- idxName = fmt.Sprintf("%s.cidx", name)
+ idxName = fmt.Sprintf("%s.cidx", name) // compressed index file
}
var (
- err error
- offsets *os.File
+ err error
+ index *os.File
+ meta *os.File
)
if readonly {
// Will fail if table doesn't exist
- offsets, err = openFreezerFileForReadOnly(filepath.Join(path, idxName))
+ index, err = openFreezerFileForReadOnly(filepath.Join(path, idxName))
+ if err != nil {
+ return nil, err
+ }
+ // TODO(rjl493456442) change it to read-only mode. Open the metadata file
+ // in rw mode. It's a temporary solution for now and should be changed
+ // whenever tail deletion is actually used. The reason for this hack is
+ // that the additional meta file for each freezer table was added in order
+ // to support tail deletion, but for most legacy nodes this file is missing.
+ // Opening it read-only would suddenly break lots of database-relevant
+ // commands, so the metadata file is always opened for mutation and nothing
+ // is written to it except during initialization.
+ meta, err = openFreezerFileForAppend(filepath.Join(path, fmt.Sprintf("%s.meta", name)))
+ if err != nil {
+ return nil, err
+ }
} else {
- offsets, err = openFreezerFileForAppend(filepath.Join(path, idxName))
- }
- if err != nil {
- return nil, err
+ index, err = openFreezerFileForAppend(filepath.Join(path, idxName))
+ if err != nil {
+ return nil, err
+ }
+ meta, err = openFreezerFileForAppend(filepath.Join(path, fmt.Sprintf("%s.meta", name)))
+ if err != nil {
+ return nil, err
+ }
}
// Create the table and repair any past inconsistency
tab := &freezerTable{
- index: offsets,
+ index: index,
+ meta: meta,
files: make(map[uint32]*os.File),
readMeter: readMeter,
writeMeter: writeMeter,
@@ -221,7 +206,7 @@ func newTable(path string, name string, readMeter metrics.Meter, writeMeter metr
return tab, nil
}
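
For orientation, a hedged sketch of how a caller inside package rawdb would open a table with this API (the helper name and directory are hypothetical):

```go
// openHeaders is a hypothetical helper showing the open path added in this
// patch: newTable creates <name>.cidx (or .ridx for uncompressed tables),
// the new <name>.meta file and the data files on demand, then runs repair()
// to bring index, meta and data back in sync.
func openHeaders(dir string) (*freezerTable, error) {
	return NewFreezerTable(dir, "headers", false /* noCompression */, false /* readonly */)
}
```
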
-// repair cross checks the head and the index file and truncates them to
+// repair cross-checks the head and the index file and truncates them to
// be in sync with each other after a potential crash / data loss.
func (t *freezerTable) repair() error {
// Create a temporary offset buffer to init files with and read indexEntry into
@@ -259,11 +244,27 @@ func (t *freezerTable) repair() error {
t.index.ReadAt(buffer, 0)
firstIndex.unmarshalBinary(buffer)
+ // Initialize the tail fields from the first stored index.
+ // The total number of removed items is represented as a uint32,
+ // which is not enough in theory but is enough in practice.
+ // TODO: use uint64 to represent total removed items.
t.tailId = firstIndex.filenum
- t.itemOffset = firstIndex.offset
+ t.itemOffset = uint64(firstIndex.offset)
+
+ // Load metadata from the file
+ meta, err := loadMetadata(t.meta, t.itemOffset)
+ if err != nil {
+ return err
+ }
+ t.itemHidden = meta.VirtualTail
- t.index.ReadAt(buffer, offsetsSize-indexEntrySize)
- lastIndex.unmarshalBinary(buffer)
+ // Read the last index, use the default value in case the freezer is empty
+ if offsetsSize == indexEntrySize {
+ lastIndex = indexEntry{filenum: t.tailId, offset: 0}
+ } else {
+ t.index.ReadAt(buffer, offsetsSize-indexEntrySize)
+ lastIndex.unmarshalBinary(buffer)
+ }
if t.readonly {
t.head, err = t.openFile(lastIndex.filenum, openFreezerFileForReadOnly)
} else {
@@ -279,7 +280,6 @@ func (t *freezerTable) repair() error {
// Keep truncating both files until they come in sync
contentExp = int64(lastIndex.offset)
-
for contentExp != contentSize {
// Truncate the head file to the last offset pointer
if contentExp < contentSize {
@@ -296,9 +296,16 @@ func (t *freezerTable) repair() error {
return err
}
offsetsSize -= indexEntrySize
- t.index.ReadAt(buffer, offsetsSize-indexEntrySize)
+
+ // Read the new head index, use the default value in case
+ // the freezer is already empty.
var newLastIndex indexEntry
- newLastIndex.unmarshalBinary(buffer)
+ if offsetsSize == indexEntrySize {
+ newLastIndex = indexEntry{filenum: t.tailId, offset: 0}
+ } else {
+ t.index.ReadAt(buffer, offsetsSize-indexEntrySize)
+ newLastIndex.unmarshalBinary(buffer)
+ }
// We might have slipped back into an earlier head-file here
if newLastIndex.filenum != lastIndex.filenum {
// Release earlier opened file
@@ -326,12 +333,21 @@ func (t *freezerTable) repair() error {
if err := t.head.Sync(); err != nil {
return err
}
+ if err := t.meta.Sync(); err != nil {
+ return err
+ }
}
// Update the item and byte counters and return
- t.items = uint64(t.itemOffset) + uint64(offsetsSize/indexEntrySize-1) // last indexEntry points to the end of the data file
+ t.items = t.itemOffset + uint64(offsetsSize/indexEntrySize-1) // last indexEntry points to the end of the data file
t.headBytes = contentSize
t.headId = lastIndex.filenum
+ // Delete any files left over from head deletion
+ t.releaseFilesAfter(t.headId, true)
+
+ // Delete any files left over from tail deletion
+ t.releaseFilesBefore(t.tailId, true)
+
// Close opened files and preopen all files
if err := t.preopen(); err != nil {
return err
@@ -347,6 +363,7 @@ func (t *freezerTable) repair() error {
func (t *freezerTable) preopen() (err error) {
// The repair might have already opened (some) files
t.releaseFilesAfter(0, false)
+
// Open all except head in RDONLY
for i := t.tailId; i < t.headId; i++ {
if _, err = t.openFile(i, openFreezerFileForReadOnly); err != nil {
@@ -362,16 +379,19 @@ func (t *freezerTable) preopen() (err error) {
return err
}
-// truncate discards any recent data above the provided threshold number.
-func (t *freezerTable) truncate(items uint64) error {
+// truncateHead discards any recent data above the provided threshold number.
+func (t *freezerTable) truncateHead(items uint64) error {
t.lock.Lock()
defer t.lock.Unlock()
- // If our item count is correct, don't do anything
+ // Ensure the given truncate target falls in the correct range
existing := atomic.LoadUint64(&t.items)
if existing <= items {
return nil
}
+ if items < atomic.LoadUint64(&t.itemHidden) {
+ return errors.New("truncation below tail")
+ }
// We need to truncate, save the old size for metrics tracking
oldSize, err := t.sizeNolock()
if err != nil {
@@ -383,17 +403,24 @@ func (t *freezerTable) truncate(items uint64) error {
log = t.logger.Warn // Only loud warn if we delete multiple items
}
log("Truncating freezer table", "items", existing, "limit", items)
- if err := truncateFreezerFile(t.index, int64(items+1)*indexEntrySize); err != nil {
+
+ // Truncate the index file first; the tail position is also taken into
+ // account when calculating the new freezer table length.
+ length := items - atomic.LoadUint64(&t.itemOffset)
+ if err := truncateFreezerFile(t.index, int64(length+1)*indexEntrySize); err != nil {
return err
}
// Calculate the new expected size of the data file and truncate it
- buffer := make([]byte, indexEntrySize)
- if _, err := t.index.ReadAt(buffer, int64(items*indexEntrySize)); err != nil {
- return err
- }
var expected indexEntry
- expected.unmarshalBinary(buffer)
-
+ if length == 0 {
+ expected = indexEntry{filenum: t.tailId, offset: 0}
+ } else {
+ buffer := make([]byte, indexEntrySize)
+ if _, err := t.index.ReadAt(buffer, int64(length*indexEntrySize)); err != nil {
+ return err
+ }
+ expected.unmarshalBinary(buffer)
+ }
// We might need to truncate back to older files
if expected.filenum != t.headId {
// If already open for reading, force-reopen for writing
@@ -422,7 +449,110 @@ func (t *freezerTable) truncate(items uint64) error {
return err
}
t.sizeGauge.Dec(int64(oldSize - newSize))
+ return nil
+}
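
The length arithmetic above is subtle now that the index no longer starts at item zero; a standalone sketch of the calculation (names are local to the example):

```go
// indexSizeAfterTruncateHead mirrors the `length+1` logic in truncateHead:
// entry zero of the index file encodes the tail, and one extra entry always
// marks the end of the last stored item, so a table holding `items` visible
// entries with `itemOffset` removed from the tail needs length+1 entries.
func indexSizeAfterTruncateHead(items, itemOffset uint64) int64 {
	const indexEntrySize = 6 // 2-byte file number + 4-byte offset
	length := items - itemOffset
	return int64(length+1) * indexEntrySize
}
```
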
+
+// truncateTail discards any old data below the provided threshold number.
+func (t *freezerTable) truncateTail(items uint64) error {
+ t.lock.Lock()
+ defer t.lock.Unlock()
+
+ // Ensure the given truncate target falls in the correct range
+ if atomic.LoadUint64(&t.itemHidden) >= items {
+ return nil
+ }
+ if atomic.LoadUint64(&t.items) < items {
+ return errors.New("truncation above head")
+ }
+ // Load the new tail index for the given tail position
+ var (
+ newTailId uint32
+ buffer = make([]byte, indexEntrySize)
+ )
+ if atomic.LoadUint64(&t.items) == items {
+ newTailId = t.headId
+ } else {
+ offset := items - atomic.LoadUint64(&t.itemOffset)
+ if _, err := t.index.ReadAt(buffer, int64((offset+1)*indexEntrySize)); err != nil {
+ return err
+ }
+ var newTail indexEntry
+ newTail.unmarshalBinary(buffer)
+ newTailId = newTail.filenum
+ }
+ // Update the virtual tail marker and hide these entries in the table.
+ atomic.StoreUint64(&t.itemHidden, items)
+ if err := writeMetadata(t.meta, newMetadata(items)); err != nil {
+ return err
+ }
+ // Hidden items still fall in the current tail file, no data file
+ // can be dropped.
+ if t.tailId == newTailId {
+ return nil
+ }
+ // Hidden items fall in an invalid range, return an error.
+ if t.tailId > newTailId {
+ return fmt.Errorf("invalid index, tail-file %d, item-file %d", t.tailId, newTailId)
+ }
+ // Hidden items exceed the current tail file, drop the relevant
+ // data files. We need to truncate, save the old size for metrics
+ // tracking.
+ oldSize, err := t.sizeNolock()
+ if err != nil {
+ return err
+ }
+ // Count how many items can be deleted from the file.
+ var (
+ newDeleted = items
+ deleted = atomic.LoadUint64(&t.itemOffset)
+ )
+ for current := items - 1; current >= deleted; current -= 1 {
+ if _, err := t.index.ReadAt(buffer, int64((current-deleted+1)*indexEntrySize)); err != nil {
+ return err
+ }
+ var pre indexEntry
+ pre.unmarshalBinary(buffer)
+ if pre.filenum != newTailId {
+ break
+ }
+ newDeleted = current
+ }
+ // Commit the metadata changes first, before manipulating
+ // the index file.
+ if err := t.meta.Sync(); err != nil {
+ return err
+ }
+ // Truncate the deleted index entries from the index file.
+ err = copyFrom(t.index.Name(), t.index.Name(), indexEntrySize*(newDeleted-deleted+1), func(f *os.File) error {
+ tailIndex := indexEntry{
+ filenum: newTailId,
+ offset: uint32(newDeleted),
+ }
+ _, err := f.Write(tailIndex.append(nil))
+ return err
+ })
+ if err != nil {
+ return err
+ }
+ // Reopen the modified index file to load the changes
+ if err := t.index.Close(); err != nil {
+ return err
+ }
+ t.index, err = openFreezerFileForAppend(t.index.Name())
+ if err != nil {
+ return err
+ }
+ // Release any files before the current tail
+ t.tailId = newTailId
+ atomic.StoreUint64(&t.itemOffset, newDeleted)
+ t.releaseFilesBefore(t.tailId, true)
+ // Retrieve the new size and update the total size counter
+ newSize, err := t.sizeNolock()
+ if err != nil {
+ return err
+ }
+ t.sizeGauge.Dec(int64(oldSize - newSize))
return nil
}
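
To make the hidden-versus-deleted distinction concrete, a hedged walkthrough in code form, using the same geometry as the tests below (20-byte items, 40-byte data files, so two items per file):

```go
// Sketch only, assuming tbl is a *freezerTable holding items 0..6 spread
// over 40-byte data files (two 20-byte items per file).
func dropTwo(tbl *freezerTable) error {
	// truncateTail(1): item 0 shares file 0 with the still-visible item 1,
	// so nothing is deleted yet; itemHidden becomes 1 and item 0 is hidden.
	if err := tbl.truncateTail(1); err != nil {
		return err
	}
	// truncateTail(2): file 0 now holds only hidden items, so it is removed,
	// itemOffset catches up to 2 and the index file is rewritten with a
	// fresh tail entry via copyFrom (see freezer_utils.go below).
	return tbl.truncateTail(2)
}
```
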
@@ -437,6 +567,11 @@ func (t *freezerTable) Close() error {
}
t.index = nil
+ if err := t.meta.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ t.meta = nil
+
for _, f := range t.files {
if err := f.Close(); err != nil {
errs = append(errs, err)
@@ -491,6 +626,19 @@ func (t *freezerTable) releaseFilesAfter(num uint32, remove bool) {
}
}
+// releaseFilesBefore closes all open files with a lower number, and optionally also deletes the files
+func (t *freezerTable) releaseFilesBefore(num uint32, remove bool) {
+ for fnum, f := range t.files {
+ if fnum < num {
+ delete(t.files, fnum)
+ f.Close()
+ if remove {
+ os.Remove(f.Name())
+ }
+ }
+ }
+}
+
// getIndices returns the index entries for the given from-item, covering 'count' items.
// N.B: The actual number of returned indices for N items will always be N+1 (unless an
// error is returned).
@@ -499,7 +647,7 @@ func (t *freezerTable) releaseFilesAfter(num uint32, remove bool) {
// it will return error.
func (t *freezerTable) getIndices(from, count uint64) ([]*indexEntry, error) {
// Apply the table-offset
- from = from - uint64(t.itemOffset)
+ from = from - t.itemOffset
// For reading N items, we need N+1 indices.
buffer := make([]byte, (count+1)*indexEntrySize)
if _, err := t.index.ReadAt(buffer, int64(from*indexEntrySize)); err != nil {
@@ -584,18 +732,21 @@ func (t *freezerTable) retrieveItems(start, count, maxBytes uint64) ([]byte, []i
t.lock.RLock()
defer t.lock.RUnlock()
- // Ensure the table and the item is accessible
+ // Ensure the table and the item are accessible
if t.index == nil || t.head == nil {
return nil, nil, errClosed
}
- itemCount := atomic.LoadUint64(&t.items) // max number
+ var (
+ items = atomic.LoadUint64(&t.items) // the total number of items (head + 1)
+ hidden = atomic.LoadUint64(&t.itemHidden) // the number of hidden items
+ )
// Ensure the start is written, not deleted from the tail, and that the
// caller actually wants something
- if itemCount <= start || uint64(t.itemOffset) > start || count == 0 {
+ if items <= start || hidden > start || count == 0 {
return nil, nil, errOutOfBounds
}
- if start+count > itemCount {
- count = itemCount - start
+ if start+count > items {
+ count = items - start
}
var (
output = make([]byte, maxBytes) // Buffer to read data into
@@ -671,10 +822,10 @@ func (t *freezerTable) retrieveItems(start, count, maxBytes uint64) ([]byte, []i
return output[:outputSize], sizes, nil
}
-// has returns an indicator whether the specified number data
-// exists in the freezer table.
+// has returns whether the item with the given number is still accessible
+// in the freezer table.
func (t *freezerTable) has(number uint64) bool {
- return atomic.LoadUint64(&t.items) > number
+ return atomic.LoadUint64(&t.items) > number && atomic.LoadUint64(&t.itemHidden) <= number
}
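
Put differently, an item is accessible only inside the half-open window [itemHidden, items); the sketch below restates has as a pure function:

```go
// visible mirrors has() above: items below itemHidden are hidden by tail
// truncation, items at or above the head counter were never written (or
// were truncated away).
func visible(items, itemHidden, number uint64) bool {
	return number >= itemHidden && number < items
}
```
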
// size returns the total data size in the freezer table.
@@ -728,6 +879,9 @@ func (t *freezerTable) Sync() error {
if err := t.index.Sync(); err != nil {
return err
}
+ if err := t.meta.Sync(); err != nil {
+ return err
+ }
return t.head.Sync()
}
@@ -745,13 +899,20 @@ func (t *freezerTable) dumpIndexString(start, stop int64) string {
}
func (t *freezerTable) dumpIndex(w io.Writer, start, stop int64) {
+ meta, err := readMetadata(t.meta)
+ if err != nil {
+ fmt.Fprintf(w, "Failed to decode freezer table %v\n", err)
+ return
+ }
+ fmt.Fprintf(w, "Version %d deleted %d, hidden %d\n", meta.Version, atomic.LoadUint64(&t.itemOffset), atomic.LoadUint64(&t.itemHidden))
+
buf := make([]byte, indexEntrySize)
fmt.Fprintf(w, "| number | fileno | offset |\n")
fmt.Fprintf(w, "|--------|--------|--------|\n")
for i := uint64(start); ; i++ {
- if _, err := t.index.ReadAt(buf, int64(i*indexEntrySize)); err != nil {
+ if _, err := t.index.ReadAt(buf, int64((i+1)*indexEntrySize)); err != nil {
break
}
var entry indexEntry
diff --git a/core/rawdb/freezer_table_test.go b/core/rawdb/freezer_table_test.go
index 1e2629f003..7dd380907c 100644
--- a/core/rawdb/freezer_table_test.go
+++ b/core/rawdb/freezer_table_test.go
@@ -18,11 +18,16 @@ package rawdb
import (
"bytes"
+ "encoding/binary"
"fmt"
+ "github.com/davecgh/go-spew/spew"
"math/rand"
"os"
"path/filepath"
+ "reflect"
+ "sync/atomic"
"testing"
+ "testing/quick"
"time"
"github.com/PlatONnetwork/PlatON-Go/metrics"
@@ -204,7 +209,7 @@ func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
}
// Remove everything but the first item, and leave data unaligned
// 0-indexEntry, 1-indexEntry, corrupt-indexEntry
- idxFile.Truncate(indexEntrySize + indexEntrySize + indexEntrySize/2)
+ idxFile.Truncate(2*indexEntrySize + indexEntrySize/2)
idxFile.Close()
// Now open it again
@@ -387,7 +392,7 @@ func TestFreezerTruncate(t *testing.T) {
t.Fatal(err)
}
defer f.Close()
- f.truncate(10) // 150 bytes
+ f.truncateHead(10) // 150 bytes
if f.items != 10 {
t.Fatalf("expected %d items, got %d", 10, f.items)
}
@@ -504,7 +509,7 @@ func TestFreezerReadAndTruncate(t *testing.T) {
}
// Now, truncate back to zero
- f.truncate(0)
+ f.truncateHead(0)
// Write the data again
batch := f.newBatch()
@@ -565,18 +570,19 @@ func TestFreezerOffset(t *testing.T) {
// Update the index file, so that we store
// [ file = 2, offset = 4 ] at index zero
- tailId := uint32(2) // First file is 2
- itemOffset := uint32(4) // We have removed four items
zeroIndex := indexEntry{
- filenum: tailId,
- offset: itemOffset,
+ filenum: uint32(2), // First file is 2
+ offset: uint32(4), // We have removed four items
}
buf := zeroIndex.append(nil)
+
// Overwrite index zero
copy(indexBuf, buf)
+
// Remove the four next indices by overwriting
copy(indexBuf[indexEntrySize:], indexBuf[indexEntrySize*5:])
indexFile.WriteAt(indexBuf, 0)
+
// Need to truncate the moved index items
indexFile.Truncate(indexEntrySize * (1 + 2))
indexFile.Close()
@@ -623,13 +629,12 @@ func TestFreezerOffset(t *testing.T) {
// Update the index file, so that we store
// [ file = 2, offset = 1M ] at index zero
- tailId := uint32(2) // First file is 2
- itemOffset := uint32(1000000) // We have removed 1M items
zeroIndex := indexEntry{
- offset: itemOffset,
- filenum: tailId,
+ offset: uint32(1000000), // We have removed 1M items
+ filenum: uint32(2), // First file is 2
}
buf := zeroIndex.append(nil)
+
// Overwrite index zero
copy(indexBuf, buf)
indexFile.WriteAt(indexBuf, 0)
@@ -659,6 +664,171 @@ func TestFreezerOffset(t *testing.T) {
}
}
+func TestTruncateTail(t *testing.T) {
+ t.Parallel()
+ rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
+ fname := fmt.Sprintf("truncate-tail-%d", rand.Uint64())
+
+ // Fill table
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Write 7 x 20 bytes, splitting out into four files
+ batch := f.newBatch()
+ require.NoError(t, batch.AppendRaw(0, getChunk(20, 0xFF)))
+ require.NoError(t, batch.AppendRaw(1, getChunk(20, 0xEE)))
+ require.NoError(t, batch.AppendRaw(2, getChunk(20, 0xdd)))
+ require.NoError(t, batch.AppendRaw(3, getChunk(20, 0xcc)))
+ require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
+ require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
+ require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x11)))
+ require.NoError(t, batch.commit())
+
+ // Nothing to do, all the items should still be there.
+ f.truncateTail(0)
+ fmt.Println(f.dumpIndexString(0, 1000))
+ checkRetrieve(t, f, map[uint64][]byte{
+ 0: getChunk(20, 0xFF),
+ 1: getChunk(20, 0xEE),
+ 2: getChunk(20, 0xdd),
+ 3: getChunk(20, 0xcc),
+ 4: getChunk(20, 0xbb),
+ 5: getChunk(20, 0xaa),
+ 6: getChunk(20, 0x11),
+ })
+
+ // Truncate a single element (item 0); deletion is only supported at file level
+ f.truncateTail(1)
+ fmt.Println(f.dumpIndexString(0, 1000))
+ checkRetrieveError(t, f, map[uint64]error{
+ 0: errOutOfBounds,
+ })
+ checkRetrieve(t, f, map[uint64][]byte{
+ 1: getChunk(20, 0xEE),
+ 2: getChunk(20, 0xdd),
+ 3: getChunk(20, 0xcc),
+ 4: getChunk(20, 0xbb),
+ 5: getChunk(20, 0xaa),
+ 6: getChunk(20, 0x11),
+ })
+
+ // Reopen the table, the deletion information should be persisted as well
+ f.Close()
+ f, err = newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ checkRetrieveError(t, f, map[uint64]error{
+ 0: errOutOfBounds,
+ })
+ checkRetrieve(t, f, map[uint64][]byte{
+ 1: getChunk(20, 0xEE),
+ 2: getChunk(20, 0xdd),
+ 3: getChunk(20, 0xcc),
+ 4: getChunk(20, 0xbb),
+ 5: getChunk(20, 0xaa),
+ 6: getChunk(20, 0x11),
+ })
+
+ // Truncate two elements (items 0 and 1); file 0 should be deleted
+ f.truncateTail(2)
+ checkRetrieveError(t, f, map[uint64]error{
+ 0: errOutOfBounds,
+ 1: errOutOfBounds,
+ })
+ checkRetrieve(t, f, map[uint64][]byte{
+ 2: getChunk(20, 0xdd),
+ 3: getChunk(20, 0xcc),
+ 4: getChunk(20, 0xbb),
+ 5: getChunk(20, 0xaa),
+ 6: getChunk(20, 0x11),
+ })
+
+ // Reopen the table, the above testing should still pass
+ f.Close()
+ f, err = newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer f.Close()
+
+ checkRetrieveError(t, f, map[uint64]error{
+ 0: errOutOfBounds,
+ 1: errOutOfBounds,
+ })
+ checkRetrieve(t, f, map[uint64][]byte{
+ 2: getChunk(20, 0xdd),
+ 3: getChunk(20, 0xcc),
+ 4: getChunk(20, 0xbb),
+ 5: getChunk(20, 0xaa),
+ 6: getChunk(20, 0x11),
+ })
+
+ // Truncate all items; the entire freezer should be deleted
+ f.truncateTail(7)
+ checkRetrieveError(t, f, map[uint64]error{
+ 0: errOutOfBounds,
+ 1: errOutOfBounds,
+ 2: errOutOfBounds,
+ 3: errOutOfBounds,
+ 4: errOutOfBounds,
+ 5: errOutOfBounds,
+ 6: errOutOfBounds,
+ })
+}
+
+func TestTruncateHead(t *testing.T) {
+ t.Parallel()
+ rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
+ fname := fmt.Sprintf("truncate-head-blow-tail-%d", rand.Uint64())
+
+ // Fill table
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Write 7 x 20 bytes, splitting out into four files
+ batch := f.newBatch()
+ require.NoError(t, batch.AppendRaw(0, getChunk(20, 0xFF)))
+ require.NoError(t, batch.AppendRaw(1, getChunk(20, 0xEE)))
+ require.NoError(t, batch.AppendRaw(2, getChunk(20, 0xdd)))
+ require.NoError(t, batch.AppendRaw(3, getChunk(20, 0xcc)))
+ require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
+ require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
+ require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x11)))
+ require.NoError(t, batch.commit())
+
+ f.truncateTail(4) // Tail = 4
+
+ // Truncate the head down to the tail (4); the entire table should be empty
+ f.truncateHead(4)
+ checkRetrieveError(t, f, map[uint64]error{
+ 0: errOutOfBounds, // Deleted by tail
+ 1: errOutOfBounds, // Deleted by tail
+ 2: errOutOfBounds, // Deleted by tail
+ 3: errOutOfBounds, // Deleted by tail
+ 4: errOutOfBounds, // Deleted by head
+ 5: errOutOfBounds, // Deleted by head
+ 6: errOutOfBounds, // Deleted by head
+ })
+
+ // Append new items
+ batch = f.newBatch()
+ require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
+ require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
+ require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x11)))
+ require.NoError(t, batch.commit())
+
+ checkRetrieve(t, f, map[uint64][]byte{
+ 4: getChunk(20, 0xbb),
+ 5: getChunk(20, 0xaa),
+ 6: getChunk(20, 0x11),
+ })
+}
+
func checkRetrieve(t *testing.T, f *freezerTable, items map[uint64][]byte) {
t.Helper()
@@ -915,3 +1085,212 @@ func TestFreezerReadonly(t *testing.T) {
t.Fatalf("Writing to readonly table should fail")
}
}
+
+// randTest performs random freezer table operations.
+// Instances of this test are created by Generate.
+type randTest []randTestStep
+
+type randTestStep struct {
+ op int
+ items []uint64 // for append and retrieve
+ blobs [][]byte // for append
+ target uint64 // for truncate(head/tail)
+ err error // for debugging
+}
+
+const (
+ opReload = iota
+ opAppend
+ opRetrieve
+ opTruncateHead
+ opTruncateHeadAll
+ opTruncateTail
+ opTruncateTailAll
+ opCheckAll
+ opMax // boundary value, not an actual op
+)
+
+func getVals(first uint64, n int) [][]byte {
+ var ret [][]byte
+ for i := 0; i < n; i++ {
+ val := make([]byte, 8)
+ binary.BigEndian.PutUint64(val, first+uint64(i))
+ ret = append(ret, val)
+ }
+ return ret
+}
+
+func (randTest) Generate(r *rand.Rand, size int) reflect.Value {
+ var (
+ deleted uint64 // The number of items deleted from the tail
+ items []uint64 // The indexes of entries in the table
+
+ // getItems picks up to n consecutive item indexes from the table.
+ getItems = func(n int) []uint64 {
+ length := len(items)
+ if length == 0 {
+ return nil
+ }
+ var ret []uint64
+ index := rand.Intn(length)
+ for i := index; len(ret) < n && i < length; i++ {
+ ret = append(ret, items[i])
+ }
+ return ret
+ }
+
+ // addItems appends n new items to the table.
+ addItems = func(n int) []uint64 {
+ var first = deleted
+ if len(items) != 0 {
+ first = items[len(items)-1] + 1
+ }
+ var ret []uint64
+ for i := 0; i < n; i++ {
+ ret = append(ret, first+uint64(i))
+ }
+ items = append(items, ret...)
+ return ret
+ }
+ )
+
+ var steps randTest
+ for i := 0; i < size; i++ {
+ step := randTestStep{op: r.Intn(opMax)}
+ switch step.op {
+ case opReload, opCheckAll:
+ case opAppend:
+ num := r.Intn(3)
+ step.items = addItems(num)
+ if len(step.items) == 0 {
+ step.blobs = nil
+ } else {
+ step.blobs = getVals(step.items[0], num)
+ }
+ case opRetrieve:
+ step.items = getItems(r.Intn(3))
+ case opTruncateHead:
+ if len(items) == 0 {
+ step.target = deleted
+ } else {
+ index := r.Intn(len(items))
+ items = items[:index]
+ step.target = deleted + uint64(index)
+ }
+ case opTruncateHeadAll:
+ step.target = deleted
+ items = items[:0]
+ case opTruncateTail:
+ if len(items) == 0 {
+ step.target = deleted
+ } else {
+ index := r.Intn(len(items))
+ items = items[index:]
+ deleted += uint64(index)
+ step.target = deleted
+ }
+ case opTruncateTailAll:
+ step.target = deleted + uint64(len(items))
+ items = items[:0]
+ deleted = step.target
+ }
+ steps = append(steps, step)
+ }
+ return reflect.ValueOf(steps)
+}
+
+func runRandTest(rt randTest) bool {
+ fname := fmt.Sprintf("randtest-%d", rand.Uint64())
+ f, err := newTable(os.TempDir(), fname, metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
+ if err != nil {
+ panic("failed to initialize table")
+ }
+ var values [][]byte
+ for i, step := range rt {
+ switch step.op {
+ case opReload:
+ f.Close()
+ f, err = newTable(os.TempDir(), fname, metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
+ if err != nil {
+ rt[i].err = fmt.Errorf("failed to reload table %v", err)
+ }
+ case opCheckAll:
+ tail := atomic.LoadUint64(&f.itemHidden)
+ head := atomic.LoadUint64(&f.items)
+
+ if tail == head {
+ continue
+ }
+ got, err := f.RetrieveItems(atomic.LoadUint64(&f.itemHidden), head-tail, 100000)
+ if err != nil {
+ rt[i].err = err
+ } else {
+ if !reflect.DeepEqual(got, values) {
+ rt[i].err = fmt.Errorf("mismatch on retrieved values %v %v", got, values)
+ }
+ }
+
+ case opAppend:
+ batch := f.newBatch()
+ for i := 0; i < len(step.items); i++ {
+ batch.AppendRaw(step.items[i], step.blobs[i])
+ }
+ batch.commit()
+ values = append(values, step.blobs...)
+
+ case opRetrieve:
+ var blobs [][]byte
+ if len(step.items) == 0 {
+ continue
+ }
+ tail := atomic.LoadUint64(&f.itemHidden)
+ for i := 0; i < len(step.items); i++ {
+ blobs = append(blobs, values[step.items[i]-tail])
+ }
+ got, err := f.RetrieveItems(step.items[0], uint64(len(step.items)), 100000)
+ if err != nil {
+ rt[i].err = err
+ } else {
+ if !reflect.DeepEqual(got, blobs) {
+ rt[i].err = fmt.Errorf("mismatch on retrieved values %v %v %v", got, blobs, step.items)
+ }
+ }
+
+ case opTruncateHead:
+ f.truncateHead(step.target)
+
+ length := atomic.LoadUint64(&f.items) - atomic.LoadUint64(&f.itemHidden)
+ values = values[:length]
+
+ case opTruncateHeadAll:
+ f.truncateHead(step.target)
+ values = nil
+
+ case opTruncateTail:
+ prev := atomic.LoadUint64(&f.itemHidden)
+ f.truncateTail(step.target)
+
+ truncated := atomic.LoadUint64(&f.itemHidden) - prev
+ values = values[truncated:]
+
+ case opTruncateTailAll:
+ f.truncateTail(step.target)
+ values = nil
+ }
+ // Abort the test on error.
+ if rt[i].err != nil {
+ return false
+ }
+ }
+ f.Close()
+ return true
+}
+
+func TestRandom(t *testing.T) {
+ if err := quick.Check(runRandTest, nil); err != nil {
+ if cerr, ok := err.(*quick.CheckError); ok {
+ t.Fatalf("random test iteration %d failed: %s", cerr.Count, spew.Sdump(cerr.In))
+ }
+ t.Fatal(err)
+ }
+}
diff --git a/core/rawdb/freezer_test.go b/core/rawdb/freezer_test.go
index f286fa30d7..7a09873ae4 100644
--- a/core/rawdb/freezer_test.go
+++ b/core/rawdb/freezer_test.go
@@ -24,6 +24,7 @@ import (
"math/big"
"math/rand"
"os"
+ "path"
"sync"
"testing"
@@ -186,7 +187,7 @@ func TestFreezerConcurrentModifyRetrieve(t *testing.T) {
wg.Wait()
}
-// This test runs ModifyAncients and TruncateAncients concurrently with each other.
+// This test runs ModifyAncients and TruncateHead concurrently with each other.
func TestFreezerConcurrentModifyTruncate(t *testing.T) {
f, dir := newFreezerForTesting(t, freezerTestTableDef)
defer os.RemoveAll(dir)
@@ -196,7 +197,7 @@ func TestFreezerConcurrentModifyTruncate(t *testing.T) {
for i := 0; i < 1000; i++ {
// First reset and write 100 items.
- if err := f.TruncateAncients(0); err != nil {
+ if err := f.TruncateHead(0); err != nil {
t.Fatal("truncate failed:", err)
}
_, err := f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
@@ -231,7 +232,7 @@ func TestFreezerConcurrentModifyTruncate(t *testing.T) {
wg.Done()
}()
go func() {
- truncateErr = f.TruncateAncients(10)
+ truncateErr = f.TruncateHead(10)
wg.Done()
}()
go func() {
@@ -337,3 +338,92 @@ func checkAncientCount(t *testing.T, f *freezer, kind string, n uint64) {
t.Errorf("Ancient(%q, %d) returned unexpected error %q", kind, index, err)
}
}
+
+func TestRenameWindows(t *testing.T) {
+ var (
+ fname = "file.bin"
+ fname2 = "file2.bin"
+ data = []byte{1, 2, 3, 4}
+ data2 = []byte{2, 3, 4, 5}
+ data3 = []byte{3, 5, 6, 7}
+ dataLen = 4
+ )
+
+ // Create 2 temp dirs
+ dir1, err := os.MkdirTemp("", "rename-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.Remove(dir1)
+ dir2, err := os.MkdirTemp("", "rename-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.Remove(dir2)
+
+ // Create file in dir1 and fill with data
+ f, err := os.Create(path.Join(dir1, fname))
+ if err != nil {
+ t.Fatal(err)
+ }
+ f2, err := os.Create(path.Join(dir1, fname2))
+ if err != nil {
+ t.Fatal(err)
+ }
+ f3, err := os.Create(path.Join(dir2, fname2))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := f.Write(data); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := f2.Write(data2); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := f3.Write(data3); err != nil {
+ t.Fatal(err)
+ }
+ if err := f.Close(); err != nil {
+ t.Fatal(err)
+ }
+ if err := f2.Close(); err != nil {
+ t.Fatal(err)
+ }
+ if err := f3.Close(); err != nil {
+ t.Fatal(err)
+ }
+ if err := os.Rename(f.Name(), path.Join(dir2, fname)); err != nil {
+ t.Fatal(err)
+ }
+ if err := os.Rename(f2.Name(), path.Join(dir2, fname2)); err != nil {
+ t.Fatal(err)
+ }
+
+ // Check file contents
+ f, err = os.Open(path.Join(dir2, fname))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer f.Close()
+ defer os.Remove(f.Name())
+ buf := make([]byte, dataLen)
+ if _, err := f.Read(buf); err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(buf, data) {
+ t.Errorf("unexpected file contents. Got %v\n", buf)
+ }
+
+ f, err = os.Open(path.Join(dir2, fname2))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer f.Close()
+ defer os.Remove(f.Name())
+ if _, err := f.Read(buf); err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(buf, data2) {
+ t.Errorf("unexpected file contents. Got %v\n", buf)
+ }
+}
diff --git a/core/rawdb/freezer_utils.go b/core/rawdb/freezer_utils.go
new file mode 100644
index 0000000000..5695fc0fa8
--- /dev/null
+++ b/core/rawdb/freezer_utils.go
@@ -0,0 +1,120 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+)
+
+// copyFrom copies data from 'srcPath' at offset 'offset' into 'destPath'.
+// The 'destPath' is created if it doesn't exist, otherwise it is overwritten.
+// Before the copy is executed, a callback can be registered to
+// manipulate the dest file.
+// It is perfectly valid to have destPath == srcPath.
+func copyFrom(srcPath, destPath string, offset uint64, before func(f *os.File) error) error {
+ // Create a temp file in the same dir where we want it to wind up
+ f, err := ioutil.TempFile(filepath.Dir(destPath), "*")
+ if err != nil {
+ return err
+ }
+ fname := f.Name()
+
+ // Clean up the leftover file
+ defer func() {
+ if f != nil {
+ f.Close()
+ }
+ os.Remove(fname)
+ }()
+ // Apply the given function if it's not nil before we copy
+ // the content from the src.
+ if before != nil {
+ if err := before(f); err != nil {
+ return err
+ }
+ }
+ // Open the source file
+ src, err := os.Open(srcPath)
+ if err != nil {
+ return err
+ }
+ if _, err = src.Seek(int64(offset), 0); err != nil {
+ src.Close()
+ return err
+ }
+ // io.Copy uses a 32K buffer internally.
+ _, err = io.Copy(f, src)
+ if err != nil {
+ src.Close()
+ return err
+ }
+ // Rename the temporary file to the specified dest name.
+ // src may be the same as dest, so it needs to be closed
+ // before we do the final move.
+ src.Close()
+
+ if err := f.Close(); err != nil {
+ return err
+ }
+ f = nil
+
+ if err := os.Rename(fname, destPath); err != nil {
+ return err
+ }
+ return nil
+}
+
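
The self-copy case (destPath == srcPath) is exactly what truncateTail relies on; a hedged sketch of that pattern (the helper name and parameters are illustrative, assumed to live in package rawdb next to indexEntry):

```go
// trimIndex rewrites an index file in place, dropping the first `drop`
// entries and writing a fresh tail entry first via the `before` callback,
// the same shape as the copyFrom call in truncateTail.
func trimIndex(indexPath string, drop uint64, tail indexEntry) error {
	const indexEntrySize = 6
	return copyFrom(indexPath, indexPath, indexEntrySize*drop, func(f *os.File) error {
		_, err := f.Write(tail.append(nil))
		return err
	})
}
```
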
+// openFreezerFileForAppend opens a freezer table file and seeks to the end
+func openFreezerFileForAppend(filename string) (*os.File, error) {
+ // Open the file without the O_APPEND flag
+ // because it has differing behaviour during Truncate operations
+ // on different OS's
+ file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0644)
+ if err != nil {
+ return nil, err
+ }
+ // Seek to end for append
+ if _, err = file.Seek(0, io.SeekEnd); err != nil {
+ return nil, err
+ }
+ return file, nil
+}
+
+// openFreezerFileForReadOnly opens a freezer table file for read only access
+func openFreezerFileForReadOnly(filename string) (*os.File, error) {
+ return os.OpenFile(filename, os.O_RDONLY, 0644)
+}
+
+// openFreezerFileTruncated opens a freezer table making sure it is truncated
+func openFreezerFileTruncated(filename string) (*os.File, error) {
+ return os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
+}
+
+// truncateFreezerFile resizes a freezer table file and seeks to the end
+func truncateFreezerFile(file *os.File, size int64) error {
+ if err := file.Truncate(size); err != nil {
+ return err
+ }
+ // Seek to end for append
+ if _, err := file.Seek(0, io.SeekEnd); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/core/rawdb/freezer_utils_test.go b/core/rawdb/freezer_utils_test.go
new file mode 100644
index 0000000000..de8087f9b9
--- /dev/null
+++ b/core/rawdb/freezer_utils_test.go
@@ -0,0 +1,76 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+ "bytes"
+ "io/ioutil"
+ "os"
+ "testing"
+)
+
+func TestCopyFrom(t *testing.T) {
+ var (
+ content = []byte{0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8}
+ prefix = []byte{0x9, 0xa, 0xb, 0xc, 0xd, 0xf}
+ )
+ var cases = []struct {
+ src, dest string
+ offset uint64
+ writePrefix bool
+ }{
+ {"foo", "bar", 0, false},
+ {"foo", "bar", 1, false},
+ {"foo", "bar", 8, false},
+ {"foo", "foo", 0, false},
+ {"foo", "foo", 1, false},
+ {"foo", "foo", 8, false},
+ {"foo", "bar", 0, true},
+ {"foo", "bar", 1, true},
+ {"foo", "bar", 8, true},
+ }
+ for _, c := range cases {
+ ioutil.WriteFile(c.src, content, 0644)
+
+ if err := copyFrom(c.src, c.dest, c.offset, func(f *os.File) error {
+ if !c.writePrefix {
+ return nil
+ }
+ f.Write(prefix)
+ return nil
+ }); err != nil {
+ os.Remove(c.src)
+ t.Fatalf("Failed to copy %v", err)
+ }
+
+ blob, err := ioutil.ReadFile(c.dest)
+ if err != nil {
+ os.Remove(c.src)
+ os.Remove(c.dest)
+ t.Fatalf("Failed to read %v", err)
+ }
+ want := content[c.offset:]
+ if c.writePrefix {
+ want = append(prefix, want...)
+ }
+ if !bytes.Equal(blob, want) {
+ t.Fatal("Unexpected value")
+ }
+ os.Remove(c.src)
+ os.Remove(c.dest)
+ }
+}
diff --git a/core/rawdb/table.go b/core/rawdb/table.go
index 5d99d6387f..4a43ea8727 100644
--- a/core/rawdb/table.go
+++ b/core/rawdb/table.go
@@ -74,6 +74,12 @@ func (t *table) Ancients() (uint64, error) {
return t.db.Ancients()
}
+// Tail is a noop passthrough that just forwards the request to the underlying
+// database.
+func (t *table) Tail() (uint64, error) {
+ return t.db.Tail()
+}
+
// AncientSize is a noop passthrough that just forwards the request to the underlying
// database.
func (t *table) AncientSize(kind string) (uint64, error) {
@@ -89,10 +95,15 @@ func (t *table) ReadAncients(fn func(reader ethdb.AncientReader) error) (err err
return t.db.ReadAncients(fn)
}
-// TruncateAncients is a noop passthrough that just forwards the request to the underlying
+// TruncateHead is a noop passthrough that just forwards the request to the underlying
// database.
-func (t *table) TruncateAncients(items uint64) error {
- return t.db.TruncateAncients(items)
+func (t *table) TruncateHead(items uint64) error {
+ return t.db.TruncateHead(items)
+}
+
+// TruncateTail is a noop passthrough that just forwards the request to the underlying
+// database.
+func (t *table) TruncateTail(items uint64) error {
+ return t.db.TruncateTail(items)
}
// Sync is a noop passthrough that just forwards the request to the underlying
@@ -101,6 +112,12 @@ func (t *table) Sync() error {
return t.db.Sync()
}
+// MigrateTable processes the entries in a given table in sequence,
+// converting them to a new format if they're of an old format.
+func (t *table) MigrateTable(kind string, convert convertLegacyFn) error {
+ return t.db.MigrateTable(kind, convert)
+}
+
// Put inserts the given value into the database at a prefixed version of the
// provided key.
func (t *table) Put(key []byte, value []byte) error {
@@ -172,6 +189,18 @@ func (t *table) NewBatch() ethdb.Batch {
return &tableBatch{t.db.NewBatch(), t.prefix}
}
+// NewBatchWithSize creates a write-only database batch with pre-allocated buffer.
+func (t *table) NewBatchWithSize(size int) ethdb.Batch {
+ return &tableBatch{t.db.NewBatchWithSize(size), t.prefix}
+}
+
+// NewSnapshot creates a database snapshot based on the current state.
+// The created snapshot will not be affected by any mutations that
+// happen on the database afterwards.
+func (t *table) NewSnapshot() (ethdb.Snapshot, error) {
+ return t.db.NewSnapshot()
+}
+
// tableBatch is a wrapper around a database batch that prefixes each key access
// with a pre-configured string.
type tableBatch struct {
diff --git a/core/state/pruner/pruner.go b/core/state/pruner/pruner.go
index 71a58d657d..b4d8cc0a7b 100644
--- a/core/state/pruner/pruner.go
+++ b/core/state/pruner/pruner.go
@@ -66,9 +66,9 @@ var (
// Pruner is an offline tool to prune the stale state with the
// help of the snapshot. The workflow of pruner is very simple:
//
-// - iterate the snapshot, reconstruct the relevant state
-// - iterate the database, delete all other state entries which
-// don't belong to the target state and the genesis state
+// - iterate the snapshot, reconstruct the relevant state
+// - iterate the database, delete all other state entries which
+// don't belong to the target state and the genesis state
//
// It can take several hours (around 2 hours for mainnet) to finish
// the whole pruning work. It's recommended to run this offline tool
@@ -230,7 +230,7 @@ func prune(snaptree *snapshot.Tree, root common.Hash, maindb ethdb.Database, sta
// Prune deletes all historical state nodes except the nodes belong to the
// specified state version. If user doesn't specify the state version, use
-// the top-most snapshot diff layer as the target.
+// the bottom-most snapshot diff layer as the target.
func (p *Pruner) Prune(root common.Hash) error {
// If the state bloom filter is already committed previously,
// reuse it for pruning instead of generating a new one. It's
@@ -243,8 +243,8 @@ func (p *Pruner) Prune(root common.Hash) error {
if stateBloomRoot != (common.Hash{}) {
return RecoverPruning(p.datadir, p.db, p.trieCachePath)
}
- // The target state root must be the root corresponding to the current HEAD
- // The reason for picking it is:
+ // If the target state root is not specified, use the HEAD-127 as the
+ // target. The reason for picking it is:
// - in most of the normal cases, the related state is available
// - the probability of this layer being reorg is very low
if root != (common.Hash{}) && root != p.headHeader.Root {
@@ -268,7 +268,7 @@ func (p *Pruner) Prune(root common.Hash) error {
// Ensure the root is really present. The weak assumption
// is the presence of root can indicate the presence of the
// entire trie.
- if blob := rawdb.ReadTrieNode(p.db, root); len(blob) == 0 {
+ if !rawdb.HasTrieNode(p.db, root) {
log.Error("Could not find the trie node corresponding to root", "root", root.TerminalString())
return fmt.Errorf("could not find the trie node corresponding to root:%s", root)
}
diff --git a/core/state/statedb.go b/core/state/statedb.go
index 80e8ce3aff..15e2e816e2 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -1137,7 +1137,6 @@ func (s *StateDB) Root() common.Hash {
func (s *StateDB) Prepare(thash common.Hash, ti int) {
s.thash = thash
s.txIndex = ti
- s.accessList = newAccessList()
}
func (s *StateDB) clearJournalAndRefund() {
@@ -1183,6 +1182,9 @@ func (s *StateDB) AddSlotToAccessList(addr common.Address, slot common.Hash) {
//
// This method should only be called if Yolov3/Berlin/2929+2930 is applicable at the current number.
func (s *StateDB) PrepareAccessList(sender common.Address, dst *common.Address, precompiles []common.Address, list types.AccessList) {
+ // Clear out any leftover from previous executions
+ s.accessList = newAccessList()
+
s.AddAddressToAccessList(sender)
if dst != nil {
s.AddAddressToAccessList(*dst)
diff --git a/core/types/access_list_tx.go b/core/types/access_list_tx.go
index 7781a676c3..11fed77505 100644
--- a/core/types/access_list_tx.go
+++ b/core/types/access_list_tx.go
@@ -22,7 +22,7 @@ import (
"github.com/PlatONnetwork/PlatON-Go/common"
)
-//go:generate gencodec -type AccessTuple -out gen_access_tuple.go
+//go:generate go run github.com/fjl/gencodec@latest -type AccessTuple -out gen_access_tuple.go
// AccessList is an EIP-2930 access list.
type AccessList []AccessTuple
diff --git a/core/types/block.go b/core/types/block.go
index 4662e174fd..45ca1c02d5 100644
--- a/core/types/block.go
+++ b/core/types/block.go
@@ -109,7 +109,8 @@ func (n *ETHBlockNonce) UnmarshalText(input []byte) error {
return hexutil.UnmarshalFixedText("BlockNonce", input, n[:])
}
-//go:generate gencodec -type Header -field-override headerMarshaling -out gen_header_json.go
+//go:generate go run github.com/fjl/gencodec@latest -type Header -field-override headerMarshaling -out gen_header_json.go
+//go:generate go run ../../rlp/rlpgen -type Header -out gen_header_rlp.go
// Header represents a block header in the Ethereum blockchain.
type Header struct {
diff --git a/core/types/gen_account_rlp.go b/core/types/gen_account_rlp.go
new file mode 100644
index 0000000000..97a7e01953
--- /dev/null
+++ b/core/types/gen_account_rlp.go
@@ -0,0 +1,28 @@
+// Code generated by rlpgen. DO NOT EDIT.
+
+//go:build !norlpgen
+// +build !norlpgen
+
+package types
+
+import "github.com/PlatONnetwork/PlatON-Go/rlp"
+import "io"
+
+func (obj *StateAccount) EncodeRLP(_w io.Writer) error {
+ w := rlp.NewEncoderBuffer(_w)
+ _tmp0 := w.List()
+ w.WriteUint64(obj.Nonce)
+ if obj.Balance == nil {
+ w.Write(rlp.EmptyString)
+ } else {
+ if obj.Balance.Sign() == -1 {
+ return rlp.ErrNegativeBigInt
+ }
+ w.WriteBigInt(obj.Balance)
+ }
+ w.WriteBytes(obj.Root[:])
+ w.WriteBytes(obj.CodeHash)
+ w.WriteBytes(obj.StorageKeyPrefix)
+ w.ListEnd(_tmp0)
+ return w.Flush()
+}
diff --git a/core/types/gen_header_rlp.go b/core/types/gen_header_rlp.go
new file mode 100644
index 0000000000..971baed3fd
--- /dev/null
+++ b/core/types/gen_header_rlp.go
@@ -0,0 +1,46 @@
+// Code generated by rlpgen. DO NOT EDIT.
+
+//go:build !norlpgen
+// +build !norlpgen
+
+package types
+
+import "github.com/PlatONnetwork/PlatON-Go/rlp"
+import "io"
+
+func (obj *Header) EncodeRLP(_w io.Writer) error {
+ w := rlp.NewEncoderBuffer(_w)
+ _tmp0 := w.List()
+ w.WriteBytes(obj.ParentHash[:])
+ w.WriteBytes(obj.Coinbase[:])
+ w.WriteBytes(obj.Root[:])
+ w.WriteBytes(obj.TxHash[:])
+ w.WriteBytes(obj.ReceiptHash[:])
+ w.WriteBytes(obj.Bloom[:])
+ if obj.Number == nil {
+ w.Write(rlp.EmptyString)
+ } else {
+ if obj.Number.Sign() == -1 {
+ return rlp.ErrNegativeBigInt
+ }
+ w.WriteBigInt(obj.Number)
+ }
+ w.WriteUint64(obj.GasLimit)
+ w.WriteUint64(obj.GasUsed)
+ w.WriteUint64(obj.Time)
+ w.WriteBytes(obj.Extra)
+ w.WriteBytes(obj.Nonce[:])
+ _tmp1 := obj.BaseFee != nil
+ if _tmp1 {
+ if obj.BaseFee == nil {
+ w.Write(rlp.EmptyString)
+ } else {
+ if obj.BaseFee.Sign() == -1 {
+ return rlp.ErrNegativeBigInt
+ }
+ w.WriteBigInt(obj.BaseFee)
+ }
+ }
+ w.ListEnd(_tmp0)
+ return w.Flush()
+}
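
Callers need no changes to pick up the generated encoder; a minimal sketch of the call path (package and function names are illustrative):

```go
package example

import (
	"github.com/PlatONnetwork/PlatON-Go/core/types"
	"github.com/PlatONnetwork/PlatON-Go/rlp"
)

// encodeHeader relies on rlp.EncodeToBytes discovering the generated
// EncodeRLP method through the rlp.Encoder interface.
func encodeHeader(h *types.Header) ([]byte, error) {
	return rlp.EncodeToBytes(h)
}
```
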
diff --git a/core/types/gen_log_json.go b/core/types/gen_log_json.go
index e6fefb9e14..fa984ced40 100644
--- a/core/types/gen_log_json.go
+++ b/core/types/gen_log_json.go
@@ -23,7 +23,7 @@ func (l Log) MarshalJSON() ([]byte, error) {
TxIndex hexutil.Uint `json:"transactionIndex"`
BlockHash common.Hash `json:"blockHash"`
Index hexutil.Uint `json:"logIndex"`
- Removed bool `json:"removed"`
+ Removed bool `json:"removed" rlp:"-"`
}
var enc Log
enc.Address = l.Address
@@ -49,7 +49,7 @@ func (l *Log) UnmarshalJSON(input []byte) error {
TxIndex *hexutil.Uint `json:"transactionIndex"`
BlockHash *common.Hash `json:"blockHash"`
Index *hexutil.Uint `json:"logIndex"`
- Removed *bool `json:"removed"`
+ Removed *bool `json:"removed" rlp:"-"`
}
var dec Log
if err := json.Unmarshal(input, &dec); err != nil {
diff --git a/core/types/gen_log_rlp.go b/core/types/gen_log_rlp.go
new file mode 100644
index 0000000000..16cfa5ded0
--- /dev/null
+++ b/core/types/gen_log_rlp.go
@@ -0,0 +1,23 @@
+// Code generated by rlpgen. DO NOT EDIT.
+
+//go:build !norlpgen
+// +build !norlpgen
+
+package types
+
+import "github.com/PlatONnetwork/PlatON-Go/rlp"
+import "io"
+
+func (obj *rlpLog) EncodeRLP(_w io.Writer) error {
+ w := rlp.NewEncoderBuffer(_w)
+ _tmp0 := w.List()
+ w.WriteBytes(obj.Address[:])
+ _tmp1 := w.List()
+ for _, _tmp2 := range obj.Topics {
+ w.WriteBytes(_tmp2[:])
+ }
+ w.ListEnd(_tmp1)
+ w.WriteBytes(obj.Data)
+ w.ListEnd(_tmp0)
+ return w.Flush()
+}
diff --git a/core/types/gen_receipt_json.go b/core/types/gen_receipt_json.go
index 09323d2f5d..8e38a2a740 100644
--- a/core/types/gen_receipt_json.go
+++ b/core/types/gen_receipt_json.go
@@ -5,7 +5,6 @@ package types
import (
"encoding/json"
"errors"
- json2 "github.com/PlatONnetwork/PlatON-Go/common/json"
"math/big"
"github.com/PlatONnetwork/PlatON-Go/common"
@@ -46,40 +45,6 @@ func (r Receipt) MarshalJSON() ([]byte, error) {
return json.Marshal(&enc)
}
-func (r Receipt) MarshalJSON2() ([]byte, error) {
- type Receipt struct {
- Type hexutil.Uint64 `json:"type,omitempty"`
- PostState hexutil.Bytes `json:"root"`
- Status hexutil.Uint64 `json:"status"`
- CumulativeGasUsed hexutil.Uint64 `json:"cumulativeGasUsed" gencodec:"required"`
- Bloom Bloom `json:"logsBloom" gencodec:"required"`
- Logs []*Log `json:"logs" gencodec:"required"`
- TxHash common.Hash `json:"transactionHash" gencodec:"required"`
- ContractAddress common.Address `json:"contractAddress"`
- GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
- BlockHash common.Hash `json:"blockHash,omitempty"`
- BlockNumber *hexutil.Big `json:"blockNumber,omitempty"`
- TransactionIndex hexutil.Uint `json:"transactionIndex"`
- // Access list transaction fields:
- ChainID *hexutil.Big `json:"chainId,omitempty"`
- AccessList *AccessList `json:"accessList,omitempty"`
- }
- var enc Receipt
- enc.Type = hexutil.Uint64(r.Type)
- enc.PostState = r.PostState
- enc.Status = hexutil.Uint64(r.Status)
- enc.CumulativeGasUsed = hexutil.Uint64(r.CumulativeGasUsed)
- enc.Bloom = r.Bloom
- enc.Logs = r.Logs
- enc.TxHash = r.TxHash
- enc.ContractAddress = r.ContractAddress
- enc.GasUsed = hexutil.Uint64(r.GasUsed)
- enc.BlockHash = r.BlockHash
- enc.BlockNumber = (*hexutil.Big)(r.BlockNumber)
- enc.TransactionIndex = hexutil.Uint(r.TransactionIndex)
- return json2.Marshal(&enc)
-}
-
// UnmarshalJSON unmarshals from JSON.
func (r *Receipt) UnmarshalJSON(input []byte) error {
type Receipt struct {
diff --git a/core/types/legacy.go b/core/types/legacy.go
new file mode 100644
index 0000000000..64292e3029
--- /dev/null
+++ b/core/types/legacy.go
@@ -0,0 +1,53 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package types
+
+import (
+ "errors"
+
+ "github.com/PlatONnetwork/PlatON-Go/rlp"
+)
+
+// IsLegacyStoredReceipts tries to parse the RLP-encoded blob
+// first as an array of v3 stored receipts, then v4 stored receipts, and
+// returns true if successful.
+func IsLegacyStoredReceipts(raw []byte) (bool, error) {
+ var v3 []v3StoredReceiptRLP
+ if err := rlp.DecodeBytes(raw, &v3); err == nil {
+ return true, nil
+ }
+ var v4 []v4StoredReceiptRLP
+ if err := rlp.DecodeBytes(raw, &v4); err == nil {
+ return true, nil
+ }
+ var v5 []storedReceiptRLP
+ // Check whether it decodes as a valid fresh stored receipt
+ if err := rlp.DecodeBytes(raw, &v5); err == nil {
+ return false, nil
+ }
+ return false, errors.New("value is not a valid receipt encoding")
+}
+
+// ConvertLegacyStoredReceipts takes the RLP encoding of an array of legacy
+// stored receipts and returns a fresh RLP-encoded stored receipt.
+func ConvertLegacyStoredReceipts(raw []byte) ([]byte, error) {
+ var receipts []ReceiptForStorage
+ if err := rlp.DecodeBytes(raw, &receipts); err != nil {
+ return nil, err
+ }
+ return rlp.EncodeToBytes(&receipts)
+}
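
These helpers are shaped to slot into the MigrateTable hook added in table.go earlier in this patch; a hedged sketch, assuming convertLegacyFn has the shape func([]byte) ([]byte, error) and that the ancient store exposes MigrateTable as the wrapper suggests:

```go
// migrateReceipts is a hypothetical wiring of the helpers above: legacy
// blobs are re-encoded into the fresh format, current ones pass through.
func migrateReceipts(db ethdb.Database) error {
	return db.MigrateTable("receipts", func(raw []byte) ([]byte, error) {
		legacy, err := types.IsLegacyStoredReceipts(raw)
		if err != nil {
			return nil, err
		}
		if !legacy {
			return raw, nil // already in the current encoding
		}
		return types.ConvertLegacyStoredReceipts(raw)
	})
}
```
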
diff --git a/core/types/log.go b/core/types/log.go
index b2b23bc4a6..bb154df64f 100644
--- a/core/types/log.go
+++ b/core/types/log.go
@@ -25,7 +25,7 @@ import (
"github.com/PlatONnetwork/PlatON-Go/rlp"
)
-//go:generate gencodec -type Log -field-override logMarshaling -out gen_log_json.go
+//go:generate go run github.com/fjl/gencodec@latest -type Log -field-override logMarshaling -out gen_log_json.go
// Log represents a contract log event. These events are generated by the LOG opcode and
// stored/indexed by the node.
@@ -89,15 +89,14 @@ type logMarshaling struct {
Index hexutil.Uint
}
+//go:generate go run ../../rlp/rlpgen -type rlpLog -out gen_log_rlp.go
+
type rlpLog struct {
Address common.Address
Topics []common.Hash
Data []byte
}
-// rlpStorageLog is the storage encoding of a log.
-type rlpStorageLog rlpLog
-
// legacyRlpStorageLog is the previous storage encoding of a log including some redundant fields.
type legacyRlpStorageLog struct {
Address common.Address
@@ -112,7 +111,8 @@ type legacyRlpStorageLog struct {
// EncodeRLP implements rlp.Encoder.
func (l *Log) EncodeRLP(w io.Writer) error {
- return rlp.Encode(w, rlpLog{Address: l.Address, Topics: l.Topics, Data: l.Data})
+ rl := rlpLog{Address: l.Address, Topics: l.Topics, Data: l.Data}
+ return rlp.Encode(w, &rl)
}
// DecodeRLP implements rlp.Decoder.
@@ -131,11 +131,8 @@ type LogForStorage Log
// EncodeRLP implements rlp.Encoder.
func (l *LogForStorage) EncodeRLP(w io.Writer) error {
- return rlp.Encode(w, rlpStorageLog{
- Address: l.Address,
- Topics: l.Topics,
- Data: l.Data,
- })
+ rl := rlpLog{Address: l.Address, Topics: l.Topics, Data: l.Data}
+ return rlp.Encode(w, &rl)
}
// DecodeRLP implements rlp.Decoder.
@@ -146,7 +143,7 @@ func (l *LogForStorage) DecodeRLP(s *rlp.Stream) error {
if err != nil {
return err
}
- var dec rlpStorageLog
+ var dec rlpLog
err = rlp.DecodeBytes(blob, &dec)
if err == nil {
*l = LogForStorage{
diff --git a/core/types/receipt.go b/core/types/receipt.go
index 746ce01105..a64ef7ac0b 100644
--- a/core/types/receipt.go
+++ b/core/types/receipt.go
@@ -32,15 +32,14 @@ import (
"github.com/PlatONnetwork/PlatON-Go/rlp"
)
-//go:generate gencodec -type Receipt -field-override receiptMarshaling -out gen_receipt_json.go
+//go:generate go run github.com/fjl/gencodec@latest -type Receipt -field-override receiptMarshaling -out gen_receipt_json.go
var (
receiptStatusFailedRLP = []byte{}
receiptStatusSuccessfulRLP = []byte{0x01}
)
-// This error is returned when a typed receipt is decoded, but the string is empty.
-var errEmptyTypedReceipt = errors.New("empty typed receipt bytes")
+var errShortTypedReceipt = errors.New("typed receipt too short")
const (
// ReceiptStatusFailed is the status code of a transaction if execution failed.
@@ -184,26 +183,13 @@ func (r *Receipt) DecodeRLP(s *rlp.Stream) error {
}
r.Type = LegacyTxType
return r.setFromRLP(dec)
- case kind == rlp.String:
+ default:
// It's an EIP-2718 typed tx receipt.
b, err := s.Bytes()
if err != nil {
return err
}
- if len(b) == 0 {
- return errEmptyTypedReceipt
- }
- r.Type = b[0]
- if r.Type == AccessListTxType || r.Type == DynamicFeeTxType {
- var dec receiptRLP
- if err := rlp.DecodeBytes(b[1:], &dec); err != nil {
- return err
- }
- return r.setFromRLP(dec)
- }
- return ErrTxTypeNotSupported
- default:
- return rlp.ErrExpectedList
+ return r.decodeTyped(b)
}
}
@@ -226,8 +212,8 @@ func (r *Receipt) UnmarshalBinary(b []byte) error {
// decodeTyped decodes a typed receipt from the canonical format.
func (r *Receipt) decodeTyped(b []byte) error {
- if len(b) == 0 {
- return errEmptyTypedReceipt
+ if len(b) <= 1 {
+ return errShortTypedReceipt
}
switch b[0] {
case DynamicFeeTxType, AccessListTxType:
@@ -289,16 +275,20 @@ type ReceiptForStorage Receipt
// EncodeRLP implements rlp.Encoder, and flattens all content fields of a receipt
// into an RLP stream.
-func (r *ReceiptForStorage) EncodeRLP(w io.Writer) error {
- enc := &storedReceiptRLP{
- PostStateOrStatus: (*Receipt)(r).statusEncoding(),
- CumulativeGasUsed: r.CumulativeGasUsed,
- Logs: make([]*LogForStorage, len(r.Logs)),
- }
- for i, log := range r.Logs {
- enc.Logs[i] = (*LogForStorage)(log)
+func (r *ReceiptForStorage) EncodeRLP(_w io.Writer) error {
+ w := rlp.NewEncoderBuffer(_w)
+ outerList := w.List()
+ w.WriteBytes((*Receipt)(r).statusEncoding())
+ w.WriteUint64(r.CumulativeGasUsed)
+ logList := w.List()
+ for _, log := range r.Logs {
+ if err := rlp.Encode(w, log); err != nil {
+ return err
+ }
}
- return rlp.Encode(w, enc)
+ w.ListEnd(logList)
+ w.ListEnd(outerList)
+ return w.Flush()
}
// DecodeRLP implements rlp.Decoder, and loads both consensus and implementation
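The rewritten `ReceiptForStorage.EncodeRLP` streams fields through `rlp.EncoderBuffer` instead of building an intermediate `storedReceiptRLP` value. A minimal sketch of the buffer API it relies on, where `List`/`ListEnd` nest and `Flush` writes out:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/PlatONnetwork/PlatON-Go/rlp"
)

func main() {
	var out bytes.Buffer
	w := rlp.NewEncoderBuffer(&out)
	outer := w.List() // open the outer list
	w.WriteUint64(1)  // a scalar field, like CumulativeGasUsed
	inner := w.List() // open a nested list, like the log list
	w.WriteUint64(2)
	w.ListEnd(inner) // close the nested list
	w.ListEnd(outer) // close the outer list
	if err := w.Flush(); err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", out.Bytes()) // c301c102 = [1, [2]]
}
```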
diff --git a/core/types/receipt_test.go b/core/types/receipt_test.go
index 1bc568767c..78f6ea6dbd 100644
--- a/core/types/receipt_test.go
+++ b/core/types/receipt_test.go
@@ -86,7 +86,7 @@ func TestDecodeEmptyTypedReceipt(t *testing.T) {
input := []byte{0x80}
var r Receipt
err := rlp.DecodeBytes(input, &r)
- if err != errEmptyTypedReceipt {
+ if err != errShortTypedReceipt {
t.Fatal("wrong error:", err)
}
}
diff --git a/core/types/state_account.go b/core/types/state_account.go
index 67bd0af34f..afbfa74980 100644
--- a/core/types/state_account.go
+++ b/core/types/state_account.go
@@ -22,6 +22,8 @@ import (
"github.com/PlatONnetwork/PlatON-Go/common"
)
+//go:generate go run ../../rlp/rlpgen -type StateAccount -out gen_account_rlp.go
+
// StateAccount is the Ethereum consensus representation of accounts.
// These objects are stored in the main account trie.
type StateAccount struct {
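The new `go:generate` directive wires `StateAccount` into `rlp/rlpgen`, which emits a reflection-free encoder into `gen_account_rlp.go`. As a rough, illustrative sketch of the generated shape, assuming go-ethereum-style fields (`Nonce`, `Balance`, `Root`, `CodeHash`); the real generator output also rejects negative balances:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"math/big"

	"github.com/PlatONnetwork/PlatON-Go/common"
	"github.com/PlatONnetwork/PlatON-Go/rlp"
)

// account mirrors the go-ethereum-style StateAccount fields.
type account struct {
	Nonce    uint64
	Balance  *big.Int
	Root     common.Hash
	CodeHash []byte
}

// EncodeRLP shows the rough shape of what rlpgen emits; illustrative only.
func (obj *account) EncodeRLP(_w io.Writer) error {
	w := rlp.NewEncoderBuffer(_w)
	list := w.List()
	w.WriteUint64(obj.Nonce)
	if obj.Balance == nil {
		w.Write(rlp.EmptyString) // nil big.Int encodes as the empty string
	} else {
		w.WriteBigInt(obj.Balance)
	}
	w.WriteBytes(obj.Root[:])
	w.WriteBytes(obj.CodeHash)
	w.ListEnd(list)
	return w.Flush()
}

func main() {
	var buf bytes.Buffer
	acct := &account{Nonce: 1, Balance: big.NewInt(2), CodeHash: make([]byte, 32)}
	_ = acct.EncodeRLP(&buf)
	fmt.Printf("%x\n", buf.Bytes())
}
```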
diff --git a/core/types/transaction.go b/core/types/transaction.go
index 35fb4f73a9..5bf91f9367 100644
--- a/core/types/transaction.go
+++ b/core/types/transaction.go
@@ -37,7 +37,7 @@ var (
ErrInvalidTxType = errors.New("transaction type not valid in this context")
ErrTxTypeNotSupported = errors.New("transaction type not supported")
ErrGasFeeCapTooLow = errors.New("fee cap less than base fee")
- errEmptyTypedTx = errors.New("empty typed transaction bytes")
+ errShortTypedTx = errors.New("typed transaction too short")
)
// Transaction types.
@@ -137,7 +137,7 @@ func (tx *Transaction) DecodeRLP(s *rlp.Stream) error {
tx.setDecoded(&inner, int(rlp.ListSize(size)))
}
return err
- case kind == rlp.String:
+ default:
// It's an EIP-2718 typed TX envelope.
var b []byte
if b, err = s.Bytes(); err != nil {
@@ -148,8 +148,6 @@ func (tx *Transaction) DecodeRLP(s *rlp.Stream) error {
tx.setDecoded(inner, len(b))
}
return err
- default:
- return rlp.ErrExpectedList
}
}
@@ -177,8 +175,8 @@ func (tx *Transaction) UnmarshalBinary(b []byte) error {
// decodeTyped decodes a typed transaction from the canonical format.
func (tx *Transaction) decodeTyped(b []byte) (TxData, error) {
- if len(b) == 0 {
- return nil, errEmptyTypedTx
+ if len(b) <= 1 {
+ return nil, errShortTypedTx
}
switch b[0] {
case AccessListTxType:
diff --git a/core/types/transaction_test.go b/core/types/transaction_test.go
index c8806b7bef..b98a1e52a8 100644
--- a/core/types/transaction_test.go
+++ b/core/types/transaction_test.go
@@ -77,7 +77,7 @@ func TestDecodeEmptyTypedTx(t *testing.T) {
input := []byte{0x80}
var tx Transaction
err := rlp.DecodeBytes(input, &tx)
- if err != errEmptyTypedTx {
+ if err != errShortTypedTx {
t.Fatal("wrong error:", err)
}
}
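The rename from `errEmptyTypedTx`/`errEmptyTypedReceipt` to `errShortTypedTx`/`errShortTypedReceipt` tracks the tightened bound: a valid EIP-2718 envelope is one type byte plus a non-empty payload, so 0- and 1-byte strings are now rejected up front. A quick sketch (the error values are package-private, so this only prints them):

```go
package main

import (
	"fmt"

	"github.com/PlatONnetwork/PlatON-Go/core/types"
	"github.com/PlatONnetwork/PlatON-Go/rlp"
)

func main() {
	inputs := [][]byte{
		{0x80},       // empty RLP string: no type byte at all
		{0x81, 0x80}, // one-byte RLP string: a type byte with no payload
	}
	for _, in := range inputs {
		var tx types.Transaction
		err := rlp.DecodeBytes(in, &tx)
		fmt.Printf("%x -> %v\n", in, err) // both: "typed transaction too short"
	}
}
```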
diff --git a/core/vm/operations_acl.go b/core/vm/operations_acl.go
index 6a4b425357..56eca83a52 100644
--- a/core/vm/operations_acl.go
+++ b/core/vm/operations_acl.go
@@ -215,7 +215,7 @@ var (
// see gasSStoreEIP2200(...) in core/vm/gas_table.go for more info about how EIP 2200 is specified
gasSStoreEIP2929 = makeGasSStoreFunc(params.SstoreClearsScheduleRefundEIP2200)
- // gasSStoreEIP2539 implements gas cost for SSTORE according to EPI-2539
+ // gasSStoreEIP3529 implements gas cost for SSTORE according to EIP-3529
// Replace `SSTORE_CLEARS_SCHEDULE` with `SSTORE_RESET_GAS + ACCESS_LIST_STORAGE_KEY_COST` (4,800)
gasSStoreEIP3529 = makeGasSStoreFunc(params.SstoreClearsScheduleRefundEIP3529)
)
diff --git a/crypto/bn256/cloudflare/gfp_amd64.s b/crypto/bn256/cloudflare/gfp_amd64.s
index bdb4ffb787..64c97eaed9 100644
--- a/crypto/bn256/cloudflare/gfp_amd64.s
+++ b/crypto/bn256/cloudflare/gfp_amd64.s
@@ -49,7 +49,7 @@ TEXT ·gfpNeg(SB),0,$0-16
SBBQ 24(DI), R11
MOVQ $0, AX
- gfpCarry(R8,R9,R10,R11,AX, R12,R13,R14,R15,BX)
+ gfpCarry(R8,R9,R10,R11,AX, R12,R13,R14,CX,BX)
MOVQ c+0(FP), DI
storeBlock(R8,R9,R10,R11, 0(DI))
@@ -68,7 +68,7 @@ TEXT ·gfpAdd(SB),0,$0-24
ADCQ 24(SI), R11
ADCQ $0, R12
- gfpCarry(R8,R9,R10,R11,R12, R13,R14,R15,AX,BX)
+ gfpCarry(R8,R9,R10,R11,R12, R13,R14,CX,AX,BX)
MOVQ c+0(FP), DI
storeBlock(R8,R9,R10,R11, 0(DI))
@@ -83,7 +83,7 @@ TEXT ·gfpSub(SB),0,$0-24
MOVQ ·p2+0(SB), R12
MOVQ ·p2+8(SB), R13
MOVQ ·p2+16(SB), R14
- MOVQ ·p2+24(SB), R15
+ MOVQ ·p2+24(SB), CX
MOVQ $0, AX
SUBQ 0(SI), R8
@@ -94,12 +94,12 @@ TEXT ·gfpSub(SB),0,$0-24
CMOVQCC AX, R12
CMOVQCC AX, R13
CMOVQCC AX, R14
- CMOVQCC AX, R15
+ CMOVQCC AX, CX
ADDQ R12, R8
ADCQ R13, R9
ADCQ R14, R10
- ADCQ R15, R11
+ ADCQ CX, R11
MOVQ c+0(FP), DI
storeBlock(R8,R9,R10,R11, 0(DI))
@@ -115,7 +115,7 @@ TEXT ·gfpMul(SB),0,$160-24
mulBMI2(0(DI),8(DI),16(DI),24(DI), 0(SI))
storeBlock( R8, R9,R10,R11, 0(SP))
- storeBlock(R12,R13,R14,R15, 32(SP))
+ storeBlock(R12,R13,R14,CX, 32(SP))
gfpReduceBMI2()
JMP end
@@ -125,5 +125,5 @@ nobmi2Mul:
end:
MOVQ c+0(FP), DI
- storeBlock(R12,R13,R14,R15, 0(DI))
+ storeBlock(R12,R13,R14,CX, 0(DI))
RET
diff --git a/crypto/bn256/cloudflare/mul_amd64.h b/crypto/bn256/cloudflare/mul_amd64.h
index bab5da8313..9d8e4b37db 100644
--- a/crypto/bn256/cloudflare/mul_amd64.h
+++ b/crypto/bn256/cloudflare/mul_amd64.h
@@ -165,7 +165,7 @@
\
\ // Add the 512-bit intermediate to m*N
loadBlock(96+stack, R8,R9,R10,R11) \
- loadBlock(128+stack, R12,R13,R14,R15) \
+ loadBlock(128+stack, R12,R13,R14,CX) \
\
MOVQ $0, AX \
ADDQ 0+stack, R8 \
@@ -175,7 +175,7 @@
ADCQ 32+stack, R12 \
ADCQ 40+stack, R13 \
ADCQ 48+stack, R14 \
- ADCQ 56+stack, R15 \
+ ADCQ 56+stack, CX \
ADCQ $0, AX \
\
- gfpCarry(R12,R13,R14,R15,AX, R8,R9,R10,R11,BX)
+ gfpCarry(R12,R13,R14,CX,AX, R8,R9,R10,R11,BX)
diff --git a/crypto/bn256/cloudflare/mul_bmi2_amd64.h b/crypto/bn256/cloudflare/mul_bmi2_amd64.h
index 71ad0499af..403566c6fa 100644
--- a/crypto/bn256/cloudflare/mul_bmi2_amd64.h
+++ b/crypto/bn256/cloudflare/mul_bmi2_amd64.h
@@ -29,7 +29,7 @@
ADCQ $0, R14 \
\
MOVQ a2, DX \
- MOVQ $0, R15 \
+ MOVQ $0, CX \
MULXQ 0+rb, AX, BX \
ADDQ AX, R10 \
ADCQ BX, R11 \
@@ -43,7 +43,7 @@
MULXQ 24+rb, AX, BX \
ADCQ AX, R13 \
ADCQ BX, R14 \
- ADCQ $0, R15 \
+ ADCQ $0, CX \
\
MOVQ a3, DX \
MULXQ 0+rb, AX, BX \
@@ -52,13 +52,13 @@
MULXQ 16+rb, AX, BX \
ADCQ AX, R13 \
ADCQ BX, R14 \
- ADCQ $0, R15 \
+ ADCQ $0, CX \
MULXQ 8+rb, AX, BX \
ADDQ AX, R12 \
ADCQ BX, R13 \
MULXQ 24+rb, AX, BX \
ADCQ AX, R14 \
- ADCQ BX, R15
+ ADCQ BX, CX
#define gfpReduceBMI2() \
\ // m = (T * N') mod R, store m in R8:R9:R10:R11
@@ -106,7 +106,7 @@
ADCQ 32(SP), R12 \
ADCQ 40(SP), R13 \
ADCQ 48(SP), R14 \
- ADCQ 56(SP), R15 \
+ ADCQ 56(SP), CX \
ADCQ $0, AX \
\
- gfpCarry(R12,R13,R14,R15,AX, R8,R9,R10,R11,BX)
+ gfpCarry(R12,R13,R14,CX,AX, R8,R9,R10,R11,BX)
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 58589572f7..f71617a59a 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -1458,15 +1458,18 @@ func (d *Downloader) processHeaders(origin uint64, pivot uint64, bn *big.Int) er
if chunk[len(chunk)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot {
frequency = 1
}
- if n, err := d.lightchain.InsertHeaderChain(chunk, frequency); err != nil {
- rollbackErr = err
- // If some headers were inserted, track them as uncertain
- if n > 0 && rollback == 0 {
- rollback = chunk[0].Number.Uint64()
+ if len(chunk) > 0 {
+ if n, err := d.lightchain.InsertHeaderChain(chunk, frequency); err != nil {
+ rollbackErr = err
+ // If some headers were inserted, track them as uncertain
+ if n > 0 && rollback == 0 {
+ rollback = chunk[0].Number.Uint64()
+ }
+ log.Debug("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash(), "parent", chunk[n].ParentHash, "err", err)
+ return fmt.Errorf("%w: %v", errInvalidChain, err)
}
- log.Debug("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash(), "parent", chunk[n].ParentHash, "err", err)
- return fmt.Errorf("%w: %v", errInvalidChain, err)
}
+
// All verifications passed, track all headers within the allotted limits
head := chunk[len(chunk)-1].Number.Uint64()
if head-rollback > uint64(fsHeaderSafetyNet) {
diff --git a/eth/gasprice/feehistory.go b/eth/gasprice/feehistory.go
index e694890395..0ef9a533fe 100644
--- a/eth/gasprice/feehistory.go
+++ b/eth/gasprice/feehistory.go
@@ -117,7 +117,7 @@ func (oracle *Oracle) processBlock(bf *blockFees, percentiles []float64) {
reward, _ := tx.EffectiveGasTip(bf.block.BaseFee())
sorter[i] = txGasAndReward{gasUsed: bf.receipts[i].GasUsed, reward: reward}
}
- sort.Sort(sorter)
+ sort.Stable(sorter)
var txIndex int
sumGasUsed := sorter[0].gasUsed
@@ -184,10 +184,11 @@ func (oracle *Oracle) resolveBlockRange(ctx context.Context, lastBlock rpc.Block
// actually processed range is returned to avoid ambiguity when parts of the requested range
// are not available or when the head has changed during processing this request.
// Three arrays are returned based on the processed blocks:
-// - reward: the requested percentiles of effective priority fees per gas of transactions in each
-// block, sorted in ascending order and weighted by gas used.
-// - baseFee: base fee per gas in the given block
-// - gasUsedRatio: gasUsed/gasLimit in the given block
+// - reward: the requested percentiles of effective priority fees per gas of transactions in each
+// block, sorted in ascending order and weighted by gas used.
+// - baseFee: base fee per gas in the given block
+// - gasUsedRatio: gasUsed/gasLimit in the given block
+//
// Note: baseFee includes the next block after the newest of the returned range, because this
// value can be derived from the newest block.
func (oracle *Oracle) FeeHistory(ctx context.Context, blocks int, unresolvedLastBlock rpc.BlockNumber, rewardPercentiles []float64) (*big.Int, [][]*big.Int, []*big.Int, []float64, error) {
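`sort.Stable` matters here because several transactions in a block can pay the identical effective tip; a stable sort keeps their block order, making the reported percentiles deterministic. A self-contained sketch of the difference, with `reward` simplified to a plain integer (the real type wraps `*big.Int`):

```go
package main

import (
	"fmt"
	"sort"
)

type txGasAndReward struct {
	gasUsed uint64
	reward  uint64 // simplified stand-in for *big.Int
}

type sortGasAndReward []txGasAndReward

func (s sortGasAndReward) Len() int           { return len(s) }
func (s sortGasAndReward) Less(i, j int) bool { return s[i].reward < s[j].reward }
func (s sortGasAndReward) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

func main() {
	txs := sortGasAndReward{{100, 5}, {200, 5}, {300, 5}} // equal rewards
	sort.Stable(txs)
	fmt.Println(txs) // [{100 5} {200 5} {300 5}]: input order preserved
}
```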
diff --git a/eth/protocols/eth/handler_test.go b/eth/protocols/eth/handler_test.go
index e7c1e1e2c0..73be3bbe90 100644
--- a/eth/protocols/eth/handler_test.go
+++ b/eth/protocols/eth/handler_test.go
@@ -274,11 +274,11 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
t.Errorf("test %d: headers mismatch: %v", i, err)
}
} else {
- p2p.Send(peer.app, GetBlockHeadersMsg, GetBlockHeadersPacket66{
+ p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket66{
RequestId: 123,
GetBlockHeadersPacket: tt.query,
})
- if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, BlockHeadersPacket66{
+ if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, &BlockHeadersPacket66{
RequestId: 123,
BlockHeadersPacket: headers,
}); err != nil {
@@ -296,14 +296,12 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
t.Errorf("test %d: headers mismatch: %v", i, err)
}
} else {
- p2p.Send(peer.app, GetBlockHeadersMsg, GetBlockHeadersPacket66{
+ p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket66{
RequestId: 456,
GetBlockHeadersPacket: tt.query,
})
- if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, BlockHeadersPacket66{
- RequestId: 456,
- BlockHeadersPacket: headers,
- }); err != nil {
+ expected := &BlockHeadersPacket66{RequestId: 456, BlockHeadersPacket: headers}
+ if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, expected); err != nil {
t.Errorf("test %d: headers mismatch: %v", i, err)
}
}
@@ -389,11 +387,11 @@ func testGetBlockBodies(t *testing.T, protocol uint) {
t.Errorf("test %d: bodies mismatch: %v", i, err)
}
} else {
- p2p.Send(peer.app, GetBlockBodiesMsg, GetBlockBodiesPacket66{
+ p2p.Send(peer.app, GetBlockBodiesMsg, &GetBlockBodiesPacket66{
RequestId: 123,
GetBlockBodiesPacket: hashes,
})
- if err := p2p.ExpectMsg(peer.app, BlockBodiesMsg, BlockBodiesPacket66{
+ if err := p2p.ExpectMsg(peer.app, BlockBodiesMsg, &BlockBodiesPacket66{
RequestId: 123,
BlockBodiesPacket: bodies,
}); err != nil {
@@ -465,7 +463,7 @@ func testGetNodeData(t *testing.T, protocol uint) {
p2p.Send(peer.app, GetNodeDataMsg, hashes)
} else {
// Request all hashes.
- p2p.Send(peer.app, GetNodeDataMsg, GetNodeDataPacket66{
+ p2p.Send(peer.app, GetNodeDataMsg, &GetNodeDataPacket66{
RequestId: 123,
GetNodeDataPacket: hashes,
})
@@ -583,11 +581,11 @@ func testGetBlockReceipts(t *testing.T, protocol uint) {
t.Errorf("receipts mismatch: %v", err)
}
} else {
- p2p.Send(peer.app, GetReceiptsMsg, GetReceiptsPacket66{
+ p2p.Send(peer.app, GetReceiptsMsg, &GetReceiptsPacket66{
RequestId: 123,
GetReceiptsPacket: hashes,
})
- if err := p2p.ExpectMsg(peer.app, ReceiptsMsg, ReceiptsPacket66{
+ if err := p2p.ExpectMsg(peer.app, ReceiptsMsg, &ReceiptsPacket66{
RequestId: 123,
ReceiptsPacket: receipts,
}); err != nil {
diff --git a/eth/protocols/eth/peer.go b/eth/protocols/eth/peer.go
index 8ce0ec658f..6f1e1adfab 100644
--- a/eth/protocols/eth/peer.go
+++ b/eth/protocols/eth/peer.go
@@ -248,7 +248,7 @@ func (p *Peer) ReplyPooledTransactionsRLP(id uint64, hashes []common.Hash, txs [
p.knownTxs.Add(hashes...)
// Not packed into PooledTransactionsPacket to avoid RLP decoding
- return p2p.Send(p.rw, PooledTransactionsMsg, PooledTransactionsRLPPacket66{
+ return p2p.Send(p.rw, PooledTransactionsMsg, &PooledTransactionsRLPPacket66{
RequestId: id,
PooledTransactionsRLPPacket: txs,
})
@@ -309,7 +309,7 @@ func (p *Peer) SendBlockHeaders(headers []*types.Header) error {
// ReplyBlockHeaders is the eth/66 version of SendBlockHeaders.
func (p *Peer) ReplyBlockHeaders(id uint64, headers []*types.Header) error {
- return p2p.Send(p.rw, BlockHeadersMsg, BlockHeadersPacket66{
+ return p2p.Send(p.rw, BlockHeadersMsg, &BlockHeadersPacket66{
RequestId: id,
BlockHeadersPacket: headers,
})
@@ -324,7 +324,7 @@ func (p *Peer) SendBlockBodiesRLP(bodies []rlp.RawValue) error {
// ReplyBlockBodiesRLP is the eth/66 version of SendBlockBodiesRLP.
func (p *Peer) ReplyBlockBodiesRLP(id uint64, bodies []rlp.RawValue) error {
// Not packed into BlockBodiesPacket to avoid RLP decoding
- return p2p.Send(p.rw, BlockBodiesMsg, BlockBodiesRLPPacket66{
+ return p2p.Send(p.rw, BlockBodiesMsg, &BlockBodiesRLPPacket66{
RequestId: id,
BlockBodiesRLPPacket: bodies,
})
@@ -338,7 +338,7 @@ func (p *Peer) SendNodeData(data [][]byte) error {
// ReplyNodeData is the eth/66 response to GetNodeData.
func (p *Peer) ReplyNodeData(id uint64, data [][]byte) error {
- return p2p.Send(p.rw, NodeDataMsg, NodeDataPacket66{
+ return p2p.Send(p.rw, NodeDataMsg, &NodeDataPacket66{
RequestId: id,
NodeDataPacket: data,
})
@@ -352,7 +352,7 @@ func (p *Peer) SendReceiptsRLP(receipts []rlp.RawValue) error {
// ReplyReceiptsRLP is the eth/66 response to GetReceipts.
func (p *Peer) ReplyReceiptsRLP(id uint64, receipts []rlp.RawValue) error {
- return p2p.Send(p.rw, ReceiptsMsg, ReceiptsRLPPacket66{
+ return p2p.Send(p.rw, ReceiptsMsg, &ReceiptsRLPPacket66{
RequestId: id,
ReceiptsRLPPacket: receipts,
})
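Sending `&XxxPacket66{...}` instead of the bare struct is required once these packets gain rlpgen-style `EncodeRLP` methods on pointer receivers: a pointer-receiver method is not in the value's method set, so passing by value would bypass the fast encoder. A minimal sketch of the method-set rule, using a stand-in interface rather than the real `rlp.Encoder`:

```go
package main

import "fmt"

type encoder interface{ encodeRLP() string } // stand-in for rlp.Encoder

type packet struct{ id uint64 }

func (p *packet) encodeRLP() string { return fmt.Sprintf("generated fast path (id=%d)", p.id) }

func send(msg interface{}) {
	if e, ok := msg.(encoder); ok {
		fmt.Println(e.encodeRLP())
		return
	}
	fmt.Println("reflection fallback")
}

func main() {
	send(packet{1})  // reflection fallback
	send(&packet{1}) // generated fast path (id=1)
}
```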
diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go
index 0b2e61956e..9ba1baad2d 100644
--- a/eth/protocols/snap/sync.go
+++ b/eth/protocols/snap/sync.go
@@ -2803,6 +2803,11 @@ func (s *Syncer) reportSyncProgress(force bool) {
accountFills,
).Uint64())
+ // Don't report anything until we have meaningful progress
+ if estBytes < 1.0 {
+ return
+ }
+
elapsed := time.Since(s.startTime)
estTime := elapsed / time.Duration(synced) * time.Duration(estBytes)
diff --git a/eth/protocols/snap/sync_test.go b/eth/protocols/snap/sync_test.go
index 8350262e7c..e60478cd07 100644
--- a/eth/protocols/snap/sync_test.go
+++ b/eth/protocols/snap/sync_test.go
@@ -1351,7 +1351,7 @@ func makeAccountTrieNoStorage(n int) (*trie.Trie, entrySlice) {
accTrie, _ := trie.New(common.Hash{}, db)
var entries entrySlice
for i := uint64(1); i <= uint64(n); i++ {
- value, _ := rlp.EncodeToBytes(types.StateAccount{
+ value, _ := rlp.EncodeToBytes(&types.StateAccount{
Nonce: i,
Balance: big.NewInt(int64(i)),
Root: emptyRoot,
@@ -1397,7 +1397,7 @@ func makeBoundaryAccountTrie(n int) (*trie.Trie, entrySlice) {
}
// Fill boundary accounts
for i := 0; i < len(boundaries); i++ {
- value, _ := rlp.EncodeToBytes(types.StateAccount{
+ value, _ := rlp.EncodeToBytes(&types.StateAccount{
Nonce: uint64(0),
Balance: big.NewInt(int64(i)),
Root: emptyRoot,
@@ -1410,7 +1410,7 @@ func makeBoundaryAccountTrie(n int) (*trie.Trie, entrySlice) {
}
// Fill other accounts if required
for i := uint64(1); i <= uint64(n); i++ {
- value, _ := rlp.EncodeToBytes(types.StateAccount{
+ value, _ := rlp.EncodeToBytes(&types.StateAccount{
Nonce: i,
Balance: big.NewInt(int64(i)),
Root: emptyRoot,
@@ -1447,7 +1447,7 @@ func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool)
stTrie, stEntries := makeStorageTrieWithSeed(uint64(slots), i, db)
stRoot := stTrie.Hash()
stTrie.Commit(nil)
- value, _ := rlp.EncodeToBytes(types.StateAccount{
+ value, _ := rlp.EncodeToBytes(&types.StateAccount{
Nonce: i,
Balance: big.NewInt(int64(i)),
Root: stRoot,
@@ -1495,7 +1495,7 @@ func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (*trie
if code {
codehash = getCodeHash(i)
}
- value, _ := rlp.EncodeToBytes(types.StateAccount{
+ value, _ := rlp.EncodeToBytes(&types.StateAccount{
Nonce: i,
Balance: big.NewInt(int64(i)),
Root: stRoot,
diff --git a/ethdb/batch.go b/ethdb/batch.go
index 1353693318..541f40c838 100644
--- a/ethdb/batch.go
+++ b/ethdb/batch.go
@@ -43,6 +43,9 @@ type Batcher interface {
// NewBatch creates a write-only database that buffers changes to its host db
// until a final write is called.
NewBatch() Batch
+
+ // NewBatchWithSize creates a write-only database batch with a pre-allocated buffer.
+ NewBatchWithSize(size int) Batch
}
// HookedBatch wraps an arbitrary batch where each operation may be hooked into
diff --git a/ethdb/database.go b/ethdb/database.go
index 1eacd37f46..7865b4535e 100644
--- a/ethdb/database.go
+++ b/ethdb/database.go
@@ -64,6 +64,7 @@ type KeyValueStore interface {
Iteratee
Stater
Compacter
+ Snapshotter
io.Closer
}
@@ -86,6 +87,10 @@ type AncientReader interface {
// Ancients returns the ancient item numbers in the ancient store.
Ancients() (uint64, error)
+ // Tail returns the number of the first stored item in the freezer.
+ // This number can also be interpreted as the total number of deleted items.
+ Tail() (uint64, error)
+
// AncientSize returns the ancient size of the specified category.
AncientSize(kind string) (uint64, error)
}
@@ -106,11 +111,24 @@ type AncientWriter interface {
// The integer return value is the total size of the written data.
ModifyAncients(func(AncientWriteOp) error) (int64, error)
- // TruncateAncients discards all but the first n ancient data from the ancient store.
- TruncateAncients(n uint64) error
+ // TruncateHead discards all but the first n ancient data from the ancient store.
+ // After the truncation, the latest item that can be accessed is item_n-1 (counting from 0).
+ TruncateHead(n uint64) error
+
+ // TruncateTail discards the first n ancient data from the ancient store. Items that
+ // have already been deleted are ignored. After the truncation, the earliest item that
+ // can be accessed is item_n (counting from 0). Deleted items may not be removed from
+ // the ancient store immediately; they are only removed in bulk once the accumulated
+ // deleted data reaches a threshold.
+ TruncateTail(n uint64) error
// Sync flushes all in-memory ancient store data to disk.
Sync() error
+
+ // MigrateTable processes and migrates entries of a given table to a new format.
+ // The second argument is a function that takes a raw entry and returns it
+ // in the newest format.
+ MigrateTable(string, func([]byte) ([]byte, error)) error
}
// AncientWriteOp is given to the function argument of ModifyAncients.
@@ -153,5 +171,6 @@ type Database interface {
Iteratee
Stater
Compacter
+ Snapshotter
io.Closer
}
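A toy model of the new head/tail contract may help: items keep their absolute numbers, `TruncateHead(n)` keeps `[Tail, n)` and `TruncateTail(n)` keeps `[n, Ancients)`. This is only a sketch of the index arithmetic, not the real freezer:

```go
package main

import "fmt"

// toyFreezer stores the half-open range [tail, head) of item numbers.
type toyFreezer struct{ tail, head uint64 }

func (f *toyFreezer) Ancients() uint64 { return f.head }
func (f *toyFreezer) Tail() uint64     { return f.tail }

func (f *toyFreezer) TruncateHead(n uint64) {
	if n < f.head {
		f.head = n
	}
}

func (f *toyFreezer) TruncateTail(n uint64) {
	if n > f.tail {
		f.tail = n
	}
}

func main() {
	f := &toyFreezer{tail: 0, head: 100} // items 0..99 stored
	f.TruncateHead(90)                   // latest accessible item: 89
	f.TruncateTail(10)                   // earliest accessible item: 10
	fmt.Println(f.Tail(), f.Ancients())  // 10 90
}
```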
diff --git a/ethdb/dbtest/testsuite.go b/ethdb/dbtest/testsuite.go
index f727e6d84d..50b1307e6c 100644
--- a/ethdb/dbtest/testsuite.go
+++ b/ethdb/dbtest/testsuite.go
@@ -312,6 +312,68 @@ func TestDatabaseSuite(t *testing.T, New func() ethdb.KeyValueStore) {
}
})
+ t.Run("Snapshot", func(t *testing.T) {
+ db := New()
+ defer db.Close()
+
+ initial := map[string]string{
+ "k1": "v1", "k2": "v2", "k3": "", "k4": "",
+ }
+ for k, v := range initial {
+ db.Put([]byte(k), []byte(v))
+ }
+ snapshot, err := db.NewSnapshot()
+ if err != nil {
+ t.Fatal(err)
+ }
+ for k, v := range initial {
+ got, err := snapshot.Get([]byte(k))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(got, []byte(v)) {
+ t.Fatalf("Unexpected value want: %v, got %v", v, got)
+ }
+ }
+
+ // Flush more modifications into the database, ensure the snapshot
+ // isn't affected.
+ var (
+ update = map[string]string{"k1": "v1-b", "k3": "v3-b"}
+ insert = map[string]string{"k5": "v5-b"}
+ delete = map[string]string{"k2": ""}
+ )
+ for k, v := range update {
+ db.Put([]byte(k), []byte(v))
+ }
+ for k, v := range insert {
+ db.Put([]byte(k), []byte(v))
+ }
+ for k := range delete {
+ db.Delete([]byte(k))
+ }
+ for k, v := range initial {
+ got, err := snapshot.Get([]byte(k))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(got, []byte(v)) {
+ t.Fatalf("Unexpected value want: %v, got %v", v, got)
+ }
+ }
+ for k := range insert {
+ got, err := snapshot.Get([]byte(k))
+ if err == nil || len(got) != 0 {
+ t.Fatal("Unexpected value")
+ }
+ }
+ for k := range delete {
+ got, err := snapshot.Get([]byte(k))
+ if err != nil || len(got) == 0 {
+ t.Fatal("Unexpected deletion")
+ }
+ }
+ })
}
func iterateKeys(it ethdb.Iterator) []string {
diff --git a/ethdb/leveldb/leveldb.go b/ethdb/leveldb/leveldb.go
index 2bb297e9f4..99ed9b9493 100644
--- a/ethdb/leveldb/leveldb.go
+++ b/ethdb/leveldb/leveldb.go
@@ -214,6 +214,14 @@ func (db *Database) NewBatch() ethdb.Batch {
}
}
+// NewBatchWithSize creates a write-only database batch with a pre-allocated buffer.
+func (db *Database) NewBatchWithSize(size int) ethdb.Batch {
+ return &batch{
+ db: db.db,
+ b: leveldb.MakeBatch(size),
+ }
+}
+
// NewIterator creates a binary-alphabetical iterator over a subset
// of database content with a particular key prefix, starting at a particular
// initial key (or after, if it does not exist).
@@ -221,6 +229,19 @@ func (db *Database) NewIterator(prefix []byte, start []byte) ethdb.Iterator {
return db.db.NewIterator(bytesPrefixRange(prefix, start), nil)
}
+// NewSnapshot creates a database snapshot based on the current state.
+// The created snapshot is not affected by any mutations that happen on the
+// database afterwards.
+// Note: don't forget to release the snapshot once it is no longer needed,
+// otherwise the stale data will never be cleaned up by the underlying compactor.
+func (db *Database) NewSnapshot() (ethdb.Snapshot, error) {
+ snap, err := db.db.GetSnapshot()
+ if err != nil {
+ return nil, err
+ }
+ return &snapshot{db: snap}, nil
+}
+
// Stat returns a particular internal stat of the database.
func (db *Database) Stat(property string) (string, error) {
return db.db.GetProperty(property)
@@ -522,3 +543,26 @@ func bytesPrefixRange(prefix, start []byte) *util.Range {
r.Start = append(r.Start, start...)
return r
}
+
+// snapshot wraps a leveldb snapshot for implementing the Snapshot interface.
+type snapshot struct {
+ db *leveldb.Snapshot
+}
+
+// Has reports whether a key is present in the snapshot backed by a key-value
+// data store.
+func (snap *snapshot) Has(key []byte) (bool, error) {
+ return snap.db.Has(key, nil)
+}
+
+// Get retrieves the value of the given key if it's present in the snapshot
+// backed by a key-value data store.
+func (snap *snapshot) Get(key []byte) ([]byte, error) {
+ return snap.db.Get(key, nil)
+}
+
+// Release releases associated resources. Release should always succeed and can
+// be called multiple times without causing an error.
+func (snap *snapshot) Release() {
+ snap.db.Release()
+}
diff --git a/ethdb/memorydb/memorydb.go b/ethdb/memorydb/memorydb.go
index 261f6dabd4..a8c8c2bc0a 100644
--- a/ethdb/memorydb/memorydb.go
+++ b/ethdb/memorydb/memorydb.go
@@ -133,6 +133,13 @@ func (db *Database) NewBatch() ethdb.Batch {
}
}
+// NewBatchWithSize creates a write-only database batch with a pre-allocated buffer.
+func (db *Database) NewBatchWithSize(size int) ethdb.Batch {
+ return &batch{
+ db: db,
+ }
+}
+
// NewIterator creates a binary-alphabetical iterator over a subset
// of database content with a particular key prefix, starting at a particular
// initial key (or after, if it does not exist).
@@ -168,6 +175,13 @@ func (db *Database) NewIterator(prefix []byte, start []byte) ethdb.Iterator {
}
}
+// NewSnapshot creates a database snapshot based on the current state.
+// The created snapshot is not affected by any mutations that happen on the
+// database afterwards.
+func (db *Database) NewSnapshot() (ethdb.Snapshot, error) {
+ return newSnapshot(db), nil
+}
+
// Stat returns a particular internal stat of the database.
func (db *Database) Stat(property string) (string, error) {
return "", errors.New("unknown property")
diff --git a/ethdb/snapshot.go b/ethdb/snapshot.go
new file mode 100644
index 0000000000..753e0f6b1f
--- /dev/null
+++ b/ethdb/snapshot.go
@@ -0,0 +1,41 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package ethdb
+
+type Snapshot interface {
+ // Has reports whether a key is present in the snapshot backed by a key-value
+ // data store.
+ Has(key []byte) (bool, error)
+
+ // Get retrieves the value of the given key if it's present in the snapshot
+ // backed by a key-value data store.
+ Get(key []byte) ([]byte, error)
+
+ // Release releases associated resources. Release should always succeed and can
+ // be called multiple times without causing an error.
+ Release()
+}
+
+// Snapshotter wraps the Snapshot method of a backing data store.
+type Snapshotter interface {
+ // NewSnapshot creates a database snapshot based on the current state.
+ // The created snapshot is not affected by any mutations that happen on the
+ // database afterwards.
+ // Note: don't forget to release the snapshot once it is no longer needed,
+ // otherwise the stale data will never be cleaned up by the underlying compactor.
+ NewSnapshot() (Snapshot, error)
+}
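Typical usage of the new interface, sketched against `memorydb` (which also gains `NewSnapshot` in this diff); the essential part is pairing every snapshot with a `Release`:

```go
package main

import (
	"fmt"

	"github.com/PlatONnetwork/PlatON-Go/ethdb"
	"github.com/PlatONnetwork/PlatON-Go/ethdb/memorydb"
)

func main() {
	db := memorydb.New()
	db.Put([]byte("k"), []byte("v1"))

	snap, err := db.NewSnapshot()
	if err != nil {
		panic(err)
	}
	defer snap.Release() // on leveldb, skipping this blocks compaction of stale data

	db.Put([]byte("k"), []byte("v2")) // invisible to the snapshot

	v, _ := snap.Get([]byte("k"))
	fmt.Println(string(v)) // v1

	var _ ethdb.Snapshotter = db // the store satisfies the new interface
}
```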
diff --git a/go.mod b/go.mod
index 8c90d7d5e8..4f334cd6f2 100644
--- a/go.mod
+++ b/go.mod
@@ -67,6 +67,9 @@ require (
require (
github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c
+ github.com/dgrijalva/jwt-go v3.2.0+incompatible
+ github.com/golang-jwt/jwt/v4 v4.5.0
+ github.com/herumi/bls v1.37.0
github.com/influxdata/influxdb-client-go/v2 v2.12.3
golang.org/x/net v0.17.0
)
diff --git a/go.sum b/go.sum
index febe08a485..0655e5a6a3 100644
--- a/go.sum
+++ b/go.sum
@@ -55,6 +55,7 @@ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeC
github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218=
github.com/deepmap/oapi-codegen v1.8.2 h1:SegyeYGcdi0jLLrpbCMoJxnUUn8GBXHsvr4rbzjuhfU=
github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 h1:Izz0+t1Z5nI16/II7vuEo/nHjodOg0p7+OiDpjX5t1E=
@@ -99,6 +100,8 @@ github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5Nq
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
+github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
@@ -134,6 +137,8 @@ github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/herumi/bls v1.37.0 h1:EKPaFujxWsxSMlfN1NeR9GTfVOeAsAaNRGbdBfn9lBE=
+github.com/herumi/bls v1.37.0/go.mod h1:CnmR5QZ/QBnBE8Z55O+OtmUc6ICUdrOW9fwSRQwz5Bo=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
github.com/holiman/uint256 v1.1.1/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw=
diff --git a/graphql/graphql.go b/graphql/graphql.go
index e059a2839b..d815a1cd93 100644
--- a/graphql/graphql.go
+++ b/graphql/graphql.go
@@ -23,6 +23,7 @@ import (
"fmt"
ethereum "github.com/PlatONnetwork/PlatON-Go"
"github.com/PlatONnetwork/PlatON-Go/common/math"
+ "github.com/PlatONnetwork/PlatON-Go/consensus/misc"
"math/big"
"strconv"
@@ -100,6 +101,14 @@ func (a *Account) Balance(ctx context.Context) (hexutil.Big, error) {
}
func (a *Account) TransactionCount(ctx context.Context) (hexutil.Uint64, error) {
+ // Ask transaction pool for the nonce which includes pending transactions
+ if blockNr, ok := a.blockNrOrHash.Number(); ok && blockNr == rpc.PendingBlockNumber {
+ nonce, err := a.backend.GetPoolNonce(ctx, a.address)
+ if err != nil {
+ return 0, err
+ }
+ return hexutil.Uint64(nonce), nil
+ }
state, err := a.getState(ctx)
if err != nil {
return 0, err
@@ -245,6 +254,10 @@ func (t *Transaction) EffectiveGasPrice(ctx context.Context) (*hexutil.Big, erro
if err != nil || tx == nil {
return nil, err
}
+ // Pending tx
+ if t.block == nil {
+ return nil, nil
+ }
header, err := t.block.resolveHeader(ctx)
if err != nil || header == nil {
return nil, err
@@ -285,6 +298,30 @@ func (t *Transaction) MaxPriorityFeePerGas(ctx context.Context) (*hexutil.Big, e
}
}
+func (t *Transaction) EffectiveTip(ctx context.Context) (*hexutil.Big, error) {
+ tx, err := t.resolve(ctx)
+ if err != nil || tx == nil {
+ return nil, err
+ }
+ // Pending tx
+ if t.block == nil {
+ return nil, nil
+ }
+ header, err := t.block.resolveHeader(ctx)
+ if err != nil || header == nil {
+ return nil, err
+ }
+ if header.BaseFee == nil {
+ return (*hexutil.Big)(tx.GasPrice()), nil
+ }
+
+ tip, err := tx.EffectiveGasTip(header.BaseFee)
+ if err != nil {
+ return nil, err
+ }
+ return (*hexutil.Big)(tip), nil
+}
+
func (t *Transaction) Value(ctx context.Context) (hexutil.Big, error) {
tx, err := t.resolve(ctx)
if err != nil || tx == nil {
@@ -598,6 +635,22 @@ func (b *Block) BaseFeePerGas(ctx context.Context) (*hexutil.Big, error) {
return (*hexutil.Big)(header.BaseFee), nil
}
+func (b *Block) NextBaseFeePerGas(ctx context.Context) (*hexutil.Big, error) {
+ header, err := b.resolveHeader(ctx)
+ if err != nil {
+ return nil, err
+ }
+ chaincfg := b.backend.ChainConfig()
+ if header.BaseFee == nil {
+ // Make sure next block doesn't enable EIP-1559
+ if !chaincfg.IsPauli(new(big.Int).Add(header.Number, common.Big1)) {
+ return nil, nil
+ }
+ }
+ nextBaseFee := misc.CalcBaseFee(chaincfg, header)
+ return (*hexutil.Big)(nextBaseFee), nil
+}
+
func (b *Block) Parent(ctx context.Context) (*Block, error) {
if _, err := b.resolveHeader(ctx); err != nil {
return nil, err
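`NextBaseFeePerGas` leans on `misc.CalcBaseFee`, i.e. the EIP-1559 base-fee update rule. A standalone sketch of that rule with the standard parameters (elasticity multiplier 2, change denominator 8); uint64 math for brevity, whereas the real code uses `big.Int`:

```go
package main

import "fmt"

func nextBaseFee(parentBaseFee, gasUsed, gasLimit uint64) uint64 {
	const (
		elasticity  = 2 // ElasticityMultiplier
		denominator = 8 // BaseFeeChangeDenominator
	)
	target := gasLimit / elasticity
	switch {
	case gasUsed == target:
		return parentBaseFee
	case gasUsed > target:
		delta := parentBaseFee * (gasUsed - target) / target / denominator
		if delta < 1 {
			delta = 1 // increases are at least 1 wei/von
		}
		return parentBaseFee + delta
	default:
		return parentBaseFee - parentBaseFee*(target-gasUsed)/target/denominator
	}
}

func main() {
	fmt.Println(nextBaseFee(1_000_000_000, 15_000_000, 30_000_000)) // 1000000000 (at target)
	fmt.Println(nextBaseFee(1_000_000_000, 30_000_000, 30_000_000)) // 1125000000 (+12.5%)
	fmt.Println(nextBaseFee(1_000_000_000, 0, 30_000_000))          // 875000000  (-12.5%)
}
```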
diff --git a/graphql/schema.go b/graphql/schema.go
index fc6ccf0dab..f031c5bc5f 100644
--- a/graphql/schema.go
+++ b/graphql/schema.go
@@ -94,10 +94,12 @@ const schema string = `
value: BigInt!
# GasPrice is the price offered to miners for gas, in wei per unit.
gasPrice: BigInt!
- # MaxFeePerGas is the maximum fee per gas offered to include a transaction, in wei.
- maxFeePerGas: BigInt
- # MaxPriorityFeePerGas is the maximum miner tip per gas offered to include a transaction, in wei.
- maxPriorityFeePerGas: BigInt
+ # MaxFeePerGas is the maximum fee per gas offered to include a transaction, in von.
+ maxFeePerGas: BigInt
+ # MaxPriorityFeePerGas is the maximum miner tip per gas offered to include a transaction, in von.
+ maxPriorityFeePerGas: BigInt
+ # EffectiveTip is the actual amount of reward going to the miner after considering the max fee cap.
+ effectiveTip: BigInt
# Gas is the maximum amount of gas this transaction can consume.
gas: Long!
# InputData is the data supplied to the target of the transaction.
@@ -187,8 +189,10 @@ const schema string = `
gasLimit: Long!
# GasUsed is the amount of gas that was used executing transactions in this block.
gasUsed: Long!
- # BaseFeePerGas is the fee perunit of gas burned by the protocol in this block.
- baseFeePerGas: BigInt
+ # BaseFeePerGas is the fee per unit of gas burned by the protocol in this block.
+ baseFeePerGas: BigInt
+ # NextBaseFeePerGas is the fee per unit of gas which needs to be burned in the next block.
+ nextBaseFeePerGas: BigInt
# Timestamp is the unix timestamp at which this block was mined.
timestamp: Long!
# LogsBloom is a bloom filter that can be used to check if a block may
@@ -244,10 +248,10 @@ const schema string = `
gas: Long
# GasPrice is the price, in wei, offered for each unit of gas.
gasPrice: BigInt
- # MaxFeePerGas is the maximum fee per gas offered, in wei.
- maxFeePerGas: BigInt
- # MaxPriorityFeePerGas is the maximum miner tip per gas offered, in wei.
- maxPriorityFeePerGas: BigInt
+ # MaxFeePerGas is the maximum fee per gas offered, in von.
+ maxFeePerGas: BigInt
+ # MaxPriorityFeePerGas is the maximum miner tip per gas offered, in von.
+ maxPriorityFeePerGas: BigInt
# Value is the value, in wei, sent along with the call.
value: BigInt
# Data is the data sent to the callee.
diff --git a/graphql/service.go b/graphql/service.go
index 2389ccea37..05161e66eb 100644
--- a/graphql/service.go
+++ b/graphql/service.go
@@ -99,7 +99,7 @@ func newHandler(stack *node.Node, backend ethapi.Backend, cors, vhosts []string)
}
h := handler{Schema: s, SchemaEth: sEth}
- handler := node.NewHTTPHandlerStack(h, cors, vhosts)
+ handler := node.NewHTTPHandlerStack(h, cors, vhosts, nil)
stack.RegisterHandler("GraphQL UI", "/graphql/ui", GraphiQL{})
stack.RegisterHandler("GraphQL UI", "/platon/graphql/ui", GraphiQL{}) // for PlatON
diff --git a/internal/build/azure.go b/internal/build/azure.go
index e617fd620b..013e789486 100644
--- a/internal/build/azure.go
+++ b/internal/build/azure.go
@@ -37,13 +37,11 @@ func AzureBlobstoreUpload(path string, name string, config AzureBlobstoreConfig)
}
// Create an authenticated client against the Azure cloud
credential := azblob.NewSharedKeyCredential(config.Account, config.Token)
- pipeline := azblob.NewPipeline(credential, azblob.PipelineOptions{})
-
- u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net", config.Account))
- service := azblob.NewServiceURL(*u, pipeline)
-
- container := service.NewContainerURL(config.Container)
- blockblob := container.NewBlockBlobURL(name)
+ u := fmt.Sprintf("https://%s.blob.core.windows.net/%s", config.Account, config.Container)
+ container, err := azblob.NewContainerClientWithSharedKey(u, credential, nil)
+ if err != nil {
+ return err
+ }
// Stream the file to upload into the designated blobstore container
in, err := os.Open(path)
@@ -51,54 +49,56 @@ func AzureBlobstoreUpload(path string, name string, config AzureBlobstoreConfig)
return err
}
defer in.Close()
-
- _, err = blockblob.Upload(context.Background(), in, azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{})
+ blockblob := container.NewBlockBlobClient(name)
+ _, err = blockblob.Upload(context.Background(), in, nil)
return err*/
return nil
}
// AzureBlobstoreList lists all the files contained within an azure blobstore.
-/*func AzureBlobstoreList(config AzureBlobstoreConfig) ([]azblob.BlobItem, error) {
- credential := azblob.NewSharedKeyCredential(config.Account, config.Token)
- pipeline := azblob.NewPipeline(credential, azblob.PipelineOptions{})
-
- u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net", config.Account))
- service := azblob.NewServiceURL(*u, pipeline)
-
- // List all the blobs from the container and return them
- container := service.NewContainerURL(config.Container)
-
- res, err := container.ListBlobsFlatSegment(context.Background(), azblob.Marker{}, azblob.ListBlobsSegmentOptions{
- MaxResults: 1024 * 1024 * 1024, // Yes, fetch all of them
- })
- if err != nil {
- return nil, err
- }
- return res.Segment.BlobItems, nil
+/*func AzureBlobstoreList(config AzureBlobstoreConfig) ([]*azblob.BlobItemInternal, error) {
+ // Create an authenticated client against the Azure cloud
+ credential, err := azblob.NewSharedKeyCredential(config.Account, config.Token)
+ if err != nil {
+ return nil, err
+ }
+ u := fmt.Sprintf("https://%s.blob.core.windows.net/%s", config.Account, config.Container)
+ container, err := azblob.NewContainerClientWithSharedKey(u, credential, nil)
+ if err != nil {
+ return nil, err
+ }
+ var maxResults int32 = 5000
+ pager := container.ListBlobsFlat(&azblob.ContainerListBlobFlatSegmentOptions{
+ Maxresults: &maxResults,
+ })
+ var allBlobs []*azblob.BlobItemInternal
+ for pager.NextPage(context.Background()) {
+ res := pager.PageResponse()
+ allBlobs = append(allBlobs, res.ContainerListBlobFlatSegmentResult.Segment.BlobItems...)
+ }
+ return allBlobs, pager.Err()
}*/
// AzureBlobstoreDelete iterates over a list of files to delete and removes them
// from the blobstore.
-/*func AzureBlobstoreDelete(config AzureBlobstoreConfig, blobs []azblob.BlobItem) error {
+/*func AzureBlobstoreDelete(config AzureBlobstoreConfig, blobs []*azblob.BlobItemInternal) error {
if *DryRunFlag {
for _, blob := range blobs {
- fmt.Printf("would delete %s (%s) from %s/%s\n", blob.Name, blob.Properties.LastModified, config.Account, config.Container)
+ fmt.Printf("would delete %s (%s) from %s/%s\n", *blob.Name, blob.Properties.LastModified, config.Account, config.Container)
}
return nil
}
// Create an authenticated client against the Azure cloud
credential := azblob.NewSharedKeyCredential(config.Account, config.Token)
- pipeline := azblob.NewPipeline(credential, azblob.PipelineOptions{})
-
- u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net", config.Account))
- service := azblob.NewServiceURL(*u, pipeline)
-
- container := service.NewContainerURL(config.Container)
-
+ u := fmt.Sprintf("https://%s.blob.core.windows.net/%s", config.Account, config.Container)
+ container, err := azblob.NewContainerClientWithSharedKey(u, credential, nil)
+ if err != nil {
+ return err
+ }
// Iterate over the blobs and delete them
for _, blob := range blobs {
- blockblob := container.NewBlockBlobURL(blob.Name)
- if _, err := blockblob.Delete(context.Background(), azblob.DeleteSnapshotsOptionInclude, azblob.BlobAccessConditions{}); err != nil {
+ blockblob := container.NewBlockBlobClient(*blob.Name)
+ if _, err := blockblob.Delete(context.Background(), &azblob.DeleteBlobOptions{}); err != nil {
return err
}
}
diff --git a/internal/build/util.go b/internal/build/util.go
index f050797623..9dfba6b87e 100644
--- a/internal/build/util.go
+++ b/internal/build/util.go
@@ -17,6 +17,7 @@
package build
import (
+ "bufio"
"bytes"
"flag"
"fmt"
@@ -32,6 +33,7 @@ import (
"runtime"
"strings"
"text/template"
+ "time"
)
var DryRunFlag = flag.Bool("n", false, "dry run, don't execute commands")
@@ -146,7 +148,7 @@ func CopyFile(dst, src string, mode os.FileMode) {
// so that go commands executed by build use the same version of Go as the 'host' that runs
// build code. e.g.
//
-// /usr/lib/go-1.11/bin/go run build/ci.go ...
+// /usr/lib/go-1.11/bin/go run build/ci.go ...
//
// runs using go 1.11 and invokes go 1.11 tools from the same GOROOT. This is also important
// because runtime.Version checks on the host should match the tools that are run.
@@ -188,7 +190,6 @@ func needCompile(strPack string) bool {
// the form sftp://[user@]host[:port].
func UploadSFTP(identityFile, host, dir string, files []string) error {
sftp := exec.Command("sftp")
- sftp.Stdout = nil
sftp.Stderr = os.Stderr
if identityFile != "" {
sftp.Args = append(sftp.Args, "-i", identityFile)
@@ -200,6 +201,10 @@ func UploadSFTP(identityFile, host, dir string, files []string) error {
}
stdin, err := sftp.StdinPipe()
if err != nil {
return fmt.Errorf("can't create stdin pipe for sftp: %v", err)
}
+ stdout, err := sftp.StdoutPipe()
+ if err != nil {
+ return fmt.Errorf("can't create stdout pipe for sftp: %v", err)
+ }
@@ -210,8 +215,37 @@ func UploadSFTP(identityFile, host, dir string, files []string) error {
for _, f := range files {
fmt.Fprintln(in, "put", f, path.Join(dir, filepath.Base(f)))
}
+ fmt.Fprintln(in, "exit")
+ // Some issue with the PPA sftp server makes it so the server does not
+ // respond properly to a 'bye', 'exit' or 'quit' from the client.
+ // To work around that, we check the output, and when we see the client
+ // exit command, we do a hard exit.
+ // See
+ // https://github.com/kolban-google/sftp-gcs/issues/23
+ // https://github.com/mscdex/ssh2/pull/1111
+ aborted := false
+ go func() {
+ scanner := bufio.NewScanner(stdout)
+ for scanner.Scan() {
+ txt := scanner.Text()
+ fmt.Println(txt)
+ if txt == "sftp> exit" {
+ // Give it .5 seconds to exit (server might be fixed), then
+ // hard kill it from the outside
+ time.Sleep(500 * time.Millisecond)
+ aborted = true
+ sftp.Process.Kill()
+ }
+
+ }
+ }()
+
stdin.Close()
- return sftp.Wait()
+ err = sftp.Wait()
+ if aborted {
+ return nil
+ }
+ return err
}
// FindMainPackages finds all 'main' packages in the given directory and returns their
diff --git a/node/api.go b/node/api.go
index 47e4d1d6fa..4384ee924a 100644
--- a/node/api.go
+++ b/node/api.go
@@ -246,11 +246,12 @@ func (api *privateAdminAPI) StartWS(host *string, port *int, allowedOrigins *str
}
// Enable WebSocket on the server.
- server := api.node.wsServerForPort(*port)
+ server := api.node.wsServerForPort(*port, false)
if err := server.setListenAddr(*host, *port); err != nil {
return false, err
}
- if err := server.enableWS(api.node.rpcAPIs, config); err != nil {
+ openApis, _ := api.node.GetAPIs()
+ if err := server.enableWS(openApis, config); err != nil {
return false, err
}
if err := server.start(); err != nil {
diff --git a/node/config.go b/node/config.go
index e68359f7ad..3e98479ebc 100644
--- a/node/config.go
+++ b/node/config.go
@@ -39,6 +39,7 @@ import (
const (
datadirPrivateKey = "nodekey" // Path within the datadir to the node's private key
+ datadirJWTKey = "jwtsecret" // Path within the datadir to the node's jwt secret
datadirDefaultKeyStore = "keystore" // Path within the datadir to the keystore
datadirStaticNodes = "static-nodes.json" // Path within the datadir to the static node list
datadirTrustedNodes = "trusted-nodes.json" // Path within the datadir to the trusted node list
@@ -136,6 +137,12 @@ type Config struct {
// HTTPPathPrefix specifies a path prefix on which http-rpc is to be served.
HTTPPathPrefix string `toml:",omitempty"`
+ // AuthHost is the listening address on which authenticated APIs are provided.
+ AuthHost string `toml:",omitempty"`
+
+ // AuthPort is the port number on which authenticated APIs are provided.
+ AuthPort int `toml:",omitempty"`
+
// WSHost is the host interface on which to start the websocket RPC server. If
// this field is empty, no websocket API endpoint will be started.
WSHost string
@@ -185,6 +192,9 @@ type Config struct {
staticNodesWarning bool
trustedNodesWarning bool
oldGethResourceWarning bool
+
+ // JWTSecret is the hex-encoded jwt secret.
+ JWTSecret string `toml:",omitempty"`
}
// IPCEndpoint resolves an IPC endpoint based on a configured value, taking into
@@ -243,7 +253,7 @@ func (c *Config) HTTPEndpoint() string {
// DefaultHTTPEndpoint returns the HTTP endpoint used by default.
func DefaultHTTPEndpoint() string {
- config := &Config{HTTPHost: DefaultHTTPHost, HTTPPort: DefaultHTTPPort}
+ config := &Config{HTTPHost: DefaultHTTPHost, HTTPPort: DefaultHTTPPort, AuthPort: DefaultAuthPort}
return config.HTTPEndpoint()
}
diff --git a/node/defaults.go b/node/defaults.go
index 4b31a26356..b5cc2bf2d4 100644
--- a/node/defaults.go
+++ b/node/defaults.go
@@ -34,12 +34,24 @@ const (
DefaultWSPort = 6790 // Default TCP port for the websocket RPC server
DefaultGraphQLHost = "localhost" // Default host interface for the GraphQL server
DefaultGraphQLPort = 6791 // Default TCP port for the GraphQL server
+ DefaultAuthHost = "localhost" // Default host interface for the authenticated apis
+ DefaultAuthPort = 8551 // Default port for the authenticated apis
+)
+
+var (
+ DefaultAuthCors = []string{"localhost"} // Default cors domain for the authenticated apis
+ DefaultAuthVhosts = []string{"localhost"} // Default virtual hosts for the authenticated apis
+ DefaultAuthOrigins = []string{"localhost"} // Default origins for the authenticated apis
+ DefaultAuthPrefix = "" // Default prefix for the authenticated apis
+ DefaultAuthModules = []string{"eth", "engine"}
)
// DefaultConfig contains reasonable default settings.
var DefaultConfig = Config{
DataDir: DefaultDataDir(),
HTTPPort: DefaultHTTPPort,
+ AuthHost: DefaultAuthHost,
+ AuthPort: DefaultAuthPort,
HTTPModules: []string{"net", "web3"},
HTTPVirtualHosts: []string{"localhost"},
HTTPTimeouts: rpc.DefaultHTTPTimeouts,
diff --git a/node/endpoints.go b/node/endpoints.go
index e621e94fbb..99d76020dd 100644
--- a/node/endpoints.go
+++ b/node/endpoints.go
@@ -60,8 +60,10 @@ func checkModuleAvailability(modules []string, apis []rpc.API) (bad, available [
}
}
for _, name := range modules {
- if _, ok := availableSet[name]; !ok && name != rpc.MetadataApi {
- bad = append(bad, name)
+ if _, ok := availableSet[name]; !ok {
+ if name != rpc.MetadataApi && name != rpc.EngineApi {
+ bad = append(bad, name)
+ }
}
}
return bad, available
diff --git a/node/jwt_handler.go b/node/jwt_handler.go
new file mode 100644
index 0000000000..28d5b87c60
--- /dev/null
+++ b/node/jwt_handler.go
@@ -0,0 +1,78 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package node
+
+import (
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/golang-jwt/jwt/v4"
+)
+
+type jwtHandler struct {
+ keyFunc func(token *jwt.Token) (interface{}, error)
+ next http.Handler
+}
+
+// newJWTHandler creates a http.Handler with jwt authentication support.
+func newJWTHandler(secret []byte, next http.Handler) http.Handler {
+ return &jwtHandler{
+ keyFunc: func(token *jwt.Token) (interface{}, error) {
+ return secret, nil
+ },
+ next: next,
+ }
+}
+
+// ServeHTTP implements http.Handler
+func (handler *jwtHandler) ServeHTTP(out http.ResponseWriter, r *http.Request) {
+ var (
+ strToken string
+ claims jwt.RegisteredClaims
+ )
+ if auth := r.Header.Get("Authorization"); strings.HasPrefix(auth, "Bearer ") {
+ strToken = strings.TrimPrefix(auth, "Bearer ")
+ }
+ if len(strToken) == 0 {
+ http.Error(out, "missing token", http.StatusForbidden)
+ return
+ }
+ // We explicitly allow only HS256, and also disable the built-in
+ // claim check: RegisteredClaims internally requires 'iat' to be
+ // no later than 'now', but we allow for a bit of drift.
+ token, err := jwt.ParseWithClaims(strToken, &claims, handler.keyFunc,
+ jwt.WithValidMethods([]string{"HS256"}),
+ jwt.WithoutClaimsValidation())
+
+ switch {
+ case err != nil:
+ http.Error(out, err.Error(), http.StatusForbidden)
+ case !token.Valid:
+ http.Error(out, "invalid token", http.StatusForbidden)
+ case !claims.VerifyExpiresAt(time.Now(), false): // optional
+ http.Error(out, "token is expired", http.StatusForbidden)
+ case claims.IssuedAt == nil:
+ http.Error(out, "missing issued-at", http.StatusForbidden)
+ case time.Since(claims.IssuedAt.Time) > 5*time.Second:
+ http.Error(out, "stale token", http.StatusForbidden)
+ case time.Until(claims.IssuedAt.Time) > 5*time.Second:
+ http.Error(out, "future token", http.StatusForbidden)
+ default:
+ handler.next.ServeHTTP(out, r)
+ }
+}
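For a client to pass this handler it must present an HS256 token signed with the shared secret and an `iat` within roughly +/-5 seconds of server time. A sketch of producing such a token with the same `golang-jwt/jwt/v4` library this diff adds to go.mod:

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v4"
)

func makeAuthToken(secret []byte) (string, error) {
	claims := jwt.RegisteredClaims{
		IssuedAt: jwt.NewNumericDate(time.Now()), // must stay within the +-5s drift window
	}
	return jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString(secret)
}

func main() {
	tok, err := makeAuthToken(make([]byte, 32)) // demo secret: 32 zero bytes
	if err != nil {
		panic(err)
	}
	fmt.Println("Authorization: Bearer " + tok)
}
```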
diff --git a/node/node.go b/node/node.go
index cf57720b7b..65e0161815 100644
--- a/node/node.go
+++ b/node/node.go
@@ -17,8 +17,11 @@
package node
import (
+ crand "crypto/rand"
"errors"
"fmt"
+ "github.com/PlatONnetwork/PlatON-Go/common"
+ "github.com/PlatONnetwork/PlatON-Go/common/hexutil"
"math/big"
"net/http"
"os"
@@ -60,6 +63,8 @@ type Node struct {
rpcAPIs []rpc.API // List of APIs currently provided by the node
http *httpServer //
ws *httpServer //
+ httpAuth *httpServer //
+ wsAuth *httpServer //
ipc *ipcServer // Stores information about the ipc http server
inprocHandler *rpc.Server // In-process RPC request handler to process the API requests
@@ -161,7 +166,9 @@ func New(conf *Config) (*Node, error) {
}
// Configure RPC servers.
node.http = newHTTPServer(node.log, conf.HTTPTimeouts)
+ node.httpAuth = newHTTPServer(node.log, conf.HTTPTimeouts)
node.ws = newHTTPServer(node.log, rpc.DefaultHTTPTimeouts)
+ node.wsAuth = newHTTPServer(node.log, rpc.DefaultHTTPTimeouts)
node.ipc = newIPCServer(node.log, conf.IPCEndpoint())
return node, nil
@@ -355,7 +362,41 @@ func (n *Node) closeDataDir() {
}
}
-// configureRPC is a helper method to configure all the various RPC endpoints during node
+// obtainJWTSecret loads the jwt-secret, either from the provided config,
+// or from the default location. If neither is present, it generates
+// a new secret and stores it to the default location.
+func (n *Node) obtainJWTSecret(cliParam string) ([]byte, error) {
+ fileName := cliParam
+ if len(fileName) == 0 {
+ // no path provided, use default
+ fileName = n.ResolvePath(datadirJWTKey)
+ }
+ // try reading from file
+ log.Debug("Reading JWT secret", "path", fileName)
+ if data, err := os.ReadFile(fileName); err == nil {
+ jwtSecret := common.FromHex(strings.TrimSpace(string(data)))
+ if len(jwtSecret) == 32 {
+ return jwtSecret, nil
+ }
+ log.Error("Invalid JWT secret", "path", fileName, "length", len(jwtSecret))
+ return nil, errors.New("invalid JWT secret")
+ }
+ // Need to generate one
+ jwtSecret := make([]byte, 32)
+ crand.Read(jwtSecret)
+ // if we're in --dev mode, don't bother saving, just show it
+ if fileName == "" {
+ log.Info("Generated ephemeral JWT secret", "secret", hexutil.Encode(jwtSecret))
+ return jwtSecret, nil
+ }
+ if err := os.WriteFile(fileName, []byte(hexutil.Encode(jwtSecret)), 0600); err != nil {
+ return nil, err
+ }
+ log.Info("Generated JWT secret", "path", fileName)
+ return jwtSecret, nil
+}
+
+// startRPC is a helper method to configure all the various RPC endpoints during node
// startup. It's not meant to be called at any time afterwards as it makes certain
// assumptions about the state of the node.
func (n *Node) startRPC() error {
@@ -370,54 +411,124 @@ func (n *Node) startRPC() error {
}
}
+ var (
+ servers []*httpServer
+ open, all = n.GetAPIs()
+ )
// Configure HTTP.
- if n.config.HTTPHost != "" {
- config := httpConfig{
+ initHttp := func(server *httpServer, apis []rpc.API, port int) error {
+ if err := server.setListenAddr(n.config.HTTPHost, port); err != nil {
+ return err
+ }
+ if err := server.enableRPC(apis, httpConfig{
CorsAllowedOrigins: n.config.HTTPCors,
Vhosts: n.config.HTTPVirtualHosts,
Modules: n.config.HTTPModules,
prefix: n.config.HTTPPathPrefix,
+ }); err != nil {
+ return err
+ }
+ servers = append(servers, server)
+ return nil
+ }
+ initWS := func(apis []rpc.API, port int) error {
+ server := n.wsServerForPort(port, false)
+ if err := server.setListenAddr(n.config.WSHost, port); err != nil {
+ return err
+ }
+ if err := server.enableWS(n.rpcAPIs, wsConfig{
+ Modules: n.config.WSModules,
+ Origins: n.config.WSOrigins,
+ prefix: n.config.WSPathPrefix,
+ }); err != nil {
+ return err
+ }
+ servers = append(servers, server)
+ return nil
+ }
+ initAuth := func(apis []rpc.API, port int, secret []byte) error {
+ // Enable auth via HTTP
+ server := n.httpAuth
+ if err := server.setListenAddr(n.config.AuthHost, port); err != nil {
+ return err
}
- if err := n.http.setListenAddr(n.config.HTTPHost, n.config.HTTPPort); err != nil {
+ if err := server.enableRPC(apis, httpConfig{
+ CorsAllowedOrigins: DefaultAuthCors,
+ Vhosts: DefaultAuthVhosts,
+ Modules: DefaultAuthModules,
+ prefix: DefaultAuthPrefix,
+ jwtSecret: secret,
+ }); err != nil {
return err
}
- if err := n.http.enableRPC(n.rpcAPIs, config); err != nil {
+ servers = append(servers, server)
+ // Enable auth via WS
+ server = n.wsServerForPort(port, true)
+ if err := server.setListenAddr(n.config.AuthHost, port); err != nil {
+ return err
+ }
+ if err := server.enableWS(apis, wsConfig{
+ Modules: DefaultAuthModules,
+ Origins: DefaultAuthOrigins,
+ prefix: DefaultAuthPrefix,
+ jwtSecret: secret,
+ }); err != nil {
+ return err
+ }
+ servers = append(servers, server)
+ return nil
+ }
+ // Set up HTTP.
+ if n.config.HTTPHost != "" {
+ // Configure legacy unauthenticated HTTP.
+ if err := initHttp(n.http, open, n.config.HTTPPort); err != nil {
return err
}
}
// Configure WebSocket.
if n.config.WSHost != "" {
- server := n.wsServerForPort(n.config.WSPort)
- config := wsConfig{
- Modules: n.config.WSModules,
- Origins: n.config.WSOrigins,
- prefix: n.config.WSPathPrefix,
+ // legacy unauthenticated
+ if err := initWS(open, n.config.WSPort); err != nil {
+ return err
}
- if err := server.setListenAddr(n.config.WSHost, n.config.WSPort); err != nil {
+ }
+ // Configure authenticated API
+ if len(open) != len(all) {
+ jwtSecret, err := n.obtainJWTSecret(n.config.JWTSecret)
+ if err != nil {
return err
}
- if err := server.enableWS(n.rpcAPIs, config); err != nil {
+ if err := initAuth(all, n.config.AuthPort, jwtSecret); err != nil {
return err
}
}
- if err := n.http.start(); err != nil {
- return err
+ // Start the servers
+ for _, server := range servers {
+ if err := server.start(); err != nil {
+ return err
+ }
}
- return n.ws.start()
+ return nil
}
-func (n *Node) wsServerForPort(port int) *httpServer {
- if n.config.HTTPHost == "" || n.http.port == port {
- return n.http
+func (n *Node) wsServerForPort(port int, authenticated bool) *httpServer {
+ httpServer, wsServer := n.http, n.ws
+ if authenticated {
+ httpServer, wsServer = n.httpAuth, n.wsAuth
+ }
+ if n.config.HTTPHost == "" || httpServer.port == port {
+ return httpServer
}
- return n.ws
+ return wsServer
}
func (n *Node) stopRPC() {
n.http.stop()
n.ws.stop()
+ n.httpAuth.stop()
+ n.wsAuth.stop()
n.ipc.stop()
n.stopInProc()
}
@@ -478,6 +589,17 @@ func (n *Node) RegisterAPIs(apis []rpc.API) {
n.rpcAPIs = append(n.rpcAPIs, apis...)
}
+// GetAPIs returns two sets of APIs: the ones that do not require
+// authentication, and the complete set.
+func (n *Node) GetAPIs() (unauthenticated, all []rpc.API) {
+ for _, api := range n.rpcAPIs {
+ if !api.Authenticated {
+ unauthenticated = append(unauthenticated, api)
+ }
+ }
+ return unauthenticated, n.rpcAPIs
+}
+
func (n *Node) stopSnapshotDB() {
snapshotdb.Close()
}
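`obtainJWTSecret` expects the secret file to contain 32 bytes of hex, with or without a `0x` prefix, since it parses via `common.FromHex` after trimming whitespace. A sketch for pre-generating such a file out of band:

```go
package main

import (
	crand "crypto/rand"
	"encoding/hex"
	"fmt"
	"os"
)

func main() {
	secret := make([]byte, 32)
	if _, err := crand.Read(secret); err != nil {
		panic(err)
	}
	// Hex-encode; the node side trims whitespace and accepts a 0x prefix too.
	if err := os.WriteFile("jwtsecret", []byte(hex.EncodeToString(secret)), 0600); err != nil {
		panic(err)
	}
	fmt.Println("wrote 32-byte secret to ./jwtsecret")
}
```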
diff --git a/node/node_test.go b/node/node_test.go
index dee1375a95..292040e414 100644
--- a/node/node_test.go
+++ b/node/node_test.go
@@ -577,13 +577,13 @@ func (test rpcPrefixTest) check(t *testing.T, node *Node) {
}
}
for _, path := range test.wantWS {
- err := wsRequest(t, wsBase+path, "")
+ err := wsRequest(t, wsBase+path)
if err != nil {
t.Errorf("Error: %s: WebSocket connection failed: %v", path, err)
}
}
for _, path := range test.wantNoWS {
- err := wsRequest(t, wsBase+path, "")
+ err := wsRequest(t, wsBase+path)
if err == nil {
t.Errorf("Error: %s: WebSocket connection succeeded for path in wantNoWS", path)
}
diff --git a/node/rpcstack.go b/node/rpcstack.go
index 2015acd080..d82129af00 100644
--- a/node/rpcstack.go
+++ b/node/rpcstack.go
@@ -41,13 +41,15 @@ type httpConfig struct {
CorsAllowedOrigins []string
Vhosts []string
prefix string // path prefix on which to mount http handler
+ jwtSecret []byte // optional JWT secret
}
// wsConfig is the JSON-RPC/Websocket configuration
type wsConfig struct {
- Origins []string
- Modules []string
- prefix string // path prefix on which to mount ws handler
+ Origins []string
+ Modules []string
+ prefix string // path prefix on which to mount ws handler
+ jwtSecret []byte // optional JWT secret
}
type rpcHandler struct {
@@ -158,7 +160,7 @@ func (h *httpServer) start() error {
}
// Log http endpoint.
h.log.Info("HTTP server started",
- "endpoint", listener.Addr(),
+ "endpoint", listener.Addr(), "auth", (h.httpConfig.jwtSecret != nil),
"prefix", h.httpConfig.prefix,
"cors", strings.Join(h.httpConfig.CorsAllowedOrigins, ","),
"vhosts", strings.Join(h.httpConfig.Vhosts, ","),
@@ -286,7 +288,7 @@ func (h *httpServer) enableRPC(apis []rpc.API, config httpConfig) error {
}
h.httpConfig = config
h.httpHandler.Store(&rpcHandler{
- Handler: NewHTTPHandlerStack(srv, config.CorsAllowedOrigins, config.Vhosts),
+ Handler: NewHTTPHandlerStack(srv, config.CorsAllowedOrigins, config.Vhosts, config.jwtSecret),
server: srv,
})
return nil
@@ -318,7 +320,7 @@ func (h *httpServer) enableWS(apis []rpc.API, config wsConfig) error {
}
h.wsConfig = config
h.wsHandler.Store(&rpcHandler{
- Handler: srv.WebsocketHandler(config.Origins),
+ Handler: NewWSHandlerStack(srv.WebsocketHandler(config.Origins), config.jwtSecret),
server: srv,
})
return nil
@@ -363,13 +365,24 @@ func isWebsocket(r *http.Request) bool {
}
// NewHTTPHandlerStack returns wrapped http-related handlers
-func NewHTTPHandlerStack(srv http.Handler, cors []string, vhosts []string) http.Handler {
+func NewHTTPHandlerStack(srv http.Handler, cors []string, vhosts []string, jwtSecret []byte) http.Handler {
// Wrap the CORS-handler within a host-handler
handler := newCorsHandler(srv, cors)
handler = newVHostHandler(vhosts, handler)
+ if len(jwtSecret) != 0 {
+ handler = newJWTHandler(jwtSecret, handler)
+ }
return newGzipHandler(handler)
}
+// NewWSHandlerStack returns a wrapped ws-related handler.
+func NewWSHandlerStack(srv http.Handler, jwtSecret []byte) http.Handler {
+ if len(jwtSecret) != 0 {
+ return newJWTHandler(jwtSecret, srv)
+ }
+ return srv
+}
+
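A minimal sketch of composing the handler stacks directly, assuming an arbitrary http.Handler: with a non-empty secret, both constructors wrap the handler in newJWTHandler, which rejects requests lacking a valid Bearer token; with an empty secret they pass it through untouched.

	var rpcHandler http.Handler // some JSON-RPC handler, assumed given
	secret := []byte("shared-jwt-secret")
	httpStack := NewHTTPHandlerStack(rpcHandler, nil, nil, secret) // no CORS/vhost filtering
	wsStack := NewWSHandlerStack(rpcHandler, secret)
	_, _ = httpStack, wsStack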
func newCorsHandler(srv http.Handler, allowedOrigins []string) http.Handler {
// disable CORS support if user has not specified a custom CORS configuration
if len(allowedOrigins) == 0 {
diff --git a/node/rpcstack_test.go b/node/rpcstack_test.go
index 2a66517820..62cfba0766 100644
--- a/node/rpcstack_test.go
+++ b/node/rpcstack_test.go
@@ -19,11 +19,13 @@ package node
import (
	"bytes"
	"fmt"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"testing"
+	"time"
+
+	"github.com/dgrijalva/jwt-go"
"github.com/PlatONnetwork/PlatON-Go/internal/testlog"
@@ -147,12 +149,12 @@ func TestWebsocketOrigins(t *testing.T) {
srv := createAndStartServer(t, &httpConfig{}, true, &wsConfig{Origins: splitAndTrim(tc.spec)})
url := fmt.Sprintf("ws://%v", srv.listenAddr())
for _, origin := range tc.expOk {
- if err := wsRequest(t, url, origin); err != nil {
+ if err := wsRequest(t, url, "Origin", origin); err != nil {
t.Errorf("spec '%v', origin '%v': expected ok, got %v", tc.spec, origin, err)
}
}
for _, origin := range tc.expFail {
- if err := wsRequest(t, url, origin); err == nil {
+ if err := wsRequest(t, url, "Origin", origin); err == nil {
t.Errorf("spec '%v', origin '%v': expected not to allow, got ok", tc.spec, origin)
}
}
@@ -244,13 +246,18 @@ func createAndStartServer(t *testing.T, conf *httpConfig, ws bool, wsConf *wsCon
}
// wsRequest attempts to open a WebSocket connection to the given URL.
-func wsRequest(t *testing.T, url, browserOrigin string) error {
+func wsRequest(t *testing.T, url string, extraHeaders ...string) error {
t.Helper()
- t.Logf("checking WebSocket on %s (origin %q)", url, browserOrigin)
+	t.Logf("checking WebSocket on %s", url)
headers := make(http.Header)
- if browserOrigin != "" {
- headers.Set("Origin", browserOrigin)
+ // Apply extra headers.
+ if len(extraHeaders)%2 != 0 {
+ panic("odd extraHeaders length")
+ }
+ for i := 0; i < len(extraHeaders); i += 2 {
+ key, value := extraHeaders[i], extraHeaders[i+1]
+ headers.Set(key, value)
}
conn, _, err := websocket.DefaultDialer.Dial(url, headers)
if conn != nil {
@@ -292,3 +299,79 @@ func rpcRequest(t *testing.T, url string, extraHeaders ...string) *http.Response
}
return resp
}
+
+type testClaim map[string]interface{}
+
+func (testClaim) Valid() error {
+ return nil
+}
+
+func TestJWT(t *testing.T) {
+ var secret = []byte("secret")
+ issueToken := func(secret []byte, method jwt.SigningMethod, input map[string]interface{}) string {
+ if method == nil {
+ method = jwt.SigningMethodHS256
+ }
+ ss, _ := jwt.NewWithClaims(method, testClaim(input)).SignedString(secret)
+ return ss
+ }
+ expOk := []string{
+ fmt.Sprintf("Bearer %v", issueToken(secret, nil, testClaim{"iat": time.Now().Unix()})),
+ fmt.Sprintf("Bearer %v", issueToken(secret, nil, testClaim{"iat": time.Now().Unix() + 4})),
+ fmt.Sprintf("Bearer %v", issueToken(secret, nil, testClaim{"iat": time.Now().Unix() - 4})),
+ fmt.Sprintf("Bearer %v", issueToken(secret, nil, testClaim{
+ "iat": time.Now().Unix(),
+ "exp": time.Now().Unix() + 2,
+ })),
+ fmt.Sprintf("Bearer %v", issueToken(secret, nil, testClaim{
+ "iat": time.Now().Unix(),
+ "bar": "baz",
+ })),
+ }
+ expFail := []string{
+ // future
+ fmt.Sprintf("Bearer %v", issueToken(secret, nil, testClaim{"iat": time.Now().Unix() + 6})),
+ // stale
+ fmt.Sprintf("Bearer %v", issueToken(secret, nil, testClaim{"iat": time.Now().Unix() - 6})),
+ // wrong algo
+ fmt.Sprintf("Bearer %v", issueToken(secret, jwt.SigningMethodHS512, testClaim{"iat": time.Now().Unix() + 4})),
+ // expired
+ fmt.Sprintf("Bearer %v", issueToken(secret, nil, testClaim{"iat": time.Now().Unix(), "exp": time.Now().Unix()})),
+ // missing mandatory iat
+ fmt.Sprintf("Bearer %v", issueToken(secret, nil, testClaim{})),
+ // wrong secret
+ fmt.Sprintf("Bearer %v", issueToken([]byte("wrong"), nil, testClaim{"iat": time.Now().Unix()})),
+ fmt.Sprintf("Bearer %v", issueToken([]byte{}, nil, testClaim{"iat": time.Now().Unix()})),
+ fmt.Sprintf("Bearer %v", issueToken(nil, nil, testClaim{"iat": time.Now().Unix()})),
+ // Various malformed syntax
+ fmt.Sprintf("%v", issueToken(secret, nil, testClaim{"iat": time.Now().Unix()})),
+ fmt.Sprintf("Bearer %v", issueToken(secret, nil, testClaim{"iat": time.Now().Unix()})),
+ fmt.Sprintf("bearer %v", issueToken(secret, nil, testClaim{"iat": time.Now().Unix()})),
+ fmt.Sprintf("Bearer: %v", issueToken(secret, nil, testClaim{"iat": time.Now().Unix()})),
+ fmt.Sprintf("Bearer:%v", issueToken(secret, nil, testClaim{"iat": time.Now().Unix()})),
+ fmt.Sprintf("Bearer\t%v", issueToken(secret, nil, testClaim{"iat": time.Now().Unix()})),
+ fmt.Sprintf("Bearer \t%v", issueToken(secret, nil, testClaim{"iat": time.Now().Unix()})),
+ }
+ srv := createAndStartServer(t, &httpConfig{jwtSecret: []byte("secret")},
+ true, &wsConfig{Origins: []string{"*"}, jwtSecret: []byte("secret")})
+ wsUrl := fmt.Sprintf("ws://%v", srv.listenAddr())
+ htUrl := fmt.Sprintf("http://%v", srv.listenAddr())
+
+ for i, token := range expOk {
+ if err := wsRequest(t, wsUrl, "Authorization", token); err != nil {
+ t.Errorf("test %d-ws, token '%v': expected ok, got %v", i, token, err)
+ }
+ if resp := rpcRequest(t, htUrl, "Authorization", token); resp.StatusCode != 200 {
+ t.Errorf("test %d-http, token '%v': expected ok, got %v", i, token, resp.StatusCode)
+ }
+ }
+ for i, token := range expFail {
+ if err := wsRequest(t, wsUrl, "Authorization", token); err == nil {
+ t.Errorf("tc %d-ws, token '%v': expected not to allow, got ok", i, token)
+ }
+ if resp := rpcRequest(t, htUrl, "Authorization", token); resp.StatusCode != 403 {
+ t.Errorf("tc %d-http, token '%v': expected not to allow, got %v", i, token, resp.StatusCode)
+ }
+ }
+ srv.stop()
+}
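A client-side sketch mirroring the expOk cases above: sign a fresh token with the shared secret and attach it as a Bearer header. The endpoint URL and the rpc_modules method are placeholders; the secret must match the server's, and the token must carry a recent "iat" claim.

	token, _ := jwt.NewWithClaims(jwt.SigningMethodHS256,
		jwt.MapClaims{"iat": time.Now().Unix()}).SignedString([]byte("secret"))
	body := strings.NewReader(`{"jsonrpc":"2.0","id":1,"method":"rpc_modules","params":[]}`)
	req, _ := http.NewRequest("POST", "http://127.0.0.1:6060", body) // placeholder URL
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer "+token)
	resp, err := http.DefaultClient.Do(req) // expect 200 with a valid token, 403 otherwise
	_, _ = resp, err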
diff --git a/p2p/peer_error.go b/p2p/peer_error.go
index b0c4060efe..d9f343bccd 100644
--- a/p2p/peer_error.go
+++ b/p2p/peer_error.go
@@ -54,7 +54,7 @@ func (pe *peerError) Error() string {
var errProtocolReturned = errors.New("protocol returned")
-type DiscReason uint
+type DiscReason uint8
const (
DiscRequested DiscReason = iota
diff --git a/p2p/simulations/http_test.go b/p2p/simulations/http_test.go
index 039dc85fbc..132d926174 100644
--- a/p2p/simulations/http_test.go
+++ b/p2p/simulations/http_test.go
@@ -143,7 +143,7 @@ func (t *testService) Stop() error {
// message with the given code
func (t *testService) handshake(rw p2p.MsgReadWriter, code uint64) error {
errc := make(chan error, 2)
- go func() { errc <- p2p.Send(rw, code, struct{}{}) }()
+ go func() { errc <- p2p.SendItems(rw, code) }()
go func() { errc <- p2p.ExpectMsg(rw, code, struct{}{}) }()
for i := 0; i < 2; i++ {
if err := <-errc; err != nil {
diff --git a/rlp/encode_test.go b/rlp/encode_test.go
index 3fd53aea1b..f8779ee2d4 100644
--- a/rlp/encode_test.go
+++ b/rlp/encode_test.go
@@ -399,6 +399,21 @@ func TestEncodeToBytes(t *testing.T) {
runEncTests(t, EncodeToBytes)
}
+func TestEncodeAppendToBytes(t *testing.T) {
+ buffer := make([]byte, 20)
+ runEncTests(t, func(val interface{}) ([]byte, error) {
+ w := NewEncoderBuffer(nil)
+ defer w.Flush()
+
+ err := Encode(w, val)
+ if err != nil {
+ return nil, err
+ }
+ output := w.AppendToBytes(buffer[:0])
+ return output, nil
+ })
+}
+
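A sketch of the buffer-reuse pattern the new test exercises: AppendToBytes copies the pending encoding into a caller-owned slice, so one backing array can serve many encodings without per-item allocations. The process consumer is hypothetical.

	buf := make([]byte, 0, 64)
	for _, v := range []interface{}{uint(7), "abc", []byte{1, 2, 3}} {
		w := NewEncoderBuffer(nil)
		if err := Encode(w, v); err != nil {
			panic(err)
		}
		buf = w.AppendToBytes(buf[:0]) // reuse the same backing array
		w.Flush()
		process(buf) // hypothetical consumer of the encoded bytes
	}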
func TestEncodeToReader(t *testing.T) {
runEncTests(t, func(val interface{}) ([]byte, error) {
_, r, err := EncodeToReader(val)
diff --git a/rpc/client.go b/rpc/client.go
index d0e28a864c..5da78f99ea 100644
--- a/rpc/client.go
+++ b/rpc/client.go
@@ -186,7 +186,7 @@ func DialContext(ctx context.Context, rawurl string) (*Client, error) {
}
}
-// Client retrieves the client from the context, if any. This can be used to perform
+// ClientFromContext retrieves the client from the context, if any. This can be used to perform
// 'reverse calls' in a handler method.
func ClientFromContext(ctx context.Context) (*Client, bool) {
client, ok := ctx.Value(clientContextKey{}).(*Client)
@@ -321,7 +321,7 @@ func (c *Client) CallContext(ctx context.Context, result interface{}, method str
}
}
-// BatchCall sends all given requests as a single batch and waits for the server
+// BatchCallContext sends all given requests as a single batch and waits for the server
// to return a response for all of them.
//
// In contrast to Call, BatchCall only returns I/O errors. Any error specific to
diff --git a/rpc/server.go b/rpc/server.go
index 4cb543c54c..5b74ec6296 100644
--- a/rpc/server.go
+++ b/rpc/server.go
@@ -27,6 +27,7 @@ import (
)
const MetadataApi = "rpc"
+const EngineApi = "engine"
// CodecOption specifies which type of messages a codec supports.
//
diff --git a/rpc/server_test.go b/rpc/server_test.go
index c692a071cf..9c8a3f1383 100644
--- a/rpc/server_test.go
+++ b/rpc/server_test.go
@@ -134,7 +134,6 @@ func TestServerShortLivedConn(t *testing.T) {
if err != nil {
t.Fatal("can't dial:", err)
}
- defer conn.Close()
conn.SetDeadline(deadline)
// Write the request, then half-close the connection so the server stops reading.
conn.Write([]byte(request))
@@ -142,6 +141,8 @@ func TestServerShortLivedConn(t *testing.T) {
// Now try to get the response.
buf := make([]byte, 2000)
n, err := conn.Read(buf)
+ conn.Close()
+
if err != nil {
t.Fatal("read error:", err)
}
diff --git a/rpc/types.go b/rpc/types.go
index 661d879adc..10528b6d34 100644
--- a/rpc/types.go
+++ b/rpc/types.go
@@ -31,10 +31,11 @@ import (
// API describes the set of methods offered over the RPC interface
type API struct {
- Namespace string // namespace under which the rpc methods of Service are exposed
- Version string // api version for DApp's
- Service interface{} // receiver instance which holds the methods
- Public bool // indication if the methods must be considered safe for public use
+ Namespace string // namespace under which the rpc methods of Service are exposed
+ Version string // api version for DApp's
+ Service interface{} // receiver instance which holds the methods
+ Public bool // indication if the methods must be considered safe for public use
+ Authenticated bool // whether the api should only be available behind authentication.
}
// ServerCodec implements reading, parsing and writing RPC messages for the server side of
diff --git a/rpc/websocket_test.go b/rpc/websocket_test.go
index cdfed5631f..3241370599 100644
--- a/rpc/websocket_test.go
+++ b/rpc/websocket_test.go
@@ -76,7 +76,7 @@ func TestWebsocketOriginCheck(t *testing.T) {
// Connections without origin header should work.
client, err = DialWebsocket(context.Background(), wsURL, "")
if err != nil {
- t.Fatal("error for empty origin")
+ t.Fatalf("error for empty origin: %v", err)
}
client.Close()
}
diff --git a/signer/fourbyte/abi.go b/signer/fourbyte/abi.go
index 71835afcd3..07e67fefc5 100644
--- a/signer/fourbyte/abi.go
+++ b/signer/fourbyte/abi.go
@@ -20,7 +20,6 @@ import (
"bytes"
"encoding/json"
"fmt"
- "regexp"
"strings"
"github.com/PlatONnetwork/PlatON-Go/accounts/abi"
@@ -75,42 +74,14 @@ func verifySelector(selector string, calldata []byte) (*decodedCallData, error)
return parseCallData(calldata, string(abidata))
}
-// selectorRegexp is used to validate that a 4byte database selector corresponds
-// to a valid ABI function declaration.
-//
-// Note, although uppercase letters are not part of the ABI spec, this regexp
-// still accepts it as the general format is valid. It will be rejected later
-// by the type checker.
-var selectorRegexp = regexp.MustCompile(`^([^\)]+)\(([A-Za-z0-9,\[\]]*)\)`)
-
// parseSelector converts a method selector into an ABI JSON spec. The returned
// data is a valid JSON string which can be consumed by the standard abi package.
func parseSelector(unescapedSelector string) ([]byte, error) {
- // Define a tiny fake ABI struct for JSON marshalling
- type fakeArg struct {
- Type string `json:"type"`
- }
- type fakeABI struct {
- Name string `json:"name"`
- Type string `json:"type"`
- Inputs []fakeArg `json:"inputs"`
- }
- // Validate the unescapedSelector and extract it's components
- groups := selectorRegexp.FindStringSubmatch(unescapedSelector)
- if len(groups) != 3 {
- return nil, fmt.Errorf("invalid selector %q (%v matches)", unescapedSelector, len(groups))
- }
- name := groups[1]
- args := groups[2]
-
- // Reassemble the fake ABI and constuct the JSON
- arguments := make([]fakeArg, 0)
- if len(args) > 0 {
- for _, arg := range strings.Split(args, ",") {
- arguments = append(arguments, fakeArg{arg})
- }
+ selector, err := abi.ParseSelector(unescapedSelector)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse selector: %v", err)
}
- return json.Marshal([]fakeABI{{name, "function", arguments}})
+ return json.Marshal([]abi.SelectorMarshaling{selector})
}
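A sketch of the new parsing path, assuming a well-formed selector: abi.ParseSelector turns a human-readable signature into a SelectorMarshaling that marshals to a standard one-element ABI JSON declaration.

	selector, err := abi.ParseSelector("transfer(address,uint256)")
	if err != nil {
		return nil, err
	}
	spec, _ := json.Marshal([]abi.SelectorMarshaling{selector})
	// spec now holds a "function" declaration named transfer with two inputs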
// parseCallData matches the provided call data against the ABI definition and
diff --git a/trie/database.go b/trie/database.go
index cbb2c5ed09..a4c14bb18f 100644
--- a/trie/database.go
+++ b/trie/database.go
@@ -124,16 +124,9 @@ func (n rawFullNode) cache() (hashNode, bool) { panic("this should never end up in a live trie") }
func (n rawFullNode) fstring(ind string) string { panic("this should never end up in a live trie") }
func (n rawFullNode) EncodeRLP(w io.Writer) error {
- var nodes [17]node
-
- for i, child := range n {
- if child != nil {
- nodes[i] = child
- } else {
- nodes[i] = nilValueNode
- }
- }
- return rlp.Encode(w, nodes)
+ eb := rlp.NewEncoderBuffer(w)
+ n.encode(eb)
+ return eb.Flush()
}
// rawShortNode represents only the useful data content of a short node, with the
diff --git a/trie/hasher.go b/trie/hasher.go
index 9d0d4ab8e9..8dc061378f 100644
--- a/trie/hasher.go
+++ b/trie/hasher.go
@@ -154,7 +154,7 @@ func (h *hasher) store(n node, force bool) (node, error) {
// Larger nodes are replaced by their hash and stored in the database.
hash, _ := n.cache()
if len(hash) == 0 {
- hash = h.makeHashNode(enc)
+ hash = h.hashData(enc)
}
return hash, nil
@@ -176,9 +176,9 @@ func (h *hasher) encodedBytes() []byte {
return h.tmp
}
-// makeHashNode hashes the provided data
-func (h *hasher) makeHashNode(data []byte) hashNode {
- n := make(hashNode, h.sha.Size())
+// hashData hashes the provided data
+func (h *hasher) hashData(data []byte) hashNode {
+ n := make(hashNode, 32)
h.sha.Reset()
h.sha.Write(data)
h.sha.Read(n)
diff --git a/trie/iterator.go b/trie/iterator.go
index 96a0754f96..f1b93788e8 100644
--- a/trie/iterator.go
+++ b/trie/iterator.go
@@ -85,6 +85,10 @@ type NodeIterator interface {
// For leaf nodes, the last element of the path is the 'terminator symbol' 0x10.
Path() []byte
+	// NodeBlob returns the rlp-encoded bytes of the current node.
+	// If the node is embedded in its parent, nil is returned.
+ NodeBlob() []byte
+
// Leaf returns true iff the current node is a leaf node.
Leaf() bool
@@ -226,6 +230,18 @@ func (it *nodeIterator) Path() []byte {
return it.path
}
+func (it *nodeIterator) NodeBlob() []byte {
+ if it.Hash() == (common.Hash{}) {
+ return nil // skip the non-standalone node
+ }
+ blob, err := it.resolveBlob(it.Hash().Bytes(), it.Path())
+ if err != nil {
+ it.err = err
+ return nil
+ }
+ return blob
+}
+
func (it *nodeIterator) Error() error {
if it.err == errIteratorEnd {
return nil
@@ -364,6 +380,15 @@ func (it *nodeIterator) resolveHash(hash hashNode, path []byte) (node, error) {
return resolved, err
}
+func (it *nodeIterator) resolveBlob(hash hashNode, path []byte) ([]byte, error) {
+ if it.resolver != nil {
+ if blob, err := it.resolver.Get(hash); err == nil && len(blob) > 0 {
+ return blob, nil
+ }
+ }
+ return it.trie.resolveBlob(hash, path)
+}
+
func (st *nodeIteratorState) resolve(it *nodeIterator, path []byte) error {
if hash, ok := st.node.(hashNode); ok {
resolved, err := it.resolveHash(hash, path)
@@ -552,6 +577,10 @@ func (it *differenceIterator) Path() []byte {
return it.b.Path()
}
+func (it *differenceIterator) NodeBlob() []byte {
+ return it.b.NodeBlob()
+}
+
func (it *differenceIterator) AddResolver(resolver ethdb.KeyValueReader) {
panic("not implemented")
}
@@ -663,6 +692,10 @@ func (it *unionIterator) Path() []byte {
return (*it.items)[0].Path()
}
+func (it *unionIterator) NodeBlob() []byte {
+ return (*it.items)[0].NodeBlob()
+}
+
func (it *unionIterator) AddResolver(resolver ethdb.KeyValueReader) {
panic("not implemented")
}
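A sketch of what NodeBlob enables, assuming a trie tr and a destination ethdb.KeyValueWriter dst: copying the raw rlp-encoded nodes out of one store into another without re-encoding them.

	it := tr.NodeIterator(nil)
	for it.Next(true) {
		if it.Hash() == (common.Hash{}) {
			continue // embedded node, no standalone blob
		}
		if err := dst.Put(it.Hash().Bytes(), it.NodeBlob()); err != nil {
			return err
		}
	}
	return it.Error()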
diff --git a/trie/iterator_test.go b/trie/iterator_test.go
index 61db91fb59..8c4303d8ce 100644
--- a/trie/iterator_test.go
+++ b/trie/iterator_test.go
@@ -484,10 +484,18 @@ func (l *loggingDb) NewBatch() ethdb.Batch {
return l.backend.NewBatch()
}
+func (l *loggingDb) NewBatchWithSize(size int) ethdb.Batch {
+ return l.backend.NewBatchWithSize(size)
+}
+
func (l *loggingDb) NewIterator(prefix []byte, start []byte) ethdb.Iterator {
- fmt.Printf("NewIterator\n")
return l.backend.NewIterator(prefix, start)
}
+
+func (l *loggingDb) NewSnapshot() (ethdb.Snapshot, error) {
+ return l.backend.NewSnapshot()
+}
+
func (l *loggingDb) Stat(property string) (string, error) {
return l.backend.Stat(property)
}
@@ -538,3 +546,54 @@ func TestNodeIteratorLargeTrie(t *testing.T) {
t.Fatalf("Too many lookups during seek, have %d want %d", have, want)
}
}
+
+func TestIteratorNodeBlob(t *testing.T) {
+ var (
+ db = memorydb.New()
+ triedb = NewDatabase(db)
+ trie, _ = New(common.Hash{}, triedb)
+ )
+ vals := []struct{ k, v string }{
+ {"do", "verb"},
+ {"ether", "wookiedoo"},
+ {"horse", "stallion"},
+ {"shaman", "horse"},
+ {"doge", "coin"},
+ {"dog", "puppy"},
+ {"somethingveryoddindeedthis is", "myothernodedata"},
+ }
+ all := make(map[string]string)
+ for _, val := range vals {
+ all[val.k] = val.v
+ trie.Update([]byte(val.k), []byte(val.v))
+ }
+ trie.Commit(nil)
+ triedb.Cap(0)
+
+ found := make(map[common.Hash][]byte)
+ it := trie.NodeIterator(nil)
+ for it.Next(true) {
+ if it.Hash() == (common.Hash{}) {
+ continue
+ }
+ found[it.Hash()] = it.NodeBlob()
+ }
+
+ dbIter := db.NewIterator(nil, nil)
+ defer dbIter.Release()
+
+ var count int
+ for dbIter.Next() {
+		got, present := found[common.BytesToHash(dbIter.Key())]
+		if !present {
+			t.Fatalf("Missing trie node %v", dbIter.Key())
+		}
+		if !bytes.Equal(got, dbIter.Value()) {
+			t.Fatalf("Unexpected trie node: want %v, got %v", dbIter.Value(), got)
+		}
+		count++
+	}
+	if count != len(found) {
+		t.Fatal("Found extra trie node via iterator")
+	}
+}
diff --git a/trie/node.go b/trie/node.go
index 28acd0f493..28cf880ef1 100644
--- a/trie/node.go
+++ b/trie/node.go
@@ -53,16 +53,9 @@ var nilValueNode = valueNode(nil)
// EncodeRLP encodes a full node into the consensus RLP format.
func (n *fullNode) EncodeRLP(w io.Writer) error {
- var nodes [17]node
-
- for i, child := range &n.Children {
- if child != nil {
- nodes[i] = child
- } else {
- nodes[i] = nilValueNode
- }
- }
- return rlp.Encode(w, nodes)
+ eb := rlp.NewEncoderBuffer(w)
+ n.encode(eb)
+ return eb.Flush()
}
func (n *fullNode) copy() *fullNode { copy := *n; return &copy }
diff --git a/trie/stacktrie.go b/trie/stacktrie.go
index fe691161b0..f4897edf60 100644
--- a/trie/stacktrie.go
+++ b/trie/stacktrie.go
@@ -28,7 +28,6 @@ import (
"github.com/PlatONnetwork/PlatON-Go/common"
"github.com/PlatONnetwork/PlatON-Go/ethdb"
"github.com/PlatONnetwork/PlatON-Go/log"
- "github.com/PlatONnetwork/PlatON-Go/rlp"
)
var ErrCommitDisabled = errors.New("no database for committing")
@@ -56,8 +55,8 @@ func returnToPool(st *StackTrie) {
type StackTrie struct {
nodeType uint8 // node type (as in branch, ext, leaf)
val []byte // value contained by this node if it's a leaf
- key []byte // key chunk covered by this (full|ext) node
- children [16]*StackTrie // list of children (for fullnodes and exts)
+ key []byte // key chunk covered by this (leaf|ext) node
+ children [16]*StackTrie // list of children (for branch and exts)
db ethdb.KeyValueWriter // Pointer to the commit db, can be nil
}
@@ -70,7 +69,7 @@ func NewStackTrie(db ethdb.KeyValueWriter) *StackTrie {
}
// NewFromBinary initialises a serialized stacktrie with the given db.
-func NewFromBinary(data []byte, db ethdb.KeyValueStore) (*StackTrie, error) {
+func NewFromBinary(data []byte, db ethdb.KeyValueWriter) (*StackTrie, error) {
var st StackTrie
if err := st.UnmarshalBinary(data); err != nil {
return nil, err
@@ -146,7 +145,7 @@ func (st *StackTrie) unmarshalBinary(r io.Reader) error {
return nil
}
-func (st *StackTrie) setDb(db ethdb.KeyValueStore) {
+func (st *StackTrie) setDb(db ethdb.KeyValueWriter) {
st.db = db
for _, child := range st.children {
if child != nil {
@@ -224,6 +223,7 @@ func (st *StackTrie) insert(key, value []byte) {
switch st.nodeType {
case branchNode: /* Branch */
idx := int(key[0])
+
// Unresolve elder siblings
for i := idx - 1; i >= 0; i-- {
if st.children[i] != nil {
@@ -233,12 +233,14 @@ func (st *StackTrie) insert(key, value []byte) {
break
}
}
+
// Add new child
if st.children[idx] == nil {
st.children[idx] = newLeaf(key[1:], value, st.db)
} else {
st.children[idx].insert(key[1:], value)
}
+
case extNode: /* Ext */
// Compare both key chunks and see where they differ
diffidx := st.getDiffIndex(key)
@@ -326,10 +328,9 @@ func (st *StackTrie) insert(key, value []byte) {
p = st.children[0]
}
- // Create the two child leaves: the one containing the
- // original value and the one containing the new value
- // The child leave will be hashed directly in order to
- // free up some memory.
+ // Create the two child leaves: one containing the original
+ // value and another containing the new value. The child leaf
+ // is hashed directly in order to free up some memory.
origIdx := st.key[diffidx]
p.children[origIdx] = newLeaf(st.key[diffidx+1:], st.val, st.db)
p.children[origIdx].hash()
@@ -341,19 +342,22 @@ func (st *StackTrie) insert(key, value []byte) {
// over to the children.
st.key = st.key[:diffidx]
st.val = nil
+
case emptyNode: /* Empty */
st.nodeType = leafNode
st.key = key
st.val = value
+
case hashedNode:
panic("trying to insert into hash")
+
default:
panic("invalid type")
}
}
-// hash() hashes the node 'st' and converts it into 'hashedNode', if possible.
-// Possible outcomes:
+// hash converts st into a 'hashedNode', if possible. Possible outcomes:
+//
// 1. The rlp-encoded value was >= 32 bytes:
// - Then the 32-byte `hash` will be accessible in `st.val`.
// - And the 'st.type' will be 'hashedNode'
@@ -362,119 +366,116 @@ func (st *StackTrie) insert(key, value []byte) {
// - Then the <32 byte rlp-encoded value will be accessible in 'st.val'.
// - And the 'st.type' will be 'hashedNode' AGAIN
//
-// This method will also:
-// set 'st.type' to hashedNode
-// clear 'st.key'
+// This method also sets 'st.type' to hashedNode, and clears 'st.key'.
func (st *StackTrie) hash() {
- /* Shortcut if node is already hashed */
- if st.nodeType == hashedNode {
- return
- }
- // The 'hasher' is taken from a pool, but we don't actually
- // claim an instance until all children are done with their hashing,
- // and we actually need one
- var h *hasher
+ h := newHasher()
+ defer returnHasherToPool(h)
+
+ st.hashRec(h)
+}
+
+func (st *StackTrie) hashRec(hasher *hasher) {
+ // The switch below sets this to the RLP-encoding of this node.
+ var encodedNode []byte
switch st.nodeType {
+ case hashedNode:
+ return
+
+ case emptyNode:
+ st.val = emptyRoot.Bytes()
+ st.key = st.key[:0]
+ st.nodeType = hashedNode
+ return
+
case branchNode:
- var nodes [17]node
+ var nodes rawFullNode
for i, child := range st.children {
if child == nil {
nodes[i] = nilValueNode
continue
}
- child.hash()
+
+ child.hashRec(hasher)
if len(child.val) < 32 {
nodes[i] = rawNode(child.val)
} else {
nodes[i] = hashNode(child.val)
}
- st.children[i] = nil // Reclaim mem from subtree
+
+ // Release child back to pool.
+ st.children[i] = nil
returnToPool(child)
}
- nodes[16] = nilValueNode
- h = newHasher()
- defer returnHasherToPool(h)
- h.tmp.Reset()
- if err := rlp.Encode(&h.tmp, nodes); err != nil {
- panic(err)
- }
+
+ nodes.encode(hasher.encbuf)
+ encodedNode = hasher.encodedBytes()
+
case extNode:
- st.children[0].hash()
- h = newHasher()
- defer returnHasherToPool(h)
- h.tmp.Reset()
- var valuenode node
+ st.children[0].hashRec(hasher)
+
+ sz := hexToCompactInPlace(st.key)
+ n := rawShortNode{Key: st.key[:sz]}
if len(st.children[0].val) < 32 {
- valuenode = rawNode(st.children[0].val)
+ n.Val = rawNode(st.children[0].val)
} else {
- valuenode = hashNode(st.children[0].val)
- }
- n := struct {
- Key []byte
- Val node
- }{
- Key: hexToCompact(st.key),
- Val: valuenode,
- }
- if err := rlp.Encode(&h.tmp, n); err != nil {
- panic(err)
+ n.Val = hashNode(st.children[0].val)
}
+
+ n.encode(hasher.encbuf)
+ encodedNode = hasher.encodedBytes()
+
+ // Release child back to pool.
returnToPool(st.children[0])
- st.children[0] = nil // Reclaim mem from subtree
+ st.children[0] = nil
+
case leafNode:
- h = newHasher()
- defer returnHasherToPool(h)
- h.tmp.Reset()
st.key = append(st.key, byte(16))
sz := hexToCompactInPlace(st.key)
- n := [][]byte{st.key[:sz], st.val}
- if err := rlp.Encode(&h.tmp, n); err != nil {
- panic(err)
- }
- case emptyNode:
- st.val = emptyRoot.Bytes()
- st.key = st.key[:0]
- st.nodeType = hashedNode
- return
+ n := rawShortNode{Key: st.key[:sz], Val: valueNode(st.val)}
+
+ n.encode(hasher.encbuf)
+ encodedNode = hasher.encodedBytes()
+
default:
- panic("Invalid node type")
+ panic("invalid node type")
}
- st.key = st.key[:0]
+
st.nodeType = hashedNode
- if len(h.tmp) < 32 {
- st.val = common.CopyBytes(h.tmp)
+ st.key = st.key[:0]
+ if len(encodedNode) < 32 {
+ st.val = common.CopyBytes(encodedNode)
return
}
+
// Write the hash to the 'val'. We allocate a new val here to not mutate
// input values
- st.val = make([]byte, 32)
- h.sha.Reset()
- h.sha.Write(h.tmp)
- h.sha.Read(st.val)
+ st.val = hasher.hashData(encodedNode)
if st.db != nil {
// TODO! Is it safe to Put the slice here?
// Do all db implementations copy the value provided?
- st.db.Put(st.val, h.tmp)
+ st.db.Put(st.val, encodedNode)
}
}
-// Hash returns the hash of the current node
+// Hash returns the hash of the current node.
func (st *StackTrie) Hash() (h common.Hash) {
- st.hash()
- if len(st.val) != 32 {
- // If the node's RLP isn't 32 bytes long, the node will not
- // be hashed, and instead contain the rlp-encoding of the
- // node. For the top level node, we need to force the hashing.
- ret := make([]byte, 32)
- h := newHasher()
- defer returnHasherToPool(h)
- h.sha.Reset()
- h.sha.Write(st.val)
- h.sha.Read(ret)
- return common.BytesToHash(ret)
+ hasher := newHasher()
+ defer returnHasherToPool(hasher)
+
+ st.hashRec(hasher)
+ if len(st.val) == 32 {
+ copy(h[:], st.val)
+ return h
}
- return common.BytesToHash(st.val)
+
+ // If the node's RLP isn't 32 bytes long, the node will not
+ // be hashed, and instead contain the rlp-encoding of the
+ // node. For the top level node, we need to force the hashing.
+ hasher.sha.Reset()
+ hasher.sha.Write(st.val)
+ hasher.sha.Read(h[:])
+ return h
}
// Commit will first hash the entire trie if it's not hashed yet
@@ -484,23 +485,26 @@ func (st *StackTrie) Hash() (h common.Hash) {
//
// The associated database is expected, otherwise the whole commit
// functionality should be disabled.
-func (st *StackTrie) Commit() (common.Hash, error) {
+func (st *StackTrie) Commit() (h common.Hash, err error) {
if st.db == nil {
return common.Hash{}, ErrCommitDisabled
}
- st.hash()
- if len(st.val) != 32 {
- // If the node's RLP isn't 32 bytes long, the node will not
- // be hashed (and committed), and instead contain the rlp-encoding of the
- // node. For the top level node, we need to force the hashing+commit.
- ret := make([]byte, 32)
- h := newHasher()
- defer returnHasherToPool(h)
- h.sha.Reset()
- h.sha.Write(st.val)
- h.sha.Read(ret)
- st.db.Put(ret, st.val)
- return common.BytesToHash(ret), nil
+
+ hasher := newHasher()
+ defer returnHasherToPool(hasher)
+
+ st.hashRec(hasher)
+ if len(st.val) == 32 {
+ copy(h[:], st.val)
+ return h, nil
}
- return common.BytesToHash(st.val), nil
+
+ // If the node's RLP isn't 32 bytes long, the node will not
+ // be hashed (and committed), and instead contain the rlp-encoding of the
+ // node. For the top level node, we need to force the hashing+commit.
+ hasher.sha.Reset()
+ hasher.sha.Write(st.val)
+ hasher.sha.Read(h[:])
+ st.db.Put(h[:], st.val)
+ return h, nil
}
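A usage sketch for the refactored StackTrie, assuming keys arrive in ascending order (a StackTrie invariant) and the TryUpdate API of this code's geth lineage: with a nil db only hashing works, and Commit returns ErrCommitDisabled.

	st := NewStackTrie(nil)
	st.TryUpdate([]byte("dog"), []byte("puppy"))
	st.TryUpdate([]byte("doge"), []byte("coin"))
	root := st.Hash() // collapses the trie via hashRec
	if _, err := st.Commit(); err != ErrCommitDisabled {
		panic("expected commit to be disabled without a database")
	}
	_ = root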
diff --git a/trie/trie.go b/trie/trie.go
index c93969fad5..e0419e136b 100644
--- a/trie/trie.go
+++ b/trie/trie.go
@@ -512,6 +512,15 @@ func (t *Trie) resolveHash(n hashNode, prefix []byte) (node, error) {
// Deprecated: use Hash instead.
func (t *Trie) Root() []byte { return t.Hash().Bytes() }
+func (t *Trie) resolveBlob(n hashNode, prefix []byte) ([]byte, error) {
+ hash := common.BytesToHash(n)
+ blob, _ := t.db.Node(hash)
+ if len(blob) != 0 {
+ return blob, nil
+ }
+ return nil, &MissingNodeError{NodeHash: hash, Path: prefix}
+}
+
// Hash returns the root hash of the trie. It does not write to the
// database and can be used even if the trie doesn't have one.
func (t *Trie) Hash() common.Hash {