diff --git a/.github/workflows/ci-e2e-no-metrics-tests.yml b/.github/workflows/ci-e2e-no-metrics-tests.yml new file mode 100644 index 0000000..ebbcc55 --- /dev/null +++ b/.github/workflows/ci-e2e-no-metrics-tests.yml @@ -0,0 +1,41 @@ +name: Continuous Integration (E2E Testing Checks without metrics database) + +on: + workflow_call: +jobs: + e2e-test: + runs-on: ubuntu-latest + env: + # disable the metrics database for every step so the compose file starts the proxy service without it + METRIC_DATABASE_ENABLED: "false" + steps: + - name: checkout repo from current commit + uses: actions/checkout@v3 + - name: set up Go + uses: actions/setup-go@v3 + with: + go-version: "1.21" + check-latest: true + cache: true + - name: pull pre-built images + run: sudo docker compose -f ci.docker-compose.yml pull + # In this step, this action saves a list of existing images, + # the cache is created without them in the post run. + # It also restores the cache if it exists. + # TODO(yevhenii): this step failed with "No space left on device" error, debug it and enable back + # - name: cache docker images + # uses: satackey/action-docker-layer-caching@v0.0.11 + # Ignore the failure of a step and avoid terminating the job. + # continue-on-error: true + - name: build and start proxy service and its dependencies + run: sudo docker compose -f ci.docker-compose.yml up -d --build + - name: wait for proxy service to be running + run: bash ${GITHUB_WORKSPACE}/scripts/wait-for-proxy-service-running.sh + env: + PROXY_CONTAINER_PORT: 7777 + - name: run e2e tests + run: make e2e-test-no-metrics + - name: print proxy service logs + run: sudo docker compose -f ci.docker-compose.yml logs proxy + # because we especially want the logs if the test(s) fail 😅 + if: always() diff --git a/Makefile b/Makefile index 506d6d0..dbb6fc4 100644 --- a/Makefile +++ b/Makefile @@ -42,8 +42,15 @@ unit-test: .PHONY: e2e-test # run tests that execute against a local or remote instance of the API e2e-test: - #go test -count=1 -v -cover -coverprofile cover.out --race ./... -run "^TestE2ETest*" - go test -count=1 -v -cover -coverprofile cover.out --race ./... -run "^TestE2ETestCachingMdwForGetTxByHashMethod" + PROXY_CONTAINER_PORT=7777 bash scripts/wait-for-proxy-service-running.sh + PROXY_CONTAINER_PORT=7777 MINIMUM_REQUIRED_PARTITIONS=30 bash scripts/wait-for-proxy-service-database-metric-partitions.sh + go test -count=1 -v -cover -coverprofile cover.out --race ./... -run "^TestE2ETest*" + #go test -count=1 -v -cover -coverprofile cover.out --race ./... -run "^TestE2ETestCachingMdwForGetTxByHashMethod" + +.PHONY: e2e-test-no-metrics +# run tests that execute against a local or remote instance of the API without a database for metrics +e2e-test-no-metrics: + go test -count=1 -v -cover -coverprofile cover.out --race ./...
-run "^TestNoMetricsE2E*" .PHONY: ci-setup # set up your local environment such that running `make e2e-test` runs against testnet (like in CI) diff --git a/clients/database/database.go b/clients/database/database.go index acd40c1..bdff4e0 100644 --- a/clients/database/database.go +++ b/clients/database/database.go @@ -14,7 +14,11 @@ import ( // that haven't been run on the database being used by the proxy service // returning error (if any) and a list of migrations that have been // run and any that were not +// If db is nil, returns empty slice and nil error, as there is no database to migrate. func Migrate(ctx context.Context, db *bun.DB, migrations migrate.Migrations, logger *logging.ServiceLogger) (*migrate.MigrationSlice, error) { + if db == nil { + return &migrate.MigrationSlice{}, nil + } // set up migration config migrator := migrate.NewMigrator(db, &migrations) diff --git a/clients/database/database_test.go b/clients/database/database_test.go new file mode 100644 index 0000000..20e779d --- /dev/null +++ b/clients/database/database_test.go @@ -0,0 +1,14 @@ +package database + +import ( + "context" + "github.com/stretchr/testify/require" + "github.com/uptrace/bun/migrate" + "testing" +) + +func TestMigrateNoDatabase(t *testing.T) { + migrations, err := Migrate(context.Background(), nil, migrate.Migrations{}, nil) + require.NoError(t, err) + require.Empty(t, migrations) +} diff --git a/clients/database/postgres.go b/clients/database/postgres.go index 7133831..ee3585e 100644 --- a/clients/database/postgres.go +++ b/clients/database/postgres.go @@ -16,12 +16,15 @@ import ( // PostgresDatabaseConfig contains values for creating a // new connection to a postgres database type PostgresDatabaseConfig struct { + // DatabaseDisabled is used to disable the database, and it won't be used at all. All operations will be skipped. 
+ DatabaseDisabled bool + + DatabaseName string DatabaseEndpointURL string DatabaseUsername string DatabasePassword string ReadTimeoutSeconds int64 - WriteTimeousSeconds int64 + WriteTimeoutSeconds int64 DatabaseMaxIdleConnections int64 DatabaseConnectionMaxIdleSeconds int64 DatabaseMaxOpenConnections int64 @@ -33,12 +36,19 @@ type PostgresDatabaseConfig struct { // PostgresClient wraps a connection to a postgres database type PostgresClient struct { + isDisabled bool *bun.DB } // NewPostgresClient returns a new connection to the specified // postgres data and error (if any) func NewPostgresClient(config PostgresDatabaseConfig) (PostgresClient, error) { + if config.DatabaseDisabled { + return PostgresClient{ + isDisabled: true, + }, nil + } + // configure postgres database connection options var pgOptions *pgdriver.Connector @@ -54,7 +64,7 @@ func NewPostgresClient(config PostgresDatabaseConfig) (PostgresClient, error) { pgdriver.WithPassword(config.DatabasePassword), pgdriver.WithDatabase(config.DatabaseName), pgdriver.WithReadTimeout(time.Second*time.Duration(config.ReadTimeoutSeconds)), - pgdriver.WithWriteTimeout(time.Second*time.Duration(config.WriteTimeousSeconds)), + pgdriver.WithWriteTimeout(time.Second*time.Duration(config.WriteTimeoutSeconds)), ) } else { pgOptions = pgdriver.NewConnector( @@ -64,7 +74,7 @@ func NewPostgresClient(config PostgresDatabaseConfig) (PostgresClient, error) { pgdriver.WithPassword(config.DatabasePassword), pgdriver.WithDatabase(config.DatabaseName), pgdriver.WithReadTimeout(time.Second*time.Duration(config.ReadTimeoutSeconds)), - pgdriver.WithWriteTimeout(time.Second*time.Duration(config.WriteTimeousSeconds)), + pgdriver.WithWriteTimeout(time.Second*time.Duration(config.WriteTimeoutSeconds)), ) } diff --git a/clients/database/postgres_test.go b/clients/database/postgres_test.go new file mode 100644 index 0000000..74d2235 --- /dev/null +++ b/clients/database/postgres_test.go @@ -0,0 +1,15 @@ +package database + +import ( + "github.com/stretchr/testify/require" + "testing" +) + +func TestDisabledDBCreation(t *testing.T) { + config := PostgresDatabaseConfig{ + DatabaseDisabled: true, + } + db, err := NewPostgresClient(config) + require.NoError(t, err) + require.True(t, db.isDisabled) +} diff --git a/clients/database/request_metric.go b/clients/database/request_metric.go index 2ec3e9c..9b98c12 100644 --- a/clients/database/request_metric.go +++ b/clients/database/request_metric.go @@ -34,8 +34,13 @@ type ProxiedRequestMetric struct { } // Save saves the current ProxiedRequestMetric to -// the database, returning error (if any) +// the database, returning error (if any). +// If db is nil, returns nil error. func (prm *ProxiedRequestMetric) Save(ctx context.Context, db *bun.DB) error { + if db == nil { + return nil + } + _, err := db.NewInsert().Model(prm).Exec(ctx) return err @@ -44,8 +49,13 @@ func (prm *ProxiedRequestMetric) Save(ctx context.Context, db *bun.DB) error { // ListProxiedRequestMetricsWithPagination returns a page of max // `limit` ProxiedRequestMetrics from the offset specified by`cursor` // error (if any) along with a cursor to use to fetch the next page -if the cursor is 0 no more pages exists +if the cursor is 0 no more pages exist. +// Used only in tests. If db is nil, returns an empty slice and 0 cursor.
func ListProxiedRequestMetricsWithPagination(ctx context.Context, db *bun.DB, cursor int64, limit int) ([]ProxiedRequestMetric, int64, error) { + if db == nil { + return []ProxiedRequestMetric{}, 0, nil + } + var proxiedRequestMetrics []ProxiedRequestMetric var nextCursor int64 @@ -62,8 +72,13 @@ func ListProxiedRequestMetricsWithPagination(ctx context.Context, db *bun.DB, cu // CountAttachedProxiedRequestMetricPartitions returns the current // count of attached partitions for the ProxiedRequestMetricsTableName -// and error (if any) +// and error (if any). +// If db is nil, returns 0 and nil error. func CountAttachedProxiedRequestMetricPartitions(ctx context.Context, db *bun.DB) (int64, error) { + if db == nil { + return 0, nil + } + var count int64 countPartitionsQuery := fmt.Sprintf(` @@ -88,7 +103,12 @@ func CountAttachedProxiedRequestMetricPartitions(ctx context.Context, db *bun.DB // GetLastCreatedAttachedProxiedRequestMetricsPartitionName gets the table name // for the last created (and attached) proxied request metrics partition +// Used for the status check. If db is nil, returns an empty string and nil error. func GetLastCreatedAttachedProxiedRequestMetricsPartitionName(ctx context.Context, db *bun.DB) (string, error) { + if db == nil { + return "", nil + } + var lastCreatedAttachedPartitionName string lastCreatedAttachedPartitionNameQuery := fmt.Sprintf(` @@ -114,8 +134,13 @@ WHERE parent.relname='%s' order by child.oid desc limit 1;`, ProxiedRequestMetri // DeleteProxiedRequestMetricsOlderThanNDays deletes // all proxied request metrics older than the specified -// days, returning error (if any) +// days, returning error (if any). +// Used during the pruning process. If db is nil, returns nil error. func DeleteProxiedRequestMetricsOlderThanNDays(ctx context.Context, db *bun.DB, n int64) error { + if db == nil { + return nil + } + _, err := db.NewDelete().Model((*ProxiedRequestMetric)(nil)).Where(fmt.Sprintf("request_time < now() - interval '%d' day", n)).Exec(ctx) return err diff --git a/clients/database/request_metric_test.go b/clients/database/request_metric_test.go new file mode 100644 index 0000000..37afe30 --- /dev/null +++ b/clients/database/request_metric_test.go @@ -0,0 +1,37 @@ +package database + +import ( + "context" + "github.com/stretchr/testify/require" + "testing" +) + +func TestNoDatabaseSave(t *testing.T) { + prm := ProxiedRequestMetric{} + err := prm.Save(context.Background(), nil) + require.NoError(t, err) +} + +func TestNoDatabaseListProxiedRequestMetricsWithPagination(t *testing.T) { + proxiedRequestMetrics, cursor, err := ListProxiedRequestMetricsWithPagination(context.Background(), nil, 0, 0) + require.NoError(t, err) + require.Empty(t, proxiedRequestMetrics) + require.Zero(t, cursor) +} + +func TestNoDatabaseCountAttachedProxiedRequestMetricPartitions(t *testing.T) { + count, err := CountAttachedProxiedRequestMetricPartitions(context.Background(), nil) + require.NoError(t, err) + require.Zero(t, count) +} + +func TestNoDatabaseGetLastCreatedAttachedProxiedRequestMetricsPartitionName(t *testing.T) { + partitionName, err := GetLastCreatedAttachedProxiedRequestMetricsPartitionName(context.Background(), nil) + require.NoError(t, err) + require.Empty(t, partitionName) +} + +func TestNoDatabaseDeleteProxiedRequestMetricsOlderThanNDays(t *testing.T) { + err := DeleteProxiedRequestMetricsOlderThanNDays(context.Background(), nil, 0) + require.NoError(t, err) +} diff --git a/config/config.go b/config/config.go index 46ec3c2..029c349 100644 --- a/config/config.go +++ b/config/config.go @@ -48,7
+48,7 @@ type Config struct { MetricPruningRoutineInterval time.Duration MetricPruningRoutineDelayFirstRun time.Duration MetricPruningMaxRequestMetricsHistoryDays int - MetricOperationsEnabled bool + MetricDatabaseEnabled bool CacheEnabled bool RedisEndpointURL string RedisPassword string @@ -102,8 +102,8 @@ const ( DEFAULT_METRIC_PRUNING_ENABLED = true METRIC_PRUNING_ROUTINE_INTERVAL_SECONDS_ENVIRONMENT_KEY = "METRIC_PRUNING_ROUTINE_INTERVAL_SECONDS" // 60 seconds * 60 minutes * 24 hours = 1 day - METRIC_OPERATIONS_ENABLED_ENVIRONMENT_KEY = "METRIC_OPERATIONS_ENABLED" - DEFAULT_METRIC_OPERATIONS_ENABLED = true + METRIC_DATABASE_ENABLED_ENVIRONMENT_KEY = "METRIC_DATABASE_ENABLED" + DEFAULT_METRIC_DATABASE_ENABLED = true DEFAULT_METRIC_PRUNING_ROUTINE_INTERVAL_SECONDS = 86400 METRIC_PRUNING_ROUTINE_DELAY_FIRST_RUN_SECONDS_ENVIRONMENT_KEY = "METRIC_PRUNING_ROUTINE_DELAY_FIRST_RUN_SECONDS" DEFAULT_METRIC_PRUNING_ROUTINE_DELAY_FIRST_RUN_SECONDS = 10 @@ -383,7 +383,7 @@ func ReadConfig() Config { MetricPruningRoutineInterval: time.Duration(time.Duration(EnvOrDefaultInt(METRIC_PRUNING_ROUTINE_INTERVAL_SECONDS_ENVIRONMENT_KEY, DEFAULT_METRIC_PRUNING_ROUTINE_INTERVAL_SECONDS)) * time.Second), MetricPruningRoutineDelayFirstRun: time.Duration(time.Duration(EnvOrDefaultInt(METRIC_PRUNING_ROUTINE_DELAY_FIRST_RUN_SECONDS_ENVIRONMENT_KEY, DEFAULT_METRIC_PRUNING_ROUTINE_DELAY_FIRST_RUN_SECONDS)) * time.Second), MetricPruningMaxRequestMetricsHistoryDays: EnvOrDefaultInt(METRIC_PRUNING_MAX_REQUEST_METRICS_HISTORY_DAYS_ENVIRONMENT_KEY, DEFAULT_METRIC_PRUNING_MAX_REQUEST_METRICS_HISTORY_DAYS), - MetricOperationsEnabled: EnvOrDefaultBool(METRIC_OPERATIONS_ENABLED_ENVIRONMENT_KEY, DEFAULT_METRIC_OPERATIONS_ENABLED), + MetricDatabaseEnabled: EnvOrDefaultBool(METRIC_DATABASE_ENABLED_ENVIRONMENT_KEY, DEFAULT_METRIC_DATABASE_ENABLED), CacheEnabled: EnvOrDefaultBool(CACHE_ENABLED_ENVIRONMENT_KEY, false), RedisEndpointURL: os.Getenv(REDIS_ENDPOINT_URL_ENVIRONMENT_KEY), RedisPassword: os.Getenv(REDIS_PASSWORD_ENVIRONMENT_KEY), diff --git a/main.go b/main.go index 6b3cb20..78ac832 100644 --- a/main.go +++ b/main.go @@ -37,7 +37,7 @@ func init() { } func startMetricPartitioningRoutine(serviceConfig config.Config, service service.ProxyService, serviceLogger logging.ServiceLogger) <-chan error { - if !serviceConfig.MetricOperationsEnabled { + if !serviceConfig.MetricDatabaseEnabled { serviceLogger.Info().Msg("skipping starting metric partitioning routine since it is disabled via config") return nil @@ -73,7 +73,7 @@ func startMetricPartitioningRoutine(serviceConfig config.Config, service service } func startMetricCompactionRoutine(serviceConfig config.Config, service service.ProxyService, serviceLogger logging.ServiceLogger) <-chan error { - if !serviceConfig.MetricOperationsEnabled { + if !serviceConfig.MetricDatabaseEnabled { serviceLogger.Info().Msg("skipping starting metric compaction routine since it is disabled via config") return nil @@ -107,7 +107,7 @@ func startMetricCompactionRoutine(serviceConfig config.Config, service service.P } func startMetricPruningRoutine(serviceConfig config.Config, service service.ProxyService, serviceLogger logging.ServiceLogger) <-chan error { - if !serviceConfig.MetricPruningEnabled || !serviceConfig.MetricOperationsEnabled { + if !serviceConfig.MetricPruningEnabled || !serviceConfig.MetricDatabaseEnabled { serviceLogger.Info().Msg("skipping starting metric pruning routine since it is disabled via config") return make(<-chan error) diff --git a/main_no_metrics_test.go 
b/main_no_metrics_test.go new file mode 100644 index 0000000..a32e50a --- /dev/null +++ b/main_no_metrics_test.go @@ -0,0 +1,1003 @@ +package main_test + +import ( + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/rpc" + "github.com/redis/go-redis/v9" + "github.com/stretchr/testify/require" + "io" + "log" + "math/big" + "strings" + "testing" + + "github.com/kava-labs/kava-proxy-service/service" + "github.com/kava-labs/kava-proxy-service/service/cachemdw" +) + +func TestNoMetricsE2ETestProxyReturnsNonZeroLatestBlockHeader(t *testing.T) { + client, err := ethclient.Dial(proxyServiceURL) + + require.NoError(t, err) + + header, err := client.HeaderByNumber(testContext, nil) + require.NoError(t, err) + + require.Greater(t, int(header.Number.Int64()), 0) +} + +func TestNoMetricsE2ETestProxyProxiesForMultipleHosts(t *testing.T) { + client, err := ethclient.Dial(proxyServiceURL) + + require.NoError(t, err) + + header, err := client.HeaderByNumber(testContext, nil) + require.NoError(t, err) + + require.Greater(t, int(header.Number.Int64()), 0) + + pruningClient, err := ethclient.Dial(proxyServicePruningURL) + + require.NoError(t, err) + + header, err = pruningClient.HeaderByNumber(testContext, nil) + require.NoError(t, err) + + require.Greater(t, int(header.Number.Int64()), 0) +} + +func TestNoMetricsE2ETestProxyTracksBlockNumberForEth_getBlockByNumberRequest(t *testing.T) { + // create api client + client, err := ethclient.Dial(proxyServiceURL) + require.NoError(t, err) + + // get the latest queryable block number + // need to do this dynamically since not all blocks + // are queryable for a given network + response, err := client.HeaderByNumber(testContext, nil) + require.NoError(t, err) + + requestBlockNumber := response.Number + + // make a request to the api + _, err = client.HeaderByNumber(testContext, requestBlockNumber) + require.NoError(t, err) +} + +func TestNoMetricsE2ETestProxyTracksBlockNumberForMethodsWithBlockNumberParam(t *testing.T) { + testRandomAddressHex := "0x6767114FFAA17C6439D7AEA480738B982CE63A02" + testAddress := common.HexToAddress(testRandomAddressHex) + testRandomHash := common.HexToHash(testRandomAddressHex) + + // create api client + client, err := ethclient.Dial(proxyServiceURL) + require.NoError(t, err) + + // get the latest queryable block number + // need to do this dynamically since not all blocks + // are queryable for a given network + latestBlock, err := client.HeaderByNumber(testContext, nil) + + require.NoError(t, err) + + requestBlockNumber := latestBlock.Number + + // make requests to the api and require that they succeed + // unlike the metrics-enabled e2e tests there is no metrics database here, + // so we can't assert that request metrics were created for each request; + // we just require that each request returns without error + + // eth_getBalance + _, err = client.BalanceAt(testContext, testAddress, requestBlockNumber) + require.NoError(t, err) + + // eth_getStorageAt + _, err = client.StorageAt(testContext, testAddress, testRandomHash, requestBlockNumber) + require.NoError(t, err) + + // eth_getTransactionCount + _, err = client.NonceAt(testContext, testAddress, requestBlockNumber) + require.NoError(t, err) + + // eth_getBlockTransactionCountByNumber + _, err = client.PendingTransactionCount(testContext) + require.NoError(t, err) + + // eth_getCode + _, err =
client.CodeAt(testContext, testAddress, requestBlockNumber) + require.NoError(t, err) + + // eth_getBlockByNumber + _, err = client.HeaderByNumber(testContext, requestBlockNumber) + require.NoError(t, err) + + // eth_call + _, err = client.CallContract(testContext, ethereum.CallMsg{}, requestBlockNumber) + require.NoError(t, err) +} + +func TestNoMetricsE2ETest_HeightBasedRouting(t *testing.T) { + if !proxyServiceHeightBasedRouting { + t.Skip("TEST_PROXY_HEIGHT_BASED_ROUTING_ENABLED is false. skipping height-based routing e2e test") + } + + rpc, err := rpc.Dial(proxyServiceURL) + require.NoError(t, err) + + testCases := []struct { + name string + method string + params []interface{} + expectRoute string + }{ + { + name: "request for non-latest height -> default", + method: "eth_getBlockByNumber", + params: []interface{}{"0x15", false}, // block 21 is beyond shards + expectRoute: service.ResponseBackendDefault, + }, + { + name: "request for height in 1st shard -> shard", + method: "eth_getBlockByNumber", + params: []interface{}{"0x2", false}, // block 2 + expectRoute: service.ResponseBackendShard, + }, + { + name: "request for height in 2nd shard -> shard", + method: "eth_getBlockByNumber", + params: []interface{}{"0xF", false}, // block 15 + expectRoute: service.ResponseBackendShard, + }, + { + name: "request for earliest height -> 1st shard", + method: "eth_getBlockByNumber", + params: []interface{}{"earliest", false}, + expectRoute: service.ResponseBackendShard, + }, + { + name: "request for latest height -> pruning", + method: "eth_getBlockByNumber", + params: []interface{}{"latest", false}, + expectRoute: service.ResponseBackendPruning, + }, + { + name: "request for finalized height -> pruning", + method: "eth_getBlockByNumber", + params: []interface{}{"finalized", false}, + expectRoute: service.ResponseBackendPruning, + }, + { + name: "request with empty height -> pruning", + method: "eth_getBlockByNumber", + params: []interface{}{nil, false}, + expectRoute: service.ResponseBackendPruning, + }, + { + name: "request not requiring height -> pruning", + method: "eth_chainId", + params: []interface{}{}, + expectRoute: service.ResponseBackendPruning, + }, + { + name: "request by hash -> default", + method: "eth_getBlockByHash", + params: []interface{}{"0xe9bd10bc1d62b4406dd1fb3dbf3adb54f640bdb9ebbe3dd6dfc6bcc059275e54", false}, + expectRoute: service.ResponseBackendDefault, + }, + { + name: "un-parseable (invalid) height -> default", + method: "eth_getBlockByNumber", + params: []interface{}{"not-a-block-tag", false}, + expectRoute: service.ResponseBackendDefault, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + err := rpc.Call(nil, tc.method, tc.params...) 
+ require.NoError(t, err) + }) + } +} + +func TestNoMetricsE2ETestCachingMdwWithBlockNumberParam(t *testing.T) { + // create api and database clients + client, err := ethclient.Dial(proxyServiceURL) + if err != nil { + t.Fatal(err) + } + + redisClient := redis.NewClient(&redis.Options{ + Addr: redisURL, + Password: redisPassword, + DB: 0, + }) + cleanUpRedis(t, redisClient) + expectKeysNum(t, redisClient, 0) + + for _, tc := range []struct { + desc string + method string + params []interface{} + keysNum int + }{ + { + desc: "test case #1", + method: "eth_getBlockByNumber", + params: []interface{}{"0x1", true}, + keysNum: 1, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + // test cache MISS and cache HIT scenarios for specified method + // check corresponding values in cachemdw.CacheHeaderKey HTTP header + // check that cached and non-cached responses are equal + + // eth_getBlockByNumber - cache MISS + cacheMissResp := mkJsonRpcRequest(t, proxyServiceURL, 1, tc.method, tc.params) + require.Equal(t, cachemdw.CacheMissHeaderValue, cacheMissResp.Header[cachemdw.CacheHeaderKey][0]) + body1, err := io.ReadAll(cacheMissResp.Body) + require.NoError(t, err) + err = checkJsonRpcErr(body1) + require.NoError(t, err) + expectKeysNum(t, redisClient, tc.keysNum) + expectedKey := "local-chain:evm-request:eth_getBlockByNumber:sha256:d08b426164eacf6646fb1817403ec0af5d37869a0f32a01ebfab3096fa4999be" + containsKey(t, redisClient, expectedKey) + // don't check CORS because the proxy only force-sets the header for cache hits. + + // eth_getBlockByNumber - cache HIT + cacheHitResp := mkJsonRpcRequest(t, proxyServiceURL, 1, tc.method, tc.params) + require.Equal(t, cachemdw.CacheHitHeaderValue, cacheHitResp.Header[cachemdw.CacheHeaderKey][0]) + body2, err := io.ReadAll(cacheHitResp.Body) + require.NoError(t, err) + err = checkJsonRpcErr(body2) + require.NoError(t, err) + expectKeysNum(t, redisClient, tc.keysNum) + containsKey(t, redisClient, expectedKey) + + // check that response bodies are the same + require.JSONEq(t, string(body1), string(body2), "blocks should be the same") + + // check that response headers are the same + equalHeaders(t, cacheMissResp.Header, cacheHitResp.Header) + + // check that CORS headers are present for cache hit scenario + require.Equal(t, cacheHitResp.Header[accessControlAllowOriginHeaderName], []string{"*"}) + + // eth_getBlockByNumber for request with different id - cache HIT + diffIdResp := mkJsonRpcRequest(t, proxyServiceURL, "a string id!", tc.method, tc.params) + require.Equal(t, cachemdw.CacheHitHeaderValue, diffIdResp.Header[cachemdw.CacheHeaderKey][0]) + body3, err := io.ReadAll(diffIdResp.Body) + require.NoError(t, err) + err = checkJsonRpcErr(body3) + require.NoError(t, err) + expectKeysNum(t, redisClient, tc.keysNum) + containsKey(t, redisClient, expectedKey) + + // check that response bodies are the same, except the id matches the request + expectedRes := strings.Replace(string(body1), "\"id\":1", "\"id\":\"a string id!\"", 1) + require.JSONEq(t, expectedRes, string(body3), "blocks should be the same") + + // check that response headers are the same + equalHeaders(t, cacheMissResp.Header, diffIdResp.Header) + + // check that CORS headers are present for cache hit scenario + require.Equal(t, diffIdResp.Header[accessControlAllowOriginHeaderName], []string{"*"}) + }) + } + + // test cache MISS and cache HIT scenarios for eth_getBlockByNumber method + // check that cached and non-cached responses are equal + { + // eth_getBlockByNumber - cache MISS + block1, err :=
client.BlockByNumber(testContext, big.NewInt(2)) + require.NoError(t, err) + expectKeysNum(t, redisClient, 2) + expectedKey := "local-chain:evm-request:eth_getBlockByNumber:sha256:0bfa7c5affc525ed731803c223042b4b1eb16ee7a6a539ae213b47a3ef6e3a7d" + containsKey(t, redisClient, expectedKey) + + // eth_getBlockByNumber - cache HIT + block2, err := client.BlockByNumber(testContext, big.NewInt(2)) + require.NoError(t, err) + expectKeysNum(t, redisClient, 2) + containsKey(t, redisClient, expectedKey) + + require.Equal(t, block1, block2, "blocks should be the same") + } + + cleanUpRedis(t, redisClient) +} + +func TestNoMetricsE2ETestCachingMdwWithBlockNumberParam_Metrics(t *testing.T) { + client, err := ethclient.Dial(proxyServiceURL) + require.NoError(t, err) + + redisClient := redis.NewClient(&redis.Options{ + Addr: redisURL, + Password: redisPassword, + DB: 0, + }) + cleanUpRedis(t, redisClient) + expectKeysNum(t, redisClient, 0) + + for _, tc := range []struct { + desc string + method string + params []interface{} + keysNum int + }{ + { + desc: "test case #1", + method: "eth_getBlockByNumber", + params: []interface{}{"0x1", true}, + keysNum: 1, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + // test cache MISS and cache HIT scenarios for specified method + // check corresponding values in cachemdw.CacheHeaderKey HTTP header + // check that cached and non-cached responses are equal + + // eth_getBlockByNumber - cache MISS + resp1 := mkJsonRpcRequest(t, proxyServiceURL, 1, tc.method, tc.params) + require.Equal(t, cachemdw.CacheMissHeaderValue, resp1.Header[cachemdw.CacheHeaderKey][0]) + body1, err := io.ReadAll(resp1.Body) + require.NoError(t, err) + err = checkJsonRpcErr(body1) + require.NoError(t, err) + expectKeysNum(t, redisClient, tc.keysNum) + expectedKey := "local-chain:evm-request:eth_getBlockByNumber:sha256:d08b426164eacf6646fb1817403ec0af5d37869a0f32a01ebfab3096fa4999be" + containsKey(t, redisClient, expectedKey) + + // eth_getBlockByNumber - cache HIT + resp2 := mkJsonRpcRequest(t, proxyServiceURL, 1, tc.method, tc.params) + require.Equal(t, cachemdw.CacheHitHeaderValue, resp2.Header[cachemdw.CacheHeaderKey][0]) + body2, err := io.ReadAll(resp2.Body) + require.NoError(t, err) + err = checkJsonRpcErr(body2) + require.NoError(t, err) + expectKeysNum(t, redisClient, tc.keysNum) + containsKey(t, redisClient, expectedKey) + + require.JSONEq(t, string(body1), string(body2), "blocks should be the same") + }) + } + + // test cache MISS and cache HIT scenarios for eth_getBlockByNumber method + // check that cached and non-cached responses are equal + { + // eth_getBlockByNumber - cache MISS + block1, err := client.BlockByNumber(testContext, big.NewInt(2)) + require.NoError(t, err) + expectKeysNum(t, redisClient, 2) + expectedKey := "local-chain:evm-request:eth_getBlockByNumber:sha256:0bfa7c5affc525ed731803c223042b4b1eb16ee7a6a539ae213b47a3ef6e3a7d" + containsKey(t, redisClient, expectedKey) + + // eth_getBlockByNumber - cache HIT + block2, err := client.BlockByNumber(testContext, big.NewInt(2)) + require.NoError(t, err) + expectKeysNum(t, redisClient, 2) + containsKey(t, redisClient, expectedKey) + + require.Equal(t, block1, block2, "blocks should be the same") + } + + cleanUpRedis(t, redisClient) +} + +func TestNoMetricsE2ETestCachingMdwWithBlockNumberParam_EmptyResult(t *testing.T) { + testRandomAddressHex := "0x6767114FFAA17C6439D7AEA480738B982CE63A02" + testAddress := common.HexToAddress(testRandomAddressHex) + + // create api and database clients + client, err := 
ethclient.Dial(proxyServiceURL) + if err != nil { + t.Fatal(err) + } + + redisClient := redis.NewClient(&redis.Options{ + Addr: redisURL, + Password: redisPassword, + DB: 0, + }) + cleanUpRedis(t, redisClient) + expectKeysNum(t, redisClient, 0) + + for _, tc := range []struct { + desc string + method string + params []interface{} + keysNum int + }{ + { + desc: "test case #1", + method: "eth_getTransactionCount", + params: []interface{}{testAddress, "0x1"}, + keysNum: 0, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + // both calls should lead to cache MISS scenario, because empty results aren't cached + // check corresponding values in cachemdw.CacheHeaderKey HTTP header + // check that responses are equal + + // eth_getBlockByNumber - cache MISS + resp1 := mkJsonRpcRequest(t, proxyServiceURL, 1, tc.method, tc.params) + require.Equal(t, cachemdw.CacheMissHeaderValue, resp1.Header[cachemdw.CacheHeaderKey][0]) + body1, err := io.ReadAll(resp1.Body) + require.NoError(t, err) + err = checkJsonRpcErr(body1) + require.NoError(t, err) + expectKeysNum(t, redisClient, tc.keysNum) + + // eth_getBlockByNumber - cache MISS again (empty results aren't cached) + resp2 := mkJsonRpcRequest(t, proxyServiceURL, 1, tc.method, tc.params) + require.Equal(t, cachemdw.CacheMissHeaderValue, resp2.Header[cachemdw.CacheHeaderKey][0]) + body2, err := io.ReadAll(resp2.Body) + require.NoError(t, err) + err = checkJsonRpcErr(body2) + require.NoError(t, err) + expectKeysNum(t, redisClient, tc.keysNum) + + require.JSONEq(t, string(body1), string(body2), "blocks should be the same") + }) + } + + // both calls should lead to cache MISS scenario, because empty results aren't cached + // check that responses are equal + { + // eth_getTransactionCount - cache MISS + bal1, err := client.NonceAt(testContext, testAddress, big.NewInt(2)) + require.NoError(t, err) + expectKeysNum(t, redisClient, 0) + + // eth_getTransactionCount - cache MISS again (empty results aren't cached) + bal2, err := client.NonceAt(testContext, testAddress, big.NewInt(2)) + require.NoError(t, err) + expectKeysNum(t, redisClient, 0) + + require.Equal(t, bal1, bal2, "balances should be the same") + } + + cleanUpRedis(t, redisClient) +} + +func TestNoMetricsE2ETestCachingMdwWithBlockNumberParam_ErrorResult(t *testing.T) { + testRandomAddressHex := "0x6767114FFAA17C6439D7AEA480738B982CE63A02" + testAddress := common.HexToAddress(testRandomAddressHex) + + redisClient := redis.NewClient(&redis.Options{ + Addr: redisURL, + Password: redisPassword, + DB: 0, + }) + cleanUpRedis(t, redisClient) + expectKeysNum(t, redisClient, 0) + + for _, tc := range []struct { + desc string + method string + params []interface{} + keysNum int + }{ + { + desc: "test case #1", + method: "eth_getBalance", + params: []interface{}{testAddress, "0x3B9ACA00"}, // block # 1000_000_000, which doesn't exist + keysNum: 0, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + // both calls should lead to cache MISS scenario, because error results aren't cached + // check corresponding values in cachemdw.CacheHeaderKey HTTP header + // + // cache MISS + resp1 := mkJsonRpcRequest(t, proxyServiceURL, 1, tc.method, tc.params) + require.Equal(t, cachemdw.CacheMissHeaderValue, resp1.Header[cachemdw.CacheHeaderKey][0]) + body1, err := io.ReadAll(resp1.Body) + require.NoError(t, err) + err = checkJsonRpcErr(body1) + require.Error(t, err) + expectKeysNum(t, redisClient, tc.keysNum) + + // cache MISS again (error results aren't cached) + resp2 := mkJsonRpcRequest(t, proxyServiceURL, 1, tc.method, 
tc.params) + require.Equal(t, cachemdw.CacheMissHeaderValue, resp2.Header[cachemdw.CacheHeaderKey][0]) + body2, err := io.ReadAll(resp2.Body) + require.NoError(t, err) + err = checkJsonRpcErr(body2) + require.Error(t, err) + expectKeysNum(t, redisClient, tc.keysNum) + }) + } + + cleanUpRedis(t, redisClient) +} + +func TestNoMetricsE2ETestCachingMdwWithBlockNumberParam_FutureBlocks(t *testing.T) { + futureBlockNumber := "0x3B9ACA00" // block # 1000_000_000, which doesn't exist + testRandomAddressHex := "0x6767114FFAA17C6439D7AEA480738B982CE63A02" + testAddress := common.HexToAddress(testRandomAddressHex) + + redisClient := redis.NewClient(&redis.Options{ + Addr: redisURL, + Password: redisPassword, + DB: 0, + }) + cleanUpRedis(t, redisClient) + expectKeysNum(t, redisClient, 0) + + for _, tc := range []struct { + desc string + method string + params []interface{} + keysNum int + errorMsg string + }{ + { + desc: "test case #1", + method: "eth_getBalance", + params: []interface{}{testAddress, futureBlockNumber}, + keysNum: 0, + errorMsg: "height 1000000000 must be less than or equal to the current blockchain height", + }, + { + desc: "test case #2", + method: "eth_getStorageAt", + params: []interface{}{testAddress, "0x6661e9d6d8b923d5bbaab1b96e1dd51ff6ea2a93520fdc9eb75d059238b8c5e9", futureBlockNumber}, + keysNum: 0, + errorMsg: "invalid height: cannot query with height in the future; please provide a valid height", + }, + { + desc: "test case #3", + method: "eth_getTransactionCount", + params: []interface{}{testAddress, futureBlockNumber}, + keysNum: 0, + errorMsg: "", + }, + { + desc: "test case #4", + method: "eth_getBlockTransactionCountByNumber", + params: []interface{}{futureBlockNumber}, + keysNum: 0, + errorMsg: "", + }, + { + desc: "test case #5", + method: "eth_getUncleCountByBlockNumber", + params: []interface{}{futureBlockNumber}, + keysNum: 0, + errorMsg: "", + }, + { + desc: "test case #6", + method: "eth_getCode", + params: []interface{}{testAddress, futureBlockNumber}, + keysNum: 0, + errorMsg: "invalid height: cannot query with height in the future; please provide a valid height", + }, + { + desc: "test case #7", + method: "eth_getBlockByNumber", + params: []interface{}{futureBlockNumber, false}, + keysNum: 0, + errorMsg: "", + }, + { + desc: "test case #8", + method: "eth_getTransactionByBlockNumberAndIndex", + params: []interface{}{futureBlockNumber, "0x0"}, + keysNum: 0, + errorMsg: "", + }, + { + desc: "test case #9", + method: "eth_getUncleByBlockNumberAndIndex", + params: []interface{}{futureBlockNumber, "0x0"}, + keysNum: 0, + errorMsg: "", + }, + { + desc: "test case #10", + method: "eth_call", + params: []interface{}{struct{}{}, futureBlockNumber}, + keysNum: 0, + errorMsg: "header not found", + }, + } { + t.Run(tc.desc, func(t *testing.T) { + // both calls should lead to cache MISS scenario, because error results aren't cached + // check corresponding values in cachemdw.CacheHeaderKey HTTP header + // + // cache MISS + resp1 := mkJsonRpcRequest(t, proxyServiceURL, 1, tc.method, tc.params) + require.Equal(t, cachemdw.CacheMissHeaderValue, resp1.Header[cachemdw.CacheHeaderKey][0]) + body1, err := io.ReadAll(resp1.Body) + require.NoError(t, err) + err = checkJsonRpcErr(body1) + if tc.errorMsg != "" { + require.Error(t, err) + require.Contains(t, err.Error(), tc.errorMsg) + } + expectKeysNum(t, redisClient, tc.keysNum) + + // cache MISS again (error results aren't cached) + resp2 := mkJsonRpcRequest(t, proxyServiceURL, 1, tc.method, tc.params) + require.Equal(t, 
cachemdw.CacheMissHeaderValue, resp2.Header[cachemdw.CacheHeaderKey][0]) + body2, err := io.ReadAll(resp2.Body) + require.NoError(t, err) + err = checkJsonRpcErr(body2) + if tc.errorMsg != "" { + require.Error(t, err) + require.Contains(t, err.Error(), tc.errorMsg) + } + expectKeysNum(t, redisClient, tc.keysNum) + }) + } + + cleanUpRedis(t, redisClient) +} + +func TestNoMetricsE2ETestCachingMdwWithBlockHashParam_UnexistingBlockHashes(t *testing.T) { + unexistingBlockHash := "0xb903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238" + + redisClient := redis.NewClient(&redis.Options{ + Addr: redisURL, + Password: redisPassword, + DB: 0, + }) + cleanUpRedis(t, redisClient) + expectKeysNum(t, redisClient, 0) + + for _, tc := range []struct { + desc string + method string + params []interface{} + keysNum int + errorMsg string + }{ + { + desc: "test case #1", + method: "eth_getBlockTransactionCountByHash", + params: []interface{}{unexistingBlockHash}, + keysNum: 0, + errorMsg: "", + }, + { + desc: "test case #2", + method: "eth_getUncleCountByBlockHash", + params: []interface{}{unexistingBlockHash}, + keysNum: 0, + errorMsg: "", + }, + { + desc: "test case #3", + method: "eth_getBlockByHash", + params: []interface{}{unexistingBlockHash, false}, + keysNum: 0, + errorMsg: "", + }, + { + desc: "test case #4", + method: "eth_getUncleByBlockHashAndIndex", + params: []interface{}{unexistingBlockHash, "0x0"}, + keysNum: 0, + errorMsg: "", + }, + { + desc: "test case #5", + method: "eth_getTransactionByBlockHashAndIndex", + params: []interface{}{unexistingBlockHash, "0x0"}, + keysNum: 0, + errorMsg: "", + }, + } { + t.Run(tc.desc, func(t *testing.T) { + // both calls should lead to cache MISS scenario, because error results aren't cached + // check corresponding values in cachemdw.CacheHeaderKey HTTP header + // + // cache MISS + resp1 := mkJsonRpcRequest(t, proxyServiceURL, 1, tc.method, tc.params) + require.Equal(t, cachemdw.CacheMissHeaderValue, resp1.Header[cachemdw.CacheHeaderKey][0]) + body1, err := io.ReadAll(resp1.Body) + require.NoError(t, err) + err = checkJsonRpcErr(body1) + if tc.errorMsg != "" { + require.Error(t, err) + require.Contains(t, err.Error(), tc.errorMsg) + } + expectKeysNum(t, redisClient, tc.keysNum) + + // cache MISS again (error results aren't cached) + resp2 := mkJsonRpcRequest(t, proxyServiceURL, 1, tc.method, tc.params) + require.Equal(t, cachemdw.CacheMissHeaderValue, resp2.Header[cachemdw.CacheHeaderKey][0]) + body2, err := io.ReadAll(resp2.Body) + require.NoError(t, err) + err = checkJsonRpcErr(body2) + if tc.errorMsg != "" { + require.Error(t, err) + require.Contains(t, err.Error(), tc.errorMsg) + } + expectKeysNum(t, redisClient, tc.keysNum) + }) + } + + cleanUpRedis(t, redisClient) +} + +func TestNoMetricsE2ETestCachingMdwWithBlockNumberParam_DiffJsonRpcReqIDs(t *testing.T) { + redisClient := redis.NewClient(&redis.Options{ + Addr: redisURL, + Password: redisPassword, + DB: 0, + }) + cleanUpRedis(t, redisClient) + expectKeysNum(t, redisClient, 0) + + for _, tc := range []struct { + desc string + method string + params []interface{} + keysNum int + }{ + { + desc: "test case #1", + method: "eth_getBlockByNumber", + params: []interface{}{"0x1", true}, + keysNum: 1, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + // test cache MISS and cache HIT scenarios for specified method + // check corresponding values in cachemdw.CacheHeaderKey HTTP header + // NOTE: JSON-RPC request IDs are different + // check that cached and non-cached responses differ only in 
response ID + + // eth_getBlockByNumber - cache MISS + resp1 := mkJsonRpcRequest(t, proxyServiceURL, 1, tc.method, tc.params) + require.Equal(t, cachemdw.CacheMissHeaderValue, resp1.Header[cachemdw.CacheHeaderKey][0]) + body1, err := io.ReadAll(resp1.Body) + require.NoError(t, err) + err = checkJsonRpcErr(body1) + require.NoError(t, err) + expectKeysNum(t, redisClient, tc.keysNum) + expectedKey := "local-chain:evm-request:eth_getBlockByNumber:sha256:d08b426164eacf6646fb1817403ec0af5d37869a0f32a01ebfab3096fa4999be" + containsKey(t, redisClient, expectedKey) + + // eth_getBlockByNumber - cache HIT + resp2 := mkJsonRpcRequest(t, proxyServiceURL, 2, tc.method, tc.params) + require.Equal(t, cachemdw.CacheHitHeaderValue, resp2.Header[cachemdw.CacheHeaderKey][0]) + body2, err := io.ReadAll(resp2.Body) + require.NoError(t, err) + err = checkJsonRpcErr(body2) + require.NoError(t, err) + expectKeysNum(t, redisClient, tc.keysNum) + containsKey(t, redisClient, expectedKey) + + rpcResp1, err := cachemdw.UnmarshalJsonRpcResponse(body1) + require.NoError(t, err) + rpcResp2, err := cachemdw.UnmarshalJsonRpcResponse(body2) + require.NoError(t, err) + + // JSON-RPC Version and Result should be equal + require.Equal(t, rpcResp1.Version, rpcResp2.Version) + require.Equal(t, rpcResp1.Result, rpcResp2.Result) + + // JSON-RPC response ID should correspond to JSON-RPC request ID + require.Equal(t, string(rpcResp1.ID), "1") + require.Equal(t, string(rpcResp2.ID), "2") + + // JSON-RPC error should be empty + require.Empty(t, rpcResp1.JsonRpcError) + require.Empty(t, rpcResp2.JsonRpcError) + + // Double-check that JSON-RPC responses differ only in response ID + rpcResp2.ID = []byte("1") + require.Equal(t, rpcResp1, rpcResp2) + }) + } + + cleanUpRedis(t, redisClient) +} + +func TestNoMetricsE2ETestCachingMdwForStaticMethods(t *testing.T) { + // create api and database clients + client, err := ethclient.Dial(proxyServiceURL) + if err != nil { + t.Fatal(err) + } + + redisClient := redis.NewClient(&redis.Options{ + Addr: redisURL, + Password: redisPassword, + DB: 0, + }) + cleanUpRedis(t, redisClient) + expectKeysNum(t, redisClient, 0) + + for _, tc := range []struct { + desc string + method string + params []interface{} + keysNum int + expectedKey string + }{ + { + desc: "test case #1", + method: "eth_chainId", + params: []interface{}{}, + keysNum: 1, + expectedKey: "local-chain:evm-request:eth_chainId:sha256:*", + }, + { + desc: "test case #2", + method: "net_version", + params: []interface{}{}, + keysNum: 2, + expectedKey: "local-chain:evm-request:net_version:sha256:*", + }, + } { + t.Run(tc.desc, func(t *testing.T) { + // test cache MISS and cache HIT scenarios for specified method + // check corresponding values in cachemdw.CacheHeaderKey HTTP header + // check that cached and non-cached responses are equal + + // cache MISS + cacheMissResp := mkJsonRpcRequest(t, proxyServiceURL, 1, tc.method, tc.params) + require.Equal(t, cachemdw.CacheMissHeaderValue, cacheMissResp.Header[cachemdw.CacheHeaderKey][0]) + body1, err := io.ReadAll(cacheMissResp.Body) + require.NoError(t, err) + err = checkJsonRpcErr(body1) + require.NoError(t, err) + expectKeysNum(t, redisClient, tc.keysNum) + containsKey(t, redisClient, tc.expectedKey) + + // cache HIT + cacheHitResp := mkJsonRpcRequest(t, proxyServiceURL, 1, tc.method, tc.params) + require.Equal(t, cachemdw.CacheHitHeaderValue, cacheHitResp.Header[cachemdw.CacheHeaderKey][0]) + body2, err := io.ReadAll(cacheHitResp.Body) + require.NoError(t, err) + err = checkJsonRpcErr(body2) + 
require.NoError(t, err) + expectKeysNum(t, redisClient, tc.keysNum) + containsKey(t, redisClient, tc.expectedKey) + + // check that response bodies are the same + require.JSONEq(t, string(body1), string(body2), "responses should be the same") + + // check that response headers are the same + equalHeaders(t, cacheMissResp.Header, cacheHitResp.Header) + + // check that CORS headers are present for cache hit scenario + require.Equal(t, cacheHitResp.Header[accessControlAllowOriginHeaderName], []string{"*"}) + }) + } + + cleanUpRedis(t, redisClient) + // test cache MISS and cache HIT scenarios for eth_chainId method + // check that cached and non-cached responses are equal + { + // eth_chainId - cache MISS + chainID1, err := client.ChainID(testContext) + require.NoError(t, err) + expectKeysNum(t, redisClient, 1) + expectedKey := "local-chain:evm-request:eth_chainId:sha256:*" + containsKey(t, redisClient, expectedKey) + + // eth_chainId - cache HIT + chainID2, err := client.ChainID(testContext) + require.NoError(t, err) + expectKeysNum(t, redisClient, 1) + containsKey(t, redisClient, expectedKey) + + require.Equal(t, chainID1, chainID2, "chain IDs should be the same") + } + + cleanUpRedis(t, redisClient) +} + +func TestNoMetricsE2ETestCachingMdwForGetTxByHashMethod(t *testing.T) { + // create api and database clients + evmClient, err := ethclient.Dial(proxyServiceURL) + if err != nil { + t.Fatal(err) + } + + redisClient := redis.NewClient(&redis.Options{ + Addr: redisURL, + Password: redisPassword, + DB: 0, + }) + cleanUpRedis(t, redisClient) + expectKeysNum(t, redisClient, 0) + + addr := common.HexToAddress(evmFaucetAddressHex) + balance, err := evmClient.BalanceAt(testContext, addr, nil) + if err != nil { + log.Fatalf("can't get balance for evm faucet: %v\n", err) + } + require.NotEqual(t, "0", balance.String()) + + addressToFund := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d") + // submit eth tx + tx := fundEVMAddress(t, evmClient, addressToFund) + cleanUpRedis(t, redisClient) + expectKeysNum(t, redisClient, 0) + + expectedKey := "local-chain:evm-request:eth_getTransactionByHash:sha256:*" + // poll for the tx by hash until the JSON-RPC response result is non-null + // NOTE: this is a cache miss scenario, because we don't cache null responses + waitUntilTxAppearsInMempool(t, tx.Hash()) + expectKeysNum(t, redisClient, 0) + // poll for the tx by hash until the JSON-RPC response result indicates the tx is included in a block + // NOTE: this is a cache miss scenario, because we don't cache txs that are still in the mempool + cacheMissBody, cacheMissHeaders := getTxByHashFromBlock(t, tx.Hash(), cachemdw.CacheMissHeaderValue) + expectKeysNum(t, redisClient, 1) + containsKey(t, redisClient, expectedKey) + // the previous step already returned a tx included in a block, so calling this again triggers the cache hit scenario + cacheHitBody, cacheHitHeaders := getTxByHashFromBlock(t, tx.Hash(), cachemdw.CacheHitHeaderValue) + expectKeysNum(t, redisClient, 1) + containsKey(t, redisClient, expectedKey) + + // check that response bodies are the same + require.JSONEq(t, string(cacheMissBody), string(cacheHitBody), "responses should be the same") + + // check that response headers are the same + equalHeaders(t, cacheMissHeaders, cacheHitHeaders) + + // check that CORS headers are present for cache hit scenario + require.Equal(t, cacheHitHeaders[accessControlAllowOriginHeaderName], []string{"*"}) +} + +func TestNoMetricsE2ETestCachingMdwForGetTxReceiptByHashMethod(t *testing.T) { + // create api and
database clients + evmClient, err := ethclient.Dial(proxyServiceURL) + if err != nil { + t.Fatal(err) + } + + redisClient := redis.NewClient(&redis.Options{ + Addr: redisURL, + Password: redisPassword, + DB: 0, + }) + cleanUpRedis(t, redisClient) + expectKeysNum(t, redisClient, 0) + + addr := common.HexToAddress(evmFaucetAddressHex) + balance, err := evmClient.BalanceAt(testContext, addr, nil) + if err != nil { + log.Fatalf("can't get balance for evm faucet: %v\n", err) + } + require.NotEqual(t, "0", balance.String()) + + addressToFund := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d") + // submit eth tx + tx := fundEVMAddress(t, evmClient, addressToFund) + cleanUpRedis(t, redisClient) + expectKeysNum(t, redisClient, 0) + + expectedKey := "local-chain:evm-request:eth_getTransactionReceipt:sha256:*" + // poll for the tx receipt by hash until the JSON-RPC response result is non-null + // this is a cache miss scenario, because we don't cache null responses + // NOTE: eth_getTransactionReceipt returns a null JSON-RPC response result for txs in the mempool, so at this point + // the tx is already included in a block + cacheMissBody, cacheMissHeaders := getTxReceiptByHash(t, tx.Hash(), cachemdw.CacheMissHeaderValue) + expectKeysNum(t, redisClient, 1) + containsKey(t, redisClient, expectedKey) + // the previous step already returned a tx included in a block, so calling this again triggers the cache hit scenario + cacheHitBody, cacheHitHeaders := getTxReceiptByHash(t, tx.Hash(), cachemdw.CacheHitHeaderValue) + expectKeysNum(t, redisClient, 1) + containsKey(t, redisClient, expectedKey) + + // check that response bodies are the same + require.JSONEq(t, string(cacheMissBody), string(cacheHitBody), "responses should be the same") + + // check that response headers are the same + equalHeaders(t, cacheMissHeaders, cacheHitHeaders) + + // check that CORS headers are present for cache hit scenario + require.Equal(t, cacheHitHeaders[accessControlAllowOriginHeaderName], []string{"*"}) +} diff --git a/service/service.go b/service/service.go index 7b41c6c..01ba48e 100644 --- a/service/service.go +++ b/service/service.go @@ -138,10 +138,11 @@ func New(ctx context.Context, config config.Config, serviceLogger *logging.Servi // createDatabaseClient creates a connection to the database // using the specified config and runs migrations async -// (only if migration flag in config is true) returning the +// (only if migration flag in config is true) // returning the database connection and error (if any) func createDatabaseClient(ctx context.Context, config config.Config, logger *logging.ServiceLogger) (*database.PostgresClient, error) { databaseConfig := database.PostgresDatabaseConfig{ + DatabaseDisabled: !config.MetricDatabaseEnabled, DatabaseName: config.DatabaseName, DatabaseEndpointURL: config.DatabaseEndpointURL, DatabaseUsername: config.DatabaseUserName, @@ -149,7 +150,7 @@ func createDatabaseClient(ctx context.Context, config config.Config, logger *log SSLEnabled: config.DatabaseSSLEnabled, QueryLoggingEnabled: config.DatabaseQueryLoggingEnabled, ReadTimeoutSeconds: config.DatabaseReadTimeoutSeconds, - WriteTimeousSeconds: config.DatabaseWriteTimeoutSeconds, + WriteTimeoutSeconds: config.DatabaseWriteTimeoutSeconds, DatabaseMaxIdleConnections: config.DatabaseMaxIdleConnections, DatabaseConnectionMaxIdleSeconds: config.DatabaseConnectionMaxIdleSeconds, DatabaseMaxOpenConnections: config.DatabaseMaxOpenConnections, @@ -173,7 +174,7 @@ func createDatabaseClient(ctx context.Context, config config.Config, logger
*log // run migrations async so waiting for the database to // be reachable doesn't block the ability of the proxy service // to degrade gracefully and continue to proxy requests even - // without it's database + // without its database go func() { // wait for database to be reachable var databaseOnline bool
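Reviewer notes (supplementary, not part of the diff):

The changes above all hang off one pattern: with METRIC_DATABASE_ENABLED=false, NewPostgresClient never dials postgres, the embedded *bun.DB stays nil, and every helper in clients/database guards on nil and degrades to a no-op. A minimal sketch of how a caller experiences this, using only the APIs added in this diff:

```go
package main

import (
	"context"
	"log"

	"github.com/kava-labs/kava-proxy-service/clients/database"
)

func main() {
	// DatabaseDisabled short-circuits NewPostgresClient before any
	// connection options are built; no postgres instance is needed.
	db, err := database.NewPostgresClient(database.PostgresDatabaseConfig{
		DatabaseDisabled: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	// The embedded *bun.DB is nil, so Save takes the nil-guard branch
	// and returns nil instead of attempting an INSERT.
	prm := database.ProxiedRequestMetric{}
	if err := prm.Save(context.Background(), db.DB); err != nil {
		log.Fatal(err)
	}
}
```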
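The flag itself is read once at startup. A small sketch of the toggle, assuming config.EnvOrDefaultBool has the (key, default) signature its call sites in config.go suggest:

```go
package main

import (
	"fmt"
	"os"

	"github.com/kava-labs/kava-proxy-service/config"
)

func main() {
	// The no-metrics CI workflow exports this before docker compose
	// starts the service; everywhere else it defaults to true.
	os.Setenv("METRIC_DATABASE_ENABLED", "false")

	enabled := config.EnvOrDefaultBool("METRIC_DATABASE_ENABLED", true)
	fmt.Println("metric database enabled:", enabled) // metric database enabled: false
}
```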
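One subtlety in main.go: when disabled, startMetricPartitioningRoutine and startMetricCompactionRoutine return a nil channel while startMetricPruningRoutine returns an empty, never-closed one. Both are safe for a caller that selects on the returned error channels, because a receive on either blocks forever. A self-contained sketch of that channel semantics:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	var nilErrs <-chan error        // nil channel, like the disabled partitioning/compaction routines return
	emptyErrs := make(<-chan error) // empty channel, like the disabled pruning routine returns

	// Neither receive below can ever fire: receiving from a nil channel
	// and from an empty, never-closed channel both block forever.
	select {
	case err := <-nilErrs:
		fmt.Println("unexpected:", err)
	case err := <-emptyErrs:
		fmt.Println("unexpected:", err)
	case <-time.After(100 * time.Millisecond):
		fmt.Println("no metric routine errors when disabled")
	}
}
```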
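The new e2e tests lean on helpers such as mkJsonRpcRequest, expectKeysNum, and containsKey that live in the existing test harness (main_test.go) and are not shown in this diff. For orientation only, a simplified, hypothetical stand-in for what a helper like mkJsonRpcRequest has to do; the real helper may differ:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// mkJsonRpcRequestSketch POSTs a JSON-RPC envelope and returns the raw
// *http.Response so callers can inspect headers such as the cache-status
// header the tests assert on.
func mkJsonRpcRequestSketch(url string, id interface{}, method string, params []interface{}) (*http.Response, error) {
	body, err := json.Marshal(map[string]interface{}{
		"jsonrpc": "2.0",
		"id":      id,
		"method":  method,
		"params":  params,
	})
	if err != nil {
		return nil, err
	}

	return http.Post(url, "application/json", bytes.NewReader(body))
}

func main() {
	// port 7777 matches PROXY_CONTAINER_PORT in the CI workflow above
	resp, err := mkJsonRpcRequestSketch("http://localhost:7777", 1, "eth_chainId", []interface{}{})
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	fmt.Println(resp.Status)
}
```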
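Finally, the cache keys the tests assert on follow the shape network:evm-request:method:sha256:digest. The actual derivation lives in the service's cachemdw package and may differ in detail; the sketch below is a hypothetical illustration of the shape only, not the real key function:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"fmt"
)

// cacheKeySketch illustrates the key shape only; the digest input used by
// the real cachemdw implementation is an assumption here.
func cacheKeySketch(network, method string, params []interface{}) string {
	raw, _ := json.Marshal(params)
	sum := sha256.Sum256(raw)
	return fmt.Sprintf("%s:evm-request:%s:sha256:%s", network, method, hex.EncodeToString(sum[:]))
}

func main() {
	fmt.Println(cacheKeySketch("local-chain", "eth_getBlockByNumber", []interface{}{"0x1", true}))
}
```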