feat(aws): add aws tooling and setup #26

Draft · wants to merge 13 commits into main
9 changes: 9 additions & 0 deletions .env.tpl
@@ -0,0 +1,9 @@
TF_WORKSPACE=
TF_VAR_private_key=
TF_VAR_indexing_service_did=did:web:staging.indexer.storacha.network
TF_VAR_indexing_service_url=https://staging.indexer.storacha.network
TF_VAR_indexing_service_proof=
TF_VAR_did=
TF_VAR_use_pdp=false
TF_VAR_pdp_proofset=
TF_VAR_curio_url=
56 changes: 56 additions & 0 deletions .github/workflows/deploy.yml
@@ -0,0 +1,56 @@
name: Deploy

on:
push:
branches:
- main
tags:
- "*"
paths:
- ".github/workflows/deploy.yml"
- "cmd/**"
- "deploy/**"
- "pkg/**"
pull_request:
branches: [main]
workflow_run:
workflows: [Releaser]
types: [completed]
branches: [main]
workflow_dispatch:

permissions:
id-token: write # This is required for requesting the JWT
contents: read # This is required for actions/checkout

jobs:
# always deploy to staging
staging:
uses: ./.github/workflows/terraform.yml
with:
env: staging
workspace: staging
apply: ${{ github.event_name != 'pull_request' }}
secrets:
did: ${{ secrets.STAGING_DID }}
aws-account-id: ${{ secrets.STAGING_AWS_ACCOUNT_ID }}
aws-region: ${{ secrets.STAGING_AWS_REGION }}
allowed-account-ids: ${{ secrets.STAGING_ALLOWED_ACCOUNT_IDS }}
private-key: ${{ secrets.STAGING_PRIVATE_KEY }}
indexing-service-proof: ${{ secrets.STAGING_INDEXING_SERVICE_PROOF }}

# deploy to prod on new releases
production:
if: ${{ github.event_name == 'workflow_run' && github.event.workflow_run.conclusion == 'success' }}
uses: ./.github/workflows/terraform.yml
with:
env: production
workspace: prod
apply: true
secrets:
did: ${{ secrets.PROD_DID }}
aws-account-id: ${{ secrets.PROD_AWS_ACCOUNT_ID }}
aws-region: ${{ secrets.PROD_AWS_REGION }}
allowed-account-ids: ${{ secrets.PROD_ALLOWED_ACCOUNT_IDS }}
private-key: ${{ secrets.PROD_PRIVATE_KEY }}
indexing-service-proof: ${{ secrets.PROD_INDEXING_SERVICE_PROOF }}
83 changes: 83 additions & 0 deletions .github/workflows/terraform.yml
@@ -0,0 +1,83 @@
name: Terraform

on:
workflow_call:
inputs:
env:
required: true
type: string
workspace:
required: true
type: string
apply:
required: true
type: boolean
secrets:
did:
required: true
aws-account-id:
required: true
allowed-account-ids:
required: true
aws-region:
required: true
private-key:
required: true
indexing-service-proof:
required: true
honeycomb-api-key:
required: false

concurrency:
group: ${{ github.workflow }}-${{ inputs.workspace }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true

env:
AWS_ACCOUNT_ID: ${{ secrets.aws-account-id }}
AWS_REGION: ${{ secrets.aws-region }}
ENV: ${{ inputs.env }}
TF_WORKSPACE: ${{ inputs.workspace }}
TF_VAR_allowed_account_ids: ${{ secrets.allowed-account-ids }}
TF_VAR_private_key: ${{ secrets.private-key }}
TF_VAR_did: ${{ secrets.did }}
TF_VAR_indexing_service_proof: ${{ secrets.indexing-service-proof }}
TF_VAR_honeycomb_api_key: ${{ secrets.honeycomb-api-key }}

permissions:
id-token: write # This is required for requesting the JWT
contents: read # This is required for actions/checkout

jobs:
terraform:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3

- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v2
with:
aws-region: ${{ env.AWS_REGION }}
role-to-assume: arn:aws:iam::${{ env.AWS_ACCOUNT_ID }}:role/terraform-ci

- uses: opentofu/setup-opentofu@v1
- uses: actions/setup-go@v5

- name: Tofu Init
run: |
tofu -chdir="deploy/app" init

- name: Build Go Apps
run: |
touch .env
make lambdas

- name: Terraform Plan
if: ${{ !inputs.apply }}
run: |
tofu -chdir="deploy/app" plan

- name: Terraform Apply
if: ${{ inputs.apply }}
run: |
tofu -chdir="deploy/app" apply -input=false --auto-approve
5 changes: 5 additions & 0 deletions .gitignore
@@ -1,3 +1,8 @@
/.vscode
/cmd/storage
/storage
*.car
/build
.terraform
.env
.tfworkspace
65 changes: 65 additions & 0 deletions Makefile
@@ -0,0 +1,65 @@
ifneq (,$(wildcard ./.env))
include .env
export
else
$(error You haven't set up your .env file. Please refer to the README)
endif
LAMBDA_GOOS=linux
LAMBDA_GOARCH=arm64
LAMBDA_GOCC?=go
LAMBDA_GOFLAGS=-tags=lambda.norpc
LAMBDA_CGO_ENABLED=0
LAMBDAS=build/aggregatesubmitter/bootstrap build/getblob/bootstrap build/getclaim/bootstrap build/getroot/bootstrap build/pieceaccepter/bootstrap build/pieceaggregator/bootstrap build/postroot/bootstrap build/putblob/bootstrap

.PHONY: clean-lambda

clean-lambda:
rm -rf build

.PHONY: clean-terraform

clean-terraform:
tofu -chdir=deploy/app destroy

.PHONY: clean

clean: clean-terraform clean-lambda

lambdas: $(LAMBDAS)

.PHONY: $(LAMBDAS)

$(LAMBDAS): build/%/bootstrap:
GOOS=$(LAMBDA_GOOS) GOARCH=$(LAMBDA_GOARCH) CGO_ENABLED=$(LAMBDA_CGO_ENABLED) $(LAMBDA_GOCC) build $(LAMBDA_GOFLAGS) -o $@ cmd/lambda/$*/main.go

deploy/app/.terraform:
tofu -chdir=deploy/app init

.tfworkspace: deploy/app/.terraform
tofu -chdir=deploy/app workspace new $(TF_WORKSPACE)
touch .tfworkspace

.PHONY: init

init: deploy/app/.terraform .tfworkspace

.PHONY: validate

validate: deploy/app/.terraform .tfworkspace
tofu -chdir=deploy/app validate

.PHONY: plan

plan: deploy/app/.terraform .tfworkspace $(LAMBDAS)
tofu -chdir=deploy/app plan

.PHONY: apply

apply: deploy/app/.terraform .tfworkspace $(LAMBDAS)
tofu -chdir=deploy/app apply



shared:
41 changes: 41 additions & 0 deletions cmd/aggregate/aggregate.go
@@ -0,0 +1,41 @@
package main

import (
"encoding/json"
"fmt"
"os"

"github.com/ipfs/go-cid"
cidlink "github.com/ipld/go-ipld-prime/linking/cid"
"github.com/storacha/go-piece/pkg/piece"
"github.com/storacha/storage/pkg/pdp/aggregator/aggregate"
)

func main() {
argsWithoutProg := os.Args[1:]
pieceLinks := make([]piece.PieceLink, 0, len(argsWithoutProg))
for _, arg := range argsWithoutProg {
c, err := cid.Decode(arg)
if err != nil {
fmt.Println(err.Error())
os.Exit(-1)
}
pl, err := piece.FromLink(cidlink.Link{Cid: c})
if err != nil {
fmt.Println(err.Error())
os.Exit(-1)
}
pieceLinks = append(pieceLinks, pl)
}
aggregate, err := aggregate.NewAggregate(pieceLinks)
if err != nil {
fmt.Println(err.Error())
os.Exit(-1)
}
asJson, err := json.MarshalIndent(aggregate, "", " ")
if err != nil {
fmt.Println(err.Error())
os.Exit(-1)
}
fmt.Println(string(asJson))
}
64 changes: 64 additions & 0 deletions cmd/lambda/aggregatesubmitter/main.go
@@ -0,0 +1,64 @@
package main

import (
"context"
"encoding/json"
"fmt"
"net/http"
"net/url"

"github.com/aws/aws-lambda-go/events"
"github.com/aws/aws-lambda-go/lambda"
"github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log/v2"
"github.com/ipld/go-ipld-prime/datamodel"
cidlink "github.com/ipld/go-ipld-prime/linking/cid"
"github.com/storacha/go-capabilities/pkg/types"
"github.com/storacha/storage/internal/ipldstore"
"github.com/storacha/storage/pkg/aws"
"github.com/storacha/storage/pkg/pdp/aggregator"
"github.com/storacha/storage/pkg/pdp/aggregator/aggregate"
"github.com/storacha/storage/pkg/pdp/curio"
)

var log = logging.Logger("lambda/aggregatesubmitter")

func makeHandler(aggregateSubmitter *aggregator.AggregateSubmitter) func(ctx context.Context, sqsEvent events.SQSEvent) error {
return func(ctx context.Context, sqsEvent events.SQSEvent) error {
// collect the aggregate links referenced by this batch of SQS messages
aggregateLinks := make([]datamodel.Link, 0, len(sqsEvent.Records))
for _, msg := range sqsEvent.Records {
var pieceLinkMessage aws.PieceLinkMessage
err := json.Unmarshal([]byte(msg.Body), &pieceLinkMessage)
if err != nil {
return fmt.Errorf("deserializing message json: %w", err)
}
c, err := cid.Decode(pieceLinkMessage.Piece)
if err != nil {
return fmt.Errorf("decoding piece link: %w", err)
}

aggregateLinks = append(aggregateLinks, cidlink.Link{Cid: c})
}
return aggregateSubmitter.SubmitAggregates(ctx, aggregateLinks)
}
}

func main() {
config := aws.FromEnv(context.Background())
curioURL, err := url.Parse(config.CurioURL)
if err != nil {
panic(fmt.Errorf("parsing curio URL: %w", err))
}
curioAuth, err := curio.CreateCurioJWTAuthHeader("storacha", config.Signer)
if err != nil {
panic(fmt.Errorf("generating curio JWT: %w", err))
}
curioClient := curio.New(http.DefaultClient, curioURL, curioAuth)
aggregateStore := ipldstore.IPLDStore[datamodel.Link, aggregate.Aggregate](
aws.NewS3Store(config.Config, config.AggregatesBucket, config.AggregatesPrefix),
aggregate.AggregateType(), types.Converters...)
aggregateSubmitterQueue := aws.NewSQSAggregateQueue(config.Config, config.SQSPDPPieceAggregatorURL)
aggregateSubmitter := aggregator.NewAggregateSubmitteer(config.PDPProofSet, aggregateStore, curioClient, aggregateSubmitterQueue.Queue)
lambda.Start(makeHandler(aggregateSubmitter))
}
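
A minimal sketch of the decode path this handler performs on each SQS record, useful for poking at the plumbing locally; the "piece" JSON field name mirrors what aws.PieceLinkMessage is assumed to carry, and the CID is an arbitrary valid value used only for the decode check:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/aws/aws-lambda-go/events"
	"github.com/ipfs/go-cid"
)

func main() {
	// Build a fake SQS event whose body carries a piece CID as JSON.
	body, err := json.Marshal(map[string]string{"piece": "bafkqaaa"})
	if err != nil {
		panic(err)
	}
	evt := events.SQSEvent{Records: []events.SQSMessage{{Body: string(body)}}}

	for _, msg := range evt.Records {
		// Anonymous struct stands in for aws.PieceLinkMessage (field name assumed).
		var m struct {
			Piece string `json:"piece"`
		}
		if err := json.Unmarshal([]byte(msg.Body), &m); err != nil {
			panic(fmt.Errorf("deserializing message json: %w", err))
		}
		c, err := cid.Decode(m.Piece)
		if err != nil {
			panic(fmt.Errorf("decoding piece link: %w", err))
		}
		fmt.Println("decoded piece CID:", c)
	}
}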
23 changes: 23 additions & 0 deletions cmd/lambda/getblob/main.go
@@ -0,0 +1,23 @@
package main

import (
"context"
"net/http"
"strings"

"github.com/aws/aws-lambda-go/lambda"
"github.com/awslabs/aws-lambda-go-api-proxy/httpadapter"
"github.com/storacha/storage/pkg/aws"
)

func makeHandler(blobsPublicURL string, blobsKeyPrefix string) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
parts := strings.Split(r.URL.Path, "/")
blobStr := parts[len(parts)-1]
http.Redirect(w, r, blobsPublicURL+"/"+blobsKeyPrefix+blobStr, http.StatusTemporaryRedirect)
}
}
func main() {
config := aws.FromEnv(context.Background())
lambda.Start(httpadapter.NewV2(http.HandlerFunc(makeHandler(config.BlobsPublicURL, config.BlobStorePrefix))).ProxyWithContext)
}
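
A minimal sketch exercising the redirect behaviour of this handler with net/http/httptest; the public URL and key prefix are illustrative placeholders, not real configuration values:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
)

// makeHandler mirrors the getblob handler above: redirect to the public
// bucket URL for the blob named by the last path segment.
func makeHandler(blobsPublicURL string, blobsKeyPrefix string) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		parts := strings.Split(r.URL.Path, "/")
		blobStr := parts[len(parts)-1]
		http.Redirect(w, r, blobsPublicURL+"/"+blobsKeyPrefix+blobStr, http.StatusTemporaryRedirect)
	}
}

func main() {
	h := makeHandler("https://blobs.example.com", "blob/")

	req := httptest.NewRequest(http.MethodGet, "/blob/zQmExampleDigest", nil)
	rec := httptest.NewRecorder()
	h(rec, req)

	fmt.Println(rec.Code, rec.Header().Get("Location"))
	// Prints: 307 https://blobs.example.com/blob/zQmExampleDigest
}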
21 changes: 21 additions & 0 deletions cmd/lambda/getclaim/main.go
@@ -0,0 +1,21 @@
package main

import (
"context"
"net/http"

"github.com/aws/aws-lambda-go/lambda"
"github.com/awslabs/aws-lambda-go-api-proxy/httpadapter"
"github.com/storacha/storage/pkg/aws"
"github.com/storacha/storage/pkg/service/claims"
)

func main() {
config := aws.FromEnv(context.Background())
service, err := aws.Construct(config)
if err != nil {
panic(err)
}
handler := claims.NewHandler(service.Claims().Store())
lambda.Start(httpadapter.NewV2(http.HandlerFunc(handler)).ProxyWithContext)
}
17 changes: 17 additions & 0 deletions cmd/lambda/getroot/main.go
@@ -0,0 +1,17 @@
package main

import (
"context"
"net/http"

"github.com/aws/aws-lambda-go/lambda"
"github.com/awslabs/aws-lambda-go-api-proxy/httpadapter"
"github.com/storacha/storage/pkg/aws"
"github.com/storacha/storage/pkg/server"
)

func main() {
config := aws.FromEnv(context.Background())
handler := server.NewHandler(config.Signer)
lambda.Start(httpadapter.NewV2(http.HandlerFunc(handler)).ProxyWithContext)
}