Skip to content

Commit

Permalink
Merge pull request #67 from lightstep/jmacd/update_recs
Browse files Browse the repository at this point in the history
Consolidate the OTel-Arrow build with top-level gateway build recommendations
  • Loading branch information
jmacd authored Jan 4, 2024
2 parents 7747a92 + 8a3275c commit 3c95ca6
Show file tree
Hide file tree
Showing 8 changed files with 268 additions and 251 deletions.
3 changes: 2 additions & 1 deletion .github/workflows/publish-image.yml
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,7 @@ jobs:
id: meta
uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7
with:
# TODO: remove -EXPERIMENTAL below.
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}-EXPERIMENTAL
tags: |
# Set the OTELCOL_VERSION here
Expand Down Expand Up @@ -67,4 +68,4 @@ jobs:
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache
14 changes: 9 additions & 5 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -17,10 +17,10 @@ This is the repository for recommended [Helm](https://helm.sh/) charts for runni
In order to use an arrow trace collector, you can use (1) the prebuilt image available via the Github Container Registry (GHCR) or you may (2) build your own custom image.

### 1. Use the prebuilt Docker image
1. We have built a Docker image using the example [build config](https://github.com/lightstep/otel-collector-charts/blob/main/arrow/otelcolarrow-build.yaml)
1. We have built a Docker image using the recommended [build config](https://github.com/lightstep/otel-collector-charts/blob/main/arrow/otelcolarrow-build.yaml)
2. This Docker [image](https://github.com/lightstep/otel-collector-charts/pkgs/container/otel-collector-charts%2Fotelarrowcol-experimental) can be pulled by running: `docker pull ghcr.io/lightstep/otel-collector-charts/otelarrowcol-experimental:latest`
3. You can use the example collector config (`/arrow/config/saas-config.yaml`) by running:
`docker run -it -v ./config/:/config --entrypoint /otelarrowcol ghcr.io/lightstep/otel-collector-charts/otelarrowcol-experimental:latest --config=/config/saas-collector.yaml`
3. You can use the collector config (`/arrow/config/gateway-collector.yaml`) by running:
`docker run -it -v $(PWD)/config/:/config --entrypoint /otelarrowcol ghcr.io/lightstep/otel-collector-charts/otelarrowcol-experimental:latest --config=/config/gateway-collector.yaml`


### 2. Build your own custom image
Expand All @@ -33,5 +33,9 @@ In order to use an arrow trace collector, you can use (1) the prebuilt image av
Some of the features available in these charts are optional because
they rely on components that have not been released in the
OpenTelemetry Contrib Collector. Specifically, to make use of the new
OTel-Arrow protocol requires building a customer collector at this
time. See a [recommended custom collector build configuration](./gateway-build.yaml).
OpenTelemetry Protocol With Apache Arrow support requires using either
the prebuilt image or a custom collector build at this time.

See the [recommended custom collector build
configuration](./arrow/otelcolarrow-build.yaml) as a starting
point.
2 changes: 1 addition & 1 deletion arrow/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ ENV CGO_ENABLED=0

# Note the version MUST MATCH otelcolarrow-build.yaml
# Future optimization - curl the release.
RUN go install go.opentelemetry.io/collector/cmd/builder@v0.89.0
RUN go install go.opentelemetry.io/collector/cmd/builder@v0.91.0

# This command generates main.go, go.mod but does not update deps.
RUN builder --config=/otelarrowcol/otelcolarrow-build.yaml
Expand Down
6 changes: 4 additions & 2 deletions arrow/Makefile
Original file line number Diff line number Diff line change
@@ -1,11 +1,13 @@
current_dir = $(shell pwd)

build-amd64:
docker build . -t otelarrowcol-amd64 --platform linux/amd64

build-arm64:
docker build . -t otelarrowcol-arm64 --platform linux/arm64

run-amd64:
docker run -it -v ./config/:/config --entrypoint /otelarrowcol otelarrowcol-amd64 --config=/config/saas-collector.yaml
docker run -it -v $(current_dir)/config/:/config --entrypoint /otelarrowcol otelarrowcol-amd64 --config=/config/gateway-collector.yaml

run-arm64:
docker run -it -v ./config/:/config --entrypoint /otelarrowcol otelarrowcol-arm64 --config=/config/saas-collector.yaml
docker run -it -v $(current_dir)/config/:/config --entrypoint /otelarrowcol otelarrowcol-arm64 --config=/config/gateway-collector.yaml
127 changes: 127 additions & 0 deletions arrow/config/gateway-collector.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,127 @@
# This is a simple but functional recommended gateway configuration
# for an OpenTelemetry Collector built with OTel-Arrow support.
#
# Depending on the number of senders and size of data, the amount of
# resources required will vary.
#
# These configurations have been tested in a small configuration:
#
#   resources:
#     requests:
#       cpu: 2
#       memory: 3Gi
#     limits:
#       cpu: 2
#       memory: 8Gi
#
# and a large configuration
#
#   resources:
#     requests:
#       cpu: 8
#       memory: 6Gi
#     limits:
#       cpu: 8
#       memory: 24Gi
#
# In the larger configuration tested, we used `max_in_flight_size_mib: 256`.

receivers:
  # otelarrow is the OpenTelemetry Protocol with Apache Arrow receiver
  # which combines support for standard OTLP/gRPC.
  otelarrow:
    protocols:
      grpc:
        # This is the default OTLP/gRPC port. OTel-Arrow is served on the
        # same port.
        endpoint: "0.0.0.0:4317"

        # allow large arriving telemetry payloads. they will be split
        # into a reasonable size by the batch processor.
        max_recv_msg_size_mib: 128

        # Limit received OTel-Arrow stream length to 3m in total
        # (30s base age plus a 2m30s grace period).
        keepalive:
          server_parameters:
            max_connection_age: 30s
            max_connection_age_grace: 2m30s

  # otlp is the core OTLP receiver, which we enable to receive
  # OTLP/HTTP data.
  otlp:
    protocols:
      http:
        endpoint: "0.0.0.0:4318"

processors:
  # The concurrent batch processor is recommended instead of the
  # core `batchprocessor` component, when available.
  concurrentbatch:
    send_batch_max_size: 1500
    send_batch_size: 1000
    timeout: 1s

    # For larger configurations, consider raising this parameter.
    max_in_flight_size_mib: 128

exporters:
  otelarrow:
    endpoint: "ingest.lightstep.com:443"
    headers:
      # LS_TOKEN is expected in the collector's environment.
      "lightstep-access-token": "${LS_TOKEN}"

    arrow:
      # Explicitly enable the Arrow stream transport.
      disabled: false
      # NOTE: shorter than the receiver-side 3m connection-age limit
      # above, so the exporter ends streams before the server does.
      max_stream_lifetime: 2m
      num_streams: 6

    # The pipeline will continue trying requests until they timeout.
    # Timeout and retry settings are independent. If retry_on_failure
    # is also enabled, each (retried) request will also have this
    # timeout.
    timeout: 30s

    # Retries are disabled by default. Since the most likely reason
    # for failure is timeout, having retry-on-failure enabled implies
    # dedicating a significant amount of additional memory to the task.
    retry_on_failure:
      enabled: false

    # Do not enable the sending queue. The concurrent batch processor
    # is a better way to parallelize the export.
    sending_queue:
      enabled: false

service:
  pipelines:
    # Both pipelines accept data from either receiver and export
    # via OTel-Arrow.
    traces:
      receivers: [otelarrow, otlp]
      processors: [concurrentbatch]
      exporters: [otelarrow]

    metrics:
      receivers: [otelarrow, otlp]
      processors: [concurrentbatch]
      exporters: [otelarrow]

  # Self-observability: the collector ships its own metrics and spans
  # to the same ingest endpoint used by the pipelines above.
  telemetry:
    metrics:
      level: detailed
      readers:
        - periodic:
            exporter:
              otlp:
                protocol: grpc/protobuf
                endpoint: https://ingest.lightstep.com:443
                headers:
                  lightstep-access-token: "${LS_TOKEN}"
    traces:
      processors:
        - batch:
            exporter:
              otlp:
                protocol: grpc/protobuf
                endpoint: https://ingest.lightstep.com:443
                headers:
                  lightstep-access-token: "${LS_TOKEN}"
    resource:
      # Identifies this collector's own telemetry.
      service.name: otelarrow-gateway-collector
65 changes: 0 additions & 65 deletions arrow/config/saas-collector.yaml

This file was deleted.

Loading

0 comments on commit 3c95ca6

Please sign in to comment.