Grafana Tempo can ingest traces from Zipkin clients directly, bypassing the need for a separate Zipkin collector.

Let’s see how this works with a simple Go application instrumented with OpenTelemetry.

First, ensure you have Tempo running. A minimal docker-compose.yml might look like this:

version: '3.7'

services:
  tempo:
    image: grafana/tempo:latest
    command: ["-config.file=/etc/tempo/config.yaml"]
    ports:
      - "3200:3200" # Tempo API
      - "9411:9411" # Zipkin receiver (optional); Tempo's own metrics are served on the HTTP port at /metrics
      - "4317:4317" # OTLP gRPC
      - "4318:4318" # OTLP HTTP
    volumes:
      - ./tempo-config.yaml:/etc/tempo/config.yaml

  # Optional: Prometheus for metrics and Grafana for visualization
  # prometheus:
  #   image: prom/prometheus:latest
  #   ports:
  #     - "9090:9090"
  #   volumes:
  #     - ./prometheus-config.yml:/etc/prometheus/prometheus.yml

  # grafana:
  #   image: grafana/grafana:latest
  #   ports:
  #     - "3000:3000"
  #   volumes:
  #     - ./grafana-datasources.yml:/etc/grafana/provisioning/datasources/datasources.yml

And a basic tempo-config.yaml:

auth_enabled: false
server:
  http_listen_port: 3200
  grpc_listen_port: 9090 # Tempo's internal gRPC, not the OTLP one

# If you want to use OTLP directly, you need to configure the receiver.
# The default config might not expose OTLP ports.
distributor:
  receivers:
    otlp:
      protocols:
        grpc:
          endpoint: 0.0.0.0:4317
        http:
          endpoint: 0.0.0.0:4318

# For demonstration, we'll use the 'local' storage.
# In production, use object storage or another backend.
storage:
  trace:
    backend: local
    local:
      path: /tmp/tempo/traces

# Additional receivers also live under distributor.receivers, next to otlp.
# Example: enabling the Zipkin receiver if you are migrating *from* Zipkin
# distributor:
#   receivers:
#     zipkin:
#       endpoint: 0.0.0.0:9411

# Example: enabling the Jaeger receiver
#     jaeger:
#       protocols:
#         thrift_http:
#           endpoint: 0.0.0.0:14268

# Note: Tempo ingests traces only — there is no Prometheus receiver. Metrics
# about Tempo itself are exposed for scraping at /metrics on the HTTP port (3200).

# Note: Tempo has no top-level `otelcol` section. The OTLP receiver is
# configured under distributor.receivers, as shown above. Recent Tempo
# versions enable OTLP in their default config, but declaring it explicitly
# keeps the setup unambiguous.

# The ingester batches incoming traces into blocks before flushing them to
# storage. These settings sit directly under `ingester` (values shown are
# typical defaults and can be tuned):
ingester:
  trace_idle_period: 10s        # flush a trace after this long with no new spans
  max_block_duration: 5m        # cut a block after this much wall-clock time
  max_block_bytes: 1_000_000_000 # cut a block once it reaches this size in bytes

# For local storage, you also need to configure the backend.
# This is already covered by the 'storage' section above.

# You also need to configure the exporter to send traces to storage.
# For local storage, this is handled by the 'local' backend.

# If you were using a different backend like S3, you'd configure it here.
# storage:
#   trace:
#     backend: s3
#     s3:
#       bucket: my-tempo-bucket
#       endpoint: s3.amazonaws.com
#       region: us-east-1

Now, let’s instrument a Go application using OpenTelemetry’s SDK.

package main

import (
	"context"
	"fmt"
	"log"
	"net/http"
	"os"
	"time"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
	"go.opentelemetry.io/otel/sdk/resource"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0" // Use a recent version
	"go.opentelemetry.io/otel/trace"
)

// tracer is the package-level tracer used by the HTTP handlers. It is
// initialized in main after the global tracer provider has been registered.
var tracer trace.Tracer

// main wires up the OpenTelemetry SDK (OTLP gRPC exporter -> batch
// processor -> global tracer provider) and serves an instrumented HTTP
// handler on :8080.
func main() {
	ctx := context.Background()

	// 1. Configure the OpenTelemetry SDK: exporter, resource, tracer provider.

	// OTLP gRPC exporter pointed at Tempo's OTLP receiver (default port 4317).
	// WithInsecure() skips TLS for this local example; use
	// WithTLSCredentials() in production.
	exporter, err := otlptracegrpc.New(ctx,
		otlptracegrpc.WithEndpoint("localhost:4317"),
		otlptracegrpc.WithInsecure(),
	)
	if err != nil {
		log.Fatalf("failed to create OTLP exporter: %v", err)
	}

	// Resource attributes identify this service in Tempo; service.name is
	// what trace search and Grafana's service picker key on.
	res, err := resource.New(ctx,
		resource.WithAttributes(
			semconv.ServiceName("my-go-app"),
			semconv.ServiceVersion("1.0.0"),
			attribute.String("environment", "production"),
		),
	)
	if err != nil {
		log.Fatalf("failed to create resource: %v", err)
	}

	// The batch span processor buffers finished spans and exports them in
	// bulk, which is far cheaper than exporting one span at a time.
	tp := sdktrace.NewTracerProvider(
		sdktrace.WithBatcher(exporter),
		sdktrace.WithResource(res),
	)
	defer func() {
		// Bound the final flush so shutdown can never hang indefinitely.
		shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()
		if err := tp.Shutdown(shutdownCtx); err != nil {
			log.Printf("Error shutting down tracer provider: %v", err)
		}
	}()
	otel.SetTracerProvider(tp)

	// Get a tracer instance for this application.
	tracer = otel.Tracer("my-go-app/tracer")

	// 2. Instrument the HTTP handler.
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// Start a span. r.Context() would carry a parent span if a
		// propagation middleware had extracted one from incoming headers.
		ctx, span := tracer.Start(r.Context(), "HTTP GET /")
		defer span.End()

		// Record the actual request data rather than hard-coded values:
		// this handler is registered on "/" and therefore matches every
		// method and path.
		span.SetAttributes(
			attribute.String("http.method", r.Method),
			attribute.String("http.url", r.URL.Path),
			attribute.String("http.user_agent", r.UserAgent()),
		)

		// Simulate some work in a child span.
		processRequest(ctx)

		fmt.Fprintf(w, "Hello, World!")
	})

	// http.ListenAndServe has no timeouts; a configured http.Server
	// protects against slow clients holding connections open forever.
	srv := &http.Server{
		Addr:         ":8080",
		ReadTimeout:  10 * time.Second,
		WriteTimeout: 10 * time.Second,
	}
	log.Println("Starting server on :8080")
	if err := srv.ListenAndServe(); err != nil {
		log.Fatalf("Server failed: %v", err)
	}
}

// processRequest records a child span covering the simulated work done for
// one request. The incoming ctx carries the HTTP handler's span, which
// becomes this span's parent.
func processRequest(ctx context.Context) {
	_, span := tracer.Start(ctx, "processRequest")
	defer span.End()

	span.SetAttributes(attribute.String("operation", "data_processing"))

	// Pretend to do some downstream work, recording each step as a span
	// event with a representative attribute.
	step := func(d time.Duration, name string, attrs ...attribute.KeyValue) {
		time.Sleep(d)
		span.AddEvent(name, trace.WithAttributes(attrs...))
	}

	// Simulated database call.
	step(50*time.Millisecond, "simulated_db_call", attribute.Int("db.rows_affected", 10))
	// Simulated external API call.
	step(100*time.Millisecond, "simulated_api_call", attribute.String("api.endpoint", "/users"))
}

To run this:

  1. Save the docker-compose.yml and tempo-config.yaml.
  2. Save the Go code as main.go (first remove the unused "os" import — the Go compiler rejects unused imports).
  3. Run docker compose up -d.
  4. Run go mod init myapp && go mod tidy in the directory with main.go.
  5. Run go run main.go.
  6. Access http://localhost:8080 in your browser.

Now, open Grafana (if you set it up) and add Tempo as a data source; Tempo has no standalone UI of its own, though you can also query its HTTP API directly on port 3200. You should see traces from my-go-app appearing.

The core idea is that OpenTelemetry SDKs (like the Go one) can be configured to export traces using the OTLP (OpenTelemetry Protocol) gRPC or HTTP. Tempo, in its recent versions, has a built-in OTLP receiver. By pointing the OpenTelemetry exporter at Tempo’s OTLP port (defaulting to localhost:4317 for gRPC), the traces flow directly from your application to Tempo without needing a Zipkin collector or a separate OpenTelemetry Collector intermediate.

The tempo-config.yaml snippet shows how to explicitly enable the OTLP receiver. Even if not explicitly listed, modern Tempo images often enable it by default. The key is that Tempo's distributor is listening on the OTLP ports.

The Go application’s otlptracegrpc.New call is the crucial part for the application side. It tells the OpenTelemetry SDK where to send the traces. The WithEndpoint("localhost:4317") directs the spans to Tempo. The sdktrace.WithBatcher(exporter) ensures that spans are collected and sent in batches, improving efficiency.

You can also configure Tempo to receive traces in the Zipkin v2 format if you have legacy clients or need compatibility. This involves adding a zipkin receiver under distributor.receivers in tempo-config.yaml and pointing your Zipkin clients at Tempo's Zipkin endpoint (e.g., http://localhost:9411/api/v2/spans). However, the OTLP receiver is the modern and recommended approach for OpenTelemetry-native applications.

If you were migrating from a Zipkin server, you’d configure Tempo to listen on the Zipkin port (usually 9411) and then update your Zipkin clients to point to Tempo instead of the old Zipkin collector. Tempo would then ingest those traces.

The most surprising thing about this setup is how seamlessly Tempo integrates with OpenTelemetry’s native protocol. You often think of the OpenTelemetry Collector as a mandatory intermediary, but Tempo can act as a direct backend for OTLP, simplifying your observability stack considerably. The Collector is still valuable for advanced processing, sampling, and routing, but for direct ingestion, Tempo’s OTLP receiver is a powerful feature.

The next step is typically exploring Tempo’s advanced configuration for high availability, different storage backends, and integration with Grafana for powerful trace visualization and correlation with metrics.

Want structured learning?

Take the full Tempo course →