diff --git a/jdbc/ydb-token-app/Dockerfile b/jdbc/ydb-token-app/Dockerfile
new file mode 100644
index 0000000..a2e4aea
--- /dev/null
+++ b/jdbc/ydb-token-app/Dockerfile
@@ -0,0 +1,13 @@
+FROM maven:3.9.11-eclipse-temurin-17 AS build
+WORKDIR /workspace
+
+COPY . .
+RUN mvn -B -pl jdbc/ydb-token-app -am dependency:go-offline
+RUN mvn -B -pl jdbc/ydb-token-app -am clean package org.springframework.boot:spring-boot-maven-plugin:repackage -DskipTests
+
+FROM eclipse-temurin:17-jre
+WORKDIR /app
+
+COPY --from=build /workspace/jdbc/ydb-token-app/target/ydb-token-app-*.jar /app/app.jar
+
+ENTRYPOINT ["java", "-jar", "/app/app.jar"]
diff --git a/jdbc/ydb-token-app/README.md b/jdbc/ydb-token-app/README.md
index d95f6bf..aac8fc9 100644
--- a/jdbc/ydb-token-app/README.md
+++ b/jdbc/ydb-token-app/README.md
@@ -74,3 +74,29 @@ The main parameters list:
All parameters can be passed directly when launching the application (in the format `--param_name=value`) or can be
preconfigured in an `application.properties` file saved next to the executable jar of the application.
+
+### Local observability infrastructure
+
+All Docker and observability-related configs are stored in `infra/` to keep the example sources clean:
+
+- `infra/compose-e2e.yaml`
+- `infra/application/application.properties`
+- `infra/otel/otel-collector-config.yaml`
+- `infra/prometheus/prometheus.yaml`
+- `infra/tempo/tempo.yaml`
+- `infra/grafana/...`
+- `infra/ydb/...`
+
+Run the full stack:
+
+```bash
+cd jdbc/ydb-token-app
+docker compose -f infra/compose-e2e.yaml up -d
+```
+
+Run one-shot app commands in the same stack:
+
+```bash
+cd jdbc/ydb-token-app
+docker compose -f infra/compose-e2e.yaml run --rm ydb-token-app clean init load run
+```
diff --git a/jdbc/ydb-token-app/infra/application/application.properties b/jdbc/ydb-token-app/infra/application/application.properties
new file mode 100644
index 0000000..341fd51
--- /dev/null
+++ b/jdbc/ydb-token-app/infra/application/application.properties
@@ -0,0 +1,7 @@
+app.connection=grpc://ydb:2136/local
+app.threadsCount=32
+app.workload.duration=180
+app.rpsLimit=-1
+app.otel.enabled=true
+app.otel.endpoint=http://otel-collector:4317
+app.otel.serviceName=ydb-token-app
diff --git a/jdbc/ydb-token-app/infra/compose-e2e.yaml b/jdbc/ydb-token-app/infra/compose-e2e.yaml
new file mode 100644
index 0000000..977e6ed
--- /dev/null
+++ b/jdbc/ydb-token-app/infra/compose-e2e.yaml
@@ -0,0 +1,76 @@
+services:
+ ydb-token-app:
+ build:
+ context: ../../..
+ dockerfile: jdbc/ydb-token-app/Dockerfile
+ depends_on:
+ ydb:
+ condition: service_healthy
+ otel-collector:
+ condition: service_started
+ command:
+ - "clean"
+ - "init"
+ - "load"
+ - "run"
+ volumes:
+ - ./application/application.properties:/app/config/application.properties:ro
+ restart: on-failure
+
+ ydb:
+ image: ydbplatform/local-ydb:trunk
+ platform: linux/amd64
+ command: [ "--config-path", "/ydb_config/ydb-config.yaml" ]
+ environment:
+ YDB_DEFAULT_LOG_LEVEL: NOTICE
+ GRPC_TLS_PORT: "2135"
+ GRPC_PORT: "2136"
+ MON_PORT: "8765"
+ YDB_USE_IN_MEMORY_PDISKS: "true"
+ ports:
+ - "2135:2135"
+ - "2136:2136"
+ - "8765:8765"
+ volumes:
+ - ./ydb:/ydb_config:ro
+
+ otel-collector:
+    image: otel/opentelemetry-collector-contrib:0.99.0
+ command: [ "--config=/etc/otelcol/config.yaml" ]
+ depends_on: [ tempo ]
+ volumes:
+ - ./otel/otel-collector-config.yaml:/etc/otelcol/config.yaml:ro
+ ports:
+ - "4317:4317"
+ - "4318:4318"
+ - "9464:9464"
+ - "13133:13133"
+ - "55679:55679"
+
+ prometheus:
+    image: prom/prometheus:v2.51.2
+ volumes:
+ - ./prometheus/prometheus.yaml:/etc/prometheus/prometheus.yml:ro
+ ports:
+ - "9090:9090"
+ depends_on: [ otel-collector ]
+
+ tempo:
+ image: grafana/tempo:2.4.1
+ command: [ "-config.file=/etc/tempo.yaml" ]
+ volumes:
+ - ./tempo/tempo.yaml:/etc/tempo.yaml:ro
+ ports:
+ - "3200:3200"
+
+ grafana:
+ image: grafana/grafana:10.4.2
+ environment:
+ GF_AUTH_ANONYMOUS_ENABLED: "true"
+ GF_AUTH_ANONYMOUS_ORG_ROLE: "Admin"
+ volumes:
+ - ./grafana/provisioning:/etc/grafana/provisioning:ro
+ - ./grafana/dashboards:/var/lib/grafana/dashboards:ro
+ ports:
+ - "3000:3000"
+ depends_on: [ prometheus, tempo ]
diff --git a/jdbc/ydb-token-app/infra/grafana/dashboards/README.md b/jdbc/ydb-token-app/infra/grafana/dashboards/README.md
new file mode 100644
index 0000000..9941fc8
--- /dev/null
+++ b/jdbc/ydb-token-app/infra/grafana/dashboards/README.md
@@ -0,0 +1,3 @@
+This folder intentionally contains no dashboard JSON files (only this README).
+
+Grafana is provisioned with Tempo + Prometheus datasources; use **Explore** to search traces.
diff --git a/jdbc/ydb-token-app/infra/grafana/provisioning/dashboards/dashboards.yaml b/jdbc/ydb-token-app/infra/grafana/provisioning/dashboards/dashboards.yaml
new file mode 100644
index 0000000..48b8582
--- /dev/null
+++ b/jdbc/ydb-token-app/infra/grafana/provisioning/dashboards/dashboards.yaml
@@ -0,0 +1,11 @@
+apiVersion: 1
+
+providers:
+ - name: 'default'
+ orgId: 1
+ folder: ''
+ type: file
+ disableDeletion: true
+ editable: false
+ options:
+ path: /var/lib/grafana/dashboards
diff --git a/jdbc/ydb-token-app/infra/grafana/provisioning/datasources/datasources.yaml b/jdbc/ydb-token-app/infra/grafana/provisioning/datasources/datasources.yaml
new file mode 100644
index 0000000..1b0d125
--- /dev/null
+++ b/jdbc/ydb-token-app/infra/grafana/provisioning/datasources/datasources.yaml
@@ -0,0 +1,22 @@
+apiVersion: 1
+
+datasources:
+  - name: Prometheus
+    uid: prometheus
+    type: prometheus
+    access: proxy
+    url: http://prometheus:9090
+    isDefault: true
+    editable: false
+
+  - name: Tempo
+    uid: tempo
+    type: tempo
+    access: proxy
+    url: http://tempo:3200
+    editable: false
+    jsonData:
+      tracesToMetrics:
+        datasourceUid: prometheus
+      serviceMap:
+        datasourceUid: prometheus
diff --git a/jdbc/ydb-token-app/infra/otel/otel-collector-config.yaml b/jdbc/ydb-token-app/infra/otel/otel-collector-config.yaml
new file mode 100644
index 0000000..7f78444
--- /dev/null
+++ b/jdbc/ydb-token-app/infra/otel/otel-collector-config.yaml
@@ -0,0 +1,44 @@
+receivers:
+ otlp:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:4317
+ http:
+ endpoint: 0.0.0.0:4318
+
+processors:
+ batch: { }
+
+exporters:
+ prometheus:
+ endpoint: 0.0.0.0:9464
+ resource_to_telemetry_conversion:
+ enabled: true
+
+ otlp/tempo:
+ endpoint: tempo:4317
+ tls:
+ insecure: true
+
+ debug:
+ verbosity: detailed
+
+extensions:
+ health_check:
+ endpoint: 0.0.0.0:13133
+
+ zpages:
+ endpoint: 0.0.0.0:55679
+
+service:
+ extensions: [ health_check, zpages ]
+ pipelines:
+ metrics:
+ receivers: [ otlp ]
+ processors: [ batch ]
+ exporters: [ prometheus ]
+
+ traces:
+ receivers: [ otlp ]
+ processors: [ batch ]
+ exporters: [ otlp/tempo, debug ]
diff --git a/jdbc/ydb-token-app/infra/prometheus/prometheus.yaml b/jdbc/ydb-token-app/infra/prometheus/prometheus.yaml
new file mode 100644
index 0000000..64b3182
--- /dev/null
+++ b/jdbc/ydb-token-app/infra/prometheus/prometheus.yaml
@@ -0,0 +1,7 @@
+global:
+ scrape_interval: 5s
+
+scrape_configs:
+ - job_name: otel-collector
+ static_configs:
+ - targets: ["otel-collector:9464"]
diff --git a/jdbc/ydb-token-app/infra/tempo/tempo.yaml b/jdbc/ydb-token-app/infra/tempo/tempo.yaml
new file mode 100644
index 0000000..43dbb19
--- /dev/null
+++ b/jdbc/ydb-token-app/infra/tempo/tempo.yaml
@@ -0,0 +1,15 @@
+server:
+ http_listen_port: 3200
+
+distributor:
+ receivers:
+ otlp:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:4317
+
+storage:
+ trace:
+ backend: local
+ local:
+ path: /tmp/tempo
diff --git a/jdbc/ydb-token-app/infra/ydb/README.md b/jdbc/ydb-token-app/infra/ydb/README.md
new file mode 100644
index 0000000..704ec33
--- /dev/null
+++ b/jdbc/ydb-token-app/infra/ydb/README.md
@@ -0,0 +1,28 @@
+# YDB server-side tracing (OpenTelemetry)
+
+This folder is used to keep a **custom YDB config** that enables server-side OpenTelemetry tracing.
+
+## 1) Export the default config from a running container
+
+If YDB is running in a container named `ydb-local` (for the compose stack, check the actual container name with `docker ps`, e.g. `infra-ydb-1`):
+
+```bash
+docker cp ydb-local:/ydb_data/cluster/kikimr_configs/config.yaml ./infra/ydb/ydb-config.yaml
+```
+
+## 2) Enable OpenTelemetry exporter in the config
+
+Edit `ydb-config.yaml` and add the contents of `otel-tracing-snippet.yaml` (usually as a top-level section).
+
+Default OTLP endpoint (inside docker-compose network): `grpc://otel-collector:4317`
+Default service name (so you can find it in Tempo/Grafana): `ydb`
+
+## 3) Run with the overridden config
+
+Restart YDB:
+
+```bash
+docker compose -f infra/compose-e2e.yaml up -d --force-recreate ydb
+```
+
+Now you should see additional server-side traces in Tempo/Grafana.
diff --git a/jdbc/ydb-token-app/infra/ydb/otel-tracing-snippet.yaml b/jdbc/ydb-token-app/infra/ydb/otel-tracing-snippet.yaml
new file mode 100644
index 0000000..bd5978d
--- /dev/null
+++ b/jdbc/ydb-token-app/infra/ydb/otel-tracing-snippet.yaml
@@ -0,0 +1,26 @@
+tracing_config:
+ backend:
+ opentelemetry:
+ collector_url: grpc://otel-collector:4317
+ service_name: ydb
+ external_throttling:
+ - scope:
+ database: /local
+ max_traces_per_minute: 60
+ max_traces_burst: 3
+ # Highest tracing detail for *sampled* traces (YDB-generated trace-id).
+ # Note: requests with an external `traceparent` are traced at level 13 (Detailed) per YDB docs.
+ sampling:
+ - scope:
+ database: /local
+ fraction: 1
+ level: 15
+ max_traces_per_minute: 1000
+ max_traces_burst: 100
+ uploader:
+ max_exported_spans_per_second: 30
+ max_spans_in_batch: 100
+ max_bytes_in_batch: 10485760 # 10 MiB
+ max_export_requests_inflight: 3
+ max_batch_accumulation_milliseconds: 5000
+ span_export_timeout_seconds: 120
diff --git a/jdbc/ydb-token-app/infra/ydb/ydb-config.yaml b/jdbc/ydb-token-app/infra/ydb/ydb-config.yaml
new file mode 100644
index 0000000..a6ae5ab
--- /dev/null
+++ b/jdbc/ydb-token-app/infra/ydb/ydb-config.yaml
@@ -0,0 +1,354 @@
+actor_system_config:
+ batch_executor: 2
+ executor:
+ - name: System
+ spin_threshold: 0
+ threads: 2
+ type: BASIC
+ - name: User
+ spin_threshold: 0
+ threads: 3
+ type: BASIC
+ - name: Batch
+ spin_threshold: 0
+ threads: 2
+ type: BASIC
+ - name: IO
+ threads: 1
+ time_per_mailbox_micro_secs: 100
+ type: IO
+ - name: IC
+ spin_threshold: 10
+ threads: 1
+ time_per_mailbox_micro_secs: 100
+ type: BASIC
+ io_executor: 3
+ scheduler:
+ progress_threshold: 10000
+ resolution: 1024
+ spin_threshold: 0
+ service_executor:
+ - executor_id: 4
+ service_name: Interconnect
+ sys_executor: 0
+ user_executor: 1
+blob_storage_config:
+ service_set:
+ availability_domains: 1
+ groups:
+ - erasure_species: 0
+ group_generation: 1
+ group_id: 0
+ rings:
+ - fail_domains:
+ - vdisk_locations:
+ - node_id: 1
+ pdisk_guid: 1
+ pdisk_id: 1
+ vdisk_slot_id: 0
+ pdisks:
+ - node_id: 1
+ path: SectorMap:1:64
+ pdisk_category: 0
+ pdisk_guid: 1
+ pdisk_id: 1
+ vdisks:
+ - vdisk_id:
+ domain: 0
+ group_generation: 1
+ group_id: 0
+ ring: 0
+ vdisk: 0
+ vdisk_location:
+ node_id: 1
+ pdisk_guid: 1
+ pdisk_id: 1
+ vdisk_slot_id: 0
+channel_profile_config:
+ profile:
+ - channel:
+ - erasure_species: none
+ pdisk_category: 0
+ storage_pool_kind: hdd
+ - erasure_species: none
+ pdisk_category: 0
+ storage_pool_kind: hdd
+ - erasure_species: none
+ pdisk_category: 0
+ storage_pool_kind: hdd
+ profile_id: 0
+ - channel:
+ - erasure_species: none
+ pdisk_category: 0
+ storage_pool_kind: hdd
+ - erasure_species: none
+ pdisk_category: 0
+ storage_pool_kind: hdd
+ - erasure_species: none
+ pdisk_category: 0
+ storage_pool_kind: hdd
+ - erasure_species: none
+ pdisk_category: 0
+ storage_pool_kind: hdd
+ - erasure_species: none
+ pdisk_category: 0
+ storage_pool_kind: hdd
+ - erasure_species: none
+ pdisk_category: 0
+ storage_pool_kind: hdd
+ - erasure_species: none
+ pdisk_category: 0
+ storage_pool_kind: hdd
+ profile_id: 1
+domains_config:
+ domain:
+ - domain_id: 1
+ name: local
+ storage_pool_types:
+ - kind: hdd
+ pool_config:
+ box_id: 1
+ erasure_species: none
+ kind: hdd
+ pdisk_filter:
+ - property:
+ - type: ROT
+ vdisk_kind: Default
+ - kind: hdd1
+ pool_config:
+ box_id: 1
+ erasure_species: none
+ kind: hdd
+ pdisk_filter:
+ - property:
+ - type: ROT
+ vdisk_kind: Default
+ - kind: hdd2
+ pool_config:
+ box_id: 1
+ erasure_species: none
+ kind: hdd
+ pdisk_filter:
+ - property:
+ - type: ROT
+ vdisk_kind: Default
+ - kind: hdde
+ pool_config:
+ box_id: 1
+ encryption_mode: 1
+ erasure_species: none
+ kind: hdd
+ pdisk_filter:
+ - property:
+ - type: ROT
+ vdisk_kind: Default
+ security_config:
+ default_users:
+ - name: root
+ password: '1234'
+ state_storage:
+ - ring:
+ nto_select: 1
+ ring:
+ - node:
+ - 1
+ use_ring_specific_node_selection: true
+ ssid: 1
+feature_flags:
+ enable_drain_on_shutdown: false
+ enable_mvcc_snapshot_reads: true
+ enable_persistent_query_stats: true
+ enable_public_api_external_blobs: false
+ enable_scheme_transactions_at_scheme_shard: true
+federated_query_config:
+ audit:
+ enabled: false
+ uaconfig:
+ uri: ''
+ checkpoint_coordinator:
+ checkpointing_period_millis: 1000
+ enabled: true
+ max_inflight: 1
+ storage:
+ endpoint: ''
+ common:
+ ids_prefix: pt
+ use_bearer_for_ydb: true
+ control_plane_proxy:
+ enabled: true
+ request_timeout: 30s
+ control_plane_storage:
+ available_binding:
+ - DATA_STREAMS
+ - OBJECT_STORAGE
+ available_connection:
+ - YDB_DATABASE
+ - CLICKHOUSE_CLUSTER
+ - DATA_STREAMS
+ - OBJECT_STORAGE
+ - MONITORING
+ enabled: true
+ storage:
+ endpoint: ''
+ db_pool:
+ enabled: true
+ storage:
+ endpoint: ''
+ enabled: false
+ gateways:
+ dq:
+ default_settings: []
+ enabled: true
+ pq:
+ cluster_mapping: []
+ solomon:
+ cluster_mapping: []
+ nodes_manager:
+ enabled: true
+ pending_fetcher:
+ enabled: true
+ pinger:
+ ping_period: 30s
+ private_api:
+ enabled: true
+ private_proxy:
+ enabled: true
+ resource_manager:
+ enabled: true
+ token_accessor:
+ enabled: true
+grpc_config:
+ ca: /ydb_certs/ca.pem
+ cert: /ydb_certs/cert.pem
+ host: '[::]'
+ key: /ydb_certs/key.pem
+ services:
+ - legacy
+ - tablet_service
+ - yql
+ - discovery
+ - cms
+ - locking
+ - kesus
+ - pq
+ - pqcd
+ - pqv1
+ - topic
+ - datastreams
+ - scripting
+ - clickhouse_internal
+ - rate_limiter
+ - analytics
+ - export
+ - import
+ - yq
+ - keyvalue
+ - monitoring
+ - auth
+ - query_service
+ - view
+interconnect_config:
+ start_tcp: true
+kafka_proxy_config:
+ enable_kafka_proxy: true
+ listening_port: 9092
+kqpconfig:
+ settings:
+ - name: _ResultRowsLimit
+ value: '1000'
+ - name: _KqpYqlSyntaxVersion
+ value: '1'
+ - name: _KqpAllowNewEngine
+ value: 'true'
+log_config:
+ default_level: 5
+ entry: []
+ sys_log: false
+nameservice_config:
+ node:
+ - address: ::1
+ host: localhost
+ node_id: 1
+ port: 19001
+ walle_location:
+ body: 1
+ data_center: '1'
+ rack: '1'
+net_classifier_config:
+ cms_config_timeout_seconds: 30
+ net_data_file_path: /ydb_data/netData.tsv
+ updater_config:
+ net_data_update_interval_seconds: 60
+ retry_interval_seconds: 30
+pqcluster_discovery_config:
+ enabled: false
+pqconfig:
+ check_acl: false
+ cluster_table_path: ''
+ clusters_update_timeout_sec: 1
+ enable_proto_source_id_info: true
+ enabled: true
+ max_storage_node_port: 65535
+ meta_cache_timeout_sec: 1
+ quoting_config:
+ enable_quoting: false
+ require_credentials_in_new_protocol: false
+ root: ''
+ topics_are_first_class_citizen: true
+ version_table_path: ''
+sqs_config:
+ enable_dead_letter_queues: true
+ enable_sqs: false
+ force_queue_creation_v2: true
+ force_queue_deletion_v2: true
+ scheme_cache_hard_refresh_time_seconds: 0
+ scheme_cache_soft_refresh_time_seconds: 0
+static_erasure: none
+system_tablets:
+ default_node:
+ - 1
+ flat_schemeshard:
+ - info:
+ tablet_id: 72057594046678944
+ flat_tx_coordinator:
+ - node:
+ - 1
+ tx_allocator:
+ - node:
+ - 1
+ tx_mediator:
+ - node:
+ - 1
+table_service_config:
+ filter_pushdown_over_join_optional_side: false
+ resource_manager:
+ channel_buffer_size: 262144
+ mkql_heavy_program_memory_limit: 1048576
+ mkql_light_program_memory_limit: 65536
+ verbose_memory_limit_exception: true
+tracing_config:
+ backend:
+ opentelemetry:
+ collector_url: grpc://otel-collector:4317
+ service_name: ydb
+ external_throttling:
+ - scope:
+ database: /local
+ max_traces_per_minute: 60
+ max_traces_burst: 3
+ # Highest tracing detail for *sampled* traces (YDB-generated trace-id).
+ # Note: requests with an external `traceparent` are traced at level 13 (Detailed) per YDB docs.
+ sampling:
+ - scope:
+ database: /local
+ fraction: 1
+ level: 15
+ max_traces_per_minute: 1000
+ max_traces_burst: 100
+ uploader:
+ max_exported_spans_per_second: 30
+ max_spans_in_batch: 100
+ max_bytes_in_batch: 10485760 # 10 MiB
+ max_export_requests_inflight: 3
+ max_batch_accumulation_milliseconds: 5000
+ span_export_timeout_seconds: 120
diff --git a/jdbc/ydb-token-app/pom.xml b/jdbc/ydb-token-app/pom.xml
index 80ab123..ef609be 100644
--- a/jdbc/ydb-token-app/pom.xml
+++ b/jdbc/ydb-token-app/pom.xml
@@ -18,7 +18,9 @@
2.7.18
0.9.3
-
+ 1.58.0
+ 2.22.0-alpha
+ 2.2.20
tech.ydb.apps.Application
@@ -63,6 +65,29 @@
hibernate-ydb-dialect-v5
${ydb.hibernate.dialect.version}
+
+
+ io.opentelemetry
+ opentelemetry-api
+
+
+ io.opentelemetry
+ opentelemetry-sdk
+
+
+ io.opentelemetry
+ opentelemetry-exporter-otlp
+
+
+ io.opentelemetry.instrumentation
+ opentelemetry-grpc-1.6
+ ${opentelemetry.instrumentation.version}
+
+
+ org.jetbrains.kotlin
+ kotlin-stdlib
+ ${kotlin.version}
+
@@ -96,6 +121,13 @@
import
+
+ io.opentelemetry
+ opentelemetry-bom
+ ${opentelemetry.version}
+ pom
+ import
+
org.springframework.retry
spring-retry
diff --git a/jdbc/ydb-token-app/src/main/java/tech/ydb/apps/AppMetrics.java b/jdbc/ydb-token-app/src/main/java/tech/ydb/apps/AppMetrics.java
index 43282e7..87578f6 100644
--- a/jdbc/ydb-token-app/src/main/java/tech/ydb/apps/AppMetrics.java
+++ b/jdbc/ydb-token-app/src/main/java/tech/ydb/apps/AppMetrics.java
@@ -1,10 +1,26 @@
package tech.ydb.apps;
+import io.micrometer.core.instrument.Counter;
+import io.micrometer.core.instrument.MeterRegistry;
+import io.micrometer.core.instrument.Timer;
+import io.opentelemetry.api.OpenTelemetry;
+import io.opentelemetry.api.trace.Span;
+import io.opentelemetry.api.trace.SpanKind;
+import io.opentelemetry.api.trace.Tracer;
+import io.opentelemetry.context.Scope;
+import org.slf4j.Logger;
+import org.springframework.retry.RetryCallback;
+import org.springframework.retry.RetryContext;
+import org.springframework.retry.RetryListener;
+import tech.ydb.core.StatusCode;
+import tech.ydb.jdbc.exception.YdbStatusable;
+
import java.sql.SQLException;
import java.time.Duration;
import java.util.Arrays;
import java.util.EnumMap;
import java.util.Map;
+import java.util.Objects;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
@@ -13,17 +29,6 @@
import java.util.concurrent.atomic.LongAdder;
import java.util.function.Function;
-import io.micrometer.core.instrument.Counter;
-import io.micrometer.core.instrument.MeterRegistry;
-import io.micrometer.core.instrument.Timer;
-import org.slf4j.Logger;
-import org.springframework.retry.RetryCallback;
-import org.springframework.retry.RetryContext;
-import org.springframework.retry.RetryListener;
-
-import tech.ydb.core.StatusCode;
-import tech.ydb.jdbc.exception.YdbStatusable;
-
/**
*
* @author Aleksandr Gorshenin
@@ -44,6 +49,7 @@ public class AppMetrics {
public class Method {
private final String name;
+ private final String spanName;
private final LongAdder totalCount = new LongAdder();
private final LongAdder totalTimeMs = new LongAdder();
@@ -62,8 +68,9 @@ public class Method {
private volatile long lastPrinted = 0;
- public Method(MeterRegistry registry, String name, String label) {
+ public Method(MeterRegistry registry, String name, String label, String spanName) {
this.name = name;
+ this.spanName = spanName;
this.executionsCounter = SDK_OPERATIONS.tag("operation_type", label).register(registry);
this.successCounter = SDK_OPERATIONS_SUCCESS.tag("operation_type", label).register(registry);
this.errorCounter = code -> errorsCountersMap.computeIfAbsent(code, key -> SDK_OPERATIONS_FAILTURE
@@ -88,16 +95,23 @@ public void measure(Runnable run) {
executionsCounter.increment();
+ Span span = tracer.spanBuilder(Objects.requireNonNull(spanName, "spanName"))
+ .setSpanKind(SpanKind.CLIENT)
+ .startSpan();
StatusCode code = StatusCode.SUCCESS;
long startedAt = System.currentTimeMillis();
+ Scope scope = span.makeCurrent();
try {
run.run();
successCounter.increment();
} catch (RuntimeException ex) {
code = extractStatusCode(ex);
errorCounter.apply(code).increment();
+ span.recordException(ex);
+ span.setStatus(io.opentelemetry.api.trace.StatusCode.ERROR);
throw ex;
} finally {
+ scope.close();
LOCAL.remove();
long ms = System.currentTimeMillis() - startedAt;
@@ -107,6 +121,7 @@ public void measure(Runnable run) {
totalTimeMs.add(ms);
durationTimer.apply(code).record(Duration.ofMillis(ms));
+ span.end();
}
}
@@ -147,6 +162,7 @@ private void printTotal(Logger logger) {
private final Method fetch;
private final Method update;
private final Method batchUpdate;
+ private final Tracer tracer;
private final AtomicInteger executionsCount = new AtomicInteger(0);
private final AtomicInteger failturesCount = new AtomicInteger(0);
@@ -156,12 +172,13 @@ private void printTotal(Logger logger) {
r -> new Thread(r, "ticker")
);
- public AppMetrics(Logger logger, MeterRegistry meterRegistry) {
+ public AppMetrics(Logger logger, MeterRegistry meterRegistry, OpenTelemetry openTelemetry) {
this.logger = logger;
- this.load = new Method(meterRegistry, "LOAD ", "load");
- this.fetch = new Method(meterRegistry, "FETCH ", "read");
- this.update = new Method(meterRegistry, "UPDATE", "update");
- this.batchUpdate = new Method(meterRegistry, "BULK_UP", "batch_update");
+ this.tracer = openTelemetry.getTracer("tech.ydb.apps.business");
+ this.load = new Method(meterRegistry, "LOAD ", "load", "business.load_batch");
+ this.fetch = new Method(meterRegistry, "FETCH ", "read", "business.fetch");
+ this.update = new Method(meterRegistry, "UPDATE", "update", "business.update");
+ this.batchUpdate = new Method(meterRegistry, "BULK_UP", "batch_update", "business.batch_update");
}
public Method getLoad() {
@@ -187,9 +204,12 @@ public void incrementFaiture() {
public void runWithMonitor(Runnable runnable) {
Arrays.asList(load, fetch, update, batchUpdate).forEach(Method::reset);
final ScheduledFuture> future = scheduler.scheduleAtFixedRate(this::print, 1, 10, TimeUnit.SECONDS);
- runnable.run();
- future.cancel(false);
- print();
+ try {
+ runnable.run();
+ } finally {
+ future.cancel(false);
+ print();
+ }
}
public void close() throws InterruptedException {
diff --git a/jdbc/ydb-token-app/src/main/java/tech/ydb/apps/Application.java b/jdbc/ydb-token-app/src/main/java/tech/ydb/apps/Application.java
index a6f1373..4d8ddba 100644
--- a/jdbc/ydb-token-app/src/main/java/tech/ydb/apps/Application.java
+++ b/jdbc/ydb-token-app/src/main/java/tech/ydb/apps/Application.java
@@ -1,21 +1,7 @@
package tech.ydb.apps;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Random;
-import java.util.Set;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.stream.Collectors;
-import java.util.stream.IntStream;
-
-import javax.annotation.PreDestroy;
-
import io.micrometer.core.instrument.MeterRegistry;
+import io.opentelemetry.api.OpenTelemetry;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.CommandLineRunner;
@@ -25,11 +11,24 @@
import org.springframework.context.annotation.Bean;
import org.springframework.retry.RetryListener;
import org.springframework.retry.annotation.EnableRetry;
-
import tech.ydb.apps.service.SchemeService;
import tech.ydb.apps.service.WorkloadService;
import tech.ydb.jdbc.YdbTracer;
+import javax.annotation.PreDestroy;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
/**
*
* @author Aleksandr Gorshenin
@@ -59,13 +58,20 @@ public static void main(String[] args) {
private final AtomicLong logCounter = new AtomicLong(0);
private volatile boolean isStopped = false;
- public Application(Config config, SchemeService scheme, WorkloadService worload, MeterRegistry registry) {
+ public Application(
+ Config config,
+ SchemeService scheme,
+ WorkloadService worload,
+ MeterRegistry registry,
+ OpenTelemetry openTelemetry
+ ) {
GrpcMetrics.init(registry);
+ GrpcTracing.init(openTelemetry);
this.config = config;
this.schemeService = scheme;
this.workloadService = worload;
- this.ticker = new AppMetrics(logger, registry);
+ this.ticker = new AppMetrics(logger, registry, openTelemetry);
logger.info("Create fixed thread pool with size {}", config.getThreadCount());
this.executor = Executors.newFixedThreadPool(config.getThreadCount(), this::threadFactory);
@@ -140,7 +146,7 @@ private void loadData() {
while (id < recordsCount) {
final int first = id;
id += batchSize;
- final int last = id < recordsCount ? id : recordsCount;
+ final int last = Math.min(id, recordsCount);
futures.add(CompletableFuture.runAsync(() -> {
if (isStopped) {
diff --git a/jdbc/ydb-token-app/src/main/java/tech/ydb/apps/Config.java b/jdbc/ydb-token-app/src/main/java/tech/ydb/apps/Config.java
index c3f5aa3..7b3ab29 100644
--- a/jdbc/ydb-token-app/src/main/java/tech/ydb/apps/Config.java
+++ b/jdbc/ydb-token-app/src/main/java/tech/ydb/apps/Config.java
@@ -1,9 +1,8 @@
package tech.ydb.apps;
-
-import io.micrometer.core.instrument.MeterRegistry;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.context.properties.ConstructorBinding;
+import org.springframework.boot.context.properties.bind.DefaultValue;
import org.springframework.boot.context.properties.bind.Name;
/**
@@ -20,15 +19,26 @@ public class Config {
private final int loadBatchSize;
private final int workloadDurationSec;
private final int rpsLimit;
+ private final boolean otelEnabled;
+ private final String otelEndpoint;
+ private final String otelServiceName;
- public Config(String connection, int threadsCount, int recordsCount, @Name("load.batchSize") int loadBatchSize,
- @Name("workload.duration") int workloadDuration, int rpsLimit, MeterRegistry registy) {
+ public Config(
+ String connection, int threadsCount, int recordsCount,
+ @Name("load.batchSize") int loadBatchSize,
+ @Name("workload.duration") int workloadDuration, int rpsLimit,
+ @Name("otel.enabled") @DefaultValue("false") boolean otelEnabled,
+ @Name("otel.endpoint") @DefaultValue("http://otel-collector:4317") String otelEndpoint,
+ @Name("otel.serviceName") @DefaultValue("ydb-token-app") String otelServiceName) {
this.connection = connection;
this.threadsCount = threadsCount <= 0 ? Runtime.getRuntime().availableProcessors() : threadsCount;
this.recordsCount = recordsCount;
this.loadBatchSize = loadBatchSize;
this.workloadDurationSec = workloadDuration;
this.rpsLimit = rpsLimit;
+ this.otelEnabled = otelEnabled;
+ this.otelEndpoint = otelEndpoint;
+ this.otelServiceName = otelServiceName;
}
public String getConnection() {
@@ -54,4 +64,16 @@ public int getWorkloadDurationSec() {
public RateLimiter getRpsLimiter() {
return rpsLimit <= 0 ? RateLimiter.noLimit() : RateLimiter.withRps(rpsLimit);
}
+
+ public boolean isOtelEnabled() {
+ return otelEnabled;
+ }
+
+ public String getOtelEndpoint() {
+ return otelEndpoint;
+ }
+
+ public String getOtelServiceName() {
+ return otelServiceName;
+ }
}
diff --git a/jdbc/ydb-token-app/src/main/java/tech/ydb/apps/GrpcObservability.java b/jdbc/ydb-token-app/src/main/java/tech/ydb/apps/GrpcObservability.java
new file mode 100644
index 0000000..57b7b3f
--- /dev/null
+++ b/jdbc/ydb-token-app/src/main/java/tech/ydb/apps/GrpcObservability.java
@@ -0,0 +1,16 @@
+package tech.ydb.apps;
+
+import java.util.function.Consumer;
+
+import io.grpc.ManagedChannelBuilder;
+
+/**
+ * Adds both metrics and tracing interceptors to YDB gRPC channels.
+ */
+public class GrpcObservability implements Consumer> {
+ @Override
+ public void accept(ManagedChannelBuilder> builder) {
+ new GrpcMetrics().accept(builder);
+ new GrpcTracing().accept(builder);
+ }
+}
diff --git a/jdbc/ydb-token-app/src/main/java/tech/ydb/apps/GrpcTracing.java b/jdbc/ydb-token-app/src/main/java/tech/ydb/apps/GrpcTracing.java
new file mode 100644
index 0000000..6226664
--- /dev/null
+++ b/jdbc/ydb-token-app/src/main/java/tech/ydb/apps/GrpcTracing.java
@@ -0,0 +1,23 @@
+package tech.ydb.apps;
+
+import io.grpc.ManagedChannelBuilder;
+import io.opentelemetry.api.OpenTelemetry;
+import io.opentelemetry.instrumentation.grpc.v1_6.GrpcTelemetry;
+
+import java.util.function.Consumer;
+
+/**
+ * gRPC client tracing based on official OpenTelemetry gRPC interceptor.
+ */
+public class GrpcTracing implements Consumer> {
+ private static volatile OpenTelemetry openTelemetry = OpenTelemetry.noop();
+
+ public static void init(OpenTelemetry telemetry) {
+ openTelemetry = telemetry;
+ }
+
+ @Override
+ public void accept(ManagedChannelBuilder> builder) {
+ builder.intercept(GrpcTelemetry.create(openTelemetry).newClientInterceptor());
+ }
+}
diff --git a/jdbc/ydb-token-app/src/main/java/tech/ydb/apps/OpenTelemetryConfiguration.java b/jdbc/ydb-token-app/src/main/java/tech/ydb/apps/OpenTelemetryConfiguration.java
new file mode 100644
index 0000000..d6647e3
--- /dev/null
+++ b/jdbc/ydb-token-app/src/main/java/tech/ydb/apps/OpenTelemetryConfiguration.java
@@ -0,0 +1,46 @@
+package tech.ydb.apps;
+
+import io.opentelemetry.api.OpenTelemetry;
+import io.opentelemetry.api.common.AttributeKey;
+import io.opentelemetry.api.trace.propagation.W3CTraceContextPropagator;
+import io.opentelemetry.context.propagation.ContextPropagators;
+import io.opentelemetry.exporter.otlp.trace.OtlpGrpcSpanExporter;
+import io.opentelemetry.sdk.OpenTelemetrySdk;
+import io.opentelemetry.sdk.resources.Resource;
+import io.opentelemetry.sdk.trace.SdkTracerProvider;
+import io.opentelemetry.sdk.trace.SdkTracerProviderBuilder;
+import io.opentelemetry.sdk.trace.samplers.Sampler;
+import io.opentelemetry.sdk.trace.export.BatchSpanProcessor;
+import io.opentelemetry.sdk.trace.export.SpanExporter;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+
+@Configuration
+public class OpenTelemetryConfiguration {
+
+ @Bean(destroyMethod = "close")
+ public OpenTelemetry openTelemetry(Config config) {
+ Resource resource = Resource.getDefault().merge(Resource.builder()
+ .put(AttributeKey.stringKey("service.name"), config.getOtelServiceName())
+ .build());
+
+ SdkTracerProviderBuilder tracerProviderBuilder = SdkTracerProvider.builder()
+ .setResource(resource);
+
+ if (config.isOtelEnabled()) {
+ SpanExporter exporter = OtlpGrpcSpanExporter.builder()
+ .setEndpoint(config.getOtelEndpoint())
+ .build();
+ tracerProviderBuilder.addSpanProcessor(BatchSpanProcessor.builder(exporter).build());
+ } else {
+ tracerProviderBuilder.setSampler(Sampler.alwaysOff());
+ }
+
+ SdkTracerProvider tracerProvider = tracerProviderBuilder.build();
+
+ return OpenTelemetrySdk.builder()
+ .setTracerProvider(tracerProvider)
+ .setPropagators(ContextPropagators.create(W3CTraceContextPropagator.getInstance()))
+ .build();
+ }
+}
diff --git a/jdbc/ydb-token-app/src/main/resources/application.properties b/jdbc/ydb-token-app/src/main/resources/application.properties
index 1cc894b..c7cabd7 100644
--- a/jdbc/ydb-token-app/src/main/resources/application.properties
+++ b/jdbc/ydb-token-app/src/main/resources/application.properties
@@ -13,7 +13,9 @@ spring.datasource.driver-class-name=tech.ydb.jdbc.YdbDriver
spring.datasource.hikari.maximum-pool-size=200
spring.datasource.hikari.data-source-properties.enableTxTracer=true
-spring.datasource.hikari.data-source-properties.channelInitializer=tech.ydb.apps.GrpcMetrics
+spring.datasource.hikari.data-source-properties.channelInitializer=tech.ydb.apps.GrpcObservability
+
+app.otel.enabled=false
spring.jpa.properties.hibernate.jdbc.batch_size=1000
spring.jpa.properties.hibernate.order_updates=true