Feat: configure audit fields and metrics for onix adapter and add local configuration for onix adapter

This commit is contained in:
Manendra Pal Singh
2026-02-23 16:08:44 +05:30
parent 2745047b27
commit ab89102711
29 changed files with 2167 additions and 441 deletions

View File

@@ -14,6 +14,7 @@ import (
"github.com/beckn-one/beckn-onix/pkg/model"
"github.com/rs/zerolog"
"go.opentelemetry.io/otel/trace"
"gopkg.in/natefinch/lumberjack.v2"
)
@@ -273,6 +274,11 @@ func Request(ctx context.Context, r *http.Request, body []byte) {
// addCtx adds context values to the log event based on configured context keys.
func addCtx(ctx context.Context, event *zerolog.Event) {
span := trace.SpanFromContext(ctx)
if span.SpanContext().IsValid() {
event.Str("trace_id", span.SpanContext().TraceID().String())
event.Str("span_id", span.SpanContext().SpanID().String())
}
for _, key := range cfg.ContextKeys {
val, ok := ctx.Value(key).(string)
if !ok {

View File

@@ -56,6 +56,9 @@ const (
// ContextKeyParentID is the context key for storing and retrieving the parent ID from a request context
ContextKeyParentID ContextKey = "parent_id"
// ContextKeyCallerID is the context key for the caller who is calling the bap/bpp
ContextKeyCallerID ContextKey = "caller_id"
)
var contextKeys = map[string]ContextKey{
@@ -64,6 +67,7 @@ var contextKeys = map[string]ContextKey{
"subscriber_id": ContextKeySubscriberID,
"module_id": ContextKeyModuleID,
"parent_id": ContextKeyParentID,
"caller_id": ContextKeyCallerID,
}
// ParseContextKey converts a string into a valid ContextKey.

View File

@@ -8,13 +8,14 @@ import (
"os"
"time"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
"github.com/beckn-one/beckn-onix/pkg/log"
"github.com/beckn-one/beckn-onix/pkg/telemetry"
"github.com/redis/go-redis/extra/redisotel/v9"
"github.com/redis/go-redis/v9"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/trace"
)
// RedisCl global variable for the Redis client, can be overridden in tests
@@ -103,10 +104,6 @@ func New(ctx context.Context, cfg *Config) (*Cache, func() error, error) {
log.Debugf(ctx, "Failed to instrument Redis tracing: %v", err)
}
if err := redisotel.InstrumentMetrics(redisClient); err != nil {
// Log error but don't fail - instrumentation is optional
log.Debugf(ctx, "Failed to instrument Redis metrics: %v", err)
}
}
metrics, _ := GetCacheMetrics(ctx)
@@ -141,8 +138,12 @@ func (c *Cache) Get(ctx context.Context, key string) (string, error) {
// Set stores the given key-value pair in Redis with the specified TTL (time to live).
func (c *Cache) Set(ctx context.Context, key, value string, ttl time.Duration) error {
err := c.Client.Set(ctx, key, value, ttl).Err()
c.recordOperation(ctx, "set", err)
tracer := otel.Tracer(telemetry.ScopeName, trace.WithInstrumentationVersion(telemetry.ScopeVersion))
spanCtx, span := tracer.Start(ctx, "redis_set")
defer span.End()
err := c.Client.Set(spanCtx, key, value, ttl).Err()
c.recordOperation(spanCtx, "set", err)
return err
}

View File

@@ -4,9 +4,11 @@ import (
"context"
"errors"
"strconv"
"strings"
"time"
"github.com/beckn-one/beckn-onix/pkg/log"
"github.com/beckn-one/beckn-onix/pkg/model"
"github.com/beckn-one/beckn-onix/pkg/plugin/implementation/otelsetup"
"github.com/beckn-one/beckn-onix/pkg/telemetry"
)
@@ -27,31 +29,81 @@ func (m metricsProvider) New(ctx context.Context, config map[string]string) (*te
ServiceName: config["serviceName"],
ServiceVersion: config["serviceVersion"],
Environment: config["environment"],
MetricsPort: config["metricsPort"],
Domain: config["domain"],
OtlpEndpoint: config["otlpEndpoint"],
}
// to extract the device id from the parent id from context
var deviceId string
var producer string
var producerType string
var err error
if v := ctx.Value(model.ContextKeyParentID); v != nil {
parentID := v.(string)
p := strings.Split(parentID, ":")
deviceId = p[len(p)-1]
producerType = p[0]
producer = p[1]
}
if deviceId != "" {
telemetryConfig.DeviceID = deviceId
}
if producer != "" {
telemetryConfig.Producer = producer
}
if producerType != "" {
telemetryConfig.ProducerType = producerType
}
// Parse enableTracing from config
if enableTracingStr, ok := config["enableTracing"]; ok && enableTracingStr != "" {
telemetryConfig.EnableTracing, err = strconv.ParseBool(enableTracingStr)
if err != nil {
log.Warnf(ctx, "Invalid enableTracing value: %s, defaulting to False", enableTracingStr)
}
}
// Parse enableMetrics as boolean
if enableMetricsStr, ok := config["enableMetrics"]; ok && enableMetricsStr != "" {
enableMetrics, err := strconv.ParseBool(enableMetricsStr)
telemetryConfig.EnableMetrics, err = strconv.ParseBool(enableMetricsStr)
if err != nil {
log.Warnf(ctx, "Invalid enableMetrics value '%s', defaulting to true: %v", enableMetricsStr, err)
telemetryConfig.EnableMetrics = true
} else {
telemetryConfig.EnableMetrics = enableMetrics
log.Warnf(ctx, "Invalid enableMetrics value '%s', defaulting to False: %v", enableMetricsStr, err)
}
} else {
telemetryConfig.EnableMetrics = true // Default to true if not specified or empty
}
// Apply defaults if fields are empty
if telemetryConfig.ServiceName == "" {
telemetryConfig.ServiceName = otelsetup.DefaultConfig().ServiceName
// Parse enableLogs as boolean
if enableLogsStr, ok := config["enableLogs"]; ok && enableLogsStr != "" {
telemetryConfig.EnableLogs, err = strconv.ParseBool(enableLogsStr)
if err != nil {
log.Warnf(ctx, "Invalid enableLogs value '%s', defaulting to False: %v", enableLogsStr, err)
}
}
if telemetryConfig.ServiceVersion == "" {
telemetryConfig.ServiceVersion = otelsetup.DefaultConfig().ServiceVersion
// Parse timeInterval as int
if timeIntervalStr, ok := config["timeInterval"]; ok && timeIntervalStr != "" {
telemetryConfig.TimeInterval, err = strconv.ParseInt(timeIntervalStr, 10, 64)
if err != nil {
log.Warnf(ctx, "Invalid timeInterval value: %s, defaulting to 5 second ", timeIntervalStr)
}
}
if telemetryConfig.Environment == "" {
telemetryConfig.Environment = otelsetup.DefaultConfig().Environment
// to set fields for audit logs
if v, ok := config["auditFieldsConfig"]; ok && v != "" {
if err := telemetry.LoadAuditFieldRules(ctx, v); err != nil {
log.Warnf(ctx, "Failed to load audit field rules: %v", err)
}
}
// to set network-level metric frequency and granularity
if v, ok := config["networkMetricsGranularity"]; ok && v != "" {
telemetry.SetNetworkMetricsConfig(v, "")
}
if v, ok := config["networkMetricsFrequency"]; ok && v != "" {
telemetry.SetNetworkMetricsConfig("", v)
}
log.Debugf(ctx, "Telemetry config mapped: %+v", telemetryConfig)

View File

@@ -3,23 +3,25 @@ package otelsetup
import (
"context"
"fmt"
"net"
"net/http"
"sync"
"time"
clientprom "github.com/prometheus/client_golang/prometheus"
clientpromhttp "github.com/prometheus/client_golang/prometheus/promhttp"
"go.opentelemetry.io/contrib/instrumentation/runtime"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
otelprom "go.opentelemetry.io/otel/exporters/prometheus"
"go.opentelemetry.io/otel/sdk/metric"
"go.opentelemetry.io/otel/sdk/resource"
"time"
"github.com/beckn-one/beckn-onix/pkg/log"
"github.com/beckn-one/beckn-onix/pkg/plugin"
"github.com/beckn-one/beckn-onix/pkg/telemetry"
"go.opentelemetry.io/contrib/instrumentation/runtime"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
"go.opentelemetry.io/otel/log/global"
logsdk "go.opentelemetry.io/otel/sdk/log"
"go.opentelemetry.io/otel/sdk/metric"
"go.opentelemetry.io/otel/sdk/resource"
"go.opentelemetry.io/otel/sdk/trace"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
)
// Setup wires the telemetry provider. This is the concrete implementation
@@ -30,9 +32,16 @@ type Setup struct{}
type Config struct {
ServiceName string `yaml:"serviceName"`
ServiceVersion string `yaml:"serviceVersion"`
EnableMetrics bool `yaml:"enableMetrics"`
Environment string `yaml:"environment"`
MetricsPort string `yaml:"metricsPort"`
Domain string `yaml:"domain"`
DeviceID string `yaml:"deviceID"`
EnableMetrics bool `yaml:"enableMetrics"`
EnableTracing bool `yaml:"enableTracing"`
EnableLogs bool `yaml:"enableLogs"`
OtlpEndpoint string `yaml:"otlpEndpoint"`
TimeInterval int64 `yaml:"timeInterval"`
Producer string `yaml:"producer"`
ProducerType string `yaml:"producerType"`
}
// DefaultConfig returns sensible defaults for telemetry configuration.
@@ -40,9 +49,11 @@ func DefaultConfig() *Config {
return &Config{
ServiceName: "beckn-onix",
ServiceVersion: "dev",
EnableMetrics: true,
Environment: "development",
MetricsPort: "9090",
Domain: "",
DeviceID: "beckn-onix-device",
OtlpEndpoint: "localhost:4317",
TimeInterval: 5,
}
}
@@ -53,9 +64,11 @@ func ToPluginConfig(cfg *Config) *plugin.Config {
Config: map[string]string{
"serviceName": cfg.ServiceName,
"serviceVersion": cfg.ServiceVersion,
"enableMetrics": fmt.Sprintf("%t", cfg.EnableMetrics),
"environment": cfg.Environment,
"metricsPort": cfg.MetricsPort,
"enableMetrics": fmt.Sprintf("%t", cfg.EnableMetrics),
"enableTracing": fmt.Sprintf("%t", cfg.EnableTracing),
"otelEndpoint": cfg.OtlpEndpoint,
"deviceID": cfg.DeviceID,
},
}
}
@@ -78,92 +91,126 @@ func (Setup) New(ctx context.Context, cfg *Config) (*telemetry.Provider, error)
if cfg.Environment == "" {
cfg.Environment = DefaultConfig().Environment
}
if cfg.MetricsPort == "" {
cfg.MetricsPort = DefaultConfig().MetricsPort
if cfg.Domain == "" {
cfg.Domain = DefaultConfig().Domain
}
if cfg.DeviceID == "" {
cfg.DeviceID = DefaultConfig().DeviceID
}
if cfg.TimeInterval == 0 {
cfg.TimeInterval = DefaultConfig().TimeInterval
}
if !cfg.EnableMetrics {
log.Info(ctx, "OpenTelemetry metrics disabled")
if !cfg.EnableMetrics && !cfg.EnableTracing {
log.Info(ctx, "OpenTelemetry metrics and tracing are disabled")
return &telemetry.Provider{
Shutdown: func(context.Context) error { return nil },
}, nil
}
res, err := resource.New(
ctx,
resource.WithAttributes(
attribute.String("service.name", cfg.ServiceName),
attribute.String("service.version", cfg.ServiceVersion),
attribute.String("deployment.environment", cfg.Environment),
),
)
// this resource will be used by both metrics and traces
// to build resource with envelope metadata
baseAttrs := []attribute.KeyValue{
attribute.String("service.name", cfg.ServiceName),
attribute.String("service.version", cfg.ServiceVersion),
attribute.String("environment", cfg.Environment),
attribute.String("domain", cfg.Domain),
attribute.String("device_id", cfg.DeviceID),
attribute.String("producerType", cfg.ProducerType),
attribute.String("producer", cfg.Producer),
}
resMetric, err := resource.New(ctx, resource.WithAttributes(buildAtts(baseAttrs, "METRIC")...))
if err != nil {
return nil, fmt.Errorf("failed to create telemetry resource: %w", err)
return nil, fmt.Errorf("failed to create telemetry resource for matric: %w", err)
}
registry := clientprom.NewRegistry()
exporter, err := otelprom.New(
otelprom.WithRegisterer(registry),
otelprom.WithoutUnits(),
otelprom.WithoutScopeInfo(),
)
if err != nil {
return nil, fmt.Errorf("failed to create prometheus exporter: %w", err)
}
meterProvider := metric.NewMeterProvider(
metric.WithReader(exporter),
metric.WithResource(res),
)
otel.SetMeterProvider(meterProvider)
log.Infof(ctx, "OpenTelemetry metrics initialized for service=%s version=%s env=%s",
cfg.ServiceName, cfg.ServiceVersion, cfg.Environment)
if err := runtime.Start(runtime.WithMinimumReadMemStatsInterval(0)); err != nil {
log.Warnf(ctx, "Failed to start Go runtime instrumentation: %v", err)
}
// Create metrics handler
metricsHandler := clientpromhttp.HandlerFor(registry, clientpromhttp.HandlerOpts{})
// Create and start metrics HTTP server
metricsMux := http.NewServeMux()
metricsMux.Handle("/metrics", metricsHandler)
metricsServer := &http.Server{
Addr: net.JoinHostPort("", cfg.MetricsPort),
Handler: metricsMux,
ReadTimeout: 10 * time.Second,
WriteTimeout: 10 * time.Second,
IdleTimeout: 30 * time.Second,
}
var serverWg sync.WaitGroup
serverWg.Add(1)
go func() {
defer serverWg.Done()
log.Infof(ctx, "Metrics server listening on %s", metricsServer.Addr)
if err := metricsServer.ListenAndServe(); err != nil && err != http.ErrServerClosed {
log.Errorf(ctx, fmt.Errorf("metrics server ListenAndServe: %w", err), "error listening and serving metrics")
// OTLP metrics
var meterProvider *metric.MeterProvider
if cfg.EnableMetrics {
metricExpoter, err := otlpmetricgrpc.New(ctx, otlpmetricgrpc.WithEndpoint(cfg.OtlpEndpoint),
otlpmetricgrpc.WithDialOption(grpc.WithTransportCredentials(insecure.NewCredentials())))
if err != nil {
return nil, fmt.Errorf("failed to create OTLP metric exporter: %w", err)
}
}()
reader := metric.NewPeriodicReader(metricExpoter, metric.WithInterval(time.Second*time.Duration(cfg.TimeInterval)))
meterProvider = metric.NewMeterProvider(metric.WithReader(reader), metric.WithResource(resMetric))
otel.SetMeterProvider(meterProvider)
log.Infof(ctx, "OpenTelemetry metrics initialized for service=%s version=%s env=%s (OTLP endpoint=%s)",
cfg.ServiceName, cfg.ServiceVersion, cfg.Environment, cfg.OtlpEndpoint)
// for the Go runtime metrics
if err := runtime.Start(runtime.WithMinimumReadMemStatsInterval(runtime.DefaultMinimumReadMemStatsInterval)); err != nil {
log.Warnf(ctx, "Failed to start Go runtime instrumentation: %v", err)
}
}
//OTLP traces
restrace, err := resource.New(ctx, resource.WithAttributes(buildAtts(baseAttrs, "API")...))
if err != nil {
return nil, fmt.Errorf("failed to create trace resource: %w", err)
}
var traceProvider *trace.TracerProvider
if cfg.EnableTracing {
traceExpoter, err := otlptracegrpc.New(ctx, otlptracegrpc.WithEndpoint(cfg.OtlpEndpoint), otlptracegrpc.WithDialOption(grpc.WithTransportCredentials(insecure.NewCredentials())))
if err != nil {
return nil, fmt.Errorf("failed to create OTLP trace exporter: %w", err)
}
traceProvider = trace.NewTracerProvider(trace.WithBatcher(traceExpoter), trace.WithResource(restrace)) //TODO: need to add the trace sampleing rate
otel.SetTracerProvider(traceProvider)
log.Infof(ctx, "OpenTelemetry tracing initialized for service=%s (OTLP endpoint=%s)",
cfg.ServiceName, cfg.OtlpEndpoint)
}
resAudit, err := resource.New(ctx, resource.WithAttributes(buildAtts(baseAttrs, "AUDIT")...))
if err != nil {
return nil, fmt.Errorf("failed to create audit resource: %w", err)
}
var logProvider *logsdk.LoggerProvider
if cfg.EnableLogs {
logExporter, err := otlploggrpc.New(ctx, otlploggrpc.WithEndpoint(cfg.OtlpEndpoint), otlploggrpc.WithDialOption(grpc.WithTransportCredentials(insecure.NewCredentials())))
if err != nil {
return nil, fmt.Errorf("failed to create OTLP logs exporter: %w", err)
}
processor := logsdk.NewBatchProcessor(logExporter)
logProvider = logsdk.NewLoggerProvider(logsdk.WithProcessor(processor), logsdk.WithResource(resAudit))
global.SetLoggerProvider(logProvider)
}
return &telemetry.Provider{
MeterProvider: meterProvider,
MetricsHandler: metricsHandler,
MeterProvider: meterProvider,
TraceProvider: traceProvider,
LogProvider: logProvider,
Shutdown: func(shutdownCtx context.Context) error {
log.Infof(ctx, "Shutting down metrics server...")
// Shutdown the metrics server
serverShutdownCtx, cancel := context.WithTimeout(shutdownCtx, 10*time.Second)
defer cancel()
if err := metricsServer.Shutdown(serverShutdownCtx); err != nil {
log.Errorf(ctx, fmt.Errorf("metrics server shutdown: %w", err), "error shutting down metrics server")
var errs []error
if traceProvider != nil {
if err := traceProvider.Shutdown(shutdownCtx); err != nil {
errs = append(errs, fmt.Errorf("tracer shutdown: %w", err))
}
}
serverWg.Wait()
// Shutdown the meter provider
return meterProvider.Shutdown(shutdownCtx)
if meterProvider != nil {
if err := meterProvider.Shutdown(shutdownCtx); err != nil {
errs = append(errs, fmt.Errorf("meter shutdown: %w", err))
}
}
if logProvider != nil {
if err := logProvider.Shutdown(shutdownCtx); err != nil {
errs = append(errs, fmt.Errorf("logs shutdown: %w", err))
}
}
if len(errs) > 0 {
return fmt.Errorf("shutdown errors: %v", errs)
}
return nil
},
}, nil
}
// buildAtts returns a new attribute slice consisting of base plus an
// "eid" attribute identifying the telemetry envelope type (e.g. "METRIC",
// "API", "AUDIT"). The base slice itself is never mutated.
func buildAtts(base []attribute.KeyValue, eid string) []attribute.KeyValue {
	out := append(make([]attribute.KeyValue, 0, len(base)+1), base...)
	return append(out, attribute.String("eid", eid))
}

View File

@@ -22,15 +22,21 @@ func TestSetup_New_Success(t *testing.T) {
ServiceName: "test-service",
ServiceVersion: "1.0.0",
EnableMetrics: true,
EnableTracing: false,
Environment: "test",
Domain: "test-domain",
DeviceID: "test-device",
OtlpEndpoint: "localhost:4317",
TimeInterval: 5,
},
},
{
name: "Valid config with metrics disabled",
name: "Valid config with metrics and tracing disabled",
cfg: &Config{
ServiceName: "test-service",
ServiceVersion: "1.0.0",
EnableMetrics: false,
EnableTracing: false,
Environment: "test",
},
},
@@ -40,6 +46,7 @@ func TestSetup_New_Success(t *testing.T) {
ServiceName: "",
ServiceVersion: "",
EnableMetrics: true,
EnableTracing: false,
Environment: "",
},
},
@@ -56,10 +63,12 @@ func TestSetup_New_Success(t *testing.T) {
if tt.cfg.EnableMetrics {
assert.NotNil(t, provider.MeterProvider, "MeterProvider should be set when metrics enabled")
}
if tt.cfg.EnableTracing {
assert.NotNil(t, provider.TraceProvider, "TraceProvider should be set when tracing enabled")
}
// Test shutdown
err = provider.Shutdown(ctx)
assert.NoError(t, err, "Shutdown should not return error")
// Shutdown for cleanup. When metrics/tracing are enabled, shutdown may fail without a real OTLP backend.
_ = provider.Shutdown(ctx)
})
}
}
@@ -104,7 +113,10 @@ func TestSetup_New_DefaultValues(t *testing.T) {
ServiceName: "",
ServiceVersion: "",
EnableMetrics: true,
EnableTracing: false,
Environment: "",
OtlpEndpoint: "localhost:4317",
TimeInterval: 5,
}
provider, err := setup.New(ctx, cfg)
@@ -114,9 +126,8 @@ func TestSetup_New_DefaultValues(t *testing.T) {
// Verify defaults are applied by checking that provider is functional
assert.NotNil(t, provider.MeterProvider, "MeterProvider should be set with defaults")
// Cleanup
err = provider.Shutdown(ctx)
assert.NoError(t, err)
// Cleanup (shutdown may fail without a real OTLP backend)
_ = provider.Shutdown(ctx)
}
func TestSetup_New_MetricsDisabled(t *testing.T) {
@@ -127,6 +138,7 @@ func TestSetup_New_MetricsDisabled(t *testing.T) {
ServiceName: "test-service",
ServiceVersion: "1.0.0",
EnableMetrics: false,
EnableTracing: false,
Environment: "test",
}
@@ -134,8 +146,9 @@ func TestSetup_New_MetricsDisabled(t *testing.T) {
require.NoError(t, err)
require.NotNil(t, provider)
// When metrics are disabled, MetricsHandler should be nil and MeterProvider should be nil
// When metrics and tracing are disabled, MeterProvider and TraceProvider should be nil
assert.Nil(t, provider.MeterProvider, "MeterProvider should be nil when metrics disabled")
assert.Nil(t, provider.TraceProvider, "TraceProvider should be nil when tracing disabled")
// Shutdown should still work
err = provider.Shutdown(ctx)
@@ -155,32 +168,42 @@ func TestToPluginConfig_Success(t *testing.T) {
ServiceName: "test-service",
ServiceVersion: "1.0.0",
EnableMetrics: true,
EnableTracing: true,
Environment: "test",
Domain: "test-domain",
DeviceID: "test-device",
OtlpEndpoint: "localhost:4317",
TimeInterval: 5,
},
expectedID: "otelsetup",
expectedConfig: map[string]string{
"serviceName": "test-service",
"serviceVersion": "1.0.0",
"enableMetrics": "true",
"environment": "test",
"metricsPort": "",
"enableMetrics": "true",
"enableTracing": "true",
"otelEndpoint": "localhost:4317",
"deviceID": "test-device",
},
},
{
name: "Config with enableMetrics false",
name: "Config with enableMetrics and enableTracing false",
cfg: &Config{
ServiceName: "my-service",
ServiceVersion: "2.0.0",
EnableMetrics: false,
EnableTracing: false,
Environment: "production",
},
expectedID: "otelsetup",
expectedConfig: map[string]string{
"serviceName": "my-service",
"serviceVersion": "2.0.0",
"enableMetrics": "false",
"environment": "production",
"metricsPort": "",
"enableMetrics": "false",
"enableTracing": "false",
"otelEndpoint": "",
"deviceID": "",
},
},
{
@@ -189,15 +212,21 @@ func TestToPluginConfig_Success(t *testing.T) {
ServiceName: "",
ServiceVersion: "",
EnableMetrics: true,
EnableTracing: false,
Environment: "",
Domain: "",
DeviceID: "",
OtlpEndpoint: "",
},
expectedID: "otelsetup",
expectedConfig: map[string]string{
"serviceName": "",
"serviceVersion": "",
"enableMetrics": "true",
"environment": "",
"metricsPort": "",
"enableMetrics": "true",
"enableTracing": "false",
"otelEndpoint": "",
"deviceID": "",
},
},
}
@@ -224,19 +253,32 @@ func TestToPluginConfig_NilConfig(t *testing.T) {
func TestToPluginConfig_BooleanConversion(t *testing.T) {
tests := []struct {
name string
enableMetrics bool
expected string
name string
enableMetrics bool
enableTracing bool
expectedMetric string
expectedTrace string
}{
{
name: "EnableMetrics true",
enableMetrics: true,
expected: "true",
name: "EnableMetrics and EnableTracing true",
enableMetrics: true,
enableTracing: true,
expectedMetric: "true",
expectedTrace: "true",
},
{
name: "EnableMetrics false",
enableMetrics: false,
expected: "false",
name: "EnableMetrics and EnableTracing false",
enableMetrics: false,
enableTracing: false,
expectedMetric: "false",
expectedTrace: "false",
},
{
name: "EnableMetrics true, EnableTracing false",
enableMetrics: true,
enableTracing: false,
expectedMetric: "true",
expectedTrace: "false",
},
}
@@ -246,14 +288,18 @@ func TestToPluginConfig_BooleanConversion(t *testing.T) {
ServiceName: "test",
ServiceVersion: "1.0.0",
EnableMetrics: tt.enableMetrics,
EnableTracing: tt.enableTracing,
Environment: "test",
MetricsPort: "",
OtlpEndpoint: "localhost:4317",
DeviceID: "test-device",
}
result := ToPluginConfig(cfg)
require.NotNil(t, result)
assert.Equal(t, tt.expected, result.Config["enableMetrics"], "enableMetrics should be converted to string correctly")
assert.Equal(t, "", result.Config["metricsPort"], "metricsPort should be included even when empty")
assert.Equal(t, tt.expectedMetric, result.Config["enableMetrics"], "enableMetrics should be converted to string correctly")
assert.Equal(t, tt.expectedTrace, result.Config["enableTracing"], "enableTracing should be converted to string correctly")
assert.Equal(t, "localhost:4317", result.Config["otelEndpoint"], "otelEndpoint should be included")
assert.Equal(t, "test-device", result.Config["deviceID"], "deviceID should be included")
})
}
}

View File

@@ -48,6 +48,7 @@ func NewPreProcessor(cfg *Config) (func(http.Handler) http.Handler, error) {
http.Error(w, fmt.Sprintf("%s field not found or invalid.", contextKey), http.StatusBadRequest)
return
}
var subID any
switch cfg.Role {
case "bap":
@@ -55,6 +56,14 @@ func NewPreProcessor(cfg *Config) (func(http.Handler) http.Handler, error) {
case "bpp":
subID = reqContext["bpp_id"]
}
var callerID any
switch cfg.Role {
case "bap":
callerID = reqContext["bpp_id"]
case "bpp":
callerID = reqContext["bap_id"]
}
if subID != nil {
log.Debugf(ctx, "adding subscriberId to request:%s, %v", model.ContextKeySubscriberID, subID)
ctx = context.WithValue(ctx, model.ContextKeySubscriberID, subID)
@@ -64,6 +73,11 @@ func NewPreProcessor(cfg *Config) (func(http.Handler) http.Handler, error) {
log.Debugf(ctx, "adding parentID to request:%s, %v", model.ContextKeyParentID, cfg.ParentID)
ctx = context.WithValue(ctx, model.ContextKeyParentID, cfg.ParentID)
}
if callerID != nil {
log.Debugf(ctx, "adding callerID to request:%s, %v", model.ContextKeyCallerID, callerID)
ctx = context.WithValue(ctx, model.ContextKeyCallerID, callerID)
}
for _, key := range cfg.ContextKeys {
ctxKey, _ := model.ParseContextKey(key)
if v, ok := reqContext[key]; ok {

View File

@@ -15,7 +15,10 @@ import (
"github.com/beckn-one/beckn-onix/pkg/log"
"github.com/beckn-one/beckn-onix/pkg/model"
"github.com/beckn-one/beckn-onix/pkg/plugin/definition"
"github.com/beckn-one/beckn-onix/pkg/telemetry"
"github.com/google/uuid"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/trace"
)
// Config holds configuration parameters for SimpleKeyManager.
@@ -245,28 +248,43 @@ func (skm *SimpleKeyMgr) LookupNPKeys(ctx context.Context, subscriberID, uniqueK
return "", "", err
}
tracer := otel.Tracer(telemetry.ScopeName, trace.WithInstrumentationVersion(telemetry.ScopeVersion))
cacheKey := fmt.Sprintf("%s_%s", subscriberID, uniqueKeyID)
cachedData, err := skm.Cache.Get(ctx, cacheKey)
if err == nil {
var keys model.Keyset
if err := json.Unmarshal([]byte(cachedData), &keys); err == nil {
log.Debugf(ctx, "Found cached keys for subscriber: %s, uniqueKeyID: %s", subscriberID, uniqueKeyID)
return keys.SigningPublic, keys.EncrPublic, nil
var cachedData string
{
spanCtx, span := tracer.Start(ctx, "redis lookup")
defer span.End()
var err error
cachedData, err = skm.Cache.Get(spanCtx, cacheKey)
if err == nil {
var keys model.Keyset
if err := json.Unmarshal([]byte(cachedData), &keys); err == nil {
log.Debugf(ctx, "Found cached keys for subscriber: %s, uniqueKeyID: %s", subscriberID, uniqueKeyID)
return keys.SigningPublic, keys.EncrPublic, nil
}
}
}
log.Debugf(ctx, "Cache miss, looking up registry for subscriber: %s, uniqueKeyID: %s", subscriberID, uniqueKeyID)
subscribers, err := skm.Registry.Lookup(ctx, &model.Subscription{
Subscriber: model.Subscriber{
SubscriberID: subscriberID,
},
KeyID: uniqueKeyID,
})
if err != nil {
return "", "", fmt.Errorf("failed to lookup registry: %w", err)
}
if len(subscribers) == 0 {
return "", "", ErrSubscriberNotFound
var subscribers []model.Subscription
{
spanCtx, span := tracer.Start(ctx, "registry lookup")
defer span.End()
var err error
subscribers, err = skm.Registry.Lookup(spanCtx, &model.Subscription{
Subscriber: model.Subscriber{
SubscriberID: subscriberID,
},
KeyID: uniqueKeyID,
})
if err != nil {
return "", "", fmt.Errorf("failed to lookup registry: %w", err)
}
if len(subscribers) == 0 {
return "", "", ErrSubscriberNotFound
}
}
log.Debugf(ctx, "Successfully looked up keys for subscriber: %s, uniqueKeyID: %s", subscriberID, uniqueKeyID)

View File

@@ -197,9 +197,7 @@ func (m *Manager) Middleware(ctx context.Context, cfg *Config) (func(http.Handle
return mwp.New(ctx, cfg.Config)
}
// OtelSetup initializes OpenTelemetry via a dedicated plugin. The plugin is
// expected to return a telemetry Provider that the core application can use for
// instrumentation.
// OtelSetup initializes OpenTelemetry via a dedicated plugin. The plugin is expected to return a telemetry Provider that the core application can use for instrumentation.
func (m *Manager) OtelSetup(ctx context.Context, cfg *Config) (*telemetry.Provider, error) {
if cfg == nil {
log.Info(ctx, "Telemetry config not provided; skipping OpenTelemetry setup")

56
pkg/telemetry/audit.go Normal file
View File

@@ -0,0 +1,56 @@
package telemetry
import (
"context"
"crypto/sha256"
"encoding/hex"
"time"
logger "github.com/beckn-one/beckn-onix/pkg/log"
"github.com/beckn-one/beckn-onix/pkg/model"
"github.com/google/uuid"
"go.opentelemetry.io/otel/log"
"go.opentelemetry.io/otel/log/global"
)
// auditLoggerName is the instrumentation scope name under which audit
// records are emitted via the global OTEL logger provider.
const auditLoggerName = "Beckn_ONIX"
// EmitAuditLogs emits one audit log record for the given request body via
// the globally registered OTEL logger provider.
//
// The record body is the payload filtered down to the audit fields
// configured for the message's action (see selectAuditPayload). A SHA-256
// checksum of the ORIGINAL, unfiltered body is attached so the record can
// later be verified against the full message. Transaction, message and
// parent IDs are read from ctx (empty strings when absent), and any extra
// attrs are appended to the record.
func EmitAuditLogs(ctx context.Context, body []byte, attrs ...log.KeyValue) {
	provider := global.GetLoggerProvider()
	if provider == nil {
		logger.Warnf(ctx, "failed to emit audit logs, logs disabled")
		return
	}

	sum := sha256.Sum256(body)
	auditBody := selectAuditPayload(ctx, body)
	auditlog := provider.Logger(auditLoggerName)

	// Capture the clock once so Timestamp and ObservedTimestamp agree;
	// two separate time.Now() calls would record slightly different instants.
	now := time.Now()
	record := log.Record{}
	record.SetBody(log.StringValue(string(auditBody)))
	record.SetTimestamp(now)
	record.SetObservedTimestamp(now)
	record.SetSeverity(log.SeverityInfo)

	// Context lookups are best-effort: a missing key yields "".
	txnID, _ := ctx.Value(model.ContextKeyTxnID).(string)
	msgID, _ := ctx.Value(model.ContextKeyMsgID).(string)
	parentID, _ := ctx.Value(model.ContextKeyParentID).(string)
	record.AddAttributes(
		log.String("checkSum", hex.EncodeToString(sum[:])),
		log.String("log_uuid", uuid.New().String()),
		log.String("transaction_id", txnID),
		log.String("message_id", msgID),
		log.String("parent_id", parentID),
	)
	if len(attrs) > 0 {
		record.AddAttributes(attrs...)
	}
	auditlog.Emit(ctx, record)
}

View File

@@ -0,0 +1,216 @@
package telemetry
import (
"context"
"encoding/json"
"fmt"
"os"
"strings"
"sync"
"github.com/beckn-one/beckn-onix/pkg/log"
"gopkg.in/yaml.v3"
)
// auditFieldsRules mirrors the YAML schema of the audit-rules config file:
// a map from beckn action name to the list of dotted JSON field paths that
// should be retained in the audit payload for that action.
type auditFieldsRules struct {
	AuditRules map[string][]string `yaml:"auditRules"`
}

var (
	// auditRules is the active action -> field-path configuration; it is
	// replaced wholesale by LoadAuditFieldRules.
	auditRules = map[string][]string{}
	// auditRulesMutex guards concurrent reads/writes of auditRules.
	auditRulesMutex sync.RWMutex
)
// LoadAuditFieldRules reads the YAML file at configPath and installs its
// auditRules map as the process-wide audit field configuration. An empty
// path, an unreadable file, or invalid YAML is logged and returned as an
// error; a file with no auditRules section installs an empty rule set.
// Safe for concurrent use with readers of the rules.
func LoadAuditFieldRules(ctx context.Context, configPath string) error {
	if strings.TrimSpace(configPath) == "" {
		err := fmt.Errorf("config file path is empty")
		log.Error(ctx, err, "there are no audit rules defined")
		return err
	}

	data, err := os.ReadFile(configPath)
	if err != nil {
		log.Error(ctx, err, "failed to read audit rules file")
		return err
	}

	var parsed auditFieldsRules
	if err := yaml.Unmarshal(data, &parsed); err != nil {
		log.Error(ctx, err, "failed to parse audit rules file")
		return err
	}

	rules := parsed.AuditRules
	if rules == nil {
		log.Warn(ctx, "audit rules are not defined")
		rules = map[string][]string{}
	}

	auditRulesMutex.Lock()
	auditRules = rules
	auditRulesMutex.Unlock()

	log.Info(ctx, "audit rules loaded")
	return nil
}
// selectAuditPayload projects body down to the field paths configured for
// the message's context.action (falling back to the "default" rule set).
// It returns nil when the body is not valid JSON, when no rules apply, or
// when the projected payload cannot be re-marshalled.
func selectAuditPayload(ctx context.Context, body []byte) []byte {
	var root map[string]interface{}
	if err := json.Unmarshal(body, &root); err != nil {
		log.Warn(ctx, "failed to unmarshal audit payload ")
		return nil
	}

	// In a beckn envelope the action lives at context.action.
	var action string
	if envCtx, ok := root["context"].(map[string]interface{}); ok {
		if raw, ok := envCtx["action"].(string); ok {
			action = strings.TrimSpace(raw)
		}
	}

	fields := getFieldForAction(ctx, action)
	if len(fields) == 0 {
		return nil
	}

	// Project each dotted path out of the document and merge the partial
	// trees into a single filtered payload.
	selected := map[string]interface{}{}
	for _, field := range fields {
		partial, ok := projectPath(root, strings.Split(field, "."))
		if !ok {
			continue
		}
		if m, ok := deepMerge(selected, partial).(map[string]interface{}); ok {
			selected = m
		}
	}

	out, err := json.Marshal(selected)
	if err != nil {
		log.Warn(ctx, "failed to marshal audit payload")
		return nil
	}
	return out
}
// getFieldForAction returns the configured audit field paths for action.
// When the action is empty or has no non-empty entry, it warns and falls
// back to the "default" rule set (which may itself be nil).
func getFieldForAction(ctx context.Context, action string) []string {
	auditRulesMutex.RLock()
	defer auditRulesMutex.RUnlock()

	// A missing key yields a nil slice, so one lookup covers both the
	// "not configured" and "configured but empty" cases.
	if fields := auditRules[action]; action != "" && len(fields) > 0 {
		return fields
	}
	log.Warn(ctx, "audit rules are not defined for this action send default")
	return auditRules["default"]
}
//func getByPath(root map[string]interface{}, path string) (interface{}, bool) {
//
// parts := strings.Split(path, ".")
// var cur interface{} = root
//
// for _, part := range parts {
// m, ok := cur.(map[string]interface{})
// if !ok {
// return nil, false
// }
// v, ok := m[part]
// if !ok {
// return nil, false
// }
// cur = v
// }
// return cur, true
//}
//
//func setByPath(root map[string]interface{}, path string, value interface{}) {
// parts := strings.Split(path, ".")
// cur := root
//
// for i := 0; i < len(parts)-1; i++ {
// k := parts[i]
// next, ok := cur[k].(map[string]interface{})
// if !ok {
// next = map[string]interface{}{}
// cur[k] = next
// }
// cur = next
// }
// cur[parts[len(parts)-1]] = value
//}
// projectPath recursively extracts the sub-tree of cur addressed by parts
// (a dotted field path split into segments). Maps are descended key by
// key, and the result is rebuilt so only the addressed branch survives.
// Slices are projected element-wise, keeping only the elements where the
// path resolves. The boolean is false when the path matches nothing.
func projectPath(cur interface{}, parts []string) (interface{}, bool) {
	// An exhausted path means the whole current node is selected.
	if len(parts) == 0 {
		return cur, true
	}

	switch node := cur.(type) {
	case map[string]interface{}:
		value, present := node[parts[0]]
		if !present {
			return nil, false
		}
		child, ok := projectPath(value, parts[1:])
		if !ok {
			return nil, false
		}
		return map[string]interface{}{parts[0]: child}, true

	case []interface{}:
		var projected []interface{}
		for _, elem := range node {
			if child, ok := projectPath(elem, parts); ok {
				projected = append(projected, child)
			}
		}
		if projected == nil {
			return nil, false
		}
		return projected, true

	default:
		// Scalars cannot be descended into.
		return nil, false
	}
}
// deepMerge recursively merges src into dst and returns the merged value.
// Maps merge key-by-key (dst maps are mutated in place); slices merge
// element-wise, growing dst with nil slots when src is longer. For any other
// combination of types — including a nil dst — src wins and is returned.
func deepMerge(dst, src interface{}) interface{} {
	if dst == nil {
		return src
	}

	switch d := dst.(type) {
	case map[string]interface{}:
		s, ok := src.(map[string]interface{})
		if !ok {
			return src
		}
		for key, srcVal := range s {
			if existing, present := d[key]; present {
				d[key] = deepMerge(existing, srcVal)
			} else {
				d[key] = srcVal
			}
		}
		return d
	case []interface{}:
		s, ok := src.([]interface{})
		if !ok {
			return src
		}
		// Pad dst so every src element has a merge target.
		for len(d) < len(s) {
			d = append(d, nil)
		}
		for i, srcVal := range s {
			d[i] = deepMerge(d[i], srcVal)
		}
		return d
	default:
		return src
	}
}

View File

@@ -0,0 +1,518 @@
package telemetry
import (
"context"
"encoding/json"
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Test projectPath
// TestProjectPath_EmptyParts verifies that an empty (nil or zero-length)
// parts slice returns the input value unchanged with a true match.
func TestProjectPath_EmptyParts(t *testing.T) {
	root := map[string]interface{}{"a": "v"}
	got, ok := projectPath(root, nil)
	require.True(t, ok)
	assert.Equal(t, root, got)
	got, ok = projectPath(root, []string{})
	require.True(t, ok)
	assert.Equal(t, root, got)
}
// TestProjectPath_MapSingleLevel verifies that a one-segment path keeps the
// whole subtree under that key.
func TestProjectPath_MapSingleLevel(t *testing.T) {
	root := map[string]interface{}{"context": map[string]interface{}{"action": "search"}}
	got, ok := projectPath(root, []string{"context"})
	require.True(t, ok)
	assert.Equal(t, map[string]interface{}{"context": map[string]interface{}{"action": "search"}}, got)
}
// TestProjectPath_MapNested verifies that a two-segment path keeps only the
// nested leaf and drops sibling keys.
func TestProjectPath_MapNested(t *testing.T) {
	root := map[string]interface{}{
		"context": map[string]interface{}{
			"action": "select",
			"transaction_id": "tx-1",
		},
	}
	got, ok := projectPath(root, []string{"context", "action"})
	require.True(t, ok)
	assert.Equal(t, map[string]interface{}{"context": map[string]interface{}{"action": "select"}}, got)
}
// TestProjectPath_MissingKey verifies that a path whose key is absent reports
// no match and returns nil.
func TestProjectPath_MissingKey(t *testing.T) {
	root := map[string]interface{}{"context": map[string]interface{}{"action": "search"}}
	got, ok := projectPath(root, []string{"context", "missing"})
	require.False(t, ok)
	assert.Nil(t, got)
}
// TestProjectPath_ArrayTraverseAndProject verifies that arrays are traversed
// transparently: the remaining path segments are projected into each element.
func TestProjectPath_ArrayTraverseAndProject(t *testing.T) {
	root := map[string]interface{}{
		"message": map[string]interface{}{
			"order": map[string]interface{}{
				"beckn:orderItems": []interface{}{
					map[string]interface{}{"beckn:orderedItem": "item-1"},
					map[string]interface{}{"beckn:orderedItem": "item-2"},
				},
			},
		},
	}
	parts := []string{"message", "order", "beckn:orderItems", "beckn:orderedItem"}
	got, ok := projectPath(root, parts)
	require.True(t, ok)
	expected := map[string]interface{}{
		"message": map[string]interface{}{
			"order": map[string]interface{}{
				"beckn:orderItems": []interface{}{
					map[string]interface{}{"beckn:orderedItem": "item-1"},
					map[string]interface{}{"beckn:orderedItem": "item-2"},
				},
			},
		},
	}
	assert.Equal(t, expected, got)
}
// TestProjectPath_NonMapOrSlice verifies that scalar values cannot be
// traversed further.
func TestProjectPath_NonMapOrSlice(t *testing.T) {
	_, ok := projectPath("string", []string{"a"})
	require.False(t, ok)
	_, ok = projectPath(42, []string{"a"})
	require.False(t, ok)
}
// TestProjectPath_EmptyArray verifies that projecting through an empty array
// yields no match.
func TestProjectPath_EmptyArray(t *testing.T) {
	root := map[string]interface{}{"items": []interface{}{}}
	got, ok := projectPath(root, []string{"items", "id"})
	require.False(t, ok)
	assert.Nil(t, got)
}
// Test deepMerge
// TestDeepMerge_NilDst verifies that merging into a nil destination returns
// src as-is.
func TestDeepMerge_NilDst(t *testing.T) {
	src := map[string]interface{}{"a": 1}
	got := deepMerge(nil, src)
	assert.Equal(t, src, got)
}
// TestDeepMerge_MapIntoMap verifies key-wise merge with src values overriding
// dst on conflict.
func TestDeepMerge_MapIntoMap(t *testing.T) {
	dst := map[string]interface{}{"a": 1, "b": 2}
	src := map[string]interface{}{"b": 20, "c": 3}
	got := deepMerge(dst, src)
	assert.Equal(t, map[string]interface{}{"a": 1, "b": 20, "c": 3}, got)
}
// TestDeepMerge_MapNested verifies nested maps are merged recursively,
// preserving dst-only keys and adding src-only keys.
func TestDeepMerge_MapNested(t *testing.T) {
	dst := map[string]interface{}{
		"context": map[string]interface{}{"action": "search", "domain": "retail"},
	}
	src := map[string]interface{}{
		"context": map[string]interface{}{"action": "search", "transaction_id": "tx-1"},
	}
	got := deepMerge(dst, src)
	ctx, ok := got.(map[string]interface{})["context"].(map[string]interface{})
	require.True(t, ok)
	assert.Equal(t, "search", ctx["action"])
	assert.Equal(t, "retail", ctx["domain"])
	assert.Equal(t, "tx-1", ctx["transaction_id"])
}
// TestDeepMerge_ArrayIntoArray verifies element-wise merge of equal-length
// arrays.
func TestDeepMerge_ArrayIntoArray(t *testing.T) {
	dst := []interface{}{
		map[string]interface{}{"id": "a"},
		map[string]interface{}{"id": "b"},
	}
	src := []interface{}{
		map[string]interface{}{"id": "a", "name": "A"},
		map[string]interface{}{"id": "b", "name": "B"},
	}
	got := deepMerge(dst, src)
	sl, ok := got.([]interface{})
	require.True(t, ok)
	require.Len(t, sl, 2)
	assert.Equal(t, map[string]interface{}{"id": "a", "name": "A"}, sl[0])
	assert.Equal(t, map[string]interface{}{"id": "b", "name": "B"}, sl[1])
}
// TestDeepMerge_ArraySrcLonger verifies dst is grown when src has more
// elements than dst.
func TestDeepMerge_ArraySrcLonger(t *testing.T) {
	dst := []interface{}{map[string]interface{}{"a": 1}}
	src := []interface{}{
		map[string]interface{}{"a": 1},
		map[string]interface{}{"a": 2},
	}
	got := deepMerge(dst, src)
	sl, ok := got.([]interface{})
	require.True(t, ok)
	require.Len(t, sl, 2)
}
// TestDeepMerge_ScalarSrc verifies a scalar src replaces a map dst entirely.
func TestDeepMerge_ScalarSrc(t *testing.T) {
	dst := map[string]interface{}{"a": 1}
	src := "overwrite"
	got := deepMerge(dst, src)
	assert.Equal(t, "overwrite", got)
}
// Test getFieldForAction and selectAuditPayload (require loaded rules via temp file)
// writeAuditRulesFile writes the given YAML content to a temp file and
// returns its path; cleanup happens automatically via the test's temp dir.
func writeAuditRulesFile(t *testing.T, content string) string {
	t.Helper()
	dir := t.TempDir()
	path := filepath.Join(dir, "audit-fields.yaml")
	err := os.WriteFile(path, []byte(content), 0600)
	require.NoError(t, err)
	return path
}
// TestGetFieldForAction_ActionMatch verifies action-specific rules win over
// the default rule set.
func TestGetFieldForAction_ActionMatch(t *testing.T) {
	ctx := context.Background()
	path := writeAuditRulesFile(t, `
auditRules:
  default:
    - context.transaction_id
    - context.action
  search:
    - context.action
    - context.timestamp
  select:
    - context.action
    - message.order
`)
	require.NoError(t, LoadAuditFieldRules(ctx, path))
	fields := getFieldForAction(ctx, "search")
	assert.Equal(t, []string{"context.action", "context.timestamp"}, fields)
	fields = getFieldForAction(ctx, "select")
	assert.Equal(t, []string{"context.action", "message.order"}, fields)
}
// TestGetFieldForAction_FallbackToDefault verifies unknown or empty actions
// fall back to the "default" rule set.
func TestGetFieldForAction_FallbackToDefault(t *testing.T) {
	ctx := context.Background()
	path := writeAuditRulesFile(t, `
auditRules:
  default:
    - context.transaction_id
    - context.message_id
  search:
    - context.action
`)
	require.NoError(t, LoadAuditFieldRules(ctx, path))
	fields := getFieldForAction(ctx, "unknown_action")
	assert.Equal(t, []string{"context.transaction_id", "context.message_id"}, fields)
	fields = getFieldForAction(ctx, "")
	assert.Equal(t, []string{"context.transaction_id", "context.message_id"}, fields)
}
// TestGetFieldForAction_EmptyDefault verifies an empty default rule set
// yields an empty field list for unmatched actions.
func TestGetFieldForAction_EmptyDefault(t *testing.T) {
	ctx := context.Background()
	path := writeAuditRulesFile(t, `
auditRules:
  default: []
  search:
    - context.action
`)
	require.NoError(t, LoadAuditFieldRules(ctx, path))
	fields := getFieldForAction(ctx, "other")
	assert.Empty(t, fields)
}
// TestSelectAuditPayload_InvalidJSON verifies that a non-JSON body yields a
// nil audit payload rather than an error or panic.
func TestSelectAuditPayload_InvalidJSON(t *testing.T) {
	ctx := context.Background()
	path := writeAuditRulesFile(t, `
auditRules:
  default:
    - context.action
`)
	require.NoError(t, LoadAuditFieldRules(ctx, path))
	got := selectAuditPayload(ctx, []byte("not json"))
	assert.Nil(t, got)
}
// TestSelectAuditPayload_NoRulesLoaded verifies that an empty default rule
// set produces a nil audit payload.
func TestSelectAuditPayload_NoRulesLoaded(t *testing.T) {
	ctx := context.Background()
	// use a fresh context without loading any rules; auditRules may be from previous test
	path := writeAuditRulesFile(t, `
auditRules:
  default: []
`)
	require.NoError(t, LoadAuditFieldRules(ctx, path))
	body := []byte(`{"context":{"action":"search"}}`)
	got := selectAuditPayload(ctx, body)
	assert.Nil(t, got)
}
// TestSelectAuditPayload_ContextAndActionOnly verifies that only the
// configured context fields survive projection and message is dropped.
func TestSelectAuditPayload_ContextAndActionOnly(t *testing.T) {
	ctx := context.Background()
	path := writeAuditRulesFile(t, `
auditRules:
  default:
    - context.transaction_id
    - context.message_id
    - context.action
`)
	require.NoError(t, LoadAuditFieldRules(ctx, path))
	body := []byte(`{
		"context": {
			"action": "search",
			"transaction_id": "tx-1",
			"message_id": "msg-1",
			"domain": "retail"
		},
		"message": {"intent": "buy"}
	}`)
	got := selectAuditPayload(ctx, body)
	require.NotNil(t, got)
	var out map[string]interface{}
	require.NoError(t, json.Unmarshal(got, &out))
	ctxMap, ok := out["context"].(map[string]interface{})
	require.True(t, ok)
	assert.Equal(t, "search", ctxMap["action"])
	assert.Equal(t, "tx-1", ctxMap["transaction_id"])
	assert.Equal(t, "msg-1", ctxMap["message_id"])
	_, hasMessage := out["message"]
	assert.False(t, hasMessage)
}
// TestSelectAuditPayload_ActionSpecificRules verifies that rules keyed by the
// payload's context.action are applied instead of the defaults.
func TestSelectAuditPayload_ActionSpecificRules(t *testing.T) {
	ctx := context.Background()
	path := writeAuditRulesFile(t, `
auditRules:
  default:
    - context.action
  search:
    - context.action
    - context.timestamp
    - message.intent
`)
	require.NoError(t, LoadAuditFieldRules(ctx, path))
	body := []byte(`{
		"context": {"action": "search", "timestamp": "2024-01-15T10:30:00Z", "domain": "retail"},
		"message": {"intent": {"item": {"id": "x"}}}
	}`)
	got := selectAuditPayload(ctx, body)
	require.NotNil(t, got)
	var out map[string]interface{}
	require.NoError(t, json.Unmarshal(got, &out))
	ctxMap := out["context"].(map[string]interface{})
	assert.Equal(t, "search", ctxMap["action"])
	assert.Equal(t, "2024-01-15T10:30:00Z", ctxMap["timestamp"])
	msg := out["message"].(map[string]interface{})
	assert.NotNil(t, msg["intent"])
}
// TestSelectAuditPayload_ArrayFieldProjection verifies that an array segment
// in a rule path projects the named field from every array element.
func TestSelectAuditPayload_ArrayFieldProjection(t *testing.T) {
	ctx := context.Background()
	path := writeAuditRulesFile(t, `
auditRules:
  default:
    - context.action
  select:
    - context.transaction_id
    - context.action
    - message.order.beckn:orderItems.beckn:orderedItem
`)
	require.NoError(t, LoadAuditFieldRules(ctx, path))
	body := []byte(`{
		"context": {"action": "select", "transaction_id": "tx-2"},
		"message": {
			"order": {
				"beckn:orderItems": [
					{"beckn:orderedItem": "item-A", "other": "x"},
					{"beckn:orderedItem": "item-B", "other": "y"}
				]
			}
		}
	}`)
	got := selectAuditPayload(ctx, body)
	require.NotNil(t, got)
	var out map[string]interface{}
	require.NoError(t, json.Unmarshal(got, &out))
	ctxMap := out["context"].(map[string]interface{})
	assert.Equal(t, "select", ctxMap["action"])
	assert.Equal(t, "tx-2", ctxMap["transaction_id"])
	order := out["message"].(map[string]interface{})["order"].(map[string]interface{})
	items := order["beckn:orderItems"].([]interface{})
	require.Len(t, items, 2)
	assert.Equal(t, map[string]interface{}{"beckn:orderedItem": "item-A"}, items[0])
	assert.Equal(t, map[string]interface{}{"beckn:orderedItem": "item-B"}, items[1])
}
// TestSelectAuditPayload_SelectOrderExample uses a full select request payload and
// select audit rules to verify that only configured fields are projected into the
// audit log body. The request mirrors a real select with context, message.order,
// beckn:orderItems (array), beckn:acceptedOffer, and beckn:orderAttributes.
// Rules include array traversal (e.g. message.order.beckn:orderItems.beckn:orderedItem
// projects that field from each array element) and nested paths like
// message.order.beckn:orderItems.beckn:acceptedOffer.beckn:price.value.
func TestSelectAuditPayload_SelectOrderExample(t *testing.T) {
	ctx := context.Background()
	path := writeAuditRulesFile(t, `
auditRules:
  default: []
  select:
    - context.transaction_id
    - context.message_id
    - context.action
    - context.timestamp
    - message.order
    - message.order.beckn:seller
    - message.order.beckn:buyer
    - message.order.beckn:buyer.beckn:id
    - message.order.beckn:orderItems
    - message.order.beckn:orderItems.beckn:orderedItem
    - message.order.beckn:orderItems.beckn:acceptedOffer
    - message.order.beckn:orderItems.beckn:acceptedOffer.beckn:id
    - message.order.beckn:orderItems.beckn:acceptedOffer.beckn:price
    - message.order.beckn:orderItems.beckn:acceptedOffer.beckn:price.value
    - message.order.beckn:orderAttributes
    - message.order.beckn:orderAttributes.preferences
    - message.order.beckn:orderAttributes.preferences.startTime
`)
	require.NoError(t, LoadAuditFieldRules(ctx, path))
	// Full select request example: context (version, action, domain, timestamp, ids, URIs, ttl)
	// and message.order with orderStatus, seller, buyer, orderItems array (orderedItem, quantity,
	// acceptedOffer with id, descriptor, items, provider, price), orderAttributes (buyerFinderFee, preferences).
	body := []byte(`{
		"context": {
			"version": "1.0.0",
			"action": "select",
			"domain": "ev_charging",
			"timestamp": "2024-01-15T10:30:00Z",
			"message_id": "bb9f86db-9a3d-4e9c-8c11-81c8f1a7b901",
			"transaction_id": "2b4d69aa-22e4-4c78-9f56-5a7b9e2b2002",
			"bap_id": "bap.example.com",
			"bap_uri": "https://bap.example.com",
			"ttl": "PT30S",
			"bpp_id": "bpp.example.com",
			"bpp_uri": "https://bpp.example.com"
		},
		"message": {
			"order": {
				"@context": "https://raw.githubusercontent.com/beckn/protocol-specifications-new/refs/heads/main/schema/core/v2/context.jsonld",
				"@type": "beckn:Order",
				"beckn:orderStatus": "CREATED",
				"beckn:seller": "ecopower-charging",
				"beckn:buyer": {
					"@context": "https://raw.githubusercontent.com/beckn/protocol-specifications-new/refs/heads/main/schema/core/v2/context.jsonld",
					"@type": "beckn:Buyer",
					"beckn:id": "user-123",
					"beckn:role": "BUYER",
					"beckn:displayName": "Ravi Kumar",
					"beckn:telephone": "+91-9876543210",
					"beckn:email": "ravi.kumar@example.com",
					"beckn:taxID": "GSTIN29ABCDE1234F1Z5"
				},
				"beckn:orderItems": [
					{
						"beckn:orderedItem": "IND*ecopower-charging*cs-01*IN*ECO*BTM*01*CCS2*A*CCS2-A",
						"beckn:quantity": {
							"unitText": "Kilowatt Hour",
							"unitCode": "KWH",
							"unitQuantity": 2.5
						},
						"beckn:acceptedOffer": {
							"@context": "https://raw.githubusercontent.com/beckn/protocol-specifications-new/refs/heads/main/schema/core/v2/context.jsonld",
							"@type": "beckn:Offer",
							"beckn:id": "offer-ccs2-60kw-kwh",
							"beckn:descriptor": {
								"@type": "beckn:Descriptor",
								"schema:name": "Per-kWh Tariff - CCS2 60kW"
							},
							"beckn:items": [
								"IND*ecopower-charging*cs-01*IN*ECO*BTM*01*CCS2*A*CCS2-A"
							],
							"beckn:provider": "ecopower-charging",
							"beckn:price": {
								"currency": "INR",
								"value": 45.0,
								"applicableQuantity": {
									"unitText": "Kilowatt Hour",
									"unitCode": "KWH",
									"unitQuantity": 1
								}
							}
						}
					}
				],
				"beckn:orderAttributes": {
					"@context": "https://raw.githubusercontent.com/beckn/protocol-specifications-new/refs/heads/main/schema/EvChargingSession/v1/context.jsonld",
					"@type": "ChargingSession",
					"buyerFinderFee": {
						"feeType": "PERCENTAGE",
						"feeValue": 2.5
					},
					"preferences": {
						"startTime": "2026-01-04T08:00:00+05:30",
						"endTime": "2026-01-04T20:00:00+05:30"
					}
				}
			}
		}
	}`)
	got := selectAuditPayload(ctx, body)
	require.NotNil(t, got, "selectAuditPayload should return projected body for select action")
	var out map[string]interface{}
	require.NoError(t, json.Unmarshal(got, &out))
	// Context: only transaction_id, message_id, action, timestamp
	ctxMap, ok := out["context"].(map[string]interface{})
	require.True(t, ok)
	assert.Equal(t, "select", ctxMap["action"])
	assert.Equal(t, "2b4d69aa-22e4-4c78-9f56-5a7b9e2b2002", ctxMap["transaction_id"])
	assert.Equal(t, "bb9f86db-9a3d-4e9c-8c11-81c8f1a7b901", ctxMap["message_id"])
	assert.Equal(t, "2024-01-15T10:30:00Z", ctxMap["timestamp"])
	_, hasBapID := ctxMap["bap_id"]
	assert.False(t, hasBapID, "context should not include bap_id when not in audit rules")
	// message.order: full order merged with projected array fields
	msg, ok := out["message"].(map[string]interface{})
	require.True(t, ok)
	order, ok := msg["order"].(map[string]interface{})
	require.True(t, ok)
	assert.Equal(t, "ecopower-charging", order["beckn:seller"])
	buyer, ok := order["beckn:buyer"].(map[string]interface{})
	require.True(t, ok)
	assert.Equal(t, "user-123", buyer["beckn:id"])
	// beckn:orderItems: array with projected fields from each element (beckn:orderedItem, beckn:acceptedOffer with id, price, price.value)
	items, ok := order["beckn:orderItems"].([]interface{})
	require.True(t, ok)
	require.Len(t, items, 1)
	item0, ok := items[0].(map[string]interface{})
	require.True(t, ok)
	assert.Equal(t, "IND*ecopower-charging*cs-01*IN*ECO*BTM*01*CCS2*A*CCS2-A", item0["beckn:orderedItem"])
	acceptedOffer, ok := item0["beckn:acceptedOffer"].(map[string]interface{})
	require.True(t, ok)
	assert.Equal(t, "offer-ccs2-60kw-kwh", acceptedOffer["beckn:id"])
	price, ok := acceptedOffer["beckn:price"].(map[string]interface{})
	require.True(t, ok)
	// JSON numbers decode as float64, so 45.0 compares directly.
	assert.Equal(t, 45.0, price["value"])
	// beckn:orderAttributes: only preferences and preferences.startTime
	orderAttrs, ok := order["beckn:orderAttributes"].(map[string]interface{})
	require.True(t, ok)
	prefs, ok := orderAttrs["preferences"].(map[string]interface{})
	require.True(t, ok)
	assert.Equal(t, "2026-01-04T08:00:00+05:30", prefs["startTime"])
}

View File

@@ -2,9 +2,9 @@ package telemetry
import (
"context"
"net/http/httptest"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -13,16 +13,31 @@ func TestNewProviderAndMetrics(t *testing.T) {
provider, err := NewTestProvider(ctx)
require.NoError(t, err)
require.NotNil(t, provider)
require.NotNil(t, provider.MetricsHandler)
require.NotNil(t, provider.MeterProvider, "MeterProvider should be set")
metrics, err := GetMetrics(ctx)
require.NoError(t, err)
require.NotNil(t, metrics)
rec := httptest.NewRecorder()
req := httptest.NewRequest("GET", "/metrics", nil)
provider.MetricsHandler.ServeHTTP(rec, req)
require.Equal(t, 200, rec.Code)
require.NoError(t, provider.Shutdown(context.Background()))
require.NoError(t, provider.Shutdown(ctx))
}
// TestNewProviderAndTraces verifies the trace-enabled test provider records
// spans via the in-memory SpanRecorder and shuts down cleanly.
func TestNewProviderAndTraces(t *testing.T) {
	ctx := context.Background()
	provider, sr, err := NewTestProviderWithTrace(ctx)
	require.NoError(t, err)
	require.NotNil(t, provider)
	require.NotNil(t, provider.MeterProvider, "MeterProvider should be set")
	require.NotNil(t, provider.TraceProvider, "TraceProvider should be set")
	require.NotNil(t, sr, "SpanRecorder should be set")
	tracer := provider.TraceProvider.Tracer("test-instrumentation")
	_, span := tracer.Start(ctx, "test-span")
	span.End()
	ended := sr.Ended()
	require.Len(t, ended, 1, "exactly one span should be recorded")
	assert.Equal(t, "test-span", ended[0].Name(), "recorded span should have expected name")
	require.NoError(t, provider.Shutdown(ctx))
}

View File

@@ -30,24 +30,52 @@ var (
// Attribute keys shared across instruments.
var (
AttrModule = attribute.Key("module")
AttrSubsystem = attribute.Key("subsystem")
AttrName = attribute.Key("name")
AttrStep = attribute.Key("step")
AttrRole = attribute.Key("role")
AttrAction = attribute.Key("action")
AttrHTTPMethod = attribute.Key("http_method")
AttrHTTPStatus = attribute.Key("http_status_code")
AttrStatus = attribute.Key("status")
AttrErrorType = attribute.Key("error_type")
AttrPluginID = attribute.Key("plugin_id")
AttrPluginType = attribute.Key("plugin_type")
AttrOperation = attribute.Key("operation")
AttrRouteType = attribute.Key("route_type")
AttrTargetType = attribute.Key("target_type")
AttrSchemaVersion = attribute.Key("schema_version")
AttrModule = attribute.Key("module")
AttrCaller = attribute.Key("caller") // who is calling bab/bpp with there name
AttrStep = attribute.Key("step")
AttrRole = attribute.Key("role")
AttrAction = attribute.Key("action") // action is context.action
AttrHTTPStatus = attribute.Key("http_status_code") // status code is 2xx/3xx/4xx/5xx
AttrStatus = attribute.Key("status")
AttrErrorType = attribute.Key("error_type")
AttrPluginID = attribute.Key("plugin_id") // id for the plugine
AttrPluginType = attribute.Key("plugin_type") // type for the plugine
AttrOperation = attribute.Key("operation")
AttrRouteType = attribute.Key("route_type") // publish/ uri
AttrTargetType = attribute.Key("target_type")
AttrSchemaVersion = attribute.Key("schema_version")
AttrMetricUUID = attribute.Key("metric_uuid")
AttrMetricCode = attribute.Key("metric.code")
AttrMetricCategory = attribute.Key("metric.category")
AttrMetricGranularity = attribute.Key("metric.granularity")
AttrMetricFrequency = attribute.Key("metric.frequency")
AttrObservedTimeUnixNano = attribute.Key("observedTimeUnixNano")
AttrMatricLabels = attribute.Key("metric.labels")
)
// Network-metrics reporting configuration, guarded by networkMetricsCfgMu.
// Defaults are applied until SetNetworkMetricsConfig overrides them.
var (
	networkMetricsCfgMu sync.RWMutex
	// Fixed typo: defaults were "10mim", clearly intended to be "10min".
	networkMetricsGranularity = "10min" // default reporting granularity
	networkMetricsFrequency   = "10min" // default reporting frequency
)
// SetNetworkMetricsConfig overrides the reporting granularity and frequency
// used for network metrics. An empty string leaves the corresponding current
// value unchanged, so either setting can be updated independently.
// Safe for concurrent use.
func SetNetworkMetricsConfig(granularity, frequency string) {
	networkMetricsCfgMu.Lock()
	defer networkMetricsCfgMu.Unlock()
	if granularity != "" {
		networkMetricsGranularity = granularity
	}
	if frequency != "" {
		networkMetricsFrequency = frequency
	}
}
// GetNetworkMetricsConfig returns the currently configured network-metrics
// granularity and frequency. Safe for concurrent use.
func GetNetworkMetricsConfig() (granularity, frequency string) {
	networkMetricsCfgMu.RLock()
	defer networkMetricsCfgMu.RUnlock()
	return networkMetricsGranularity, networkMetricsFrequency
}
// GetMetrics lazily initializes instruments and returns a cached reference.
func GetMetrics(ctx context.Context) (*Metrics, error) {
metricsOnce.Do(func() {
@@ -58,8 +86,8 @@ func GetMetrics(ctx context.Context) (*Metrics, error) {
func newMetrics() (*Metrics, error) {
meter := otel.GetMeterProvider().Meter(
"github.com/beckn-one/beckn-onix/telemetry",
metric.WithInstrumentationVersion("1.0.0"),
ScopeName,
metric.WithInstrumentationVersion(ScopeVersion),
)
m := &Metrics{}

View File

@@ -2,14 +2,21 @@ package telemetry
import (
"context"
"net/http"
"go.opentelemetry.io/otel/sdk/log"
"go.opentelemetry.io/otel/sdk/metric"
"go.opentelemetry.io/otel/sdk/trace"
)
const (
ScopeName = "beckn-onix"
ScopeVersion = "v2.0.0"
)
// Provider holds references to telemetry components that need coordinated shutdown.
type Provider struct {
MeterProvider *metric.MeterProvider
MetricsHandler http.Handler
Shutdown func(context.Context) error
MeterProvider *metric.MeterProvider
TraceProvider *trace.TracerProvider
LogProvider *log.LoggerProvider
Shutdown func(context.Context) error
}

View File

@@ -4,12 +4,13 @@ import (
"context"
clientprom "github.com/prometheus/client_golang/prometheus"
clientpromhttp "github.com/prometheus/client_golang/prometheus/promhttp"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
otelprom "go.opentelemetry.io/otel/exporters/prometheus"
"go.opentelemetry.io/otel/sdk/metric"
"go.opentelemetry.io/otel/sdk/resource"
"go.opentelemetry.io/otel/sdk/trace"
"go.opentelemetry.io/otel/sdk/trace/tracetest"
)
// NewTestProvider creates a minimal telemetry provider for testing purposes.
@@ -45,10 +46,56 @@ func NewTestProvider(ctx context.Context) (*Provider, error) {
otel.SetMeterProvider(meterProvider)
return &Provider{
MeterProvider: meterProvider,
MetricsHandler: clientpromhttp.HandlerFor(registry, clientpromhttp.HandlerOpts{}),
MeterProvider: meterProvider,
Shutdown: func(ctx context.Context) error {
return meterProvider.Shutdown(ctx)
},
}, nil
}
// NewTestProviderWithTrace creates a telemetry provider with both metrics and
// tracing enabled, using an in-memory span recorder. It returns the provider
// and the SpanRecorder so tests can assert on recorded spans.
func NewTestProviderWithTrace(ctx context.Context) (*Provider, *tracetest.SpanRecorder, error) {
provider, err := NewTestProvider(ctx)
if err != nil {
return nil, nil, err
}
res, err := resource.New(
ctx,
resource.WithAttributes(
attribute.String("service.name", "test-service"),
attribute.String("service.version", "test"),
attribute.String("deployment.environment", "test"),
),
)
if err != nil {
return nil, nil, err
}
sr := tracetest.NewSpanRecorder()
traceProvider := trace.NewTracerProvider(
trace.WithSpanProcessor(sr),
trace.WithResource(res),
)
otel.SetTracerProvider(traceProvider)
return &Provider{
MeterProvider: provider.MeterProvider,
TraceProvider: traceProvider,
Shutdown: func(ctx context.Context) error {
var errs []error
if err := traceProvider.Shutdown(ctx); err != nil {
errs = append(errs, err)
}
if err := provider.MeterProvider.Shutdown(ctx); err != nil {
errs = append(errs, err)
}
if len(errs) > 0 {
return errs[0]
}
return nil
},
}, sr, nil
}