From 2570a11b4856c1bf186438bceeb851adb6114c0c Mon Sep 17 00:00:00 2001 From: Jens Date: Wed, 9 Aug 2023 09:10:16 +0200 Subject: [PATCH] Add kamon-pekko instrumentation (#1264) * Add kamon-pekko module for Apache Pekko support Replace with advice should be in correct package * Use released 1.0.0 for Apache Pekko * Update sbt to 1.9.2 * Update Pekko to 1.0.1 https://pekko.apache.org/docs/pekko/1.0/release-notes/index.html#1-0-1 * Change scaladoc references from Akka to Pekko --- build.sbt | 9 + instrumentation/kamon-pekko/build.sbt | 37 + .../ActorCellInvokeAdvice.java | 40 + .../instrumentations/PekkoPrivateAccess.java | 64 + .../SchedulerRunnableAdvice.java | 50 + .../instrumentation/ReplaceWithAdvice.java | 21 + .../remote/ContextAwareWireFormats_Pekko.java | 2802 +++++++++++++++++ .../src/main/protobuf/ContainerFormats.proto | 99 + .../protobuf/ContextAwareWireFormats.proto | 30 + .../src/main/protobuf/WireFormats.proto | 222 ++ .../src/main/resources/reference.conf | 210 ++ .../pekko/PekkoClusterShardingMetrics.scala | 118 + .../pekko/PekkoInstrumentation.scala | 133 + .../instrumentation/pekko/PekkoMetrics.scala | 193 ++ .../pekko/PekkoRemoteInstrumentation.scala | 27 + .../pekko/PekkoRemoteMetrics.scala | 45 + .../instrumentations/ActorCellInfo.scala | 104 + .../ActorInstrumentation.scala | 147 + .../ActorLoggingInstrumentation.scala | 48 + .../pekko/instrumentations/ActorMonitor.scala | 462 +++ .../ActorMonitorInstrumentation.scala | 42 + .../ActorRefInstrumentation.scala | 57 + .../AskPatternInstrumentation.scala | 94 + .../ClusterInstrumentation.scala | 172 + .../instrumentations/DispatcherInfo.scala | 49 + .../DispatcherInstrumentation.scala | 129 + .../EnvelopeInstrumentation.scala | 43 + .../EventStreamInstrumentation.scala | 66 + .../RouterInstrumentation.scala | 94 + .../instrumentations/RouterMonitor.scala | 98 + .../SchedulerInstrumentation.scala | 28 + .../SystemMessageInstrumentation.scala | 29 + .../internal/CellWrapper.scala | 77 + 
.../remote/MessageBufferInstrumentation.scala | 19 + .../remote/RemotingInstrumentation.scala | 172 + .../remote/ShardingInstrumentation.scala | 167 + .../remote/artery/KamonRemoteInstrument.scala | 98 + .../internal/ArterySerializationAdvice.scala | 153 + .../remote/internal/KamonOptionVal.scala | 9 + ...decConstructMessageMethodInterceptor.scala | 81 + ...tobufCodecDecodeMessageMethodAdvisor.scala | 34 + .../src/test/resources/application.conf | 166 + .../src/test/resources/logback.xml | 12 + .../pekko/ActorCellInstrumentationSpec.scala | 159 + .../pekko/ActorGroupMetricsSpec.scala | 188 ++ .../ActorLoggingInstrumentationSpec.scala | 63 + .../pekko/ActorMetricsSpec.scala | 156 + .../pekko/ActorMetricsTestActor.scala | 61 + .../pekko/ActorSystemMetricsSpec.scala | 126 + .../pekko/AskPatternInstrumentationSpec.scala | 99 + .../pekko/AutoGroupingSpec.scala | 139 + .../pekko/ContextEchoActor.scala | 40 + .../pekko/ContextTesting.scala | 9 + .../pekko/DispatcherMetricsSpec.scala | 146 + .../instrumentation/pekko/EnvelopeSpec.scala | 66 + .../pekko/MessageTracingSpec.scala | 239 ++ .../pekko/PekkoTestKitInstrumentation.scala | 24 + .../pekko/RouterMetricsSpec.scala | 368 +++ .../pekko/RouterMetricsTestActor.scala | 53 + .../pekko/SchedulerInstrumentationSpec.scala | 50 + .../SystemMessageInstrumentationSpec.scala | 184 ++ .../instrumentation/pekko/TestLogger.scala | 23 + .../pekko/remote/MessageBufferTest.scala | 34 + .../ShardingInstrumentationSpec.scala | 171 + .../ShardingMessageBufferingSpec.scala | 91 + project/build.properties | 2 +- 66 files changed, 9240 insertions(+), 1 deletion(-) create mode 100644 instrumentation/kamon-pekko/build.sbt create mode 100644 instrumentation/kamon-pekko/src/main/java/kamon/instrumentation/pekko/instrumentations/ActorCellInvokeAdvice.java create mode 100644 instrumentation/kamon-pekko/src/main/java/kamon/instrumentation/pekko/instrumentations/PekkoPrivateAccess.java create mode 100644 
instrumentation/kamon-pekko/src/main/java/kamon/instrumentation/pekko/instrumentations/SchedulerRunnableAdvice.java create mode 100644 instrumentation/kamon-pekko/src/main/java/org/apache/pekko/actor/instrumentation/ReplaceWithAdvice.java create mode 100644 instrumentation/kamon-pekko/src/main/java/pekko/remote/ContextAwareWireFormats_Pekko.java create mode 100644 instrumentation/kamon-pekko/src/main/protobuf/ContainerFormats.proto create mode 100644 instrumentation/kamon-pekko/src/main/protobuf/ContextAwareWireFormats.proto create mode 100644 instrumentation/kamon-pekko/src/main/protobuf/WireFormats.proto create mode 100644 instrumentation/kamon-pekko/src/main/resources/reference.conf create mode 100644 instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/PekkoClusterShardingMetrics.scala create mode 100644 instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/PekkoInstrumentation.scala create mode 100644 instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/PekkoMetrics.scala create mode 100644 instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/PekkoRemoteInstrumentation.scala create mode 100644 instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/PekkoRemoteMetrics.scala create mode 100644 instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/ActorCellInfo.scala create mode 100644 instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/ActorInstrumentation.scala create mode 100644 instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/ActorLoggingInstrumentation.scala create mode 100644 instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/ActorMonitor.scala create mode 100644 instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/ActorMonitorInstrumentation.scala create mode 100644 
instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/ActorRefInstrumentation.scala create mode 100644 instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/AskPatternInstrumentation.scala create mode 100644 instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/ClusterInstrumentation.scala create mode 100644 instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/DispatcherInfo.scala create mode 100644 instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/DispatcherInstrumentation.scala create mode 100644 instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/EnvelopeInstrumentation.scala create mode 100644 instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/EventStreamInstrumentation.scala create mode 100644 instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/RouterInstrumentation.scala create mode 100644 instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/RouterMonitor.scala create mode 100644 instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/SchedulerInstrumentation.scala create mode 100644 instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/SystemMessageInstrumentation.scala create mode 100644 instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/internal/CellWrapper.scala create mode 100644 instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/remote/MessageBufferInstrumentation.scala create mode 100644 instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/remote/RemotingInstrumentation.scala create mode 100644 instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/remote/ShardingInstrumentation.scala 
create mode 100644 instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/remote/artery/KamonRemoteInstrument.scala create mode 100644 instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/remote/internal/ArterySerializationAdvice.scala create mode 100644 instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/remote/internal/KamonOptionVal.scala create mode 100644 instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/remote/internal/PekkoPduProtobufCodecConstructMessageMethodInterceptor.scala create mode 100644 instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/remote/internal/PekkoPduProtobufCodecDecodeMessageMethodAdvisor.scala create mode 100644 instrumentation/kamon-pekko/src/test/resources/application.conf create mode 100644 instrumentation/kamon-pekko/src/test/resources/logback.xml create mode 100644 instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/ActorCellInstrumentationSpec.scala create mode 100644 instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/ActorGroupMetricsSpec.scala create mode 100644 instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/ActorLoggingInstrumentationSpec.scala create mode 100644 instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/ActorMetricsSpec.scala create mode 100644 instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/ActorMetricsTestActor.scala create mode 100644 instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/ActorSystemMetricsSpec.scala create mode 100644 instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/AskPatternInstrumentationSpec.scala create mode 100644 instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/AutoGroupingSpec.scala create mode 100644 instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/ContextEchoActor.scala create mode 100644 
instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/ContextTesting.scala create mode 100644 instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/DispatcherMetricsSpec.scala create mode 100644 instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/EnvelopeSpec.scala create mode 100644 instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/MessageTracingSpec.scala create mode 100644 instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/PekkoTestKitInstrumentation.scala create mode 100644 instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/RouterMetricsSpec.scala create mode 100644 instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/RouterMetricsTestActor.scala create mode 100644 instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/SchedulerInstrumentationSpec.scala create mode 100644 instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/SystemMessageInstrumentationSpec.scala create mode 100644 instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/TestLogger.scala create mode 100644 instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/remote/MessageBufferTest.scala create mode 100644 instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/sharding/ShardingInstrumentationSpec.scala create mode 100644 instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/sharding/ShardingMessageBufferingSpec.scala diff --git a/build.sbt b/build.sbt index efe2995cf..5ad3e036b 100644 --- a/build.sbt +++ b/build.sbt @@ -487,6 +487,15 @@ lazy val `kamon-akka-http` = (project in file("instrumentation/kamon-akka-http") ), )).dependsOn(`kamon-akka`, `kamon-testkit` % "test") + +lazy val `kamon-pekko` = (project in file("instrumentation/kamon-pekko")) + .enablePlugins(JavaAgent) + .disablePlugins(AssemblyPlugin) + .settings(instrumentationSettings: _*) + .dependsOn( + 
`kamon-scala-future` % "compile", + `kamon-testkit` % "test" + ) lazy val `kamon-akka-grpc` = (project in file("instrumentation/kamon-akka-grpc")) .enablePlugins(JavaAgent, AkkaGrpcPlugin) .disablePlugins(AssemblyPlugin) diff --git a/instrumentation/kamon-pekko/build.sbt b/instrumentation/kamon-pekko/build.sbt new file mode 100644 index 000000000..83c083f55 --- /dev/null +++ b/instrumentation/kamon-pekko/build.sbt @@ -0,0 +1,37 @@ +// The Common configuration should always depend on the latest version of Pekko. All code in the Common configuration +// should be source compatible with all Pekko versions. +inConfig(Compile)(Defaults.compileSettings ++ Seq( + crossScalaVersions := Seq(`scala_2.12_version`, `scala_2.13_version`) +)) + +val pekkoVersion = "1.0.1" +libraryDependencies ++= { if(scalaBinaryVersion.value == "2.11") Seq.empty else Seq( + kanelaAgent, + scalatest % Test, + logbackClassic % Test, + "org.apache.pekko" %% "pekko-actor" % pekkoVersion, + "org.apache.pekko" %% "pekko-testkit" % pekkoVersion, + "org.apache.pekko" %% "pekko-slf4j" % pekkoVersion, + "org.apache.pekko" %% "pekko-remote" % pekkoVersion, + "org.apache.pekko" %% "pekko-cluster" % pekkoVersion, + "org.apache.pekko" %% "pekko-cluster-sharding" % pekkoVersion, + "org.apache.pekko" %% "pekko-protobuf" % pekkoVersion, + "org.apache.pekko" %% "pekko-testkit" % pekkoVersion % Test +)} + +exportJars := true + +/** + * Test-related settings + */ + +lazy val baseTestSettings = Seq( + fork := true, + parallelExecution := false, + javaOptions := (Test / javaOptions).value, + dependencyClasspath += (Compile / packageBin).value +) + +inConfig(Test)(Defaults.testSettings ++ instrumentationSettings ++ baseTestSettings ++ Seq( + crossScalaVersions := Seq(`scala_2.12_version`, `scala_2.13_version`) +)) diff --git a/instrumentation/kamon-pekko/src/main/java/kamon/instrumentation/pekko/instrumentations/ActorCellInvokeAdvice.java 
b/instrumentation/kamon-pekko/src/main/java/kamon/instrumentation/pekko/instrumentations/ActorCellInvokeAdvice.java new file mode 100644 index 000000000..ded91c6a1 --- /dev/null +++ b/instrumentation/kamon-pekko/src/main/java/kamon/instrumentation/pekko/instrumentations/ActorCellInvokeAdvice.java @@ -0,0 +1,40 @@ +package kamon.instrumentation.pekko.instrumentations; + +import org.apache.pekko.dispatch.Envelope; +import kamon.context.Context; +import kamon.instrumentation.context.HasContext; +import kamon.instrumentation.context.HasTimestamp; +import kanela.agent.libs.net.bytebuddy.asm.Advice; + +final public class ActorCellInvokeAdvice { + + @Advice.OnMethodEnter(suppress = Throwable.class) + public static void enter( + @Advice.This Object cell, + @Advice.Argument(0) Object envelope, + @Advice.Local("stateFromStart") Object stateFromStart, + @Advice.Local("processingStartTimestamp") Long processingStartTimestamp, + @Advice.Local("envelopeTimestamp") Long envelopeTimestamp, + @Advice.Local("context") Context context) { + + final ActorMonitor actorMonitor = ((HasActorMonitor) cell).actorMonitor(); + + processingStartTimestamp = actorMonitor.captureProcessingStartTimestamp(); + context = ((HasContext) envelope).context(); + envelopeTimestamp = ((HasTimestamp) envelope).timestamp(); + stateFromStart = actorMonitor.onMessageProcessingStart(context, envelopeTimestamp, (Envelope) envelope); + } + + @Advice.OnMethodExit(suppress = Throwable.class) + public static void exit( + @Advice.This Object cell, + @Advice.Local("stateFromStart") Object stateFromStart, + @Advice.Local("processingStartTimestamp") Long processingStartTimestamp, + @Advice.Local("envelopeTimestamp") Long envelopeTimestamp, + @Advice.Local("context") Context context) { + + final ActorMonitor actorMonitor = ((HasActorMonitor) cell).actorMonitor(); + actorMonitor.onMessageProcessingEnd(context, envelopeTimestamp, processingStartTimestamp, stateFromStart); + } + +} \ No newline at end of file diff --git 
a/instrumentation/kamon-pekko/src/main/java/kamon/instrumentation/pekko/instrumentations/PekkoPrivateAccess.java b/instrumentation/kamon-pekko/src/main/java/kamon/instrumentation/pekko/instrumentations/PekkoPrivateAccess.java new file mode 100644 index 000000000..19c8e4f9d --- /dev/null +++ b/instrumentation/kamon-pekko/src/main/java/kamon/instrumentation/pekko/instrumentations/PekkoPrivateAccess.java @@ -0,0 +1,64 @@ +package kamon.instrumentation.pekko.instrumentations; + +import org.apache.pekko.actor.*; +import org.apache.pekko.dispatch.Mailbox; +import org.apache.pekko.dispatch.sysmsg.SystemMessage; +import org.apache.pekko.pattern.PromiseActorRef; +import org.apache.pekko.routing.RoutedActorCell; +import org.apache.pekko.routing.RoutedActorRef; +import scala.Option; + +/** + * This class exposes access to several private[pekko] members that wouldn't be visible from the Scala codebase. + */ +public class PekkoPrivateAccess { + + public static boolean isSystemMessage(Object message) { + return message instanceof SystemMessage; + } + + public static boolean isPromiseActorRef(ActorRef ref) { + return ref instanceof PromiseActorRef; + } + + public static boolean isInternalAndActiveActorRef(ActorRef target) { + return target != null && target instanceof InternalActorRef && !((InternalActorRef) target).isTerminated(); + } + + public static boolean isRoutedActorRef(ActorRef target) { + return target instanceof RoutedActorRef; + } + + public static boolean isRoutedActorCell(Object cell) { + return cell instanceof RoutedActorCell; + } + + public static boolean isUnstartedActorCell(Object cell) { + return cell instanceof UnstartedCell; + } + + public static Class unstartedActorCellClass() { + return UnstartedCell.class; + } + + public static boolean isDeadLettersMailbox(Object cell, Object mailbox) { + final ActorCell actorCell = (ActorCell) cell; + return mailbox == actorCell.dispatcher().mailboxes().deadLetterMailbox(); + } + + public static long 
mailboxMessageCount(Object mailbox) { + return ((Mailbox) mailbox).numberOfMessages(); + } + + public static Option cellProps(Object cell) { + if(cell != null && cell instanceof Cell) + return Option.apply(((Cell) cell).props()); + else + return Option.empty(); + } + + public static Option lookupDeploy(ActorPath path, ActorSystem system) { + final Deployer deployer = new Deployer(system.settings(), ((ExtendedActorSystem) system).dynamicAccess()); + return deployer.lookup(path.$div("$a")); + } +} \ No newline at end of file diff --git a/instrumentation/kamon-pekko/src/main/java/kamon/instrumentation/pekko/instrumentations/SchedulerRunnableAdvice.java b/instrumentation/kamon-pekko/src/main/java/kamon/instrumentation/pekko/instrumentations/SchedulerRunnableAdvice.java new file mode 100644 index 000000000..3aa83febc --- /dev/null +++ b/instrumentation/kamon-pekko/src/main/java/kamon/instrumentation/pekko/instrumentations/SchedulerRunnableAdvice.java @@ -0,0 +1,50 @@ +/* ========================================================================================= + * Copyright © 2013-2022 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ * ========================================================================================= + */ + +package kamon.instrumentation.pekko.instrumentations; + +import kamon.Kamon; +import kamon.context.Context; +import kamon.context.Storage; +import kanela.agent.libs.net.bytebuddy.asm.Advice; + +public class SchedulerRunnableAdvice { + + @Advice.OnMethodEnter(suppress = Throwable.class) + public static void enter(@Advice.Argument(value = 1, readOnly = false) Runnable runnable) { + runnable = new ContextAwareRunnable(Kamon.currentContext(), runnable); + } + + public static class ContextAwareRunnable implements Runnable { + private final Context context; + private final Runnable underlyingRunnable; + + public ContextAwareRunnable(Context context, Runnable underlyingRunnable) { + this.context = context; + this.underlyingRunnable = underlyingRunnable; + } + + @Override + public void run() { + final Storage.Scope scope = Kamon.storeContext(context); + + try { + underlyingRunnable.run(); + } finally { + scope.close(); + } + } + } +} diff --git a/instrumentation/kamon-pekko/src/main/java/org/apache/pekko/actor/instrumentation/ReplaceWithAdvice.java b/instrumentation/kamon-pekko/src/main/java/org/apache/pekko/actor/instrumentation/ReplaceWithAdvice.java new file mode 100644 index 000000000..853d509e8 --- /dev/null +++ b/instrumentation/kamon-pekko/src/main/java/org/apache/pekko/actor/instrumentation/ReplaceWithAdvice.java @@ -0,0 +1,21 @@ +package org.apache.pekko.actor.instrumentation; + +import org.apache.pekko.actor.Cell; +import org.apache.pekko.actor.UnstartedCell; +import org.apache.pekko.actor.instrumentation.CellWrapper; +import kanela.agent.libs.net.bytebuddy.asm.Advice; + +public class ReplaceWithAdvice { + + @Advice.OnMethodEnter() + public static Cell enter(@Advice.Argument(value = 0, readOnly = false) Cell cell) { + Cell originalCell = cell; + cell = new CellWrapper(cell); + return originalCell; + } + + @Advice.OnMethodExit() + public static void 
exit(@Advice.This UnstartedCell self, @Advice.Enter Cell originalCell) { + self.self().swapCell(originalCell); + } +} diff --git a/instrumentation/kamon-pekko/src/main/java/pekko/remote/ContextAwareWireFormats_Pekko.java b/instrumentation/kamon-pekko/src/main/java/pekko/remote/ContextAwareWireFormats_Pekko.java new file mode 100644 index 000000000..0cdbaa4f2 --- /dev/null +++ b/instrumentation/kamon-pekko/src/main/java/pekko/remote/ContextAwareWireFormats_Pekko.java @@ -0,0 +1,2802 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: ContextAwareWireFormats_Akka26.proto + +package pekko.remote; + +public final class ContextAwareWireFormats_Pekko { + private ContextAwareWireFormats_Pekko() {} + public static void registerAllExtensions( + org.apache.pekko.protobufv3.internal.ExtensionRegistryLite registry) { + } + + public static void registerAllExtensions( + org.apache.pekko.protobufv3.internal.ExtensionRegistry registry) { + registerAllExtensions( + (org.apache.pekko.protobufv3.internal.ExtensionRegistryLite) registry); + } + public interface AckAndContextAwareEnvelopeContainerOrBuilder extends + // @@protoc_insertion_point(interface_extends:AckAndContextAwareEnvelopeContainer) + org.apache.pekko.protobufv3.internal.MessageOrBuilder { + + /** + * optional .AcknowledgementInfo ack = 1; + */ + boolean hasAck(); + /** + * optional .AcknowledgementInfo ack = 1; + */ + org.apache.pekko.remote.WireFormats.AcknowledgementInfo getAck(); + /** + * optional .AcknowledgementInfo ack = 1; + */ + org.apache.pekko.remote.WireFormats.AcknowledgementInfoOrBuilder getAckOrBuilder(); + + /** + * optional .ContextAwareRemoteEnvelope envelope = 2; + */ + boolean hasEnvelope(); + /** + * optional .ContextAwareRemoteEnvelope envelope = 2; + */ + ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope getEnvelope(); + /** + * optional .ContextAwareRemoteEnvelope envelope = 2; + */ + ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelopeOrBuilder 
getEnvelopeOrBuilder(); + } + /** + * Protobuf type {@code AckAndContextAwareEnvelopeContainer} + */ + public static final class AckAndContextAwareEnvelopeContainer extends + org.apache.pekko.protobufv3.internal.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:AckAndContextAwareEnvelopeContainer) + AckAndContextAwareEnvelopeContainerOrBuilder { + private static final long serialVersionUID = 0L; + // Use AckAndContextAwareEnvelopeContainer.newBuilder() to construct. + private AckAndContextAwareEnvelopeContainer(org.apache.pekko.protobufv3.internal.GeneratedMessageV3.Builder builder) { + super(builder); + } + private AckAndContextAwareEnvelopeContainer() { + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new AckAndContextAwareEnvelopeContainer(); + } + + @java.lang.Override + public final org.apache.pekko.protobufv3.internal.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private AckAndContextAwareEnvelopeContainer( + org.apache.pekko.protobufv3.internal.CodedInputStream input, + org.apache.pekko.protobufv3.internal.ExtensionRegistryLite extensionRegistry) + throws org.apache.pekko.protobufv3.internal.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + org.apache.pekko.protobufv3.internal.UnknownFieldSet.Builder unknownFields = + org.apache.pekko.protobufv3.internal.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + org.apache.pekko.remote.WireFormats.AcknowledgementInfo.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) != 0)) { + subBuilder = ack_.toBuilder(); + } + ack_ = input.readMessage(org.apache.pekko.remote.WireFormats.AcknowledgementInfo.PARSER, extensionRegistry); + if 
(subBuilder != null) { + subBuilder.mergeFrom(ack_); + ack_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) != 0)) { + subBuilder = envelope_.toBuilder(); + } + envelope_ = input.readMessage(ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(envelope_); + envelope_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + default: { + if (!parseUnknownField( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (org.apache.pekko.protobufv3.internal.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new org.apache.pekko.protobufv3.internal.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final org.apache.pekko.protobufv3.internal.Descriptors.Descriptor + getDescriptor() { + return ContextAwareWireFormats_Pekko.internal_static_AckAndContextAwareEnvelopeContainer_descriptor; + } + + @java.lang.Override + protected org.apache.pekko.protobufv3.internal.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return ContextAwareWireFormats_Pekko.internal_static_AckAndContextAwareEnvelopeContainer_fieldAccessorTable + .ensureFieldAccessorsInitialized( + ContextAwareWireFormats_Pekko.AckAndContextAwareEnvelopeContainer.class, ContextAwareWireFormats_Pekko.AckAndContextAwareEnvelopeContainer.Builder.class); + } + + private int bitField0_; + public static final int ACK_FIELD_NUMBER = 1; + private org.apache.pekko.remote.WireFormats.AcknowledgementInfo ack_; + /** + * optional .AcknowledgementInfo ack = 1; + */ + public boolean hasAck() { + return ((bitField0_ & 
0x00000001) != 0); + } + /** + * optional .AcknowledgementInfo ack = 1; + */ + public org.apache.pekko.remote.WireFormats.AcknowledgementInfo getAck() { + return ack_ == null ? org.apache.pekko.remote.WireFormats.AcknowledgementInfo.getDefaultInstance() : ack_; + } + /** + * optional .AcknowledgementInfo ack = 1; + */ + public org.apache.pekko.remote.WireFormats.AcknowledgementInfoOrBuilder getAckOrBuilder() { + return ack_ == null ? org.apache.pekko.remote.WireFormats.AcknowledgementInfo.getDefaultInstance() : ack_; + } + + public static final int ENVELOPE_FIELD_NUMBER = 2; + private ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope envelope_; + /** + * optional .ContextAwareRemoteEnvelope envelope = 2; + */ + public boolean hasEnvelope() { + return ((bitField0_ & 0x00000002) != 0); + } + /** + * optional .ContextAwareRemoteEnvelope envelope = 2; + */ + public ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope getEnvelope() { + return envelope_ == null ? ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope.getDefaultInstance() : envelope_; + } + /** + * optional .ContextAwareRemoteEnvelope envelope = 2; + */ + public ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelopeOrBuilder getEnvelopeOrBuilder() { + return envelope_ == null ? 
ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope.getDefaultInstance() : envelope_; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + if (hasAck()) { + if (!getAck().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + if (hasEnvelope()) { + if (!getEnvelope().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(org.apache.pekko.protobufv3.internal.CodedOutputStream output) + throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getAck()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getEnvelope()); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += org.apache.pekko.protobufv3.internal.CodedOutputStream + .computeMessageSize(1, getAck()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += org.apache.pekko.protobufv3.internal.CodedOutputStream + .computeMessageSize(2, getEnvelope()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof ContextAwareWireFormats_Pekko.AckAndContextAwareEnvelopeContainer)) { + return super.equals(obj); + } + ContextAwareWireFormats_Pekko.AckAndContextAwareEnvelopeContainer other = (ContextAwareWireFormats_Pekko.AckAndContextAwareEnvelopeContainer) obj; + + if (hasAck() != other.hasAck()) return false; + if (hasAck()) { + if (!getAck() + .equals(other.getAck())) return false; + } + if 
(hasEnvelope() != other.hasEnvelope()) return false; + if (hasEnvelope()) { + if (!getEnvelope() + .equals(other.getEnvelope())) return false; + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasAck()) { + hash = (37 * hash) + ACK_FIELD_NUMBER; + hash = (53 * hash) + getAck().hashCode(); + } + if (hasEnvelope()) { + hash = (37 * hash) + ENVELOPE_FIELD_NUMBER; + hash = (53 * hash) + getEnvelope().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static ContextAwareWireFormats_Pekko.AckAndContextAwareEnvelopeContainer parseFrom( + java.nio.ByteBuffer data) + throws org.apache.pekko.protobufv3.internal.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static ContextAwareWireFormats_Pekko.AckAndContextAwareEnvelopeContainer parseFrom( + java.nio.ByteBuffer data, + org.apache.pekko.protobufv3.internal.ExtensionRegistryLite extensionRegistry) + throws org.apache.pekko.protobufv3.internal.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static ContextAwareWireFormats_Pekko.AckAndContextAwareEnvelopeContainer parseFrom( + org.apache.pekko.protobufv3.internal.ByteString data) + throws org.apache.pekko.protobufv3.internal.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static ContextAwareWireFormats_Pekko.AckAndContextAwareEnvelopeContainer parseFrom( + org.apache.pekko.protobufv3.internal.ByteString data, + org.apache.pekko.protobufv3.internal.ExtensionRegistryLite extensionRegistry) + throws org.apache.pekko.protobufv3.internal.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static 
ContextAwareWireFormats_Pekko.AckAndContextAwareEnvelopeContainer parseFrom(byte[] data) + throws org.apache.pekko.protobufv3.internal.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static ContextAwareWireFormats_Pekko.AckAndContextAwareEnvelopeContainer parseFrom( + byte[] data, + org.apache.pekko.protobufv3.internal.ExtensionRegistryLite extensionRegistry) + throws org.apache.pekko.protobufv3.internal.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static ContextAwareWireFormats_Pekko.AckAndContextAwareEnvelopeContainer parseFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.pekko.protobufv3.internal.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static ContextAwareWireFormats_Pekko.AckAndContextAwareEnvelopeContainer parseFrom( + java.io.InputStream input, + org.apache.pekko.protobufv3.internal.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.pekko.protobufv3.internal.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static ContextAwareWireFormats_Pekko.AckAndContextAwareEnvelopeContainer parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.pekko.protobufv3.internal.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static ContextAwareWireFormats_Pekko.AckAndContextAwareEnvelopeContainer parseDelimitedFrom( + java.io.InputStream input, + org.apache.pekko.protobufv3.internal.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.pekko.protobufv3.internal.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static ContextAwareWireFormats_Pekko.AckAndContextAwareEnvelopeContainer parseFrom( + org.apache.pekko.protobufv3.internal.CodedInputStream input) + throws java.io.IOException { + return 
org.apache.pekko.protobufv3.internal.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static ContextAwareWireFormats_Pekko.AckAndContextAwareEnvelopeContainer parseFrom( + org.apache.pekko.protobufv3.internal.CodedInputStream input, + org.apache.pekko.protobufv3.internal.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.pekko.protobufv3.internal.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(ContextAwareWireFormats_Pekko.AckAndContextAwareEnvelopeContainer prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + org.apache.pekko.protobufv3.internal.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code AckAndContextAwareEnvelopeContainer} + */ + public static final class Builder extends + org.apache.pekko.protobufv3.internal.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:AckAndContextAwareEnvelopeContainer) + ContextAwareWireFormats_Pekko.AckAndContextAwareEnvelopeContainerOrBuilder { + public static final org.apache.pekko.protobufv3.internal.Descriptors.Descriptor + getDescriptor() { + return ContextAwareWireFormats_Pekko.internal_static_AckAndContextAwareEnvelopeContainer_descriptor; + } + + @java.lang.Override + protected org.apache.pekko.protobufv3.internal.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
ContextAwareWireFormats_Pekko.internal_static_AckAndContextAwareEnvelopeContainer_fieldAccessorTable + .ensureFieldAccessorsInitialized( + ContextAwareWireFormats_Pekko.AckAndContextAwareEnvelopeContainer.class, ContextAwareWireFormats_Pekko.AckAndContextAwareEnvelopeContainer.Builder.class); + } + + // Construct using remote.pekko.ContextAwareWireFormats_Akka26.AckAndContextAwareEnvelopeContainer.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + org.apache.pekko.protobufv3.internal.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (org.apache.pekko.protobufv3.internal.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + getAckFieldBuilder(); + getEnvelopeFieldBuilder(); + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + if (ackBuilder_ == null) { + ack_ = null; + } else { + ackBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (envelopeBuilder_ == null) { + envelope_ = null; + } else { + envelopeBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + @java.lang.Override + public org.apache.pekko.protobufv3.internal.Descriptors.Descriptor + getDescriptorForType() { + return ContextAwareWireFormats_Pekko.internal_static_AckAndContextAwareEnvelopeContainer_descriptor; + } + + @java.lang.Override + public ContextAwareWireFormats_Pekko.AckAndContextAwareEnvelopeContainer getDefaultInstanceForType() { + return ContextAwareWireFormats_Pekko.AckAndContextAwareEnvelopeContainer.getDefaultInstance(); + } + + @java.lang.Override + public ContextAwareWireFormats_Pekko.AckAndContextAwareEnvelopeContainer build() { + ContextAwareWireFormats_Pekko.AckAndContextAwareEnvelopeContainer result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public 
ContextAwareWireFormats_Pekko.AckAndContextAwareEnvelopeContainer buildPartial() { + ContextAwareWireFormats_Pekko.AckAndContextAwareEnvelopeContainer result = new ContextAwareWireFormats_Pekko.AckAndContextAwareEnvelopeContainer(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + if (ackBuilder_ == null) { + result.ack_ = ack_; + } else { + result.ack_ = ackBuilder_.build(); + } + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + if (envelopeBuilder_ == null) { + result.envelope_ = envelope_; + } else { + result.envelope_ = envelopeBuilder_.build(); + } + to_bitField0_ |= 0x00000002; + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + @java.lang.Override + public Builder setField( + org.apache.pekko.protobufv3.internal.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + org.apache.pekko.protobufv3.internal.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + org.apache.pekko.protobufv3.internal.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + org.apache.pekko.protobufv3.internal.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + org.apache.pekko.protobufv3.internal.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(org.apache.pekko.protobufv3.internal.Message other) { + if (other instanceof ContextAwareWireFormats_Pekko.AckAndContextAwareEnvelopeContainer) { 
+ return mergeFrom((ContextAwareWireFormats_Pekko.AckAndContextAwareEnvelopeContainer)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(ContextAwareWireFormats_Pekko.AckAndContextAwareEnvelopeContainer other) { + if (other == ContextAwareWireFormats_Pekko.AckAndContextAwareEnvelopeContainer.getDefaultInstance()) return this; + if (other.hasAck()) { + mergeAck(other.getAck()); + } + if (other.hasEnvelope()) { + mergeEnvelope(other.getEnvelope()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + if (hasAck()) { + if (!getAck().isInitialized()) { + return false; + } + } + if (hasEnvelope()) { + if (!getEnvelope().isInitialized()) { + return false; + } + } + return true; + } + + @java.lang.Override + public Builder mergeFrom( + org.apache.pekko.protobufv3.internal.CodedInputStream input, + org.apache.pekko.protobufv3.internal.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + ContextAwareWireFormats_Pekko.AckAndContextAwareEnvelopeContainer parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (org.apache.pekko.protobufv3.internal.InvalidProtocolBufferException e) { + parsedMessage = (ContextAwareWireFormats_Pekko.AckAndContextAwareEnvelopeContainer) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private org.apache.pekko.remote.WireFormats.AcknowledgementInfo ack_; + private org.apache.pekko.protobufv3.internal.SingleFieldBuilderV3< + org.apache.pekko.remote.WireFormats.AcknowledgementInfo, org.apache.pekko.remote.WireFormats.AcknowledgementInfo.Builder, org.apache.pekko.remote.WireFormats.AcknowledgementInfoOrBuilder> ackBuilder_; + /** + * optional .AcknowledgementInfo ack = 1; + */ + public boolean 
hasAck() { + return ((bitField0_ & 0x00000001) != 0); + } + /** + * optional .AcknowledgementInfo ack = 1; + */ + public org.apache.pekko.remote.WireFormats.AcknowledgementInfo getAck() { + if (ackBuilder_ == null) { + return ack_ == null ? org.apache.pekko.remote.WireFormats.AcknowledgementInfo.getDefaultInstance() : ack_; + } else { + return ackBuilder_.getMessage(); + } + } + /** + * optional .AcknowledgementInfo ack = 1; + */ + public Builder setAck(org.apache.pekko.remote.WireFormats.AcknowledgementInfo value) { + if (ackBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ack_ = value; + onChanged(); + } else { + ackBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .AcknowledgementInfo ack = 1; + */ + public Builder setAck( + org.apache.pekko.remote.WireFormats.AcknowledgementInfo.Builder builderForValue) { + if (ackBuilder_ == null) { + ack_ = builderForValue.build(); + onChanged(); + } else { + ackBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .AcknowledgementInfo ack = 1; + */ + public Builder mergeAck(org.apache.pekko.remote.WireFormats.AcknowledgementInfo value) { + if (ackBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) && + ack_ != null && + ack_ != org.apache.pekko.remote.WireFormats.AcknowledgementInfo.getDefaultInstance()) { + ack_ = + org.apache.pekko.remote.WireFormats.AcknowledgementInfo.newBuilder(ack_).mergeFrom(value).buildPartial(); + } else { + ack_ = value; + } + onChanged(); + } else { + ackBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .AcknowledgementInfo ack = 1; + */ + public Builder clearAck() { + if (ackBuilder_ == null) { + ack_ = null; + onChanged(); + } else { + ackBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * optional .AcknowledgementInfo ack = 1; + */ + public 
org.apache.pekko.remote.WireFormats.AcknowledgementInfo.Builder getAckBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getAckFieldBuilder().getBuilder(); + } + /** + * optional .AcknowledgementInfo ack = 1; + */ + public org.apache.pekko.remote.WireFormats.AcknowledgementInfoOrBuilder getAckOrBuilder() { + if (ackBuilder_ != null) { + return ackBuilder_.getMessageOrBuilder(); + } else { + return ack_ == null ? + org.apache.pekko.remote.WireFormats.AcknowledgementInfo.getDefaultInstance() : ack_; + } + } + /** + * optional .AcknowledgementInfo ack = 1; + */ + private org.apache.pekko.protobufv3.internal.SingleFieldBuilderV3< + org.apache.pekko.remote.WireFormats.AcknowledgementInfo, org.apache.pekko.remote.WireFormats.AcknowledgementInfo.Builder, org.apache.pekko.remote.WireFormats.AcknowledgementInfoOrBuilder> + getAckFieldBuilder() { + if (ackBuilder_ == null) { + ackBuilder_ = new org.apache.pekko.protobufv3.internal.SingleFieldBuilderV3< + org.apache.pekko.remote.WireFormats.AcknowledgementInfo, org.apache.pekko.remote.WireFormats.AcknowledgementInfo.Builder, org.apache.pekko.remote.WireFormats.AcknowledgementInfoOrBuilder>( + getAck(), + getParentForChildren(), + isClean()); + ack_ = null; + } + return ackBuilder_; + } + + private ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope envelope_; + private org.apache.pekko.protobufv3.internal.SingleFieldBuilderV3< + ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope, ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope.Builder, ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelopeOrBuilder> envelopeBuilder_; + /** + * optional .ContextAwareRemoteEnvelope envelope = 2; + */ + public boolean hasEnvelope() { + return ((bitField0_ & 0x00000002) != 0); + } + /** + * optional .ContextAwareRemoteEnvelope envelope = 2; + */ + public ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope getEnvelope() { + if (envelopeBuilder_ == null) { + return envelope_ == null ? 
ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope.getDefaultInstance() : envelope_; + } else { + return envelopeBuilder_.getMessage(); + } + } + /** + * optional .ContextAwareRemoteEnvelope envelope = 2; + */ + public Builder setEnvelope(ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope value) { + if (envelopeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + envelope_ = value; + onChanged(); + } else { + envelopeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * optional .ContextAwareRemoteEnvelope envelope = 2; + */ + public Builder setEnvelope( + ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope.Builder builderForValue) { + if (envelopeBuilder_ == null) { + envelope_ = builderForValue.build(); + onChanged(); + } else { + envelopeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * optional .ContextAwareRemoteEnvelope envelope = 2; + */ + public Builder mergeEnvelope(ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope value) { + if (envelopeBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) && + envelope_ != null && + envelope_ != ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope.getDefaultInstance()) { + envelope_ = + ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope.newBuilder(envelope_).mergeFrom(value).buildPartial(); + } else { + envelope_ = value; + } + onChanged(); + } else { + envelopeBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * optional .ContextAwareRemoteEnvelope envelope = 2; + */ + public Builder clearEnvelope() { + if (envelopeBuilder_ == null) { + envelope_ = null; + onChanged(); + } else { + envelopeBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * optional .ContextAwareRemoteEnvelope envelope = 2; + */ + public ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope.Builder 
getEnvelopeBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getEnvelopeFieldBuilder().getBuilder(); + } + /** + * optional .ContextAwareRemoteEnvelope envelope = 2; + */ + public ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelopeOrBuilder getEnvelopeOrBuilder() { + if (envelopeBuilder_ != null) { + return envelopeBuilder_.getMessageOrBuilder(); + } else { + return envelope_ == null ? + ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope.getDefaultInstance() : envelope_; + } + } + /** + * optional .ContextAwareRemoteEnvelope envelope = 2; + */ + private org.apache.pekko.protobufv3.internal.SingleFieldBuilderV3< + ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope, ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope.Builder, ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelopeOrBuilder> + getEnvelopeFieldBuilder() { + if (envelopeBuilder_ == null) { + envelopeBuilder_ = new org.apache.pekko.protobufv3.internal.SingleFieldBuilderV3< + ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope, ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope.Builder, ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelopeOrBuilder>( + getEnvelope(), + getParentForChildren(), + isClean()); + envelope_ = null; + } + return envelopeBuilder_; + } + @java.lang.Override + public final Builder setUnknownFields( + final org.apache.pekko.protobufv3.internal.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final org.apache.pekko.protobufv3.internal.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:AckAndContextAwareEnvelopeContainer) + } + + // @@protoc_insertion_point(class_scope:AckAndContextAwareEnvelopeContainer) + private static final ContextAwareWireFormats_Pekko.AckAndContextAwareEnvelopeContainer DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new 
ContextAwareWireFormats_Pekko.AckAndContextAwareEnvelopeContainer(); + } + + public static ContextAwareWireFormats_Pekko.AckAndContextAwareEnvelopeContainer getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + @java.lang.Deprecated public static final org.apache.pekko.protobufv3.internal.Parser + PARSER = new org.apache.pekko.protobufv3.internal.AbstractParser() { + @java.lang.Override + public AckAndContextAwareEnvelopeContainer parsePartialFrom( + org.apache.pekko.protobufv3.internal.CodedInputStream input, + org.apache.pekko.protobufv3.internal.ExtensionRegistryLite extensionRegistry) + throws org.apache.pekko.protobufv3.internal.InvalidProtocolBufferException { + return new AckAndContextAwareEnvelopeContainer(input, extensionRegistry); + } + }; + + public static org.apache.pekko.protobufv3.internal.Parser parser() { + return PARSER; + } + + @java.lang.Override + public org.apache.pekko.protobufv3.internal.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public ContextAwareWireFormats_Pekko.AckAndContextAwareEnvelopeContainer getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + public interface ContextAwareRemoteEnvelopeOrBuilder extends + // @@protoc_insertion_point(interface_extends:ContextAwareRemoteEnvelope) + org.apache.pekko.protobufv3.internal.MessageOrBuilder { + + /** + * required .ActorRefData recipient = 1; + */ + boolean hasRecipient(); + /** + * required .ActorRefData recipient = 1; + */ + org.apache.pekko.remote.WireFormats.ActorRefData getRecipient(); + /** + * required .ActorRefData recipient = 1; + */ + org.apache.pekko.remote.WireFormats.ActorRefDataOrBuilder getRecipientOrBuilder(); + + /** + * required .SerializedMessage message = 2; + */ + boolean hasMessage(); + /** + * required .SerializedMessage message = 2; + */ + org.apache.pekko.remote.WireFormats.SerializedMessage getMessage(); + /** + * required .SerializedMessage message = 2; + */ + 
org.apache.pekko.remote.WireFormats.SerializedMessageOrBuilder getMessageOrBuilder(); + + /** + * optional .ActorRefData sender = 4; + */ + boolean hasSender(); + /** + * optional .ActorRefData sender = 4; + */ + org.apache.pekko.remote.WireFormats.ActorRefData getSender(); + /** + * optional .ActorRefData sender = 4; + */ + org.apache.pekko.remote.WireFormats.ActorRefDataOrBuilder getSenderOrBuilder(); + + /** + * optional fixed64 seq = 5; + */ + boolean hasSeq(); + /** + * optional fixed64 seq = 5; + */ + long getSeq(); + + /** + * optional .RemoteContext traceContext = 15; + */ + boolean hasTraceContext(); + /** + * optional .RemoteContext traceContext = 15; + */ + ContextAwareWireFormats_Pekko.RemoteContext getTraceContext(); + /** + * optional .RemoteContext traceContext = 15; + */ + ContextAwareWireFormats_Pekko.RemoteContextOrBuilder getTraceContextOrBuilder(); + } + /** + * Protobuf type {@code ContextAwareRemoteEnvelope} + */ + public static final class ContextAwareRemoteEnvelope extends + org.apache.pekko.protobufv3.internal.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:ContextAwareRemoteEnvelope) + ContextAwareRemoteEnvelopeOrBuilder { + private static final long serialVersionUID = 0L; + // Use ContextAwareRemoteEnvelope.newBuilder() to construct. 
+ private ContextAwareRemoteEnvelope(org.apache.pekko.protobufv3.internal.GeneratedMessageV3.Builder builder) { + super(builder); + } + private ContextAwareRemoteEnvelope() { + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new ContextAwareRemoteEnvelope(); + } + + @java.lang.Override + public final org.apache.pekko.protobufv3.internal.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ContextAwareRemoteEnvelope( + org.apache.pekko.protobufv3.internal.CodedInputStream input, + org.apache.pekko.protobufv3.internal.ExtensionRegistryLite extensionRegistry) + throws org.apache.pekko.protobufv3.internal.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + org.apache.pekko.protobufv3.internal.UnknownFieldSet.Builder unknownFields = + org.apache.pekko.protobufv3.internal.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + org.apache.pekko.remote.WireFormats.ActorRefData.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) != 0)) { + subBuilder = recipient_.toBuilder(); + } + recipient_ = input.readMessage(org.apache.pekko.remote.WireFormats.ActorRefData.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(recipient_); + recipient_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + org.apache.pekko.remote.WireFormats.SerializedMessage.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) != 0)) { + subBuilder = message_.toBuilder(); + } + message_ = input.readMessage(org.apache.pekko.remote.WireFormats.SerializedMessage.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(message_); + message_ = 
subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + case 34: { + org.apache.pekko.remote.WireFormats.ActorRefData.Builder subBuilder = null; + if (((bitField0_ & 0x00000004) != 0)) { + subBuilder = sender_.toBuilder(); + } + sender_ = input.readMessage(org.apache.pekko.remote.WireFormats.ActorRefData.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(sender_); + sender_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000004; + break; + } + case 41: { + bitField0_ |= 0x00000008; + seq_ = input.readFixed64(); + break; + } + case 122: { + ContextAwareWireFormats_Pekko.RemoteContext.Builder subBuilder = null; + if (((bitField0_ & 0x00000010) != 0)) { + subBuilder = traceContext_.toBuilder(); + } + traceContext_ = input.readMessage(ContextAwareWireFormats_Pekko.RemoteContext.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(traceContext_); + traceContext_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000010; + break; + } + default: { + if (!parseUnknownField( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (org.apache.pekko.protobufv3.internal.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new org.apache.pekko.protobufv3.internal.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final org.apache.pekko.protobufv3.internal.Descriptors.Descriptor + getDescriptor() { + return ContextAwareWireFormats_Pekko.internal_static_ContextAwareRemoteEnvelope_descriptor; + } + + @java.lang.Override + protected org.apache.pekko.protobufv3.internal.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return ContextAwareWireFormats_Pekko.internal_static_ContextAwareRemoteEnvelope_fieldAccessorTable + 
.ensureFieldAccessorsInitialized( + ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope.class, ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope.Builder.class); + } + + private int bitField0_; + public static final int RECIPIENT_FIELD_NUMBER = 1; + private org.apache.pekko.remote.WireFormats.ActorRefData recipient_; + /** + * required .ActorRefData recipient = 1; + */ + public boolean hasRecipient() { + return ((bitField0_ & 0x00000001) != 0); + } + /** + * required .ActorRefData recipient = 1; + */ + public org.apache.pekko.remote.WireFormats.ActorRefData getRecipient() { + return recipient_ == null ? org.apache.pekko.remote.WireFormats.ActorRefData.getDefaultInstance() : recipient_; + } + /** + * required .ActorRefData recipient = 1; + */ + public org.apache.pekko.remote.WireFormats.ActorRefDataOrBuilder getRecipientOrBuilder() { + return recipient_ == null ? org.apache.pekko.remote.WireFormats.ActorRefData.getDefaultInstance() : recipient_; + } + + public static final int MESSAGE_FIELD_NUMBER = 2; + private org.apache.pekko.remote.WireFormats.SerializedMessage message_; + /** + * required .SerializedMessage message = 2; + */ + public boolean hasMessage() { + return ((bitField0_ & 0x00000002) != 0); + } + /** + * required .SerializedMessage message = 2; + */ + public org.apache.pekko.remote.WireFormats.SerializedMessage getMessage() { + return message_ == null ? org.apache.pekko.remote.WireFormats.SerializedMessage.getDefaultInstance() : message_; + } + /** + * required .SerializedMessage message = 2; + */ + public org.apache.pekko.remote.WireFormats.SerializedMessageOrBuilder getMessageOrBuilder() { + return message_ == null ? 
org.apache.pekko.remote.WireFormats.SerializedMessage.getDefaultInstance() : message_; + } + + public static final int SENDER_FIELD_NUMBER = 4; + private org.apache.pekko.remote.WireFormats.ActorRefData sender_; + /** + * optional .ActorRefData sender = 4; + */ + public boolean hasSender() { + return ((bitField0_ & 0x00000004) != 0); + } + /** + * optional .ActorRefData sender = 4; + */ + public org.apache.pekko.remote.WireFormats.ActorRefData getSender() { + return sender_ == null ? org.apache.pekko.remote.WireFormats.ActorRefData.getDefaultInstance() : sender_; + } + /** + * optional .ActorRefData sender = 4; + */ + public org.apache.pekko.remote.WireFormats.ActorRefDataOrBuilder getSenderOrBuilder() { + return sender_ == null ? org.apache.pekko.remote.WireFormats.ActorRefData.getDefaultInstance() : sender_; + } + + public static final int SEQ_FIELD_NUMBER = 5; + private long seq_; + /** + * optional fixed64 seq = 5; + */ + public boolean hasSeq() { + return ((bitField0_ & 0x00000008) != 0); + } + /** + * optional fixed64 seq = 5; + */ + public long getSeq() { + return seq_; + } + + public static final int TRACECONTEXT_FIELD_NUMBER = 15; + private ContextAwareWireFormats_Pekko.RemoteContext traceContext_; + /** + * optional .RemoteContext traceContext = 15; + */ + public boolean hasTraceContext() { + return ((bitField0_ & 0x00000010) != 0); + } + /** + * optional .RemoteContext traceContext = 15; + */ + public ContextAwareWireFormats_Pekko.RemoteContext getTraceContext() { + return traceContext_ == null ? ContextAwareWireFormats_Pekko.RemoteContext.getDefaultInstance() : traceContext_; + } + /** + * optional .RemoteContext traceContext = 15; + */ + public ContextAwareWireFormats_Pekko.RemoteContextOrBuilder getTraceContextOrBuilder() { + return traceContext_ == null ? 
ContextAwareWireFormats_Pekko.RemoteContext.getDefaultInstance() : traceContext_; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + if (!hasRecipient()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasMessage()) { + memoizedIsInitialized = 0; + return false; + } + if (!getRecipient().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (!getMessage().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (hasSender()) { + if (!getSender().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + if (hasTraceContext()) { + if (!getTraceContext().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(org.apache.pekko.protobufv3.internal.CodedOutputStream output) + throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getRecipient()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getMessage()); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(4, getSender()); + } + if (((bitField0_ & 0x00000008) != 0)) { + output.writeFixed64(5, seq_); + } + if (((bitField0_ & 0x00000010) != 0)) { + output.writeMessage(15, getTraceContext()); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += org.apache.pekko.protobufv3.internal.CodedOutputStream + .computeMessageSize(1, getRecipient()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += org.apache.pekko.protobufv3.internal.CodedOutputStream + .computeMessageSize(2, getMessage()); + } + if (((bitField0_ & 
0x00000004) != 0)) { + size += org.apache.pekko.protobufv3.internal.CodedOutputStream + .computeMessageSize(4, getSender()); + } + if (((bitField0_ & 0x00000008) != 0)) { + size += org.apache.pekko.protobufv3.internal.CodedOutputStream + .computeFixed64Size(5, seq_); + } + if (((bitField0_ & 0x00000010) != 0)) { + size += org.apache.pekko.protobufv3.internal.CodedOutputStream + .computeMessageSize(15, getTraceContext()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope)) { + return super.equals(obj); + } + ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope other = (ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope) obj; + + if (hasRecipient() != other.hasRecipient()) return false; + if (hasRecipient()) { + if (!getRecipient() + .equals(other.getRecipient())) return false; + } + if (hasMessage() != other.hasMessage()) return false; + if (hasMessage()) { + if (!getMessage() + .equals(other.getMessage())) return false; + } + if (hasSender() != other.hasSender()) return false; + if (hasSender()) { + if (!getSender() + .equals(other.getSender())) return false; + } + if (hasSeq() != other.hasSeq()) return false; + if (hasSeq()) { + if (getSeq() + != other.getSeq()) return false; + } + if (hasTraceContext() != other.hasTraceContext()) return false; + if (hasTraceContext()) { + if (!getTraceContext() + .equals(other.getTraceContext())) return false; + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasRecipient()) { + hash = (37 * hash) + RECIPIENT_FIELD_NUMBER; + hash = (53 * hash) + 
getRecipient().hashCode(); + } + if (hasMessage()) { + hash = (37 * hash) + MESSAGE_FIELD_NUMBER; + hash = (53 * hash) + getMessage().hashCode(); + } + if (hasSender()) { + hash = (37 * hash) + SENDER_FIELD_NUMBER; + hash = (53 * hash) + getSender().hashCode(); + } + if (hasSeq()) { + hash = (37 * hash) + SEQ_FIELD_NUMBER; + hash = (53 * hash) + org.apache.pekko.protobufv3.internal.Internal.hashLong( + getSeq()); + } + if (hasTraceContext()) { + hash = (37 * hash) + TRACECONTEXT_FIELD_NUMBER; + hash = (53 * hash) + getTraceContext().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope parseFrom( + java.nio.ByteBuffer data) + throws org.apache.pekko.protobufv3.internal.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope parseFrom( + java.nio.ByteBuffer data, + org.apache.pekko.protobufv3.internal.ExtensionRegistryLite extensionRegistry) + throws org.apache.pekko.protobufv3.internal.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope parseFrom( + org.apache.pekko.protobufv3.internal.ByteString data) + throws org.apache.pekko.protobufv3.internal.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope parseFrom( + org.apache.pekko.protobufv3.internal.ByteString data, + org.apache.pekko.protobufv3.internal.ExtensionRegistryLite extensionRegistry) + throws org.apache.pekko.protobufv3.internal.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope parseFrom(byte[] data) + throws org.apache.pekko.protobufv3.internal.InvalidProtocolBufferException { + 
return PARSER.parseFrom(data); + } + public static ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope parseFrom( + byte[] data, + org.apache.pekko.protobufv3.internal.ExtensionRegistryLite extensionRegistry) + throws org.apache.pekko.protobufv3.internal.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope parseFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.pekko.protobufv3.internal.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope parseFrom( + java.io.InputStream input, + org.apache.pekko.protobufv3.internal.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.pekko.protobufv3.internal.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.pekko.protobufv3.internal.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope parseDelimitedFrom( + java.io.InputStream input, + org.apache.pekko.protobufv3.internal.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.pekko.protobufv3.internal.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope parseFrom( + org.apache.pekko.protobufv3.internal.CodedInputStream input) + throws java.io.IOException { + return org.apache.pekko.protobufv3.internal.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope parseFrom( + 
org.apache.pekko.protobufv3.internal.CodedInputStream input, + org.apache.pekko.protobufv3.internal.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.pekko.protobufv3.internal.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + org.apache.pekko.protobufv3.internal.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code ContextAwareRemoteEnvelope} + */ + public static final class Builder extends + org.apache.pekko.protobufv3.internal.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:ContextAwareRemoteEnvelope) + ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelopeOrBuilder { + public static final org.apache.pekko.protobufv3.internal.Descriptors.Descriptor + getDescriptor() { + return ContextAwareWireFormats_Pekko.internal_static_ContextAwareRemoteEnvelope_descriptor; + } + + @java.lang.Override + protected org.apache.pekko.protobufv3.internal.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return ContextAwareWireFormats_Pekko.internal_static_ContextAwareRemoteEnvelope_fieldAccessorTable + .ensureFieldAccessorsInitialized( + ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope.class, ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope.Builder.class); + } + + // Construct using 
remote.pekko.ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + org.apache.pekko.protobufv3.internal.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (org.apache.pekko.protobufv3.internal.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + getRecipientFieldBuilder(); + getMessageFieldBuilder(); + getSenderFieldBuilder(); + getTraceContextFieldBuilder(); + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + if (recipientBuilder_ == null) { + recipient_ = null; + } else { + recipientBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (messageBuilder_ == null) { + message_ = null; + } else { + messageBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + if (senderBuilder_ == null) { + sender_ = null; + } else { + senderBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + seq_ = 0L; + bitField0_ = (bitField0_ & ~0x00000008); + if (traceContextBuilder_ == null) { + traceContext_ = null; + } else { + traceContextBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000010); + return this; + } + + @java.lang.Override + public org.apache.pekko.protobufv3.internal.Descriptors.Descriptor + getDescriptorForType() { + return ContextAwareWireFormats_Pekko.internal_static_ContextAwareRemoteEnvelope_descriptor; + } + + @java.lang.Override + public ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope getDefaultInstanceForType() { + return ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope.getDefaultInstance(); + } + + @java.lang.Override + public ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope build() { + ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return
result; + } + + @java.lang.Override + public ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope buildPartial() { + ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope result = new ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + if (recipientBuilder_ == null) { + result.recipient_ = recipient_; + } else { + result.recipient_ = recipientBuilder_.build(); + } + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + if (messageBuilder_ == null) { + result.message_ = message_; + } else { + result.message_ = messageBuilder_.build(); + } + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + if (senderBuilder_ == null) { + result.sender_ = sender_; + } else { + result.sender_ = senderBuilder_.build(); + } + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.seq_ = seq_; + to_bitField0_ |= 0x00000008; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + if (traceContextBuilder_ == null) { + result.traceContext_ = traceContext_; + } else { + result.traceContext_ = traceContextBuilder_.build(); + } + to_bitField0_ |= 0x00000010; + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + @java.lang.Override + public Builder setField( + org.apache.pekko.protobufv3.internal.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + org.apache.pekko.protobufv3.internal.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + org.apache.pekko.protobufv3.internal.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + @java.lang.Override + public Builder 
setRepeatedField( + org.apache.pekko.protobufv3.internal.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + org.apache.pekko.protobufv3.internal.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(org.apache.pekko.protobufv3.internal.Message other) { + if (other instanceof ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope) { + return mergeFrom((ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope other) { + if (other == ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope.getDefaultInstance()) return this; + if (other.hasRecipient()) { + mergeRecipient(other.getRecipient()); + } + if (other.hasMessage()) { + mergeMessage(other.getMessage()); + } + if (other.hasSender()) { + mergeSender(other.getSender()); + } + if (other.hasSeq()) { + setSeq(other.getSeq()); + } + if (other.hasTraceContext()) { + mergeTraceContext(other.getTraceContext()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + if (!hasRecipient()) { + return false; + } + if (!hasMessage()) { + return false; + } + if (!getRecipient().isInitialized()) { + return false; + } + if (!getMessage().isInitialized()) { + return false; + } + if (hasSender()) { + if (!getSender().isInitialized()) { + return false; + } + } + if (hasTraceContext()) { + if (!getTraceContext().isInitialized()) { + return false; + } + } + return true; + } + + @java.lang.Override + public Builder mergeFrom( + org.apache.pekko.protobufv3.internal.CodedInputStream input, + 
org.apache.pekko.protobufv3.internal.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (org.apache.pekko.protobufv3.internal.InvalidProtocolBufferException e) { + parsedMessage = (ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private org.apache.pekko.remote.WireFormats.ActorRefData recipient_; + private org.apache.pekko.protobufv3.internal.SingleFieldBuilderV3< + org.apache.pekko.remote.WireFormats.ActorRefData, org.apache.pekko.remote.WireFormats.ActorRefData.Builder, org.apache.pekko.remote.WireFormats.ActorRefDataOrBuilder> recipientBuilder_; + /** + * required .ActorRefData recipient = 1; + */ + public boolean hasRecipient() { + return ((bitField0_ & 0x00000001) != 0); + } + /** + * required .ActorRefData recipient = 1; + */ + public org.apache.pekko.remote.WireFormats.ActorRefData getRecipient() { + if (recipientBuilder_ == null) { + return recipient_ == null ? 
org.apache.pekko.remote.WireFormats.ActorRefData.getDefaultInstance() : recipient_; + } else { + return recipientBuilder_.getMessage(); + } + } + /** + * required .ActorRefData recipient = 1; + */ + public Builder setRecipient(org.apache.pekko.remote.WireFormats.ActorRefData value) { + if (recipientBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + recipient_ = value; + onChanged(); + } else { + recipientBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .ActorRefData recipient = 1; + */ + public Builder setRecipient( + org.apache.pekko.remote.WireFormats.ActorRefData.Builder builderForValue) { + if (recipientBuilder_ == null) { + recipient_ = builderForValue.build(); + onChanged(); + } else { + recipientBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .ActorRefData recipient = 1; + */ + public Builder mergeRecipient(org.apache.pekko.remote.WireFormats.ActorRefData value) { + if (recipientBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) && + recipient_ != null && + recipient_ != org.apache.pekko.remote.WireFormats.ActorRefData.getDefaultInstance()) { + recipient_ = + org.apache.pekko.remote.WireFormats.ActorRefData.newBuilder(recipient_).mergeFrom(value).buildPartial(); + } else { + recipient_ = value; + } + onChanged(); + } else { + recipientBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .ActorRefData recipient = 1; + */ + public Builder clearRecipient() { + if (recipientBuilder_ == null) { + recipient_ = null; + onChanged(); + } else { + recipientBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .ActorRefData recipient = 1; + */ + public org.apache.pekko.remote.WireFormats.ActorRefData.Builder getRecipientBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return 
getRecipientFieldBuilder().getBuilder(); + } + /** + * required .ActorRefData recipient = 1; + */ + public org.apache.pekko.remote.WireFormats.ActorRefDataOrBuilder getRecipientOrBuilder() { + if (recipientBuilder_ != null) { + return recipientBuilder_.getMessageOrBuilder(); + } else { + return recipient_ == null ? + org.apache.pekko.remote.WireFormats.ActorRefData.getDefaultInstance() : recipient_; + } + } + /** + * required .ActorRefData recipient = 1; + */ + private org.apache.pekko.protobufv3.internal.SingleFieldBuilderV3< + org.apache.pekko.remote.WireFormats.ActorRefData, org.apache.pekko.remote.WireFormats.ActorRefData.Builder, org.apache.pekko.remote.WireFormats.ActorRefDataOrBuilder> + getRecipientFieldBuilder() { + if (recipientBuilder_ == null) { + recipientBuilder_ = new org.apache.pekko.protobufv3.internal.SingleFieldBuilderV3< + org.apache.pekko.remote.WireFormats.ActorRefData, org.apache.pekko.remote.WireFormats.ActorRefData.Builder, org.apache.pekko.remote.WireFormats.ActorRefDataOrBuilder>( + getRecipient(), + getParentForChildren(), + isClean()); + recipient_ = null; + } + return recipientBuilder_; + } + + private org.apache.pekko.remote.WireFormats.SerializedMessage message_; + private org.apache.pekko.protobufv3.internal.SingleFieldBuilderV3< + org.apache.pekko.remote.WireFormats.SerializedMessage, org.apache.pekko.remote.WireFormats.SerializedMessage.Builder, org.apache.pekko.remote.WireFormats.SerializedMessageOrBuilder> messageBuilder_; + /** + * required .SerializedMessage message = 2; + */ + public boolean hasMessage() { + return ((bitField0_ & 0x00000002) != 0); + } + /** + * required .SerializedMessage message = 2; + */ + public org.apache.pekko.remote.WireFormats.SerializedMessage getMessage() { + if (messageBuilder_ == null) { + return message_ == null ? 
org.apache.pekko.remote.WireFormats.SerializedMessage.getDefaultInstance() : message_; + } else { + return messageBuilder_.getMessage(); + } + } + /** + * required .SerializedMessage message = 2; + */ + public Builder setMessage(org.apache.pekko.remote.WireFormats.SerializedMessage value) { + if (messageBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + message_ = value; + onChanged(); + } else { + messageBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .SerializedMessage message = 2; + */ + public Builder setMessage( + org.apache.pekko.remote.WireFormats.SerializedMessage.Builder builderForValue) { + if (messageBuilder_ == null) { + message_ = builderForValue.build(); + onChanged(); + } else { + messageBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .SerializedMessage message = 2; + */ + public Builder mergeMessage(org.apache.pekko.remote.WireFormats.SerializedMessage value) { + if (messageBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) && + message_ != null && + message_ != org.apache.pekko.remote.WireFormats.SerializedMessage.getDefaultInstance()) { + message_ = + org.apache.pekko.remote.WireFormats.SerializedMessage.newBuilder(message_).mergeFrom(value).buildPartial(); + } else { + message_ = value; + } + onChanged(); + } else { + messageBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .SerializedMessage message = 2; + */ + public Builder clearMessage() { + if (messageBuilder_ == null) { + message_ = null; + onChanged(); + } else { + messageBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * required .SerializedMessage message = 2; + */ + public org.apache.pekko.remote.WireFormats.SerializedMessage.Builder getMessageBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return 
getMessageFieldBuilder().getBuilder(); + } + /** + * required .SerializedMessage message = 2; + */ + public org.apache.pekko.remote.WireFormats.SerializedMessageOrBuilder getMessageOrBuilder() { + if (messageBuilder_ != null) { + return messageBuilder_.getMessageOrBuilder(); + } else { + return message_ == null ? + org.apache.pekko.remote.WireFormats.SerializedMessage.getDefaultInstance() : message_; + } + } + /** + * required .SerializedMessage message = 2; + */ + private org.apache.pekko.protobufv3.internal.SingleFieldBuilderV3< + org.apache.pekko.remote.WireFormats.SerializedMessage, org.apache.pekko.remote.WireFormats.SerializedMessage.Builder, org.apache.pekko.remote.WireFormats.SerializedMessageOrBuilder> + getMessageFieldBuilder() { + if (messageBuilder_ == null) { + messageBuilder_ = new org.apache.pekko.protobufv3.internal.SingleFieldBuilderV3< + org.apache.pekko.remote.WireFormats.SerializedMessage, org.apache.pekko.remote.WireFormats.SerializedMessage.Builder, org.apache.pekko.remote.WireFormats.SerializedMessageOrBuilder>( + getMessage(), + getParentForChildren(), + isClean()); + message_ = null; + } + return messageBuilder_; + } + + private org.apache.pekko.remote.WireFormats.ActorRefData sender_; + private org.apache.pekko.protobufv3.internal.SingleFieldBuilderV3< + org.apache.pekko.remote.WireFormats.ActorRefData, org.apache.pekko.remote.WireFormats.ActorRefData.Builder, org.apache.pekko.remote.WireFormats.ActorRefDataOrBuilder> senderBuilder_; + /** + * optional .ActorRefData sender = 4; + */ + public boolean hasSender() { + return ((bitField0_ & 0x00000004) != 0); + } + /** + * optional .ActorRefData sender = 4; + */ + public org.apache.pekko.remote.WireFormats.ActorRefData getSender() { + if (senderBuilder_ == null) { + return sender_ == null ? 
org.apache.pekko.remote.WireFormats.ActorRefData.getDefaultInstance() : sender_; + } else { + return senderBuilder_.getMessage(); + } + } + /** + * optional .ActorRefData sender = 4; + */ + public Builder setSender(org.apache.pekko.remote.WireFormats.ActorRefData value) { + if (senderBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + sender_ = value; + onChanged(); + } else { + senderBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .ActorRefData sender = 4; + */ + public Builder setSender( + org.apache.pekko.remote.WireFormats.ActorRefData.Builder builderForValue) { + if (senderBuilder_ == null) { + sender_ = builderForValue.build(); + onChanged(); + } else { + senderBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .ActorRefData sender = 4; + */ + public Builder mergeSender(org.apache.pekko.remote.WireFormats.ActorRefData value) { + if (senderBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) && + sender_ != null && + sender_ != org.apache.pekko.remote.WireFormats.ActorRefData.getDefaultInstance()) { + sender_ = + org.apache.pekko.remote.WireFormats.ActorRefData.newBuilder(sender_).mergeFrom(value).buildPartial(); + } else { + sender_ = value; + } + onChanged(); + } else { + senderBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .ActorRefData sender = 4; + */ + public Builder clearSender() { + if (senderBuilder_ == null) { + sender_ = null; + onChanged(); + } else { + senderBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + /** + * optional .ActorRefData sender = 4; + */ + public org.apache.pekko.remote.WireFormats.ActorRefData.Builder getSenderBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getSenderFieldBuilder().getBuilder(); + } + /** + * optional .ActorRefData sender = 4; + */ + public 
org.apache.pekko.remote.WireFormats.ActorRefDataOrBuilder getSenderOrBuilder() { + if (senderBuilder_ != null) { + return senderBuilder_.getMessageOrBuilder(); + } else { + return sender_ == null ? + org.apache.pekko.remote.WireFormats.ActorRefData.getDefaultInstance() : sender_; + } + } + /** + * optional .ActorRefData sender = 4; + */ + private org.apache.pekko.protobufv3.internal.SingleFieldBuilderV3< + org.apache.pekko.remote.WireFormats.ActorRefData, org.apache.pekko.remote.WireFormats.ActorRefData.Builder, org.apache.pekko.remote.WireFormats.ActorRefDataOrBuilder> + getSenderFieldBuilder() { + if (senderBuilder_ == null) { + senderBuilder_ = new org.apache.pekko.protobufv3.internal.SingleFieldBuilderV3< + org.apache.pekko.remote.WireFormats.ActorRefData, org.apache.pekko.remote.WireFormats.ActorRefData.Builder, org.apache.pekko.remote.WireFormats.ActorRefDataOrBuilder>( + getSender(), + getParentForChildren(), + isClean()); + sender_ = null; + } + return senderBuilder_; + } + + private long seq_ ; + /** + * optional fixed64 seq = 5; + */ + public boolean hasSeq() { + return ((bitField0_ & 0x00000008) != 0); + } + /** + * optional fixed64 seq = 5; + */ + public long getSeq() { + return seq_; + } + /** + * optional fixed64 seq = 5; + */ + public Builder setSeq(long value) { + bitField0_ |= 0x00000008; + seq_ = value; + onChanged(); + return this; + } + /** + * optional fixed64 seq = 5; + */ + public Builder clearSeq() { + bitField0_ = (bitField0_ & ~0x00000008); + seq_ = 0L; + onChanged(); + return this; + } + + private ContextAwareWireFormats_Pekko.RemoteContext traceContext_; + private org.apache.pekko.protobufv3.internal.SingleFieldBuilderV3< + ContextAwareWireFormats_Pekko.RemoteContext, ContextAwareWireFormats_Pekko.RemoteContext.Builder, ContextAwareWireFormats_Pekko.RemoteContextOrBuilder> traceContextBuilder_; + /** + * optional .RemoteContext traceContext = 15; + */ + public boolean hasTraceContext() { + return ((bitField0_ & 0x00000010) != 0); + } + 
/** + * optional .RemoteContext traceContext = 15; + */ + public ContextAwareWireFormats_Pekko.RemoteContext getTraceContext() { + if (traceContextBuilder_ == null) { + return traceContext_ == null ? ContextAwareWireFormats_Pekko.RemoteContext.getDefaultInstance() : traceContext_; + } else { + return traceContextBuilder_.getMessage(); + } + } + /** + * optional .RemoteContext traceContext = 15; + */ + public Builder setTraceContext(ContextAwareWireFormats_Pekko.RemoteContext value) { + if (traceContextBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + traceContext_ = value; + onChanged(); + } else { + traceContextBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + return this; + } + /** + * optional .RemoteContext traceContext = 15; + */ + public Builder setTraceContext( + ContextAwareWireFormats_Pekko.RemoteContext.Builder builderForValue) { + if (traceContextBuilder_ == null) { + traceContext_ = builderForValue.build(); + onChanged(); + } else { + traceContextBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + return this; + } + /** + * optional .RemoteContext traceContext = 15; + */ + public Builder mergeTraceContext(ContextAwareWireFormats_Pekko.RemoteContext value) { + if (traceContextBuilder_ == null) { + if (((bitField0_ & 0x00000010) != 0) && + traceContext_ != null && + traceContext_ != ContextAwareWireFormats_Pekko.RemoteContext.getDefaultInstance()) { + traceContext_ = + ContextAwareWireFormats_Pekko.RemoteContext.newBuilder(traceContext_).mergeFrom(value).buildPartial(); + } else { + traceContext_ = value; + } + onChanged(); + } else { + traceContextBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000010; + return this; + } + /** + * optional .RemoteContext traceContext = 15; + */ + public Builder clearTraceContext() { + if (traceContextBuilder_ == null) { + traceContext_ = null; + onChanged(); + } else { + traceContextBuilder_.clear(); + } + bitField0_ = (bitField0_ & 
~0x00000010); + return this; + } + /** + * optional .RemoteContext traceContext = 15; + */ + public ContextAwareWireFormats_Pekko.RemoteContext.Builder getTraceContextBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return getTraceContextFieldBuilder().getBuilder(); + } + /** + * optional .RemoteContext traceContext = 15; + */ + public ContextAwareWireFormats_Pekko.RemoteContextOrBuilder getTraceContextOrBuilder() { + if (traceContextBuilder_ != null) { + return traceContextBuilder_.getMessageOrBuilder(); + } else { + return traceContext_ == null ? + ContextAwareWireFormats_Pekko.RemoteContext.getDefaultInstance() : traceContext_; + } + } + /** + * optional .RemoteContext traceContext = 15; + */ + private org.apache.pekko.protobufv3.internal.SingleFieldBuilderV3< + ContextAwareWireFormats_Pekko.RemoteContext, ContextAwareWireFormats_Pekko.RemoteContext.Builder, ContextAwareWireFormats_Pekko.RemoteContextOrBuilder> + getTraceContextFieldBuilder() { + if (traceContextBuilder_ == null) { + traceContextBuilder_ = new org.apache.pekko.protobufv3.internal.SingleFieldBuilderV3< + ContextAwareWireFormats_Pekko.RemoteContext, ContextAwareWireFormats_Pekko.RemoteContext.Builder, ContextAwareWireFormats_Pekko.RemoteContextOrBuilder>( + getTraceContext(), + getParentForChildren(), + isClean()); + traceContext_ = null; + } + return traceContextBuilder_; + } + @java.lang.Override + public final Builder setUnknownFields( + final org.apache.pekko.protobufv3.internal.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final org.apache.pekko.protobufv3.internal.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:ContextAwareRemoteEnvelope) + } + + // @@protoc_insertion_point(class_scope:ContextAwareRemoteEnvelope) + private static final ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope 
DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope(); + } + + public static ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + @java.lang.Deprecated public static final org.apache.pekko.protobufv3.internal.Parser + PARSER = new org.apache.pekko.protobufv3.internal.AbstractParser() { + @java.lang.Override + public ContextAwareRemoteEnvelope parsePartialFrom( + org.apache.pekko.protobufv3.internal.CodedInputStream input, + org.apache.pekko.protobufv3.internal.ExtensionRegistryLite extensionRegistry) + throws org.apache.pekko.protobufv3.internal.InvalidProtocolBufferException { + return new ContextAwareRemoteEnvelope(input, extensionRegistry); + } + }; + + public static org.apache.pekko.protobufv3.internal.Parser parser() { + return PARSER; + } + + @java.lang.Override + public org.apache.pekko.protobufv3.internal.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public ContextAwareWireFormats_Pekko.ContextAwareRemoteEnvelope getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + public interface RemoteContextOrBuilder extends + // @@protoc_insertion_point(interface_extends:RemoteContext) + org.apache.pekko.protobufv3.internal.MessageOrBuilder { + + /** + * required bytes context = 1; + */ + boolean hasContext(); + /** + * required bytes context = 1; + */ + org.apache.pekko.protobufv3.internal.ByteString getContext(); + } + /** + * Protobuf type {@code RemoteContext} + */ + public static final class RemoteContext extends + org.apache.pekko.protobufv3.internal.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:RemoteContext) + RemoteContextOrBuilder { + private static final long serialVersionUID = 0L; + // Use RemoteContext.newBuilder() to construct. 
+ private RemoteContext(org.apache.pekko.protobufv3.internal.GeneratedMessageV3.Builder builder) { + super(builder); + } + private RemoteContext() { + context_ = org.apache.pekko.protobufv3.internal.ByteString.EMPTY; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new RemoteContext(); + } + + @java.lang.Override + public final org.apache.pekko.protobufv3.internal.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RemoteContext( + org.apache.pekko.protobufv3.internal.CodedInputStream input, + org.apache.pekko.protobufv3.internal.ExtensionRegistryLite extensionRegistry) + throws org.apache.pekko.protobufv3.internal.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + org.apache.pekko.protobufv3.internal.UnknownFieldSet.Builder unknownFields = + org.apache.pekko.protobufv3.internal.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + bitField0_ |= 0x00000001; + context_ = input.readBytes(); + break; + } + default: { + if (!parseUnknownField( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (org.apache.pekko.protobufv3.internal.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new org.apache.pekko.protobufv3.internal.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final org.apache.pekko.protobufv3.internal.Descriptors.Descriptor + getDescriptor() { + return ContextAwareWireFormats_Pekko.internal_static_RemoteContext_descriptor; + } + + @java.lang.Override + protected 
org.apache.pekko.protobufv3.internal.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return ContextAwareWireFormats_Pekko.internal_static_RemoteContext_fieldAccessorTable + .ensureFieldAccessorsInitialized( + ContextAwareWireFormats_Pekko.RemoteContext.class, ContextAwareWireFormats_Pekko.RemoteContext.Builder.class); + } + + private int bitField0_; + public static final int CONTEXT_FIELD_NUMBER = 1; + private org.apache.pekko.protobufv3.internal.ByteString context_; + /** + * required bytes context = 1; + */ + public boolean hasContext() { + return ((bitField0_ & 0x00000001) != 0); + } + /** + * required bytes context = 1; + */ + public org.apache.pekko.protobufv3.internal.ByteString getContext() { + return context_; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + if (!hasContext()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(org.apache.pekko.protobufv3.internal.CodedOutputStream output) + throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeBytes(1, context_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += org.apache.pekko.protobufv3.internal.CodedOutputStream + .computeBytesSize(1, context_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof ContextAwareWireFormats_Pekko.RemoteContext)) { + return super.equals(obj); + } + 
ContextAwareWireFormats_Pekko.RemoteContext other = (ContextAwareWireFormats_Pekko.RemoteContext) obj; + + if (hasContext() != other.hasContext()) return false; + if (hasContext()) { + if (!getContext() + .equals(other.getContext())) return false; + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasContext()) { + hash = (37 * hash) + CONTEXT_FIELD_NUMBER; + hash = (53 * hash) + getContext().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static ContextAwareWireFormats_Pekko.RemoteContext parseFrom( + java.nio.ByteBuffer data) + throws org.apache.pekko.protobufv3.internal.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static ContextAwareWireFormats_Pekko.RemoteContext parseFrom( + java.nio.ByteBuffer data, + org.apache.pekko.protobufv3.internal.ExtensionRegistryLite extensionRegistry) + throws org.apache.pekko.protobufv3.internal.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static ContextAwareWireFormats_Pekko.RemoteContext parseFrom( + org.apache.pekko.protobufv3.internal.ByteString data) + throws org.apache.pekko.protobufv3.internal.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static ContextAwareWireFormats_Pekko.RemoteContext parseFrom( + org.apache.pekko.protobufv3.internal.ByteString data, + org.apache.pekko.protobufv3.internal.ExtensionRegistryLite extensionRegistry) + throws org.apache.pekko.protobufv3.internal.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static ContextAwareWireFormats_Pekko.RemoteContext parseFrom(byte[] data) + throws 
org.apache.pekko.protobufv3.internal.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static ContextAwareWireFormats_Pekko.RemoteContext parseFrom( + byte[] data, + org.apache.pekko.protobufv3.internal.ExtensionRegistryLite extensionRegistry) + throws org.apache.pekko.protobufv3.internal.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static ContextAwareWireFormats_Pekko.RemoteContext parseFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.pekko.protobufv3.internal.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static ContextAwareWireFormats_Pekko.RemoteContext parseFrom( + java.io.InputStream input, + org.apache.pekko.protobufv3.internal.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.pekko.protobufv3.internal.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static ContextAwareWireFormats_Pekko.RemoteContext parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.pekko.protobufv3.internal.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static ContextAwareWireFormats_Pekko.RemoteContext parseDelimitedFrom( + java.io.InputStream input, + org.apache.pekko.protobufv3.internal.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.pekko.protobufv3.internal.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static ContextAwareWireFormats_Pekko.RemoteContext parseFrom( + org.apache.pekko.protobufv3.internal.CodedInputStream input) + throws java.io.IOException { + return org.apache.pekko.protobufv3.internal.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static ContextAwareWireFormats_Pekko.RemoteContext parseFrom( + org.apache.pekko.protobufv3.internal.CodedInputStream 
input, + org.apache.pekko.protobufv3.internal.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.pekko.protobufv3.internal.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(ContextAwareWireFormats_Pekko.RemoteContext prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + org.apache.pekko.protobufv3.internal.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code RemoteContext} + */ + public static final class Builder extends + org.apache.pekko.protobufv3.internal.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:RemoteContext) + ContextAwareWireFormats_Pekko.RemoteContextOrBuilder { + public static final org.apache.pekko.protobufv3.internal.Descriptors.Descriptor + getDescriptor() { + return ContextAwareWireFormats_Pekko.internal_static_RemoteContext_descriptor; + } + + @java.lang.Override + protected org.apache.pekko.protobufv3.internal.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return ContextAwareWireFormats_Pekko.internal_static_RemoteContext_fieldAccessorTable + .ensureFieldAccessorsInitialized( + ContextAwareWireFormats_Pekko.RemoteContext.class, ContextAwareWireFormats_Pekko.RemoteContext.Builder.class); + } + + // Construct using org.apache.pekko.remote.ContextAwareWireFormats_Pekko.RemoteContext.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + 
org.apache.pekko.protobufv3.internal.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (org.apache.pekko.protobufv3.internal.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + context_ = org.apache.pekko.protobufv3.internal.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + @java.lang.Override + public org.apache.pekko.protobufv3.internal.Descriptors.Descriptor + getDescriptorForType() { + return ContextAwareWireFormats_Pekko.internal_static_RemoteContext_descriptor; + } + + @java.lang.Override + public ContextAwareWireFormats_Pekko.RemoteContext getDefaultInstanceForType() { + return ContextAwareWireFormats_Pekko.RemoteContext.getDefaultInstance(); + } + + @java.lang.Override + public ContextAwareWireFormats_Pekko.RemoteContext build() { + ContextAwareWireFormats_Pekko.RemoteContext result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public ContextAwareWireFormats_Pekko.RemoteContext buildPartial() { + ContextAwareWireFormats_Pekko.RemoteContext result = new ContextAwareWireFormats_Pekko.RemoteContext(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + to_bitField0_ |= 0x00000001; + } + result.context_ = context_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + @java.lang.Override + public Builder setField( + org.apache.pekko.protobufv3.internal.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + org.apache.pekko.protobufv3.internal.Descriptors.FieldDescriptor field) { + return 
super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + org.apache.pekko.protobufv3.internal.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + org.apache.pekko.protobufv3.internal.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + org.apache.pekko.protobufv3.internal.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(org.apache.pekko.protobufv3.internal.Message other) { + if (other instanceof ContextAwareWireFormats_Pekko.RemoteContext) { + return mergeFrom((ContextAwareWireFormats_Pekko.RemoteContext)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(ContextAwareWireFormats_Pekko.RemoteContext other) { + if (other == ContextAwareWireFormats_Pekko.RemoteContext.getDefaultInstance()) return this; + if (other.hasContext()) { + setContext(other.getContext()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + if (!hasContext()) { + return false; + } + return true; + } + + @java.lang.Override + public Builder mergeFrom( + org.apache.pekko.protobufv3.internal.CodedInputStream input, + org.apache.pekko.protobufv3.internal.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + ContextAwareWireFormats_Pekko.RemoteContext parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (org.apache.pekko.protobufv3.internal.InvalidProtocolBufferException e) { + parsedMessage = (ContextAwareWireFormats_Pekko.RemoteContext) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage 
!= null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private org.apache.pekko.protobufv3.internal.ByteString context_ = org.apache.pekko.protobufv3.internal.ByteString.EMPTY; + /** + * required bytes context = 1; + */ + public boolean hasContext() { + return ((bitField0_ & 0x00000001) != 0); + } + /** + * required bytes context = 1; + */ + public org.apache.pekko.protobufv3.internal.ByteString getContext() { + return context_; + } + /** + * required bytes context = 1; + */ + public Builder setContext(org.apache.pekko.protobufv3.internal.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + context_ = value; + onChanged(); + return this; + } + /** + * required bytes context = 1; + */ + public Builder clearContext() { + bitField0_ = (bitField0_ & ~0x00000001); + context_ = getDefaultInstance().getContext(); + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final org.apache.pekko.protobufv3.internal.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final org.apache.pekko.protobufv3.internal.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:RemoteContext) + } + + // @@protoc_insertion_point(class_scope:RemoteContext) + private static final ContextAwareWireFormats_Pekko.RemoteContext DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new ContextAwareWireFormats_Pekko.RemoteContext(); + } + + public static ContextAwareWireFormats_Pekko.RemoteContext getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + @java.lang.Deprecated public static final org.apache.pekko.protobufv3.internal.Parser + PARSER = new org.apache.pekko.protobufv3.internal.AbstractParser() { + @java.lang.Override + public RemoteContext parsePartialFrom( + 
org.apache.pekko.protobufv3.internal.CodedInputStream input, + org.apache.pekko.protobufv3.internal.ExtensionRegistryLite extensionRegistry) + throws org.apache.pekko.protobufv3.internal.InvalidProtocolBufferException { + return new RemoteContext(input, extensionRegistry); + } + }; + + public static org.apache.pekko.protobufv3.internal.Parser parser() { + return PARSER; + } + + @java.lang.Override + public org.apache.pekko.protobufv3.internal.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public ContextAwareWireFormats_Pekko.RemoteContext getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + private static final org.apache.pekko.protobufv3.internal.Descriptors.Descriptor + internal_static_AckAndContextAwareEnvelopeContainer_descriptor; + private static final + org.apache.pekko.protobufv3.internal.GeneratedMessageV3.FieldAccessorTable + internal_static_AckAndContextAwareEnvelopeContainer_fieldAccessorTable; + private static final org.apache.pekko.protobufv3.internal.Descriptors.Descriptor + internal_static_ContextAwareRemoteEnvelope_descriptor; + private static final + org.apache.pekko.protobufv3.internal.GeneratedMessageV3.FieldAccessorTable + internal_static_ContextAwareRemoteEnvelope_fieldAccessorTable; + private static final org.apache.pekko.protobufv3.internal.Descriptors.Descriptor + internal_static_RemoteContext_descriptor; + private static final + org.apache.pekko.protobufv3.internal.GeneratedMessageV3.FieldAccessorTable + internal_static_RemoteContext_fieldAccessorTable; + + public static org.apache.pekko.protobufv3.internal.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static org.apache.pekko.protobufv3.internal.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\035ContextAwareWireFormats.proto\032\021WireFor" + + "mats.proto\"w\n#AckAndContextAwareEnvelope" + + "Container\022!\n\003ack\030\001 \001(\0132\024.Acknowledgement" + + 
"Info\022-\n\010envelope\030\002 \001(\0132\033.ContextAwareRem" + + "oteEnvelope\"\265\001\n\032ContextAwareRemoteEnvelo" + + "pe\022 \n\trecipient\030\001 \002(\0132\r.ActorRefData\022#\n\007" + + "message\030\002 \002(\0132\022.SerializedMessage\022\035\n\006sen" + + "der\030\004 \001(\0132\r.ActorRefData\022\013\n\003seq\030\005 \001(\006\022$\n" + + "\014traceContext\030\017 \001(\0132\016.RemoteContext\" \n\rR" + + "emoteContext\022\017\n\007context\030\001 \002(\014B\017\n\013org.apache.pekko.re" + + "moteH\001" + }; + descriptor = org.apache.pekko.protobufv3.internal.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new org.apache.pekko.protobufv3.internal.Descriptors.FileDescriptor[] { + org.apache.pekko.remote.WireFormats.getDescriptor(), + }); + internal_static_AckAndContextAwareEnvelopeContainer_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_AckAndContextAwareEnvelopeContainer_fieldAccessorTable = new + org.apache.pekko.protobufv3.internal.GeneratedMessageV3.FieldAccessorTable( + internal_static_AckAndContextAwareEnvelopeContainer_descriptor, + new java.lang.String[] { "Ack", "Envelope", }); + internal_static_ContextAwareRemoteEnvelope_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_ContextAwareRemoteEnvelope_fieldAccessorTable = new + org.apache.pekko.protobufv3.internal.GeneratedMessageV3.FieldAccessorTable( + internal_static_ContextAwareRemoteEnvelope_descriptor, + new java.lang.String[] { "Recipient", "Message", "Sender", "Seq", "TraceContext", }); + internal_static_RemoteContext_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_RemoteContext_fieldAccessorTable = new + org.apache.pekko.protobufv3.internal.GeneratedMessageV3.FieldAccessorTable( + internal_static_RemoteContext_descriptor, + new java.lang.String[] { "Context", }); + org.apache.pekko.remote.WireFormats.getDescriptor(); + } + + // @@protoc_insertion_point(outer_class_scope) +} 
diff --git a/instrumentation/kamon-pekko/src/main/protobuf/ContainerFormats.proto b/instrumentation/kamon-pekko/src/main/protobuf/ContainerFormats.proto new file mode 100644 index 000000000..ca1ef91b2 --- /dev/null +++ b/instrumentation/kamon-pekko/src/main/protobuf/ContainerFormats.proto @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * license agreements; and to You under the Apache License, version 2.0: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * This file is part of the Apache Pekko project, derived from Akka. + */ + +/* + * Copyright (C) 2009-2022 Lightbend Inc. + */ + +syntax = "proto2"; + +option java_package = "org.apache.pekko.remote"; +option optimize_for = SPEED; + +/****************************************** + ActorSelection related formats +*******************************************/ + +message SelectionEnvelope { + required bytes enclosedMessage = 1; + required int32 serializerId = 2; + repeated Selection pattern = 3; + optional bytes messageManifest = 4; + optional bool wildcardFanOut = 5; // optional for pre 2.3.4 compatibility +} + +enum PatternType { + PARENT = 0; + CHILD_NAME = 1; + CHILD_PATTERN = 2; +} + +message Selection { + required PatternType type = 1; + optional string matcher = 2; +} + +message Identify { + required Payload messageId = 1; +} + +message ActorIdentity { + required Payload correlationId = 1; + optional ActorRef ref = 2; +} + +message ActorRef { + required string path = 1; +} + +message Option { + optional Payload value = 1; +} + +message Payload { + required bytes enclosedMessage = 1; + required int32 serializerId = 2; + optional bytes messageManifest = 4; +} + +message WatcherHeartbeatResponse { + required uint64 uid = 1; +} + +message Throwable { + required string className = 1; + optional string message = 2; + optional Payload cause = 3; + repeated StackTraceElement stackTrace = 4; +} + +message ThrowableNotSerializable { + required string message = 1; + 
required string originalMessage = 2; + required string originalClassName = 3; +} + +message ActorInitializationException { + optional ActorRef actor = 1; + required string message = 2; + required Payload cause = 3; +} + +message StackTraceElement { + required string className = 1; + required string methodName = 2; + required string fileName = 3; + required int32 lineNumber = 4; +} + + +// ReplyWith pattern message(s) +message StatusReplyErrorMessage { + required string errorMessage = 1; +} \ No newline at end of file diff --git a/instrumentation/kamon-pekko/src/main/protobuf/ContextAwareWireFormats.proto b/instrumentation/kamon-pekko/src/main/protobuf/ContextAwareWireFormats.proto new file mode 100644 index 000000000..c8742a2c6 --- /dev/null +++ b/instrumentation/kamon-pekko/src/main/protobuf/ContextAwareWireFormats.proto @@ -0,0 +1,30 @@ +syntax = "proto2"; +import "WireFormats.proto"; + + +option java_package = "org.apache.pekko.remote"; +option optimize_for = SPEED; + + +/************************************************ + * Kamon-specific additions to the protocol + ************************************************/ + +message AckAndContextAwareEnvelopeContainer { + optional AcknowledgementInfo ack = 1; + optional ContextAwareRemoteEnvelope envelope = 2; +} + +message ContextAwareRemoteEnvelope { + required ActorRefData recipient = 1; + required SerializedMessage message = 2; + optional ActorRefData sender = 4; + optional fixed64 seq = 5; + + optional RemoteContext traceContext = 15; +} + +message RemoteContext { + required bytes context = 1; +} + diff --git a/instrumentation/kamon-pekko/src/main/protobuf/WireFormats.proto b/instrumentation/kamon-pekko/src/main/protobuf/WireFormats.proto new file mode 100644 index 000000000..9a1b696f3 --- /dev/null +++ b/instrumentation/kamon-pekko/src/main/protobuf/WireFormats.proto @@ -0,0 +1,222 @@ +// Extracted from https://github.com/apache/incubator-pekko/blob/main/remote/src/main/protobuf/WireFormats.proto +/* + * Licensed 
to the Apache Software Foundation (ASF) under one or more + * license agreements; and to You under the Apache License, version 2.0: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * This file is part of the Apache Pekko project, derived from Akka. + */ + +/* + * Copyright (C) 2009-2022 Lightbend Inc. + */ + +syntax = "proto2"; + +option java_package = "org.apache.pekko.remote"; +option optimize_for = SPEED; + +import "ContainerFormats.proto"; + +/****************************************** + * Remoting message formats + ******************************************/ + + +message AckAndEnvelopeContainer { + optional AcknowledgementInfo ack = 1; + optional RemoteEnvelope envelope = 2; +} + +/** + * Defines a remote message. + */ +message RemoteEnvelope { + required ActorRefData recipient = 1; + required SerializedMessage message = 2; + optional ActorRefData sender = 4; + optional fixed64 seq = 5; +} + +message AcknowledgementInfo { + required fixed64 cumulativeAck = 1; + repeated fixed64 nacks = 2; +} + +/** + * Defines a remote ActorRef that "remembers" and uses its original Actor instance + * on the original node. + */ +message ActorRefData { + required string path = 1; +} + +/** + * Defines a message. 
+ */ +message SerializedMessage { + required bytes message = 1; + required int32 serializerId = 2; + optional bytes messageManifest = 3; +} + +/** + * Defines org.apache.pekko.remote.DaemonMsgCreate + */ +message DaemonMsgCreateData { + required PropsData props = 1; + required DeployData deploy = 2; + required string path = 3; + required ActorRefData supervisor = 4; +} + +/** + * Serialization of org.apache.pekko.actor.Props + */ +message PropsData { + required DeployData deploy = 2; + required string clazz = 3; + repeated bytes args = 4; + // serialized props parameters + // older wire protocol: contains class name for each arg + // newer wire protocol: contains string manifest for each arg + repeated string manifests = 5; + // newer wire protocol: serializer id for each arg + repeated int32 serializerIds = 6; + // additionally a flag per position to indicate if it was + // serialized with manifest or not + repeated bool hasManifest = 7; +} + +/** + * Serialization of org.apache.pekko.actor.Deploy + */ +message DeployData { + required string path = 1; + optional bytes config = 2; + optional bytes routerConfig = 3; + optional bytes scope = 4; + optional string dispatcher = 5; + // older wire protocol: hardcoded class used to look up serializer + // newer wire protocol: serializer id and manifest available for each + optional int32 scopeSerializerId = 6; + optional string scopeManifest = 7; + optional int32 configSerializerId = 8; + optional string configManifest = 9; + optional int32 routerConfigSerializerId = 10; + optional string routerConfigManifest = 11; + repeated string tags = 12; +} + + +/****************************************** + * Pekko Protocol message formats + ******************************************/ + +/** + * Message format of Pekko Protocol. + * Message contains either a payload or an instruction. 
+ */ +message PekkoProtocolMessage { + optional bytes payload = 1; + optional PekkoControlMessage instruction = 2; +} + +/** + * Defines some control messages for the remoting + */ +message PekkoControlMessage { + required CommandType commandType = 1; + optional PekkoHandshakeInfo handshakeInfo = 2; +} + +message PekkoHandshakeInfo { + required AddressData origin = 1; + required fixed64 uid = 2; + optional string cookie = 3; + +} + +/** + * Defines the type of the PekkoControlMessage command type + */ +enum CommandType { + ASSOCIATE = 1; + DISASSOCIATE = 2; + HEARTBEAT = 3; + DISASSOCIATE_SHUTTING_DOWN = 4; // Remote system is going down and will not accepts new connections + DISASSOCIATE_QUARANTINED = 5; // Remote system refused the association since the current system is quarantined +} + +/** + * java.util.concurrent.TimeUnit enum + */ +enum TimeUnit { + NANOSECONDS = 1; + MICROSECONDS = 2; + MILLISECONDS = 3; + SECONDS = 4; + MINUTES = 5; + HOURS = 6; + DAYS = 7; +} + +message FiniteDuration { + required int64 value = 1; + required TimeUnit unit = 2; +} + +message RemoteScope { + required AddressData node = 1; +} + +// router configs + +message DefaultResizer { + required uint32 lowerBound = 1; + required uint32 upperBound = 2; + required uint32 pressureThreshold = 3; + required double rampupRate = 4; + required double backoffThreshold = 5; + required double backoffRate = 6; + required uint32 messagesPerResize = 7; +} + +message FromConfig { + optional Payload resizer = 1; + optional string routerDispatcher = 2; +} + +message GenericRoutingPool { + required uint32 nrOfInstances = 1; + optional string routerDispatcher = 2; + required bool usePoolDispatcher = 3; + optional Payload resizer = 4; +} + +message ScatterGatherPool { + required GenericRoutingPool generic = 1; + required FiniteDuration within = 2; +} + +message TailChoppingPool { + required GenericRoutingPool generic = 1; + required FiniteDuration within = 2; + required FiniteDuration interval = 3; +} + 
+/** + * Defines a remote address. + */ +message AddressData { + required string system = 1; + required string hostname = 2; + required uint32 port = 3; + optional string protocol = 4; +} + +message RemoteRouterConfig { + required Payload local = 1; + repeated AddressData nodes = 2; +} diff --git a/instrumentation/kamon-pekko/src/main/resources/reference.conf b/instrumentation/kamon-pekko/src/main/resources/reference.conf new file mode 100644 index 000000000..283119e6c --- /dev/null +++ b/instrumentation/kamon-pekko/src/main/resources/reference.conf @@ -0,0 +1,210 @@ +# ================================== # +# Kamon-Pekko Reference Configuration # +# ================================== # + +kamon.instrumentation.pekko { + + # If ask-pattern-timeout-warning is enabled, a WARN level log message will be generated if a Future generated by the + # "ask" pattern fails with an AskTimeoutException and the log message will contain information depending of the + # selected strategy. The possible values are: + # + # - off: nothing gets logged. + # - lightweight: logs a warning when a timeout is reached using source location. + # - heavyweight: logs a warning when a timeout is reached using a stack trace captured at the moment the future was created. + # + ask-pattern-timeout-warning = off + + # Automatically creates Actor Groups that contain all actors of the same Class at the same level of the Actor System + # tree. This will only be applied to actors that are not individually tracked, that are not part of another groups and + # that match the "auto-grouping" filter below. This allows, for example, to automatically get metrics from anonymous + # actors without messing with filters and risking cardinality explosions. + # + auto-grouping = on + + # Filters control how and if the instrumentation will track the behavior of Pekko Actors, Routers, Dispatchers and + # Actor Groups. 
All filters have two groups of patterns: includes and excludes; inputs matching at least one of the + includes patterns and none of the excludes patterns will be accepted. You can read more about filters on the Kamon + documentation: https://kamon.io/docs/latest/core/utilities/ + # + # The test string for the filters is always starting with the Actor System name, followed by the path to the + # component being tested. For example, when testing for a "test" actor created at the root of the "pekko-example" Actor + # System, the test string for the filter will be "pekko-example/user/test" and, when testing the default dispatcher for + # that same Actor System the test string for the filters will be "pekko-example/pekko.actor.default-dispatcher". + # + filters { + + # Defines actor groups and the filters that match all actors that should be part of that group. To define a new + # group, add a configuration like the following: + # + # kamon.instrumentation.pekko.filters.groups { + # worker-actors { + # includes = [ "my-system/user/application/worker-*", "my-system/user/workers/**" ] + # excludes = [ ] + # } + # } + # + # The configuration key immediately inside the "groups" path corresponds to the group name and the configuration + # inside of it should contain a Kamon filter (with the includes/excludes settings). + groups { + + # Special filter used for auto-grouping. Auto-grouping will only act on actors that are not being explicitly + # tracked as individual actors, do not belong to any other groups and match this filter. + # + auto-grouping { + includes = [ "*/user/**" ] + excludes = ${?kamon.instrumentation.pekko.filters.groups.auto-grouping.excludes} [ ] + } + } + + # Decides how Actors are going to be tracked and traced. + # + actors { + + # Decides whether the "**" filter can be used for the "track" and "start-trace" filters. 
Historically, users have + been using the "**" filters during testing because of their simplicity and then go to production with the same filter + configuration which usually results in cardinality explosions and/or OOM errors. Since the introduction of auto + grouping, the instrumentation will do a much better job of providing metrics out of the box and forbids the use + of the "doomsday" wildcard. Enable and use at your own risk. + # + doomsday-wildcard = off + + # Decides which actors will have metric tracking enabled. Beware that most of the time what you really need is to + # track Actor groups instead of individual actors because wildly targeting actors can lead to cardinality issues. + # + track { + includes = ${?kamon.instrumentation.pekko.filters.actors.track.includes} [ ] + excludes = [ "*/system/**", "*/user/IO-**" ] + } + + # Decides which actors generate Spans for the messages they process, given that there is already an ongoing trace + # in the Context of the processed message (i.e. there is a Sampled Span in the Context). + # + trace { + includes = [ "*/user/**", "*/system/sharding**" ] + excludes = ${?kamon.instrumentation.pekko.filters.actors.trace.excludes} [ ] + } + + # Decides which actors generate Spans for the messages they process, even if that requires them to start a new + # trace. Use with care, starting traces with a broad filter (e.g. using includes = [ "**" ]) can generate a huge + # amount of traces from scheduled actions and underlying system components that most likely will not improve + # observability of the system and bury useful traces underneath the noise. + # + start-trace { + includes = ${?kamon.instrumentation.pekko.filters.actors.start-trace.includes} [ ] + excludes = ${?kamon.instrumentation.pekko.filters.actors.start-trace.excludes} [ ] + } + } + + # Decides which routers should have metric tracking enabled. 
+ # + routers { + includes = [ "**" ] + excludes = ${?kamon.instrumentation.pekko.filters.routers.excludes} [ ] + } + + # Decides which dispatchers should have metric tracking enabled. + # + dispatchers { + includes = [ "**" ] + excludes = ${?kamon.instrumentation.pekko.filters.dispatchers.excludes} [ ] + } + } + + remote { + + # Controls whether tracking of the serialization, deserialization and message size metrics should be tracked for + # messages going to and coming from a remoting channel. + track-serialization-metrics = true + } + + cluster-sharding { + # Sets the interval at which the Shard metrics (sampling of hosted entities and processed messages across all + # shards) will be sampled. + shard-metrics-sample-interval = ${kamon.metric.tick-interval} + } + + cluster { + + # !! EXPERIMENTAL !! + # + # Decides whether to expose the pekko.cluster.[members|datacenters] metrics. These metrics are considered + # experimental and must be explicitly enabled until a future release when they graduate to stable. The name of + # this setting might change in the future. 
+ track-cluster-metrics = no + + } +} +# Signals to pekko that it should load KamonRemoteInstrument +pekko.remote.artery.advanced.instruments += "org.apache.pekko.remote.artery.KamonRemoteInstrument" +kanela.modules { + + pekko { + name = "Pekko Instrumentation" + description = "Provides metrics and message tracing for Pekko Actor Systems, Actors, Routers and Dispatchers" + enabled = yes + + instrumentations = [ + "kamon.instrumentation.pekko.instrumentations.EnvelopeInstrumentation", + "kamon.instrumentation.pekko.instrumentations.SystemMessageInstrumentation", + "kamon.instrumentation.pekko.instrumentations.RouterInstrumentation", + "kamon.instrumentation.pekko.instrumentations.ActorInstrumentation", + "kamon.instrumentation.pekko.instrumentations.ActorLoggingInstrumentation", + "kamon.instrumentation.pekko.instrumentations.AskPatternInstrumentation", + "kamon.instrumentation.pekko.instrumentations.EventStreamInstrumentation", + "kamon.instrumentation.pekko.instrumentations.ActorRefInstrumentation", + "kamon.instrumentation.pekko.instrumentations.DispatcherInstrumentation", + "kamon.instrumentation.pekko.instrumentations.ActorMonitorInstrumentation", + "kamon.instrumentation.pekko.instrumentations.SchedulerInstrumentation", + "kamon.instrumentation.pekko.instrumentations.ClusterInstrumentation" + ] + + within = [ + "^org.apache.pekko.dispatch..*", + "^org.apache.pekko.event..*", + "^org.apache.pekko.actor..*", + "^org.apache.pekko.pattern..*", + "^org.apache.pekko.cluster..*", + "^org.apache.pekko.routing..*", + "kamon.instrumentation.pekko.instrumentations..*" + ] + } + + pekko-remote { + name = "Pekko Remote Instrumentation" + description = "Provides distributed Context propagation and Cluster Metrics for Pekko" + enabled = yes + + instrumentations = [ + "kamon.instrumentation.pekko.remote.MessageBufferInstrumentation", + "kamon.instrumentation.pekko.instrumentations.remote.RemotingInstrumentation", + ] + + within = [ + "org.apache.pekko.dispatch..*", + 
"org.apache.pekko.util..*", + "org.apache.pekko.remote..*", + "org.apache.pekko.actor..*", + "org.apache.pekko.cluster..*", + "org.apache.pekko.serialization..*" + ] + } + + pekko-remote-sharding { + name = "Pekko Remote Cluster Sharding Monitoring" + description = "Provides cluster sharding metrics for Pekko" + enabled = yes + + instrumentations = [ + "kamon.instrumentation.pekko.remote.ShardingInstrumentation" + ] + + within = [ + "org.apache.pekko.dispatch..*", + "org.apache.pekko.util..*", + "org.apache.pekko.remote..*", + "org.apache.pekko.actor..*" + "org.apache.pekko.cluster..*" + "org.apache.pekko.serialization..*" + ] + } +} diff --git a/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/PekkoClusterShardingMetrics.scala b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/PekkoClusterShardingMetrics.scala new file mode 100644 index 000000000..858dc9b9b --- /dev/null +++ b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/PekkoClusterShardingMetrics.scala @@ -0,0 +1,118 @@ +package kamon.instrumentation.pekko + +import com.typesafe.config.Config + +import java.util.concurrent.{ScheduledFuture, TimeUnit} +import java.util.concurrent.atomic.AtomicLong +import kamon.{AtomicGetOrElseUpdateOnTrieMap, Kamon} +import kamon.metric.{Histogram, InstrumentGroup} +import kamon.module.Module.Registration +import kamon.module.ScheduledAction +import kamon.tag.TagSet + +import scala.collection.concurrent.TrieMap + +object PekkoClusterShardingMetrics { + + val RegionHostedShards = Kamon.rangeSampler ( + name = "pekko.cluster.sharding.region.hosted-shards", + description = "Tracks the number of shards hosted by a region" + ) + + val RegionHostedEntities = Kamon.rangeSampler ( + name = "pekko.cluster.sharding.region.hosted-entities", + description = "Tracks the number of entities hosted by a region" + ) + + val RegionProcessedMessages = Kamon.counter ( + name = "pekko.cluster.sharding.region.processed-messages", + 
description = "Counts the number of messages processed by a region" + ) + + val ShardHostedEntities = Kamon.histogram ( + name = "pekko.cluster.sharding.shard.hosted-entities", + description = "Tracks the distribution of entity counts hosted per shard" + ) + + val ShardProcessedMessages = Kamon.histogram ( + name = "pekko.cluster.sharding.shard.processed-messages", + description = "Tracks the distribution of processed messages per shard" + ) + + class ShardingInstruments(system: String, typeName: String) extends InstrumentGroup(TagSet.of("type", typeName).withTag("system", system)) { + + val hostedShards = register(RegionHostedShards) + val hostedEntities = register(RegionHostedEntities) + val processedMessages = register(RegionProcessedMessages) + val shardHostedEntities = register(ShardHostedEntities) + val shardProcessedMessages = register(ShardProcessedMessages) + + private val _shardTelemetry = ShardingInstruments.shardTelemetry(system, typeName, shardHostedEntities, shardProcessedMessages) + + def hostedEntitiesPerShardCounter(shardID: String): AtomicLong = + _shardTelemetry.entitiesPerShard.atomicGetOrElseUpdate(shardID, new AtomicLong()) + + def processedMessagesPerShardCounter(shardID: String): AtomicLong = + _shardTelemetry.messagesPerShard.atomicGetOrElseUpdate(shardID, new AtomicLong()) + + // We should only remove when the ShardRegion actor is terminated. + override def remove(): Unit = { + ShardingInstruments.removeShardTelemetry(system, typeName) + super.remove() + } + } + + object ShardingInstruments { + + /** + * Assist with tracking the number of entities hosted by a Shard and the number of messages processed by each + * Shard. 
Note that there is a difference in the hosted entities and processed messages at the Region level versus + * at the Shard Level: there is only one Region per type per node, so the number of processed messages is a clear + * indication of how many messages were processed and how many entities are hosted in the region; there can (and + * will be) many Shards on the same node which could generate cardinality issues if we were tracking metrics for + * each individual Shard so, instead, we track the distribution of entities and processed messages across all + * Shards. This behavior can help uncover cases in which Shards are not evenly distributed (both in the messages + * volume and hosted entities aspects) but cannot point out which of the Shards deviates from the common case. + * + * The totals per Shard are tracked locally and sampled in a fixed interval. + */ + case class ShardTelemetry ( + entitiesPerShard: TrieMap[String, AtomicLong], + messagesPerShard: TrieMap[String, AtomicLong], + schedule: Registration + ) + + private val _shardTelemetryMap = TrieMap.empty[String, ShardTelemetry] + + private def shardTelemetry(system: String, typeName: String, shardEntities: Histogram, shardMessages: Histogram): ShardTelemetry = { + _shardTelemetryMap.atomicGetOrElseUpdate(shardTelemetryKey(system, typeName), { + val entitiesPerShard = TrieMap.empty[String, AtomicLong] + val messagesPerShard = TrieMap.empty[String, AtomicLong] + val samplingInterval = PekkoRemoteInstrumentation.settings().shardMetricsSampleInterval + + val schedule = Kamon.addScheduledAction( + s"pekko/shards/${typeName}", + Some(s"Updates health metrics for the ${system}/${typeName} shard every ${samplingInterval.getSeconds} seconds"), + new ScheduledAction { + override def run(): Unit = { + entitiesPerShard.foreach {case (shard, value) => shardEntities.record(value.get())} + messagesPerShard.foreach {case (shard, value) => shardMessages.record(value.getAndSet(0L))} + } + + override def stop(): Unit = {} + 
override def reconfigure(newConfig: Config): Unit = {} + + }, samplingInterval) + + + ShardTelemetry(entitiesPerShard, messagesPerShard, schedule) + }, _.schedule.cancel(): Unit, _ => ()) + } + + private def removeShardTelemetry(system: String, typeName: String): Unit = + _shardTelemetryMap.remove(shardTelemetryKey(system, typeName)).foreach(_.schedule.cancel()) + + private def shardTelemetryKey(system: String, typeName: String): String = + system + ":" + typeName + } +} diff --git a/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/PekkoInstrumentation.scala b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/PekkoInstrumentation.scala new file mode 100644 index 000000000..e5fed7b7b --- /dev/null +++ b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/PekkoInstrumentation.scala @@ -0,0 +1,133 @@ +package kamon.instrumentation.pekko + +import com.typesafe.config.{Config, ConfigFactory} +import kamon.Kamon +import kamon.instrumentation.pekko.PekkoInstrumentation.AskPatternTimeoutWarningSetting.{Heavyweight, Lightweight, Off} +import kamon.util.Filter + +import scala.collection.JavaConverters.{asScalaBufferConverter, asScalaSetConverter} + +object PekkoInstrumentation { + + val TrackActorFilterName = "kamon.instrumentation.pekko.filters.actors.track" + val TraceActorFilterName = "kamon.instrumentation.pekko.filters.actors.trace" + val StartTraceActorFilterName = "kamon.instrumentation.pekko.filters.actors.start-trace" + val TrackAutoGroupFilterName = "kamon.instrumentation.pekko.filters.groups.auto-grouping" + val TrackRouterFilterName = "kamon.instrumentation.pekko.filters.routers" + val TrackDispatcherFilterName = "kamon.instrumentation.pekko.filters.dispatchers" + + @volatile private var _settings = Settings.from(Kamon.config()) + @volatile private var _actorGroups = Map.empty[String, Filter] + @volatile private var _configProvidedActorGroups = Map.empty[String, Filter] + @volatile private var 
_codeProvidedActorGroups = Map.empty[String, Filter] + + loadConfiguration(Kamon.config()) + Kamon.onReconfigure(loadConfiguration(_)) + + /** + * Returns the current pekko Instrumentation settings. + */ + def settings(): PekkoInstrumentation.Settings = + _settings + + /** + * Returns all Actor Group names that should contain an actor with the provided path. + */ + def matchingActorGroups(path: String): Seq[String] = { + _actorGroups.filter { case (_, v) => v.accept(path) }.keys.toSeq + } + + /** + * Creates a new Actor Group definition. Take into account that Actors are added to Actor Groups during their + * initialization process only, which means that a newly defined Actor Group will only include matching actors + * created after the definition succeeded. + * + * Returns true if the definition was successful and false if a group with the defined name is already available. + */ + def defineActorGroup(groupName: String, filter: Filter): Boolean = synchronized { + if(_codeProvidedActorGroups.get(groupName).isEmpty) { + _codeProvidedActorGroups = _codeProvidedActorGroups + (groupName -> filter) + _actorGroups = _codeProvidedActorGroups ++ _configProvidedActorGroups + true + } else false + } + + /** + * Removes a programmatically created Actor Group definition. This method can only remove definitions that were + * created via the "defineActorGroup" method. 
+ */ + def removeActorGroup(groupName: String): Unit = synchronized { + _codeProvidedActorGroups = _codeProvidedActorGroups - groupName + _actorGroups = _codeProvidedActorGroups ++ _configProvidedActorGroups + } + + private def loadConfiguration(config: Config): Unit = synchronized { + val pekkoConfig = config.getConfig("kamon.instrumentation.pekko") + val groupsConfig = pekkoConfig.getConfig("filters.groups") + + _configProvidedActorGroups = groupsConfig.root.entrySet().asScala + .filter(_.getKey != "auto-grouping") + .map(entry => { + val groupName = entry.getKey + groupName -> Filter.from(groupsConfig.getConfig(groupName)) + }).toMap + + _actorGroups = _codeProvidedActorGroups ++ _configProvidedActorGroups + _settings = Settings.from(config) + } + + /** + * pekko Instrumentation settings + */ + case class Settings ( + askPatternWarning: AskPatternTimeoutWarningSetting, + autoGrouping: Boolean, + allowDoomsdayWildcards: Boolean, + safeActorTrackFilter: Filter, + safeActorStartTraceFilter: Filter, + exposeClusterMetrics: Boolean + ) + + object Settings { + + def from(config: Config): Settings = { + val pekkoConfig = config.getConfig("kamon.instrumentation.pekko") + val allowDoomsdayWildcards = pekkoConfig.getBoolean("filters.actors.doomsday-wildcard") + val exposeClusterMetrics = pekkoConfig.getBoolean("cluster.track-cluster-metrics") + + val askPatternWarning = pekkoConfig.getString("ask-pattern-timeout-warning") match { + case "off" => Off + case "lightweight" => Lightweight + case "heavyweight" => Heavyweight + case other => sys.error(s"Unrecognized option [$other] for the kamon.pekko.ask-pattern-timeout-warning config.") + } + + PekkoInstrumentation.Settings( + askPatternWarning, + pekkoConfig.getBoolean("auto-grouping"), + allowDoomsdayWildcards, + safeFilter(config.getConfig(TrackActorFilterName), allowDoomsdayWildcards), + safeFilter(config.getConfig(StartTraceActorFilterName), allowDoomsdayWildcards), + exposeClusterMetrics + ) + } + + private def 
safeFilter(config: Config, allowDoomsday: Boolean): Filter = { + val includes = config.getStringList("includes").asScala + if(!allowDoomsday && includes.contains("**")) { + val newIncludes = "includes = " + includes.filter(_ == "**").map(s => s""""$s"""").mkString("[ ", ", ", " ]") + val safeFilterConfig = ConfigFactory.parseString(newIncludes).withFallback(config) + + Filter.from(safeFilterConfig) + + } else Filter.from(config) + } + } + + sealed trait AskPatternTimeoutWarningSetting + object AskPatternTimeoutWarningSetting { + case object Off extends AskPatternTimeoutWarningSetting + case object Lightweight extends AskPatternTimeoutWarningSetting + case object Heavyweight extends AskPatternTimeoutWarningSetting + } +} diff --git a/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/PekkoMetrics.scala b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/PekkoMetrics.scala new file mode 100644 index 000000000..69ae24fd2 --- /dev/null +++ b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/PekkoMetrics.scala @@ -0,0 +1,193 @@ +package kamon.instrumentation.pekko + +import kamon.Kamon +import kamon.instrumentation.pekko.instrumentations.ActorCellInfo +import kamon.metric.InstrumentGroup +import kamon.tag.TagSet + +import scala.collection.concurrent.TrieMap + +object PekkoMetrics { + + private val _groupInstrumentsCache = TrieMap.empty[String, ActorGroupInstruments] + private val _systemInstrumentsCache = TrieMap.empty[String, ActorSystemInstruments] + + /** + * Actor Metrics + */ + + val ActorTimeInMailbox = Kamon.timer ( + name = "pekko.actor.time-in-mailbox", + description = "Tracks the time since the instant a message is enqueued in an Actor's mailbox until it is dequeued for processing" + ) + + val ActorProcessingTime = Kamon.timer ( + name = "pekko.actor.processing-time", + description = "Tracks the time taken for the actor to process the receive function" + ) + + val ActorMailboxSize = 
Kamon.rangeSampler( + name = "pekko.actor.mailbox-size", + description = "Tracks the behavior of an Actor's mailbox size" + ) + + val ActorErrors = Kamon.counter ( + name = "pekko.actor.errors", + description = "Counts the number of processing errors experienced by an Actor" + ) + + def forActor(path: String, system: String, dispatcher: String, actorClass: Class[_]): ActorInstruments = { + val tags = TagSet.builder() + .add("path", path) + .add("system", system) + .add("dispatcher", dispatcher) + if (!ActorCellInfo.isTyped(actorClass)) tags.add("class", actorClass.getName) + new ActorInstruments(tags.build()) + } + + class ActorInstruments(tags: TagSet) extends InstrumentGroup(tags) { + val timeInMailbox = register(ActorTimeInMailbox) + val processingTime = register(ActorProcessingTime) + val mailboxSize = register(ActorMailboxSize) + val errors = register(ActorErrors) + } + + + /** + * Router Metrics + */ + + val RouterRoutingTime = Kamon.timer ( + name = "pekko.router.routing-time", + description = "Tracks the time taken by a router to process its routing logic" + ) + + val RouterTimeInMailbox = Kamon.timer ( + name = "pekko.router.time-in-mailbox", + description = "Tracks the time since the instant a message is enqueued in a routee's mailbox until it is dequeued for processing" + ) + + val RouterProcessingTime = Kamon.timer ( + name = "pekko.router.processing-time", + description = "Tracks the time taken for a routee to process the receive function" + ) + + val RouterPendingMessages = Kamon.rangeSampler ( + name = "pekko.router.pending-messages", + description = "Tracks the number of messages waiting to be processed across all routees" + ) + + val RouterMembers = Kamon.rangeSampler ( + name = "pekko.router.members", + description = "Tracks the number of routees belonging to a router" + ) + + val RouterErrors = Kamon.counter ( + name = "pekko.router.errors", + description = "Counts the number of processing errors experienced by the routees of a router" + ) + + 
def forRouter(path: String, system: String, dispatcher: String, routerClass: Class[_], routeeClass: String): RouterInstruments = { + val tags = TagSet.builder() + .add("path", path) + .add("system", system) + .add("dispatcher", dispatcher) + .add("routeeClass", routeeClass) + if (!ActorCellInfo.isTyped(routerClass)) tags.add("routerClass", routerClass.getName) + new RouterInstruments(tags.build()) + + } + + class RouterInstruments(tags: TagSet) extends InstrumentGroup(tags) { + val routingTime = register(RouterRoutingTime) + val timeInMailbox = register(RouterTimeInMailbox) + val processingTime = register(RouterProcessingTime) + val pendingMessages = register(RouterPendingMessages) + val members = register(RouterMembers) + val errors = register(RouterErrors) + } + + + /** + * Actor Group Metrics + */ + + val GroupTimeInMailbox = Kamon.timer ( + name = "pekko.group.time-in-mailbox", + description = "Tracks the time since the instant a message is enqueued in a member's mailbox until it is dequeued for processing" + ) + + val GroupProcessingTime = Kamon.timer ( + name = "pekko.group.processing-time", + description = "Tracks the time taken for a member actor to process the receive function" + ) + + val GroupPendingMessages = Kamon.rangeSampler ( + name = "pekko.group.pending-messages", + description = "Tracks the number of messages waiting to be processed across all members" + ) + + val GroupMembers = Kamon.rangeSampler ( + name = "pekko.group.members", + description = "Tracks the number of routees belonging to a group" + ) + + val GroupErrors = Kamon.counter ( + name = "pekko.group.errors", + description = "Counts the number of processing errors experienced by the members of a group" + ) + + def forGroup(group: String, system: String): ActorGroupInstruments = + _groupInstrumentsCache.getOrElseUpdate(system + "/" + group, { + val tags = TagSet.builder() + .add("group", group) + .add("system", system) + + new ActorGroupInstruments(tags.build()) + }) + + + case class 
ActorGroupInstruments(tags: TagSet) extends InstrumentGroup(tags) { + val timeInMailbox = register(GroupTimeInMailbox) + val processingTime = register(GroupProcessingTime) + val pendingMessages = register(GroupPendingMessages) + val members = register(GroupMembers) + val errors = register(GroupErrors) + } + + + /** + * Actor System Metrics + */ + + val SystemDeadLetters = Kamon.counter ( + name = "pekko.system.dead-letters", + description = "Counts the number of dead letters in an Actor System" + ) + + val SystemUnhandledMessages = Kamon.counter ( + name = "pekko.system.unhandled-messages", + description = "Counts the number of unhandled messages in an Actor System" + ) + + val SystemProcessedMessages = Kamon.counter ( + name = "pekko.system.processed-messages", + description = "Counts the number of processed messages in an Actor System" + ) + + val SystemActiveActors = Kamon.rangeSampler ( + name = "pekko.system.active-actors", + description = "Tracks the number of active Actors in an Actor System" + ) + + def forSystem(name: String): ActorSystemInstruments = + _systemInstrumentsCache.atomicGetOrElseUpdate(name, new ActorSystemInstruments(TagSet.of("system", name))) + + class ActorSystemInstruments(tags: TagSet) extends InstrumentGroup(tags) { + val deadLetters = register(SystemDeadLetters) + val unhandledMessages = register(SystemUnhandledMessages) + val processedMessagesByTracked = register(SystemProcessedMessages, "tracked", true) + val processedMessagesByNonTracked = register(SystemProcessedMessages, "tracked", false) + val activeActors = register(SystemActiveActors) + } +} diff --git a/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/PekkoRemoteInstrumentation.scala b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/PekkoRemoteInstrumentation.scala new file mode 100644 index 000000000..780265637 --- /dev/null +++ 
b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/PekkoRemoteInstrumentation.scala @@ -0,0 +1,27 @@ +package kamon.instrumentation.pekko + +import java.time.Duration + +import com.typesafe.config.Config +import kamon.Kamon + + +object PekkoRemoteInstrumentation { + + @volatile private var _settings = readSettings(Kamon.config()) + Kamon.onReconfigure(newConfig => _settings = readSettings(newConfig)) + + def settings(): Settings = + _settings + + private def readSettings(config: Config): Settings = + Settings( + config.getBoolean("kamon.instrumentation.pekko.remote.track-serialization-metrics"), + config.getDuration("kamon.instrumentation.pekko.cluster-sharding.shard-metrics-sample-interval") + ) + + case class Settings( + trackSerializationMetrics: Boolean, + shardMetricsSampleInterval: Duration + ) +} diff --git a/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/PekkoRemoteMetrics.scala b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/PekkoRemoteMetrics.scala new file mode 100644 index 000000000..dca48d5a9 --- /dev/null +++ b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/PekkoRemoteMetrics.scala @@ -0,0 +1,45 @@ +package kamon.instrumentation.pekko + +import kamon.Kamon +import kamon.metric.InstrumentGroup +import kamon.metric.MeasurementUnit.information +import kamon.tag.TagSet + +import scala.collection.concurrent.TrieMap + +object PekkoRemoteMetrics { + + val InboundMessageSize = Kamon.histogram ( + name = "pekko.remote.messages.inbound.size", + description = "Tracks the distribution of inbound message sizes", + unit = information.bytes + ) + + val OutboundMessageSize = Kamon.histogram ( + name = "pekko.remote.messages.outbound.size", + description = "Tracks the distribution of outbound message sizes", + unit = information.bytes + ) + + val SerializationTime = Kamon.timer ( + name = "pekko.remote.serialization-time", + description = "Tracks the time taken to serialize 
outgoing messages" + ) + + val DeserializationTime = Kamon.timer ( + name = "pekko.remote.deserialization-time", + description = "Tracks the time taken to deserialize incoming messages" + ) + + private val _serializationInstrumentsCache = TrieMap.empty[String, SerializationInstruments] + + class SerializationInstruments(systemName: String) extends InstrumentGroup(TagSet.of("system", systemName)) { + val inboundMessageSize = register(InboundMessageSize) + val outboundMessageSize = register(OutboundMessageSize) + val serializationTime = register(SerializationTime) + val deserializationTime = register(DeserializationTime) + } + + def serializationInstruments(system: String): SerializationInstruments = + _serializationInstrumentsCache.atomicGetOrElseUpdate(system, new SerializationInstruments(system)) +} diff --git a/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/ActorCellInfo.scala b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/ActorCellInfo.scala new file mode 100644 index 000000000..604f8047b --- /dev/null +++ b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/ActorCellInfo.scala @@ -0,0 +1,104 @@ +package kamon.instrumentation.pekko.instrumentations + +import org.apache.pekko.actor.{ActorRef, ActorSystem, Props} +import org.apache.pekko.routing.{BalancingPool, NoRouter} + +import scala.language.existentials + +/** + * Basic information that should be read from an ActorCell for instrumentation purposes. 
+ */ +case class ActorCellInfo ( + path: String, + name: String, + systemName: String, + dispatcherName: String, + isRouter: Boolean, + isRoutee: Boolean, + isRootSupervisor: Boolean, + isStreamImplementationActor: Boolean, + isTemporary: Boolean, + actorOrRouterClass: Class[_], + routeeClass: Option[Class[_]] +) + +object ActorCellInfo { + + private val StreamsSupervisorActorClassName = "org.apache.pekko.stream.impl.StreamSupervisor" + private val StreamsInterpreterActorClassName = "org.apache.pekko.stream.impl.fusing.ActorGraphInterpreter" + + /** + * Reads information from an ActorCell. + */ + def from(cell: Any, ref: ActorRef, parent: ActorRef, system: ActorSystem): ActorCellInfo = { + val props = PekkoPrivateAccess.cellProps(cell).get + val actorName = ref.path.name + + val pathString = ref.path.elements.mkString("/") + val isRootSupervisor = pathString.length == 0 || pathString == "user" || pathString == "system" + val isRouter = hasRouterProps(props) + val isRoutee = PekkoPrivateAccess.isRoutedActorRef(parent) + val isTemporary = PekkoPrivateAccess.isUnstartedActorCell(cell) + + val (actorOrRouterClass, routeeClass) = + if(isRouter) + (props.routerConfig.getClass, Some(ref.asInstanceOf[HasRouterProps].routeeProps.actorClass)) + else if (isRoutee) + (parent.asInstanceOf[HasRouterProps].routerProps.routerConfig.getClass, Some(props.actorClass)) + else + (props.actorClass(), None) + + val fullPath = if (isRoutee) cellName(system, parent) else cellName(system, ref) + val dispatcherName = if(isRouter) { + if(props.routerConfig.isInstanceOf[BalancingPool]) { + + // Even though the router actor for a BalancingPool can have a different dispatcher we will + // assign the name of the same dispatcher where the routees will run to ensure all metrics are + // correlated and cleaned up correctly. 
+ val deployPath = ref.path.elements.drop(1).mkString("/", "/", "") + "BalancingPool-" + deployPath + + } else { + + // It might happen that the deployment configuration will provide a different dispatcher name + // for the routees and we should catch that case only when creating the router (the routees will + // be initialized with an updated Props instance. + PekkoPrivateAccess.lookupDeploy(ref.path, system).map(_.dispatcher).getOrElse(props.dispatcher) + } + } else props.dispatcher + + val actorClassName = actorOrRouterClass.getName + val isStreamImplementationActor = + actorClassName == StreamsSupervisorActorClassName || actorClassName == StreamsInterpreterActorClassName + + ActorCellInfo(fullPath, actorName, system.name, dispatcherName, isRouter, isRoutee, isRootSupervisor, + isStreamImplementationActor, isTemporary, actorOrRouterClass, routeeClass) + } + + /** + * Returns a simple Class name, working around issues that might arise when using double nested classes in Scala. + */ + def simpleClassName(cls: Class[_]): String = { + // Class.getSimpleName could fail if called on a double-nested class. + // See https://github.com/scala/bug/issues/2034 for more details. 
+ try { cls.getSimpleName } catch { case _: Throwable => { + val className = cls.getName + val lastSeparator = className.lastIndexOf('.') + + if(lastSeparator > 0) + className.substring(lastSeparator + 1) + else + className + }} + } + + def isTyped(className: Class[_]): Boolean = { + simpleClassName(className) == "ActorAdapter" + } + + private def hasRouterProps(props: Props): Boolean = + props.deploy.routerConfig != NoRouter + + private def cellName(system: ActorSystem, ref: ActorRef): String = + system.name + "/" + ref.path.elements.mkString("/") +} diff --git a/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/ActorInstrumentation.scala b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/ActorInstrumentation.scala new file mode 100644 index 000000000..1a7a5eefe --- /dev/null +++ b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/ActorInstrumentation.scala @@ -0,0 +1,147 @@ +/* + * ========================================================================================= + * Copyright © 2013-2018 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ * ========================================================================================= + */ + +package kamon.instrumentation.pekko.instrumentations + +import org.apache.pekko.actor.{ActorRef, ActorSystem} +import kamon.Kamon +import kamon.context.Storage.Scope +import kamon.instrumentation.pekko.instrumentations.HasActorMonitor.actorMonitor +import kamon.instrumentation.context.{HasContext, HasTimestamp} +import kanela.agent.api.instrumentation.InstrumentationBuilder +import kanela.agent.libs.net.bytebuddy.asm.Advice +import kanela.agent.libs.net.bytebuddy.asm.Advice.{Argument, OnMethodEnter, OnMethodExit, This} +import org.apache.pekko.actor.instrumentation.ReplaceWithAdvice + +class ActorInstrumentation extends InstrumentationBuilder { + + /** + * This is where most of the Actor processing magic happens. Handling of messages, errors and system messages. + */ + onType("org.apache.pekko.actor.ActorCell") + .mixin(classOf[HasActorMonitor.Mixin]) + .advise(isConstructor, ActorCellConstructorAdvice) + .advise(method("invoke"), classOf[ActorCellInvokeAdvice]) + .advise(method("handleInvokeFailure"), HandleInvokeFailureMethodAdvice) + .advise(method("sendMessage").and(takesArguments(1)), SendMessageAdvice) + .advise(method("terminate"), TerminateMethodAdvice) + .advise(method("swapMailbox"), ActorCellSwapMailboxAdvice) + .advise(method("invokeAll$1"), InvokeAllMethodInterceptor) + + /** + * Ensures that the Context is properly propagated when messages are temporarily stored on an UnstartedCell. 
+ */ + onType("org.apache.pekko.actor.UnstartedCell") + .mixin(classOf[HasActorMonitor.Mixin]) + .advise(isConstructor, RepointableActorCellConstructorAdvice) + .advise(method("sendMessage").and(takesArguments(1)), SendMessageAdvice) + .advise(method("replaceWith"), classOf[ReplaceWithAdvice]) + +} + +trait HasActorMonitor { + def actorMonitor: ActorMonitor + def setActorMonitor(actorMonitor: ActorMonitor): Unit +} + +object HasActorMonitor { + + class Mixin(var actorMonitor: ActorMonitor) extends HasActorMonitor { + override def setActorMonitor(actorMonitor: ActorMonitor): Unit = + this.actorMonitor = actorMonitor + } + + def actorMonitor(cell: Any): ActorMonitor = + cell.asInstanceOf[HasActorMonitor].actorMonitor +} + +object ActorCellSwapMailboxAdvice { + + @Advice.OnMethodEnter + def enter(@Advice.This cell: Any, @Advice.Argument(0) newMailbox: Any): Boolean = { + val isShuttingDown = PekkoPrivateAccess.isDeadLettersMailbox(cell, newMailbox) + if(isShuttingDown) + actorMonitor(cell).onTerminationStart() + + isShuttingDown + } + + @Advice.OnMethodExit + def exit(@Advice.This cell: Any, @Advice.Return oldMailbox: Any, @Advice.Enter isShuttingDown: Boolean): Unit = { + if(oldMailbox != null && isShuttingDown) { + actorMonitor(cell).onDroppedMessages(PekkoPrivateAccess.mailboxMessageCount(oldMailbox)) + } + } +} + +object InvokeAllMethodInterceptor { + + @Advice.OnMethodEnter + def enter(@Advice.Argument(0) message: Any): Option[Scope] = + message match { + case m: HasContext => Some(Kamon.storeContext(m.context)) + case _ => None + } + + @Advice.OnMethodExit + def exit(@Advice.Enter scope: Option[Scope]): Unit = + scope.foreach(_.close()) +} + +object SendMessageAdvice { + + @OnMethodEnter(suppress = classOf[Throwable]) + def onEnter(@This cell: Any, @Argument(0) envelope: Object): Unit = { + + val instrumentation = actorMonitor(cell) + envelope.asInstanceOf[HasContext].setContext(instrumentation.captureEnvelopeContext()) + 
envelope.asInstanceOf[HasTimestamp].setTimestamp(instrumentation.captureEnvelopeTimestamp()) + } +} + +object RepointableActorCellConstructorAdvice { + + @Advice.OnMethodExit(suppress = classOf[Throwable]) + def onExit(@This cell: Any, @Argument(0) system: ActorSystem, @Argument(1) ref: ActorRef, @Argument(3) parent: ActorRef): Unit = + cell.asInstanceOf[HasActorMonitor].setActorMonitor(ActorMonitor.from(cell, ref, parent, system)) +} + +object ActorCellConstructorAdvice { + + @OnMethodExit(suppress = classOf[Throwable]) + def onExit(@This cell: Any, @Argument(0) system: ActorSystem, @Argument(1) ref: ActorRef, @Argument(4) parent: ActorRef): Unit = + cell.asInstanceOf[HasActorMonitor].setActorMonitor(ActorMonitor.from(cell, ref, parent, system)) +} + +object HandleInvokeFailureMethodAdvice { + + @OnMethodEnter(suppress = classOf[Throwable]) + def onEnter(@This cell: Any, @Argument(1) failure: Throwable): Unit = + actorMonitor(cell).onFailure(failure) + +} + +object TerminateMethodAdvice { + + @OnMethodEnter(suppress = classOf[Throwable]) + def onEnter(@This cell: Any): Unit = { + actorMonitor(cell).cleanup() + + if (PekkoPrivateAccess.isRoutedActorCell(cell)) { + cell.asInstanceOf[HasRouterMonitor].routerMonitor.cleanup() + } + } +} diff --git a/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/ActorLoggingInstrumentation.scala b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/ActorLoggingInstrumentation.scala new file mode 100644 index 000000000..64ced4df0 --- /dev/null +++ b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/ActorLoggingInstrumentation.scala @@ -0,0 +1,48 @@ +/* + * ========================================================================================= + * Copyright © 2013-2018 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the 
License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ + +package kamon.instrumentation.pekko.instrumentations + +import org.apache.pekko.event.Logging.LogEvent +import kamon.Kamon +import kamon.context.Storage.Scope +import kamon.instrumentation.context.HasContext +import kanela.agent.api.instrumentation.InstrumentationBuilder +import kanela.agent.libs.net.bytebuddy.asm.Advice.{Argument, Enter, OnMethodEnter, OnMethodExit} + +class ActorLoggingInstrumentation extends InstrumentationBuilder { + + /** + * Captures the Context that was present when a logging event was created and then sets it as current when it is + * being processed by the logging actor. 
+ */ + onSubTypesOf("org.apache.pekko.event.Logging$LogEvent") + .mixin(classOf[HasContext.MixinWithInitializer]) + + onType("org.apache.pekko.event.slf4j.Slf4jLogger") + .advise(method("withMdc"), WithMdcMethodAdvice) +} + +object WithMdcMethodAdvice { + + @OnMethodEnter + def enter(@Argument(1) logEvent: LogEvent): Scope = + Kamon.storeContext(logEvent.asInstanceOf[HasContext].context) + + @OnMethodExit + def exit(@Enter scope: Scope): Unit = + scope.close() +} \ No newline at end of file diff --git a/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/ActorMonitor.scala b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/ActorMonitor.scala new file mode 100644 index 000000000..f17bd61fc --- /dev/null +++ b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/ActorMonitor.scala @@ -0,0 +1,462 @@ +package kamon.instrumentation.pekko.instrumentations + +import org.apache.pekko.actor.{ActorRef, ActorSystem} +import org.apache.pekko.dispatch.Envelope +import kamon.Kamon +import kamon.context.Context +import kamon.context.Storage.Scope +import kamon.instrumentation.pekko.PekkoInstrumentation._ +import kamon.instrumentation.pekko.PekkoMetrics.{ActorGroupInstruments, ActorInstruments, RouterInstruments} +import kamon.instrumentation.pekko.{PekkoInstrumentation, PekkoMetrics} +import kamon.trace.Span + +/** + * Exposes the callbacks to be executed when an instrumented ActorCell is performing its duties. These callbacks apply + * to all Actors, regardless of them being "regular" actors or routees in a Router. + */ +trait ActorMonitor { + + /** + * Captures the Context to be injected in Envelopes for targeting the monitored actor. + */ + def captureEnvelopeContext(): Context + + /** + * Captures the timestamp to be injected in Envelopes for targeting the monitored actor. 
+ */ + def captureEnvelopeTimestamp(): Long + + /** + * Captures the timestamp at the instant when message processing starts. The value captured here will be passed to + * the onMessageProcessingEnd method. + */ + def captureProcessingStartTimestamp(): Long + + /** + * Callback executed when message processing is about to start. Any value returned by this method will be used as the + * last parameter of the onMessageProcessingEnd method. + */ + def onMessageProcessingStart(context: Context, envelopeTimestamp: Long, envelope: Envelope): Any + + /** + * Callback executed when message processing has ended. + */ + def onMessageProcessingEnd(context: Context, envelopeTimestamp: Long, processingStartTimestamp: Long, stateFromStart: Any): Unit + + /** + * Callback executed when an exception is thrown by the monitored Actor. + */ + def onFailure(failure: Throwable): Unit + + /** + * Callback executed when messages in the monitored Actor's mailbox need to be dropped. + */ + def onDroppedMessages(count: Long): Unit + + /** + * Callback executed when the actor is about to terminate. + */ + def onTerminationStart(): Unit + + /** + * Callback executed when an Actor has been stopped and any state or resources related to it should be cleaned up. + */ + def cleanup(): Unit + +} + +object ActorMonitor { + + /** + * Creates an ActorMonitor based on all configuration settings on the Pekko instrumentation. 
+ */ + def from(actorCell: Any, ref: ActorRef, parent: ActorRef, system: ActorSystem): ActorMonitor = { + val cell = ActorCellInfo.from(actorCell, ref, parent, system) + val settings = PekkoInstrumentation.settings() + val isTraced = Kamon.filter(TraceActorFilterName).accept(cell.path) + val startsTrace = settings.safeActorStartTraceFilter.accept(cell.path) + val participatesInTracing = (isTraced || startsTrace) && !cell.isStreamImplementationActor + val autoGroupingPath = resolveAutoGroupingPath(cell.actorOrRouterClass, ref, parent, system.name) + + def traceWrap(monitor: ActorMonitor): ActorMonitor = + if(participatesInTracing) new TracedMonitor(cell, startsTrace, monitor) else monitor + + val monitor = { + + if (cell.isRouter) { + + // A router cell is only used for Context propagation and most of the actual metrics are being + // tracked in the routees' cells. + new ContextPropagationOnly(cell, participatesInTracing, trackActiveActors = false) + + } else { + + val trackedFilter = if (cell.isRouter || cell.isRoutee) Kamon.filter(TrackRouterFilterName) else settings.safeActorTrackFilter + val isTracked = !cell.isRootSupervisor && trackedFilter.accept(cell.path) + val trackingGroups: Seq[ActorGroupInstruments] = if (cell.isRootSupervisor) List() else { + val configuredMatchingGroups = PekkoInstrumentation.matchingActorGroups(cell.path) + + if (configuredMatchingGroups.isEmpty && !isTracked + && settings.autoGrouping && !cell.isRouter + && !cell.isRoutee && !ActorCellInfo.isTyped(cell.actorOrRouterClass)) { + if (!trackedFilter.excludes(cell.path) && Kamon.filter(TrackAutoGroupFilterName).accept(autoGroupingPath)) + List(PekkoMetrics.forGroup(autoGroupingPath, system.name)) + else + List.empty + + } else { + + configuredMatchingGroups.map(groupName => { + PekkoMetrics.forGroup(groupName, cell.systemName) + }) + } + } + + if (cell.isRoutee && isTracked) + createRouteeMonitor(cell, trackingGroups) + else + createRegularActorMonitor(cell, isTracked, 
participatesInTracing, trackingGroups) + } + } + + traceWrap(monitor) + } + + + private def createRegularActorMonitor(cellInfo: ActorCellInfo, isTracked: Boolean, participatesInTracing: Boolean, + groupMetrics: Seq[ActorGroupInstruments]): ActorMonitor = { + + if (isTracked || !groupMetrics.isEmpty) { + val actorMetrics: Option[PekkoMetrics.ActorInstruments] = if (!isTracked) None else { + Some(PekkoMetrics.forActor( + cellInfo.path, + cellInfo.systemName, + cellInfo.dispatcherName, + cellInfo.actorOrRouterClass + )) + } + + // Pretty much all actors will end up here because of auto-grouping being enabled by default. + new TrackedActor(actorMetrics, groupMetrics, cellInfo) + + } else { + + // If the actor is not doing any sort of tracking, it should at least do Context propagation. + new ActorMonitor.ContextPropagationOnly(cellInfo, participatesInTracing, trackActiveActors = true) + } + } + + private def createRouteeMonitor(cellInfo: ActorCellInfo, groupMetrics: Seq[ActorGroupInstruments]): ActorMonitor = { + val routerMetrics = PekkoMetrics.forRouter( + cellInfo.path, + cellInfo.systemName, + cellInfo.dispatcherName, + cellInfo.actorOrRouterClass, + cellInfo.routeeClass.filterNot(ActorCellInfo.isTyped).map(_.getName).getOrElse("Unknown") + ) + + new TrackedRoutee(routerMetrics, groupMetrics, cellInfo) + } + + private def resolveAutoGroupingPath(actorClass: Class[_], ref: ActorRef, parent: ActorRef, systemName: String): String = { + val name = ref.path.name + val elementCount = ref.path.elements.size + + val parentPath = if(parent.isInstanceOf[HasGroupPath]) parent.asInstanceOf[HasGroupPath].groupPath else "" + val refGroupName = { + if(elementCount == 1) + if(name == "/") "" else systemName + "/" + name + else + ActorCellInfo.simpleClassName(actorClass) + } + + val refGroupPath = if(parentPath.isEmpty) refGroupName else parentPath + "/" + refGroupName + ref.asInstanceOf[HasGroupPath].setGroupPath(refGroupPath) + refGroupPath + } + + /** + * Wraps another
ActorMonitor implementation and provides tracing capabilities on top of it. + */ + class TracedMonitor(cellInfo: ActorCellInfo, startsTrace: Boolean, monitor: ActorMonitor) extends ActorMonitor { + + override def captureEnvelopeTimestamp(): Long = + monitor.captureEnvelopeTimestamp() + + override def captureEnvelopeContext(): Context = + monitor.captureEnvelopeContext() + + override def captureProcessingStartTimestamp(): Long = + monitor.captureProcessingStartTimestamp() + + override def onMessageProcessingStart(context: Context, envelopeTimestamp: Long, envelope: Envelope): Any = { + val incomingContext = context + if(incomingContext.get(Span.Key).isEmpty && !startsTrace) { + // We will not generate a Span unless message processing is happening inside of a trace. + new SpanAndMonitorState(null, monitor.onMessageProcessingStart(context, envelopeTimestamp, envelope)) + + } else { + val messageSpan = buildSpan(cellInfo, context, envelopeTimestamp, envelope).start() + val contextWithMessageSpan = incomingContext.withEntry(Span.Key, messageSpan) + new SpanAndMonitorState(messageSpan, monitor.onMessageProcessingStart(contextWithMessageSpan, envelopeTimestamp, envelope)) + } + } + + override def onMessageProcessingEnd(context: Context, envelopeTimestamp: Long, processingStartTimestamp: Long, stateFromStart: Any): Unit = { + val spanAndMonitor = stateFromStart.asInstanceOf[SpanAndMonitorState] + monitor.onMessageProcessingEnd(context, envelopeTimestamp, processingStartTimestamp, spanAndMonitor.wrappedMonitorState) + if (spanAndMonitor.span != null) + spanAndMonitor.span.asInstanceOf[Span].finish() + } + + override def onFailure(failure: Throwable): Unit = + monitor.onFailure(failure) + + override def onDroppedMessages(count: Long): Unit = + monitor.onDroppedMessages(count) + + override def onTerminationStart(): Unit = + monitor.onTerminationStart() + + override def cleanup(): Unit = + monitor.cleanup() + + private def extractMessageClass(envelope: Envelope): String = { + 
ActorCellInfo.simpleClassName(envelope.message.getClass) + } + + private def buildSpan(cellInfo: ActorCellInfo, context: Context, envelopeTimestamp: Long, envelope: Envelope): Span.Delayed = { + val messageClass = extractMessageClass(envelope) + val parentSpan = context.get(Span.Key) + + val spanBuilder = Kamon.internalSpanBuilder(operationName(messageClass, envelope.sender), "pekko.actor") + .asChildOf(parentSpan) + .doNotTrackMetrics() + .tag("pekko.system", cellInfo.systemName) + .tag("pekko.actor.path", cellInfo.path) + .tag("pekko.actor.message-class", messageClass) + if (!ActorCellInfo.isTyped(cellInfo.actorOrRouterClass)) { + spanBuilder.tag("pekko.actor.class", cellInfo.actorOrRouterClass.getName) + } + spanBuilder.delay(Kamon.clock().toInstant(envelopeTimestamp)) + } + + private def operationName(messageClass: String, sender: ActorRef): String = { + val operationType = if(PekkoPrivateAccess.isPromiseActorRef(sender)) "ask" else "tell" + + StringBuilder.newBuilder + .append(operationType) + .append("(") + .append(messageClass) + .append(")") + .result() + } + + private class SpanAndMonitorState(val span: Span, val wrappedMonitorState: Any) + } + + /** + * Basic implementation that only provides Context propagation across Actors. 
+ */ + class ContextPropagationOnly(cellInfo: ActorCellInfo, participatesInTracing: Boolean, trackActiveActors: Boolean) extends ActorMonitor { + private val _systemMetrics = PekkoMetrics.forSystem(cellInfo.systemName) + + if(trackActiveActors && !cellInfo.isTemporary) { + _systemMetrics.activeActors.increment() + } + + override def captureEnvelopeTimestamp(): Long = + if(participatesInTracing) Kamon.clock().nanos() else 0L + + override def captureEnvelopeContext(): Context = + Kamon.currentContext() + + override def captureProcessingStartTimestamp(): Long = + if(participatesInTracing) Kamon.clock().nanos() else 0L + + override def onMessageProcessingStart(context: Context, envelopeTimestamp: Long, envelope: Envelope): Any = { + _systemMetrics.processedMessagesByNonTracked.increment() + Kamon.storeContext(context) + } + + override def onMessageProcessingEnd(context: Context, envelopeTimestamp: Long, processingStartTimestamp: Long, stateFromStart: Any): Unit = + stateFromStart.asInstanceOf[Scope].close() + + override def onFailure(failure: Throwable): Unit = {} + + override def onDroppedMessages(count: Long): Unit = {} + + override def onTerminationStart(): Unit = {} + + def cleanup(): Unit = { + if(trackActiveActors && !cellInfo.isTemporary) + _systemMetrics.activeActors.decrement() + } + } + + /** + * ActorMonitor that tracks Actor and/or Group metrics and performs Context propagation. 
+ */ + class TrackedActor(actorMetrics: Option[ActorInstruments], groupMetrics: Seq[ActorGroupInstruments], cellInfo: ActorCellInfo) + extends GroupMetricsTrackingActor(groupMetrics, cellInfo) { + + private val _processedMessagesCounter = PekkoMetrics.forSystem(cellInfo.systemName).processedMessagesByTracked + + override def captureEnvelopeTimestamp(): Long = + super.captureEnvelopeTimestamp() + + override def captureEnvelopeContext(): Context = { + actorMetrics.foreach { am => am.mailboxSize.increment() } + super.captureEnvelopeContext() + } + + override def onMessageProcessingStart(context: Context, envelopeTimestamp: Long, envelope: Envelope): Any = { + _processedMessagesCounter.increment() + Kamon.storeContext(context) + } + + override def onMessageProcessingEnd(context: Context, envelopeTimestamp: Long, processingStartTimestamp: Long, stateFromStart: Any): Unit = { + try stateFromStart.asInstanceOf[Scope].close() finally { + val timestampAfterProcessing = clock.nanos() + val timeInMailbox = processingStartTimestamp - envelopeTimestamp + val processingTime = timestampAfterProcessing - processingStartTimestamp + + actorMetrics.foreach { am => + am.processingTime.record(processingTime) + am.timeInMailbox.record(timeInMailbox) + am.mailboxSize.decrement() + } + recordGroupMetrics(processingTime, timeInMailbox) + } + } + + override def onFailure(failure: Throwable): Unit = { + actorMetrics.foreach { am => am.errors.increment() } + super.onFailure(failure: Throwable) + } + + override def cleanup(): Unit = { + super.cleanup() + actorMetrics.foreach(_.remove()) + } + } + + /** + * ActorMonitor that tracks the activity of a Routee and possibly Actor Group metrics. 
+ */ + class TrackedRoutee(routerMetrics: RouterInstruments, groupMetrics: Seq[ActorGroupInstruments], cellInfo: ActorCellInfo) + extends GroupMetricsTrackingActor(groupMetrics, cellInfo) { + + routerMetrics.members.increment() + private val processedMessagesCounter = PekkoMetrics.forSystem(cellInfo.systemName).processedMessagesByTracked + + override def captureEnvelopeContext(): Context = { + routerMetrics.pendingMessages.increment() + super.captureEnvelopeContext() + } + + override def onMessageProcessingStart(context: Context, envelopeTimestamp: Long, envelope: Envelope): Any = { + processedMessagesCounter.increment() + Kamon.storeContext(context) + } + + override def onMessageProcessingEnd(context: Context, envelopeTimestamp: Long, processingStartTimestamp: Long, stateFromStart: Any): Unit = { + try stateFromStart.asInstanceOf[Scope].close() finally { + val timestampAfterProcessing = Kamon.clock().nanos() + val timeInMailbox = processingStartTimestamp - envelopeTimestamp + val processingTime = timestampAfterProcessing - processingStartTimestamp + + routerMetrics.processingTime.record(processingTime) + routerMetrics.timeInMailbox.record(timeInMailbox) + routerMetrics.pendingMessages.decrement() + recordGroupMetrics(processingTime, timeInMailbox) + } + } + + override def onFailure(failure: Throwable): Unit = { + routerMetrics.errors.increment() + super.onFailure(failure) + } + + override def onDroppedMessages(count: Long): Unit = { + super.onDroppedMessages(count) + routerMetrics.pendingMessages.decrement(count) + } + + override def cleanup(): Unit = { + super.cleanup() + routerMetrics.members.decrement() + } + } + + /** + * Base actor tracking class that brings support for Actor Group metrics. 
+ */ + abstract class GroupMetricsTrackingActor(groupMetrics: Seq[ActorGroupInstruments], cellInfo: ActorCellInfo) extends ActorMonitor { + @volatile private var _isAlive = true + private val _shouldTrackActiveActors = !cellInfo.isTemporary + protected val clock = Kamon.clock() + protected val systemMetrics = PekkoMetrics.forSystem(cellInfo.systemName) + + // We might need to create an instance when a RepointableActorRef creates an UnstartedCell and in that case, + // we don't want to increment the number of members in the groups. + if (_shouldTrackActiveActors) { + systemMetrics.activeActors.increment() + + groupMetrics.foreach { gm => + gm.members.increment() + } + } + + override def captureEnvelopeTimestamp(): Long = + clock.nanos() + + override def captureEnvelopeContext(): Context = { + if(_isAlive && !cellInfo.isTemporary) { + groupMetrics.foreach { gm => + gm.pendingMessages.increment() + } + } + + Kamon.currentContext() + } + + override def captureProcessingStartTimestamp(): Long = + clock.nanos() + + override def onFailure(failure: Throwable): Unit = { + groupMetrics.foreach { gm => + gm.errors.increment() + } + } + + override def onDroppedMessages(count: Long): Unit = { + groupMetrics.foreach { gm => + gm.pendingMessages.decrement(count) + } + } + + protected def recordGroupMetrics(processingTime: Long, timeInMailbox: Long): Unit = { + groupMetrics.foreach { gm => + gm.processingTime.record(processingTime) + gm.timeInMailbox.record(timeInMailbox) + gm.pendingMessages.decrement() + } + } + + override def onTerminationStart(): Unit = + _isAlive = false + + def cleanup(): Unit = { + + // Similarly to the code in the constructor, we only decrement when we are not in a temporary cell. 
+ if (_shouldTrackActiveActors) { + systemMetrics.activeActors.decrement() + + groupMetrics.foreach { gm => + gm.members.decrement() + } + } + } + } +} diff --git a/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/ActorMonitorInstrumentation.scala b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/ActorMonitorInstrumentation.scala new file mode 100644 index 000000000..cb314ddea --- /dev/null +++ b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/ActorMonitorInstrumentation.scala @@ -0,0 +1,42 @@ +package kamon.instrumentation.pekko.instrumentations + +import org.apache.pekko.actor.WrappedMessage +import org.apache.pekko.dispatch.Envelope +import kamon.instrumentation.pekko.instrumentations.ActorCellInfo +import kanela.agent.api.instrumentation.InstrumentationBuilder +import kanela.agent.libs.net.bytebuddy.implementation.bind.annotation.Argument +import org.slf4j.LoggerFactory + +import scala.util.control.NonFatal + +class ActorMonitorInstrumentation extends InstrumentationBuilder { + /* + * Changes implementation of extractMessageClass for our ActorMonitor. + * In Pekko, all typed messages are converted to AdaptMessage, + * so we're forced to extract the original message type. 
+ */ + onSubTypesOf("kamon.instrumentation.pekko.instrumentations.ActorMonitor") + .intercept(method("extractMessageClass"), MessageClassAdvice) +} + +class MessageClassAdvice +object MessageClassAdvice { + private val logger = LoggerFactory.getLogger(classOf[MessageClassAdvice]) + + def extractMessageClass(@Argument(0) envelope: Envelope): String = { + try { + envelope.message match { + case message: WrappedMessage => ActorCellInfo.simpleClassName(message.message.getClass) + case _ => ActorCellInfo.simpleClassName(envelope.message.getClass) + } + } catch { + // NoClassDefFoundError is thrown in early versions of Akka 2.6 (from which Pekko was forked), + // so we can safely fall back to the original method + case _: NoClassDefFoundError => + ActorCellInfo.simpleClassName(envelope.message.getClass) + case NonFatal(e) => + logger.info(s"Expected NoClassDefFoundError, got: ${e}") + ActorCellInfo.simpleClassName(envelope.message.getClass) + } + } +} diff --git a/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/ActorRefInstrumentation.scala b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/ActorRefInstrumentation.scala new file mode 100644 index 000000000..ccfaab2a9 --- /dev/null +++ b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/ActorRefInstrumentation.scala @@ -0,0 +1,57 @@ +package kamon.instrumentation.pekko.instrumentations + +import kamon.Kamon +import kamon.context.Context +import kamon.context.Storage.Scope +import kamon.instrumentation.context.HasContext +import kanela.agent.api.instrumentation.InstrumentationBuilder +import kanela.agent.libs.net.bytebuddy.asm.Advice + +class ActorRefInstrumentation extends InstrumentationBuilder { + + /** + * This instrumentation helps keep track of the types in the entire actor path of any given actor, which allows + * to have proper information when evaluating auto-grouping.
+ */ + onTypes("org.apache.pekko.actor.LocalActorRef", "org.apache.pekko.actor.RepointableActorRef") + .mixin(classOf[HasGroupPath.Mixin]) + + /** + * This ensures that if there was any Context available when an Actor was created, it will also be available when its + * messages are being transferred from the Unstarted cell to the actual cell. + */ + onType("org.apache.pekko.actor.RepointableActorRef") + .mixin(classOf[HasContext.MixinWithInitializer]) + .advise(method("point"), RepointableActorRefPointAdvice) +} + +trait HasGroupPath { + def groupPath: String + def setGroupPath(groupPath: String): Unit +} + +object HasGroupPath { + + class Mixin(@volatile var groupPath: String) extends HasGroupPath { + override def setGroupPath(groupPath: String): Unit = + this.groupPath = groupPath + } +} + +object RepointableActorRefPointAdvice { + + @Advice.OnMethodEnter + def enter(@Advice.This repointableActorRef: Object): Scope = + Kamon.storeContext(repointableActorRef.asInstanceOf[HasContext].context) + + @Advice.OnMethodExit + def exit(@Advice.Enter scope: Scope, @Advice.This repointableActorRef: Object): Unit = { + scope.close() + + repointableActorRef + .asInstanceOf[HasContext] + .setContext(Context.Empty) + } +} + + diff --git a/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/AskPatternInstrumentation.scala b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/AskPatternInstrumentation.scala new file mode 100644 index 000000000..251017200 --- /dev/null +++ b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/AskPatternInstrumentation.scala @@ -0,0 +1,94 @@ +/* + * ========================================================================================= + * Copyright © 2013-2018 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ + +package kamon.instrumentation.pekko.instrumentations + +import org.apache.pekko.actor.ActorRef +import org.apache.pekko.pattern.AskTimeoutException +import org.apache.pekko.util.Timeout +import kamon.Kamon +import kamon.instrumentation.pekko.PekkoInstrumentation +import kamon.instrumentation.pekko.PekkoInstrumentation.AskPatternTimeoutWarningSetting.{Heavyweight, Lightweight, Off} +import kamon.util.CallingThreadExecutionContext +import kanela.agent.api.instrumentation.InstrumentationBuilder +import kanela.agent.libs.net.bytebuddy.asm.Advice.{Argument, OnMethodExit, Origin, Return} +import org.slf4j.LoggerFactory + +import scala.compat.Platform.EOL +import scala.concurrent.Future + +class AskPatternInstrumentation extends InstrumentationBuilder { + + /** + * Logs a warning message with various levels of detail when a Future[X] returned by the Ask pattern times out. 
+ */ + onType("org.apache.pekko.pattern.AskableActorRef$") + .advise(method("$qmark$extension"), classOf[AskPatternInstrumentation]) + +} + +object AskPatternInstrumentation { + + private val _log = LoggerFactory.getLogger(classOf[AskPatternInstrumentation]) + + private class StackTraceCaptureException extends Throwable + + private case class SourceLocation ( + declaringType: String, + method: String + ) + + @OnMethodExit(suppress = classOf[Throwable]) + def onExit(@Origin origin: String, @Return future: Future[AnyRef], @Argument(0) actor: ActorRef, @Argument(2) timeout: Timeout) = { + + if(PekkoPrivateAccess.isInternalAndActiveActorRef(actor) && Kamon.currentContext().nonEmpty()) { + PekkoInstrumentation.settings().askPatternWarning match { + case Off => + case Lightweight => hookLightweightWarning(future, sourceLocation(origin), actor) + case Heavyweight => hookHeavyweightWarning(future, new StackTraceCaptureException, actor) + } + } + } + + private def ifAskTimeoutException(code: => Unit): PartialFunction[Throwable, Unit] = { + case _: AskTimeoutException => code + case _ => + } + + + private def hookLightweightWarning(future: Future[AnyRef], sourceLocation: SourceLocation, actor: ActorRef): Unit = { + val locationString = Option(sourceLocation) + .map(location => s"${location.declaringType}:${location.method}") + .getOrElse("") + + future.failed.foreach(ifAskTimeoutException { + _log.warn(s"Timeout triggered for ask pattern to actor [${actor.path.name}] at [$locationString]") + })(CallingThreadExecutionContext) + } + + private def hookHeavyweightWarning(future: Future[AnyRef], captureException: StackTraceCaptureException, actor: ActorRef): Unit = { + val locationString = captureException.getStackTrace.drop(3).mkString("", System.lineSeparator, System.lineSeparator) + + future.failed.foreach(ifAskTimeoutException { + _log.warn(s"Timeout triggered for ask pattern to actor [${actor.path.name}] at [$locationString]") + })(CallingThreadExecutionContext) + } + + 
private def sourceLocation(origin: String): SourceLocation = { + val methodDescription = origin.split(" ") + SourceLocation(methodDescription(0), methodDescription(1)) + } +} diff --git a/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/ClusterInstrumentation.scala b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/ClusterInstrumentation.scala new file mode 100644 index 000000000..f2d641c3b --- /dev/null +++ b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/ClusterInstrumentation.scala @@ -0,0 +1,172 @@ +package kamon.instrumentation +package pekko.instrumentations + +import org.apache.pekko.actor.{Actor, Address, ExtendedActorSystem, Props} +import org.apache.pekko.cluster.{Cluster, ClusterEvent, MemberStatus} +import kamon.Kamon +import kamon.instrumentation.pekko.PekkoInstrumentation +import kamon.metric.Gauge +import kamon.tag.TagSet +import kanela.agent.api.instrumentation.InstrumentationBuilder +import kanela.agent.libs.net.bytebuddy.asm.Advice + +import scala.collection.mutable + +class ClusterInstrumentation extends InstrumentationBuilder { + + onType("org.apache.pekko.cluster.Cluster$") + .advise(method("createExtension").and(takesArguments(1)), AfterClusterInitializationAdvice) +} + +object AfterClusterInitializationAdvice { + + @Advice.OnMethodExit + def onClusterExtensionCreated(@Advice.Argument(0) system: ExtendedActorSystem, @Advice.Return clusterExtension: Cluster): Unit = { + val settings = PekkoInstrumentation.settings() + if(settings.exposeClusterMetrics) { + val stateExporter = system.systemActorOf(Props[ClusterInstrumentation.ClusterStateExporter], "kamon-cluster-state-exporter") + clusterExtension.subscribe(stateExporter, classOf[ClusterEvent.ClusterDomainEvent]) + } + } +} + +object ClusterInstrumentation { + + class ClusterStateExporter extends Actor { + private val clusterExtension = Cluster(context.system) + private val 
clusterTags = TagSet.of("pekko.system.name", context.system.name) + + private val joiningMembers = ClusterMembersJoining.withTags(clusterTags) + private val weaklyUpMembers = ClusterMembersWeaklyUp.withTags(clusterTags) + private val upMembers = ClusterMembersUp.withTags(clusterTags) + private val leavingMembers = ClusterMembersLeaving.withTags(clusterTags) + private val exitingMembers = ClusterMembersExiting.withTags(clusterTags) + private val downMembers = ClusterMembersDown.withTags(clusterTags) + private val removedMembers = ClusterMembersRemoved.withTags(clusterTags) + private val totalMembers = ClusterMembersTotal.withTags(clusterTags) + private val unreachableMembers = ClusterMembersUnreachable.withTags(clusterTags) + private val unreachableDatacenters = ClusterDatacentersUnreachable.withTags(clusterTags) + private val monitoredNodes = mutable.HashMap.empty[Address, (Gauge, Gauge)] + + override def receive: Receive = { + case _: ClusterEvent.ClusterDomainEvent => updateAllStates(clusterExtension.state) + case initialState: ClusterEvent.CurrentClusterState => updateAllStates(initialState) + } + + private def updateAllStates(clusterState: ClusterEvent.CurrentClusterState): Unit = { + val membersPerStatus = clusterState.members.groupBy(_.status) + joiningMembers.update(membersPerStatus.getOrElse(MemberStatus.Joining, Set.empty).size) + weaklyUpMembers.update(membersPerStatus.getOrElse(MemberStatus.WeaklyUp, Set.empty).size) + upMembers.update(membersPerStatus.getOrElse(MemberStatus.Up, Set.empty).size) + leavingMembers.update(membersPerStatus.getOrElse(MemberStatus.Leaving, Set.empty).size) + exitingMembers.update(membersPerStatus.getOrElse(MemberStatus.Exiting, Set.empty).size) + downMembers.update(membersPerStatus.getOrElse(MemberStatus.Down, Set.empty).size) + + val removedMembersCount = membersPerStatus.getOrElse(MemberStatus.Removed, Set.empty).size + val totalMembersCount = clusterState.members.size - removedMembersCount + 
removedMembers.update(removedMembersCount) + totalMembers.update(totalMembersCount) + + unreachableMembers.update(clusterState.unreachable.size) + unreachableDatacenters.update(clusterState.unreachableDataCenters.size) + + // The status and reachability gauges will only be published for the subset of members that are currently being + // monitored by this node. + val currentlyMonitoredMembers = clusterState.members.filter(m => clusterExtension.failureDetector.isMonitoring(m.address)) + val currentlyMonitoredAddresses = currentlyMonitoredMembers.map { member => + val (statusGauge, reachabilityGauge) = monitoredNodes.getOrElseUpdate(member.address, { + val memberTags = clusterTags.withTag("member", member.address.toString) + + ( + ClusterMemberStatus.withTags(memberTags), + ClusterMemberReachability.withTags(memberTags) + ) + }) + + statusGauge.update(statusToGaugeValue(member.status)) + reachabilityGauge.update(if(clusterState.unreachable(member)) 1D else 0D) + member.address + } + + // Remove any cached Gauges for members that we might not be monitoring anymore + monitoredNodes.keys.filterNot(a => currentlyMonitoredAddresses(a)).foreach { addressToRemove => + monitoredNodes.remove(addressToRemove).foreach { + case (statusGauge, reachabilityGauge) => + statusGauge.remove() + reachabilityGauge.remove() + } + } + } + + private def statusToGaugeValue(memberStatus: MemberStatus): Double = memberStatus match { + case MemberStatus.Joining => 1 + case MemberStatus.WeaklyUp => 2 + case MemberStatus.Up => 3 + case MemberStatus.Leaving => 4 + case MemberStatus.Exiting => 5 + case MemberStatus.Down => 6 + case MemberStatus.Removed => 7 + case _ => 0 // This should never happen, but covering the bases here + } + } + + val ClusterMembersJoining = Kamon.gauge( + name = "pekko.cluster.members.joining.count", + description = "Tracks the number of cluster members in the Joining state" + ) + + val ClusterMembersWeaklyUp = Kamon.gauge( + name = "pekko.cluster.members.weakly-up.count", 
+ description = "Tracks the number of cluster members in the Weakly-Up state" + ) + + val ClusterMembersUp = Kamon.gauge( + name = "pekko.cluster.members.up.count", + description = "Tracks the number of cluster members in the Up state" + ) + + val ClusterMembersLeaving = Kamon.gauge( + name = "pekko.cluster.members.leaving.count", + description = "Tracks the number of cluster members in the Leaving state" + ) + + val ClusterMembersExiting = Kamon.gauge( + name = "pekko.cluster.members.exiting.count", + description = "Tracks the number of cluster members in the Exiting state" + ) + + val ClusterMembersDown = Kamon.gauge( + name = "pekko.cluster.members.down.count", + description = "Tracks the number of cluster members in the Down state" + ) + + val ClusterMembersRemoved = Kamon.gauge( + name = "pekko.cluster.members.removed.count", + description = "Tracks the number of cluster members in the Removed state" + ) + + val ClusterMembersTotal = Kamon.gauge( + name = "pekko.cluster.members.total.count", + description = "Tracks the total number of cluster members, without including Removed members" + ) + + val ClusterMembersUnreachable = Kamon.gauge( + name = "pekko.cluster.members.unreachable.count", + description = "Tracks the total number of cluster members marked as unreachable" + ) + + val ClusterDatacentersUnreachable = Kamon.gauge( + name = "pekko.cluster.datacenters.unreachable.count", + description = "Tracks the total number of cluster members marked as unreachable" + ) + + val ClusterMemberStatus = Kamon.gauge( + name = "pekko.cluster.members.status", + description = "Tracks the current status of all monitored nodes by a cluster member" + ) + + val ClusterMemberReachability = Kamon.gauge( + name = "pekko.cluster.members.reachability", + description = "Tracks the current reachability status of all monitored nodes by a cluster member" + ) +} diff --git a/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/DispatcherInfo.scala 
b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/DispatcherInfo.scala new file mode 100644 index 000000000..df907ef2a --- /dev/null +++ b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/DispatcherInfo.scala @@ -0,0 +1,49 @@ +/* + * ========================================================================================= + * Copyright © 2013-2018 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ * ========================================================================================= + */ + +package kamon.instrumentation.pekko.instrumentations + +import org.apache.pekko.dispatch.DispatcherPrerequisites + +object DispatcherInfo { + + trait HasDispatcherPrerequisites { + def dispatcherPrerequisites: DispatcherPrerequisites + def setDispatcherPrerequisites(dispatcherPrerequisites: DispatcherPrerequisites): Unit + } + + object HasDispatcherPrerequisites { + class Mixin extends HasDispatcherPrerequisites { + @volatile private var _dispatcherPrerequisites: DispatcherPrerequisites = _ + override def dispatcherPrerequisites: DispatcherPrerequisites = _dispatcherPrerequisites + override def setDispatcherPrerequisites(dispatcherPrerequisites: DispatcherPrerequisites): Unit = + _dispatcherPrerequisites = dispatcherPrerequisites + } + } + + trait HasDispatcherName { + def dispatcherName: String + def setDispatcherName(dispatcherName: String): Unit + } + + object HasDispatcherName { + class Mixin extends HasDispatcherName { + @volatile private var _dispatcherName: String = _ + override def dispatcherName: String = _dispatcherName + override def setDispatcherName(dispatcherName: String): Unit = _dispatcherName = dispatcherName + } + } +} \ No newline at end of file diff --git a/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/DispatcherInstrumentation.scala b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/DispatcherInstrumentation.scala new file mode 100644 index 000000000..c1b64dae4 --- /dev/null +++ b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/DispatcherInstrumentation.scala @@ -0,0 +1,129 @@ +/* + * ========================================================================================= + * Copyright © 2013-2018 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in 
compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ + +package kamon.instrumentation.pekko.instrumentations + +import java.util.concurrent.{AbstractExecutorService, Callable, ExecutorService, ThreadFactory, TimeUnit} + +import org.apache.pekko.dispatch.{DefaultExecutorServiceConfigurator, DispatcherPrerequisites, Dispatchers, ExecutorServiceFactory, ExecutorServiceFactoryProvider, ForkJoinExecutorConfigurator, PinnedDispatcherConfigurator, ThreadPoolExecutorConfigurator} +import kamon.Kamon +import kamon.instrumentation.pekko.PekkoInstrumentation +import kamon.instrumentation.pekko.instrumentations.DispatcherInfo.{HasDispatcherName, HasDispatcherPrerequisites} +import kamon.instrumentation.executor.ExecutorInstrumentation +import kamon.tag.TagSet +import kanela.agent.api.instrumentation.InstrumentationBuilder +import kanela.agent.libs.net.bytebuddy.asm.Advice +import kanela.agent.libs.net.bytebuddy.implementation.bind.annotation.{Argument, SuperCall, This} + +class DispatcherInstrumentation extends InstrumentationBuilder { + + /** + * This is where the actual ExecutorService instances are being created, but at this point we don't have access to + * the Actor System Name nor the Dispatcher name, which is why there is additional instrumentation to carry these two + * names down to the ExecutorServiceFactory and use them to tag the newly instrumented ExecutorService. 
+ */ + onSubTypesOf("org.apache.pekko.dispatch.ExecutorServiceFactory") + .mixin(classOf[HasDispatcherPrerequisites.Mixin]) + .mixin(classOf[HasDispatcherName.Mixin]) + .intercept(method("createExecutorService"), InstrumentNewExecutorServiceOnPekko) + + /** + * First step on getting the Actor System name is to read it from the prerequisites instance passed to the + * constructors of these configurator classes. + */ + onTypes( + "org.apache.pekko.dispatch.ThreadPoolExecutorConfigurator", + "org.apache.pekko.dispatch.ForkJoinExecutorConfigurator", + "org.apache.pekko.dispatch.PinnedDispatcherConfigurator", + "org.apache.pekko.dispatch.DefaultExecutorServiceConfigurator") + .mixin(classOf[HasDispatcherPrerequisites.Mixin]) + .advise(isConstructor, CaptureDispatcherPrerequisitesOnExecutorConfigurator) + + /** + * Copies the Actor System and Dispatcher names to the ExecutorServiceFactory instances for the types of + * executors instrumented by Kamon. + */ + onTypes( + "org.apache.pekko.dispatch.ThreadPoolConfig", + "org.apache.pekko.dispatch.ForkJoinExecutorConfigurator", + "org.apache.pekko.dispatch.PinnedDispatcherConfigurator", + "org.apache.pekko.dispatch.DefaultExecutorServiceConfigurator") + .mixin(classOf[HasDispatcherName.Mixin]) + .advise(method("createExecutorServiceFactory"), CopyDispatcherInfoToExecutorServiceFactory) + + /** + * This ensures that the ActorSystem name is not lost when creating PinnedDispatcher instances.
+ */ + onType("org.apache.pekko.dispatch.ThreadPoolConfig") + .mixin(classOf[HasDispatcherPrerequisites.Mixin]) + .advise(method("copy"), ThreadPoolConfigCopyAdvice) + +} + +object CaptureDispatcherPrerequisitesOnExecutorConfigurator { + + @Advice.OnMethodExit(suppress = classOf[Throwable]) + def exit(@Advice.This configurator: Any, @Advice.Argument(1) prerequisites: DispatcherPrerequisites): Unit = { + configurator match { + case fjec: ForkJoinExecutorConfigurator => fjec.asInstanceOf[HasDispatcherPrerequisites].setDispatcherPrerequisites(prerequisites) + case tpec: ThreadPoolExecutorConfigurator => tpec.threadPoolConfig.asInstanceOf[HasDispatcherPrerequisites].setDispatcherPrerequisites(prerequisites) + case pdc: PinnedDispatcherConfigurator => pdc.asInstanceOf[HasDispatcherPrerequisites].setDispatcherPrerequisites(prerequisites) + case desc: DefaultExecutorServiceConfigurator => desc.asInstanceOf[HasDispatcherPrerequisites].setDispatcherPrerequisites(prerequisites) + case _ => // just ignore any other case. 
+ } + } +} + +object CopyDispatcherInfoToExecutorServiceFactory { + + @Advice.OnMethodExit + def exit(@Advice.This poolConfig: HasDispatcherPrerequisites, @Advice.Argument(0) dispatcherName: String, @Advice.Return factory: Any): Unit = { + val factoryWithMixins = factory.asInstanceOf[HasDispatcherName with HasDispatcherPrerequisites] + factoryWithMixins.setDispatcherPrerequisites(poolConfig.dispatcherPrerequisites) + factoryWithMixins.setDispatcherName(dispatcherName) + } +} + +object InstrumentNewExecutorServiceOnPekko { + + def around(@This factory: HasDispatcherPrerequisites with HasDispatcherName, @SuperCall callable: Callable[ExecutorService]): ExecutorService = { + val executor = callable.call() + val actorSystemName = factory.dispatcherPrerequisites.settings.name + val dispatcherName = factory.dispatcherName + val scheduledActionName = actorSystemName + "/" + dispatcherName + val systemTags = TagSet.of("pekko.system", actorSystemName) + + if(Kamon.filter(PekkoInstrumentation.TrackDispatcherFilterName).accept(dispatcherName)) { + val defaultEcOption = factory.dispatcherPrerequisites.defaultExecutionContext + + if(dispatcherName == Dispatchers.DefaultDispatcherId && defaultEcOption.isDefined) { + ExecutorInstrumentation.instrumentExecutionContext(defaultEcOption.get, dispatcherName, systemTags, scheduledActionName, ExecutorInstrumentation.DefaultSettings) + .underlyingExecutor.getOrElse(executor) + } else { + ExecutorInstrumentation.instrument(executor, dispatcherName, systemTags, scheduledActionName, ExecutorInstrumentation.DefaultSettings) + } + } else executor + } +} + +object ThreadPoolConfigCopyAdvice { + + @Advice.OnMethodExit + def exit(@Advice.This original: Any, @Advice.Return copy: Any): Unit = { + copy.asInstanceOf[HasDispatcherPrerequisites].setDispatcherPrerequisites(original.asInstanceOf[HasDispatcherPrerequisites].dispatcherPrerequisites) + 
copy.asInstanceOf[HasDispatcherName].setDispatcherName(original.asInstanceOf[HasDispatcherName].dispatcherName) + } +} diff --git a/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/EnvelopeInstrumentation.scala b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/EnvelopeInstrumentation.scala new file mode 100644 index 000000000..e606b16b8 --- /dev/null +++ b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/EnvelopeInstrumentation.scala @@ -0,0 +1,43 @@ +/* + * ========================================================================================= + * Copyright © 2013-2018 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ + +package kamon.instrumentation.pekko.instrumentations + +import kamon.instrumentation.context.{HasContext, HasTimestamp} +import kanela.agent.api.instrumentation.InstrumentationBuilder +import kanela.agent.libs.net.bytebuddy.asm.Advice + + +class EnvelopeInstrumentation extends InstrumentationBuilder { + + /** + * Ensures that the Pekko Envelope is able to carry around a Context and a Timestamp. 
+ */ + onType("org.apache.pekko.dispatch.Envelope") + .mixin(classOf[HasContext.Mixin]) + .mixin(classOf[HasTimestamp.Mixin]) + .advise(method("copy"), EnvelopeCopyAdvice) +} + +object EnvelopeCopyAdvice { + + @Advice.OnMethodExit + def exit(@Advice.Return newEnvelope: Any, @Advice.This envelope: Any): Unit = { + newEnvelope.asInstanceOf[HasContext].setContext(envelope.asInstanceOf[HasContext].context) + newEnvelope.asInstanceOf[HasTimestamp].setTimestamp(envelope.asInstanceOf[HasTimestamp].timestamp) + } +} + diff --git a/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/EventStreamInstrumentation.scala b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/EventStreamInstrumentation.scala new file mode 100644 index 000000000..8fc97c988 --- /dev/null +++ b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/EventStreamInstrumentation.scala @@ -0,0 +1,66 @@ +/* + * ========================================================================================= + * Copyright © 2013-2018 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ * ========================================================================================= + */ + +package kamon.instrumentation.pekko.instrumentations + +import org.apache.pekko.actor.{ActorSystem, DeadLetter, UnhandledMessage} +import kamon.instrumentation.pekko.PekkoMetrics +import kanela.agent.api.instrumentation.InstrumentationBuilder +import kanela.agent.libs.net.bytebuddy.asm.Advice.{Argument, OnMethodExit, This} + +class EventStreamInstrumentation extends InstrumentationBuilder { + + /** + * Counts dead letters and unhandled messages as they are published on the EventStream. + */ + onType("org.apache.pekko.event.EventStream") + .mixin(classOf[HasSystem.Mixin]) + .advise(isConstructor.and(takesArguments(2)), ConstructorAdvice) + .advise(method("publish").and(takesArguments(1)), PublishMethodAdvice) +} + + +object ConstructorAdvice { + + @OnMethodExit(suppress = classOf[Throwable]) + def exit(@This eventStream: HasSystem, @Argument(0) system:ActorSystem): Unit = { + eventStream.setSystem(system) + } +} + +object PublishMethodAdvice { + + @OnMethodExit(suppress = classOf[Throwable]) + def exit(@This stream:HasSystem, @Argument(0) event: AnyRef):Unit = event match { + case _: DeadLetter => PekkoMetrics.forSystem(stream.system.name).deadLetters.increment() + case _: UnhandledMessage => PekkoMetrics.forSystem(stream.system.name).unhandledMessages.increment() + case _ => () + } +} + +trait HasSystem { + def system: ActorSystem + def setSystem(system: ActorSystem): Unit +} + +object HasSystem { + + class Mixin(var system: ActorSystem) extends HasSystem { + + override def setSystem(system: ActorSystem): Unit = + this.system = system + } +} \ No newline at end of file diff --git a/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/RouterInstrumentation.scala b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/RouterInstrumentation.scala new file mode 100644 index 000000000..eea73102d --- 
/dev/null +++ b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/RouterInstrumentation.scala @@ -0,0 +1,94 @@ +package kamon.instrumentation.pekko.instrumentations + +import org.apache.pekko.actor.{ActorRef, ActorSystem, Props} +import kanela.agent.api.instrumentation.InstrumentationBuilder +import kanela.agent.libs.net.bytebuddy.asm.Advice._ + +class RouterInstrumentation extends InstrumentationBuilder { + + /** + * Provides the router metrics tracking implementation. + */ + onType("org.apache.pekko.routing.RoutedActorCell") + .mixin(classOf[HasRouterMonitor.Mixin]) + .advise(isConstructor, RoutedActorCellConstructorAdvice) + .advise(method("sendMessage").and(takesArguments(1)), SendMessageAdvice) + .advise(method("sendMessage").and(takesArguments(1)), SendMessageOnRouterAdvice) + + /** + * Captures the router and routee Props so that we can properly apply tags to the router metrics. + */ + onType("org.apache.pekko.routing.RoutedActorRef") + .mixin(classOf[HasRouterProps.Mixin]) + .advise(isConstructor, RoutedActorRefConstructorAdvice) +} + + +/** + * Helps with capturing the Props for both the router and the routees. 
+ */ +trait HasRouterProps { + def routeeProps: Props + def routerProps: Props + def setRouteeProps(props: Props): Unit + def setRouterProps(props: Props): Unit +} + +object HasRouterProps { + + class Mixin(var routeeProps: Props, var routerProps: Props) extends HasRouterProps { + + override def setRouteeProps(props: Props): Unit = + this.routeeProps = props + + override def setRouterProps(props: Props): Unit = + this.routerProps = props + } +} + +trait HasRouterMonitor { + def routerMonitor: RouterMonitor + def setRouterMonitor(routerMonitor: RouterMonitor): Unit +} + +object HasRouterMonitor { + + class Mixin(var routerMonitor: RouterMonitor) extends HasRouterMonitor { + + override def setRouterMonitor(routerMonitor: RouterMonitor): Unit = + this.routerMonitor = routerMonitor + } +} + +object RoutedActorRefConstructorAdvice { + + @OnMethodExit(suppress = classOf[Throwable]) + def exit(@This ref: ActorRef, @Argument(1) routerProps: Props, @Argument(4) routeeProps: Props): Unit = { + val routedRef = ref.asInstanceOf[HasRouterProps] + routedRef.setRouteeProps(routeeProps) + routedRef.setRouterProps(routerProps) + } +} + +object RoutedActorCellConstructorAdvice { + + @OnMethodExit(suppress = classOf[Throwable]) + def exit(@This cell: Any, @Argument(0) system: ActorSystem, @Argument(1) ref: ActorRef, @Argument(5) parent: ActorRef): Unit = { + cell.asInstanceOf[HasRouterMonitor].setRouterMonitor(RouterMonitor.from(cell, ref, parent, system)) + } +} + +object SendMessageOnRouterAdvice { + + def routerInstrumentation(cell: Any): RouterMonitor = + cell.asInstanceOf[HasRouterMonitor].routerMonitor + + @OnMethodEnter(suppress = classOf[Throwable]) + def onEnter(@This cell: Any): Long = + routerInstrumentation(cell).processMessageStart() + + @OnMethodExit(suppress = classOf[Throwable]) + def onExit(@This cell: Any, @Enter timestampBeforeProcessing: Long): Unit = + routerInstrumentation(cell).processMessageEnd(timestampBeforeProcessing) +} + diff --git 
a/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/RouterMonitor.scala b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/RouterMonitor.scala new file mode 100644 index 000000000..87ecab9bf --- /dev/null +++ b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/RouterMonitor.scala @@ -0,0 +1,98 @@ +package kamon.instrumentation.pekko.instrumentations +
+import org.apache.pekko.actor.{ActorRef, ActorSystem} +import kamon.Kamon +import kamon.instrumentation.pekko.PekkoInstrumentation.TrackRouterFilterName +import kamon.instrumentation.pekko.PekkoMetrics +import kamon.instrumentation.pekko.PekkoMetrics.RouterInstruments +
+/** + * Exposes the necessary callbacks for instrumenting a Router. + */ +trait RouterMonitor { + + /** + * Signals that a routee has been added to the router. + */ + def routeeAdded(): Unit + + /** + * Signals that a routee has been removed from the router. + */ + def routeeRemoved(): Unit + + /** + * Callback executed when message processing starts. In a router, the processing time is actually just about routing + * the message to the right routee and there is no mailbox involved. + */ + def processMessageStart(): Long + + /** + * Callback executed when the message has been routed. + */ + def processMessageEnd(timestampBeforeProcessing: Long): Unit + + /** + * Callback executed when a router fails to route a message. + */ + def processFailure(failure: Throwable): Unit + + /** + * Cleans up all resources used by the router monitor.
+ */ + def cleanup(): Unit +} + +object RouterMonitor { + + def from(actorCell: Any, ref: ActorRef, parent: ActorRef, system: ActorSystem): RouterMonitor = { + val cell = ActorCellInfo.from(actorCell, ref, parent, system) + + if (Kamon.filter(TrackRouterFilterName).accept(cell.path)) + new MetricsOnlyRouterMonitor( + PekkoMetrics.forRouter( + cell.path, + cell.systemName, + cell.dispatcherName, + cell.actorOrRouterClass, + cell.routeeClass.filterNot(ActorCellInfo.isTyped).map(_.getName).getOrElse("Unknown") + ) + ) + else NoOpRouterMonitor + } + + /** + * Router monitor that doesn't perform any actions. + */ + object NoOpRouterMonitor extends RouterMonitor { + override def routeeAdded(): Unit = {} + override def routeeRemoved(): Unit = {} + override def processMessageStart(): Long = 0L + override def processMessageEnd(timestampBeforeProcessing: Long): Unit = {} + override def processFailure(failure: Throwable): Unit = {} + override def cleanup(): Unit = {} + } + + /** + * Router monitor that tracks routing metrics for the router. 
+ */ + class MetricsOnlyRouterMonitor(routerMetrics: RouterInstruments) extends RouterMonitor { + private val _clock = Kamon.clock() + + override def routeeAdded(): Unit = {} + override def routeeRemoved(): Unit = {} + override def processFailure(failure: Throwable): Unit = {} + + override def processMessageStart(): Long = + _clock.nanos() + + override def processMessageEnd(timestampBeforeProcessing: Long): Unit = { + val timestampAfterProcessing = _clock.nanos() + val routingTime = timestampAfterProcessing - timestampBeforeProcessing + routerMetrics.routingTime.record(routingTime) + } + + override def cleanup(): Unit = + routerMetrics.remove() + } +} diff --git a/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/SchedulerInstrumentation.scala b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/SchedulerInstrumentation.scala new file mode 100644 index 000000000..8e3e0b1cd --- /dev/null +++ b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/SchedulerInstrumentation.scala @@ -0,0 +1,28 @@ +/* ========================================================================================= + * Copyright © 2013-2022 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ * ========================================================================================= + */ + +package kamon.instrumentation.pekko.instrumentations + +import kanela.agent.api.instrumentation.InstrumentationBuilder + +class SchedulerInstrumentation extends InstrumentationBuilder { + + /** + * Captures the current context when calling `scheduler.scheduleOnce` and restores it when the submitted runnable + * runs. This ensures that certain Pekko patterns like retry and after work as expected. + */ + onSubTypesOf("org.apache.pekko.actor.Scheduler") + .advise(method("scheduleOnce").and(withArgument(1, classOf[Runnable])), classOf[SchedulerRunnableAdvice]) +} diff --git a/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/SystemMessageInstrumentation.scala b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/SystemMessageInstrumentation.scala new file mode 100644 index 000000000..4c82b0c8b --- /dev/null +++ b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/SystemMessageInstrumentation.scala @@ -0,0 +1,29 @@ +/* + * ========================================================================================= + * Copyright © 2013-2018 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License.
+ * ========================================================================================= + */ + +package kamon.instrumentation.pekko.instrumentations + +import kamon.instrumentation.context.HasContext +import kanela.agent.api.instrumentation.InstrumentationBuilder + +class SystemMessageInstrumentation extends InstrumentationBuilder { + + /** + * Captures the current Context when a System Message is created. + */ + onSubTypesOf("org.apache.pekko.dispatch.sysmsg.SystemMessage") + .mixin(classOf[HasContext.MixinWithInitializer]) +} \ No newline at end of file diff --git a/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/internal/CellWrapper.scala b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/internal/CellWrapper.scala new file mode 100644 index 000000000..288bc91fd --- /dev/null +++ b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/instrumentations/internal/CellWrapper.scala @@ -0,0 +1,77 @@ +/* + * ========================================================================================= + * Copyright © 2013-2018 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ * ========================================================================================= + */ + +package org.apache.pekko.actor.instrumentation + +import org.apache.pekko.actor.dungeon.ChildrenContainer + +import org.apache.pekko.actor.{ActorRef, ActorSystem, ActorSystemImpl, Cell, ChildStats, InternalActorRef, Props} +import org.apache.pekko.dispatch.Envelope +import org.apache.pekko.dispatch.sysmsg.SystemMessage +import kamon.Kamon +import kamon.instrumentation.context.HasContext + +/** + * Thin wrapper used while exchanging an UnstartedCell for a real ActorCell instance. This wrapper is only + * meant to be used during the execution of UnstartedCell.replaceWith(...), for the sole purpose of ensuring + * that all messages that might have been accumulated in the UnstartedCell will get their Context propagated + * as expected. + * + * For reference, we used to have a copy/pasted and modified version of UnstartedCell.replaceWith(...) as part + * of the instrumentation, but there were tiny bugs related to accessing internal state on the UnstartedCell while + * running our modified version of the method. These bugs lead to losing System Messages in certain situations, + * which eventually leads to actors not being shut down. The CellWrapper approach ensures that the internal calls + * to UnstartedCell.replaceWith#drainSysmsgQueue() are unchanged, while still ensuring that the Kamon Context + * will be propagated for all queued messages. + */ +class CellWrapper(val underlying: Cell) extends Cell { + override def sendMessage(msg: Envelope): Unit = { + if(msg.isInstanceOf[HasContext]) { + val context = msg.asInstanceOf[HasContext].context + Kamon.runWithContext(context) { + underlying.sendMessage(msg) + } + } + else { + underlying.sendMessage(msg) + } + } + + override def sendSystemMessage(msg: SystemMessage): Unit = + underlying.sendSystemMessage(msg) + + // None of these methods below this point will ever be called. 
+ + override def self: ActorRef = underlying.self + override def system: ActorSystem = underlying.system + override def systemImpl: ActorSystemImpl = underlying.systemImpl + override def start(): this.type = underlying.start().asInstanceOf[this.type] + override def suspend(): Unit = underlying.suspend() + override def resume(causedByFailure: Throwable): Unit = underlying.resume(causedByFailure) + override def restart(cause: Throwable): Unit = underlying.restart(cause) + override def stop(): Unit = underlying.stop() + override private[pekko] def isTerminated = underlying.isTerminated + override def parent: InternalActorRef = underlying.parent + override def childrenRefs: ChildrenContainer = underlying.childrenRefs + override def getChildByName(name: String): Option[ChildStats] = underlying.getChildByName(name) + override def getSingleChild(name: String): InternalActorRef = underlying.getSingleChild(name) + override def isLocal: Boolean = underlying.isLocal + override def hasMessages: Boolean = underlying.hasMessages + override def numberOfMessages: Int = underlying.numberOfMessages + override def props: Props = underlying.props +} + + diff --git a/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/remote/MessageBufferInstrumentation.scala b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/remote/MessageBufferInstrumentation.scala new file mode 100644 index 000000000..9beeb6324 --- /dev/null +++ b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/remote/MessageBufferInstrumentation.scala @@ -0,0 +1,19 @@ +package kamon.instrumentation.pekko.remote + + +import _root_.kanela.agent.api.instrumentation.InstrumentationBuilder +import kamon.instrumentation.context.{CaptureCurrentContextOnExit, HasContext, InvokeWithCapturedContext} + +class MessageBufferInstrumentation extends InstrumentationBuilder { + + /** + * Ensures that the Context traveling with outgoing messages will be properly propagated if those 
messages are + * temporarily held on a MessageBuffer. This happens, for example, when sending messages to shard that has not yet + * started. + */ + onType("org.apache.pekko.util.MessageBuffer$Node") + .mixin(classOf[HasContext.Mixin]) + .advise(isConstructor, CaptureCurrentContextOnExit) + .advise(method("apply"), InvokeWithCapturedContext) + +} diff --git a/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/remote/RemotingInstrumentation.scala b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/remote/RemotingInstrumentation.scala new file mode 100644 index 000000000..12a3cbd58 --- /dev/null +++ b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/remote/RemotingInstrumentation.scala @@ -0,0 +1,172 @@ +package kamon.instrumentation.pekko.instrumentations.remote + +import org.apache.pekko.actor.ActorSystem +import kamon.Kamon +import kamon.context.{Context, Storage} +import kamon.context.Storage.Scope +import kamon.instrumentation.pekko.PekkoRemoteInstrumentation +import kamon.instrumentation.pekko.PekkoRemoteMetrics.SerializationInstruments +import kamon.instrumentation.pekko.instrumentations.PekkoPrivateAccess +import kamon.instrumentation.context.{CaptureCurrentContextOnExit, HasContext} +import kanela.agent.api.instrumentation.InstrumentationBuilder +import kanela.agent.libs.net.bytebuddy.asm.Advice +import org.apache.pekko.kamon.instrumentation.pekko.remote.internal.{PekkoPduProtobufCodecConstructMessageMethodInterceptor, PekkoPduProtobufCodecDecodeMessage} +import org.apache.pekko.remote.artery.CaptureCurrentInboundEnvelope + + +class RemotingInstrumentation extends InstrumentationBuilder { + + /** + * Send messages might be buffered if they reach the EndpointWriter before it has been initialized and the current + * Context might be lost after the buffering, so we make sure we capture the context when the Send command was + * created and then apply it during the EndpointWrite.writeSend method 
execution (see below). + */ + onType("org.apache.pekko.remote.EndpointManager$Send") + .mixin(classOf[HasContext.Mixin]) + .advise(isConstructor, CaptureCurrentContextOnExit) + + onType("org.apache.pekko.remote.EndpointWriter") + .advise(method("writeSend"), WriteSendWithContext) + + /** + * Reads and writes the Pekko PDU using a modified version of the Protobuf that has an extra field for a Context + * instance. + */ + onType("org.apache.pekko.remote.transport.PekkoPduProtobufCodec$") + .intercept(method("constructMessage"), new PekkoPduProtobufCodecConstructMessageMethodInterceptor()) + .advise(method("decodeMessage"), classOf[PekkoPduProtobufCodecDecodeMessage]) + + /** + * Mixin Serialization Instruments to the Actor System and use them to record the serialization and deserialization + * time metrics. + */ + onType("org.apache.pekko.actor.ActorSystemImpl") + .mixin(classOf[HasSerializationInstruments.Mixin]) + .advise(isConstructor, InitializeActorSystemAdvice) + + /** + * Artery + */ + onType("org.apache.pekko.remote.artery.ReusableOutboundEnvelope") + .mixin(classOf[HasContext.Mixin]) + .advise(method("copy"), CopyContextOnReusableEnvelope) + + onType("org.apache.pekko.remote.artery.Association") + .advise(method("createOutboundEnvelope$1"), CaptureCurrentContextOnReusableEnvelope) + + onType("org.apache.pekko.remote.artery.RemoteInstruments") + .advise(method("deserialize"), classOf[CaptureCurrentInboundEnvelope]) + + onType("org.apache.pekko.remote.artery.ReusableInboundEnvelope") + .mixin(classOf[HasContext.Mixin]) + .advise(method("copyForLane"), CopyContextOnReusableEnvelope) + + onType("org.apache.pekko.remote.artery.MessageDispatcher") + .advise(method("dispatch"), ArteryMessageDispatcherAdvice) + +} + +object ArteryMessageDispatcherAdvice { + + @Advice.OnMethodEnter + def enter(@Advice.Argument(0) envelope: Any): Storage.Scope = + Kamon.storeContext(envelope.asInstanceOf[HasContext].context) + + @Advice.OnMethodExit + def exit(@Advice.Enter scope: 
Storage.Scope): Unit = + scope.close() +} + +object CopyContextOnReusableEnvelope { + + @Advice.OnMethodExit + def exit(@Advice.This oldEnvelope: Any, @Advice.Return newEnvelope: Any): Unit = + newEnvelope.asInstanceOf[HasContext].setContext(oldEnvelope.asInstanceOf[HasContext].context) +} + +object CaptureCurrentContextOnReusableEnvelope { + + @Advice.OnMethodExit + def exit(@Advice.Return envelope: Any): Unit = { + envelope.asInstanceOf[HasContext].setContext(Kamon.currentContext()) + } +} + +object WriteSendWithContext { + + @Advice.OnMethodEnter + def enter(@Advice.Argument(0) send: Any): Scope = { + Kamon.storeContext(send.asInstanceOf[HasContext].context) + } + + @Advice.OnMethodExit + def exit(@Advice.Enter scope: Scope): Unit = { + scope.asInstanceOf[Scope].close() + } +} + +trait HasSerializationInstruments { + def serializationInstruments: SerializationInstruments + def setSerializationInstruments(instruments: SerializationInstruments): Unit +} + +object HasSerializationInstruments { + + class Mixin(var serializationInstruments: SerializationInstruments) extends HasSerializationInstruments { + override def setSerializationInstruments(instruments: SerializationInstruments): Unit = + serializationInstruments = instruments + } +} + +object InitializeActorSystemAdvice { + + @Advice.OnMethodExit + def exit(@Advice.This system: ActorSystem with HasSerializationInstruments): Unit = + system.setSerializationInstruments(new SerializationInstruments(system.name)) + +} + +object MeasureSerializationTime { + + @Advice.OnMethodEnter + def enter(): Long = { + if(PekkoRemoteInstrumentation.settings().trackSerializationMetrics) System.nanoTime() else 0L + } + + @Advice.OnMethodExit + def exit(@Advice.Argument(0) system: AnyRef, @Advice.Enter startNanoTime: Long): Unit = { + if(startNanoTime != 0L) { + system.asInstanceOf[HasSerializationInstruments] + .serializationInstruments + .serializationTime + .record(System.nanoTime() - startNanoTime) + } + } +} + +object 
MeasureDeserializationTime { + + @Advice.OnMethodEnter + def enter(): Long = { + if(PekkoRemoteInstrumentation.settings().trackSerializationMetrics) System.nanoTime() else 0L + } + + @Advice.OnMethodExit + def exit(@Advice.Argument(0) system: AnyRef, @Advice.Enter startNanoTime: Long, @Advice.Return msg: Any): Unit = { + + if(PekkoPrivateAccess.isSystemMessage(msg)) { + msg match { + case hc: HasContext if hc.context == null => + hc.setContext(Kamon.currentContext()) + case _ => + } + } + + if(startNanoTime != 0L) { + system.asInstanceOf[HasSerializationInstruments] + .serializationInstruments + .deserializationTime + .record(System.nanoTime() - startNanoTime) + } + } +} diff --git a/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/remote/ShardingInstrumentation.scala b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/remote/ShardingInstrumentation.scala new file mode 100644 index 000000000..b769f7073 --- /dev/null +++ b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/remote/ShardingInstrumentation.scala @@ -0,0 +1,167 @@ +package kamon.instrumentation.pekko.remote + +import java.util.concurrent.atomic.AtomicLong +import org.apache.pekko.actor.Actor +import kamon.instrumentation.pekko.PekkoClusterShardingMetrics.ShardingInstruments +import kamon.instrumentation.pekko.PekkoInstrumentation +import kamon.util.Filter +import kanela.agent.api.instrumentation.InstrumentationBuilder +import kanela.agent.libs.net.bytebuddy.asm.Advice + +class ShardingInstrumentation extends InstrumentationBuilder { + + /** + * The ShardRegion instrumentation just takes care of counting the Region messages and, when stopped, cleans up the + * instruments and Shard sampling schedules. 
+ */ + onType("org.apache.pekko.cluster.sharding.ShardRegion") + .mixin(classOf[HasShardingInstruments.Mixin]) + .advise(isConstructor, InitializeShardRegionAdvice) + .advise(method("deliverMessage"), DeliverMessageOnShardRegion) + .advise(method("postStop"), RegionPostStopAdvice) + + + /** + * Shards control most of the metrics generated by the module and we use the internal helper methods to know when + * entities were created or shutdown as well as measuring how many messages were sent to them through each Shard. One + * implementation note is that messages sent to unstarted entities are going to be counted more than once because + * they will first be buffered and then delivered when the Entity is ready, we are doing this to avoid having to double + * the work done by "deliverMessage" (like calling the "extractEntityId" function on each message and determine + * whether it should be buffered or forwarded). + */ + onType("org.apache.pekko.cluster.sharding.Shard") + .mixin(classOf[HasShardingInstruments.Mixin]) + .mixin(classOf[HasShardCounters.Mixin]) + .advise(isConstructor, InitializeShardAdvice) + .advise(method("onLeaseAcquired"), ShardInitializedAdvice) + .advise(method("postStop"), ShardPostStopStoppedAdvice) + .advise(method("getOrCreateEntity"), ShardGetOrCreateEntityAdvice) + .advise(method("entityTerminated"), ShardEntityTerminatedAdvice) + .advise(method("org$apache$pekko$cluster$sharding$Shard$$deliverMessage"), ShardDeliverMessageAdvice) + .advise(method("deliverMessage"), ShardDeliverMessageAdvice) + + onType("org.apache.pekko.cluster.sharding.Shard") + .advise(method("shardInitialized"), ShardInitializedAdvice) +} + + +trait HasShardingInstruments { + def shardingInstruments: ShardingInstruments + def setShardingInstruments(shardingInstruments: ShardingInstruments): Unit +} + +object HasShardingInstruments { + + class Mixin(var shardingInstruments: ShardingInstruments) extends HasShardingInstruments { + override def
setShardingInstruments(shardingInstruments: ShardingInstruments): Unit = + this.shardingInstruments = shardingInstruments + } +} + +trait HasShardCounters { + def hostedEntitiesCounter: AtomicLong + def processedMessagesCounter: AtomicLong + def setCounters(hostedEntitiesCounter: AtomicLong, processedMessagesCounter: AtomicLong): Unit +} + +object HasShardCounters { + + class Mixin(var hostedEntitiesCounter: AtomicLong, var processedMessagesCounter: AtomicLong) extends HasShardCounters { + override def setCounters(hostedEntitiesCounter: AtomicLong, processedMessagesCounter: AtomicLong): Unit = { + this.hostedEntitiesCounter = hostedEntitiesCounter + this.processedMessagesCounter = processedMessagesCounter + } + } +} + +object InitializeShardRegionAdvice { + + @Advice.OnMethodExit + def exit(@Advice.This region: Actor with HasShardingInstruments, @Advice.Argument(0) typeName: String): Unit = { + region.setShardingInstruments(new ShardingInstruments(region.context.system.name, typeName)) + + val system = region.context.system + val shardingGuardian = system.settings.config.getString("pekko.cluster.sharding.guardian-name") + val entitiesPath = s"${system.name}/system/$shardingGuardian/$typeName/*/*" + + PekkoInstrumentation.defineActorGroup(s"shardRegion/$typeName", Filter.fromGlob(entitiesPath)) + } +} + +object InitializeShardAdvice { + + @Advice.OnMethodExit + def exit(@Advice.This shard: Actor with HasShardingInstruments with HasShardCounters, @Advice.Argument(0) typeName: String, + @Advice.Argument(1) shardID: String): Unit = { + + val shardingInstruments = new ShardingInstruments(shard.context.system.name, typeName) + shard.setShardingInstruments(shardingInstruments) + shard.setCounters( + shardingInstruments.hostedEntitiesPerShardCounter(shardID), + shardingInstruments.processedMessagesPerShardCounter(shardID) + ) + } +} + +object DeliverMessageOnShardRegion { + + @Advice.OnMethodEnter + def enter(@Advice.This region: HasShardingInstruments, @Advice.Argument(0) 
message: Any): Unit = { + // NOTE: The "deliverMessage" method also handles the "RestartShard" message, which is not a user-facing message + // but it should not happen so often so we won't do any additional matching on it to filter it out of the + // metric. + region.shardingInstruments.processedMessages.increment() + } + +} + +object RegionPostStopAdvice { + + @Advice.OnMethodExit + def enter(@Advice.This shard: HasShardingInstruments): Unit = + shard.shardingInstruments.remove() +} + + +object ShardInitializedAdvice { + + @Advice.OnMethodExit + def enter(@Advice.This shard: HasShardingInstruments): Unit = + shard.shardingInstruments.hostedShards.increment() +} + +object ShardPostStopStoppedAdvice { + + @Advice.OnMethodExit + def enter(@Advice.This shard: HasShardingInstruments): Unit = + shard.shardingInstruments.hostedShards.decrement() +} + +object ShardGetOrCreateEntityAdvice { + + @Advice.OnMethodEnter + def enter(@Advice.This shard: Actor with HasShardingInstruments with HasShardCounters, @Advice.Argument(0) entityID: String): Unit = { + if(shard.context.child(entityID).isEmpty) { + // The entity is not created just yet, but we know that it will be created right after this.
+ shard.shardingInstruments.hostedEntities.increment() + shard.hostedEntitiesCounter.incrementAndGet() + } + } +} + +object ShardEntityTerminatedAdvice { + + @Advice.OnMethodEnter + def enter(@Advice.This shard: Actor with HasShardingInstruments with HasShardCounters): Unit = { + shard.shardingInstruments.hostedEntities.decrement() + shard.hostedEntitiesCounter.decrementAndGet() + } +} + +object ShardDeliverMessageAdvice { + @Advice.OnMethodEnter + def enter(@Advice.This shard: Actor with HasShardingInstruments with HasShardCounters): Unit = { + shard.processedMessagesCounter.incrementAndGet() + } + +} diff --git a/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/remote/artery/KamonRemoteInstrument.scala b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/remote/artery/KamonRemoteInstrument.scala new file mode 100644 index 000000000..a82ae5e9b --- /dev/null +++ b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/remote/artery/KamonRemoteInstrument.scala @@ -0,0 +1,98 @@ +package org.apache.pekko.remote.artery + +import org.apache.pekko.actor.{ActorRef, ExtendedActorSystem} +import kamon.Kamon +import kamon.context.BinaryPropagation.{ByteStreamReader, ByteStreamWriter} +import kamon.instrumentation.pekko.PekkoRemoteMetrics +import kamon.instrumentation.context.HasContext +import kanela.agent.libs.net.bytebuddy.asm.Advice +import org.slf4j.LoggerFactory + +import java.nio.ByteBuffer +import scala.util.control.NonFatal + +class KamonRemoteInstrument(system: ExtendedActorSystem) extends RemoteInstrument { + private val logger = LoggerFactory.getLogger(classOf[KamonRemoteInstrument]) + private val lengthMask: Int = ~(31 << 26) + private val serializationInstruments = PekkoRemoteMetrics.serializationInstruments(system.name) + + override def identifier: Byte = 8 + + override def serializationTimingEnabled: Boolean = true + + override def remoteWriteMetadata(recipient: ActorRef, message: Object, sender: 
ActorRef, buffer: ByteBuffer): Unit = { + val currentContext = Kamon.currentContext() + if (currentContext.nonEmpty()) { + Kamon.defaultBinaryPropagation().write(currentContext, ByteStreamWriter.of(buffer)) + } + } + + override def remoteReadMetadata(recipient: ActorRef, message: Object, sender: ActorRef, buffer: ByteBuffer): Unit = { + def getLength(kl: Int): Int = kl & lengthMask + + try { + + // We need to figure out the length of the incoming Context before passing it to BinaryPropagation and + // the only way we can do so at this point is to go back a few positions on `buffer` to read the key/length + // Integer stored by Pekko and figure out the length from there. + val keyLength = buffer.getInt(buffer.position() - 4) + val contextLength = getLength(keyLength) + val contextData = Array.ofDim[Byte](contextLength) + buffer.get(contextData) + + val incomingContext = Kamon.defaultBinaryPropagation().read(ByteStreamReader.of(contextData)) + + Option(CaptureCurrentInboundEnvelope.CurrentInboundEnvelope.get()) + .foreach(_.asInstanceOf[HasContext].setContext(incomingContext)) + + } catch { + case NonFatal(t) => + logger.warn("Failed to deserialized incoming Context", t) + } + } + + override def remoteMessageSent(recipient: ActorRef, message: Object, sender: ActorRef, size: Int, time: Long): Unit = { + serializationInstruments.outboundMessageSize.record(size) + serializationInstruments.serializationTime.record(time) + } + + override def remoteMessageReceived(recipient: ActorRef, message: Object, sender: ActorRef, size: Int, time: Long): Unit = { + serializationInstruments.inboundMessageSize.record(size) + serializationInstruments.deserializationTime.record(time) + } + + /** + * Creates a new [[ByteStreamWriter]] from a ByteBuffer. 
+ */ + def of(byteBuffer: ByteBuffer): ByteStreamWriter = new ByteStreamWriter { + override def write(bytes: Array[Byte]): Unit = + byteBuffer.put(bytes) + + override def write(bytes: Array[Byte], offset: Int, count: Int): Unit = + byteBuffer.put(bytes, offset, count) + + override def write(byte: Int): Unit = + byteBuffer.put(byte.toByte) + } +} + +class CaptureCurrentInboundEnvelope + +object CaptureCurrentInboundEnvelope { + + val CurrentInboundEnvelope = new ThreadLocal[InboundEnvelope]() { + override def initialValue(): InboundEnvelope = null + } + + @Advice.OnMethodEnter + def enter(@Advice.Argument(0) inboundEnvelope: InboundEnvelope): Unit = { + CurrentInboundEnvelope.set(inboundEnvelope) + } + + @Advice.OnMethodExit + def exit(): Unit = { + CurrentInboundEnvelope.remove() + } +} + + diff --git a/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/remote/internal/ArterySerializationAdvice.scala b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/remote/internal/ArterySerializationAdvice.scala new file mode 100644 index 000000000..5641210e4 --- /dev/null +++ b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/remote/internal/ArterySerializationAdvice.scala @@ -0,0 +1,153 @@ +package org.apache.pekko.remote.kamon.instrumentation.pekko.remote.internal.remote + +import java.nio.ByteBuffer + +import org.apache.pekko.remote.artery._ +import org.apache.pekko.actor.ActorSystem +import org.apache.pekko.serialization.Serialization +import kamon.Kamon +import kamon.context.{BinaryPropagation, Context} +import kamon.instrumentation.pekko.PekkoRemoteMetrics +import kamon.instrumentation.context.HasContext +import kanela.agent.libs.net.bytebuddy.asm.Advice + + +/** + * For Artery messages we will always add two sections to the end of each serialized message: the Context and the size + * of the Context. 
The layout will look something like this: + * + * |------------------ Actual Message ------------------||-- Kamon Context --||-- Context Size (4 bytes) --| + * + * If the Context is empty the Context size will be zero. + */ + +class SerializeForArteryAdvice +object SerializeForArteryAdvice { + + @Advice.OnMethodEnter + def enter(): Long = { + System.nanoTime() + } + + @Advice.OnMethodExit + def exit(@Advice.Argument(0) serialization: Serialization, @Advice.Argument(1) envelope: OutboundEnvelope, + @Advice.Argument(3) envelopeBuffer: EnvelopeBuffer, @Advice.Enter startTime: Long): Unit = { + + val instruments = PekkoRemoteMetrics.serializationInstruments(serialization.system.name) + val messageBuffer = envelopeBuffer.byteBuffer + val context = envelope.asInstanceOf[HasContext].context + val positionBeforeContext = messageBuffer.position() + + if(context.nonEmpty()) { + Kamon.defaultBinaryPropagation().write(context, byteBufferWriter(messageBuffer)) + } + + instruments.serializationTime.record(System.nanoTime() - startTime) + instruments.outboundMessageSize.record(positionBeforeContext) + + val contextSize = messageBuffer.position() - positionBeforeContext + messageBuffer.putInt(contextSize) + } + + def byteBufferWriter(bb: ByteBuffer): BinaryPropagation.ByteStreamWriter = new BinaryPropagation.ByteStreamWriter { + override def write(bytes: Array[Byte]): Unit = + bb.put(bytes) + + override def write(bytes: Array[Byte], offset: Int, count: Int): Unit = + bb.put(bytes, offset, count) + + override def write(byte: Int): Unit = + bb.put(byte.toByte) + } +} + +class DeserializeForArteryAdvice +object DeserializeForArteryAdvice { + + val LastDeserializedContext = new ThreadLocal[Context]() { + override def initialValue(): Context = null + } + + case class DeserializationInfo( + context: Context, + timeStamp: Long, + messageSize: Long + ) + + @Advice.OnMethodEnter + def exit(@Advice.Argument(5) envelopeBuffer: EnvelopeBuffer): DeserializationInfo = { + val startTime = 
System.nanoTime() + val messageBuffer = envelopeBuffer.byteBuffer + val messageStart = messageBuffer.position() + + messageBuffer.mark() + messageBuffer.position(messageBuffer.limit() - 4) + val contextSize = messageBuffer.getInt() + val contextStart = messageBuffer.limit() - (contextSize + 4) + val messageSize = contextStart - messageStart + + val context = if(contextSize == 0) + Context.Empty + else { + messageBuffer + .position(contextStart) + .limit(contextStart + contextSize) + + Kamon.defaultBinaryPropagation().read(byteBufferReader(messageBuffer)) + } + + messageBuffer.reset() + messageBuffer.limit(contextStart) + DeserializationInfo(context, startTime, messageSize) + } + + @Advice.OnMethodExit(onThrowable = classOf[Throwable]) + def exit(@Advice.Argument(0) system: ActorSystem, @Advice.Argument(5) envelopeBuffer: EnvelopeBuffer, + @Advice.Enter deserializationInfo: DeserializationInfo, @Advice.Thrown error: Throwable): Unit = { + + if(error == null) { + LastDeserializedContext.set(deserializationInfo.context) + + val instruments = PekkoRemoteMetrics.serializationInstruments(system.name) + instruments.deserializationTime.record(System.nanoTime() - deserializationInfo.timeStamp) + instruments.inboundMessageSize.record(deserializationInfo.messageSize) + } + } + + + def byteBufferReader(bb: ByteBuffer): BinaryPropagation.ByteStreamReader = new BinaryPropagation.ByteStreamReader { + override def available(): Int = + bb.remaining() + + override def read(target: Array[Byte]): Int = { + bb.get(target) + target.length + } + + override def read(target: Array[Byte], offset: Int, count: Int): Int = { + bb.get(target, offset, count) + target.length + } + + override def readAll(): Array[Byte] = { + val array = Array.ofDim[Byte](bb.remaining()) + bb.get(array) + array + } + } +} + + +class CaptureContextOnInboundEnvelope +object CaptureContextOnInboundEnvelope { + + @Advice.OnMethodEnter + def enter(@Advice.This inboundEnvelope: Any): Unit = { + val lastContext = 
DeserializeForArteryAdvice.LastDeserializedContext.get() + if(lastContext != null) { + inboundEnvelope.asInstanceOf[HasContext].setContext(lastContext) + DeserializeForArteryAdvice.LastDeserializedContext.set(null) + } + } + +} diff --git a/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/remote/internal/KamonOptionVal.scala b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/remote/internal/KamonOptionVal.scala new file mode 100644 index 000000000..52afc2110 --- /dev/null +++ b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/remote/internal/KamonOptionVal.scala @@ -0,0 +1,9 @@ +package org.apache.pekko + +import org.apache.pekko.util.{ OptionVal => PekkoOptionVal } +/** + * The sole purpose of this object is to provide access to the otherwise internal class [[org.apache.pekko.util.OptionVal]]. + */ +object KamonOptionVal { + type OptionVal[+T >: Null] = PekkoOptionVal[T] +} diff --git a/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/remote/internal/PekkoPduProtobufCodecConstructMessageMethodInterceptor.scala b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/remote/internal/PekkoPduProtobufCodecConstructMessageMethodInterceptor.scala new file mode 100644 index 000000000..86bf233b1 --- /dev/null +++ b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/remote/internal/PekkoPduProtobufCodecConstructMessageMethodInterceptor.scala @@ -0,0 +1,81 @@ +package org.apache.pekko.kamon.instrumentation.pekko.remote.internal + +import java.io.ByteArrayOutputStream + +import org.apache.pekko.KamonOptionVal.OptionVal +import org.apache.pekko.actor.{ActorRef, Address} +import pekko.remote.ContextAwareWireFormats_Pekko.{AckAndContextAwareEnvelopeContainer, ContextAwareRemoteEnvelope, RemoteContext} +import org.apache.pekko.remote.WireFormats.{AcknowledgementInfo, ActorRefData, AddressData, SerializedMessage} +import org.apache.pekko.remote.{Ack, SeqNo} 
+import org.apache.pekko.util.ByteString +import kamon.Kamon +import kamon.context.BinaryPropagation.ByteStreamWriter +import kamon.instrumentation.pekko.PekkoRemoteMetrics +import kanela.agent.libs.net.bytebuddy.implementation.bind.annotation.{Argument, RuntimeType} + +/** + * Interceptor for org.apache.pekko.remote.transport.PekkoPduProtobufCodec$::constructMessage + */ +class PekkoPduProtobufCodecConstructMessageMethodInterceptor { + + @RuntimeType + def aroundConstructMessage(@Argument(0) localAddress: Address, + @Argument(1) recipient: ActorRef, + @Argument(2) serializedMessage: SerializedMessage, + @Argument(3) senderOption: OptionVal[ActorRef], + @Argument(4) seqOption: Option[SeqNo], + @Argument(5) ackOption: Option[Ack]): AnyRef = { + + val ackAndEnvelopeBuilder = AckAndContextAwareEnvelopeContainer.newBuilder + val envelopeBuilder = ContextAwareRemoteEnvelope.newBuilder + + envelopeBuilder.setRecipient(serializeActorRef(recipient.path.address, recipient)) + if (senderOption.isDefined) + envelopeBuilder.setSender(serializeActorRef(localAddress, senderOption.get)) + seqOption foreach { seq => envelopeBuilder.setSeq(seq.rawValue) } + ackOption foreach { ack => ackAndEnvelopeBuilder.setAck(ackBuilder(ack)) } + envelopeBuilder.setMessage(serializedMessage) + + val out = new ByteArrayOutputStream() + Kamon.defaultBinaryPropagation().write(Kamon.currentContext(), ByteStreamWriter.of(out)) + + val remoteTraceContext = RemoteContext.newBuilder().setContext( + org.apache.pekko.protobufv3.internal.ByteString.copyFrom(out.toByteArray) + ) + envelopeBuilder.setTraceContext(remoteTraceContext) + + ackAndEnvelopeBuilder.setEnvelope(envelopeBuilder) + + val messageSize = envelopeBuilder.getMessage.getMessage.size() + PekkoRemoteMetrics.serializationInstruments(localAddress.system).outboundMessageSize.record(messageSize) + + ByteString.ByteString1C(ackAndEnvelopeBuilder.build.toByteArray) //Reuse Byte Array (naughty!) 
+ } + + // Copied from org.apache.pekko.remote.transport.PekkoPduProtobufCodec because of private access. + private def ackBuilder(ack: Ack): AcknowledgementInfo.Builder = { + val ackBuilder = AcknowledgementInfo.newBuilder() + ackBuilder.setCumulativeAck(ack.cumulativeAck.rawValue) + ack.nacks foreach { nack => ackBuilder.addNacks(nack.rawValue) } + ackBuilder + } + + // Copied from org.apache.pekko.remote.transport.PekkoPduProtobufCodec because of private access. + private def serializeActorRef(defaultAddress: Address, ref: ActorRef): ActorRefData = { + ActorRefData.newBuilder.setPath( + if (ref.path.address.host.isDefined) ref.path.toSerializationFormat + else ref.path.toSerializationFormatWithAddress(defaultAddress)).build() + } + + // Copied from org.apache.pekko.remote.transport.PekkoPduProtobufCodec because of private access. + private def serializeAddress(address: Address): AddressData = address match { + case Address(protocol, system, Some(host), Some(port)) => + AddressData.newBuilder + .setHostname(host) + .setPort(port) + .setSystem(system) + .setProtocol(protocol) + .build() + case _ => throw new IllegalArgumentException(s"Address [$address] could not be serialized: host or port missing.") + } +} \ No newline at end of file diff --git a/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/remote/internal/PekkoPduProtobufCodecDecodeMessageMethodAdvisor.scala b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/remote/internal/PekkoPduProtobufCodecDecodeMessageMethodAdvisor.scala new file mode 100644 index 000000000..6fc297e4f --- /dev/null +++ b/instrumentation/kamon-pekko/src/main/scala/kamon/instrumentation/pekko/remote/internal/PekkoPduProtobufCodecDecodeMessageMethodAdvisor.scala @@ -0,0 +1,34 @@ +package org.apache.pekko.kamon.instrumentation.pekko.remote.internal + +import org.apache.pekko.actor.Address +import pekko.remote.ContextAwareWireFormats_Pekko.AckAndContextAwareEnvelopeContainer +import 
org.apache.pekko.remote.RemoteActorRefProvider +import org.apache.pekko.util.ByteString +import kamon.Kamon +import kamon.context.BinaryPropagation.ByteStreamReader +import kamon.instrumentation.pekko.PekkoRemoteMetrics +import kanela.agent.libs.net.bytebuddy.asm.Advice.{Argument, OnMethodEnter} + +/** + * Advisor for org.apache.pekko.remote.transport.PekkoPduProtobufCodec$::decodeMessage + */ +class PekkoPduProtobufCodecDecodeMessage + +object PekkoPduProtobufCodecDecodeMessage { + + @OnMethodEnter + def enter(@Argument(0) bs: ByteString, @Argument(1) provider: RemoteActorRefProvider, @Argument(2) localAddress: Address): Unit = { + val ackAndEnvelope = AckAndContextAwareEnvelopeContainer.parseFrom(bs.toArray) + if (ackAndEnvelope.hasEnvelope && ackAndEnvelope.getEnvelope.hasTraceContext) { + val remoteCtx = ackAndEnvelope.getEnvelope.getTraceContext + + if(remoteCtx.getContext.size() > 0) { + val ctx = Kamon.defaultBinaryPropagation().read(ByteStreamReader.of(remoteCtx.getContext.toByteArray)) + Kamon.storeContext(ctx) + } + + val messageSize = ackAndEnvelope.getEnvelope.getMessage.getMessage.size() + PekkoRemoteMetrics.serializationInstruments(localAddress.system).inboundMessageSize.record(messageSize) + } + } +} diff --git a/instrumentation/kamon-pekko/src/test/resources/application.conf b/instrumentation/kamon-pekko/src/test/resources/application.conf new file mode 100644 index 000000000..ca685c1f3 --- /dev/null +++ b/instrumentation/kamon-pekko/src/test/resources/application.conf @@ -0,0 +1,166 @@ +pekko { + loglevel = INFO + loggers = [ "kamon.instrumentation.pekko.TestLogger" ] + logger-startup-timeout = 30s + log-dead-letters = 0 + + actor { + serialize-messages = on + warn-about-java-serializer-usage = no + + deployment { + /picking-the-right-dispatcher-in-pool-router { + router = round-robin-pool + resizer = { + lower-bound = 5 + upper-bound = 64 + messages-per-resize = 20 + } + } + + "/picking-the-right-dispatcher-in-pool-router/*" { + dispatcher = 
custom-dispatcher + } + } + } + + cluster.jmx.multi-mbeans-in-same-jvm = on +} + + +custom-dispatcher { + executor = "thread-pool-executor" + type = PinnedDispatcher +} + +tracked-pinned-dispatcher { + executor = "thread-pool-executor" + type = PinnedDispatcher +} + +kamon { + instrumentation.pekko { + filters { + + actors { + track { + includes = [ "*/user/tracked-*", "*/user/measuring-*", "*/user/clean-after-collect", "*/user/stop", "*/user/repointable*", "*/" ] + excludes = [ "*/system/**", "*/user/tracked-explicitly-excluded", "*/user/non-tracked-actor" ] + } + + trace { + excludes = [ "*/user/filteredout*" ] + } + + start-trace { + includes = [ "*/user/traced*" ] + excludes = [] + } + } + + routers { + includes = [ "*/user/tracked-*", "*/user/measuring-*", "*/user/cleanup-*", "*/user/picking-*", "*/user/stop-*" ] + excludes = [ "*/user/tracked-explicitly-excluded-*"] + } + + dispatchers { + includes = [ "**" ] + excludes = [ "explicitly-excluded" ] + } + + groups { + auto-grouping { + excludes = [ "*/user/ActorMetricsTestActor", "*/user/SecondLevelGrouping"] + } + + group-of-actors { + includes = ["*/user/group-of-actors-*"] + excludes = [] + } + + group-of-actors-for-cleaning { + includes = ["*/user/group-of-actors-for-cleaning*"] + excludes = [] + } + + second-level-group { + includes = ["*/user/second-level-group/*"] + } + + group-of-routees { + includes = ["*/user/group-of-routees*"] + excludes = [] + } + + } + } + + cluster-sharding.shard-metrics-sample-interval = 100 millisecond + } + + metric { + tick-interval = 1 hour + + factory { + default-settings { + range-sampler.auto-update-interval = 20 millis + } + + custom-settings { + "pekko.actor.mailbox-size" { + auto-update-interval = 50 millisecond + } + + "pekko.group.members" { + auto-update-interval = 1 millisecond + } + } + + } + } + + trace.sampler = "always" +} + +explicitly-excluded { + type = "Dispatcher" + executor = "fork-join-executor" +} + +tracked-fjp { + type = "Dispatcher" + executor = 
"fork-join-executor" + + fork-join-executor { + parallelism-min = 8 + parallelism-factor = 100.0 + parallelism-max = 22 + } +} + +tracked-tpe { + type = "Dispatcher" + executor = "thread-pool-executor" + + thread-pool-executor { + core-pool-size-min = 7 + core-pool-size-factor = 100.0 + max-pool-size-factor = 100.0 + max-pool-size-max = 21 + core-pool-size-max = 21 + } +} + + +kanela.modules.pekko-testkit { + name = "Pekko Testkit Instrumentation" + description = "Delays messages received by the Test Kit Actors to give enough time for other Threads to finish their work" + + instrumentations = [ + "kamon.instrumentation.pekko.PekkoTestKitInstrumentation" + ] + + within = [ + "^org.apache.pekko.testkit.*" + ] +} \ No newline at end of file diff --git a/instrumentation/kamon-pekko/src/test/resources/logback.xml b/instrumentation/kamon-pekko/src/test/resources/logback.xml new file mode 100644 index 000000000..2e8c7ddb9 --- /dev/null +++ b/instrumentation/kamon-pekko/src/test/resources/logback.xml @@ -0,0 +1,12 @@ + + + + + %d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n + + + + + + + \ No newline at end of file diff --git a/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/ActorCellInstrumentationSpec.scala b/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/ActorCellInstrumentationSpec.scala new file mode 100644 index 000000000..c85ed0d29 --- /dev/null +++ b/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/ActorCellInstrumentationSpec.scala @@ -0,0 +1,159 @@ +/* =================================================== + * Copyright © 2013 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ========================================================== */ +package kamon.instrumentation.pekko + +import org.apache.pekko.actor.{Actor, ActorRef, ActorSystem, PoisonPill, Props} +import org.apache.pekko.pattern.{ask, pipe} +import org.apache.pekko.routing._ +import org.apache.pekko.testkit.{ImplicitSender, TestKit} +import org.apache.pekko.util.Timeout +import kamon.Kamon +import kamon.testkit.{InitAndStopKamonAfterAll, MetricInspection} +import kamon.tag.Lookups._ +import org.scalatest.BeforeAndAfterAll +import org.scalatest.concurrent.Eventually +import org.scalatest.matchers.should.Matchers +import org.scalatest.wordspec.AnyWordSpecLike + +import scala.collection.mutable.ListBuffer +import scala.concurrent.duration._ + + +class ActorCellInstrumentationSpec extends TestKit(ActorSystem("ActorCellInstrumentationSpec")) with AnyWordSpecLike + with BeforeAndAfterAll with ImplicitSender with Eventually with MetricInspection.Syntax with Matchers with InitAndStopKamonAfterAll { + implicit lazy val executionContext = system.dispatcher + import ContextTesting._ + + "the message passing instrumentation" should { + "capture and propagate the current context when using bang" in new EchoActorFixture { + Kamon.runWithContext(testContext("propagate-with-bang")) { + contextEchoActor ! 
"test" + } + + expectMsg("propagate-with-bang") + } + + "capture and propagate the current context for messages sent when the target actor might be a repointable ref" in { + for (_ <- 1 to 100) { + val ta = system.actorOf(Props[ContextStringEcho]) + Kamon.runWithContext(testContext("propagate-with-tell")) { + ta.tell("test", testActor) + } + + expectMsg("propagate-with-tell") + system.stop(ta) + } + } + + "propagate the current context when using the ask pattern" in new EchoActorFixture { + implicit val timeout = Timeout(1 seconds) + Kamon.runWithContext(testContext("propagate-with-ask")) { + // The pipe pattern use Futures internally, so FutureTracing test should cover the underpinnings of it. + (contextEchoActor ? "test") pipeTo (testActor) + } + + expectMsg("propagate-with-ask") + } + + + "propagate the current context to actors behind a simple router" in new EchoSimpleRouterFixture { + Kamon.runWithContext(testContext("propagate-with-router")) { + router.route("test", testActor) + } + + expectMsg("propagate-with-router") + } + + "propagate the current context to actors behind a pool router" in new EchoPoolRouterFixture { + Kamon.runWithContext(testContext("propagate-with-pool")) { + pool ! "test" + } + + expectMsg("propagate-with-pool") + } + + "propagate the current context to actors behind a group router" in new EchoGroupRouterFixture { + Kamon.runWithContext(testContext("propagate-with-group")) { + group ! "test" + } + + expectMsg("propagate-with-group") + } + + "cleanup the metric recorders when a RepointableActorRef is killed early" in { + def actorPathTag(ref: ActorRef): String = system.name + "/" + ref.path.elements.mkString("/") + val trackedActors = new ListBuffer[String] + + for(j <- 1 to 10) { + for (i <- 1 to 1000) { + val a = system.actorOf(Props[ContextStringEcho], s"repointable-$j-$i") + a ! 
PoisonPill + trackedActors.append(actorPathTag(a)) + } + + eventually(timeout(1 second)) { + val trackedActors = kamon.instrumentation.pekko.PekkoMetrics.ActorProcessingTime.tagValues("path") + for(p <- trackedActors) { + trackedActors.find(_ == p) shouldBe empty + } + } + + trackedActors.clear() + } + } + } + + override protected def afterAll(): Unit = { + shutdown() + super.afterAll() + } + + trait EchoActorFixture { + val contextEchoActor = system.actorOf(Props[ContextStringEcho]) + } + + trait EchoSimpleRouterFixture { + val router = { + val routees = Vector.fill(5) { + val r = system.actorOf(Props[ContextStringEcho]) + ActorRefRoutee(r) + } + Router(RoundRobinRoutingLogic(), routees) + } + } + + trait EchoPoolRouterFixture { + val pool = system.actorOf(RoundRobinPool(nrOfInstances = 5).props(Props[ContextStringEcho]), "pool-router") + } + + trait EchoGroupRouterFixture { + val routees = Vector.fill(5) { + system.actorOf(Props[ContextStringEcho]) + } + + val group = system.actorOf(RoundRobinGroup(routees.map(_.path.toStringWithoutAddress)).props(), "group-router") + } +} + +class ContextStringEcho extends Actor { + import ContextTesting._ + + def receive = { + case _: String => + sender ! Kamon.currentContext().getTag(plain(TestKey)) + } +} + diff --git a/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/ActorGroupMetricsSpec.scala b/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/ActorGroupMetricsSpec.scala new file mode 100644 index 000000000..31ef342d1 --- /dev/null +++ b/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/ActorGroupMetricsSpec.scala @@ -0,0 +1,188 @@ +/* ========================================================================================= + * Copyright © 2013-2017 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ + +package kamon.instrumentation.pekko + +import org.apache.pekko.actor._ +import org.apache.pekko.routing.RoundRobinPool +import org.apache.pekko.testkit.{ImplicitSender, TestKit, TestProbe} +import ActorMetricsTestActor.{Block, Die} +import kamon.instrumentation.pekko.PekkoMetrics._ +import kamon.tag.TagSet +import kamon.testkit.{InitAndStopKamonAfterAll, InstrumentInspection, MetricInspection} +import kamon.util.Filter +import org.scalactic.TimesOnInt.convertIntToRepeater +import org.scalatest.concurrent.Eventually +import org.scalatest.matchers.should.Matchers +import org.scalatest.wordspec.AnyWordSpecLike + +import scala.concurrent.duration._ +import scala.util.Random + +class ActorGroupMetricsSpec extends TestKit(ActorSystem("ActorGroupMetricsSpec")) with AnyWordSpecLike with MetricInspection.Syntax + with InstrumentInspection.Syntax with Matchers with InitAndStopKamonAfterAll with ImplicitSender with Eventually { + + "the Kamon actor-group metrics" should { + "increase the member count when an actor matching the pattern is created" in new ActorGroupMetricsFixtures { + val trackedActor1 = watch(createTestActor("group-of-actors-1")) + val trackedActor2 = watch(createTestActor("group-of-actors-2")) + val trackedActor3 = watch(createTestActor("group-of-actors-3")) + val nonTrackedActor = createTestActor("someone-else") + + eventually (timeout(5 seconds)) { + GroupMembers.withTags(groupTags("group-of-actors")).distribution().max shouldBe(3) + } + + 
system.stop(trackedActor1) + expectTerminated(trackedActor1) + system.stop(trackedActor2) + expectTerminated(trackedActor2) + system.stop(trackedActor3) + expectTerminated(trackedActor3) + + eventually (timeout(5 seconds)) { + GroupMembers.withTags(groupTags("group-of-actors")).distribution().max shouldBe (0) + } + } + + + "increase the member count when a routee matching the pattern is created" in new ActorGroupMetricsFixtures { + val trackedRouter = createTestPoolRouter("group-of-routees") + val nonTrackedRouter = createTestPoolRouter("non-tracked-group-of-routees") + + eventually (timeout(5 seconds)) { + val valueNow = GroupMembers.withTags(groupTags("group-of-routees")).distribution().max + valueNow shouldBe(5) + } + + val trackedRouter2 = createTestPoolRouter("group-of-routees-2") + val trackedRouter3 = createTestPoolRouter("group-of-routees-3") + + eventually(GroupMembers.withTags(groupTags("group-of-routees")).distribution().max shouldBe(15)) + + system.stop(trackedRouter) + system.stop(trackedRouter2) + system.stop(trackedRouter3) + + eventually(GroupMembers.withTags(groupTags("group-of-routees")).distribution(resetState = true).max shouldBe(0)) + } + + "allow defining groups by configuration" in { + PekkoInstrumentation.matchingActorGroups("system/user/group-provided-by-code-actor") shouldBe empty + PekkoInstrumentation.defineActorGroup("group-by-code", Filter.fromGlob("*/user/group-provided-by-code-actor")) shouldBe true + PekkoInstrumentation.matchingActorGroups("system/user/group-provided-by-code-actor") should contain only("group-by-code") + PekkoInstrumentation.removeActorGroup("group-by-code") + PekkoInstrumentation.matchingActorGroups("system/user/group-provided-by-code-actor") shouldBe empty + } + + "cleanup pending-messages metric on member shutdown" in new ActorGroupMetricsFixtures { + val actors = (1 to 10).map(id => watch(createTestActor(s"group-of-actors-for-cleaning-$id"))) + + eventually { + val memberCountDistribution = 
GroupMembers.withTags(groupTags("group-of-actors-for-cleaning")).distribution() + memberCountDistribution.min shouldBe (10) + memberCountDistribution.max shouldBe (10) + } + + val hangQueue = Block(1 milliseconds) + Random.shuffle(actors).foreach { groupMember => + 1000 times { + groupMember ! hangQueue + } + } + + actors.foreach(system.stop) + + eventually { + val pendingMessagesDistribution = GroupPendingMessages.withTags(groupTags("group-of-actors-for-cleaning")).distribution() + pendingMessagesDistribution.count should be > 0L + pendingMessagesDistribution.max shouldBe 0L + } + } + + "cleanup pending-messages metric on member shutdown there are messages still being sent to the members" in new ActorGroupMetricsFixtures { + val parent = watch(system.actorOf(Props[SecondLevelGrouping], "second-level-group")) + + eventually { + val memberCountDistribution = GroupMembers.withTags(groupTags("second-level-group")).distribution() + memberCountDistribution.min shouldBe (10) + memberCountDistribution.max shouldBe (10) + } + + 1000 times { + parent ! Die + } + + eventually(timeout(5 seconds)) { + val pendingMessagesDistribution = GroupPendingMessages.withTags(groupTags("second-level-group")).distribution() + + pendingMessagesDistribution.count should be > 0L + // We leave some room here because there is a small period of time in which the instrumentation might increment + // the mailbox-size range sampler if messages are being sent while shutting down the actor. + // + // TODO: Find a way to only increment the mailbox size when the messages are actually going to a mailbox. 
+ pendingMessagesDistribution.max should be(0L +- 5L) + } + } + } + + override implicit def patienceConfig: PatienceConfig = PatienceConfig(timeout = 5 seconds, interval = 5 milliseconds) + + override protected def afterAll(): Unit = { + shutdown() + super.afterAll() + } + + def groupTags(group: String): TagSet = + TagSet.from( + Map( + "system" -> "ActorGroupMetricsSpec", + "group" -> group + ) + ) + + trait ActorGroupMetricsFixtures { + def createTestActor(name: String): ActorRef = { + val actor = system.actorOf(Props[ActorMetricsTestActor], name) + val initialiseListener = TestProbe() + + // Ensure that the actor has been created before returning. + actor.tell(ActorMetricsTestActor.Ping, initialiseListener.ref) + initialiseListener.expectMsg(ActorMetricsTestActor.Pong) + + actor + } + + def createTestPoolRouter(routerName: String): ActorRef = { + val router = system.actorOf(RoundRobinPool(5).props(Props[RouterMetricsTestActor]), routerName) + val initialiseListener = TestProbe() + + // Ensure that the router has been created before returning. 
+ router.tell(RouterMetricsTestActor.Ping, initialiseListener.ref) + initialiseListener.expectMsg(RouterMetricsTestActor.Pong) + + router + } + } +} + +class SecondLevelGrouping extends Actor { + (1 to 10).foreach(id => context.actorOf(Props[ActorMetricsTestActor], s"child-$id")) + + def receive: Actor.Receive = { + case any => Random.shuffle(context.children).headOption.foreach(_.forward(any)) + } +} + diff --git a/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/ActorLoggingInstrumentationSpec.scala b/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/ActorLoggingInstrumentationSpec.scala new file mode 100644 index 000000000..71b4f9711 --- /dev/null +++ b/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/ActorLoggingInstrumentationSpec.scala @@ -0,0 +1,63 @@ +/* + * ========================================================================================= + * Copyright © 2013-2017 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ * ========================================================================================= + */ +package kamon.instrumentation.pekko + + +import org.apache.pekko.actor.{Actor, ActorLogging, ActorSystem, Props} +import org.apache.pekko.event.Logging.LogEvent +import org.apache.pekko.testkit.{ImplicitSender, TestKit} +import kamon.Kamon +import kamon.instrumentation.context.HasContext +import kamon.tag.Lookups._ +import org.scalatest.BeforeAndAfterAll +import org.scalatest.matchers.should.Matchers +import org.scalatest.wordspec.AnyWordSpecLike + +class ActorLoggingInstrumentationSpec extends TestKit(ActorSystem("ActorCellInstrumentationSpec")) with AnyWordSpecLike with Matchers + with BeforeAndAfterAll with ImplicitSender { + import ContextTesting._ + + "the ActorLogging instrumentation" should { + "capture the current context and attach it to log events" in { + val loggerActor = system.actorOf(Props[LoggerActor]) + Kamon.runWithContext(testContext("propagate-when-logging")) { + loggerActor ! 
"info" + } + + val logEvent = fishForMessage() { + case event: LogEvent if event.message.toString startsWith "TestLogEvent" => true + case _: LogEvent => false + } + + Kamon.runWithContext(logEvent.asInstanceOf[HasContext].context) { + val keyValueFromContext = Kamon.currentContext().getTag(option(ContextTesting.TestKey)).getOrElse("Missing Context Tag") + keyValueFromContext should be("propagate-when-logging") + } + } + } + + + override protected def beforeAll(): Unit = system.eventStream.subscribe(testActor, classOf[LogEvent]) + + override protected def afterAll(): Unit = shutdown() +} + +class LoggerActor extends Actor with ActorLogging { + def receive = { + case "info" => log.info("TestLogEvent") + } +} + diff --git a/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/ActorMetricsSpec.scala b/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/ActorMetricsSpec.scala new file mode 100644 index 000000000..d5dde5804 --- /dev/null +++ b/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/ActorMetricsSpec.scala @@ -0,0 +1,156 @@ +/* ========================================================================================= + * Copyright © 2013 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ * ========================================================================================= + */ + +package kamon.instrumentation.pekko + +import org.apache.pekko.actor._ +import org.apache.pekko.testkit.{ImplicitSender, TestKit, TestProbe} +import kamon.instrumentation.pekko.ActorMetricsTestActor._ +import kamon.instrumentation.pekko.PekkoMetrics._ +import kamon.tag.TagSet +import kamon.testkit.{InitAndStopKamonAfterAll, InstrumentInspection, MetricInspection} +import org.scalactic.TimesOnInt._ +import org.scalatest.BeforeAndAfterAll +import org.scalatest.concurrent.Eventually +import org.scalatest.matchers.should.Matchers +import org.scalatest.wordspec.AnyWordSpecLike + +import scala.concurrent.duration._ + + +class ActorMetricsSpec extends TestKit(ActorSystem("ActorMetricsSpec")) with AnyWordSpecLike with MetricInspection.Syntax with InstrumentInspection.Syntax with Matchers + with ImplicitSender with Eventually with InitAndStopKamonAfterAll { + + "the Kamon actor metrics" should { + "respect the configured include and exclude filters" in new ActorMetricsFixtures { + val trackedActor = createTestActor("tracked-actor") + ActorProcessingTime.tagValues("path") should contain("ActorMetricsSpec/user/tracked-actor") + + val nonTrackedActor = createTestActor("non-tracked-actor") + ActorProcessingTime.tagValues("path") shouldNot contain("ActorMetricsSpec/user/non-tracked-actor") + + val trackedButExplicitlyExcluded = createTestActor("tracked-explicitly-excluded") + ActorProcessingTime.tagValues("path") shouldNot contain("ActorMetricsSpec/user/tracked-explicitly-excluded") + } + + "not pick up the root supervisor" in { + ActorProcessingTime.tagValues("path") shouldNot contain("ActorMetricsSpec/") + } + + "record the processing-time of the receive function" in new ActorMetricsFixtures { + createTestActor("measuring-processing-time", true) ! 
TrackTimings(sleep = Some(100 millis)) + + val timings = expectMsgType[TrackedTimings] + val processingTimeDistribution = ActorProcessingTime. + withTags(actorTags("ActorMetricsSpec/user/measuring-processing-time")).distribution() + + processingTimeDistribution.count should be(1L) + processingTimeDistribution.buckets.size should be(1L) + processingTimeDistribution.buckets.head.value should be(timings.approximateProcessingTime +- 10.millis.toNanos) + } + + "record the number of errors" in new ActorMetricsFixtures { + val trackedActor = createTestActor("measuring-errors") + 10.times(trackedActor ! Fail) + + trackedActor ! Ping + expectMsg(Pong) + ActorErrors.withTags(actorTags("ActorMetricsSpec/user/measuring-errors")).value() should be(10) + } + + "record the mailbox-size" in new ActorMetricsFixtures { + val trackedActor = createTestActor("measuring-mailbox-size", true) + trackedActor ! TrackTimings(sleep = Some(1 second)) + 10.times(trackedActor ! Discard) + trackedActor ! Ping + + val timings = expectMsgType[TrackedTimings] + expectMsg(Pong) + + val mailboxSizeDistribution = ActorMailboxSize + .withTags(actorTags("ActorMetricsSpec/user/measuring-mailbox-size")).distribution() + + mailboxSizeDistribution.min should be(0L +- 1L) + mailboxSizeDistribution.max should be(11L +- 1L) + } + + "record the time-in-mailbox" in new ActorMetricsFixtures { + val trackedActor = createTestActor("measuring-time-in-mailbox", true) + trackedActor ! 
TrackTimings(sleep = Some(100 millis)) + val timings = expectMsgType[TrackedTimings] + + val timeInMailboxDistribution = ActorTimeInMailbox + .withTags(actorTags("ActorMetricsSpec/user/measuring-time-in-mailbox")).distribution() + + timeInMailboxDistribution.count should be(1L) + timeInMailboxDistribution.buckets.head.frequency should be(1L) + timeInMailboxDistribution.buckets.head.value should be(timings.approximateTimeInMailbox +- 10.millis.toNanos) + } + + "clean up the associated recorder when the actor is stopped" in new ActorMetricsFixtures { + val trackedActor = createTestActor("stop") + + // Killing the actor should remove it's ActorMetrics and registering again below should create a new one. + val deathWatcher = TestProbe() + deathWatcher.watch(trackedActor) + trackedActor ! PoisonPill + deathWatcher.expectTerminated(trackedActor) + + eventually(timeout(1 second)) { + ActorProcessingTime.tagValues("path") shouldNot contain("ActorMetricsSpec/user/stop") + } + } + } + + + override protected def afterAll(): Unit = { + shutdown() + super.afterAll() + } + + def actorTags(path: String): TagSet = + TagSet.from( + Map( + "path" -> path, + "system" -> "ActorMetricsSpec", + "dispatcher" -> "pekko.actor.default-dispatcher", + "class" -> "kamon.instrumentation.pekko.ActorMetricsTestActor" + ) + ) + + trait ActorMetricsFixtures { + + def createTestActor(name: String, resetState: Boolean = false): ActorRef = { + val actor = system.actorOf(Props[ActorMetricsTestActor], name) + val initialiseListener = TestProbe() + + // Ensure that the router has been created before returning. 
+ actor.tell(Ping, initialiseListener.ref) + initialiseListener.expectMsg(Pong) + + // Cleanup all the metric recording instruments: + if(resetState) { + val tags = actorTags(s"ActorMetricsSpec/user/$name") + + ActorTimeInMailbox.withTags(tags).distribution(resetState = true) + ActorProcessingTime.withTags(tags).distribution(resetState = true) + ActorMailboxSize.withTags(tags).distribution(resetState = true) + ActorErrors.withTags(tags).value(resetState = true) + } + + actor + } + } +} diff --git a/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/ActorMetricsTestActor.scala b/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/ActorMetricsTestActor.scala new file mode 100644 index 000000000..1993b3ae9 --- /dev/null +++ b/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/ActorMetricsTestActor.scala @@ -0,0 +1,61 @@ +/* ========================================================================================= + * Copyright © 2013-2017 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ * ========================================================================================= + */ + +package kamon.instrumentation.pekko + +import org.apache.pekko.actor._ +import kamon.Kamon + +import scala.concurrent.duration._ + +class ActorMetricsTestActor extends Actor { + import ActorMetricsTestActor._ + + override def receive = { + case Discard => + case Die => context.stop(self) + case Fail => throw new ArithmeticException("Division by zero.") + case Ping => sender ! Pong + case Block(forDuration) => + Thread.sleep(forDuration.toMillis) + case BlockAndDie(forDuration) => + Thread.sleep(forDuration.toMillis) + context.stop(self) + case TrackTimings(sendTimestamp, sleep) => { + val dequeueTimestamp = Kamon.clock().nanos() + sleep.map(s => Thread.sleep(s.toMillis)) + val afterReceiveTimestamp = Kamon.clock().nanos() + + sender ! TrackedTimings(sendTimestamp, dequeueTimestamp, afterReceiveTimestamp) + } + } +} + +object ActorMetricsTestActor { + case object Ping + case object Pong + case object Fail + case object Die + case object Discard + + case class Block(duration: Duration) + case class BlockAndDie(duration: Duration) + case class TrackTimings(sendTimestamp: Long = Kamon.clock().nanos(), sleep: Option[Duration] = None) + case class TrackedTimings(sendTimestamp: Long, dequeueTimestamp: Long, afterReceiveTimestamp: Long) { + def approximateTimeInMailbox: Long = dequeueTimestamp - sendTimestamp + def approximateProcessingTime: Long = afterReceiveTimestamp - dequeueTimestamp + } +} + diff --git a/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/ActorSystemMetricsSpec.scala b/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/ActorSystemMetricsSpec.scala new file mode 100644 index 000000000..ff2065fcf --- /dev/null +++ b/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/ActorSystemMetricsSpec.scala @@ -0,0 +1,126 @@ +/* 
========================================================================================= + * Copyright © 2013 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ + +package kamon.instrumentation.pekko + +import org.apache.pekko.Version +import org.apache.pekko.actor._ +import org.apache.pekko.testkit.{ImplicitSender, TestKit, TestProbe} +import kamon.instrumentation.pekko.ActorMetricsTestActor._ +import kamon.testkit.{InitAndStopKamonAfterAll, InstrumentInspection, MetricInspection} +import org.scalactic.TimesOnInt._ +import org.scalatest.BeforeAndAfterAll +import org.scalatest.concurrent.Eventually +import org.scalatest.matchers.should.Matchers +import org.scalatest.wordspec.AnyWordSpecLike + +import scala.concurrent.duration._ + + +class ActorSystemMetricsSpec extends TestKit(ActorSystem("ActorSystemMetricsSpec")) with AnyWordSpecLike with MetricInspection.Syntax + with InstrumentInspection.Syntax with Matchers with InitAndStopKamonAfterAll with ImplicitSender with Eventually { + + val (baseActorCount, totalActorCount) = (8L, 29) + + val systemMetrics = PekkoMetrics.forSystem(system.name) + + "the Actor System metrics" should { + "record active actor counts" in { + testActor.tell("wake up!", testActor) + + eventually(timeout(10 seconds)) { + val activeActors = systemMetrics.activeActors.distribution() + + // This establishes a baseline on actor counts 
for the rest of the test. + activeActors.count should be > 0L + + activeActors.min shouldBe baseActorCount + activeActors.max shouldBe baseActorCount + } + + val actors = (1 to 10).map(id => watch(system.actorOf(Props[ActorMetricsTestActor], s"just-some-actor-$id"))) + val parent = watch(system.actorOf(Props[SecondLevelGrouping], "just-some-parent-actor")) + + 1000 times { + actors.foreach(_ ! Discard) + parent ! Discard + } + + eventually(timeout(10 seconds)) { + val activeActors = systemMetrics.activeActors.distribution() + activeActors.count should be > 0L + activeActors.min shouldBe totalActorCount + activeActors.max shouldBe totalActorCount + } + + actors.foreach(system.stop) + system.stop(parent) + + eventually(timeout(10 seconds)) { + val activeActors = systemMetrics.activeActors.distribution() + activeActors.count should be > 0L + + activeActors.min shouldBe baseActorCount + activeActors.max shouldBe baseActorCount + } + } + + "record dead letters" in { + val doaActor = system.actorOf(Props[ActorMetricsTestActor], "doa") + val deathWatcher = TestProbe() + systemMetrics.deadLetters.value(true) + deathWatcher.watch(doaActor) + doaActor ! PoisonPill + deathWatcher.expectTerminated(doaActor) + + 7 times { doaActor ! "deadonarrival" } + + eventually { + systemMetrics.deadLetters.value(false).toInt should be(7) + } + } + + "record unhandled messages" in { + val unhandled = system.actorOf(Props[ActorMetricsTestActor], "unhandled") + 10 times { unhandled ! 
"CantHandleStrings" } + + eventually { + systemMetrics.unhandledMessages.value(false).toInt should be(10) + } + } + + "record processed messages counts" in { + systemMetrics.processedMessagesByTracked.value(true) + systemMetrics.processedMessagesByNonTracked.value(true) + systemMetrics.processedMessagesByNonTracked.value(false) should be(0) + + val tracked = system.actorOf(Props[ActorMetricsTestActor], "tracked-actor-counts") + val nonTracked = system.actorOf(Props[ActorMetricsTestActor], "non-tracked-actor-counts") + + (1 to 10).foreach(_ => tracked ! Discard) + (1 to 15).foreach(_ => nonTracked ! Discard) + + eventually(timeout(3 second)) { + systemMetrics.processedMessagesByTracked.value(false) should be >= (10L) + systemMetrics.processedMessagesByNonTracked.value(false) should be >= (15L) + } + } + } + + override protected def afterAll(): Unit = { + shutdown() + super.afterAll() + } +} diff --git a/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/AskPatternInstrumentationSpec.scala b/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/AskPatternInstrumentationSpec.scala new file mode 100644 index 000000000..ab4fc7fec --- /dev/null +++ b/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/AskPatternInstrumentationSpec.scala @@ -0,0 +1,99 @@ +/* + * ========================================================================================= + * Copyright © 2013 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. 
See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ + +package kamon.instrumentation.pekko + + +import org.apache.pekko.actor._ +import org.apache.pekko.pattern.ask +import org.apache.pekko.testkit.{EventFilter, ImplicitSender, TestKit} +import org.apache.pekko.util.Timeout +import com.typesafe.config.ConfigFactory +import kamon.Kamon +import kamon.testkit.InitAndStopKamonAfterAll +import kamon.instrumentation.pekko.ContextTesting._ +import org.scalatest.matchers.should.Matchers +import org.scalatest.wordspec.AnyWordSpecLike + +import scala.concurrent.duration._ + +class AskPatternInstrumentationSpec extends TestKit(ActorSystem("AskPatternInstrumentationSpec")) with AnyWordSpecLike + with InitAndStopKamonAfterAll with ImplicitSender { + + implicit lazy val ec = system.dispatcher + implicit val askTimeout = Timeout(10 millis) + + // TODO: Make this work with ActorSelections + + "the AskPatternInstrumentation" when { + "configured in heavyweight mode" should { + "log a warning with a full stack trace and the context captured the moment when the ask was triggered for an actor" in { + val noReplyActorRef = system.actorOf(Props[NoReply], "no-reply-1") + setAskPatternTimeoutWarningMode("heavyweight") + + EventFilter.warning(start = "Timeout triggered for ask pattern to actor [no-reply-1] at").intercept { + Kamon.runWithContext(testContext("ask-timeout-warning")) { + noReplyActorRef ? 
"hello" + } + } + } + } + + "configured in lightweight mode" should { + "log a warning with a short source location description and the context taken from the moment the ask was triggered for a actor" in { + val noReplyActorRef = system.actorOf(Props[NoReply], "no-reply-2") + setAskPatternTimeoutWarningMode("lightweight") + + EventFilter.warning(start = "Timeout triggered for ask pattern to actor [no-reply-2] at").intercept { + Kamon.runWithContext(testContext("ask-timeout-warning")) { + noReplyActorRef ? "hello" + } + } + } + } + + "configured in off mode" should { + "should not log any warning messages" in { + val noReplyActorRef = system.actorOf(Props[NoReply], "no-reply-3") + setAskPatternTimeoutWarningMode("off") + + intercept[AssertionError] { // No message will be logged and the event filter will fail. + EventFilter.warning(start = "Timeout triggered for ask pattern to actor", occurrences = 1).intercept { + Kamon.runWithContext(testContext("ask-timeout-warning")) { + noReplyActorRef ? 
"hello" + } + } + } + } + } + } + + override protected def afterAll(): Unit = { + shutdown() + super.afterAll() + } + + def setAskPatternTimeoutWarningMode(mode: String): Unit = { + val newConfiguration = ConfigFactory.parseString(s"kamon.pekko.ask-pattern-timeout-warning=$mode").withFallback(Kamon.config()) + Kamon.reconfigure(newConfiguration) + } +} + +class NoReply extends Actor { + def receive = { + case _ => + } +} diff --git a/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/AutoGroupingSpec.scala b/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/AutoGroupingSpec.scala new file mode 100644 index 000000000..2b6497b45 --- /dev/null +++ b/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/AutoGroupingSpec.scala @@ -0,0 +1,139 @@ +/* ========================================================================================= + * Copyright © 2013-2017 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ * ========================================================================================= + */ + +package kamon.instrumentation.pekko + +import org.apache.pekko.actor._ +import org.apache.pekko.routing.RoundRobinPool +import org.apache.pekko.testkit.{ImplicitSender, TestKit, TestProbe} +import kamon.testkit.{InitAndStopKamonAfterAll, InstrumentInspection, MetricInspection} +import org.scalactic.TimesOnInt.convertIntToRepeater +import org.scalatest.concurrent.Eventually +import org.scalatest.matchers.should.Matchers +import org.scalatest.wordspec.AnyWordSpecLike + +import scala.concurrent.duration._ + +class AutoGroupingSpec extends TestKit(ActorSystem("AutoGroupingSpec")) with AnyWordSpecLike with MetricInspection.Syntax + with InstrumentInspection.Syntax with Matchers with InitAndStopKamonAfterAll with ImplicitSender with Eventually { + + import AutoGroupingSpec._ + + val preExistingGroups = PekkoMetrics.GroupMembers.tagValues("group") + + "auto-grouping" should { + "ignore actors that belong to defined groups or are being tracked" in { + system.actorOf(reproducer(1, 0), "tracked-reproducer") ! "ping" + system.actorOf(dummy(), "tracked-dummy") + expectMsg("pong") + + withoutPreExisting(PekkoMetrics.GroupMembers.tagValues("group")) shouldBe empty + } + + "ignore routers and routees" in { + createTestRouter("tracked-router") ! "ping" + createTestRouter("tracked-explicitly-excluded-router") ! "ping" + expectMsg("pong") + expectMsg("pong") + + withoutPreExisting(PekkoMetrics.GroupMembers.tagValues("group")) shouldBe empty + } + + "automatically create groups for actors that are not being explicitly tracked" in { + + // This will create three levels of actors, all of type "Reproducer" and all should be auto-grouped on their + // own level. 
+ system.actorOf(reproducer(3, 2)) + system.actorOf(dummy()) + + eventually { + withoutPreExisting(PekkoMetrics.GroupMembers.tagValues("group")) should contain allOf ( + "AutoGroupingSpec/user/Dummy", + "AutoGroupingSpec/user/Reproducer", + "AutoGroupingSpec/user/Reproducer/Reproducer", + "AutoGroupingSpec/user/Reproducer/Reproducer/Reproducer" + ) + } + + val topGroup = PekkoMetrics.forGroup("AutoGroupingSpec/user/Reproducer", system.name) + val secondLevelGroup = PekkoMetrics.forGroup("AutoGroupingSpec/user/Reproducer/Reproducer", system.name) + val thirdLevelGroup = PekkoMetrics.forGroup("AutoGroupingSpec/user/Reproducer/Reproducer/Reproducer", system.name) + val dummyGroup = PekkoMetrics.forGroup("AutoGroupingSpec/user/Dummy", system.name) + + eventually { + topGroup.members.distribution(resetState = false).max shouldBe 1 + secondLevelGroup.members.distribution(resetState = false).max shouldBe 2 + thirdLevelGroup.members.distribution(resetState = false).max shouldBe 8 + dummyGroup.members.distribution(resetState = false).max shouldBe 1 + } + } + } + + def withoutPreExisting(values: Seq[String]): Seq[String] = + values.filter(v => preExistingGroups.indexOf(v) < 0) + + override implicit def patienceConfig: PatienceConfig = + PatienceConfig(timeout = 5 seconds, interval = 5 milliseconds) + + override protected def afterAll(): Unit = { + shutdown() + super.afterAll() + } + + def createTestRouter(routerName: String): ActorRef = { + val router = system.actorOf(RoundRobinPool(5).props(reproducer(1, 0)), routerName) + val initialiseListener = TestProbe() + + // Ensure that the router has been created before returning. + router.tell("ping", initialiseListener.ref) + initialiseListener.expectMsg("pong") + + router + } +} + +object AutoGroupingSpec { + + class Reproducer(pendingDepth: Int, childCount: Int) extends Actor { + + override def receive: Receive = { + case "ping" => sender() ! 
"pong" + case other => context.children.foreach(_.forward(other)) + } + + override def preStart(): Unit = { + super.preStart() + + if(pendingDepth >= 0) { + childCount.times { + context.actorOf(reproducer(pendingDepth - 1, childCount * 2)) ! "ping" + } + } + } + } + + class Dummy extends Actor { + override def receive: Receive = { + case _ => + } + } + + def reproducer(depth: Int, childCount: Int): Props = + Props(new Reproducer(depth - 1, childCount)) + + def dummy(): Props = + Props[Dummy] +} + diff --git a/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/ContextEchoActor.scala b/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/ContextEchoActor.scala new file mode 100644 index 000000000..1464fb4ec --- /dev/null +++ b/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/ContextEchoActor.scala @@ -0,0 +1,40 @@ +package kamon.instrumentation.pekko + +import org.apache.pekko.actor._ +import org.apache.pekko.remote.RemoteScope +import kamon.Kamon +import kamon.tag.Lookups._ + +class ContextEchoActor(creationListener: Option[ActorRef]) extends Actor with ActorLogging { + + creationListener foreach { recipient => + recipient ! currentTraceContextInfo + } + + def receive = { + case "die" => + throw new ArithmeticException("Division by zero.") + + case "reply-trace-token" => + sender ! 
currentTraceContextInfo + } + + def currentTraceContextInfo: String = { + val ctx = Kamon.currentContext() + val name = ctx.getTag(option(ContextEchoActor.EchoTag)).getOrElse("") + s"name=$name" + } +} + +object ContextEchoActor { + + val EchoTag = "tests" + + def props(creationListener: Option[ActorRef]): Props = + Props(classOf[ContextEchoActor], creationListener) + + def remoteProps(creationTraceContextListener: Option[ActorRef], remoteAddress: Address): Props = + Props(classOf[ContextEchoActor], creationTraceContextListener) + .withDeploy(Deploy(scope = RemoteScope(remoteAddress))) + +} diff --git a/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/ContextTesting.scala b/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/ContextTesting.scala new file mode 100644 index 000000000..0564a718b --- /dev/null +++ b/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/ContextTesting.scala @@ -0,0 +1,9 @@ +package kamon.instrumentation.pekko + +import kamon.context.Context +import kamon.tag.TagSet + +object ContextTesting { + val TestKey = "testkey" + def testContext(value: String) = Context.of(TagSet.of(TestKey, value)) +} diff --git a/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/DispatcherMetricsSpec.scala b/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/DispatcherMetricsSpec.scala new file mode 100644 index 000000000..e714ea7ce --- /dev/null +++ b/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/DispatcherMetricsSpec.scala @@ -0,0 +1,146 @@ +/* ========================================================================================= + * Copyright © 2013-2015 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ + +package kamon.instrumentation.pekko + +import org.apache.pekko.Version +import org.apache.pekko.actor.{ActorSystem, Props} +import org.apache.pekko.dispatch.MessageDispatcher +import org.apache.pekko.routing.BalancingPool +import org.apache.pekko.testkit.{ImplicitSender, TestKit, TestProbe} +import kamon.instrumentation.pekko.RouterMetricsTestActor._ +import kamon.instrumentation.executor.ExecutorMetrics +import kamon.tag.Lookups.plain +import kamon.testkit.MetricInspection +import org.scalatest.BeforeAndAfterAll +import org.scalatest.concurrent.Eventually +import org.scalatest.matchers.should.Matchers +import org.scalatest.wordspec.AnyWordSpecLike + +import java.util.concurrent.Executors +import scala.concurrent.{Await, ExecutionContext, Future} +import scala.concurrent.duration._ + +class DispatcherMetricsSpec extends TestKit(ActorSystem("DispatcherMetricsSpec")) with AnyWordSpecLike with Matchers with MetricInspection.Syntax + with BeforeAndAfterAll with ImplicitSender with Eventually { + + "the Kamon dispatcher metrics" should { + + val trackedDispatchers = Seq( + "pekko.actor.default-dispatcher", + "tracked-pinned-dispatcher", + "tracked-fjp", + "tracked-tpe", + "pekko.actor.internal-dispatcher") + + val excluded = "explicitly-excluded" + val allDispatchers = trackedDispatchers :+ excluded + val builtInDispatchers = Seq("pekko.actor.default-dispatcher", "pekko.actor.internal-dispatcher") + + + "track dispatchers configured in the pekko.dispatcher 
filter" in { + allDispatchers.foreach(id => forceInit(system.dispatchers.lookup(id))) + + val threads = ExecutorMetrics.ThreadsActive.tagValues("name") + val queues = ExecutorMetrics.QueueSize.tagValues("name") + val tasks = ExecutorMetrics.TasksCompleted.tagValues("name") + + trackedDispatchers.forall { dispatcherName => + threads.contains(dispatcherName) && + queues.contains(dispatcherName) && + tasks.contains(dispatcherName) + } should be (true) + + Seq(threads, queues, tasks).flatten should not contain excluded + } + + "include the actor system name in the executor tags" in { + val instrumentExecutorsWithSystem = ExecutorMetrics.ThreadsActive.instruments().keys + .filter(_.get(plain("pekko.system")) == system.name) + .map(_.get(plain("name"))) + + instrumentExecutorsWithSystem should contain only(trackedDispatchers: _*) + } + + + "clean up the metrics recorders after a dispatcher is shutdown" in { + ExecutorMetrics.Parallelism.tagValues("name") should contain("tracked-fjp") + shutdownDispatcher(system.dispatchers.lookup("tracked-fjp")) + Thread.sleep(2000) + ExecutorMetrics.Parallelism.tagValues("name") shouldNot contain("tracked-fjp") + } + + "play nicely when dispatchers are looked up from a BalancingPool router" in { + val balancingPoolRouter = system.actorOf(BalancingPool(5).props(Props[RouterMetricsTestActor]), "test-balancing-pool") + balancingPoolRouter ! 
Ping + expectMsg(Pong) + + ExecutorMetrics.Parallelism.tagValues("name") should contain("BalancingPool-/test-balancing-pool") + } + + "pick up default execution contexts provided when creating an actor system" in { + val dec = ExecutionContext.fromExecutor(Executors.newFixedThreadPool(8)) + val system = ActorSystem(name = "with-default-ec", defaultExecutionContext = Some(dec)) + + val instrumentExecutorsWithSystem = ExecutorMetrics.ThreadsActive.instruments().keys + .filter(_.get(plain("pekko.system")) == system.name) + .map(_.get(plain("name"))) + + instrumentExecutorsWithSystem should contain only(builtInDispatchers: _*) + Await.result(system.terminate(), 5 seconds) + } + + "pick up default execution contexts provided when creating an actor system when the type is unknown" in { + val dec = new WrappingExecutionContext(ExecutionContext.fromExecutor(Executors.newFixedThreadPool(8))) + val system = ActorSystem(name = "with-unknown-default-ec", defaultExecutionContext = Some(dec)) + + val instrumentExecutorsWithSystem = ExecutorMetrics.ThreadsActive.instruments().keys + .filter(_.get(plain("pekko.system")) == system.name) + .map(_.get(plain("name"))) + + val builtInWithoutDefaultDispatcher = builtInDispatchers.filterNot(_.endsWith("default-dispatcher")) + if(builtInWithoutDefaultDispatcher.isEmpty) + instrumentExecutorsWithSystem shouldBe empty + else + instrumentExecutorsWithSystem should contain only(builtInWithoutDefaultDispatcher: _*) + + Await.result(system.terminate(), 5 seconds) + } + } + + + def forceInit(dispatcher: MessageDispatcher): MessageDispatcher = { + val listener = TestProbe() + Future { + listener.ref ! 
"init done" + }(dispatcher) + listener.expectMsg("init done") + + dispatcher + } + + def shutdownDispatcher(dispatcher: MessageDispatcher): Unit = { + val shutdownMethod = dispatcher.getClass.getDeclaredMethod("shutdown") + shutdownMethod.setAccessible(true) + shutdownMethod.invoke(dispatcher) + } + + override protected def afterAll(): Unit = system.terminate() + + class WrappingExecutionContext(ec: ExecutionContext) extends ExecutionContext { + override def execute(runnable: Runnable): Unit = ec.execute(runnable) + override def reportFailure(cause: Throwable): Unit = ec.reportFailure(cause) + } +} diff --git a/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/EnvelopeSpec.scala b/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/EnvelopeSpec.scala new file mode 100644 index 000000000..ff89049da --- /dev/null +++ b/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/EnvelopeSpec.scala @@ -0,0 +1,66 @@ +/* + * ========================================================================================= + * Copyright © 2017 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ * ========================================================================================= + */ + +package kamon.instrumentation.pekko + + +import org.apache.pekko.actor.{ActorSystem, ExtendedActorSystem, Props} +import org.apache.pekko.dispatch.Envelope +import org.apache.pekko.testkit.{ImplicitSender, TestKit} +import kamon.Kamon +import kamon.instrumentation.context.{HasContext, HasTimestamp} +import org.scalatest.BeforeAndAfterAll +import org.scalatest.matchers.should.Matchers +import org.scalatest.wordspec.AnyWordSpecLike + +class EnvelopeSpec extends TestKit(ActorSystem("EnvelopeSpec")) with AnyWordSpecLike with Matchers + with BeforeAndAfterAll with ImplicitSender { + + "EnvelopeInstrumentation" should { + "mixin EnvelopeContext" in { + val actorRef = system.actorOf(Props[NoReply]) + val env = Envelope("msg", actorRef, system).asInstanceOf[Object] + env match { + case e: Envelope with HasContext with HasTimestamp => + e.setContext(Kamon.currentContext()) + e.setTimestamp(Kamon.clock().nanos()) + + case _ => fail("InstrumentedEnvelope is not mixed in") + } + env match { + case s: Serializable => { + import java.io._ + val bos = new ByteArrayOutputStream + val oos = new ObjectOutputStream(bos) + oos.writeObject(env) + oos.close() + org.apache.pekko.serialization.JavaSerializer.currentSystem.withValue(system.asInstanceOf[ExtendedActorSystem]) { + val ois = new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray())) + val obj = ois.readObject() + ois.close() + obj match { + case e: Envelope with HasContext with HasTimestamp => + e.timestamp should not be 0L + e.context should not be null + case _ => fail("InstrumentedEnvelope is not mixed in") + } + } + } + case _ => fail("envelope is not serializable") + } + } + } +} diff --git a/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/MessageTracingSpec.scala b/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/MessageTracingSpec.scala new file mode 100644 index 
000000000..c11cd5877 --- /dev/null +++ b/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/MessageTracingSpec.scala @@ -0,0 +1,239 @@ +package kamon.instrumentation.pekko + +import java.util.concurrent.TimeUnit +import org.apache.pekko.actor.{Actor, ActorRef, ActorSystem, Props} +import org.apache.pekko.pattern.ask +import org.apache.pekko.routing.{RoundRobinGroup, RoundRobinPool} +import org.apache.pekko.stream.{ActorMaterializer, Materializer} +import org.apache.pekko.stream.scaladsl.Source +import org.apache.pekko.testkit.{ImplicitSender, TestKit} +import org.apache.pekko.util.Timeout +import kamon.Kamon +import kamon.tag.Lookups +import kamon.testkit.{InitAndStopKamonAfterAll, MetricInspection, Reconfigure, SpanInspection, TestSpanReporter} +import kamon.trace.Span +import org.scalactic.TimesOnInt.convertIntToRepeater +import org.scalatest.concurrent.{Eventually, ScalaFutures} +import org.scalatest.matchers.should.Matchers +import org.scalatest.time.SpanSugar._ +import org.scalatest.wordspec.AnyWordSpecLike +import org.scalatest.{BeforeAndAfterAll, OptionValues} + +import java.util.concurrent.TimeUnit +import scala.concurrent.duration.Duration +import scala.concurrent.{Await, Future} + + +class MessageTracingSpec extends TestKit(ActorSystem("MessageTracing")) with AnyWordSpecLike with MetricInspection.Syntax + with Matchers with SpanInspection with Reconfigure with InitAndStopKamonAfterAll with ImplicitSender with Eventually + with OptionValues with ScalaFutures with TestSpanReporter { + + "Message tracing instrumentation" should { + "skip filtered out actors" in { + val traced = system.actorOf(Props[TracingTestActor], "traced-probe-1") + val nonTraced = system.actorOf(Props[TracingTestActor], "filteredout") + nonTraced ! "ping" + expectMsg("pong") + + traced ! 
"ping" + expectMsg("pong") + + eventually(timeout(2 seconds)) { + val span = testSpanReporter.nextSpan().value + val spanTags = stringTag(span) _ + spanTags("component") shouldBe "pekko.actor" + span.operationName shouldBe("tell(String)") + spanTags("pekko.actor.path") shouldNot include ("filteredout") + spanTags("pekko.actor.path") should be ("MessageTracing/user/traced-probe-1") + } + } + + "construct span for traced actors" in { + val traced = system.actorOf(Props[TracingTestActor], "traced") + traced ! "ping" + expectMsg("pong") + + eventually(timeout(2 seconds)) { + val span = testSpanReporter.nextSpan().value + val spanTags = stringTag(span) _ + span.operationName shouldBe("tell(String)") + spanTags("component") shouldBe "pekko.actor" + spanTags("pekko.system") shouldBe "MessageTracing" + spanTags("pekko.actor.path") shouldBe "MessageTracing/user/traced" + spanTags("pekko.actor.class") shouldBe "kamon.instrumentation.pekko.TracingTestActor" + spanTags("pekko.actor.message-class") shouldBe "String" + } + + val pong = traced.ask("ping")(Timeout(10, TimeUnit.SECONDS)) + Await.ready(pong, 10 seconds) + + eventually(timeout(2 seconds)) { + val span = testSpanReporter.nextSpan().value + val spanTags = stringTag(span) _ + span.operationName shouldBe("ask(String)") + spanTags("component") shouldBe "pekko.actor" + spanTags("pekko.system") shouldBe "MessageTracing" + spanTags("pekko.actor.path") shouldBe "MessageTracing/user/traced" + spanTags("pekko.actor.class") shouldBe "kamon.instrumentation.pekko.TracingTestActor" + spanTags("pekko.actor.message-class") shouldBe "String" + } + } + + "create child spans for messages between traced actors" in { + val first = system.actorOf(Props[TracingTestActor], "traced-first") + val second = system.actorOf(Props[TracingTestActor], "traced-second") + + first ! 
second + expectMsg("pong") + + // Span for the first actor message + val firstSpanID = eventually(timeout(2 seconds)) { + val span = testSpanReporter.nextSpan().value + val spanTags = stringTag(span) _ + + spanTags("component") shouldBe "pekko.actor" + spanTags("pekko.system") shouldBe "MessageTracing" + spanTags("pekko.actor.path") shouldBe "MessageTracing/user/traced-first" + spanTags("pekko.actor.class") shouldBe "kamon.instrumentation.pekko.TracingTestActor" + spanTags("pekko.actor.message-class") should include("ActorRef") + span.id + } + + // Span for the second actor message + eventually(timeout(2 seconds)) { + val span = testSpanReporter.nextSpan().value + val spanTags = stringTag(span) _ + span.parentId shouldBe firstSpanID + span.operationName should include("tell(String)") + spanTags("component") shouldBe "pekko.actor" + spanTags("pekko.system") shouldBe "MessageTracing" + spanTags("pekko.actor.path") shouldBe "MessageTracing/user/traced-second" + spanTags("pekko.actor.class") shouldBe "kamon.instrumentation.pekko.TracingTestActor" + spanTags("pekko.actor.message-class") shouldBe "String" + } + } + + "create hierarchy of spans even across propagation-only actors" in { + val first = system.actorOf(Props[TracingTestActor], "traced-chain-first") + val nonInstrumented = system.actorOf(Props[TracingTestActor], "filteredout-middle") + val last = system.actorOf(Props[TracingTestActor], "traced-chain-last") + + first ! 
(nonInstrumented, last) + expectMsg("pong") + + // Span for the first actor message + val firstSpanID = eventually(timeout(2 seconds)) { + val span = testSpanReporter.nextSpan().value + val spanTags = stringTag(span) _ + span.operationName shouldBe("tell(Tuple2)") + spanTags("component") shouldBe "pekko.actor" + spanTags("pekko.system") shouldBe "MessageTracing" + spanTags("pekko.actor.path") shouldBe "MessageTracing/user/traced-chain-first" + spanTags("pekko.actor.class") shouldBe "kamon.instrumentation.pekko.TracingTestActor" + spanTags("pekko.actor.message-class") should include("Tuple2") + + span.id + } + + // Span for the second actor message + eventually(timeout(2 seconds)) { + val span = testSpanReporter.nextSpan().value + val spanTags = stringTag(span) _ + span.parentId shouldBe firstSpanID + span.operationName shouldBe("tell(String)") + spanTags("component") shouldBe "pekko.actor" + spanTags("pekko.system") shouldBe "MessageTracing" + spanTags("pekko.actor.path") shouldBe "MessageTracing/user/traced-chain-last" + spanTags("pekko.actor.class") shouldBe "kamon.instrumentation.pekko.TracingTestActor" + spanTags("pekko.actor.message-class") shouldBe "String" + } + } + + "create actor message spans when behind a group router " in { + val routee = system.actorOf(Props[TracingTestActor],"traced-routee-one") + val router = system.actorOf(RoundRobinGroup(Vector(routee.path.toStringWithoutAddress)).props(), "nontraced-group-router") + + router ! "ping" + expectMsg("pong") + + eventually(timeout(2 seconds)) { + val spanTags = stringTag(testSpanReporter.nextSpan().value) _ + spanTags("component") shouldBe "pekko.actor" + spanTags("pekko.actor.path") shouldNot include ("nontraced-pool-router") + spanTags("pekko.actor.path") should be ("MessageTracing/user/traced-routee-one") + } + } + + "create actor message spans when behind a pool router" in { + val router = system.actorOf(Props[TracingTestActor].withRouter(RoundRobinPool(2)), "traced-pool-router") + + router ! 
"ping-and-wait" + expectMsg("pong") + + eventually(timeout(2 seconds)) { + val spanTags = stringTag(testSpanReporter.nextSpan().value) _ + spanTags("component") shouldBe "pekko.actor" + spanTags("pekko.actor.path") should be ("MessageTracing/user/traced-pool-router") + } + } + + "not track Pekko Streams actors" in { + implicit val timeout = Timeout(10 seconds) + val actorWithMaterializer = system.actorOf(Props[ActorWithMaterializer]) + + val finishedStream = Kamon.runWithSpan(Kamon.serverSpanBuilder("wrapper", "test").start()) { + actorWithMaterializer.ask("stream").mapTo[String] + } + + 5 times { + val allSpans = testSpanReporter() + .spans() + .filterNot(s => s.operationName == "wrapper" || s.operationName == "ask(String)") + + allSpans shouldBe empty + Thread.sleep(1000) + } + } + + def stringTag(span: Span.Finished)(tag: String): String = { + span.tags.withTags(span.metricTags).get(Lookups.plain(tag)) + } + + } +} + +class TracingTestActor extends Actor { + + override def receive: Receive = { + case (forwardTo: ActorRef, target: ActorRef) => + Thread.sleep(50) + forwardTo.forward(target) + + case forwardTo: ActorRef => + forwardTo.forward("ping-and-wait") + + case "ping" => + sender ! "pong" + + case "ping-and-wait" => + Thread.sleep(50) + sender ! "pong" + } +} + +class ActorWithMaterializer extends Actor { + implicit val mat = ActorMaterializer() + + override def receive: Receive = { + case "stream" => + Await.result ( + Source(1 to 10) + .async + .map(x => x + x) + .runReduce(_ + _), + 5 seconds + ) + + sender() ! 
"done" + } +} diff --git a/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/PekkoTestKitInstrumentation.scala b/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/PekkoTestKitInstrumentation.scala new file mode 100644 index 000000000..1bc202951 --- /dev/null +++ b/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/PekkoTestKitInstrumentation.scala @@ -0,0 +1,24 @@ +package kamon.instrumentation.pekko + +import kanela.agent.api.instrumentation.InstrumentationBuilder +import kanela.agent.libs.net.bytebuddy.asm.Advice + +class PekkoTestKitInstrumentation extends InstrumentationBuilder { + + /** + * We believe that tests fail randomly because every now and then the tests thread receives a message from one of the + * echo actors and continues processing before the execution of the receive function on the echo actor's thread + * finishes and metrics are recorded. This instrumentation delays the waiting on the test thread to get better + * chances that the echo actor receive finishes. + */ + onSubTypesOf("org.apache.pekko.testkit.TestKitBase") + .advise(method("receiveOne"), DelayReceiveOne) +} + +object DelayReceiveOne { + + @Advice.OnMethodExit(suppress = classOf[Throwable]) + def exit(): Unit = + Thread.sleep(5) + +} diff --git a/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/RouterMetricsSpec.scala b/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/RouterMetricsSpec.scala new file mode 100644 index 000000000..a422a6864 --- /dev/null +++ b/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/RouterMetricsSpec.scala @@ -0,0 +1,368 @@ +/* ========================================================================================= + * Copyright © 2013-2014 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ + +package kamon.instrumentation.pekko + +import org.apache.pekko.actor._ +import org.apache.pekko.routing._ +import org.apache.pekko.testkit.{ImplicitSender, TestKit, TestProbe} +import kamon.instrumentation.pekko.PekkoMetrics._ +import kamon.instrumentation.pekko.RouterMetricsTestActor._ +import kamon.tag.Lookups._ +import kamon.tag.TagSet +import kamon.testkit.{InitAndStopKamonAfterAll, InstrumentInspection, MetricInspection} +import org.scalactic.TimesOnInt._ +import org.scalatest.concurrent.Eventually +import org.scalatest.matchers.should.Matchers +import org.scalatest.wordspec.AnyWordSpecLike + +import scala.concurrent.duration._ + +class RouterMetricsSpec extends TestKit(ActorSystem("RouterMetricsSpec")) with AnyWordSpecLike with MetricInspection.Syntax + with InstrumentInspection.Syntax with Matchers with InitAndStopKamonAfterAll with ImplicitSender with Eventually { + + "the Kamon router metrics" should { + "respect the configured include and exclude filters" in new RouterMetricsFixtures { + createTestPoolRouter("tracked-pool-router") + createTestPoolRouter("non-tracked-pool-router") + createTestPoolRouter("tracked-explicitly-excluded-pool-router") + + RouterProcessingTime.tagValues("path") should contain("RouterMetricsSpec/user/tracked-pool-router") + RouterProcessingTime.tagValues("path") shouldNot contain("RouterMetricsSpec/user/non-tracked-pool-router") + RouterProcessingTime.tagValues("path") shouldNot 
contain("RouterMetricsSpec/user/tracked-explicitly-excluded-pool-router") + } + + + "record the routing-time of the receive function for pool routers" in new RouterMetricsFixtures { + val listener = TestProbe() + val router = createTestPoolRouter("measuring-routing-time-in-pool-router", true) + + router.tell(Ping, listener.ref) + listener.expectMsg(Pong) + + eventually { + RouterRoutingTime.withTags(routerTags("RouterMetricsSpec/user/measuring-routing-time-in-pool-router")) + .distribution(resetState = false).count should be(1L) + } + } + + "record the processing-time of the receive function for pool routers" in new RouterMetricsFixtures { + val timingsListener = TestProbe() + val router = createTestPoolRouter("measuring-processing-time-in-pool-router", true) + + router.tell(RouterTrackTimings(sleep = Some(1 second)), timingsListener.ref) + val timings = timingsListener.expectMsgType[RouterTrackedTimings] + val processingTimeDistribution = RouterProcessingTime + .withTags(routerTags("RouterMetricsSpec/user/measuring-processing-time-in-pool-router")).distribution() + + processingTimeDistribution.count should be(1L) + processingTimeDistribution.buckets.head.frequency should be(1L) + processingTimeDistribution.buckets.head.value should be(timings.approximateProcessingTime +- 10.millis.toNanos) + } + + "record the number of errors for pool routers" in new RouterMetricsFixtures { + val listener = TestProbe() + val router = createTestPoolRouter("measuring-errors-in-pool-router") + + 10.times(router.tell(Fail, listener.ref)) + + router.tell(Ping, listener.ref) + listener.expectMsg(Pong) + + eventually { + RouterErrors + .withTags(routerTags("RouterMetricsSpec/user/measuring-errors-in-pool-router")).value(resetState = false) should be(10L) + } + } + + "record the time-in-mailbox for pool routers" in new RouterMetricsFixtures { + val timingsListener = TestProbe() + // If we don't initialize the listener upfront the timings might be wrong. 
+ timingsListener.testActor.tell("hello", timingsListener.ref) + timingsListener.expectMsg("hello") + val router = createTestPoolRouter("measuring-time-in-mailbox-in-pool-router", true) + + router.tell(RouterTrackTimings(sleep = Some(1 second)), timingsListener.ref) + val timings = timingsListener.expectMsgType[RouterTrackedTimings] + + val timeInMailboxDistribution = RouterTimeInMailbox + .withTags(routerTags("RouterMetricsSpec/user/measuring-time-in-mailbox-in-pool-router")).distribution() + + timeInMailboxDistribution.count should be(1L) + timeInMailboxDistribution.buckets.head.frequency should be(1L) + timeInMailboxDistribution.buckets.head.value should be(timings.approximateTimeInMailbox +- 10.millis.toNanos) + } + + "record the time-in-mailbox for balancing pool routers" in new RouterMetricsFixtures { + val timingsListener = TestProbe() + // If we don't initialize the listener upfront the timings might be wrong. + timingsListener.testActor.tell("hello", timingsListener.ref) + timingsListener.expectMsg("hello") + val router = createTestBalancingPoolRouter("measuring-time-in-mailbox-in-balancing-pool-router", true) + + router.tell(RouterTrackTimings(sleep = Some(1 second)), timingsListener.ref) + val timings = timingsListener.expectMsgType[RouterTrackedTimings] + + val timeInMailboxDistribution = RouterTimeInMailbox + .withTags( + routerTags("RouterMetricsSpec/user/measuring-time-in-mailbox-in-balancing-pool-router").withTags( + TagSet.from(Map("dispatcher" -> "BalancingPool-/measuring-time-in-mailbox-in-balancing-pool-router")) + ) + + ).distribution() + + timeInMailboxDistribution.count should be(1L) + timeInMailboxDistribution.buckets.head.frequency should be(1L) + timeInMailboxDistribution.buckets.head.value should be(timings.approximateTimeInMailbox +- 10.millis.toNanos) + } + + + "record pending-messages for pool routers" in new RouterMetricsFixtures { + val timingsListener = TestProbe() + val router = 
createTestPoolRouter("measuring-pending-messages-in-pool-router", true) + def pendingMessagesDistribution = RouterPendingMessages + .withTags(routerTags("RouterMetricsSpec/user/measuring-pending-messages-in-pool-router")).distribution() + + 10 times { router.tell(RouterTrackTimings(sleep = Some(1 second)), timingsListener.ref)} + 10 times { timingsListener.expectMsgType[RouterTrackedTimings] } + + eventually(pendingMessagesDistribution.max should be >= (5L)) + eventually(pendingMessagesDistribution.max should be (0L)) + } + + "record pending-messages for balancing pool routers" in new RouterMetricsFixtures { + val timingsListener = TestProbe() + val router = createTestBalancingPoolRouter("measuring-pending-messages-in-balancing-pool-router", true) + def pendingMessagesDistribution = RouterPendingMessages + .withTags( + routerTags("RouterMetricsSpec/user/measuring-pending-messages-in-balancing-pool-router") + .withTag("dispatcher", "BalancingPool-/measuring-pending-messages-in-balancing-pool-router") + ).distribution() + + 10 times { router.tell(RouterTrackTimings(sleep = Some(1 second)), timingsListener.ref)} + 10 times { timingsListener.expectMsgType[RouterTrackedTimings] } + + eventually(pendingMessagesDistribution.max should be >= (5L)) + eventually(pendingMessagesDistribution.max should be (0L)) + } + + "record member count for pool routers" in new RouterMetricsFixtures { + val timingsListener = TestProbe() + val router = createTestPoolRouter("measuring-members-in-pool-router", true) + def membersDistribution = RouterMembers + .withTags(routerTags("RouterMetricsSpec/user/measuring-members-in-pool-router")).distribution() + + for(routeesLeft <- 4 to 0 by -1) { + 100 times { router.tell(Discard, timingsListener.ref) } + router.tell(Die, timingsListener.ref) + + eventually { + membersDistribution.max should be (routeesLeft) + } + } + } + + "record member count for balancing pool routers" in new RouterMetricsFixtures { + val timingsListener = TestProbe() + val 
router = createTestBalancingPoolRouter("measuring-members-in-balancing-pool-router", true) + def membersDistribution = RouterMembers + .withTags( + routerTags("RouterMetricsSpec/user/measuring-members-in-balancing-pool-router") + .withTags(TagSet.from(Map("dispatcher" -> "BalancingPool-/measuring-members-in-balancing-pool-router"))) + ).distribution() + + for(routeesLeft <- 4 to 0 by -1) { + 100 times { router.tell(Discard, timingsListener.ref) } + router.tell(Die, timingsListener.ref) + + eventually { + membersDistribution.max should be (routeesLeft) + } + } + } + + + "pick the right dispatcher name when the routees have a custom dispatcher set via deployment configuration" in new RouterMetricsFixtures { + val testProbe = TestProbe() + val router = system.actorOf(FromConfig.props(Props[RouterMetricsTestActor]), "picking-the-right-dispatcher-in-pool-router") + + 10 times { + router.tell(Ping, testProbe.ref) + testProbe.expectMsg(Pong) + } + + val routerMetrics = RouterMembers.instruments( + TagSet.from(Map("path" -> "RouterMetricsSpec/user/picking-the-right-dispatcher-in-pool-router")) + ) + + routerMetrics + .map(_._1.get(plain("dispatcher"))) should contain only("custom-dispatcher") + } + + "clean the pending messages metric when a routee dies in pool routers" in new RouterMetricsFixtures { + val timingsListener = TestProbe() + val router = createTestPoolRouter("cleanup-pending-messages-in-pool-router", true) + def pendingMessagesDistribution = RouterPendingMessages + .withTags(routerTags("RouterMetricsSpec/user/cleanup-pending-messages-in-pool-router")).distribution() + + 10 times { router.tell(RouterTrackTimings(sleep = Some(1 second)), timingsListener.ref)} + 1 times { router.tell(Die, timingsListener.ref)} + 500 times { router.tell(Discard, timingsListener.ref)} + + eventually { + pendingMessagesDistribution.max should be >= (500L) + } + + 10 times { timingsListener.expectMsgType[RouterTrackedTimings] } + + eventually { + pendingMessagesDistribution.max 
should be (0L) + } + } + + "clean the pending messages metric when a routee dies in balancing pool routers" in new RouterMetricsFixtures { + val timingsListener = TestProbe() + val router = createTestBalancingPoolRouter("cleanup-pending-messages-in-balancing-pool-router", true) + def pendingMessagesDistribution = RouterPendingMessages + .withTags(routerTags("RouterMetricsSpec/user/cleanup-pending-messages-in-balancing-pool-router") + .withTags(TagSet.from(Map("dispatcher" -> "BalancingPool-/cleanup-pending-messages-in-balancing-pool-router"))) + + ).distribution() + + 10 times { router.tell(RouterTrackTimings(sleep = Some(1 second)), timingsListener.ref)} + 1 times { router.tell(Die, timingsListener.ref)} + 500 times { router.tell(Discard, timingsListener.ref)} + + eventually { + pendingMessagesDistribution.max should be >= (100L) + } + + 10 times { timingsListener.expectMsgType[RouterTrackedTimings] } + + eventually { + pendingMessagesDistribution.max should be (0L) + } + } + + "clean up the associated recorder when the pool router is stopped" in new RouterMetricsFixtures { + val trackedRouter = createTestPoolRouter("stop-in-pool-router") + RouterProcessingTime.tagValues("path") should contain("RouterMetricsSpec/user/stop-in-pool-router") + + // Killing the router should remove it's RouterMetrics and registering again below should create a new one. + val deathWatcher = TestProbe() + deathWatcher.watch(trackedRouter) + trackedRouter ! 
PoisonPill + deathWatcher.expectTerminated(trackedRouter) + + + eventually { + RouterProcessingTime.tagValues("path") shouldNot contain("RouterMetricsSpec/user/stop-in-pool-router") + } + } + } + + override implicit def patienceConfig: PatienceConfig = PatienceConfig(timeout = 5 seconds, interval = 5 milliseconds) + + override protected def afterAll(): Unit = { + shutdown() + super.afterAll() + } + + def routerTags(path: String): TagSet = { + val routerClass = if(path.contains("balancing")) "org.apache.pekko.routing.BalancingPool" else "org.apache.pekko.routing.RoundRobinPool" + + TagSet.from( + Map( + "path" -> path, + "system" -> "RouterMetricsSpec", + "dispatcher" -> "pekko.actor.default-dispatcher", + "routeeClass" -> "kamon.instrumentation.pekko.RouterMetricsTestActor", + "routerClass" -> routerClass + ) + ) + } + + + trait RouterMetricsFixtures { + def createTestGroupRouter(routerName: String, resetState: Boolean = false): ActorRef = { + val routees = Vector.fill(5) { + system.actorOf(Props[RouterMetricsTestActor]) + } + + val group = system.actorOf(RoundRobinGroup(routees.map(_.path.toStringWithoutAddress)).props(), routerName) + val initialiseListener = TestProbe() + + // Ensure that the router has been created before returning. 
+ group.tell(Ping, initialiseListener.ref) + initialiseListener.expectMsg(Pong) + + // Cleanup all the metric recording instruments: + if(resetState) { + RouterRoutingTime.withTags(routerTags(s"RouterMetricsSpec/user/$routerName")).distribution(resetState = true) + RouterTimeInMailbox.withTags(routerTags(s"RouterMetricsSpec/user/$routerName")).distribution(resetState = true) + RouterProcessingTime.withTags(routerTags(s"RouterMetricsSpec/user/$routerName")).distribution(resetState = true) + RouterErrors.withTags(routerTags(s"RouterMetricsSpec/user/$routerName")).value(resetState = true) + } + + group + } + + def createTestPoolRouter(routerName: String, resetState: Boolean = false): ActorRef = { + val router = system.actorOf(RoundRobinPool(5).props(Props[RouterMetricsTestActor]), routerName) + val initialiseListener = TestProbe() + + // Ensure that the router has been created before returning. + router.tell(Ping, initialiseListener.ref) + initialiseListener.expectMsg(Pong) + + // Cleanup all the metric recording instruments: + if(resetState) { + RouterRoutingTime.withTags(routerTags(s"RouterMetricsSpec/user/$routerName")).distribution(resetState = true) + RouterTimeInMailbox.withTags(routerTags(s"RouterMetricsSpec/user/$routerName")).distribution(resetState = true) + RouterProcessingTime.withTags(routerTags(s"RouterMetricsSpec/user/$routerName")).distribution(resetState = true) + RouterErrors.withTags(routerTags(s"RouterMetricsSpec/user/$routerName")).value(resetState = true) + RouterPendingMessages.withTags(routerTags(s"RouterMetricsSpec/user/$routerName")).distribution(resetState = true) + RouterMembers.withTags(routerTags(s"RouterMetricsSpec/user/$routerName")).distribution(resetState = true) + } + + router + } + + def createTestBalancingPoolRouter(routerName: String, resetState: Boolean = false): ActorRef = { + val router = system.actorOf(BalancingPool(5).props(Props[RouterMetricsTestActor]), routerName) + val initialiseListener = TestProbe() + + // Ensure that 
the router has been created before returning. + router.tell(Ping, initialiseListener.ref) + initialiseListener.expectMsg(Pong) + + // Cleanup all the metric recording instruments: + if(resetState) { + val tags = routerTags(s"RouterMetricsSpec/user/$routerName") + .withTag("dispatcher", s"BalancingPool-/$routerName") + + RouterRoutingTime.withTags(tags).distribution(resetState = true) + RouterTimeInMailbox.withTags(tags).distribution(resetState = true) + RouterProcessingTime.withTags(tags).distribution(resetState = true) + RouterPendingMessages.withTags(tags).distribution(resetState = true) + RouterMembers.withTags(tags).distribution(resetState = true) + RouterErrors.withTags(tags).value(resetState = true) + } + + router + } + } +} + diff --git a/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/RouterMetricsTestActor.scala b/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/RouterMetricsTestActor.scala new file mode 100644 index 000000000..fa14de42b --- /dev/null +++ b/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/RouterMetricsTestActor.scala @@ -0,0 +1,53 @@ +/* ========================================================================================= + * Copyright © 2013-2017 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ * ========================================================================================= + */ + +package kamon.instrumentation.pekko + +import org.apache.pekko.actor._ +import kamon.Kamon + +import scala.concurrent.duration._ + +class RouterMetricsTestActor extends Actor { + import RouterMetricsTestActor._ + override def receive = { + case Discard => + case Die => context.stop(self) + case Fail => throw new ArithmeticException("Division by zero.") + case Ping => sender ! Pong + case RouterTrackTimings(sendTimestamp, sleep) => { + val dequeueTimestamp = Kamon.clock().nanos() + sleep.map(s => Thread.sleep(s.toMillis)) + val afterReceiveTimestamp = Kamon.clock().nanos() + + sender ! RouterTrackedTimings(sendTimestamp, dequeueTimestamp, afterReceiveTimestamp) + } + } +} + +object RouterMetricsTestActor { + case object Ping + case object Pong + case object Fail + case object Discard + case object Die + + case class RouterTrackTimings(sendTimestamp: Long = Kamon.clock().nanos(), sleep: Option[Duration] = None) + case class RouterTrackedTimings(sendTimestamp: Long, dequeueTimestamp: Long, afterReceiveTimestamp: Long) { + def approximateTimeInMailbox: Long = dequeueTimestamp - sendTimestamp + def approximateProcessingTime: Long = afterReceiveTimestamp - dequeueTimestamp + } +} + diff --git a/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/SchedulerInstrumentationSpec.scala b/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/SchedulerInstrumentationSpec.scala new file mode 100644 index 000000000..4c40373a0 --- /dev/null +++ b/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/SchedulerInstrumentationSpec.scala @@ -0,0 +1,50 @@ +/* ========================================================================================= + * Copyright © 2013-2022 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ + +package kamon.instrumentation.pekko + +import org.apache.pekko.actor.ActorSystem +import org.apache.pekko.testkit.{ImplicitSender, TestKit} +import kamon.Kamon +import kamon.tag.Lookups.plain +import kamon.testkit.InitAndStopKamonAfterAll +import org.scalatest.concurrent.Eventually +import org.scalatest.matchers.should.Matchers +import org.scalatest.wordspec.AnyWordSpecLike + +import scala.concurrent.Promise +import scala.concurrent.duration._ + +class SchedulerInstrumentationSpec extends TestKit(ActorSystem("SchedulerInstrumentationSpec")) with AnyWordSpecLike + with Matchers with InitAndStopKamonAfterAll with ImplicitSender with Eventually { + + + "the Pekko Scheduler instrumentation" should { + "propagate the current context in calls to scheduler.scheduleOnce" in { + val contextTagPromise = Promise[String]() + val tagValueFuture = contextTagPromise.future + + Kamon.runWithContextTag("key", "one") { + system.scheduler.scheduleOnce(100 millis) { + contextTagPromise.success(Kamon.currentContext().getTag(plain("key"))) + } (system.dispatcher) + } + + eventually(timeout(5 seconds)) { + tagValueFuture.value.get.get shouldBe "one" + } + } + } +} diff --git a/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/SystemMessageInstrumentationSpec.scala b/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/SystemMessageInstrumentationSpec.scala new file mode 100644 index 000000000..3aeb7c84d --- /dev/null +++ 
b/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/SystemMessageInstrumentationSpec.scala @@ -0,0 +1,184 @@ +/* + * ========================================================================================= + * Copyright © 2013-2015 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ + +package kamon.instrumentation.pekko + + +import org.apache.pekko.actor.SupervisorStrategy.{Escalate, Restart, Resume, Stop} +import org.apache.pekko.actor._ +import org.apache.pekko.testkit.{ImplicitSender, TestKit} +import kamon.Kamon +import kamon.instrumentation.pekko.ContextTesting._ +import kamon.tag.Lookups._ +import org.scalatest.BeforeAndAfterAll +import org.scalatest.matchers.should.Matchers +import org.scalatest.wordspec.{AnyWordSpec, AnyWordSpecLike} + +import scala.util.control.NonFatal + +class SystemMessageInstrumentationSpec extends TestKit(ActorSystem("ActorSystemMessageInstrumentationSpec")) with AnyWordSpecLike with Matchers + with BeforeAndAfterAll with ImplicitSender { + implicit lazy val executionContext = system.dispatcher + + "the system message passing instrumentation" should { + "capture and propagate the current context while processing the Create message in top level actors" in { + Kamon.runWithContext(testContext("creating-top-level-actor")) { + system.actorOf(Props(new Actor { + testActor ! 
propagatedContextKey() + def receive: Actor.Receive = { case any => } + })) + } + + expectMsg("creating-top-level-actor") + } + + "capture and propagate the current context when processing the Create message in non top level actors" in { + Kamon.runWithContext(testContext("creating-non-top-level-actor")) { + system.actorOf(Props(new Actor { + def receive: Actor.Receive = { + case _ => + context.actorOf(Props(new Actor { + testActor ! propagatedContextKey() + def receive: Actor.Receive = { case _ => } + })) + } + })) ! "any" + } + + expectMsg("creating-non-top-level-actor") + } + + "keep the current context in the supervision cycle" when { + "the actor is resumed" in { + val supervisor = supervisorWithDirective(Resume) + Kamon.runWithContext(testContext("fail-and-resume")) { + supervisor ! "fail" + } + + expectMsg("fail-and-resume") // From the parent executing the supervision strategy + + // Ensure we didn't tie the actor with the initially captured context + supervisor ! "context" + expectMsg("MissingContext") + } + + "the actor is restarted" in { + val supervisor = supervisorWithDirective(Restart, sendPreRestart = true, sendPostRestart = true) + Kamon.runWithContext(testContext("fail-and-restart")) { + supervisor ! "fail" + } + + expectMsg("fail-and-restart") // From the parent executing the supervision strategy + expectMsg("fail-and-restart") // From the preRestart hook + expectMsg("fail-and-restart") // From the postRestart hook + + // Ensure we didn't tie the actor with the context + supervisor ! "context" + expectMsg("MissingContext") + } + + "the actor is stopped" in { + val supervisor = supervisorWithDirective(Stop, sendPostStop = true) + Kamon.runWithContext(testContext("fail-and-stop")) { + supervisor ! 
"fail" + } + + expectMsg("fail-and-stop") // From the parent executing the supervision strategy + expectMsg("fail-and-stop") // From the postStop hook + //TODO: FIXME expectNoMessage(1 second) + } + + "the failure is escalated" in { + val supervisor = supervisorWithDirective(Escalate, sendPostStop = true) + Kamon.runWithContext(testContext("fail-and-escalate")) { + supervisor ! "fail" + } + + expectMsg("fail-and-escalate") // From the parent executing the supervision strategy + expectMsg("fail-and-escalate") // From the grandparent executing the supervision strategy + expectMsg("fail-and-escalate") // From the postStop hook in the child + expectMsg("fail-and-escalate") // From the postStop hook in the parent + //TODO: FIXME expectNoMessage(1 second) + } + } + } + + private def propagatedContextKey(): String = + Kamon.currentContext().getTag(option(TestKey)).getOrElse("MissingContext") + + def supervisorWithDirective(directive: SupervisorStrategy.Directive, sendPreRestart: Boolean = false, sendPostRestart: Boolean = false, + sendPostStop: Boolean = false, sendPreStart: Boolean = false): ActorRef = { + + class GrandParent extends Actor { + val child = context.actorOf(Props(new Parent)) + + override def supervisorStrategy: SupervisorStrategy = OneForOneStrategy() { + case NonFatal(_) => testActor ! propagatedContextKey(); Stop + } + + def receive = { + case any => child forward any + } + } + + class Parent extends Actor { + val child = context.actorOf(Props(new Child)) + + override def supervisorStrategy: SupervisorStrategy = OneForOneStrategy() { + case NonFatal(_) => testActor ! propagatedContextKey(); directive + } + + def receive: Actor.Receive = { + case any => child forward any + } + + override def postStop(): Unit = { + if (sendPostStop) testActor ! propagatedContextKey() + super.postStop() + } + } + + class Child extends Actor { + def receive = { + case "fail" => throw new ArithmeticException("Division by zero.") + case "context" => sender ! 
propagatedContextKey() + } + + override def preRestart(reason: Throwable, message: Option[Any]): Unit = { + if (sendPreRestart) testActor ! propagatedContextKey() + super.preRestart(reason, message) + } + + override def postRestart(reason: Throwable): Unit = { + if (sendPostRestart) testActor ! propagatedContextKey() + super.postRestart(reason) + } + + override def postStop(): Unit = { + if (sendPostStop) testActor ! propagatedContextKey() + super.postStop() + } + + override def preStart(): Unit = { + if (sendPreStart) testActor ! propagatedContextKey() + super.preStart() + } + } + + system.actorOf(Props(new GrandParent)) + } +} + diff --git a/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/TestLogger.scala b/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/TestLogger.scala new file mode 100644 index 000000000..38a823cc2 --- /dev/null +++ b/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/TestLogger.scala @@ -0,0 +1,23 @@ +package kamon.instrumentation.pekko + +import org.apache.pekko.actor.Actor +import org.apache.pekko.event.Logging.LogEvent +import org.apache.pekko.event.slf4j.Slf4jLogger + +class TestLogger extends Slf4jLogger { + override def receive: PartialFunction[Any, Unit] = { + val filteredReceive: Actor.Receive = { + case event: LogEvent if(shouldDropEvent(event)) => + } + + filteredReceive.orElse(super.receive) + } + + private def shouldDropEvent(event: LogEvent): Boolean = + Option(event.message) + .map(_.toString) + .map(message => { + message.contains("Division") || + message.contains("ask pattern") + }).getOrElse(false) +} diff --git a/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/remote/MessageBufferTest.scala b/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/remote/MessageBufferTest.scala new file mode 100644 index 000000000..6986878e4 --- /dev/null +++ 
b/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/remote/MessageBufferTest.scala @@ -0,0 +1,34 @@ +package kamon.instrumentation.pekko.remote + +import org.apache.pekko.actor.Actor +import org.apache.pekko.util.MessageBuffer +import kamon.Kamon +import kamon.context.Context +import org.scalatest.matchers.should.Matchers +import org.scalatest.wordspec.AnyWordSpec + +class MessageBufferTest extends AnyWordSpec with Matchers { + + "the MessageBuffer instrumentation" should { + "remember the current context when appending message and apply it when foreach is called when used directly" in { + val messageBuffer = MessageBuffer.empty + val key = Context.key("some_key", "") + + Kamon.runWithContext(Context.of(key, "some_value")) { + messageBuffer.append("scala", Actor.noSender) + } + + Kamon.currentContext().get(key) shouldBe "" + + var iterated = false + messageBuffer.foreach { (msg, ref) => + iterated = true + Kamon.currentContext().get(key) shouldBe "some_value" + } + + iterated shouldBe true + + } + } + +} diff --git a/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/sharding/ShardingInstrumentationSpec.scala b/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/sharding/ShardingInstrumentationSpec.scala new file mode 100644 index 000000000..06b82f444 --- /dev/null +++ b/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/sharding/ShardingInstrumentationSpec.scala @@ -0,0 +1,171 @@ +package org.apache.pekko.kamon.instrumentation.pekko.sharding + +import org.apache.pekko.actor._ +import org.apache.pekko.cluster.sharding.ShardCoordinator.Internal.{HandOff, ShardStopped} +import org.apache.pekko.cluster.sharding.ShardCoordinator.ShardAllocationStrategy +import org.apache.pekko.cluster.sharding.ShardRegion.{GracefulShutdown, ShardId} +import org.apache.pekko.cluster.sharding.{ClusterSharding, ClusterShardingSettings, ShardRegion} +import org.apache.pekko.testkit.TestActor.Watch +import 
org.apache.pekko.testkit.{ImplicitSender, TestKitBase} +import com.typesafe.config.ConfigFactory +import kamon.instrumentation.pekko.PekkoClusterShardingMetrics._ +import kamon.tag.TagSet +import kamon.testkit.{InitAndStopKamonAfterAll, InstrumentInspection, MetricInspection} +import org.scalactic.TimesOnInt.convertIntToRepeater +import org.scalatest.concurrent.Eventually +import org.scalatest.matchers.should.Matchers +import org.scalatest.time._ +import org.scalatest.wordspec.AnyWordSpecLike + +import scala.collection.immutable +import scala.concurrent.Future +import scala.util.Random + +case class TestMessage(shard: String, entity: String) + +class ShardingInstrumentationSpec + extends TestKitBase + with AnyWordSpecLike with Matchers + with ImplicitSender + with MetricInspection.Syntax + with InstrumentInspection.Syntax + with InitAndStopKamonAfterAll + with Eventually { + + lazy val system: ActorSystem = { + ActorSystem( + "sharding", + ConfigFactory + .parseString(""" + |pekko { + | loglevel = WARNING + | actor.provider = "cluster" + | remote.artery { + | canonical { + | hostname = "127.0.0.1" + | port = 2551 + | } + | } + | cluster { + | seed-nodes = ["pekko://sharding@127.0.0.1:2551"] + | log-info = on + | jmx.multi-mbeans-in-same-jvm = on + | } + |} + """.stripMargin) + .withFallback(ConfigFactory.load()) + ) + } + + val entityIdExtractor: ShardRegion.ExtractEntityId = { case msg @ TestMessage(_, entity) => (entity, msg) } + val shardIdExtractor: ShardRegion.ExtractShardId = { case msg @ TestMessage(shard, _) => shard } + + val StaticAllocationStrategy = new ShardAllocationStrategy { + override def allocateShard( + requester: ActorRef, + shardId: ShardId, + currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardId]]) + : Future[ActorRef] = { + Future.successful(requester) + } + + override def rebalance( + currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardId]], + rebalanceInProgress: Set[ShardId]): Future[Set[ShardId]] = { + 
Future.successful(Set.empty) + } + } + + def registerTypes(shardedType: String, props: Props, system: ActorSystem, allocationStrategy: ShardAllocationStrategy): ActorRef = + ClusterSharding(system).start( + typeName = shardedType, + entityProps = props, + settings = ClusterShardingSettings(system), + extractEntityId = entityIdExtractor, + extractShardId = shardIdExtractor, + allocationStrategy = allocationStrategy, + handOffStopMessage = PoisonPill + ) + + class ShardedTypeContext { + val shardType = s"TestType-${Random.nextLong()}" + val region = registerTypes(shardType, TestActor.props(testActor), system, StaticAllocationStrategy) + val shardTags = TagSet.builder() + .add("type", shardType) + .add("system", system.name) + .build() + } + + "the Cluster sharding instrumentation" should { + "track shards, entities and messages" in new ShardedTypeContext { + region ! TestMessage("s1", "e1") + region ! TestMessage("s1", "e2") + region ! TestMessage("s2", "e3") + + 3 times { + expectMsg("OK") + } + + RegionProcessedMessages.withTags(shardTags).value() shouldBe 3L + + eventually(timeout(Span(2, Seconds))) { + RegionHostedShards.withTags(shardTags).distribution().max shouldBe 2L + RegionHostedEntities.withTags(shardTags).distribution().max shouldBe 3L + } + + eventually(timeout(Span(2, Seconds))) { + ShardProcessedMessages.withTags(shardTags).distribution(resetState = false).sum shouldBe 3L + ShardHostedEntities.withTags(shardTags).distribution(resetState = false).max shouldBe 2L + } + } + + "clean metrics on handoff" in new ShardedTypeContext { + region ! TestMessage("s1", "e1") + expectMsg("OK") + + eventually(timeout(Span(2, Seconds))) { + RegionHostedShards.withTags(shardTags).distribution().max shouldBe 1L + RegionHostedEntities.withTags(shardTags).distribution().max shouldBe 1L + } + + region ! 
HandOff("s1") + expectMsg(ShardStopped("s1")) + + eventually(timeout(Span(10, Seconds))) { + RegionHostedShards.withTags(shardTags).distribution().max shouldBe 0L + RegionHostedEntities.withTags(shardTags).distribution().max shouldBe 0L + } + } + + "clean metrics on shutdown" in new ShardedTypeContext { + region ! TestMessage("s1", "e1") + expectMsg("OK") + + RegionHostedShards.tagValues("type") should contain(shardType) + RegionHostedEntities.tagValues("type") should contain(shardType) + RegionProcessedMessages.tagValues("type") should contain(shardType) + + testActor ! Watch(region) + region ! GracefulShutdown + expectTerminated(region) + + RegionHostedShards.tagValues("type") should not contain(shardType) + RegionHostedEntities.tagValues("type") should not contain(shardType) + RegionProcessedMessages.tagValues("type") should not contain(shardType) + } + } + +} + +object TestActor { + + def props(testActor: ActorRef) = + Props(classOf[TestActor], testActor) +} + +class TestActor(testActor: ActorRef) extends Actor { + + override def receive: Actor.Receive = { + case _ => testActor ! 
"OK" + } +} diff --git a/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/sharding/ShardingMessageBufferingSpec.scala b/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/sharding/ShardingMessageBufferingSpec.scala new file mode 100644 index 000000000..8d30c377c --- /dev/null +++ b/instrumentation/kamon-pekko/src/test/scala/kamon/instrumentation/pekko/sharding/ShardingMessageBufferingSpec.scala @@ -0,0 +1,91 @@ +package kamon.instrumentation.pekko.sharding + +import org.apache.pekko.actor._ +import org.apache.pekko.cluster.Cluster +import org.apache.pekko.cluster.sharding.{ClusterSharding, ClusterShardingSettings, ShardRegion} +import org.apache.pekko.testkit.{ImplicitSender, TestKitBase} +import com.typesafe.config.ConfigFactory +import kamon.Kamon +import kamon.context.Context +import kamon.instrumentation.pekko.ContextEchoActor +import kamon.testkit.{InitAndStopKamonAfterAll, MetricInspection} +import org.scalatest.matchers.should.Matchers +import org.scalatest.wordspec.AnyWordSpecLike + +import scala.concurrent.duration._ + +class ShardingMessageBufferingSpec extends TestKitBase with AnyWordSpecLike with Matchers with ImplicitSender + with MetricInspection.Syntax with InitAndStopKamonAfterAll { + + implicit lazy val system: ActorSystem = { + ActorSystem("cluster-sharding-spec-system", ConfigFactory.parseString( + """ + |pekko { + | loglevel = INFO + | loggers = [ "org.apache.pekko.event.slf4j.Slf4jLogger" ] + | + | actor { + | provider = "cluster" + | } + | + | remote.artery { + | canonical { + | hostname = "127.0.0.1" + | port = 2556 + | } + | } + |} + """.stripMargin)) + } + + val remoteSystem: ActorSystem = ActorSystem("cluster-sharding-spec-remote-system", ConfigFactory.parseString( + """ + |pekko { + | loglevel = INFO + | loggers = [ "org.apache.pekko.event.slf4j.Slf4jLogger" ] + | + | actor { + | provider = "cluster" + | } + | + | remote.artery { + | canonical { + | hostname = "127.0.0.1" + | port = 2557 + | } + | 
} + |} + """.stripMargin)) + + def contextWithBroadcast(name: String): Context = + Context.Empty.withTag( + ContextEchoActor.EchoTag, name + ) + + val extractEntityId: ShardRegion.ExtractEntityId = { + case entityId: String => (entityId, "reply-trace-token") + } + val extractShardId: ShardRegion.ExtractShardId = { + case entityId: String => (entityId.toInt % 10).toString + } + + "The MessageBuffer instrumentation" should { + "propagate the current Context when sending message to a sharding region that has not been started" in { + Cluster(system).join(Cluster(system).selfAddress) + Cluster(remoteSystem).join(Cluster(system).selfAddress) + + val replierRegion: ActorRef = ClusterSharding(system).start( + typeName = "replier", + entityProps = ContextEchoActor.props(None), + settings = ClusterShardingSettings(system), + extractEntityId = extractEntityId, + extractShardId = extractShardId) + + Kamon.runWithContext(contextWithBroadcast("cluster-sharding-actor-123")) { + replierRegion ! "123" + } + + expectMsg(10.seconds, "name=cluster-sharding-actor-123") + } + } +} diff --git a/project/build.properties b/project/build.properties index 3161d2146..875b706a8 100644 --- a/project/build.properties +++ b/project/build.properties @@ -1 +1 @@ -sbt.version=1.6.1 +sbt.version=1.9.2