jmh fixes
vishramachandran committed Dec 2, 2024
1 parent aff7f80 commit 82a40ca
Showing 3 changed files with 19 additions and 9 deletions.
@@ -34,7 +34,7 @@ import filodb.timeseries.TestTimeseriesProducer
   */
 @State(Scope.Thread)
 class Base2ExponentialHistogramQueryBenchmark extends StrictLogging {
-  org.slf4j.LoggerFactory.getLogger("filodb").asInstanceOf[Logger].setLevel(Level.DEBUG)
+  org.slf4j.LoggerFactory.getLogger("filodb").asInstanceOf[Logger].setLevel(Level.WARN)
 
   import filodb.coordinator._
   import client.Client.{actorAsk, asyncAsk}
@@ -46,7 +46,7 @@ class Base2ExponentialHistogramQueryBenchmark extends StrictLogging {
 
   // TODO: move setup and ingestion to another trait
   val system = ActorSystem("test", ConfigFactory.load("filodb-defaults.conf")
-    .withValue("filodb.memstore.ingestion-buffer-mem-size", ConfigValueFactory.fromAnyRef("30MB")))
+    .withValue("filodb.memstore.ingestion-buffer-mem-size", ConfigValueFactory.fromAnyRef("300MB")))
 
   private val cluster = FilodbCluster(system)
   cluster.join()
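
The ingestion buffer override here is raised from 30MB to 300MB so setup-time ingestion has headroom. A minimal, self-contained sketch of how a Typesafe Config override like this parses (illustrative only, not FiloDB code; per the HOCON spec, "MB" means 10^6 bytes):

    import com.typesafe.config.{ConfigFactory, ConfigValueFactory}

    object IngestionBufferConfigCheck extends App {
      // Layer one value over a base config, the same way the benchmark does.
      val config = ConfigFactory.parseString("filodb.memstore.ingestion-buffer-mem-size = 30MB")
        .withValue("filodb.memstore.ingestion-buffer-mem-size", ConfigValueFactory.fromAnyRef("300MB"))

      // getMemorySize parses HOCON size strings such as "300MB" into bytes.
      val bytes = config.getMemorySize("filodb.memstore.ingestion-buffer-mem-size").toBytes
      println(s"ingestion buffer = $bytes bytes")  // prints 300000000
    }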
@@ -90,11 +90,12 @@ class Base2ExponentialHistogramQueryBenchmark extends StrictLogging {
   val (producingFut, containerStream) = TestTimeseriesProducer.metricsToContainerStream(startTime, numShards, numSeries,
     numMetricNames = 1, numSamplesPerTs * numSeries, dataset, shardMapper, spread,
     publishIntervalSec = 10, numBuckets = numBuckets, expHist = true)
+  val endTime = startTime + (numSamplesPerTs * 10000)
   val ingestTask = containerStream.groupBy(_._1)
     // Asynchronously subscribe and ingest each shard
     .mapParallelUnordered(numShards) { groupedStream =>
       val shard = groupedStream.key
-      println(s"Starting ingest exp histograms on shard $shard...")
+      println(s"Starting ingest exp histograms on shard $shard from timestamp $startTime to $endTime")
       val shardStream = groupedStream.zipWithIndex.flatMap { case ((_, bytes), idx) =>
         val data = bytes.map { array => SomeData(RecordContainer(array), idx) }
         Observable.fromIterable(data)
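
The new endTime lets the per-shard log line report the ingested time range. It follows from the producer settings above: with publishIntervalSec = 10, consecutive samples of a series are 10,000 ms apart. A one-function sketch of the arithmetic (the name is illustrative, not part of the benchmark):

    // Why endTime = startTime + numSamplesPerTs * 10000: samples arrive every
    // publishIntervalSec * 1000 = 10000 ms, so numSamplesPerTs samples span that many ms.
    def ingestEndTimeMs(startTimeMs: Long, numSamplesPerTs: Int, publishIntervalSec: Int = 10): Long =
      startTimeMs + numSamplesPerTs.toLong * publishIntervalSec * 1000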
@@ -117,13 +118,15 @@
   val histQuantileQuery =
     """histogram_quantile(0.7, sum(rate(http_request_latency_delta{_ws_="demo", _ns_="App-0"}[5m])))"""
   val queries = Seq(histQuantileQuery)
-  val queryTime = startTime + (7 * 60 * 1000) // 5 minutes from start until 60 minutes from start
+  val queryStartTime = startTime/1000 + (5 * 60) // 5 minutes from start until 60 minutes from start
   val queryStep = 120 // # of seconds between each query sample "step"
-  val qParams = TimeStepParams(queryTime/1000, queryStep, (queryTime/1000) + queryIntervalMin*60)
+  val queryEndTime = queryStartTime + queryIntervalMin*60
+  val qParams = TimeStepParams(queryStartTime, queryStep, queryEndTime)
   val logicalPlans = queries.map { q => Parser.queryRangeToLogicalPlan(q, qParams) }
   val queryCommands = logicalPlans.map { plan =>
     LogicalPlan2Query(dataset.ref, plan, QueryContext(Some(new StaticSpreadProvider(SpreadChange(0, spread))), 20000))
   }
+  println(s"Querying data from $queryStartTime to $queryEndTime")
 
   var queriesSucceeded = 0
   var queriesFailed = 0
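
This change aligns the code with its comment (the window now starts 5 minutes after ingestion begins, not 7) and keeps the millisecond-to-second conversion in one place, since TimeStepParams takes epoch seconds. A small sketch of the corrected window arithmetic (the function name is illustrative):

    // Query window in epoch seconds, matching the corrected code above:
    // start 5 minutes after ingestion start, end queryIntervalMin minutes later.
    def queryWindow(startTimeMs: Long, queryIntervalMin: Int): (Long, Long) = {
      val queryStartTime = startTimeMs / 1000 + 5 * 60
      val queryEndTime   = queryStartTime + queryIntervalMin * 60
      (queryStartTime, queryEndTime)
    }
    // e.g. queryWindow(1700000000000L, 60) == (1700000300, 1700003900);
    // with queryStep = 120 that is 3600 / 120 + 1 = 31 samples per series.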
@@ -594,7 +594,8 @@ final case class GeometricBuckets(firstBucket: Double,
 
 object Base2ExpHistogramBuckets {
   // TODO: make maxBuckets default configurable; not straightforward to get handle to global config from here
-  val maxBuckets = 200
+  // See the PR for the benchmark results on which maxBuckets was fixed. Don't increase without analysis.
+  val maxBuckets = 180
   val maxAbsScale = 100
   val maxAbsBucketIndex = 500
 }
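
maxBuckets drops from 200 to 180 on the strength of the benchmark results in this PR. For intuition on what the cap trades away, here is the standard base-2 exponential-bucket math; this assumes OpenTelemetry-style boundaries (bucket i ends at (2^(2^-scale))^(i+1)) and is a sketch, not FiloDB's implementation:

    // Buckets per factor of 2 ("octave") at a given scale, and the ratio between the
    // highest and lowest bucket boundary that numBuckets buckets can cover at that scale.
    def bucketsPerOctave(scale: Int): Double = math.pow(2, scale)
    def dynamicRange(numBuckets: Int, scale: Int): Double =
      math.pow(2, numBuckets / bucketsPerOctave(scale))

    // At scale 3 (8 buckets per octave), dynamicRange(180, 3) = 2^22.5 ≈ 5.9e6,
    // so 180 buckets still cover nearly seven decades of values.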
12 changes: 9 additions & 3 deletions run_benchmarks.sh
@@ -1,6 +1,11 @@
 #!/bin/bash
-sbt -Drust.optimize=true "jmh/jmh:run -rf json -i 5 -wi 3 -f 1 -jvmArgsAppend -XX:MaxInlineLevel=20 \
-  -jvmArgsAppend -Xmx4g -jvmArgsAppend -XX:MaxInlineSize=99 -jvmArgsAppend -Dkamon.enabled=false \
+
+sbt -Drust.optimize=true "jmh/jmh:run -rf json -i 2 -wi 2 -f 1 \
+  -jvmArgsAppend -Dlogback.configurationFile=../conf/logback-perf.xml \
+  -jvmArgsAppend -XX:MaxInlineLevel=20 \
+  -jvmArgsAppend -Xmx4g \
+  -jvmArgsAppend -XX:MaxInlineSize=99 \
+  -jvmArgsAppend -Dkamon.enabled=false \
   filodb.jmh.Base2ExponentialHistogramQueryBenchmark \
   filodb.jmh.QueryHiCardInMemoryBenchmark \
   filodb.jmh.QueryInMemoryBenchmark \
@@ -11,4 +16,5 @@
   filodb.jmh.PartKeyLuceneIndexBenchmark \
   filodb.jmh.PartKeyTantivyIndexBenchmark"
 
-# -prof 'async:libPath=/path/to/async-profiler-3.0-macos/lib/libasyncProfiler.dylib;event=cpu;output=flamegraphdir=./profile-results' \
+# Add below argument to enable profiling
+# -prof \"async:libPath=/path/to/async-profiler-3.0-macos/lib/libasyncProfiler.dylib;event=cpu;output=flamegraph;dir=./profile-results\" \
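
The script now runs 2 measurement iterations (-i 2) and 2 warmups (-wi 2) in a single fork (-f 1), emits JSON results (-rf json), and routes logging through conf/logback-perf.xml. To profile, the commented -prof argument goes inside the quoted jmh:run string; a hypothetical single-benchmark invocation (the async-profiler library path is machine-specific):

    # Profile only the exp-histogram benchmark; mirrors the commented -prof line above.
    sbt -Drust.optimize=true "jmh/jmh:run -rf json -i 2 -wi 2 -f 1 \
      -prof \"async:libPath=/path/to/async-profiler-3.0-macos/lib/libasyncProfiler.dylib;event=cpu;output=flamegraph;dir=./profile-results\" \
      filodb.jmh.Base2ExponentialHistogramQueryBenchmark"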
