From adb4ce2ce1b02fe3add8891a809bd826d9a470ca Mon Sep 17 00:00:00 2001
From: Alex Theimer
Date: Fri, 15 Sep 2023 14:51:09 -0700
Subject: [PATCH] TEMP

---
 .../queryplanner/LogicalPlanUtils.scala       |  37 +-
 .../queryplanner/MultiPartitionPlanner.scala  | 250 ++++++++++---
 .../PartitionLocationPlanner.scala            |  47 ++-
 .../queryplanner/QueryUtils.scala             |  35 ++
 .../queryplanner/ShardKeyRegexPlanner.scala   | 129 +------
 .../queryplanner/SingleClusterPlanner.scala   |  92 ++---
 .../MultiPartitionPlannerSpec.scala           |   1 +
 .../queryplanner/PlannerHierarchySpec.scala   | 343 ++++++++----------
 .../ShardKeyRegexPlannerSpec.scala            | 169 +++++----
 .../SingleClusterPlannerSpec.scala            |  12 +-
 .../scala/filodb.core/query/QueryUtils.scala  |  82 -----
 .../filodb.core/query/QueryUtilsSpec.scala    |  36 --
 .../scala/filodb/prometheus/ast/Vectors.scala |  13 +-
 .../filodb/query/exec/StitchRvsExec.scala     |   2 +-
 .../filodb/query/PlanValidationSpec.scala     |   7 +
 15 files changed, 611 insertions(+), 644 deletions(-)
 create mode 100644 coordinator/src/main/scala/filodb.coordinator/queryplanner/QueryUtils.scala
 delete mode 100644 core/src/main/scala/filodb.core/query/QueryUtils.scala
 delete mode 100644 core/src/test/scala/filodb.core/query/QueryUtilsSpec.scala

diff --git a/coordinator/src/main/scala/filodb.coordinator/queryplanner/LogicalPlanUtils.scala b/coordinator/src/main/scala/filodb.coordinator/queryplanner/LogicalPlanUtils.scala
index d504a78f88..59936f3120 100644
--- a/coordinator/src/main/scala/filodb.coordinator/queryplanner/LogicalPlanUtils.scala
+++ b/coordinator/src/main/scala/filodb.coordinator/queryplanner/LogicalPlanUtils.scala
@@ -6,8 +6,8 @@ import scala.collection.mutable.ArrayBuffer
 import com.typesafe.scalalogging.StrictLogging
 
 import filodb.core.TargetSchemaProvider
-import filodb.core.query.{ColumnFilter, QueryContext, QueryUtils, RangeParams}
-import filodb.core.query.Filter.{Equals, EqualsRegex}
+import filodb.core.query.{ColumnFilter, QueryContext, RangeParams}
+import filodb.core.query.Filter.Equals
 import filodb.prometheus.ast.SubqueryUtils
 import filodb.prometheus.ast.Vectors.PromMetricLabel
 import filodb.prometheus.ast.WindowConstants
@@ -414,6 +413,7 @@ object LogicalPlanUtils extends StrictLogging {
    */
   private def sameRawSeriesTargetSchemaColumns(plan: LogicalPlan,
                                                targetSchemaProvider: TargetSchemaProvider,
+                                               shardKeyMatcher: Seq[ColumnFilter] => Seq[Seq[ColumnFilter]],
                                                getShardKeyFilters: RawSeries => Seq[Seq[ColumnFilter]])
   : Option[Seq[String]] = {
     // compose a stream of Options for each RawSeries--
       .filter(_.isInstanceOf[RawSeries])
       .map(_.asInstanceOf[RawSeries]).flatMap{ rs =>
       val interval = LogicalPlanUtils.getSpanningIntervalSelector(rs)
-      val rawShardKeyFilters = getShardKeyFilters(rs)
-      val shardKeyFilters = rawShardKeyFilters.flatMap{ filters =>
-        val resolvedFilters: Seq[Seq[ColumnFilter]] = filters.map { filter =>
-          filter.filter match {
-            // Take care of pipe-joined values here -- create one Equals filter per value.
- case EqualsRegex(values: String) if QueryUtils.isPipeOnlyRegex(values) => - QueryUtils.splitAtUnescapedPipes(values).map(value => ColumnFilter(filter.column, Equals(value))) - case _ => Seq(filter) - }} - QueryUtils.combinations(resolvedFilters) - } - shardKeyFilters.map{ shardKey => - val filters = LogicalPlanUtils.upsertFilters(rs.filters, shardKey) - LogicalPlanUtils.getTargetSchemaIfUnchanging(targetSchemaProvider, filters, interval) + val shardKeyFilters = getShardKeyFilters(rs) + .flatMap { filters => + val hasNonEqualShardKeyFilter = filters.exists(!_.filter.isInstanceOf[Equals]) + if (hasNonEqualShardKeyFilter) shardKeyMatcher(filters) else Seq(filters) + } + shardKeyFilters.map{ shardKey => + val filters = LogicalPlanUtils.upsertFilters(rs.filters, shardKey) + LogicalPlanUtils.getTargetSchemaIfUnchanging(targetSchemaProvider, filters, interval) + } } - } if (rsTschemaOpts.isEmpty) { return None } @@ -457,6 +451,7 @@ object LogicalPlanUtils extends StrictLogging { */ private def canPushdown(plan: CandidatePushdownPlan, targetSchemaProvider: TargetSchemaProvider, + shardKeyMatcher: Seq[ColumnFilter] => Seq[Seq[ColumnFilter]], nonMetricShardKeyCols: Seq[String], getShardKeyFilters: RawSeries => Seq[Seq[ColumnFilter]]): Boolean = { val hasPushdownableClause = plan match { @@ -482,7 +477,7 @@ object LogicalPlanUtils extends StrictLogging { // return true // } - val tschema = sameRawSeriesTargetSchemaColumns(plan, targetSchemaProvider, getShardKeyFilters) + val tschema = sameRawSeriesTargetSchemaColumns(plan, targetSchemaProvider, shardKeyMatcher, getShardKeyFilters) if (tschema.isEmpty) { return false } @@ -504,6 +499,7 @@ object LogicalPlanUtils extends StrictLogging { */ def getPushdownKeys[T](lp: LogicalPlan, targetSchemaProvider: TargetSchemaProvider, + shardKeyMatcher: Seq[ColumnFilter] => Seq[Seq[ColumnFilter]], nonMetricShardKeyCols: Seq[String], getRawPushdownKeys: RawSeries => Set[T], getShardKeyFilters: RawSeries => Seq[Seq[ColumnFilter]]): Option[Set[T]] = { @@ -535,12 +531,13 @@ object LogicalPlanUtils extends StrictLogging { lhsKeys.isDefined && rhsKeys.isDefined && // either the lhs/rhs keys are equal, or at least one of lhs/rhs includes only scalars. 
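        // (e.g. a vector(1) operand contributes an empty key set, so the union
        // below degenerates to the other side's keys)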
(lhsKeys.get.isEmpty || rhsKeys.get.isEmpty || lhsKeys == rhsKeys) && - canPushdown(bj, targetSchemaProvider, nonMetricShardKeyCols, getShardKeyFilters) + canPushdown(bj, targetSchemaProvider, shardKeyMatcher, nonMetricShardKeyCols, getShardKeyFilters) // union lhs/rhs keys, since one might be empty (if it's a scalar) if (canPushdownBj) Some(lhsKeys.get.union(rhsKeys.get)) else None case agg: Aggregate => val keys = helper(agg.vectors) - val canPushdownAgg = canPushdown(agg, targetSchemaProvider, nonMetricShardKeyCols, getShardKeyFilters) + val canPushdownAgg = + canPushdown(agg, targetSchemaProvider, shardKeyMatcher, nonMetricShardKeyCols, getShardKeyFilters) if (keys.isDefined && canPushdownAgg) keys else None case nl: NonLeafLogicalPlan => // return the set of all child keys iff all child plans can be pushdown-optimized diff --git a/coordinator/src/main/scala/filodb.coordinator/queryplanner/MultiPartitionPlanner.scala b/coordinator/src/main/scala/filodb.coordinator/queryplanner/MultiPartitionPlanner.scala index e38116c621..5c7f0666de 100644 --- a/coordinator/src/main/scala/filodb.coordinator/queryplanner/MultiPartitionPlanner.scala +++ b/coordinator/src/main/scala/filodb.coordinator/queryplanner/MultiPartitionPlanner.scala @@ -1,21 +1,22 @@ package filodb.coordinator.queryplanner import java.util.concurrent.ConcurrentHashMap - import scala.collection.concurrent.{Map => ConcurrentMap} import scala.jdk.CollectionConverters._ - import com.typesafe.scalalogging.StrictLogging import io.grpc.ManagedChannel - import filodb.coordinator.queryplanner.LogicalPlanUtils._ import filodb.core.metadata.{Dataset, DatasetOptions, Schemas} -import filodb.core.query.{ColumnFilter, PromQlQueryParams, QueryConfig, QueryContext, RvRange} +import filodb.core.query.{ColumnFilter, PromQlQueryParams, QueryConfig, QueryContext, RangeParams, RvRange} import filodb.grpc.GrpcCommonUtils import filodb.query._ import filodb.query.LogicalPlan._ import filodb.query.exec._ + +import scala.collection.mutable +import scala.collection.mutable.ArrayBuffer + case class PartitionAssignment(partitionName: String, httpEndPoint: String, timeRange: TimeRange, grpcEndPoint: Option[String] = None) @@ -48,10 +49,11 @@ class MultiPartitionPlanner(val partitionLocationProvider: PartitionLocationProv localPartitionName: String, val dataset: Dataset, val queryConfig: QueryConfig, + shardKeyMatcher: Seq[ColumnFilter] => Seq[Seq[ColumnFilter]] = PartitionLocationPlanner.equalsOnlyShardKeyMatcher, remoteExecHttpClient: RemoteExecHttpClient = RemoteHttpClient.defaultClient, channels: ConcurrentMap[String, ManagedChannel] = new ConcurrentHashMap[String, ManagedChannel]().asScala) - extends PartitionLocationPlanner(dataset, partitionLocationProvider) with StrictLogging { + extends PartitionLocationPlanner(dataset, partitionLocationProvider, shardKeyMatcher) with StrictLogging { override val schemas: Schemas = Schemas(dataset.schema) override val dsOptions: DatasetOptions = schemas.part.options @@ -95,7 +97,12 @@ class MultiPartitionPlanner(val partitionLocationProvider: PartitionLocationProv } else logicalPlan match { case mqp: MetadataQueryPlan => materializeMetadataQueryPlan(mqp, qContext).plans.head case lp: TsCardinalities => materializeTsCardinalities(lp, qContext).plans.head - case _ => walkLogicalPlanTree(logicalPlan, qContext).plans.head + case _ => + val result = walkLogicalPlanTree(logicalPlan, qContext) + if (result.plans.size > 1) { + val dispatcher = PlannerUtil.pickDispatcher(result.plans) + MultiPartitionDistConcatExec(qContext, 
dispatcher, result.plans) + } else result.plans.head } } @@ -126,18 +133,19 @@ class MultiPartitionPlanner(val partitionLocationProvider: PartitionLocationProv val execPlan = if (partitionName.equals(localPartitionName)) { localPartitionPlanner.materialize(logicalPlan, qContext) } else { + val promQl = LogicalPlanParser.convertToQuery(logicalPlan) val remoteContext = logicalPlan match { case tls: TopLevelSubquery => val instantTime = qContext.origQueryParams.asInstanceOf[PromQlQueryParams].startSecs val stepSecs = tls.stepMs / 1000 - generateRemoteExecParamsWithStep(qContext, instantTime, stepSecs, instantTime) + generateRemoteExecParamsWithStep(qContext, promQl, instantTime, stepSecs, instantTime) case psp: PeriodicSeriesPlan => val startSecs = psp.startMs / 1000 val stepSecs = psp.stepMs / 1000 val endSecs = psp.endMs / 1000 - generateRemoteExecParamsWithStep(qContext, startSecs, stepSecs, endSecs) + generateRemoteExecParamsWithStep(qContext, promQl, startSecs, stepSecs, endSecs) case _ => - generateRemoteExecParams(qContext, startMs, endMs) + generateRemoteExecParams(qContext, promQl, startMs, endMs) } // Single partition but remote, send the entire plan remotely if (grpcEndpoint.isDefined && !(queryConfig.grpcPartitionsDenyList.contains("*") || @@ -192,30 +200,39 @@ class MultiPartitionPlanner(val partitionLocationProvider: PartitionLocationProv case lp: PeriodicSeriesWithWindowing => materializePlanHandleSplitLeaf(lp, qContext) case _: PeriodicSeries | _: RawChunkMeta | - _: RawSeries => materializePeriodicAndRawSeries(logicalPlan, qContext) + _: RawSeries => materializePlanHandleSplitLeaf(logicalPlan, qContext) } } // scalastyle:on cyclomatic.complexity private def getRoutingKeys(logicalPlan: LogicalPlan) = { - val columnFilterGroup = LogicalPlan.getColumnFilterGroup(logicalPlan) + val columnFilterGroups = LogicalPlan.getColumnFilterGroup(logicalPlan).flatMap { filters => + // Check if we need to use the shardKeyMatcher. 
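+      // For illustration (hypothetical values): a filter like _ns_=~"App.*" is expanded by
+      // the matcher into concrete groups such as {_ns_="App-1"} and {_ns_="App-2"};
+      // groups that are already Equals-only pass through unchanged.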
+ val hasNonEqualsShardKeyFilter = filters.exists { filter => + dataset.options.nonMetricShardColumns.contains(filter.column) && + !filter.filter.isInstanceOf[filodb.core.query.Filter.Equals] + } + if (hasNonEqualsShardKeyFilter) shardKeyMatcher(filters.toSeq) else Seq(filters.toSeq) + } val routingKeys = dataset.options.nonMetricShardColumns - .map(x => (x, LogicalPlan.getColumnValues(columnFilterGroup, x))) + .map(x => (x, LogicalPlan.getColumnValues(columnFilterGroups.map(_.toSet), x))) if (routingKeys.flatMap(_._2).isEmpty) Seq.empty else routingKeys.filter(x => x._2.nonEmpty) } - private def generateRemoteExecParams(queryContext: QueryContext, startMs: Long, endMs: Long) = { + private def generateRemoteExecParams(queryContext: QueryContext, promQl: String, startMs: Long, endMs: Long) = { val queryParams = queryContext.origQueryParams.asInstanceOf[PromQlQueryParams] - queryContext.copy(origQueryParams = queryParams.copy(startSecs = startMs/1000, endSecs = endMs / 1000), + queryContext.copy( + origQueryParams = queryParams.copy(promQl = promQl, startSecs = startMs/1000, endSecs = endMs / 1000), plannerParams = queryContext.plannerParams.copy(processMultiPartition = false)) } private def generateRemoteExecParamsWithStep( - queryContext: QueryContext, startSecs: Long, stepSecs: Long, endSecs: Long + queryContext: QueryContext, promQl: String, startSecs: Long, stepSecs: Long, endSecs: Long ) = { val queryParams = queryContext.origQueryParams.asInstanceOf[PromQlQueryParams] queryContext.copy( - origQueryParams = queryParams.copy(startSecs = startSecs, stepSecs = stepSecs, endSecs = endSecs), + origQueryParams = + queryParams.copy(promQl = promQl, startSecs = startSecs, stepSecs = stepSecs, endSecs = endSecs), plannerParams = queryContext.plannerParams.copy(processMultiPartition = false) ) } @@ -241,12 +258,13 @@ class MultiPartitionPlanner(val partitionLocationProvider: PartitionLocationProv val queryTimeRange = TimeRange(periodicSeriesTimeWithOffset.startMs - lookBackMs, periodicSeriesTimeWithOffset.endMs) - val partitions = if (routingKeys.isEmpty) List.empty - else { - val routingKeyMap = routingKeys.map(x => (x._1, x._2.head)).toMap - partitionLocationProvider.getPartitions(routingKeyMap, queryTimeRange). - sortBy(_.timeRange.startMs) - } + val keys = routingKeys.map(_._1) + val values = routingKeys.map(_._2.toSeq) + val partitions = QueryUtils.combinations(values) + .map(valueCombo => keys.zip(valueCombo)) + .flatMap(shardKey => partitionLocationProvider.getPartitions(shardKey.toMap, queryTimeRange)) + .distinct + .sortBy(_.timeRange.startMs) if (partitions.isEmpty && routingKeys.nonEmpty) logger.warn(s"No partitions found for routing keys: $routingKeys") @@ -291,8 +309,8 @@ class MultiPartitionPlanner(val partitionLocationProvider: PartitionLocationProv if (execPlans.size == 1) execPlans.head else { // TODO: Do we pass in QueryContext in LogicalPlan's helper rvRangeForPlan? 
- StitchRvsExec(qContext, inProcessPlanDispatcher, rvRangeFromPlan(logicalPlan), - execPlans.sortWith((x, _) => !x.isInstanceOf[PromQlRemoteExec])) + MultiPartitionDistConcatExec( + qContext, inProcessPlanDispatcher, execPlans.sortWith((x, _) => !x.isInstanceOf[PromQlRemoteExec])) } // ^^ Stitch RemoteExec plan results with local using InProcessPlanDispatcher // Sort to move RemoteExec in end as it does not have schema @@ -314,10 +332,14 @@ class MultiPartitionPlanner(val partitionLocationProvider: PartitionLocationProv val timeRange = timeRangeOverride.getOrElse(TimeRange(1000 * queryParams.startSecs, 1000 * queryParams.endSecs)) val (partitionName, grpcEndpoint) = (partition.partitionName, partition.grpcEndPoint) if (partitionName.equals(localPartitionName)) { - val lpWithUpdatedTime = copyLogicalPlanWithUpdatedTimeRange(logicalPlan, timeRange) + // FIXME: subquery tests fail when their time-ranges are updated + val lpWithUpdatedTime = if (timeRangeOverride.isDefined) { + copyLogicalPlanWithUpdatedTimeRange(logicalPlan, timeRange) + } else logicalPlan localPartitionPlanner.materialize(lpWithUpdatedTime, queryContext) } else { - val ctx = generateRemoteExecParams(queryContext, timeRange.startMs, timeRange.endMs) + val promQL = LogicalPlanParser.convertToQuery(logicalPlan) + val ctx = generateRemoteExecParams(queryContext, promQL, timeRange.startMs, timeRange.endMs) if (grpcEndpoint.isDefined && !(queryConfig.grpcPartitionsDenyList.contains("*") || queryConfig.grpcPartitionsDenyList.contains(partitionName.toLowerCase))) { @@ -374,6 +396,42 @@ class MultiPartitionPlanner(val partitionLocationProvider: PartitionLocationProv } } + private def canSupportMultiPartitionCalls(execPlans: Seq[ExecPlan]): Boolean = + execPlans.forall { + case _: PromQlRemoteExec => false + case _ => true + } + + private def materializeAggregate(aggregate: Aggregate, queryContext: QueryContext): PlanResult = { + val plan = if (LogicalPlanUtils.hasDescendantAggregateOrJoin(aggregate.vectors)) { + val childPlan = materialize(aggregate.vectors, queryContext) + addAggregator(aggregate, queryContext, PlanResult(Seq(childPlan))) + } else { + val queryParams = queryContext.origQueryParams.asInstanceOf[PromQlQueryParams] + val (partitions, _, _, _) = resolvePartitionsAndRoutingKeys(aggregate, queryParams) + val childQueryContext = queryContext.copy( + plannerParams = queryContext.plannerParams.copy(skipAggregatePresent = true)) + val execPlans = partitions.map(p => materializeForPartition(aggregate, p, childQueryContext)) + val exec = if (execPlans.size == 1) execPlans.head + else { + if ((aggregate.operator.equals(AggregationOperator.TopK) + || aggregate.operator.equals(AggregationOperator.BottomK) + || aggregate.operator.equals(AggregationOperator.CountValues) + ) && !canSupportMultiPartitionCalls(execPlans)) + throw new UnsupportedOperationException(s"Shard Key regex not supported for ${aggregate.operator}") + else { + val reducer = MultiPartitionReduceAggregateExec(queryContext, inProcessPlanDispatcher, + execPlans.sortWith((x, _) => !x.isInstanceOf[PromQlRemoteExec]), aggregate.operator, aggregate.params) + reducer.addRangeVectorTransformer(AggregatePresenter(aggregate.operator, aggregate.params, + RangeParams(queryParams.startSecs, queryParams.stepSecs, queryParams.endSecs))) + reducer + } + } + exec + } + PlanResult(Seq(plan)) + } + /** * Materialize any plan whose materialization strategy is governed by whether-or-not it * contains leaves that individually span partitions. 
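   * For example, a leaf with the (hypothetical) selector foo{_ws_="demo",_ns_="App-1"}
   * is "split" when its partition assignment changes within the queried time range, and
   * is not split when a single assignment covers the whole range.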
@@ -381,12 +439,21 @@ class MultiPartitionPlanner(val partitionLocationProvider: PartitionLocationProv private def materializePlanHandleSplitLeaf(logicalPlan: LogicalPlan, qContext: QueryContext): PlanResult = { val qParams = qContext.origQueryParams.asInstanceOf[PromQlQueryParams] - val hasMultiPartitionLeaves = LogicalPlan.findLeafLogicalPlans(logicalPlan) - .exists(getPartitions(_, qParams).size > 1) + // Create one plan per RawSeries/shard-key pair, then resolve its partitions. + // If any resides on more than one partition, the leaf is "split". + val hasMultiPartitionLeaves = + LogicalPlan.findLeafLogicalPlans(logicalPlan) + .filter(_.isInstanceOf[RawSeries]) + .flatMap { rs => + val rawFilters = LogicalPlan.getNonMetricShardKeyFilters(rs, dataset.options.nonMetricShardColumns) + val filters = rawFilters.flatMap(shardKeyMatcher(_)) + filters.map(rs.replaceFilters) + } + .exists(getPartitions(_, qParams).size > 1) if (hasMultiPartitionLeaves) { materializeSplitLeafPlan(logicalPlan, qContext) } else { logicalPlan match { - case agg: Aggregate => super.materializeAggregate(qContext, agg) + case agg: Aggregate => materializeAggregate(agg, qContext) case psw: PeriodicSeriesWithWindowing => materializePeriodicAndRawSeries(psw, qContext) case sqw: SubqueryWithWindowing => super.materializeSubqueryWithWindowing(qContext, sqw) case bj: BinaryJoin => materializeMultiPartitionBinaryJoinNoSplitLeaf(bj, qContext) @@ -394,6 +461,9 @@ class MultiPartitionPlanner(val partitionLocationProvider: PartitionLocationProv case aif: ApplyInstantFunction => super.materializeApplyInstantFunction(qContext, aif) case svdp: ScalarVaryingDoublePlan => super.materializeScalarPlan(qContext, svdp) case aaf: ApplyAbsentFunction => super.materializeAbsentFunction(qContext, aaf) + case ps: PeriodicSeries => materializePeriodicAndRawSeries(ps, qContext) + case rcm: RawChunkMeta => materializePeriodicAndRawSeries(rcm, qContext) + case rs: RawSeries => materializePeriodicAndRawSeries(rs, qContext) case x => throw new IllegalArgumentException(s"unhandled type: ${x.getClass}") }} } @@ -422,34 +492,132 @@ class MultiPartitionPlanner(val partitionLocationProvider: PartitionLocationProv } } + /** + * Merges each set of overlapping ranges into one range + * with the min/max start/end times, respectively. + * "Overlapping" is inclusive of start and end points. + * + * @return sorted sequence of these merged ranges + */ + private def mergeAndSortRanges(ranges: Seq[TimeRange]): Seq[TimeRange] = { + if (ranges.isEmpty) { + return Nil + } + val sortedRanges = ranges.sortBy(r => r.startMs) + val mergedRanges = new mutable.ArrayBuffer[TimeRange] + mergedRanges.append(sortedRanges.head) + for (range <- sortedRanges.tail) { + if (range.startMs > mergedRanges.last.endMs) { + // Cannot overlap with any of the previous ranges; create a new range. + mergedRanges.append(range) + } else { + // Extend the previous range to include this range's span. + mergedRanges(mergedRanges.size - 1) = TimeRange( + mergedRanges.last.startMs, + math.max(mergedRanges.last.endMs, range.endMs)) + } + } + mergedRanges + } + + /** + * Given a sorted sequence of disjoint time-ranges and a "total" range, + * inverts the ranges and crops the result to the total range. + * Range start/ends are inclusive. 
+ * + * Example: + * Ranges: ---- ------- --- --------- + * Total: ------------- + * Result: -- --- + * + * @param ranges : must be sorted and disjoint (range start/ends are inclusive) + */ + def invertRanges(ranges: Seq[TimeRange], + totalRange: TimeRange): Seq[TimeRange] = { + val invertedRanges = new ArrayBuffer[TimeRange]() + invertedRanges.append(totalRange) + var irange = 0 + + // ignore all ranges before totalRange + while (irange < ranges.size && + ranges(irange).endMs < totalRange.startMs) { + irange += 1 + } + + if (irange < ranges.size) { + // check if the first overlapping range also overlaps the totalRange.start + if (ranges(irange).startMs <= totalRange.startMs) { + // if it does, then we either need to adjust the totalRange in the result or remove it altogether. + if (ranges(irange).endMs < totalRange.endMs) { + invertedRanges(0) = TimeRange(ranges(irange).endMs + 1, totalRange.endMs) + irange += 1 + } else { + return Nil + } + } + } + + // add inverted ranges to the result until one crosses totalRange.endMs + while (irange < ranges.size && ranges(irange).endMs < totalRange.endMs) { + invertedRanges(invertedRanges.size - 1) = + TimeRange(invertedRanges.last.startMs, ranges(irange).startMs - 1) + invertedRanges.append(TimeRange(ranges(irange).endMs + 1, totalRange.endMs)) + irange += 1 + } + + // check if a range overlaps totalRange.endMs; if it does, adjust final inverted range + if (irange < ranges.size && ranges(irange).startMs < totalRange.endMs) { + invertedRanges(invertedRanges.size - 1) = + TimeRange(invertedRanges.last.startMs, ranges(irange).startMs - 1) + } + + invertedRanges + } + /** * Materializes a LogicalPlan with leaves that individually span multiple partitions. - * All "split-leaf" plans will fail to materialize (throw a BadQueryException) if they span more than - * one non-metric shard key prefix. + * All "split-leaf" plans will fail to materialize (throw a BadQueryException) if selectors contain + * at least two unique sets of shard-key filters. * Split-leaf plans that contain at least one BinaryJoin will additionally fail to materialize if any * of the plan's BinaryJoins contain an offset. + * Split plans with regex selectors will be materialized according to the union of all shard-key splits. 
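+   * Concretely (hypothetical): if _ns_=~"App.*" resolves to App-1 and App-2 with different
+   * assignment boundaries, only the time ranges valid for every resolved key are queried,
+   * and each such range is materialized against every partition that owns any of the keys.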
*/ private def materializeSplitLeafPlan(logicalPlan: LogicalPlan, qContext: QueryContext): PlanResult = { validateSplitLeafPlan(logicalPlan) val qParams = qContext.origQueryParams.asInstanceOf[PromQlQueryParams] - // get a mapping of assignments to time-ranges to query - val assignmentRanges = { - // "distinct" in case this is a BinaryJoin - val partitions = getPartitions(logicalPlan, qParams).distinct.sortBy(_.timeRange.startMs) + // get the seq of ranges and partitions to query + val (queryRanges, partitions) = { val timeRange = TimeRange(1000 * qParams.startSecs, 1000 * qParams.endSecs) + // get a set of PartitionAssignments per shard key + val rawShardKeys = LogicalPlan.getNonMetricShardKeyFilters(logicalPlan, dataset.options.nonMetricShardColumns) + val shardKeys = rawShardKeys.flatMap(shardKeyMatcher(_)) + val assignmentGroups = + shardKeys.map(logicalPlan.replaceFilters) + .map(getPartitions(_, qParams).distinct.sortBy(_.timeRange.startMs)) + // use each assignment group to find their individually-valid query ranges val lookbackMs = getLookBackMillis(logicalPlan).max val offsetMs = getOffsetMillis(logicalPlan).max val stepMsOpt = if (qParams.startSecs == qParams.endSecs) None else Some(1000 * qParams.stepSecs) - getAssignmentQueryRanges(partitions, timeRange, - lookbackMs = lookbackMs, offsetMs = offsetMs, stepMsOpt = stepMsOpt) + val validRangeGroups = assignmentGroups.map( + getAssignmentQueryRanges( + _, timeRange, + lookbackMs = lookbackMs, + offsetMs = offsetMs, + stepMsOpt = stepMsOpt + ).map(_._2)) + // Invert the valid ranges for each set, merge the invalid ranges across + // sets, then again invert the final range set. + val invalidRanges = validRangeGroups.flatMap(invertRanges(_, timeRange)) + (invertRanges(mergeAndSortRanges(invalidRanges), timeRange), + assignmentGroups.flatten.map(assign => assign.partitionName -> assign).toMap.values) } - // materialize a plan for each range/assignment pair - val plans = assignmentRanges.map { case (part, range) => + // materialize a plan for all range/partition pairs + val plans = queryRanges.flatMap { range => val newParams = qParams.copy(startSecs = range.startMs / 1000, endSecs = range.endMs / 1000) val newContext = qContext.copy(origQueryParams = newParams) - materializeForPartition(logicalPlan, part, newContext) + partitions.map(part => materializeForPartition(logicalPlan, part, newContext)) } // stitch if necessary val resPlan = if (plans.size == 1) { @@ -588,7 +756,7 @@ class MultiPartitionPlanner(val partitionLocationProvider: PartitionLocationProv private def createMetadataRemoteExec(qContext: QueryContext, partitionAssignment: PartitionAssignment, urlParams: Map[String, String]) = { val finalQueryContext = generateRemoteExecParams( - qContext, partitionAssignment.timeRange.startMs, partitionAssignment.timeRange.endMs) + qContext, "", partitionAssignment.timeRange.startMs, partitionAssignment.timeRange.endMs) val httpEndpoint = partitionAssignment.httpEndPoint + finalQueryContext.origQueryParams.asInstanceOf[PromQlQueryParams].remoteQueryPath.getOrElse("") MetadataRemoteExec(httpEndpoint, remoteHttpTimeoutMs, diff --git a/coordinator/src/main/scala/filodb.coordinator/queryplanner/PartitionLocationPlanner.scala b/coordinator/src/main/scala/filodb.coordinator/queryplanner/PartitionLocationPlanner.scala index 7daced5995..510bdb190e 100644 --- a/coordinator/src/main/scala/filodb.coordinator/queryplanner/PartitionLocationPlanner.scala +++ b/coordinator/src/main/scala/filodb.coordinator/queryplanner/PartitionLocationPlanner.scala 
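A minimal sketch of the shardKeyMatcher contract the planners above and below now share.
The regex expansion and namespace values here are illustrative assumptions, not this patch's
implementation; equalsOnlyShardKeyMatcher, defined in this file, is the Equals-only default:

  import filodb.core.query.ColumnFilter
  import filodb.core.query.Filter.{Equals, EqualsRegex}

  // Expands one raw shard-key filter group into zero or more concrete Equals-only groups.
  val toyMatcher: Seq[ColumnFilter] => Seq[Seq[ColumnFilter]] = { filters =>
    if (filters.exists(_.filter.isInstanceOf[EqualsRegex])) {
      // A real matcher would consult a shard-key index; these values are hypothetical.
      Seq(
        Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("App-1"))),
        Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("App-2"))))
    } else Seq(filters) // Equals-only groups are already concrete.
  }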
@@ -1,11 +1,23 @@ package filodb.coordinator.queryplanner import filodb.coordinator.queryplanner.LogicalPlanUtils.getLookBackMillis +import filodb.coordinator.queryplanner.PartitionLocationPlanner.equalsOnlyShardKeyMatcher import filodb.core.metadata.Dataset -import filodb.core.query.{PromQlQueryParams, QueryUtils} -import filodb.core.query.Filter.{Equals, EqualsRegex} +import filodb.core.query.{ColumnFilter, PromQlQueryParams} +import filodb.core.query.Filter.Equals import filodb.query.LogicalPlan +object PartitionLocationPlanner { + // Can be used as a default matcher function. + def equalsOnlyShardKeyMatcher(filters: Seq[ColumnFilter]): Seq[Seq[ColumnFilter]] = { + filters.foreach{ + case ColumnFilter(_, Equals(_)) => { /* do nothing */ } + case filter => throw new IllegalArgumentException("cannot match regex filters; filter: " + filter) + } + Seq(filters) + } +} + /** * Abstract class for planners that need getPartitions functionality. * @@ -17,9 +29,12 @@ import filodb.query.LogicalPlan * for each of these groups. */ abstract class PartitionLocationPlanner(dataset: Dataset, - partitionLocationProvider: PartitionLocationProvider) + partitionLocationProvider: PartitionLocationProvider, + shardKeyMatcher: Seq[ColumnFilter] => Seq[Seq[ColumnFilter]] = equalsOnlyShardKeyMatcher) extends QueryPlanner with DefaultPlanner { + private val nonMetricColumnSet = dataset.options.nonMetricShardColumns.toSet + // scalastyle:off method.length /** * Gets the partition Assignment for the given plan @@ -30,31 +45,23 @@ abstract class PartitionLocationPlanner(dataset: Dataset, //1. Get a Seq of all Leaf node filters val leafFilters = LogicalPlan.getColumnFilterGroup(logicalPlan) - val nonMetricColumnSet = dataset.options.nonMetricShardColumns.toSet //2. Filter from each leaf node filters to keep only nonShardKeyColumns and convert them to key value map val routingKeyMap: Seq[Map[String, String]] = leafFilters .filter(_.nonEmpty) .map(_.filter(col => nonMetricColumnSet.contains(col.column))) + .flatMap{ filters => + val hasNonEqualShardKeyFilter = filters.exists(!_.filter.isInstanceOf[Equals]) + if (hasNonEqualShardKeyFilter) shardKeyMatcher(filters.toSeq) else Seq(filters.toSeq) + } .map{ filters => filters.map { filter => - val values = filter.filter match { - case Equals(value) => Seq(value.toString) - // Split '|'-joined values if pipes are the only regex chars used. - case EqualsRegex(value: String) if QueryUtils.isPipeOnlyRegex(value) => - QueryUtils.splitAtUnescapedPipes(value) + val value = filter.filter match { + case Equals(value) => value.toString case _ => throw new IllegalArgumentException( - s"""shard keys must be filtered by equality or "|"-only regex. filter=${filter}""") + s"""shard keys must be filtered by equality. filter=${filter}""") } - (filter.column, values) - } - } - .flatMap{ keyValuesPairs => - // Get all possible value combos, then create a key->value map for each combo. - // Ordering the pairs first since the output of combinations() is also ordered. - val orderedPairs = keyValuesPairs.toSeq - val keys: Seq[String] = orderedPairs.map(_._1) - val values: Seq[Seq[String]] = orderedPairs.map(_._2) - QueryUtils.combinations(values).map(keys.zip(_).toMap) + (filter.column, value) + }.toMap } // 3. 
Determine the query time range diff --git a/coordinator/src/main/scala/filodb.coordinator/queryplanner/QueryUtils.scala b/coordinator/src/main/scala/filodb.coordinator/queryplanner/QueryUtils.scala new file mode 100644 index 0000000000..425eeabb00 --- /dev/null +++ b/coordinator/src/main/scala/filodb.coordinator/queryplanner/QueryUtils.scala @@ -0,0 +1,35 @@ +package filodb.coordinator.queryplanner + +import scala.collection.mutable + +/** + * Storage for miscellaneous utility functions. + */ +object QueryUtils { + /** + * Returns all possible sets of elements where exactly one element is + * chosen from each of the argument sequences. + * + * @param choices : all sequences should have at least one element. + * @return ordered sequences; each sequence is ordered such that the element + * at index i is chosen from the ith argument sequence. + */ + def combinations[T](choices: Seq[Seq[T]]): Seq[Seq[T]] = { + val running = new mutable.ArraySeq[T](choices.size) + val result = new mutable.ArrayBuffer[Seq[T]] + + def helper(iChoice: Int): Unit = { + if (iChoice == choices.size) { + result.append(Nil ++ running) + return + } + for (choice <- choices(iChoice)) { + running(iChoice) = choice + helper(iChoice + 1) + } + } + + helper(0) + result + } +} diff --git a/coordinator/src/main/scala/filodb.coordinator/queryplanner/ShardKeyRegexPlanner.scala b/coordinator/src/main/scala/filodb.coordinator/queryplanner/ShardKeyRegexPlanner.scala index ca819daf75..7e98655864 100644 --- a/coordinator/src/main/scala/filodb.coordinator/queryplanner/ShardKeyRegexPlanner.scala +++ b/coordinator/src/main/scala/filodb.coordinator/queryplanner/ShardKeyRegexPlanner.scala @@ -1,10 +1,8 @@ package filodb.coordinator.queryplanner -import scala.collection.{mutable, Seq} - import filodb.core.{StaticTargetSchemaProvider, TargetSchemaProvider} import filodb.core.metadata.{Dataset, DatasetOptions, Schemas} -import filodb.core.query.{ColumnFilter, Filter, PromQlQueryParams, QueryConfig, QueryContext, RangeParams} +import filodb.core.query.{ColumnFilter, PromQlQueryParams, QueryConfig, QueryContext, RangeParams} import filodb.query._ import filodb.query.LogicalPlan._ import filodb.query.exec._ @@ -34,18 +32,13 @@ class ShardKeyRegexPlanner(val dataset: Dataset, partitionLocationProvider: PartitionLocationProvider, config: QueryConfig, _targetSchemaProvider: TargetSchemaProvider = StaticTargetSchemaProvider()) - extends PartitionLocationPlanner(dataset, partitionLocationProvider) { + extends PartitionLocationPlanner(dataset, partitionLocationProvider, shardKeyMatcher) { override def queryConfig: QueryConfig = config override val schemas: Schemas = Schemas(dataset.schema) override val dsOptions: DatasetOptions = schemas.part.options private val datasetMetricColumn = dataset.options.metricColumn - private val nonMetricShardKeyColToIndex = dataset.options.shardKeyColumns - .filterNot(_ == dataset.options.metricColumn) - .zipWithIndex - .toMap - private def targetSchemaProvider(qContext: QueryContext): TargetSchemaProvider = { qContext.plannerParams.targetSchemaProviderOverride.getOrElse(_targetSchemaProvider) } @@ -65,6 +58,7 @@ class ShardKeyRegexPlanner(val dataset: Dataset, LogicalPlanUtils.getPushdownKeys( plan, targetSchemaProvider(qContext), + shardKeyMatcher, dataset.options.nonMetricShardColumns, getRawShardKeys, rs => getShardKeys(rs)) @@ -188,120 +182,7 @@ class ShardKeyRegexPlanner(val dataset: Dataset, generateExec(logicalPlan, shardKeyMatches, qContext) } - /** - * Updates the time params and query of the the argument 
PromQlQueryParams according to the argument LogicalPlan. - */ - private def updateQueryParams(logicalPlan: LogicalPlan, - queryParams: PromQlQueryParams): PromQlQueryParams = { - logicalPlan match { - case tls: TopLevelSubquery => { - val instantTime = queryParams.startSecs - queryParams.copy( - promQl = LogicalPlanParser.convertToQuery(logicalPlan), - startSecs = instantTime, - endSecs = instantTime - ) - } - case psp: PeriodicSeriesPlan => { - queryParams.copy( - promQl = LogicalPlanParser.convertToQuery(logicalPlan), - startSecs = psp.startMs / 1000, - endSecs = psp.endMs / 1000, - stepSecs = psp.stepMs / 1000 - ) - } - case _ => queryParams.copy(promQl = LogicalPlanParser.convertToQuery(logicalPlan)) - } - } - - // scalastyle:off method.length - /** - * Group shard keys by partition and generate an ExecPlan for each. - * Plans will match shard-keys by pipe-separated regex filters. For example, Suppose the following keys are provided: - * keys = {{a=1, b=1}, {a=1, b=2}, {a=2, b=3}} - * These will be grouped according to the partitions they occupy: - * part1 -> {{a=1, b=1}, {a=1, b=2}} - * part2 -> {{a=2, b=3}} - * Then a plan will be generated for each partition: - * plan1 -> {a=1, b=~"1|2"} - * plan2 -> {a=2, b=3} - * Additional plans are materialized per partition when multiple regex filters would otherwise be required. - * This prevents scenarios such as: - * keys = {{a=1, b=2}, {a=3, b=4}, {a=5, b=6}} - * These will be grouped according to the partitions they occupy: - * part1 -> {{a=1, b=2}, {a=3, b=4}} - * part2 -> {{a=5, b=6}} - * Then a plan will be generated for each partition: - * plan1 -> {a=~"1|3", b=~"2|4"} - * plan2 -> {a=5, b=6} - * This might erroneously read {a=1, b=4}, which was not included in the original key set. - */ - private def generateExecForEachPartition(logicalPlan: LogicalPlan, - keys: Seq[Seq[ColumnFilter]], - qContext: QueryContext): Seq[ExecPlan] = { - val queryParams = qContext.origQueryParams.asInstanceOf[PromQlQueryParams] - - // maps individual partitions to the set of shard-keys they contain. - val partitionsToKeys = new mutable.HashMap[String, mutable.Buffer[Seq[ColumnFilter]]]() - keys.foreach { key => - val newLogicalPlan = logicalPlan.replaceFilters(key) - // Querycontext should just have the part of query which has regex - // For example for exp(sum(test{_ws_ = "demo", _ns_ =~ "App.*"})), sub queries should be - // sum(test{_ws_ = "demo", _ns_ = "App-1"}), sum(test{_ws_ = "demo", _ns_ = "App-2"}) etc - val newQueryParams = updateQueryParams(newLogicalPlan, queryParams) - getPartitions(newLogicalPlan, newQueryParams) - .map(_.partitionName) - .distinct - .foreach(part => partitionsToKeys.getOrElseUpdate(part, new mutable.ArrayBuffer).append(key)) - } - - // Sort each key into the same order as nonMetricShardKeys, then group keys with the same prefix. - // A plan will be created for each group; this prevents the scenario mentioned in the javadoc. - // NOTE: this solution is not optimal in all cases, but it does guarantee groupings are valid. - val partitionToKeyGroups = partitionsToKeys.map{ case (partition, keys) => - val prefixGroups = keys - .map(key => key.sortBy(filter => nonMetricShardKeyColToIndex.getOrElse(filter.column, 0))) - .groupBy(_.dropRight(1)) - .values - (partition, prefixGroups) - } - - // Skip the aggregate presentation if there are more than one plans to materialize. 
- val skipAggregatePresentValue = partitionToKeyGroups.size > 1 || - partitionToKeyGroups.values.headOption.map(_.size).getOrElse(0) > 1 - - // Create one plan per key group. - partitionToKeyGroups.flatMap{ case (partition, keyGroups) => - // NOTE: partition is intentionally unused; the inner planner will again determine which partitions own the data. - keyGroups.map{ keys => - // Create a map of key->values, then create a ColumnFilter for each key. - val keyToValues = new mutable.HashMap[String, mutable.Set[String]]() - keys.flatten.foreach { filter => - // Find the key's list of values in the map (or create it), then add the filter's values. - val values = keyToValues.getOrElseUpdate(filter.column, new mutable.HashSet[String]()) - filter.filter.valuesStrings.map(_.toString).foreach(values.add) - } - val newFilters = keyToValues.map { case (key, values) => - val filter = if (values.size == 1) { - Filter.Equals(values.head) - } else { - // Concatenate values with "|" for multi-valued keys. - Filter.EqualsRegex(values.toSeq.sorted.mkString("|")) - } - ColumnFilter(key, filter) - }.toSeq - // Update the LogicalPlan with the new partition-specific filters, then materialize. - val newLogicalPlan = logicalPlan.replaceFilters(newFilters) - val newQueryParams = updateQueryParams(newLogicalPlan, queryParams) - val newQueryContext = qContext.copy(origQueryParams = newQueryParams, plannerParams = qContext.plannerParams. - copy(skipAggregatePresent = skipAggregatePresentValue)) - queryPlanner.materialize(newLogicalPlan, newQueryContext) - } - }.toSeq - } - // scalastyle:on method.length - - // FIXME: This will eventually be replaced with generateExecForEachPartition. + // This will be deprecated to reduce query fanout. @Deprecated private def generateExecForEachKey(logicalPlan: LogicalPlan, keys: Seq[Seq[ColumnFilter]], @@ -342,7 +223,7 @@ class ShardKeyRegexPlanner(val dataset: Dataset, keys: Seq[Seq[ColumnFilter]], qContext: QueryContext): Seq[ExecPlan] = if (qContext.plannerParams.reduceShardKeyRegexFanout) { - generateExecForEachPartition(logicalPlan, keys, qContext) + Seq(queryPlanner.materialize(logicalPlan, qContext)) } else { generateExecForEachKey(logicalPlan, keys, qContext) } diff --git a/coordinator/src/main/scala/filodb.coordinator/queryplanner/SingleClusterPlanner.scala b/coordinator/src/main/scala/filodb.coordinator/queryplanner/SingleClusterPlanner.scala index 207193d78e..ade36f624c 100644 --- a/coordinator/src/main/scala/filodb.coordinator/queryplanner/SingleClusterPlanner.scala +++ b/coordinator/src/main/scala/filodb.coordinator/queryplanner/SingleClusterPlanner.scala @@ -2,11 +2,9 @@ package filodb.coordinator.queryplanner import scala.collection.mutable.ArrayBuffer import scala.concurrent.duration._ - import akka.actor.ActorRef import com.typesafe.scalalogging.StrictLogging import kamon.Kamon - import filodb.coordinator.{ActorPlanDispatcher, ShardMapper} import filodb.coordinator.client.QueryCommands.StaticSpreadProvider import filodb.core.{SpreadProvider, StaticTargetSchemaProvider, TargetSchemaChange, TargetSchemaProvider} @@ -23,6 +21,7 @@ import filodb.query.LogicalPlan._ import filodb.query.exec.{LocalPartitionDistConcatExec, _} import filodb.query.exec.InternalRangeFunction.Last + // scalastyle:off file.size.limit object SingleClusterPlanner { @@ -62,6 +61,7 @@ class SingleClusterPlanner(val dataset: Dataset, clusterName: String, spreadProvider: SpreadProvider = StaticSpreadProvider(), _targetSchemaProvider: TargetSchemaProvider = StaticTargetSchemaProvider(), + 
shardKeyMatcher: Seq[ColumnFilter] => Seq[Seq[ColumnFilter]] = PartitionLocationPlanner.equalsOnlyShardKeyMatcher, // TODO(a_theimer) timeSplitEnabled: Boolean = false, minTimeRangeForSplitMs: => Long = 1.day.toMillis, splitSizeMs: => Long = 1.day.toMillis) @@ -96,8 +96,7 @@ class SingleClusterPlanner(val dataset: Dataset, } /** - * If TargetSchema exists and all of the target-schema label filters (equals or pipe-only EqualsRegex) are - * provided in the query, then return true. + * If TargetSchema exists and all of the target-schema label filters are provided in the query, then return true. * * @param filters Query Column Filters * @param targetSchema TargetSchema @@ -108,10 +107,9 @@ class SingleClusterPlanner(val dataset: Dataset, if (targetSchema.isEmpty || targetSchema.get.schema.isEmpty) { return false } - // Make sure each target-schema column is filtered by equality. + // Make sure each target-schema column has a filter. targetSchema.get.schema - .forall( tschemaCol => filters.exists( cf => - cf.column == tschemaCol && cf.filter.isInstanceOf[Equals])) + .forall(tschemaCol => filters.exists(cf => cf.column == tschemaCol && (shardColumns.contains(tschemaCol) || cf.filter.isInstanceOf[Equals]))) } /** @@ -250,53 +248,41 @@ class SingleClusterPlanner(val dataset: Dataset, } } + def resolveFilters(filters: Seq[ColumnFilter]): Seq[Seq[ColumnFilter]] = { + shardKeyMatcher(filters).map(LogicalPlanUtils.upsertFilters(filters, _)) + } + // scalastyle:off method.length - def shardsFromFilters(filters: Seq[ColumnFilter], + def shardsFromFilters(rawFilters: Seq[ColumnFilter], qContext: QueryContext, startMs: Long, endMs: Long, useTargetSchemaForShards: Seq[ColumnFilter] => Boolean = _ => false): Seq[Int] = { - require(shardColumns.nonEmpty || qContext.plannerParams.shardOverrides.nonEmpty, s"Dataset $dsRef does not have shard columns defined, and shard overrides were not mentioned") qContext.plannerParams.shardOverrides.getOrElse { - val shardColToValues: Seq[(String, Seq[String])] = shardColumns.map { shardCol => - // To compute the shard hash, filters must match all shard columns either by equality or EqualsRegex, - // where any match by EqualsRegex can use at most the '|' regex character. - val values = filters.find(f => f.column == shardCol) match { - case Some(ColumnFilter(_, Filter.Equals(filtVal: String))) => - Seq(filtVal) - case Some(ColumnFilter(_, Filter.EqualsRegex(filtVal: String))) - if QueryUtils.isPipeOnlyRegex(filtVal) => QueryUtils.splitAtUnescapedPipes(filtVal) - case Some(ColumnFilter(_, filter)) => - throw new BadQueryException(s"Found filter for shard column $shardCol but " + - s"$filter cannot be used for shard key routing") - case _ => - throw new BadQueryException(s"Could not find filter for shard key column " + - s"$shardCol, shard key hashing disabled") - } - val trimmedValues = values.map(value => RecordBuilder.trimShardColumn(dsOptions, shardCol, value)) - (shardCol, trimmedValues) + val hasNonEqualsShardKeyFilter = rawFilters.exists { filter => + shardColumns.contains(filter.column) && !filter.filter.isInstanceOf[Equals] } - - // Get all (ordered) combinations of values, then create (key,value) pairs for each. - val shardKeyValuePairs: Seq[Seq[(String, String)]] = { - val keys = shardColToValues.map(_._1) - val valueGroups = shardColToValues.map(_._2) - QueryUtils.combinations(valueGroups).map(keys.zip(_)) - } - - // For each set of pairs, create a set of Equals filters and compute the shards for each. 
- shardKeyValuePairs.flatMap{ kvPairs => - val kvMap = kvPairs.toMap - val updFilters = filters.map{ filt => - kvMap.get(filt.column) - .map(value => ColumnFilter(filt.column, Filter.Equals(value))) - .getOrElse(filt) + val filterGroups = if (hasNonEqualsShardKeyFilter) resolveFilters(rawFilters) else Seq(rawFilters) + filterGroups.flatMap{ filters => + val shardValues = shardColumns.map { shardCol => + // To compute the shard hash, filters must match all shard columns by equality. + val value = filters.find(f => f.column == shardCol) match { + case Some(ColumnFilter(_, Filter.Equals(filtVal: String))) => filtVal + case Some(ColumnFilter(_, filter)) => + throw new BadQueryException(s"Found filter for shard column $shardCol but " + + s"$filter cannot be used for shard key routing") + case _ => + throw new BadQueryException(s"Could not find filter for shard key column " + + s"$shardCol, shard key hashing disabled") } - shardsFromValues(kvPairs, updFilters, qContext, startMs, endMs, useTargetSchemaForShards) + RecordBuilder.trimShardColumn(dsOptions, shardCol, value) + } + val shardPairs = shardColumns.zip(shardValues) + shardsFromValues(shardPairs, filters, qContext, startMs, endMs, useTargetSchemaForShards) }.distinct } } @@ -435,9 +421,10 @@ class SingleClusterPlanner(val dataset: Dataset, LogicalPlanUtils.getPushdownKeys( plan, targetSchemaProvider(qContext), + shardKeyMatcher, dataset.options.nonMetricShardColumns, getRawPushdownShards, - rs => LogicalPlan.getRawSeriesFilters(rs)) + rs => LogicalPlan.getRawSeriesFilters(rs).flatMap(shardKeyMatcher(_))) } /** @@ -832,23 +819,14 @@ class SingleClusterPlanner(val dataset: Dataset, PlanResult(metaExec) } - // allow metadataQueries to get list of shards from shardKeyFilters only if all shardCols have Equals filter - // or EqualsRegex filter with only the pipe special character. + // allow metadataQueries to get list of shards from shardKeyFilters only if + // filters are given for all shard-key columns private def canGetShardsFromFilters(renamedFilters: Seq[ColumnFilter], qContext: QueryContext): Boolean = { - if (qContext.plannerParams.shardOverrides.isEmpty && shardColumns.nonEmpty) { + qContext.plannerParams.shardOverrides.isEmpty && + shardColumns.nonEmpty && shardColumns.toSet.subsetOf(renamedFilters.map(_.column).toSet) && - shardColumns.forall { shardCol => - // So to compute the shard hash we need shardCol == value filter (exact equals) for each shardColumn - renamedFilters.find(f => f.column == shardCol) match { - case Some(ColumnFilter(_, Filter.Equals(_: String))) => true - case Some(ColumnFilter(_, Filter.EqualsRegex(value: String))) => - // Make sure no regex chars except the pipe, which can be used to concatenate values. 
- QueryUtils.isPipeOnlyRegex(value) - case _ => false - } - } - } else false + shardColumns.forall { shardCol => renamedFilters.exists(f => f.column == shardCol) } } private def materializeLabelNames(qContext: QueryContext, diff --git a/coordinator/src/test/scala/filodb.coordinator/queryplanner/MultiPartitionPlannerSpec.scala b/coordinator/src/test/scala/filodb.coordinator/queryplanner/MultiPartitionPlannerSpec.scala index f600c2990d..950bd40c17 100644 --- a/coordinator/src/test/scala/filodb.coordinator/queryplanner/MultiPartitionPlannerSpec.scala +++ b/coordinator/src/test/scala/filodb.coordinator/queryplanner/MultiPartitionPlannerSpec.scala @@ -101,6 +101,7 @@ class MultiPartitionPlannerSpec extends AnyFunSpec with Matchers with PlanValida PlannerParams(processMultiPartition = true))) val stitchRvsExec = execPlan.asInstanceOf[StitchRvsExec] + println(execPlan.printTree()) stitchRvsExec.children.size shouldEqual (2) stitchRvsExec.children(0).isInstanceOf[PromQlRemoteExec] shouldEqual true stitchRvsExec.children(1).isInstanceOf[PromQlRemoteExec] shouldEqual true diff --git a/coordinator/src/test/scala/filodb.coordinator/queryplanner/PlannerHierarchySpec.scala b/coordinator/src/test/scala/filodb.coordinator/queryplanner/PlannerHierarchySpec.scala index 60630cf578..d9c64b409b 100644 --- a/coordinator/src/test/scala/filodb.coordinator/queryplanner/PlannerHierarchySpec.scala +++ b/coordinator/src/test/scala/filodb.coordinator/queryplanner/PlannerHierarchySpec.scala @@ -49,28 +49,50 @@ class PlannerHierarchySpec extends AnyFunSpec with Matchers with PlanValidationS case class Planners(spp: SinglePartitionPlanner, lt: LongTimeRangePlanner, raw: SingleClusterPlanner, ds: SingleClusterPlanner, rr: SingleClusterPlanner) - def getPlanners(nShards: Int, dataset: Dataset): Planners = { + def getPlanners(nShards: Int, dataset: Dataset, shardKeyMatcher: Seq[ColumnFilter] => Seq[Seq[ColumnFilter]]): Planners = { val mapper = new ShardMapper(nShards) for {i <- 0 until nShards} mapper.registerNode(Seq(i), node) def mapperRef = mapper val rawPlanner = new SingleClusterPlanner(dataset, schemas, mapperRef, - earliestRetainedTimestampFn = now - rawRetention, queryConfig, "raw") + earliestRetainedTimestampFn = now - rawRetention, queryConfig, "raw", shardKeyMatcher = shardKeyMatcher) val downsamplePlanner = new SingleClusterPlanner(dataset, schemas, mapperRef, - earliestRetainedTimestampFn = now - downsampleRetention, queryConfig, "downsample") + earliestRetainedTimestampFn = now - downsampleRetention, queryConfig, "downsample", shardKeyMatcher = shardKeyMatcher) val longTermPlanner = new LongTimeRangePlanner(rawPlanner, downsamplePlanner, earliestRawTimestampFn = now - rawRetention, latestDownsampleTimestampFn = now - timeToDownsample, inProcessDispatcher, queryConfig, dataset) val recordingRulesPlanner = new SingleClusterPlanner(dataset, schemas, mapperRef, earliestRetainedTimestampFn = now - rrRetention, - queryConfig, "recordingRules") + queryConfig, "recordingRules", shardKeyMatcher = shardKeyMatcher) val planners = Map("longTerm" -> longTermPlanner, "recordingRules" -> recordingRulesPlanner) val singlePartitionPlanner = new SinglePartitionPlanner(planners, plannerSelector, dataset, queryConfig) Planners(singlePartitionPlanner, longTermPlanner, rawPlanner, downsamplePlanner, recordingRulesPlanner) } - val (singlePartitionPlanner, longTermPlanner, rawPlanner) = { - val planners = getPlanners(nShards = 2, dataset) - (planners.spp, planners.lt, planners.raw) + private val shardKeyMatcherFn = 
(shardColumnFilters: Seq[ColumnFilter]) => { + // we may have mixed of a regex filter and a non-regex filter. + if (shardColumnFilters.exists(f => f.column == "_ns_" && f.filter.isInstanceOf[EqualsRegex])) { + // to ensure that tests dont call something else that is not configured + require(shardColumnFilters.exists(f => f.column == "_ns_" && f.filter.isInstanceOf[EqualsRegex] + && ( + f.filter.asInstanceOf[EqualsRegex].pattern.toString == ".*Ns" + || f.filter.asInstanceOf[EqualsRegex].pattern.toString == "localNs.*"))) + val nsCol = shardColumnFilters.find(_.column == "_ns_").get + if (nsCol.filter.asInstanceOf[EqualsRegex].pattern.toString == "localNs.*") { + Seq( + Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("localNs"))), + Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("localNs1"))) + ) + } else { + Seq( + Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("localNs"))), + Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("remoteNs"))) + ) + } + } else if (shardColumnFilters.exists(f => f.column == "_ns_" && f.filter.isInstanceOf[Equals])) { + Seq(shardColumnFilters) + } else { + Nil + } // i.e. filters for a scalar } private val oneRemotePartitionLocationProvider = new PartitionLocationProvider { @@ -103,45 +125,13 @@ class PlannerHierarchySpec extends AnyFunSpec with Matchers with PlanValidationS case "remoteNs1" => List(PartitionAssignment("remotePartition1", "remote1Partition-url1", TimeRange(timeRange.startMs, timeRange.endMs))) - case _ => throw new IllegalArgumentException("nope") + case key => throw new IllegalArgumentException("unexpected routing key: " + key) } } override def getMetadataPartitions(nonMetricShardKeyFilters: Seq[ColumnFilter], timeRange: TimeRange): List[PartitionAssignment] = ??? } - val oneRemoteMultiPartitionPlanner = new MultiPartitionPlanner(oneRemotePartitionLocationProvider, singlePartitionPlanner, - "localPartition", dataset, queryConfig) - val twoRemoteMultiPartitionPlanner = new MultiPartitionPlanner(twoRemotePartitionLocationProvider, singlePartitionPlanner, - "localPartition", dataset, queryConfig) - - private val shardKeyMatcherFn = (shardColumnFilters: Seq[ColumnFilter]) => { - // we may have mixed of a regex filter and a non-regex filter. - if (shardColumnFilters.exists(f => f.column == "_ns_" && f.filter.isInstanceOf[EqualsRegex])) { - // to ensure that tests dont call something else that is not configured - require(shardColumnFilters.exists(f => f.column == "_ns_" && f.filter.isInstanceOf[EqualsRegex] - && ( - f.filter.asInstanceOf[EqualsRegex].pattern.toString == ".*Ns" - || f.filter.asInstanceOf[EqualsRegex].pattern.toString == "localNs.*"))) - val nsCol = shardColumnFilters.find(_.column == "_ns_").get - if (nsCol.filter.asInstanceOf[EqualsRegex].pattern.toString == "localNs.*") { - Seq( - Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("localNs"))), - Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("localNs1"))) - ) - } else { - Seq( - Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("localNs"))), - Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("remoteNs"))) - ) - } - } else if (shardColumnFilters.exists(f => f.column == "_ns_" && f.filter.isInstanceOf[Equals])) { - Seq(shardColumnFilters) - } else { - Nil - } // i.e. 
filters for a scalar - } - private val oneRemoteShardKeyMatcherFn = (shardColumnFilters: Seq[ColumnFilter]) => { if (shardColumnFilters.exists(f => f.column == "_ns_" && f.filter.isInstanceOf[EqualsRegex])) { // to ensure that tests dont call something else that is not configured @@ -173,9 +163,24 @@ class PlannerHierarchySpec extends AnyFunSpec with Matchers with PlanValidationS } // i.e. filters for a scalar } + + val (singlePartitionPlanner, longTermPlanner, rawPlanner) = { + val planners = getPlanners(nShards = 2, dataset, shardKeyMatcherFn) + (planners.spp, planners.lt, planners.raw) + } + val oneRemotePlanners = getPlanners(nShards = 2, dataset, oneRemoteShardKeyMatcherFn) + val twoRemotePlanners = getPlanners(nShards = 2, dataset, twoRemoteShardKeyMatcherFn) + + val defaultMultiPartitionPlanner = new MultiPartitionPlanner(oneRemotePartitionLocationProvider, singlePartitionPlanner, + "localPartition", dataset, queryConfig, shardKeyMatcher = shardKeyMatcherFn) + val oneRemoteMultiPartitionPlanner = new MultiPartitionPlanner(oneRemotePartitionLocationProvider, oneRemotePlanners.spp, + "localPartition", dataset, queryConfig, shardKeyMatcher = oneRemoteShardKeyMatcherFn) + val twoRemoteMultiPartitionPlanner = new MultiPartitionPlanner(twoRemotePartitionLocationProvider, twoRemotePlanners.spp, + "localPartition", dataset, queryConfig, shardKeyMatcher = twoRemoteShardKeyMatcherFn) + private val targetSchemaProvider = StaticTargetSchemaProvider() - val rootPlanner = new ShardKeyRegexPlanner(dataset, oneRemoteMultiPartitionPlanner, shardKeyMatcherFn, oneRemotePartitionLocationProvider, queryConfig) + val rootPlanner = new ShardKeyRegexPlanner(dataset, defaultMultiPartitionPlanner, shardKeyMatcherFn, oneRemotePartitionLocationProvider, queryConfig) val oneRemoteRootPlanner = new ShardKeyRegexPlanner(dataset, oneRemoteMultiPartitionPlanner, oneRemoteShardKeyMatcherFn, oneRemotePartitionLocationProvider, queryConfig) val twoRemoteRootPlanner = new ShardKeyRegexPlanner(dataset, twoRemoteMultiPartitionPlanner, twoRemoteShardKeyMatcherFn, twoRemotePartitionLocationProvider, queryConfig) @@ -260,32 +265,31 @@ class PlannerHierarchySpec extends AnyFunSpec with Matchers with PlanValidationS val execPlan = rootPlanner.materialize(lp, QueryContext(origQueryParams = queryParams)) val expected = """T~AggregatePresenter(aggrOp=Count, aggrParams=List(), rangeParams=RangeParams(1633913330,300,1634777330)) - |-E~LocalPartitionReduceAggregateExec(aggrOp=Count, aggrParams=List()) on InProcessPlanDispatcher(QueryConfig(10 seconds,300000,1,50,antlr,true,true,None,Some(10000),None,None,25,true,false,true,Set(),Some(plannerSelector),Map(filodb-query-exec-metadataexec -> 65536, filodb-query-exec-aggregate-large-container -> 65536))) + |-E~LocalPartitionReduceAggregateExec(aggrOp=Count, aggrParams=List()) on InProcessPlanDispatcher |--T~AggregateMapReduce(aggrOp=Count, aggrParams=List(), without=List(), by=List()) - |---E~SetOperatorExec(binaryOp=LUnless, on=List(instance), ignoring=List()) on InProcessPlanDispatcher(QueryConfig(10 seconds,300000,1,50,antlr,true,true,None,Some(10000),None,None,25,true,false,true,Set(),Some(plannerSelector),Map(filodb-query-exec-metadataexec -> 65536, filodb-query-exec-aggregate-large-container -> 65536))) - |----E~StitchRvsExec() on InProcessPlanDispatcher(QueryConfig(10 seconds,300000,1,50,antlr,true,true,None,None,None,None,100,false,false,true,Set(),None,Map(filodb-query-exec-aggregate-large-container -> 65536, filodb-query-exec-metadataexec -> 8192))) - 
|-----E~LocalPartitionDistConcatExec() on ActorPlanDispatcher(Actor[akka://default/system/testProbe-1#1825385416],raw) + |---E~SetOperatorExec(binaryOp=LUnless, on=List(instance), ignoring=List()) on InProcessPlanDispatcher + |----E~StitchRvsExec() on InProcessPlanDispatcher + |-----E~LocalPartitionDistConcatExec() on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) |------T~PeriodicSamplesMapper(start=1634173130000, step=300000, end=1634777330000, window=None, functionId=None, rawSource=true, offsetMs=None) - |-------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1634172830000,1634777330000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testProbe-1#1825385416],raw) + |-------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1634172830000,1634777330000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) |------T~PeriodicSamplesMapper(start=1634173130000, step=300000, end=1634777330000, window=None, functionId=None, rawSource=true, offsetMs=None) - |-------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1634172830000,1634777330000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testProbe-1#1825385416],raw) - |-----E~LocalPartitionDistConcatExec() on ActorPlanDispatcher(Actor[akka://default/system/testProbe-1#1825385416],downsample) + |-------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1634172830000,1634777330000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) + |-----E~LocalPartitionDistConcatExec() on ActorPlanDispatcher(Actor[akka://default/system/testActor],downsample) |------T~PeriodicSamplesMapper(start=1633913330000, step=300000, end=1634172830000, window=None, functionId=None, rawSource=true, offsetMs=None) - |-------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1633913030000,1634172830000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testProbe-1#1825385416],downsample) + |-------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1633913030000,1634172830000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],downsample) |------T~PeriodicSamplesMapper(start=1633913330000, step=300000, end=1634172830000, window=None, functionId=None, rawSource=true, offsetMs=None) - 
|-------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1633913030000,1634172830000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testProbe-1#1825385416],downsample) - |----E~StitchRvsExec() on InProcessPlanDispatcher(QueryConfig(10 seconds,300000,1,50,antlr,true,true,None,None,None,None,100,false,false,true,Set(),None,Map(filodb-query-exec-aggregate-large-container -> 65536, filodb-query-exec-metadataexec -> 8192))) - |-----E~LocalPartitionDistConcatExec() on ActorPlanDispatcher(Actor[akka://default/system/testProbe-1#1825385416],raw) + |-------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1633913030000,1634172830000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],downsample) + |----E~StitchRvsExec() on InProcessPlanDispatcher + |-----E~LocalPartitionDistConcatExec() on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) |------T~PeriodicSamplesMapper(start=1634173130000, step=300000, end=1634777330000, window=None, functionId=None, rawSource=true, offsetMs=None) - |-------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1634172830000,1634777330000), filters=List(ColumnFilter(instance,EqualsRegex(.*)), ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,EqualsRegex(localNs|localNs1)), ColumnFilter(_metric_,Equals(bar))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testProbe-1#1825385416],raw) + |-------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1634172830000,1634777330000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,EqualsRegex(localNs.*)), ColumnFilter(instance,EqualsRegex(.*)), ColumnFilter(_metric_,Equals(bar))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) |------T~PeriodicSamplesMapper(start=1634173130000, step=300000, end=1634777330000, window=None, functionId=None, rawSource=true, offsetMs=None) - |-------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1634172830000,1634777330000), filters=List(ColumnFilter(instance,EqualsRegex(.*)), ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,EqualsRegex(localNs|localNs1)), ColumnFilter(_metric_,Equals(bar))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testProbe-1#1825385416],raw) - |-----E~LocalPartitionDistConcatExec() on ActorPlanDispatcher(Actor[akka://default/system/testProbe-1#1825385416],downsample) + |-------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1634172830000,1634777330000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,EqualsRegex(localNs.*)), ColumnFilter(instance,EqualsRegex(.*)), ColumnFilter(_metric_,Equals(bar))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) + |-----E~LocalPartitionDistConcatExec() on ActorPlanDispatcher(Actor[akka://default/system/testActor],downsample) |------T~PeriodicSamplesMapper(start=1633913330000, step=300000, end=1634172830000, window=None, 
functionId=None, rawSource=true, offsetMs=None) - |-------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1633913030000,1634172830000), filters=List(ColumnFilter(instance,EqualsRegex(.*)), ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,EqualsRegex(localNs|localNs1)), ColumnFilter(_metric_,Equals(bar))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testProbe-1#1825385416],downsample) + |-------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1633913030000,1634172830000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,EqualsRegex(localNs.*)), ColumnFilter(instance,EqualsRegex(.*)), ColumnFilter(_metric_,Equals(bar))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],downsample) |------T~PeriodicSamplesMapper(start=1633913330000, step=300000, end=1634172830000, window=None, functionId=None, rawSource=true, offsetMs=None) - |-------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1633913030000,1634172830000), filters=List(ColumnFilter(instance,EqualsRegex(.*)), ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,EqualsRegex(localNs|localNs1)), ColumnFilter(_metric_,Equals(bar))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testProbe-1#1825385416],downsample)""".stripMargin - println(execPlan.printTree()) + |-------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1633913030000,1634172830000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,EqualsRegex(localNs.*)), ColumnFilter(instance,EqualsRegex(.*)), ColumnFilter(_metric_,Equals(bar))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],downsample)""".stripMargin validatePlan(execPlan, expected) } @@ -299,23 +303,23 @@ class PlannerHierarchySpec extends AnyFunSpec with Matchers with PlanValidationS plannerParams = PlannerParams(processMultiPartition = true))) val expected = """T~AggregatePresenter(aggrOp=Count, aggrParams=List(), rangeParams=RangeParams(1633913330,300,1634777330)) - |-E~LocalPartitionReduceAggregateExec(aggrOp=Count, aggrParams=List()) on InProcessPlanDispatcher(QueryConfig(10 seconds,300000,1,50,antlr,true,true,None,Some(10000),None,None,25,true,false,true,Set(),Some(plannerSelector),Map(filodb-query-exec-metadataexec -> 65536, filodb-query-exec-aggregate-large-container -> 65536))) + |-E~LocalPartitionReduceAggregateExec(aggrOp=Count, aggrParams=List()) on InProcessPlanDispatcher |--T~AggregateMapReduce(aggrOp=Count, aggrParams=List(), without=List(), by=List()) - |---E~SetOperatorExec(binaryOp=LUnless, on=List(instance), ignoring=List()) on InProcessPlanDispatcher(QueryConfig(10 seconds,300000,1,50,antlr,true,true,None,Some(10000),None,None,25,true,false,true,Set(),Some(plannerSelector),Map(filodb-query-exec-metadataexec -> 65536, filodb-query-exec-aggregate-large-container -> 65536))) - |----E~StitchRvsExec() on InProcessPlanDispatcher(QueryConfig(10 seconds,300000,1,50,antlr,true,true,None,None,None,None,100,false,false,true,Set(),None,Map(filodb-query-exec-aggregate-large-container -> 65536, filodb-query-exec-metadataexec -> 8192))) - |-----E~LocalPartitionDistConcatExec() on ActorPlanDispatcher(Actor[akka://default/system/testProbe-1#-2061408838],raw) + |---E~SetOperatorExec(binaryOp=LUnless, on=List(instance), ignoring=List()) on InProcessPlanDispatcher + |----E~StitchRvsExec() on 
InProcessPlanDispatcher + |-----E~LocalPartitionDistConcatExec() on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) |------T~PeriodicSamplesMapper(start=1634173130000, step=300000, end=1634777330000, window=None, functionId=None, rawSource=true, offsetMs=None) - |-------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1634172830000,1634777330000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testProbe-1#-2061408838],raw) + |-------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1634172830000,1634777330000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) |------T~PeriodicSamplesMapper(start=1634173130000, step=300000, end=1634777330000, window=None, functionId=None, rawSource=true, offsetMs=None) - |-------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1634172830000,1634777330000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testProbe-1#-2061408838],raw) - |-----E~LocalPartitionDistConcatExec() on ActorPlanDispatcher(Actor[akka://default/system/testProbe-1#-2061408838],downsample) + |-------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1634172830000,1634777330000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) + |-----E~LocalPartitionDistConcatExec() on ActorPlanDispatcher(Actor[akka://default/system/testActor],downsample) |------T~PeriodicSamplesMapper(start=1633913330000, step=300000, end=1634172830000, window=None, functionId=None, rawSource=true, offsetMs=None) - |-------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1633913030000,1634172830000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testProbe-1#-2061408838],downsample) + |-------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1633913030000,1634172830000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],downsample) |------T~PeriodicSamplesMapper(start=1633913330000, step=300000, end=1634172830000, window=None, functionId=None, rawSource=true, offsetMs=None) - |-------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1633913030000,1634172830000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(instance,Equals(Inst-1)), 
ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testProbe-1#-2061408838],downsample) - |----E~MultiPartitionDistConcatExec() on InProcessPlanDispatcher(QueryConfig(10 seconds,300000,1,50,antlr,true,true,None,Some(10000),None,None,25,true,false,true,Set(),Some(plannerSelector),Map(filodb-query-exec-metadataexec -> 65536, filodb-query-exec-aggregate-large-container -> 65536))) - |-----E~PromQlRemoteExec(PromQlQueryParams(bar{instance=~".*",_ws_="demo",_ns_="remoteNs1"},1633913330,300,1634777330,None,false), PlannerParams(filodb,None,None,None,None,60000,PerQueryLimits(1000000,18000000,100000,100000,300000000,1000000,200000000),PerQueryLimits(50000,15000000,50000,50000,150000000,500000,100000000),None,None,None,false,86400000,86400000,true,true,false,false,true), queryEndpoint=remote1Partition-url1, requestTimeoutMs=10000) on InProcessPlanDispatcher(QueryConfig(10 seconds,300000,1,50,antlr,true,true,None,Some(10000),None,None,25,true,false,true,Set(),Some(plannerSelector),Map(filodb-query-exec-metadataexec -> 65536, filodb-query-exec-aggregate-large-container -> 65536))) - |-----E~PromQlRemoteExec(PromQlQueryParams(bar{instance=~".*",_ws_="demo",_ns_="remoteNs0"},1633913330,300,1634777330,None,false), PlannerParams(filodb,None,None,None,None,60000,PerQueryLimits(1000000,18000000,100000,100000,300000000,1000000,200000000),PerQueryLimits(50000,15000000,50000,50000,150000000,500000,100000000),None,None,None,false,86400000,86400000,true,true,false,false,true), queryEndpoint=remotePartition-url0, requestTimeoutMs=10000) on InProcessPlanDispatcher(QueryConfig(10 seconds,300000,1,50,antlr,true,true,None,Some(10000),None,None,25,true,false,true,Set(),Some(plannerSelector),Map(filodb-query-exec-metadataexec -> 65536, filodb-query-exec-aggregate-large-container -> 65536)))""".stripMargin + |-------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1633913030000,1634172830000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],downsample) + |----E~MultiPartitionDistConcatExec() on InProcessPlanDispatcher + |-----E~PromQlRemoteExec(PromQlQueryParams(bar{_ws_="demo",_ns_=~"remoteNs.*",instance=~".*"},100,1,1000,None,false), PlannerParams(filodb,None,None,None,None,60000,PerQueryLimits(1000000,18000000,100000,100000,300000000,1000000,200000000),PerQueryLimits(50000,15000000,50000,50000,150000000,500000,100000000),None,None,None,false,86400000,86400000,false,true,false,false,true), queryEndpoint=remotePartition-url0, requestTimeoutMs=10000) on InProcessPlanDispatcher + |-----E~PromQlRemoteExec(PromQlQueryParams(bar{_ws_="demo",_ns_=~"remoteNs.*",instance=~".*"},101,1,1000,None,false), PlannerParams(filodb,None,None,None,None,60000,PerQueryLimits(1000000,18000000,100000,100000,300000000,1000000,200000000),PerQueryLimits(50000,15000000,50000,50000,150000000,500000,100000000),None,None,None,false,86400000,86400000,false,true,false,false,true), queryEndpoint=remote1Partition-url1, requestTimeoutMs=10000) on InProcessPlanDispatcher""".stripMargin validatePlan(execPlan, expected) } @@ -965,23 +969,24 @@ class PlannerHierarchySpec extends AnyFunSpec with Matchers with PlanValidationS val expected = """T~AggregatePresenter(aggrOp=Sum, aggrParams=List(), rangeParams=RangeParams(1633913330,300,1634777330)) - 
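[editor's note] A reading aid for these expected trees: "E~" entries are ExecPlan nodes, "T~" entries are the RangeVectorTransformers attached to the plan printed beneath them, and each additional "-" marks one level of nesting. Note also the pattern running through this whole hunk: the "-" side pins down unstable details (the full QueryConfig(...) dump inside InProcessPlanDispatcher, the testProbe-1#<hash> actor names), while the "+" side expects the stable forms InProcessPlanDispatcher and .../testActor. Together with the PlanValidationSpec change in this patch's diffstat, validation now appears to mask those non-deterministic fragments before comparing. A hypothetical sketch, not the actual PlanValidationSpec code, of the actor-name half of such masking:

    // Rewrites e.g. "testProbe-1#1825385416" to the stable "testActor".
    private def maskActorName(tree: String): String =
      tree.replaceAll("""testProbe-\d+#-?\d+""", "testActor")
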
|-E~MultiPartitionReduceAggregateExec(aggrOp=Sum, aggrParams=List()) on InProcessPlanDispatcher(filodb.core.query.QueryConfig@5974b7e8) - |--E~StitchRvsExec() on InProcessPlanDispatcher(filodb.core.query.EmptyQueryConfig$@2839e3c8) - |---E~LocalPartitionReduceAggregateExec(aggrOp=Sum, aggrParams=List()) on ActorPlanDispatcher(Actor[akka://default/system/testProbe-1#1211350849],raw) + |-E~MultiPartitionReduceAggregateExec(aggrOp=Sum, aggrParams=List()) on InProcessPlanDispatcher + |--E~StitchRvsExec() on InProcessPlanDispatcher + |---E~LocalPartitionReduceAggregateExec(aggrOp=Sum, aggrParams=List()) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) |----T~AggregateMapReduce(aggrOp=Sum, aggrParams=List(), without=List(), by=List()) |-----T~PeriodicSamplesMapper(start=1634173130000, step=300000, end=1634777330000, window=None, functionId=None, rawSource=true, offsetMs=None) - |------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1634172830000,1634777330000), filters=List(ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testProbe-1#1211350849],raw) + |------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1634172830000,1634777330000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,EqualsRegex(.*Ns)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) |----T~AggregateMapReduce(aggrOp=Sum, aggrParams=List(), without=List(), by=List()) |-----T~PeriodicSamplesMapper(start=1634173130000, step=300000, end=1634777330000, window=None, functionId=None, rawSource=true, offsetMs=None) - |------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1634172830000,1634777330000), filters=List(ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testProbe-1#1211350849],raw) - |---E~LocalPartitionReduceAggregateExec(aggrOp=Sum, aggrParams=List()) on ActorPlanDispatcher(Actor[akka://default/system/testProbe-1#1211350849],downsample) + |------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1634172830000,1634777330000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,EqualsRegex(.*Ns)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) + |---E~LocalPartitionReduceAggregateExec(aggrOp=Sum, aggrParams=List()) on ActorPlanDispatcher(Actor[akka://default/system/testActor],downsample) |----T~AggregateMapReduce(aggrOp=Sum, aggrParams=List(), without=List(), by=List()) |-----T~PeriodicSamplesMapper(start=1633913330000, step=300000, end=1634172830000, window=None, functionId=None, rawSource=true, offsetMs=None) - |------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1633913030000,1634172830000), filters=List(ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on 
ActorPlanDispatcher(Actor[akka://default/system/testProbe-1#1211350849],downsample) + |------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1633913030000,1634172830000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,EqualsRegex(.*Ns)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],downsample) |----T~AggregateMapReduce(aggrOp=Sum, aggrParams=List(), without=List(), by=List()) |-----T~PeriodicSamplesMapper(start=1633913330000, step=300000, end=1634172830000, window=None, functionId=None, rawSource=true, offsetMs=None) - |------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1633913030000,1634172830000), filters=List(ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testProbe-1#1211350849],downsample) - |--E~PromQlRemoteExec(PromQlQueryParams(sum(foo{instance="Inst-1",_ws_="demo",_ns_="remoteNs"}),1633913330,300,1634777330,None,false), PlannerParams(filodb,None,None,None,None,60000,PerQueryLimits(1000000,18000000,100000,100000,300000000,1000000,200000000),PerQueryLimits(50000,15000000,50000,50000,150000000,500000,100000000),None,None,None,false,86400000,86400000,true,true,false,false,true), queryEndpoint=remotePartition-url, requestTimeoutMs=10000) on InProcessPlanDispatcher(filodb.core.query.QueryConfig@5974b7e8)""".stripMargin + |------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1633913030000,1634172830000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,EqualsRegex(.*Ns)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],downsample) + |--E~PromQlRemoteExec(PromQlQueryParams(sum(foo{_ws_ = "demo", _ns_ =~ ".*Ns", instance = "Inst-1" }) + |,1633913330,300,1634777330,None,false), PlannerParams(filodb,None,None,None,None,60000,PerQueryLimits(1000000,18000000,100000,100000,300000000,1000000,200000000),PerQueryLimits(50000,15000000,50000,50000,150000000,500000,100000000),None,None,None,false,86400000,86400000,true,true,false,false,true), queryEndpoint=remotePartition-url, requestTimeoutMs=10000) on InProcessPlanDispatcher""".stripMargin validatePlan(execPlan, expected) } @@ -1003,18 +1008,18 @@ class PlannerHierarchySpec extends AnyFunSpec with Matchers with PlanValidationS |---E~LocalPartitionReduceAggregateExec(aggrOp=Sum, aggrParams=List()) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) |----T~AggregateMapReduce(aggrOp=Sum, aggrParams=List(), without=List(), by=List()) |-----T~PeriodicSamplesMapper(start=1634172900000, step=300000, end=1634775000000, window=None, functionId=None, rawSource=true, offsetMs=None) - |------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1634172600000,1634775000000), filters=List(ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) + |------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1634172600000,1634775000000), 
filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,EqualsRegex(.*Ns)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) |----T~AggregateMapReduce(aggrOp=Sum, aggrParams=List(), without=List(), by=List()) |-----T~PeriodicSamplesMapper(start=1634172900000, step=300000, end=1634775000000, window=None, functionId=None, rawSource=true, offsetMs=None) - |------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1634172600000,1634775000000), filters=List(ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) + |------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1634172600000,1634775000000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,EqualsRegex(.*Ns)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) |---E~LocalPartitionReduceAggregateExec(aggrOp=Sum, aggrParams=List()) on ActorPlanDispatcher(Actor[akka://default/system/testActor],downsample) |----T~AggregateMapReduce(aggrOp=Sum, aggrParams=List(), without=List(), by=List()) |-----T~PeriodicSamplesMapper(start=1633911000000, step=300000, end=1634172600000, window=None, functionId=None, rawSource=true, offsetMs=None) - |------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1633910700000,1634172600000), filters=List(ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],downsample) + |------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1633910700000,1634172600000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,EqualsRegex(.*Ns)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],downsample) |----T~AggregateMapReduce(aggrOp=Sum, aggrParams=List(), without=List(), by=List()) |-----T~PeriodicSamplesMapper(start=1633911000000, step=300000, end=1634172600000, window=None, functionId=None, rawSource=true, offsetMs=None) - |------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1633910700000,1634172600000), filters=List(ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],downsample) - |--E~PromQlRemoteExec(PromQlQueryParams(sum(foo{instance="Inst-1",_ws_="demo",_ns_="remoteNs"}),1633911000,300,1634775000,None,false), PlannerParams(filodb,None,None,None,None,60000,PerQueryLimits(1000000,18000000,100000,100000,300000000,1000000,200000000),PerQueryLimits(50000,15000000,50000,50000,150000000,500000,100000000),None,None,None,false,86400000,86400000,true,true,false,false,true), queryEndpoint=remotePartition-url, requestTimeoutMs=10000) on InProcessPlanDispatcher""".stripMargin + 
|------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1633910700000,1634172600000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,EqualsRegex(.*Ns)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],downsample) + |--E~PromQlRemoteExec(PromQlQueryParams(sum(foo{_ws_ = "demo", _ns_ =~ ".*Ns", instance = "Inst-1" }),1633911000,300,1634775000,None,false), PlannerParams(filodb,None,None,None,None,60000,PerQueryLimits(1000000,18000000,100000,100000,300000000,1000000,200000000),PerQueryLimits(50000,15000000,50000,50000,150000000,500000,100000000),None,None,None,false,86400000,86400000,true,true,false,false,true), queryEndpoint=remotePartition-url, requestTimeoutMs=10000) on InProcessPlanDispatcher""".stripMargin validatePlan(execPlan, expected) } @@ -1034,18 +1039,18 @@ class PlannerHierarchySpec extends AnyFunSpec with Matchers with PlanValidationS |---E~LocalPartitionReduceAggregateExec(aggrOp=Sum, aggrParams=List()) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) |----T~AggregateMapReduce(aggrOp=Sum, aggrParams=List(), without=List(), by=List()) |-----T~PeriodicSamplesMapper(start=1634172900000, step=300000, end=1634775000000, window=None, functionId=None, rawSource=true, offsetMs=None) - |------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1634172600000,1634775000000), filters=List(ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) + |------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1634172600000,1634775000000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,EqualsRegex(.*Ns)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) |----T~AggregateMapReduce(aggrOp=Sum, aggrParams=List(), without=List(), by=List()) |-----T~PeriodicSamplesMapper(start=1634172900000, step=300000, end=1634775000000, window=None, functionId=None, rawSource=true, offsetMs=None) - |------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1634172600000,1634775000000), filters=List(ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) + |------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1634172600000,1634775000000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,EqualsRegex(.*Ns)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) |---E~LocalPartitionReduceAggregateExec(aggrOp=Sum, aggrParams=List()) on ActorPlanDispatcher(Actor[akka://default/system/testActor],downsample) |----T~AggregateMapReduce(aggrOp=Sum, aggrParams=List(), without=List(), by=List()) |-----T~PeriodicSamplesMapper(start=1633911000000, step=300000, end=1634172600000, window=None, functionId=None, rawSource=true, offsetMs=None) - 
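[editor's note] The substitution recurring through these remote plans is the behavioral core of the patch: on the "-" side, the shard-key regex was resolved before the remote call was planned, so each PromQlRemoteExec carried a concrete namespace; on the "+" side, resolution is deferred to the planner's injected shardKeyMatcher, and the unresolved regex travels with the remote query for the remote partition to resolve. The shipped query strings, copied from this hunk:

    // Query sent to the remote partition, before vs. after this patch:
    val before = """sum(foo{instance="Inst-1",_ws_="demo",_ns_="remoteNs"})"""
    val after  = """sum(foo{_ws_ = "demo", _ns_ =~ ".*Ns", instance = "Inst-1" })"""
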
|------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1633910700000,1634172600000), filters=List(ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],downsample) + |------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1633910700000,1634172600000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,EqualsRegex(.*Ns)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],downsample) |----T~AggregateMapReduce(aggrOp=Sum, aggrParams=List(), without=List(), by=List()) |-----T~PeriodicSamplesMapper(start=1633911000000, step=300000, end=1634172600000, window=None, functionId=None, rawSource=true, offsetMs=None) - |------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1633910700000,1634172600000), filters=List(ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],downsample) - |--E~PromQlRemoteExec(PromQlQueryParams(sum(foo{instance="Inst-1",_ws_="demo",_ns_="remoteNs"}),1633911000,300,1634775000,None,false), PlannerParams(filodb,None,None,None,None,60000,PerQueryLimits(1000000,18000000,100000,100000,300000000,1000000,200000000),PerQueryLimits(50000,15000000,50000,50000,150000000,500000,100000000),None,None,None,false,86400000,86400000,true,true,false,false,true), queryEndpoint=remotePartition-url, requestTimeoutMs=10000) on InProcessPlanDispatcher""".stripMargin + |------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1633910700000,1634172600000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,EqualsRegex(.*Ns)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],downsample) + |--E~PromQlRemoteExec(PromQlQueryParams(sum(foo{_ws_="demo",_ns_=~".*Ns",instance="Inst-1"}),1634775000,0,1634775000,None,false), PlannerParams(filodb,None,None,None,None,60000,PerQueryLimits(1000000,18000000,100000,100000,300000000,1000000,200000000),PerQueryLimits(50000,15000000,50000,50000,150000000,500000,100000000),None,None,None,false,86400000,86400000,true,true,false,false,true), queryEndpoint=remotePartition-url, requestTimeoutMs=10000) on InProcessPlanDispatcher""".stripMargin validatePlan(execPlan, expected) } @@ -1092,11 +1097,11 @@ class PlannerHierarchySpec extends AnyFunSpec with Matchers with PlanValidationS """T~PeriodicSamplesMapper(start=1634775000000, step=0, end=1634775000000, window=Some(432000000), functionId=Some(SumOverTime), rawSource=false, offsetMs=None) |-E~MultiPartitionDistConcatExec() on InProcessPlanDispatcher |--E~LocalPartitionDistConcatExec() on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) - |---T~PeriodicSamplesMapper(start=1634343000000, step=300000, end=1634775000000, window=None, functionId=None, rawSource=true, offsetMs=None) - |----E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1634342700000,1634775000000), filters=List(ColumnFilter(instance,Equals(Inst-1)), 
ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) - |---T~PeriodicSamplesMapper(start=1634343000000, step=300000, end=1634775000000, window=None, functionId=None, rawSource=true, offsetMs=None) - |----E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1634342700000,1634775000000), filters=List(ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) - |--E~PromQlRemoteExec(PromQlQueryParams(foo{instance="Inst-1",_ws_="demo",_ns_="remoteNs"},1634343000,300,1634775000,None,false), PlannerParams(filodb,None,None,None,None,60000,PerQueryLimits(1000000,18000000,100000,100000,300000000,1000000,200000000),PerQueryLimits(50000,15000000,50000,50000,150000000,500000,100000000),None,None,None,false,86400000,86400000,true,true,false,false,true), queryEndpoint=remotePartition-url, requestTimeoutMs=10000) on InProcessPlanDispatcher""".stripMargin + |---T~PeriodicSamplesMapper(start=1634775000000, step=300000, end=1634775000000, window=None, functionId=None, rawSource=true, offsetMs=None) + |----E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1634774700000,1634775000000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,EqualsRegex(.*Ns)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) + |---T~PeriodicSamplesMapper(start=1634775000000, step=300000, end=1634775000000, window=None, functionId=None, rawSource=true, offsetMs=None) + |----E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1634774700000,1634775000000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,EqualsRegex(.*Ns)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) + |--E~PromQlRemoteExec(PromQlQueryParams(foo{_ws_="demo",_ns_=~".*Ns",instance="Inst-1"},1634775000,0,1634775000,None,false), PlannerParams(filodb,None,None,None,None,60000,PerQueryLimits(1000000,18000000,100000,100000,300000000,1000000,200000000),PerQueryLimits(50000,15000000,50000,50000,150000000,500000,100000000),None,None,None,false,86400000,86400000,false,true,false,false,true), queryEndpoint=remotePartition-url, requestTimeoutMs=10000) on InProcessPlanDispatcher""".stripMargin validatePlan(execPlan, expected) } @@ -1112,10 +1117,10 @@ class PlannerHierarchySpec extends AnyFunSpec with Matchers with PlanValidationS """E~MultiPartitionDistConcatExec() on InProcessPlanDispatcher |-E~LocalPartitionDistConcatExec() on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) |--T~PeriodicSamplesMapper(start=1634775000000, step=0, end=1634775000000, window=Some(432000000), functionId=Some(SumOverTime), rawSource=true, offsetMs=None) - |---E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1634343000000,1634775000000), filters=List(ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on 
ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) + |---E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1634343000000,1634775000000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,EqualsRegex(.*Ns)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) |--T~PeriodicSamplesMapper(start=1634775000000, step=0, end=1634775000000, window=Some(432000000), functionId=Some(SumOverTime), rawSource=true, offsetMs=None) - |---E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1634343000000,1634775000000), filters=List(ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) - |-E~PromQlRemoteExec(PromQlQueryParams(sum_over_time(foo{instance="Inst-1",_ws_="demo",_ns_="remoteNs"}[432000s]),1634775000,0,1634775000,None,false), PlannerParams(filodb,None,None,None,None,60000,PerQueryLimits(1000000,18000000,100000,100000,300000000,1000000,200000000),PerQueryLimits(50000,15000000,50000,50000,150000000,500000,100000000),None,None,None,false,86400000,86400000,true,true,false,false,true), queryEndpoint=remotePartition-url, requestTimeoutMs=10000) on InProcessPlanDispatcher""".stripMargin + |---E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1634343000000,1634775000000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,EqualsRegex(.*Ns)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) + |-E~PromQlRemoteExec(PromQlQueryParams(sum_over_time(foo{_ws_="demo",_ns_=~".*Ns",instance="Inst-1"}[432000s]),1634775000,0,1634775000,None,false), PlannerParams(filodb,None,None,None,None,60000,PerQueryLimits(1000000,18000000,100000,100000,300000000,1000000,200000000),PerQueryLimits(50000,15000000,50000,50000,150000000,500000,100000000),None,None,None,false,86400000,86400000,false,true,false,false,true), queryEndpoint=remotePartition-url, requestTimeoutMs=10000) on InProcessPlanDispatcher""".stripMargin validatePlan(execPlan, expected) } @@ -1130,11 +1135,11 @@ class PlannerHierarchySpec extends AnyFunSpec with Matchers with PlanValidationS val expected = """E~MultiPartitionDistConcatExec() on InProcessPlanDispatcher |-E~LocalPartitionDistConcatExec() on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) - |--T~PeriodicSamplesMapper(start=1634771400000, step=60000, end=1634775000000, window=Some(600000), functionId=Some(Rate), rawSource=true, offsetMs=None) - |---E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1634770800000,1634775000000), filters=List(ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) - |--T~PeriodicSamplesMapper(start=1634771400000, step=60000, end=1634775000000, window=Some(600000), functionId=Some(Rate), rawSource=true, offsetMs=None) - |---E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1634770800000,1634775000000), 
filters=List(ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) - |-E~PromQlRemoteExec(PromQlQueryParams(rate(foo{instance="Inst-1",_ws_="demo",_ns_="remoteNs"}[600s]),1634771400,60,1634775000,None,false), PlannerParams(filodb,None,None,None,None,60000,PerQueryLimits(1000000,18000000,100000,100000,300000000,1000000,200000000),PerQueryLimits(50000,15000000,50000,50000,150000000,500000,100000000),None,None,None,false,86400000,86400000,true,true,false,false,true), queryEndpoint=remotePartition-url, requestTimeoutMs=10000) on InProcessPlanDispatcher""".stripMargin + |--T~PeriodicSamplesMapper(start=1634775000000, step=60000, end=1634775000000, window=Some(600000), functionId=Some(Rate), rawSource=true, offsetMs=None) + |---E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1634774400000,1634775000000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,EqualsRegex(.*Ns)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) + |--T~PeriodicSamplesMapper(start=1634775000000, step=60000, end=1634775000000, window=Some(600000), functionId=Some(Rate), rawSource=true, offsetMs=None) + |---E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1634774400000,1634775000000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,EqualsRegex(.*Ns)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) + |-E~PromQlRemoteExec(PromQlQueryParams(rate(foo{_ws_="demo",_ns_=~".*Ns",instance="Inst-1"}[600s]),1634775000,0,1634775000,None,false), PlannerParams(filodb,None,None,None,None,60000,PerQueryLimits(1000000,18000000,100000,100000,300000000,1000000,200000000),PerQueryLimits(50000,15000000,50000,50000,150000000,500000,100000000),None,None,None,false,86400000,86400000,false,true,false,false,true), queryEndpoint=remotePartition-url, requestTimeoutMs=10000) on InProcessPlanDispatcher""".stripMargin validatePlan(execPlan, expected) } @@ -1152,11 +1157,11 @@ class PlannerHierarchySpec extends AnyFunSpec with Matchers with PlanValidationS |--E~LocalPartitionReduceAggregateExec(aggrOp=Sum, aggrParams=List()) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) |---T~AggregateMapReduce(aggrOp=Sum, aggrParams=List(), without=List(), by=List()) |----T~PeriodicSamplesMapper(start=1634771400000, step=60000, end=1634775000000, window=Some(600000), functionId=Some(Rate), rawSource=true, offsetMs=None) - |-----E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1634770800000,1634775000000), filters=List(ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) + |-----E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1634770800000,1634775000000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,EqualsRegex(.*Ns)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on 
ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) |---T~AggregateMapReduce(aggrOp=Sum, aggrParams=List(), without=List(), by=List()) |----T~PeriodicSamplesMapper(start=1634771400000, step=60000, end=1634775000000, window=Some(600000), functionId=Some(Rate), rawSource=true, offsetMs=None) - |-----E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1634770800000,1634775000000), filters=List(ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) - |--E~PromQlRemoteExec(PromQlQueryParams(sum(rate(foo{instance="Inst-1",_ws_="demo",_ns_="remoteNs"}[600s])),1634771400,60,1634775000,None,false), PlannerParams(filodb,None,None,None,None,60000,PerQueryLimits(1000000,18000000,100000,100000,300000000,1000000,200000000),PerQueryLimits(50000,15000000,50000,50000,150000000,500000,100000000),None,None,None,false,86400000,86400000,true,true,false,false,true), queryEndpoint=remotePartition-url, requestTimeoutMs=10000) on InProcessPlanDispatcher""".stripMargin + |-----E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1634770800000,1634775000000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,EqualsRegex(.*Ns)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) + |--E~PromQlRemoteExec(PromQlQueryParams(sum(rate(foo{_ws_="demo",_ns_=~".*Ns",instance="Inst-1"}[600s])),1634775000,0,1634775000,None,false), PlannerParams(filodb,None,None,None,None,60000,PerQueryLimits(1000000,18000000,100000,100000,300000000,1000000,200000000),PerQueryLimits(50000,15000000,50000,50000,150000000,500000,100000000),None,None,None,false,86400000,86400000,true,true,false,false,true), queryEndpoint=remotePartition-url, requestTimeoutMs=10000) on InProcessPlanDispatcher""".stripMargin validatePlan(execPlan, expected) } @@ -1175,11 +1180,11 @@ class PlannerHierarchySpec extends AnyFunSpec with Matchers with PlanValidationS |---E~LocalPartitionReduceAggregateExec(aggrOp=Sum, aggrParams=List()) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) |----T~AggregateMapReduce(aggrOp=Sum, aggrParams=List(), without=List(), by=List()) |-----T~PeriodicSamplesMapper(start=1634771400000, step=60000, end=1634775000000, window=Some(600000), functionId=Some(Rate), rawSource=true, offsetMs=None) - |------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1634770800000,1634775000000), filters=List(ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) + |------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1634770800000,1634775000000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,EqualsRegex(.*Ns)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) |----T~AggregateMapReduce(aggrOp=Sum, aggrParams=List(), without=List(), by=List()) |-----T~PeriodicSamplesMapper(start=1634771400000, step=60000, end=1634775000000, window=Some(600000), 
functionId=Some(Rate), rawSource=true, offsetMs=None) - |------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1634770800000,1634775000000), filters=List(ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) - |---E~PromQlRemoteExec(PromQlQueryParams(sum(rate(foo{instance="Inst-1",_ws_="demo",_ns_="remoteNs"}[600s])),1634771400,60,1634775000,None,false), PlannerParams(filodb,None,None,None,None,60000,PerQueryLimits(1000000,18000000,100000,100000,300000000,1000000,200000000),PerQueryLimits(50000,15000000,50000,50000,150000000,500000,100000000),None,None,None,false,86400000,86400000,true,true,false,false,true), queryEndpoint=remotePartition-url, requestTimeoutMs=10000) on InProcessPlanDispatcher""".stripMargin + |------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1634770800000,1634775000000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,EqualsRegex(.*Ns)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) + |---E~PromQlRemoteExec(PromQlQueryParams(sum(rate(foo{_ws_="demo",_ns_=~".*Ns",instance="Inst-1"}[600s])),1634775000,0,1634775000,None,false), PlannerParams(filodb,None,None,None,None,60000,PerQueryLimits(1000000,18000000,100000,100000,300000000,1000000,200000000),PerQueryLimits(50000,15000000,50000,50000,150000000,500000,100000000),None,None,None,false,86400000,86400000,true,true,false,false,true), queryEndpoint=remotePartition-url, requestTimeoutMs=10000) on InProcessPlanDispatcher""".stripMargin validatePlan(execPlan, expected) } @@ -1192,7 +1197,7 @@ class PlannerHierarchySpec extends AnyFunSpec with Matchers with PlanValidationS plannerParams = PlannerParams(processMultiPartition = true))) val expected = - """E~PromQlRemoteExec(PromQlQueryParams(max_over_time(sum(rate(foo{instance="Inst-1",_ws_="demo",_ns_="remoteNs"}[600s]))[3600s:60s]),1634775000,0,1634775000,None,false), PlannerParams(filodb,None,None,None,None,60000,PerQueryLimits(1000000,18000000,100000,100000,300000000,1000000,200000000),PerQueryLimits(50000,15000000,50000,50000,150000000,500000,100000000),None,None,None,false,86400000,86400000,false,true,false,false,true), queryEndpoint=remotePartition-url, requestTimeoutMs=10000) on InProcessPlanDispatcher""" + """E~PromQlRemoteExec(PromQlQueryParams(max_over_time(sum(rate(foo{_ws_="demo",_ns_=~".*remoteNs",instance="Inst-1"}[600s]))[3600s:60s]),1634775000,0,1634775000,None,false), PlannerParams(filodb,None,None,None,None,60000,PerQueryLimits(1000000,18000000,100000,100000,300000000,1000000,200000000),PerQueryLimits(50000,15000000,50000,50000,150000000,500000,100000000),None,None,None,false,86400000,86400000,false,true,false,false,true), queryEndpoint=remotePartition-url, requestTimeoutMs=10000) on InProcessPlanDispatcher""" validatePlan(execPlan, expected) } @@ -1210,11 +1215,11 @@ class PlannerHierarchySpec extends AnyFunSpec with Matchers with PlanValidationS """T~PeriodicSamplesMapper(start=1634775000000, step=0, end=1634775000000, window=Some(3600000), functionId=Some(MaxOverTime), rawSource=false, offsetMs=None) |-E~MultiPartitionDistConcatExec() on InProcessPlanDispatcher |--E~LocalPartitionDistConcatExec() on 
ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) - |---T~PeriodicSamplesMapper(start=1634771400000, step=60000, end=1634775000000, window=Some(600000), functionId=Some(Rate), rawSource=true, offsetMs=None) - |----E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1634770800000,1634775000000), filters=List(ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) - |---T~PeriodicSamplesMapper(start=1634771400000, step=60000, end=1634775000000, window=Some(600000), functionId=Some(Rate), rawSource=true, offsetMs=None) - |----E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1634770800000,1634775000000), filters=List(ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) - |--E~PromQlRemoteExec(PromQlQueryParams(rate(foo{instance="Inst-1",_ws_="demo",_ns_="remoteNs"}[600s]),1634771400,60,1634775000,None,false), PlannerParams(filodb,None,None,None,None,60000,PerQueryLimits(1000000,18000000,100000,100000,300000000,1000000,200000000),PerQueryLimits(50000,15000000,50000,50000,150000000,500000,100000000),None,None,None,false,86400000,86400000,true,true,false,false,true), queryEndpoint=remotePartition-url, requestTimeoutMs=10000) on InProcessPlanDispatcher""".stripMargin + |---T~PeriodicSamplesMapper(start=1634775000000, step=60000, end=1634775000000, window=Some(600000), functionId=Some(Rate), rawSource=true, offsetMs=None) + |----E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1634774400000,1634775000000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,EqualsRegex(.*Ns)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) + |---T~PeriodicSamplesMapper(start=1634775000000, step=60000, end=1634775000000, window=Some(600000), functionId=Some(Rate), rawSource=true, offsetMs=None) + |----E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1634774400000,1634775000000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,EqualsRegex(.*Ns)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) + |--E~PromQlRemoteExec(PromQlQueryParams(rate(foo{_ws_="demo",_ns_=~".*Ns",instance="Inst-1"}[600s]),1634775000,0,1634775000,None,false), PlannerParams(filodb,None,None,None,None,60000,PerQueryLimits(1000000,18000000,100000,100000,300000000,1000000,200000000),PerQueryLimits(50000,15000000,50000,50000,150000000,500000,100000000),None,None,None,false,86400000,86400000,false,true,false,false,true), queryEndpoint=remotePartition-url, requestTimeoutMs=10000) on InProcessPlanDispatcher""".stripMargin validatePlan(execPlan, expected) } @@ -1233,11 +1238,11 @@ class PlannerHierarchySpec extends AnyFunSpec with Matchers with PlanValidationS |-T~PeriodicSamplesMapper(start=1634771400000, step=60000, end=1634775000000, window=Some(300000), functionId=Some(Deriv), rawSource=false, offsetMs=None) |--E~MultiPartitionDistConcatExec() on 
InProcessPlanDispatcher |---E~LocalPartitionDistConcatExec() on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) - |----T~PeriodicSamplesMapper(start=1634771100000, step=60000, end=1634775000000, window=Some(60000), functionId=Some(Rate), rawSource=true, offsetMs=None) - |-----E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1634771040000,1634775000000), filters=List(ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) - |----T~PeriodicSamplesMapper(start=1634771100000, step=60000, end=1634775000000, window=Some(60000), functionId=Some(Rate), rawSource=true, offsetMs=None) - |-----E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1634771040000,1634775000000), filters=List(ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) - |---E~PromQlRemoteExec(PromQlQueryParams(rate(foo{instance="Inst-1",_ws_="demo",_ns_="remoteNs"}[60s]),1634771100,60,1634775000,None,false), PlannerParams(filodb,None,None,None,None,60000,PerQueryLimits(1000000,18000000,100000,100000,300000000,1000000,200000000),PerQueryLimits(50000,15000000,50000,50000,150000000,500000,100000000),None,None,None,false,86400000,86400000,true,true,false,false,true), queryEndpoint=remotePartition-url, requestTimeoutMs=10000) on InProcessPlanDispatcher""".stripMargin + |----T~PeriodicSamplesMapper(start=1634775000000, step=60000, end=1634775000000, window=Some(60000), functionId=Some(Rate), rawSource=true, offsetMs=None) + |-----E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1634774940000,1634775000000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,EqualsRegex(.*Ns)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) + |----T~PeriodicSamplesMapper(start=1634775000000, step=60000, end=1634775000000, window=Some(60000), functionId=Some(Rate), rawSource=true, offsetMs=None) + |-----E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1634774940000,1634775000000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,EqualsRegex(.*Ns)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) + |---E~PromQlRemoteExec(PromQlQueryParams(rate(foo{_ws_="demo",_ns_=~".*Ns",instance="Inst-1"}[60s]),1634775000,0,1634775000,None,false), PlannerParams(filodb,None,None,None,None,60000,PerQueryLimits(1000000,18000000,100000,100000,300000000,1000000,200000000),PerQueryLimits(50000,15000000,50000,50000,150000000,500000,100000000),None,None,None,false,86400000,86400000,false,true,false,false,true), queryEndpoint=remotePartition-url, requestTimeoutMs=10000) on InProcessPlanDispatcher""".stripMargin validatePlan(execPlan, expected) } @@ -1268,43 +1273,43 @@ class PlannerHierarchySpec extends AnyFunSpec with Matchers with PlanValidationS plannerParams = PlannerParams(processMultiPartition = true))) val expected = - """E~BinaryJoinExec(binaryOp=MUL, on=List(), 
ignoring=List()) on InProcessPlanDispatcher(filodb.core.query.QueryConfig@5eabff6b) + """E~BinaryJoinExec(binaryOp=MUL, on=List(), ignoring=List()) on InProcessPlanDispatcher |-T~AggregatePresenter(aggrOp=Sum, aggrParams=List(), rangeParams=RangeParams(1633913330,300,1634777330)) - |--E~MultiPartitionReduceAggregateExec(aggrOp=Sum, aggrParams=List()) on InProcessPlanDispatcher(filodb.core.query.QueryConfig@5eabff6b) - |---E~StitchRvsExec() on InProcessPlanDispatcher(filodb.core.query.EmptyQueryConfig$@5b000fe6) - |----E~LocalPartitionReduceAggregateExec(aggrOp=Sum, aggrParams=List()) on ActorPlanDispatcher(Actor[akka://default/system/testProbe-1#542289610],raw) + |--E~MultiPartitionReduceAggregateExec(aggrOp=Sum, aggrParams=List()) on InProcessPlanDispatcher + |---E~StitchRvsExec() on InProcessPlanDispatcher + |----E~LocalPartitionReduceAggregateExec(aggrOp=Sum, aggrParams=List()) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) |-----T~AggregateMapReduce(aggrOp=Sum, aggrParams=List(), without=List(), by=List()) |------T~PeriodicSamplesMapper(start=1634173130000, step=300000, end=1634777330000, window=None, functionId=None, rawSource=true, offsetMs=None) - |-------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1634172830000,1634777330000), filters=List(ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testProbe-1#542289610],raw) + |-------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1634172830000,1634777330000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,EqualsRegex(.*Ns)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) |-----T~AggregateMapReduce(aggrOp=Sum, aggrParams=List(), without=List(), by=List()) |------T~PeriodicSamplesMapper(start=1634173130000, step=300000, end=1634777330000, window=None, functionId=None, rawSource=true, offsetMs=None) - |-------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1634172830000,1634777330000), filters=List(ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testProbe-1#542289610],raw) - |----E~LocalPartitionReduceAggregateExec(aggrOp=Sum, aggrParams=List()) on ActorPlanDispatcher(Actor[akka://default/system/testProbe-1#542289610],downsample) + |-------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1634172830000,1634777330000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,EqualsRegex(.*Ns)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) + |----E~LocalPartitionReduceAggregateExec(aggrOp=Sum, aggrParams=List()) on ActorPlanDispatcher(Actor[akka://default/system/testActor],downsample) |-----T~AggregateMapReduce(aggrOp=Sum, aggrParams=List(), without=List(), by=List()) |------T~PeriodicSamplesMapper(start=1633913330000, step=300000, end=1634172830000, window=None, functionId=None, rawSource=true, offsetMs=None) - 
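[editor's note] The TimeRangeChunkScan starts in these leaves are derived, not arbitrary: a raw selector is scanned from the PeriodicSamplesMapper start minus its window, or minus the default 5-minute staleness lookback when window=None (assuming FiloDB's usual WindowConstants default). A quick sanity check against the figures in this hunk and the rate(...[600s]) hunks earlier:

    // All times in milliseconds.
    val stalenessLookbackMs = 300000L  // assumed default lookback for window=None selectors
    assert(1634173130000L - stalenessLookbackMs == 1634172830000L)  // plain selector, this hunk
    assert(1634775000000L - 600000L == 1634774400000L)              // rate(foo[600s]) leaves above
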
|-------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1633913030000,1634172830000), filters=List(ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testProbe-1#542289610],downsample) + |-------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1633913030000,1634172830000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,EqualsRegex(.*Ns)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],downsample) |-----T~AggregateMapReduce(aggrOp=Sum, aggrParams=List(), without=List(), by=List()) |------T~PeriodicSamplesMapper(start=1633913330000, step=300000, end=1634172830000, window=None, functionId=None, rawSource=true, offsetMs=None) - |-------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1633913030000,1634172830000), filters=List(ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testProbe-1#542289610],downsample) - |---E~PromQlRemoteExec(PromQlQueryParams(sum(foo{instance="Inst-1",_ws_="demo",_ns_="remoteNs"}),1633913330,300,1634777330,None,false), PlannerParams(filodb,None,None,None,None,60000,PerQueryLimits(1000000,18000000,100000,100000,300000000,1000000,200000000),PerQueryLimits(50000,15000000,50000,50000,150000000,500000,100000000),None,None,None,false,86400000,86400000,true,true,false,false,true), queryEndpoint=remotePartition-url, requestTimeoutMs=10000) on InProcessPlanDispatcher(filodb.core.query.QueryConfig@5eabff6b) + |-------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1633913030000,1634172830000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,EqualsRegex(.*Ns)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(foo))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],downsample) + |---E~PromQlRemoteExec(PromQlQueryParams(sum(foo{_ws_="demo",_ns_=~".*Ns",instance="Inst-1"}),1633913330,300,1634777330,None,false), PlannerParams(filodb,None,None,None,None,60000,PerQueryLimits(1000000,18000000,100000,100000,300000000,1000000,200000000),PerQueryLimits(50000,15000000,50000,50000,150000000,500000,100000000),None,None,None,false,86400000,86400000,true,true,false,false,true), queryEndpoint=remotePartition-url, requestTimeoutMs=10000) on InProcessPlanDispatcher |-T~AggregatePresenter(aggrOp=Sum, aggrParams=List(), rangeParams=RangeParams(1633913330,300,1634777330)) - |--E~MultiPartitionReduceAggregateExec(aggrOp=Sum, aggrParams=List()) on InProcessPlanDispatcher(filodb.core.query.QueryConfig@5eabff6b) - |---E~StitchRvsExec() on InProcessPlanDispatcher(filodb.core.query.EmptyQueryConfig$@5b000fe6) - |----E~LocalPartitionReduceAggregateExec(aggrOp=Sum, aggrParams=List()) on ActorPlanDispatcher(Actor[akka://default/system/testProbe-1#542289610],raw) + |--E~MultiPartitionReduceAggregateExec(aggrOp=Sum, aggrParams=List()) on InProcessPlanDispatcher + |---E~StitchRvsExec() on InProcessPlanDispatcher + |----E~LocalPartitionReduceAggregateExec(aggrOp=Sum, aggrParams=List()) on 
ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) |-----T~AggregateMapReduce(aggrOp=Sum, aggrParams=List(), without=List(), by=List()) |------T~PeriodicSamplesMapper(start=1634173130000, step=300000, end=1634777330000, window=None, functionId=None, rawSource=true, offsetMs=None) - |-------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1634172830000,1634777330000), filters=List(ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(_metric_,Equals(bar))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testProbe-1#542289610],raw) + |-------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1634172830000,1634777330000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,EqualsRegex(.*Ns)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(bar))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) |-----T~AggregateMapReduce(aggrOp=Sum, aggrParams=List(), without=List(), by=List()) |------T~PeriodicSamplesMapper(start=1634173130000, step=300000, end=1634777330000, window=None, functionId=None, rawSource=true, offsetMs=None) - |-------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1634172830000,1634777330000), filters=List(ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(_metric_,Equals(bar))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testProbe-1#542289610],raw) - |----E~LocalPartitionReduceAggregateExec(aggrOp=Sum, aggrParams=List()) on ActorPlanDispatcher(Actor[akka://default/system/testProbe-1#542289610],downsample) + |-------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1634172830000,1634777330000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,EqualsRegex(.*Ns)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(bar))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) + |----E~LocalPartitionReduceAggregateExec(aggrOp=Sum, aggrParams=List()) on ActorPlanDispatcher(Actor[akka://default/system/testActor],downsample) |-----T~AggregateMapReduce(aggrOp=Sum, aggrParams=List(), without=List(), by=List()) |------T~PeriodicSamplesMapper(start=1633913330000, step=300000, end=1634172830000, window=None, functionId=None, rawSource=true, offsetMs=None) - |-------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1633913030000,1634172830000), filters=List(ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(_metric_,Equals(bar))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testProbe-1#542289610],downsample) + |-------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(1633913030000,1634172830000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,EqualsRegex(.*Ns)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(bar))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],downsample) |-----T~AggregateMapReduce(aggrOp=Sum, aggrParams=List(), without=List(), by=List()) |------T~PeriodicSamplesMapper(start=1633913330000, step=300000, 
end=1634172830000, window=None, functionId=None, rawSource=true, offsetMs=None) - |-------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1633913030000,1634172830000), filters=List(ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(localNs)), ColumnFilter(_metric_,Equals(bar))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testProbe-1#542289610],downsample) - |---E~PromQlRemoteExec(PromQlQueryParams(sum(bar{instance="Inst-1",_ws_="demo",_ns_="remoteNs"}),1633913330,300,1634777330,None,false), PlannerParams(filodb,None,None,None,None,60000,PerQueryLimits(1000000,18000000,100000,100000,300000000,1000000,200000000),PerQueryLimits(50000,15000000,50000,50000,150000000,500000,100000000),None,None,None,false,86400000,86400000,true,true,false,false,true), queryEndpoint=remotePartition-url, requestTimeoutMs=10000) on InProcessPlanDispatcher(filodb.core.query.QueryConfig@5eabff6b)""".stripMargin + |-------E~MultiSchemaPartitionsExec(dataset=timeseries, shard=1, chunkMethod=TimeRangeChunkScan(1633913030000,1634172830000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,EqualsRegex(.*Ns)), ColumnFilter(instance,Equals(Inst-1)), ColumnFilter(_metric_,Equals(bar))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],downsample) + |---E~PromQlRemoteExec(PromQlQueryParams(sum(bar{_ws_="demo",_ns_=~".*Ns",instance="Inst-1"}),1633913330,300,1634777330,None,false), PlannerParams(filodb,None,None,None,None,60000,PerQueryLimits(1000000,18000000,100000,100000,300000000,1000000,200000000),PerQueryLimits(50000,15000000,50000,50000,150000000,500000,100000000),None,None,None,false,86400000,86400000,true,true,false,false,true), queryEndpoint=remotePartition-url, requestTimeoutMs=10000) on InProcessPlanDispatcher""".stripMargin validatePlan(execPlan, expected) } @@ -2510,7 +2515,7 @@ class PlannerHierarchySpec extends AnyFunSpec with Matchers with PlanValidationS override def getMetadataPartitions(nonMetricShardKeyFilters: Seq[ColumnFilter], timeRange: TimeRange): List[PartitionAssignment] = ??? 
} val grpcRemoteMultiPartitionPlanner = new MultiPartitionPlanner(gRpcRemotePartitionLocationProvider, singlePartitionPlanner, - "localPartition", dataset, queryConfig) + "localPartition", dataset, queryConfig, shardKeyMatcher = shardKeyMatcherFn) val gRpcRemoteRootPlanner = new ShardKeyRegexPlanner(dataset, grpcRemoteMultiPartitionPlanner, shardKeyMatcherFn, gRpcRemotePartitionLocationProvider, queryConfig) val query4 = """topk(2, test{_ws_ = "demo", _ns_ =~ ".*Ns", instance = "Inst-1"})""" @@ -3249,7 +3254,7 @@ class PlannerHierarchySpec extends AnyFunSpec with Matchers with PlanValidationS if (shardColumnFilters.isEmpty) { return Nil } - val nsColValue = shardColumnFilters.find(_.column == "_ns_").get.filter.asInstanceOf[EqualsRegex].pattern.toString + val nsColValue = shardColumnFilters.find(_.column == "_ns_").get.filter.valuesStrings.head.toString nsColValue match { case LOCAL => Seq( @@ -3274,8 +3279,8 @@ class PlannerHierarchySpec extends AnyFunSpec with Matchers with PlanValidationS } } - val spp = getPlanners(2, dataset).spp - val multiPartitionPlanner = new MultiPartitionPlanner(partitionLocationProvider, spp, "local", dataset, queryConfig) + val spp = getPlanners(2, dataset, shardKeyMatcherFunc).spp + val multiPartitionPlanner = new MultiPartitionPlanner(partitionLocationProvider, spp, "local", dataset, queryConfig, shardKeyMatcher = shardKeyMatcherFunc) val shardKeyRegexPlanner = new ShardKeyRegexPlanner(dataset, multiPartitionPlanner, shardKeyMatcherFunc, partitionLocationProvider, queryConfig, targetSchemaProvider) val tschema = FunctionalTargetSchemaProvider(tschemaProviderFunc) @@ -3288,14 +3293,9 @@ class PlannerHierarchySpec extends AnyFunSpec with Matchers with PlanValidationS try { validatePlan(ep, test.expected, sort = true) } catch { - case _: Throwable => { - Thread.sleep(1000) - println("AMT =========") - println(test.tschemaEnabled) - println(test.query) - println(ep.printTree()) - println("AMT =========") - System.exit(4) + case t: Throwable => { + println("failed: " + test.query) + throw t } } } @@ -3350,7 +3350,8 @@ class PlannerHierarchySpec extends AnyFunSpec with Matchers with PlanValidationS getPartitions(nonMetricShardKeyFilters.map(filter => (filter.column, filter.filter.valuesStrings.head.toString)).toMap, timeRange) } - val mppPlanner = new MultiPartitionPlanner(partitionLocationProvider, singlePartitionPlanner, "local", dataset, queryConfig) + val planners = getPlanners(2, dataset, shardKeyMatcher) + val mppPlanner = new MultiPartitionPlanner(partitionLocationProvider, planners.spp, "local", dataset, queryConfig, shardKeyMatcher = shardKeyMatcher) val planner = new ShardKeyRegexPlanner(dataset, mppPlanner, shardKeyMatcher, partitionLocationProvider, queryConfig) val timeParams = TimeStepParams(startSeconds, step, endSeconds) @@ -3452,8 +3453,8 @@ class PlannerHierarchySpec extends AnyFunSpec with Matchers with PlanValidationS val nShards = 4 val spreadProv = StaticSpreadProvider(SpreadChange(0L, Integer.numberOfTrailingZeros(nShards))) - val singlePartitionPlanner = getPlanners(nShards, dataset).spp - val mppPlanner = new MultiPartitionPlanner(partitionLocationProvider, singlePartitionPlanner, "local", dataset, queryConfig) + val singlePartitionPlanner = getPlanners(nShards, dataset, shardKeyMatcher).spp + val mppPlanner = new MultiPartitionPlanner(partitionLocationProvider, singlePartitionPlanner, "local", dataset, queryConfig, shardKeyMatcher = shardKeyMatcher) val planner = new ShardKeyRegexPlanner(dataset, mppPlanner, shardKeyMatcher, 
partitionLocationProvider, queryConfig) val timeParams = TimeStepParams(startSeconds, step, endSeconds) @@ -3535,42 +3536,6 @@ class PlannerHierarchySpec extends AnyFunSpec with Matchers with PlanValidationS } } - it("should create more than one plan per partition when keys have different prefixes") { - val dataset = MetricsTestData.timeseriesDatasetMultipleShardKeys - val shardKeyMatcher: Seq[ColumnFilter] => Seq[Seq[ColumnFilter]] = filters => Seq( - // Selectors will always match four keys. - Seq(ColumnFilter("_ws_", Filter.Equals("foo_1")), ColumnFilter("_ns_", Filter.Equals("ns1"))), - Seq(ColumnFilter("_ws_", Filter.Equals("foo_1")), ColumnFilter("_ns_", Filter.Equals("ns2"))), - Seq(ColumnFilter("_ws_", Filter.Equals("bar_2")), ColumnFilter("_ns_", Filter.Equals("ns3"))), - Seq(ColumnFilter("_ws_", Filter.Equals("baz_2")), ColumnFilter("_ns_", Filter.Equals("ns4"))), - ) - val partitionLocationProvider = new PartitionLocationProvider { - override def getPartitions(routingKey: Map[String, String], timeRange: TimeRange): List[PartitionAssignment] = { - // using the last char of the _ws_ label as the partition name - List(PartitionAssignment(routingKey("_ws_").last.toString, "dummy-endpoint", timeRange)) - } - - override def getMetadataPartitions(nonMetricShardKeyFilters: Seq[ColumnFilter], timeRange: TimeRange): List[PartitionAssignment] = - getPartitions(nonMetricShardKeyFilters.map(filter => (filter.column, filter.filter.valuesStrings.head.toString)).toMap, timeRange) - } - - val mppPlanner = new MultiPartitionPlanner(partitionLocationProvider, singlePartitionPlanner, "local", dataset, queryConfig) - val planner = new ShardKeyRegexPlanner(dataset, mppPlanner, shardKeyMatcher, partitionLocationProvider, queryConfig) - - val timeParams = TimeStepParams(startSeconds, step, endSeconds) - val qContext = QueryContext(origQueryParams = queryParams, - plannerParams = PlannerParams(processMultiPartition = true)) - - val query ="""foo{_ws_="dummy", _ns_=~"dummy.*", bar="hello"}""" - val expected = """E~MultiPartitionDistConcatExec() on InProcessPlanDispatcher(QueryConfig(10 seconds,300000,1,50,antlr,true,true,None,Some(10000),None,None,25,true,false,true,Set(),Some(plannerSelector),Map(filodb-query-exec-metadataexec -> 65536, filodb-query-exec-aggregate-large-container -> 65536))) - |-E~PromQlRemoteExec(PromQlQueryParams(foo{bar="hello",_ws_="bar_2",_ns_="ns3"},1633913330,300,1634777330,None,false), PlannerParams(filodb,None,None,None,None,60000,PerQueryLimits(1000000,18000000,100000,100000,300000000,1000000,200000000),PerQueryLimits(50000,15000000,50000,50000,150000000,500000,100000000),None,None,None,false,86400000,86400000,true,true,false,false,true), queryEndpoint=dummy-endpoint, requestTimeoutMs=10000) on InProcessPlanDispatcher(QueryConfig(10 seconds,300000,1,50,antlr,true,true,None,Some(10000),None,None,25,true,false,true,Set(),Some(plannerSelector),Map(filodb-query-exec-metadataexec -> 65536, filodb-query-exec-aggregate-large-container -> 65536))) - |-E~PromQlRemoteExec(PromQlQueryParams(foo{bar="hello",_ws_="baz_2",_ns_="ns4"},1633913330,300,1634777330,None,false), PlannerParams(filodb,None,None,None,None,60000,PerQueryLimits(1000000,18000000,100000,100000,300000000,1000000,200000000),PerQueryLimits(50000,15000000,50000,50000,150000000,500000,100000000),None,None,None,false,86400000,86400000,true,true,false,false,true), queryEndpoint=dummy-endpoint, requestTimeoutMs=10000) on InProcessPlanDispatcher(QueryConfig(10 
seconds,300000,1,50,antlr,true,true,None,Some(10000),None,None,25,true,false,true,Set(),Some(plannerSelector),Map(filodb-query-exec-metadataexec -> 65536, filodb-query-exec-aggregate-large-container -> 65536))) - |-E~PromQlRemoteExec(PromQlQueryParams(foo{bar="hello",_ws_="foo_1",_ns_=~"ns1|ns2"},1633913330,300,1634777330,None,false), PlannerParams(filodb,None,None,None,None,60000,PerQueryLimits(1000000,18000000,100000,100000,300000000,1000000,200000000),PerQueryLimits(50000,15000000,50000,50000,150000000,500000,100000000),None,None,None,false,86400000,86400000,true,true,false,false,true), queryEndpoint=dummy-endpoint, requestTimeoutMs=10000) on InProcessPlanDispatcher(QueryConfig(10 seconds,300000,1,50,antlr,true,true,None,Some(10000),None,None,25,true,false,true,Set(),Some(plannerSelector),Map(filodb-query-exec-metadataexec -> 65536, filodb-query-exec-aggregate-large-container -> 65536)))""".stripMargin - val lp = Parser.queryRangeToLogicalPlan(query, timeParams) - val exec = planner.materialize(lp, qContext) - validatePlan(exec, expected) - } - it("should materialize a plan per shard-key when reduceShardKeyRegexFanout=false") { val dataset = MetricsTestData.timeseriesDatasetMultipleShardKeys val shardKeyMatcher: Seq[ColumnFilter] => Seq[Seq[ColumnFilter]] = filters => Seq( diff --git a/coordinator/src/test/scala/filodb.coordinator/queryplanner/ShardKeyRegexPlannerSpec.scala b/coordinator/src/test/scala/filodb.coordinator/queryplanner/ShardKeyRegexPlannerSpec.scala index 46bd3c7197..6afba93c10 100644 --- a/coordinator/src/test/scala/filodb.coordinator/queryplanner/ShardKeyRegexPlannerSpec.scala +++ b/coordinator/src/test/scala/filodb.coordinator/queryplanner/ShardKeyRegexPlannerSpec.scala @@ -39,9 +39,11 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture private val localMapper = new ShardMapper(32) for {i <- 0 until 32} localMapper.registerNode(Seq(i), node) - - val localPlanner = new SingleClusterPlanner(dataset, schemas, localMapper, earliestRetainedTimestampFn = 0, - queryConfig, "raw") + def makeLocalPlanner(shardKeyMatcher: Seq[ColumnFilter] => Seq[Seq[ColumnFilter]]): SingleClusterPlanner = { + new SingleClusterPlanner( + dataset, schemas, localMapper, earliestRetainedTimestampFn = 0, + queryConfig, "raw", shardKeyMatcher = shardKeyMatcher) + } def partitions(timeRange: TimeRange): List[PartitionAssignment] = List(PartitionAssignment("remote", "remote-url", TimeRange(timeRange.startMs, timeRange.endMs))) @@ -63,9 +65,7 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture getPartitions(nonMetricShardKeyFilters.map(f => (f.column, f.filter.valuesStrings.head.toString)).toMap, timeRange) } val c = QueryConfig(config).copy(plannerSelector = Some("plannerSelector")) - val mpp = new MultiPartitionPlanner( - mppPartitionLocationProvider, localPlanner, "local", dataset, c - ) + val shardKeyMatcherFn = (shardColumnFilters: Seq[ColumnFilter]) => { Seq( Seq( @@ -78,6 +78,11 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture ) ) } + + val localPlanner = makeLocalPlanner(shardKeyMatcherFn) + val mpp = new MultiPartitionPlanner( + mppPartitionLocationProvider, localPlanner, "local", dataset, c, shardKeyMatcher = shardKeyMatcherFn + ) val skrp = new ShardKeyRegexPlanner(dataset, mpp, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) it("should generate Exec plan for simple query") { @@ -85,6 +90,7 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture val 
shardKeyMatcherFn = (shardColumnFilters: Seq[ColumnFilter]) => { Seq(Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("App-1"))), Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("App-2"))))} + val localPlanner = makeLocalPlanner(shardKeyMatcherFn) val engine = new ShardKeyRegexPlanner(dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) val execPlan = engine.materialize(lp, QueryContext(origQueryParams = promQlQueryParams)) execPlan.isInstanceOf[MultiPartitionDistConcatExec] shouldEqual(true) @@ -100,6 +106,7 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture val shardKeyMatcherFn = (shardColumnFilters: Seq[ColumnFilter]) => { Seq(Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("App-1"))), Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("App-2"))))} + val localPlanner = makeLocalPlanner(shardKeyMatcherFn) val engine = new ShardKeyRegexPlanner(dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) val execPlan = engine.materialize(lp, QueryContext(origQueryParams = PromQlQueryParams( """test{_ns_ =~ "App.*", instance = "Inst-1" }""", 100, 1, 1000))) @@ -119,6 +126,7 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture val shardKeyMatcherFn = (shardColumnFilters: Seq[ColumnFilter]) => { Seq(Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("App-1"))), Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("App-2"))))} + val localPlanner = makeLocalPlanner(shardKeyMatcherFn) val engine = new ShardKeyRegexPlanner(dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) val nonMetricShardColumns = dataset.options.nonMetricShardColumns @@ -165,6 +173,7 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture val shardKeyMatcherFn = (shardColumnFilters: Seq[ColumnFilter]) => { Seq(Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("App-1"))), Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("App-2"))))} + val localPlanner = makeLocalPlanner(shardKeyMatcherFn) val engine = new ShardKeyRegexPlanner(dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) val execPlan = engine.materialize(lp, QueryContext(origQueryParams = promQlQueryParams)) execPlan.isInstanceOf[MultiPartitionDistConcatExec] shouldEqual(true) @@ -183,6 +192,7 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture val shardKeyMatcherFn = (shardColumnFilters: Seq[ColumnFilter]) => { Seq(Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("App-1"))), Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("App-2"))))} + val localPlanner = makeLocalPlanner(shardKeyMatcherFn) val engine = new ShardKeyRegexPlanner( dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) val execPlan = engine.materialize(lp, QueryContext(origQueryParams = PromQlQueryParams("sum(heap_usage)", 100, 1, 1000))) @@ -219,6 +229,7 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture val shardKeyMatcherFn = (shardColumnFilters: Seq[ColumnFilter]) => { Seq(Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("App-1"))), Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("App-2"))))} + val localPlanner = 
makeLocalPlanner(shardKeyMatcherFn) val engine = new ShardKeyRegexPlanner( dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) val execPlan = engine.materialize(lp, QueryContext(origQueryParams = PromQlQueryParams("sum(heap_usage)", 100, 1, 1000))) @@ -233,6 +244,7 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture it("should generate Exec plan for time()") { val lp = Parser.queryToLogicalPlan("time()", 1000, 1000) val shardKeyMatcherFn = (shardColumnFilters: Seq[ColumnFilter]) => { Seq((Seq.empty)) } + val localPlanner = makeLocalPlanner(shardKeyMatcherFn) val engine = new ShardKeyRegexPlanner(dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) val execPlan = engine.materialize(lp, QueryContext(origQueryParams = promQlQueryParams)) execPlan.isInstanceOf[TimeScalarGeneratorExec] shouldEqual(true) @@ -244,6 +256,7 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture val shardKeyMatcherFn = (shardColumnFilters: Seq[ColumnFilter]) => { Seq(Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("App-1"))), Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("App-2"))))} + val localPlanner = makeLocalPlanner(shardKeyMatcherFn) val engine = new ShardKeyRegexPlanner(dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) val execPlan = engine.materialize(lp, QueryContext(origQueryParams = PromQlQueryParams( """1 + test{_ws_ = \"demo\",_ns_ =~ \"App.*\", instance = \"Inst-1\" }""", 100, 1, 1000))) @@ -269,6 +282,7 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture val shardKeyMatcherFn = (shardColumnFilters: Seq[ColumnFilter]) => { Seq(Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("App-1"))), Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("App-2"))))} + val localPlanner = makeLocalPlanner(shardKeyMatcherFn) val engine = new ShardKeyRegexPlanner(dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) val execPlan = engine.materialize(lp, QueryContext(origQueryParams = PromQlQueryParams("""test1{_ws_="demo",_ns_="App"} + test2{_ws_="demo",_ns_="App"}""".stripMargin, 100,1, 1000))) @@ -283,6 +297,7 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture val shardKeyMatcherFn = (shardColumnFilters: Seq[ColumnFilter]) => { Seq(Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("App-1"))), Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("App-2"))))} + val localPlanner = makeLocalPlanner(shardKeyMatcherFn) val engine = new ShardKeyRegexPlanner(dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) val execPlan = engine.materialize(lp, QueryContext(origQueryParams = promQlQueryParams)) execPlan.isInstanceOf[PartKeysDistConcatExec] shouldEqual (true) @@ -294,6 +309,7 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture val shardKeyMatcherFn = (shardColumnFilters: Seq[ColumnFilter]) => { Seq(Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("App-1"))), Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("App-2"))))} + val localPlanner = makeLocalPlanner(shardKeyMatcherFn) val engine = new ShardKeyRegexPlanner( dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) val execPlan = 
engine.materialize(lp, QueryContext(origQueryParams = promQlQueryParams)) execPlan.isInstanceOf[MultiPartitionReduceAggregateExec] shouldEqual(true) @@ -328,6 +344,7 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture val shardKeyMatcherFn = (shardColumnFilters: Seq[ColumnFilter]) => { Seq(Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("App-1"))), Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("App-2"))))} + val localPlanner = makeLocalPlanner(shardKeyMatcherFn) val engine = new ShardKeyRegexPlanner( dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) val execPlan = engine.materialize(lp, QueryContext(origQueryParams = promQlQueryParams)) execPlan.isInstanceOf[MultiPartitionReduceAggregateExec] shouldEqual(true) @@ -360,6 +377,7 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture val lp = Parser.queryToLogicalPlan("""test{_ws_ = "demo", _ns_ = "App-1" }""", 1000, 1000) val shardKeyMatcherFn = (shardColumnFilters: Seq[ColumnFilter]) => { Seq(Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("App-1"))))} + val localPlanner = makeLocalPlanner(shardKeyMatcherFn) val engine = new ShardKeyRegexPlanner(dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) val execPlan = engine.materialize(lp, QueryContext(origQueryParams = promQlQueryParams)) execPlan.isInstanceOf[LocalPartitionDistConcatExec] shouldEqual(true) @@ -375,6 +393,7 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture val shardKeyMatcherFn = (shardColumnFilters: Seq[ColumnFilter]) => { Seq(Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("App-1"))), Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("App-2"))))} + val localPlanner = makeLocalPlanner(shardKeyMatcherFn) val engine = new ShardKeyRegexPlanner( dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) val execPlan = engine.materialize(lp, QueryContext(origQueryParams = promQlQueryParams)) execPlan.isInstanceOf[TimeScalarGeneratorExec] shouldEqual(true) @@ -399,6 +418,7 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture it ("should generate Exec plan for Metadata Label values query") { val shardKeyMatcherFn = (shardColumnFilters: Seq[ColumnFilter]) => Seq.empty + val localPlanner = makeLocalPlanner(shardKeyMatcherFn) val engine = new ShardKeyRegexPlanner( dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) val lp = Parser.labelValuesQueryToLogicalPlan(Seq("""__metric__"""), Some("""_ws_="demo", _ns_=~".*" """), TimeStepParams(1000, 20, 5000) ) @@ -414,6 +434,7 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture it ("should generate ExecPlan for TsCardinalities") { val shardKeyMatcherFn = (shardColumnFilters: Seq[ColumnFilter]) => Nil + val localPlanner = makeLocalPlanner(shardKeyMatcherFn) val engine = new ShardKeyRegexPlanner( dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) val lp = TsCardinalities(Seq("ws_foo", "ns_bar"), 3) val promQlQueryParams = PromQlQueryParams( @@ -431,6 +452,7 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture val shardKeyMatcherFn = (shardColumnFilters: Seq[ColumnFilter]) => { Seq(Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("App-1"))), 
Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("App-2"))))} + val localPlanner = makeLocalPlanner(shardKeyMatcherFn) val engine = new ShardKeyRegexPlanner(dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) val execPlan = engine.materialize(lp, QueryContext(origQueryParams = promQlQueryParams)) execPlan.isInstanceOf[BinaryJoinExec] shouldEqual(true) @@ -460,6 +482,7 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture val shardKeyMatcherFn = (shardColumnFilters: Seq[ColumnFilter]) => { Seq(Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("App-1"))), Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("App-2"))))} + val localPlanner = makeLocalPlanner(shardKeyMatcherFn) val engine = new ShardKeyRegexPlanner(dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) val execPlan = engine.materialize(lp, QueryContext(origQueryParams = promQlQueryParams)) execPlan.isInstanceOf[BinaryJoinExec] shouldEqual(true) @@ -488,6 +511,7 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture ColumnFilter("_ns_", Equals("App-1"))), Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("App-2")))) } + val localPlanner = makeLocalPlanner(shardKeyMatcherFn) val engine = new ShardKeyRegexPlanner(dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) val execPlan = engine.materialize(lp, QueryContext(origQueryParams = promQlQueryParams)) execPlan.isInstanceOf[BinaryJoinExec] shouldEqual (true) @@ -510,6 +534,7 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture Seq(Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("App-1")))) } + val localPlanner = makeLocalPlanner(shardKeyMatcherFn) val engine = new ShardKeyRegexPlanner(dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) val execPlan = engine.materialize(lp, QueryContext(origQueryParams = promQlQueryParams)) execPlan.isInstanceOf[LocalPartitionReduceAggregateExec] shouldEqual (true) @@ -524,6 +549,7 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("App-2")))) } + val localPlanner = makeLocalPlanner(shardKeyMatcherFn) val engine = new ShardKeyRegexPlanner(dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) val execPlan = engine.materialize(lp, QueryContext(origQueryParams = promQlQueryParams)) execPlan.isInstanceOf[MultiPartitionReduceAggregateExec] shouldEqual true @@ -537,6 +563,7 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture Seq(Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("App-1")))) } + val localPlanner = makeLocalPlanner(shardKeyMatcherFn) val engine = new ShardKeyRegexPlanner(dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) val execPlan = engine.materialize(lp, QueryContext(origQueryParams = promQlQueryParams)) execPlan.isInstanceOf[LocalPartitionReduceAggregateExec] shouldEqual (true) @@ -563,6 +590,7 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture Seq(Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("App")))) } + val localPlanner = makeLocalPlanner(shardKeyMatcherFn) val engine = new ShardKeyRegexPlanner(dataset, localPlanner, 
shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) val execPlan = engine.materialize(lp, QueryContext(origQueryParams = promQlQueryParams)) execPlan.isInstanceOf[BinaryJoinExec] shouldEqual (true) @@ -578,6 +606,7 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture Seq(Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("App")))) } + val localPlanner = makeLocalPlanner(shardKeyMatcherFn) val engine = new ShardKeyRegexPlanner(dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) val execPlan = engine.materialize(lp, QueryContext(origQueryParams = promQlQueryParams)) execPlan.isInstanceOf[BinaryJoinExec] shouldEqual (true) @@ -597,6 +626,7 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture ) ) } + val localPlanner = makeLocalPlanner(shardKeyMatcherFn) val engine = new ShardKeyRegexPlanner(dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) val execPlan = engine.materialize(lp, QueryContext(origQueryParams = promQlQueryParams)) execPlan.isInstanceOf[LocalPartitionReduceAggregateExec] shouldEqual true @@ -631,6 +661,7 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture ColumnFilter("_ns_", Equals("App1"))) ) } + val localPlanner = makeLocalPlanner(shardKeyMatcherFn) val engine = new ShardKeyRegexPlanner(dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) val execPlan = engine.materialize(lp, QueryContext(origQueryParams = promQlQueryParams)) // Since we resolve to just one namespace, the entire plan should be materialized by the wrapped planner @@ -653,6 +684,7 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture ColumnFilter("_ns_", Equals("App1"))) ) } + val localPlanner = makeLocalPlanner(shardKeyMatcherFn) val engine = new ShardKeyRegexPlanner(dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) val execPlan = engine.materialize(lp, QueryContext( origQueryParams = PromQlQueryParams("""test1{_ws_="demo",_ns_="App-1"} + test2{_ws_="demo",_ns_="App-1"}""", @@ -676,6 +708,7 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture val shardKeyMatcherFn = (shardColumnFilters: Seq[ColumnFilter]) => Seq(shardColumnFilters) // Top level plan to be done in-process, both left & right operations will be executed by the wrapped query planner + val localPlanner = makeLocalPlanner(shardKeyMatcherFn) val engine = new ShardKeyRegexPlanner(dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) val execPlan = engine.materialize(lp, QueryContext(origQueryParams = promQlQueryParams)) execPlan.isInstanceOf[BinaryJoinExec] shouldEqual true @@ -703,7 +736,7 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture ) ) } - + val localPlanner = makeLocalPlanner(shardKeyMatcherFn) val engine = new ShardKeyRegexPlanner(dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) val execPlan = engine.materialize(lp, QueryContext(origQueryParams = promQlQueryParams)) println(execPlan.printTree()) @@ -740,6 +773,7 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture ) ) } + val localPlanner = makeLocalPlanner(shardKeyMatcherFn) val engine = new ShardKeyRegexPlanner(dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) val execPlan = 
engine.materialize(lp, QueryContext(origQueryParams = promQlQueryParams)) execPlan.isInstanceOf[MultiPartitionDistConcatExec] shouldEqual (true) @@ -764,7 +798,7 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture val lp = Parser.queryToLogicalPlan("""ln(test1{_ws_ = "demo", _ns_ = "App-1"})""", 1000, 1000) val shardKeyMatcherFn = (shardColumnFilters: Seq[ColumnFilter]) => Seq(shardColumnFilters) - + val localPlanner = makeLocalPlanner(shardKeyMatcherFn) val engine = new ShardKeyRegexPlanner(dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) val execPlan = engine.materialize(lp, QueryContext(origQueryParams = PromQlQueryParams("""ln(test1{_ws_="demo",_ns_="App-1"})""", 100, 1, 1000))) @@ -785,7 +819,7 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture val lp = Parser.queryToLogicalPlan("""sum(ln(test1{_ws_ = "demo", _ns_ = "App-1"}))""", 1000, 1000) val shardKeyMatcherFn = (shardColumnFilters: Seq[ColumnFilter]) => Seq(shardColumnFilters) - + val localPlanner = makeLocalPlanner(shardKeyMatcherFn) val engine = new ShardKeyRegexPlanner(dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) val execPlan = engine.materialize(lp, QueryContext(origQueryParams = PromQlQueryParams("""sum(ln(test1{_ws_="demo",_ns_="App-1"}))""", 100, 1, 1000))) @@ -814,7 +848,7 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture ) ) } - + val localPlanner = makeLocalPlanner(shardKeyMatcherFn) val engine = new ShardKeyRegexPlanner(dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) val execPlan = engine.materialize(lp, QueryContext(origQueryParams = promQlQueryParams)) execPlan.isInstanceOf[MultiPartitionDistConcatExec] shouldEqual (true) @@ -851,7 +885,7 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture ) ) } - + val localPlanner = makeLocalPlanner(shardKeyMatcherFn) val engine = new ShardKeyRegexPlanner(dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) val execPlan = engine.materialize(lp, QueryContext(origQueryParams = promQlQueryParams)) @@ -890,7 +924,7 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture ) ) } - + val localPlanner = makeLocalPlanner(shardKeyMatcherFn) val engine = new ShardKeyRegexPlanner(dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) // val execPlan = engine.materialize(lp, QueryContext(origQueryParams = promQlQueryParams)) val execPlan = engine.materialize(lp, QueryContext(origQueryParams = promQlQueryParams)) @@ -934,7 +968,7 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture ) ) } - + val localPlanner = makeLocalPlanner(shardKeyMatcherFn) val engine = new ShardKeyRegexPlanner(dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) val execPlan = engine.materialize(lp, QueryContext(origQueryParams = promQlQueryParams)) @@ -972,7 +1006,7 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture Seq(Seq(ColumnFilter("_ws_", Equals("demo")), ColumnFilter("_ns_", Equals("App-1")))) } - + val localPlanner = makeLocalPlanner(shardKeyMatcherFn) val engine = new ShardKeyRegexPlanner(dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) val execPlan = engine.materialize(lp, QueryContext(origQueryParams = 
PromQlQueryParams("""absent(foo{_ws_="Demo", _ns_ =~ "App-1"} , "foo", "," "instance", "job")""", @@ -991,7 +1025,7 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture val lp = Parser.queryToLogicalPlan("""vector(scalar(sum(foo{_ws_="demo", _ns_ = "App-1"})))""", 1000, 1000) val shardKeyMatcherFn = (shardColumnFilters: Seq[ColumnFilter]) => Seq(shardColumnFilters) - + val localPlanner = makeLocalPlanner(shardKeyMatcherFn) val engine = new ShardKeyRegexPlanner(dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) val execPlan = engine.materialize(lp, QueryContext(origQueryParams = PromQlQueryParams("""vector(scalar(sum(foo{_ws_="demo",_ns_="App-1"})))""", 100, 1, 1000))) @@ -1022,7 +1056,7 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture ) ) } - + val localPlanner = makeLocalPlanner(shardKeyMatcherFn) val engine = new ShardKeyRegexPlanner(dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) val execPlan = engine.materialize(lp, QueryContext(origQueryParams = promQlQueryParams)) @@ -1050,6 +1084,7 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture } it("should materialize absent function mapper correctly with implicit WS") { + // TODO(a_theimer): need to verify it's correct to remove these tests. { // absent of sum_over_time: _ws_ in query val lp = Parser.queryToLogicalPlan( @@ -1058,23 +1093,25 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture val shardKeyMatcherFn = (shardColumnFilters: Seq[ColumnFilter]) => { Seq(Seq(ColumnFilter("_ns_", Equals("App-1")), ColumnFilter("_ws_", Equals("demo")))) } + val localPlanner = makeLocalPlanner(shardKeyMatcherFn) val engine = new ShardKeyRegexPlanner(dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) val execPlan = engine.materialize(lp, QueryContext(origQueryParams = promQlQueryParams)) execPlan.asInstanceOf[LocalPartitionReduceAggregateExec].rangeVectorTransformers.head .asInstanceOf[AbsentFunctionMapper].columnFilter.isEmpty shouldEqual true } - { - // absent of sum_over_time: _ws_ NOT in query - val lp = Parser.queryToLogicalPlan("absent(sum_over_time(test{_ns_ = \"App-1\", instance = \"Inst-1\" }[5m]))", - 1000, 1000) - val shardKeyMatcherFn = (shardColumnFilters: Seq[ColumnFilter]) => { - Seq(Seq(ColumnFilter("_ns_", Equals("App-1")), ColumnFilter("_ws_", Equals("demo")))) - } - val engine = new ShardKeyRegexPlanner(dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) - val execPlan = engine.materialize(lp, QueryContext(origQueryParams = promQlQueryParams)) - execPlan.asInstanceOf[LocalPartitionReduceAggregateExec].rangeVectorTransformers.head - .asInstanceOf[AbsentFunctionMapper].columnFilter.isEmpty shouldEqual true - } +// { +// // absent of sum_over_time: _ws_ NOT in query +// val lp = Parser.queryToLogicalPlan("absent(sum_over_time(test{_ns_ = \"App-1\", instance = \"Inst-1\" }[5m]))", +// 1000, 1000) +// val shardKeyMatcherFn = (shardColumnFilters: Seq[ColumnFilter]) => { +// Seq(Seq(ColumnFilter("_ns_", Equals("App-1")), ColumnFilter("_ws_", Equals("demo")))) +// } +// val localPlanner = makeLocalPlanner(shardKeyMatcherFn) +// val engine = new ShardKeyRegexPlanner(dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) +// val execPlan = engine.materialize(lp, QueryContext(origQueryParams = promQlQueryParams)) +// 
execPlan.asInstanceOf[LocalPartitionReduceAggregateExec].rangeVectorTransformers.head +// .asInstanceOf[AbsentFunctionMapper].columnFilter.isEmpty shouldEqual true +// } { // absent: _ws_ in query val lp = Parser.queryToLogicalPlan( @@ -1083,23 +1120,24 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture val shardKeyMatcherFn = (shardColumnFilters: Seq[ColumnFilter]) => { Seq(Seq(ColumnFilter("_ns_", Equals("App-1")), ColumnFilter("_ws_", Equals("demo")))) } + val localPlanner = makeLocalPlanner(shardKeyMatcherFn) val engine = new ShardKeyRegexPlanner(dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) val execPlan = engine.materialize(lp, QueryContext(origQueryParams = promQlQueryParams)) execPlan.asInstanceOf[LocalPartitionReduceAggregateExec].rangeVectorTransformers.head .asInstanceOf[AbsentFunctionMapper].columnFilter.size shouldEqual 4 // _ws_, _ns_, __name__ & instance } - { - // absent: _ws_ NOT in query - val lp = Parser.queryToLogicalPlan("absent(test{_ns_ = \"App-1\", instance = \"Inst-1\" })", - 1000, 1000) - val shardKeyMatcherFn = (shardColumnFilters: Seq[ColumnFilter]) => { - Seq(Seq(ColumnFilter("_ns_", Equals("App-1")), ColumnFilter("_ws_", Equals("demo")))) - } - val engine = new ShardKeyRegexPlanner(dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) - val execPlan = engine.materialize(lp, QueryContext(origQueryParams = promQlQueryParams)) - execPlan.asInstanceOf[LocalPartitionReduceAggregateExec].rangeVectorTransformers.head - .asInstanceOf[AbsentFunctionMapper].columnFilter.size shouldEqual 4 // _ws_, _ns_, __name__ & instance - } +// { +// // absent: _ws_ NOT in query +// val lp = Parser.queryToLogicalPlan("absent(test{_ns_ = \"App-1\", instance = \"Inst-1\" })", +// 1000, 1000) +// val shardKeyMatcherFn = (shardColumnFilters: Seq[ColumnFilter]) => { +// Seq(Seq(ColumnFilter("_ns_", Equals("App-1")), ColumnFilter("_ws_", Equals("demo")))) +// } +// val engine = new ShardKeyRegexPlanner(dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) +// val execPlan = engine.materialize(lp, QueryContext(origQueryParams = promQlQueryParams)) +// execPlan.asInstanceOf[LocalPartitionReduceAggregateExec].rangeVectorTransformers.head +// .asInstanceOf[AbsentFunctionMapper].columnFilter.size shouldEqual 4 // _ws_, _ns_, __name__ & instance +// } { // absent_over_time: _ws_ in query val lp = Parser.queryToLogicalPlan( @@ -1108,25 +1146,27 @@ class ShardKeyRegexPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture val shardKeyMatcherFn = (shardColumnFilters: Seq[ColumnFilter]) => { Seq(Seq(ColumnFilter("_ns_", Equals("App-1")), ColumnFilter("_ws_", Equals("demo")))) } + val localPlanner = makeLocalPlanner(shardKeyMatcherFn) val engine = new ShardKeyRegexPlanner(dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) val execPlan = engine.materialize(lp, QueryContext(origQueryParams = promQlQueryParams)) execPlan.asInstanceOf[LocalPartitionReduceAggregateExec].rangeVectorTransformers.head .asInstanceOf[AbsentFunctionMapper].columnFilter.size shouldEqual 4 // _ws_, _ns_, __name__ & instance } - { - // absent_over_time: _ws_ NOT in query - val lp = Parser.queryToLogicalPlan("absent_over_time(test{_ns_ = \"App-1\", instance = \"Inst-1\" }[5m])", - 1000, 1000) - val shardKeyMatcherFn = (shardColumnFilters: Seq[ColumnFilter]) => { - Seq(Seq(ColumnFilter("_ns_", Equals("App-1")), ColumnFilter("_ws_", Equals("demo")))) - 
} - val engine = new ShardKeyRegexPlanner(dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) - val execPlan = engine.materialize(lp, QueryContext(origQueryParams = promQlQueryParams)) - execPlan.asInstanceOf[LocalPartitionReduceAggregateExec].rangeVectorTransformers.head - .asInstanceOf[AbsentFunctionMapper].columnFilter.size shouldEqual 4 // _ws_, _ns_, __name__ & instance - } +// { +// // absent_over_time: _ws_ NOT in query +// val lp = Parser.queryToLogicalPlan("absent_over_time(test{_ns_ = \"App-1\", instance = \"Inst-1\" }[5m])", +// 1000, 1000) +// val shardKeyMatcherFn = (shardColumnFilters: Seq[ColumnFilter]) => { +// Seq(Seq(ColumnFilter("_ns_", Equals("App-1")), ColumnFilter("_ws_", Equals("demo")))) +// } +// val engine = new ShardKeyRegexPlanner(dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) +// val execPlan = engine.materialize(lp, QueryContext(origQueryParams = promQlQueryParams)) +// execPlan.asInstanceOf[LocalPartitionReduceAggregateExec].rangeVectorTransformers.head +// .asInstanceOf[AbsentFunctionMapper].columnFilter.size shouldEqual 4 // _ws_, _ns_, __name__ & instance +// } } + // TODO(a_theimer): need to confirm whether it is safe to remove this test. it("should materialize instant functions with args correctly with implicit WS") { // "expected" string is the printTree() of the first child (isInstanceOf[DistConcat] is asserted below-- // other children are identical except for their source shards) val queryExpectedPairs = Seq( // inst func with scalar() arg: _ws_ in query ("""clamp_max(test{_ws_="demo",_ns_="App-1"}, scalar(sc_test{_ns_="App-1"}))""", """T~InstantVectorFunctionMapper(function=ClampMax) |-FA1~ |-T~ScalarFunctionMapper(function=Scalar, funcParams=List()) |--E~LocalPartitionDistConcatExec() on ActorPlanDispatcher(Actor[akka://default/system/testProbe-1#1006757749],raw) |---T~PeriodicSamplesMapper(start=1000000, step=1000000, end=1000000, window=None, functionId=None, rawSource=true, offsetMs=None) |----E~MultiSchemaPartitionsExec(dataset=timeseries, shard=14, chunkMethod=TimeRangeChunkScan(700000,1000000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(App-1)), ColumnFilter(_metric_,Equals(sc_test))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testProbe-1#1006757749],raw) |---T~PeriodicSamplesMapper(start=1000000, step=1000000, end=1000000, window=None, functionId=None, rawSource=true, offsetMs=None) |----E~MultiSchemaPartitionsExec(dataset=timeseries, shard=30, chunkMethod=TimeRangeChunkScan(700000,1000000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(App-1)), ColumnFilter(_metric_,Equals(sc_test))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testProbe-1#1006757749],raw) |-T~PeriodicSamplesMapper(start=1000000, step=1000000, end=1000000, window=None, functionId=None, rawSource=true, offsetMs=None) |--E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(700000,1000000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(App-1)), ColumnFilter(_metric_,Equals(test))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testProbe-1#1006757749],raw)""".stripMargin), - // inst func with scalar() arg: _ws_ NOT in query - ("""clamp_max(test{_ns_="App-1"}, scalar(sc_test{_ns_="App-1"}))""", - """T~InstantVectorFunctionMapper(function=ClampMax) - |-FA1~ - |-T~ScalarFunctionMapper(function=Scalar, funcParams=List()) - |--E~LocalPartitionDistConcatExec() on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) - |---T~PeriodicSamplesMapper(start=1000000, step=1000000, end=1000000, window=None, functionId=None, rawSource=true, offsetMs=None) - |----E~MultiSchemaPartitionsExec(dataset=timeseries, shard=14, chunkMethod=TimeRangeChunkScan(700000,1000000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(App-1)), ColumnFilter(_metric_,Equals(sc_test))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) - |---T~PeriodicSamplesMapper(start=1000000, step=1000000, end=1000000, window=None, functionId=None, rawSource=true, offsetMs=None) - |----E~MultiSchemaPartitionsExec(dataset=timeseries, shard=30, chunkMethod=TimeRangeChunkScan(700000,1000000), filters=List(ColumnFilter(_ws_,Equals(demo)),
ColumnFilter(_ns_,Equals(App-1)), ColumnFilter(_metric_,Equals(sc_test))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) - |-T~PeriodicSamplesMapper(start=1000000, step=1000000, end=1000000, window=None, functionId=None, rawSource=true, offsetMs=None) - |--E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(700000,1000000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(App-1)), ColumnFilter(_metric_,Equals(test))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw)""".stripMargin) +// // inst func with scalar() arg: _ws_ NOT in query +// ("""clamp_max(test{_ns_="App-1"}, scalar(sc_test{_ns_="App-1"}))""", +// """T~InstantVectorFunctionMapper(function=ClampMax) +// |-FA1~ +// |-T~ScalarFunctionMapper(function=Scalar, funcParams=List()) +// |--E~LocalPartitionDistConcatExec() on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) +// |---T~PeriodicSamplesMapper(start=1000000, step=1000000, end=1000000, window=None, functionId=None, rawSource=true, offsetMs=None) +// |----E~MultiSchemaPartitionsExec(dataset=timeseries, shard=14, chunkMethod=TimeRangeChunkScan(700000,1000000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(App-1)), ColumnFilter(_metric_,Equals(sc_test))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) +// |---T~PeriodicSamplesMapper(start=1000000, step=1000000, end=1000000, window=None, functionId=None, rawSource=true, offsetMs=None) +// |----E~MultiSchemaPartitionsExec(dataset=timeseries, shard=30, chunkMethod=TimeRangeChunkScan(700000,1000000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(App-1)), ColumnFilter(_metric_,Equals(sc_test))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw) +// |-T~PeriodicSamplesMapper(start=1000000, step=1000000, end=1000000, window=None, functionId=None, rawSource=true, offsetMs=None) +// |--E~MultiSchemaPartitionsExec(dataset=timeseries, shard=0, chunkMethod=TimeRangeChunkScan(700000,1000000), filters=List(ColumnFilter(_ws_,Equals(demo)), ColumnFilter(_ns_,Equals(App-1)), ColumnFilter(_metric_,Equals(test))), colName=None, schema=None) on ActorPlanDispatcher(Actor[akka://default/system/testActor],raw)""".stripMargin) ) val shardKeyMatcherFn = (shardColumnFilters: Seq[ColumnFilter]) => { Seq(Seq(ColumnFilter("_ns_", Equals("App-1")), ColumnFilter("_ws_", Equals("demo")))) } + val localPlanner = makeLocalPlanner(shardKeyMatcherFn) val engine = new ShardKeyRegexPlanner(dataset, localPlanner, shardKeyMatcherFn, simplePartitionLocationProvider, queryConfig) queryExpectedPairs.foreach{ case (query, expected) => diff --git a/coordinator/src/test/scala/filodb.coordinator/queryplanner/SingleClusterPlannerSpec.scala b/coordinator/src/test/scala/filodb.coordinator/queryplanner/SingleClusterPlannerSpec.scala index 1ca363ee9a..b0b12dfa0e 100644 --- a/coordinator/src/test/scala/filodb.coordinator/queryplanner/SingleClusterPlannerSpec.scala +++ b/coordinator/src/test/scala/filodb.coordinator/queryplanner/SingleClusterPlannerSpec.scala @@ -24,6 +24,7 @@ import filodb.query.LogicalPlan.getRawSeriesFilters import filodb.query.exec.aggregator.{CountRowAggregator, SumRowAggregator} import org.scalatest.exceptions.TestFailedException + import scala.concurrent.duration._ class SingleClusterPlannerSpec extends AnyFunSpec with Matchers with ScalaFutures with 
PlanValidationSpec { @@ -40,11 +41,19 @@ class SingleClusterPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture private val dsRef = dataset.ref private val schemas = Schemas(dataset.schema) + private def regexPipeShardKeyMatcher(filters: Seq[ColumnFilter]) = { + val values = filters.map { filter => + filter.column -> filter.filter.valuesStrings.toList.head.toString.split('|') + } + val triplets = QueryUtils.combinations(values.map(_._2.toSeq)) + triplets.map(triplet => values.map(_._1).zip(triplet).map(p => ColumnFilter(p._1, Equals(p._2)))) + } + private val config = ConfigFactory.load("application_test.conf") private val queryConfig = QueryConfig(config.getConfig("filodb.query")) private val engine = new SingleClusterPlanner(dataset, schemas, mapperRef, earliestRetainedTimestampFn = 0, - queryConfig, "raw") + queryConfig, "raw", shardKeyMatcher = regexPipeShardKeyMatcher) /* This is the PromQL @@ -1210,6 +1219,7 @@ class SingleClusterPlannerSpec extends AnyFunSpec with Matchers with ScalaFuture } catch { case e: TestFailedException => println(s"Plan validation failed for query: $query") + println(execPlan.printTree()) throw e } } diff --git a/core/src/main/scala/filodb.core/query/QueryUtils.scala b/core/src/main/scala/filodb.core/query/QueryUtils.scala deleted file mode 100644 index 0c99d0f25f..0000000000 --- a/core/src/main/scala/filodb.core/query/QueryUtils.scala +++ /dev/null @@ -1,82 +0,0 @@ -package filodb.core.query - -import scala.collection.mutable -import scala.collection.mutable.ArrayBuffer - -/** - * Storage for miscellaneous utility functions. - */ -object QueryUtils { - /** - * Returns all possible sets of elements where exactly one element is - * chosen from each of the argument sequences. - * - * @param choices: all sequences should have at least one element. - * @return ordered sequences; each sequence is ordered such that the element - * at index i is chosen from the ith argument sequence. - */ - def combinations[T](choices: Seq[Seq[T]]): Seq[Seq[T]] = { - val running = new mutable.ArraySeq[T](choices.size) - val result = new mutable.ArrayBuffer[Seq[T]] - def helper(iChoice: Int): Unit = { - if (iChoice == choices.size) { - result.append(Nil ++ running) - return - } - for (choice <- choices(iChoice)) { - running(iChoice) = choice - helper(iChoice + 1) - } - } - helper(0) - result - } - - /** - * Returns the set of unescaped special regex chars in the argument string. - * Special chars are: . ? + * | { } [ ] ( ) " \ - */ - def getUnescapedSpecialRegexChars(str: String): Set[Char] = { - // Match special chars preceded by any count of backslash pairs and either - // some non-backslash character or the beginning of a line. - val regex = "(?<=(^|[^\\\\]))(\\\\\\\\)*([.?+*|{}\\[\\]()\"\\\\])".r - regex.findAllMatchIn(str) - .map(_.group(3)) // get the special char -- third capture group - .map(_(0)) // convert the string to a char - .toSet - } - - /** - * Returns true iff the argument string contains no unescaped special regex characters. - * The pipe character ('|') is excluded from the set of special regex characters. - */ - def isPipeOnlyRegex(str: String): Boolean = { - getUnescapedSpecialRegexChars(str).diff(Set('|')).isEmpty - } - - /** - * Splits a string on unescaped pipe characters. - */ - def splitAtUnescapedPipes(str: String): Seq[String] = { - // match pipes preceded by any count of backslash pairs and either - // some non-backslash character or the beginning of a line. 
diff --git a/core/src/test/scala/filodb.core/query/QueryUtilsSpec.scala b/core/src/test/scala/filodb.core/query/QueryUtilsSpec.scala
deleted file mode 100644
index fc0bac7e84..0000000000
--- a/core/src/test/scala/filodb.core/query/QueryUtilsSpec.scala
+++ /dev/null
@@ -1,36 +0,0 @@
-package filodb.core.query
-
-import org.scalatest.funspec.AnyFunSpec
-import org.scalatest.matchers.should.Matchers
-
-class QueryUtilsSpec extends AnyFunSpec with Matchers {
-  it("should correctly identify the set of unescaped special regex chars") {
-    val tests = Seq(
-      ("a.b?c+d*e|f{g}h[i]j(k)?l\"m\\", Set('.', '?', '+', '*', '|', '{', '}', '[', ']', '(', ')', '"', '\\')),
-      ("\\a.b?c+d*e|\\f{g}h[i]j(k)?l\"\\m\\", Set('.', '?', '+', '*', '|', '{', '}', '[', ']', '(', ')', '"', '\\')),
-      ("a\\.b\\?c\\+d\\*e\\|f\\{g\\}h\\[i\\]j\\(k\\)\\?l\\\"m\\\\", Set('\\')),
-      ("foo|.*", Set('.', '*', '|')),
-      ("foo\\|.*", Set('.', '*', '\\')),
-      ("foo\\\\|.*", Set('.', '*', '|')),
-      ("foo\\\\\\|.*", Set('.', '*', '\\'))
-    )
-    for ((string, expected) <- tests) {
-      val res = QueryUtils.getUnescapedSpecialRegexChars(string)
-      res shouldEqual expected
-    }
-  }
-
-  it("should correctly split strings at unescaped pipes") {
-    val tests = Seq(
-      ("this|is|a|test", Seq("this", "is", "a", "test")),
-      ("this|is|a||test", Seq("this", "is", "a", "", "test")),
-      ("this\\|is|a|test", Seq("this\\|is", "a", "test")),
-      ("this\\\\|is|a|test", Seq("this\\\\", "is", "a", "test")),
-      ("||this\\|is|\\+a|test||", Seq("", "", "this\\|is", "\\+a", "test", "", "")),
-    )
-    for ((string, expected) <- tests) {
-      val res = QueryUtils.splitAtUnescapedPipes(string)
-      res shouldEqual expected
-    }
-  }
-}
diff --git a/prometheus/src/main/scala/filodb/prometheus/ast/Vectors.scala b/prometheus/src/main/scala/filodb/prometheus/ast/Vectors.scala
index 1511d1a3bf..ccdd36d292 100644
--- a/prometheus/src/main/scala/filodb/prometheus/ast/Vectors.scala
+++ b/prometheus/src/main/scala/filodb/prometheus/ast/Vectors.scala
@@ -3,7 +3,7 @@ package filodb.prometheus.ast
 import scala.util.Try
 
 import filodb.core.{query, GlobalConfig}
-import filodb.core.query.{ColumnFilter, QueryUtils, RangeParams}
+import filodb.core.query.{ColumnFilter, RangeParams}
 import filodb.prometheus.parse.Parser
 import filodb.query._
 
@@ -241,14 +241,9 @@ sealed trait Vector extends Expression {
       case NotRegexMatch => require(labelValue.length <= Parser.REGEX_MAX_LEN,
         s"Regular expression filters should be <= ${Parser.REGEX_MAX_LEN} characters")
         ColumnFilter(labelMatch.label, query.Filter.NotEqualsRegex(labelValue))
-      case RegexMatch =>
-        // Relax the length limit only for matchers that contain at most the "|" special character.
-        if (!QueryUtils.isPipeOnlyRegex(labelValue)) {
-          require(labelValue.length <= Parser.REGEX_MAX_LEN,
-            s"Regular expression filters should be <= ${Parser.REGEX_MAX_LEN} characters " +
-            s"when non-`|` special characters are used.")
-        }
-        ColumnFilter(labelMatch.label, query.Filter.EqualsRegex(labelValue))
+      case RegexMatch => require(labelValue.length <= Parser.REGEX_MAX_LEN,
+        s"Regular expression filters should be <= ${Parser.REGEX_MAX_LEN} characters.")
+        ColumnFilter(labelMatch.label, query.Filter.EqualsRegex(labelValue))
       case NotEqual(false) => ColumnFilter(labelMatch.label, query.Filter.NotEquals(labelValue))
       case other: Any => throw new IllegalArgumentException(s"Unknown match operator $other")
     }
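The Vectors.scala hunk above removes the pipe-only exemption: an EqualsRegex label value now always counts against Parser.REGEX_MAX_LEN, even when it is nothing but '|'-joined literals. A rough sketch of the new behavior in isolation; REGEX_MAX_LEN's value and the object and method names here are placeholders, not the parser's actual constants:

import scala.util.Try

object RegexLenCheckSketch extends App {
  val REGEX_MAX_LEN = 454 // placeholder value; the real cap is Parser.REGEX_MAX_LEN

  // Mirrors the new RegexMatch branch: one unconditional length check.
  def checkEqualsRegex(labelValue: String): Unit =
    require(labelValue.length <= REGEX_MAX_LEN,
      s"Regular expression filters should be <= $REGEX_MAX_LEN characters.")

  checkEqualsRegex("App-1|App-2") // short matcher: still accepted

  // A long pipe-only matcher was previously exempt from the cap; it now throws.
  val longPipes = (1 to 200).map(i => s"App-$i").mkString("|")
  println(s"long pipe-only matcher rejected: ${Try(checkEqualsRegex(longPipes)).isFailure}")
}

This shifts responsibility for long pipe-joined shard-key lists from the parser's length exemption to the planner-side shardKeyMatcher expansion introduced elsewhere in this patch.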
diff --git a/query/src/main/scala/filodb/query/exec/StitchRvsExec.scala b/query/src/main/scala/filodb/query/exec/StitchRvsExec.scala
index b434392b87..52d3768a65 100644
--- a/query/src/main/scala/filodb/query/exec/StitchRvsExec.scala
+++ b/query/src/main/scala/filodb/query/exec/StitchRvsExec.scala
@@ -98,7 +98,7 @@ final case class StitchRvsExec(queryContext: QueryContext,
 
   outputRvRange match {
     case Some(RvRange(startMs, stepMs, endMs)) =>
-      require(startMs <= endMs && stepMs > 0, "RvRange start <= end and step > 0")
+      require(startMs <= endMs && stepMs > 0, s"RvRange start <= end and step > 0; got start=$startMs, step=$stepMs, end=$endMs")
     case None =>
   }
   protected def args: String = ""
diff --git a/query/src/test/scala/filodb/query/PlanValidationSpec.scala b/query/src/test/scala/filodb/query/PlanValidationSpec.scala
index acb80e13b7..a292ecb16d 100644
--- a/query/src/test/scala/filodb/query/PlanValidationSpec.scala
+++ b/query/src/test/scala/filodb/query/PlanValidationSpec.scala
@@ -61,6 +61,13 @@ trait PlanValidationSpec extends Matchers {
         (denoisedPlan, denoisedExpected)
       }
     }
 
+    if (planString != expectedString) {
+      println("PLAN MISMATCH")
+      println("EXPECTED")
+      println(expectedString)
+      println("ACTUAL")
+      println(planString)
+    }
     planString shouldEqual expectedString
   }
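For context on the SingleClusterPlannerSpec change earlier in this patch: the planner's new shardKeyMatcher hook lets pipe-joined shard-key values be expanded into concrete Equals filter sets before shard resolution. A minimal standalone sketch mirroring regexPipeShardKeyMatcher; the object name and the expand helper are illustrative (the real test routes through QueryUtils.combinations), while the valuesStrings access chain matches the spec code:

import filodb.core.query.ColumnFilter
import filodb.core.query.Filter.{Equals, EqualsRegex}

object ShardKeyExpansionSketch extends App {
  // Split each shard-key value at '|', then emit one Equals-filter set
  // per combination of the split values.
  def expand(filters: Seq[ColumnFilter]): Seq[Seq[ColumnFilter]] = {
    val values = filters.map { f =>
      f.column -> f.filter.valuesStrings.toList.head.toString.split('|').toSeq
    }
    values.foldLeft(Seq(Seq.empty[ColumnFilter])) { case (acc, (col, vals)) =>
      for (prefix <- acc; v <- vals) yield prefix :+ ColumnFilter(col, Equals(v))
    }
  }

  expand(Seq(
    ColumnFilter("_ws_", EqualsRegex("demo")),
    ColumnFilter("_ns_", EqualsRegex("App-1|App-2"))))
    .foreach(println)
  // Prints one expanded shard key per line, e.g. a (demo, App-1) filter set
  // followed by a (demo, App-2) filter set.
}

Each expanded filter set is a fully concrete shard key, which is what allows the planner to resolve shards for a regex selector that would otherwise not map to any single shard.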