Skip to content

Commit

Permalink
[GOBBLIN-2185] Recommend GoT Dynamic Auto-Scaling using heuristics based on `WorkUnitsSizeSummary` (#4087)
Browse files Browse the repository at this point in the history

* Implement GoT Dynamic auto-scaling PoC of `WorkUnitsSizeSummary`-driven linear heuristic

* Do not generate `@Setter`s for `@Data` POJOs, for which deserialization support prevents having `final` members

* Align choice of directory between `FsScalingDirectivesRecipient` and `FsScalingDirectivesSource`, and ensure various handles get closed
  • Loading branch information
phet authored and Will-Lo committed Dec 24, 2024
1 parent 6920851 commit 4e589a6
Show file tree
Hide file tree
Showing 43 changed files with 1,085 additions and 112 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -208,6 +208,8 @@ public class ConfigurationKeys {
public static final String DEFAULT_FORK_OPERATOR_CLASS = "org.apache.gobblin.fork.IdentityForkOperator";
public static final String JOB_COMMIT_POLICY_KEY = "job.commit.policy";
public static final String DEFAULT_JOB_COMMIT_POLICY = "full";
public static final String JOB_TARGET_COMPLETION_DURATION_IN_MINUTES_KEY = "job.duration.target.completion.in.minutes";
public static final long DEFAULT_JOB_TARGET_COMPLETION_DURATION_IN_MINUTES = 360;

public static final String PARTIAL_FAIL_TASK_FAILS_JOB_COMMIT = "job.commit.partial.fail.task.fails.job.commit";
// If true, commit of different datasets will be performed in parallel
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1060,7 +1060,8 @@ private void cleanLeftoverStagingData(WorkUnitStream workUnits, JobState jobStat

try {
if (!canCleanStagingData(jobState)) {
LOG.error("Job " + jobState.getJobName() + " has unfinished commit sequences. Will not clean up staging data.");
// TODO: decide whether should be `.warn`, stay as `.info`, or change back to `.error`
LOG.info("Job " + jobState.getJobName() + " has unfinished commit sequences. Will not clean up staging data.");
return;
}
} catch (IOException e) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,10 +17,12 @@

package org.apache.gobblin.runtime;

import lombok.AccessLevel;
import lombok.Data;
import lombok.NoArgsConstructor;
import lombok.NonNull;
import lombok.RequiredArgsConstructor;
import lombok.Setter;

import org.apache.gobblin.metrics.DatasetMetric;

Expand All @@ -30,6 +32,7 @@
* that can be reported as a single event in the commit phase.
*/
@Data
@Setter(AccessLevel.NONE) // NOTE: non-`final` members solely to enable deserialization
@RequiredArgsConstructor
@NoArgsConstructor // IMPORTANT: for jackson (de)serialization
public class DatasetTaskSummary {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -69,4 +69,7 @@ public interface GobblinTemporalConfigurationKeys {
* Prefix for Gobblin-on-Temporal Dynamic Scaling
*/
String DYNAMIC_SCALING_PREFIX = PREFIX + "dynamic.scaling.";

String DYNAMIC_SCALING_POLLING_INTERVAL_SECS = DYNAMIC_SCALING_PREFIX + "polling.interval.seconds";
int DEFAULT_DYNAMIC_SCALING_POLLING_INTERVAL_SECS = 60;
}
Original file line number Diff line number Diff line change
Expand Up @@ -92,6 +92,7 @@ public class GobblinTemporalClusterManager implements ApplicationLauncher, Stand
@Getter
protected final FileSystem fs;

@Getter
protected final String applicationId;

@Getter
Expand Down Expand Up @@ -285,8 +286,11 @@ public Collection<StandardMetrics> getStandardMetricsCollection() {
* comment lifted from {@link org.apache.gobblin.cluster.GobblinClusterManager}
* TODO for now the cluster id is hardcoded to 1 both here and in the {@link GobblinTaskRunner}. In the future, the
* cluster id should be created by the {@link GobblinTemporalClusterManager} and passed to each {@link GobblinTaskRunner}
*
* NOTE: renamed from `getApplicationId` to avoid shadowing the `@Getter`-generated instance method of that name
* TODO: unravel what to make of the comment above. as it is, `GobblinTemporalApplicationMaster#main` is what runs, NOT `GobblinTemporalClusterManager#main`
*/
private static String getApplicationId() {
private static String getApplicationIdStatic() {
return "1";
}

Expand Down Expand Up @@ -332,7 +336,7 @@ public static void main(String[] args) throws Exception {
}

try (GobblinTemporalClusterManager GobblinTemporalClusterManager = new GobblinTemporalClusterManager(
cmd.getOptionValue(GobblinClusterConfigurationKeys.APPLICATION_NAME_OPTION_NAME), getApplicationId(),
cmd.getOptionValue(GobblinClusterConfigurationKeys.APPLICATION_NAME_OPTION_NAME), getApplicationIdStatic(),
config, Optional.<Path>absent())) {
GobblinTemporalClusterManager.start();
}
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.gobblin.temporal.ddm.activity;

import java.util.List;
import java.util.Properties;

import io.temporal.activity.ActivityInterface;
import io.temporal.activity.ActivityMethod;

import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.temporal.ddm.work.TimeBudget;
import org.apache.gobblin.temporal.ddm.work.WorkUnitsSizeSummary;
import org.apache.gobblin.temporal.dynamic.ScalingDirective;



/**
 * Activity to suggest the Dynamic Scaling warranted to complete processing of some amount of {@link org.apache.gobblin.source.workunit.WorkUnit}s
 * within {@link TimeBudget}, through a combination of Workforce auto-scaling and Worker right-sizing.
 *
 * As with all {@link ActivityInterface}s, this is stateless, so the {@link ScalingDirective}(s) returned "stand alone", presuming nothing of current
 * {@link org.apache.gobblin.temporal.dynamic.WorkforceStaffing}. It thus falls to the caller to coordinate whether to apply the directive(s) as-is,
 * or first to adjust in light of scaling levels already in the current {@link org.apache.gobblin.temporal.dynamic.WorkforcePlan}.
 */
@ActivityInterface
public interface RecommendScalingForWorkUnits {

  /**
   * Recommend the {@link ScalingDirective}s to process the {@link WorkUnit}s of {@link WorkUnitsSizeSummary} within {@link TimeBudget}.
   *
   * NOTE(review): as a Temporal activity method, implementations should presumably be deterministic w.r.t. their inputs
   * (aside from timestamping) so retried invocations yield equivalent recommendations - confirm against the worker registration.
   *
   * @param remainingWork may characterize a newly-generated batch of `WorkUnit`s for which no processing has yet begun - or be the sub-portion
   *                      of an in-progress job that still awaits processing
   * @param sourceClass contextualizes the `WorkUnitsSizeSummary` and should name a {@link org.apache.gobblin.source.Source}
   * @param timeBudget the remaining target duration for processing the summarized `WorkUnit`s
   * @param jobProps all job props, to either guide the recommendation or better contextualize the nature of the `remainingWork`
   * @return the {@link ScalingDirective}s to process the summarized {@link WorkUnit}s within {@link TimeBudget}
   */
  @ActivityMethod
  List<ScalingDirective> recommendScaling(WorkUnitsSizeSummary remainingWork, String sourceClass, TimeBudget timeBudget, Properties jobProps);
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,77 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.gobblin.temporal.ddm.activity.impl;

import java.util.Arrays;
import java.util.List;
import java.util.Optional;
import java.util.Properties;

import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.runtime.JobState;
import org.apache.gobblin.temporal.ddm.activity.RecommendScalingForWorkUnits;
import org.apache.gobblin.temporal.ddm.work.TimeBudget;
import org.apache.gobblin.temporal.ddm.work.WorkUnitsSizeSummary;
import org.apache.gobblin.temporal.dynamic.ProfileDerivation;
import org.apache.gobblin.temporal.dynamic.ProfileOverlay;
import org.apache.gobblin.temporal.dynamic.ScalingDirective;
import org.apache.gobblin.temporal.dynamic.WorkforceProfiles;


/**
 * Foundational base impl handling naming, timestamping, and assembly of the exactly one {@link ScalingDirective}
 * recommended, while deferring to a concrete subclass the actual choice of auto-scaling
 * {@link ScalingDirective#getSetPoint()}.
 */
@Slf4j
public abstract class AbstractRecommendScalingForWorkUnitsImpl implements RecommendScalingForWorkUnits {

  // TODO: decide whether this name ought to be configurable - or instead a predictable name that callers may expect (and possibly adjust)
  public static final String DEFAULT_PROFILE_DERIVATION_NAME = "workUnitsProc";

  @Override
  public List<ScalingDirective> recommendScaling(WorkUnitsSizeSummary remainingWork, String sourceClass, TimeBudget timeBudget, Properties jobProps) {
    // NOTE: deliberately blind to whatever scaling is currently in effect - per `RecommendScalingForWorkUnits` javadoc, the
    // `ScalingDirective`(s) returned must "stand alone", presuming nothing of the current `WorkforcePlan`'s `WorkforceStaffing`
    JobState jobState = new JobState(jobProps);
    String derivedProfileName = calcProfileDerivationName(jobState);
    int recommendedSetPoint = calcDerivationSetPoint(remainingWork, sourceClass, timeBudget, jobState);
    ProfileDerivation derivation = calcProfileDerivation(calcBasisProfileName(jobState), remainingWork, sourceClass, jobState);
    ScalingDirective procWUsWorkerScaling =
        new ScalingDirective(derivedProfileName, recommendedSetPoint, System.currentTimeMillis(), Optional.of(derivation));
    log.info("Recommended re-scaling to process work units: {}", procWUsWorkerScaling);
    return Arrays.asList(procWUsWorkerScaling);
  }

  /** @return the worker set-point a concrete subclass recommends for processing {@code remainingWork} within {@code timeBudget} */
  protected abstract int calcDerivationSetPoint(WorkUnitsSizeSummary remainingWork, String sourceClass, TimeBudget timeBudget, JobState jobState);

  /** @return how the recommended profile derives from {@code basisProfileName} - currently always an unchanged overlay */
  protected ProfileDerivation calcProfileDerivation(String basisProfileName, WorkUnitsSizeSummary remainingWork, String sourceClass, JobState jobState) {
    // TODO: implement right-sizing!!! (for now just return unchanged)
    return new ProfileDerivation(basisProfileName, ProfileOverlay.unchanged());
  }

  /** @return the name under which the derived profile is to be registered */
  protected String calcProfileDerivationName(JobState jobState) {
    // TODO: if we ever return > 1 directive, append a monotonically increasing number to avoid collisions
    return DEFAULT_PROFILE_DERIVATION_NAME;
  }

  /** @return the name of the profile upon which the derivation builds */
  protected String calcBasisProfileName(JobState jobState) {
    return WorkforceProfiles.BASELINE_NAME; // always build upon baseline
  }
}
Original file line number Diff line number Diff line change
Expand Up @@ -28,19 +28,20 @@
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.stream.Collectors;
import javax.annotation.Nullable;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import lombok.extern.slf4j.Slf4j;

import com.google.api.client.util.Lists;
import com.google.common.base.Function;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;

import io.temporal.failure.ApplicationFailure;
import javax.annotation.Nullable;
import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.broker.gobblin_scopes.GobblinScopeTypes;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
Expand Down Expand Up @@ -83,8 +84,7 @@ public CommitStats commit(WUProcessingSpec workSpec) {
int numDeserializationThreads = DEFAULT_NUM_DESERIALIZATION_THREADS;
Optional<String> optJobName = Optional.empty();
AutomaticTroubleshooter troubleshooter = null;
try {
FileSystem fs = Help.loadFileSystem(workSpec);
try (FileSystem fs = Help.loadFileSystem(workSpec)) {
JobState jobState = Help.loadJobState(workSpec, fs);
optJobName = Optional.ofNullable(jobState.getJobName());
SharedResourcesBroker<GobblinScopeTypes> instanceBroker = JobStateUtils.getSharedResourcesBroker(jobState);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,6 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import io.temporal.failure.ApplicationFailure;
import lombok.Data;
import lombok.extern.slf4j.Slf4j;

Expand All @@ -37,6 +36,7 @@
import com.google.common.base.Preconditions;
import com.google.common.io.Closer;
import com.tdunning.math.stats.TDigest;
import io.temporal.failure.ApplicationFailure;

import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
Expand Down Expand Up @@ -118,7 +118,7 @@ public GenerateWorkUnitsResult generateWorkUnits(Properties jobProps, EventSubmi
troubleshooter.start();
try (Closer closer = Closer.create()) {
// before embarking on (potentially expensive) WU creation, first pre-check that the FS is available
FileSystem fs = JobStateUtils.openFileSystem(jobState);
FileSystem fs = closer.register(JobStateUtils.openFileSystem(jobState));
fs.mkdirs(workDirRoot);

Set<String> pathsToCleanUp = new HashSet<>();
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,87 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.gobblin.temporal.ddm.activity.impl;

import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.runtime.JobState;
import org.apache.gobblin.temporal.GobblinTemporalConfigurationKeys;
import org.apache.gobblin.temporal.ddm.work.TimeBudget;
import org.apache.gobblin.temporal.ddm.work.WorkUnitsSizeSummary;
import org.apache.gobblin.temporal.ddm.worker.WorkFulfillmentWorker;


/**
 * Simple config-driven linear recommendation for how many containers to use to complete the "remaining work" within a given {@link TimeBudget}, per:
 *
 * a. from {@link WorkUnitsSizeSummary}, find how many (remaining) "top-level" {@link org.apache.gobblin.source.workunit.MultiWorkUnit}s of some mean size
 * b. from the configured {@link #AMORTIZED_NUM_BYTES_PER_MINUTE}, find the expected "processing rate" in bytes / minute
 * 1. estimate the time required for processing a mean-sized `MultiWorkUnit` (MWU)
 * c. from {@link JobState}, find per-container `MultiWorkUnit` parallelism capacity (aka. "worker-slots") to base the recommendation upon
 * 2. calculate the per-container throughput of MWUs per minute
 * 3. estimate the total per-container-minutes required to process all MWUs
 * d. from the {@link TimeBudget}, find the target number of minutes in which to complete processing of all MWUs
 * 4. recommend the number of containers so all MWU processing should finish within the target number of minutes
 */
@Slf4j
public class RecommendScalingForWorkUnitsLinearHeuristicImpl extends AbstractRecommendScalingForWorkUnitsImpl {

  public static final String AMORTIZED_NUM_BYTES_PER_MINUTE = GobblinTemporalConfigurationKeys.DYNAMIC_SCALING_PREFIX + "heuristic.params.numBytesPerMinute";
  public static final long DEFAULT_AMORTIZED_NUM_BYTES_PER_MINUTE = 80 * 1000L * 1000L * 60L; // 80MB/sec (expressed per-minute)

  @Override
  protected int calcDerivationSetPoint(WorkUnitsSizeSummary remainingWork, String sourceClass, TimeBudget jobTimeBudget, JobState jobState) {
    // for simplicity, for now, consider only top-level work units (aka. `MultiWorkUnit`s - MWUs)
    long numMWUs = remainingWork.getTopLevelWorkUnitsCount();
    double meanBytesPerMWU = remainingWork.getTopLevelWorkUnitsMeanSize();
    int numSimultaneousMWUsPerContainer = calcPerContainerWUCapacity(jobState); // (a worker-thread is a slot for top-level (MWUs) - not constituent sub-WUs)
    long bytesPerMinuteProcRate = calcAmortizedBytesPerMinute(jobState);
    // guard against a mis-configured, non-positive rate: without this check, division below yields `Infinity`,
    // and `(int) Math.floor(Infinity)` would silently recommend `Integer.MAX_VALUE` containers
    if (bytesPerMinuteProcRate <= 0) {
      throw new IllegalArgumentException("non-positive '" + AMORTIZED_NUM_BYTES_PER_MINUTE + "' processing rate: " + bytesPerMinuteProcRate);
    }
    log.info("Calculating auto-scaling (for {} remaining work units within {}) using: bytesPerMinuteProcRate = {}; meanBytesPerMWU = {}",
        numMWUs, jobTimeBudget, bytesPerMinuteProcRate, meanBytesPerMWU);

    // calc how many container*minutes to process all MWUs, based on mean MWU size
    double minutesProcTimeForMeanMWU = meanBytesPerMWU / bytesPerMinuteProcRate;
    double meanMWUsThroughputPerContainerMinute = numSimultaneousMWUsPerContainer / minutesProcTimeForMeanMWU;
    double estContainerMinutesForAllMWUs = numMWUs / meanMWUsThroughputPerContainerMinute;

    long targetNumMinutesForAllMWUs = jobTimeBudget.getMaxTargetDurationMinutes();
    // TODO: take into account `jobTimeBudget.getPermittedOverageMinutes()` - e.g. to decide whether to use `Math.ceil` vs. `Math.floor`

    // TODO: decide how to account for container startup; working est. for GoT-on-YARN ~ 3 mins (req to alloc ~ 30s; alloc to workers ready ~ 2.5m)
    //   e.g. can we amortize away / ignore when `targetNumMinutesForAllMWUs >> workerRequestToReadyNumMinutes`?
    // TODO take into account that MWUs are quantized into discrete chunks; this est. uses avg and presumes to divide partial MWUs amongst workers
    //   can we we mostly ignore if we keep MWU "chunk size" "small-ish", like maybe even just `duration(max(MWU)) <= targetNumMinutesForAllMWUs/2)`?

    // when any work remains, recommend no fewer than one container: `Math.floor` alone maps a sub-1.0 estimate to
    // zero containers, which would stall the job entirely rather than merely finish it late
    int recommendedNumContainers = (numMWUs == 0) ? 0
        : Math.max(1, (int) Math.floor(estContainerMinutesForAllMWUs / targetNumMinutesForAllMWUs));
    log.info("Recommended auto-scaling: {} containers, given: minutesToProc(mean(MWUs)) = {}; throughput = {} (MWUs / container*minute); "
            + "est. container*minutes to complete ALL ({}) MWUs = {}",
        recommendedNumContainers, minutesProcTimeForMeanMWU, meanMWUsThroughputPerContainerMinute, numMWUs, estContainerMinutesForAllMWUs);
    return recommendedNumContainers;
  }

  /** @return how many MWUs one container may process simultaneously - its worker count times each worker's thread count */
  protected int calcPerContainerWUCapacity(JobState jobState) {
    int numWorkersPerContainer = jobState.getPropAsInt(GobblinTemporalConfigurationKeys.TEMPORAL_NUM_WORKERS_PER_CONTAINER,
        GobblinTemporalConfigurationKeys.DEFAULT_TEMPORAL_NUM_WORKERS_PER_CONTAINERS);
    int numThreadsPerWorker = WorkFulfillmentWorker.MAX_EXECUTION_CONCURRENCY; // TODO: get from config, once that's implemented
    return numWorkersPerContainer * numThreadsPerWorker;
  }

  /** @return the configured amortized processing rate in bytes / minute, else {@link #DEFAULT_AMORTIZED_NUM_BYTES_PER_MINUTE} */
  protected long calcAmortizedBytesPerMinute(JobState jobState) {
    return jobState.getPropAsLong(AMORTIZED_NUM_BYTES_PER_MINUTE, DEFAULT_AMORTIZED_NUM_BYTES_PER_MINUTE);
  }
}
Loading

0 comments on commit 4e589a6

Please sign in to comment.