// clear_profiling.cpp
#include <torch/csrc/jit/passes/clear_profiling.h>

#include <torch/csrc/jit/jit_log.h>

namespace torch::jit {

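// Resets every Tensor-typed graph input to its unshaped TensorType, dropping
// any shape/stride/dtype/device specialization recorded by profiling.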
void unprofileGraphInputs(const std::shared_ptr<Graph>& graph) {
  for (auto i : graph->inputs()) {
    if (i->type()->isSubtypeOf(*TensorType::get())) {
      i->setType(unshapedType(i->type()));
    }
  }
}

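// Walks start_block and all nested blocks (e.g. if/loop bodies) with an
// explicit worklist, resetting each Tensor-typed node output to its unshaped
// type.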
void unprofileBlock(Block* start_block) {
  std::vector<Block*> stack;
  stack.push_back(start_block);
  while (!stack.empty()) {
    Block* block = stack.back();
    stack.pop_back();
    for (auto n : block->nodes()) {
      for (auto o : n->outputs()) {
        if (o->type()->isSubtypeOf(*TensorType::get())) {
          o->setType(unshapedType(o->type()));
        }
      }
      stack.insert(stack.end(), n->blocks().begin(), n->blocks().end());
    }
  }
}

// We need to make sure that passes that use profiling information
// use it **only after** guards validating it are inserted.
// Ideally, we would run any pass that relies on profiling information
// after `InsertBailOuts`; practically, however, some passes
// (e.g. Peephole) are useful to run both with and without profiling
// information, so we can run them in `preoptimizeGraph` and in
// `runProfilingInsensitiveOptimizations`.
void ClearProfilingInformation(const std::shared_ptr<Graph>& graph) {
  unprofileGraphInputs(graph);
  unprofileBlock(graph->block());
  GRAPH_DUMP("After ClearProfilingInformation: ", graph);
}

} // namespace torch::jit
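
// ---------------------------------------------------------------------------
// Usage sketch (not part of the upstream file): a minimal illustration of how
// this pass might be invoked on a graph that the profiling executor has
// already annotated with specialized TensorTypes. The helper name
// `resetForReprofiling` is hypothetical; the declarations it relies on come
// from the clear_profiling.h header included above.

// Drops all profiled type specialization so the graph can be re-profiled, or
// optimized without stale shape information.
void resetForReprofiling(const std::shared_ptr<torch::jit::Graph>& graph) {
  torch::jit::ClearProfilingInformation(graph);
}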