Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Relax testing #175

Draft
wants to merge 10 commits into
base: develop
Choose a base branch
from
9 changes: 8 additions & 1 deletion mlonmcu/flow/tvm/backend/backend.py
Original file line number Diff line number Diff line change
Expand Up @@ -84,6 +84,7 @@ class TVMBackend(Backend):
"relay_debug": None, # Use "DEFAULT=2" to have most verbosity. Needs USE_RELAY_DEBUG during setup.
"refresh_model_info": False,
"generate_wrapper": "auto",
"relax_mode": False,
}

REQUIRED = set()
Expand Down Expand Up @@ -160,7 +161,6 @@ def extra_pass_config(self):
extra = {}
if isinstance(extra, str):
import ast

extra = ast.literal_eval(extra)
assert isinstance(extra, dict)
return extra
Expand Down Expand Up @@ -254,6 +254,11 @@ def use_tuning_results(self):
value = self.config["use_tuning_results"]
return str2bool(value) if not isinstance(value, (bool, int)) else value

@property
def relax_mode(self):
    """Whether the Relax compilation path is enabled for this backend.

    Reads the ``relax_mode`` config entry (default ``False``); bool/int
    values pass through unchanged, any other value is coerced via
    ``str2bool``. When truthy, ``--relax`` is added to the tvmc
    compile arguments.
    """
    raw = self.config["relax_mode"]
    if isinstance(raw, (bool, int)):
        return raw
    return str2bool(raw)

@property
def tvmc_extra_args(self):
    """Return the raw ``tvmc_extra_args`` configuration entry, unmodified."""
    # Plain passthrough; no coercion is applied here (unlike the bool-typed
    # properties above, which run their value through str2bool).
    extra_args = self.config["tvmc_extra_args"]
    return extra_args
Expand Down Expand Up @@ -388,6 +393,8 @@ def get_tvmc_compile_args(self, out, dump=None):
*["-f", self.fmt],
*["--model-format", self.model_format],
]
if self.relax_mode:
args.append("--relax")
return args

def invoke_tvmc(self, command, *args, cwd=None):
Expand Down
2 changes: 1 addition & 1 deletion mlonmcu/flow/tvm/backend/model_info.py
Original file line number Diff line number Diff line change
Expand Up @@ -154,7 +154,7 @@ def parse_relay_main(line):
if len(output_tensor_names_str) > 0:
output_tensor_names = re.compile(r"\"([a-zA-Z0-9_]+)\"").findall(output_tensor_names_str[0])
else:
output_tensor_names = [f"output{i}" for i in range(len(output_tensor_strs))]
output_tensor_names = [(f"output{i}" if i > 0 else "output") for i in range(len(output_tensor_strs))]

assert len(output_tensor_names) == len(output_tensor_strs)

Expand Down
1 change: 1 addition & 0 deletions mlonmcu/flow/tvm/backend/tvmaot.py
Original file line number Diff line number Diff line change
Expand Up @@ -118,6 +118,7 @@ def generate(self) -> Tuple[dict, dict]:
self.prefix,
api="c" if self.unpacked_api else "packed",
debug_arena=self.debug_arena,
relax_mode=self.relax_mode,
)
artifacts.append(Artifact("aot_wrapper.c", content=wrapper_src, fmt=ArtifactFormat.SOURCE))
header_src = generate_wrapper_header()
Expand Down
21 changes: 15 additions & 6 deletions mlonmcu/flow/tvm/backend/wrapper.py
Original file line number Diff line number Diff line change
Expand Up @@ -383,7 +383,7 @@ def getMeta(tensors, withNames=False):
return out


def generate_tvmaot_wrapper(model_info, workspace_size, mod_name, api="c", debug_arena=False):
def generate_tvmaot_wrapper(model_info, workspace_size, mod_name, api="c", debug_arena=False, relax_mode=False):
modPrefix = f"tvmgen_{mod_name}"

def writeTensors(in_tensors, out_tensors, modPrefix, api):
Expand All @@ -392,7 +392,7 @@ def writeTensors(in_tensors, out_tensors, modPrefix, api):
// Define data for input and output tensors
"""

def writeTensorsHelper(tensors, prefix, out=False):
def writeTensorsHelper(tensors, prefix, out=False, relax_mode=False):
lenTensors = len(tensors)
direction = "out" if out else "in"
ret = ""
Expand All @@ -402,13 +402,22 @@ def writeTensorsHelper(tensors, prefix, out=False):
ret += f"void* {direction}puts[] = {{" + ", ".join(names) + "};\n"
ret += f"struct {prefix}_{direction}puts {prefix}_{direction}puts = {{" + "\n"
for i, t in enumerate(tensors):
tensor_name = t.name.replace(":", "_").replace("/", "_").replace(".", "_").replace(";", "_")
ret += f" .{tensor_name} = {names[i]}," + "\n"
# if True:
# if not relax_mode:
# if not out:
if (relax_mode and not out) or not relax_mode:
tensor_name = t.name.replace(":", "_").replace("/", "_").replace(".", "_").replace(";", "_")
ret += f" .{tensor_name} = {names[i]}," + "\n"
else:
if i > 0:
ret += f" .{direction}put{i} = {names[i]}," + "\n"
else:
ret += f" .{direction}put = {names[i]}," + "\n"
ret += "};\n"
return ret

retStr += writeTensorsHelper(in_tensors, modPrefix, False)
retStr += writeTensorsHelper(out_tensors, modPrefix, True)
retStr += writeTensorsHelper(in_tensors, modPrefix, False, relax_mode=relax_mode)
retStr += writeTensorsHelper(out_tensors, modPrefix, True, relax_mode=relax_mode)
return retStr
elif api == "packed":
retStr = """
Expand Down
Loading