Skip to content

Commit

Permalink
Merge pull request #25 from stealthinu/onnx化
Browse files Browse the repository at this point in the history
PyInstallerを試しやすいように整備しました
  • Loading branch information
isletennos authored Dec 15, 2022
2 parents 817da67 + 9041c62 commit d9de818
Show file tree
Hide file tree
Showing 4 changed files with 90 additions and 0 deletions.
1 change: 1 addition & 0 deletions python/Pipfile
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@ scikit-learn = "==1.0.2"
sounddevice = "==0.4.4"
SoundFile = "==0.10.3.post1"
onnxruntime-directml = "1.13.1"
pyinstaller = "*"

[dev-packages]

Expand Down
10 changes: 10 additions & 0 deletions python/install_pipenv.ps1
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
# Install pipenv into the user site and make it runnable from any shell.
pip install --upgrade pip
pip install pipenv
# Derive the user Scripts directory (where pip --user puts console entry
# points) from the user site-packages path reported by Python.
$pythonUserPath = python -m site --user-site
$pythonUserPath = $pythonUserPath.Replace('site-packages', 'Scripts')
# Add it to PATH for the current session...
$ENV:Path += ";" + $pythonUserPath
# ...and persist it into the per-user PATH for future sessions.
# NOTE(review): this appends unconditionally — re-running the script adds
# duplicate PATH entries.
$userPath = [System.Environment]::GetEnvironmentVariable("Path", "User")
$userPath += ";" + $pythonUserPath
[System.Environment]::SetEnvironmentVariable("Path", $userPath, "User")
# Make pipenv create its virtualenv inside the project directory (.venv),
# for the current session and persistently for the user.
# NOTE(review): pipenv treats any non-empty value of PIPENV_VENV_IN_PROJECT
# as "enabled"; the literal '.venv' works but '1' is the conventional value.
$ENV:PIPENV_VENV_IN_PROJECT = '.venv'
[System.Environment]::SetEnvironmentVariable("PIPENV_VENV_IN_PROJECT", ".venv", "User")
1 change: 1 addition & 0 deletions python/makeexe.bat → python/makeexe.ps1
Original file line number Diff line number Diff line change
@@ -1 +1,2 @@
# Build the MMVC client as a one-directory PyInstaller bundle. The two
# --add-binary options copy the onnxruntime provider DLLs (shared-provider
# shim and DirectML) out of the project .venv into the bundle, since
# PyInstaller does not pick them up automatically; --collect-data pulls in
# librosa's data files. Paths assume the in-project .venv created by
# install_pipenv.ps1 (PIPENV_VENV_IN_PROJECT).
pipenv run pyinstaller mmvc_client.py --add-binary "./.venv/Lib/site-packages/onnxruntime/capi/onnxruntime_providers_shared.dll;./onnxruntime/capi/" --add-binary "./.venv/Lib/site-packages/onnxruntime/capi/DirectML.dll;./onnxruntime/capi/" --collect-data librosa --onedir --clean -y
# Build the audio-device listing helper as a single self-contained exe.
pipenv run pyinstaller output_audio_device_list.py --onefile
78 changes: 78 additions & 0 deletions python/onnx_bench.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,78 @@
import argparse
import time
import onnxruntime as ort
import torch


def get_args():
    """Parse the command-line options for this benchmark script.

    Returns:
        argparse.Namespace with a single attribute ``input_onnx``:
        the path to the ONNX model file to benchmark (required).
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--input_onnx", required=True)
    return arg_parser.parse_args()


def inspect_onnx(session):
    """Print the name, shape and dtype of every input and output of *session*.

    Args:
        session: an onnxruntime.InferenceSession (or any object exposing
            ``get_inputs()`` / ``get_outputs()`` returning node metadata
            with ``name``, ``shape`` and ``type`` attributes).
    """
    for label, nodes in (("inputs", session.get_inputs()),
                         ("outputs", session.get_outputs())):
        print(label)
        for node in nodes:
            print("name:{}\tshape:{}\tdtype:{}".format(
                node.name, node.shape, node.type))


def benchmark(session, n_runs=30, warmup=5):
    """Measure and print the mean inference time of a VC ONNX session.

    Runs the session ``n_runs`` times on fixed dummy inputs, discards the
    first ``warmup`` timings (one-time graph optimization / allocation cost
    would skew the average), and prints the mean wall-clock time per run.

    Args:
        session: an onnxruntime.InferenceSession (or compatible object with
            a ``run(output_names, input_feed)`` method) whose model takes
            inputs "specs", "lengths", "sid_src", "sid_tgt" and produces
            an output named "audio".
        n_runs: total number of timed inference calls (default 30).
        warmup: leading runs excluded from the average (default 5);
            must be smaller than ``n_runs``.

    Raises:
        ValueError: if ``warmup >= n_runs`` (the average would be over
            zero samples).
    """
    if warmup >= n_runs:
        raise ValueError("warmup must be smaller than n_runs")

    # Dummy inputs shaped for the exported VITS voice-conversion model:
    # a (1, 257, 60) spectrogram, its frame length, and speaker ids.
    dummy_specs = torch.rand(1, 257, 60)
    dummy_lengths = torch.LongTensor([60])
    dummy_sid_src = torch.LongTensor([0])
    dummy_sid_tgt = torch.LongTensor([1])

    # Build the feed dict once, outside the timed loop, so only run() is
    # measured (the tensor->numpy conversion is not part of inference).
    feeds = {
        "specs": dummy_specs.numpy(),
        "lengths": dummy_lengths.numpy(),
        "sid_src": dummy_sid_src.numpy(),
        "sid_tgt": dummy_sid_tgt.numpy(),
    }

    use_time_list = []
    for _ in range(n_runs):
        start = time.time()
        session.run(["audio"], feeds)
        use_time_list.append(time.time() - start)

    # Drop the warmup runs before averaging.
    use_time_list = use_time_list[warmup:]
    mean_use_time = sum(use_time_list) / len(use_time_list)
    print(f"mean_use_time:{mean_use_time}")


def main(args):
    """Benchmark one ONNX model on the CPU, CUDA and DirectML providers.

    Creates three InferenceSessions over the same model file, prints the
    model's input/output signature once, then runs the timing benchmark
    against each execution provider in turn.

    Args:
        args: parsed CLI namespace; ``args.input_onnx`` is the model path.
    """
    model_path = args.input_onnx

    cpu_session = ort.InferenceSession(
        model_path,
        providers=["CPUExecutionProvider"])

    cuda_session = ort.InferenceSession(
        model_path,
        providers=["CUDAExecutionProvider"])

    # Settings required for running with DirectML: sequential execution
    # and memory-pattern optimization disabled.
    dml_options = ort.SessionOptions()
    dml_options.execution_mode = ort.ExecutionMode.ORT_SEQUENTIAL
    dml_options.enable_mem_pattern = False
    dml_session = ort.InferenceSession(
        model_path,
        sess_options=dml_options,
        providers=["DmlExecutionProvider"])

    print("vits onnx benchmark")
    inspect_onnx(cpu_session)
    for label, session in (("ONNX CPU", cpu_session),
                           ("ONNX CUDA", cuda_session),
                           ("ONNX DirectML", dml_session)):
        print(label)
        benchmark(session)

if __name__ == '__main__':
    # Script entry point: parse CLI arguments, echo them for the log,
    # then run the three-provider benchmark.
    args = get_args()
    print(args)
    main(args)

0 comments on commit d9de818

Please sign in to comment.