From f1ce849434dc7d394daec9976e3d4d2e57192f01 Mon Sep 17 00:00:00 2001
From: qwerty2501 <939468+qwerty2501@users.noreply.github.com>
Date: Mon, 9 May 2022 02:58:13 +0900
Subject: [PATCH 01/13] Changed the default argument to be enabled only in
 C++, since C does not support default arguments (#122)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We really wanted to remove the default argument entirely, but since there may be callers relying on it, this change keeps it in place for C++ only.
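
Default arguments are a C++-only feature; a C compiler rejects them. A minimal sketch of what call sites look like after this change (hypothetical callers, using the initialize declaration from core.h below):

    // C++: the guarded "= 0" still applies, so existing calls keep compiling:
    initialize(true);

    /* C: no default argument exists, so both arguments must be passed: */
    initialize(true, 0);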
---
 core/src/core.h | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/core/src/core.h b/core/src/core.h
index f87ca5ba8..15608dd1f 100644
--- a/core/src/core.h
+++ b/core/src/core.h
@@ -43,7 +43,11 @@ typedef enum {
  * 何度も実行可能。use_gpuを変更して実行しなおすことも可能。
  * 最後に実行したuse_gpuに従って他の関数が実行される。
  */
-VOICEVOX_CORE_API bool initialize(bool use_gpu, int cpu_num_threads = 0);
+VOICEVOX_CORE_API bool initialize(bool use_gpu, int cpu_num_threads
+#ifdef __cplusplus
+                                                = 0
+#endif
+);
 
 /**
  * @fn

From c66081ebf8cb53bf4b6f57cb95ac9c00ac774232 Mon Sep 17 00:00:00 2001
From: Hiroshiba <hihokaruta@gmail.com>
Date: Tue, 10 May 2022 00:10:38 +0900
Subject: [PATCH 02/13] Made it so initialize does not have to load all
 models (#124)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Split out a load_model function

* Added load_model and is_model_loaded

* Removed unused error messages

* Update core/src/core.cpp

Co-authored-by: qwerty2501 <939468+qwerty2501@users.noreply.github.com>

* Update core/src/core.cpp

Co-authored-by: qwerty2501 <939468+qwerty2501@users.noreply.github.com>

* Update core/src/core.cpp

Co-authored-by: qwerty2501 <939468+qwerty2501@users.noreply.github.com>

* Changed the types in the Python wrapper

* Added load_all_models

* return true

Co-authored-by: qwerty2501 <939468+qwerty2501@users.noreply.github.com>
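
A minimal sketch of the lazy-loading flow this enables (hypothetical C++ call site, using the declarations from core.h in this patch):

    // Initialize without loading the embedded models up front.
    if (!initialize(/*use_gpu=*/false, /*cpu_num_threads=*/0, /*load_all_models=*/false)) {
      // inspect last_error_message() for details
    }

    // Later, load only the model covering a given speaker, on demand.
    int64_t speaker_id = 1;
    if (!is_model_loaded(speaker_id)) {
      load_model(speaker_id);  // speakers and models are not necessarily 1:1
    }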
---
 core/_core.py     |   6 +-
 core/src/core.cpp | 149 +++++++++++++++++++++++++++++++---------------
 core/src/core.h   |  28 ++++++++-
 3 files changed, 130 insertions(+), 53 deletions(-)

diff --git a/core/_core.py b/core/_core.py
index a6fefa448..f9306db4b 100644
--- a/core/_core.py
+++ b/core/_core.py
@@ -27,7 +27,7 @@
 lib = cdll.LoadLibrary(str(core_dll_path))
 
 # 関数型定義
-lib.initialize.argtypes = (c_bool, c_int)
+lib.initialize.argtypes = (c_bool, c_int, c_bool)
 lib.initialize.restype = c_bool
 
 lib.finalize.argtypes = ()
@@ -52,8 +52,8 @@
 
 
 # ラッパー関数
-def initialize(use_gpu: bool, cpu_num_threads=0):
-    success = lib.initialize(use_gpu, cpu_num_threads)
+def initialize(use_gpu: bool, cpu_num_threads=0, load_all_models=True):
+    success = lib.initialize(use_gpu, cpu_num_threads, load_all_models)
     if not success:
         raise Exception(lib.last_error_message().decode())
 
diff --git a/core/src/core.cpp b/core/src/core.cpp
index ee0315714..e7b7cc4d8 100644
--- a/core/src/core.cpp
+++ b/core/src/core.cpp
@@ -7,6 +7,7 @@
 #include <array>
 #include <exception>
 #include <memory>
+#include <optional>
 #include <string>
 #include <unordered_set>
 
@@ -19,9 +20,7 @@
 #include "core.h"
 
 #define NOT_INITIALIZED_ERR "Call initialize() first."
-#define NOT_FOUND_ERR "No such file or directory: "
-#define FAILED_TO_OPEN_MODEL_ERR "Unable to open model files."
-#define FAILED_TO_OPEN_METAS_ERR "Unable to open metas.json."
+#define NOT_LOADED_ERR "Model is not loaded."
 #define ONNX_ERR "ONNX raise exception: "
 #define JSON_ERR "JSON parser raise exception: "
 #define GPU_NOT_SUPPORTED_ERR "This library is CPU version. GPU is not supported."
@@ -43,13 +42,19 @@ EMBED_DECL(YUKARIN_S);
 EMBED_DECL(YUKARIN_SA);
 EMBED_DECL(DECODE);
 
-const struct {
+/**
+ * 3種類のモデルを一纏めにしたもの
+ */
+struct VVMODEL {
   embed::EMBED_RES (*YUKARIN_S)();
   embed::EMBED_RES (*YUKARIN_SA)();
   embed::EMBED_RES (*DECODE)();
-} MODELS_LIST[] = {{YUKARIN_S, YUKARIN_SA, DECODE}};
+};
+const VVMODEL VVMODEL_LIST[] = {
+    {YUKARIN_S, YUKARIN_SA, DECODE},
+};
 }  // namespace EMBED_DECL_NAMESPACE
-using EMBED_DECL_NAMESPACE::MODELS_LIST;
+using EMBED_DECL_NAMESPACE::VVMODEL_LIST;
 
 // 複数モデルある場合のspeaker_idマッピング
 // {元のspeaker_id: {モデル番号, 新しいspeaker_id}}
@@ -76,8 +81,23 @@ SupportedDevices get_supported_devices() {
 }
 
 struct Status {
-  Status(bool use_gpu_)
-      : use_gpu(use_gpu_), memory_info(Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU)) {}
+  Status(int model_count, bool use_gpu, int cpu_num_threads)
+      : memory_info(Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU)) {
+    yukarin_s_list = std::vector<std::optional<Ort::Session>>(model_count);
+    yukarin_sa_list = std::vector<std::optional<Ort::Session>>(model_count);
+    decode_list = std::vector<std::optional<Ort::Session>>(model_count);
+
+    session_options.SetInterOpNumThreads(cpu_num_threads).SetIntraOpNumThreads(cpu_num_threads);
+    if (use_gpu) {
+#ifdef DIRECTML
+      session_options.DisableMemPattern().SetExecutionMode(ExecutionMode::ORT_SEQUENTIAL);
+      Ort::ThrowOnError(OrtSessionOptionsAppendExecutionProvider_DML(session_options, 0));
+#else
+      const OrtCUDAProviderOptions cuda_options;
+      session_options.AppendExecutionProvider_CUDA(cuda_options);
+#endif
+    }
+  }
   /**
    * Loads the metas.json.
    *
@@ -89,7 +109,7 @@ struct Status {
    *  version: string
    * }]
    */
-  bool load(int cpu_num_threads) {
+  bool load_metas() {
     embed::Resource metas_file = METAS();
 
     metas = nlohmann::json::parse(metas_file.data, metas_file.data + metas_file.size);
@@ -100,36 +120,32 @@ struct Status {
         supported_styles.insert(style["id"].get<int64_t>());
       }
     }
+    return true;
+  }
 
-    for (const auto MODELS : MODELS_LIST) {
-      embed::Resource yukarin_s_model = MODELS.YUKARIN_S();
-      embed::Resource yukarin_sa_model = MODELS.YUKARIN_SA();
-      embed::Resource decode_model = MODELS.DECODE();
-
-      Ort::SessionOptions session_options;
-      session_options.SetInterOpNumThreads(cpu_num_threads).SetIntraOpNumThreads(cpu_num_threads);
-      yukarin_s_list.push_back(Ort::Session(env, yukarin_s_model.data, yukarin_s_model.size, session_options));
-      yukarin_sa_list.push_back(Ort::Session(env, yukarin_sa_model.data, yukarin_sa_model.size, session_options));
-      if (use_gpu) {
-#ifdef DIRECTML
-        session_options.DisableMemPattern().SetExecutionMode(ExecutionMode::ORT_SEQUENTIAL);
-        Ort::ThrowOnError(OrtSessionOptionsAppendExecutionProvider_DML(session_options, 0));
-#else
-        const OrtCUDAProviderOptions cuda_options;
-        session_options.AppendExecutionProvider_CUDA(cuda_options);
-#endif
-      }
-      decode_list.push_back(Ort::Session(env, decode_model.data, decode_model.size, session_options));
-    }
+  /**
+   * モデルを読み込む
+   */
+  bool load_model(int model_index) {
+    const auto VVMODEL = VVMODEL_LIST[model_index];
+    embed::Resource yukarin_s_model = VVMODEL.YUKARIN_S();
+    embed::Resource yukarin_sa_model = VVMODEL.YUKARIN_SA();
+    embed::Resource decode_model = VVMODEL.DECODE();
+
+    yukarin_s_list[model_index] =
+        std::move(Ort::Session(env, yukarin_s_model.data, yukarin_s_model.size, session_options));
+    yukarin_sa_list[model_index] =
+        std::move(Ort::Session(env, yukarin_sa_model.data, yukarin_sa_model.size, session_options));
+    decode_list[model_index] = std::move(Ort::Session(env, decode_model.data, decode_model.size, session_options));
     return true;
   }
 
   std::string root_dir_path;
-  bool use_gpu;
+  Ort::SessionOptions session_options;
   Ort::MemoryInfo memory_info;
 
   Ort::Env env{ORT_LOGGING_LEVEL_ERROR};
-  std::vector<Ort::Session> yukarin_s_list, yukarin_sa_list, decode_list;
+  std::vector<std::optional<Ort::Session>> yukarin_s_list, yukarin_sa_list, decode_list;
 
   nlohmann::json metas;
   std::string metas_str;
@@ -166,7 +182,7 @@ std::pair<int64_t, int64_t> get_model_index_and_speaker_id(int64_t speaker_id) {
   return found->second;
 }
 
-bool initialize(bool use_gpu, int cpu_num_threads) {
+bool initialize(bool use_gpu, int cpu_num_threads, bool load_all_models) {
   initialized = false;
 
 #ifdef DIRECTML
@@ -178,18 +194,29 @@ bool initialize(bool use_gpu, int cpu_num_threads) {
     return false;
   }
   try {
-    status = std::make_unique<Status>(use_gpu);
-    if (!status->load(cpu_num_threads)) {
+    const int model_count = std::size(VVMODEL_LIST);
+    status = std::make_unique<Status>(model_count, use_gpu, cpu_num_threads);
+    if (!status->load_metas()) {
       return false;
     }
-    if (use_gpu) {
-      // 一回走らせて十分なGPUメモリを確保させる
-      int length = 500;
-      int phoneme_size = 45;
-      std::vector<float> phoneme(length * phoneme_size), f0(length);
-      int64_t speaker_id = 0;
-      std::vector<float> output(length * 256);
-      decode_forward(length, phoneme_size, f0.data(), phoneme.data(), &speaker_id, output.data());
+
+    if (load_all_models) {
+      for (int model_index = 0; model_index < model_count; model_index++) {
+        if (!status->load_model(model_index)) {
+          return false;
+        }
+      }
+
+      if (use_gpu) {
+        // 一回走らせて十分なGPUメモリを確保させる
+        // TODO: 全MODELに対して行う
+        int length = 500;
+        int phoneme_size = 45;
+        std::vector<float> phoneme(length * phoneme_size), f0(length);
+        int64_t speaker_id = 0;
+        std::vector<float> output(length * 256);
+        decode_forward(length, phoneme_size, f0.data(), phoneme.data(), &speaker_id, output.data());
+      }
     }
   } catch (const Ort::Exception &e) {
     error_message = ONNX_ERR;
@@ -208,6 +235,17 @@ bool initialize(bool use_gpu, int cpu_num_threads) {
   return true;
 }
 
+bool load_model(int64_t speaker_id) {
+  auto [model_index, _] = get_model_index_and_speaker_id(speaker_id);
+  return status->load_model(model_index);
+}
+
+bool is_model_loaded(int64_t speaker_id) {
+  auto [model_index, _] = get_model_index_and_speaker_id(speaker_id);
+  return (status->yukarin_s_list[model_index].has_value() && status->yukarin_sa_list[model_index].has_value() &&
+          status->decode_list[model_index].has_value());
+}
+
 void finalize() {
   initialized = false;
   status.reset();
@@ -231,6 +269,11 @@ bool yukarin_s_forward(int64_t length, int64_t *phoneme_list, int64_t *speaker_i
     return false;
   }
   auto [model_index, model_speaker_id] = get_model_index_and_speaker_id(*speaker_id);
+  auto &model = status->yukarin_s_list[model_index];
+  if (!model) {
+    error_message = NOT_LOADED_ERR;
+    return false;
+  }
   try {
     const char *inputs[] = {"phoneme_list", "speaker_id"};
     const char *outputs[] = {"phoneme_length"};
@@ -240,8 +283,8 @@ bool yukarin_s_forward(int64_t length, int64_t *phoneme_list, int64_t *speaker_i
                                                to_tensor(&model_speaker_id, speaker_shape)};
     Ort::Value output_tensor = to_tensor(output, phoneme_shape);
 
-    status->yukarin_s_list[model_index].Run(Ort::RunOptions{nullptr}, inputs, input_tensors.data(),
-                                            input_tensors.size(), outputs, &output_tensor, 1);
+    model.value().Run(Ort::RunOptions{nullptr}, inputs, input_tensors.data(), input_tensors.size(), outputs,
+                      &output_tensor, 1);
 
     for (int64_t i = 0; i < length; i++) {
       if (output[i] < PHONEME_LENGTH_MINIMAL) output[i] = PHONEME_LENGTH_MINIMAL;
@@ -266,6 +309,11 @@ bool yukarin_sa_forward(int64_t length, int64_t *vowel_phoneme_list, int64_t *co
     return false;
   }
   auto [model_index, model_speaker_id] = get_model_index_and_speaker_id(*speaker_id);
+  auto &model = status->yukarin_sa_list[model_index];
+  if (!model) {
+    error_message = NOT_LOADED_ERR;
+    return false;
+  }
   try {
     const char *inputs[] = {
         "length",          "vowel_phoneme_list",       "consonant_phoneme_list", "start_accent_list",
@@ -283,8 +331,8 @@ bool yukarin_sa_forward(int64_t length, int64_t *vowel_phoneme_list, int64_t *co
                                                to_tensor(&model_speaker_id, speaker_shape)};
     Ort::Value output_tensor = to_tensor(output, phoneme_shape);
 
-    status->yukarin_sa_list[model_index].Run(Ort::RunOptions{nullptr}, inputs, input_tensors.data(),
-                                             input_tensors.size(), outputs, &output_tensor, 1);
+    model.value().Run(Ort::RunOptions{nullptr}, inputs, input_tensors.data(), input_tensors.size(), outputs,
+                      &output_tensor, 1);
   } catch (const Ort::Exception &e) {
     error_message = ONNX_ERR;
     error_message += e.what();
@@ -346,6 +394,11 @@ bool decode_forward(int64_t length, int64_t phoneme_size, float *f0, float *phon
     return false;
   }
   auto [model_index, model_speaker_id] = get_model_index_and_speaker_id(*speaker_id);
+  auto &model = status->decode_list[model_index];
+  if (!model) {
+    error_message = NOT_LOADED_ERR;
+    return false;
+  }
   try {
     // 音が途切れてしまうのを避けるworkaround処理が入っている
     // TODO: 改善したらここのpadding処理を取り除く
@@ -381,8 +434,8 @@ bool decode_forward(int64_t length, int64_t phoneme_size, float *f0, float *phon
     const char *inputs[] = {"f0", "phoneme", "speaker_id"};
     const char *outputs[] = {"wave"};
 
-    status->decode_list[model_index].Run(Ort::RunOptions{nullptr}, inputs, input_tensor.data(), input_tensor.size(),
-                                         outputs, &output_tensor, 1);
+    model.value().Run(Ort::RunOptions{nullptr}, inputs, input_tensor.data(), input_tensor.size(), outputs,
+                      &output_tensor, 1);
 
     // TODO: 改善したらここのcopy処理を取り除く
     copy_output_with_padding_to_output(output_with_padding, output, padding_f0_size);
diff --git a/core/src/core.h b/core/src/core.h
index 15608dd1f..5c13e90fc 100644
--- a/core/src/core.h
+++ b/core/src/core.h
@@ -38,17 +38,41 @@ typedef enum {
  * @brief 音声合成するための初期化を行う。他の関数を正しく実行するには先に初期化が必要
  * @param use_gpu trueならGPU用、falseならCPU用の初期化を行う
  * @param cpu_num_threads 推論に用いるスレッド数を設定する。0の場合論理コア数の半分か、物理コア数が設定される
+ * @param load_all_models trueなら全てのモデルをロードする
  * @return 成功したらtrue、失敗したらfalse
  * @detail
  * 何度も実行可能。use_gpuを変更して実行しなおすことも可能。
  * 最後に実行したuse_gpuに従って他の関数が実行される。
  */
-VOICEVOX_CORE_API bool initialize(bool use_gpu, int cpu_num_threads
+VOICEVOX_CORE_API bool initialize(bool use_gpu,
+                                  int cpu_num_threads
 #ifdef __cplusplus
-                                                = 0
+                                  = 0
+#endif
+                                  ,
+                                  bool load_all_models
+#ifdef __cplusplus
+                                  = true
 #endif
 );
 
+/**
+ * モデルをロードする
+ * @param speaker_id 話者番号
+ * @return 成功したらtrue、失敗したらfalse
+ * @detail
+ * 必ずしも話者とモデルが1:1対応しているわけではない。
+ */
+VOICEVOX_CORE_API bool load_model(int64_t speaker_id);
+
+/**
+ * @fn
+ * モデルがロード済みかどうか
+ * @param speaker_id 話者番号
+ * @return ロード済みならtrue、そうでないならfalse
+ */
+VOICEVOX_CORE_API bool is_model_loaded(int64_t speaker_id);
+
 /**
  * @fn
  * 終了処理を行う

From 53d43f84bc5a62bf704c7c9817098d068d3618a2 Mon Sep 17 00:00:00 2001
From: Hiroshiba <hihokaruta@gmail.com>
Date: Wed, 11 May 2022 09:27:53 +0900
Subject: [PATCH 03/13] Allow builds to be started via workflow_dispatch as
 well (#127)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
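
With this change the VERSION env var resolves, in order, to the release tag name, the workflow_dispatch `version` input, or 'DEBUG'. For example: publishing a release tagged 0.12.0 names the assets voicevox_core-<artifact_name>-0.12.0; a manual dispatch with version input 0.12.0-preview.1 uses that string; a plain push or pull request builds as DEBUG, which also skips the archive and upload steps.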

---
 .github/workflows/build.yml | 109 ++++++++++++++++++------------------
 1 file changed, 56 insertions(+), 53 deletions(-)

diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index e24d81036..ef7d4a475 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -4,13 +4,20 @@ on:
   push:
     branches:
       # - main
+  pull_request:
   release:
     types:
       - published
-  pull_request:
   workflow_dispatch:
+    inputs:
+      version:
+        description: "バージョン情報(A.BB.C / A.BB.C-preview.D)"
+        required: true
 
 env:
+  # releaseタグ名か、workflow_dispatchでのバージョン名か、DEBUGが入る
+  VERSION: ${{ github.event.release.tag_name || github.event.inputs.version || 'DEBUG' }}
+
   # Raw character weights are not public.
   # Skip uploading to GitHub Release on public repo.
   SKIP_UPLOADING_RELEASE_ASSET: ${{ secrets.SKIP_UPLOADING_RELEASE_ASSET || '1' }}
@@ -23,19 +30,19 @@ jobs:
         include:
           - os: windows-2019
             device: gpu
-            python_architecture: 'x64'
+            python_architecture: "x64"
             onnxruntime_url: https://github.com/microsoft/onnxruntime/releases/download/v1.10.0/onnxruntime-win-x64-gpu-1.10.0.zip
             artifact_name: windows-x64-cuda
 
           - os: windows-2019
             device: cpu-x64
-            python_architecture: 'x64'
+            python_architecture: "x64"
             onnxruntime_url: https://github.com/microsoft/onnxruntime/releases/download/v1.10.0/onnxruntime-win-x64-1.10.0.zip
             artifact_name: windows-x64-cpu
 
           - os: windows-2019
             device: cpu-x86
-            python_architecture: 'x86'
+            python_architecture: "x86"
             onnxruntime_url: https://github.com/microsoft/onnxruntime/releases/download/v1.10.0/onnxruntime-win-x86-1.10.0.zip
             cmake_additional_options: -DCMAKE_GENERATOR_PLATFORM=Win32
             artifact_name: windows-x86-cpu
@@ -54,7 +61,7 @@ jobs:
 
           - os: windows-2019
             device: directml
-            python_architecture: 'x64'
+            python_architecture: "x64"
             onnxruntime_url: https://github.com/microsoft/onnxruntime/releases/download/v1.10.0/Microsoft.ML.OnnxRuntime.DirectML.1.10.0.zip
             directml_url: https://www.nuget.org/api/v2/package/Microsoft.AI.DirectML/1.8.0
             cmake_additional_options: -DDIRECTML=ON -DDIRECTML_DIR=download/directml
@@ -62,40 +69,40 @@ jobs:
 
           - os: macos-10.15
             device: cpu-x64
-            python_architecture: 'x64'
+            python_architecture: "x64"
             onnxruntime_url: https://github.com/microsoft/onnxruntime/releases/download/v1.10.0/onnxruntime-osx-universal2-1.10.0.tgz
             artifact_name: osx-universal2-cpu
 
           - os: ubuntu-18.04
             device: gpu
-            python_architecture: 'x64'
+            python_architecture: "x64"
             onnxruntime_url: https://github.com/microsoft/onnxruntime/releases/download/v1.10.0/onnxruntime-linux-x64-gpu-1.10.0.tgz
             artifact_name: linux-x64-gpu
-            cc_version: '8'
-            cxx_version: '8'
+            cc_version: "8"
+            cxx_version: "8"
 
           - os: ubuntu-18.04
             device: cpu-x64
-            python_architecture: 'x64'
+            python_architecture: "x64"
             onnxruntime_url: https://github.com/microsoft/onnxruntime/releases/download/v1.10.0/onnxruntime-linux-x64-1.10.0.tgz
             artifact_name: linux-x64-cpu
-            cc_version: '8'
-            cxx_version: '8'
+            cc_version: "8"
+            cxx_version: "8"
 
           - os: ubuntu-18.04
             device: cpu-armhf
             onnxruntime_url: https://github.com/VOICEVOX/onnxruntime-builder/releases/download/1.10.0.1/onnxruntime-linux-armhf-cpu-v1.10.0.tgz
             artifact_name: linux-armhf-cpu
-            cc_version: '8'
-            cxx_version: '8'
+            cc_version: "8"
+            cxx_version: "8"
             arch: arm-linux-gnueabihf
 
           - os: ubuntu-18.04
             device: cpu-arm64
             onnxruntime_url: https://github.com/VOICEVOX/onnxruntime-builder/releases/download/1.10.0.1/onnxruntime-linux-arm64-cpu-v1.10.0.tgz
             artifact_name: linux-arm64-cpu
-            cc_version: '8'
-            cxx_version: '8'
+            cc_version: "8"
+            cxx_version: "8"
             arch: aarch64-linux-gnu
 
     runs-on: ${{ matrix.os }}
@@ -255,15 +262,10 @@ jobs:
           pip install -r requirements.txt
           python setup.py test
 
-      - name: Set BUILD_IDENTIFIER env var
-        shell: bash
-        run: |
-          echo "BUILD_IDENTIFIER=${GITHUB_REF##*/}" >> $GITHUB_ENV
-
       - name: Set ASSET_NAME env var
         shell: bash
         run: |
-          echo "ASSET_NAME=voicevox_core-${{ matrix.artifact_name }}-${{ env.BUILD_IDENTIFIER }}" >> $GITHUB_ENV
+          echo "ASSET_NAME=voicevox_core-${{ matrix.artifact_name }}-${{ env.VERSION }}" >> $GITHUB_ENV
 
       - name: Organize artifact
         shell: bash
@@ -286,23 +288,24 @@ jobs:
           retention-days: 7
 
       - name: Archive artifact
-        if: github.event.release.tag_name != '' && !startsWith(matrix.os, 'windows-')
+        if: env.VERSION != 'DEBUG' && !startsWith(matrix.os, 'windows-')
         shell: bash
         run: |
           cd artifact
           zip -r "../${{ env.ASSET_NAME }}.zip" "${{ env.ASSET_NAME }}"
 
       - name: Archive artifact (Windows)
-        if: github.event.release.tag_name != '' && startsWith(matrix.os, 'windows-')
+        if: env.VERSION != 'DEBUG' && startsWith(matrix.os, 'windows-')
         run: |
           powershell Compress-Archive -Path "artifact/${{ env.ASSET_NAME }}" -DestinationPath "${{ env.ASSET_NAME }}.zip"
 
       - name: Upload to Release
-        if: github.event.release.tag_name != '' && env.SKIP_UPLOADING_RELEASE_ASSET == '0'
+        if: env.VERSION != 'DEBUG' && env.SKIP_UPLOADING_RELEASE_ASSET == '0'
         uses: svenstaro/upload-release-action@v2
         with:
           repo_token: ${{ secrets.GITHUB_TOKEN }}
-          tag: ${{ github.ref }} # ==> github.event.release.tag_name
+          prerelease: true
+          tag: ${{ env.VERSION }}
           file: ${{ env.ASSET_NAME }}.zip
 
   build-win-cpp-example:
@@ -319,33 +322,33 @@ jobs:
       BUILD_CONFIGURATION: Release
 
     steps:
-    - uses: actions/checkout@v3
-
-    - name: Add MSBuild to PATH
-      uses: microsoft/setup-msbuild@v1.0.2
-
-    - name: Restore NuGet packages
-      working-directory: ${{env.GITHUB_WORKSPACE}}
-      run: nuget restore ${{env.SOLUTION_FILE_PATH}}
-
-    - name: Download and extract artifact
-      uses: actions/download-artifact@v2
-      id: download
-      with:
-        name: windows-x64-cpu-cpp-shared
-        path: artifacts\
-
-    - name: Copy core.lib
-      working-directory: ${{env.GITHUB_WORKSPACE}}
-      run: |
-        mkdir example\cpp\windows\simple_tts\lib\x64
-        copy ${{steps.download.outputs.download-path}}\core.lib example\cpp\windows\simple_tts\lib\x64
-
-    - name: Build
-      working-directory: ${{env.GITHUB_WORKSPACE}}
-      # Add additional options to the MSBuild command line here (like platform or verbosity level).
-      # See https://docs.microsoft.com/visualstudio/msbuild/msbuild-command-line-reference
-      run: msbuild /m /p:Configuration=${{env.BUILD_CONFIGURATION}} ${{env.SOLUTION_FILE_PATH}}
+      - uses: actions/checkout@v3
+
+      - name: Add MSBuild to PATH
+        uses: microsoft/setup-msbuild@v1.0.2
+
+      - name: Restore NuGet packages
+        working-directory: ${{env.GITHUB_WORKSPACE}}
+        run: nuget restore ${{env.SOLUTION_FILE_PATH}}
+
+      - name: Download and extract artifact
+        uses: actions/download-artifact@v2
+        id: download
+        with:
+          name: windows-x64-cpu-cpp-shared
+          path: artifacts\
+
+      - name: Copy core.lib
+        working-directory: ${{env.GITHUB_WORKSPACE}}
+        run: |
+          mkdir example\cpp\windows\simple_tts\lib\x64
+          copy ${{steps.download.outputs.download-path}}\core.lib example\cpp\windows\simple_tts\lib\x64
+
+      - name: Build
+        working-directory: ${{env.GITHUB_WORKSPACE}}
+        # Add additional options to the MSBuild command line here (like platform or verbosity level).
+        # See https://docs.microsoft.com/visualstudio/msbuild/msbuild-command-line-reference
+        run: msbuild /m /p:Configuration=${{env.BUILD_CONFIGURATION}} ${{env.SOLUTION_FILE_PATH}}
 
   build-unix-cpp-example:
     needs: [build-cpp-shared]

From 4592b93b9c1e1916bad0801cf49faa1942508b1b Mon Sep 17 00:00:00 2001
From: Hiroshiba <hihokaruta@gmail.com>
Date: Sat, 14 May 2022 19:45:05 +0900
Subject: [PATCH 04/13] Add the README file at build time (#131)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .github/workflows/build.yml | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index ef7d4a475..cccaad696 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -279,6 +279,8 @@ jobs:
 
           echo "${{ env.BUILD_IDENTIFIER }}" > "artifact/${{ env.ASSET_NAME }}/VERSION"
 
+          cp README.md "artifact/${{ env.ASSET_NAME }}/README.txt"
+
       # Upload
       - name: Upload artifact
         uses: actions/upload-artifact@v2

From 6253a8d4e7a22915e545a7eae5efc17e07175de7 Mon Sep 17 00:00:00 2001
From: qwerty2501 <939468+qwerty2501@users.noreply.github.com>
Date: Sun, 15 May 2022 22:10:05 +0900
Subject: [PATCH 05/13] append unit testing framework (#121)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Introduced Catch2 into the project and added one reference test case

* Added a variable to disable test builds, since release builds do not need them

* Shortened the test code

* Added instructions for running the tests to the README

* Changed the build-tests flag to default to OFF

* Updated the README to reflect the changed build-tests flag

* Added options that MSVC was missing

- Compile string literals as UTF-8
- Explicitly specify C++20
- Make the value of __cplusplus match the C++ version in use (reportedly it behaves as C++98 otherwise)

* Changed optimization options to be applied only when the config is Release

* Condensed the option specifications

* Also bumped core's C++ standard version

* Fixed the compile options

* Added CXX_STANDARD 20 to Catch2 as well

* Fixed the configuration so it builds on Windows

https://github.com/VOICEVOX/voicevox_core/pull/121#discussion_r872589554

* Fixed a spot where the formatter had unintentionally been applied
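
A minimal sketch of why the __cplusplus option matters (assumes MSVC with /std:c++20; this is an illustration, not part of the patch):

    #include <iostream>

    int main() {
      // Prints 202002 when built with /std:c++20 /Zc:__cplusplus;
      // without /Zc:__cplusplus, MSVC reportedly keeps the C++98 value 199711.
      std::cout << __cplusplus << "\n";
    }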
---
 .gitignore                                    |  1 +
 CMakeLists.txt                                | 19 ++++++++--
 README.md                                     |  5 +++
 core/CMakeLists.txt                           | 18 ++++++++--
 tests/CMakeLists.txt                          | 31 ++++++++++++++++
 .../core/engine/kana_parser_test.cpp          | 36 +++++++++++++++++++
 6 files changed, 105 insertions(+), 5 deletions(-)
 create mode 100644 tests/CMakeLists.txt
 create mode 100644 tests/unit_tests/core/engine/kana_parser_test.cpp

diff --git a/.gitignore b/.gitignore
index d15e18d32..10388a58d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -13,6 +13,7 @@ directml*/
 
 # Build artifacts
 build/
+test_build/
 lib/
 bin/
 core/_core.cpp
diff --git a/CMakeLists.txt b/CMakeLists.txt
index cca6d19e2..1e40b4347 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -2,8 +2,18 @@ cmake_minimum_required(VERSION 3.16)
 project(VoiceVoxCore)
 
 # TODO: download onnxruntime
-set(ONNXRUNTIME_DIR "${CMAKE_CURRENT_SOURCE_DIR}/onnxruntime" CACHE PATH "Path to ONNX Runtime")
-set(MODEL_DIR "${CMAKE_CURRENT_SOURCE_DIR}/model" CACHE PATH "Path to model")
+set(ONNXRUNTIME_DIR
+    "${CMAKE_CURRENT_SOURCE_DIR}/onnxruntime"
+    CACHE PATH "Path to ONNX Runtime")
+set(MODEL_DIR
+    "${CMAKE_CURRENT_SOURCE_DIR}/model"
+    CACHE PATH "Path to model")
+set(CORE_DIR
+    "${CMAKE_CURRENT_SOURCE_DIR}/core"
+    CACHE PATH "Path to core")
+set(DEPENDENT_DLLS
+    ""
+    CACHE INTERNAL "Dependent DLLs of core.dll")
 
 set(CMAKE_POSITION_INDEPENDENT_CODE ON)
 
@@ -18,3 +28,8 @@ endif()
 
 add_subdirectory(core)
 add_subdirectory(open_jtalk/src)
+
+if(BUILD_TEST)
+  enable_testing()
+  add_subdirectory(tests)
+endif()
diff --git a/README.md b/README.md
index 29adfccc4..ec7644f08 100644
--- a/README.md
+++ b/README.md
@@ -148,6 +148,11 @@ cmake --build . --config Release
 cmake --install .
 cd ..
 
+#(省略可能) C++のテスト実行
+cmake -S . -B test_build -DBUILD_TEST=YES
+cmake --build test_build
+ctest --test-dir test_build --verbose
+
 # (省略可能) pythonモジュールのテスト
 python setup.py test
 
diff --git a/core/CMakeLists.txt b/core/CMakeLists.txt
index 41223dbbf..bd8d97412 100644
--- a/core/CMakeLists.txt
+++ b/core/CMakeLists.txt
@@ -27,6 +27,7 @@ message("core will be installed to: ${CMAKE_INSTALL_PREFIX}")
 
 file(GLOB_RECURSE core_sources "src/*.cpp")
 # coreライブラリのビルド設定
+set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON)
 add_library(core
 		SHARED ${core_sources}
 		${EMBED_YUKARIN_S_OUTPUTS}
@@ -51,6 +52,9 @@ if(NOT DIRECTML)
 		"${ONNXRUNTIME_DIR}/lib/*.lib"
 		"${ONNXRUNTIME_DIR}/lib/*.so"
 		"${ONNXRUNTIME_DIR}/lib/*.so.*")
+	file(GLOB ONNXRUNTIME_DLLS
+		"${ONNXRUNTIME_DIR}/lib/*.dll")
+	set(DEPENDENT_DLLS "${DEPENDENT_DLLS};${ONNXRUNTIME_DLLS}" PARENT_SCOPE)
 	target_include_directories(core
 		PRIVATE ${ONNXRUNTIME_DIR}/include)
 	target_link_directories(core PUBLIC ${ONNXRUNTIME_DIR}/lib)
@@ -79,6 +83,9 @@ else()
 		file(GLOB DIRECTML_LIBS
 		"${DIRECTML_DIR}/bin/${DML_ARCH}-win/*.dll"
 		"${DIRECTML_DIR}/bin/${DML_ARCH}-win/*.lib")
+		file(GLOB DIRECTML_DLLS
+		"${DIRECTML_DIR}/bin/${DML_ARCH}-win/*.dll")
+		set(DEPENDENT_DLLS "${DEPENDENT_DLLS};${DIRECTML_DLLS}" PARENT_SCOPE)
 
 		target_include_directories(core PRIVATE ${DIRECTML_DIR}/include)
 		target_link_directories(core PUBLIC ${DIRECTML_DIR}/bin/${DML_ARCH}-win/)
@@ -91,7 +98,7 @@ else()
 	set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D DIRECTML")
 endif()
 
-set_property(TARGET core PROPERTY CXX_STANDARD 17)
+set_property(TARGET core PROPERTY CXX_STANDARD 20)
 set_property(TARGET core PROPERTY POSITION_INDEPENDENT_CODE ON) # fPIC
 # rpath設定
 if (APPLE)
@@ -102,8 +109,13 @@ elseif (UNIX)
 endif ()
 
 target_compile_options(core PRIVATE
-	$<$<CXX_COMPILER_ID:MSVC>: /W4 /O2 /utf-8 /DVOICEVOX_CORE_EXPORTS>
-	$<$<CXX_COMPILER_ID:GNU>: -Wall -Wextra -O2 -DVOICEVOX_CORE_EXPORTS>
+	$<$<CXX_COMPILER_ID:MSVC>: /W4 /utf-8 /DVOICEVOX_CORE_EXPORTS>
+	$<$<CXX_COMPILER_ID:GNU>: -Wall -Wextra  -DVOICEVOX_CORE_EXPORTS>
+)
+
+add_compile_options(TARGET core 
+	$<$<CONFIG:Release>:$<CXX_COMPILER_ID:MSVC>: /O2>
+	$<$<CONFIG:Release>:$<CXX_COMPILER_ID:GNU>: -O2>
 )
 
 target_include_directories(core
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
new file mode 100644
index 000000000..1e013a3df
--- /dev/null
+++ b/tests/CMakeLists.txt
@@ -0,0 +1,31 @@
+cmake_minimum_required(VERSION 3.16)
+
+project(VoiceVoxCoreTest)
+
+include(FetchContent)
+FetchContent_Declare(
+  Catch2
+  GIT_REPOSITORY https://github.com/catchorg/Catch2.git
+  GIT_TAG v3.0.0-preview5)
+FetchContent_MakeAvailable(Catch2)
+file(GLOB_RECURSE unit_test_files "unit_tests/*.cpp")
+add_executable(unit_test ${unit_test_files})
+target_compile_options(
+  unit_test PRIVATE $<$<CXX_COMPILER_ID:MSVC>: /W4 /utf-8>
+                    $<$<CXX_COMPILER_ID:GNU>: -Wall -Wextra>)
+set_property(TARGET unit_test PROPERTY CXX_STANDARD 20)
+set_property(TARGET Catch2 PROPERTY CXX_STANDARD 20)
+target_include_directories(unit_test PRIVATE ${Catch2_SOURCE_DIR}/src)
+target_include_directories(unit_test PRIVATE ${CORE_DIR}/src)
+target_link_libraries(unit_test PRIVATE Catch2::Catch2WithMain)
+target_link_libraries(unit_test PRIVATE core)
+if (WIN32)
+  add_custom_command(TARGET unit_test POST_BUILD
+    COMMAND ${CMAKE_COMMAND} -E copy_if_different
+    "$<TARGET_FILE:core>;${DEPENDENT_DLLS}" $<TARGET_FILE_DIR:unit_test>
+    COMMAND_EXPAND_LISTS )
+endif (WIN32)
+list(APPEND CMAKE_MODULE_PATH ${Catch2_SOURCE_DIR}/extras)
+include(Catch)
+include(CTest)
+catch_discover_tests(unit_test)
diff --git a/tests/unit_tests/core/engine/kana_parser_test.cpp b/tests/unit_tests/core/engine/kana_parser_test.cpp
new file mode 100644
index 000000000..36f94d4bc
--- /dev/null
+++ b/tests/unit_tests/core/engine/kana_parser_test.cpp
@@ -0,0 +1,36 @@
+#include "engine/kana_parser.h"
+
+#include <catch2/catch_test_macros.hpp>
+#include <catch2/generators/catch_generators.hpp>
+
+using namespace voicevox::core::engine;
+
+TEST_CASE("extract_one_character") {
+  struct TestCase {
+    std::string name;
+    struct {
+      std::string text;
+      size_t pos;
+    } given;
+    struct {
+      std::string one_char;
+      size_t after_size;
+    } expected;
+  };
+  auto t = GENERATE(TestCase{.name = "target_is_alphabet",
+                             .given = {.text = "abcd", .pos = 2},
+                             .expected = {.one_char = "c", .after_size = 1}},
+                    TestCase{.name = "target_is_hiragana",
+                             .given = {.text = "acあd", .pos = 2},
+                             .expected = {.one_char = "あ", .after_size = 3}},
+                    TestCase{.name = "target_is_4byte_kanji",
+                             .given = {.text = "ace𠀋", .pos = 3},
+                             .expected = {.one_char = "𠀋", .after_size = 4}});
+
+  SECTION(t.name) {
+    size_t size;
+    auto actual_one_char = extract_one_character(t.given.text, t.given.pos, size);
+    CHECK(t.expected.one_char == actual_one_char);
+    CHECK(t.expected.after_size == size);
+  }
+}

From d62a44b87048ba487ee4313e23b2adb59687189c Mon Sep 17 00:00:00 2001
From: Yuto Ashida <y-chan@y-chan.dev>
Date: Sat, 28 May 2022 12:22:39 +0900
Subject: [PATCH 06/13] Stop using static_cast and std::nullopt comparisons;
 use value() and similar functions instead (#118)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* use value and has value

* remove has_value
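
A minimal sketch of the std::optional idioms this patch standardizes on (illustrative only, not code from the diff):

    #include <optional>
    #include <string>

    std::string consonant_or_default(const std::optional<std::string>& consonant) {
      // Contextual bool conversion replaces `consonant != std::nullopt` and has_value().
      if (consonant) {
        // value() replaces static_cast<std::string>(*consonant).
        return consonant.value();
      }
      return "";
    }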
---
 core/src/engine/full_context_label.cpp |  4 ++--
 core/src/engine/kana_parser.cpp        | 12 ++++++------
 core/src/engine/synthesis_engine.cpp   | 22 +++++++++++-----------
 3 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/core/src/engine/full_context_label.cpp b/core/src/engine/full_context_label.cpp
index f2e46cb9b..d71e701ab 100644
--- a/core/src/engine/full_context_label.cpp
+++ b/core/src/engine/full_context_label.cpp
@@ -40,14 +40,14 @@ bool Phoneme::is_pause() const { return contexts.at("f1") == "xx"; }
 void Mora::set_context(const std::string &key, const std::string &value) {
   vowel.contexts[key] = value;
 
-  if (!consonant.has_value()) {
+  if (!consonant) {
     consonant.value().contexts[key] = value;
   }
 }
 
 std::vector<Phoneme> Mora::phonemes() const {
   std::vector<Phoneme> phonemes;
-  if (consonant.has_value()) {
+  if (consonant) {
     phonemes = {consonant.value(), vowel};
   } else {
     phonemes = {vowel};
diff --git a/core/src/engine/kana_parser.cpp b/core/src/engine/kana_parser.cpp
index 6b33c0fa0..7d840aef6 100644
--- a/core/src/engine/kana_parser.cpp
+++ b/core/src/engine/kana_parser.cpp
@@ -97,13 +97,13 @@ AccentPhraseModel text_to_accent_phrase(const std::string& phrase) {
         matched_text = stack;
       }
     }
-    if (matched_text == std::nullopt) {
-      throw std::runtime_error("unknown text in accent phrase: " + stack);
-    } else {
-      moras.push_back(text2mora.at(*matched_text));
+    if (matched_text) {
+      moras.push_back(text2mora.at(matched_text.value()));
       base_index += matched_text->size();
       stack = "";
       matched_text = std::nullopt;
+    } else {
+      throw std::runtime_error("unknown text in accent phrase: " + stack);
     }
     if (outer_loop > LOOP_LIMIT) throw std::runtime_error("detect infinity loop!");
   }
@@ -111,7 +111,7 @@ AccentPhraseModel text_to_accent_phrase(const std::string& phrase) {
 
   AccentPhraseModel accent_phrase = {
       moras,
-      static_cast<unsigned int>(*accent_index),
+      accent_index.value(),
   };
   return accent_phrase;
 }
@@ -178,7 +178,7 @@ std::string create_kana(std::vector<AccentPhraseModel> accent_phrases) {
     }
 
     if (i < accent_phrases.size()) {
-      if (phrase.pause_mora != std::nullopt)
+      if (phrase.pause_mora)
         text += PAUSE_DELIMITER;
       else
         text += NOPAUSE_DELIMITER;
diff --git a/core/src/engine/synthesis_engine.cpp b/core/src/engine/synthesis_engine.cpp
index ca2a9dd47..b6c2983a6 100644
--- a/core/src/engine/synthesis_engine.cpp
+++ b/core/src/engine/synthesis_engine.cpp
@@ -20,8 +20,8 @@ std::vector<MoraModel> to_flatten_moras(std::vector<AccentPhraseModel> accent_ph
     for (MoraModel mora : moras) {
       flatten_moras.push_back(mora);
     }
-    if (accent_phrase.pause_mora != std::nullopt) {
-      MoraModel pause_mora = static_cast<MoraModel>(*accent_phrase.pause_mora);
+    if (accent_phrase.pause_mora) {
+      MoraModel pause_mora = accent_phrase.pause_mora.value();
       flatten_moras.push_back(pause_mora);
     }
   }
@@ -138,7 +138,7 @@ std::vector<AccentPhraseModel> SynthesisEngine::create_accent_phrases(std::strin
         if (moras_text == "n") moras_text = "N";
         std::optional<std::string> consonant = std::nullopt;
         std::optional<float> consonant_length = std::nullopt;
-        if (mora.consonant.has_value()) {
+        if (mora.consonant) {
           consonant = mora.consonant.value().phoneme();
           consonant_length = 0.0f;
         }
@@ -204,7 +204,7 @@ std::vector<AccentPhraseModel> SynthesisEngine::replace_phoneme_length(std::vect
     std::vector<MoraModel> moras = accent_phrase.moras;
     for (size_t j = 0; j < moras.size(); j++) {
       MoraModel mora = moras[j];
-      if (mora.consonant != std::nullopt) {
+      if (mora.consonant) {
         mora.consonant_length = phoneme_length[vowel_indexes_data[index + 1] - 1];
       }
       mora.vowel_length = phoneme_length[vowel_indexes_data[index + 1]];
@@ -212,7 +212,7 @@ std::vector<AccentPhraseModel> SynthesisEngine::replace_phoneme_length(std::vect
       moras[j] = mora;
     }
     accent_phrase.moras = moras;
-    if (accent_phrase.pause_mora != std::nullopt) {
+    if (accent_phrase.pause_mora) {
       std::optional<MoraModel> pause_mora = accent_phrase.pause_mora;
       pause_mora->vowel_length = phoneme_length[vowel_indexes_data[index + 1]];
       index++;
@@ -310,7 +310,7 @@ std::vector<AccentPhraseModel> SynthesisEngine::replace_mora_pitch(std::vector<A
       moras[j] = mora;
     }
     accent_phrase.moras = moras;
-    if (accent_phrase.pause_mora != std::nullopt) {
+    if (accent_phrase.pause_mora) {
       std::optional<MoraModel> pause_mora = accent_phrase.pause_mora;
       pause_mora->pitch = f0_list[index + 1];
       index++;
@@ -427,8 +427,8 @@ std::vector<float> SynthesisEngine::synthesis(AudioQueryModel query, int64_t *sp
   int count = 0;
 
   for (MoraModel mora : flatten_moras) {
-    if (mora.consonant != std::nullopt) {
-      phoneme_length_list.push_back(static_cast<float>(*mora.consonant_length));
+    if (mora.consonant) {
+      phoneme_length_list.push_back(mora.consonant_length.value());
     }
     phoneme_length_list.push_back(mora.vowel_length);
     float f0_single = mora.pitch * std::pow(2.0f, pitch_scale);
@@ -509,7 +509,7 @@ void SynthesisEngine::initial_process(std::vector<AccentPhraseModel> &accent_phr
   phoneme_str_list.push_back("pau");
   for (MoraModel mora : flatten_moras) {
     std::optional<std::string> consonant = mora.consonant;
-    if (consonant != std::nullopt) phoneme_str_list.push_back(static_cast<std::string>(*consonant));
+    if (consonant) phoneme_str_list.push_back(consonant.value());
     phoneme_str_list.push_back(mora.vowel);
   }
   phoneme_str_list.push_back("pau");
@@ -530,11 +530,11 @@ void SynthesisEngine::create_one_accent_list(std::vector<int64_t> &accent_list,
     else
       value = 0;
     one_accent_list.push_back(value);
-    if (mora.consonant != std::nullopt) {
+    if (mora.consonant) {
       one_accent_list.push_back(value);
     }
   }
-  if (accent_phrase.pause_mora != std::nullopt) one_accent_list.push_back(0);
+  if (accent_phrase.pause_mora) one_accent_list.push_back(0);
   std::copy(one_accent_list.begin(), one_accent_list.end(), std::back_inserter(accent_list));
 }
 }  // namespace voicevox::core::engine

From c60dc215e58725b85343c92286dd4280f3666f15 Mon Sep 17 00:00:00 2001
From: Hiroshiba <hihokaruta@gmail.com>
Date: Tue, 31 May 2022 01:33:50 +0900
Subject: [PATCH 07/13] Make sure version information is properly included
 when building core
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .github/workflows/build.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index cccaad696..eedf68621 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -277,7 +277,7 @@ jobs:
           # copy Linux/macOS shared library if exists
           cp -v core/lib/libcore* "artifact/${{ env.ASSET_NAME }}" || true
 
-          echo "${{ env.BUILD_IDENTIFIER }}" > "artifact/${{ env.ASSET_NAME }}/VERSION"
+          echo "${{ env.VERSION }}" > "artifact/${{ env.ASSET_NAME }}/VERSION"
 
           cp README.md "artifact/${{ env.ASSET_NAME }}/README.txt"
 

From 7ff2e7f2ae7e635d2557e2a6114e31b8f8cf3ddd Mon Sep 17 00:00:00 2001
From: Hiroshiba <hihokaruta@gmail.com>
Date: Fri, 3 Jun 2022 23:55:15 +0900
Subject: [PATCH 08/13] hotfix: run intonation inference on CPU (#146)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
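
The two lighter models (yukarin_s, yukarin_sa) now always run on CPU, and only the heavy decode model keeps the GPU execution provider. A minimal sketch of the split (same ONNX Runtime calls as in the diff below):

    Ort::SessionOptions light_session_options;  // yukarin_s / yukarin_sa: CPU is faster here
    Ort::SessionOptions heavy_session_options;  // decode: GPU is faster
    if (use_gpu) {
      const OrtCUDAProviderOptions cuda_options;
      heavy_session_options.AppendExecutionProvider_CUDA(cuda_options);
    }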

---
 core/src/core.cpp | 22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)

diff --git a/core/src/core.cpp b/core/src/core.cpp
index e7b7cc4d8..048f6852a 100644
--- a/core/src/core.cpp
+++ b/core/src/core.cpp
@@ -87,14 +87,18 @@ struct Status {
     yukarin_sa_list = std::vector<std::optional<Ort::Session>>(model_count);
     decode_list = std::vector<std::optional<Ort::Session>>(model_count);
 
-    session_options.SetInterOpNumThreads(cpu_num_threads).SetIntraOpNumThreads(cpu_num_threads);
+    // 軽いモデルの場合はCPUの方が速い
+    light_session_options.SetInterOpNumThreads(cpu_num_threads).SetIntraOpNumThreads(cpu_num_threads);
+
+    // 重いモデルはGPUを使ったほうが速い
+    heavy_session_options.SetInterOpNumThreads(cpu_num_threads).SetIntraOpNumThreads(cpu_num_threads);
     if (use_gpu) {
 #ifdef DIRECTML
-      session_options.DisableMemPattern().SetExecutionMode(ExecutionMode::ORT_SEQUENTIAL);
-      Ort::ThrowOnError(OrtSessionOptionsAppendExecutionProvider_DML(session_options, 0));
+      heavy_session_options.DisableMemPattern().SetExecutionMode(ExecutionMode::ORT_SEQUENTIAL);
+      Ort::ThrowOnError(OrtSessionOptionsAppendExecutionProvider_DML(heavy_session_options, 0));
 #else
       const OrtCUDAProviderOptions cuda_options;
-      session_options.AppendExecutionProvider_CUDA(cuda_options);
+      heavy_session_options.AppendExecutionProvider_CUDA(cuda_options);
 #endif
     }
   }
@@ -133,15 +137,17 @@ struct Status {
     embed::Resource decode_model = VVMODEL.DECODE();
 
     yukarin_s_list[model_index] =
-        std::move(Ort::Session(env, yukarin_s_model.data, yukarin_s_model.size, session_options));
+        std::move(Ort::Session(env, yukarin_s_model.data, yukarin_s_model.size, light_session_options));
     yukarin_sa_list[model_index] =
-        std::move(Ort::Session(env, yukarin_sa_model.data, yukarin_sa_model.size, session_options));
-    decode_list[model_index] = std::move(Ort::Session(env, decode_model.data, decode_model.size, session_options));
+        std::move(Ort::Session(env, yukarin_sa_model.data, yukarin_sa_model.size, light_session_options));
+    decode_list[model_index] =
+        std::move(Ort::Session(env, decode_model.data, decode_model.size, heavy_session_options));
     return true;
   }
 
   std::string root_dir_path;
-  Ort::SessionOptions session_options;
+  Ort::SessionOptions light_session_options;  // 軽いモデルはこちらを使う
+  Ort::SessionOptions heavy_session_options;  // 重いモデルはこちらを使う
   Ort::MemoryInfo memory_info;
 
   Ort::Env env{ORT_LOGGING_LEVEL_ERROR};

From de8394f6b5d968b9e90551db6f8763473404839c Mon Sep 17 00:00:00 2001
From: Yosshi999 <Yosshi999@users.noreply.github.com>
Date: Sat, 4 Jun 2022 02:16:21 +0900
Subject: [PATCH 09/13] python example for 0.12, update FFI (#138)
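
A minimal sketch of the new TTS entry points (hypothetical C++ caller; signatures inferred from the ctypes declarations in this diff):

    // Load the Open JTalk dictionary once before synthesizing.
    voicevox_load_openjtalk_dict("open_jtalk_dic_utf_8-1.11");

    int size = 0;
    uint8_t* wav = nullptr;
    int result = voicevox_tts("これは本当に実行できているんですか", 1, &size, &wav);
    if (result == 0) {
      // ... write `size` bytes starting at `wav` to an output file ...
      voicevox_wav_free(wav);  // the buffer is owned by the library
    }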

---
 README.md                                    |   9 +-
 core/_core.py                                |  52 ++
 example/python/acoustic_feature_extractor.py | 226 --------
 example/python/core.pxd                      |  41 --
 example/python/core.pyx                      |  80 ---
 example/python/forwarder.py                  | 191 -------
 example/python/full_context_label.py         | 512 -------------------
 example/python/makelib.bat                   |  32 --
 example/python/requirements.txt              |   4 -
 example/python/run.py                        |  30 +-
 example/python/setup.py                      |  24 -
 11 files changed, 65 insertions(+), 1136 deletions(-)
 delete mode 100644 example/python/acoustic_feature_extractor.py
 delete mode 100644 example/python/core.pxd
 delete mode 100644 example/python/core.pyx
 delete mode 100644 example/python/forwarder.py
 delete mode 100644 example/python/full_context_label.py
 delete mode 100644 example/python/makelib.bat
 delete mode 100644 example/python/requirements.txt
 delete mode 100644 example/python/setup.py

diff --git a/README.md b/README.md
index ec7644f08..91cc83ca1 100644
--- a/README.md
+++ b/README.md
@@ -100,11 +100,13 @@ sudo apt install libgomp1
 ```
 
 ## サンプル実行
+まずOpen JTalk辞書フォルダを配置します。
+http://open-jtalk.sourceforge.net/ を開き、Dictionary for Open JTalk 欄の Binary Package (UTF-8)をクリックして「open_jtalk_dic_utf_8-1.11.tar.gz」をダウンロードします。  
+これを展開してできた「open_jtalk_dic_utf_8-1.11」フォルダをexample/pythonに配置します。
+
 ```bash
 cd example/python
 
-# サンプルコード実行のための依存モジュールのインストール
-pip install -r requirements.txt
 python run.py \
     --text "これは本当に実行できているんですか" \
     --speaker_id 1
@@ -113,8 +115,7 @@ python run.py \
 # --text 読み上げるテキスト
 # --speaker_id 話者ID
 # --use_gpu GPUを使う
-# --f0_speaker_id 音高の話者ID(デフォルト値はspeaker_id)
-# --f0_correct 音高の補正値(デフォルト値は0。+-0.3くらいで結果が大きく変わります)
+# --openjtalk_dict OpenJtalk辞書フォルダへのパス
 ```
 
 ### その他の言語
diff --git a/core/_core.py b/core/_core.py
index f9306db4b..423727ece 100644
--- a/core/_core.py
+++ b/core/_core.py
@@ -30,6 +30,12 @@
 lib.initialize.argtypes = (c_bool, c_int, c_bool)
 lib.initialize.restype = c_bool
 
+lib.load_model.argtypes = (c_int64,)
+lib.load_model.restype = c_bool
+
+lib.is_model_loaded.argtypes = (c_int64,)
+lib.is_model_loaded.restype = c_bool
+
 lib.finalize.argtypes = ()
 
 lib.metas.restype = c_char_p
@@ -50,6 +56,19 @@
 
 lib.last_error_message.restype = c_char_p
 
+lib.voicevox_load_openjtalk_dict.argtypes = (c_char_p,)
+lib.voicevox_load_openjtalk_dict.restype = c_int
+
+lib.voicevox_tts.argtypes = (c_char_p, c_int64, POINTER(c_int), POINTER(POINTER(c_uint8)))
+lib.voicevox_tts.restype = c_int
+
+lib.voicevox_tts_from_kana.argtypes = (c_char_p, c_int64, POINTER(c_int), POINTER(POINTER(c_uint8)))
+lib.voicevox_tts_from_kana.restype = c_int
+
+lib.voicevox_wav_free.argtypes = (POINTER(c_uint8),)
+
+lib.voicevox_error_result_to_message.argtypes = (c_int,)
+lib.voicevox_load_openjtalk_dict.argtypes = (c_char_p,)
 
 # ラッパー関数
 def initialize(use_gpu: bool, cpu_num_threads=0, load_all_models=True):
@@ -57,6 +76,13 @@ def initialize(use_gpu: bool, cpu_num_threads=0, load_all_models=True):
     if not success:
         raise Exception(lib.last_error_message().decode())
 
+def load_model(speaker_id: int):
+    success = lib.load_model(speaker_id)
+    if not success:
+        raise Exception(lib.last_error_message().decode())
+
+def is_model_loaded(speaker_id: int) -> bool:
+    return lib.is_model_loaded(speaker_id)
 
 def metas() -> str:
     return lib.metas().decode()
@@ -102,6 +128,32 @@ def decode_forward(length: int, phoneme_size: int, f0, phoneme, speaker_id):
         raise Exception(lib.last_error_message().decode())
     return output
 
+def voicevox_load_openjtalk_dict(dict_path: str):
+    errno = lib.voicevox_load_openjtalk_dict(dict_path.encode())
+    if errno != 0:
+        raise Exception(lib.voicevox_error_result_to_message(errno).decode())
+
+def voicevox_tts(text: str, speaker_id: int) -> bytes:
+    output_binary_size = c_int()
+    output_wav = POINTER(c_uint8)()
+    errno = lib.voicevox_tts(text.encode(), speaker_id, byref(output_binary_size), byref(output_wav))
+    if errno != 0:
+        raise Exception(lib.voicevox_error_result_to_message(errno).decode())
+    output = create_string_buffer(output_binary_size.value * sizeof(c_uint8))
+    memmove(output, output_wav, output_binary_size.value * sizeof(c_uint8))
+    lib.voicevox_wav_free(output_wav)
+    return output
+
+def voicevox_tts_from_kana(text: str, speaker_id: int) -> bytes:
+    output_binary_size = c_int()
+    output_wav = POINTER(c_uint8)()
+    errno = lib.voicevox_tts_from_kana(text.encode(), speaker_id, byref(output_binary_size), byref(output_wav))
+    if errno != 0:
+        raise Exception(lib.voicevox_error_result_to_message(errno).decode())
+    output = create_string_buffer(output_binary_size.value * sizeof(c_uint8))
+    memmove(output, output_wav, output_binary_size.value * sizeof(c_uint8))
+    lib.voicevox_wav_free(output_wav)
+    return output
 
 def finalize():
     lib.finalize()
diff --git a/example/python/acoustic_feature_extractor.py b/example/python/acoustic_feature_extractor.py
deleted file mode 100644
index e8afcf955..000000000
--- a/example/python/acoustic_feature_extractor.py
+++ /dev/null
@@ -1,226 +0,0 @@
-from abc import abstractmethod
-from dataclasses import dataclass
-from enum import Enum
-from pathlib import Path
-from typing import List, Sequence
-
-import numpy
-
-
-@dataclass
-class SamplingData:
-    array: numpy.ndarray  # shape: (N, ?)
-    rate: float
-
-    def resample(self, sampling_rate: float, index: int = 0, length: int = None):
-        if length is None:
-            length = int(len(self.array) / self.rate * sampling_rate)
-        indexes = (numpy.random.rand() + index + numpy.arange(length)) * (
-            self.rate / sampling_rate
-        )
-        return self.array[indexes.astype(int)]
-
-
-class BasePhoneme(object):
-    phoneme_list: Sequence[str]
-    num_phoneme: int
-    space_phoneme: str
-
-    def __init__(
-        self,
-        phoneme: str,
-        start: float,
-        end: float,
-    ):
-        self.phoneme = phoneme
-        self.start = numpy.round(start, decimals=2)
-        self.end = numpy.round(end, decimals=2)
-
-    def __repr__(self):
-        return f"Phoneme(phoneme='{self.phoneme}', start={self.start}, end={self.end})"
-
-    def __eq__(self, o: object):
-        return isinstance(o, BasePhoneme) and (
-            self.phoneme == o.phoneme and self.start == o.start and self.end == o.end
-        )
-
-    def verify(self):
-        assert self.phoneme in self.phoneme_list, f"{self.phoneme} is not defined."
-
-    @property
-    def phoneme_id(self):
-        return self.phoneme_list.index(self.phoneme)
-
-    @property
-    def duration(self):
-        return self.end - self.start
-
-    @property
-    def onehot(self):
-        array = numpy.zeros(self.num_phoneme, dtype=bool)
-        array[self.phoneme_id] = True
-        return array
-
-    @classmethod
-    def parse(cls, s: str):
-        """
-        >>> BasePhoneme.parse('1.7425000 1.9125000 o:')
-        Phoneme(phoneme='o:', start=1.74, end=1.91)
-        """
-        words = s.split()
-        return cls(
-            start=float(words[0]),
-            end=float(words[1]),
-            phoneme=words[2],
-        )
-
-    @classmethod
-    @abstractmethod
-    def convert(cls, phonemes: List["BasePhoneme"]) -> List["BasePhoneme"]:
-        pass
-
-    @classmethod
-    def load_julius_list(cls, path: Path):
-        phonemes = [cls.parse(s) for s in path.read_text().split("\n") if len(s) > 0]
-        phonemes = cls.convert(phonemes)
-
-        for phoneme in phonemes:
-            phoneme.verify()
-        return phonemes
-
-    @classmethod
-    def save_julius_list(cls, phonemes: List["BasePhoneme"], path: Path):
-        text = "\n".join(
-            [
-                f"{numpy.round(p.start, decimals=2):.2f}\t"
-                f"{numpy.round(p.end, decimals=2):.2f}\t"
-                f"{p.phoneme}"
-                for p in phonemes
-            ]
-        )
-        path.write_text(text)
-
-
-class JvsPhoneme(BasePhoneme):
-    phoneme_list = (
-        "pau",
-        "I",
-        "N",
-        "U",
-        "a",
-        "b",
-        "by",
-        "ch",
-        "cl",
-        "d",
-        "dy",
-        "e",
-        "f",
-        "g",
-        "gy",
-        "h",
-        "hy",
-        "i",
-        "j",
-        "k",
-        "ky",
-        "m",
-        "my",
-        "n",
-        "ny",
-        "o",
-        "p",
-        "py",
-        "r",
-        "ry",
-        "s",
-        "sh",
-        "t",
-        "ts",
-        "u",
-        "v",
-        "w",
-        "y",
-        "z",
-    )
-    num_phoneme = len(phoneme_list)
-    space_phoneme = "pau"
-
-    @classmethod
-    def convert(cls, phonemes: List["JvsPhoneme"]):
-        if "sil" in phonemes[0].phoneme:
-            phonemes[0].phoneme = cls.space_phoneme
-        if "sil" in phonemes[-1].phoneme:
-            phonemes[-1].phoneme = cls.space_phoneme
-        return phonemes
-
-
-class OjtPhoneme(BasePhoneme):
-    phoneme_list = (
-        "pau",
-        "A",
-        "E",
-        "I",
-        "N",
-        "O",
-        "U",
-        "a",
-        "b",
-        "by",
-        "ch",
-        "cl",
-        "d",
-        "dy",
-        "e",
-        "f",
-        "g",
-        "gw",
-        "gy",
-        "h",
-        "hy",
-        "i",
-        "j",
-        "k",
-        "kw",
-        "ky",
-        "m",
-        "my",
-        "n",
-        "ny",
-        "o",
-        "p",
-        "py",
-        "r",
-        "ry",
-        "s",
-        "sh",
-        "t",
-        "ts",
-        "ty",
-        "u",
-        "v",
-        "w",
-        "y",
-        "z",
-    )
-    num_phoneme = len(phoneme_list)
-    space_phoneme = "pau"
-
-    @classmethod
-    def convert(cls, phonemes: List["OjtPhoneme"]):
-        if "sil" in phonemes[0].phoneme:
-            phonemes[0].phoneme = cls.space_phoneme
-        if "sil" in phonemes[-1].phoneme:
-            phonemes[-1].phoneme = cls.space_phoneme
-        return phonemes
-
-
-class PhonemeType(str, Enum):
-    jvs = "jvs"
-    openjtalk = "openjtalk"
-
-
-phoneme_type_to_class = {
-    PhonemeType.jvs: JvsPhoneme,
-    PhonemeType.openjtalk: OjtPhoneme,
-}
diff --git a/example/python/core.pxd b/example/python/core.pxd
deleted file mode 100644
index f022104ee..000000000
--- a/example/python/core.pxd
+++ /dev/null
@@ -1,41 +0,0 @@
-from libcpp cimport bool
-
-cdef extern from "core.h":
-    bool c_initialize "initialize" (
-        const char *root_dir_path,
-        bool use_gpu
-    )
-
-    void c_finalize "finalize" ()
-
-    const char *c_metas "metas" ()
-
-    bool c_yukarin_s_forward "yukarin_s_forward" (
-        int length,
-        long *phoneme_list,
-        long *speaker_id,
-        float *output
-    )
-
-    bool c_yukarin_sa_forward "yukarin_sa_forward" (
-        int length,
-        long *vowel_phoneme_list,
-        long *consonant_phoneme_list,
-        long *start_accent_list,
-        long *end_accent_list,
-        long *start_accent_phrase_list,
-        long *end_accent_phrase_list,
-        long *speaker_id,
-        float *output
-    )
-
-    bool c_decode_forward "decode_forward" (
-        int length,
-        int phoneme_size,
-        float *f0,
-        float *phoneme,
-        long *speaker_id,
-        float *output
-    )
-
-    const char *c_last_error_message "last_error_message" ()
diff --git a/example/python/core.pyx b/example/python/core.pyx
deleted file mode 100644
index be2fde430..000000000
--- a/example/python/core.pyx
+++ /dev/null
@@ -1,80 +0,0 @@
-cimport numpy
-import numpy
-
-from libcpp cimport bool
-
-cpdef initialize(
-    str root_dir_path,
-    bool use_gpu,
-):
-    cdef bool success = c_initialize(
-        root_dir_path.encode(),
-        use_gpu,
-    )
-    if not success: raise Exception(c_last_error_message().decode())
-
-cpdef finalize():
-    c_finalize()
-
-cpdef metas():
-    return c_metas().decode()
-
-cpdef numpy.ndarray[numpy.float32_t, ndim=1] yukarin_s_forward(
-    int length,
-    numpy.ndarray[numpy.int64_t, ndim=1] phoneme_list,
-    numpy.ndarray[numpy.int64_t, ndim=1] speaker_id,
-):
-    cdef numpy.ndarray[numpy.float32_t, ndim=1] output = numpy.zeros((length,), dtype=numpy.float32)
-    cdef bool success = c_yukarin_s_forward(
-        length,
-        <long*> phoneme_list.data,
-        <long*> speaker_id.data,
-        <float*> output.data,
-    )
-    if not success: raise Exception(c_last_error_message().decode())
-    return output
-
-
-cpdef numpy.ndarray[numpy.float32_t, ndim=2] yukarin_sa_forward(
-    int length,
-    numpy.ndarray[numpy.int64_t, ndim=2] vowel_phoneme_list,
-    numpy.ndarray[numpy.int64_t, ndim=2] consonant_phoneme_list,
-    numpy.ndarray[numpy.int64_t, ndim=2] start_accent_list,
-    numpy.ndarray[numpy.int64_t, ndim=2] end_accent_list,
-    numpy.ndarray[numpy.int64_t, ndim=2] start_accent_phrase_list,
-    numpy.ndarray[numpy.int64_t, ndim=2] end_accent_phrase_list,
-    numpy.ndarray[numpy.int64_t, ndim=1] speaker_id,
-):
-    cdef numpy.ndarray[numpy.float32_t, ndim=2] output = numpy.empty((len(speaker_id), length,), dtype=numpy.float32)
-    cdef bool success = c_yukarin_sa_forward(
-        length,
-        <long*> vowel_phoneme_list.data,
-        <long*> consonant_phoneme_list.data,
-        <long*> start_accent_list.data,
-        <long*> end_accent_list.data,
-        <long*> start_accent_phrase_list.data,
-        <long*> end_accent_phrase_list.data,
-        <long*> speaker_id.data,
-        <float*> output.data,
-    )
-    if not success: raise Exception(c_last_error_message().decode())
-    return output
-
-cpdef numpy.ndarray[numpy.float32_t, ndim=1] decode_forward(
-    int length,
-    int phoneme_size,
-    numpy.ndarray[numpy.float32_t, ndim=2] f0,
-    numpy.ndarray[numpy.float32_t, ndim=2] phoneme,
-    numpy.ndarray[numpy.int64_t, ndim=1] speaker_id,
-):
-    cdef numpy.ndarray[numpy.float32_t, ndim=1] output = numpy.empty((length*256,), dtype=numpy.float32)
-    cdef bool success = c_decode_forward(
-        length,
-        phoneme_size,
-        <float*> f0.data,
-        <float*> phoneme.data,
-        <long*> speaker_id.data,
-        <float*> output.data,
-    )
-    if not success: raise Exception(c_last_error_message().decode())
-    return output
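The removed Cython wrapper followed one pattern per forward call: allocate a float32 output buffer, hand raw numpy buffers to the C function, and raise with `last_error_message()` on failure. The same pattern can be sketched with plain ctypes; the argtype declarations below mirror the removed `core.pxd` signatures, and the library filename is an assumption.

```python
from ctypes import CDLL, POINTER, c_bool, c_char_p, c_float, c_int, c_long

import numpy

core = CDLL("./libcore.so")  # assumed library name/path
core.yukarin_s_forward.argtypes = (
    c_int, POINTER(c_long), POINTER(c_long), POINTER(c_float)
)
core.yukarin_s_forward.restype = c_bool
core.last_error_message.restype = c_char_p


def yukarin_s_forward(phoneme_list: numpy.ndarray, speaker_id: numpy.ndarray) -> numpy.ndarray:
    # Mirror the removed core.pyx: allocate the output, call, raise on failure.
    output = numpy.zeros((len(phoneme_list),), dtype=numpy.float32)
    success = core.yukarin_s_forward(
        len(phoneme_list),
        phoneme_list.ctypes.data_as(POINTER(c_long)),
        speaker_id.ctypes.data_as(POINTER(c_long)),
        output.ctypes.data_as(POINTER(c_float)),
    )
    if not success:
        raise Exception(core.last_error_message().decode())
    return output
```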
diff --git a/example/python/forwarder.py b/example/python/forwarder.py
deleted file mode 100644
index 12515f87e..000000000
--- a/example/python/forwarder.py
+++ /dev/null
@@ -1,191 +0,0 @@
-from typing import List, Optional
-
-import numpy
-from full_context_label import extract_full_context_label
-
-from acoustic_feature_extractor import BasePhoneme, JvsPhoneme, OjtPhoneme, SamplingData
-
-unvoiced_mora_phoneme_list = ["A", "I", "U", "E", "O", "cl", "pau"]
-mora_phoneme_list = ["a", "i", "u", "e", "o", "N"] + unvoiced_mora_phoneme_list
-
-
-def split_mora(phoneme_list: List[BasePhoneme]):
-    vowel_indexes = [
-        i for i, p in enumerate(phoneme_list) if p.phoneme in mora_phoneme_list
-    ]
-    vowel_phoneme_list = [phoneme_list[i] for i in vowel_indexes]
-    consonant_phoneme_list: List[Optional[BasePhoneme]] = [None] + [
-        None if post - prev == 1 else phoneme_list[post - 1]
-        for prev, post in zip(vowel_indexes[:-1], vowel_indexes[1:])
-    ]
-    return consonant_phoneme_list, vowel_phoneme_list, vowel_indexes
-
-
-class Forwarder:
-    def __init__(
-        self,
-        yukarin_s_forwarder,
-        yukarin_sa_forwarder,
-        decode_forwarder,
-    ):
-        super().__init__()
-        self.yukarin_s_forwarder = yukarin_s_forwarder
-        self.yukarin_sa_forwarder = yukarin_sa_forwarder
-        self.decode_forwarder = decode_forwarder
-        self.yukarin_s_phoneme_class = OjtPhoneme
-        self.yukarin_soso_phoneme_class = OjtPhoneme
-
-    def forward(
-        self, text: str, speaker_id: int, f0_speaker_id: int, f0_correct: float = 0
-    ):
-        rate = 200
-
-        # phoneme
-        utterance = extract_full_context_label(text)
-        label_data_list = utterance.phonemes
-
-        is_type1 = False
-        phoneme_str_list = []
-        start_accent_list = (
-            numpy.ones(len(label_data_list), dtype=numpy.int64) * numpy.nan
-        )
-        end_accent_list = (
-            numpy.ones(len(label_data_list), dtype=numpy.int64) * numpy.nan
-        )
-        start_accent_phrase_list = (
-            numpy.ones(len(label_data_list), dtype=numpy.int64) * numpy.nan
-        )
-        end_accent_phrase_list = (
-            numpy.ones(len(label_data_list), dtype=numpy.int64) * numpy.nan
-        )
-        for i, label in enumerate(label_data_list):
-            is_end_accent = label.contexts["a1"] == "0"
-
-            if label.contexts["a2"] == "1":
-                is_type1 = is_end_accent
-
-            if label.contexts["a2"] == "1" and is_type1:
-                is_start_accent = True
-            elif label.contexts["a2"] == "2" and not is_type1:
-                is_start_accent = True
-            else:
-                is_start_accent = False
-
-            phoneme_str_list.append(label.phoneme)
-            start_accent_list[i] = is_start_accent
-            end_accent_list[i] = is_end_accent
-            start_accent_phrase_list[i] = label.contexts["a2"] == "1"
-            end_accent_phrase_list[i] = label.contexts["a3"] == "1"
-
-        start_accent_list = numpy.array(start_accent_list, dtype=numpy.int64)
-        end_accent_list = numpy.array(end_accent_list, dtype=numpy.int64)
-        start_accent_phrase_list = numpy.array(
-            start_accent_phrase_list, dtype=numpy.int64
-        )
-        end_accent_phrase_list = numpy.array(end_accent_phrase_list, dtype=numpy.int64)
-
-        # forward yukarin s
-        assert self.yukarin_s_phoneme_class is not None
-
-        phoneme_data_list = [
-            self.yukarin_s_phoneme_class(phoneme=p, start=i, end=i + 1)
-            for i, p in enumerate(phoneme_str_list)
-        ]
-        phoneme_data_list = self.yukarin_s_phoneme_class.convert(phoneme_data_list)
-        phoneme_list_s = numpy.array(
-            [p.phoneme_id for p in phoneme_data_list], dtype=numpy.int64
-        )
-
-        phoneme_length = self.yukarin_s_forwarder(
-            length=len(phoneme_list_s),
-            phoneme_list=numpy.ascontiguousarray(phoneme_list_s),
-            speaker_id=numpy.array(f0_speaker_id, dtype=numpy.int64).reshape(-1),
-        )
-        phoneme_length[0] = phoneme_length[-1] = 0.1
-        phoneme_length = numpy.round(phoneme_length * rate) / rate
-
-        # forward yukarin sa
-        (
-            consonant_phoneme_data_list,
-            vowel_phoneme_data_list,
-            vowel_indexes_data,
-        ) = split_mora(phoneme_data_list)
-
-        vowel_indexes = numpy.array(vowel_indexes_data, dtype=numpy.int64)
-
-        vowel_phoneme_list = numpy.array(
-            [p.phoneme_id for p in vowel_phoneme_data_list], dtype=numpy.int64
-        )
-        consonant_phoneme_list = numpy.array(
-            [
-                p.phoneme_id if p is not None else -1
-                for p in consonant_phoneme_data_list
-            ],
-            dtype=numpy.int64,
-        )
-        phoneme_length_sa = numpy.array(
-            [a.sum() for a in numpy.split(phoneme_length, vowel_indexes[:-1] + 1)],
-            dtype=numpy.float32,
-        )
-
-        f0_list = self.yukarin_sa_forwarder(
-            length=vowel_phoneme_list.shape[0],
-            vowel_phoneme_list=vowel_phoneme_list[numpy.newaxis],
-            consonant_phoneme_list=consonant_phoneme_list[numpy.newaxis],
-            start_accent_list=start_accent_list[vowel_indexes][numpy.newaxis],
-            end_accent_list=end_accent_list[vowel_indexes][numpy.newaxis],
-            start_accent_phrase_list=start_accent_phrase_list[vowel_indexes][
-                numpy.newaxis
-            ],
-            end_accent_phrase_list=end_accent_phrase_list[vowel_indexes][numpy.newaxis],
-            speaker_id=numpy.array(speaker_id, dtype=numpy.int64).reshape(-1),
-        )[0]
-        f0_list += f0_correct
-
-        for i, p in enumerate(vowel_phoneme_data_list):
-            if p.phoneme in unvoiced_mora_phoneme_list:
-                f0_list[i] = 0
-
-        # use numpy.int32 as the number of repeats to avoid casting int64 to int32 in numpy internal
-        phoneme = numpy.repeat(
-            phoneme_list_s, numpy.round(phoneme_length * rate).astype(numpy.int32)
-        )
-        f0 = numpy.repeat(
-            f0_list, numpy.round(phoneme_length_sa * rate).astype(numpy.int32)
-        )
-
-        # forward decode
-        assert self.yukarin_soso_phoneme_class is not None
-
-        if (
-            self.yukarin_soso_phoneme_class is not JvsPhoneme
-            and self.yukarin_soso_phoneme_class is not self.yukarin_s_phoneme_class
-        ):
-            phoneme = numpy.array(
-                [
-                    self.yukarin_soso_phoneme_class.phoneme_list.index(
-                        JvsPhoneme.phoneme_list[p]
-                    )
-                    for p in phoneme
-                ],
-                dtype=numpy.int64,
-            )
-
-        array = numpy.zeros(
-            (len(phoneme), self.yukarin_soso_phoneme_class.num_phoneme),
-            dtype=numpy.float32,
-        )
-        array[numpy.arange(len(phoneme)), phoneme] = 1
-        phoneme = array
-
-        f0 = SamplingData(array=f0, rate=rate).resample(24000 / 256)
-        phoneme = SamplingData(array=phoneme, rate=rate).resample(24000 / 256)
-
-        wave = self.decode_forwarder(
-            length=phoneme.shape[0],
-            phoneme_size=phoneme.shape[1],
-            f0=f0[:, numpy.newaxis],
-            phoneme=phoneme,
-            speaker_id=numpy.array(speaker_id, dtype=numpy.int64).reshape(-1),
-        )
-        return wave
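`split_mora` above anchors each mora on a vowel-like phoneme (anything in `mora_phoneme_list`) and attaches the phoneme immediately before it as a consonant only when the two are adjacent. A self-contained run of the same logic on a toy sequence, with a stub standing in for `BasePhoneme`:

```python
from typing import List, Optional

# Same tables as the removed forwarder.py.
unvoiced_mora_phoneme_list = ["A", "I", "U", "E", "O", "cl", "pau"]
mora_phoneme_list = ["a", "i", "u", "e", "o", "N"] + unvoiced_mora_phoneme_list


class P:  # minimal stand-in for BasePhoneme
    def __init__(self, phoneme: str):
        self.phoneme = phoneme

    def __repr__(self):
        return self.phoneme


def split_mora(phoneme_list: List[P]):
    # Copied logic: vowels anchor the morae; a consonant is the phoneme
    # directly before a vowel when the two are adjacent.
    vowel_indexes = [i for i, p in enumerate(phoneme_list) if p.phoneme in mora_phoneme_list]
    vowel_phoneme_list = [phoneme_list[i] for i in vowel_indexes]
    consonant_phoneme_list: List[Optional[P]] = [None] + [
        None if post - prev == 1 else phoneme_list[post - 1]
        for prev, post in zip(vowel_indexes[:-1], vowel_indexes[1:])
    ]
    return consonant_phoneme_list, vowel_phoneme_list, vowel_indexes


# "pau k o N pau" -> morae: pau / ko / N / pau
cons, vowels, idx = split_mora([P(p) for p in ["pau", "k", "o", "N", "pau"]])
print(cons, vowels, idx)  # [None, k, None, None] [pau, o, N, pau] [0, 2, 3, 4]
```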
diff --git a/example/python/full_context_label.py b/example/python/full_context_label.py
deleted file mode 100644
index 44f288d39..000000000
--- a/example/python/full_context_label.py
+++ /dev/null
@@ -1,512 +0,0 @@
-import re
-from dataclasses import dataclass
-from itertools import chain
-from typing import Dict, List, Optional
-
-import pyopenjtalk
-
-
-@dataclass
-class Phoneme:
-    """
-    音素(母音・子音)クラス、音素の元となるcontextを保持する
-    音素には、母音や子音以外にも無音(silent/pause)も含まれる
-    Attributes
-    ----------
-    contexts: Dict[str, str]
-        音素の元
-    """
-
-    contexts: Dict[str, str]
-
-    @classmethod
-    def from_label(cls, label: str):
-        """
-        pyopenjtalk.extract_fullcontextで得られる音素の元(ラベル)から、Phonemeクラスを作成する
-        Parameters
-        ----------
-        label : str
-            pyopenjtalk.extract_fullcontextで得られるラベルを渡す
-        Returns
-        -------
-        phoneme: Phoneme
-            Phonemeクラスを返す
-        """
-        contexts = re.search(
-            r"^(?P<p1>.+?)\^(?P<p2>.+?)\-(?P<p3>.+?)\+(?P<p4>.+?)\=(?P<p5>.+?)"
-            r"/A\:(?P<a1>.+?)\+(?P<a2>.+?)\+(?P<a3>.+?)"
-            r"/B\:(?P<b1>.+?)\-(?P<b2>.+?)\_(?P<b3>.+?)"
-            r"/C\:(?P<c1>.+?)\_(?P<c2>.+?)\+(?P<c3>.+?)"
-            r"/D\:(?P<d1>.+?)\+(?P<d2>.+?)\_(?P<d3>.+?)"
-            r"/E\:(?P<e1>.+?)\_(?P<e2>.+?)\!(?P<e3>.+?)\_(?P<e4>.+?)\-(?P<e5>.+?)"
-            r"/F\:(?P<f1>.+?)\_(?P<f2>.+?)\#(?P<f3>.+?)\_(?P<f4>.+?)\@(?P<f5>.+?)\_(?P<f6>.+?)\|(?P<f7>.+?)\_(?P<f8>.+?)"  # noqa
-            r"/G\:(?P<g1>.+?)\_(?P<g2>.+?)\%(?P<g3>.+?)\_(?P<g4>.+?)\_(?P<g5>.+?)"
-            r"/H\:(?P<h1>.+?)\_(?P<h2>.+?)"
-            r"/I\:(?P<i1>.+?)\-(?P<i2>.+?)\@(?P<i3>.+?)\+(?P<i4>.+?)\&(?P<i5>.+?)\-(?P<i6>.+?)\|(?P<i7>.+?)\+(?P<i8>.+?)"  # noqa
-            r"/J\:(?P<j1>.+?)\_(?P<j2>.+?)"
-            r"/K\:(?P<k1>.+?)\+(?P<k2>.+?)\-(?P<k3>.+?)$",
-            label,
-        ).groupdict()
-        return cls(contexts=contexts)
-
-    @property
-    def label(self):
-        """
-        pyopenjtalk.extract_fullcontextで得られるラベルと等しい
-        Returns
-        -------
-        lebel: str
-            ラベルを返す
-        """
-        return (
-            "{p1}^{p2}-{p3}+{p4}={p5}"
-            "/A:{a1}+{a2}+{a3}"
-            "/B:{b1}-{b2}_{b3}"
-            "/C:{c1}_{c2}+{c3}"
-            "/D:{d1}+{d2}_{d3}"
-            "/E:{e1}_{e2}!{e3}_{e4}-{e5}"
-            "/F:{f1}_{f2}#{f3}_{f4}@{f5}_{f6}|{f7}_{f8}"
-            "/G:{g1}_{g2}%{g3}_{g4}_{g5}"
-            "/H:{h1}_{h2}"
-            "/I:{i1}-{i2}@{i3}+{i4}&{i5}-{i6}|{i7}+{i8}"
-            "/J:{j1}_{j2}"
-            "/K:{k1}+{k2}-{k3}"
-        ).format(**self.contexts)
-
-    @property
-    def phoneme(self):
-        """
-        音素クラスの中で、発声に必要な要素を返す
-        Returns
-        -------
-        phoneme : str
-            発声に必要な要素を返す
-        """
-        return self.contexts["p3"]
-
-    def is_pause(self):
-        """
-        音素がポーズ(無音、silent/pause)であるかを返す
-        Returns
-        -------
-        is_pose : bool
-            音素がポーズ(無音、silent/pause)であるか(True)否か(False)
-        """
-        return self.contexts["f1"] == "xx"
-
-    def __repr__(self):
-        return f"<Phoneme phoneme='{self.phoneme}'>"
-
-
-@dataclass
-class Mora:
-    """
-    モーラクラス
-    モーラは1音素(母音や促音「っ」、撥音「ん」など)か、2音素(母音と子音の組み合わせ)で成り立つ
-    Attributes
-    ----------
-    consonant : Optional[Phoneme]
-        子音
-    vowel : Phoneme
-        母音
-    """
-
-    consonant: Optional[Phoneme]
-    vowel: Phoneme
-
-    def set_context(self, key: str, value: str):
-        """
-        Moraクラス内に含まれるPhonemeのcontextのうち、指定されたキーの値を変更する
-        consonantが存在する場合は、vowelと同じようにcontextを変更する
-        Parameters
-        ----------
-        key : str
-            変更したいcontextのキー
-        value : str
-            変更したいcontextの値
-        """
-        self.vowel.contexts[key] = value
-        if self.consonant is not None:
-            self.consonant.contexts[key] = value
-
-    @property
-    def phonemes(self):
-        """
-        音素群を返す
-        Returns
-        -------
-        phonemes : List[Phoneme]
-            母音しかない場合は母音のみ、子音もある場合は子音、母音の順番でPhonemeのリストを返す
-        """
-        if self.consonant is not None:
-            return [self.consonant, self.vowel]
-        else:
-            return [self.vowel]
-
-    @property
-    def labels(self):
-        """
-        ラベル群を返す
-        Returns
-        -------
-        labels : List[str]
-            Moraに含まれるすべてのラベルを返す
-        """
-        return [p.label for p in self.phonemes]
-
-
-@dataclass
-class AccentPhrase:
-    """
-    アクセント句クラス
-    同じアクセントのMoraを複数保持する
-    Attributes
-    ----------
-    moras : List[Mora]
-        音韻のリスト
-    accent : int
-        アクセント
-    """
-
-    moras: List[Mora]
-    accent: int
-
-    @classmethod
-    def from_phonemes(cls, phonemes: List[Phoneme]):
-        """
-        PhonemeのリストからAccentPhraseクラスを作成する
-        Parameters
-        ----------
-        phonemes : List[Phoneme]
-            phonemeのリストを渡す
-        Returns
-        -------
-        accent_phrase : AccentPhrase
-            AccentPhraseクラスを返す
-        """
-        moras: List[Mora] = []
-
-        mora_phonemes: List[Phoneme] = []
-        for phoneme, next_phoneme in zip(phonemes, phonemes[1:] + [None]):
-            # workaround for Hihosiba/voicevox_engine#57
-            # (py)openjtalk によるアクセント句内のモーラへの附番は 49 番目まで
-            # 49 番目のモーラについて、続く音素のモーラ番号を単一モーラの特定に使えない
-            if int(phoneme.contexts["a2"]) == 49:
-                break
-
-            mora_phonemes.append(phoneme)
-
-            if (
-                next_phoneme is None
-                or phoneme.contexts["a2"] != next_phoneme.contexts["a2"]
-            ):
-                if len(mora_phonemes) == 1:
-                    consonant, vowel = None, mora_phonemes[0]
-                elif len(mora_phonemes) == 2:
-                    consonant, vowel = mora_phonemes[0], mora_phonemes[1]
-                else:
-                    raise ValueError(mora_phonemes)
-                mora = Mora(consonant=consonant, vowel=vowel)
-                moras.append(mora)
-                mora_phonemes = []
-
-        accent = int(moras[0].vowel.contexts["f2"])
-        # workaround for Hihosiba/voicevox_engine#55
-        # アクセント位置とするキー f2 の値がアクセント句内のモーラ数を超える場合がある
-        accent = accent if accent <= len(moras) else len(moras)
-        return cls(moras=moras, accent=accent)
-
-    def set_context(self, key: str, value: str):
-        """
-        AccentPhraseに間接的に含まれる全てのPhonemeのcontextの、指定されたキーの値を変更する
-        Parameters
-        ----------
-        key : str
-            変更したいcontextのキー
-        value : str
-            変更したいcontextの値
-        """
-        for mora in self.moras:
-            mora.set_context(key, value)
-
-    @property
-    def phonemes(self):
-        """
-        音素群を返す
-        Returns
-        -------
-        phonemes : List[Phoneme]
-            AccentPhraseに間接的に含まれる全てのPhonemeを返す
-        """
-        return list(chain.from_iterable(m.phonemes for m in self.moras))
-
-    @property
-    def labels(self):
-        """
-        ラベル群を返す
-        Returns
-        -------
-        labels : List[str]
-            AccentPhraseに間接的に含まれる全てのラベルを返す
-        """
-        return [p.label for p in self.phonemes]
-
-    def merge(self, accent_phrase: "AccentPhrase"):
-        """
-        AccentPhraseを合成する
-        (このクラスが保持するmorasの後ろに、引数として渡されたAccentPhraseのmorasを合成する)
-        Parameters
-        ----------
-        accent_phrase : AccentPhrase
-            合成したいAccentPhraseを渡す
-        Returns
-        -------
-        accent_phrase : AccentPhrase
-            合成されたAccentPhraseを返す
-        """
-        return AccentPhrase(
-            moras=self.moras + accent_phrase.moras,
-            accent=self.accent,
-        )
-
-
-@dataclass
-class BreathGroup:
-    """
-    発声の区切りクラス
-    アクセントの異なるアクセント句を複数保持する
-    Attributes
-    ----------
-    accent_phrases : List[AccentPhrase]
-        アクセント句のリスト
-    """
-
-    accent_phrases: List[AccentPhrase]
-
-    @classmethod
-    def from_phonemes(cls, phonemes: List[Phoneme]):
-        """
-        PhonemeのリストからBreathGroupクラスを作成する
-        Parameters
-        ----------
-        phonemes : List[Phoneme]
-            phonemeのリストを渡す
-        Returns
-        -------
-        breath_group : BreathGroup
-            BreathGroupクラスを返す
-        """
-        accent_phrases: List[AccentPhrase] = []
-        accent_phonemes: List[Phoneme] = []
-        for phoneme, next_phoneme in zip(phonemes, phonemes[1:] + [None]):
-            accent_phonemes.append(phoneme)
-
-            if (
-                next_phoneme is None
-                or phoneme.contexts["i3"] != next_phoneme.contexts["i3"]
-                or phoneme.contexts["f5"] != next_phoneme.contexts["f5"]
-            ):
-                accent_phrase = AccentPhrase.from_phonemes(accent_phonemes)
-                accent_phrases.append(accent_phrase)
-                accent_phonemes = []
-
-        return cls(accent_phrases=accent_phrases)
-
-    def set_context(self, key: str, value: str):
-        """
-        BreathGroupに間接的に含まれる全てのPhonemeのcontextの、指定されたキーの値を変更する
-        Parameters
-        ----------
-        key : str
-            変更したいcontextのキー
-        value : str
-            変更したいcontextの値
-        """
-        for accent_phrase in self.accent_phrases:
-            accent_phrase.set_context(key, value)
-
-    @property
-    def phonemes(self):
-        """
-        音素群を返す
-        Returns
-        -------
-        phonemes : List[Phoneme]
-            BreathGroupに間接的に含まれる全てのPhonemeを返す
-        """
-        return list(
-            chain.from_iterable(
-                accent_phrase.phonemes for accent_phrase in self.accent_phrases
-            )
-        )
-
-    @property
-    def labels(self):
-        """
-        ラベル群を返す
-        Returns
-        -------
-        labels : List[str]
-            BreathGroupに間接的に含まれる全てのラベルを返す
-        """
-        return [p.label for p in self.phonemes]
-
-
-@dataclass
-class Utterance:
-    """
-    発声クラス
-    発声の区切りと無音を複数保持する
-    Attributes
-    ----------
-    breath_groups : List[BreathGroup]
-        発声の区切りのリスト
-    pauses : List[Phoneme]
-        無音のリスト
-    """
-
-    breath_groups: List[BreathGroup]
-    pauses: List[Phoneme]
-
-    @classmethod
-    def from_phonemes(cls, phonemes: List[Phoneme]):
-        """
-        Phonemeの完全なリストからUtteranceクラスを作成する
-        Parameters
-        ----------
-        phonemes : List[Phoneme]
-            phonemeのリストを渡す
-        Returns
-        -------
-        utterance : Utterance
-            Utteranceクラスを返す
-        """
-        pauses: List[Phoneme] = []
-
-        breath_groups: List[BreathGroup] = []
-        group_phonemes: List[Phoneme] = []
-        for phoneme in phonemes:
-            if not phoneme.is_pause():
-                group_phonemes.append(phoneme)
-
-            else:
-                pauses.append(phoneme)
-
-                if len(group_phonemes) > 0:
-                    breath_group = BreathGroup.from_phonemes(group_phonemes)
-                    breath_groups.append(breath_group)
-                    group_phonemes = []
-
-        return cls(breath_groups=breath_groups, pauses=pauses)
-
-    def set_context(self, key: str, value: str):
-        """
-        Utteranceに間接的に含まれる全てのPhonemeのcontextの、指定されたキーの値を変更する
-        Parameters
-        ----------
-        key : str
-            変更したいcontextのキー
-        value : str
-            変更したいcontextの値
-        """
-        for breath_group in self.breath_groups:
-            breath_group.set_context(key, value)
-
-    @property
-    def phonemes(self):
-        """
-        音素群を返す
-        Returns
-        -------
-        phonemes : List[Phoneme]
-            Utteranceクラスに直接的・間接的に含まれる、全てのPhonemeを返す
-        """
-        accent_phrases = list(
-            chain.from_iterable(
-                breath_group.accent_phrases for breath_group in self.breath_groups
-            )
-        )
-        for prev, cent, post in zip(
-            [None] + accent_phrases[:-1],
-            accent_phrases,
-            accent_phrases[1:] + [None],
-        ):
-            mora_num = len(cent.moras)
-            accent = cent.accent
-
-            if prev is not None:
-                prev.set_context("g1", str(mora_num))
-                prev.set_context("g2", str(accent))
-
-            if post is not None:
-                post.set_context("e1", str(mora_num))
-                post.set_context("e2", str(accent))
-
-            cent.set_context("f1", str(mora_num))
-            cent.set_context("f2", str(accent))
-            for i_mora, mora in enumerate(cent.moras):
-                mora.set_context("a1", str(i_mora - accent + 1))
-                mora.set_context("a2", str(i_mora + 1))
-                mora.set_context("a3", str(mora_num - i_mora))
-
-        for prev, cent, post in zip(
-            [None] + self.breath_groups[:-1],
-            self.breath_groups,
-            self.breath_groups[1:] + [None],
-        ):
-            accent_phrase_num = len(cent.accent_phrases)
-
-            if prev is not None:
-                prev.set_context("j1", str(accent_phrase_num))
-
-            if post is not None:
-                post.set_context("h1", str(accent_phrase_num))
-
-            cent.set_context("i1", str(accent_phrase_num))
-            cent.set_context(
-                "i5", str(accent_phrases.index(cent.accent_phrases[0]) + 1)
-            )
-            cent.set_context(
-                "i6",
-                str(len(accent_phrases) - accent_phrases.index(cent.accent_phrases[0])),
-            )
-
-        self.set_context(
-            "k2",
-            str(
-                sum(
-                    [
-                        len(breath_group.accent_phrases)
-                        for breath_group in self.breath_groups
-                    ]
-                )
-            ),
-        )
-
-        phonemes: List[Phoneme] = []
-        for i in range(len(self.pauses)):
-            if self.pauses[i] is not None:
-                phonemes += [self.pauses[i]]
-
-            if i < len(self.pauses) - 1:
-                phonemes += self.breath_groups[i].phonemes
-
-        return phonemes
-
-    @property
-    def labels(self):
-        """
-        ラベル群を返す
-        Returns
-        -------
-        labels : List[str]
-            Utteranceクラスに直接的・間接的に含まれる全てのラベルを返す
-        """
-        return [p.label for p in self.phonemes]
-
-
-def extract_full_context_label(text: str):
-    labels = pyopenjtalk.extract_fullcontext(text)
-    phonemes = [Phoneme.from_label(label=label) for label in labels]
-    utterance = Utterance.from_phonemes(phonemes)
-    return utterance
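The removed module turns each pyopenjtalk full-context label into named contexts via one large regex, then builds the Phoneme → Mora → AccentPhrase → BreathGroup → Utterance hierarchy on top. Its field conventions can be checked on a single label: `p3` carries the phoneme itself and `f1 == "xx"` marks a pause. The label below is illustrative, in the HTS format the regex expects, not the capture of a real pyopenjtalk run:

```python
import re

# One full-context label in the format the removed regex expects;
# "xx" is the padding value, the concrete fields here are illustrative.
label = (
    "xx^xx-sil+k=o/A:xx+xx+xx/B:xx-xx_xx/C:xx_xx+xx/D:xx+xx_xx"
    "/E:xx_xx!xx_xx-xx/F:xx_xx#xx_xx@xx_xx|xx_xx/G:xx_xx%xx_xx_xx"
    "/H:xx_xx/I:xx-xx@xx+xx&xx-xx|xx+xx/J:xx_xx/K:xx+xx-xx"
)

# p3 carries the phoneme; extract it the same way the removed regex does.
contexts = re.search(r"\^(?P<p2>.+?)\-(?P<p3>.+?)\+", label).groupdict()
print(contexts["p3"])  # sil
```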
diff --git a/example/python/makelib.bat b/example/python/makelib.bat
deleted file mode 100644
index f2256081a..000000000
--- a/example/python/makelib.bat
+++ /dev/null
@@ -1,32 +0,0 @@
-::https://github.com/idanmiara/addlib/blob/main/src/addlib/makelib.bat Copyright (c) 2021 Idan Miara
-
-@echo off
-
-::https://stackoverflow.com/questions/9946322/how-to-generate-an-import-library-lib-file-from-a-dll
-if %1x neq x goto step1
-echo missing library name
-
-goto exit
-:step1
-SET NAME=%~d1%~p1%~n1
-if exist "%NAME%.dll" goto step2
-echo file not found "%NAME%.dll"
-goto exit
-
-:step2
-SET ARCH=x64
-
-echo Creating LIB file from DLL file for %NAME%...
-dumpbin /exports "%NAME%.dll"
-
-echo creating "%NAME%.def"
-
-echo LIBRARY %NAME% > "%NAME%.def"
-echo EXPORTS >> "%NAME%.def"
-for /f "skip=19 tokens=4" %%A in ('dumpbin /exports "%NAME%.dll"') do echo %%A >> "%NAME%.def"
-
-echo creating "%NAME%.lib" from "%NAME%.def"
-lib /def:"%NAME%.def" /out:"%NAME%.lib" /machine:%ARCH%
-
-:exit
-pause
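The removed batch script builds an import library from a DLL in three steps: dump the export table with `dumpbin /exports`, write the exported names into a `.def` file, and feed that to `lib /def:`. A hedged Python port of the same steps follows; the 19-line header skip and fourth-token parsing mirror the batch file and are exactly as fragile, and `dumpbin`/`lib` are assumed to be on PATH (e.g. in a VS developer prompt):

```python
import subprocess
import sys
from pathlib import Path


def make_import_lib(dll: str) -> None:
    """Port of the removed makelib.bat: DLL -> .def -> .lib (x64)."""
    name = Path(dll).with_suffix("")
    exports = subprocess.run(
        ["dumpbin", "/exports", f"{name}.dll"], capture_output=True, text=True, check=True
    ).stdout.splitlines()
    with open(f"{name}.def", "w") as f:
        f.write(f"LIBRARY {name}\nEXPORTS\n")
        # Same heuristic as the batch file: skip the 19-line header and
        # take the 4th whitespace-separated token (the export name).
        for line in exports[19:]:
            tokens = line.split()
            if len(tokens) >= 4:
                f.write(tokens[3] + "\n")
    subprocess.run(
        ["lib", f"/def:{name}.def", f"/out:{name}.lib", "/machine:x64"], check=True
    )


if __name__ == "__main__":
    make_import_lib(sys.argv[1])
```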
diff --git a/example/python/requirements.txt b/example/python/requirements.txt
deleted file mode 100644
index 39b58f278..000000000
--- a/example/python/requirements.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-numpy
-cython
-soundfile
-git+https://github.com/VOICEVOX/pyopenjtalk@69e5f354634f98098113f9cac5a6ea736443f9c9#egg=pyopenjtalk
diff --git a/example/python/run.py b/example/python/run.py
index dbdb9c7fe..2d8b04061 100644
--- a/example/python/run.py
+++ b/example/python/run.py
@@ -1,40 +1,27 @@
 import argparse
-from typing import Optional
 
 import core
-import soundfile
-
-from forwarder import Forwarder
 
 
 def run(
     use_gpu: bool,
     text: str,
     speaker_id: int,
-    f0_speaker_id: Optional[int],
-    f0_correct: float,
-    cpu_num_threads: int
+    cpu_num_threads: int,
+    openjtalk_dict: str
 ) -> None:
     # コアの初期化
     core.initialize(use_gpu, cpu_num_threads)
 
-    # 音声合成処理モジュールの初期化
-    forwarder = Forwarder(
-        yukarin_s_forwarder=core.yukarin_s_forward,
-        yukarin_sa_forwarder=core.yukarin_sa_forward,
-        decode_forwarder=core.decode_forward,
-    )
+    # Load the Open JTalk dictionary
+    core.voicevox_load_openjtalk_dict(openjtalk_dict)
 
     # 音声合成
-    wave = forwarder.forward(
-        text=text,
-        speaker_id=speaker_id,
-        f0_speaker_id=f0_speaker_id if f0_speaker_id is not None else speaker_id,
-        f0_correct=f0_correct,
-    )
+    wavefmt = core.voicevox_tts(text, speaker_id)
 
     # 保存
-    soundfile.write(f"{text}-{speaker_id}.wav", data=wave, samplerate=24000)
+    with open(f"{text}-{speaker_id}.wav", "wb") as f:
+        f.write(wavefmt)
 
     core.finalize()
 
@@ -44,7 +31,6 @@ def run(
     parser.add_argument("--use_gpu", action="store_true")
     parser.add_argument("--text", required=True)
     parser.add_argument("--speaker_id", type=int, required=True)
-    parser.add_argument("--f0_speaker_id", type=int)
-    parser.add_argument("--f0_correct", type=float, default=0)
     parser.add_argument("--cpu_num_threads", type=int, default=0)
+    parser.add_argument("--openjtalk_dict", type=str, default="open_jtalk_dic_utf_8-1.11")
     run(**vars(parser.parse_args()))
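After this patch the example no longer drives the three forwarders itself: the core loads the Open JTalk dictionary and `voicevox_tts` returns a complete RIFF/WAV byte string, so saving is a plain binary write. A minimal session under the same assumptions as the example (the `core` wrapper exposes these names and the dictionary folder sits next to the script):

```python
import core

core.initialize(False, 0)  # use_gpu=False, cpu_num_threads=0
core.voicevox_load_openjtalk_dict("open_jtalk_dic_utf_8-1.11")

# voicevox_tts returns a complete WAV (RIFF) byte string, so no soundfile needed.
wav = core.voicevox_tts("こんにちは", 1)
with open("hello-1.wav", "wb") as f:
    f.write(wav)

core.finalize()
```

From the shell, the equivalent is `python run.py --text こんにちは --speaker_id 1`, with `--openjtalk_dict` defaulting to `open_jtalk_dic_utf_8-1.11`.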
diff --git a/example/python/setup.py b/example/python/setup.py
deleted file mode 100644
index 34be7d230..000000000
--- a/example/python/setup.py
+++ /dev/null
@@ -1,24 +0,0 @@
-from distutils.core import setup
-from distutils.extension import Extension
-
-import numpy
-from Cython.Build import cythonize
-from Cython.Distutils import build_ext
-
-ext_modules = [
-    Extension(
-        name="core",
-        sources=["core.pyx"],
-        language="c++",
-        libraries=["core"],
-    )
-]
-
-setup(
-    name="core",
-    cmdclass={"build_ext": build_ext},
-    ext_modules=cythonize(ext_modules),
-    include_dirs=[
-        numpy.get_include(),
-    ],
-)

From 1ca62a84d515f493b15715fc8c720e07642c5a0b Mon Sep 17 00:00:00 2001
From: nebocco <73807432+nebocco@users.noreply.github.com>
Date: Sun, 5 Jun 2022 06:51:14 +0900
Subject: [PATCH 10/13] .gitignore open_jtalk_dic in example (#148)

* .gitignore open_jtalk_dic in example

* modify: example/python/.gitignore
---
 example/python/.gitignore | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/example/python/.gitignore b/example/python/.gitignore
index a81c8ee12..a0f63e6cc 100644
--- a/example/python/.gitignore
+++ b/example/python/.gitignore
@@ -136,3 +136,6 @@ dmypy.json
 
 # Cython debug symbols
 cython_debug/
+
+# Open JTalk dictionary directory
+open_jtalk_dic_utf_8-*

From c47d05576e04360735c29de3e4fc73424440c48c Mon Sep 17 00:00:00 2001
From: Hiroshiba <hihokaruta@gmail.com>
Date: Thu, 9 Jun 2022 18:28:26 +0900
Subject: [PATCH 11/13] Add links to the C++ sample code (#142)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Add links to the C++ sample code

* #readme

* Update README.md
---
 README.md | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/README.md b/README.md
index 91cc83ca1..80b01c2f3 100644
--- a/README.md
+++ b/README.md
@@ -100,6 +100,14 @@ sudo apt install libgomp1
 ```
 
 ## サンプル実行
+
+### C++ サンプルコード
+
+* [Linux・macOS サンプルコード](./example/cpp/unix#readme)
+* [Windows サンプルコード](./example/cpp/windows#readme)
+
+### Python サンプルコード
+
 まずOpen JTalk辞書フォルダを配置します。
 http://open-jtalk.sourceforge.net/ を開き、Dictionary for Open JTalk 欄の Binary Package (UTF-8)をクリックして「open_jtalk_dic_utf_8-1.11.tar.gz」をダウンロードします。  
 これを展開してできた「open_jtalk_dic_utf_8-1.11」フォルダをexample/pythonに配置します。
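The dictionary setup described above can also be scripted. A sketch, assuming the canonical SourceForge download URL (the README itself only names the project page, so the exact URL here is an assumption):

```python
import tarfile
import urllib.request

# Assumed direct-download URL; the README only links the project page.
URL = (
    "https://sourceforge.net/projects/open-jtalk/files/Dictionary/"
    "open_jtalk_dic-1.11/open_jtalk_dic_utf_8-1.11.tar.gz/download"
)

archive, _ = urllib.request.urlretrieve(URL, "open_jtalk_dic_utf_8-1.11.tar.gz")
with tarfile.open(archive) as tar:
    tar.extractall("example/python")  # yields example/python/open_jtalk_dic_utf_8-1.11
```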

From 73a621fdaed4d923a37c6577d1b8c3a24c5b0ebd Mon Sep 17 00:00:00 2001
From: Hiroshiba <hihokaruta@gmail.com>
Date: Mon, 4 Jul 2022 16:49:32 +0900
Subject: [PATCH 12/13] Enable code signing (#164)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Code signing

* build_util

* artifact/

* a

* remove
---
 .github/workflows/build.yml | 13 ++++++++++
 build_util/codesign.bash    | 49 +++++++++++++++++++++++++++++++++++++
 2 files changed, 62 insertions(+)
 create mode 100644 build_util/codesign.bash

diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index eedf68621..e3bb8f02a 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -13,6 +13,9 @@ on:
       version:
         description: "バージョン情報(A.BB.C / A.BB.C-preview.D)"
         required: true
+      code_signing:
+        description: "Enable code signing"
+        type: boolean
 
 env:
   # releaseタグ名か、workflow_dispatchでのバージョン名か、DEBUGが入る
@@ -24,6 +27,7 @@ env:
 
 jobs:
   build-cpp-shared:
+    environment: ${{ github.event.inputs.code_signing && 'code_signing' }} # environment for code signing
     strategy:
       fail-fast: false
       matrix:
@@ -281,6 +285,15 @@ jobs:
 
           cp README.md "artifact/${{ env.ASSET_NAME }}/README.txt"
 
+      - name: Code signing (Windows)
+        if: startsWith(matrix.os, 'windows') && github.event.inputs.code_signing
+        shell: bash
+        run: |
+          bash build_util/codesign.bash "artifact/${{ env.ASSET_NAME }}/core.dll"
+        env:
+          CERT_BASE64: ${{ secrets.CERT_BASE64 }}
+          CERT_PASSWORD: ${{ secrets.CERT_PASSWORD }}
+
       # Upload
       - name: Upload artifact
         uses: actions/upload-artifact@v2
diff --git a/build_util/codesign.bash b/build_util/codesign.bash
new file mode 100644
index 000000000..72ea0f5b5
--- /dev/null
+++ b/build_util/codesign.bash
@@ -0,0 +1,49 @@
+# !!! This script handles the code-signing certificate, so handle it with care !!!
+
+set -eu
+
+if [ -z "${CERT_BASE64:-}" ]; then
+    echo "CERT_BASE64 is not defined"
+    exit 1
+fi
+if [ -z "${CERT_PASSWORD:-}" ]; then
+    echo "CERT_PASSWORD is not defined"
+    exit 1
+fi
+
+if [ $# -ne 1 ]; then
+    echo "Expected exactly one argument"
+    exit 1
+fi
+target_file_glob="$1"
+
+# Decode the certificate
+CERT_PATH=cert.pfx
+echo -n "$CERT_BASE64" | base64 -d - > $CERT_PATH
+
+# Sign the given file
+function codesign() {
+    TARGET="$1"
+    SIGNTOOL=$(find "C:/Program Files (x86)/Windows Kits/10/App Certification Kit" -name "signtool.exe" | sort -V | tail -n 1)
+    powershell "& '$SIGNTOOL' sign /fd SHA256 /td SHA256 /tr http://timestamp.digicert.com /f $CERT_PATH /p $CERT_PASSWORD '$TARGET'"
+}
+
+# Check whether the given file is already signed
+function is_signed() {
+    TARGET="$1"
+    SIGNTOOL=$(find "C:/Program Files (x86)/Windows Kits/10/App Certification Kit" -name "signtool.exe" | sort -V | tail -n 1)
+    powershell "& '$SIGNTOOL' verify /pa '$TARGET'" || return 1
+}
+
+# Sign each file unless it is already signed
+ls $target_file_glob | while read target_file; do
+    if is_signed "$target_file"; then
+        echo "already signed: $target_file"
+    else
+        echo "signing: $target_file"
+        codesign "$target_file"
+    fi
+done
+
+# Remove the certificate
+rm $CERT_PATH
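The workflow feeds this script a base64-encoded PFX through the `CERT_BASE64` secret. Producing that secret from a certificate file is a one-liner, sketched here with an illustrative filename:

```python
import base64

# Encode a PFX for the CERT_BASE64 repository secret (filename illustrative).
with open("codesign.pfx", "rb") as f:
    print(base64.b64encode(f.read()).decode())
```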

From 50082b69e535851a358647477bfc7b8512b43880 Mon Sep 17 00:00:00 2001
From: Hiroshiba <hihokaruta@gmail.com>
Date: Sun, 10 Jul 2022 20:19:42 +0900
Subject: [PATCH 13/13] Fix checks since boolean workflow inputs are passed as strings (#166)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Fix the checks since boolean inputs arrive as strings

* Fix a spot that was missed
---
 .github/workflows/build.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index e3bb8f02a..57b48427f 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -27,7 +27,7 @@ env:
 
 jobs:
   build-cpp-shared:
-    environment: ${{ github.event.inputs.code_signing && 'code_signing' }} # environment for code signing
+    environment: ${{ github.event.inputs.code_signing == 'true' && 'code_signing' }} # environment for code signing (behavior when false is undefined as of 2022-07-10)
     strategy:
       fail-fast: false
       matrix:
@@ -286,7 +286,7 @@ jobs:
           cp README.md "artifact/${{ env.ASSET_NAME }}/README.txt"
 
       - name: Code signing (Windows)
-        if: startsWith(matrix.os, 'windows') && github.event.inputs.code_signing
+        if: startsWith(matrix.os, 'windows') && github.event.inputs.code_signing == 'true'
         shell: bash
         run: |
           bash build_util/codesign.bash "artifact/${{ env.ASSET_NAME }}/core.dll"
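The root cause fixed here is that `workflow_dispatch` inputs always arrive as strings, so a boolean input set to false becomes the string `'false'`, which is truthy inside a GitHub expression; comparing against `'true'` makes the check explicit. The same trap, illustrated in Python:

```python
# A boolean workflow input arrives as a string, so naive truthiness misfires.
code_signing = "false"

print(bool(code_signing))      # True  -- non-empty string, the buggy check
print(code_signing == "true")  # False -- the explicit comparison the patch adds
```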