diff --git a/xinference/deploy/docker/Dockerfile b/xinference/deploy/docker/Dockerfile index 1975adb5eb..7ede36a9f0 100644 --- a/xinference/deploy/docker/Dockerfile +++ b/xinference/deploy/docker/Dockerfile @@ -26,6 +26,7 @@ ENV LD_LIBRARY_PATH $LD_LIBRARY_PATH:/usr/local/lib/python3.10/dist-packages/nvi ARG PIP_INDEX=https://pypi.org/simple RUN pip install --upgrade -i "$PIP_INDEX" pip && \ pip install -i "$PIP_INDEX" "diskcache>=5.6.1" "jinja2>=2.11.3" && \ + pip install -i "$PIP_INDEX" matcha-tts && \ # use pre-built whl package for llama-cpp-python, otherwise may core dump when init llama in some envs pip install "llama-cpp-python>=0.2.82" -i https://abetlen.github.io/llama-cpp-python/whl/cu124 && \ pip install -i "$PIP_INDEX" --upgrade-strategy only-if-needed -r /opt/inference/xinference/deploy/docker/requirements.txt && \ diff --git a/xinference/deploy/docker/cpu.Dockerfile b/xinference/deploy/docker/cpu.Dockerfile index d7bd45c463..a22eac50d4 100644 --- a/xinference/deploy/docker/cpu.Dockerfile +++ b/xinference/deploy/docker/cpu.Dockerfile @@ -19,6 +19,7 @@ ENV PATH $NVM_DIR/versions/node/v$NODE_VERSION/bin:$PATH ARG PIP_INDEX=https://pypi.org/simple RUN python -m pip install --upgrade -i "$PIP_INDEX" pip && \ + pip install -i "$PIP_INDEX" matcha-tts && \ pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu && \ pip install -i "$PIP_INDEX" --upgrade-strategy only-if-needed -r /opt/inference/xinference/deploy/docker/requirements_cpu.txt && \ CMAKE_ARGS="-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS" pip install llama-cpp-python && \ diff --git a/xinference/deploy/docker/requirements.txt b/xinference/deploy/docker/requirements.txt index 5408194367..7ee4776a71 100644 --- a/xinference/deploy/docker/requirements.txt +++ b/xinference/deploy/docker/requirements.txt @@ -52,7 +52,7 @@ torchaudio # For ChatTTS ChatTTS>0.1 xxhash # For ChatTTS HyperPyYAML # For CosyVoice -matcha-tts>=0.0.7 # For CosyVoice +# matcha-tts>=0.0.7 # For CosyVoice
onnxruntime-gpu==1.16.0; sys_platform == 'linux' # For CosyVoice onnxruntime==1.16.0; sys_platform == 'darwin' or sys_platform == 'windows' # For CosyVoice openai-whisper # For CosyVoice diff --git a/xinference/deploy/docker/requirements_cpu.txt b/xinference/deploy/docker/requirements_cpu.txt index 00a33dae9c..48d6a74e8d 100644 --- a/xinference/deploy/docker/requirements_cpu.txt +++ b/xinference/deploy/docker/requirements_cpu.txt @@ -49,7 +49,7 @@ torchaudio # For ChatTTS ChatTTS>0.1 xxhash # For ChatTTS HyperPyYAML # For CosyVoice -matcha-tts>=0.0.7 # For CosyVoice +# matcha-tts>=0.0.7 # For CosyVoice onnxruntime-gpu==1.16.0; sys_platform == 'linux' # For CosyVoice onnxruntime==1.16.0; sys_platform == 'darwin' or sys_platform == 'windows' # For CosyVoice openai-whisper # For CosyVoice