Skip to content

Commit

Permalink
Merge branch 'main' into 6-audio-generation-component
Browse files Browse the repository at this point in the history
  • Loading branch information
daavoo authored Nov 28, 2024
2 parents 9ee2c91 + 9ff8e23 commit efa5708
Show file tree
Hide file tree
Showing 5 changed files with 53 additions and 33 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/docs.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ jobs:
uses: actions/setup-python@v5
with:
python-version: '3.10'

cache: "pip"
- name: Configure git
run: |
git config user.name 'github-actions[bot]'
Expand Down
20 changes: 9 additions & 11 deletions .github/workflows/lint.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -12,23 +12,21 @@ jobs:
runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v4
- name: Check out the repository
uses: actions/checkout@v4

- name: Install uv
uses: astral-sh/setup-uv@v3
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: '3.10'

- name: Set up venv
run: |
uv venv
source .venv/bin/activate
uv pip install pre-commit
- name: Install pre-commit
run: pip install pre-commit

- uses: actions/cache@v4
with:
path: ~/.cache/pre-commit/
key: pre-commit-4|${{ env.pythonLocation }}|${{ hashFiles('.pre-commit-config.yaml') }}

- name: pre-commit
run: |
source .venv/bin/activate
pre-commit run --all-files
run: pre-commit run --all-files
28 changes: 28 additions & 0 deletions .github/workflows/tests.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
name: Tests

on:
push:
branches: [main]
pull_request:
workflow_dispatch:

jobs:
run-linter:
timeout-minutes: 30
runs-on: ubuntu-latest

steps:
- name: Check out the repository
uses: actions/checkout@v4

- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: '3.10'
cache: "pip"

- name: Install
run: pip install -e '.[tests]'

- name: Run tests
run: pytest -v tests
19 changes: 13 additions & 6 deletions demo/app.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,13 +16,20 @@
)

PODCAST_PROMPT = """
You are a helpful podcast writer.
You will take the input text and generate a conversation between 2 speakers.
Example of response:
You are a podcast scriptwriter generating engaging and natural-sounding conversations in JSON format. The script features two speakers:
Speaker 1: Laura, the main host. She explains topics clearly using anecdotes and analogies, teaching in an engaging and captivating way.
Speaker 2: Jon, the co-host. He keeps the conversation on track, asks curious follow-up questions, and reacts with excitement or confusion, often using interjections like “hmm” or “umm.”
Instructions:
- Write dynamic, easy-to-follow dialogue.
- Include natural interruptions and interjections.
- Avoid repetitive phrasing between speakers.
- Format output as a JSON conversation.
Example:
{
"Speaker 1": "Welcome to our podcast, where we explore the latest advancements in AI and technology. I'm your host, and today we're going to dive into the exciting world of TrustWorthy AI.",
"Speaker 2": "Hi, I'm excited to be here, so what is TrustWorthy AI?",
"Speaker 1":"Ah, great question! It is a term used by the European High Level Expert Group on AI. Mozilla defines trustworthy AI as AI that is demonstrably worthy of trust, tech that considers accountability, agency, and individual and collective well-being."
"Speaker 1": "Welcome to our podcast! Today, we’re exploring...",
"Speaker 2": "Hi Laura! I’m excited to hear about this. Can you explain...",
"Speaker 1": "Sure! Imagine it like this...",
"Speaker 2": "Oh, that’s cool! But how does..."
}
"""

Expand Down
17 changes: 2 additions & 15 deletions tests/integration/test_model_load_and_inference.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,12 +12,12 @@ def test_model_load_and_inference_text_to_text():
"HuggingFaceTB/smollm-135M-instruct-v0.2-Q8_0-GGUF/smollm-135m-instruct-add-basics-q8_0.gguf"
)
result = text_to_text(
"What is the capital of France?",
"Answer to: What is the capital of France?",
model=model,
system_prompt="",
)
assert isinstance(result, str)
assert json.loads(result)["Capital"] == "Paris"
assert json.loads(result)


def test_model_load_and_inference_text_to_text_no_json():
Expand All @@ -37,19 +37,6 @@ def test_model_load_and_inference_text_to_text_no_json():
assert result.startswith("The capital of France is Paris")


def test_model_load_and_inference_text_to_text_stream():
model = load_llama_cpp_model(
"HuggingFaceTB/smollm-135M-instruct-v0.2-Q8_0-GGUF/smollm-135m-instruct-add-basics-q8_0.gguf"
)
result = text_to_text_stream(
"What is the capital of France?",
model=model,
system_prompt="",
)
assert isinstance(result, Iterator)
assert json.loads("".join(result))["Capital"] == "Paris"


def test_model_load_and_inference_text_to_text_stream_no_json():
model = load_llama_cpp_model(
"HuggingFaceTB/smollm-135M-instruct-v0.2-Q8_0-GGUF/smollm-135m-instruct-add-basics-q8_0.gguf"
Expand Down

0 comments on commit efa5708

Please sign in to comment.