[app] Render each ToolInvocation in separate bubble #945

Workflow file for this run

name: Rust Build and Test
on:
  push:
    branches:
      - v1.0
  pull_request:
    branches:
      - v1.0
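# One job: build and test the Rust app on Ubuntu against a locally served Ollama model.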
jobs:
  build-and-test:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Code
        uses: actions/checkout@v3
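      # System packages for the native build: libdbus-1-dev plus pkg-config to locate it
      # (assumption: a crate in the app links against D-Bus; note apt usually needs -y to run non-interactively).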
      - name: Install Libs
        run: |
          sudo apt install libdbus-1-dev pkg-config
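      # uv supplies a Python environment for the next step; download_tokenizer_files.py
      # presumably fetches tokenizer assets that the build or tests expect locally.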
      - name: Install UV
        run: |
          curl -LsSf https://astral.sh/uv/install.sh | sh
      - name: Run download_tokenizer_files.py
        run: uv run download_tokenizer_files.py
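      # Minimal stable Rust toolchain used by the build, test, and clippy steps.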
      - name: Set up Rust
        uses: actions-rs/toolchain@v1
        with:
          toolchain: stable
          profile: minimal
          override: true
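      # Cache the Cargo registry, index, and target directory; the registry and build caches
      # are keyed on Cargo.lock so they refresh when dependencies change.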
      - name: Cache Cargo registry
        uses: actions/cache@v3
        with:
          path: ~/.cargo/registry
          key: ${{ runner.os }}-cargo-registry-${{ hashFiles('**/Cargo.lock') }}
          restore-keys: |
            ${{ runner.os }}-cargo-registry-
      - name: Cache Cargo index
        uses: actions/cache@v3
        with:
          path: ~/.cargo/index
          key: ${{ runner.os }}-cargo-index
          restore-keys: |
            ${{ runner.os }}-cargo-index
      - name: Cache Cargo build
        uses: actions/cache@v3
        with:
          path: target
          key: ${{ runner.os }}-cargo-build-${{ hashFiles('**/Cargo.lock') }}
          restore-keys: |
            ${{ runner.os }}-cargo-build-
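      # Install the Ollama server/CLI from the official install script.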
      - name: Install Ollama
        run: curl -fsSL https://ollama.com/install.sh | sh
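      # Start the Ollama server in the background and wait until it answers on its default port (11434).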
      - name: Start Ollama
        run: |
          # Run the server in the background, in a way that survives into the next step
          nohup ollama serve > ollama.log 2>&1 &
          # Block until the readiness endpoint responds
          time curl --retry 5 --retry-connrefused --retry-delay 1 -sf http://localhost:11434
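      # Smoke-test the model: `ollama run` pulls qwen2.5 if it is not present and sends one prompt;
      # dump the server log on failure.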
      - name: Test Ollama model
        run: ollama run qwen2.5 hello || cat ollama.log
      - name: Build the Rust project
        run: cargo build
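      # OLLAMA_MODEL tells the test suite which model to exercise (assumption: the tests read it via std::env::var).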
      - name: Run Tests
        run: cargo test --verbose
        env:
          OLLAMA_MODEL: "qwen2.5"
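      # Lint with clippy; as written, warnings do not fail the job (pass `-- -D warnings` to make them hard errors).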
      - name: Check lint
        run: |
          cargo clippy
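
For context, a minimal sketch of how a test could consume this setup, assuming the suite shells out to the ollama CLI the same way the smoke-test step does; the test name, fallback model, and use of std::process::Command are illustrative, not taken from the repository:

// Hypothetical integration test: reads OLLAMA_MODEL (set in the Run Tests step above)
// and prompts the locally served model via the ollama CLI.
use std::process::Command;

#[test]
fn ollama_model_answers_a_prompt() {
    // Fall back to the model pinned in the workflow if the variable is unset.
    let model = std::env::var("OLLAMA_MODEL").unwrap_or_else(|_| "qwen2.5".to_string());

    let output = Command::new("ollama")
        .arg("run")
        .arg(&model)
        .arg("hello")
        .output()
        .expect("failed to spawn the ollama CLI");

    // A healthy server exits 0 and prints a non-empty completion.
    assert!(
        output.status.success(),
        "ollama run failed: {}",
        String::from_utf8_lossy(&output.stderr)
    );
    assert!(!output.stdout.is_empty());
}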