diff --git a/README.md b/README.md
index 7f31461..03ca423 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,5 @@
 # llama_leap
 
-- WIP
 - Nim library to work with the Ollama API
 
 ## Example
@@ -9,6 +8,8 @@
 - you may pass an alternate to `newOllamaAPI()`
 
 ```nim
+import llama_leap
+
 let ollama = newOllamaAPI()
 echo ollama.generate("llama2", "How are you today?")
 ```
@@ -16,6 +17,7 @@ echo ollama.generate("llama2", "How are you today?")
 ## Generate
 
 - Only the non-streaming generate API is currently supported
+  - streaming is coming soon (TM)
 
 ```nim
 # simple interface
@@ -75,4 +77,10 @@ echo "Embedding Length: " & $resp.embedding.len
 
 - ensure ollama is running on the default port
   - `./ollama serve`
-- run `nim c -r tests/test_llama_leap.nim`
+- run `nimble test`
+
+## Related Repos
+
+- [openai_leap](https://github.com/monofuel/openai_leap) is a Nim client for the OpenAI API.
+- [vertex_leap](https://github.com/monofuel/vertex_leap) is a client for Google's VertexAI API.
+- [mono_llm](https://github.com/monofuel/mono_llm) is a higher-level Nim library that creates a unified interface for OpenAI, Ollama, and VertexAI.
diff --git a/examples/config.nims b/examples/config.nims
new file mode 100644
index 0000000..a119208
--- /dev/null
+++ b/examples/config.nims
@@ -0,0 +1 @@
+--path:"../src"
diff --git a/examples/example.nim b/examples/example.nim
new file mode 100644
index 0000000..f54f8b2
--- /dev/null
+++ b/examples/example.nim
@@ -0,0 +1,4 @@
+import llama_leap
+
+let ollama = newOllamaAPI()
+echo ollama.generate("llama2", "How are you today?")
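
For anyone trying this patch locally: the new `examples/example.nim` only exercises the simple generate call. Below is a minimal end-to-end sketch combining the pieces visible in this diff (`newOllamaAPI`, `generate`, and the `resp.embedding.len` context line from the embeddings hunk). Note the `ollama.embeddings("llama2", ...)` call is an assumption about the API's shape; the diff only shows the `echo` line that consumes its result.

```nim
import llama_leap

# Talks to a local Ollama server on the default port
# (start one first with `./ollama serve`).
let ollama = newOllamaAPI()

# Simple non-streaming generate interface, as in the README example.
echo ollama.generate("llama2", "How are you today?")

# Embeddings: the hunk context only shows that the response exposes
# `.embedding`; the exact call below is assumed, not shown in the diff.
let resp = ollama.embeddings("llama2", "How are you today?")
echo "Embedding Length: " & $resp.embedding.len
```

To run this the way `examples/example.nim` does, the new `examples/config.nims` adds `--path:"../src"` so `import llama_leap` resolves against the in-repo source; `nim c -r examples/example.nim` should then build without installing the package.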