From 92bef3a7d2dbd49af9e22cf44a30b33bb0af9202 Mon Sep 17 00:00:00 2001
From: J / Jacob Babich
Date: Thu, 10 Oct 2024 18:20:51 -0400
Subject: [PATCH] fix: implicitly use `c_char` type instead of hardcoding `i8`

---
 llama-cpp-2/src/model.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/llama-cpp-2/src/model.rs b/llama-cpp-2/src/model.rs
index 49b3fb5..54c82bd 100644
--- a/llama-cpp-2/src/model.rs
+++ b/llama-cpp-2/src/model.rs
@@ -523,7 +523,7 @@ impl LlamaModel {
         let message_length = chat.iter().fold(0, |acc, c| {
             acc + c.role.to_bytes().len() + c.content.to_bytes().len()
         });
-        let mut buff: Vec<i8> = vec![0_i8; message_length * 4];
+        let mut buff = vec![0; message_length * 4];
 
         // Build our llama_cpp_sys_2 chat messages
         let chat: Vec<llama_cpp_sys_2::llama_chat_message> = chat
@@ -548,7 +548,7 @@ impl LlamaModel {
                 chat.as_ptr(),
                 chat.len(),
                 add_ass,
-                buff.as_mut_ptr().cast::<i8>(),
+                buff.as_mut_ptr(),
                 buff.len() as i32,
             );
             // A buffer twice the size should be sufficient for all models, if this is not the case for a new model, we can increase it