removed deprecated functions

MarcusDunn committed Jan 2, 2024
1 parent c314e7c commit 8971bf5
Showing 2 changed files with 0 additions and 58 deletions.
25 changes: 0 additions & 25 deletions llama-cpp-2/src/context.rs
@@ -132,36 +132,11 @@ impl<'model> LlamaContext<'model> {
         unsafe { slice::from_raw_parts_mut(logits_ptr, n_vocab * n_tokens) }
     }
 
-    /// get the logits
-    ///
-    /// # Panics
-    ///
-    /// - `n_vocab` does not fit into a usize
-    #[deprecated]
-    #[must_use]
-    pub fn logits(&self, n_tokens: usize) -> &[f32] {
-        let n_vocab = usize::try_from(self.model.n_vocab()).expect("n_vocab should be positive");
-        let logits_ptr = unsafe { llama_cpp_sys_2::llama_get_logits(self.context.as_ptr()) };
-
-        unsafe { slice::from_raw_parts(logits_ptr, n_vocab * n_tokens) }
-    }
-
     /// Returns the timings for the context.
     pub fn timings(&mut self) -> LlamaTimings {
         let timings = unsafe { llama_cpp_sys_2::llama_get_timings(self.context.as_ptr()) };
         LlamaTimings { timings }
     }
-
-    /// Create a new `LlamaContext` from a model.
-    #[deprecated(note = "use `Model::new_context` instead")]
-    #[tracing::instrument(skip_all)]
-    pub fn new_with_model(
-        backend: &LlamaBackend,
-        model: &'model mut LlamaModel,
-        context_params: &LlamaContextParams,
-    ) -> Result<Self, LlamaContextLoadError> {
-        model.new_context(backend, context_params)
-    }
 }
 
 impl Drop for LlamaContext<'_> {
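The removed `logits` accessor has a surviving `&mut self` counterpart (its tail is visible at the top of the hunk), and the removed `new_with_model` was a thin wrapper over the model's `new_context`, as its body shows. A minimal migration sketch for the constructor, assuming the crate's module paths; only the delegated call `model.new_context(backend, context_params)` is taken verbatim from the removed body:

use llama_cpp_2::context::params::LlamaContextParams;
use llama_cpp_2::context::LlamaContext;
use llama_cpp_2::llama_backend::LlamaBackend;
use llama_cpp_2::model::LlamaModel;
use llama_cpp_2::LlamaContextLoadError;

// Drop-in replacement for the removed `LlamaContext::new_with_model`:
// construct the context through the model, exactly as the deprecated
// wrapper did internally. The import paths above are assumptions.
fn make_context<'m>(
    backend: &LlamaBackend,
    model: &'m mut LlamaModel,
    context_params: &LlamaContextParams,
) -> Result<LlamaContext<'m>, LlamaContextLoadError> {
    model.new_context(backend, context_params)
}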
33 changes: 0 additions & 33 deletions llama-cpp-2/src/llama_batch.rs
@@ -22,33 +22,6 @@ impl LlamaBatch {
         self.initialized_logits.clear();
     }
 
-    /// Set the logits flag of the last token in the batch to [value]. If [value] is true, the token will be initialized
-    /// after a decode and can be read from. If [value] is false, the token will not be initialized (this is the default).
-    ///
-    /// # Panics
-    ///
-    /// Panics if there are no tokens in the batch.
-    #[deprecated(
-        note = "not compatible with multiple sequences. prefer setting logits while adding tokens"
-    )]
-    pub fn set_last_logit(&mut self, value: bool) {
-        let last_index = self.llama_batch.n_tokens - 1;
-        let last_index_usize =
-            usize::try_from(last_index).expect("cannot fit n_tokens - 1 into a usize");
-
-        if value {
-            self.initialized_logits.push(last_index);
-        } else {
-            self.initialized_logits.retain(|&x| x != last_index);
-        }
-
-        let value = i8::from(value);
-        unsafe {
-            let last: *mut i8 = self.llama_batch.logits.add(last_index_usize);
-            *last = value;
-        }
-    }
-
     /// add a token to the batch for sequences [`seq_ids`] at position [pos]. If [logits] is true, the
     /// token will be initialized and can be read from after the next decode.
     ///
@@ -115,12 +88,6 @@ impl LlamaBatch {
         }
     }
 
-    /// add a prompt to the batch at sequence id 0
-    #[deprecated(note = "not compatible with multiple sequences. use `add_prompt_seq` instead")]
-    pub fn add_prompt(&mut self, prompt: &[LlamaToken]) {
-        self.add_prompt_seq(prompt, &[0]);
-    }
-
     /// add a prompt to the batch at the given sequence ids. This must be the initial prompt as it
     /// will be added to the batch starting at position 0.
    pub fn add_prompt_seq(&mut self, prompt: &[LlamaToken], seq_ids: &[i32]) {
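Both removals in this file point to the same replacement pattern: make the sequence ids explicit. A short sketch under stated assumptions: `add_prompt_seq` and the (token, pos, seq_ids, logits) shape of `add` are taken from the doc comments in the hunks above, while the exact integer types and return value of `add`, and the import paths, are assumptions:

use llama_cpp_2::llama_batch::LlamaBatch;
use llama_cpp_2::token::LlamaToken;

// Equivalent of the removed `add_prompt`: pass sequence id 0 explicitly,
// exactly as the removed one-line wrapper did.
fn queue_prompt(batch: &mut LlamaBatch, prompt: &[LlamaToken]) {
    batch.add_prompt_seq(prompt, &[0]);
}

// Replacement for the removed `set_last_logit`: request logits while adding
// tokens, as the deprecation note suggests. Like the removed function, this
// expects a non-empty prompt.
fn queue_prompt_last_logit(batch: &mut LlamaBatch, prompt: &[LlamaToken]) {
    let last = prompt.len().checked_sub(1).expect("prompt must not be empty");
    for (i, &token) in prompt.iter().enumerate() {
        // only the final token's logits are initialized after the next decode
        batch.add(token, i as i32, &[0], i == last);
    }
}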
