Commit aec18f1: clippy

MarcusDunn committed Feb 5, 2024
1 parent c631133 commit aec18f1
Showing 4 changed files with 24 additions and 20 deletions.
4 changes: 2 additions & 2 deletions llama-cpp-2/benches/grammar_bias.rs
@@ -30,9 +30,9 @@ fn criterion_benchmark(c: &mut Criterion) {
         .unwrap();
     let backend = LlamaBackend::init().unwrap();
     let model_params = LlamaModelParams::default();
-    let model = LlamaModel::load_from_file(&backend, &file, &model_params).unwrap();
+    let model = LlamaModel::load_from_file(&backend, file, &model_params).unwrap();
     let mut ctx = model
-        .new_context(&backend, &LlamaContextParams::default())
+        .new_context(&backend, LlamaContextParams::default())
         .unwrap();
     let grammar = LlamaGrammar::from_str(include_str!("../src/grammar/json.gbnf")).unwrap();
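These two call-site changes drop borrows that clippy reports as redundant: the callees accept these arguments in a way that makes the extra `&` unnecessary. Below is a minimal sketch of the pattern, assuming the lint involved is `clippy::needless_borrows_for_generic_args` (or the older `needless_borrow`); the `open` helper is invented for illustration, not part of the crate.

```rust
use std::path::{Path, PathBuf};

// Hypothetical stand-in for a path parameter like LlamaModel::load_from_file's.
fn open(path: impl AsRef<Path>) -> bool {
    path.as_ref().exists()
}

fn main() {
    let file = PathBuf::from("model.gguf");
    // open(&file); // clippy: the borrow is unnecessary; `file` is not used again
    let _ = open(file); // pass by value, as the commit now does
}
```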
2 changes: 1 addition & 1 deletion llama-cpp-2/examples/simple.rs
@@ -1,5 +1,5 @@
 //! This is an translation of simple.cpp in llama.cpp using llama-cpp-2.
-#![allow(clippy::cast_possible_wrap, clippy::cast_possible_truncation)]
+#![allow(clippy::cast_possible_wrap, clippy::cast_possible_truncation, clippy::cast_precision_loss, clippy::cast_sign_loss)]
 
 use anyhow::{bail, Context, Result};
 use clap::Parser;
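The widened `allow` list admits two more cast lints that example code commonly trips. As a rough illustration (the values below are invented, not taken from simple.rs): `cast_precision_loss` fires when an integer is cast to a float type that cannot represent every value exactly, and `cast_sign_loss` fires when a possibly negative value is cast to an unsigned type.

```rust
fn main() {
    let n: i64 = (1_i64 << 53) + 1;
    let approx = n as f64; // clippy::cast_precision_loss: f64 cannot represent every i64 exactly
    let d: i32 = -1;
    let wrapped = d as u32; // clippy::cast_sign_loss: -1 wraps to 4294967295
    println!("{approx} {wrapped}");
}
```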
34 changes: 17 additions & 17 deletions llama-cpp-2/src/context/params.rs
@@ -84,7 +84,7 @@ impl LlamaContextParams {
     /// let params = params.with_seed(1234);
     /// assert_eq!(params.seed(), 1234);
     /// ```
-    pub fn with_seed(mut self, seed: u32) -> Self {
+    #[must_use] pub fn with_seed(mut self, seed: u32) -> Self {
         self.context_params.seed = seed;
         self
     }
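Most of the edits in this file attach `#[must_use]` to getters and consuming builder methods. That is what clippy's `must_use_candidate` lint suggests (an assumption about which lint fired; the diff only shows the attribute): since `with_seed` takes `self` by value and returns the updated struct, discarding the return value silently throws the configuration away. A minimal sketch with a hypothetical `Params` type:

```rust
#[derive(Default)]
struct Params {
    seed: u32,
}

impl Params {
    #[must_use]
    fn with_seed(mut self, seed: u32) -> Self {
        self.seed = seed;
        self
    }
}

fn main() {
    let params = Params::default();
    // params.with_seed(1234); // warning: unused return value that must be used
    let params = params.with_seed(1234); // keep the returned value instead
    assert_eq!(params.seed, 1234);
}
```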
@@ -99,7 +99,7 @@ impl LlamaContextParams {
     ///     .with_seed(1234);
     /// assert_eq!(params.seed(), 1234);
     /// ```
-    pub fn seed(&self) -> u32 {
+    #[must_use] pub fn seed(&self) -> u32 {
         self.context_params.seed
     }
 
@@ -114,8 +114,8 @@ impl LlamaContextParams {
     /// let params = params.with_n_ctx(NonZeroU32::new(2048));
     /// assert_eq!(params.n_ctx(), NonZeroU32::new(2048));
     /// ```
-    pub fn with_n_ctx(mut self, n_ctx: Option<NonZeroU32>) -> Self {
-        self.context_params.n_ctx = n_ctx.map_or(0, |n_ctx| n_ctx.get());
+    #[must_use] pub fn with_n_ctx(mut self, n_ctx: Option<NonZeroU32>) -> Self {
+        self.context_params.n_ctx = n_ctx.map_or(0, std::num::NonZeroU32::get);
         self
     }
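The second change in the hunk above replaces the closure `|n_ctx| n_ctx.get()` with the method path `std::num::NonZeroU32::get`. This is the rewrite suggested by clippy's `redundant_closure_for_method_calls` lint (named here as an assumption from the shape of the change); both forms are equivalent:

```rust
use std::num::NonZeroU32;

fn main() {
    let n_ctx: Option<NonZeroU32> = NonZeroU32::new(2048);
    let a = n_ctx.map_or(0, |n| n.get());     // closure that only forwards to a method
    let b = n_ctx.map_or(0, NonZeroU32::get); // the method path clippy prefers
    assert_eq!(a, b);
}
```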

@@ -128,11 +128,11 @@ impl LlamaContextParams {
     /// ```rust
     /// let params = llama_cpp_2::context::params::LlamaContextParams::default();
     /// assert_eq!(params.n_ctx(), std::num::NonZeroU32::new(512));
-    pub fn n_ctx(&self) -> Option<NonZeroU32> {
+    #[must_use] pub fn n_ctx(&self) -> Option<NonZeroU32> {
         NonZeroU32::new(self.context_params.n_ctx)
     }
 
-    /// Set the n_batch
+    /// Set the `n_batch`
     ///
     /// # Examples
     ///
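The doc-comment edits that wrap `n_batch` in backticks follow clippy's `doc_markdown` lint (an assumption from the shape of the change), which asks that identifiers in doc comments be marked as inline code so rustdoc renders them correctly:

```rust
/// Set the `n_batch`. (A bare "Set the n_batch" is what the lint flags.)
pub fn with_n_batch(n_batch: u32) -> u32 {
    n_batch
}

fn main() {
    assert_eq!(with_n_batch(512), 512);
}
```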
@@ -143,12 +143,12 @@ impl LlamaContextParams {
     ///     .with_n_batch(2048);
     /// assert_eq!(params.n_batch(), 2048);
     /// ```
-    pub fn with_n_batch(mut self, n_batch: u32) -> Self {
+    #[must_use] pub fn with_n_batch(mut self, n_batch: u32) -> Self {
         self.context_params.n_batch = n_batch;
         self
     }
 
-    /// Get the n_batch
+    /// Get the `n_batch`
     ///
     /// # Examples
     ///
@@ -157,7 +157,7 @@ impl LlamaContextParams {
     /// let params = LlamaContextParams::default();
     /// assert_eq!(params.n_batch(), 512);
     /// ```
-    pub fn n_batch(&self) -> u32 {
+    #[must_use] pub fn n_batch(&self) -> u32 {
         self.context_params.n_batch
     }
 
@@ -171,7 +171,7 @@ impl LlamaContextParams {
     ///     .with_rope_scaling_type(RopeScalingType::Linear);
     /// assert_eq!(params.rope_scaling_type(), RopeScalingType::Linear);
     /// ```
-    pub fn with_rope_scaling_type(mut self, rope_scaling_type: RopeScalingType) -> Self {
+    #[must_use] pub fn with_rope_scaling_type(mut self, rope_scaling_type: RopeScalingType) -> Self {
         self.context_params.rope_scaling_type = i32::from(rope_scaling_type);
         self
     }
@@ -184,7 +184,7 @@ impl LlamaContextParams {
     /// let params = llama_cpp_2::context::params::LlamaContextParams::default();
     /// assert_eq!(params.rope_scaling_type(), llama_cpp_2::context::params::RopeScalingType::Unspecified);
     /// ```
-    pub fn rope_scaling_type(&self) -> RopeScalingType {
+    #[must_use] pub fn rope_scaling_type(&self) -> RopeScalingType {
         RopeScalingType::from(self.context_params.rope_scaling_type)
     }
 
@@ -198,7 +198,7 @@ impl LlamaContextParams {
     ///     .with_rope_freq_base(0.5);
     /// assert_eq!(params.rope_freq_base(), 0.5);
     /// ```
-    pub fn with_rope_freq_base(mut self, rope_freq_base: f32) -> Self {
+    #[must_use] pub fn with_rope_freq_base(mut self, rope_freq_base: f32) -> Self {
         self.context_params.rope_freq_base = rope_freq_base;
         self
     }
@@ -211,7 +211,7 @@ impl LlamaContextParams {
     /// let params = llama_cpp_2::context::params::LlamaContextParams::default();
     /// assert_eq!(params.rope_freq_base(), 0.0);
     /// ```
-    pub fn rope_freq_base(&self) -> f32 {
+    #[must_use] pub fn rope_freq_base(&self) -> f32 {
         self.context_params.rope_freq_base
     }
 
@@ -225,7 +225,7 @@ impl LlamaContextParams {
     ///     .with_rope_freq_scale(0.5);
     /// assert_eq!(params.rope_freq_scale(), 0.5);
     /// ```
-    pub fn with_rope_freq_scale(mut self, rope_freq_scale: f32) -> Self {
+    #[must_use] pub fn with_rope_freq_scale(mut self, rope_freq_scale: f32) -> Self {
         self.context_params.rope_freq_scale = rope_freq_scale;
         self
     }
@@ -238,7 +238,7 @@ impl LlamaContextParams {
     /// let params = llama_cpp_2::context::params::LlamaContextParams::default();
     /// assert_eq!(params.rope_freq_scale(), 0.0);
     /// ```
-    pub fn rope_freq_scale(&self) -> f32 {
+    #[must_use] pub fn rope_freq_scale(&self) -> f32 {
         self.context_params.rope_freq_scale
     }
 
@@ -250,7 +250,7 @@ impl LlamaContextParams {
     /// let params = llama_cpp_2::context::params::LlamaContextParams::default();
     /// assert_eq!(params.n_threads(), 4);
     /// ```
-    pub fn n_threads(&self) -> u32 {
+    #[must_use] pub fn n_threads(&self) -> u32 {
         self.context_params.n_threads
     }
 
@@ -264,7 +264,7 @@ impl LlamaContextParams {
     ///     .with_n_threads(8);
     /// assert_eq!(params.n_threads(), 8);
     /// ```
-    pub fn with_n_threads(mut self, n_threads: u32) -> Self {
+    #[must_use] pub fn with_n_threads(mut self, n_threads: u32) -> Self {
         self.context_params.n_threads = n_threads;
         self
     }
4 changes: 4 additions & 0 deletions llama-cpp-2/src/llama_batch.rs
@@ -37,6 +37,10 @@ impl LlamaBatch {
     ///
     /// - [`self.llama_batch.n_tokens`] does not fit into a usize
     /// - [`seq_ids.len()`] does not fit into a [`llama_seq_id`]
+    ///
+    /// # Errors
+    ///
+    /// Returns an error if there is insufficient space in the buffer.
     pub fn add(
         &mut self,
         LlamaToken(id): LlamaToken,
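The new `# Errors` section matches the rustdoc convention enforced by clippy's `missing_errors_doc` lint (assumed to be the trigger): a public function returning `Result` should document its failure modes. A self-contained sketch with a simplified, hypothetical `add`:

```rust
/// Pushes a token id into a fixed-capacity batch buffer.
///
/// # Errors
///
/// Returns an error if there is insufficient space in the buffer.
pub fn add(buf: &mut Vec<u32>, token: u32, capacity: usize) -> Result<(), String> {
    if buf.len() >= capacity {
        return Err(String::from("insufficient space in the buffer"));
    }
    buf.push(token);
    Ok(())
}

fn main() {
    let mut buf = Vec::new();
    assert!(add(&mut buf, 42, 1).is_ok());
    assert!(add(&mut buf, 43, 1).is_err());
}
```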
