From 9c6c8652db788bb3132a8b7a72937f8d4c83ee34 Mon Sep 17 00:00:00 2001 From: bskrlj Date: Thu, 31 Aug 2023 11:54:09 +0200 Subject: [PATCH 1/2] 0w --- src/block_ffm.rs | 4 +++- src/block_helpers.rs | 2 ++ src/block_misc.rs | 2 ++ src/block_neural.rs | 9 ++++----- src/block_relu.rs | 2 ++ src/cache.rs | 2 ++ src/cmdline.rs | 2 ++ src/feature_buffer.rs | 2 ++ src/feature_transform_executor.rs | 2 ++ src/feature_transform_implementations.rs | 6 ++++-- src/feature_transform_parser.rs | 5 +++-- src/graph.rs | 2 ++ src/main.rs | 5 ++--- src/model_instance.rs | 2 ++ src/optimizer.rs | 1 + src/parser.rs | 2 ++ src/persistence.rs | 2 ++ src/radix_tree.rs | 2 ++ src/regressor.rs | 2 ++ src/serving.rs | 2 ++ src/vwmap.rs | 2 ++ 21 files changed, 47 insertions(+), 13 deletions(-) diff --git a/src/block_ffm.rs b/src/block_ffm.rs index 9930a020..3f2b46df 100644 --- a/src/block_ffm.rs +++ b/src/block_ffm.rs @@ -1,3 +1,5 @@ +#![allow(dead_code,unused_imports, unused_mut, invalid_value)] + use core::arch::x86_64::*; use rustc_hash::FxHashSet; use std::any::Any; @@ -283,7 +285,7 @@ impl BlockTrait for BlockFFM { fb.ffm_buffer.len() * (self.ffm_k * self.ffm_num_fields) as usize; if local_data_ffm_len < FFM_STACK_BUF_LEN { // Fast-path - using on-stack data structures - let mut local_data_ffm_values: [f32; FFM_STACK_BUF_LEN as usize] = + let local_data_ffm_values: [f32; FFM_STACK_BUF_LEN as usize] = MaybeUninit::uninit().assume_init(); core_macro!(local_data_ffm_values); } else { diff --git a/src/block_helpers.rs b/src/block_helpers.rs index c575ec8f..ac0cb720 100644 --- a/src/block_helpers.rs +++ b/src/block_helpers.rs @@ -1,3 +1,5 @@ +#![allow(dead_code,unused_imports)] + use crate::optimizer::OptimizerTrait; use std::error::Error; use std::io; diff --git a/src/block_misc.rs b/src/block_misc.rs index fe7910a2..9ae2cd23 100644 --- a/src/block_misc.rs +++ b/src/block_misc.rs @@ -1,3 +1,5 @@ +#![allow(dead_code,unused_imports)] + use std::any::Any; use std::error::Error; diff --git 
a/src/block_neural.rs b/src/block_neural.rs index fd7e4a2d..58f3e817 100644 --- a/src/block_neural.rs +++ b/src/block_neural.rs @@ -1,3 +1,5 @@ +#![allow(dead_code,unused_imports)] + use rand_distr::{Distribution, Normal, Uniform}; use rand_xoshiro::rand_core::SeedableRng; use rand_xoshiro::Xoshiro256PlusPlus; @@ -253,11 +255,8 @@ impl BlockTrait for BlockNeuronLayer { unsafe { if update && self.neuron_type == NeuronType::WeightedSum { // first we need to initialize inputs to zero - // TODO - what to think about this buffer - let mut output_errors: [f32; MAX_NUM_INPUTS] = MaybeUninit::uninit().assume_init(); - output_errors - .get_unchecked_mut(0..self.num_inputs) - .fill(0.0); + + let mut output_errors: [f32; MAX_NUM_INPUTS] = [0.0; MAX_NUM_INPUTS]; let (input_tape, output_tape) = block_helpers::get_input_output_borrows( &mut pb.tape, diff --git a/src/block_relu.rs b/src/block_relu.rs index 79777756..43dc7723 100644 --- a/src/block_relu.rs +++ b/src/block_relu.rs @@ -1,3 +1,5 @@ +#![allow(dead_code,unused_imports)] + use std::any::Any; use std::error::Error; diff --git a/src/cache.rs b/src/cache.rs index 1724c2e3..a2a442a9 100644 --- a/src/cache.rs +++ b/src/cache.rs @@ -1,3 +1,5 @@ +#![allow(dead_code,unused_imports)] + use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; use std::error::Error; use std::fs; diff --git a/src/cmdline.rs b/src/cmdline.rs index 2279c91b..a6c1fbb3 100644 --- a/src/cmdline.rs +++ b/src/cmdline.rs @@ -1,3 +1,5 @@ +#![allow(dead_code,unused_imports)] + use crate::version; use clap::{App, AppSettings, Arg}; diff --git a/src/feature_buffer.rs b/src/feature_buffer.rs index 6f5af778..0707d013 100644 --- a/src/feature_buffer.rs +++ b/src/feature_buffer.rs @@ -1,3 +1,5 @@ +#![allow(dead_code,unused_imports)] + use crate::feature_transform_executor; use crate::model_instance; use crate::parser; diff --git a/src/feature_transform_executor.rs b/src/feature_transform_executor.rs index 239022de..7daac95e 100644 --- 
a/src/feature_transform_executor.rs +++ b/src/feature_transform_executor.rs @@ -1,3 +1,5 @@ +#![allow(dead_code,unused_imports)] + use crate::parser; use crate::vwmap; use std::error::Error; diff --git a/src/feature_transform_implementations.rs b/src/feature_transform_implementations.rs index 93407359..36c7c479 100644 --- a/src/feature_transform_implementations.rs +++ b/src/feature_transform_implementations.rs @@ -1,3 +1,5 @@ +#![allow(dead_code,unused_imports)] + use std::error::Error; use std::io::Error as IOError; use std::io::ErrorKind; @@ -216,14 +218,14 @@ impl FunctionExecutorTrait for TransformerLogRatioBinner { feature_reader_float_namespace!( record_buffer, self.from_namespace1.namespace_descriptor, - hash_index1, + _hash_index1, hash_value1, float_value1, { feature_reader_float_namespace!( record_buffer, self.from_namespace2.namespace_descriptor, - hash_index2, + _hash_index2, hash_value2, float_value2, { diff --git a/src/feature_transform_parser.rs b/src/feature_transform_parser.rs index 3a10d1b8..d024cf5d 100644 --- a/src/feature_transform_parser.rs +++ b/src/feature_transform_parser.rs @@ -1,5 +1,6 @@ //#[macro_use] //extern crate nom; +#![allow(dead_code,unused_imports)] use crate::vwmap; use serde::{Deserialize, Serialize}; @@ -68,7 +69,7 @@ impl NamespaceTransformsParser { } let ( _, - (to_namespace_verbose, function_name, from_namespaces_verbose, function_parameters), + (to_namespace_verbose, _function_name, from_namespaces_verbose, _function_parameters), ) = rr.unwrap(); // Here we just check for clashes with namespaces from input file @@ -242,7 +243,7 @@ impl NamespaceTransforms { } pub fn get_namespace_descriptor( - transform_namespaces: &NamespaceTransforms, + _transform_namespaces: &NamespaceTransforms, vw: &vwmap::VwNamespaceMap, namespace_char: char, ) -> Result> { diff --git a/src/graph.rs b/src/graph.rs index c3e51296..b0fcda9d 100644 --- a/src/graph.rs +++ b/src/graph.rs @@ -1,3 +1,5 @@ +#![allow(dead_code,unused_imports)] + use 
crate::block_misc; use crate::model_instance; use crate::port_buffer; diff --git a/src/main.rs b/src/main.rs index 753b0629..4440285f 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,9 +1,8 @@ -#![allow(dead_code)] -#![allow(unused_imports)] #![allow(unused_variables)] #![allow(unused_mut)] #![allow(non_snake_case)] #![allow(redundant_semicolons)] +#![allow(dead_code,unused_imports)] use crate::hogwild::HogwildTrainer; use crate::multithread_helpers::BoxedRegressorTrait; @@ -107,7 +106,7 @@ fn build_cache_without_training(cl: clap::ArgMatches) -> Result<(), Box break, // EOF Ok(buffer) => buffer, Err(_e) => return Err(_e), diff --git a/src/model_instance.rs b/src/model_instance.rs index 2c822996..18ca1d6f 100644 --- a/src/model_instance.rs +++ b/src/model_instance.rs @@ -1,3 +1,5 @@ +#![allow(dead_code,unused_imports)] + use std::error::Error; use std::io::Error as IOError; use std::io::ErrorKind; diff --git a/src/optimizer.rs b/src/optimizer.rs index d1494c7f..8cb1d01b 100644 --- a/src/optimizer.rs +++ b/src/optimizer.rs @@ -1,3 +1,4 @@ +#![allow(dead_code,unused_imports)] use std::marker::PhantomData; pub trait OptimizerTrait: std::clone::Clone { diff --git a/src/parser.rs b/src/parser.rs index f8548d56..722733b3 100644 --- a/src/parser.rs +++ b/src/parser.rs @@ -1,3 +1,5 @@ +#![allow(dead_code,unused_imports)] + use crate::radix_tree::{NamespaceDescriptorWithHash, RadixTree}; use crate::vwmap; use fasthash::murmur3; diff --git a/src/persistence.rs b/src/persistence.rs index ba6be8d7..41a11309 100644 --- a/src/persistence.rs +++ b/src/persistence.rs @@ -1,3 +1,5 @@ +#![allow(dead_code,unused_imports)] + use std::error::Error; use std::str; diff --git a/src/radix_tree.rs b/src/radix_tree.rs index c5ef9c0c..c50a09fb 100644 --- a/src/radix_tree.rs +++ b/src/radix_tree.rs @@ -1,3 +1,5 @@ +#![allow(dead_code,unused_imports)] + use crate::vwmap::NamespaceDescriptor; #[derive(Clone, Copy, Debug, PartialEq)] diff --git a/src/regressor.rs b/src/regressor.rs index 
c9d3c4a6..e073f5c1 100644 --- a/src/regressor.rs +++ b/src/regressor.rs @@ -1,3 +1,5 @@ +#![allow(dead_code,unused_imports)] + use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; use rustc_hash::FxHashSet; use std::any::Any; diff --git a/src/serving.rs b/src/serving.rs index 6fe45ea6..1393c67d 100644 --- a/src/serving.rs +++ b/src/serving.rs @@ -1,3 +1,5 @@ +#![allow(dead_code,unused_imports)] + use daemonize::Daemonize; use std::error::Error; use std::io; diff --git a/src/vwmap.rs b/src/vwmap.rs index ed792fea..34fd0a42 100644 --- a/src/vwmap.rs +++ b/src/vwmap.rs @@ -1,3 +1,5 @@ +#![allow(dead_code,unused_imports)] + use serde::{Deserialize, Serialize}; use std::collections::HashMap; use std::error::Error; From 8235a50e1fae6f1032061413dc82a42aff96dac5 Mon Sep 17 00:00:00 2001 From: bskrlj Date: Thu, 31 Aug 2023 12:55:10 +0200 Subject: [PATCH 2/2] comment --- src/optimizer.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/optimizer.rs b/src/optimizer.rs index 8cb1d01b..bf962af4 100644 --- a/src/optimizer.rs +++ b/src/optimizer.rs @@ -124,9 +124,9 @@ impl OptimizerTrait for OptimizerAdagradLUT { let minus_power_t = -power_t; for x in 0..FASTMATH_LR_LUT_SIZE { // accumulated gradients are always positive floating points, sign is guaranteed to be zero - // floating point: 1 bit of sign, 7 bits of signed expontent then floating point bits (mantissa) + // floating point: 1 bit of sign, 7 bits of signed exponent then floating point bits (mantissa) // we will take 7 bits of exponent + whatever most significant bits of mantissa remain - // we take two consequtive such values, so we act as if had rounding + // we take two consecutive such values, so we act as if it had rounding let float_x = (f32::from_bits((x as u32) << (31 - FASTMATH_LR_LUT_BITS))) + initial_acc_gradient; let float_x_plus_one =