diff --git a/Cargo.toml b/Cargo.toml
index 0fa96bd..2083d1c 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -6,7 +6,7 @@ members = [
 
 [package]
 name = "fast_log"
-version = "1.6.2"
+version = "1.6.3"
 description = "Rust async log High-performance asynchronous logging"
 readme = "Readme.md"
 authors = ["ce "]
diff --git a/benches/log_mmap.rs b/benches/log_mmap.rs
index 8048aa9..bb44df2 100644
--- a/benches/log_mmap.rs
+++ b/benches/log_mmap.rs
@@ -6,7 +6,7 @@
 use fast_log::Config;
 use fast_log::consts::LogSize;
 use fast_log::plugin::file_mmap::MmapFile;
-use fast_log::plugin::file_split::RollingType;
+use fast_log::plugin::file_split::KeepType;
 use fast_log::plugin::packer::LogPacker;
 use test::{black_box, Bencher};
 
@@ -20,7 +20,7 @@ fn bench_log_mmap(b: &mut Bencher) {
             .split::<MmapFile, LogPacker>(
                 "target/logs/temp.log",
                 LogSize::MB(100),
-                RollingType::All,
+                KeepType::All,
                 LogPacker {},
             ),
     )
diff --git a/example/src/bench_test_file_split.rs b/example/src/bench_test_file_split.rs
index 33a7df5..0215a32 100644
--- a/example/src/bench_test_file_split.rs
+++ b/example/src/bench_test_file_split.rs
@@ -1,7 +1,7 @@
 use fast_log::bencher::TPS;
 use fast_log::config::Config;
 use fast_log::consts::LogSize;
-use fast_log::plugin::file_split::RollingType;
+use fast_log::plugin::file_split::KeepType;
 use fast_log::plugin::packer::LogPacker;
 use std::time::Instant;
 
@@ -14,7 +14,7 @@ fn main() {
             .file_split(
                 "target/logs/",
                 LogSize::MB(1),
-                RollingType::All,
+                KeepType::All,
                 LogPacker {},
             )
             .chan_len(Some(100000)),
diff --git a/example/src/split_log.rs b/example/src/split_log.rs
index 12aca3d..620467a 100644
--- a/example/src/split_log.rs
+++ b/example/src/split_log.rs
@@ -1,6 +1,6 @@
 use fast_log::config::Config;
 use fast_log::consts::LogSize;
-use fast_log::plugin::keep::{KeepAll, KeepNum};
+use fast_log::plugin::file_split::KeepType;
 use fast_log::plugin::packer::LogPacker;
 
 fn main() {
@@ -8,7 +8,7 @@ fn main() {
     fast_log::init(Config::new().chan_len(Some(100000)).console().file_split(
         "target/logs/",
         LogSize::MB(1),
-        KeepNum(2),
+        KeepType::KeepNum(2),
         LogPacker {},
     ))
     .unwrap();
diff --git a/example/src/split_log_date.rs b/example/src/split_log_date.rs
index 1f38df1..6e663c1 100644
--- a/example/src/split_log_date.rs
+++ b/example/src/split_log_date.rs
@@ -1,8 +1,7 @@
 use fast_log::config::Config;
 use fast_log::consts::LogSize;
 use fast_log::error::LogError;
-use fast_log::plugin::file_split::Packer;
-use fast_log::plugin::keep::KeepNum;
+use fast_log::plugin::file_split::{Packer, KeepType};
 use std::fs::{File, OpenOptions};
 use std::io::{Read, Write};
 use std::path::Path;
@@ -11,9 +10,9 @@ use std::time::Duration;
 
 ///pack by an date
 #[derive(Clone)]
-pub struct LogDatePacker {}
+pub struct DateLogPacker {}
 
-impl LogDatePacker {
+impl DateLogPacker {
     pub fn log_name_create_by_time(
         &self,
         first_file_path: &str,
@@ -33,7 +32,7 @@ impl LogDatePacker {
         return new_log_name;
     }
 }
-impl Packer for LogDatePacker {
+impl Packer for DateLogPacker {
     fn pack_name(&self) -> &'static str {
         "log"
     }
@@ -76,8 +75,8 @@ fn main() {
     fast_log::init(Config::new().chan_len(Some(100000)).console().file_split(
         "target/logs/",
         LogSize::MB(1),
-        KeepNum(2),
-        LogDatePacker {},
+        KeepType::KeepNum(2),
+        DateLogPacker {},
     ))
     .unwrap();
     for _ in 0..40000 {
diff --git a/example/src/split_log_gz.rs b/example/src/split_log_gz.rs
index b6f2e4e..ec12c91 100644
--- a/example/src/split_log_gz.rs
+++ b/example/src/split_log_gz.rs
@@ -1,13 +1,13 @@
 use fast_log::config::Config;
 use fast_log::consts::LogSize;
-use fast_log::plugin::file_split::RollingType;
+use fast_log::plugin::file_split::KeepType;
 use fast_log::plugin::packer::GZipPacker;
 
 fn main() {
     fast_log::init(Config::new().chan_len(Some(100000)).console().file_split(
         "target/logs/",
         LogSize::KB(50),
-        RollingType::KeepNum(5),
+        KeepType::KeepNum(5),
         GZipPacker {},
     ))
     .unwrap();
diff --git a/example/src/split_log_lz4.rs b/example/src/split_log_lz4.rs
index f66a042..e268fae 100644
--- a/example/src/split_log_lz4.rs
+++ b/example/src/split_log_lz4.rs
@@ -1,13 +1,13 @@
 use fast_log::config::Config;
 use fast_log::consts::LogSize;
-use fast_log::plugin::file_split::RollingType;
+use fast_log::plugin::file_split::KeepType;
 use fast_log::plugin::packer::LZ4Packer;
 
 fn main() {
     fast_log::init(Config::new().chan_len(Some(100000)).console().file_split(
         "target/logs/",
         LogSize::KB(50),
-        RollingType::KeepNum(5),
+        KeepType::KeepNum(5),
         LZ4Packer {},
     ))
     .unwrap();
diff --git a/example/src/split_log_mmap.rs b/example/src/split_log_mmap.rs
index 17a2b47..367a39e 100644
--- a/example/src/split_log_mmap.rs
+++ b/example/src/split_log_mmap.rs
@@ -1,7 +1,7 @@
 use fast_log::config::Config;
 use fast_log::consts::LogSize;
 use fast_log::plugin::file_mmap::MmapFile;
-use fast_log::plugin::file_split::RollingType;
+use fast_log::plugin::file_split::KeepType;
 use fast_log::plugin::packer::LogPacker;
 
 fn main() {
@@ -12,7 +12,7 @@ fn main() {
             .split::<MmapFile, LogPacker>(
                 "target/logs/temp.log",
                 LogSize::MB(1),
-                RollingType::All,
+                KeepType::All,
                 LogPacker {},
             ),
     )
diff --git a/example/src/split_log_zip.rs b/example/src/split_log_zip.rs
index ba7bad4..3c5caeb 100644
--- a/example/src/split_log_zip.rs
+++ b/example/src/split_log_zip.rs
@@ -1,5 +1,5 @@
 use fast_log::consts::LogSize;
-use fast_log::plugin::file_split::RollingType;
+use fast_log::plugin::file_split::KeepType;
 use fast_log::plugin::packer::ZipPacker;
 use fast_log::config::Config;
 
@@ -8,7 +8,7 @@ fn main() {
     fast_log::init(Config::new().chan_len(Some(100000)).console().file_split(
         "target/logs/",
         LogSize::KB(50),
-        RollingType::KeepNum(5),
+        KeepType::KeepNum(5),
         ZipPacker {},
     ))
     .unwrap();
diff --git a/src/config.rs b/src/config.rs
index 6bd46d6..cdf99a3 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -122,11 +122,11 @@ impl Config {
         self,
         file_path: &str,
         temp_size: LogSize,
-        rolling_type: R,
+        keeper: R,
         packer: P,
     ) -> Self {
         self.appends.push(Mutex::new(Box::new(
-            FileSplitAppender::<RawFile, P>::new(file_path, temp_size, rolling_type, packer).unwrap(),
+            FileSplitAppender::<RawFile, P>::new(file_path, temp_size, keeper, packer).unwrap(),
         )));
         self
     }
diff --git a/src/fast_log.rs b/src/fast_log.rs
index 265e441..1f6c93a 100644
--- a/src/fast_log.rs
+++ b/src/fast_log.rs
@@ -63,7 +63,7 @@ impl Log for Logger {
         if let Some(filter) = LOGGER.cfg.get() {
             if let Some(send) = LOGGER.send.get() {
                 if !filter.filter.filter(record) {
-                    send.send(FastLogRecord {
+                    let _= send.send(FastLogRecord {
                         command: Command::CommandRecord,
                         level: record.level(),
                         target: record.metadata().target().to_string(),
@@ -93,13 +93,13 @@ pub fn init(config: Config) -> Result<&'static Logger, LogError> {
         return Err(LogError::from("[fast_log] appends can not be empty!"));
    }
    let (s, r) = chan(config.chan_len);
-    LOGGER.send.set(s).map_err(|e| LogError::from("set fail"))?;
-    LOGGER.recv.set(r).map_err(|e| LogError::from("set fail"))?;
+    LOGGER.send.set(s).map_err(|_| LogError::from("set fail"))?;
+    LOGGER.recv.set(r).map_err(|_| LogError::from("set fail"))?;
    LOGGER.set_level(config.level);
    LOGGER
        .cfg
        .set(config)
-        .map_err(|e| LogError::from("set fail"))?;
+        .map_err(|_| LogError::from("set fail="))?;
    //main recv data
    log::set_logger(LOGGER.deref())
        .map(|()| log::set_max_level(LOGGER.cfg.get().unwrap().level))
@@ -192,7 +192,7 @@ pub fn init(config: Config) -> Result<&'static Logger, LogError> {
                 }
                 let data = Arc::new(remain);
                 for x in senders.iter() {
-                    x.send(data.clone());
+                    let _= x.send(data.clone());
                 }
                 if exit {
                     break;
diff --git a/src/filter.rs b/src/filter.rs
index 744d2fa..1e11832 100644
--- a/src/filter.rs
+++ b/src/filter.rs
@@ -7,7 +7,7 @@ pub trait Filter: Send + Sync {
 pub struct NoFilter {}
 
 impl Filter for NoFilter {
-    fn filter(&self, module: &log::Record) -> bool {
+    fn filter(&self, _module: &log::Record) -> bool {
         return false;
     }
 }
diff --git a/src/lib.rs b/src/lib.rs
index e8970de..7a5baaf 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,7 +1,3 @@
-#![allow(unused_variables)]
-#![allow(unused_assignments)]
-#![allow(unused_must_use)]
-
 extern crate core;
 
 pub mod appender;
diff --git a/src/plugin/file.rs b/src/plugin/file.rs
index fa58c3a..02fc58a 100644
--- a/src/plugin/file.rs
+++ b/src/plugin/file.rs
@@ -14,7 +14,7 @@ impl FileAppender {
         let log_file_path = log_file_path.replace("\\", "/");
         if let Some(right) = log_file_path.rfind("/") {
             let path = &log_file_path[0..right];
-            std::fs::create_dir_all(path);
+            let _= std::fs::create_dir_all(path);
         }
         Ok(Self {
             file: RefCell::new(
@@ -34,10 +34,10 @@ impl LogAppender for FileAppender {
         for x in records {
             buf.push_str(&x.formated);
         }
-        log_file.write_all(buf.as_bytes());
+        let _= log_file.write_all(buf.as_bytes());
     }
 
     fn flush(&self) {
-        self.file.borrow_mut().flush();
+        let _= self.file.borrow_mut().flush();
     }
 }
diff --git a/src/plugin/file_loop.rs b/src/plugin/file_loop.rs
index b141334..52019e8 100644
--- a/src/plugin/file_loop.rs
+++ b/src/plugin/file_loop.rs
@@ -1,8 +1,7 @@
 use crate::appender::{FastLogRecord, LogAppender};
 use crate::consts::LogSize;
 use crate::error::LogError;
-use crate::plugin::file_split::{FileSplitAppender, SplitFile};
-use crate::plugin::keep::KeepNum;
+use crate::plugin::file_split::{FileSplitAppender, KeepType, SplitFile};
 use crate::plugin::packer::LogPacker;
 
 /// Single logs are stored in rolling mode by capacity
@@ -16,7 +15,7 @@ impl FileLoopAppender {
             file: FileSplitAppender::<F, LogPacker>::new(
                 log_file_path,
                 size,
-                KeepNum(1),
+                KeepType::KeepNum(1),
                 LogPacker {},
             )?,
         })
diff --git a/src/plugin/file_mmap.rs b/src/plugin/file_mmap.rs
index 7d8b9da..1d327f5 100644
--- a/src/plugin/file_mmap.rs
+++ b/src/plugin/file_mmap.rs
@@ -21,14 +21,14 @@ impl MmapFile {
         let log_file_path = log_file_path.replace("\\", "/");
         if let Some(right) = log_file_path.rfind("/") {
             let path = &log_file_path[0..right];
-            std::fs::create_dir_all(path);
+            let _= std::fs::create_dir_all(path);
         }
         let file = OpenOptions::new()
             .write(true)
             .read(true)
             .create(true)
             .open(&log_file_path)?;
-        file.set_len(size.get_len() as u64);
+        file.set_len(size.get_len() as u64)?;
         let mmap = unsafe {
             MmapOptions::new().map(&file).map_err(|e| {
                 println!("e={}", e);
@@ -132,8 +132,8 @@ impl SplitFile for MmapFile {
     fn truncate(&self) -> std::io::Result<()> {
         let file = unsafe { &mut *self.file.get() };
         file.set_len(0)?;
-        file.flush();
-        file.set_len(self.size.get_len() as u64);
+        file.flush()?;
+        file.set_len(self.size.get_len() as u64)?;
         let mmap = unsafe {
             MmapOptions::new()
                 .map(&file)
@@ -151,7 +151,7 @@
     }
 
     fn flush(&self) {
-        self.bytes.borrow_mut().flush();
+        let _= self.bytes.borrow_mut().flush();
     }
 
     fn len(&self) -> usize {
diff --git a/src/plugin/file_split.rs b/src/plugin/file_split.rs
index c6c9a6c..5aaeda4 100644
--- a/src/plugin/file_split.rs
+++ b/src/plugin/file_split.rs
@@ -38,7 +38,7 @@ impl From<File> for RawFile {
 }
 
 impl SplitFile for RawFile {
-    fn new(path: &str, temp_size: LogSize) -> Result<Self, LogError>
+    fn new(path: &str, _temp_size: LogSize) -> Result<Self, LogError>
     where
         Self: Sized,
     {
@@ -61,14 +61,14 @@
     }
 
     fn truncate(&self) -> std::io::Result<()> {
-        self.inner.borrow_mut().set_len(0);
-        self.inner.borrow_mut().flush();
+        self.inner.borrow_mut().set_len(0)?;
+        self.inner.borrow_mut().flush()?;
         self.inner.borrow_mut().seek(SeekFrom::Start(0))?;
         Ok(())
     }
 
     fn flush(&self) {
-        self.inner.borrow_mut().flush();
+        let _= self.inner.borrow_mut().flush();
     }
 
     fn len(&self) -> usize {
@@ -176,7 +176,7 @@ impl FileSplitAppender {
     pub fn new(
         file_path: &str,
         temp_size: LogSize,
-        rolling: R,
+        rolling_type: R,
         packer: P,
     ) -> Result<FileSplitAppender<F, P>, LogError> {
         let temp_name = {
@@ -201,7 +201,7 @@
                 dir_path = v.to_str().unwrap_or_default().to_string();
             }
         }
-        std::fs::create_dir_all(&dir_path);
+        let _= std::fs::create_dir_all(&dir_path);
         let mut sp = "";
         if !dir_path.is_empty() {
             sp = "/";
@@ -214,10 +214,10 @@
             offset += 1;
         }
         temp_bytes.store(offset, Ordering::Relaxed);
-        file.seek(SeekFrom::Start(temp_bytes.load(Ordering::Relaxed) as u64));
+        let _= file.seek(SeekFrom::Start(temp_bytes.load(Ordering::Relaxed) as u64));
         let (sender, receiver) = chan(None);
         let arc_packer = Arc::new(packer);
-        spawn_saver(temp_name.clone(), receiver, rolling, arc_packer.clone());
+        spawn_saver(temp_name.clone(), receiver, rolling_type, arc_packer.clone());
         Ok(Self {
             temp_bytes,
             dir_path: dir_path.to_string(),
@@ -237,8 +237,8 @@
         let first_file_path = format!("{}{}{}", self.dir_path, sp, &self.temp_name);
         let new_log_name = self.packer.log_name_create(&first_file_path);
         self.file.flush();
-        std::fs::copy(&first_file_path, &new_log_name);
-        self.sender.send(LogPack {
+        let _= std::fs::copy(&first_file_path, &new_log_name);
+        let _= self.sender.send(LogPack {
             dir: self.dir_path.clone(),
             new_log_name: new_log_name,
             wg: None,
@@ -248,7 +248,7 @@
 
     pub fn truncate(&self) {
         //reset data
-        self.file.truncate();
+        let _= self.file.truncate();
         self.temp_bytes.store(0, Ordering::SeqCst);
     }
 }
@@ -281,7 +281,7 @@ impl LogPack {
         let r = packer.do_pack(log_file.unwrap(), log_file_path);
         if r.is_err() && packer.retry() > 0 {
             let mut retry = 1;
-            while let Err(packs) = self.do_pack(packer) {
+            while let Err(_packs) = self.do_pack(packer) {
                 retry += 1;
                 if retry > packer.retry() {
                     break;
@@ -297,6 +297,7 @@
 
 /// keep logs, for example keep by log num or keep by log create time.
 /// that do not meet the retention conditions will be deleted
+/// you can use KeepType or RollingType::All
 pub trait Keep: Send {
     /// return removed nums
     fn do_keep(&self, dir: &str, temp_name: &str) -> i64;
@@ -330,9 +331,10 @@
 }
 
 ///rolling keep type
-#[deprecated(note = "use RollingAll,RollingNum,RollingDuration replace this")]
+pub type RollingType = KeepType;
+///rolling keep type
 #[derive(Copy, Clone, Debug)]
-pub enum RollingType {
+pub enum KeepType {
     /// keep All of log packs
     All,
     /// keep by Time Duration,
@@ -344,32 +346,30 @@
     KeepNum(i64),
 }
 
-impl Keep for RollingType {
+impl Keep for KeepType {
     fn do_keep(&self, dir: &str, temp_name: &str) -> i64 {
         let mut removed = 0;
         match self {
-            RollingType::KeepNum(n) => {
+            KeepType::KeepNum(n) => {
                 let paths_vec = self.read_paths(dir, temp_name);
                 for index in 0..paths_vec.len() {
                     if index >= (*n) as usize {
                         let item = &paths_vec[index];
-                        std::fs::remove_file(item.path());
+                        let _ = std::fs::remove_file(item.path());
                         removed += 1;
                     }
                 }
             }
-            RollingType::KeepTime(duration) => {
+            KeepType::KeepTime(duration) => {
                 let paths_vec = self.read_paths(dir, temp_name);
                 let now = DateTime::now();
                 for index in 0..paths_vec.len() {
                     let item = &paths_vec[index];
-                    let file_name = item.file_name();
-                    let name = file_name.to_str().unwrap_or("").to_string();
                     if let Ok(m) = item.metadata() {
                         if let Ok(c) = m.created() {
                             let time = DateTime::from(c);
                             if now.clone().sub(duration.clone()) > time {
-                                std::fs::remove_file(item.path());
+                                let _ = std::fs::remove_file(item.path());
                                 removed += 1;
                             }
                         }
@@ -412,7 +412,7 @@ impl LogAppender for FileSplitAppender
                 }
                 Command::CommandExit => {}
                 Command::CommandFlush(ref w) => {
-                    self.sender.send(LogPack {
+                    let _= self.sender.send(LogPack {
                         dir: "".to_string(),
                         new_log_name: "".to_string(),
                         wg: Some(w.clone()),
@@ -421,7 +421,7 @@
             }
         }
         if !temp.is_empty() {
-            self.temp_bytes.fetch_add(
+            let _= self.temp_bytes.fetch_add(
                 {
                     let w = self.file.write(temp.as_bytes());
                     if let Ok(w) = w {
@@ -442,7 +442,7 @@
 fn spawn_saver(
     temp_name: String,
     r: Receiver<LogPack>,
-    rolling: R,
+    rolling_type: R,
     packer: Arc<P>,
 ) {
     std::thread::spawn(move || {
@@ -453,11 +453,11 @@
                 let remove = pack.do_pack(packer.deref());
                 if let Ok(remove) = remove {
                     if remove {
-                        std::fs::remove_file(log_file_path);
+                        let _ = std::fs::remove_file(log_file_path);
                     }
                 }
                 //do rolling
-                rolling.do_keep(&pack.dir, &temp_name);
+                rolling_type.do_keep(&pack.dir, &temp_name);
             } else {
                 break;
             }
diff --git a/src/plugin/keep.rs b/src/plugin/keep.rs
deleted file mode 100644
index 9b1629f..0000000
--- a/src/plugin/keep.rs
+++ /dev/null
@@ -1,56 +0,0 @@
-use crate::plugin::file_split::Keep;
-use fastdate::DateTime;
-use std::time::Duration;
-
-/// keeps all,do not rolling
-pub struct KeepAll {}
-impl Keep for KeepAll {
-    fn do_keep(&self, dir: &str, temp_name: &str) -> i64 {
-        0
-    }
-}
-
-/// rolling from file num
-pub struct KeepNum(pub i64);
-
-impl Keep for KeepNum {
-    fn do_keep(&self, dir: &str, temp_name: &str) -> i64 {
-        let mut removed = 0;
-        let paths_vec = self.read_paths(dir, temp_name);
-        for index in 0..paths_vec.len() {
-            if index >= (self.0) as usize {
-                let item = &paths_vec[index];
-                let path = item.path();
-                std::fs::remove_file(path);
-                removed += 1;
-            }
-        }
-        removed
-    }
-}
-
-/// rolling from metadata
-pub struct KeepDuration(pub Duration);
-
-impl Keep for KeepDuration {
-    fn do_keep(&self, dir: &str, temp_name: &str) -> i64 {
-        let mut removed = 0;
-        let paths_vec = self.read_paths(dir, temp_name);
-        let now = DateTime::now();
-        for index in 0..paths_vec.len() {
-            let item = &paths_vec[index];
-            let file_name = item.file_name();
-            let name = file_name.to_str().unwrap_or("").to_string();
-            if let Ok(m) = item.metadata() {
-                if let Ok(c) = m.created() {
-                    let time = DateTime::from(c);
-                    if now.clone().sub(self.0.clone()) > time {
-                        std::fs::remove_file(item.path());
-                        removed += 1;
-                    }
-                }
-            }
-        }
-        removed
-    }
-}
diff --git a/src/plugin/mod.rs b/src/plugin/mod.rs
index 932f5a8..cdb2f71 100644
--- a/src/plugin/mod.rs
+++ b/src/plugin/mod.rs
@@ -5,5 +5,3 @@ pub mod file_loop;
 pub mod file_mmap;
 pub mod file_split;
 pub mod packer;
-
-pub mod keep;
diff --git a/src/plugin/packer.rs b/src/plugin/packer.rs
index 052a100..0228dcf 100644
--- a/src/plugin/packer.rs
+++ b/src/plugin/packer.rs
@@ -1,7 +1,6 @@
 use crate::error::LogError;
 use crate::plugin::file_split::Packer;
 use std::fs::File;
-use std::io::Write;
 
 /// keep temp{date}.log
 #[derive(Clone)]
@@ -11,7 +10,7 @@ impl Packer for LogPacker {
         "log"
     }
 
-    fn do_pack(&self, log_file: File, log_file_path: &str) -> Result<bool, LogError> {
+    fn do_pack(&self, _log_file: File, _log_file_path: &str) -> Result<bool, LogError> {
         //do nothing,and not remove file
         return Ok(false);
     }
@@ -51,10 +50,10 @@ impl Packer for ZipPacker {
         let zip_file = zip_file.unwrap();
         //write zip bytes data
         let mut zip = zip::ZipWriter::new(zip_file);
-        zip.start_file(log_name, FileOptions::default());
+        zip.start_file(log_name, FileOptions::default()).map_err(|e|LogError::from(e.to_string()))?;
         //buf reader
-        std::io::copy(&mut log_file, &mut zip);
-        zip.flush();
+        std::io::copy(&mut log_file, &mut zip).map_err(|e|LogError::from(e.to_string()))?;
+        zip.flush().map_err(|e|LogError::from(e.to_string()))?;
         let finish: ZipResult<File> = zip.finish();
         if finish.is_err() {
             //println!("[fast_log] try zip fail{:?}", finish.err());
@@ -82,12 +81,8 @@ impl Packer for LZ4Packer {
     }
 
     fn do_pack(&self, mut log_file: File, log_file_path: &str) -> Result<bool, LogError> {
-        let mut log_name = log_file_path.replace("\\", "/").to_string();
-        if let Some(v) = log_file_path.rfind("/") {
-            log_name = log_name[(v + 1)..log_name.len()].to_string();
-        }
         let lz4_path = log_file_path.replace(".log", ".lz4");
-        let lz4_file = std::fs::File::create(&lz4_path);
+        let lz4_file = File::create(&lz4_path);
         if lz4_file.is_err() {
             return Err(LogError::from(format!(
                 "[fast_log] create(&{}) fail:{}",
@@ -99,7 +94,7 @@
         //write lz4 bytes data
         let mut encoder = FrameEncoder::new(lz4_file);
         //buf reader
-        std::io::copy(&mut log_file, &mut encoder);
+        std::io::copy(&mut log_file, &mut encoder).map_err(|e|LogError::from(e.to_string()))?;
         let result = encoder.finish();
         if result.is_err() {
             return Err(LogError::from(format!(
@@ -127,12 +122,8 @@ impl Packer for GZipPacker {
 
     fn do_pack(&self, mut log_file: File, log_file_path: &str) -> Result<bool, LogError> {
         use std::io::Write;
-        let mut log_name = log_file_path.replace("\\", "/").to_string();
-        if let Some(v) = log_file_path.rfind("/") {
-            log_name = log_name[(v + 1)..log_name.len()].to_string();
-        }
         let zip_path = log_file_path.replace(".log", ".gz");
-        let zip_file = std::fs::File::create(&zip_path);
+        let zip_file = File::create(&zip_path);
         if zip_file.is_err() {
             return Err(LogError::from(format!(
                 "[fast_log] create(&{}) fail:{}",
@@ -143,8 +134,8 @@
         let zip_file = zip_file.unwrap();
         //write zip bytes data
         let mut zip = GzEncoder::new(zip_file, Compression::default());
-        std::io::copy(&mut log_file, &mut zip);
-        zip.flush();
+        std::io::copy(&mut log_file, &mut zip).map_err(|e|LogError::from(e.to_string()))?;
+        zip.flush().map_err(|e|LogError::from(e.to_string()))?;
         let finish = zip.finish();
         if finish.is_err() {
             return Err(LogError::from(format!(
diff --git a/src/runtime.rs b/src/runtime.rs
index 9365552..1079697 100644
--- a/src/runtime.rs
+++ b/src/runtime.rs
@@ -28,7 +28,7 @@ where
 }
 
 #[cfg(feature = "runtime_thread")]
-pub fn spawn_stack_size<F>(f: F, stack_size: usize) -> JoinHandle<()>
+pub fn spawn_stack_size<F>(f: F, _stack_size: usize) -> JoinHandle<()>
 where
     F: FnOnce() + Send + 'static,
 {
diff --git a/tests/split_test.rs b/tests/split_test.rs
index 8e2a1ee..5162fa4 100644
--- a/tests/split_test.rs
+++ b/tests/split_test.rs
@@ -2,8 +2,7 @@
 mod test {
     use fast_log::appender::{Command, FastLogRecord, LogAppender};
     use fast_log::consts::LogSize;
-    use fast_log::plugin::file_split::{FileSplitAppender, Keep, Packer, RawFile};
-    use fast_log::plugin::keep::{KeepAll, KeepNum};
+    use fast_log::plugin::file_split::{FileSplitAppender, Keep, Packer, RawFile, RollingType};
     use fast_log::plugin::packer::LogPacker;
     use log::Level;
     use std::fs::remove_dir_all;
@@ -16,7 +15,7 @@
         let appender = FileSplitAppender::<RawFile, LogPacker>::new(
             "target/test/",
             LogSize::MB(1),
-            KeepAll {},
+            RollingType::All,
             LogPacker {},
         )
        .unwrap();
@@ -33,7 +32,7 @@
        }]);
        appender.send_pack();
        sleep(Duration::from_secs(1));
-        let rolling_num = KeepNum(0).do_keep("target/test/", "temp.log");
+        let rolling_num = RollingType::KeepNum(0).do_keep("target/test/", "temp.log");
        assert_eq!(rolling_num, 1);
        let _ = remove_dir_all("target/test/");
    }
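A minimal usage sketch of the renamed retention API, mirroring example/src/split_log.rs in the patch above: the keep policy now comes from fast_log::plugin::file_split::KeepType rather than fast_log::plugin::keep::{KeepAll, KeepNum}, and RollingType remains available as a type alias of KeepType, so older call sites keep compiling. The log::info!/log::logger().flush() calls are illustrative and not taken from this diff.

use fast_log::config::Config;
use fast_log::consts::LogSize;
use fast_log::plugin::file_split::KeepType; // was: fast_log::plugin::keep::KeepNum
use fast_log::plugin::packer::LogPacker;

fn main() {
    // split the log file at 1 MB and keep at most the 2 newest rolled packs
    fast_log::init(Config::new().chan_len(Some(100000)).console().file_split(
        "target/logs/",
        LogSize::MB(1),
        KeepType::KeepNum(2), // was: KeepNum(2)
        LogPacker {},
    ))
    .unwrap();
    log::info!("hello fast_log");
    log::logger().flush();
}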