file logger: reducing the frequency of disk writes (#841)
* reducing the frequency of disk writes
* default config
dmachard authored Oct 9, 2024
1 parent 4fad268 commit ff507aa
Showing 4 changed files with 43 additions and 15 deletions.
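The change replaces per-message writes with in-memory batching: formatted messages accumulate in a buffer and are handed to the file writer only when the batch reaches `max-batch-size` bytes or the `flush-interval` timer fires. Before reading the diff, here is a minimal, standalone sketch of that pattern; it is not the logger's actual code, and the identifiers (`maxBatchSize`, `flushEvery`, `lines`) and the file name are illustrative only.

```go
// Sketch of size- and time-bounded batching over a buffered file writer.
// Identifiers and the file name are illustrative, not taken from the repository.
package main

import (
	"bufio"
	"bytes"
	"log"
	"os"
	"time"
)

func main() {
	f, err := os.Create("batch.log")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	writer := bufio.NewWriterSize(f, 65536) // writer buffer sized like max-batch-size
	batch := new(bytes.Buffer)
	maxBatchSize := 65536
	flushEvery := time.NewTicker(1 * time.Second) // like flush-interval: 1
	defer flushEvery.Stop()

	lines := make(chan string)
	go func() {
		for i := 0; i < 100000; i++ {
			lines <- "dns message"
		}
		close(lines)
	}()

	for {
		select {
		case line, ok := <-lines:
			if !ok {
				// producer is done: write the remaining batch and flush to disk
				writer.Write(batch.Bytes())
				writer.Flush()
				return
			}
			batch.WriteString(line)
			batch.WriteString("\n")
			// hit the writer only once enough data has accumulated
			if batch.Len() >= maxBatchSize {
				writer.Write(batch.Bytes())
				batch.Reset()
			}
		case <-flushEvery.C:
			// periodic flush so low traffic still reaches the file promptly
			if batch.Len() > 0 {
				writer.Write(batch.Bytes())
				batch.Reset()
			}
			writer.Flush()
		}
	}
}
```

The diff below applies the same idea inside `workers/logfile.go`, sizing the `bufio.Writer` with `max-batch-size` and writing the batch on both the size threshold and the flush timer.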
config.yml (2 changes: 1 addition & 1 deletion)
@@ -17,7 +17,7 @@ global:
text-jinja: ""
worker:
interval-monitor: 10
- buffer-size: 4096
+ buffer-size: 8192
telemetry:
enabled: false
web-path: "/metrics"
docs/loggers/logger_file.md (6 changes: 5 additions & 1 deletion)
@@ -26,6 +26,9 @@ Options:
* `max-files` (integer)
> maximum number of files to retain. Set to zero if you want to disable this feature
+ * `max-batch-size` (integer)
+ > accumulate data before writing it to the file
* `flush-interval` (integer)
> flush buffer to log file every X seconds
@@ -62,7 +65,8 @@ logfile:
file-path: null
max-size: 100
max-files: 10
- flush-interval: 10
+ max-batch-size: 65536
+ flush-interval: 1
compress: false
compress-interval: 5
compress-postcommand: null
pkgconfig/loggers.go (3 changes: 2 additions & 1 deletion)
@@ -72,7 +72,8 @@ type ConfigLoggers struct {
FilePath string `yaml:"file-path" default:""`
MaxSize int `yaml:"max-size" default:"100"`
MaxFiles int `yaml:"max-files" default:"10"`
- FlushInterval int `yaml:"flush-interval" default:"10"`
+ MaxBatchSize int `yaml:"max-batch-size" default:"65536"`
+ FlushInterval int `yaml:"flush-interval" default:"1"`
Compress bool `yaml:"compress" default:"false"`
CompressInterval int `yaml:"compress-interval" default:"60"`
CompressPostCommand string `yaml:"compress-postcommand" default:""`
workers/logfile.go (47 changes: 35 additions & 12 deletions)
@@ -158,8 +158,7 @@ func (w *LogFile) OpenFile() error {

switch w.GetConfig().Loggers.LogFile.Mode {
case pkgconfig.ModeText, pkgconfig.ModeJSON, pkgconfig.ModeFlatJSON:
- bufferSize := 4096
- w.writerPlain = bufio.NewWriterSize(fd, bufferSize)
+ w.writerPlain = bufio.NewWriterSize(fd, w.config.Loggers.LogFile.MaxBatchSize)

case pkgconfig.ModePCAP:
w.writerPcap = pcapgo.NewWriter(fd)
@@ -469,13 +468,23 @@ func (w *LogFile) StartLogging() {
var data []byte
var err error

+ // Max size of a batch before forcing a write
+ batch := new(bytes.Buffer)
+ maxBatchSize := w.config.Loggers.LogFile.MaxBatchSize
+ batchSize := 0 // Current batch size

for {
select {
case <-w.OnLoggerStopped():
// stop timer
flushTimer.Stop()
w.commpressTimer.Stop()

+ // Force write remaining batch data
+ if batchSize > 0 {
+ w.WriteToPlain(batch.Bytes())
+ }

// flush writer
w.FlushWriters()

@@ -494,18 +503,15 @@
return
}

- // write to file
+ // Process the message based on the configured mode
+ var message []byte
switch w.GetConfig().Loggers.LogFile.Mode {

// with basic text mode
case pkgconfig.ModeText:
- w.WriteToPlain(dm.Bytes(w.textFormat,
- w.GetConfig().Global.TextFormatDelimiter,
- w.GetConfig().Global.TextFormatBoundary))

- var delimiter bytes.Buffer
- delimiter.WriteString("\n")
- w.WriteToPlain(delimiter.Bytes())
+ message = dm.Bytes(w.textFormat, w.GetConfig().Global.TextFormatDelimiter, w.GetConfig().Global.TextFormatBoundary)
+ batch.Write(message)
+ batch.WriteString("\n")

// with custom text mode
case pkgconfig.ModeJinja:
@@ -514,7 +520,7 @@
w.LogError("jinja template: %s", err)
continue
}
- w.WriteToPlain([]byte(textLine))
+ batch.Write([]byte(textLine))

// with json mode
case pkgconfig.ModeFlatJSON:
@@ -530,7 +536,7 @@
// with json mode
case pkgconfig.ModeJSON:
json.NewEncoder(buffer).Encode(dm)
- w.WriteToPlain(buffer.Bytes())
+ batch.Write(buffer.Bytes())
buffer.Reset()

// with dnstap mode
@@ -554,7 +560,24 @@
w.WriteToPcap(dm, pkt)
}

+ // Update the batch size
+ batchSize += batch.Len()
+
+ // If the batch exceeds the max size, force a write
+ if batchSize >= maxBatchSize {
+ w.WriteToPlain(batch.Bytes())
+ batch.Reset() // Reset batch after write
+ batchSize = 0
+ }

case <-flushTimer.C:
+ // Flush the current batch, then flush the writers
+ if batchSize > 0 {
+ w.WriteToPlain(batch.Bytes())
+ batch.Reset()
+ batchSize = 0
+ }

// flush writer
w.FlushWriters()

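As a rough, standalone illustration of why the commit targets disk-write frequency (this is not code from the repository; the file names and the sample log line are made up): writing every message straight to an `*os.File` costs one write syscall per message, whereas a `bufio.Writer` sized by `max-batch-size` coalesces them into large chunks, which is exactly what the batching above feeds.

```go
// Rough comparison of per-message writes vs. buffered writes.
// File names and the sample line are illustrative only.
package main

import (
	"bufio"
	"fmt"
	"log"
	"os"
	"time"
)

func main() {
	line := []byte("dnscollector CLIENT_RESPONSE NOERROR 10.0.0.1 A example.com.\n")

	direct, err := os.Create("direct.log")
	if err != nil {
		log.Fatal(err)
	}
	start := time.Now()
	for i := 0; i < 100000; i++ {
		direct.Write(line) // one write syscall per message
	}
	direct.Close()
	fmt.Println("unbuffered:", time.Since(start))

	f, err := os.Create("buffered.log")
	if err != nil {
		log.Fatal(err)
	}
	w := bufio.NewWriterSize(f, 65536) // sized like max-batch-size: 65536
	start = time.Now()
	for i := 0; i < 100000; i++ {
		w.Write(line) // buffered in memory, written out in ~64 KiB chunks
	}
	w.Flush()
	f.Close()
	fmt.Println("buffered:  ", time.Since(start))
}
```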
