chore: refactor metrics and fetching slightly
calmh committed Jun 27, 2024
1 parent d3760f4 commit 92cc531
Showing 4 changed files with 57 additions and 76 deletions.
47 changes: 18 additions & 29 deletions collector/checks.go
@@ -1,8 +1,7 @@
package collector

import (
"strconv"
"sync"
"cmp"

"github.com/go-logr/logr"
"github.com/prometheus/client_golang/prometheus"
@@ -26,13 +25,11 @@ func NewChecksCollector(s System, client *updown.Client, log logr.Logger) *Check
Client: client,
Log: log,
Enabled: prometheus.NewDesc(
prometheus.BuildFQName(s.Namespace, subsystem, "enabled"),
"status of check (enabled=1)",
prometheus.BuildFQName(s.Namespace, subsystem, "up"),
"status of check",
[]string{
"token",
"url",
"status",
"ssl_valid",
"alias",
},
nil,
),
@@ -49,30 +46,22 @@ func (c *ChecksCollector) Collect(ch chan<- prometheus.Metric) {
return
}

var wg sync.WaitGroup
for _, check := range checks {
wg.Add(1)
go func(check updown.Check) {
defer wg.Done()
ch <- prometheus.MustNewConstMetric(
c.Enabled,
prometheus.CounterValue,
func(enabled bool) (result float64) {
if enabled {
result = 1.0
}
return result
}(check.Enabled),
[]string{
check.Token,
check.URL,
strconv.FormatUint(uint64(check.LastStatus), 10),
strconv.FormatBool(check.SSL.Valid),
}...,
)
}(check)
ch <- prometheus.MustNewConstMetric(
c.Enabled,
prometheus.CounterValue,
boolFloat(!check.Down),
check.URL,
cmp.Or(check.Alias, check.URL),
)
}
wg.Wait()
}

func boolFloat(enabled bool) float64 {
if enabled {
return 1.0
}
return 0.0
}

// Describe implements Prometheus' Collector interface and is used to describe metrics
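An aside on the two helpers used above: cmp.Or is the Go 1.22 standard-library function that returns its first non-zero argument, which is what lets the alias label fall back to the check's URL when no alias is set, while boolFloat turns the up/down state into a 0/1 sample. A minimal, self-contained sketch, not part of the commit, with invented values:

package main

import (
    "cmp"
    "fmt"
)

// boolFloat mirrors the helper added in this commit.
func boolFloat(enabled bool) float64 {
    if enabled {
        return 1.0
    }
    return 0.0
}

func main() {
    // cmp.Or returns the first non-zero argument, so an empty alias
    // falls back to the URL for the "alias" label value.
    fmt.Println(cmp.Or("", "https://example.com"))        // https://example.com
    fmt.Println(cmp.Or("My site", "https://example.com")) // My site

    // A check that is not down exports 1, a down check exports 0.
    fmt.Println(boolFloat(!false), boolFloat(!true)) // 1 0
}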
65 changes: 28 additions & 37 deletions collector/metrics.go
@@ -1,7 +1,7 @@
package collector

import (
"strconv"
"cmp"
"sync"

"github.com/go-logr/logr"
@@ -26,12 +26,11 @@ func NewMetricsCollector(s System, client *updown.Client, log logr.Logger) *Metr
Client: client,
Log: log,
ResponseTime: prometheus.NewDesc(
prometheus.BuildFQName(s.Namespace, subsystem, "response_times"),
"check metrics response times (ms)",
prometheus.BuildFQName(s.Namespace, subsystem, "response_time_seconds"),
"check metrics response times (seconds)",
[]string{
"token",
"url",
"status",
"alias",
},
nil,
),
@@ -51,41 +50,33 @@ func (c *MetricsCollector) Collect(ch chan<- prometheus.Metric) {

var wg sync.WaitGroup
for _, check := range checks {
wg.Add(1)
go func(check updown.Check) {
defer wg.Done()
log := log.WithValues("URL", check.URL)

log := log.WithValues("URL", check.URL)
if check.Token == "" {
log.Info("unable to obtain token for Check")
return
}

if check.Token == "" {
log.Info("unable to obtain token for Check")
return
}
metrics, err := c.Client.GetCheckMetrics(check.Token)
if err != nil {
log.Error(err, "unable to read metrics for Check")
return
}

metrics, err := c.Client.GetCheckMetrics(check.Token)
if err != nil {
log.Info("unable to read metrics for Check")
return
}

respTime := metrics.Requests.ByResponseTime
ch <- prometheus.MustNewConstHistogram(
c.ResponseTime,
// updown doesn't provide values for above 4s (i.e. Infinity)
// website only permits a maximum value of 2s so I assume 4s is intended to represent "all else"
// Assuming that Under4000 is effectively infinity and using it as the value for count
uint64(respTime.Under4000),
// updown doesn't provide a value for the sum of values
0.0,
// Convert the struct into a map of buckets
respTime.ToBuckets(),
[]string{
check.Token,
check.URL,
strconv.FormatUint(uint64(check.LastStatus), 10),
}...,
)
}(check)
respTime := metrics.Requests.ByResponseTime
ch <- prometheus.MustNewConstHistogram(
c.ResponseTime,
// updown doesn't provide values for above 4s (i.e. Infinity)
// website only permits a maximum value of 2s so I assume 4s is intended to represent "all else"
// Assuming that Under4000 is effectively infinity and using it as the value for count
uint64(respTime.Under4000),
// updown doesn't provide a value for the sum of values
0.0,
// Convert the struct into a map of buckets
respTime.ToBuckets(),
check.URL,
cmp.Or(check.Alias, check.URL),
)
}
wg.Wait()
}
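For readers unfamiliar with constant histograms: the block above hands Prometheus a pre-aggregated histogram whose bucket upper bounds are now in seconds, with Under4000 doubling as the total observation count and a zero sum because updown does not report one. The sketch below shows the same construction in isolation; the metric name, label values, and counts are invented for illustration rather than taken from the exporter or a live updown.io account:

package main

import (
    "fmt"

    "github.com/prometheus/client_golang/prometheus"
)

func main() {
    desc := prometheus.NewDesc(
        "updown_check_response_time_seconds", // illustrative name only
        "check metrics response times (seconds)",
        []string{"url", "alias"},
        nil,
    )

    // Cumulative counts keyed by upper bound in seconds, mirroring what
    // ToBuckets returns after the ms->seconds change in this commit.
    buckets := map[float64]uint64{
        0.125: 40,
        0.250: 55,
        0.500: 60,
        1.000: 62,
        2.000: 63,
        4.000: 63, // updown stops at 4s, so this is also the total count
    }

    m := prometheus.MustNewConstHistogram(
        desc,
        63,  // total observation count (Under4000 in the collector)
        0.0, // updown does not expose a sum of observations
        buckets,
        "https://example.com", "example",
    )
    fmt.Println(m.Desc())
}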
9 changes: 5 additions & 4 deletions updown/client.go
@@ -3,7 +3,7 @@ package updown
import (
"encoding/json"
"fmt"
"io/ioutil"
"io"
"net/http"

"github.com/go-logr/logr"
@@ -51,7 +51,7 @@ func (c *Client) GetChecks() ([]Check, error) {
return []Check{}, err
}

body, err := ioutil.ReadAll(resp.Body)
body, err := io.ReadAll(resp.Body)
if err != nil {
log.Info("Unable to read response body")
return []Check{}, err
@@ -101,7 +101,7 @@ func (c *Client) GetCheckMetrics(token string) (Metrics, error) {
return Metrics{}, err
}

body, err := ioutil.ReadAll(resp.Body)
body, err := io.ReadAll(resp.Body)
if err != nil {
log.Info("Unable to read response body")
return Metrics{}, err
@@ -113,7 +113,8 @@ func (c *Client) GetCheckMetrics(token string) (Metrics, error) {

metrics := Metrics{}
if err := json.Unmarshal(body, &metrics); err != nil {
return Metrics{}, err
fmt.Println(string(body))
return Metrics{}, fmt.Errorf("unmarshal: %w", err)
}

// log.Info("Result",
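A note on the new error handling above: wrapping with %w keeps the underlying json error available to callers through errors.Is and errors.As, where the plain return previously discarded that context. A small self-contained sketch; the metrics struct here is a stand-in rather than updown's real type:

package main

import (
    "encoding/json"
    "errors"
    "fmt"
)

// metrics is a stand-in for the real updown Metrics type.
type metrics struct {
    Requests struct {
        Samples int `json:"samples"`
    } `json:"requests"`
}

func parse(body []byte) (metrics, error) {
    var m metrics
    if err := json.Unmarshal(body, &m); err != nil {
        return metrics{}, fmt.Errorf("unmarshal: %w", err)
    }
    return m, nil
}

func main() {
    _, err := parse([]byte(`{"requests":`)) // truncated JSON to force an error
    fmt.Println(err)                        // unmarshal: unexpected end of JSON input

    var syntaxErr *json.SyntaxError
    fmt.Println(errors.As(err, &syntaxErr)) // true: the wrapped cause survives
}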
12 changes: 6 additions & 6 deletions updown/metrics.go
@@ -29,12 +29,12 @@ type ByResponseTime struct {
// ToBuckets is a method that converts ByResponseTime structs into Prometheus buckets
func (x *ByResponseTime) ToBuckets() map[float64]uint64 {
return map[float64]uint64{
125.0: uint64(x.Under125),
250.0: uint64(x.Under250),
500.0: uint64(x.Under500),
1000.0: uint64(x.Under1000),
2000.0: uint64(x.Under2000),
4000.0: uint64(x.Under4000),
0.125: uint64(x.Under125),
0.250: uint64(x.Under250),
0.500: uint64(x.Under500),
1.000: uint64(x.Under1000),
2.000: uint64(x.Under2000),
4.000: uint64(x.Under4000),
}
}

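The bucket boundaries themselves are unchanged here; only their unit is, since Prometheus conventions express durations in base units of seconds. A trivial sketch of the conversion, not part of the commit:

package main

import "fmt"

func main() {
    // Old upper bounds were in milliseconds; the new map keys express the
    // same boundaries in seconds.
    msBounds := []float64{125, 250, 500, 1000, 2000, 4000}
    for _, ms := range msBounds {
        fmt.Printf("%4.0f ms -> %gs\n", ms, ms/1000)
    }
}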
