Skip to content

Commit

Permalink
[chore]: reenable default revive rules (open-telemetry#36983)
Browse files Browse the repository at this point in the history
#### Description

By default revive enables a list of rules.
When a list of rules is defined, they are the only ones enabled.
This re-enables this default list.

Signed-off-by: Matthieu MOREL <[email protected]>
  • Loading branch information
mmorel-35 authored Jan 6, 2025
1 parent 60ae036 commit f05adc6
Show file tree
Hide file tree
Showing 10 changed files with 127 additions and 87 deletions.
43 changes: 43 additions & 0 deletions .golangci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -81,7 +81,50 @@ linters-settings:
# minimal confidence for issues, default is 0.8
min-confidence: 0.8
rules:
# Blank import should be only in a main or test package, or have a comment justifying it.
- name: blank-imports
# context.Context() should be the first parameter of a function when provided as argument.
- name: context-as-argument
# Basic types should not be used as a key in `context.WithValue`
- name: context-keys-type
# Importing with `.` makes the programs much harder to understand
- name: dot-imports
# Empty blocks make code less readable and could be a symptom of a bug or unfinished refactoring.
- name: empty-block
# for better readability, variables of type `error` must be named with the prefix `err`.
- name: error-naming
# for better readability, the errors should be last in the list of returned values by a function.
- name: error-return
# for better readability, error messages should not be capitalized or end with punctuation or a newline.
- name: error-strings
# report when replacing `errors.New(fmt.Sprintf())` with `fmt.Errorf()` is possible
- name: errorf
# incrementing an integer variable by 1 is recommended to be done using the `++` operator
- name: increment-decrement
# highlights redundant else-blocks that can be eliminated from the code
- name: indent-error-flow
# This rule suggests a shorter way of writing ranges that do not use the second value.
- name: range
# receiver names in a method should reflect the struct name (p for Person, for example)
- name: receiver-naming
# redefining built in names (true, false, append, make) can lead to bugs very difficult to detect.
- name: redefines-builtin-id
# redundant else-blocks that can be eliminated from the code.
- name: superfluous-else
# prevent confusing name for variables when using `time` package
- name: time-naming
# warns when an exported function or method returns a value of an un-exported type.
- name: unexported-return
# spots and proposes to remove unreachable code. also helps to spot errors
- name: unreachable-code
# Functions or methods with unused parameters can be a symptom of an unfinished refactoring or a bug.
- name: unused-parameter
# Since Go 1.18, interface{} has an alias: any. This rule proposes to replace instances of interface{} with any.
- name: use-any
# report when a variable declaration can be simplified
- name: var-declaration
# warns when initialism, variable or package naming conventions are not followed.
- name: var-naming

goimports:
# put imports beginning with prefix after 3rd-party packages;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,6 @@ func ToTDigest(dp pmetric.ExponentialHistogramDataPoint) (counts []int64, values
func safeUint64ToInt64(v uint64) int64 {
if v > math.MaxInt64 {
return math.MaxInt64
} else {
return int64(v) // nolint:goset // overflow checked
}
return int64(v) // nolint:goset // overflow checked
}
3 changes: 1 addition & 2 deletions exporter/elasticsearchexporter/model.go
Original file line number Diff line number Diff line change
Expand Up @@ -1078,7 +1078,6 @@ func mergeGeolocation(attributes pcommon.Map) {
func safeUint64ToInt64(v uint64) int64 {
if v > math.MaxInt64 {
return math.MaxInt64
} else {
return int64(v) // nolint:goset // overflow checked
}
return int64(v) // nolint:goset // overflow checked
}
30 changes: 15 additions & 15 deletions extension/cgroupruntimeextension/integration_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -46,9 +46,9 @@ func checkCgroupSystem(tb testing.TB) {
}
}

// cgroupMaxCpu returns the CPU max definition for a given cgroup slice path
// cgroupMaxCPU returns the CPU max definition for a given cgroup slice path
// File format: cpu_quote cpu_period
func cgroupMaxCpu(filename string) (quota int64, period uint64, err error) {
func cgroupMaxCPU(filename string) (quota int64, period uint64, err error) {
out, err := os.ReadFile(filepath.Join(defaultCgroup2Path, filename, "cpu.max"))
if err != nil {
return 0, 0, err
Expand All @@ -75,17 +75,17 @@ func TestCgroupV2SudoIntegration(t *testing.T) {
tests := []struct {
name string
// nil CPU quota == "max" cgroup string value
cgroupCpuQuota *int64
cgroupCpuPeriod uint64
cgroupCPUQuota *int64
cgroupCPUPeriod uint64
cgroupMaxMemory int64
config *Config
expectedGoMaxProcs int
expectedGoMemLimit int64
}{
{
name: "90% the max cgroup memory and 12 GOMAXPROCS",
cgroupCpuQuota: pointerInt64(100000),
cgroupCpuPeriod: 8000,
cgroupCPUQuota: pointerInt64(100000),
cgroupCPUPeriod: 8000,
// 128 Mb
cgroupMaxMemory: 134217728,
config: &Config{
Expand All @@ -104,8 +104,8 @@ func TestCgroupV2SudoIntegration(t *testing.T) {
},
{
name: "50% of the max cgroup memory and 1 GOMAXPROCS",
cgroupCpuQuota: pointerInt64(100000),
cgroupCpuPeriod: 100000,
cgroupCPUQuota: pointerInt64(100000),
cgroupCPUPeriod: 100000,
// 128 Mb
cgroupMaxMemory: 134217728,
config: &Config{
Expand All @@ -124,8 +124,8 @@ func TestCgroupV2SudoIntegration(t *testing.T) {
},
{
name: "10% of the max cgroup memory, max cpu, default GOMAXPROCS",
cgroupCpuQuota: nil,
cgroupCpuPeriod: 100000,
cgroupCPUQuota: nil,
cgroupCPUPeriod: 100000,
// 128 Mb
cgroupMaxMemory: 134217728,
config: &Config{
Expand Down Expand Up @@ -173,19 +173,19 @@ func TestCgroupV2SudoIntegration(t *testing.T) {
}
}

initialCpuQuota, initialCpuPeriod, err := cgroupMaxCpu(cgroupPath)
initialCPUQuota, initialCPUPeriod, err := cgroupMaxCPU(cgroupPath)
require.NoError(t, err)
cpuCgroupCleanUp := func() {
fmt.Println(initialCpuQuota)
fmt.Println(initialCPUQuota)
err = manager.Update(&cgroup2.Resources{
CPU: &cgroup2.CPU{
Max: cgroup2.NewCPUMax(pointerInt64(initialCpuQuota), pointerUint64(initialCpuPeriod)),
Max: cgroup2.NewCPUMax(pointerInt64(initialCPUQuota), pointerUint64(initialCPUPeriod)),
},
})
assert.NoError(t, err)
}

if initialCpuQuota == math.MaxInt64 {
if initialCPUQuota == math.MaxInt64 {
// fallback solution to set cgroup's max cpu to "max"
cpuCgroupCleanUp = func() {
err = os.WriteFile(path.Join(defaultCgroup2Path, cgroupPath, "cpu.max"), []byte("max"), 0o600)
Expand Down Expand Up @@ -215,7 +215,7 @@ func TestCgroupV2SudoIntegration(t *testing.T) {
Max: pointerInt64(test.cgroupMaxMemory),
},
CPU: &cgroup2.CPU{
Max: cgroup2.NewCPUMax(test.cgroupCpuQuota, pointerUint64(test.cgroupCpuPeriod)),
Max: cgroup2.NewCPUMax(test.cgroupCPUQuota, pointerUint64(test.cgroupCPUPeriod)),
},
})
require.NoError(t, err)
Expand Down
6 changes: 3 additions & 3 deletions pkg/ottl/context_inferrer_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,13 +11,13 @@ import (
)

var defaultDummyPriorityContextInferrerCandidate = &priorityContextInferrerCandidate{
hasFunctionName: func(name string) bool {
hasFunctionName: func(_ string) bool {
return true
},
hasEnumSymbol: func(enum *EnumSymbol) bool {
hasEnumSymbol: func(_ *EnumSymbol) bool {
return true
},
getLowerContexts: func(context string) []string {
getLowerContexts: func(_ string) []string {
return nil
},
}
Expand Down
30 changes: 15 additions & 15 deletions receiver/libhoneyreceiver/encoder/encoder.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,13 +15,13 @@ import (

const (
PbContentType = "application/x-protobuf"
JsonContentType = "application/json"
JSONContentType = "application/json"
MsgpackContentType = "application/x-msgpack"
)

var (
JsEncoder = &JsonEncoder{}
JsonPbMarshaler = &jsonpb.Marshaler{}
JsEncoder = &JSONEncoder{}
JSONPbMarshaler = &jsonpb.Marshaler{}
MpEncoder = &msgpackEncoder{}
)

Expand All @@ -39,46 +39,46 @@ type Encoder interface {
ContentType() string
}

type JsonEncoder struct{}
type JSONEncoder struct{}

func (JsonEncoder) UnmarshalTracesRequest(buf []byte) (ptraceotlp.ExportRequest, error) {
func (JSONEncoder) UnmarshalTracesRequest(buf []byte) (ptraceotlp.ExportRequest, error) {
req := ptraceotlp.NewExportRequest()
err := req.UnmarshalJSON(buf)
return req, err
}

func (JsonEncoder) UnmarshalMetricsRequest(buf []byte) (pmetricotlp.ExportRequest, error) {
func (JSONEncoder) UnmarshalMetricsRequest(buf []byte) (pmetricotlp.ExportRequest, error) {
req := pmetricotlp.NewExportRequest()
err := req.UnmarshalJSON(buf)
return req, err
}

func (JsonEncoder) UnmarshalLogsRequest(buf []byte) (plogotlp.ExportRequest, error) {
func (JSONEncoder) UnmarshalLogsRequest(buf []byte) (plogotlp.ExportRequest, error) {
req := plogotlp.NewExportRequest()
err := req.UnmarshalJSON(buf)
return req, err
}

func (JsonEncoder) MarshalTracesResponse(resp ptraceotlp.ExportResponse) ([]byte, error) {
func (JSONEncoder) MarshalTracesResponse(resp ptraceotlp.ExportResponse) ([]byte, error) {
return resp.MarshalJSON()
}

func (JsonEncoder) MarshalMetricsResponse(resp pmetricotlp.ExportResponse) ([]byte, error) {
func (JSONEncoder) MarshalMetricsResponse(resp pmetricotlp.ExportResponse) ([]byte, error) {
return resp.MarshalJSON()
}

func (JsonEncoder) MarshalLogsResponse(resp plogotlp.ExportResponse) ([]byte, error) {
func (JSONEncoder) MarshalLogsResponse(resp plogotlp.ExportResponse) ([]byte, error) {
return resp.MarshalJSON()
}

func (JsonEncoder) MarshalStatus(resp *spb.Status) ([]byte, error) {
func (JSONEncoder) MarshalStatus(resp *spb.Status) ([]byte, error) {
buf := new(bytes.Buffer)
err := JsonPbMarshaler.Marshal(buf, resp)
err := JSONPbMarshaler.Marshal(buf, resp)
return buf.Bytes(), err
}

func (JsonEncoder) ContentType() string {
return JsonContentType
func (JSONEncoder) ContentType() string {
return JSONContentType
}

// messagepack responses seem to work in JSON so leaving this alone for now.
Expand Down Expand Up @@ -116,7 +116,7 @@ func (msgpackEncoder) MarshalLogsResponse(resp plogotlp.ExportResponse) ([]byte,

func (msgpackEncoder) MarshalStatus(resp *spb.Status) ([]byte, error) {
buf := new(bytes.Buffer)
err := JsonPbMarshaler.Marshal(buf, resp)
err := JSONPbMarshaler.Marshal(buf, resp)
return buf.Bytes(), err
}

Expand Down
26 changes: 13 additions & 13 deletions receiver/libhoneyreceiver/internal/libhoneyevent/libhoneyevent.go
Original file line number Diff line number Diff line change
Expand Up @@ -174,7 +174,7 @@ func traceIDFrom(s string) trc.TraceID {
return tid
}

func generateAnId(length int) []byte {
func generateAnID(length int) []byte {
token := make([]byte, length)
_, err := rand.Read(token)
if err != nil {
Expand Down Expand Up @@ -276,23 +276,23 @@ func (l *LibhoneyEvent) GetParentID(fieldName string) (trc.SpanID, error) {

// ToPTraceSpan converts a LibhoneyEvent to a Pdata Span
func (l *LibhoneyEvent) ToPTraceSpan(newSpan *ptrace.Span, alreadyUsedFields *[]string, cfg FieldMapConfig, logger zap.Logger) error {
time_ns := l.MsgPackTimestamp.UnixNano()
logger.Debug("processing trace with", zap.Int64("timestamp", time_ns))
timeNs := l.MsgPackTimestamp.UnixNano()
logger.Debug("processing trace with", zap.Int64("timestamp", timeNs))

var parent_id trc.SpanID
var parentID trc.SpanID
if pid, ok := l.Data[cfg.Attributes.ParentID]; ok {
parent_id = spanIDFrom(pid.(string))
newSpan.SetParentSpanID(pcommon.SpanID(parent_id))
parentID = spanIDFrom(pid.(string))
newSpan.SetParentSpanID(pcommon.SpanID(parentID))
}

duration_ms := 0.0
durationMs := 0.0
for _, df := range cfg.Attributes.DurationFields {
if duration, okay := l.Data[df]; okay {
duration_ms = duration.(float64)
durationMs = duration.(float64)
break
}
}
end_timestamp := time_ns + (int64(duration_ms) * 1000000)
endTimestamp := timeNs + (int64(durationMs) * 1000000)

if tid, ok := l.Data[cfg.Attributes.TraceID]; ok {
tid := strings.ReplaceAll(tid.(string), "-", "")
Expand All @@ -306,7 +306,7 @@ func (l *LibhoneyEvent) ToPTraceSpan(newSpan *ptrace.Span, alreadyUsedFields *[]
newSpan.SetTraceID(pcommon.TraceID(traceIDFrom(tid)))
}
} else {
newSpan.SetTraceID(pcommon.TraceID(generateAnId(32)))
newSpan.SetTraceID(pcommon.TraceID(generateAnID(32)))
}

if sid, ok := l.Data[cfg.Attributes.SpanID]; ok {
Expand All @@ -323,11 +323,11 @@ func (l *LibhoneyEvent) ToPTraceSpan(newSpan *ptrace.Span, alreadyUsedFields *[]
newSpan.SetSpanID(pcommon.SpanID(spanIDFrom(sid)))
}
} else {
newSpan.SetSpanID(pcommon.SpanID(generateAnId(16)))
newSpan.SetSpanID(pcommon.SpanID(generateAnID(16)))
}

newSpan.SetStartTimestamp(pcommon.Timestamp(time_ns))
newSpan.SetEndTimestamp(pcommon.Timestamp(end_timestamp))
newSpan.SetStartTimestamp(pcommon.Timestamp(timeNs))
newSpan.SetEndTimestamp(pcommon.Timestamp(endTimestamp))

if spanName, ok := l.Data[cfg.Attributes.Name]; ok {
newSpan.SetName(spanName.(string))
Expand Down
12 changes: 6 additions & 6 deletions receiver/libhoneyreceiver/internal/parser/parser.go
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@ func ToPdata(dataset string, lhes []libhoneyevent.LibhoneyEvent, cfg libhoneyeve
alreadyUsedFields = append(alreadyUsedFields, cfg.Attributes.DurationFields...)

for _, lhe := range lhes {
parent_id, err := lhe.GetParentID(cfg.Attributes.ParentID)
parentID, err := lhe.GetParentID(cfg.Attributes.ParentID)
if err != nil {
logger.Warn("parent id not found")
}
Expand All @@ -83,23 +83,23 @@ func ToPdata(dataset string, lhes []libhoneyevent.LibhoneyEvent, cfg libhoneyeve
logger.Warn("log could not be converted from libhoney to plog", zap.String("span.object", lhe.DebugString()))
}
case "span_event":
spanEvents[parent_id] = append(spanEvents[parent_id], lhe)
spanEvents[parentID] = append(spanEvents[parentID], lhe)
case "span_link":
spanLinks[parent_id] = append(spanLinks[parent_id], lhe)
spanLinks[parentID] = append(spanLinks[parentID], lhe)
}
}

start := time.Now()
for _, ss := range foundScopes.Scope {
for i := 0; i < ss.ScopeSpans.Len(); i++ {
sp := ss.ScopeSpans.At(i)
spId := trc.SpanID(sp.SpanID())
spID := trc.SpanID(sp.SpanID())

if speArr, ok := spanEvents[spId]; ok {
if speArr, ok := spanEvents[spID]; ok {
addSpanEventsToSpan(sp, speArr, alreadyUsedFields, &logger)
}

if splArr, ok := spanLinks[spId]; ok {
if splArr, ok := spanLinks[spID]; ok {
addSpanLinksToSpan(sp, splArr, alreadyUsedFields, &logger)
}
}
Expand Down
6 changes: 3 additions & 3 deletions receiver/libhoneyreceiver/receiver.go
Original file line number Diff line number Diff line change
Expand Up @@ -221,7 +221,7 @@ func (r *libhoneyReceiver) handleEvent(resp http.ResponseWriter, req *http.Reque
r.settings.Logger.Debug("Decoding with msgpack worked", zap.Time("timestamp.first.msgpacktimestamp", *libhoneyevents[0].MsgPackTimestamp), zap.String("timestamp.first.time", libhoneyevents[0].Time))
r.settings.Logger.Debug("event zero", zap.String("event.data", libhoneyevents[0].DebugString()))
}
case encoder.JsonContentType:
case encoder.JSONContentType:
err = json.Unmarshal(body, &libhoneyevents)
if err != nil {
errorutil.HTTPError(resp, err)
Expand Down Expand Up @@ -263,7 +263,7 @@ func readContentType(resp http.ResponseWriter, req *http.Request) (encoder.Encod
}

switch getMimeTypeFromContentType(req.Header.Get("Content-Type")) {
case encoder.JsonContentType:
case encoder.JSONContentType:
return encoder.JsEncoder, true
case "application/x-msgpack", "application/msgpack":
return encoder.MpEncoder, true
Expand Down Expand Up @@ -294,5 +294,5 @@ func handleUnmatchedMethod(resp http.ResponseWriter) {

func handleUnmatchedContentType(resp http.ResponseWriter) {
status := http.StatusUnsupportedMediaType
writeResponse(resp, "text/plain", status, []byte(fmt.Sprintf("%v unsupported media type, supported: [%s, %s]", status, encoder.JsonContentType, encoder.PbContentType)))
writeResponse(resp, "text/plain", status, []byte(fmt.Sprintf("%v unsupported media type, supported: [%s, %s]", status, encoder.JSONContentType, encoder.PbContentType)))
}
Loading

0 comments on commit f05adc6

Please sign in to comment.