)
+// %+v equivalent to %+s:%d
+func (f Frame) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 's':
+ switch {
+ case s.Flag('+'):
+ io.WriteString(s, f.name())
+ io.WriteString(s, "\n\t")
+ io.WriteString(s, f.file())
+ default:
+ io.WriteString(s, path.Base(f.file()))
+ }
+ case 'd':
+ io.WriteString(s, strconv.Itoa(f.line()))
+ case 'n':
+ io.WriteString(s, funcname(f.name()))
+ case 'v':
+ f.Format(s, 's')
+ io.WriteString(s, ":")
+ f.Format(s, 'd')
+ }
+}
+
+// MarshalText formats a stacktrace Frame as a text string. The output is the
+// same as that of fmt.Sprintf("%+v", f), but without newlines or tabs.
+func (f Frame) MarshalText() ([]byte, error) {
+ name := f.name()
+ if name == "unknown" {
+ return []byte(name), nil
+ }
+ return []byte(fmt.Sprintf("%s %s:%d", name, f.file(), f.line())), nil
+}
+
+// StackTrace is stack of Frames from innermost (newest) to outermost (oldest).
+type StackTrace []Frame
+
+// Format formats the stack of Frames according to the fmt.Formatter interface.
+//
+// %s lists source files for each Frame in the stack
+// %v lists the source file and line number for each Frame in the stack
+//
+// Format accepts flags that alter the printing of some verbs, as follows:
+//
+// %+v Prints filename, function, and line number for each Frame in the stack.
+func (st StackTrace) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ switch {
+ case s.Flag('+'):
+ for _, f := range st {
+ io.WriteString(s, "\n")
+ f.Format(s, verb)
+ }
+ case s.Flag('#'):
+ fmt.Fprintf(s, "%#v", []Frame(st))
+ default:
+ st.formatSlice(s, verb)
+ }
+ case 's':
+ st.formatSlice(s, verb)
+ }
+}
+
+// formatSlice will format this StackTrace into the given buffer as a slice of
+// Frame, only valid when called with '%s' or '%v'.
+func (st StackTrace) formatSlice(s fmt.State, verb rune) {
+ io.WriteString(s, "[")
+ for i, f := range st {
+ if i > 0 {
+ io.WriteString(s, " ")
+ }
+ f.Format(s, verb)
+ }
+ io.WriteString(s, "]")
+}
+
+// stack represents a stack of program counters.
+type stack []uintptr
+
+func (s *stack) Format(st fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ switch {
+ case st.Flag('+'):
+ for _, pc := range *s {
+ f := Frame(pc)
+ fmt.Fprintf(st, "\n%+v", f)
+ }
+ }
+ }
+}
+
+func (s *stack) StackTrace() StackTrace {
+ f := make([]Frame, len(*s))
+ for i := 0; i < len(f); i++ {
+ f[i] = Frame((*s)[i])
+ }
+ return f
+}
+
+func callers() *stack {
+ const depth = 32
+ var pcs [depth]uintptr
+ n := runtime.Callers(3, pcs[:])
+ var st stack = pcs[0:n]
+ return &st
+}
+
+// funcname removes the path prefix component of a function's name reported by func.Name().
+func funcname(name string) string {
+ i := strings.LastIndex(name, "/")
+ name = name[i+1:]
+ i = strings.Index(name, ".")
+ return name[i+1:]
+}
diff --git a/vendor/github.com/pmezard/go-difflib/LICENSE b/vendor/github.com/pmezard/go-difflib/LICENSE
new file mode 100644
index 0000000..c67dad6
--- /dev/null
+++ b/vendor/github.com/pmezard/go-difflib/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2013, Patrick Mezard
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+ The names of its contributors may not be used to endorse or promote
+products derived from this software without specific prior written
+permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go
new file mode 100644
index 0000000..003e99f
--- /dev/null
+++ b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go
@@ -0,0 +1,772 @@
+// Package difflib is a partial port of Python difflib module.
+//
+// It provides tools to compare sequences of strings and generate textual diffs.
+//
+// The following class and functions have been ported:
+//
+// - SequenceMatcher
+//
+// - unified_diff
+//
+// - context_diff
+//
+// Getting unified diffs was the main goal of the port. Keep in mind this code
+// is mostly suitable to output text differences in a human friendly way, there
+// are no guarantees generated diffs are consumable by patch(1).
+package difflib
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strings"
+)
+
+func min(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+func max(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+func calculateRatio(matches, length int) float64 {
+ if length > 0 {
+ return 2.0 * float64(matches) / float64(length)
+ }
+ return 1.0
+}
+
+type Match struct {
+ A int
+ B int
+ Size int
+}
+
+type OpCode struct {
+ Tag byte
+ I1 int
+ I2 int
+ J1 int
+ J2 int
+}
+
+// SequenceMatcher compares sequence of strings. The basic
+// algorithm predates, and is a little fancier than, an algorithm
+// published in the late 1980's by Ratcliff and Obershelp under the
+// hyperbolic name "gestalt pattern matching". The basic idea is to find
+// the longest contiguous matching subsequence that contains no "junk"
+// elements (R-O doesn't address junk). The same idea is then applied
+// recursively to the pieces of the sequences to the left and to the right
+// of the matching subsequence. This does not yield minimal edit
+// sequences, but does tend to yield matches that "look right" to people.
+//
+// SequenceMatcher tries to compute a "human-friendly diff" between two
+// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
+// longest *contiguous* & junk-free matching subsequence. That's what
+// catches peoples' eyes. The Windows(tm) windiff has another interesting
+// notion, pairing up elements that appear uniquely in each sequence.
+// That, and the method here, appear to yield more intuitive difference
+// reports than does diff. This method appears to be the least vulnerable
+// to synching up on blocks of "junk lines", though (like blank lines in
+// ordinary text files, or maybe "" lines in HTML files). That may be
+// because this is the only method of the 3 that has a *concept* of
+// "junk" .
+//
+// Timing: Basic R-O is cubic time worst case and quadratic time expected
+// case. SequenceMatcher is quadratic time for the worst case and has
+// expected-case behavior dependent in a complicated way on how many
+// elements the sequences have in common; best case time is linear.
+type SequenceMatcher struct {
+ a []string
+ b []string
+ b2j map[string][]int
+ IsJunk func(string) bool
+ autoJunk bool
+ bJunk map[string]struct{}
+ matchingBlocks []Match
+ fullBCount map[string]int
+ bPopular map[string]struct{}
+ opCodes []OpCode
+}
+
+func NewMatcher(a, b []string) *SequenceMatcher {
+ m := SequenceMatcher{autoJunk: true}
+ m.SetSeqs(a, b)
+ return &m
+}
+
+func NewMatcherWithJunk(a, b []string, autoJunk bool,
+ isJunk func(string) bool) *SequenceMatcher {
+
+ m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk}
+ m.SetSeqs(a, b)
+ return &m
+}
+
+// Set two sequences to be compared.
+func (m *SequenceMatcher) SetSeqs(a, b []string) {
+ m.SetSeq1(a)
+ m.SetSeq2(b)
+}
+
+// Set the first sequence to be compared. The second sequence to be compared is
+// not changed.
+//
+// SequenceMatcher computes and caches detailed information about the second
+// sequence, so if you want to compare one sequence S against many sequences,
+// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other
+// sequences.
+//
+// See also SetSeqs() and SetSeq2().
+func (m *SequenceMatcher) SetSeq1(a []string) {
+ if &a == &m.a {
+ return
+ }
+ m.a = a
+ m.matchingBlocks = nil
+ m.opCodes = nil
+}
+
+// Set the second sequence to be compared. The first sequence to be compared is
+// not changed.
+func (m *SequenceMatcher) SetSeq2(b []string) {
+ if &b == &m.b {
+ return
+ }
+ m.b = b
+ m.matchingBlocks = nil
+ m.opCodes = nil
+ m.fullBCount = nil
+ m.chainB()
+}
+
+func (m *SequenceMatcher) chainB() {
+ // Populate line -> index mapping
+ b2j := map[string][]int{}
+ for i, s := range m.b {
+ indices := b2j[s]
+ indices = append(indices, i)
+ b2j[s] = indices
+ }
+
+ // Purge junk elements
+ m.bJunk = map[string]struct{}{}
+ if m.IsJunk != nil {
+ junk := m.bJunk
+ for s, _ := range b2j {
+ if m.IsJunk(s) {
+ junk[s] = struct{}{}
+ }
+ }
+ for s, _ := range junk {
+ delete(b2j, s)
+ }
+ }
+
+ // Purge remaining popular elements
+ popular := map[string]struct{}{}
+ n := len(m.b)
+ if m.autoJunk && n >= 200 {
+ ntest := n/100 + 1
+ for s, indices := range b2j {
+ if len(indices) > ntest {
+ popular[s] = struct{}{}
+ }
+ }
+ for s, _ := range popular {
+ delete(b2j, s)
+ }
+ }
+ m.bPopular = popular
+ m.b2j = b2j
+}
+
+func (m *SequenceMatcher) isBJunk(s string) bool {
+ _, ok := m.bJunk[s]
+ return ok
+}
+
+// Find longest matching block in a[alo:ahi] and b[blo:bhi].
+//
+// If IsJunk is not defined:
+//
+// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
+// alo <= i <= i+k <= ahi
+// blo <= j <= j+k <= bhi
+// and for all (i',j',k') meeting those conditions,
+// k >= k'
+// i <= i'
+// and if i == i', j <= j'
+//
+// In other words, of all maximal matching blocks, return one that
+// starts earliest in a, and of all those maximal matching blocks that
+// start earliest in a, return the one that starts earliest in b.
+//
+// If IsJunk is defined, first the longest matching block is
+// determined as above, but with the additional restriction that no
+// junk element appears in the block. Then that block is extended as
+// far as possible by matching (only) junk elements on both sides. So
+// the resulting block never matches on junk except as identical junk
+// happens to be adjacent to an "interesting" match.
+//
+// If no blocks match, return (alo, blo, 0).
+func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match {
+ // CAUTION: stripping common prefix or suffix would be incorrect.
+ // E.g.,
+ // ab
+ // acab
+ // Longest matching block is "ab", but if common prefix is
+ // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
+ // strip, so ends up claiming that ab is changed to acab by
+ // inserting "ca" in the middle. That's minimal but unintuitive:
+ // "it's obvious" that someone inserted "ac" at the front.
+ // Windiff ends up at the same place as diff, but by pairing up
+ // the unique 'b's and then matching the first two 'a's.
+ besti, bestj, bestsize := alo, blo, 0
+
+ // find longest junk-free match
+ // during an iteration of the loop, j2len[j] = length of longest
+ // junk-free match ending with a[i-1] and b[j]
+ j2len := map[int]int{}
+ for i := alo; i != ahi; i++ {
+ // look at all instances of a[i] in b; note that because
+ // b2j has no junk keys, the loop is skipped if a[i] is junk
+ newj2len := map[int]int{}
+ for _, j := range m.b2j[m.a[i]] {
+ // a[i] matches b[j]
+ if j < blo {
+ continue
+ }
+ if j >= bhi {
+ break
+ }
+ k := j2len[j-1] + 1
+ newj2len[j] = k
+ if k > bestsize {
+ besti, bestj, bestsize = i-k+1, j-k+1, k
+ }
+ }
+ j2len = newj2len
+ }
+
+ // Extend the best by non-junk elements on each end. In particular,
+ // "popular" non-junk elements aren't in b2j, which greatly speeds
+ // the inner loop above, but also means "the best" match so far
+ // doesn't contain any junk *or* popular non-junk elements.
+ for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) &&
+ m.a[besti-1] == m.b[bestj-1] {
+ besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
+ }
+ for besti+bestsize < ahi && bestj+bestsize < bhi &&
+ !m.isBJunk(m.b[bestj+bestsize]) &&
+ m.a[besti+bestsize] == m.b[bestj+bestsize] {
+ bestsize += 1
+ }
+
+ // Now that we have a wholly interesting match (albeit possibly
+ // empty!), we may as well suck up the matching junk on each
+ // side of it too. Can't think of a good reason not to, and it
+ // saves post-processing the (possibly considerable) expense of
+ // figuring out what to do with it. In the case of an empty
+ // interesting match, this is clearly the right thing to do,
+ // because no other kind of match is possible in the regions.
+ for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) &&
+ m.a[besti-1] == m.b[bestj-1] {
+ besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
+ }
+ for besti+bestsize < ahi && bestj+bestsize < bhi &&
+ m.isBJunk(m.b[bestj+bestsize]) &&
+ m.a[besti+bestsize] == m.b[bestj+bestsize] {
+ bestsize += 1
+ }
+
+ return Match{A: besti, B: bestj, Size: bestsize}
+}
+
+// Return list of triples describing matching subsequences.
+//
+// Each triple is of the form (i, j, n), and means that
+// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
+// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are
+// adjacent triples in the list, and the second is not the last triple in the
+// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe
+// adjacent equal blocks.
+//
+// The last triple is a dummy, (len(a), len(b), 0), and is the only
+// triple with n==0.
+func (m *SequenceMatcher) GetMatchingBlocks() []Match {
+ if m.matchingBlocks != nil {
+ return m.matchingBlocks
+ }
+
+ var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match
+ matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match {
+ match := m.findLongestMatch(alo, ahi, blo, bhi)
+ i, j, k := match.A, match.B, match.Size
+ if match.Size > 0 {
+ if alo < i && blo < j {
+ matched = matchBlocks(alo, i, blo, j, matched)
+ }
+ matched = append(matched, match)
+ if i+k < ahi && j+k < bhi {
+ matched = matchBlocks(i+k, ahi, j+k, bhi, matched)
+ }
+ }
+ return matched
+ }
+ matched := matchBlocks(0, len(m.a), 0, len(m.b), nil)
+
+ // It's possible that we have adjacent equal blocks in the
+ // matching_blocks list now.
+ nonAdjacent := []Match{}
+ i1, j1, k1 := 0, 0, 0
+ for _, b := range matched {
+ // Is this block adjacent to i1, j1, k1?
+ i2, j2, k2 := b.A, b.B, b.Size
+ if i1+k1 == i2 && j1+k1 == j2 {
+ // Yes, so collapse them -- this just increases the length of
+ // the first block by the length of the second, and the first
+ // block so lengthened remains the block to compare against.
+ k1 += k2
+ } else {
+ // Not adjacent. Remember the first block (k1==0 means it's
+ // the dummy we started with), and make the second block the
+ // new block to compare against.
+ if k1 > 0 {
+ nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
+ }
+ i1, j1, k1 = i2, j2, k2
+ }
+ }
+ if k1 > 0 {
+ nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
+ }
+
+ nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0})
+ m.matchingBlocks = nonAdjacent
+ return m.matchingBlocks
+}
+
+// Return list of 5-tuples describing how to turn a into b.
+//
+// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
+// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
+// tuple preceding it, and likewise for j1 == the previous j2.
+//
+// The tags are characters, with these meanings:
+//
+// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2]
+//
+// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case.
+//
+// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case.
+//
+// 'e' (equal): a[i1:i2] == b[j1:j2]
+func (m *SequenceMatcher) GetOpCodes() []OpCode {
+ if m.opCodes != nil {
+ return m.opCodes
+ }
+ i, j := 0, 0
+ matching := m.GetMatchingBlocks()
+ opCodes := make([]OpCode, 0, len(matching))
+ for _, m := range matching {
+ // invariant: we've pumped out correct diffs to change
+ // a[:i] into b[:j], and the next matching block is
+ // a[ai:ai+size] == b[bj:bj+size]. So we need to pump
+ // out a diff to change a[i:ai] into b[j:bj], pump out
+ // the matching block, and move (i,j) beyond the match
+ ai, bj, size := m.A, m.B, m.Size
+ tag := byte(0)
+ if i < ai && j < bj {
+ tag = 'r'
+ } else if i < ai {
+ tag = 'd'
+ } else if j < bj {
+ tag = 'i'
+ }
+ if tag > 0 {
+ opCodes = append(opCodes, OpCode{tag, i, ai, j, bj})
+ }
+ i, j = ai+size, bj+size
+ // the list of matching blocks is terminated by a
+ // sentinel with size 0
+ if size > 0 {
+ opCodes = append(opCodes, OpCode{'e', ai, i, bj, j})
+ }
+ }
+ m.opCodes = opCodes
+ return m.opCodes
+}
+
+// Isolate change clusters by eliminating ranges with no changes.
+//
+// Return a generator of groups with up to n lines of context.
+// Each group is in the same format as returned by GetOpCodes().
+func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
+ if n < 0 {
+ n = 3
+ }
+ codes := m.GetOpCodes()
+ if len(codes) == 0 {
+ codes = []OpCode{OpCode{'e', 0, 1, 0, 1}}
+ }
+ // Fixup leading and trailing groups if they show no changes.
+ if codes[0].Tag == 'e' {
+ c := codes[0]
+ i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+ codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2}
+ }
+ if codes[len(codes)-1].Tag == 'e' {
+ c := codes[len(codes)-1]
+ i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+ codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)}
+ }
+ nn := n + n
+ groups := [][]OpCode{}
+ group := []OpCode{}
+ for _, c := range codes {
+ i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+ // End the current group and start a new one whenever
+ // there is a large range with no changes.
+ if c.Tag == 'e' && i2-i1 > nn {
+ group = append(group, OpCode{c.Tag, i1, min(i2, i1+n),
+ j1, min(j2, j1+n)})
+ groups = append(groups, group)
+ group = []OpCode{}
+ i1, j1 = max(i1, i2-n), max(j1, j2-n)
+ }
+ group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
+ }
+ if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') {
+ groups = append(groups, group)
+ }
+ return groups
+}
+
+// Return a measure of the sequences' similarity (float in [0,1]).
+//
+// Where T is the total number of elements in both sequences, and
+// M is the number of matches, this is 2.0*M / T.
+// Note that this is 1 if the sequences are identical, and 0 if
+// they have nothing in common.
+//
+// .Ratio() is expensive to compute if you haven't already computed
+// .GetMatchingBlocks() or .GetOpCodes(), in which case you may
+// want to try .QuickRatio() or .RealQuickRation() first to get an
+// upper bound.
+func (m *SequenceMatcher) Ratio() float64 {
+ matches := 0
+ for _, m := range m.GetMatchingBlocks() {
+ matches += m.Size
+ }
+ return calculateRatio(matches, len(m.a)+len(m.b))
+}
+
+// Return an upper bound on ratio() relatively quickly.
+//
+// This isn't defined beyond that it is an upper bound on .Ratio(), and
+// is faster to compute.
+func (m *SequenceMatcher) QuickRatio() float64 {
+ // viewing a and b as multisets, set matches to the cardinality
+ // of their intersection; this counts the number of matches
+ // without regard to order, so is clearly an upper bound
+ if m.fullBCount == nil {
+ m.fullBCount = map[string]int{}
+ for _, s := range m.b {
+ m.fullBCount[s] = m.fullBCount[s] + 1
+ }
+ }
+
+ // avail[x] is the number of times x appears in 'b' less the
+ // number of times we've seen it in 'a' so far ... kinda
+ avail := map[string]int{}
+ matches := 0
+ for _, s := range m.a {
+ n, ok := avail[s]
+ if !ok {
+ n = m.fullBCount[s]
+ }
+ avail[s] = n - 1
+ if n > 0 {
+ matches += 1
+ }
+ }
+ return calculateRatio(matches, len(m.a)+len(m.b))
+}
+
+// Return an upper bound on ratio() very quickly.
+//
+// This isn't defined beyond that it is an upper bound on .Ratio(), and
+// is faster to compute than either .Ratio() or .QuickRatio().
+func (m *SequenceMatcher) RealQuickRatio() float64 {
+ la, lb := len(m.a), len(m.b)
+ return calculateRatio(min(la, lb), la+lb)
+}
+
+// Convert range to the "ed" format
+func formatRangeUnified(start, stop int) string {
+ // Per the diff spec at http://www.unix.org/single_unix_specification/
+ beginning := start + 1 // lines start numbering with one
+ length := stop - start
+ if length == 1 {
+ return fmt.Sprintf("%d", beginning)
+ }
+ if length == 0 {
+ beginning -= 1 // empty ranges begin at line just before the range
+ }
+ return fmt.Sprintf("%d,%d", beginning, length)
+}
+
+// Unified diff parameters
+type UnifiedDiff struct {
+ A []string // First sequence lines
+ FromFile string // First file name
+ FromDate string // First file time
+ B []string // Second sequence lines
+ ToFile string // Second file name
+ ToDate string // Second file time
+ Eol string // Headers end of line, defaults to LF
+ Context int // Number of context lines
+}
+
+// Compare two sequences of lines; generate the delta as a unified diff.
+//
+// Unified diffs are a compact way of showing line changes and a few
+// lines of context. The number of context lines is set by 'n' which
+// defaults to three.
+//
+// By default, the diff control lines (those with ---, +++, or @@) are
+// created with a trailing newline. This is helpful so that inputs
+// created from file.readlines() result in diffs that are suitable for
+// file.writelines() since both the inputs and outputs have trailing
+// newlines.
+//
+// For inputs that do not have trailing newlines, set the lineterm
+// argument to "" so that the output will be uniformly newline free.
+//
+// The unidiff format normally has a header for filenames and modification
+// times. Any or all of these may be specified using strings for
+// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
+// The modification times are normally expressed in the ISO 8601 format.
+func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
+ buf := bufio.NewWriter(writer)
+ defer buf.Flush()
+ wf := func(format string, args ...interface{}) error {
+ _, err := buf.WriteString(fmt.Sprintf(format, args...))
+ return err
+ }
+ ws := func(s string) error {
+ _, err := buf.WriteString(s)
+ return err
+ }
+
+ if len(diff.Eol) == 0 {
+ diff.Eol = "\n"
+ }
+
+ started := false
+ m := NewMatcher(diff.A, diff.B)
+ for _, g := range m.GetGroupedOpCodes(diff.Context) {
+ if !started {
+ started = true
+ fromDate := ""
+ if len(diff.FromDate) > 0 {
+ fromDate = "\t" + diff.FromDate
+ }
+ toDate := ""
+ if len(diff.ToDate) > 0 {
+ toDate = "\t" + diff.ToDate
+ }
+ if diff.FromFile != "" || diff.ToFile != "" {
+ err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
+ if err != nil {
+ return err
+ }
+ err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ first, last := g[0], g[len(g)-1]
+ range1 := formatRangeUnified(first.I1, last.I2)
+ range2 := formatRangeUnified(first.J1, last.J2)
+ if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
+ return err
+ }
+ for _, c := range g {
+ i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+ if c.Tag == 'e' {
+ for _, line := range diff.A[i1:i2] {
+ if err := ws(" " + line); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if c.Tag == 'r' || c.Tag == 'd' {
+ for _, line := range diff.A[i1:i2] {
+ if err := ws("-" + line); err != nil {
+ return err
+ }
+ }
+ }
+ if c.Tag == 'r' || c.Tag == 'i' {
+ for _, line := range diff.B[j1:j2] {
+ if err := ws("+" + line); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// Like WriteUnifiedDiff but returns the diff a string.
+func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
+ w := &bytes.Buffer{}
+ err := WriteUnifiedDiff(w, diff)
+ return string(w.Bytes()), err
+}
+
+// Convert range to the "ed" format.
+func formatRangeContext(start, stop int) string {
+ // Per the diff spec at http://www.unix.org/single_unix_specification/
+ beginning := start + 1 // lines start numbering with one
+ length := stop - start
+ if length == 0 {
+ beginning -= 1 // empty ranges begin at line just before the range
+ }
+ if length <= 1 {
+ return fmt.Sprintf("%d", beginning)
+ }
+ return fmt.Sprintf("%d,%d", beginning, beginning+length-1)
+}
+
+type ContextDiff UnifiedDiff
+
+// Compare two sequences of lines; generate the delta as a context diff.
+//
+// Context diffs are a compact way of showing line changes and a few
+// lines of context. The number of context lines is set by diff.Context
+// which defaults to three.
+//
+// By default, the diff control lines (those with *** or ---) are
+// created with a trailing newline.
+//
+// For inputs that do not have trailing newlines, set the diff.Eol
+// argument to "" so that the output will be uniformly newline free.
+//
+// The context diff format normally has a header for filenames and
+// modification times. Any or all of these may be specified using
+// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate.
+// The modification times are normally expressed in the ISO 8601 format.
+// If not specified, the strings default to blanks.
+func WriteContextDiff(writer io.Writer, diff ContextDiff) error {
+ buf := bufio.NewWriter(writer)
+ defer buf.Flush()
+ var diffErr error
+ wf := func(format string, args ...interface{}) {
+ _, err := buf.WriteString(fmt.Sprintf(format, args...))
+ if diffErr == nil && err != nil {
+ diffErr = err
+ }
+ }
+ ws := func(s string) {
+ _, err := buf.WriteString(s)
+ if diffErr == nil && err != nil {
+ diffErr = err
+ }
+ }
+
+ if len(diff.Eol) == 0 {
+ diff.Eol = "\n"
+ }
+
+ prefix := map[byte]string{
+ 'i': "+ ",
+ 'd': "- ",
+ 'r': "! ",
+ 'e': " ",
+ }
+
+ started := false
+ m := NewMatcher(diff.A, diff.B)
+ for _, g := range m.GetGroupedOpCodes(diff.Context) {
+ if !started {
+ started = true
+ fromDate := ""
+ if len(diff.FromDate) > 0 {
+ fromDate = "\t" + diff.FromDate
+ }
+ toDate := ""
+ if len(diff.ToDate) > 0 {
+ toDate = "\t" + diff.ToDate
+ }
+ if diff.FromFile != "" || diff.ToFile != "" {
+ wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol)
+ wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol)
+ }
+ }
+
+ first, last := g[0], g[len(g)-1]
+ ws("***************" + diff.Eol)
+
+ range1 := formatRangeContext(first.I1, last.I2)
+ wf("*** %s ****%s", range1, diff.Eol)
+ for _, c := range g {
+ if c.Tag == 'r' || c.Tag == 'd' {
+ for _, cc := range g {
+ if cc.Tag == 'i' {
+ continue
+ }
+ for _, line := range diff.A[cc.I1:cc.I2] {
+ ws(prefix[cc.Tag] + line)
+ }
+ }
+ break
+ }
+ }
+
+ range2 := formatRangeContext(first.J1, last.J2)
+ wf("--- %s ----%s", range2, diff.Eol)
+ for _, c := range g {
+ if c.Tag == 'r' || c.Tag == 'i' {
+ for _, cc := range g {
+ if cc.Tag == 'd' {
+ continue
+ }
+ for _, line := range diff.B[cc.J1:cc.J2] {
+ ws(prefix[cc.Tag] + line)
+ }
+ }
+ break
+ }
+ }
+ }
+ return diffErr
+}
+
+// Like WriteContextDiff but returns the diff a string.
+func GetContextDiffString(diff ContextDiff) (string, error) {
+ w := &bytes.Buffer{}
+ err := WriteContextDiff(w, diff)
+ return string(w.Bytes()), err
+}
+
+// Split a string on "\n" while preserving them. The output can be used
+// as input for UnifiedDiff and ContextDiff structures.
+func SplitLines(s string) []string {
+ lines := strings.SplitAfter(s, "\n")
+ lines[len(lines)-1] += "\n"
+ return lines
+}
diff --git a/vendor/github.com/quasilyte/go-ruleguard/LICENSE b/vendor/github.com/quasilyte/go-ruleguard/LICENSE
new file mode 100644
index 0000000..f0381fb
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/LICENSE
@@ -0,0 +1,29 @@
+BSD 3-Clause License
+
+Copyright (c) 2019, Iskander (Alex) Sharipov / quasilyte
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/quasilyte/go-ruleguard/dslgen/dsl_sources.go b/vendor/github.com/quasilyte/go-ruleguard/dslgen/dsl_sources.go
new file mode 100644
index 0000000..54c500f
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/dslgen/dsl_sources.go
@@ -0,0 +1,3 @@
+package dslgen
+
+var Fluent = []byte("package fluent\n\n// Matcher is a main API group-level entry point.\n// It's used to define and configure the group rules.\n// It also represents a map of all rule-local variables.\ntype Matcher map[string]Var\n\n// Import loads given package path into a rule group imports table.\n//\n// That table is used during the rules compilation.\n//\n// The table has the following effect on the rules:\n//\t* For type expressions, it's used to resolve the\n//\t full package paths of qualified types, like `foo.Bar`.\n//\t If Import(`a/b/foo`) is called, `foo.Bar` will match\n//\t `a/b/foo.Bar` type during the pattern execution.\nfunc (m Matcher) Import(pkgPath string) {}\n\n// Match specifies a set of patterns that match a rule being defined.\n// Pattern matching succeeds if at least 1 pattern matches.\n//\n// If none of the given patterns matched, rule execution stops.\nfunc (m Matcher) Match(pattern string, alternatives ...string) Matcher {\n\treturn m\n}\n\n// Where applies additional constraint to a match.\n// If a given cond is not satisfied, a match is rejected and\n// rule execution stops.\nfunc (m Matcher) Where(cond bool) Matcher {\n\treturn m\n}\n\n// Report prints a message if associated rule match is successful.\n//\n// A message is a string that can contain interpolated expressions.\n// For every matched variable it's possible to interpolate\n// their printed representation into the message text with $.\n// An entire match can be addressed with $$.\nfunc (m Matcher) Report(message string) Matcher {\n\treturn m\n}\n\n// Suggest assigns a quickfix suggestion for the matched code.\nfunc (m Matcher) Suggest(suggestion string) Matcher {\n\treturn m\n}\n\n// At binds the reported node to a named submatch.\n// If no explicit location is given, the outermost node ($$) is used.\nfunc (m Matcher) At(v Var) Matcher {\n\treturn m\n}\n\n// Var is a pattern variable that describes a named submatch.\ntype Var struct {\n\t// Pure reports whether expr matched 
by var is side-effect-free.\n\tPure bool\n\n\t// Const reports whether expr matched by var is a constant value.\n\tConst bool\n\n\t// Addressable reports whether the corresponding expression is addressable.\n\t// See https://golang.org/ref/spec#Address_operators.\n\tAddressable bool\n\n\t// Type is a type of a matched expr.\n\tType ExprType\n}\n\n// ExprType describes a type of a matcher expr.\ntype ExprType struct {\n\t// Size represents expression type size in bytes.\n\tSize int\n}\n\n// AssignableTo reports whether a type is assign-compatible with a given type.\n// See https://golang.org/pkg/go/types/#AssignableTo.\nfunc (ExprType) AssignableTo(typ string) bool { return boolResult }\n\n// ConvertibleTo reports whether a type is conversible to a given type.\n// See https://golang.org/pkg/go/types/#ConvertibleTo.\nfunc (ExprType) ConvertibleTo(typ string) bool { return boolResult }\n\n// Implements reports whether a type implements a given interface.\n// See https://golang.org/pkg/go/types/#Implements.\nfunc (ExprType) Implements(typ string) bool { return boolResult }\n\n// Is reports whether a type is identical to a given type.\nfunc (ExprType) Is(typ string) bool { return boolResult }\n\n\n\nvar boolResult bool\n\n")
diff --git a/vendor/github.com/quasilyte/go-ruleguard/dslgen/dslgen.go b/vendor/github.com/quasilyte/go-ruleguard/dslgen/dslgen.go
new file mode 100644
index 0000000..a2269b2
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/dslgen/dslgen.go
@@ -0,0 +1,53 @@
+// +build generate
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+)
+
+func main() {
+ // See #23.
+
+ data, err := dirToBytes("../dsl/fluent")
+ if err != nil {
+ panic(err)
+ }
+
+ f, err := os.Create("./dsl_sources.go")
+ if err != nil {
+ panic(err)
+ }
+ defer f.Close()
+
+ fmt.Fprintf(f, `package dslgen
+
+var Fluent = []byte(%q)
+`, string(data))
+}
+
+func dirToBytes(dir string) ([]byte, error) {
+ files, err := ioutil.ReadDir(dir)
+ if err != nil {
+ return nil, err
+ }
+
+ var buf bytes.Buffer
+ for i, f := range files {
+ data, err := ioutil.ReadFile(filepath.Join(dir, f.Name()))
+ if err != nil {
+ return nil, err
+ }
+ if i != 0 {
+ newline := bytes.IndexByte(data, '\n')
+ data = data[newline:]
+ }
+ buf.Write(data)
+ buf.WriteByte('\n')
+ }
+ return buf.Bytes(), nil
+}
diff --git a/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/.gitattributes b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/.gitattributes
new file mode 100644
index 0000000..6f95229
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/.gitattributes
@@ -0,0 +1,2 @@
+# To prevent CRLF breakages on Windows for fragile files, like testdata.
+* -text
diff --git a/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/LICENSE b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/LICENSE
new file mode 100644
index 0000000..a06c5eb
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2017, Daniel Martí. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of the copyright holder nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/README.md b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/README.md
new file mode 100644
index 0000000..12cb0fd
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/README.md
@@ -0,0 +1,55 @@
+# gogrep
+
+ go get mvdan.cc/gogrep
+
+Search for Go code using syntax trees. Work in progress.
+
+ gogrep -x 'if $x != nil { return $x, $*_ }'
+
+### Instructions
+
+ usage: gogrep commands [packages]
+
+A command is of the form "-A pattern", where -A is one of:
+
+ -x find all nodes matching a pattern
+ -g discard nodes not matching a pattern
+ -v discard nodes matching a pattern
+ -a filter nodes by certain attributes
+ -s substitute with a given syntax tree
+ -w write source back to disk or stdout
+
+A pattern is a piece of Go code which may include wildcards. It can be:
+
+ a statement (many if split by semicolons)
+ an expression (many if split by commas)
+ a type expression
+ a top-level declaration (var, func, const)
+ an entire file
+
+Wildcards consist of `$` and a name. All wildcards with the same name
+within an expression must match the same node, excluding "_". Example:
+
+ $x.$_ = $x // assignment of self to a field in self
+
+If `*` is before the name, it will match any number of nodes. Example:
+
+ fmt.Fprintf(os.Stdout, $*_) // all Fprintfs on stdout
+
+`*` can also be used to match optional nodes, like:
+
+ for $*_ { $*_ } // will match all for loops
+ if $*_; $b { $*_ } // will match all ifs with condition $b
+
+Regexes can also be used to match certain identifier names only. The
+`.*` pattern can be used to match all identifiers. Example:
+
+ fmt.$(_ /Fprint.*/)(os.Stdout, $*_) // all Fprint* on stdout
+
+The nodes resulting from applying the commands will be printed line by
+line to standard output.
+
+Here are two simple examples of the -a operand:
+
+ gogrep -x '$x + $y' // will match both numerical and string "+" operations
+ gogrep -x '$x + $y' -a 'type(string)' // matches only string concatenations
diff --git a/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/kludge.go b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/kludge.go
new file mode 100644
index 0000000..f366af8
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/kludge.go
@@ -0,0 +1,61 @@
+package gogrep
+
+import (
+ "go/ast"
+ "go/token"
+ "go/types"
+)
+
+// This is an ugly way to use gogrep as a library.
+// It can go away when there will be another option.
+
+// Parse creates a gogrep pattern out of a given string expression.
+func Parse(fset *token.FileSet, expr string) (*Pattern, error) {
+ m := matcher{
+ fset: fset,
+ Info: &types.Info{},
+ }
+ node, err := m.parseExpr(expr)
+ if err != nil {
+ return nil, err
+ }
+ return &Pattern{m: &m, Expr: node}, nil
+}
+
+// Pattern is a compiled gogrep pattern.
+type Pattern struct {
+ Expr ast.Node
+ m *matcher
+}
+
+// MatchData describes a successful pattern match.
+type MatchData struct {
+ Node ast.Node
+ Values map[string]ast.Node
+}
+
+// MatchNode calls cb if n matches a pattern.
+func (p *Pattern) MatchNode(n ast.Node, cb func(MatchData)) {
+ p.m.values = map[string]ast.Node{}
+ if p.m.node(p.Expr, n) {
+ cb(MatchData{
+ Values: p.m.values,
+ Node: n,
+ })
+ }
+}
+
+// Match calls cb for any pattern match found in n.
+func (p *Pattern) Match(n ast.Node, cb func(MatchData)) {
+ cmd := exprCmd{name: "x", value: p.Expr}
+ matches := p.m.cmdRange(cmd, []submatch{{
+ values: map[string]ast.Node{},
+ node: n,
+ }})
+ for _, match := range matches {
+ cb(MatchData{
+ Values: match.values,
+ Node: match.node,
+ })
+ }
+}
diff --git a/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/load.go b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/load.go
new file mode 100644
index 0000000..09ab3fd
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/load.go
@@ -0,0 +1,72 @@
+// Copyright (c) 2017, Daniel Martí
+// See LICENSE for licensing information
+
+package gogrep
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+
+ "golang.org/x/tools/go/packages"
+)
+
+func (m *matcher) load(wd string, args ...string) ([]*packages.Package, error) {
+ mode := packages.NeedName | packages.NeedImports | packages.NeedSyntax |
+ packages.NeedTypes | packages.NeedTypesInfo
+ if m.recursive { // need the syntax trees for the dependencies too
+ mode |= packages.NeedDeps
+ }
+ cfg := &packages.Config{
+ Mode: mode,
+ Dir: wd,
+ Fset: m.fset,
+ Tests: m.tests,
+ }
+ pkgs, err := packages.Load(cfg, args...)
+ if err != nil {
+ return nil, err
+ }
+ jointErr := ""
+ packages.Visit(pkgs, nil, func(pkg *packages.Package) {
+ for _, err := range pkg.Errors {
+ jointErr += err.Error() + "\n"
+ }
+ })
+ if jointErr != "" {
+ return nil, fmt.Errorf("%s", jointErr)
+ }
+
+ // Make a sorted list of the packages, including transitive dependencies
+ // if recurse is true.
+ byPath := make(map[string]*packages.Package)
+ var addDeps func(*packages.Package)
+ addDeps = func(pkg *packages.Package) {
+ if strings.HasSuffix(pkg.PkgPath, ".test") {
+ // don't add recursive test deps
+ return
+ }
+ for _, imp := range pkg.Imports {
+ if _, ok := byPath[imp.PkgPath]; ok {
+ continue // seen; avoid recursive call
+ }
+ byPath[imp.PkgPath] = imp
+ addDeps(imp)
+ }
+ }
+ for _, pkg := range pkgs {
+ byPath[pkg.PkgPath] = pkg
+ if m.recursive {
+ // add all dependencies once
+ addDeps(pkg)
+ }
+ }
+ pkgs = pkgs[:0]
+ for _, pkg := range byPath {
+ pkgs = append(pkgs, pkg)
+ }
+ sort.Slice(pkgs, func(i, j int) bool {
+ return pkgs[i].PkgPath < pkgs[j].PkgPath
+ })
+ return pkgs, nil
+}
diff --git a/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/main.go b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/main.go
new file mode 100644
index 0000000..004cb32
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/main.go
@@ -0,0 +1,332 @@
+// Copyright (c) 2017, Daniel Martí
+// See LICENSE for licensing information
+
+package gogrep
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/printer"
+ "go/token"
+ "go/types"
+ "io"
+ "os"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var usage = func() {
+ fmt.Fprint(os.Stderr, `usage: gogrep commands [packages]
+
+gogrep performs a query on the given Go packages.
+
+ -r search dependencies recursively too
+ -tests search test files too (and direct test deps, with -r)
+
+A command is one of the following:
+
+ -x pattern find all nodes matching a pattern
+ -g pattern discard nodes not matching a pattern
+ -v pattern discard nodes matching a pattern
+ -a attribute discard nodes without an attribute
+ -s pattern substitute with a given syntax tree
+ -p number navigate up a number of node parents
+ -w write the entire source code back
+
+A pattern is a piece of Go code which may include dollar expressions. It can be
+a number of statements, a number of expressions, a declaration, or an entire
+file.
+
+A dollar expression consist of '$' and a name. Dollar expressions with the same
+name within a query always match the same node, excluding "_". Example:
+
+ -x '$x.$_ = $x' # assignment of self to a field in self
+
+If '*' is before the name, it will match any number of nodes. Example:
+
+ -x 'fmt.Fprintf(os.Stdout, $*_)' # all Fprintfs on stdout
+
+By default, the resulting nodes will be printed one per line to standard output.
+To update the input files, use -w.
+`)
+}
+
+func main() {
+ m := matcher{
+ out: os.Stdout,
+ ctx: &build.Default,
+ }
+ err := m.fromArgs(".", os.Args[1:])
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+}
+
+type matcher struct {
+ out io.Writer
+ ctx *build.Context
+
+ fset *token.FileSet
+
+ parents map[ast.Node]ast.Node
+
+ recursive, tests bool
+ aggressive bool
+
+ // information about variables (wildcards), by id (which is an
+ // integer starting at 0)
+ vars []varInfo
+
+ // node values recorded by name, excluding "_" (used only by the
+ // actual matching phase)
+ values map[string]ast.Node
+ scope *types.Scope
+
+ *types.Info
+ stdImporter types.Importer
+}
+
+type varInfo struct {
+ name string
+ any bool
+}
+
+func (m *matcher) info(id int) varInfo {
+ if id < 0 {
+ return varInfo{}
+ }
+ return m.vars[id]
+}
+
+type exprCmd struct {
+ name string
+ src string
+ value interface{}
+}
+
+type strCmdFlag struct {
+ name string
+ cmds *[]exprCmd
+}
+
+func (o *strCmdFlag) String() string { return "" }
+func (o *strCmdFlag) Set(val string) error {
+ *o.cmds = append(*o.cmds, exprCmd{name: o.name, src: val})
+ return nil
+}
+
+type boolCmdFlag struct {
+ name string
+ cmds *[]exprCmd
+}
+
+func (o *boolCmdFlag) String() string { return "" }
+func (o *boolCmdFlag) Set(val string) error {
+ if val != "true" {
+ return fmt.Errorf("flag can only be true")
+ }
+ *o.cmds = append(*o.cmds, exprCmd{name: o.name})
+ return nil
+}
+func (o *boolCmdFlag) IsBoolFlag() bool { return true }
+
+func (m *matcher) fromArgs(wd string, args []string) error {
+ m.fset = token.NewFileSet()
+ cmds, args, err := m.parseCmds(args)
+ if err != nil {
+ return err
+ }
+ pkgs, err := m.load(wd, args...)
+ if err != nil {
+ return err
+ }
+ var all []ast.Node
+ for _, pkg := range pkgs {
+ m.Info = pkg.TypesInfo
+ nodes := make([]ast.Node, len(pkg.Syntax))
+ for i, f := range pkg.Syntax {
+ nodes[i] = f
+ }
+ all = append(all, m.matches(cmds, nodes)...)
+ }
+ for _, n := range all {
+ fpos := m.fset.Position(n.Pos())
+ if strings.HasPrefix(fpos.Filename, wd) {
+ fpos.Filename = fpos.Filename[len(wd)+1:]
+ }
+ fmt.Fprintf(m.out, "%v: %s\n", fpos, singleLinePrint(n))
+ }
+ return nil
+}
+
+func (m *matcher) parseCmds(args []string) ([]exprCmd, []string, error) {
+ flagSet := flag.NewFlagSet("gogrep", flag.ExitOnError)
+ flagSet.Usage = usage
+ flagSet.BoolVar(&m.recursive, "r", false, "search dependencies recursively too")
+ flagSet.BoolVar(&m.tests, "tests", false, "search test files too (and direct test deps, with -r)")
+
+ var cmds []exprCmd
+ flagSet.Var(&strCmdFlag{
+ name: "x",
+ cmds: &cmds,
+ }, "x", "")
+ flagSet.Var(&strCmdFlag{
+ name: "g",
+ cmds: &cmds,
+ }, "g", "")
+ flagSet.Var(&strCmdFlag{
+ name: "v",
+ cmds: &cmds,
+ }, "v", "")
+ flagSet.Var(&strCmdFlag{
+ name: "a",
+ cmds: &cmds,
+ }, "a", "")
+ flagSet.Var(&strCmdFlag{
+ name: "s",
+ cmds: &cmds,
+ }, "s", "")
+ flagSet.Var(&strCmdFlag{
+ name: "p",
+ cmds: &cmds,
+ }, "p", "")
+ flagSet.Var(&boolCmdFlag{
+ name: "w",
+ cmds: &cmds,
+ }, "w", "")
+ flagSet.Parse(args)
+ paths := flagSet.Args()
+
+ if len(cmds) < 1 {
+ return nil, nil, fmt.Errorf("need at least one command")
+ }
+ for i, cmd := range cmds {
+ switch cmd.name {
+ case "w":
+ continue // no expr
+ case "p":
+ n, err := strconv.Atoi(cmd.src)
+ if err != nil {
+ return nil, nil, err
+ }
+ cmds[i].value = n
+ case "a":
+ m, err := m.parseAttrs(cmd.src)
+ if err != nil {
+ return nil, nil, fmt.Errorf("cannot parse mods: %v", err)
+ }
+ cmds[i].value = m
+ default:
+ node, err := m.parseExpr(cmd.src)
+ if err != nil {
+ return nil, nil, err
+ }
+ cmds[i].value = node
+ }
+ }
+ return cmds, paths, nil
+}
+
+type bufferJoinLines struct {
+ bytes.Buffer
+ last string
+}
+
+var rxNeedSemicolon = regexp.MustCompile(`([])}a-zA-Z0-9"'` + "`" + `]|\+\+|--)$`)
+
+func (b *bufferJoinLines) Write(p []byte) (n int, err error) {
+ if string(p) == "\n" {
+ if b.last == "\n" {
+ return 1, nil
+ }
+ if rxNeedSemicolon.MatchString(b.last) {
+ b.Buffer.WriteByte(';')
+ }
+ b.Buffer.WriteByte(' ')
+ b.last = "\n"
+ return 1, nil
+ }
+ p = bytes.Trim(p, "\t")
+ n, err = b.Buffer.Write(p)
+ b.last = string(p)
+ return
+}
+
+func (b *bufferJoinLines) String() string {
+ return strings.TrimSuffix(b.Buffer.String(), "; ")
+}
+
+// inspect is like ast.Inspect, but it supports our extra nodeList Node
+// type (only at the top level).
+func inspect(node ast.Node, fn func(ast.Node) bool) {
+ // ast.Walk barfs on ast.Node types it doesn't know, so
+ // do the first level manually here
+ list, ok := node.(nodeList)
+ if !ok {
+ ast.Inspect(node, fn)
+ return
+ }
+ if !fn(list) {
+ return
+ }
+ for i := 0; i < list.len(); i++ {
+ ast.Inspect(list.at(i), fn)
+ }
+ fn(nil)
+}
+
+var emptyFset = token.NewFileSet()
+
+func singleLinePrint(node ast.Node) string {
+ var buf bufferJoinLines
+ inspect(node, func(node ast.Node) bool {
+ bl, ok := node.(*ast.BasicLit)
+ if !ok || bl.Kind != token.STRING {
+ return true
+ }
+ if !strings.HasPrefix(bl.Value, "`") {
+ return true
+ }
+ if !strings.Contains(bl.Value, "\n") {
+ return true
+ }
+ bl.Value = strconv.Quote(bl.Value[1 : len(bl.Value)-1])
+ return true
+ })
+ printNode(&buf, emptyFset, node)
+ return buf.String()
+}
+
+func printNode(w io.Writer, fset *token.FileSet, node ast.Node) {
+ switch x := node.(type) {
+ case exprList:
+ if len(x) == 0 {
+ return
+ }
+ printNode(w, fset, x[0])
+ for _, n := range x[1:] {
+ fmt.Fprintf(w, ", ")
+ printNode(w, fset, n)
+ }
+ case stmtList:
+ if len(x) == 0 {
+ return
+ }
+ printNode(w, fset, x[0])
+ for _, n := range x[1:] {
+ fmt.Fprintf(w, "; ")
+ printNode(w, fset, n)
+ }
+ default:
+ err := printer.Fprint(w, fset, node)
+ if err != nil && strings.Contains(err.Error(), "go/printer: unsupported node type") {
+ // Should never happen, but make it obvious when it does.
+ panic(fmt.Errorf("cannot print node %T: %v", node, err))
+ }
+ }
+}
diff --git a/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/match.go b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/match.go
new file mode 100644
index 0000000..08b53d8
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/match.go
@@ -0,0 +1,1108 @@
+// Copyright (c) 2017, Daniel Martí
+// See LICENSE for licensing information
+
+package gogrep
+
+import (
+ "fmt"
+ "go/ast"
+ "go/importer"
+ "go/token"
+ "go/types"
+ "regexp"
+ "strconv"
+)
+
+func (m *matcher) matches(cmds []exprCmd, nodes []ast.Node) []ast.Node {
+ m.parents = make(map[ast.Node]ast.Node)
+ m.fillParents(nodes...)
+ initial := make([]submatch, len(nodes))
+ for i, node := range nodes {
+ initial[i].node = node
+ initial[i].values = make(map[string]ast.Node)
+ }
+ final := m.submatches(cmds, initial)
+ finalNodes := make([]ast.Node, len(final))
+ for i := range finalNodes {
+ finalNodes[i] = final[i].node
+ }
+ return finalNodes
+}
+
+func (m *matcher) fillParents(nodes ...ast.Node) {
+ stack := make([]ast.Node, 1, 32)
+ for _, node := range nodes {
+ inspect(node, func(node ast.Node) bool {
+ if node == nil {
+ stack = stack[:len(stack)-1]
+ return true
+ }
+ if _, ok := node.(nodeList); !ok {
+ m.parents[node] = stack[len(stack)-1]
+ }
+ stack = append(stack, node)
+ return true
+ })
+ }
+}
+
+type submatch struct {
+ node ast.Node
+ values map[string]ast.Node
+}
+
+func valsCopy(values map[string]ast.Node) map[string]ast.Node {
+ v2 := make(map[string]ast.Node, len(values))
+ for k, v := range values {
+ v2[k] = v
+ }
+ return v2
+}
+
+func (m *matcher) submatches(cmds []exprCmd, subs []submatch) []submatch {
+ if len(cmds) == 0 {
+ return subs
+ }
+ cmd := cmds[0]
+ var fn func(exprCmd, []submatch) []submatch
+ switch cmd.name {
+ case "x":
+ fn = m.cmdRange
+ case "g":
+ fn = m.cmdFilter(true)
+ case "v":
+ fn = m.cmdFilter(false)
+ case "s":
+ fn = m.cmdSubst
+ case "a":
+ fn = m.cmdAttr
+ case "p":
+ fn = m.cmdParents
+ case "w":
+ if len(cmds) > 1 {
+ panic("-w must be the last command")
+ }
+ fn = m.cmdWrite
+ default:
+ panic(fmt.Sprintf("unknown command: %q", cmd.name))
+ }
+ return m.submatches(cmds[1:], fn(cmd, subs))
+}
+
+func (m *matcher) cmdRange(cmd exprCmd, subs []submatch) []submatch {
+ var matches []submatch
+ seen := map[nodePosHash]bool{}
+
+ // The values context for each new submatch must be a new copy
+ // from its parent submatch. If we don't do this copy, all the
+ // submatches would share the same map and have side effects.
+ var startValues map[string]ast.Node
+
+ match := func(exprNode, node ast.Node) {
+ if node == nil {
+ return
+ }
+ m.values = valsCopy(startValues)
+ found := m.topNode(exprNode, node)
+ if found == nil {
+ return
+ }
+ hash := posHash(found)
+ if !seen[hash] {
+ matches = append(matches, submatch{
+ node: found,
+ values: m.values,
+ })
+ seen[hash] = true
+ }
+ }
+ for _, sub := range subs {
+ startValues = valsCopy(sub.values)
+ m.walkWithLists(cmd.value.(ast.Node), sub.node, match)
+ }
+ return matches
+}
+
+func (m *matcher) cmdFilter(wantAny bool) func(exprCmd, []submatch) []submatch {
+ return func(cmd exprCmd, subs []submatch) []submatch {
+ var matches []submatch
+ any := false
+ match := func(exprNode, node ast.Node) {
+ if node == nil {
+ return
+ }
+ found := m.topNode(exprNode, node)
+ if found != nil {
+ any = true
+ }
+ }
+ for _, sub := range subs {
+ any = false
+ m.values = sub.values
+ m.walkWithLists(cmd.value.(ast.Node), sub.node, match)
+ if any == wantAny {
+ matches = append(matches, sub)
+ }
+ }
+ return matches
+ }
+}
+
+func (m *matcher) cmdAttr(cmd exprCmd, subs []submatch) []submatch {
+ var matches []submatch
+ for _, sub := range subs {
+ m.values = sub.values
+ if m.attrApplies(sub.node, cmd.value.(attribute)) {
+ matches = append(matches, sub)
+ }
+ }
+ return matches
+}
+
+func (m *matcher) cmdParents(cmd exprCmd, subs []submatch) []submatch {
+ for i := range subs {
+ sub := &subs[i]
+ reps := cmd.value.(int)
+ for j := 0; j < reps; j++ {
+ sub.node = m.parentOf(sub.node)
+ }
+ }
+ return subs
+}
+
+func (m *matcher) attrApplies(node ast.Node, attr interface{}) bool {
+ if rx, ok := attr.(*regexp.Regexp); ok {
+ if exprStmt, ok := node.(*ast.ExprStmt); ok {
+ // since we prefer matching entire statements, get the
+ // ident from the ExprStmt
+ node = exprStmt.X
+ }
+ ident, ok := node.(*ast.Ident)
+ return ok && rx.MatchString(ident.Name)
+ }
+ expr, _ := node.(ast.Expr)
+ if expr == nil {
+ return false // only exprs have types
+ }
+ t := m.Info.TypeOf(expr)
+ if t == nil {
+ return false // an expr, but no type?
+ }
+ tv := m.Info.Types[expr]
+ switch x := attr.(type) {
+ case typeCheck:
+ want := m.resolveType(m.scope, x.expr)
+ switch {
+ case x.op == "type" && !types.Identical(t, want):
+ return false
+ case x.op == "asgn" && !types.AssignableTo(t, want):
+ return false
+ case x.op == "conv" && !types.ConvertibleTo(t, want):
+ return false
+ }
+ case typProperty:
+ switch {
+ case x == "comp" && !types.Comparable(t):
+ return false
+ case x == "addr" && !tv.Addressable():
+ return false
+ }
+ case typUnderlying:
+ u := t.Underlying()
+ uok := true
+ switch x {
+ case "basic":
+ _, uok = u.(*types.Basic)
+ case "array":
+ _, uok = u.(*types.Array)
+ case "slice":
+ _, uok = u.(*types.Slice)
+ case "struct":
+ _, uok = u.(*types.Struct)
+ case "interface":
+ _, uok = u.(*types.Interface)
+ case "pointer":
+ _, uok = u.(*types.Pointer)
+ case "func":
+ _, uok = u.(*types.Signature)
+ case "map":
+ _, uok = u.(*types.Map)
+ case "chan":
+ _, uok = u.(*types.Chan)
+ }
+ if !uok {
+ return false
+ }
+ }
+ return true
+}
+
+func (m *matcher) walkWithLists(exprNode, node ast.Node, fn func(exprNode, node ast.Node)) {
+ visit := func(node ast.Node) bool {
+ fn(exprNode, node)
+ for _, list := range nodeLists(node) {
+ fn(exprNode, list)
+ if id := m.wildAnyIdent(exprNode); id != nil {
+ // so that "$*a" will match "a, b"
+ fn(exprList([]ast.Expr{id}), list)
+ // so that "$*a" will match "a; b"
+ fn(toStmtList(id), list)
+ }
+ }
+ return true
+ }
+ inspect(node, visit)
+}
+
+func (m *matcher) topNode(exprNode, node ast.Node) ast.Node {
+ sts1, ok1 := exprNode.(stmtList)
+ sts2, ok2 := node.(stmtList)
+ if ok1 && ok2 {
+ // allow a partial match at the top level
+ return m.nodes(sts1, sts2, true)
+ }
+ if m.node(exprNode, node) {
+ return node
+ }
+ return nil
+}
+
+// optNode is like node, but for those nodes that can be nil and are not
+// part of a list. For example, init and post statements in a for loop.
+func (m *matcher) optNode(expr, node ast.Node) bool {
+ if ident := m.wildAnyIdent(expr); ident != nil {
+ if m.node(toStmtList(ident), toStmtList(node)) {
+ return true
+ }
+ }
+ return m.node(expr, node)
+}
+
+func (m *matcher) node(expr, node ast.Node) bool {
+ switch node.(type) {
+ case *ast.File, *ast.FuncType, *ast.BlockStmt, *ast.IfStmt,
+ *ast.SwitchStmt, *ast.TypeSwitchStmt, *ast.CaseClause,
+ *ast.CommClause, *ast.ForStmt, *ast.RangeStmt:
+ if scope := m.Info.Scopes[node]; scope != nil {
+ m.scope = scope
+ }
+ }
+ if !m.aggressive {
+ if expr == nil || node == nil {
+ return expr == node
+ }
+ } else {
+ if expr == nil && node == nil {
+ return true
+ }
+ if node == nil {
+ expr, node = node, expr
+ }
+ }
+ switch x := expr.(type) {
+ case nil: // only in aggressive mode
+ y, ok := node.(*ast.Ident)
+ return ok && y.Name == "_"
+
+ case *ast.File:
+ y, ok := node.(*ast.File)
+ if !ok || !m.node(x.Name, y.Name) || len(x.Decls) != len(y.Decls) ||
+ len(x.Imports) != len(y.Imports) {
+ return false
+ }
+ for i, decl := range x.Decls {
+ if !m.node(decl, y.Decls[i]) {
+ return false
+ }
+ }
+ for i, imp := range x.Imports {
+ if !m.node(imp, y.Imports[i]) {
+ return false
+ }
+ }
+ return true
+
+ case *ast.Ident:
+ y, yok := node.(*ast.Ident)
+ if !isWildName(x.Name) {
+ // not a wildcard
+ return yok && x.Name == y.Name
+ }
+ if _, ok := node.(ast.Node); !ok {
+ return false // to not include our extra node types
+ }
+ id := fromWildName(x.Name)
+ info := m.info(id)
+ if info.any {
+ return false
+ }
+ if info.name == "_" {
+ // values are discarded, matches anything
+ return true
+ }
+ prev, ok := m.values[info.name]
+ if !ok {
+ // first occurrence, record value
+ m.values[info.name] = node
+ return true
+ }
+ // multiple uses must match
+ return m.node(prev, node)
+
+ // lists (ys are generated by us while walking)
+ case exprList:
+ y, ok := node.(exprList)
+ return ok && m.exprs(x, y)
+ case stmtList:
+ y, ok := node.(stmtList)
+ return ok && m.stmts(x, y)
+
+ // lits
+ case *ast.BasicLit:
+ y, ok := node.(*ast.BasicLit)
+ return ok && x.Kind == y.Kind && x.Value == y.Value
+ case *ast.CompositeLit:
+ y, ok := node.(*ast.CompositeLit)
+ return ok && m.node(x.Type, y.Type) && m.exprs(x.Elts, y.Elts)
+ case *ast.FuncLit:
+ y, ok := node.(*ast.FuncLit)
+ return ok && m.node(x.Type, y.Type) && m.node(x.Body, y.Body)
+
+ // types
+ case *ast.ArrayType:
+ y, ok := node.(*ast.ArrayType)
+ return ok && m.node(x.Len, y.Len) && m.node(x.Elt, y.Elt)
+ case *ast.MapType:
+ y, ok := node.(*ast.MapType)
+ return ok && m.node(x.Key, y.Key) && m.node(x.Value, y.Value)
+ case *ast.StructType:
+ y, ok := node.(*ast.StructType)
+ return ok && m.fields(x.Fields, y.Fields)
+ case *ast.Field:
+ // TODO: tags?
+ y, ok := node.(*ast.Field)
+ if !ok {
+ return false
+ }
+ if len(x.Names) == 0 && x.Tag == nil && m.node(x.Type, y) {
+ // Allow $var to match a field.
+ return true
+ }
+ return m.idents(x.Names, y.Names) && m.node(x.Type, y.Type)
+ case *ast.FuncType:
+ y, ok := node.(*ast.FuncType)
+ return ok && m.fields(x.Params, y.Params) &&
+ m.fields(x.Results, y.Results)
+ case *ast.InterfaceType:
+ y, ok := node.(*ast.InterfaceType)
+ return ok && m.fields(x.Methods, y.Methods)
+ case *ast.ChanType:
+ y, ok := node.(*ast.ChanType)
+ return ok && x.Dir == y.Dir && m.node(x.Value, y.Value)
+
+ // other exprs
+ case *ast.Ellipsis:
+ y, ok := node.(*ast.Ellipsis)
+ return ok && m.node(x.Elt, y.Elt)
+ case *ast.ParenExpr:
+ y, ok := node.(*ast.ParenExpr)
+ return ok && m.node(x.X, y.X)
+ case *ast.UnaryExpr:
+ y, ok := node.(*ast.UnaryExpr)
+ return ok && x.Op == y.Op && m.node(x.X, y.X)
+ case *ast.BinaryExpr:
+ y, ok := node.(*ast.BinaryExpr)
+ return ok && x.Op == y.Op && m.node(x.X, y.X) && m.node(x.Y, y.Y)
+ case *ast.CallExpr:
+ y, ok := node.(*ast.CallExpr)
+ return ok && m.node(x.Fun, y.Fun) && m.exprs(x.Args, y.Args) &&
+ bothValid(x.Ellipsis, y.Ellipsis)
+ case *ast.KeyValueExpr:
+ y, ok := node.(*ast.KeyValueExpr)
+ return ok && m.node(x.Key, y.Key) && m.node(x.Value, y.Value)
+ case *ast.StarExpr:
+ y, ok := node.(*ast.StarExpr)
+ return ok && m.node(x.X, y.X)
+ case *ast.SelectorExpr:
+ y, ok := node.(*ast.SelectorExpr)
+ return ok && m.node(x.X, y.X) && m.node(x.Sel, y.Sel)
+ case *ast.IndexExpr:
+ y, ok := node.(*ast.IndexExpr)
+ return ok && m.node(x.X, y.X) && m.node(x.Index, y.Index)
+ case *ast.SliceExpr:
+ y, ok := node.(*ast.SliceExpr)
+ return ok && m.node(x.X, y.X) && m.node(x.Low, y.Low) &&
+ m.node(x.High, y.High) && m.node(x.Max, y.Max)
+ case *ast.TypeAssertExpr:
+ y, ok := node.(*ast.TypeAssertExpr)
+ return ok && m.node(x.X, y.X) && m.node(x.Type, y.Type)
+
+ // decls
+ case *ast.GenDecl:
+ y, ok := node.(*ast.GenDecl)
+ return ok && x.Tok == y.Tok && m.specs(x.Specs, y.Specs)
+ case *ast.FuncDecl:
+ y, ok := node.(*ast.FuncDecl)
+ return ok && m.fields(x.Recv, y.Recv) && m.node(x.Name, y.Name) &&
+ m.node(x.Type, y.Type) && m.node(x.Body, y.Body)
+
+ // specs
+ case *ast.ValueSpec:
+ y, ok := node.(*ast.ValueSpec)
+ if !ok || !m.node(x.Type, y.Type) {
+ return false
+ }
+ if m.aggressive && len(x.Names) == 1 {
+ for i := range y.Names {
+ if m.node(x.Names[i], y.Names[i]) &&
+ (x.Values == nil || m.node(x.Values[i], y.Values[i])) {
+ return true
+ }
+ }
+ }
+ return m.idents(x.Names, y.Names) && m.exprs(x.Values, y.Values)
+
+ // stmt bridge nodes
+ case *ast.ExprStmt:
+ if id, ok := x.X.(*ast.Ident); ok && isWildName(id.Name) {
+ // prefer matching $x as a statement, as it's
+ // the parent
+ return m.node(id, node)
+ }
+ y, ok := node.(*ast.ExprStmt)
+ return ok && m.node(x.X, y.X)
+ case *ast.DeclStmt:
+ y, ok := node.(*ast.DeclStmt)
+ return ok && m.node(x.Decl, y.Decl)
+
+ // stmts
+ case *ast.EmptyStmt:
+ _, ok := node.(*ast.EmptyStmt)
+ return ok
+ case *ast.LabeledStmt:
+ y, ok := node.(*ast.LabeledStmt)
+ return ok && m.node(x.Label, y.Label) && m.node(x.Stmt, y.Stmt)
+ case *ast.SendStmt:
+ y, ok := node.(*ast.SendStmt)
+ return ok && m.node(x.Chan, y.Chan) && m.node(x.Value, y.Value)
+ case *ast.IncDecStmt:
+ y, ok := node.(*ast.IncDecStmt)
+ return ok && x.Tok == y.Tok && m.node(x.X, y.X)
+ case *ast.AssignStmt:
+ y, ok := node.(*ast.AssignStmt)
+ if !m.aggressive {
+ return ok && x.Tok == y.Tok &&
+ m.exprs(x.Lhs, y.Lhs) && m.exprs(x.Rhs, y.Rhs)
+ }
+ if ok {
+ return m.exprs(x.Lhs, y.Lhs) && m.exprs(x.Rhs, y.Rhs)
+ }
+ vs, ok := node.(*ast.ValueSpec)
+ return ok && m.nodesMatch(exprList(x.Lhs), identList(vs.Names)) &&
+ m.exprs(x.Rhs, vs.Values)
+ case *ast.GoStmt:
+ y, ok := node.(*ast.GoStmt)
+ return ok && m.node(x.Call, y.Call)
+ case *ast.DeferStmt:
+ y, ok := node.(*ast.DeferStmt)
+ return ok && m.node(x.Call, y.Call)
+ case *ast.ReturnStmt:
+ y, ok := node.(*ast.ReturnStmt)
+ return ok && m.exprs(x.Results, y.Results)
+ case *ast.BranchStmt:
+ y, ok := node.(*ast.BranchStmt)
+ return ok && x.Tok == y.Tok && m.node(maybeNilIdent(x.Label), maybeNilIdent(y.Label))
+ case *ast.BlockStmt:
+ if m.aggressive && m.node(stmtList(x.List), node) {
+ return true
+ }
+ y, ok := node.(*ast.BlockStmt)
+ if !ok {
+ return false
+ }
+ if x == nil || y == nil {
+ return x == y
+ }
+ return m.cases(x.List, y.List) || m.stmts(x.List, y.List)
+ case *ast.IfStmt:
+ y, ok := node.(*ast.IfStmt)
+ if !ok {
+ return false
+ }
+ condAny := m.wildAnyIdent(x.Cond)
+ if condAny != nil && x.Init == nil {
+ // if $*x { ... } on the left
+ left := toStmtList(condAny)
+ return m.node(left, toStmtList(y.Init, y.Cond)) &&
+ m.node(x.Body, y.Body) && m.optNode(x.Else, y.Else)
+ }
+ return m.optNode(x.Init, y.Init) && m.node(x.Cond, y.Cond) &&
+ m.node(x.Body, y.Body) && m.node(x.Else, y.Else)
+ case *ast.CaseClause:
+ y, ok := node.(*ast.CaseClause)
+ return ok && m.exprs(x.List, y.List) && m.stmts(x.Body, y.Body)
+ case *ast.SwitchStmt:
+ y, ok := node.(*ast.SwitchStmt)
+ if !ok {
+ return false
+ }
+ tagAny := m.wildAnyIdent(x.Tag)
+ if tagAny != nil && x.Init == nil {
+ // switch $*x { ... } on the left
+ left := toStmtList(tagAny)
+ return m.node(left, toStmtList(y.Init, y.Tag)) &&
+ m.node(x.Body, y.Body)
+ }
+ return m.optNode(x.Init, y.Init) && m.node(x.Tag, y.Tag) && m.node(x.Body, y.Body)
+ case *ast.TypeSwitchStmt:
+ y, ok := node.(*ast.TypeSwitchStmt)
+ return ok && m.optNode(x.Init, y.Init) && m.node(x.Assign, y.Assign) && m.node(x.Body, y.Body)
+ case *ast.CommClause:
+ y, ok := node.(*ast.CommClause)
+ return ok && m.node(x.Comm, y.Comm) && m.stmts(x.Body, y.Body)
+ case *ast.SelectStmt:
+ y, ok := node.(*ast.SelectStmt)
+ return ok && m.node(x.Body, y.Body)
+ case *ast.ForStmt:
+ condIdent := m.wildAnyIdent(x.Cond)
+ if condIdent != nil && x.Init == nil && x.Post == nil {
+ // "for $*x { ... }" on the left
+ left := toStmtList(condIdent)
+ // also accept RangeStmt on the right
+ switch y := node.(type) {
+ case *ast.ForStmt:
+ return m.node(left, toStmtList(y.Init, y.Cond, y.Post)) &&
+ m.node(x.Body, y.Body)
+ case *ast.RangeStmt:
+ return m.node(left, toStmtList(y.Key, y.Value, y.X)) &&
+ m.node(x.Body, y.Body)
+ default:
+ return false
+ }
+ }
+ y, ok := node.(*ast.ForStmt)
+ if !ok {
+ return false
+ }
+ return m.optNode(x.Init, y.Init) && m.node(x.Cond, y.Cond) &&
+ m.optNode(x.Post, y.Post) && m.node(x.Body, y.Body)
+ case *ast.RangeStmt:
+ y, ok := node.(*ast.RangeStmt)
+ return ok && m.node(x.Key, y.Key) && m.node(x.Value, y.Value) &&
+ m.node(x.X, y.X) && m.node(x.Body, y.Body)
+
+ case *ast.TypeSpec:
+ y, ok := node.(*ast.TypeSpec)
+ return ok && m.node(x.Name, y.Name) && m.node(x.Type, y.Type)
+
+ case *ast.FieldList:
+ // we ignore these, for now
+ return false
+ default:
+ panic(fmt.Sprintf("unexpected node: %T", x))
+ }
+}
+
+func (m *matcher) wildAnyIdent(node ast.Node) *ast.Ident {
+ switch x := node.(type) {
+ case *ast.ExprStmt:
+ return m.wildAnyIdent(x.X)
+ case *ast.Ident:
+ if !isWildName(x.Name) {
+ return nil
+ }
+ if !m.info(fromWildName(x.Name)).any {
+ return nil
+ }
+ return x
+ }
+ return nil
+}
+
+// resolveType resolves a type expression from a given scope.
+func (m *matcher) resolveType(scope *types.Scope, expr ast.Expr) types.Type {
+ switch x := expr.(type) {
+ case *ast.Ident:
+ _, obj := scope.LookupParent(x.Name, token.NoPos)
+ if obj == nil {
+ // TODO: error if all resolveType calls on a type
+ // expression fail? or perhaps resolve type expressions
+ // across the entire program?
+ return nil
+ }
+ return obj.Type()
+ case *ast.ArrayType:
+ elt := m.resolveType(scope, x.Elt)
+ if x.Len == nil {
+ return types.NewSlice(elt)
+ }
+ bl, ok := x.Len.(*ast.BasicLit)
+ if !ok || bl.Kind != token.INT {
+ panic(fmt.Sprintf("TODO: %T", x))
+ }
+ len, _ := strconv.ParseInt(bl.Value, 0, 0)
+ return types.NewArray(elt, len)
+ case *ast.StarExpr:
+ return types.NewPointer(m.resolveType(scope, x.X))
+ case *ast.ChanType:
+ dir := types.SendRecv
+ switch x.Dir {
+ case ast.SEND:
+ dir = types.SendOnly
+ case ast.RECV:
+ dir = types.RecvOnly
+ }
+ return types.NewChan(dir, m.resolveType(scope, x.Value))
+ case *ast.SelectorExpr:
+ scope = m.findScope(scope, x.X)
+ return m.resolveType(scope, x.Sel)
+ default:
+ panic(fmt.Sprintf("resolveType TODO: %T", x))
+ }
+}
+
+func (m *matcher) findScope(scope *types.Scope, expr ast.Expr) *types.Scope {
+ switch x := expr.(type) {
+ case *ast.Ident:
+ _, obj := scope.LookupParent(x.Name, token.NoPos)
+ if pkg, ok := obj.(*types.PkgName); ok {
+ return pkg.Imported().Scope()
+ }
+ // try to fall back to std
+ if m.stdImporter == nil {
+ m.stdImporter = importer.Default()
+ }
+ path := x.Name
+ if longer, ok := stdImportFixes[path]; ok {
+ path = longer
+ }
+ pkg, err := m.stdImporter.Import(path)
+ if err != nil {
+ panic(fmt.Sprintf("findScope err: %v", err))
+ }
+ return pkg.Scope()
+ default:
+ panic(fmt.Sprintf("findScope TODO: %T", x))
+ }
+}
+
+var stdImportFixes = map[string]string{
+ // go list std | grep -vE 'vendor|internal' | grep '/' | sed -r 's@^(.*)/([^/]*)$@"\2": "\1/\2",@' | sort
+ // (after commenting out the less likely duplicates)
+ "adler32": "hash/adler32",
+ "aes": "crypto/aes",
+ "ascii85": "encoding/ascii85",
+ "asn1": "encoding/asn1",
+ "ast": "go/ast",
+ "atomic": "sync/atomic",
+ "base32": "encoding/base32",
+ "base64": "encoding/base64",
+ "big": "math/big",
+ "binary": "encoding/binary",
+ "bits": "math/bits",
+ "build": "go/build",
+ "bzip2": "compress/bzip2",
+ "cgi": "net/http/cgi",
+ "cgo": "runtime/cgo",
+ "cipher": "crypto/cipher",
+ "cmplx": "math/cmplx",
+ "color": "image/color",
+ "constant": "go/constant",
+ "cookiejar": "net/http/cookiejar",
+ "crc32": "hash/crc32",
+ "crc64": "hash/crc64",
+ "csv": "encoding/csv",
+ "debug": "runtime/debug",
+ "des": "crypto/des",
+ "doc": "go/doc",
+ "draw": "image/draw",
+ "driver": "database/sql/driver",
+ "dsa": "crypto/dsa",
+ "dwarf": "debug/dwarf",
+ "ecdsa": "crypto/ecdsa",
+ "elf": "debug/elf",
+ "elliptic": "crypto/elliptic",
+ "exec": "os/exec",
+ "fcgi": "net/http/fcgi",
+ "filepath": "path/filepath",
+ "flate": "compress/flate",
+ "fnv": "hash/fnv",
+ "format": "go/format",
+ "gif": "image/gif",
+ "gob": "encoding/gob",
+ "gosym": "debug/gosym",
+ "gzip": "compress/gzip",
+ "heap": "container/heap",
+ "hex": "encoding/hex",
+ "hmac": "crypto/hmac",
+ "http": "net/http",
+ "httptest": "net/http/httptest",
+ "httptrace": "net/http/httptrace",
+ "httputil": "net/http/httputil",
+ "importer": "go/importer",
+ "iotest": "testing/iotest",
+ "ioutil": "io/ioutil",
+ "jpeg": "image/jpeg",
+ "json": "encoding/json",
+ "jsonrpc": "net/rpc/jsonrpc",
+ "list": "container/list",
+ "lzw": "compress/lzw",
+ "macho": "debug/macho",
+ "mail": "net/mail",
+ "md5": "crypto/md5",
+ "multipart": "mime/multipart",
+ "palette": "image/color/palette",
+ "parser": "go/parser",
+ "parse": "text/template/parse",
+ "pe": "debug/pe",
+ "pem": "encoding/pem",
+ "pkix": "crypto/x509/pkix",
+ "plan9obj": "debug/plan9obj",
+ "png": "image/png",
+ //"pprof": "net/http/pprof",
+ "pprof": "runtime/pprof",
+ "printer": "go/printer",
+ "quick": "testing/quick",
+ "quotedprintable": "mime/quotedprintable",
+ "race": "runtime/race",
+ //"rand": "crypto/rand",
+ "rand": "math/rand",
+ "rc4": "crypto/rc4",
+ "ring": "container/ring",
+ "rpc": "net/rpc",
+ "rsa": "crypto/rsa",
+ //"scanner": "go/scanner",
+ "scanner": "text/scanner",
+ "sha1": "crypto/sha1",
+ "sha256": "crypto/sha256",
+ "sha512": "crypto/sha512",
+ "signal": "os/signal",
+ "smtp": "net/smtp",
+ "sql": "database/sql",
+ "subtle": "crypto/subtle",
+ "suffixarray": "index/suffixarray",
+ "syntax": "regexp/syntax",
+ "syslog": "log/syslog",
+ "tabwriter": "text/tabwriter",
+ "tar": "archive/tar",
+ //"template": "html/template",
+ "template": "text/template",
+ "textproto": "net/textproto",
+ "tls": "crypto/tls",
+ "token": "go/token",
+ "trace": "runtime/trace",
+ "types": "go/types",
+ "url": "net/url",
+ "user": "os/user",
+ "utf16": "unicode/utf16",
+ "utf8": "unicode/utf8",
+ "x509": "crypto/x509",
+ "xml": "encoding/xml",
+ "zip": "archive/zip",
+ "zlib": "compress/zlib",
+}
+
+func maybeNilIdent(x *ast.Ident) ast.Node {
+ if x == nil {
+ return nil
+ }
+ return x
+}
+
+func bothValid(p1, p2 token.Pos) bool {
+ return p1.IsValid() == p2.IsValid()
+}
+
+type nodeList interface {
+ at(i int) ast.Node
+ len() int
+ slice(from, to int) nodeList
+ ast.Node
+}
+
+// nodes matches two lists of nodes. It uses a common algorithm to match
+// wildcard patterns with any number of nodes without recursion.
+func (m *matcher) nodes(ns1, ns2 nodeList, partial bool) ast.Node {
+ ns1len, ns2len := ns1.len(), ns2.len()
+ if ns1len == 0 {
+ if ns2len == 0 {
+ return ns2
+ }
+ return nil
+ }
+ partialStart, partialEnd := 0, ns2len
+ i1, i2 := 0, 0
+ next1, next2 := 0, 0
+
+ // We need to keep a copy of m.values so that we can restart
+ // with a different "any of" match while discarding any matches
+ // we found while trying it.
+ type restart struct {
+ matches map[string]ast.Node
+ next1, next2 int
+ }
+ // We need to stack these because otherwise some edge cases
+ // would not match properly. Since we have various kinds of
+ // wildcards (nodes containing them, $_, and $*_), in some cases
+ // we may have to go back and do multiple restarts to get to the
+ // right starting position.
+ var stack []restart
+ push := func(n1, n2 int) {
+ if n2 > ns2len {
+ return // would be discarded anyway
+ }
+ stack = append(stack, restart{valsCopy(m.values), n1, n2})
+ next1, next2 = n1, n2
+ }
+ pop := func() {
+ i1, i2 = next1, next2
+ m.values = stack[len(stack)-1].matches
+ stack = stack[:len(stack)-1]
+ next1, next2 = 0, 0
+ if len(stack) > 0 {
+ next1 = stack[len(stack)-1].next1
+ next2 = stack[len(stack)-1].next2
+ }
+ }
+ wildName := ""
+ wildStart := 0
+
+ // wouldMatch returns whether the current wildcard - if any -
+ // matches the nodes we are currently trying it on.
+ wouldMatch := func() bool {
+ switch wildName {
+ case "", "_":
+ return true
+ }
+ list := ns2.slice(wildStart, i2)
+ // check that it matches any nodes found elsewhere
+ prev, ok := m.values[wildName]
+ if ok && !m.node(prev, list) {
+ return false
+ }
+ m.values[wildName] = list
+ return true
+ }
+ for i1 < ns1len || i2 < ns2len {
+ if i1 < ns1len {
+ n1 := ns1.at(i1)
+ id := fromWildNode(n1)
+ info := m.info(id)
+ if info.any {
+ // keep track of where this wildcard
+ // started (if info.name == wildName,
+ // we're trying the same wildcard
+ // matching one more node)
+ if info.name != wildName {
+ wildStart = i2
+ wildName = info.name
+ }
+ // try to match zero or more at i2,
+ // restarting at i2+1 if it fails
+ push(i1, i2+1)
+ i1++
+ continue
+ }
+ if partial && i1 == 0 {
+ // let "b; c" match "a; b; c"
+ // (simulates a $*_ at the beginning)
+ partialStart = i2
+ push(i1, i2+1)
+ }
+ if i2 < ns2len && wouldMatch() && m.node(n1, ns2.at(i2)) {
+ wildName = ""
+ // ordinary match
+ i1++
+ i2++
+ continue
+ }
+ }
+ if partial && i1 == ns1len && wildName == "" {
+ partialEnd = i2
+ break // let "b; c" match "b; c; d"
+ }
+ // mismatch, try to restart
+ if 0 < next2 && next2 <= ns2len && (i1 != next1 || i2 != next2) {
+ pop()
+ continue
+ }
+ return nil
+ }
+ if !wouldMatch() {
+ return nil
+ }
+ return ns2.slice(partialStart, partialEnd)
+}
+
+func (m *matcher) nodesMatch(list1, list2 nodeList) bool {
+ return m.nodes(list1, list2, false) != nil
+}
+
+func (m *matcher) exprs(exprs1, exprs2 []ast.Expr) bool {
+ return m.nodesMatch(exprList(exprs1), exprList(exprs2))
+}
+
+func (m *matcher) idents(ids1, ids2 []*ast.Ident) bool {
+ return m.nodesMatch(identList(ids1), identList(ids2))
+}
+
+func toStmtList(nodes ...ast.Node) stmtList {
+ var stmts []ast.Stmt
+ for _, node := range nodes {
+ switch x := node.(type) {
+ case nil:
+ case ast.Stmt:
+ stmts = append(stmts, x)
+ case ast.Expr:
+ stmts = append(stmts, &ast.ExprStmt{X: x})
+ default:
+ panic(fmt.Sprintf("unexpected node type: %T", x))
+ }
+ }
+ return stmtList(stmts)
+}
+
+func (m *matcher) cases(stmts1, stmts2 []ast.Stmt) bool {
+ for _, stmt := range stmts2 {
+ switch stmt.(type) {
+ case *ast.CaseClause, *ast.CommClause:
+ default:
+ return false
+ }
+ }
+ var left []*ast.Ident
+ for _, stmt := range stmts1 {
+ var expr ast.Expr
+ var bstmt ast.Stmt
+ switch x := stmt.(type) {
+ case *ast.CaseClause:
+ if len(x.List) != 1 || len(x.Body) != 1 {
+ return false
+ }
+ expr, bstmt = x.List[0], x.Body[0]
+ case *ast.CommClause:
+ if x.Comm == nil || len(x.Body) != 1 {
+ return false
+ }
+ if commExpr, ok := x.Comm.(*ast.ExprStmt); ok {
+ expr = commExpr.X
+ }
+ bstmt = x.Body[0]
+ default:
+ return false
+ }
+ xs, ok := bstmt.(*ast.ExprStmt)
+ if !ok {
+ return false
+ }
+ bodyIdent, ok := xs.X.(*ast.Ident)
+ if !ok || bodyIdent.Name != "gogrep_body" {
+ return false
+ }
+ id, ok := expr.(*ast.Ident)
+ if !ok || !isWildName(id.Name) {
+ return false
+ }
+ left = append(left, id)
+ }
+ return m.nodesMatch(identList(left), stmtList(stmts2))
+}
+
+func (m *matcher) stmts(stmts1, stmts2 []ast.Stmt) bool {
+ return m.nodesMatch(stmtList(stmts1), stmtList(stmts2))
+}
+
+func (m *matcher) specs(specs1, specs2 []ast.Spec) bool {
+ return m.nodesMatch(specList(specs1), specList(specs2))
+}
+
+func (m *matcher) fields(fields1, fields2 *ast.FieldList) bool {
+ if fields1 == nil || fields2 == nil {
+ return fields1 == fields2
+ }
+ return m.nodesMatch(fieldList(fields1.List), fieldList(fields2.List))
+}
+
+func fromWildNode(node ast.Node) int {
+ switch node := node.(type) {
+ case *ast.Ident:
+ return fromWildName(node.Name)
+ case *ast.ExprStmt:
+ return fromWildNode(node.X)
+ case *ast.Field:
+ // Allow $var to represent an entire field; the lone identifier
+ // gets picked up as an anonymous field.
+ if len(node.Names) == 0 && node.Tag == nil {
+ return fromWildNode(node.Type)
+ }
+ }
+ return -1
+}
+
+func nodeLists(n ast.Node) []nodeList {
+ var lists []nodeList
+ addList := func(list nodeList) {
+ if list.len() > 0 {
+ lists = append(lists, list)
+ }
+ }
+ switch x := n.(type) {
+ case nodeList:
+ addList(x)
+ case *ast.CompositeLit:
+ addList(exprList(x.Elts))
+ case *ast.CallExpr:
+ addList(exprList(x.Args))
+ case *ast.AssignStmt:
+ addList(exprList(x.Lhs))
+ addList(exprList(x.Rhs))
+ case *ast.ReturnStmt:
+ addList(exprList(x.Results))
+ case *ast.ValueSpec:
+ addList(exprList(x.Values))
+ case *ast.BlockStmt:
+ addList(stmtList(x.List))
+ case *ast.CaseClause:
+ addList(exprList(x.List))
+ addList(stmtList(x.Body))
+ case *ast.CommClause:
+ addList(stmtList(x.Body))
+ }
+ return lists
+}
+
+type exprList []ast.Expr
+type identList []*ast.Ident
+type stmtList []ast.Stmt
+type specList []ast.Spec
+type fieldList []*ast.Field
+
+func (l exprList) len() int { return len(l) }
+func (l identList) len() int { return len(l) }
+func (l stmtList) len() int { return len(l) }
+func (l specList) len() int { return len(l) }
+func (l fieldList) len() int { return len(l) }
+
+func (l exprList) at(i int) ast.Node { return l[i] }
+func (l identList) at(i int) ast.Node { return l[i] }
+func (l stmtList) at(i int) ast.Node { return l[i] }
+func (l specList) at(i int) ast.Node { return l[i] }
+func (l fieldList) at(i int) ast.Node { return l[i] }
+
+func (l exprList) slice(i, j int) nodeList { return l[i:j] }
+func (l identList) slice(i, j int) nodeList { return l[i:j] }
+func (l stmtList) slice(i, j int) nodeList { return l[i:j] }
+func (l specList) slice(i, j int) nodeList { return l[i:j] }
+func (l fieldList) slice(i, j int) nodeList { return l[i:j] }
+
+func (l exprList) Pos() token.Pos { return l[0].Pos() }
+func (l identList) Pos() token.Pos { return l[0].Pos() }
+func (l stmtList) Pos() token.Pos { return l[0].Pos() }
+func (l specList) Pos() token.Pos { return l[0].Pos() }
+func (l fieldList) Pos() token.Pos { return l[0].Pos() }
+
+func (l exprList) End() token.Pos { return l[len(l)-1].End() }
+func (l identList) End() token.Pos { return l[len(l)-1].End() }
+func (l stmtList) End() token.Pos { return l[len(l)-1].End() }
+func (l specList) End() token.Pos { return l[len(l)-1].End() }
+func (l fieldList) End() token.Pos { return l[len(l)-1].End() }
diff --git a/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/parse.go b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/parse.go
new file mode 100644
index 0000000..b46e643
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/parse.go
@@ -0,0 +1,452 @@
+// Copyright (c) 2017, Daniel Martí
+// See LICENSE for licensing information
+
+package gogrep
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/scanner"
+ "go/token"
+ "regexp"
+ "strconv"
+ "strings"
+ "text/template"
+)
+
+func (m *matcher) transformSource(expr string) (string, []posOffset, error) {
+ toks, err := m.tokenize([]byte(expr))
+ if err != nil {
+ return "", nil, fmt.Errorf("cannot tokenize expr: %v", err)
+ }
+ var offs []posOffset
+ lbuf := lineColBuffer{line: 1, col: 1}
+ addOffset := func(length int) {
+ lbuf.offs -= length
+ offs = append(offs, posOffset{
+ atLine: lbuf.line,
+ atCol: lbuf.col,
+ offset: length,
+ })
+ }
+ if len(toks) > 0 && toks[0].tok == tokAggressive {
+ toks = toks[1:]
+ m.aggressive = true
+ }
+ lastLit := false
+ for _, t := range toks {
+ if lbuf.offs >= t.pos.Offset && lastLit && t.lit != "" {
+ lbuf.WriteString(" ")
+ }
+ for lbuf.offs < t.pos.Offset {
+ lbuf.WriteString(" ")
+ }
+ if t.lit == "" {
+ lbuf.WriteString(t.tok.String())
+ lastLit = false
+ continue
+ }
+ if isWildName(t.lit) {
+ // to correct the position offsets for the extra
+ // info attached to ident name strings
+ addOffset(len(wildPrefix) - 1)
+ }
+ lbuf.WriteString(t.lit)
+ lastLit = strings.TrimSpace(t.lit) != ""
+ }
+ // trailing newlines can cause issues with commas
+ return strings.TrimSpace(lbuf.String()), offs, nil
+}
+
+func (m *matcher) parseExpr(expr string) (ast.Node, error) {
+ exprStr, offs, err := m.transformSource(expr)
+ if err != nil {
+ return nil, err
+ }
+ node, _, err := parseDetectingNode(m.fset, exprStr)
+ if err != nil {
+ err = subPosOffsets(err, offs...)
+ return nil, fmt.Errorf("cannot parse expr: %v", err)
+ }
+ return node, nil
+}
+
+type lineColBuffer struct {
+ bytes.Buffer
+ line, col, offs int
+}
+
+func (l *lineColBuffer) WriteString(s string) (n int, err error) {
+ for _, r := range s {
+ if r == '\n' {
+ l.line++
+ l.col = 1
+ } else {
+ l.col++
+ }
+ l.offs++
+ }
+ return l.Buffer.WriteString(s)
+}
+
+var tmplDecl = template.Must(template.New("").Parse(`` +
+ `package p; {{ . }}`))
+
+var tmplExprs = template.Must(template.New("").Parse(`` +
+ `package p; var _ = []interface{}{ {{ . }}, }`))
+
+var tmplStmts = template.Must(template.New("").Parse(`` +
+ `package p; func _() { {{ . }} }`))
+
+var tmplType = template.Must(template.New("").Parse(`` +
+ `package p; var _ {{ . }}`))
+
+var tmplValSpec = template.Must(template.New("").Parse(`` +
+ `package p; var {{ . }}`))
+
+func execTmpl(tmpl *template.Template, src string) string {
+ var buf bytes.Buffer
+ if err := tmpl.Execute(&buf, src); err != nil {
+ panic(err)
+ }
+ return buf.String()
+}
+
+func noBadNodes(node ast.Node) bool {
+ any := false
+ ast.Inspect(node, func(n ast.Node) bool {
+ if any {
+ return false
+ }
+ switch n.(type) {
+ case *ast.BadExpr, *ast.BadDecl:
+ any = true
+ }
+ return true
+ })
+ return !any
+}
+
+func parseType(fset *token.FileSet, src string) (ast.Expr, *ast.File, error) {
+ asType := execTmpl(tmplType, src)
+ f, err := parser.ParseFile(fset, "", asType, 0)
+ if err != nil {
+ err = subPosOffsets(err, posOffset{1, 1, 17})
+ return nil, nil, err
+ }
+ vs := f.Decls[0].(*ast.GenDecl).Specs[0].(*ast.ValueSpec)
+ return vs.Type, f, nil
+}
+
+// parseDetectingNode tries its best to parse the ast.Node contained in src, as
+// one of: *ast.File, ast.Decl, ast.Expr, ast.Stmt, *ast.ValueSpec.
+// It also returns the *ast.File used for the parsing, so that the returned node
+// can be easily type-checked.
+func parseDetectingNode(fset *token.FileSet, src string) (ast.Node, *ast.File, error) {
+ file := fset.AddFile("", fset.Base(), len(src))
+ scan := scanner.Scanner{}
+ scan.Init(file, []byte(src), nil, 0)
+ if _, tok, _ := scan.Scan(); tok == token.EOF {
+ return nil, nil, fmt.Errorf("empty source code")
+ }
+ var mainErr error
+
+ // first try as a whole file
+ if f, err := parser.ParseFile(fset, "", src, 0); err == nil && noBadNodes(f) {
+ return f, f, nil
+ }
+
+ // then as a single declaration, or many
+ asDecl := execTmpl(tmplDecl, src)
+ if f, err := parser.ParseFile(fset, "", asDecl, 0); err == nil && noBadNodes(f) {
+ if len(f.Decls) == 1 {
+ return f.Decls[0], f, nil
+ }
+ return f, f, nil
+ }
+
+ // then as value expressions
+ asExprs := execTmpl(tmplExprs, src)
+ if f, err := parser.ParseFile(fset, "", asExprs, 0); err == nil && noBadNodes(f) {
+ vs := f.Decls[0].(*ast.GenDecl).Specs[0].(*ast.ValueSpec)
+ cl := vs.Values[0].(*ast.CompositeLit)
+ if len(cl.Elts) == 1 {
+ return cl.Elts[0], f, nil
+ }
+ return exprList(cl.Elts), f, nil
+ }
+
+ // then try as statements
+ asStmts := execTmpl(tmplStmts, src)
+ if f, err := parser.ParseFile(fset, "", asStmts, 0); err == nil && noBadNodes(f) {
+ bl := f.Decls[0].(*ast.FuncDecl).Body
+ if len(bl.List) == 1 {
+ return bl.List[0], f, nil
+ }
+ return stmtList(bl.List), f, nil
+ } else {
+ // Statements is what covers most cases, so it will give
+ // the best overall error message. Show positions
+ // relative to where the user's code is put in the
+ // template.
+ mainErr = subPosOffsets(err, posOffset{1, 1, 22})
+ }
+
	// type expressions not yet picked up, e.g. chans and interfaces
+ if typ, f, err := parseType(fset, src); err == nil && noBadNodes(f) {
+ return typ, f, nil
+ }
+
+ // value specs
+ asValSpec := execTmpl(tmplValSpec, src)
+ if f, err := parser.ParseFile(fset, "", asValSpec, 0); err == nil && noBadNodes(f) {
+ vs := f.Decls[0].(*ast.GenDecl).Specs[0].(*ast.ValueSpec)
+ return vs, f, nil
+ }
+ return nil, nil, mainErr
+}
+
+type posOffset struct {
+ atLine, atCol int
+ offset int
+}
+
+func subPosOffsets(err error, offs ...posOffset) error {
+ list, ok := err.(scanner.ErrorList)
+ if !ok {
+ return err
+ }
+ for i, err := range list {
+ for _, off := range offs {
+ if err.Pos.Line != off.atLine {
+ continue
+ }
+ if err.Pos.Column < off.atCol {
+ continue
+ }
+ err.Pos.Column -= off.offset
+ }
+ list[i] = err
+ }
+ return list
+}
+
+const (
+ _ token.Token = -iota
+ tokAggressive
+)
+
+type fullToken struct {
+ pos token.Position
+ tok token.Token
+ lit string
+}
+
+type caseStatus uint
+
+const (
+ caseNone caseStatus = iota
+ caseNeedBlock
+ caseHere
+)
+
+func (m *matcher) tokenize(src []byte) ([]fullToken, error) {
+ var s scanner.Scanner
+ fset := token.NewFileSet()
+ file := fset.AddFile("", fset.Base(), len(src))
+
+ var err error
+ onError := func(pos token.Position, msg string) {
+ switch msg { // allow certain extra chars
+ case `illegal character U+0024 '$'`:
+ case `illegal character U+007E '~'`:
+ default:
+ err = fmt.Errorf("%v: %s", pos, msg)
+ }
+ }
+
+ // we will modify the input source under the scanner's nose to
+ // enable some features such as regexes.
+ s.Init(file, src, onError, scanner.ScanComments)
+
+ next := func() fullToken {
+ pos, tok, lit := s.Scan()
+ return fullToken{fset.Position(pos), tok, lit}
+ }
+
+ caseStat := caseNone
+
+ var toks []fullToken
+ for t := next(); t.tok != token.EOF; t = next() {
+ switch t.lit {
+ case "$": // continues below
+ case "~":
+ toks = append(toks, fullToken{t.pos, tokAggressive, ""})
+ continue
+ case "switch", "select", "case":
+ if t.lit == "case" {
+ caseStat = caseNone
+ } else {
+ caseStat = caseNeedBlock
+ }
+ fallthrough
+ default: // regular Go code
+ if t.tok == token.LBRACE && caseStat == caseNeedBlock {
+ caseStat = caseHere
+ }
+ toks = append(toks, t)
+ continue
+ }
+ wt, err := m.wildcard(t.pos, next)
+ if err != nil {
+ return nil, err
+ }
+ if caseStat == caseHere {
+ toks = append(toks, fullToken{wt.pos, token.IDENT, "case"})
+ }
+ toks = append(toks, wt)
+ if caseStat == caseHere {
+ toks = append(toks, fullToken{wt.pos, token.COLON, ""})
+ toks = append(toks, fullToken{wt.pos, token.IDENT, "gogrep_body"})
+ }
+ }
+ return toks, err
+}
+
+func (m *matcher) wildcard(pos token.Position, next func() fullToken) (fullToken, error) {
+ wt := fullToken{pos, token.IDENT, wildPrefix}
+ t := next()
+ var info varInfo
+ if t.tok == token.MUL {
+ t = next()
+ info.any = true
+ }
+ if t.tok != token.IDENT {
+ return wt, fmt.Errorf("%v: $ must be followed by ident, got %v",
+ t.pos, t.tok)
+ }
+ id := len(m.vars)
+ wt.lit += strconv.Itoa(id)
+ info.name = t.lit
+ m.vars = append(m.vars, info)
+ return wt, nil
+}
+
+type typeCheck struct {
+ op string // "type", "asgn", "conv"
+ expr ast.Expr
+}
+
+type attribute interface{}
+
+type typProperty string
+
+type typUnderlying string
+
+func (m *matcher) parseAttrs(src string) (attribute, error) {
+ toks, err := m.tokenize([]byte(src))
+ if err != nil {
+ return nil, err
+ }
+ i := -1
+ var t fullToken
+ next := func() fullToken {
+ if i++; i < len(toks) {
+ return toks[i]
+ }
+ return fullToken{tok: token.EOF, pos: t.pos}
+ }
+ t = next()
+ op := t.lit
+ switch op { // the ones that don't take args
+ case "comp", "addr":
+ if t = next(); t.tok != token.SEMICOLON {
+ return nil, fmt.Errorf("%v: wanted EOF, got %v", t.pos, t.tok)
+ }
+ return typProperty(op), nil
+ }
+ opPos := t.pos
+ if t = next(); t.tok != token.LPAREN {
+ return nil, fmt.Errorf("%v: wanted (", t.pos)
+ }
+ var attr attribute
+ switch op {
+ case "rx":
+ t = next()
+ rxStr, err := strconv.Unquote(t.lit)
+ if err != nil {
+ return nil, fmt.Errorf("%v: %v", t.pos, err)
+ }
+ if !strings.HasPrefix(rxStr, "^") {
+ rxStr = "^" + rxStr
+ }
+ if !strings.HasSuffix(rxStr, "$") {
+ rxStr = rxStr + "$"
+ }
+ rx, err := regexp.Compile(rxStr)
+ if err != nil {
+ return nil, fmt.Errorf("%v: %v", t.pos, err)
+ }
+ attr = rx
+ case "type", "asgn", "conv":
+ t = next()
+ start := t.pos.Offset
+ for open := 1; open > 0; t = next() {
+ switch t.tok {
+ case token.LPAREN:
+ open++
+ case token.RPAREN:
+ open--
+ case token.EOF:
+ return nil, fmt.Errorf("%v: expected ) to close (", t.pos)
+ }
+ }
+ end := t.pos.Offset - 1
+ typeStr := strings.TrimSpace(string(src[start:end]))
+ fset := token.NewFileSet()
+ typeExpr, _, err := parseType(fset, typeStr)
+ if err != nil {
+ return nil, err
+ }
+ attr = typeCheck{op, typeExpr}
+ i -= 2 // since we went past RPAREN above
+ case "is":
+ switch t = next(); t.lit {
+ case "basic", "array", "slice", "struct", "interface",
+ "pointer", "func", "map", "chan":
+ default:
+ return nil, fmt.Errorf("%v: unknown type: %q", t.pos,
+ t.lit)
+ }
+ attr = typUnderlying(t.lit)
+ default:
+ return nil, fmt.Errorf("%v: unknown op %q", opPos, op)
+ }
+ if t = next(); t.tok != token.RPAREN {
+ return nil, fmt.Errorf("%v: wanted ), got %v", t.pos, t.tok)
+ }
+ if t = next(); t.tok != token.SEMICOLON {
+ return nil, fmt.Errorf("%v: wanted EOF, got %v", t.pos, t.tok)
+ }
+ return attr, nil
+}
+
+// using a prefix is good enough for now
+const wildPrefix = "gogrep_"
+
+func isWildName(name string) bool {
+ return strings.HasPrefix(name, wildPrefix)
+}
+
+func fromWildName(s string) int {
+ if !isWildName(s) {
+ return -1
+ }
+ n, err := strconv.Atoi(s[len(wildPrefix):])
+ if err != nil {
+ return -1
+ }
+ return n
+}
diff --git a/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/subst.go b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/subst.go
new file mode 100644
index 0000000..8870858
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/subst.go
@@ -0,0 +1,261 @@
+// Copyright (c) 2018, Daniel Martí
+// See LICENSE for licensing information
+
+package gogrep
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+ "reflect"
+)
+
+func (m *matcher) cmdSubst(cmd exprCmd, subs []submatch) []submatch {
+ for i := range subs {
+ sub := &subs[i]
+ nodeCopy, _ := m.parseExpr(cmd.src)
+ // since we'll want to set positions within the file's
+ // FileSet
+ scrubPositions(nodeCopy)
+
+ m.fillParents(nodeCopy)
+ nodeCopy = m.fillValues(nodeCopy, sub.values)
+ m.substNode(sub.node, nodeCopy)
+ sub.node = nodeCopy
+ }
+ return subs
+}
+
+type topNode struct {
+ Node ast.Node
+}
+
+func (t topNode) Pos() token.Pos { return t.Node.Pos() }
+func (t topNode) End() token.Pos { return t.Node.End() }
+
+func (m *matcher) fillValues(node ast.Node, values map[string]ast.Node) ast.Node {
+ // node might not have a parent, in which case we need to set an
+ // artificial one. Its pointer interface is a copy, so we must also
+ // return it.
+ top := &topNode{node}
+ m.setParentOf(node, top)
+
+ inspect(node, func(node ast.Node) bool {
+ id := fromWildNode(node)
+ info := m.info(id)
+ if info.name == "" {
+ return true
+ }
+ prev := values[info.name]
+ switch prev.(type) {
+ case exprList:
+ node = exprList([]ast.Expr{
+ node.(*ast.Ident),
+ })
+ case stmtList:
+ if ident, ok := node.(*ast.Ident); ok {
+ node = &ast.ExprStmt{X: ident}
+ }
+ node = stmtList([]ast.Stmt{
+ node.(*ast.ExprStmt),
+ })
+ }
+ m.substNode(node, prev)
+ return true
+ })
+ m.setParentOf(node, nil)
+ return top.Node
+}
+
+func (m *matcher) substNode(oldNode, newNode ast.Node) {
+ parent := m.parentOf(oldNode)
+ m.setParentOf(newNode, parent)
+
+ ptr := m.nodePtr(oldNode)
+ switch x := ptr.(type) {
+ case **ast.Ident:
+ *x = newNode.(*ast.Ident)
+ case *ast.Node:
+ *x = newNode
+ case *ast.Expr:
+ *x = newNode.(ast.Expr)
+ case *ast.Stmt:
+ switch y := newNode.(type) {
+ case ast.Expr:
+ stmt := &ast.ExprStmt{X: y}
+ m.setParentOf(stmt, parent)
+ *x = stmt
+ case ast.Stmt:
+ *x = y
+ default:
+ panic(fmt.Sprintf("cannot replace stmt with %T", y))
+ }
+ case *[]ast.Expr:
+ oldList := oldNode.(exprList)
+ var first, last []ast.Expr
+ for i, expr := range *x {
+ if expr == oldList[0] {
+ first = (*x)[:i]
+ last = (*x)[i+len(oldList):]
+ break
+ }
+ }
+ switch y := newNode.(type) {
+ case ast.Expr:
+ *x = append(first, y)
+ case exprList:
+ *x = append(first, y...)
+ default:
+ panic(fmt.Sprintf("cannot replace exprs with %T", y))
+ }
+ *x = append(*x, last...)
+ case *[]ast.Stmt:
+ oldList := oldNode.(stmtList)
+ var first, last []ast.Stmt
+ for i, stmt := range *x {
+ if stmt == oldList[0] {
+ first = (*x)[:i]
+ last = (*x)[i+len(oldList):]
+ break
+ }
+ }
+ switch y := newNode.(type) {
+ case ast.Expr:
+ stmt := &ast.ExprStmt{X: y}
+ m.setParentOf(stmt, parent)
+ *x = append(first, stmt)
+ case ast.Stmt:
+ *x = append(first, y)
+ case stmtList:
+ *x = append(first, y...)
+ default:
+ panic(fmt.Sprintf("cannot replace stmts with %T", y))
+ }
+ *x = append(*x, last...)
+ case nil:
+ return
+ default:
+ panic(fmt.Sprintf("unsupported substitution: %T", x))
+ }
+ // the new nodes have scrubbed positions, so try our best to use
+ // sensible ones
+ fixPositions(parent)
+}
+
+func (m *matcher) parentOf(node ast.Node) ast.Node {
+ list, ok := node.(nodeList)
+ if ok {
+ node = list.at(0)
+ }
+ return m.parents[node]
+}
+
+func (m *matcher) setParentOf(node, parent ast.Node) {
+ list, ok := node.(nodeList)
+ if ok {
+ if list.len() == 0 {
+ return
+ }
+ node = list.at(0)
+ }
+ m.parents[node] = parent
+}
+
+func (m *matcher) nodePtr(node ast.Node) interface{} {
+ list, wantSlice := node.(nodeList)
+ if wantSlice {
+ node = list.at(0)
+ }
+ parent := m.parentOf(node)
+ if parent == nil {
+ return nil
+ }
+ v := reflect.ValueOf(parent).Elem()
+ for i := 0; i < v.NumField(); i++ {
+ fld := v.Field(i)
+ switch fld.Type().Kind() {
+ case reflect.Slice:
+ for i := 0; i < fld.Len(); i++ {
+ ifld := fld.Index(i)
+ if ifld.Interface() != node {
+ continue
+ }
+ if wantSlice {
+ return fld.Addr().Interface()
+ }
+ return ifld.Addr().Interface()
+ }
+ case reflect.Interface:
+ if fld.Interface() == node {
+ return fld.Addr().Interface()
+ }
+ }
+ }
+ return nil
+}
+
+// nodePosHash is an ast.Node that can always be used as a key in maps,
+// even for nodes that are slices like nodeList.
+type nodePosHash struct {
+ pos, end token.Pos
+}
+
+func (n nodePosHash) Pos() token.Pos { return n.pos }
+func (n nodePosHash) End() token.Pos { return n.end }
+
+func posHash(node ast.Node) nodePosHash {
+ return nodePosHash{pos: node.Pos(), end: node.End()}
+}
+
+var posType = reflect.TypeOf(token.NoPos)
+
+func scrubPositions(node ast.Node) {
+ inspect(node, func(node ast.Node) bool {
+ v := reflect.ValueOf(node)
+ if v.Kind() != reflect.Ptr {
+ return true
+ }
+ v = v.Elem()
+ if v.Kind() != reflect.Struct {
+ return true
+ }
+ for i := 0; i < v.NumField(); i++ {
+ fld := v.Field(i)
+ if fld.Type() == posType {
+ fld.SetInt(0)
+ }
+ }
+ return true
+ })
+}
+
+// fixPositions tries to fix common syntax errors caused from syntax rewrites.
+func fixPositions(node ast.Node) {
+ if top, ok := node.(*topNode); ok {
+ node = top.Node
+ }
+ // fallback sets pos to the 'to' position if not valid.
+ fallback := func(pos *token.Pos, to token.Pos) {
+ if !pos.IsValid() {
+ *pos = to
+ }
+ }
+ ast.Inspect(node, func(node ast.Node) bool {
+ // TODO: many more node types
+ switch x := node.(type) {
+ case *ast.GoStmt:
+ fallback(&x.Go, x.Call.Pos())
+ case *ast.ReturnStmt:
+ if len(x.Results) == 0 {
+ break
+ }
+ // Ensure that there's no newline before the returned
+ // values, as otherwise we have a naked return. See
+ // https://github.com/golang/go/issues/32854.
+ if pos := x.Results[0].Pos(); pos > x.Return {
+ x.Return = pos
+ }
+ }
+ return true
+ })
+}
diff --git a/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/write.go b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/write.go
new file mode 100644
index 0000000..b4796a8
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/write.go
@@ -0,0 +1,63 @@
+// Copyright (c) 2018, Daniel Martí
+// See LICENSE for licensing information
+
+package gogrep
+
+import (
+ "go/ast"
+ "go/printer"
+ "os"
+)
+
+func (m *matcher) cmdWrite(cmd exprCmd, subs []submatch) []submatch {
+ seenRoot := make(map[nodePosHash]bool)
+ filePaths := make(map[*ast.File]string)
+ var next []submatch
+ for _, sub := range subs {
+ root := m.nodeRoot(sub.node)
+ hash := posHash(root)
+ if seenRoot[hash] {
+ continue // avoid dups
+ }
+ seenRoot[hash] = true
+ file, ok := root.(*ast.File)
+ if ok {
+ path := m.fset.Position(file.Package).Filename
+ if path != "" {
+ // write to disk
+ filePaths[file] = path
+ continue
+ }
+ }
+ // pass it on, to print to stdout
+ next = append(next, submatch{node: root})
+ }
+ for file, path := range filePaths {
+ f, err := os.OpenFile(path, os.O_WRONLY|os.O_TRUNC, 0)
+ if err != nil {
+ // TODO: return errors instead
+ panic(err)
+ }
+ if err := printConfig.Fprint(f, m.fset, file); err != nil {
+ // TODO: return errors instead
+ panic(err)
+ }
+ }
+ return next
+}
+
+var printConfig = printer.Config{
+ Mode: printer.UseSpaces | printer.TabIndent,
+ Tabwidth: 8,
+}
+
+func (m *matcher) nodeRoot(node ast.Node) ast.Node {
+ parent := m.parentOf(node)
+ if parent == nil {
+ return node
+ }
+ if _, ok := parent.(nodeList); ok {
+ return parent
+ }
+ return m.nodeRoot(parent)
+}
diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/bool3.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/bool3.go
new file mode 100644
index 0000000..6e9550c
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/bool3.go
@@ -0,0 +1,9 @@
+package ruleguard
+
+type bool3 int
+
+const (
+ bool3unset bool3 = iota
+ bool3false
+ bool3true
+)
diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/dsl_importer.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/dsl_importer.go
new file mode 100644
index 0000000..c566578
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/dsl_importer.go
@@ -0,0 +1,40 @@
+package ruleguard
+
+import (
+ "go/ast"
+ "go/importer"
+ "go/parser"
+ "go/token"
+ "go/types"
+
+ "github.com/quasilyte/go-ruleguard/dslgen"
+)
+
+type dslImporter struct {
+ fallback types.Importer
+}
+
+func newDSLImporter() *dslImporter {
+ return &dslImporter{fallback: importer.Default()}
+}
+
+func (i *dslImporter) Import(path string) (*types.Package, error) {
+ switch path {
+ case "github.com/quasilyte/go-ruleguard/dsl/fluent":
+ return i.importDSL(path, dslgen.Fluent)
+
+ default:
+ return i.fallback.Import(path)
+ }
+}
+
+func (i *dslImporter) importDSL(path string, src []byte) (*types.Package, error) {
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, "dsl.go", src, 0)
+ if err != nil {
+ return nil, err
+ }
+ var typecheker types.Config
+ var info types.Info
+ return typecheker.Check(path, fset, []*ast.File{f}, &info)
+}
diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/gorule.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/gorule.go
new file mode 100644
index 0000000..6cdaae4
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/gorule.go
@@ -0,0 +1,34 @@
+package ruleguard
+
+import (
+ "go/types"
+
+ "github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep"
+)
+
+type scopedGoRuleSet struct {
+ uncategorized []goRule
+ categorizedNum int
+ rulesByCategory [nodeCategoriesCount][]goRule
+}
+
+type goRule struct {
+ severity string
+ pat *gogrep.Pattern
+ msg string
+ location string
+ suggestion string
+ filters map[string]submatchFilter
+}
+
+type submatchFilter struct {
+ typePred func(typeQuery) bool
+ pure bool3
+ constant bool3
+ addressable bool3
+}
+
+type typeQuery struct {
+ x types.Type
+ ctx *Context
+}
diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/node_category.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/node_category.go
new file mode 100644
index 0000000..859ed39
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/node_category.go
@@ -0,0 +1,159 @@
+package ruleguard
+
+import (
+ "go/ast"
+)
+
+type nodeCategory int
+
+const (
+ nodeUnknown nodeCategory = iota
+
+ nodeArrayType
+ nodeAssignStmt
+ nodeBasicLit
+ nodeBinaryExpr
+ nodeBlockStmt
+ nodeBranchStmt
+ nodeCallExpr
+ nodeCaseClause
+ nodeChanType
+ nodeCommClause
+ nodeCompositeLit
+ nodeDeclStmt
+ nodeDeferStmt
+ nodeEllipsis
+ nodeEmptyStmt
+ nodeExprStmt
+ nodeForStmt
+ nodeFuncDecl
+ nodeFuncLit
+ nodeFuncType
+ nodeGenDecl
+ nodeGoStmt
+ nodeIdent
+ nodeIfStmt
+ nodeImportSpec
+ nodeIncDecStmt
+ nodeIndexExpr
+ nodeInterfaceType
+ nodeKeyValueExpr
+ nodeLabeledStmt
+ nodeMapType
+ nodeParenExpr
+ nodeRangeStmt
+ nodeReturnStmt
+ nodeSelectStmt
+ nodeSelectorExpr
+ nodeSendStmt
+ nodeSliceExpr
+ nodeStarExpr
+ nodeStructType
+ nodeSwitchStmt
+ nodeTypeAssertExpr
+ nodeTypeSpec
+ nodeTypeSwitchStmt
+ nodeUnaryExpr
+ nodeValueSpec
+
+ nodeCategoriesCount
+)
+
+func categorizeNode(n ast.Node) nodeCategory {
+ switch n.(type) {
+ case *ast.ArrayType:
+ return nodeArrayType
+ case *ast.AssignStmt:
+ return nodeAssignStmt
+ case *ast.BasicLit:
+ return nodeBasicLit
+ case *ast.BinaryExpr:
+ return nodeBinaryExpr
+ case *ast.BlockStmt:
+ return nodeBlockStmt
+ case *ast.BranchStmt:
+ return nodeBranchStmt
+ case *ast.CallExpr:
+ return nodeCallExpr
+ case *ast.CaseClause:
+ return nodeCaseClause
+ case *ast.ChanType:
+ return nodeChanType
+ case *ast.CommClause:
+ return nodeCommClause
+ case *ast.CompositeLit:
+ return nodeCompositeLit
+ case *ast.DeclStmt:
+ return nodeDeclStmt
+ case *ast.DeferStmt:
+ return nodeDeferStmt
+ case *ast.Ellipsis:
+ return nodeEllipsis
+ case *ast.EmptyStmt:
+ return nodeEmptyStmt
+ case *ast.ExprStmt:
+ return nodeExprStmt
+ case *ast.ForStmt:
+ return nodeForStmt
+ case *ast.FuncDecl:
+ return nodeFuncDecl
+ case *ast.FuncLit:
+ return nodeFuncLit
+ case *ast.FuncType:
+ return nodeFuncType
+ case *ast.GenDecl:
+ return nodeGenDecl
+ case *ast.GoStmt:
+ return nodeGoStmt
+ case *ast.Ident:
+ return nodeIdent
+ case *ast.IfStmt:
+ return nodeIfStmt
+ case *ast.ImportSpec:
+ return nodeImportSpec
+ case *ast.IncDecStmt:
+ return nodeIncDecStmt
+ case *ast.IndexExpr:
+ return nodeIndexExpr
+ case *ast.InterfaceType:
+ return nodeInterfaceType
+ case *ast.KeyValueExpr:
+ return nodeKeyValueExpr
+ case *ast.LabeledStmt:
+ return nodeLabeledStmt
+ case *ast.MapType:
+ return nodeMapType
+ case *ast.ParenExpr:
+ return nodeParenExpr
+ case *ast.RangeStmt:
+ return nodeRangeStmt
+ case *ast.ReturnStmt:
+ return nodeReturnStmt
+ case *ast.SelectStmt:
+ return nodeSelectStmt
+ case *ast.SelectorExpr:
+ return nodeSelectorExpr
+ case *ast.SendStmt:
+ return nodeSendStmt
+ case *ast.SliceExpr:
+ return nodeSliceExpr
+ case *ast.StarExpr:
+ return nodeStarExpr
+ case *ast.StructType:
+ return nodeStructType
+ case *ast.SwitchStmt:
+ return nodeSwitchStmt
+ case *ast.TypeAssertExpr:
+ return nodeTypeAssertExpr
+ case *ast.TypeSpec:
+ return nodeTypeSpec
+ case *ast.TypeSwitchStmt:
+ return nodeTypeSwitchStmt
+ case *ast.UnaryExpr:
+ return nodeUnaryExpr
+ case *ast.ValueSpec:
+ return nodeValueSpec
+ default:
+ return nodeUnknown
+ }
+}
diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/parser.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/parser.go
new file mode 100644
index 0000000..f5b9367
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/parser.go
@@ -0,0 +1,634 @@
+package ruleguard
+
+import (
+ "fmt"
+ "go/ast"
+ "go/constant"
+ "go/importer"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "io"
+ "path"
+ "strconv"
+
+ "github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep"
+ "github.com/quasilyte/go-ruleguard/ruleguard/typematch"
+)
+
+type rulesParser struct {
+ fset *token.FileSet
+ res *GoRuleSet
+ types *types.Info
+
+ itab *typematch.ImportsTab
+ dslImporter types.Importer
+ stdImporter types.Importer // TODO(quasilyte): share importer with gogrep?
+ srcImporter types.Importer
+}
+
+func newRulesParser() *rulesParser {
+ var stdlib = map[string]string{
+ "adler32": "hash/adler32",
+ "aes": "crypto/aes",
+ "ascii85": "encoding/ascii85",
+ "asn1": "encoding/asn1",
+ "ast": "go/ast",
+ "atomic": "sync/atomic",
+ "base32": "encoding/base32",
+ "base64": "encoding/base64",
+ "big": "math/big",
+ "binary": "encoding/binary",
+ "bits": "math/bits",
+ "bufio": "bufio",
+ "build": "go/build",
+ "bytes": "bytes",
+ "bzip2": "compress/bzip2",
+ "cgi": "net/http/cgi",
+ "cgo": "runtime/cgo",
+ "cipher": "crypto/cipher",
+ "cmplx": "math/cmplx",
+ "color": "image/color",
+ "constant": "go/constant",
+ "context": "context",
+ "cookiejar": "net/http/cookiejar",
+ "crc32": "hash/crc32",
+ "crc64": "hash/crc64",
+ "crypto": "crypto",
+ "csv": "encoding/csv",
+ "debug": "runtime/debug",
+ "des": "crypto/des",
+ "doc": "go/doc",
+ "draw": "image/draw",
+ "driver": "database/sql/driver",
+ "dsa": "crypto/dsa",
+ "dwarf": "debug/dwarf",
+ "ecdsa": "crypto/ecdsa",
+ "ed25519": "crypto/ed25519",
+ "elf": "debug/elf",
+ "elliptic": "crypto/elliptic",
+ "encoding": "encoding",
+ "errors": "errors",
+ "exec": "os/exec",
+ "expvar": "expvar",
+ "fcgi": "net/http/fcgi",
+ "filepath": "path/filepath",
+ "flag": "flag",
+ "flate": "compress/flate",
+ "fmt": "fmt",
+ "fnv": "hash/fnv",
+ "format": "go/format",
+ "gif": "image/gif",
+ "gob": "encoding/gob",
+ "gosym": "debug/gosym",
+ "gzip": "compress/gzip",
+ "hash": "hash",
+ "heap": "container/heap",
+ "hex": "encoding/hex",
+ "hmac": "crypto/hmac",
+ "html": "html",
+ "http": "net/http",
+ "httptest": "net/http/httptest",
+ "httptrace": "net/http/httptrace",
+ "httputil": "net/http/httputil",
+ "image": "image",
+ "importer": "go/importer",
+ "io": "io",
+ "iotest": "testing/iotest",
+ "ioutil": "io/ioutil",
+ "jpeg": "image/jpeg",
+ "json": "encoding/json",
+ "jsonrpc": "net/rpc/jsonrpc",
+ "list": "container/list",
+ "log": "log",
+ "lzw": "compress/lzw",
+ "macho": "debug/macho",
+ "mail": "net/mail",
+ "math": "math",
+ "md5": "crypto/md5",
+ "mime": "mime",
+ "multipart": "mime/multipart",
+ "net": "net",
+ "os": "os",
+ "palette": "image/color/palette",
+ "parse": "text/template/parse",
+ "parser": "go/parser",
+ "path": "path",
+ "pe": "debug/pe",
+ "pem": "encoding/pem",
+ "pkix": "crypto/x509/pkix",
+ "plan9obj": "debug/plan9obj",
+ "plugin": "plugin",
+ "png": "image/png",
+ "pprof": "runtime/pprof",
+ "printer": "go/printer",
+ "quick": "testing/quick",
+ "quotedprintable": "mime/quotedprintable",
+ "race": "runtime/race",
+ "rand": "math/rand",
+ "rc4": "crypto/rc4",
+ "reflect": "reflect",
+ "regexp": "regexp",
+ "ring": "container/ring",
+ "rpc": "net/rpc",
+ "rsa": "crypto/rsa",
+ "runtime": "runtime",
+ "scanner": "text/scanner",
+ "sha1": "crypto/sha1",
+ "sha256": "crypto/sha256",
+ "sha512": "crypto/sha512",
+ "signal": "os/signal",
+ "smtp": "net/smtp",
+ "sort": "sort",
+ "sql": "database/sql",
+ "strconv": "strconv",
+ "strings": "strings",
+ "subtle": "crypto/subtle",
+ "suffixarray": "index/suffixarray",
+ "sync": "sync",
+ "syntax": "regexp/syntax",
+ "syscall": "syscall",
+ "syslog": "log/syslog",
+ "tabwriter": "text/tabwriter",
+ "tar": "archive/tar",
+ "template": "text/template",
+ "testing": "testing",
+ "textproto": "net/textproto",
+ "time": "time",
+ "tls": "crypto/tls",
+ "token": "go/token",
+ "trace": "runtime/trace",
+ "types": "go/types",
+ "unicode": "unicode",
+ "unsafe": "unsafe",
+ "url": "net/url",
+ "user": "os/user",
+ "utf16": "unicode/utf16",
+ "utf8": "unicode/utf8",
+ "x509": "crypto/x509",
+ "xml": "encoding/xml",
+ "zip": "archive/zip",
+ "zlib": "compress/zlib",
+ }
+
+ // TODO(quasilyte): do we need to pass the fileset here?
+ fset := token.NewFileSet()
+ return &rulesParser{
+ itab: typematch.NewImportsTab(stdlib),
+ stdImporter: importer.Default(),
+ srcImporter: importer.ForCompiler(fset, "source", nil),
+ dslImporter: newDSLImporter(),
+ }
+}
+
+func (p *rulesParser) ParseFile(filename string, fset *token.FileSet, r io.Reader) (*GoRuleSet, error) {
+ p.fset = fset
+ p.res = &GoRuleSet{
+ local: &scopedGoRuleSet{},
+ universal: &scopedGoRuleSet{},
+ }
+
+ parserFlags := parser.Mode(0)
+ f, err := parser.ParseFile(fset, filename, r, parserFlags)
+ if err != nil {
+ return nil, fmt.Errorf("parser error: %v", err)
+ }
+
+ if f.Name.Name != "gorules" {
+ return nil, fmt.Errorf("expected a gorules package name, found %s", f.Name.Name)
+ }
+
+ typechecker := types.Config{Importer: p.dslImporter}
+ p.types = &types.Info{Types: map[ast.Expr]types.TypeAndValue{}}
+ _, err = typechecker.Check("gorules", fset, []*ast.File{f}, p.types)
+ if err != nil {
+ return nil, fmt.Errorf("typechecker error: %v", err)
+ }
+
+ for _, decl := range f.Decls {
+ decl, ok := decl.(*ast.FuncDecl)
+ if !ok {
+ continue
+ }
+ if err := p.parseRuleGroup(decl); err != nil {
+ return nil, err
+ }
+ }
+
+ return p.res, nil
+}
+
+func (p *rulesParser) parseRuleGroup(f *ast.FuncDecl) error {
+ if f.Body == nil {
+ return p.errorf(f, "unexpected empty function body")
+ }
+ if f.Type.Results != nil {
+ return p.errorf(f.Type.Results, "rule group function should not return anything")
+ }
+ params := f.Type.Params.List
+ if len(params) != 1 || len(params[0].Names) != 1 {
+ return p.errorf(f.Type.Params, "rule group function should accept exactly 1 Matcher param")
+ }
+ // TODO(quasilyte): do an actual matcher param type check?
+ matcher := params[0].Names[0].Name
+
+ p.itab.EnterScope()
+ defer p.itab.LeaveScope()
+
+ for _, stmt := range f.Body.List {
+ if _, ok := stmt.(*ast.DeclStmt); ok {
+ continue
+ }
+ stmtExpr, ok := stmt.(*ast.ExprStmt)
+ if !ok {
+ return p.errorf(stmt, "expected a %s method call, found %s", matcher, sprintNode(p.fset, stmt))
+ }
+ call, ok := stmtExpr.X.(*ast.CallExpr)
+ if !ok {
+ return p.errorf(stmt, "expected a %s method call, found %s", matcher, sprintNode(p.fset, stmt))
+ }
+ if err := p.parseCall(matcher, call); err != nil {
+ return err
+ }
+
+ }
+
+ return nil
+}
+
+func (p *rulesParser) parseCall(matcher string, call *ast.CallExpr) error {
+ f := call.Fun.(*ast.SelectorExpr)
+ x, ok := f.X.(*ast.Ident)
+ if ok && x.Name == matcher {
+ return p.parseStmt(f.Sel, call.Args)
+ }
+
+ return p.parseRule(matcher, call)
+}
+
+func (p *rulesParser) parseStmt(fn *ast.Ident, args []ast.Expr) error {
+ switch fn.Name {
+ case "Import":
+ pkgPath, ok := p.toStringValue(args[0])
+ if !ok {
+ return p.errorf(args[0], "expected a string literal argument")
+ }
+ pkgName := path.Base(pkgPath)
+ p.itab.Load(pkgName, pkgPath)
+ return nil
+ default:
+ return p.errorf(fn, "unexpected %s method", fn.Name)
+ }
+}
+
+func (p *rulesParser) parseRule(matcher string, call *ast.CallExpr) error {
+ origCall := call
+ var (
+ matchArgs *[]ast.Expr
+ whereArgs *[]ast.Expr
+ suggestArgs *[]ast.Expr
+ reportArgs *[]ast.Expr
+ atArgs *[]ast.Expr
+ )
+ for {
+ chain, ok := call.Fun.(*ast.SelectorExpr)
+ if !ok {
+ break
+ }
+ switch chain.Sel.Name {
+ case "Match":
+ matchArgs = &call.Args
+ case "Where":
+ whereArgs = &call.Args
+ case "Suggest":
+ suggestArgs = &call.Args
+ case "Report":
+ reportArgs = &call.Args
+ case "At":
+ atArgs = &call.Args
+ default:
+ return p.errorf(chain.Sel, "unexpected %s method", chain.Sel.Name)
+ }
+ call, ok = chain.X.(*ast.CallExpr)
+ if !ok {
+ break
+ }
+ }
+
+ dst := p.res.universal
+ filters := map[string]submatchFilter{}
+ proto := goRule{
+ filters: filters,
+ }
+ var alternatives []string
+
+ if matchArgs == nil {
+ return p.errorf(origCall, "missing Match() call")
+ }
+ for _, arg := range *matchArgs {
+ alt, ok := p.toStringValue(arg)
+ if !ok {
+ return p.errorf(arg, "expected a string literal argument")
+ }
+ alternatives = append(alternatives, alt)
+ }
+
+ if whereArgs != nil {
+ if err := p.walkFilter(filters, (*whereArgs)[0], false); err != nil {
+ return err
+ }
+ }
+
+ if suggestArgs != nil {
+ s, ok := p.toStringValue((*suggestArgs)[0])
+ if !ok {
+ return p.errorf((*suggestArgs)[0], "expected string literal argument")
+ }
+ proto.suggestion = s
+ }
+
+ if reportArgs == nil {
+ if suggestArgs == nil {
+ return p.errorf(origCall, "missing Report() or Suggest() call")
+ }
+ proto.msg = "suggestion: " + proto.suggestion
+ } else {
+ message, ok := p.toStringValue((*reportArgs)[0])
+ if !ok {
+ return p.errorf((*reportArgs)[0], "expected string literal argument")
+ }
+ proto.msg = message
+ }
+
+ if atArgs != nil {
+ index, ok := (*atArgs)[0].(*ast.IndexExpr)
+ if !ok {
+ return p.errorf((*atArgs)[0], "expected %s[`varname`] expression", matcher)
+ }
+ arg, ok := p.toStringValue(index.Index)
+ if !ok {
+ return p.errorf(index.Index, "expected a string literal index")
+ }
+ proto.location = arg
+ }
+
+ for i, alt := range alternatives {
+ rule := proto
+ pat, err := gogrep.Parse(p.fset, alt)
+ if err != nil {
+ return p.errorf((*matchArgs)[i], "gogrep parse: %v", err)
+ }
+ rule.pat = pat
+ cat := categorizeNode(pat.Expr)
+ if cat == nodeUnknown {
+ dst.uncategorized = append(dst.uncategorized, rule)
+ } else {
+ dst.categorizedNum++
+ dst.rulesByCategory[cat] = append(dst.rulesByCategory[cat], rule)
+ }
+ }
+
+ return nil
+}
+
+func (p *rulesParser) walkFilter(dst map[string]submatchFilter, e ast.Expr, negate bool) error {
+ AND := func(x, y func(typeQuery) bool) func(typeQuery) bool {
+ if x == nil {
+ return y
+ }
+ return func(q typeQuery) bool {
+ return x(q) && y(q)
+ }
+ }
+
+ switch e := e.(type) {
+ case *ast.UnaryExpr:
+ if e.Op == token.NOT {
+ return p.walkFilter(dst, e.X, !negate)
+ }
+ case *ast.BinaryExpr:
+ switch e.Op {
+ case token.LAND:
+ err := p.walkFilter(dst, e.X, negate)
+ if err != nil {
+ return err
+ }
+ return p.walkFilter(dst, e.Y, negate)
+ case token.GEQ, token.LEQ, token.LSS, token.GTR, token.EQL, token.NEQ:
+ operand := p.toFilterOperand(e.X)
+ size := p.types.Types[e.Y].Value
+ if operand.path == "Type.Size" && size != nil {
+ filter := dst[operand.varName]
+ filter.typePred = AND(filter.typePred, func(q typeQuery) bool {
+ x := constant.MakeInt64(q.ctx.Sizes.Sizeof(q.x))
+ return constant.Compare(x, e.Op, size)
+ })
+ dst[operand.varName] = filter
+
+ return nil
+ }
+ }
+ }
+
+ // TODO(quasilyte): refactor and extend.
+ operand := p.toFilterOperand(e)
+ args := operand.args
+ filter := dst[operand.varName]
+ switch operand.path {
+ default:
+ return p.errorf(e, "%s is not a valid filter expression", sprintNode(p.fset, e))
+ case "Pure":
+ if negate {
+ filter.pure = bool3false
+ } else {
+ filter.pure = bool3true
+ }
+ dst[operand.varName] = filter
+ case "Const":
+ if negate {
+ filter.constant = bool3false
+ } else {
+ filter.constant = bool3true
+ }
+ dst[operand.varName] = filter
+ case "Addressable":
+ if negate {
+ filter.addressable = bool3false
+ } else {
+ filter.addressable = bool3true
+ }
+ dst[operand.varName] = filter
+ case "Type.Is":
+ typeString, ok := p.toStringValue(args[0])
+ if !ok {
+ return p.errorf(args[0], "expected a string literal argument")
+ }
+ ctx := typematch.Context{Itab: p.itab}
+ pat, err := typematch.Parse(&ctx, typeString)
+ if err != nil {
+ return p.errorf(args[0], "parse type expr: %v", err)
+ }
+ wantIdentical := !negate
+ filter.typePred = AND(filter.typePred, func(q typeQuery) bool {
+ return wantIdentical == pat.MatchIdentical(q.x)
+ })
+ dst[operand.varName] = filter
+ case "Type.ConvertibleTo":
+ typeString, ok := p.toStringValue(args[0])
+ if !ok {
+ return p.errorf(args[0], "expected a string literal argument")
+ }
+ y, err := typeFromString(typeString)
+ if err != nil {
+ return p.errorf(args[0], "parse type expr: %v", err)
+ }
+ if y == nil {
+ return p.errorf(args[0], "can't convert %s into a type constraint yet", typeString)
+ }
+ wantConvertible := !negate
+ filter.typePred = AND(filter.typePred, func(q typeQuery) bool {
+ return wantConvertible == types.ConvertibleTo(q.x, y)
+ })
+ dst[operand.varName] = filter
+ case "Type.AssignableTo":
+ typeString, ok := p.toStringValue(args[0])
+ if !ok {
+ return p.errorf(args[0], "expected a string literal argument")
+ }
+ y, err := typeFromString(typeString)
+ if err != nil {
+ return p.errorf(args[0], "parse type expr: %v", err)
+ }
+ if y == nil {
+ return p.errorf(args[0], "can't convert %s into a type constraint yet", typeString)
+ }
+ wantAssignable := !negate
+ filter.typePred = AND(filter.typePred, func(q typeQuery) bool {
+ return wantAssignable == types.AssignableTo(q.x, y)
+ })
+ dst[operand.varName] = filter
+ case "Type.Implements":
+ typeString, ok := p.toStringValue(args[0])
+ if !ok {
+ return p.errorf(args[0], "expected a string literal argument")
+ }
+ n, err := parser.ParseExpr(typeString)
+ if err != nil {
+ return p.errorf(args[0], "parse type expr: %v", err)
+ }
+ e, ok := n.(*ast.SelectorExpr)
+ if !ok {
+ return p.errorf(args[0], "only qualified names are supported")
+ }
+ pkgName, ok := e.X.(*ast.Ident)
+ if !ok {
+ return p.errorf(e.X, "invalid package name")
+ }
+ pkgPath, ok := p.itab.Lookup(pkgName.Name)
+ if !ok {
+ return p.errorf(e.X, "package %s is not imported", pkgName.Name)
+ }
+ pkg, err := p.stdImporter.Import(pkgPath)
+ if err != nil {
+ pkg, err = p.srcImporter.Import(pkgPath)
+ if err != nil {
+ return p.errorf(e, "can't load %s: %v", pkgPath, err)
+ }
+ }
+ obj := pkg.Scope().Lookup(e.Sel.Name)
+ if obj == nil {
+ return p.errorf(e, "%s is not found in %s", e.Sel.Name, pkgPath)
+ }
+ iface, ok := obj.Type().Underlying().(*types.Interface)
+ if !ok {
+ return p.errorf(e, "%s is not an interface type", e.Sel.Name)
+ }
+ wantImplemented := !negate
+ filter.typePred = AND(filter.typePred, func(q typeQuery) bool {
+ return wantImplemented == types.Implements(q.x, iface)
+ })
+ dst[operand.varName] = filter
+ }
+
+ return nil
+}
+
+func (p *rulesParser) toIntValue(x ast.Node) (int64, bool) {
+ lit, ok := x.(*ast.BasicLit)
+ if !ok || lit.Kind != token.INT {
+ return 0, false
+ }
+ v, err := strconv.ParseInt(lit.Value, 10, 64)
+ return v, err == nil
+}
+
+func (p *rulesParser) toStringValue(x ast.Node) (string, bool) {
+ switch x := x.(type) {
+ case *ast.BasicLit:
+ if x.Kind != token.STRING {
+ return "", false
+ }
+ return unquoteNode(x), true
+ case ast.Expr:
+ typ, ok := p.types.Types[x]
+ if !ok || typ.Type.String() != "string" {
+ return "", false
+ }
+ str := typ.Value.ExactString()
+ str = str[1 : len(str)-1] // remove quotes
+ return str, true
+ }
+ return "", false
+}
+
+func (p *rulesParser) toFilterOperand(e ast.Expr) filterOperand {
+ var o filterOperand
+
+ if call, ok := e.(*ast.CallExpr); ok {
+ o.args = call.Args
+ e = call.Fun
+ }
+ var path string
+ for {
+ selector, ok := e.(*ast.SelectorExpr)
+ if !ok {
+ break
+ }
+ if path == "" {
+ path = selector.Sel.Name
+ } else {
+ path = selector.Sel.Name + "." + path
+ }
+ e = selector.X
+ }
+ indexing, ok := e.(*ast.IndexExpr)
+ if !ok {
+ return o
+ }
+ mapIdent, ok := indexing.X.(*ast.Ident)
+ if !ok {
+ return o
+ }
+ indexString, ok := p.toStringValue(indexing.Index)
+ if !ok {
+ return o
+ }
+
+ o.mapName = mapIdent.Name
+ o.varName = indexString
+ o.path = path
+ return o
+}
+
+func (p *rulesParser) errorf(n ast.Node, format string, args ...interface{}) error {
+ loc := p.fset.Position(n.Pos())
+ return fmt.Errorf("%s:%d: %s",
+ loc.Filename, loc.Line, fmt.Sprintf(format, args...))
+}
+
+type filterOperand struct {
+ mapName string
+ varName string
+ path string
+ args []ast.Expr
+}
diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/ruleguard.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/ruleguard.go
new file mode 100644
index 0000000..591fc2a
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/ruleguard.go
@@ -0,0 +1,36 @@
+package ruleguard
+
+import (
+ "go/ast"
+ "go/token"
+ "go/types"
+ "io"
+)
+
+type Context struct {
+ Types *types.Info
+ Sizes types.Sizes
+ Fset *token.FileSet
+ Report func(n ast.Node, msg string, s *Suggestion)
+ Pkg *types.Package
+}
+
+type Suggestion struct {
+ From token.Pos
+ To token.Pos
+ Replacement []byte
+}
+
+func ParseRules(filename string, fset *token.FileSet, r io.Reader) (*GoRuleSet, error) {
+ p := newRulesParser()
+ return p.ParseFile(filename, fset, r)
+}
+
+func RunRules(ctx *Context, f *ast.File, rules *GoRuleSet) error {
+ return newRulesRunner(ctx, rules).run(f)
+}
+
+type GoRuleSet struct {
+ universal *scopedGoRuleSet
+ local *scopedGoRuleSet
+}
diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/runner.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/runner.go
new file mode 100644
index 0000000..9610199
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/runner.go
@@ -0,0 +1,183 @@
+package ruleguard
+
+import (
+ "go/ast"
+ "go/printer"
+ "io/ioutil"
+ "strings"
+
+ "github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep"
+)
+
+type rulesRunner struct {
+ ctx *Context
+ rules *GoRuleSet
+
+ filename string
+ src []byte
+}
+
+func newRulesRunner(ctx *Context, rules *GoRuleSet) *rulesRunner {
+ return &rulesRunner{
+ ctx: ctx,
+ rules: rules,
+ }
+}
+
+func (rr *rulesRunner) fileBytes() []byte {
+ if rr.src != nil {
+ return rr.src
+ }
+
+ // TODO(quasilyte): re-use src slice?
+ src, err := ioutil.ReadFile(rr.filename)
+ if err != nil || src == nil {
+ // Assign a zero-length slice so rr.src
+ // is never nil during the second fileBytes call.
+ rr.src = make([]byte, 0)
+ } else {
+ rr.src = src
+ }
+ return rr.src
+}
+
+func (rr *rulesRunner) run(f *ast.File) error {
+ // TODO(quasilyte): run local rules as well.
+
+ rr.filename = rr.ctx.Fset.Position(f.Pos()).Filename
+
+ for _, rule := range rr.rules.universal.uncategorized {
+ rule.pat.Match(f, func(m gogrep.MatchData) {
+ rr.handleMatch(rule, m)
+ })
+ }
+
+ if rr.rules.universal.categorizedNum != 0 {
+ ast.Inspect(f, func(n ast.Node) bool {
+ cat := categorizeNode(n)
+ for _, rule := range rr.rules.universal.rulesByCategory[cat] {
+ matched := false
+ rule.pat.MatchNode(n, func(m gogrep.MatchData) {
+ matched = rr.handleMatch(rule, m)
+ })
+ if matched {
+ break
+ }
+ }
+ return true
+ })
+ }
+
+ return nil
+}
+
+func (rr *rulesRunner) handleMatch(rule goRule, m gogrep.MatchData) bool {
+ for name, node := range m.Values {
+ expr, ok := node.(ast.Expr)
+ if !ok {
+ continue
+ }
+ filter, ok := rule.filters[name]
+ if !ok {
+ continue
+ }
+ if filter.typePred != nil {
+ typ := rr.ctx.Types.TypeOf(expr)
+ q := typeQuery{x: typ, ctx: rr.ctx}
+ if !filter.typePred(q) {
+ return false
+ }
+ }
+ switch filter.addressable {
+ case bool3true:
+ if !isAddressable(rr.ctx.Types, expr) {
+ return false
+ }
+ case bool3false:
+ if isAddressable(rr.ctx.Types, expr) {
+ return false
+ }
+ }
+ switch filter.pure {
+ case bool3true:
+ if !isPure(rr.ctx.Types, expr) {
+ return false
+ }
+ case bool3false:
+ if isPure(rr.ctx.Types, expr) {
+ return false
+ }
+ }
+ switch filter.constant {
+ case bool3true:
+ if !isConstant(rr.ctx.Types, expr) {
+ return false
+ }
+ case bool3false:
+ if isConstant(rr.ctx.Types, expr) {
+ return false
+ }
+ }
+ }
+
+ prefix := ""
+ if rule.severity != "" {
+ prefix = rule.severity + ": "
+ }
+ message := prefix + rr.renderMessage(rule.msg, m.Node, m.Values)
+ node := m.Node
+ if rule.location != "" {
+ node = m.Values[rule.location]
+ }
+ var suggestion *Suggestion
+ if rule.suggestion != "" {
+ suggestion = &Suggestion{
+ Replacement: []byte(rr.renderMessage(rule.suggestion, m.Node, m.Values)),
+ From: node.Pos(),
+ To: node.End(),
+ }
+ }
+ rr.ctx.Report(node, message, suggestion)
+ return true
+}
+
+func (rr *rulesRunner) renderMessage(msg string, n ast.Node, nodes map[string]ast.Node) string {
+ var buf strings.Builder
+ if strings.Contains(msg, "$$") {
+ rr.writeNode(&buf, n)
+ msg = strings.ReplaceAll(msg, "$$", buf.String())
+ }
+ if len(nodes) == 0 {
+ return msg
+ }
+ for name, n := range nodes {
+ key := "$" + name
+ if !strings.Contains(msg, key) {
+ continue
+ }
+ buf.Reset()
+ rr.writeNode(&buf, n)
+ // Don't interpolate strings that are too long.
+ var replacement string
+ if buf.Len() > 40 {
+ replacement = key
+ } else {
+ replacement = buf.String()
+ }
+ msg = strings.ReplaceAll(msg, key, replacement)
+ }
+ return msg
+}
+
+func (rr *rulesRunner) writeNode(buf *strings.Builder, n ast.Node) {
+ from := rr.ctx.Fset.Position(n.Pos()).Offset
+ to := rr.ctx.Fset.Position(n.End()).Offset
+ src := rr.fileBytes()
+ if (from >= 0 && int(from) < len(src)) && (to >= 0 && int(to) < len(src)) {
+ buf.Write(src[from:to])
+ return
+ }
+ if err := printer.Fprint(buf, rr.ctx.Fset, n); err != nil {
+ panic(err)
+ }
+}
diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/typematch/typematch.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/typematch/typematch.go
new file mode 100644
index 0000000..e6d85ac
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/typematch/typematch.go
@@ -0,0 +1,329 @@
+package typematch
+
+import (
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "strconv"
+ "strings"
+)
+
+type patternOp int
+
+const (
+ opBuiltinType patternOp = iota
+ opPointer
+ opVar
+ opSlice
+ opArray
+ opMap
+ opChan
+ opNamed
+)
+
+type Pattern struct {
+ typeMatches map[string]types.Type
+ int64Matches map[string]int64
+
+ root *pattern
+}
+
+type pattern struct {
+ value interface{}
+ op patternOp
+ subs []*pattern
+}
+
+type ImportsTab struct {
+ imports []map[string]string
+}
+
+func NewImportsTab(initial map[string]string) *ImportsTab {
+ return &ImportsTab{imports: []map[string]string{initial}}
+}
+
+func (itab *ImportsTab) Lookup(pkgName string) (string, bool) {
+ for i := len(itab.imports) - 1; i >= 0; i-- {
+ pkgPath, ok := itab.imports[i][pkgName]
+ if ok {
+ return pkgPath, true
+ }
+ }
+ return "", false
+}
+
+func (itab *ImportsTab) Load(pkgName, pkgPath string) {
+ itab.imports[len(itab.imports)-1][pkgName] = pkgPath
+}
+
+func (itab *ImportsTab) EnterScope() {
+ itab.imports = append(itab.imports, map[string]string{})
+}
+
+func (itab *ImportsTab) LeaveScope() {
+ itab.imports = itab.imports[:len(itab.imports)-1]
+}
+
+type Context struct {
+ Itab *ImportsTab
+}
+
+func Parse(ctx *Context, s string) (*Pattern, error) {
+ noDollars := strings.ReplaceAll(s, "$", "__")
+ n, err := parser.ParseExpr(noDollars)
+ if err != nil {
+ return nil, err
+ }
+ root := parseExpr(ctx, n)
+ if root == nil {
+ return nil, fmt.Errorf("can't convert %s type expression", s)
+ }
+ p := &Pattern{
+ typeMatches: map[string]types.Type{},
+ int64Matches: map[string]int64{},
+ root: root,
+ }
+ return p, nil
+}
+
+var (
+ builtinTypeByName = map[string]types.Type{
+ "bool": types.Typ[types.Bool],
+ "int": types.Typ[types.Int],
+ "int8": types.Typ[types.Int8],
+ "int16": types.Typ[types.Int16],
+ "int32": types.Typ[types.Int32],
+ "int64": types.Typ[types.Int64],
+ "uint": types.Typ[types.Uint],
+ "uint8": types.Typ[types.Uint8],
+ "uint16": types.Typ[types.Uint16],
+ "uint32": types.Typ[types.Uint32],
+ "uint64": types.Typ[types.Uint64],
+ "uintptr": types.Typ[types.Uintptr],
+ "float32": types.Typ[types.Float32],
+ "float64": types.Typ[types.Float64],
+ "complex64": types.Typ[types.Complex64],
+ "complex128": types.Typ[types.Complex128],
+ "string": types.Typ[types.String],
+
+ "error": types.Universe.Lookup("error").Type(),
+ }
+
+ efaceType = types.NewInterfaceType(nil, nil)
+)
+
+func parseExpr(ctx *Context, e ast.Expr) *pattern {
+ switch e := e.(type) {
+ case *ast.Ident:
+ basic, ok := builtinTypeByName[e.Name]
+ if ok {
+ return &pattern{op: opBuiltinType, value: basic}
+ }
+ if strings.HasPrefix(e.Name, "__") {
+ name := strings.TrimPrefix(e.Name, "__")
+ return &pattern{op: opVar, value: name}
+ }
+
+ case *ast.SelectorExpr:
+ pkg, ok := e.X.(*ast.Ident)
+ if !ok {
+ return nil
+ }
+ pkgPath, ok := ctx.Itab.Lookup(pkg.Name)
+ if !ok {
+ return nil
+ }
+ return &pattern{op: opNamed, value: [2]string{pkgPath, e.Sel.Name}}
+
+ case *ast.StarExpr:
+ elem := parseExpr(ctx, e.X)
+ if elem == nil {
+ return nil
+ }
+ return &pattern{op: opPointer, subs: []*pattern{elem}}
+
+ case *ast.ArrayType:
+ elem := parseExpr(ctx, e.Elt)
+ if elem == nil {
+ return nil
+ }
+ if e.Len == nil {
+ return &pattern{
+ op: opSlice,
+ subs: []*pattern{elem},
+ }
+ }
+ if id, ok := e.Len.(*ast.Ident); ok && strings.HasPrefix(id.Name, "__") {
+ name := strings.TrimPrefix(id.Name, "__")
+ return &pattern{
+ op: opArray,
+ value: name,
+ subs: []*pattern{elem},
+ }
+ }
+ lit, ok := e.Len.(*ast.BasicLit)
+ if !ok || lit.Kind != token.INT {
+ return nil
+ }
+ length, err := strconv.ParseInt(lit.Value, 10, 64)
+ if err != nil {
+ return nil
+ }
+ return &pattern{
+ op: opArray,
+ value: length,
+ subs: []*pattern{elem},
+ }
+
+ case *ast.MapType:
+ keyType := parseExpr(ctx, e.Key)
+ if keyType == nil {
+ return nil
+ }
+ valType := parseExpr(ctx, e.Value)
+ if valType == nil {
+ return nil
+ }
+ return &pattern{
+ op: opMap,
+ subs: []*pattern{keyType, valType},
+ }
+
+ case *ast.ChanType:
+ valType := parseExpr(ctx, e.Value)
+ if valType == nil {
+ return nil
+ }
+ var dir types.ChanDir
+ switch {
+ case e.Dir&ast.SEND != 0 && e.Dir&ast.RECV != 0:
+ dir = types.SendRecv
+ case e.Dir&ast.SEND != 0:
+ dir = types.SendOnly
+ case e.Dir&ast.RECV != 0:
+ dir = types.RecvOnly
+ default:
+ return nil
+ }
+ return &pattern{
+ op: opChan,
+ value: dir,
+ subs: []*pattern{valType},
+ }
+
+ case *ast.ParenExpr:
+ return parseExpr(ctx, e.X)
+
+ case *ast.InterfaceType:
+ if len(e.Methods.List) == 0 {
+ return &pattern{op: opBuiltinType, value: efaceType}
+ }
+ }
+
+ return nil
+}
+
+func (p *Pattern) MatchIdentical(typ types.Type) bool {
+ p.reset()
+ return p.matchIdentical(p.root, typ)
+}
+
+func (p *Pattern) reset() {
+ if len(p.int64Matches) != 0 {
+ p.int64Matches = map[string]int64{}
+ }
+ if len(p.typeMatches) != 0 {
+ p.typeMatches = map[string]types.Type{}
+ }
+}
+
+func (p *Pattern) matchIdentical(sub *pattern, typ types.Type) bool {
+ switch sub.op {
+ case opVar:
+ name := sub.value.(string)
+ if name == "_" {
+ return true
+ }
+ y, ok := p.typeMatches[name]
+ if !ok {
+ p.typeMatches[name] = typ
+ return true
+ }
+ if y == nil {
+ return typ == nil
+ }
+ return types.Identical(typ, y)
+
+ case opBuiltinType:
+ return types.Identical(typ, sub.value.(types.Type))
+
+ case opPointer:
+ typ, ok := typ.(*types.Pointer)
+ if !ok {
+ return false
+ }
+ return p.matchIdentical(sub.subs[0], typ.Elem())
+
+ case opSlice:
+ typ, ok := typ.(*types.Slice)
+ if !ok {
+ return false
+ }
+ return p.matchIdentical(sub.subs[0], typ.Elem())
+
+ case opArray:
+ typ, ok := typ.(*types.Array)
+ if !ok {
+ return false
+ }
+ var wantLen int64
+ switch v := sub.value.(type) {
+ case string:
+ if v == "_" {
+ wantLen = typ.Len()
+ break
+ }
+ length, ok := p.int64Matches[v]
+ if ok {
+ wantLen = length
+ } else {
+ p.int64Matches[v] = typ.Len()
+ wantLen = typ.Len()
+ }
+ case int64:
+ wantLen = v
+ }
+ return wantLen == typ.Len() && p.matchIdentical(sub.subs[0], typ.Elem())
+
+ case opMap:
+ typ, ok := typ.(*types.Map)
+ if !ok {
+ return false
+ }
+ return p.matchIdentical(sub.subs[0], typ.Key()) &&
+ p.matchIdentical(sub.subs[1], typ.Elem())
+
+ case opChan:
+ typ, ok := typ.(*types.Chan)
+ if !ok {
+ return false
+ }
+ dir := sub.value.(types.ChanDir)
+ return dir == typ.Dir() && p.matchIdentical(sub.subs[0], typ.Elem())
+
+ case opNamed:
+ typ, ok := typ.(*types.Named)
+ if !ok {
+ return false
+ }
+ pkgPath := sub.value.([2]string)[0]
+ typeName := sub.value.([2]string)[1]
+ obj := typ.Obj()
+ return obj.Pkg().Path() == pkgPath && typeName == obj.Name()
+
+ default:
+ return false
+ }
+}
diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/utils.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/utils.go
new file mode 100644
index 0000000..c17dc24
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/utils.go
@@ -0,0 +1,205 @@
+package ruleguard
+
+import (
+ "go/ast"
+ "go/parser"
+ "go/printer"
+ "go/token"
+ "go/types"
+ "strconv"
+ "strings"
+)
+
+func unquoteNode(lit *ast.BasicLit) string {
+ return lit.Value[1 : len(lit.Value)-1]
+}
+
+func sprintNode(fset *token.FileSet, n ast.Node) string {
+ if fset == nil {
+ fset = token.NewFileSet()
+ }
+ var buf strings.Builder
+ if err := printer.Fprint(&buf, fset, n); err != nil {
+ return ""
+ }
+ return buf.String()
+}
+
+var basicTypeByName = map[string]types.Type{
+ "bool": types.Typ[types.Bool],
+ "int": types.Typ[types.Int],
+ "int8": types.Typ[types.Int8],
+ "int16": types.Typ[types.Int16],
+ "int32": types.Typ[types.Int32],
+ "int64": types.Typ[types.Int64],
+ "uint": types.Typ[types.Uint],
+ "uint8": types.Typ[types.Uint8],
+ "uint16": types.Typ[types.Uint16],
+ "uint32": types.Typ[types.Uint32],
+ "uint64": types.Typ[types.Uint64],
+ "uintptr": types.Typ[types.Uintptr],
+ "float32": types.Typ[types.Float32],
+ "float64": types.Typ[types.Float64],
+ "complex64": types.Typ[types.Complex64],
+ "complex128": types.Typ[types.Complex128],
+ "string": types.Typ[types.String],
+}
+
+func typeFromString(s string) (types.Type, error) {
+ s = strings.ReplaceAll(s, "?", "__any")
+
+ n, err := parser.ParseExpr(s)
+ if err != nil {
+ return nil, err
+ }
+ return typeFromNode(n), nil
+}
+
+func typeFromNode(e ast.Expr) types.Type {
+ switch e := e.(type) {
+ case *ast.Ident:
+ basic, ok := basicTypeByName[e.Name]
+ if ok {
+ return basic
+ }
+
+ case *ast.ArrayType:
+ elem := typeFromNode(e.Elt)
+ if elem == nil {
+ return nil
+ }
+ if e.Len == nil {
+ return types.NewSlice(elem)
+ }
+ lit, ok := e.Len.(*ast.BasicLit)
+ if !ok || lit.Kind != token.INT {
+ return nil
+ }
+ length, err := strconv.Atoi(lit.Value)
+ if err != nil {
+ return nil
+ }
+ return types.NewArray(elem, int64(length))
+
+ case *ast.MapType:
+ keyType := typeFromNode(e.Key)
+ if keyType == nil {
+ return nil
+ }
+ valType := typeFromNode(e.Value)
+ if valType == nil {
+ return nil
+ }
+ return types.NewMap(keyType, valType)
+
+ case *ast.StarExpr:
+ typ := typeFromNode(e.X)
+ if typ != nil {
+ return types.NewPointer(typ)
+ }
+
+ case *ast.ParenExpr:
+ return typeFromNode(e.X)
+
+ case *ast.InterfaceType:
+ if len(e.Methods.List) == 0 {
+ return types.NewInterfaceType(nil, nil)
+ }
+ }
+
+ return nil
+}
+
+// isPure reports whether expr is a softly safe expression and contains
+// no significant side-effects. As opposed to strictly safe expressions,
+// soft safe expressions permit some forms of side-effects, like
+// panic possibility during indexing or nil pointer dereference.
+//
+// Uses types info to determine type conversion expressions that
+// are the only permitted kinds of call expressions.
+// Note that it does not check whether the called function really
+// has any side effects. The analysis is very conservative.
+func isPure(info *types.Info, expr ast.Expr) bool {
+ // This switch is not comprehensive and uses
+ // a whitelist to be on the conservative side.
+ // Can be extended as needed.
+
+ switch expr := expr.(type) {
+ case *ast.StarExpr:
+ return isPure(info, expr.X)
+ case *ast.BinaryExpr:
+ return isPure(info, expr.X) &&
+ isPure(info, expr.Y)
+ case *ast.UnaryExpr:
+ return expr.Op != token.ARROW &&
+ isPure(info, expr.X)
+ case *ast.BasicLit, *ast.Ident:
+ return true
+ case *ast.IndexExpr:
+ return isPure(info, expr.X) &&
+ isPure(info, expr.Index)
+ case *ast.SelectorExpr:
+ return isPure(info, expr.X)
+ case *ast.ParenExpr:
+ return isPure(info, expr.X)
+ case *ast.CompositeLit:
+ return isPureList(info, expr.Elts)
+ case *ast.CallExpr:
+ return isTypeExpr(info, expr.Fun) && isPureList(info, expr.Args)
+
+ default:
+ return false
+ }
+}
+
+// isPureList reports whether every expr in list is safe.
+//
+// See isPure.
+func isPureList(info *types.Info, list []ast.Expr) bool {
+ for _, expr := range list {
+ if !isPure(info, expr) {
+ return false
+ }
+ }
+ return true
+}
+
+func isAddressable(info *types.Info, expr ast.Expr) bool {
+ tv, ok := info.Types[expr]
+ return ok && tv.Addressable()
+}
+
+func isConstant(info *types.Info, expr ast.Expr) bool {
+ tv, ok := info.Types[expr]
+ return ok && tv.Value != nil
+}
+
+// isTypeExpr reports whether x represents a type expression.
+//
+// Type expression does not evaluate to any run time value,
+// but rather describes a type that is used inside Go expression.
+//
+// For example, (*T)(v) is a CallExpr that "calls" (*T).
+// (*T) is a type expression that tells Go compiler type v should be converted to.
+func isTypeExpr(info *types.Info, x ast.Expr) bool {
+ switch x := x.(type) {
+ case *ast.StarExpr:
+ return isTypeExpr(info, x.X)
+ case *ast.ParenExpr:
+ return isTypeExpr(info, x.X)
+ case *ast.SelectorExpr:
+ return isTypeExpr(info, x.Sel)
+
+ case *ast.Ident:
+ // Identifier may be a type expression if the object
+ // it refers to is a type name.
+ _, ok := info.ObjectOf(x).(*types.TypeName)
+ return ok
+
+ case *ast.FuncType, *ast.StructType, *ast.InterfaceType, *ast.ArrayType, *ast.MapType, *ast.ChanType:
+ return true
+
+ default:
+ return false
+ }
+}
diff --git a/vendor/github.com/quasilyte/regex/syntax/LICENSE b/vendor/github.com/quasilyte/regex/syntax/LICENSE
new file mode 100644
index 0000000..f0c8128
--- /dev/null
+++ b/vendor/github.com/quasilyte/regex/syntax/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2020 Iskander (Alex) Sharipov / quasilyte
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/quasilyte/regex/syntax/README.md b/vendor/github.com/quasilyte/regex/syntax/README.md
new file mode 100644
index 0000000..13064ec
--- /dev/null
+++ b/vendor/github.com/quasilyte/regex/syntax/README.md
@@ -0,0 +1,26 @@
+# Package `regex/syntax`
+
+Package `syntax` provides regular expressions parser as well as AST definitions.
+
+## Rationale
+
+There are several problems with the stdlib [regexp/syntax](https://golang.org/pkg/regexp/syntax/) package:
+
+1. It does several transformations during the parsing that make it
+ hard to do any kind of syntax analysis afterward.
+
+2. The AST used there is optimized for the compilation and
+ execution inside the [regexp](https://golang.org/pkg/regexp) package.
+ It's somewhat complicated, especially in a way character ranges are encoded.
+
+3. It only supports [re2](https://github.com/google/re2/wiki/Syntax) syntax.
+ This parser recognizes most PCRE operations.
+
+4. It's easier to extend this package than something from the standard library.
+
+This package makes almost no assumptions about how the generated AST is going to be used
+so it preserves as much syntax information as possible.
+
+It's easy to write another intermediate representation on top of it. The main
+function of this package is to convert a textual regexp pattern into a more
+structured form that can be processed more easily.
diff --git a/vendor/github.com/quasilyte/regex/syntax/ast.go b/vendor/github.com/quasilyte/regex/syntax/ast.go
new file mode 100644
index 0000000..44b7b61
--- /dev/null
+++ b/vendor/github.com/quasilyte/regex/syntax/ast.go
@@ -0,0 +1,147 @@
+package syntax
+
+import (
+ "fmt"
+ "strings"
+)
+
+type Regexp struct {
+ Pattern string
+ Expr Expr
+}
+
+type RegexpPCRE struct {
+ Pattern string
+ Expr Expr
+
+ Source string
+ Modifiers string
+ Delim [2]byte
+}
+
+func (re *RegexpPCRE) HasModifier(mod byte) bool {
+ return strings.IndexByte(re.Modifiers, mod) >= 0
+}
+
+type Expr struct {
+ // The operations that this expression performs. See `operation.go`.
+ Op Operation
+
+ Form Form
+
+ _ [2]byte // Reserved
+
+ // Pos describes a source location inside regexp pattern.
+ Pos Position
+
+ // Args is a list of sub-expressions of this expression.
+ //
+ // See Operation constants documentation to learn how to
+ // interpret the particular expression args.
+ Args []Expr
+
+ // Value holds expression textual value.
+ //
+ // Usually, that value is identical to src[Begin():End()],
+ // but this is not true for programmatically generated objects.
+ Value string
+}
+
+// Begin returns expression leftmost offset.
+func (e Expr) Begin() uint16 { return e.Pos.Begin }
+
+// End returns expression rightmost offset.
+func (e Expr) End() uint16 { return e.Pos.End }
+
+// LastArg returns expression last argument.
+//
+// Should not be called on expressions that may have 0 arguments.
+func (e Expr) LastArg() Expr {
+ return e.Args[len(e.Args)-1]
+}
+
+type Operation byte
+
+type Form byte
+
+func FormatSyntax(re *Regexp) string {
+ return formatExprSyntax(re, re.Expr)
+}
+
+func formatExprSyntax(re *Regexp, e Expr) string {
+ switch e.Op {
+ case OpChar, OpLiteral:
+ switch e.Value {
+ case "{":
+ return "'{'"
+ case "}":
+ return "'}'"
+ default:
+ return e.Value
+ }
+ case OpString, OpEscapeChar, OpEscapeMeta, OpEscapeOctal, OpEscapeUni, OpEscapeHex, OpPosixClass:
+ return e.Value
+ case OpRepeat:
+ return fmt.Sprintf("(repeat %s %s)", formatExprSyntax(re, e.Args[0]), e.Args[1].Value)
+ case OpCaret:
+ return "^"
+ case OpDollar:
+ return "$"
+ case OpDot:
+ return "."
+ case OpQuote:
+ return fmt.Sprintf("(q %s)", e.Value)
+ case OpCharRange:
+ return fmt.Sprintf("%s-%s", formatExprSyntax(re, e.Args[0]), formatExprSyntax(re, e.Args[1]))
+ case OpCharClass:
+ return fmt.Sprintf("[%s]", formatArgsSyntax(re, e.Args))
+ case OpNegCharClass:
+ return fmt.Sprintf("[^%s]", formatArgsSyntax(re, e.Args))
+ case OpConcat:
+ return fmt.Sprintf("{%s}", formatArgsSyntax(re, e.Args))
+ case OpAlt:
+ return fmt.Sprintf("(or %s)", formatArgsSyntax(re, e.Args))
+ case OpCapture:
+ return fmt.Sprintf("(capture %s)", formatExprSyntax(re, e.Args[0]))
+ case OpNamedCapture:
+ return fmt.Sprintf("(capture %s %s)", formatExprSyntax(re, e.Args[0]), e.Args[1].Value)
+ case OpGroup:
+ return fmt.Sprintf("(group %s)", formatExprSyntax(re, e.Args[0]))
+ case OpAtomicGroup:
+ return fmt.Sprintf("(atomic %s)", formatExprSyntax(re, e.Args[0]))
+ case OpGroupWithFlags:
+ return fmt.Sprintf("(group %s ?%s)", formatExprSyntax(re, e.Args[0]), e.Args[1].Value)
+ case OpFlagOnlyGroup:
+ return fmt.Sprintf("(flags ?%s)", formatExprSyntax(re, e.Args[0]))
+ case OpPositiveLookahead:
+ return fmt.Sprintf("(?= %s)", formatExprSyntax(re, e.Args[0]))
+ case OpNegativeLookahead:
+ return fmt.Sprintf("(?! %s)", formatExprSyntax(re, e.Args[0]))
+ case OpPositiveLookbehind:
+ return fmt.Sprintf("(?<= %s)", formatExprSyntax(re, e.Args[0]))
+ case OpNegativeLookbehind:
+ return fmt.Sprintf("(?<! %s)", formatExprSyntax(re, e.Args[0]))
+ default:
+ return fmt.Sprintf("<unexpected op: %v>", e.Op)
+ }
+}
+
+func formatArgsSyntax(re *Regexp, args []Expr) string {
+ parts := make([]string, len(args))
+ for i, e := range args {
+ parts[i] = formatExprSyntax(re, e)
+ }
+ return strings.Join(parts, " ")
+}
diff --git a/vendor/github.com/quasilyte/regex/syntax/errors.go b/vendor/github.com/quasilyte/regex/syntax/errors.go
new file mode 100644
index 0000000..cfafc1d
--- /dev/null
+++ b/vendor/github.com/quasilyte/regex/syntax/errors.go
@@ -0,0 +1,27 @@
+package syntax
+
+import (
+ "fmt"
+)
+
+type ParseError struct {
+ Pos Position
+ Message string
+}
+
+func (e ParseError) Error() string { return e.Message }
+
+func throwfPos(pos Position, format string, args ...interface{}) {
+ panic(ParseError{
+ Pos: pos,
+ Message: fmt.Sprintf(format, args...),
+ })
+}
+
+func throwErrorf(posBegin, posEnd int, format string, args ...interface{}) {
+ pos := Position{
+ Begin: uint16(posBegin),
+ End: uint16(posEnd),
+ }
+ throwfPos(pos, format, args...)
+}
diff --git a/vendor/github.com/quasilyte/regex/syntax/go.mod b/vendor/github.com/quasilyte/regex/syntax/go.mod
new file mode 100644
index 0000000..2a4e1f3
--- /dev/null
+++ b/vendor/github.com/quasilyte/regex/syntax/go.mod
@@ -0,0 +1,3 @@
+module github.com/quasilyte/regex/syntax
+
+go 1.14
diff --git a/vendor/github.com/quasilyte/regex/syntax/lexer.go b/vendor/github.com/quasilyte/regex/syntax/lexer.go
new file mode 100644
index 0000000..e92b038
--- /dev/null
+++ b/vendor/github.com/quasilyte/regex/syntax/lexer.go
@@ -0,0 +1,455 @@
+package syntax
+
+import (
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+type token struct {
+ kind tokenKind
+ pos Position
+}
+
+func (tok token) String() string {
+ return tok.kind.String()
+}
+
+type tokenKind byte
+
+//go:generate stringer -type=tokenKind -trimprefix=tok -linecomment=true
+const (
+ tokNone tokenKind = iota
+
+ tokChar
+ tokGroupFlags
+ tokPosixClass
+ tokConcat
+ tokRepeat
+ tokEscapeChar
+ tokEscapeMeta
+ tokEscapeOctal
+ tokEscapeUni
+ tokEscapeUniFull
+ tokEscapeHex
+ tokEscapeHexFull
+ tokComment
+
+ tokQ // \Q
+ tokMinus // -
+ tokLbracket // [
+ tokLbracketCaret // [^
+ tokRbracket // ]
+ tokDollar // $
+ tokCaret // ^
+ tokQuestion // ?
+ tokDot // .
+ tokPlus // +
+ tokStar // *
+ tokPipe // |
+ tokLparen // (
+ tokLparenName // (?P
+ tokLparenNameAngle // (?<name>
+ tokLparenNameQuote // (?'name'
+ tokLparenFlags // (?flags
+ tokLparenAtomic // (?>
+ tokLparenPositiveLookahead // (?=
+ tokLparenPositiveLookbehind // (?<=
+ tokLparenNegativeLookahead // (?!
+ tokLparenNegativeLookbehind // (?<!
+
+ tokRparen // )
+)
+
+type lexer struct {
+ tokens []token
+ pos int
+ input string
+}
+
+func (l *lexer) NextToken() token {
+ if l.pos < len(l.tokens) {
+ tok := l.tokens[l.pos]
+ l.pos++
+ return tok
+ }
+ return token{}
+}
+
+func (l *lexer) Peek() token {
+ if l.pos < len(l.tokens) {
+ return l.tokens[l.pos]
+ }
+ return token{}
+}
+
+func (l *lexer) scan() {
+ for l.pos < len(l.input) {
+ ch := l.input[l.pos]
+ if ch > unicode.MaxASCII {
+ _, size := utf8.DecodeRuneInString(l.input[l.pos:])
+ l.pushTok(tokChar, size)
+ l.maybeInsertConcat()
+ continue
+ }
+ switch ch {
+ case '\\':
+ l.scanEscape(false)
+ case '.':
+ l.pushTok(tokDot, 1)
+ case '+':
+ l.pushTok(tokPlus, 1)
+ case '*':
+ l.pushTok(tokStar, 1)
+ case '^':
+ l.pushTok(tokCaret, 1)
+ case '$':
+ l.pushTok(tokDollar, 1)
+ case '?':
+ l.pushTok(tokQuestion, 1)
+ case ')':
+ l.pushTok(tokRparen, 1)
+ case '|':
+ l.pushTok(tokPipe, 1)
+ case '[':
+ if l.byteAt(l.pos+1) == '^' {
+ l.pushTok(tokLbracketCaret, 2)
+ } else {
+ l.pushTok(tokLbracket, 1)
+ }
+ l.scanCharClass()
+ case '(':
+ if l.byteAt(l.pos+1) == '?' {
+ switch {
+ case l.byteAt(l.pos+2) == '>':
+ l.pushTok(tokLparenAtomic, len("(?>"))
+ case l.byteAt(l.pos+2) == '=':
+ l.pushTok(tokLparenPositiveLookahead, len("(?="))
+ case l.byteAt(l.pos+2) == '!':
+ l.pushTok(tokLparenNegativeLookahead, len("(?!"))
+ case l.byteAt(l.pos+2) == '<' && l.byteAt(l.pos+3) == '=':
+ l.pushTok(tokLparenPositiveLookbehind, len("(?<="))
+ case l.byteAt(l.pos+2) == '<' && l.byteAt(l.pos+3) == '!':
+ l.pushTok(tokLparenNegativeLookbehind, len("(?<!"))
+ case l.tryScanGroupName(l.pos + 2):
+ // Scanned as a named-group opener token.
+ case l.tryScanGroupFlags(l.pos + 2):
+ // Scanned as a group-with-flags opener token.
+ case l.tryScanComment(l.pos + 2):
+ // Scanned as a comment token.
+ default:
+ throwErrorf(l.pos, l.pos+2, "group token is incomplete")
+ }
+ } else {
+ l.pushTok(tokLparen, 1)
+ }
+ case '{':
+ if j := l.repeatWidth(l.pos + 1); j >= 0 {
+ l.pushTok(tokRepeat, len("{")+j)
+ } else {
+ l.pushTok(tokChar, 1)
+ }
+ default:
+ l.pushTok(tokChar, 1)
+ }
+ l.maybeInsertConcat()
+ }
+}
+
+func (l *lexer) scanCharClass() {
+ l.maybeInsertConcat()
+
+ // We need to handle first `]` in a special way. See #3.
+ if l.byteAt(l.pos) == ']' {
+ l.pushTok(tokChar, 1)
+ }
+
+ for l.pos < len(l.input) {
+ ch := l.input[l.pos]
+ if ch > unicode.MaxASCII {
+ _, size := utf8.DecodeRuneInString(l.input[l.pos:])
+ l.pushTok(tokChar, size)
+ continue
+ }
+ switch ch {
+ case '\\':
+ l.scanEscape(true)
+ case '[':
+ isPosixClass := false
+ if l.byteAt(l.pos+1) == ':' {
+ j := l.stringIndex(l.pos+2, ":]")
+ if j >= 0 {
+ isPosixClass = true
+ l.pushTok(tokPosixClass, j+len("[::]"))
+ }
+ }
+ if !isPosixClass {
+ l.pushTok(tokChar, 1)
+ }
+ case '-':
+ l.pushTok(tokMinus, 1)
+ case ']':
+ l.pushTok(tokRbracket, 1)
+ return // Stop scanning in the char context
+ default:
+ l.pushTok(tokChar, 1)
+ }
+ }
+}
+
+func (l *lexer) scanEscape(insideCharClass bool) {
+ s := l.input
+ if l.pos+1 >= len(s) {
+ throwErrorf(l.pos, l.pos+1, `unexpected end of pattern: trailing '\'`)
+ }
+ switch {
+ case s[l.pos+1] == 'p' || s[l.pos+1] == 'P':
+ if l.pos+2 >= len(s) {
+ throwErrorf(l.pos, l.pos+2, "unexpected end of pattern: expected uni-class-short or '{'")
+ }
+ if s[l.pos+2] == '{' {
+ j := strings.IndexByte(s[l.pos+2:], '}')
+ if j < 0 {
+ throwErrorf(l.pos, l.pos+2, "can't find closing '}'")
+ }
+ l.pushTok(tokEscapeUniFull, len(`\p{`)+j)
+ } else {
+ l.pushTok(tokEscapeUni, len(`\pL`))
+ }
+ case s[l.pos+1] == 'x':
+ if l.pos+2 >= len(s) {
+ throwErrorf(l.pos, l.pos+2, "unexpected end of pattern: expected hex-digit or '{'")
+ }
+ if s[l.pos+2] == '{' {
+ j := strings.IndexByte(s[l.pos+2:], '}')
+ if j < 0 {
+ throwErrorf(l.pos, l.pos+2, "can't find closing '}'")
+ }
+ l.pushTok(tokEscapeHexFull, len(`\x{`)+j)
+ } else {
+ if isHexDigit(l.byteAt(l.pos + 3)) {
+ l.pushTok(tokEscapeHex, len(`\xFF`))
+ } else {
+ l.pushTok(tokEscapeHex, len(`\xF`))
+ }
+ }
+ case isOctalDigit(s[l.pos+1]):
+ digits := 1
+ if isOctalDigit(l.byteAt(l.pos + 2)) {
+ if isOctalDigit(l.byteAt(l.pos + 3)) {
+ digits = 3
+ } else {
+ digits = 2
+ }
+ }
+ l.pushTok(tokEscapeOctal, len(`\`)+digits)
+ case s[l.pos+1] == 'Q':
+ size := len(s) - l.pos // Until the pattern ends
+ j := l.stringIndex(l.pos+2, `\E`)
+ if j >= 0 {
+ size = j + len(`\Q\E`)
+ }
+ l.pushTok(tokQ, size)
+
+ default:
+ ch := l.byteAt(l.pos + 1)
+ if ch > unicode.MaxASCII {
+ _, size := utf8.DecodeRuneInString(l.input[l.pos+1:])
+ l.pushTok(tokEscapeChar, len(`\`)+size)
+ return
+ }
+ kind := tokEscapeChar
+ if insideCharClass {
+ if charClassMetachar[ch] {
+ kind = tokEscapeMeta
+ }
+ } else {
+ if reMetachar[ch] {
+ kind = tokEscapeMeta
+ }
+ }
+ l.pushTok(kind, 2)
+ }
+}
+
+func (l *lexer) maybeInsertConcat() {
+ if l.isConcatPos() {
+ last := len(l.tokens) - 1
+ tok := l.tokens[last]
+ l.tokens[last].kind = tokConcat
+ l.tokens = append(l.tokens, tok)
+ }
+}
+
+func (l *lexer) Init(s string) {
+ l.pos = 0
+ l.tokens = l.tokens[:0]
+ l.input = s
+
+ l.scan()
+
+ l.pos = 0
+}
+
+func (l *lexer) tryScanGroupName(pos int) bool {
+ tok := tokLparenName
+ endCh := byte('>')
+ offset := 1
+ switch l.byteAt(pos) {
+ case '\'':
+ endCh = '\''
+ tok = tokLparenNameQuote
+ case '<':
+ tok = tokLparenNameAngle
+ case 'P':
+ offset = 2
+ default:
+ return false
+ }
+ if pos+offset >= len(l.input) {
+ return false
+ }
+ end := strings.IndexByte(l.input[pos+offset:], endCh)
+ if end < 0 {
+ return false
+ }
+ l.pushTok(tok, len("(?")+offset+end+1)
+ return true
+}
+
+func (l *lexer) tryScanGroupFlags(pos int) bool {
+ colonPos := strings.IndexByte(l.input[pos:], ':')
+ parenPos := strings.IndexByte(l.input[pos:], ')')
+ if parenPos < 0 {
+ return false
+ }
+ end := parenPos
+ if colonPos >= 0 && colonPos < parenPos {
+ end = colonPos + len(":")
+ }
+ l.pushTok(tokLparenFlags, len("(?")+end)
+ return true
+}
+
+func (l *lexer) tryScanComment(pos int) bool {
+ if l.byteAt(pos) != '#' {
+ return false
+ }
+ parenPos := strings.IndexByte(l.input[pos:], ')')
+ if parenPos < 0 {
+ return false
+ }
+ l.pushTok(tokComment, len("(?")+parenPos+len(")"))
+ return true
+}
+
+func (l *lexer) repeatWidth(pos int) int {
+ j := pos
+ for isDigit(l.byteAt(j)) {
+ j++
+ }
+ if j == pos {
+ return -1
+ }
+ if l.byteAt(j) == '}' {
+ return (j + len("}")) - pos // {min}
+ }
+ if l.byteAt(j) != ',' {
+ return -1
+ }
+ j += len(",")
+ for isDigit(l.byteAt(j)) {
+ j++
+ }
+ if l.byteAt(j) == '}' {
+ return (j + len("}")) - pos // {min,} or {min,max}
+ }
+ return -1
+}
+
+func (l *lexer) stringIndex(offset int, s string) int {
+ if offset < len(l.input) {
+ return strings.Index(l.input[offset:], s)
+ }
+ return -1
+}
+
+func (l *lexer) byteAt(pos int) byte {
+ if pos >= 0 && pos < len(l.input) {
+ return l.input[pos]
+ }
+ return 0
+}
+
+func (l *lexer) pushTok(kind tokenKind, size int) {
+ l.tokens = append(l.tokens, token{
+ kind: kind,
+ pos: Position{Begin: uint16(l.pos), End: uint16(l.pos + size)},
+ })
+ l.pos += size
+}
+
+func (l *lexer) isConcatPos() bool {
+ if len(l.tokens) < 2 {
+ return false
+ }
+ x := l.tokens[len(l.tokens)-2].kind
+ if concatTable[x]&concatX != 0 {
+ return false
+ }
+ y := l.tokens[len(l.tokens)-1].kind
+ return concatTable[y]&concatY == 0
+}
+
+const (
+ concatX byte = 1 << iota
+ concatY
+)
+
+var concatTable = [256]byte{
+ tokPipe: concatX | concatY,
+
+ tokLparen: concatX,
+ tokLparenFlags: concatX,
+ tokLparenName: concatX,
+ tokLparenNameAngle: concatX,
+ tokLparenNameQuote: concatX,
+ tokLparenAtomic: concatX,
+ tokLbracket: concatX,
+ tokLbracketCaret: concatX,
+ tokLparenPositiveLookahead: concatX,
+ tokLparenPositiveLookbehind: concatX,
+ tokLparenNegativeLookahead: concatX,
+ tokLparenNegativeLookbehind: concatX,
+
+ tokRparen: concatY,
+ tokRbracket: concatY,
+ tokPlus: concatY,
+ tokStar: concatY,
+ tokQuestion: concatY,
+ tokRepeat: concatY,
+}
diff --git a/vendor/github.com/quasilyte/regex/syntax/operation.go b/vendor/github.com/quasilyte/regex/syntax/operation.go
new file mode 100644
index 0000000..284e5dc
--- /dev/null
+++ b/vendor/github.com/quasilyte/regex/syntax/operation.go
@@ -0,0 +1,189 @@
+package syntax
+
+//go:generate stringer -type=Operation -trimprefix=Op
+const (
+ OpNone Operation = iota
+
+ // OpConcat is a concatenation of ops.
+ // Examples: `xy` `abc\d` ``
+ // Args - concatenated ops
+ //
+ // As a special case, OpConcat with 0 Args is used for "empty"
+ // set of operations.
+ OpConcat
+
+ // OpDot is a '.' wildcard.
+ OpDot
+
+ // OpAlt is x|y alternation of ops.
+ // Examples: `a|bc` `x(.*?)|y(.*?)`
+ // Args - union-connected regexp branches
+ OpAlt
+
+ // OpStar is a shorthand for {0,} repetition.
+ // Examples: `x*`
+ // Args[0] - repeated expression
+ OpStar
+
+ // OpPlus is a shorthand for {1,} repetition.
+ // Examples: `x+`
+ // Args[0] - repeated expression
+ OpPlus
+
+ // OpQuestion is a shorthand for {0,1} repetition.
+ // Examples: `x?`
+ // Args[0] - repeated expression
+ OpQuestion
+
+ // OpNonGreedy makes its operand quantifier non-greedy.
+ // Examples: `x??` `x*?` `x+?`
+ // Args[0] - quantified expression
+ OpNonGreedy
+
+ // OpPossessive makes its operand quantifier possessive.
+ // Examples: `x?+` `x*+` `x++`
+ // Args[0] - quantified expression
+ OpPossessive
+
+ // OpCaret is ^ anchor.
+ OpCaret
+
+ // OpDollar is $ anchor.
+ OpDollar
+
+ // OpLiteral is a collection of consecutive chars.
+ // Examples: `ab` `10x`
+ // Args - enclosed characters (OpChar)
+ OpLiteral
+
+ // OpChar is a single literal pattern character.
+ // Examples: `a` `6` `ф`
+ OpChar
+
+ // OpString is an artificial element that is used in other expressions.
+ OpString
+
+ // OpQuote is a \Q...\E enclosed literal.
+ // Examples: `\Q.?\E` `\Q?q[]=1`
+ //
+ // Note that closing \E is not mandatory.
+ OpQuote
+
+ // OpEscapeChar is a single char escape.
+ // Examples: `\d` `\a` `\n`
+ OpEscapeChar
+
+ // OpEscapeMeta is an escaped meta char.
+ // Examples: `\(` `\[` `\+`
+ OpEscapeMeta
+
+ // OpEscapeOctal is an octal char code escape (up to 3 digits).
+ // Examples: `\123` `\12`
+ OpEscapeOctal
+
+ // OpEscapeHex is a hex char code escape.
+ // Examples: `\x7F` `\xF7`
+ // FormEscapeHexFull examples: `\x{10FFFF}` `\x{F}`.
+ OpEscapeHex
+
+ // OpEscapeUni is a Unicode char class escape.
+ // Examples: `\pS` `\pL` `\PL`
+ // FormEscapeUniFull examples: `\p{Greek}` `\p{Symbol}` `\p{^L}`
+ OpEscapeUni
+
+ // OpCharClass is a char class enclosed in [].
+ // Examples: `[abc]` `[a-z0-9\]]`
+ // Args - char class elements (can include OpCharRange and OpPosixClass).
+ OpCharClass
+
+ // OpNegCharClass is a negated char class enclosed in [].
+ // Examples: `[^abc]` `[^a-z0-9\]]`
+ // Args - char class elements (can include OpCharRange and OpPosixClass).
+ OpNegCharClass
+
+ // OpCharRange is an inclusive char range inside a char class.
+ // Examples: `0-9` `A-Z`
+ // Args[0] - range lower bound (OpChar or OpEscape).
+ // Args[1] - range upper bound (OpChar or OpEscape).
+ OpCharRange
+
+ // OpPosixClass is a named ASCII char set inside a char class.
+ // Examples: `[:alpha:]` `[:blank:]`
+ OpPosixClass
+
+ // OpRepeat is a {min,max} repetition quantifier.
+ // Examples: `x{5}` `x{min,max}` `x{min,}`
+ // Args[0] - repeated expression
+ // Args[1] - repeat count (OpString)
+ OpRepeat
+
+ // OpCapture is `(re)` capturing group.
+ // Examples: `(abc)` `(x|y)`
+ // Args[0] - enclosed expression
+ OpCapture
+
+ // OpNamedCapture is `(?Pre)` capturing group.
+ // Examples: `(?Pabc)` `(?Px|y)`
+ // FormNamedCaptureAngle examples: `(?abc)` `(?x|y)`
+ // FormNamedCaptureQuote examples: `(?'foo'abc)` `(?'name'x|y)`
+ // Args[0] - enclosed expression (OpConcat with 0 args for empty group)
+ // Args[1] - group name (OpString)
+ OpNamedCapture
+
+ // OpGroup is `(?:re)` non-capturing group.
+ // Examples: `(?:abc)` `(?:x|y)`
+ // Args[0] - enclosed expression (OpConcat with 0 args for empty group)
+ OpGroup
+
+ // OpGroupWithFlags is `(?flags:re)` non-capturing group.
+ // Examples: `(?i:abc)` `(?i:x|y)`
+ // Args[0] - enclosed expression (OpConcat with 0 args for empty group)
+ // Args[1] - flags (OpString)
+ OpGroupWithFlags
+
+ // OpAtomicGroup is `(?>re)` non-capturing group without backtracking.
+ // Examples: `(?>foo)` `(?>)`
+ // Args[0] - enclosed expression (OpConcat with 0 args for empty group)
+ OpAtomicGroup
+
+ // OpPositiveLookahead is `(?=re)` asserts that following text matches re.
+ // Examples: `(?=foo)`
+ // Args[0] - enclosed expression (OpConcat with 0 args for empty group)
+ OpPositiveLookahead
+
+ // OpNegativeLookahead is `(?!re)` asserts that following text doesn't match re.
+ // Examples: `(?!foo)`
+ // Args[0] - enclosed expression (OpConcat with 0 args for empty group)
+ OpNegativeLookahead
+
+ // OpPositiveLookbehind is `(?<=re)` asserts that preceding text matches re.
+ // Examples: `(?<=foo)`
+ // Args[0] - enclosed expression (OpConcat with 0 args for empty group)
+ OpPositiveLookbehind
+
+ // OpNegativeLookbehind is `(?=re)` asserts that preceding text doesn't match re.
+ // Examples: `(?= Operation(len(_Operation_index)-1) {
+ return "Operation(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _Operation_name[_Operation_index[i]:_Operation_index[i+1]]
+}
diff --git a/vendor/github.com/quasilyte/regex/syntax/parser.go b/vendor/github.com/quasilyte/regex/syntax/parser.go
new file mode 100644
index 0000000..faf0f8b
--- /dev/null
+++ b/vendor/github.com/quasilyte/regex/syntax/parser.go
@@ -0,0 +1,471 @@
+package syntax
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+)
+
+type ParserOptions struct {
+ // NoLiterals disables OpChar merging into OpLiteral.
+ NoLiterals bool
+}
+
+func NewParser(opts *ParserOptions) *Parser {
+ return newParser(opts)
+}
+
+type Parser struct {
+ out Regexp
+ lexer lexer
+ exprPool []Expr
+
+ prefixParselets [256]prefixParselet
+ infixParselets [256]infixParselet
+
+ charClass []Expr
+ allocated uint
+
+ opts ParserOptions
+}
+
+// ParsePCRE parses PHP-style pattern with delimiters.
+// An example of such pattern is `/foo/i`.
+func (p *Parser) ParsePCRE(pattern string) (*RegexpPCRE, error) {
+ pcre, err := p.newPCRE(pattern)
+ if err != nil {
+ return nil, err
+ }
+ if pcre.HasModifier('x') {
+ return nil, errors.New("'x' modifier is not supported")
+ }
+ re, err := p.Parse(pcre.Pattern)
+ if re != nil {
+ pcre.Expr = re.Expr
+ }
+ return pcre, err
+}
+
+func (p *Parser) Parse(pattern string) (result *Regexp, err error) {
+ defer func() {
+ r := recover()
+ if r == nil {
+ return
+ }
+ if err2, ok := r.(ParseError); ok {
+ err = err2
+ return
+ }
+ panic(r)
+ }()
+
+ p.lexer.Init(pattern)
+ p.allocated = 0
+ p.out.Pattern = pattern
+ if pattern == "" {
+ p.out.Expr = *p.newExpr(OpConcat, Position{})
+ } else {
+ p.out.Expr = *p.parseExpr(0)
+ }
+
+ if !p.opts.NoLiterals {
+ p.mergeChars(&p.out.Expr)
+ }
+ p.setValues(&p.out.Expr)
+
+ return &p.out, nil
+}
+
+type prefixParselet func(token) *Expr
+
+type infixParselet func(*Expr, token) *Expr
+
+func newParser(opts *ParserOptions) *Parser {
+ var p Parser
+
+ if opts != nil {
+ p.opts = *opts
+ }
+ p.exprPool = make([]Expr, 256)
+
+ for tok, op := range tok2op {
+ if op != 0 {
+ p.prefixParselets[tokenKind(tok)] = p.parsePrefixElementary
+ }
+ }
+
+ p.prefixParselets[tokEscapeHexFull] = func(tok token) *Expr {
+ return p.newExprForm(OpEscapeHex, FormEscapeHexFull, tok.pos)
+ }
+ p.prefixParselets[tokEscapeUniFull] = func(tok token) *Expr {
+ return p.newExprForm(OpEscapeUni, FormEscapeUniFull, tok.pos)
+ }
+
+ p.prefixParselets[tokLparen] = func(tok token) *Expr { return p.parseGroup(OpCapture, tok) }
+ p.prefixParselets[tokLparenAtomic] = func(tok token) *Expr { return p.parseGroup(OpAtomicGroup, tok) }
+ p.prefixParselets[tokLparenPositiveLookahead] = func(tok token) *Expr { return p.parseGroup(OpPositiveLookahead, tok) }
+ p.prefixParselets[tokLparenNegativeLookahead] = func(tok token) *Expr { return p.parseGroup(OpNegativeLookahead, tok) }
+ p.prefixParselets[tokLparenPositiveLookbehind] = func(tok token) *Expr { return p.parseGroup(OpPositiveLookbehind, tok) }
+ p.prefixParselets[tokLparenNegativeLookbehind] = func(tok token) *Expr { return p.parseGroup(OpNegativeLookbehind, tok) }
+
+ p.prefixParselets[tokLparenName] = func(tok token) *Expr {
+ return p.parseNamedCapture(FormDefault, tok)
+ }
+ p.prefixParselets[tokLparenNameAngle] = func(tok token) *Expr {
+ return p.parseNamedCapture(FormNamedCaptureAngle, tok)
+ }
+ p.prefixParselets[tokLparenNameQuote] = func(tok token) *Expr {
+ return p.parseNamedCapture(FormNamedCaptureQuote, tok)
+ }
+
+ p.prefixParselets[tokLparenFlags] = p.parseGroupWithFlags
+
+ p.prefixParselets[tokPipe] = func(tok token) *Expr {
+ // We need prefix pipe parselet to handle `(|x)` syntax.
+ right := p.parseExpr(1)
+ return p.newExpr(OpAlt, tok.pos, p.newEmpty(tok.pos), right)
+ }
+ p.prefixParselets[tokLbracket] = func(tok token) *Expr {
+ return p.parseCharClass(OpCharClass, tok)
+ }
+ p.prefixParselets[tokLbracketCaret] = func(tok token) *Expr {
+ return p.parseCharClass(OpNegCharClass, tok)
+ }
+
+ p.infixParselets[tokRepeat] = func(left *Expr, tok token) *Expr {
+ repeatLit := p.newExpr(OpString, tok.pos)
+ return p.newExpr(OpRepeat, combinePos(left.Pos, tok.pos), left, repeatLit)
+ }
+ p.infixParselets[tokStar] = func(left *Expr, tok token) *Expr {
+ return p.newExpr(OpStar, combinePos(left.Pos, tok.pos), left)
+ }
+ p.infixParselets[tokConcat] = func(left *Expr, tok token) *Expr {
+ right := p.parseExpr(2)
+ if left.Op == OpConcat {
+ left.Args = append(left.Args, *right)
+ left.Pos.End = right.End()
+ return left
+ }
+ return p.newExpr(OpConcat, combinePos(left.Pos, right.Pos), left, right)
+ }
+ p.infixParselets[tokPipe] = p.parseAlt
+ p.infixParselets[tokMinus] = p.parseMinus
+ p.infixParselets[tokPlus] = p.parsePlus
+ p.infixParselets[tokQuestion] = p.parseQuestion
+
+ return &p
+}
+
+func (p *Parser) setValues(e *Expr) {
+ for i := range e.Args {
+ p.setValues(&e.Args[i])
+ }
+ e.Value = p.exprValue(e)
+}
+
+func (p *Parser) exprValue(e *Expr) string {
+ return p.out.Pattern[e.Begin():e.End()]
+}
+
+func (p *Parser) mergeChars(e *Expr) {
+ for i := range e.Args {
+ p.mergeChars(&e.Args[i])
+ }
+ if e.Op != OpConcat || len(e.Args) < 2 {
+ return
+ }
+
+ args := e.Args[:0]
+ i := 0
+ for i < len(e.Args) {
+ first := i
+ chars := 0
+ for j := i; j < len(e.Args) && e.Args[j].Op == OpChar; j++ {
+ chars++
+ }
+ if chars > 1 {
+ c1 := e.Args[first]
+ c2 := e.Args[first+chars-1]
+ lit := p.newExpr(OpLiteral, combinePos(c1.Pos, c2.Pos))
+ for j := 0; j < chars; j++ {
+ lit.Args = append(lit.Args, e.Args[first+j])
+ }
+ args = append(args, *lit)
+ i += chars
+ } else {
+ args = append(args, e.Args[i])
+ i++
+ }
+ }
+ if len(args) == 1 {
+ *e = args[0] // Turn OpConcat into OpLiteral
+ } else {
+ e.Args = args
+ }
+}
+
+func (p *Parser) newEmpty(pos Position) *Expr {
+ return p.newExpr(OpConcat, pos)
+}
+
+func (p *Parser) newExprForm(op Operation, form Form, pos Position, args ...*Expr) *Expr {
+ e := p.newExpr(op, pos, args...)
+ e.Form = form
+ return e
+}
+
+func (p *Parser) newExpr(op Operation, pos Position, args ...*Expr) *Expr {
+ e := p.allocExpr()
+ *e = Expr{
+ Op: op,
+ Pos: pos,
+ Args: e.Args[:0],
+ }
+ for _, arg := range args {
+ e.Args = append(e.Args, *arg)
+ }
+ return e
+}
+
+func (p *Parser) allocExpr() *Expr {
+ i := p.allocated
+ if i < uint(len(p.exprPool)) {
+ p.allocated++
+ return &p.exprPool[i]
+ }
+ return &Expr{}
+}
+
+func (p *Parser) expect(kind tokenKind) Position {
+ tok := p.lexer.NextToken()
+ if tok.kind != kind {
+ throwErrorf(int(tok.pos.Begin), int(tok.pos.End), "expected '%s', found '%s'", kind, tok.kind)
+ }
+ return tok.pos
+}
+
+func (p *Parser) parseExpr(precedence int) *Expr {
+ tok := p.lexer.NextToken()
+ prefix := p.prefixParselets[tok.kind]
+ if prefix == nil {
+ throwfPos(tok.pos, "unexpected token: %v", tok)
+ }
+ left := prefix(tok)
+
+ for precedence < p.precedenceOf(p.lexer.Peek()) {
+ tok := p.lexer.NextToken()
+ infix := p.infixParselets[tok.kind]
+ left = infix(left, tok)
+ }
+
+ return left
+}
+
+func (p *Parser) parsePrefixElementary(tok token) *Expr {
+ return p.newExpr(tok2op[tok.kind], tok.pos)
+}
+
+func (p *Parser) parseCharClass(op Operation, tok token) *Expr {
+ var endPos Position
+ p.charClass = p.charClass[:0]
+ for {
+ p.charClass = append(p.charClass, *p.parseExpr(0))
+ next := p.lexer.Peek()
+ if next.kind == tokRbracket {
+ endPos = next.pos
+ p.lexer.NextToken()
+ break
+ }
+ if next.kind == tokNone {
+ throwfPos(tok.pos, "unterminated '['")
+ }
+ }
+
+ result := p.newExpr(op, combinePos(tok.pos, endPos))
+ result.Args = append(result.Args, p.charClass...)
+ return result
+}
+
+func (p *Parser) parseMinus(left *Expr, tok token) *Expr {
+ if p.isValidCharRangeOperand(left) {
+ if p.lexer.Peek().kind != tokRbracket {
+ right := p.parseExpr(2)
+ return p.newExpr(OpCharRange, combinePos(left.Pos, right.Pos), left, right)
+ }
+ }
+ p.charClass = append(p.charClass, *left)
+ return p.newExpr(OpChar, tok.pos)
+}
+
+func (p *Parser) isValidCharRangeOperand(e *Expr) bool {
+ switch e.Op {
+ case OpEscapeHex, OpEscapeOctal, OpEscapeMeta, OpChar:
+ return true
+ case OpEscapeChar:
+ switch p.exprValue(e) {
+ case `\\`, `\|`, `\*`, `\+`, `\?`, `\.`, `\[`, `\^`, `\$`, `\(`, `\)`:
+ return true
+ }
+ }
+ return false
+}
+
+func (p *Parser) parsePlus(left *Expr, tok token) *Expr {
+ op := OpPlus
+ switch left.Op {
+ case OpPlus, OpStar, OpQuestion, OpRepeat:
+ op = OpPossessive
+ }
+ return p.newExpr(op, combinePos(left.Pos, tok.pos), left)
+}
+
+func (p *Parser) parseQuestion(left *Expr, tok token) *Expr {
+ op := OpQuestion
+ switch left.Op {
+ case OpPlus, OpStar, OpQuestion, OpRepeat:
+ op = OpNonGreedy
+ }
+ return p.newExpr(op, combinePos(left.Pos, tok.pos), left)
+}
+
+func (p *Parser) parseAlt(left *Expr, tok token) *Expr {
+ var right *Expr
+ switch p.lexer.Peek().kind {
+ case tokRparen, tokNone:
+ // This is needed to handle `(x|)` syntax.
+ right = p.newEmpty(tok.pos)
+ default:
+ right = p.parseExpr(1)
+ }
+ if left.Op == OpAlt {
+ left.Args = append(left.Args, *right)
+ left.Pos.End = right.End()
+ return left
+ }
+ return p.newExpr(OpAlt, combinePos(left.Pos, right.Pos), left, right)
+}
+
+func (p *Parser) parseGroupItem(tok token) *Expr {
+ if p.lexer.Peek().kind == tokRparen {
+ // This is needed to handle `() syntax.`
+ return p.newEmpty(tok.pos)
+ }
+ return p.parseExpr(0)
+}
+
+func (p *Parser) parseGroup(op Operation, tok token) *Expr {
+ x := p.parseGroupItem(tok)
+ result := p.newExpr(op, tok.pos, x)
+ result.Pos.End = p.expect(tokRparen).End
+ return result
+}
+
+func (p *Parser) parseNamedCapture(form Form, tok token) *Expr {
+ prefixLen := len("(?<")
+ if form == FormDefault {
+ prefixLen = len("(?P<")
+ }
+ name := p.newExpr(OpString, Position{
+ Begin: tok.pos.Begin + uint16(prefixLen),
+ End: tok.pos.End - uint16(len(">")),
+ })
+ x := p.parseGroupItem(tok)
+ result := p.newExprForm(OpNamedCapture, form, tok.pos, x, name)
+ result.Pos.End = p.expect(tokRparen).End
+ return result
+}
+
+func (p *Parser) parseGroupWithFlags(tok token) *Expr {
+ var result *Expr
+ val := p.out.Pattern[tok.pos.Begin+1 : tok.pos.End]
+ switch {
+ case !strings.HasSuffix(val, ":"):
+ flags := p.newExpr(OpString, Position{
+ Begin: tok.pos.Begin + uint16(len("(?")),
+ End: tok.pos.End,
+ })
+ result = p.newExpr(OpFlagOnlyGroup, tok.pos, flags)
+ case val == "?:":
+ x := p.parseGroupItem(tok)
+ result = p.newExpr(OpGroup, tok.pos, x)
+ default:
+ flags := p.newExpr(OpString, Position{
+ Begin: tok.pos.Begin + uint16(len("(?")),
+ End: tok.pos.End - uint16(len(":")),
+ })
+ x := p.parseGroupItem(tok)
+ result = p.newExpr(OpGroupWithFlags, tok.pos, x, flags)
+ }
+ result.Pos.End = p.expect(tokRparen).End
+ return result
+}
+
+func (p *Parser) precedenceOf(tok token) int {
+ switch tok.kind {
+ case tokPipe:
+ return 1
+ case tokConcat, tokMinus:
+ return 2
+ case tokPlus, tokStar, tokQuestion, tokRepeat:
+ return 3
+ default:
+ return 0
+ }
+}
+
+func (p *Parser) newPCRE(source string) (*RegexpPCRE, error) {
+ if source == "" {
+ return nil, errors.New("empty pattern: can't find delimiters")
+ }
+
+ delim := source[0]
+ endDelim := delim
+ switch delim {
+ case '(':
+ endDelim = ')'
+ case '{':
+ endDelim = '}'
+ case '[':
+ endDelim = ']'
+ case '<':
+ endDelim = '>'
+ case '\\':
+ return nil, errors.New("'\\' is not a valid delimiter")
+ default:
+ if isSpace(delim) {
+ return nil, errors.New("whitespace is not a valid delimiter")
+ }
+ if isAlphanumeric(delim) {
+ return nil, fmt.Errorf("'%c' is not a valid delimiter", delim)
+ }
+ }
+
+ j := strings.LastIndexByte(source, endDelim)
+ if j == -1 {
+ return nil, fmt.Errorf("can't find '%c' ending delimiter", endDelim)
+ }
+
+ pcre := &RegexpPCRE{
+ Pattern: source[1:j],
+ Source: source,
+ Delim: [2]byte{delim, endDelim},
+ Modifiers: source[j+1:],
+ }
+ return pcre, nil
+}
+
+var tok2op = [256]Operation{
+ tokDollar: OpDollar,
+ tokCaret: OpCaret,
+ tokDot: OpDot,
+ tokChar: OpChar,
+ tokMinus: OpChar,
+ tokEscapeChar: OpEscapeChar,
+ tokEscapeMeta: OpEscapeMeta,
+ tokEscapeHex: OpEscapeHex,
+ tokEscapeOctal: OpEscapeOctal,
+ tokEscapeUni: OpEscapeUni,
+ tokPosixClass: OpPosixClass,
+ tokQ: OpQuote,
+ tokComment: OpComment,
+}
diff --git a/vendor/github.com/quasilyte/regex/syntax/pos.go b/vendor/github.com/quasilyte/regex/syntax/pos.go
new file mode 100644
index 0000000..51bdbf8
--- /dev/null
+++ b/vendor/github.com/quasilyte/regex/syntax/pos.go
@@ -0,0 +1,10 @@
+package syntax
+
+type Position struct {
+ Begin uint16
+ End uint16
+}
+
+func combinePos(begin, end Position) Position {
+ return Position{Begin: begin.Begin, End: end.End}
+}
diff --git a/vendor/github.com/quasilyte/regex/syntax/tokenkind_string.go b/vendor/github.com/quasilyte/regex/syntax/tokenkind_string.go
new file mode 100644
index 0000000..8800436
--- /dev/null
+++ b/vendor/github.com/quasilyte/regex/syntax/tokenkind_string.go
@@ -0,0 +1,59 @@
+// Code generated by "stringer -type=tokenKind -trimprefix=tok -linecomment=true"; DO NOT EDIT.
+
+package syntax
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[tokNone-0]
+ _ = x[tokChar-1]
+ _ = x[tokGroupFlags-2]
+ _ = x[tokPosixClass-3]
+ _ = x[tokConcat-4]
+ _ = x[tokRepeat-5]
+ _ = x[tokEscapeChar-6]
+ _ = x[tokEscapeMeta-7]
+ _ = x[tokEscapeOctal-8]
+ _ = x[tokEscapeUni-9]
+ _ = x[tokEscapeUniFull-10]
+ _ = x[tokEscapeHex-11]
+ _ = x[tokEscapeHexFull-12]
+ _ = x[tokComment-13]
+ _ = x[tokQ-14]
+ _ = x[tokMinus-15]
+ _ = x[tokLbracket-16]
+ _ = x[tokLbracketCaret-17]
+ _ = x[tokRbracket-18]
+ _ = x[tokDollar-19]
+ _ = x[tokCaret-20]
+ _ = x[tokQuestion-21]
+ _ = x[tokDot-22]
+ _ = x[tokPlus-23]
+ _ = x[tokStar-24]
+ _ = x[tokPipe-25]
+ _ = x[tokLparen-26]
+ _ = x[tokLparenName-27]
+ _ = x[tokLparenNameAngle-28]
+ _ = x[tokLparenNameQuote-29]
+ _ = x[tokLparenFlags-30]
+ _ = x[tokLparenAtomic-31]
+ _ = x[tokLparenPositiveLookahead-32]
+ _ = x[tokLparenPositiveLookbehind-33]
+ _ = x[tokLparenNegativeLookahead-34]
+ _ = x[tokLparenNegativeLookbehind-35]
+ _ = x[tokRparen-36]
+}
+
+const _tokenKind_name = "NoneCharGroupFlagsPosixClassConcatRepeatEscapeCharEscapeMetaEscapeOctalEscapeUniEscapeUniFullEscapeHexEscapeHexFullComment\\Q-[[^]$^?.+*|((?P(?(?'name'(?flags(?>(?=(?<=(?!(?= tokenKind(len(_tokenKind_index)-1) {
+ return "tokenKind(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _tokenKind_name[_tokenKind_index[i]:_tokenKind_index[i+1]]
+}
diff --git a/vendor/github.com/quasilyte/regex/syntax/utils.go b/vendor/github.com/quasilyte/regex/syntax/utils.go
new file mode 100644
index 0000000..934680c
--- /dev/null
+++ b/vendor/github.com/quasilyte/regex/syntax/utils.go
@@ -0,0 +1,30 @@
+package syntax
+
+func isSpace(ch byte) bool {
+ switch ch {
+ case '\r', '\n', '\t', '\f', '\v':
+ return true
+ default:
+ return false
+ }
+}
+
+func isAlphanumeric(ch byte) bool {
+ return (ch >= 'a' && ch <= 'z') ||
+ (ch >= 'A' && ch <= 'Z') ||
+ (ch >= '0' && ch <= '9')
+}
+
+func isDigit(ch byte) bool {
+ return ch >= '0' && ch <= '9'
+}
+
+func isOctalDigit(ch byte) bool {
+ return ch >= '0' && ch <= '7'
+}
+
+func isHexDigit(ch byte) bool {
+ return (ch >= '0' && ch <= '9') ||
+ (ch >= 'a' && ch <= 'f') ||
+ (ch >= 'A' && ch <= 'F')
+}
diff --git a/vendor/github.com/ryancurrah/gomodguard/.dockerignore b/vendor/github.com/ryancurrah/gomodguard/.dockerignore
new file mode 100644
index 0000000..7773828
--- /dev/null
+++ b/vendor/github.com/ryancurrah/gomodguard/.dockerignore
@@ -0,0 +1 @@
+dist/
\ No newline at end of file
diff --git a/vendor/github.com/ryancurrah/gomodguard/.gitignore b/vendor/github.com/ryancurrah/gomodguard/.gitignore
new file mode 100644
index 0000000..030056d
--- /dev/null
+++ b/vendor/github.com/ryancurrah/gomodguard/.gitignore
@@ -0,0 +1,23 @@
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Dependency directories (remove the comment below to include it)
+# vendor/
+
+/gomodguard
+
+*.xml
+
+dist/
+
+coverage.*
\ No newline at end of file
diff --git a/vendor/github.com/ryancurrah/gomodguard/.golangci.yml b/vendor/github.com/ryancurrah/gomodguard/.golangci.yml
new file mode 100644
index 0000000..9c19e63
--- /dev/null
+++ b/vendor/github.com/ryancurrah/gomodguard/.golangci.yml
@@ -0,0 +1,6 @@
+linters:
+ enable-all: true
+ disable:
+ - funlen
+ - gochecknoglobals
+ - lll
diff --git a/vendor/github.com/ryancurrah/gomodguard/.gomodguard.yaml b/vendor/github.com/ryancurrah/gomodguard/.gomodguard.yaml
new file mode 100644
index 0000000..38a2f0b
--- /dev/null
+++ b/vendor/github.com/ryancurrah/gomodguard/.gomodguard.yaml
@@ -0,0 +1,27 @@
+allowed:
+ modules: # List of allowed modules
+ - gopkg.in/yaml.v2
+ - github.com/go-xmlfmt/xmlfmt
+ - github.com/Masterminds/semver
+ domains: # List of allowed module domains
+ - golang.org
+
+blocked:
+ modules: # List of blocked modules
+ - github.com/uudashr/go-module: # Blocked module
+ recommendations: # Recommended modules that should be used instead (Optional)
+ - golang.org/x/mod
+ reason: "`mod` is the official go.mod parser library." # Reason why the recommended module should be used (Optional)
+ - github.com/mitchellh/go-homedir:
+ recommendations:
+ - github.com/ryancurrah/gomodguard
+ reason: "testing if the current/linted module is not blocked when it is recommended"
+ - github.com/phayes/checkstyle:
+ recommendations:
+ - github.com/someother/module
+ reason: "testing if module is blocked with recommendation"
+
+ versions:
+ - github.com/mitchellh/go-homedir:
+ version: "<= 1.1.0"
+ reason: "testing if blocked version constraint works."
diff --git a/vendor/github.com/ryancurrah/gomodguard/.goreleaser.yml b/vendor/github.com/ryancurrah/gomodguard/.goreleaser.yml
new file mode 100644
index 0000000..20d8349
--- /dev/null
+++ b/vendor/github.com/ryancurrah/gomodguard/.goreleaser.yml
@@ -0,0 +1,31 @@
+builds:
+- main: ./cmd/gomodguard/main.go
+ env:
+ - CGO_ENABLED=0
+archives:
+- replacements:
+ darwin: Darwin
+ linux: Linux
+ windows: Windows
+ 386: i386
+ amd64: x86_64
+checksum:
+ name_template: 'checksums.txt'
+dockers:
+- goos: linux
+ goarch: amd64
+ binaries:
+ - gomodguard
+ image_templates:
+ - "ryancurrah/gomodguard:latest"
+ - "ryancurrah/gomodguard:{{.Tag}}"
+ skip_push: false
+ dockerfile: Dockerfile.goreleaser
+ build_flag_templates:
+ - "--pull"
+ - "--build-arg=gomodguard_VERSION={{.Version}}"
+ - "--label=org.opencontainers.image.created={{.Date}}"
+ - "--label=org.opencontainers.image.name={{.ProjectName}}"
+ - "--label=org.opencontainers.image.revision={{.FullCommit}}"
+ - "--label=org.opencontainers.image.version={{.Version}}"
+ - "--label=org.opencontainers.image.source={{.GitURL}}"
diff --git a/vendor/github.com/ryancurrah/gomodguard/Dockerfile b/vendor/github.com/ryancurrah/gomodguard/Dockerfile
new file mode 100644
index 0000000..719a0eb
--- /dev/null
+++ b/vendor/github.com/ryancurrah/gomodguard/Dockerfile
@@ -0,0 +1,17 @@
+ARG GO_VERSION=1.14.2
+ARG ALPINE_VERSION=3.11
+ARG gomodguard_VERSION=
+
+# ---- Build container
+FROM golang:${GO_VERSION}-alpine${ALPINE_VERSION} AS builder
+WORKDIR /gomodguard
+COPY . .
+RUN apk add --no-cache git
+RUN go build -o gomodguard cmd/gomodguard/main.go
+
+# ---- App container
+FROM golang:${GO_VERSION}-alpine${ALPINE_VERSION}
+WORKDIR /
+RUN apk --no-cache add ca-certificates
+COPY --from=builder gomodguard/gomodguard /
+ENTRYPOINT ./gomodguard
diff --git a/vendor/github.com/ryancurrah/gomodguard/Dockerfile.goreleaser b/vendor/github.com/ryancurrah/gomodguard/Dockerfile.goreleaser
new file mode 100644
index 0000000..57a042a
--- /dev/null
+++ b/vendor/github.com/ryancurrah/gomodguard/Dockerfile.goreleaser
@@ -0,0 +1,10 @@
+ARG GO_VERSION=1.14.2
+ARG ALPINE_VERSION=3.11
+ARG gomodguard_VERSION=
+
+# ---- App container
+FROM golang:${GO_VERSION}-alpine${ALPINE_VERSION}
+WORKDIR /
+RUN apk --no-cache add ca-certificates
+COPY gomodguard /gomodguard
+ENTRYPOINT ./gomodguard
diff --git a/vendor/github.com/ryancurrah/gomodguard/LICENSE b/vendor/github.com/ryancurrah/gomodguard/LICENSE
new file mode 100644
index 0000000..acd8a81
--- /dev/null
+++ b/vendor/github.com/ryancurrah/gomodguard/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2020 Ryan Currah
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/ryancurrah/gomodguard/Makefile b/vendor/github.com/ryancurrah/gomodguard/Makefile
new file mode 100644
index 0000000..9af2f76
--- /dev/null
+++ b/vendor/github.com/ryancurrah/gomodguard/Makefile
@@ -0,0 +1,49 @@
+current_dir = $(shell pwd)
+version = $(shell printf '%s' $$(cat VERSION))
+
+.PHONY: lint
+lint:
+ golangci-lint run ./...
+
+.PHONY: build
+build:
+ go build -o gomodguard cmd/gomodguard/main.go
+
+.PHONY: dockerbuild
+dockerbuild:
+ docker build --build-arg GOMODGUARD_VERSION=${version} --tag ryancurrah/gomodguard:${version} .
+
+.PHONY: run
+run: build
+ ./gomodguard
+
+.PHONY: test
+test:
+ go test -v -coverprofile coverage.out
+
+.PHONY: cover
+cover:
+ gocover-cobertura < coverage.out > coverage.xml
+
+.PHONY: dockerrun
+dockerrun: dockerbuild
+ docker run -v "${current_dir}/.gomodguard.yaml:/.gomodguard.yaml" ryancurrah/gomodguard:latest
+
+.PHONY: release
+release:
+ git tag ${version}
+ git push --tags
+ goreleaser --skip-validate --rm-dist
+
+.PHONY: clean
+clean:
+ rm -rf dist/
+ rm -f gomodguard coverage.xml coverage.out
+
+.PHONY: install-tools-mac
+install-tools-mac:
+ brew install goreleaser/tap/goreleaser
+
+.PHONY: install-go-tools
+install-go-tools:
+ go get github.com/t-yuki/gocover-cobertura
diff --git a/vendor/github.com/ryancurrah/gomodguard/README.md b/vendor/github.com/ryancurrah/gomodguard/README.md
new file mode 100644
index 0000000..f09b5e1
--- /dev/null
+++ b/vendor/github.com/ryancurrah/gomodguard/README.md
@@ -0,0 +1,127 @@
+# gomodguard
+![Codecov](https://img.shields.io/codecov/c/gh/ryancurrah/gomodguard?style=flat-square)
+![GitHub Workflow Status](https://img.shields.io/github/workflow/status/ryancurrah/gomodguard/Go?logo=Go&style=flat-square)
+
+
+
+Allow and block list linter for direct Go module dependencies. This is useful for organizations where they want to standardize on the modules used and be able to recommend alternative modules.
+
+## Description
+
+Allowed and blocked modules are defined in a `.gomodguard.yaml` or `~/.gomodguard.yaml` file.
+
+Modules can be allowed by module or domain name. When allowed modules are specified any modules not in the allowed configuration are blocked.
+
+If no allowed modules or domains are specified then all modules are allowed except for blocked ones.
+
+The linter looks for blocked modules in `go.mod` and searches for imported packages where the imported packages module is blocked. Indirect modules are not considered.
+
+Alternative modules can be optionally recommended in the blocked modules list.
+
+If the linted module imports a blocked module but the linted module is in the recommended modules list the blocked module is ignored. Usually, this means the linted module wraps that blocked module for use by other modules, therefore the import of the blocked module should not be blocked.
+
+Version constraints can be specified for modules as well which lets you block new or old versions of modules or specific versions.
+
+Results are printed to `stdout`.
+
+Logging statements are printed to `stderr`.
+
+Results can be exported to different report formats. Which can be imported into CI tools. See the help section for more information.
+
+## Configuration
+
+```yaml
+allowed:
+ modules: # List of allowed modules
+ - gopkg.in/yaml.v2
+ - github.com/go-xmlfmt/xmlfmt
+ - github.com/phayes/checkstyle
+ - github.com/mitchellh/go-homedir
+ domains: # List of allowed module domains
+ - golang.org
+
+blocked:
+ modules: # List of blocked modules
+ - github.com/uudashr/go-module: # Blocked module
+ recommendations: # Recommended modules that should be used instead (Optional)
+ - golang.org/x/mod
+ reason: "`mod` is the official go.mod parser library." # Reason why the recommended module should be used (Optional)
+ versions: # List of blocked module version constraints.
+ - github.com/mitchellh/go-homedir: # Blocked module with version constraint.
+ version: "<= 1.1.0" # Version constraint, see https://github.com/Masterminds/semver#basic-comparisons.
+ reason: "testing if blocked version constraint works." # Reason why the version constraint exists.
+```
+
+## Usage
+
+```
+╰─ ./gomodguard -h
+Usage: gomodguard [files...]
+Also supports package syntax but will use it in relative path, i.e. ./pkg/...
+Flags:
+ -f string
+ Report results to the specified file. A report type must also be specified
+ -file string
+
+ -h Show this help text
+ -help
+
+ -i int
+ Exit code when issues were found (default 2)
+ -issues-exit-code int
+ (default 2)
+
+ -n Don't lint test files
+ -no-test
+
+ -r string
+ Report results to one of the following formats: checkstyle. A report file destination must also be specified
+ -report string
+```
+
+## Example
+
+```
+╰─ ./gomodguard -r checkstyle -f gomodguard-checkstyle.xml ./...
+
+info: allowed modules, [gopkg.in/yaml.v2 github.com/go-xmlfmt/xmlfmt github.com/phayes/checkstyle github.com/mitchellh/go-homedir]
+info: allowed module domains, [golang.org]
+info: blocked modules, [github.com/uudashr/go-module]
+info: found `2` blocked modules in the go.mod file, [github.com/gofrs/uuid github.com/uudashr/go-module]
+blocked_example.go:6: import of package `github.com/gofrs/uuid` is blocked because the module is not in the allowed modules list.
+blocked_example.go:7: import of package `github.com/uudashr/go-module` is blocked because the module is in the blocked modules list. `golang.org/x/mod` is a recommended module. `mod` is the official go.mod parser library.
+```
+
+Resulting checkstyle file
+
+```
+╰─ cat gomodguard-checkstyle.xml
+
+
+
+
+
+
+
+
+
+
+```
+
+## Install
+
+```
+go get -u github.com/ryancurrah/gomodguard/cmd/gomodguard
+```
+
+## Develop
+
+```
+git clone https://github.com/ryancurrah/gomodguard.git && cd gomodguard
+
+go build -o gomodguard cmd/gomodguard/main.go
+```
+
+## License
+
+**MIT**
diff --git a/vendor/github.com/ryancurrah/gomodguard/VERSION b/vendor/github.com/ryancurrah/gomodguard/VERSION
new file mode 100644
index 0000000..3e7bcf0
--- /dev/null
+++ b/vendor/github.com/ryancurrah/gomodguard/VERSION
@@ -0,0 +1 @@
+v1.0.4
diff --git a/vendor/github.com/ryancurrah/gomodguard/cmd.go b/vendor/github.com/ryancurrah/gomodguard/cmd.go
new file mode 100644
index 0000000..652e61f
--- /dev/null
+++ b/vendor/github.com/ryancurrah/gomodguard/cmd.go
@@ -0,0 +1,239 @@
+package gomodguard
+
+import (
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/go-xmlfmt/xmlfmt"
+ "github.com/mitchellh/go-homedir"
+ "github.com/phayes/checkstyle"
+ "gopkg.in/yaml.v2"
+)
+
+const (
+ errFindingHomedir = "unable to find home directory, %w"
+ errReadingConfigFile = "could not read config file: %w"
+ errParsingConfigFile = "could not parse config file: %w"
+)
+
+var (
+ configFile = ".gomodguard.yaml"
+ logger = log.New(os.Stderr, "", 0)
+ errFindingConfigFile = fmt.Errorf("could not find config file")
+)
+
+// Run the gomodguard linter. Returns the exit code to use.
+func Run() int {
+ var (
+ args []string
+ help bool
+ noTest bool
+ report string
+ reportFile string
+ issuesExitCode int
+ cwd, _ = os.Getwd()
+ )
+
+ flag.BoolVar(&help, "h", false, "Show this help text")
+ flag.BoolVar(&help, "help", false, "")
+ flag.BoolVar(&noTest, "n", false, "Don't lint test files")
+ flag.BoolVar(&noTest, "no-test", false, "")
+ flag.StringVar(&report, "r", "", "Report results to one of the following formats: checkstyle. A report file destination must also be specified")
+ flag.StringVar(&report, "report", "", "")
+ flag.StringVar(&reportFile, "f", "", "Report results to the specified file. A report type must also be specified")
+ flag.StringVar(&reportFile, "file", "", "")
+ flag.IntVar(&issuesExitCode, "i", 2, "Exit code when issues were found")
+ flag.IntVar(&issuesExitCode, "issues-exit-code", 2, "")
+ flag.Parse()
+
+ report = strings.TrimSpace(strings.ToLower(report))
+
+ if help {
+ showHelp()
+ return 0
+ }
+
+ if report != "" && report != "checkstyle" {
+ logger.Fatalf("error: invalid report type '%s'", report)
+ }
+
+ if report != "" && reportFile == "" {
+ logger.Fatalf("error: a report file must be specified when a report is enabled")
+ }
+
+ if report == "" && reportFile != "" {
+ logger.Fatalf("error: a report type must be specified when a report file is enabled")
+ }
+
+ args = flag.Args()
+ if len(args) == 0 {
+ args = []string{"./..."}
+ }
+
+ config, err := GetConfig(configFile)
+ if err != nil {
+ logger.Fatalf("error: %s", err)
+ }
+
+ filteredFiles := GetFilteredFiles(cwd, noTest, args)
+
+ processor, err := NewProcessor(*config, logger)
+ if err != nil {
+ logger.Fatalf("error: %s", err)
+ }
+
+ results := processor.ProcessFiles(filteredFiles)
+
+ if report == "checkstyle" {
+ err := WriteCheckstyle(reportFile, results)
+ if err != nil {
+ logger.Fatalf("error: %s", err)
+ }
+ }
+
+ for _, r := range results {
+ fmt.Println(r.String())
+ }
+
+ if len(results) > 0 {
+ return issuesExitCode
+ }
+
+ return 0
+}
+
+// GetConfig from YAML file.
+func GetConfig(configFile string) (*Configuration, error) {
+ config := Configuration{}
+
+ home, err := homedir.Dir()
+ if err != nil {
+ return nil, fmt.Errorf(errFindingHomedir, err)
+ }
+
+ cfgFile := ""
+ homeDirCfgFile := filepath.Join(home, configFile)
+
+ switch {
+ case fileExists(configFile):
+ cfgFile = configFile
+ case fileExists(homeDirCfgFile):
+ cfgFile = homeDirCfgFile
+ default:
+ return nil, fmt.Errorf("%w: %s %s", errFindingConfigFile, configFile, homeDirCfgFile)
+ }
+
+ data, err := ioutil.ReadFile(cfgFile)
+ if err != nil {
+ return nil, fmt.Errorf(errReadingConfigFile, err)
+ }
+
+ err = yaml.Unmarshal(data, &config)
+ if err != nil {
+ return nil, fmt.Errorf(errParsingConfigFile, err)
+ }
+
+ return &config, nil
+}
+
+// GetFilteredFiles returns files based on search string arguments and filters.
+func GetFilteredFiles(cwd string, skipTests bool, args []string) []string {
+ var (
+ foundFiles = []string{}
+ filteredFiles = []string{}
+ )
+
+ for _, f := range args {
+ if strings.HasSuffix(f, "/...") {
+ dir, _ := filepath.Split(f)
+
+ foundFiles = append(foundFiles, expandGoWildcard(dir)...)
+
+ continue
+ }
+
+ if _, err := os.Stat(f); err == nil {
+ foundFiles = append(foundFiles, f)
+ }
+ }
+
+ // Use relative path to print shorter names, sort out test foundFiles if chosen.
+ for _, f := range foundFiles {
+ if skipTests {
+ if strings.HasSuffix(f, "_test.go") {
+ continue
+ }
+ }
+
+ if relativePath, err := filepath.Rel(cwd, f); err == nil {
+ filteredFiles = append(filteredFiles, relativePath)
+
+ continue
+ }
+
+ filteredFiles = append(filteredFiles, f)
+ }
+
+ return filteredFiles
+}
+
+// showHelp text for command line.
+func showHelp() {
+ helpText := `Usage: gomodguard [files...]
+Also supports package syntax but will use it in relative path, i.e. ./pkg/...
+Flags:`
+ fmt.Println(helpText)
+ flag.PrintDefaults()
+}
+
+// WriteCheckstyle takes the results and writes them to a checkstyle formatted file.
+func WriteCheckstyle(checkstyleFilePath string, results []Result) error {
+ check := checkstyle.New()
+
+ for i := range results {
+ file := check.EnsureFile(results[i].FileName)
+ file.AddError(checkstyle.NewError(results[i].LineNumber, 1, checkstyle.SeverityError, results[i].Reason, "gomodguard"))
+ }
+
+ checkstyleXML := fmt.Sprintf("\n%s", check.String())
+
+ err := ioutil.WriteFile(checkstyleFilePath, []byte(xmlfmt.FormatXML(checkstyleXML, "", " ")), 0644) // nolint:gosec
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// fileExists returns true if the file path provided exists.
+func fileExists(filename string) bool {
+ info, err := os.Stat(filename)
+ if os.IsNotExist(err) {
+ return false
+ }
+
+ return !info.IsDir()
+}
+
+// expandGoWildcard path provided.
+func expandGoWildcard(root string) []string {
+ foundFiles := []string{}
+
+ _ = filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
+ // Only append go foundFiles.
+ if !strings.HasSuffix(info.Name(), ".go") {
+ return nil
+ }
+
+ foundFiles = append(foundFiles, path)
+
+ return nil
+ })
+
+ return foundFiles
+}
diff --git a/vendor/github.com/ryancurrah/gomodguard/go.mod b/vendor/github.com/ryancurrah/gomodguard/go.mod
new file mode 100644
index 0000000..15231c9
--- /dev/null
+++ b/vendor/github.com/ryancurrah/gomodguard/go.mod
@@ -0,0 +1,13 @@
+module github.com/ryancurrah/gomodguard
+
+go 1.14
+
+require (
+ github.com/Masterminds/semver v1.5.0
+ github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b
+ github.com/mitchellh/go-homedir v1.1.0
+ github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d
+ github.com/pkg/errors v0.9.1
+ golang.org/x/mod v0.2.0
+ gopkg.in/yaml.v2 v2.2.8
+)
diff --git a/vendor/github.com/ryancurrah/gomodguard/go.sum b/vendor/github.com/ryancurrah/gomodguard/go.sum
new file mode 100644
index 0000000..55ae4e5
--- /dev/null
+++ b/vendor/github.com/ryancurrah/gomodguard/go.sum
@@ -0,0 +1,28 @@
+github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
+github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
+github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b h1:khEcpUM4yFcxg4/FHQWkvVRmgijNXRfzkIDHh23ggEo=
+github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d h1:CdDQnGF8Nq9ocOS/xlSptM1N3BbrA6/kmaep5ggwaIA=
+github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/ryancurrah/gomodguard/gomodguard.go b/vendor/github.com/ryancurrah/gomodguard/gomodguard.go
new file mode 100644
index 0000000..1646773
--- /dev/null
+++ b/vendor/github.com/ryancurrah/gomodguard/gomodguard.go
@@ -0,0 +1,492 @@
+package gomodguard
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "go/parser"
+ "go/token"
+ "io/ioutil"
+ "log"
+ "os"
+ "os/exec"
+ "strings"
+
+ "github.com/Masterminds/semver"
+
+ "golang.org/x/mod/modfile"
+)
+
+const (
+ goModFilename = "go.mod"
+ errReadingGoModFile = "unable to read go mod file %s: %w"
+ errParsingGoModFile = "unable to parsing go mod file %s: %w"
+)
+
+var (
+ blockReasonNotInAllowedList = "import of package `%s` is blocked because the module is not in the allowed modules list."
+ blockReasonInBlockedList = "import of package `%s` is blocked because the module is in the blocked modules list."
+)
+
+// BlockedVersion has a version constraint and a reason why the module version is blocked.
+type BlockedVersion struct {
+ Version string `yaml:"version"`
+ Reason string `yaml:"reason"`
+ lintedModuleVersion string `yaml:"-"`
+}
+
+// Set required values for performing checks. This must be run before running anything else.
+func (r *BlockedVersion) Set(lintedModuleVersion string) {
+ r.lintedModuleVersion = lintedModuleVersion
+}
+
+// IsAllowed returns true if the blocked module is allowed. You must Set() values first.
+func (r *BlockedVersion) IsAllowed() bool {
+ return !r.isLintedModuleVersionBlocked()
+}
+
+// isLintedModuleVersionBlocked returns true if version constraint specified and the
+// linted module version meets the constraint.
+func (r *BlockedVersion) isLintedModuleVersionBlocked() bool {
+ if r.Version == "" {
+ return false
+ }
+
+ constraint, err := semver.NewConstraint(r.Version)
+ if err != nil {
+ return false
+ }
+
+ version, err := semver.NewVersion(strings.TrimLeft(r.lintedModuleVersion, "v"))
+ if err != nil {
+ return false
+ }
+
+ return constraint.Check(version)
+}
+
+// Message returns the reason why the module version is blocked.
+func (r *BlockedVersion) Message() string {
+ msg := ""
+
+ // Add version contraint to message
+ msg += fmt.Sprintf("version `%s` is blocked because it does not meet the version constraint `%s`.", r.lintedModuleVersion, r.Version)
+
+ if r.Reason == "" {
+ return msg
+ }
+
+ // Add reason to message
+ msg += fmt.Sprintf(" %s.", strings.TrimRight(r.Reason, "."))
+
+ return msg
+}
+
+// BlockedModule has alternative modules to use and a reason why the module is blocked.
+type BlockedModule struct {
+ Recommendations []string `yaml:"recommendations"`
+ Reason string `yaml:"reason"`
+ currentModuleName string `yaml:"-"`
+}
+
+// Set required values for performing checks. This must be run before running anything else.
+func (r *BlockedModule) Set(currentModuleName string) {
+ r.currentModuleName = currentModuleName
+}
+
+// IsAllowed returns true if the blocked module is allowed. You must Set() values first.
+func (r *BlockedModule) IsAllowed() bool {
+ // If the current go.mod file being linted is a recommended module of a
+ // blocked module and it imports that blocked module, do not set as blocked.
+ // This could mean that the linted module is a wrapper for that blocked module.
+ return r.isCurrentModuleARecommendation()
+}
+
+// isCurrentModuleARecommendation returns true if the current module is in the Recommendations list.
+func (r *BlockedModule) isCurrentModuleARecommendation() bool {
+ if r == nil {
+ return false
+ }
+
+ for n := range r.Recommendations {
+ if strings.TrimSpace(r.currentModuleName) == strings.TrimSpace(r.Recommendations[n]) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// Message returns the reason why the module is blocked and a list of recommended modules if provided.
+func (r *BlockedModule) Message() string {
+ msg := ""
+
+ // Add recommendations to message
+ for i := range r.Recommendations {
+ switch {
+ case len(r.Recommendations) == 1:
+ msg += fmt.Sprintf("`%s` is a recommended module.", r.Recommendations[i])
+ case (i+1) != len(r.Recommendations) && (i+1) == (len(r.Recommendations)-1):
+ msg += fmt.Sprintf("`%s` ", r.Recommendations[i])
+ case (i + 1) != len(r.Recommendations):
+ msg += fmt.Sprintf("`%s`, ", r.Recommendations[i])
+ default:
+ msg += fmt.Sprintf("and `%s` are recommended modules.", r.Recommendations[i])
+ }
+ }
+
+ if r.Reason == "" {
+ return msg
+ }
+
+ // Add reason to message
+ if msg == "" {
+ msg = fmt.Sprintf("%s.", strings.TrimRight(r.Reason, "."))
+ } else {
+ msg += fmt.Sprintf(" %s.", strings.TrimRight(r.Reason, "."))
+ }
+
+ return msg
+}
+
+// HasRecommendations returns true if the blocked package has
+// recommended modules.
+func (r *BlockedModule) HasRecommendations() bool {
+ if r == nil {
+ return false
+ }
+
+ return len(r.Recommendations) > 0
+}
+
+// BlockedVersions a list of blocked modules by a version constraint.
+type BlockedVersions []map[string]BlockedVersion
+
+// Get returns the module names that are blocked.
+func (b BlockedVersions) Get() []string {
+ modules := make([]string, len(b))
+
+ for n := range b {
+ for module := range b[n] {
+ modules[n] = module
+ break
+ }
+ }
+
+ return modules
+}
+
+// GetBlockReason returns a block version if one is set for the provided linted module name.
+func (b BlockedVersions) GetBlockReason(lintedModuleName, lintedModuleVersion string) *BlockedVersion {
+ for _, blockedModule := range b {
+ for blockedModuleName, blockedVersion := range blockedModule {
+ if strings.EqualFold(strings.TrimSpace(lintedModuleName), strings.TrimSpace(blockedModuleName)) {
+ blockedVersion.Set(lintedModuleVersion)
+ return &blockedVersion
+ }
+ }
+ }
+
+ return nil
+}
+
+// BlockedModules a list of blocked modules.
+type BlockedModules []map[string]BlockedModule
+
+// Get returns the module names that are blocked.
+func (b BlockedModules) Get() []string {
+ modules := make([]string, len(b))
+
+ for n := range b {
+ for module := range b[n] {
+ modules[n] = module
+ break
+ }
+ }
+
+ return modules
+}
+
+// GetBlockReason returns a block module if one is set for the provided linted module name.
+func (b BlockedModules) GetBlockReason(currentModuleName, lintedModuleName string) *BlockedModule {
+ for _, blockedModule := range b {
+ for blockedModuleName, blockedModule := range blockedModule {
+ if strings.EqualFold(strings.TrimSpace(lintedModuleName), strings.TrimSpace(blockedModuleName)) {
+ blockedModule.Set(currentModuleName)
+ return &blockedModule
+ }
+ }
+ }
+
+ return nil
+}
+
+// Allowed is a list of modules and module
+// domains that are allowed to be used.
+type Allowed struct {
+ Modules []string `yaml:"modules"`
+ Domains []string `yaml:"domains"`
+}
+
+// IsAllowedModule returns true if the given module
+// name is in the allowed modules list.
+func (a *Allowed) IsAllowedModule(moduleName string) bool {
+ allowedModules := a.Modules
+
+ for i := range allowedModules {
+ if strings.EqualFold(strings.TrimSpace(moduleName), strings.TrimSpace(allowedModules[i])) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// IsAllowedModuleDomain returns true if the given module's domain is
+// in the allowed module domains list.
+func (a *Allowed) IsAllowedModuleDomain(moduleName string) bool {
+ allowedDomains := a.Domains
+
+ for i := range allowedDomains {
+ if strings.HasPrefix(strings.TrimSpace(strings.ToLower(moduleName)), strings.TrimSpace(strings.ToLower(allowedDomains[i]))) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// Blocked is a list of modules that are
+// blocked and not to be used.
+type Blocked struct {
+ Modules BlockedModules `yaml:"modules"`
+ Versions BlockedVersions `yaml:"versions"`
+}
+
+// Configuration of gomodguard allow and block lists.
+type Configuration struct {
+ Allowed Allowed `yaml:"allowed"`
+ Blocked Blocked `yaml:"blocked"`
+}
+
+// Result represents the result of one error.
+type Result struct {
+ FileName string
+ LineNumber int
+ Position token.Position
+ Reason string
+}
+
+// String returns the filename, line
+// number and reason of a Result.
+func (r *Result) String() string {
+ return fmt.Sprintf("%s:%d:1 %s", r.FileName, r.LineNumber, r.Reason)
+}
+
+// Processor processes Go files.
+type Processor struct {
+ Config Configuration
+ Logger *log.Logger
+ Modfile *modfile.File
+ blockedModulesFromModFile map[string][]string
+ Result []Result
+}
+
+// NewProcessor will create a Processor to lint blocked packages.
+func NewProcessor(config Configuration, logger *log.Logger) (*Processor, error) {
+ goModFileBytes, err := loadGoModFile()
+ if err != nil {
+ return nil, fmt.Errorf(errReadingGoModFile, goModFilename, err)
+ }
+
+ mfile, err := modfile.Parse(goModFilename, goModFileBytes, nil)
+ if err != nil {
+ return nil, fmt.Errorf(errParsingGoModFile, goModFilename, err)
+ }
+
+ logger.Printf("info: allowed modules, %+v", config.Allowed.Modules)
+ logger.Printf("info: allowed module domains, %+v", config.Allowed.Domains)
+ logger.Printf("info: blocked modules, %+v", config.Blocked.Modules.Get())
+ logger.Printf("info: blocked modules with version constraints, %+v", config.Blocked.Versions.Get())
+
+ p := &Processor{
+ Config: config,
+ Logger: logger,
+ Modfile: mfile,
+ Result: []Result{},
+ }
+
+ p.SetBlockedModulesFromModFile()
+
+ return p, nil
+}
+
+// ProcessFiles takes a string slice with file names (full paths)
+// and lints them.
+func (p *Processor) ProcessFiles(filenames []string) []Result {
+ pluralModuleMsg := "s"
+ if len(p.blockedModulesFromModFile) == 1 {
+ pluralModuleMsg = ""
+ }
+
+ blockedModules := make([]string, 0, len(p.blockedModulesFromModFile))
+ for blockedModuleName := range p.blockedModulesFromModFile {
+ blockedModules = append(blockedModules, blockedModuleName)
+ }
+
+ p.Logger.Printf("info: found %d blocked module%s in %s: %+v",
+ len(p.blockedModulesFromModFile), pluralModuleMsg, goModFilename, blockedModules)
+
+ for _, filename := range filenames {
+ data, err := ioutil.ReadFile(filename)
+ if err != nil {
+ p.Result = append(p.Result, Result{
+ FileName: filename,
+ LineNumber: 0,
+ Reason: fmt.Sprintf("unable to read file, file cannot be linted (%s)", err.Error()),
+ })
+ }
+
+ p.process(filename, data)
+ }
+
+ return p.Result
+}
+
+// process file imports and add lint error if blocked package is imported.
+func (p *Processor) process(filename string, data []byte) {
+ fileSet := token.NewFileSet()
+
+ file, err := parser.ParseFile(fileSet, filename, data, parser.ParseComments)
+ if err != nil {
+ p.Result = append(p.Result, Result{
+ FileName: filename,
+ LineNumber: 0,
+ Reason: fmt.Sprintf("invalid syntax, file cannot be linted (%s)", err.Error()),
+ })
+
+ return
+ }
+
+ imports := file.Imports
+ for n := range imports {
+ importedPkg := strings.TrimSpace(strings.Trim(imports[n].Path.Value, "\""))
+
+ blockReasons := p.isBlockedPackageFromModFile(importedPkg)
+ if blockReasons == nil {
+ continue
+ }
+
+ for _, blockReason := range blockReasons {
+ p.addError(fileSet, imports[n].Pos(), blockReason)
+ }
+ }
+}
+
+// addError adds an error for the file and line number for the current token.Pos
+// with the given reason.
+func (p *Processor) addError(fileset *token.FileSet, pos token.Pos, reason string) {
+ position := fileset.Position(pos)
+
+ p.Result = append(p.Result, Result{
+ FileName: position.Filename,
+ LineNumber: position.Line,
+ Position: position,
+ Reason: reason,
+ })
+}
+
+// SetBlockedModulesFromModFile determines which modules are blocked by reading
+// the go.mod file and comparing the require modules to the allowed modules.
+func (p *Processor) SetBlockedModulesFromModFile() {
+ blockedModules := make(map[string][]string, len(p.Modfile.Require))
+ currentModuleName := p.Modfile.Module.Mod.Path
+ lintedModules := p.Modfile.Require
+
+ for i := range lintedModules {
+ if lintedModules[i].Indirect {
+ continue
+ }
+
+ lintedModuleName := strings.TrimSpace(lintedModules[i].Mod.Path)
+ lintedModuleVersion := strings.TrimSpace(lintedModules[i].Mod.Version)
+
+ var isAllowed bool
+
+ switch {
+ case len(p.Config.Allowed.Modules) == 0 && len(p.Config.Allowed.Domains) == 0:
+ isAllowed = true
+ case p.Config.Allowed.IsAllowedModuleDomain(lintedModuleName):
+ isAllowed = true
+ case p.Config.Allowed.IsAllowedModule(lintedModuleName):
+ isAllowed = true
+ default:
+ isAllowed = false
+ }
+
+ blockModuleReason := p.Config.Blocked.Modules.GetBlockReason(currentModuleName, lintedModuleName)
+ blockVersionReason := p.Config.Blocked.Versions.GetBlockReason(lintedModuleName, lintedModuleVersion)
+
+ if !isAllowed && blockModuleReason == nil && blockVersionReason == nil {
+ blockedModules[lintedModuleName] = append(blockedModules[lintedModuleName], blockReasonNotInAllowedList)
+ continue
+ }
+
+ if blockModuleReason != nil && !blockModuleReason.IsAllowed() {
+ blockedModules[lintedModuleName] = append(blockedModules[lintedModuleName], fmt.Sprintf("%s %s", blockReasonInBlockedList, blockModuleReason.Message()))
+ }
+
+ if blockVersionReason != nil && !blockVersionReason.IsAllowed() {
+ blockedModules[lintedModuleName] = append(blockedModules[lintedModuleName], fmt.Sprintf("%s %s", blockReasonInBlockedList, blockVersionReason.Message()))
+ }
+ }
+
+ p.blockedModulesFromModFile = blockedModules
+}
+
+// isBlockedPackageFromModFile returns the block reason if the package is blocked.
+func (p *Processor) isBlockedPackageFromModFile(packageName string) []string {
+ for blockedModuleName, blockReasons := range p.blockedModulesFromModFile {
+ if strings.HasPrefix(strings.TrimSpace(packageName), strings.TrimSpace(blockedModuleName)) {
+ formattedReasons := make([]string, 0, len(blockReasons))
+
+ for _, blockReason := range blockReasons {
+ formattedReasons = append(formattedReasons, fmt.Sprintf(blockReason, packageName))
+ }
+
+ return formattedReasons
+ }
+ }
+
+ return nil
+}
+
+func loadGoModFile() ([]byte, error) {
+ cmd := exec.Command("go", "env", "-json")
+ stdout, _ := cmd.StdoutPipe()
+ _ = cmd.Start()
+
+ if stdout == nil {
+ return ioutil.ReadFile(goModFilename)
+ }
+
+ buf := new(bytes.Buffer)
+ _, _ = buf.ReadFrom(stdout)
+
+ goEnv := make(map[string]string)
+
+ err := json.Unmarshal(buf.Bytes(), &goEnv)
+ if err != nil {
+ return ioutil.ReadFile(goModFilename)
+ }
+
+ if _, ok := goEnv["GOMOD"]; !ok {
+ return ioutil.ReadFile(goModFilename)
+ }
+
+ if _, err := os.Stat(goEnv["GOMOD"]); os.IsNotExist(err) {
+ return ioutil.ReadFile(goModFilename)
+ }
+
+ return ioutil.ReadFile(goEnv["GOMOD"])
+}
diff --git a/vendor/github.com/ryanrolds/sqlclosecheck/LICENSE b/vendor/github.com/ryanrolds/sqlclosecheck/LICENSE
new file mode 100644
index 0000000..77b261d
--- /dev/null
+++ b/vendor/github.com/ryanrolds/sqlclosecheck/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2020 Ryan R. Olds
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/ryanrolds/sqlclosecheck/pkg/analyzer/analyzer.go b/vendor/github.com/ryanrolds/sqlclosecheck/pkg/analyzer/analyzer.go
new file mode 100644
index 0000000..bc42dfb
--- /dev/null
+++ b/vendor/github.com/ryanrolds/sqlclosecheck/pkg/analyzer/analyzer.go
@@ -0,0 +1,311 @@
+package analyzer
+
+import (
+ "go/types"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/passes/buildssa"
+ "golang.org/x/tools/go/ssa"
+)
+
+const (
+ rowsName = "Rows"
+ stmtName = "Stmt"
+ closeMethod = "Close"
+)
+
+var (
+ sqlPackages = []string{
+ "database/sql",
+ "github.com/jmoiron/sqlx",
+ }
+)
+
+func NewAnalyzer() *analysis.Analyzer {
+ return &analysis.Analyzer{
+ Name: "sqlclosecheck",
+ Doc: "Checks that sql.Rows and sql.Stmt are closed.",
+ Run: run,
+ Requires: []*analysis.Analyzer{
+ buildssa.Analyzer,
+ },
+ }
+}
+
+func run(pass *analysis.Pass) (interface{}, error) {
+ pssa := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA)
+
+ // Build list of types we are looking for
+ targetTypes := getTargetTypes(pssa, sqlPackages)
+
+ // If non of the types are found, skip
+ if len(targetTypes) == 0 {
+ return nil, nil
+ }
+
+ funcs := pssa.SrcFuncs
+ for _, f := range funcs {
+ for _, b := range f.Blocks {
+ for i := range b.Instrs {
+ // Check if instruction is call that returns a target type
+ targetValues := getTargetTypesValues(b, i, targetTypes)
+ if len(targetValues) == 0 {
+ continue
+ }
+
+ // log.Printf("%s", f.Name())
+
+ // For each found target check if they are closed and deferred
+ for _, targetValue := range targetValues {
+ refs := (*targetValue.value).Referrers()
+ isClosed := checkClosed(refs, targetTypes)
+ if !isClosed {
+ pass.Reportf((targetValue.instr).Pos(), "Rows/Stmt was not closed")
+ }
+
+ checkDeferred(pass, refs, targetTypes, false)
+ }
+ }
+ }
+ }
+
+ return nil, nil
+}
+
+func getTargetTypes(pssa *buildssa.SSA, targetPackages []string) []*types.Pointer {
+ targets := []*types.Pointer{}
+
+ for _, sqlPkg := range targetPackages {
+ pkg := pssa.Pkg.Prog.ImportedPackage(sqlPkg)
+ if pkg == nil {
+ // the SQL package being checked isn't imported
+ return targets
+ }
+
+ rowsType := getTypePointerFromName(pkg, rowsName)
+ if rowsType != nil {
+ targets = append(targets, rowsType)
+ }
+
+ stmtType := getTypePointerFromName(pkg, stmtName)
+ if stmtType != nil {
+ targets = append(targets, stmtType)
+ }
+ }
+
+ return targets
+}
+
+func getTypePointerFromName(pkg *ssa.Package, name string) *types.Pointer {
+ pkgType := pkg.Type(name)
+ if pkgType == nil {
+ // this package does not use Rows/Stmt
+ return nil
+ }
+
+ obj := pkgType.Object()
+ named, ok := obj.Type().(*types.Named)
+ if !ok {
+ return nil
+ }
+
+ return types.NewPointer(named)
+}
+
+type targetValue struct {
+ value *ssa.Value
+ instr ssa.Instruction
+}
+
+func getTargetTypesValues(b *ssa.BasicBlock, i int, targetTypes []*types.Pointer) []targetValue {
+ targetValues := []targetValue{}
+
+ instr := b.Instrs[i]
+ call, ok := instr.(*ssa.Call)
+ if !ok {
+ return targetValues
+ }
+
+ signature := call.Call.Signature()
+ results := signature.Results()
+ for i := 0; i < results.Len(); i++ {
+ v := results.At(i)
+ varType := v.Type()
+
+ for _, targetType := range targetTypes {
+ if !types.Identical(varType, targetType) {
+ continue
+ }
+
+ for _, cRef := range *call.Referrers() {
+ switch instr := cRef.(type) {
+ case *ssa.Call:
+ if len(instr.Call.Args) >= 1 && types.Identical(instr.Call.Args[0].Type(), targetType) {
+ targetValues = append(targetValues, targetValue{
+ value: &instr.Call.Args[0],
+ instr: call,
+ })
+ }
+ case ssa.Value:
+ if types.Identical(instr.Type(), targetType) {
+ targetValues = append(targetValues, targetValue{
+ value: &instr,
+ instr: call,
+ })
+ }
+ }
+ }
+ }
+ }
+
+ return targetValues
+}
+
+func checkClosed(refs *[]ssa.Instruction, targetTypes []*types.Pointer) bool {
+ numInstrs := len(*refs)
+ for idx, ref := range *refs {
+ // log.Printf("%T - %s", ref, ref)
+
+ action := getAction(ref, targetTypes)
+ switch action {
+ case "closed":
+ return true
+ case "passed":
+ // Passed and not used after
+ if numInstrs == idx+1 {
+ return true
+ }
+ case "returned":
+ return true
+ case "handled":
+ return true
+ default:
+ // log.Printf(action)
+ }
+ }
+
+ return false
+}
+
+func getAction(instr ssa.Instruction, targetTypes []*types.Pointer) string {
+ switch instr := instr.(type) {
+ case *ssa.Defer:
+ if instr.Call.Value == nil {
+ return "unvalued defer"
+ }
+
+ name := instr.Call.Value.Name()
+ if name == closeMethod {
+ return "closed"
+ }
+ case *ssa.Call:
+ if instr.Call.Value == nil {
+ return "unvalued call"
+ }
+
+ isTarget := false
+ receiver := instr.Call.StaticCallee().Signature.Recv()
+ if receiver != nil {
+ isTarget = isTargetType(receiver.Type(), targetTypes)
+ }
+
+ name := instr.Call.Value.Name()
+ if isTarget && name == closeMethod {
+ return "closed"
+ }
+
+ if !isTarget {
+ return "passed"
+ }
+ case *ssa.Phi:
+ return "passed"
+ case *ssa.MakeInterface:
+ return "passed"
+ case *ssa.Store:
+ if len(*instr.Addr.Referrers()) == 0 {
+ return "noop"
+ }
+
+ for _, aRef := range *instr.Addr.Referrers() {
+ if c, ok := aRef.(*ssa.MakeClosure); ok {
+ f := c.Fn.(*ssa.Function)
+ for _, b := range f.Blocks {
+ if checkClosed(&b.Instrs, targetTypes) {
+ return "handled"
+ }
+ }
+ }
+ }
+ case *ssa.UnOp:
+ instrType := instr.Type()
+ for _, targetType := range targetTypes {
+ if types.Identical(instrType, targetType) {
+ if checkClosed(instr.Referrers(), targetTypes) {
+ return "handled"
+ }
+ }
+ }
+ case *ssa.FieldAddr:
+ if checkClosed(instr.Referrers(), targetTypes) {
+ return "handled"
+ }
+ case *ssa.Return:
+ return "returned"
+ default:
+ // log.Printf("%s", instr)
+ }
+
+ return "unhandled"
+}
+
+func checkDeferred(pass *analysis.Pass, instrs *[]ssa.Instruction, targetTypes []*types.Pointer, inDefer bool) {
+ for _, instr := range *instrs {
+ switch instr := instr.(type) {
+ case *ssa.Defer:
+ if instr.Call.Value != nil && instr.Call.Value.Name() == closeMethod {
+ return
+ }
+ case *ssa.Call:
+ if instr.Call.Value != nil && instr.Call.Value.Name() == closeMethod {
+ if !inDefer {
+ pass.Reportf(instr.Pos(), "Close should use defer")
+ }
+
+ return
+ }
+ case *ssa.Store:
+ if len(*instr.Addr.Referrers()) == 0 {
+ return
+ }
+
+ for _, aRef := range *instr.Addr.Referrers() {
+ if c, ok := aRef.(*ssa.MakeClosure); ok {
+ f := c.Fn.(*ssa.Function)
+
+ for _, b := range f.Blocks {
+ checkDeferred(pass, &b.Instrs, targetTypes, true)
+ }
+ }
+ }
+ case *ssa.UnOp:
+ instrType := instr.Type()
+ for _, targetType := range targetTypes {
+ if types.Identical(instrType, targetType) {
+ checkDeferred(pass, instr.Referrers(), targetTypes, inDefer)
+ }
+ }
+ case *ssa.FieldAddr:
+ checkDeferred(pass, instr.Referrers(), targetTypes, inDefer)
+ }
+ }
+}
+
+func isTargetType(t types.Type, targetTypes []*types.Pointer) bool {
+ for _, targetType := range targetTypes {
+ if types.Identical(t, targetType) {
+ return true
+ }
+ }
+
+ return false
+}
diff --git a/vendor/github.com/securego/gosec/v2/.gitignore b/vendor/github.com/securego/gosec/v2/.gitignore
new file mode 100644
index 0000000..f282cda
--- /dev/null
+++ b/vendor/github.com/securego/gosec/v2/.gitignore
@@ -0,0 +1,35 @@
+# transient files
+/image
+
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+*.swp
+/gosec
+
+# Folders
+_obj
+_test
+vendor
+dist
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+
+.DS_Store
+
+.vscode
diff --git a/vendor/github.com/securego/gosec/v2/.goreleaser.yml b/vendor/github.com/securego/gosec/v2/.goreleaser.yml
new file mode 100644
index 0000000..4f8fc41
--- /dev/null
+++ b/vendor/github.com/securego/gosec/v2/.goreleaser.yml
@@ -0,0 +1,20 @@
+---
+project_name: gosec
+
+release:
+ github:
+ owner: securego
+ name: gosec
+
+builds:
+ - main : ./cmd/gosec/
+ binary: gosec
+ goos:
+ - darwin
+ - linux
+ - windows
+ goarch:
+ - amd64
+ ldflags: -X main.Version={{.Version}} -X main.GitTag={{.Tag}} -X main.BuildDate={{.Date}}
+ env:
+ - CGO_ENABLED=0
diff --git a/vendor/github.com/securego/gosec/v2/Dockerfile b/vendor/github.com/securego/gosec/v2/Dockerfile
new file mode 100644
index 0000000..a874697
--- /dev/null
+++ b/vendor/github.com/securego/gosec/v2/Dockerfile
@@ -0,0 +1,14 @@
+ARG GO_VERSION
+FROM golang:${GO_VERSION}-alpine AS builder
+RUN apk add --update --no-cache ca-certificates make git curl gcc libc-dev
+RUN mkdir -p /build
+WORKDIR /build
+COPY . /build/
+RUN go mod download
+RUN make build-linux
+
+FROM golang:${GO_VERSION}-alpine
+RUN apk add --update --no-cache ca-certificates git gcc libc-dev
+ENV GO111MODULE on
+COPY --from=builder /build/gosec /bin/gosec
+ENTRYPOINT ["/bin/gosec"]
diff --git a/vendor/github.com/securego/gosec/v2/LICENSE.txt b/vendor/github.com/securego/gosec/v2/LICENSE.txt
new file mode 100644
index 0000000..1756c78
--- /dev/null
+++ b/vendor/github.com/securego/gosec/v2/LICENSE.txt
@@ -0,0 +1,154 @@
+Apache License
+
+Version 2.0, January 2004
+
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of this
+License, each Contributor hereby grants to You a perpetual, worldwide,
+non-exclusive, no-charge, royalty-free, irrevocable copyright license to
+reproduce, prepare Derivative Works of, publicly display, publicly perform,
+sublicense, and distribute the Work and such Derivative Works in Source or
+Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of this License,
+each Contributor hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section) patent
+license to make, have made, use, offer to sell, sell, import, and otherwise
+transfer the Work, where such license applies only to those patent claims
+licensable by such Contributor that are necessarily infringed by their
+Contribution(s) alone or by combination of their Contribution(s) with the Work
+to which such Contribution(s) was submitted. If You institute patent litigation
+against any entity (including a cross-claim or counterclaim in a lawsuit)
+alleging that the Work or a Contribution incorporated within the Work
+constitutes direct or contributory patent infringement, then any patent licenses
+granted to You under this License for that Work shall terminate as of the date
+such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the Work or
+Derivative Works thereof in any medium, with or without modifications, and in
+Source or Object form, provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and You must cause any modified files to carry prominent notices
+stating that You changed the files; and You must retain, in the Source form of
+any Derivative Works that You distribute, all copyright, patent, trademark, and
+attribution notices from the Source form of the Work, excluding those notices
+that do not pertain to any part of the Derivative Works; and If the Work
+includes a "NOTICE" text file as part of its distribution, then any Derivative
+Works that You distribute must include a readable copy of the attribution
+notices contained within such NOTICE file, excluding those notices that do not
+pertain to any part of the Derivative Works, in at least one of the following
+places: within a NOTICE text file distributed as part of the Derivative Works;
+within the Source form or documentation, if provided along with the Derivative
+Works; or, within a display generated by the Derivative Works, if and wherever
+such third-party notices normally appear. The contents of the NOTICE file are
+for informational purposes only and do not modify the License. You may add Your
+own attribution notices within Derivative Works that You distribute, alongside
+or as an addendum to the NOTICE text from the Work, provided that such
+additional attribution notices cannot be construed as modifying the License.
+
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License. 5. Submission of Contributions.
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade names,
+trademarks, service marks, or product names of the Licensor, except as required
+for reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or agreed to in
+writing, Licensor provides the Work (and each Contributor provides its
+Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied, including, without limitation, any warranties
+or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+PARTICULAR PURPOSE. You are solely responsible for determining the
+appropriateness of using or redistributing the Work and assume any risks
+associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory, whether in
+tort (including negligence), contract, or otherwise, unless required by
+applicable law (such as deliberate and grossly negligent acts) or agreed to in
+writing, shall any Contributor be liable to You for damages, including any
+direct, indirect, special, incidental, or consequential damages of any character
+arising as a result of this License or out of the use or inability to use the
+Work (including but not limited to damages for loss of goodwill, work stoppage,
+computer failure or malfunction, or any and all other commercial damages or
+losses), even if such Contributor has been advised of the possibility of such
+damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing the Work or
+Derivative Works thereof, You may choose to offer, and charge a fee for,
+acceptance of support, warranty, indemnity, or other liability obligations
+and/or rights consistent with this License. However, in accepting such
+obligations, You may act only on Your own behalf and on Your sole
+responsibility, not on behalf of any other Contributor, and only if You agree to
+indemnify, defend, and hold each Contributor harmless for any liability incurred
+by, or claims asserted against, such Contributor by reason of your accepting any
+such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
diff --git a/vendor/github.com/securego/gosec/v2/Makefile b/vendor/github.com/securego/gosec/v2/Makefile
new file mode 100644
index 0000000..217651c
--- /dev/null
+++ b/vendor/github.com/securego/gosec/v2/Makefile
@@ -0,0 +1,71 @@
+GIT_TAG?= $(shell git describe --always --tags)
+BIN = gosec
+FMT_CMD = $(gofmt -s -l -w $(find . -type f -name '*.go' -not -path './vendor/*') | tee /dev/stderr)
+IMAGE_REPO = securego
+BUILDFLAGS := '-w -s'
+CGO_ENABLED = 0
+GO := GO111MODULE=on go
+GO_NOMOD :=GO111MODULE=off go
+GOPATH ?= $(shell $(GO) env GOPATH)
+GOBIN ?= $(GOPATH)/bin
+GOLINT ?= $(GOBIN)/golint
+GOSEC ?= $(GOBIN)/gosec
+GINKGO ?= $(GOBIN)/ginkgo
+GO_VERSION = 1.14
+
+default:
+ $(MAKE) build
+
+install-test-deps:
+ $(GO_NOMOD) get -u github.com/onsi/ginkgo/ginkgo
+ $(GO_NOMOD) get -u golang.org/x/crypto/ssh
+ $(GO_NOMOD) get -u github.com/lib/pq
+
+test: install-test-deps build fmt lint sec
+ $(GINKGO) -r -v
+
+fmt:
+ @echo "FORMATTING"
+ @FORMATTED=`$(GO) fmt ./...`
+ @([[ ! -z "$(FORMATTED)" ]] && printf "Fixed unformatted files:\n$(FORMATTED)") || true
+
+lint:
+ @echo "LINTING"
+ $(GO_NOMOD) get -u golang.org/x/lint/golint
+ $(GOLINT) -set_exit_status ./...
+ @echo "VETTING"
+ $(GO) vet ./...
+
+sec:
+ @echo "SECURITY SCANNING"
+ ./$(BIN) ./...
+
+test-coverage: install-test-deps
+ go test -race -coverprofile=coverage.txt -covermode=atomic
+
+build:
+ go build -o $(BIN) ./cmd/gosec/
+
+clean:
+ rm -rf build vendor dist coverage.txt
+ rm -f release image $(BIN)
+
+release:
+ @echo "Releasing the gosec binary..."
+ goreleaser release
+
+build-linux:
+ CGO_ENABLED=$(CGO_ENABLED) GOOS=linux GOARCH=amd64 go build -ldflags $(BUILDFLAGS) -o $(BIN) ./cmd/gosec/
+
+image:
+ @echo "Building the Docker image..."
+ docker build -t $(IMAGE_REPO)/$(BIN):$(GIT_TAG) --build-arg GO_VERSION=$(GO_VERSION) .
+ docker tag $(IMAGE_REPO)/$(BIN):$(GIT_TAG) $(IMAGE_REPO)/$(BIN):latest
+ touch image
+
+image-push: image
+ @echo "Pushing the Docker image..."
+ docker push $(IMAGE_REPO)/$(BIN):$(GIT_TAG)
+ docker push $(IMAGE_REPO)/$(BIN):latest
+
+.PHONY: test build clean release image image-push
diff --git a/vendor/github.com/securego/gosec/v2/README.md b/vendor/github.com/securego/gosec/v2/README.md
new file mode 100644
index 0000000..52be734
--- /dev/null
+++ b/vendor/github.com/securego/gosec/v2/README.md
@@ -0,0 +1,326 @@
+
+# gosec - Golang Security Checker
+
+Inspects source code for security problems by scanning the Go AST.
+
+
+
+## License
+
+Licensed under the Apache License, Version 2.0 (the "License").
+You may not use this file except in compliance with the License.
+You may obtain a copy of the License [here](http://www.apache.org/licenses/LICENSE-2.0).
+
+## Project status
+
+[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/3218/badge)](https://bestpractices.coreinfrastructure.org/projects/3218)
+[![Build Status](https://github.com/securego/gosec/workflows/CI/badge.svg)](https://github.com/securego/gosec/actions?query=workflows%3ACI)
+[![Coverage Status](https://codecov.io/gh/securego/gosec/branch/master/graph/badge.svg)](https://codecov.io/gh/securego/gosec)
+[![GoReport](https://goreportcard.com/badge/github.com/securego/gosec)](https://goreportcard.com/badge/github.com/securego/gosec)
+[![GoDoc](https://godoc.org/github.com/securego/gosec?status.svg)](https://godoc.org/github.com/securego/gosec)
+[![Docs](https://readthedocs.org/projects/docs/badge/?version=latest)](https://securego.io/)
+[![Downloads](https://img.shields.io/github/downloads/securego/gosec/total.svg)](https://github.com/securego/gosec/releases)
+[![Docker Pulls](https://img.shields.io/docker/pulls/securego/gosec.svg)](https://hub.docker.com/r/securego/gosec/tags)
+[![Slack](http://securego.herokuapp.com/badge.svg)](http://securego.herokuapp.com)
+
+## Install
+
+### CI Installation
+
+```bash
+# binary will be $GOPATH/bin/gosec
+curl -sfL https://raw.githubusercontent.com/securego/gosec/master/install.sh | sh -s -- -b $GOPATH/bin vX.Y.Z
+
+# or install it into ./bin/
+curl -sfL https://raw.githubusercontent.com/securego/gosec/master/install.sh | sh -s vX.Y.Z
+
+# In alpine linux (as it does not come with curl by default)
+wget -O - -q https://raw.githubusercontent.com/securego/gosec/master/install.sh | sh -s vX.Y.Z
+
+# If you want to use the checksums provided on the "Releases" page
+# then you will have to download a tar.gz file for your operating system instead of a binary file
+wget https://github.com/securego/gosec/releases/download/vX.Y.Z/gosec_vX.Y.Z_OS.tar.gz
+
+# The file will be in the current folder where you run the command
+# and you can check the checksum like this
+echo " gosec_vX.Y.Z_OS.tar.gz" | sha256sum -c -
+
+gosec --help
+```
+### GitHub Action
+
+You can run `gosec` as a GitHub action as follows:
+
+```yaml
+name: Run Gosec
+on:
+ push:
+ branches:
+ - master
+ pull_request:
+ branches:
+ - master
+jobs:
+ tests:
+ runs-on: ubuntu-latest
+ env:
+ GO111MODULE: on
+ steps:
+ - name: Checkout Source
+ uses: actions/checkout@v2
+ - name: Run Gosec Security Scanner
+ uses: securego/gosec@master
+ with:
+ args: ./...
+```
+
+### Local Installation
+
+```bash
+go get github.com/securego/gosec/cmd/gosec
+```
+
+## Usage
+
+Gosec can be configured to only run a subset of rules, to exclude certain file
+paths, and produce reports in different formats. By default all rules will be
+run against the supplied input files. To recursively scan from the current
+directory you can supply `./...` as the input argument.
+
+
+### Available rules
+
+- G101: Look for hard coded credentials
+- G102: Bind to all interfaces
+- G103: Audit the use of unsafe block
+- G104: Audit errors not checked
+- G106: Audit the use of ssh.InsecureIgnoreHostKey
+- G107: Url provided to HTTP request as taint input
+- G108: Profiling endpoint automatically exposed on /debug/pprof
+- G109: Potential Integer overflow made by strconv.Atoi result conversion to int16/32
+- G110: Potential DoS vulnerability via decompression bomb
+- G201: SQL query construction using format string
+- G202: SQL query construction using string concatenation
+- G203: Use of unescaped data in HTML templates
+- G204: Audit use of command execution
+- G301: Poor file permissions used when creating a directory
+- G302: Poor file permissions used with chmod
+- G303: Creating tempfile using a predictable path
+- G304: File path provided as taint input
+- G305: File traversal when extracting zip archive
+- G306: Poor file permissions used when writing to a new file
+- G307: Deferring a method which returns an error
+- G401: Detect the usage of DES, RC4, MD5 or SHA1
+- G402: Look for bad TLS connection settings
+- G403: Ensure minimum RSA key length of 2048 bits
+- G404: Insecure random number source (rand)
+- G501: Import blacklist: crypto/md5
+- G502: Import blacklist: crypto/des
+- G503: Import blacklist: crypto/rc4
+- G504: Import blacklist: net/http/cgi
+- G505: Import blacklist: crypto/sha1
+- G601: Implicit memory aliasing of items from a range statement
+
+### Retired rules
+
+- G105: Audit the use of math/big.Int.Exp - [CVE is fixed](https://github.com/golang/go/issues/15184)
+
+### Selecting rules
+
+By default, gosec will run all rules against the supplied file paths. It is however possible to select a subset of rules to run via the `-include=` flag,
+or to specify a set of rules to explicitly exclude using the `-exclude=` flag.
+
+```bash
+# Run a specific set of rules
+$ gosec -include=G101,G203,G401 ./...
+
+# Run everything except for rule G303
+$ gosec -exclude=G303 ./...
+```
+### CWE Mapping
+
+Every issue detected by `gosec` is mapped to a [CWE (Common Weakness Enumeration)](http://cwe.mitre.org/data/index.html) which describes in more generic terms the vulnerability. The exact mapping can be found [here](https://github.com/securego/gosec/blob/53be8dd8644ee48802114178cff6eb7e29757414/issue.go#L49).
+
+### Configuration
+
+A number of global settings can be provided in a configuration file as follows:
+
+```JSON
+{
+ "global": {
+ "nosec": "enabled",
+ "audit": "enabled"
+ }
+}
+```
+
+- `nosec`: this setting will overwrite all `#nosec` directives defined throughout the code base
+- `audit`: runs in audit mode which enables additional checks that might be too nosy for normal code analysis
+
+```bash
+# Run with a global configuration file
+$ gosec -conf config.json .
+```
+Also some rules accept configuration. For instance on rule `G104`, it is possible to define packages along with a list
+of functions which will be skipped when auditing the not checked errors:
+
+```JSON
+{
+ "G104": {
+ "io/ioutil": ["WriteFile"]
+ }
+}
+```
+
+You can also configure the hard-coded credentials rule `G101` with additional patterns, or adjust the entropy threshold:
+
+```JSON
+{
+ "G101": {
+ "pattern": "(?i)passwd|pass|password|pwd|secret|private_key|token",
+ "ignore_entropy": false,
+ "entropy_threshold": "80.0",
+ "per_char_threshold": "3.0",
+ "truncate": "32"
+ }
+}
+```
+
+### Dependencies
+
+gosec will fetch automatically the dependencies of the code which is being analyzed when go module is turned on (e.g.` GO111MODULE=on`). If this is not the case,
+the dependencies need to be explicitly downloaded by running the `go get -d` command before the scan.
+
+### Excluding test files and folders
+
+gosec will ignore test files across all packages and any dependencies in your vendor directory.
+
+The scanning of test files can be enabled with the following flag:
+
+```bash
+
+gosec -tests ./...
+```
+
+Also additional folders can be excluded as follows:
+
+```bash
+ gosec -exclude-dir=rules -exclude-dir=cmd ./...
+```
+
+### Annotating code
+
+As with all automated detection tools, there will be cases of false positives. In cases where gosec reports a failure that has been manually verified as being safe,
+it is possible to annotate the code with a `#nosec` comment.
+
+The annotation causes gosec to stop processing any further nodes within the
+AST so can apply to a whole block or more granularly to a single expression.
+
+```go
+
+import "md5" // #nosec
+
+
+func main(){
+
+ /* #nosec */
+ if x > y {
+ h := md5.New() // this will also be ignored
+ }
+
+}
+
+```
+
+When a specific false positive has been identified and verified as safe, you may wish to suppress only that single rule (or a specific set of rules)
+within a section of code, while continuing to scan for other problems. To do this, you can list the rule(s) to be suppressed within
+the `#nosec` annotation, e.g: `/* #nosec G401 */` or `// #nosec G201 G202 G203`
+
+In some cases you may also want to revisit places where `#nosec` annotations
+have been used. To run the scanner and ignore any `#nosec` annotations you
+can do the following:
+
+```bash
+gosec -nosec=true ./...
+```
+
+### Build tags
+
+gosec is able to pass your [Go build tags](https://golang.org/pkg/go/build/) to the analyzer.
+They can be provided as a comma separated list as follows:
+
+```bash
+gosec -tag debug,ignore ./...
+```
+
+### Output formats
+
+gosec currently supports `text`, `json`, `yaml`, `csv`, `sonarqube`, `JUnit XML`, `html` and `golint` output formats. By default
+results will be reported to stdout, but can also be written to an output
+file. The output format is controlled by the `-fmt` flag, and the output file is controlled by the `-out` flag as follows:
+
+```bash
+# Write output in json format to results.json
+$ gosec -fmt=json -out=results.json *.go
+```
+
+## Development
+
+### Build
+
+You can build the binary with:
+```bash
+make
+```
+
+### Tests
+
+You can run all unit tests using:
+```bash
+make test
+```
+
+### Release
+
+You can create a release by tagging the version as follows:
+
+``` bash
+git tag v1.0.0 -m "Release version v1.0.0"
+git push origin v1.0.0
+```
+
+The GitHub [release workflow](.github/workflows/release.yml) triggers immediately after the tag is pushed upstream. This flow will
+release the binaries using the [goreleaser](https://goreleaser.com/actions/) action and then it will build and publish the docker image into Docker Hub.
+
+### Docker image
+
+You can also build locally the docker image by using the command:
+
+```bash
+make image
+```
+
+You can run the `gosec` tool in a container against your local Go project. You only have to mount the project
+into a volume as follows:
+
+```bash
+docker run -it -v <YOUR PROJECT PATH>:/<PROJECT> securego/gosec /<PROJECT>/...
+```
+
+### Generate TLS rule
+
+The configuration of TLS rule can be generated from [Mozilla's TLS ciphers recommendation](https://statics.tls.security.mozilla.org/server-side-tls-conf.json).
+
+First you need to install the generator tool:
+
+```bash
+go get github.com/securego/gosec/cmd/tlsconfig/...
+```
+
+You can invoke now the `go generate` in the root of the project:
+
+```bash
+go generate ./...
+```
+
+This will generate the `rules/tls_config.go` file which will contain the current ciphers recommendation from Mozilla.
diff --git a/vendor/github.com/securego/gosec/v2/action.yml b/vendor/github.com/securego/gosec/v2/action.yml
new file mode 100644
index 0000000..aab6c80
--- /dev/null
+++ b/vendor/github.com/securego/gosec/v2/action.yml
@@ -0,0 +1,19 @@
+name: 'Gosec Security Checker'
+description: 'Runs the gosec security checker'
+author: '@ccojocar'
+
+inputs:
+ args:
+ description: 'Arguments for gosec'
+ required: true
+ default: '-h'
+
+runs:
+ using: 'docker'
+ image: 'docker://securego/gosec'
+ args:
+ - ${{ inputs.args }}
+
+branding:
+ icon: 'shield'
+ color: 'blue'
diff --git a/vendor/github.com/securego/gosec/v2/analyzer.go b/vendor/github.com/securego/gosec/v2/analyzer.go
new file mode 100644
index 0000000..ca4440c
--- /dev/null
+++ b/vendor/github.com/securego/gosec/v2/analyzer.go
@@ -0,0 +1,378 @@
+// (c) Copyright 2016 Hewlett Packard Enterprise Development LP
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package gosec holds the central scanning logic used by gosec security scanner
+package gosec
+
+import (
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/token"
+ "go/types"
+ "log"
+ "os"
+ "path"
+ "path/filepath"
+ "reflect"
+ "regexp"
+ "strconv"
+
+ "strings"
+
+ "golang.org/x/tools/go/packages"
+)
+
+// LoadMode controls the amount of details to return when loading the packages
+const LoadMode = packages.NeedName |
+ packages.NeedFiles |
+ packages.NeedCompiledGoFiles |
+ packages.NeedImports |
+ packages.NeedTypes |
+ packages.NeedTypesSizes |
+ packages.NeedTypesInfo |
+ packages.NeedSyntax
+
+// The Context is populated with data parsed from the source code as it is scanned.
+// It is passed through to all rule functions as they are called. Rules may use
+// this data in conjunction with the encountered AST node.
+type Context struct {
+ FileSet *token.FileSet
+ Comments ast.CommentMap
+ Info *types.Info
+ Pkg *types.Package
+ PkgFiles []*ast.File
+ Root *ast.File
+ Config Config
+ Imports *ImportTracker
+ Ignores []map[string]bool
+ PassedValues map[string]interface{}
+}
+
+// Metrics used when reporting information about a scanning run.
+type Metrics struct {
+ NumFiles int `json:"files"`
+ NumLines int `json:"lines"`
+ NumNosec int `json:"nosec"`
+ NumFound int `json:"found"`
+}
+
+// Analyzer object is the main object of gosec. It has methods to traverse an AST
+// and invoke the correct checking rules on each node as required.
+type Analyzer struct {
+ ignoreNosec bool
+ ruleset RuleSet
+ context *Context
+ config Config
+ logger *log.Logger
+ issues []*Issue
+ stats *Metrics
+ errors map[string][]Error // keys are file paths; values are the golang errors in those files
+ tests bool
+}
+
+// NewAnalyzer builds a new analyzer.
+func NewAnalyzer(conf Config, tests bool, logger *log.Logger) *Analyzer {
+ ignoreNoSec := false
+ if enabled, err := conf.IsGlobalEnabled(Nosec); err == nil {
+ ignoreNoSec = enabled
+ }
+ if logger == nil {
+ logger = log.New(os.Stderr, "[gosec]", log.LstdFlags)
+ }
+ return &Analyzer{
+ ignoreNosec: ignoreNoSec,
+ ruleset: make(RuleSet),
+ context: &Context{},
+ config: conf,
+ logger: logger,
+ issues: make([]*Issue, 0, 16),
+ stats: &Metrics{},
+ errors: make(map[string][]Error),
+ tests: tests,
+ }
+}
+
+// SetConfig updates the analyzer configuration
+func (gosec *Analyzer) SetConfig(conf Config) {
+ gosec.config = conf
+}
+
+// Config returns the current configuration
+func (gosec *Analyzer) Config() Config {
+ return gosec.config
+}
+
+// LoadRules instantiates all the rules to be used when analyzing source
+// packages
+func (gosec *Analyzer) LoadRules(ruleDefinitions map[string]RuleBuilder) {
+ for id, def := range ruleDefinitions {
+ r, nodes := def(id, gosec.config)
+ gosec.ruleset.Register(r, nodes...)
+ }
+}
+
+// Process kicks off the analysis process for a given package
+func (gosec *Analyzer) Process(buildTags []string, packagePaths ...string) error {
+ config := gosec.pkgConfig(buildTags)
+ for _, pkgPath := range packagePaths {
+ pkgs, err := gosec.load(pkgPath, config)
+ if err != nil {
+ gosec.AppendError(pkgPath, err)
+ }
+ for _, pkg := range pkgs {
+ if pkg.Name != "" {
+ err := gosec.ParseErrors(pkg)
+ if err != nil {
+ return fmt.Errorf("parsing errors in pkg %q: %v", pkg.Name, err)
+ }
+ gosec.Check(pkg)
+ }
+ }
+ }
+ sortErrors(gosec.errors)
+ return nil
+}
+
+func (gosec *Analyzer) pkgConfig(buildTags []string) *packages.Config {
+ flags := []string{}
+ if len(buildTags) > 0 {
+ tagsFlag := "-tags=" + strings.Join(buildTags, " ")
+ flags = append(flags, tagsFlag)
+ }
+ return &packages.Config{
+ Mode: LoadMode,
+ BuildFlags: flags,
+ Tests: gosec.tests,
+ }
+}
+
+func (gosec *Analyzer) load(pkgPath string, conf *packages.Config) ([]*packages.Package, error) {
+ abspath, err := GetPkgAbsPath(pkgPath)
+ if err != nil {
+ gosec.logger.Printf("Skipping: %s. Path doesn't exist.", abspath)
+ return []*packages.Package{}, nil
+ }
+
+ gosec.logger.Println("Import directory:", abspath)
+ basePackage, err := build.Default.ImportDir(pkgPath, build.ImportComment)
+ if err != nil {
+ return []*packages.Package{}, fmt.Errorf("importing dir %q: %v", pkgPath, err)
+ }
+
+ var packageFiles []string
+ for _, filename := range basePackage.GoFiles {
+ packageFiles = append(packageFiles, path.Join(pkgPath, filename))
+ }
+ for _, filename := range basePackage.CgoFiles {
+ packageFiles = append(packageFiles, path.Join(pkgPath, filename))
+ }
+
+ if gosec.tests {
+ testsFiles := []string{}
+ testsFiles = append(testsFiles, basePackage.TestGoFiles...)
+ testsFiles = append(testsFiles, basePackage.XTestGoFiles...)
+ for _, filename := range testsFiles {
+ packageFiles = append(packageFiles, path.Join(pkgPath, filename))
+ }
+ }
+
+ pkgs, err := packages.Load(conf, packageFiles...)
+ if err != nil {
+ return []*packages.Package{}, fmt.Errorf("loading files from package %q: %v", pkgPath, err)
+ }
+ return pkgs, nil
+}
+
+// Check runs analysis on the given package
+func (gosec *Analyzer) Check(pkg *packages.Package) {
+ gosec.logger.Println("Checking package:", pkg.Name)
+ for _, file := range pkg.Syntax {
+ checkedFile := pkg.Fset.File(file.Pos()).Name()
+ // Skip non-Go files from analysis (e.g. a Cgo file is expanded into 3 different files
+ // stored in the cache which do not need to be analyzed)
+ if filepath.Ext(checkedFile) != ".go" {
+ continue
+ }
+ gosec.logger.Println("Checking file:", checkedFile)
+ gosec.context.FileSet = pkg.Fset
+ gosec.context.Config = gosec.config
+ gosec.context.Comments = ast.NewCommentMap(gosec.context.FileSet, file, file.Comments)
+ gosec.context.Root = file
+ gosec.context.Info = pkg.TypesInfo
+ gosec.context.Pkg = pkg.Types
+ gosec.context.PkgFiles = pkg.Syntax
+ gosec.context.Imports = NewImportTracker()
+ gosec.context.Imports.TrackFile(file)
+ gosec.context.PassedValues = make(map[string]interface{})
+ ast.Walk(gosec, file)
+ gosec.stats.NumFiles++
+ gosec.stats.NumLines += pkg.Fset.File(file.Pos()).LineCount()
+ }
+}
+
+// ParseErrors converts the load/type-check errors attached to the given
+// package into gosec Errors, grouped by file name in gosec.errors.
+//
+// pkgErr.Pos has the form "file:line:column"; line and column are optional.
+// NOTE(review): splitting on ":" assumes the file path itself contains no
+// colon (e.g. no Windows drive letter) — confirm on Windows.
+func (gosec *Analyzer) ParseErrors(pkg *packages.Package) error {
+	if len(pkg.Errors) == 0 {
+		return nil
+	}
+	for _, pkgErr := range pkg.Errors {
+		parts := strings.Split(pkgErr.Pos, ":")
+		file := parts[0]
+		var err error
+		var line int
+		if len(parts) > 1 {
+			if line, err = strconv.Atoi(parts[1]); err != nil {
+				return fmt.Errorf("parsing line: %v", err)
+			}
+		}
+		var column int
+		if len(parts) > 2 {
+			if column, err = strconv.Atoi(parts[2]); err != nil {
+				return fmt.Errorf("parsing column: %v", err)
+			}
+		}
+		msg := strings.TrimSpace(pkgErr.Msg)
+		newErr := NewError(line, column, msg)
+		// append on a nil slice allocates one, so the previous lookup/else
+		// branching was redundant.
+		gosec.errors[file] = append(gosec.errors[file], *newErr)
+	}
+	return nil
+}
+
+// AppendError appends an error to the file errors.
+func (gosec *Analyzer) AppendError(file string, err error) {
+	// Do not report the error for empty packages (e.g. files excluded from
+	// build with a tag). The pattern is a plain literal, so a substring check
+	// suffices; compiling a regexp on every call was wasted work.
+	if strings.Contains(err.Error(), "no buildable Go source files in") {
+		return
+	}
+	errors := []Error{}
+	if ferrs, ok := gosec.errors[file]; ok {
+		errors = ferrs
+	}
+	ferr := NewError(0, 0, err.Error())
+	errors = append(errors, *ferr)
+	gosec.errors[file] = errors
+}
+
+// ignore reports whether a node (and its sub-tree) is tagged with a #nosec
+// comment. It returns (nil, true) when everything under the node must be
+// ignored, or (ruleIDs, false) when only specific rules are excluded.
+func (gosec *Analyzer) ignore(n ast.Node) ([]string, bool) {
+	if groups, ok := gosec.context.Comments[n]; ok && !gosec.ignoreNosec {
+
+		// Checks if an alternative for #nosec is set and, if not, uses the default.
+		noSecDefaultTag := "#nosec"
+		noSecAlternativeTag, err := gosec.config.GetGlobal(NoSecAlternative)
+		if err != nil {
+			noSecAlternativeTag = noSecDefaultTag
+		}
+
+		// Rule IDs (e.g. G101) that may follow a nosec tag. Compiled once
+		// here instead of once per comment group as before.
+		re := regexp.MustCompile(`(G\d{3})`)
+
+		for _, group := range groups {
+
+			foundDefaultTag := strings.Contains(group.Text(), noSecDefaultTag)
+			foundAlternativeTag := strings.Contains(group.Text(), noSecAlternativeTag)
+
+			if foundDefaultTag || foundAlternativeTag {
+				gosec.stats.NumNosec++
+
+				// Pull out the specific rules that are listed to be ignored.
+				matches := re.FindAllStringSubmatch(group.Text(), -1)
+
+				// If no specific rules were given, ignore everything.
+				if len(matches) == 0 {
+					return nil, true
+				}
+
+				// Find the rule IDs to ignore.
+				var ignores []string
+				for _, v := range matches {
+					ignores = append(ignores, v[1])
+				}
+				return ignores, false
+			}
+		}
+	}
+	return nil, false
+}
+
+// Visit runs the gosec visitor logic over an AST created by parsing go code.
+// Rule methods added with AddRule will be invoked as necessary. The ignore
+// sets form a stack mirroring the AST walk: pushed on entry, popped when the
+// walker signals the end of a branch with a nil node.
+func (gosec *Analyzer) Visit(n ast.Node) ast.Visitor {
+	// If we've reached the end of this branch, pop off the ignores stack.
+	if n == nil {
+		if len(gosec.context.Ignores) > 0 {
+			gosec.context.Ignores = gosec.context.Ignores[1:]
+		}
+		return gosec
+	}
+
+	// Get any new rule exclusions.
+	ignoredRules, ignoreAll := gosec.ignore(n)
+	if ignoreAll {
+		// Returning nil stops the walk into this node's children entirely.
+		return nil
+	}
+
+	// Now create the union of exclusions.
+	ignores := map[string]bool{}
+	if len(gosec.context.Ignores) > 0 {
+		for k, v := range gosec.context.Ignores[0] {
+			ignores[k] = v
+		}
+	}
+
+	for _, v := range ignoredRules {
+		ignores[v] = true
+	}
+
+	// Push the new set onto the stack.
+	gosec.context.Ignores = append([]map[string]bool{ignores}, gosec.context.Ignores...)
+
+	// Track aliased and initialization imports
+	gosec.context.Imports.TrackImport(n)
+
+	for _, rule := range gosec.ruleset.RegisteredFor(n) {
+		if _, ok := ignores[rule.ID()]; ok {
+			continue
+		}
+		issue, err := rule.Match(n, gosec.context)
+		if err != nil {
+			file, line := GetLocation(n, gosec.context)
+			file = path.Base(file)
+			gosec.logger.Printf("Rule error: %v => %s (%s:%d)\n", reflect.TypeOf(rule), err, file, line)
+		}
+		// A rule may return an issue even alongside an error; record it either way.
+		if issue != nil {
+			gosec.issues = append(gosec.issues, issue)
+			gosec.stats.NumFound++
+		}
+	}
+	return gosec
+}
+
+// Report returns the current issues discovered, the metrics about the scan,
+// and the per-file parse/load errors collected so far.
+func (gosec *Analyzer) Report() ([]*Issue, *Metrics, map[string][]Error) {
+	return gosec.issues, gosec.stats, gosec.errors
+}
+
+// Reset clears state such as context, issues and metrics from the configured analyzer.
+// Note that the rule set is also replaced, so rules must be re-registered after Reset.
+func (gosec *Analyzer) Reset() {
+	gosec.context = &Context{}
+	gosec.issues = make([]*Issue, 0, 16)
+	gosec.stats = &Metrics{}
+	gosec.ruleset = NewRuleSet()
+}
diff --git a/vendor/github.com/securego/gosec/v2/call_list.go b/vendor/github.com/securego/gosec/v2/call_list.go
new file mode 100644
index 0000000..115c6c8
--- /dev/null
+++ b/vendor/github.com/securego/gosec/v2/call_list.go
@@ -0,0 +1,109 @@
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gosec
+
+import (
+ "go/ast"
+ "strings"
+)
+
+const vendorPath = "vendor/"
+
+type set map[string]bool
+
+// CallList is used to check for usage of specific packages
+// and functions.
+type CallList map[string]set
+
+// NewCallList creates a new empty CallList, ready for Add/AddAll.
+func NewCallList() CallList {
+	return make(CallList)
+}
+
+// AddAll will add several calls to the call list at once, all sharing the
+// same selector (package or type name).
+func (c CallList) AddAll(selector string, idents ...string) {
+	for _, ident := range idents {
+		c.Add(selector, ident)
+	}
+}
+
+// Add a selector and call to the call list, creating the selector's
+// entry on first use.
+func (c CallList) Add(selector, ident string) {
+	if _, ok := c[selector]; !ok {
+		c[selector] = make(set)
+	}
+	c[selector][ident] = true
+}
+
+// Contains returns true if the package and function are
+// members of this call list.
+func (c CallList) Contains(selector, ident string) bool {
+	if idents, ok := c[selector]; ok {
+		_, found := idents[ident]
+		return found
+	}
+	return false
+}
+
+// ContainsPointer returns true if a pointer to the selector type or the type
+// itself is a member of this call list. The selector must start with "*".
+func (c CallList) ContainsPointer(selector, ident string) bool {
+	if strings.HasPrefix(selector, "*") {
+		// Check the pointer form first, then fall back to the value type.
+		if c.Contains(selector, ident) {
+			return true
+		}
+		s := strings.TrimPrefix(selector, "*")
+		return c.Contains(s, ident)
+	}
+	return false
+}
+
+// ContainsPkgCallExpr resolves the call expression name and type, and then further looks
+// up the package path for that type. Finally, it determines if the call exists within the
+// call list, returning the matched *ast.CallExpr or nil.
+func (c CallList) ContainsPkgCallExpr(n ast.Node, ctx *Context, stripVendor bool) *ast.CallExpr {
+	selector, ident, err := GetCallInfo(n, ctx)
+	if err != nil {
+		return nil
+	}
+
+	// Use only explicit path (optionally strip vendor path prefix) to reduce conflicts
+	path, ok := GetImportPath(selector, ctx)
+	if !ok {
+		return nil
+	}
+	if stripVendor {
+		// Vendored packages are listed under their original path.
+		if vendorIdx := strings.Index(path, vendorPath); vendorIdx >= 0 {
+			path = path[vendorIdx+len(vendorPath):]
+		}
+	}
+	if !c.Contains(path, ident) {
+		return nil
+	}
+
+	return n.(*ast.CallExpr)
+}
+
+// ContainsCallExpr resolves the call expression name and type, and then determines
+// if the call exists within the call list (either as a value or pointer receiver),
+// returning the matched *ast.CallExpr or nil.
+func (c CallList) ContainsCallExpr(n ast.Node, ctx *Context) *ast.CallExpr {
+	selector, ident, err := GetCallInfo(n, ctx)
+	if err != nil {
+		return nil
+	}
+	if !c.Contains(selector, ident) && !c.ContainsPointer(selector, ident) {
+		return nil
+	}
+
+	return n.(*ast.CallExpr)
+}
diff --git a/vendor/github.com/securego/gosec/v2/config.go b/vendor/github.com/securego/gosec/v2/config.go
new file mode 100644
index 0000000..5b7f739
--- /dev/null
+++ b/vendor/github.com/securego/gosec/v2/config.go
@@ -0,0 +1,125 @@
+package gosec
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+)
+
+const (
+ // Globals are applicable to all rules and used for general
+ // configuration settings for gosec.
+ Globals = "global"
+)
+
+// GlobalOption defines the name of the global options
+type GlobalOption string
+
+const (
+ // Nosec global option for #nosec directive
+ Nosec GlobalOption = "nosec"
+ // Audit global option which indicates that gosec runs in audit mode
+ Audit GlobalOption = "audit"
+ // NoSecAlternative global option alternative for #nosec directive
+ NoSecAlternative GlobalOption = "#nosec"
+)
+
+// Config is used to provide configuration and customization to each of the rules.
+type Config map[string]interface{}
+
+// NewConfig initializes a new configuration instance. The configuration data then
+// needs to be loaded via c.ReadFrom(strings.NewReader("config data"))
+// or from a *os.File.
+func NewConfig() Config {
+ cfg := make(Config)
+ cfg[Globals] = make(map[GlobalOption]string)
+ return cfg
+}
+
+func (c Config) keyToGlobalOptions(key string) GlobalOption {
+ return GlobalOption(key)
+}
+
+func (c Config) convertGlobals() {
+ if globals, ok := c[Globals]; ok {
+ if settings, ok := globals.(map[string]interface{}); ok {
+ validGlobals := map[GlobalOption]string{}
+ for k, v := range settings {
+ validGlobals[c.keyToGlobalOptions(k)] = fmt.Sprintf("%v", v)
+ }
+ c[Globals] = validGlobals
+ }
+ }
+}
+
+// ReadFrom implements the io.ReaderFrom interface. This
+// should be used with io.Reader to load configuration from
+// a file, a string, etc.
+func (c Config) ReadFrom(r io.Reader) (int64, error) {
+	data, err := ioutil.ReadAll(r)
+	if err != nil {
+		return int64(len(data)), err
+	}
+	if err = json.Unmarshal(data, &c); err != nil {
+		return int64(len(data)), err
+	}
+	// Normalize the "global" section keys into typed GlobalOption values.
+	c.convertGlobals()
+	return int64(len(data)), nil
+}
+
+// WriteTo implements the io.WriteTo interface. This should
+// be used to save or print out the configuration information.
+func (c Config) WriteTo(w io.Writer) (int64, error) {
+ data, err := json.Marshal(c)
+ if err != nil {
+ return int64(len(data)), err
+ }
+ return io.Copy(w, bytes.NewReader(data))
+}
+
+// Get returns the configuration section for the supplied key.
+func (c Config) Get(section string) (interface{}, error) {
+	settings, found := c[section]
+	if !found {
+		// NOTE(review): error string begins with a capital letter, contrary to
+		// Go convention; left unchanged in case callers match on it.
+		return nil, fmt.Errorf("Section %s not in configuration", section)
+	}
+	return settings, nil
+}
+
+// Set section in the configuration to specified value
+func (c Config) Set(section string, value interface{}) {
+ c[section] = value
+}
+
+// GetGlobal returns value associated with global configuration option
+func (c Config) GetGlobal(option GlobalOption) (string, error) {
+ if globals, ok := c[Globals]; ok {
+ if settings, ok := globals.(map[GlobalOption]string); ok {
+ if value, ok := settings[option]; ok {
+ return value, nil
+ }
+ return "", fmt.Errorf("global setting for %s not found", option)
+ }
+ }
+ return "", fmt.Errorf("no global config options found")
+}
+
+// SetGlobal associates a value with a global configuration option
+func (c Config) SetGlobal(option GlobalOption, value string) {
+ if globals, ok := c[Globals]; ok {
+ if settings, ok := globals.(map[GlobalOption]string); ok {
+ settings[option] = value
+ }
+ }
+}
+
+// IsGlobalEnabled checks if a global option is enabled
+func (c Config) IsGlobalEnabled(option GlobalOption) (bool, error) {
+ value, err := c.GetGlobal(option)
+ if err != nil {
+ return false, err
+ }
+ return (value == "true" || value == "enabled"), nil
+}
diff --git a/vendor/github.com/securego/gosec/v2/errors.go b/vendor/github.com/securego/gosec/v2/errors.go
new file mode 100644
index 0000000..a27aa58
--- /dev/null
+++ b/vendor/github.com/securego/gosec/v2/errors.go
@@ -0,0 +1,33 @@
+package gosec
+
+import (
+ "sort"
+)
+
+// Error is used when there are golang errors while parsing the AST
+type Error struct {
+ Line int `json:"line"`
+ Column int `json:"column"`
+ Err string `json:"error"`
+}
+
+// NewError creates Error object
+func NewError(line, column int, err string) *Error {
+ return &Error{
+ Line: line,
+ Column: column,
+ Err: err,
+ }
+}
+
+// sortErrors sorts each file's errors in place by line number,
+// breaking ties by column.
+func sortErrors(allErrors map[string][]Error) {
+	for _, errors := range allErrors {
+		sort.Slice(errors, func(i, j int) bool {
+			if errors[i].Line == errors[j].Line {
+				return errors[i].Column <= errors[j].Column
+			}
+			return errors[i].Line < errors[j].Line
+		})
+	}
+}
diff --git a/vendor/github.com/securego/gosec/v2/go.mod b/vendor/github.com/securego/gosec/v2/go.mod
new file mode 100644
index 0000000..edfa343
--- /dev/null
+++ b/vendor/github.com/securego/gosec/v2/go.mod
@@ -0,0 +1,19 @@
+module github.com/securego/gosec/v2
+
+require (
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/golang/protobuf v1.3.2 // indirect
+ github.com/gookit/color v1.2.4
+ github.com/kr/pretty v0.1.0 // indirect
+ github.com/mozilla/tls-observatory v0.0.0-20200317151703-4fa42e1c2dee
+ github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d
+ github.com/onsi/ginkgo v1.12.0
+ github.com/onsi/gomega v1.9.0
+ github.com/stretchr/testify v1.4.0 // indirect
+ golang.org/x/text v0.3.2 // indirect
+ golang.org/x/tools v0.0.0-20200331202046-9d5940d49312
+ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
+ gopkg.in/yaml.v2 v2.2.8
+)
+
+go 1.14
diff --git a/vendor/github.com/securego/gosec/v2/go.sum b/vendor/github.com/securego/gosec/v2/go.sum
new file mode 100644
index 0000000..fff56a3
--- /dev/null
+++ b/vendor/github.com/securego/gosec/v2/go.sum
@@ -0,0 +1,82 @@
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/gookit/color v1.2.4 h1:xOYBan3Fwlrqj1M1UN2TlHOCRiek3bGzWf/vPnJ1roE=
+github.com/gookit/color v1.2.4/go.mod h1:AhIE+pS6D4Ql0SQWbBeXPHw7gY0/sjHoA4s/n1KB7xg=
+github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/mozilla/tls-observatory v0.0.0-20200317151703-4fa42e1c2dee h1:1xJ+Xi9lYWLaaP4yB67ah0+548CD3110mCPWhVVjFkI=
+github.com/mozilla/tls-observatory v0.0.0-20200317151703-4fa42e1c2dee/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk=
+github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d h1:AREM5mwr4u1ORQBMvzfzBgpsctsbQikCVpvC+tX285E=
+github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.0 h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU=
+github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
+github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.9.0 h1:R1uwffexN6Pr340GtYRIdZmAiN4J+iw6WG4wog1DUXg=
+github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b h1:0mm1VjtFUOIlE1SbDlwjYaDxZVDP2S5ou6y0gSgXHu8=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e h1:N7DeIrjYszNmSW409R3frPPwglRwMkXSBzwVbkOjLLA=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20200331202046-9d5940d49312 h1:2PHG+Ia3gK1K2kjxZnSylizb//eyaMG8gDFbOG7wLV8=
+golang.org/x/tools v0.0.0-20200331202046-9d5940d49312/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/securego/gosec/v2/helpers.go b/vendor/github.com/securego/gosec/v2/helpers.go
new file mode 100644
index 0000000..40dc8e9
--- /dev/null
+++ b/vendor/github.com/securego/gosec/v2/helpers.go
@@ -0,0 +1,435 @@
+// (c) Copyright 2016 Hewlett Packard Enterprise Development LP
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gosec
+
+import (
+ "errors"
+ "fmt"
+ "go/ast"
+ "go/token"
+ "go/types"
+ "os"
+ "os/user"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+)
+
+// MatchCallByPackage ensures that the specified package is imported,
+// adjusts the name for any aliases and ignores cases that are
+// initialization only imports.
+//
+// Usage:
+// node, matched := MatchCallByPackage(n, ctx, "math/rand", "Read")
+//
+func MatchCallByPackage(n ast.Node, c *Context, pkg string, names ...string) (*ast.CallExpr, bool) {
+
+ importedName, found := GetImportedName(pkg, c)
+ if !found {
+ return nil, false
+ }
+
+ if callExpr, ok := n.(*ast.CallExpr); ok {
+ packageName, callName, err := GetCallInfo(callExpr, c)
+ if err != nil {
+ return nil, false
+ }
+ if packageName == importedName {
+ for _, name := range names {
+ if callName == name {
+ return callExpr, true
+ }
+ }
+ }
+ }
+ return nil, false
+}
+
+// MatchCompLit will match an ast.CompositeLit based on the supplied type
+func MatchCompLit(n ast.Node, ctx *Context, required string) *ast.CompositeLit {
+ if complit, ok := n.(*ast.CompositeLit); ok {
+ typeOf := ctx.Info.TypeOf(complit)
+ if typeOf.String() == required {
+ return complit
+ }
+ }
+ return nil
+}
+
+// GetInt will read and return an integer value from an ast.BasicLit
+func GetInt(n ast.Node) (int64, error) {
+ if node, ok := n.(*ast.BasicLit); ok && node.Kind == token.INT {
+ return strconv.ParseInt(node.Value, 0, 64)
+ }
+ return 0, fmt.Errorf("Unexpected AST node type: %T", n)
+}
+
+// GetFloat will read and return a float value from an ast.BasicLit
+func GetFloat(n ast.Node) (float64, error) {
+ if node, ok := n.(*ast.BasicLit); ok && node.Kind == token.FLOAT {
+ return strconv.ParseFloat(node.Value, 64)
+ }
+ return 0.0, fmt.Errorf("Unexpected AST node type: %T", n)
+}
+
+// GetChar will read and return a char value from an ast.BasicLit
+func GetChar(n ast.Node) (byte, error) {
+ if node, ok := n.(*ast.BasicLit); ok && node.Kind == token.CHAR {
+ return node.Value[0], nil
+ }
+ return 0, fmt.Errorf("Unexpected AST node type: %T", n)
+}
+
+// GetString will read and return a string value from an ast.BasicLit
+func GetString(n ast.Node) (string, error) {
+ if node, ok := n.(*ast.BasicLit); ok && node.Kind == token.STRING {
+ return strconv.Unquote(node.Value)
+ }
+ return "", fmt.Errorf("Unexpected AST node type: %T", n)
+}
+
+// GetCallObject returns the object and call expression and associated
+// object for a given AST node. nil, nil will be returned if the
+// object cannot be resolved.
+func GetCallObject(n ast.Node, ctx *Context) (*ast.CallExpr, types.Object) {
+ switch node := n.(type) {
+ case *ast.CallExpr:
+ switch fn := node.Fun.(type) {
+ case *ast.Ident:
+ return node, ctx.Info.Uses[fn]
+ case *ast.SelectorExpr:
+ return node, ctx.Info.Uses[fn.Sel]
+ }
+ }
+ return nil, nil
+}
+
+// GetCallInfo returns the package or type and name associated with a
+// call expression.
+func GetCallInfo(n ast.Node, ctx *Context) (string, string, error) {
+ switch node := n.(type) {
+ case *ast.CallExpr:
+ switch fn := node.Fun.(type) {
+ case *ast.SelectorExpr:
+ switch expr := fn.X.(type) {
+ case *ast.Ident:
+ if expr.Obj != nil && expr.Obj.Kind == ast.Var {
+ t := ctx.Info.TypeOf(expr)
+ if t != nil {
+ return t.String(), fn.Sel.Name, nil
+ }
+ return "undefined", fn.Sel.Name, fmt.Errorf("missing type info")
+ }
+ return expr.Name, fn.Sel.Name, nil
+ case *ast.SelectorExpr:
+ if expr.Sel != nil {
+ t := ctx.Info.TypeOf(expr.Sel)
+ if t != nil {
+ return t.String(), fn.Sel.Name, nil
+ }
+ return "undefined", fn.Sel.Name, fmt.Errorf("missing type info")
+ }
+ case *ast.CallExpr:
+ switch call := expr.Fun.(type) {
+ case *ast.Ident:
+ if call.Name == "new" {
+ t := ctx.Info.TypeOf(expr.Args[0])
+ if t != nil {
+ return t.String(), fn.Sel.Name, nil
+ }
+ return "undefined", fn.Sel.Name, fmt.Errorf("missing type info")
+ }
+ if call.Obj != nil {
+ switch decl := call.Obj.Decl.(type) {
+ case *ast.FuncDecl:
+ ret := decl.Type.Results
+ if ret != nil && len(ret.List) > 0 {
+ ret1 := ret.List[0]
+ if ret1 != nil {
+ t := ctx.Info.TypeOf(ret1.Type)
+ if t != nil {
+ return t.String(), fn.Sel.Name, nil
+ }
+ return "undefined", fn.Sel.Name, fmt.Errorf("missing type info")
+ }
+ }
+ }
+ }
+
+ }
+ }
+ case *ast.Ident:
+ return ctx.Pkg.Name(), fn.Name, nil
+ }
+ }
+
+ return "", "", fmt.Errorf("unable to determine call info")
+}
+
+// GetCallStringArgsValues returns the values of strings arguments if they can be resolved
+func GetCallStringArgsValues(n ast.Node, ctx *Context) []string {
+ values := []string{}
+ switch node := n.(type) {
+ case *ast.CallExpr:
+ for _, arg := range node.Args {
+ switch param := arg.(type) {
+ case *ast.BasicLit:
+ value, err := GetString(param)
+ if err == nil {
+ values = append(values, value)
+ }
+ case *ast.Ident:
+ values = append(values, GetIdentStringValues(param)...)
+ }
+ }
+ }
+ return values
+}
+
+// GetIdentStringValues return the string values of an Ident if they can be resolved
+func GetIdentStringValues(ident *ast.Ident) []string {
+ values := []string{}
+ obj := ident.Obj
+ if obj != nil {
+ switch decl := obj.Decl.(type) {
+ case *ast.ValueSpec:
+ for _, v := range decl.Values {
+ value, err := GetString(v)
+ if err == nil {
+ values = append(values, value)
+ }
+ }
+ case *ast.AssignStmt:
+ for _, v := range decl.Rhs {
+ value, err := GetString(v)
+ if err == nil {
+ values = append(values, value)
+ }
+ }
+ }
+
+ }
+ return values
+}
+
+// GetImportedName returns the name used for the package within the
+// code. It will resolve aliases and ignores initialization only imports.
+func GetImportedName(path string, ctx *Context) (string, bool) {
+ importName, imported := ctx.Imports.Imported[path]
+ if !imported {
+ return "", false
+ }
+
+ if _, initonly := ctx.Imports.InitOnly[path]; initonly {
+ return "", false
+ }
+
+ if alias, ok := ctx.Imports.Aliased[path]; ok {
+ importName = alias
+ }
+ return importName, true
+}
+
+// GetImportPath resolves the full import path of an identifier based on
+// the imports in the current context.
+func GetImportPath(name string, ctx *Context) (string, bool) {
+ for path := range ctx.Imports.Imported {
+ if imported, ok := GetImportedName(path, ctx); ok && imported == name {
+ return path, true
+ }
+ }
+ return "", false
+}
+
+// GetLocation returns the filename and line number of an ast.Node
+func GetLocation(n ast.Node, ctx *Context) (string, int) {
+ fobj := ctx.FileSet.File(n.Pos())
+ return fobj.Name(), fobj.Line(n.Pos())
+}
+
+// Gopath returns all GOPATHs
+func Gopath() []string {
+ defaultGoPath := runtime.GOROOT()
+ if u, err := user.Current(); err == nil {
+ defaultGoPath = filepath.Join(u.HomeDir, "go")
+ }
+ path := Getenv("GOPATH", defaultGoPath)
+ paths := strings.Split(path, string(os.PathListSeparator))
+ for idx, path := range paths {
+ if abs, err := filepath.Abs(path); err == nil {
+ paths[idx] = abs
+ }
+ }
+ return paths
+}
+
+// Getenv returns the value of the environment variable, otherwise
+// returns the default if the variable is not set or is empty.
+func Getenv(key, userDefault string) string {
+	if val := os.Getenv(key); val != "" {
+		return val
+	}
+	return userDefault
+}
+
+// GetPkgRelativePath returns the Go package path relative to a GOPATH
+// src directory, derived from the given path.
+func GetPkgRelativePath(path string) (string, error) {
+	abspath, err := filepath.Abs(path)
+	if err != nil {
+		abspath = path
+	}
+	// A .go file resolves to its containing package directory.
+	if strings.HasSuffix(abspath, ".go") {
+		abspath = filepath.Dir(abspath)
+	}
+	for _, base := range Gopath() {
+		projectRoot := filepath.FromSlash(fmt.Sprintf("%s/src/", base))
+		if strings.HasPrefix(abspath, projectRoot) {
+			return strings.TrimPrefix(abspath, projectRoot), nil
+		}
+	}
+	return "", errors.New("no project relative path found")
+}
+
+// GetPkgAbsPath returns the Go package absolute path derived from
+// the given path
+func GetPkgAbsPath(pkgPath string) (string, error) {
+ absPath, err := filepath.Abs(pkgPath)
+ if err != nil {
+ return "", err
+ }
+ if _, err := os.Stat(absPath); os.IsNotExist(err) {
+ return "", errors.New("no project absolute path found")
+ }
+ return absPath, nil
+}
+
+// ConcatString recursively concatenates strings from a binary expression
+func ConcatString(n *ast.BinaryExpr) (string, bool) {
+ var s string
+ // sub expressions are found in X object, Y object is always last BasicLit
+ if rightOperand, ok := n.Y.(*ast.BasicLit); ok {
+ if str, err := GetString(rightOperand); err == nil {
+ s = str + s
+ }
+ } else {
+ return "", false
+ }
+ if leftOperand, ok := n.X.(*ast.BinaryExpr); ok {
+ if recursion, ok := ConcatString(leftOperand); ok {
+ s = recursion + s
+ }
+ } else if leftOperand, ok := n.X.(*ast.BasicLit); ok {
+ if str, err := GetString(leftOperand); err == nil {
+ s = str + s
+ }
+ } else {
+ return "", false
+ }
+ return s, true
+}
+
+// FindVarIdentities returns array of all variable identities in a given binary expression
+func FindVarIdentities(n *ast.BinaryExpr, c *Context) ([]*ast.Ident, bool) {
+ identities := []*ast.Ident{}
+ // sub expressions are found in X object, Y object is always the last term
+ if rightOperand, ok := n.Y.(*ast.Ident); ok {
+ obj := c.Info.ObjectOf(rightOperand)
+ if _, ok := obj.(*types.Var); ok && !TryResolve(rightOperand, c) {
+ identities = append(identities, rightOperand)
+ }
+ }
+ if leftOperand, ok := n.X.(*ast.BinaryExpr); ok {
+ if leftIdentities, ok := FindVarIdentities(leftOperand, c); ok {
+ identities = append(identities, leftIdentities...)
+ }
+ } else {
+ if leftOperand, ok := n.X.(*ast.Ident); ok {
+ obj := c.Info.ObjectOf(leftOperand)
+ if _, ok := obj.(*types.Var); ok && !TryResolve(leftOperand, c) {
+ identities = append(identities, leftOperand)
+ }
+ }
+ }
+
+ if len(identities) > 0 {
+ return identities, true
+ }
+ // if nil or error, return false
+ return nil, false
+}
+
+// PackagePaths returns a slice with all package paths at the given root
+// directory. A root ending in "..." is walked recursively; any other root
+// is returned as-is.
+func PackagePaths(root string, excludes []*regexp.Regexp) ([]string, error) {
+	if strings.HasSuffix(root, "...") {
+		root = root[0 : len(root)-3]
+	} else {
+		return []string{root}, nil
+	}
+	paths := map[string]bool{}
+	err := filepath.Walk(root, func(path string, f os.FileInfo, err error) error {
+		// Propagate walk errors (e.g. unreadable directories) instead of
+		// silently dropping them as before.
+		if err != nil {
+			return err
+		}
+		if filepath.Ext(path) == ".go" {
+			path = filepath.Dir(path)
+			if isExcluded(path, excludes) {
+				return nil
+			}
+			paths[path] = true
+		}
+		return nil
+	})
+	if err != nil {
+		return []string{}, err
+	}
+
+	result := []string{}
+	for path := range paths {
+		result = append(result, path)
+	}
+	return result, nil
+}
+
+// isExcluded checks if a string matches any of the exclusion regexps
+func isExcluded(str string, excludes []*regexp.Regexp) bool {
+ if excludes == nil {
+ return false
+ }
+ for _, exclude := range excludes {
+ if exclude != nil && exclude.MatchString(str) {
+ return true
+ }
+ }
+ return false
+}
+
+// ExcludedDirsRegExp builds the regexps for a list of excluded dirs provided as strings.
+func ExcludedDirsRegExp(excludedDirs []string) []*regexp.Regexp {
+	var exps []*regexp.Regexp
+	for _, excludedDir := range excludedDirs {
+		// NOTE(review): excludedDir is interpolated without regexp.QuoteMeta,
+		// so regexp metacharacters in a dir name widen the match — confirm intended.
+		str := fmt.Sprintf(`([\\/])?%s([\\/])?`, excludedDir)
+		r := regexp.MustCompile(str)
+		exps = append(exps, r)
+	}
+	return exps
+}
+
+// RootPath returns the absolute root path of a scan
+func RootPath(root string) (string, error) {
+ if strings.HasSuffix(root, "...") {
+ root = root[0 : len(root)-3]
+ }
+ return filepath.Abs(root)
+}
diff --git a/vendor/github.com/securego/gosec/v2/import_tracker.go b/vendor/github.com/securego/gosec/v2/import_tracker.go
new file mode 100644
index 0000000..cbb8c55
--- /dev/null
+++ b/vendor/github.com/securego/gosec/v2/import_tracker.go
@@ -0,0 +1,75 @@
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gosec
+
+import (
+ "go/ast"
+ "go/types"
+ "strings"
+)
+
+// ImportTracker is used to normalize the packages that have been imported
+// by a source file. It is able to differentiate between plain imports, aliased
+// imports and init only imports.
+type ImportTracker struct {
+ Imported map[string]string
+ Aliased map[string]string
+ InitOnly map[string]bool
+}
+
+// NewImportTracker creates an empty Import tracker instance
+func NewImportTracker() *ImportTracker {
+ return &ImportTracker{
+ make(map[string]string),
+ make(map[string]string),
+ make(map[string]bool),
+ }
+}
+
+// TrackFile track all the imports used by the supplied file
+func (t *ImportTracker) TrackFile(file *ast.File) {
+ for _, imp := range file.Imports {
+ path := strings.Trim(imp.Path.Value, `"`)
+ parts := strings.Split(path, "/")
+ if len(parts) > 0 {
+ name := parts[len(parts)-1]
+ t.Imported[path] = name
+ }
+ }
+}
+
+// TrackPackages tracks all the imports used by the supplied packages
+func (t *ImportTracker) TrackPackages(pkgs ...*types.Package) {
+ for _, pkg := range pkgs {
+ t.Imported[pkg.Path()] = pkg.Name()
+ }
+}
+
+// TrackImport tracks imports and handles the 'unsafe' import
+func (t *ImportTracker) TrackImport(n ast.Node) {
+ if imported, ok := n.(*ast.ImportSpec); ok {
+ path := strings.Trim(imported.Path.Value, `"`)
+ if imported.Name != nil {
+ if imported.Name.Name == "_" {
+ // Initialization only import
+ t.InitOnly[path] = true
+ } else {
+ // Aliased import
+ t.Aliased[path] = imported.Name.Name
+ }
+ }
+ if path == "unsafe" {
+ t.Imported[path] = path
+ }
+ }
+}
diff --git a/vendor/github.com/securego/gosec/v2/install.sh b/vendor/github.com/securego/gosec/v2/install.sh
new file mode 100644
index 0000000..37bed0a
--- /dev/null
+++ b/vendor/github.com/securego/gosec/v2/install.sh
@@ -0,0 +1,372 @@
+#!/bin/sh
+set -e
+# Code generated by godownloader on 2020-03-02T13:35:13Z. DO NOT EDIT.
+#
+
+usage() {
+ this=$1
+ cat </dev/null
+}
+echoerr() {
+ echo "$@" 1>&2
+}
+log_prefix() {
+ echo "$0"
+}
+_logp=6
+log_set_priority() {
+ _logp="$1"
+}
+log_priority() {
+ if test -z "$1"; then
+ echo "$_logp"
+ return
+ fi
+ [ "$1" -le "$_logp" ]
+}
+log_tag() {
+ case $1 in
+ 0) echo "emerg" ;;
+ 1) echo "alert" ;;
+ 2) echo "crit" ;;
+ 3) echo "err" ;;
+ 4) echo "warning" ;;
+ 5) echo "notice" ;;
+ 6) echo "info" ;;
+ 7) echo "debug" ;;
+ *) echo "$1" ;;
+ esac
+}
+log_debug() {
+ log_priority 7 || return 0
+ echoerr "$(log_prefix)" "$(log_tag 7)" "$@"
+}
+log_info() {
+ log_priority 6 || return 0
+ echoerr "$(log_prefix)" "$(log_tag 6)" "$@"
+}
+log_err() {
+ log_priority 3 || return 0
+ echoerr "$(log_prefix)" "$(log_tag 3)" "$@"
+}
+log_crit() {
+ log_priority 2 || return 0
+ echoerr "$(log_prefix)" "$(log_tag 2)" "$@"
+}
+uname_os() {
+ os=$(uname -s | tr '[:upper:]' '[:lower:]')
+ case "$os" in
+ cygwin_nt*) os="windows" ;;
+ mingw*) os="windows" ;;
+ msys_nt*) os="windows" ;;
+ esac
+ echo "$os"
+}
+uname_arch() {
+ arch=$(uname -m)
+ case $arch in
+ x86_64) arch="amd64" ;;
+ x86) arch="386" ;;
+ i686) arch="386" ;;
+ i386) arch="386" ;;
+ aarch64) arch="arm64" ;;
+ armv5*) arch="armv5" ;;
+ armv6*) arch="armv6" ;;
+ armv7*) arch="armv7" ;;
+ esac
+ echo ${arch}
+}
+uname_os_check() {
+ os=$(uname_os)
+ case "$os" in
+ darwin) return 0 ;;
+ dragonfly) return 0 ;;
+ freebsd) return 0 ;;
+ linux) return 0 ;;
+ android) return 0 ;;
+ nacl) return 0 ;;
+ netbsd) return 0 ;;
+ openbsd) return 0 ;;
+ plan9) return 0 ;;
+ solaris) return 0 ;;
+ windows) return 0 ;;
+ esac
+ log_crit "uname_os_check '$(uname -s)' got converted to '$os' which is not a GOOS value. Please file bug at https://github.com/client9/shlib"
+ return 1
+}
+uname_arch_check() {
+ arch=$(uname_arch)
+ case "$arch" in
+ 386) return 0 ;;
+ amd64) return 0 ;;
+ arm64) return 0 ;;
+ armv5) return 0 ;;
+ armv6) return 0 ;;
+ armv7) return 0 ;;
+ ppc64) return 0 ;;
+ ppc64le) return 0 ;;
+ mips) return 0 ;;
+ mipsle) return 0 ;;
+ mips64) return 0 ;;
+ mips64le) return 0 ;;
+ s390x) return 0 ;;
+ amd64p32) return 0 ;;
+ esac
+ log_crit "uname_arch_check '$(uname -m)' got converted to '$arch' which is not a GOARCH value. Please file bug report at https://github.com/client9/shlib"
+ return 1
+}
+untar() {
+ tarball=$1
+ case "${tarball}" in
+ *.tar.gz | *.tgz) tar --no-same-owner -xzf "${tarball}" ;;
+ *.tar) tar --no-same-owner -xf "${tarball}" ;;
+ *.zip) unzip "${tarball}" ;;
+ *)
+ log_err "untar unknown archive format for ${tarball}"
+ return 1
+ ;;
+ esac
+}
+http_download_curl() {
+ local_file=$1
+ source_url=$2
+ header=$3
+ if [ -z "$header" ]; then
+ code=$(curl -w '%{http_code}' -sL -o "$local_file" "$source_url")
+ else
+ code=$(curl -w '%{http_code}' -sL -H "$header" -o "$local_file" "$source_url")
+ fi
+ if [ "$code" != "200" ]; then
+ log_debug "http_download_curl received HTTP status $code"
+ return 1
+ fi
+ return 0
+}
+http_download_wget() {
+ local_file=$1
+ source_url=$2
+ header=$3
+ if [ -z "$header" ]; then
+ wget -q -O "$local_file" "$source_url"
+ else
+ wget -q --header "$header" -O "$local_file" "$source_url"
+ fi
+}
+http_download() {
+ log_debug "http_download $2"
+ if is_command curl; then
+ http_download_curl "$@"
+ return
+ elif is_command wget; then
+ http_download_wget "$@"
+ return
+ fi
+ log_crit "http_download unable to find wget or curl"
+ return 1
+}
+http_copy() {
+ tmp=$(mktemp)
+ http_download "${tmp}" "$1" "$2" || return 1
+ body=$(cat "$tmp")
+ rm -f "${tmp}"
+ echo "$body"
+}
+github_release() {
+ owner_repo=$1
+ version=$2
+ test -z "$version" && version="latest"
+ giturl="https://github.com/${owner_repo}/releases/${version}"
+ json=$(http_copy "$giturl" "Accept:application/json")
+ test -z "$json" && return 1
+ version=$(echo "$json" | tr -s '\n' ' ' | sed 's/.*"tag_name":"//' | sed 's/".*//')
+ test -z "$version" && return 1
+ echo "$version"
+}
+hash_sha256() {
+ TARGET=${1:-/dev/stdin}
+ if is_command gsha256sum; then
+ hash=$(gsha256sum "$TARGET") || return 1
+ echo "$hash" | cut -d ' ' -f 1
+ elif is_command sha256sum; then
+ hash=$(sha256sum "$TARGET") || return 1
+ echo "$hash" | cut -d ' ' -f 1
+ elif is_command shasum; then
+ hash=$(shasum -a 256 "$TARGET" 2>/dev/null) || return 1
+ echo "$hash" | cut -d ' ' -f 1
+ elif is_command openssl; then
+ hash=$(openssl -dst openssl dgst -sha256 "$TARGET") || return 1
+ echo "$hash" | cut -d ' ' -f a
+ else
+ log_crit "hash_sha256 unable to find command to compute sha-256 hash"
+ return 1
+ fi
+}
+hash_sha256_verify() {
+ TARGET=$1
+ checksums=$2
+ if [ -z "$checksums" ]; then
+ log_err "hash_sha256_verify checksum file not specified in arg2"
+ return 1
+ fi
+ BASENAME=${TARGET##*/}
+ want=$(grep "${BASENAME}" "${checksums}" 2>/dev/null | tr '\t' ' ' | cut -d ' ' -f 1)
+ if [ -z "$want" ]; then
+ log_err "hash_sha256_verify unable to find checksum for '${TARGET}' in '${checksums}'"
+ return 1
+ fi
+ got=$(hash_sha256 "$TARGET")
+ if [ "$want" != "$got" ]; then
+ log_err "hash_sha256_verify checksum for '$TARGET' did not verify ${want} vs $got"
+ return 1
+ fi
+}
+cat /dev/null < 1 {
+ arg := callExpr.Args[1]
+ if bl, ok := arg.(*ast.BasicLit); ok {
+ if arg, err := gosec.GetString(bl); err == nil {
+ if r.pattern.MatchString(arg) {
+ return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil
+ }
+ }
+ } else if ident, ok := arg.(*ast.Ident); ok {
+ values := gosec.GetIdentStringValues(ident)
+ for _, value := range values {
+ if r.pattern.MatchString(value) {
+ return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil
+ }
+ }
+ }
+ } else if len(callExpr.Args) > 0 {
+ values := gosec.GetCallStringArgsValues(callExpr.Args[0], c)
+ for _, value := range values {
+ if r.pattern.MatchString(value) {
+ return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil
+ }
+ }
+ }
+ return nil, nil
+}
+
+// NewBindsToAllNetworkInterfaces detects socket connections that are setup to
+// listen on all network interfaces.
+func NewBindsToAllNetworkInterfaces(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
+ calls := gosec.NewCallList()
+ calls.Add("net", "Listen")
+ calls.Add("crypto/tls", "Listen")
+ return &bindsToAllNetworkInterfaces{
+ calls: calls,
+ pattern: regexp.MustCompile(`^(0.0.0.0|:).*$`),
+ MetaData: gosec.MetaData{
+ ID: id,
+ Severity: gosec.Medium,
+ Confidence: gosec.High,
+ What: "Binds to all network interfaces",
+ },
+ }, []ast.Node{(*ast.CallExpr)(nil)}
+}
diff --git a/vendor/github.com/securego/gosec/v2/rules/blacklist.go b/vendor/github.com/securego/gosec/v2/rules/blacklist.go
new file mode 100644
index 0000000..9bb7338
--- /dev/null
+++ b/vendor/github.com/securego/gosec/v2/rules/blacklist.go
@@ -0,0 +1,94 @@
+// (c) Copyright 2016 Hewlett Packard Enterprise Development LP
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rules
+
+import (
+ "go/ast"
+ "strings"
+
+ "github.com/securego/gosec/v2"
+)
+
+type blacklistedImport struct {
+ gosec.MetaData
+ Blacklisted map[string]string
+}
+
+func unquote(original string) string {
+ copy := strings.TrimSpace(original)
+ copy = strings.TrimLeft(copy, `"`)
+ return strings.TrimRight(copy, `"`)
+}
+
+func (r *blacklistedImport) ID() string {
+ return r.MetaData.ID
+}
+
+func (r *blacklistedImport) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) {
+ if node, ok := n.(*ast.ImportSpec); ok {
+ if description, ok := r.Blacklisted[unquote(node.Path.Value)]; ok {
+ return gosec.NewIssue(c, node, r.ID(), description, r.Severity, r.Confidence), nil
+ }
+ }
+ return nil, nil
+}
+
+// NewBlacklistedImports reports when a blacklisted import is being used.
+// Typically when a deprecated technology is being used.
+func NewBlacklistedImports(id string, conf gosec.Config, blacklist map[string]string) (gosec.Rule, []ast.Node) {
+ return &blacklistedImport{
+ MetaData: gosec.MetaData{
+ ID: id,
+ Severity: gosec.Medium,
+ Confidence: gosec.High,
+ },
+ Blacklisted: blacklist,
+ }, []ast.Node{(*ast.ImportSpec)(nil)}
+}
+
+// NewBlacklistedImportMD5 fails if MD5 is imported
+func NewBlacklistedImportMD5(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
+ return NewBlacklistedImports(id, conf, map[string]string{
+ "crypto/md5": "Blacklisted import crypto/md5: weak cryptographic primitive",
+ })
+}
+
+// NewBlacklistedImportDES fails if DES is imported
+func NewBlacklistedImportDES(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
+ return NewBlacklistedImports(id, conf, map[string]string{
+ "crypto/des": "Blacklisted import crypto/des: weak cryptographic primitive",
+ })
+}
+
+// NewBlacklistedImportRC4 fails if DES is imported
+func NewBlacklistedImportRC4(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
+ return NewBlacklistedImports(id, conf, map[string]string{
+ "crypto/rc4": "Blacklisted import crypto/rc4: weak cryptographic primitive",
+ })
+}
+
+// NewBlacklistedImportCGI fails if CGI is imported
+func NewBlacklistedImportCGI(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
+ return NewBlacklistedImports(id, conf, map[string]string{
+ "net/http/cgi": "Blacklisted import net/http/cgi: Go versions < 1.6.3 are vulnerable to Httpoxy attack: (CVE-2016-5386)",
+ })
+}
+
+// NewBlacklistedImportSHA1 fails if SHA1 is imported
+func NewBlacklistedImportSHA1(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
+ return NewBlacklistedImports(id, conf, map[string]string{
+ "crypto/sha1": "Blacklisted import crypto/sha1: weak cryptographic primitive",
+ })
+}
diff --git a/vendor/github.com/securego/gosec/v2/rules/decompression-bomb.go b/vendor/github.com/securego/gosec/v2/rules/decompression-bomb.go
new file mode 100644
index 0000000..bfc5897
--- /dev/null
+++ b/vendor/github.com/securego/gosec/v2/rules/decompression-bomb.go
@@ -0,0 +1,109 @@
+// (c) Copyright 2016 Hewlett Packard Enterprise Development LP
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rules
+
+import (
+ "fmt"
+ "go/ast"
+
+ "github.com/securego/gosec/v2"
+)
+
+type decompressionBombCheck struct {
+ gosec.MetaData
+ readerCalls gosec.CallList
+ copyCalls gosec.CallList
+}
+
+func (d *decompressionBombCheck) ID() string {
+ return d.MetaData.ID
+}
+
+func containsReaderCall(node ast.Node, ctx *gosec.Context, list gosec.CallList) bool {
+ if list.ContainsPkgCallExpr(node, ctx, false) != nil {
+ return true
+ }
+ // Resolve type info of ident (for *archive/zip.File.Open)
+ s, idt, _ := gosec.GetCallInfo(node, ctx)
+ return list.Contains(s, idt)
+}
+
+func (d *decompressionBombCheck) Match(node ast.Node, ctx *gosec.Context) (*gosec.Issue, error) {
+ var readerVarObj map[*ast.Object]struct{}
+
+ // To check multiple lines, ctx.PassedValues is used to store temporary data.
+ if _, ok := ctx.PassedValues[d.ID()]; !ok {
+ readerVarObj = make(map[*ast.Object]struct{})
+ ctx.PassedValues[d.ID()] = readerVarObj
+ } else if pv, ok := ctx.PassedValues[d.ID()].(map[*ast.Object]struct{}); ok {
+ readerVarObj = pv
+ } else {
+ return nil, fmt.Errorf("PassedValues[%s] of Context is not map[*ast.Object]struct{}, but %T", d.ID(), ctx.PassedValues[d.ID()])
+ }
+
+ // io.Copy is a common function.
+ // To reduce false positives, This rule detects code which is used for compressed data only.
+ switch n := node.(type) {
+ case *ast.AssignStmt:
+ for _, expr := range n.Rhs {
+ if callExpr, ok := expr.(*ast.CallExpr); ok && containsReaderCall(callExpr, ctx, d.readerCalls) {
+ if idt, ok := n.Lhs[0].(*ast.Ident); ok && idt.Name != "_" {
+ // Example:
+ // r, _ := zlib.NewReader(buf)
+ // Add r's Obj to readerVarObj map
+ readerVarObj[idt.Obj] = struct{}{}
+ }
+ }
+ }
+ case *ast.CallExpr:
+ if d.copyCalls.ContainsPkgCallExpr(n, ctx, false) != nil {
+ if idt, ok := n.Args[1].(*ast.Ident); ok {
+ if _, ok := readerVarObj[idt.Obj]; ok {
+ // Detect io.Copy(x, r)
+ return gosec.NewIssue(ctx, n, d.ID(), d.What, d.Severity, d.Confidence), nil
+ }
+ }
+ }
+ }
+
+ return nil, nil
+}
+
+// NewDecompressionBombCheck detects if there is potential DoS vulnerability via decompression bomb
+func NewDecompressionBombCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
+ readerCalls := gosec.NewCallList()
+ readerCalls.Add("compress/gzip", "NewReader")
+ readerCalls.AddAll("compress/zlib", "NewReader", "NewReaderDict")
+ readerCalls.Add("compress/bzip2", "NewReader")
+ readerCalls.AddAll("compress/flate", "NewReader", "NewReaderDict")
+ readerCalls.Add("compress/lzw", "NewReader")
+ readerCalls.Add("archive/tar", "NewReader")
+ readerCalls.Add("archive/zip", "NewReader")
+ readerCalls.Add("*archive/zip.File", "Open")
+
+ copyCalls := gosec.NewCallList()
+ copyCalls.Add("io", "Copy")
+
+ return &decompressionBombCheck{
+ MetaData: gosec.MetaData{
+ ID: id,
+ Severity: gosec.Medium,
+ Confidence: gosec.Medium,
+ What: "Potential DoS vulnerability via decompression bomb",
+ },
+ readerCalls: readerCalls,
+ copyCalls: copyCalls,
+ }, []ast.Node{(*ast.FuncDecl)(nil), (*ast.AssignStmt)(nil), (*ast.CallExpr)(nil)}
+}
diff --git a/vendor/github.com/securego/gosec/v2/rules/errors.go b/vendor/github.com/securego/gosec/v2/rules/errors.go
new file mode 100644
index 0000000..f16f91d
--- /dev/null
+++ b/vendor/github.com/securego/gosec/v2/rules/errors.go
@@ -0,0 +1,119 @@
+// (c) Copyright 2016 Hewlett Packard Enterprise Development LP
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rules
+
+import (
+ "go/ast"
+ "go/types"
+
+ "github.com/securego/gosec/v2"
+)
+
+type noErrorCheck struct {
+ gosec.MetaData
+ whitelist gosec.CallList
+}
+
+func (r *noErrorCheck) ID() string {
+ return r.MetaData.ID
+}
+
+func returnsError(callExpr *ast.CallExpr, ctx *gosec.Context) int {
+ if tv := ctx.Info.TypeOf(callExpr); tv != nil {
+ switch t := tv.(type) {
+ case *types.Tuple:
+ for pos := 0; pos < t.Len(); pos++ {
+ variable := t.At(pos)
+ if variable != nil && variable.Type().String() == "error" {
+ return pos
+ }
+ }
+ case *types.Named:
+ if t.String() == "error" {
+ return 0
+ }
+ }
+ }
+ return -1
+}
+
+func (r *noErrorCheck) Match(n ast.Node, ctx *gosec.Context) (*gosec.Issue, error) {
+ switch stmt := n.(type) {
+ case *ast.AssignStmt:
+ cfg := ctx.Config
+ if enabled, err := cfg.IsGlobalEnabled(gosec.Audit); err == nil && enabled {
+ for _, expr := range stmt.Rhs {
+ if callExpr, ok := expr.(*ast.CallExpr); ok && r.whitelist.ContainsCallExpr(expr, ctx) == nil {
+ pos := returnsError(callExpr, ctx)
+ if pos < 0 || pos >= len(stmt.Lhs) {
+ return nil, nil
+ }
+ if id, ok := stmt.Lhs[pos].(*ast.Ident); ok && id.Name == "_" {
+ return gosec.NewIssue(ctx, n, r.ID(), r.What, r.Severity, r.Confidence), nil
+ }
+ }
+ }
+ }
+ case *ast.ExprStmt:
+ if callExpr, ok := stmt.X.(*ast.CallExpr); ok && r.whitelist.ContainsCallExpr(stmt.X, ctx) == nil {
+ pos := returnsError(callExpr, ctx)
+ if pos >= 0 {
+ return gosec.NewIssue(ctx, n, r.ID(), r.What, r.Severity, r.Confidence), nil
+ }
+ }
+ }
+ return nil, nil
+}
+
+// NewNoErrorCheck detects if the returned error is unchecked
+func NewNoErrorCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
+ // TODO(gm) Come up with sensible defaults here. Or flip it to use a
+ // black list instead.
+ whitelist := gosec.NewCallList()
+ whitelist.AddAll("bytes.Buffer", "Write", "WriteByte", "WriteRune", "WriteString")
+ whitelist.AddAll("fmt", "Print", "Printf", "Println", "Fprint", "Fprintf", "Fprintln")
+ whitelist.AddAll("strings.Builder", "Write", "WriteByte", "WriteRune", "WriteString")
+ whitelist.Add("io.PipeWriter", "CloseWithError")
+
+ if configured, ok := conf["G104"]; ok {
+ if whitelisted, ok := configured.(map[string]interface{}); ok {
+ for pkg, funcs := range whitelisted {
+ if funcs, ok := funcs.([]interface{}); ok {
+ whitelist.AddAll(pkg, toStringSlice(funcs)...)
+ }
+ }
+ }
+ }
+
+ return &noErrorCheck{
+ MetaData: gosec.MetaData{
+ ID: id,
+ Severity: gosec.Low,
+ Confidence: gosec.High,
+ What: "Errors unhandled.",
+ },
+ whitelist: whitelist,
+ }, []ast.Node{(*ast.AssignStmt)(nil), (*ast.ExprStmt)(nil)}
+}
+
+func toStringSlice(values []interface{}) []string {
+ result := []string{}
+ for _, value := range values {
+ if value, ok := value.(string); ok {
+ result = append(result, value)
+ }
+ }
+ return result
+}
diff --git a/vendor/github.com/securego/gosec/v2/rules/fileperms.go b/vendor/github.com/securego/gosec/v2/rules/fileperms.go
new file mode 100644
index 0000000..ffe7b97
--- /dev/null
+++ b/vendor/github.com/securego/gosec/v2/rules/fileperms.go
@@ -0,0 +1,111 @@
+// (c) Copyright 2016 Hewlett Packard Enterprise Development LP
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rules
+
+import (
+ "fmt"
+ "go/ast"
+ "strconv"
+
+ "github.com/securego/gosec/v2"
+)
+
+type filePermissions struct {
+ gosec.MetaData
+ mode int64
+ pkg string
+ calls []string
+}
+
+func (r *filePermissions) ID() string {
+ return r.MetaData.ID
+}
+
+func getConfiguredMode(conf map[string]interface{}, configKey string, defaultMode int64) int64 {
+ var mode = defaultMode
+ if value, ok := conf[configKey]; ok {
+ switch value := value.(type) {
+ case int64:
+ mode = value
+ case string:
+ if m, e := strconv.ParseInt(value, 0, 64); e != nil {
+ mode = defaultMode
+ } else {
+ mode = m
+ }
+ }
+ }
+ return mode
+}
+
+func (r *filePermissions) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) {
+ if callexpr, matched := gosec.MatchCallByPackage(n, c, r.pkg, r.calls...); matched {
+ modeArg := callexpr.Args[len(callexpr.Args)-1]
+ if mode, err := gosec.GetInt(modeArg); err == nil && mode > r.mode {
+ return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil
+ }
+ }
+ return nil, nil
+}
+
+// NewWritePerms creates a rule to detect file Writes with bad permissions.
+func NewWritePerms(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
+ mode := getConfiguredMode(conf, "G306", 0600)
+ return &filePermissions{
+ mode: mode,
+ pkg: "io/ioutil",
+ calls: []string{"WriteFile"},
+ MetaData: gosec.MetaData{
+ ID: id,
+ Severity: gosec.Medium,
+ Confidence: gosec.High,
+ What: fmt.Sprintf("Expect WriteFile permissions to be %#o or less", mode),
+ },
+ }, []ast.Node{(*ast.CallExpr)(nil)}
+}
+
+// NewFilePerms creates a rule to detect file creation with a more permissive than configured
+// permission mask.
+func NewFilePerms(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
+ mode := getConfiguredMode(conf, "G302", 0600)
+ return &filePermissions{
+ mode: mode,
+ pkg: "os",
+ calls: []string{"OpenFile", "Chmod"},
+ MetaData: gosec.MetaData{
+ ID: id,
+ Severity: gosec.Medium,
+ Confidence: gosec.High,
+ What: fmt.Sprintf("Expect file permissions to be %#o or less", mode),
+ },
+ }, []ast.Node{(*ast.CallExpr)(nil)}
+}
+
+// NewMkdirPerms creates a rule to detect directory creation with more permissive than
+// configured permission mask.
+func NewMkdirPerms(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
+ mode := getConfiguredMode(conf, "G301", 0750)
+ return &filePermissions{
+ mode: mode,
+ pkg: "os",
+ calls: []string{"Mkdir", "MkdirAll"},
+ MetaData: gosec.MetaData{
+ ID: id,
+ Severity: gosec.Medium,
+ Confidence: gosec.High,
+ What: fmt.Sprintf("Expect directory permissions to be %#o or less", mode),
+ },
+ }, []ast.Node{(*ast.CallExpr)(nil)}
+}
diff --git a/vendor/github.com/securego/gosec/v2/rules/hardcoded_credentials.go b/vendor/github.com/securego/gosec/v2/rules/hardcoded_credentials.go
new file mode 100644
index 0000000..6b360c5
--- /dev/null
+++ b/vendor/github.com/securego/gosec/v2/rules/hardcoded_credentials.go
@@ -0,0 +1,173 @@
+// (c) Copyright 2016 Hewlett Packard Enterprise Development LP
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rules
+
+import (
+ "go/ast"
+ "go/token"
+ "regexp"
+ "strconv"
+
+ zxcvbn "github.com/nbutton23/zxcvbn-go"
+ "github.com/securego/gosec/v2"
+)
+
+type credentials struct {
+ gosec.MetaData
+ pattern *regexp.Regexp
+ entropyThreshold float64
+ perCharThreshold float64
+ truncate int
+ ignoreEntropy bool
+}
+
+func (r *credentials) ID() string {
+ return r.MetaData.ID
+}
+
+func truncate(s string, n int) string {
+ if n > len(s) {
+ return s
+ }
+ return s[:n]
+}
+
+func (r *credentials) isHighEntropyString(str string) bool {
+ s := truncate(str, r.truncate)
+ info := zxcvbn.PasswordStrength(s, []string{})
+ entropyPerChar := info.Entropy / float64(len(s))
+ return (info.Entropy >= r.entropyThreshold ||
+ (info.Entropy >= (r.entropyThreshold/2) &&
+ entropyPerChar >= r.perCharThreshold))
+}
+
+func (r *credentials) Match(n ast.Node, ctx *gosec.Context) (*gosec.Issue, error) {
+ switch node := n.(type) {
+ case *ast.AssignStmt:
+ return r.matchAssign(node, ctx)
+ case *ast.ValueSpec:
+ return r.matchValueSpec(node, ctx)
+ case *ast.BinaryExpr:
+ return r.matchEqualityCheck(node, ctx)
+ }
+ return nil, nil
+}
+
+func (r *credentials) matchAssign(assign *ast.AssignStmt, ctx *gosec.Context) (*gosec.Issue, error) {
+ for _, i := range assign.Lhs {
+ if ident, ok := i.(*ast.Ident); ok {
+ if r.pattern.MatchString(ident.Name) {
+ for _, e := range assign.Rhs {
+ if val, err := gosec.GetString(e); err == nil {
+ if r.ignoreEntropy || (!r.ignoreEntropy && r.isHighEntropyString(val)) {
+ return gosec.NewIssue(ctx, assign, r.ID(), r.What, r.Severity, r.Confidence), nil
+ }
+ }
+ }
+ }
+ }
+ }
+ return nil, nil
+}
+
+func (r *credentials) matchValueSpec(valueSpec *ast.ValueSpec, ctx *gosec.Context) (*gosec.Issue, error) {
+ for index, ident := range valueSpec.Names {
+ if r.pattern.MatchString(ident.Name) && valueSpec.Values != nil {
+ // const foo, bar = "same value"
+ if len(valueSpec.Values) <= index {
+ index = len(valueSpec.Values) - 1
+ }
+ if val, err := gosec.GetString(valueSpec.Values[index]); err == nil {
+ if r.ignoreEntropy || (!r.ignoreEntropy && r.isHighEntropyString(val)) {
+ return gosec.NewIssue(ctx, valueSpec, r.ID(), r.What, r.Severity, r.Confidence), nil
+ }
+ }
+ }
+ }
+ return nil, nil
+}
+
+func (r *credentials) matchEqualityCheck(binaryExpr *ast.BinaryExpr, ctx *gosec.Context) (*gosec.Issue, error) {
+ if binaryExpr.Op == token.EQL || binaryExpr.Op == token.NEQ {
+ if ident, ok := binaryExpr.X.(*ast.Ident); ok {
+ if r.pattern.MatchString(ident.Name) {
+ if val, err := gosec.GetString(binaryExpr.Y); err == nil {
+ if r.ignoreEntropy || (!r.ignoreEntropy && r.isHighEntropyString(val)) {
+ return gosec.NewIssue(ctx, binaryExpr, r.ID(), r.What, r.Severity, r.Confidence), nil
+ }
+ }
+ }
+ }
+ }
+ return nil, nil
+}
+
+// NewHardcodedCredentials attempts to find high entropy string constants being
+// assigned to variables that appear to be related to credentials.
+func NewHardcodedCredentials(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
+ pattern := `(?i)passwd|pass|password|pwd|secret|token`
+ entropyThreshold := 80.0
+ perCharThreshold := 3.0
+ ignoreEntropy := false
+ var truncateString = 16
+ if val, ok := conf["G101"]; ok {
+ conf := val.(map[string]interface{})
+ if configPattern, ok := conf["pattern"]; ok {
+ if cfgPattern, ok := configPattern.(string); ok {
+ pattern = cfgPattern
+ }
+ }
+ if configIgnoreEntropy, ok := conf["ignore_entropy"]; ok {
+ if cfgIgnoreEntropy, ok := configIgnoreEntropy.(bool); ok {
+ ignoreEntropy = cfgIgnoreEntropy
+ }
+ }
+ if configEntropyThreshold, ok := conf["entropy_threshold"]; ok {
+ if cfgEntropyThreshold, ok := configEntropyThreshold.(string); ok {
+ if parsedNum, err := strconv.ParseFloat(cfgEntropyThreshold, 64); err == nil {
+ entropyThreshold = parsedNum
+ }
+ }
+ }
+ if configCharThreshold, ok := conf["per_char_threshold"]; ok {
+ if cfgCharThreshold, ok := configCharThreshold.(string); ok {
+ if parsedNum, err := strconv.ParseFloat(cfgCharThreshold, 64); err == nil {
+ perCharThreshold = parsedNum
+ }
+ }
+ }
+ if configTruncate, ok := conf["truncate"]; ok {
+ if cfgTruncate, ok := configTruncate.(string); ok {
+ if parsedInt, err := strconv.Atoi(cfgTruncate); err == nil {
+ truncateString = parsedInt
+ }
+ }
+ }
+ }
+
+ return &credentials{
+ pattern: regexp.MustCompile(pattern),
+ entropyThreshold: entropyThreshold,
+ perCharThreshold: perCharThreshold,
+ ignoreEntropy: ignoreEntropy,
+ truncate: truncateString,
+ MetaData: gosec.MetaData{
+ ID: id,
+ What: "Potential hardcoded credentials",
+ Confidence: gosec.Low,
+ Severity: gosec.High,
+ },
+ }, []ast.Node{(*ast.AssignStmt)(nil), (*ast.ValueSpec)(nil), (*ast.BinaryExpr)(nil)}
+}
diff --git a/vendor/github.com/securego/gosec/v2/rules/implicit_aliasing.go b/vendor/github.com/securego/gosec/v2/rules/implicit_aliasing.go
new file mode 100644
index 0000000..65c7ae3
--- /dev/null
+++ b/vendor/github.com/securego/gosec/v2/rules/implicit_aliasing.go
@@ -0,0 +1,116 @@
+package rules
+
+import (
+ "fmt"
+ "github.com/securego/gosec/v2"
+ "go/ast"
+ "go/token"
+)
+
+type implicitAliasing struct {
+ gosec.MetaData
+ aliases map[*ast.Object]struct{}
+ rightBrace token.Pos
+ acceptableAlias []*ast.UnaryExpr
+}
+
+func (r *implicitAliasing) ID() string {
+ return r.MetaData.ID
+}
+
+func containsUnary(exprs []*ast.UnaryExpr, expr *ast.UnaryExpr) bool {
+ for _, e := range exprs {
+ if e == expr {
+ return true
+ }
+ }
+ return false
+}
+
+func (r *implicitAliasing) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) {
+ switch node := n.(type) {
+ case *ast.RangeStmt:
+ // When presented with a range statement, get the underlying Object bound to
+ // by assignment and add it to our set (r.aliases) of objects to check for.
+ if key, ok := node.Value.(*ast.Ident); ok {
+ if assignment, ok := key.Obj.Decl.(*ast.AssignStmt); ok {
+ if len(assignment.Lhs) < 2 {
+ return nil, nil
+ }
+
+ if object, ok := assignment.Lhs[1].(*ast.Ident); ok {
+ r.aliases[object.Obj] = struct{}{}
+
+ if r.rightBrace < node.Body.Rbrace {
+ r.rightBrace = node.Body.Rbrace
+ }
+ }
+ }
+ }
+ case *ast.UnaryExpr:
+ // If this unary expression is outside of the last range statement we were looking at
+ // then clear the list of objects we're concerned about because they're no longer in
+ // scope
+ if node.Pos() > r.rightBrace {
+ r.aliases = make(map[*ast.Object]struct{})
+ r.acceptableAlias = make([]*ast.UnaryExpr, 0)
+ }
+
+ // Short circuit logic to skip checking aliases if we have nothing to check against.
+ if len(r.aliases) == 0 {
+ return nil, nil
+ }
+
+ // If this unary is at the top level of a return statement then it is okay--
+ // see *ast.ReturnStmt comment below.
+ if containsUnary(r.acceptableAlias, node) {
+ return nil, nil
+ }
+
+ // If we find a unary op of & (reference) of an object within r.aliases, complain.
+ if ident, ok := node.X.(*ast.Ident); ok && node.Op.String() == "&" {
+ if _, contains := r.aliases[ident.Obj]; contains {
+ return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil
+ }
+ }
+ case *ast.ReturnStmt:
+ // Returning a rangeStmt yielded value is acceptable since only one value will be returned
+ for _, item := range node.Results {
+ if unary, ok := item.(*ast.UnaryExpr); ok && unary.Op.String() == "&" {
+ r.acceptableAlias = append(r.acceptableAlias, unary)
+ }
+ }
+ }
+
+ return nil, nil
+}
+
+// NewImplicitAliasing detects implicit memory aliasing of type: for blah := SomeCall() {... SomeOtherCall(&blah) ...}
+func NewImplicitAliasing(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
+ return &implicitAliasing{
+ aliases: make(map[*ast.Object]struct{}),
+ rightBrace: token.NoPos,
+ acceptableAlias: make([]*ast.UnaryExpr, 0),
+ MetaData: gosec.MetaData{
+ ID: id,
+ Severity: gosec.Medium,
+ Confidence: gosec.Medium,
+ What: fmt.Sprintf("Implicit memory aliasing in for loop."),
+ },
+ }, []ast.Node{(*ast.RangeStmt)(nil), (*ast.UnaryExpr)(nil), (*ast.ReturnStmt)(nil)}
+}
+
+/*
+This rule is prone to flag false positives.
+
+Within GoSec, the rule is just an AST match-- there are a handful of other
+implementation strategies which might lend more nuance to the rule at the
+cost of allowing false negatives.
+
+From a tooling side, I'd rather have this rule flag false positives than
+potentially have some false negatives-- especially if the sentiment of this
+rule (as I understand it, and Go) is that referencing a rangeStmt-yielded
+value is kinda strange and does not have a strongly justified use case.
+
+Which is to say-- a false positive _should_ just be changed.
+*/
diff --git a/vendor/github.com/securego/gosec/v2/rules/integer_overflow.go b/vendor/github.com/securego/gosec/v2/rules/integer_overflow.go
new file mode 100644
index 0000000..dfcda94
--- /dev/null
+++ b/vendor/github.com/securego/gosec/v2/rules/integer_overflow.go
@@ -0,0 +1,89 @@
+// (c) Copyright 2016 Hewlett Packard Enterprise Development LP
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rules
+
+import (
+ "fmt"
+ "go/ast"
+
+ "github.com/securego/gosec/v2"
+)
+
+type integerOverflowCheck struct {
+ gosec.MetaData
+ calls gosec.CallList
+}
+
+func (i *integerOverflowCheck) ID() string {
+ return i.MetaData.ID
+}
+
+func (i *integerOverflowCheck) Match(node ast.Node, ctx *gosec.Context) (*gosec.Issue, error) {
+ var atoiVarObj map[*ast.Object]ast.Node
+
+ // To check multiple lines, ctx.PassedValues is used to store temporary data.
+ if _, ok := ctx.PassedValues[i.ID()]; !ok {
+ atoiVarObj = make(map[*ast.Object]ast.Node)
+ ctx.PassedValues[i.ID()] = atoiVarObj
+ } else if pv, ok := ctx.PassedValues[i.ID()].(map[*ast.Object]ast.Node); ok {
+ atoiVarObj = pv
+ } else {
+ return nil, fmt.Errorf("PassedValues[%s] of Context is not map[*ast.Object]ast.Node, but %T", i.ID(), ctx.PassedValues[i.ID()])
+ }
+
+ // strconv.Atoi is a common function.
+ // To reduce false positives, this rule detects code which is converted to int32/int16 only.
+ switch n := node.(type) {
+ case *ast.AssignStmt:
+ for _, expr := range n.Rhs {
+ if callExpr, ok := expr.(*ast.CallExpr); ok && i.calls.ContainsPkgCallExpr(callExpr, ctx, false) != nil {
+ if idt, ok := n.Lhs[0].(*ast.Ident); ok && idt.Name != "_" {
+ // Example:
+ // v, _ := strconv.Atoi("1111")
+ // Add v's Obj to atoiVarObj map
+ atoiVarObj[idt.Obj] = n
+ }
+ }
+ }
+ case *ast.CallExpr:
+ if fun, ok := n.Fun.(*ast.Ident); ok {
+ if fun.Name == "int32" || fun.Name == "int16" {
+ if idt, ok := n.Args[0].(*ast.Ident); ok {
+ if n, ok := atoiVarObj[idt.Obj]; ok {
+ // Detect int32(v) and int16(v)
+ return gosec.NewIssue(ctx, n, i.ID(), i.What, i.Severity, i.Confidence), nil
+ }
+ }
+ }
+ }
+ }
+
+ return nil, nil
+}
+
+// NewIntegerOverflowCheck detects if there is potential Integer OverFlow
+func NewIntegerOverflowCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
+ calls := gosec.NewCallList()
+ calls.Add("strconv", "Atoi")
+ return &integerOverflowCheck{
+ MetaData: gosec.MetaData{
+ ID: id,
+ Severity: gosec.High,
+ Confidence: gosec.Medium,
+ What: "Potential Integer overflow made by strconv.Atoi result conversion to int16/32",
+ },
+ calls: calls,
+ }, []ast.Node{(*ast.FuncDecl)(nil), (*ast.AssignStmt)(nil), (*ast.CallExpr)(nil)}
+}
diff --git a/vendor/github.com/securego/gosec/v2/rules/pprof.go b/vendor/github.com/securego/gosec/v2/rules/pprof.go
new file mode 100644
index 0000000..4c99af7
--- /dev/null
+++ b/vendor/github.com/securego/gosec/v2/rules/pprof.go
@@ -0,0 +1,42 @@
+package rules
+
+import (
+ "go/ast"
+
+ "github.com/securego/gosec/v2"
+)
+
+type pprofCheck struct {
+ gosec.MetaData
+ importPath string
+ importName string
+}
+
+// ID returns the ID of the check
+func (p *pprofCheck) ID() string {
+ return p.MetaData.ID
+}
+
+// Match checks for pprof imports
+func (p *pprofCheck) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) {
+ if node, ok := n.(*ast.ImportSpec); ok {
+ if p.importPath == unquote(node.Path.Value) && node.Name != nil && p.importName == node.Name.Name {
+ return gosec.NewIssue(c, node, p.ID(), p.What, p.Severity, p.Confidence), nil
+ }
+ }
+ return nil, nil
+}
+
+// NewPprofCheck detects when the profiling endpoint is automatically exposed
+func NewPprofCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
+ return &pprofCheck{
+ MetaData: gosec.MetaData{
+ ID: id,
+ Severity: gosec.High,
+ Confidence: gosec.High,
+ What: "Profiling endpoint is automatically exposed on /debug/pprof",
+ },
+ importPath: "net/http/pprof",
+ importName: "_",
+ }, []ast.Node{(*ast.ImportSpec)(nil)}
+}
diff --git a/vendor/github.com/securego/gosec/v2/rules/rand.go b/vendor/github.com/securego/gosec/v2/rules/rand.go
new file mode 100644
index 0000000..08c28fc
--- /dev/null
+++ b/vendor/github.com/securego/gosec/v2/rules/rand.go
@@ -0,0 +1,55 @@
+// (c) Copyright 2016 Hewlett Packard Enterprise Development LP
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rules
+
+import (
+ "go/ast"
+
+ "github.com/securego/gosec/v2"
+)
+
+type weakRand struct {
+ gosec.MetaData
+ funcNames []string
+ packagePath string
+}
+
+func (w *weakRand) ID() string {
+ return w.MetaData.ID
+}
+
+func (w *weakRand) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) {
+ for _, funcName := range w.funcNames {
+ if _, matched := gosec.MatchCallByPackage(n, c, w.packagePath, funcName); matched {
+ return gosec.NewIssue(c, n, w.ID(), w.What, w.Severity, w.Confidence), nil
+ }
+ }
+
+ return nil, nil
+}
+
+// NewWeakRandCheck detects the use of random number generator that isn't cryptographically secure
+func NewWeakRandCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
+ return &weakRand{
+ funcNames: []string{"Read", "Int"},
+ packagePath: "math/rand",
+ MetaData: gosec.MetaData{
+ ID: id,
+ Severity: gosec.High,
+ Confidence: gosec.Medium,
+ What: "Use of weak random number generator (math/rand instead of crypto/rand)",
+ },
+ }, []ast.Node{(*ast.CallExpr)(nil)}
+}
diff --git a/vendor/github.com/securego/gosec/v2/rules/readfile.go b/vendor/github.com/securego/gosec/v2/rules/readfile.go
new file mode 100644
index 0000000..a52f742
--- /dev/null
+++ b/vendor/github.com/securego/gosec/v2/rules/readfile.go
@@ -0,0 +1,106 @@
+// (c) Copyright 2016 Hewlett Packard Enterprise Development LP
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rules
+
+import (
+ "go/ast"
+ "go/types"
+
+ "github.com/securego/gosec/v2"
+)
+
+type readfile struct {
+ gosec.MetaData
+ gosec.CallList
+ pathJoin gosec.CallList
+}
+
+// ID returns the identifier for this rule
+func (r *readfile) ID() string {
+ return r.MetaData.ID
+}
+
+// isJoinFunc checks if there is a filepath.Join or other join function
+func (r *readfile) isJoinFunc(n ast.Node, c *gosec.Context) bool {
+ if call := r.pathJoin.ContainsPkgCallExpr(n, c, false); call != nil {
+ for _, arg := range call.Args {
+ // edge case: check if one of the args is a BinaryExpr
+ if binExp, ok := arg.(*ast.BinaryExpr); ok {
+ // iterate and resolve all found identities from the BinaryExpr
+ if _, ok := gosec.FindVarIdentities(binExp, c); ok {
+ return true
+ }
+ }
+
+ // try and resolve identity
+ if ident, ok := arg.(*ast.Ident); ok {
+ obj := c.Info.ObjectOf(ident)
+ if _, ok := obj.(*types.Var); ok && !gosec.TryResolve(ident, c) {
+ return true
+ }
+ }
+ }
+ }
+ return false
+}
+
+// Match inspects AST nodes to determine if they match the methods `os.Open` or `ioutil.ReadFile`
+func (r *readfile) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) {
+ if node := r.ContainsPkgCallExpr(n, c, false); node != nil {
+ for _, arg := range node.Args {
+ // handles path joining functions in Arg
+ // eg. os.Open(filepath.Join("/tmp/", file))
+ if callExpr, ok := arg.(*ast.CallExpr); ok {
+ if r.isJoinFunc(callExpr, c) {
+ return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil
+ }
+ }
+ // handles binary string concatenation e.g. ioutil.ReadFile("/tmp/" + file + "/blob")
+ if binExp, ok := arg.(*ast.BinaryExpr); ok {
+ // resolve all found identities from the BinaryExpr
+ if _, ok := gosec.FindVarIdentities(binExp, c); ok {
+ return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil
+ }
+ }
+
+ if ident, ok := arg.(*ast.Ident); ok {
+ obj := c.Info.ObjectOf(ident)
+ if _, ok := obj.(*types.Var); ok && !gosec.TryResolve(ident, c) {
+ return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil
+ }
+ }
+ }
+ }
+ return nil, nil
+}
+
+// NewReadFile detects cases where we read files
+func NewReadFile(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
+ rule := &readfile{
+ pathJoin: gosec.NewCallList(),
+ CallList: gosec.NewCallList(),
+ MetaData: gosec.MetaData{
+ ID: id,
+ What: "Potential file inclusion via variable",
+ Severity: gosec.Medium,
+ Confidence: gosec.High,
+ },
+ }
+ rule.pathJoin.Add("path/filepath", "Join")
+ rule.pathJoin.Add("path", "Join")
+ rule.Add("io/ioutil", "ReadFile")
+ rule.Add("os", "Open")
+ return rule, []ast.Node{(*ast.CallExpr)(nil)}
+}
diff --git a/vendor/github.com/securego/gosec/v2/rules/rsa.go b/vendor/github.com/securego/gosec/v2/rules/rsa.go
new file mode 100644
index 0000000..f2ed5db
--- /dev/null
+++ b/vendor/github.com/securego/gosec/v2/rules/rsa.go
@@ -0,0 +1,58 @@
+// (c) Copyright 2016 Hewlett Packard Enterprise Development LP
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rules
+
+import (
+ "fmt"
+ "go/ast"
+
+ "github.com/securego/gosec/v2"
+)
+
+type weakKeyStrength struct {
+ gosec.MetaData
+ calls gosec.CallList
+ bits int
+}
+
+func (w *weakKeyStrength) ID() string {
+ return w.MetaData.ID
+}
+
+func (w *weakKeyStrength) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) {
+ if callExpr := w.calls.ContainsPkgCallExpr(n, c, false); callExpr != nil {
+ if bits, err := gosec.GetInt(callExpr.Args[1]); err == nil && bits < (int64)(w.bits) {
+ return gosec.NewIssue(c, n, w.ID(), w.What, w.Severity, w.Confidence), nil
+ }
+ }
+ return nil, nil
+}
+
+// NewWeakKeyStrength builds a rule that detects RSA keys < 2048 bits
+func NewWeakKeyStrength(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
+ calls := gosec.NewCallList()
+ calls.Add("crypto/rsa", "GenerateKey")
+ bits := 2048
+ return &weakKeyStrength{
+ calls: calls,
+ bits: bits,
+ MetaData: gosec.MetaData{
+ ID: id,
+ Severity: gosec.Medium,
+ Confidence: gosec.High,
+ What: fmt.Sprintf("RSA keys should be at least %d bits", bits),
+ },
+ }, []ast.Node{(*ast.CallExpr)(nil)}
+}
diff --git a/vendor/github.com/securego/gosec/v2/rules/rulelist.go b/vendor/github.com/securego/gosec/v2/rules/rulelist.go
new file mode 100644
index 0000000..06e1dfb
--- /dev/null
+++ b/vendor/github.com/securego/gosec/v2/rules/rulelist.go
@@ -0,0 +1,116 @@
+// (c) Copyright 2016 Hewlett Packard Enterprise Development LP
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rules
+
+import "github.com/securego/gosec/v2"
+
+// RuleDefinition contains the description of a rule and a mechanism to
+// create it.
+type RuleDefinition struct {
+ ID string
+ Description string
+ Create gosec.RuleBuilder
+}
+
+// RuleList is a mapping of rule ID's to rule definitions
+type RuleList map[string]RuleDefinition
+
+// Builders returns all the create methods for a given rule list
+func (rl RuleList) Builders() map[string]gosec.RuleBuilder {
+ builders := make(map[string]gosec.RuleBuilder)
+ for _, def := range rl {
+ builders[def.ID] = def.Create
+ }
+ return builders
+}
+
+// RuleFilter can be used to include or exclude a rule depending on the return
+// value of the function
+type RuleFilter func(string) bool
+
+// NewRuleFilter is a closure that will include/exclude the rule ID's based on
+// the supplied boolean value.
+func NewRuleFilter(action bool, ruleIDs ...string) RuleFilter {
+ rulelist := make(map[string]bool)
+ for _, rule := range ruleIDs {
+ rulelist[rule] = true
+ }
+ return func(rule string) bool {
+ if _, found := rulelist[rule]; found {
+ return action
+ }
+ return !action
+ }
+}
+
+// Generate the list of rules to use
+func Generate(filters ...RuleFilter) RuleList {
+ rules := []RuleDefinition{
+ // misc
+ {"G101", "Look for hardcoded credentials", NewHardcodedCredentials},
+ {"G102", "Bind to all interfaces", NewBindsToAllNetworkInterfaces},
+ {"G103", "Audit the use of unsafe block", NewUsingUnsafe},
+ {"G104", "Audit errors not checked", NewNoErrorCheck},
+ {"G106", "Audit the use of ssh.InsecureIgnoreHostKey function", NewSSHHostKey},
+ {"G107", "Url provided to HTTP request as taint input", NewSSRFCheck},
+ {"G108", "Profiling endpoint is automatically exposed", NewPprofCheck},
+ {"G109", "Converting strconv.Atoi result to int32/int16", NewIntegerOverflowCheck},
+ {"G110", "Detect io.Copy instead of io.CopyN when decompression", NewDecompressionBombCheck},
+
+ // injection
+ {"G201", "SQL query construction using format string", NewSQLStrFormat},
+ {"G202", "SQL query construction using string concatenation", NewSQLStrConcat},
+ {"G203", "Use of unescaped data in HTML templates", NewTemplateCheck},
+ {"G204", "Audit use of command execution", NewSubproc},
+
+ // filesystem
+ {"G301", "Poor file permissions used when creating a directory", NewMkdirPerms},
+ {"G302", "Poor file permissions used when creation file or using chmod", NewFilePerms},
+ {"G303", "Creating tempfile using a predictable path", NewBadTempFile},
+ {"G304", "File path provided as taint input", NewReadFile},
+ {"G305", "File path traversal when extracting zip archive", NewArchive},
+ {"G306", "Poor file permissions used when writing to a file", NewWritePerms},
+ {"G307", "Unsafe defer call of a method returning an error", NewDeferredClosing},
+
+ // crypto
+ {"G401", "Detect the usage of DES, RC4, MD5 or SHA1", NewUsesWeakCryptography},
+ {"G402", "Look for bad TLS connection settings", NewIntermediateTLSCheck},
+ {"G403", "Ensure minimum RSA key length of 2048 bits", NewWeakKeyStrength},
+ {"G404", "Insecure random number source (rand)", NewWeakRandCheck},
+
+ // blacklist
+ {"G501", "Import blacklist: crypto/md5", NewBlacklistedImportMD5},
+ {"G502", "Import blacklist: crypto/des", NewBlacklistedImportDES},
+ {"G503", "Import blacklist: crypto/rc4", NewBlacklistedImportRC4},
+ {"G504", "Import blacklist: net/http/cgi", NewBlacklistedImportCGI},
+ {"G505", "Import blacklist: crypto/sha1", NewBlacklistedImportSHA1},
+
+ // memory safety
+ {"G601", "Implicit memory aliasing in RangeStmt", NewImplicitAliasing},
+ }
+
+ ruleMap := make(map[string]RuleDefinition)
+
+RULES:
+ for _, rule := range rules {
+ for _, filter := range filters {
+ if filter(rule.ID) {
+ continue RULES
+ }
+ }
+ ruleMap[rule.ID] = rule
+ }
+ return ruleMap
+}
diff --git a/vendor/github.com/securego/gosec/v2/rules/sql.go b/vendor/github.com/securego/gosec/v2/rules/sql.go
new file mode 100644
index 0000000..3279a34
--- /dev/null
+++ b/vendor/github.com/securego/gosec/v2/rules/sql.go
@@ -0,0 +1,219 @@
+// (c) Copyright 2016 Hewlett Packard Enterprise Development LP
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rules
+
+import (
+ "go/ast"
+ "regexp"
+
+ "github.com/securego/gosec/v2"
+)
+
+type sqlStatement struct {
+ gosec.MetaData
+
+ // Contains a list of patterns which must all match for the rule to match.
+ patterns []*regexp.Regexp
+}
+
+func (s *sqlStatement) ID() string {
+ return s.MetaData.ID
+}
+
+// See if the string matches the patterns for the statement.
+func (s *sqlStatement) MatchPatterns(str string) bool {
+ for _, pattern := range s.patterns {
+ if !pattern.MatchString(str) {
+ return false
+ }
+ }
+ return true
+}
+
+type sqlStrConcat struct {
+ sqlStatement
+}
+
+func (s *sqlStrConcat) ID() string {
+ return s.MetaData.ID
+}
+
+// see if we can figure out what it is
+func (s *sqlStrConcat) checkObject(n *ast.Ident, c *gosec.Context) bool {
+ if n.Obj != nil {
+ return n.Obj.Kind != ast.Var && n.Obj.Kind != ast.Fun
+ }
+
+ // Try to resolve unresolved identifiers using other files in same package
+ for _, file := range c.PkgFiles {
+ if node, ok := file.Scope.Objects[n.String()]; ok {
+ return node.Kind != ast.Var && node.Kind != ast.Fun
+ }
+ }
+ return false
+}
+
+// Look for "SELECT * FROM table WHERE " + " ' OR 1=1"
+func (s *sqlStrConcat) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) {
+ if node, ok := n.(*ast.BinaryExpr); ok {
+ if start, ok := node.X.(*ast.BasicLit); ok {
+ if str, e := gosec.GetString(start); e == nil {
+ if !s.MatchPatterns(str) {
+ return nil, nil
+ }
+ if _, ok := node.Y.(*ast.BasicLit); ok {
+ return nil, nil // string cat OK
+ }
+ if second, ok := node.Y.(*ast.Ident); ok && s.checkObject(second, c) {
+ return nil, nil
+ }
+ return gosec.NewIssue(c, n, s.ID(), s.What, s.Severity, s.Confidence), nil
+ }
+ }
+ }
+ return nil, nil
+}
+
+// NewSQLStrConcat looks for cases where we are building SQL strings via concatenation
+func NewSQLStrConcat(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
+ return &sqlStrConcat{
+ sqlStatement: sqlStatement{
+ patterns: []*regexp.Regexp{
+ regexp.MustCompile(`(?)(SELECT|DELETE|INSERT|UPDATE|INTO|FROM|WHERE) `),
+ },
+ MetaData: gosec.MetaData{
+ ID: id,
+ Severity: gosec.Medium,
+ Confidence: gosec.High,
+ What: "SQL string concatenation",
+ },
+ },
+ }, []ast.Node{(*ast.BinaryExpr)(nil)}
+}
+
+type sqlStrFormat struct {
+ sqlStatement
+ calls gosec.CallList
+ noIssue gosec.CallList
+ noIssueQuoted gosec.CallList
+}
+
+// see if we can figure out what it is
+func (s *sqlStrFormat) constObject(e ast.Expr, c *gosec.Context) bool {
+ n, ok := e.(*ast.Ident)
+ if !ok {
+ return false
+ }
+
+ if n.Obj != nil {
+ return n.Obj.Kind == ast.Con
+ }
+
+ // Try to resolve unresolved identifiers using other files in same package
+ for _, file := range c.PkgFiles {
+ if node, ok := file.Scope.Objects[n.String()]; ok {
+ return node.Kind == ast.Con
+ }
+ }
+ return false
+}
+
+// Looks for fmt.Sprintf("SELECT * FROM foo where '%s'", userInput)
+func (s *sqlStrFormat) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) {
+
+ // argIndex changes the function argument which gets matched to the regex
+ argIndex := 0
+
+ // TODO(gm) improve confidence if database/sql is being used
+ if node := s.calls.ContainsPkgCallExpr(n, c, false); node != nil {
+ // if the function is fmt.Fprintf, search for SQL statement in Args[1] instead
+ if sel, ok := node.Fun.(*ast.SelectorExpr); ok {
+ if sel.Sel.Name == "Fprintf" {
+ // if os.Stderr or os.Stdout is in Arg[0], mark as no issue
+ if arg, ok := node.Args[0].(*ast.SelectorExpr); ok {
+ if ident, ok := arg.X.(*ast.Ident); ok {
+ if s.noIssue.Contains(ident.Name, arg.Sel.Name) {
+ return nil, nil
+ }
+ }
+ }
+ // the function is Fprintf so set argIndex = 1
+ argIndex = 1
+ }
+ }
+
+ // no formatter
+ if len(node.Args) == 0 {
+ return nil, nil
+ }
+
+ var formatter string
+
+ // concats callexpr arg strings together if needed before regex evaluation
+ if argExpr, ok := node.Args[argIndex].(*ast.BinaryExpr); ok {
+ if fullStr, ok := gosec.ConcatString(argExpr); ok {
+ formatter = fullStr
+ }
+ } else if arg, e := gosec.GetString(node.Args[argIndex]); e == nil {
+ formatter = arg
+ }
+ if len(formatter) <= 0 {
+ return nil, nil
+ }
+
+ // If all formatter args are quoted or constant, then the SQL construction is safe
+ if argIndex+1 < len(node.Args) {
+ allSafe := true
+ for _, arg := range node.Args[argIndex+1:] {
+ if n := s.noIssueQuoted.ContainsPkgCallExpr(arg, c, true); n == nil && !s.constObject(arg, c) {
+ allSafe = false
+ break
+ }
+ }
+ if allSafe {
+ return nil, nil
+ }
+ }
+ if s.MatchPatterns(formatter) {
+ return gosec.NewIssue(c, n, s.ID(), s.What, s.Severity, s.Confidence), nil
+ }
+ }
+ return nil, nil
+}
+
+// NewSQLStrFormat looks for cases where we're building SQL query strings using format strings
+func NewSQLStrFormat(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
+ rule := &sqlStrFormat{
+ calls: gosec.NewCallList(),
+ noIssue: gosec.NewCallList(),
+ noIssueQuoted: gosec.NewCallList(),
+ sqlStatement: sqlStatement{
+ patterns: []*regexp.Regexp{
+ regexp.MustCompile("(?)(SELECT|DELETE|INSERT|UPDATE|INTO|FROM|WHERE) "),
+ regexp.MustCompile("%[^bdoxXfFp]"),
+ },
+ MetaData: gosec.MetaData{
+ ID: id,
+ Severity: gosec.Medium,
+ Confidence: gosec.High,
+ What: "SQL string formatting",
+ },
+ },
+ }
+ rule.calls.AddAll("fmt", "Sprint", "Sprintf", "Sprintln", "Fprintf")
+ rule.noIssue.AddAll("os", "Stdout", "Stderr")
+ rule.noIssueQuoted.Add("github.com/lib/pq", "QuoteIdentifier")
+ return rule, []ast.Node{(*ast.CallExpr)(nil)}
+}
diff --git a/vendor/github.com/securego/gosec/v2/rules/ssh.go b/vendor/github.com/securego/gosec/v2/rules/ssh.go
new file mode 100644
index 0000000..01f37da
--- /dev/null
+++ b/vendor/github.com/securego/gosec/v2/rules/ssh.go
@@ -0,0 +1,38 @@
+package rules
+
+import (
+ "go/ast"
+
+ "github.com/securego/gosec/v2"
+)
+
+type sshHostKey struct {
+ gosec.MetaData
+ pkg string
+ calls []string
+}
+
+func (r *sshHostKey) ID() string {
+ return r.MetaData.ID
+}
+
+func (r *sshHostKey) Match(n ast.Node, c *gosec.Context) (gi *gosec.Issue, err error) {
+ if _, matches := gosec.MatchCallByPackage(n, c, r.pkg, r.calls...); matches {
+ return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil
+ }
+ return nil, nil
+}
+
+// NewSSHHostKey rule detects the use of insecure ssh HostKeyCallback.
+func NewSSHHostKey(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
+ return &sshHostKey{
+ pkg: "golang.org/x/crypto/ssh",
+ calls: []string{"InsecureIgnoreHostKey"},
+ MetaData: gosec.MetaData{
+ ID: id,
+ What: "Use of ssh InsecureIgnoreHostKey should be audited",
+ Severity: gosec.Medium,
+ Confidence: gosec.High,
+ },
+ }, []ast.Node{(*ast.CallExpr)(nil)}
+}
diff --git a/vendor/github.com/securego/gosec/v2/rules/ssrf.go b/vendor/github.com/securego/gosec/v2/rules/ssrf.go
new file mode 100644
index 0000000..86bb827
--- /dev/null
+++ b/vendor/github.com/securego/gosec/v2/rules/ssrf.go
@@ -0,0 +1,66 @@
+package rules
+
+import (
+ "go/ast"
+ "go/types"
+
+ "github.com/securego/gosec/v2"
+)
+
+type ssrf struct {
+ gosec.MetaData
+ gosec.CallList
+}
+
+// ID returns the identifier for this rule
+func (r *ssrf) ID() string {
+ return r.MetaData.ID
+}
+
+// ResolveVar tries to resolve the first argument of a call expression
+// The first argument is the url
+func (r *ssrf) ResolveVar(n *ast.CallExpr, c *gosec.Context) bool {
+ if len(n.Args) > 0 {
+ arg := n.Args[0]
+ if ident, ok := arg.(*ast.Ident); ok {
+ obj := c.Info.ObjectOf(ident)
+ if _, ok := obj.(*types.Var); ok {
+ scope := c.Pkg.Scope()
+ if scope != nil && scope.Lookup(ident.Name) != nil {
+ // a URL defined in a variable at package scope can be changed at any time
+ return true
+ }
+ if !gosec.TryResolve(ident, c) {
+ return true
+ }
+ }
+ }
+ }
+ return false
+}
+
+// Match inspects AST nodes to determine if certain net/http methods are called with variable input
+func (r *ssrf) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) {
+ // Call expression is using http package directly
+ if node := r.ContainsPkgCallExpr(n, c, false); node != nil {
+ if r.ResolveVar(node, c) {
+ return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil
+ }
+ }
+ return nil, nil
+}
+
+// NewSSRFCheck detects cases where HTTP requests are sent
+func NewSSRFCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
+ rule := &ssrf{
+ CallList: gosec.NewCallList(),
+ MetaData: gosec.MetaData{
+ ID: id,
+ What: "Potential HTTP request made with variable url",
+ Severity: gosec.Medium,
+ Confidence: gosec.Medium,
+ },
+ }
+ rule.AddAll("net/http", "Do", "Get", "Head", "Post", "PostForm", "RoundTrip")
+ return rule, []ast.Node{(*ast.CallExpr)(nil)}
+}
diff --git a/vendor/github.com/securego/gosec/v2/rules/subproc.go b/vendor/github.com/securego/gosec/v2/rules/subproc.go
new file mode 100644
index 0000000..30c32cc
--- /dev/null
+++ b/vendor/github.com/securego/gosec/v2/rules/subproc.go
@@ -0,0 +1,85 @@
+// (c) Copyright 2016 Hewlett Packard Enterprise Development LP
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rules
+
+import (
+ "go/ast"
+ "go/types"
+
+ "github.com/securego/gosec/v2"
+)
+
+type subprocess struct {
+ gosec.MetaData
+ gosec.CallList
+}
+
+func (r *subprocess) ID() string {
+ return r.MetaData.ID
+}
+
+// TODO(gm) The only real potential for command injection with a Go project
+// is something like this:
+//
+// syscall.Exec("/bin/sh", []string{"-c", tainted})
+//
+// E.g. Input is correctly escaped but the execution context being used
+// is unsafe. For example:
+//
+// syscall.Exec("echo", "foobar" + tainted)
+func (r *subprocess) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) {
+ if node := r.ContainsPkgCallExpr(n, c, false); node != nil {
+ args := node.Args
+ if r.isContext(n, c) {
+ args = args[1:]
+ }
+ for _, arg := range args {
+ if ident, ok := arg.(*ast.Ident); ok {
+ obj := c.Info.ObjectOf(ident)
+ if _, ok := obj.(*types.Var); ok && !gosec.TryResolve(ident, c) {
+ return gosec.NewIssue(c, n, r.ID(), "Subprocess launched with variable", gosec.Medium, gosec.High), nil
+ }
+ } else if !gosec.TryResolve(arg, c) {
+ // the arg is not a constant or a variable but instead a function call or os.Args[i]
+ return gosec.NewIssue(c, n, r.ID(), "Subprocess launched with function call as argument or cmd arguments", gosec.Medium, gosec.High), nil
+ }
+ }
+ }
+ return nil, nil
+}
+
+// isContext checks whether the node is a CommandContext call.
+// This is required in order to skip the first argument from the check.
+func (r *subprocess) isContext(n ast.Node, ctx *gosec.Context) bool {
+ selector, indent, err := gosec.GetCallInfo(n, ctx)
+ if err != nil {
+ return false
+ }
+ if selector == "exec" && indent == "CommandContext" {
+ return true
+ }
+ return false
+}
+
+// NewSubproc detects cases where we are forking out to an external process
+func NewSubproc(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
+ rule := &subprocess{gosec.MetaData{ID: id}, gosec.NewCallList()}
+ rule.Add("os/exec", "Command")
+ rule.Add("os/exec", "CommandContext")
+ rule.Add("syscall", "Exec")
+ rule.Add("syscall", "ForkExec")
+ rule.Add("syscall", "StartProcess")
+ return rule, []ast.Node{(*ast.CallExpr)(nil)}
+}
diff --git a/vendor/github.com/securego/gosec/v2/rules/tempfiles.go b/vendor/github.com/securego/gosec/v2/rules/tempfiles.go
new file mode 100644
index 0000000..36f0f97
--- /dev/null
+++ b/vendor/github.com/securego/gosec/v2/rules/tempfiles.go
@@ -0,0 +1,58 @@
+// (c) Copyright 2016 Hewlett Packard Enterprise Development LP
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rules
+
+import (
+ "go/ast"
+ "regexp"
+
+ "github.com/securego/gosec/v2"
+)
+
+type badTempFile struct {
+ gosec.MetaData
+ calls gosec.CallList
+ args *regexp.Regexp
+}
+
+func (t *badTempFile) ID() string {
+ return t.MetaData.ID
+}
+
+func (t *badTempFile) Match(n ast.Node, c *gosec.Context) (gi *gosec.Issue, err error) {
+ if node := t.calls.ContainsPkgCallExpr(n, c, false); node != nil {
+ if arg, e := gosec.GetString(node.Args[0]); t.args.MatchString(arg) && e == nil {
+ return gosec.NewIssue(c, n, t.ID(), t.What, t.Severity, t.Confidence), nil
+ }
+ }
+ return nil, nil
+}
+
+// NewBadTempFile detects direct writes to predictable path in temporary directory
+func NewBadTempFile(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
+ calls := gosec.NewCallList()
+ calls.Add("io/ioutil", "WriteFile")
+ calls.Add("os", "Create")
+ return &badTempFile{
+ calls: calls,
+ args: regexp.MustCompile(`^/tmp/.*$|^/var/tmp/.*$`),
+ MetaData: gosec.MetaData{
+ ID: id,
+ Severity: gosec.Medium,
+ Confidence: gosec.High,
+ What: "File creation in shared tmp directory without using ioutil.Tempfile",
+ },
+ }, []ast.Node{(*ast.CallExpr)(nil)}
+}
diff --git a/vendor/github.com/securego/gosec/v2/rules/templates.go b/vendor/github.com/securego/gosec/v2/rules/templates.go
new file mode 100644
index 0000000..8192409
--- /dev/null
+++ b/vendor/github.com/securego/gosec/v2/rules/templates.go
@@ -0,0 +1,61 @@
+// (c) Copyright 2016 Hewlett Packard Enterprise Development LP
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rules
+
+import (
+ "go/ast"
+
+ "github.com/securego/gosec/v2"
+)
+
+type templateCheck struct {
+ gosec.MetaData
+ calls gosec.CallList
+}
+
+func (t *templateCheck) ID() string {
+ return t.MetaData.ID
+}
+
+func (t *templateCheck) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) {
+ if node := t.calls.ContainsPkgCallExpr(n, c, false); node != nil {
+ for _, arg := range node.Args {
+ if _, ok := arg.(*ast.BasicLit); !ok { // basic lits are safe
+ return gosec.NewIssue(c, n, t.ID(), t.What, t.Severity, t.Confidence), nil
+ }
+ }
+ }
+ return nil, nil
+}
+
+// NewTemplateCheck constructs the template check rule. This rule is used to
+// find use of templates where HTML/JS escaping is not being used
+func NewTemplateCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
+
+ calls := gosec.NewCallList()
+ calls.Add("html/template", "HTML")
+ calls.Add("html/template", "HTMLAttr")
+ calls.Add("html/template", "JS")
+ calls.Add("html/template", "URL")
+ return &templateCheck{
+ calls: calls,
+ MetaData: gosec.MetaData{
+ ID: id,
+ Severity: gosec.Medium,
+ Confidence: gosec.Low,
+ What: "this method will not auto-escape HTML. Verify data is well formed.",
+ },
+ }, []ast.Node{(*ast.CallExpr)(nil)}
+}
diff --git a/vendor/github.com/securego/gosec/v2/rules/tls.go b/vendor/github.com/securego/gosec/v2/rules/tls.go
new file mode 100644
index 0000000..fab9ee1
--- /dev/null
+++ b/vendor/github.com/securego/gosec/v2/rules/tls.go
@@ -0,0 +1,130 @@
+// (c) Copyright 2016 Hewlett Packard Enterprise Development LP
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:generate tlsconfig
+
+package rules
+
+import (
+ "fmt"
+ "go/ast"
+
+ "github.com/securego/gosec/v2"
+)
+
+type insecureConfigTLS struct {
+ gosec.MetaData
+ MinVersion int16
+ MaxVersion int16
+ requiredType string
+ goodCiphers []string
+}
+
+func (t *insecureConfigTLS) ID() string {
+ return t.MetaData.ID
+}
+
+func stringInSlice(a string, list []string) bool {
+ for _, b := range list {
+ if b == a {
+ return true
+ }
+ }
+ return false
+}
+
+func (t *insecureConfigTLS) processTLSCipherSuites(n ast.Node, c *gosec.Context) *gosec.Issue {
+
+ if ciphers, ok := n.(*ast.CompositeLit); ok {
+ for _, cipher := range ciphers.Elts {
+ if ident, ok := cipher.(*ast.SelectorExpr); ok {
+ if !stringInSlice(ident.Sel.Name, t.goodCiphers) {
+ err := fmt.Sprintf("TLS Bad Cipher Suite: %s", ident.Sel.Name)
+ return gosec.NewIssue(c, ident, t.ID(), err, gosec.High, gosec.High)
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func (t *insecureConfigTLS) processTLSConfVal(n *ast.KeyValueExpr, c *gosec.Context) *gosec.Issue {
+ if ident, ok := n.Key.(*ast.Ident); ok {
+ switch ident.Name {
+
+ case "InsecureSkipVerify":
+ if node, ok := n.Value.(*ast.Ident); ok {
+ if node.Name != "false" {
+ return gosec.NewIssue(c, n, t.ID(), "TLS InsecureSkipVerify set true.", gosec.High, gosec.High)
+ }
+ } else {
+ // TODO(tk): symbol tab look up to get the actual value
+ return gosec.NewIssue(c, n, t.ID(), "TLS InsecureSkipVerify may be true.", gosec.High, gosec.Low)
+ }
+
+ case "PreferServerCipherSuites":
+ if node, ok := n.Value.(*ast.Ident); ok {
+ if node.Name == "false" {
+ return gosec.NewIssue(c, n, t.ID(), "TLS PreferServerCipherSuites set false.", gosec.Medium, gosec.High)
+ }
+ } else {
+ // TODO(tk): symbol tab look up to get the actual value
+ return gosec.NewIssue(c, n, t.ID(), "TLS PreferServerCipherSuites may be false.", gosec.Medium, gosec.Low)
+ }
+
+ case "MinVersion":
+ if ival, ierr := gosec.GetInt(n.Value); ierr == nil {
+ if (int16)(ival) < t.MinVersion {
+ return gosec.NewIssue(c, n, t.ID(), "TLS MinVersion too low.", gosec.High, gosec.High)
+ }
+ // TODO(tk): symbol tab look up to get the actual value
+ return gosec.NewIssue(c, n, t.ID(), "TLS MinVersion may be too low.", gosec.High, gosec.Low)
+ }
+
+ case "MaxVersion":
+ if ival, ierr := gosec.GetInt(n.Value); ierr == nil {
+ if (int16)(ival) < t.MaxVersion {
+ return gosec.NewIssue(c, n, t.ID(), "TLS MaxVersion too low.", gosec.High, gosec.High)
+ }
+ // TODO(tk): symbol tab look up to get the actual value
+ return gosec.NewIssue(c, n, t.ID(), "TLS MaxVersion may be too low.", gosec.High, gosec.Low)
+ }
+
+ case "CipherSuites":
+ if ret := t.processTLSCipherSuites(n.Value, c); ret != nil {
+ return ret
+ }
+
+ }
+
+ }
+ return nil
+}
+
+func (t *insecureConfigTLS) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) {
+ if complit, ok := n.(*ast.CompositeLit); ok && complit.Type != nil {
+ actualType := c.Info.TypeOf(complit.Type)
+ if actualType != nil && actualType.String() == t.requiredType {
+ for _, elt := range complit.Elts {
+ if kve, ok := elt.(*ast.KeyValueExpr); ok {
+ issue := t.processTLSConfVal(kve, c)
+ if issue != nil {
+ return issue, nil
+ }
+ }
+ }
+ }
+ }
+ return nil, nil
+}
diff --git a/vendor/github.com/securego/gosec/v2/rules/tls_config.go b/vendor/github.com/securego/gosec/v2/rules/tls_config.go
new file mode 100644
index 0000000..ff4f3fe
--- /dev/null
+++ b/vendor/github.com/securego/gosec/v2/rules/tls_config.go
@@ -0,0 +1,88 @@
+package rules
+
+import (
+ "go/ast"
+
+ "github.com/securego/gosec/v2"
+)
+
+// NewModernTLSCheck creates a check for Modern TLS ciphers
+// DO NOT EDIT - generated by tlsconfig tool
+func NewModernTLSCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
+ return &insecureConfigTLS{
+ MetaData: gosec.MetaData{ID: id},
+ requiredType: "crypto/tls.Config",
+ MinVersion: 0x0304,
+ MaxVersion: 0x0304,
+ goodCiphers: []string{
+ "TLS_AES_128_GCM_SHA256",
+ "TLS_AES_256_GCM_SHA384",
+ "TLS_CHACHA20_POLY1305_SHA256",
+ },
+ }, []ast.Node{(*ast.CompositeLit)(nil)}
+}
+
+// NewIntermediateTLSCheck creates a check for Intermediate TLS ciphers
+// DO NOT EDIT - generated by tlsconfig tool
+func NewIntermediateTLSCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
+ return &insecureConfigTLS{
+ MetaData: gosec.MetaData{ID: id},
+ requiredType: "crypto/tls.Config",
+ MinVersion: 0x0303,
+ MaxVersion: 0x0304,
+ goodCiphers: []string{
+ "TLS_AES_128_GCM_SHA256",
+ "TLS_AES_256_GCM_SHA384",
+ "TLS_CHACHA20_POLY1305_SHA256",
+ "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
+ "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
+ "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
+ "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
+ "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
+ "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
+ "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256",
+ "TLS_DHE_RSA_WITH_AES_256_GCM_SHA384",
+ },
+ }, []ast.Node{(*ast.CompositeLit)(nil)}
+}
+
+// NewOldTLSCheck creates a check for Old TLS ciphers
+// DO NOT EDIT - generated by tlsconfig tool
+func NewOldTLSCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
+ return &insecureConfigTLS{
+ MetaData: gosec.MetaData{ID: id},
+ requiredType: "crypto/tls.Config",
+ MinVersion: 0x0301,
+ MaxVersion: 0x0304,
+ goodCiphers: []string{
+ "TLS_AES_128_GCM_SHA256",
+ "TLS_AES_256_GCM_SHA384",
+ "TLS_CHACHA20_POLY1305_SHA256",
+ "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
+ "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
+ "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
+ "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
+ "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
+ "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
+ "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256",
+ "TLS_DHE_RSA_WITH_AES_256_GCM_SHA384",
+ "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256",
+ "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
+ "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
+ "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
+ "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384",
+ "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384",
+ "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
+ "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
+ "TLS_DHE_RSA_WITH_AES_128_CBC_SHA256",
+ "TLS_DHE_RSA_WITH_AES_256_CBC_SHA256",
+ "TLS_RSA_WITH_AES_128_GCM_SHA256",
+ "TLS_RSA_WITH_AES_256_GCM_SHA384",
+ "TLS_RSA_WITH_AES_128_CBC_SHA256",
+ "TLS_RSA_WITH_AES_256_CBC_SHA256",
+ "TLS_RSA_WITH_AES_128_CBC_SHA",
+ "TLS_RSA_WITH_AES_256_CBC_SHA",
+ "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
+ },
+ }, []ast.Node{(*ast.CompositeLit)(nil)}
+}
diff --git a/vendor/github.com/securego/gosec/v2/rules/unsafe.go b/vendor/github.com/securego/gosec/v2/rules/unsafe.go
new file mode 100644
index 0000000..88a298f
--- /dev/null
+++ b/vendor/github.com/securego/gosec/v2/rules/unsafe.go
@@ -0,0 +1,53 @@
+// (c) Copyright 2016 Hewlett Packard Enterprise Development LP
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rules
+
+import (
+ "go/ast"
+
+ "github.com/securego/gosec/v2"
+)
+
+type usingUnsafe struct {
+ gosec.MetaData
+ pkg string
+ calls []string
+}
+
+func (r *usingUnsafe) ID() string {
+ return r.MetaData.ID
+}
+
+func (r *usingUnsafe) Match(n ast.Node, c *gosec.Context) (gi *gosec.Issue, err error) {
+ if _, matches := gosec.MatchCallByPackage(n, c, r.pkg, r.calls...); matches {
+ return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil
+ }
+ return nil, nil
+}
+
+// NewUsingUnsafe rule detects the use of the unsafe package. This is only
+// really useful for auditing purposes.
+func NewUsingUnsafe(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
+ return &usingUnsafe{
+ pkg: "unsafe",
+ calls: []string{"Alignof", "Offsetof", "Sizeof", "Pointer"},
+ MetaData: gosec.MetaData{
+ ID: id,
+ What: "Use of unsafe calls should be audited",
+ Severity: gosec.Low,
+ Confidence: gosec.High,
+ },
+ }, []ast.Node{(*ast.CallExpr)(nil)}
+}
diff --git a/vendor/github.com/securego/gosec/v2/rules/weakcrypto.go b/vendor/github.com/securego/gosec/v2/rules/weakcrypto.go
new file mode 100644
index 0000000..0e45393
--- /dev/null
+++ b/vendor/github.com/securego/gosec/v2/rules/weakcrypto.go
@@ -0,0 +1,58 @@
+// (c) Copyright 2016 Hewlett Packard Enterprise Development LP
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rules
+
+import (
+ "go/ast"
+
+ "github.com/securego/gosec/v2"
+)
+
+type usesWeakCryptography struct {
+ gosec.MetaData
+ blacklist map[string][]string
+}
+
+func (r *usesWeakCryptography) ID() string {
+ return r.MetaData.ID
+}
+
+func (r *usesWeakCryptography) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) {
+ for pkg, funcs := range r.blacklist {
+ if _, matched := gosec.MatchCallByPackage(n, c, pkg, funcs...); matched {
+ return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil
+ }
+ }
+ return nil, nil
+}
+
+// NewUsesWeakCryptography detects uses of des.* md5.* or rc4.*
+func NewUsesWeakCryptography(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
+ calls := make(map[string][]string)
+ calls["crypto/des"] = []string{"NewCipher", "NewTripleDESCipher"}
+ calls["crypto/md5"] = []string{"New", "Sum"}
+ calls["crypto/sha1"] = []string{"New", "Sum"}
+ calls["crypto/rc4"] = []string{"NewCipher"}
+ rule := &usesWeakCryptography{
+ blacklist: calls,
+ MetaData: gosec.MetaData{
+ ID: id,
+ Severity: gosec.Medium,
+ Confidence: gosec.High,
+ What: "Use of weak cryptographic primitive",
+ },
+ }
+ return rule, []ast.Node{(*ast.CallExpr)(nil)}
+}
diff --git a/vendor/github.com/sirupsen/logrus/.gitignore b/vendor/github.com/sirupsen/logrus/.gitignore
new file mode 100644
index 0000000..6b7d7d1
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/.gitignore
@@ -0,0 +1,2 @@
+logrus
+vendor
diff --git a/vendor/github.com/sirupsen/logrus/.golangci.yml b/vendor/github.com/sirupsen/logrus/.golangci.yml
new file mode 100644
index 0000000..65dc285
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/.golangci.yml
@@ -0,0 +1,40 @@
+run:
+ # do not run on test files yet
+ tests: false
+
+# all available settings of specific linters
+linters-settings:
+ errcheck:
+ # report about not checking of errors in type assertions: `a := b.(MyStruct)`;
+ # default is false: such cases aren't reported by default.
+ check-type-assertions: false
+
+ # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`;
+ # default is false: such cases aren't reported by default.
+ check-blank: false
+
+ lll:
+ line-length: 100
+ tab-width: 4
+
+ prealloc:
+ simple: false
+ range-loops: false
+ for-loops: false
+
+ whitespace:
+ multi-if: false # Enforces newlines (or comments) after every multi-line if statement
+ multi-func: false # Enforces newlines (or comments) after every multi-line function signature
+
+linters:
+ enable:
+ - megacheck
+ - govet
+ disable:
+ - maligned
+ - prealloc
+ disable-all: false
+ presets:
+ - bugs
+ - unused
+ fast: false
diff --git a/vendor/github.com/sirupsen/logrus/.travis.yml b/vendor/github.com/sirupsen/logrus/.travis.yml
new file mode 100644
index 0000000..5e20aa4
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/.travis.yml
@@ -0,0 +1,17 @@
+language: go
+go_import_path: github.com/sirupsen/logrus
+git:
+ depth: 1
+env:
+ - GO111MODULE=on
+go: [1.13.x, 1.14.x]
+os: [linux, osx]
+install:
+ - ./travis/install.sh
+script:
+ - ./travis/cross_build.sh
+ - ./travis/lint.sh
+ - export GOMAXPROCS=4
+ - export GORACE=halt_on_error=1
+ - go test -race -v ./...
+ - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then go test -race -v -tags appengine ./... ; fi
diff --git a/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/vendor/github.com/sirupsen/logrus/CHANGELOG.md
new file mode 100644
index 0000000..584026d
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/CHANGELOG.md
@@ -0,0 +1,223 @@
+# 1.6.0
+Fixes:
+ * end of line cleanup
+ * revert the entry concurrency bug fix which leads to deadlock under some circumstances
+ * update dependency on go-windows-terminal-sequences to fix a crash with go 1.14
+
+Features:
+ * add an option to the `TextFormatter` to completely disable fields quoting
+
+# 1.5.0
+Code quality:
+ * add golangci linter run on travis
+
+Fixes:
+ * add mutex for hooks concurrent access on `Entry` data
+ * caller function field for go1.14
+ * fix build issue for gopherjs target
+
+Feature:
+ * add a hooks/writer sub-package whose goal is to split output on different streams depending on the trace level
+ * add a `DisableHTMLEscape` option in the `JSONFormatter`
+ * add `ForceQuote` and `PadLevelText` options in the `TextFormatter`
+
+# 1.4.2
+ * Fixes build break for plan9, nacl, solaris
+# 1.4.1
+This new release introduces:
+ * Enhance TextFormatter to not print caller information when they are empty (#944)
+ * Remove dependency on golang.org/x/crypto (#932, #943)
+
+Fixes:
+ * Fix Entry.WithContext method to return a copy of the initial entry (#941)
+
+# 1.4.0
+This new release introduces:
+ * Add `DeferExitHandler`, similar to `RegisterExitHandler` but prepending the handler to the list of handlers (semantically like `defer`) (#848).
+ * Add `CallerPrettyfier` to `JSONFormatter` and `TextFormatter` (#909, #911)
+ * Add `Entry.WithContext()` and `Entry.Context`, to set a context on entries to be used e.g. in hooks (#919).
+
+Fixes:
+ * Fix wrong method calls `Logger.Print` and `Logger.Warningln` (#893).
+ * Update `Entry.Logf` to not do string formatting unless the log level is enabled (#903)
+ * Fix infinite recursion on unknown `Level.String()` (#907)
+ * Fix race condition in `getCaller` (#916).
+
+
+# 1.3.0
+This new release introduces:
+ * Log, Logf, Logln functions for Logger and Entry that take a Level
+
+Fixes:
+ * Building prometheus node_exporter on AIX (#840)
+ * Race condition in TextFormatter (#468)
+ * Travis CI import path (#868)
+ * Remove coloured output on Windows (#862)
+ * Pointer to func as field in JSONFormatter (#870)
+ * Properly marshal Levels (#873)
+
+# 1.2.0
+This new release introduces:
+ * A new method `SetReportCaller` in the `Logger` to enable the file, line and calling function from which the trace has been issued
+ * A new trace level named `Trace` whose level is below `Debug`
+ * A configurable exit function to be called upon a Fatal trace
+ * The `Level` object now implements `encoding.TextUnmarshaler` interface
+
+# 1.1.1
+This is a bug fix release.
+ * fix the build break on Solaris
+ * don't drop a whole trace in JSONFormatter when a field param is a function pointer which can not be serialized
+
+# 1.1.0
+This new release introduces:
+ * several fixes:
+ * a fix for a race condition on entry formatting
+ * proper cleanup of previously used entries before putting them back in the pool
+ * the extra new line at the end of message in text formatter has been removed
+ * a new global public API to check if a level is activated: IsLevelEnabled
+ * the following methods have been added to the Logger object
+ * IsLevelEnabled
+ * SetFormatter
+ * SetOutput
+ * ReplaceHooks
+ * introduction of go module
+ * an indent configuration for the json formatter
+ * output colour support for windows
+ * the field sort function is now configurable for text formatter
+ * the CLICOLOR and CLICOLOR\_FORCE environment variable support in text formatter
+
+# 1.0.6
+
+This new release introduces:
+ * a new api WithTime which allows to easily force the time of the log entry
+ which is mostly useful for logger wrapper
+ * a fix reverting the immutability of the entry given as parameter to the hooks
+ * a new configuration field of the json formatter in order to put all the fields
+ in a nested dictionary
+ * a new SetOutput method in the Logger
+ * a new configuration of the textformatter to configure the name of the default keys
+ * a new configuration of the text formatter to disable the level truncation
+
+# 1.0.5
+
+* Fix hooks race (#707)
+* Fix panic deadlock (#695)
+
+# 1.0.4
+
+* Fix race when adding hooks (#612)
+* Fix terminal check in AppEngine (#635)
+
+# 1.0.3
+
+* Replace example files with testable examples
+
+# 1.0.2
+
+* bug: quote non-string values in text formatter (#583)
+* Make (*Logger) SetLevel a public method
+
+# 1.0.1
+
+* bug: fix escaping in text formatter (#575)
+
+# 1.0.0
+
+* Officially changed name to lower-case
+* bug: colors on Windows 10 (#541)
+* bug: fix race in accessing level (#512)
+
+# 0.11.5
+
+* feature: add writer and writerlevel to entry (#372)
+
+# 0.11.4
+
+* bug: fix undefined variable on solaris (#493)
+
+# 0.11.3
+
+* formatter: configure quoting of empty values (#484)
+* formatter: configure quoting character (default is `"`) (#484)
+* bug: fix not importing io correctly in non-linux environments (#481)
+
+# 0.11.2
+
+* bug: fix windows terminal detection (#476)
+
+# 0.11.1
+
+* bug: fix tty detection with custom out (#471)
+
+# 0.11.0
+
+* performance: Use bufferpool to allocate (#370)
+* terminal: terminal detection for app-engine (#343)
+* feature: exit handler (#375)
+
+# 0.10.0
+
+* feature: Add a test hook (#180)
+* feature: `ParseLevel` is now case-insensitive (#326)
+* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
+* performance: avoid re-allocations on `WithFields` (#335)
+
+# 0.9.0
+
+* logrus/text_formatter: don't emit empty msg
+* logrus/hooks/airbrake: move out of main repository
+* logrus/hooks/sentry: move out of main repository
+* logrus/hooks/papertrail: move out of main repository
+* logrus/hooks/bugsnag: move out of main repository
+* logrus/core: run tests with `-race`
+* logrus/core: detect TTY based on `stderr`
+* logrus/core: support `WithError` on logger
+* logrus/core: Solaris support
+
+# 0.8.7
+
+* logrus/core: fix possible race (#216)
+* logrus/doc: small typo fixes and doc improvements
+
+
+# 0.8.6
+
+* hooks/raven: allow passing an initialized client
+
+# 0.8.5
+
+* logrus/core: revert #208
+
+# 0.8.4
+
+* formatter/text: fix data race (#218)
+
+# 0.8.3
+
+* logrus/core: fix entry log level (#208)
+* logrus/core: improve performance of text formatter by 40%
+* logrus/core: expose `LevelHooks` type
+* logrus/core: add support for DragonflyBSD and NetBSD
+* formatter/text: print structs more verbosely
+
+# 0.8.2
+
+* logrus: fix more Fatal family functions
+
+# 0.8.1
+
+* logrus: fix not exiting on `Fatalf` and `Fatalln`
+
+# 0.8.0
+
+* logrus: defaults to stderr instead of stdout
+* hooks/sentry: add special field for `*http.Request`
+* formatter/text: ignore Windows for colors
+
+# 0.7.3
+
+* formatter/\*: allow configuration of timestamp layout
+
+# 0.7.2
+
+* formatter/text: Add configuration option for time format (#158)
diff --git a/vendor/github.com/sirupsen/logrus/LICENSE b/vendor/github.com/sirupsen/logrus/LICENSE
new file mode 100644
index 0000000..f090cb4
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Simon Eskildsen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md
new file mode 100644
index 0000000..5796706
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/README.md
@@ -0,0 +1,513 @@
+# Logrus [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus)
+
+Logrus is a structured logger for Go (golang), completely API compatible with
+the standard library logger.
+
+**Logrus is in maintenance-mode.** We will not be introducing new features. It's
+simply too hard to do in a way that won't break many people's projects, which is
+the last thing you want from your Logging library (again...).
+
+This does not mean Logrus is dead. Logrus will continue to be maintained for
+security, (backwards compatible) bug fixes, and performance (where we are
+limited by the interface).
+
+I believe Logrus' biggest contribution is to have played a part in today's
+widespread use of structured logging in Golang. There doesn't seem to be a
+reason to do a major, breaking iteration into Logrus V2, since the fantastic Go
+community has built those independently. Many fantastic alternatives have sprung
+up. Logrus would look like those, had it been re-designed with what we know
+about structured logging in Go today. Check out, for example,
+[Zerolog][zerolog], [Zap][zap], and [Apex][apex].
+
+[zerolog]: https://github.com/rs/zerolog
+[zap]: https://github.com/uber-go/zap
+[apex]: https://github.com/apex/log
+
+**Seeing weird case-sensitive problems?** It's in the past been possible to
+import Logrus as both upper- and lower-case. Due to the Go package environment,
+this caused issues in the community and we needed a standard. Some environments
+experienced problems with the upper-case variant, so the lower-case was decided.
+Everything using `logrus` will need to use the lower-case:
+`github.com/sirupsen/logrus`. Any package that isn't, should be changed.
+
+To fix Glide, see [these
+comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437).
+For an in-depth explanation of the casing issue, see [this
+comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276).
+
+Nicely color-coded in development (when a TTY is attached, otherwise just
+plain text):
+
+![Colored](http://i.imgur.com/PY7qMwd.png)
+
+With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
+or Splunk:
+
+```json
+{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
+ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
+
+{"level":"warning","msg":"The group's number increased tremendously!",
+"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
+"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
+"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
+
+{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
+"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
+```
+
+With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
+attached, the output is compatible with the
+[logfmt](http://godoc.org/github.com/kr/logfmt) format:
+
+```text
+time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
+time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
+time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
+time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
+time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
+time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
+```
+To ensure this behaviour even if a TTY is attached, set your formatter as follows:
+
+```go
+ log.SetFormatter(&log.TextFormatter{
+ DisableColors: true,
+ FullTimestamp: true,
+ })
+```
+
+#### Logging Method Name
+
+If you wish to add the calling method as a field, instruct the logger via:
+```go
+log.SetReportCaller(true)
+```
+This adds the caller as 'method' like so:
+
+```json
+{"animal":"penguin","level":"fatal","method":"github.com/sirupsen/arcticcreatures.migrate","msg":"a penguin swims by",
+"time":"2014-03-10 19:57:38.562543129 -0400 EDT"}
+```
+
+```text
+time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcreatures.migrate msg="a penguin swims by" animal=penguin
+```
+Note that this does add measurable overhead - the cost will depend on the version of Go, but is
+between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your
+environment via benchmarks:
+```
+go test -bench=.*CallerTracing
+```
+
+
+#### Case-sensitivity
+
+The organization's name was changed to lower-case--and this will not be changed
+back. If you are getting import conflicts due to case sensitivity, please use
+the lower-case import: `github.com/sirupsen/logrus`.
+
+#### Example
+
+The simplest way to use Logrus is simply the package-level exported logger:
+
+```go
+package main
+
+import (
+ log "github.com/sirupsen/logrus"
+)
+
+func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ }).Info("A walrus appears")
+}
+```
+
+Note that it's completely api-compatible with the stdlib logger, so you can
+replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"`
+and you'll now have the flexibility of Logrus. You can customize it all you
+want:
+
+```go
+package main
+
+import (
+ "os"
+ log "github.com/sirupsen/logrus"
+)
+
+func init() {
+ // Log as JSON instead of the default ASCII formatter.
+ log.SetFormatter(&log.JSONFormatter{})
+
+ // Output to stdout instead of the default stderr
+ // Can be any io.Writer, see below for File example
+ log.SetOutput(os.Stdout)
+
+ // Only log the warning severity or above.
+ log.SetLevel(log.WarnLevel)
+}
+
+func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+
+ log.WithFields(log.Fields{
+ "omg": true,
+ "number": 122,
+ }).Warn("The group's number increased tremendously!")
+
+ log.WithFields(log.Fields{
+ "omg": true,
+ "number": 100,
+ }).Fatal("The ice breaks!")
+
+ // A common pattern is to re-use fields between logging statements by re-using
+ // the logrus.Entry returned from WithFields()
+ contextLogger := log.WithFields(log.Fields{
+ "common": "this is a common field",
+ "other": "I also should be logged always",
+ })
+
+ contextLogger.Info("I'll be logged with common and other field")
+ contextLogger.Info("Me too")
+}
+```
+
+For more advanced usage such as logging to multiple locations from the same
+application, you can also create an instance of the `logrus` Logger:
+
+```go
+package main
+
+import (
+ "os"
+ "github.com/sirupsen/logrus"
+)
+
+// Create a new instance of the logger. You can have any number of instances.
+var log = logrus.New()
+
+func main() {
+ // The API for setting attributes is a little different than the package level
+ // exported logger. See Godoc.
+ log.Out = os.Stdout
+
+ // You could set this to any `io.Writer` such as a file
+ // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
+ // if err == nil {
+ // log.Out = file
+ // } else {
+ // log.Info("Failed to log to file, using default stderr")
+ // }
+
+ log.WithFields(logrus.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+}
+```
+
+#### Fields
+
+Logrus encourages careful, structured logging through logging fields instead of
+long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
+to send event %s to topic %s with key %d")`, you should log the much more
+discoverable:
+
+```go
+log.WithFields(log.Fields{
+ "event": event,
+ "topic": topic,
+ "key": key,
+}).Fatal("Failed to send event")
+```
+
+We've found this API forces you to think about logging in a way that produces
+much more useful logging messages. We've been in countless situations where just
+a single added field to a log statement that was already there would've saved us
+hours. The `WithFields` call is optional.
+
+In general, with Logrus using any of the `printf`-family functions should be
+seen as a hint you should add a field, however, you can still use the
+`printf`-family functions with Logrus.
+
+#### Default Fields
+
+Often it's helpful to have fields _always_ attached to log statements in an
+application or parts of one. For example, you may want to always log the
+`request_id` and `user_ip` in the context of a request. Instead of writing
+`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on
+every line, you can create a `logrus.Entry` to pass around instead:
+
+```go
+requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})
+requestLogger.Info("something happened on that request") // will log request_id and user_ip
+requestLogger.Warn("something not great happened")
+```
+
+#### Hooks
+
+You can add hooks for logging levels. For example to send errors to an exception
+tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to
+multiple places simultaneously, e.g. syslog.
+
+Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
+`init`:
+
+```go
+import (
+ log "github.com/sirupsen/logrus"
+ "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake"
+ logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
+ "log/syslog"
+)
+
+func init() {
+
+ // Use the Airbrake hook to report errors that have Error severity or above to
+ // an exception tracker. You can create custom hooks, see the Hooks section.
+ log.AddHook(airbrake.NewHook(123, "xyz", "production"))
+
+ hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+ if err != nil {
+ log.Error("Unable to connect to local syslog daemon")
+ } else {
+ log.AddHook(hook)
+ }
+}
+```
+Note: The syslog hook also supports connecting to local syslog (e.g. "/dev/log", "/var/run/syslog" or "/var/run/log"). For details, please check the [syslog hook README](hooks/syslog/README.md).
+
+A list of currently known service hooks can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks)
+
+
+#### Level logging
+
+Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic.
+
+```go
+log.Trace("Something very low level.")
+log.Debug("Useful debugging information.")
+log.Info("Something noteworthy happened!")
+log.Warn("You should probably take a look at this.")
+log.Error("Something failed but I'm not quitting.")
+// Calls os.Exit(1) after logging
+log.Fatal("Bye.")
+// Calls panic() after logging
+log.Panic("I'm bailing.")
+```
+
+You can set the logging level on a `Logger`, then it will only log entries with
+that severity or anything above it:
+
+```go
+// Will log anything that is info or above (warn, error, fatal, panic). Default.
+log.SetLevel(log.InfoLevel)
+```
+
+It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
+environment if your application has that.
+
+#### Entries
+
+Besides the fields added with `WithField` or `WithFields` some fields are
+automatically added to all logging events:
+
+1. `time`. The timestamp when the entry was created.
+2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
+ the `WithFields` call. E.g. `Failed to send event.`
+3. `level`. The logging level. E.g. `info`.
+
+#### Environments
+
+Logrus has no notion of environment.
+
+If you wish for hooks and formatters to only be used in specific environments,
+you should handle that yourself. For example, if your application has a global
+variable `Environment`, which is a string representation of the environment you
+could do:
+
+```go
+import (
+ log "github.com/sirupsen/logrus"
+)
+
+func init() {
+ // do something here to set environment depending on an environment variable
+ // or command-line flag
+ if Environment == "production" {
+ log.SetFormatter(&log.JSONFormatter{})
+ } else {
+ // The TextFormatter is default, you don't actually have to do this.
+ log.SetFormatter(&log.TextFormatter{})
+ }
+}
+```
+
+This configuration is how `logrus` was intended to be used, but JSON in
+production is mostly only useful if you do log aggregation with tools like
+Splunk or Logstash.
+
+#### Formatters
+
+The built-in logging formatters are:
+
+* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
+ without colors.
+ * *Note:* to force colored output when there is no TTY, set the `ForceColors`
+ field to `true`. To force no colored output even if there is a TTY set the
+ `DisableColors` field to `true`. For Windows, see
+ [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable).
+ * When colors are enabled, levels are truncated to 4 characters by default. To disable
+ truncation set the `DisableLevelTruncation` field to `true`.
+ * When outputting to a TTY, it's often helpful to visually scan down a column where all the levels are the same width. Setting the `PadLevelText` field to `true` enables this behavior, by adding padding to the level text.
+ * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter).
+* `logrus.JSONFormatter`. Logs fields as JSON.
+ * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter).
+
+Third party logging formatters:
+
+* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine.
+* [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply to Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html).
+* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
+* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
+* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the Power of Zalgo.
+* [`nested-logrus-formatter`](https://github.com/antonfisher/nested-logrus-formatter). Converts logrus fields to a nested structure.
+* [`powerful-logrus-formatter`](https://github.com/zput/zxcTool). Gets the file name, the log's line number and the calling function's name when printing a log; saves logs to files.
+* [`caption-json-formatter`](https://github.com/nolleh/caption_json_formatter). logrus's message json formatter with human-readable caption added.
+
+You can define your formatter by implementing the `Formatter` interface,
+requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
+`Fields` type (`map[string]interface{}`) with all your fields as well as the
+default ones (see Entries section above):
+
+```go
+type MyJSONFormatter struct {
+}
+
+log.SetFormatter(new(MyJSONFormatter))
+
+func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
+ // Note this doesn't include Time, Level and Message which are available on
+ // the Entry. Consult `godoc` on information about those fields or read the
+ // source of the official loggers.
+ serialized, err := json.Marshal(entry.Data)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+ }
+ return append(serialized, '\n'), nil
+}
+```
+
+#### Logger as an `io.Writer`
+
+Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
+
+```go
+w := logger.Writer()
+defer w.Close()
+
+srv := http.Server{
+ // create a stdlib log.Logger that writes to
+ // logrus.Logger.
+ ErrorLog: log.New(w, "", 0),
+}
+```
+
+Each line written to that writer will be printed the usual way, using formatters
+and hooks. The level for those entries is `info`.
+
+This means that we can override the standard library logger easily:
+
+```go
+logger := logrus.New()
+logger.Formatter = &logrus.JSONFormatter{}
+
+// Use logrus for standard log output
+// Note that `log` here references stdlib's log
+// Not logrus imported under the name `log`.
+log.SetOutput(logger.Writer())
+```
+
+#### Rotation
+
+Log rotation is not provided with Logrus. Log rotation should be done by an
+external program (like `logrotate(8)`) that can compress and delete old log
+entries. It should not be a feature of the application-level logger.
+
+#### Tools
+
+| Tool | Description |
+| ---- | ----------- |
+|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will be generated with different configs in different environments.|
+|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper around Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
+
+#### Testing
+
+Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
+
+* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just adds the `test` hook
+* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
+
+```go
+import(
+ "github.com/sirupsen/logrus"
+ "github.com/sirupsen/logrus/hooks/test"
+ "github.com/stretchr/testify/assert"
+ "testing"
+)
+
+func TestSomething(t *testing.T) {
+ logger, hook := test.NewNullLogger()
+ logger.Error("Helloerror")
+
+ assert.Equal(t, 1, len(hook.Entries))
+ assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level)
+ assert.Equal(t, "Helloerror", hook.LastEntry().Message)
+
+ hook.Reset()
+ assert.Nil(t, hook.LastEntry())
+}
+```
+
+#### Fatal handlers
+
+Logrus can register one or more functions that will be called when any `fatal`
+level message is logged. The registered handlers will be executed before
+logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need
+to gracefully shutdown. Unlike a `panic("Something went wrong...")` call which can be intercepted with a deferred `recover` a call to `os.Exit(1)` can not be intercepted.
+
+```
+...
+handler := func() {
+ // gracefully shutdown something...
+}
+logrus.RegisterExitHandler(handler)
+...
+```
+
+#### Thread safety
+
+By default, Logger is protected by a mutex for concurrent writes. The mutex is held when calling hooks and writing logs.
+If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking.
+
+Situation when locking is not needed includes:
+
+* You have no hooks registered, or hooks calling is already thread-safe.
+
+* Writing to logger.Out is already thread-safe, for example:
+
+ 1) logger.Out is protected by locks.
+
+ 2) logger.Out is an os.File handler opened with `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing)
+
+ (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
diff --git a/vendor/github.com/sirupsen/logrus/alt_exit.go b/vendor/github.com/sirupsen/logrus/alt_exit.go
new file mode 100644
index 0000000..8fd189e
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/alt_exit.go
@@ -0,0 +1,76 @@
+package logrus
+
+// The following code was sourced and modified from the
+// https://github.com/tebeka/atexit package governed by the following license:
+//
+// Copyright (c) 2012 Miki Tebeka .
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+// the Software, and to permit persons to whom the Software is furnished to do so,
+// subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+import (
+ "fmt"
+ "os"
+)
+
+var handlers = []func(){}
+
+func runHandler(handler func()) {
+ defer func() {
+ if err := recover(); err != nil {
+ fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
+ }
+ }()
+
+ handler()
+}
+
+func runHandlers() {
+ for _, handler := range handlers {
+ runHandler(handler)
+ }
+}
+
+// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
+func Exit(code int) {
+ runHandlers()
+ os.Exit(code)
+}
+
+// RegisterExitHandler appends a Logrus Exit handler to the list of handlers,
+// call logrus.Exit to invoke all handlers. The handlers will also be invoked when
+// any Fatal log entry is made.
+//
+// This method is useful when a caller wishes to use logrus to log a fatal
+// message but also needs to gracefully shutdown. An example usecase could be
+// closing database connections, or sending an alert that the application is
+// closing.
+func RegisterExitHandler(handler func()) {
+ handlers = append(handlers, handler)
+}
+
+// DeferExitHandler prepends a Logrus Exit handler to the list of handlers,
+// call logrus.Exit to invoke all handlers. The handlers will also be invoked when
+// any Fatal log entry is made.
+//
+// This method is useful when a caller wishes to use logrus to log a fatal
+// message but also needs to gracefully shutdown. An example usecase could be
+// closing database connections, or sending an alert that the application is
+// closing.
+func DeferExitHandler(handler func()) {
+ handlers = append([]func(){handler}, handlers...)
+}
diff --git a/vendor/github.com/sirupsen/logrus/appveyor.yml b/vendor/github.com/sirupsen/logrus/appveyor.yml
new file mode 100644
index 0000000..df9d65c
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/appveyor.yml
@@ -0,0 +1,14 @@
+version: "{build}"
+platform: x64
+clone_folder: c:\gopath\src\github.com\sirupsen\logrus
+environment:
+ GOPATH: c:\gopath
+branches:
+ only:
+ - master
+install:
+ - set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
+ - go version
+build_script:
+ - go get -t
+ - go test
diff --git a/vendor/github.com/sirupsen/logrus/doc.go b/vendor/github.com/sirupsen/logrus/doc.go
new file mode 100644
index 0000000..da67aba
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/doc.go
@@ -0,0 +1,26 @@
+/*
+Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
+
+
+The simplest way to use Logrus is simply the package-level exported logger:
+
+ package main
+
+ import (
+ log "github.com/sirupsen/logrus"
+ )
+
+ func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ "number": 1,
+ "size": 10,
+ }).Info("A walrus appears")
+ }
+
+Output:
+ time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
+
+For a full guide visit https://github.com/sirupsen/logrus
+*/
+package logrus
diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go
new file mode 100644
index 0000000..f6e062a
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/entry.go
@@ -0,0 +1,426 @@
+package logrus
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "os"
+ "reflect"
+ "runtime"
+ "strings"
+ "sync"
+ "time"
+)
+
+var (
+ bufferPool *sync.Pool
+
+ // qualified package name, cached at first use
+ logrusPackage string
+
+ // Positions in the call stack when tracing to report the calling method
+ minimumCallerDepth int
+
+ // Used for caller information initialisation
+ callerInitOnce sync.Once
+)
+
+const (
+ maximumCallerDepth int = 25
+ knownLogrusFrames int = 4
+)
+
+func init() {
+ bufferPool = &sync.Pool{
+ New: func() interface{} {
+ return new(bytes.Buffer)
+ },
+ }
+
+ // start at the bottom of the stack before the package-name cache is primed
+ minimumCallerDepth = 1
+}
+
+// Defines the key when adding errors using WithError.
+var ErrorKey = "error"
+
+// An entry is the final or intermediate Logrus logging entry. It contains all
+// the fields passed with WithField{,s}. It's finally logged when Trace, Debug,
+// Info, Warn, Error, Fatal or Panic is called on it. These objects can be
+// reused and passed around as much as you wish to avoid field duplication.
+type Entry struct {
+ Logger *Logger
+
+ // Contains all the fields set by the user.
+ Data Fields
+
+ // Time at which the log entry was created
+ Time time.Time
+
+ // Level the log entry was logged at: Trace, Debug, Info, Warn, Error, Fatal or Panic
+ // This field will be set on entry firing and the value will be equal to the one in Logger struct field.
+ Level Level
+
+ // Calling method, with package name
+ Caller *runtime.Frame
+
+ // Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic
+ Message string
+
+ // When formatter is called in entry.log(), a Buffer may be set to entry
+ Buffer *bytes.Buffer
+
+ // Contains the context set by the user. Useful for hook processing etc.
+ Context context.Context
+
+ // err may contain a field formatting error
+ err string
+}
+
+func NewEntry(logger *Logger) *Entry {
+ return &Entry{
+ Logger: logger,
+ // Default is three fields, plus one optional. Give a little extra room.
+ Data: make(Fields, 6),
+ }
+}
+
+// Returns the bytes representation of this entry from the formatter.
+func (entry *Entry) Bytes() ([]byte, error) {
+ return entry.Logger.Formatter.Format(entry)
+}
+
+// Returns the string representation from the reader and ultimately the
+// formatter.
+func (entry *Entry) String() (string, error) {
+ serialized, err := entry.Bytes()
+ if err != nil {
+ return "", err
+ }
+ str := string(serialized)
+ return str, nil
+}
+
+// Add an error as single field (using the key defined in ErrorKey) to the Entry.
+func (entry *Entry) WithError(err error) *Entry {
+ return entry.WithField(ErrorKey, err)
+}
+
+// Add a context to the Entry.
+func (entry *Entry) WithContext(ctx context.Context) *Entry {
+ dataCopy := make(Fields, len(entry.Data))
+ for k, v := range entry.Data {
+ dataCopy[k] = v
+ }
+ return &Entry{Logger: entry.Logger, Data: dataCopy, Time: entry.Time, err: entry.err, Context: ctx}
+}
+
+// Add a single field to the Entry.
+func (entry *Entry) WithField(key string, value interface{}) *Entry {
+ return entry.WithFields(Fields{key: value})
+}
+
+// Add a map of fields to the Entry.
+func (entry *Entry) WithFields(fields Fields) *Entry {
+ data := make(Fields, len(entry.Data)+len(fields))
+ for k, v := range entry.Data {
+ data[k] = v
+ }
+ fieldErr := entry.err
+ for k, v := range fields {
+ isErrField := false
+ if t := reflect.TypeOf(v); t != nil {
+ switch t.Kind() {
+ case reflect.Func:
+ isErrField = true
+ case reflect.Ptr:
+ isErrField = t.Elem().Kind() == reflect.Func
+ }
+ }
+ if isErrField {
+ tmp := fmt.Sprintf("can not add field %q", k)
+ if fieldErr != "" {
+ fieldErr = entry.err + ", " + tmp
+ } else {
+ fieldErr = tmp
+ }
+ } else {
+ data[k] = v
+ }
+ }
+ return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: fieldErr, Context: entry.Context}
+}
+
+// Overrides the time of the Entry.
+func (entry *Entry) WithTime(t time.Time) *Entry {
+ dataCopy := make(Fields, len(entry.Data))
+ for k, v := range entry.Data {
+ dataCopy[k] = v
+ }
+ return &Entry{Logger: entry.Logger, Data: dataCopy, Time: t, err: entry.err, Context: entry.Context}
+}
+
+// getPackageName reduces a fully qualified function name to the package name
+// There really ought to be to be a better way...
+func getPackageName(f string) string {
+ for {
+ lastPeriod := strings.LastIndex(f, ".")
+ lastSlash := strings.LastIndex(f, "/")
+ if lastPeriod > lastSlash {
+ f = f[:lastPeriod]
+ } else {
+ break
+ }
+ }
+
+ return f
+}
+
+// getCaller retrieves the name of the first non-logrus calling function
+func getCaller() *runtime.Frame {
+ // cache this package's fully-qualified name
+ callerInitOnce.Do(func() {
+ pcs := make([]uintptr, maximumCallerDepth)
+ _ = runtime.Callers(0, pcs)
+
+ // dynamic get the package name and the minimum caller depth
+ for i := 0; i < maximumCallerDepth; i++ {
+ funcName := runtime.FuncForPC(pcs[i]).Name()
+ if strings.Contains(funcName, "getCaller") {
+ logrusPackage = getPackageName(funcName)
+ break
+ }
+ }
+
+ minimumCallerDepth = knownLogrusFrames
+ })
+
+ // Restrict the lookback frames to avoid runaway lookups
+ pcs := make([]uintptr, maximumCallerDepth)
+ depth := runtime.Callers(minimumCallerDepth, pcs)
+ frames := runtime.CallersFrames(pcs[:depth])
+
+ for f, again := frames.Next(); again; f, again = frames.Next() {
+ pkg := getPackageName(f.Function)
+
+ // If the caller isn't part of this package, we're done
+ if pkg != logrusPackage {
+ return &f //nolint:scopelint
+ }
+ }
+
+ // if we got here, we failed to find the caller's context
+ return nil
+}
+
+func (entry Entry) HasCaller() (has bool) {
+ return entry.Logger != nil &&
+ entry.Logger.ReportCaller &&
+ entry.Caller != nil
+}
+
+// This function is not declared with a pointer value because otherwise
+// race conditions will occur when using multiple goroutines
+func (entry Entry) log(level Level, msg string) {
+ var buffer *bytes.Buffer
+
+ // Default to now, but allow users to override if they want.
+ //
+ // We don't have to worry about polluting future calls to Entry#log()
+ // with this assignment because this function is declared with a
+ // non-pointer receiver.
+ if entry.Time.IsZero() {
+ entry.Time = time.Now()
+ }
+
+ entry.Level = level
+ entry.Message = msg
+ entry.Logger.mu.Lock()
+ if entry.Logger.ReportCaller {
+ entry.Caller = getCaller()
+ }
+ entry.Logger.mu.Unlock()
+
+ entry.fireHooks()
+
+ buffer = bufferPool.Get().(*bytes.Buffer)
+ buffer.Reset()
+ defer bufferPool.Put(buffer)
+ entry.Buffer = buffer
+
+ entry.write()
+
+ entry.Buffer = nil
+
+ // To avoid Entry#log() returning a value that only would make sense for
+ // panic() to use in Entry#Panic(), we avoid the allocation by checking
+ // directly here.
+ if level <= PanicLevel {
+ panic(&entry)
+ }
+}
+
+func (entry *Entry) fireHooks() {
+ entry.Logger.mu.Lock()
+ defer entry.Logger.mu.Unlock()
+ err := entry.Logger.Hooks.Fire(entry.Level, entry)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
+ }
+}
+
+func (entry *Entry) write() {
+ entry.Logger.mu.Lock()
+ defer entry.Logger.mu.Unlock()
+ serialized, err := entry.Logger.Formatter.Format(entry)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
+ return
+ }
+ if _, err = entry.Logger.Out.Write(serialized); err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
+ }
+}
+
+func (entry *Entry) Log(level Level, args ...interface{}) {
+ if entry.Logger.IsLevelEnabled(level) {
+ entry.log(level, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Trace(args ...interface{}) {
+ entry.Log(TraceLevel, args...)
+}
+
+func (entry *Entry) Debug(args ...interface{}) {
+ entry.Log(DebugLevel, args...)
+}
+
+func (entry *Entry) Print(args ...interface{}) {
+ entry.Info(args...)
+}
+
+func (entry *Entry) Info(args ...interface{}) {
+ entry.Log(InfoLevel, args...)
+}
+
+func (entry *Entry) Warn(args ...interface{}) {
+ entry.Log(WarnLevel, args...)
+}
+
+func (entry *Entry) Warning(args ...interface{}) {
+ entry.Warn(args...)
+}
+
+func (entry *Entry) Error(args ...interface{}) {
+ entry.Log(ErrorLevel, args...)
+}
+
+func (entry *Entry) Fatal(args ...interface{}) {
+ entry.Log(FatalLevel, args...)
+ entry.Logger.Exit(1)
+}
+
+func (entry *Entry) Panic(args ...interface{}) {
+ entry.Log(PanicLevel, args...)
+ panic(fmt.Sprint(args...))
+}
+
+// Entry Printf family functions
+
+func (entry *Entry) Logf(level Level, format string, args ...interface{}) {
+ if entry.Logger.IsLevelEnabled(level) {
+ entry.Log(level, fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Tracef(format string, args ...interface{}) {
+ entry.Logf(TraceLevel, format, args...)
+}
+
+func (entry *Entry) Debugf(format string, args ...interface{}) {
+ entry.Logf(DebugLevel, format, args...)
+}
+
+func (entry *Entry) Infof(format string, args ...interface{}) {
+ entry.Logf(InfoLevel, format, args...)
+}
+
+func (entry *Entry) Printf(format string, args ...interface{}) {
+ entry.Infof(format, args...)
+}
+
+func (entry *Entry) Warnf(format string, args ...interface{}) {
+ entry.Logf(WarnLevel, format, args...)
+}
+
+func (entry *Entry) Warningf(format string, args ...interface{}) {
+ entry.Warnf(format, args...)
+}
+
+func (entry *Entry) Errorf(format string, args ...interface{}) {
+ entry.Logf(ErrorLevel, format, args...)
+}
+
+func (entry *Entry) Fatalf(format string, args ...interface{}) {
+ entry.Logf(FatalLevel, format, args...)
+ entry.Logger.Exit(1)
+}
+
+func (entry *Entry) Panicf(format string, args ...interface{}) {
+ entry.Logf(PanicLevel, format, args...)
+}
+
+// Entry Println family functions
+
+func (entry *Entry) Logln(level Level, args ...interface{}) {
+ if entry.Logger.IsLevelEnabled(level) {
+ entry.Log(level, entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Traceln(args ...interface{}) {
+ entry.Logln(TraceLevel, args...)
+}
+
+func (entry *Entry) Debugln(args ...interface{}) {
+ entry.Logln(DebugLevel, args...)
+}
+
+func (entry *Entry) Infoln(args ...interface{}) {
+ entry.Logln(InfoLevel, args...)
+}
+
+func (entry *Entry) Println(args ...interface{}) {
+ entry.Infoln(args...)
+}
+
+func (entry *Entry) Warnln(args ...interface{}) {
+ entry.Logln(WarnLevel, args...)
+}
+
+func (entry *Entry) Warningln(args ...interface{}) {
+ entry.Warnln(args...)
+}
+
+func (entry *Entry) Errorln(args ...interface{}) {
+ entry.Logln(ErrorLevel, args...)
+}
+
+func (entry *Entry) Fatalln(args ...interface{}) {
+ entry.Logln(FatalLevel, args...)
+ entry.Logger.Exit(1)
+}
+
+func (entry *Entry) Panicln(args ...interface{}) {
+ entry.Logln(PanicLevel, args...)
+}
+
+// Sprintlnn => Sprint no newline. This is to get the behavior of how
+// fmt.Sprintln where spaces are always added between operands, regardless of
+// their type. Instead of vendoring the Sprintln implementation to spare a
+// string allocation, we do the simplest thing.
+func (entry *Entry) sprintlnn(args ...interface{}) string {
+ msg := fmt.Sprintln(args...)
+ return msg[:len(msg)-1]
+}
diff --git a/vendor/github.com/sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go
new file mode 100644
index 0000000..42b04f6
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/exported.go
@@ -0,0 +1,225 @@
+package logrus
+
+import (
+ "context"
+ "io"
+ "time"
+)
+
+var (
+ // std is the name of the standard logger in stdlib `log`
+ std = New()
+)
+
+func StandardLogger() *Logger {
+ return std
+}
+
+// SetOutput sets the standard logger output.
+func SetOutput(out io.Writer) {
+ std.SetOutput(out)
+}
+
+// SetFormatter sets the standard logger formatter.
+func SetFormatter(formatter Formatter) {
+ std.SetFormatter(formatter)
+}
+
+// SetReportCaller sets whether the standard logger will include the calling
+// method as a field.
+func SetReportCaller(include bool) {
+ std.SetReportCaller(include)
+}
+
+// SetLevel sets the standard logger level.
+func SetLevel(level Level) {
+ std.SetLevel(level)
+}
+
+// GetLevel returns the standard logger level.
+func GetLevel() Level {
+ return std.GetLevel()
+}
+
+// IsLevelEnabled checks if the log level of the standard logger is greater than the level param
+func IsLevelEnabled(level Level) bool {
+ return std.IsLevelEnabled(level)
+}
+
+// AddHook adds a hook to the standard logger hooks.
+func AddHook(hook Hook) {
+ std.AddHook(hook)
+}
+
+// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
+func WithError(err error) *Entry {
+ return std.WithField(ErrorKey, err)
+}
+
+// WithContext creates an entry from the standard logger and adds a context to it.
+func WithContext(ctx context.Context) *Entry {
+ return std.WithContext(ctx)
+}
+
+// WithField creates an entry from the standard logger and adds a field to
+// it. If you want multiple fields, use `WithFields`.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithField(key string, value interface{}) *Entry {
+ return std.WithField(key, value)
+}
+
+// WithFields creates an entry from the standard logger and adds multiple
+// fields to it. This is simply a helper for `WithField`, invoking it
+// once for each field.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithFields(fields Fields) *Entry {
+ return std.WithFields(fields)
+}
+
+// WithTime creates an entry from the standard logger and overrides the time of
+// logs generated with it.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithTime(t time.Time) *Entry {
+ return std.WithTime(t)
+}
+
+// Trace logs a message at level Trace on the standard logger.
+func Trace(args ...interface{}) {
+ std.Trace(args...)
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func Debug(args ...interface{}) {
+ std.Debug(args...)
+}
+
+// Print logs a message at level Info on the standard logger.
+func Print(args ...interface{}) {
+ std.Print(args...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func Info(args ...interface{}) {
+ std.Info(args...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func Warn(args ...interface{}) {
+ std.Warn(args...)
+}
+
+// Warning logs a message at level Warn on the standard logger.
+func Warning(args ...interface{}) {
+ std.Warning(args...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func Error(args ...interface{}) {
+ std.Error(args...)
+}
+
+// Panic logs a message at level Panic on the standard logger.
+func Panic(args ...interface{}) {
+ std.Panic(args...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
+func Fatal(args ...interface{}) {
+ std.Fatal(args...)
+}
+
+// Tracef logs a message at level Trace on the standard logger.
+func Tracef(format string, args ...interface{}) {
+ std.Tracef(format, args...)
+}
+
+// Debugf logs a message at level Debug on the standard logger.
+func Debugf(format string, args ...interface{}) {
+ std.Debugf(format, args...)
+}
+
+// Printf logs a message at level Info on the standard logger.
+func Printf(format string, args ...interface{}) {
+ std.Printf(format, args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func Infof(format string, args ...interface{}) {
+ std.Infof(format, args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func Warnf(format string, args ...interface{}) {
+ std.Warnf(format, args...)
+}
+
+// Warningf logs a message at level Warn on the standard logger.
+func Warningf(format string, args ...interface{}) {
+ std.Warningf(format, args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func Errorf(format string, args ...interface{}) {
+ std.Errorf(format, args...)
+}
+
+// Panicf logs a message at level Panic on the standard logger.
+func Panicf(format string, args ...interface{}) {
+ std.Panicf(format, args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
+func Fatalf(format string, args ...interface{}) {
+ std.Fatalf(format, args...)
+}
+
+// Traceln logs a message at level Trace on the standard logger.
+func Traceln(args ...interface{}) {
+ std.Traceln(args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger.
+func Debugln(args ...interface{}) {
+ std.Debugln(args...)
+}
+
+// Println logs a message at level Info on the standard logger.
+func Println(args ...interface{}) {
+ std.Println(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger.
+func Infoln(args ...interface{}) {
+ std.Infoln(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger.
+func Warnln(args ...interface{}) {
+ std.Warnln(args...)
+}
+
+// Warningln logs a message at level Warn on the standard logger.
+func Warningln(args ...interface{}) {
+ std.Warningln(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger.
+func Errorln(args ...interface{}) {
+ std.Errorln(args...)
+}
+
+// Panicln logs a message at level Panic on the standard logger.
+func Panicln(args ...interface{}) {
+ std.Panicln(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
+func Fatalln(args ...interface{}) {
+ std.Fatalln(args...)
+}
diff --git a/vendor/github.com/sirupsen/logrus/formatter.go b/vendor/github.com/sirupsen/logrus/formatter.go
new file mode 100644
index 0000000..4088837
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/formatter.go
@@ -0,0 +1,78 @@
+package logrus
+
+import "time"
+
+// Default key names for the default fields
+const (
+ defaultTimestampFormat = time.RFC3339
+ FieldKeyMsg = "msg"
+ FieldKeyLevel = "level"
+ FieldKeyTime = "time"
+ FieldKeyLogrusError = "logrus_error"
+ FieldKeyFunc = "func"
+ FieldKeyFile = "file"
+)
+
+// The Formatter interface is used to implement a custom Formatter. It takes an
+// `Entry`. It exposes all the fields, including the default ones:
+//
+// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
+// * `entry.Data["time"]`. The timestamp.
+// * `entry.Data["level"]. The level the entry was logged at.
+//
+// Any additional fields added with `WithField` or `WithFields` are also in
+// `entry.Data`. Format is expected to return an array of bytes which are then
+// logged to `logger.Out`.
+type Formatter interface {
+ Format(*Entry) ([]byte, error)
+}
+
+// This is to not silently overwrite `time`, `msg`, `func` and `level` fields when
+// dumping it. If this code wasn't there doing:
+//
+// logrus.WithField("level", 1).Info("hello")
+//
+// Would just silently drop the user provided level. Instead with this code
+// it'll logged as:
+//
+// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
+//
+// It's not exported because it's still using Data in an opinionated way. It's to
+// avoid code duplication between the two default formatters.
+func prefixFieldClashes(data Fields, fieldMap FieldMap, reportCaller bool) {
+ timeKey := fieldMap.resolve(FieldKeyTime)
+ if t, ok := data[timeKey]; ok {
+ data["fields."+timeKey] = t
+ delete(data, timeKey)
+ }
+
+ msgKey := fieldMap.resolve(FieldKeyMsg)
+ if m, ok := data[msgKey]; ok {
+ data["fields."+msgKey] = m
+ delete(data, msgKey)
+ }
+
+ levelKey := fieldMap.resolve(FieldKeyLevel)
+ if l, ok := data[levelKey]; ok {
+ data["fields."+levelKey] = l
+ delete(data, levelKey)
+ }
+
+ logrusErrKey := fieldMap.resolve(FieldKeyLogrusError)
+ if l, ok := data[logrusErrKey]; ok {
+ data["fields."+logrusErrKey] = l
+ delete(data, logrusErrKey)
+ }
+
+ // If reportCaller is not set, 'func' will not conflict.
+ if reportCaller {
+ funcKey := fieldMap.resolve(FieldKeyFunc)
+ if l, ok := data[funcKey]; ok {
+ data["fields."+funcKey] = l
+ }
+ fileKey := fieldMap.resolve(FieldKeyFile)
+ if l, ok := data[fileKey]; ok {
+ data["fields."+fileKey] = l
+ }
+ }
+}
diff --git a/vendor/github.com/sirupsen/logrus/go.mod b/vendor/github.com/sirupsen/logrus/go.mod
new file mode 100644
index 0000000..d413296
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/go.mod
@@ -0,0 +1,11 @@
+module github.com/sirupsen/logrus
+
+require (
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/konsorten/go-windows-terminal-sequences v1.0.3
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/stretchr/testify v1.2.2
+ golang.org/x/sys v0.0.0-20190422165155-953cdadca894
+)
+
+go 1.13
diff --git a/vendor/github.com/sirupsen/logrus/go.sum b/vendor/github.com/sirupsen/logrus/go.sum
new file mode 100644
index 0000000..49c690f
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/go.sum
@@ -0,0 +1,12 @@
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/vendor/github.com/sirupsen/logrus/hooks.go b/vendor/github.com/sirupsen/logrus/hooks.go
new file mode 100644
index 0000000..3f151cd
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/hooks.go
@@ -0,0 +1,34 @@
+package logrus
+
+// A hook to be fired when logging on the logging levels returned from
+// `Levels()` on your implementation of the interface. Note that this is not
+// fired in a goroutine or a channel with workers, you should handle such
+// functionality yourself if your call is non-blocking and you don't wish for
+// the logging calls for levels returned from `Levels()` to block.
+type Hook interface {
+ Levels() []Level
+ Fire(*Entry) error
+}
+
+// Internal type for storing the hooks on a logger instance.
+type LevelHooks map[Level][]Hook
+
+// Add a hook to an instance of logger. This is called with
+// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
+func (hooks LevelHooks) Add(hook Hook) {
+ for _, level := range hook.Levels() {
+ hooks[level] = append(hooks[level], hook)
+ }
+}
+
+// Fire all the hooks for the passed level. Used by `entry.log` to fire
+// appropriate hooks for a log entry.
+func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
+ for _, hook := range hooks[level] {
+ if err := hook.Fire(entry); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go
new file mode 100644
index 0000000..ba7f237
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/json_formatter.go
@@ -0,0 +1,125 @@
+package logrus
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "runtime"
+)
+
+type fieldKey string
+
+// FieldMap allows customization of the key names for default fields.
+type FieldMap map[fieldKey]string
+
+func (f FieldMap) resolve(key fieldKey) string {
+ if k, ok := f[key]; ok {
+ return k
+ }
+
+ return string(key)
+}
+
+// JSONFormatter formats logs into parsable json
+type JSONFormatter struct {
+ // TimestampFormat sets the format used for marshaling timestamps.
+ TimestampFormat string
+
+ // DisableTimestamp allows disabling automatic timestamps in output
+ DisableTimestamp bool
+
+ // DisableHTMLEscape allows disabling html escaping in output
+ DisableHTMLEscape bool
+
+ // DataKey allows users to put all the log entry parameters into a nested dictionary at a given key.
+ DataKey string
+
+ // FieldMap allows users to customize the names of keys for default fields.
+ // As an example:
+ // formatter := &JSONFormatter{
+ // FieldMap: FieldMap{
+ // FieldKeyTime: "@timestamp",
+ // FieldKeyLevel: "@level",
+ // FieldKeyMsg: "@message",
+ // FieldKeyFunc: "@caller",
+ // },
+ // }
+ FieldMap FieldMap
+
+ // CallerPrettyfier can be set by the user to modify the content
+ // of the function and file keys in the json data when ReportCaller is
+ // activated. If any of the returned value is the empty string the
+ // corresponding key will be removed from json fields.
+ CallerPrettyfier func(*runtime.Frame) (function string, file string)
+
+ // PrettyPrint will indent all json logs
+ PrettyPrint bool
+}
+
+// Format renders a single log entry
+func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
+ data := make(Fields, len(entry.Data)+4)
+ for k, v := range entry.Data {
+ switch v := v.(type) {
+ case error:
+ // Otherwise errors are ignored by `encoding/json`
+ // https://github.com/sirupsen/logrus/issues/137
+ data[k] = v.Error()
+ default:
+ data[k] = v
+ }
+ }
+
+ if f.DataKey != "" {
+ newData := make(Fields, 4)
+ newData[f.DataKey] = data
+ data = newData
+ }
+
+ prefixFieldClashes(data, f.FieldMap, entry.HasCaller())
+
+ timestampFormat := f.TimestampFormat
+ if timestampFormat == "" {
+ timestampFormat = defaultTimestampFormat
+ }
+
+ if entry.err != "" {
+ data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err
+ }
+ if !f.DisableTimestamp {
+ data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
+ }
+ data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
+ data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
+ if entry.HasCaller() {
+ funcVal := entry.Caller.Function
+ fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
+ if f.CallerPrettyfier != nil {
+ funcVal, fileVal = f.CallerPrettyfier(entry.Caller)
+ }
+ if funcVal != "" {
+ data[f.FieldMap.resolve(FieldKeyFunc)] = funcVal
+ }
+ if fileVal != "" {
+ data[f.FieldMap.resolve(FieldKeyFile)] = fileVal
+ }
+ }
+
+ var b *bytes.Buffer
+ if entry.Buffer != nil {
+ b = entry.Buffer
+ } else {
+ b = &bytes.Buffer{}
+ }
+
+ encoder := json.NewEncoder(b)
+ encoder.SetEscapeHTML(!f.DisableHTMLEscape)
+ if f.PrettyPrint {
+ encoder.SetIndent("", " ")
+ }
+ if err := encoder.Encode(data); err != nil {
+ return nil, fmt.Errorf("failed to marshal fields to JSON, %v", err)
+ }
+
+ return b.Bytes(), nil
+}
diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go
new file mode 100644
index 0000000..6fdda74
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/logger.go
@@ -0,0 +1,352 @@
+package logrus
+
+import (
+ "context"
+ "io"
+ "os"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+type Logger struct {
+ // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
+ // file, or leave it default which is `os.Stderr`. You can also set this to
+ // something more adventurous, such as logging to Kafka.
+ Out io.Writer
+ // Hooks for the logger instance. These allow firing events based on logging
+ // levels and log entries. For example, to send errors to an error tracking
+ // service, log to StatsD or dump the core on fatal errors.
+ Hooks LevelHooks
+ // All log entries pass through the formatter before logged to Out. The
+ // included formatters are `TextFormatter` and `JSONFormatter` for which
+ // TextFormatter is the default. In development (when a TTY is attached) it
+ // logs with colors, but to a file it wouldn't. You can easily implement your
+ // own that implements the `Formatter` interface, see the `README` or included
+ // formatters for examples.
+ Formatter Formatter
+
+ // Flag for whether to log caller info (off by default)
+ ReportCaller bool
+
+ // The logging level the logger should log at. This is typically (and defaults
+ // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
+ // logged.
+ Level Level
+ // Used to sync writing to the log. Locking is enabled by Default
+ mu MutexWrap
+ // Reusable empty entry
+ entryPool sync.Pool
+ // Function to exit the application, defaults to `os.Exit()`
+ ExitFunc exitFunc
+}
+
+type exitFunc func(int)
+
+type MutexWrap struct {
+ lock sync.Mutex
+ disabled bool
+}
+
+func (mw *MutexWrap) Lock() {
+ if !mw.disabled {
+ mw.lock.Lock()
+ }
+}
+
+func (mw *MutexWrap) Unlock() {
+ if !mw.disabled {
+ mw.lock.Unlock()
+ }
+}
+
+func (mw *MutexWrap) Disable() {
+ mw.disabled = true
+}
+
+// Creates a new logger. Configuration should be set by changing `Formatter`,
+// `Out` and `Hooks` directly on the default logger instance. You can also just
+// instantiate your own:
+//
+// var log = &logrus.Logger{
+// Out: os.Stderr,
+// Formatter: new(logrus.JSONFormatter),
+// Hooks: make(logrus.LevelHooks),
+// Level: logrus.DebugLevel,
+// }
+//
+// It's recommended to make this a global instance called `log`.
+func New() *Logger {
+ return &Logger{
+ Out: os.Stderr,
+ Formatter: new(TextFormatter),
+ Hooks: make(LevelHooks),
+ Level: InfoLevel,
+ ExitFunc: os.Exit,
+ ReportCaller: false,
+ }
+}
+
+func (logger *Logger) newEntry() *Entry {
+ entry, ok := logger.entryPool.Get().(*Entry)
+ if ok {
+ return entry
+ }
+ return NewEntry(logger)
+}
+
+func (logger *Logger) releaseEntry(entry *Entry) {
+ entry.Data = map[string]interface{}{}
+ logger.entryPool.Put(entry)
+}
+
+// WithField allocates a new entry and adds a field to it.
+// Debug, Print, Info, Warn, Error, Fatal or Panic must be then applied to
+// this new returned entry.
+// If you want multiple fields, use `WithFields`.
+func (logger *Logger) WithField(key string, value interface{}) *Entry {
+ entry := logger.newEntry()
+ defer logger.releaseEntry(entry)
+ return entry.WithField(key, value)
+}
+
+// Adds a struct of fields to the log entry. All it does is call `WithField` for
+// each `Field`.
+func (logger *Logger) WithFields(fields Fields) *Entry {
+ entry := logger.newEntry()
+ defer logger.releaseEntry(entry)
+ return entry.WithFields(fields)
+}
+
+// Add an error as single field to the log entry. All it does is call
+// `WithError` for the given `error`.
+func (logger *Logger) WithError(err error) *Entry {
+ entry := logger.newEntry()
+ defer logger.releaseEntry(entry)
+ return entry.WithError(err)
+}
+
+// Add a context to the log entry.
+func (logger *Logger) WithContext(ctx context.Context) *Entry {
+ entry := logger.newEntry()
+ defer logger.releaseEntry(entry)
+ return entry.WithContext(ctx)
+}
+
+// Overrides the time of the log entry.
+func (logger *Logger) WithTime(t time.Time) *Entry {
+ entry := logger.newEntry()
+ defer logger.releaseEntry(entry)
+ return entry.WithTime(t)
+}
+
+func (logger *Logger) Logf(level Level, format string, args ...interface{}) {
+ if logger.IsLevelEnabled(level) {
+ entry := logger.newEntry()
+ entry.Logf(level, format, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Tracef(format string, args ...interface{}) {
+ logger.Logf(TraceLevel, format, args...)
+}
+
+func (logger *Logger) Debugf(format string, args ...interface{}) {
+ logger.Logf(DebugLevel, format, args...)
+}
+
+func (logger *Logger) Infof(format string, args ...interface{}) {
+ logger.Logf(InfoLevel, format, args...)
+}
+
+func (logger *Logger) Printf(format string, args ...interface{}) {
+ entry := logger.newEntry()
+ entry.Printf(format, args...)
+ logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warnf(format string, args ...interface{}) {
+ logger.Logf(WarnLevel, format, args...)
+}
+
+func (logger *Logger) Warningf(format string, args ...interface{}) {
+ logger.Warnf(format, args...)
+}
+
+func (logger *Logger) Errorf(format string, args ...interface{}) {
+ logger.Logf(ErrorLevel, format, args...)
+}
+
+func (logger *Logger) Fatalf(format string, args ...interface{}) {
+ logger.Logf(FatalLevel, format, args...)
+ logger.Exit(1)
+}
+
+func (logger *Logger) Panicf(format string, args ...interface{}) {
+ logger.Logf(PanicLevel, format, args...)
+}
+
+func (logger *Logger) Log(level Level, args ...interface{}) {
+ if logger.IsLevelEnabled(level) {
+ entry := logger.newEntry()
+ entry.Log(level, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Trace(args ...interface{}) {
+ logger.Log(TraceLevel, args...)
+}
+
+func (logger *Logger) Debug(args ...interface{}) {
+ logger.Log(DebugLevel, args...)
+}
+
+func (logger *Logger) Info(args ...interface{}) {
+ logger.Log(InfoLevel, args...)
+}
+
+func (logger *Logger) Print(args ...interface{}) {
+ entry := logger.newEntry()
+ entry.Print(args...)
+ logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warn(args ...interface{}) {
+ logger.Log(WarnLevel, args...)
+}
+
+func (logger *Logger) Warning(args ...interface{}) {
+ logger.Warn(args...)
+}
+
+func (logger *Logger) Error(args ...interface{}) {
+ logger.Log(ErrorLevel, args...)
+}
+
+func (logger *Logger) Fatal(args ...interface{}) {
+ logger.Log(FatalLevel, args...)
+ logger.Exit(1)
+}
+
+func (logger *Logger) Panic(args ...interface{}) {
+ logger.Log(PanicLevel, args...)
+}
+
+func (logger *Logger) Logln(level Level, args ...interface{}) {
+ if logger.IsLevelEnabled(level) {
+ entry := logger.newEntry()
+ entry.Logln(level, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Traceln(args ...interface{}) {
+ logger.Logln(TraceLevel, args...)
+}
+
+func (logger *Logger) Debugln(args ...interface{}) {
+ logger.Logln(DebugLevel, args...)
+}
+
+func (logger *Logger) Infoln(args ...interface{}) {
+ logger.Logln(InfoLevel, args...)
+}
+
+func (logger *Logger) Println(args ...interface{}) {
+ entry := logger.newEntry()
+ entry.Println(args...)
+ logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warnln(args ...interface{}) {
+ logger.Logln(WarnLevel, args...)
+}
+
+func (logger *Logger) Warningln(args ...interface{}) {
+ logger.Warnln(args...)
+}
+
+func (logger *Logger) Errorln(args ...interface{}) {
+ logger.Logln(ErrorLevel, args...)
+}
+
+func (logger *Logger) Fatalln(args ...interface{}) {
+ logger.Logln(FatalLevel, args...)
+ logger.Exit(1)
+}
+
+func (logger *Logger) Panicln(args ...interface{}) {
+ logger.Logln(PanicLevel, args...)
+}
+
+func (logger *Logger) Exit(code int) {
+ runHandlers()
+ if logger.ExitFunc == nil {
+ logger.ExitFunc = os.Exit
+ }
+ logger.ExitFunc(code)
+}
+
+//When file is opened with appending mode, it's safe to
+//write concurrently to a file (within 4k message on Linux).
+//In these cases user can choose to disable the lock.
+func (logger *Logger) SetNoLock() {
+ logger.mu.Disable()
+}
+
+func (logger *Logger) level() Level {
+ return Level(atomic.LoadUint32((*uint32)(&logger.Level)))
+}
+
+// SetLevel sets the logger level.
+func (logger *Logger) SetLevel(level Level) {
+ atomic.StoreUint32((*uint32)(&logger.Level), uint32(level))
+}
+
+// GetLevel returns the logger level.
+func (logger *Logger) GetLevel() Level {
+ return logger.level()
+}
+
+// AddHook adds a hook to the logger hooks.
+func (logger *Logger) AddHook(hook Hook) {
+ logger.mu.Lock()
+ defer logger.mu.Unlock()
+ logger.Hooks.Add(hook)
+}
+
+// IsLevelEnabled checks if the log level of the logger is greater than the level param
+func (logger *Logger) IsLevelEnabled(level Level) bool {
+ return logger.level() >= level
+}
+
+// SetFormatter sets the logger formatter.
+func (logger *Logger) SetFormatter(formatter Formatter) {
+ logger.mu.Lock()
+ defer logger.mu.Unlock()
+ logger.Formatter = formatter
+}
+
+// SetOutput sets the logger output.
+func (logger *Logger) SetOutput(output io.Writer) {
+ logger.mu.Lock()
+ defer logger.mu.Unlock()
+ logger.Out = output
+}
+
+func (logger *Logger) SetReportCaller(reportCaller bool) {
+ logger.mu.Lock()
+ defer logger.mu.Unlock()
+ logger.ReportCaller = reportCaller
+}
+
+// ReplaceHooks replaces the logger hooks and returns the old ones
+func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks {
+ logger.mu.Lock()
+ oldHooks := logger.Hooks
+ logger.Hooks = hooks
+ logger.mu.Unlock()
+ return oldHooks
+}
diff --git a/vendor/github.com/sirupsen/logrus/logrus.go b/vendor/github.com/sirupsen/logrus/logrus.go
new file mode 100644
index 0000000..2f16224
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/logrus.go
@@ -0,0 +1,186 @@
+package logrus
+
+import (
+ "fmt"
+ "log"
+ "strings"
+)
+
+// Fields type, used to pass to `WithFields`.
+type Fields map[string]interface{}
+
+// Level type
+type Level uint32
+
+// Convert the Level to a string. E.g. PanicLevel becomes "panic".
+func (level Level) String() string {
+ if b, err := level.MarshalText(); err == nil {
+ return string(b)
+ } else {
+ return "unknown"
+ }
+}
+
+// ParseLevel takes a string level and returns the Logrus log level constant.
+func ParseLevel(lvl string) (Level, error) {
+ switch strings.ToLower(lvl) {
+ case "panic":
+ return PanicLevel, nil
+ case "fatal":
+ return FatalLevel, nil
+ case "error":
+ return ErrorLevel, nil
+ case "warn", "warning":
+ return WarnLevel, nil
+ case "info":
+ return InfoLevel, nil
+ case "debug":
+ return DebugLevel, nil
+ case "trace":
+ return TraceLevel, nil
+ }
+
+ var l Level
+ return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler.
+func (level *Level) UnmarshalText(text []byte) error {
+ l, err := ParseLevel(string(text))
+ if err != nil {
+ return err
+ }
+
+ *level = l
+
+ return nil
+}
+
+func (level Level) MarshalText() ([]byte, error) {
+ switch level {
+ case TraceLevel:
+ return []byte("trace"), nil
+ case DebugLevel:
+ return []byte("debug"), nil
+ case InfoLevel:
+ return []byte("info"), nil
+ case WarnLevel:
+ return []byte("warning"), nil
+ case ErrorLevel:
+ return []byte("error"), nil
+ case FatalLevel:
+ return []byte("fatal"), nil
+ case PanicLevel:
+ return []byte("panic"), nil
+ }
+
+ return nil, fmt.Errorf("not a valid logrus level %d", level)
+}
+
+// A constant exposing all logging levels
+var AllLevels = []Level{
+ PanicLevel,
+ FatalLevel,
+ ErrorLevel,
+ WarnLevel,
+ InfoLevel,
+ DebugLevel,
+ TraceLevel,
+}
+
+// These are the different logging levels. You can set the logging level to log
+// on your instance of logger, obtained with `logrus.New()`.
+const (
+ // PanicLevel level, highest level of severity. Logs and then calls panic with the
+ // message passed to Debug, Info, ...
+ PanicLevel Level = iota
+ // FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the
+ // logging level is set to Panic.
+ FatalLevel
+ // ErrorLevel level. Logs. Used for errors that should definitely be noted.
+ // Commonly used for hooks to send errors to an error tracking service.
+ ErrorLevel
+ // WarnLevel level. Non-critical entries that deserve eyes.
+ WarnLevel
+ // InfoLevel level. General operational entries about what's going on inside the
+ // application.
+ InfoLevel
+ // DebugLevel level. Usually only enabled when debugging. Very verbose logging.
+ DebugLevel
+ // TraceLevel level. Designates finer-grained informational events than the Debug.
+ TraceLevel
+)
+
+// Won't compile if StdLogger can't be realized by a log.Logger
+var (
+ _ StdLogger = &log.Logger{}
+ _ StdLogger = &Entry{}
+ _ StdLogger = &Logger{}
+)
+
+// StdLogger is what your logrus-enabled library should take, that way
+// it'll accept a stdlib logger and a logrus logger. There's no standard
+// interface, this is the closest we get, unfortunately.
+type StdLogger interface {
+ Print(...interface{})
+ Printf(string, ...interface{})
+ Println(...interface{})
+
+ Fatal(...interface{})
+ Fatalf(string, ...interface{})
+ Fatalln(...interface{})
+
+ Panic(...interface{})
+ Panicf(string, ...interface{})
+ Panicln(...interface{})
+}
+
+// The FieldLogger interface generalizes the Entry and Logger types
+type FieldLogger interface {
+ WithField(key string, value interface{}) *Entry
+ WithFields(fields Fields) *Entry
+ WithError(err error) *Entry
+
+ Debugf(format string, args ...interface{})
+ Infof(format string, args ...interface{})
+ Printf(format string, args ...interface{})
+ Warnf(format string, args ...interface{})
+ Warningf(format string, args ...interface{})
+ Errorf(format string, args ...interface{})
+ Fatalf(format string, args ...interface{})
+ Panicf(format string, args ...interface{})
+
+ Debug(args ...interface{})
+ Info(args ...interface{})
+ Print(args ...interface{})
+ Warn(args ...interface{})
+ Warning(args ...interface{})
+ Error(args ...interface{})
+ Fatal(args ...interface{})
+ Panic(args ...interface{})
+
+ Debugln(args ...interface{})
+ Infoln(args ...interface{})
+ Println(args ...interface{})
+ Warnln(args ...interface{})
+ Warningln(args ...interface{})
+ Errorln(args ...interface{})
+ Fatalln(args ...interface{})
+ Panicln(args ...interface{})
+
+ // IsDebugEnabled() bool
+ // IsInfoEnabled() bool
+ // IsWarnEnabled() bool
+ // IsErrorEnabled() bool
+ // IsFatalEnabled() bool
+ // IsPanicEnabled() bool
+}
+
+// Ext1FieldLogger (the first extension to FieldLogger) is superfluous, it is
+// here for consistancy. Do not use. Use Logger or Entry instead.
+type Ext1FieldLogger interface {
+ FieldLogger
+ Tracef(format string, args ...interface{})
+ Trace(args ...interface{})
+ Traceln(args ...interface{})
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go
new file mode 100644
index 0000000..2403de9
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go
@@ -0,0 +1,11 @@
+// +build appengine
+
+package logrus
+
+import (
+ "io"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+ return true
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go
new file mode 100644
index 0000000..4997899
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go
@@ -0,0 +1,13 @@
+// +build darwin dragonfly freebsd netbsd openbsd
+// +build !js
+
+package logrus
+
+import "golang.org/x/sys/unix"
+
+const ioctlReadTermios = unix.TIOCGETA
+
+func isTerminal(fd int) bool {
+ _, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
+ return err == nil
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_js.go b/vendor/github.com/sirupsen/logrus/terminal_check_js.go
new file mode 100644
index 0000000..ebdae3e
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_js.go
@@ -0,0 +1,7 @@
+// +build js
+
+package logrus
+
+func isTerminal(fd int) bool {
+ return false
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go b/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go
new file mode 100644
index 0000000..97af92c
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go
@@ -0,0 +1,11 @@
+// +build js nacl plan9
+
+package logrus
+
+import (
+ "io"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+ return false
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go
new file mode 100644
index 0000000..3293fb3
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go
@@ -0,0 +1,17 @@
+// +build !appengine,!js,!windows,!nacl,!plan9
+
+package logrus
+
+import (
+ "io"
+ "os"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+ switch v := w.(type) {
+ case *os.File:
+ return isTerminal(int(v.Fd()))
+ default:
+ return false
+ }
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go b/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go
new file mode 100644
index 0000000..f6710b3
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go
@@ -0,0 +1,11 @@
+package logrus
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func isTerminal(fd int) bool {
+ _, err := unix.IoctlGetTermio(fd, unix.TCGETA)
+ return err == nil
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
new file mode 100644
index 0000000..cc4fe6e
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
@@ -0,0 +1,13 @@
+// +build linux aix
+// +build !js
+
+package logrus
+
+import "golang.org/x/sys/unix"
+
+const ioctlReadTermios = unix.TCGETS
+
+func isTerminal(fd int) bool {
+ _, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
+ return err == nil
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_windows.go b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go
new file mode 100644
index 0000000..572889d
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go
@@ -0,0 +1,34 @@
+// +build !appengine,!js,windows
+
+package logrus
+
+import (
+ "io"
+ "os"
+ "syscall"
+
+ sequences "github.com/konsorten/go-windows-terminal-sequences"
+)
+
+func initTerminal(w io.Writer) {
+ switch v := w.(type) {
+ case *os.File:
+ sequences.EnableVirtualTerminalProcessing(syscall.Handle(v.Fd()), true)
+ }
+}
+
+func checkIfTerminal(w io.Writer) bool {
+ var ret bool
+ switch v := w.(type) {
+ case *os.File:
+ var mode uint32
+ err := syscall.GetConsoleMode(syscall.Handle(v.Fd()), &mode)
+ ret = (err == nil)
+ default:
+ ret = false
+ }
+ if ret {
+ initTerminal(w)
+ }
+ return ret
+}
diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go
new file mode 100644
index 0000000..3c28b54
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/text_formatter.go
@@ -0,0 +1,334 @@
+package logrus
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+ "unicode/utf8"
+)
+
+const (
+ red = 31
+ yellow = 33
+ blue = 36
+ gray = 37
+)
+
+var baseTimestamp time.Time
+
+func init() {
+ baseTimestamp = time.Now()
+}
+
+// TextFormatter formats logs into text
+type TextFormatter struct {
+ // Set to true to bypass checking for a TTY before outputting colors.
+ ForceColors bool
+
+ // Force disabling colors.
+ DisableColors bool
+
+ // Force quoting of all values
+ ForceQuote bool
+
+ // DisableQuote disables quoting for all values.
+ // DisableQuote will have a lower priority than ForceQuote.
+ // If both of them are set to true, quote will be forced on all values.
+ DisableQuote bool
+
+ // Override coloring based on CLICOLOR and CLICOLOR_FORCE. - https://bixense.com/clicolors/
+ EnvironmentOverrideColors bool
+
+ // Disable timestamp logging. useful when output is redirected to logging
+ // system that already adds timestamps.
+ DisableTimestamp bool
+
+ // Enable logging the full timestamp when a TTY is attached instead of just
+ // the time passed since beginning of execution.
+ FullTimestamp bool
+
+ // TimestampFormat to use for display when a full timestamp is printed
+ TimestampFormat string
+
+ // The fields are sorted by default for a consistent output. For applications
+ // that log extremely frequently and don't use the JSON formatter this may not
+ // be desired.
+ DisableSorting bool
+
+ // The keys sorting function, when uninitialized it uses sort.Strings.
+ SortingFunc func([]string)
+
+ // Disables the truncation of the level text to 4 characters.
+ DisableLevelTruncation bool
+
+ // PadLevelText Adds padding the level text so that all the levels output at the same length
+ // PadLevelText is a superset of the DisableLevelTruncation option
+ PadLevelText bool
+
+ // QuoteEmptyFields will wrap empty fields in quotes if true
+ QuoteEmptyFields bool
+
+ // Whether the logger's out is to a terminal
+ isTerminal bool
+
+ // FieldMap allows users to customize the names of keys for default fields.
+ // As an example:
+ // formatter := &TextFormatter{
+ // FieldMap: FieldMap{
+ // FieldKeyTime: "@timestamp",
+ // FieldKeyLevel: "@level",
+ // FieldKeyMsg: "@message"}}
+ FieldMap FieldMap
+
+ // CallerPrettyfier can be set by the user to modify the content
+ // of the function and file keys in the data when ReportCaller is
+ // activated. If any of the returned value is the empty string the
+ // corresponding key will be removed from fields.
+ CallerPrettyfier func(*runtime.Frame) (function string, file string)
+
+ terminalInitOnce sync.Once
+
+ // The max length of the level text, generated dynamically on init
+ levelTextMaxLength int
+}
+
+func (f *TextFormatter) init(entry *Entry) {
+ if entry.Logger != nil {
+ f.isTerminal = checkIfTerminal(entry.Logger.Out)
+ }
+ // Get the max length of the level text
+ for _, level := range AllLevels {
+ levelTextLength := utf8.RuneCount([]byte(level.String()))
+ if levelTextLength > f.levelTextMaxLength {
+ f.levelTextMaxLength = levelTextLength
+ }
+ }
+}
+
+func (f *TextFormatter) isColored() bool {
+ isColored := f.ForceColors || (f.isTerminal && (runtime.GOOS != "windows"))
+
+ if f.EnvironmentOverrideColors {
+ switch force, ok := os.LookupEnv("CLICOLOR_FORCE"); {
+ case ok && force != "0":
+ isColored = true
+ case ok && force == "0", os.Getenv("CLICOLOR") == "0":
+ isColored = false
+ }
+ }
+
+ return isColored && !f.DisableColors
+}
+
+// Format renders a single log entry
+func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
+ data := make(Fields)
+ for k, v := range entry.Data {
+ data[k] = v
+ }
+ prefixFieldClashes(data, f.FieldMap, entry.HasCaller())
+ keys := make([]string, 0, len(data))
+ for k := range data {
+ keys = append(keys, k)
+ }
+
+ var funcVal, fileVal string
+
+ fixedKeys := make([]string, 0, 4+len(data))
+ if !f.DisableTimestamp {
+ fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime))
+ }
+ fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLevel))
+ if entry.Message != "" {
+ fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg))
+ }
+ if entry.err != "" {
+ fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError))
+ }
+ if entry.HasCaller() {
+ if f.CallerPrettyfier != nil {
+ funcVal, fileVal = f.CallerPrettyfier(entry.Caller)
+ } else {
+ funcVal = entry.Caller.Function
+ fileVal = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
+ }
+
+ if funcVal != "" {
+ fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFunc))
+ }
+ if fileVal != "" {
+ fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFile))
+ }
+ }
+
+ if !f.DisableSorting {
+ if f.SortingFunc == nil {
+ sort.Strings(keys)
+ fixedKeys = append(fixedKeys, keys...)
+ } else {
+ if !f.isColored() {
+ fixedKeys = append(fixedKeys, keys...)
+ f.SortingFunc(fixedKeys)
+ } else {
+ f.SortingFunc(keys)
+ }
+ }
+ } else {
+ fixedKeys = append(fixedKeys, keys...)
+ }
+
+ var b *bytes.Buffer
+ if entry.Buffer != nil {
+ b = entry.Buffer
+ } else {
+ b = &bytes.Buffer{}
+ }
+
+ f.terminalInitOnce.Do(func() { f.init(entry) })
+
+ timestampFormat := f.TimestampFormat
+ if timestampFormat == "" {
+ timestampFormat = defaultTimestampFormat
+ }
+ if f.isColored() {
+ f.printColored(b, entry, keys, data, timestampFormat)
+ } else {
+
+ for _, key := range fixedKeys {
+ var value interface{}
+ switch {
+ case key == f.FieldMap.resolve(FieldKeyTime):
+ value = entry.Time.Format(timestampFormat)
+ case key == f.FieldMap.resolve(FieldKeyLevel):
+ value = entry.Level.String()
+ case key == f.FieldMap.resolve(FieldKeyMsg):
+ value = entry.Message
+ case key == f.FieldMap.resolve(FieldKeyLogrusError):
+ value = entry.err
+ case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller():
+ value = funcVal
+ case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller():
+ value = fileVal
+ default:
+ value = data[key]
+ }
+ f.appendKeyValue(b, key, value)
+ }
+ }
+
+ b.WriteByte('\n')
+ return b.Bytes(), nil
+}
+
+func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, data Fields, timestampFormat string) {
+ var levelColor int
+ switch entry.Level {
+ case DebugLevel, TraceLevel:
+ levelColor = gray
+ case WarnLevel:
+ levelColor = yellow
+ case ErrorLevel, FatalLevel, PanicLevel:
+ levelColor = red
+ default:
+ levelColor = blue
+ }
+
+ levelText := strings.ToUpper(entry.Level.String())
+ if !f.DisableLevelTruncation && !f.PadLevelText {
+ levelText = levelText[0:4]
+ }
+ if f.PadLevelText {
+ // Generates the format string used in the next line, for example "%-6s" or "%-7s".
+ // Based on the max level text length.
+ formatString := "%-" + strconv.Itoa(f.levelTextMaxLength) + "s"
+ // Formats the level text by appending spaces up to the max length, for example:
+ // - "INFO "
+ // - "WARNING"
+ levelText = fmt.Sprintf(formatString, levelText)
+ }
+
+ // Remove a single newline if it already exists in the message to keep
+ // the behavior of logrus text_formatter the same as the stdlib log package
+ entry.Message = strings.TrimSuffix(entry.Message, "\n")
+
+ caller := ""
+ if entry.HasCaller() {
+ funcVal := fmt.Sprintf("%s()", entry.Caller.Function)
+ fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
+
+ if f.CallerPrettyfier != nil {
+ funcVal, fileVal = f.CallerPrettyfier(entry.Caller)
+ }
+
+ if fileVal == "" {
+ caller = funcVal
+ } else if funcVal == "" {
+ caller = fileVal
+ } else {
+ caller = fileVal + " " + funcVal
+ }
+ }
+
+ switch {
+ case f.DisableTimestamp:
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message)
+ case !f.FullTimestamp:
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message)
+ default:
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message)
+ }
+ for _, k := range keys {
+ v := data[k]
+ fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
+ f.appendValue(b, v)
+ }
+}
+
+func (f *TextFormatter) needsQuoting(text string) bool {
+ if f.ForceQuote {
+ return true
+ }
+ if f.QuoteEmptyFields && len(text) == 0 {
+ return true
+ }
+ if f.DisableQuote {
+ return false
+ }
+ for _, ch := range text {
+ if !((ch >= 'a' && ch <= 'z') ||
+ (ch >= 'A' && ch <= 'Z') ||
+ (ch >= '0' && ch <= '9') ||
+ ch == '-' || ch == '.' || ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') {
+ return true
+ }
+ }
+ return false
+}
+
+func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
+ if b.Len() > 0 {
+ b.WriteByte(' ')
+ }
+ b.WriteString(key)
+ b.WriteByte('=')
+ f.appendValue(b, value)
+}
+
+func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
+ stringVal, ok := value.(string)
+ if !ok {
+ stringVal = fmt.Sprint(value)
+ }
+
+ if !f.needsQuoting(stringVal) {
+ b.WriteString(stringVal)
+ } else {
+ b.WriteString(fmt.Sprintf("%q", stringVal))
+ }
+}
diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go
new file mode 100644
index 0000000..72e8e3a
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/writer.go
@@ -0,0 +1,70 @@
+package logrus
+
+import (
+ "bufio"
+ "io"
+ "runtime"
+)
+
+// Writer at INFO level. See WriterLevel for details.
+func (logger *Logger) Writer() *io.PipeWriter {
+ return logger.WriterLevel(InfoLevel)
+}
+
+// WriterLevel returns an io.Writer that can be used to write arbitrary text to
+// the logger at the given log level. Each line written to the writer will be
+// printed in the usual way using formatters and hooks. The writer is part of an
+// io.Pipe and it is the callers responsibility to close the writer when done.
+// This can be used to override the standard library logger easily.
+func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
+ return NewEntry(logger).WriterLevel(level)
+}
+
+func (entry *Entry) Writer() *io.PipeWriter {
+ return entry.WriterLevel(InfoLevel)
+}
+
+func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
+ reader, writer := io.Pipe()
+
+ var printFunc func(args ...interface{})
+
+ switch level {
+ case TraceLevel:
+ printFunc = entry.Trace
+ case DebugLevel:
+ printFunc = entry.Debug
+ case InfoLevel:
+ printFunc = entry.Info
+ case WarnLevel:
+ printFunc = entry.Warn
+ case ErrorLevel:
+ printFunc = entry.Error
+ case FatalLevel:
+ printFunc = entry.Fatal
+ case PanicLevel:
+ printFunc = entry.Panic
+ default:
+ printFunc = entry.Print
+ }
+
+ go entry.writerScanner(reader, printFunc)
+ runtime.SetFinalizer(writer, writerFinalizer)
+
+ return writer
+}
+
+func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
+ scanner := bufio.NewScanner(reader)
+ for scanner.Scan() {
+ printFunc(scanner.Text())
+ }
+ if err := scanner.Err(); err != nil {
+ entry.Errorf("Error while reading from Writer: %s", err)
+ }
+ reader.Close()
+}
+
+func writerFinalizer(writer *io.PipeWriter) {
+ writer.Close()
+}
diff --git a/vendor/github.com/sonatard/noctx/.gitignore b/vendor/github.com/sonatard/noctx/.gitignore
new file mode 100644
index 0000000..2d83068
--- /dev/null
+++ b/vendor/github.com/sonatard/noctx/.gitignore
@@ -0,0 +1 @@
+coverage.out
diff --git a/vendor/github.com/sonatard/noctx/.golangci.yml b/vendor/github.com/sonatard/noctx/.golangci.yml
new file mode 100644
index 0000000..1580acd
--- /dev/null
+++ b/vendor/github.com/sonatard/noctx/.golangci.yml
@@ -0,0 +1,20 @@
+run:
+
+linters-settings:
+ govet:
+ enable-all: true
+
+linters:
+ enable-all: true
+ disable:
+ - gochecknoglobals
+ - gomnd
+ - gocognit
+ - nestif
+
+issues:
+ exclude-rules:
+ - path: reqwithoutctx/ssa.go
+ text: "Consider preallocating `exts`"
+ linters:
+ - prealloc
diff --git a/vendor/github.com/sonatard/noctx/LICENSE b/vendor/github.com/sonatard/noctx/LICENSE
new file mode 100644
index 0000000..a00d572
--- /dev/null
+++ b/vendor/github.com/sonatard/noctx/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2020 sonatard
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/sonatard/noctx/Makefile b/vendor/github.com/sonatard/noctx/Makefile
new file mode 100644
index 0000000..1a27f6b
--- /dev/null
+++ b/vendor/github.com/sonatard/noctx/Makefile
@@ -0,0 +1,16 @@
+.PHONY: all imports test lint
+
+all: imports test lint
+
+imports:
+ goimports -w ./
+
+test:
+ go test -race ./...
+
+test_coverage:
+ go test -race -coverprofile=coverage.out -covermode=atomic ./...
+
+lint:
+ golangci-lint run ./...
+
diff --git a/vendor/github.com/sonatard/noctx/README.md b/vendor/github.com/sonatard/noctx/README.md
new file mode 100644
index 0000000..bfe9782
--- /dev/null
+++ b/vendor/github.com/sonatard/noctx/README.md
@@ -0,0 +1,95 @@
+# noctx
+
+![](https://github.com/sonatard/noctx/workflows/.github/workflows/ci.yml/badge.svg)
+
+`noctx` finds sending http request without context.Context.
+
+You should use `noctx` if sending http request in your library.
+Passing `context.Context` enables library user to cancel http request, getting trace information and so on.
+
+## Install
+
+```sh
+$ go get -u github.com/sonatard/noctx/cmd/noctx
+```
+
+## Usage
+
+```sh
+$ go vet -vettool=`which noctx` main.go
+./main.go:6:11: net/http.Get must not be called
+```
+
+## Detection rules
+- Executing following functions
+ - `net/http.Get`
+ - `net/http.Head`
+ - `net/http.Post`
+ - `net/http.PostForm`
+ - `(*net/http.Client).Get`
+ - `(*net/http.Client).Head`
+ - `(*net/http.Client).Post`
+ - `(*net/http.Client).PostForm`
+- `http.Request` returned by `http.NewRequest` function and passes it to other function.
+
+## How to fix
+- Send http request using `(*http.Client).Do(*http.Request)` method.
+- In Go 1.13 and later, use `http.NewRequestWithContext` function instead of using `http.NewRequest` function.
+- In Go 1.12 and earlier, call `(http.Request).WithContext(ctx)` after `http.NewRequest`.
+
+`(http.Request).WithContext(ctx)` has a disadvantage of performance because it returns a copy of `http.Request`. Use `http.NewRequestWithContext` function if you only support Go1.13 or later.
+
+## Sample Code
+
+```go
+package main
+
+import (
+ "context"
+ "net/http"
+)
+
+func main() {
+ const url = "http://example.com"
+ http.Get(url) // want `net/http\.Get must not be called`
+ http.Head(url) // want `net/http\.Head must not be called`
+ http.Post(url, "", nil) // want `net/http\.Post must not be called`
+ http.PostForm(url, nil) // want `net/http\.PostForm must not be called`
+
+ cli := &http.Client{}
+ cli.Get(url) // want `\(\*net/http\.Client\)\.Get must not be called`
+ cli.Head(url) // want `\(\*net/http\.Client\)\.Head must not be called`
+ cli.Post(url, "", nil) // want `\(\*net/http\.Client\)\.Post must not be called`
+ cli.PostForm(url, nil) // want `\(\*net/http\.Client\)\.PostForm must not be called`
+
+ req, _ := http.NewRequest(http.MethodPost, url, nil) // want `should rewrite http.NewRequestWithContext or add \(\*Request\).WithContext`
+ cli.Do(req)
+
+ ctx := context.Background()
+ req2, _ := http.NewRequestWithContext(ctx, http.MethodPost, url, nil) // OK
+ cli.Do(req2)
+
+ req3, _ := http.NewRequest(http.MethodPost, url, nil) // OK
+ req3 = req3.WithContext(ctx)
+ cli.Do(req3)
+
+ f2 := func(req *http.Request, ctx context.Context) *http.Request {
+ return req
+ }
+ req4, _ := http.NewRequest(http.MethodPost, url, nil) // want `should rewrite http.NewRequestWithContext or add \(\*Request\).WithContext`
+ req4 = f2(req4, ctx)
+ cli.Do(req4)
+
+ req5, _ := func() (*http.Request, error) {
+ return http.NewRequest(http.MethodPost, url, nil) // want `should rewrite http.NewRequestWithContext or add \(\*Request\).WithContext`
+ }()
+ cli.Do(req5)
+
+}
+```
+
+## Reference
+- [net/http - NewRequest](https://golang.org/pkg/net/http/#NewRequest)
+- [net/http - NewRequestWithContext](https://golang.org/pkg/net/http/#NewRequestWithContext)
+- [net/http - Request.WithContext](https://golang.org/pkg/net/http/#Request.WithContext)
+
diff --git a/vendor/github.com/sonatard/noctx/go.mod b/vendor/github.com/sonatard/noctx/go.mod
new file mode 100644
index 0000000..47b7901
--- /dev/null
+++ b/vendor/github.com/sonatard/noctx/go.mod
@@ -0,0 +1,8 @@
+module github.com/sonatard/noctx
+
+go 1.13
+
+require (
+ github.com/gostaticanalysis/analysisutil v0.0.3
+ golang.org/x/tools v0.0.0-20200117220505-0cba7a3a9ee9
+)
diff --git a/vendor/github.com/sonatard/noctx/go.sum b/vendor/github.com/sonatard/noctx/go.sum
new file mode 100644
index 0000000..f8e5b07
--- /dev/null
+++ b/vendor/github.com/sonatard/noctx/go.sum
@@ -0,0 +1,16 @@
+github.com/gostaticanalysis/analysisutil v0.0.3 h1:iwp+5/UAyzQSFgQ4uR2sni99sJ8Eo9DEacKWM5pekIg=
+github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20200117220505-0cba7a3a9ee9 h1:KOkk4e2xd5OeCDJGwacvr75ICCbCsShrHiqPEdsA9hg=
+golang.org/x/tools v0.0.0-20200117220505-0cba7a3a9ee9/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/vendor/github.com/sonatard/noctx/ngfunc/main.go b/vendor/github.com/sonatard/noctx/ngfunc/main.go
new file mode 100644
index 0000000..cfeb0f0
--- /dev/null
+++ b/vendor/github.com/sonatard/noctx/ngfunc/main.go
@@ -0,0 +1,57 @@
+package ngfunc
+
+import (
+ "go/types"
+
+ "github.com/gostaticanalysis/analysisutil"
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/passes/buildssa"
+)
+
+func Run(pass *analysis.Pass) (interface{}, error) {
+ ngFuncNames := []string{
+ "net/http.Get",
+ "net/http.Head",
+ "net/http.Post",
+ "net/http.PostForm",
+ "(*net/http.Client).Get",
+ "(*net/http.Client).Head",
+ "(*net/http.Client).Post",
+ "(*net/http.Client).PostForm",
+ }
+
+ ngFuncs := typeFuncs(pass, ngFuncNames)
+ if len(ngFuncs) == 0 {
+ return nil, nil
+ }
+
+ reportFuncs := ngCalledFuncs(pass, ngFuncs)
+ report(pass, reportFuncs)
+
+ return nil, nil
+}
+
+func ngCalledFuncs(pass *analysis.Pass, ngFuncs []*types.Func) []*Report {
+ var reports []*Report
+
+ srcFuncs := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs
+ for _, sf := range srcFuncs {
+ for _, b := range sf.Blocks {
+ for _, instr := range b.Instrs {
+ for _, ngFunc := range ngFuncs {
+ if analysisutil.Called(instr, nil, ngFunc) {
+ ngCalledFunc := &Report{
+ Instruction: instr,
+ function: ngFunc,
+ }
+ reports = append(reports, ngCalledFunc)
+
+ break
+ }
+ }
+ }
+ }
+ }
+
+ return reports
+}
diff --git a/vendor/github.com/sonatard/noctx/ngfunc/report.go b/vendor/github.com/sonatard/noctx/ngfunc/report.go
new file mode 100644
index 0000000..e500517
--- /dev/null
+++ b/vendor/github.com/sonatard/noctx/ngfunc/report.go
@@ -0,0 +1,29 @@
+package ngfunc
+
+import (
+ "fmt"
+ "go/token"
+ "go/types"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/ssa"
+)
+
+type Report struct {
+ Instruction ssa.Instruction
+ function *types.Func
+}
+
+func (n *Report) Pos() token.Pos {
+ return n.Instruction.Pos()
+}
+
+func (n *Report) Message() string {
+ return fmt.Sprintf("%s must not be called", n.function.FullName())
+}
+
+func report(pass *analysis.Pass, reports []*Report) {
+ for _, report := range reports {
+ pass.Reportf(report.Pos(), report.Message())
+ }
+}
diff --git a/vendor/github.com/sonatard/noctx/ngfunc/types.go b/vendor/github.com/sonatard/noctx/ngfunc/types.go
new file mode 100644
index 0000000..f187738
--- /dev/null
+++ b/vendor/github.com/sonatard/noctx/ngfunc/types.go
@@ -0,0 +1,65 @@
+package ngfunc
+
+import (
+ "fmt"
+ "go/types"
+ "strings"
+
+ "github.com/gostaticanalysis/analysisutil"
+ "golang.org/x/tools/go/analysis"
+)
+
+var errNotFound = fmt.Errorf("function not found")
+
+func typeFuncs(pass *analysis.Pass, funcs []string) []*types.Func {
+ fs := make([]*types.Func, 0, len(funcs))
+
+ for _, fn := range funcs {
+ f, err := typeFunc(pass, fn)
+ if err != nil {
+ continue
+ }
+
+ fs = append(fs, f)
+ }
+
+ return fs
+}
+
+func typeFunc(pass *analysis.Pass, funcName string) (*types.Func, error) {
+ ss := strings.Split(strings.TrimSpace(funcName), ".")
+
+ switch len(ss) {
+ case 2:
+ // package function: pkgname.Func
+ f, ok := analysisutil.ObjectOf(pass, ss[0], ss[1]).(*types.Func)
+ if !ok || f == nil {
+ return nil, errNotFound
+ }
+
+ return f, nil
+ case 3:
+ // method: (*pkgname.Type).Method
+ pkgname := strings.TrimLeft(ss[0], "(")
+ typename := strings.TrimRight(ss[1], ")")
+
+ if pkgname != "" && pkgname[0] == '*' {
+ pkgname = pkgname[1:]
+ typename = "*" + typename
+ }
+
+ typ := analysisutil.TypeOf(pass, pkgname, typename)
+ if typ == nil {
+ return nil, errNotFound
+ }
+
+ m := analysisutil.MethodOf(typ, ss[2])
+ if m == nil {
+ return nil, errNotFound
+ }
+
+ return m, nil
+ }
+
+ return nil, errNotFound
+}
diff --git a/vendor/github.com/sonatard/noctx/noctx.go b/vendor/github.com/sonatard/noctx/noctx.go
new file mode 100644
index 0000000..478ad88
--- /dev/null
+++ b/vendor/github.com/sonatard/noctx/noctx.go
@@ -0,0 +1,31 @@
+package noctx
+
+import (
+ "github.com/sonatard/noctx/ngfunc"
+ "github.com/sonatard/noctx/reqwithoutctx"
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/passes/buildssa"
+)
+
+var Analyzer = &analysis.Analyzer{
+ Name: "noctx",
+ Doc: Doc,
+ Run: run,
+ Requires: []*analysis.Analyzer{
+ buildssa.Analyzer,
+ },
+}
+
+const Doc = "noctx finds sending http request without context.Context"
+
+func run(pass *analysis.Pass) (interface{}, error) {
+ if _, err := ngfunc.Run(pass); err != nil {
+ return nil, err
+ }
+
+ if _, err := reqwithoutctx.Run(pass); err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
diff --git a/vendor/github.com/sonatard/noctx/reqwithoutctx/main.go b/vendor/github.com/sonatard/noctx/reqwithoutctx/main.go
new file mode 100644
index 0000000..b09e1de
--- /dev/null
+++ b/vendor/github.com/sonatard/noctx/reqwithoutctx/main.go
@@ -0,0 +1,14 @@
+package reqwithoutctx
+
+import (
+ "golang.org/x/tools/go/analysis"
+)
+
+func Run(pass *analysis.Pass) (interface{}, error) {
+ analyzer := NewAnalyzer(pass)
+ reports := analyzer.Exec()
+
+ report(pass, reports)
+
+ return nil, nil
+}
diff --git a/vendor/github.com/sonatard/noctx/reqwithoutctx/report.go b/vendor/github.com/sonatard/noctx/reqwithoutctx/report.go
new file mode 100644
index 0000000..1c94e31
--- /dev/null
+++ b/vendor/github.com/sonatard/noctx/reqwithoutctx/report.go
@@ -0,0 +1,26 @@
+package reqwithoutctx
+
+import (
+ "go/token"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/ssa"
+)
+
+type Report struct {
+ Instruction ssa.Instruction
+}
+
+func (n *Report) Pos() token.Pos {
+ return n.Instruction.Pos()
+}
+
+func (n *Report) Message() string {
+ return "should rewrite http.NewRequestWithContext or add (*Request).WithContext"
+}
+
+func report(pass *analysis.Pass, reports []*Report) {
+ for _, report := range reports {
+ pass.Reportf(report.Pos(), report.Message())
+ }
+}
diff --git a/vendor/github.com/sonatard/noctx/reqwithoutctx/ssa.go b/vendor/github.com/sonatard/noctx/reqwithoutctx/ssa.go
new file mode 100644
index 0000000..3575126
--- /dev/null
+++ b/vendor/github.com/sonatard/noctx/reqwithoutctx/ssa.go
@@ -0,0 +1,180 @@
+package reqwithoutctx
+
+import (
+ "go/types"
+
+ "github.com/gostaticanalysis/analysisutil"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/passes/buildssa"
+ "golang.org/x/tools/go/ssa"
+)
+
+type Analyzer struct {
+ Funcs []*ssa.Function
+ newRequestType types.Type
+ requestType types.Type
+}
+
+func NewAnalyzer(pass *analysis.Pass) *Analyzer {
+ newRequestType := analysisutil.TypeOf(pass, "net/http", "NewRequest")
+ requestType := analysisutil.TypeOf(pass, "net/http", "*Request")
+
+ srcFuncs := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs
+
+ return &Analyzer{
+ Funcs: srcFuncs,
+ newRequestType: newRequestType,
+ requestType: requestType,
+ }
+}
+
+func (a *Analyzer) Exec() []*Report {
+ if a.newRequestType == nil || a.requestType == nil {
+ return []*Report{}
+ }
+
+ usedReqs := a.usedReqs()
+ newReqs := a.requestsByNewRequest()
+
+ return a.report(usedReqs, newReqs)
+}
+
+func (a *Analyzer) report(usedReqs map[string]*ssa.Extract, newReqs map[*ssa.Call]*ssa.Extract) []*Report {
+ var reports []*Report
+
+ for _, fReq := range usedReqs {
+ for newRequest, req := range newReqs {
+ if fReq == req {
+ reports = append(reports, &Report{Instruction: newRequest})
+ }
+ }
+ }
+
+ return reports
+}
+
+func (a *Analyzer) usedReqs() map[string]*ssa.Extract {
+ reqExts := make(map[string]*ssa.Extract)
+
+ for _, f := range a.Funcs {
+ for _, b := range f.Blocks {
+ for _, instr := range b.Instrs {
+ switch i := instr.(type) {
+ case *ssa.Call:
+ exts := a.usedReqByCall(i)
+ for _, ext := range exts {
+ key := i.String() + ext.String()
+ reqExts[key] = ext
+ }
+ case *ssa.UnOp:
+ ext := a.usedReqByUnOp(i)
+ if ext != nil {
+ key := i.String() + ext.String()
+ reqExts[key] = ext
+ }
+ case *ssa.Return:
+ exts := a.usedReqByReturn(i)
+ for _, ext := range exts {
+ key := i.String() + ext.String()
+ reqExts[key] = ext
+ }
+ }
+ }
+ }
+ }
+
+ return reqExts
+}
+
+func (a *Analyzer) usedReqByCall(call *ssa.Call) []*ssa.Extract {
+ var exts []*ssa.Extract
+
+ // skip net/http.Request method call
+ if call.Common().Signature().Recv() != nil && types.Identical(call.Value().Type(), a.requestType) {
+ return exts
+ }
+
+ args := call.Common().Args
+ if len(args) == 0 {
+ return exts
+ }
+
+ for _, arg := range args {
+ ext, ok := arg.(*ssa.Extract)
+ if !ok {
+ continue
+ }
+
+ if !types.Identical(ext.Type(), a.requestType) {
+ continue
+ }
+
+ exts = append(exts, ext)
+ }
+
+ return exts
+}
+
+func (a *Analyzer) usedReqByUnOp(op *ssa.UnOp) *ssa.Extract {
+ if ext, ok := op.X.(*ssa.Extract); ok && types.Identical(ext.Type(), a.requestType) {
+ return ext
+ }
+
+ return nil
+}
+
+func (a *Analyzer) usedReqByReturn(ret *ssa.Return) []*ssa.Extract {
+ rets := ret.Results
+ exts := make([]*ssa.Extract, 0, len(rets))
+
+ for _, ret := range rets {
+ ext, ok := ret.(*ssa.Extract)
+ if !ok {
+ continue
+ }
+
+ if types.Identical(ext.Type(), a.requestType) {
+ exts = append(exts, ext)
+ }
+ }
+
+ return exts
+}
+
+func (a *Analyzer) requestsByNewRequest() map[*ssa.Call]*ssa.Extract {
+ reqs := make(map[*ssa.Call]*ssa.Extract)
+
+ for _, f := range a.Funcs {
+ for _, b := range f.Blocks {
+ for _, instr := range b.Instrs {
+ ext, ok := instr.(*ssa.Extract)
+ if !ok {
+ continue
+ }
+
+ if !types.Identical(ext.Type(), a.requestType) {
+ continue
+ }
+
+ operands := ext.Operands([]*ssa.Value{})
+ if len(operands) != 1 {
+ continue
+ }
+
+ operand := *operands[0]
+
+ f, ok := operand.(*ssa.Call)
+ if !ok {
+ continue
+ }
+
+ if types.Identical(f.Call.Value.Type(), a.newRequestType) {
+ reqs[f] = ext
+ }
+ }
+ }
+ }
+
+ return reqs
+}
diff --git a/vendor/github.com/sourcegraph/go-diff/LICENSE b/vendor/github.com/sourcegraph/go-diff/LICENSE
new file mode 100644
index 0000000..0733b6e
--- /dev/null
+++ b/vendor/github.com/sourcegraph/go-diff/LICENSE
@@ -0,0 +1,35 @@
+Copyright (c) 2014 Sourcegraph, Inc.
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without
+restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
+
+-----------------------------------------------------------------
+
+Portions adapted from python-unidiff:
+
+Copyright (c) 2012 Matias Bordese
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
diff --git a/vendor/github.com/sourcegraph/go-diff/diff/diff.go b/vendor/github.com/sourcegraph/go-diff/diff/diff.go
new file mode 100644
index 0000000..646602a
--- /dev/null
+++ b/vendor/github.com/sourcegraph/go-diff/diff/diff.go
@@ -0,0 +1,76 @@
+package diff
+
+import "bytes"
+
+// NOTE: types are code-generated in diff.pb.go.
+
+//go:generate protoc -I../../../.. -I ../../../../github.com/gogo/protobuf/protobuf -I. --gogo_out=. diff.proto
+
+// Stat computes the number of lines added/changed/deleted in all
+// hunks in this file's diff.
+func (d *FileDiff) Stat() Stat {
+ total := Stat{}
+ for _, h := range d.Hunks {
+ total.add(h.Stat())
+ }
+ return total
+}
+
+// Stat computes the number of lines added/changed/deleted in this
+// hunk.
+func (h *Hunk) Stat() Stat {
+ lines := bytes.Split(h.Body, []byte{'\n'})
+ var last byte
+ st := Stat{}
+ for _, line := range lines {
+ if len(line) == 0 {
+ last = 0
+ continue
+ }
+ switch line[0] {
+ case '-':
+ if last == '+' {
+ st.Added--
+ st.Changed++
+ last = 0 // next line can't change this one since this is already a change
+ } else {
+ st.Deleted++
+ last = line[0]
+ }
+ case '+':
+ if last == '-' {
+ st.Deleted--
+ st.Changed++
+ last = 0 // next line can't change this one since this is already a change
+ } else {
+ st.Added++
+ last = line[0]
+ }
+ default:
+ last = 0
+ }
+ }
+ return st
+}
+
+var (
+ hunkPrefix = []byte("@@ ")
+)
+
+const hunkHeader = "@@ -%d,%d +%d,%d @@"
+
+// diffTimeParseLayout is the layout used to parse the time in unified diff file
+// header timestamps.
+// See https://www.gnu.org/software/diffutils/manual/html_node/Detailed-Unified.html.
+const diffTimeParseLayout = "2006-01-02 15:04:05 -0700"
+
+// diffTimeFormatLayout is the layout used to format (i.e., print) the time in unified diff file
+// header timestamps.
+// See https://www.gnu.org/software/diffutils/manual/html_node/Detailed-Unified.html.
+const diffTimeFormatLayout = "2006-01-02 15:04:05.000000000 -0700"
+
+func (s *Stat) add(o Stat) {
+ s.Added += o.Added
+ s.Changed += o.Changed
+ s.Deleted += o.Deleted
+}
diff --git a/vendor/github.com/sourcegraph/go-diff/diff/diff.pb.go b/vendor/github.com/sourcegraph/go-diff/diff/diff.pb.go
new file mode 100644
index 0000000..2e7c27f
--- /dev/null
+++ b/vendor/github.com/sourcegraph/go-diff/diff/diff.pb.go
@@ -0,0 +1,1059 @@
+// Code generated by protoc-gen-gogo.
+// source: diff.proto
+// DO NOT EDIT!
+
+/*
+ Package diff is a generated protocol buffer package.
+
+ It is generated from these files:
+ diff.proto
+
+ It has these top-level messages:
+ FileDiff
+ Hunk
+ Stat
+*/
+package diff
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// discarding unused import gogoproto "github.com/gogo/protobuf/gogoproto"
+import pbtypes "sourcegraph.com/sqs/pbtypes"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// A FileDiff represents a unified diff for a single file.
+//
+// A file unified diff has a header that resembles the following:
+//
+// --- oldname 2009-10-11 15:12:20.000000000 -0700
+// +++ newname 2009-10-11 15:12:30.000000000 -0700
+type FileDiff struct {
+ // the original name of the file
+ OrigName string `protobuf:"bytes,1,opt,name=OrigName,proto3" json:"OrigName,omitempty"`
+ // the original timestamp (nil if not present)
+ OrigTime *pbtypes.Timestamp `protobuf:"bytes,2,opt,name=OrigTime" json:"OrigTime,omitempty"`
+ // the new name of the file (often same as OrigName)
+ NewName string `protobuf:"bytes,3,opt,name=NewName,proto3" json:"NewName,omitempty"`
+ // the new timestamp (nil if not present)
+ NewTime *pbtypes.Timestamp `protobuf:"bytes,4,opt,name=NewTime" json:"NewTime,omitempty"`
+ // extended header lines (e.g., git's "new mode ", "rename from ", etc.)
+ Extended []string `protobuf:"bytes,5,rep,name=Extended" json:"Extended,omitempty"`
+ // hunks that were changed from orig to new
+ Hunks []*Hunk `protobuf:"bytes,6,rep,name=Hunks" json:"Hunks,omitempty"`
+}
+
+func (m *FileDiff) Reset() { *m = FileDiff{} }
+func (m *FileDiff) String() string { return proto.CompactTextString(m) }
+func (*FileDiff) ProtoMessage() {}
+
+// A Hunk represents a series of changes (additions or deletions) in a file's
+// unified diff.
+type Hunk struct {
+ // starting line number in original file
+ OrigStartLine int32 `protobuf:"varint,1,opt,name=OrigStartLine,proto3" json:"OrigStartLine,omitempty"`
+ // number of lines the hunk applies to in the original file
+ OrigLines int32 `protobuf:"varint,2,opt,name=OrigLines,proto3" json:"OrigLines,omitempty"`
+ // if > 0, then the original file had a 'No newline at end of file' mark at this offset
+ OrigNoNewlineAt int32 `protobuf:"varint,3,opt,name=OrigNoNewlineAt,proto3" json:"OrigNoNewlineAt,omitempty"`
+ // starting line number in new file
+ NewStartLine int32 `protobuf:"varint,4,opt,name=NewStartLine,proto3" json:"NewStartLine,omitempty"`
+ // number of lines the hunk applies to in the new file
+ NewLines int32 `protobuf:"varint,5,opt,name=NewLines,proto3" json:"NewLines,omitempty"`
+ // optional section heading
+ Section string `protobuf:"bytes,6,opt,name=Section,proto3" json:"Section,omitempty"`
+ // 0-indexed line offset in unified file diff (including section headers); this is
+ // only set when Hunks are read from entire file diff (i.e., when ReadAllHunks is
+ // called) This accounts for hunk headers, too, so the StartPosition of the first
+ // hunk will be 1.
+ StartPosition int32 `protobuf:"varint,7,opt,name=StartPosition,proto3" json:"StartPosition,omitempty"`
+ // hunk body (lines prefixed with '-', '+', or ' ')
+ Body []byte `protobuf:"bytes,8,opt,name=Body,proto3" json:"Body,omitempty"`
+}
+
+func (m *Hunk) Reset() { *m = Hunk{} }
+func (m *Hunk) String() string { return proto.CompactTextString(m) }
+func (*Hunk) ProtoMessage() {}
+
+// A Stat is a diff stat that represents the number of lines added/changed/deleted.
+type Stat struct {
+ // number of lines added
+ Added int32 `protobuf:"varint,1,opt,name=Added,proto3" json:""`
+ // number of lines changed
+ Changed int32 `protobuf:"varint,2,opt,name=Changed,proto3" json:""`
+ // number of lines deleted
+ Deleted int32 `protobuf:"varint,3,opt,name=Deleted,proto3" json:""`
+}
+
+func (m *Stat) Reset() { *m = Stat{} }
+func (m *Stat) String() string { return proto.CompactTextString(m) }
+func (*Stat) ProtoMessage() {}
+
+func (m *FileDiff) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *FileDiff) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.OrigName) > 0 {
+ data[i] = 0xa
+ i++
+ i = encodeVarintDiff(data, i, uint64(len(m.OrigName)))
+ i += copy(data[i:], m.OrigName)
+ }
+ if m.OrigTime != nil {
+ data[i] = 0x12
+ i++
+ i = encodeVarintDiff(data, i, uint64(m.OrigTime.Size()))
+ n1, err := m.OrigTime.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n1
+ }
+ if len(m.NewName) > 0 {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintDiff(data, i, uint64(len(m.NewName)))
+ i += copy(data[i:], m.NewName)
+ }
+ if m.NewTime != nil {
+ data[i] = 0x22
+ i++
+ i = encodeVarintDiff(data, i, uint64(m.NewTime.Size()))
+ n2, err := m.NewTime.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n2
+ }
+ if len(m.Extended) > 0 {
+ for _, s := range m.Extended {
+ data[i] = 0x2a
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ if len(m.Hunks) > 0 {
+ for _, msg := range m.Hunks {
+ data[i] = 0x32
+ i++
+ i = encodeVarintDiff(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *Hunk) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Hunk) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.OrigStartLine != 0 {
+ data[i] = 0x8
+ i++
+ i = encodeVarintDiff(data, i, uint64(m.OrigStartLine))
+ }
+ if m.OrigLines != 0 {
+ data[i] = 0x10
+ i++
+ i = encodeVarintDiff(data, i, uint64(m.OrigLines))
+ }
+ if m.OrigNoNewlineAt != 0 {
+ data[i] = 0x18
+ i++
+ i = encodeVarintDiff(data, i, uint64(m.OrigNoNewlineAt))
+ }
+ if m.NewStartLine != 0 {
+ data[i] = 0x20
+ i++
+ i = encodeVarintDiff(data, i, uint64(m.NewStartLine))
+ }
+ if m.NewLines != 0 {
+ data[i] = 0x28
+ i++
+ i = encodeVarintDiff(data, i, uint64(m.NewLines))
+ }
+ if len(m.Section) > 0 {
+ data[i] = 0x32
+ i++
+ i = encodeVarintDiff(data, i, uint64(len(m.Section)))
+ i += copy(data[i:], m.Section)
+ }
+ if m.StartPosition != 0 {
+ data[i] = 0x38
+ i++
+ i = encodeVarintDiff(data, i, uint64(m.StartPosition))
+ }
+ if m.Body != nil {
+ if len(m.Body) > 0 {
+ data[i] = 0x42
+ i++
+ i = encodeVarintDiff(data, i, uint64(len(m.Body)))
+ i += copy(data[i:], m.Body)
+ }
+ }
+ return i, nil
+}
+
+func (m *Stat) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Stat) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Added != 0 {
+ data[i] = 0x8
+ i++
+ i = encodeVarintDiff(data, i, uint64(m.Added))
+ }
+ if m.Changed != 0 {
+ data[i] = 0x10
+ i++
+ i = encodeVarintDiff(data, i, uint64(m.Changed))
+ }
+ if m.Deleted != 0 {
+ data[i] = 0x18
+ i++
+ i = encodeVarintDiff(data, i, uint64(m.Deleted))
+ }
+ return i, nil
+}
+
+func encodeFixed64Diff(data []byte, offset int, v uint64) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ data[offset+4] = uint8(v >> 32)
+ data[offset+5] = uint8(v >> 40)
+ data[offset+6] = uint8(v >> 48)
+ data[offset+7] = uint8(v >> 56)
+ return offset + 8
+}
+func encodeFixed32Diff(data []byte, offset int, v uint32) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ return offset + 4
+}
+func encodeVarintDiff(data []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ data[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ data[offset] = uint8(v)
+ return offset + 1
+}
+func (m *FileDiff) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.OrigName)
+ if l > 0 {
+ n += 1 + l + sovDiff(uint64(l))
+ }
+ if m.OrigTime != nil {
+ l = m.OrigTime.Size()
+ n += 1 + l + sovDiff(uint64(l))
+ }
+ l = len(m.NewName)
+ if l > 0 {
+ n += 1 + l + sovDiff(uint64(l))
+ }
+ if m.NewTime != nil {
+ l = m.NewTime.Size()
+ n += 1 + l + sovDiff(uint64(l))
+ }
+ if len(m.Extended) > 0 {
+ for _, s := range m.Extended {
+ l = len(s)
+ n += 1 + l + sovDiff(uint64(l))
+ }
+ }
+ if len(m.Hunks) > 0 {
+ for _, e := range m.Hunks {
+ l = e.Size()
+ n += 1 + l + sovDiff(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *Hunk) Size() (n int) {
+ var l int
+ _ = l
+ if m.OrigStartLine != 0 {
+ n += 1 + sovDiff(uint64(m.OrigStartLine))
+ }
+ if m.OrigLines != 0 {
+ n += 1 + sovDiff(uint64(m.OrigLines))
+ }
+ if m.OrigNoNewlineAt != 0 {
+ n += 1 + sovDiff(uint64(m.OrigNoNewlineAt))
+ }
+ if m.NewStartLine != 0 {
+ n += 1 + sovDiff(uint64(m.NewStartLine))
+ }
+ if m.NewLines != 0 {
+ n += 1 + sovDiff(uint64(m.NewLines))
+ }
+ l = len(m.Section)
+ if l > 0 {
+ n += 1 + l + sovDiff(uint64(l))
+ }
+ if m.StartPosition != 0 {
+ n += 1 + sovDiff(uint64(m.StartPosition))
+ }
+ if m.Body != nil {
+ l = len(m.Body)
+ if l > 0 {
+ n += 1 + l + sovDiff(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *Stat) Size() (n int) {
+ var l int
+ _ = l
+ if m.Added != 0 {
+ n += 1 + sovDiff(uint64(m.Added))
+ }
+ if m.Changed != 0 {
+ n += 1 + sovDiff(uint64(m.Changed))
+ }
+ if m.Deleted != 0 {
+ n += 1 + sovDiff(uint64(m.Deleted))
+ }
+ return n
+}
+
+func sovDiff(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozDiff(x uint64) (n int) {
+ return sovDiff(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *FileDiff) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDiff
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: FileDiff: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: FileDiff: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field OrigName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDiff
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthDiff
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.OrigName = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field OrigTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDiff
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthDiff
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.OrigTime == nil {
+ m.OrigTime = &pbtypes.Timestamp{}
+ }
+ if err := m.OrigTime.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NewName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDiff
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthDiff
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.NewName = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NewTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDiff
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthDiff
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.NewTime == nil {
+ m.NewTime = &pbtypes.Timestamp{}
+ }
+ if err := m.NewTime.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Extended", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDiff
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthDiff
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Extended = append(m.Extended, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Hunks", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDiff
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthDiff
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Hunks = append(m.Hunks, &Hunk{})
+ if err := m.Hunks[len(m.Hunks)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipDiff(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthDiff
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Hunk) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDiff
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Hunk: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Hunk: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field OrigStartLine", wireType)
+ }
+ m.OrigStartLine = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDiff
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.OrigStartLine |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field OrigLines", wireType)
+ }
+ m.OrigLines = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDiff
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.OrigLines |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field OrigNoNewlineAt", wireType)
+ }
+ m.OrigNoNewlineAt = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDiff
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.OrigNoNewlineAt |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NewStartLine", wireType)
+ }
+ m.NewStartLine = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDiff
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.NewStartLine |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NewLines", wireType)
+ }
+ m.NewLines = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDiff
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.NewLines |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Section", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDiff
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthDiff
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Section = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StartPosition", wireType)
+ }
+ m.StartPosition = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDiff
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.StartPosition |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Body", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDiff
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthDiff
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Body = append([]byte{}, data[iNdEx:postIndex]...)
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipDiff(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthDiff
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Stat) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDiff
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Stat: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Stat: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Added", wireType)
+ }
+ m.Added = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDiff
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Added |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Changed", wireType)
+ }
+ m.Changed = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDiff
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Changed |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Deleted", wireType)
+ }
+ m.Deleted = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDiff
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Deleted |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipDiff(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthDiff
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipDiff(data []byte) (n int, err error) {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowDiff
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowDiff
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if data[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowDiff
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthDiff
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowDiff
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipDiff(data[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthDiff = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowDiff = fmt.Errorf("proto: integer overflow")
+)
diff --git a/vendor/github.com/sourcegraph/go-diff/diff/diff.proto b/vendor/github.com/sourcegraph/go-diff/diff/diff.proto
new file mode 100644
index 0000000..8868970
--- /dev/null
+++ b/vendor/github.com/sourcegraph/go-diff/diff/diff.proto
@@ -0,0 +1,81 @@
+syntax = "proto3";
+package diff;
+
+import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+import "sourcegraph.com/sqs/pbtypes/timestamp.proto";
+
+option (gogoproto.goproto_getters_all) = false;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+
+// A FileDiff represents a unified diff for a single file.
+//
+// A file unified diff has a header that resembles the following:
+//
+// --- oldname 2009-10-11 15:12:20.000000000 -0700
+// +++ newname 2009-10-11 15:12:30.000000000 -0700
+message FileDiff {
+ // the original name of the file
+ string OrigName = 1;
+
+ // the original timestamp (nil if not present)
+ pbtypes.Timestamp OrigTime = 2;
+
+ // the new name of the file (often same as OrigName)
+ string NewName = 3;
+
+ // the new timestamp (nil if not present)
+ pbtypes.Timestamp NewTime = 4;
+
+ // extended header lines (e.g., git's "new mode ", "rename from ", etc.)
+ repeated string Extended = 5;
+
+ // hunks that were changed from orig to new
+ repeated Hunk Hunks = 6;
+}
+
+
+// A Hunk represents a series of changes (additions or deletions) in a file's
+// unified diff.
+message Hunk {
+ // starting line number in original file
+ int32 OrigStartLine = 1;
+
+ // number of lines the hunk applies to in the original file
+ int32 OrigLines = 2;
+
+ // if > 0, then the original file had a 'No newline at end of file' mark at this offset
+ int32 OrigNoNewlineAt = 3;
+
+ // starting line number in new file
+ int32 NewStartLine = 4;
+
+ // number of lines the hunk applies to in the new file
+ int32 NewLines = 5;
+
+ // optional section heading
+ string Section = 6;
+
+ // 0-indexed line offset in unified file diff (including section headers); this is
+ // only set when Hunks are read from entire file diff (i.e., when ReadAllHunks is
+ // called) This accounts for hunk headers, too, so the StartPosition of the first
+ // hunk will be 1.
+ int32 StartPosition = 7;
+
+ // hunk body (lines prefixed with '-', '+', or ' ')
+ bytes Body = 8;
+}
+
+// A Stat is a diff stat that represents the number of lines added/changed/deleted.
+message Stat {
+ // number of lines added
+ int32 Added = 1 [(gogoproto.jsontag) = ""];
+
+ // number of lines changed
+ int32 Changed = 2 [(gogoproto.jsontag) = ""];
+
+ // number of lines deleted
+ int32 Deleted = 3 [(gogoproto.jsontag) = ""];
+}
+
diff --git a/vendor/github.com/sourcegraph/go-diff/diff/doc.go b/vendor/github.com/sourcegraph/go-diff/diff/doc.go
new file mode 100644
index 0000000..12fe96a
--- /dev/null
+++ b/vendor/github.com/sourcegraph/go-diff/diff/doc.go
@@ -0,0 +1,2 @@
+// Package diff provides a parser for unified diffs.
+package diff
diff --git a/vendor/github.com/sourcegraph/go-diff/diff/parse.go b/vendor/github.com/sourcegraph/go-diff/diff/parse.go
new file mode 100644
index 0000000..08cba66
--- /dev/null
+++ b/vendor/github.com/sourcegraph/go-diff/diff/parse.go
@@ -0,0 +1,659 @@
+package diff
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "time"
+
+ "sourcegraph.com/sqs/pbtypes"
+)
+
+// ParseMultiFileDiff parses a multi-file unified diff. It returns an error if
+// parsing failed as a whole, but does its best to parse as many files in the
+// case of per-file errors. If it cannot detect when the diff of the next file
+// begins, the hunks are added to the FileDiff of the previous file.
+func ParseMultiFileDiff(diff []byte) ([]*FileDiff, error) {
+ return NewMultiFileDiffReader(bytes.NewReader(diff)).ReadAllFiles()
+}
+
+// NewMultiFileDiffReader returns a new MultiFileDiffReader that reads
+// a multi-file unified diff from r.
+func NewMultiFileDiffReader(r io.Reader) *MultiFileDiffReader {
+ return &MultiFileDiffReader{reader: bufio.NewReader(r)}
+}
+
+// MultiFileDiffReader reads a multi-file unified diff.
+type MultiFileDiffReader struct {
+ line int
+ offset int64
+ reader *bufio.Reader
+
+ // TODO(sqs): line and offset tracking in multi-file diffs is broken; add tests and fix
+
+ // nextFileFirstLine is a line that was read by a HunksReader that
+ // was how it determined the hunk was complete. But to determine
+ // that, it needed to read the first line of the next file. We
+ // store nextFileFirstLine so we can "give the first line back" to
+ // the next file.
+ nextFileFirstLine []byte
+}
+
+// ReadFile reads the next file unified diff (including headers and
+// all hunks) from r. If there are no more files in the diff, it
+// returns error io.EOF.
+func (r *MultiFileDiffReader) ReadFile() (*FileDiff, error) {
+ fr := &FileDiffReader{
+ line: r.line,
+ offset: r.offset,
+ reader: r.reader,
+ fileHeaderLine: r.nextFileFirstLine,
+ }
+ r.nextFileFirstLine = nil
+
+ fd, err := fr.ReadAllHeaders()
+ if err != nil {
+ switch e := err.(type) {
+ case *ParseError:
+ if e.Err == ErrNoFileHeader || e.Err == ErrExtendedHeadersEOF {
+ return nil, io.EOF
+ }
+
+ case OverflowError:
+ r.nextFileFirstLine = []byte(e)
+ return fd, nil
+
+ default:
+ return nil, err
+ }
+ }
+
+ // Before reading hunks, check to see if there are any. If there
+ // aren't any, and there's another file after this file in the
+ // diff, then the hunks reader will complain ErrNoHunkHeader. It's
+ // not easy for us to tell from that error alone if that was
+ // caused by the lack of any hunks, or a malformatted hunk, so we
+ // need to perform the check here.
+ hr := fr.HunksReader()
+ line, err := readLine(r.reader)
+ if err != nil && err != io.EOF {
+ return fd, err
+ }
+ line = bytes.TrimSuffix(line, []byte{'\n'})
+ if bytes.HasPrefix(line, hunkPrefix) {
+ hr.nextHunkHeaderLine = line
+ fd.Hunks, err = hr.ReadAllHunks()
+ r.line = fr.line
+ r.offset = fr.offset
+ if err != nil {
+ if e0, ok := err.(*ParseError); ok {
+ if e, ok := e0.Err.(*ErrBadHunkLine); ok {
+ // This just means we finished reading the hunks for the
+ // current file. See the ErrBadHunkLine doc for more info.
+ r.nextFileFirstLine = e.Line
+ return fd, nil
+ }
+ }
+ return nil, err
+ }
+ } else {
+ // There weren't any hunks, so that line we peeked ahead at
+ // actually belongs to the next file. Put it back.
+ r.nextFileFirstLine = line
+ }
+
+ return fd, nil
+}
+
+// ReadAllFiles reads all file unified diffs (including headers and all
+// hunks) remaining in r.
+func (r *MultiFileDiffReader) ReadAllFiles() ([]*FileDiff, error) {
+ var ds []*FileDiff
+ for {
+ d, err := r.ReadFile()
+ if d != nil {
+ ds = append(ds, d)
+ }
+ if err == io.EOF {
+ return ds, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+ }
+}
+
+// ParseFileDiff parses a file unified diff.
+func ParseFileDiff(diff []byte) (*FileDiff, error) {
+ return NewFileDiffReader(bytes.NewReader(diff)).Read()
+}
+
+// NewFileDiffReader returns a new FileDiffReader that reads a file
+// unified diff.
+func NewFileDiffReader(r io.Reader) *FileDiffReader {
+ return &FileDiffReader{reader: bufio.NewReader(r)}
+}
+
+// FileDiffReader reads a unified file diff.
+type FileDiffReader struct {
+ line int
+ offset int64
+ reader *bufio.Reader
+
+ // fileHeaderLine is the first file header line, set by:
+ //
+ // (1) ReadExtendedHeaders if it encroaches on a file header line
+ // (which it must to detect when extended headers are done); or
+ // (2) (*MultiFileDiffReader).ReadFile() if it encroaches on a
+ // file header line while reading the previous file's hunks (in a
+ // multi-file diff).
+ fileHeaderLine []byte
+}
+
+// Read reads a file unified diff, including headers and hunks, from r.
+func (r *FileDiffReader) Read() (*FileDiff, error) {
+ fd, err := r.ReadAllHeaders()
+ if err != nil {
+ return nil, err
+ }
+
+ fd.Hunks, err = r.HunksReader().ReadAllHunks()
+ if err != nil {
+ return nil, err
+ }
+
+ return fd, nil
+}
+
+// ReadAllHeaders reads the file headers and extended headers (if any)
+// from a file unified diff. It does not read hunks, and the returned
+// FileDiff's Hunks field is nil. To read the hunks, call the
+// (*FileDiffReader).HunksReader() method to get a HunksReader and
+// read hunks from that.
+func (r *FileDiffReader) ReadAllHeaders() (*FileDiff, error) {
+ var err error
+ fd := &FileDiff{}
+
+ fd.Extended, err = r.ReadExtendedHeaders()
+ if pe, ok := err.(*ParseError); ok && pe.Err == ErrExtendedHeadersEOF {
+ wasEmpty := handleEmpty(fd)
+ if wasEmpty {
+ return fd, nil
+ }
+ return fd, err
+ } else if _, ok := err.(OverflowError); ok {
+ handleEmpty(fd)
+ return fd, err
+ } else if err != nil {
+ return fd, err
+ }
+
+ var origTime, newTime *time.Time
+ fd.OrigName, fd.NewName, origTime, newTime, err = r.ReadFileHeaders()
+ if err != nil {
+ return nil, err
+ }
+ if origTime != nil {
+ ts := pbtypes.NewTimestamp(*origTime)
+ fd.OrigTime = &ts
+ }
+ if newTime != nil {
+ ts := pbtypes.NewTimestamp(*newTime)
+ fd.NewTime = &ts
+ }
+
+ return fd, nil
+}
+
+// HunksReader returns a new HunksReader that reads hunks from r. The
+// HunksReader's line and offset (used in error messages) is set to
+// start where the file diff header ended (which means errors have the
+// correct position information).
+func (r *FileDiffReader) HunksReader() *HunksReader {
+ return &HunksReader{
+ line: r.line,
+ offset: r.offset,
+ reader: r.reader,
+ }
+}
+
+// ReadFileHeaders reads the unified file diff header (the lines that
+// start with "---" and "+++" with the orig/new file names and
+// timestamps).
+func (r *FileDiffReader) ReadFileHeaders() (origName, newName string, origTimestamp, newTimestamp *time.Time, err error) {
+ origName, origTimestamp, err = r.readOneFileHeader([]byte("--- "))
+ if err != nil {
+ return "", "", nil, nil, err
+ }
+
+ newName, newTimestamp, err = r.readOneFileHeader([]byte("+++ "))
+ if err != nil {
+ return "", "", nil, nil, err
+ }
+
+ unquotedOrigName, err := strconv.Unquote(origName)
+ if err == nil {
+ origName = unquotedOrigName
+ }
+ unquotedNewName, err := strconv.Unquote(newName)
+ if err == nil {
+ newName = unquotedNewName
+ }
+
+ return origName, newName, origTimestamp, newTimestamp, nil
+}
+
+// readOneFileHeader reads one of the file headers (prefix should be
+// either "+++ " or "--- ").
+func (r *FileDiffReader) readOneFileHeader(prefix []byte) (filename string, timestamp *time.Time, err error) {
+ var line []byte
+
+ if r.fileHeaderLine == nil {
+ var err error
+ line, err = readLine(r.reader)
+ if err == io.EOF {
+ return "", nil, &ParseError{r.line, r.offset, ErrNoFileHeader}
+ } else if err != nil {
+ return "", nil, err
+ }
+ } else {
+ line = r.fileHeaderLine
+ r.fileHeaderLine = nil
+ }
+
+ if !bytes.HasPrefix(line, prefix) {
+ return "", nil, &ParseError{r.line, r.offset, ErrBadFileHeader}
+ }
+
+ r.offset += int64(len(line))
+ r.line++
+ line = line[len(prefix):]
+
+ trimmedLine := strings.TrimSpace(string(line)) // filenames that contain spaces may be terminated by a tab
+ parts := strings.SplitN(trimmedLine, "\t", 2)
+ filename = parts[0]
+ if len(parts) == 2 {
+ // Timestamp is optional, but this header has it.
+ ts, err := time.Parse(diffTimeParseLayout, parts[1])
+ if err != nil {
+ return "", nil, err
+ }
+ timestamp = &ts
+ }
+
+ return filename, timestamp, err
+}
+
+// OverflowError is returned when we have overflowed into the start
+// of the next file while reading extended headers.
+type OverflowError string
+
+func (e OverflowError) Error() string {
+ return fmt.Sprintf("overflowed into next file: %s", e)
+}
+
+// ReadExtendedHeaders reads the extended header lines, if any, from a
+// unified diff file (e.g., git's "diff --git a/foo.go b/foo.go", "new
+// mode ", "rename from ", etc.).
+func (r *FileDiffReader) ReadExtendedHeaders() ([]string, error) {
+ var xheaders []string
+ firstLine := true
+ for {
+ var line []byte
+ if r.fileHeaderLine == nil {
+ var err error
+ line, err = readLine(r.reader)
+ if err == io.EOF {
+ return xheaders, &ParseError{r.line, r.offset, ErrExtendedHeadersEOF}
+ } else if err != nil {
+ return xheaders, err
+ }
+ } else {
+ line = r.fileHeaderLine
+ r.fileHeaderLine = nil
+ }
+
+ if bytes.HasPrefix(line, []byte("diff --git ")) {
+ if firstLine {
+ firstLine = false
+ } else {
+ return xheaders, OverflowError(line)
+ }
+ }
+ if bytes.HasPrefix(line, []byte("--- ")) {
+ // We've reached the file header.
+ r.fileHeaderLine = line // pass to readOneFileHeader (see fileHeaderLine field doc)
+ return xheaders, nil
+ }
+
+ r.line++
+ r.offset += int64(len(line))
+ xheaders = append(xheaders, string(line))
+ }
+}
+
+// handleEmpty detects when FileDiff was an empty diff and will not have any hunks
+// that follow. It updates fd fields from the parsed extended headers.
+func handleEmpty(fd *FileDiff) (wasEmpty bool) {
+ var err error
+ lineCount := len(fd.Extended)
+ if lineCount > 0 && !strings.HasPrefix(fd.Extended[0], "diff --git ") {
+ return false
+ }
+ switch {
+ case (lineCount == 3 || lineCount == 4 && strings.HasPrefix(fd.Extended[3], "Binary files ") || lineCount > 4 && strings.HasPrefix(fd.Extended[3], "GIT binary patch")) &&
+ strings.HasPrefix(fd.Extended[1], "new file mode "):
+
+ names := strings.SplitN(fd.Extended[0][len("diff --git "):], " ", 2)
+ fd.OrigName = "/dev/null"
+ fd.NewName, err = strconv.Unquote(names[1])
+ if err != nil {
+ fd.NewName = names[1]
+ }
+ return true
+ case (lineCount == 3 || lineCount == 4 && strings.HasPrefix(fd.Extended[3], "Binary files ") || lineCount > 4 && strings.HasPrefix(fd.Extended[3], "GIT binary patch")) &&
+ strings.HasPrefix(fd.Extended[1], "deleted file mode "):
+
+ names := strings.SplitN(fd.Extended[0][len("diff --git "):], " ", 2)
+ fd.OrigName, err = strconv.Unquote(names[0])
+ if err != nil {
+ fd.OrigName = names[0]
+ }
+ fd.NewName = "/dev/null"
+ return true
+ case lineCount == 4 && strings.HasPrefix(fd.Extended[2], "rename from ") && strings.HasPrefix(fd.Extended[3], "rename to "):
+ names := strings.SplitN(fd.Extended[0][len("diff --git "):], " ", 2)
+ fd.OrigName, err = strconv.Unquote(names[0])
+ if err != nil {
+ fd.OrigName = names[0]
+ }
+ fd.NewName, err = strconv.Unquote(names[1])
+ if err != nil {
+ fd.NewName = names[1]
+ }
+ return true
+ case lineCount == 3 && strings.HasPrefix(fd.Extended[2], "Binary files ") || lineCount > 3 && strings.HasPrefix(fd.Extended[2], "GIT binary patch"):
+ names := strings.SplitN(fd.Extended[0][len("diff --git "):], " ", 2)
+ fd.OrigName, err = strconv.Unquote(names[0])
+ if err != nil {
+ fd.OrigName = names[0]
+ }
+ fd.NewName, err = strconv.Unquote(names[1])
+ if err != nil {
+ fd.NewName = names[1]
+ }
+ return true
+ default:
+ return false
+ }
+}
+
+var (
+ // ErrNoFileHeader is when a file unified diff has no file header
+ // (i.e., the lines that begin with "---" and "+++").
+ ErrNoFileHeader = errors.New("expected file header, got EOF")
+
+ // ErrBadFileHeader is when a file unified diff has a malformed
+ // file header (i.e., the lines that begin with "---" and "+++").
+ ErrBadFileHeader = errors.New("bad file header")
+
+ // ErrExtendedHeadersEOF is when an EOF was encountered while reading extended file headers, which means that there were no ---/+++ headers encountered before hunks (if any) began.
+ ErrExtendedHeadersEOF = errors.New("expected file header while reading extended headers, got EOF")
+)
+
+// ParseHunks parses hunks from a unified diff. The diff must consist
+// only of hunks and not include a file header; if it has a file
+// header, use ParseFileDiff.
+func ParseHunks(diff []byte) ([]*Hunk, error) {
+ r := NewHunksReader(bytes.NewReader(diff))
+ hunks, err := r.ReadAllHunks()
+ if err != nil {
+ return nil, err
+ }
+ return hunks, nil
+}
+
+// NewHunksReader returns a new HunksReader that reads unified diff hunks
+// from r.
+func NewHunksReader(r io.Reader) *HunksReader {
+ return &HunksReader{reader: bufio.NewReader(r)}
+}
+
+// A HunksReader reads hunks from a unified diff.
+type HunksReader struct {
+ line int
+ offset int64
+ hunk *Hunk
+ reader *bufio.Reader
+
+ nextHunkHeaderLine []byte
+}
+
+// ReadHunk reads one hunk from r. If there are no more hunks, it
+// returns error io.EOF.
+func (r *HunksReader) ReadHunk() (*Hunk, error) {
+ r.hunk = nil
+ lastLineFromOrig := true
+ var line []byte
+ var err error
+ for {
+ if r.nextHunkHeaderLine != nil {
+ // Use stored hunk header line that was scanned in at the
+ // completion of the previous hunk's ReadHunk.
+ line = r.nextHunkHeaderLine
+ r.nextHunkHeaderLine = nil
+ } else {
+ line, err = readLine(r.reader)
+ if err != nil {
+ if err == io.EOF && r.hunk != nil {
+ return r.hunk, nil
+ }
+ return nil, err
+ }
+ }
+
+ // Record position.
+ r.line++
+ r.offset += int64(len(line))
+
+ if r.hunk == nil {
+ // Check for presence of hunk header.
+ if !bytes.HasPrefix(line, hunkPrefix) {
+ return nil, &ParseError{r.line, r.offset, ErrNoHunkHeader}
+ }
+
+ // Parse hunk header.
+ r.hunk = &Hunk{}
+ items := []interface{}{
+ &r.hunk.OrigStartLine, &r.hunk.OrigLines,
+ &r.hunk.NewStartLine, &r.hunk.NewLines,
+ }
+ header, section, err := normalizeHeader(string(line))
+ if err != nil {
+ return nil, &ParseError{r.line, r.offset, err}
+ }
+ n, err := fmt.Sscanf(header, hunkHeader, items...)
+ if err != nil {
+ return nil, err
+ }
+ if n < len(items) {
+ return nil, &ParseError{r.line, r.offset, &ErrBadHunkHeader{header: string(line)}}
+ }
+
+ r.hunk.Section = section
+ } else {
+ // Read hunk body line.
+ if bytes.HasPrefix(line, hunkPrefix) {
+ // Saw start of new hunk, so this hunk is
+ // complete. But we've already read in the next hunk's
+ // header, so we need to be sure that the next call to
+ // ReadHunk starts with that header.
+ r.nextHunkHeaderLine = line
+
+ // Rewind position.
+ r.line--
+ r.offset -= int64(len(line))
+
+ return r.hunk, nil
+ }
+
+ if len(line) >= 1 && !linePrefix(line[0]) {
+ // Bad hunk header line. If we're reading a multi-file
+ // diff, this may be the end of the current
+ // file. Return a "rich" error that lets our caller
+ // handle that case.
+ return r.hunk, &ParseError{r.line, r.offset, &ErrBadHunkLine{Line: line}}
+ }
+ if bytes.Equal(line, []byte(noNewlineMessage)) {
+ if lastLineFromOrig {
+ // Retain the newline in the body (otherwise the
+ // diff line would be like "-a+b", where "+b" is
+ // the the next line of the new file, which is not
+ // validly formatted) but record that the orig had
+ // no newline.
+ r.hunk.OrigNoNewlineAt = int32(len(r.hunk.Body))
+ } else {
+ // Remove previous line's newline.
+ if len(r.hunk.Body) != 0 {
+ r.hunk.Body = r.hunk.Body[:len(r.hunk.Body)-1]
+ }
+ }
+ continue
+ }
+
+ if len(line) > 0 {
+ lastLineFromOrig = line[0] == '-'
+ }
+
+ r.hunk.Body = append(r.hunk.Body, line...)
+ r.hunk.Body = append(r.hunk.Body, '\n')
+ }
+ }
+}
+
+const noNewlineMessage = `\ No newline at end of file`
+
+// linePrefixes is the set of all characters a valid line in a diff
+// hunk can start with. '\' can appear in diffs when no newline is
+// present at the end of a file.
+// See: 'http://www.gnu.org/software/diffutils/manual/diffutils.html#Incomplete-Lines'
+var linePrefixes = []byte{' ', '-', '+', '\\'}
+
+// linePrefix returns true if 'c' is in 'linePrefixes'.
+func linePrefix(c byte) bool {
+ for _, p := range linePrefixes {
+ if p == c {
+ return true
+ }
+ }
+ return false
+}
+
+// normalizeHeader takes a header of the form:
+// "@@ -linestart[,chunksize] +linestart[,chunksize] @@ section"
+// and returns two strings, with the first in the form:
+// "@@ -linestart,chunksize +linestart,chunksize @@".
+// where linestart and chunksize are both integers. The second is the
+// optional section header. chunksize may be omitted from the header
+// if its value is 1. normalizeHeader returns an error if the header
+// is not in the correct format.
+func normalizeHeader(header string) (string, string, error) {
+ // Split the header into five parts: the first '@@', the two
+ // ranges, the last '@@', and the optional section.
+ pieces := strings.SplitN(header, " ", 5)
+ if len(pieces) < 4 {
+ return "", "", &ErrBadHunkHeader{header: header}
+ }
+
+ if pieces[0] != "@@" {
+ return "", "", &ErrBadHunkHeader{header: header}
+ }
+ for i := 1; i < 3; i++ {
+ if !strings.ContainsRune(pieces[i], ',') {
+ pieces[i] = pieces[i] + ",1"
+ }
+ }
+ if pieces[3] != "@@" {
+ return "", "", &ErrBadHunkHeader{header: header}
+ }
+
+ var section string
+ if len(pieces) == 5 {
+ section = pieces[4]
+ }
+ return strings.Join(pieces, " "), strings.TrimSpace(section), nil
+}
+
+// ReadAllHunks reads all remaining hunks from r. A successful call
+// returns err == nil, not err == EOF. Because ReadAllHunks is defined
+// to read until EOF, it does not treat end of file as an error to be
+// reported.
+func (r *HunksReader) ReadAllHunks() ([]*Hunk, error) {
+ var hunks []*Hunk
+ linesRead := int32(0)
+ for {
+ hunk, err := r.ReadHunk()
+ if err == io.EOF {
+ return hunks, nil
+ }
+ if hunk != nil {
+ linesRead++ // account for the hunk header line
+ hunk.StartPosition = linesRead
+ hunks = append(hunks, hunk)
+ linesRead += int32(bytes.Count(hunk.Body, []byte{'\n'}))
+ }
+ if err != nil {
+ return hunks, err
+ }
+ }
+}
+
+// A ParseError is a description of a unified diff syntax error.
+type ParseError struct {
+ Line int // Line where the error occurred
+ Offset int64 // Offset where the error occurred
+ Err error // The actual error
+}
+
+func (e *ParseError) Error() string {
+ return fmt.Sprintf("line %d, char %d: %s", e.Line, e.Offset, e.Err)
+}
+
+// ErrNoHunkHeader indicates that a unified diff hunk header was
+// expected but not found during parsing.
+var ErrNoHunkHeader = errors.New("no hunk header")
+
+// ErrBadHunkHeader indicates that a malformed unified diff hunk
+// header was encountered during parsing.
+type ErrBadHunkHeader struct {
+ header string
+}
+
+func (e *ErrBadHunkHeader) Error() string {
+ if e.header == "" {
+ return "bad hunk header"
+ }
+ return "bad hunk header: " + e.header
+}
+
+// ErrBadHunkLine is when a line not beginning with ' ', '-', '+', or
+// '\' is encountered while reading a hunk. In the context of reading
+// a single hunk or file, it is an unexpected error. In a multi-file
+// diff, however, it indicates that the current file's diff is
+// complete (and remaining diff data will describe another file
+// unified diff).
+type ErrBadHunkLine struct {
+ Line []byte
+}
+
+func (e *ErrBadHunkLine) Error() string {
+ m := "bad hunk line (does not start with ' ', '-', '+', or '\\')"
+ if len(e.Line) == 0 {
+ return m
+ }
+ return m + ": " + string(e.Line)
+}
diff --git a/vendor/github.com/sourcegraph/go-diff/diff/print.go b/vendor/github.com/sourcegraph/go-diff/diff/print.go
new file mode 100644
index 0000000..d440cb9
--- /dev/null
+++ b/vendor/github.com/sourcegraph/go-diff/diff/print.go
@@ -0,0 +1,140 @@
+package diff
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "time"
+
+ "sourcegraph.com/sqs/pbtypes"
+)
+
+// PrintMultiFileDiff prints a multi-file diff in unified diff format.
+func PrintMultiFileDiff(ds []*FileDiff) ([]byte, error) {
+ var buf bytes.Buffer
+ for _, d := range ds {
+ diff, err := PrintFileDiff(d)
+ if err != nil {
+ return nil, err
+ }
+ if _, err := buf.Write(diff); err != nil {
+ return nil, err
+ }
+ }
+ return buf.Bytes(), nil
+}
+
+// PrintFileDiff prints a FileDiff in unified diff format.
+//
+// TODO(sqs): handle escaping whitespace/etc. chars in filenames
+func PrintFileDiff(d *FileDiff) ([]byte, error) {
+ var buf bytes.Buffer
+
+ for _, xheader := range d.Extended {
+ if _, err := fmt.Fprintln(&buf, xheader); err != nil {
+ return nil, err
+ }
+ }
+
+ if d.Hunks == nil {
+ return buf.Bytes(), nil
+ }
+
+ if err := printFileHeader(&buf, "--- ", d.OrigName, timePtr(d.OrigTime)); err != nil {
+ return nil, err
+ }
+ if err := printFileHeader(&buf, "+++ ", d.NewName, timePtr(d.NewTime)); err != nil {
+ return nil, err
+ }
+
+ ph, err := PrintHunks(d.Hunks)
+ if err != nil {
+ return nil, err
+ }
+
+ if _, err := buf.Write(ph); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+func timePtr(ts *pbtypes.Timestamp) *time.Time {
+ if ts == nil {
+ return nil
+ }
+ t := ts.Time()
+ return &t
+}
+
+func printFileHeader(w io.Writer, prefix string, filename string, timestamp *time.Time) error {
+ if _, err := fmt.Fprint(w, prefix, filename); err != nil {
+ return err
+ }
+ if timestamp != nil {
+ if _, err := fmt.Fprint(w, "\t", timestamp.Format(diffTimeFormatLayout)); err != nil {
+ return err
+ }
+ }
+ if _, err := fmt.Fprintln(w); err != nil {
+ return err
+ }
+ return nil
+}
+
+// PrintHunks prints diff hunks in unified diff format.
+func PrintHunks(hunks []*Hunk) ([]byte, error) {
+ var buf bytes.Buffer
+ for _, hunk := range hunks {
+ _, err := fmt.Fprintf(&buf,
+ "@@ -%d,%d +%d,%d @@", hunk.OrigStartLine, hunk.OrigLines, hunk.NewStartLine, hunk.NewLines,
+ )
+ if err != nil {
+ return nil, err
+ }
+ if hunk.Section != "" {
+ _, err := fmt.Fprint(&buf, " ", hunk.Section)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if _, err := fmt.Fprintln(&buf); err != nil {
+ return nil, err
+ }
+
+ if hunk.OrigNoNewlineAt == 0 {
+ if _, err := buf.Write(hunk.Body); err != nil {
+ return nil, err
+ }
+ } else {
+ if _, err := buf.Write(hunk.Body[:hunk.OrigNoNewlineAt]); err != nil {
+ return nil, err
+ }
+ if err := printNoNewlineMessage(&buf); err != nil {
+ return nil, err
+ }
+ if _, err := buf.Write(hunk.Body[hunk.OrigNoNewlineAt:]); err != nil {
+ return nil, err
+ }
+ }
+
+ if !bytes.HasSuffix(hunk.Body, []byte{'\n'}) {
+ if _, err := fmt.Fprintln(&buf); err != nil {
+ return nil, err
+ }
+ if err := printNoNewlineMessage(&buf); err != nil {
+ return nil, err
+ }
+ }
+ }
+ return buf.Bytes(), nil
+}
+
+func printNoNewlineMessage(w io.Writer) error {
+ if _, err := w.Write([]byte(noNewlineMessage)); err != nil {
+ return err
+ }
+ if _, err := fmt.Fprintln(w); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/sourcegraph/go-diff/diff/reader_util.go b/vendor/github.com/sourcegraph/go-diff/diff/reader_util.go
new file mode 100644
index 0000000..395fb7b
--- /dev/null
+++ b/vendor/github.com/sourcegraph/go-diff/diff/reader_util.go
@@ -0,0 +1,37 @@
+package diff
+
+import (
+ "bufio"
+ "io"
+)
+
+// readLine is a helper that mimics the functionality of calling bufio.Scanner.Scan() and
+// bufio.Scanner.Bytes(), but without the token size limitation. It will read and return
+// the next line in the Reader with the trailing newline stripped. It will return an
+// io.EOF error when there is nothing left to read (at the start of the function call). It
+// will return any other errors it receives from the underlying call to ReadBytes.
+func readLine(r *bufio.Reader) ([]byte, error) {
+ line_, err := r.ReadBytes('\n')
+ if err == io.EOF {
+ if len(line_) == 0 {
+ return nil, io.EOF
+ }
+
+ // ReadBytes returned io.EOF, because it didn't find another newline, but there is
+ // still the remainder of the file to return as a line.
+ line := line_
+ return line, nil
+ } else if err != nil {
+ return nil, err
+ }
+ line := line_[0 : len(line_)-1]
+ return dropCR(line), nil
+}
+
+// dropCR drops a terminal \r from the data.
+func dropCR(data []byte) []byte {
+ if len(data) > 0 && data[len(data)-1] == '\r' {
+ return data[0 : len(data)-1]
+ }
+ return data
+}
diff --git a/vendor/github.com/spf13/afero/.travis.yml b/vendor/github.com/spf13/afero/.travis.yml
new file mode 100644
index 0000000..0637db7
--- /dev/null
+++ b/vendor/github.com/spf13/afero/.travis.yml
@@ -0,0 +1,21 @@
+sudo: false
+language: go
+
+go:
+ - 1.9
+ - "1.10"
+ - tip
+
+os:
+ - linux
+ - osx
+
+matrix:
+ allow_failures:
+ - go: tip
+ fast_finish: true
+
+script:
+ - go build
+ - go test -race -v ./...
+
diff --git a/vendor/github.com/spf13/afero/LICENSE.txt b/vendor/github.com/spf13/afero/LICENSE.txt
new file mode 100644
index 0000000..298f0e2
--- /dev/null
+++ b/vendor/github.com/spf13/afero/LICENSE.txt
@@ -0,0 +1,174 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
diff --git a/vendor/github.com/spf13/afero/README.md b/vendor/github.com/spf13/afero/README.md
new file mode 100644
index 0000000..0c9b04b
--- /dev/null
+++ b/vendor/github.com/spf13/afero/README.md
@@ -0,0 +1,452 @@
+![afero logo-sm](https://cloud.githubusercontent.com/assets/173412/11490338/d50e16dc-97a5-11e5-8b12-019a300d0fcb.png)
+
+A FileSystem Abstraction System for Go
+
+[![Build Status](https://travis-ci.org/spf13/afero.svg)](https://travis-ci.org/spf13/afero) [![Build status](https://ci.appveyor.com/api/projects/status/github/spf13/afero?branch=master&svg=true)](https://ci.appveyor.com/project/spf13/afero) [![GoDoc](https://godoc.org/github.com/spf13/afero?status.svg)](https://godoc.org/github.com/spf13/afero) [![Join the chat at https://gitter.im/spf13/afero](https://badges.gitter.im/Dev%20Chat.svg)](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+
+# Overview
+
+Afero is a filesystem framework providing a simple, uniform and universal API
+for interacting with any filesystem, as an abstraction layer providing interfaces,
+types and methods. Afero has an exceptionally clean interface and simple design
+without needless constructors or initialization methods.
+
+Afero is also a library providing a base set of interoperable backend
+filesystems that make it easy to work with afero while retaining all the power
+and benefit of the os and ioutil packages.
+
+Afero provides significant improvements over using the os package alone, most
+notably the ability to create mock and testing filesystems without relying on the disk.
+
+It is suitable for use in any situation where you would consider using the OS
+package as it provides an additional abstraction that makes it easy to use a
+memory backed file system during testing. It also adds support for the http
+filesystem for full interoperability.
+
+
+## Afero Features
+
+* A single consistent API for accessing a variety of filesystems
+* Interoperation between a variety of file system types
+* A set of interfaces to encourage and enforce interoperability between backends
+* An atomic cross platform memory backed file system
+* Support for compositional (union) file systems by combining multiple file systems acting as one
+* Specialized backends which modify existing filesystems (Read Only, Regexp filtered)
+* A set of utility functions ported from io, ioutil & hugo to be afero aware
+
+
+# Using Afero
+
+Afero is easy to use and easier to adopt.
+
+A few different ways you could use Afero:
+
+* Use the interfaces alone to define your own file system.
+* Use it as a wrapper for the OS packages.
+* Define different filesystems for different parts of your application.
+* Use Afero for mock filesystems while testing
+
+## Step 1: Install Afero
+
+First use go get to install the latest version of the library.
+
+ $ go get github.com/spf13/afero
+
+Next include Afero in your application.
+```go
+import "github.com/spf13/afero"
+```
+
+## Step 2: Declare a backend
+
+First define a package variable and set it to a pointer to a filesystem.
+```go
+var AppFs = afero.NewMemMapFs()
+
+or
+
+var AppFs = afero.NewOsFs()
+```
+It is important to note that if you repeat the composite literal you
+will be using a completely new and isolated filesystem. In the case of
+OsFs it will still use the same underlying filesystem but will reduce
+the ability to drop in other filesystems as desired.
+
+## Step 3: Use it like you would the OS package
+
+Throughout your application use any function and method like you normally
+would.
+
+So if my application before had:
+```go
+os.Open("/tmp/foo")
+```
+We would replace it with:
+```go
+AppFs.Open("/tmp/foo")
+```
+
+`AppFs` being the variable we defined above.
+
+
+## List of all available functions
+
+File System Methods Available:
+```go
+Chmod(name string, mode os.FileMode) : error
+Chtimes(name string, atime time.Time, mtime time.Time) : error
+Create(name string) : File, error
+Mkdir(name string, perm os.FileMode) : error
+MkdirAll(path string, perm os.FileMode) : error
+Name() : string
+Open(name string) : File, error
+OpenFile(name string, flag int, perm os.FileMode) : File, error
+Remove(name string) : error
+RemoveAll(path string) : error
+Rename(oldname, newname string) : error
+Stat(name string) : os.FileInfo, error
+```
+File Interfaces and Methods Available:
+```go
+io.Closer
+io.Reader
+io.ReaderAt
+io.Seeker
+io.Writer
+io.WriterAt
+
+Name() : string
+Readdir(count int) : []os.FileInfo, error
+Readdirnames(n int) : []string, error
+Stat() : os.FileInfo, error
+Sync() : error
+Truncate(size int64) : error
+WriteString(s string) : ret int, err error
+```
+In some applications it may make sense to define a new package that
+simply exports the file system variable for easy access from anywhere.
+
+## Using Afero's utility functions
+
+Afero provides a set of functions to make it easier to use the underlying file systems.
+These functions have been primarily ported from io & ioutil with some developed for Hugo.
+
+The afero utilities support all afero compatible backends.
+
+The list of utilities includes:
+
+```go
+DirExists(path string) (bool, error)
+Exists(path string) (bool, error)
+FileContainsBytes(filename string, subslice []byte) (bool, error)
+GetTempDir(subPath string) string
+IsDir(path string) (bool, error)
+IsEmpty(path string) (bool, error)
+ReadDir(dirname string) ([]os.FileInfo, error)
+ReadFile(filename string) ([]byte, error)
+SafeWriteReader(path string, r io.Reader) (err error)
+TempDir(dir, prefix string) (name string, err error)
+TempFile(dir, prefix string) (f File, err error)
+Walk(root string, walkFn filepath.WalkFunc) error
+WriteFile(filename string, data []byte, perm os.FileMode) error
+WriteReader(path string, r io.Reader) (err error)
+```
+For a complete list see [Afero's GoDoc](https://godoc.org/github.com/spf13/afero)
+
+There are two different approaches to using them. You can either call
+them directly where the first parameter of each function will be the file
+system, or you can declare a new `Afero`, a custom type used to bind these
+functions as methods to a given filesystem.
+
+### Calling utilities directly
+
+```go
+fs := new(afero.MemMapFs)
+f, err := afero.TempFile(fs,"", "ioutil-test")
+
+```
+
+### Calling via Afero
+
+```go
+fs := afero.NewMemMapFs()
+afs := &afero.Afero{Fs: fs}
+f, err := afs.TempFile("", "ioutil-test")
+```
+
+## Using Afero for Testing
+
+There is a large benefit to using a mock filesystem for testing. It has a
+completely blank state every time it is initialized and can be easily
+reproducible regardless of OS. You could create files to your heart’s content
+and the file access would be fast while also saving you from all the annoying
+issues with deleting temporary files, Windows file locking, etc. The MemMapFs
+backend is perfect for testing.
+
+* Much faster than performing I/O operations on disk
+* Avoid security issues and permissions
+* Far more control. 'rm -rf /' with confidence
+* Test setup is far easier to do
+* No test cleanup needed
+
+One way to accomplish this is to define a variable as mentioned above.
+In your application this will be set to afero.NewOsFs(); during testing you
+can set it to afero.NewMemMapFs().
+
+It wouldn't be uncommon to have each test initialize a blank slate memory
+backend. To do this I would define my `appFS = afero.NewOsFs()` somewhere
+appropriate in my application code. This approach ensures that Tests are order
+independent, with no test relying on the state left by an earlier test.
+
+Then in my tests I would initialize a new MemMapFs for each test:
+```go
+func TestExist(t *testing.T) {
+ appFS := afero.NewMemMapFs()
+ // create test files and directories
+ appFS.MkdirAll("src/a", 0755)
+ afero.WriteFile(appFS, "src/a/b", []byte("file b"), 0644)
+ afero.WriteFile(appFS, "src/c", []byte("file c"), 0644)
+ name := "src/c"
+ _, err := appFS.Stat(name)
+ if os.IsNotExist(err) {
+ t.Errorf("file \"%s\" does not exist.\n", name)
+ }
+}
+```
+
+# Available Backends
+
+## Operating System Native
+
+### OsFs
+
+The first is simply a wrapper around the native OS calls. This makes it
+very easy to use as all of the calls are the same as the existing OS
+calls. It also makes it trivial to have your code use the OS during
+operation and a mock filesystem during testing or as needed.
+
+```go
+appfs := afero.NewOsFs()
+appfs.MkdirAll("src/a", 0755)
+```
+
+## Memory Backed Storage
+
+### MemMapFs
+
+Afero also provides a fully atomic memory backed filesystem perfect for use in
+mocking and to avoid unnecessary disk io when persistence isn’t
+necessary. It is fully concurrent and will work within go routines
+safely.
+
+```go
+mm := afero.NewMemMapFs()
+mm.MkdirAll("src/a", 0755)
+```
+
+#### InMemoryFile
+
+As part of MemMapFs, Afero also provides an atomic, fully concurrent memory
+backed file implementation. This can be used in other memory backed file
+systems with ease. Plans are to add a radix tree memory stored file
+system using InMemoryFile.
+
+## Network Interfaces
+
+### SftpFs
+
+Afero has experimental support for the secure file transfer protocol (sftp), which can
+be used to perform file operations over an encrypted channel.
+
+## Filtering Backends
+
+### BasePathFs
+
+The BasePathFs restricts all operations to a given path within an Fs.
+The given file name to the operations on this Fs will be prepended with
+the base path before calling the source Fs.
+
+```go
+bp := afero.NewBasePathFs(afero.NewOsFs(), "/base/path")
+```
+
+### ReadOnlyFs
+
+A thin wrapper around the source Fs providing a read only view.
+
+```go
+fs := afero.NewReadOnlyFs(afero.NewOsFs())
+_, err := fs.Create("/file.txt")
+// err = syscall.EPERM
+```
+
+### RegexpFs
+
+A filtered view on file names, any file NOT matching
+the passed regexp will be treated as non-existing.
+Files not matching the regexp provided will not be created.
+Directories are not filtered.
+
+```go
+fs := afero.NewRegexpFs(afero.NewMemMapFs(), regexp.MustCompile(`\.txt$`))
+_, err := fs.Create("/file.html")
+// err = syscall.ENOENT
+```
+
+### HttpFs
+
+Afero provides an http compatible backend which can wrap any of the existing
+backends.
+
+The Http package requires a slightly specific version of Open which
+returns an http.File type.
+
+Afero provides an httpFs file system which satisfies this requirement.
+Any Afero FileSystem can be used as an httpFs.
+
+```go
+httpFs := afero.NewHttpFs()
+fileserver := http.FileServer(httpFs.Dir())
+http.Handle("/", fileserver)
+```
+
+## Composite Backends
+
+Afero provides the ability to have two filesystems (or more) act as a single
+file system.
+
+### CacheOnReadFs
+
+The CacheOnReadFs will lazily make copies of any accessed files from the base
+layer into the overlay. Subsequent reads will be pulled from the overlay
+directly, provided the request is within the cache duration of when it was
+created in the overlay.
+
+If the base filesystem is writeable, any changes to files will be
+done first to the base, then to the overlay layer. Write calls to open file
+handles like `Write()` or `Truncate()` are done to the base first, then the overlay.
+
+To write files to the overlay only, you can use the overlay Fs directly (not
+via the union Fs).
+
+Cache files in the layer for the given time.Duration, a cache duration of 0
+means "forever" meaning the file will not be re-requested from the base ever.
+
+A read-only base will make the overlay also read-only but still copy files
+from the base to the overlay when they're not present (or outdated) in the
+caching layer.
+
+```go
+base := afero.NewOsFs()
+layer := afero.NewMemMapFs()
+ufs := afero.NewCacheOnReadFs(base, layer, 100 * time.Second)
+```
+
+### CopyOnWriteFs()
+
+The CopyOnWriteFs is a read only base file system with a potentially
+writeable layer on top.
+
+Read operations will first look in the overlay and if not found there, will
+serve the file from the base.
+
+Changes to the file system will only be made in the overlay.
+
+Any attempt to modify a file found only in the base will copy the file to the
+overlay layer before modification (including opening a file with a writable
+handle).
+
+Removing and Renaming files present only in the base layer is not currently
+permitted. If a file is present in the base layer and the overlay, only the
+overlay will be removed/renamed.
+
+```go
+ base := afero.NewOsFs()
+ roBase := afero.NewReadOnlyFs(base)
+ ufs := afero.NewCopyOnWriteFs(roBase, afero.NewMemMapFs())
+
+ fh, _ := ufs.Create("/home/test/file2.txt")
+ fh.WriteString("This is a test")
+ fh.Close()
+```
+
+In this example all write operations will only occur in memory (MemMapFs)
+leaving the base filesystem (OsFs) untouched.
+
+
+## Desired/possible backends
+
+The following is a short list of possible backends we hope someone will
+implement:
+
+* SSH
+* ZIP
+* TAR
+* S3
+
+# About the project
+
+## What's in the name
+
+Afero comes from the latin roots Ad-Facere.
+
+**"Ad"** is a prefix meaning "to".
+
+**"Facere"** is a form of the root "faciō" making "make or do".
+
+The literal meaning of afero is "to make" or "to do" which seems very fitting
+for a library that allows one to make files and directories and do things with them.
+
+The English word that shares the same roots as Afero is "affair". Affair shares
+the same concept but as a noun it means "something that is made or done" or "an
+object of a particular type".
+
+It's also nice that unlike some of my other libraries (hugo, cobra, viper) it
+Googles very well.
+
+## Release Notes
+
+* **0.10.0** 2015.12.10
+ * Full compatibility with Windows
+ * Introduction of afero utilities
+ * Test suite rewritten to work cross platform
+ * Normalize paths for MemMapFs
+ * Adding Sync to the file interface
+ * **Breaking Change** Walk and ReadDir have changed parameter order
+ * Moving types used by MemMapFs to a subpackage
+ * General bugfixes and improvements
+* **0.9.0** 2015.11.05
+ * New Walk function similar to filepath.Walk
+ * MemMapFs.OpenFile handles O_CREATE, O_APPEND, O_TRUNC
+ * MemMapFs.Remove now really deletes the file
+ * InMemoryFile.Readdir and Readdirnames work correctly
+ * InMemoryFile functions lock it for concurrent access
+ * Test suite improvements
+* **0.8.0** 2014.10.28
+ * First public version
+ * Interfaces feel ready for people to build using
+ * Interfaces satisfy all known uses
+ * MemMapFs passes the majority of the OS test suite
+ * OsFs passes the majority of the OS test suite
+
+## Contributing
+
+1. Fork it
+2. Create your feature branch (`git checkout -b my-new-feature`)
+3. Commit your changes (`git commit -am 'Add some feature'`)
+4. Push to the branch (`git push origin my-new-feature`)
+5. Create new Pull Request
+
+## Contributors
+
+Names in no particular order:
+
+* [spf13](https://github.com/spf13)
+* [jaqx0r](https://github.com/jaqx0r)
+* [mbertschler](https://github.com/mbertschler)
+* [xor-gate](https://github.com/xor-gate)
+
+## License
+
+Afero is released under the Apache 2.0 license. See
+[LICENSE.txt](https://github.com/spf13/afero/blob/master/LICENSE.txt)
diff --git a/vendor/github.com/spf13/afero/afero.go b/vendor/github.com/spf13/afero/afero.go
new file mode 100644
index 0000000..f5b5e12
--- /dev/null
+++ b/vendor/github.com/spf13/afero/afero.go
@@ -0,0 +1,108 @@
+// Copyright © 2014 Steve Francia .
+// Copyright 2013 tsuru authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package afero provides types and methods for interacting with the filesystem,
+// as an abstraction layer.
+//
+// Afero also provides a few implementations that are mostly interoperable. One that
+// uses the operating system filesystem, one that uses memory to store files
+// (cross platform) and an interface that should be implemented if you want to
+// provide your own filesystem.
+//
+package afero
+
+import (
+ "errors"
+ "io"
+ "os"
+ "time"
+)
+
+type Afero struct {
+ Fs
+}
+
+// File represents a file in the filesystem.
+type File interface {
+ io.Closer
+ io.Reader
+ io.ReaderAt
+ io.Seeker
+ io.Writer
+ io.WriterAt
+
+ Name() string
+ Readdir(count int) ([]os.FileInfo, error)
+ Readdirnames(n int) ([]string, error)
+ Stat() (os.FileInfo, error)
+ Sync() error
+ Truncate(size int64) error
+ WriteString(s string) (ret int, err error)
+}
+
+// Fs is the filesystem interface.
+//
+// Any simulated or real filesystem should implement this interface.
+type Fs interface {
+ // Create creates a file in the filesystem, returning the file and an
+ // error, if any happens.
+ Create(name string) (File, error)
+
+ // Mkdir creates a directory in the filesystem, return an error if any
+ // happens.
+ Mkdir(name string, perm os.FileMode) error
+
+ // MkdirAll creates a directory path and all parents that does not exist
+ // yet.
+ MkdirAll(path string, perm os.FileMode) error
+
+ // Open opens a file, returning it or an error, if any happens.
+ Open(name string) (File, error)
+
+ // OpenFile opens a file using the given flags and the given mode.
+ OpenFile(name string, flag int, perm os.FileMode) (File, error)
+
+ // Remove removes a file identified by name, returning an error, if any
+ // happens.
+ Remove(name string) error
+
+ // RemoveAll removes a directory path and any children it contains. It
+ // does not fail if the path does not exist (return nil).
+ RemoveAll(path string) error
+
+ // Rename renames a file.
+ Rename(oldname, newname string) error
+
+ // Stat returns a FileInfo describing the named file, or an error, if any
+ // happens.
+ Stat(name string) (os.FileInfo, error)
+
+ // The name of this FileSystem
+ Name() string
+
+ //Chmod changes the mode of the named file to mode.
+ Chmod(name string, mode os.FileMode) error
+
+ //Chtimes changes the access and modification times of the named file
+ Chtimes(name string, atime time.Time, mtime time.Time) error
+}
+
+var (
+ ErrFileClosed = errors.New("File is closed")
+ ErrOutOfRange = errors.New("Out of range")
+ ErrTooLarge = errors.New("Too large")
+ ErrFileNotFound = os.ErrNotExist
+ ErrFileExists = os.ErrExist
+ ErrDestinationExists = os.ErrExist
+)
diff --git a/vendor/github.com/spf13/afero/appveyor.yml b/vendor/github.com/spf13/afero/appveyor.yml
new file mode 100644
index 0000000..a633ad5
--- /dev/null
+++ b/vendor/github.com/spf13/afero/appveyor.yml
@@ -0,0 +1,15 @@
+version: '{build}'
+clone_folder: C:\gopath\src\github.com\spf13\afero
+environment:
+ GOPATH: C:\gopath
+build_script:
+- cmd: >-
+ go version
+
+ go env
+
+ go get -v github.com/spf13/afero/...
+
+ go build github.com/spf13/afero
+test_script:
+- cmd: go test -race -v github.com/spf13/afero/...
diff --git a/vendor/github.com/spf13/afero/basepath.go b/vendor/github.com/spf13/afero/basepath.go
new file mode 100644
index 0000000..616ff8f
--- /dev/null
+++ b/vendor/github.com/spf13/afero/basepath.go
@@ -0,0 +1,180 @@
+package afero
+
+import (
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "time"
+)
+
+var _ Lstater = (*BasePathFs)(nil)
+
+// The BasePathFs restricts all operations to a given path within an Fs.
+// The given file name to the operations on this Fs will be prepended with
+// the base path before calling the base Fs.
+// Any file name (after filepath.Clean()) outside this base path will be
+// treated as non existing file.
+//
+// Note that it does not clean the error messages on return, so you may
+// reveal the real path on errors.
+type BasePathFs struct {
+ source Fs
+ path string
+}
+
+type BasePathFile struct {
+ File
+ path string
+}
+
+func (f *BasePathFile) Name() string {
+ sourcename := f.File.Name()
+ return strings.TrimPrefix(sourcename, filepath.Clean(f.path))
+}
+
+func NewBasePathFs(source Fs, path string) Fs {
+ return &BasePathFs{source: source, path: path}
+}
+
+// on a file outside the base path it returns the given file name and an error,
+// else the given file with the base path prepended
+func (b *BasePathFs) RealPath(name string) (path string, err error) {
+ if err := validateBasePathName(name); err != nil {
+ return name, err
+ }
+
+ bpath := filepath.Clean(b.path)
+ path = filepath.Clean(filepath.Join(bpath, name))
+ if !strings.HasPrefix(path, bpath) {
+ return name, os.ErrNotExist
+ }
+
+ return path, nil
+}
+
+func validateBasePathName(name string) error {
+ if runtime.GOOS != "windows" {
+ // Not much to do here;
+ // the virtual file paths all look absolute on *nix.
+ return nil
+ }
+
+ // On Windows a common mistake would be to provide an absolute OS path
+ // We could strip out the base part, but that would not be very portable.
+ if filepath.IsAbs(name) {
+ return os.ErrNotExist
+ }
+
+ return nil
+}
+
+func (b *BasePathFs) Chtimes(name string, atime, mtime time.Time) (err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return &os.PathError{Op: "chtimes", Path: name, Err: err}
+ }
+ return b.source.Chtimes(name, atime, mtime)
+}
+
+func (b *BasePathFs) Chmod(name string, mode os.FileMode) (err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return &os.PathError{Op: "chmod", Path: name, Err: err}
+ }
+ return b.source.Chmod(name, mode)
+}
+
+func (b *BasePathFs) Name() string {
+ return "BasePathFs"
+}
+
+func (b *BasePathFs) Stat(name string) (fi os.FileInfo, err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return nil, &os.PathError{Op: "stat", Path: name, Err: err}
+ }
+ return b.source.Stat(name)
+}
+
+func (b *BasePathFs) Rename(oldname, newname string) (err error) {
+ if oldname, err = b.RealPath(oldname); err != nil {
+ return &os.PathError{Op: "rename", Path: oldname, Err: err}
+ }
+ if newname, err = b.RealPath(newname); err != nil {
+ return &os.PathError{Op: "rename", Path: newname, Err: err}
+ }
+ return b.source.Rename(oldname, newname)
+}
+
+func (b *BasePathFs) RemoveAll(name string) (err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return &os.PathError{Op: "remove_all", Path: name, Err: err}
+ }
+ return b.source.RemoveAll(name)
+}
+
+func (b *BasePathFs) Remove(name string) (err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return &os.PathError{Op: "remove", Path: name, Err: err}
+ }
+ return b.source.Remove(name)
+}
+
+func (b *BasePathFs) OpenFile(name string, flag int, mode os.FileMode) (f File, err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return nil, &os.PathError{Op: "openfile", Path: name, Err: err}
+ }
+ sourcef, err := b.source.OpenFile(name, flag, mode)
+ if err != nil {
+ return nil, err
+ }
+ return &BasePathFile{sourcef, b.path}, nil
+}
+
+func (b *BasePathFs) Open(name string) (f File, err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return nil, &os.PathError{Op: "open", Path: name, Err: err}
+ }
+ sourcef, err := b.source.Open(name)
+ if err != nil {
+ return nil, err
+ }
+ return &BasePathFile{File: sourcef, path: b.path}, nil
+}
+
+func (b *BasePathFs) Mkdir(name string, mode os.FileMode) (err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return &os.PathError{Op: "mkdir", Path: name, Err: err}
+ }
+ return b.source.Mkdir(name, mode)
+}
+
+func (b *BasePathFs) MkdirAll(name string, mode os.FileMode) (err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return &os.PathError{Op: "mkdir", Path: name, Err: err}
+ }
+ return b.source.MkdirAll(name, mode)
+}
+
+func (b *BasePathFs) Create(name string) (f File, err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return nil, &os.PathError{Op: "create", Path: name, Err: err}
+ }
+ sourcef, err := b.source.Create(name)
+ if err != nil {
+ return nil, err
+ }
+ return &BasePathFile{File: sourcef, path: b.path}, nil
+}
+
+func (b *BasePathFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
+ name, err := b.RealPath(name)
+ if err != nil {
+ return nil, false, &os.PathError{Op: "lstat", Path: name, Err: err}
+ }
+ if lstater, ok := b.source.(Lstater); ok {
+ return lstater.LstatIfPossible(name)
+ }
+ fi, err := b.source.Stat(name)
+ return fi, false, err
+}
+
+// vim: ts=4 sw=4 noexpandtab nolist syn=go
diff --git a/vendor/github.com/spf13/afero/cacheOnReadFs.go b/vendor/github.com/spf13/afero/cacheOnReadFs.go
new file mode 100644
index 0000000..29a26c6
--- /dev/null
+++ b/vendor/github.com/spf13/afero/cacheOnReadFs.go
@@ -0,0 +1,290 @@
+package afero
+
+import (
+ "os"
+ "syscall"
+ "time"
+)
+
+// If the cache duration is 0, cache time will be unlimited, i.e. once
+// a file is in the layer, the base will never be read again for this file.
+//
+// For cache times greater than 0, the modification time of a file is
+// checked. Note that a lot of file system implementations only allow a
+// resolution of a second for timestamps... or as the godoc for os.Chtimes()
+// states: "The underlying filesystem may truncate or round the values to a
+// less precise time unit."
+//
+// This caching union will forward all write calls also to the base file
+// system first. To prevent writing to the base Fs, wrap it in a read-only
+// filter - Note: this will also make the overlay read-only, for writing files
+// in the overlay, use the overlay Fs directly, not via the union Fs.
+type CacheOnReadFs struct {
+ base Fs
+ layer Fs
+ cacheTime time.Duration
+}
+
+func NewCacheOnReadFs(base Fs, layer Fs, cacheTime time.Duration) Fs {
+ return &CacheOnReadFs{base: base, layer: layer, cacheTime: cacheTime}
+}
+
+type cacheState int
+
+const (
+ // not present in the overlay, unknown if it exists in the base:
+ cacheMiss cacheState = iota
+ // present in the overlay and in base, base file is newer:
+ cacheStale
+ // present in the overlay - with cache time == 0 it may exist in the base,
+ // with cacheTime > 0 it exists in the base and is same age or newer in the
+ // overlay
+ cacheHit
+ // happens if someone writes directly to the overlay without
+ // going through this union
+ cacheLocal
+)
+
+func (u *CacheOnReadFs) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) {
+ var lfi, bfi os.FileInfo
+ lfi, err = u.layer.Stat(name)
+ if err == nil {
+ if u.cacheTime == 0 {
+ return cacheHit, lfi, nil
+ }
+ if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) {
+ bfi, err = u.base.Stat(name)
+ if err != nil {
+ return cacheLocal, lfi, nil
+ }
+ if bfi.ModTime().After(lfi.ModTime()) {
+ return cacheStale, bfi, nil
+ }
+ }
+ return cacheHit, lfi, nil
+ }
+
+ if err == syscall.ENOENT || os.IsNotExist(err) {
+ return cacheMiss, nil, nil
+ }
+
+ return cacheMiss, nil, err
+}
+
+func (u *CacheOnReadFs) copyToLayer(name string) error {
+ return copyToLayer(u.base, u.layer, name)
+}
+
+func (u *CacheOnReadFs) Chtimes(name string, atime, mtime time.Time) error {
+ st, _, err := u.cacheStatus(name)
+ if err != nil {
+ return err
+ }
+ switch st {
+ case cacheLocal:
+ case cacheHit:
+ err = u.base.Chtimes(name, atime, mtime)
+ case cacheStale, cacheMiss:
+ if err := u.copyToLayer(name); err != nil {
+ return err
+ }
+ err = u.base.Chtimes(name, atime, mtime)
+ }
+ if err != nil {
+ return err
+ }
+ return u.layer.Chtimes(name, atime, mtime)
+}
+
+func (u *CacheOnReadFs) Chmod(name string, mode os.FileMode) error {
+ st, _, err := u.cacheStatus(name)
+ if err != nil {
+ return err
+ }
+ switch st {
+ case cacheLocal:
+ case cacheHit:
+ err = u.base.Chmod(name, mode)
+ case cacheStale, cacheMiss:
+ if err := u.copyToLayer(name); err != nil {
+ return err
+ }
+ err = u.base.Chmod(name, mode)
+ }
+ if err != nil {
+ return err
+ }
+ return u.layer.Chmod(name, mode)
+}
+
+func (u *CacheOnReadFs) Stat(name string) (os.FileInfo, error) {
+ st, fi, err := u.cacheStatus(name)
+ if err != nil {
+ return nil, err
+ }
+ switch st {
+ case cacheMiss:
+ return u.base.Stat(name)
+ default: // cacheStale has base, cacheHit and cacheLocal the layer os.FileInfo
+ return fi, nil
+ }
+}
+
+func (u *CacheOnReadFs) Rename(oldname, newname string) error {
+ st, _, err := u.cacheStatus(oldname)
+ if err != nil {
+ return err
+ }
+ switch st {
+ case cacheLocal:
+ case cacheHit:
+ err = u.base.Rename(oldname, newname)
+ case cacheStale, cacheMiss:
+ if err := u.copyToLayer(oldname); err != nil {
+ return err
+ }
+ err = u.base.Rename(oldname, newname)
+ }
+ if err != nil {
+ return err
+ }
+ return u.layer.Rename(oldname, newname)
+}
+
+func (u *CacheOnReadFs) Remove(name string) error {
+ st, _, err := u.cacheStatus(name)
+ if err != nil {
+ return err
+ }
+ switch st {
+ case cacheLocal:
+ case cacheHit, cacheStale, cacheMiss:
+ err = u.base.Remove(name)
+ }
+ if err != nil {
+ return err
+ }
+ return u.layer.Remove(name)
+}
+
+func (u *CacheOnReadFs) RemoveAll(name string) error {
+ st, _, err := u.cacheStatus(name)
+ if err != nil {
+ return err
+ }
+ switch st {
+ case cacheLocal:
+ case cacheHit, cacheStale, cacheMiss:
+ err = u.base.RemoveAll(name)
+ }
+ if err != nil {
+ return err
+ }
+ return u.layer.RemoveAll(name)
+}
+
+func (u *CacheOnReadFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+ st, _, err := u.cacheStatus(name)
+ if err != nil {
+ return nil, err
+ }
+ switch st {
+ case cacheLocal, cacheHit:
+ default:
+ if err := u.copyToLayer(name); err != nil {
+ return nil, err
+ }
+ }
+ if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
+ bfi, err := u.base.OpenFile(name, flag, perm)
+ if err != nil {
+ return nil, err
+ }
+ lfi, err := u.layer.OpenFile(name, flag, perm)
+ if err != nil {
+ bfi.Close() // oops, what if O_TRUNC was set and file opening in the layer failed...?
+ return nil, err
+ }
+ return &UnionFile{Base: bfi, Layer: lfi}, nil
+ }
+ return u.layer.OpenFile(name, flag, perm)
+}
+
+func (u *CacheOnReadFs) Open(name string) (File, error) {
+ st, fi, err := u.cacheStatus(name)
+ if err != nil {
+ return nil, err
+ }
+
+ switch st {
+ case cacheLocal:
+ return u.layer.Open(name)
+
+ case cacheMiss:
+ bfi, err := u.base.Stat(name)
+ if err != nil {
+ return nil, err
+ }
+ if bfi.IsDir() {
+ return u.base.Open(name)
+ }
+ if err := u.copyToLayer(name); err != nil {
+ return nil, err
+ }
+ return u.layer.Open(name)
+
+ case cacheStale:
+ if !fi.IsDir() {
+ if err := u.copyToLayer(name); err != nil {
+ return nil, err
+ }
+ return u.layer.Open(name)
+ }
+ case cacheHit:
+ if !fi.IsDir() {
+ return u.layer.Open(name)
+ }
+ }
+ // the dirs from cacheHit, cacheStale fall down here:
+ bfile, _ := u.base.Open(name)
+ lfile, err := u.layer.Open(name)
+ if err != nil && bfile == nil {
+ return nil, err
+ }
+ return &UnionFile{Base: bfile, Layer: lfile}, nil
+}
+
+func (u *CacheOnReadFs) Mkdir(name string, perm os.FileMode) error {
+ err := u.base.Mkdir(name, perm)
+ if err != nil {
+ return err
+ }
+ return u.layer.MkdirAll(name, perm) // yes, MkdirAll... we cannot assume it exists in the cache
+}
+
+func (u *CacheOnReadFs) Name() string {
+ return "CacheOnReadFs"
+}
+
+func (u *CacheOnReadFs) MkdirAll(name string, perm os.FileMode) error {
+ err := u.base.MkdirAll(name, perm)
+ if err != nil {
+ return err
+ }
+ return u.layer.MkdirAll(name, perm)
+}
+
+func (u *CacheOnReadFs) Create(name string) (File, error) {
+ bfh, err := u.base.Create(name)
+ if err != nil {
+ return nil, err
+ }
+ lfh, err := u.layer.Create(name)
+ if err != nil {
+ // oops, see comment about OS_TRUNC above, should we remove? then we have to
+ // remember if the file did not exist before
+ bfh.Close()
+ return nil, err
+ }
+ return &UnionFile{Base: bfh, Layer: lfh}, nil
+}
diff --git a/vendor/github.com/spf13/afero/const_bsds.go b/vendor/github.com/spf13/afero/const_bsds.go
new file mode 100644
index 0000000..5728243
--- /dev/null
+++ b/vendor/github.com/spf13/afero/const_bsds.go
@@ -0,0 +1,22 @@
+// Copyright © 2016 Steve Francia .
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build darwin openbsd freebsd netbsd dragonfly
+
+package afero
+
+import (
+ "syscall"
+)
+
+const BADFD = syscall.EBADF
diff --git a/vendor/github.com/spf13/afero/const_win_unix.go b/vendor/github.com/spf13/afero/const_win_unix.go
new file mode 100644
index 0000000..968fc27
--- /dev/null
+++ b/vendor/github.com/spf13/afero/const_win_unix.go
@@ -0,0 +1,25 @@
+// Copyright © 2016 Steve Francia .
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// +build !darwin
+// +build !openbsd
+// +build !freebsd
+// +build !dragonfly
+// +build !netbsd
+
+package afero
+
+import (
+ "syscall"
+)
+
+const BADFD = syscall.EBADFD
diff --git a/vendor/github.com/spf13/afero/copyOnWriteFs.go b/vendor/github.com/spf13/afero/copyOnWriteFs.go
new file mode 100644
index 0000000..9aef397
--- /dev/null
+++ b/vendor/github.com/spf13/afero/copyOnWriteFs.go
@@ -0,0 +1,292 @@
+package afero
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "syscall"
+ "time"
+)
+
+var _ Lstater = (*CopyOnWriteFs)(nil)
+
+// The CopyOnWriteFs is a union filesystem: a read only base file system with
+// a possibly writeable layer on top. Changes to the file system will only
+// be made in the overlay: Changing an existing file in the base layer which
+// is not present in the overlay will copy the file to the overlay ("changing"
+// includes also calls to e.g. Chtimes() and Chmod()).
+//
+// Reading directories is currently only supported via Open(), not OpenFile().
+type CopyOnWriteFs struct {
+ base Fs
+ layer Fs
+}
+
+func NewCopyOnWriteFs(base Fs, layer Fs) Fs {
+ return &CopyOnWriteFs{base: base, layer: layer}
+}
+
+// Returns true if the file is not in the overlay
+func (u *CopyOnWriteFs) isBaseFile(name string) (bool, error) {
+ if _, err := u.layer.Stat(name); err == nil {
+ return false, nil
+ }
+ _, err := u.base.Stat(name)
+ if err != nil {
+ if oerr, ok := err.(*os.PathError); ok {
+ if oerr.Err == os.ErrNotExist || oerr.Err == syscall.ENOENT || oerr.Err == syscall.ENOTDIR {
+ return false, nil
+ }
+ }
+ if err == syscall.ENOENT {
+ return false, nil
+ }
+ }
+ return true, err
+}
+
+func (u *CopyOnWriteFs) copyToLayer(name string) error {
+ return copyToLayer(u.base, u.layer, name)
+}
+
+func (u *CopyOnWriteFs) Chtimes(name string, atime, mtime time.Time) error {
+ b, err := u.isBaseFile(name)
+ if err != nil {
+ return err
+ }
+ if b {
+ if err := u.copyToLayer(name); err != nil {
+ return err
+ }
+ }
+ return u.layer.Chtimes(name, atime, mtime)
+}
+
+func (u *CopyOnWriteFs) Chmod(name string, mode os.FileMode) error {
+ b, err := u.isBaseFile(name)
+ if err != nil {
+ return err
+ }
+ if b {
+ if err := u.copyToLayer(name); err != nil {
+ return err
+ }
+ }
+ return u.layer.Chmod(name, mode)
+}
+
+func (u *CopyOnWriteFs) Stat(name string) (os.FileInfo, error) {
+ fi, err := u.layer.Stat(name)
+ if err != nil {
+ isNotExist := u.isNotExist(err)
+ if isNotExist {
+ return u.base.Stat(name)
+ }
+ return nil, err
+ }
+ return fi, nil
+}
+
+func (u *CopyOnWriteFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
+ llayer, ok1 := u.layer.(Lstater)
+ lbase, ok2 := u.base.(Lstater)
+
+ if ok1 {
+ fi, b, err := llayer.LstatIfPossible(name)
+ if err == nil {
+ return fi, b, nil
+ }
+
+ if !u.isNotExist(err) {
+ return nil, b, err
+ }
+ }
+
+ if ok2 {
+ fi, b, err := lbase.LstatIfPossible(name)
+ if err == nil {
+ return fi, b, nil
+ }
+ if !u.isNotExist(err) {
+ return nil, b, err
+ }
+ }
+
+ fi, err := u.Stat(name)
+
+ return fi, false, err
+}
+
+func (u *CopyOnWriteFs) isNotExist(err error) bool {
+ if e, ok := err.(*os.PathError); ok {
+ err = e.Err
+ }
+ if err == os.ErrNotExist || err == syscall.ENOENT || err == syscall.ENOTDIR {
+ return true
+ }
+ return false
+}
+
+// Renaming files present only in the base layer is not permitted
+func (u *CopyOnWriteFs) Rename(oldname, newname string) error {
+ b, err := u.isBaseFile(oldname)
+ if err != nil {
+ return err
+ }
+ if b {
+ return syscall.EPERM
+ }
+ return u.layer.Rename(oldname, newname)
+}
+
+// Removing files present only in the base layer is not permitted. If
+// a file is present in the base layer and the overlay, only the overlay
+// will be removed.
+func (u *CopyOnWriteFs) Remove(name string) error {
+ err := u.layer.Remove(name)
+ switch err {
+ case syscall.ENOENT:
+ _, err = u.base.Stat(name)
+ if err == nil {
+ return syscall.EPERM
+ }
+ return syscall.ENOENT
+ default:
+ return err
+ }
+}
+
+func (u *CopyOnWriteFs) RemoveAll(name string) error {
+ err := u.layer.RemoveAll(name)
+ switch err {
+ case syscall.ENOENT:
+ _, err = u.base.Stat(name)
+ if err == nil {
+ return syscall.EPERM
+ }
+ return syscall.ENOENT
+ default:
+ return err
+ }
+}
+
+func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+ b, err := u.isBaseFile(name)
+ if err != nil {
+ return nil, err
+ }
+
+ if flag&(os.O_WRONLY|os.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
+ if b {
+ if err = u.copyToLayer(name); err != nil {
+ return nil, err
+ }
+ return u.layer.OpenFile(name, flag, perm)
+ }
+
+ dir := filepath.Dir(name)
+ isaDir, err := IsDir(u.base, dir)
+ if err != nil && !os.IsNotExist(err) {
+ return nil, err
+ }
+ if isaDir {
+ if err = u.layer.MkdirAll(dir, 0777); err != nil {
+ return nil, err
+ }
+ return u.layer.OpenFile(name, flag, perm)
+ }
+
+ isaDir, err = IsDir(u.layer, dir)
+ if err != nil {
+ return nil, err
+ }
+ if isaDir {
+ return u.layer.OpenFile(name, flag, perm)
+ }
+
+ return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOTDIR} // ...or os.ErrNotExist?
+ }
+ if b {
+ return u.base.OpenFile(name, flag, perm)
+ }
+ return u.layer.OpenFile(name, flag, perm)
+}
+
+// This function handles the 9 different possibilities caused
+// by the union which are the intersection of the following...
+// layer: doesn't exist, exists as a file, and exists as a directory
+// base: doesn't exist, exists as a file, and exists as a directory
+func (u *CopyOnWriteFs) Open(name string) (File, error) {
+ // Since the overlay overrides the base we check that first
+ b, err := u.isBaseFile(name)
+ if err != nil {
+ return nil, err
+ }
+
+ // If overlay doesn't exist, return the base (base state irrelevant)
+ if b {
+ return u.base.Open(name)
+ }
+
+ // If overlay is a file, return it (base state irrelevant)
+ dir, err := IsDir(u.layer, name)
+ if err != nil {
+ return nil, err
+ }
+ if !dir {
+ return u.layer.Open(name)
+ }
+
+ // Overlay is a directory, base state now matters.
+ // Base state has 3 states to check but 2 outcomes:
+ // A. It's a file or non-readable in the base (return just the overlay)
+ // B. It's an accessible directory in the base (return a UnionFile)
+
+ // If base is file or nonreadable, return overlay
+ dir, err = IsDir(u.base, name)
+ if !dir || err != nil {
+ return u.layer.Open(name)
+ }
+
+ // Both base & layer are directories
+ // Return union file (if opens are without error)
+ bfile, bErr := u.base.Open(name)
+ lfile, lErr := u.layer.Open(name)
+
+ // If either have errors at this point something is very wrong. Return nil and the errors
+ if bErr != nil || lErr != nil {
+ return nil, fmt.Errorf("BaseErr: %v\nOverlayErr: %v", bErr, lErr)
+ }
+
+ return &UnionFile{Base: bfile, Layer: lfile}, nil
+}
+
+func (u *CopyOnWriteFs) Mkdir(name string, perm os.FileMode) error {
+ dir, err := IsDir(u.base, name)
+ if err != nil {
+ return u.layer.MkdirAll(name, perm)
+ }
+ if dir {
+ return syscall.EEXIST
+ }
+ return u.layer.MkdirAll(name, perm)
+}
+
+func (u *CopyOnWriteFs) Name() string {
+ return "CopyOnWriteFs"
+}
+
+func (u *CopyOnWriteFs) MkdirAll(name string, perm os.FileMode) error {
+ dir, err := IsDir(u.base, name)
+ if err != nil {
+ return u.layer.MkdirAll(name, perm)
+ }
+ if dir {
+ return syscall.EEXIST
+ }
+ return u.layer.MkdirAll(name, perm)
+}
+
+func (u *CopyOnWriteFs) Create(name string) (File, error) {
+ return u.OpenFile(name, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0666)
+}
diff --git a/vendor/github.com/spf13/afero/go.mod b/vendor/github.com/spf13/afero/go.mod
new file mode 100644
index 0000000..9eff4fe
--- /dev/null
+++ b/vendor/github.com/spf13/afero/go.mod
@@ -0,0 +1 @@
+module github.com/spf13/afero
diff --git a/vendor/github.com/spf13/afero/httpFs.go b/vendor/github.com/spf13/afero/httpFs.go
new file mode 100644
index 0000000..c421936
--- /dev/null
+++ b/vendor/github.com/spf13/afero/httpFs.go
@@ -0,0 +1,110 @@
+// Copyright © 2014 Steve Francia .
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "errors"
+ "net/http"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+ "time"
+)
+
+type httpDir struct {
+ basePath string
+ fs HttpFs
+}
+
+func (d httpDir) Open(name string) (http.File, error) {
+ if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 ||
+ strings.Contains(name, "\x00") {
+ return nil, errors.New("http: invalid character in file path")
+ }
+ dir := string(d.basePath)
+ if dir == "" {
+ dir = "."
+ }
+
+ f, err := d.fs.Open(filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name))))
+ if err != nil {
+ return nil, err
+ }
+ return f, nil
+}
+
+type HttpFs struct {
+ source Fs
+}
+
+func NewHttpFs(source Fs) *HttpFs {
+ return &HttpFs{source: source}
+}
+
+func (h HttpFs) Dir(s string) *httpDir {
+ return &httpDir{basePath: s, fs: h}
+}
+
+func (h HttpFs) Name() string { return "h HttpFs" }
+
+func (h HttpFs) Create(name string) (File, error) {
+ return h.source.Create(name)
+}
+
+func (h HttpFs) Chmod(name string, mode os.FileMode) error {
+ return h.source.Chmod(name, mode)
+}
+
+func (h HttpFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
+ return h.source.Chtimes(name, atime, mtime)
+}
+
+func (h HttpFs) Mkdir(name string, perm os.FileMode) error {
+ return h.source.Mkdir(name, perm)
+}
+
+func (h HttpFs) MkdirAll(path string, perm os.FileMode) error {
+ return h.source.MkdirAll(path, perm)
+}
+
+func (h HttpFs) Open(name string) (http.File, error) {
+ f, err := h.source.Open(name)
+ if err == nil {
+ if httpfile, ok := f.(http.File); ok {
+ return httpfile, nil
+ }
+ }
+ return nil, err
+}
+
+func (h HttpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+ return h.source.OpenFile(name, flag, perm)
+}
+
+func (h HttpFs) Remove(name string) error {
+ return h.source.Remove(name)
+}
+
+func (h HttpFs) RemoveAll(path string) error {
+ return h.source.RemoveAll(path)
+}
+
+func (h HttpFs) Rename(oldname, newname string) error {
+ return h.source.Rename(oldname, newname)
+}
+
+func (h HttpFs) Stat(name string) (os.FileInfo, error) {
+ return h.source.Stat(name)
+}
diff --git a/vendor/github.com/spf13/afero/ioutil.go b/vendor/github.com/spf13/afero/ioutil.go
new file mode 100644
index 0000000..5c3a3d8
--- /dev/null
+++ b/vendor/github.com/spf13/afero/ioutil.go
@@ -0,0 +1,230 @@
+// Copyright ©2015 The Go Authors
+// Copyright ©2015 Steve Francia
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "bytes"
+ "io"
+ "os"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "sync"
+ "time"
+)
+
+// byName implements sort.Interface.
+type byName []os.FileInfo
+
+func (f byName) Len() int { return len(f) }
+func (f byName) Less(i, j int) bool { return f[i].Name() < f[j].Name() }
+func (f byName) Swap(i, j int) { f[i], f[j] = f[j], f[i] }
+
+// ReadDir reads the directory named by dirname and returns
+// a list of sorted directory entries.
+func (a Afero) ReadDir(dirname string) ([]os.FileInfo, error) {
+ return ReadDir(a.Fs, dirname)
+}
+
+func ReadDir(fs Fs, dirname string) ([]os.FileInfo, error) {
+ f, err := fs.Open(dirname)
+ if err != nil {
+ return nil, err
+ }
+ list, err := f.Readdir(-1)
+ f.Close()
+ if err != nil {
+ return nil, err
+ }
+ sort.Sort(byName(list))
+ return list, nil
+}
+
+// ReadFile reads the file named by filename and returns the contents.
+// A successful call returns err == nil, not err == EOF. Because ReadFile
+// reads the whole file, it does not treat an EOF from Read as an error
+// to be reported.
+func (a Afero) ReadFile(filename string) ([]byte, error) {
+ return ReadFile(a.Fs, filename)
+}
+
+func ReadFile(fs Fs, filename string) ([]byte, error) {
+ f, err := fs.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ // It's a good but not certain bet that FileInfo will tell us exactly how much to
+ // read, so let's try it but be prepared for the answer to be wrong.
+ var n int64
+
+ if fi, err := f.Stat(); err == nil {
+ // Don't preallocate a huge buffer, just in case.
+ if size := fi.Size(); size < 1e9 {
+ n = size
+ }
+ }
+ // As initial capacity for readAll, use n + a little extra in case Size is zero,
+ // and to avoid another allocation after Read has filled the buffer. The readAll
+ // call will read into its allocated internal buffer cheaply. If the size was
+ // wrong, we'll either waste some space off the end or reallocate as needed, but
+ // in the overwhelmingly common case we'll get it just right.
+ return readAll(f, n+bytes.MinRead)
+}
+
+// readAll reads from r until an error or EOF and returns the data it read
+// from the internal buffer allocated with a specified capacity.
+func readAll(r io.Reader, capacity int64) (b []byte, err error) {
+ buf := bytes.NewBuffer(make([]byte, 0, capacity))
+ // If the buffer overflows, we will get bytes.ErrTooLarge.
+ // Return that as an error. Any other panic remains.
+ defer func() {
+ e := recover()
+ if e == nil {
+ return
+ }
+ if panicErr, ok := e.(error); ok && panicErr == bytes.ErrTooLarge {
+ err = panicErr
+ } else {
+ panic(e)
+ }
+ }()
+ _, err = buf.ReadFrom(r)
+ return buf.Bytes(), err
+}
+
+// ReadAll reads from r until an error or EOF and returns the data it read.
+// A successful call returns err == nil, not err == EOF. Because ReadAll is
+// defined to read from src until EOF, it does not treat an EOF from Read
+// as an error to be reported.
+func ReadAll(r io.Reader) ([]byte, error) {
+ return readAll(r, bytes.MinRead)
+}
+
+// WriteFile writes data to a file named by filename.
+// If the file does not exist, WriteFile creates it with permissions perm;
+// otherwise WriteFile truncates it before writing.
+func (a Afero) WriteFile(filename string, data []byte, perm os.FileMode) error {
+ return WriteFile(a.Fs, filename, data, perm)
+}
+
+func WriteFile(fs Fs, filename string, data []byte, perm os.FileMode) error {
+ f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
+ if err != nil {
+ return err
+ }
+ n, err := f.Write(data)
+ if err == nil && n < len(data) {
+ err = io.ErrShortWrite
+ }
+ if err1 := f.Close(); err == nil {
+ err = err1
+ }
+ return err
+}
+
+// Random number state.
+// We generate random temporary file names so that there's a good
+// chance the file doesn't exist yet - keeps the number of tries in
+// TempFile to a minimum.
+var rand uint32
+var randmu sync.Mutex
+
+func reseed() uint32 {
+ return uint32(time.Now().UnixNano() + int64(os.Getpid()))
+}
+
+func nextSuffix() string {
+ randmu.Lock()
+ r := rand
+ if r == 0 {
+ r = reseed()
+ }
+ r = r*1664525 + 1013904223 // constants from Numerical Recipes
+ rand = r
+ randmu.Unlock()
+ return strconv.Itoa(int(1e9 + r%1e9))[1:]
+}
+
+// TempFile creates a new temporary file in the directory dir
+// with a name beginning with prefix, opens the file for reading
+// and writing, and returns the resulting *File.
+// If dir is the empty string, TempFile uses the default directory
+// for temporary files (see os.TempDir).
+// Multiple programs calling TempFile simultaneously
+// will not choose the same file. The caller can use f.Name()
+// to find the pathname of the file. It is the caller's responsibility
+// to remove the file when no longer needed.
+func (a Afero) TempFile(dir, prefix string) (f File, err error) {
+ return TempFile(a.Fs, dir, prefix)
+}
+
+func TempFile(fs Fs, dir, prefix string) (f File, err error) {
+ if dir == "" {
+ dir = os.TempDir()
+ }
+
+ nconflict := 0
+ for i := 0; i < 10000; i++ {
+ name := filepath.Join(dir, prefix+nextSuffix())
+ f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
+ if os.IsExist(err) {
+ if nconflict++; nconflict > 10 {
+ randmu.Lock()
+ rand = reseed()
+ randmu.Unlock()
+ }
+ continue
+ }
+ break
+ }
+ return
+}
+
+// TempDir creates a new temporary directory in the directory dir
+// with a name beginning with prefix and returns the path of the
+// new directory. If dir is the empty string, TempDir uses the
+// default directory for temporary files (see os.TempDir).
+// Multiple programs calling TempDir simultaneously
+// will not choose the same directory. It is the caller's responsibility
+// to remove the directory when no longer needed.
+func (a Afero) TempDir(dir, prefix string) (name string, err error) {
+ return TempDir(a.Fs, dir, prefix)
+}
+func TempDir(fs Fs, dir, prefix string) (name string, err error) {
+ if dir == "" {
+ dir = os.TempDir()
+ }
+
+ nconflict := 0
+ for i := 0; i < 10000; i++ {
+ try := filepath.Join(dir, prefix+nextSuffix())
+ err = fs.Mkdir(try, 0700)
+ if os.IsExist(err) {
+ if nconflict++; nconflict > 10 {
+ randmu.Lock()
+ rand = reseed()
+ randmu.Unlock()
+ }
+ continue
+ }
+ if err == nil {
+ name = try
+ }
+ break
+ }
+ return
+}
diff --git a/vendor/github.com/spf13/afero/lstater.go b/vendor/github.com/spf13/afero/lstater.go
new file mode 100644
index 0000000..89c1bfc
--- /dev/null
+++ b/vendor/github.com/spf13/afero/lstater.go
@@ -0,0 +1,27 @@
+// Copyright © 2018 Steve Francia .
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "os"
+)
+
+// Lstater is an optional interface in Afero. It is only implemented by the
+// filesystems saying so.
+// It will call Lstat if the filesystem itself is, or it delegates to, the os filesystem.
+// Else it will call Stat.
+// In addition to the FileInfo, it will return a boolean telling whether Lstat was called or not.
+type Lstater interface {
+ LstatIfPossible(name string) (os.FileInfo, bool, error)
+}
diff --git a/vendor/github.com/spf13/afero/match.go b/vendor/github.com/spf13/afero/match.go
new file mode 100644
index 0000000..c18a87f
--- /dev/null
+++ b/vendor/github.com/spf13/afero/match.go
@@ -0,0 +1,110 @@
+// Copyright © 2014 Steve Francia .
+// Copyright 2009 The Go Authors. All rights reserved.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "path/filepath"
+ "sort"
+ "strings"
+)
+
+// Glob returns the names of all files matching pattern or nil
+// if there is no matching file. The syntax of patterns is the same
+// as in Match. The pattern may describe hierarchical names such as
+// /usr/*/bin/ed (assuming the Separator is '/').
+//
+// Glob ignores file system errors such as I/O errors reading directories.
+// The only possible returned error is ErrBadPattern, when pattern
+// is malformed.
+//
+// This was adapted from (http://golang.org/pkg/path/filepath) and uses several
+// built-ins from that package.
+func Glob(fs Fs, pattern string) (matches []string, err error) {
+ if !hasMeta(pattern) {
+ // Lstat not supported by all filesystems.
+ if _, err = lstatIfPossible(fs, pattern); err != nil {
+ return nil, nil
+ }
+ return []string{pattern}, nil
+ }
+
+ dir, file := filepath.Split(pattern)
+ switch dir {
+ case "":
+ dir = "."
+ case string(filepath.Separator):
+ // nothing
+ default:
+ dir = dir[0 : len(dir)-1] // chop off trailing separator
+ }
+
+ if !hasMeta(dir) {
+ return glob(fs, dir, file, nil)
+ }
+
+ var m []string
+ m, err = Glob(fs, dir)
+ if err != nil {
+ return
+ }
+ for _, d := range m {
+ matches, err = glob(fs, d, file, matches)
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// glob searches for files matching pattern in the directory dir
+// and appends them to matches. If the directory cannot be
+// opened, it returns the existing matches. New matches are
+// added in lexicographical order.
+func glob(fs Fs, dir, pattern string, matches []string) (m []string, e error) {
+ m = matches
+ fi, err := fs.Stat(dir)
+ if err != nil {
+ return
+ }
+ if !fi.IsDir() {
+ return
+ }
+ d, err := fs.Open(dir)
+ if err != nil {
+ return
+ }
+ defer d.Close()
+
+ names, _ := d.Readdirnames(-1)
+ sort.Strings(names)
+
+ for _, n := range names {
+ matched, err := filepath.Match(pattern, n)
+ if err != nil {
+ return m, err
+ }
+ if matched {
+ m = append(m, filepath.Join(dir, n))
+ }
+ }
+ return
+}
+
+// hasMeta reports whether path contains any of the magic characters
+// recognized by Match.
+func hasMeta(path string) bool {
+ // TODO(niemeyer): Should other magic characters be added here?
+ return strings.IndexAny(path, "*?[") >= 0
+}
diff --git a/vendor/github.com/spf13/afero/mem/dir.go b/vendor/github.com/spf13/afero/mem/dir.go
new file mode 100644
index 0000000..e104013
--- /dev/null
+++ b/vendor/github.com/spf13/afero/mem/dir.go
@@ -0,0 +1,37 @@
+// Copyright © 2014 Steve Francia .
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mem
+
+type Dir interface {
+ Len() int
+ Names() []string
+ Files() []*FileData
+ Add(*FileData)
+ Remove(*FileData)
+}
+
+func RemoveFromMemDir(dir *FileData, f *FileData) {
+ dir.memDir.Remove(f)
+}
+
+func AddToMemDir(dir *FileData, f *FileData) {
+ dir.memDir.Add(f)
+}
+
+func InitializeDir(d *FileData) {
+ if d.memDir == nil {
+ d.dir = true
+ d.memDir = &DirMap{}
+ }
+}
diff --git a/vendor/github.com/spf13/afero/mem/dirmap.go b/vendor/github.com/spf13/afero/mem/dirmap.go
new file mode 100644
index 0000000..03a57ee
--- /dev/null
+++ b/vendor/github.com/spf13/afero/mem/dirmap.go
@@ -0,0 +1,43 @@
+// Copyright © 2015 Steve Francia .
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mem
+
+import "sort"
+
+type DirMap map[string]*FileData
+
+func (m DirMap) Len() int { return len(m) }
+func (m DirMap) Add(f *FileData) { m[f.name] = f }
+func (m DirMap) Remove(f *FileData) { delete(m, f.name) }
+func (m DirMap) Files() (files []*FileData) {
+ for _, f := range m {
+ files = append(files, f)
+ }
+ sort.Sort(filesSorter(files))
+ return files
+}
+
+// implement sort.Interface for []*FileData
+type filesSorter []*FileData
+
+func (s filesSorter) Len() int { return len(s) }
+func (s filesSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s filesSorter) Less(i, j int) bool { return s[i].name < s[j].name }
+
+func (m DirMap) Names() (names []string) {
+ for x := range m {
+ names = append(names, x)
+ }
+ return names
+}
diff --git a/vendor/github.com/spf13/afero/mem/file.go b/vendor/github.com/spf13/afero/mem/file.go
new file mode 100644
index 0000000..7af2fb5
--- /dev/null
+++ b/vendor/github.com/spf13/afero/mem/file.go
@@ -0,0 +1,317 @@
+// Copyright © 2015 Steve Francia .
+// Copyright 2013 tsuru authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mem
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "os"
+ "path/filepath"
+ "sync"
+ "sync/atomic"
+)
+
+import "time"
+
+const FilePathSeparator = string(filepath.Separator)
+
+type File struct {
+ // atomic requires 64-bit alignment for struct field access
+ at int64
+ readDirCount int64
+ closed bool
+ readOnly bool
+ fileData *FileData
+}
+
+func NewFileHandle(data *FileData) *File {
+ return &File{fileData: data}
+}
+
+func NewReadOnlyFileHandle(data *FileData) *File {
+ return &File{fileData: data, readOnly: true}
+}
+
+func (f File) Data() *FileData {
+ return f.fileData
+}
+
+type FileData struct {
+ sync.Mutex
+ name string
+ data []byte
+ memDir Dir
+ dir bool
+ mode os.FileMode
+ modtime time.Time
+}
+
+func (d *FileData) Name() string {
+ d.Lock()
+ defer d.Unlock()
+ return d.name
+}
+
+func CreateFile(name string) *FileData {
+ return &FileData{name: name, mode: os.ModeTemporary, modtime: time.Now()}
+}
+
+func CreateDir(name string) *FileData {
+ return &FileData{name: name, memDir: &DirMap{}, dir: true}
+}
+
+func ChangeFileName(f *FileData, newname string) {
+ f.Lock()
+ f.name = newname
+ f.Unlock()
+}
+
+func SetMode(f *FileData, mode os.FileMode) {
+ f.Lock()
+ f.mode = mode
+ f.Unlock()
+}
+
+func SetModTime(f *FileData, mtime time.Time) {
+ f.Lock()
+ setModTime(f, mtime)
+ f.Unlock()
+}
+
+func setModTime(f *FileData, mtime time.Time) {
+ f.modtime = mtime
+}
+
+func GetFileInfo(f *FileData) *FileInfo {
+ return &FileInfo{f}
+}
+
+func (f *File) Open() error {
+ atomic.StoreInt64(&f.at, 0)
+ atomic.StoreInt64(&f.readDirCount, 0)
+ f.fileData.Lock()
+ f.closed = false
+ f.fileData.Unlock()
+ return nil
+}
+
+func (f *File) Close() error {
+ f.fileData.Lock()
+ f.closed = true
+ if !f.readOnly {
+ setModTime(f.fileData, time.Now())
+ }
+ f.fileData.Unlock()
+ return nil
+}
+
+func (f *File) Name() string {
+ return f.fileData.Name()
+}
+
+func (f *File) Stat() (os.FileInfo, error) {
+ return &FileInfo{f.fileData}, nil
+}
+
+func (f *File) Sync() error {
+ return nil
+}
+
+func (f *File) Readdir(count int) (res []os.FileInfo, err error) {
+ if !f.fileData.dir {
+ return nil, &os.PathError{Op: "readdir", Path: f.fileData.name, Err: errors.New("not a dir")}
+ }
+ var outLength int64
+
+ f.fileData.Lock()
+ files := f.fileData.memDir.Files()[f.readDirCount:]
+ if count > 0 {
+ if len(files) < count {
+ outLength = int64(len(files))
+ } else {
+ outLength = int64(count)
+ }
+ if len(files) == 0 {
+ err = io.EOF
+ }
+ } else {
+ outLength = int64(len(files))
+ }
+ f.readDirCount += outLength
+ f.fileData.Unlock()
+
+ res = make([]os.FileInfo, outLength)
+ for i := range res {
+ res[i] = &FileInfo{files[i]}
+ }
+
+ return res, err
+}
+
+func (f *File) Readdirnames(n int) (names []string, err error) {
+ fi, err := f.Readdir(n)
+ names = make([]string, len(fi))
+ for i, f := range fi {
+ _, names[i] = filepath.Split(f.Name())
+ }
+ return names, err
+}
+
+func (f *File) Read(b []byte) (n int, err error) {
+ f.fileData.Lock()
+ defer f.fileData.Unlock()
+ if f.closed == true {
+ return 0, ErrFileClosed
+ }
+ if len(b) > 0 && int(f.at) == len(f.fileData.data) {
+ return 0, io.EOF
+ }
+ if int(f.at) > len(f.fileData.data) {
+ return 0, io.ErrUnexpectedEOF
+ }
+ if len(f.fileData.data)-int(f.at) >= len(b) {
+ n = len(b)
+ } else {
+ n = len(f.fileData.data) - int(f.at)
+ }
+ copy(b, f.fileData.data[f.at:f.at+int64(n)])
+ atomic.AddInt64(&f.at, int64(n))
+ return
+}
+
+func (f *File) ReadAt(b []byte, off int64) (n int, err error) {
+ atomic.StoreInt64(&f.at, off)
+ return f.Read(b)
+}
+
+func (f *File) Truncate(size int64) error {
+ if f.closed == true {
+ return ErrFileClosed
+ }
+ if f.readOnly {
+ return &os.PathError{Op: "truncate", Path: f.fileData.name, Err: errors.New("file handle is read only")}
+ }
+ if size < 0 {
+ return ErrOutOfRange
+ }
+ if size > int64(len(f.fileData.data)) {
+ diff := size - int64(len(f.fileData.data))
+ f.fileData.data = append(f.fileData.data, bytes.Repeat([]byte{00}, int(diff))...)
+ } else {
+ f.fileData.data = f.fileData.data[0:size]
+ }
+ setModTime(f.fileData, time.Now())
+ return nil
+}
+
+func (f *File) Seek(offset int64, whence int) (int64, error) {
+ if f.closed == true {
+ return 0, ErrFileClosed
+ }
+ switch whence {
+ case 0:
+ atomic.StoreInt64(&f.at, offset)
+ case 1:
+ atomic.AddInt64(&f.at, int64(offset))
+ case 2:
+ atomic.StoreInt64(&f.at, int64(len(f.fileData.data))+offset)
+ }
+ return f.at, nil
+}
+
+func (f *File) Write(b []byte) (n int, err error) {
+ if f.readOnly {
+ return 0, &os.PathError{Op: "write", Path: f.fileData.name, Err: errors.New("file handle is read only")}
+ }
+ n = len(b)
+ cur := atomic.LoadInt64(&f.at)
+ f.fileData.Lock()
+ defer f.fileData.Unlock()
+ diff := cur - int64(len(f.fileData.data))
+ var tail []byte
+ if n+int(cur) < len(f.fileData.data) {
+ tail = f.fileData.data[n+int(cur):]
+ }
+ if diff > 0 {
+ f.fileData.data = append(bytes.Repeat([]byte{00}, int(diff)), b...)
+ f.fileData.data = append(f.fileData.data, tail...)
+ } else {
+ f.fileData.data = append(f.fileData.data[:cur], b...)
+ f.fileData.data = append(f.fileData.data, tail...)
+ }
+ setModTime(f.fileData, time.Now())
+
+ atomic.StoreInt64(&f.at, int64(len(f.fileData.data)))
+ return
+}
+
+func (f *File) WriteAt(b []byte, off int64) (n int, err error) {
+ atomic.StoreInt64(&f.at, off)
+ return f.Write(b)
+}
+
+func (f *File) WriteString(s string) (ret int, err error) {
+ return f.Write([]byte(s))
+}
+
+func (f *File) Info() *FileInfo {
+ return &FileInfo{f.fileData}
+}
+
+type FileInfo struct {
+ *FileData
+}
+
+// Implements os.FileInfo
+func (s *FileInfo) Name() string {
+ s.Lock()
+ _, name := filepath.Split(s.name)
+ s.Unlock()
+ return name
+}
+func (s *FileInfo) Mode() os.FileMode {
+ s.Lock()
+ defer s.Unlock()
+ return s.mode
+}
+func (s *FileInfo) ModTime() time.Time {
+ s.Lock()
+ defer s.Unlock()
+ return s.modtime
+}
+func (s *FileInfo) IsDir() bool {
+ s.Lock()
+ defer s.Unlock()
+ return s.dir
+}
+func (s *FileInfo) Sys() interface{} { return nil }
+func (s *FileInfo) Size() int64 {
+ if s.IsDir() {
+ return int64(42)
+ }
+ s.Lock()
+ defer s.Unlock()
+ return int64(len(s.data))
+}
+
+var (
+ ErrFileClosed = errors.New("File is closed")
+ ErrOutOfRange = errors.New("Out of range")
+ ErrTooLarge = errors.New("Too large")
+ ErrFileNotFound = os.ErrNotExist
+ ErrFileExists = os.ErrExist
+ ErrDestinationExists = os.ErrExist
+)
diff --git a/vendor/github.com/spf13/afero/memmap.go b/vendor/github.com/spf13/afero/memmap.go
new file mode 100644
index 0000000..09498e7
--- /dev/null
+++ b/vendor/github.com/spf13/afero/memmap.go
@@ -0,0 +1,365 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/spf13/afero/mem"
+)
+
+type MemMapFs struct {
+ mu sync.RWMutex
+ data map[string]*mem.FileData
+ init sync.Once
+}
+
+func NewMemMapFs() Fs {
+ return &MemMapFs{}
+}
+
+func (m *MemMapFs) getData() map[string]*mem.FileData {
+ m.init.Do(func() {
+ m.data = make(map[string]*mem.FileData)
+ // Root should always exist, right?
+ // TODO: what about windows?
+ m.data[FilePathSeparator] = mem.CreateDir(FilePathSeparator)
+ })
+ return m.data
+}
+
+func (*MemMapFs) Name() string { return "MemMapFS" }
+
+func (m *MemMapFs) Create(name string) (File, error) {
+ name = normalizePath(name)
+ m.mu.Lock()
+ file := mem.CreateFile(name)
+ m.getData()[name] = file
+ m.registerWithParent(file)
+ m.mu.Unlock()
+ return mem.NewFileHandle(file), nil
+}
+
+func (m *MemMapFs) unRegisterWithParent(fileName string) error {
+ f, err := m.lockfreeOpen(fileName)
+ if err != nil {
+ return err
+ }
+ parent := m.findParent(f)
+ if parent == nil {
+ log.Panic("parent of ", f.Name(), " is nil")
+ }
+
+ parent.Lock()
+ mem.RemoveFromMemDir(parent, f)
+ parent.Unlock()
+ return nil
+}
+
+func (m *MemMapFs) findParent(f *mem.FileData) *mem.FileData {
+ pdir, _ := filepath.Split(f.Name())
+ pdir = filepath.Clean(pdir)
+ pfile, err := m.lockfreeOpen(pdir)
+ if err != nil {
+ return nil
+ }
+ return pfile
+}
+
+func (m *MemMapFs) registerWithParent(f *mem.FileData) {
+ if f == nil {
+ return
+ }
+ parent := m.findParent(f)
+ if parent == nil {
+ pdir := filepath.Dir(filepath.Clean(f.Name()))
+ err := m.lockfreeMkdir(pdir, 0777)
+ if err != nil {
+ //log.Println("Mkdir error:", err)
+ return
+ }
+ parent, err = m.lockfreeOpen(pdir)
+ if err != nil {
+ //log.Println("Open after Mkdir error:", err)
+ return
+ }
+ }
+
+ parent.Lock()
+ mem.InitializeDir(parent)
+ mem.AddToMemDir(parent, f)
+ parent.Unlock()
+}
+
+func (m *MemMapFs) lockfreeMkdir(name string, perm os.FileMode) error {
+ name = normalizePath(name)
+ x, ok := m.getData()[name]
+ if ok {
+ // Only return ErrFileExists if it's a file, not a directory.
+ i := mem.FileInfo{FileData: x}
+ if !i.IsDir() {
+ return ErrFileExists
+ }
+ } else {
+ item := mem.CreateDir(name)
+ m.getData()[name] = item
+ m.registerWithParent(item)
+ }
+ return nil
+}
+
+func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error {
+ name = normalizePath(name)
+
+ m.mu.RLock()
+ _, ok := m.getData()[name]
+ m.mu.RUnlock()
+ if ok {
+ return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists}
+ }
+
+ m.mu.Lock()
+ item := mem.CreateDir(name)
+ m.getData()[name] = item
+ m.registerWithParent(item)
+ m.mu.Unlock()
+
+ m.Chmod(name, perm|os.ModeDir)
+
+ return nil
+}
+
+func (m *MemMapFs) MkdirAll(path string, perm os.FileMode) error {
+ err := m.Mkdir(path, perm)
+ if err != nil {
+ if err.(*os.PathError).Err == ErrFileExists {
+ return nil
+ }
+ return err
+ }
+ return nil
+}
+
+// Handle some relative paths
+func normalizePath(path string) string {
+ path = filepath.Clean(path)
+
+ switch path {
+ case ".":
+ return FilePathSeparator
+ case "..":
+ return FilePathSeparator
+ default:
+ return path
+ }
+}
+
+func (m *MemMapFs) Open(name string) (File, error) {
+ f, err := m.open(name)
+ if f != nil {
+ return mem.NewReadOnlyFileHandle(f), err
+ }
+ return nil, err
+}
+
+func (m *MemMapFs) openWrite(name string) (File, error) {
+ f, err := m.open(name)
+ if f != nil {
+ return mem.NewFileHandle(f), err
+ }
+ return nil, err
+}
+
+func (m *MemMapFs) open(name string) (*mem.FileData, error) {
+ name = normalizePath(name)
+
+ m.mu.RLock()
+ f, ok := m.getData()[name]
+ m.mu.RUnlock()
+ if !ok {
+ return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileNotFound}
+ }
+ return f, nil
+}
+
+func (m *MemMapFs) lockfreeOpen(name string) (*mem.FileData, error) {
+ name = normalizePath(name)
+ f, ok := m.getData()[name]
+ if ok {
+ return f, nil
+ } else {
+ return nil, ErrFileNotFound
+ }
+}
+
+func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+ chmod := false
+ file, err := m.openWrite(name)
+ if os.IsNotExist(err) && (flag&os.O_CREATE > 0) {
+ file, err = m.Create(name)
+ chmod = true
+ }
+ if err != nil {
+ return nil, err
+ }
+ if flag == os.O_RDONLY {
+ file = mem.NewReadOnlyFileHandle(file.(*mem.File).Data())
+ }
+ if flag&os.O_APPEND > 0 {
+ _, err = file.Seek(0, os.SEEK_END)
+ if err != nil {
+ file.Close()
+ return nil, err
+ }
+ }
+ if flag&os.O_TRUNC > 0 && flag&(os.O_RDWR|os.O_WRONLY) > 0 {
+ err = file.Truncate(0)
+ if err != nil {
+ file.Close()
+ return nil, err
+ }
+ }
+ if chmod {
+ m.Chmod(name, perm)
+ }
+ return file, nil
+}
+
+func (m *MemMapFs) Remove(name string) error {
+ name = normalizePath(name)
+
+ m.mu.Lock()
+ defer m.mu.Unlock()
+
+ if _, ok := m.getData()[name]; ok {
+ err := m.unRegisterWithParent(name)
+ if err != nil {
+ return &os.PathError{Op: "remove", Path: name, Err: err}
+ }
+ delete(m.getData(), name)
+ } else {
+ return &os.PathError{Op: "remove", Path: name, Err: os.ErrNotExist}
+ }
+ return nil
+}
+
+func (m *MemMapFs) RemoveAll(path string) error {
+ path = normalizePath(path)
+ m.mu.Lock()
+ m.unRegisterWithParent(path)
+ m.mu.Unlock()
+
+ m.mu.RLock()
+ defer m.mu.RUnlock()
+
+ for p, _ := range m.getData() {
+ if strings.HasPrefix(p, path) {
+ m.mu.RUnlock()
+ m.mu.Lock()
+ delete(m.getData(), p)
+ m.mu.Unlock()
+ m.mu.RLock()
+ }
+ }
+ return nil
+}
+
+func (m *MemMapFs) Rename(oldname, newname string) error {
+ oldname = normalizePath(oldname)
+ newname = normalizePath(newname)
+
+ if oldname == newname {
+ return nil
+ }
+
+ m.mu.RLock()
+ defer m.mu.RUnlock()
+ if _, ok := m.getData()[oldname]; ok {
+ m.mu.RUnlock()
+ m.mu.Lock()
+ m.unRegisterWithParent(oldname)
+ fileData := m.getData()[oldname]
+ delete(m.getData(), oldname)
+ mem.ChangeFileName(fileData, newname)
+ m.getData()[newname] = fileData
+ m.registerWithParent(fileData)
+ m.mu.Unlock()
+ m.mu.RLock()
+ } else {
+ return &os.PathError{Op: "rename", Path: oldname, Err: ErrFileNotFound}
+ }
+ return nil
+}
+
+func (m *MemMapFs) Stat(name string) (os.FileInfo, error) {
+ f, err := m.Open(name)
+ if err != nil {
+ return nil, err
+ }
+ fi := mem.GetFileInfo(f.(*mem.File).Data())
+ return fi, nil
+}
+
+func (m *MemMapFs) Chmod(name string, mode os.FileMode) error {
+ name = normalizePath(name)
+
+ m.mu.RLock()
+ f, ok := m.getData()[name]
+ m.mu.RUnlock()
+ if !ok {
+ return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound}
+ }
+
+ m.mu.Lock()
+ mem.SetMode(f, mode)
+ m.mu.Unlock()
+
+ return nil
+}
+
+func (m *MemMapFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
+ name = normalizePath(name)
+
+ m.mu.RLock()
+ f, ok := m.getData()[name]
+ m.mu.RUnlock()
+ if !ok {
+ return &os.PathError{Op: "chtimes", Path: name, Err: ErrFileNotFound}
+ }
+
+ m.mu.Lock()
+ mem.SetModTime(f, mtime)
+ m.mu.Unlock()
+
+ return nil
+}
+
+func (m *MemMapFs) List() {
+ for _, x := range m.data {
+ y := mem.FileInfo{FileData: x}
+ fmt.Println(x.Name(), y.Size())
+ }
+}
+
+// func debugMemMapList(fs Fs) {
+// if x, ok := fs.(*MemMapFs); ok {
+// x.List()
+// }
+// }
diff --git a/vendor/github.com/spf13/afero/os.go b/vendor/github.com/spf13/afero/os.go
new file mode 100644
index 0000000..13cc1b8
--- /dev/null
+++ b/vendor/github.com/spf13/afero/os.go
@@ -0,0 +1,101 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+// Copyright 2013 tsuru authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "os"
+ "time"
+)
+
+var _ Lstater = (*OsFs)(nil)
+
+// OsFs is a Fs implementation that uses functions provided by the os package.
+//
+// For details in any method, check the documentation of the os package
+// (http://golang.org/pkg/os/).
+type OsFs struct{}
+
+func NewOsFs() Fs {
+ return &OsFs{}
+}
+
+func (OsFs) Name() string { return "OsFs" }
+
+func (OsFs) Create(name string) (File, error) {
+ f, e := os.Create(name)
+ if f == nil {
+ // while this looks strange, we need to return a bare nil (of type nil) not
+ // a nil value of type *os.File or nil won't be nil
+ return nil, e
+ }
+ return f, e
+}
+
+func (OsFs) Mkdir(name string, perm os.FileMode) error {
+ return os.Mkdir(name, perm)
+}
+
+func (OsFs) MkdirAll(path string, perm os.FileMode) error {
+ return os.MkdirAll(path, perm)
+}
+
+func (OsFs) Open(name string) (File, error) {
+ f, e := os.Open(name)
+ if f == nil {
+ // while this looks strange, we need to return a bare nil (of type nil) not
+ // a nil value of type *os.File or nil won't be nil
+ return nil, e
+ }
+ return f, e
+}
+
+func (OsFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+ f, e := os.OpenFile(name, flag, perm)
+ if f == nil {
+ // while this looks strange, we need to return a bare nil (of type nil) not
+ // a nil value of type *os.File or nil won't be nil
+ return nil, e
+ }
+ return f, e
+}
+
+func (OsFs) Remove(name string) error {
+ return os.Remove(name)
+}
+
+func (OsFs) RemoveAll(path string) error {
+ return os.RemoveAll(path)
+}
+
+func (OsFs) Rename(oldname, newname string) error {
+ return os.Rename(oldname, newname)
+}
+
+func (OsFs) Stat(name string) (os.FileInfo, error) {
+ return os.Stat(name)
+}
+
+func (OsFs) Chmod(name string, mode os.FileMode) error {
+ return os.Chmod(name, mode)
+}
+
+func (OsFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
+ return os.Chtimes(name, atime, mtime)
+}
+
+func (OsFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
+ fi, err := os.Lstat(name)
+ return fi, true, err
+}
diff --git a/vendor/github.com/spf13/afero/path.go b/vendor/github.com/spf13/afero/path.go
new file mode 100644
index 0000000..18f60a0
--- /dev/null
+++ b/vendor/github.com/spf13/afero/path.go
@@ -0,0 +1,106 @@
+// Copyright ©2015 The Go Authors
+// Copyright ©2015 Steve Francia <spf@spf13.com>
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "os"
+ "path/filepath"
+ "sort"
+)
+
+// readDirNames reads the directory named by dirname and returns
+// a sorted list of directory entries.
+// adapted from https://golang.org/src/path/filepath/path.go
+func readDirNames(fs Fs, dirname string) ([]string, error) {
+ f, err := fs.Open(dirname)
+ if err != nil {
+ return nil, err
+ }
+ names, err := f.Readdirnames(-1)
+ f.Close()
+ if err != nil {
+ return nil, err
+ }
+ sort.Strings(names)
+ return names, nil
+}
+
+// walk recursively descends path, calling walkFn
+// adapted from https://golang.org/src/path/filepath/path.go
+func walk(fs Fs, path string, info os.FileInfo, walkFn filepath.WalkFunc) error {
+ err := walkFn(path, info, nil)
+ if err != nil {
+ if info.IsDir() && err == filepath.SkipDir {
+ return nil
+ }
+ return err
+ }
+
+ if !info.IsDir() {
+ return nil
+ }
+
+ names, err := readDirNames(fs, path)
+ if err != nil {
+ return walkFn(path, info, err)
+ }
+
+ for _, name := range names {
+ filename := filepath.Join(path, name)
+ fileInfo, err := lstatIfPossible(fs, filename)
+ if err != nil {
+ if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir {
+ return err
+ }
+ } else {
+ err = walk(fs, filename, fileInfo, walkFn)
+ if err != nil {
+ if !fileInfo.IsDir() || err != filepath.SkipDir {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// if the filesystem supports it, use Lstat, else use fs.Stat
+func lstatIfPossible(fs Fs, path string) (os.FileInfo, error) {
+ if lfs, ok := fs.(Lstater); ok {
+ fi, _, err := lfs.LstatIfPossible(path)
+ return fi, err
+ }
+ return fs.Stat(path)
+}
+
+// Walk walks the file tree rooted at root, calling walkFn for each file or
+// directory in the tree, including root. All errors that arise visiting files
+// and directories are filtered by walkFn. The files are walked in lexical
+// order, which makes the output deterministic but means that for very
+// large directories Walk can be inefficient.
+// Walk does not follow symbolic links.
+
+func (a Afero) Walk(root string, walkFn filepath.WalkFunc) error {
+ return Walk(a.Fs, root, walkFn)
+}
+
+func Walk(fs Fs, root string, walkFn filepath.WalkFunc) error {
+ info, err := lstatIfPossible(fs, root)
+ if err != nil {
+ return walkFn(root, nil, err)
+ }
+ return walk(fs, root, info, walkFn)
+}
diff --git a/vendor/github.com/spf13/afero/readonlyfs.go b/vendor/github.com/spf13/afero/readonlyfs.go
new file mode 100644
index 0000000..c6376ec
--- /dev/null
+++ b/vendor/github.com/spf13/afero/readonlyfs.go
@@ -0,0 +1,80 @@
+package afero
+
+import (
+ "os"
+ "syscall"
+ "time"
+)
+
+var _ Lstater = (*ReadOnlyFs)(nil)
+
+type ReadOnlyFs struct {
+ source Fs
+}
+
+func NewReadOnlyFs(source Fs) Fs {
+ return &ReadOnlyFs{source: source}
+}
+
+func (r *ReadOnlyFs) ReadDir(name string) ([]os.FileInfo, error) {
+ return ReadDir(r.source, name)
+}
+
+func (r *ReadOnlyFs) Chtimes(n string, a, m time.Time) error {
+ return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) Chmod(n string, m os.FileMode) error {
+ return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) Name() string {
+ return "ReadOnlyFilter"
+}
+
+func (r *ReadOnlyFs) Stat(name string) (os.FileInfo, error) {
+ return r.source.Stat(name)
+}
+
+func (r *ReadOnlyFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
+ if lsf, ok := r.source.(Lstater); ok {
+ return lsf.LstatIfPossible(name)
+ }
+ fi, err := r.Stat(name)
+ return fi, false, err
+}
+
+func (r *ReadOnlyFs) Rename(o, n string) error {
+ return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) RemoveAll(p string) error {
+ return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) Remove(n string) error {
+ return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+ if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
+ return nil, syscall.EPERM
+ }
+ return r.source.OpenFile(name, flag, perm)
+}
+
+func (r *ReadOnlyFs) Open(n string) (File, error) {
+ return r.source.Open(n)
+}
+
+func (r *ReadOnlyFs) Mkdir(n string, p os.FileMode) error {
+ return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) MkdirAll(n string, p os.FileMode) error {
+ return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) Create(n string) (File, error) {
+ return nil, syscall.EPERM
+}
diff --git a/vendor/github.com/spf13/afero/regexpfs.go b/vendor/github.com/spf13/afero/regexpfs.go
new file mode 100644
index 0000000..9d92dbc
--- /dev/null
+++ b/vendor/github.com/spf13/afero/regexpfs.go
@@ -0,0 +1,214 @@
+package afero
+
+import (
+ "os"
+ "regexp"
+ "syscall"
+ "time"
+)
+
+// The RegexpFs filters files (not directories) by regular expression. Only
+// files matching the given regexp will be allowed, all others get an ENOENT error (
+// "No such file or directory").
+//
+type RegexpFs struct {
+ re *regexp.Regexp
+ source Fs
+}
+
+func NewRegexpFs(source Fs, re *regexp.Regexp) Fs {
+ return &RegexpFs{source: source, re: re}
+}
+
+type RegexpFile struct {
+ f File
+ re *regexp.Regexp
+}
+
+func (r *RegexpFs) matchesName(name string) error {
+ if r.re == nil {
+ return nil
+ }
+ if r.re.MatchString(name) {
+ return nil
+ }
+ return syscall.ENOENT
+}
+
+func (r *RegexpFs) dirOrMatches(name string) error {
+ dir, err := IsDir(r.source, name)
+ if err != nil {
+ return err
+ }
+ if dir {
+ return nil
+ }
+ return r.matchesName(name)
+}
+
+func (r *RegexpFs) Chtimes(name string, a, m time.Time) error {
+ if err := r.dirOrMatches(name); err != nil {
+ return err
+ }
+ return r.source.Chtimes(name, a, m)
+}
+
+func (r *RegexpFs) Chmod(name string, mode os.FileMode) error {
+ if err := r.dirOrMatches(name); err != nil {
+ return err
+ }
+ return r.source.Chmod(name, mode)
+}
+
+func (r *RegexpFs) Name() string {
+ return "RegexpFs"
+}
+
+func (r *RegexpFs) Stat(name string) (os.FileInfo, error) {
+ if err := r.dirOrMatches(name); err != nil {
+ return nil, err
+ }
+ return r.source.Stat(name)
+}
+
+func (r *RegexpFs) Rename(oldname, newname string) error {
+ dir, err := IsDir(r.source, oldname)
+ if err != nil {
+ return err
+ }
+ if dir {
+ return nil
+ }
+ if err := r.matchesName(oldname); err != nil {
+ return err
+ }
+ if err := r.matchesName(newname); err != nil {
+ return err
+ }
+ return r.source.Rename(oldname, newname)
+}
+
+func (r *RegexpFs) RemoveAll(p string) error {
+ dir, err := IsDir(r.source, p)
+ if err != nil {
+ return err
+ }
+ if !dir {
+ if err := r.matchesName(p); err != nil {
+ return err
+ }
+ }
+ return r.source.RemoveAll(p)
+}
+
+func (r *RegexpFs) Remove(name string) error {
+ if err := r.dirOrMatches(name); err != nil {
+ return err
+ }
+ return r.source.Remove(name)
+}
+
+func (r *RegexpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+ if err := r.dirOrMatches(name); err != nil {
+ return nil, err
+ }
+ return r.source.OpenFile(name, flag, perm)
+}
+
+func (r *RegexpFs) Open(name string) (File, error) {
+ dir, err := IsDir(r.source, name)
+ if err != nil {
+ return nil, err
+ }
+ if !dir {
+ if err := r.matchesName(name); err != nil {
+ return nil, err
+ }
+ }
+ f, err := r.source.Open(name)
+ return &RegexpFile{f: f, re: r.re}, nil
+}
+
+func (r *RegexpFs) Mkdir(n string, p os.FileMode) error {
+ return r.source.Mkdir(n, p)
+}
+
+func (r *RegexpFs) MkdirAll(n string, p os.FileMode) error {
+ return r.source.MkdirAll(n, p)
+}
+
+func (r *RegexpFs) Create(name string) (File, error) {
+ if err := r.matchesName(name); err != nil {
+ return nil, err
+ }
+ return r.source.Create(name)
+}
+
+func (f *RegexpFile) Close() error {
+ return f.f.Close()
+}
+
+func (f *RegexpFile) Read(s []byte) (int, error) {
+ return f.f.Read(s)
+}
+
+func (f *RegexpFile) ReadAt(s []byte, o int64) (int, error) {
+ return f.f.ReadAt(s, o)
+}
+
+func (f *RegexpFile) Seek(o int64, w int) (int64, error) {
+ return f.f.Seek(o, w)
+}
+
+func (f *RegexpFile) Write(s []byte) (int, error) {
+ return f.f.Write(s)
+}
+
+func (f *RegexpFile) WriteAt(s []byte, o int64) (int, error) {
+ return f.f.WriteAt(s, o)
+}
+
+func (f *RegexpFile) Name() string {
+ return f.f.Name()
+}
+
+func (f *RegexpFile) Readdir(c int) (fi []os.FileInfo, err error) {
+ var rfi []os.FileInfo
+ rfi, err = f.f.Readdir(c)
+ if err != nil {
+ return nil, err
+ }
+ for _, i := range rfi {
+ if i.IsDir() || f.re.MatchString(i.Name()) {
+ fi = append(fi, i)
+ }
+ }
+ return fi, nil
+}
+
+func (f *RegexpFile) Readdirnames(c int) (n []string, err error) {
+ fi, err := f.Readdir(c)
+ if err != nil {
+ return nil, err
+ }
+ for _, s := range fi {
+ n = append(n, s.Name())
+ }
+ return n, nil
+}
+
+func (f *RegexpFile) Stat() (os.FileInfo, error) {
+ return f.f.Stat()
+}
+
+func (f *RegexpFile) Sync() error {
+ return f.f.Sync()
+}
+
+func (f *RegexpFile) Truncate(s int64) error {
+ return f.f.Truncate(s)
+}
+
+func (f *RegexpFile) WriteString(s string) (int, error) {
+ return f.f.WriteString(s)
+}
diff --git a/vendor/github.com/spf13/afero/unionFile.go b/vendor/github.com/spf13/afero/unionFile.go
new file mode 100644
index 0000000..1e78f7d
--- /dev/null
+++ b/vendor/github.com/spf13/afero/unionFile.go
@@ -0,0 +1,305 @@
+package afero
+
+import (
+ "io"
+ "os"
+ "path/filepath"
+ "syscall"
+)
+
+// The UnionFile implements the afero.File interface and will be returned
+// when reading a directory present at least in the overlay or opening a file
+// for writing.
+//
+// The calls to
+// Readdir() and Readdirnames() merge the file os.FileInfo / names from the
+// base and the overlay - for files present in both layers, only those
+// from the overlay will be used.
+//
+// When opening files for writing (Create() / OpenFile() with the right flags)
+// the operations will be done in both layers, starting with the overlay. A
+// successful read in the overlay will move the cursor position in the base layer
+// by the number of bytes read.
+type UnionFile struct {
+ Base File
+ Layer File
+ Merger DirsMerger
+ off int
+ files []os.FileInfo
+}
+
+func (f *UnionFile) Close() error {
+ // first close base, so we have a newer timestamp in the overlay. If we'd close
+ // the overlay first, we'd get a cacheStale the next time we access this file
+ // -> cache would be useless ;-)
+ if f.Base != nil {
+ f.Base.Close()
+ }
+ if f.Layer != nil {
+ return f.Layer.Close()
+ }
+ return BADFD
+}
+
+func (f *UnionFile) Read(s []byte) (int, error) {
+ if f.Layer != nil {
+ n, err := f.Layer.Read(s)
+ if (err == nil || err == io.EOF) && f.Base != nil {
+ // advance the file position also in the base file, the next
+ // call may be a write at this position (or a seek with SEEK_CUR)
+ if _, seekErr := f.Base.Seek(int64(n), os.SEEK_CUR); seekErr != nil {
+ // only overwrite err in case the seek fails: we need to
+ // report an eventual io.EOF to the caller
+ err = seekErr
+ }
+ }
+ return n, err
+ }
+ if f.Base != nil {
+ return f.Base.Read(s)
+ }
+ return 0, BADFD
+}
+
+func (f *UnionFile) ReadAt(s []byte, o int64) (int, error) {
+ if f.Layer != nil {
+ n, err := f.Layer.ReadAt(s, o)
+ if (err == nil || err == io.EOF) && f.Base != nil {
+ _, err = f.Base.Seek(o+int64(n), os.SEEK_SET)
+ }
+ return n, err
+ }
+ if f.Base != nil {
+ return f.Base.ReadAt(s, o)
+ }
+ return 0, BADFD
+}
+
+func (f *UnionFile) Seek(o int64, w int) (pos int64, err error) {
+ if f.Layer != nil {
+ pos, err = f.Layer.Seek(o, w)
+ if (err == nil || err == io.EOF) && f.Base != nil {
+ _, err = f.Base.Seek(o, w)
+ }
+ return pos, err
+ }
+ if f.Base != nil {
+ return f.Base.Seek(o, w)
+ }
+ return 0, BADFD
+}
+
+func (f *UnionFile) Write(s []byte) (n int, err error) {
+ if f.Layer != nil {
+ n, err = f.Layer.Write(s)
+ if err == nil && f.Base != nil { // hmm, do we have fixed size files where a write may hit the EOF mark?
+ _, err = f.Base.Write(s)
+ }
+ return n, err
+ }
+ if f.Base != nil {
+ return f.Base.Write(s)
+ }
+ return 0, BADFD
+}
+
+func (f *UnionFile) WriteAt(s []byte, o int64) (n int, err error) {
+ if f.Layer != nil {
+ n, err = f.Layer.WriteAt(s, o)
+ if err == nil && f.Base != nil {
+ _, err = f.Base.WriteAt(s, o)
+ }
+ return n, err
+ }
+ if f.Base != nil {
+ return f.Base.WriteAt(s, o)
+ }
+ return 0, BADFD
+}
+
+func (f *UnionFile) Name() string {
+ if f.Layer != nil {
+ return f.Layer.Name()
+ }
+ return f.Base.Name()
+}
+
+// DirsMerger is how UnionFile weaves two directories together.
+// It takes the FileInfo slices from the layer and the base and returns a
+// single view.
+type DirsMerger func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error)
+
+var defaultUnionMergeDirsFn = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) {
+ var files = make(map[string]os.FileInfo)
+
+ for _, fi := range lofi {
+ files[fi.Name()] = fi
+ }
+
+ for _, fi := range bofi {
+ if _, exists := files[fi.Name()]; !exists {
+ files[fi.Name()] = fi
+ }
+ }
+
+ rfi := make([]os.FileInfo, len(files))
+
+ i := 0
+ for _, fi := range files {
+ rfi[i] = fi
+ i++
+ }
+
+ return rfi, nil
+
+}
+
+// Readdir will weave the two directories together and
+// return a single view of the overlayed directories
+func (f *UnionFile) Readdir(c int) (ofi []os.FileInfo, err error) {
+ var merge DirsMerger = f.Merger
+ if merge == nil {
+ merge = defaultUnionMergeDirsFn
+ }
+
+ if f.off == 0 {
+ var lfi []os.FileInfo
+ if f.Layer != nil {
+ lfi, err = f.Layer.Readdir(-1)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ var bfi []os.FileInfo
+ if f.Base != nil {
+ bfi, err = f.Base.Readdir(-1)
+ if err != nil {
+ return nil, err
+ }
+
+ }
+ merged, err := merge(lfi, bfi)
+ if err != nil {
+ return nil, err
+ }
+ f.files = append(f.files, merged...)
+ }
+ if c == -1 {
+ return f.files[f.off:], nil
+ }
+ defer func() { f.off += c }()
+ return f.files[f.off:c], nil
+}
+
+func (f *UnionFile) Readdirnames(c int) ([]string, error) {
+ rfi, err := f.Readdir(c)
+ if err != nil {
+ return nil, err
+ }
+ var names []string
+ for _, fi := range rfi {
+ names = append(names, fi.Name())
+ }
+ return names, nil
+}
+
+func (f *UnionFile) Stat() (os.FileInfo, error) {
+ if f.Layer != nil {
+ return f.Layer.Stat()
+ }
+ if f.Base != nil {
+ return f.Base.Stat()
+ }
+ return nil, BADFD
+}
+
+func (f *UnionFile) Sync() (err error) {
+ if f.Layer != nil {
+ err = f.Layer.Sync()
+ if err == nil && f.Base != nil {
+ err = f.Base.Sync()
+ }
+ return err
+ }
+ if f.Base != nil {
+ return f.Base.Sync()
+ }
+ return BADFD
+}
+
+func (f *UnionFile) Truncate(s int64) (err error) {
+ if f.Layer != nil {
+ err = f.Layer.Truncate(s)
+ if err == nil && f.Base != nil {
+ err = f.Base.Truncate(s)
+ }
+ return err
+ }
+ if f.Base != nil {
+ return f.Base.Truncate(s)
+ }
+ return BADFD
+}
+
+func (f *UnionFile) WriteString(s string) (n int, err error) {
+ if f.Layer != nil {
+ n, err = f.Layer.WriteString(s)
+ if err == nil && f.Base != nil {
+ _, err = f.Base.WriteString(s)
+ }
+ return n, err
+ }
+ if f.Base != nil {
+ return f.Base.WriteString(s)
+ }
+ return 0, BADFD
+}
+
+func copyToLayer(base Fs, layer Fs, name string) error {
+ bfh, err := base.Open(name)
+ if err != nil {
+ return err
+ }
+ defer bfh.Close()
+
+ // First make sure the directory exists
+ exists, err := Exists(layer, filepath.Dir(name))
+ if err != nil {
+ return err
+ }
+ if !exists {
+ err = layer.MkdirAll(filepath.Dir(name), 0777) // FIXME?
+ if err != nil {
+ return err
+ }
+ }
+
+ // Create the file on the overlay
+ lfh, err := layer.Create(name)
+ if err != nil {
+ return err
+ }
+ n, err := io.Copy(lfh, bfh)
+ if err != nil {
+ // If anything fails, clean up the file
+ layer.Remove(name)
+ lfh.Close()
+ return err
+ }
+
+ bfi, err := bfh.Stat()
+ if err != nil || bfi.Size() != n {
+ layer.Remove(name)
+ lfh.Close()
+ return syscall.EIO
+ }
+
+ err = lfh.Close()
+ if err != nil {
+ layer.Remove(name)
+ lfh.Close()
+ return err
+ }
+ return layer.Chtimes(name, bfi.ModTime(), bfi.ModTime())
+}
diff --git a/vendor/github.com/spf13/afero/util.go b/vendor/github.com/spf13/afero/util.go
new file mode 100644
index 0000000..4f253f4
--- /dev/null
+++ b/vendor/github.com/spf13/afero/util.go
@@ -0,0 +1,330 @@
+// Copyright ©2015 Steve Francia <spf@spf13.com>
+// Portions Copyright ©2015 The Hugo Authors
+// Portions Copyright 2016-present Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "unicode"
+
+ "golang.org/x/text/transform"
+ "golang.org/x/text/unicode/norm"
+)
+
+// Filepath separator defined by os.Separator.
+const FilePathSeparator = string(filepath.Separator)
+
+// Takes a reader and a path and writes the content
+func (a Afero) WriteReader(path string, r io.Reader) (err error) {
+ return WriteReader(a.Fs, path, r)
+}
+
+func WriteReader(fs Fs, path string, r io.Reader) (err error) {
+ dir, _ := filepath.Split(path)
+ ospath := filepath.FromSlash(dir)
+
+ if ospath != "" {
+ err = fs.MkdirAll(ospath, 0777) // rwx, rw, r
+ if err != nil {
+ if err != os.ErrExist {
+ return err
+ }
+ }
+ }
+
+ file, err := fs.Create(path)
+ if err != nil {
+ return
+ }
+ defer file.Close()
+
+ _, err = io.Copy(file, r)
+ return
+}
+
+// Same as WriteReader but checks to see if file/directory already exists.
+func (a Afero) SafeWriteReader(path string, r io.Reader) (err error) {
+ return SafeWriteReader(a.Fs, path, r)
+}
+
+func SafeWriteReader(fs Fs, path string, r io.Reader) (err error) {
+ dir, _ := filepath.Split(path)
+ ospath := filepath.FromSlash(dir)
+
+ if ospath != "" {
+ err = fs.MkdirAll(ospath, 0777) // rwx, rw, r
+ if err != nil {
+ return
+ }
+ }
+
+ exists, err := Exists(fs, path)
+ if err != nil {
+ return
+ }
+ if exists {
+ return fmt.Errorf("%v already exists", path)
+ }
+
+ file, err := fs.Create(path)
+ if err != nil {
+ return
+ }
+ defer file.Close()
+
+ _, err = io.Copy(file, r)
+ return
+}
+
+func (a Afero) GetTempDir(subPath string) string {
+ return GetTempDir(a.Fs, subPath)
+}
+
+// GetTempDir returns the default temp directory with trailing slash
+// if subPath is not empty then it will be created recursively with mode 777 rwx rwx rwx
+func GetTempDir(fs Fs, subPath string) string {
+ addSlash := func(p string) string {
+ if FilePathSeparator != p[len(p)-1:] {
+ p = p + FilePathSeparator
+ }
+ return p
+ }
+ dir := addSlash(os.TempDir())
+
+ if subPath != "" {
+ // preserve windows backslash :-(
+ if FilePathSeparator == "\\" {
+ subPath = strings.Replace(subPath, "\\", "____", -1)
+ }
+ dir = dir + UnicodeSanitize((subPath))
+ if FilePathSeparator == "\\" {
+ dir = strings.Replace(dir, "____", "\\", -1)
+ }
+
+ if exists, _ := Exists(fs, dir); exists {
+ return addSlash(dir)
+ }
+
+ err := fs.MkdirAll(dir, 0777)
+ if err != nil {
+ panic(err)
+ }
+ dir = addSlash(dir)
+ }
+ return dir
+}
+
+// Rewrite string to remove non-standard path characters
+func UnicodeSanitize(s string) string {
+ source := []rune(s)
+ target := make([]rune, 0, len(source))
+
+ for _, r := range source {
+ if unicode.IsLetter(r) ||
+ unicode.IsDigit(r) ||
+ unicode.IsMark(r) ||
+ r == '.' ||
+ r == '/' ||
+ r == '\\' ||
+ r == '_' ||
+ r == '-' ||
+ r == '%' ||
+ r == ' ' ||
+ r == '#' {
+ target = append(target, r)
+ }
+ }
+
+ return string(target)
+}
+
+// Transform characters with accents into plain forms.
+func NeuterAccents(s string) string {
+ t := transform.Chain(norm.NFD, transform.RemoveFunc(isMn), norm.NFC)
+ result, _, _ := transform.String(t, string(s))
+
+ return result
+}
+
+func isMn(r rune) bool {
+ return unicode.Is(unicode.Mn, r) // Mn: nonspacing marks
+}
+
+func (a Afero) FileContainsBytes(filename string, subslice []byte) (bool, error) {
+ return FileContainsBytes(a.Fs, filename, subslice)
+}
+
+// Check if a file contains a specified byte slice.
+func FileContainsBytes(fs Fs, filename string, subslice []byte) (bool, error) {
+ f, err := fs.Open(filename)
+ if err != nil {
+ return false, err
+ }
+ defer f.Close()
+
+ return readerContainsAny(f, subslice), nil
+}
+
+func (a Afero) FileContainsAnyBytes(filename string, subslices [][]byte) (bool, error) {
+ return FileContainsAnyBytes(a.Fs, filename, subslices)
+}
+
+// Check if a file contains any of the specified byte slices.
+func FileContainsAnyBytes(fs Fs, filename string, subslices [][]byte) (bool, error) {
+ f, err := fs.Open(filename)
+ if err != nil {
+ return false, err
+ }
+ defer f.Close()
+
+ return readerContainsAny(f, subslices...), nil
+}
+
+// readerContainsAny reports whether any of the subslices is within r.
+func readerContainsAny(r io.Reader, subslices ...[]byte) bool {
+
+ if r == nil || len(subslices) == 0 {
+ return false
+ }
+
+ largestSlice := 0
+
+ for _, sl := range subslices {
+ if len(sl) > largestSlice {
+ largestSlice = len(sl)
+ }
+ }
+
+ if largestSlice == 0 {
+ return false
+ }
+
+ bufflen := largestSlice * 4
+ halflen := bufflen / 2
+ buff := make([]byte, bufflen)
+ var err error
+ var n, i int
+
+ for {
+ i++
+ if i == 1 {
+ n, err = io.ReadAtLeast(r, buff[:halflen], halflen)
+ } else {
+ if i != 2 {
+ // shift left to catch overlapping matches
+ copy(buff[:], buff[halflen:])
+ }
+ n, err = io.ReadAtLeast(r, buff[halflen:], halflen)
+ }
+
+ if n > 0 {
+ for _, sl := range subslices {
+ if bytes.Contains(buff, sl) {
+ return true
+ }
+ }
+ }
+
+ if err != nil {
+ break
+ }
+ }
+ return false
+}
+
+func (a Afero) DirExists(path string) (bool, error) {
+ return DirExists(a.Fs, path)
+}
+
+// DirExists checks if a path exists and is a directory.
+func DirExists(fs Fs, path string) (bool, error) {
+ fi, err := fs.Stat(path)
+ if err == nil && fi.IsDir() {
+ return true, nil
+ }
+ if os.IsNotExist(err) {
+ return false, nil
+ }
+ return false, err
+}
+
+func (a Afero) IsDir(path string) (bool, error) {
+ return IsDir(a.Fs, path)
+}
+
+// IsDir checks if a given path is a directory.
+func IsDir(fs Fs, path string) (bool, error) {
+ fi, err := fs.Stat(path)
+ if err != nil {
+ return false, err
+ }
+ return fi.IsDir(), nil
+}
+
+func (a Afero) IsEmpty(path string) (bool, error) {
+ return IsEmpty(a.Fs, path)
+}
+
+// IsEmpty checks if a given file or directory is empty.
+func IsEmpty(fs Fs, path string) (bool, error) {
+ if b, _ := Exists(fs, path); !b {
+ return false, fmt.Errorf("%q path does not exist", path)
+ }
+ fi, err := fs.Stat(path)
+ if err != nil {
+ return false, err
+ }
+ if fi.IsDir() {
+ f, err := fs.Open(path)
+ if err != nil {
+ return false, err
+ }
+ defer f.Close()
+ list, err := f.Readdir(-1)
+ return len(list) == 0, nil
+ }
+ return fi.Size() == 0, nil
+}
+
+func (a Afero) Exists(path string) (bool, error) {
+ return Exists(a.Fs, path)
+}
+
+// Check if a file or directory exists.
+func Exists(fs Fs, path string) (bool, error) {
+ _, err := fs.Stat(path)
+ if err == nil {
+ return true, nil
+ }
+ if os.IsNotExist(err) {
+ return false, nil
+ }
+ return false, err
+}
+
+func FullBaseFsPath(basePathFs *BasePathFs, relativePath string) string {
+ combinedPath := filepath.Join(basePathFs.path, relativePath)
+ if parent, ok := basePathFs.source.(*BasePathFs); ok {
+ return FullBaseFsPath(parent, combinedPath)
+ }
+
+ return combinedPath
+}
diff --git a/vendor/github.com/spf13/cast/.gitignore b/vendor/github.com/spf13/cast/.gitignore
new file mode 100644
index 0000000..53053a8
--- /dev/null
+++ b/vendor/github.com/spf13/cast/.gitignore
@@ -0,0 +1,25 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+
+*.bench
diff --git a/vendor/github.com/spf13/cast/.travis.yml b/vendor/github.com/spf13/cast/.travis.yml
new file mode 100644
index 0000000..6420d1c
--- /dev/null
+++ b/vendor/github.com/spf13/cast/.travis.yml
@@ -0,0 +1,15 @@
+language: go
+env:
+ - GO111MODULE=on
+sudo: required
+go:
+ - "1.11.x"
+ - tip
+os:
+ - linux
+matrix:
+ allow_failures:
+ - go: tip
+ fast_finish: true
+script:
+ - make check
diff --git a/vendor/github.com/spf13/cast/LICENSE b/vendor/github.com/spf13/cast/LICENSE
new file mode 100644
index 0000000..4527efb
--- /dev/null
+++ b/vendor/github.com/spf13/cast/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Steve Francia
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/spf13/cast/Makefile b/vendor/github.com/spf13/cast/Makefile
new file mode 100644
index 0000000..7ccf893
--- /dev/null
+++ b/vendor/github.com/spf13/cast/Makefile
@@ -0,0 +1,38 @@
+# A Self-Documenting Makefile: http://marmelab.com/blog/2016/02/29/auto-documented-makefile.html
+
+.PHONY: check fmt lint test test-race vet test-cover-html help
+.DEFAULT_GOAL := help
+
+check: test-race fmt vet lint ## Run tests and linters
+
+test: ## Run tests
+ go test ./...
+
+test-race: ## Run tests with race detector
+ go test -race ./...
+
+fmt: ## Run gofmt linter
+ @for d in `go list` ; do \
+ if [ "`gofmt -l -s $$GOPATH/src/$$d | tee /dev/stderr`" ]; then \
+ echo "^ improperly formatted go files" && echo && exit 1; \
+ fi \
+ done
+
+lint: ## Run golint linter
+ @for d in `go list` ; do \
+ if [ "`golint $$d | tee /dev/stderr`" ]; then \
+ echo "^ golint errors!" && echo && exit 1; \
+ fi \
+ done
+
+vet: ## Run go vet linter
+ @if [ "`go vet | tee /dev/stderr`" ]; then \
+ echo "^ go vet errors!" && echo && exit 1; \
+ fi
+
+test-cover-html: ## Generate test coverage report
+ go test -coverprofile=coverage.out -covermode=count
+ go tool cover -func=coverage.out
+
+help:
+ @grep -E '^[a-zA-Z0-9_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
diff --git a/vendor/github.com/spf13/cast/README.md b/vendor/github.com/spf13/cast/README.md
new file mode 100644
index 0000000..e693939
--- /dev/null
+++ b/vendor/github.com/spf13/cast/README.md
@@ -0,0 +1,75 @@
+cast
+====
+[![GoDoc](https://godoc.org/github.com/spf13/cast?status.svg)](https://godoc.org/github.com/spf13/cast)
+[![Build Status](https://api.travis-ci.org/spf13/cast.svg?branch=master)](https://travis-ci.org/spf13/cast)
+[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cast)](https://goreportcard.com/report/github.com/spf13/cast)
+
+Easy and safe casting from one type to another in Go
+
+Don’t Panic! ... Cast
+
+## What is Cast?
+
+Cast is a library to convert between different go types in a consistent and easy way.
+
+Cast provides simple functions to easily convert a number to a string, an
+interface into a bool, etc. Cast does this intelligently when an obvious
+conversion is possible. It doesn’t make any attempts to guess what you meant,
+for example you can only convert a string to an int when it is a string
+representation of an int such as “8”. Cast was developed for use in
+[Hugo](http://hugo.spf13.com), a website engine which uses YAML, TOML or JSON
+for meta data.
+
+## Why use Cast?
+
+When working with dynamic data in Go you often need to cast or convert the data
+from one type into another. Cast goes beyond just using type assertion (though
+it uses that when possible) to provide a very straightforward and convenient
+library.
+
+If you are working with interfaces to handle things like dynamic content
+you’ll need an easy way to convert an interface into a given type. This
+is the library for you.
+
+If you are taking in data from YAML, TOML or JSON or other formats which lack
+full types, then Cast is the library for you.
+
+## Usage
+
+Cast provides a handful of To_____ methods. These methods will always return
+the desired type. **If input is provided that will not convert to that type, the
+0 or nil value for that type will be returned**.
+
+Cast also provides identical methods To_____E. These return the same result as
+the To_____ methods, plus an additional error which tells you if it successfully
+converted. Using these methods you can tell the difference between when the
+input matched the zero value or when the conversion failed and the zero value
+was returned.
+
+The following examples are merely a sample of what is available. Please review
+the code for a complete set.
+
+### Example ‘ToString’:
+
+ cast.ToString("mayonegg") // "mayonegg"
+ cast.ToString(8) // "8"
+ cast.ToString(8.31) // "8.31"
+ cast.ToString([]byte("one time")) // "one time"
+ cast.ToString(nil) // ""
+
+ var foo interface{} = "one more time"
+ cast.ToString(foo) // "one more time"
+
+
+### Example ‘ToInt’:
+
+ cast.ToInt(8) // 8
+ cast.ToInt(8.31) // 8
+ cast.ToInt("8") // 8
+ cast.ToInt(true) // 1
+ cast.ToInt(false) // 0
+
+ var eight interface{} = 8
+ cast.ToInt(eight) // 8
+ cast.ToInt(nil) // 0
+
diff --git a/vendor/github.com/spf13/cast/cast.go b/vendor/github.com/spf13/cast/cast.go
new file mode 100644
index 0000000..9fba638
--- /dev/null
+++ b/vendor/github.com/spf13/cast/cast.go
@@ -0,0 +1,171 @@
+// Copyright © 2014 Steve Francia .
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+// Package cast provides easy and safe casting in Go.
+package cast
+
+import "time"
+
+// ToBool casts an interface to a bool type.
+func ToBool(i interface{}) bool {
+ v, _ := ToBoolE(i)
+ return v
+}
+
+// ToTime casts an interface to a time.Time type.
+func ToTime(i interface{}) time.Time {
+ v, _ := ToTimeE(i)
+ return v
+}
+
+// ToDuration casts an interface to a time.Duration type.
+func ToDuration(i interface{}) time.Duration {
+ v, _ := ToDurationE(i)
+ return v
+}
+
+// ToFloat64 casts an interface to a float64 type.
+func ToFloat64(i interface{}) float64 {
+ v, _ := ToFloat64E(i)
+ return v
+}
+
+// ToFloat32 casts an interface to a float32 type.
+func ToFloat32(i interface{}) float32 {
+ v, _ := ToFloat32E(i)
+ return v
+}
+
+// ToInt64 casts an interface to an int64 type.
+func ToInt64(i interface{}) int64 {
+ v, _ := ToInt64E(i)
+ return v
+}
+
+// ToInt32 casts an interface to an int32 type.
+func ToInt32(i interface{}) int32 {
+ v, _ := ToInt32E(i)
+ return v
+}
+
+// ToInt16 casts an interface to an int16 type.
+func ToInt16(i interface{}) int16 {
+ v, _ := ToInt16E(i)
+ return v
+}
+
+// ToInt8 casts an interface to an int8 type.
+func ToInt8(i interface{}) int8 {
+ v, _ := ToInt8E(i)
+ return v
+}
+
+// ToInt casts an interface to an int type.
+func ToInt(i interface{}) int {
+ v, _ := ToIntE(i)
+ return v
+}
+
+// ToUint casts an interface to a uint type.
+func ToUint(i interface{}) uint {
+ v, _ := ToUintE(i)
+ return v
+}
+
+// ToUint64 casts an interface to a uint64 type.
+func ToUint64(i interface{}) uint64 {
+ v, _ := ToUint64E(i)
+ return v
+}
+
+// ToUint32 casts an interface to a uint32 type.
+func ToUint32(i interface{}) uint32 {
+ v, _ := ToUint32E(i)
+ return v
+}
+
+// ToUint16 casts an interface to a uint16 type.
+func ToUint16(i interface{}) uint16 {
+ v, _ := ToUint16E(i)
+ return v
+}
+
+// ToUint8 casts an interface to a uint8 type.
+func ToUint8(i interface{}) uint8 {
+ v, _ := ToUint8E(i)
+ return v
+}
+
+// ToString casts an interface to a string type.
+func ToString(i interface{}) string {
+ v, _ := ToStringE(i)
+ return v
+}
+
+// ToStringMapString casts an interface to a map[string]string type.
+func ToStringMapString(i interface{}) map[string]string {
+ v, _ := ToStringMapStringE(i)
+ return v
+}
+
+// ToStringMapStringSlice casts an interface to a map[string][]string type.
+func ToStringMapStringSlice(i interface{}) map[string][]string {
+ v, _ := ToStringMapStringSliceE(i)
+ return v
+}
+
+// ToStringMapBool casts an interface to a map[string]bool type.
+func ToStringMapBool(i interface{}) map[string]bool {
+ v, _ := ToStringMapBoolE(i)
+ return v
+}
+
+// ToStringMapInt casts an interface to a map[string]int type.
+func ToStringMapInt(i interface{}) map[string]int {
+ v, _ := ToStringMapIntE(i)
+ return v
+}
+
+// ToStringMapInt64 casts an interface to a map[string]int64 type.
+func ToStringMapInt64(i interface{}) map[string]int64 {
+ v, _ := ToStringMapInt64E(i)
+ return v
+}
+
+// ToStringMap casts an interface to a map[string]interface{} type.
+func ToStringMap(i interface{}) map[string]interface{} {
+ v, _ := ToStringMapE(i)
+ return v
+}
+
+// ToSlice casts an interface to a []interface{} type.
+func ToSlice(i interface{}) []interface{} {
+ v, _ := ToSliceE(i)
+ return v
+}
+
+// ToBoolSlice casts an interface to a []bool type.
+func ToBoolSlice(i interface{}) []bool {
+ v, _ := ToBoolSliceE(i)
+ return v
+}
+
+// ToStringSlice casts an interface to a []string type.
+func ToStringSlice(i interface{}) []string {
+ v, _ := ToStringSliceE(i)
+ return v
+}
+
+// ToIntSlice casts an interface to a []int type.
+func ToIntSlice(i interface{}) []int {
+ v, _ := ToIntSliceE(i)
+ return v
+}
+
+// ToDurationSlice casts an interface to a []time.Duration type.
+func ToDurationSlice(i interface{}) []time.Duration {
+ v, _ := ToDurationSliceE(i)
+ return v
+}
diff --git a/vendor/github.com/spf13/cast/caste.go b/vendor/github.com/spf13/cast/caste.go
new file mode 100644
index 0000000..a4859fb
--- /dev/null
+++ b/vendor/github.com/spf13/cast/caste.go
@@ -0,0 +1,1249 @@
+// Copyright © 2014 Steve Francia .
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package cast
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "html/template"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var errNegativeNotAllowed = errors.New("unable to cast negative value")
+
+// ToTimeE casts an interface to a time.Time type.
+func ToTimeE(i interface{}) (tim time.Time, err error) {
+ i = indirect(i)
+
+ switch v := i.(type) {
+ case time.Time:
+ return v, nil
+ case string:
+ return StringToDate(v)
+ case int:
+ return time.Unix(int64(v), 0), nil
+ case int64:
+ return time.Unix(v, 0), nil
+ case int32:
+ return time.Unix(int64(v), 0), nil
+ case uint:
+ return time.Unix(int64(v), 0), nil
+ case uint64:
+ return time.Unix(int64(v), 0), nil
+ case uint32:
+ return time.Unix(int64(v), 0), nil
+ default:
+ return time.Time{}, fmt.Errorf("unable to cast %#v of type %T to Time", i, i)
+ }
+}
+
+// ToDurationE casts an interface to a time.Duration type.
+func ToDurationE(i interface{}) (d time.Duration, err error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case time.Duration:
+ return s, nil
+ case int, int64, int32, int16, int8, uint, uint64, uint32, uint16, uint8:
+ d = time.Duration(ToInt64(s))
+ return
+ case float32, float64:
+ d = time.Duration(ToFloat64(s))
+ return
+ case string:
+ if strings.ContainsAny(s, "nsuµmh") {
+ d, err = time.ParseDuration(s)
+ } else {
+ d, err = time.ParseDuration(s + "ns")
+ }
+ return
+ default:
+ err = fmt.Errorf("unable to cast %#v of type %T to Duration", i, i)
+ return
+ }
+}
+
+// ToBoolE casts an interface to a bool type.
+func ToBoolE(i interface{}) (bool, error) {
+ i = indirect(i)
+
+ switch b := i.(type) {
+ case bool:
+ return b, nil
+ case nil:
+ return false, nil
+ case int:
+ if b != 0 {
+ return true, nil
+ }
+ return false, nil
+ case string:
+ return strconv.ParseBool(b)
+ default:
+ return false, fmt.Errorf("unable to cast %#v of type %T to bool", i, i)
+ }
+}
+
+// ToFloat64E casts an interface to a float64 type.
+func ToFloat64E(i interface{}) (float64, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case float64:
+ return s, nil
+ case float32:
+ return float64(s), nil
+ case int:
+ return float64(s), nil
+ case int64:
+ return float64(s), nil
+ case int32:
+ return float64(s), nil
+ case int16:
+ return float64(s), nil
+ case int8:
+ return float64(s), nil
+ case uint:
+ return float64(s), nil
+ case uint64:
+ return float64(s), nil
+ case uint32:
+ return float64(s), nil
+ case uint16:
+ return float64(s), nil
+ case uint8:
+ return float64(s), nil
+ case string:
+ v, err := strconv.ParseFloat(s, 64)
+ if err == nil {
+ return v, nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i)
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i)
+ }
+}
+
+// ToFloat32E casts an interface to a float32 type.
+func ToFloat32E(i interface{}) (float32, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case float64:
+ return float32(s), nil
+ case float32:
+ return s, nil
+ case int:
+ return float32(s), nil
+ case int64:
+ return float32(s), nil
+ case int32:
+ return float32(s), nil
+ case int16:
+ return float32(s), nil
+ case int8:
+ return float32(s), nil
+ case uint:
+ return float32(s), nil
+ case uint64:
+ return float32(s), nil
+ case uint32:
+ return float32(s), nil
+ case uint16:
+ return float32(s), nil
+ case uint8:
+ return float32(s), nil
+ case string:
+ v, err := strconv.ParseFloat(s, 32)
+ if err == nil {
+ return float32(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i)
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i)
+ }
+}
+
+// ToInt64E casts an interface to an int64 type.
+func ToInt64E(i interface{}) (int64, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case int:
+ return int64(s), nil
+ case int64:
+ return s, nil
+ case int32:
+ return int64(s), nil
+ case int16:
+ return int64(s), nil
+ case int8:
+ return int64(s), nil
+ case uint:
+ return int64(s), nil
+ case uint64:
+ return int64(s), nil
+ case uint32:
+ return int64(s), nil
+ case uint16:
+ return int64(s), nil
+ case uint8:
+ return int64(s), nil
+ case float64:
+ return int64(s), nil
+ case float32:
+ return int64(s), nil
+ case string:
+ v, err := strconv.ParseInt(s, 0, 0)
+ if err == nil {
+ return v, nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i)
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i)
+ }
+}
+
+// ToInt32E casts an interface to an int32 type.
+func ToInt32E(i interface{}) (int32, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case int:
+ return int32(s), nil
+ case int64:
+ return int32(s), nil
+ case int32:
+ return s, nil
+ case int16:
+ return int32(s), nil
+ case int8:
+ return int32(s), nil
+ case uint:
+ return int32(s), nil
+ case uint64:
+ return int32(s), nil
+ case uint32:
+ return int32(s), nil
+ case uint16:
+ return int32(s), nil
+ case uint8:
+ return int32(s), nil
+ case float64:
+ return int32(s), nil
+ case float32:
+ return int32(s), nil
+ case string:
+ v, err := strconv.ParseInt(s, 0, 0)
+ if err == nil {
+ return int32(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i)
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i)
+ }
+}
+
+// ToInt16E casts an interface to an int16 type.
+func ToInt16E(i interface{}) (int16, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case int:
+ return int16(s), nil
+ case int64:
+ return int16(s), nil
+ case int32:
+ return int16(s), nil
+ case int16:
+ return s, nil
+ case int8:
+ return int16(s), nil
+ case uint:
+ return int16(s), nil
+ case uint64:
+ return int16(s), nil
+ case uint32:
+ return int16(s), nil
+ case uint16:
+ return int16(s), nil
+ case uint8:
+ return int16(s), nil
+ case float64:
+ return int16(s), nil
+ case float32:
+ return int16(s), nil
+ case string:
+ v, err := strconv.ParseInt(s, 0, 0)
+ if err == nil {
+ return int16(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i)
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i)
+ }
+}
+
+// ToInt8E casts an interface to an int8 type.
+func ToInt8E(i interface{}) (int8, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case int:
+ return int8(s), nil
+ case int64:
+ return int8(s), nil
+ case int32:
+ return int8(s), nil
+ case int16:
+ return int8(s), nil
+ case int8:
+ return s, nil
+ case uint:
+ return int8(s), nil
+ case uint64:
+ return int8(s), nil
+ case uint32:
+ return int8(s), nil
+ case uint16:
+ return int8(s), nil
+ case uint8:
+ return int8(s), nil
+ case float64:
+ return int8(s), nil
+ case float32:
+ return int8(s), nil
+ case string:
+ v, err := strconv.ParseInt(s, 0, 0)
+ if err == nil {
+ return int8(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i)
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i)
+ }
+}
+
+// ToIntE casts an interface to an int type.
+func ToIntE(i interface{}) (int, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case int:
+ return s, nil
+ case int64:
+ return int(s), nil
+ case int32:
+ return int(s), nil
+ case int16:
+ return int(s), nil
+ case int8:
+ return int(s), nil
+ case uint:
+ return int(s), nil
+ case uint64:
+ return int(s), nil
+ case uint32:
+ return int(s), nil
+ case uint16:
+ return int(s), nil
+ case uint8:
+ return int(s), nil
+ case float64:
+ return int(s), nil
+ case float32:
+ return int(s), nil
+ case string:
+ v, err := strconv.ParseInt(s, 0, 0)
+ if err == nil {
+ return int(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int", i, i)
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int", i, i)
+ }
+}
+
+// ToUintE casts an interface to a uint type.
+func ToUintE(i interface{}) (uint, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case string:
+ v, err := strconv.ParseUint(s, 0, 0)
+ if err == nil {
+ return uint(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v to uint: %s", i, err)
+ case int:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint(s), nil
+ case int64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint(s), nil
+ case int32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint(s), nil
+ case int16:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint(s), nil
+ case int8:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint(s), nil
+ case uint:
+ return s, nil
+ case uint64:
+ return uint(s), nil
+ case uint32:
+ return uint(s), nil
+ case uint16:
+ return uint(s), nil
+ case uint8:
+ return uint(s), nil
+ case float64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint(s), nil
+ case float32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint(s), nil
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to uint", i, i)
+ }
+}
+
+// ToUint64E casts an interface to a uint64 type.
+func ToUint64E(i interface{}) (uint64, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case string:
+ v, err := strconv.ParseUint(s, 0, 64)
+ if err == nil {
+ return v, nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v to uint64: %s", i, err)
+ case int:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint64(s), nil
+ case int64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint64(s), nil
+ case int32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint64(s), nil
+ case int16:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint64(s), nil
+ case int8:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint64(s), nil
+ case uint:
+ return uint64(s), nil
+ case uint64:
+ return s, nil
+ case uint32:
+ return uint64(s), nil
+ case uint16:
+ return uint64(s), nil
+ case uint8:
+ return uint64(s), nil
+ case float32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint64(s), nil
+ case float64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint64(s), nil
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to uint64", i, i)
+ }
+}
+
+// ToUint32E casts an interface to a uint32 type.
+func ToUint32E(i interface{}) (uint32, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case string:
+ v, err := strconv.ParseUint(s, 0, 32)
+ if err == nil {
+ return uint32(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v to uint32: %s", i, err)
+ case int:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint32(s), nil
+ case int64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint32(s), nil
+ case int32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint32(s), nil
+ case int16:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint32(s), nil
+ case int8:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint32(s), nil
+ case uint:
+ return uint32(s), nil
+ case uint64:
+ return uint32(s), nil
+ case uint32:
+ return s, nil
+ case uint16:
+ return uint32(s), nil
+ case uint8:
+ return uint32(s), nil
+ case float64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint32(s), nil
+ case float32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint32(s), nil
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to uint32", i, i)
+ }
+}
+
+// ToUint16E casts an interface to a uint16 type.
+func ToUint16E(i interface{}) (uint16, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case string:
+ v, err := strconv.ParseUint(s, 0, 16)
+ if err == nil {
+ return uint16(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v to uint16: %s", i, err)
+ case int:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint16(s), nil
+ case int64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint16(s), nil
+ case int32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint16(s), nil
+ case int16:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint16(s), nil
+ case int8:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint16(s), nil
+ case uint:
+ return uint16(s), nil
+ case uint64:
+ return uint16(s), nil
+ case uint32:
+ return uint16(s), nil
+ case uint16:
+ return s, nil
+ case uint8:
+ return uint16(s), nil
+ case float64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint16(s), nil
+ case float32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint16(s), nil
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to uint16", i, i)
+ }
+}
+
+// ToUint8E casts an interface to a uint type.
+func ToUint8E(i interface{}) (uint8, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case string:
+ v, err := strconv.ParseUint(s, 0, 8)
+ if err == nil {
+ return uint8(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v to uint8: %s", i, err)
+ case int:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint8(s), nil
+ case int64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint8(s), nil
+ case int32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint8(s), nil
+ case int16:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint8(s), nil
+ case int8:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint8(s), nil
+ case uint:
+ return uint8(s), nil
+ case uint64:
+ return uint8(s), nil
+ case uint32:
+ return uint8(s), nil
+ case uint16:
+ return uint8(s), nil
+ case uint8:
+ return s, nil
+ case float64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint8(s), nil
+ case float32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint8(s), nil
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to uint8", i, i)
+ }
+}
+
+// From html/template/content.go
+// Copyright 2011 The Go Authors. All rights reserved.
+// indirect returns the value, after dereferencing as many times
+// as necessary to reach the base type (or nil).
+func indirect(a interface{}) interface{} {
+ if a == nil {
+ return nil
+ }
+ if t := reflect.TypeOf(a); t.Kind() != reflect.Ptr {
+ // Avoid creating a reflect.Value if it's not a pointer.
+ return a
+ }
+ v := reflect.ValueOf(a)
+ for v.Kind() == reflect.Ptr && !v.IsNil() {
+ v = v.Elem()
+ }
+ return v.Interface()
+}
+
+// From html/template/content.go
+// Copyright 2011 The Go Authors. All rights reserved.
+// indirectToStringerOrError returns the value, after dereferencing as many times
+// as necessary to reach the base type (or nil) or an implementation of fmt.Stringer
+// or error,
+func indirectToStringerOrError(a interface{}) interface{} {
+ if a == nil {
+ return nil
+ }
+
+ var errorType = reflect.TypeOf((*error)(nil)).Elem()
+ var fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
+
+ v := reflect.ValueOf(a)
+ for !v.Type().Implements(fmtStringerType) && !v.Type().Implements(errorType) && v.Kind() == reflect.Ptr && !v.IsNil() {
+ v = v.Elem()
+ }
+ return v.Interface()
+}
+
+// ToStringE casts an interface to a string type.
+func ToStringE(i interface{}) (string, error) {
+ i = indirectToStringerOrError(i)
+
+ switch s := i.(type) {
+ case string:
+ return s, nil
+ case bool:
+ return strconv.FormatBool(s), nil
+ case float64:
+ return strconv.FormatFloat(s, 'f', -1, 64), nil
+ case float32:
+ return strconv.FormatFloat(float64(s), 'f', -1, 32), nil
+ case int:
+ return strconv.Itoa(s), nil
+ case int64:
+ return strconv.FormatInt(s, 10), nil
+ case int32:
+ return strconv.Itoa(int(s)), nil
+ case int16:
+ return strconv.FormatInt(int64(s), 10), nil
+ case int8:
+ return strconv.FormatInt(int64(s), 10), nil
+ case uint:
+ return strconv.FormatUint(uint64(s), 10), nil
+ case uint64:
+ return strconv.FormatUint(s, 10), nil
+ case uint32:
+ return strconv.FormatUint(uint64(s), 10), nil
+ case uint16:
+ return strconv.FormatUint(uint64(s), 10), nil
+ case uint8:
+ return strconv.FormatUint(uint64(s), 10), nil
+ case []byte:
+ return string(s), nil
+ case template.HTML:
+ return string(s), nil
+ case template.URL:
+ return string(s), nil
+ case template.JS:
+ return string(s), nil
+ case template.CSS:
+ return string(s), nil
+ case template.HTMLAttr:
+ return string(s), nil
+ case nil:
+ return "", nil
+ case fmt.Stringer:
+ return s.String(), nil
+ case error:
+ return s.Error(), nil
+ default:
+ return "", fmt.Errorf("unable to cast %#v of type %T to string", i, i)
+ }
+}
+
+// ToStringMapStringE casts an interface to a map[string]string type.
+func ToStringMapStringE(i interface{}) (map[string]string, error) {
+ var m = map[string]string{}
+
+ switch v := i.(type) {
+ case map[string]string:
+ return v, nil
+ case map[string]interface{}:
+ for k, val := range v {
+ m[ToString(k)] = ToString(val)
+ }
+ return m, nil
+ case map[interface{}]string:
+ for k, val := range v {
+ m[ToString(k)] = ToString(val)
+ }
+ return m, nil
+ case map[interface{}]interface{}:
+ for k, val := range v {
+ m[ToString(k)] = ToString(val)
+ }
+ return m, nil
+ case string:
+ err := jsonStringToObject(v, &m)
+ return m, err
+ default:
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string]string", i, i)
+ }
+}
+
+// ToStringMapStringSliceE casts an interface to a map[string][]string type.
+func ToStringMapStringSliceE(i interface{}) (map[string][]string, error) {
+ var m = map[string][]string{}
+
+ switch v := i.(type) {
+ case map[string][]string:
+ return v, nil
+ case map[string][]interface{}:
+ for k, val := range v {
+ m[ToString(k)] = ToStringSlice(val)
+ }
+ return m, nil
+ case map[string]string:
+ for k, val := range v {
+ m[ToString(k)] = []string{val}
+ }
+ case map[string]interface{}:
+ for k, val := range v {
+ switch vt := val.(type) {
+ case []interface{}:
+ m[ToString(k)] = ToStringSlice(vt)
+ case []string:
+ m[ToString(k)] = vt
+ default:
+ m[ToString(k)] = []string{ToString(val)}
+ }
+ }
+ return m, nil
+ case map[interface{}][]string:
+ for k, val := range v {
+ m[ToString(k)] = ToStringSlice(val)
+ }
+ return m, nil
+ case map[interface{}]string:
+ for k, val := range v {
+ m[ToString(k)] = ToStringSlice(val)
+ }
+ return m, nil
+ case map[interface{}][]interface{}:
+ for k, val := range v {
+ m[ToString(k)] = ToStringSlice(val)
+ }
+ return m, nil
+ case map[interface{}]interface{}:
+ for k, val := range v {
+ key, err := ToStringE(k)
+ if err != nil {
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i)
+ }
+ value, err := ToStringSliceE(val)
+ if err != nil {
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i)
+ }
+ m[key] = value
+ }
+ case string:
+ err := jsonStringToObject(v, &m)
+ return m, err
+ default:
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i)
+ }
+ return m, nil
+}
+
+// ToStringMapBoolE casts an interface to a map[string]bool type.
+func ToStringMapBoolE(i interface{}) (map[string]bool, error) {
+ var m = map[string]bool{}
+
+ switch v := i.(type) {
+ case map[interface{}]interface{}:
+ for k, val := range v {
+ m[ToString(k)] = ToBool(val)
+ }
+ return m, nil
+ case map[string]interface{}:
+ for k, val := range v {
+ m[ToString(k)] = ToBool(val)
+ }
+ return m, nil
+ case map[string]bool:
+ return v, nil
+ case string:
+ err := jsonStringToObject(v, &m)
+ return m, err
+ default:
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string]bool", i, i)
+ }
+}
+
+// ToStringMapE casts an interface to a map[string]interface{} type.
+func ToStringMapE(i interface{}) (map[string]interface{}, error) {
+ var m = map[string]interface{}{}
+
+ switch v := i.(type) {
+ case map[interface{}]interface{}:
+ for k, val := range v {
+ m[ToString(k)] = val
+ }
+ return m, nil
+ case map[string]interface{}:
+ return v, nil
+ case string:
+ err := jsonStringToObject(v, &m)
+ return m, err
+ default:
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string]interface{}", i, i)
+ }
+}
+
+// ToStringMapIntE casts an interface to a map[string]int{} type.
+func ToStringMapIntE(i interface{}) (map[string]int, error) {
+ var m = map[string]int{}
+ if i == nil {
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i)
+ }
+
+ switch v := i.(type) {
+ case map[interface{}]interface{}:
+ for k, val := range v {
+ m[ToString(k)] = ToInt(val)
+ }
+ return m, nil
+ case map[string]interface{}:
+ for k, val := range v {
+ m[k] = ToInt(val)
+ }
+ return m, nil
+ case map[string]int:
+ return v, nil
+ case string:
+ err := jsonStringToObject(v, &m)
+ return m, err
+ }
+
+ if reflect.TypeOf(i).Kind() != reflect.Map {
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i)
+ }
+
+ mVal := reflect.ValueOf(m)
+ v := reflect.ValueOf(i)
+ for _, keyVal := range v.MapKeys() {
+ val, err := ToIntE(v.MapIndex(keyVal).Interface())
+ if err != nil {
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i)
+ }
+ mVal.SetMapIndex(keyVal, reflect.ValueOf(val))
+ }
+ return m, nil
+}
+
+// ToStringMapInt64E casts an interface to a map[string]int64{} type.
+func ToStringMapInt64E(i interface{}) (map[string]int64, error) {
+ var m = map[string]int64{}
+ if i == nil {
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i)
+ }
+
+ switch v := i.(type) {
+ case map[interface{}]interface{}:
+ for k, val := range v {
+ m[ToString(k)] = ToInt64(val)
+ }
+ return m, nil
+ case map[string]interface{}:
+ for k, val := range v {
+ m[k] = ToInt64(val)
+ }
+ return m, nil
+ case map[string]int64:
+ return v, nil
+ case string:
+ err := jsonStringToObject(v, &m)
+ return m, err
+ }
+
+ if reflect.TypeOf(i).Kind() != reflect.Map {
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i)
+ }
+ mVal := reflect.ValueOf(m)
+ v := reflect.ValueOf(i)
+ for _, keyVal := range v.MapKeys() {
+ val, err := ToInt64E(v.MapIndex(keyVal).Interface())
+ if err != nil {
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i)
+ }
+ mVal.SetMapIndex(keyVal, reflect.ValueOf(val))
+ }
+ return m, nil
+}
+
+// ToSliceE casts an interface to a []interface{} type.
+func ToSliceE(i interface{}) ([]interface{}, error) {
+ var s []interface{}
+
+ switch v := i.(type) {
+ case []interface{}:
+ return append(s, v...), nil
+ case []map[string]interface{}:
+ for _, u := range v {
+ s = append(s, u)
+ }
+ return s, nil
+ default:
+ return s, fmt.Errorf("unable to cast %#v of type %T to []interface{}", i, i)
+ }
+}
+
+// ToBoolSliceE casts an interface to a []bool type.
+func ToBoolSliceE(i interface{}) ([]bool, error) {
+ if i == nil {
+ return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i)
+ }
+
+ switch v := i.(type) {
+ case []bool:
+ return v, nil
+ }
+
+ kind := reflect.TypeOf(i).Kind()
+ switch kind {
+ case reflect.Slice, reflect.Array:
+ s := reflect.ValueOf(i)
+ a := make([]bool, s.Len())
+ for j := 0; j < s.Len(); j++ {
+ val, err := ToBoolE(s.Index(j).Interface())
+ if err != nil {
+ return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i)
+ }
+ a[j] = val
+ }
+ return a, nil
+ default:
+ return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i)
+ }
+}
+
+// ToStringSliceE casts an interface to a []string type.
+func ToStringSliceE(i interface{}) ([]string, error) {
+ var a []string
+
+ switch v := i.(type) {
+ case []interface{}:
+ for _, u := range v {
+ a = append(a, ToString(u))
+ }
+ return a, nil
+ case []string:
+ return v, nil
+ case string:
+ return strings.Fields(v), nil
+ case interface{}:
+ str, err := ToStringE(v)
+ if err != nil {
+ return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i)
+ }
+ return []string{str}, nil
+ default:
+ return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i)
+ }
+}
+
+// ToIntSliceE casts an interface to a []int type.
+func ToIntSliceE(i interface{}) ([]int, error) {
+ if i == nil {
+ return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i)
+ }
+
+ switch v := i.(type) {
+ case []int:
+ return v, nil
+ }
+
+ kind := reflect.TypeOf(i).Kind()
+ switch kind {
+ case reflect.Slice, reflect.Array:
+ s := reflect.ValueOf(i)
+ a := make([]int, s.Len())
+ for j := 0; j < s.Len(); j++ {
+ val, err := ToIntE(s.Index(j).Interface())
+ if err != nil {
+ return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i)
+ }
+ a[j] = val
+ }
+ return a, nil
+ default:
+ return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i)
+ }
+}
+
+// ToDurationSliceE casts an interface to a []time.Duration type.
+func ToDurationSliceE(i interface{}) ([]time.Duration, error) {
+ if i == nil {
+ return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i)
+ }
+
+ switch v := i.(type) {
+ case []time.Duration:
+ return v, nil
+ }
+
+ kind := reflect.TypeOf(i).Kind()
+ switch kind {
+ case reflect.Slice, reflect.Array:
+ s := reflect.ValueOf(i)
+ a := make([]time.Duration, s.Len())
+ for j := 0; j < s.Len(); j++ {
+ val, err := ToDurationE(s.Index(j).Interface())
+ if err != nil {
+ return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i)
+ }
+ a[j] = val
+ }
+ return a, nil
+ default:
+ return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i)
+ }
+}
+
+// StringToDate attempts to parse a string into a time.Time type using a
+// predefined list of formats. If no suitable format is found, an error is
+// returned.
+func StringToDate(s string) (time.Time, error) {
+ return parseDateWith(s, []string{
+ time.RFC3339,
+ "2006-01-02T15:04:05", // iso8601 without timezone
+ time.RFC1123Z,
+ time.RFC1123,
+ time.RFC822Z,
+ time.RFC822,
+ time.RFC850,
+ time.ANSIC,
+ time.UnixDate,
+ time.RubyDate,
+ "2006-01-02 15:04:05.999999999 -0700 MST", // Time.String()
+ "2006-01-02",
+ "02 Jan 2006",
+ "2006-01-02T15:04:05-0700", // RFC3339 without timezone hh:mm colon
+ "2006-01-02 15:04:05 -07:00",
+ "2006-01-02 15:04:05 -0700",
+ "2006-01-02 15:04:05Z07:00", // RFC3339 without T
+ "2006-01-02 15:04:05Z0700", // RFC3339 without T or timezone hh:mm colon
+ "2006-01-02 15:04:05",
+ time.Kitchen,
+ time.Stamp,
+ time.StampMilli,
+ time.StampMicro,
+ time.StampNano,
+ })
+}
+
+func parseDateWith(s string, dates []string) (d time.Time, e error) {
+ for _, dateType := range dates {
+ if d, e = time.Parse(dateType, s); e == nil {
+ return
+ }
+ }
+ return d, fmt.Errorf("unable to parse date: %s", s)
+}
+
+// jsonStringToObject attempts to unmarshal a string as JSON into
+// the object passed as pointer.
+func jsonStringToObject(s string, v interface{}) error {
+ data := []byte(s)
+ return json.Unmarshal(data, v)
+}
diff --git a/vendor/github.com/spf13/cast/go.mod b/vendor/github.com/spf13/cast/go.mod
new file mode 100644
index 0000000..c1c0232
--- /dev/null
+++ b/vendor/github.com/spf13/cast/go.mod
@@ -0,0 +1,7 @@
+module github.com/spf13/cast
+
+require (
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/stretchr/testify v1.2.2
+)
diff --git a/vendor/github.com/spf13/cast/go.sum b/vendor/github.com/spf13/cast/go.sum
new file mode 100644
index 0000000..e03ee77
--- /dev/null
+++ b/vendor/github.com/spf13/cast/go.sum
@@ -0,0 +1,6 @@
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
diff --git a/vendor/github.com/spf13/cobra/.gitignore b/vendor/github.com/spf13/cobra/.gitignore
index 3b053c5..c7b459e 100644
--- a/vendor/github.com/spf13/cobra/.gitignore
+++ b/vendor/github.com/spf13/cobra/.gitignore
@@ -32,7 +32,8 @@ Session.vim
tags
*.exe
-
cobra.test
+bin
-.idea/*
+.idea/
+*.iml
diff --git a/vendor/github.com/spf13/cobra/.travis.yml b/vendor/github.com/spf13/cobra/.travis.yml
index 38b85f4..a9bd4e5 100644
--- a/vendor/github.com/spf13/cobra/.travis.yml
+++ b/vendor/github.com/spf13/cobra/.travis.yml
@@ -3,29 +3,27 @@ language: go
stages:
- diff
- test
+ - build
go:
- - 1.10.x
- - 1.11.x
- 1.12.x
+ - 1.13.x
- tip
+before_install:
+ - go get -u github.com/kyoh86/richgo
+ - go get -u github.com/mitchellh/gox
+
matrix:
allow_failures:
- go: tip
include:
- stage: diff
- go: 1.12.x
- script: diff -u <(echo -n) <(gofmt -d -s .)
+ go: 1.13.x
+ script: make fmt
+ - stage: build
+ go: 1.13.x
+ script: make cobra_generator
-before_install:
- - mkdir -p bin
- - curl -Lso bin/shellcheck https://github.com/caarlos0/shellcheck-docker/releases/download/v0.6.0/shellcheck
- - chmod +x bin/shellcheck
- - go get -u github.com/kyoh86/richgo
-script:
- - PATH=$PATH:$PWD/bin richgo test -v ./...
- - go build
- - if [ -z $NOVET ]; then
- diff -u <(echo -n) <(go vet . 2>&1 | grep -vE 'ExampleCommand|bash_completions.*Fprint');
- fi
+script:
+ - make test
diff --git a/vendor/github.com/spf13/cobra/Makefile b/vendor/github.com/spf13/cobra/Makefile
new file mode 100644
index 0000000..e9740d1
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/Makefile
@@ -0,0 +1,36 @@
+BIN="./bin"
+SRC=$(shell find . -name "*.go")
+
+ifeq (, $(shell which richgo))
+$(warning "could not find richgo in $(PATH), run: go get github.com/kyoh86/richgo")
+endif
+
+.PHONY: fmt vet test cobra_generator install_deps clean
+
+default: all
+
+all: fmt vet test cobra_generator
+
+fmt:
+ $(info ******************** checking formatting ********************)
+ @test -z $(shell gofmt -l $(SRC)) || (gofmt -d $(SRC); exit 1)
+
+test: install_deps vet
+ $(info ******************** running tests ********************)
+ richgo test -v ./...
+
+cobra_generator: install_deps
+ $(info ******************** building generator ********************)
+ mkdir -p $(BIN)
+ make -C cobra all
+
+install_deps:
+ $(info ******************** downloading dependencies ********************)
+ go get -v ./...
+
+vet:
+ $(info ******************** vetting ********************)
+ go vet ./...
+
+clean:
+ rm -rf $(BIN)
diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md
index 60c5a42..9d79934 100644
--- a/vendor/github.com/spf13/cobra/README.md
+++ b/vendor/github.com/spf13/cobra/README.md
@@ -24,11 +24,13 @@ Many of the most widely used Go projects are built using Cobra, such as:
[Prototool](https://github.com/uber/prototool),
[mattermost-server](https://github.com/mattermost/mattermost-server),
[Gardener](https://github.com/gardener/gardenctl),
+[Linkerd](https://linkerd.io/),
+[Github CLI](https://github.com/cli/cli)
etc.
[![Build Status](https://travis-ci.org/spf13/cobra.svg "Travis CI status")](https://travis-ci.org/spf13/cobra)
-[![CircleCI status](https://circleci.com/gh/spf13/cobra.png?circle-token=:circle-token "CircleCI status")](https://circleci.com/gh/spf13/cobra)
[![GoDoc](https://godoc.org/github.com/spf13/cobra?status.svg)](https://godoc.org/github.com/spf13/cobra)
+[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cobra)](https://goreportcard.com/report/github.com/spf13/cobra)
# Table of Contents
@@ -208,51 +210,78 @@ You will additionally define flags and handle configuration in your init() funct
For example cmd/root.go:
```go
+package cmd
+
import (
- "fmt"
- "os"
+ "fmt"
+ "os"
- homedir "github.com/mitchellh/go-homedir"
- "github.com/spf13/cobra"
- "github.com/spf13/viper"
+ homedir "github.com/mitchellh/go-homedir"
+ "github.com/spf13/cobra"
+ "github.com/spf13/viper"
)
-func init() {
- cobra.OnInitialize(initConfig)
- rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)")
- rootCmd.PersistentFlags().StringVarP(&projectBase, "projectbase", "b", "", "base project directory eg. github.com/spf13/")
- rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "Author name for copyright attribution")
- rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "Name of license for the project (can provide `licensetext` in config)")
- rootCmd.PersistentFlags().Bool("viper", true, "Use Viper for configuration")
- viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author"))
- viper.BindPFlag("projectbase", rootCmd.PersistentFlags().Lookup("projectbase"))
- viper.BindPFlag("useViper", rootCmd.PersistentFlags().Lookup("viper"))
- viper.SetDefault("author", "NAME HERE ")
- viper.SetDefault("license", "apache")
+var (
+ // Used for flags.
+ cfgFile string
+ userLicense string
+
+ rootCmd = &cobra.Command{
+ Use: "cobra",
+ Short: "A generator for Cobra based Applications",
+ Long: `Cobra is a CLI library for Go that empowers applications.
+This application is a tool to generate the needed files
+to quickly create a Cobra application.`,
+ }
+)
+
+// Execute executes the root command.
+func Execute() error {
+ return rootCmd.Execute()
}
-func initConfig() {
- // Don't forget to read config either from cfgFile or from home directory!
- if cfgFile != "" {
- // Use config file from the flag.
- viper.SetConfigFile(cfgFile)
- } else {
- // Find home directory.
- home, err := homedir.Dir()
- if err != nil {
- fmt.Println(err)
- os.Exit(1)
- }
+func init() {
+ cobra.OnInitialize(initConfig)
+
+ rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)")
+ rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "author name for copyright attribution")
+ rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "name of license for the project")
+ rootCmd.PersistentFlags().Bool("viper", true, "use Viper for configuration")
+ viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author"))
+ viper.BindPFlag("useViper", rootCmd.PersistentFlags().Lookup("viper"))
+ viper.SetDefault("author", "NAME HERE ")
+ viper.SetDefault("license", "apache")
+
+ rootCmd.AddCommand(addCmd)
+ rootCmd.AddCommand(initCmd)
+}
- // Search config in home directory with name ".cobra" (without extension).
- viper.AddConfigPath(home)
- viper.SetConfigName(".cobra")
- }
+func er(msg interface{}) {
+ fmt.Println("Error:", msg)
+ os.Exit(1)
+}
- if err := viper.ReadInConfig(); err != nil {
- fmt.Println("Can't read config:", err)
- os.Exit(1)
- }
+func initConfig() {
+ if cfgFile != "" {
+ // Use config file from the flag.
+ viper.SetConfigFile(cfgFile)
+ } else {
+ // Find home directory.
+ home, err := homedir.Dir()
+ if err != nil {
+ er(err)
+ }
+
+ // Search config in home directory with name ".cobra" (without extension).
+ viper.AddConfigPath(home)
+ viper.SetConfigName(".cobra")
+ }
+
+ viper.AutomaticEnv()
+
+ if err := viper.ReadInConfig(); err == nil {
+ fmt.Println("Using config file:", viper.ConfigFileUsed())
+ }
}
```
@@ -459,7 +488,7 @@ For many years people have printed back to the screen.`,
Echo works a lot like print, except it has a child command.`,
Args: cobra.MinimumNArgs(1),
Run: func(cmd *cobra.Command, args []string) {
- fmt.Println("Print: " + strings.Join(args, " "))
+ fmt.Println("Echo: " + strings.Join(args, " "))
},
}
diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go
index c4d820b..70e9b26 100644
--- a/vendor/github.com/spf13/cobra/args.go
+++ b/vendor/github.com/spf13/cobra/args.go
@@ -2,6 +2,7 @@ package cobra
import (
"fmt"
+ "strings"
)
type PositionalArgs func(cmd *Command, args []string) error
@@ -34,8 +35,15 @@ func NoArgs(cmd *Command, args []string) error {
// OnlyValidArgs returns an error if any args are not in the list of ValidArgs.
func OnlyValidArgs(cmd *Command, args []string) error {
if len(cmd.ValidArgs) > 0 {
+ // Remove any description that may be included in ValidArgs.
+ // A description is following a tab character.
+ var validArgs []string
+ for _, v := range cmd.ValidArgs {
+ validArgs = append(validArgs, strings.Split(v, "\t")[0])
+ }
+
for _, v := range args {
- if !stringInSlice(v, cmd.ValidArgs) {
+ if !stringInSlice(v, validArgs) {
return fmt.Errorf("invalid argument %q for %q%s", v, cmd.CommandPath(), cmd.findSuggestions(args[0]))
}
}
diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go
index 57bb8e1..1e27188 100644
--- a/vendor/github.com/spf13/cobra/bash_completions.go
+++ b/vendor/github.com/spf13/cobra/bash_completions.go
@@ -58,9 +58,71 @@ __%[1]s_contains_word()
return 1
}
+__%[1]s_handle_go_custom_completion()
+{
+ __%[1]s_debug "${FUNCNAME[0]}: cur is ${cur}, words[*] is ${words[*]}, #words[@] is ${#words[@]}"
+
+ local out requestComp lastParam lastChar comp directive args
+
+ # Prepare the command to request completions for the program.
+ # Calling ${words[0]} instead of directly %[1]s allows to handle aliases
+ args=("${words[@]:1}")
+ requestComp="${words[0]} %[2]s ${args[*]}"
+
+ lastParam=${words[$((${#words[@]}-1))]}
+ lastChar=${lastParam:$((${#lastParam}-1)):1}
+ __%[1]s_debug "${FUNCNAME[0]}: lastParam ${lastParam}, lastChar ${lastChar}"
+
+ if [ -z "${cur}" ] && [ "${lastChar}" != "=" ]; then
+ # If the last parameter is complete (there is a space following it)
+ # We add an extra empty parameter so we can indicate this to the go method.
+ __%[1]s_debug "${FUNCNAME[0]}: Adding extra empty parameter"
+ requestComp="${requestComp} \"\""
+ fi
+
+ __%[1]s_debug "${FUNCNAME[0]}: calling ${requestComp}"
+ # Use eval to handle any environment variables and such
+ out=$(eval "${requestComp}" 2>/dev/null)
+
+ # Extract the directive integer at the very end of the output following a colon (:)
+ directive=${out##*:}
+ # Remove the directive
+ out=${out%%:*}
+ if [ "${directive}" = "${out}" ]; then
+ # There is no directive specified
+ directive=0
+ fi
+ __%[1]s_debug "${FUNCNAME[0]}: the completion directive is: ${directive}"
+ __%[1]s_debug "${FUNCNAME[0]}: the completions are: ${out[*]}"
+
+ if [ $((directive & %[3]d)) -ne 0 ]; then
+ # Error code. No completion.
+ __%[1]s_debug "${FUNCNAME[0]}: received error from custom completion go code"
+ return
+ else
+ if [ $((directive & %[4]d)) -ne 0 ]; then
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ __%[1]s_debug "${FUNCNAME[0]}: activating no space"
+ compopt -o nospace
+ fi
+ fi
+ if [ $((directive & %[5]d)) -ne 0 ]; then
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ __%[1]s_debug "${FUNCNAME[0]}: activating no file completion"
+ compopt +o default
+ fi
+ fi
+
+ while IFS='' read -r comp; do
+ COMPREPLY+=("$comp")
+ done < <(compgen -W "${out[*]}" -- "$cur")
+ fi
+}
+
__%[1]s_handle_reply()
{
__%[1]s_debug "${FUNCNAME[0]}"
+ local comp
case $cur in
-*)
if [[ $(type -t compopt) = "builtin" ]]; then
@@ -72,7 +134,9 @@ __%[1]s_handle_reply()
else
allflags=("${flags[*]} ${two_word_flags[*]}")
fi
- COMPREPLY=( $(compgen -W "${allflags[*]}" -- "$cur") )
+ while IFS='' read -r comp; do
+ COMPREPLY+=("$comp")
+ done < <(compgen -W "${allflags[*]}" -- "$cur")
if [[ $(type -t compopt) = "builtin" ]]; then
[[ "${COMPREPLY[0]}" == *= ]] || compopt +o nospace
fi
@@ -118,14 +182,22 @@ __%[1]s_handle_reply()
completions=("${commands[@]}")
if [[ ${#must_have_one_noun[@]} -ne 0 ]]; then
completions=("${must_have_one_noun[@]}")
+ elif [[ -n "${has_completion_function}" ]]; then
+ # if a go completion function is provided, defer to that function
+ completions=()
+ __%[1]s_handle_go_custom_completion
fi
if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then
completions+=("${must_have_one_flag[@]}")
fi
- COMPREPLY=( $(compgen -W "${completions[*]}" -- "$cur") )
+ while IFS='' read -r comp; do
+ COMPREPLY+=("$comp")
+ done < <(compgen -W "${completions[*]}" -- "$cur")
if [[ ${#COMPREPLY[@]} -eq 0 && ${#noun_aliases[@]} -gt 0 && ${#must_have_one_noun[@]} -ne 0 ]]; then
- COMPREPLY=( $(compgen -W "${noun_aliases[*]}" -- "$cur") )
+ while IFS='' read -r comp; do
+ COMPREPLY+=("$comp")
+ done < <(compgen -W "${noun_aliases[*]}" -- "$cur")
fi
if [[ ${#COMPREPLY[@]} -eq 0 ]]; then
@@ -160,7 +232,7 @@ __%[1]s_handle_filename_extension_flag()
__%[1]s_handle_subdirs_in_dir_flag()
{
local dir="$1"
- pushd "${dir}" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1
+ pushd "${dir}" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return
}
__%[1]s_handle_flag()
@@ -272,7 +344,7 @@ __%[1]s_handle_word()
__%[1]s_handle_word
}
-`, name))
+`, name, ShellCompNoDescRequestCmd, ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp))
}
func writePostscript(buf *bytes.Buffer, name string) {
@@ -297,6 +369,7 @@ func writePostscript(buf *bytes.Buffer, name string) {
local commands=("%[1]s")
local must_have_one_flag=()
local must_have_one_noun=()
+ local has_completion_function
local last_command
local nouns=()
@@ -397,7 +470,22 @@ func writeLocalNonPersistentFlag(buf *bytes.Buffer, flag *pflag.Flag) {
buf.WriteString(fmt.Sprintf(format, name))
}
+// Setup annotations for go completions for registered flags
+func prepareCustomAnnotationsForFlags(cmd *Command) {
+ for flag := range flagCompletionFunctions {
+ // Make sure the completion script calls the __*_go_custom_completion function for
+ // every registered flag. We need to do this here (and not when the flag was registered
+ // for completion) so that we can know the root command name for the prefix
+ // of ___go_custom_completion
+ if flag.Annotations == nil {
+ flag.Annotations = map[string][]string{}
+ }
+ flag.Annotations[BashCompCustom] = []string{fmt.Sprintf("__%[1]s_handle_go_custom_completion", cmd.Root().Name())}
+ }
+}
+
func writeFlags(buf *bytes.Buffer, cmd *Command) {
+ prepareCustomAnnotationsForFlags(cmd)
buf.WriteString(` flags=()
two_word_flags=()
local_nonpersistent_flags=()
@@ -460,8 +548,14 @@ func writeRequiredNouns(buf *bytes.Buffer, cmd *Command) {
buf.WriteString(" must_have_one_noun=()\n")
sort.Sort(sort.StringSlice(cmd.ValidArgs))
for _, value := range cmd.ValidArgs {
+ // Remove any description that may be included following a tab character.
+ // Descriptions are not supported by bash completion.
+ value = strings.Split(value, "\t")[0]
buf.WriteString(fmt.Sprintf(" must_have_one_noun+=(%q)\n", value))
}
+ if cmd.ValidArgsFunction != nil {
+ buf.WriteString(" has_completion_function=1\n")
+ }
}
func writeCmdAliases(buf *bytes.Buffer, cmd *Command) {
diff --git a/vendor/github.com/spf13/cobra/bash_completions.md b/vendor/github.com/spf13/cobra/bash_completions.md
index 4ac61ee..e61a3a6 100644
--- a/vendor/github.com/spf13/cobra/bash_completions.md
+++ b/vendor/github.com/spf13/cobra/bash_completions.md
@@ -56,7 +56,149 @@ func main() {
`out.sh` will get you completions of subcommands and flags. Copy it to `/etc/bash_completion.d/` as described [here](https://debian-administration.org/article/316/An_introduction_to_bash_completion_part_1) and reset your terminal to use autocompletion. If you make additional annotations to your code, you can get even more intelligent and flexible behavior.
-## Creating your own custom functions
+## Have the completions code complete your 'nouns'
+
+### Static completion of nouns
+
+This method allows you to provide a pre-defined list of completion choices for your nouns using the `validArgs` field.
+For example, if you want `kubectl get [tab][tab]` to show a list of valid "nouns" you have to set them. Simplified code from `kubectl get` looks like:
+
+```go
+validArgs []string = { "pod", "node", "service", "replicationcontroller" }
+
+cmd := &cobra.Command{
+ Use: "get [(-o|--output=)json|yaml|template|...] (RESOURCE [NAME] | RESOURCE/NAME ...)",
+ Short: "Display one or many resources",
+ Long: get_long,
+ Example: get_example,
+ Run: func(cmd *cobra.Command, args []string) {
+ err := RunGet(f, out, cmd, args)
+ util.CheckErr(err)
+ },
+ ValidArgs: validArgs,
+}
+```
+
+Notice we put the "ValidArgs" on the "get" subcommand. Doing so will give results like
+
+```bash
+# kubectl get [tab][tab]
+node pod replicationcontroller service
+```
+
+### Plural form and shortcuts for nouns
+
+If your nouns have a number of aliases, you can define them alongside `ValidArgs` using `ArgAliases`:
+
+```go
+argAliases []string = { "pods", "nodes", "services", "svc", "replicationcontrollers", "rc" }
+
+cmd := &cobra.Command{
+ ...
+ ValidArgs: validArgs,
+ ArgAliases: argAliases
+}
+```
+
+The aliases are not shown to the user on tab completion, but they are accepted as valid nouns by
+the completion algorithm if entered manually, e.g. in:
+
+```bash
+# kubectl get rc [tab][tab]
+backend frontend database
+```
+
+Note that without declaring `rc` as an alias, the completion algorithm would show the list of nouns
+in this example again instead of the replication controllers.
+
+### Dynamic completion of nouns
+
+In some cases it is not possible to provide a list of possible completions in advance. Instead, the list of completions must be determined at execution-time. Cobra provides two ways of defining such dynamic completion of nouns. Note that both these methods can be used alongside each other as long as they are not both used for the same command.
+
+**Note**: *Custom Completions written in Go* will automatically work for other shell-completion scripts (e.g., Fish shell), while *Custom Completions written in Bash* will only work for Bash shell-completion. It is therefore recommended to use *Custom Completions written in Go*.
+
+#### 1. Custom completions of nouns written in Go
+
+In a similar fashion as for static completions, you can use the `ValidArgsFunction` field to provide a Go function that Cobra will execute when it needs the list of completion choices for the nouns of a command. Note that either `ValidArgs` or `ValidArgsFunction` can be used for a single cobra command, but not both.
+Simplified code from `helm status` looks like:
+
+```go
+cmd := &cobra.Command{
+ Use: "status RELEASE_NAME",
+ Short: "Display the status of the named release",
+ Long: status_long,
+ RunE: func(cmd *cobra.Command, args []string) {
+ RunGet(args[0])
+ },
+ ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ if len(args) != 0 {
+ return nil, cobra.ShellCompDirectiveNoFileComp
+ }
+ return getReleasesFromCluster(toComplete), cobra.ShellCompDirectiveNoFileComp
+ },
+}
+```
+Where `getReleasesFromCluster()` is a Go function that obtains the list of current Helm releases running on the Kubernetes cluster.
+Notice we put the `ValidArgsFunction` on the `status` subcommand. Let's assume the Helm releases on the cluster are: `harbor`, `notary`, `rook` and `thanos` then this dynamic completion will give results like
+
+```bash
+# helm status [tab][tab]
+harbor notary rook thanos
+```
+You may have noticed the use of `cobra.ShellCompDirective`. These directives are bit fields that allow you to control some shell completion behaviors for your particular completion. You can combine them with the bit-or operator such as `cobra.ShellCompDirectiveNoSpace | cobra.ShellCompDirectiveNoFileComp`
+```go
+// Indicates an error occurred and completions should be ignored.
+ShellCompDirectiveError
+// Indicates that the shell should not add a space after the completion,
+// even if there is a single completion provided.
+ShellCompDirectiveNoSpace
+// Indicates that the shell should not provide file completion even when
+// no completion is provided.
+// This currently does not work for zsh or bash < 4
+ShellCompDirectiveNoFileComp
+// Indicates that the shell will perform its default behavior after completions
+// have been provided (this implies !ShellCompDirectiveNoSpace && !ShellCompDirectiveNoFileComp).
+ShellCompDirectiveDefault
+```
+
+When using the `ValidArgsFunction`, Cobra will call your registered function after having parsed all flags and arguments provided in the command-line. You therefore don't need to do this parsing yourself. For example, when a user calls `helm status --namespace my-rook-ns [tab][tab]`, Cobra will call your registered `ValidArgsFunction` after having parsed the `--namespace` flag, as it would have done when calling the `RunE` function.
+
+##### Debugging
+
+Cobra achieves dynamic completions written in Go through the use of a hidden command called by the completion script. To debug your Go completion code, you can call this hidden command directly:
+```bash
+# helm __complete status har
+harbor
+:4
+Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr
+```
+***Important:*** If the noun to complete is empty, you must pass an empty parameter to the `__complete` command:
+```bash
+# helm __complete status ""
+harbor
+notary
+rook
+thanos
+:4
+Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr
+```
+Calling the `__complete` command directly allows you to run the Go debugger to troubleshoot your code. You can also add printouts to your code; Cobra provides the following functions to use for printouts in Go completion code:
+```go
+// Prints to the completion script debug file (if BASH_COMP_DEBUG_FILE
+// is set to a file path) and optionally prints to stderr.
+cobra.CompDebug(msg string, printToStdErr bool) {
+cobra.CompDebugln(msg string, printToStdErr bool)
+
+// Prints to the completion script debug file (if BASH_COMP_DEBUG_FILE
+// is set to a file path) and to stderr.
+cobra.CompError(msg string)
+cobra.CompErrorln(msg string)
+```
+***Important:*** You should **not** leave traces that print to stdout in your completion code as they will be interpreted as completion choices by the completion script. Instead, use the cobra-provided debugging traces functions mentioned above.
+
+#### 2. Custom completions of nouns written in Bash
+
+This method allows you to inject bash functions into the completion script. Those bash functions are responsible for providing the completion choices for your own completions.
Some more actual code that works in kubernetes:
@@ -111,58 +253,6 @@ Find more information at https://github.com/GoogleCloudPlatform/kubernetes.`,
The `BashCompletionFunction` option is really only valid/useful on the root command. Doing the above will cause `__kubectl_custom_func()` (`___custom_func()`) to be called when the built in processor was unable to find a solution. In the case of kubernetes a valid command might look something like `kubectl get pod [mypod]`. If you type `kubectl get pod [tab][tab]` the `__kubectl_customc_func()` will run because the cobra.Command only understood "kubectl" and "get." `__kubectl_custom_func()` will see that the cobra.Command is "kubectl_get" and will thus call another helper `__kubectl_get_resource()`. `__kubectl_get_resource` will look at the 'nouns' collected. In our example the only noun will be `pod`. So it will call `__kubectl_parse_get pod`. `__kubectl_parse_get` will actually call out to kubernetes and get any pods. It will then set `COMPREPLY` to valid pods!
-## Have the completions code complete your 'nouns'
-
-In the above example "pod" was assumed to already be typed. But if you want `kubectl get [tab][tab]` to show a list of valid "nouns" you have to set them. Simplified code from `kubectl get` looks like:
-
-```go
-validArgs []string = { "pod", "node", "service", "replicationcontroller" }
-
-cmd := &cobra.Command{
- Use: "get [(-o|--output=)json|yaml|template|...] (RESOURCE [NAME] | RESOURCE/NAME ...)",
- Short: "Display one or many resources",
- Long: get_long,
- Example: get_example,
- Run: func(cmd *cobra.Command, args []string) {
- err := RunGet(f, out, cmd, args)
- util.CheckErr(err)
- },
- ValidArgs: validArgs,
-}
-```
-
-Notice we put the "ValidArgs" on the "get" subcommand. Doing so will give results like
-
-```bash
-# kubectl get [tab][tab]
-node pod replicationcontroller service
-```
-
-## Plural form and shortcuts for nouns
-
-If your nouns have a number of aliases, you can define them alongside `ValidArgs` using `ArgAliases`:
-
-```go
-argAliases []string = { "pods", "nodes", "services", "svc", "replicationcontrollers", "rc" }
-
-cmd := &cobra.Command{
- ...
- ValidArgs: validArgs,
- ArgAliases: argAliases
-}
-```
-
-The aliases are not shown to the user on tab completion, but they are accepted as valid nouns by
-the completion algorithm if entered manually, e.g. in:
-
-```bash
-# kubectl get rc [tab][tab]
-backend frontend database
-```
-
-Note that without declaring `rc` as an alias, the completion algorithm would show the list of nouns
-in this example again instead of the replication controllers.
-
## Mark flags as required
Most of the time completions will only show subcommands. But if a flag is required to make a subcommand work, you probably want it to show up when the user types [tab][tab]. Marking a flag as 'Required' is incredibly easy.
@@ -211,8 +301,45 @@ So while there are many other files in the CWD it only shows me subdirs and thos
# Specify custom flag completion
-Similar to the filename completion and filtering using cobra.BashCompFilenameExt, you can specify
-a custom flag completion function with cobra.BashCompCustom:
+As for nouns, Cobra provides two ways of defining dynamic completion of flags. Note that both these methods can be used along-side each other as long as they are not both used for the same flag.
+
+**Note**: *Custom Completions written in Go* will automatically work for other shell-completion scripts (e.g., Fish shell), while *Custom Completions written in Bash* will only work for Bash shell-completion. It is therefore recommended to use *Custom Completions written in Go*.
+
+## 1. Custom completions of flags written in Go
+
+To provide a Go function that Cobra will execute when it needs the list of completion choices for a flag, you must register the function in the following manner:
+
+```go
+flagName := "output"
+cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ return []string{"json", "table", "yaml"}, cobra.ShellCompDirectiveNoFileComp
+})
+```
+Notice that calling `RegisterFlagCompletionFunc()` is done through the `command` with which the flag is associated. In our example this dynamic completion will give results like so:
+
+```bash
+# helm status --output [tab][tab]
+json table yaml
+```
+
+### Debugging
+
+You can also easily debug your Go completion code for flags:
+```bash
+# helm __complete status --output ""
+json
+table
+yaml
+:4
+Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr
+```
+***Important:*** You should **not** leave traces that print to stdout in your completion code as they will be interpreted as completion choices by the completion script. Instead, use the cobra-provided debugging traces functions mentioned in the above section.
+
+## 2. Custom completions of flags written in Bash
+
+Alternatively, you can use bash code for flag custom completion. Similar to the filename
+completion and filtering using `cobra.BashCompFilenameExt`, you can specify
+a custom flag completion bash function with `cobra.BashCompCustom`:
```go
annotation := make(map[string][]string)
@@ -226,7 +353,7 @@ a custom flag completion function with cobra.BashCompCustom:
cmd.Flags().AddFlag(flag)
```
-In addition add the `__handle_namespace_flag` implementation in the `BashCompletionFunction`
+In addition add the `__kubectl_get_namespaces` implementation in the `BashCompletionFunction`
value, e.g.:
```bash
diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go
index 6505c07..d01becc 100644
--- a/vendor/github.com/spf13/cobra/cobra.go
+++ b/vendor/github.com/spf13/cobra/cobra.go
@@ -52,7 +52,7 @@ var EnableCommandSorting = true
// if the CLI is started from explorer.exe.
// To disable the mousetrap, just set this variable to blank string ("").
// Works only on Microsoft Windows.
-var MousetrapHelpText string = `This is a command line tool.
+var MousetrapHelpText = `This is a command line tool.
You need to open cmd.exe and run it from there.
`
@@ -61,7 +61,7 @@ You need to open cmd.exe and run it from there.
// if the CLI is started from explorer.exe. Set to 0 to wait for the return key to be pressed.
// To disable the mousetrap, just set MousetrapHelpText to blank string ("").
// Works only on Microsoft Windows.
-var MousetrapDisplayDuration time.Duration = 5 * time.Second
+var MousetrapDisplayDuration = 5 * time.Second
// AddTemplateFunc adds a template function that's available to Usage and Help
// template generation.
diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go
index c7e8983..88e6ed7 100644
--- a/vendor/github.com/spf13/cobra/command.go
+++ b/vendor/github.com/spf13/cobra/command.go
@@ -17,6 +17,7 @@ package cobra
import (
"bytes"
+ "context"
"fmt"
"io"
"os"
@@ -56,6 +57,10 @@ type Command struct {
// ValidArgs is list of all valid non-flag arguments that are accepted in bash completions
ValidArgs []string
+ // ValidArgsFunction is an optional function that provides valid non-flag arguments for bash completion.
+ // It is a dynamic version of using ValidArgs.
+ // Only one of ValidArgs and ValidArgsFunction can be used for a command.
+ ValidArgsFunction func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)
// Expected arguments
Args PositionalArgs
@@ -80,7 +85,8 @@ type Command struct {
// Version defines the version for this command. If this value is non-empty and the command does not
// define a "version" flag, a "version" boolean flag will be added to the command and, if specified,
- // will print content of the "Version" variable.
+ // will print content of the "Version" variable. A shorthand "v" flag will also be added if the
+ // command does not define one.
Version string
// The *Run functions are executed in the following order:
@@ -140,9 +146,11 @@ type Command struct {
// TraverseChildren parses flags on all parents before executing child command.
TraverseChildren bool
- //FParseErrWhitelist flag parse errors to be ignored
+ // FParseErrWhitelist flag parse errors to be ignored
FParseErrWhitelist FParseErrWhitelist
+ ctx context.Context
+
// commands is the list of commands supported by this program.
commands []*Command
// parent is a parent command for this command.
@@ -202,6 +210,12 @@ type Command struct {
errWriter io.Writer
}
+// Context returns underlying command context. If command wasn't
+// executed with ExecuteContext Context returns Background context.
+func (c *Command) Context() context.Context {
+ return c.ctx
+}
+
// SetArgs sets arguments for the command. It is set to os.Args[1:] by default, if desired, can be overridden
// particularly useful when testing.
func (c *Command) SetArgs(a []string) {
@@ -228,7 +242,7 @@ func (c *Command) SetErr(newErr io.Writer) {
c.errWriter = newErr
}
-// SetOut sets the source for input data
+// SetIn sets the source for input data
// If newIn is nil, os.Stdin is used.
func (c *Command) SetIn(newIn io.Reader) {
c.inReader = newIn
@@ -297,7 +311,7 @@ func (c *Command) ErrOrStderr() io.Writer {
return c.getErr(os.Stderr)
}
-// ErrOrStderr returns output to stderr
+// InOrStdin returns input to stdin
func (c *Command) InOrStdin() io.Reader {
return c.getIn(os.Stdin)
}
@@ -369,6 +383,8 @@ func (c *Command) HelpFunc() func(*Command, []string) {
}
return func(c *Command, a []string) {
c.mergePersistentFlags()
+ // The help should be sent to stdout
+ // See https://github.com/spf13/cobra/issues/1002
err := tmpl(c.OutOrStdout(), c.HelpTemplate(), c)
if err != nil {
c.Println(err)
@@ -857,6 +873,13 @@ func (c *Command) preRun() {
}
}
+// ExecuteContext is the same as Execute(), but sets the ctx on the command.
+// Retrieve ctx by calling cmd.Context() inside your *Run lifecycle functions.
+func (c *Command) ExecuteContext(ctx context.Context) error {
+ c.ctx = ctx
+ return c.Execute()
+}
+
// Execute uses the args (os.Args[1:] by default)
// and run through the command tree finding appropriate matches
// for commands and then corresponding flags.
@@ -867,6 +890,10 @@ func (c *Command) Execute() error {
// ExecuteC executes the command.
func (c *Command) ExecuteC() (cmd *Command, err error) {
+ if c.ctx == nil {
+ c.ctx = context.Background()
+ }
+
// Regardless of what command execute is called on, run on Root only
if c.HasParent() {
return c.Root().ExecuteC()
@@ -888,6 +915,9 @@ func (c *Command) ExecuteC() (cmd *Command, err error) {
args = os.Args[1:]
}
+ // initialize the hidden command to be used for bash completion
+ c.initCompleteCmd(args)
+
var flags []string
if c.TraverseChildren {
cmd, flags, err = c.Traverse(args)
@@ -911,6 +941,12 @@ func (c *Command) ExecuteC() (cmd *Command, err error) {
cmd.commandCalledAs.name = cmd.Name()
}
+ // We have to pass global context to children command
+ // if context is present on the parent command.
+ if cmd.ctx == nil {
+ cmd.ctx = c.ctx
+ }
+
err = cmd.execute(flags)
if err != nil {
// Always show help if requested, even if SilenceErrors is in
@@ -994,7 +1030,11 @@ func (c *Command) InitDefaultVersionFlag() {
} else {
usage += c.Name()
}
- c.Flags().Bool("version", false, usage)
+ if c.Flags().ShorthandLookup("v") == nil {
+ c.Flags().BoolP("version", "v", false, usage)
+ } else {
+ c.Flags().Bool("version", false, usage)
+ }
}
}
@@ -1547,7 +1587,7 @@ func (c *Command) ParseFlags(args []string) error {
beforeErrorBufLen := c.flagErrorBuf.Len()
c.mergePersistentFlags()
- //do it here after merging all flags and just before parse
+ // do it here after merging all flags and just before parse
c.Flags().ParseErrorsWhitelist = flag.ParseErrorsWhitelist(c.FParseErrWhitelist)
err := c.Flags().Parse(args)
diff --git a/vendor/github.com/spf13/cobra/custom_completions.go b/vendor/github.com/spf13/cobra/custom_completions.go
new file mode 100644
index 0000000..ba57327
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/custom_completions.go
@@ -0,0 +1,384 @@
+package cobra
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/spf13/pflag"
+)
+
+const (
+ // ShellCompRequestCmd is the name of the hidden command that is used to request
+ // completion results from the program. It is used by the shell completion scripts.
+ ShellCompRequestCmd = "__complete"
+ // ShellCompNoDescRequestCmd is the name of the hidden command that is used to request
+ // completion results without their description. It is used by the shell completion scripts.
+ ShellCompNoDescRequestCmd = "__completeNoDesc"
+)
+
+// Global map of flag completion functions.
+var flagCompletionFunctions = map[*pflag.Flag]func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective){}
+
+// ShellCompDirective is a bit map representing the different behaviors the shell
+// can be instructed to have once completions have been provided.
+type ShellCompDirective int
+
+const (
+ // ShellCompDirectiveError indicates an error occurred and completions should be ignored.
+ ShellCompDirectiveError ShellCompDirective = 1 << iota
+
+ // ShellCompDirectiveNoSpace indicates that the shell should not add a space
+ // after the completion even if there is a single completion provided.
+ ShellCompDirectiveNoSpace
+
+ // ShellCompDirectiveNoFileComp indicates that the shell should not provide
+ // file completion even when no completion is provided.
+ // This currently does not work for zsh or bash < 4
+ ShellCompDirectiveNoFileComp
+
+ // ShellCompDirectiveDefault indicates to let the shell perform its default
+ // behavior after completions have been provided.
+ ShellCompDirectiveDefault ShellCompDirective = 0
+)
+
+// RegisterFlagCompletionFunc should be called to register a function to provide completion for a flag.
+func (c *Command) RegisterFlagCompletionFunc(flagName string, f func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)) error {
+ flag := c.Flag(flagName)
+ if flag == nil {
+ return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' does not exist", flagName)
+ }
+ if _, exists := flagCompletionFunctions[flag]; exists {
+ return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' already registered", flagName)
+ }
+ flagCompletionFunctions[flag] = f
+ return nil
+}
+
+// Returns a string listing the different directive enabled in the specified parameter
+func (d ShellCompDirective) string() string {
+ var directives []string
+ if d&ShellCompDirectiveError != 0 {
+ directives = append(directives, "ShellCompDirectiveError")
+ }
+ if d&ShellCompDirectiveNoSpace != 0 {
+ directives = append(directives, "ShellCompDirectiveNoSpace")
+ }
+ if d&ShellCompDirectiveNoFileComp != 0 {
+ directives = append(directives, "ShellCompDirectiveNoFileComp")
+ }
+ if len(directives) == 0 {
+ directives = append(directives, "ShellCompDirectiveDefault")
+ }
+
+ if d > ShellCompDirectiveError+ShellCompDirectiveNoSpace+ShellCompDirectiveNoFileComp {
+ return fmt.Sprintf("ERROR: unexpected ShellCompDirective value: %d", d)
+ }
+ return strings.Join(directives, ", ")
+}
+
+// Adds a special hidden command that can be used to request custom completions.
+func (c *Command) initCompleteCmd(args []string) {
+ completeCmd := &Command{
+ Use: fmt.Sprintf("%s [command-line]", ShellCompRequestCmd),
+ Aliases: []string{ShellCompNoDescRequestCmd},
+ DisableFlagsInUseLine: true,
+ Hidden: true,
+ DisableFlagParsing: true,
+ Args: MinimumNArgs(1),
+ Short: "Request shell completion choices for the specified command-line",
+ Long: fmt.Sprintf("%[2]s is a special command that is used by the shell completion logic\n%[1]s",
+ "to request completion choices for the specified command-line.", ShellCompRequestCmd),
+ Run: func(cmd *Command, args []string) {
+ finalCmd, completions, directive, err := cmd.getCompletions(args)
+ if err != nil {
+ CompErrorln(err.Error())
+ // Keep going for multiple reasons:
+ // 1- There could be some valid completions even though there was an error
+ // 2- Even without completions, we need to print the directive
+ }
+
+ noDescriptions := (cmd.CalledAs() == ShellCompNoDescRequestCmd)
+ for _, comp := range completions {
+ if noDescriptions {
+ // Remove any description that may be included following a tab character.
+ comp = strings.Split(comp, "\t")[0]
+ }
+ // Print each possible completion to stdout for the completion script to consume.
+ fmt.Fprintln(finalCmd.OutOrStdout(), comp)
+ }
+
+ if directive > ShellCompDirectiveError+ShellCompDirectiveNoSpace+ShellCompDirectiveNoFileComp {
+ directive = ShellCompDirectiveDefault
+ }
+
+ // As the last printout, print the completion directive for the completion script to parse.
+ // The directive integer must be that last character following a single colon (:).
+ // The completion script expects :<directive>
+ fmt.Fprintf(finalCmd.OutOrStdout(), ":%d\n", directive)
+
+ // Print some helpful info to stderr for the user to understand.
+ // Output from stderr must be ignored by the completion script.
+ fmt.Fprintf(finalCmd.ErrOrStderr(), "Completion ended with directive: %s\n", directive.string())
+ },
+ }
+ c.AddCommand(completeCmd)
+ subCmd, _, err := c.Find(args)
+ if err != nil || subCmd.Name() != ShellCompRequestCmd {
+ // Only create this special command if it is actually being called.
+ // This reduces possible side-effects of creating such a command;
+ // for example, having this command would cause problems to a
+ // cobra program that only consists of the root command, since this
+ // command would cause the root command to suddenly have a subcommand.
+ c.RemoveCommand(completeCmd)
+ }
+}
+
+func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDirective, error) {
+ var completions []string
+
+ // The last argument, which is not completely typed by the user,
+ // should not be part of the list of arguments
+ toComplete := args[len(args)-1]
+ trimmedArgs := args[:len(args)-1]
+
+ // Find the real command for which completion must be performed
+ finalCmd, finalArgs, err := c.Root().Find(trimmedArgs)
+ if err != nil {
+ // Unable to find the real command. E.g., someInvalidCmd
+ return c, completions, ShellCompDirectiveDefault, fmt.Errorf("Unable to find a command for arguments: %v", trimmedArgs)
+ }
+
+ // When doing completion of a flag name, as soon as an argument starts with
+ // a '-' we know it is a flag. We cannot use isFlagArg() here as it requires
+ // the flag to be complete
+ if len(toComplete) > 0 && toComplete[0] == '-' && !strings.Contains(toComplete, "=") {
+ // We are completing a flag name
+ finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ completions = append(completions, getFlagNameCompletions(flag, toComplete)...)
+ })
+ finalCmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ completions = append(completions, getFlagNameCompletions(flag, toComplete)...)
+ })
+
+ directive := ShellCompDirectiveDefault
+ if len(completions) > 0 {
+ if strings.HasSuffix(completions[0], "=") {
+ directive = ShellCompDirectiveNoSpace
+ }
+ }
+ return finalCmd, completions, directive, nil
+ }
+
+ var flag *pflag.Flag
+ if !finalCmd.DisableFlagParsing {
+ // We only do flag completion if we are allowed to parse flags
+ // This is important for commands which have requested to do their own flag completion.
+ flag, finalArgs, toComplete, err = checkIfFlagCompletion(finalCmd, finalArgs, toComplete)
+ if err != nil {
+ // Error while attempting to parse flags
+ return finalCmd, completions, ShellCompDirectiveDefault, err
+ }
+ }
+
+ if flag == nil {
+ // Complete subcommand names
+ for _, subCmd := range finalCmd.Commands() {
+ if subCmd.IsAvailableCommand() && strings.HasPrefix(subCmd.Name(), toComplete) {
+ completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short))
+ }
+ }
+
+ if len(finalCmd.ValidArgs) > 0 {
+ // Always complete ValidArgs, even if we are completing a subcommand name.
+ // This is for commands that have both subcommands and ValidArgs.
+ for _, validArg := range finalCmd.ValidArgs {
+ if strings.HasPrefix(validArg, toComplete) {
+ completions = append(completions, validArg)
+ }
+ }
+
+ // If there are ValidArgs specified (even if they don't match), we stop completion.
+ // Only one of ValidArgs or ValidArgsFunction can be used for a single command.
+ return finalCmd, completions, ShellCompDirectiveNoFileComp, nil
+ }
+
+ // Always let the logic continue so as to add any ValidArgsFunction completions,
+ // even if we already found sub-commands.
+ // This is for commands that have subcommands but also specify a ValidArgsFunction.
+ }
+
+ // Parse the flags and extract the arguments to prepare for calling the completion function
+ if err = finalCmd.ParseFlags(finalArgs); err != nil {
+ return finalCmd, completions, ShellCompDirectiveDefault, fmt.Errorf("Error while parsing flags from args %v: %s", finalArgs, err.Error())
+ }
+
+ // We only remove the flags from the arguments if DisableFlagParsing is not set.
+ // This is important for commands which have requested to do their own flag completion.
+ if !finalCmd.DisableFlagParsing {
+ finalArgs = finalCmd.Flags().Args()
+ }
+
+ // Find the completion function for the flag or command
+ var completionFn func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)
+ if flag != nil {
+ completionFn = flagCompletionFunctions[flag]
+ } else {
+ completionFn = finalCmd.ValidArgsFunction
+ }
+ if completionFn == nil {
+ // Go custom completion not supported/needed for this flag or command
+ return finalCmd, completions, ShellCompDirectiveDefault, nil
+ }
+
+ // Call the registered completion function to get the completions
+ comps, directive := completionFn(finalCmd, finalArgs, toComplete)
+ completions = append(completions, comps...)
+ return finalCmd, completions, directive, nil
+}
+
+func getFlagNameCompletions(flag *pflag.Flag, toComplete string) []string {
+ if nonCompletableFlag(flag) {
+ return []string{}
+ }
+
+ var completions []string
+ flagName := "--" + flag.Name
+ if strings.HasPrefix(flagName, toComplete) {
+ // Flag without the =
+ completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage))
+
+ if len(flag.NoOptDefVal) == 0 {
+ // Flag requires a value, so it can be suffixed with =
+ flagName += "="
+ completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage))
+ }
+ }
+
+ flagName = "-" + flag.Shorthand
+ if len(flag.Shorthand) > 0 && strings.HasPrefix(flagName, toComplete) {
+ completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage))
+ }
+
+ return completions
+}
+
+func checkIfFlagCompletion(finalCmd *Command, args []string, lastArg string) (*pflag.Flag, []string, string, error) {
+ var flagName string
+ trimmedArgs := args
+ flagWithEqual := false
+ if isFlagArg(lastArg) {
+ if index := strings.Index(lastArg, "="); index >= 0 {
+ flagName = strings.TrimLeft(lastArg[:index], "-")
+ lastArg = lastArg[index+1:]
+ flagWithEqual = true
+ } else {
+ return nil, nil, "", errors.New("Unexpected completion request for flag")
+ }
+ }
+
+ if len(flagName) == 0 {
+ if len(args) > 0 {
+ prevArg := args[len(args)-1]
+ if isFlagArg(prevArg) {
+ // Only consider the case where the flag does not contain an =.
+ // If the flag contains an = it means it has already been fully processed,
+ // so we don't need to deal with it here.
+ if index := strings.Index(prevArg, "="); index < 0 {
+ flagName = strings.TrimLeft(prevArg, "-")
+
+ // Remove the uncompleted flag or else there could be an error created
+ // for an invalid value for that flag
+ trimmedArgs = args[:len(args)-1]
+ }
+ }
+ }
+ }
+
+ if len(flagName) == 0 {
+ // Not doing flag completion
+ return nil, trimmedArgs, lastArg, nil
+ }
+
+ flag := findFlag(finalCmd, flagName)
+ if flag == nil {
+ // Flag not supported by this command, nothing to complete
+ err := fmt.Errorf("Subcommand '%s' does not support flag '%s'", finalCmd.Name(), flagName)
+ return nil, nil, "", err
+ }
+
+ if !flagWithEqual {
+ if len(flag.NoOptDefVal) != 0 {
+ // We had assumed dealing with a two-word flag but the flag is a boolean flag.
+ // In that case, there is no value following it, so we are not really doing flag completion.
+ // Reset everything to do noun completion.
+ trimmedArgs = args
+ flag = nil
+ }
+ }
+
+ return flag, trimmedArgs, lastArg, nil
+}
+
+func findFlag(cmd *Command, name string) *pflag.Flag {
+ flagSet := cmd.Flags()
+ if len(name) == 1 {
+ // First convert the short flag into a long flag
+ // as the cmd.Flag() search only accepts long flags
+ if short := flagSet.ShorthandLookup(name); short != nil {
+ name = short.Name
+ } else {
+ set := cmd.InheritedFlags()
+ if short = set.ShorthandLookup(name); short != nil {
+ name = short.Name
+ } else {
+ return nil
+ }
+ }
+ }
+ return cmd.Flag(name)
+}
+
+// CompDebug prints the specified string to the same file as where the
+// completion script prints its logs.
+// Note that completion printouts should never be on stdout as they would
+// be wrongly interpreted as actual completion choices by the completion script.
+func CompDebug(msg string, printToStdErr bool) {
+ msg = fmt.Sprintf("[Debug] %s", msg)
+
+ // Such logs are only printed when the user has set the environment
+ // variable BASH_COMP_DEBUG_FILE to the path of some file to be used.
+ if path := os.Getenv("BASH_COMP_DEBUG_FILE"); path != "" {
+ f, err := os.OpenFile(path,
+ os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
+ if err == nil {
+ defer f.Close()
+ f.WriteString(msg)
+ }
+ }
+
+ if printToStdErr {
+ // Must print to stderr; use Fprint since msg is data and may contain '%' verbs.
+ fmt.Fprint(os.Stderr, msg)
+ }
+}
+
+// CompDebugln prints the specified string with a newline at the end
+// to the same file as where the completion script prints its logs.
+// Such logs are only printed when the user has set the environment
+// variable BASH_COMP_DEBUG_FILE to the path of some file to be used.
+func CompDebugln(msg string, printToStdErr bool) {
+ CompDebug(fmt.Sprintf("%s\n", msg), printToStdErr)
+}
+
+// CompError prints the specified completion message to stderr.
+func CompError(msg string) {
+ msg = fmt.Sprintf("[Error] %s", msg)
+ CompDebug(msg, true)
+}
+
+// CompErrorln prints the specified completion message to stderr with a newline at the end.
+func CompErrorln(msg string) {
+ CompError(fmt.Sprintf("%s\n", msg))
+}
diff --git a/vendor/github.com/spf13/cobra/fish_completions.go b/vendor/github.com/spf13/cobra/fish_completions.go
new file mode 100644
index 0000000..c83609c
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/fish_completions.go
@@ -0,0 +1,172 @@
+package cobra
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+)
+
+func genFishComp(buf *bytes.Buffer, name string, includeDesc bool) {
+ compCmd := ShellCompRequestCmd
+ if !includeDesc {
+ compCmd = ShellCompNoDescRequestCmd
+ }
+ buf.WriteString(fmt.Sprintf("# fish completion for %-36s -*- shell-script -*-\n", name))
+ buf.WriteString(fmt.Sprintf(`
+function __%[1]s_debug
+ set file "$BASH_COMP_DEBUG_FILE"
+ if test -n "$file"
+ echo "$argv" >> $file
+ end
+end
+
+function __%[1]s_perform_completion
+ __%[1]s_debug "Starting __%[1]s_perform_completion with: $argv"
+
+ set args (string split -- " " "$argv")
+ set lastArg "$args[-1]"
+
+ __%[1]s_debug "args: $args"
+ __%[1]s_debug "last arg: $lastArg"
+
+ set emptyArg ""
+ if test -z "$lastArg"
+ __%[1]s_debug "Setting emptyArg"
+ set emptyArg \"\"
+ end
+ __%[1]s_debug "emptyArg: $emptyArg"
+
+ set requestComp "$args[1] %[2]s $args[2..-1] $emptyArg"
+ __%[1]s_debug "Calling $requestComp"
+
+ set results (eval $requestComp 2> /dev/null)
+ set comps $results[1..-2]
+ set directiveLine $results[-1]
+
+ # For Fish, when completing a flag with an = (e.g., -n=