gofmt
puellanivis committed Oct 29, 2024
1 parent 4ff6adb commit 5a107ff
Showing 10 changed files with 31 additions and 31 deletions.
2 changes: 1 addition & 1 deletion client.go
@@ -297,7 +297,7 @@ func WithMaxInflight(count int) ClientOption {
 // It will generate an error if one attempts to set the length beyond the 2^32-1 limitation of the sftp protocol.
 // There may also be compatibility issues if setting the value above 2^31-1.
 func WithMaxDataLength(length int) ClientOption {
-	withPktLen := WithMaxPacketLength(length + (sshfx.DefaultMaxPacketLength - sshfx.DefaultMaxDataLength))
+	withPktLen := WithMaxPacketLength(length + sshfx.MaxPacketLengthOverhead)

 	return func(cl *Client) error {
 		if err := withPktLen(cl); err != nil {
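The client.go hunk shows the functional-option composition this package's ClientOption values use: WithMaxDataLength derives a packet length and delegates to WithMaxPacketLength before applying its own setting. A minimal, self-contained sketch of that idiom, with hypothetical names standing in for the real Client and options:

package main

import "fmt"

// config stands in for the real Client; option mirrors the shape of ClientOption.
type config struct{ maxPacket, maxData int }

type option func(*config) error

// withMaxPacket is the lower-level option being wrapped.
func withMaxPacket(n int) option {
	return func(c *config) error {
		if n <= 0 {
			return fmt.Errorf("invalid packet length: %d", n)
		}
		c.maxPacket = n
		return nil
	}
}

// withMaxData derives the packet length from the data length, applies the
// wrapped option first, and only then records its own setting.
func withMaxData(n int) option {
	const overhead = 34000 - 32768 // mirrors MaxPacketLengthOverhead
	inner := withMaxPacket(n + overhead)

	return func(c *config) error {
		if err := inner(c); err != nil {
			return err
		}
		c.maxData = n
		return nil
	}
}

func main() {
	var c config
	if err := withMaxData(32768)(&c); err != nil {
		panic(err)
	}
	fmt.Println(c.maxPacket, c.maxData) // 34000 32768
}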
9 changes: 6 additions & 3 deletions encoding/ssh/filexfer/extended_packets.go
@@ -13,7 +13,7 @@ type ExtendedData = interface {
 }

 // ExtendedRequestPacket defines the interface an extended request packet should implement.
-type ExtendedRequestPacket interface{
+type ExtendedRequestPacket interface {
 	ExtendedData

 	// Type ensures it is packet-like, it should always return PacketTypeExtended.
@@ -24,7 +24,7 @@ type ExtendedRequestPacket interface{
 }

 // extendedDataConstructor defines a function that returns a new(ArbitraryExtendedPacket).
-type extendedDataConstructor struct{
+type extendedDataConstructor struct {
 	typ reflect.Type
 	new func() ExtendedData
 }
@@ -37,7 +37,10 @@ var extendedPacketTypes struct {
 // RegisterExtendedPacketType defines a specific ExtendedDataConstructor for the given extended request name.
 //
 // This operation is idempotent so long as the ExtendedRequest name is only being registered with the same type.
-func RegisterExtendedPacketType[PKT any, EXT interface{ ExtendedRequestPacket ; *PKT }]() {
+func RegisterExtendedPacketType[PKT any, EXT interface {
+	ExtendedRequestPacket
+	*PKT
+}]() {
 	extendedPacketTypes.mu.Lock()
 	defer extendedPacketTypes.mu.Unlock()

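The reformatted RegisterExtendedPacketType signature uses the pointer-constraint idiom, interface { ExtendedRequestPacket; *PKT }, so the concrete packet struct can be named as the only explicit type argument while its pointer type is inferred. A self-contained illustration of the idiom using placeholder interfaces rather than the sftp types:

package main

import "fmt"

// packet stands in for ExtendedRequestPacket; it is not the sftp interface.
type packet interface {
	Name() string
}

// register mirrors the shape of RegisterExtendedPacketType: PKT is the concrete
// struct type, and EXT constrains *PKT to implement packet, so the function can
// construct new values via new(PKT) and still treat them as packets.
func register[PKT any, EXT interface {
	packet
	*PKT
}]() packet {
	return EXT(new(PKT))
}

// fooPacket is a hypothetical extended packet; only its pointer implements packet.
type fooPacket struct{}

func (*fooPacket) Name() string { return "foo@example.com" }

func main() {
	p := register[fooPacket]() // EXT = *fooPacket is inferred from the constraint
	fmt.Println(p.Name())
}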
6 changes: 3 additions & 3 deletions encoding/ssh/filexfer/filexfer.go
@@ -49,7 +49,7 @@ func ComposePacket(header, payload []byte, err error) ([]byte, error) {
 // Default length values,
 // Defined in draft-ietf-secsh-filexfer-02 section 3.
 const (
-	DefaultMaxPacketLength = 34000
-	DefaultMaxDataLength = 32768
-	MaxPacketLengthOverhead = DefaultMaxPacketLength-DefaultMaxDataLength
+	DefaultMaxPacketLength  = 34000
+	DefaultMaxDataLength    = 32768
+	MaxPacketLengthOverhead = DefaultMaxPacketLength - DefaultMaxDataLength
 )
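MaxPacketLengthOverhead names the gap between the two draft defaults, which is what client.go previously computed inline: 34000 - 32768 = 1232 bytes of headroom for the packet framing around the data payload. A quick check of that arithmetic with stand-in constants:

package main

import "fmt"

func main() {
	// Stand-ins mirroring the exported constants from filexfer.go.
	const (
		defaultMaxPacketLength  = 34000 // draft-ietf-secsh-filexfer-02, section 3
		defaultMaxDataLength    = 32768
		maxPacketLengthOverhead = defaultMaxPacketLength - defaultMaxDataLength
	)

	fmt.Println(maxPacketLengthOverhead)                         // 1232
	fmt.Println(defaultMaxDataLength + maxPacketLengthOverhead)  // 34000
}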
6 changes: 2 additions & 4 deletions encoding/ssh/filexfer/fxp.go
@@ -126,8 +126,8 @@ func (f PacketType) String() string {
 }

 var (
-	readPool = pool.NewPool[ReadPacket](64)
-	writePool = pool.NewPool[WritePacket](64)
+	readPool   = pool.NewPool[ReadPacket](64)
+	writePool  = pool.NewPool[WritePacket](64)
 	wrDataPool = pool.NewSlicePool[[]byte](64, DefaultMaxDataLength)
 )

@@ -207,5 +207,3 @@ func newPacketFromType(typ PacketType) (Packet, error) {

 	return newRequestPacketFromType(typ)
 }
-
-
2 changes: 1 addition & 1 deletion encoding/ssh/filexfer/response_packets.go
@@ -251,7 +251,7 @@ func (p *PathPseudoPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
 		Path: e.Filename,
 	}

-	for range count-1 {
+	for range count - 1 {
 		var e NameEntry
 		if err := e.UnmarshalFrom(buf); err != nil {
 			return err
6 changes: 3 additions & 3 deletions encoding/ssh/filexfer/response_packets_test.go
@@ -251,8 +251,8 @@ var _ Packet = &PathPseudoPacket{}

 func TestPathPseudoPacket(t *testing.T) {
 	const (
-		id = 42
-		filename = "foo"
+		id       = 42
+		filename = "foo"
 	)

 	p := &PathPseudoPacket{
@@ -265,7 +265,7 @@ func TestPathPseudoPacket(t *testing.T) {
 	}

 	want := []byte{
-		0x00, 0x00, 0x00, 16+8,
+		0x00, 0x00, 0x00, 16 + 8,
 		104,
 		0x00, 0x00, 0x00, 42,
 		0x00, 0x00, 0x00, 0x01,
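For orientation, the want bytes in that test follow standard sftp framing: a four-byte big-endian length (16+8 = 24), a one-byte packet type (104 is SSH_FXP_NAME in the protocol drafts), the four-byte request id (42), and then the body, which for a name response begins with a four-byte entry count (1). A small sketch that rebuilds the same prefix, independent of the package under test:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	const (
		fxpName   = 104 // SSH_FXP_NAME
		requestID = 42
		count     = 1
	)

	buf := binary.BigEndian.AppendUint32(nil, 16+8) // length of everything after this field
	buf = append(buf, fxpName)
	buf = binary.BigEndian.AppendUint32(buf, requestID)
	buf = binary.BigEndian.AppendUint32(buf, count)

	fmt.Printf("% x\n", buf) // 00 00 00 18 68 00 00 00 2a 00 00 00 01
}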
4 changes: 2 additions & 2 deletions examples/go-sftp-server/main.go
@@ -17,7 +17,7 @@ import (
 )

 var (
-	readOnly = flag.Bool("read-only", false, "read-only server")
+	readOnly    = flag.Bool("read-only", false, "read-only server")
 	debugStderr = flag.Bool("debug", false, "debug to stderr")
 )

@@ -129,7 +129,7 @@ func main() {
 	}

 	srv := &sftp.Server{
-		Handler: &localfs.ServerHandler{
+		Handler: &localfs.ServerHandler{
 			ReadOnly: *readOnly,
 		},
 		Debug: debug,
24 changes: 12 additions & 12 deletions internal/pool/pool.go
@@ -6,8 +6,8 @@ import (
 	"sync/atomic"
 )

-type metrics struct{
-	hits atomic.Uint64
+type metrics struct {
+	hits   atomic.Uint64
 	misses atomic.Uint64
 }

@@ -25,25 +25,25 @@ func (m *metrics) Hits() (hits, total uint64) {
 }

 // BufPool provides a pool of slices that will return nil when a miss occurs.
-type SlicePool[S []T, T any] struct{
+type SlicePool[S []T, T any] struct {
 	metrics

 	ch     chan S
 	length int
 }

-func NewSlicePool[S []T, T any](depth, cullLength int) *SlicePool[S,T] {
+func NewSlicePool[S []T, T any](depth, cullLength int) *SlicePool[S, T] {
 	if cullLength <= 0 {
 		panic("sftp: bufPool: new buffer creation length must be greater than zero")
 	}

-	return &SlicePool[S,T]{
+	return &SlicePool[S, T]{
 		ch:     make(chan S, depth),
 		length: cullLength,
 	}
 }

-func (p *SlicePool[S,T]) Get() S {
+func (p *SlicePool[S, T]) Get() S {
 	if p == nil {
 		return nil
 	}
@@ -59,7 +59,7 @@ func (p *SlicePool[S,T]) Get() S {
 	}
 }

-func (p *SlicePool[S,T]) Put(b S) {
+func (p *SlicePool[S, T]) Put(b S) {
 	if p == nil {
 		// functional default: no reuse
 		return
@@ -78,15 +78,15 @@ }
 }

 // Pool provides a pool of types that should be called with new(T) when a miss occurs.
-type Pool[T any] struct{
+type Pool[T any] struct {
 	metrics

-	ch chan *T
+	ch chan *T
 }

 func NewPool[T any](depth int) *Pool[T] {
 	return &Pool[T]{
-		ch: make(chan *T, depth),
+		ch: make(chan *T, depth),
 	}
 }

@@ -122,14 +122,14 @@ func (p *Pool[T]) Put(v *T) {
 }

 // WorkPool provides a pool of types that blocks when the pool is empty.
-type WorkPool[T any] struct{
+type WorkPool[T any] struct {
 	ch chan chan T
 	wg sync.WaitGroup
 }

 func NewWorkPool[T any](depth int) *WorkPool[T] {
 	p := &WorkPool[T]{
-		ch: make(chan chan T, depth),
+		ch: make(chan chan T, depth),
 	}

 	for len(p.ch) < cap(p.ch) {
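The pool types above are built on buffered channels: Put offers a value back without blocking, Get prefers a pooled value and falls back to allocation, and the embedded metrics struct counts hits and misses. A self-contained sketch of that buffered-channel pool pattern (not the package's actual implementation, and without the metrics bookkeeping):

package main

import "fmt"

// chanPool is a sketch of the buffered-channel pool pattern used by internal/pool.
type chanPool[T any] struct {
	ch chan *T
}

func newChanPool[T any](depth int) *chanPool[T] {
	return &chanPool[T]{ch: make(chan *T, depth)}
}

// get returns a pooled value if one is available, otherwise it allocates.
func (p *chanPool[T]) get() *T {
	select {
	case v := <-p.ch:
		return v // pool hit
	default:
		return new(T) // pool miss
	}
}

// put offers the value back; if the pool is full, the value is left for the GC.
func (p *chanPool[T]) put(v *T) {
	if v == nil {
		return
	}
	select {
	case p.ch <- v:
	default:
	}
}

func main() {
	pool := newChanPool[[32]byte](4)
	b := pool.get()
	pool.put(b)
	fmt.Println(pool.get() == b) // true: the same value was reused
}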
1 change: 0 additions & 1 deletion localfs/localfs_integration_test.go
@@ -897,4 +897,3 @@ func BenchmarkReadFrom10MiB(b *testing.B) {
 func BenchmarkReadFrom64MiB(b *testing.B) {
 	benchHelperReadFrom(b, 1<<26)
 }
-
2 changes: 1 addition & 1 deletion localfs/localfs_test.go
@@ -6,5 +6,5 @@ import (

 var handler = &ServerHandler{}

-//var _ sftp.HardlinkServerHandler = handler
+// var _ sftp.HardlinkServerHandler = handler
 var _ sftp.POSIXRenameServerHandler = handler
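The var _ ... = handler lines in that hunk are compile-time interface assertions: assigning to the blank identifier forces the compiler to check that handler satisfies the named interface, and commenting one out (as with HardlinkServerHandler here) disables the check; the gofmt change only normalizes the comment spacing. An illustration of the idiom with placeholder types, not the sftp interfaces:

package main

import "fmt"

// renamer is a placeholder interface, not an sftp handler interface.
type renamer interface {
	Rename(oldpath, newpath string) error
}

type handler struct{}

func (handler) Rename(oldpath, newpath string) error { return nil }

// Compile-time assertion: the build breaks here if handler stops satisfying renamer.
var _ renamer = handler{}

func main() {
	fmt.Println("handler satisfies renamer")
}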
