feat: add identify option to swarm peers command
Fixes #9578
arthurgavazza authored Mar 30, 2023
1 parent 9fb09dd commit e89cce6
Showing 4 changed files with 210 additions and 50 deletions.
71 changes: 64 additions & 7 deletions core/commands/swarm.go
@@ -2,6 +2,7 @@ package commands

import (
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
@@ -21,8 +22,10 @@ import (
"github.com/ipfs/kubo/repo/fsrepo"

cmds "github.com/ipfs/go-ipfs-cmds"
ic "github.com/libp2p/go-libp2p/core/crypto"
inet "github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
pstore "github.com/libp2p/go-libp2p/core/peerstore"
rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
ma "github.com/multiformats/go-multiaddr"
madns "github.com/multiformats/go-multiaddr-dns"
@@ -69,6 +72,7 @@ const (
swarmDirectionOptionName = "direction"
swarmResetLimitsOptionName = "reset"
swarmUsedResourcesPercentageName = "min-used-limit-perc"
swarmIdentifyOptionName = "identify"
)

type peeringResult struct {
@@ -236,17 +240,18 @@ var swarmPeersCmd = &cmds.Command{
cmds.BoolOption(swarmStreamsOptionName, "Also list information about open streams for each peer"),
cmds.BoolOption(swarmLatencyOptionName, "Also list information about latency to each peer"),
cmds.BoolOption(swarmDirectionOptionName, "Also list information about the direction of connection"),
cmds.BoolOption(swarmIdentifyOptionName, "Also list information about peers' identify"),
},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
api, err := cmdenv.GetApi(env, req)
if err != nil {
return err
}

verbose, _ := req.Options[swarmVerboseOptionName].(bool)
latency, _ := req.Options[swarmLatencyOptionName].(bool)
streams, _ := req.Options[swarmStreamsOptionName].(bool)
direction, _ := req.Options[swarmDirectionOptionName].(bool)
identify, _ := req.Options[swarmIdentifyOptionName].(bool)

conns, err := api.Swarm().Peers(req.Context)
if err != nil {
@@ -287,6 +292,15 @@ var swarmPeersCmd = &cmds.Command{
ci.Streams = append(ci.Streams, streamInfo{Protocol: string(s)})
}
}

if verbose || identify {
n, err := cmdenv.GetNode(env)
if err != nil {
return err
}
identifyResult, _ := ci.identifyPeer(n.Peerstore, c.ID())
ci.Identify = identifyResult
}
sort.Sort(&ci)
out.Peers = append(out.Peers, ci)
}
@@ -411,12 +425,13 @@ type streamInfo struct {
}

type connInfo struct {
Addr string
Peer string
Latency string
Muxer string
Direction inet.Direction
Streams []streamInfo
Addr string `json:",omitempty"`
Peer string `json:",omitempty"`
Latency string `json:",omitempty"`
Muxer string `json:",omitempty"`
Direction inet.Direction `json:",omitempty"`
Streams []streamInfo `json:",omitempty"`
Identify IdOutput `json:",omitempty"`
}

func (ci *connInfo) Less(i, j int) bool {
@@ -447,6 +462,48 @@ func (ci connInfos) Swap(i, j int) {
ci.Peers[i], ci.Peers[j] = ci.Peers[j], ci.Peers[i]
}

func (ci *connInfo) identifyPeer(ps pstore.Peerstore, p peer.ID) (IdOutput, error) {
var info IdOutput
info.ID = p.String()

if pk := ps.PubKey(p); pk != nil {
pkb, err := ic.MarshalPublicKey(pk)
if err != nil {
return IdOutput{}, err
}
info.PublicKey = base64.StdEncoding.EncodeToString(pkb)
}

addrInfo := ps.PeerInfo(p)
addrs, err := peer.AddrInfoToP2pAddrs(&addrInfo)
if err != nil {
return IdOutput{}, err
}

for _, a := range addrs {
info.Addresses = append(info.Addresses, a.String())
}
sort.Strings(info.Addresses)

if protocols, err := ps.GetProtocols(p); err == nil {
info.Protocols = append(info.Protocols, protocols...)
sort.Slice(info.Protocols, func(i, j int) bool { return info.Protocols[i] < info.Protocols[j] })
}

if v, err := ps.Get(p, "ProtocolVersion"); err == nil {
if vs, ok := v.(string); ok {
info.ProtocolVersion = vs
}
}
if v, err := ps.Get(p, "AgentVersion"); err == nil {
if vs, ok := v.(string); ok {
info.AgentVersion = vs
}
}

return info, nil
}

// directionString converts an inet.Direction to a human-readable string
func directionString(d inet.Direction) string {
switch d {
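For orientation (not part of the commit): a minimal consumer-side sketch of what the new --identify output could look like once decoded. The struct shapes mirror the connInfo fields and IdOutput assertions in this diff and in test/cli/swarm_test.go below; the standalone program, and the assumption that a local daemon is running with ipfs on PATH, are illustrative only.

// Illustrative sketch only: decode the JSON from
// `ipfs swarm peers --enc=json --identify` into the shape added in this commit.
package main

import (
	"encoding/json"
	"fmt"
	"os/exec"
)

// Mirrors the IdOutput fields that the tests below assert on.
type identifyInfo struct {
	ID              string
	PublicKey       string
	Addresses       []string
	AgentVersion    string
	ProtocolVersion string
	Protocols       []string
}

// Mirrors connInfo as serialized; fields not needed here are simply omitted.
type peerEntry struct {
	Addr     string
	Peer     string
	Identify identifyInfo
}

type swarmPeersOutput struct {
	Peers []peerEntry
}

func main() {
	// Assumes a running daemon and `ipfs` on PATH.
	out, err := exec.Command("ipfs", "swarm", "peers", "--enc=json", "--identify").Output()
	if err != nil {
		panic(err)
	}
	var decoded swarmPeersOutput
	if err := json.Unmarshal(out, &decoded); err != nil {
		panic(err)
	}
	for _, p := range decoded.Peers {
		fmt.Printf("%s agent=%s protocols=%d\n", p.Peer, p.Identify.AgentVersion, len(p.Identify.Protocols))
	}
}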
37 changes: 37 additions & 0 deletions test/cli/harness/peering.go
@@ -0,0 +1,37 @@
package harness

import (
"fmt"
"math/rand"
"testing"

"github.com/ipfs/kubo/config"
)

type Peering struct {
From int
To int
}

func newRandPort() int {
n := rand.Int()
return 3000 + (n % 1000)
}

func CreatePeerNodes(t *testing.T, n int, peerings []Peering) (*Harness, Nodes) {
h := NewT(t)
nodes := h.NewNodes(n).Init()
nodes.ForEachPar(func(node *Node) {
node.UpdateConfig(func(cfg *config.Config) {
cfg.Routing.Type = config.NewOptionalString("none")
cfg.Addresses.Swarm = []string{fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", newRandPort())}
})

})

for _, peering := range peerings {
nodes[peering.From].PeerWith(nodes[peering.To])
}

return h, nodes
}
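A minimal sketch (not part of the commit) of how a test might call the new CreatePeerNodes helper; the test name is made up, but the types and the StartDaemons call mirror the call sites in test/cli/peering_test.go below.

// Illustrative sketch only: exercising the new CreatePeerNodes helper.
package cli

import (
	"testing"

	"github.com/ipfs/kubo/test/cli/harness"
)

func TestTwoNodePeeringSketch(t *testing.T) {
	t.Parallel()
	// Peer node 0 and node 1 with each other in both directions.
	peerings := []harness.Peering{{From: 0, To: 1}, {From: 1, To: 0}}
	h, nodes := harness.CreatePeerNodes(t, 2, peerings)
	_ = h // the harness is still available for assertions and cleanup

	nodes.StartDaemons()
	// From here, poll `ipfs swarm peers` on each node to assert the peering,
	// as peering_test.go does with assertPeerings.
}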
55 changes: 12 additions & 43 deletions test/cli/peering_test.go
@@ -1,12 +1,9 @@
package cli

import (
"fmt"
"math/rand"
"testing"
"time"

"github.com/ipfs/kubo/config"
"github.com/ipfs/kubo/test/cli/harness"
. "github.com/ipfs/kubo/test/cli/testutils"
"github.com/libp2p/go-libp2p/core/peer"
@@ -16,16 +13,6 @@ import (
func TestPeering(t *testing.T) {
t.Parallel()

type peering struct {
from int
to int
}

newRandPort := func() int {
n := rand.Int()
return 3000 + (n % 1000)
}

containsPeerID := func(p peer.ID, peers []peer.ID) bool {
for _, peerID := range peers {
if p == peerID {
@@ -63,34 +50,16 @@ func TestPeering(t *testing.T) {
}, 20*time.Second, 10*time.Millisecond, "%d -> %d peered", from.ID, to.ID)
}

assertPeerings := func(h *harness.Harness, nodes []*harness.Node, peerings []peering) {
ForEachPar(peerings, func(peering peering) {
assertPeered(h, nodes[peering.from], nodes[peering.to])
assertPeerings := func(h *harness.Harness, nodes []*harness.Node, peerings []harness.Peering) {
ForEachPar(peerings, func(peering harness.Peering) {
assertPeered(h, nodes[peering.From], nodes[peering.To])
})
}

createNodes := func(t *testing.T, n int, peerings []peering) (*harness.Harness, harness.Nodes) {
h := harness.NewT(t)
nodes := h.NewNodes(n).Init()
nodes.ForEachPar(func(node *harness.Node) {
node.UpdateConfig(func(cfg *config.Config) {
cfg.Routing.Type = config.NewOptionalString("none")
cfg.Addresses.Swarm = []string{fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", newRandPort())}
})

})

for _, peering := range peerings {
nodes[peering.from].PeerWith(nodes[peering.to])
}

return h, nodes
}

t.Run("bidirectional peering should work (simultaneous connect)", func(t *testing.T) {
t.Parallel()
peerings := []peering{{from: 0, to: 1}, {from: 1, to: 0}, {from: 1, to: 2}}
h, nodes := createNodes(t, 3, peerings)
peerings := []harness.Peering{{From: 0, To: 1}, {From: 1, To: 0}, {From: 1, To: 2}}
h, nodes := harness.CreatePeerNodes(t, 3, peerings)

nodes.StartDaemons()
assertPeerings(h, nodes, peerings)
@@ -101,8 +70,8 @@ func TestPeering(t *testing.T) {

t.Run("1 should reconnect to 2 when 2 disconnects from 1", func(t *testing.T) {
t.Parallel()
peerings := []peering{{from: 0, to: 1}, {from: 1, to: 0}, {from: 1, to: 2}}
h, nodes := createNodes(t, 3, peerings)
peerings := []harness.Peering{{From: 0, To: 1}, {From: 1, To: 0}, {From: 1, To: 2}}
h, nodes := harness.CreatePeerNodes(t, 3, peerings)

nodes.StartDaemons()
assertPeerings(h, nodes, peerings)
@@ -113,21 +82,21 @@ func TestPeering(t *testing.T) {

t.Run("1 will peer with 2 when it comes online", func(t *testing.T) {
t.Parallel()
peerings := []peering{{from: 0, to: 1}, {from: 1, to: 0}, {from: 1, to: 2}}
h, nodes := createNodes(t, 3, peerings)
peerings := []harness.Peering{{From: 0, To: 1}, {From: 1, To: 0}, {From: 1, To: 2}}
h, nodes := harness.CreatePeerNodes(t, 3, peerings)

nodes[0].StartDaemon()
nodes[1].StartDaemon()
assertPeerings(h, nodes, []peering{{from: 0, to: 1}, {from: 1, to: 0}})
assertPeerings(h, nodes, []harness.Peering{{From: 0, To: 1}, {From: 1, To: 0}})

nodes[2].StartDaemon()
assertPeerings(h, nodes, peerings)
})

t.Run("1 will re-peer with 2 when it disconnects and then comes back online", func(t *testing.T) {
t.Parallel()
peerings := []peering{{from: 0, to: 1}, {from: 1, to: 0}, {from: 1, to: 2}}
h, nodes := createNodes(t, 3, peerings)
peerings := []harness.Peering{{From: 0, To: 1}, {From: 1, To: 0}, {From: 1, To: 2}}
h, nodes := harness.CreatePeerNodes(t, 3, peerings)

nodes.StartDaemons()
assertPeerings(h, nodes, peerings)
97 changes: 97 additions & 0 deletions test/cli/swarm_test.go
@@ -0,0 +1,97 @@
package cli

import (
"encoding/json"
"fmt"
"testing"

"github.com/ipfs/kubo/test/cli/harness"

"github.com/stretchr/testify/assert"
)

// TODO: Migrate the rest of the sharness swarm test.
func TestSwarm(t *testing.T) {
type identifyType struct {
ID string
PublicKey string
Addresses []string
AgentVersion string
ProtocolVersion string
Protocols []string
}
type peer struct {
Identify identifyType
}
type expectedOutputType struct {
Peers []peer
}

t.Parallel()

t.Run("ipfs swarm peers returns empty peers when a node is not connected to any peers", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
res := node.RunIPFS("swarm", "peers", "--enc=json", "--identify")
var output expectedOutputType
err := json.Unmarshal(res.Stdout.Bytes(), &output)
assert.Nil(t, err)
assert.Equal(t, 0, len(output.Peers))

})
t.Run("ipfs swarm peers with flag identify outputs expected identify information about connected peers", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
otherNode := harness.NewT(t).NewNode().Init().StartDaemon()
node.Connect(otherNode)

res := node.RunIPFS("swarm", "peers", "--enc=json", "--identify")
var output expectedOutputType
err := json.Unmarshal(res.Stdout.Bytes(), &output)
assert.Nil(t, err)
actualID := output.Peers[0].Identify.ID
actualPublicKey := output.Peers[0].Identify.PublicKey
actualAgentVersion := output.Peers[0].Identify.AgentVersion
actualAddresses := output.Peers[0].Identify.Addresses
actualProtocolVersion := output.Peers[0].Identify.ProtocolVersion
actualProtocols := output.Peers[0].Identify.Protocols

expectedID := otherNode.PeerID().String()
expectedAddresses := []string{fmt.Sprintf("%s/p2p/%s", otherNode.SwarmAddrs()[0], actualID)}

assert.Equal(t, actualID, expectedID)
assert.NotNil(t, actualPublicKey)
assert.NotNil(t, actualAgentVersion)
assert.NotNil(t, actualProtocolVersion)
assert.Len(t, actualAddresses, 1)
assert.Equal(t, expectedAddresses[0], actualAddresses[0])
assert.Greater(t, len(actualProtocols), 0)

})

t.Run("ipfs swarm peers with flag identify outputs Identify field with data that matches calling ipfs id on a peer", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
otherNode := harness.NewT(t).NewNode().Init().StartDaemon()
node.Connect(otherNode)

otherNodeIDResponse := otherNode.RunIPFS("id", "--enc=json")
var otherNodeIDOutput identifyType
err := json.Unmarshal(otherNodeIDResponse.Stdout.Bytes(), &otherNodeIDOutput)
assert.Nil(t, err)
res := node.RunIPFS("swarm", "peers", "--enc=json", "--identify")

var output expectedOutputType
err = json.Unmarshal(res.Stdout.Bytes(), &output)
assert.Nil(t, err)
outputIdentify := output.Peers[0].Identify

assert.Equal(t, outputIdentify.ID, otherNodeIDOutput.ID)
assert.Equal(t, outputIdentify.PublicKey, otherNodeIDOutput.PublicKey)
assert.Equal(t, outputIdentify.AgentVersion, otherNodeIDOutput.AgentVersion)
assert.Equal(t, outputIdentify.ProtocolVersion, otherNodeIDOutput.ProtocolVersion)
assert.ElementsMatch(t, outputIdentify.Addresses, otherNodeIDOutput.Addresses)
assert.ElementsMatch(t, outputIdentify.Protocols, otherNodeIDOutput.Protocols)

})
}
