diff --git a/client/db.go b/client/db.go index 9f54b72108..da34b81ae5 100644 --- a/client/db.go +++ b/client/db.go @@ -106,6 +106,11 @@ type Store interface { // Backup holds the backup related methods that must be implemented by the database. Backup + // P2P contains functions related to the P2P system. + // + // These functions are only useful if there is a configured network peer. + P2P + // AddSchema takes the provided GQL schema in SDL format, and applies it to the [Store], // creating the necessary collections, request types, etc. // diff --git a/client/errors.go b/client/errors.go index 460392a030..dac8ebcc87 100644 --- a/client/errors.go +++ b/client/errors.go @@ -56,6 +56,7 @@ var ( ErrCanNotNormalizeValue = errors.New(errCanNotNormalizeValue) ErrCanNotTurnNormalValueIntoArray = errors.New(errCanNotTurnNormalValueIntoArray) ErrCanNotMakeNormalNilFromFieldKind = errors.New(errCanNotMakeNormalNilFromFieldKind) + ErrCollectionNotFound = errors.New(errCollectionNotFound) ) // NewErrFieldNotExist returns an error indicating that the given field does not exist. diff --git a/client/mocks/db.go b/client/mocks/db.go index 0d832e7d00..d704ce3903 100644 --- a/client/mocks/db.go +++ b/client/mocks/db.go @@ -18,6 +18,8 @@ import ( mock "github.com/stretchr/testify/mock" model "github.com/lens-vm/lens/host-go/config/model" + + peer "github.com/libp2p/go-libp2p/core/peer" ) // DB is an autogenerated mock type for the DB type @@ -33,6 +35,49 @@ func (_m *DB) EXPECT() *DB_Expecter { return &DB_Expecter{mock: &_m.Mock} } +// AddP2PCollections provides a mock function with given fields: ctx, collectionIDs +func (_m *DB) AddP2PCollections(ctx context.Context, collectionIDs []string) error { + ret := _m.Called(ctx, collectionIDs) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, []string) error); ok { + r0 = rf(ctx, collectionIDs) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DB_AddP2PCollections_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddP2PCollections' +type DB_AddP2PCollections_Call struct { + *mock.Call +} + +// AddP2PCollections is a helper method to define mock.On call +// - ctx context.Context +// - collectionIDs []string +func (_e *DB_Expecter) AddP2PCollections(ctx interface{}, collectionIDs interface{}) *DB_AddP2PCollections_Call { + return &DB_AddP2PCollections_Call{Call: _e.mock.On("AddP2PCollections", ctx, collectionIDs)} +} + +func (_c *DB_AddP2PCollections_Call) Run(run func(ctx context.Context, collectionIDs []string)) *DB_AddP2PCollections_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].([]string)) + }) + return _c +} + +func (_c *DB_AddP2PCollections_Call) Return(_a0 error) *DB_AddP2PCollections_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DB_AddP2PCollections_Call) RunAndReturn(run func(context.Context, []string) error) *DB_AddP2PCollections_Call { + _c.Call.Return(run) + return _c +} + // AddPolicy provides a mock function with given fields: ctx, policy func (_m *DB) AddPolicy(ctx context.Context, policy string) (client.AddPolicyResult, error) { ret := _m.Called(ctx, policy) @@ -359,6 +404,49 @@ func (_c *DB_Close_Call) RunAndReturn(run func()) *DB_Close_Call { return _c } +// DeleteReplicator provides a mock function with given fields: ctx, rep +func (_m *DB) DeleteReplicator(ctx context.Context, rep client.Replicator) error { + ret := _m.Called(ctx, rep) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, client.Replicator) error); ok 
{ + r0 = rf(ctx, rep) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DB_DeleteReplicator_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteReplicator' +type DB_DeleteReplicator_Call struct { + *mock.Call +} + +// DeleteReplicator is a helper method to define mock.On call +// - ctx context.Context +// - rep client.Replicator +func (_e *DB_Expecter) DeleteReplicator(ctx interface{}, rep interface{}) *DB_DeleteReplicator_Call { + return &DB_DeleteReplicator_Call{Call: _e.mock.On("DeleteReplicator", ctx, rep)} +} + +func (_c *DB_DeleteReplicator_Call) Run(run func(ctx context.Context, rep client.Replicator)) *DB_DeleteReplicator_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(client.Replicator)) + }) + return _c +} + +func (_c *DB_DeleteReplicator_Call) Return(_a0 error) *DB_DeleteReplicator_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DB_DeleteReplicator_Call) RunAndReturn(run func(context.Context, client.Replicator) error) *DB_DeleteReplicator_Call { + _c.Call.Return(run) + return _c +} + // Events provides a mock function with given fields: func (_m *DB) Events() *event.Bus { ret := _m.Called() @@ -501,6 +589,114 @@ func (_c *DB_GetAllIndexes_Call) RunAndReturn(run func(context.Context) (map[str return _c } +// GetAllP2PCollections provides a mock function with given fields: ctx +func (_m *DB) GetAllP2PCollections(ctx context.Context) ([]string, error) { + ret := _m.Called(ctx) + + var r0 []string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) ([]string, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) []string); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]string) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DB_GetAllP2PCollections_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAllP2PCollections' +type DB_GetAllP2PCollections_Call struct { + *mock.Call +} + +// GetAllP2PCollections is a helper method to define mock.On call +// - ctx context.Context +func (_e *DB_Expecter) GetAllP2PCollections(ctx interface{}) *DB_GetAllP2PCollections_Call { + return &DB_GetAllP2PCollections_Call{Call: _e.mock.On("GetAllP2PCollections", ctx)} +} + +func (_c *DB_GetAllP2PCollections_Call) Run(run func(ctx context.Context)) *DB_GetAllP2PCollections_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *DB_GetAllP2PCollections_Call) Return(_a0 []string, _a1 error) *DB_GetAllP2PCollections_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DB_GetAllP2PCollections_Call) RunAndReturn(run func(context.Context) ([]string, error)) *DB_GetAllP2PCollections_Call { + _c.Call.Return(run) + return _c +} + +// GetAllReplicators provides a mock function with given fields: ctx +func (_m *DB) GetAllReplicators(ctx context.Context) ([]client.Replicator, error) { + ret := _m.Called(ctx) + + var r0 []client.Replicator + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) ([]client.Replicator, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) []client.Replicator); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]client.Replicator) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = 
ret.Error(1) + } + + return r0, r1 +} + +// DB_GetAllReplicators_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAllReplicators' +type DB_GetAllReplicators_Call struct { + *mock.Call +} + +// GetAllReplicators is a helper method to define mock.On call +// - ctx context.Context +func (_e *DB_Expecter) GetAllReplicators(ctx interface{}) *DB_GetAllReplicators_Call { + return &DB_GetAllReplicators_Call{Call: _e.mock.On("GetAllReplicators", ctx)} +} + +func (_c *DB_GetAllReplicators_Call) Run(run func(ctx context.Context)) *DB_GetAllReplicators_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *DB_GetAllReplicators_Call) Return(_a0 []client.Replicator, _a1 error) *DB_GetAllReplicators_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DB_GetAllReplicators_Call) RunAndReturn(run func(context.Context) ([]client.Replicator, error)) *DB_GetAllReplicators_Call { + _c.Call.Return(run) + return _c +} + // GetCollectionByName provides a mock function with given fields: _a0, _a1 func (_m *DB) GetCollectionByName(_a0 context.Context, _a1 string) (client.Collection, error) { ret := _m.Called(_a0, _a1) @@ -1044,6 +1240,47 @@ func (_c *DB_PatchSchema_Call) RunAndReturn(run func(context.Context, string, im return _c } +// PeerInfo provides a mock function with given fields: +func (_m *DB) PeerInfo() peer.AddrInfo { + ret := _m.Called() + + var r0 peer.AddrInfo + if rf, ok := ret.Get(0).(func() peer.AddrInfo); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(peer.AddrInfo) + } + + return r0 +} + +// DB_PeerInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PeerInfo' +type DB_PeerInfo_Call struct { + *mock.Call +} + +// PeerInfo is a helper method to define mock.On call +func (_e *DB_Expecter) PeerInfo() *DB_PeerInfo_Call { + return &DB_PeerInfo_Call{Call: _e.mock.On("PeerInfo")} +} + +func (_c *DB_PeerInfo_Call) Run(run func()) *DB_PeerInfo_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *DB_PeerInfo_Call) Return(_a0 peer.AddrInfo) *DB_PeerInfo_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DB_PeerInfo_Call) RunAndReturn(run func() peer.AddrInfo) *DB_PeerInfo_Call { + _c.Call.Return(run) + return _c +} + // Peerstore provides a mock function with given fields: func (_m *DB) Peerstore() datastore.DSBatching { ret := _m.Called() @@ -1129,6 +1366,49 @@ func (_c *DB_PrintDump_Call) RunAndReturn(run func(context.Context) error) *DB_P return _c } +// RemoveP2PCollections provides a mock function with given fields: ctx, collectionIDs +func (_m *DB) RemoveP2PCollections(ctx context.Context, collectionIDs []string) error { + ret := _m.Called(ctx, collectionIDs) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, []string) error); ok { + r0 = rf(ctx, collectionIDs) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DB_RemoveP2PCollections_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RemoveP2PCollections' +type DB_RemoveP2PCollections_Call struct { + *mock.Call +} + +// RemoveP2PCollections is a helper method to define mock.On call +// - ctx context.Context +// - collectionIDs []string +func (_e *DB_Expecter) RemoveP2PCollections(ctx interface{}, collectionIDs interface{}) *DB_RemoveP2PCollections_Call { + return &DB_RemoveP2PCollections_Call{Call: _e.mock.On("RemoveP2PCollections", ctx, collectionIDs)} +} + +func (_c 
*DB_RemoveP2PCollections_Call) Run(run func(ctx context.Context, collectionIDs []string)) *DB_RemoveP2PCollections_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].([]string)) + }) + return _c +} + +func (_c *DB_RemoveP2PCollections_Call) Return(_a0 error) *DB_RemoveP2PCollections_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DB_RemoveP2PCollections_Call) RunAndReturn(run func(context.Context, []string) error) *DB_RemoveP2PCollections_Call { + _c.Call.Return(run) + return _c +} + // Root provides a mock function with given fields: func (_m *DB) Root() datastore.Rootstore { ret := _m.Called() @@ -1258,6 +1538,49 @@ func (_c *DB_SetMigration_Call) RunAndReturn(run func(context.Context, client.Le return _c } +// SetReplicator provides a mock function with given fields: ctx, rep +func (_m *DB) SetReplicator(ctx context.Context, rep client.Replicator) error { + ret := _m.Called(ctx, rep) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, client.Replicator) error); ok { + r0 = rf(ctx, rep) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DB_SetReplicator_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetReplicator' +type DB_SetReplicator_Call struct { + *mock.Call +} + +// SetReplicator is a helper method to define mock.On call +// - ctx context.Context +// - rep client.Replicator +func (_e *DB_Expecter) SetReplicator(ctx interface{}, rep interface{}) *DB_SetReplicator_Call { + return &DB_SetReplicator_Call{Call: _e.mock.On("SetReplicator", ctx, rep)} +} + +func (_c *DB_SetReplicator_Call) Run(run func(ctx context.Context, rep client.Replicator)) *DB_SetReplicator_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(client.Replicator)) + }) + return _c +} + +func (_c *DB_SetReplicator_Call) Return(_a0 error) *DB_SetReplicator_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DB_SetReplicator_Call) RunAndReturn(run func(context.Context, client.Replicator) error) *DB_SetReplicator_Call { + _c.Call.Return(run) + return _c +} + // NewDB creates a new instance of DB. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. func NewDB(t interface { diff --git a/client/p2p.go b/client/p2p.go index 12be6ebf8d..d3d3c699b3 100644 --- a/client/p2p.go +++ b/client/p2p.go @@ -18,8 +18,6 @@ import ( // P2P is a peer connected database implementation. type P2P interface { - DB - // PeerInfo returns the p2p host id and listening addresses. PeerInfo() peer.AddrInfo diff --git a/event/event.go b/event/event.go index e9afdf1a57..5889577c7d 100644 --- a/event/event.go +++ b/event/event.go @@ -32,6 +32,12 @@ const ( PubSubName = Name("pubsub") // PeerName is the name of the network connect event. PeerName = Name("peer") + // P2PTopicName is the name of the network p2p topic update event. + P2PTopicName = Name("p2p-topic") + // PeerInfoName is the name of the network peer info event. + PeerInfoName = Name("peer-info") + // ReplicatorName is the name of the replicator event. + ReplicatorName = Name("replicator") ) // Peer is an event that is published when @@ -110,3 +116,25 @@ type Subscription struct { func (s *Subscription) Message() <-chan Message { return s.value } + +// P2PTopic is an event that is published when a peer has updated the topics it is subscribed to. 
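+//
+// ToAdd holds the pubsub topic names (schema roots or document IDs) the node
+// should subscribe to, and ToRemove holds the topic names it should drop.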
+type P2PTopic struct { + ToAdd []string + ToRemove []string +} + +// PeerInfo is an event that is published when the node has updated its peer info. +type PeerInfo struct { + Info peer.AddrInfo +} + +// Replicator is an event that is published when a replicator is added or updated. +type Replicator struct { + // The peer info for the replicator instance. + Info peer.AddrInfo + // The map of schema roots that the replicator will receive updates for. + Schemas map[string]struct{} + // Docs will receive Updates if new collections have been added to the replicator + // and those collections have documents to be replicated. + Docs <-chan Update +} diff --git a/go.mod b/go.mod index 7ffa87cf4c..885d398f81 100644 --- a/go.mod +++ b/go.mod @@ -33,6 +33,7 @@ require ( github.com/libp2p/go-libp2p-kad-dht v0.25.2 github.com/libp2p/go-libp2p-pubsub v0.10.1 github.com/libp2p/go-libp2p-record v0.2.0 + github.com/mr-tron/base58 v1.2.0 github.com/multiformats/go-multiaddr v0.12.4 github.com/multiformats/go-multibase v0.2.0 github.com/multiformats/go-multicodec v0.9.0 @@ -191,7 +192,6 @@ require ( github.com/minio/sha256-simd v1.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect - github.com/mr-tron/base58 v1.2.0 // indirect github.com/multiformats/go-base32 v0.1.0 // indirect github.com/multiformats/go-base36 v0.2.0 // indirect github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect diff --git a/internal/db/db.go b/internal/db/db.go index 11750e5881..57e84b3566 100644 --- a/internal/db/db.go +++ b/internal/db/db.go @@ -21,6 +21,7 @@ import ( ds "github.com/ipfs/go-datastore" dsq "github.com/ipfs/go-datastore/query" + "github.com/libp2p/go-libp2p/core/peer" "github.com/sourcenetwork/corelog" "github.com/sourcenetwork/immutable" @@ -75,6 +76,11 @@ type db struct { // Contains ACP if it exists acp immutable.Option[acp.ACP] + + // The peer ID and network address information for the current node + // if network is enabled. + peerInfo immutable.Option[peer.AddrInfo] + peerMutex sync.RWMutex } // NewDB creates a new instance of the DB using the given options. 
@@ -126,11 +132,11 @@ func newDB( return nil, err } - sub, err := db.events.Subscribe(event.MergeName) + sub, err := db.events.Subscribe(event.MergeName, event.PeerInfoName) if err != nil { return nil, err } - go db.handleMerges(ctx, sub) + go db.handleMessages(ctx, sub) return db, nil } diff --git a/internal/db/errors.go b/internal/db/errors.go index 7a81824efe..72dd7e27b0 100644 --- a/internal/db/errors.go +++ b/internal/db/errors.go @@ -11,6 +11,8 @@ package db import ( + "github.com/libp2p/go-libp2p/core/peer" + "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/errors" ) @@ -94,6 +96,9 @@ const ( errSecondaryFieldOnSchema string = "secondary relation fields cannot be defined on the schema" errRelationMissingField string = "relation missing field" errNoTransactionInContext string = "no transaction in context" + errReplicatorExists string = "replicator already exists for %s with peerID %s" + errReplicatorDocID string = "failed to get docID for replicator" + errReplicatorCollections string = "failed to get collections for replicator" ) var ( @@ -127,7 +132,12 @@ var ( ErrSecondaryFieldOnSchema = errors.New(errSecondaryFieldOnSchema) ErrRelationMissingField = errors.New(errRelationMissingField) ErrMultipleRelationPrimaries = errors.New("relation can only have a single field set as primary") + ErrP2PColHasPolicy = errors.New("p2p collection specified has a policy on it") ErrNoTransactionInContext = errors.New(errNoTransactionInContext) + ErrReplicatorColHasPolicy = errors.New("replicator collection specified has a policy on it") + ErrReplicatorSomeColsHavePolicy = errors.New("replicator can not use all collections, as some have policy") + ErrSelfTargetForReplicator = errors.New("can't target ourselves as a replicator") + ErrReplicatorCollections = errors.New(errReplicatorCollections) ) // NewErrFailedToGetHeads returns a new error indicating that the heads of a document @@ -617,3 +627,19 @@ func NewErrRelationMissingField(objectName, relationName string) error { errors.NewKV("RelationName", relationName), ) } + +func NewErrReplicatorExists(collection string, peerID peer.ID) error { + return errors.New( + errReplicatorExists, + errors.NewKV("Collection", collection), + errors.NewKV("PeerID", peerID.String()), + ) +} + +func NewErrReplicatorDocID(inner error, kv ...errors.KV) error { + return errors.Wrap(errReplicatorDocID, inner, kv...) +} + +func NewErrReplicatorCollections(inner error, kv ...errors.KV) error { + return errors.Wrap(errReplicatorCollections, inner, kv...) 
+}
diff --git a/internal/db/merge.go b/internal/db/merge.go
index 278c13ad12..bbfedd98d8 100644
--- a/internal/db/merge.go
+++ b/internal/db/merge.go
@@ -19,12 +19,10 @@ import (
 	"github.com/ipld/go-ipld-prime/linking"
 	cidlink "github.com/ipld/go-ipld-prime/linking/cid"
 
-	"github.com/sourcenetwork/corelog"
 	"github.com/sourcenetwork/immutable"
 
 	"github.com/sourcenetwork/defradb/client"
 	"github.com/sourcenetwork/defradb/datastore"
-	"github.com/sourcenetwork/defradb/datastore/badger/v4"
 	"github.com/sourcenetwork/defradb/errors"
 	"github.com/sourcenetwork/defradb/event"
 	"github.com/sourcenetwork/defradb/internal/core"
@@ -34,50 +32,6 @@ import (
 	merklecrdt "github.com/sourcenetwork/defradb/internal/merkle/crdt"
 )
 
-func (db *db) handleMerges(ctx context.Context, sub *event.Subscription) {
-	queue := newMergeQueue()
-	for {
-		select {
-		case <-ctx.Done():
-			return
-		case msg, ok := <-sub.Message():
-			if !ok {
-				return
-			}
-			merge, ok := msg.Data.(event.Merge)
-			if !ok {
-				continue
-			}
-			go func() {
-				// ensure only one merge per docID
-				queue.add(merge.DocID)
-				defer queue.done(merge.DocID)
-
-				// retry the merge process if a conflict occurs
-				//
-				// conficts occur when a user updates a document
-				// while a merge is in progress.
-				var err error
-				for i := 0; i < db.MaxTxnRetries(); i++ {
-					err = db.executeMerge(ctx, merge)
-					if errors.Is(err, badger.ErrTxnConflict) {
-						continue // retry merge
-					}
-					break // merge success or error
-				}
-
-				if err != nil {
-					log.ErrorContextE(
-						ctx,
-						"Failed to execute merge",
-						err,
-						corelog.Any("Event", merge))
-				}
-			}()
-		}
-	}
-}
-
 func (db *db) executeMerge(ctx context.Context, dagMerge event.Merge) error {
 	ctx, txn, err := ensureContextTxn(ctx, db, false)
 	if err != nil {
diff --git a/internal/db/messages.go b/internal/db/messages.go
new file mode 100644
index 0000000000..241dc8ed7a
--- /dev/null
+++ b/internal/db/messages.go
@@ -0,0 +1,81 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package db
+
+import (
+	"context"
+
+	"github.com/sourcenetwork/corelog"
+	"github.com/sourcenetwork/immutable"
+
+	"github.com/sourcenetwork/defradb/datastore/badger/v4"
+	"github.com/sourcenetwork/defradb/errors"
+	"github.com/sourcenetwork/defradb/event"
+)
+
+func (db *db) handleMessages(ctx context.Context, sub *event.Subscription) {
+	queue := newMergeQueue()
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case msg, ok := <-sub.Message():
+			if !ok {
+				return
+			}
+			switch evt := msg.Data.(type) {
+			case event.Merge:
+				go func() {
+					// ensure only one merge per docID
+					queue.add(evt.DocID)
+					defer queue.done(evt.DocID)
+
+					// retry the merge process if a conflict occurs
+					//
+					// conflicts occur when a user updates a document
+					// while a merge is in progress.
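+					//
+					// only badger.ErrTxnConflict is treated as retryable; any
+					// other error, or exhausting db.MaxTxnRetries() attempts,
+					// ends the loop and is reported by the error log below.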
+ var err error + for i := 0; i < db.MaxTxnRetries(); i++ { + err = db.executeMerge(ctx, evt) + if errors.Is(err, badger.ErrTxnConflict) { + continue // retry merge + } + break // merge success or error + } + + if err != nil { + log.ErrorContextE( + ctx, + "Failed to execute merge", + err, + corelog.Any("Event", evt)) + } + }() + case event.PeerInfo: + db.peerMutex.Lock() + db.peerInfo = immutable.Some(evt.Info) + db.peerMutex.Unlock() + go func() { + err := db.loadP2PCollections(ctx) + if err != nil { + log.ErrorContextE(ctx, "Failed to load P2P collections", err) + } + }() + go func() { + err := db.loadReplicators(ctx) + if err != nil { + log.ErrorContextE(ctx, "Failed to load replicators", err) + } + }() + } + } + } +} diff --git a/internal/db/p2p_replicator.go b/internal/db/p2p_replicator.go new file mode 100644 index 0000000000..0c01f39047 --- /dev/null +++ b/internal/db/p2p_replicator.go @@ -0,0 +1,341 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package db + +import ( + "context" + "encoding/json" + + dsq "github.com/ipfs/go-datastore/query" + + "github.com/sourcenetwork/corelog" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/errors" + "github.com/sourcenetwork/defradb/event" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/merkle/clock" +) + +func (db *db) SetReplicator(ctx context.Context, rep client.Replicator) error { + txn, err := db.NewTxn(ctx, false) + if err != nil { + return err + } + defer txn.Discard(ctx) + + db.peerMutex.RLock() + if db.peerInfo.HasValue() && rep.Info.ID == db.peerInfo.Value().ID { + db.peerMutex.RUnlock() + return ErrSelfTargetForReplicator + } + db.peerMutex.RUnlock() + if err := rep.Info.ID.Validate(); err != nil { + return err + } + + // TODO-ACP: Support ACP <> P2P - https://github.com/sourcenetwork/defradb/issues/2366 + // ctx = db.SetContextIdentity(ctx, identity) + ctx = SetContextTxn(ctx, txn) + + oldSchemas := make(map[string]struct{}) + repKey := core.NewReplicatorKey(rep.Info.ID.String()) + hasOldRep, err := txn.Systemstore().Has(ctx, repKey.ToDS()) + if err != nil { + return err + } + if hasOldRep { + repBytes, err := txn.Systemstore().Get(ctx, repKey.ToDS()) + if err != nil { + return err + } + oldRep := client.Replicator{} + err = json.Unmarshal(repBytes, &oldRep) + if err != nil { + return err + } + for _, schema := range oldRep.Schemas { + oldSchemas[schema] = struct{}{} + } + } + + var collections []client.Collection + switch { + case len(rep.Schemas) > 0: + // if specific collections are chosen get them by name + for _, name := range rep.Schemas { + col, err := db.GetCollectionByName(ctx, name) + if err != nil { + return NewErrReplicatorCollections(err) + } + + if col.Description().Policy.HasValue() { + return ErrReplicatorColHasPolicy + } + + collections = append(collections, col) + } + + default: + // default to all collections (unless a collection contains a policy). 
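+		// (this branch is taken when rep.Schemas is empty, i.e. no
+		// specific collections were requested.)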
+ // TODO-ACP: default to all collections after resolving https://github.com/sourcenetwork/defradb/issues/2366 + allCollections, err := db.GetCollections(ctx, client.CollectionFetchOptions{}) + if err != nil { + return NewErrReplicatorCollections(err) + } + + for _, col := range allCollections { + // Can not default to all collections if any collection has a policy. + // TODO-ACP: remove this check/loop after https://github.com/sourcenetwork/defradb/issues/2366 + if col.Description().Policy.HasValue() { + return ErrReplicatorSomeColsHavePolicy + } + } + collections = allCollections + } + + rep.Schemas = nil + schemaMap := make(map[string]struct{}) + addedCols := []client.Collection{} + for _, col := range collections { + rep.Schemas = append(rep.Schemas, col.SchemaRoot()) + schemaMap[col.SchemaRoot()] = struct{}{} + if _, ok := oldSchemas[col.SchemaRoot()]; !ok { + addedCols = append(addedCols, col) + } + } + + // persist replicator to the datastore + newRepBytes, err := json.Marshal(rep) + if err != nil { + return err + } + + err = txn.Systemstore().Put(ctx, repKey.ToDS(), newRepBytes) + if err != nil { + return err + } + + txn.OnSuccess(func() { + db.events.Publish(event.NewMessage(event.ReplicatorName, event.Replicator{ + Info: rep.Info, + Schemas: schemaMap, + Docs: db.getDocsHeads(ctx, addedCols), + })) + }) + + return txn.Commit(ctx) +} + +func (db *db) getDocsHeads( + ctx context.Context, + cols []client.Collection, +) <-chan event.Update { + updateChan := make(chan event.Update) + go func() { + defer close(updateChan) + txn, err := db.NewTxn(ctx, true) + if err != nil { + log.ErrorContextE(ctx, "Failed to get transaction", err) + return + } + defer txn.Discard(ctx) + ctx = SetContextTxn(ctx, txn) + for _, col := range cols { + keysCh, err := col.GetAllDocIDs(ctx) + if err != nil { + log.ErrorContextE( + ctx, + "Failed to get all docIDs", + NewErrReplicatorDocID(err, errors.NewKV("Collection", col.Name().Value())), + ) + continue + } + for docIDResult := range keysCh { + if docIDResult.Err != nil { + log.ErrorContextE(ctx, "Key channel error", docIDResult.Err) + continue + } + docID := core.DataStoreKeyFromDocID(docIDResult.ID) + headset := clock.NewHeadSet( + txn.Headstore(), + docID.WithFieldId(core.COMPOSITE_NAMESPACE).ToHeadStoreKey(), + ) + cids, _, err := headset.List(ctx) + if err != nil { + log.ErrorContextE( + ctx, + "Failed to get heads", + err, + corelog.String("DocID", docIDResult.ID.String()), + corelog.Any("Collection", col.Name())) + continue + } + // loop over heads, get block, make the required logs, and send + for _, c := range cids { + blk, err := txn.Blockstore().Get(ctx, c) + if err != nil { + log.ErrorContextE(ctx, "Failed to get block", err, + corelog.Any("CID", c), + corelog.Any("Collection", col.Name())) + continue + } + + updateChan <- event.Update{ + DocID: docIDResult.ID.String(), + Cid: c, + SchemaRoot: col.SchemaRoot(), + Block: blk.RawData(), + } + } + } + } + }() + + return updateChan +} + +func (db *db) DeleteReplicator(ctx context.Context, rep client.Replicator) error { + txn, err := db.NewTxn(ctx, false) + if err != nil { + return err + } + defer txn.Discard(ctx) + + if err := rep.Info.ID.Validate(); err != nil { + return err + } + + // set transaction for all operations + ctx = SetContextTxn(ctx, txn) + + oldSchemas := make(map[string]struct{}) + repKey := core.NewReplicatorKey(rep.Info.ID.String()) + hasOldRep, err := txn.Systemstore().Has(ctx, repKey.ToDS()) + if err != nil { + return err + } + if hasOldRep { + repBytes, err := 
txn.Systemstore().Get(ctx, repKey.ToDS())
+		if err != nil {
+			return err
+		}
+		oldRep := client.Replicator{}
+		err = json.Unmarshal(repBytes, &oldRep)
+		if err != nil {
+			return err
+		}
+		for _, schema := range oldRep.Schemas {
+			oldSchemas[schema] = struct{}{}
+		}
+	}
+
+	var collections []client.Collection
+	if len(rep.Schemas) > 0 {
+		// if specific collections are chosen get them by name
+		for _, name := range rep.Schemas {
+			col, err := db.GetCollectionByName(ctx, name)
+			if err != nil {
+				return NewErrReplicatorCollections(err)
+			}
+			collections = append(collections, col)
+		}
+		// make sure the replicator exists in the datastore
+		key := core.NewReplicatorKey(rep.Info.ID.String())
+		_, err = txn.Systemstore().Get(ctx, key.ToDS())
+		if err != nil {
+			return err
+		}
+	} else {
+		oldSchemas = make(map[string]struct{})
+	}
+
+	rep.Schemas = nil
+	for _, col := range collections {
+		delete(oldSchemas, col.SchemaRoot())
+	}
+	for schema := range oldSchemas {
+		rep.Schemas = append(rep.Schemas, schema)
+	}
+
+	// persist the replicator to the store, deleting it if no schemas remain
+	key := core.NewReplicatorKey(rep.Info.ID.String())
+	if len(rep.Schemas) == 0 {
+		err := txn.Systemstore().Delete(ctx, key.ToDS())
+		if err != nil {
+			return err
+		}
+	} else {
+		repBytes, err := json.Marshal(rep)
+		if err != nil {
+			return err
+		}
+		err = txn.Systemstore().Put(ctx, key.ToDS(), repBytes)
+		if err != nil {
+			return err
+		}
+	}
+
+	txn.OnSuccess(func() {
+		db.events.Publish(event.NewMessage(event.ReplicatorName, event.Replicator{
+			Info:    rep.Info,
+			Schemas: oldSchemas,
+		}))
+	})
+
+	return txn.Commit(ctx)
+}
+
+func (db *db) GetAllReplicators(ctx context.Context) ([]client.Replicator, error) {
+	txn, err := db.NewTxn(ctx, true)
+	if err != nil {
+		return nil, err
+	}
+	defer txn.Discard(ctx)
+
+	// create replicator system prefix query
+	query := dsq.Query{
+		Prefix: core.NewReplicatorKey("").ToString(),
+	}
+	results, err := txn.Systemstore().Query(ctx, query)
+	if err != nil {
+		return nil, err
+	}
+
+	var reps []client.Replicator
+	for result := range results.Next() {
+		var rep client.Replicator
+		if err = json.Unmarshal(result.Value, &rep); err != nil {
+			return nil, err
+		}
+		reps = append(reps, rep)
+	}
+	return reps, nil
+}
+
+func (db *db) loadReplicators(ctx context.Context) error {
+	replicators, err := db.GetAllReplicators(ctx)
+	if err != nil {
+		return err
+	}
+
+	for _, rep := range replicators {
+		schemaMap := make(map[string]struct{})
+		for _, schema := range rep.Schemas {
+			schemaMap[schema] = struct{}{}
+		}
+		db.events.Publish(event.NewMessage(event.ReplicatorName, event.Replicator{
+			Info:    rep.Info,
+			Schemas: schemaMap,
+		}))
+	}
+	return nil
+}
diff --git a/internal/db/p2p_replicator_test.go b/internal/db/p2p_replicator_test.go
new file mode 100644
index 0000000000..44f00486cd
--- /dev/null
+++ b/internal/db/p2p_replicator_test.go
@@ -0,0 +1,224 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+ +package db + +import ( + "context" + "testing" + "time" + + "github.com/libp2p/go-libp2p/core/peer" + b58 "github.com/mr-tron/base58/base58" + "github.com/stretchr/testify/require" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/event" +) + +func waitForPeerInfo(db *db, sub *event.Subscription) { + for msg := range sub.Message() { + if msg.Name == event.PeerInfoName { + hasPeerInfo := false + db.peerMutex.RLock() + if db.peerInfo.HasValue() { + hasPeerInfo = true + } + db.peerMutex.RUnlock() + if !hasPeerInfo { + time.Sleep(1 * time.Millisecond) + } + break + } + } +} + +func TestSetReplicator_WithEmptyPeerInfo_ShouldError(t *testing.T) { + ctx := context.Background() + db, err := newDefraMemoryDB(ctx) + require.NoError(t, err) + defer db.Close() + err = db.SetReplicator(ctx, client.Replicator{}) + require.ErrorContains(t, err, "empty peer ID") +} + +func TestSetReplicator_WithSelfTarget_ShouldError(t *testing.T) { + ctx := context.Background() + db, err := newDefraMemoryDB(ctx) + require.NoError(t, err) + defer db.Close() + sub, err := db.events.Subscribe(event.PeerInfoName) + require.NoError(t, err) + db.events.Publish(event.NewMessage(event.PeerInfoName, event.PeerInfo{Info: peer.AddrInfo{ID: "self"}})) + waitForPeerInfo(db, sub) + err = db.SetReplicator(ctx, client.Replicator{Info: peer.AddrInfo{ID: "self"}}) + require.ErrorIs(t, err, ErrSelfTargetForReplicator) +} + +func TestSetReplicator_WithInvalidCollection_ShouldError(t *testing.T) { + ctx := context.Background() + db, err := newDefraMemoryDB(ctx) + require.NoError(t, err) + defer db.Close() + sub, err := db.events.Subscribe(event.PeerInfoName) + require.NoError(t, err) + db.events.Publish(event.NewMessage(event.PeerInfoName, event.PeerInfo{Info: peer.AddrInfo{ID: "self"}})) + waitForPeerInfo(db, sub) + err = db.SetReplicator(ctx, client.Replicator{ + Info: peer.AddrInfo{ID: "other"}, + Schemas: []string{"invalidCollection"}, + }) + require.ErrorIs(t, err, ErrReplicatorCollections) +} + +func TestSetReplicator_WithValidCollection_ShouldSucceed(t *testing.T) { + ctx := context.Background() + db, err := newDefraMemoryDB(ctx) + require.NoError(t, err) + defer db.Close() + sub, err := db.events.Subscribe(event.ReplicatorName) + require.NoError(t, err) + cols, err := db.AddSchema(ctx, `type User { name: String }`) + require.NoError(t, err) + schema, err := db.GetSchemaByVersionID(ctx, cols[0].SchemaVersionID) + require.NoError(t, err) + err = db.SetReplicator(ctx, client.Replicator{ + Info: peer.AddrInfo{ID: "other"}, + Schemas: []string{"User"}, + }) + require.NoError(t, err) + for msg := range sub.Message() { + if msg.Name == event.ReplicatorName { + replicator := msg.Data.(event.Replicator) + require.Equal(t, peer.ID("other"), replicator.Info.ID) + require.Equal(t, map[string]struct{}{schema.Root: {}}, replicator.Schemas) + break + } + } +} + +func TestSetReplicator_WithValidCollectionWithDoc_ShouldSucceed(t *testing.T) { + ctx := context.Background() + db, err := newDefraMemoryDB(ctx) + require.NoError(t, err) + defer db.Close() + sub, err := db.events.Subscribe(event.ReplicatorName) + require.NoError(t, err) + cols, err := db.AddSchema(ctx, `type User { name: String }`) + require.NoError(t, err) + col, err := db.GetCollectionByName(ctx, cols[0].Name.Value()) + require.NoError(t, err) + doc, err := client.NewDocFromMap(map[string]any{"name": "Alice"}, col.Definition()) + require.NoError(t, err) + err = col.Create(ctx, doc) + require.NoError(t, err) + + err = db.SetReplicator(ctx, 
client.Replicator{ + Info: peer.AddrInfo{ID: "other"}, + Schemas: []string{"User"}, + }) + require.NoError(t, err) + for msg := range sub.Message() { + if msg.Name == event.ReplicatorName { + replicator := msg.Data.(event.Replicator) + require.Equal(t, peer.ID("other"), replicator.Info.ID) + require.Equal(t, map[string]struct{}{col.SchemaRoot(): {}}, replicator.Schemas) + for docEvt := range replicator.Docs { + require.Equal(t, doc.ID().String(), docEvt.DocID) + } + break + } + } +} + +func TestDeleteReplicator_WithEmptyPeerInfo_ShouldError(t *testing.T) { + ctx := context.Background() + db, err := newDefraMemoryDB(ctx) + require.NoError(t, err) + defer db.Close() + err = db.DeleteReplicator(ctx, client.Replicator{}) + require.ErrorContains(t, err, "empty peer ID") +} + +func TestDeleteReplicator_WithValidCollection_ShouldSucceed(t *testing.T) { + b, err := b58.Decode("12D3KooWB8Na2fKhdGtej5GjoVhmBBYFvqXiqFCSkR7fJFWHUbNr") + require.NoError(t, err) + peerID, err := peer.IDFromBytes(b) + require.NoError(t, err) + ctx := context.Background() + db, err := newDefraMemoryDB(ctx) + require.NoError(t, err) + defer db.Close() + sub, err := db.events.Subscribe(event.ReplicatorName) + require.NoError(t, err) + cols, err := db.AddSchema(ctx, `type User { name: String }`) + require.NoError(t, err) + schema, err := db.GetSchemaByVersionID(ctx, cols[0].SchemaVersionID) + require.NoError(t, err) + err = db.SetReplicator(ctx, client.Replicator{ + Info: peer.AddrInfo{ID: peerID}, + Schemas: []string{"User"}, + }) + require.NoError(t, err) + for msg := range sub.Message() { + if msg.Name == event.ReplicatorName { + replicator := msg.Data.(event.Replicator) + require.Equal(t, peer.ID(peerID), replicator.Info.ID) + require.Equal(t, map[string]struct{}{schema.Root: {}}, replicator.Schemas) + break + } + } + + err = db.DeleteReplicator(ctx, client.Replicator{Info: peer.AddrInfo{ID: peerID}}) + require.NoError(t, err) + for msg := range sub.Message() { + if msg.Name == event.ReplicatorName { + replicator := msg.Data.(event.Replicator) + require.Equal(t, peer.ID(peerID), replicator.Info.ID) + require.Equal(t, map[string]struct{}{}, replicator.Schemas) + break + } + } +} + +func TestGetAllReplicators_WithValidCollection_ShouldSucceed(t *testing.T) { + b, err := b58.Decode("12D3KooWB8Na2fKhdGtej5GjoVhmBBYFvqXiqFCSkR7fJFWHUbNr") + require.NoError(t, err) + peerID, err := peer.IDFromBytes(b) + require.NoError(t, err) + ctx := context.Background() + db, err := newDefraMemoryDB(ctx) + require.NoError(t, err) + defer db.Close() + sub, err := db.events.Subscribe(event.ReplicatorName) + require.NoError(t, err) + cols, err := db.AddSchema(ctx, `type User { name: String }`) + require.NoError(t, err) + schema, err := db.GetSchemaByVersionID(ctx, cols[0].SchemaVersionID) + require.NoError(t, err) + err = db.SetReplicator(ctx, client.Replicator{ + Info: peer.AddrInfo{ID: peerID}, + Schemas: []string{"User"}, + }) + require.NoError(t, err) + for msg := range sub.Message() { + if msg.Name == event.ReplicatorName { + replicator := msg.Data.(event.Replicator) + require.Equal(t, peer.ID(peerID), replicator.Info.ID) + require.Equal(t, map[string]struct{}{schema.Root: {}}, replicator.Schemas) + break + } + } + + reps, err := db.GetAllReplicators(ctx) + require.NoError(t, err) + require.Equal(t, peerID, reps[0].Info.ID) + require.Equal(t, []string{schema.Root}, reps[0].Schemas) +} diff --git a/net/peer_collection.go b/internal/db/p2p_schema_root.go similarity index 60% rename from net/peer_collection.go rename to 
internal/db/p2p_schema_root.go index 1676a7be43..b63fc9cb00 100644 --- a/net/peer_collection.go +++ b/internal/db/p2p_schema_root.go @@ -1,4 +1,4 @@ -// Copyright 2023 Democratized Data Foundation +// Copyright 2024 Democratized Data Foundation // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. @@ -8,23 +8,24 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. -package net +package db import ( "context" dsq "github.com/ipfs/go-datastore/query" + "github.com/libp2p/go-libp2p/core/peer" "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/event" "github.com/sourcenetwork/defradb/internal/core" - "github.com/sourcenetwork/defradb/internal/db" ) const marker = byte(0xff) -func (p *Peer) AddP2PCollections(ctx context.Context, collectionIDs []string) error { - txn, err := p.db.NewTxn(ctx, false) +func (db *db) AddP2PCollections(ctx context.Context, collectionIDs []string) error { + txn, err := db.NewTxn(ctx, false) if err != nil { return err } @@ -32,12 +33,12 @@ func (p *Peer) AddP2PCollections(ctx context.Context, collectionIDs []string) er // TODO-ACP: Support ACP <> P2P - https://github.com/sourcenetwork/defradb/issues/2366 // ctx = db.SetContextIdentity(ctx, identity) - ctx = db.SetContextTxn(ctx, txn) + ctx = SetContextTxn(ctx, txn) // first let's make sure the collections actually exists storeCollections := []client.Collection{} for _, col := range collectionIDs { - storeCol, err := p.db.GetCollections( + storeCol, err := db.GetCollections( ctx, client.CollectionFetchOptions{ SchemaRoot: immutable.Some(col), @@ -60,6 +61,8 @@ func (p *Peer) AddP2PCollections(ctx context.Context, collectionIDs []string) er } } + evt := event.P2PTopic{} + // Ensure we can add all the collections to the store on the transaction // before adding to topics. for _, col := range storeCollections { @@ -68,45 +71,28 @@ func (p *Peer) AddP2PCollections(ctx context.Context, collectionIDs []string) er if err != nil { return err } + evt.ToAdd = append(evt.ToAdd, col.SchemaRoot()) } - // Add pubsub topics and remove them if we get an error. - addedTopics := []string{} - for _, col := range collectionIDs { - err = p.server.addPubSubTopic(col, true) - if err != nil { - return p.rollbackAddPubSubTopics(addedTopics, err) - } - addedTopics = append(addedTopics, col) - } - - // After adding the collection topics, we remove the collections' documents - // from the pubsub topics to avoid receiving duplicate events. 
- removedTopics := []string{} for _, col := range storeCollections { keyChan, err := col.GetAllDocIDs(ctx) if err != nil { return err } for key := range keyChan { - err := p.server.removePubSubTopic(key.ID.String()) - if err != nil { - return p.rollbackRemovePubSubTopics(removedTopics, err) - } - removedTopics = append(removedTopics, key.ID.String()) + evt.ToRemove = append(evt.ToRemove, key.ID.String()) } } - if err = txn.Commit(ctx); err != nil { - err = p.rollbackRemovePubSubTopics(removedTopics, err) - return p.rollbackAddPubSubTopics(addedTopics, err) - } + txn.OnSuccess(func() { + db.events.Publish(event.NewMessage(event.P2PTopicName, evt)) + }) - return nil + return txn.Commit(ctx) } -func (p *Peer) RemoveP2PCollections(ctx context.Context, collectionIDs []string) error { - txn, err := p.db.NewTxn(ctx, false) +func (db *db) RemoveP2PCollections(ctx context.Context, collectionIDs []string) error { + txn, err := db.NewTxn(ctx, false) if err != nil { return err } @@ -114,12 +100,12 @@ func (p *Peer) RemoveP2PCollections(ctx context.Context, collectionIDs []string) // TODO-ACP: Support ACP <> P2P - https://github.com/sourcenetwork/defradb/issues/2366 // ctx = db.SetContextIdentity(ctx, identity) - ctx = db.SetContextTxn(ctx, txn) + ctx = SetContextTxn(ctx, txn) // first let's make sure the collections actually exists storeCollections := []client.Collection{} for _, col := range collectionIDs { - storeCol, err := p.db.GetCollections( + storeCol, err := db.GetCollections( ctx, client.CollectionFetchOptions{ SchemaRoot: immutable.Some(col), @@ -134,6 +120,8 @@ func (p *Peer) RemoveP2PCollections(ctx context.Context, collectionIDs []string) storeCollections = append(storeCollections, storeCol...) } + evt := event.P2PTopic{} + // Ensure we can remove all the collections to the store on the transaction // before adding to topics. for _, col := range storeCollections { @@ -142,49 +130,32 @@ func (p *Peer) RemoveP2PCollections(ctx context.Context, collectionIDs []string) if err != nil { return err } + evt.ToRemove = append(evt.ToRemove, col.SchemaRoot()) } - // Remove pubsub topics and add them back if we get an error. - removedTopics := []string{} - for _, col := range collectionIDs { - err = p.server.removePubSubTopic(col) - if err != nil { - return p.rollbackRemovePubSubTopics(removedTopics, err) - } - removedTopics = append(removedTopics, col) - } - - // After removing the collection topics, we add back the collections' documents - // to the pubsub topics. 
- addedTopics := []string{} for _, col := range storeCollections { keyChan, err := col.GetAllDocIDs(ctx) if err != nil { return err } for key := range keyChan { - err := p.server.addPubSubTopic(key.ID.String(), true) - if err != nil { - return p.rollbackAddPubSubTopics(addedTopics, err) - } - addedTopics = append(addedTopics, key.ID.String()) + evt.ToAdd = append(evt.ToAdd, key.ID.String()) } } - if err = txn.Commit(ctx); err != nil { - err = p.rollbackAddPubSubTopics(addedTopics, err) - return p.rollbackRemovePubSubTopics(removedTopics, err) - } + txn.OnSuccess(func() { + db.events.Publish(event.NewMessage(event.P2PTopicName, evt)) + }) - return nil + return txn.Commit(ctx) } -func (p *Peer) GetAllP2PCollections(ctx context.Context) ([]string, error) { - txn, err := p.db.NewTxn(p.ctx, true) +func (db *db) GetAllP2PCollections(ctx context.Context) ([]string, error) { + txn, err := db.NewTxn(ctx, true) if err != nil { return nil, err } - defer txn.Discard(p.ctx) + defer txn.Discard(ctx) query := dsq.Query{ Prefix: core.NewP2PCollectionKey("").ToString(), @@ -205,3 +176,52 @@ func (p *Peer) GetAllP2PCollections(ctx context.Context) ([]string, error) { return collectionIDs, nil } + +func (db *db) PeerInfo() peer.AddrInfo { + db.peerMutex.RLock() + defer db.peerMutex.RUnlock() + if db.peerInfo.HasValue() { + return db.peerInfo.Value() + } + return peer.AddrInfo{} +} + +func (db *db) loadP2PCollections(ctx context.Context) error { + schemaRoots, err := db.GetAllP2PCollections(ctx) + if err != nil { + return err + } + db.events.Publish(event.NewMessage(event.P2PTopicName, event.P2PTopic{ + ToAdd: schemaRoots, + })) + + colMap := make(map[string]struct{}) + for _, schemaRoot := range schemaRoots { + colMap[schemaRoot] = struct{}{} + } + + // Get all DocIDs across all collections in the DB + cols, err := db.GetCollections(ctx, client.CollectionFetchOptions{}) + if err != nil { + return err + } + + for _, col := range cols { + // If we subscribed to the collection, we skip subscribing to the collection's docIDs. + if _, ok := colMap[col.SchemaRoot()]; ok { + continue + } + // TODO-ACP: Support ACP <> P2P - https://github.com/sourcenetwork/defradb/issues/2366 + docIDChan, err := col.GetAllDocIDs(ctx) + if err != nil { + return err + } + + for docID := range docIDChan { + db.events.Publish(event.NewMessage(event.P2PTopicName, event.P2PTopic{ + ToAdd: []string{docID.ID.String()}, + })) + } + } + return nil +} diff --git a/internal/db/p2p_schema_root_test.go b/internal/db/p2p_schema_root_test.go new file mode 100644 index 0000000000..7058ac54cc --- /dev/null +++ b/internal/db/p2p_schema_root_test.go @@ -0,0 +1,262 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package db + +import ( + "context" + "encoding/hex" + "fmt" + "testing" + + "github.com/decred/dcrd/dcrec/secp256k1/v4" + "github.com/stretchr/testify/require" + + "github.com/sourcenetwork/immutable" + + "github.com/sourcenetwork/defradb/acp" + acpIdentity "github.com/sourcenetwork/defradb/acp/identity" + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/datastore/memory" + "github.com/sourcenetwork/defradb/event" +) + +func TestAddP2PCollection_WithInvalidCollection_ShouldError(t *testing.T) { + ctx := context.Background() + db, err := newDefraMemoryDB(ctx) + require.NoError(t, err) + defer db.Close() + err = db.AddP2PCollections(ctx, []string{"invalidCollection"}) + require.ErrorIs(t, err, client.ErrCollectionNotFound) +} + +func TestAddP2PCollection_WithValidCollection_ShouldSucceed(t *testing.T) { + ctx := context.Background() + db, err := newDefraMemoryDB(ctx) + require.NoError(t, err) + defer db.Close() + sub, err := db.events.Subscribe(event.P2PTopicName) + require.NoError(t, err) + cols, err := db.AddSchema(ctx, `type User { name: String }`) + require.NoError(t, err) + schema, err := db.GetSchemaByVersionID(ctx, cols[0].SchemaVersionID) + require.NoError(t, err) + err = db.AddP2PCollections(ctx, []string{schema.Root}) + require.NoError(t, err) + // Check that the event was published + for msg := range sub.Message() { + p2pTopic := msg.Data.(event.P2PTopic) + require.Equal(t, []string{schema.Root}, p2pTopic.ToAdd) + break + } +} + +func TestAddP2PCollection_WithValidCollectionAndDoc_ShouldSucceed(t *testing.T) { + ctx := context.Background() + db, err := newDefraMemoryDB(ctx) + require.NoError(t, err) + defer db.Close() + sub, err := db.events.Subscribe(event.P2PTopicName) + require.NoError(t, err) + cols, err := db.AddSchema(ctx, `type User { name: String }`) + require.NoError(t, err) + col, err := db.GetCollectionByName(ctx, cols[0].Name.Value()) + require.NoError(t, err) + doc, err := client.NewDocFromMap(map[string]any{"name": "Alice"}, col.Definition()) + require.NoError(t, err) + err = col.Create(ctx, doc) + require.NoError(t, err) + + err = db.AddP2PCollections(ctx, []string{col.SchemaRoot()}) + require.NoError(t, err) + // Check that the event was published + for msg := range sub.Message() { + p2pTopic := msg.Data.(event.P2PTopic) + require.Equal(t, []string{col.SchemaRoot()}, p2pTopic.ToAdd) + require.Equal(t, []string{doc.ID().String()}, p2pTopic.ToRemove) + break + } +} + +func TestAddP2PCollection_WithMultipleValidCollections_ShouldSucceed(t *testing.T) { + ctx := context.Background() + db, err := newDefraMemoryDB(ctx) + require.NoError(t, err) + defer db.Close() + sub, err := db.events.Subscribe(event.P2PTopicName) + require.NoError(t, err) + cols1, err := db.AddSchema(ctx, `type User { name: String }`) + require.NoError(t, err) + schema1, err := db.GetSchemaByVersionID(ctx, cols1[0].SchemaVersionID) + require.NoError(t, err) + cols2, err := db.AddSchema(ctx, `type Books { name: String }`) + require.NoError(t, err) + schema2, err := db.GetSchemaByVersionID(ctx, cols2[0].SchemaVersionID) + require.NoError(t, err) + err = db.AddP2PCollections(ctx, []string{schema1.Root, schema2.Root}) + require.NoError(t, err) + // Check that the event was published + for msg := range sub.Message() { + p2pTopic := msg.Data.(event.P2PTopic) + require.Equal(t, []string{schema1.Root, schema2.Root}, p2pTopic.ToAdd) + break + } +} + +func TestRemoveP2PCollection_WithInvalidCollection_ShouldError(t *testing.T) { + ctx := context.Background() + db, err := 
newDefraMemoryDB(ctx) + require.NoError(t, err) + defer db.Close() + err = db.RemoveP2PCollections(ctx, []string{"invalidCollection"}) + require.ErrorIs(t, err, client.ErrCollectionNotFound) +} + +func TestRemoveP2PCollection_WithValidCollection_ShouldSucceed(t *testing.T) { + ctx := context.Background() + db, err := newDefraMemoryDB(ctx) + require.NoError(t, err) + defer db.Close() + sub, err := db.events.Subscribe(event.P2PTopicName) + require.NoError(t, err) + cols, err := db.AddSchema(ctx, `type User { name: String }`) + require.NoError(t, err) + schema, err := db.GetSchemaByVersionID(ctx, cols[0].SchemaVersionID) + require.NoError(t, err) + err = db.AddP2PCollections(ctx, []string{schema.Root}) + require.NoError(t, err) + // Check that the event was published + for msg := range sub.Message() { + p2pTopic := msg.Data.(event.P2PTopic) + require.Equal(t, []string{schema.Root}, p2pTopic.ToAdd) + break + } + err = db.RemoveP2PCollections(ctx, []string{schema.Root}) + require.NoError(t, err) + // Check that the event was published + for msg := range sub.Message() { + p2pTopic := msg.Data.(event.P2PTopic) + require.Equal(t, []string{schema.Root}, p2pTopic.ToRemove) + break + } +} + +func TestRemoveP2PCollection_WithValidCollectionAndDoc_ShouldSucceed(t *testing.T) { + ctx := context.Background() + db, err := newDefraMemoryDB(ctx) + require.NoError(t, err) + defer db.Close() + sub, err := db.events.Subscribe(event.P2PTopicName) + require.NoError(t, err) + cols, err := db.AddSchema(ctx, `type User { name: String }`) + require.NoError(t, err) + col, err := db.GetCollectionByName(ctx, cols[0].Name.Value()) + require.NoError(t, err) + doc, err := client.NewDocFromMap(map[string]any{"name": "Alice"}, col.Definition()) + require.NoError(t, err) + err = col.Create(ctx, doc) + require.NoError(t, err) + + err = db.AddP2PCollections(ctx, []string{col.SchemaRoot()}) + require.NoError(t, err) + // Check that the event was published + for msg := range sub.Message() { + p2pTopic := msg.Data.(event.P2PTopic) + require.Equal(t, []string{col.SchemaRoot()}, p2pTopic.ToAdd) + require.Equal(t, []string{doc.ID().String()}, p2pTopic.ToRemove) + break + } + err = db.RemoveP2PCollections(ctx, []string{col.SchemaRoot()}) + require.NoError(t, err) + // Check that the event was published + for msg := range sub.Message() { + p2pTopic := msg.Data.(event.P2PTopic) + require.Equal(t, []string{col.SchemaRoot()}, p2pTopic.ToRemove) + require.Equal(t, []string{doc.ID().String()}, p2pTopic.ToAdd) + break + } +} + +func TestGetAllP2PCollections_WithMultipleValidCollections_ShouldSucceed(t *testing.T) { + ctx := context.Background() + db, err := newDefraMemoryDB(ctx) + require.NoError(t, err) + defer db.Close() + cols1, err := db.AddSchema(ctx, `type User { name: String }`) + require.NoError(t, err) + schema1, err := db.GetSchemaByVersionID(ctx, cols1[0].SchemaVersionID) + require.NoError(t, err) + cols2, err := db.AddSchema(ctx, `type Books { name: String }`) + require.NoError(t, err) + schema2, err := db.GetSchemaByVersionID(ctx, cols2[0].SchemaVersionID) + require.NoError(t, err) + err = db.AddP2PCollections(ctx, []string{schema1.Root, schema2.Root}) + require.NoError(t, err) + cols, err := db.GetAllP2PCollections(ctx) + require.NoError(t, err) + require.Equal(t, []string{schema2.Root, schema1.Root}, cols) +} + +// This test documents that we don't allow adding p2p collections that have a policy +// until the following is implemented: +// TODO-ACP: ACP <> P2P https://github.com/sourcenetwork/defradb/issues/2366 +func 
TestAddP2PCollectionsWithPermissionedCollection_Error(t *testing.T) { + ctx := context.Background() + rootstore := memory.NewDatastore(ctx) + db, err := newDB(ctx, rootstore, immutable.Some[acp.ACP](acp.NewLocalACP()), nil) + require.NoError(t, err) + + policy := ` + name: test + description: a policy + actor: + name: actor + resources: + user: + permissions: + read: + expr: owner + write: + expr: owner + relations: + owner: + types: + - actor + ` + + privKeyBytes, err := hex.DecodeString("028d53f37a19afb9a0dbc5b4be30c65731479ee8cfa0c9bc8f8bf198cc3c075f") + require.NoError(t, err) + privKey := secp256k1.PrivKeyFromBytes(privKeyBytes) + identity, err := acpIdentity.FromPrivateKey(privKey) + require.NoError(t, err) + + ctx = SetContextIdentity(ctx, identity) + policyResult, err := db.AddPolicy(ctx, policy) + policyID := policyResult.PolicyID + require.NoError(t, err) + require.Equal(t, "7b5ed30570e8d9206027ef6d5469879a6c1ea4595625c6ca33a19063a6ed6214", policyID) + + schema := fmt.Sprintf(` + type User @policy(id: "%s", resource: "user") { + name: String + age: Int + } + `, policyID, + ) + _, err = db.AddSchema(ctx, schema) + require.NoError(t, err) + + col, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + err = db.AddP2PCollections(ctx, []string{col.SchemaRoot()}) + require.Error(t, err) + require.ErrorIs(t, err, ErrP2PColHasPolicy) +} diff --git a/net/client_test.go b/net/client_test.go index 6a43805ae8..43c4ec1e01 100644 --- a/net/client_test.go +++ b/net/client_test.go @@ -45,8 +45,9 @@ var def = client.CollectionDefinition{ func TestPushlogWithDialFailure(t *testing.T) { ctx := context.Background() - _, n := newTestNode(ctx, t) - defer n.Close() + db, p := newTestPeer(ctx, t) + defer db.Close() + defer p.Close() doc, err := client.NewDocFromJSON([]byte(`{"test": "test"}`), def) require.NoError(t, err) @@ -56,13 +57,13 @@ func TestPushlogWithDialFailure(t *testing.T) { cid, err := createCID(doc) require.NoError(t, err) - n.server.opts = append( - n.server.opts, + p.server.opts = append( + p.server.opts, grpc.WithTransportCredentials(nil), grpc.WithCredentialsBundle(nil), ) - err = n.server.pushLog(ctx, event.Update{ + err = p.server.pushLog(ctx, event.Update{ DocID: id.String(), Cid: cid, SchemaRoot: "test", @@ -73,8 +74,9 @@ func TestPushlogWithDialFailure(t *testing.T) { func TestPushlogWithInvalidPeerID(t *testing.T) { ctx := context.Background() - _, n := newTestNode(ctx, t) - defer n.Close() + db, p := newTestPeer(ctx, t) + defer db.Close() + defer p.Close() doc, err := client.NewDocFromJSON([]byte(`{"test": "test"}`), def) require.NoError(t, err) @@ -84,7 +86,7 @@ func TestPushlogWithInvalidPeerID(t *testing.T) { cid, err := createCID(doc) require.NoError(t, err) - err = n.server.pushLog(ctx, event.Update{ + err = p.server.pushLog(ctx, event.Update{ DocID: id.String(), Cid: cid, SchemaRoot: "test", @@ -95,27 +97,29 @@ func TestPushlogWithInvalidPeerID(t *testing.T) { func TestPushlogW_WithValidPeerID_NoError(t *testing.T) { ctx := context.Background() - _, n1 := newTestNode(ctx, t) - defer n1.Close() - n1.Start() - _, n2 := newTestNode(ctx, t) - defer n2.Close() - n2.Start() + db1, p1 := newTestPeer(ctx, t) + defer db1.Close() + defer p1.Close() + p1.Start() + db2, p2 := newTestPeer(ctx, t) + defer p2.Close() + defer db2.Close() + p2.Start() - err := n1.host.Connect(ctx, n2.PeerInfo()) + err := p1.host.Connect(ctx, p2.PeerInfo()) require.NoError(t, err) - _, err = n1.db.AddSchema(ctx, `type User { + _, err = db1.AddSchema(ctx, `type User { name: String }`) 
require.NoError(t, err) - _, err = n2.db.AddSchema(ctx, `type User { + _, err = db2.AddSchema(ctx, `type User { name: String }`) require.NoError(t, err) - col, err := n1.db.GetCollectionByName(ctx, "User") + col, err := db1.GetCollectionByName(ctx, "User") require.NoError(t, err) doc, err := client.NewDocFromJSON([]byte(`{"name": "test"}`), col.Definition()) @@ -124,22 +128,22 @@ func TestPushlogW_WithValidPeerID_NoError(t *testing.T) { err = col.Save(ctx, doc) require.NoError(t, err) - col, err = n2.db.GetCollectionByName(ctx, "User") + col, err = db2.GetCollectionByName(ctx, "User") require.NoError(t, err) err = col.Save(ctx, doc) require.NoError(t, err) - headCID, err := getHead(ctx, n1.db, doc.ID()) + headCID, err := getHead(ctx, db1, doc.ID()) require.NoError(t, err) - b, err := n1.db.Blockstore().AsIPLDStorage().Get(ctx, headCID.KeyString()) + b, err := db1.Blockstore().AsIPLDStorage().Get(ctx, headCID.KeyString()) require.NoError(t, err) - err = n1.server.pushLog(ctx, event.Update{ + err = p1.server.pushLog(ctx, event.Update{ DocID: doc.ID().String(), Cid: headCID, SchemaRoot: col.SchemaRoot(), Block: b, - }, n2.PeerInfo().ID) + }, p2.PeerInfo().ID) require.NoError(t, err) } diff --git a/net/dialer_test.go b/net/dialer_test.go index 7f37611ec3..4157cf0aae 100644 --- a/net/dialer_test.go +++ b/net/dialer_test.go @@ -23,17 +23,23 @@ import ( func TestDial_WithConnectedPeer_NoError(t *testing.T) { db1 := FixtureNewMemoryDBWithBroadcaster(t) db2 := FixtureNewMemoryDBWithBroadcaster(t) + defer db1.Close() + defer db2.Close() ctx := context.Background() - n1, err := NewNode( + n1, err := NewPeer( ctx, - db1, + db1.Root(), + db1.Blockstore(), + db1.Events(), WithListenAddresses("/ip4/0.0.0.0/tcp/0"), ) assert.NoError(t, err) defer n1.Close() - n2, err := NewNode( + n2, err := NewPeer( ctx, - db2, + db2.Root(), + db2.Blockstore(), + db2.Events(), WithListenAddresses("/ip4/0.0.0.0/tcp/0"), ) assert.NoError(t, err) @@ -50,17 +56,23 @@ func TestDial_WithConnectedPeer_NoError(t *testing.T) { func TestDial_WithConnectedPeerAndSecondConnection_NoError(t *testing.T) { db1 := FixtureNewMemoryDBWithBroadcaster(t) db2 := FixtureNewMemoryDBWithBroadcaster(t) + defer db1.Close() + defer db2.Close() ctx := context.Background() - n1, err := NewNode( + n1, err := NewPeer( ctx, - db1, + db1.Root(), + db1.Blockstore(), + db1.Events(), WithListenAddresses("/ip4/0.0.0.0/tcp/0"), ) assert.NoError(t, err) defer n1.Close() - n2, err := NewNode( + n2, err := NewPeer( ctx, - db2, + db2.Root(), + db2.Blockstore(), + db2.Events(), WithListenAddresses("/ip4/0.0.0.0/tcp/0"), ) assert.NoError(t, err) @@ -80,17 +92,23 @@ func TestDial_WithConnectedPeerAndSecondConnection_NoError(t *testing.T) { func TestDial_WithConnectedPeerAndSecondConnectionWithConnectionShutdown_ClosingConnectionError(t *testing.T) { db1 := FixtureNewMemoryDBWithBroadcaster(t) db2 := FixtureNewMemoryDBWithBroadcaster(t) + defer db1.Close() + defer db2.Close() ctx := context.Background() - n1, err := NewNode( + n1, err := NewPeer( ctx, - db1, + db1.Root(), + db1.Blockstore(), + db1.Events(), WithListenAddresses("/ip4/0.0.0.0/tcp/0"), ) assert.NoError(t, err) defer n1.Close() - n2, err := NewNode( + n2, err := NewPeer( ctx, - db2, + db2.Root(), + db2.Blockstore(), + db2.Events(), WithListenAddresses("/ip4/0.0.0.0/tcp/0"), ) assert.NoError(t, err) diff --git a/net/errors.go b/net/errors.go index eb53a8e2a5..615f1088ef 100644 --- a/net/errors.go +++ b/net/errors.go @@ -13,8 +13,6 @@ package net import ( "fmt" - "github.com/libp2p/go-libp2p/core/peer" - 
"github.com/sourcenetwork/defradb/errors" ) @@ -23,23 +21,16 @@ const ( errFailedToGetDocID = "failed to get DocID from broadcast message" errPublishingToDocIDTopic = "can't publish log %s for docID %s" errPublishingToSchemaTopic = "can't publish log %s for schema %s" - errReplicatorExists = "replicator already exists for %s with peerID %s" - errReplicatorDocID = "failed to get docID for replicator %s with peerID %s" - errReplicatorCollections = "failed to get collections for replicator" errCheckingForExistingBlock = "failed to check for existing block" ) var ( - ErrP2PColHasPolicy = errors.New("p2p collection specified has a policy on it") - ErrReplicatorColHasPolicy = errors.New("replicator collection specified has a policy on it") - ErrReplicatorSomeColsHavePolicy = errors.New("replicator can not use all collections, as some have policy") - ErrPeerConnectionWaitTimout = errors.New("waiting for peer connection timed out") - ErrPubSubWaitTimeout = errors.New("waiting for pubsub timed out") - ErrPushLogWaitTimeout = errors.New("waiting for pushlog timed out") - ErrNilDB = errors.New("database object can't be nil") - ErrNilUpdateChannel = errors.New("tried to subscribe to update channel, but update channel is nil") - ErrSelfTargetForReplicator = errors.New("can't target ourselves as a replicator") - ErrCheckingForExistingBlock = errors.New(errCheckingForExistingBlock) + ErrPeerConnectionWaitTimout = errors.New("waiting for peer connection timed out") + ErrPubSubWaitTimeout = errors.New("waiting for pubsub timed out") + ErrPushLogWaitTimeout = errors.New("waiting for pushlog timed out") + ErrNilDB = errors.New("database object can't be nil") + ErrNilUpdateChannel = errors.New("tried to subscribe to update channel, but update channel is nil") + ErrCheckingForExistingBlock = errors.New(errCheckingForExistingBlock) ) func NewErrPushLog(inner error, kv ...errors.KV) error { @@ -58,18 +49,6 @@ func NewErrPublishingToSchemaTopic(inner error, cid, docID string, kv ...errors. return errors.Wrap(fmt.Sprintf(errPublishingToSchemaTopic, cid, docID), inner, kv...) } -func NewErrReplicatorExists(collection string, peerID peer.ID, kv ...errors.KV) error { - return errors.New(fmt.Sprintf(errReplicatorExists, collection, peerID), kv...) -} - -func NewErrReplicatorDocID(inner error, collection string, peerID peer.ID, kv ...errors.KV) error { - return errors.Wrap(fmt.Sprintf(errReplicatorDocID, collection, peerID), inner, kv...) -} - -func NewErrReplicatorCollections(inner error, kv ...errors.KV) error { - return errors.Wrap(errReplicatorCollections, inner, kv...) -} - func NewErrCheckingForExistingBlock(inner error, cid string) error { return errors.Wrap(errCheckingForExistingBlock, inner, errors.NewKV("cid", cid)) } diff --git a/net/node.go b/net/node.go deleted file mode 100644 index 3338ac0f04..0000000000 --- a/net/node.go +++ /dev/null @@ -1,282 +0,0 @@ -// Copyright 2023 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -/* -Package node is responsible for interfacing a given DefraDB instance with a networked peer instance -and GRPC server. - -Basically it combines db/DB, net/Peer, and net/Server into a single Node object. 
-*/ -package net - -import ( - "context" - "fmt" - "sync" - "sync/atomic" - "time" - - "github.com/ipfs/boxo/ipns" - ds "github.com/ipfs/go-datastore" - libp2p "github.com/libp2p/go-libp2p" - dht "github.com/libp2p/go-libp2p-kad-dht" - dualdht "github.com/libp2p/go-libp2p-kad-dht/dual" - pubsub "github.com/libp2p/go-libp2p-pubsub" - record "github.com/libp2p/go-libp2p-record" - libp2pCrypto "github.com/libp2p/go-libp2p/core/crypto" - libp2pEvent "github.com/libp2p/go-libp2p/core/event" - "github.com/libp2p/go-libp2p/core/host" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/libp2p/go-libp2p/core/routing" - "github.com/multiformats/go-multiaddr" - - "github.com/sourcenetwork/corelog" - "github.com/sourcenetwork/go-libp2p-pubsub-rpc/finalizer" - - // @TODO: https://github.com/sourcenetwork/defradb/issues/1902 - //nolint:staticcheck - "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoreds" - "github.com/libp2p/go-libp2p/p2p/net/connmgr" - - "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/crypto" - "github.com/sourcenetwork/defradb/event" -) - -var _ client.P2P = (*Node)(nil) - -// Node is a networked peer instance of DefraDB. -type Node struct { - // embed the DB interface into the node - client.DB - - *Peer - - ctx context.Context - cancel context.CancelFunc - dhtClose func() error -} - -// NewNode creates a new network node instance of DefraDB, wired into libp2p. -func NewNode( - ctx context.Context, - db client.DB, - opts ...NodeOpt, -) (node *Node, err error) { - options := DefaultOptions() - for _, opt := range opts { - opt(options) - } - - connManager, err := connmgr.NewConnManager(100, 400, connmgr.WithGracePeriod(time.Second*20)) - if err != nil { - return nil, err - } - - var listenAddresses []multiaddr.Multiaddr - for _, addr := range options.ListenAddresses { - listenAddress, err := multiaddr.NewMultiaddr(addr) - if err != nil { - return nil, err - } - listenAddresses = append(listenAddresses, listenAddress) - } - - fin := finalizer.NewFinalizer() - - ctx, cancel := context.WithCancel(ctx) - defer func() { - if node == nil { - cancel() - } - }() - - peerstore, err := pstoreds.NewPeerstore(ctx, db.Peerstore(), pstoreds.DefaultOpts()) - if err != nil { - return nil, fin.Cleanup(err) - } - fin.Add(peerstore) - - if options.PrivateKey == nil { - // generate an ephemeral private key - key, err := crypto.GenerateEd25519() - if err != nil { - return nil, fin.Cleanup(err) - } - options.PrivateKey = key - } - - // unmarshal the private key bytes - privateKey, err := libp2pCrypto.UnmarshalEd25519PrivateKey(options.PrivateKey) - if err != nil { - return nil, fin.Cleanup(err) - } - - var ddht *dualdht.DHT - - libp2pOpts := []libp2p.Option{ - libp2p.ConnectionManager(connManager), - libp2p.DefaultTransports, - libp2p.Identity(privateKey), - libp2p.ListenAddrs(listenAddresses...), - libp2p.Peerstore(peerstore), - libp2p.Routing(func(h host.Host) (routing.PeerRouting, error) { - // Delete this line and uncomment the next 6 lines once we remove batchable datastore support. - // var store ds.Batching - // // If `rootstore` doesn't implement `Batching`, `nil` will be passed - // // to newDHT which will cause the DHT to be stored in memory. - // if dsb, isBatching := rootstore.(ds.Batching); isBatching { - // store = dsb - // } - store := db.Root() // Delete this line once we remove batchable datastore support. 
- ddht, err = newDHT(ctx, h, store) - return ddht, err - }), - } - if !options.EnableRelay { - libp2pOpts = append(libp2pOpts, libp2p.DisableRelay()) - } - - h, err := libp2p.New(libp2pOpts...) - if err != nil { - return nil, fin.Cleanup(err) - } - log.InfoContext( - ctx, - "Created LibP2P host", - corelog.Any("PeerId", h.ID()), - corelog.Any("Address", options.ListenAddresses), - ) - - var ps *pubsub.PubSub - if options.EnablePubSub { - ps, err = pubsub.NewGossipSub( - ctx, - h, - pubsub.WithPeerExchange(true), - pubsub.WithFloodPublish(true), - ) - if err != nil { - return nil, fin.Cleanup(err) - } - } - - peer, err := NewPeer( - ctx, - db, - h, - ddht, - ps, - options.GRPCServerOptions, - options.GRPCDialOptions, - ) - if err != nil { - return nil, fin.Cleanup(err) - } - - sub, err := h.EventBus().Subscribe(&libp2pEvent.EvtPeerConnectednessChanged{}) - if err != nil { - return nil, fin.Cleanup(err) - } - // publish subscribed events to the event bus - go func() { - for val := range sub.Out() { - db.Events().Publish(event.NewMessage(event.PeerName, val)) - } - }() - - node = &Node{ - Peer: peer, - DB: db, - ctx: ctx, - cancel: cancel, - dhtClose: ddht.Close, - } - - return -} - -// Bootstrap connects to the given peers. -func (n *Node) Bootstrap(addrs []peer.AddrInfo) { - var connected uint64 - - var wg sync.WaitGroup - for _, pinfo := range addrs { - wg.Add(1) - go func(pinfo peer.AddrInfo) { - defer wg.Done() - err := n.host.Connect(n.ctx, pinfo) - if err != nil { - log.ErrorContextE(n.ctx, "Cannot connect to peer", err) - return - } - log.InfoContext(n.ctx, "Connected", corelog.Any("PeerID", pinfo.ID)) - atomic.AddUint64(&connected, 1) - }(pinfo) - } - - wg.Wait() - - if nPeers := len(addrs); int(connected) < nPeers/2 { - log.InfoContext(n.ctx, fmt.Sprintf("Only connected to %d bootstrap peers out of %d", connected, nPeers)) - } - - err := n.dht.Bootstrap(n.ctx) - if err != nil { - log.ErrorContextE(n.ctx, "Problem bootstraping using DHT", err) - return - } -} - -func (n *Node) PeerID() peer.ID { - return n.host.ID() -} - -func (n *Node) ListenAddrs() []multiaddr.Multiaddr { - return n.host.Network().ListenAddresses() -} - -func (n *Node) PeerInfo() peer.AddrInfo { - return peer.AddrInfo{ - ID: n.host.ID(), - Addrs: n.host.Network().ListenAddresses(), - } -} - -func newDHT(ctx context.Context, h host.Host, dsb ds.Batching) (*dualdht.DHT, error) { - dhtOpts := []dualdht.Option{ - dualdht.DHTOption(dht.NamespacedValidator("pk", record.PublicKeyValidator{})), - dualdht.DHTOption(dht.NamespacedValidator("ipns", ipns.Validator{KeyBook: h.Peerstore()})), - dualdht.DHTOption(dht.Concurrency(10)), - dualdht.DHTOption(dht.Mode(dht.ModeAuto)), - } - if dsb != nil { - dhtOpts = append(dhtOpts, dualdht.DHTOption(dht.Datastore(dsb))) - } - - return dualdht.New(ctx, h, dhtOpts...) -} - -// Close closes the node and all its services. 
-func (n Node) Close() { - if n.cancel != nil { - n.cancel() - } - if n.Peer != nil { - n.Peer.Close() - } - if n.dhtClose != nil { - err := n.dhtClose() - if err != nil { - log.ErrorContextE(n.ctx, "Failed to close DHT", err) - } - } - n.DB.Close() -} diff --git a/net/node_test.go b/net/node_test.go index f04e7c6bac..4d5fbeb57e 100644 --- a/net/node_test.go +++ b/net/node_test.go @@ -39,21 +39,24 @@ func FixtureNewMemoryDBWithBroadcaster(t *testing.T) client.DB { return database } -func TestNewNode_WithEnableRelay_NoError(t *testing.T) { +func TestNewPeer_WithEnableRelay_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) db, err := db.NewDB(ctx, store, acp.NoACP, nil) require.NoError(t, err) - n, err := NewNode( + defer db.Close() + n, err := NewPeer( context.Background(), - db, + db.Root(), + db.Blockstore(), + db.Events(), WithEnableRelay(true), ) require.NoError(t, err) - defer n.Close() + n.Close() } -func TestNewNode_WithDBClosed_NoError(t *testing.T) { +func TestNewPeer_WithDBClosed_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) @@ -61,44 +64,53 @@ func TestNewNode_WithDBClosed_NoError(t *testing.T) { require.NoError(t, err) db.Close() - _, err = NewNode( + _, err = NewPeer( context.Background(), - db, + db.Root(), + db.Blockstore(), + db.Events(), ) require.ErrorContains(t, err, "datastore closed") } -func TestNewNode_NoPubSub_NoError(t *testing.T) { +func TestNewPeer_NoPubSub_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) db, err := db.NewDB(ctx, store, acp.NoACP, nil) require.NoError(t, err) - n, err := NewNode( + defer db.Close() + + n, err := NewPeer( context.Background(), - db, + db.Root(), + db.Blockstore(), + db.Events(), WithEnablePubSub(false), ) require.NoError(t, err) - defer n.Close() require.Nil(t, n.ps) + n.Close() } -func TestNewNode_WithEnablePubSub_NoError(t *testing.T) { +func TestNewPeer_WithEnablePubSub_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) db, err := db.NewDB(ctx, store, acp.NoACP, nil) require.NoError(t, err) + defer db.Close() - n, err := NewNode( + n, err := NewPeer( ctx, - db, + db.Root(), + db.Blockstore(), + db.Events(), WithEnablePubSub(true), ) require.NoError(t, err) - defer n.Close() // overly simple check of validity of pubsub, avoiding the process of creating a PubSub require.NotNil(t, n.ps) + n.Close() } func TestNodeClose_NoError(t *testing.T) { @@ -106,46 +118,56 @@ func TestNodeClose_NoError(t *testing.T) { store := memory.NewDatastore(ctx) db, err := db.NewDB(ctx, store, acp.NoACP, nil) require.NoError(t, err) - n, err := NewNode( + defer db.Close() + n, err := NewPeer( context.Background(), - db, + db.Root(), + db.Blockstore(), + db.Events(), ) require.NoError(t, err) n.Close() } -func TestNewNode_BootstrapWithNoPeer_NoError(t *testing.T) { +func TestNewPeer_BootstrapWithNoPeer_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) db, err := db.NewDB(ctx, store, acp.NoACP, nil) require.NoError(t, err) + defer db.Close() - n1, err := NewNode( + n1, err := NewPeer( ctx, - db, + db.Root(), + db.Blockstore(), + db.Events(), WithListenAddresses("/ip4/0.0.0.0/tcp/0"), ) require.NoError(t, err) - defer n1.Close() n1.Bootstrap([]peer.AddrInfo{}) + n1.Close() } -func TestNewNode_BootstrapWithOnePeer_NoError(t *testing.T) { +func TestNewPeer_BootstrapWithOnePeer_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) db, err := 
db.NewDB(ctx, store, acp.NoACP, nil) require.NoError(t, err) - - n1, err := NewNode( + defer db.Close() + n1, err := NewPeer( ctx, - db, + db.Root(), + db.Blockstore(), + db.Events(), WithListenAddresses("/ip4/0.0.0.0/tcp/0"), ) require.NoError(t, err) defer n1.Close() - n2, err := NewNode( + n2, err := NewPeer( ctx, - db, + db.Root(), + db.Blockstore(), + db.Events(), WithListenAddresses("/ip4/0.0.0.0/tcp/0"), ) require.NoError(t, err) @@ -157,22 +179,27 @@ func TestNewNode_BootstrapWithOnePeer_NoError(t *testing.T) { n2.Bootstrap(addrs) } -func TestNewNode_BootstrapWithOneValidPeerAndManyInvalidPeers_NoError(t *testing.T) { +func TestNewPeer_BootstrapWithOneValidPeerAndManyInvalidPeers_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) db, err := db.NewDB(ctx, store, acp.NoACP, nil) require.NoError(t, err) + defer db.Close() - n1, err := NewNode( + n1, err := NewPeer( ctx, - db, + db.Root(), + db.Blockstore(), + db.Events(), WithListenAddresses("/ip4/0.0.0.0/tcp/0"), ) require.NoError(t, err) defer n1.Close() - n2, err := NewNode( + n2, err := NewPeer( ctx, - db, + db.Root(), + db.Blockstore(), + db.Events(), WithListenAddresses("/ip4/0.0.0.0/tcp/0"), ) require.NoError(t, err) @@ -192,13 +219,16 @@ func TestListenAddrs_WithListenAddresses_NoError(t *testing.T) { store := memory.NewDatastore(ctx) db, err := db.NewDB(ctx, store, acp.NoACP, nil) require.NoError(t, err) - n, err := NewNode( + defer db.Close() + + n, err := NewPeer( context.Background(), - db, + db.Root(), + db.Blockstore(), + db.Events(), WithListenAddresses("/ip4/0.0.0.0/tcp/0"), ) require.NoError(t, err) - defer n.Close() - require.Contains(t, n.ListenAddrs()[0].String(), "/tcp/") + n.Close() } diff --git a/net/peer.go b/net/peer.go index 17ac7ad2e3..2efef55ec0 100644 --- a/net/peer.go +++ b/net/peer.go @@ -14,40 +14,53 @@ package net import ( "context" + "fmt" "sync" + "sync/atomic" "time" "github.com/ipfs/boxo/bitswap" "github.com/ipfs/boxo/bitswap/network" "github.com/ipfs/boxo/blockservice" exchange "github.com/ipfs/boxo/exchange" + "github.com/ipfs/boxo/ipns" "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" + libp2p "github.com/libp2p/go-libp2p" gostream "github.com/libp2p/go-libp2p-gostream" + dht "github.com/libp2p/go-libp2p-kad-dht" + dualdht "github.com/libp2p/go-libp2p-kad-dht/dual" pubsub "github.com/libp2p/go-libp2p-pubsub" + record "github.com/libp2p/go-libp2p-record" + libp2pCrypto "github.com/libp2p/go-libp2p/core/crypto" + libp2pEvent "github.com/libp2p/go-libp2p/core/event" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/peer" - peerstore "github.com/libp2p/go-libp2p/core/peerstore" "github.com/libp2p/go-libp2p/core/routing" + + // @TODO: https://github.com/sourcenetwork/defradb/issues/1902 + //nolint:staticcheck + "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoreds" + "github.com/libp2p/go-libp2p/p2p/net/connmgr" + "github.com/multiformats/go-multiaddr" "github.com/sourcenetwork/corelog" "google.golang.org/grpc" "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/crypto" "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/event" - "github.com/sourcenetwork/defradb/internal/core" corenet "github.com/sourcenetwork/defradb/internal/core/net" - "github.com/sourcenetwork/defradb/internal/merkle/clock" pb "github.com/sourcenetwork/defradb/net/pb" ) // Peer is a DefraDB Peer node which exposes all the LibP2P host/peer functionality // to the 
underlying DefraDB instance. type Peer struct { - //config?? + blockstore datastore.Blockstore - db client.DB + bus *event.Bus updateSub *event.Subscription host host.Host @@ -57,50 +70,148 @@ type Peer struct { server *server p2pRPC *grpc.Server // rpc server over the P2P network - // replicators is a map from collectionName => peerId - replicators map[string]map[peer.ID]struct{} - mu sync.Mutex - // peer DAG service exch exchange.Interface bserv blockservice.BlockService - ctx context.Context - cancel context.CancelFunc + ctx context.Context + cancel context.CancelFunc + dhtClose func() error } // NewPeer creates a new instance of the DefraDB server as a peer-to-peer node. func NewPeer( ctx context.Context, - db client.DB, - h host.Host, - dht routing.Routing, - ps *pubsub.PubSub, - serverOptions []grpc.ServerOption, - dialOptions []grpc.DialOption, -) (*Peer, error) { - if db == nil { + rootstore datastore.Rootstore, + blockstore datastore.Blockstore, + bus *event.Bus, + opts ...NodeOpt, +) (p *Peer, err error) { + if rootstore == nil || blockstore == nil { return nil, ErrNilDB } + options := DefaultOptions() + for _, opt := range opts { + opt(options) + } + + connManager, err := connmgr.NewConnManager(100, 400, connmgr.WithGracePeriod(time.Second*20)) + if err != nil { + return nil, err + } + + var listenAddresses []multiaddr.Multiaddr + for _, addr := range options.ListenAddresses { + listenAddress, err := multiaddr.NewMultiaddr(addr) + if err != nil { + return nil, err + } + listenAddresses = append(listenAddresses, listenAddress) + } + ctx, cancel := context.WithCancel(ctx) - p := &Peer{ - host: h, - dht: dht, - ps: ps, - db: db, - p2pRPC: grpc.NewServer(serverOptions...), - ctx: ctx, - cancel: cancel, - replicators: make(map[string]map[peer.ID]struct{}), - } - var err error - p.server, err = newServer(p, dialOptions...) + defer func() { + if p == nil { + cancel() + } + }() + + peerstore, err := pstoreds.NewPeerstore(ctx, rootstore, pstoreds.DefaultOpts()) if err != nil { return nil, err } - err = p.loadReplicators(p.ctx) + if options.PrivateKey == nil { + // generate an ephemeral private key + key, err := crypto.GenerateEd25519() + if err != nil { + return nil, err + } + options.PrivateKey = key + } + + // unmarshal the private key bytes + privateKey, err := libp2pCrypto.UnmarshalEd25519PrivateKey(options.PrivateKey) + if err != nil { + return nil, err + } + + var ddht *dualdht.DHT + + libp2pOpts := []libp2p.Option{ + libp2p.ConnectionManager(connManager), + libp2p.DefaultTransports, + libp2p.Identity(privateKey), + libp2p.ListenAddrs(listenAddresses...), + libp2p.Peerstore(peerstore), + libp2p.Routing(func(h host.Host) (routing.PeerRouting, error) { + // Delete this line and uncomment the next 6 lines once we remove batchable datastore support. + // var store ds.Batching + // // If `rootstore` doesn't implement `Batching`, `nil` will be passed + // // to newDHT which will cause the DHT to be stored in memory. + // if dsb, isBatching := rootstore.(ds.Batching); isBatching { + // store = dsb + // } + ddht, err = newDHT(ctx, h, rootstore) + return ddht, err + }), + } + if !options.EnableRelay { + libp2pOpts = append(libp2pOpts, libp2p.DisableRelay()) + } + + h, err := libp2p.New(libp2pOpts...) 
+ if err != nil { + return nil, err + } + log.InfoContext( + ctx, + "Created LibP2P host", + corelog.Any("PeerId", h.ID()), + corelog.Any("Address", options.ListenAddresses), + ) + + var ps *pubsub.PubSub + if options.EnablePubSub { + ps, err = pubsub.NewGossipSub( + ctx, + h, + pubsub.WithPeerExchange(true), + pubsub.WithFloodPublish(true), + ) + if err != nil { + return nil, err + } + } + + if err != nil { + return nil, err + } + + sub, err := h.EventBus().Subscribe(&libp2pEvent.EvtPeerConnectednessChanged{}) + if err != nil { + return nil, err + } + // publish subscribed events to the event bus + go func() { + for val := range sub.Out() { + bus.Publish(event.NewMessage(event.PeerName, val)) + } + }() + + p = &Peer{ + host: h, + dht: ddht, + ps: ps, + blockstore: blockstore, + bus: bus, + p2pRPC: grpc.NewServer(options.GRPCServerOptions...), + ctx: ctx, + cancel: cancel, + } + + p.server, err = newServer(p, options.GRPCDialOptions...) if err != nil { return nil, err } @@ -112,8 +223,7 @@ func NewPeer( // Start all the internal workers/goroutines/loops that manage the P2P state. func (p *Peer) Start() error { - p.mu.Lock() - defer p.mu.Unlock() + p.bus.Publish(event.NewMessage(event.PeerInfoName, event.PeerInfo{Info: p.PeerInfo()})) // reconnect to known peers var wg sync.WaitGroup @@ -142,13 +252,13 @@ func (p *Peer) Start() error { } if p.ps != nil { - sub, err := p.db.Events().Subscribe(event.UpdateName) + sub, err := p.bus.Subscribe(event.UpdateName, event.P2PTopicName, event.ReplicatorName) if err != nil { return err } p.updateSub = sub log.InfoContext(p.ctx, "Starting internal broadcaster for pubsub network") - go p.handleBroadcastLoop() + go p.handleMessageLoop() } log.InfoContext( @@ -180,10 +290,9 @@ func (p *Peer) Close() { log.ErrorContextE(p.ctx, "Failed closing server RPC connections", err) } } - stopGRPCServer(p.ctx, p.p2pRPC) if p.updateSub != nil { - p.db.Events().Unsubscribe(p.updateSub) + p.bus.Unsubscribe(p.updateSub) } if err := p.bserv.Close(); err != nil { @@ -194,31 +303,50 @@ func (p *Peer) Close() { log.ErrorContextE(p.ctx, "Error closing host", err) } - p.cancel() + if p.dhtClose != nil { + err := p.dhtClose() + if err != nil { + log.ErrorContextE(p.ctx, "Failed to close DHT", err) + } + } + + stopGRPCServer(p.ctx, p.p2pRPC) + + if p.cancel != nil { + p.cancel() + } } -// handleBroadcast loop manages the transition of messages +// handleMessageLoop manages the transition of messages // from the internal broadcaster to the external pubsub network -func (p *Peer) handleBroadcastLoop() { +func (p *Peer) handleMessageLoop() { for { msg, isOpen := <-p.updateSub.Message() if !isOpen { return } - update, ok := msg.Data.(event.Update) - if !ok { - continue // ignore invalid value - } - var err error - if update.IsCreate { - err = p.handleDocCreateLog(update) - } else { - err = p.handleDocUpdateLog(update) - } + switch evt := msg.Data.(type) { + case event.Update: + var err error + if evt.IsCreate { + err = p.handleDocCreateLog(evt) + } else { + err = p.handleDocUpdateLog(evt) + } - if err != nil { - log.ErrorContextE(p.ctx, "Error while handling broadcast log", err) + if err != nil { + log.ErrorContextE(p.ctx, "Error while handling broadcast log", err) + } + + case event.P2PTopic: + p.server.updatePubSubTopics(evt) + + case event.Replicator: + p.server.updateReplicators(evt) + default: + // ignore other events + continue } } } @@ -258,112 +386,6 @@ func (p *Peer) RegisterNewDocument( return p.server.publishLog(p.ctx, schemaRoot, req) } -func (p *Peer) pushToReplicator(
- ctx context.Context, - txn datastore.Txn, - collection client.Collection, - docIDsCh <-chan client.DocIDResult, - pid peer.ID, -) { - for docIDResult := range docIDsCh { - if docIDResult.Err != nil { - log.ErrorContextE(ctx, "Key channel error", docIDResult.Err) - continue - } - docID := core.DataStoreKeyFromDocID(docIDResult.ID) - headset := clock.NewHeadSet( - txn.Headstore(), - docID.WithFieldId(core.COMPOSITE_NAMESPACE).ToHeadStoreKey(), - ) - cids, _, err := headset.List(ctx) - if err != nil { - log.ErrorContextE( - ctx, - "Failed to get heads", - err, - corelog.String("DocID", docIDResult.ID.String()), - corelog.Any("PeerID", pid), - corelog.Any("Collection", collection.Name())) - continue - } - // loop over heads, get block, make the required logs, and send - for _, c := range cids { - blk, err := txn.Blockstore().Get(ctx, c) - if err != nil { - log.ErrorContextE(ctx, "Failed to get block", err, - corelog.Any("CID", c), - corelog.Any("PeerID", pid), - corelog.Any("Collection", collection.Name())) - continue - } - - evt := event.Update{ - DocID: docIDResult.ID.String(), - Cid: c, - SchemaRoot: collection.SchemaRoot(), - Block: blk.RawData(), - } - if err := p.server.pushLog(ctx, evt, pid); err != nil { - log.ErrorContextE( - ctx, - "Failed to replicate log", - err, - corelog.Any("CID", c), - corelog.Any("PeerID", pid), - ) - } - } - } -} - -func (p *Peer) loadReplicators(ctx context.Context) error { - reps, err := p.GetAllReplicators(ctx) - if err != nil { - return errors.Wrap("failed to get replicators", err) - } - p.mu.Lock() - defer p.mu.Unlock() - for _, rep := range reps { - for _, schema := range rep.Schemas { - if pReps, exists := p.replicators[schema]; exists { - if _, exists := pReps[rep.Info.ID]; exists { - continue - } - } else { - p.replicators[schema] = make(map[peer.ID]struct{}) - } - - // add to replicators list - p.replicators[schema][rep.Info.ID] = struct{}{} - } - - // Add the destination's peer multiaddress in the peerstore. - // This will be used during connection and stream creation by libp2p. 
- p.host.Peerstore().AddAddrs(rep.Info.ID, rep.Info.Addrs, peerstore.PermanentAddrTTL) - - log.InfoContext(ctx, "loaded replicators from datastore", corelog.Any("Replicator", rep)) - } - - return nil -} - -func (p *Peer) loadP2PCollections(ctx context.Context) (map[string]struct{}, error) { - collections, err := p.GetAllP2PCollections(ctx) - if err != nil && !errors.Is(err, ds.ErrNotFound) { - return nil, err - } - colMap := make(map[string]struct{}) - for _, col := range collections { - err := p.server.addPubSubTopic(col, true) - if err != nil { - return nil, err - } - colMap[col] = struct{}{} - } - - return colMap, nil -} - func (p *Peer) handleDocCreateLog(evt event.Update) error { docID, err := client.NewDocIDFromString(evt.DocID) if err != nil { @@ -425,9 +447,9 @@ func (p *Peer) pushLogToReplicators(lg event.Update) { peers[peer.String()] = struct{}{} } - p.mu.Lock() - reps, exists := p.replicators[lg.SchemaRoot] - p.mu.Unlock() + p.server.mu.Lock() + reps, exists := p.server.replicators[lg.SchemaRoot] + p.server.mu.Unlock() if exists { for pid := range reps { @@ -453,8 +475,8 @@ func (p *Peer) pushLogToReplicators(lg event.Update) { func (p *Peer) setupBlockService() { bswapnet := network.NewFromIpfsHost(p.host, p.dht) - bswap := bitswap.New(p.ctx, bswapnet, p.db.Blockstore()) - p.bserv = blockservice.New(p.db.Blockstore(), bswap) + bswap := bitswap.New(p.ctx, bswapnet, p.blockstore) + p.bserv = blockservice.New(p.blockstore, bswap) p.exch = bswap } @@ -474,22 +496,63 @@ func stopGRPCServer(ctx context.Context, server *grpc.Server) { } } -// rollbackAddPubSubTopics removes the given topics from the pubsub system. -func (p *Peer) rollbackAddPubSubTopics(topics []string, cause error) error { - for _, topic := range topics { - if err := p.server.removePubSubTopic(topic); err != nil { - return errors.WithStack(err, errors.NewKV("Cause", cause)) - } +// Bootstrap connects to the given peers. +func (p *Peer) Bootstrap(addrs []peer.AddrInfo) { + var connected uint64 + + var wg sync.WaitGroup + for _, pinfo := range addrs { + wg.Add(1) + go func(pinfo peer.AddrInfo) { + defer wg.Done() + err := p.host.Connect(p.ctx, pinfo) + if err != nil { + log.InfoContext(p.ctx, "Cannot connect to peer", corelog.Any("Error", err)) + return + } + log.InfoContext(p.ctx, "Connected", corelog.Any("PeerID", pinfo.ID)) + atomic.AddUint64(&connected, 1) + }(pinfo) + } + + wg.Wait() + + if nPeers := len(addrs); int(connected) < nPeers/2 { + log.InfoContext(p.ctx, fmt.Sprintf("Only connected to %d bootstrap peers out of %d", connected, nPeers)) + } + + err := p.dht.Bootstrap(p.ctx) + if err != nil { + log.ErrorContextE(p.ctx, "Problem bootstrapping using DHT", err) + return } - return cause } -// rollbackRemovePubSubTopics adds back the given topics from the pubsub system.
-func (p *Peer) rollbackRemovePubSubTopics(topics []string, cause error) error { - for _, topic := range topics { - if err := p.server.addPubSubTopic(topic, true); err != nil { - return errors.WithStack(err, errors.NewKV("Cause", cause)) - } +func (p *Peer) PeerID() peer.ID { + return p.host.ID() +} + +func (p *Peer) ListenAddrs() []multiaddr.Multiaddr { + return p.host.Network().ListenAddresses() +} + +func (p *Peer) PeerInfo() peer.AddrInfo { + return peer.AddrInfo{ + ID: p.host.ID(), + Addrs: p.host.Network().ListenAddresses(), + } +} + +func newDHT(ctx context.Context, h host.Host, dsb ds.Batching) (*dualdht.DHT, error) { + dhtOpts := []dualdht.Option{ + dualdht.DHTOption(dht.NamespacedValidator("pk", record.PublicKeyValidator{})), + dualdht.DHTOption(dht.NamespacedValidator("ipns", ipns.Validator{KeyBook: h.Peerstore()})), + dualdht.DHTOption(dht.Concurrency(10)), + dualdht.DHTOption(dht.Mode(dht.ModeAuto)), } - return cause + if dsb != nil { + dhtOpts = append(dhtOpts, dualdht.DHTOption(dht.Datastore(dsb))) + } + + return dualdht.New(ctx, h, dhtOpts...) } diff --git a/net/peer_replicator.go b/net/peer_replicator.go deleted file mode 100644 index 19accb17c4..0000000000 --- a/net/peer_replicator.go +++ /dev/null @@ -1,230 +0,0 @@ -// Copyright 2023 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package net - -import ( - "context" - "encoding/json" - - dsq "github.com/ipfs/go-datastore/query" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/libp2p/go-libp2p/core/peerstore" - - "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/internal/core" - "github.com/sourcenetwork/defradb/internal/db" -) - -func (p *Peer) SetReplicator(ctx context.Context, rep client.Replicator) error { - p.mu.Lock() - defer p.mu.Unlock() - - txn, err := p.db.NewTxn(ctx, false) - if err != nil { - return err - } - defer txn.Discard(ctx) - - if rep.Info.ID == p.host.ID() { - return ErrSelfTargetForReplicator - } - if err := rep.Info.ID.Validate(); err != nil { - return err - } - - // TODO-ACP: Support ACP <> P2P - https://github.com/sourcenetwork/defradb/issues/2366 - // ctx = db.SetContextIdentity(ctx, identity) - ctx = db.SetContextTxn(ctx, txn) - - var collections []client.Collection - switch { - case len(rep.Schemas) > 0: - // if specific collections are chosen get them by name - for _, name := range rep.Schemas { - col, err := p.db.GetCollectionByName(ctx, name) - if err != nil { - return NewErrReplicatorCollections(err) - } - - if col.Description().Policy.HasValue() { - return ErrReplicatorColHasPolicy - } - - collections = append(collections, col) - } - - default: - // default to all collections (unless a collection contains a policy). - // TODO-ACP: default to all collections after resolving https://github.com/sourcenetwork/defradb/issues/2366 - allCollections, err := p.db.GetCollections(ctx, client.CollectionFetchOptions{}) - if err != nil { - return NewErrReplicatorCollections(err) - } - - for _, col := range allCollections { - // Can not default to all collections if any collection has a policy. 
- // TODO-ACP: remove this check/loop after https://github.com/sourcenetwork/defradb/issues/2366 - if col.Description().Policy.HasValue() { - return ErrReplicatorSomeColsHavePolicy - } - } - collections = allCollections - } - rep.Schemas = nil - - // Add the destination's peer multiaddress in the peerstore. - // This will be used during connection and stream creation by libp2p. - p.host.Peerstore().AddAddrs(rep.Info.ID, rep.Info.Addrs, peerstore.PermanentAddrTTL) - - var added []client.Collection - for _, col := range collections { - reps, exists := p.replicators[col.SchemaRoot()] - if !exists { - p.replicators[col.SchemaRoot()] = make(map[peer.ID]struct{}) - } - if _, exists := reps[rep.Info.ID]; !exists { - // keep track of newly added collections so we don't - // push logs to a replicator peer multiple times. - p.replicators[col.SchemaRoot()][rep.Info.ID] = struct{}{} - added = append(added, col) - } - rep.Schemas = append(rep.Schemas, col.SchemaRoot()) - } - - // persist replicator to the datastore - repBytes, err := json.Marshal(rep) - if err != nil { - return err - } - key := core.NewReplicatorKey(rep.Info.ID.String()) - err = txn.Systemstore().Put(ctx, key.ToDS(), repBytes) - if err != nil { - return err - } - - // push all collection documents to the replicator peer - for _, col := range added { - keysCh, err := col.GetAllDocIDs(ctx) - if err != nil { - return NewErrReplicatorDocID(err, col.Name().Value(), rep.Info.ID) - } - p.pushToReplicator(ctx, txn, col, keysCh, rep.Info.ID) - } - - return txn.Commit(ctx) -} - -func (p *Peer) DeleteReplicator(ctx context.Context, rep client.Replicator) error { - p.mu.Lock() - defer p.mu.Unlock() - - txn, err := p.db.NewTxn(ctx, false) - if err != nil { - return err - } - defer txn.Discard(ctx) - - if rep.Info.ID == p.host.ID() { - return ErrSelfTargetForReplicator - } - if err := rep.Info.ID.Validate(); err != nil { - return err - } - - // set transaction for all operations - ctx = db.SetContextTxn(ctx, txn) - - var collections []client.Collection - switch { - case len(rep.Schemas) > 0: - // if specific collections are chosen get them by name - for _, name := range rep.Schemas { - col, err := p.db.GetCollectionByName(ctx, name) - if err != nil { - return NewErrReplicatorCollections(err) - } - collections = append(collections, col) - } - // make sure the replicator exists in the datastore - key := core.NewReplicatorKey(rep.Info.ID.String()) - _, err = txn.Systemstore().Get(ctx, key.ToDS()) - if err != nil { - return err - } - - default: - // default to all collections - collections, err = p.db.GetCollections(ctx, client.CollectionFetchOptions{}) - if err != nil { - return NewErrReplicatorCollections(err) - } - } - rep.Schemas = nil - - schemaMap := make(map[string]struct{}) - for _, col := range collections { - schemaMap[col.SchemaRoot()] = struct{}{} - } - - // update replicators and add remaining schemas to rep - for key, val := range p.replicators { - if _, exists := val[rep.Info.ID]; exists { - if _, toDelete := schemaMap[key]; toDelete { - delete(p.replicators[key], rep.Info.ID) - } else { - rep.Schemas = append(rep.Schemas, key) - } - } - } - - if len(rep.Schemas) == 0 { - // Remove the destination's peer multiaddress in the peerstore. 
- p.host.Peerstore().ClearAddrs(rep.Info.ID) - } - - // persist the replicator to the store, deleting it if no schemas remain - key := core.NewReplicatorKey(rep.Info.ID.String()) - if len(rep.Schemas) == 0 { - return txn.Systemstore().Delete(ctx, key.ToDS()) - } - repBytes, err := json.Marshal(rep) - if err != nil { - return err - } - return txn.Systemstore().Put(ctx, key.ToDS(), repBytes) -} - -func (p *Peer) GetAllReplicators(ctx context.Context) ([]client.Replicator, error) { - txn, err := p.db.NewTxn(ctx, true) - if err != nil { - return nil, err - } - defer txn.Discard(ctx) - - // create collection system prefix query - query := dsq.Query{ - Prefix: core.NewReplicatorKey("").ToString(), - } - results, err := txn.Systemstore().Query(ctx, query) - if err != nil { - return nil, err - } - - var reps []client.Replicator - for result := range results.Next() { - var rep client.Replicator - if err = json.Unmarshal(result.Value, &rep); err != nil { - return nil, err - } - reps = append(reps, rep) - } - return reps, nil -} diff --git a/net/peer_test.go b/net/peer_test.go index 6f6fda67ad..09f87830ac 100644 --- a/net/peer_test.go +++ b/net/peer_test.go @@ -12,24 +12,16 @@ package net import ( "context" - "encoding/hex" - "fmt" "testing" "time" - "github.com/decred/dcrd/dcrec/secp256k1/v4" "github.com/ipfs/go-cid" - ds "github.com/ipfs/go-datastore" - libp2p "github.com/libp2p/go-libp2p" - pubsub "github.com/libp2p/go-libp2p-pubsub" - "github.com/libp2p/go-libp2p/core/peer" mh "github.com/multiformats/go-multihash" rpc "github.com/sourcenetwork/go-libp2p-pubsub-rpc" "github.com/sourcenetwork/immutable" "github.com/stretchr/testify/require" "github.com/sourcenetwork/defradb/acp" - acpIdentity "github.com/sourcenetwork/defradb/acp/identity" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore/memory" "github.com/sourcenetwork/defradb/event" @@ -71,16 +63,18 @@ func createCID(doc *client.Document) (cid.Cid, error) { const randomMultiaddr = "/ip4/127.0.0.1/tcp/0" -func newTestNode(ctx context.Context, t *testing.T) (client.DB, *Node) { +func newTestPeer(ctx context.Context, t *testing.T) (client.DB, *Peer) { store := memory.NewDatastore(ctx) acpLocal := acp.NewLocalACP() acpLocal.Init(context.Background(), "") db, err := db.NewDB(ctx, store, immutable.Some[acp.ACP](acpLocal), nil) require.NoError(t, err) - n, err := NewNode( + n, err := NewPeer( ctx, - db, + db.Root(), + db.Blockstore(), + db.Events(), WithListenAddresses(randomMultiaddr), ) require.NoError(t, err) @@ -93,72 +87,26 @@ func TestNewPeer_NoError(t *testing.T) { store := memory.NewDatastore(ctx) db, err := db.NewDB(ctx, store, acp.NoACP, nil) require.NoError(t, err) - - h, err := libp2p.New() - require.NoError(t, err) - - _, err = NewPeer(ctx, db, h, nil, nil, nil, nil) + defer db.Close() + p, err := NewPeer(ctx, db.Root(), db.Blockstore(), db.Events()) require.NoError(t, err) + p.Close() } func TestNewPeer_NoDB_NilDBError(t *testing.T) { ctx := context.Background() - - h, err := libp2p.New() - require.NoError(t, err) - - _, err = NewPeer(ctx, nil, h, nil, nil, nil, nil) + _, err := NewPeer(ctx, nil, nil, nil) require.ErrorIs(t, err, ErrNilDB) } -func TestNewPeer_WithExistingTopic_TopicAlreadyExistsError(t *testing.T) { - ctx := context.Background() - store := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, store, acp.NoACP, nil) - require.NoError(t, err) - - _, err = db.AddSchema(ctx, `type User { - name: String - age: Int - }`) - require.NoError(t, err) - - col, err := db.GetCollectionByName(ctx, 
"User") - require.NoError(t, err) - - doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`), col.Definition()) - require.NoError(t, err) - - err = col.Create(ctx, doc) - require.NoError(t, err) - - h, err := libp2p.New() - require.NoError(t, err) - - ps, err := pubsub.NewGossipSub( - ctx, - h, - pubsub.WithPeerExchange(true), - pubsub.WithFloodPublish(true), - ) - require.NoError(t, err) - - _, err = rpc.NewTopic(ctx, ps, h.ID(), doc.ID().String(), true) - require.NoError(t, err) - - _, err = NewPeer(ctx, db, h, nil, ps, nil, nil) - require.ErrorContains(t, err, "topic already exists") -} - func TestStartAndClose_NoError(t *testing.T) { ctx := context.Background() - db, n := newTestNode(ctx, t) - defer n.Close() + db, p := newTestPeer(ctx, t) + defer db.Close() + defer p.Close() - err := n.Start() + err := p.Start() require.NoError(t, err) - - db.Close() } func TestStart_WithKnownPeer_NoError(t *testing.T) { @@ -166,23 +114,31 @@ func TestStart_WithKnownPeer_NoError(t *testing.T) { store := memory.NewDatastore(ctx) db1, err := db.NewDB(ctx, store, acp.NoACP, nil) require.NoError(t, err) + defer db1.Close() store2 := memory.NewDatastore(ctx) db2, err := db.NewDB(ctx, store2, acp.NoACP, nil) require.NoError(t, err) + defer db2.Close() - n1, err := NewNode( + n1, err := NewPeer( ctx, - db1, + db1.Root(), + db1.Blockstore(), + db1.Events(), WithListenAddresses("/ip4/0.0.0.0/tcp/0"), ) require.NoError(t, err) - n2, err := NewNode( + defer n1.Close() + n2, err := NewPeer( ctx, - db2, + db2.Root(), + db2.Blockstore(), + db2.Events(), WithListenAddresses("/ip4/0.0.0.0/tcp/0"), ) require.NoError(t, err) + defer n2.Close() addrs, err := netutils.ParsePeers([]string{n1.host.Addrs()[0].String() + "/p2p/" + n1.PeerID().String()}) if err != nil { @@ -192,9 +148,6 @@ func TestStart_WithKnownPeer_NoError(t *testing.T) { err = n2.Start() require.NoError(t, err) - - db1.Close() - db2.Close() } func TestStart_WithOfflineKnownPeer_NoError(t *testing.T) { @@ -202,23 +155,31 @@ func TestStart_WithOfflineKnownPeer_NoError(t *testing.T) { store := memory.NewDatastore(ctx) db1, err := db.NewDB(ctx, store, acp.NoACP, nil) require.NoError(t, err) + defer db1.Close() store2 := memory.NewDatastore(ctx) db2, err := db.NewDB(ctx, store2, acp.NoACP, nil) require.NoError(t, err) + defer db2.Close() - n1, err := NewNode( + n1, err := NewPeer( ctx, - db1, + db1.Root(), + db1.Blockstore(), + db1.Events(), WithListenAddresses("/ip4/0.0.0.0/tcp/0"), ) require.NoError(t, err) - n2, err := NewNode( + defer n1.Close() + n2, err := NewPeer( ctx, - db2, + db2.Root(), + db2.Blockstore(), + db2.Events(), WithListenAddresses("/ip4/0.0.0.0/tcp/0"), ) require.NoError(t, err) + defer n2.Close() addrs, err := netutils.ParsePeers([]string{n1.host.Addrs()[0].String() + "/p2p/" + n1.PeerID().String()}) if err != nil { @@ -232,15 +193,13 @@ func TestStart_WithOfflineKnownPeer_NoError(t *testing.T) { err = n2.Start() require.NoError(t, err) - - db1.Close() - db2.Close() } func TestRegisterNewDocument_NoError(t *testing.T) { ctx := context.Background() - db, n := newTestNode(ctx, t) - defer n.Close() + db, p := newTestPeer(ctx, t) + defer db.Close() + defer p.Close() _, err := db.AddSchema(ctx, `type User { name: String @@ -257,14 +216,15 @@ func TestRegisterNewDocument_NoError(t *testing.T) { cid, err := createCID(doc) require.NoError(t, err) - err = n.RegisterNewDocument(ctx, doc.ID(), cid, emptyBlock(), col.SchemaRoot()) + err = p.RegisterNewDocument(ctx, doc.ID(), cid, emptyBlock(), col.SchemaRoot()) require.NoError(t, 
err) } func TestRegisterNewDocument_RPCTopicAlreadyRegisteredError(t *testing.T) { ctx := context.Background() - db, n := newTestNode(ctx, t) - defer n.Close() + db, p := newTestPeer(ctx, t) + defer db.Close() + defer p.Close() _, err := db.AddSchema(ctx, `type User { name: String @@ -278,600 +238,21 @@ func TestRegisterNewDocument_RPCTopicAlreadyRegisteredError(t *testing.T) { doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`), col.Definition()) require.NoError(t, err) - _, err = rpc.NewTopic(ctx, n.Peer.ps, n.Peer.host.ID(), doc.ID().String(), true) + _, err = rpc.NewTopic(ctx, p.ps, p.host.ID(), doc.ID().String(), true) require.NoError(t, err) cid, err := createCID(doc) require.NoError(t, err) - err = n.RegisterNewDocument(ctx, doc.ID(), cid, emptyBlock(), col.SchemaRoot()) + err = p.RegisterNewDocument(ctx, doc.ID(), cid, emptyBlock(), col.SchemaRoot()) require.Equal(t, err.Error(), "creating topic: joining topic: topic already exists") } -func TestSetReplicator_NoError(t *testing.T) { - ctx := context.Background() - db, n := newTestNode(ctx, t) - defer n.Close() - - _, err := db.AddSchema(ctx, `type User { - name: String - age: Int - }`) - require.NoError(t, err) - - info, err := peer.AddrInfoFromString("/ip4/0.0.0.0/tcp/0/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N") - require.NoError(t, err) - - err = n.Peer.SetReplicator(ctx, client.Replicator{ - Info: *info, - Schemas: []string{"User"}, - }) - require.NoError(t, err) -} - -// This test documents that we don't allow setting replicator with a collection that has a policy -// until the following is implemented: -// TODO-ACP: ACP <> P2P https://github.com/sourcenetwork/defradb/issues/2366 -func TestSetReplicatorWithACollectionSpecifiedThatHasPolicy_ReturnError(t *testing.T) { - ctx := context.Background() - d, n := newTestNode(ctx, t) - defer n.Close() - - policy := ` - name: test - description: a policy - actor: - name: actor - resources: - user: - permissions: - read: - expr: owner - write: - expr: owner - relations: - owner: - types: - - actor - ` - - privKeyBytes, err := hex.DecodeString("028d53f37a19afb9a0dbc5b4be30c65731479ee8cfa0c9bc8f8bf198cc3c075f") - require.NoError(t, err) - privKey := secp256k1.PrivKeyFromBytes(privKeyBytes) - identity, err := acpIdentity.FromPrivateKey(privKey) - require.NoError(t, err) - - ctx = db.SetContextIdentity(ctx, identity) - policyResult, err := d.AddPolicy(ctx, policy) - policyID := policyResult.PolicyID - require.NoError(t, err) - require.Equal(t, "7b5ed30570e8d9206027ef6d5469879a6c1ea4595625c6ca33a19063a6ed6214", policyID) - - schema := fmt.Sprintf(` - type User @policy(id: "%s", resource: "user") { - name: String - age: Int - } - `, policyID, - ) - _, err = d.AddSchema(ctx, schema) - require.NoError(t, err) - - info, err := peer.AddrInfoFromString("/ip4/0.0.0.0/tcp/0/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N") - require.NoError(t, err) - - err = n.Peer.SetReplicator(ctx, client.Replicator{ - Info: *info, - Schemas: []string{"User"}, - }) - require.Error(t, err) - require.ErrorIs(t, err, ErrReplicatorColHasPolicy) -} - -// This test documents that we don't allow setting replicator using default option when any collection has a policy -// until the following is implemented: -// TODO-ACP: ACP <> P2P https://github.com/sourcenetwork/defradb/issues/2366 -func TestSetReplicatorWithSomeCollectionThatHasPolicyUsingAllCollectionsByDefault_ReturnError(t *testing.T) { - ctx := context.Background() - d, n := newTestNode(ctx, t) - defer n.Close() - - policy := ` - 
name: test - description: a policy - actor: - name: actor - resources: - user: - permissions: - read: - expr: owner - write: - expr: owner - relations: - owner: - types: - - actor - ` - - privKeyBytes, err := hex.DecodeString("028d53f37a19afb9a0dbc5b4be30c65731479ee8cfa0c9bc8f8bf198cc3c075f") - require.NoError(t, err) - privKey := secp256k1.PrivKeyFromBytes(privKeyBytes) - identity, err := acpIdentity.FromPrivateKey(privKey) - require.NoError(t, err) - - ctx = db.SetContextIdentity(ctx, identity) - policyResult, err := d.AddPolicy(ctx, policy) - policyID := policyResult.PolicyID - require.NoError(t, err) - require.Equal(t, "7b5ed30570e8d9206027ef6d5469879a6c1ea4595625c6ca33a19063a6ed6214", policyID) - - schema := fmt.Sprintf(` - type User @policy(id: "%s", resource: "user") { - name: String - age: Int - } - `, policyID, - ) - _, err = d.AddSchema(ctx, schema) - require.NoError(t, err) - - info, err := peer.AddrInfoFromString("/ip4/0.0.0.0/tcp/0/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N") - require.NoError(t, err) - - err = n.Peer.SetReplicator(ctx, client.Replicator{ - Info: *info, - // Note: The missing explicit input of schemas here - }) - require.ErrorIs(t, err, ErrReplicatorSomeColsHavePolicy) -} - -func TestSetReplicator_WithInvalidAddress_EmptyPeerIDError(t *testing.T) { - ctx := context.Background() - db, n := newTestNode(ctx, t) - defer n.Close() - - _, err := db.AddSchema(ctx, `type User { - name: String - age: Int - }`) - require.NoError(t, err) - - err = n.Peer.SetReplicator(ctx, client.Replicator{ - Info: peer.AddrInfo{}, - Schemas: []string{"User"}, - }) - require.ErrorContains(t, err, "empty peer ID") -} - -func TestSetReplicator_WithDBClosed_DatastoreClosedError(t *testing.T) { - ctx := context.Background() - db, n := newTestNode(ctx, t) - defer n.Close() - - db.Close() - - info, err := peer.AddrInfoFromString("/ip4/0.0.0.0/tcp/0/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N") - require.NoError(t, err) - - err = n.Peer.SetReplicator(ctx, client.Replicator{ - Info: *info, - Schemas: []string{"User"}, - }) - require.ErrorContains(t, err, "datastore closed") -} - -func TestSetReplicator_WithUndefinedCollection_KeyNotFoundError(t *testing.T) { - ctx := context.Background() - _, n := newTestNode(ctx, t) - defer n.Close() - - info, err := peer.AddrInfoFromString("/ip4/0.0.0.0/tcp/0/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N") - require.NoError(t, err) - - err = n.Peer.SetReplicator(ctx, client.Replicator{ - Info: *info, - Schemas: []string{"User"}, - }) - require.ErrorContains(t, err, "failed to get collections for replicator: datastore: key not found") -} - -func TestSetReplicator_ForAllCollections_NoError(t *testing.T) { - ctx := context.Background() - db, n := newTestNode(ctx, t) - defer n.Close() - - _, err := db.AddSchema(ctx, `type User { - name: String - age: Int - }`) - require.NoError(t, err) - - info, err := peer.AddrInfoFromString("/ip4/0.0.0.0/tcp/0/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N") - require.NoError(t, err) - - err = n.Peer.SetReplicator(ctx, client.Replicator{ - Info: *info, - }) - require.NoError(t, err) -} - -func TestPushToReplicator_SingleDocumentNoPeer_FailedToReplicateLogError(t *testing.T) { - ctx := context.Background() - db, n := newTestNode(ctx, t) - defer n.Close() - _, err := db.AddSchema(ctx, `type User { - name: String - age: Int - }`) - require.NoError(t, err) - - col, err := db.GetCollectionByName(ctx, "User") - require.NoError(t, err) - - doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`), 
col.Definition()) - require.NoError(t, err) - - err = col.Create(ctx, doc) - require.NoError(t, err) - - keysCh, err := col.GetAllDocIDs(ctx) - require.NoError(t, err) - - txn, err := db.NewTxn(ctx, true) - require.NoError(t, err) - - n.pushToReplicator(ctx, txn, col, keysCh, n.PeerID()) -} - -func TestDeleteReplicator_WithDBClosed_DataStoreClosedError(t *testing.T) { - ctx := context.Background() - db, n := newTestNode(ctx, t) - defer n.Close() - - info := peer.AddrInfo{ - ID: n.PeerID(), - Addrs: n.ListenAddrs(), - } - - db.Close() - - err := n.Peer.DeleteReplicator(ctx, client.Replicator{ - Info: info, - Schemas: []string{"User"}, - }) - require.ErrorContains(t, err, "datastore closed") -} - -func TestDeleteReplicator_WithTargetSelf_SelfTargetForReplicatorError(t *testing.T) { - ctx := context.Background() - _, n := newTestNode(ctx, t) - defer n.Close() - - err := n.Peer.DeleteReplicator(ctx, client.Replicator{ - Info: n.PeerInfo(), - Schemas: []string{"User"}, - }) - require.ErrorIs(t, err, ErrSelfTargetForReplicator) -} - -func TestDeleteReplicator_WithInvalidCollection_KeyNotFoundError(t *testing.T) { - ctx := context.Background() - _, n := newTestNode(ctx, t) - defer n.Close() - - _, n2 := newTestNode(ctx, t) - defer n2.Close() - - err := n.Peer.DeleteReplicator(ctx, client.Replicator{ - Info: n2.PeerInfo(), - Schemas: []string{"User"}, - }) - require.ErrorContains(t, err, "failed to get collections for replicator: datastore: key not found") -} - -func TestDeleteReplicator_WithCollectionAndPreviouslySetReplicator_NoError(t *testing.T) { - ctx := context.Background() - db, n := newTestNode(ctx, t) - defer n.Close() - - _, err := db.AddSchema(ctx, `type User { - name: String - age: Int - }`) - require.NoError(t, err) - - _, n2 := newTestNode(ctx, t) - defer n2.Close() - - err = n.Peer.SetReplicator(ctx, client.Replicator{ - Info: n2.PeerInfo(), - }) - require.NoError(t, err) - - err = n.Peer.DeleteReplicator(ctx, client.Replicator{ - Info: n2.PeerInfo(), - }) - require.NoError(t, err) -} - -func TestDeleteReplicator_WithNoCollection_NoError(t *testing.T) { - ctx := context.Background() - _, n := newTestNode(ctx, t) - defer n.Close() - - _, n2 := newTestNode(ctx, t) - defer n2.Close() - - err := n.Peer.DeleteReplicator(ctx, client.Replicator{ - Info: n2.PeerInfo(), - }) - require.NoError(t, err) -} - -func TestDeleteReplicator_WithNotSetReplicator_KeyNotFoundError(t *testing.T) { - ctx := context.Background() - db, n := newTestNode(ctx, t) - defer n.Close() - - _, err := db.AddSchema(ctx, `type User { - name: String - age: Int - }`) - require.NoError(t, err) - - _, n2 := newTestNode(ctx, t) - defer n2.Close() - - err = n.Peer.DeleteReplicator(ctx, client.Replicator{ - Info: n2.PeerInfo(), - Schemas: []string{"User"}, - }) - require.ErrorContains(t, err, "datastore: key not found") -} - -func TestGetAllReplicator_WithReplicator_NoError(t *testing.T) { - ctx := context.Background() - db, n := newTestNode(ctx, t) - defer n.Close() - - _, err := db.AddSchema(ctx, `type User { - name: String - age: Int - }`) - require.NoError(t, err) - - _, n2 := newTestNode(ctx, t) - defer n2.Close() - - err = n.Peer.SetReplicator(ctx, client.Replicator{ - Info: n2.PeerInfo(), - }) - require.NoError(t, err) - - reps, err := n.Peer.GetAllReplicators(ctx) - require.NoError(t, err) - - require.Len(t, reps, 1) - require.Equal(t, n2.PeerInfo().ID, reps[0].Info.ID) -} - -func TestGetAllReplicator_WithDBClosed_DatastoreClosedError(t *testing.T) { - ctx := context.Background() - db, n := newTestNode(ctx, t) - 
defer n.Close() - - db.Close() - - _, err := n.Peer.GetAllReplicators(ctx) - require.ErrorContains(t, err, "datastore closed") -} - -func TestLoadReplicators_WithDBClosed_DatastoreClosedError(t *testing.T) { - ctx := context.Background() - db, n := newTestNode(ctx, t) - defer n.Close() - - db.Close() - - err := n.Peer.loadReplicators(ctx) - require.ErrorContains(t, err, "datastore closed") -} - -func TestLoadReplicator_WithReplicator_NoError(t *testing.T) { - ctx := context.Background() - db, n := newTestNode(ctx, t) - defer n.Close() - - _, err := db.AddSchema(ctx, `type User { - name: String - age: Int - }`) - require.NoError(t, err) - - _, n2 := newTestNode(ctx, t) - defer n2.Close() - - err = n.Peer.SetReplicator(ctx, client.Replicator{ - Info: n2.PeerInfo(), - }) - require.NoError(t, err) - - err = n.Peer.loadReplicators(ctx) - require.NoError(t, err) -} - -func TestLoadReplicator_WithReplicatorAndEmptyReplicatorMap_NoError(t *testing.T) { - ctx := context.Background() - db, n := newTestNode(ctx, t) - defer n.Close() - - _, err := db.AddSchema(ctx, `type User { - name: String - age: Int - }`) - require.NoError(t, err) - - _, n2 := newTestNode(ctx, t) - defer n2.Close() - - err = n.Peer.SetReplicator(ctx, client.Replicator{ - Info: n2.PeerInfo(), - }) - require.NoError(t, err) - - n.replicators = make(map[string]map[peer.ID]struct{}) - - err = n.Peer.loadReplicators(ctx) - require.NoError(t, err) -} - -func TestAddP2PCollections_WithInvalidCollectionID_NotFoundError(t *testing.T) { - ctx := context.Background() - _, n := newTestNode(ctx, t) - defer n.Close() - - err := n.Peer.AddP2PCollections(ctx, []string{"invalid_collection"}) - require.Error(t, err, ds.ErrNotFound) -} - -// This test documents that we don't allow adding p2p collections that have a policy -// until the following is implemented: -// TODO-ACP: ACP <> P2P https://github.com/sourcenetwork/defradb/issues/2366 -func TestAddP2PCollectionsWithPermissionedCollection_Error(t *testing.T) { - ctx := context.Background() - d, n := newTestNode(ctx, t) - defer n.Close() - - policy := ` - name: test - description: a policy - actor: - name: actor - resources: - user: - permissions: - read: - expr: owner - write: - expr: owner - relations: - owner: - types: - - actor - ` - - privKeyBytes, err := hex.DecodeString("028d53f37a19afb9a0dbc5b4be30c65731479ee8cfa0c9bc8f8bf198cc3c075f") - require.NoError(t, err) - privKey := secp256k1.PrivKeyFromBytes(privKeyBytes) - identity, err := acpIdentity.FromPrivateKey(privKey) - require.NoError(t, err) - - ctx = db.SetContextIdentity(ctx, identity) - policyResult, err := d.AddPolicy(ctx, policy) - policyID := policyResult.PolicyID - require.NoError(t, err) - require.Equal(t, "7b5ed30570e8d9206027ef6d5469879a6c1ea4595625c6ca33a19063a6ed6214", policyID) - - schema := fmt.Sprintf(` - type User @policy(id: "%s", resource: "user") { - name: String - age: Int - } - `, policyID, - ) - _, err = d.AddSchema(ctx, schema) - require.NoError(t, err) - - col, err := d.GetCollectionByName(ctx, "User") - require.NoError(t, err) - - err = n.Peer.AddP2PCollections(ctx, []string{col.SchemaRoot()}) - require.Error(t, err) - require.ErrorIs(t, err, ErrP2PColHasPolicy) -} - -func TestAddP2PCollections_NoError(t *testing.T) { - ctx := context.Background() - db, n := newTestNode(ctx, t) - defer n.Close() - - _, err := db.AddSchema(ctx, `type User { - name: String - age: Int - }`) - require.NoError(t, err) - - col, err := db.GetCollectionByName(ctx, "User") - require.NoError(t, err) - - err = n.Peer.AddP2PCollections(ctx, 
-	require.NoError(t, err)
-}
-
-func TestRemoveP2PCollectionsWithInvalidCollectionID(t *testing.T) {
-	ctx := context.Background()
-	_, n := newTestNode(ctx, t)
-	defer n.Close()
-
-	err := n.Peer.RemoveP2PCollections(ctx, []string{"invalid_collection"})
-	require.Error(t, err, ds.ErrNotFound)
-}
-
-func TestRemoveP2PCollections(t *testing.T) {
-	ctx := context.Background()
-	db, n := newTestNode(ctx, t)
-	defer n.Close()
-
-	_, err := db.AddSchema(ctx, `type User {
-		name: String
-		age: Int
-	}`)
-	require.NoError(t, err)
-
-	col, err := db.GetCollectionByName(ctx, "User")
-	require.NoError(t, err)
-
-	err = n.Peer.RemoveP2PCollections(ctx, []string{col.SchemaRoot()})
-	require.NoError(t, err)
-}
-
-func TestGetAllP2PCollectionsWithNoCollections(t *testing.T) {
-	ctx := context.Background()
-	_, n := newTestNode(ctx, t)
-	defer n.Close()
-
-	cols, err := n.Peer.GetAllP2PCollections(ctx)
-	require.NoError(t, err)
-	require.Len(t, cols, 0)
-}
-
-func TestGetAllP2PCollections(t *testing.T) {
-	ctx := context.Background()
-	db, n := newTestNode(ctx, t)
-	defer n.Close()
-
-	_, err := db.AddSchema(ctx, `type User {
-		name: String
-		age: Int
-	}`)
-	require.NoError(t, err)
-
-	col, err := db.GetCollectionByName(ctx, "User")
-	require.NoError(t, err)
-
-	err = n.Peer.AddP2PCollections(ctx, []string{col.SchemaRoot()})
-	require.NoError(t, err)
-
-	cols, err := n.Peer.GetAllP2PCollections(ctx)
-	require.NoError(t, err)
-	require.ElementsMatch(t, []string{col.SchemaRoot()}, cols)
-}
-
 func TestHandleDocCreateLog_NoError(t *testing.T) {
 	ctx := context.Background()
-	db, n := newTestNode(ctx, t)
-	defer n.Close()
+	db, p := newTestPeer(ctx, t)
+	defer db.Close()
+	defer p.Close()
 
 	_, err := db.AddSchema(ctx, `type User {
 		name: String
@@ -894,7 +275,7 @@ func TestHandleDocCreateLog_NoError(t *testing.T) {
 	b, err := db.Blockstore().AsIPLDStorage().Get(ctx, headCID.KeyString())
 	require.NoError(t, err)
 
-	err = n.handleDocCreateLog(event.Update{
+	err = p.handleDocCreateLog(event.Update{
 		DocID:      doc.ID().String(),
 		Cid:        headCID,
 		SchemaRoot: col.SchemaRoot(),
@@ -905,10 +286,11 @@ func TestHandleDocCreateLog_NoError(t *testing.T) {
 
 func TestHandleDocCreateLog_WithInvalidDocID_NoError(t *testing.T) {
 	ctx := context.Background()
-	_, n := newTestNode(ctx, t)
-	defer n.Close()
+	db, p := newTestPeer(ctx, t)
+	defer db.Close()
+	defer p.Close()
 
-	err := n.handleDocCreateLog(event.Update{
+	err := p.handleDocCreateLog(event.Update{
 		DocID: "some-invalid-key",
 	})
 	require.ErrorContains(t, err, "failed to get DocID from broadcast message: selected encoding not supported")
@@ -916,8 +298,9 @@ func TestHandleDocCreateLog_WithInvalidDocID_NoError(t *testing.T) {
 
 func TestHandleDocCreateLog_WithExistingTopic_TopicExistsError(t *testing.T) {
 	ctx := context.Background()
-	db, n := newTestNode(ctx, t)
-	defer n.Close()
+	db, p := newTestPeer(ctx, t)
+	defer db.Close()
+	defer p.Close()
 
 	_, err := db.AddSchema(ctx, `type User {
 		name: String
@@ -934,10 +317,10 @@ func TestHandleDocCreateLog_WithExistingTopic_TopicExistsError(t *testing.T) {
 	err = col.Create(ctx, doc)
 	require.NoError(t, err)
 
-	_, err = rpc.NewTopic(ctx, n.ps, n.host.ID(), doc.ID().String(), true)
+	_, err = rpc.NewTopic(ctx, p.ps, p.host.ID(), doc.ID().String(), true)
 	require.NoError(t, err)
 
-	err = n.handleDocCreateLog(event.Update{
+	err = p.handleDocCreateLog(event.Update{
 		DocID:      doc.ID().String(),
 		SchemaRoot: col.SchemaRoot(),
 	})
@@ -946,8 +329,9 @@
 func TestHandleDocUpdateLog_NoError(t *testing.T) {
 	ctx := context.Background()
-	db, n := newTestNode(ctx, t)
-	defer n.Close()
+	db, p := newTestPeer(ctx, t)
+	defer db.Close()
+	defer p.Close()
 
 	_, err := db.AddSchema(ctx, `type User {
 		name: String
@@ -970,7 +354,7 @@ func TestHandleDocUpdateLog_NoError(t *testing.T) {
 	b, err := db.Blockstore().AsIPLDStorage().Get(ctx, headCID.KeyString())
 	require.NoError(t, err)
 
-	err = n.handleDocUpdateLog(event.Update{
+	err = p.handleDocUpdateLog(event.Update{
 		DocID:      doc.ID().String(),
 		Cid:        headCID,
 		SchemaRoot: col.SchemaRoot(),
@@ -981,10 +365,11 @@ func TestHandleDocUpdateLog_NoError(t *testing.T) {
 
 func TestHandleDoUpdateLog_WithInvalidDocID_NoError(t *testing.T) {
 	ctx := context.Background()
-	_, n := newTestNode(ctx, t)
-	defer n.Close()
+	db, p := newTestPeer(ctx, t)
+	defer db.Close()
+	defer p.Close()
 
-	err := n.handleDocUpdateLog(event.Update{
+	err := p.handleDocUpdateLog(event.Update{
 		DocID: "some-invalid-key",
 	})
 	require.ErrorContains(t, err, "failed to get DocID from broadcast message: selected encoding not supported")
@@ -992,8 +377,9 @@ func TestHandleDoUpdateLog_WithInvalidDocID_NoError(t *testing.T) {
 
 func TestHandleDocUpdateLog_WithExistingDocIDTopic_TopicExistsError(t *testing.T) {
 	ctx := context.Background()
-	db, n := newTestNode(ctx, t)
-	defer n.Close()
+	db, p := newTestPeer(ctx, t)
+	defer db.Close()
+	defer p.Close()
 
 	_, err := db.AddSchema(ctx, `type User {
 		name: String
@@ -1016,10 +402,10 @@ func TestHandleDocUpdateLog_WithExistingDocIDTopic_TopicExistsError(t *testing.T
 	b, err := db.Blockstore().AsIPLDStorage().Get(ctx, headCID.KeyString())
 	require.NoError(t, err)
 
-	_, err = rpc.NewTopic(ctx, n.ps, n.host.ID(), doc.ID().String(), true)
+	_, err = rpc.NewTopic(ctx, p.ps, p.host.ID(), doc.ID().String(), true)
 	require.NoError(t, err)
 
-	err = n.handleDocUpdateLog(event.Update{
+	err = p.handleDocUpdateLog(event.Update{
 		DocID:      doc.ID().String(),
 		Cid:        headCID,
 		SchemaRoot: col.SchemaRoot(),
@@ -1030,8 +416,9 @@ func TestHandleDocUpdateLog_WithExistingDocIDTopic_TopicExistsError(t *testing.T
 
 func TestHandleDocUpdateLog_WithExistingSchemaTopic_TopicExistsError(t *testing.T) {
 	ctx := context.Background()
-	db, n := newTestNode(ctx, t)
-	defer n.Close()
+	db, p := newTestPeer(ctx, t)
+	defer db.Close()
+	defer p.Close()
 
 	_, err := db.AddSchema(ctx, `type User {
 		name: String
@@ -1054,10 +441,10 @@ func TestHandleDocUpdateLog_WithExistingSchemaTopic_TopicExistsError(t *testing.
 	b, err := db.Blockstore().AsIPLDStorage().Get(ctx, headCID.KeyString())
 	require.NoError(t, err)
 
-	_, err = rpc.NewTopic(ctx, n.ps, n.host.ID(), col.SchemaRoot(), true)
+	_, err = rpc.NewTopic(ctx, p.ps, p.host.ID(), col.SchemaRoot(), true)
 	require.NoError(t, err)
 
-	err = n.handleDocUpdateLog(event.Update{
+	err = p.handleDocUpdateLog(event.Update{
 		DocID:      doc.ID().String(),
 		Cid:        headCID,
 		SchemaRoot: col.SchemaRoot(),
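The tests above now rely on a `newTestPeer` helper that hands back the store and the peer separately, so each can be closed independently. Its definition is outside this diff; a plausible shape, reusing the `NewPeer` signature this patch introduces elsewhere (the test-DB constructor is not shown here and is therefore left as a hypothetical helper), would be:

```go
// Sketch only: the real helper lives in the net package's test files.
// makeTestDB stands in for whatever in-memory DB constructor the tests
// actually use; it is hypothetical, not part of this patch.
func newTestPeer(ctx context.Context, t *testing.T) (client.DB, *Peer) {
	db := makeTestDB(ctx, t)

	// Wire a Peer directly to the DB's stores and event bus, mirroring
	// how NewPeer is called in node.go and the integration utilities.
	p, err := NewPeer(ctx, db.Root(), db.Blockstore(), db.Events())
	require.NoError(t, err)

	return db, p
}
```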
diff --git a/net/server.go b/net/server.go
index 413f391064..ed5ac306c8 100644
--- a/net/server.go
+++ b/net/server.go
@@ -18,7 +18,9 @@ import (
 	"sync"
 
 	cid "github.com/ipfs/go-cid"
+	"github.com/libp2p/go-libp2p/core/peer"
 	libpeer "github.com/libp2p/go-libp2p/core/peer"
+	"github.com/libp2p/go-libp2p/core/peerstore"
 	"github.com/sourcenetwork/corelog"
 	rpc "github.com/sourcenetwork/go-libp2p-pubsub-rpc"
 	"google.golang.org/grpc"
@@ -43,7 +45,9 @@ type server struct {
 	opts   []grpc.DialOption
 	topics map[string]pubsubTopic
-	mu     sync.Mutex
+	// replicators is a map from collectionName => peerId
+	replicators map[string]map[peer.ID]struct{}
+	mu          sync.Mutex
 
 	conns map[libpeer.ID]*grpc.ClientConn
@@ -61,9 +65,10 @@ type pubsubTopic struct {
 // underlying DB instance.
 func newServer(p *Peer, opts ...grpc.DialOption) (*server, error) {
 	s := &server{
-		peer:   p,
-		conns:  make(map[libpeer.ID]*grpc.ClientConn),
-		topics: make(map[string]pubsubTopic),
+		peer:        p,
+		conns:       make(map[libpeer.ID]*grpc.ClientConn),
+		topics:      make(map[string]pubsubTopic),
+		replicators: make(map[string]map[peer.ID]struct{}),
 	}
 
 	cred := insecure.NewCredentials()
@@ -73,38 +78,6 @@ func newServer(p *Peer, opts ...grpc.DialOption) (*server, error) {
 	}
 	s.opts = append(defaultOpts, opts...)
 
-	if s.peer.ps != nil {
-		colMap, err := p.loadP2PCollections(p.ctx)
-		if err != nil {
-			return nil, err
-		}
-
-		// Get all DocIDs across all collections in the DB
-		cols, err := s.peer.db.GetCollections(s.peer.ctx, client.CollectionFetchOptions{})
-		if err != nil {
-			return nil, err
-		}
-
-		i := 0
-		for _, col := range cols {
-			// If we subscribed to the collection, we skip subscribing to the collection's docIDs.
-			if _, ok := colMap[col.SchemaRoot()]; ok {
-				continue
-			}
-			// TODO-ACP: Support ACP <> P2P - https://github.com/sourcenetwork/defradb/issues/2366
-			docIDChan, err := col.GetAllDocIDs(p.ctx)
-			if err != nil {
-				return nil, err
-			}
-
-			for docID := range docIDChan {
-				if err := s.addPubSubTopic(docID.ID.String(), true); err != nil {
-					return nil, err
-				}
-				i++
-			}
-		}
-	}
 	return s, nil
 }
@@ -157,7 +130,7 @@ func (s *server) PushLog(ctx context.Context, req *pb.PushLogRequest) (*pb.PushL
 		return nil, err
 	}
 
-	s.peer.db.Events().Publish(event.NewMessage(event.MergeName, event.Merge{
+	s.peer.bus.Publish(event.NewMessage(event.MergeName, event.Merge{
 		DocID:      docID.String(),
 		ByPeer:     byPeer,
 		FromPeer:   pid,
@@ -313,7 +286,7 @@ func (s *server) pubSubEventHandler(from libpeer.ID, topic string, msg []byte) {
 	evt := event.NewMessage(event.PubSubName, event.PubSub{
 		Peer: from,
 	})
-	s.peer.db.Events().Publish(evt)
+	s.peer.bus.Publish(evt)
 }
 
 // addr implements net.Addr and holds a libp2p peer ID.
@@ -337,3 +310,62 @@ func peerIDFromContext(ctx context.Context) (libpeer.ID, error) {
 	}
 	return pid, nil
 }
+
+func (s *server) updatePubSubTopics(evt event.P2PTopic) {
+	for _, topic := range evt.ToAdd {
+		err := s.addPubSubTopic(topic, true)
+		if err != nil {
+			log.ErrorContextE(s.peer.ctx, "Failed to add pubsub topic.", err)
+		}
+	}
+
+	for _, topic := range evt.ToRemove {
+		err := s.removePubSubTopic(topic)
+		if err != nil {
+			log.ErrorContextE(s.peer.ctx, "Failed to remove pubsub topic.", err)
+		}
+	}
+}
+
+func (s *server) updateReplicators(evt event.Replicator) {
+	isDeleteRep := len(evt.Schemas) == 0
+	// update the cached replicators
+	s.mu.Lock()
+	for schema, peers := range s.replicators {
+		if _, hasSchema := evt.Schemas[schema]; hasSchema {
+			s.replicators[schema][evt.Info.ID] = struct{}{}
+			delete(evt.Schemas, schema)
+		} else {
+			if _, exists := peers[evt.Info.ID]; exists {
+				delete(s.replicators[schema], evt.Info.ID)
+			}
+		}
+	}
+	for schema := range evt.Schemas {
+		if _, exists := s.replicators[schema]; !exists {
+			s.replicators[schema] = make(map[peer.ID]struct{})
+		}
+		s.replicators[schema][evt.Info.ID] = struct{}{}
+	}
+	s.mu.Unlock()
+
+	if isDeleteRep {
+		s.peer.host.Peerstore().ClearAddrs(evt.Info.ID)
+	} else {
+		s.peer.host.Peerstore().AddAddrs(evt.Info.ID, evt.Info.Addrs, peerstore.PermanentAddrTTL)
+	}
+
+	if evt.Docs != nil {
+		for update := range evt.Docs {
+			if err := s.pushLog(s.peer.ctx, update, evt.Info.ID); err != nil {
+				log.ErrorContextE(
+					s.peer.ctx,
+					"Failed to replicate log",
+					err,
+					corelog.Any("CID", update.Cid),
+					corelog.Any("PeerID", evt.Info.ID),
+				)
+			}
+		}
+	}
+}
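updateReplicators treats an event whose Schemas set is empty as a full removal of that peer, and otherwise reconciles the cache in two passes: schemas already in the cache are updated in place, and whatever remains in evt.Schemas is inserted fresh. The standalone sketch below (not part of the patch; event.Replicator is reduced to the two fields the bookkeeping needs) illustrates the resulting semantics:

```go
package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p/core/peer"
)

// replicatorEvent is a simplified stand-in for event.Replicator:
// an empty schemas set means "remove this peer everywhere".
type replicatorEvent struct {
	peerID  peer.ID
	schemas map[string]struct{}
}

// apply mirrors the cache update in server.updateReplicators.
func apply(cache map[string]map[peer.ID]struct{}, evt replicatorEvent) {
	for schema, peers := range cache {
		if _, hasSchema := evt.schemas[schema]; hasSchema {
			cache[schema][evt.peerID] = struct{}{}
			delete(evt.schemas, schema)
		} else if _, exists := peers[evt.peerID]; exists {
			delete(cache[schema], evt.peerID)
		}
	}
	// Any schemas left in the event are new to the cache.
	for schema := range evt.schemas {
		if _, exists := cache[schema]; !exists {
			cache[schema] = make(map[peer.ID]struct{})
		}
		cache[schema][evt.peerID] = struct{}{}
	}
}

func main() {
	cache := map[string]map[peer.ID]struct{}{}
	pid := peer.ID("QmExamplePeer") // hypothetical ID, for illustration only

	apply(cache, replicatorEvent{pid, map[string]struct{}{"User": {}}})
	fmt.Println(len(cache["User"])) // 1: the peer now replicates "User"

	apply(cache, replicatorEvent{pid, map[string]struct{}{}})
	fmt.Println(len(cache["User"])) // 0: the empty set removed the peer
}
```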
diff --git a/net/server_test.go b/net/server_test.go
index d17705b404..1ac178a2d1 100644
--- a/net/server_test.go
+++ b/net/server_test.go
@@ -18,12 +18,10 @@ import (
 	"github.com/ipfs/go-datastore/query"
 	"github.com/libp2p/go-libp2p/core/event"
 	"github.com/libp2p/go-libp2p/core/host"
-	rpc "github.com/sourcenetwork/go-libp2p-pubsub-rpc"
 	"github.com/stretchr/testify/require"
 	grpcpeer "google.golang.org/grpc/peer"
 
 	"github.com/sourcenetwork/defradb/client"
-	"github.com/sourcenetwork/defradb/datastore/memory"
 	"github.com/sourcenetwork/defradb/errors"
 	"github.com/sourcenetwork/defradb/internal/core"
 	net_pb "github.com/sourcenetwork/defradb/net/pb"
@@ -31,124 +29,15 @@ import (
 
 func TestNewServerSimple(t *testing.T) {
 	ctx := context.Background()
-	_, n := newTestNode(ctx, t)
-	_, err := newServer(n.Peer)
+	db, p := newTestPeer(ctx, t)
+	defer db.Close()
+	defer p.Close()
+	_, err := newServer(p)
 	require.NoError(t, err)
 }
 
-func TestNewServerWithDBClosed(t *testing.T) {
-	ctx := context.Background()
-	db, n := newTestNode(ctx, t)
-	db.Close()
-
-	_, err := newServer(n.Peer)
-	require.ErrorIs(t, err, memory.ErrClosed)
-}
-
 var mockError = errors.New("mock error")
 
-type mockDBColError struct {
-	client.DB
-}
-
-func (mDB *mockDBColError) GetCollections(context.Context, client.CollectionFetchOptions) ([]client.Collection, error) {
-	return nil, mockError
-}
-
-func TestNewServerWithGetAllCollectionError(t *testing.T) {
-	ctx := context.Background()
-	db, n := newTestNode(ctx, t)
-	mDB := mockDBColError{db}
-	n.Peer.db = &mDB
-	_, err := newServer(n.Peer)
-	require.ErrorIs(t, err, mockError)
-}
-
-func TestNewServerWithCollectionSubscribed(t *testing.T) {
-	ctx := context.Background()
-	db, n := newTestNode(ctx, t)
-
-	_, err := db.AddSchema(ctx, `type User {
-		name: String
-		age: Int
-	}`)
-	require.NoError(t, err)
-
-	col, err := db.GetCollectionByName(ctx, "User")
-	require.NoError(t, err)
-
-	err = n.AddP2PCollections(ctx, []string{col.SchemaRoot()})
-	require.NoError(t, err)
-
-	_, err = newServer(n.Peer)
-	require.NoError(t, err)
-}
-
-type mockDBDocIDsError struct {
-	client.DB
-}
-
-func (mDB *mockDBDocIDsError) GetCollections(context.Context, client.CollectionFetchOptions) ([]client.Collection, error) {
-	return []client.Collection{
-		&mockCollection{},
-	}, nil
-}
-
-type mockCollection struct {
-	client.Collection
-}
-
-func (mCol *mockCollection) SchemaRoot() string {
-	return "mockColID"
-}
-func (mCol *mockCollection) GetAllDocIDs(
-	ctx context.Context,
-) (<-chan client.DocIDResult, error) {
-	return nil, mockError
-}
-
-func TestNewServerWithGetAllDocIDsError(t *testing.T) {
-	ctx := context.Background()
-	db, n := newTestNode(ctx, t)
-
-	_, err := db.AddSchema(ctx, `type User {
-		name: String
-		age: Int
-	}`)
-	require.NoError(t, err)
-
-	mDB := mockDBDocIDsError{db}
-	n.Peer.db = &mDB
-	_, err = newServer(n.Peer)
-	require.ErrorIs(t, err, mockError)
-}
-
-func TestNewServerWithAddTopicError(t *testing.T) {
-	ctx := context.Background()
-	db, n := newTestNode(ctx, t)
-
-	_, err := db.AddSchema(ctx, `type User {
-		name: String
-		age: Int
-	}`)
-	require.NoError(t, err)
-
-	col, err := db.GetCollectionByName(ctx, "User")
-	require.NoError(t, err)
-
-	doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`), col.Definition())
-	require.NoError(t, err)
-
-	err = col.Create(ctx, doc)
-	require.NoError(t, err)
-
-	_, err = rpc.NewTopic(ctx, n.Peer.ps, n.Peer.host.ID(), doc.ID().String(), true)
-	require.NoError(t, err)
-
-	_, err = newServer(n.Peer)
-	require.ErrorContains(t, err, "topic already exists")
-}
-
 type mockHost struct {
 	host.Host
 }
@@ -171,7 +60,9 @@ func (mB *mockBus) Subscribe(eventType any, opts ...event.SubscriptionOpt) (even
 
 func TestNewServerWithEmitterError(t *testing.T) {
 	ctx := context.Background()
-	db, n := newTestNode(ctx, t)
+	db, p := newTestPeer(ctx, t)
+	defer db.Close()
+	defer p.Close()
 
 	_, err := db.AddSchema(ctx, `type User {
 		name: String
@@ -188,40 +79,48 @@ func TestNewServerWithEmitterError(t *testing.T) {
 	err = col.Create(ctx, doc)
 	require.NoError(t, err)
 
-	n.Peer.host = &mockHost{n.Peer.host}
+	p.host = &mockHost{p.host}
 
-	_, err = newServer(n.Peer)
+	_, err = newServer(p)
 	require.NoError(t, err)
 }
 
 func TestGetDocGraph(t *testing.T) {
 	ctx := context.Background()
-	_, n := newTestNode(ctx, t)
-	r, err := n.server.GetDocGraph(ctx, &net_pb.GetDocGraphRequest{})
+	db, p := newTestPeer(ctx, t)
+	defer db.Close()
+	defer p.Close()
+	r, err := p.server.GetDocGraph(ctx, &net_pb.GetDocGraphRequest{})
 	require.Nil(t, r)
 	require.Nil(t, err)
 }
 
 func TestPushDocGraph(t *testing.T) {
 	ctx := context.Background()
-	_, n := newTestNode(ctx, t)
-	r, err := n.server.PushDocGraph(ctx, &net_pb.PushDocGraphRequest{})
+	db, p := newTestPeer(ctx, t)
+	defer db.Close()
+	defer p.Close()
+	r, err := p.server.PushDocGraph(ctx, &net_pb.PushDocGraphRequest{})
 	require.Nil(t, r)
 	require.Nil(t, err)
 }
 
 func TestGetLog(t *testing.T) {
 	ctx := context.Background()
-	_, n := newTestNode(ctx, t)
-	r, err := n.server.GetLog(ctx, &net_pb.GetLogRequest{})
+	db, p := newTestPeer(ctx, t)
+	defer db.Close()
+	defer p.Close()
+	r, err := p.server.GetLog(ctx, &net_pb.GetLogRequest{})
 	require.Nil(t, r)
 	require.Nil(t, err)
 }
 func TestGetHeadLog(t *testing.T) {
 	ctx := context.Background()
-	_, n := newTestNode(ctx, t)
-	r, err := n.server.GetHeadLog(ctx, &net_pb.GetHeadLogRequest{})
+	db, p := newTestPeer(ctx, t)
+	defer db.Close()
+	defer p.Close()
+	r, err := p.server.GetHeadLog(ctx, &net_pb.GetHeadLogRequest{})
 	require.Nil(t, r)
 	require.Nil(t, err)
 }
@@ -249,8 +148,10 @@ func getHead(ctx context.Context, db client.DB, docID client.DocID) (cid.Cid, er
 
 func TestPushLog(t *testing.T) {
 	ctx := context.Background()
-	db, n := newTestNode(ctx, t)
-	err := n.Start()
+	db, p := newTestPeer(ctx, t)
+	defer db.Close()
+	defer p.Close()
+	err := p.Start()
 	require.NoError(t, err)
 
 	_, err = db.AddSchema(ctx, `type User {
@@ -266,7 +167,7 @@ func TestPushLog(t *testing.T) {
 	require.NoError(t, err)
 
 	ctx = grpcpeer.NewContext(ctx, &grpcpeer.Peer{
-		Addr: addr{n.PeerID()},
+		Addr: addr{p.PeerID()},
 	})
 
 	err = col.Create(ctx, doc)
@@ -278,12 +179,12 @@ func TestPushLog(t *testing.T) {
 	b, err := db.Blockstore().AsIPLDStorage().Get(ctx, headCID.KeyString())
 	require.NoError(t, err)
 
-	_, err = n.server.PushLog(ctx, &net_pb.PushLogRequest{
+	_, err = p.server.PushLog(ctx, &net_pb.PushLogRequest{
 		Body: &net_pb.PushLogRequest_Body{
 			DocID:      []byte(doc.ID().String()),
 			Cid:        headCID.Bytes(),
 			SchemaRoot: []byte(col.SchemaRoot()),
-			Creator:    n.PeerID().String(),
+			Creator:    p.PeerID().String(),
 			Log: &net_pb.Document_Log{
 				Block: b,
 			},
diff --git a/node/node.go b/node/node.go
index 215cf05fc7..8fb5ce8f0e 100644
--- a/node/node.go
+++ b/node/node.go
@@ -77,7 +77,7 @@ func WithPeers(peers ...peer.AddrInfo) NodeOpt {
 
 // Node is a DefraDB instance with optional sub-systems.
 type Node struct {
 	DB     client.DB
-	Node   *net.Node
+	Peer   *net.Peer
 	Server *http.Server
 }
@@ -138,27 +138,22 @@ func NewNode(ctx context.Context, opts ...Option) (*Node, error) {
 		return nil, err
 	}
 
-	var node *net.Node
+	var peer *net.Peer
 	if !options.disableP2P {
 		// setup net node
-		node, err = net.NewNode(ctx, db, netOpts...)
+		peer, err = net.NewPeer(ctx, db.Root(), db.Blockstore(), db.Events(), netOpts...)
 		if err != nil {
 			return nil, err
 		}
 		if len(options.peers) > 0 {
-			node.Bootstrap(options.peers)
+			peer.Bootstrap(options.peers)
 		}
 	}
 
 	var server *http.Server
 	if !options.disableAPI {
 		// setup http server
-		var handler *http.Handler
-		if node != nil {
-			handler, err = http.NewHandler(node)
-		} else {
-			handler, err = http.NewHandler(db)
-		}
+		handler, err := http.NewHandler(db)
 		if err != nil {
 			return nil, err
 		}
@@ -170,15 +165,15 @@ func NewNode(ctx context.Context, opts ...Option) (*Node, error) {
 
 	return &Node{
 		DB:     db,
-		Node:   node,
+		Peer:   peer,
 		Server: server,
 	}, nil
 }
 
 // Start starts the node sub-systems.
 func (n *Node) Start(ctx context.Context) error {
-	if n.Node != nil {
-		if err := n.Node.Start(); err != nil {
+	if n.Peer != nil {
+		if err := n.Peer.Start(); err != nil {
 			return err
 		}
 	}
@@ -203,9 +198,10 @@ func (n *Node) Close(ctx context.Context) error {
 	if n.Server != nil {
 		err = n.Server.Shutdown(ctx)
 	}
-	if n.Node != nil {
-		n.Node.Close()
-	} else {
+	if n.Peer != nil {
+		n.Peer.Close()
+	}
+	if n.DB != nil {
 		n.DB.Close()
 	}
 	return err
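With the P2P subsystem exposed as a plain field rather than a wrapping net.Node, embedding a node now looks roughly like the sketch below (a minimal sketch assuming default options; option wiring is omitted):

```go
package main

import (
	"context"
	"log"

	"github.com/sourcenetwork/defradb/node"
)

func main() {
	ctx := context.Background()

	n, err := node.NewNode(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// Close now shuts down the Peer and the DB independently, so both
	// are released even when one of the sub-systems is disabled.
	defer func() { _ = n.Close(ctx) }()

	if err := n.Start(ctx); err != nil {
		log.Fatal(err)
	}

	// The peer is nil when P2P is disabled, so guard before using it.
	if n.Peer != nil {
		log.Println("p2p info:", n.Peer.PeerInfo())
	}
}
```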
diff --git a/tests/clients/cli/wrapper.go b/tests/clients/cli/wrapper.go
index 464a75baef..1debed17fe 100644
--- a/tests/clients/cli/wrapper.go
+++ b/tests/clients/cli/wrapper.go
@@ -31,20 +31,20 @@ import (
 	"github.com/sourcenetwork/defradb/datastore"
 	"github.com/sourcenetwork/defradb/event"
 	"github.com/sourcenetwork/defradb/http"
-	"github.com/sourcenetwork/defradb/net"
+	"github.com/sourcenetwork/defradb/node"
 )
 
-var _ client.P2P = (*Wrapper)(nil)
+var _ client.DB = (*Wrapper)(nil)
 
 type Wrapper struct {
-	node       *net.Node
+	node       *node.Node
 	cmd        *cliWrapper
 	handler    *http.Handler
 	httpServer *httptest.Server
 }
 
-func NewWrapper(node *net.Node) (*Wrapper, error) {
-	handler, err := http.NewHandler(node)
+func NewWrapper(node *node.Node) (*Wrapper, error) {
+	handler, err := http.NewHandler(node.DB)
 	if err != nil {
 		return nil, err
 	}
@@ -504,39 +504,39 @@ func (w *Wrapper) NewConcurrentTxn(ctx context.Context, readOnly bool) (datastor
 }
 
 func (w *Wrapper) Root() datastore.Rootstore {
-	return w.node.Root()
+	return w.node.DB.Root()
 }
 
 func (w *Wrapper) Blockstore() datastore.Blockstore {
-	return w.node.Blockstore()
+	return w.node.DB.Blockstore()
 }
 
 func (w *Wrapper) Headstore() ds.Read {
-	return w.node.Headstore()
+	return w.node.DB.Headstore()
 }
 
 func (w *Wrapper) Peerstore() datastore.DSBatching {
-	return w.node.Peerstore()
+	return w.node.DB.Peerstore()
 }
 
 func (w *Wrapper) Close() {
 	w.httpServer.CloseClientConnections()
 	w.httpServer.Close()
-	w.node.Close()
+	_ = w.node.Close(context.Background())
 }
 
 func (w *Wrapper) Events() *event.Bus {
-	return w.node.Events()
+	return w.node.DB.Events()
 }
 
 func (w *Wrapper) MaxTxnRetries() int {
-	return w.node.MaxTxnRetries()
+	return w.node.DB.MaxTxnRetries()
 }
 
 func (w *Wrapper) PrintDump(ctx context.Context) error {
-	return w.node.PrintDump(ctx)
+	return w.node.DB.PrintDump(ctx)
 }
 
 func (w *Wrapper) Bootstrap(addrs []peer.AddrInfo) {
-	w.node.Bootstrap(addrs)
+	w.node.Peer.Bootstrap(addrs)
 }
diff --git a/tests/clients/clients.go b/tests/clients/clients.go
index 249b1e767f..f5d822ab39 100644
--- a/tests/clients/clients.go
+++ b/tests/clients/clients.go
@@ -19,6 +19,6 @@ import (
 // Client implements the P2P interface along with a few other methods
 // required for testing.
 type Client interface {
-	client.P2P
+	client.DB
 	Bootstrap([]peer.AddrInfo)
 }
diff --git a/tests/clients/http/wrapper.go b/tests/clients/http/wrapper.go
index bb60f40c05..82cb1402cb 100644
--- a/tests/clients/http/wrapper.go
+++ b/tests/clients/http/wrapper.go
@@ -24,22 +24,22 @@ import (
 	"github.com/sourcenetwork/defradb/datastore"
 	"github.com/sourcenetwork/defradb/event"
 	"github.com/sourcenetwork/defradb/http"
-	"github.com/sourcenetwork/defradb/net"
+	"github.com/sourcenetwork/defradb/node"
 )
 
-var _ client.P2P = (*Wrapper)(nil)
+var _ client.DB = (*Wrapper)(nil)
 
 // Wrapper combines an HTTP client and server into a
 // single struct that implements the client.DB interface.
 type Wrapper struct {
-	node       *net.Node
+	node       *node.Node
 	handler    *http.Handler
 	client     *http.Client
 	httpServer *httptest.Server
 }
 
-func NewWrapper(node *net.Node) (*Wrapper, error) {
-	handler, err := http.NewHandler(node)
+func NewWrapper(node *node.Node) (*Wrapper, error) {
+	handler, err := http.NewHandler(node.DB)
 	if err != nil {
 		return nil, err
 	}
@@ -200,39 +200,39 @@ func (w *Wrapper) NewConcurrentTxn(ctx context.Context, readOnly bool) (datastor
 }
 
 func (w *Wrapper) Root() datastore.Rootstore {
-	return w.node.Root()
+	return w.node.DB.Root()
 }
 
 func (w *Wrapper) Blockstore() datastore.Blockstore {
-	return w.node.Blockstore()
+	return w.node.DB.Blockstore()
 }
 
 func (w *Wrapper) Headstore() ds.Read {
-	return w.node.Headstore()
+	return w.node.DB.Headstore()
 }
 
 func (w *Wrapper) Peerstore() datastore.DSBatching {
-	return w.node.Peerstore()
+	return w.node.DB.Peerstore()
 }
 
 func (w *Wrapper) Close() {
 	w.httpServer.CloseClientConnections()
 	w.httpServer.Close()
-	w.node.Close()
+	_ = w.node.Close(context.Background())
 }
 
 func (w *Wrapper) Events() *event.Bus {
-	return w.node.Events()
+	return w.node.DB.Events()
 }
 
 func (w *Wrapper) MaxTxnRetries() int {
-	return w.node.MaxTxnRetries()
+	return w.node.DB.MaxTxnRetries()
 }
 
 func (w *Wrapper) PrintDump(ctx context.Context) error {
-	return w.node.PrintDump(ctx)
+	return w.node.DB.PrintDump(ctx)
 }
 
 func (w *Wrapper) Bootstrap(addrs []peer.AddrInfo) {
-	w.node.Bootstrap(addrs)
+	w.node.Peer.Bootstrap(addrs)
 }
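Both wrappers now assert client.DB instead of client.P2P, and each keeps a Bootstrap method that reaches through to node.Peer. That is exactly the surface the test-harness Client interface asks for, which an illustrative compile-time check (not part of the patch) makes explicit:

```go
package clients_test // hypothetical placement, for illustration only

import (
	"github.com/sourcenetwork/defradb/tests/clients"
	"github.com/sourcenetwork/defradb/tests/clients/cli"
	"github.com/sourcenetwork/defradb/tests/clients/http"
)

// Each wrapper provides the client.DB behaviour and adds Bootstrap,
// which together satisfy the clients.Client contract.
var (
	_ clients.Client = (*cli.Wrapper)(nil)
	_ clients.Client = (*http.Wrapper)(nil)
)
```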
diff --git a/tests/integration/client.go b/tests/integration/client.go
index 1d06bfc744..dee5a3f0c9 100644
--- a/tests/integration/client.go
+++ b/tests/integration/client.go
@@ -15,7 +15,11 @@ import (
 	"os"
 	"strconv"
 
+	"github.com/libp2p/go-libp2p/core/peer"
+
+	"github.com/sourcenetwork/defradb/client"
 	"github.com/sourcenetwork/defradb/net"
+	"github.com/sourcenetwork/defradb/node"
 	"github.com/sourcenetwork/defradb/tests/clients"
 	"github.com/sourcenetwork/defradb/tests/clients/cli"
 	"github.com/sourcenetwork/defradb/tests/clients/http"
@@ -63,7 +67,7 @@ func init() {
 // setupClient returns the client implementation for the current
 // testing state. The client type on the test state is used to
 // select the client implementation to use.
-func setupClient(s *state, node *net.Node) (impl clients.Client, err error) {
+func setupClient(s *state, node *node.Node) (impl clients.Client, err error) {
 	switch s.clientType {
 	case HTTPClientType:
 		impl, err = http.NewWrapper(node)
@@ -72,7 +76,7 @@ func setupClient(s *state, node *node.Node) (impl clients.Client, err error) {
 		impl, err = cli.NewWrapper(node)
 
 	case GoClientType:
-		impl = node
+		impl = newGoClientWrapper(node)
 
 	default:
 		err = fmt.Errorf("invalid client type: %v", s.dbt)
@@ -83,3 +87,28 @@ func setupClient(s *state, node *node.Node) (impl clients.Client, err error) {
 	}
 	return
 }
+
+type goClientWrapper struct {
+	client.DB
+	peer *net.Peer
+}
+
+func newGoClientWrapper(n *node.Node) *goClientWrapper {
+	return &goClientWrapper{
+		DB:   n.DB,
+		peer: n.Peer,
+	}
+}
+
+func (w *goClientWrapper) Bootstrap(addrs []peer.AddrInfo) {
+	if w.peer != nil {
+		w.peer.Bootstrap(addrs)
+	}
+}
+
+func (w *goClientWrapper) Close() {
+	if w.peer != nil {
+		w.peer.Close()
+	}
+	w.DB.Close()
+}
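Because goClientWrapper guards every peer access with a nil check, the Go-client path also works for nodes built without P2P. A short in-package assertion (illustrative only, not part of the patch) documents that it satisfies the same contract as the other wrappers:

```go
// Illustrative compile-time assertion, placed alongside client.go in the
// integration package: goClientWrapper embeds client.DB and adds
// Bootstrap, which together satisfy the clients.Client contract.
var _ clients.Client = (*goClientWrapper)(nil)
```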
diff --git a/tests/integration/db.go b/tests/integration/db.go
index ab15e2d5fc..0928f3b90b 100644
--- a/tests/integration/db.go
+++ b/tests/integration/db.go
@@ -100,7 +100,7 @@ func NewBadgerFileDB(ctx context.Context, t testing.TB) (client.DB, error) {
 // setupDatabase returns the database implementation for the current
 // testing state. The database type on the test state is used to
 // select the datastore implementation to use.
-func setupDatabase(s *state) (client.DB, string, error) {
+func setupDatabase(s *state) (*node.Node, string, error) {
 	opts := []node.Option{
 		node.WithLensPoolSize(lensPoolSize),
 		// The test framework sets this up elsewhere when required so that it may be wrapped
@@ -158,5 +158,5 @@ func setupDatabase(s *state) (*node.Node, string, error) {
 		return nil, "", err
 	}
 
-	return node.DB, path, nil
+	return node, path, nil
 }
diff --git a/tests/integration/utils2.go b/tests/integration/utils2.go
index 5e9d089ccd..3fbbb25d30 100644
--- a/tests/integration/utils2.go
+++ b/tests/integration/utils2.go
@@ -652,7 +652,7 @@ func setStartingNodes(
 		db, path, err := setupDatabase(s)
 		require.Nil(s.t, err)
 
-		c, err := setupClient(s, &net.Node{DB: db})
+		c, err := setupClient(s, db)
 		require.Nil(s.t, err)
 
 		s.nodes = append(s.nodes, c)
@@ -673,14 +673,14 @@ func restartNodes(
 	for i := len(s.nodes) - 1; i >= 0; i-- {
 		originalPath := databaseDir
 		databaseDir = s.dbPaths[i]
-		db, _, err := setupDatabase(s)
+		node, _, err := setupDatabase(s)
 		require.Nil(s.t, err)
 		databaseDir = originalPath
 
 		if len(s.nodeConfigs) == 0 {
 			// If there are no explicit node configuration actions the node will be
 			// basic (i.e. no P2P stuff) and can be yielded now.
-			c, err := setupClient(s, &net.Node{DB: db})
+			c, err := setupClient(s, node)
 			require.NoError(s.t, err)
 			s.nodes[i] = c
 			continue
@@ -696,16 +696,16 @@ func restartNodes(
 		nodeOpts := s.nodeConfigs[i]
 		nodeOpts = append(nodeOpts, net.WithListenAddresses(addresses...))
 
-		var n *net.Node
-		n, err = net.NewNode(s.ctx, db, nodeOpts...)
+		p, err := net.NewPeer(s.ctx, node.DB.Root(), node.DB.Blockstore(), node.DB.Events(), nodeOpts...)
 		require.NoError(s.t, err)
 
-		if err := n.Start(); err != nil {
-			n.Close()
+		if err := p.Start(); err != nil {
+			p.Close()
 			require.NoError(s.t, err)
 		}
+		node.Peer = p
 
-		c, err := setupClient(s, n)
+		c, err := setupClient(s, node)
 		require.NoError(s.t, err)
 		s.nodes[i] = c
 
@@ -787,7 +787,7 @@ func configureNode(
 		return
 	}
 
-	db, path, err := setupDatabase(s) //disable change dector, or allow it?
+	node, path, err := setupDatabase(s) // disable change detector, or allow it?
	require.NoError(s.t, err)
 
 	privateKey, err := crypto.GenerateEd25519()
@@ -796,20 +796,21 @@ func configureNode(
 	nodeOpts := action()
 	nodeOpts = append(nodeOpts, net.WithPrivateKey(privateKey))
 
-	var n *net.Node
-	n, err = net.NewNode(s.ctx, db, nodeOpts...)
+	p, err := net.NewPeer(s.ctx, node.DB.Root(), node.DB.Blockstore(), node.DB.Events(), nodeOpts...)
 	require.NoError(s.t, err)
 
-	log.InfoContext(s.ctx, "Starting P2P node", corelog.Any("P2P address", n.PeerInfo()))
-	if err := n.Start(); err != nil {
-		n.Close()
+	log.InfoContext(s.ctx, "Starting P2P node", corelog.Any("P2P address", p.PeerInfo()))
+	if err := p.Start(); err != nil {
+		p.Close()
 		require.NoError(s.t, err)
 	}
 
-	s.nodeAddresses = append(s.nodeAddresses, n.PeerInfo())
+	s.nodeAddresses = append(s.nodeAddresses, p.PeerInfo())
 	s.nodeConfigs = append(s.nodeConfigs, nodeOpts)
 
-	c, err := setupClient(s, n)
+	node.Peer = p
+
+	c, err := setupClient(s, node)
 	require.NoError(s.t, err)
 	s.nodes = append(s.nodes, c)
 
@@ -1163,7 +1164,7 @@ func createDoc(
 		substituteRelations(s, action)
 	}
 
-	var mutation func(*state, CreateDoc, client.P2P, []client.Collection) (*client.Document, error)
+	var mutation func(*state, CreateDoc, client.DB, []client.Collection) (*client.Document, error)
 
 	switch mutationType {
 	case CollectionSaveMutationType:
@@ -1204,7 +1205,7 @@ func createDoc(
 func createDocViaColSave(
 	s *state,
 	action CreateDoc,
-	node client.P2P,
+	node client.DB,
 	collections []client.Collection,
 ) (*client.Document, error) {
 	var err error
@@ -1229,7 +1230,7 @@ func createDocViaColSave(
 func createDocViaColCreate(
 	s *state,
 	action CreateDoc,
-	node client.P2P,
+	node client.DB,
 	collections []client.Collection,
 ) (*client.Document, error) {
 	var err error
@@ -1254,7 +1255,7 @@ func createDocViaColCreate(
 func createDocViaGQL(
 	s *state,
 	action CreateDoc,
-	node client.P2P,
+	node client.DB,
 	collections []client.Collection,
 ) (*client.Document, error) {
 	collection := collections[action.CollectionID]
@@ -1356,7 +1357,7 @@ func updateDoc(
 	s *state,
 	action UpdateDoc,
 ) {
-	var mutation func(*state, UpdateDoc, client.P2P, []client.Collection) error
+	var mutation func(*state, UpdateDoc, client.DB, []client.Collection) error
 
 	switch mutationType {
 	case CollectionSaveMutationType:
@@ -1386,7 +1387,7 @@ func updateDoc(
 func updateDocViaColSave(
 	s *state,
 	action UpdateDoc,
-	node client.P2P,
+	node client.DB,
 	collections []client.Collection,
 ) error {
 	cachedDoc := s.documents[action.CollectionID][action.DocID]
@@ -1413,7 +1414,7 @@ func updateDocViaColSave(
 func updateDocViaColUpdate(
 	s *state,
 	action UpdateDoc,
-	node client.P2P,
+	node client.DB,
 	collections []client.Collection,
 ) error {
 	cachedDoc := s.documents[action.CollectionID][action.DocID]
@@ -1437,7 +1438,7 @@ func updateDocViaColUpdate(
 func updateDocViaGQL(
 	s *state,
 	action UpdateDoc,
-	node client.P2P,
+	node client.DB,
 	collections []client.Collection,
 ) error {
 	doc := s.documents[action.CollectionID][action.DocID]
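restartNodes and configureNode now share one pattern: build a Peer over the already-open DB, start it, and only attach it to the node once it is running. Distilled into a helper (a sketch under the assumptions visible in this diff; the helper itself is not part of the patch):

```go
package p2putil // hypothetical helper package, for illustration only

import (
	"context"

	"github.com/sourcenetwork/defradb/net"
	"github.com/sourcenetwork/defradb/node"
)

// attachPeer mirrors the sequence used by restartNodes and configureNode:
// construct the Peer from the node's stores and event bus, start it, and
// expose it on the node only once it is running.
func attachPeer(ctx context.Context, n *node.Node) error {
	p, err := net.NewPeer(ctx, n.DB.Root(), n.DB.Blockstore(), n.DB.Events())
	if err != nil {
		return err
	}
	if err := p.Start(); err != nil {
		p.Close() // release the half-started peer before reporting failure
		return err
	}
	n.Peer = p
	return nil
}
```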