diff --git a/core/node/events/stream.go b/core/node/events/stream.go index fc8a39151..d96508453 100644 --- a/core/node/events/stream.go +++ b/core/node/events/stream.go @@ -33,8 +33,12 @@ type Stream interface { } type SyncResultReceiver interface { + // OnUpdate is called each time a new cookie is available for a stream OnUpdate(r *StreamAndCookie) + // OnSyncError is called when a sync subscription failed unrecoverable OnSyncError(err error) + // OnStreamSyncDown is called when updates for a stream could not be given. + OnStreamSyncDown(StreamId) } // TODO: refactor interfaces. diff --git a/core/node/events/stream_cache_test.go b/core/node/events/stream_cache_test.go index 711bc3dd8..7595d95fc 100644 --- a/core/node/events/stream_cache_test.go +++ b/core/node/events/stream_cache_test.go @@ -199,6 +199,7 @@ func TestCacheEvictionWithFilledMiniBlockPool(t *testing.T) { type testStreamCacheViewEvictionSub struct { receivedStreamAndCookies []*protocol.StreamAndCookie receivedErrors []error + streamErrors []shared.StreamId } func (sub *testStreamCacheViewEvictionSub) OnUpdate(sac *protocol.StreamAndCookie) { @@ -209,6 +210,10 @@ func (sub *testStreamCacheViewEvictionSub) OnSyncError(err error) { sub.receivedErrors = append(sub.receivedErrors, err) } +func (sub *testStreamCacheViewEvictionSub) OnStreamSyncDown(streamID shared.StreamId) { + sub.streamErrors = append(sub.streamErrors, streamID) +} + func (sub *testStreamCacheViewEvictionSub) eventsReceived() int { count := 0 for _, sac := range sub.receivedStreamAndCookies { diff --git a/core/node/protocol/protocol.pb.go b/core/node/protocol/protocol.pb.go index 6fe7e90d1..d452738ed 100644 --- a/core/node/protocol/protocol.pb.go +++ b/core/node/protocol/protocol.pb.go @@ -30,6 +30,7 @@ const ( SyncOp_SYNC_CLOSE SyncOp = 2 // close the sync SyncOp_SYNC_UPDATE SyncOp = 3 // update from server SyncOp_SYNC_PONG SyncOp = 4 // respond to the ping message from the client. 
+ SyncOp_SYNC_DOWN SyncOp = 5 // indication that stream updates could (temporarily) not be provided ) // Enum value maps for SyncOp. @@ -40,6 +41,7 @@ var ( 2: "SYNC_CLOSE", 3: "SYNC_UPDATE", 4: "SYNC_PONG", + 5: "SYNC_DOWN", } SyncOp_value = map[string]int32{ "SYNC_UNSPECIFIED": 0, @@ -47,6 +49,7 @@ var ( "SYNC_CLOSE": 2, "SYNC_UPDATE": 3, "SYNC_PONG": 4, + "SYNC_DOWN": 5, } ) @@ -3368,11 +3371,13 @@ func (x *AddEventResponse) GetError() *AddEventResponse_Error { return nil } +// SyncStreamsRequest is a request to start a streams sync session. type SyncStreamsRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // sync_pos is the list of streams and positions in those streams to receive updates from. SyncPos []*SyncCookie `protobuf:"bytes,1,rep,name=sync_pos,json=syncPos,proto3" json:"sync_pos,omitempty"` } @@ -3415,15 +3420,26 @@ func (x *SyncStreamsRequest) GetSyncPos() []*SyncCookie { return nil } +// SyncStreamsResponse is a stream of updates that the client receives for streams it subscribed to within a streams +// sync session. type SyncStreamsResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - SyncId string `protobuf:"bytes,1,opt,name=sync_id,json=syncId,proto3" json:"sync_id,omitempty"` - SyncOp SyncOp `protobuf:"varint,2,opt,name=sync_op,json=syncOp,proto3,enum=river.SyncOp" json:"sync_op,omitempty"` - Stream *StreamAndCookie `protobuf:"bytes,3,opt,name=stream,proto3" json:"stream,omitempty"` - PongNonce string `protobuf:"bytes,4,opt,name=pong_nonce,json=pongNonce,proto3" json:"pong_nonce,omitempty"` + // sync_id is the id of the sync session. + SyncId string `protobuf:"bytes,1,opt,name=sync_id,json=syncId,proto3" json:"sync_id,omitempty"` + // sync_op marks the type of update. 
+ SyncOp SyncOp `protobuf:"varint,2,opt,name=sync_op,json=syncOp,proto3,enum=river.SyncOp" json:"sync_op,omitempty"` + // stream indicates an update of a stream. + // only set when sync_op = SYNC_UPDATE + Stream *StreamAndCookie `protobuf:"bytes,3,opt,name=stream,proto3" json:"stream,omitempty"` + // pong_nonce is returned after a ping request was made to the sync session through PingSync. + // Set with the ping value from the PingSync request when sync_op = SYNC_PONG + PongNonce string `protobuf:"bytes,4,opt,name=pong_nonce,json=pongNonce,proto3" json:"pong_nonce,omitempty"` + // stream_id is set when sync_op = SYNC_DOWN and indicates it will not receive updates anymore for this stream. + // If the client is still is interested in updates for this stream it must re-add the stream to the sync session. + StreamId []byte `protobuf:"bytes,5,opt,name=stream_id,json=streamId,proto3" json:"stream_id,omitempty"` } func (x *SyncStreamsResponse) Reset() { @@ -3486,12 +3502,22 @@ func (x *SyncStreamsResponse) GetPongNonce() string { return "" } +func (x *SyncStreamsResponse) GetStreamId() []byte { + if x != nil { + return x.StreamId + } + return nil +} + +// AddStreamToSyncRequest is a request to add a stream to an existing streams sync session. type AddStreamToSyncRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - SyncId string `protobuf:"bytes,1,opt,name=sync_id,json=syncId,proto3" json:"sync_id,omitempty"` + // sync_id is the id of the sync session. + SyncId string `protobuf:"bytes,1,opt,name=sync_id,json=syncId,proto3" json:"sync_id,omitempty"` + // sync_pos identifies the stream and position in the stream to receive updates from. 
SyncPos *SyncCookie `protobuf:"bytes,2,opt,name=sync_pos,json=syncPos,proto3" json:"sync_pos,omitempty"` } @@ -3579,6 +3605,9 @@ func (*AddStreamToSyncResponse) Descriptor() ([]byte, []int) { return file_protocol_proto_rawDescGZIP(), []int{37} } +// RemoveStreamFromSyncRequest stops the client to receive updates from this stream in the sync session. +// Note that due to buffering in the stream it is possible still receives several updates for this stream after it was +// removed. type RemoveStreamFromSyncRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3672,11 +3701,13 @@ func (*RemoveStreamFromSyncResponse) Descriptor() ([]byte, []int) { return file_protocol_proto_rawDescGZIP(), []int{39} } +// CancelSyncRequest cancels the sync session. type CancelSyncRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // sync_id is the unique id of the sync session. SyncId string `protobuf:"bytes,1,opt,name=sync_id,json=syncId,proto3" json:"sync_id,omitempty"` } @@ -3757,13 +3788,16 @@ func (*CancelSyncResponse) Descriptor() ([]byte, []int) { return file_protocol_proto_rawDescGZIP(), []int{41} } +// PingSyncRequest is a request to receive a pong in the sync session stream. type PingSyncRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // sync_id is the unique id of the sync session. SyncId string `protobuf:"bytes,1,opt,name=sync_id,json=syncId,proto3" json:"sync_id,omitempty"` - Nonce string `protobuf:"bytes,2,opt,name=nonce,proto3" json:"nonce,omitempty"` + // nonce is the pong to return in the sync session stream. 
+ Nonce string `protobuf:"bytes,2,opt,name=nonce,proto3" json:"nonce,omitempty"` } func (x *PingSyncRequest) Reset() { @@ -7370,7 +7404,7 @@ var file_protocol_proto_rawDesc = []byte{ 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x08, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x70, 0x6f, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x72, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x52, 0x07, 0x73, 0x79, 0x6e, 0x63, 0x50, 0x6f, - 0x73, 0x22, 0xa5, 0x01, 0x0a, 0x13, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x73, 0x22, 0xc2, 0x01, 0x0a, 0x13, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x63, 0x49, 0x64, 0x12, 0x26, 0x0a, 0x07, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x6f, 0x70, 0x18, 0x02, 0x20, @@ -7380,203 +7414,206 @@ var file_protocol_proto_rawDesc = []byte{ 0x65, 0x72, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x41, 0x6e, 0x64, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x52, 0x06, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x6f, 0x6e, 0x67, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x70, 0x6f, 0x6e, 0x67, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x22, 0x5f, 0x0a, 0x16, 0x41, 0x64, 0x64, - 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x6f, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x63, 0x49, 0x64, 0x12, 0x2c, 0x0a, 0x08, - 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x70, 0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, - 0x2e, 0x72, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x6f, 0x6f, 0x6b, 0x69, - 0x65, 0x52, 0x07, 0x73, 0x79, 0x6e, 0x63, 0x50, 0x6f, 0x73, 0x22, 0x19, 0x0a, 0x17, 0x41, 0x64, - 
0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x6f, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x53, 0x0a, 0x1b, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x46, 0x72, 0x6f, 0x6d, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x63, 0x49, 0x64, 0x12, 0x1b, 0x0a, - 0x09, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x08, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x64, 0x22, 0x1e, 0x0a, 0x1c, 0x52, 0x65, - 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x46, 0x72, 0x6f, 0x6d, 0x53, 0x79, - 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2c, 0x0a, 0x11, 0x43, 0x61, - 0x6e, 0x63, 0x65, 0x6c, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x17, 0x0a, 0x07, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x06, 0x73, 0x79, 0x6e, 0x63, 0x49, 0x64, 0x22, 0x14, 0x0a, 0x12, 0x43, 0x61, 0x6e, 0x63, - 0x65, 0x6c, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x40, - 0x0a, 0x0f, 0x50, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x67, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x49, 0x64, 0x22, 0x5f, 0x0a, 0x16, 0x41, 0x64, 0x64, 0x53, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x54, 0x6f, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x17, 0x0a, 0x07, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x63, 0x49, 0x64, 0x12, 0x2c, 0x0a, 0x08, 0x73, 0x79, 0x6e, + 0x63, 0x5f, 0x70, 0x6f, 0x73, 0x18, 
0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x72, 0x69, + 0x76, 0x65, 0x72, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x52, 0x07, + 0x73, 0x79, 0x6e, 0x63, 0x50, 0x6f, 0x73, 0x22, 0x19, 0x0a, 0x17, 0x41, 0x64, 0x64, 0x53, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x54, 0x6f, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x53, 0x0a, 0x1b, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x46, 0x72, 0x6f, 0x6d, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x63, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x6f, - 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, - 0x22, 0x12, 0x0a, 0x10, 0x50, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x23, 0x0a, 0x0b, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x64, 0x65, 0x62, 0x75, 0x67, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x05, 0x64, 0x65, 0x62, 0x75, 0x67, 0x22, 0x7f, 0x0a, 0x0c, 0x49, 0x6e, 0x66, - 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x67, 0x72, 0x61, - 0x66, 0x66, 0x69, 0x74, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x67, 0x72, 0x61, - 0x66, 0x66, 0x69, 0x74, 0x69, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, - 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, - 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2a, 0x5c, 0x0a, 
0x06, 0x53, 0x79, - 0x6e, 0x63, 0x4f, 0x70, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x55, 0x4e, 0x53, - 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x59, - 0x4e, 0x43, 0x5f, 0x4e, 0x45, 0x57, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x53, 0x59, 0x4e, 0x43, - 0x5f, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x10, 0x02, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x59, 0x4e, 0x43, - 0x5f, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x59, 0x4e, - 0x43, 0x5f, 0x50, 0x4f, 0x4e, 0x47, 0x10, 0x04, 0x2a, 0x4c, 0x0a, 0x0c, 0x4d, 0x65, 0x6d, 0x62, - 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x4f, 0x70, 0x12, 0x12, 0x0a, 0x0e, 0x53, 0x4f, 0x5f, 0x55, - 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, - 0x53, 0x4f, 0x5f, 0x49, 0x4e, 0x56, 0x49, 0x54, 0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, - 0x4f, 0x5f, 0x4a, 0x4f, 0x49, 0x4e, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x4f, 0x5f, 0x4c, - 0x45, 0x41, 0x56, 0x45, 0x10, 0x03, 0x2a, 0x4f, 0x0a, 0x09, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, - 0x6c, 0x4f, 0x70, 0x12, 0x12, 0x0a, 0x0e, 0x43, 0x4f, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, - 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x43, 0x4f, 0x5f, 0x43, 0x52, - 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x43, 0x4f, 0x5f, 0x44, 0x45, - 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x43, 0x4f, 0x5f, 0x55, 0x50, - 0x44, 0x41, 0x54, 0x45, 0x44, 0x10, 0x04, 0x2a, 0xdd, 0x0a, 0x0a, 0x03, 0x45, 0x72, 0x72, 0x12, - 0x13, 0x0a, 0x0f, 0x45, 0x52, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, - 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x45, 0x44, - 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x02, 0x12, - 0x14, 0x0a, 0x10, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x41, 0x52, 0x47, 0x55, 0x4d, - 0x45, 0x4e, 0x54, 
0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x45, 0x41, 0x44, 0x4c, 0x49, 0x4e, - 0x45, 0x5f, 0x45, 0x58, 0x43, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, 0x04, 0x12, 0x0d, 0x0a, 0x09, - 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x05, 0x12, 0x12, 0x0a, 0x0e, 0x41, - 0x4c, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x10, 0x06, 0x12, - 0x15, 0x0a, 0x11, 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, - 0x4e, 0x49, 0x45, 0x44, 0x10, 0x07, 0x12, 0x16, 0x0a, 0x12, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, - 0x43, 0x45, 0x5f, 0x45, 0x58, 0x48, 0x41, 0x55, 0x53, 0x54, 0x45, 0x44, 0x10, 0x08, 0x12, 0x17, - 0x0a, 0x13, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x43, 0x4f, 0x4e, 0x44, - 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x09, 0x12, 0x0b, 0x0a, 0x07, 0x41, 0x42, 0x4f, 0x52, 0x54, - 0x45, 0x44, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x4f, 0x55, 0x54, 0x5f, 0x4f, 0x46, 0x5f, 0x52, - 0x41, 0x4e, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x11, 0x0a, 0x0d, 0x55, 0x4e, 0x49, 0x4d, 0x50, 0x4c, - 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x45, 0x44, 0x10, 0x0c, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4e, 0x54, - 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x0d, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x41, 0x56, 0x41, - 0x49, 0x4c, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x0e, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x41, 0x54, 0x41, - 0x5f, 0x4c, 0x4f, 0x53, 0x53, 0x10, 0x0f, 0x12, 0x13, 0x0a, 0x0f, 0x55, 0x4e, 0x41, 0x55, 0x54, - 0x48, 0x45, 0x4e, 0x54, 0x49, 0x43, 0x41, 0x54, 0x45, 0x44, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, - 0x44, 0x45, 0x42, 0x55, 0x47, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x11, 0x12, 0x11, 0x0a, - 0x0d, 0x42, 0x41, 0x44, 0x5f, 0x53, 0x54, 0x52, 0x45, 0x41, 0x4d, 0x5f, 0x49, 0x44, 0x10, 0x12, - 0x12, 0x1e, 0x0a, 0x1a, 0x42, 0x41, 0x44, 0x5f, 0x53, 0x54, 0x52, 0x45, 0x41, 0x4d, 0x5f, 0x43, - 0x52, 0x45, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x41, 0x52, 0x41, 0x4d, 0x53, 0x10, 0x13, - 0x12, 0x19, 0x0a, 0x15, 0x49, 0x4e, 0x54, 0x45, 0x52, 
0x4e, 0x41, 0x4c, 0x5f, 0x45, 0x52, 0x52, - 0x4f, 0x52, 0x5f, 0x53, 0x57, 0x49, 0x54, 0x43, 0x48, 0x10, 0x14, 0x12, 0x10, 0x0a, 0x0c, 0x42, - 0x41, 0x44, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x49, 0x44, 0x10, 0x15, 0x12, 0x17, 0x0a, - 0x13, 0x42, 0x41, 0x44, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x41, - 0x54, 0x55, 0x52, 0x45, 0x10, 0x16, 0x12, 0x13, 0x0a, 0x0f, 0x42, 0x41, 0x44, 0x5f, 0x48, 0x41, - 0x53, 0x48, 0x5f, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x10, 0x17, 0x12, 0x1b, 0x0a, 0x17, 0x42, - 0x41, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x56, 0x5f, 0x4d, 0x49, 0x4e, 0x49, 0x42, 0x4c, 0x4f, 0x43, - 0x4b, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x10, 0x18, 0x12, 0x16, 0x0a, 0x12, 0x4e, 0x4f, 0x5f, 0x45, - 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x19, - 0x12, 0x0d, 0x0a, 0x09, 0x42, 0x41, 0x44, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x10, 0x1a, 0x12, - 0x12, 0x0a, 0x0e, 0x55, 0x53, 0x45, 0x52, 0x5f, 0x43, 0x41, 0x4e, 0x54, 0x5f, 0x50, 0x4f, 0x53, - 0x54, 0x10, 0x1b, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x52, 0x45, 0x41, 0x4d, 0x5f, 0x42, 0x41, - 0x44, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x45, 0x53, 0x10, 0x1c, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, - 0x52, 0x45, 0x41, 0x4d, 0x5f, 0x45, 0x4d, 0x50, 0x54, 0x59, 0x10, 0x1d, 0x12, 0x14, 0x0a, 0x10, - 0x53, 0x54, 0x52, 0x45, 0x41, 0x4d, 0x5f, 0x42, 0x41, 0x44, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, - 0x10, 0x1e, 0x12, 0x14, 0x0a, 0x10, 0x42, 0x41, 0x44, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x47, 0x41, - 0x54, 0x45, 0x5f, 0x53, 0x49, 0x47, 0x10, 0x1f, 0x12, 0x12, 0x0a, 0x0e, 0x42, 0x41, 0x44, 0x5f, - 0x50, 0x55, 0x42, 0x4c, 0x49, 0x43, 0x5f, 0x4b, 0x45, 0x59, 0x10, 0x20, 0x12, 0x0f, 0x0a, 0x0b, - 0x42, 0x41, 0x44, 0x5f, 0x50, 0x41, 0x59, 0x4c, 0x4f, 0x41, 0x44, 0x10, 0x21, 0x12, 0x12, 0x0a, - 0x0e, 0x42, 0x41, 0x44, 0x5f, 0x48, 0x45, 0x58, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, - 0x22, 0x12, 0x12, 0x0a, 0x0e, 0x42, 0x41, 0x44, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 
0x48, - 0x41, 0x53, 0x48, 0x10, 0x23, 0x12, 0x13, 0x0a, 0x0f, 0x42, 0x41, 0x44, 0x5f, 0x53, 0x59, 0x4e, - 0x43, 0x5f, 0x43, 0x4f, 0x4f, 0x4b, 0x49, 0x45, 0x10, 0x24, 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x55, - 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x10, 0x25, 0x12, - 0x0d, 0x0a, 0x09, 0x42, 0x41, 0x44, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x10, 0x26, 0x12, 0x1d, - 0x0a, 0x19, 0x53, 0x54, 0x52, 0x45, 0x41, 0x4d, 0x5f, 0x4e, 0x4f, 0x5f, 0x49, 0x4e, 0x43, 0x45, - 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x10, 0x27, 0x12, 0x14, 0x0a, - 0x10, 0x42, 0x41, 0x44, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, - 0x52, 0x10, 0x28, 0x12, 0x15, 0x0a, 0x11, 0x42, 0x41, 0x44, 0x5f, 0x4d, 0x49, 0x4e, 0x49, 0x50, - 0x4f, 0x4f, 0x4c, 0x5f, 0x53, 0x4c, 0x4f, 0x54, 0x10, 0x29, 0x12, 0x17, 0x0a, 0x13, 0x42, 0x41, - 0x44, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x4f, 0x52, 0x5f, 0x41, 0x44, 0x44, 0x52, 0x45, 0x53, - 0x53, 0x10, 0x2a, 0x12, 0x12, 0x0a, 0x0e, 0x53, 0x54, 0x41, 0x4c, 0x45, 0x5f, 0x44, 0x45, 0x4c, - 0x45, 0x47, 0x41, 0x54, 0x45, 0x10, 0x2b, 0x12, 0x21, 0x0a, 0x1d, 0x42, 0x41, 0x44, 0x5f, 0x4c, - 0x49, 0x4e, 0x4b, 0x5f, 0x57, 0x41, 0x4c, 0x4c, 0x45, 0x54, 0x5f, 0x42, 0x41, 0x44, 0x5f, 0x53, - 0x49, 0x47, 0x4e, 0x41, 0x54, 0x55, 0x52, 0x45, 0x10, 0x2c, 0x12, 0x13, 0x0a, 0x0f, 0x42, 0x41, - 0x44, 0x5f, 0x52, 0x4f, 0x4f, 0x54, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x49, 0x44, 0x10, 0x2d, 0x12, - 0x10, 0x0a, 0x0c, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x4e, 0x4f, 0x44, 0x45, 0x10, - 0x2e, 0x12, 0x18, 0x0a, 0x14, 0x44, 0x42, 0x5f, 0x4f, 0x50, 0x45, 0x52, 0x41, 0x54, 0x49, 0x4f, - 0x4e, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x2f, 0x12, 0x1e, 0x0a, 0x1a, 0x4d, - 0x49, 0x4e, 0x49, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x53, 0x5f, 0x53, 0x54, 0x4f, 0x52, 0x41, 0x47, - 0x45, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x30, 0x12, 0x0f, 0x0a, 0x0b, 0x42, - 0x41, 0x44, 0x5f, 0x41, 0x44, 
0x44, 0x52, 0x45, 0x53, 0x53, 0x10, 0x31, 0x12, 0x0f, 0x0a, 0x0b, - 0x42, 0x55, 0x46, 0x46, 0x45, 0x52, 0x5f, 0x46, 0x55, 0x4c, 0x4c, 0x10, 0x32, 0x12, 0x0e, 0x0a, - 0x0a, 0x42, 0x41, 0x44, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x10, 0x33, 0x12, 0x10, 0x0a, - 0x0c, 0x42, 0x41, 0x44, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x52, 0x41, 0x43, 0x54, 0x10, 0x34, 0x12, - 0x12, 0x0a, 0x0e, 0x43, 0x41, 0x4e, 0x4e, 0x4f, 0x54, 0x5f, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, - 0x54, 0x10, 0x35, 0x12, 0x1d, 0x0a, 0x19, 0x43, 0x41, 0x4e, 0x4e, 0x4f, 0x54, 0x5f, 0x47, 0x45, - 0x54, 0x5f, 0x4c, 0x49, 0x4e, 0x4b, 0x45, 0x44, 0x5f, 0x57, 0x41, 0x4c, 0x4c, 0x45, 0x54, 0x53, - 0x10, 0x36, 0x12, 0x1d, 0x0a, 0x19, 0x43, 0x41, 0x4e, 0x4e, 0x4f, 0x54, 0x5f, 0x43, 0x48, 0x45, - 0x43, 0x4b, 0x5f, 0x45, 0x4e, 0x54, 0x49, 0x54, 0x4c, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x53, 0x10, - 0x37, 0x12, 0x18, 0x0a, 0x14, 0x43, 0x41, 0x4e, 0x4e, 0x4f, 0x54, 0x5f, 0x43, 0x41, 0x4c, 0x4c, - 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x52, 0x41, 0x43, 0x54, 0x10, 0x38, 0x12, 0x12, 0x0a, 0x0e, 0x53, - 0x50, 0x41, 0x43, 0x45, 0x5f, 0x44, 0x49, 0x53, 0x41, 0x42, 0x4c, 0x45, 0x44, 0x10, 0x39, 0x12, - 0x14, 0x0a, 0x10, 0x43, 0x48, 0x41, 0x4e, 0x4e, 0x45, 0x4c, 0x5f, 0x44, 0x49, 0x53, 0x41, 0x42, - 0x4c, 0x45, 0x44, 0x10, 0x3a, 0x12, 0x15, 0x0a, 0x11, 0x57, 0x52, 0x4f, 0x4e, 0x47, 0x5f, 0x53, - 0x54, 0x52, 0x45, 0x41, 0x4d, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x10, 0x3b, 0x12, 0x1b, 0x0a, 0x17, - 0x4d, 0x49, 0x4e, 0x49, 0x50, 0x4f, 0x4f, 0x4c, 0x5f, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4e, 0x47, - 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x53, 0x10, 0x3c, 0x12, 0x1e, 0x0a, 0x1a, 0x53, 0x54, 0x52, - 0x45, 0x41, 0x4d, 0x5f, 0x4c, 0x41, 0x53, 0x54, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x4d, - 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x3d, 0x12, 0x1c, 0x0a, 0x18, 0x44, 0x4f, 0x57, - 0x4e, 0x53, 0x54, 0x52, 0x45, 0x41, 0x4d, 0x5f, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, - 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x3e, 0x32, 0xf6, 0x06, 0x0a, 
0x0d, 0x53, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x47, 0x0a, 0x0c, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x1a, 0x2e, 0x72, 0x69, 0x76, 0x65, - 0x72, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x72, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x43, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, - 0x17, 0x2e, 0x72, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x72, 0x69, 0x76, 0x65, 0x72, - 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x46, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, - 0x78, 0x12, 0x19, 0x2e, 0x72, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x72, - 0x65, 0x61, 0x6d, 0x45, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x72, - 0x69, 0x76, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x78, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x4a, 0x0a, 0x0d, 0x47, 0x65, - 0x74, 0x4d, 0x69, 0x6e, 0x69, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x12, 0x1b, 0x2e, 0x72, 0x69, - 0x76, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x69, 0x6e, 0x69, 0x62, 0x6c, 0x6f, 0x63, 0x6b, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x72, 0x69, 0x76, 0x65, 0x72, - 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x69, 0x6e, 0x69, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5f, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x73, - 0x74, 0x4d, 0x69, 0x6e, 0x69, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x12, 0x22, + 0x28, 
0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x63, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x64, 0x22, 0x1e, 0x0a, 0x1c, 0x52, 0x65, 0x6d, 0x6f, 0x76, + 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x46, 0x72, 0x6f, 0x6d, 0x53, 0x79, 0x6e, 0x63, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2c, 0x0a, 0x11, 0x43, 0x61, 0x6e, 0x63, 0x65, + 0x6c, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x07, + 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, + 0x79, 0x6e, 0x63, 0x49, 0x64, 0x22, 0x14, 0x0a, 0x12, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x53, + 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x40, 0x0a, 0x0f, 0x50, + 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, + 0x0a, 0x07, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x73, 0x79, 0x6e, 0x63, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x22, 0x12, 0x0a, + 0x10, 0x50, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x23, 0x0a, 0x0b, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x14, 0x0a, 0x05, 0x64, 0x65, 0x62, 0x75, 0x67, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x05, 0x64, 0x65, 0x62, 0x75, 0x67, 0x22, 0x7f, 0x0a, 0x0c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x67, 0x72, 0x61, 0x66, 0x66, 0x69, + 0x74, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x67, 0x72, 0x61, 0x66, 0x66, 0x69, + 0x74, 0x69, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 
0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2a, 0x6b, 0x0a, 0x06, 0x53, 0x79, 0x6e, 0x63, 0x4f, + 0x70, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x59, 0x4e, 0x43, 0x5f, + 0x4e, 0x45, 0x57, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x43, 0x4c, + 0x4f, 0x53, 0x45, 0x10, 0x02, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x55, 0x50, + 0x44, 0x41, 0x54, 0x45, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x50, + 0x4f, 0x4e, 0x47, 0x10, 0x04, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x44, 0x4f, + 0x57, 0x4e, 0x10, 0x05, 0x2a, 0x4c, 0x0a, 0x0c, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, + 0x69, 0x70, 0x4f, 0x70, 0x12, 0x12, 0x0a, 0x0e, 0x53, 0x4f, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, + 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x4f, 0x5f, 0x49, + 0x4e, 0x56, 0x49, 0x54, 0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x4f, 0x5f, 0x4a, 0x4f, + 0x49, 0x4e, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x4f, 0x5f, 0x4c, 0x45, 0x41, 0x56, 0x45, + 0x10, 0x03, 0x2a, 0x4f, 0x0a, 0x09, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x4f, 0x70, 0x12, + 0x12, 0x0a, 0x0e, 0x43, 0x4f, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x43, 0x4f, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, + 0x44, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x43, 0x4f, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, + 0x44, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x43, 0x4f, 0x5f, 0x55, 0x50, 0x44, 
0x41, 0x54, 0x45, + 0x44, 0x10, 0x04, 0x2a, 0xdd, 0x0a, 0x0a, 0x03, 0x45, 0x72, 0x72, 0x12, 0x13, 0x0a, 0x0f, 0x45, + 0x52, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0b, + 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x49, + 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x41, 0x52, 0x47, 0x55, 0x4d, 0x45, 0x4e, 0x54, 0x10, + 0x03, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x45, 0x41, 0x44, 0x4c, 0x49, 0x4e, 0x45, 0x5f, 0x45, 0x58, + 0x43, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, 0x04, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x4f, 0x54, 0x5f, + 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x05, 0x12, 0x12, 0x0a, 0x0e, 0x41, 0x4c, 0x52, 0x45, 0x41, + 0x44, 0x59, 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11, 0x50, + 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, + 0x10, 0x07, 0x12, 0x16, 0x0a, 0x12, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x45, + 0x58, 0x48, 0x41, 0x55, 0x53, 0x54, 0x45, 0x44, 0x10, 0x08, 0x12, 0x17, 0x0a, 0x13, 0x46, 0x41, + 0x49, 0x4c, 0x45, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x43, 0x4f, 0x4e, 0x44, 0x49, 0x54, 0x49, 0x4f, + 0x4e, 0x10, 0x09, 0x12, 0x0b, 0x0a, 0x07, 0x41, 0x42, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x10, 0x0a, + 0x12, 0x10, 0x0a, 0x0c, 0x4f, 0x55, 0x54, 0x5f, 0x4f, 0x46, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, + 0x10, 0x0b, 0x12, 0x11, 0x0a, 0x0d, 0x55, 0x4e, 0x49, 0x4d, 0x50, 0x4c, 0x45, 0x4d, 0x45, 0x4e, + 0x54, 0x45, 0x44, 0x10, 0x0c, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41, + 0x4c, 0x10, 0x0d, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x41, 0x56, 0x41, 0x49, 0x4c, 0x41, 0x42, + 0x4c, 0x45, 0x10, 0x0e, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x4c, 0x4f, 0x53, + 0x53, 0x10, 0x0f, 0x12, 0x13, 0x0a, 0x0f, 0x55, 0x4e, 0x41, 0x55, 0x54, 0x48, 0x45, 0x4e, 0x54, + 0x49, 0x43, 0x41, 
0x54, 0x45, 0x44, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45, 0x42, 0x55, + 0x47, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x11, 0x12, 0x11, 0x0a, 0x0d, 0x42, 0x41, 0x44, + 0x5f, 0x53, 0x54, 0x52, 0x45, 0x41, 0x4d, 0x5f, 0x49, 0x44, 0x10, 0x12, 0x12, 0x1e, 0x0a, 0x1a, + 0x42, 0x41, 0x44, 0x5f, 0x53, 0x54, 0x52, 0x45, 0x41, 0x4d, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x41, 0x52, 0x41, 0x4d, 0x53, 0x10, 0x13, 0x12, 0x19, 0x0a, 0x15, + 0x49, 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, + 0x57, 0x49, 0x54, 0x43, 0x48, 0x10, 0x14, 0x12, 0x10, 0x0a, 0x0c, 0x42, 0x41, 0x44, 0x5f, 0x45, + 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x49, 0x44, 0x10, 0x15, 0x12, 0x17, 0x0a, 0x13, 0x42, 0x41, 0x44, + 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x41, 0x54, 0x55, 0x52, 0x45, + 0x10, 0x16, 0x12, 0x13, 0x0a, 0x0f, 0x42, 0x41, 0x44, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x5f, 0x46, + 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x10, 0x17, 0x12, 0x1b, 0x0a, 0x17, 0x42, 0x41, 0x44, 0x5f, 0x50, + 0x52, 0x45, 0x56, 0x5f, 0x4d, 0x49, 0x4e, 0x49, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x48, 0x41, + 0x53, 0x48, 0x10, 0x18, 0x12, 0x16, 0x0a, 0x12, 0x4e, 0x4f, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, + 0x5f, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x19, 0x12, 0x0d, 0x0a, 0x09, + 0x42, 0x41, 0x44, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x10, 0x1a, 0x12, 0x12, 0x0a, 0x0e, 0x55, + 0x53, 0x45, 0x52, 0x5f, 0x43, 0x41, 0x4e, 0x54, 0x5f, 0x50, 0x4f, 0x53, 0x54, 0x10, 0x1b, 0x12, + 0x15, 0x0a, 0x11, 0x53, 0x54, 0x52, 0x45, 0x41, 0x4d, 0x5f, 0x42, 0x41, 0x44, 0x5f, 0x48, 0x41, + 0x53, 0x48, 0x45, 0x53, 0x10, 0x1c, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x45, 0x41, 0x4d, + 0x5f, 0x45, 0x4d, 0x50, 0x54, 0x59, 0x10, 0x1d, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x54, 0x52, 0x45, + 0x41, 0x4d, 0x5f, 0x42, 0x41, 0x44, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x10, 0x1e, 0x12, 0x14, + 0x0a, 0x10, 0x42, 0x41, 0x44, 0x5f, 0x44, 0x45, 0x4c, 
0x45, 0x47, 0x41, 0x54, 0x45, 0x5f, 0x53, + 0x49, 0x47, 0x10, 0x1f, 0x12, 0x12, 0x0a, 0x0e, 0x42, 0x41, 0x44, 0x5f, 0x50, 0x55, 0x42, 0x4c, + 0x49, 0x43, 0x5f, 0x4b, 0x45, 0x59, 0x10, 0x20, 0x12, 0x0f, 0x0a, 0x0b, 0x42, 0x41, 0x44, 0x5f, + 0x50, 0x41, 0x59, 0x4c, 0x4f, 0x41, 0x44, 0x10, 0x21, 0x12, 0x12, 0x0a, 0x0e, 0x42, 0x41, 0x44, + 0x5f, 0x48, 0x45, 0x58, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x22, 0x12, 0x12, 0x0a, + 0x0e, 0x42, 0x41, 0x44, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x10, + 0x23, 0x12, 0x13, 0x0a, 0x0f, 0x42, 0x41, 0x44, 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x43, 0x4f, + 0x4f, 0x4b, 0x49, 0x45, 0x10, 0x24, 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x55, 0x50, 0x4c, 0x49, 0x43, + 0x41, 0x54, 0x45, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x10, 0x25, 0x12, 0x0d, 0x0a, 0x09, 0x42, + 0x41, 0x44, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x10, 0x26, 0x12, 0x1d, 0x0a, 0x19, 0x53, 0x54, + 0x52, 0x45, 0x41, 0x4d, 0x5f, 0x4e, 0x4f, 0x5f, 0x49, 0x4e, 0x43, 0x45, 0x50, 0x54, 0x49, 0x4f, + 0x4e, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x10, 0x27, 0x12, 0x14, 0x0a, 0x10, 0x42, 0x41, 0x44, + 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x28, 0x12, + 0x15, 0x0a, 0x11, 0x42, 0x41, 0x44, 0x5f, 0x4d, 0x49, 0x4e, 0x49, 0x50, 0x4f, 0x4f, 0x4c, 0x5f, + 0x53, 0x4c, 0x4f, 0x54, 0x10, 0x29, 0x12, 0x17, 0x0a, 0x13, 0x42, 0x41, 0x44, 0x5f, 0x43, 0x52, + 0x45, 0x41, 0x54, 0x4f, 0x52, 0x5f, 0x41, 0x44, 0x44, 0x52, 0x45, 0x53, 0x53, 0x10, 0x2a, 0x12, + 0x12, 0x0a, 0x0e, 0x53, 0x54, 0x41, 0x4c, 0x45, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x47, 0x41, 0x54, + 0x45, 0x10, 0x2b, 0x12, 0x21, 0x0a, 0x1d, 0x42, 0x41, 0x44, 0x5f, 0x4c, 0x49, 0x4e, 0x4b, 0x5f, + 0x57, 0x41, 0x4c, 0x4c, 0x45, 0x54, 0x5f, 0x42, 0x41, 0x44, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x41, + 0x54, 0x55, 0x52, 0x45, 0x10, 0x2c, 0x12, 0x13, 0x0a, 0x0f, 0x42, 0x41, 0x44, 0x5f, 0x52, 0x4f, + 0x4f, 0x54, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x49, 0x44, 0x10, 0x2d, 0x12, 0x10, 0x0a, 0x0c, 
0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x4e, 0x4f, 0x44, 0x45, 0x10, 0x2e, 0x12, 0x18, 0x0a, + 0x14, 0x44, 0x42, 0x5f, 0x4f, 0x50, 0x45, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x41, + 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x2f, 0x12, 0x1e, 0x0a, 0x1a, 0x4d, 0x49, 0x4e, 0x49, 0x42, + 0x4c, 0x4f, 0x43, 0x4b, 0x53, 0x5f, 0x53, 0x54, 0x4f, 0x52, 0x41, 0x47, 0x45, 0x5f, 0x46, 0x41, + 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x30, 0x12, 0x0f, 0x0a, 0x0b, 0x42, 0x41, 0x44, 0x5f, 0x41, + 0x44, 0x44, 0x52, 0x45, 0x53, 0x53, 0x10, 0x31, 0x12, 0x0f, 0x0a, 0x0b, 0x42, 0x55, 0x46, 0x46, + 0x45, 0x52, 0x5f, 0x46, 0x55, 0x4c, 0x4c, 0x10, 0x32, 0x12, 0x0e, 0x0a, 0x0a, 0x42, 0x41, 0x44, + 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x10, 0x33, 0x12, 0x10, 0x0a, 0x0c, 0x42, 0x41, 0x44, + 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x52, 0x41, 0x43, 0x54, 0x10, 0x34, 0x12, 0x12, 0x0a, 0x0e, 0x43, + 0x41, 0x4e, 0x4e, 0x4f, 0x54, 0x5f, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x10, 0x35, 0x12, + 0x1d, 0x0a, 0x19, 0x43, 0x41, 0x4e, 0x4e, 0x4f, 0x54, 0x5f, 0x47, 0x45, 0x54, 0x5f, 0x4c, 0x49, + 0x4e, 0x4b, 0x45, 0x44, 0x5f, 0x57, 0x41, 0x4c, 0x4c, 0x45, 0x54, 0x53, 0x10, 0x36, 0x12, 0x1d, + 0x0a, 0x19, 0x43, 0x41, 0x4e, 0x4e, 0x4f, 0x54, 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x5f, 0x45, + 0x4e, 0x54, 0x49, 0x54, 0x4c, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x53, 0x10, 0x37, 0x12, 0x18, 0x0a, + 0x14, 0x43, 0x41, 0x4e, 0x4e, 0x4f, 0x54, 0x5f, 0x43, 0x41, 0x4c, 0x4c, 0x5f, 0x43, 0x4f, 0x4e, + 0x54, 0x52, 0x41, 0x43, 0x54, 0x10, 0x38, 0x12, 0x12, 0x0a, 0x0e, 0x53, 0x50, 0x41, 0x43, 0x45, + 0x5f, 0x44, 0x49, 0x53, 0x41, 0x42, 0x4c, 0x45, 0x44, 0x10, 0x39, 0x12, 0x14, 0x0a, 0x10, 0x43, + 0x48, 0x41, 0x4e, 0x4e, 0x45, 0x4c, 0x5f, 0x44, 0x49, 0x53, 0x41, 0x42, 0x4c, 0x45, 0x44, 0x10, + 0x3a, 0x12, 0x15, 0x0a, 0x11, 0x57, 0x52, 0x4f, 0x4e, 0x47, 0x5f, 0x53, 0x54, 0x52, 0x45, 0x41, + 0x4d, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x10, 0x3b, 0x12, 0x1b, 0x0a, 0x17, 0x4d, 0x49, 0x4e, 0x49, + 0x50, 0x4f, 0x4f, 0x4c, 0x5f, 
0x4d, 0x49, 0x53, 0x53, 0x49, 0x4e, 0x47, 0x5f, 0x45, 0x56, 0x45, + 0x4e, 0x54, 0x53, 0x10, 0x3c, 0x12, 0x1e, 0x0a, 0x1a, 0x53, 0x54, 0x52, 0x45, 0x41, 0x4d, 0x5f, + 0x4c, 0x41, 0x53, 0x54, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, + 0x54, 0x43, 0x48, 0x10, 0x3d, 0x12, 0x1c, 0x0a, 0x18, 0x44, 0x4f, 0x57, 0x4e, 0x53, 0x54, 0x52, + 0x45, 0x41, 0x4d, 0x5f, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x45, 0x52, 0x52, 0x4f, + 0x52, 0x10, 0x3e, 0x32, 0xf6, 0x06, 0x0a, 0x0d, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x47, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x1a, 0x2e, 0x72, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1b, 0x2e, 0x72, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, + 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x17, 0x2e, 0x72, 0x69, + 0x76, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x72, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74, + 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x46, + 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x78, 0x12, 0x19, 0x2e, + 0x72, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, + 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x72, 0x69, 0x76, 0x65, 0x72, + 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x78, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x4a, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x4d, 0x69, 0x6e, + 0x69, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x12, 0x1b, 0x2e, 0x72, 
0x69, 0x76, 0x65, 0x72, 0x2e, + 0x47, 0x65, 0x74, 0x4d, 0x69, 0x6e, 0x69, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x72, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74, + 0x4d, 0x69, 0x6e, 0x69, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x5f, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x73, 0x74, 0x4d, 0x69, 0x6e, + 0x69, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x12, 0x22, 0x2e, 0x72, 0x69, 0x76, + 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x73, 0x74, 0x4d, 0x69, 0x6e, 0x69, 0x62, 0x6c, + 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x72, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x73, 0x74, 0x4d, 0x69, - 0x6e, 0x69, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x72, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x4c, 0x61, - 0x73, 0x74, 0x4d, 0x69, 0x6e, 0x69, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x08, 0x41, 0x64, 0x64, 0x45, 0x76, - 0x65, 0x6e, 0x74, 0x12, 0x16, 0x2e, 0x72, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x41, 0x64, 0x64, 0x45, - 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x72, 0x69, - 0x76, 0x65, 0x72, 0x2e, 0x41, 0x64, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x46, 0x0a, 0x0b, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x73, 0x12, 0x19, 0x2e, 0x72, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x53, 0x79, 0x6e, 0x63, - 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, - 0x2e, 0x72, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x50, 0x0a, 0x0f, - 0x41, 
0x64, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x6f, 0x53, 0x79, 0x6e, 0x63, 0x12, - 0x1d, 0x2e, 0x72, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x41, 0x64, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x54, 0x6f, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, - 0x2e, 0x72, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x41, 0x64, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x54, 0x6f, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, - 0x0a, 0x0a, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x18, 0x2e, 0x72, - 0x69, 0x76, 0x65, 0x72, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x53, 0x79, 0x6e, 0x63, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x72, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x43, - 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x5f, 0x0a, 0x14, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x46, 0x72, 0x6f, 0x6d, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x22, 0x2e, 0x72, 0x69, 0x76, 0x65, - 0x72, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x46, 0x72, - 0x6f, 0x6d, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, - 0x72, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x46, 0x72, 0x6f, 0x6d, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x04, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x2e, 0x72, 0x69, 0x76, - 0x65, 0x72, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x13, - 0x2e, 0x72, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x08, 0x50, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6e, 0x63, 0x12, - 0x16, 0x2e, 0x72, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6e, 0x63, + 0x6e, 0x69, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 
0x48, 0x61, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x08, 0x41, 0x64, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, + 0x16, 0x2e, 0x72, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x41, 0x64, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x72, 0x69, 0x76, 0x65, 0x72, 0x2e, - 0x50, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x42, 0x31, 0x5a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x72, - 0x69, 0x76, 0x65, 0x72, 0x2d, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x2f, 0x72, 0x69, 0x76, 0x65, 0x72, - 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x63, 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x41, 0x64, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x46, 0x0a, 0x0b, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x12, + 0x19, 0x2e, 0x72, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x72, 0x69, 0x76, + 0x65, 0x72, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x50, 0x0a, 0x0f, 0x41, 0x64, 0x64, 0x53, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x6f, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x1d, 0x2e, 0x72, 0x69, + 0x76, 0x65, 0x72, 0x2e, 0x41, 0x64, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x6f, 0x53, + 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x72, 0x69, 0x76, + 0x65, 0x72, 0x2e, 0x41, 0x64, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x6f, 0x53, 0x79, + 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a, 0x0a, 0x43, 0x61, + 0x6e, 0x63, 0x65, 0x6c, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x18, 0x2e, 0x72, 0x69, 0x76, 0x65, 0x72, + 0x2e, 0x43, 
0x61, 0x6e, 0x63, 0x65, 0x6c, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x72, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, + 0x6c, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5f, 0x0a, + 0x14, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x46, 0x72, 0x6f, + 0x6d, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x22, 0x2e, 0x72, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x52, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x46, 0x72, 0x6f, 0x6d, 0x53, 0x79, + 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x72, 0x69, 0x76, 0x65, + 0x72, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x46, 0x72, + 0x6f, 0x6d, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, + 0x0a, 0x04, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x2e, 0x72, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x49, + 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x72, 0x69, 0x76, + 0x65, 0x72, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x3b, 0x0a, 0x08, 0x50, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x16, 0x2e, 0x72, 0x69, + 0x76, 0x65, 0x72, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x72, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x50, 0x69, 0x6e, 0x67, + 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x31, 0x5a, 0x2f, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x72, 0x69, 0x76, 0x65, 0x72, + 0x2d, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x2f, 0x72, 0x69, 0x76, 0x65, 0x72, 0x2f, 0x63, 0x6f, 0x72, + 0x65, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/core/node/rpc/forwarder.go b/core/node/rpc/forwarder.go index 1ab3851b9..aaa2ea45b 
100644 --- a/core/node/rpc/forwarder.go +++ b/core/node/rpc/forwarder.go @@ -2,6 +2,7 @@ package rpc import ( "context" + "github.com/river-build/river/core/node/utils" "connectrpc.com/connect" @@ -141,7 +142,7 @@ func (s *Service) CreateStream( ctx context.Context, req *connect.Request[CreateStreamRequest], ) (*connect.Response[CreateStreamResponse], error) { - ctx, log := ctxAndLogForRequest(ctx, req) + ctx, log := utils.CtxAndLogForRequest(ctx, req) log.Debug("CreateStream REQUEST", "streamId", req.Msg.StreamId) r, e := s.createStreamImpl(ctx, req) if e != nil { @@ -174,7 +175,7 @@ func (s *Service) GetStream( ctx context.Context, req *connect.Request[GetStreamRequest], ) (*connect.Response[GetStreamResponse], error) { - ctx, log := ctxAndLogForRequest(ctx, req) + ctx, log := utils.CtxAndLogForRequest(ctx, req) log.Debug("GetStream ENTER") r, e := s.getStreamImpl(ctx, req) if e != nil { @@ -194,7 +195,7 @@ func (s *Service) GetStreamEx( req *connect.Request[GetStreamExRequest], resp *connect.ServerStream[GetStreamExResponse], ) error { - ctx, log := ctxAndLogForRequest(ctx, req) + ctx, log := utils.CtxAndLogForRequest(ctx, req) log.Debug("GetStreamEx ENTER") e := s.getStreamExImpl(ctx, req, resp) if e != nil { @@ -316,7 +317,7 @@ func (s *Service) GetMiniblocks( ctx context.Context, req *connect.Request[GetMiniblocksRequest], ) (*connect.Response[GetMiniblocksResponse], error) { - ctx, log := ctxAndLogForRequest(ctx, req) + ctx, log := utils.CtxAndLogForRequest(ctx, req) log.Debug("GetMiniblocks ENTER", "req", req.Msg) r, e := s.getMiniblocksImpl(ctx, req) if e != nil { @@ -368,7 +369,7 @@ func (s *Service) GetLastMiniblockHash( ctx context.Context, req *connect.Request[GetLastMiniblockHashRequest], ) (*connect.Response[GetLastMiniblockHashResponse], error) { - ctx, log := ctxAndLogForRequest(ctx, req) + ctx, log := utils.CtxAndLogForRequest(ctx, req) log.Debug("GetLastMiniblockHash ENTER", "req", req.Msg) r, e := s.getLastMiniblockHashImpl(ctx, req) if e != nil 
{ @@ -420,7 +421,7 @@ func (s *Service) AddEvent( ctx context.Context, req *connect.Request[AddEventRequest], ) (*connect.Response[AddEventResponse], error) { - ctx, log := ctxAndLogForRequest(ctx, req) + ctx, log := utils.CtxAndLogForRequest(ctx, req) log.Debug("AddEvent ENTER", "req", req.Msg) r, e := s.addEventImpl(ctx, req) if e != nil { diff --git a/core/node/rpc/info.go b/core/node/rpc/info.go index a822cee58..5dd01cb1e 100644 --- a/core/node/rpc/info.go +++ b/core/node/rpc/info.go @@ -4,6 +4,8 @@ import ( "context" "errors" "fmt" + "github.com/river-build/river/core/node/rpc/sync" + "github.com/river-build/river/core/node/utils" "log/slog" "strconv" @@ -24,7 +26,7 @@ func (s *Service) Info( ctx context.Context, req *connect.Request[InfoRequest], ) (*connect.Response[InfoResponse], error) { - ctx, log := ctxAndLogForRequest(ctx, req) + ctx, log := utils.CtxAndLogForRequest(ctx, req) log.Debug("Info ENTER", "request", req.Msg) @@ -45,6 +47,7 @@ func (s *Service) info( ) (*connect.Response[InfoResponse], error) { if len(request.Msg.Debug) > 0 { debug := request.Msg.Debug[0] + if debug == "error" { return nil, RiverError(Err_DEBUG_ERROR, "Error requested through Info request") } else if debug == "network_error" { @@ -54,6 +57,8 @@ func (s *Service) info( return nil, errors.New("error requested through Info request") } else if debug == "make_miniblock" { return s.debugInfoMakeMiniblock(ctx, request) + } else if debug == "drop_stream" { + return s.debugDropStream(ctx, request) } if s.config.EnableTestAPIs { @@ -83,6 +88,32 @@ func (s *Service) info( }), nil } +func (s *Service) debugDropStream( + ctx context.Context, + request *connect.Request[InfoRequest], +) (*connect.Response[InfoResponse], error) { + if len(request.Msg.GetDebug()) < 3 { + return nil, RiverError(Err_DEBUG_ERROR, "drop_stream requires a sync id and stream id") + } + + syncID := request.Msg.Debug[1] + streamID, err := shared.StreamIdFromString(request.Msg.Debug[2]) + if err != nil { + return nil, 
err + } + + dbgHandler, ok := s.syncHandler.(sync.DebugHandler) + if !ok { + return nil, RiverError(Err_UNAVAILABLE, "Drop stream not supported") + } + + if err = dbgHandler.DebugDropStream(ctx, syncID, streamID); err != nil { + return nil, err + } + + return connect.NewResponse(&InfoResponse{}), nil +} + func (s *Service) debugInfoMakeMiniblock( ctx context.Context, request *connect.Request[InfoRequest], diff --git a/core/node/rpc/metrics_interceptor.go b/core/node/rpc/metrics_interceptor.go index bcfd08e33..b8cf1194f 100644 --- a/core/node/rpc/metrics_interceptor.go +++ b/core/node/rpc/metrics_interceptor.go @@ -5,10 +5,9 @@ import ( "connectrpc.com/connect" "github.com/prometheus/client_golang/prometheus" + "github.com/river-build/river/core/node/shared" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" - - "github.com/river-build/river/core/node/shared" ) type streamIdProvider interface { @@ -46,7 +45,7 @@ func (i *metricsInterceptor) WrapUnary(next connect.UnaryFunc) connect.UnaryFunc m.Dec() prometheus.NewTimer(i.rpcDuration.WithLabelValues(proc)).ObserveDuration() }() - + // add streamId to tracing span r, ok := req.Any().(streamIdProvider) if ok { diff --git a/core/node/rpc/node2node.go b/core/node/rpc/node2node.go index 4b0090e07..8b4e1f268 100644 --- a/core/node/rpc/node2node.go +++ b/core/node/rpc/node2node.go @@ -2,6 +2,7 @@ package rpc import ( "context" + "github.com/river-build/river/core/node/utils" "connectrpc.com/connect" @@ -15,7 +16,7 @@ func (s *Service) AllocateStream( ctx context.Context, req *connect.Request[AllocateStreamRequest], ) (*connect.Response[AllocateStreamResponse], error) { - ctx, log := ctxAndLogForRequest(ctx, req) + ctx, log := utils.CtxAndLogForRequest(ctx, req) log.Debug("AllocateStream ENTER") r, e := s.allocateStream(ctx, req.Msg) if e != nil { @@ -51,7 +52,7 @@ func (s *Service) NewEventReceived( ctx context.Context, req *connect.Request[NewEventReceivedRequest], ) 
(*connect.Response[NewEventReceivedResponse], error) { - ctx, log := ctxAndLogForRequest(ctx, req) + ctx, log := utils.CtxAndLogForRequest(ctx, req) log.Debug("NewEventReceived ENTER") r, e := s.newEventReceived(ctx, req.Msg) if e != nil { @@ -105,7 +106,7 @@ func (s *Service) ProposeMiniblock( ctx context.Context, req *connect.Request[ProposeMiniblockRequest], ) (*connect.Response[ProposeMiniblockResponse], error) { - ctx, log := ctxAndLogForRequest(ctx, req) + ctx, log := utils.CtxAndLogForRequest(ctx, req) log.Debug("ProposeMiniblock ENTER") r, e := s.proposeMiniblock(ctx, req.Msg) if e != nil { @@ -148,7 +149,7 @@ func (s *Service) SaveMiniblockCandidate( ctx context.Context, req *connect.Request[SaveMiniblockCandidateRequest], ) (*connect.Response[SaveMiniblockCandidateResponse], error) { - ctx, log := ctxAndLogForRequest(ctx, req) + ctx, log := utils.CtxAndLogForRequest(ctx, req) log.Debug("SaveMiniblockCandidate ENTER") r, e := s.saveMiniblockCandidate(ctx, req.Msg) if e != nil { diff --git a/core/node/rpc/server.go b/core/node/rpc/server.go index e1415610c..4b8865233 100644 --- a/core/node/rpc/server.go +++ b/core/node/rpc/server.go @@ -16,10 +16,6 @@ import ( "connectrpc.com/connect" "github.com/ethereum/go-ethereum/common" "github.com/prometheus/client_golang/prometheus" - "github.com/rs/cors" - "golang.org/x/net/http2" - "golang.org/x/net/http2/h2c" - "github.com/river-build/river/core/config" "github.com/river-build/river/core/node/auth" . "github.com/river-build/river/core/node/base" @@ -31,8 +27,12 @@ import ( . 
"github.com/river-build/river/core/node/protocol" "github.com/river-build/river/core/node/protocol/protocolconnect" "github.com/river-build/river/core/node/registries" + "github.com/river-build/river/core/node/rpc/sync" "github.com/river-build/river/core/node/storage" "github.com/river-build/river/core/xchain/entitlement" + "github.com/rs/cors" + "golang.org/x/net/http2" + "golang.org/x/net/http2/h2c" ) const ( @@ -563,11 +563,10 @@ func (s *Service) initCacheAndSync() error { s.mbProducer = events.NewMiniblockProducer(s.serverCtx, s.cache, nil) - s.syncHandler = NewSyncHandler( - s.wallet, + s.syncHandler = sync.NewHandler( + s.wallet.Address, s.cache, s.nodeRegistry, - s.streamRegistry, ) return nil diff --git a/core/node/rpc/service.go b/core/node/rpc/service.go index ee42f58ed..003ba1e0f 100644 --- a/core/node/rpc/service.go +++ b/core/node/rpc/service.go @@ -2,6 +2,7 @@ package rpc import ( "context" + river_sync "github.com/river-build/river/core/node/rpc/sync" "log/slog" "net" "net/http" @@ -46,7 +47,7 @@ type Service struct { // Streams cache events.StreamCache mbProducer events.MiniblockProducer - syncHandler SyncHandler + syncHandler river_sync.Handler // River chain riverChain *crypto.Blockchain diff --git a/core/node/rpc/service_sync_streams.go b/core/node/rpc/service_sync_streams.go new file mode 100644 index 000000000..dc4e03981 --- /dev/null +++ b/core/node/rpc/service_sync_streams.go @@ -0,0 +1,55 @@ +package rpc + +import ( + "connectrpc.com/connect" + "context" + . "github.com/river-build/river/core/node/protocol" +) + +// TODO: wire metrics. 
+// var ( +// syncStreamsRequests = infra.NewSuccessMetrics("sync_streams_requests", serviceRequests) +// syncStreamsResultSize = infra.NewCounter("sync_streams_result_size", "The total number of events returned by sync streams") +// ) + +// func addUpdatesToCounter(updates []*StreamAndCookie) { +// for _, stream := range updates { +// syncStreamsResultSize.Add(float64(len(stream.Events))) +// } +// } + +func (s *Service) SyncStreams( + ctx context.Context, + req *connect.Request[SyncStreamsRequest], + res *connect.ServerStream[SyncStreamsResponse], +) error { + return s.syncHandler.SyncStreams(ctx, req, res) +} + +func (s *Service) AddStreamToSync( + ctx context.Context, + req *connect.Request[AddStreamToSyncRequest], +) (*connect.Response[AddStreamToSyncResponse], error) { + return s.syncHandler.AddStreamToSync(ctx, req) +} + +func (s *Service) RemoveStreamFromSync( + ctx context.Context, + req *connect.Request[RemoveStreamFromSyncRequest], +) (*connect.Response[RemoveStreamFromSyncResponse], error) { + return s.syncHandler.RemoveStreamFromSync(ctx, req) +} + +func (s *Service) CancelSync( + ctx context.Context, + req *connect.Request[CancelSyncRequest], +) (*connect.Response[CancelSyncResponse], error) { + return s.syncHandler.CancelSync(ctx, req) +} + +func (s *Service) PingSync( + ctx context.Context, + req *connect.Request[PingSyncRequest], +) (*connect.Response[PingSyncResponse], error) { + return s.syncHandler.PingSync(ctx, req) +} diff --git a/core/node/rpc/service_test.go b/core/node/rpc/service_test.go index aeaedc46b..2f81122ce 100644 --- a/core/node/rpc/service_test.go +++ b/core/node/rpc/service_test.go @@ -1,17 +1,24 @@ package rpc import ( + "bytes" "context" "crypto/tls" "fmt" + "math/rand" "net" "net/http" "os" + "slices" "strconv" + "sync" "testing" + "time" - "golang.org/x/net/http2" - + "connectrpc.com/connect" + "github.com/ethereum/go-ethereum/accounts" + "github.com/ethereum/go-ethereum/common" + eth_crypto 
"github.com/ethereum/go-ethereum/crypto" "github.com/river-build/river/core/node/crypto" "github.com/river-build/river/core/node/dlog" "github.com/river-build/river/core/node/events" @@ -20,12 +27,8 @@ import ( "github.com/river-build/river/core/node/protocol/protocolconnect" . "github.com/river-build/river/core/node/shared" "github.com/river-build/river/core/node/testutils" - - "connectrpc.com/connect" - "github.com/ethereum/go-ethereum/accounts" - "github.com/ethereum/go-ethereum/common" - eth_crypto "github.com/ethereum/go-ethereum/crypto" "github.com/stretchr/testify/require" + "golang.org/x/net/http2" "google.golang.org/protobuf/proto" ) @@ -749,7 +752,7 @@ func testAddStreamsToSync(tester *serviceTester) { }, ), ) - require.Nilf(err, "error calling AddStreamsToSync: %v", err) + require.NoError(err, "error calling AddStreamsToSync") // wait for the sync syncRes.Receive() msg := syncRes.Msg() @@ -761,6 +764,7 @@ func testAddStreamsToSync(tester *serviceTester) { */ require.NotEmpty(syncId, "expected non-empty sync id") require.NotNil(msg.Stream, "expected 1 stream") + require.Equal(len(msg.Stream.Events), 1, "expected 1 event") require.Equal(syncId, msg.SyncId, "expected sync id to match") } @@ -839,7 +843,7 @@ func testRemoveStreamsFromSync(tester *serviceTester) { }, ), ) - require.Nilf(err, "error calling AddStreamsToSync: %v", err) + require.NoError(err, "AddStreamsToSync") log.Info("AddStreamToSync", "resp", resp) // When AddEvent is called, node calls streamImpl.notifyToSubscribers() twice // for different events. See hnt-3683 for explanation. First event is for @@ -898,13 +902,20 @@ OuterLoop: ) require.Nilf(err, "error calling AddEvent: %v", err) - /** - For debugging only. Uncomment to see syncRes.Receive() block. - bobClient's syncRes no longer receives the latest events from alice. 
+ gotUnexpectedMsg := make(chan *protocol.SyncStreamsResponse) + go func() { + if syncRes.Receive() { + gotUnexpectedMsg <- syncRes.Msg() + } + }() + + select { + case <-time.After(3 * time.Second): + break + case <-gotUnexpectedMsg: + require.Fail("received message after stream was removed from sync") + } - // wait to see if we got a message. We shouldn't. - // uncomment: syncRes.Receive() - */ syncCancel() /** @@ -1042,3 +1053,513 @@ func TestForwardingWithRetries(t *testing.T) { }) } } + +// TestUnstableStreams ensures that when a stream becomes unavailable a SyncOp_Down message is received and when +// available again allows the client to resubscribe. +func TestUnstableStreams(t *testing.T) { + var ( + req = require.New(t) + services = newServiceTester(t, serviceTesterOpts{numNodes: 5, start: true}) + client0 = services.testClient(0) + client1 = services.testClient(1) + ctx = services.ctx + wallets []*crypto.Wallet + users []*protocol.SyncCookie + channels []*protocol.SyncCookie + ) + + // create users that will join and add messages to channels. 
+ for range 10 { + // Create user streams + wallet, err := crypto.NewWallet(ctx) + req.NoError(err, "new wallet") + syncCookie, _, err := createUser(ctx, wallet, client0, nil) + req.NoError(err, "create user") + + _, _, err = createUserDeviceKeyStream(ctx, wallet, client0, nil) + req.NoError(err) + + wallets = append(wallets, wallet) + users = append(users, syncCookie) + } + + // create a space and several channels in it + spaceID := testutils.FakeStreamId(STREAM_SPACE_BIN) + resspace, _, err := createSpace(ctx, wallets[0], client0, spaceID, nil) + req.NoError(err) + req.NotNil(resspace, "create space sync cookie") + + // create enough channels that they will be distributed among local and remote nodes + for range TestStreams { + channelId := testutils.FakeStreamId(STREAM_CHANNEL_BIN) + channel, _, err := createChannel(ctx, wallets[0], client0, spaceID, channelId, nil) + req.NoError(err) + req.NotNil(channel, "nil create channel sync cookie") + channels = append(channels, channel) + } + + // subscribe to channel updates + syncPos := append(users, channels...) 
+ syncRes, err := client1.SyncStreams(ctx, connect.NewRequest(&protocol.SyncStreamsRequest{SyncPos: syncPos})) + req.NoError(err, "sync streams") + + syncRes.Receive() + syncID := syncRes.Msg().SyncId + t.Logf("subscription %s created on node: %s", syncID, services.nodes[1].address) + + // collect sync cookie updates for channels + var ( + messages = make(chan string, 512) + mu sync.Mutex + streamDownMessages = make(map[StreamId]struct{}) + syncCookies = make(map[StreamId][]*protocol.StreamAndCookie) + ) + + go func() { + for syncRes.Receive() { + msg := syncRes.Msg() + + switch msg.GetSyncOp() { + case protocol.SyncOp_SYNC_NEW: + syncID := msg.GetSyncId() + t.Logf("start stream sync %s ", syncID) + case protocol.SyncOp_SYNC_UPDATE: + req.Equal(syncID, msg.GetSyncId(), "sync id") + req.NotNil(msg.GetStream(), "stream") + req.NotNil(msg.GetStream().GetNextSyncCookie(), "next sync cookie") + cookie := msg.GetStream().GetNextSyncCookie() + streamID, err := StreamIdFromBytes(cookie.GetStreamId()) + if err != nil { + req.NoError(err, "invalid stream id in sync op update") + } + + mu.Lock() + syncCookies[streamID] = append(syncCookies[streamID], msg.GetStream()) + delete(streamDownMessages, streamID) + mu.Unlock() + + for _, e := range msg.GetStream().GetEvents() { + var payload protocol.StreamEvent + err = proto.Unmarshal(e.Event, &payload) + req.NoError(err) + switch p := payload.Payload.(type) { + case *protocol.StreamEvent_ChannelPayload: + switch p.ChannelPayload.Content.(type) { + case *protocol.ChannelPayload_Message: + messages <- p.ChannelPayload.GetMessage().GetCiphertext() + } + } + } + + case protocol.SyncOp_SYNC_DOWN: + req.Equal(syncID, msg.GetSyncId(), "sync id") + streamID, err := StreamIdFromBytes(msg.GetStreamId()) + req.NoError(err, "stream id") + + mu.Lock() + if _, found := streamDownMessages[streamID]; found { + t.Error("received a second down message in a row for a stream") + return + } + streamDownMessages[streamID] = struct{}{} + mu.Unlock() + + 
case protocol.SyncOp_SYNC_CLOSE: + req.Equal(syncID, msg.GetSyncId(), "invalid sync id in sync close message") + close(messages) + + case protocol.SyncOp_SYNC_UNSPECIFIED, protocol.SyncOp_SYNC_PONG: + continue + + default: + t.Errorf("unexpected sync operation %s", msg.GetSyncOp()) + return + } + } + }() + + // users join channels + channelsCount := len(channels) + for i, wallet := range wallets[1:] { + for c := range channelsCount { + channel := channels[c] + + miniBlockHashResp, err := client1.GetLastMiniblockHash( + ctx, + connect.NewRequest(&protocol.GetLastMiniblockHashRequest{StreamId: users[i+1].StreamId})) + + req.NoError(err, "get last miniblock hash") + + channelId, _ := StreamIdFromBytes(channel.GetStreamId()) + userJoin, err := events.MakeEnvelopeWithPayload( + wallet, + events.Make_UserPayload_Membership(protocol.MembershipOp_SO_JOIN, channelId, nil, spaceID[:]), + miniBlockHashResp.Msg.GetHash(), + ) + req.NoError(err) + + resp, err := client1.AddEvent( + ctx, + connect.NewRequest( + &protocol.AddEventRequest{ + StreamId: users[i+1].StreamId, + Event: userJoin, + }, + ), + ) + + req.NoError(err) + req.Nil(resp.Msg.GetError()) + } + } + + // send a bunch of messages and ensure that all are received + sendMessagesAndReceive(100, wallets, channels, req, client0, ctx, messages, func(StreamId) bool { return false }) + + t.Logf("first messages batch received") + + // bring ~25% of the streams down + streamsDownCounter := 0 + rand.Shuffle(len(channels), func(i, j int) { channels[i], channels[j] = channels[j], channels[i] }) + + for i, syncCookie := range channels { + streamID, _ := StreamIdFromBytes(syncCookie.GetStreamId()) + if _, err = client1.Info(ctx, connect.NewRequest(&protocol.InfoRequest{Debug: []string{ + "drop_stream", + syncID, + streamID.String(), + }})); err != nil { + req.NoError(err, "unable to bring stream down") + } + + streamsDownCounter++ + + t.Logf("bring stream %s down", streamID) + + if i > TestStreams/4 { + break + } + } + + // make 
sure that for all streams that are down a SyncOp_Down msg is received + req.Eventuallyf(func() bool { + mu.Lock() + count := len(streamDownMessages) + mu.Unlock() + + return count == streamsDownCounter + }, 20*time.Second, 100*time.Millisecond, "didn't receive for all streams a down message") + + t.Logf("received SyncOp_Down message for all expected streams") + + // make sure that no more stream down messages are received + req.Never(func() bool { + mu.Lock() + count := len(streamDownMessages) + mu.Unlock() + return count > streamsDownCounter + }, 5*time.Second, 100*time.Millisecond, "received unexpected stream down message") + + // send a bunch of messages to streams and ensure that we messages are received streams that are up + sendMessagesAndReceive(100, wallets, channels, req, client0, ctx, messages, func(streamID StreamId) bool { + mu.Lock() + defer mu.Unlock() + + _, found := streamDownMessages[streamID] + return found + }) + + t.Logf("second messages batch received") + + // resubscribe to the head on down streams and ensure that messages are received for all streams again + mu.Lock() + for streamID := range streamDownMessages { + getStreamResp, err := client1.GetStream(ctx, connect.NewRequest(&protocol.GetStreamRequest{ + StreamId: streamID[:], + Optional: false, + })) + req.NoError(err, "GetStream") + + _, err = client1.AddStreamToSync(ctx, connect.NewRequest(&protocol.AddStreamToSyncRequest{ + SyncId: syncID, + SyncPos: getStreamResp.Msg.GetStream().GetNextSyncCookie(), + })) + req.NoError(err, "AddStreamToSync") + } + mu.Unlock() + + t.Logf("resubscribed to streams that where brought down") + + // ensure that messages for all streams are received again + sendMessagesAndReceive(100, wallets, channels, req, client0, ctx, messages, func(StreamId) bool { return false }) + + t.Logf("third messages batch received") + + // unsub from ~25% streams and ensure that no updates are received again + unsubbedStreams := make(map[StreamId]struct{}) + 
rand.Shuffle(len(channels), func(i, j int) { channels[i], channels[j] = channels[j], channels[i] }) + for i, syncCookie := range channels { + streamID, _ := StreamIdFromBytes(syncCookie.GetStreamId()) + _, err = client1.RemoveStreamFromSync(ctx, connect.NewRequest(&protocol.RemoveStreamFromSyncRequest{ + SyncId: syncID, + StreamId: streamID[:], + })) + req.NoError(err, "RemoveStreamFromSync") + + unsubbedStreams[streamID] = struct{}{} + + t.Logf("unsubbed from stream %s", streamID) + + if i > TestStreams/4 { + break + } + } + + sendMessagesAndReceive(100, wallets, channels, req, client0, ctx, messages, func(streamID StreamId) bool { + _, found := unsubbedStreams[streamID] + return found + }) + + t.Logf("fourth messages batch received") + + // resubscribe to the head on down streams and ensure that messages are received for all streams again + mu.Lock() + for streamID := range unsubbedStreams { + getStreamResp, err := client1.GetStream(ctx, connect.NewRequest(&protocol.GetStreamRequest{ + StreamId: streamID[:], + Optional: false, + })) + req.NoError(err, "GetStream") + + _, err = client1.AddStreamToSync(ctx, connect.NewRequest(&protocol.AddStreamToSyncRequest{ + SyncId: syncID, + SyncPos: getStreamResp.Msg.GetStream().GetNextSyncCookie(), + })) + req.NoError(err, "AddStreamToSync") + } + mu.Unlock() + + t.Logf("resubscribed to streams that where brought down") + + sendMessagesAndReceive(100, wallets, channels, req, client0, ctx, messages, func(streamID StreamId) bool { + return false + }) + + t.Logf("fifth messages batch received") + + // drop all streams from a node + var ( + targetNodeAddr = services.nodes[4].address + targetStreams []StreamId + ) + + mu.Lock() + streamDownMessages = map[StreamId]struct{}{} + mu.Unlock() + + for _, pos := range syncPos { + if bytes.Equal(pos.GetNodeAddress(), targetNodeAddr.Bytes()) { + streamID, _ := StreamIdFromBytes(pos.GetStreamId()) + targetStreams = append(targetStreams, streamID) + } + } + + for _, targetStream := range 
targetStreams { + _, err = client1.Info(ctx, connect.NewRequest(&protocol.InfoRequest{Debug: []string{ + "drop_stream", + syncID, + targetStream.String(), + }})) + req.NoError(err, "drop stream") + } + + // make sure that for all streams that are down a SyncOp_Down msg is received + req.Eventuallyf(func() bool { + mu.Lock() + count := len(streamDownMessages) + mu.Unlock() + + return count == len(targetStreams) + }, 20*time.Second, 100*time.Millisecond, "didn't receive for all streams a down message") + + t.Logf("received SyncOp_Down message for all expected streams") + + sendMessagesAndReceive(100, wallets, channels, req, client0, ctx, messages, func(streamID StreamId) bool { + mu.Lock() + _, found := streamDownMessages[streamID] + mu.Unlock() + return found + }) + + t.Logf("sixt messages batch received") + + // make sure we can resubscribe to these streams + for _, streamID := range targetStreams { + getStreamResp, err := client1.GetStream(ctx, connect.NewRequest(&protocol.GetStreamRequest{ + StreamId: streamID[:], + Optional: false, + })) + req.NoError(err, "GetStream") + + _, err = client1.AddStreamToSync(ctx, connect.NewRequest(&protocol.AddStreamToSyncRequest{ + SyncId: syncID, + SyncPos: getStreamResp.Msg.GetStream().GetNextSyncCookie(), + })) + req.NoError(err, "AddStreamToSync") + } + + sendMessagesAndReceive(100, wallets, channels, req, client0, ctx, messages, func(streamID StreamId) bool { + return false + }) + + t.Logf("seventh messages batch received") + + _, err = client1.CancelSync(ctx, connect.NewRequest(&protocol.CancelSyncRequest{SyncId: syncID})) + req.NoError(err, "cancel sync") + + t.Logf("Streams subscription cancelled") + + sendMessagesAndReceive(100, wallets, channels, req, client0, ctx, messages, func(streamID StreamId) bool { + return true + }) + + t.Logf("eight messages batch received") + + // make sure that SyncOp_Close msg is received (messages is closed) + req.Eventuallyf(func() bool { + select { + case _, gotMsg := <-messages: + return 
!gotMsg + default: + return false + } + }, 20*time.Second, 100*time.Millisecond, "no SyncOp_Close message received") +} + +func sendMessagesAndReceive( + N int, + wallets []*crypto.Wallet, + channels []*protocol.SyncCookie, + require *require.Assertions, + client protocolconnect.StreamServiceClient, + ctx context.Context, + messages chan string, + expectNoReceive func(streamID StreamId) bool, +) { + var ( + prefix = fmt.Sprintf("%d", time.Now().UnixMilli()%100000) + sendMsgCount = 0 + expMsgToReceive = make(map[string]struct{}) + ) + + // send a bunch of messages to random channels + for range N { + wallet := wallets[rand.Int()%len(wallets)] + channel := channels[rand.Int()%len(channels)] + streamID, _ := StreamIdFromBytes(channel.GetStreamId()) + expNoRecv := expectNoReceive(streamID) + msgContents := fmt.Sprintf("%s: msg #%d", prefix, sendMsgCount) + + getStreamResp, err := client.GetStream(ctx, connect.NewRequest(&protocol.GetStreamRequest{ + StreamId: channel.GetStreamId(), + Optional: false, + })) + require.NoError(err) + + message, err := events.MakeEnvelopeWithPayload( + wallet, + events.Make_ChannelPayload_Message(msgContents), + getStreamResp.Msg.GetStream().GetNextSyncCookie().GetPrevMiniblockHash(), + ) + require.NoError(err) + + _, err = client.AddEvent( + ctx, + connect.NewRequest( + &protocol.AddEventRequest{ + StreamId: channel.GetStreamId(), + Event: message, + }, + ), + ) + + require.NoError(err) + + if !expNoRecv { + expMsgToReceive[msgContents] = struct{}{} + sendMsgCount++ + } + } + + // make sure all expected messages are received + require.Eventuallyf(func() bool { + for { + select { + case msg, ok := <-messages: + if !ok { + return len(expMsgToReceive) == 0 + } + + delete(expMsgToReceive, msg) + continue + default: + return len(expMsgToReceive) == 0 + } + } + }, 20*time.Second, 100*time.Millisecond, "didn't receive messages in reasonable time") +} + +// TestStreamSyncPingPong test stream sync subscription ping/pong +func 
TestStreamSyncPingPong(t *testing.T) { + var ( + req = require.New(t) + services = newServiceTester(t, serviceTesterOpts{numNodes: 2, start: true}) + client = services.testClient(0) + ctx = services.ctx + mu sync.Mutex + pongs []string + syncID string + ) + + // create stream sub + syncRes, err := client.SyncStreams(ctx, connect.NewRequest(&protocol.SyncStreamsRequest{SyncPos: nil})) + req.NoError(err, "sync streams") + + pings := []string{"ping1", "ping2", "ping3", "ping4", "ping5"} + sendPings := func() { + for _, ping := range pings { + _, err := client.PingSync(ctx, connect.NewRequest(&protocol.PingSyncRequest{SyncId: syncID, Nonce: ping})) + req.NoError(err, "ping sync") + } + } + + go func() { + for syncRes.Receive() { + msg := syncRes.Msg() + switch msg.GetSyncOp() { + case protocol.SyncOp_SYNC_NEW: + syncID = msg.GetSyncId() + // send some pings and ensure all pongs are received + sendPings() + case protocol.SyncOp_SYNC_PONG: + req.NotEmpty(syncID, "expected non-empty sync id") + req.Equal(syncID, msg.GetSyncId(), "sync id") + mu.Lock() + pongs = append(pongs, msg.GetPongNonce()) + mu.Unlock() + case protocol.SyncOp_SYNC_CLOSE, protocol.SyncOp_SYNC_DOWN, + protocol.SyncOp_SYNC_UNSPECIFIED, protocol.SyncOp_SYNC_UPDATE: + continue + default: + t.Errorf("unexpected sync operation %s", msg.GetSyncOp()) + return + } + } + }() + + req.Eventuallyf(func() bool { + mu.Lock() + defer mu.Unlock() + return slices.Equal(pings, pongs) + }, 20*time.Second, 100*time.Millisecond, "didn't receive all pongs in reasonable time or out of order") +} diff --git a/core/node/rpc/sync/client/local.go b/core/node/rpc/sync/client/local.go new file mode 100644 index 000000000..4a6493f7e --- /dev/null +++ b/core/node/rpc/sync/client/local.go @@ -0,0 +1,149 @@ +package client + +import ( + "context" + "sync" + + "github.com/ethereum/go-ethereum/common" + . "github.com/river-build/river/core/node/base" + "github.com/river-build/river/core/node/events" + . 
"github.com/river-build/river/core/node/protocol" + . "github.com/river-build/river/core/node/shared" +) + +type localSyncer struct { + syncStreamCtx context.Context + + streamCache events.StreamCache + cookies []*SyncCookie + messages chan<- *SyncStreamsResponse + localAddr common.Address + + activeStreamsMu sync.Mutex + activeStreams map[StreamId]events.SyncStream +} + +func newLocalSyncer( + ctx context.Context, + localAddr common.Address, + streamCache events.StreamCache, + cookies []*SyncCookie, + messages chan<- *SyncStreamsResponse, +) (*localSyncer, error) { + return &localSyncer{ + syncStreamCtx: ctx, + streamCache: streamCache, + localAddr: localAddr, + cookies: cookies, + messages: messages, + activeStreams: make(map[StreamId]events.SyncStream), + }, nil +} + +func (s *localSyncer) Run() { + for _, cookie := range s.cookies { + streamID, _ := StreamIdFromBytes(cookie.GetStreamId()) + _ = s.addStream(s.syncStreamCtx, streamID, cookie) + } + + <-s.syncStreamCtx.Done() + + s.activeStreamsMu.Lock() + defer s.activeStreamsMu.Unlock() + + for streamID, syncStream := range s.activeStreams { + syncStream.Unsub(s) + delete(s.activeStreams, streamID) + } +} + +func (s *localSyncer) Address() common.Address { + return s.localAddr +} + +func (s *localSyncer) AddStream(ctx context.Context, cookie *SyncCookie) error { + streamID, err := StreamIdFromBytes(cookie.GetStreamId()) + if err != nil { + return err + } + return s.addStream(ctx, streamID, cookie) +} + +func (s *localSyncer) RemoveStream(_ context.Context, streamID StreamId) (bool, error) { + s.activeStreamsMu.Lock() + defer s.activeStreamsMu.Unlock() + + syncStream, found := s.activeStreams[streamID] + if found { + syncStream.Unsub(s) + delete(s.activeStreams, streamID) + } + + return len(s.activeStreams) == 0, nil +} + +// OnUpdate is called each time a new cookie is available for a stream +func (s *localSyncer) OnUpdate(r *StreamAndCookie) { + s.messages <- &SyncStreamsResponse{ + SyncOp: SyncOp_SYNC_UPDATE, 
+ Stream: r, + } +} + +// OnSyncError is called when a sync subscription failed unrecoverable +func (s *localSyncer) OnSyncError(error) { + s.activeStreamsMu.Lock() + defer s.activeStreamsMu.Unlock() + + for streamID, syncStream := range s.activeStreams { + syncStream.Unsub(s) + delete(s.activeStreams, streamID) + s.OnStreamSyncDown(streamID) + } +} + +// OnStreamSyncDown is called when updates for a stream could not be given. +func (s *localSyncer) OnStreamSyncDown(streamID StreamId) { + s.messages <- &SyncStreamsResponse{ + SyncOp: SyncOp_SYNC_DOWN, + StreamId: streamID[:], + } +} + +func (s *localSyncer) addStream(ctx context.Context, streamID StreamId, cookie *SyncCookie) error { + s.activeStreamsMu.Lock() + defer s.activeStreamsMu.Unlock() + + // prevent subscribing multiple times on the same stream + if _, found := s.activeStreams[streamID]; found { + return nil + } + + syncStream, err := s.streamCache.GetSyncStream(ctx, streamID) + if err != nil { + return err + } + + if err := syncStream.Sub(ctx, cookie, s); err != nil { + return err + } + + s.activeStreams[streamID] = syncStream + + return nil +} + +func (s *localSyncer) DebugDropStream(_ context.Context, streamID StreamId) (bool, error) { + s.activeStreamsMu.Lock() + defer s.activeStreamsMu.Unlock() + + syncStream, found := s.activeStreams[streamID] + if found { + syncStream.Unsub(s) + delete(s.activeStreams, streamID) + s.OnStreamSyncDown(streamID) + return false, nil + } + + return false, RiverError(Err_NOT_FOUND, "stream not found").Tag("stream", streamID) +} diff --git a/core/node/rpc/sync/client/remote.go b/core/node/rpc/sync/client/remote.go new file mode 100644 index 000000000..92bd6fcfb --- /dev/null +++ b/core/node/rpc/sync/client/remote.go @@ -0,0 +1,243 @@ +package client + +import ( + "context" + "fmt" + "sync" + "sync/atomic" + "time" + + "connectrpc.com/connect" + "github.com/ethereum/go-ethereum/common" + . 
"github.com/river-build/river/core/node/base" + "github.com/river-build/river/core/node/dlog" + . "github.com/river-build/river/core/node/protocol" + "github.com/river-build/river/core/node/protocol/protocolconnect" + . "github.com/river-build/river/core/node/shared" +) + +type remoteSyncer struct { + syncStreamCtx context.Context + syncStreamCancel context.CancelFunc + syncID string + forwarderSyncID string + remoteAddr common.Address + client protocolconnect.StreamServiceClient + cookies []*SyncCookie + messages chan<- *SyncStreamsResponse + streams sync.Map + responseStream *connect.ServerStreamForClient[SyncStreamsResponse] +} + +func newRemoteSyncer( + ctx context.Context, + forwarderSyncID string, + remoteAddr common.Address, + client protocolconnect.StreamServiceClient, + cookies []*SyncCookie, + messages chan<- *SyncStreamsResponse, +) (*remoteSyncer, error) { + syncStreamCtx, syncStreamCancel := context.WithCancel(ctx) + responseStream, err := client.SyncStreams(syncStreamCtx, connect.NewRequest(&SyncStreamsRequest{SyncPos: cookies})) + if err != nil { + for _, cookie := range cookies { + messages <- &SyncStreamsResponse{ + SyncOp: SyncOp_SYNC_DOWN, + StreamId: cookie.GetStreamId(), + } + } + syncStreamCancel() + return nil, err + } + + if !responseStream.Receive() { + syncStreamCancel() + return nil, responseStream.Err() + } + + log := dlog.FromCtx(ctx) + + if responseStream.Msg().GetSyncOp() != SyncOp_SYNC_NEW || responseStream.Msg().GetSyncId() == "" { + log.Error("Received unexpected sync stream message", + "syncOp", responseStream.Msg().SyncOp, + "syncId", responseStream.Msg().SyncId) + syncStreamCancel() + return nil, err + } + + s := &remoteSyncer{ + forwarderSyncID: forwarderSyncID, + syncStreamCtx: syncStreamCtx, + syncStreamCancel: syncStreamCancel, + client: client, + cookies: cookies, + messages: messages, + responseStream: responseStream, + remoteAddr: remoteAddr, + } + + s.syncID = responseStream.Msg().GetSyncId() + + for _, cookie := range 
s.cookies { + streamID, _ := StreamIdFromBytes(cookie.GetStreamId()) + s.streams.Store(streamID, struct{}{}) + } + + return s, nil +} + +func (s *remoteSyncer) Run() { + defer s.responseStream.Close() + + var latestMsgReceived atomic.Value + + latestMsgReceived.Store(time.Now()) + + go s.connectionAlive(&latestMsgReceived) + + for s.responseStream.Receive() { + if s.syncStreamCtx.Err() != nil { + break + } + + latestMsgReceived.Store(time.Now()) + + res := s.responseStream.Msg() + + if res.GetSyncOp() == SyncOp_SYNC_UPDATE { + s.messages <- res + } else if res.GetSyncOp() == SyncOp_SYNC_DOWN { + if streamID, err := StreamIdFromBytes(res.GetStreamId()); err == nil { + s.messages <- res + s.streams.Delete(streamID) + } + } + } + + // stream interrupted while client didn't cancel sync -> remote is unavailable + if s.syncStreamCtx.Err() == nil { + log := dlog.FromCtx(s.syncStreamCtx) + log.Info("remote node disconnected", "remote", s.remoteAddr) + + s.streams.Range(func(key, value any) bool { + streamID := key.(StreamId) + + log.Debug("stream down", "syncId", s.forwarderSyncID, "remote", s.remoteAddr, "stream", streamID) + s.messages <- &SyncStreamsResponse{ + SyncOp: SyncOp_SYNC_DOWN, + StreamId: streamID[:], + } + return true + }) + } +} + +// connectionAlive periodically pings remote to check if the connection is still alive. +// if the remote can't be reach the sync stream is canceled. 
+func (s *remoteSyncer) connectionAlive(latestMsgReceived *atomic.Value) { + var ( + // check every pingTicker if it's time to send a ping req to remote + pingTicker = time.NewTicker(3 * time.Second) + // don't send a ping req if there was activity within recentActivityInterval + recentActivityInterval = 15 * time.Second + // if no message was received within recentActivityDeadline assume stream is dead + recentActivityDeadline = 30 * time.Second + ) + defer pingTicker.Stop() + + for { + select { + case <-pingTicker.C: + now := time.Now() + lastMsgRecv := latestMsgReceived.Load().(time.Time) + if lastMsgRecv.Add(recentActivityDeadline).Before(now) { // no recent activity -> conn dead + s.syncStreamCancel() + return + } + + if lastMsgRecv.Add(recentActivityInterval).After(now) { // seen recent activity + continue + } + + // send ping to remote to generate activity to check if remote is still alive + if _, err := s.client.PingSync(s.syncStreamCtx, connect.NewRequest(&PingSyncRequest{ + SyncId: s.syncID, + Nonce: fmt.Sprintf("%d", now.Unix()), + })); err != nil { + s.syncStreamCancel() + return + } + // don't return here: keep the watchdog loop running so the deadline check + // continues for the lifetime of the sync stream + + case <-s.syncStreamCtx.Done(): + return + } + } +} + +func (s *remoteSyncer) Address() common.Address { + return s.remoteAddr +} + +func (s *remoteSyncer) AddStream(ctx context.Context, cookie *SyncCookie) error { + streamID, err := StreamIdFromBytes(cookie.GetStreamId()) + if err != nil { + return err + } + + _, err = s.client.AddStreamToSync(ctx, connect.NewRequest(&AddStreamToSyncRequest{ + SyncId: s.syncID, + SyncPos: cookie, + })) + + if err == nil { + s.streams.Store(streamID, struct{}{}) + } + + return err +} + +func (s *remoteSyncer) RemoveStream(ctx context.Context, streamID StreamId) (bool, error) { + _, err := s.client.RemoveStreamFromSync(ctx, connect.NewRequest(&RemoveStreamFromSyncRequest{ + SyncId: s.syncID, + StreamId: streamID[:], + })) + + if err == nil { + s.streams.Delete(streamID) + } + + noMoreStreams := true + s.streams.Range(func(key, 
value any) bool { + noMoreStreams = false + return false + }) + + if noMoreStreams { + s.syncStreamCancel() + } + + return noMoreStreams, err +} + +func (s *remoteSyncer) DebugDropStream(ctx context.Context, streamID StreamId) (bool, error) { + if _, err := s.client.Info(ctx, connect.NewRequest(&InfoRequest{Debug: []string{ + "drop_stream", + s.syncID, + streamID.String(), + }})); err != nil { + return false, AsRiverError(err) + } + + noMoreStreams := true + s.streams.Range(func(key, value any) bool { + noMoreStreams = false + return false + }) + + if noMoreStreams { + s.syncStreamCancel() + } + + return noMoreStreams, nil +} diff --git a/core/node/rpc/sync/client/syncer_set.go b/core/node/rpc/sync/client/syncer_set.go new file mode 100644 index 000000000..e3a9a8d84 --- /dev/null +++ b/core/node/rpc/sync/client/syncer_set.go @@ -0,0 +1,273 @@ +package client + +import ( + "context" + "sync" + + "github.com/ethereum/go-ethereum/common" + . "github.com/river-build/river/core/node/base" + "github.com/river-build/river/core/node/events" + "github.com/river-build/river/core/node/nodes" + . "github.com/river-build/river/core/node/protocol" + . "github.com/river-build/river/core/node/shared" +) + +type ( + StreamsSyncer interface { + Run() + Address() common.Address + AddStream(ctx context.Context, cookie *SyncCookie) error + RemoveStream(ctx context.Context, streamID StreamId) (bool, error) + } + + DebugStreamsSyncer interface { + DebugDropStream(ctx context.Context, streamID StreamId) (bool, error) + } + + // SyncerSet is the set of StreamsSyncers that are used for a sync operation. 
+ SyncerSet struct { + // ctx is the root context for all syncers in this set and used to cancel them + ctx context.Context + // syncID is the sync id as used between the client and this node + syncID string + // localNodeAddress is the node address for this stream node instance + localNodeAddress common.Address + // messages is the channel to which StreamsSyncers write updates that must be sent to the client + messages chan *SyncStreamsResponse + // streamCache is used to subscribe to streams managed by this node instance + streamCache events.StreamCache + // nodeRegistry keeps a mapping from node address to node meta-data + nodeRegistry nodes.NodeRegistry + // syncerTasks is a wait group for running background StreamsSyncers that is used to ensure all syncers stopped + syncerTasks sync.WaitGroup + // muSyncers guards syncers and streamID2Syncer + muSyncers sync.Mutex + // syncers is the existing set of syncers, indexed by the syncer node address + syncers map[common.Address]StreamsSyncer + // streamID2Syncer maps from a stream to its syncer + streamID2Syncer map[StreamId]StreamsSyncer + } + + // SyncCookieSet maps from a stream id to a sync cookie + SyncCookieSet map[StreamId]*SyncCookie + // StreamCookieSetGroupedByNodeAddress is a mapping from a node address to a SyncCookieSet + StreamCookieSetGroupedByNodeAddress map[common.Address]SyncCookieSet +) + +var ( + _ StreamsSyncer = (*localSyncer)(nil) + _ DebugStreamsSyncer = (*localSyncer)(nil) + + _ StreamsSyncer = (*remoteSyncer)(nil) + _ DebugStreamsSyncer = (*remoteSyncer)(nil) +) + +func (cs SyncCookieSet) AsSlice() []*SyncCookie { + cookies := make([]*SyncCookie, 0, len(cs)) + for _, cookie := range cs { + cookies = append(cookies, cookie) + } + return cookies +} + +// NewSyncers creates the required syncer set that subscribe on all given cookies. +// A syncer can either be local or remote and writes received events to an internal messages channel from which events +// are streamed to the client. 
+func NewSyncers( + ctx context.Context, + syncID string, + streamCache events.StreamCache, + nodeRegistry nodes.NodeRegistry, + localNodeAddress common.Address, + cookies StreamCookieSetGroupedByNodeAddress, +) (*SyncerSet, <-chan *SyncStreamsResponse, error) { + var ( + syncers = make(map[common.Address]StreamsSyncer) + streamID2Syncer = make(map[StreamId]StreamsSyncer) + messages = make(chan *SyncStreamsResponse, 128) + ) + + // instantiate background syncers for sync operation + for nodeAddress, cookieSet := range cookies { + if nodeAddress == localNodeAddress { // stream managed by this node + syncer, err := newLocalSyncer(ctx, localNodeAddress, streamCache, cookieSet.AsSlice(), messages) + if err != nil { + return nil, nil, err + } + syncers[nodeAddress] = syncer + } else { + client, err := nodeRegistry.GetStreamServiceClientForAddress(nodeAddress) + if err != nil { + return nil, nil, err + } + + syncer, err := newRemoteSyncer(ctx, syncID, nodeAddress, client, cookieSet.AsSlice(), messages) + if err != nil { + return nil, nil, err + } + + syncers[nodeAddress] = syncer + } + + // associate syncer with streamId to remove stream from sync operation + syncer := syncers[nodeAddress] + for streamID := range cookieSet { + streamID2Syncer[streamID] = syncer + } + } + + return &SyncerSet{ + ctx: ctx, + syncID: syncID, + streamCache: streamCache, + nodeRegistry: nodeRegistry, + localNodeAddress: localNodeAddress, + syncers: syncers, + streamID2Syncer: streamID2Syncer, + messages: messages, + }, messages, nil +} + +func (ss *SyncerSet) Run() { + ss.muSyncers.Lock() + for _, syncer := range ss.syncers { + ss.startSyncer(syncer) + } + ss.muSyncers.Unlock() + + <-ss.ctx.Done() // sync cancelled by client or client conn dropped + ss.syncerTasks.Wait() // background syncers finished + close(ss.messages) // close will cause the sync operation to send the SYNC_CLOSE message to the client +} + +func (ss *SyncerSet) AddStream(ctx context.Context, nodeAddress common.Address, 
streamID StreamId, cookie *SyncCookie) error { + ss.muSyncers.Lock() + defer ss.muSyncers.Unlock() + + if _, found := ss.streamID2Syncer[streamID]; found { + return nil // stream is already part of sync operation + } + + // check if there is already a syncer that can sync the given stream -> add stream to the syncer + if syncer, found := ss.syncers[nodeAddress]; found { + if err := syncer.AddStream(ctx, cookie); err != nil { + return err + } + ss.streamID2Syncer[streamID] = syncer + return nil + } + + // first stream to sync with remote -> create a new syncer instance + var ( + syncer StreamsSyncer + err error + ) + if nodeAddress == ss.localNodeAddress { + if syncer, err = newLocalSyncer(ss.ctx, ss.localNodeAddress, ss.streamCache, []*SyncCookie{cookie}, ss.messages); err != nil { + return err + } + } else { + client, err := ss.nodeRegistry.GetStreamServiceClientForAddress(nodeAddress) + if err != nil { + return err + } + if syncer, err = newRemoteSyncer(ss.ctx, ss.syncID, nodeAddress, client, []*SyncCookie{cookie}, ss.messages); err != nil { + return err + } + } + + ss.syncers[nodeAddress] = syncer + ss.streamID2Syncer[streamID] = syncer + ss.startSyncer(syncer) + + return nil +} + +// caller must have ss.muSyncers claimed +func (ss *SyncerSet) startSyncer(syncer StreamsSyncer) { + ss.syncerTasks.Add(1) + go func() { + syncer.Run() + ss.muSyncers.Lock() + delete(ss.syncers, syncer.Address()) + ss.muSyncers.Unlock() + ss.syncerTasks.Done() + }() +} + +func (ss *SyncerSet) RemoveStream(ctx context.Context, streamID StreamId) error { + ss.muSyncers.Lock() + defer ss.muSyncers.Unlock() + + // get the syncer that is responsible for the stream. + // (if not it indicates state corruption between ss.streamID2Syncer and ss.syncers) + syncer, found := ss.streamID2Syncer[streamID] + if !found { + return RiverError(Err_NOT_FOUND, "Stream not part of sync operation"). 
+ Tags("syncId", ss.syncID, "streamId", streamID) + } + + syncerStopped, err := syncer.RemoveStream(ctx, streamID) + if err != nil { + return err + } + + delete(ss.streamID2Syncer, streamID) + if syncerStopped { + delete(ss.syncers, syncer.Address()) + } + + return nil +} + +func (ss *SyncerSet) DebugDropStream(ctx context.Context, streamID StreamId) error { + ss.muSyncers.Lock() + defer ss.muSyncers.Unlock() + + syncer, found := ss.streamID2Syncer[streamID] + if !found { + return RiverError(Err_NOT_FOUND, "Stream not part of sync operation"). + Tags("syncId", ss.syncID, "streamId", streamID) + } + + debugSyncer, ok := syncer.(DebugStreamsSyncer) + if !ok { + return RiverError(Err_UNAVAILABLE, + "Syncer responsible for stream doesn't support debug drop stream"). + Tags("syncId", ss.syncID, "streamId", streamID) + } + + syncerStopped, err := debugSyncer.DebugDropStream(ctx, streamID) + if err != nil { + return err + } + + delete(ss.streamID2Syncer, streamID) + if syncerStopped { + delete(ss.syncers, syncer.Address()) + } + + return nil +} + +// ValidateAndGroupSyncCookies validates the given syncCookies and groups them by node address/streamID. 
+func ValidateAndGroupSyncCookies(syncCookies []*SyncCookie) (StreamCookieSetGroupedByNodeAddress, error) { + cookies := make(StreamCookieSetGroupedByNodeAddress) + for _, cookie := range syncCookies { + if err := events.SyncCookieValidate(cookie); err != nil { + return nil, err + } + + streamID, err := StreamIdFromBytes(cookie.GetStreamId()) + if err != nil { + return nil, err + } + + nodeAddr := common.BytesToAddress(cookie.NodeAddress) + if cookies[nodeAddr] == nil { + cookies[nodeAddr] = make(map[StreamId]*SyncCookie) + } + cookies[nodeAddr][streamID] = cookie + } + return cookies, nil +} diff --git a/core/node/rpc/sync/handler.go b/core/node/rpc/sync/handler.go new file mode 100644 index 000000000..4e7cb639d --- /dev/null +++ b/core/node/rpc/sync/handler.go @@ -0,0 +1,168 @@ +package sync + +import ( + "context" + "github.com/river-build/river/core/node/utils" + "sync" + + "connectrpc.com/connect" + "github.com/ethereum/go-ethereum/common" + . "github.com/river-build/river/core/node/base" + "github.com/river-build/river/core/node/events" + "github.com/river-build/river/core/node/nodes" + . "github.com/river-build/river/core/node/protocol" + "github.com/river-build/river/core/node/shared" +) + +type ( + // Handler defines the external grpc interface that clients can call. 
+ Handler interface { + SyncStreams( + ctx context.Context, + req *connect.Request[SyncStreamsRequest], + res *connect.ServerStream[SyncStreamsResponse], + ) error + + AddStreamToSync( + ctx context.Context, + req *connect.Request[AddStreamToSyncRequest], + ) (*connect.Response[AddStreamToSyncResponse], error) + + RemoveStreamFromSync( + ctx context.Context, + req *connect.Request[RemoveStreamFromSyncRequest], + ) (*connect.Response[RemoveStreamFromSyncResponse], error) + + CancelSync( + ctx context.Context, + req *connect.Request[CancelSyncRequest], + ) (*connect.Response[CancelSyncResponse], error) + + PingSync( + ctx context.Context, + req *connect.Request[PingSyncRequest], + ) (*connect.Response[PingSyncResponse], error) + } + + // DebugHandler defines the external grpc interface that clients can call for debugging purposes. + DebugHandler interface { + // DebugDropStream drops the stream from the sync session and sends the stream down message to the client. + DebugDropStream( + ctx context.Context, + syncID string, + streamID shared.StreamId, + ) error + } + + handlerImpl struct { + // nodeAddr is used to determine if a stream is local or remote + nodeAddr common.Address + // streamCache is used to subscribe on local streams + streamCache events.StreamCache + // nodeRegistry is used to find a node endpoint to subscribe on remote streams + nodeRegistry nodes.NodeRegistry + // activeSyncOperations keeps a mapping from SyncID -> *StreamSyncOperation + activeSyncOperations sync.Map + } +) + +var ( + _ Handler = (*handlerImpl)(nil) + _ DebugHandler = (*handlerImpl)(nil) +) + +// NewHandler returns a structure that implements the Handler interface. +// It keeps internally a map of in progress stream sync operations and forwards add stream, remove sream, cancel sync +// requests to the associated stream sync operation. 
+func NewHandler( + nodeAddr common.Address, + cache events.StreamCache, + nodeRegistry nodes.NodeRegistry, +) *handlerImpl { + return &handlerImpl{ + nodeAddr: nodeAddr, + streamCache: cache, + nodeRegistry: nodeRegistry, + } +} + +func (h *handlerImpl) SyncStreams( + ctx context.Context, + req *connect.Request[SyncStreamsRequest], + res *connect.ServerStream[SyncStreamsResponse], +) error { + ctx, log := utils.CtxAndLogForRequest(ctx, req) + + op, err := NewStreamsSyncOperation(ctx, h.nodeAddr, h.streamCache, h.nodeRegistry) + if err != nil { + log.Error("Unable to create streams sync subscription", "error", err) + return err + } + + h.activeSyncOperations.Store(op.SyncID, op) + defer h.activeSyncOperations.Delete(op.SyncID) + + // send SyncID to client + if err := res.Send(&SyncStreamsResponse{ + SyncId: op.SyncID, + SyncOp: SyncOp_SYNC_NEW, + }); err != nil { + err := AsRiverError(err).Func("SyncStreams") + return err + } + + // run until sub.ctx expires or until the client calls CancelSync + return op.Run(req, res) +} + +func (h *handlerImpl) AddStreamToSync( + ctx context.Context, + req *connect.Request[AddStreamToSyncRequest], +) (*connect.Response[AddStreamToSyncResponse], error) { + if op, ok := h.activeSyncOperations.Load(req.Msg.GetSyncId()); ok { + return op.(*StreamSyncOperation).AddStreamToSync(ctx, req) + } + return nil, RiverError(Err_NOT_FOUND, "unknown sync operation").Tag("syncId", req.Msg.GetSyncId()) +} + +func (h *handlerImpl) RemoveStreamFromSync( + ctx context.Context, + req *connect.Request[RemoveStreamFromSyncRequest], +) (*connect.Response[RemoveStreamFromSyncResponse], error) { + if op, ok := h.activeSyncOperations.Load(req.Msg.GetSyncId()); ok { + return op.(*StreamSyncOperation).RemoveStreamFromSync(ctx, req) + } + return nil, RiverError(Err_NOT_FOUND, "unknown sync operation").Tag("syncId", req.Msg.GetSyncId()) +} + +func (h *handlerImpl) CancelSync( + ctx context.Context, + req *connect.Request[CancelSyncRequest], +) 
(*connect.Response[CancelSyncResponse], error) { + if op, ok := h.activeSyncOperations.Load(req.Msg.GetSyncId()); ok { + // sync op is dropped from h.activeSyncOps when SyncStreams returns + return op.(*StreamSyncOperation).CancelSync(ctx, req) + } + return nil, RiverError(Err_NOT_FOUND, "unknown sync operation").Tag("syncId", req.Msg.GetSyncId()) +} + +func (h *handlerImpl) PingSync( + ctx context.Context, + req *connect.Request[PingSyncRequest], +) (*connect.Response[PingSyncResponse], error) { + if op, ok := h.activeSyncOperations.Load(req.Msg.GetSyncId()); ok { + return op.(*StreamSyncOperation).PingSync(ctx, req) + } + return nil, RiverError(Err_NOT_FOUND, "unknown sync operation").Tag("syncId", req.Msg.GetSyncId()) +} + +func (h *handlerImpl) DebugDropStream( + ctx context.Context, + syncID string, + streamID shared.StreamId, +) error { + if op, ok := h.activeSyncOperations.Load(syncID); ok { + return op.(*StreamSyncOperation).debugDropStream(ctx, streamID) + } + return RiverError(Err_NOT_FOUND, "unknown sync operation").Tag("syncId", syncID) +} diff --git a/core/node/rpc/sync/operation.go b/core/node/rpc/sync/operation.go new file mode 100644 index 000000000..77dc9eb3c --- /dev/null +++ b/core/node/rpc/sync/operation.go @@ -0,0 +1,242 @@ +package sync + +import ( + "context" + + "connectrpc.com/connect" + "github.com/ethereum/go-ethereum/common" + . "github.com/river-build/river/core/node/base" + "github.com/river-build/river/core/node/events" + "github.com/river-build/river/core/node/nodes" + . "github.com/river-build/river/core/node/protocol" + "github.com/river-build/river/core/node/rpc/sync/client" + "github.com/river-build/river/core/node/shared" +) + +type ( + // StreamSyncOperation represents a stream sync operation that is currently in progress. + StreamSyncOperation struct { + // SyncID is the identifier as used with the external client to identify the streams sync operation. 
+		SyncID string
+		// ctx is the root context for this subscription; when it expires the subscription and all background syncers are
+		// cancelled
+		ctx context.Context
+		// cancel cancels the sync operation
+		cancel context.CancelFunc
+		// commands holds incoming requests from the client to add/remove streams, ping, or debug-drop a stream
+		commands chan *subCommand
+		// thisNodeAddress keeps the address of this stream node instance
+		thisNodeAddress common.Address
+		// streamCache gives access to streams managed by this node
+		streamCache events.StreamCache
+		// nodeRegistry is used to get the remote node endpoint from a node address
+		nodeRegistry nodes.NodeRegistry
+	}
+
+	// subCommand represents a request to add or remove a stream, ping the sync operation, or debug-drop a stream
+	subCommand struct {
+		Ctx context.Context
+		RmStreamReq *connect.Request[RemoveStreamFromSyncRequest]
+		AddStreamReq *connect.Request[AddStreamToSyncRequest]
+		PingReq *connect.Request[PingSyncRequest]
+		DebugDropStream shared.StreamId
+		reply chan error
+	}
+)
+
+func (cmd *subCommand) Reply(err error) {
+	if err != nil {
+		cmd.reply <- err
+	}
+	close(cmd.reply)
+}
+
+// NewStreamsSyncOperation initialises a new sync stream operation. It groups the given syncCookies per stream node
+// by its address and subscribes on the internal stream cache for local streams.
+//
+// Use the Run method to start syncing.
+func NewStreamsSyncOperation( + ctx context.Context, + node common.Address, + streamCache events.StreamCache, + nodeRegistry nodes.NodeRegistry, +) (*StreamSyncOperation, error) { + // make the sync operation cancellable for CancelSync + ctx, cancel := context.WithCancel(ctx) + + return &StreamSyncOperation{ + ctx: ctx, + cancel: cancel, + SyncID: GenNanoid(), + thisNodeAddress: node, + commands: make(chan *subCommand), + streamCache: streamCache, + nodeRegistry: nodeRegistry, + }, nil +} + +// Run the stream sync until either sub.Cancel is called or until sub.ctx expired +func (syncOp *StreamSyncOperation) Run( + req *connect.Request[SyncStreamsRequest], + res *connect.ServerStream[SyncStreamsResponse], +) error { + defer syncOp.cancel() + + cookies, err := client.ValidateAndGroupSyncCookies(req.Msg.GetSyncPos()) + if err != nil { + return err + } + + syncers, messages, err := client.NewSyncers( + syncOp.ctx, syncOp.SyncID, syncOp.streamCache, syncOp.nodeRegistry, syncOp.thisNodeAddress, cookies) + if err != nil { + return err + } + + go syncers.Run() + + for { + select { + case msg, ok := <-messages: + if !ok { // messages is closed in syncers when syncOp.ctx is cancelled + _ = res.Send(&SyncStreamsResponse{ + SyncId: syncOp.SyncID, + SyncOp: SyncOp_SYNC_CLOSE, + }) + return nil + } + + msg.SyncId = syncOp.SyncID + if err = res.Send(msg); err != nil { + return err + } + + case cmd := <-syncOp.commands: + if cmd.AddStreamReq != nil { + nodeAddress := common.BytesToAddress(cmd.AddStreamReq.Msg.GetSyncPos().GetNodeAddress()) + streamID, err := shared.StreamIdFromBytes(cmd.AddStreamReq.Msg.GetSyncPos().GetStreamId()) + if err != nil { + cmd.Reply(err) + continue + } + cmd.Reply(syncers.AddStream(cmd.Ctx, nodeAddress, streamID, cmd.AddStreamReq.Msg.GetSyncPos())) + } else if cmd.RmStreamReq != nil { + streamID, err := shared.StreamIdFromBytes(cmd.RmStreamReq.Msg.GetStreamId()) + if err != nil { + cmd.Reply(err) + continue + } + cmd.Reply(syncers.RemoveStream(cmd.Ctx, 
streamID)) + } else if cmd.PingReq != nil { + err = res.Send(&SyncStreamsResponse{ + SyncId: syncOp.SyncID, + SyncOp: SyncOp_SYNC_PONG, + PongNonce: cmd.PingReq.Msg.GetNonce(), + }) + cmd.Reply(err) + } else if cmd.DebugDropStream != (shared.StreamId{}) { + cmd.Reply(syncers.DebugDropStream(cmd.Ctx, cmd.DebugDropStream)) + } + } + } +} + +func (syncOp *StreamSyncOperation) AddStreamToSync( + ctx context.Context, + req *connect.Request[AddStreamToSyncRequest], +) (*connect.Response[AddStreamToSyncResponse], error) { + if err := events.SyncCookieValidate(req.Msg.GetSyncPos()); err != nil { + return nil, err + } + + cmd := &subCommand{ + Ctx: ctx, + AddStreamReq: req, + reply: make(chan error, 1), + } + + if err := syncOp.process(cmd); err != nil { + return nil, err + } + + return connect.NewResponse(&AddStreamToSyncResponse{}), nil +} + +func (syncOp *StreamSyncOperation) RemoveStreamFromSync( + ctx context.Context, + req *connect.Request[RemoveStreamFromSyncRequest], +) (*connect.Response[RemoveStreamFromSyncResponse], error) { + if req.Msg.GetSyncId() != syncOp.SyncID { + return nil, RiverError(Err_INVALID_ARGUMENT, "invalid syncId").Tag("syncId", req.Msg.GetSyncId()) + } + + cmd := &subCommand{ + Ctx: ctx, + RmStreamReq: req, + reply: make(chan error, 1), + } + + if err := syncOp.process(cmd); err != nil { + return nil, err + } + + return connect.NewResponse(&RemoveStreamFromSyncResponse{}), nil +} + +func (syncOp *StreamSyncOperation) CancelSync( + _ context.Context, + req *connect.Request[CancelSyncRequest], +) (*connect.Response[CancelSyncResponse], error) { + if req.Msg.GetSyncId() != syncOp.SyncID { + return nil, RiverError(Err_INVALID_ARGUMENT, "invalid syncId").Tag("syncId", req.Msg.GetSyncId()) + } + + syncOp.cancel() + + return connect.NewResponse(&CancelSyncResponse{}), nil +} + +func (syncOp *StreamSyncOperation) PingSync( + ctx context.Context, + req *connect.Request[PingSyncRequest], +) (*connect.Response[PingSyncResponse], error) { + if 
req.Msg.GetSyncId() != syncOp.SyncID { + return nil, RiverError(Err_INVALID_ARGUMENT, "invalid syncId").Tag("syncId", req.Msg.GetSyncId()) + } + + cmd := &subCommand{ + Ctx: ctx, + PingReq: req, + reply: make(chan error, 1), + } + + if err := syncOp.process(cmd); err != nil { + return nil, err + } + + return connect.NewResponse(&PingSyncResponse{}), nil +} + +func (syncOp *StreamSyncOperation) debugDropStream(ctx context.Context, streamID shared.StreamId) error { + cmd := &subCommand{ + Ctx: ctx, + DebugDropStream: streamID, + reply: make(chan error, 1), + } + + return syncOp.process(cmd) +} + +func (syncOp *StreamSyncOperation) process(cmd *subCommand) error { + select { + case syncOp.commands <- cmd: + select { + case err := <-cmd.reply: + return err + case <-syncOp.ctx.Done(): + return RiverError(Err_CANCELED, "sync operation cancelled").Tags("syncId", syncOp.SyncID) + } + case <-syncOp.ctx.Done(): + return RiverError(Err_CANCELED, "sync operation cancelled").Tags("syncId", syncOp.SyncID) + } +} diff --git a/core/node/rpc/sync_receiver.go b/core/node/rpc/sync_receiver.go deleted file mode 100644 index 9a918b904..000000000 --- a/core/node/rpc/sync_receiver.go +++ /dev/null @@ -1,84 +0,0 @@ -package rpc - -import ( - "context" - "sync" - - . "github.com/river-build/river/core/node/base" - "github.com/river-build/river/core/node/dlog" - . "github.com/river-build/river/core/node/events" - . "github.com/river-build/river/core/node/protocol" -) - -type syncReceiver struct { - ctx context.Context - cancel context.CancelFunc - channel chan *StreamAndCookie - - mu sync.Mutex - firstError error -} - -var _ SyncResultReceiver = (*syncReceiver)(nil) - -func (s *syncReceiver) OnUpdate(r *StreamAndCookie) { - if s.ctx.Err() != nil { - return - } - - select { - case s.channel <- r: - return - default: - err := RiverError( - Err_BUFFER_FULL, - "channel full, dropping update and canceling", - "streamId", - r.NextSyncCookie.StreamId, - ). - Func("OnUpdate"). 
- LogWarn(dlog.FromCtx(s.ctx)) - s.setErrorAndCancel(err) - return - } -} - -func (s *syncReceiver) OnSyncError(err error) { - if s.ctx.Err() != nil { - return - } - s.setErrorAndCancel(err) - dlog.FromCtx(s.ctx).Warn("OnSyncError: cancelling sync", "error", err) -} - -func (s *syncReceiver) setErrorAndCancel(err error) { - s.mu.Lock() - if s.firstError == nil { - s.firstError = err - } - s.mu.Unlock() - - s.cancel() -} - -func (s *syncReceiver) Dispatch(sender syncStream) { - log := dlog.FromCtx(s.ctx) - - for { - select { - case <-s.ctx.Done(): - err := s.ctx.Err() - s.setErrorAndCancel(err) - log.Debug("SyncStreams: context done", "err", err) - return - case data := <-s.channel: - log.Debug("SyncStreams: received update in forward loop", "data", data) - resp := SyncStreamsResponseFromStreamAndCookie(data) - if err := sender.Send(resp); err != nil { - s.setErrorAndCancel(err) - log.Debug("SyncStreams: failed to send update", "resp", data, "err", err) - return - } - } - } -} diff --git a/core/node/rpc/sync_streams.go b/core/node/rpc/sync_streams.go deleted file mode 100644 index d9a3e5187..000000000 --- a/core/node/rpc/sync_streams.go +++ /dev/null @@ -1,843 +0,0 @@ -package rpc - -import ( - "bytes" - "context" - "errors" - "sync" - - "connectrpc.com/connect" - "github.com/ethereum/go-ethereum/common" - - . "github.com/river-build/river/core/node/base" - "github.com/river-build/river/core/node/crypto" - "github.com/river-build/river/core/node/dlog" - "github.com/river-build/river/core/node/events" - "github.com/river-build/river/core/node/nodes" - . "github.com/river-build/river/core/node/protocol" - "github.com/river-build/river/core/node/protocol/protocolconnect" - . "github.com/river-build/river/core/node/shared" -) - -// TODO: wire metrics. 
-// var ( -// syncStreamsRequests = infra.NewSuccessMetrics("sync_streams_requests", serviceRequests) -// syncStreamsResultSize = infra.NewCounter("sync_streams_result_size", "The total number of events returned by sync streams") -// ) - -// func addUpdatesToCounter(updates []*StreamAndCookie) { -// for _, stream := range updates { -// syncStreamsResultSize.Add(float64(len(stream.Events))) -// } -// } - -func NewSyncHandler( - wallet *crypto.Wallet, - cache events.StreamCache, - nodeRegistry nodes.NodeRegistry, - streamRegistry nodes.StreamRegistry, -) SyncHandler { - return &syncHandlerImpl{ - wallet: wallet, - cache: cache, - nodeRegistry: nodeRegistry, - streamRegistry: streamRegistry, - mu: sync.Mutex{}, - syncIdToSubscription: make(map[string]*syncSubscriptionImpl), - } -} - -type SyncHandler interface { - SyncStreams( - ctx context.Context, - req *connect.Request[SyncStreamsRequest], - res *connect.ServerStream[SyncStreamsResponse], - ) error - AddStreamToSync( - ctx context.Context, - req *connect.Request[AddStreamToSyncRequest], - ) (*connect.Response[AddStreamToSyncResponse], error) - RemoveStreamFromSync( - ctx context.Context, - req *connect.Request[RemoveStreamFromSyncRequest], - ) (*connect.Response[RemoveStreamFromSyncResponse], error) - CancelSync( - ctx context.Context, - req *connect.Request[CancelSyncRequest], - ) (*connect.Response[CancelSyncResponse], error) - PingSync( - ctx context.Context, - req *connect.Request[PingSyncRequest], - ) (*connect.Response[PingSyncResponse], error) -} - -type syncHandlerImpl struct { - wallet *crypto.Wallet - cache events.StreamCache - nodeRegistry nodes.NodeRegistry - streamRegistry nodes.StreamRegistry - mu sync.Mutex - syncIdToSubscription map[string]*syncSubscriptionImpl -} - -type syncNode struct { - address common.Address - remoteSyncId string // the syncId to the remote node's sync subscription - forwarderSyncId string // the forwarding node's sync Id - stub protocolconnect.StreamServiceClient - - mu 
sync.Mutex - closed bool -} - -func (s *Service) SyncStreams( - ctx context.Context, - req *connect.Request[SyncStreamsRequest], - res *connect.ServerStream[SyncStreamsResponse], -) error { - return s.syncHandler.SyncStreams(ctx, req, res) -} - -func (s *Service) AddStreamToSync( - ctx context.Context, - req *connect.Request[AddStreamToSyncRequest], -) (*connect.Response[AddStreamToSyncResponse], error) { - return s.syncHandler.AddStreamToSync(ctx, req) -} - -func (s *Service) RemoveStreamFromSync( - ctx context.Context, - req *connect.Request[RemoveStreamFromSyncRequest], -) (*connect.Response[RemoveStreamFromSyncResponse], error) { - return s.syncHandler.RemoveStreamFromSync(ctx, req) -} - -func (s *Service) CancelSync( - ctx context.Context, - req *connect.Request[CancelSyncRequest], -) (*connect.Response[CancelSyncResponse], error) { - return s.syncHandler.CancelSync(ctx, req) -} - -func (s *Service) PingSync( - ctx context.Context, - req *connect.Request[PingSyncRequest], -) (*connect.Response[PingSyncResponse], error) { - return s.syncHandler.PingSync(ctx, req) -} - -func (s *syncHandlerImpl) SyncStreams( - ctx context.Context, - req *connect.Request[SyncStreamsRequest], - res *connect.ServerStream[SyncStreamsResponse], -) error { - ctx, log := ctxAndLogForRequest(ctx, req) - - // generate a random syncId - syncId := GenNanoid() - log.Debug("SyncStreams:SyncHandlerV2.SyncStreams ENTER", "syncId", syncId, "syncPos", req.Msg.SyncPos) - - sub, err := s.addSubscription(ctx, syncId) - if err != nil { - log.Info( - "SyncStreams:SyncHandlerV2.SyncStreams LEAVE: failed to add subscription", - "syncId", - syncId, - "err", - err, - ) - return err - } - - // send syncId to client - e := res.Send(&SyncStreamsResponse{ - SyncId: syncId, - SyncOp: SyncOp_SYNC_NEW, - }) - if e != nil { - err := AsRiverError(e).Func("SyncStreams") - log.Info( - "SyncStreams:SyncHandlerV2.SyncStreams LEAVE: failed to send syncId", - "res", - res, - "err", - err, - "syncId", - syncId, - ) - 
return err - } - log.Debug("SyncStreams:SyncHandlerV2.SyncStreams: sent syncId", "syncId", syncId) - - e = s.handleSyncRequest(req, res, sub) - if e != nil { - err := AsRiverError(e).Func("SyncStreams") - if err.Code == Err_CANCELED { - // Context is canceled when client disconnects, so this is normal case. - log.Debug( - "SyncStreams:SyncHandlerV2.SyncStreams LEAVE: sync Dispatch() ended with expected error", - "syncId", - syncId, - ) - _ = err.LogDebug(log) - } else { - log.Info("SyncStreams:SyncHandlerV2.SyncStreams LEAVE: sync Dispatch() ended with unexpected error", "syncId", syncId) - _ = err.LogWarn(log) - } - return err.AsConnectError() - } - // no errors from handling the sync request. - log.Debug("SyncStreams:SyncHandlerV2.SyncStreams LEAVE") - return nil -} - -func (s *syncHandlerImpl) handleSyncRequest( - req *connect.Request[SyncStreamsRequest], - res *connect.ServerStream[SyncStreamsResponse], - sub *syncSubscriptionImpl, -) error { - if sub == nil { - return RiverError(Err_NOT_FOUND, "SyncId not found").Func("SyncStreams") - } - log := dlog.FromCtx(sub.ctx) - - defer s.removeSubscription(sub.ctx, sub.syncId) - - localCookies, remoteCookies := getLocalAndRemoteCookies(s.wallet.Address, req.Msg.SyncPos) - - for nodeAddr, remoteCookie := range remoteCookies { - var r *syncNode - if r = sub.getRemoteNode(nodeAddr); r == nil { - stub, err := s.nodeRegistry.GetStreamServiceClientForAddress(nodeAddr) - if err != nil { - // TODO: Handle the case when node is no longer available. 
HNT-4715 - log.Error( - "SyncStreams:SyncHandlerV2.SyncStreams failed to get stream service client", - "syncId", - sub.syncId, - "err", - err, - ) - return err - } - - r = &syncNode{ - address: nodeAddr, - forwarderSyncId: sub.syncId, - stub: stub, - } - } - err := sub.addSyncNode(r, remoteCookie) - if err != nil { - return err - } - } - - if len(localCookies) > 0 { - go s.syncLocalNode(sub.ctx, localCookies, sub) - } - - remotes := sub.getRemoteNodes() - for _, remote := range remotes { - cookies := remoteCookies[remote.address] - go remote.syncRemoteNode(sub.ctx, sub.syncId, cookies, sub) - } - - // start the sync loop - log.Debug("SyncStreams:SyncHandlerV2.SyncStreams: sync Dispatch() started", "syncId", sub.syncId) - sub.Dispatch(res) - log.Debug("SyncStreams:SyncHandlerV2.SyncStreams: sync Dispatch() ended", "syncId", sub.syncId) - - err := sub.getError() - if err != nil { - log.Debug( - "SyncStreams:SyncHandlerV2.SyncStreams LEAVE: sync Dispatch() ended with expected error", - "syncId", - sub.syncId, - ) - return err - } - - log.Error("SyncStreams:SyncStreams: sync always should be terminated by context cancel.") - return nil -} - -func (s *syncHandlerImpl) CancelSync( - ctx context.Context, - req *connect.Request[CancelSyncRequest], -) (*connect.Response[CancelSyncResponse], error) { - _, log := ctxAndLogForRequest(ctx, req) - log.Debug("SyncStreams:SyncHandlerV2.CancelSync ENTER", "syncId", req.Msg.SyncId) - sub := s.getSub(req.Msg.SyncId) - if sub != nil { - sub.OnClose() - } - log.Debug("SyncStreams:SyncHandlerV2.CancelSync LEAVE", "syncId", req.Msg.SyncId) - return connect.NewResponse(&CancelSyncResponse{}), nil -} - -func (s *syncHandlerImpl) PingSync( - ctx context.Context, - req *connect.Request[PingSyncRequest], -) (*connect.Response[PingSyncResponse], error) { - _, log := ctxAndLogForRequest(ctx, req) - syncId := req.Msg.SyncId - - sub := s.getSub(syncId) - if sub == nil { - log.Debug("SyncStreams: ping sync", "syncId", syncId) - return nil, 
RiverError(Err_NOT_FOUND, "SyncId not found").Func("PingSync") - } - - // cancel if context is done - if sub.ctx.Err() != nil { - log.Debug("SyncStreams: ping sync", "syncId", syncId, "context_error", sub.ctx.Err()) - return nil, RiverError(Err_CANCELED, "SyncId canceled").Func("PingSync") - } - - log.Debug("SyncStreams: ping sync", "syncId", syncId) - c := pingOp{ - baseSyncOp: baseSyncOp{op: SyncOp_SYNC_PONG}, - nonce: req.Msg.Nonce, - } - select { - // send the pong response to the client via the control channel - case sub.controlChannel <- &c: - return connect.NewResponse(&PingSyncResponse{}), nil - default: - return nil, RiverError(Err_BUFFER_FULL, "control channel full").Func("PingSync") - } -} - -func getLocalAndRemoteCookies( - localWalletAddr common.Address, - syncCookies []*SyncCookie, -) (localCookies []*SyncCookie, remoteCookies map[common.Address][]*SyncCookie) { - localCookies = make([]*SyncCookie, 0, 8) - remoteCookies = make(map[common.Address][]*SyncCookie) - for _, cookie := range syncCookies { - if bytes.Equal(cookie.NodeAddress, localWalletAddr[:]) { - localCookies = append(localCookies, cookie) - } else { - remoteAddr := common.BytesToAddress(cookie.NodeAddress) - if remoteCookies[remoteAddr] == nil { - remoteCookies[remoteAddr] = make([]*SyncCookie, 0, 8) - } - remoteCookies[remoteAddr] = append(remoteCookies[remoteAddr], cookie) - } - } - return -} - -func (s *syncHandlerImpl) syncLocalNode( - ctx context.Context, - syncPos []*SyncCookie, - sub *syncSubscriptionImpl, -) { - log := dlog.FromCtx(ctx) - - if ctx.Err() != nil { - log.Error("SyncStreams:SyncHandlerV2.SyncStreams: syncLocalNode not starting", "context_error", ctx.Err()) - return - } - - err := s.syncLocalStreamsImpl(ctx, syncPos, sub) - if err != nil { - log.Error("SyncStreams:SyncHandlerV2.SyncStreams: syncLocalNode failed", "err", err) - if sub != nil { - sub.OnSyncError(err) - } - } -} - -func (s *syncHandlerImpl) syncLocalStreamsImpl( - ctx context.Context, - syncPos 
[]*SyncCookie, - sub *syncSubscriptionImpl, -) error { - if len(syncPos) <= 0 { - return nil - } - - defer func() { - if sub != nil { - sub.unsubLocalStreams() - } - }() - - for _, pos := range syncPos { - if ctx.Err() != nil { - return ctx.Err() - } - - err := s.addLocalStreamToSync(ctx, pos, sub) - if err != nil { - return err - } - } - - // Wait for context to be done before unsubbing. - <-ctx.Done() - return nil -} - -func (s *syncHandlerImpl) addLocalStreamToSync( - ctx context.Context, - cookie *SyncCookie, - subs *syncSubscriptionImpl, -) error { - log := dlog.FromCtx(ctx) - log.Debug("SyncStreams:SyncHandlerV2.addLocalStreamToSync ENTER", "syncId", subs.syncId, "syncPos", cookie) - - if ctx.Err() != nil { - log.Error("SyncStreams:SyncHandlerV2.addLocalStreamToSync: context error", "err", ctx.Err()) - return ctx.Err() - } - if subs == nil { - return RiverError(Err_NOT_FOUND, "SyncId not found").Func("SyncStreams") - } - - err := events.SyncCookieValidate(cookie) - if err != nil { - log.Debug("SyncStreams:SyncHandlerV2.addLocalStreamToSync: invalid cookie", "err", err) - return nil - } - - cookieStreamId, err := StreamIdFromBytes(cookie.StreamId) - if err != nil { - return err - } - - if s := subs.getLocalStream(cookieStreamId); s != nil { - // stream is already subscribed. no need to re-subscribe. 
- log.Debug( - "SyncStreams:SyncHandlerV2.addLocalStreamToSync: stream already subscribed", - "streamId", - cookieStreamId, - ) - return nil - } - - streamSub, err := s.cache.GetSyncStream(ctx, cookieStreamId) - if err != nil { - log.Info( - "SyncStreams:SyncHandlerV2.addLocalStreamToSync: failed to get stream", - "streamId", - cookieStreamId, - "err", - err, - ) - return err - } - - err = subs.addLocalStream(ctx, cookie, &streamSub) - if err != nil { - log.Info( - "SyncStreams:SyncHandlerV2.addLocalStreamToSync: error subscribing to stream", - "streamId", - cookie.StreamId, - "err", - err, - ) - return err - } - - log.Debug( - "SyncStreams:SyncHandlerV2.addLocalStreamToSync LEAVE", - "syncId", - subs.syncId, - "streamId", - cookie.StreamId, - ) - return nil -} - -func (s *syncHandlerImpl) AddStreamToSync( - ctx context.Context, - req *connect.Request[AddStreamToSyncRequest], -) (*connect.Response[AddStreamToSyncResponse], error) { - ctx, log := ctxAndLogForRequest(ctx, req) - log.Debug("SyncStreams:SyncHandlerV2.AddStreamToSync ENTER", "syncId", req.Msg.SyncId, "syncPos", req.Msg.SyncPos) - - syncId := req.Msg.SyncId - cookie := req.Msg.SyncPos - - log.Debug("SyncStreams:SyncHandlerV2.AddStreamToSync: getting sub", "syncId", syncId) - sub := s.getSub(syncId) - if sub == nil { - log.Info("SyncStreams:SyncHandlerV2.AddStreamToSync LEAVE: SyncId not found", "syncId", syncId) - return nil, RiverError(Err_NOT_FOUND, "SyncId not found").Func("AddStreamToSync") - } - log.Debug("SyncStreams:SyncHandlerV2.AddStreamToSync: got sub", "syncId", syncId) - - // Two cases to handle. Either local cookie or remote cookie. - if bytes.Equal(cookie.NodeAddress[:], s.wallet.Address[:]) { - // Case 1: local cookie - if err := s.addLocalStreamToSync(ctx, cookie, sub); err != nil { - log.Info( - "SyncStreams:SyncHandlerV2.AddStreamToSync LEAVE: failed to add local streams", - "syncId", - syncId, - "err", - err, - ) - return nil, err - } - // done. 
- log.Debug("SyncStreams:SyncHandlerV2.AddStreamToSync: LEAVE", "syncId", syncId) - return connect.NewResponse(&AddStreamToSyncResponse{}), nil - } - - // Case 2: remote cookie - log.Debug("SyncStreams:SyncHandlerV2.AddStreamToSync: adding remote streams", "syncId", syncId) - nodeAddress := common.BytesToAddress(cookie.NodeAddress[:]) - remoteNode := sub.getRemoteNode(nodeAddress) - isNewRemoteNode := remoteNode == nil - log.Debug( - "SyncStreams:SyncHandlerV2.AddStreamToSync: remote node", - "syncId", - syncId, - "isNewRemoteNode", - isNewRemoteNode, - ) - if isNewRemoteNode { - // the remote node does not exist in the subscription. add it. - stub, err := s.nodeRegistry.GetStreamServiceClientForAddress(nodeAddress) - if err != nil { - log.Info( - "SyncStreams:SyncHandlerV2.AddStreamToSync: failed to get stream service client", - "syncId", - req.Msg.SyncId, - "err", - err, - ) - // TODO: Handle the case when node is no longer available. - return nil, err - } - if stub == nil { - panic("stub always should set for the remote node") - } - - remoteNode = &syncNode{ - address: nodeAddress, - forwarderSyncId: sub.syncId, - stub: stub, - } - sub.addRemoteNode(nodeAddress, remoteNode) - log.Info("SyncStreams:SyncHandlerV2.AddStreamToSync: added remote node", "syncId", req.Msg.SyncId) - } - err := sub.addRemoteStream(cookie) - if err != nil { - log.Info( - "SyncStreams:SyncHandlerV2.AddStreamToSync LEAVE: failed to add remote streams", - "syncId", - req.Msg.SyncId, - "err", - err, - ) - return nil, err - } - log.Info("SyncStreams:SyncHandlerV2.AddStreamToSync: added remote stream", "syncId", req.Msg.SyncId) - - if isNewRemoteNode { - // tell the new remote node to sync - syncPos := make([]*SyncCookie, 0, 1) - syncPos = append(syncPos, cookie) - log.Info("SyncStreams:SyncHandlerV2.AddStreamToSync: syncing new remote node", "syncId", req.Msg.SyncId) - go remoteNode.syncRemoteNode(sub.ctx, sub.syncId, syncPos, sub) - } else { - 
log.Info("SyncStreams:SyncHandlerV2.AddStreamToSync: adding stream to existing remote node", "syncId", req.Msg.SyncId) - // tell the existing remote nodes to add the streams to sync - go remoteNode.addStreamToSync(sub.ctx, cookie, sub) - } - - log.Debug("SyncStreams:SyncHandlerV2.AddStreamToSync LEAVE", "syncId", req.Msg.SyncId) - return connect.NewResponse(&AddStreamToSyncResponse{}), nil -} - -func (s *syncHandlerImpl) RemoveStreamFromSync( - ctx context.Context, - req *connect.Request[RemoveStreamFromSyncRequest], -) (*connect.Response[RemoveStreamFromSyncResponse], error) { - _, log := ctxAndLogForRequest(ctx, req) - log.Info( - "SyncStreams:SyncHandlerV2.RemoveStreamFromSync ENTER", - "syncId", - req.Msg.SyncId, - "streamId", - req.Msg.StreamId, - ) - - syncId := req.Msg.SyncId - streamId, err := StreamIdFromBytes(req.Msg.StreamId) - if err != nil { - log.Info( - "SyncStreams:SyncHandlerV2.RemoveStreamFromSync LEAVE: failed to parse streamId", - "syncId", - syncId, - "err", - err, - ) - return nil, err - } - - sub := s.getSub(syncId) - if sub == nil { - log.Info("SyncStreams:SyncHandlerV2.RemoveStreamFromSync LEAVE: SyncId not found", "syncId", syncId) - return nil, RiverError(Err_NOT_FOUND, "SyncId not found").Func("RemoveStreamFromSync") - } - - // remove the streamId from the local node - sub.removeLocalStream(streamId) - - // use the streamId to find the remote node to remove - remoteNode := sub.removeRemoteStream(streamId) - if remoteNode != nil { - log.Debug( - "SyncStreams:SyncHandlerV2.RemoveStreamFromSync: removing remote stream", - "syncId", - syncId, - "streamId", - streamId, - ) - err := remoteNode.removeStreamFromSync(sub.ctx, streamId, sub) - if err != nil { - log.Info( - "SyncStreams:SyncHandlerV2.RemoveStreamFromSync: failed to remove remote stream", - "syncId", - syncId, - "streamId", - streamId, - "err", - err, - ) - return nil, err - } - // remove any remote nodes that no longer have any streams to sync - sub.purgeUnusedRemoteNodes(log) - } 
- - log.Info("SyncStreams:SyncHandlerV2.RemoveStreamFromSync LEAVE", "syncId", syncId) - return connect.NewResponse(&RemoveStreamFromSyncResponse{}), nil -} - -func (s *syncHandlerImpl) addSubscription( - ctx context.Context, - syncId string, -) (*syncSubscriptionImpl, error) { - log := dlog.FromCtx(ctx) - s.mu.Lock() - defer s.mu.Unlock() - - if s.syncIdToSubscription == nil { - s.syncIdToSubscription = make(map[string]*syncSubscriptionImpl) - } - if sub := s.syncIdToSubscription[syncId]; sub != nil { - return nil, errors.New("syncId subscription already exists") - } - sub := newSyncSubscription(ctx, syncId) - s.syncIdToSubscription[syncId] = sub - log.Debug("SyncStreams:addSubscription: syncId subscription added", "syncId", syncId) - return sub, nil -} - -func (s *syncHandlerImpl) removeSubscription( - ctx context.Context, - syncId string, -) { - log := dlog.FromCtx(ctx) - sub := s.getSub(syncId) - if sub != nil { - sub.deleteRemoteNodes() - } - s.mu.Lock() - if _, exists := s.syncIdToSubscription[syncId]; exists { - delete(s.syncIdToSubscription, syncId) - log.Debug("SyncStreams:removeSubscription: syncId subscription removed", "syncId", syncId) - } else { - log.Debug("SyncStreams:removeSubscription: syncId not found", "syncId", syncId) - } - s.mu.Unlock() -} - -func (s *syncHandlerImpl) getSub( - syncId string, -) *syncSubscriptionImpl { - s.mu.Lock() - defer s.mu.Unlock() - return s.syncIdToSubscription[syncId] -} - -// TODO: connect-go is not using channels for streaming (>_<), so it's a bit tricky to close all these -// streams properly. For now basic protocol is to close entire sync if there is any error. -// Which in turn means that we need to close all outstanding streams to remote nodes. -// Without control signals there is no clean way to do so, so for now both ctx is canceled and Close is called -// async hoping this will trigger Receive to abort. 
-func (n *syncNode) syncRemoteNode( - ctx context.Context, - forwarderSyncId string, - syncPos []*SyncCookie, - receiver events.SyncResultReceiver, -) { - log := dlog.FromCtx(ctx) - if ctx.Err() != nil || n.isClosed() { - log.Debug("SyncStreams: syncRemoteNode not started", "context_error", ctx.Err()) - return - } - if n.remoteSyncId != "" { - log.Debug( - "SyncStreams: syncRemoteNode not started because there is an existing sync", - "remoteSyncId", - n.remoteSyncId, - "forwarderSyncId", - forwarderSyncId, - ) - return - } - - defer func() { - if n != nil { - n.close() - } - }() - - responseStream, err := n.stub.SyncStreams( - ctx, - &connect.Request[SyncStreamsRequest]{ - Msg: &SyncStreamsRequest{ - SyncPos: syncPos, - }, - }, - ) - if err != nil { - log.Debug("SyncStreams: syncRemoteNode remote SyncStreams failed", "err", err) - receiver.OnSyncError(err) - return - } - defer responseStream.Close() - - if ctx.Err() != nil || n.isClosed() { - log.Debug("SyncStreams: syncRemoteNode receive canceled", "context_error", ctx.Err()) - return - } - - if !responseStream.Receive() { - receiver.OnSyncError(responseStream.Err()) - return - } - - if responseStream.Msg().SyncOp != SyncOp_SYNC_NEW || responseStream.Msg().SyncId == "" { - receiver.OnSyncError( - RiverError(Err_INTERNAL, "first sync response should be SYNC_NEW and have SyncId").Func("syncRemoteNode"), - ) - return - } - - n.remoteSyncId = responseStream.Msg().SyncId - n.forwarderSyncId = forwarderSyncId - - if ctx.Err() != nil || n.isClosed() { - log.Debug("SyncStreams: syncRemoteNode receive canceled", "context_error", ctx.Err()) - return - } - - for responseStream.Receive() { - if ctx.Err() != nil || n.isClosed() { - log.Debug("SyncStreams: syncRemoteNode receive canceled", "context_error", ctx.Err()) - return - } - - log.Debug("SyncStreams: syncRemoteNode received update", "resp", responseStream.Msg()) - - receiver.OnUpdate(responseStream.Msg().GetStream()) - } - - if ctx.Err() != nil || n.isClosed() { - return 
- } - - if err := responseStream.Err(); err != nil { - log.Debug("SyncStreams: syncRemoteNode receive failed", "err", err) - receiver.OnSyncError(err) - return - } -} - -func (n *syncNode) addStreamToSync( - ctx context.Context, - cookie *SyncCookie, - receiver events.SyncResultReceiver, -) { - log := dlog.FromCtx(ctx) - if ctx.Err() != nil || n.isClosed() { - log.Debug("SyncStreams:syncNode addStreamToSync not started", "context_error", ctx.Err()) - } - if n.remoteSyncId == "" { - log.Debug( - "SyncStreams:syncNode addStreamToSync not started because there is no existing sync", - "remoteSyncId", - n.remoteSyncId, - ) - } - - _, err := n.stub.AddStreamToSync( - ctx, - &connect.Request[AddStreamToSyncRequest]{ - Msg: &AddStreamToSyncRequest{ - SyncPos: cookie, - SyncId: n.remoteSyncId, - }, - }, - ) - if err != nil { - log.Debug("SyncStreams:syncNode addStreamToSync failed", "err", err) - receiver.OnSyncError(err) - } -} - -func (n *syncNode) removeStreamFromSync( - ctx context.Context, - streamId StreamId, - receiver events.SyncResultReceiver, -) error { - log := dlog.FromCtx(ctx) - if ctx.Err() != nil || n.isClosed() { - log.Debug("SyncStreams:syncNode removeStreamsFromSync not started", "context_error", ctx.Err()) - return ctx.Err() - } - if n.remoteSyncId == "" { - log.Debug( - "SyncStreams:syncNode removeStreamsFromSync not started because there is no existing sync", - "syncId", - n.remoteSyncId, - ) - return nil - } - - _, err := n.stub.RemoveStreamFromSync( - ctx, - &connect.Request[RemoveStreamFromSyncRequest]{ - Msg: &RemoveStreamFromSyncRequest{ - SyncId: n.remoteSyncId, - StreamId: streamId[:], - }, - }, - ) - if err != nil { - log.Debug("SyncStreams:syncNode removeStreamsFromSync failed", "err", err) - receiver.OnSyncError(err) - } - return err -} - -func (n *syncNode) isClosed() bool { - n.mu.Lock() - defer n.mu.Unlock() - return n.closed -} - -func (n *syncNode) close() { - n.mu.Lock() - defer n.mu.Unlock() - n.closed = true -} diff --git 
a/core/node/rpc/sync_subscription.go b/core/node/rpc/sync_subscription.go deleted file mode 100644 index 20562598c..000000000 --- a/core/node/rpc/sync_subscription.go +++ /dev/null @@ -1,423 +0,0 @@ -package rpc - -import ( - "context" - "log/slog" - "sync" - - "connectrpc.com/connect" - "github.com/ethereum/go-ethereum/common" - - . "github.com/river-build/river/core/node/base" - "github.com/river-build/river/core/node/dlog" - "github.com/river-build/river/core/node/events" - . "github.com/river-build/river/core/node/protocol" - . "github.com/river-build/river/core/node/shared" -) - -type syncOp interface { - getOp() SyncOp -} - -type baseSyncOp struct { - op SyncOp -} - -func (d *baseSyncOp) getOp() SyncOp { - return d.op -} - -type pingOp struct { - baseSyncOp - nonce string // used to match a response to a ping request -} - -type syncSubscriptionImpl struct { - ctx context.Context - syncId string - cancel context.CancelFunc - - mu sync.Mutex - firstError error - dataChannel chan *StreamAndCookie - controlChannel chan syncOp - localStreams map[StreamId]*events.SyncStream // mapping of streamId to local stream - remoteStreams map[StreamId]*syncNode // mapping of streamId to remote node - remoteNodes map[common.Address]*syncNode // mapping of node address to remote node -} - -func newSyncSubscription( - ctx context.Context, - syncId string, -) *syncSubscriptionImpl { - syncCtx, cancelSync := context.WithCancel(ctx) - return &syncSubscriptionImpl{ - ctx: syncCtx, - syncId: syncId, - cancel: cancelSync, - dataChannel: make(chan *StreamAndCookie, 256), - controlChannel: make(chan syncOp, 64), - localStreams: make(map[StreamId]*events.SyncStream), - remoteStreams: make(map[StreamId]*syncNode), - remoteNodes: make(map[common.Address]*syncNode), - } -} - -type syncStream interface { - Send(msg *SyncStreamsResponse) error -} - -func (s *syncSubscriptionImpl) addLocalStream( - ctx context.Context, - syncCookie *SyncCookie, - stream *events.SyncStream, -) error { - log := 
dlog.FromCtx(ctx) - log.Debug( - "SyncStreams:syncSubscriptionImpl:addLocalStream: adding local stream", - "syncId", - s.syncId, - "streamId", - syncCookie.StreamId, - ) - streamId, err := StreamIdFromBytes(syncCookie.StreamId) - if err != nil { - return err - } - - var exists bool - - s.mu.Lock() - - // only add the stream if it doesn't already exist in the subscription - if _, exists = s.localStreams[streamId]; !exists { - s.localStreams[streamId] = stream - } - s.mu.Unlock() - - if exists { - log.Debug( - "SyncStreams:syncSubscriptionImpl:addLocalStream: local stream already exists", - "syncId", - s.syncId, - "streamId", - syncCookie.StreamId, - ) - } else { - // subscribe to the stream - err := (*stream).Sub(ctx, syncCookie, s) - if err != nil { - log.Error("SyncStreams:syncSubscriptionImpl:addLocalStream: error subscribing to stream", "syncId", s.syncId, "streamId", syncCookie.StreamId, "err", err) - return err - } - log.Debug("SyncStreams:syncSubscriptionImpl:addLocalStream: added local stream", "syncId", s.syncId, "streamId", syncCookie.StreamId) - } - - return nil -} - -func (s *syncSubscriptionImpl) removeLocalStream( - streamId StreamId, -) { - var stream *events.SyncStream - - s.mu.Lock() - if st := s.localStreams[streamId]; st != nil { - stream = st - delete(s.localStreams, streamId) - } - s.mu.Unlock() - - if stream != nil { - (*stream).Unsub(s) - } -} - -func (s *syncSubscriptionImpl) unsubLocalStreams() { - s.mu.Lock() - defer s.mu.Unlock() - for key, st := range s.localStreams { - stream := *st - stream.Unsub(s) - delete(s.localStreams, key) - } -} - -func (s *syncSubscriptionImpl) addSyncNode( - node *syncNode, - cookies []*SyncCookie, -) error { - s.mu.Lock() - defer s.mu.Unlock() - - if _, exists := s.remoteNodes[node.address]; !exists { - s.remoteNodes[node.address] = node - } else { - node = s.remoteNodes[node.address] - } - for _, cookie := range cookies { - streamId, err := StreamIdFromBytes(cookie.StreamId) - if err != nil { - return err - } 
- s.remoteStreams[streamId] = node - } - return nil -} - -func (s *syncSubscriptionImpl) addRemoteNode( - address common.Address, - node *syncNode, -) bool { - s.mu.Lock() - defer s.mu.Unlock() - // only add the node if it doesn't already exist in the subscription - if _, exists := s.remoteNodes[address]; !exists { - s.remoteNodes[address] = node - return true // added - } - return false // not added -} - -func (s *syncSubscriptionImpl) getLocalStream( - streamId StreamId, -) *events.SyncStream { - s.mu.Lock() - defer s.mu.Unlock() - return s.localStreams[streamId] -} - -func (s *syncSubscriptionImpl) getRemoteNode( - address common.Address, -) *syncNode { - s.mu.Lock() - defer s.mu.Unlock() - return s.remoteNodes[address] -} - -func (s *syncSubscriptionImpl) getRemoteNodes() []*syncNode { - copy := make([]*syncNode, 0) - s.mu.Lock() - defer s.mu.Unlock() - for _, node := range s.remoteNodes { - copy = append(copy, node) - } - return copy -} - -func (s *syncSubscriptionImpl) addRemoteStream( - cookie *SyncCookie, -) error { - s.mu.Lock() - defer s.mu.Unlock() - nodeAddress := common.BytesToAddress(cookie.NodeAddress) - if remote := s.remoteNodes[nodeAddress]; remote != nil { - streamId, err := StreamIdFromBytes(cookie.StreamId) - if err != nil { - return err - } - s.remoteStreams[streamId] = remote - } - return nil -} - -func (s *syncSubscriptionImpl) removeRemoteStream( - streamId StreamId, -) *syncNode { - s.mu.Lock() - defer s.mu.Unlock() - if remote := s.remoteStreams[streamId]; remote != nil { - delete(s.remoteStreams, streamId) - return remote - } - return nil -} - -func (s *syncSubscriptionImpl) purgeUnusedRemoteNodes(log *slog.Logger) { - nodesToRemove := make([]*syncNode, 0) - - log.Debug( - "SyncStreams:syncSubscriptionImpl:purgeUnusedRemoteNodes: purging unused remote nodes", - "syncId", - s.syncId, - ) - - s.mu.Lock() - if len(s.remoteNodes) > 0 { - for _, remote := range s.remoteNodes { - isUsed := false - if len(s.remoteStreams) > 0 { - for _, n := 
range s.remoteStreams { - if n == remote { - isUsed = true - break - } - } - if !isUsed { - nodesToRemove = append(nodesToRemove, remote) - delete(s.remoteNodes, remote.address) - } - } - } - } - s.mu.Unlock() - - // now purge the nodes - for _, remote := range nodesToRemove { - if remote != nil { - remote.close() - } - } - - log.Debug("SyncStreams:syncSubscriptionImpl:purgeUnusedRemoteNodes: purged remote nodes done", "syncId", s.syncId) -} - -func (s *syncSubscriptionImpl) deleteRemoteNodes() { - s.mu.Lock() - defer s.mu.Unlock() - for key := range s.remoteNodes { - delete(s.remoteNodes, key) - } - for key := range s.remoteStreams { - delete(s.remoteStreams, key) - } -} - -func (s *syncSubscriptionImpl) setErrorAndCancel(err error) { - s.mu.Lock() - if s.firstError == nil { - s.firstError = err - } - s.mu.Unlock() - - s.cancel() -} - -func (s *syncSubscriptionImpl) OnSyncError(err error) { - if s.ctx.Err() != nil { - return - } - log := dlog.FromCtx(s.ctx) - log.Info("SyncStreams:syncSubscriptionImpl:OnSyncError: received error", "error", err) - s.setErrorAndCancel(err) - log.Warn("SyncStreams:syncSubscriptionImpl:OnSyncError: cancelling sync", "error", err) -} - -func (s *syncSubscriptionImpl) OnUpdate(r *StreamAndCookie) { - // cancel if context is done - if s.ctx.Err() != nil { - return - } - - select { - case s.dataChannel <- r: - return - default: - // end the update stream if the channel is full - err := RiverError( - Err_BUFFER_FULL, - "channel full, dropping update and canceling", - "streamId", - r.NextSyncCookie.StreamId, - ). - Func("OnUpdate"). 
- LogWarn(dlog.FromCtx(s.ctx)) - s.setErrorAndCancel(err) - return - } -} - -func (s *syncSubscriptionImpl) OnClose() { - // cancel if context is done - if s.ctx.Err() != nil { - return - } - - log := dlog.FromCtx(s.ctx) - log.Debug("SyncStreams:OnClose: closing stream", "syncId", s.syncId) - c := baseSyncOp{ - op: SyncOp_SYNC_CLOSE, - } - select { - case s.controlChannel <- &c: - return - default: - log.Info("SyncStreams:OnClose: control channel full") - return - } -} - -func (s *syncSubscriptionImpl) Dispatch(res *connect.ServerStream[SyncStreamsResponse]) { - log := dlog.FromCtx(s.ctx) - - for { - select { - case <-s.ctx.Done(): - err := s.ctx.Err() - s.setErrorAndCancel(err) - log.Debug("SyncStreams: context done", "err", err) - return - case data, ok := <-s.dataChannel: - log.Debug( - "SyncStreams: Dispatch received response in dispatch loop", - "syncId", - s.syncId, - "data", - data, - ) - if ok { - // gather the response metadata + content, and send it - resp := events.SyncStreamsResponseFromStreamAndCookie(data) - resp.SyncId = s.syncId - resp.SyncOp = SyncOp_SYNC_UPDATE - if err := res.Send(resp); err != nil { - log.Info("SyncStreams: Dispatch error sending response", "syncId", s.syncId, "err", err) - s.setErrorAndCancel(err) - return - } - } else { - log.Debug("SyncStreams: Dispatch data channel closed", "syncId", s.syncId) - } - case control := <-s.controlChannel: - log.Debug("SyncStreams: Dispatch received control message", "syncId", s.syncId, "control", control) - if control.getOp() == SyncOp_SYNC_CLOSE { - err := res.Send(&SyncStreamsResponse{ - SyncId: s.syncId, - SyncOp: SyncOp_SYNC_CLOSE, - }) - if err != nil { - log.Warn( - "SyncStreams: Dispatch error sending close response", - "syncId", - s.syncId, - "err", - err, - ) - log.Warn("SyncStreams: error closing stream", "err", err) - } - s.cancel() - log.Debug("SyncStreams: closed stream", "syncId", s.syncId) - } else if control.getOp() == SyncOp_SYNC_PONG { - log.Debug("SyncStreams: send pong to 
client", "syncId", s.syncId) - data := control.(*pingOp) - err := res.Send(&SyncStreamsResponse{ - SyncId: s.syncId, - SyncOp: SyncOp_SYNC_PONG, - PongNonce: data.nonce, - }) - if err != nil { - log.Warn("SyncStreams: cancel stream because of error sending pong response", "syncId", s.syncId, "err", err) - s.cancel() - } - } else { - log.Warn("SyncStreams: Dispatch received unknown control message", "syncId", s.syncId, "control", control) - } - } - } -} - -func (s *syncSubscriptionImpl) getError() error { - s.mu.Lock() - defer s.mu.Unlock() - return s.firstError -} diff --git a/core/node/rpc/tester_test.go b/core/node/rpc/tester_test.go index 2deb294e6..0c8ad340f 100644 --- a/core/node/rpc/tester_test.go +++ b/core/node/rpc/tester_test.go @@ -258,6 +258,7 @@ func (st *serviceTester) startSingle(i int, opts ...startOpts) error { } return err } + st.nodes[i].service = service st.nodes[i].address = bc.Wallet.Address diff --git a/core/node/rpc/util.go b/core/node/rpc/util.go deleted file mode 100644 index f4070fad1..000000000 --- a/core/node/rpc/util.go +++ /dev/null @@ -1,43 +0,0 @@ -package rpc - -import ( - "context" - "log/slog" - - . "github.com/river-build/river/core/node/base" - "github.com/river-build/river/core/node/dlog" - . 
"github.com/river-build/river/core/node/protocol" - - "connectrpc.com/connect" - "github.com/ethereum/go-ethereum/common" -) - -type RequestWithStreamId interface { - GetStreamId() string -} - -func ctxAndLogForRequest[T any](ctx context.Context, req *connect.Request[T]) (context.Context, *slog.Logger) { - log := dlog.FromCtx(ctx) - - // Add streamId to log context if present in request - if reqMsg, ok := any(req.Msg).(RequestWithStreamId); ok { - streamId := reqMsg.GetStreamId() - if streamId != "" { - log = log.With("streamId", streamId) - return dlog.CtxWithLog(ctx, log), log - } - } - - return ctx, log -} - -func ParseEthereumAddress(address string) (common.Address, error) { - if len(address) != 42 { - return common.Address{}, RiverError(Err_BAD_ADDRESS, "invalid address length") - } - if address[:2] != "0x" { - return common.Address{}, RiverError(Err_BAD_ADDRESS, "invalid address prefix") - } - return common.HexToAddress(address), nil -} - diff --git a/core/node/utils/rpc.go b/core/node/utils/rpc.go new file mode 100644 index 000000000..bb3f373dd --- /dev/null +++ b/core/node/utils/rpc.go @@ -0,0 +1,35 @@ +package utils + +import ( + "context" + "log/slog" + + "connectrpc.com/connect" + "github.com/river-build/river/core/node/dlog" +) + +const ( + // RpcStreamIdKey is key under which the streamId is set if the RPC call is made within the context of a stream. + RpcStreamIdKey = "streamId" +) + +type RequestWithStreamId interface { + GetStreamId() string +} + +// CtxAndLogForRequest returns a new context and logger for the given request. +// If the request is made in the context of a stream it will try to add the stream id to the logger. 
+func CtxAndLogForRequest[T any](ctx context.Context, req *connect.Request[T]) (context.Context, *slog.Logger) { + log := dlog.FromCtx(ctx) + + // Add streamId to log context if present in request + if reqMsg, ok := any(req.Msg).(RequestWithStreamId); ok { + streamId := reqMsg.GetStreamId() + if streamId != "" { + log = log.With(RpcStreamIdKey, streamId) + return dlog.CtxWithLog(ctx, log), log + } + } + + return ctx, log +} diff --git a/packages/proto/src/gen/protocol_pb.ts b/packages/proto/src/gen/protocol_pb.ts new file mode 100644 index 000000000..0abe9a110 --- /dev/null +++ b/packages/proto/src/gen/protocol_pb.ts @@ -0,0 +1,5189 @@ +// @generated by protoc-gen-es v1.9.0 with parameter "target=ts" +// @generated from file protocol.proto (package river, syntax proto3) +/* eslint-disable */ +// @ts-nocheck + +import type { BinaryReadOptions, FieldList, JsonReadOptions, JsonValue, PartialMessage, PlainMessage } from "@bufbuild/protobuf"; +import { Empty, Message, proto3, protoInt64, Timestamp } from "@bufbuild/protobuf"; + +/** + * @generated from enum river.SyncOp + */ +export enum SyncOp { + /** + * @generated from enum value: SYNC_UNSPECIFIED = 0; + */ + SYNC_UNSPECIFIED = 0, + + /** + * new sync + * + * @generated from enum value: SYNC_NEW = 1; + */ + SYNC_NEW = 1, + + /** + * close the sync + * + * @generated from enum value: SYNC_CLOSE = 2; + */ + SYNC_CLOSE = 2, + + /** + * update from server + * + * @generated from enum value: SYNC_UPDATE = 3; + */ + SYNC_UPDATE = 3, + + /** + * respond to the ping message from the client. 
+ * + * @generated from enum value: SYNC_PONG = 4; + */ + SYNC_PONG = 4, + + /** + * indication that stream updates could (temporarily) not be provided + * + * @generated from enum value: SYNC_DOWN = 5; + */ + SYNC_DOWN = 5, +} +// Retrieve enum metadata with: proto3.getEnumType(SyncOp) +proto3.util.setEnumType(SyncOp, "river.SyncOp", [ + { no: 0, name: "SYNC_UNSPECIFIED" }, + { no: 1, name: "SYNC_NEW" }, + { no: 2, name: "SYNC_CLOSE" }, + { no: 3, name: "SYNC_UPDATE" }, + { no: 4, name: "SYNC_PONG" }, + { no: 5, name: "SYNC_DOWN" }, +]); + +/** + * @generated from enum river.MembershipOp + */ +export enum MembershipOp { + /** + * @generated from enum value: SO_UNSPECIFIED = 0; + */ + SO_UNSPECIFIED = 0, + + /** + * @generated from enum value: SO_INVITE = 1; + */ + SO_INVITE = 1, + + /** + * @generated from enum value: SO_JOIN = 2; + */ + SO_JOIN = 2, + + /** + * @generated from enum value: SO_LEAVE = 3; + */ + SO_LEAVE = 3, +} +// Retrieve enum metadata with: proto3.getEnumType(MembershipOp) +proto3.util.setEnumType(MembershipOp, "river.MembershipOp", [ + { no: 0, name: "SO_UNSPECIFIED" }, + { no: 1, name: "SO_INVITE" }, + { no: 2, name: "SO_JOIN" }, + { no: 3, name: "SO_LEAVE" }, +]); + +/** + * @generated from enum river.ChannelOp + */ +export enum ChannelOp { + /** + * @generated from enum value: CO_UNSPECIFIED = 0; + */ + CO_UNSPECIFIED = 0, + + /** + * @generated from enum value: CO_CREATED = 1; + */ + CO_CREATED = 1, + + /** + * @generated from enum value: CO_DELETED = 2; + */ + CO_DELETED = 2, + + /** + * @generated from enum value: CO_UPDATED = 4; + */ + CO_UPDATED = 4, +} +// Retrieve enum metadata with: proto3.getEnumType(ChannelOp) +proto3.util.setEnumType(ChannelOp, "river.ChannelOp", [ + { no: 0, name: "CO_UNSPECIFIED" }, + { no: 1, name: "CO_CREATED" }, + { no: 2, name: "CO_DELETED" }, + { no: 4, name: "CO_UPDATED" }, +]); + +/** + * Codes from 1 to 16 match gRPC/Connect codes. 
+ * + * @generated from enum river.Err + */ +export enum Err { + /** + * @generated from enum value: ERR_UNSPECIFIED = 0; + */ + ERR_UNSPECIFIED = 0, + + /** + * Canceled indicates that the operation was canceled, typically by the + * caller. + * + * @generated from enum value: CANCELED = 1; + */ + CANCELED = 1, + + /** + * Unknown indicates that the operation failed for an unknown reason. + * + * @generated from enum value: UNKNOWN = 2; + */ + UNKNOWN = 2, + + /** + * InvalidArgument indicates that client supplied an invalid argument. + * + * @generated from enum value: INVALID_ARGUMENT = 3; + */ + INVALID_ARGUMENT = 3, + + /** + * DeadlineExceeded indicates that deadline expired before the operation + * could complete. + * + * @generated from enum value: DEADLINE_EXCEEDED = 4; + */ + DEADLINE_EXCEEDED = 4, + + /** + * NotFound indicates that some requested entity (for example, a file or + * directory) was not found. + * + * @generated from enum value: NOT_FOUND = 5; + */ + NOT_FOUND = 5, + + /** + * AlreadyExists indicates that client attempted to create an entity (for + * example, a file or directory) that already exists. + * + * @generated from enum value: ALREADY_EXISTS = 6; + */ + ALREADY_EXISTS = 6, + + /** + * PermissionDenied indicates that the caller doesn't have permission to + * execute the specified operation. + * + * @generated from enum value: PERMISSION_DENIED = 7; + */ + PERMISSION_DENIED = 7, + + /** + * ResourceExhausted indicates that some resource has been exhausted. For + * example, a per-user quota may be exhausted or the entire file system may + * be full. + * + * @generated from enum value: RESOURCE_EXHAUSTED = 8; + */ + RESOURCE_EXHAUSTED = 8, + + /** + * FailedPrecondition indicates that the system is not in a state + * required for the operation's execution. 
+ * + * @generated from enum value: FAILED_PRECONDITION = 9; + */ + FAILED_PRECONDITION = 9, + + /** + * Aborted indicates that operation was aborted by the system, usually + * because of a concurrency issue such as a sequencer check failure or + * transaction abort. + * + * @generated from enum value: ABORTED = 10; + */ + ABORTED = 10, + + /** + * OutOfRange indicates that the operation was attempted past the valid + * range (for example, seeking past end-of-file). + * + * @generated from enum value: OUT_OF_RANGE = 11; + */ + OUT_OF_RANGE = 11, + + /** + * Unimplemented indicates that the operation isn't implemented, + * supported, or enabled in this service. + * + * @generated from enum value: UNIMPLEMENTED = 12; + */ + UNIMPLEMENTED = 12, + + /** + * Internal indicates that some invariants expected by the underlying + * system have been broken. This code is reserved for serious errors. + * + * @generated from enum value: INTERNAL = 13; + */ + INTERNAL = 13, + + /** + * Unavailable indicates that the service is currently unavailable. This + * is usually temporary, so clients can back off and retry idempotent + * operations. + * + * @generated from enum value: UNAVAILABLE = 14; + */ + UNAVAILABLE = 14, + + /** + * DataLoss indicates that the operation has resulted in unrecoverable + * data loss or corruption. + * + * @generated from enum value: DATA_LOSS = 15; + */ + DATA_LOSS = 15, + + /** + * Unauthenticated indicates that the request does not have valid + * authentication credentials for the operation. 
+ * + * @generated from enum value: UNAUTHENTICATED = 16; + */ + UNAUTHENTICATED = 16, + + /** + * @generated from enum value: DEBUG_ERROR = 17; + */ + DEBUG_ERROR = 17, + + /** + * @generated from enum value: BAD_STREAM_ID = 18; + */ + BAD_STREAM_ID = 18, + + /** + * @generated from enum value: BAD_STREAM_CREATION_PARAMS = 19; + */ + BAD_STREAM_CREATION_PARAMS = 19, + + /** + * @generated from enum value: INTERNAL_ERROR_SWITCH = 20; + */ + INTERNAL_ERROR_SWITCH = 20, + + /** + * @generated from enum value: BAD_EVENT_ID = 21; + */ + BAD_EVENT_ID = 21, + + /** + * @generated from enum value: BAD_EVENT_SIGNATURE = 22; + */ + BAD_EVENT_SIGNATURE = 22, + + /** + * @generated from enum value: BAD_HASH_FORMAT = 23; + */ + BAD_HASH_FORMAT = 23, + + /** + * @generated from enum value: BAD_PREV_MINIBLOCK_HASH = 24; + */ + BAD_PREV_MINIBLOCK_HASH = 24, + + /** + * @generated from enum value: NO_EVENT_SPECIFIED = 25; + */ + NO_EVENT_SPECIFIED = 25, + + /** + * @generated from enum value: BAD_EVENT = 26; + */ + BAD_EVENT = 26, + + /** + * @generated from enum value: USER_CANT_POST = 27; + */ + USER_CANT_POST = 27, + + /** + * @generated from enum value: STREAM_BAD_HASHES = 28; + */ + STREAM_BAD_HASHES = 28, + + /** + * @generated from enum value: STREAM_EMPTY = 29; + */ + STREAM_EMPTY = 29, + + /** + * @generated from enum value: STREAM_BAD_EVENT = 30; + */ + STREAM_BAD_EVENT = 30, + + /** + * @generated from enum value: BAD_DELEGATE_SIG = 31; + */ + BAD_DELEGATE_SIG = 31, + + /** + * @generated from enum value: BAD_PUBLIC_KEY = 32; + */ + BAD_PUBLIC_KEY = 32, + + /** + * @generated from enum value: BAD_PAYLOAD = 33; + */ + BAD_PAYLOAD = 33, + + /** + * @generated from enum value: BAD_HEX_STRING = 34; + */ + BAD_HEX_STRING = 34, + + /** + * @generated from enum value: BAD_EVENT_HASH = 35; + */ + BAD_EVENT_HASH = 35, + + /** + * @generated from enum value: BAD_SYNC_COOKIE = 36; + */ + BAD_SYNC_COOKIE = 36, + + /** + * @generated from enum value: DUPLICATE_EVENT = 37; + */ + 
DUPLICATE_EVENT = 37, + + /** + * @generated from enum value: BAD_BLOCK = 38; + */ + BAD_BLOCK = 38, + + /** + * @generated from enum value: STREAM_NO_INCEPTION_EVENT = 39; + */ + STREAM_NO_INCEPTION_EVENT = 39, + + /** + * @generated from enum value: BAD_BLOCK_NUMBER = 40; + */ + BAD_BLOCK_NUMBER = 40, + + /** + * @generated from enum value: BAD_MINIPOOL_SLOT = 41; + */ + BAD_MINIPOOL_SLOT = 41, + + /** + * @generated from enum value: BAD_CREATOR_ADDRESS = 42; + */ + BAD_CREATOR_ADDRESS = 42, + + /** + * @generated from enum value: STALE_DELEGATE = 43; + */ + STALE_DELEGATE = 43, + + /** + * @generated from enum value: BAD_LINK_WALLET_BAD_SIGNATURE = 44; + */ + BAD_LINK_WALLET_BAD_SIGNATURE = 44, + + /** + * @generated from enum value: BAD_ROOT_KEY_ID = 45; + */ + BAD_ROOT_KEY_ID = 45, + + /** + * @generated from enum value: UNKNOWN_NODE = 46; + */ + UNKNOWN_NODE = 46, + + /** + * @generated from enum value: DB_OPERATION_FAILURE = 47; + */ + DB_OPERATION_FAILURE = 47, + + /** + * @generated from enum value: MINIBLOCKS_STORAGE_FAILURE = 48; + */ + MINIBLOCKS_STORAGE_FAILURE = 48, + + /** + * @generated from enum value: BAD_ADDRESS = 49; + */ + BAD_ADDRESS = 49, + + /** + * @generated from enum value: BUFFER_FULL = 50; + */ + BUFFER_FULL = 50, + + /** + * @generated from enum value: BAD_CONFIG = 51; + */ + BAD_CONFIG = 51, + + /** + * @generated from enum value: BAD_CONTRACT = 52; + */ + BAD_CONTRACT = 52, + + /** + * @generated from enum value: CANNOT_CONNECT = 53; + */ + CANNOT_CONNECT = 53, + + /** + * @generated from enum value: CANNOT_GET_LINKED_WALLETS = 54; + */ + CANNOT_GET_LINKED_WALLETS = 54, + + /** + * @generated from enum value: CANNOT_CHECK_ENTITLEMENTS = 55; + */ + CANNOT_CHECK_ENTITLEMENTS = 55, + + /** + * @generated from enum value: CANNOT_CALL_CONTRACT = 56; + */ + CANNOT_CALL_CONTRACT = 56, + + /** + * @generated from enum value: SPACE_DISABLED = 57; + */ + SPACE_DISABLED = 57, + + /** + * @generated from enum value: CHANNEL_DISABLED = 58; + */ + 
CHANNEL_DISABLED = 58, + + /** + * @generated from enum value: WRONG_STREAM_TYPE = 59; + */ + WRONG_STREAM_TYPE = 59, + + /** + * @generated from enum value: MINIPOOL_MISSING_EVENTS = 60; + */ + MINIPOOL_MISSING_EVENTS = 60, + + /** + * @generated from enum value: STREAM_LAST_BLOCK_MISMATCH = 61; + */ + STREAM_LAST_BLOCK_MISMATCH = 61, + + /** + * @generated from enum value: DOWNSTREAM_NETWORK_ERROR = 62; + */ + DOWNSTREAM_NETWORK_ERROR = 62, +} +// Retrieve enum metadata with: proto3.getEnumType(Err) +proto3.util.setEnumType(Err, "river.Err", [ + { no: 0, name: "ERR_UNSPECIFIED" }, + { no: 1, name: "CANCELED" }, + { no: 2, name: "UNKNOWN" }, + { no: 3, name: "INVALID_ARGUMENT" }, + { no: 4, name: "DEADLINE_EXCEEDED" }, + { no: 5, name: "NOT_FOUND" }, + { no: 6, name: "ALREADY_EXISTS" }, + { no: 7, name: "PERMISSION_DENIED" }, + { no: 8, name: "RESOURCE_EXHAUSTED" }, + { no: 9, name: "FAILED_PRECONDITION" }, + { no: 10, name: "ABORTED" }, + { no: 11, name: "OUT_OF_RANGE" }, + { no: 12, name: "UNIMPLEMENTED" }, + { no: 13, name: "INTERNAL" }, + { no: 14, name: "UNAVAILABLE" }, + { no: 15, name: "DATA_LOSS" }, + { no: 16, name: "UNAUTHENTICATED" }, + { no: 17, name: "DEBUG_ERROR" }, + { no: 18, name: "BAD_STREAM_ID" }, + { no: 19, name: "BAD_STREAM_CREATION_PARAMS" }, + { no: 20, name: "INTERNAL_ERROR_SWITCH" }, + { no: 21, name: "BAD_EVENT_ID" }, + { no: 22, name: "BAD_EVENT_SIGNATURE" }, + { no: 23, name: "BAD_HASH_FORMAT" }, + { no: 24, name: "BAD_PREV_MINIBLOCK_HASH" }, + { no: 25, name: "NO_EVENT_SPECIFIED" }, + { no: 26, name: "BAD_EVENT" }, + { no: 27, name: "USER_CANT_POST" }, + { no: 28, name: "STREAM_BAD_HASHES" }, + { no: 29, name: "STREAM_EMPTY" }, + { no: 30, name: "STREAM_BAD_EVENT" }, + { no: 31, name: "BAD_DELEGATE_SIG" }, + { no: 32, name: "BAD_PUBLIC_KEY" }, + { no: 33, name: "BAD_PAYLOAD" }, + { no: 34, name: "BAD_HEX_STRING" }, + { no: 35, name: "BAD_EVENT_HASH" }, + { no: 36, name: "BAD_SYNC_COOKIE" }, + { no: 37, name: "DUPLICATE_EVENT" }, + { 
no: 38, name: "BAD_BLOCK" }, + { no: 39, name: "STREAM_NO_INCEPTION_EVENT" }, + { no: 40, name: "BAD_BLOCK_NUMBER" }, + { no: 41, name: "BAD_MINIPOOL_SLOT" }, + { no: 42, name: "BAD_CREATOR_ADDRESS" }, + { no: 43, name: "STALE_DELEGATE" }, + { no: 44, name: "BAD_LINK_WALLET_BAD_SIGNATURE" }, + { no: 45, name: "BAD_ROOT_KEY_ID" }, + { no: 46, name: "UNKNOWN_NODE" }, + { no: 47, name: "DB_OPERATION_FAILURE" }, + { no: 48, name: "MINIBLOCKS_STORAGE_FAILURE" }, + { no: 49, name: "BAD_ADDRESS" }, + { no: 50, name: "BUFFER_FULL" }, + { no: 51, name: "BAD_CONFIG" }, + { no: 52, name: "BAD_CONTRACT" }, + { no: 53, name: "CANNOT_CONNECT" }, + { no: 54, name: "CANNOT_GET_LINKED_WALLETS" }, + { no: 55, name: "CANNOT_CHECK_ENTITLEMENTS" }, + { no: 56, name: "CANNOT_CALL_CONTRACT" }, + { no: 57, name: "SPACE_DISABLED" }, + { no: 58, name: "CHANNEL_DISABLED" }, + { no: 59, name: "WRONG_STREAM_TYPE" }, + { no: 60, name: "MINIPOOL_MISSING_EVENTS" }, + { no: 61, name: "STREAM_LAST_BLOCK_MISMATCH" }, + { no: 62, name: "DOWNSTREAM_NETWORK_ERROR" }, +]); + +/** + * * + * Miniblock contains a list of events and the header event. + * Events must be in the same order as in the header, which is of type MiniblockHeader. + * Only signed data (Envelopes) should exist in this data structure. 
+ * + * @generated from message river.Miniblock + */ +export class Miniblock extends Message { + /** + * @generated from field: repeated river.Envelope events = 1; + */ + events: Envelope[] = []; + + /** + * @generated from field: river.Envelope header = 2; + */ + header?: Envelope; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.Miniblock"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "events", kind: "message", T: Envelope, repeated: true }, + { no: 2, name: "header", kind: "message", T: Envelope }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): Miniblock { + return new Miniblock().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): Miniblock { + return new Miniblock().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): Miniblock { + return new Miniblock().fromJsonString(jsonString, options); + } + + static equals(a: Miniblock | PlainMessage | undefined, b: Miniblock | PlainMessage | undefined): boolean { + return proto3.util.equals(Miniblock, a, b); + } +} + +/** + * * + * Envelope contains serialized event, and its hash and signature. + * hash is used as event id. Subsequent events reference this event by hash. + * event is a serialized StreamEvent + * + * @generated from message river.Envelope + */ +export class Envelope extends Message { + /** + * * + * Hash of event. + * While hash can be recalculated from the event, having it here explicitely + * makes it easier to work with event. + * For the event to be valid, must match hash of event field. + * + * @generated from field: bytes hash = 1; + */ + hash = new Uint8Array(0); + + /** + * * + * Signature. 
+ * For the event to be valid, signature must match event.creator_address + * or be signed by the address from evant.delegate_sig. + * + * @generated from field: bytes signature = 2; + */ + signature = new Uint8Array(0); + + /** + * @generated from field: bytes event = 3; + */ + event = new Uint8Array(0); + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.Envelope"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "hash", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 2, name: "signature", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 3, name: "event", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): Envelope { + return new Envelope().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): Envelope { + return new Envelope().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): Envelope { + return new Envelope().fromJsonString(jsonString, options); + } + + static equals(a: Envelope | PlainMessage | undefined, b: Envelope | PlainMessage | undefined): boolean { + return proto3.util.equals(Envelope, a, b); + } +} + +/** + * * + * StreamEvent is a single event in the stream. + * + * @generated from message river.StreamEvent + */ +export class StreamEvent extends Message { + /** + * * + * Address of the creator of the event. + * For user - address of the user's wallet. + * For server - address of the server's keypair in staking smart contract. + * + * For the event to be valid: + * If delegate_sig is present, creator_address must match delegate_sig. + * If delegate_sig is not present, creator_address must match event signature in the Envelope. 
+ * + * @generated from field: bytes creator_address = 1; + */ + creatorAddress = new Uint8Array(0); + + /** + * * + * delegate_sig allows event to be signed by a delegate keypair + * + * delegate_sig constains signature of the + * public key of the delegate keypair + the delegate_expirary_epoch_ms. + * User's wallet is used to produce this signature. + * + * If present, for the event to be valid: + * 1. creator_address must match delegate_sig's signer public key + * 2. delegate_sig should be signed as an Ethereum Signed Message (eip-191) + * + * Server nodes sign node-produced events with their own keypair and do not + * need to use delegate_sig. + * + * @generated from field: bytes delegate_sig = 2; + */ + delegateSig = new Uint8Array(0); + + /** + * * Salt ensures that similar messages are not hashed to the same value. genId() from id.ts may be used. + * + * @generated from field: bytes salt = 3; + */ + salt = new Uint8Array(0); + + /** + * * Hash of a preceding miniblock. Null for the inception event. Must be a recent miniblock + * + * @generated from field: optional bytes prev_miniblock_hash = 4; + */ + prevMiniblockHash?: Uint8Array; + + /** + * * CreatedAt is the time when the event was created. + * NOTE: this value is set by clients and is not reliable for anything other than displaying + * the value to the user. Never use this value to sort events from different users. + * + * @generated from field: int64 created_at_epoch_ms = 5; + */ + createdAtEpochMs = protoInt64.zero; + + /** + * * DelegateExpiry is the time when the delegate signature expires. + * + * @generated from field: int64 delegate_expiry_epoch_ms = 6; + */ + delegateExpiryEpochMs = protoInt64.zero; + + /** + * * Variable-type payload. 
+ * Payloads should obey the following rules: + * - payloads should have their own unique type + * - each payload should have a oneof content field + * - each payload, with the exception of miniblock header and member payloads + * should have an inception field inside the content oneof + * - each payload should have a unique Inception type + * - payloads can't violate previous type recursively to inception payload + * + * @generated from oneof river.StreamEvent.payload + */ + payload: { + /** + * @generated from field: river.MiniblockHeader miniblock_header = 100; + */ + value: MiniblockHeader; + case: "miniblockHeader"; + } | { + /** + * @generated from field: river.MemberPayload member_payload = 101; + */ + value: MemberPayload; + case: "memberPayload"; + } | { + /** + * @generated from field: river.SpacePayload space_payload = 102; + */ + value: SpacePayload; + case: "spacePayload"; + } | { + /** + * @generated from field: river.ChannelPayload channel_payload = 103; + */ + value: ChannelPayload; + case: "channelPayload"; + } | { + /** + * @generated from field: river.UserPayload user_payload = 104; + */ + value: UserPayload; + case: "userPayload"; + } | { + /** + * @generated from field: river.UserSettingsPayload user_settings_payload = 105; + */ + value: UserSettingsPayload; + case: "userSettingsPayload"; + } | { + /** + * @generated from field: river.UserDeviceKeyPayload user_device_key_payload = 106; + */ + value: UserDeviceKeyPayload; + case: "userDeviceKeyPayload"; + } | { + /** + * @generated from field: river.UserInboxPayload user_inbox_payload = 107; + */ + value: UserInboxPayload; + case: "userInboxPayload"; + } | { + /** + * @generated from field: river.MediaPayload media_payload = 108; + */ + value: MediaPayload; + case: "mediaPayload"; + } | { + /** + * @generated from field: river.DmChannelPayload dm_channel_payload = 109; + */ + value: DmChannelPayload; + case: "dmChannelPayload"; + } | { + /** + * @generated from field: river.GdmChannelPayload 
gdm_channel_payload = 110; + */ + value: GdmChannelPayload; + case: "gdmChannelPayload"; + } | { case: undefined; value?: undefined } = { case: undefined }; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.StreamEvent"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "creator_address", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 2, name: "delegate_sig", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 3, name: "salt", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 4, name: "prev_miniblock_hash", kind: "scalar", T: 12 /* ScalarType.BYTES */, opt: true }, + { no: 5, name: "created_at_epoch_ms", kind: "scalar", T: 3 /* ScalarType.INT64 */ }, + { no: 6, name: "delegate_expiry_epoch_ms", kind: "scalar", T: 3 /* ScalarType.INT64 */ }, + { no: 100, name: "miniblock_header", kind: "message", T: MiniblockHeader, oneof: "payload" }, + { no: 101, name: "member_payload", kind: "message", T: MemberPayload, oneof: "payload" }, + { no: 102, name: "space_payload", kind: "message", T: SpacePayload, oneof: "payload" }, + { no: 103, name: "channel_payload", kind: "message", T: ChannelPayload, oneof: "payload" }, + { no: 104, name: "user_payload", kind: "message", T: UserPayload, oneof: "payload" }, + { no: 105, name: "user_settings_payload", kind: "message", T: UserSettingsPayload, oneof: "payload" }, + { no: 106, name: "user_device_key_payload", kind: "message", T: UserDeviceKeyPayload, oneof: "payload" }, + { no: 107, name: "user_inbox_payload", kind: "message", T: UserInboxPayload, oneof: "payload" }, + { no: 108, name: "media_payload", kind: "message", T: MediaPayload, oneof: "payload" }, + { no: 109, name: "dm_channel_payload", kind: "message", T: DmChannelPayload, oneof: "payload" }, + { no: 110, name: "gdm_channel_payload", kind: "message", T: GdmChannelPayload, oneof: 
"payload" }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): StreamEvent { + return new StreamEvent().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): StreamEvent { + return new StreamEvent().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): StreamEvent { + return new StreamEvent().fromJsonString(jsonString, options); + } + + static equals(a: StreamEvent | PlainMessage | undefined, b: StreamEvent | PlainMessage | undefined): boolean { + return proto3.util.equals(StreamEvent, a, b); + } +} + +/** + * * + * MiniblockHeader is a special event that forms a block from set of the stream events. + * Hash of the serialized StreamEvent containing MiniblockHeader is used as a block hash. + * + * @generated from message river.MiniblockHeader + */ +export class MiniblockHeader extends Message { + /** + * Miniblock number. + * 0 for genesis block. + * Must be 1 greater than the previous block number. + * + * @generated from field: int64 miniblock_num = 1; + */ + miniblockNum = protoInt64.zero; + + /** + * Hash of the previous block. + * + * @generated from field: bytes prev_miniblock_hash = 2; + */ + prevMiniblockHash = new Uint8Array(0); + + /** + * Timestamp of the block. + * Must be greater than the previous block timestamp. + * + * @generated from field: google.protobuf.Timestamp timestamp = 3; + */ + timestamp?: Timestamp; + + /** + * Hashes of the events included in the block. + * + * @generated from field: repeated bytes event_hashes = 4; + */ + eventHashes: Uint8Array[] = []; + + /** + * Snapshot of the state at the end of the block. 
+ * + * @generated from field: optional river.Snapshot snapshot = 5; + */ + snapshot?: Snapshot; + + /** + * count of all events in the stream before this block + * + * @generated from field: int64 event_num_offset = 6; + */ + eventNumOffset = protoInt64.zero; + + /** + * pointer to block with previous snapshot + * + * @generated from field: int64 prev_snapshot_miniblock_num = 7; + */ + prevSnapshotMiniblockNum = protoInt64.zero; + + /** + * stream payloads are required to have a content field + * + * @generated from oneof river.MiniblockHeader.content + */ + content: { + /** + * @generated from field: google.protobuf.Empty none = 100; + */ + value: Empty; + case: "none"; + } | { case: undefined; value?: undefined } = { case: undefined }; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.MiniblockHeader"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "miniblock_num", kind: "scalar", T: 3 /* ScalarType.INT64 */ }, + { no: 2, name: "prev_miniblock_hash", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 3, name: "timestamp", kind: "message", T: Timestamp }, + { no: 4, name: "event_hashes", kind: "scalar", T: 12 /* ScalarType.BYTES */, repeated: true }, + { no: 5, name: "snapshot", kind: "message", T: Snapshot, opt: true }, + { no: 6, name: "event_num_offset", kind: "scalar", T: 3 /* ScalarType.INT64 */ }, + { no: 7, name: "prev_snapshot_miniblock_num", kind: "scalar", T: 3 /* ScalarType.INT64 */ }, + { no: 100, name: "none", kind: "message", T: Empty, oneof: "content" }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): MiniblockHeader { + return new MiniblockHeader().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): MiniblockHeader { + return new MiniblockHeader().fromJson(jsonValue, options); + } + + static 
fromJsonString(jsonString: string, options?: Partial): MiniblockHeader { + return new MiniblockHeader().fromJsonString(jsonString, options); + } + + static equals(a: MiniblockHeader | PlainMessage | undefined, b: MiniblockHeader | PlainMessage | undefined): boolean { + return proto3.util.equals(MiniblockHeader, a, b); + } +} + +/** + * * + * MemberPayload + * can appear in any stream + * + * @generated from message river.MemberPayload + */ +export class MemberPayload extends Message { + /** + * @generated from oneof river.MemberPayload.content + */ + content: { + /** + * @generated from field: river.MemberPayload.Membership membership = 1; + */ + value: MemberPayload_Membership; + case: "membership"; + } | { + /** + * @generated from field: river.MemberPayload.KeySolicitation key_solicitation = 2; + */ + value: MemberPayload_KeySolicitation; + case: "keySolicitation"; + } | { + /** + * @generated from field: river.MemberPayload.KeyFulfillment key_fulfillment = 3; + */ + value: MemberPayload_KeyFulfillment; + case: "keyFulfillment"; + } | { + /** + * @generated from field: river.EncryptedData username = 4; + */ + value: EncryptedData; + case: "username"; + } | { + /** + * @generated from field: river.EncryptedData display_name = 5; + */ + value: EncryptedData; + case: "displayName"; + } | { + /** + * @generated from field: bytes ens_address = 6; + */ + value: Uint8Array; + case: "ensAddress"; + } | { + /** + * @generated from field: river.MemberPayload.Nft nft = 7; + */ + value: MemberPayload_Nft; + case: "nft"; + } | { + /** + * @generated from field: river.MemberPayload.Pin pin = 8; + */ + value: MemberPayload_Pin; + case: "pin"; + } | { + /** + * @generated from field: river.MemberPayload.Unpin unpin = 9; + */ + value: MemberPayload_Unpin; + case: "unpin"; + } | { case: undefined; value?: undefined } = { case: undefined }; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = 
proto3; + static readonly typeName = "river.MemberPayload"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "membership", kind: "message", T: MemberPayload_Membership, oneof: "content" }, + { no: 2, name: "key_solicitation", kind: "message", T: MemberPayload_KeySolicitation, oneof: "content" }, + { no: 3, name: "key_fulfillment", kind: "message", T: MemberPayload_KeyFulfillment, oneof: "content" }, + { no: 4, name: "username", kind: "message", T: EncryptedData, oneof: "content" }, + { no: 5, name: "display_name", kind: "message", T: EncryptedData, oneof: "content" }, + { no: 6, name: "ens_address", kind: "scalar", T: 12 /* ScalarType.BYTES */, oneof: "content" }, + { no: 7, name: "nft", kind: "message", T: MemberPayload_Nft, oneof: "content" }, + { no: 8, name: "pin", kind: "message", T: MemberPayload_Pin, oneof: "content" }, + { no: 9, name: "unpin", kind: "message", T: MemberPayload_Unpin, oneof: "content" }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): MemberPayload { + return new MemberPayload().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): MemberPayload { + return new MemberPayload().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): MemberPayload { + return new MemberPayload().fromJsonString(jsonString, options); + } + + static equals(a: MemberPayload | PlainMessage | undefined, b: MemberPayload | PlainMessage | undefined): boolean { + return proto3.util.equals(MemberPayload, a, b); + } +} + +/** + * @generated from message river.MemberPayload.Snapshot + */ +export class MemberPayload_Snapshot extends Message { + /** + * @generated from field: repeated river.MemberPayload.Snapshot.Member joined = 1; + */ + joined: MemberPayload_Snapshot_Member[] = []; + + /** + * @generated from field: repeated river.MemberPayload.SnappedPin pins = 2; + */ + pins: MemberPayload_SnappedPin[] = []; + + constructor(data?: 
PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.MemberPayload.Snapshot"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "joined", kind: "message", T: MemberPayload_Snapshot_Member, repeated: true }, + { no: 2, name: "pins", kind: "message", T: MemberPayload_SnappedPin, repeated: true }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): MemberPayload_Snapshot { + return new MemberPayload_Snapshot().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): MemberPayload_Snapshot { + return new MemberPayload_Snapshot().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): MemberPayload_Snapshot { + return new MemberPayload_Snapshot().fromJsonString(jsonString, options); + } + + static equals(a: MemberPayload_Snapshot | PlainMessage | undefined, b: MemberPayload_Snapshot | PlainMessage | undefined): boolean { + return proto3.util.equals(MemberPayload_Snapshot, a, b); + } +} + +/** + * @generated from message river.MemberPayload.Snapshot.Member + */ +export class MemberPayload_Snapshot_Member extends Message { + /** + * @generated from field: bytes user_address = 1; + */ + userAddress = new Uint8Array(0); + + /** + * @generated from field: int64 miniblock_num = 2; + */ + miniblockNum = protoInt64.zero; + + /** + * @generated from field: int64 event_num = 3; + */ + eventNum = protoInt64.zero; + + /** + * @generated from field: repeated river.MemberPayload.KeySolicitation solicitations = 4; + */ + solicitations: MemberPayload_KeySolicitation[] = []; + + /** + * @generated from field: river.WrappedEncryptedData username = 5; + */ + username?: WrappedEncryptedData; + + /** + * @generated from field: river.WrappedEncryptedData display_name = 6; + */ + displayName?: WrappedEncryptedData; + + /** + * @generated from field: bytes 
ens_address = 7; + */ + ensAddress = new Uint8Array(0); + + /** + * @generated from field: river.MemberPayload.Nft nft = 8; + */ + nft?: MemberPayload_Nft; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.MemberPayload.Snapshot.Member"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "user_address", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 2, name: "miniblock_num", kind: "scalar", T: 3 /* ScalarType.INT64 */ }, + { no: 3, name: "event_num", kind: "scalar", T: 3 /* ScalarType.INT64 */ }, + { no: 4, name: "solicitations", kind: "message", T: MemberPayload_KeySolicitation, repeated: true }, + { no: 5, name: "username", kind: "message", T: WrappedEncryptedData }, + { no: 6, name: "display_name", kind: "message", T: WrappedEncryptedData }, + { no: 7, name: "ens_address", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 8, name: "nft", kind: "message", T: MemberPayload_Nft }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): MemberPayload_Snapshot_Member { + return new MemberPayload_Snapshot_Member().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): MemberPayload_Snapshot_Member { + return new MemberPayload_Snapshot_Member().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): MemberPayload_Snapshot_Member { + return new MemberPayload_Snapshot_Member().fromJsonString(jsonString, options); + } + + static equals(a: MemberPayload_Snapshot_Member | PlainMessage | undefined, b: MemberPayload_Snapshot_Member | PlainMessage | undefined): boolean { + return proto3.util.equals(MemberPayload_Snapshot_Member, a, b); + } +} + +/** + * @generated from message river.MemberPayload.Membership + */ +export class MemberPayload_Membership extends Message { + /** + * @generated from field: 
river.MembershipOp op = 1; + */ + op = MembershipOp.SO_UNSPECIFIED; + + /** + * @generated from field: bytes user_address = 2; + */ + userAddress = new Uint8Array(0); + + /** + * @generated from field: bytes initiator_address = 3; + */ + initiatorAddress = new Uint8Array(0); + + /** + * @generated from field: optional bytes stream_parent_id = 4; + */ + streamParentId?: Uint8Array; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.MemberPayload.Membership"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "op", kind: "enum", T: proto3.getEnumType(MembershipOp) }, + { no: 2, name: "user_address", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 3, name: "initiator_address", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 4, name: "stream_parent_id", kind: "scalar", T: 12 /* ScalarType.BYTES */, opt: true }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): MemberPayload_Membership { + return new MemberPayload_Membership().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): MemberPayload_Membership { + return new MemberPayload_Membership().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): MemberPayload_Membership { + return new MemberPayload_Membership().fromJsonString(jsonString, options); + } + + static equals(a: MemberPayload_Membership | PlainMessage | undefined, b: MemberPayload_Membership | PlainMessage | undefined): boolean { + return proto3.util.equals(MemberPayload_Membership, a, b); + } +} + +/** + * @generated from message river.MemberPayload.KeySolicitation + */ +export class MemberPayload_KeySolicitation extends Message { + /** + * requesters device_key + * + * @generated from field: string device_key = 1; + */ + deviceKey = ""; + + /** + * requesters fallback_key 
+ * + * @generated from field: string fallback_key = 2; + */ + fallbackKey = ""; + + /** + * true if this is a new device, session_ids will be empty + * + * @generated from field: bool is_new_device = 3; + */ + isNewDevice = false; + + /** + * @generated from field: repeated string session_ids = 4; + */ + sessionIds: string[] = []; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.MemberPayload.KeySolicitation"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "device_key", kind: "scalar", T: 9 /* ScalarType.STRING */ }, + { no: 2, name: "fallback_key", kind: "scalar", T: 9 /* ScalarType.STRING */ }, + { no: 3, name: "is_new_device", kind: "scalar", T: 8 /* ScalarType.BOOL */ }, + { no: 4, name: "session_ids", kind: "scalar", T: 9 /* ScalarType.STRING */, repeated: true }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): MemberPayload_KeySolicitation { + return new MemberPayload_KeySolicitation().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): MemberPayload_KeySolicitation { + return new MemberPayload_KeySolicitation().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): MemberPayload_KeySolicitation { + return new MemberPayload_KeySolicitation().fromJsonString(jsonString, options); + } + + static equals(a: MemberPayload_KeySolicitation | PlainMessage | undefined, b: MemberPayload_KeySolicitation | PlainMessage | undefined): boolean { + return proto3.util.equals(MemberPayload_KeySolicitation, a, b); + } +} + +/** + * @generated from message river.MemberPayload.KeyFulfillment + */ +export class MemberPayload_KeyFulfillment extends Message { + /** + * @generated from field: bytes user_address = 1; + */ + userAddress = new Uint8Array(0); + + /** + * @generated from field: string device_key 
= 2; + */ + deviceKey = ""; + + /** + * @generated from field: repeated string session_ids = 3; + */ + sessionIds: string[] = []; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.MemberPayload.KeyFulfillment"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "user_address", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 2, name: "device_key", kind: "scalar", T: 9 /* ScalarType.STRING */ }, + { no: 3, name: "session_ids", kind: "scalar", T: 9 /* ScalarType.STRING */, repeated: true }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): MemberPayload_KeyFulfillment { + return new MemberPayload_KeyFulfillment().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): MemberPayload_KeyFulfillment { + return new MemberPayload_KeyFulfillment().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): MemberPayload_KeyFulfillment { + return new MemberPayload_KeyFulfillment().fromJsonString(jsonString, options); + } + + static equals(a: MemberPayload_KeyFulfillment | PlainMessage | undefined, b: MemberPayload_KeyFulfillment | PlainMessage | undefined): boolean { + return proto3.util.equals(MemberPayload_KeyFulfillment, a, b); + } +} + +/** + * @generated from message river.MemberPayload.Nft + */ +export class MemberPayload_Nft extends Message { + /** + * @generated from field: int32 chain_id = 1; + */ + chainId = 0; + + /** + * @generated from field: bytes contract_address = 2; + */ + contractAddress = new Uint8Array(0); + + /** + * @generated from field: bytes token_id = 3; + */ + tokenId = new Uint8Array(0); + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = 
"river.MemberPayload.Nft"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "chain_id", kind: "scalar", T: 5 /* ScalarType.INT32 */ }, + { no: 2, name: "contract_address", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 3, name: "token_id", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): MemberPayload_Nft { + return new MemberPayload_Nft().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): MemberPayload_Nft { + return new MemberPayload_Nft().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): MemberPayload_Nft { + return new MemberPayload_Nft().fromJsonString(jsonString, options); + } + + static equals(a: MemberPayload_Nft | PlainMessage | undefined, b: MemberPayload_Nft | PlainMessage | undefined): boolean { + return proto3.util.equals(MemberPayload_Nft, a, b); + } +} + +/** + * @generated from message river.MemberPayload.SnappedPin + */ +export class MemberPayload_SnappedPin extends Message { + /** + * @generated from field: bytes creator_address = 1; + */ + creatorAddress = new Uint8Array(0); + + /** + * @generated from field: river.MemberPayload.Pin pin = 2; + */ + pin?: MemberPayload_Pin; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.MemberPayload.SnappedPin"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "creator_address", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 2, name: "pin", kind: "message", T: MemberPayload_Pin }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): MemberPayload_SnappedPin { + return new MemberPayload_SnappedPin().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): MemberPayload_SnappedPin 
{ + return new MemberPayload_SnappedPin().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): MemberPayload_SnappedPin { + return new MemberPayload_SnappedPin().fromJsonString(jsonString, options); + } + + static equals(a: MemberPayload_SnappedPin | PlainMessage | undefined, b: MemberPayload_SnappedPin | PlainMessage | undefined): boolean { + return proto3.util.equals(MemberPayload_SnappedPin, a, b); + } +} + +/** + * @generated from message river.MemberPayload.Pin + */ +export class MemberPayload_Pin extends Message { + /** + * @generated from field: bytes event_id = 1; + */ + eventId = new Uint8Array(0); + + /** + * @generated from field: river.StreamEvent event = 2; + */ + event?: StreamEvent; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.MemberPayload.Pin"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "event_id", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 2, name: "event", kind: "message", T: StreamEvent }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): MemberPayload_Pin { + return new MemberPayload_Pin().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): MemberPayload_Pin { + return new MemberPayload_Pin().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): MemberPayload_Pin { + return new MemberPayload_Pin().fromJsonString(jsonString, options); + } + + static equals(a: MemberPayload_Pin | PlainMessage | undefined, b: MemberPayload_Pin | PlainMessage | undefined): boolean { + return proto3.util.equals(MemberPayload_Pin, a, b); + } +} + +/** + * @generated from message river.MemberPayload.Unpin + */ +export class MemberPayload_Unpin extends Message { + /** + * @generated from field: bytes event_id = 1; + */ + 
eventId = new Uint8Array(0); + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.MemberPayload.Unpin"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "event_id", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): MemberPayload_Unpin { + return new MemberPayload_Unpin().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): MemberPayload_Unpin { + return new MemberPayload_Unpin().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): MemberPayload_Unpin { + return new MemberPayload_Unpin().fromJsonString(jsonString, options); + } + + static equals(a: MemberPayload_Unpin | PlainMessage | undefined, b: MemberPayload_Unpin | PlainMessage | undefined): boolean { + return proto3.util.equals(MemberPayload_Unpin, a, b); + } +} + +/** + * * + * SpacePayload + * + * @generated from message river.SpacePayload + */ +export class SpacePayload extends Message { + /** + * @generated from oneof river.SpacePayload.content + */ + content: { + /** + * @generated from field: river.SpacePayload.Inception inception = 1; + */ + value: SpacePayload_Inception; + case: "inception"; + } | { + /** + * @generated from field: river.SpacePayload.ChannelUpdate channel = 2; + */ + value: SpacePayload_ChannelUpdate; + case: "channel"; + } | { case: undefined; value?: undefined } = { case: undefined }; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.SpacePayload"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "inception", kind: "message", T: SpacePayload_Inception, oneof: "content" }, + { no: 2, name: 
"channel", kind: "message", T: SpacePayload_ChannelUpdate, oneof: "content" }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): SpacePayload { + return new SpacePayload().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): SpacePayload { + return new SpacePayload().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): SpacePayload { + return new SpacePayload().fromJsonString(jsonString, options); + } + + static equals(a: SpacePayload | PlainMessage | undefined, b: SpacePayload | PlainMessage | undefined): boolean { + return proto3.util.equals(SpacePayload, a, b); + } +} + +/** + * @generated from message river.SpacePayload.Snapshot + */ +export class SpacePayload_Snapshot extends Message { + /** + * inception + * + * @generated from field: river.SpacePayload.Inception inception = 1; + */ + inception?: SpacePayload_Inception; + + /** + * channels: sorted by channel_id + * + * @generated from field: repeated river.SpacePayload.ChannelMetadata channels = 2; + */ + channels: SpacePayload_ChannelMetadata[] = []; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.SpacePayload.Snapshot"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "inception", kind: "message", T: SpacePayload_Inception }, + { no: 2, name: "channels", kind: "message", T: SpacePayload_ChannelMetadata, repeated: true }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): SpacePayload_Snapshot { + return new SpacePayload_Snapshot().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): SpacePayload_Snapshot { + return new SpacePayload_Snapshot().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): SpacePayload_Snapshot { + return 
new SpacePayload_Snapshot().fromJsonString(jsonString, options); + } + + static equals(a: SpacePayload_Snapshot | PlainMessage | undefined, b: SpacePayload_Snapshot | PlainMessage | undefined): boolean { + return proto3.util.equals(SpacePayload_Snapshot, a, b); + } +} + +/** + * @generated from message river.SpacePayload.Inception + */ +export class SpacePayload_Inception extends Message { + /** + * @generated from field: bytes stream_id = 1; + */ + streamId = new Uint8Array(0); + + /** + * @generated from field: river.StreamSettings settings = 2; + */ + settings?: StreamSettings; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.SpacePayload.Inception"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "stream_id", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 2, name: "settings", kind: "message", T: StreamSettings }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): SpacePayload_Inception { + return new SpacePayload_Inception().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): SpacePayload_Inception { + return new SpacePayload_Inception().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): SpacePayload_Inception { + return new SpacePayload_Inception().fromJsonString(jsonString, options); + } + + static equals(a: SpacePayload_Inception | PlainMessage | undefined, b: SpacePayload_Inception | PlainMessage | undefined): boolean { + return proto3.util.equals(SpacePayload_Inception, a, b); + } +} + +/** + * @generated from message river.SpacePayload.ChannelMetadata + */ +export class SpacePayload_ChannelMetadata extends Message { + /** + * @generated from field: river.ChannelOp op = 1; + */ + op = ChannelOp.CO_UNSPECIFIED; + + /** + * @generated from field: bytes channel_id = 2; + */ 
+ channelId = new Uint8Array(0); + + /** + * @generated from field: river.EventRef origin_event = 3; + */ + originEvent?: EventRef; + + /** + * @generated from field: int64 updated_at_event_num = 6; + */ + updatedAtEventNum = protoInt64.zero; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.SpacePayload.ChannelMetadata"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "op", kind: "enum", T: proto3.getEnumType(ChannelOp) }, + { no: 2, name: "channel_id", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 3, name: "origin_event", kind: "message", T: EventRef }, + { no: 6, name: "updated_at_event_num", kind: "scalar", T: 3 /* ScalarType.INT64 */ }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): SpacePayload_ChannelMetadata { + return new SpacePayload_ChannelMetadata().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): SpacePayload_ChannelMetadata { + return new SpacePayload_ChannelMetadata().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): SpacePayload_ChannelMetadata { + return new SpacePayload_ChannelMetadata().fromJsonString(jsonString, options); + } + + static equals(a: SpacePayload_ChannelMetadata | PlainMessage | undefined, b: SpacePayload_ChannelMetadata | PlainMessage | undefined): boolean { + return proto3.util.equals(SpacePayload_ChannelMetadata, a, b); + } +} + +/** + * @generated from message river.SpacePayload.ChannelUpdate + */ +export class SpacePayload_ChannelUpdate extends Message { + /** + * @generated from field: river.ChannelOp op = 1; + */ + op = ChannelOp.CO_UNSPECIFIED; + + /** + * @generated from field: bytes channel_id = 2; + */ + channelId = new Uint8Array(0); + + /** + * @generated from field: river.EventRef origin_event = 3; + */ + originEvent?: 
EventRef; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.SpacePayload.ChannelUpdate"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "op", kind: "enum", T: proto3.getEnumType(ChannelOp) }, + { no: 2, name: "channel_id", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 3, name: "origin_event", kind: "message", T: EventRef }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): SpacePayload_ChannelUpdate { + return new SpacePayload_ChannelUpdate().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): SpacePayload_ChannelUpdate { + return new SpacePayload_ChannelUpdate().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): SpacePayload_ChannelUpdate { + return new SpacePayload_ChannelUpdate().fromJsonString(jsonString, options); + } + + static equals(a: SpacePayload_ChannelUpdate | PlainMessage | undefined, b: SpacePayload_ChannelUpdate | PlainMessage | undefined): boolean { + return proto3.util.equals(SpacePayload_ChannelUpdate, a, b); + } +} + +/** + * * + * ChannelPayload + * + * @generated from message river.ChannelPayload + */ +export class ChannelPayload extends Message { + /** + * @generated from oneof river.ChannelPayload.content + */ + content: { + /** + * @generated from field: river.ChannelPayload.Inception inception = 1; + */ + value: ChannelPayload_Inception; + case: "inception"; + } | { + /** + * @generated from field: river.EncryptedData message = 2; + */ + value: EncryptedData; + case: "message"; + } | { + /** + * @generated from field: river.ChannelPayload.Redaction redaction = 3; + */ + value: ChannelPayload_Redaction; + case: "redaction"; + } | { case: undefined; value?: undefined } = { case: undefined }; + + constructor(data?: PartialMessage) { + super(); + 
proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.ChannelPayload"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "inception", kind: "message", T: ChannelPayload_Inception, oneof: "content" }, + { no: 2, name: "message", kind: "message", T: EncryptedData, oneof: "content" }, + { no: 3, name: "redaction", kind: "message", T: ChannelPayload_Redaction, oneof: "content" }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): ChannelPayload { + return new ChannelPayload().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): ChannelPayload { + return new ChannelPayload().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): ChannelPayload { + return new ChannelPayload().fromJsonString(jsonString, options); + } + + static equals(a: ChannelPayload | PlainMessage | undefined, b: ChannelPayload | PlainMessage | undefined): boolean { + return proto3.util.equals(ChannelPayload, a, b); + } +} + +/** + * @generated from message river.ChannelPayload.Snapshot + */ +export class ChannelPayload_Snapshot extends Message { + /** + * inception + * + * @generated from field: river.ChannelPayload.Inception inception = 1; + */ + inception?: ChannelPayload_Inception; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.ChannelPayload.Snapshot"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "inception", kind: "message", T: ChannelPayload_Inception }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): ChannelPayload_Snapshot { + return new ChannelPayload_Snapshot().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): ChannelPayload_Snapshot { + 
return new ChannelPayload_Snapshot().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): ChannelPayload_Snapshot { + return new ChannelPayload_Snapshot().fromJsonString(jsonString, options); + } + + static equals(a: ChannelPayload_Snapshot | PlainMessage | undefined, b: ChannelPayload_Snapshot | PlainMessage | undefined): boolean { + return proto3.util.equals(ChannelPayload_Snapshot, a, b); + } +} + +/** + * @generated from message river.ChannelPayload.Inception + */ +export class ChannelPayload_Inception extends Message { + /** + * @generated from field: bytes stream_id = 1; + */ + streamId = new Uint8Array(0); + + /** + * @generated from field: bytes space_id = 3; + */ + spaceId = new Uint8Array(0); + + /** + * @generated from field: river.StreamSettings settings = 5; + */ + settings?: StreamSettings; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.ChannelPayload.Inception"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "stream_id", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 3, name: "space_id", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 5, name: "settings", kind: "message", T: StreamSettings }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): ChannelPayload_Inception { + return new ChannelPayload_Inception().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): ChannelPayload_Inception { + return new ChannelPayload_Inception().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): ChannelPayload_Inception { + return new ChannelPayload_Inception().fromJsonString(jsonString, options); + } + + static equals(a: ChannelPayload_Inception | PlainMessage | undefined, b: ChannelPayload_Inception | PlainMessage | 
undefined): boolean { + return proto3.util.equals(ChannelPayload_Inception, a, b); + } +} + +/** + * @generated from message river.ChannelPayload.Redaction + */ +export class ChannelPayload_Redaction extends Message { + /** + * @generated from field: bytes event_id = 1; + */ + eventId = new Uint8Array(0); + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.ChannelPayload.Redaction"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "event_id", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): ChannelPayload_Redaction { + return new ChannelPayload_Redaction().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): ChannelPayload_Redaction { + return new ChannelPayload_Redaction().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): ChannelPayload_Redaction { + return new ChannelPayload_Redaction().fromJsonString(jsonString, options); + } + + static equals(a: ChannelPayload_Redaction | PlainMessage | undefined, b: ChannelPayload_Redaction | PlainMessage | undefined): boolean { + return proto3.util.equals(ChannelPayload_Redaction, a, b); + } +} + +/** + * * + * DmChannelPayload + * + * @generated from message river.DmChannelPayload + */ +export class DmChannelPayload extends Message { + /** + * @generated from oneof river.DmChannelPayload.content + */ + content: { + /** + * @generated from field: river.DmChannelPayload.Inception inception = 1; + */ + value: DmChannelPayload_Inception; + case: "inception"; + } | { + /** + * @generated from field: river.EncryptedData message = 3; + */ + value: EncryptedData; + case: "message"; + } | { case: undefined; value?: undefined } = { case: undefined }; + + constructor(data?: PartialMessage) { + super(); + 
proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.DmChannelPayload"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "inception", kind: "message", T: DmChannelPayload_Inception, oneof: "content" }, + { no: 3, name: "message", kind: "message", T: EncryptedData, oneof: "content" }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): DmChannelPayload { + return new DmChannelPayload().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): DmChannelPayload { + return new DmChannelPayload().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): DmChannelPayload { + return new DmChannelPayload().fromJsonString(jsonString, options); + } + + static equals(a: DmChannelPayload | PlainMessage | undefined, b: DmChannelPayload | PlainMessage | undefined): boolean { + return proto3.util.equals(DmChannelPayload, a, b); + } +} + +/** + * @generated from message river.DmChannelPayload.Snapshot + */ +export class DmChannelPayload_Snapshot extends Message { + /** + * @generated from field: river.DmChannelPayload.Inception inception = 1; + */ + inception?: DmChannelPayload_Inception; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.DmChannelPayload.Snapshot"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "inception", kind: "message", T: DmChannelPayload_Inception }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): DmChannelPayload_Snapshot { + return new DmChannelPayload_Snapshot().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): DmChannelPayload_Snapshot { + return new DmChannelPayload_Snapshot().fromJson(jsonValue, options); + } + + 
static fromJsonString(jsonString: string, options?: Partial): DmChannelPayload_Snapshot { + return new DmChannelPayload_Snapshot().fromJsonString(jsonString, options); + } + + static equals(a: DmChannelPayload_Snapshot | PlainMessage | undefined, b: DmChannelPayload_Snapshot | PlainMessage | undefined): boolean { + return proto3.util.equals(DmChannelPayload_Snapshot, a, b); + } +} + +/** + * @generated from message river.DmChannelPayload.Inception + */ +export class DmChannelPayload_Inception extends Message { + /** + * @generated from field: bytes stream_id = 1; + */ + streamId = new Uint8Array(0); + + /** + * @generated from field: bytes first_party_address = 2; + */ + firstPartyAddress = new Uint8Array(0); + + /** + * @generated from field: bytes second_party_address = 3; + */ + secondPartyAddress = new Uint8Array(0); + + /** + * @generated from field: river.StreamSettings settings = 4; + */ + settings?: StreamSettings; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.DmChannelPayload.Inception"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "stream_id", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 2, name: "first_party_address", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 3, name: "second_party_address", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 4, name: "settings", kind: "message", T: StreamSettings }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): DmChannelPayload_Inception { + return new DmChannelPayload_Inception().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): DmChannelPayload_Inception { + return new DmChannelPayload_Inception().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): DmChannelPayload_Inception { + return new 
DmChannelPayload_Inception().fromJsonString(jsonString, options); + } + + static equals(a: DmChannelPayload_Inception | PlainMessage | undefined, b: DmChannelPayload_Inception | PlainMessage | undefined): boolean { + return proto3.util.equals(DmChannelPayload_Inception, a, b); + } +} + +/** + * * + * GdmChannelPayload + * + * @generated from message river.GdmChannelPayload + */ +export class GdmChannelPayload extends Message { + /** + * @generated from oneof river.GdmChannelPayload.content + */ + content: { + /** + * @generated from field: river.GdmChannelPayload.Inception inception = 1; + */ + value: GdmChannelPayload_Inception; + case: "inception"; + } | { + /** + * @generated from field: river.EncryptedData message = 2; + */ + value: EncryptedData; + case: "message"; + } | { + /** + * @generated from field: river.EncryptedData channel_properties = 3; + */ + value: EncryptedData; + case: "channelProperties"; + } | { case: undefined; value?: undefined } = { case: undefined }; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.GdmChannelPayload"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "inception", kind: "message", T: GdmChannelPayload_Inception, oneof: "content" }, + { no: 2, name: "message", kind: "message", T: EncryptedData, oneof: "content" }, + { no: 3, name: "channel_properties", kind: "message", T: EncryptedData, oneof: "content" }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): GdmChannelPayload { + return new GdmChannelPayload().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): GdmChannelPayload { + return new GdmChannelPayload().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): GdmChannelPayload { + return new GdmChannelPayload().fromJsonString(jsonString, options); 
+ } + + static equals(a: GdmChannelPayload | PlainMessage | undefined, b: GdmChannelPayload | PlainMessage | undefined): boolean { + return proto3.util.equals(GdmChannelPayload, a, b); + } +} + +/** + * @generated from message river.GdmChannelPayload.Snapshot + */ +export class GdmChannelPayload_Snapshot extends Message { + /** + * @generated from field: river.GdmChannelPayload.Inception inception = 1; + */ + inception?: GdmChannelPayload_Inception; + + /** + * @generated from field: river.WrappedEncryptedData channel_properties = 2; + */ + channelProperties?: WrappedEncryptedData; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.GdmChannelPayload.Snapshot"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "inception", kind: "message", T: GdmChannelPayload_Inception }, + { no: 2, name: "channel_properties", kind: "message", T: WrappedEncryptedData }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): GdmChannelPayload_Snapshot { + return new GdmChannelPayload_Snapshot().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): GdmChannelPayload_Snapshot { + return new GdmChannelPayload_Snapshot().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): GdmChannelPayload_Snapshot { + return new GdmChannelPayload_Snapshot().fromJsonString(jsonString, options); + } + + static equals(a: GdmChannelPayload_Snapshot | PlainMessage | undefined, b: GdmChannelPayload_Snapshot | PlainMessage | undefined): boolean { + return proto3.util.equals(GdmChannelPayload_Snapshot, a, b); + } +} + +/** + * @generated from message river.GdmChannelPayload.Inception + */ +export class GdmChannelPayload_Inception extends Message { + /** + * @generated from field: bytes stream_id = 1; + */ + streamId = new Uint8Array(0); + + /** 
+ * @generated from field: river.EncryptedData channel_properties = 2; + */ + channelProperties?: EncryptedData; + + /** + * @generated from field: river.StreamSettings settings = 3; + */ + settings?: StreamSettings; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.GdmChannelPayload.Inception"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "stream_id", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 2, name: "channel_properties", kind: "message", T: EncryptedData }, + { no: 3, name: "settings", kind: "message", T: StreamSettings }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): GdmChannelPayload_Inception { + return new GdmChannelPayload_Inception().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): GdmChannelPayload_Inception { + return new GdmChannelPayload_Inception().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): GdmChannelPayload_Inception { + return new GdmChannelPayload_Inception().fromJsonString(jsonString, options); + } + + static equals(a: GdmChannelPayload_Inception | PlainMessage | undefined, b: GdmChannelPayload_Inception | PlainMessage | undefined): boolean { + return proto3.util.equals(GdmChannelPayload_Inception, a, b); + } +} + +/** + * * + * UserPayload + * + * @generated from message river.UserPayload + */ +export class UserPayload extends Message { + /** + * @generated from oneof river.UserPayload.content + */ + content: { + /** + * @generated from field: river.UserPayload.Inception inception = 1; + */ + value: UserPayload_Inception; + case: "inception"; + } | { + /** + * @generated from field: river.UserPayload.UserMembership user_membership = 2; + */ + value: UserPayload_UserMembership; + case: "userMembership"; + } | { + /** + * @generated from 
field: river.UserPayload.UserMembershipAction user_membership_action = 3; + */ + value: UserPayload_UserMembershipAction; + case: "userMembershipAction"; + } | { case: undefined; value?: undefined } = { case: undefined }; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.UserPayload"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "inception", kind: "message", T: UserPayload_Inception, oneof: "content" }, + { no: 2, name: "user_membership", kind: "message", T: UserPayload_UserMembership, oneof: "content" }, + { no: 3, name: "user_membership_action", kind: "message", T: UserPayload_UserMembershipAction, oneof: "content" }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): UserPayload { + return new UserPayload().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): UserPayload { + return new UserPayload().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): UserPayload { + return new UserPayload().fromJsonString(jsonString, options); + } + + static equals(a: UserPayload | PlainMessage | undefined, b: UserPayload | PlainMessage | undefined): boolean { + return proto3.util.equals(UserPayload, a, b); + } +} + +/** + * @generated from message river.UserPayload.Snapshot + */ +export class UserPayload_Snapshot extends Message { + /** + * inception + * + * @generated from field: river.UserPayload.Inception inception = 1; + */ + inception?: UserPayload_Inception; + + /** + * memberships, sorted by stream_id + * + * @generated from field: repeated river.UserPayload.UserMembership memberships = 2; + */ + memberships: UserPayload_UserMembership[] = []; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + 
static readonly typeName = "river.UserPayload.Snapshot"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "inception", kind: "message", T: UserPayload_Inception }, + { no: 2, name: "memberships", kind: "message", T: UserPayload_UserMembership, repeated: true }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): UserPayload_Snapshot { + return new UserPayload_Snapshot().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): UserPayload_Snapshot { + return new UserPayload_Snapshot().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): UserPayload_Snapshot { + return new UserPayload_Snapshot().fromJsonString(jsonString, options); + } + + static equals(a: UserPayload_Snapshot | PlainMessage | undefined, b: UserPayload_Snapshot | PlainMessage | undefined): boolean { + return proto3.util.equals(UserPayload_Snapshot, a, b); + } +} + +/** + * @generated from message river.UserPayload.Inception + */ +export class UserPayload_Inception extends Message { + /** + * @generated from field: bytes stream_id = 1; + */ + streamId = new Uint8Array(0); + + /** + * @generated from field: river.StreamSettings settings = 2; + */ + settings?: StreamSettings; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.UserPayload.Inception"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "stream_id", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 2, name: "settings", kind: "message", T: StreamSettings }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): UserPayload_Inception { + return new UserPayload_Inception().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): UserPayload_Inception { + return new 
UserPayload_Inception().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): UserPayload_Inception { + return new UserPayload_Inception().fromJsonString(jsonString, options); + } + + static equals(a: UserPayload_Inception | PlainMessage | undefined, b: UserPayload_Inception | PlainMessage | undefined): boolean { + return proto3.util.equals(UserPayload_Inception, a, b); + } +} + +/** + * update own membership + * + * @generated from message river.UserPayload.UserMembership + */ +export class UserPayload_UserMembership extends Message { + /** + * @generated from field: bytes stream_id = 1; + */ + streamId = new Uint8Array(0); + + /** + * @generated from field: river.MembershipOp op = 2; + */ + op = MembershipOp.SO_UNSPECIFIED; + + /** + * @generated from field: optional bytes inviter = 3; + */ + inviter?: Uint8Array; + + /** + * @generated from field: optional bytes stream_parent_id = 4; + */ + streamParentId?: Uint8Array; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.UserPayload.UserMembership"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "stream_id", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 2, name: "op", kind: "enum", T: proto3.getEnumType(MembershipOp) }, + { no: 3, name: "inviter", kind: "scalar", T: 12 /* ScalarType.BYTES */, opt: true }, + { no: 4, name: "stream_parent_id", kind: "scalar", T: 12 /* ScalarType.BYTES */, opt: true }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): UserPayload_UserMembership { + return new UserPayload_UserMembership().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): UserPayload_UserMembership { + return new UserPayload_UserMembership().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: 
Partial): UserPayload_UserMembership { + return new UserPayload_UserMembership().fromJsonString(jsonString, options); + } + + static equals(a: UserPayload_UserMembership | PlainMessage | undefined, b: UserPayload_UserMembership | PlainMessage | undefined): boolean { + return proto3.util.equals(UserPayload_UserMembership, a, b); + } +} + +/** + * update someone else's membership + * + * @generated from message river.UserPayload.UserMembershipAction + */ +export class UserPayload_UserMembershipAction extends Message { + /** + * @generated from field: bytes stream_id = 1; + */ + streamId = new Uint8Array(0); + + /** + * @generated from field: bytes user_id = 2; + */ + userId = new Uint8Array(0); + + /** + * @generated from field: river.MembershipOp op = 3; + */ + op = MembershipOp.SO_UNSPECIFIED; + + /** + * @generated from field: optional bytes stream_parent_id = 4; + */ + streamParentId?: Uint8Array; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.UserPayload.UserMembershipAction"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "stream_id", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 2, name: "user_id", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 3, name: "op", kind: "enum", T: proto3.getEnumType(MembershipOp) }, + { no: 4, name: "stream_parent_id", kind: "scalar", T: 12 /* ScalarType.BYTES */, opt: true }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): UserPayload_UserMembershipAction { + return new UserPayload_UserMembershipAction().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): UserPayload_UserMembershipAction { + return new UserPayload_UserMembershipAction().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): UserPayload_UserMembershipAction { + return 
new UserPayload_UserMembershipAction().fromJsonString(jsonString, options); + } + + static equals(a: UserPayload_UserMembershipAction | PlainMessage | undefined, b: UserPayload_UserMembershipAction | PlainMessage | undefined): boolean { + return proto3.util.equals(UserPayload_UserMembershipAction, a, b); + } +} + +/** + * * + * UserInboxPayload + * messages to a user encrypted per deviceId + * + * @generated from message river.UserInboxPayload + */ +export class UserInboxPayload extends Message { + /** + * @generated from oneof river.UserInboxPayload.content + */ + content: { + /** + * @generated from field: river.UserInboxPayload.Inception inception = 1; + */ + value: UserInboxPayload_Inception; + case: "inception"; + } | { + /** + * @generated from field: river.UserInboxPayload.Ack ack = 2; + */ + value: UserInboxPayload_Ack; + case: "ack"; + } | { + /** + * @generated from field: river.UserInboxPayload.GroupEncryptionSessions group_encryption_sessions = 3; + */ + value: UserInboxPayload_GroupEncryptionSessions; + case: "groupEncryptionSessions"; + } | { case: undefined; value?: undefined } = { case: undefined }; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.UserInboxPayload"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "inception", kind: "message", T: UserInboxPayload_Inception, oneof: "content" }, + { no: 2, name: "ack", kind: "message", T: UserInboxPayload_Ack, oneof: "content" }, + { no: 3, name: "group_encryption_sessions", kind: "message", T: UserInboxPayload_GroupEncryptionSessions, oneof: "content" }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): UserInboxPayload { + return new UserInboxPayload().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): UserInboxPayload { + return new 
UserInboxPayload().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): UserInboxPayload { + return new UserInboxPayload().fromJsonString(jsonString, options); + } + + static equals(a: UserInboxPayload | PlainMessage | undefined, b: UserInboxPayload | PlainMessage | undefined): boolean { + return proto3.util.equals(UserInboxPayload, a, b); + } +} + +/** + * @generated from message river.UserInboxPayload.Snapshot + */ +export class UserInboxPayload_Snapshot extends Message { + /** + * @generated from field: river.UserInboxPayload.Inception inception = 1; + */ + inception?: UserInboxPayload_Inception; + + /** + * deviceKey: miniblockNum that the ack was snapshotted + * + * @generated from field: map device_summary = 2; + */ + deviceSummary: { [key: string]: UserInboxPayload_Snapshot_DeviceSummary } = {}; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.UserInboxPayload.Snapshot"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "inception", kind: "message", T: UserInboxPayload_Inception }, + { no: 2, name: "device_summary", kind: "map", K: 9 /* ScalarType.STRING */, V: {kind: "message", T: UserInboxPayload_Snapshot_DeviceSummary} }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): UserInboxPayload_Snapshot { + return new UserInboxPayload_Snapshot().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): UserInboxPayload_Snapshot { + return new UserInboxPayload_Snapshot().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): UserInboxPayload_Snapshot { + return new UserInboxPayload_Snapshot().fromJsonString(jsonString, options); + } + + static equals(a: UserInboxPayload_Snapshot | PlainMessage | undefined, b: UserInboxPayload_Snapshot | PlainMessage | 
undefined): boolean { + return proto3.util.equals(UserInboxPayload_Snapshot, a, b); + } +} + +/** + * @generated from message river.UserInboxPayload.Snapshot.DeviceSummary + */ +export class UserInboxPayload_Snapshot_DeviceSummary extends Message { + /** + * * + * UpperBound = latest to device event sent from other client per deviceKey + * LowerBound = latest ack sent by stream owner per deviceKey + * on ack, if UpperBound <= LowerBound then delete this deviceKey entry from the record + * on ack or new session, if any device’s lower bound < N generations ago, delete the deviceKey entry from the record + * + * @generated from field: int64 lower_bound = 1; + */ + lowerBound = protoInt64.zero; + + /** + * @generated from field: int64 upper_bound = 2; + */ + upperBound = protoInt64.zero; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.UserInboxPayload.Snapshot.DeviceSummary"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "lower_bound", kind: "scalar", T: 3 /* ScalarType.INT64 */ }, + { no: 2, name: "upper_bound", kind: "scalar", T: 3 /* ScalarType.INT64 */ }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): UserInboxPayload_Snapshot_DeviceSummary { + return new UserInboxPayload_Snapshot_DeviceSummary().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): UserInboxPayload_Snapshot_DeviceSummary { + return new UserInboxPayload_Snapshot_DeviceSummary().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): UserInboxPayload_Snapshot_DeviceSummary { + return new UserInboxPayload_Snapshot_DeviceSummary().fromJsonString(jsonString, options); + } + + static equals(a: UserInboxPayload_Snapshot_DeviceSummary | PlainMessage | undefined, b: UserInboxPayload_Snapshot_DeviceSummary | PlainMessage | 
undefined): boolean { + return proto3.util.equals(UserInboxPayload_Snapshot_DeviceSummary, a, b); + } +} + +/** + * @generated from message river.UserInboxPayload.Inception + */ +export class UserInboxPayload_Inception extends Message { + /** + * @generated from field: bytes stream_id = 1; + */ + streamId = new Uint8Array(0); + + /** + * @generated from field: river.StreamSettings settings = 2; + */ + settings?: StreamSettings; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.UserInboxPayload.Inception"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "stream_id", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 2, name: "settings", kind: "message", T: StreamSettings }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): UserInboxPayload_Inception { + return new UserInboxPayload_Inception().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): UserInboxPayload_Inception { + return new UserInboxPayload_Inception().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): UserInboxPayload_Inception { + return new UserInboxPayload_Inception().fromJsonString(jsonString, options); + } + + static equals(a: UserInboxPayload_Inception | PlainMessage | undefined, b: UserInboxPayload_Inception | PlainMessage | undefined): boolean { + return proto3.util.equals(UserInboxPayload_Inception, a, b); + } +} + +/** + * @generated from message river.UserInboxPayload.GroupEncryptionSessions + */ +export class UserInboxPayload_GroupEncryptionSessions extends Message { + /** + * @generated from field: bytes stream_id = 1; + */ + streamId = new Uint8Array(0); + + /** + * @generated from field: string sender_key = 2; + */ + senderKey = ""; + + /** + * @generated from field: repeated string session_ids = 3; + */ + 
sessionIds: string[] = []; + + /** + * deviceKey: per device ciphertext of encrypted session keys that match session_ids + * + * @generated from field: map ciphertexts = 4; + */ + ciphertexts: { [key: string]: string } = {}; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.UserInboxPayload.GroupEncryptionSessions"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "stream_id", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 2, name: "sender_key", kind: "scalar", T: 9 /* ScalarType.STRING */ }, + { no: 3, name: "session_ids", kind: "scalar", T: 9 /* ScalarType.STRING */, repeated: true }, + { no: 4, name: "ciphertexts", kind: "map", K: 9 /* ScalarType.STRING */, V: {kind: "scalar", T: 9 /* ScalarType.STRING */} }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): UserInboxPayload_GroupEncryptionSessions { + return new UserInboxPayload_GroupEncryptionSessions().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): UserInboxPayload_GroupEncryptionSessions { + return new UserInboxPayload_GroupEncryptionSessions().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): UserInboxPayload_GroupEncryptionSessions { + return new UserInboxPayload_GroupEncryptionSessions().fromJsonString(jsonString, options); + } + + static equals(a: UserInboxPayload_GroupEncryptionSessions | PlainMessage | undefined, b: UserInboxPayload_GroupEncryptionSessions | PlainMessage | undefined): boolean { + return proto3.util.equals(UserInboxPayload_GroupEncryptionSessions, a, b); + } +} + +/** + * @generated from message river.UserInboxPayload.Ack + */ +export class UserInboxPayload_Ack extends Message { + /** + * @generated from field: string device_key = 1; + */ + deviceKey = ""; + + /** + * @generated from field: int64 
miniblock_num = 2; + */ + miniblockNum = protoInt64.zero; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.UserInboxPayload.Ack"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "device_key", kind: "scalar", T: 9 /* ScalarType.STRING */ }, + { no: 2, name: "miniblock_num", kind: "scalar", T: 3 /* ScalarType.INT64 */ }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): UserInboxPayload_Ack { + return new UserInboxPayload_Ack().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): UserInboxPayload_Ack { + return new UserInboxPayload_Ack().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): UserInboxPayload_Ack { + return new UserInboxPayload_Ack().fromJsonString(jsonString, options); + } + + static equals(a: UserInboxPayload_Ack | PlainMessage | undefined, b: UserInboxPayload_Ack | PlainMessage | undefined): boolean { + return proto3.util.equals(UserInboxPayload_Ack, a, b); + } +} + +/** + * * + * UserSettingsPayload + * + * @generated from message river.UserSettingsPayload + */ +export class UserSettingsPayload extends Message { + /** + * @generated from oneof river.UserSettingsPayload.content + */ + content: { + /** + * @generated from field: river.UserSettingsPayload.Inception inception = 1; + */ + value: UserSettingsPayload_Inception; + case: "inception"; + } | { + /** + * @generated from field: river.UserSettingsPayload.FullyReadMarkers fully_read_markers = 2; + */ + value: UserSettingsPayload_FullyReadMarkers; + case: "fullyReadMarkers"; + } | { + /** + * @generated from field: river.UserSettingsPayload.UserBlock user_block = 3; + */ + value: UserSettingsPayload_UserBlock; + case: "userBlock"; + } | { case: undefined; value?: undefined } = { case: undefined }; + + constructor(data?: 
PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.UserSettingsPayload"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "inception", kind: "message", T: UserSettingsPayload_Inception, oneof: "content" }, + { no: 2, name: "fully_read_markers", kind: "message", T: UserSettingsPayload_FullyReadMarkers, oneof: "content" }, + { no: 3, name: "user_block", kind: "message", T: UserSettingsPayload_UserBlock, oneof: "content" }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): UserSettingsPayload { + return new UserSettingsPayload().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): UserSettingsPayload { + return new UserSettingsPayload().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): UserSettingsPayload { + return new UserSettingsPayload().fromJsonString(jsonString, options); + } + + static equals(a: UserSettingsPayload | PlainMessage | undefined, b: UserSettingsPayload | PlainMessage | undefined): boolean { + return proto3.util.equals(UserSettingsPayload, a, b); + } +} + +/** + * @generated from message river.UserSettingsPayload.Snapshot + */ +export class UserSettingsPayload_Snapshot extends Message { + /** + * inception + * + * @generated from field: river.UserSettingsPayload.Inception inception = 1; + */ + inception?: UserSettingsPayload_Inception; + + /** + * fullyReadMarkers: sorted by stream_id + * + * @generated from field: repeated river.UserSettingsPayload.FullyReadMarkers fully_read_markers = 2; + */ + fullyReadMarkers: UserSettingsPayload_FullyReadMarkers[] = []; + + /** + * @generated from field: repeated river.UserSettingsPayload.Snapshot.UserBlocks user_blocks_list = 3; + */ + userBlocksList: UserSettingsPayload_Snapshot_UserBlocks[] = []; + + constructor(data?: PartialMessage) { + super(); + 
proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.UserSettingsPayload.Snapshot"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "inception", kind: "message", T: UserSettingsPayload_Inception }, + { no: 2, name: "fully_read_markers", kind: "message", T: UserSettingsPayload_FullyReadMarkers, repeated: true }, + { no: 3, name: "user_blocks_list", kind: "message", T: UserSettingsPayload_Snapshot_UserBlocks, repeated: true }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): UserSettingsPayload_Snapshot { + return new UserSettingsPayload_Snapshot().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): UserSettingsPayload_Snapshot { + return new UserSettingsPayload_Snapshot().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): UserSettingsPayload_Snapshot { + return new UserSettingsPayload_Snapshot().fromJsonString(jsonString, options); + } + + static equals(a: UserSettingsPayload_Snapshot | PlainMessage | undefined, b: UserSettingsPayload_Snapshot | PlainMessage | undefined): boolean { + return proto3.util.equals(UserSettingsPayload_Snapshot, a, b); + } +} + +/** + * for a specific blocked user, there might be multiple block or unblock events + * + * @generated from message river.UserSettingsPayload.Snapshot.UserBlocks + */ +export class UserSettingsPayload_Snapshot_UserBlocks extends Message { + /** + * @generated from field: bytes user_id = 1; + */ + userId = new Uint8Array(0); + + /** + * @generated from field: repeated river.UserSettingsPayload.Snapshot.UserBlocks.Block blocks = 2; + */ + blocks: UserSettingsPayload_Snapshot_UserBlocks_Block[] = []; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = 
"river.UserSettingsPayload.Snapshot.UserBlocks"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "user_id", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 2, name: "blocks", kind: "message", T: UserSettingsPayload_Snapshot_UserBlocks_Block, repeated: true }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): UserSettingsPayload_Snapshot_UserBlocks { + return new UserSettingsPayload_Snapshot_UserBlocks().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): UserSettingsPayload_Snapshot_UserBlocks { + return new UserSettingsPayload_Snapshot_UserBlocks().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): UserSettingsPayload_Snapshot_UserBlocks { + return new UserSettingsPayload_Snapshot_UserBlocks().fromJsonString(jsonString, options); + } + + static equals(a: UserSettingsPayload_Snapshot_UserBlocks | PlainMessage | undefined, b: UserSettingsPayload_Snapshot_UserBlocks | PlainMessage | undefined): boolean { + return proto3.util.equals(UserSettingsPayload_Snapshot_UserBlocks, a, b); + } +} + +/** + * @generated from message river.UserSettingsPayload.Snapshot.UserBlocks.Block + */ +export class UserSettingsPayload_Snapshot_UserBlocks_Block extends Message { + /** + * @generated from field: bool is_blocked = 1; + */ + isBlocked = false; + + /** + * @generated from field: int64 event_num = 2; + */ + eventNum = protoInt64.zero; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.UserSettingsPayload.Snapshot.UserBlocks.Block"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "is_blocked", kind: "scalar", T: 8 /* ScalarType.BOOL */ }, + { no: 2, name: "event_num", kind: "scalar", T: 3 /* ScalarType.INT64 */ }, + ]); + + static fromBinary(bytes: Uint8Array, 
options?: Partial): UserSettingsPayload_Snapshot_UserBlocks_Block { + return new UserSettingsPayload_Snapshot_UserBlocks_Block().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): UserSettingsPayload_Snapshot_UserBlocks_Block { + return new UserSettingsPayload_Snapshot_UserBlocks_Block().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): UserSettingsPayload_Snapshot_UserBlocks_Block { + return new UserSettingsPayload_Snapshot_UserBlocks_Block().fromJsonString(jsonString, options); + } + + static equals(a: UserSettingsPayload_Snapshot_UserBlocks_Block | PlainMessage | undefined, b: UserSettingsPayload_Snapshot_UserBlocks_Block | PlainMessage | undefined): boolean { + return proto3.util.equals(UserSettingsPayload_Snapshot_UserBlocks_Block, a, b); + } +} + +/** + * @generated from message river.UserSettingsPayload.Inception + */ +export class UserSettingsPayload_Inception extends Message { + /** + * @generated from field: bytes stream_id = 1; + */ + streamId = new Uint8Array(0); + + /** + * @generated from field: river.StreamSettings settings = 2; + */ + settings?: StreamSettings; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.UserSettingsPayload.Inception"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "stream_id", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 2, name: "settings", kind: "message", T: StreamSettings }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): UserSettingsPayload_Inception { + return new UserSettingsPayload_Inception().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): UserSettingsPayload_Inception { + return new UserSettingsPayload_Inception().fromJson(jsonValue, options); + } + + static 
fromJsonString(jsonString: string, options?: Partial): UserSettingsPayload_Inception { + return new UserSettingsPayload_Inception().fromJsonString(jsonString, options); + } + + static equals(a: UserSettingsPayload_Inception | PlainMessage | undefined, b: UserSettingsPayload_Inception | PlainMessage | undefined): boolean { + return proto3.util.equals(UserSettingsPayload_Inception, a, b); + } +} + +/** + * @generated from message river.UserSettingsPayload.MarkerContent + */ +export class UserSettingsPayload_MarkerContent extends Message { + /** + * @generated from field: string data = 1; + */ + data = ""; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.UserSettingsPayload.MarkerContent"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "data", kind: "scalar", T: 9 /* ScalarType.STRING */ }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): UserSettingsPayload_MarkerContent { + return new UserSettingsPayload_MarkerContent().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): UserSettingsPayload_MarkerContent { + return new UserSettingsPayload_MarkerContent().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): UserSettingsPayload_MarkerContent { + return new UserSettingsPayload_MarkerContent().fromJsonString(jsonString, options); + } + + static equals(a: UserSettingsPayload_MarkerContent | PlainMessage | undefined, b: UserSettingsPayload_MarkerContent | PlainMessage | undefined): boolean { + return proto3.util.equals(UserSettingsPayload_MarkerContent, a, b); + } +} + +/** + * @generated from message river.UserSettingsPayload.FullyReadMarkers + */ +export class UserSettingsPayload_FullyReadMarkers extends Message { + /** + * @generated from field: bytes stream_id = 1; + */ + streamId = new 
Uint8Array(0); + + /** + * @generated from field: river.UserSettingsPayload.MarkerContent content = 2; + */ + content?: UserSettingsPayload_MarkerContent; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.UserSettingsPayload.FullyReadMarkers"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "stream_id", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 2, name: "content", kind: "message", T: UserSettingsPayload_MarkerContent }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): UserSettingsPayload_FullyReadMarkers { + return new UserSettingsPayload_FullyReadMarkers().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): UserSettingsPayload_FullyReadMarkers { + return new UserSettingsPayload_FullyReadMarkers().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): UserSettingsPayload_FullyReadMarkers { + return new UserSettingsPayload_FullyReadMarkers().fromJsonString(jsonString, options); + } + + static equals(a: UserSettingsPayload_FullyReadMarkers | PlainMessage | undefined, b: UserSettingsPayload_FullyReadMarkers | PlainMessage | undefined): boolean { + return proto3.util.equals(UserSettingsPayload_FullyReadMarkers, a, b); + } +} + +/** + * @generated from message river.UserSettingsPayload.UserBlock + */ +export class UserSettingsPayload_UserBlock extends Message { + /** + * @generated from field: bytes user_id = 1; + */ + userId = new Uint8Array(0); + + /** + * @generated from field: bool is_blocked = 2; + */ + isBlocked = false; + + /** + * @generated from field: int64 event_num = 3; + */ + eventNum = protoInt64.zero; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly 
typeName = "river.UserSettingsPayload.UserBlock"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "user_id", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 2, name: "is_blocked", kind: "scalar", T: 8 /* ScalarType.BOOL */ }, + { no: 3, name: "event_num", kind: "scalar", T: 3 /* ScalarType.INT64 */ }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): UserSettingsPayload_UserBlock { + return new UserSettingsPayload_UserBlock().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): UserSettingsPayload_UserBlock { + return new UserSettingsPayload_UserBlock().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): UserSettingsPayload_UserBlock { + return new UserSettingsPayload_UserBlock().fromJsonString(jsonString, options); + } + + static equals(a: UserSettingsPayload_UserBlock | PlainMessage | undefined, b: UserSettingsPayload_UserBlock | PlainMessage | undefined): boolean { + return proto3.util.equals(UserSettingsPayload_UserBlock, a, b); + } +} + +/** + * * + * UserDeviceKeyPayload + * + * @generated from message river.UserDeviceKeyPayload + */ +export class UserDeviceKeyPayload extends Message { + /** + * @generated from oneof river.UserDeviceKeyPayload.content + */ + content: { + /** + * @generated from field: river.UserDeviceKeyPayload.Inception inception = 1; + */ + value: UserDeviceKeyPayload_Inception; + case: "inception"; + } | { + /** + * @generated from field: river.UserDeviceKeyPayload.EncryptionDevice encryption_device = 2; + */ + value: UserDeviceKeyPayload_EncryptionDevice; + case: "encryptionDevice"; + } | { case: undefined; value?: undefined } = { case: undefined }; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.UserDeviceKeyPayload"; + static readonly fields: FieldList 
= proto3.util.newFieldList(() => [ + { no: 1, name: "inception", kind: "message", T: UserDeviceKeyPayload_Inception, oneof: "content" }, + { no: 2, name: "encryption_device", kind: "message", T: UserDeviceKeyPayload_EncryptionDevice, oneof: "content" }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): UserDeviceKeyPayload { + return new UserDeviceKeyPayload().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): UserDeviceKeyPayload { + return new UserDeviceKeyPayload().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): UserDeviceKeyPayload { + return new UserDeviceKeyPayload().fromJsonString(jsonString, options); + } + + static equals(a: UserDeviceKeyPayload | PlainMessage | undefined, b: UserDeviceKeyPayload | PlainMessage | undefined): boolean { + return proto3.util.equals(UserDeviceKeyPayload, a, b); + } +} + +/** + * @generated from message river.UserDeviceKeyPayload.Snapshot + */ +export class UserDeviceKeyPayload_Snapshot extends Message { + /** + * inception + * + * @generated from field: river.UserDeviceKeyPayload.Inception inception = 1; + */ + inception?: UserDeviceKeyPayload_Inception; + + /** + * device keys for this user, unique by device_key, capped at N, most recent last + * + * @generated from field: repeated river.UserDeviceKeyPayload.EncryptionDevice encryption_devices = 2; + */ + encryptionDevices: UserDeviceKeyPayload_EncryptionDevice[] = []; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.UserDeviceKeyPayload.Snapshot"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "inception", kind: "message", T: UserDeviceKeyPayload_Inception }, + { no: 2, name: "encryption_devices", kind: "message", T: UserDeviceKeyPayload_EncryptionDevice, repeated: true }, + ]); + + static 
fromBinary(bytes: Uint8Array, options?: Partial): UserDeviceKeyPayload_Snapshot { + return new UserDeviceKeyPayload_Snapshot().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): UserDeviceKeyPayload_Snapshot { + return new UserDeviceKeyPayload_Snapshot().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): UserDeviceKeyPayload_Snapshot { + return new UserDeviceKeyPayload_Snapshot().fromJsonString(jsonString, options); + } + + static equals(a: UserDeviceKeyPayload_Snapshot | PlainMessage | undefined, b: UserDeviceKeyPayload_Snapshot | PlainMessage | undefined): boolean { + return proto3.util.equals(UserDeviceKeyPayload_Snapshot, a, b); + } +} + +/** + * @generated from message river.UserDeviceKeyPayload.Inception + */ +export class UserDeviceKeyPayload_Inception extends Message { + /** + * @generated from field: bytes stream_id = 1; + */ + streamId = new Uint8Array(0); + + /** + * @generated from field: river.StreamSettings settings = 2; + */ + settings?: StreamSettings; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.UserDeviceKeyPayload.Inception"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "stream_id", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 2, name: "settings", kind: "message", T: StreamSettings }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): UserDeviceKeyPayload_Inception { + return new UserDeviceKeyPayload_Inception().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): UserDeviceKeyPayload_Inception { + return new UserDeviceKeyPayload_Inception().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): UserDeviceKeyPayload_Inception { + return new 
UserDeviceKeyPayload_Inception().fromJsonString(jsonString, options); + } + + static equals(a: UserDeviceKeyPayload_Inception | PlainMessage | undefined, b: UserDeviceKeyPayload_Inception | PlainMessage | undefined): boolean { + return proto3.util.equals(UserDeviceKeyPayload_Inception, a, b); + } +} + +/** + * @generated from message river.UserDeviceKeyPayload.EncryptionDevice + */ +export class UserDeviceKeyPayload_EncryptionDevice extends Message { + /** + * @generated from field: string device_key = 1; + */ + deviceKey = ""; + + /** + * @generated from field: string fallback_key = 2; + */ + fallbackKey = ""; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.UserDeviceKeyPayload.EncryptionDevice"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "device_key", kind: "scalar", T: 9 /* ScalarType.STRING */ }, + { no: 2, name: "fallback_key", kind: "scalar", T: 9 /* ScalarType.STRING */ }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): UserDeviceKeyPayload_EncryptionDevice { + return new UserDeviceKeyPayload_EncryptionDevice().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): UserDeviceKeyPayload_EncryptionDevice { + return new UserDeviceKeyPayload_EncryptionDevice().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): UserDeviceKeyPayload_EncryptionDevice { + return new UserDeviceKeyPayload_EncryptionDevice().fromJsonString(jsonString, options); + } + + static equals(a: UserDeviceKeyPayload_EncryptionDevice | PlainMessage | undefined, b: UserDeviceKeyPayload_EncryptionDevice | PlainMessage | undefined): boolean { + return proto3.util.equals(UserDeviceKeyPayload_EncryptionDevice, a, b); + } +} + +/** + * * + * MediaPayload + * + * @generated from message river.MediaPayload + */ +export 
class MediaPayload extends Message { + /** + * @generated from oneof river.MediaPayload.content + */ + content: { + /** + * @generated from field: river.MediaPayload.Inception inception = 1; + */ + value: MediaPayload_Inception; + case: "inception"; + } | { + /** + * @generated from field: river.MediaPayload.Chunk chunk = 2; + */ + value: MediaPayload_Chunk; + case: "chunk"; + } | { case: undefined; value?: undefined } = { case: undefined }; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.MediaPayload"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "inception", kind: "message", T: MediaPayload_Inception, oneof: "content" }, + { no: 2, name: "chunk", kind: "message", T: MediaPayload_Chunk, oneof: "content" }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): MediaPayload { + return new MediaPayload().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): MediaPayload { + return new MediaPayload().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): MediaPayload { + return new MediaPayload().fromJsonString(jsonString, options); + } + + static equals(a: MediaPayload | PlainMessage | undefined, b: MediaPayload | PlainMessage | undefined): boolean { + return proto3.util.equals(MediaPayload, a, b); + } +} + +/** + * @generated from message river.MediaPayload.Snapshot + */ +export class MediaPayload_Snapshot extends Message { + /** + * @generated from field: river.MediaPayload.Inception inception = 1; + */ + inception?: MediaPayload_Inception; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.MediaPayload.Snapshot"; + static readonly fields: FieldList = 
proto3.util.newFieldList(() => [ + { no: 1, name: "inception", kind: "message", T: MediaPayload_Inception }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): MediaPayload_Snapshot { + return new MediaPayload_Snapshot().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): MediaPayload_Snapshot { + return new MediaPayload_Snapshot().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): MediaPayload_Snapshot { + return new MediaPayload_Snapshot().fromJsonString(jsonString, options); + } + + static equals(a: MediaPayload_Snapshot | PlainMessage | undefined, b: MediaPayload_Snapshot | PlainMessage | undefined): boolean { + return proto3.util.equals(MediaPayload_Snapshot, a, b); + } +} + +/** + * @generated from message river.MediaPayload.Inception + */ +export class MediaPayload_Inception extends Message { + /** + * @generated from field: bytes stream_id = 1; + */ + streamId = new Uint8Array(0); + + /** + * @generated from field: optional bytes channel_id = 2; + */ + channelId?: Uint8Array; + + /** + * @generated from field: int32 chunk_count = 3; + */ + chunkCount = 0; + + /** + * @generated from field: river.StreamSettings settings = 4; + */ + settings?: StreamSettings; + + /** + * @generated from field: optional bytes space_id = 5; + */ + spaceId?: Uint8Array; + + /** + * @generated from field: optional bytes user_id = 6; + */ + userId?: Uint8Array; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.MediaPayload.Inception"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "stream_id", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 2, name: "channel_id", kind: "scalar", T: 12 /* ScalarType.BYTES */, opt: true }, + { no: 3, name: "chunk_count", kind: "scalar", T: 5 /* ScalarType.INT32 */ 
}, + { no: 4, name: "settings", kind: "message", T: StreamSettings }, + { no: 5, name: "space_id", kind: "scalar", T: 12 /* ScalarType.BYTES */, opt: true }, + { no: 6, name: "user_id", kind: "scalar", T: 12 /* ScalarType.BYTES */, opt: true }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): MediaPayload_Inception { + return new MediaPayload_Inception().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): MediaPayload_Inception { + return new MediaPayload_Inception().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): MediaPayload_Inception { + return new MediaPayload_Inception().fromJsonString(jsonString, options); + } + + static equals(a: MediaPayload_Inception | PlainMessage | undefined, b: MediaPayload_Inception | PlainMessage | undefined): boolean { + return proto3.util.equals(MediaPayload_Inception, a, b); + } +} + +/** + * @generated from message river.MediaPayload.Chunk + */ +export class MediaPayload_Chunk extends Message { + /** + * @generated from field: bytes data = 1; + */ + data = new Uint8Array(0); + + /** + * @generated from field: int32 chunk_index = 2; + */ + chunkIndex = 0; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.MediaPayload.Chunk"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "data", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 2, name: "chunk_index", kind: "scalar", T: 5 /* ScalarType.INT32 */ }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): MediaPayload_Chunk { + return new MediaPayload_Chunk().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): MediaPayload_Chunk { + return new MediaPayload_Chunk().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, 
options?: Partial): MediaPayload_Chunk { + return new MediaPayload_Chunk().fromJsonString(jsonString, options); + } + + static equals(a: MediaPayload_Chunk | PlainMessage | undefined, b: MediaPayload_Chunk | PlainMessage | undefined): boolean { + return proto3.util.equals(MediaPayload_Chunk, a, b); + } +} + +/** + * * + * Snapshot contains a summary of all state events up to the most recent miniblock + * + * @generated from message river.Snapshot + */ +export class Snapshot extends Message { + /** + * @generated from field: river.MemberPayload.Snapshot members = 1; + */ + members?: MemberPayload_Snapshot; + + /** + * @generated from field: int32 snapshot_version = 2; + */ + snapshotVersion = 0; + + /** + * Snapshot data specific for each stream type. + * + * @generated from oneof river.Snapshot.content + */ + content: { + /** + * @generated from field: river.SpacePayload.Snapshot space_content = 101; + */ + value: SpacePayload_Snapshot; + case: "spaceContent"; + } | { + /** + * @generated from field: river.ChannelPayload.Snapshot channel_content = 102; + */ + value: ChannelPayload_Snapshot; + case: "channelContent"; + } | { + /** + * @generated from field: river.UserPayload.Snapshot user_content = 103; + */ + value: UserPayload_Snapshot; + case: "userContent"; + } | { + /** + * @generated from field: river.UserSettingsPayload.Snapshot user_settings_content = 104; + */ + value: UserSettingsPayload_Snapshot; + case: "userSettingsContent"; + } | { + /** + * @generated from field: river.UserDeviceKeyPayload.Snapshot user_device_key_content = 105; + */ + value: UserDeviceKeyPayload_Snapshot; + case: "userDeviceKeyContent"; + } | { + /** + * @generated from field: river.MediaPayload.Snapshot media_content = 106; + */ + value: MediaPayload_Snapshot; + case: "mediaContent"; + } | { + /** + * @generated from field: river.DmChannelPayload.Snapshot dm_channel_content = 107; + */ + value: DmChannelPayload_Snapshot; + case: "dmChannelContent"; + } | { + /** + * @generated from 
field: river.GdmChannelPayload.Snapshot gdm_channel_content = 108; + */ + value: GdmChannelPayload_Snapshot; + case: "gdmChannelContent"; + } | { + /** + * @generated from field: river.UserInboxPayload.Snapshot user_inbox_content = 109; + */ + value: UserInboxPayload_Snapshot; + case: "userInboxContent"; + } | { case: undefined; value?: undefined } = { case: undefined }; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.Snapshot"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "members", kind: "message", T: MemberPayload_Snapshot }, + { no: 2, name: "snapshot_version", kind: "scalar", T: 5 /* ScalarType.INT32 */ }, + { no: 101, name: "space_content", kind: "message", T: SpacePayload_Snapshot, oneof: "content" }, + { no: 102, name: "channel_content", kind: "message", T: ChannelPayload_Snapshot, oneof: "content" }, + { no: 103, name: "user_content", kind: "message", T: UserPayload_Snapshot, oneof: "content" }, + { no: 104, name: "user_settings_content", kind: "message", T: UserSettingsPayload_Snapshot, oneof: "content" }, + { no: 105, name: "user_device_key_content", kind: "message", T: UserDeviceKeyPayload_Snapshot, oneof: "content" }, + { no: 106, name: "media_content", kind: "message", T: MediaPayload_Snapshot, oneof: "content" }, + { no: 107, name: "dm_channel_content", kind: "message", T: DmChannelPayload_Snapshot, oneof: "content" }, + { no: 108, name: "gdm_channel_content", kind: "message", T: GdmChannelPayload_Snapshot, oneof: "content" }, + { no: 109, name: "user_inbox_content", kind: "message", T: UserInboxPayload_Snapshot, oneof: "content" }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): Snapshot { + return new Snapshot().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): Snapshot { + return new 
Snapshot().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): Snapshot { + return new Snapshot().fromJsonString(jsonString, options); + } + + static equals(a: Snapshot | PlainMessage | undefined, b: Snapshot | PlainMessage | undefined): boolean { + return proto3.util.equals(Snapshot, a, b); + } +} + +/** + * * + * Derived event is produces by server when there should be additional event to compliment + * received event. For example, when user joins a space through event in the space stream, server will produce a derived event + * in a user stream to indicate that user joined a particual space. + * + * EventRef is used to reference the event that caused the derived event to be produced. + * + * @generated from message river.EventRef + */ +export class EventRef extends Message { + /** + * @generated from field: bytes stream_id = 1; + */ + streamId = new Uint8Array(0); + + /** + * @generated from field: bytes hash = 2; + */ + hash = new Uint8Array(0); + + /** + * @generated from field: bytes signature = 3; + */ + signature = new Uint8Array(0); + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.EventRef"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "stream_id", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 2, name: "hash", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 3, name: "signature", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): EventRef { + return new EventRef().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): EventRef { + return new EventRef().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): EventRef { + return new EventRef().fromJsonString(jsonString, 
options); + } + + static equals(a: EventRef | PlainMessage | undefined, b: EventRef | PlainMessage | undefined): boolean { + return proto3.util.equals(EventRef, a, b); + } +} + +/** + * * + * StreamSettings is a part of inception payload for each stream type. + * + * @generated from message river.StreamSettings + */ +export class StreamSettings extends Message { + /** + * Test setting for testing with manual miniblock creation through Info debug request. + * + * @generated from field: bool disable_miniblock_creation = 1; + */ + disableMiniblockCreation = false; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.StreamSettings"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "disable_miniblock_creation", kind: "scalar", T: 8 /* ScalarType.BOOL */ }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): StreamSettings { + return new StreamSettings().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): StreamSettings { + return new StreamSettings().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): StreamSettings { + return new StreamSettings().fromJsonString(jsonString, options); + } + + static equals(a: StreamSettings | PlainMessage | undefined, b: StreamSettings | PlainMessage | undefined): boolean { + return proto3.util.equals(StreamSettings, a, b); + } +} + +/** + * * + * EncryptedData + * + * @generated from message river.EncryptedData + */ +export class EncryptedData extends Message { + /** + * * + * Ciphertext of the encryption envelope. + * + * @generated from field: string ciphertext = 1; + */ + ciphertext = ""; + + /** + * * + * Encryption algorithm used to encrypt this event. 
+ * + * @generated from field: string algorithm = 2; + */ + algorithm = ""; + + /** + * * + * Sender device public key identifying the sender's device. + * + * @generated from field: string sender_key = 3; + */ + senderKey = ""; + + /** + * * + * The ID of the session used to encrypt the message. + * + * @generated from field: string session_id = 4; + */ + sessionId = ""; + + /** + * * + * Optional checksum of the cleartext data. + * + * @generated from field: optional string checksum = 5; + */ + checksum?: string; + + /** + * * + * Optional reference to parent event ID + * + * @generated from field: optional string ref_event_id = 6; + */ + refEventId?: string; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.EncryptedData"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "ciphertext", kind: "scalar", T: 9 /* ScalarType.STRING */ }, + { no: 2, name: "algorithm", kind: "scalar", T: 9 /* ScalarType.STRING */ }, + { no: 3, name: "sender_key", kind: "scalar", T: 9 /* ScalarType.STRING */ }, + { no: 4, name: "session_id", kind: "scalar", T: 9 /* ScalarType.STRING */ }, + { no: 5, name: "checksum", kind: "scalar", T: 9 /* ScalarType.STRING */, opt: true }, + { no: 6, name: "ref_event_id", kind: "scalar", T: 9 /* ScalarType.STRING */, opt: true }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): EncryptedData { + return new EncryptedData().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): EncryptedData { + return new EncryptedData().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): EncryptedData { + return new EncryptedData().fromJsonString(jsonString, options); + } + + static equals(a: EncryptedData | PlainMessage | undefined, b: EncryptedData | PlainMessage | undefined): boolean { + 
return proto3.util.equals(EncryptedData, a, b); + } +} + +/** + * @generated from message river.WrappedEncryptedData + */ +export class WrappedEncryptedData extends Message { + /** + * @generated from field: river.EncryptedData data = 1; + */ + data?: EncryptedData; + + /** + * @generated from field: int64 event_num = 2; + */ + eventNum = protoInt64.zero; + + /** + * @generated from field: bytes event_hash = 3; + */ + eventHash = new Uint8Array(0); + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.WrappedEncryptedData"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "data", kind: "message", T: EncryptedData }, + { no: 2, name: "event_num", kind: "scalar", T: 3 /* ScalarType.INT64 */ }, + { no: 3, name: "event_hash", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): WrappedEncryptedData { + return new WrappedEncryptedData().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): WrappedEncryptedData { + return new WrappedEncryptedData().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): WrappedEncryptedData { + return new WrappedEncryptedData().fromJsonString(jsonString, options); + } + + static equals(a: WrappedEncryptedData | PlainMessage | undefined, b: WrappedEncryptedData | PlainMessage | undefined): boolean { + return proto3.util.equals(WrappedEncryptedData, a, b); + } +} + +/** + * @generated from message river.SyncCookie + */ +export class SyncCookie extends Message { + /** + * @generated from field: bytes node_address = 1; + */ + nodeAddress = new Uint8Array(0); + + /** + * @generated from field: bytes stream_id = 2; + */ + streamId = new Uint8Array(0); + + /** + * @generated from field: int64 minipool_gen = 3; + */ + minipoolGen = 
protoInt64.zero; + + /** + * @generated from field: int64 minipool_slot = 4; + */ + minipoolSlot = protoInt64.zero; + + /** + * @generated from field: bytes prev_miniblock_hash = 5; + */ + prevMiniblockHash = new Uint8Array(0); + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.SyncCookie"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "node_address", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 2, name: "stream_id", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 3, name: "minipool_gen", kind: "scalar", T: 3 /* ScalarType.INT64 */ }, + { no: 4, name: "minipool_slot", kind: "scalar", T: 3 /* ScalarType.INT64 */ }, + { no: 5, name: "prev_miniblock_hash", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): SyncCookie { + return new SyncCookie().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): SyncCookie { + return new SyncCookie().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): SyncCookie { + return new SyncCookie().fromJsonString(jsonString, options); + } + + static equals(a: SyncCookie | PlainMessage | undefined, b: SyncCookie | PlainMessage | undefined): boolean { + return proto3.util.equals(SyncCookie, a, b); + } +} + +/** + * @generated from message river.StreamAndCookie + */ +export class StreamAndCookie extends Message { + /** + * @generated from field: repeated river.Envelope events = 1; + */ + events: Envelope[] = []; + + /** + * @generated from field: river.SyncCookie next_sync_cookie = 2; + */ + nextSyncCookie?: SyncCookie; + + /** + * if non-empty, contains all blocks since the latest snapshot, miniblocks[0].header is the latest snapshot + * + * @generated from field: repeated river.Miniblock miniblocks = 3; 
+ */ + miniblocks: Miniblock[] = []; + + /** + * @generated from field: bool sync_reset = 4; + */ + syncReset = false; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.StreamAndCookie"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "events", kind: "message", T: Envelope, repeated: true }, + { no: 2, name: "next_sync_cookie", kind: "message", T: SyncCookie }, + { no: 3, name: "miniblocks", kind: "message", T: Miniblock, repeated: true }, + { no: 4, name: "sync_reset", kind: "scalar", T: 8 /* ScalarType.BOOL */ }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): StreamAndCookie { + return new StreamAndCookie().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): StreamAndCookie { + return new StreamAndCookie().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): StreamAndCookie { + return new StreamAndCookie().fromJsonString(jsonString, options); + } + + static equals(a: StreamAndCookie | PlainMessage | undefined, b: StreamAndCookie | PlainMessage | undefined): boolean { + return proto3.util.equals(StreamAndCookie, a, b); + } +} + +/** + * @generated from message river.GetStreamExRequest + */ +export class GetStreamExRequest extends Message { + /** + * @generated from field: bytes stream_id = 1; + */ + streamId = new Uint8Array(0); + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.GetStreamExRequest"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "stream_id", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): GetStreamExRequest { + return new 
GetStreamExRequest().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): GetStreamExRequest { + return new GetStreamExRequest().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): GetStreamExRequest { + return new GetStreamExRequest().fromJsonString(jsonString, options); + } + + static equals(a: GetStreamExRequest | PlainMessage | undefined, b: GetStreamExRequest | PlainMessage | undefined): boolean { + return proto3.util.equals(GetStreamExRequest, a, b); + } +} + +/** + * @generated from message river.Minipool + */ +export class Minipool extends Message { + /** + * @generated from field: repeated river.Envelope events = 1; + */ + events: Envelope[] = []; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.Minipool"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "events", kind: "message", T: Envelope, repeated: true }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): Minipool { + return new Minipool().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): Minipool { + return new Minipool().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): Minipool { + return new Minipool().fromJsonString(jsonString, options); + } + + static equals(a: Minipool | PlainMessage | undefined, b: Minipool | PlainMessage | undefined): boolean { + return proto3.util.equals(Minipool, a, b); + } +} + +/** + * GetStreamExResponse is a stream of raw data that represents the current state of the requested stream. + * These responses represent streams that are not expected to change once finalized, and have a optimized code path + * for retrieval. Response may potentially be very large, and are streamed back to the client. 
The client is expected + * to martial the raw data back into protobuf messages. + * + * @generated from message river.GetStreamExResponse + */ +export class GetStreamExResponse extends Message { + /** + * @generated from oneof river.GetStreamExResponse.data + */ + data: { + /** + * @generated from field: river.Miniblock miniblock = 1; + */ + value: Miniblock; + case: "miniblock"; + } | { + /** + * @generated from field: river.Minipool minipool = 2; + */ + value: Minipool; + case: "minipool"; + } | { case: undefined; value?: undefined } = { case: undefined }; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.GetStreamExResponse"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "miniblock", kind: "message", T: Miniblock, oneof: "data" }, + { no: 2, name: "minipool", kind: "message", T: Minipool, oneof: "data" }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): GetStreamExResponse { + return new GetStreamExResponse().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): GetStreamExResponse { + return new GetStreamExResponse().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): GetStreamExResponse { + return new GetStreamExResponse().fromJsonString(jsonString, options); + } + + static equals(a: GetStreamExResponse | PlainMessage | undefined, b: GetStreamExResponse | PlainMessage | undefined): boolean { + return proto3.util.equals(GetStreamExResponse, a, b); + } +} + +/** + * @generated from message river.CreateStreamRequest + */ +export class CreateStreamRequest extends Message { + /** + * @generated from field: repeated river.Envelope events = 1; + */ + events: Envelope[] = []; + + /** + * stream_id should match the stream_id in the inception payload of the first event + * + * @generated from 
field: bytes stream_id = 2; + */ + streamId = new Uint8Array(0); + + /** + * @generated from field: map metadata = 3; + */ + metadata: { [key: string]: Uint8Array } = {}; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.CreateStreamRequest"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "events", kind: "message", T: Envelope, repeated: true }, + { no: 2, name: "stream_id", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 3, name: "metadata", kind: "map", K: 9 /* ScalarType.STRING */, V: {kind: "scalar", T: 12 /* ScalarType.BYTES */} }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): CreateStreamRequest { + return new CreateStreamRequest().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): CreateStreamRequest { + return new CreateStreamRequest().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): CreateStreamRequest { + return new CreateStreamRequest().fromJsonString(jsonString, options); + } + + static equals(a: CreateStreamRequest | PlainMessage | undefined, b: CreateStreamRequest | PlainMessage | undefined): boolean { + return proto3.util.equals(CreateStreamRequest, a, b); + } +} + +/** + * @generated from message river.CreateStreamResponse + */ +export class CreateStreamResponse extends Message { + /** + * all events in current minipool and cookie allowing to sync from the end of the stream + * + * @generated from field: river.StreamAndCookie stream = 1; + */ + stream?: StreamAndCookie; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.CreateStreamResponse"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, 
name: "stream", kind: "message", T: StreamAndCookie }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): CreateStreamResponse { + return new CreateStreamResponse().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): CreateStreamResponse { + return new CreateStreamResponse().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): CreateStreamResponse { + return new CreateStreamResponse().fromJsonString(jsonString, options); + } + + static equals(a: CreateStreamResponse | PlainMessage | undefined, b: CreateStreamResponse | PlainMessage | undefined): boolean { + return proto3.util.equals(CreateStreamResponse, a, b); + } +} + +/** + * @generated from message river.GetStreamRequest + */ +export class GetStreamRequest extends Message { + /** + * @generated from field: bytes stream_id = 1; + */ + streamId = new Uint8Array(0); + + /** + * if optional is true and stream doesn't exist, response will be a nil stream instead of ERROR NOT_FOUND + * + * @generated from field: bool optional = 2; + */ + optional = false; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.GetStreamRequest"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "stream_id", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 2, name: "optional", kind: "scalar", T: 8 /* ScalarType.BOOL */ }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): GetStreamRequest { + return new GetStreamRequest().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): GetStreamRequest { + return new GetStreamRequest().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): GetStreamRequest { + return new GetStreamRequest().fromJsonString(jsonString, 
options); + } + + static equals(a: GetStreamRequest | PlainMessage | undefined, b: GetStreamRequest | PlainMessage | undefined): boolean { + return proto3.util.equals(GetStreamRequest, a, b); + } +} + +/** + * @generated from message river.GetStreamResponse + */ +export class GetStreamResponse extends Message { + /** + * all events in current minipool and cookie allowing to sync from the end of the stream + * + * @generated from field: river.StreamAndCookie stream = 1; + */ + stream?: StreamAndCookie; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.GetStreamResponse"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "stream", kind: "message", T: StreamAndCookie }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): GetStreamResponse { + return new GetStreamResponse().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): GetStreamResponse { + return new GetStreamResponse().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): GetStreamResponse { + return new GetStreamResponse().fromJsonString(jsonString, options); + } + + static equals(a: GetStreamResponse | PlainMessage | undefined, b: GetStreamResponse | PlainMessage | undefined): boolean { + return proto3.util.equals(GetStreamResponse, a, b); + } +} + +/** + * @generated from message river.GetMiniblocksRequest + */ +export class GetMiniblocksRequest extends Message { + /** + * @generated from field: bytes stream_id = 1; + */ + streamId = new Uint8Array(0); + + /** + * @generated from field: int64 fromInclusive = 2; + */ + fromInclusive = protoInt64.zero; + + /** + * @generated from field: int64 toExclusive = 3; + */ + toExclusive = protoInt64.zero; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } 
+ + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.GetMiniblocksRequest"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "stream_id", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 2, name: "fromInclusive", kind: "scalar", T: 3 /* ScalarType.INT64 */ }, + { no: 3, name: "toExclusive", kind: "scalar", T: 3 /* ScalarType.INT64 */ }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): GetMiniblocksRequest { + return new GetMiniblocksRequest().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): GetMiniblocksRequest { + return new GetMiniblocksRequest().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): GetMiniblocksRequest { + return new GetMiniblocksRequest().fromJsonString(jsonString, options); + } + + static equals(a: GetMiniblocksRequest | PlainMessage | undefined, b: GetMiniblocksRequest | PlainMessage | undefined): boolean { + return proto3.util.equals(GetMiniblocksRequest, a, b); + } +} + +/** + * @generated from message river.GetMiniblocksResponse + */ +export class GetMiniblocksResponse extends Message { + /** + * @generated from field: repeated river.Miniblock miniblocks = 1; + */ + miniblocks: Miniblock[] = []; + + /** + * terminus: true if there are no more blocks to fetch because they've been garbage collected, or you've reached block 0 + * + * @generated from field: bool terminus = 2; + */ + terminus = false; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.GetMiniblocksResponse"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "miniblocks", kind: "message", T: Miniblock, repeated: true }, + { no: 2, name: "terminus", kind: "scalar", T: 8 /* ScalarType.BOOL */ }, + ]); + + static 
fromBinary(bytes: Uint8Array, options?: Partial): GetMiniblocksResponse { + return new GetMiniblocksResponse().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): GetMiniblocksResponse { + return new GetMiniblocksResponse().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): GetMiniblocksResponse { + return new GetMiniblocksResponse().fromJsonString(jsonString, options); + } + + static equals(a: GetMiniblocksResponse | PlainMessage | undefined, b: GetMiniblocksResponse | PlainMessage | undefined): boolean { + return proto3.util.equals(GetMiniblocksResponse, a, b); + } +} + +/** + * @generated from message river.GetLastMiniblockHashRequest + */ +export class GetLastMiniblockHashRequest extends Message { + /** + * @generated from field: bytes stream_id = 1; + */ + streamId = new Uint8Array(0); + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.GetLastMiniblockHashRequest"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "stream_id", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): GetLastMiniblockHashRequest { + return new GetLastMiniblockHashRequest().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): GetLastMiniblockHashRequest { + return new GetLastMiniblockHashRequest().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): GetLastMiniblockHashRequest { + return new GetLastMiniblockHashRequest().fromJsonString(jsonString, options); + } + + static equals(a: GetLastMiniblockHashRequest | PlainMessage | undefined, b: GetLastMiniblockHashRequest | PlainMessage | undefined): boolean { + return proto3.util.equals(GetLastMiniblockHashRequest, a, b); + } +} + 
+/** + * @generated from message river.GetLastMiniblockHashResponse + */ +export class GetLastMiniblockHashResponse extends Message { + /** + * @generated from field: bytes hash = 1; + */ + hash = new Uint8Array(0); + + /** + * @generated from field: int64 miniblock_num = 2; + */ + miniblockNum = protoInt64.zero; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.GetLastMiniblockHashResponse"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "hash", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 2, name: "miniblock_num", kind: "scalar", T: 3 /* ScalarType.INT64 */ }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): GetLastMiniblockHashResponse { + return new GetLastMiniblockHashResponse().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): GetLastMiniblockHashResponse { + return new GetLastMiniblockHashResponse().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): GetLastMiniblockHashResponse { + return new GetLastMiniblockHashResponse().fromJsonString(jsonString, options); + } + + static equals(a: GetLastMiniblockHashResponse | PlainMessage | undefined, b: GetLastMiniblockHashResponse | PlainMessage | undefined): boolean { + return proto3.util.equals(GetLastMiniblockHashResponse, a, b); + } +} + +/** + * @generated from message river.AddEventRequest + */ +export class AddEventRequest extends Message { + /** + * @generated from field: bytes stream_id = 1; + */ + streamId = new Uint8Array(0); + + /** + * @generated from field: river.Envelope event = 2; + */ + event?: Envelope; + + /** + * if true, response will contain non nil error if event didn't pass validation + * + * @generated from field: bool optional = 3; + */ + optional = false; + + constructor(data?: PartialMessage) { + 
super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.AddEventRequest"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "stream_id", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + { no: 2, name: "event", kind: "message", T: Envelope }, + { no: 3, name: "optional", kind: "scalar", T: 8 /* ScalarType.BOOL */ }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): AddEventRequest { + return new AddEventRequest().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): AddEventRequest { + return new AddEventRequest().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): AddEventRequest { + return new AddEventRequest().fromJsonString(jsonString, options); + } + + static equals(a: AddEventRequest | PlainMessage | undefined, b: AddEventRequest | PlainMessage | undefined): boolean { + return proto3.util.equals(AddEventRequest, a, b); + } +} + +/** + * @generated from message river.AddEventResponse + */ +export class AddEventResponse extends Message { + /** + * only set if AddEventRequest.optional is true + * + * @generated from field: river.AddEventResponse.Error error = 1; + */ + error?: AddEventResponse_Error; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.AddEventResponse"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "error", kind: "message", T: AddEventResponse_Error }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): AddEventResponse { + return new AddEventResponse().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): AddEventResponse { + return new AddEventResponse().fromJson(jsonValue, options); + } + + 
static fromJsonString(jsonString: string, options?: Partial): AddEventResponse { + return new AddEventResponse().fromJsonString(jsonString, options); + } + + static equals(a: AddEventResponse | PlainMessage | undefined, b: AddEventResponse | PlainMessage | undefined): boolean { + return proto3.util.equals(AddEventResponse, a, b); + } +} + +/** + * @generated from message river.AddEventResponse.Error + */ +export class AddEventResponse_Error extends Message { + /** + * @generated from field: river.Err code = 1; + */ + code = Err.ERR_UNSPECIFIED; + + /** + * @generated from field: string msg = 2; + */ + msg = ""; + + /** + * @generated from field: repeated string funcs = 3; + */ + funcs: string[] = []; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.AddEventResponse.Error"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "code", kind: "enum", T: proto3.getEnumType(Err) }, + { no: 2, name: "msg", kind: "scalar", T: 9 /* ScalarType.STRING */ }, + { no: 3, name: "funcs", kind: "scalar", T: 9 /* ScalarType.STRING */, repeated: true }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): AddEventResponse_Error { + return new AddEventResponse_Error().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): AddEventResponse_Error { + return new AddEventResponse_Error().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): AddEventResponse_Error { + return new AddEventResponse_Error().fromJsonString(jsonString, options); + } + + static equals(a: AddEventResponse_Error | PlainMessage | undefined, b: AddEventResponse_Error | PlainMessage | undefined): boolean { + return proto3.util.equals(AddEventResponse_Error, a, b); + } +} + +/** + * SyncStreamsRequest is a request to start a streams sync session. 
+ * + * @generated from message river.SyncStreamsRequest + */ +export class SyncStreamsRequest extends Message { + /** + * sync_pos is the list of streams and positions in those streams to receive updates from. + * + * @generated from field: repeated river.SyncCookie sync_pos = 1; + */ + syncPos: SyncCookie[] = []; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.SyncStreamsRequest"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "sync_pos", kind: "message", T: SyncCookie, repeated: true }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): SyncStreamsRequest { + return new SyncStreamsRequest().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): SyncStreamsRequest { + return new SyncStreamsRequest().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): SyncStreamsRequest { + return new SyncStreamsRequest().fromJsonString(jsonString, options); + } + + static equals(a: SyncStreamsRequest | PlainMessage | undefined, b: SyncStreamsRequest | PlainMessage | undefined): boolean { + return proto3.util.equals(SyncStreamsRequest, a, b); + } +} + +/** + * SyncStreamsResponse is a stream of updates that the client receives for streams it subscribed to within a streams + * sync session. + * + * @generated from message river.SyncStreamsResponse + */ +export class SyncStreamsResponse extends Message { + /** + * sync_id is the id of the sync session. + * + * @generated from field: string sync_id = 1; + */ + syncId = ""; + + /** + * sync_op marks the type of update. + * + * @generated from field: river.SyncOp sync_op = 2; + */ + syncOp = SyncOp.SYNC_UNSPECIFIED; + + /** + * stream indicates an update of a stream. 
+ * only set when sync_op = SYNC_UPDATE + * + * @generated from field: river.StreamAndCookie stream = 3; + */ + stream?: StreamAndCookie; + + /** + * pong_nonce is returned after a ping request was made to the sync session through PingSync. + * Set with the ping value from the PingSync request when sync_op = SYNC_PONG + * + * @generated from field: string pong_nonce = 4; + */ + pongNonce = ""; + + /** + * stream_id is set when sync_op = SYNC_DOWN and indicates it will not receive updates anymore for this stream. + * If the client is still is interested in updates for this stream it must re-add the stream to the sync session. + * + * @generated from field: bytes stream_id = 5; + */ + streamId = new Uint8Array(0); + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.SyncStreamsResponse"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "sync_id", kind: "scalar", T: 9 /* ScalarType.STRING */ }, + { no: 2, name: "sync_op", kind: "enum", T: proto3.getEnumType(SyncOp) }, + { no: 3, name: "stream", kind: "message", T: StreamAndCookie }, + { no: 4, name: "pong_nonce", kind: "scalar", T: 9 /* ScalarType.STRING */ }, + { no: 5, name: "stream_id", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): SyncStreamsResponse { + return new SyncStreamsResponse().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): SyncStreamsResponse { + return new SyncStreamsResponse().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): SyncStreamsResponse { + return new SyncStreamsResponse().fromJsonString(jsonString, options); + } + + static equals(a: SyncStreamsResponse | PlainMessage | undefined, b: SyncStreamsResponse | PlainMessage | undefined): boolean { + return 
proto3.util.equals(SyncStreamsResponse, a, b); + } +} + +/** + * AddStreamToSyncRequest is a request to add a stream to an existing streams sync session. + * + * @generated from message river.AddStreamToSyncRequest + */ +export class AddStreamToSyncRequest extends Message { + /** + * sync_id is the id of the sync session. + * + * @generated from field: string sync_id = 1; + */ + syncId = ""; + + /** + * sync_pos identifies the stream and position in the stream to receive updates from. + * + * @generated from field: river.SyncCookie sync_pos = 2; + */ + syncPos?: SyncCookie; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.AddStreamToSyncRequest"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "sync_id", kind: "scalar", T: 9 /* ScalarType.STRING */ }, + { no: 2, name: "sync_pos", kind: "message", T: SyncCookie }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): AddStreamToSyncRequest { + return new AddStreamToSyncRequest().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): AddStreamToSyncRequest { + return new AddStreamToSyncRequest().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): AddStreamToSyncRequest { + return new AddStreamToSyncRequest().fromJsonString(jsonString, options); + } + + static equals(a: AddStreamToSyncRequest | PlainMessage | undefined, b: AddStreamToSyncRequest | PlainMessage | undefined): boolean { + return proto3.util.equals(AddStreamToSyncRequest, a, b); + } +} + +/** + * @generated from message river.AddStreamToSyncResponse + */ +export class AddStreamToSyncResponse extends Message { + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = 
"river.AddStreamToSyncResponse"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): AddStreamToSyncResponse { + return new AddStreamToSyncResponse().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): AddStreamToSyncResponse { + return new AddStreamToSyncResponse().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): AddStreamToSyncResponse { + return new AddStreamToSyncResponse().fromJsonString(jsonString, options); + } + + static equals(a: AddStreamToSyncResponse | PlainMessage | undefined, b: AddStreamToSyncResponse | PlainMessage | undefined): boolean { + return proto3.util.equals(AddStreamToSyncResponse, a, b); + } +} + +/** + * RemoveStreamFromSyncRequest stops the client to receive updates from this stream in the sync session. + * Note that due to buffering in the stream it is possible still receives several updates for this stream after it was + * removed. 
+ * + * @generated from message river.RemoveStreamFromSyncRequest + */ +export class RemoveStreamFromSyncRequest extends Message { + /** + * @generated from field: string sync_id = 1; + */ + syncId = ""; + + /** + * @generated from field: bytes stream_id = 2; + */ + streamId = new Uint8Array(0); + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.RemoveStreamFromSyncRequest"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "sync_id", kind: "scalar", T: 9 /* ScalarType.STRING */ }, + { no: 2, name: "stream_id", kind: "scalar", T: 12 /* ScalarType.BYTES */ }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): RemoveStreamFromSyncRequest { + return new RemoveStreamFromSyncRequest().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): RemoveStreamFromSyncRequest { + return new RemoveStreamFromSyncRequest().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): RemoveStreamFromSyncRequest { + return new RemoveStreamFromSyncRequest().fromJsonString(jsonString, options); + } + + static equals(a: RemoveStreamFromSyncRequest | PlainMessage | undefined, b: RemoveStreamFromSyncRequest | PlainMessage | undefined): boolean { + return proto3.util.equals(RemoveStreamFromSyncRequest, a, b); + } +} + +/** + * @generated from message river.RemoveStreamFromSyncResponse + */ +export class RemoveStreamFromSyncResponse extends Message { + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.RemoveStreamFromSyncResponse"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): RemoveStreamFromSyncResponse { + 
return new RemoveStreamFromSyncResponse().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): RemoveStreamFromSyncResponse { + return new RemoveStreamFromSyncResponse().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): RemoveStreamFromSyncResponse { + return new RemoveStreamFromSyncResponse().fromJsonString(jsonString, options); + } + + static equals(a: RemoveStreamFromSyncResponse | PlainMessage | undefined, b: RemoveStreamFromSyncResponse | PlainMessage | undefined): boolean { + return proto3.util.equals(RemoveStreamFromSyncResponse, a, b); + } +} + +/** + * CancelSyncRequest cancels the sync session. + * + * @generated from message river.CancelSyncRequest + */ +export class CancelSyncRequest extends Message { + /** + * sync_id is the unique id of the sync session. + * + * @generated from field: string sync_id = 1; + */ + syncId = ""; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.CancelSyncRequest"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "sync_id", kind: "scalar", T: 9 /* ScalarType.STRING */ }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): CancelSyncRequest { + return new CancelSyncRequest().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): CancelSyncRequest { + return new CancelSyncRequest().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): CancelSyncRequest { + return new CancelSyncRequest().fromJsonString(jsonString, options); + } + + static equals(a: CancelSyncRequest | PlainMessage | undefined, b: CancelSyncRequest | PlainMessage | undefined): boolean { + return proto3.util.equals(CancelSyncRequest, a, b); + } +} + +/** + * @generated from message 
river.CancelSyncResponse + */ +export class CancelSyncResponse extends Message { + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.CancelSyncResponse"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): CancelSyncResponse { + return new CancelSyncResponse().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): CancelSyncResponse { + return new CancelSyncResponse().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): CancelSyncResponse { + return new CancelSyncResponse().fromJsonString(jsonString, options); + } + + static equals(a: CancelSyncResponse | PlainMessage | undefined, b: CancelSyncResponse | PlainMessage | undefined): boolean { + return proto3.util.equals(CancelSyncResponse, a, b); + } +} + +/** + * PingSyncRequest is a request to receive a pong in the sync session stream. + * + * @generated from message river.PingSyncRequest + */ +export class PingSyncRequest extends Message { + /** + * sync_id is the unique id of the sync session. + * + * @generated from field: string sync_id = 1; + */ + syncId = ""; + + /** + * nonce is the pong to return in the sync session stream. 
+ * + * @generated from field: string nonce = 2; + */ + nonce = ""; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.PingSyncRequest"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "sync_id", kind: "scalar", T: 9 /* ScalarType.STRING */ }, + { no: 2, name: "nonce", kind: "scalar", T: 9 /* ScalarType.STRING */ }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): PingSyncRequest { + return new PingSyncRequest().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): PingSyncRequest { + return new PingSyncRequest().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): PingSyncRequest { + return new PingSyncRequest().fromJsonString(jsonString, options); + } + + static equals(a: PingSyncRequest | PlainMessage | undefined, b: PingSyncRequest | PlainMessage | undefined): boolean { + return proto3.util.equals(PingSyncRequest, a, b); + } +} + +/** + * @generated from message river.PingSyncResponse + */ +export class PingSyncResponse extends Message { + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.PingSyncResponse"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): PingSyncResponse { + return new PingSyncResponse().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): PingSyncResponse { + return new PingSyncResponse().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): PingSyncResponse { + return new PingSyncResponse().fromJsonString(jsonString, options); + } + + static equals(a: PingSyncResponse 
| PlainMessage | undefined, b: PingSyncResponse | PlainMessage | undefined): boolean { + return proto3.util.equals(PingSyncResponse, a, b); + } +} + +/** + * @generated from message river.InfoRequest + */ +export class InfoRequest extends Message { + /** + * @generated from field: repeated string debug = 1; + */ + debug: string[] = []; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.InfoRequest"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "debug", kind: "scalar", T: 9 /* ScalarType.STRING */, repeated: true }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): InfoRequest { + return new InfoRequest().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): InfoRequest { + return new InfoRequest().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): InfoRequest { + return new InfoRequest().fromJsonString(jsonString, options); + } + + static equals(a: InfoRequest | PlainMessage | undefined, b: InfoRequest | PlainMessage | undefined): boolean { + return proto3.util.equals(InfoRequest, a, b); + } +} + +/** + * @generated from message river.InfoResponse + */ +export class InfoResponse extends Message { + /** + * @generated from field: string graffiti = 1; + */ + graffiti = ""; + + /** + * @generated from field: google.protobuf.Timestamp start_time = 2; + */ + startTime?: Timestamp; + + /** + * @generated from field: string version = 3; + */ + version = ""; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "river.InfoResponse"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "graffiti", kind: "scalar", T: 9 /* ScalarType.STRING */ 
}, + { no: 2, name: "start_time", kind: "message", T: Timestamp }, + { no: 3, name: "version", kind: "scalar", T: 9 /* ScalarType.STRING */ }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): InfoResponse { + return new InfoResponse().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): InfoResponse { + return new InfoResponse().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): InfoResponse { + return new InfoResponse().fromJsonString(jsonString, options); + } + + static equals(a: InfoResponse | PlainMessage | undefined, b: InfoResponse | PlainMessage | undefined): boolean { + return proto3.util.equals(InfoResponse, a, b); + } +} + diff --git a/protocol/protocol.proto b/protocol/protocol.proto index 904ff92c4..e7a91701b 100644 --- a/protocol/protocol.proto +++ b/protocol/protocol.proto @@ -691,24 +691,43 @@ message AddEventResponse { Error error = 1; // only set if AddEventRequest.optional is true } +// SyncStreamsRequest is a request to start a streams sync session. message SyncStreamsRequest { + // sync_pos is the list of streams and positions in those streams to receive updates from. repeated SyncCookie sync_pos = 1; } +// SyncStreamsResponse is a stream of updates that the client receives for streams it subscribed to within a streams +// sync session. message SyncStreamsResponse { + // sync_id is the id of the sync session. string sync_id = 1; + // sync_op marks the type of update. SyncOp sync_op = 2; + // stream indicates an update of a stream. + // only set when sync_op = SYNC_UPDATE StreamAndCookie stream = 3; + // pong_nonce is returned after a ping request was made to the sync session through PingSync. + // Set with the ping value from the PingSync request when sync_op = SYNC_PONG string pong_nonce = 4; + // stream_id is set when sync_op = SYNC_DOWN and indicates it will not receive updates anymore for this stream. 
+ // If the client is still is interested in updates for this stream it must re-add the stream to the sync session. + bytes stream_id = 5; } +// AddStreamToSyncRequest is a request to add a stream to an existing streams sync session. message AddStreamToSyncRequest { + // sync_id is the id of the sync session. string sync_id = 1; + // sync_pos identifies the stream and position in the stream to receive updates from. SyncCookie sync_pos = 2; } message AddStreamToSyncResponse {} +// RemoveStreamFromSyncRequest stops the client to receive updates from this stream in the sync session. +// Note that due to buffering in the stream it is possible still receives several updates for this stream after it was +// removed. message RemoveStreamFromSyncRequest { string sync_id = 1; bytes stream_id = 2; @@ -716,14 +735,19 @@ message RemoveStreamFromSyncRequest { message RemoveStreamFromSyncResponse {} +// CancelSyncRequest cancels the sync session. message CancelSyncRequest { + // sync_id is the unique id of the sync session. string sync_id = 1; } message CancelSyncResponse {} +// PingSyncRequest is a request to receive a pong in the sync session stream. message PingSyncRequest { + // sync_id is the unique id of the sync session. string sync_id = 1; + // nonce is the pong to return in the sync session stream. string nonce = 2; } @@ -760,6 +784,7 @@ enum SyncOp { SYNC_CLOSE = 2; // close the sync SYNC_UPDATE = 3; // update from server SYNC_PONG = 4; // respond to the ping message from the client. + SYNC_DOWN = 5; // indication that stream updates could (temporarily) not be provided } enum MembershipOp {