From ab3e4a49e99c101195d83c0be586a6f7533a9a05 Mon Sep 17 00:00:00 2001 From: Alexandr Burdiyan Date: Sat, 2 Sep 2023 14:09:44 +0200 Subject: [PATCH] Implement missing Group APIs (#1415) * Implement missing Group APIs This PR implements missing group APIs: - ListDocumentGroups: to search groups a document is published to. - ListAccountGroups: to search groups an account is a member of. The change is massive, because it also includes a breaking change to make document IDs unforgeable, so this doesn't include a migration, but instead marks a new beginning in the list of migrations. I.e. your current data directory needs to be removed. It also bumps the network protocol version, to prevent being able to talk to older peers, just to avoid any weird issues. Frontend Protobuf codegen package changed its name, so all the generated proto files for JS are regenerated. * Fix the build * frontend: implement new groups APIs * stupid package dep * hd -> hm. better url matching * enable tests in all packages * HD -> HM in files and variables * hd:// => hm:// --------- Co-authored-by: Horacio Herrera --- .github/workflows/validate-site.yml | 28 +- .../daemon/api/accounts/v1alpha/accounts.go | 8 +- .../daemon/api/documents/v1alpha/changes.go | 2 +- .../api/documents/v1alpha/content_graph.go | 19 +- .../documents/v1alpha/content_graph_test.go | 4 +- .../api/documents/v1alpha/document_model.go | 64 +- .../documents/v1alpha/document_model_test.go | 96 +- .../daemon/api/documents/v1alpha/documents.go | 60 +- .../daemon/api/entities/v1alpha/entities.go | 12 +- backend/daemon/api/groups/v1alpha/groups.go | 327 +- .../daemon/api/groups/v1alpha/groups_test.go | 193 +- backend/daemon/daemon_e2e_test.go | 11 +- backend/daemon/storage/BUILD.plz | 5 +- backend/daemon/storage/gen.go | 21 +- backend/daemon/storage/litext/litext.go | 14 + backend/daemon/storage/litext/litext.h | 11 + backend/daemon/storage/litext/mycount.c | 48 + .../daemon/storage/litext}/sha1.c | 0 
backend/daemon/storage/migrations.go | 95 +- backend/daemon/storage/migrations_test.go | 13 + backend/daemon/storage/schema.gen.go | 485 +- backend/daemon/storage/schema.gensum | 4 +- backend/daemon/storage/schema.go | 24 + backend/daemon/storage/schema.sql | 299 +- backend/daemon/storage/schema_test.go | 111 + backend/daemon/storage/sqlfmt.go | 4 +- backend/daemon/storage/sqlite.go | 13 +- backend/daemon/storage/sqlite_test.go | 1 + backend/genproto/groups/v1alpha/groups.pb.go | 830 +- .../genproto/groups/v1alpha/groups_grpc.pb.go | 76 + backend/hyper/entity.go | 104 +- backend/hyper/entity_test.go | 7 +- backend/hyper/hyper.go | 46 +- backend/hyper/hypersql/queries.gen.go | 506 +- backend/hyper/hypersql/queries.gensum | 4 +- backend/hyper/hypersql/queries.go | 309 +- backend/hyper/hypersql/queries.manual.go | 160 + backend/hyper/indexing.go | 421 +- backend/hyper/terra.go | 7 +- backend/lndhub/lndhubsql/lndhub.go | 4 +- backend/lndhub/lndhubsql/queries.gen.go | 20 +- backend/lndhub/lndhubsql/queries.gensum | 4 +- backend/lndhub/lndhubsql/queries.go | 16 +- backend/mttnet/list_objects.go | 4 +- backend/mttnet/mttnet.go | 8 +- backend/mttnet/providing.go | 4 +- backend/mttnet/site.go | 28 +- backend/mttnet/sitesV2.go | 2 +- backend/mttnet/sitesql/queries.gen.go | 80 +- backend/mttnet/sitesql/queries.gensum | 4 +- backend/mttnet/sitesql/queries.go | 68 +- backend/pkg/maputil/maputil.go | 20 + backend/pkg/sqlitegen/qb/qb.go | 42 + backend/pkg/sqlitegen/schema.go | 19 +- backend/pkg/sqlitegen/sqlitegen.go | 22 +- backend/wallet/walletsql/queries.gen.go | 32 +- backend/wallet/walletsql/queries.gensum | 4 +- backend/wallet/walletsql/queries.go | 20 +- backend/wallet/walletsql/wallet_test.go | 76 +- build/rules/mintter/mintter.build_defs | 10 +- build/tools/BUILD.plz | 2 +- dev | 46 +- docs/docs/document-linking.md | 14 +- frontend/apps/desktop/package.json | 1 + frontend/apps/site/account-page.tsx | 10 +- .../apps/site/pages/g/[groupId]/index.tsx | 16 +- 
frontend/apps/site/publication-metadata.tsx | 32 +- frontend/apps/site/publication-page.tsx | 39 +- .../site/server/{json-hd.ts => json-hm.ts} | 50 +- frontend/apps/site/server/routers/_app.ts | 53 +- frontend/apps/site/server/to-json-hd.ts | 46 - frontend/apps/site/server/to-json-hm.ts | 46 + frontend/apps/site/web-tipping.tsx | 8 +- frontend/packages/app/package.json | 2 +- ...ditor.test.ts => BlockNoteEditor._test.ts} | 0 .../src/blocknote-core/BlockNoteExtensions.ts | 6 +- ...ion.test.ts => blockManipulation._test.ts} | 0 ...ons.test.ts => formatConversions._test.ts} | 0 ...sions.test.ts => nodeConversions._test.ts} | 0 .../extensions/Blocks/api/defaultBlocks.ts | 5 +- .../HyperlinkToolbarPlugin.ts | 2 +- .../SlashMenu/defaultSlashMenuItems.tsx | 20 +- .../SlashMenu/defaultReactSlashMenuItems.tsx | 4 +- .../src/blocknote-react/hooks/useBlockNote.ts | 4 +- .../client/__tests__/editor-to-server.test.ts | 12 +- .../client/__tests__/server-to-editor.test.ts | 19 +- .../app/src/client/editor-to-server.ts | 6 +- .../packages/app/src/client/example-docs.ts | 4 +- frontend/packages/app/src/client/schema.ts | 4 +- .../app/src/client/server-to-editor.ts | 8 +- .../src/components/rightside-block-widget.tsx | 10 +- .../app/src/components/titlebar/common.tsx | 4 +- .../src/components/titlebar/publish-share.tsx | 58 +- frontend/packages/app/src/editor/editor.css | 2 +- frontend/packages/app/src/editor/editor.tsx | 2 +- .../packages/app/src/editor/embed-block.tsx | 12 +- frontend/packages/app/src/editor/file.tsx | 22 +- .../src/editor/heading-component-plugin.tsx | 2 +- .../app/src/editor/hyperdocs-link-plugin.tsx | 14 +- frontend/packages/app/src/editor/image.tsx | 20 +- frontend/packages/app/src/editor/video.tsx | 20 +- frontend/packages/app/src/models/documents.ts | 43 +- frontend/packages/app/src/models/groups.ts | 32 +- .../packages/app/src/models/query-keys.ts | 2 + frontend/packages/app/src/models/web-links.ts | 18 +- frontend/packages/app/src/open-url.ts | 16 +- 
.../packages/app/src/pages/account-page.tsx | 41 +- frontend/packages/app/src/pages/draft.tsx | 6 +- frontend/packages/app/src/pages/group.tsx | 22 +- .../packages/app/src/pages/publication.tsx | 11 +- .../tiptap-extension-link/helpers/autolink.ts | 2 +- .../helpers/pasteHandler.ts | 4 +- .../app/src/tiptap-extension-link/link.ts | 4 +- frontend/packages/shared/package.json | 2 +- ...unts_connectweb.ts => accounts_connect.ts} | 2 +- .../accounts/v1alpha/accounts_pb.ts | 2 +- ...daemon_connectweb.ts => daemon_connect.ts} | 2 +- .../.generated/daemon/v1alpha/daemon_pb.ts | 2 +- ...anges_connectweb.ts => changes_connect.ts} | 2 +- .../documents/v1alpha/changes_pb.ts | 2 +- ...ents_connectweb.ts => comments_connect.ts} | 2 +- .../documents/v1alpha/comments_pb.ts | 2 +- ...connectweb.ts => content_graph_connect.ts} | 2 +- .../documents/v1alpha/content_graph_pb.ts | 2 +- ...nts_connectweb.ts => documents_connect.ts} | 2 +- .../documents/v1alpha/documents_pb.ts | 2 +- ...onnectweb.ts => web_publishing_connect.ts} | 2 +- .../documents/v1alpha/web_publishing_pb.ts | 2 +- ...ties_connectweb.ts => entities_connect.ts} | 4 +- .../entities/v1alpha/entities_pb.ts | 2 +- ...groups_connectweb.ts => groups_connect.ts} | 30 +- .../.generated/groups/v1alpha/groups_pb.ts | 358 +- ...ng_connectweb.ts => networking_connect.ts} | 2 +- .../networking/v1alpha/networking_pb.ts | 2 +- .../shared/src/client/editor-types.ts | 4 +- .../src/client/hyperdocs-presentation.ts | 16 +- frontend/packages/shared/src/client/index.ts | 18 +- .../shared/src/client/server-to-editor.ts | 8 +- .../utils/__tests__/get-ids-from-url.test.ts | 75 + frontend/packages/shared/src/utils/date.ts | 4 +- frontend/packages/shared/src/utils/doc-url.ts | 4 +- .../shared/src/utils/get-ids-from-url.ts | 97 +- .../shared/src/utils/hyperdocs-link.ts | 6 +- go.mod | 1 - go.sum | 2 - package.json | 8 +- proto/accounts/v1alpha/js.gensum | 2 +- proto/daemon/v1alpha/js.gensum | 2 +- proto/documents/v1alpha/js.gensum | 2 +- 
proto/entities/v1alpha/entities.proto | 2 +- proto/entities/v1alpha/go.gensum | 2 +- proto/entities/v1alpha/js.gensum | 4 +- proto/groups/v1alpha/go.gensum | 4 +- proto/groups/v1alpha/groups.proto | 91 +- proto/groups/v1alpha/js.gensum | 4 +- proto/networking/v1alpha/js.gensum | 2 +- third_party/sqlite/c/sqlite3.c | 34230 +++++++++++----- third_party/sqlite/sha1.h | 11 - third_party/sqlite/sqlite.go | 5 +- third_party/sqlite/sqlite3.h | 1014 +- third_party/sqlite/sqlite3ext.h | 42 +- third_party/sqlite/sqlitex/exec.go | 127 + yarn.lock | 875 +- 163 files changed, 29684 insertions(+), 13161 deletions(-) create mode 100644 backend/daemon/storage/litext/litext.go create mode 100644 backend/daemon/storage/litext/litext.h create mode 100644 backend/daemon/storage/litext/mycount.c rename {third_party/sqlite => backend/daemon/storage/litext}/sha1.c (100%) create mode 100644 backend/daemon/storage/schema_test.go create mode 100644 backend/hyper/hypersql/queries.manual.go rename frontend/apps/site/server/{json-hd.ts => json-hm.ts} (69%) delete mode 100644 frontend/apps/site/server/to-json-hd.ts create mode 100644 frontend/apps/site/server/to-json-hm.ts rename frontend/packages/app/src/blocknote-core/{BlockNoteEditor.test.ts => BlockNoteEditor._test.ts} (100%) rename frontend/packages/app/src/blocknote-core/api/blockManipulation/{blockManipulation.test.ts => blockManipulation._test.ts} (100%) rename frontend/packages/app/src/blocknote-core/api/formatConversions/{formatConversions.test.ts => formatConversions._test.ts} (100%) rename frontend/packages/app/src/blocknote-core/api/nodeConversions/{nodeConversions.test.ts => nodeConversions._test.ts} (100%) rename frontend/packages/shared/src/client/.generated/accounts/v1alpha/{accounts_connectweb.ts => accounts_connect.ts} (97%) rename frontend/packages/shared/src/client/.generated/daemon/v1alpha/{daemon_connectweb.ts => daemon_connect.ts} (97%) rename 
frontend/packages/shared/src/client/.generated/documents/v1alpha/{changes_connectweb.ts => changes_connect.ts} (95%) rename frontend/packages/shared/src/client/.generated/documents/v1alpha/{comments_connectweb.ts => comments_connect.ts} (97%) rename frontend/packages/shared/src/client/.generated/documents/v1alpha/{content_graph_connectweb.ts => content_graph_connect.ts} (92%) rename frontend/packages/shared/src/client/.generated/documents/v1alpha/{documents_connectweb.ts => documents_connect.ts} (98%) rename frontend/packages/shared/src/client/.generated/documents/v1alpha/{web_publishing_connectweb.ts => web_publishing_connect.ts} (99%) rename frontend/packages/shared/src/client/.generated/entities/v1alpha/{entities_connectweb.ts => entities_connect.ts} (88%) rename frontend/packages/shared/src/client/.generated/groups/v1alpha/{groups_connectweb.ts => groups_connect.ts} (72%) rename frontend/packages/shared/src/client/.generated/networking/v1alpha/{networking_connectweb.ts => networking_connect.ts} (95%) create mode 100644 frontend/packages/shared/src/utils/__tests__/get-ids-from-url.test.ts delete mode 100644 third_party/sqlite/sha1.h diff --git a/.github/workflows/validate-site.yml b/.github/workflows/validate-site.yml index dd1ca22f58..5d996a4e90 100644 --- a/.github/workflows/validate-site.yml +++ b/.github/workflows/validate-site.yml @@ -6,21 +6,21 @@ on: - master - electron-ci paths: - - '.github/workflows/validate-site.yml' - - 'frontend/apps/site/**' - - 'frotend/packages/shared/**' - - 'frotend/packages/ui/**' - - 'package.json' - - 'frontend/apps/site/package.json' + - ".github/workflows/validate-site.yml" + - "frontend/apps/site/**" + - "frotend/packages/shared/**" + - "frotend/packages/ui/**" + - "package.json" + - "frontend/apps/site/package.json" pull_request: paths: - - '.github/workflows/validate-site.yml' - - 'frontend/apps/site/**' - - 'frotend/packages/**' - - 'package.json' - - 'frontend/apps/site/package.json' + - 
".github/workflows/validate-site.yml" + - "frontend/apps/site/**" + - "frotend/packages/**" + - "package.json" + - "frontend/apps/site/package.json" branches-ignore: - - 'renovate/**' + - "renovate/**" concurrency: group: ${{ github.workflow }}-${{ github.ref }} @@ -36,7 +36,7 @@ jobs: uses: actions/setup-node@v3 with: node-version: 20 - cache: 'yarn' + cache: "yarn" - name: Install Frontend Dependencies env: @@ -49,4 +49,4 @@ jobs: yarn validate - name: Run Site tests - run: yarn site:test \ No newline at end of file + run: yarn site:test diff --git a/backend/daemon/api/accounts/v1alpha/accounts.go b/backend/daemon/api/accounts/v1alpha/accounts.go index 4ff37652d0..40c343b525 100644 --- a/backend/daemon/api/accounts/v1alpha/accounts.go +++ b/backend/daemon/api/accounts/v1alpha/accounts.go @@ -97,7 +97,7 @@ func (srv *Server) GetAccount(ctx context.Context, in *accounts.GetAccountReques return nil, status.Errorf(codes.NotFound, "account %s not found", aids) } - entity, err := srv.blobs.LoadEntity(ctx, hyper.EntityID("hd://a/"+aids)) + entity, err := srv.blobs.LoadEntity(ctx, hyper.EntityID("hm://a/"+aids)) if err != nil { return nil, err } @@ -180,7 +180,7 @@ func (srv *Server) UpdateProfile(ctx context.Context, in *accounts.Profile) (*ac // UpdateProfile is public so it can be called from sites. 
func UpdateProfile(ctx context.Context, me core.Identity, blobs *hyper.Storage, in *accounts.Profile) error { - eid := hyper.EntityID("hd://a/" + me.Account().Principal().String()) + eid := hyper.EntityID("hm://a/" + me.Account().Principal().String()) e, err := blobs.LoadEntity(ctx, eid) if err != nil { @@ -279,7 +279,7 @@ func (srv *Server) ListAccounts(ctx context.Context, in *accounts.ListAccountsRe return nil, err } - entities, err := srv.blobs.ListEntities(ctx, "hd://a/") + entities, err := srv.blobs.ListEntities(ctx, "hm://a/") if err != nil { return nil, err } @@ -291,7 +291,7 @@ func (srv *Server) ListAccounts(ctx context.Context, in *accounts.ListAccountsRe } for _, e := range entities { - aid := e.TrimPrefix("hd://a/") + aid := e.TrimPrefix("hm://a/") if aid == mine { continue } diff --git a/backend/daemon/api/documents/v1alpha/changes.go b/backend/daemon/api/documents/v1alpha/changes.go index bdbbe657b9..bede5cd1da 100644 --- a/backend/daemon/api/documents/v1alpha/changes.go +++ b/backend/daemon/api/documents/v1alpha/changes.go @@ -39,7 +39,7 @@ func (api *Server) ListChanges(ctx context.Context, in *documents.ListChangesReq return nil, status.Errorf(codes.InvalidArgument, "must provide document id") } - eid := hyper.EntityID("hd://d/" + in.DocumentId) + eid := hyper.EntityID("hm://d/" + in.DocumentId) out := &documents.ListChangesResponse{} diff --git a/backend/daemon/api/documents/v1alpha/content_graph.go b/backend/daemon/api/documents/v1alpha/content_graph.go index f692fd73f1..004e9f9db1 100644 --- a/backend/daemon/api/documents/v1alpha/content_graph.go +++ b/backend/daemon/api/documents/v1alpha/content_graph.go @@ -20,11 +20,16 @@ func (srv *Server) ListCitations(ctx context.Context, in *documents.ListCitation return nil, status.Error(codes.InvalidArgument, "must specify document ID") } - eid := hyper.EntityID("hd://d/" + in.DocumentId) + targetEntity := "hm://d/" + in.DocumentId var backlinks []hypersql.BacklinksForEntityResult if err := 
srv.blobs.Query(ctx, func(conn *sqlite.Conn) error { - list, err := hypersql.BacklinksForEntity(conn, string(eid)) + edb, err := hypersql.EntitiesLookupID(conn, targetEntity) + if err != nil { + return err + } + + list, err := hypersql.BacklinksForEntity(conn, edb.EntitiesID) backlinks = list return err }); err != nil { @@ -37,20 +42,20 @@ func (srv *Server) ListCitations(ctx context.Context, in *documents.ListCitation for i, link := range backlinks { var ld hyper.LinkData - if err := json.Unmarshal(link.ContentLinksViewData, &ld); err != nil { + if err := json.Unmarshal(link.BlobAttrsExtra, &ld); err != nil { return nil, fmt.Errorf("failed to decode link data: %w", err) } - src := cid.NewCidV1(uint64(link.ContentLinksViewSourceBlobCodec), link.ContentLinksViewSourceBlobMultihash) + src := cid.NewCidV1(uint64(link.BlobsCodec), link.BlobsMultihash) resp.Links[i] = &documents.Link{ Source: &documents.LinkNode{ - DocumentId: hyper.EntityID(link.ContentLinksViewSourceEID).TrimPrefix("hd://d/"), - BlockId: ld.SourceBlock, + DocumentId: hyper.EntityID(link.EntitiesEID).TrimPrefix("hm://d/"), + BlockId: link.BlobAttrsAnchor, Version: src.String(), }, Target: &documents.LinkNode{ - DocumentId: hyper.EntityID(link.ContentLinksViewTargetEID).TrimPrefix("hd://d/"), + DocumentId: in.DocumentId, BlockId: ld.TargetFragment, Version: ld.TargetVersion, }, diff --git a/backend/daemon/api/documents/v1alpha/content_graph_test.go b/backend/daemon/api/documents/v1alpha/content_graph_test.go index 7ee718c98e..e8044e6e3f 100644 --- a/backend/daemon/api/documents/v1alpha/content_graph_test.go +++ b/backend/daemon/api/documents/v1alpha/content_graph_test.go @@ -48,7 +48,7 @@ func TestBacklinks(t *testing.T) { Starts: []int32{0}, Ends: []int32{5}, Attributes: map[string]string{ - "url": "hd://d/" + pub.Document.Id + "?v=" + pub.Version + "#b1", + "url": "hm://d/" + pub.Document.Id + "?v=" + pub.Version + "#b1", }, }, }, @@ -64,7 +64,7 @@ func TestBacklinks(t *testing.T) { Starts: 
[]int32{0}, Ends: []int32{5}, Attributes: map[string]string{ - "url": "hd://d/" + pub.Document.Id + "?v=" + pub.Version, + "url": "hm://d/" + pub.Document.Id + "?v=" + pub.Version, }, }, }, diff --git a/backend/daemon/api/documents/v1alpha/document_model.go b/backend/daemon/api/documents/v1alpha/document_model.go index 6cfa45f021..7d18c23014 100644 --- a/backend/daemon/api/documents/v1alpha/document_model.go +++ b/backend/daemon/api/documents/v1alpha/document_model.go @@ -28,6 +28,7 @@ type docModel struct { tree *Tree patch map[string]any oldDraft cid.Cid + oldChange hyper.Change done bool nextHLC hlc.Time origins map[string]cid.Cid // map of abbreviated origin hashes to actual cids; workaround, should not be necessary. @@ -64,6 +65,7 @@ func (dm *docModel) restoreDraft(c cid.Cid, ch hyper.Change) (err error) { panic("BUG: restoring draft when patch is not empty") } dm.oldDraft = c + dm.oldChange = ch if len(dm.e.Heads()) != len(ch.Deps) { return fmt.Errorf("failed to restore draft: state has %d heads while draft change has %d deps", len(dm.e.Heads()), len(ch.Deps)) @@ -81,6 +83,7 @@ func (dm *docModel) restoreDraft(c cid.Cid, ch hyper.Change) (err error) { } dm.nextHLC = dm.e.NextTimestamp() + moves := dm.patch["moves"] delete(dm.patch, "moves") @@ -138,18 +141,18 @@ func (dm *docModel) SetCreateTime(ct time.Time) error { return fmt.Errorf("create time is already set") } - dm.patch["createTime"] = ct.Unix() + dm.patch["createTime"] = int(ct.Unix()) return nil } func (dm *docModel) SetAuthor(author core.Principal) error { - _, ok := dm.e.Get("author") + _, ok := dm.e.Get("owner") if ok { return fmt.Errorf("author is already set") } - dm.patch["author"] = []byte(author) + dm.patch["owner"] = []byte(author) return nil } @@ -212,7 +215,12 @@ func (dm *docModel) Change() (hb hyper.Blob, err error) { dm.cleanupPatch() - return dm.e.CreateChange(dm.nextHLC, dm.signer, dm.delegation, dm.patch) + action := dm.oldChange.Action + if action == "" { + action = "Create" + } + 
+ return dm.e.CreateChange(dm.nextHLC, dm.signer, dm.delegation, dm.patch, hyper.WithAction(action)) } func (dm *docModel) Commit(ctx context.Context, bs *hyper.Storage) (hb hyper.Blob, err error) { @@ -305,44 +313,28 @@ func (dm *docModel) cleanupPatch() { func (dm *docModel) hydrate(ctx context.Context, blobs *hyper.Storage) (*documents.Document, error) { e := dm.e - docpb := &documents.Document{ - Id: e.ID().TrimPrefix("hd://d/"), - Eid: string(e.ID()), - } - { - v, ok := e.Get("createTime") - if !ok { - return nil, fmt.Errorf("all documents must have create time") - } - switch vv := v.(type) { - case time.Time: - docpb.CreateTime = timestamppb.New(vv) - case int: - docpb.CreateTime = timestamppb.New(time.Unix(int64(vv), 0).UTC()) - default: - return nil, fmt.Errorf("unknown type %T for createTime field", v) - } - } + first := e.AppliedChanges()[0] - docpb.UpdateTime = timestamppb.New(e.LastChangeTime().Time()) + createTime, ok := first.Data.Patch["createTime"].(int) + if !ok { + return nil, fmt.Errorf("document must have createTime field") + } - { - v, ok := e.Get("author") - if !ok { - return nil, fmt.Errorf("all documents must have author") - } + owner, ok := first.Data.Patch["owner"].([]byte) + if !ok { + return nil, fmt.Errorf("document must have owner field") + } - switch vv := v.(type) { - case core.Principal: - docpb.Author = vv.String() - case []byte: - docpb.Author = core.Principal(vv).String() - default: - return nil, fmt.Errorf("unknown type %T for document author", v) - } + docpb := &documents.Document{ + Id: e.ID().TrimPrefix("hm://d/"), + Eid: string(e.ID()), + CreateTime: timestamppb.New(time.Unix(int64(createTime), 0)), + Author: core.Principal(owner).String(), } + docpb.UpdateTime = timestamppb.New(e.LastChangeTime().Time()) + { v, ok := e.Get("title") if ok { diff --git a/backend/daemon/api/documents/v1alpha/document_model_test.go b/backend/daemon/api/documents/v1alpha/document_model_test.go index 7cada2368c..df6d7d2de5 100644 --- 
a/backend/daemon/api/documents/v1alpha/document_model_test.go +++ b/backend/daemon/api/documents/v1alpha/document_model_test.go @@ -2,10 +2,12 @@ package documents import ( "context" + "mintter/backend/core" "mintter/backend/core/coretest" daemon "mintter/backend/daemon/api/daemon/v1alpha" "mintter/backend/daemon/storage" documents "mintter/backend/genproto/documents/v1alpha" + "mintter/backend/hlc" "mintter/backend/hyper" "mintter/backend/logging" "mintter/backend/pkg/must" @@ -21,25 +23,16 @@ func TestDocument_LoadingDrafts(t *testing.T) { db := storage.MakeTestDB(t) blobs := hyper.NewStorage(db, logging.New("mintter/hyper", "debug")) ctx := context.Background() - delegation, err := daemon.Register(ctx, blobs, alice.Account, alice.Device.PublicKey, time.Now()) - require.NoError(t, err) - entity := hyper.NewEntity(hyper.EntityID("hd://d/" + "doc-1")) - dm, err := newDocModel(entity, alice.Device, delegation) - require.NoError(t, err) - - dm.nextHLC = dm.e.NextTimestamp() // TODO(burdiyan): this is a workaround that should not be necessary. 
- require.NoError(t, dm.SetAuthor(alice.Account.Principal())) - require.NoError(t, dm.SetCreateTime(time.Now())) - - _, err = dm.Commit(ctx, blobs) + dm := newTestDocModel(t, blobs, alice.Account, alice.Device) + _, err := dm.Commit(ctx, blobs) require.NoError(t, err) - entity, err = blobs.LoadEntity(ctx, "hd://d/doc-1") + entity, err := blobs.LoadEntity(ctx, dm.e.ID()) require.NoError(t, err) require.Nil(t, entity) - entity, err = blobs.LoadDraftEntity(ctx, "hd://d/doc-1") + entity, err = blobs.LoadDraftEntity(ctx, dm.e.ID()) require.NoError(t, err) require.NotNil(t, entity) } @@ -48,16 +41,7 @@ func TestDocument_DeleteTurnaround(t *testing.T) { alice := coretest.NewTester("alice") db := storage.MakeTestDB(t) blobs := hyper.NewStorage(db, logging.New("mintter/hyper", "debug")) - ctx := context.Background() - delegation, err := daemon.Register(ctx, blobs, alice.Account, alice.Device.PublicKey, time.Now()) - require.NoError(t, err) - entity := hyper.NewEntity(hyper.EntityID("hd://d/" + "doc-1")) - dm, err := newDocModel(entity, alice.Device, delegation) - dm.nextHLC = dm.e.NextTimestamp() // TODO(burdiyan): this is a workaround that should not be necessary. 
- require.NoError(t, err) - - require.NoError(t, dm.SetAuthor(alice.Account.Principal())) - require.NoError(t, dm.SetCreateTime(time.Now())) + dm := newTestDocModel(t, blobs, alice.Account, alice.Device) require.NoError(t, dm.MoveBlock("b1", "", "")) require.NoError(t, dm.ReplaceBlock(&documents.Block{ @@ -105,16 +89,7 @@ func TestDocument_Cleanup(t *testing.T) { alice := coretest.NewTester("alice") db := storage.MakeTestDB(t) blobs := hyper.NewStorage(db, logging.New("mintter/hyper", "debug")) - ctx := context.Background() - delegation, err := daemon.Register(ctx, blobs, alice.Account, alice.Device.PublicKey, time.Now()) - require.NoError(t, err) - entity := hyper.NewEntity(hyper.EntityID("hd://d/" + "doc-1")) - dm, err := newDocModel(entity, alice.Device, delegation) - dm.nextHLC = dm.e.NextTimestamp() // TODO(burdiyan): this is a workaround that should not be necessary. - require.NoError(t, err) - - require.NoError(t, dm.SetAuthor(alice.Account.Principal())) - require.NoError(t, dm.SetCreateTime(time.Now())) + dm := newTestDocModel(t, blobs, alice.Account, alice.Device) require.NoError(t, dm.MoveBlock("b1", "", "")) require.NoError(t, dm.ReplaceBlock(&documents.Block{ @@ -202,31 +177,23 @@ func TestDocumentUpdatePublished(t *testing.T) { db := storage.MakeTestDB(t) blobs := hyper.NewStorage(db, logging.New("mintter/hyper", "debug")) ctx := context.Background() - delegation, err := daemon.Register(ctx, blobs, alice.Account, alice.Device.PublicKey, time.Now()) - require.NoError(t, err) - eid := hyper.EntityID("hd://d/" + "doc-1") - entity := hyper.NewEntity(eid) - dm, err := newDocModel(entity, alice.Device, delegation) - dm.nextHLC = dm.e.NextTimestamp() // TODO(burdiyan): this is a workaround that should not be necessary. 
- require.NoError(t, err) + dm := newTestDocModel(t, blobs, alice.Account, alice.Device) - require.NoError(t, dm.SetAuthor(alice.Account.Principal())) - require.NoError(t, dm.SetCreateTime(time.Now())) require.NoError(t, dm.SetTitle("My document")) hb, err := dm.Commit(ctx, blobs) require.NoError(t, err) - _, err = blobs.PublishDraft(ctx, eid) + _, err = blobs.PublishDraft(ctx, dm.e.ID()) require.NoError(t, err) - entity, err = blobs.LoadEntity(ctx, eid) + entity, err := blobs.LoadEntity(ctx, dm.e.ID()) require.NoError(t, err) _, ok := entity.Heads()[hb.CID] require.True(t, ok, "entity must have last published change as heads") - hb2, err := entity.CreateChange(entity.NextTimestamp(), alice.Device, delegation, map[string]any{}) + hb2, err := entity.CreateChange(entity.NextTimestamp(), alice.Device, dm.delegation, map[string]any{}, hyper.WithAction("Update")) require.NoError(t, err) require.Equal(t, []cid.Cid{hb.CID}, hb2.Decoded.(hyper.Change).Deps, "new change must have old one in deps") @@ -248,7 +215,7 @@ func TestBug_RedundantMoves(t *testing.T) { // Create draft. var c1 hyper.Blob { - entity := hyper.NewEntity("hd://d/foo") + entity := hyper.NewEntity("hm://d/foo") model := must.Do2(newDocModel(entity, alice.Device, kd.CID)) must.Do(model.SetCreateTime(time.Now())) must.Do(model.SetTitle("Hello World!")) @@ -259,7 +226,7 @@ func TestBug_RedundantMoves(t *testing.T) { // Update draft in place. { - entity := hyper.NewEntity("hd://d/foo") + entity := hyper.NewEntity("hm://d/foo") model := must.Do2(newDocModel(entity, alice.Device, kd.CID)) must.Do(model.restoreDraft(c1.CID, c1.Decoded.(hyper.Change))) must.Do(model.MoveBlock("b1", "", "")) @@ -278,7 +245,7 @@ func TestBug_RedundantMoves(t *testing.T) { // Create a new change on top of the previous. 
var c2 hyper.Blob { - entity := hyper.NewEntity("hd://d/foo") + entity := hyper.NewEntity("hm://d/foo") must.Do(entity.ApplyChange(c1.CID, c1.Decoded.(hyper.Change))) model := must.Do2(newDocModel(entity, alice.Device, kd.CID)) model.nextHLC = entity.NextTimestamp() @@ -295,7 +262,7 @@ func TestBug_RedundantMoves(t *testing.T) { } // Try to apply changes one by one. - entity := hyper.NewEntity("hd://d/foo") + entity := hyper.NewEntity("hm://d/foo") must.Do(entity.ApplyChange(c1.CID, c1.Decoded.(hyper.Change))) must.Do(entity.ApplyChange(c2.CID, c2.Decoded.(hyper.Change))) model := must.Do2(newDocModel(entity, alice.Device, kd.CID)) @@ -309,7 +276,7 @@ func TestBug_DraftWithMultipleDeps(t *testing.T) { // Create document. var c1 hyper.Blob { - entity := hyper.NewEntity("hd://d/foo") + entity := hyper.NewEntity("hm://d/foo") model := must.Do2(newDocModel(entity, alice.Device, kd.CID)) must.Do(model.SetCreateTime(time.Now())) must.Do(model.SetTitle("Hello World!")) @@ -321,7 +288,7 @@ func TestBug_DraftWithMultipleDeps(t *testing.T) { // Create two concurrent changes. var c2 hyper.Blob { - entity := hyper.NewEntity("hd://d/foo") + entity := hyper.NewEntity("hm://d/foo") must.Do(entity.ApplyChange(c1.CID, c1.Decoded.(hyper.Change))) model := must.Do2(newDocModel(entity, alice.Device, kd.CID)) model.nextHLC = entity.NextTimestamp() @@ -331,7 +298,7 @@ func TestBug_DraftWithMultipleDeps(t *testing.T) { var c3 hyper.Blob { - entity := hyper.NewEntity("hd://d/foo") + entity := hyper.NewEntity("hm://d/foo") must.Do(entity.ApplyChange(c1.CID, c1.Decoded.(hyper.Change))) model := must.Do2(newDocModel(entity, alice.Device, kd.CID)) model.nextHLC = entity.NextTimestamp() @@ -342,7 +309,7 @@ func TestBug_DraftWithMultipleDeps(t *testing.T) { // Create draft from the all the changes. 
var draft hyper.Blob { - entity := hyper.NewEntity("hd://d/foo") + entity := hyper.NewEntity("hm://d/foo") must.Do(entity.ApplyChange(c1.CID, c1.Decoded.(hyper.Change))) must.Do(entity.ApplyChange(c2.CID, c2.Decoded.(hyper.Change))) must.Do(entity.ApplyChange(c3.CID, c3.Decoded.(hyper.Change))) @@ -356,7 +323,7 @@ func TestBug_DraftWithMultipleDeps(t *testing.T) { // Update the draft in place. { - entity := hyper.NewEntity("hd://d/foo") + entity := hyper.NewEntity("hm://d/foo") must.Do(entity.ApplyChange(c1.CID, c1.Decoded.(hyper.Change))) must.Do(entity.ApplyChange(c2.CID, c2.Decoded.(hyper.Change))) must.Do(entity.ApplyChange(c3.CID, c3.Decoded.(hyper.Change))) @@ -371,3 +338,24 @@ func TestBug_DraftWithMultipleDeps(t *testing.T) { require.Equal(t, hyper.SortCIDs([]cid.Cid{c2.CID, c3.CID}), draft.Decoded.(hyper.Change).Deps, "draft must have concurrent changes as deps") } + +func newTestDocModel(t *testing.T, blobs *hyper.Storage, account, device core.KeyPair) *docModel { + clock := hlc.NewClock() + ts := clock.Now() + now := ts.Time().Unix() + + id, nonce := hyper.NewUnforgeableID(account.Principal(), nil, now) + delegation, err := daemon.Register(context.Background(), blobs, account, device.PublicKey, time.Now()) + require.NoError(t, err) + + entity := hyper.NewEntity(hyper.EntityID("hm://d/" + id)) + dm, err := newDocModel(entity, device, delegation) + require.NoError(t, err) + + dm.patch["nonce"] = nonce + dm.patch["createTime"] = int(now) + dm.patch["owner"] = []byte(account.Principal()) + dm.nextHLC = ts + + return dm +} diff --git a/backend/daemon/api/documents/v1alpha/documents.go b/backend/daemon/api/documents/v1alpha/documents.go index ac70a911aa..d13b3d2873 100644 --- a/backend/daemon/api/documents/v1alpha/documents.go +++ b/backend/daemon/api/documents/v1alpha/documents.go @@ -7,17 +7,15 @@ import ( "fmt" "mintter/backend/core" documents "mintter/backend/genproto/documents/v1alpha" + "mintter/backend/hlc" "mintter/backend/hyper" 
"mintter/backend/hyper/hypersql" "mintter/backend/logging" "mintter/backend/pkg/future" - "mintter/backend/pkg/must" - "time" "crawshaw.io/sqlite" "crawshaw.io/sqlite/sqlitex" "github.com/ipfs/go-cid" - "github.com/jaevor/go-nanoid" "github.com/libp2p/go-libp2p/core/peer" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -71,7 +69,7 @@ func (api *Server) CreateDraft(ctx context.Context, in *documents.CreateDraftReq } if in.ExistingDocumentId != "" { - eid := hyper.EntityID("hd://d/" + in.ExistingDocumentId) + eid := hyper.EntityID("hm://d/" + in.ExistingDocumentId) _, err := api.blobs.FindDraft(ctx, eid) if err == nil { @@ -101,7 +99,7 @@ func (api *Server) CreateDraft(ctx context.Context, in *documents.CreateDraftReq return nil, err } - hb, err := entity.CreateChange(entity.NextTimestamp(), me.DeviceKey(), del, map[string]any{}) + hb, err := entity.CreateChange(entity.NextTimestamp(), me.DeviceKey(), del, map[string]any{}, hyper.WithAction("Update")) if err != nil { return nil, err } @@ -113,10 +111,14 @@ func (api *Server) CreateDraft(ctx context.Context, in *documents.CreateDraftReq return api.GetDraft(ctx, &documents.GetDraftRequest{DocumentId: in.ExistingDocumentId}) } - docid := newDocumentID() - eid := hyper.EntityID("hd://d/" + docid) + clock := hlc.NewClock() + ts := clock.Now() + now := ts.Time().Unix() - entity := hyper.NewEntity(eid) + docid, nonce := hyper.NewUnforgeableID(me.Account().Principal(), nil, now) + eid := hyper.EntityID("hm://d/" + docid) + + entity := hyper.NewEntityWithClock(eid, clock) del, err := api.getDelegation(ctx) if err != nil { @@ -128,15 +130,10 @@ func (api *Server) CreateDraft(ctx context.Context, in *documents.CreateDraftReq return nil, err } - dm.nextHLC = dm.e.NextTimestamp() // TODO(burdiyan): this is a workaround that should not be necessary. 
- - now := time.Now() - if err := dm.SetCreateTime(now); err != nil { - return nil, err - } - if err := dm.SetAuthor(me.Account().Principal()); err != nil { - return nil, err - } + dm.nextHLC = ts + dm.patch["nonce"] = nonce + dm.patch["createTime"] = int(now) + dm.patch["owner"] = []byte(me.Account().Principal()) _, err = dm.Commit(ctx, api.blobs) if err != nil { @@ -163,7 +160,7 @@ func (api *Server) UpdateDraft(ctx context.Context, in *documents.UpdateDraftReq return nil, err } - eid := hyper.EntityID("hd://d/" + in.DocumentId) + eid := hyper.EntityID("hm://d/" + in.DocumentId) draft, err := api.blobs.LoadDraft(ctx, eid) if err != nil { @@ -235,7 +232,7 @@ func (api *Server) GetDraft(ctx context.Context, in *documents.GetDraftRequest) return nil, err } - eid := hyper.EntityID("hd://d/" + in.DocumentId) + eid := hyper.EntityID("hm://d/" + in.DocumentId) entity, err := api.blobs.LoadDraftEntity(ctx, eid) if err != nil { @@ -260,7 +257,7 @@ func (api *Server) GetDraft(ctx context.Context, in *documents.GetDraftRequest) // ListDrafts implements the corresponding gRPC method. 
func (api *Server) ListDrafts(ctx context.Context, in *documents.ListDraftsRequest) (*documents.ListDraftsResponse, error) { - entities, err := api.blobs.ListEntities(ctx, "hd://d/") + entities, err := api.blobs.ListEntities(ctx, "hm://d/") if err != nil { return nil, err } @@ -270,7 +267,7 @@ func (api *Server) ListDrafts(ctx context.Context, in *documents.ListDraftsReque } for _, e := range entities { - docid := e.TrimPrefix("hd://d/") + docid := e.TrimPrefix("hm://d/") draft, err := api.GetDraft(ctx, &documents.GetDraftRequest{ DocumentId: docid, }) @@ -289,7 +286,7 @@ func (api *Server) PublishDraft(ctx context.Context, in *documents.PublishDraftR return nil, status.Errorf(codes.InvalidArgument, "must specify document ID to get the draft") } - eid := hyper.EntityID("hd://d/" + in.DocumentId) + eid := hyper.EntityID("hm://d/" + in.DocumentId) oid, err := eid.CID() if err != nil { @@ -320,7 +317,7 @@ func (api *Server) DeleteDraft(ctx context.Context, in *documents.DeleteDraftReq return nil, status.Errorf(codes.InvalidArgument, "must specify draft ID to delete") } - eid := hyper.EntityID("hd://d/" + in.DocumentId) + eid := hyper.EntityID("hm://d/" + in.DocumentId) if err := api.blobs.DeleteDraft(ctx, eid); err != nil { return nil, err @@ -335,7 +332,7 @@ func (api *Server) GetPublication(ctx context.Context, in *documents.GetPublicat return nil, status.Errorf(codes.InvalidArgument, "must specify document ID to get the draft") } - eid := hyper.EntityID("hd://d/" + in.DocumentId) + eid := hyper.EntityID("hm://d/" + in.DocumentId) version := hyper.Version(in.Version) pub, err := api.loadPublication(ctx, eid, version, in.TrustedOnly) @@ -424,7 +421,7 @@ func (api *Server) DeletePublication(ctx context.Context, in *documents.DeletePu return nil, status.Errorf(codes.InvalidArgument, "must specify publication ID to delete") } - eid := hyper.EntityID("hd://d/" + in.DocumentId) + eid := hyper.EntityID("hm://d/" + in.DocumentId) if err := api.blobs.DeleteEntity(ctx, eid); 
err != nil { return nil, err @@ -435,7 +432,7 @@ func (api *Server) DeletePublication(ctx context.Context, in *documents.DeletePu // ListPublications implements the corresponding gRPC method. func (api *Server) ListPublications(ctx context.Context, in *documents.ListPublicationsRequest) (*documents.ListPublicationsResponse, error) { - entities, err := api.blobs.ListEntities(ctx, "hd://d/") + entities, err := api.blobs.ListEntities(ctx, "hm://d/") if err != nil { return nil, err } @@ -445,7 +442,7 @@ func (api *Server) ListPublications(ctx context.Context, in *documents.ListPubli } for _, e := range entities { - docid := e.TrimPrefix("hd://d/") + docid := e.TrimPrefix("hm://d/") pub, err := api.GetPublication(ctx, &documents.GetPublicationRequest{ DocumentId: docid, LocalOnly: true, @@ -504,12 +501,3 @@ func (api *Server) getDelegation(ctx context.Context) (cid.Cid, error) { return out, nil } - -// Almost same as standard nanoid, but removing non-alphanumeric chars, to get a bit nicer selectable string. -// Using a bit larger length to compensate. -// See https://zelark.github.io/nano-id-cc for playing around with collision resistance. 
-var nanogen = must.Do2(nanoid.CustomASCII("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz", 22)) - -func newDocumentID() string { - return nanogen() -} diff --git a/backend/daemon/api/entities/v1alpha/entities.go b/backend/daemon/api/entities/v1alpha/entities.go index d95d0d7bb9..eeaab71396 100644 --- a/backend/daemon/api/entities/v1alpha/entities.go +++ b/backend/daemon/api/entities/v1alpha/entities.go @@ -73,11 +73,11 @@ func getChange(conn *sqlite.Conn, c cid.Cid, id int64) (*entities.Change, error) out = &entities.Change{ Id: c.String(), Author: core.Principal(info.PublicKeysPrincipal).String(), - CreateTime: timestamppb.New(hlc.Unpack(info.HDChangesHlcTime).Time()), + CreateTime: timestamppb.New(hlc.Unpack(info.ChangesHLCTime).Time()), IsTrusted: info.IsTrusted > 0, } - deps, err := hypersql.ChangesGetDeps(conn, info.HDChangesBlob) + deps, err := hypersql.ChangesGetDeps(conn, info.ChangesBlob) if err != nil { return nil, err } @@ -106,11 +106,11 @@ func (api *Server) GetEntityTimeline(ctx context.Context, in *entities.GetEntity if err != nil { return err } - if eid.HDEntitiesID == 0 { + if eid.EntitiesID == 0 { return errutil.NotFound("no such entity %s", in.Id) } - changes, err := hypersql.ChangesInfoForEntity(conn, eid.HDEntitiesID) + changes, err := hypersql.ChangesInfoForEntity(conn, eid.EntitiesID) if err != nil { return err } @@ -127,13 +127,13 @@ func (api *Server) GetEntityTimeline(ctx context.Context, in *entities.GetEntity chpb := &entities.Change{ Id: cs, Author: core.Principal(ch.PublicKeysPrincipal).String(), - CreateTime: timestamppb.New(hlc.Unpack(ch.HDChangesHlcTime).Time()), + CreateTime: timestamppb.New(hlc.Unpack(ch.ChangesHLCTime).Time()), IsTrusted: ch.IsTrusted > 0, } heads[cs] = struct{}{} out.ChangesByTime = append(out.ChangesByTime, cs) - deps, err := hypersql.ChangesGetDeps(conn, ch.HDChangesBlob) + deps, err := hypersql.ChangesGetDeps(conn, ch.ChangesBlob) if err != nil { return err } diff --git 
a/backend/daemon/api/groups/v1alpha/groups.go b/backend/daemon/api/groups/v1alpha/groups.go index bbf1feaf39..6d60f5bde3 100644 --- a/backend/daemon/api/groups/v1alpha/groups.go +++ b/backend/daemon/api/groups/v1alpha/groups.go @@ -4,12 +4,12 @@ package groups import ( "bytes" "context" - "crypto/rand" - "crypto/sha256" + "encoding/json" "fmt" "mintter/backend/core" groups "mintter/backend/genproto/groups/v1alpha" p2p "mintter/backend/genproto/p2p/v1alpha" + "mintter/backend/hlc" "mintter/backend/hyper" "mintter/backend/hyper/hypersql" "mintter/backend/mttnet" @@ -18,10 +18,11 @@ import ( "mintter/backend/pkg/future" "mintter/backend/pkg/maputil" "strings" + "time" "crawshaw.io/sqlite" + "crawshaw.io/sqlite/sqlitex" "github.com/ipfs/go-cid" - "github.com/multiformats/go-multibase" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "google.golang.org/protobuf/types/known/timestamppb" @@ -54,29 +55,33 @@ func (srv *Server) CreateGroup(ctx context.Context, in *groups.CreateGroupReques return nil, err } - id, nonce := newID(me.Account().Principal()) - eid := hyper.EntityID("hd://g/" + id) + clock := hlc.NewClock() + ts := clock.Now() + createTime := ts.Time().Unix() + + id, nonce := hyper.NewUnforgeableID(me.Account().Principal(), nil, createTime) + eid := hyper.EntityID("hm://g/" + id) + e := hyper.NewEntityWithClock(eid, clock) patch := map[string]any{ - "nonce": nonce, - "title": in.Title, - "owner": me.Account().Principal(), + "nonce": nonce, + "title": in.Title, + "createTime": int(createTime), + "owner": []byte(me.Account().Principal()), } if in.Description != "" { patch["description"] = in.Description } if in.Members != nil { - // TODO(burdiyan): validate members are valid account IDs. 
- patch["members"] = in.Members + return nil, status.Errorf(codes.Unimplemented, "adding members when creating a group is not implemented yet") } del, err := srv.getDelegation(ctx) if err != nil { return nil, err } - e := hyper.NewEntity(eid) - hb, err := e.CreateChange(e.NextTimestamp(), me.DeviceKey(), del, patch) + hb, err := e.CreateChange(ts, me.DeviceKey(), del, patch, hyper.WithAction("Create")) if err != nil { return nil, err } @@ -85,7 +90,7 @@ func (srv *Server) CreateGroup(ctx context.Context, in *groups.CreateGroupReques return nil, err } - return groupToProto(e, true) + return groupToProto(srv.blobs, e) } // GetGroup gets a group. @@ -116,7 +121,7 @@ func (srv *Server) GetGroup(ctx context.Context, in *groups.GetGroupRequest) (*g e = v } - return groupToProto(e, true) + return groupToProto(srv.blobs, e) } // UpdateGroup updates a group. @@ -125,12 +130,6 @@ func (srv *Server) UpdateGroup(ctx context.Context, in *groups.UpdateGroupReques return nil, errutil.MissingArgument("id") } - if in.UpdatedMembers != nil { - return nil, status.Errorf(codes.Unimplemented, "TODO: updating members is not implemented yet") - } - - // TODO(burdiyan): check if we are allowed to update the group. 
- me, err := srv.getMe() if err != nil { return nil, err @@ -169,12 +168,19 @@ func (srv *Server) UpdateGroup(ctx context.Context, in *groups.UpdateGroupReques } } + for k, v := range in.UpdatedMembers { + if v == groups.Role_ROLE_UNSPECIFIED { + return nil, status.Errorf(codes.Unimplemented, "removing members is not implemented yet") + } + maputil.Set(patch, []string{"members", k}, int64(v)) + } + del, err := srv.getDelegation(ctx) if err != nil { return nil, err } - hb, err := e.CreateChange(e.NextTimestamp(), me.DeviceKey(), del, patch) + hb, err := e.CreateChange(e.NextTimestamp(), me.DeviceKey(), del, patch, hyper.WithAction("Update")) if err != nil { return nil, err } @@ -183,12 +189,12 @@ func (srv *Server) UpdateGroup(ctx context.Context, in *groups.UpdateGroupReques return nil, err } - return groupToProto(e, true) + return groupToProto(srv.blobs, e) } // ListGroups lists groups. func (srv *Server) ListGroups(ctx context.Context, in *groups.ListGroupsRequest) (*groups.ListGroupsResponse, error) { - entities, err := srv.blobs.ListEntities(ctx, "hd://g/") + entities, err := srv.blobs.ListEntities(ctx, "hm://g/") if err != nil { return nil, err } @@ -262,36 +268,52 @@ func (srv *Server) ListMembers(ctx context.Context, in *groups.ListMembersReques return nil, errutil.MissingArgument("id") } - eid := hyper.EntityID(in.Id) + if in.Version != "" { + return nil, status.Errorf(codes.Unimplemented, "listing members for groups at a specific version is not implemented yet") + } - var e *hyper.Entity - if in.Version == "" { - v, err := srv.blobs.LoadEntity(ctx, eid) + resp := &groups.ListMembersResponse{} + + if err := srv.blobs.Query(ctx, func(conn *sqlite.Conn) error { + edb, err := hypersql.EntitiesLookupID(conn, in.Id) if err != nil { - return nil, err + return err } - e = v - } else { - heads, err := hyper.Version(in.Version).Parse() + if edb.EntitiesID == 0 { + return fmt.Errorf("group %q not found", in.Id) + } + + owner, err := hypersql.ResourceGetOwner(conn, 
edb.EntitiesID) if err != nil { - return nil, err + return err } - v, err := srv.blobs.LoadEntityFromHeads(ctx, eid, heads...) + ownerPub, err := hypersql.PublicKeysLookupPrincipal(conn, owner) if err != nil { - return nil, err + return err } - e = v - } - o, ok := e.Get("owner") - if !ok { - return nil, fmt.Errorf("group entity must have owner") + resp.OwnerAccountId = core.Principal(ownerPub.PublicKeysPrincipal).String() + + return hypersql.GroupListMembers(conn, edb.EntitiesID, owner, func(principal []byte, role int64) error { + if resp.Members == nil { + resp.Members = make(map[string]groups.Role) + } + + p, r := core.Principal(principal).String(), groups.Role(role) + if r == groups.Role_ROLE_UNSPECIFIED { + delete(resp.Members, p) + } else { + resp.Members[p] = r + } + + return nil + }) + }); err != nil { + return nil, err } - return &groups.ListMembersResponse{ - OwnerAccountId: core.Principal(o.([]byte)).String(), - }, nil + return resp, nil } // GetSiteInfo gets information of a local site. @@ -302,13 +324,13 @@ func (srv *Server) GetSiteInfo(ctx context.Context, in *groups.GetSiteInfoReques if err != nil { return fmt.Errorf("No site info available: %w", err) } - ret.GroupId = res.HDEntitiesEID + ret.GroupId = res.EntitiesEID if res.ServedSitesVersion != "" { ret.Version = res.ServedSitesVersion } else { - entity, err := srv.blobs.LoadEntity(ctx, hyper.EntityID(res.HDEntitiesEID)) + entity, err := srv.blobs.LoadEntity(ctx, hyper.EntityID(res.EntitiesEID)) if err != nil { - return fmt.Errorf("could not get entity [%s]: %w", res.HDEntitiesEID, err) + return fmt.Errorf("could not get entity [%s]: %w", res.EntitiesEID, err) } ret.Version = entity.Version().String() } @@ -355,29 +377,187 @@ func (srv *Server) ConvertToSite(ctx context.Context, in *groups.ConvertToSiteRe OwnerId: res.OwnerId, Hostname: remoteHostname, }, nil +} + +// ListDocumentGroups lists groups that a document belongs to. 
+func (srv *Server) ListDocumentGroups(ctx context.Context, in *groups.ListDocumentGroupsRequest) (*groups.ListDocumentGroupsResponse, error) { + if in.DocumentId == "" { + return nil, errutil.MissingArgument("documentId") + } + + resp := &groups.ListDocumentGroupsResponse{} + + if err := srv.blobs.Query(ctx, func(conn *sqlite.Conn) error { + const q = ` + SELECT + lookup.value AS entity, + blobs.codec AS codec, + blobs.multihash AS hash, + blob_attrs.anchor AS anchor, + blob_attrs.extra AS extra, + blob_attrs.ts AS ts + FROM blob_attrs + JOIN changes ON changes.blob = blob_attrs.blob + JOIN lookup ON lookup.id = changes.entity + JOIN blobs ON blob_attrs.blob = blobs.id + WHERE blob_attrs.key = 'group/content' + AND blob_attrs.value_ptr IS NOT NULL + AND blob_attrs.value_ptr = :document + ` + + edb, err := hypersql.EntitiesLookupID(conn, in.DocumentId) + if err != nil { + return err + } + if edb.EntitiesID == 0 { + return fmt.Errorf("document %q not found: make sure to specify fully-qualified entity ID", in.DocumentId) + } + + if err := sqlitex.Exec(conn, q, func(stmt *sqlite.Stmt) error { + var ( + entity string + codec int64 + hash []byte + anchor string + extra []byte + ts int64 + ) + stmt.Scan(&entity, &codec, &hash, &anchor, &extra, &ts) + + var ld hyper.LinkData + if err := json.Unmarshal(extra, &ld); err != nil { + return err + } + + var sb strings.Builder + sb.WriteString(in.DocumentId) + + if ld.TargetVersion != "" { + sb.WriteString("?v=") + sb.WriteString(ld.TargetVersion) + } + + if ld.TargetFragment != "" { + sb.WriteString("#") + sb.WriteString(ld.TargetFragment) + } + + rawURL := sb.String() + item := &groups.ListDocumentGroupsResponse_Item{ + GroupId: entity, + GroupChange: cid.NewCidV1(uint64(codec), hash).String(), + ChangeTime: timestamppb.New(time.UnixMicro(ts)), + Path: anchor, + RawUrl: rawURL, + } + + resp.Items = append(resp.Items, item) + return nil + }, edb.EntitiesID); err != nil { + return err + } + + return nil + }); err != nil { + 
return nil, err + } + + return resp, nil } -func groupToProto(e *hyper.Entity, isLatest bool) (*groups.Group, error) { - createTime := e.AppliedChanges()[0].Data.HLCTime.Time() - gpb := &groups.Group{ - Id: string(e.ID()), - CreateTime: timestamppb.New(createTime), - Version: e.Version().String(), +// ListAccountGroups lists groups that an account belongs to. +func (srv *Server) ListAccountGroups(ctx context.Context, in *groups.ListAccountGroupsRequest) (*groups.ListAccountGroupsResponse, error) { + if in.AccountId == "" { + return nil, errutil.MissingArgument("accountId") } - { - v, ok := e.Get("owner") - if !ok { - return nil, fmt.Errorf("group entity must have owner") + acc, err := core.DecodePrincipal(in.AccountId) + if err != nil { + return nil, err + } + + resp := &groups.ListAccountGroupsResponse{} + + if err := srv.blobs.Query(ctx, func(conn *sqlite.Conn) error { + accdb, err := hypersql.PublicKeysLookupID(conn, acc) + if err != nil { + return err + } + + if accdb.PublicKeysID == 0 { + return fmt.Errorf("account %q not found", in.AccountId) } - switch v := v.(type) { - case core.Principal: - gpb.OwnerAccountId = v.String() - case []byte: - gpb.OwnerAccountId = core.Principal(v).String() + // This query assumes that we've indexed only valid changes, + // i.e. group members are only mutated by the owner. + // TODO(burdiyan): support member removals and make sure to query + // only valid changes. + const q = ` + SELECT + lookup.value AS entity, + blob_attrs.extra AS role, + MAX(blob_attrs.ts) AS ts + FROM blob_attrs + JOIN changes ON changes.blob = blob_attrs.blob + JOIN lookup ON lookup.id = changes.entity + WHERE blob_attrs.key = 'group/member' + AND blob_attrs.value_ptr IS NOT NULL + AND blob_attrs.value_ptr = :member + GROUP BY changes.entity + ` + + if err := sqlitex.Exec(conn, q, func(stmt *sqlite.Stmt) error { + var ( + group string + role int64 + ) + + stmt.Scan(&group, &role) + + // TODO(burdiyan): this is really bad. 
Just use the database to get this info. + g, err := srv.GetGroup(ctx, &groups.GetGroupRequest{ + Id: group, + }) + if err != nil { + return err + } + + resp.Items = append(resp.Items, &groups.ListAccountGroupsResponse_Item{ + Group: g, + Role: groups.Role(role), + }) + + return nil + }, accdb.PublicKeysID); err != nil { + return err } + + return nil + }); err != nil { + return nil, err + } + + return resp, nil +} + +func groupToProto(blobs *hyper.Storage, e *hyper.Entity) (*groups.Group, error) { + createTime, ok := e.AppliedChanges()[0].Data.Patch["createTime"].(int) + if !ok { + return nil, fmt.Errorf("group entity doesn't have createTime field") + } + + owner, ok := e.AppliedChanges()[0].Data.Patch["owner"].([]byte) + if !ok { + return nil, fmt.Errorf("group entity doesn't have owner field") + } + + gpb := &groups.Group{ + Id: string(e.ID()), + CreateTime: timestamppb.New(time.Unix(int64(createTime), 0)), + OwnerAccountId: core.Principal(owner).String(), + Version: e.Version().String(), + UpdateTime: timestamppb.New(e.LastChangeTime().Time()), } { @@ -442,30 +622,3 @@ func (srv *Server) getDelegation(ctx context.Context) (cid.Cid, error) { return out, nil } - -func newID(me core.Principal) (id string, nonce []byte) { - nonce = make([]byte, 16) - _, err := rand.Read(nonce) - if err != nil { - panic(err) - } - - h := sha256.New() - if _, err := h.Write(me); err != nil { - panic(err) - } - if _, err := h.Write(nonce); err != nil { - panic(err) - } - - dig := h.Sum(nil) - base, err := multibase.Encode(multibase.Base58BTC, dig) - if err != nil { - panic(err) - } - - // Using last 22 characters to avoid multibase prefix. - // We don't use full hash digest here, to make our IDs shorter. - // But it should have enough collision resistance for our purpose. 
- return base[len(base)-22:], nonce -} diff --git a/backend/daemon/api/groups/v1alpha/groups_test.go b/backend/daemon/api/groups/v1alpha/groups_test.go index e078b2149e..34f1e7381c 100644 --- a/backend/daemon/api/groups/v1alpha/groups_test.go +++ b/backend/daemon/api/groups/v1alpha/groups_test.go @@ -130,7 +130,7 @@ func TestListContent(t *testing.T) { group, err = srv.UpdateGroup(ctx, &groups.UpdateGroupRequest{ Id: group.Id, UpdatedContent: map[string]string{ - "/": "hd://d/my-index-page?v=deadbeef", + "/": "hm://d/my-index-page?v=bafy2bzacectq4c4akk6bmlrdem6hzf5blrmnnj2sptedtd5t5hp6ggkky3tlw", }, }) require.NoError(t, err) @@ -143,7 +143,7 @@ func TestListContent(t *testing.T) { want := &groups.ListContentResponse{ Content: map[string]string{ - "/": "hd://d/my-index-page?v=deadbeef", + "/": "hm://d/my-index-page?v=bafy2bzacectq4c4akk6bmlrdem6hzf5blrmnnj2sptedtd5t5hp6ggkky3tlw", }, } @@ -156,8 +156,8 @@ func TestListContent(t *testing.T) { group, err = srv.UpdateGroup(ctx, &groups.UpdateGroupRequest{ Id: group.Id, UpdatedContent: map[string]string{ - "/": "", - "foo": "bar", + "/": "", + "/foo": "bar", }, }) require.NoError(t, err) @@ -169,8 +169,8 @@ func TestListContent(t *testing.T) { want := &groups.ListContentResponse{ Content: map[string]string{ - "/": "", - "foo": "bar", + "/": "", + "/foo": "bar", }, } @@ -182,8 +182,8 @@ func TestListContent(t *testing.T) { group, err = srv.UpdateGroup(ctx, &groups.UpdateGroupRequest{ Id: group.Id, UpdatedContent: map[string]string{ - "/": "", - "foo": "bar", + "/": "", + "/foo": "bar", }, }) require.NoError(t, err) @@ -196,7 +196,7 @@ func TestListContent(t *testing.T) { want := &groups.ListContentResponse{ Content: map[string]string{ - "/": "hd://d/my-index-page?v=deadbeef", + "/": "hm://d/my-index-page?v=bafy2bzacectq4c4akk6bmlrdem6hzf5blrmnnj2sptedtd5t5hp6ggkky3tlw", }, } @@ -204,6 +204,57 @@ func TestListContent(t *testing.T) { } } +func TestMembers(t *testing.T) { + t.Parallel() + + srv := newTestSrv(t, "alice") + 
bob := coretest.NewTester("bob") + // carol := coretest.NewTester("carol") + ctx := context.Background() + + group, err := srv.CreateGroup(ctx, &groups.CreateGroupRequest{ + Title: "My Group", + }) + require.NoError(t, err) + + _, err = srv.UpdateGroup(ctx, &groups.UpdateGroupRequest{ + Id: group.Id, + UpdatedMembers: map[string]groups.Role{ + bob.Account.Principal().String(): groups.Role_EDITOR, + }, + }) + require.NoError(t, err) + + list, err := srv.ListMembers(ctx, &groups.ListMembersRequest{Id: group.Id}) + require.NoError(t, err) + want := &groups.ListMembersResponse{ + OwnerAccountId: srv.me.MustGet().Account().Principal().String(), + Members: map[string]groups.Role{ + srv.me.MustGet().Account().Principal().String(): groups.Role_OWNER, + bob.Account.Principal().String(): groups.Role_EDITOR, + }, + } + testutil.ProtoEqual(t, want, list, "list members response must match") + + // TODO(burdiyan): uncomment this when removing members is implemented. + // _, err = srv.UpdateGroup(ctx, &groups.UpdateGroupRequest{ + // Id: group.Id, + // UpdatedMembers: map[string]groups.Role{ + // bob.Account.Principal().String(): groups.Role_ROLE_UNSPECIFIED, + // carol.Account.Principal().String(): groups.Role_EDITOR, + // }, + // }) + // require.NoError(t, err) + + // list, err = srv.ListMembers(ctx, &groups.ListMembersRequest{Id: group.Id}) + // require.NoError(t, err) + // want = &groups.ListMembersResponse{ + // OwnerAccountId: srv.me.MustGet().Account().Principal().String(), + // Members: map[string]groups.Role{carol.Account.Principal().String(): groups.Role_EDITOR}, + // } + // testutil.ProtoEqual(t, want, list, "list members response must match") +} + func TestListGroups(t *testing.T) { t.Parallel() @@ -242,6 +293,130 @@ func TestListGroups(t *testing.T) { testutil.ProtoEqual(t, want, list, "list groups response must match") } +func TestDocumentGroupBacklinks(t *testing.T) { + t.Parallel() + + srv := newTestSrv(t, "alice") + ctx := context.Background() + + group1, err := 
srv.CreateGroup(ctx, &groups.CreateGroupRequest{ + Title: "My Group", + Description: "Description of my group", + }) + require.NoError(t, err) + + group1, err = srv.UpdateGroup(ctx, &groups.UpdateGroupRequest{ + Id: group1.Id, + UpdatedContent: map[string]string{ + "/": "hm://d/my-index-page?v=bafy2bzacectq4c4akk6bmlrdem6hzf5blrmnnj2sptedtd5t5hp6ggkky3tlw", + }, + }) + require.NoError(t, err) + + group2, err := srv.CreateGroup(ctx, &groups.CreateGroupRequest{ + Title: "My another Group", + }) + require.NoError(t, err) + + group2, err = srv.UpdateGroup(ctx, &groups.UpdateGroupRequest{ + Id: group2.Id, + UpdatedContent: map[string]string{ + "/fragmented-document": "hm://d/my-index-page?v=bafy2bzacectq4c4akk6bmlrdem6hzf5blrmnnj2sptedtd5t5hp6ggkky3tlw#some-fragment", + }, + }) + require.NoError(t, err) + + list, err := srv.ListDocumentGroups(ctx, &groups.ListDocumentGroupsRequest{ + DocumentId: "hm://d/my-index-page", + }) + require.NoError(t, err) + + want := &groups.ListDocumentGroupsResponse{ + Items: []*groups.ListDocumentGroupsResponse_Item{ + { + GroupId: group1.Id, + GroupChange: group1.Version, + ChangeTime: group1.UpdateTime, + Path: "/", + RawUrl: "hm://d/my-index-page?v=bafy2bzacectq4c4akk6bmlrdem6hzf5blrmnnj2sptedtd5t5hp6ggkky3tlw", + }, + { + GroupId: group2.Id, + GroupChange: group2.Version, + ChangeTime: group2.UpdateTime, + Path: "/fragmented-document", + RawUrl: "hm://d/my-index-page?v=bafy2bzacectq4c4akk6bmlrdem6hzf5blrmnnj2sptedtd5t5hp6ggkky3tlw#some-fragment", + }, + }, + } + + testutil.ProtoEqual(t, want, list, "list groups for document response must match") +} + +func TestListAccountGroups(t *testing.T) { + t.Parallel() + + srv := newTestSrv(t, "alice") + alice := srv.me.MustGet() + bob := coretest.NewTester("bob") + carol := coretest.NewTester("carol") + ctx := context.Background() + + group1, err := srv.CreateGroup(ctx, &groups.CreateGroupRequest{ + Title: "My Group", + }) + require.NoError(t, err) + + group1, err = srv.UpdateGroup(ctx, 
&groups.UpdateGroupRequest{ + Id: group1.Id, + UpdatedMembers: map[string]groups.Role{ + bob.Account.Principal().String(): groups.Role_EDITOR, + }, + }) + require.NoError(t, err) + + group2, err := srv.CreateGroup(ctx, &groups.CreateGroupRequest{ + Title: "My Group 2", + }) + require.NoError(t, err) + + group2, err = srv.UpdateGroup(ctx, &groups.UpdateGroupRequest{ + Id: group2.Id, + UpdatedMembers: map[string]groups.Role{ + carol.Account.Principal().String(): groups.Role_EDITOR, + }, + }) + require.NoError(t, err) + + wants := map[string]*groups.ListAccountGroupsResponse{ + alice.Account().String(): { + Items: []*groups.ListAccountGroupsResponse_Item{ + {Group: group1, Role: groups.Role_OWNER}, + {Group: group2, Role: groups.Role_OWNER}, + }, + }, + bob.Account.String(): { + Items: []*groups.ListAccountGroupsResponse_Item{ + {Group: group1, Role: groups.Role_EDITOR}, + }, + }, + carol.Account.String(): { + Items: []*groups.ListAccountGroupsResponse_Item{ + {Group: group2, Role: groups.Role_EDITOR}, + }, + }, + } + + for acc, want := range wants { + list, err := srv.ListAccountGroups(ctx, &groups.ListAccountGroupsRequest{ + AccountId: acc, + }) + require.NoError(t, err) + + testutil.ProtoEqual(t, want, list, "list groups for account response must match") + } +} + func newTestSrv(t *testing.T, name string) *Server { u := coretest.NewTester("alice") diff --git a/backend/daemon/daemon_e2e_test.go b/backend/daemon/daemon_e2e_test.go index eb4e72886c..9a76d12e8f 100644 --- a/backend/daemon/daemon_e2e_test.go +++ b/backend/daemon/daemon_e2e_test.go @@ -692,7 +692,7 @@ func TestBug_ListObjectsMustHaveCausalOrder(t *testing.T) { var found *p2p.Object seen := map[cid.Cid]struct{}{} for _, obj := range list.Objects { - if obj.Id == "hd://d/"+pub.Document.Id { + if obj.Id == "hm://d/"+pub.Document.Id { found = obj } for _, ch := range obj.ChangeIds { @@ -803,14 +803,15 @@ func TestTrustedPeers(t *testing.T) { sr := must.Do2(alice.Syncing.MustGet().Sync(ctx)) require.Equal(t, 
int64(1), sr.NumSyncOK) require.Equal(t, int64(0), sr.NumSyncFailed) - require.Equal(t, []peer.ID{alice.Storage.Device().PeerID(), bob.Storage.Device().PeerID()}, sr.Peers) + require.ElementsMatch(t, []peer.ID{alice.Storage.Device().PeerID(), bob.Storage.Device().PeerID()}, sr.Peers) + } { sr := must.Do2(bob.Syncing.MustGet().Sync(ctx)) require.Equal(t, int64(1), sr.NumSyncOK) require.Equal(t, int64(0), sr.NumSyncFailed) - require.Equal(t, []peer.ID{bob.Storage.Device().PeerID(), alice.Storage.Device().PeerID()}, sr.Peers) + require.ElementsMatch(t, []peer.ID{bob.Storage.Device().PeerID(), alice.Storage.Device().PeerID()}, sr.Peers) } acc1 := must.Do2(alice.RPC.Accounts.GetAccount(ctx, &accounts.GetAccountRequest{Id: bob.Net.MustGet().ID().Account().Principal().String()})) @@ -827,14 +828,14 @@ func TestTrustedPeers(t *testing.T) { sr := must.Do2(alice.Syncing.MustGet().Sync(ctx)) require.Equal(t, int64(1), sr.NumSyncOK) require.Equal(t, int64(0), sr.NumSyncFailed) - require.Equal(t, []peer.ID{alice.Storage.Device().PeerID(), bob.Storage.Device().PeerID()}, sr.Peers) + require.ElementsMatch(t, []peer.ID{alice.Storage.Device().PeerID(), bob.Storage.Device().PeerID()}, sr.Peers) } { sr := must.Do2(bob.Syncing.MustGet().Sync(ctx)) require.Equal(t, int64(1), sr.NumSyncOK) require.Equal(t, int64(0), sr.NumSyncFailed) - require.Equal(t, []peer.ID{bob.Storage.Device().PeerID(), alice.Storage.Device().PeerID()}, sr.Peers) + require.ElementsMatch(t, []peer.ID{bob.Storage.Device().PeerID(), alice.Storage.Device().PeerID()}, sr.Peers) } time.Sleep(100 * time.Millisecond) // to give time to sync acc1 = must.Do2(alice.RPC.Accounts.GetAccount(ctx, &accounts.GetAccountRequest{Id: bob.Net.MustGet().ID().Account().Principal().String()})) diff --git a/backend/daemon/storage/BUILD.plz b/backend/daemon/storage/BUILD.plz index 9cb6f0a828..29df3b2eb8 100644 --- a/backend/daemon/storage/BUILD.plz +++ b/backend/daemon/storage/BUILD.plz @@ -26,6 +26,9 @@ $TOOLS_GORUN -tags codegen 
generateSchema filegroup( name = "go_library", - srcs = glob(["*.go"]) + ["schema.sql"], + srcs = glob([ + "*.go", + "litext/*", + ]) + ["schema.sql"], visibility = ["//backend/..."], ) diff --git a/backend/daemon/storage/gen.go b/backend/daemon/storage/gen.go index 88648a7d02..8d57c4b98e 100644 --- a/backend/daemon/storage/gen.go +++ b/backend/daemon/storage/gen.go @@ -4,12 +4,9 @@ package storage import ( + "context" "io/ioutil" "mintter/backend/pkg/sqlitegen" - "os" - "path/filepath" - - "crawshaw.io/sqlite" ) func init() { @@ -20,26 +17,28 @@ func init() { "IPLD", "EID", "JSON", - "HD", + "KV", + "HLC", + "URL", ) } func generateSchema() error { - dir, err := ioutil.TempDir("", "mintter-storage-") + db, err := OpenSQLite("file::memory:?mode=memory&cache=shared", 0, 1) if err != nil { return err } - defer os.RemoveAll(dir) + defer db.Close() - conn, err := sqlite.OpenConn(filepath.Join(dir, "db.sqlite")) - if err != nil { + if err := InitSQLiteSchema(db); err != nil { return err } - defer conn.Close() - if err := initSQLite(conn); err != nil { + conn, release, err := db.Conn(context.Background()) + if err != nil { return err } + defer release() schema, err := sqlitegen.IntrospectSchema(conn) if err != nil { diff --git a/backend/daemon/storage/litext/litext.go b/backend/daemon/storage/litext/litext.go new file mode 100644 index 0000000000..f3c91623ce --- /dev/null +++ b/backend/daemon/storage/litext/litext.go @@ -0,0 +1,14 @@ +// Package litext provides our custom extensions for SQLite. +package litext + +// #cgo CFLAGS: -I ../../../../third_party/sqlite +// #cgo CFLAGS: -DSQLITE_CORE +// #include "./litext.h" +import "C" + +// LoadExtensions loads our custom extensions into SQLite +// which will be loaded automatically by all connections. 
+func LoadExtensions() error { + C.load_extensions() + return nil +} diff --git a/backend/daemon/storage/litext/litext.h b/backend/daemon/storage/litext/litext.h new file mode 100644 index 0000000000..95731172c1 --- /dev/null +++ b/backend/daemon/storage/litext/litext.h @@ -0,0 +1,11 @@ +#include + +int sqlite3_sha_init(sqlite3 *db, char **pzErrMsg, sqlite3_api_routines *pApi); + +int sqlite3_mycount_init(sqlite3 *db, char **pzErrMsg, sqlite3_api_routines *pApi); + +static void load_extensions() +{ + sqlite3_auto_extension((void (*)(void))sqlite3_sha_init); + sqlite3_auto_extension((void (*)(void))sqlite3_mycount_init); +} diff --git a/backend/daemon/storage/litext/mycount.c b/backend/daemon/storage/litext/mycount.c new file mode 100644 index 0000000000..63b08014e3 --- /dev/null +++ b/backend/daemon/storage/litext/mycount.c @@ -0,0 +1,48 @@ +/* This is a demo extension */ + +#include +#include +SQLITE_EXTENSION_INIT1 + +typedef struct CountCtx CountCtx; + +struct CountCtx +{ + int64_t n; +}; + +static void countStep(sqlite3_context *context, int argc, sqlite3_value **argv) +{ + CountCtx *p; + p = sqlite3_aggregate_context(context, sizeof(*p)); + if ((argc == 0 || SQLITE_NULL != sqlite3_value_type(argv[0])) && p) + { + p->n++; + } +} + +static void countFinalize(sqlite3_context *context) +{ + CountCtx *p; + p = sqlite3_aggregate_context(context, 0); + sqlite3_result_int64(context, p ? 
p->n : 0); +} + +#ifdef _WIN32 +__declspec(dllexport) +#endif + int sqlite3_mycount_init( + sqlite3 *db, + char **pzErrMsg, + const sqlite3_api_routines *pApi) +{ + int rc = SQLITE_OK; + + SQLITE_EXTENSION_INIT2(pApi); + + (void)pzErrMsg; /* Unused parameter */ + + rc = sqlite3_create_function(db, "mycount", 0, SQLITE_UTF8 | SQLITE_INNOCUOUS | SQLITE_DETERMINISTIC, 0, 0, countStep, countFinalize); + + return rc; +} diff --git a/third_party/sqlite/sha1.c b/backend/daemon/storage/litext/sha1.c similarity index 100% rename from third_party/sqlite/sha1.c rename to backend/daemon/storage/litext/sha1.c diff --git a/backend/daemon/storage/migrations.go b/backend/daemon/storage/migrations.go index 3c14ca7f72..2428aedb64 100644 --- a/backend/daemon/storage/migrations.go +++ b/backend/daemon/storage/migrations.go @@ -47,101 +47,10 @@ type migration struct { // It's important to backup your data directory when trying out the code from a feature branch that has a migration. // Otherwise when you switch back to the main branch the program will complain about an unknown version of the data directory. var migrations = []migration{ - // The pre-migration version does nothing. - // It's here to find the starting point. - {Version: "2023-06-26.01", Run: func(*Dir, *sqlite.Conn) error { + // New beginning. + {Version: "2023-08-30.01", Run: func(d *Dir, conn *sqlite.Conn) error { return nil }}, - - // Clear the user_version pragma which we used to use before migration framework was implemented. - {Version: "2023-07-12.01", Run: func(d *Dir, conn *sqlite.Conn) error { - return sqlitex.ExecScript(conn, "PRAGMA user_version = 0;") - }}, - - // Replace tabs to spaces in the SQL schema text, to make it compatible with the new schema file. 
- {Version: "2023-07-24.01", Run: func(d *Dir, conn *sqlite.Conn) error { - return sqlitex.ExecScript(conn, ` - PRAGMA writable_schema = ON; - UPDATE sqlite_schema SET sql = replace(sql, ' ', ' '); - PRAGMA writable_schema = OFF; - `) - }}, - - // Remove foreign key from web_publications to hd_entities, to avoid losing data when reindexing. - {Version: "2023-07-25.01", Run: func(d *Dir, conn *sqlite.Conn) error { - if err := sqlitex.ExecScript(conn, sqlfmt(` - ALTER TABLE web_publications RENAME TO old_web_publications; - - CREATE TABLE web_publications ( - eid TEXT PRIMARY KEY CHECK (eid <> ''), - version TEXT NOT NULL, - path TEXT UNIQUE - ); - - INSERT INTO web_publications (eid, version, path) - SELECT hd_entities.eid, old_web_publications.version, old_web_publications.path - FROM old_web_publications - INNER JOIN hd_entities ON hd_entities.id = old_web_publications.document; - - DROP TABLE old_web_publications; - - PRAGMA foreign_key_check; - `)); err != nil { - return err - } - - // Committing the transaction started by the migration framework. - if err := sqlitex.ExecTransient(conn, "COMMIT", nil); err != nil { - return err - } - - // Running VACUUM to defragment the database. - if err := sqlitex.ExecTransient(conn, "VACUUM", nil); err != nil { - return err - } - - // Starting a new transaction because migration framework will always want to COMMIT. - return sqlitex.ExecTransient(conn, "BEGIN", nil) - }}, - - // Index the author of each change. 
- {Version: "2023-07-27.01", Run: func(d *Dir, conn *sqlite.Conn) error { - return sqlitex.ExecScript(conn, sqlfmt(` - DROP TABLE hd_changes; - CREATE TABLE hd_changes ( - entity INTEGER REFERENCES hd_entities (id) NOT NULL, - blob INTEGER REFERENCES blobs (id) ON DELETE CASCADE NOT NULL, - hlc_time INTEGER NOT NULL, - author INTEGER REFERENCES public_keys (id) ON DELETE CASCADE NOT NULL, - PRIMARY KEY (entity, blob) - ) WITHOUT ROWID; - CREATE INDEX idx_hd_changes_to_entity ON hd_changes (blob, entity); - CREATE INDEX idx_key_delegations_by_blob ON key_delegations (blob, issuer, delegate); - DELETE FROM global_meta WHERE key = 'last_reindex_time'; - `)) - }}, - - // Adding a trusted table to store the accounts we trust. - {Version: "2023-07-31.01", Run: func(d *Dir, conn *sqlite.Conn) error { - return sqlitex.ExecScript(conn, sqlfmt(` - CREATE TABLE IF NOT EXISTS trusted_accounts ( - id INTEGER PRIMARY KEY REFERENCES public_keys (id) ON DELETE CASCADE NOT NULL - ) WITHOUT ROWID; - INSERT OR REPLACE INTO trusted_accounts (id) VALUES (1); - `)) - }}, - // Adding a site registration table for sites to store what groups they serve. - {Version: "2023-08-17.01", Run: func(d *Dir, conn *sqlite.Conn) error { - return sqlitex.ExecScript(conn, sqlfmt(` - CREATE TABLE IF NOT EXISTS served_sites ( - hostname TEXT CHECK (hostname <> '') PRIMARY KEY, - group_id INTEGER REFERENCES hd_entities (id) ON DELETE NO ACTION NOT NULL, - version TEXT NOT NULL, - owner_id INTEGER REFERENCES public_keys (id) ON DELETE NO ACTION NOT NULL, - UNIQUE(group_id, version) ON CONFLICT REPLACE - ); - `)) - }}, } const ( diff --git a/backend/daemon/storage/migrations_test.go b/backend/daemon/storage/migrations_test.go index aae52d49d7..354950aba3 100644 --- a/backend/daemon/storage/migrations_test.go +++ b/backend/daemon/storage/migrations_test.go @@ -17,6 +17,19 @@ import ( ) func TestMigrateMatchesFreshSchema(t *testing.T) { + // We made a new breaking change so we have no migrations now. 
+ // We can skip this test until we add at least one new migration, + // in which case we need to generate the initial data dir snapshot, + // and store it in testdata. Then we should apply migrations + // on top of this snapshot and verify that it has the same structure + // as a new freshly created data dir. + + if len(migrations) == 1 { + t.SkipNow() + } + + t.Fatalf("We now have some migrations. Fix this test!") + + // We have manually snapshot the data dir from before the migration framework was implemented. // It's stored in ./testdata/initial-data-dir. // We want to test that the data dir with all applied migrations matches the data dir created from scratch. diff --git a/backend/daemon/storage/schema.gen.go b/backend/daemon/storage/schema.gen.go index af838ddfb5..763dcfab62 100644 --- a/backend/daemon/storage/schema.gen.go +++ b/backend/daemon/storage/schema.gen.go @@ -6,6 +6,48 @@ import ( "mintter/backend/pkg/sqlitegen" ) +// Table blob_attrs. +const ( + BlobAttrs sqlitegen.Table = "blob_attrs" + BlobAttrsAnchor sqlitegen.Column = "blob_attrs.anchor" + BlobAttrsBlob sqlitegen.Column = "blob_attrs.blob" + BlobAttrsExtra sqlitegen.Column = "blob_attrs.extra" + BlobAttrsIsLookup sqlitegen.Column = "blob_attrs.is_lookup" + BlobAttrsKey sqlitegen.Column = "blob_attrs.key" + BlobAttrsTs sqlitegen.Column = "blob_attrs.ts" + BlobAttrsValue sqlitegen.Column = "blob_attrs.value" + BlobAttrsValuePtr sqlitegen.Column = "blob_attrs.value_ptr" +) + +// Table blob_attrs. Plain strings. +const ( + T_BlobAttrs = "blob_attrs" + C_BlobAttrsAnchor = "blob_attrs.anchor" + C_BlobAttrsBlob = "blob_attrs.blob" + C_BlobAttrsExtra = "blob_attrs.extra" + C_BlobAttrsIsLookup = "blob_attrs.is_lookup" + C_BlobAttrsKey = "blob_attrs.key" + C_BlobAttrsTs = "blob_attrs.ts" + C_BlobAttrsValue = "blob_attrs.value" + C_BlobAttrsValuePtr = "blob_attrs.value_ptr" +) + +// Table blob_links. 
+const ( + BlobLinks sqlitegen.Table = "blob_links" + BlobLinksRel sqlitegen.Column = "blob_links.rel" + BlobLinksSource sqlitegen.Column = "blob_links.source" + BlobLinksTarget sqlitegen.Column = "blob_links.target" +) + +// Table blob_links. Plain strings. +const ( + T_BlobLinks = "blob_links" + C_BlobLinksRel = "blob_links.rel" + C_BlobLinksSource = "blob_links.source" + C_BlobLinksTarget = "blob_links.target" +) + // Table blobs. const ( Blobs sqlitegen.Table = "blobs" @@ -28,172 +70,126 @@ const ( C_BlobsSize = "blobs.size" ) -// Table content_links_view. -const ( - ContentLinksView sqlitegen.Table = "content_links_view" - ContentLinksViewData sqlitegen.Column = "content_links_view.data" - ContentLinksViewRel sqlitegen.Column = "content_links_view.rel" - ContentLinksViewSourceBlob sqlitegen.Column = "content_links_view.source_blob" - ContentLinksViewSourceBlobCodec sqlitegen.Column = "content_links_view.source_blob_codec" - ContentLinksViewSourceBlobMultihash sqlitegen.Column = "content_links_view.source_blob_multihash" - ContentLinksViewSourceEID sqlitegen.Column = "content_links_view.source_eid" - ContentLinksViewSourceEntity sqlitegen.Column = "content_links_view.source_entity" - ContentLinksViewTargetEID sqlitegen.Column = "content_links_view.target_eid" - ContentLinksViewTargetEntity sqlitegen.Column = "content_links_view.target_entity" -) - -// Table content_links_view. Plain strings. 
-const ( - T_ContentLinksView = "content_links_view" - C_ContentLinksViewData = "content_links_view.data" - C_ContentLinksViewRel = "content_links_view.rel" - C_ContentLinksViewSourceBlob = "content_links_view.source_blob" - C_ContentLinksViewSourceBlobCodec = "content_links_view.source_blob_codec" - C_ContentLinksViewSourceBlobMultihash = "content_links_view.source_blob_multihash" - C_ContentLinksViewSourceEID = "content_links_view.source_eid" - C_ContentLinksViewSourceEntity = "content_links_view.source_entity" - C_ContentLinksViewTargetEID = "content_links_view.target_eid" - C_ContentLinksViewTargetEntity = "content_links_view.target_entity" -) - -// Table global_meta. +// Table change_deps. const ( - GlobalMeta sqlitegen.Table = "global_meta" - GlobalMetaKey sqlitegen.Column = "global_meta.key" - GlobalMetaValue sqlitegen.Column = "global_meta.value" + ChangeDeps sqlitegen.Table = "change_deps" + ChangeDepsChild sqlitegen.Column = "change_deps.child" + ChangeDepsParent sqlitegen.Column = "change_deps.parent" ) -// Table global_meta. Plain strings. +// Table change_deps. Plain strings. const ( - T_GlobalMeta = "global_meta" - C_GlobalMetaKey = "global_meta.key" - C_GlobalMetaValue = "global_meta.value" + T_ChangeDeps = "change_deps" + C_ChangeDepsChild = "change_deps.child" + C_ChangeDepsParent = "change_deps.parent" ) -// Table hd_change_deps. +// Table changes. const ( - HDChangeDeps sqlitegen.Table = "hd_change_deps" - HDChangeDepsChild sqlitegen.Column = "hd_change_deps.child" - HDChangeDepsParent sqlitegen.Column = "hd_change_deps.parent" + Changes sqlitegen.Table = "changes" + ChangesAuthor sqlitegen.Column = "changes.author" + ChangesBlob sqlitegen.Column = "changes.blob" + ChangesEntity sqlitegen.Column = "changes.entity" + ChangesHLCTime sqlitegen.Column = "changes.hlc_time" ) -// Table hd_change_deps. Plain strings. +// Table changes. Plain strings. 
const ( - T_HDChangeDeps = "hd_change_deps" - C_HDChangeDepsChild = "hd_change_deps.child" - C_HDChangeDepsParent = "hd_change_deps.parent" + T_Changes = "changes" + C_ChangesAuthor = "changes.author" + C_ChangesBlob = "changes.blob" + C_ChangesEntity = "changes.entity" + C_ChangesHLCTime = "changes.hlc_time" ) -// Table hd_changes. +// Table changes_view. const ( - HDChanges sqlitegen.Table = "hd_changes" - HDChangesAuthor sqlitegen.Column = "hd_changes.author" - HDChangesBlob sqlitegen.Column = "hd_changes.blob" - HDChangesEntity sqlitegen.Column = "hd_changes.entity" - HDChangesHlcTime sqlitegen.Column = "hd_changes.hlc_time" + ChangesView sqlitegen.Table = "changes_view" + ChangesViewBlobID sqlitegen.Column = "changes_view.blob_id" + ChangesViewCodec sqlitegen.Column = "changes_view.codec" + ChangesViewData sqlitegen.Column = "changes_view.data" + ChangesViewEntity sqlitegen.Column = "changes_view.entity" + ChangesViewEntityID sqlitegen.Column = "changes_view.entity_id" + ChangesViewHLCTime sqlitegen.Column = "changes_view.hlc_time" + ChangesViewMultihash sqlitegen.Column = "changes_view.multihash" + ChangesViewSize sqlitegen.Column = "changes_view.size" ) -// Table hd_changes. Plain strings. +// Table changes_view. Plain strings. const ( - T_HDChanges = "hd_changes" - C_HDChangesAuthor = "hd_changes.author" - C_HDChangesBlob = "hd_changes.blob" - C_HDChangesEntity = "hd_changes.entity" - C_HDChangesHlcTime = "hd_changes.hlc_time" + T_ChangesView = "changes_view" + C_ChangesViewBlobID = "changes_view.blob_id" + C_ChangesViewCodec = "changes_view.codec" + C_ChangesViewData = "changes_view.data" + C_ChangesViewEntity = "changes_view.entity" + C_ChangesViewEntityID = "changes_view.entity_id" + C_ChangesViewHLCTime = "changes_view.hlc_time" + C_ChangesViewMultihash = "changes_view.multihash" + C_ChangesViewSize = "changes_view.size" ) -// Table hd_changes_view. +// Table drafts. 
const ( - HDChangesView sqlitegen.Table = "hd_changes_view" - HDChangesViewBlobID sqlitegen.Column = "hd_changes_view.blob_id" - HDChangesViewCodec sqlitegen.Column = "hd_changes_view.codec" - HDChangesViewData sqlitegen.Column = "hd_changes_view.data" - HDChangesViewEntity sqlitegen.Column = "hd_changes_view.entity" - HDChangesViewEntityID sqlitegen.Column = "hd_changes_view.entity_id" - HDChangesViewHlcTime sqlitegen.Column = "hd_changes_view.hlc_time" - HDChangesViewMultihash sqlitegen.Column = "hd_changes_view.multihash" - HDChangesViewSize sqlitegen.Column = "hd_changes_view.size" + Drafts sqlitegen.Table = "drafts" + DraftsBlob sqlitegen.Column = "drafts.blob" + DraftsEntity sqlitegen.Column = "drafts.entity" ) -// Table hd_changes_view. Plain strings. +// Table drafts. Plain strings. const ( - T_HDChangesView = "hd_changes_view" - C_HDChangesViewBlobID = "hd_changes_view.blob_id" - C_HDChangesViewCodec = "hd_changes_view.codec" - C_HDChangesViewData = "hd_changes_view.data" - C_HDChangesViewEntity = "hd_changes_view.entity" - C_HDChangesViewEntityID = "hd_changes_view.entity_id" - C_HDChangesViewHlcTime = "hd_changes_view.hlc_time" - C_HDChangesViewMultihash = "hd_changes_view.multihash" - C_HDChangesViewSize = "hd_changes_view.size" + T_Drafts = "drafts" + C_DraftsBlob = "drafts.blob" + C_DraftsEntity = "drafts.entity" ) -// Table hd_drafts. +// Table drafts_view. const ( - HDDrafts sqlitegen.Table = "hd_drafts" - HDDraftsBlob sqlitegen.Column = "hd_drafts.blob" - HDDraftsEntity sqlitegen.Column = "hd_drafts.entity" + DraftsView sqlitegen.Table = "drafts_view" + DraftsViewBlobID sqlitegen.Column = "drafts_view.blob_id" + DraftsViewCodec sqlitegen.Column = "drafts_view.codec" + DraftsViewEntity sqlitegen.Column = "drafts_view.entity" + DraftsViewEntityID sqlitegen.Column = "drafts_view.entity_id" + DraftsViewMultihash sqlitegen.Column = "drafts_view.multihash" ) -// Table hd_drafts. Plain strings. +// Table drafts_view. Plain strings. 
const ( - T_HDDrafts = "hd_drafts" - C_HDDraftsBlob = "hd_drafts.blob" - C_HDDraftsEntity = "hd_drafts.entity" + T_DraftsView = "drafts_view" + C_DraftsViewBlobID = "drafts_view.blob_id" + C_DraftsViewCodec = "drafts_view.codec" + C_DraftsViewEntity = "drafts_view.entity" + C_DraftsViewEntityID = "drafts_view.entity_id" + C_DraftsViewMultihash = "drafts_view.multihash" ) -// Table hd_drafts_view. +// Table entities. const ( - HDDraftsView sqlitegen.Table = "hd_drafts_view" - HDDraftsViewBlobID sqlitegen.Column = "hd_drafts_view.blob_id" - HDDraftsViewCodec sqlitegen.Column = "hd_drafts_view.codec" - HDDraftsViewEntity sqlitegen.Column = "hd_drafts_view.entity" - HDDraftsViewEntityID sqlitegen.Column = "hd_drafts_view.entity_id" - HDDraftsViewMultihash sqlitegen.Column = "hd_drafts_view.multihash" + Entities sqlitegen.Table = "entities" + EntitiesEID sqlitegen.Column = "entities.eid" + EntitiesID sqlitegen.Column = "entities.id" ) -// Table hd_drafts_view. Plain strings. +// Table entities. Plain strings. const ( - T_HDDraftsView = "hd_drafts_view" - C_HDDraftsViewBlobID = "hd_drafts_view.blob_id" - C_HDDraftsViewCodec = "hd_drafts_view.codec" - C_HDDraftsViewEntity = "hd_drafts_view.entity" - C_HDDraftsViewEntityID = "hd_drafts_view.entity_id" - C_HDDraftsViewMultihash = "hd_drafts_view.multihash" + T_Entities = "entities" + C_EntitiesEID = "entities.eid" + C_EntitiesID = "entities.id" ) -// Table hd_entities. +// Table heads. const ( - HDEntities sqlitegen.Table = "hd_entities" - HDEntitiesEID sqlitegen.Column = "hd_entities.eid" - HDEntitiesID sqlitegen.Column = "hd_entities.id" + Heads sqlitegen.Table = "heads" + HeadsBlob sqlitegen.Column = "heads.blob" + HeadsName sqlitegen.Column = "heads.name" + HeadsResource sqlitegen.Column = "heads.resource" ) -// Table hd_entities. Plain strings. +// Table heads. Plain strings. const ( - T_HDEntities = "hd_entities" - C_HDEntitiesEID = "hd_entities.eid" - C_HDEntitiesID = "hd_entities.id" -) - -// Table hd_links. 
-const ( - HDLinks sqlitegen.Table = "hd_links" - HDLinksData sqlitegen.Column = "hd_links.data" - HDLinksRel sqlitegen.Column = "hd_links.rel" - HDLinksSourceBlob sqlitegen.Column = "hd_links.source_blob" - HDLinksTargetBlob sqlitegen.Column = "hd_links.target_blob" - HDLinksTargetEntity sqlitegen.Column = "hd_links.target_entity" -) - -// Table hd_links. Plain strings. -const ( - T_HDLinks = "hd_links" - C_HDLinksData = "hd_links.data" - C_HDLinksRel = "hd_links.rel" - C_HDLinksSourceBlob = "hd_links.source_blob" - C_HDLinksTargetBlob = "hd_links.target_blob" - C_HDLinksTargetEntity = "hd_links.target_entity" + T_Heads = "heads" + C_HeadsBlob = "heads.blob" + C_HeadsName = "heads.name" + C_HeadsResource = "heads.resource" ) // Table invite_tokens. @@ -214,20 +210,18 @@ const ( // Table key_delegations. const ( - KeyDelegations sqlitegen.Table = "key_delegations" - KeyDelegationsBlob sqlitegen.Column = "key_delegations.blob" - KeyDelegationsDelegate sqlitegen.Column = "key_delegations.delegate" - KeyDelegationsIssueTime sqlitegen.Column = "key_delegations.issue_time" - KeyDelegationsIssuer sqlitegen.Column = "key_delegations.issuer" + KeyDelegations sqlitegen.Table = "key_delegations" + KeyDelegationsBlob sqlitegen.Column = "key_delegations.blob" + KeyDelegationsDelegate sqlitegen.Column = "key_delegations.delegate" + KeyDelegationsIssuer sqlitegen.Column = "key_delegations.issuer" ) // Table key_delegations. Plain strings. const ( - T_KeyDelegations = "key_delegations" - C_KeyDelegationsBlob = "key_delegations.blob" - C_KeyDelegationsDelegate = "key_delegations.delegate" - C_KeyDelegationsIssueTime = "key_delegations.issue_time" - C_KeyDelegationsIssuer = "key_delegations.issuer" + T_KeyDelegations = "key_delegations" + C_KeyDelegationsBlob = "key_delegations.blob" + C_KeyDelegationsDelegate = "key_delegations.delegate" + C_KeyDelegationsIssuer = "key_delegations.issuer" ) // Table key_delegations_view. 
@@ -237,7 +231,6 @@ const ( KeyDelegationsViewBlobCodec sqlitegen.Column = "key_delegations_view.blob_codec" KeyDelegationsViewBlobMultihash sqlitegen.Column = "key_delegations_view.blob_multihash" KeyDelegationsViewDelegate sqlitegen.Column = "key_delegations_view.delegate" - KeyDelegationsViewIssueTime sqlitegen.Column = "key_delegations_view.issue_time" KeyDelegationsViewIssuer sqlitegen.Column = "key_delegations_view.issuer" ) @@ -248,10 +241,39 @@ const ( C_KeyDelegationsViewBlobCodec = "key_delegations_view.blob_codec" C_KeyDelegationsViewBlobMultihash = "key_delegations_view.blob_multihash" C_KeyDelegationsViewDelegate = "key_delegations_view.delegate" - C_KeyDelegationsViewIssueTime = "key_delegations_view.issue_time" C_KeyDelegationsViewIssuer = "key_delegations_view.issuer" ) +// Table kv. +const ( + KV sqlitegen.Table = "kv" + KVKey sqlitegen.Column = "kv.key" + KVValue sqlitegen.Column = "kv.value" +) + +// Table kv. Plain strings. +const ( + T_KV = "kv" + C_KVKey = "kv.key" + C_KVValue = "kv.value" +) + +// Table lookup. +const ( + Lookup sqlitegen.Table = "lookup" + LookupID sqlitegen.Column = "lookup.id" + LookupType sqlitegen.Column = "lookup.type" + LookupValue sqlitegen.Column = "lookup.value" +) + +// Table lookup. Plain strings. +const ( + T_Lookup = "lookup" + C_LookupID = "lookup.id" + C_LookupType = "lookup.type" + C_LookupValue = "lookup.value" +) + // Table public_blobs_view. const ( PublicBlobsView sqlitegen.Table = "public_blobs_view" @@ -334,16 +356,16 @@ const ( // Table sqlite_sequence. const ( - SQLITESequence sqlitegen.Table = "sqlite_sequence" - SQLITESequenceName sqlitegen.Column = "sqlite_sequence.name" - SQLITESequenceSeq sqlitegen.Column = "sqlite_sequence.seq" + SQLiteSequence sqlitegen.Table = "sqlite_sequence" + SQLiteSequenceName sqlitegen.Column = "sqlite_sequence.name" + SQLiteSequenceSeq sqlitegen.Column = "sqlite_sequence.seq" ) // Table sqlite_sequence. Plain strings. 
const ( - T_SQLITESequence = "sqlite_sequence" - C_SQLITESequenceName = "sqlite_sequence.name" - C_SQLITESequenceSeq = "sqlite_sequence.seq" + T_SQLiteSequence = "sqlite_sequence" + C_SQLiteSequenceName = "sqlite_sequence.name" + C_SQLiteSequenceSeq = "sqlite_sequence.seq" ) // Table trusted_accounts. @@ -403,92 +425,93 @@ const ( // Schema describes SQLite columns. var Schema = sqlitegen.Schema{ Columns: map[sqlitegen.Column]sqlitegen.ColumnInfo{ - BlobsCodec: {Table: Blobs, SQLType: "INTEGER"}, - BlobsData: {Table: Blobs, SQLType: "BLOB"}, - BlobsID: {Table: Blobs, SQLType: "INTEGER"}, - BlobsInsertTime: {Table: Blobs, SQLType: "INTEGER"}, - BlobsMultihash: {Table: Blobs, SQLType: "BLOB"}, - BlobsSize: {Table: Blobs, SQLType: "INTEGER"}, - ContentLinksViewData: {Table: ContentLinksView, SQLType: "BLOB"}, - ContentLinksViewRel: {Table: ContentLinksView, SQLType: "TEXT"}, - ContentLinksViewSourceBlob: {Table: ContentLinksView, SQLType: "INTEGER"}, - ContentLinksViewSourceBlobCodec: {Table: ContentLinksView, SQLType: "INTEGER"}, - ContentLinksViewSourceBlobMultihash: {Table: ContentLinksView, SQLType: "BLOB"}, - ContentLinksViewSourceEID: {Table: ContentLinksView, SQLType: "TEXT"}, - ContentLinksViewSourceEntity: {Table: ContentLinksView, SQLType: "INTEGER"}, - ContentLinksViewTargetEID: {Table: ContentLinksView, SQLType: "TEXT"}, - ContentLinksViewTargetEntity: {Table: ContentLinksView, SQLType: "INTEGER"}, - GlobalMetaKey: {Table: GlobalMeta, SQLType: "TEXT"}, - GlobalMetaValue: {Table: GlobalMeta, SQLType: "TEXT"}, - HDChangeDepsChild: {Table: HDChangeDeps, SQLType: "INTEGER"}, - HDChangeDepsParent: {Table: HDChangeDeps, SQLType: "INTEGER"}, - HDChangesAuthor: {Table: HDChanges, SQLType: "INTEGER"}, - HDChangesBlob: {Table: HDChanges, SQLType: "INTEGER"}, - HDChangesEntity: {Table: HDChanges, SQLType: "INTEGER"}, - HDChangesHlcTime: {Table: HDChanges, SQLType: "INTEGER"}, - HDChangesViewBlobID: {Table: HDChangesView, SQLType: "INTEGER"}, - HDChangesViewCodec: 
{Table: HDChangesView, SQLType: "INTEGER"}, - HDChangesViewData: {Table: HDChangesView, SQLType: "BLOB"}, - HDChangesViewEntity: {Table: HDChangesView, SQLType: "TEXT"}, - HDChangesViewEntityID: {Table: HDChangesView, SQLType: "INTEGER"}, - HDChangesViewHlcTime: {Table: HDChangesView, SQLType: "INTEGER"}, - HDChangesViewMultihash: {Table: HDChangesView, SQLType: "BLOB"}, - HDChangesViewSize: {Table: HDChangesView, SQLType: "INTEGER"}, - HDDraftsBlob: {Table: HDDrafts, SQLType: "INTEGER"}, - HDDraftsEntity: {Table: HDDrafts, SQLType: "INTEGER"}, - HDDraftsViewBlobID: {Table: HDDraftsView, SQLType: "INTEGER"}, - HDDraftsViewCodec: {Table: HDDraftsView, SQLType: "INTEGER"}, - HDDraftsViewEntity: {Table: HDDraftsView, SQLType: "TEXT"}, - HDDraftsViewEntityID: {Table: HDDraftsView, SQLType: "INTEGER"}, - HDDraftsViewMultihash: {Table: HDDraftsView, SQLType: "BLOB"}, - HDEntitiesEID: {Table: HDEntities, SQLType: "TEXT"}, - HDEntitiesID: {Table: HDEntities, SQLType: "INTEGER"}, - HDLinksData: {Table: HDLinks, SQLType: "BLOB"}, - HDLinksRel: {Table: HDLinks, SQLType: "TEXT"}, - HDLinksSourceBlob: {Table: HDLinks, SQLType: "INTEGER"}, - HDLinksTargetBlob: {Table: HDLinks, SQLType: "INTEGER"}, - HDLinksTargetEntity: {Table: HDLinks, SQLType: "INTEGER"}, - InviteTokensExpireTime: {Table: InviteTokens, SQLType: "INTEGER"}, - InviteTokensRole: {Table: InviteTokens, SQLType: "INTEGER"}, - InviteTokensToken: {Table: InviteTokens, SQLType: "TEXT"}, - KeyDelegationsBlob: {Table: KeyDelegations, SQLType: "INTEGER"}, - KeyDelegationsDelegate: {Table: KeyDelegations, SQLType: "INTEGER"}, - KeyDelegationsIssueTime: {Table: KeyDelegations, SQLType: "INTEGER"}, - KeyDelegationsIssuer: {Table: KeyDelegations, SQLType: "INTEGER"}, - KeyDelegationsViewBlob: {Table: KeyDelegationsView, SQLType: "INTEGER"}, - KeyDelegationsViewBlobCodec: {Table: KeyDelegationsView, SQLType: "INTEGER"}, - KeyDelegationsViewBlobMultihash: {Table: KeyDelegationsView, SQLType: "BLOB"}, - 
KeyDelegationsViewDelegate: {Table: KeyDelegationsView, SQLType: "BLOB"}, - KeyDelegationsViewIssueTime: {Table: KeyDelegationsView, SQLType: "INTEGER"}, - KeyDelegationsViewIssuer: {Table: KeyDelegationsView, SQLType: "BLOB"}, - PublicBlobsViewCodec: {Table: PublicBlobsView, SQLType: "INTEGER"}, - PublicBlobsViewID: {Table: PublicBlobsView, SQLType: "INTEGER"}, - PublicBlobsViewMultihash: {Table: PublicBlobsView, SQLType: "BLOB"}, - PublicKeysID: {Table: PublicKeys, SQLType: "INTEGER"}, - PublicKeysPrincipal: {Table: PublicKeys, SQLType: "BLOB"}, - ServedSitesGroupID: {Table: ServedSites, SQLType: "INTEGER"}, - ServedSitesHostname: {Table: ServedSites, SQLType: "TEXT"}, - ServedSitesOwnerID: {Table: ServedSites, SQLType: "INTEGER"}, - ServedSitesVersion: {Table: ServedSites, SQLType: "TEXT"}, - SiteMembersAccountID: {Table: SiteMembers, SQLType: "INTEGER"}, - SiteMembersRole: {Table: SiteMembers, SQLType: "INTEGER"}, - SitesAccountID: {Table: Sites, SQLType: "INTEGER"}, - SitesAddresses: {Table: Sites, SQLType: "TEXT"}, - SitesHostname: {Table: Sites, SQLType: "TEXT"}, - SitesRole: {Table: Sites, SQLType: "INTEGER"}, - SQLITESequenceName: {Table: SQLITESequence, SQLType: ""}, - SQLITESequenceSeq: {Table: SQLITESequence, SQLType: ""}, - TrustedAccountsID: {Table: TrustedAccounts, SQLType: "INTEGER"}, - WalletsAddress: {Table: Wallets, SQLType: "TEXT"}, - WalletsBalance: {Table: Wallets, SQLType: "INTEGER"}, - WalletsID: {Table: Wallets, SQLType: "TEXT"}, - WalletsLogin: {Table: Wallets, SQLType: "BLOB"}, - WalletsName: {Table: Wallets, SQLType: "TEXT"}, - WalletsPassword: {Table: Wallets, SQLType: "BLOB"}, - WalletsToken: {Table: Wallets, SQLType: "BLOB"}, - WalletsType: {Table: Wallets, SQLType: "TEXT"}, - WebPublicationsEID: {Table: WebPublications, SQLType: "TEXT"}, - WebPublicationsPath: {Table: WebPublications, SQLType: "TEXT"}, - WebPublicationsVersion: {Table: WebPublications, SQLType: "TEXT"}, + BlobAttrsAnchor: {Table: BlobAttrs, SQLType: "TEXT"}, + 
BlobAttrsBlob: {Table: BlobAttrs, SQLType: "INTEGER"}, + BlobAttrsExtra: {Table: BlobAttrs, SQLType: ""}, + BlobAttrsIsLookup: {Table: BlobAttrs, SQLType: "INTEGER"}, + BlobAttrsKey: {Table: BlobAttrs, SQLType: "TEXT"}, + BlobAttrsTs: {Table: BlobAttrs, SQLType: "INTEGER"}, + BlobAttrsValue: {Table: BlobAttrs, SQLType: ""}, + BlobAttrsValuePtr: {Table: BlobAttrs, SQLType: "INTEGER"}, + BlobLinksRel: {Table: BlobLinks, SQLType: "TEXT"}, + BlobLinksSource: {Table: BlobLinks, SQLType: "INTEGER"}, + BlobLinksTarget: {Table: BlobLinks, SQLType: "INTEGER"}, + BlobsCodec: {Table: Blobs, SQLType: "INTEGER"}, + BlobsData: {Table: Blobs, SQLType: "BLOB"}, + BlobsID: {Table: Blobs, SQLType: "INTEGER"}, + BlobsInsertTime: {Table: Blobs, SQLType: "INTEGER"}, + BlobsMultihash: {Table: Blobs, SQLType: "BLOB"}, + BlobsSize: {Table: Blobs, SQLType: "INTEGER"}, + ChangeDepsChild: {Table: ChangeDeps, SQLType: "INTEGER"}, + ChangeDepsParent: {Table: ChangeDeps, SQLType: "INTEGER"}, + ChangesAuthor: {Table: Changes, SQLType: "INTEGER"}, + ChangesBlob: {Table: Changes, SQLType: "INTEGER"}, + ChangesEntity: {Table: Changes, SQLType: "INTEGER"}, + ChangesHLCTime: {Table: Changes, SQLType: "INTEGER"}, + ChangesViewBlobID: {Table: ChangesView, SQLType: "INTEGER"}, + ChangesViewCodec: {Table: ChangesView, SQLType: "INTEGER"}, + ChangesViewData: {Table: ChangesView, SQLType: "BLOB"}, + ChangesViewEntity: {Table: ChangesView, SQLType: "BLOB"}, + ChangesViewEntityID: {Table: ChangesView, SQLType: "INTEGER"}, + ChangesViewHLCTime: {Table: ChangesView, SQLType: "INTEGER"}, + ChangesViewMultihash: {Table: ChangesView, SQLType: "BLOB"}, + ChangesViewSize: {Table: ChangesView, SQLType: "INTEGER"}, + DraftsBlob: {Table: Drafts, SQLType: "INTEGER"}, + DraftsEntity: {Table: Drafts, SQLType: "INTEGER"}, + DraftsViewBlobID: {Table: DraftsView, SQLType: "INTEGER"}, + DraftsViewCodec: {Table: DraftsView, SQLType: "INTEGER"}, + DraftsViewEntity: {Table: DraftsView, SQLType: "BLOB"}, + DraftsViewEntityID: 
{Table: DraftsView, SQLType: "INTEGER"}, + DraftsViewMultihash: {Table: DraftsView, SQLType: "BLOB"}, + EntitiesEID: {Table: Entities, SQLType: "BLOB"}, + EntitiesID: {Table: Entities, SQLType: "INTEGER"}, + HeadsBlob: {Table: Heads, SQLType: "INTEGER"}, + HeadsName: {Table: Heads, SQLType: "TEXT"}, + HeadsResource: {Table: Heads, SQLType: "INTEGER"}, + InviteTokensExpireTime: {Table: InviteTokens, SQLType: "INTEGER"}, + InviteTokensRole: {Table: InviteTokens, SQLType: "INTEGER"}, + InviteTokensToken: {Table: InviteTokens, SQLType: "TEXT"}, + KeyDelegationsBlob: {Table: KeyDelegations, SQLType: "INTEGER"}, + KeyDelegationsDelegate: {Table: KeyDelegations, SQLType: ""}, + KeyDelegationsIssuer: {Table: KeyDelegations, SQLType: ""}, + KeyDelegationsViewBlob: {Table: KeyDelegationsView, SQLType: "INTEGER"}, + KeyDelegationsViewBlobCodec: {Table: KeyDelegationsView, SQLType: "INTEGER"}, + KeyDelegationsViewBlobMultihash: {Table: KeyDelegationsView, SQLType: "BLOB"}, + KeyDelegationsViewDelegate: {Table: KeyDelegationsView, SQLType: "BLOB"}, + KeyDelegationsViewIssuer: {Table: KeyDelegationsView, SQLType: "BLOB"}, + KVKey: {Table: KV, SQLType: "TEXT"}, + KVValue: {Table: KV, SQLType: "TEXT"}, + LookupID: {Table: Lookup, SQLType: "INTEGER"}, + LookupType: {Table: Lookup, SQLType: "INTEGER"}, + LookupValue: {Table: Lookup, SQLType: ""}, + PublicBlobsViewCodec: {Table: PublicBlobsView, SQLType: "INTEGER"}, + PublicBlobsViewID: {Table: PublicBlobsView, SQLType: "INTEGER"}, + PublicBlobsViewMultihash: {Table: PublicBlobsView, SQLType: "BLOB"}, + PublicKeysID: {Table: PublicKeys, SQLType: "INTEGER"}, + PublicKeysPrincipal: {Table: PublicKeys, SQLType: "BLOB"}, + ServedSitesGroupID: {Table: ServedSites, SQLType: "INTEGER"}, + ServedSitesHostname: {Table: ServedSites, SQLType: "TEXT"}, + ServedSitesOwnerID: {Table: ServedSites, SQLType: "INTEGER"}, + ServedSitesVersion: {Table: ServedSites, SQLType: "TEXT"}, + SiteMembersAccountID: {Table: SiteMembers, SQLType: "INTEGER"}, + 
SiteMembersRole: {Table: SiteMembers, SQLType: "INTEGER"}, + SitesAccountID: {Table: Sites, SQLType: "INTEGER"}, + SitesAddresses: {Table: Sites, SQLType: "TEXT"}, + SitesHostname: {Table: Sites, SQLType: "TEXT"}, + SitesRole: {Table: Sites, SQLType: "INTEGER"}, + SQLiteSequenceName: {Table: SQLiteSequence, SQLType: ""}, + SQLiteSequenceSeq: {Table: SQLiteSequence, SQLType: ""}, + TrustedAccountsID: {Table: TrustedAccounts, SQLType: "INTEGER"}, + WalletsAddress: {Table: Wallets, SQLType: "TEXT"}, + WalletsBalance: {Table: Wallets, SQLType: "INTEGER"}, + WalletsID: {Table: Wallets, SQLType: "TEXT"}, + WalletsLogin: {Table: Wallets, SQLType: "BLOB"}, + WalletsName: {Table: Wallets, SQLType: "TEXT"}, + WalletsPassword: {Table: Wallets, SQLType: "BLOB"}, + WalletsToken: {Table: Wallets, SQLType: "BLOB"}, + WalletsType: {Table: Wallets, SQLType: "TEXT"}, + WebPublicationsEID: {Table: WebPublications, SQLType: "TEXT"}, + WebPublicationsPath: {Table: WebPublications, SQLType: "TEXT"}, + WebPublicationsVersion: {Table: WebPublications, SQLType: "TEXT"}, }, } diff --git a/backend/daemon/storage/schema.gensum b/backend/daemon/storage/schema.gensum index a93ad8554a..1490875db5 100644 --- a/backend/daemon/storage/schema.gensum +++ b/backend/daemon/storage/schema.gensum @@ -1,2 +1,2 @@ -srcs: 35ec1af44c0efa67c79c5cd47953b220 -outs: d52099402babd408fef7e900466b357c +srcs: c7f91d3e4cde43c64d88ece5b807d154 +outs: 1668302d9dde81889419d6a3a94b2d8f diff --git a/backend/daemon/storage/schema.go b/backend/daemon/storage/schema.go index bd56806e92..3f920873ba 100644 --- a/backend/daemon/storage/schema.go +++ b/backend/daemon/storage/schema.go @@ -6,6 +6,15 @@ import ( "strings" ) +// Types for the lookup table. +// Needs to be an integer, but we're using +// unicode code points which are easier to remember. 
+const ( + LookupLiteral = int('l') + LookupPublicKey = int('p') + LookupResource = int('r') +) + //go:embed schema.sql var schema string @@ -13,6 +22,21 @@ func init() { schema = removeSQLComments(schema) } +func init() { + // Overwriting types for columns that we left unspecified, so our query codegen can actually work. + col := Schema.Columns[EntitiesEID] + col.SQLType = "TEXT" + Schema.Columns[EntitiesEID] = col + + col = Schema.Columns[KeyDelegationsIssuer] + col.SQLType = "INTEGER" + Schema.Columns[KeyDelegationsIssuer] = col + + col = Schema.Columns[KeyDelegationsDelegate] + col.SQLType = "INTEGER" + Schema.Columns[KeyDelegationsDelegate] = col +} + // removeSQLComments is written with the help of ChatGPT, but it seems to work. // We don't need to store comments in the database file, but we want to use them for ourselves. func removeSQLComments(sql string) string { diff --git a/backend/daemon/storage/schema.sql b/backend/daemon/storage/schema.sql index 56bc1a5400..b1956aecbb 100644 --- a/backend/daemon/storage/schema.sql +++ b/backend/daemon/storage/schema.sql @@ -1,15 +1,32 @@ --- Stores global metadata/configuration about any other table -CREATE TABLE global_meta ( +-- Stores arbitrary key/value data that didn't deserve its own table. +CREATE TABLE kv ( key TEXT PRIMARY KEY, value TEXT ) WITHOUT ROWID; +-- Lookup values that are used in other tables +-- as integers to reduce the database size. +-- Using a single table for different types of values, +-- to allow polymorphic foreign keys in other tables. +-- TODO(burdiyan): eventually this table would need periodic cleanup, +-- because when values get unreferenced they will remain in the table anyway. +CREATE TABLE lookup ( + id INTEGER PRIMARY KEY, + -- Type of the value. + -- See Lookup* constants in schema.go file for possible options. + -- We use unicode code points to make it easier to write queries. 
+ type INTEGER NOT NULL, + value NOT NULL +); + +-- Using hash of the value to reduce the size of the index. +-- We additionally have a covering index by type and value. +CREATE UNIQUE INDEX lookup_value_unique ON lookup (sha1(value)); + +CREATE INDEX lookup_by_type ON lookup (type, value); + -- Stores the content of IPFS blobs. CREATE TABLE blobs ( - -- Short numerical ID to be used internally. - -- The same ID is used for table 'changes' - -- to avoid unnecessary joins. - -- Using AUTOINCREMENT here to use monotonically increasing IDs as a cursor for syncing. id INTEGER PRIMARY KEY AUTOINCREMENT, -- Original multihash of the IPFS blob. -- We don't store CIDs, this is what most blockstore @@ -31,165 +48,158 @@ CREATE TABLE blobs ( insert_time INTEGER DEFAULT (strftime('%s', 'now')) NOT NULL ); --- Stores known public keys and maps them to local short integer IDs. -CREATE TABLE public_keys ( - id INTEGER PRIMARY KEY, - -- Principal is multicodec prefixed public key bytes. - -- See https://github.com/multiformats/multicodec/blob/master/table.csv for possible values. - principal BLOB UNIQUE NOT NULL -); +CREATE VIEW public_keys AS +SELECT + id, + value AS principal +FROM lookup +WHERE type = unicode('p'); -- Stores the accounts that used marked as trusted. CREATE TABLE trusted_accounts ( - -- Account that we trust - id INTEGER PRIMARY KEY REFERENCES public_keys (id) ON DELETE CASCADE NOT NULL + -- Account that we trust. Lookup value must be of type public key. + id INTEGER REFERENCES lookup (id) ON DELETE CASCADE NOT NULL, + PRIMARY KEY (id) ) WITHOUT ROWID; --- Stores derived information from Key Delegation blobs. -CREATE TABLE key_delegations ( - -- Issuer key. - issuer INTEGER REFERENCES public_keys (id) ON DELETE CASCADE NOT NULL, - -- Delegate key. - delegate INTEGER REFERENCES public_keys (id) ON DELETE CASCADE NOT NULL, - -- Key delegation blob ID. - blob INTEGER REFERENCES blobs (id) ON DELETE CASCADE NOT NULL, - -- Issue time. 
- issue_time INTEGER NOT NULL, - PRIMARY KEY (issuer, delegate, blob) -) WITHOUT ROWID; - -CREATE INDEX idx_key_delegations_by_delegate ON key_delegations (delegate, issuer, blob); - -CREATE INDEX idx_key_delegations_by_blob ON key_delegations (blob, issuer, delegate); - --- View of key delegations dereferencing foreign keys. -CREATE VIEW key_delegations_view AS - SELECT - kd.blob AS blob, - blobs.codec AS blob_codec, - blobs.multihash AS blob_multihash, - iss.principal AS issuer, - del.principal AS delegate, - kd.issue_time AS issue_time - FROM key_delegations kd - JOIN blobs ON blobs.id = kd.blob - JOIN public_keys iss ON iss.id = kd.issuer - JOIN public_keys del ON del.id = kd.delegate -; - --- Stores IDs of Hypermedia Entities. -CREATE TABLE hd_entities ( - -- Local shorthand ID. - id INTEGER PRIMARY KEY, - -- Entity ID. - eid TEXT UNIQUE NOT NULL CHECK (eid != '') -); +CREATE VIEW entities AS +SELECT + id, + value AS eid +FROM lookup +WHERE type = unicode('r'); -- Changes to the Hypermedia Entities. -CREATE TABLE hd_changes ( +CREATE TABLE changes ( -- Entity being changed. - entity INTEGER REFERENCES hd_entities (id) NOT NULL, + entity INTEGER REFERENCES lookup (id) NOT NULL, -- Blob ID of the change. blob INTEGER REFERENCES blobs (id) ON DELETE CASCADE NOT NULL, -- HLC timestamp of the change. hlc_time INTEGER NOT NULL, -- Author of the change. - author INTEGER REFERENCES public_keys (id) ON DELETE CASCADE NOT NULL, + author INTEGER REFERENCES lookup (id) ON DELETE CASCADE NOT NULL, PRIMARY KEY (entity, blob) ) WITHOUT ROWID; -CREATE INDEX idx_hd_changes_to_entity ON hd_changes (blob, entity); +CREATE INDEX changes_by_entity ON changes (blob, entity); +CREATE INDEX changes_by_author ON changes (author); -- View of changes with dereferences foreign keys. 
-CREATE VIEW hd_changes_view AS - SELECT - hd_changes.blob AS blob_id, - hd_changes.entity AS entity_id, - hd_changes.hlc_time AS hlc_time, - hd_entities.eid AS entity, - blobs.codec AS codec, - blobs.multihash AS multihash, - blobs.data AS data, - blobs.size AS size - FROM hd_changes - JOIN blobs ON blobs.id = hd_changes.blob - JOIN hd_entities ON hd_changes.entity = hd_entities.id -; +CREATE VIEW changes_view AS +SELECT + changes.blob AS blob_id, + changes.entity AS entity_id, + changes.hlc_time AS hlc_time, + entities.eid AS entity, + blobs.codec AS codec, + blobs.multihash AS multihash, + blobs.data AS data, + blobs.size AS size +FROM changes +JOIN blobs ON blobs.id = changes.blob +JOIN entities ON changes.entity = entities.id; -- Draft changes. Only one draft is allowed for now. -CREATE TABLE hd_drafts ( - entity INTEGER PRIMARY KEY REFERENCES hd_entities (id) ON DELETE CASCADE NOT NULL, - blob INTEGER REFERENCES blobs (id) ON DELETE CASCADE NOT NULL -); +CREATE TABLE drafts ( + entity INTEGER REFERENCES lookup (id) ON DELETE CASCADE NOT NULL, + blob INTEGER REFERENCES blobs (id) ON DELETE CASCADE NOT NULL, + PRIMARY KEY (entity, blob) +) WITHOUT ROWID; + +CREATE INDEX drafts_by_blob ON drafts (blob); + +-- Index to ensure only one draft is allowed. Defining it separately, +-- so it's easier to drop eventually without a complex migration. +CREATE UNIQUE INDEX drafts_unique ON drafts (entity); CREATE VIEW public_blobs_view AS - SELECT - blobs.id, - blobs.codec, - blobs.multihash - FROM blobs - LEFT OUTER JOIN hd_drafts ON hd_drafts.blob = blobs.id - WHERE hd_drafts.blob IS NULL -; +SELECT + blobs.id, + blobs.codec, + blobs.multihash +FROM blobs +LEFT OUTER JOIN drafts ON drafts.blob = blobs.id +WHERE drafts.blob IS NULL; -- View of drafts with dereferenced foreign keys. 
-CREATE VIEW hd_drafts_view AS - SELECT - hd_drafts.entity AS entity_id, - hd_drafts.blob AS blob_id, - hd_entities.eid AS entity, - blobs.codec AS codec, - blobs.multihash AS multihash - FROM hd_drafts - JOIN hd_entities ON hd_entities.id = hd_drafts.entity - JOIN blobs ON blobs.id = hd_drafts.blob -; - --- Stores links between blobs. -CREATE TABLE hd_links ( - source_blob INTEGER REFERENCES blobs (id) ON DELETE CASCADE NOT NULL, - -- TODO(burdiyan): normalize this to reduce disk usage. +CREATE VIEW drafts_view AS +SELECT + drafts.entity AS entity_id, + drafts.blob AS blob_id, + entities.eid AS entity, + blobs.codec AS codec, + blobs.multihash AS multihash +FROM drafts +JOIN entities ON entities.id = drafts.entity +JOIN blobs ON blobs.id = drafts.blob; + +CREATE TABLE blob_links ( + source INTEGER REFERENCES blobs (id) ON DELETE CASCADE NOT NULL, + target INTEGER REFERENCES blobs (id) NOT NULL, rel TEXT NOT NULL, - target_entity INTEGER REFERENCES hd_entities (id), - target_blob INTEGER REFERENCES blobs (id), - data BLOB, - CHECK ((target_entity, target_blob) IS NOT (null, null)) + PRIMARY KEY (source, rel, target) +) WITHOUT ROWID; + +CREATE UNIQUE INDEX blob_links_by_rel ON blob_links (rel, source, target); +CREATE UNIQUE INDEX blob_links_by_target ON blob_links (target, rel, source); + +CREATE TABLE blob_attrs ( + blob INTEGER REFERENCES blobs (id) ON DELETE CASCADE NOT NULL, + key TEXT NOT NULL, + anchor TEXT NOT NULL DEFAULT (''), + is_lookup INTEGER NOT NULL DEFAULT (0), + value, + extra, + ts INTEGER NOT NULL, + value_ptr INTEGER REFERENCES lookup (id) GENERATED ALWAYS AS (IIF(is_lookup = 1, value, NULL)) VIRTUAL ); --- These are probably not the most optimal indices. 
-CREATE INDEX idx_hd_links_blobs ON hd_links (source_blob, target_blob) WHERE target_blob IS NOT NULL; -CREATE INDEX idx_hd_links_blobs_rev ON hd_links (target_blob, source_blob) WHERE target_blob IS NOT NULL; -CREATE INDEX idx_hd_links_by_target_entity ON hd_links (target_entity) WHERE target_entity IS NOT NULL; +CREATE INDEX blob_attrs_by_key ON blob_attrs (key, blob); +CREATE INDEX blob_attrs_by_blob ON blob_attrs (blob, key); +CREATE INDEX blob_attrs_by_value ON blob_attrs (value_ptr, key) WHERE value_ptr IS NOT NULL; + +CREATE VIEW key_delegations AS +SELECT + blob AS blob, + MAX(IIF(key = 'kd/issuer', value_ptr, NULL)) AS issuer, + MAX(IIF(key = 'kd/delegate', value_ptr, NULL)) AS delegate +FROM blob_attrs +WHERE key IN ('kd/issuer', 'kd/delegate') +GROUP BY blob; + +-- View of key delegations dereferencing foreign keys. +CREATE VIEW key_delegations_view AS +SELECT + kd.blob AS blob, + blobs.codec AS blob_codec, + blobs.multihash AS blob_multihash, + iss.principal AS issuer, + del.principal AS delegate +FROM key_delegations kd +JOIN blobs ON blobs.id = kd.blob +JOIN public_keys iss ON iss.id = kd.issuer +JOIN public_keys del ON del.id = kd.delegate; + +-- Stores head blobs for each resource. +-- Each named head can have more than one blob, +-- so there can be multiple rows for each resource and name. +CREATE TABLE heads ( + resource INTEGER REFERENCES lookup (id) NOT NULL, + name TEXT NOT NULL, + blob INTEGER REFERENCES blobs (id) NOT NULL, + PRIMARY KEY (resource, name, blob) +) WITHOUT ROWID; + +CREATE INDEX heads_by_blob ON heads (blob); -- View for dependency links between changes. 
-CREATE VIEW hd_change_deps AS - SELECT - source_blob AS child, - target_blob AS parent - FROM hd_links - WHERE rel = 'change:depends' - AND target_blob IS NOT NULL -; - -CREATE VIEW content_links_view AS - SELECT - hd_changes.entity AS source_entity, - sources.eid AS source_eid, - hd_links.source_blob AS source_blob, - blobs.codec AS source_blob_codec, - blobs.multihash AS source_blob_multihash, - hd_links.rel AS rel, - hd_links.target_entity AS target_entity, - targets.eid AS target_eid, - hd_links.data AS data - FROM hd_links - JOIN hd_changes ON hd_changes.blob = hd_links.source_blob - JOIN blobs ON blobs.id = hd_links.source_blob - JOIN hd_entities sources ON sources.id = hd_changes.entity - JOIN hd_entities targets ON targets.id = hd_links.target_entity - WHERE rel GLOB 'href*' - AND target_entity IS NOT NULL -; +CREATE VIEW change_deps AS +SELECT + source AS child, + target AS parent +FROM blob_links +WHERE rel = 'change/dep'; -- Stores Lightning wallets both externals (imported wallets like bluewallet -- based on lndhub) and internals (based on the LND embedded node). @@ -225,9 +235,11 @@ CREATE TABLE sites ( addresses TEXT NOT NULL CHECK(addresses <> ''), -- The account ID of the site. We need a previous connection to the site so the -- actual account is inserted in the accounts table when handshake. - account_id INTEGER REFERENCES public_keys (id) ON DELETE CASCADE NOT NULL + account_id INTEGER REFERENCES lookup (id) ON DELETE CASCADE NOT NULL ) WITHOUT ROWID; +CREATE INDEX sites_by_account ON sites (account_id); + -- Table that stores all the tokens not yet redeemed inside a site. Although this table is relevant only -- for sites at the beginning, keep in mind that any regular node can be upgraded to a site. CREATE TABLE invite_tokens ( @@ -244,11 +256,12 @@ CREATE TABLE invite_tokens ( -- for sites at the beginning, keep in mind that any regular node can be upgraded to a site. 
CREATE TABLE site_members ( -- The account id that has been linked to a role on this site - account_id INTEGER PRIMARY KEY REFERENCES public_keys (id) ON DELETE CASCADE NOT NULL, + account_id INTEGER REFERENCES lookup (id) ON DELETE CASCADE NOT NULL, -- The role of the site member. -- OWNER = 1 | EDITOR = 2. - role INTEGER NOT NULL CHECK (role != 0) -); + role INTEGER NOT NULL CHECK (role != 0), + PRIMARY KEY (account_id) +) WITHOUT ROWID; -- We currently only allow one owner per site. CREATE UNIQUE INDEX idx_site_owner ON site_members (role) WHERE role = 1; @@ -270,11 +283,13 @@ CREATE TABLE served_sites ( -- the domain + protocol the site is served in. hostname TEXT CHECK (hostname <> '') PRIMARY KEY, -- entity ID of the group the site is associated with. - group_id INTEGER REFERENCES hd_entities (id) ON DELETE NO ACTION NOT NULL, + group_id INTEGER REFERENCES lookup (id) NOT NULL, -- the version of the group the site is serving. version TEXT NOT NULL, -- account id of the owner of the group. - owner_id INTEGER REFERENCES public_keys (id) ON DELETE NO ACTION NOT NULL, + owner_id INTEGER REFERENCES lookup (id) NOT NULL, -- same version + groupid cannot be published in different histnames. UNIQUE(group_id, version) ON CONFLICT REPLACE ); + +CREATE INDEX served_sites_by_owner ON served_sites (owner_id); diff --git a/backend/daemon/storage/schema_test.go b/backend/daemon/storage/schema_test.go new file mode 100644 index 0000000000..7734115bf8 --- /dev/null +++ b/backend/daemon/storage/schema_test.go @@ -0,0 +1,111 @@ +package storage + +import ( + "context" + "fmt" + "log" + "testing" + + "crawshaw.io/sqlite" + "github.com/stretchr/testify/require" +) + +func TestSchemaForeignKeyIndexes(t *testing.T) { + // This test makes sure that all child foreign key columns are covered by at least one index. + // Sometimes not having one could be justified, e.g. 
when the child table is very small, and not expensive to full scan, + // but on the other hand, the overhead of having an index for these small tables would be even smaller. So it's probably + // easier to just have a rule to make these columns always indexed. + + db, err := OpenSQLite("file::memory:?mode=memory&cache=shared", 0, 1) + require.NoError(t, err) + defer db.Close() + + ctx := context.Background() + require.NoError(t, InitSQLiteSchema(db)) + + conn, release, err := db.Conn(ctx) + require.NoError(t, err) + defer release() + + introspectSchema(t, conn) +} + +func introspectSchema(t *testing.T, conn *sqlite.Conn) { + // This code was written with the help of ChatGPT, + // so it's not the most optimal thing in the world. + + stmt := conn.Prep("SELECT name FROM sqlite_master WHERE type = 'table';") + + for { + hasRow, err := stmt.Step() + if err != nil { + log.Fatal(err) + } + if !hasRow { + require.NoError(t, stmt.Finalize()) + break + } + + tableName := stmt.ColumnText(0) + + foreignKeysStmt := conn.Prep(fmt.Sprintf("PRAGMA foreign_key_list(%s);", tableName)) + + for { + hasRow, err := foreignKeysStmt.Step() + if err != nil { + log.Fatal(err) + } + if !hasRow { + require.NoError(t, foreignKeysStmt.Finalize()) + break + } + + from := foreignKeysStmt.ColumnText(3) + + indexesStmt := conn.Prep(fmt.Sprintf("PRAGMA index_list(%s);", tableName)) + + var found bool + for { + hasRow, err := indexesStmt.Step() + if err != nil { + log.Fatal(err) + } + if !hasRow { + require.NoError(t, indexesStmt.Finalize()) + break + } + + indexName := indexesStmt.ColumnText(1) + + // We are only interested in the first column in case of a compound index. 
+ indexColumnsStmt := conn.Prep(fmt.Sprintf("SELECT * FROM pragma_index_info('%s') WHERE seqno = 0;", indexName)) + + for { + hasRow, err := indexColumnsStmt.Step() + if err != nil { + log.Fatal(err) + } + if !hasRow { + require.NoError(t, indexColumnsStmt.Finalize()) + break + } + + columnName := indexColumnsStmt.ColumnText(2) + if columnName == from { + found = true + require.NoError(t, indexColumnsStmt.Finalize()) + break + } + } + if found { + require.NoError(t, indexesStmt.Finalize()) + break + } + } + + if !found { + t.Errorf("Table %s foreign key on column %s is not covered by any index", tableName, from) + } + } + } +} diff --git a/backend/daemon/storage/sqlfmt.go b/backend/daemon/storage/sqlfmt.go index 6e1637f0bf..21d43b5c95 100644 --- a/backend/daemon/storage/sqlfmt.go +++ b/backend/daemon/storage/sqlfmt.go @@ -47,5 +47,7 @@ func sqlfmt(text string) string { text = regexp.MustCompile("(?m)^"+margin).ReplaceAllString(text, "") } - return strings.Replace(text, "\t", " ", -1) + return removeSQLComments( + strings.Replace(text, "\t", " ", -1), + ) } diff --git a/backend/daemon/storage/sqlite.go b/backend/daemon/storage/sqlite.go index dee2038d06..dace3079a3 100644 --- a/backend/daemon/storage/sqlite.go +++ b/backend/daemon/storage/sqlite.go @@ -2,6 +2,7 @@ package storage import ( "context" + "mintter/backend/daemon/storage/litext" "mintter/backend/testutil" "path/filepath" "testing" @@ -11,18 +12,28 @@ import ( "github.com/stretchr/testify/require" ) +import "C" + // OpenSQLite opens a connection pool for SQLite, enabling some needed functionality for our schema // like foreign keys. func OpenSQLite(uri string, flags sqlite.OpenFlags, poolSize int) (*sqlitex.Pool, error) { return openSQLite(uri, flags, poolSize, - "PRAGMA encoding = \"UTF-8\";", "PRAGMA foreign_keys = ON;", "PRAGMA synchronous = NORMAL;", "PRAGMA journal_mode = WAL;", + + // Setting up some in-memory tables for materializing some query results temporarily. 
+ "ATTACH DATABASE ':memory:' AS mem;", + "CREATE TABLE mem.changes (id INTEGER PRIMARY KEY);", + "CREATE TABLE mem.change_deps (child INTEGER, parent INTEGER, PRIMARY KEY (child, parent), UNIQUE (parent, child)) WITHOUT ROWID;", ) } func openSQLite(uri string, flags sqlite.OpenFlags, poolSize int, prelude ...string) (*sqlitex.Pool, error) { + if err := litext.LoadExtensions(); err != nil { + return nil, err + } + pool, err := sqlitex.Open(uri, flags, poolSize) if err != nil { return nil, err diff --git a/backend/daemon/storage/sqlite_test.go b/backend/daemon/storage/sqlite_test.go index cd1279f55a..c7ac68a746 100644 --- a/backend/daemon/storage/sqlite_test.go +++ b/backend/daemon/storage/sqlite_test.go @@ -15,4 +15,5 @@ func TestSQLite(t *testing.T) { defer pool.Close() sqlitedbg.ExecPool(pool, os.Stdout, "select sha1('hello')") + sqlitedbg.ExecPool(pool, os.Stdout, "select mycount() from (values (1), (2));") } diff --git a/backend/genproto/groups/v1alpha/groups.pb.go b/backend/genproto/groups/v1alpha/groups.pb.go index edf0fb280f..494bbd0f64 100644 --- a/backend/genproto/groups/v1alpha/groups.pb.go +++ b/backend/genproto/groups/v1alpha/groups.pb.go @@ -28,19 +28,23 @@ const ( // Zero value which is an invalid role. This role is used to delete members, // and serves as a tombstone, because in our CRDT there's no way to truly delete something. Role_ROLE_UNSPECIFIED Role = 0 + // Output only. Role for an owner. Cannot be used in updates, can only be returned in queries. + Role_OWNER Role = 1 // Editor role which allows members to manage content of the group. - Role_EDITOR Role = 1 + Role_EDITOR Role = 2 ) // Enum value maps for Role. 
var ( Role_name = map[int32]string{ 0: "ROLE_UNSPECIFIED", - 1: "EDITOR", + 1: "OWNER", + 2: "EDITOR", } Role_value = map[string]int32{ "ROLE_UNSPECIFIED": 0, - "EDITOR": 1, + "OWNER": 1, + "EDITOR": 2, } ) @@ -225,7 +229,7 @@ type UpdateGroupRequest struct { UpdatedMembers map[string]Role `protobuf:"bytes,4,rep,name=updated_members,json=updatedMembers,proto3" json:"updated_members,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3,enum=com.mintter.groups.v1alpha.Role"` // Optional. List of content to be updated in the Group. // Key is a pretty path on which the content is published, - // value is a Hyperdocs URL of the content. + // value is a Hypermedia URL of the content. // To unpublish content set the value to an empty string for a given pretty path. // Only updated records have to be sent, not all the content of the group. UpdatedContent map[string]string `protobuf:"bytes,5,rep,name=updated_content,json=updatedContent,proto3" json:"updated_content,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` @@ -384,7 +388,7 @@ type ListMembersResponse struct { // ID of the group owner. OwnerAccountId string `protobuf:"bytes,1,opt,name=owner_account_id,json=ownerAccountId,proto3" json:"owner_account_id,omitempty"` - // List of members of the group. + // List of members of the group, including the owner. Members map[string]Role `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3,enum=com.mintter.groups.v1alpha.Role"` // Token to continue listing members from. NextPageToken string `protobuf:"bytes,3,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` @@ -528,7 +532,7 @@ type ListContentResponse struct { unknownFields protoimpl.UnknownFields // List of content of the group. - // Key is the path and value is a Hyperdocs URL to the content. 
+ // Key is the path and value is a Hypermedia URL to the content. Content map[string]string `protobuf:"bytes,1,rep,name=content,proto3" json:"content,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Token to continue listing content from. NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` @@ -957,6 +961,258 @@ func (x *GetSiteInfoResponse) GetOwnerId() string { return "" } +// Request to list groups for a document. +type ListDocumentGroupsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. ID of the document to list groups for. + // Must be fully-qualified Entity ID. + DocumentId string `protobuf:"bytes,1,opt,name=document_id,json=documentId,proto3" json:"document_id,omitempty"` + // Optional. Maximum number of groups to return. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Optional. Page token to continue listing groups from. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListDocumentGroupsRequest) Reset() { + *x = ListDocumentGroupsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_groups_v1alpha_groups_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListDocumentGroupsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListDocumentGroupsRequest) ProtoMessage() {} + +func (x *ListDocumentGroupsRequest) ProtoReflect() protoreflect.Message { + mi := &file_groups_v1alpha_groups_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListDocumentGroupsRequest.ProtoReflect.Descriptor instead. +func (*ListDocumentGroupsRequest) Descriptor() ([]byte, []int) { + return file_groups_v1alpha_groups_proto_rawDescGZIP(), []int{13} +} + +func (x *ListDocumentGroupsRequest) GetDocumentId() string { + if x != nil { + return x.DocumentId + } + return "" +} + +func (x *ListDocumentGroupsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListDocumentGroupsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// Response to list groups for a document. +type ListDocumentGroupsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // List of document publications in groups. + Items []*ListDocumentGroupsResponse_Item `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"` + // Token to continue listing groups from. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListDocumentGroupsResponse) Reset() { + *x = ListDocumentGroupsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_groups_v1alpha_groups_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListDocumentGroupsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListDocumentGroupsResponse) ProtoMessage() {} + +func (x *ListDocumentGroupsResponse) ProtoReflect() protoreflect.Message { + mi := &file_groups_v1alpha_groups_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListDocumentGroupsResponse.ProtoReflect.Descriptor instead. +func (*ListDocumentGroupsResponse) Descriptor() ([]byte, []int) { + return file_groups_v1alpha_groups_proto_rawDescGZIP(), []int{14} +} + +func (x *ListDocumentGroupsResponse) GetItems() []*ListDocumentGroupsResponse_Item { + if x != nil { + return x.Items + } + return nil +} + +func (x *ListDocumentGroupsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// Request to list groups for an account. +type ListAccountGroupsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. ID of the account to list groups for. + // Must be Account ID, not the ID of the Account Entity. + AccountId string `protobuf:"bytes,1,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` + // Optional. Maximum number of groups to return. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Optional. 
Page token to continue listing groups from. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListAccountGroupsRequest) Reset() { + *x = ListAccountGroupsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_groups_v1alpha_groups_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListAccountGroupsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListAccountGroupsRequest) ProtoMessage() {} + +func (x *ListAccountGroupsRequest) ProtoReflect() protoreflect.Message { + mi := &file_groups_v1alpha_groups_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListAccountGroupsRequest.ProtoReflect.Descriptor instead. +func (*ListAccountGroupsRequest) Descriptor() ([]byte, []int) { + return file_groups_v1alpha_groups_proto_rawDescGZIP(), []int{15} +} + +func (x *ListAccountGroupsRequest) GetAccountId() string { + if x != nil { + return x.AccountId + } + return "" +} + +func (x *ListAccountGroupsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListAccountGroupsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// Response to list groups for an account. +type ListAccountGroupsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // List of groups that the account is a member of with their role. + Items []*ListAccountGroupsResponse_Item `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"` + // Token to continue listing groups from. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListAccountGroupsResponse) Reset() { + *x = ListAccountGroupsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_groups_v1alpha_groups_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListAccountGroupsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListAccountGroupsResponse) ProtoMessage() {} + +func (x *ListAccountGroupsResponse) ProtoReflect() protoreflect.Message { + mi := &file_groups_v1alpha_groups_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListAccountGroupsResponse.ProtoReflect.Descriptor instead. +func (*ListAccountGroupsResponse) Descriptor() ([]byte, []int) { + return file_groups_v1alpha_groups_proto_rawDescGZIP(), []int{16} +} + +func (x *ListAccountGroupsResponse) GetItems() []*ListAccountGroupsResponse_Item { + if x != nil { + return x.Items + } + return nil +} + +func (x *ListAccountGroupsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + // Group represents the basic information about a group entity. type Group struct { state protoimpl.MessageState @@ -976,12 +1232,14 @@ type Group struct { // Version of the group entity that is being returned by the server. // When latest version is requested, this is the same as latest_version. Version string `protobuf:"bytes,6,opt,name=version,proto3" json:"version,omitempty"` + // Timestamp of the version of the group. 
+ UpdateTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` } func (x *Group) Reset() { *x = Group{} if protoimpl.UnsafeEnabled { - mi := &file_groups_v1alpha_groups_proto_msgTypes[13] + mi := &file_groups_v1alpha_groups_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -994,7 +1252,7 @@ func (x *Group) String() string { func (*Group) ProtoMessage() {} func (x *Group) ProtoReflect() protoreflect.Message { - mi := &file_groups_v1alpha_groups_proto_msgTypes[13] + mi := &file_groups_v1alpha_groups_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1007,7 +1265,7 @@ func (x *Group) ProtoReflect() protoreflect.Message { // Deprecated: Use Group.ProtoReflect.Descriptor instead. func (*Group) Descriptor() ([]byte, []int) { - return file_groups_v1alpha_groups_proto_rawDescGZIP(), []int{13} + return file_groups_v1alpha_groups_proto_rawDescGZIP(), []int{17} } func (x *Group) GetId() string { @@ -1052,6 +1310,152 @@ func (x *Group) GetVersion() string { return "" } +func (x *Group) GetUpdateTime() *timestamppb.Timestamp { + if x != nil { + return x.UpdateTime + } + return nil +} + +type ListDocumentGroupsResponse_Item struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // ID of the group that the document is published to. + GroupId string `protobuf:"bytes,1,opt,name=group_id,json=groupId,proto3" json:"group_id,omitempty"` + // ID of the group change that published the document to the group. + GroupChange string `protobuf:"bytes,2,opt,name=group_change,json=groupChange,proto3" json:"group_change,omitempty"` + // Timestamp of the change that published the document to the group. 
+ ChangeTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=change_time,json=changeTime,proto3" json:"change_time,omitempty"` + // Path at which document is published. + Path string `protobuf:"bytes,4,opt,name=path,proto3" json:"path,omitempty"` + // Raw URL that is published to the group. + RawUrl string `protobuf:"bytes,5,opt,name=raw_url,json=rawUrl,proto3" json:"raw_url,omitempty"` +} + +func (x *ListDocumentGroupsResponse_Item) Reset() { + *x = ListDocumentGroupsResponse_Item{} + if protoimpl.UnsafeEnabled { + mi := &file_groups_v1alpha_groups_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListDocumentGroupsResponse_Item) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListDocumentGroupsResponse_Item) ProtoMessage() {} + +func (x *ListDocumentGroupsResponse_Item) ProtoReflect() protoreflect.Message { + mi := &file_groups_v1alpha_groups_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListDocumentGroupsResponse_Item.ProtoReflect.Descriptor instead. 
+func (*ListDocumentGroupsResponse_Item) Descriptor() ([]byte, []int) { + return file_groups_v1alpha_groups_proto_rawDescGZIP(), []int{14, 0} +} + +func (x *ListDocumentGroupsResponse_Item) GetGroupId() string { + if x != nil { + return x.GroupId + } + return "" +} + +func (x *ListDocumentGroupsResponse_Item) GetGroupChange() string { + if x != nil { + return x.GroupChange + } + return "" +} + +func (x *ListDocumentGroupsResponse_Item) GetChangeTime() *timestamppb.Timestamp { + if x != nil { + return x.ChangeTime + } + return nil +} + +func (x *ListDocumentGroupsResponse_Item) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *ListDocumentGroupsResponse_Item) GetRawUrl() string { + if x != nil { + return x.RawUrl + } + return "" +} + +type ListAccountGroupsResponse_Item struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Group *Group `protobuf:"bytes,1,opt,name=group,proto3" json:"group,omitempty"` + Role Role `protobuf:"varint,2,opt,name=role,proto3,enum=com.mintter.groups.v1alpha.Role" json:"role,omitempty"` +} + +func (x *ListAccountGroupsResponse_Item) Reset() { + *x = ListAccountGroupsResponse_Item{} + if protoimpl.UnsafeEnabled { + mi := &file_groups_v1alpha_groups_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListAccountGroupsResponse_Item) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListAccountGroupsResponse_Item) ProtoMessage() {} + +func (x *ListAccountGroupsResponse_Item) ProtoReflect() protoreflect.Message { + mi := &file_groups_v1alpha_groups_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListAccountGroupsResponse_Item.ProtoReflect.Descriptor instead. 
+func (*ListAccountGroupsResponse_Item) Descriptor() ([]byte, []int) { + return file_groups_v1alpha_groups_proto_rawDescGZIP(), []int{16, 0} +} + +func (x *ListAccountGroupsResponse_Item) GetGroup() *Group { + if x != nil { + return x.Group + } + return nil +} + +func (x *ListAccountGroupsResponse_Item) GetRole() Role { + if x != nil { + return x.Role + } + return Role_ROLE_UNSPECIFIED +} + var File_groups_v1alpha_groups_proto protoreflect.FileDescriptor var file_groups_v1alpha_groups_proto_rawDesc = []byte{ @@ -1194,80 +1598,154 @@ var file_groups_v1alpha_groups_proto_rawDesc = []byte{ 0x70, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x22, 0xd0, 0x01, 0x0a, 0x05, 0x47, 0x72, 0x6f, - 0x75, 0x70, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, - 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x6f, 0x77, - 0x6e, 0x65, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x41, 0x63, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x49, 0x64, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, - 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 
0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, - 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2a, 0x28, 0x0a, 0x04, 0x52, - 0x6f, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x4f, 0x4c, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, - 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x45, 0x44, 0x49, - 0x54, 0x4f, 0x52, 0x10, 0x01, 0x32, 0xdb, 0x06, 0x0a, 0x06, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, - 0x12, 0x60, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, - 0x2e, 0x2e, 0x63, 0x6f, 0x6d, 0x2e, 0x6d, 0x69, 0x6e, 0x74, 0x74, 0x65, 0x72, 0x2e, 0x67, 0x72, - 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x21, 0x2e, 0x63, 0x6f, 0x6d, 0x2e, 0x6d, 0x69, 0x6e, 0x74, 0x74, 0x65, 0x72, 0x2e, 0x67, 0x72, - 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x47, 0x72, 0x6f, - 0x75, 0x70, 0x12, 0x5a, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x2b, - 0x2e, 0x63, 0x6f, 0x6d, 0x2e, 0x6d, 0x69, 0x6e, 0x74, 0x74, 0x65, 0x72, 0x2e, 0x67, 0x72, 0x6f, - 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x47, - 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x63, 0x6f, + 0x07, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x22, 0x78, 0x0a, 0x19, 0x4c, 0x69, 0x73, 0x74, + 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, + 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x6f, 0x63, 0x75, + 0x6d, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 
0x67, 0x65, 0x5f, 0x73, + 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, + 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x22, 0xc8, 0x02, 0x0a, 0x1a, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x6f, 0x63, 0x75, 0x6d, + 0x65, 0x6e, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x51, 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x3b, 0x2e, 0x63, 0x6f, 0x6d, 0x2e, 0x6d, 0x69, 0x6e, 0x74, 0x74, 0x65, 0x72, 0x2e, 0x67, + 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x05, 0x69, + 0x74, 0x65, 0x6d, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, + 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, + 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x1a, 0xae, 0x01, 0x0a, + 0x04, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x19, 0x0a, 0x08, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x64, + 0x12, 0x21, 0x0a, 0x0c, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x43, 0x68, 0x61, + 0x6e, 0x67, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 
0x6d, 0x70, 0x52, 0x0a, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, 0x69, 0x6d, 0x65, + 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x70, 0x61, 0x74, 0x68, 0x12, 0x17, 0x0a, 0x07, 0x72, 0x61, 0x77, 0x5f, 0x75, 0x72, 0x6c, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x61, 0x77, 0x55, 0x72, 0x6c, 0x22, 0x75, 0x0a, + 0x18, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x47, 0x72, 0x6f, 0x75, + 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, + 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, + 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, + 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, + 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x8c, 0x02, 0x0a, 0x19, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x63, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x50, 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x3a, 0x2e, 0x63, 0x6f, 0x6d, 0x2e, 0x6d, 0x69, 0x6e, 0x74, 0x74, 0x65, 0x72, 0x2e, + 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4c, + 0x69, 0x73, 0x74, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x05, 0x69, + 0x74, 0x65, 0x6d, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, + 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, + 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 
0x6f, 0x6b, 0x65, 0x6e, 0x1a, 0x75, 0x0a, 0x04, + 0x49, 0x74, 0x65, 0x6d, 0x12, 0x37, 0x0a, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x6f, 0x6d, 0x2e, 0x6d, 0x69, 0x6e, 0x74, 0x74, 0x65, + 0x72, 0x2e, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x34, 0x0a, + 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x63, 0x6f, 0x6d, 0x2e, 0x6d, 0x69, 0x6e, 0x74, 0x74, 0x65, 0x72, 0x2e, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, - 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x60, - 0x0a, 0x0b, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x2e, 0x2e, - 0x63, 0x6f, 0x6d, 0x2e, 0x6d, 0x69, 0x6e, 0x74, 0x74, 0x65, 0x72, 0x2e, 0x67, 0x72, 0x6f, 0x75, - 0x70, 0x73, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, - 0x63, 0x6f, 0x6d, 0x2e, 0x6d, 0x69, 0x6e, 0x74, 0x74, 0x65, 0x72, 0x2e, 0x67, 0x72, 0x6f, 0x75, - 0x70, 0x73, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, - 0x12, 0x6e, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x12, - 0x2e, 0x2e, 0x63, 0x6f, 0x6d, 0x2e, 0x6d, 0x69, 0x6e, 0x74, 0x74, 0x65, 0x72, 0x2e, 0x67, 0x72, - 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4c, 0x69, 0x73, - 0x74, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x2f, 0x2e, 0x63, 0x6f, 0x6d, 0x2e, 0x6d, 0x69, 0x6e, 0x74, 0x74, 0x65, 0x72, 0x2e, 0x67, 0x72, - 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4c, 0x69, 0x73, - 0x74, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 
0x65, - 0x12, 0x6e, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, - 0x2e, 0x2e, 0x63, 0x6f, 0x6d, 0x2e, 0x6d, 0x69, 0x6e, 0x74, 0x74, 0x65, 0x72, 0x2e, 0x67, 0x72, - 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4c, 0x69, 0x73, - 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x2f, 0x2e, 0x63, 0x6f, 0x6d, 0x2e, 0x6d, 0x69, 0x6e, 0x74, 0x74, 0x65, 0x72, 0x2e, 0x67, 0x72, - 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4c, 0x69, 0x73, - 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x6b, 0x0a, 0x0a, 0x4c, 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x2d, - 0x2e, 0x63, 0x6f, 0x6d, 0x2e, 0x6d, 0x69, 0x6e, 0x74, 0x74, 0x65, 0x72, 0x2e, 0x67, 0x72, 0x6f, - 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, - 0x63, 0x6f, 0x6d, 0x2e, 0x6d, 0x69, 0x6e, 0x74, 0x74, 0x65, 0x72, 0x2e, 0x67, 0x72, 0x6f, 0x75, - 0x70, 0x73, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x47, - 0x72, 0x6f, 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x74, 0x0a, - 0x0d, 0x43, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x54, 0x6f, 0x53, 0x69, 0x74, 0x65, 0x12, 0x30, + 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x52, 0x6f, 0x6c, 0x65, 0x52, 0x04, 0x72, + 0x6f, 0x6c, 0x65, 0x22, 0x8d, 0x02, 0x0a, 0x05, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x0e, 0x0a, + 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, + 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x69, + 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x61, + 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0e, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, + 0x3b, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, + 0x69, 0x6d, 0x65, 0x2a, 0x33, 0x0a, 0x04, 0x52, 0x6f, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x10, 0x52, + 0x4f, 0x4c, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, + 0x00, 0x12, 0x09, 0x0a, 0x05, 0x4f, 0x57, 0x4e, 0x45, 0x52, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, + 0x45, 0x44, 0x49, 0x54, 0x4f, 0x52, 0x10, 0x02, 0x32, 0xe4, 0x08, 0x0a, 0x06, 0x47, 0x72, 0x6f, + 0x75, 0x70, 0x73, 0x12, 0x60, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x47, 0x72, 0x6f, + 0x75, 0x70, 0x12, 0x2e, 0x2e, 0x63, 0x6f, 0x6d, 0x2e, 0x6d, 0x69, 0x6e, 0x74, 0x74, 0x65, 0x72, + 0x2e, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 
0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x63, 0x6f, 0x6d, 0x2e, 0x6d, 0x69, 0x6e, 0x74, 0x74, 0x65, 0x72, + 0x2e, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, + 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x5a, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x47, 0x72, 0x6f, 0x75, + 0x70, 0x12, 0x2b, 0x2e, 0x63, 0x6f, 0x6d, 0x2e, 0x6d, 0x69, 0x6e, 0x74, 0x74, 0x65, 0x72, 0x2e, + 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x47, + 0x65, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x63, 0x6f, 0x6d, 0x2e, 0x6d, 0x69, 0x6e, 0x74, 0x74, 0x65, 0x72, 0x2e, 0x67, 0x72, 0x6f, - 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x43, 0x6f, 0x6e, 0x76, - 0x65, 0x72, 0x74, 0x54, 0x6f, 0x53, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x31, 0x2e, 0x63, 0x6f, 0x6d, 0x2e, 0x6d, 0x69, 0x6e, 0x74, 0x74, 0x65, 0x72, 0x2e, 0x67, - 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x43, 0x6f, - 0x6e, 0x76, 0x65, 0x72, 0x74, 0x54, 0x6f, 0x53, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x6e, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x53, 0x69, 0x74, 0x65, 0x49, 0x6e, - 0x66, 0x6f, 0x12, 0x2e, 0x2e, 0x63, 0x6f, 0x6d, 0x2e, 0x6d, 0x69, 0x6e, 0x74, 0x74, 0x65, 0x72, + 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x47, 0x72, 0x6f, 0x75, + 0x70, 0x12, 0x60, 0x0a, 0x0b, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, + 0x12, 0x2e, 0x2e, 0x63, 0x6f, 0x6d, 0x2e, 0x6d, 0x69, 0x6e, 0x74, 0x74, 0x65, 0x72, 0x2e, 0x67, + 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x21, 0x2e, 0x63, 0x6f, 0x6d, 0x2e, 0x6d, 0x69, 0x6e, 0x74, 0x74, 0x65, 0x72, 0x2e, 0x67, + 0x72, 
0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x47, 0x72, + 0x6f, 0x75, 0x70, 0x12, 0x6e, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x6d, 0x62, 0x65, + 0x72, 0x73, 0x12, 0x2e, 0x2e, 0x63, 0x6f, 0x6d, 0x2e, 0x6d, 0x69, 0x6e, 0x74, 0x74, 0x65, 0x72, + 0x2e, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x63, 0x6f, 0x6d, 0x2e, 0x6d, 0x69, 0x6e, 0x74, 0x74, 0x65, 0x72, + 0x2e, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x6e, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x12, 0x2e, 0x2e, 0x63, 0x6f, 0x6d, 0x2e, 0x6d, 0x69, 0x6e, 0x74, 0x74, 0x65, 0x72, 0x2e, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, - 0x47, 0x65, 0x74, 0x53, 0x69, 0x74, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x63, 0x6f, 0x6d, 0x2e, 0x6d, 0x69, 0x6e, 0x74, 0x74, 0x65, 0x72, 0x2e, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, - 0x47, 0x65, 0x74, 0x53, 0x69, 0x74, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x42, 0x30, 0x5a, 0x2e, 0x6d, 0x69, 0x6e, 0x74, 0x74, 0x65, 0x72, 0x2f, 0x62, - 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, - 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x3b, 0x67, - 0x72, 0x6f, 0x75, 0x70, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 
0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x6b, 0x0a, 0x0a, 0x4c, 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, + 0x73, 0x12, 0x2d, 0x2e, 0x63, 0x6f, 0x6d, 0x2e, 0x6d, 0x69, 0x6e, 0x74, 0x74, 0x65, 0x72, 0x2e, + 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4c, + 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x2e, 0x2e, 0x63, 0x6f, 0x6d, 0x2e, 0x6d, 0x69, 0x6e, 0x74, 0x74, 0x65, 0x72, 0x2e, 0x67, + 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x74, 0x0a, 0x0d, 0x43, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x54, 0x6f, 0x53, 0x69, 0x74, + 0x65, 0x12, 0x30, 0x2e, 0x63, 0x6f, 0x6d, 0x2e, 0x6d, 0x69, 0x6e, 0x74, 0x74, 0x65, 0x72, 0x2e, + 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x43, + 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x54, 0x6f, 0x53, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x63, 0x6f, 0x6d, 0x2e, 0x6d, 0x69, 0x6e, 0x74, 0x74, 0x65, + 0x72, 0x2e, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x2e, 0x43, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x54, 0x6f, 0x53, 0x69, 0x74, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6e, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x53, 0x69, 0x74, + 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2e, 0x2e, 0x63, 0x6f, 0x6d, 0x2e, 0x6d, 0x69, 0x6e, 0x74, + 0x74, 0x65, 0x72, 0x2e, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, + 0x68, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x69, 0x74, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x63, 0x6f, 0x6d, 0x2e, 0x6d, 0x69, 0x6e, 0x74, + 0x74, 0x65, 0x72, 0x2e, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, + 0x68, 
0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x69, 0x74, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x83, 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x44, + 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x35, 0x2e, + 0x63, 0x6f, 0x6d, 0x2e, 0x6d, 0x69, 0x6e, 0x74, 0x74, 0x65, 0x72, 0x2e, 0x67, 0x72, 0x6f, 0x75, + 0x70, 0x73, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x44, + 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x63, 0x6f, 0x6d, 0x2e, 0x6d, 0x69, 0x6e, 0x74, 0x74, + 0x65, 0x72, 0x2e, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x47, 0x72, + 0x6f, 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x80, 0x01, 0x0a, + 0x11, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x47, 0x72, 0x6f, 0x75, + 0x70, 0x73, 0x12, 0x34, 0x2e, 0x63, 0x6f, 0x6d, 0x2e, 0x6d, 0x69, 0x6e, 0x74, 0x74, 0x65, 0x72, + 0x2e, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x63, 0x6f, 0x6d, 0x2e, 0x6d, + 0x69, 0x6e, 0x74, 0x74, 0x65, 0x72, 0x2e, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, + 0x30, 0x5a, 0x2e, 0x6d, 0x69, 0x6e, 0x74, 0x74, 0x65, 0x72, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x65, + 0x6e, 0x64, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x72, 0x6f, 0x75, + 0x70, 0x73, 0x2f, 0x76, 0x31, 0x61, 0x6c, 
0x70, 0x68, 0x61, 0x3b, 0x67, 0x72, 0x6f, 0x75, 0x70, + 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1283,62 +1761,78 @@ func file_groups_v1alpha_groups_proto_rawDescGZIP() []byte { } var file_groups_v1alpha_groups_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_groups_v1alpha_groups_proto_msgTypes = make([]protoimpl.MessageInfo, 19) +var file_groups_v1alpha_groups_proto_msgTypes = make([]protoimpl.MessageInfo, 25) var file_groups_v1alpha_groups_proto_goTypes = []interface{}{ - (Role)(0), // 0: com.mintter.groups.v1alpha.Role - (*CreateGroupRequest)(nil), // 1: com.mintter.groups.v1alpha.CreateGroupRequest - (*GetGroupRequest)(nil), // 2: com.mintter.groups.v1alpha.GetGroupRequest - (*UpdateGroupRequest)(nil), // 3: com.mintter.groups.v1alpha.UpdateGroupRequest - (*ListMembersRequest)(nil), // 4: com.mintter.groups.v1alpha.ListMembersRequest - (*ListMembersResponse)(nil), // 5: com.mintter.groups.v1alpha.ListMembersResponse - (*ListContentRequest)(nil), // 6: com.mintter.groups.v1alpha.ListContentRequest - (*ListContentResponse)(nil), // 7: com.mintter.groups.v1alpha.ListContentResponse - (*ListGroupsRequest)(nil), // 8: com.mintter.groups.v1alpha.ListGroupsRequest - (*ListGroupsResponse)(nil), // 9: com.mintter.groups.v1alpha.ListGroupsResponse - (*ConvertToSiteRequest)(nil), // 10: com.mintter.groups.v1alpha.ConvertToSiteRequest - (*ConvertToSiteResponse)(nil), // 11: com.mintter.groups.v1alpha.ConvertToSiteResponse - (*GetSiteInfoRequest)(nil), // 12: com.mintter.groups.v1alpha.GetSiteInfoRequest - (*GetSiteInfoResponse)(nil), // 13: com.mintter.groups.v1alpha.GetSiteInfoResponse - (*Group)(nil), // 14: com.mintter.groups.v1alpha.Group - nil, // 15: com.mintter.groups.v1alpha.CreateGroupRequest.MembersEntry - nil, // 16: com.mintter.groups.v1alpha.UpdateGroupRequest.UpdatedMembersEntry - nil, // 17: com.mintter.groups.v1alpha.UpdateGroupRequest.UpdatedContentEntry - nil, // 18: 
com.mintter.groups.v1alpha.ListMembersResponse.MembersEntry - nil, // 19: com.mintter.groups.v1alpha.ListContentResponse.ContentEntry - (*timestamppb.Timestamp)(nil), // 20: google.protobuf.Timestamp + (Role)(0), // 0: com.mintter.groups.v1alpha.Role + (*CreateGroupRequest)(nil), // 1: com.mintter.groups.v1alpha.CreateGroupRequest + (*GetGroupRequest)(nil), // 2: com.mintter.groups.v1alpha.GetGroupRequest + (*UpdateGroupRequest)(nil), // 3: com.mintter.groups.v1alpha.UpdateGroupRequest + (*ListMembersRequest)(nil), // 4: com.mintter.groups.v1alpha.ListMembersRequest + (*ListMembersResponse)(nil), // 5: com.mintter.groups.v1alpha.ListMembersResponse + (*ListContentRequest)(nil), // 6: com.mintter.groups.v1alpha.ListContentRequest + (*ListContentResponse)(nil), // 7: com.mintter.groups.v1alpha.ListContentResponse + (*ListGroupsRequest)(nil), // 8: com.mintter.groups.v1alpha.ListGroupsRequest + (*ListGroupsResponse)(nil), // 9: com.mintter.groups.v1alpha.ListGroupsResponse + (*ConvertToSiteRequest)(nil), // 10: com.mintter.groups.v1alpha.ConvertToSiteRequest + (*ConvertToSiteResponse)(nil), // 11: com.mintter.groups.v1alpha.ConvertToSiteResponse + (*GetSiteInfoRequest)(nil), // 12: com.mintter.groups.v1alpha.GetSiteInfoRequest + (*GetSiteInfoResponse)(nil), // 13: com.mintter.groups.v1alpha.GetSiteInfoResponse + (*ListDocumentGroupsRequest)(nil), // 14: com.mintter.groups.v1alpha.ListDocumentGroupsRequest + (*ListDocumentGroupsResponse)(nil), // 15: com.mintter.groups.v1alpha.ListDocumentGroupsResponse + (*ListAccountGroupsRequest)(nil), // 16: com.mintter.groups.v1alpha.ListAccountGroupsRequest + (*ListAccountGroupsResponse)(nil), // 17: com.mintter.groups.v1alpha.ListAccountGroupsResponse + (*Group)(nil), // 18: com.mintter.groups.v1alpha.Group + nil, // 19: com.mintter.groups.v1alpha.CreateGroupRequest.MembersEntry + nil, // 20: com.mintter.groups.v1alpha.UpdateGroupRequest.UpdatedMembersEntry + nil, // 21: 
com.mintter.groups.v1alpha.UpdateGroupRequest.UpdatedContentEntry + nil, // 22: com.mintter.groups.v1alpha.ListMembersResponse.MembersEntry + nil, // 23: com.mintter.groups.v1alpha.ListContentResponse.ContentEntry + (*ListDocumentGroupsResponse_Item)(nil), // 24: com.mintter.groups.v1alpha.ListDocumentGroupsResponse.Item + (*ListAccountGroupsResponse_Item)(nil), // 25: com.mintter.groups.v1alpha.ListAccountGroupsResponse.Item + (*timestamppb.Timestamp)(nil), // 26: google.protobuf.Timestamp } var file_groups_v1alpha_groups_proto_depIdxs = []int32{ - 15, // 0: com.mintter.groups.v1alpha.CreateGroupRequest.members:type_name -> com.mintter.groups.v1alpha.CreateGroupRequest.MembersEntry - 16, // 1: com.mintter.groups.v1alpha.UpdateGroupRequest.updated_members:type_name -> com.mintter.groups.v1alpha.UpdateGroupRequest.UpdatedMembersEntry - 17, // 2: com.mintter.groups.v1alpha.UpdateGroupRequest.updated_content:type_name -> com.mintter.groups.v1alpha.UpdateGroupRequest.UpdatedContentEntry - 18, // 3: com.mintter.groups.v1alpha.ListMembersResponse.members:type_name -> com.mintter.groups.v1alpha.ListMembersResponse.MembersEntry - 19, // 4: com.mintter.groups.v1alpha.ListContentResponse.content:type_name -> com.mintter.groups.v1alpha.ListContentResponse.ContentEntry - 14, // 5: com.mintter.groups.v1alpha.ListGroupsResponse.groups:type_name -> com.mintter.groups.v1alpha.Group - 20, // 6: com.mintter.groups.v1alpha.Group.create_time:type_name -> google.protobuf.Timestamp - 0, // 7: com.mintter.groups.v1alpha.CreateGroupRequest.MembersEntry.value:type_name -> com.mintter.groups.v1alpha.Role - 0, // 8: com.mintter.groups.v1alpha.UpdateGroupRequest.UpdatedMembersEntry.value:type_name -> com.mintter.groups.v1alpha.Role - 0, // 9: com.mintter.groups.v1alpha.ListMembersResponse.MembersEntry.value:type_name -> com.mintter.groups.v1alpha.Role - 1, // 10: com.mintter.groups.v1alpha.Groups.CreateGroup:input_type -> com.mintter.groups.v1alpha.CreateGroupRequest - 2, // 11: 
com.mintter.groups.v1alpha.Groups.GetGroup:input_type -> com.mintter.groups.v1alpha.GetGroupRequest - 3, // 12: com.mintter.groups.v1alpha.Groups.UpdateGroup:input_type -> com.mintter.groups.v1alpha.UpdateGroupRequest - 4, // 13: com.mintter.groups.v1alpha.Groups.ListMembers:input_type -> com.mintter.groups.v1alpha.ListMembersRequest - 6, // 14: com.mintter.groups.v1alpha.Groups.ListContent:input_type -> com.mintter.groups.v1alpha.ListContentRequest - 8, // 15: com.mintter.groups.v1alpha.Groups.ListGroups:input_type -> com.mintter.groups.v1alpha.ListGroupsRequest - 10, // 16: com.mintter.groups.v1alpha.Groups.ConvertToSite:input_type -> com.mintter.groups.v1alpha.ConvertToSiteRequest - 12, // 17: com.mintter.groups.v1alpha.Groups.GetSiteInfo:input_type -> com.mintter.groups.v1alpha.GetSiteInfoRequest - 14, // 18: com.mintter.groups.v1alpha.Groups.CreateGroup:output_type -> com.mintter.groups.v1alpha.Group - 14, // 19: com.mintter.groups.v1alpha.Groups.GetGroup:output_type -> com.mintter.groups.v1alpha.Group - 14, // 20: com.mintter.groups.v1alpha.Groups.UpdateGroup:output_type -> com.mintter.groups.v1alpha.Group - 5, // 21: com.mintter.groups.v1alpha.Groups.ListMembers:output_type -> com.mintter.groups.v1alpha.ListMembersResponse - 7, // 22: com.mintter.groups.v1alpha.Groups.ListContent:output_type -> com.mintter.groups.v1alpha.ListContentResponse - 9, // 23: com.mintter.groups.v1alpha.Groups.ListGroups:output_type -> com.mintter.groups.v1alpha.ListGroupsResponse - 11, // 24: com.mintter.groups.v1alpha.Groups.ConvertToSite:output_type -> com.mintter.groups.v1alpha.ConvertToSiteResponse - 13, // 25: com.mintter.groups.v1alpha.Groups.GetSiteInfo:output_type -> com.mintter.groups.v1alpha.GetSiteInfoResponse - 18, // [18:26] is the sub-list for method output_type - 10, // [10:18] is the sub-list for method input_type - 10, // [10:10] is the sub-list for extension type_name - 10, // [10:10] is the sub-list for extension extendee - 0, // [0:10] is the sub-list for field 
type_name + 19, // 0: com.mintter.groups.v1alpha.CreateGroupRequest.members:type_name -> com.mintter.groups.v1alpha.CreateGroupRequest.MembersEntry + 20, // 1: com.mintter.groups.v1alpha.UpdateGroupRequest.updated_members:type_name -> com.mintter.groups.v1alpha.UpdateGroupRequest.UpdatedMembersEntry + 21, // 2: com.mintter.groups.v1alpha.UpdateGroupRequest.updated_content:type_name -> com.mintter.groups.v1alpha.UpdateGroupRequest.UpdatedContentEntry + 22, // 3: com.mintter.groups.v1alpha.ListMembersResponse.members:type_name -> com.mintter.groups.v1alpha.ListMembersResponse.MembersEntry + 23, // 4: com.mintter.groups.v1alpha.ListContentResponse.content:type_name -> com.mintter.groups.v1alpha.ListContentResponse.ContentEntry + 18, // 5: com.mintter.groups.v1alpha.ListGroupsResponse.groups:type_name -> com.mintter.groups.v1alpha.Group + 24, // 6: com.mintter.groups.v1alpha.ListDocumentGroupsResponse.items:type_name -> com.mintter.groups.v1alpha.ListDocumentGroupsResponse.Item + 25, // 7: com.mintter.groups.v1alpha.ListAccountGroupsResponse.items:type_name -> com.mintter.groups.v1alpha.ListAccountGroupsResponse.Item + 26, // 8: com.mintter.groups.v1alpha.Group.create_time:type_name -> google.protobuf.Timestamp + 26, // 9: com.mintter.groups.v1alpha.Group.update_time:type_name -> google.protobuf.Timestamp + 0, // 10: com.mintter.groups.v1alpha.CreateGroupRequest.MembersEntry.value:type_name -> com.mintter.groups.v1alpha.Role + 0, // 11: com.mintter.groups.v1alpha.UpdateGroupRequest.UpdatedMembersEntry.value:type_name -> com.mintter.groups.v1alpha.Role + 0, // 12: com.mintter.groups.v1alpha.ListMembersResponse.MembersEntry.value:type_name -> com.mintter.groups.v1alpha.Role + 26, // 13: com.mintter.groups.v1alpha.ListDocumentGroupsResponse.Item.change_time:type_name -> google.protobuf.Timestamp + 18, // 14: com.mintter.groups.v1alpha.ListAccountGroupsResponse.Item.group:type_name -> com.mintter.groups.v1alpha.Group + 0, // 15: 
com.mintter.groups.v1alpha.ListAccountGroupsResponse.Item.role:type_name -> com.mintter.groups.v1alpha.Role + 1, // 16: com.mintter.groups.v1alpha.Groups.CreateGroup:input_type -> com.mintter.groups.v1alpha.CreateGroupRequest + 2, // 17: com.mintter.groups.v1alpha.Groups.GetGroup:input_type -> com.mintter.groups.v1alpha.GetGroupRequest + 3, // 18: com.mintter.groups.v1alpha.Groups.UpdateGroup:input_type -> com.mintter.groups.v1alpha.UpdateGroupRequest + 4, // 19: com.mintter.groups.v1alpha.Groups.ListMembers:input_type -> com.mintter.groups.v1alpha.ListMembersRequest + 6, // 20: com.mintter.groups.v1alpha.Groups.ListContent:input_type -> com.mintter.groups.v1alpha.ListContentRequest + 8, // 21: com.mintter.groups.v1alpha.Groups.ListGroups:input_type -> com.mintter.groups.v1alpha.ListGroupsRequest + 10, // 22: com.mintter.groups.v1alpha.Groups.ConvertToSite:input_type -> com.mintter.groups.v1alpha.ConvertToSiteRequest + 12, // 23: com.mintter.groups.v1alpha.Groups.GetSiteInfo:input_type -> com.mintter.groups.v1alpha.GetSiteInfoRequest + 14, // 24: com.mintter.groups.v1alpha.Groups.ListDocumentGroups:input_type -> com.mintter.groups.v1alpha.ListDocumentGroupsRequest + 16, // 25: com.mintter.groups.v1alpha.Groups.ListAccountGroups:input_type -> com.mintter.groups.v1alpha.ListAccountGroupsRequest + 18, // 26: com.mintter.groups.v1alpha.Groups.CreateGroup:output_type -> com.mintter.groups.v1alpha.Group + 18, // 27: com.mintter.groups.v1alpha.Groups.GetGroup:output_type -> com.mintter.groups.v1alpha.Group + 18, // 28: com.mintter.groups.v1alpha.Groups.UpdateGroup:output_type -> com.mintter.groups.v1alpha.Group + 5, // 29: com.mintter.groups.v1alpha.Groups.ListMembers:output_type -> com.mintter.groups.v1alpha.ListMembersResponse + 7, // 30: com.mintter.groups.v1alpha.Groups.ListContent:output_type -> com.mintter.groups.v1alpha.ListContentResponse + 9, // 31: com.mintter.groups.v1alpha.Groups.ListGroups:output_type -> com.mintter.groups.v1alpha.ListGroupsResponse + 11, // 
32: com.mintter.groups.v1alpha.Groups.ConvertToSite:output_type -> com.mintter.groups.v1alpha.ConvertToSiteResponse + 13, // 33: com.mintter.groups.v1alpha.Groups.GetSiteInfo:output_type -> com.mintter.groups.v1alpha.GetSiteInfoResponse + 15, // 34: com.mintter.groups.v1alpha.Groups.ListDocumentGroups:output_type -> com.mintter.groups.v1alpha.ListDocumentGroupsResponse + 17, // 35: com.mintter.groups.v1alpha.Groups.ListAccountGroups:output_type -> com.mintter.groups.v1alpha.ListAccountGroupsResponse + 26, // [26:36] is the sub-list for method output_type + 16, // [16:26] is the sub-list for method input_type + 16, // [16:16] is the sub-list for extension type_name + 16, // [16:16] is the sub-list for extension extendee + 0, // [0:16] is the sub-list for field type_name } func init() { file_groups_v1alpha_groups_proto_init() } @@ -1504,6 +1998,54 @@ func file_groups_v1alpha_groups_proto_init() { } } file_groups_v1alpha_groups_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListDocumentGroupsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_groups_v1alpha_groups_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListDocumentGroupsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_groups_v1alpha_groups_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListAccountGroupsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_groups_v1alpha_groups_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListAccountGroupsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + 
default: + return nil + } + } + file_groups_v1alpha_groups_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Group); i { case 0: return &v.state @@ -1515,6 +2057,30 @@ func file_groups_v1alpha_groups_proto_init() { return nil } } + file_groups_v1alpha_groups_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListDocumentGroupsResponse_Item); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_groups_v1alpha_groups_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListAccountGroupsResponse_Item); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ @@ -1522,7 +2088,7 @@ func file_groups_v1alpha_groups_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_groups_v1alpha_groups_proto_rawDesc, NumEnums: 1, - NumMessages: 19, + NumMessages: 25, NumExtensions: 0, NumServices: 1, }, diff --git a/backend/genproto/groups/v1alpha/groups_grpc.pb.go b/backend/genproto/groups/v1alpha/groups_grpc.pb.go index 83c5b9b47d..4823ff164c 100644 --- a/backend/genproto/groups/v1alpha/groups_grpc.pb.go +++ b/backend/genproto/groups/v1alpha/groups_grpc.pb.go @@ -38,6 +38,10 @@ type GroupsClient interface { ConvertToSite(ctx context.Context, in *ConvertToSiteRequest, opts ...grpc.CallOption) (*ConvertToSiteResponse, error) // Gets information about a site. GetSiteInfo(ctx context.Context, in *GetSiteInfoRequest, opts ...grpc.CallOption) (*GetSiteInfoResponse, error) + // Lists groups that a document is published to. + ListDocumentGroups(ctx context.Context, in *ListDocumentGroupsRequest, opts ...grpc.CallOption) (*ListDocumentGroupsResponse, error) + // Lists groups that an account is a member of. 
+ ListAccountGroups(ctx context.Context, in *ListAccountGroupsRequest, opts ...grpc.CallOption) (*ListAccountGroupsResponse, error) } type groupsClient struct { @@ -120,6 +124,24 @@ func (c *groupsClient) GetSiteInfo(ctx context.Context, in *GetSiteInfoRequest, return out, nil } +func (c *groupsClient) ListDocumentGroups(ctx context.Context, in *ListDocumentGroupsRequest, opts ...grpc.CallOption) (*ListDocumentGroupsResponse, error) { + out := new(ListDocumentGroupsResponse) + err := c.cc.Invoke(ctx, "/com.mintter.groups.v1alpha.Groups/ListDocumentGroups", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *groupsClient) ListAccountGroups(ctx context.Context, in *ListAccountGroupsRequest, opts ...grpc.CallOption) (*ListAccountGroupsResponse, error) { + out := new(ListAccountGroupsResponse) + err := c.cc.Invoke(ctx, "/com.mintter.groups.v1alpha.Groups/ListAccountGroups", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // GroupsServer is the server API for Groups service. // All implementations should embed UnimplementedGroupsServer // for forward compatibility @@ -140,6 +162,10 @@ type GroupsServer interface { ConvertToSite(context.Context, *ConvertToSiteRequest) (*ConvertToSiteResponse, error) // Gets information about a site. GetSiteInfo(context.Context, *GetSiteInfoRequest) (*GetSiteInfoResponse, error) + // Lists groups that a document is published to. + ListDocumentGroups(context.Context, *ListDocumentGroupsRequest) (*ListDocumentGroupsResponse, error) + // Lists groups that an account is a member of. + ListAccountGroups(context.Context, *ListAccountGroupsRequest) (*ListAccountGroupsResponse, error) } // UnimplementedGroupsServer should be embedded to have forward compatible implementations. 
@@ -170,6 +196,12 @@ func (UnimplementedGroupsServer) ConvertToSite(context.Context, *ConvertToSiteRe func (UnimplementedGroupsServer) GetSiteInfo(context.Context, *GetSiteInfoRequest) (*GetSiteInfoResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetSiteInfo not implemented") } +func (UnimplementedGroupsServer) ListDocumentGroups(context.Context, *ListDocumentGroupsRequest) (*ListDocumentGroupsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListDocumentGroups not implemented") +} +func (UnimplementedGroupsServer) ListAccountGroups(context.Context, *ListAccountGroupsRequest) (*ListAccountGroupsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListAccountGroups not implemented") +} // UnsafeGroupsServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to GroupsServer will @@ -326,6 +358,42 @@ func _Groups_GetSiteInfo_Handler(srv interface{}, ctx context.Context, dec func( return interceptor(ctx, in, info, handler) } +func _Groups_ListDocumentGroups_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListDocumentGroupsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupsServer).ListDocumentGroups(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/com.mintter.groups.v1alpha.Groups/ListDocumentGroups", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupsServer).ListDocumentGroups(ctx, req.(*ListDocumentGroupsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Groups_ListAccountGroups_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListAccountGroupsRequest) + if 
err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupsServer).ListAccountGroups(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/com.mintter.groups.v1alpha.Groups/ListAccountGroups", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupsServer).ListAccountGroups(ctx, req.(*ListAccountGroupsRequest)) + } + return interceptor(ctx, in, info, handler) +} + // Groups_ServiceDesc is the grpc.ServiceDesc for Groups service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -365,6 +433,14 @@ var Groups_ServiceDesc = grpc.ServiceDesc{ MethodName: "GetSiteInfo", Handler: _Groups_GetSiteInfo_Handler, }, + { + MethodName: "ListDocumentGroups", + Handler: _Groups_ListDocumentGroups_Handler, + }, + { + MethodName: "ListAccountGroups", + Handler: _Groups_ListAccountGroups_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "groups/v1alpha/groups.proto", diff --git a/backend/hyper/entity.go b/backend/hyper/entity.go index 7681c5ee24..bd14dd0704 100644 --- a/backend/hyper/entity.go +++ b/backend/hyper/entity.go @@ -2,6 +2,9 @@ package hyper import ( "context" + "crypto/rand" + "crypto/sha256" + "encoding/binary" "encoding/json" "fmt" "mintter/backend/core" @@ -55,7 +58,7 @@ type Entity struct { applied map[cid.Cid]int heads map[cid.Cid]struct{} state *crdt2.Map - clock hlc.Clock + clock *hlc.Clock } // NewEntity creates a new entity with a given ID. @@ -65,9 +68,17 @@ func NewEntity(id EntityID) *Entity { applied: make(map[cid.Cid]int), heads: make(map[cid.Cid]struct{}), state: crdt2.NewMap(), + clock: hlc.NewClock(), } } +// NewEntityWithClock creates a new entity with a provided clock. +func NewEntityWithClock(id EntityID, clock *hlc.Clock) *Entity { + e := NewEntity(id) + e.clock = clock + return e +} + // ID returns the ID of the entity. 
func (e *Entity) ID() EntityID { return e.id } @@ -191,9 +202,26 @@ func (e *Entity) NextTimestamp() hlc.Time { return e.clock.Now() } +// ChangeOption is a functional option for creating Changes. +type ChangeOption func(*Change) + +// WithAction sets the action field of the change. +func WithAction(action string) ChangeOption { + return func(c *Change) { + c.Action = action + } +} + +// WithMessage sets the message field of the change. +func WithMessage(msg string) ChangeOption { + return func(c *Change) { + c.Message = msg + } +} + // CreateChange entity creating a change blob, and applying it to the internal state. -func (e *Entity) CreateChange(ts hlc.Time, signer core.KeyPair, delegation cid.Cid, patch map[string]any) (hb Blob, err error) { - hb, err = NewChange(e.id, maps.Keys(e.heads), ts, signer, delegation, patch) +func (e *Entity) CreateChange(ts hlc.Time, signer core.KeyPair, delegation cid.Cid, patch map[string]any, opts ...ChangeOption) (hb Blob, err error) { + hb, err = NewChange(e.id, maps.Keys(e.heads), ts, signer, delegation, patch, opts...) if err != nil { return hb, err } @@ -206,7 +234,7 @@ func (e *Entity) CreateChange(ts hlc.Time, signer core.KeyPair, delegation cid.C } // ReplaceChange creates a new change instead of an existing one. The change to replace must be the current head. 
-func (e *Entity) ReplaceChange(old cid.Cid, ts hlc.Time, signer core.KeyPair, delegation cid.Cid, patch map[string]any) (hb Blob, err error) { +func (e *Entity) ReplaceChange(old cid.Cid, ts hlc.Time, signer core.KeyPair, delegation cid.Cid, patch map[string]any, opts ...ChangeOption) (hb Blob, err error) { if len(e.heads) != 1 { return hb, fmt.Errorf("must only have one head change to replace") } @@ -228,7 +256,7 @@ func (e *Entity) ReplaceChange(old cid.Cid, ts hlc.Time, signer core.KeyPair, de delete(e.applied, old) delete(e.heads, old) - hb, err = NewChange(e.id, prev.Deps, ts, signer, delegation, patch) + hb, err = NewChange(e.id, prev.Deps, ts, signer, delegation, patch, opts...) if err != nil { return hb, err } @@ -248,7 +276,7 @@ func SortCIDs(cids []cid.Cid) []cid.Cid { } // NewChange creates a new Change blob. -func NewChange(eid EntityID, deps []cid.Cid, ts hlc.Time, signer core.KeyPair, delegation cid.Cid, patch map[string]any) (hb Blob, err error) { +func NewChange(eid EntityID, deps []cid.Cid, ts hlc.Time, signer core.KeyPair, delegation cid.Cid, patch map[string]any, opts ...ChangeOption) (hb Blob, err error) { // Make sure deps field is not present in the patch if there're no deps. if len(deps) == 0 { deps = nil @@ -265,6 +293,9 @@ func NewChange(eid EntityID, deps []cid.Cid, ts hlc.Time, signer core.KeyPair, d Patch: patch, Signer: signer.Principal(), } + for _, o := range opts { + o(&ch) + } sigdata, err := cbornode.DumpObject(ch) if err != nil { @@ -297,7 +328,7 @@ func (bs *Storage) ForEachChange(ctx context.Context, eid EntityID, fn func(c ci if err != nil { return err } - if edb.HDEntitiesID == 0 { + if edb.EntitiesID == 0 { return status.Errorf(codes.NotFound, "entity %q not found", eid) } @@ -308,12 +339,12 @@ func (bs *Storage) ForEachChange(ctx context.Context, eid EntityID, fn func(c ci buf := make([]byte, 0, 1024*1024) // preallocating 1MB for decompression. 
for _, change := range changes { - buf, err = bs.bs.decoder.DecodeAll(change.HDChangesViewData, buf) + buf, err = bs.bs.decoder.DecodeAll(change.ChangesViewData, buf) if err != nil { return err } - chcid := cid.NewCidV1(uint64(change.HDChangesViewCodec), change.HDChangesViewMultihash) + chcid := cid.NewCidV1(uint64(change.ChangesViewCodec), change.ChangesViewMultihash) var ch Change if err := cbornode.DecodeInto(buf, &ch); err != nil { return fmt.Errorf("failed to decode change %s for entity %s: %w", chcid, eid, err) @@ -345,11 +376,11 @@ func (bs *Storage) LoadEntity(ctx context.Context, eid EntityID) (e *Entity, err if err != nil { return nil, err } - if edb.HDEntitiesID == 0 { + if edb.EntitiesID == 0 { return nil, status.Errorf(codes.NotFound, "entity %q not found", eid) } - heads, err := hypersql.ChangesGetPublicHeadsJSON(conn, edb.HDEntitiesID) + heads, err := hypersql.ChangesGetPublicHeadsJSON(conn, edb.EntitiesID) if err != nil { return nil, err } @@ -371,11 +402,11 @@ func (bs *Storage) LoadTrustedEntity(ctx context.Context, eid EntityID) (e *Enti if err != nil { return nil, err } - if edb.HDEntitiesID == 0 { + if edb.EntitiesID == 0 { return nil, status.Errorf(codes.NotFound, "entity %q not found", eid) } - heads, err := hypersql.ChangesGetTrustedHeadsJSON(conn, edb.HDEntitiesID) + heads, err := hypersql.ChangesGetTrustedHeadsJSON(conn, edb.EntitiesID) if err != nil { return nil, err } @@ -447,11 +478,11 @@ func (bs *Storage) FindDraft(ctx context.Context, eid EntityID) (cid.Cid, error) if err != nil { return cid.Undef, err } - if res.HDDraftsViewBlobID == 0 { + if res.DraftsViewBlobID == 0 { return cid.Undef, fmt.Errorf("no draft for entity %s", eid) } - return cid.NewCidV1(uint64(res.HDDraftsViewCodec), res.HDDraftsViewMultihash), nil + return cid.NewCidV1(uint64(res.DraftsViewCodec), res.DraftsViewMultihash), nil } // LoadEntityFromHeads returns the loaded entity at a given "version" corresponding to the provided HEAD changes. 
@@ -512,12 +543,12 @@ func (bs *Storage) loadFromHeads(conn *sqlite.Conn, eid EntityID, heads localHea entity := NewEntity(eid) buf := make([]byte, 0, 1024*1024) // preallocating 1MB for decompression. for _, change := range changes { - buf, err = bs.bs.decoder.DecodeAll(change.HDChangesViewData, buf) + buf, err = bs.bs.decoder.DecodeAll(change.ChangesViewData, buf) if err != nil { return nil, err } - chcid := cid.NewCidV1(uint64(change.HDChangesViewCodec), change.HDChangesViewMultihash) + chcid := cid.NewCidV1(uint64(change.ChangesViewCodec), change.ChangesViewMultihash) var ch Change if err := cbornode.DecodeInto(buf, &ch); err != nil { return nil, fmt.Errorf("failed to decode change %s for entity %s: %w", chcid, eid, err) @@ -538,3 +569,42 @@ type ParsedBlob[T any] struct { CID cid.Cid Data T } + +// NewUnforgeableID creates a new random ID that is verifiable with the author's public key. +// It return the ID and the nonce. The nonce argument can be nil in which case a new nonce will be created. +// Otherwise the same nonce will be returned. +func NewUnforgeableID(author core.Principal, nonce []byte, ts int64) (string, []byte) { + if nonce == nil { + nonce = make([]byte, 16) + _, err := rand.Read(nonce) + if err != nil { + panic(err) + } + } + + h := sha256.New() + if _, err := h.Write(author); err != nil { + panic(err) + } + if _, err := h.Write(nonce); err != nil { + panic(err) + } + + var buf [8]byte + binary.BigEndian.PutUint64(buf[:], uint64(ts)) + + if _, err := h.Write(buf[:]); err != nil { + panic(err) + } + + dig := h.Sum(nil) + base, err := multibase.Encode(multibase.Base58BTC, dig) + if err != nil { + panic(err) + } + + // Using last 22 characters to avoid multibase prefix. + // We don't use full hash digest here, to make our IDs shorter. + // But it should have enough collision resistance for our purpose. 
+ return base[len(base)-22:], nonce +} diff --git a/backend/hyper/entity_test.go b/backend/hyper/entity_test.go index fe34e4e14a..37dad76dff 100644 --- a/backend/hyper/entity_test.go +++ b/backend/hyper/entity_test.go @@ -13,7 +13,7 @@ import ( ) func TestEntity(t *testing.T) { - e := NewEntity("hd://a/alice") + e := NewEntity("hm://a/alice") alice := coretest.NewTester("alice") name, _ := e.Get("name") @@ -264,10 +264,9 @@ func TestTrustedEntity(t *testing.T) { require.NoError(t, err) // Alice now receives Carol changes - require.NoError(t, aliceBlobs.SaveBlob(ctx, chF)) - _, err = aliceBlobs.LoadEntity(ctx, "foo") - require.Error(t, err, "missing dependency for change C") + require.Error(t, aliceBlobs.SaveBlob(ctx, chF), "must fail because missing dependency") require.NoError(t, aliceBlobs.SaveBlob(ctx, chC)) + require.NoError(t, aliceBlobs.SaveBlob(ctx, chF)) aliceView, err := aliceBlobs.LoadEntity(ctx, "foo") require.NoError(t, err) require.Equal(t, carolLatestChanges.heads, aliceView.heads) diff --git a/backend/hyper/hyper.go b/backend/hyper/hyper.go index 87d0adff2a..64595f78e9 100644 --- a/backend/hyper/hyper.go +++ b/backend/hyper/hyper.go @@ -17,8 +17,6 @@ import ( cbornode "github.com/ipfs/go-ipld-cbor" "github.com/multiformats/go-multicodec" "github.com/multiformats/go-multihash" - "github.com/sanity-io/litter" - "go.uber.org/multierr" "go.uber.org/zap" ) @@ -49,12 +47,17 @@ func (bs *Storage) Query(ctx context.Context, fn func(conn *sqlite.Conn) error) } defer release() - if err := sqlitex.ExecTransient(conn, "PRAGMA query_only = on;", nil); err != nil { - return err - } - defer func() { - err = multierr.Combine(err, sqlitex.ExecTransient(conn, "PRAGMA query_only = off;", nil)) - }() + // TODO(burdiyan): make the main database read-only. + // This is commented because we want to allow writing into an attached in-memory database + // while keeping the main database read-only. Apparently this is not possible in SQLite. 
+ // There're a bunch of other ways to achieve this but there's currently no time for implementing them. + // + // if err := sqlitex.ExecTransient(conn, "PRAGMA query_only = on;", nil); err != nil { + // return err + // } + // defer func() { + // err = multierr.Combine(err, sqlitex.ExecTransient(conn, "PRAGMA query_only = off;", nil)) + // }() return fn(conn) } @@ -138,11 +141,11 @@ func (bs *Storage) SaveDraftBlob(ctx context.Context, eid EntityID, blob Blob) e if err != nil { return err } - if resp.HDEntitiesID == 0 { + if resp.EntitiesID == 0 { panic("BUG: failed to lookup entity after inserting the blob") } - return hypersql.DraftsInsert(conn, resp.HDEntitiesID, id) + return hypersql.DraftsInsert(conn, resp.EntitiesID, id) }) } @@ -160,7 +163,7 @@ func (bs *Storage) ListEntities(ctx context.Context, prefix string) ([]EntityID, out := make([]EntityID, len(resp)) for i, r := range resp { - out[i] = EntityID(r.HDEntitiesEID) + out[i] = EntityID(r.EntitiesEID) } return out, nil @@ -179,15 +182,15 @@ func (bs *Storage) PublishDraft(ctx context.Context, eid EntityID) (cid.Cid, err if err != nil { return err } - if res.HDDraftsViewBlobID == 0 { + if res.DraftsViewBlobID == 0 { return fmt.Errorf("no draft to publish for entity %s", eid) } - if err := hypersql.DraftsDelete(conn, res.HDDraftsViewBlobID); err != nil { + if err := hypersql.DraftsDelete(conn, res.DraftsViewBlobID); err != nil { return err } - out = cid.NewCidV1(uint64(res.HDDraftsViewCodec), res.HDDraftsViewMultihash) + out = cid.NewCidV1(uint64(res.DraftsViewCodec), res.DraftsViewMultihash) return nil }); err != nil { @@ -213,15 +216,15 @@ func (bs *Storage) DeleteDraft(ctx context.Context, eid EntityID) error { if err != nil { return err } - if res.HDDraftsViewBlobID == 0 { + if res.DraftsViewBlobID == 0 { return fmt.Errorf("no draft to publish for entity %s", eid) } - if err := hypersql.DraftsDelete(conn, res.HDDraftsViewBlobID); err != nil { + if err := hypersql.DraftsDelete(conn, res.DraftsViewBlobID); 
err != nil { return err } - _, err = hypersql.BlobsDelete(conn, res.HDDraftsViewMultihash) + _, err = hypersql.BlobsDelete(conn, res.DraftsViewMultihash) if err != nil { return err } @@ -247,11 +250,11 @@ func (bs *Storage) DeleteEntity(ctx context.Context, eid EntityID) error { if err != nil { return err } - if edb.HDEntitiesID == 0 { + if edb.EntitiesID == 0 { return fmt.Errorf("no such entity: %s", eid) } - if err := hypersql.ChangesDeleteForEntity(conn, edb.HDEntitiesID); err != nil { + if err := hypersql.ChangesDeleteForEntity(conn, edb.EntitiesID); err != nil { return err } @@ -298,11 +301,11 @@ func (bs *Storage) ReplaceDraftBlob(ctx context.Context, eid EntityID, old cid.C if err != nil { return err } - if resp.HDEntitiesID == 0 { + if resp.EntitiesID == 0 { panic("BUG: failed to lookup entity after inserting the blob") } - return hypersql.DraftsInsert(conn, resp.HDEntitiesID, id) + return hypersql.DraftsInsert(conn, resp.EntitiesID, id) }) } @@ -388,7 +391,6 @@ func DecodeBlob(c cid.Cid, data []byte) (hb Blob, err error) { if err := cbornode.DecodeInto(data, &vv); err != nil { panic(err) } - litter.Dump(vv) return hb, fmt.Errorf("failed to infer hyper blob %s: %w", c, err) } diff --git a/backend/hyper/hypersql/queries.gen.go b/backend/hyper/hypersql/queries.gen.go index 3c3e31efb3..3e229a0209 100644 --- a/backend/hyper/hypersql/queries.gen.go +++ b/backend/hyper/hypersql/queries.gen.go @@ -244,6 +244,28 @@ WHERE blobs.size >= 0` return out, err } +func BlobLinksInsertOrIgnore(conn *sqlite.Conn, blobLinksSource int64, blobLinksRel string, blobLinksTarget int64) error { + const query = `INSERT OR IGNORE INTO blob_links (source, rel, target) +VALUES (:blobLinksSource, :blobLinksRel, :blobLinksTarget)` + + before := func(stmt *sqlite.Stmt) { + stmt.SetInt64(":blobLinksSource", blobLinksSource) + stmt.SetText(":blobLinksRel", blobLinksRel) + stmt.SetInt64(":blobLinksTarget", blobLinksTarget) + } + + onStep := func(i int, stmt *sqlite.Stmt) error { + return nil 
+ } + + err := sqlitegen.ExecStmt(conn, query, before, onStep) + if err != nil { + err = fmt.Errorf("failed query: BlobLinksInsertOrIgnore: %w", err) + } + + return err +} + type PublicKeysLookupIDResult struct { PublicKeysID int64 } @@ -314,15 +336,15 @@ type PublicKeysInsertResult struct { PublicKeysID int64 } -func PublicKeysInsert(conn *sqlite.Conn, publicKeysPrincipal []byte) (PublicKeysInsertResult, error) { - const query = `INSERT INTO public_keys (principal) -VALUES (:publicKeysPrincipal) -RETURNING public_keys.id` +func PublicKeysInsert(conn *sqlite.Conn, principal []byte) (PublicKeysInsertResult, error) { + const query = `INSERT INTO lookup (type, value) +VALUES (112, :principal) +RETURNING lookup.id AS public_keys_id` var out PublicKeysInsertResult before := func(stmt *sqlite.Stmt) { - stmt.SetBytes(":publicKeysPrincipal", publicKeysPrincipal) + stmt.SetBytes(":principal", principal) } onStep := func(i int, stmt *sqlite.Stmt) error { @@ -414,52 +436,16 @@ WHERE trusted_accounts.id IN (SELECT public_keys.id FROM public_keys WHERE publi return out, err } -type KeyDelegationsInsertOrIgnoreResult struct { - KeyDelegationsBlob int64 -} - -func KeyDelegationsInsertOrIgnore(conn *sqlite.Conn, keyDelegationsBlob int64, keyDelegationsIssuer int64, keyDelegationsDelegate int64, keyDelegationsIssueTime int64) (KeyDelegationsInsertOrIgnoreResult, error) { - const query = `INSERT OR IGNORE INTO key_delegations (blob, issuer, delegate, issue_time) -VALUES (:keyDelegationsBlob, :keyDelegationsIssuer, :keyDelegationsDelegate, :keyDelegationsIssueTime) -RETURNING key_delegations.blob` - - var out KeyDelegationsInsertOrIgnoreResult - - before := func(stmt *sqlite.Stmt) { - stmt.SetInt64(":keyDelegationsBlob", keyDelegationsBlob) - stmt.SetInt64(":keyDelegationsIssuer", keyDelegationsIssuer) - stmt.SetInt64(":keyDelegationsDelegate", keyDelegationsDelegate) - stmt.SetInt64(":keyDelegationsIssueTime", keyDelegationsIssueTime) - } - - onStep := func(i int, stmt *sqlite.Stmt) 
error { - if i > 1 { - return errors.New("KeyDelegationsInsertOrIgnore: more than one result return for a single-kind query") - } - - out.KeyDelegationsBlob = stmt.ColumnInt64(0) - return nil - } - - err := sqlitegen.ExecStmt(conn, query, before, onStep) - if err != nil { - err = fmt.Errorf("failed query: KeyDelegationsInsertOrIgnore: %w", err) - } - - return out, err -} - type KeyDelegationsListResult struct { KeyDelegationsViewBlob int64 KeyDelegationsViewBlobCodec int64 KeyDelegationsViewBlobMultihash []byte KeyDelegationsViewIssuer []byte KeyDelegationsViewDelegate []byte - KeyDelegationsViewIssueTime int64 } func KeyDelegationsList(conn *sqlite.Conn, keyDelegationsViewIssuer []byte) ([]KeyDelegationsListResult, error) { - const query = `SELECT key_delegations_view.blob, key_delegations_view.blob_codec, key_delegations_view.blob_multihash, key_delegations_view.issuer, key_delegations_view.delegate, key_delegations_view.issue_time + const query = `SELECT key_delegations_view.blob, key_delegations_view.blob_codec, key_delegations_view.blob_multihash, key_delegations_view.issuer, key_delegations_view.delegate FROM key_delegations_view WHERE key_delegations_view.issuer = :keyDelegationsViewIssuer` @@ -476,7 +462,6 @@ WHERE key_delegations_view.issuer = :keyDelegationsViewIssuer` KeyDelegationsViewBlobMultihash: stmt.ColumnBytes(2), KeyDelegationsViewIssuer: stmt.ColumnBytes(3), KeyDelegationsViewDelegate: stmt.ColumnBytes(4), - KeyDelegationsViewIssueTime: stmt.ColumnInt64(5), }) return nil @@ -496,11 +481,10 @@ type KeyDelegationsListAllResult struct { KeyDelegationsViewBlobMultihash []byte KeyDelegationsViewIssuer []byte KeyDelegationsViewDelegate []byte - KeyDelegationsViewIssueTime int64 } func KeyDelegationsListAll(conn *sqlite.Conn) ([]KeyDelegationsListAllResult, error) { - const query = `SELECT key_delegations_view.blob, key_delegations_view.blob_codec, key_delegations_view.blob_multihash, key_delegations_view.issuer, key_delegations_view.delegate, 
key_delegations_view.issue_time + const query = `SELECT key_delegations_view.blob, key_delegations_view.blob_codec, key_delegations_view.blob_multihash, key_delegations_view.issuer, key_delegations_view.delegate FROM key_delegations_view` var out []KeyDelegationsListAllResult @@ -515,7 +499,6 @@ FROM key_delegations_view` KeyDelegationsViewBlobMultihash: stmt.ColumnBytes(2), KeyDelegationsViewIssuer: stmt.ColumnBytes(3), KeyDelegationsViewDelegate: stmt.ColumnBytes(4), - KeyDelegationsViewIssueTime: stmt.ColumnInt64(5), }) return nil @@ -535,11 +518,10 @@ type KeyDelegationsListByDelegateResult struct { KeyDelegationsViewBlobMultihash []byte KeyDelegationsViewIssuer []byte KeyDelegationsViewDelegate []byte - KeyDelegationsViewIssueTime int64 } func KeyDelegationsListByDelegate(conn *sqlite.Conn, keyDelegationsViewDelegate []byte) ([]KeyDelegationsListByDelegateResult, error) { - const query = `SELECT key_delegations_view.blob, key_delegations_view.blob_codec, key_delegations_view.blob_multihash, key_delegations_view.issuer, key_delegations_view.delegate, key_delegations_view.issue_time + const query = `SELECT key_delegations_view.blob, key_delegations_view.blob_codec, key_delegations_view.blob_multihash, key_delegations_view.issuer, key_delegations_view.delegate FROM key_delegations_view WHERE key_delegations_view.delegate = :keyDelegationsViewDelegate` @@ -556,7 +538,6 @@ WHERE key_delegations_view.delegate = :keyDelegationsViewDelegate` KeyDelegationsViewBlobMultihash: stmt.ColumnBytes(2), KeyDelegationsViewIssuer: stmt.ColumnBytes(3), KeyDelegationsViewDelegate: stmt.ColumnBytes(4), - KeyDelegationsViewIssueTime: stmt.ColumnInt64(5), }) return nil @@ -605,18 +586,18 @@ LIMIT 1` } type EntitiesInsertOrIgnoreResult struct { - HDEntitiesID int64 + EntitiesID int64 } -func EntitiesInsertOrIgnore(conn *sqlite.Conn, hdEntitiesEID string) (EntitiesInsertOrIgnoreResult, error) { - const query = `INSERT OR IGNORE INTO hd_entities (eid) -VALUES (:hdEntitiesEID) -RETURNING 
hd_entities.id` +func EntitiesInsertOrIgnore(conn *sqlite.Conn, entity_id string) (EntitiesInsertOrIgnoreResult, error) { + const query = `INSERT OR IGNORE INTO lookup (type, value) +VALUES (114, :entity_id) +RETURNING lookup.id AS entities_id` var out EntitiesInsertOrIgnoreResult before := func(stmt *sqlite.Stmt) { - stmt.SetText(":hdEntitiesEID", hdEntitiesEID) + stmt.SetText(":entity_id", entity_id) } onStep := func(i int, stmt *sqlite.Stmt) error { @@ -624,7 +605,7 @@ RETURNING hd_entities.id` return errors.New("EntitiesInsertOrIgnore: more than one result return for a single-kind query") } - out.HDEntitiesID = stmt.ColumnInt64(0) + out.EntitiesID = stmt.ColumnInt64(0) return nil } @@ -637,19 +618,19 @@ RETURNING hd_entities.id` } type EntitiesLookupIDResult struct { - HDEntitiesID int64 + EntitiesID int64 } -func EntitiesLookupID(conn *sqlite.Conn, hdEntitiesEID string) (EntitiesLookupIDResult, error) { - const query = `SELECT hd_entities.id -FROM hd_entities -WHERE hd_entities.eid = :hdEntitiesEID +func EntitiesLookupID(conn *sqlite.Conn, entities_eid string) (EntitiesLookupIDResult, error) { + const query = `SELECT entities.id +FROM entities +WHERE entities.eid = :entities_eid LIMIT 1` var out EntitiesLookupIDResult before := func(stmt *sqlite.Stmt) { - stmt.SetText(":hdEntitiesEID", hdEntitiesEID) + stmt.SetText(":entities_eid", entities_eid) } onStep := func(i int, stmt *sqlite.Stmt) error { @@ -657,7 +638,7 @@ LIMIT 1` return errors.New("EntitiesLookupID: more than one result return for a single-kind query") } - out.HDEntitiesID = stmt.ColumnInt64(0) + out.EntitiesID = stmt.ColumnInt64(0) return nil } @@ -670,15 +651,15 @@ LIMIT 1` } type EntitiesListByPrefixResult struct { - HDEntitiesID int64 - HDEntitiesEID string + EntitiesID int64 + EntitiesEID string } func EntitiesListByPrefix(conn *sqlite.Conn, prefix string) ([]EntitiesListByPrefixResult, error) { - const query = `SELECT hd_entities.id, hd_entities.eid -FROM hd_entities -WHERE hd_entities.eid 
GLOB :prefix -ORDER BY hd_entities.id` + const query = `SELECT entities.id, entities.eid +FROM entities +WHERE entities.eid GLOB :prefix +ORDER BY entities.id` var out []EntitiesListByPrefixResult @@ -688,8 +669,8 @@ ORDER BY hd_entities.id` onStep := func(i int, stmt *sqlite.Stmt) error { out = append(out, EntitiesListByPrefixResult{ - HDEntitiesID: stmt.ColumnInt64(0), - HDEntitiesEID: stmt.ColumnText(1), + EntitiesID: stmt.ColumnInt64(0), + EntitiesEID: stmt.ColumnText(1), }) return nil @@ -703,12 +684,13 @@ ORDER BY hd_entities.id` return out, err } -func EntitiesDelete(conn *sqlite.Conn, hdEntitiesEID string) error { - const query = `DELETE FROM hd_entities -WHERE hd_entities.eid = :hdEntitiesEID` +func EntitiesDelete(conn *sqlite.Conn, entities_eid string) error { + const query = `DELETE FROM lookup +WHERE lookup.type = 114 +AND lookup.value = :entities_eid` before := func(stmt *sqlite.Stmt) { - stmt.SetText(":hdEntitiesEID", hdEntitiesEID) + stmt.SetText(":entities_eid", entities_eid) } onStep := func(i int, stmt *sqlite.Stmt) error { @@ -723,15 +705,15 @@ WHERE hd_entities.eid = :hdEntitiesEID` return err } -func ChangesInsertOrIgnore(conn *sqlite.Conn, hdChangesBlob int64, hdChangesEntity int64, hdChangesHlcTime int64, hdChangesAuthor int64) error { - const query = `INSERT OR IGNORE INTO hd_changes (blob, entity, hlc_time, author) -VALUES (:hdChangesBlob, :hdChangesEntity, :hdChangesHlcTime, :hdChangesAuthor)` +func ChangesInsertOrIgnore(conn *sqlite.Conn, changesBlob int64, changesEntity int64, changesHLCTime int64, changesAuthor int64) error { + const query = `INSERT OR IGNORE INTO changes (blob, entity, hlc_time, author) +VALUES (:changesBlob, :changesEntity, :changesHLCTime, :changesAuthor)` before := func(stmt *sqlite.Stmt) { - stmt.SetInt64(":hdChangesBlob", hdChangesBlob) - stmt.SetInt64(":hdChangesEntity", hdChangesEntity) - stmt.SetInt64(":hdChangesHlcTime", hdChangesHlcTime) - stmt.SetInt64(":hdChangesAuthor", hdChangesAuthor) + 
stmt.SetInt64(":changesBlob", changesBlob) + stmt.SetInt64(":changesEntity", changesEntity) + stmt.SetInt64(":changesHLCTime", changesHLCTime) + stmt.SetInt64(":changesAuthor", changesAuthor) } onStep := func(i int, stmt *sqlite.Stmt) error { @@ -747,38 +729,38 @@ VALUES (:hdChangesBlob, :hdChangesEntity, :hdChangesHlcTime, :hdChangesAuthor)` } type ChangesListFromChangeSetResult struct { - HDChangesViewBlobID int64 - HDChangesViewCodec int64 - HDChangesViewData []byte - HDChangesViewEntityID int64 - HDChangesViewHlcTime int64 - HDChangesViewMultihash []byte - HDChangesViewSize int64 -} - -func ChangesListFromChangeSet(conn *sqlite.Conn, cset []byte, hdChangesViewEntity string) ([]ChangesListFromChangeSetResult, error) { - const query = `SELECT hd_changes_view.blob_id, hd_changes_view.codec, hd_changes_view.data, hd_changes_view.entity_id, hd_changes_view.hlc_time, hd_changes_view.multihash, hd_changes_view.size -FROM hd_changes_view, json_each(:cset) AS cset -WHERE hd_changes_view.entity = :hdChangesViewEntity -AND hd_changes_view.blob_id = cset.value -ORDER BY hd_changes_view.hlc_time` + ChangesViewBlobID int64 + ChangesViewCodec int64 + ChangesViewData []byte + ChangesViewEntityID int64 + ChangesViewHLCTime int64 + ChangesViewMultihash []byte + ChangesViewSize int64 +} + +func ChangesListFromChangeSet(conn *sqlite.Conn, cset []byte, changesViewEntity string) ([]ChangesListFromChangeSetResult, error) { + const query = `SELECT changes_view.blob_id, changes_view.codec, changes_view.data, changes_view.entity_id, changes_view.hlc_time, changes_view.multihash, changes_view.size +FROM changes_view, json_each(:cset) AS cset +WHERE changes_view.entity = :changesViewEntity +AND changes_view.blob_id = cset.value +ORDER BY changes_view.hlc_time` var out []ChangesListFromChangeSetResult before := func(stmt *sqlite.Stmt) { stmt.SetBytes(":cset", cset) - stmt.SetText(":hdChangesViewEntity", hdChangesViewEntity) + stmt.SetText(":changesViewEntity", changesViewEntity) } onStep 
:= func(i int, stmt *sqlite.Stmt) error { out = append(out, ChangesListFromChangeSetResult{ - HDChangesViewBlobID: stmt.ColumnInt64(0), - HDChangesViewCodec: stmt.ColumnInt64(1), - HDChangesViewData: stmt.ColumnBytes(2), - HDChangesViewEntityID: stmt.ColumnInt64(3), - HDChangesViewHlcTime: stmt.ColumnInt64(4), - HDChangesViewMultihash: stmt.ColumnBytes(5), - HDChangesViewSize: stmt.ColumnInt64(6), + ChangesViewBlobID: stmt.ColumnInt64(0), + ChangesViewCodec: stmt.ColumnInt64(1), + ChangesViewData: stmt.ColumnBytes(2), + ChangesViewEntityID: stmt.ColumnInt64(3), + ChangesViewHLCTime: stmt.ColumnInt64(4), + ChangesViewMultihash: stmt.ColumnBytes(5), + ChangesViewSize: stmt.ColumnInt64(6), }) return nil @@ -793,36 +775,36 @@ ORDER BY hd_changes_view.hlc_time` } type ChangesListForEntityResult struct { - HDChangesViewBlobID int64 - HDChangesViewCodec int64 - HDChangesViewData []byte - HDChangesViewEntityID int64 - HDChangesViewHlcTime int64 - HDChangesViewMultihash []byte - HDChangesViewSize int64 + ChangesViewBlobID int64 + ChangesViewCodec int64 + ChangesViewData []byte + ChangesViewEntityID int64 + ChangesViewHLCTime int64 + ChangesViewMultihash []byte + ChangesViewSize int64 } -func ChangesListForEntity(conn *sqlite.Conn, hdChangesViewEntity string) ([]ChangesListForEntityResult, error) { - const query = `SELECT hd_changes_view.blob_id, hd_changes_view.codec, hd_changes_view.data, hd_changes_view.entity_id, hd_changes_view.hlc_time, hd_changes_view.multihash, hd_changes_view.size -FROM hd_changes_view -WHERE hd_changes_view.entity = :hdChangesViewEntity -ORDER BY hd_changes_view.hlc_time` +func ChangesListForEntity(conn *sqlite.Conn, changesViewEntity string) ([]ChangesListForEntityResult, error) { + const query = `SELECT changes_view.blob_id, changes_view.codec, changes_view.data, changes_view.entity_id, changes_view.hlc_time, changes_view.multihash, changes_view.size +FROM changes_view +WHERE changes_view.entity = :changesViewEntity +ORDER BY 
changes_view.hlc_time` var out []ChangesListForEntityResult before := func(stmt *sqlite.Stmt) { - stmt.SetText(":hdChangesViewEntity", hdChangesViewEntity) + stmt.SetText(":changesViewEntity", changesViewEntity) } onStep := func(i int, stmt *sqlite.Stmt) error { out = append(out, ChangesListForEntityResult{ - HDChangesViewBlobID: stmt.ColumnInt64(0), - HDChangesViewCodec: stmt.ColumnInt64(1), - HDChangesViewData: stmt.ColumnBytes(2), - HDChangesViewEntityID: stmt.ColumnInt64(3), - HDChangesViewHlcTime: stmt.ColumnInt64(4), - HDChangesViewMultihash: stmt.ColumnBytes(5), - HDChangesViewSize: stmt.ColumnInt64(6), + ChangesViewBlobID: stmt.ColumnInt64(0), + ChangesViewCodec: stmt.ColumnInt64(1), + ChangesViewData: stmt.ColumnBytes(2), + ChangesViewEntityID: stmt.ColumnInt64(3), + ChangesViewHLCTime: stmt.ColumnInt64(4), + ChangesViewMultihash: stmt.ColumnBytes(5), + ChangesViewSize: stmt.ColumnInt64(6), }) return nil @@ -837,22 +819,22 @@ ORDER BY hd_changes_view.hlc_time` } type ChangesListPublicNoDataResult struct { - HDChangesViewBlobID int64 - HDChangesViewCodec int64 - HDChangesViewEntityID int64 - HDChangesViewHlcTime int64 - HDChangesViewMultihash []byte - HDChangesViewSize int64 - HDChangesViewEntity string - HDDraftsBlob int64 + ChangesViewBlobID int64 + ChangesViewCodec int64 + ChangesViewEntityID int64 + ChangesViewHLCTime int64 + ChangesViewMultihash []byte + ChangesViewSize int64 + ChangesViewEntity []byte + DraftsBlob int64 } func ChangesListPublicNoData(conn *sqlite.Conn) ([]ChangesListPublicNoDataResult, error) { - const query = `SELECT hd_changes_view.blob_id, hd_changes_view.codec, hd_changes_view.entity_id, hd_changes_view.hlc_time, hd_changes_view.multihash, hd_changes_view.size, hd_changes_view.entity, hd_drafts.blob -FROM hd_changes_view -LEFT JOIN hd_drafts ON hd_drafts.entity = hd_changes_view.entity_id -WHERE hd_drafts.blob IS NULL -ORDER BY hd_changes_view.entity, hd_changes_view.hlc_time` + const query = `SELECT changes_view.blob_id, 
changes_view.codec, changes_view.entity_id, changes_view.hlc_time, changes_view.multihash, changes_view.size, changes_view.entity, drafts.blob +FROM changes_view +LEFT JOIN drafts ON drafts.entity = changes_view.entity_id +WHERE drafts.blob IS NULL +ORDER BY changes_view.entity, changes_view.hlc_time` var out []ChangesListPublicNoDataResult @@ -861,14 +843,14 @@ ORDER BY hd_changes_view.entity, hd_changes_view.hlc_time` onStep := func(i int, stmt *sqlite.Stmt) error { out = append(out, ChangesListPublicNoDataResult{ - HDChangesViewBlobID: stmt.ColumnInt64(0), - HDChangesViewCodec: stmt.ColumnInt64(1), - HDChangesViewEntityID: stmt.ColumnInt64(2), - HDChangesViewHlcTime: stmt.ColumnInt64(3), - HDChangesViewMultihash: stmt.ColumnBytes(4), - HDChangesViewSize: stmt.ColumnInt64(5), - HDChangesViewEntity: stmt.ColumnText(6), - HDDraftsBlob: stmt.ColumnInt64(7), + ChangesViewBlobID: stmt.ColumnInt64(0), + ChangesViewCodec: stmt.ColumnInt64(1), + ChangesViewEntityID: stmt.ColumnInt64(2), + ChangesViewHLCTime: stmt.ColumnInt64(3), + ChangesViewMultihash: stmt.ColumnBytes(4), + ChangesViewSize: stmt.ColumnInt64(5), + ChangesViewEntity: stmt.ColumnBytes(6), + DraftsBlob: stmt.ColumnInt64(7), }) return nil @@ -887,7 +869,7 @@ type ChangesResolveHeadsResult struct { } func ChangesResolveHeads(conn *sqlite.Conn, heads []byte) (ChangesResolveHeadsResult, error) { - const query = `WITH RECURSIVE changeset (change) AS (SELECT value FROM json_each(:heads) UNION SELECT hd_change_deps.parent FROM hd_change_deps JOIN changeset ON changeset.change = hd_change_deps.child) + const query = `WITH RECURSIVE changeset (change) AS (SELECT value FROM json_each(:heads) UNION SELECT change_deps.parent FROM change_deps JOIN changeset ON changeset.change = change_deps.child) SELECT json_group_array(change) AS resolved_json FROM changeset LIMIT 1` @@ -922,16 +904,16 @@ type ChangesGetPublicHeadsJSONResult struct { func ChangesGetPublicHeadsJSON(conn *sqlite.Conn, entity int64) 
(ChangesGetPublicHeadsJSONResult, error) { const query = `WITH non_drafts (blob) AS ( - SELECT hd_changes.blob - FROM hd_changes - LEFT JOIN hd_drafts ON hd_drafts.entity = hd_changes.entity AND hd_changes.blob = hd_drafts.blob - WHERE hd_changes.entity = :entity - AND hd_drafts.blob IS NULL + SELECT changes.blob + FROM changes + LEFT JOIN drafts ON drafts.entity = changes.entity AND changes.blob = drafts.blob + WHERE changes.entity = :entity + AND drafts.blob IS NULL ), deps (blob) AS ( - SELECT DISTINCT hd_change_deps.parent - FROM hd_change_deps - JOIN non_drafts ON non_drafts.blob = hd_change_deps.child + SELECT DISTINCT change_deps.parent + FROM change_deps + JOIN non_drafts ON non_drafts.blob = change_deps.child ) SELECT json_group_array(blob) AS heads FROM non_drafts @@ -965,10 +947,10 @@ type ChangesGetTrustedHeadsJSONResult struct { } func ChangesGetTrustedHeadsJSON(conn *sqlite.Conn, entity int64) (ChangesGetTrustedHeadsJSONResult, error) { - const query = `SELECT json_group_array(hd_changes.blob) AS heads -FROM hd_changes -JOIN trusted_accounts ON trusted_accounts.id = hd_changes.author -WHERE hd_changes.entity = :entity` + const query = `SELECT json_group_array(changes.blob) AS heads +FROM changes +JOIN trusted_accounts ON trusted_accounts.id = changes.author +WHERE changes.entity = :entity` var out ChangesGetTrustedHeadsJSONResult @@ -993,12 +975,12 @@ WHERE hd_changes.entity = :entity` return out, err } -func ChangesDeleteForEntity(conn *sqlite.Conn, hdChangesEntity int64) error { +func ChangesDeleteForEntity(conn *sqlite.Conn, changesEntity int64) error { const query = `DELETE FROM blobs -WHERE blobs.id IN (SELECT hd_changes.blob FROM hd_changes WHERE hd_changes.entity = :hdChangesEntity)` +WHERE blobs.id IN (SELECT changes.blob FROM changes WHERE changes.entity = :changesEntity)` before := func(stmt *sqlite.Stmt) { - stmt.SetInt64(":hdChangesEntity", hdChangesEntity) + stmt.SetInt64(":changesEntity", changesEntity) } onStep := func(i int, stmt 
*sqlite.Stmt) error { @@ -1014,24 +996,24 @@ WHERE blobs.id IN (SELECT hd_changes.blob FROM hd_changes WHERE hd_changes.entit } type ChangesGetInfoResult struct { - HDChangesBlob int64 - HDChangesHlcTime int64 + ChangesBlob int64 + ChangesHLCTime int64 PublicKeysPrincipal []byte IsTrusted int64 } -func ChangesGetInfo(conn *sqlite.Conn, hdChangesBlob int64) (ChangesGetInfoResult, error) { - const query = `SELECT hd_changes.blob, hd_changes.hlc_time, public_keys.principal, trusted_accounts.id > 0 AS is_trusted -FROM hd_changes -JOIN public_keys ON public_keys.id = hd_changes.author -LEFT JOIN trusted_accounts ON trusted_accounts.id = hd_changes.author -WHERE hd_changes.blob = :hdChangesBlob +func ChangesGetInfo(conn *sqlite.Conn, changesBlob int64) (ChangesGetInfoResult, error) { + const query = `SELECT changes.blob, changes.hlc_time, public_keys.principal, trusted_accounts.id > 0 AS is_trusted +FROM changes +JOIN public_keys ON public_keys.id = changes.author +LEFT JOIN trusted_accounts ON trusted_accounts.id = changes.author +WHERE changes.blob = :changesBlob LIMIT 1` var out ChangesGetInfoResult before := func(stmt *sqlite.Stmt) { - stmt.SetInt64(":hdChangesBlob", hdChangesBlob) + stmt.SetInt64(":changesBlob", changesBlob) } onStep := func(i int, stmt *sqlite.Stmt) error { @@ -1039,8 +1021,8 @@ LIMIT 1` return errors.New("ChangesGetInfo: more than one result return for a single-kind query") } - out.HDChangesBlob = stmt.ColumnInt64(0) - out.HDChangesHlcTime = stmt.ColumnInt64(1) + out.ChangesBlob = stmt.ColumnInt64(0) + out.ChangesHLCTime = stmt.ColumnInt64(1) out.PublicKeysPrincipal = stmt.ColumnBytes(2) out.IsTrusted = stmt.ColumnInt64(3) return nil @@ -1059,16 +1041,16 @@ type ChangesGetDepsResult struct { BlobsMultihash []byte } -func ChangesGetDeps(conn *sqlite.Conn, hdChangeDepsChild int64) ([]ChangesGetDepsResult, error) { +func ChangesGetDeps(conn *sqlite.Conn, changeDepsChild int64) ([]ChangesGetDepsResult, error) { const query = `SELECT blobs.codec, 
blobs.multihash -FROM hd_change_deps -JOIN blobs ON blobs.id = hd_change_deps.parent -WHERE hd_change_deps.child = :hdChangeDepsChild` +FROM change_deps +JOIN blobs ON blobs.id = change_deps.parent +WHERE change_deps.child = :changeDepsChild` var out []ChangesGetDepsResult before := func(stmt *sqlite.Stmt) { - stmt.SetInt64(":hdChangeDepsChild", hdChangeDepsChild) + stmt.SetInt64(":changeDepsChild", changeDepsChild) } onStep := func(i int, stmt *sqlite.Stmt) error { @@ -1091,32 +1073,32 @@ WHERE hd_change_deps.child = :hdChangeDepsChild` type ChangesInfoForEntityResult struct { BlobsCodec int64 BlobsMultihash []byte - HDChangesBlob int64 - HDChangesHlcTime int64 + ChangesBlob int64 + ChangesHLCTime int64 PublicKeysPrincipal []byte IsTrusted int64 } -func ChangesInfoForEntity(conn *sqlite.Conn, hdChangesEntity int64) ([]ChangesInfoForEntityResult, error) { - const query = `SELECT blobs.codec, blobs.multihash, hd_changes.blob, hd_changes.hlc_time, public_keys.principal, trusted_accounts.id > 0 AS is_trusted -FROM hd_changes -JOIN blobs ON blobs.id = hd_changes.blob -JOIN public_keys ON public_keys.id = hd_changes.author -LEFT JOIN trusted_accounts ON trusted_accounts.id = hd_changes.author -WHERE hd_changes.entity = :hdChangesEntity` +func ChangesInfoForEntity(conn *sqlite.Conn, changesEntity int64) ([]ChangesInfoForEntityResult, error) { + const query = `SELECT blobs.codec, blobs.multihash, changes.blob, changes.hlc_time, public_keys.principal, trusted_accounts.id > 0 AS is_trusted +FROM changes +JOIN blobs ON blobs.id = changes.blob +JOIN public_keys ON public_keys.id = changes.author +LEFT JOIN trusted_accounts ON trusted_accounts.id = changes.author +WHERE changes.entity = :changesEntity` var out []ChangesInfoForEntityResult before := func(stmt *sqlite.Stmt) { - stmt.SetInt64(":hdChangesEntity", hdChangesEntity) + stmt.SetInt64(":changesEntity", changesEntity) } onStep := func(i int, stmt *sqlite.Stmt) error { out = append(out, ChangesInfoForEntityResult{ 
BlobsCodec: stmt.ColumnInt64(0), BlobsMultihash: stmt.ColumnBytes(1), - HDChangesBlob: stmt.ColumnInt64(2), - HDChangesHlcTime: stmt.ColumnInt64(3), + ChangesBlob: stmt.ColumnInt64(2), + ChangesHLCTime: stmt.ColumnInt64(3), PublicKeysPrincipal: stmt.ColumnBytes(4), IsTrusted: stmt.ColumnInt64(5), }) @@ -1132,64 +1114,43 @@ WHERE hd_changes.entity = :hdChangesEntity` return out, err } -func LinksInsert(conn *sqlite.Conn, hdLinksSourceBlob int64, hdLinksRel string, hdLinksTargetBlob int64, hdLinksTargetEntity int64, hdLinksData []byte) error { - const query = `INSERT OR IGNORE INTO hd_links (source_blob, rel, target_blob, target_entity, data) -VALUES (:hdLinksSourceBlob, :hdLinksRel, NULLIF(:hdLinksTargetBlob, 0), NULLIF(:hdLinksTargetEntity, 0), :hdLinksData)` - - before := func(stmt *sqlite.Stmt) { - stmt.SetInt64(":hdLinksSourceBlob", hdLinksSourceBlob) - stmt.SetText(":hdLinksRel", hdLinksRel) - stmt.SetInt64(":hdLinksTargetBlob", hdLinksTargetBlob) - stmt.SetInt64(":hdLinksTargetEntity", hdLinksTargetEntity) - stmt.SetBytes(":hdLinksData", hdLinksData) - } - - onStep := func(i int, stmt *sqlite.Stmt) error { - return nil - } - - err := sqlitegen.ExecStmt(conn, query, before, onStep) - if err != nil { - err = fmt.Errorf("failed query: LinksInsert: %w", err) - } - - return err -} - type BacklinksForEntityResult struct { - ContentLinksViewData []byte - ContentLinksViewRel string - ContentLinksViewSourceBlob int64 - ContentLinksViewSourceBlobCodec int64 - ContentLinksViewSourceBlobMultihash []byte - ContentLinksViewSourceEID string - ContentLinksViewSourceEntity int64 - ContentLinksViewTargetEID string - ContentLinksViewTargetEntity int64 -} - -func BacklinksForEntity(conn *sqlite.Conn, contentLinksViewTargetEID string) ([]BacklinksForEntityResult, error) { - const query = `SELECT content_links_view.data, content_links_view.rel, content_links_view.source_blob, content_links_view.source_blob_codec, content_links_view.source_blob_multihash, 
content_links_view.source_eid, content_links_view.source_entity, content_links_view.target_eid, content_links_view.target_entity -FROM content_links_view -WHERE content_links_view.target_eid = :contentLinksViewTargetEID` + EntitiesID int64 + EntitiesEID string + BlobsCodec int64 + BlobsMultihash []byte + BlobAttrsBlob int64 + BlobAttrsKey string + BlobAttrsAnchor string + BlobAttrsExtra []byte +} + +func BacklinksForEntity(conn *sqlite.Conn, blobAttrsValuePtr int64) ([]BacklinksForEntityResult, error) { + const query = `SELECT entities.id, entities.eid, blobs.codec, blobs.multihash, blob_attrs.blob, blob_attrs.key, blob_attrs.anchor, blob_attrs.extra +FROM blob_attrs +JOIN changes ON changes.blob = blob_attrs.blob +JOIN entities ON entities.id = changes.entity +JOIN blobs ON blobs.id = blob_attrs.blob +WHERE blob_attrs.key GLOB 'href/*' +AND blob_attrs.value_ptr IS NOT NULL +AND blob_attrs.value_ptr = :blobAttrsValuePtr` var out []BacklinksForEntityResult before := func(stmt *sqlite.Stmt) { - stmt.SetText(":contentLinksViewTargetEID", contentLinksViewTargetEID) + stmt.SetInt64(":blobAttrsValuePtr", blobAttrsValuePtr) } onStep := func(i int, stmt *sqlite.Stmt) error { out = append(out, BacklinksForEntityResult{ - ContentLinksViewData: stmt.ColumnBytes(0), - ContentLinksViewRel: stmt.ColumnText(1), - ContentLinksViewSourceBlob: stmt.ColumnInt64(2), - ContentLinksViewSourceBlobCodec: stmt.ColumnInt64(3), - ContentLinksViewSourceBlobMultihash: stmt.ColumnBytes(4), - ContentLinksViewSourceEID: stmt.ColumnText(5), - ContentLinksViewSourceEntity: stmt.ColumnInt64(6), - ContentLinksViewTargetEID: stmt.ColumnText(7), - ContentLinksViewTargetEntity: stmt.ColumnInt64(8), + EntitiesID: stmt.ColumnInt64(0), + EntitiesEID: stmt.ColumnText(1), + BlobsCodec: stmt.ColumnInt64(2), + BlobsMultihash: stmt.ColumnBytes(3), + BlobAttrsBlob: stmt.ColumnInt64(4), + BlobAttrsKey: stmt.ColumnText(5), + BlobAttrsAnchor: stmt.ColumnText(6), + BlobAttrsExtra: stmt.ColumnBytes(7), }) return nil 
@@ -1203,13 +1164,13 @@ WHERE content_links_view.target_eid = :contentLinksViewTargetEID` return out, err } -func DraftsInsert(conn *sqlite.Conn, hdDraftsEntity int64, hdDraftsBlob int64) error { - const query = `INSERT INTO hd_drafts (entity, blob) -VALUES (:hdDraftsEntity, :hdDraftsBlob)` +func DraftsInsert(conn *sqlite.Conn, draftsEntity int64, draftsBlob int64) error { + const query = `INSERT INTO drafts (entity, blob) +VALUES (:draftsEntity, :draftsBlob)` before := func(stmt *sqlite.Stmt) { - stmt.SetInt64(":hdDraftsEntity", hdDraftsEntity) - stmt.SetInt64(":hdDraftsBlob", hdDraftsBlob) + stmt.SetInt64(":draftsEntity", draftsEntity) + stmt.SetInt64(":draftsBlob", draftsBlob) } onStep := func(i int, stmt *sqlite.Stmt) error { @@ -1225,22 +1186,23 @@ VALUES (:hdDraftsEntity, :hdDraftsBlob)` } type DraftsGetResult struct { - HDDraftsViewBlobID int64 - HDDraftsViewCodec int64 - HDDraftsViewEntity string - HDDraftsViewEntityID int64 - HDDraftsViewMultihash []byte + DraftsViewBlobID int64 + DraftsViewCodec int64 + DraftsViewEntity []byte + DraftsViewEntityID int64 + DraftsViewMultihash []byte } -func DraftsGet(conn *sqlite.Conn, hdDraftsViewEntity string) (DraftsGetResult, error) { - const query = `SELECT hd_drafts_view.blob_id, hd_drafts_view.codec, hd_drafts_view.entity, hd_drafts_view.entity_id, hd_drafts_view.multihash -FROM hd_drafts_view -WHERE hd_drafts_view.entity = :hdDraftsViewEntity LIMIT 1` +func DraftsGet(conn *sqlite.Conn, draftsViewEntity string) (DraftsGetResult, error) { + const query = `SELECT drafts_view.blob_id, drafts_view.codec, drafts_view.entity, drafts_view.entity_id, drafts_view.multihash +FROM drafts_view +WHERE drafts_view.entity = :draftsViewEntity +LIMIT 1` var out DraftsGetResult before := func(stmt *sqlite.Stmt) { - stmt.SetText(":hdDraftsViewEntity", hdDraftsViewEntity) + stmt.SetText(":draftsViewEntity", draftsViewEntity) } onStep := func(i int, stmt *sqlite.Stmt) error { @@ -1248,11 +1210,11 @@ WHERE hd_drafts_view.entity = 
:hdDraftsViewEntity LIMIT 1` return errors.New("DraftsGet: more than one result return for a single-kind query") } - out.HDDraftsViewBlobID = stmt.ColumnInt64(0) - out.HDDraftsViewCodec = stmt.ColumnInt64(1) - out.HDDraftsViewEntity = stmt.ColumnText(2) - out.HDDraftsViewEntityID = stmt.ColumnInt64(3) - out.HDDraftsViewMultihash = stmt.ColumnBytes(4) + out.DraftsViewBlobID = stmt.ColumnInt64(0) + out.DraftsViewCodec = stmt.ColumnInt64(1) + out.DraftsViewEntity = stmt.ColumnBytes(2) + out.DraftsViewEntityID = stmt.ColumnInt64(3) + out.DraftsViewMultihash = stmt.ColumnBytes(4) return nil } @@ -1264,12 +1226,12 @@ WHERE hd_drafts_view.entity = :hdDraftsViewEntity LIMIT 1` return out, err } -func DraftsDelete(conn *sqlite.Conn, hdDraftsBlob int64) error { - const query = `DELETE FROM hd_drafts -WHERE hd_drafts.blob = :hdDraftsBlob` +func DraftsDelete(conn *sqlite.Conn, draftsBlob int64) error { + const query = `DELETE FROM drafts +WHERE drafts.blob = :draftsBlob` before := func(stmt *sqlite.Stmt) { - stmt.SetInt64(":hdDraftsBlob", hdDraftsBlob) + stmt.SetInt64(":draftsBlob", draftsBlob) } onStep := func(i int, stmt *sqlite.Stmt) error { @@ -1284,13 +1246,13 @@ WHERE hd_drafts.blob = :hdDraftsBlob` return err } -func SetReindexTime(conn *sqlite.Conn, globalMetaValue string) error { - const query = `INSERT OR REPLACE INTO global_meta (key, value) -VALUES ('last_reindex_time', :globalMetaValue) +func SetReindexTime(conn *sqlite.Conn, kvValue string) error { + const query = `INSERT OR REPLACE INTO kv (key, value) +VALUES ('last_reindex_time', :kvValue) ` before := func(stmt *sqlite.Stmt) { - stmt.SetText(":globalMetaValue", globalMetaValue) + stmt.SetText(":kvValue", kvValue) } onStep := func(i int, stmt *sqlite.Stmt) error { @@ -1306,13 +1268,13 @@ VALUES ('last_reindex_time', :globalMetaValue) } type GetReindexTimeResult struct { - GlobalMetaValue string + KVValue string } func GetReindexTime(conn *sqlite.Conn) (GetReindexTimeResult, error) { - const query = `SELECT 
global_meta.value -FROM global_meta -WHERE global_meta.key = 'last_reindex_time' + const query = `SELECT kv.value +FROM kv +WHERE kv.key = 'last_reindex_time' LIMIT 1` var out GetReindexTimeResult @@ -1325,7 +1287,7 @@ LIMIT 1` return errors.New("GetReindexTime: more than one result return for a single-kind query") } - out.GlobalMetaValue = stmt.ColumnText(0) + out.KVValue = stmt.ColumnText(0) return nil } diff --git a/backend/hyper/hypersql/queries.gensum b/backend/hyper/hypersql/queries.gensum index dcfae74380..f6693c226b 100644 --- a/backend/hyper/hypersql/queries.gensum +++ b/backend/hyper/hypersql/queries.gensum @@ -1,2 +1,2 @@ -srcs: 01db0b17708ae12f341e46887eaf0406 -outs: c738fb9535225e95c435ebe01ce18ec6 +srcs: 5f4df6221b0363f61885141e79436aba +outs: 2e7e6bbc01b388623e327f0d129ea266 diff --git a/backend/hyper/hypersql/queries.go b/backend/hyper/hypersql/queries.go index aaa0cd7194..6c63cf82fa 100644 --- a/backend/hyper/hypersql/queries.go +++ b/backend/hyper/hypersql/queries.go @@ -78,6 +78,10 @@ func generateQueries() error { "WHERE", s.BlobsSize, ">=", "0", ), + qb.MakeQuery(s.Schema, "BlobLinksInsertOrIgnore", sgen.QueryKindExec, + qb.InsertOrIgnore(s.BlobLinksSource, s.BlobLinksRel, s.BlobLinksTarget), + ), + qb.MakeQuery(s.Schema, "PublicKeysLookupID", sgen.QueryKindSingle, "SELECT", qb.Results( s.PublicKeysID, @@ -97,13 +101,15 @@ func generateQueries() error { ), qb.MakeQuery(s.Schema, "PublicKeysInsert", sgen.QueryKindSingle, - "INSERT INTO", s.PublicKeys, qb.ListColShort( - s.PublicKeysPrincipal, + "INSERT INTO", s.Lookup, qb.ListColShort( + s.LookupType, + s.LookupValue, ), '\n', "VALUES", qb.List( - qb.VarCol(s.PublicKeysPrincipal), + storage.LookupPublicKey, + qb.Var("principal", sgen.TypeBytes), ), '\n', - "RETURNING", qb.Results(s.PublicKeysID), + "RETURNING", qb.Results(qb.ResultColAlias(s.LookupID, "public_keys_id")), ), qb.MakeQuery(s.Schema, "SetAccountTrust", sgen.QueryKindExec, @@ -137,21 +143,6 @@ func generateQueries() error { "WHERE", 
s.PublicKeysPrincipal, "=", qb.Var("principal", sgen.TypeBytes), ), ), - qb.MakeQuery(s.Schema, "KeyDelegationsInsertOrIgnore", sgen.QueryKindSingle, - "INSERT OR IGNORE INTO", s.KeyDelegations, qb.ListColShort( - s.KeyDelegationsBlob, - s.KeyDelegationsIssuer, - s.KeyDelegationsDelegate, - s.KeyDelegationsIssueTime, - ), '\n', - "VALUES", qb.List( - qb.VarCol(s.KeyDelegationsBlob), - qb.VarCol(s.KeyDelegationsIssuer), - qb.VarCol(s.KeyDelegationsDelegate), - qb.VarCol(s.KeyDelegationsIssueTime), - ), '\n', - "RETURNING", qb.Results(s.KeyDelegationsBlob), - ), qb.MakeQuery(s.Schema, "KeyDelegationsList", sgen.QueryKindMany, "SELECT", qb.Results( s.KeyDelegationsViewBlob, @@ -159,7 +150,6 @@ func generateQueries() error { s.KeyDelegationsViewBlobMultihash, s.KeyDelegationsViewIssuer, s.KeyDelegationsViewDelegate, - s.KeyDelegationsViewIssueTime, ), '\n', "FROM", s.KeyDelegationsView, '\n', "WHERE", s.KeyDelegationsViewIssuer, "=", qb.VarCol(s.KeyDelegationsViewIssuer), @@ -171,7 +161,6 @@ func generateQueries() error { s.KeyDelegationsViewBlobMultihash, s.KeyDelegationsViewIssuer, s.KeyDelegationsViewDelegate, - s.KeyDelegationsViewIssueTime, ), '\n', "FROM", s.KeyDelegationsView, ), @@ -182,7 +171,6 @@ func generateQueries() error { s.KeyDelegationsViewBlobMultihash, s.KeyDelegationsViewIssuer, s.KeyDelegationsViewDelegate, - s.KeyDelegationsViewIssueTime, ), '\n', "FROM", s.KeyDelegationsView, '\n', "WHERE", s.KeyDelegationsViewDelegate, "=", qb.VarCol(s.KeyDelegationsViewDelegate), @@ -198,103 +186,106 @@ func generateQueries() error { ), qb.MakeQuery(s.Schema, "EntitiesInsertOrIgnore", sgen.QueryKindSingle, - "INSERT OR IGNORE INTO", s.HDEntities, qb.ListColShort( - s.HDEntitiesEID, + "INSERT OR IGNORE INTO", s.Lookup, qb.ListColShort( + s.LookupType, + s.LookupValue, ), '\n', "VALUES", qb.List( - qb.VarCol(s.HDEntitiesEID), + storage.LookupResource, + qb.Var("entity_id", sgen.TypeText), ), '\n', - "RETURNING", qb.Results(s.HDEntitiesID), + "RETURNING", 
qb.Results(qb.ResultColAlias(s.LookupID, "entities_id")), ), qb.MakeQuery(s.Schema, "EntitiesLookupID", sgen.QueryKindSingle, "SELECT", qb.Results( - s.HDEntitiesID, + s.EntitiesID, ), '\n', - "FROM", s.HDEntities, '\n', - "WHERE", s.HDEntitiesEID, "=", qb.VarCol(s.HDEntitiesEID), '\n', + "FROM", s.Entities, '\n', + "WHERE", s.EntitiesEID, "=", qb.Var("entities_eid", sgen.TypeText), '\n', "LIMIT 1", ), qb.MakeQuery(s.Schema, "EntitiesListByPrefix", sgen.QueryKindMany, "SELECT", qb.Results( - s.HDEntitiesID, - s.HDEntitiesEID, + s.EntitiesID, + s.EntitiesEID, ), '\n', - "FROM", s.HDEntities, '\n', - "WHERE", s.HDEntitiesEID, "GLOB", qb.Var("prefix", sgen.TypeText), '\n', - "ORDER BY", s.HDEntitiesID, + "FROM", s.Entities, '\n', + "WHERE", s.EntitiesEID, "GLOB", qb.Var("prefix", sgen.TypeText), '\n', + "ORDER BY", s.EntitiesID, ), qb.MakeQuery(s.Schema, "EntitiesDelete", sgen.QueryKindExec, - "DELETE FROM", s.HDEntities, '\n', - "WHERE", s.HDEntitiesEID, "=", qb.VarCol(s.HDEntitiesEID), + "DELETE FROM", s.Lookup, '\n', + "WHERE", s.LookupType, "=", storage.LookupResource, '\n', + "AND", s.LookupValue, "=", qb.Var("entities_eid", sgen.TypeText), ), qb.MakeQuery(s.Schema, "ChangesInsertOrIgnore", sgen.QueryKindExec, - "INSERT OR IGNORE INTO", s.HDChanges, qb.ListColShort( - s.HDChangesBlob, - s.HDChangesEntity, - s.HDChangesHlcTime, - s.HDChangesAuthor, + "INSERT OR IGNORE INTO", s.Changes, qb.ListColShort( + s.ChangesBlob, + s.ChangesEntity, + s.ChangesHLCTime, + s.ChangesAuthor, ), '\n', "VALUES", qb.List( - qb.VarCol(s.HDChangesBlob), - qb.VarCol(s.HDChangesEntity), - qb.VarCol(s.HDChangesHlcTime), - qb.VarCol(s.HDChangesAuthor), + qb.VarCol(s.ChangesBlob), + qb.VarCol(s.ChangesEntity), + qb.VarCol(s.ChangesHLCTime), + qb.VarCol(s.ChangesAuthor), ), ), qb.MakeQuery(s.Schema, "ChangesListFromChangeSet", sgen.QueryKindMany, "SELECT", qb.Results( - s.HDChangesViewBlobID, - s.HDChangesViewCodec, - s.HDChangesViewData, - s.HDChangesViewEntityID, - s.HDChangesViewHlcTime, 
- s.HDChangesViewMultihash, - s.HDChangesViewSize, - ), '\n', - "FROM", qb.Concat(s.HDChangesView, ", ", "json_each(", qb.Var("cset", sgen.TypeBytes), ") AS cset"), '\n', - "WHERE", s.HDChangesViewEntity, "=", qb.VarCol(s.HDChangesViewEntity), '\n', - "AND", s.HDChangesViewBlobID, "= cset.value", '\n', - "ORDER BY", s.HDChangesViewHlcTime, + s.ChangesViewBlobID, + s.ChangesViewCodec, + s.ChangesViewData, + s.ChangesViewEntityID, + s.ChangesViewHLCTime, + s.ChangesViewMultihash, + s.ChangesViewSize, + ), '\n', + "FROM", qb.Concat(s.ChangesView, ", ", "json_each(", qb.Var("cset", sgen.TypeBytes), ") AS cset"), '\n', + "WHERE", s.ChangesViewEntity, "=", qb.VarColType(s.ChangesViewEntity, sgen.TypeText), '\n', + "AND", s.ChangesViewBlobID, "= cset.value", '\n', + "ORDER BY", s.ChangesViewHLCTime, ), qb.MakeQuery(s.Schema, "ChangesListForEntity", sgen.QueryKindMany, "SELECT", qb.Results( - s.HDChangesViewBlobID, - s.HDChangesViewCodec, - s.HDChangesViewData, - s.HDChangesViewEntityID, - s.HDChangesViewHlcTime, - s.HDChangesViewMultihash, - s.HDChangesViewSize, - ), '\n', - "FROM", s.HDChangesView, '\n', - "WHERE", s.HDChangesViewEntity, "=", qb.VarCol(s.HDChangesViewEntity), '\n', - "ORDER BY", s.HDChangesViewHlcTime, + s.ChangesViewBlobID, + s.ChangesViewCodec, + s.ChangesViewData, + s.ChangesViewEntityID, + s.ChangesViewHLCTime, + s.ChangesViewMultihash, + s.ChangesViewSize, + ), '\n', + "FROM", s.ChangesView, '\n', + "WHERE", s.ChangesViewEntity, "=", qb.VarColType(s.ChangesViewEntity, sgen.TypeText), '\n', + "ORDER BY", s.ChangesViewHLCTime, ), qb.MakeQuery(s.Schema, "ChangesListPublicNoData", sgen.QueryKindMany, "SELECT", qb.Results( - s.HDChangesViewBlobID, - s.HDChangesViewCodec, - s.HDChangesViewEntityID, - s.HDChangesViewHlcTime, - s.HDChangesViewMultihash, - s.HDChangesViewSize, - s.HDChangesViewEntity, - s.HDDraftsBlob, - ), '\n', - "FROM", s.HDChangesView, '\n', - "LEFT JOIN", s.HDDrafts, "ON", s.HDDraftsEntity, "=", s.HDChangesViewEntityID, '\n', - "WHERE", 
s.HDDraftsBlob, "IS NULL", '\n', - "ORDER BY", qb.Enumeration(s.HDChangesViewEntity, s.HDChangesViewHlcTime), + s.ChangesViewBlobID, + s.ChangesViewCodec, + s.ChangesViewEntityID, + s.ChangesViewHLCTime, + s.ChangesViewMultihash, + s.ChangesViewSize, + s.ChangesViewEntity, + s.DraftsBlob, + ), '\n', + "FROM", s.ChangesView, '\n', + "LEFT JOIN", s.Drafts, "ON", s.DraftsEntity, "=", s.ChangesViewEntityID, '\n', + "WHERE", s.DraftsBlob, "IS NULL", '\n', + "ORDER BY", qb.Enumeration(s.ChangesViewEntity, s.ChangesViewHLCTime), ), qb.MakeQuery(s.Schema, "ChangesResolveHeads", sgen.QueryKindSingle, "WITH RECURSIVE changeset (change) AS", qb.SubQuery( "SELECT value", "FROM", qb.Concat("json_each(", qb.Var("heads", sgen.TypeBytes), ")"), "UNION", - "SELECT", storage.HDChangeDepsParent, - "FROM", storage.HDChangeDeps, - "JOIN changeset ON changeset.change", "=", storage.HDChangeDepsChild, + "SELECT", storage.ChangeDepsParent, + "FROM", storage.ChangeDeps, + "JOIN changeset ON changeset.change", "=", storage.ChangeDepsChild, ), '\n', "SELECT", qb.Results( qb.ResultRaw("json_group_array(change) AS resolved_json", "resolved_json", sgen.TypeBytes), @@ -313,16 +304,16 @@ func generateQueries() error { }, SQL: `WITH non_drafts (blob) AS ( - SELECT ` + s.C_HDChangesBlob + ` - FROM ` + s.T_HDChanges + ` - LEFT JOIN ` + s.T_HDDrafts + ` ON ` + s.C_HDDraftsEntity + ` = ` + s.C_HDChangesEntity + ` AND ` + s.C_HDChangesBlob + ` = ` + s.C_HDDraftsBlob + ` - WHERE ` + s.C_HDChangesEntity + ` = :entity - AND ` + s.C_HDDraftsBlob + ` IS NULL + SELECT ` + s.C_ChangesBlob + ` + FROM ` + s.T_Changes + ` + LEFT JOIN ` + s.T_Drafts + ` ON ` + s.C_DraftsEntity + ` = ` + s.C_ChangesEntity + ` AND ` + s.C_ChangesBlob + ` = ` + s.C_DraftsBlob + ` + WHERE ` + s.C_ChangesEntity + ` = :entity + AND ` + s.C_DraftsBlob + ` IS NULL ), deps (blob) AS ( - SELECT DISTINCT ` + s.C_HDChangeDepsParent + ` - FROM ` + s.T_HDChangeDeps + ` - JOIN non_drafts ON non_drafts.blob = ` + s.C_HDChangeDepsChild + ` + 
SELECT DISTINCT ` + s.C_ChangeDepsParent + ` + FROM ` + s.T_ChangeDeps + ` + JOIN non_drafts ON non_drafts.blob = ` + s.C_ChangeDepsChild + ` ) SELECT json_group_array(blob) AS heads FROM non_drafts @@ -337,30 +328,30 @@ WHERE blob NOT IN deps`, Outputs: []sgen.GoSymbol{ {Name: "Heads", Type: sgen.TypeBytes}, }, - SQL: `SELECT json_group_array(` + s.C_HDChangesBlob + `) AS heads -FROM ` + s.T_HDChanges + ` -JOIN ` + s.T_TrustedAccounts + ` ON ` + s.C_TrustedAccountsID + ` = ` + s.C_HDChangesAuthor + ` -WHERE ` + s.C_HDChangesEntity + ` = :entity`, + SQL: `SELECT json_group_array(` + s.C_ChangesBlob + `) AS heads +FROM ` + s.T_Changes + ` +JOIN ` + s.T_TrustedAccounts + ` ON ` + s.C_TrustedAccountsID + ` = ` + s.C_ChangesAuthor + ` +WHERE ` + s.C_ChangesEntity + ` = :entity`, }, qb.MakeQuery(s.Schema, "ChangesDeleteForEntity", sgen.QueryKindExec, "DELETE FROM", s.Blobs, '\n', "WHERE", s.BlobsID, "IN", qb.SubQuery( - "SELECT", s.HDChangesBlob, - "FROM", s.HDChanges, - "WHERE", s.HDChangesEntity, "=", qb.VarCol(s.HDChangesEntity), + "SELECT", s.ChangesBlob, + "FROM", s.Changes, + "WHERE", s.ChangesEntity, "=", qb.VarCol(s.ChangesEntity), ), ), qb.MakeQuery(s.Schema, "ChangesGetInfo", sgen.QueryKindSingle, "SELECT", qb.Results( - s.HDChangesBlob, - s.HDChangesHlcTime, + s.ChangesBlob, + s.ChangesHLCTime, s.PublicKeysPrincipal, qb.ResultExpr(s.C_TrustedAccountsID+" > 0", "is_trusted", sgen.TypeInt), ), '\n', - "FROM", s.HDChanges, '\n', - "JOIN", s.PublicKeys, "ON", s.PublicKeysID, "=", s.HDChangesAuthor, '\n', - "LEFT JOIN", s.TrustedAccounts, "ON", s.TrustedAccountsID, "=", s.HDChangesAuthor, '\n', - "WHERE", s.HDChangesBlob, "=", qb.VarCol(s.HDChangesBlob), '\n', + "FROM", s.Changes, '\n', + "JOIN", s.PublicKeys, "ON", s.PublicKeysID, "=", s.ChangesAuthor, '\n', + "LEFT JOIN", s.TrustedAccounts, "ON", s.TrustedAccountsID, "=", s.ChangesAuthor, '\n', + "WHERE", s.ChangesBlob, "=", qb.VarCol(s.ChangesBlob), '\n', "LIMIT 1", ), qb.MakeQuery(s.Schema, "ChangesGetDeps", 
sgen.QueryKindMany, @@ -368,99 +359,87 @@ WHERE ` + s.C_HDChangesEntity + ` = :entity`, s.BlobsCodec, s.BlobsMultihash, ), '\n', - "FROM", s.HDChangeDeps, '\n', - "JOIN", s.Blobs, "ON", s.BlobsID, "=", s.HDChangeDepsParent, '\n', - "WHERE", s.HDChangeDepsChild, "=", qb.VarCol(s.HDChangeDepsChild), + "FROM", s.ChangeDeps, '\n', + "JOIN", s.Blobs, "ON", s.BlobsID, "=", s.ChangeDepsParent, '\n', + "WHERE", s.ChangeDepsChild, "=", qb.VarCol(s.ChangeDepsChild), ), qb.MakeQuery(s.Schema, "ChangesInfoForEntity", sgen.QueryKindMany, "SELECT", qb.Results( s.BlobsCodec, s.BlobsMultihash, - s.HDChangesBlob, - s.HDChangesHlcTime, + s.ChangesBlob, + s.ChangesHLCTime, s.PublicKeysPrincipal, qb.ResultExpr(s.C_TrustedAccountsID+" > 0", "is_trusted", sgen.TypeInt), ), '\n', - "FROM", s.HDChanges, '\n', - "JOIN", s.Blobs, "ON", s.BlobsID, "=", s.HDChangesBlob, '\n', - "JOIN", s.PublicKeys, "ON", s.PublicKeysID, "=", s.HDChangesAuthor, '\n', - "LEFT JOIN", s.TrustedAccounts, "ON", s.TrustedAccountsID, "=", s.HDChangesAuthor, '\n', - "WHERE", s.HDChangesEntity, "=", qb.VarCol(s.HDChangesEntity), + "FROM", s.Changes, '\n', + "JOIN", s.Blobs, "ON", s.BlobsID, "=", s.ChangesBlob, '\n', + "JOIN", s.PublicKeys, "ON", s.PublicKeysID, "=", s.ChangesAuthor, '\n', + "LEFT JOIN", s.TrustedAccounts, "ON", s.TrustedAccountsID, "=", s.ChangesAuthor, '\n', + "WHERE", s.ChangesEntity, "=", qb.VarCol(s.ChangesEntity), ), - qb.MakeQuery(s.Schema, "LinksInsert", sgen.QueryKindExec, - "INSERT OR IGNORE INTO", s.HDLinks, qb.ListColShort( - s.HDLinksSourceBlob, - s.HDLinksRel, - s.HDLinksTargetBlob, - s.HDLinksTargetEntity, - s.HDLinksData, - ), '\n', - "VALUES", qb.List( - qb.VarCol(s.HDLinksSourceBlob), - qb.VarCol(s.HDLinksRel), - qb.Concat("NULLIF(", qb.VarCol(s.HDLinksTargetBlob), ", 0)"), - qb.Concat("NULLIF(", qb.VarCol(s.HDLinksTargetEntity), ", 0)"), - qb.VarCol(s.HDLinksData), - ), - ), qb.MakeQuery(s.Schema, "BacklinksForEntity", sgen.QueryKindMany, "SELECT", qb.Results( - 
s.ContentLinksViewData, - s.ContentLinksViewRel, - s.ContentLinksViewSourceBlob, - s.ContentLinksViewSourceBlobCodec, - s.ContentLinksViewSourceBlobMultihash, - s.ContentLinksViewSourceEID, - s.ContentLinksViewSourceEntity, - s.ContentLinksViewTargetEID, - s.ContentLinksViewTargetEntity, - ), '\n', - "FROM", s.ContentLinksView, '\n', - "WHERE", s.ContentLinksViewTargetEID, "=", qb.VarCol(s.ContentLinksViewTargetEID), + s.EntitiesID, + s.EntitiesEID, + s.BlobsCodec, + s.BlobsMultihash, + s.BlobAttrsBlob, + s.BlobAttrsKey, + s.BlobAttrsAnchor, + s.BlobAttrsExtra, + ), '\n', + "FROM", s.BlobAttrs, '\n', + "JOIN", s.Changes, "ON", s.ChangesBlob, "=", s.BlobAttrsBlob, '\n', + "JOIN", s.Entities, "ON", s.EntitiesID, "=", s.ChangesEntity, '\n', + "JOIN", s.Blobs, "ON", s.BlobsID, "=", s.BlobAttrsBlob, '\n', + "WHERE", s.BlobAttrsKey, "GLOB 'href/*'", '\n', + "AND", s.BlobAttrsValuePtr, "IS NOT NULL", '\n', + "AND", s.BlobAttrsValuePtr, "=", qb.VarCol(s.BlobAttrsValuePtr), ), qb.MakeQuery(s.Schema, "DraftsInsert", sgen.QueryKindExec, - "INSERT INTO", s.HDDrafts, qb.ListColShort( - s.HDDraftsEntity, - s.HDDraftsBlob, + "INSERT INTO", s.Drafts, qb.ListColShort( + s.DraftsEntity, + s.DraftsBlob, ), '\n', "VALUES", qb.List( - qb.VarCol(s.HDDraftsEntity), - qb.VarCol(s.HDDraftsBlob), + qb.VarCol(s.DraftsEntity), + qb.VarCol(s.DraftsBlob), ), ), qb.MakeQuery(s.Schema, "DraftsGet", sgen.QueryKindSingle, "SELECT", qb.Results( - s.HDDraftsViewBlobID, - s.HDDraftsViewCodec, - s.HDDraftsViewEntity, - s.HDDraftsViewEntityID, - s.HDDraftsViewMultihash, - ), '\n', - "FROM", s.HDDraftsView, '\n', - "WHERE", s.HDDraftsViewEntity, "=", qb.VarCol(s.HDDraftsViewEntity), + s.DraftsViewBlobID, + s.DraftsViewCodec, + s.DraftsViewEntity, + s.DraftsViewEntityID, + s.DraftsViewMultihash, + ), '\n', + "FROM", s.DraftsView, '\n', + "WHERE", s.DraftsViewEntity, "=", qb.VarColType(s.DraftsViewEntity, sgen.TypeText), '\n', "LIMIT 1", ), qb.MakeQuery(s.Schema, "DraftsDelete", sgen.QueryKindExec, - 
"DELETE FROM", s.HDDrafts, '\n', - "WHERE", s.HDDraftsBlob, "=", qb.VarCol(s.HDDraftsBlob), + "DELETE FROM", s.Drafts, '\n', + "WHERE", s.DraftsBlob, "=", qb.VarCol(s.DraftsBlob), ), qb.MakeQuery(s.Schema, "SetReindexTime", sgen.QueryKindExec, - "INSERT OR REPLACE INTO", s.GlobalMeta, qb.ListColShort( - s.GlobalMetaKey, - s.GlobalMetaValue, + "INSERT OR REPLACE INTO", s.KV, qb.ListColShort( + s.KVKey, + s.KVValue, ), '\n', "VALUES", qb.List( "'last_reindex_time'", - qb.VarCol(s.GlobalMetaValue), + qb.VarCol(s.KVValue), ), '\n', ), qb.MakeQuery(s.Schema, "GetReindexTime", sgen.QueryKindSingle, - "SELECT", qb.Results(s.GlobalMetaValue), '\n', - "FROM", s.GlobalMeta, '\n', - "WHERE", s.GlobalMetaKey, "= 'last_reindex_time'", '\n', + "SELECT", qb.Results(s.KVValue), '\n', + "FROM", s.KV, '\n', + "WHERE", s.KVKey, "= 'last_reindex_time'", '\n', "LIMIT 1", ), ) diff --git a/backend/hyper/hypersql/queries.manual.go b/backend/hyper/hypersql/queries.manual.go new file mode 100644 index 0000000000..298c9dd66b --- /dev/null +++ b/backend/hyper/hypersql/queries.manual.go @@ -0,0 +1,160 @@ +package hypersql + +import ( + "fmt" + "mintter/backend/daemon/storage" + + "crawshaw.io/sqlite" + "crawshaw.io/sqlite/sqlitex" +) + +// LookupInsert is a query to insert lookup values. +func LookupInsert(conn *sqlite.Conn, ltype int, value any) (id int64, err error) { + const q = `INSERT INTO lookup (type, value) VALUES (?, ?) RETURNING id;` + + if err := sqlitex.Exec(conn, q, func(stmt *sqlite.Stmt) error { + id = stmt.ColumnInt64(0) + return nil + }, ltype, value); err != nil { + return 0, err + } + + if id == 0 { + return 0, fmt.Errorf("failed to insert lookup value") + } + + return id, nil +} + +// LookupGet find the ID of the lookup value. +func LookupGet(conn *sqlite.Conn, ltype int, value any) (id int64, err error) { + const q = "SELECT id FROM lookup WHERE type = ? 
AND value = ?;" + + if err := sqlitex.Exec(conn, q, func(stmt *sqlite.Stmt) error { + id = stmt.ColumnInt64(0) + return nil + }, ltype, value); err != nil { + return 0, err + } + + return id, nil +} + +// LookupEnsure makes sure lookup value exists and returns its ID. +func LookupEnsure(conn *sqlite.Conn, ltype int, value any) (id int64, err error) { + id, err = LookupGet(conn, ltype, value) + if err != nil { + return 0, fmt.Errorf("failed to get lookup value: %w", err) + } + + if id != 0 { + return id, nil + } + + return LookupInsert(conn, ltype, value) +} + +var qAttrInsert = `INSERT INTO ` + storage.BlobAttrs.String() + ` ( + ` + storage.BlobAttrsBlob.ShortName() + `, + ` + storage.BlobAttrsKey.ShortName() + `, + ` + storage.BlobAttrsAnchor.ShortName() + `, + ` + storage.BlobAttrsIsLookup.ShortName() + `, + ` + storage.BlobAttrsValue.ShortName() + `, + ` + storage.BlobAttrsExtra.ShortName() + `, + ` + storage.BlobAttrsTs.ShortName() + ` +) VALUES (?, ?, ?, ?, ?, ?, ?);` + +// BlobAttrsInsert inserts blob attribute. +func BlobAttrsInsert(conn *sqlite.Conn, blob int64, key, anchor string, isLookup bool, value, extra any, ts int64) error { + return sqlitex.Exec(conn, qAttrInsert, nil, blob, key, anchor, isLookup, value, extra, ts) +} + +// IsResourceOwner checks if the account is the owner of the resource. +func IsResourceOwner(conn *sqlite.Conn, resource, account int64) (bool, error) { + owner, err := ResourceGetOwner(conn, resource) + if err != nil { + return false, err + } + + return account == owner, nil +} + +// ResourceGetOwner returns the owner of the resource. 
+func ResourceGetOwner(conn *sqlite.Conn, resource int64) (int64, error) { + const q = ` + SELECT + blob_attrs.value_ptr + FROM changes + JOIN blob_attrs ON blob_attrs.blob = changes.blob + WHERE changes.entity = :entity + AND blob_attrs.key = 'resource/owner' + AND blob_attrs.value_ptr IS NOT NULL` + + var owner int64 + if err := sqlitex.Exec(conn, q, func(stmt *sqlite.Stmt) error { + if owner != 0 { + return fmt.Errorf("more than one owner resource owner found") + } + owner = stmt.ColumnInt64(0) + return nil + }, resource); err != nil { + return 0, err + } + + if owner == 0 { + return 0, fmt.Errorf("resource not found or has no owner") + } + + return owner, nil +} + +// GroupListMembers return the list of group members. +func GroupListMembers(conn *sqlite.Conn, resource, owner int64, fn func(principal []byte, role int64) error) error { + const q = ` + SELECT + lookup.value AS principal, + blob_attrs.extra AS role + FROM changes + JOIN blob_attrs ON blob_attrs.blob = changes.blob + JOIN lookup ON lookup.id = blob_attrs.value_ptr + WHERE changes.entity = :entity + AND changes.author = :owner + AND blob_attrs.key = 'group/member' + AND blob_attrs.value_ptr IS NOT NULL + ` + + return sqlitex.Exec(conn, q, func(stmt *sqlite.Stmt) error { + principal := stmt.ColumnBytes(0) + role := stmt.ColumnInt64(1) + return fn(principal, role) + }, resource, owner) +} + +// GroupGetRole returns the role of the member in the group. 
+func GroupGetRole(conn *sqlite.Conn, resource, owner, member int64) (int64, error) {
+	const q = `
+		SELECT
+			blob_attrs.extra AS role
+		FROM changes
+		JOIN blob_attrs ON blob_attrs.blob = changes.blob
+		JOIN lookup ON lookup.id = blob_attrs.value_ptr
+		WHERE changes.entity = :entity
+		AND changes.author = :owner
+		AND blob_attrs.key = 'group/member'
+		AND blob_attrs.value_ptr IS NOT NULL
+		AND blob_attrs.value_ptr = :member
+		ORDER BY blob_attrs.ts DESC
+		LIMIT 1
+	`
+
+	var role int64
+	// Bind resource, owner and member: unbound named params are NULL, so the query would match no rows and role would always be 0.
+	if err := sqlitex.Exec(conn, q, func(stmt *sqlite.Stmt) error {
+		role = stmt.ColumnInt64(0)
+		return nil
+	}, resource, owner, member); err != nil {
+		return 0, err
+	}
+
+	return role, nil
+}
diff --git a/backend/hyper/indexing.go b/backend/hyper/indexing.go
index 23cc24168f..de9fc75303 100644
--- a/backend/hyper/indexing.go
+++ b/backend/hyper/indexing.go
@@ -1,13 +1,16 @@
 package hyper
 
 import (
+	"bytes"
 	"context"
 	"encoding/json"
 	"fmt"
 	"mintter/backend/core"
 	"mintter/backend/daemon/storage"
 	documents "mintter/backend/genproto/documents/v1alpha"
+	groups "mintter/backend/genproto/groups/v1alpha"
 	"mintter/backend/hyper/hypersql"
+	"mintter/backend/ipfs"
 	"net/url"
 	"time"
@@ -41,10 +44,9 @@ func (bs *Storage) reindex(conn *sqlite.Conn) (err error) {
 	// Order is important to ensure foreign key constraints are not violated.
derivedTables := []string{ - storage.T_HDLinks, - storage.T_KeyDelegations, - storage.T_HDChanges, - storage.T_HDEntities, + storage.T_Changes, + storage.T_BlobLinks, + storage.T_BlobAttrs, } const q = "SELECT * FROM " + storage.T_Blobs @@ -109,7 +111,7 @@ func (bs *Storage) MaybeReindex(ctx context.Context) error { return err } - if res.GlobalMetaValue == "" { + if res.KVValue == "" { return bs.reindex(conn) } @@ -122,29 +124,47 @@ func (bs *Storage) MaybeReindex(ctx context.Context) error { func (bs *Storage) indexBlob(conn *sqlite.Conn, id int64, blob Blob) error { switch v := blob.Decoded.(type) { case KeyDelegation: - iss, err := bs.ensurePublicKey(conn, v.Issuer) + // Validate key delegation. + { + if v.Purpose != DelegationPurposeRegistration { + return fmt.Errorf("unknown key delegation purpose %q", v.Purpose) + } + + if _, err := v.Issuer.Libp2pKey(); err != nil { + return fmt.Errorf("key delegation issuer is not a valid libp2p public key: %w", err) + } + + if _, err := v.Delegate.Libp2pKey(); err != nil { + return fmt.Errorf("key delegation delegate is not a valid libp2p public key: %w", err) + } + } + + iss, err := hypersql.LookupEnsure(conn, storage.LookupPublicKey, v.Issuer) if err != nil { return err } - del, err := bs.ensurePublicKey(conn, v.Delegate) + del, err := hypersql.LookupEnsure(conn, storage.LookupPublicKey, v.Delegate) if err != nil { return err } - if v.Purpose != DelegationPurposeRegistration { - bs.log.Warn("UnknownKeyDelegationPurpose", zap.String("purpose", v.Purpose)) - } else { - _, err := hypersql.EntitiesInsertOrIgnore(conn, "hd://a/"+v.Issuer.String()) - if err != nil { - return err - } + // We know issuer is an account when delegation purpose is registration. 
+ accEntity := EntityID("hm://a/" + v.Issuer.String()) + if _, err := hypersql.LookupEnsure(conn, storage.LookupResource, accEntity); err != nil { + return err } - if _, err := hypersql.KeyDelegationsInsertOrIgnore(conn, id, iss, del, v.IssueTime.Unix()); err != nil { + if err := hypersql.BlobAttrsInsert(conn, id, "kd/issuer", "", true, iss, nil, 0); err != nil { + return err + } + + if err := hypersql.BlobAttrsInsert(conn, id, "kd/delegate", "", true, del, nil, 0); err != nil { return err } case Change: + // TODO(burdiyan): ensure there's only one change that brings an entity into life. + iss, err := hypersql.KeyDelegationsGetIssuer(conn, v.Delegation.Hash()) if err != nil { return err @@ -173,23 +193,48 @@ func (bs *Storage) indexBlob(conn *sqlite.Conn, id int64, blob Blob) error { } } + // TODO(burdiyan): remove this when all the tests are fixed. Sometimes CBOR codec decodes into + // different types that what was encoded, and we might not have accounted for that during indexing. + // So we re-encode the patch here to make sure. + // This is of course very wasteful. 
+ { + data, err := cbornode.DumpObject(v.Patch) + if err != nil { + return err + } + v.Patch = nil + + if err := cbornode.DecodeInto(data, &v.Patch); err != nil { + return err + } + } + + isspk, err := hypersql.PublicKeysLookupPrincipal(conn, iss.KeyDelegationsIssuer) + if err != nil { + return err + } + // ensure entity eid, err := bs.ensureEntity(conn, v.Entity) if err != nil { return err } + if err := hypersql.BlobAttrsInsert(conn, id, "resource/id", "", true, eid, nil, v.HLCTime.Pack()); err != nil { + return err + } + for _, dep := range v.Deps { res, err := hypersql.BlobsGetSize(conn, dep.Hash()) if err != nil { return err } - if res.BlobsSize < 0 { + if res.BlobsSize < 0 || res.BlobsID == 0 { return fmt.Errorf("missing causal dependency %s of change %s", dep, blob.CID) } - if err := hypersql.LinksInsert(conn, id, "change:depends", res.BlobsID, 0, nil); err != nil { - return fmt.Errorf("failed to link dependency %s of change %s", dep, blob.CID) + if err := hypersql.BlobLinksInsertOrIgnore(conn, id, "change/dep", res.BlobsID); err != nil { + return fmt.Errorf("failed to link dependency %s of change %s: %w", dep, blob.CID, err) } } @@ -197,8 +242,12 @@ func (bs *Storage) indexBlob(conn *sqlite.Conn, id int64, blob Blob) error { return err } - if err := bs.indexLinks(conn, id, blob.CID, v); err != nil { - return err + if v.Entity.HasPrefix("hm://d/") { + return bs.indexDocumentChange(conn, id, isspk.PublicKeysPrincipal, blob.CID, v) + } + + if v.Entity.HasPrefix("hm://g/") { + return bs.indexGroupChange(conn, id, isspk.PublicKeysPrincipal, blob.CID, v) } } @@ -210,19 +259,19 @@ func (bs *Storage) ensureEntity(conn *sqlite.Conn, eid EntityID) (int64, error) if err != nil { return 0, err } - if look.HDEntitiesID != 0 { - return look.HDEntitiesID, nil + if look.EntitiesID != 0 { + return look.EntitiesID, nil } ins, err := hypersql.EntitiesInsertOrIgnore(conn, string(eid)) if err != nil { return 0, err } - if ins.HDEntitiesID == 0 { + if ins.EntitiesID == 0 { return 
0, fmt.Errorf("failed to insert entity for some reason") } - return ins.HDEntitiesID, nil + return ins.EntitiesID, nil } func (bs *Storage) ensurePublicKey(conn *sqlite.Conn, key core.Principal) (int64, error) { @@ -247,55 +296,244 @@ func (bs *Storage) ensurePublicKey(conn *sqlite.Conn, key core.Principal) (int64 return ins.PublicKeysID, nil } -func (bs *Storage) indexLinks(conn *sqlite.Conn, blobID int64, c cid.Cid, ch Change) error { - if !ch.Entity.HasPrefix("hd://d/") { - return nil - } +func (bs *Storage) indexGroupChange(conn *sqlite.Conn, blobID int64, author core.Principal, c cid.Cid, ch Change) error { + hlc := ch.HLCTime.Pack() - blocks, ok := ch.Patch["blocks"].(map[string]any) - if !ok { - return nil - } + // Validate group change. + { + if ch.Patch == nil { + return fmt.Errorf("group change must have a patch") + } - handleURL := func(sourceBlockID, linkType, rawURL string) error { - if rawURL == "" { - return nil + pkdb, err := hypersql.LookupEnsure(conn, storage.LookupPublicKey, author) + if err != nil { + return err } - u, err := url.Parse(rawURL) + edb, err := hypersql.LookupEnsure(conn, storage.LookupResource, ch.Entity) if err != nil { - bs.log.Warn("FailedToParseURL", zap.String("url", rawURL), zap.Error(err)) - return nil + return err } - switch u.Scheme { - case "hd": - ld := LinkData{ - SourceBlock: sourceBlockID, - TargetFragment: u.Fragment, - TargetVersion: u.Query().Get("v"), + switch ch.Action { + case "Create": + if len(ch.Deps) != 0 { + return fmt.Errorf("group change with Create action must have no deps, got = %d", len(ch.Deps)) + } + + nonce, ok := ch.Patch["nonce"].([]byte) + if !ok { + return fmt.Errorf("change that creates a group must have a nonce to verify the ID") + } + + ct, ok := ch.Patch["createTime"].(int) + if !ok { + return fmt.Errorf("change that creates a group must have a createTime field in its patch") + } + + ownerField, ok := ch.Patch["owner"].([]byte) + if !ok { + return fmt.Errorf("change that creates a group must 
have an owner field in its patch") + } + + if !bytes.Equal(ownerField, author) { + return fmt.Errorf("owner field in the create change must correspond with the author of the change") + } + + id, _ := NewUnforgeableID(ownerField, nonce, int64(ct)) + if ch.Entity.TrimPrefix("hm://g/") != id { + return fmt.Errorf("failed to verify group ID %s with a nonce", ch.Entity) + } + + if err := hypersql.BlobAttrsInsert(conn, blobID, "resource/owner", "", true, pkdb, nil, hlc); err != nil { + return err + } + if err := hypersql.BlobAttrsInsert(conn, blobID, "resource/create-time", "", false, ct, nil, 0); err != nil { + return err + } + + // Convenience attribute to include owner as a member. + if err := hypersql.BlobAttrsInsert(conn, blobID, "group/member", "", true, pkdb, int(groups.Role_OWNER), hlc); err != nil { + return err + } + case "Update": + if len(ch.Deps) == 0 { + return fmt.Errorf("group change with Update action must have at least one dep") + } + + if ch.Patch["nonce"] != nil { + return fmt.Errorf("update change must not have nonce set") + } + + if ch.Patch["owner"] != nil { + return fmt.Errorf("update change must not have owner field") } - target := EntityID("hd://" + u.Host + u.Path) - rel := "href:" + linkType + if ch.Patch["createTime"] != nil { + return fmt.Errorf("update change must not have createTime field") + } - targetID, err := bs.ensureEntity(conn, target) + owner, err := hypersql.ResourceGetOwner(conn, edb) if err != nil { return err } - ldjson, err := json.Marshal(ld) + isOwner := owner == pkdb + + // We only care about role if we're not an owner. 
+ var role int64 + if !isOwner { + role, err = hypersql.GroupGetRole(conn, edb, owner, pkdb) + if err != nil { + return err + } + } + + if !isOwner && role == 0 { + return fmt.Errorf("group change author is not allowed to edit the group") + } + + if ch.Patch["members"] != nil && !isOwner { + return fmt.Errorf("group members can only be updated by an owner") + } + default: + return fmt.Errorf("unknown group action %q", ch.Action) + } + } + + title, ok := ch.Patch["title"].(string) + if ok { + if err := hypersql.BlobAttrsInsert(conn, blobID, "resource/title", "", false, title, nil, hlc); err != nil { + return err + } + } + + desc, ok := ch.Patch["description"].(string) + if ok { + if err := hypersql.BlobAttrsInsert(conn, blobID, "resource/description", "", false, desc, nil, hlc); err != nil { + return err + } + } + + content, ok := ch.Patch["content"].(map[string]any) + if ok { + for path, v := range content { + rawURL, ok := v.(string) + if !ok { + bs.log.Warn("Group content value is not string", zap.Any("value", v), zap.String("path", path)) + continue + } + + if err := bs.indexURL(conn, blobID, "group/content", path, rawURL, hlc); err != nil { + return err + } + } + } + + members, ok := ch.Patch["members"].(map[string]any) + if ok { + for k, v := range members { + acc, err := core.DecodePrincipal(k) if err != nil { - return fmt.Errorf("failed to encode link data: %w", err) + return fmt.Errorf("failed to parse group member as principal: %w", err) + } + + role, ok := v.(int) + if !ok { + return fmt.Errorf("member must have valid role") + } + + if role == int(groups.Role_OWNER) { + return fmt.Errorf("owner role can't be used in updates") + } + + accid, err := bs.ensurePublicKey(conn, acc) + if err != nil { + return err } - if err := hypersql.LinksInsert(conn, blobID, rel, 0, targetID, ldjson); err != nil { + if err := hypersql.BlobAttrsInsert(conn, blobID, "group/member", "", true, accid, role, hlc); err != nil { return err } - case "ipfs": - // TODO: parse ipfs 
links } + } + + return nil +} + +func (bs *Storage) indexDocumentChange(conn *sqlite.Conn, blobID int64, author core.Principal, c cid.Cid, ch Change) error { + hlc := ch.HLCTime.Pack() + + // Validate document change. + { + if ch.Patch == nil { + return fmt.Errorf("document change must have a patch") + } + + pkdb, err := hypersql.LookupEnsure(conn, storage.LookupPublicKey, author) + if err != nil { + return err + } + + switch ch.Action { + case "Create": + if len(ch.Deps) != 0 { + return fmt.Errorf("document change with Create action must have no deps, got = %d", len(ch.Deps)) + } + + nonce, ok := ch.Patch["nonce"].([]byte) + if !ok { + return fmt.Errorf("change that creates a document must have a nonce to verify the ID") + } + + ct, ok := ch.Patch["createTime"].(int) + if !ok { + return fmt.Errorf("change that creates a document must have a createTime field in its patch") + } + + ownerField, ok := ch.Patch["owner"].([]byte) + if !ok { + return fmt.Errorf("change that creates a document must have an owner field in its patch") + } + + if !bytes.Equal(ownerField, author) { + return fmt.Errorf("owner field in the create change must correspond with the author of the change") + } + + id, _ := NewUnforgeableID(ownerField, nonce, int64(ct)) + if ch.Entity.TrimPrefix("hm://d/") != id { + return fmt.Errorf("failed to verify document ID %s with a nonce", ch.Entity) + } + + if err := hypersql.BlobAttrsInsert(conn, blobID, "resource/owner", "", true, pkdb, nil, hlc); err != nil { + return err + } + if err := hypersql.BlobAttrsInsert(conn, blobID, "resource/create-time", "", false, ct, nil, 0); err != nil { + return err + } + case "Update": + if len(ch.Deps) == 0 { + return fmt.Errorf("group change with Update action must have at least one dep") + } + + if ch.Patch["nonce"] != nil { + return fmt.Errorf("update change must not have nonce set") + } + + if ch.Patch["owner"] != nil { + return fmt.Errorf("update change must not have owner field") + } + if ch.Patch["createTime"] != 
nil { + return fmt.Errorf("update change must not have createTime field") + } + default: + return fmt.Errorf("unknown document change action %q", ch.Action) + } + } + + blocks, ok := ch.Patch["blocks"].(map[string]any) + if !ok { return nil } @@ -317,17 +555,17 @@ func (bs *Storage) indexLinks(conn *sqlite.Conn, blobID int64, c cid.Cid, ch Cha blk.Id = id blk.Revision = c.String() - if err := handleURL(blk.Id, blk.Type, blk.Ref); err != nil { + if err := bs.indexURL(conn, blobID, "href/"+blk.Type, blk.Id, blk.Ref, hlc); err != nil { return err } for _, ann := range blk.Annotations { - if err := handleURL(blk.Id, ann.Type, ann.Ref); err != nil { + if err := bs.indexURL(conn, blobID, "href/"+ann.Type, blk.Id, ann.Ref, hlc); err != nil { return err } // Legacy behavior. We only care about annotations with URL attribute. - if err := handleURL(blk.Id, ann.Type, ann.Attributes["url"]); err != nil { + if err := bs.indexURL(conn, blobID, "href/"+ann.Type, blk.Id, ann.Attributes["url"], hlc); err != nil { return err } } @@ -337,7 +575,78 @@ func (bs *Storage) indexLinks(conn *sqlite.Conn, blobID int64, c cid.Cid, ch Cha } type LinkData struct { - SourceBlock string `json:"b,omitempty"` TargetFragment string `json:"f,omitempty"` TargetVersion string `json:"v,omitempty"` } + +func (bs *Storage) indexURL(conn *sqlite.Conn, blobID int64, key, anchor, rawURL string, ts int64) error { + if rawURL == "" { + return nil + } + + u, err := url.Parse(rawURL) + if err != nil { + bs.log.Warn("FailedToParseURL", zap.String("url", rawURL), zap.Error(err)) + return nil + } + + switch u.Scheme { + case "hm": + ld := LinkData{ + TargetFragment: u.Fragment, + TargetVersion: u.Query().Get("v"), + } + + target := EntityID("hm://" + u.Host + u.Path) + + targetID, err := bs.ensureEntity(conn, target) + if err != nil { + return err + } + + ldjson, err := json.Marshal(ld) + if err != nil { + return fmt.Errorf("failed to encode link data: %w", err) + } + + if err := hypersql.BlobAttrsInsert(conn, + 
blobID, + key, + anchor, + true, + targetID, + ldjson, + ts, + ); err != nil { + return err + } + + vblobs, err := Version(ld.TargetVersion).Parse() + if err != nil { + return err + } + + for _, vcid := range vblobs { + codec, hash := ipfs.DecodeCID(vcid) + + res, err := hypersql.BlobsGetSize(conn, hash) + if err != nil { + return err + } + if res.BlobsID == 0 { + r, err := hypersql.BlobsInsert(conn, 0, hash, int64(codec), nil, -1) + if err != nil { + return err + } + res.BlobsID = r.BlobsID + } + if err := hypersql.BlobLinksInsertOrIgnore(conn, blobID, key, res.BlobsID); err != nil { + return err + } + } + case "ipfs": + // TODO: parse ipfs links + } + + return nil +} diff --git a/backend/hyper/terra.go b/backend/hyper/terra.go index f626f239be..4d16a84c79 100644 --- a/backend/hyper/terra.go +++ b/backend/hyper/terra.go @@ -38,8 +38,8 @@ func init() { // Available types. const ( - TypeKeyDelegation BlobType = "hyperdocs:KeyDelegation" - TypeChange BlobType = "hyperdocs:Change" + TypeKeyDelegation BlobType = "KeyDelegation" + TypeChange BlobType = "Change" ) // Delegation purposes. @@ -132,6 +132,9 @@ type Change struct { // on which behalf this blob is signed. Delegation cid.Cid `refmt:"delegation,omitempty"` // points to the delegation where we can get the account id + // Action is an option machine-readable description of an action that Change describes. + Action string `refmt:"action,omitempty"` + // Message is an optional human readable message. Message string `refmt:"message,omitempty"` diff --git a/backend/lndhub/lndhubsql/lndhub.go b/backend/lndhub/lndhubsql/lndhub.go index 52c368cf62..e08d25a008 100644 --- a/backend/lndhub/lndhubsql/lndhub.go +++ b/backend/lndhub/lndhubsql/lndhub.go @@ -81,8 +81,8 @@ func SetLoginSignature(conn *sqlite.Conn, signature string) error { // signed login message to access to account settings in lndhub.go. 
func GetLoginSignature(conn *sqlite.Conn) (string, error) { res, err := getLoginSignature(conn, LoginSignatureKey) - if err == nil && res.GlobalMetaValue == "" { + if err == nil && res.KVValue == "" { return "", fmt.Errorf("Could not find any signature associated with self node: %w", ErrEmptyResult) } - return res.GlobalMetaValue, err + return res.KVValue, err } diff --git a/backend/lndhub/lndhubsql/queries.gen.go b/backend/lndhub/lndhubsql/queries.gen.go index a49a24721b..d2e60df8f0 100644 --- a/backend/lndhub/lndhubsql/queries.gen.go +++ b/backend/lndhub/lndhubsql/queries.gen.go @@ -153,13 +153,13 @@ func setToken(conn *sqlite.Conn, walletsToken []byte, walletsID string) error { return err } -func setLoginSignature(conn *sqlite.Conn, globalMetaKey string, globalMetaValue string) error { - const query = `INSERT OR REPLACE INTO global_meta (key, value) -VALUES (:globalMetaKey, :globalMetaValue)` +func setLoginSignature(conn *sqlite.Conn, kvKey string, kvValue string) error { + const query = `INSERT OR REPLACE INTO kv (key, value) +VALUES (:kvKey, :kvValue)` before := func(stmt *sqlite.Stmt) { - stmt.SetText(":globalMetaKey", globalMetaKey) - stmt.SetText(":globalMetaValue", globalMetaValue) + stmt.SetText(":kvKey", kvKey) + stmt.SetText(":kvValue", kvValue) } onStep := func(i int, stmt *sqlite.Stmt) error { @@ -175,16 +175,16 @@ VALUES (:globalMetaKey, :globalMetaValue)` } type getLoginSignatureResult struct { - GlobalMetaValue string + KVValue string } -func getLoginSignature(conn *sqlite.Conn, globalMetaKey string) (getLoginSignatureResult, error) { - const query = `SELECT global_meta.value FROM global_meta WHERE global_meta.key = :globalMetaKey` +func getLoginSignature(conn *sqlite.Conn, kvKey string) (getLoginSignatureResult, error) { + const query = `SELECT kv.value FROM kv WHERE kv.key = :kvKey` var out getLoginSignatureResult before := func(stmt *sqlite.Stmt) { - stmt.SetText(":globalMetaKey", globalMetaKey) + stmt.SetText(":kvKey", kvKey) } onStep := func(i 
int, stmt *sqlite.Stmt) error { @@ -192,7 +192,7 @@ func getLoginSignature(conn *sqlite.Conn, globalMetaKey string) (getLoginSignatu return errors.New("getLoginSignature: more than one result return for a single-kind query") } - out.GlobalMetaValue = stmt.ColumnText(0) + out.KVValue = stmt.ColumnText(0) return nil } diff --git a/backend/lndhub/lndhubsql/queries.gensum b/backend/lndhub/lndhubsql/queries.gensum index a8c0f0db66..70a3197366 100644 --- a/backend/lndhub/lndhubsql/queries.gensum +++ b/backend/lndhub/lndhubsql/queries.gensum @@ -1,2 +1,2 @@ -srcs: 7f2b99ad612885bb1cfbc9e87c9a541e -outs: a6a975d609a045cee9dfaee73cfc4b70 +srcs: 9082e81fac69c5d483e9205709bd9c02 +outs: daf4133d4620e229ad67c0f83617638c diff --git a/backend/lndhub/lndhubsql/queries.go b/backend/lndhub/lndhubsql/queries.go index dd32fe7969..7b09bad4e1 100644 --- a/backend/lndhub/lndhubsql/queries.go +++ b/backend/lndhub/lndhubsql/queries.go @@ -53,21 +53,21 @@ func generateQueries() error { ") WHERE", storage.WalletsID, "=", qb.VarCol(storage.WalletsID), ), qb.MakeQuery(storage.Schema, "setLoginSignature", sqlitegen.QueryKindExec, - "INSERT OR REPLACE INTO", storage.GlobalMeta, qb.ListColShort( - storage.GlobalMetaKey, - storage.GlobalMetaValue, + "INSERT OR REPLACE INTO", storage.KV, qb.ListColShort( + storage.KVKey, + storage.KVValue, ), qb.Line, "VALUES", qb.List( - qb.VarCol(storage.GlobalMetaKey), - qb.VarCol(storage.GlobalMetaValue), + qb.VarCol(storage.KVKey), + qb.VarCol(storage.KVValue), ), ), qb.MakeQuery(storage.Schema, "getLoginSignature", sqlitegen.QueryKindSingle, "SELECT", qb.Results( - qb.ResultCol(storage.GlobalMetaValue), + qb.ResultCol(storage.KVValue), ), - "FROM", storage.GlobalMeta, - "WHERE", storage.GlobalMetaKey, "=", qb.VarCol(storage.GlobalMetaKey), + "FROM", storage.KV, + "WHERE", storage.KVKey, "=", qb.VarCol(storage.KVKey), ), ) if err != nil { diff --git a/backend/mttnet/list_objects.go b/backend/mttnet/list_objects.go index 881fc01646..70bdb464d4 100644 --- 
a/backend/mttnet/list_objects.go +++ b/backend/mttnet/list_objects.go @@ -62,7 +62,7 @@ func (srv *Server) ListObjects(ctx context.Context, in *p2p.ListObjectsRequest) } for _, l := range list { - eid := hyper.EntityID(l.HDChangesViewEntity) + eid := hyper.EntityID(l.ChangesViewEntity) obj, ok := objs[eid] if !ok { obj = &p2p.Object{ @@ -71,7 +71,7 @@ func (srv *Server) ListObjects(ctx context.Context, in *p2p.ListObjectsRequest) objs[eid] = obj } - c := cid.NewCidV1(uint64(l.HDChangesViewCodec), l.HDChangesViewMultihash) + c := cid.NewCidV1(uint64(l.ChangesViewCodec), l.ChangesViewMultihash) obj.ChangeIds = append(obj.ChangeIds, c.String()) } diff --git a/backend/mttnet/mttnet.go b/backend/mttnet/mttnet.go index 12af008188..acc08fda75 100644 --- a/backend/mttnet/mttnet.go +++ b/backend/mttnet/mttnet.go @@ -46,8 +46,8 @@ import ( // Protocol values. const ( - protocolPrefix = "/hyperdocs/" - protocolVersion = "0.0.6" + protocolPrefix = "/hypermedia/" + protocolVersion = "0.1.0" ProtocolID protocol.ID = protocolPrefix + protocolVersion @@ -194,7 +194,7 @@ func NewServer(ctx context.Context, siteCfg config.Site, node *future.ReadOnly[* if err != nil { return err } - link := currentLink.GlobalMetaValue + link := currentLink.KVValue if link == "" || siteCfg.Hostname != strings.Split(link, "/secret-invite/")[0] { link = siteCfg.Hostname + "/secret-invite/" + base64.RawURLEncoding.EncodeToString(randomBytes) if err := sitesql.SetSiteRegistrationLink(conn, link); err != nil { @@ -210,7 +210,7 @@ func NewServer(ctx context.Context, siteCfg config.Site, node *future.ReadOnly[* return err } - if title.GlobalMetaValue != siteCfg.Title { + if title.KVValue != siteCfg.Title { if err := sitesql.SetSiteTitle(conn, siteCfg.Title); err != nil { return err } diff --git a/backend/mttnet/providing.go b/backend/mttnet/providing.go index 998fa8c423..276e56d253 100644 --- a/backend/mttnet/providing.go +++ b/backend/mttnet/providing.go @@ -70,9 +70,9 @@ FROM ` + storage.T_PublicBlobsView 
+ `;` } for _, e := range entities { - c, err := hyper.EntityID(e.HDEntitiesEID).CID() + c, err := hyper.EntityID(e.EntitiesEID).CID() if err != nil { - log.Warn("BadEntityID", zap.Error(err), zap.String("entity", e.HDEntitiesEID)) + log.Warn("BadEntityID", zap.Error(err), zap.String("entity", e.EntitiesEID)) return } diff --git a/backend/mttnet/site.go b/backend/mttnet/site.go index b8214d644c..41fba9e100 100644 --- a/backend/mttnet/site.go +++ b/backend/mttnet/site.go @@ -252,8 +252,8 @@ func (srv *Server) GetSiteInfo(ctx context.Context, in *site.GetSiteInfoRequest) } return &site.SiteInfo{ Hostname: srv.hostname, - Title: title.GlobalMetaValue, - Description: description.GlobalMetaValue, + Title: title.KVValue, + Description: description.KVValue, Owner: srv.owner.String(), }, nil } @@ -447,12 +447,12 @@ func (srv *Server) PublishDocument(ctx context.Context, in *site.PublishDocument return nil, fmt.Errorf("can't proxy: local p2p node is not ready yet: %w", err) } - docEntity := hyper.EntityID("hd://d/" + in.DocumentId) + docEntity := hyper.EntityID("hm://d/" + in.DocumentId) toSync := []hyper.EntityID{docEntity} for _, ref := range in.ReferencedDocuments { - toSync = append(toSync, hyper.EntityID("hd://d/"+ref.DocumentId)) + toSync = append(toSync, hyper.EntityID("hm://d/"+ref.DocumentId)) } ctx, cancel := context.WithTimeout(ctx, time.Duration(7*time.Second)) @@ -481,10 +481,10 @@ func (srv *Server) PublishDocument(ctx context.Context, in *site.PublishDocument return err } - if record.HDEntitiesID != 0 { - recordEntity := hyper.EntityID(record.HDEntitiesEID) - if !recordEntity.HasPrefix("hd://d/") { - return fmt.Errorf("invalid entity ID for mintter document: %s", record.HDEntitiesEID) + if record.EntitiesID != 0 { + recordEntity := hyper.EntityID(record.EntitiesEID) + if !recordEntity.HasPrefix("hm://d/") { + return fmt.Errorf("invalid entity ID for mintter document: %s", record.EntitiesEID) } if recordEntity == docEntity && record.WebPublicationsVersion == 
in.Version { @@ -493,7 +493,7 @@ func (srv *Server) PublishDocument(ctx context.Context, in *site.PublishDocument if recordEntity != docEntity { return fmt.Errorf("path %q is already taken by a different entity %q, can't use it for document %q", in.Path, recordEntity, in.DocumentId) } - if err = sitesql.RemoveWebPublicationRecord(conn, record.HDEntitiesEID, record.WebPublicationsVersion); err != nil { + if err = sitesql.RemoveWebPublicationRecord(conn, record.EntitiesEID, record.WebPublicationsVersion); err != nil { return fmt.Errorf("could not remove previous version [%s] in the same path: %w", record.WebPublicationsVersion, err) } } @@ -533,7 +533,7 @@ func (srv *Server) UnpublishDocument(ctx context.Context, in *site.UnpublishDocu } defer cancel() - eid := hyper.EntityID("hd://d/" + in.DocumentId) + eid := hyper.EntityID("hm://d/" + in.DocumentId) records, err := sitesql.GetWebPublicationsByID(conn, string(eid)) if err != nil { @@ -589,9 +589,9 @@ func (srv *Server) ListWebPublications(ctx context.Context, in *site.ListWebPubl } for _, record := range records { - docid := hyper.EntityID(record.HDEntitiesEID).TrimPrefix("hd://d/") - if docid == record.HDEntitiesEID { - return nil, fmt.Errorf("BUG: invalid entity ID %q for a document in web publications", record.HDEntitiesEID) + docid := hyper.EntityID(record.EntitiesEID).TrimPrefix("hm://d/") + if docid == record.EntitiesEID { + return nil, fmt.Errorf("BUG: invalid entity ID %q for a document in web publications", record.EntitiesEID) } if in.DocumentId != "" && in.DocumentId != docid { @@ -638,7 +638,7 @@ func (srv *Server) GetPath(ctx context.Context, in *site.GetPathRequest) (*site. 
return nil, fmt.Errorf("Could not get record for path [%s]: %w", in.Path, err) } ret, err := srv.localFunctions.GetPublication(ctx, &site.GetPublicationRequest{ - DocumentId: hyper.EntityID(record.HDEntitiesEID).TrimPrefix("hd://d/"), + DocumentId: hyper.EntityID(record.EntitiesEID).TrimPrefix("hm://d/"), Version: record.WebPublicationsVersion, LocalOnly: true, }) diff --git a/backend/mttnet/sitesV2.go b/backend/mttnet/sitesV2.go index 10677db4ee..29b6e14236 100644 --- a/backend/mttnet/sitesV2.go +++ b/backend/mttnet/sitesV2.go @@ -52,7 +52,7 @@ func (srv *Server) CreateSite(ctx context.Context, in *sitesV2.CreateSiteRequest return nil, err } - if link.GlobalMetaValue != in.Link { + if link.KVValue != in.Link { return nil, fmt.Errorf("Provided link not valid") } diff --git a/backend/mttnet/sitesql/queries.gen.go b/backend/mttnet/sitesql/queries.gen.go index b546d5c01b..350540b1df 100644 --- a/backend/mttnet/sitesql/queries.gen.go +++ b/backend/mttnet/sitesql/queries.gen.go @@ -14,7 +14,7 @@ var _ = errors.New func RegisterSite(conn *sqlite.Conn, servedSitesHostname string, group_eid string, servedSitesVersion string, publicKeysPrincipal []byte) error { const query = `INSERT OR REPLACE INTO served_sites (hostname, group_id, version, owner_id) -VALUES (:servedSitesHostname, (SELECT hd_entities.id FROM hd_entities WHERE hd_entities.eid = :group_eid), :servedSitesVersion, (SELECT public_keys.id FROM public_keys WHERE public_keys.principal = :publicKeysPrincipal))` +VALUES (:servedSitesHostname, (SELECT entities.id FROM entities WHERE entities.eid = :group_eid), :servedSitesVersion, (SELECT public_keys.id FROM public_keys WHERE public_keys.principal = :publicKeysPrincipal))` before := func(stmt *sqlite.Stmt) { stmt.SetText(":servedSitesHostname", servedSitesHostname) @@ -36,15 +36,15 @@ VALUES (:servedSitesHostname, (SELECT hd_entities.id FROM hd_entities WHERE hd_e } type GetSiteInfoResult struct { - HDEntitiesEID string + EntitiesEID string ServedSitesVersion string 
PublicKeysPrincipal []byte } func GetSiteInfo(conn *sqlite.Conn, servedSitesHostname string) (GetSiteInfoResult, error) { - const query = `SELECT hd_entities.eid, served_sites.version, public_keys.principal + const query = `SELECT entities.eid, served_sites.version, public_keys.principal FROM served_sites -JOIN hd_entities ON hd_entities.id = served_sites.group_id +JOIN entities ON entities.id = served_sites.group_id JOIN public_keys ON public_keys.principal = served_sites.owner_id WHERE served_sites.hostname = :servedSitesHostname` @@ -59,7 +59,7 @@ WHERE served_sites.hostname = :servedSitesHostname` return errors.New("GetSiteInfo: more than one result return for a single-kind query") } - out.HDEntitiesEID = stmt.ColumnText(0) + out.EntitiesEID = stmt.ColumnText(0) out.ServedSitesVersion = stmt.ColumnText(1) out.PublicKeysPrincipal = stmt.ColumnBytes(2) return nil @@ -191,7 +191,7 @@ JOIN public_keys ON public_keys.id = sites.account_id` } func SetSiteRegistrationLink(conn *sqlite.Conn, link string) error { - const query = `INSERT OR REPLACE INTO global_meta (key, value) + const query = `INSERT OR REPLACE INTO kv (key, value) VALUES ('site_registration_link', :link)` before := func(stmt *sqlite.Stmt) { @@ -211,11 +211,11 @@ VALUES ('site_registration_link', :link)` } type GetSiteRegistrationLinkResult struct { - GlobalMetaValue string + KVValue string } func GetSiteRegistrationLink(conn *sqlite.Conn) (GetSiteRegistrationLinkResult, error) { - const query = `SELECT global_meta.value FROM global_meta WHERE global_meta.key ='site_registration_link'` + const query = `SELECT kv.value FROM kv WHERE kv.key ='site_registration_link'` var out GetSiteRegistrationLinkResult @@ -227,7 +227,7 @@ func GetSiteRegistrationLink(conn *sqlite.Conn) (GetSiteRegistrationLinkResult, return errors.New("GetSiteRegistrationLink: more than one result return for a single-kind query") } - out.GlobalMetaValue = stmt.ColumnText(0) + out.KVValue = stmt.ColumnText(0) return nil } @@ -240,7 
+240,7 @@ func GetSiteRegistrationLink(conn *sqlite.Conn) (GetSiteRegistrationLinkResult, } func SetSiteTitle(conn *sqlite.Conn, title string) error { - const query = `INSERT OR REPLACE INTO global_meta (key, value) + const query = `INSERT OR REPLACE INTO kv (key, value) VALUES ('site_title', :title)` before := func(stmt *sqlite.Stmt) { @@ -260,11 +260,11 @@ VALUES ('site_title', :title)` } type GetSiteTitleResult struct { - GlobalMetaValue string + KVValue string } func GetSiteTitle(conn *sqlite.Conn) (GetSiteTitleResult, error) { - const query = `SELECT global_meta.value FROM global_meta WHERE global_meta.key ='site_title'` + const query = `SELECT kv.value FROM kv WHERE kv.key ='site_title'` var out GetSiteTitleResult @@ -276,7 +276,7 @@ func GetSiteTitle(conn *sqlite.Conn) (GetSiteTitleResult, error) { return errors.New("GetSiteTitle: more than one result return for a single-kind query") } - out.GlobalMetaValue = stmt.ColumnText(0) + out.KVValue = stmt.ColumnText(0) return nil } @@ -289,7 +289,7 @@ func GetSiteTitle(conn *sqlite.Conn) (GetSiteTitleResult, error) { } func SetSiteDescription(conn *sqlite.Conn, description string) error { - const query = `INSERT OR REPLACE INTO global_meta (key, value) + const query = `INSERT OR REPLACE INTO kv (key, value) VALUES ('site_description', :description)` before := func(stmt *sqlite.Stmt) { @@ -309,11 +309,11 @@ VALUES ('site_description', :description)` } type GetSiteDescriptionResult struct { - GlobalMetaValue string + KVValue string } func GetSiteDescription(conn *sqlite.Conn) (GetSiteDescriptionResult, error) { - const query = `SELECT global_meta.value FROM global_meta WHERE global_meta.key ='site_description'` + const query = `SELECT kv.value FROM kv WHERE kv.key ='site_description'` var out GetSiteDescriptionResult @@ -325,7 +325,7 @@ func GetSiteDescription(conn *sqlite.Conn) (GetSiteDescriptionResult, error) { return errors.New("GetSiteDescription: more than one result return for a single-kind query") } - 
out.GlobalMetaValue = stmt.ColumnText(0) + out.KVValue = stmt.ColumnText(0) return nil } @@ -567,11 +567,11 @@ VALUES (:webPublicationsEID, :webPublicationsVersion, :webPublicationsPath)` return err } -func RemoveWebPublicationRecord(conn *sqlite.Conn, hdEntitiesEID string, webPublicationsVersion string) error { - const query = `DELETE FROM web_publications WHERE web_publications.eid = :hdEntitiesEID AND web_publications.version = :webPublicationsVersion` +func RemoveWebPublicationRecord(conn *sqlite.Conn, entitiesEID string, webPublicationsVersion string) error { + const query = `DELETE FROM web_publications WHERE web_publications.eid = :entitiesEID AND web_publications.version = :webPublicationsVersion` before := func(stmt *sqlite.Stmt) { - stmt.SetText(":hdEntitiesEID", hdEntitiesEID) + stmt.SetText(":entitiesEID", entitiesEID) stmt.SetText(":webPublicationsVersion", webPublicationsVersion) } @@ -588,16 +588,16 @@ func RemoveWebPublicationRecord(conn *sqlite.Conn, hdEntitiesEID string, webPubl } type ListWebPublicationsResult struct { - HDEntitiesID int64 - HDEntitiesEID string + EntitiesID int64 + EntitiesEID string WebPublicationsVersion string WebPublicationsPath string } func ListWebPublications(conn *sqlite.Conn) ([]ListWebPublicationsResult, error) { - const query = `SELECT hd_entities.id, hd_entities.eid, web_publications.version, web_publications.path + const query = `SELECT entities.id, entities.eid, web_publications.version, web_publications.path FROM web_publications -JOIN hd_entities ON web_publications.eid = hd_entities.eid` +JOIN entities ON web_publications.eid = entities.eid` var out []ListWebPublicationsResult @@ -606,8 +606,8 @@ JOIN hd_entities ON web_publications.eid = hd_entities.eid` onStep := func(i int, stmt *sqlite.Stmt) error { out = append(out, ListWebPublicationsResult{ - HDEntitiesID: stmt.ColumnInt64(0), - HDEntitiesEID: stmt.ColumnText(1), + EntitiesID: stmt.ColumnInt64(0), + EntitiesEID: stmt.ColumnText(1), WebPublicationsVersion: 
stmt.ColumnText(2), WebPublicationsPath: stmt.ColumnText(3), }) @@ -624,16 +624,16 @@ JOIN hd_entities ON web_publications.eid = hd_entities.eid` } type GetWebPublicationRecordByPathResult struct { - HDEntitiesID int64 - HDEntitiesEID string + EntitiesID int64 + EntitiesEID string WebPublicationsVersion string WebPublicationsPath string } func GetWebPublicationRecordByPath(conn *sqlite.Conn, webPublicationsPath string) (GetWebPublicationRecordByPathResult, error) { - const query = `SELECT hd_entities.id, hd_entities.eid, web_publications.version, web_publications.path + const query = `SELECT entities.id, entities.eid, web_publications.version, web_publications.path FROM web_publications -JOIN hd_entities ON web_publications.eid = hd_entities.eid WHERE web_publications.path = :webPublicationsPath` +JOIN entities ON web_publications.eid = entities.eid WHERE web_publications.path = :webPublicationsPath` var out GetWebPublicationRecordByPathResult @@ -646,8 +646,8 @@ JOIN hd_entities ON web_publications.eid = hd_entities.eid WHERE web_publication return errors.New("GetWebPublicationRecordByPath: more than one result return for a single-kind query") } - out.HDEntitiesID = stmt.ColumnInt64(0) - out.HDEntitiesEID = stmt.ColumnText(1) + out.EntitiesID = stmt.ColumnInt64(0) + out.EntitiesEID = stmt.ColumnText(1) out.WebPublicationsVersion = stmt.ColumnText(2) out.WebPublicationsPath = stmt.ColumnText(3) return nil @@ -662,27 +662,27 @@ JOIN hd_entities ON web_publications.eid = hd_entities.eid WHERE web_publication } type GetWebPublicationsByIDResult struct { - HDEntitiesID int64 - HDEntitiesEID string + EntitiesID int64 + EntitiesEID string WebPublicationsVersion string WebPublicationsPath string } -func GetWebPublicationsByID(conn *sqlite.Conn, hdEntitiesEID string) ([]GetWebPublicationsByIDResult, error) { - const query = `SELECT hd_entities.id, hd_entities.eid, web_publications.version, web_publications.path +func GetWebPublicationsByID(conn *sqlite.Conn, entitiesEID 
string) ([]GetWebPublicationsByIDResult, error) { + const query = `SELECT entities.id, entities.eid, web_publications.version, web_publications.path FROM web_publications -JOIN hd_entities ON web_publications.eid = hd_entities.eid WHERE hd_entities.eid = :hdEntitiesEID` +JOIN entities ON web_publications.eid = entities.eid WHERE entities.eid = :entitiesEID` var out []GetWebPublicationsByIDResult before := func(stmt *sqlite.Stmt) { - stmt.SetText(":hdEntitiesEID", hdEntitiesEID) + stmt.SetText(":entitiesEID", entitiesEID) } onStep := func(i int, stmt *sqlite.Stmt) error { out = append(out, GetWebPublicationsByIDResult{ - HDEntitiesID: stmt.ColumnInt64(0), - HDEntitiesEID: stmt.ColumnText(1), + EntitiesID: stmt.ColumnInt64(0), + EntitiesEID: stmt.ColumnText(1), WebPublicationsVersion: stmt.ColumnText(2), WebPublicationsPath: stmt.ColumnText(3), }) diff --git a/backend/mttnet/sitesql/queries.gensum b/backend/mttnet/sitesql/queries.gensum index efca51966d..5ba8e91745 100644 --- a/backend/mttnet/sitesql/queries.gensum +++ b/backend/mttnet/sitesql/queries.gensum @@ -1,2 +1,2 @@ -srcs: 909c9e4910c823d89fa014daf2aee922 -outs: 3e904b889e69db630e0c1ab0c77e3fbf +srcs: 84683062c33b87172439a32b124ba8ec +outs: d05d8ee6ec5ace4e550935fcd4f2b3a3 diff --git a/backend/mttnet/sitesql/queries.go b/backend/mttnet/sitesql/queries.go index 3193ae6c40..a3803a083a 100644 --- a/backend/mttnet/sitesql/queries.go +++ b/backend/mttnet/sitesql/queries.go @@ -32,9 +32,9 @@ func generateQueries() error { "VALUES", qb.List( qb.VarCol(s.ServedSitesHostname), qb.SubQuery( - "SELECT", s.HDEntitiesID, - "FROM", s.HDEntities, - "WHERE", s.HDEntitiesEID, "=", qb.Var("group_eid", sqlitegen.TypeText), + "SELECT", s.EntitiesID, + "FROM", s.Entities, + "WHERE", s.EntitiesEID, "=", qb.Var("group_eid", sqlitegen.TypeText), ), qb.VarCol(s.ServedSitesVersion), qb.SubQuery( @@ -47,12 +47,12 @@ func generateQueries() error { qb.MakeQuery(s.Schema, "GetSiteInfo", sqlitegen.QueryKindSingle, "SELECT", qb.Results( - 
qb.ResultCol(s.HDEntitiesEID), + qb.ResultCol(s.EntitiesEID), qb.ResultCol(s.ServedSitesVersion), qb.ResultCol(s.PublicKeysPrincipal), ), '\n', "FROM", s.ServedSites, '\n', - "JOIN", s.HDEntities, "ON", s.HDEntitiesID, "=", s.ServedSitesGroupID, '\n', + "JOIN", s.Entities, "ON", s.EntitiesID, "=", s.ServedSitesGroupID, '\n', "JOIN", s.PublicKeys, "ON", s.PublicKeysPrincipal, "=", s.ServedSitesOwnerID, '\n', "WHERE", s.ServedSitesHostname, "=", qb.VarCol(s.ServedSitesHostname), ), @@ -105,9 +105,9 @@ func generateQueries() error { ), qb.MakeQuery(s.Schema, "SetSiteRegistrationLink", sqlitegen.QueryKindExec, - "INSERT OR REPLACE INTO", s.GlobalMeta, qb.ListColShort( - s.GlobalMetaKey, - s.GlobalMetaValue, + "INSERT OR REPLACE INTO", s.KV, qb.ListColShort( + s.KVKey, + s.KVValue, ), '\n', "VALUES", qb.List( "'"+SiteRegistrationLinkKey+"'", @@ -117,16 +117,16 @@ func generateQueries() error { qb.MakeQuery(s.Schema, "GetSiteRegistrationLink", sqlitegen.QueryKindSingle, "SELECT", qb.Results( - qb.ResultCol(s.GlobalMetaValue), + qb.ResultCol(s.KVValue), ), - "FROM", s.GlobalMeta, - "WHERE", s.GlobalMetaKey, "='"+SiteRegistrationLinkKey+"'", + "FROM", s.KV, + "WHERE", s.KVKey, "='"+SiteRegistrationLinkKey+"'", ), qb.MakeQuery(s.Schema, "SetSiteTitle", sqlitegen.QueryKindExec, - "INSERT OR REPLACE INTO", s.GlobalMeta, qb.ListColShort( - s.GlobalMetaKey, - s.GlobalMetaValue, + "INSERT OR REPLACE INTO", s.KV, qb.ListColShort( + s.KVKey, + s.KVValue, ), '\n', "VALUES", qb.List( "'"+SiteTitleKey+"'", @@ -136,16 +136,16 @@ func generateQueries() error { qb.MakeQuery(s.Schema, "GetSiteTitle", sqlitegen.QueryKindSingle, "SELECT", qb.Results( - qb.ResultCol(s.GlobalMetaValue), + qb.ResultCol(s.KVValue), ), - "FROM", s.GlobalMeta, - "WHERE", s.GlobalMetaKey, "='"+SiteTitleKey+"'", + "FROM", s.KV, + "WHERE", s.KVKey, "='"+SiteTitleKey+"'", ), qb.MakeQuery(s.Schema, "SetSiteDescription", sqlitegen.QueryKindExec, - "INSERT OR REPLACE INTO", s.GlobalMeta, qb.ListColShort( - 
s.GlobalMetaKey, - s.GlobalMetaValue, + "INSERT OR REPLACE INTO", s.KV, qb.ListColShort( + s.KVKey, + s.KVValue, ), '\n', "VALUES", qb.List( "'"+SiteDescriptionKey+"'", @@ -155,10 +155,10 @@ func generateQueries() error { qb.MakeQuery(s.Schema, "GetSiteDescription", sqlitegen.QueryKindSingle, "SELECT", qb.Results( - qb.ResultCol(s.GlobalMetaValue), + qb.ResultCol(s.KVValue), ), - "FROM", s.GlobalMeta, - "WHERE", s.GlobalMetaKey, "='"+SiteDescriptionKey+"'", + "FROM", s.KV, + "WHERE", s.KVKey, "='"+SiteDescriptionKey+"'", ), qb.MakeQuery(s.Schema, "AddToken", sqlitegen.QueryKindExec, @@ -242,45 +242,45 @@ func generateQueries() error { qb.MakeQuery(s.Schema, "RemoveWebPublicationRecord", sqlitegen.QueryKindExec, "DELETE FROM", s.WebPublications, - "WHERE", s.WebPublicationsEID, "=", qb.VarCol(s.HDEntitiesEID), + "WHERE", s.WebPublicationsEID, "=", qb.VarCol(s.EntitiesEID), "AND", s.WebPublicationsVersion, "=", qb.VarCol(s.WebPublicationsVersion), ), qb.MakeQuery(s.Schema, "ListWebPublications", sqlitegen.QueryKindMany, "SELECT", qb.Results( - qb.ResultCol(s.HDEntitiesID), - qb.ResultCol(s.HDEntitiesEID), + qb.ResultCol(s.EntitiesID), + qb.ResultCol(s.EntitiesEID), qb.ResultCol(s.WebPublicationsVersion), qb.ResultCol(s.WebPublicationsPath), ), '\n', "FROM", s.WebPublications, '\n', - "JOIN", s.HDEntities, "ON", s.WebPublicationsEID, "=", s.HDEntitiesEID, + "JOIN", s.Entities, "ON", s.WebPublicationsEID, "=", s.EntitiesEID, ), qb.MakeQuery(s.Schema, "GetWebPublicationRecordByPath", sqlitegen.QueryKindSingle, "SELECT", qb.Results( - qb.ResultCol(s.HDEntitiesID), - qb.ResultCol(s.HDEntitiesEID), + qb.ResultCol(s.EntitiesID), + qb.ResultCol(s.EntitiesEID), qb.ResultCol(s.WebPublicationsVersion), qb.ResultCol(s.WebPublicationsPath), ), '\n', "FROM", s.WebPublications, '\n', - "JOIN", s.HDEntities, "ON", s.WebPublicationsEID, "=", s.HDEntitiesEID, + "JOIN", s.Entities, "ON", s.WebPublicationsEID, "=", s.EntitiesEID, "WHERE", s.WebPublicationsPath, "=", 
qb.VarCol(s.WebPublicationsPath), ), qb.MakeQuery(s.Schema, "GetWebPublicationsByID", sqlitegen.QueryKindMany, "SELECT", qb.Results( - qb.ResultCol(s.HDEntitiesID), - qb.ResultCol(s.HDEntitiesEID), + qb.ResultCol(s.EntitiesID), + qb.ResultCol(s.EntitiesEID), qb.ResultCol(s.WebPublicationsVersion), qb.ResultCol(s.WebPublicationsPath), ), '\n', "FROM", s.WebPublications, '\n', - "JOIN", s.HDEntities, "ON", s.WebPublicationsEID, "=", s.HDEntitiesEID, - "WHERE", s.HDEntitiesEID, "=", qb.VarCol(s.HDEntitiesEID), + "JOIN", s.Entities, "ON", s.WebPublicationsEID, "=", s.EntitiesEID, + "WHERE", s.EntitiesEID, "=", qb.VarCol(s.EntitiesEID), ), ) if err != nil { diff --git a/backend/pkg/maputil/maputil.go b/backend/pkg/maputil/maputil.go index 8c7efa4c1c..f74fb8c68a 100644 --- a/backend/pkg/maputil/maputil.go +++ b/backend/pkg/maputil/maputil.go @@ -27,3 +27,23 @@ func Delete(v map[string]any, path []string) { delete(v, path[len(path)-1]) } + +func Get(v map[string]any, path []string) (value any, ok bool) { + if v == nil { + return nil, false + } + + for i := 0; i < len(path)-1; i++ { + key := path[i] + + vv, ok := v[key].(map[string]any) + if !ok { + return nil, false + } + + v = vv + } + + value, ok = v[path[len(path)-1]] + return value, ok +} diff --git a/backend/pkg/sqlitegen/qb/qb.go b/backend/pkg/sqlitegen/qb/qb.go index e7a899d39d..7cadcdf07a 100644 --- a/backend/pkg/sqlitegen/qb/qb.go +++ b/backend/pkg/sqlitegen/qb/qb.go @@ -7,6 +7,7 @@ package qb import ( "fmt" "mintter/backend/pkg/sqlitegen" + "strconv" "strings" ) @@ -75,6 +76,8 @@ func newSegment(s sqlitegen.Schema, v interface{}) (writeFunc func(*queryBuilder if opt == "\n" { isNewLine = true } + case int: + writeFunc = func(qb *queryBuilder) { qb.WriteString(strconv.Itoa(opt)) } case rune: writeFunc = func(qb *queryBuilder) { qb.WriteRune(opt) } if opt == '\n' { @@ -232,6 +235,33 @@ func Insert(cols ...sqlitegen.Column) Opt { } } +// InsertOrIgnore generates a complete insert or ignore statement. 
+func InsertOrIgnore(cols ...sqlitegen.Column) Opt { + if len(cols) == 0 { + panic("INSERT OR IGNORE statement must have columns to insert") + } + return func(qb *queryBuilder) { + var table sqlitegen.Table + + varCols := make([]interface{}, len(cols)) + for i, c := range cols { + if i == 0 { + table = qb.schema.GetColumnTable(c) + } else { + if table != qb.schema.GetColumnTable(c) { + panic("BUG: inserting columns from unrelated tables") + } + } + varCols[i] = VarCol(c) + } + + qb.writeSegments( + "INSERT OR IGNORE INTO", table, ListColShort(cols...), Line, + "VALUES", List(varCols...), + ) + } +} + // Results annotates SQL expressions or concrete columns to become outputs of a SQL query. // The argument must be ResultOpt or Column. func Results(rr ...any) Opt { @@ -371,6 +401,18 @@ func VarCol(col sqlitegen.Column) Opt { } } +// VarColType is the same as VarCol but forces the type. +func VarColType(col sqlitegen.Column, t sqlitegen.Type) Opt { + return func(qb *queryBuilder) { + sym := sqlitegen.GoSymbol{ + Name: sqlitegen.GoNameFromSQLName(col.String(), false), + Type: t, + } + qb.AddInput(sym) + qb.WriteString(":" + sym.Name) + } +} + // Indent doesn't do anything special and just writes the segments that were given. // It's useful for visual indentation of a query though, and can help organize // large join clauses and others into nested logical blocks. diff --git a/backend/pkg/sqlitegen/schema.go b/backend/pkg/sqlitegen/schema.go index 1446b0997f..c2568ed5e8 100644 --- a/backend/pkg/sqlitegen/schema.go +++ b/backend/pkg/sqlitegen/schema.go @@ -33,7 +33,13 @@ func (s *Schema) GetColumnTable(c Column) Table { // It panics if column is unknown. 
func (s *Schema) GetColumnType(c Column) Type { info := s.columnInfo(c) - return typeFromSQLType(info.SQLType) + + t, ok := sqlTypes[info.SQLType] + if !ok { + panic(fmt.Errorf("unsupported SQL type %q for column %q", info.SQLType, c.String())) + } + + return t } func (s *Schema) columnInfo(c Column) ColumnInfo { @@ -129,15 +135,6 @@ var sqlTypes = map[string]Type{ "BLOB": TypeBytes, } -func typeFromSQLType(sqlType string) Type { - t, ok := sqlTypes[sqlType] - if !ok { - panic("unsupported SQL type " + sqlType) - } - - return t -} - // IntrospectSchema attempt to infer the Schema from existing SQLite tables. // We only support base SQLite data types. func IntrospectSchema[T *sqlite.Conn | *sqlitex.Pool](db T) (Schema, error) { @@ -161,7 +158,7 @@ SELECT p.name AS column_name, p.type AS column_type FROM sqlite_master AS m -JOIN pragma_table_info(m.name) AS p +JOIN pragma_table_xinfo(m.name) AS p ORDER BY m.name, p.cid ` var s Schema diff --git a/backend/pkg/sqlitegen/sqlitegen.go b/backend/pkg/sqlitegen/sqlitegen.go index 3d1f77bb56..b39aa0eff5 100644 --- a/backend/pkg/sqlitegen/sqlitegen.go +++ b/backend/pkg/sqlitegen/sqlitegen.go @@ -11,24 +11,24 @@ import ( "golang.org/x/text/language" ) -var initialisms = map[string]struct{}{ - "ip": {}, - "id": {}, - "http": {}, +var initialisms = map[string]string{ + "ip": "IP", + "id": "ID", + "http": "HTTP", } // AddInitialism allows to add a custom initialism so that generated code knows about them. func AddInitialism(ss ...string) { for _, s := range ss { - s = strings.ToLower(s) - initialisms[s] = struct{}{} + low := strings.ToLower(s) + initialisms[low] = s } } -func isInitialism(s string) bool { +func getInitialism(s string) (out string, ok bool) { s = strings.ToLower(s) - _, ok := initialisms[s] - return ok + out, ok = initialisms[s] + return out, ok } // QueryKind defines kinds of queries. 
@@ -74,8 +74,8 @@ func GoNameFromSQLName(s string, exported bool) string { continue } - if isInitialism(p) { - parts[i] = strings.ToUpper(p) + if s, ok := getInitialism(p); ok { + parts[i] = s continue } diff --git a/backend/wallet/walletsql/queries.gen.go b/backend/wallet/walletsql/queries.gen.go index ca7038b200..a116547239 100644 --- a/backend/wallet/walletsql/queries.gen.go +++ b/backend/wallet/walletsql/queries.gen.go @@ -117,20 +117,20 @@ func listWallets(conn *sqlite.Conn, cursor string, limit int64) ([]listWalletsRe } type getDefaultWalletResult struct { - WalletsID string - WalletsAddress string - WalletsName string - WalletsBalance int64 - WalletsType string - GlobalMetaValue string + WalletsID string + WalletsAddress string + WalletsName string + WalletsBalance int64 + WalletsType string + KVValue string } func getDefaultWallet(conn *sqlite.Conn, key string) (getDefaultWalletResult, error) { const query = `SELECT wallets.id, wallets.address, wallets.name, wallets.balance, wallets.type FROM wallets -WHERE wallets.id IN (SELECT global_meta.value -FROM global_meta -WHERE global_meta.key = :key )` +WHERE wallets.id IN (SELECT kv.value +FROM kv +WHERE kv.key = :key )` var out getDefaultWalletResult @@ -148,7 +148,7 @@ WHERE global_meta.key = :key )` out.WalletsName = stmt.ColumnText(2) out.WalletsBalance = stmt.ColumnInt64(3) out.WalletsType = stmt.ColumnText(4) - out.GlobalMetaValue = stmt.ColumnText(5) + out.KVValue = stmt.ColumnText(5) return nil } @@ -160,13 +160,13 @@ WHERE global_meta.key = :key )` return out, err } -func setDefaultWallet(conn *sqlite.Conn, globalMetaKey string, globalMetaValue string) error { - const query = `INSERT OR REPLACE INTO global_meta (key, value) -VALUES (:globalMetaKey, :globalMetaValue)` +func setDefaultWallet(conn *sqlite.Conn, kvKey string, kvValue string) error { + const query = `INSERT OR REPLACE INTO kv (key, value) +VALUES (:kvKey, :kvValue)` before := func(stmt *sqlite.Stmt) { - stmt.SetText(":globalMetaKey", 
globalMetaKey) - stmt.SetText(":globalMetaValue", globalMetaValue) + stmt.SetText(":kvKey", kvKey) + stmt.SetText(":kvValue", kvValue) } onStep := func(i int, stmt *sqlite.Stmt) error { @@ -182,7 +182,7 @@ VALUES (:globalMetaKey, :globalMetaValue)` } func removeDefaultWallet(conn *sqlite.Conn, key string) error { - const query = `DELETE FROM global_meta WHERE global_meta.key = :key ` + const query = `DELETE FROM kv WHERE kv.key = :key ` before := func(stmt *sqlite.Stmt) { stmt.SetText(":key", key) diff --git a/backend/wallet/walletsql/queries.gensum b/backend/wallet/walletsql/queries.gensum index 8f5453b645..5bb8d5f0e7 100644 --- a/backend/wallet/walletsql/queries.gensum +++ b/backend/wallet/walletsql/queries.gensum @@ -1,2 +1,2 @@ -srcs: ab232d4e76be125fc707e44b91cb9cae -outs: 6fc772e2e286abc8f256545fa54899e6 +srcs: 5e8f7383f07f830458afe211ac84e396 +outs: 5d37bc7653857bc96d0dd85be98ec9a6 diff --git a/backend/wallet/walletsql/queries.go b/backend/wallet/walletsql/queries.go index 72a2e36dce..a0a775b749 100644 --- a/backend/wallet/walletsql/queries.go +++ b/backend/wallet/walletsql/queries.go @@ -58,26 +58,26 @@ func generateQueries() error { ), qb.Line, "FROM", storage.Wallets, qb.Line, "WHERE", storage.WalletsID, "IN (SELECT", qb.Results( - qb.ResultCol(storage.GlobalMetaValue), + qb.ResultCol(storage.KVValue), ), qb.Line, - "FROM", storage.GlobalMeta, qb.Line, - "WHERE", storage.GlobalMetaKey, "=", qb.Var("key", sqlitegen.TypeText), ")", + "FROM", storage.KV, qb.Line, + "WHERE", storage.KVKey, "=", qb.Var("key", sqlitegen.TypeText), ")", ), qb.MakeQuery(storage.Schema, "setDefaultWallet", sqlitegen.QueryKindExec, - "INSERT OR REPLACE INTO", storage.GlobalMeta, qb.ListColShort( - storage.GlobalMetaKey, - storage.GlobalMetaValue, + "INSERT OR REPLACE INTO", storage.KV, qb.ListColShort( + storage.KVKey, + storage.KVValue, ), qb.Line, "VALUES", qb.List( - qb.VarCol(storage.GlobalMetaKey), - qb.VarCol(storage.GlobalMetaValue), + qb.VarCol(storage.KVKey), + 
qb.VarCol(storage.KVValue), ), ), qb.MakeQuery(storage.Schema, "removeDefaultWallet", sqlitegen.QueryKindExec, - "DELETE FROM", storage.GlobalMeta, - "WHERE", storage.GlobalMetaKey, "=", qb.Var("key", sqlitegen.TypeText), "", + "DELETE FROM", storage.KV, + "WHERE", storage.KVKey, "=", qb.Var("key", sqlitegen.TypeText), "", ), qb.MakeQuery(storage.Schema, "updateWalletName", sqlitegen.QueryKindExec, diff --git a/backend/wallet/walletsql/wallet_test.go b/backend/wallet/walletsql/wallet_test.go index e3a7221826..409b1370a3 100644 --- a/backend/wallet/walletsql/wallet_test.go +++ b/backend/wallet/walletsql/wallet_test.go @@ -1,16 +1,12 @@ package walletsql import ( - "io/ioutil" - "os" - "path/filepath" + "context" + "mintter/backend/daemon/storage" "strings" "testing" - "crawshaw.io/sqlite" - "crawshaw.io/sqlite/sqlitex" "github.com/stretchr/testify/require" - "go.uber.org/multierr" ) const ( @@ -39,9 +35,11 @@ var ( ) func TestQueries(t *testing.T) { - conn, closer, err := makeConn() + pool := storage.MakeTestDB(t) + + conn, release, err := pool.Conn(context.Background()) require.NoError(t, err) - defer func() { require.NoError(t, closer()) }() + defer release() { err = InsertWallet(conn, Wallet{ @@ -130,65 +128,3 @@ func TestQueries(t *testing.T) { require.Equal(t, int64(2), nwallets.Count) } } - -func makeConn() (conn *sqlite.Conn, closer func() error, err error) { - dir, err := ioutil.TempDir("", "sqlitegen-") - if err != nil { - return nil, nil, err - } - defer func() { - if err != nil { - os.RemoveAll(dir) - } - }() - - conn, err = sqlite.OpenConn(filepath.Join(dir, "db.sqlite")) - if err != nil { - return nil, nil, err - } - defer func() { - if err != nil { - conn.Close() - } - }() - - err = sqlitex.ExecScript(conn, ` - CREATE TABLE wallets ( - -- Wallet unique ID. Is the connection uri hash. - id TEXT PRIMARY KEY, - -- The type of the wallet. 
- type TEXT CHECK( type IN ('lnd','lndhub.go','lndhub') ) NOT NULL DEFAULT 'lndhub.go', - -- Address of the LND node backing up this wallet. In case lndhub, this will be the - -- URL to connect via rest api. In case LND wallet, this will be the gRPC address. - address TEXT NOT NULL, - -- The login to access the wallet. Login in case lndhub and the macaroon - -- bytes in case lnd. - login BLOB NOT NULL, - -- The password to access the wallet. Passphrase in case of lndhub and the encrytion - -- key to unlock the internal wallet in case of LND. - password BLOB NOT NULL, - -- The Authentication token of the wallet. api token in case of lndhub - token BLOB, - -- Human readable name to help the user identify each wallet - name TEXT NOT NULL, - -- The balance in satoshis - balance INTEGER DEFAULT 0 - ); - -- Stores global metadata/configuration about any other table - CREATE TABLE global_meta ( - key TEXT PRIMARY KEY, - value TEXT - ) WITHOUT ROWID; - -`) - if err != nil { - return nil, nil, err - } - - return conn, func() error { - return multierr.Combine( - os.RemoveAll(dir), - conn.Close(), - ) - }, nil -} diff --git a/build/rules/mintter/mintter.build_defs b/build/rules/mintter/mintter.build_defs index bbbbe2a492..a8fe91fd3b 100644 --- a/build/rules/mintter/mintter.build_defs +++ b/build/rules/mintter/mintter.build_defs @@ -32,22 +32,22 @@ $TOOLS_PROTOC -I proto \ generated( name = "js", srcs = srcs, - outs = [x.replace(".proto", "_pb.ts") for x in srcs] + [x.replace(".proto", "_connectweb.ts") for x in srcs], + outs = [x.replace(".proto", "_pb.ts") for x in srcs] + [x.replace(".proto", "_connect.ts") for x in srcs], cmd = """ $TOOLS_PROTOC -I proto \ --plugin=protoc-gen-es=$TOOLS_PROTOC_GEN_ES \ - --plugin=protoc-gen-connect-web=$TOOLS_PROTOC_GEN_CONNECT_WEB \ + --plugin=protoc-gen-connect-es=$TOOLS_PROTOC_GEN_CONNECT_ES \ --es_opt=target=ts,import_extension=none \ - --connect-web_opt=target=ts,import_extension=none \ + 
--connect-es_opt=target=ts,import_extension=none \ --es_out=frontend/packages/shared/src/client/.generated/ \ - --connect-web_out=frontend/packages/shared/src/client/.generated/ \ + --connect-es_out=frontend/packages/shared/src/client/.generated/ \ $SRCS """, out_dir = "//" + package_name().replace("proto", "frontend/packages/shared/src/client/.generated"), tools = [ "//build/nix:protoc", "//build/tools:protoc-gen-es", - "//build/tools:protoc-gen-connect-web", + "//build/tools:protoc-gen-connect-es", ], ) else: diff --git a/build/tools/BUILD.plz b/build/tools/BUILD.plz index 1835710311..59f3c70c64 100644 --- a/build/tools/BUILD.plz +++ b/build/tools/BUILD.plz @@ -35,7 +35,7 @@ yarn_binary( ) yarn_binary( - name = "protoc-gen-connect-web", + name = "protoc-gen-connect-es", yarn_deps = "//:yarn", ) diff --git a/dev b/dev index 1f46976089..2b734e494e 100755 --- a/dev +++ b/dev @@ -52,12 +52,17 @@ def main(): "Check all the generated code is up to date. Otherwise run the code generation process to fix it.", ) def gen(args): - out = run( - "plz run parallel $(plz query filter -i 'generated:check') " - + str.join(" ", args), - capture_output=True, + targets_to_check = ( + run( + f"plz query filter -i 'generated:check' {str.join(' ', args)}", + capture_output=True, + ) + .stdout.decode("utf-8") + .split("\n") ) + out = run(f"plz run parallel {' '.join(targets_to_check)}", capture_output=True) + targets_to_gen = [] for line in out.stdout.decode("utf-8").split("\n"): idx = line.find("plz run") @@ -68,25 +73,22 @@ def main(): if len(targets_to_gen) == 0: return return run("plz run parallel " + " ".join(targets_to_gen)) - + @cmd(cmds, "run-desktop", "Run frontend desktop app for development.") def run_desktop(args): run("./scripts/cleanup-desktop.sh") run("yarn install") run("plz build //backend:mintterd //:yarn") return run("yarn desktop", args=args) - - @cmd( - cmds, "build-desktop", "Builds the desktop app for the current platform." 
- ) + + @cmd(cmds, "build-desktop", "Builds the desktop app for the current platform.") def build_desktop(args): run("./scripts/cleanup-frontend.sh") run("./scripts/cleanup-desktop.sh") run("yarn install") run("plz build //backend:mintterd //:yarn") run("yarn desktop:make") - - + @cmd(cmds, "run-site", "Run sites app for development.") def run_site(args): run("./scripts/cleanup-site.sh") @@ -97,7 +99,7 @@ def main(): args=args, env={**os.environ, "GW_NEXT_HOST": "http://localhost:3000"}, ) - + @cmd(cmds, "build-site", "Build site app for production.") def build_site(args): run("./scripts/cleanup-frontend.sh") @@ -109,24 +111,19 @@ def main(): args=args, # env={**os.environ, "GW_NEXT_HOST": "http://localhost:3000"}, ) - - @cmd( - cmds, "frontend-validate", "Formats, Validates" - ) + + @cmd(cmds, "frontend-validate", "Formats, Validates") def frontend_validate(args): run("yarn validate") - - @cmd( - cmds, "frontend-test", "Tests frontend code" - ) + @cmd(cmds, "frontend-test", "Tests frontend code") def frontend_test(args): run("yarn test") @cmd(cmds, "run-backend", "Build and run mintterd binary for the current platform.") def run_backend(args): return run("plz run //backend:mintterd", args=args) - + @cmd(cmds, "build-backend", "Build mintterd binary for the current platform.") def build_backend(args): return run("plz build //backend:mintterd") @@ -138,16 +135,17 @@ def main(): @cmd(cmds, "ping-p2p", "Execute ping utility to check visibility.") def ping_p2p(args): return run("plz run //backend:pingp2p", args=args) - + @cmd( - cmds, "release", "Create a new Release. this will create a new tag and push it to the remote repository" + cmds, + "release", + "Create a new Release. 
this will create a new tag and push it to the remote repository", ) def release(args): # run("yarn validate") # run("yarn test") run("node scripts/tag.mjs") - if len(sys.argv) == 1: cli.print_help() return diff --git a/docs/docs/document-linking.md b/docs/docs/document-linking.md index 7e27abbaa2..c0faf80cd3 100644 --- a/docs/docs/document-linking.md +++ b/docs/docs/document-linking.md @@ -1,17 +1,17 @@ # Linking -HyperDocs links, prefixed with `hd://`, take the following form: +HyperDocs links, prefixed with `hm://`, take the following form: ``` -hd://d/DOCUMENT_ID?v=VERSION_REF#BLOCK_REF +hm://d/DOCUMENT_ID?v=VERSION_REF#BLOCK_REF ``` ### Entity References The following portion is required: -- `hd://` - the HyperDocs URL scheme +- `hm://` - the HyperDocs URL scheme - `d/` - the Entity Type ID - `DOCUMENT_ID` - the ID of the Document to reference @@ -26,7 +26,7 @@ Additional types and conventions are reserved for future use ### Version References -A link with only `hd://d/DOCUMENT_ID` is valid, but the entity may change over time. To link to the **exact version** of an Entity, include: +A link with only `hm://d/DOCUMENT_ID` is valid, but the entity may change over time. To link to the **exact version** of an Entity, include: - `?v=` - optional query parameter designating the version ref - `VERSION_REF` - the CID(s) of the EntityChange that represent an exact version of the Entity, separated by periods (`.`) @@ -37,8 +37,8 @@ When you link to [Document Entities](./document-entity), you may deep-link to a A block reference points to a specific block within a Entity Reference or a Version Reference. 
-- `hd://d/DOCUMENT_ID#BLOCK_REF` - Block of Entity Ref -- `hd://d/DOCUMENT_ID?v=VERSION_REF#BLOCK_REF` - Block of Version Ref +- `hm://d/DOCUMENT_ID#BLOCK_REF` - Block of Entity Ref +- `hm://d/DOCUMENT_ID?v=VERSION_REF#BLOCK_REF` - Block of Version Ref A simple `BLOCK_ID` is a valid Block Ref, but it may also include a character range, pointing to a string within a block. This is a Block Range Ref: @@ -59,4 +59,4 @@ The following are real example links: ## Web Links -> Note: The Mintter application uses the emerging ["Aer" protocol](./hyperdocs-aer) to convert HTTPS URLs to `hd://` links automatically under the hood \ No newline at end of file +> Note: The Mintter application uses the emerging ["Aer" protocol](./hyperdocs-aer) to convert HTTPS URLs to `hm://` links automatically under the hood \ No newline at end of file diff --git a/frontend/apps/desktop/package.json b/frontend/apps/desktop/package.json index 66fe3a45a8..8b3df8e0e2 100644 --- a/frontend/apps/desktop/package.json +++ b/frontend/apps/desktop/package.json @@ -62,6 +62,7 @@ "@mintter/prettier": "workspace:*", "@playwright/test": "1.37.1", "@reforged/maker-appimage": "3.3.1", + "@sentry-internal/tracing": "^7.66.0", "@tamagui/vite-plugin": "1.53.1", "@types/react": "18.2.20", "@types/react-dom": "18.2.7", diff --git a/frontend/apps/site/account-page.tsx b/frontend/apps/site/account-page.tsx index 9b575b328a..a038fc689c 100644 --- a/frontend/apps/site/account-page.tsx +++ b/frontend/apps/site/account-page.tsx @@ -1,3 +1,4 @@ +import {HYPERMEDIA_ACCOUNT_PREFIX} from '@mintter/shared' import { Avatar, Heading, @@ -9,12 +10,12 @@ import { } from '@mintter/ui' import {cidURL} from 'ipfs' import Head from 'next/head' -import {HDAccount} from 'server/json-hd' +import {HMAccount} from 'server/json-hm' import {SiteHead} from 'site-head' import {trpc} from 'trpc' import Footer from './footer' -function AccountContent({account}: {account: HDAccount | null | undefined}) { +function AccountContent({account}: 
{account: HMAccount | null | undefined}) { if (isEmptyObject(account?.profile)) { return } @@ -51,7 +52,10 @@ export default function AccountPage({accountId}: {accountId: string}) { return ( - + diff --git a/frontend/apps/site/pages/g/[groupId]/index.tsx b/frontend/apps/site/pages/g/[groupId]/index.tsx index 59a27372aa..1537f02dfa 100644 --- a/frontend/apps/site/pages/g/[groupId]/index.tsx +++ b/frontend/apps/site/pages/g/[groupId]/index.tsx @@ -21,11 +21,15 @@ import { View, SimpleTooltip, } from '@mintter/ui' -import {HDGroup, HDPublication} from 'server/json-hd' +import {HMGroup, HMPublication} from '@mintter/ui' import {ReactElement} from 'react' import {GestureResponderEvent} from 'react-native' import {Timestamp} from '@bufbuild/protobuf' -import {entityIdToSitePath, formattedDate} from '@mintter/shared' +import { + HYPERMEDIA_GROUP_PREFIX, + entityIdToSitePath, + formattedDate, +} from '@mintter/shared' import {AccountAvatarLink, AccountRow} from 'components/account-row' import {format} from 'date-fns' import {Paragraph} from 'tamagui' @@ -75,7 +79,7 @@ function GroupMetadata({ group, groupEid, }: { - group?: null | HDGroup + group?: null | HMGroup groupEid: string }) { if (!group) return null @@ -136,8 +140,8 @@ function GroupContentItem({ item, group, }: { - item: {pathName: string; publication: null | HDPublication} - group?: null | HDGroup + item: {pathName: string; publication: null | HMPublication} + group?: null | HMGroup }) { return ( { return surfaceEmbedRefs(publication?.document?.children) }, [publication?.document?.children]) @@ -370,7 +370,7 @@ function EmbedMeta({publication}: {publication?: HDPublication | null}) { ) } -function CitationPreview({citationLink}: {citationLink: HDLink}) { +function CitationPreview({citationLink}: {citationLink: HMLink}) { const {source, target} = citationLink const sourcePub = trpc.publication.get.useQuery( { @@ -392,7 +392,7 @@ function CitationPreview({citationLink}: {citationLink: HDLink}) { } function 
CitationsMeta({ publication, -}: {publication?: HDPublication | null} = {}) { +}: {publication?: HMPublication | null} = {}) { const citations = trpc.publication.getCitations.useQuery( { documentId: publication?.document?.id, @@ -448,7 +448,7 @@ function TOCHeading({heading}: {heading: SectionHeading}) { ) } -function getToc(blockNodes?: HDBlockNode[] | null): SectionHeading[] { +function getToc(blockNodes?: HMBlockNode[] | null): SectionHeading[] { if (!blockNodes) return [] let headings: SectionHeading[] = [] for (let blockNode of blockNodes) { @@ -470,7 +470,7 @@ function getToc(blockNodes?: HDBlockNode[] | null): SectionHeading[] { export function TableOfContents({ publication, -}: {publication?: HDPublication | null} = {}) { +}: {publication?: HMPublication | null} = {}) { const toc = useMemo( () => getToc(publication?.document?.children), [publication], @@ -490,7 +490,7 @@ export function PublicationMetadata({ publication, pathName, }: { - publication?: HDPublication | null + publication?: HMPublication | null pathName?: string }) { if (!publication) return null @@ -570,7 +570,7 @@ export function PublishedMeta({ publication, pathName, }: { - publication?: HDPublication | null + publication?: HMPublication | null pathName?: string }) { const pathRecord = trpc.publication.getPath.useQuery( diff --git a/frontend/apps/site/publication-page.tsx b/frontend/apps/site/publication-page.tsx index 96b80153e4..9640057651 100644 --- a/frontend/apps/site/publication-page.tsx +++ b/frontend/apps/site/publication-page.tsx @@ -15,6 +15,7 @@ import { Block, Publication, entityIdToSitePath, + HYPERMEDIA_DOCUMENT_PREFIX, } from '@mintter/shared' import { Button, @@ -34,14 +35,14 @@ import {cidURL} from 'ipfs' import Head from 'next/head' import {useRouter} from 'next/router' import {useMemo, useState} from 'react' -import {HDBlock, HDBlockNode, HDGroup, HDPublication} from 'server/json-hd' +import {HMBlock, HMBlockNode, HMGroup, HMPublication} from '@mintter/ui' import 
{WebTipping} from 'web-tipping' import {PublicationMetadata} from './publication-metadata' import {SiteHead} from './site-head' import {trpc} from './trpc' import Link from 'next/link' -function hdLinkToSitePath(link: string) { +function hmLinkToSitePath(link: string) { const [docId, version, block] = getIdsfromUrl(link) if (!docId) return link let path = `/d/${docId}` @@ -69,7 +70,7 @@ export type PublicationPageData = { function PublicationContent({ publication, }: { - publication: HDPublication | undefined + publication: HMPublication | undefined }) { return ( @@ -88,12 +89,12 @@ function PublicationContent({ } function getBlockNodeById( - blocks: Array, + blocks: Array, blockId: string, -): HDBlockNode | null { +): HMBlockNode | null { if (!blockId) return null - let res: HDBlockNode | undefined + let res: HMBlockNode | undefined blocks.find((bn) => { if (bn.block?.id == blockId) { res = bn @@ -116,9 +117,9 @@ function GroupSidebarContent({ content, }: { activePathName: string - group?: HDGroup + group?: HMGroup content?: Array @@ -308,13 +309,13 @@ function InlineContentView({ } if (content.type === 'link') { const href = isHyperdocsScheme(content.href) - ? hdLinkToSitePath(content.href) + ? 
hmLinkToSitePath(content.href) : content.href return ( @@ -371,8 +372,8 @@ function StaticImageBlock({block}: {block: ImageBlock}) { // return } -function stripHDLinkPrefix(link: string) { - return link.replace(/^hd:\//, '') +function stripHMLinkPrefix(link: string) { + return link.replace(/^hm:\//, '') } function StaticEmbedBlock({block}: {block: EmbedBlock}) { @@ -425,10 +426,10 @@ function StaticEmbedBlock({block}: {block: EmbedBlock}) { cursor: 'pointer', }} onPress={() => { - const ref = stripHDLinkPrefix(block.ref) + const ref = stripHMLinkPrefix(block.ref) router.push(ref) }} - href={stripHDLinkPrefix(block.ref)} + href={stripHMLinkPrefix(block.ref)} > {content} {/* */} @@ -437,7 +438,7 @@ function StaticEmbedBlock({block}: {block: EmbedBlock}) { ) } -function StaticBlock({block}: {block: HDBlock}) { +function StaticBlock({block}: {block: HMBlock}) { let niceBlock = block as PresentationBlock // todo, validation if (niceBlock.type === 'paragraph' || niceBlock.type === 'heading') { @@ -491,7 +492,7 @@ function StaticBlockNode({ block, ctx, }: { - block: HDBlockNode + block: HMBlockNode ctx?: PublicationViewContext }) { const [isHovering, setIsHovering] = useState(false) diff --git a/frontend/apps/site/server/json-hd.ts b/frontend/apps/site/server/json-hm.ts similarity index 69% rename from frontend/apps/site/server/json-hd.ts rename to frontend/apps/site/server/json-hm.ts index ea2dadaeb6..d85f91d20a 100644 --- a/frontend/apps/site/server/json-hd.ts +++ b/frontend/apps/site/server/json-hm.ts @@ -8,23 +8,23 @@ import type { Profile, Publication, SiteInfo, - HDTimestamp, + HMTimestamp, ChangeInfo, MttLink, Group, } from '@mintter/shared' export type ServerChangeInfo = ChangeInfo -export type HDChangeInfo = { +export type HMChangeInfo = { id?: string author?: string - createTime?: HDTimestamp + createTime?: HMTimestamp version?: string deps?: string[] } export type ServerAnnotation = Annotation -export type HDAnnotation = { +export type HMAnnotation = { type?: 
string ref?: string attributes?: {[key: string]: string} @@ -33,43 +33,43 @@ export type HDAnnotation = { } export type ServerBlock = Block -export type HDBlock = { +export type HMBlock = { id?: string type?: string text?: string ref?: string attributes?: {[key: string]: string} - annotations?: HDAnnotation[] + annotations?: HMAnnotation[] revision?: string } export type ServerBlockNode = BlockNode -export type HDBlockNode = { - block?: HDBlock - children?: HDBlockNode[] +export type HMBlockNode = { + block?: HMBlock + children?: HMBlockNode[] } export type ServerDocument = Document -export type HDDocument = { +export type HMDocument = { title?: string id?: string author?: string webUrl?: string editors?: string[] - children?: HDBlockNode[] - createTime?: HDTimestamp - updateTime?: HDTimestamp - publishTime?: HDTimestamp + children?: HMBlockNode[] + createTime?: HMTimestamp + updateTime?: HMTimestamp + publishTime?: HMTimestamp } export type ServerPublication = Publication -export type HDPublication = { - document?: HDDocument +export type HMPublication = { + document?: HMDocument version?: string } export type ServerSiteInfo = SiteInfo -export type HDSiteInfo = { +export type HMSiteInfo = { hostname?: string title?: string description?: string @@ -77,36 +77,36 @@ export type HDSiteInfo = { } export type ServerDevice = Device -export type HDDevice = { +export type HMDevice = { deviceId?: string } export type ServerProfile = Profile -export type HDProfile = { +export type HMProfile = { alias?: string bio?: string avatar?: string } export type ServerAccount = Account -export type HDAccount = { +export type HMAccount = { id?: string - profile?: HDProfile - devices?: {[key: string]: HDDevice} + profile?: HMProfile + devices?: {[key: string]: HMDevice} } export type ServerGroup = Group -export type HDGroup = { +export type HMGroup = { id?: string title?: string description?: string ownerAccountId?: string - createTime?: HDTimestamp + createTime?: HMTimestamp version?: 
string } export type ServerLink = MttLink -export type HDLink = { +export type HMLink = { target?: { documentId?: string version?: string diff --git a/frontend/apps/site/server/routers/_app.ts b/frontend/apps/site/server/routers/_app.ts index 09b6ed5aa9..1e758ab573 100644 --- a/frontend/apps/site/server/routers/_app.ts +++ b/frontend/apps/site/server/routers/_app.ts @@ -4,6 +4,7 @@ import { Changes, ContentGraph, Groups, + HYPERMEDIA_GROUP_PREFIX, Publications, Role, WebPublishing, @@ -11,15 +12,15 @@ import { } from '@mintter/shared' import {localWebsiteClient, transport} from 'client' import {getSiteInfo} from 'get-site-info' -import {HDChangeInfo} from 'server/json-hd' +import {HMChangeInfo} from '@mintter/ui' import { - hdAccount, - hdChangeInfo, - hdGroup, - hdLink, - hdPublication, - hdSiteInfo, -} from 'server/to-json-hd' + hmAccount, + hmChangeInfo, + hmGroup, + hmLink, + hmPublication, + hmSiteInfo, +} from 'server/to-json-hm' import {z} from 'zod' import {procedure, router} from '../trpc' @@ -102,7 +103,7 @@ const publicationRouter = router({ version: input.versionId || '', }) return { - publication: hdPublication(pub), + publication: hmPublication(pub), } }), getEmbedMeta: procedure @@ -122,7 +123,7 @@ const publicationRouter = router({ }) return { embeds: [], - // publication: hdPublication(pub), + // publication: hmPublication(pub), } }), getCitations: procedure @@ -139,7 +140,7 @@ const publicationRouter = router({ documentId: input.documentId, }) return { - citationLinks: citationList.links.map(hdLink), + citationLinks: citationList.links.map(hmLink), } }), @@ -160,15 +161,15 @@ const publicationRouter = router({ if (!docId) throw new Error('docId not retreived from getPublication') const version = pub.version if (!version) throw new Error('version not retrieved from getPublication') - const changesIndex: Map = new Map() + const changesIndex: Map = new Map() const changeDeps: Map> = new Map() const downstreamChanges: Map> = new Map() // pub.changes = 
pub.changes || [] const {documentId} = input const {changes} = await changesClient.listChanges({documentId}) changes.forEach((change) => { - const hdChange = hdChangeInfo(change) - hdChange && changesIndex.set(change.id, hdChange) + const hmChange = hmChangeInfo(change) + hmChange && changesIndex.set(change.id, hmChange) if (!changeDeps.has(change.id)) changeDeps.set(change.id, new Set()) change.deps.forEach((dep) => { changeDeps.get(change.id)!.add(dep) @@ -214,7 +215,7 @@ const publicationRouter = router({ ), ), allDeps: changeIdsToChanges(allDeps), - pub: hdPublication(pub), + pub: hmPublication(pub), } }), }) @@ -245,7 +246,7 @@ const groupRouter = router({ .query(async ({input: {pathName, groupEid, version}}) => { // todo. get current group content and find the pathName, return the corresponding doc console.log('getting site info') - const groupId = `hd://g/${groupEid}` + const groupId = `${HYPERMEDIA_GROUP_PREFIX}${groupEid}` const siteInfo = await groupsClient.listContent({ id: groupId, version, @@ -263,13 +264,13 @@ const groupRouter = router({ version: documentVersion, }) return { - publication: hdPublication(pub), + publication: hmPublication(pub), pathName, documentId, documentVersion, groupVersion: version, groupEid, - group: hdGroup(group), + group: hmGroup(group), } }), get: procedure @@ -281,11 +282,11 @@ const groupRouter = router({ .query(async ({input}) => { console.log('will getGroup with id', input) const group = await groupsClient.getGroup({ - id: `hd://g/${input.groupEid}`, + id: `${HYPERMEDIA_GROUP_PREFIX}${input.groupEid}`, }) - console.log('did get group', hdGroup(group)) + console.log('did get group', hmGroup(group)) return { - group: hdGroup(group), + group: hmGroup(group), } }), listContent: procedure @@ -296,7 +297,7 @@ const groupRouter = router({ ) .query(async ({input}) => { const list = await groupsClient.listContent({ - id: `hd://g/${input.groupEid}`, + id: `${HYPERMEDIA_GROUP_PREFIX}${input.groupEid}`, }) const listedDocs = await 
Promise.all( Object.entries(list.content).map(async ([pathName, pubUrl]) => { @@ -311,7 +312,7 @@ const groupRouter = router({ pathName, docId, version, - publication: hdPublication(pub), + publication: hmPublication(pub), } }), ) @@ -331,7 +332,7 @@ const groupRouter = router({ ) .query(async ({input}) => { const list = await groupsClient.listMembers({ - id: `hd://g/${input.groupEid}`, + id: `${HYPERMEDIA_GROUP_PREFIX}${input.groupEid}`, }) return Object.entries(list.members || {}).map(([account, role]) => ({ account, @@ -352,7 +353,7 @@ const accountRouter = router({ id: input.accountId, }) return { - account: hdAccount(account), + account: hmAccount(account), } }), }) @@ -360,7 +361,7 @@ const accountRouter = router({ const siteInfoRouter = router({ get: procedure.query(async () => { const siteInfo = await getSiteInfo() - return hdSiteInfo(siteInfo) + return hmSiteInfo(siteInfo) }), }) diff --git a/frontend/apps/site/server/to-json-hd.ts b/frontend/apps/site/server/to-json-hd.ts deleted file mode 100644 index 9cc942afa2..0000000000 --- a/frontend/apps/site/server/to-json-hd.ts +++ /dev/null @@ -1,46 +0,0 @@ -import { - Account, - ChangeInfo, - MttLink, - Publication, - SiteInfo, - Group, -} from '@mintter/shared' -import { - HDAccount, - HDChangeInfo, - HDPublication, - HDSiteInfo, - HDLink, - HDGroup, -} from './json-hd' - -export function hdPublication(input?: Publication | null) { - if (!input) return null - return input.toJson() as HDPublication -} - -export function hdSiteInfo(input?: SiteInfo | null) { - if (!input) return null - return input.toJson() as HDSiteInfo -} - -export function hdAccount(input?: Account | null) { - if (!input) return null - return input.toJson() as HDAccount -} - -export function hdGroup(input?: Group | null) { - if (!input) return null - return input.toJson() as HDGroup -} - -export function hdChangeInfo(input?: ChangeInfo | null) { - if (!input) return null - return input.toJson() as HDChangeInfo -} - -export function 
hdLink(input?: MttLink) { - if (!input) return null - return input.toJson() as HDLink -} diff --git a/frontend/apps/site/server/to-json-hm.ts b/frontend/apps/site/server/to-json-hm.ts new file mode 100644 index 0000000000..3c816d0625 --- /dev/null +++ b/frontend/apps/site/server/to-json-hm.ts @@ -0,0 +1,46 @@ +import { + Account, + ChangeInfo, + MttLink, + Publication, + SiteInfo, + Group, +} from '@mintter/shared' +import { + HMAccount, + HMChangeInfo, + HMPublication, + HMSiteInfo, + HMLink, + HMGroup, +} from './json-hm' + +export function hmPublication(input?: Publication | null) { + if (!input) return null + return input.toJson() as HMPublication +} + +export function hmSiteInfo(input?: SiteInfo | null) { + if (!input) return null + return input.toJson() as HMSiteInfo +} + +export function hmAccount(input?: Account | null) { + if (!input) return null + return input.toJson() as HMAccount +} + +export function hmGroup(input?: Group | null) { + if (!input) return null + return input.toJson() as HMGroup +} + +export function hmChangeInfo(input?: ChangeInfo | null) { + if (!input) return null + return input.toJson() as HMChangeInfo +} + +export function hmLink(input?: MttLink) { + if (!input) return null + return input.toJson() as HMLink +} diff --git a/frontend/apps/site/web-tipping.tsx b/frontend/apps/site/web-tipping.tsx index d8c40e49df..711b64f941 100644 --- a/frontend/apps/site/web-tipping.tsx +++ b/frontend/apps/site/web-tipping.tsx @@ -19,7 +19,7 @@ import Link from 'next/link' import React, {useEffect, useMemo, useReducer, useRef, useState} from 'react' import {toast} from 'react-hot-toast' import QRCode from 'react-qr-code' -import {HDAccount} from 'server/json-hd' +import {HMAccount} from '@mintter/ui' const options: {value: string; label: string; sats: number | null}[] = [ {value: '100', label: '100 sats', sats: 100}, @@ -49,7 +49,7 @@ export function WebTipping({ editors = [], }: { docId?: string - editors: Array + editors: Array }) { const [open, 
onOpenChange] = useState(false) @@ -272,7 +272,7 @@ function CreateInvoiceStep({ }: { onInvoice: (invoice: InternalInvoice) => void onComplete: (complete: boolean) => void - editors: Array + editors: Array docId: string }) { let [amount, setAmount] = useState(100) @@ -547,7 +547,7 @@ function DontationDialog({ open: boolean onOpenChange: (open: boolean) => void docId: string - editors: Array + editors: Array }) { let [invoice, setInvoice] = useState(null) let [completion, setCompletion] = useState(false) diff --git a/frontend/packages/app/package.json b/frontend/packages/app/package.json index 647c7f9206..828a502c95 100644 --- a/frontend/packages/app/package.json +++ b/frontend/packages/app/package.json @@ -8,7 +8,7 @@ "scripts": { "lint": "eslint .", "format": "prettier \"**/*.{ts,tsx,md,mdx,json,js}\" --check --ignore-path ../../../.prettierignore", - "test": "echo TODO", + "test": "vitest --run", "typecheck": "tsc --noEmit" }, "dependencies": { diff --git a/frontend/packages/app/src/blocknote-core/BlockNoteEditor.test.ts b/frontend/packages/app/src/blocknote-core/BlockNoteEditor._test.ts similarity index 100% rename from frontend/packages/app/src/blocknote-core/BlockNoteEditor.test.ts rename to frontend/packages/app/src/blocknote-core/BlockNoteEditor._test.ts diff --git a/frontend/packages/app/src/blocknote-core/BlockNoteExtensions.ts b/frontend/packages/app/src/blocknote-core/BlockNoteExtensions.ts index e8fea76d4b..db5df5f37c 100644 --- a/frontend/packages/app/src/blocknote-core/BlockNoteExtensions.ts +++ b/frontend/packages/app/src/blocknote-core/BlockNoteExtensions.ts @@ -1,4 +1,4 @@ -import {HDBlockSchema} from '@mintter/app/src/client/schema' +import {HMBlockSchema} from '@mintter/app/src/client/schema' import {createRightsideBlockWidgetExtension} from '@mintter/app/src/components/rightside-block-widget' import {WidgetDecorationFactory} from '@prosemirror-adapter/core' import {Extensions, extensions} from '@tiptap/core' @@ -45,7 +45,7 @@ import 
{TrailingNode} from './extensions/TrailingNode/TrailingNodeExtension' import UniqueID from './extensions/UniqueID/UniqueID' import {SuggestionsMenuFactory} from './shared/plugins/suggestion/SuggestionsMenuFactoryTypes' -export type UiFactories = Partial<{ +export type UiFactories = Partial<{ formattingToolbarFactory: FormattingToolbarFactory hyperlinkToolbarFactory: HyperlinkToolbarFactory slashMenuFactory: SuggestionsMenuFactory> @@ -56,7 +56,7 @@ export type UiFactories = Partial<{ /** * Get all the Tiptap extensions BlockNote is configured with by default */ -export const getBlockNoteExtensions = (opts: { +export const getBlockNoteExtensions = (opts: { editable?: boolean editor: BlockNoteEditor domAttributes: Partial diff --git a/frontend/packages/app/src/blocknote-core/api/blockManipulation/blockManipulation.test.ts b/frontend/packages/app/src/blocknote-core/api/blockManipulation/blockManipulation._test.ts similarity index 100% rename from frontend/packages/app/src/blocknote-core/api/blockManipulation/blockManipulation.test.ts rename to frontend/packages/app/src/blocknote-core/api/blockManipulation/blockManipulation._test.ts diff --git a/frontend/packages/app/src/blocknote-core/api/formatConversions/formatConversions.test.ts b/frontend/packages/app/src/blocknote-core/api/formatConversions/formatConversions._test.ts similarity index 100% rename from frontend/packages/app/src/blocknote-core/api/formatConversions/formatConversions.test.ts rename to frontend/packages/app/src/blocknote-core/api/formatConversions/formatConversions._test.ts diff --git a/frontend/packages/app/src/blocknote-core/api/nodeConversions/nodeConversions.test.ts b/frontend/packages/app/src/blocknote-core/api/nodeConversions/nodeConversions._test.ts similarity index 100% rename from frontend/packages/app/src/blocknote-core/api/nodeConversions/nodeConversions.test.ts rename to frontend/packages/app/src/blocknote-core/api/nodeConversions/nodeConversions._test.ts diff --git 
a/frontend/packages/app/src/blocknote-core/extensions/Blocks/api/defaultBlocks.ts b/frontend/packages/app/src/blocknote-core/extensions/Blocks/api/defaultBlocks.ts index 2887c4f156..efcaefe65e 100644 --- a/frontend/packages/app/src/blocknote-core/extensions/Blocks/api/defaultBlocks.ts +++ b/frontend/packages/app/src/blocknote-core/extensions/Blocks/api/defaultBlocks.ts @@ -1,5 +1,4 @@ -import {HDHeadingBlockContent} from '@mintter/app/src/editor/heading-component-plugin' -import {HeadingBlockContent} from '../nodes/BlockContent/HeadingBlockContent/HeadingBlockContent' +import {HMHeadingBlockContent} from '../../../../editor/heading-component-plugin' import {BulletListItemBlockContent} from '../nodes/BlockContent/ListItemBlockContent/BulletListItemBlockContent/BulletListItemBlockContent' import {NumberedListItemBlockContent} from '../nodes/BlockContent/ListItemBlockContent/NumberedListItemBlockContent/NumberedListItemBlockContent' import {ParagraphBlockContent} from '../nodes/BlockContent/ParagraphBlockContent/ParagraphBlockContent' @@ -33,7 +32,7 @@ export const defaultBlockSchema = { ...defaultProps, level: {default: '2', values: ['1', '2', '3'] as const}, }, - node: HDHeadingBlockContent, + node: HMHeadingBlockContent, }, bulletListItem: { propSchema: defaultProps, diff --git a/frontend/packages/app/src/blocknote-core/extensions/HyperlinkToolbar/HyperlinkToolbarPlugin.ts b/frontend/packages/app/src/blocknote-core/extensions/HyperlinkToolbar/HyperlinkToolbarPlugin.ts index ca1354d575..3dea51833d 100644 --- a/frontend/packages/app/src/blocknote-core/extensions/HyperlinkToolbar/HyperlinkToolbarPlugin.ts +++ b/frontend/packages/app/src/blocknote-core/extensions/HyperlinkToolbar/HyperlinkToolbarPlugin.ts @@ -228,7 +228,7 @@ class HyperlinkToolbarView { this.hyperlinkMarkRange!.from, this.hyperlinkMarkRange!.from + text.length, this.editor.schema.mark('link', {href: url, id}), - ).setMeta('hdPlugin:uncheckedLink', id) + ).setMeta('hmPlugin:uncheckedLink', id) 
this.editor.view.dispatch(tr) this.editor.view.focus() diff --git a/frontend/packages/app/src/blocknote-core/extensions/SlashMenu/defaultSlashMenuItems.tsx b/frontend/packages/app/src/blocknote-core/extensions/SlashMenu/defaultSlashMenuItems.tsx index 1f443a272b..919602d6a9 100644 --- a/frontend/packages/app/src/blocknote-core/extensions/SlashMenu/defaultSlashMenuItems.tsx +++ b/frontend/packages/app/src/blocknote-core/extensions/SlashMenu/defaultSlashMenuItems.tsx @@ -1,9 +1,9 @@ -import {HDBlockSchema} from '@mintter/app/src/client/schema' +import {HMBlockSchema} from '@mintter/app/src/client/schema' import {BlockNoteEditor} from '../../BlockNoteEditor' import {PartialBlock} from '../Blocks/api/blockTypes' import {BaseSlashMenuItem} from './BaseSlashMenuItem' -export function insertOrUpdateBlock( +export function insertOrUpdateBlock( editor: BlockNoteEditor, block: PartialBlock, ) { @@ -27,7 +27,7 @@ export function insertOrUpdateBlock( */ export const defaultSlashMenuItems = [ // Command for creating a level 1 heading - new BaseSlashMenuItem( + new BaseSlashMenuItem( 'Heading', (editor) => insertOrUpdateBlock(editor, { @@ -38,7 +38,7 @@ export const defaultSlashMenuItems = [ ), // Command for creating a level 2 heading - new BaseSlashMenuItem( + new BaseSlashMenuItem( 'Heading 2', (editor) => insertOrUpdateBlock(editor, { @@ -49,7 +49,7 @@ export const defaultSlashMenuItems = [ ), // Command for creating a level 3 heading - new BaseSlashMenuItem( + new BaseSlashMenuItem( 'Heading 3', (editor) => insertOrUpdateBlock(editor, { @@ -60,7 +60,7 @@ export const defaultSlashMenuItems = [ ), // Command for creating an ordered list - new BaseSlashMenuItem( + new BaseSlashMenuItem( 'Numbered List', (editor) => insertOrUpdateBlock(editor, { @@ -70,7 +70,7 @@ export const defaultSlashMenuItems = [ ), // Command for creating a bullet list - new BaseSlashMenuItem( + new BaseSlashMenuItem( 'Bullet List', (editor) => insertOrUpdateBlock(editor, { @@ -80,7 +80,7 @@ export const 
defaultSlashMenuItems = [ ), // Command for creating a paragraph (pretty useless) - new BaseSlashMenuItem( + new BaseSlashMenuItem( 'Paragraph', (editor) => insertOrUpdateBlock(editor, { @@ -89,7 +89,7 @@ export const defaultSlashMenuItems = [ ['paragraph', 'p'], ), - // new BaseSlashMenuItem( + // new BaseSlashMenuItem( // 'Code', // (editor) => // insertOrUpdateBlock(editor, { @@ -99,7 +99,7 @@ export const defaultSlashMenuItems = [ // ['code'] // ), - // new BaseSlashMenuItem( + // new BaseSlashMenuItem( // 'Blockquote', // (editor) => // insertOrUpdateBlock(editor, { diff --git a/frontend/packages/app/src/blocknote-react/SlashMenu/defaultReactSlashMenuItems.tsx b/frontend/packages/app/src/blocknote-react/SlashMenu/defaultReactSlashMenuItems.tsx index 26952cf761..d6aa851e0c 100644 --- a/frontend/packages/app/src/blocknote-react/SlashMenu/defaultReactSlashMenuItems.tsx +++ b/frontend/packages/app/src/blocknote-react/SlashMenu/defaultReactSlashMenuItems.tsx @@ -3,7 +3,7 @@ import { DefaultBlockSchema, defaultSlashMenuItems, } from '@mintter/app/src/blocknote-core' -import {HDBlockSchema} from '@mintter/app/src/client/schema' +import {HMBlockSchema} from '@mintter/app/src/client/schema' import {MdPreview} from 'react-icons/md' import { RiChatQuoteLine, @@ -96,7 +96,7 @@ export const defaultReactSlashMenuItems = defaultSlashMenuItems if (!extraFields[item.name]) { return false } - return new ReactSlashMenuItem( + return new ReactSlashMenuItem( item.name, item.execute, item.aliases, diff --git a/frontend/packages/app/src/blocknote-react/hooks/useBlockNote.ts b/frontend/packages/app/src/blocknote-react/hooks/useBlockNote.ts index b3179b94fe..5fc71aafee 100644 --- a/frontend/packages/app/src/blocknote-react/hooks/useBlockNote.ts +++ b/frontend/packages/app/src/blocknote-react/hooks/useBlockNote.ts @@ -4,7 +4,7 @@ import { BlockSchema, DefaultBlockSchema, } from '@mintter/app/src/blocknote-core' -import {HDBlockSchema} from '@mintter/app/src/client/schema' +import 
{HMBlockSchema} from '@mintter/app/src/client/schema' import {DependencyList, FC, useEffect, useState} from 'react' import {blockNoteToMantineTheme} from '../BlockNoteTheme' import {createReactBlockSideMenuFactory} from '../BlockSideMenu/BlockSideMenuFactory' @@ -31,7 +31,7 @@ function useForceUpdate() { /** * Main hook for importing a BlockNote editor into a React project */ -export const useBlockNote = ( +export const useBlockNote = ( options: Partial< BlockNoteEditorOptions & { customElements: CustomElements diff --git a/frontend/packages/app/src/client/__tests__/editor-to-server.test.ts b/frontend/packages/app/src/client/__tests__/editor-to-server.test.ts index 2e2e379b18..81f6e0fbfa 100644 --- a/frontend/packages/app/src/client/__tests__/editor-to-server.test.ts +++ b/frontend/packages/app/src/client/__tests__/editor-to-server.test.ts @@ -102,7 +102,7 @@ describe('Editor to Server: ', () => { // test('single embed', () => { // const extracted = extractContent([ // {type: 'text', text: 'Hello', styles: {}}, - // {type: 'embed', ref: 'hd://foobar'}, + // {type: 'embed', ref: 'hm://foobar'}, // ]) // expect(extracted).toEqual({ @@ -110,7 +110,7 @@ describe('Editor to Server: ', () => { // annotations: [ // { // type: 'embed', - // ref: 'hd://foobar', + // ref: 'hm://foobar', // starts: [5], // ends: [6], // attributes: {}, @@ -162,7 +162,7 @@ describe('editorBlockToServerBlock', () => { children: [], content: [], props: { - ref: 'hd://foo', + ref: 'hm://foo', // why is this garbage required for embed props??: backgroundColor: 'default', textColor: 'default', @@ -174,7 +174,7 @@ describe('editorBlockToServerBlock', () => { id: 'abc', type: 'embed', attributes: {}, - ref: 'hd://foo', + ref: 'hm://foo', }), ) }) @@ -189,7 +189,7 @@ describe('editorBlockToServerBlock', () => { // children: [], // content: [], // props: { - // ref: 'hd://foobar', + // ref: 'hm://foobar', // // TODO: remove this garbage for image props // backgroundColor: 'default', // textColor: 
'default', @@ -201,7 +201,7 @@ describe('editorBlockToServerBlock', () => { // id: 'abc', // type: 'embed', // attributes: {}, - // ref: 'hd://foobar', + // ref: 'hm://foobar', // }), // ) // }) diff --git a/frontend/packages/app/src/client/__tests__/server-to-editor.test.ts b/frontend/packages/app/src/client/__tests__/server-to-editor.test.ts index 3726a5d7f1..c644e5e0d6 100644 --- a/frontend/packages/app/src/client/__tests__/server-to-editor.test.ts +++ b/frontend/packages/app/src/client/__tests__/server-to-editor.test.ts @@ -291,7 +291,7 @@ describe('Editor: ', () => { const result = serverChildrenToEditorChildren([ new BlockNode({ block: new Block({ - id: 'a', + id: 'ab', type: 'image', text: 'new alt image', annotations: [], @@ -300,9 +300,10 @@ describe('Editor: ', () => { }), }), ]) + expect(result).toEqual([ { - id: 'a', + id: 'ab', type: 'image', props: { url: 'ABC', @@ -312,6 +313,7 @@ describe('Editor: ', () => { textAlignment: 'left', textColor: 'default', }, + children: [], content: [{type: 'text', text: 'new alt image', styles: {}}], }, ]) @@ -328,7 +330,7 @@ describe('Editor: ', () => { text: '', annotations: [], attributes: {}, - ref: 'hd://foobar', + ref: 'hm://foobar', }), }), ]) @@ -337,12 +339,13 @@ describe('Editor: ', () => { id: 'a', type: 'embed', props: { - ref: 'hd://foobar', + ref: 'hm://foobar', // junk: backgroundColor: 'default', textAlignment: 'left', textColor: 'default', }, + children: [], }, ]) }) @@ -356,14 +359,14 @@ describe('Editor: ', () => { // annotations: [ // { // type: 'embed', - // ref: 'hd://foobar', + // ref: 'hm://foobar', // starts: [0], // ends: [1], // }, // ], // }), // ) - // expect(result).toEqual([{type: 'embed', ref: 'hd://foobar'}]) + // expect(result).toEqual([{type: 'embed', ref: 'hm://foobar'}]) // }) // test('overlapping annotations + embed', () => { @@ -378,7 +381,7 @@ describe('Editor: ', () => { // }, // { // type: 'embed', - // ref: 'hd://foobar', + // ref: 'hm://foobar', // starts: [3], // ends: [4], 
// }, @@ -393,7 +396,7 @@ describe('Editor: ', () => { // expect(result).toEqual([ // {text: 'A', type: 'text', styles: {}}, // {text: 'BC', type: 'text', styles: {bold: true}}, - // {type: 'embed', ref: 'hd://foobar'}, + // {type: 'embed', ref: 'hm://foobar'}, // {text: 'DE', type: 'text', styles: {italic: true}}, // ]) // }) diff --git a/frontend/packages/app/src/client/editor-to-server.ts b/frontend/packages/app/src/client/editor-to-server.ts index 31e3d10d25..263c9297ec 100644 --- a/frontend/packages/app/src/client/editor-to-server.ts +++ b/frontend/packages/app/src/client/editor-to-server.ts @@ -9,7 +9,7 @@ import { InlineContent, Styles, } from '@mintter/app/src/blocknote-core' -import {hdBlockSchema} from './schema' +import {hmBlockSchema} from './schema' import {TextAnnotation} from '@mintter/shared' function styleMarkToAnnotationType( @@ -113,7 +113,7 @@ export function extractContent(content: InlineContent[]): { } export function editorBlockToServerBlock( - editorBlock: EditorBlock, + editorBlock: EditorBlock, ): ServerBlock { if (!editorBlock.id) throw new Error('this block has no id') @@ -195,7 +195,7 @@ export function editorBlockToServerBlock( function extractChildrenType( block: ServerBlock, - editorBlock: EditorBlock, + editorBlock: EditorBlock, ): ServerBlock { if (editorBlock.props.childrenType) { block.attributes.childrenType = editorBlock.props.childrenType diff --git a/frontend/packages/app/src/client/example-docs.ts b/frontend/packages/app/src/client/example-docs.ts index 212e6f87bb..ad097b9476 100644 --- a/frontend/packages/app/src/client/example-docs.ts +++ b/frontend/packages/app/src/client/example-docs.ts @@ -3,7 +3,7 @@ import { Block, BlockNode, Document, - HDBlockAttributes, + HMBlockAttributes, } from '@mintter/shared' function createAnnotation( @@ -47,7 +47,7 @@ function createSectionNode( type?: 'section' | 'paragraph' | 'heading' id: string annotations?: Annotation[] - attributes?: HDBlockAttributes + attributes?: HMBlockAttributes 
}, children?: BlockNode[], ) { diff --git a/frontend/packages/app/src/client/schema.ts b/frontend/packages/app/src/client/schema.ts index 21a98ec17c..812f5f5b71 100644 --- a/frontend/packages/app/src/client/schema.ts +++ b/frontend/packages/app/src/client/schema.ts @@ -8,7 +8,7 @@ import {FileBlock} from '@mintter/app/src/editor/file' import {ImageBlock} from '@mintter/app/src/editor/image' import {VideoBlock} from '@mintter/app/src/editor/video' -export const hdBlockSchema: BlockSchema = { +export const hmBlockSchema: BlockSchema = { paragraph: defaultBlockSchema.paragraph, heading: defaultBlockSchema.heading, image: ImageBlock, @@ -17,4 +17,4 @@ export const hdBlockSchema: BlockSchema = { file: FileBlock, } -export type HDBlockSchema = TypesMatch +export type HMBlockSchema = TypesMatch diff --git a/frontend/packages/app/src/client/server-to-editor.ts b/frontend/packages/app/src/client/server-to-editor.ts index fafe69954a..fb8d41630d 100644 --- a/frontend/packages/app/src/client/server-to-editor.ts +++ b/frontend/packages/app/src/client/server-to-editor.ts @@ -4,7 +4,7 @@ import { EditorChildrenType, ServerToEditorRecursiveOpts, } from '@mintter/shared' -import {hdBlockSchema} from './schema' +import {hmBlockSchema} from './schema' import { serverBlockNodeToEditorParagraph as _serverBlockNodeToEditorParagraph, serverBlockToHeading as _serverBlockToHeading, @@ -14,14 +14,14 @@ import { export function serverBlockNodeToEditorParagraph( serverBlock: BlockNode, opts: ServerToEditorRecursiveOpts, -): PartialBlock { +): PartialBlock { return _serverBlockNodeToEditorParagraph(serverBlock, opts) } export function serverBlockToHeading( serverBlock: BlockNode, opts?: ServerToEditorRecursiveOpts, -): PartialBlock { +): PartialBlock { return _serverBlockToHeading(serverBlock, opts) } @@ -30,6 +30,6 @@ export function serverChildrenToEditorChildren( opts?: ServerToEditorRecursiveOpts & { childrenType?: EditorChildrenType }, -): PartialBlock[] { +): PartialBlock[] { return 
_serverChildrenToEditorChildren(children, opts) } diff --git a/frontend/packages/app/src/components/rightside-block-widget.tsx b/frontend/packages/app/src/components/rightside-block-widget.tsx index 7cd758f285..efb424d4a5 100644 --- a/frontend/packages/app/src/components/rightside-block-widget.tsx +++ b/frontend/packages/app/src/components/rightside-block-widget.tsx @@ -6,7 +6,7 @@ import {EditorState, Plugin, PluginKey} from '@tiptap/pm/state' import {Decoration, DecorationSet, EditorView} from '@tiptap/pm/view' import {useMemo} from 'react' import {BlockNoteEditor} from '../blocknote-core' -import {HDBlockSchema} from '../client/schema' +import {HMBlockSchema} from '../client/schema' import appError from '../errors' import {useDocCitations} from '../models/content-graph' import {usePublication} from '../models/documents' @@ -20,7 +20,7 @@ export function createRightsideBlockWidgetExtension({ editor, }: { getWidget: WidgetDecorationFactory - editor: BlockNoteEditor + editor: BlockNoteEditor }) { return Extension.create({ name: 'rightside-block', @@ -44,7 +44,7 @@ export function createRightsideBlockWidgetPlugin({ ttEditor, }: { getWidget: WidgetDecorationFactory - editor: BlockNoteEditor + editor: BlockNoteEditor ttEditor: Editor }) { return new Plugin({ @@ -186,7 +186,7 @@ function useBlockCitation() { } class MouseMoveView { - editor: BlockNoteEditor + editor: BlockNoteEditor ttEditor: Editor hoveredBlock: HTMLElement | undefined @@ -195,7 +195,7 @@ class MouseMoveView { editor, ttEditor, }: { - editor: BlockNoteEditor + editor: BlockNoteEditor ttEditor: Editor }) { this.editor = editor diff --git a/frontend/packages/app/src/components/titlebar/common.tsx b/frontend/packages/app/src/components/titlebar/common.tsx index b108762d4c..44fc7d8b6a 100644 --- a/frontend/packages/app/src/components/titlebar/common.tsx +++ b/frontend/packages/app/src/components/titlebar/common.tsx @@ -140,7 +140,7 @@ export function GroupOptionsButton() { function 
getReferenceUrlOfRoute(route: NavRoute) { if (route.key === 'group') { - const url = getPublicEntityUrl(route.groupId) // we use this because group IDs are full URLs with hd://g/ prefix, so this more generic conversion is available. + const url = getPublicEntityUrl(route.groupId) // we use this because group IDs are full URLs with hm://g/ prefix, so this more generic conversion is available. if (!url) return null return { label: 'Group URL', @@ -148,7 +148,7 @@ function getReferenceUrlOfRoute(route: NavRoute) { } } if (route.key === 'publication') { - // docIds currently do not include this hd:// prefix so we use the specific doc url function + // docIds currently do not include this hm:// prefix so we use the specific doc url function const url = getPublicDocUrl(route.documentId, route.versionId) if (!url) return null return { diff --git a/frontend/packages/app/src/components/titlebar/publish-share.tsx b/frontend/packages/app/src/components/titlebar/publish-share.tsx index 53bd66b090..75895b2c5a 100644 --- a/frontend/packages/app/src/components/titlebar/publish-share.tsx +++ b/frontend/packages/app/src/components/titlebar/publish-share.tsx @@ -1,35 +1,23 @@ -import {Tooltip} from '@mintter/app/components/tooltip' -import {copyTextToClipboard} from '@mintter/app/copy-to-clipboard' import {useMyAccount} from '@mintter/app/models/accounts' +import {usePublication, usePublishDraft} from '@mintter/app/models/documents' import { - EditorDraftState, - useDraft, - usePublication, - usePublishDraft, - useWriteDraftWebUrl, -} from '@mintter/app/models/documents' -import { + useDocumentGroups, useGroup, useGroups, usePublishDocToGroup, } from '@mintter/app/models/groups' -import {useDocWebPublications, useSiteList} from '@mintter/app/models/sites' import {useDaemonReady} from '@mintter/app/node-status-context' import {usePopoverState} from '@mintter/app/use-popover-state' -import {getDocUrl} from '@mintter/shared' import { GroupPublicationRouteContext, NavContextProvider, - 
PublicationRouteContext, useNavRoute, useNavigate, useNavigation, } from '@mintter/app/utils/navigation' -import {hostnameStripProtocol} from '@mintter/app/utils/site-hostname' import { Button, Check, - Copy, Dialog, DialogProps, DialogTitle, @@ -42,15 +30,12 @@ import { Select, SizableText, Spinner, - Text, YStack, } from '@mintter/ui' -import {ChevronDown, ChevronUp, Folder, Upload, X} from '@tamagui/lucide-icons' -import {useEffect, useMemo, useRef, useState} from 'react' +import {ChevronDown, ChevronUp, Folder, X} from '@tamagui/lucide-icons' +import {useEffect, useMemo, useState} from 'react' import toast from 'react-hot-toast' -import {useAppDialog} from '../dialog' import DiscardDraftButton from './discard-draft-button' -import {usePublicationDialog} from './publication-dialog' // function DraftPublicationDialog({ // draft, @@ -191,7 +176,7 @@ function GroupPublishDialog({ -
+