Skip to content

Commit

Permalink
fix(repair): schedule and run repair on single node cluster
Browse files Browse the repository at this point in the history
SM 3.2 changed repair param validation so that scheduling and running a repair of a single-node cluster is treated as an error. Although this is acceptable for real users, the ability to schedule/run a repair on a single-node cluster is useful for tests and some experiments.

Fixes #3649
  • Loading branch information
Michal-Leszczynski authored and karol-kokoszka committed Dec 6, 2023
1 parent 9a84eb8 commit 69dbb76
Show file tree
Hide file tree
Showing 2 changed files with 15 additions and 14 deletions.
7 changes: 5 additions & 2 deletions pkg/scyllaclient/client_scylla.go
Original file line number Diff line number Diff line change
Expand Up @@ -470,15 +470,18 @@ func ReplicaHash(replicaSet []string) uint64 {

// Repair invokes async repair and returns the repair command ID.
func (c *Client) Repair(ctx context.Context, keyspace, table, master string, replicaSet []string, ranges []TokenRange) (int32, error) {
hosts := strings.Join(replicaSet, ",")
dr := dumpRanges(ranges)
p := operations.StorageServiceRepairAsyncByKeyspacePostParams{
Context: forceHost(ctx, master),
Keyspace: keyspace,
ColumnFamilies: &table,
Hosts: &hosts,
Ranges: &dr,
}
// Single node cluster repair fails with hosts param
if len(replicaSet) > 1 {
hosts := strings.Join(replicaSet, ",")
p.Hosts = &hosts
}

resp, err := c.scyllaOps.StorageServiceRepairAsyncByKeyspacePost(&p)
if err != nil {
Expand Down
22 changes: 10 additions & 12 deletions pkg/service/repair/plan.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,18 +25,20 @@ type plan struct {
}

func newPlan(ctx context.Context, target Target, client *scyllaclient.Client) (*plan, error) {
filtered, err := filteredHosts(ctx, target, client)
status, err := client.Status(ctx)
if err != nil {
return nil, errors.Wrap(err, "filter hosts")
return nil, errors.Wrap(err, "get status")
}
filtered := filteredHosts(target, status)

p := new(plan)
for _, u := range target.Units {
ring, err := client.DescribeRing(ctx, u.Keyspace)
if err != nil {
return nil, errors.Wrapf(err, "keyspace %s: get ring description", u.Keyspace)
}
if ring.Replication == scyllaclient.LocalStrategy {
// Allow repairing single node cluster for better UX and tests
if ring.Replication == scyllaclient.LocalStrategy && len(status) > 1 {
continue
}

Expand All @@ -53,8 +55,9 @@ func newPlan(ctx context.Context, target Target, client *scyllaclient.Client) (*
Ranges: rep.Ranges,
}

// Don't add keyspace with some ranges not replicated in filtered hosts
if len(rtr.ReplicaSet) <= 1 {
// Don't add keyspace with some ranges not replicated in filtered hosts,
// unless it's a single node cluster.
if len(rtr.ReplicaSet) <= 1 && len(status) > 1 {
skip = true
break
}
Expand Down Expand Up @@ -447,12 +450,7 @@ func (tp tablePlan) MarkRange(repIdx int, r scyllaclient.TokenRange) bool {
}

// filteredHosts returns hosts passing '--dc' and '--ignore-down-hosts' criteria.
func filteredHosts(ctx context.Context, target Target, client *scyllaclient.Client) (*strset.Set, error) {
status, err := client.Status(ctx)
if err != nil {
return nil, errors.Wrap(err, "get status")
}

func filteredHosts(target Target, status scyllaclient.NodeStatusInfoSlice) *strset.Set {
ignoredHosts := strset.New(target.IgnoreHosts...)
dcs := strset.New(target.DC...)
filtered := strset.New()
Expand All @@ -463,7 +461,7 @@ func filteredHosts(ctx context.Context, target Target, client *scyllaclient.Clie
}
}

return filtered, nil
return filtered
}

// filterReplicaSet returns hosts present in filteredHosts and passing '--host' criteria.
Expand Down

0 comments on commit 69dbb76

Please sign in to comment.