Skip to content

Commit

Permalink
Change the names of the host getters in ringDescriber
Browse files Browse the repository at this point in the history
  • Loading branch information
sylwiaszunejko committed Dec 13, 2024
1 parent 2b12b53 commit b128b35
Show file tree
Hide file tree
Showing 12 changed files with 28 additions and 28 deletions.
4 changes: 2 additions & 2 deletions cassandra_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -919,7 +919,7 @@ func TestReconnection(t *testing.T) {
session := createSessionFromCluster(cluster, t)
defer session.Close()

h := session.hostSource.allHosts()[0]
h := session.hostSource.getHostsList()[0]
session.handleNodeDown(h.ConnectAddress(), h.Port())

if h.State() != NodeDown {
Expand Down Expand Up @@ -2273,7 +2273,7 @@ func TestTokenAwareConnPool(t *testing.T) {
session := createSessionFromCluster(cluster, t)
defer session.Close()

expectedPoolSize := cluster.NumConns * len(session.hostSource.allHosts())
expectedPoolSize := cluster.NumConns * len(session.hostSource.getHostsList())

// wait for pool to fill
for i := 0; i < 50; i++ {
Expand Down
2 changes: 1 addition & 1 deletion control.go
Original file line number Diff line number Diff line change
Expand Up @@ -385,7 +385,7 @@ func (c *controlConn) reconnect() {
}

func (c *controlConn) attemptReconnect() (*Conn, error) {
hosts := c.session.hostSource.allHosts()
hosts := c.session.hostSource.getHostsList()
hosts = shuffleHosts(hosts)

// keep the old behavior of connecting to the old host first by moving it to
Expand Down
4 changes: 2 additions & 2 deletions control_ccm_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -108,7 +108,7 @@ func TestControlConn_ReconnectRefreshesRing(t *testing.T) {
}()
assertNodeDown := func() error {
hosts := session.hostSource.currentHosts()
hosts := session.hostSource.getHostsMap()
if len(hosts) != 1 {
return fmt.Errorf("expected 1 host in ring but there were %v", len(hosts))
}
Expand Down Expand Up @@ -146,7 +146,7 @@ func TestControlConn_ReconnectRefreshesRing(t *testing.T) {
}
assertNodeUp := func() error {
hosts := session.hostSource.currentHosts()
hosts := session.hostSource.getHostsMap()
if len(hosts) != len(allCcmHosts) {
return fmt.Errorf("expected %v hosts in ring but there were %v", len(allCcmHosts), len(hosts))
}
Expand Down
2 changes: 1 addition & 1 deletion export_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -9,5 +9,5 @@ var TestLogger = &testLogger{}
var WaitUntilPoolsStopFilling = waitUntilPoolsStopFilling

func GetRingAllHosts(sess *Session) []*HostInfo {
return sess.hostSource.allHosts()
return sess.hostSource.getHostsList()
}
4 changes: 2 additions & 2 deletions host_source.go
Original file line number Diff line number Diff line change
Expand Up @@ -675,11 +675,11 @@ func (s *Session) refreshRingNow() error {
}

func (s *Session) refreshRing() error {
hosts, partitioner, err := s.hostSource.GetHosts()
hosts, partitioner, err := s.hostSource.GetHostsFromSystem()
if err != nil {
return err
}
prevHosts := s.hostSource.currentHosts()
prevHosts := s.hostSource.getHostsMap()

for _, h := range hosts {
if s.cfg.filterHost(h) {
Expand Down
4 changes: 2 additions & 2 deletions integration_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -39,12 +39,12 @@ func TestAuthentication(t *testing.T) {
session.Close()
}

func TestGetHosts(t *testing.T) {
func TestGetHostsFromSystem(t *testing.T) {
clusterHosts := getClusterHosts()
cluster := createCluster()
session := createSessionFromCluster(cluster, t)

hosts, partitioner, err := session.hostSource.GetHosts()
hosts, partitioner, err := session.hostSource.GetHostsFromSystem()

assertTrue(t, "err == nil", err == nil)
assertEqual(t, "len(hosts)", len(clusterHosts), len(hosts))
Expand Down
4 changes: 2 additions & 2 deletions policies.go
Original file line number Diff line number Diff line change
Expand Up @@ -982,7 +982,7 @@ func (d *dcAwareRR) IsOperational(session *Session) error {
return nil
}

hosts := session.hostSource.allHosts()
hosts := session.hostSource.getHostsList()
for _, host := range hosts {
if !session.cfg.filterHost(host) && host.DataCenter() == d.local {
// Policy can work properly only if there is at least one host from target DC
Expand Down Expand Up @@ -1100,7 +1100,7 @@ func (d *rackAwareRR) IsOperational(session *Session) error {
if session.cfg.disableInit || session.cfg.disableControlConn {
return nil
}
hosts := session.hostSource.allHosts()
hosts := session.hostSource.getHostsList()
for _, host := range hosts {
if !session.cfg.filterHost(host) && host.DataCenter() == d.localDC && host.Rack() == d.localRack {
// Policy can work properly only if there is at least one host from target DC+Rack
Expand Down
8 changes: 4 additions & 4 deletions ring_describer.go
Original file line number Diff line number Diff line change
Expand Up @@ -103,8 +103,8 @@ func isZeroToken(host *HostInfo) bool {
return len(host.tokens) == 0
}

// GetHosts returns a list of hosts found via queries to system.local and system.peers
func (r *ringDescriber) GetHosts() ([]*HostInfo, string, error) {
// GetHostsFromSystem returns a list of hosts found via queries to system.local and system.peers
func (r *ringDescriber) GetHostsFromSystem() ([]*HostInfo, string, error) {
r.mu.Lock()
defer r.mu.Unlock()

Expand Down Expand Up @@ -204,7 +204,7 @@ func (r *ringDescriber) getHost(hostID string) *HostInfo {
return host
}

func (r *ringDescriber) allHosts() []*HostInfo {
func (r *ringDescriber) getHostsList() []*HostInfo {
r.mu.RLock()
hosts := make([]*HostInfo, 0, len(r.hosts))
for _, host := range r.hosts {
Expand All @@ -214,7 +214,7 @@ func (r *ringDescriber) allHosts() []*HostInfo {
return hosts
}

func (r *ringDescriber) currentHosts() map[string]*HostInfo {
func (r *ringDescriber) getHostsMap() map[string]*HostInfo {
r.mu.RLock()
hosts := make(map[string]*HostInfo, len(r.hosts))
for k, v := range r.hosts {
Expand Down
4 changes: 2 additions & 2 deletions ring_describer_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -322,10 +322,10 @@ func marshalMetadataMust(metadata resultMetadata, data []interface{}) [][]byte {
return res
}

func TestGetHosts(t *testing.T) {
func TestGetHostsFromSystem(t *testing.T) {
r := &ringDescriber{control: &mockControlConn{}, cfg: &ClusterConfig{}}

hosts, _, err := r.GetHosts()
hosts, _, err := r.GetHostsFromSystem()
if err != nil {
t.Fatalf("unable to get hosts: %v", err)
}
Expand Down
10 changes: 5 additions & 5 deletions scylla_shard_aware_port_common_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ func testShardAwarePortNoReconnections(t *testing.T, makeCluster makeClusterTest
return
}

hosts := sess.hostSource.allHosts()
hosts := sess.hostSource.getHostsList()
for _, host := range hosts {
t.Logf("checking host %q hostID: %q", host.hostname, host.hostId)
hostPool, ok := sess.pool.getPool(host)
Expand Down Expand Up @@ -191,7 +191,7 @@ func testShardAwarePortUnusedIfNotEnabled(t *testing.T, makeCluster makeClusterT
t.Fatal(err)
}

hosts := sess.hostSource.allHosts()
hosts := sess.hostSource.getHostsList()
for _, host := range hosts {
t.Logf("checking host %s", host.hostname)
hostPool, _ := sess.pool.getPool(host)
Expand Down Expand Up @@ -237,7 +237,7 @@ func getShardAwareAddress(pool *hostConnPool, useTLS bool) string {
}

func triggerPoolsRefill(sess *Session) {
hosts := sess.hostSource.allHosts()
hosts := sess.hostSource.getHostsList()
for _, host := range hosts {
hostPool, _ := sess.pool.getPool(host)
go hostPool.fill_debounce()
Expand All @@ -263,7 +263,7 @@ func waitUntilPoolsStopFilling(ctx context.Context, sess *Session, timeout time.
}

func checkIfPoolsStoppedFilling(sess *Session) bool {
hosts := sess.hostSource.allHosts()
hosts := sess.hostSource.getHostsList()
for _, host := range hosts {
hostPool, _ := sess.pool.getPool(host)

Expand All @@ -280,7 +280,7 @@ func checkIfPoolsStoppedFilling(sess *Session) bool {
}

func checkIfPoolsAreFull(sess *Session) bool {
hosts := sess.hostSource.allHosts()
hosts := sess.hostSource.getHostsList()
for _, host := range hosts {
hostPool, _ := sess.pool.getPool(host)

Expand Down
8 changes: 4 additions & 4 deletions session.go
Original file line number Diff line number Diff line change
Expand Up @@ -261,7 +261,7 @@ func (s *Session) init() error {

if !s.cfg.DisableInitialHostLookup {
var partitioner string
newHosts, partitioner, err := s.hostSource.GetHosts()
newHosts, partitioner, err := s.hostSource.GetHostsFromSystem()
if err != nil {
return err
}
Expand Down Expand Up @@ -371,7 +371,7 @@ func (s *Session) init() error {
newer, _ := checkSystemSchema(s.control)
s.useSystemSchema = newer
} else {
version := s.hostSource.allHosts()[0].Version()
version := s.hostSource.getHostsList()[0].Version()
s.useSystemSchema = version.AtLeast(3, 0, 0)
s.hasAggregatesAndFunctions = version.AtLeast(2, 2, 0)
}
Expand Down Expand Up @@ -414,7 +414,7 @@ func (s *Session) reconnectDownedHosts(intv time.Duration) {
for {
select {
case <-reconnectTicker.C:
hosts := s.hostSource.allHosts()
hosts := s.hostSource.getHostsList()

// Print session.hostSource for debug.
if gocqlDebug {
Expand Down Expand Up @@ -618,7 +618,7 @@ func (s *Session) TabletsMetadata() (TabletInfoList, error) {
}

func (s *Session) getConn() *Conn {
hosts := s.hostSource.allHosts()
hosts := s.hostSource.getHostsList()
for _, host := range hosts {
if !host.IsUp() {
continue
Expand Down
2 changes: 1 addition & 1 deletion tablet_integration_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ func TestTablets(t *testing.T) {
panic(fmt.Sprintf("unable to create table: %v", err))
}

hosts := session.hostSource.allHosts()
hosts := session.hostSource.getHostsList()

hostAddresses := []string{}
for _, host := range hosts {
Expand Down

0 comments on commit b128b35

Please sign in to comment.