Commit

add ms cluster case
Signed-off-by: Ryan Leung <[email protected]>
rleungx committed Oct 30, 2024
1 parent e257097 commit a47e809
Showing 5 changed files with 129 additions and 21 deletions.
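In short, this commit renames realClusterSuite to clusterSuite, adds an ms flag to deployTiupPlayground so the TiUP playground can also be deployed in PD microservice mode (--pd.mode ms with one tso and one scheduling instance, both backed by ./bin/pd-server), introduces a parallel msClusterSuite, and exercises it with a new TS test. For orientation, here is a minimal sketch of how a test file would opt into the microservice-mode cluster; the package name realcluster and the suite/test names are assumptions for illustration, and the commit's real instance is msTSSuite in ts_test.go below:

package realcluster

import (
	"testing"

	"github.com/stretchr/testify/suite"
)

// Hypothetical example, not part of this commit: embedding msClusterSuite is
// all a suite needs to get a microservice-mode playground deployed in
// SetupSuite and destroyed afterwards; suiteName only feeds the TiUP tag.
type msExampleSuite struct {
	msClusterSuite
}

func TestMSExample(t *testing.T) {
	suite.Run(t, &msExampleSuite{
		msClusterSuite: msClusterSuite{suiteName: "example"},
	})
}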
@@ -29,7 +29,7 @@ import (
"go.uber.org/zap"
)

type realClusterSuite struct {
type clusterSuite struct {
suite.Suite

clusterCnt int
@@ -42,7 +42,7 @@ var (
)

// SetupSuite will run before the tests in the suite are run.
func (s *realClusterSuite) SetupSuite() {
func (s *clusterSuite) SetupSuite() {
t := s.T()

// Clean the data dir. It is the default data dir of TiUP.
@@ -60,36 +60,36 @@ func (s *realClusterSuite) SetupSuite() {
}

// TearDownSuite will run after all the tests in the suite have been run.
func (s *realClusterSuite) TearDownSuite() {
func (s *clusterSuite) TearDownSuite() {
// Even if the cluster deployment fails, we still need to destroy the cluster.
// If the cluster does not fail to deploy, the cluster will be destroyed in
// the cleanup function, and this code will have no effect.
s.clusterCnt++
s.stopRealCluster(s.T())
}

func (s *realClusterSuite) startRealCluster(t *testing.T) {
func (s *clusterSuite) startRealCluster(t *testing.T) {
log.Info("start to deploy a real cluster")

tag := s.tag()
deployTiupPlayground(t, tag)
deployTiupPlayground(t, tag, false)
waitTiupReady(t, tag)
s.clusterCnt++
}

func (s *realClusterSuite) stopRealCluster(t *testing.T) {
func (s *clusterSuite) stopRealCluster(t *testing.T) {
s.clusterCnt--

log.Info("start to destroy a real cluster", zap.String("tag", s.tag()))
destroy(t, s.tag())
time.Sleep(5 * time.Second)
}

func (s *realClusterSuite) tag() string {
func (s *clusterSuite) tag() string {
return fmt.Sprintf("pd_real_cluster_test_%s_%d", s.suiteName, s.clusterCnt)
}

func (s *realClusterSuite) restart() {
func (s *clusterSuite) restart() {
tag := s.tag()
log.Info("start to restart", zap.String("tag", tag))
s.stopRealCluster(s.T())
@@ -111,7 +111,7 @@ func destroy(t *testing.T, tag string) {
log.Info("destroy success", zap.String("tag", tag))
}

func deployTiupPlayground(t *testing.T, tag string) {
func deployTiupPlayground(t *testing.T, tag string, ms bool) {
curPath, err := os.Getwd()
require.NoError(t, err)
require.NoError(t, os.Chdir("../../.."))
@@ -130,15 +130,32 @@ func deployTiupPlayground(t *testing.T, tag string) {
if !fileExists(playgroundLogDir) {
require.NoError(t, os.MkdirAll(playgroundLogDir, 0755))
}

// nolint:errcheck
go func() {
runCommand("sh", "-c",
tiupBin+` playground nightly --kv 3 --tiflash 1 --db 1 --pd 3 \
--without-monitor --tag `+tag+` --pd.binpath ./bin/pd-server \
if ms {
runCommand("sh", "-c",
tiupBin+` playground nightly --pd.mode ms --kv 3 --tiflash 1 --db 1 --pd 3 --tso 1 --scheduling 1 \
--without-monitor --tag `+tag+` \
--pd.binpath ./bin/pd-server \
--kv.binpath ./third_bin/tikv-server \
--db.binpath ./third_bin/tidb-server \
--tiflash.binpath ./third_bin/tiflash \
--tso.binpath ./bin/pd-server \
--scheduling.binpath ./bin/pd-server \
--pd.config ./tests/integrations/realcluster/pd.toml \
> `+filepath.Join(playgroundLogDir, tag+".log")+` 2>&1 & `)
} else {
runCommand("sh", "-c",
tiupBin+` playground nightly --kv 3 --tiflash 1 --db 1 --pd 3 \
--without-monitor --tag `+tag+` \
--pd.binpath ./bin/pd-server \
--kv.binpath ./third_bin/tikv-server \
--db.binpath ./third_bin/tidb-server --tiflash.binpath ./third_bin/tiflash \
--db.binpath ./third_bin/tidb-server \
--tiflash.binpath ./third_bin/tiflash \
--pd.config ./tests/integrations/realcluster/pd.toml \
> `+filepath.Join(playgroundLogDir, tag+".log")+` 2>&1 & `)
}
}()

// Avoid changing the dir before executing `tiup playground`.
@@ -165,3 +182,58 @@ func waitTiupReady(t *testing.T, tag string) {
}
require.Failf(t, "TiUP is not ready", "tag: %s", tag)
}

type msClusterSuite struct {
suite.Suite

clusterCnt int
suiteName string
}

// SetupSuite will run before the tests in the suite are run.
func (s *msClusterSuite) SetupSuite() {
t := s.T()

// Clean the data dir. It is the default data dir of TiUP.
dataDir := filepath.Join(os.Getenv("HOME"), ".tiup", "data", "pd_ms_cluster_test_"+s.suiteName+"_*")
matches, err := filepath.Glob(dataDir)
require.NoError(t, err)

for _, match := range matches {
require.NoError(t, runCommand("rm", "-rf", match))
}
s.startCluster(t)
t.Cleanup(func() {
s.stopCluster(t)
})
}

// TearDownSuite will run after all the tests in the suite have been run.
func (s *msClusterSuite) TearDownSuite() {
// Even if the cluster deployment fails, we still need to destroy the cluster.
// If the cluster does not fail to deploy, the cluster will be destroyed in
// the cleanup function, and this code will have no effect.
s.clusterCnt++
s.stopCluster(s.T())
}

func (s *msClusterSuite) startCluster(t *testing.T) {
log.Info("start to deploy a ms cluster")

tag := s.tag()
deployTiupPlayground(t, tag, true)
waitTiupReady(t, tag)
s.clusterCnt++
}

func (s *msClusterSuite) stopCluster(t *testing.T) {
s.clusterCnt--

log.Info("start to destroy a ms cluster", zap.String("tag", s.tag()))
destroy(t, s.tag())
time.Sleep(5 * time.Second)
}

func (s *msClusterSuite) tag() string {
return fmt.Sprintf("pd_ms_cluster_test_%s_%d", s.suiteName, s.clusterCnt)
}
4 changes: 2 additions & 2 deletions tests/integrations/realcluster/cluster_id_test.go
@@ -26,12 +26,12 @@ import (
)

type clusterIDSuite struct {
realClusterSuite
clusterSuite
}

func TestClusterID(t *testing.T) {
suite.Run(t, &clusterIDSuite{
realClusterSuite: realClusterSuite{
clusterSuite: clusterSuite{
suiteName: "cluster_id",
},
})
4 changes: 2 additions & 2 deletions tests/integrations/realcluster/reboot_pd_test.go
@@ -24,12 +24,12 @@ import (
)

type rebootPDSuite struct {
realClusterSuite
clusterSuite
}

func TestRebootPD(t *testing.T) {
suite.Run(t, &rebootPDSuite{
realClusterSuite: realClusterSuite{
clusterSuite: clusterSuite{
suiteName: "reboot_pd",
},
})
4 changes: 2 additions & 2 deletions tests/integrations/realcluster/scheduler_test.go
@@ -30,12 +30,12 @@ import (
)

type schedulerSuite struct {
realClusterSuite
clusterSuite
}

func TestScheduler(t *testing.T) {
suite.Run(t, &schedulerSuite{
realClusterSuite: realClusterSuite{
clusterSuite: clusterSuite{
suiteName: "scheduler",
},
})
40 changes: 38 additions & 2 deletions tests/integrations/realcluster/ts_test.go
@@ -22,12 +22,12 @@ import (
)

type tsSuite struct {
realClusterSuite
clusterSuite
}

func TestTS(t *testing.T) {
suite.Run(t, &tsSuite{
realClusterSuite: realClusterSuite{
clusterSuite: clusterSuite{
suiteName: "ts",
},
})
@@ -56,3 +56,39 @@ func (s *tsSuite) TestTS() {

db.MustClose()
}

type msTSSuite struct {
msClusterSuite
}

func TestMSTS(t *testing.T) {
suite.Run(t, &msTSSuite{
msClusterSuite: msClusterSuite{
suiteName: "ts",
},
})
}

func (s *msTSSuite) TestTS() {
re := require.New(s.T())

db := OpenTestDB(s.T())
db.MustExec("use test")
db.MustExec("drop table if exists t")
db.MustExec("create table t(a int, index i(a))")
db.MustExec("insert t values (1), (2), (3)")
var rows int
err := db.inner.Raw("select count(*) from t").Row().Scan(&rows)
re.NoError(err)
re.Equal(3, rows)

var ts uint64
err = db.inner.Begin().Raw("select @@tidb_current_ts").Scan(&ts).Rollback().Error
re.NoError(err)
re.NotEqual(0, GetTimeFromTS(ts))

db.MustClose()
}
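The final assertion above relies on GetTimeFromTS (a helper in this test package, not shown in this diff) to confirm that the TSO fetched via @@tidb_current_ts decodes to a meaningful wall-clock time, i.e. that the microservice-mode tso instance is actually allocating timestamps. Below is a minimal, self-contained sketch of that decoding, assuming the usual PD TSO layout of physical milliseconds in the high bits and an 18-bit logical counter in the low bits; the layout is an assumption stated here, not taken from this diff.

package main

import (
	"fmt"
	"time"
)

// logicalBits is the assumed width of the logical counter in a PD TSO.
const logicalBits = 18

// physicalTime recovers the wall-clock part of a TSO; a zero result would mean
// no real timestamp was allocated, which is what the test above guards against.
func physicalTime(ts uint64) time.Time {
	return time.UnixMilli(int64(ts >> logicalBits))
}

func main() {
	// Compose a TSO from the current time and logical counter 7, then decode it.
	ts := uint64(time.Now().UnixMilli())<<logicalBits | 7
	fmt.Println(physicalTime(ts)) // prints the current wall-clock time
}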
