diff --git a/auth/main_test.go b/auth/main_test.go
new file mode 100644
index 0000000000..c82624696e
--- /dev/null
+++ b/auth/main_test.go
@@ -0,0 +1,18 @@
+package auth
+
+import (
+ "os"
+ "testing"
+
+ "github.com/couchbase/sync_gateway/base"
+)
+
+func TestMain(m *testing.M) {
+ base.GTestBucketPool = base.NewTestBucketPool(base.FlushBucketEmptierFunc, base.NoopInitFunc)
+
+ status := m.Run()
+
+ base.GTestBucketPool.Close()
+
+ os.Exit(status)
+}
diff --git a/auth/oidc.go b/auth/oidc.go
index 3696e13e3c..c014e335fa 100644
--- a/auth/oidc.go
+++ b/auth/oidc.go
@@ -10,6 +10,7 @@
package auth
import (
+ "errors"
"fmt"
"net/http"
"net/url"
@@ -30,6 +31,12 @@ const (
var OIDCDiscoveryRetryWait = 500 * time.Millisecond
+// Request parameter to specify the OpenID Connect provider to be used for authentication,
+// from the list of providers defined in the Sync Gateway configuration.
+var OIDCAuthProvider = "provider"
+
+var ErrAddURLQueryParam = errors.New("URL, parameter name and value must not be empty")
+
// Options for OpenID Connect
type OIDCOptions struct {
Providers OIDCProviderMap `json:"providers,omitempty"` // List of OIDC issuers
@@ -57,7 +64,7 @@ type OIDCProvider struct {
type OIDCProviderMap map[string]*OIDCProvider
-type OIDCCallbackURLFunc func() string
+type OIDCCallbackURLFunc func(string, bool) string
func (opm OIDCProviderMap) GetDefaultProvider() *OIDCProvider {
for _, provider := range opm {
@@ -93,7 +100,7 @@ func (op *OIDCProvider) GetClient(buildCallbackURLFunc OIDCCallbackURLFunc) *oid
// If the redirect URL is not defined for the provider generate it from the
// handler request and set it on the provider
if op.CallbackURL == nil || *op.CallbackURL == "" {
- callbackURL := buildCallbackURLFunc()
+ callbackURL := buildCallbackURLFunc(op.Name, op.IsDefault)
if callbackURL != "" {
op.CallbackURL = &callbackURL
}
@@ -282,3 +289,20 @@ func OIDCToHTTPError(err error) error {
}
return err
}
+
+func AddURLQueryParam(strURL, name, value string) (string, error) {
+ if strURL == "" || name == "" || value == "" {
+ return "", ErrAddURLQueryParam
+ }
+ uri, err := url.Parse(strURL)
+ if err != nil {
+ return "", err
+ }
+ rawQuery, err := url.ParseQuery(uri.RawQuery)
+ if err != nil {
+ return "", err
+ }
+ rawQuery.Add(name, value)
+ uri.RawQuery = rawQuery.Encode()
+ return uri.String(), nil
+}
diff --git a/auth/oidc_test.go b/auth/oidc_test.go
index 1f368a9d31..036ed32dc2 100644
--- a/auth/oidc_test.go
+++ b/auth/oidc_test.go
@@ -10,7 +10,9 @@
package auth
import (
+ "errors"
"net/http"
+ "net/url"
"strconv"
"testing"
"time"
@@ -253,7 +255,7 @@ func TestOIDCProvider_InitOIDCClient(t *testing.T) {
}
if test.Provider != nil {
- client := test.Provider.GetClient(func() string { return "" })
+ client := test.Provider.GetClient(func(string, bool) string { return "" })
if test.ExpectOIDCClient {
assert.NotEqual(tt, (*oidc.Client)(nil), client)
} else {
@@ -378,3 +380,69 @@ func TestOIDCToHTTPError(t *testing.T) {
assert.Error(t, httpErr)
assert.Contains(t, httpErr.Error(), strconv.Itoa(http.StatusBadRequest))
}
+
+func TestAddURLQueryParam(t *testing.T) {
+ var oidcAuthProviderGoogle = "google"
+ tests := []struct {
+ name string
+ inputCallbackURL string
+ inputParamName string
+ inputParamValue string
+ wantCallbackURL string
+ wantError error
+ }{{
+ name: "Add provider to callback URL",
+ inputCallbackURL: "http://localhost:4984/default/_oidc_callback",
+ inputParamName: OIDCAuthProvider,
+ inputParamValue: oidcAuthProviderGoogle,
+ wantCallbackURL: "http://localhost:4984/default/_oidc_callback?provider=google",
+ }, {
+ name: "Add provider to callback URL with ? character",
+ inputCallbackURL: "http://localhost:4984/default/_oidc_callback?",
+ inputParamName: OIDCAuthProvider,
+ inputParamValue: oidcAuthProviderGoogle,
+ wantCallbackURL: "http://localhost:4984/default/_oidc_callback?provider=google",
+ }, {
+ name: "Add provider to empty callback URL",
+ inputCallbackURL: "",
+ inputParamName: OIDCAuthProvider,
+ inputParamValue: oidcAuthProviderGoogle,
+ wantCallbackURL: "",
+ wantError: ErrAddURLQueryParam,
+ }, {
+ name: "Add empty provider value to callback URL",
+ inputCallbackURL: "http://localhost:4984/default/_oidc_callback",
+ inputParamName: OIDCAuthProvider,
+ inputParamValue: "",
+ wantCallbackURL: "",
+ wantError: ErrAddURLQueryParam,
+ }, {
+ name: "Add empty provider name to callback URL",
+ inputCallbackURL: "http://localhost:4984/default/_oidc_callback",
+ inputParamName: "",
+ inputParamValue: oidcAuthProviderGoogle,
+ wantCallbackURL: "",
+ wantError: ErrAddURLQueryParam,
+ }, {
+ name: "Add provider to callback URL with illegal value in query param",
+ inputCallbackURL: "http://localhost:4984/default/_oidc_callback?provider=%%3",
+ inputParamName: OIDCAuthProvider,
+ inputParamValue: oidcAuthProviderGoogle,
+ wantCallbackURL: "",
+ wantError: url.EscapeError("%%3"),
+ }, {
+ name: "Add provider to callback URL with missing protocol scheme",
+ inputCallbackURL: "://localhost:4984/default/_oidc_callback",
+ inputParamName: OIDCAuthProvider,
+ inputParamValue: oidcAuthProviderGoogle,
+ wantCallbackURL: "",
+ wantError: &url.Error{Op: "parse", URL: "://localhost:4984/default/_oidc_callback", Err: errors.New("missing protocol scheme")},
+ }}
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ callbackURL, err := AddURLQueryParam(test.inputCallbackURL, test.inputParamName, test.inputParamValue)
+ assert.Equal(t, test.wantError, err)
+ assert.Equal(t, test.wantCallbackURL, callbackURL)
+ })
+ }
+}
diff --git a/base/bucket_gocb.go b/base/bucket_gocb.go
index a042614695..40c133422f 100644
--- a/base/bucket_gocb.go
+++ b/base/bucket_gocb.go
@@ -72,8 +72,6 @@ type CouchbaseBucketGoCB struct {
// Creates a Bucket that talks to a real live Couchbase server.
func GetCouchbaseBucketGoCB(spec BucketSpec) (bucket *CouchbaseBucketGoCB, err error) {
-
- // TODO: Push the above down into spec.GetConnString
connString, err := spec.GetGoCBConnString()
if err != nil {
Warnf("Unable to parse server value: %s error: %v", SD(spec.Server), err)
@@ -86,17 +84,17 @@ func GetCouchbaseBucketGoCB(spec BucketSpec) (bucket *CouchbaseBucketGoCB, err e
return nil, err
}
- password := ""
+ bucketPassword := ""
// Check for client cert (x.509) authentication
if spec.Certpath != "" {
- Infof(KeyAuth, "Attempting cert authentication against bucket %s on %s", MD(spec.BucketName), MD(connString))
+ Infof(KeyAuth, "Attempting cert authentication against bucket %s on %s", MD(spec.BucketName), MD(spec.Server))
certAuthErr := cluster.Authenticate(gocb.CertAuthenticator{})
if certAuthErr != nil {
Infof(KeyAuth, "Error Attempting certificate authentication %s", certAuthErr)
return nil, pkgerrors.WithStack(certAuthErr)
}
} else if spec.Auth != nil {
- Infof(KeyAuth, "Attempting credential authentication against bucket %s on %s", MD(spec.BucketName), MD(connString))
+ Infof(KeyAuth, "Attempting credential authentication against bucket %s on %s", MD(spec.BucketName), MD(spec.Server))
user, pass, _ := spec.Auth.GetCredentials()
authErr := cluster.Authenticate(gocb.PasswordAuthenticator{
Username: user,
@@ -105,11 +103,15 @@ func GetCouchbaseBucketGoCB(spec BucketSpec) (bucket *CouchbaseBucketGoCB, err e
// If RBAC authentication fails, revert to non-RBAC authentication by including the password to OpenBucket
if authErr != nil {
Warnf("RBAC authentication against bucket %s as user %s failed - will re-attempt w/ bucketname, password", MD(spec.BucketName), UD(user))
- password = pass
+ bucketPassword = pass
}
}
- goCBBucket, err := cluster.OpenBucket(spec.BucketName, password)
+ return GetCouchbaseBucketGoCBFromAuthenticatedCluster(cluster, spec, bucketPassword)
+}
+
+func GetCouchbaseBucketGoCBFromAuthenticatedCluster(cluster *gocb.Cluster, spec BucketSpec, bucketPassword string) (bucket *CouchbaseBucketGoCB, err error) {
+ goCBBucket, err := cluster.OpenBucket(spec.BucketName, bucketPassword)
if err != nil {
Infof(KeyAll, "Error opening bucket %s: %v", spec.BucketName, err)
return nil, pkgerrors.WithStack(err)
@@ -181,9 +183,7 @@ func GetCouchbaseBucketGoCB(spec BucketSpec) (bucket *CouchbaseBucketGoCB, err e
bucket.Bucket.SetN1qlTimeout(bucket.spec.GetViewQueryTimeout())
Infof(KeyAll, "Set query timeouts for bucket %s to cluster:%v, bucket:%v", spec.BucketName, cluster.N1qlTimeout(), bucket.N1qlTimeout())
-
return bucket, err
-
}
func (bucket *CouchbaseBucketGoCB) GetBucketCredentials() (username, password string) {
@@ -1786,6 +1786,35 @@ func (bucket *CouchbaseBucketGoCB) Incr(k string, amt, def uint64, exp uint32) (
}
+func (bucket *CouchbaseBucketGoCB) GetDDocs(into interface{}) error {
+ bucketManager, err := bucket.getBucketManager()
+ if err != nil {
+ return err
+ }
+
+ ddocs, err := bucketManager.GetDesignDocuments()
+ if err != nil {
+ return err
+ }
+
+ result := make(map[string]*gocb.DesignDocument, len(ddocs))
+ for _, ddoc := range ddocs {
+ result[ddoc.Name] = ddoc
+ }
+
+ resultBytes, err := JSONMarshal(result)
+ if err != nil {
+ return err
+ }
+
+ // Deserialize []byte into "into" empty interface
+ if err := JSONUnmarshal(resultBytes, into); err != nil {
+ return err
+ }
+
+ return nil
+}
+
func (bucket *CouchbaseBucketGoCB) GetDDoc(docname string, into interface{}) error {
bucketManager, err := bucket.getBucketManager()
@@ -1882,7 +1911,18 @@ func (bucket *CouchbaseBucketGoCB) PutDDoc(docname string, value interface{}) er
return bucket.putDDocForTombstones(gocbDesignDoc)
}
- return manager.UpsertDesignDocument(gocbDesignDoc)
+ // Retry for all errors (The view service sporadically returns 500 status codes with Erlang errors (for unknown reasons) - E.g: 500 {"error":"case_clause","reason":"false"})
+ var worker RetryWorker = func() (bool, error, interface{}) {
+ err := manager.UpsertDesignDocument(gocbDesignDoc)
+ if err != nil {
+ Warnf("Got error from UpsertDesignDocument: %v - Retrying...", err)
+ return true, err, nil
+ }
+ return false, nil, nil
+ }
+
+ err, _ = RetryLoop("PutDDocRetryLoop", worker, CreateSleeperFunc(5, 100))
+ return err
}
@@ -2346,10 +2386,22 @@ func (bucket *CouchbaseBucketGoCB) Flush() error {
}
+// BucketItemCount first tries to retrieve an accurate bucket count via N1QL,
+// but falls back to the REST API if that cannot be done (when there's no index to count all items in a bucket)
+func (bucket *CouchbaseBucketGoCB) BucketItemCount() (itemCount int, err error) {
+ itemCount, err = bucket.QueryBucketItemCount()
+ if err == nil {
+ return itemCount, nil
+ }
+
+ itemCount, err = bucket.APIBucketItemCount()
+ return itemCount, err
+}
+
// Get the number of items in the bucket.
// GOCB doesn't currently offer a way to do this, and so this is a workaround to go directly
// to Couchbase Server REST API.
-func (bucket *CouchbaseBucketGoCB) BucketItemCount() (itemCount int, err error) {
+func (bucket *CouchbaseBucketGoCB) APIBucketItemCount() (itemCount int, err error) {
uri := fmt.Sprintf("/pools/default/buckets/%s", bucket.Name())
resp, err := bucket.mgmtRequest(http.MethodGet, uri, "application/json", nil)
if err != nil {
@@ -2379,6 +2431,22 @@ func (bucket *CouchbaseBucketGoCB) BucketItemCount() (itemCount int, err error)
return int(itemCountFloat), nil
}
+// QueryBucketItemCount uses a request plus query to get the number of items in a bucket, as the REST API can be slow to update its value.
+func (bucket *CouchbaseBucketGoCB) QueryBucketItemCount() (itemCount int, err error) {
+ r, err := bucket.Query("SELECT COUNT(1) AS count FROM `$_bucket`", nil, gocb.RequestPlus, true)
+ if err != nil {
+ return -1, err
+ }
+ var val struct {
+ Count int `json:"count"`
+ }
+ err = r.One(&val)
+ if err != nil {
+ return -1, err
+ }
+ return val.Count, nil
+}
+
func (bucket *CouchbaseBucketGoCB) getExpirySingleAttempt(k string) (expiry uint32, getMetaError error) {
bucket.singleOps <- struct{}{}
@@ -2653,7 +2721,7 @@ func AsGoCBBucket(bucket Bucket) (*CouchbaseBucketGoCB, bool) {
underlyingBucket = typedBucket.GetUnderlyingBucket()
case *LeakyBucket:
underlyingBucket = typedBucket.GetUnderlyingBucket()
- case TestBucket:
+ case *TestBucket:
underlyingBucket = typedBucket.Bucket
default:
// bail out for unrecognised/unsupported buckets
diff --git a/base/bucket_gocb_test.go b/base/bucket_gocb_test.go
index 9b874444e3..5230f50275 100644
--- a/base/bucket_gocb_test.go
+++ b/base/bucket_gocb_test.go
@@ -62,7 +62,7 @@ func TestSetGetRaw(t *testing.T) {
defer testBucket.Close()
bucket := testBucket.Bucket
- key := "TestSetGetRaw2"
+ key := getTestKeyNamespace(t)
val := []byte("bar")
_, _, err := bucket.GetRaw(key)
@@ -91,7 +91,7 @@ func TestAddRaw(t *testing.T) {
defer testBucket.Close()
bucket := testBucket.Bucket
- key := "TestAddRaw"
+ key := getTestKeyNamespace(t)
val := []byte("bar")
_, _, err := bucket.GetRaw(key)
@@ -135,19 +135,18 @@ func TestAddRawTimeoutRetry(t *testing.T) {
gocbBucket, ok := testBucket.Bucket.(*CouchbaseBucketGoCB)
if ok {
- gocbBucket.Bucket.SetOperationTimeout(100 * time.Millisecond)
+ gocbBucket.Bucket.SetOperationTimeout(250 * time.Millisecond)
}
- keyPrefix := "TestAddRawTimeout"
largeDoc := make([]byte, 1000000)
rand.Read(largeDoc)
var wg sync.WaitGroup
- for i := 0; i < 100; i++ {
+ for i := 0; i < 50; i++ {
wg.Add(1)
go func(i int) {
defer wg.Done()
- key := fmt.Sprintf("%s_%d", keyPrefix, i)
+ key := fmt.Sprintf("%s_%d", getTestKeyNamespace(t), i)
added, err := bucket.AddRaw(key, 0, largeDoc)
if err != nil {
if pkgerrors.Cause(err) != gocb.ErrTimeout {
@@ -169,7 +168,6 @@ func TestBulkGetRaw(t *testing.T) {
defer testBucket.Close()
bucket := testBucket.Bucket
- keyPrefix := "TestBulkGetRaw"
keySet := make([]string, 1000)
valueSet := make(map[string][]byte, 1000)
@@ -185,7 +183,7 @@ func TestBulkGetRaw(t *testing.T) {
for i := 0; i < 1000; i++ {
iStr := strconv.Itoa(i)
- key := keyPrefix + iStr
+ key := getTestKeyNamespace(t) + iStr
val := []byte("bar" + iStr)
keySet[i] = key
valueSet[key] = val
@@ -236,7 +234,7 @@ func TestWriteCasBasic(t *testing.T) {
defer testBucket.Close()
bucket := testBucket.Bucket
- key := "TestWriteCas"
+ key := getTestKeyNamespace(t)
val := []byte("bar2")
_, _, err := bucket.GetRaw(key)
@@ -278,7 +276,7 @@ func TestWriteCasAdvanced(t *testing.T) {
defer testBucket.Close()
bucket := testBucket.Bucket
- key := "TestWriteCas"
+ key := getTestKeyNamespace(t)
_, _, err := bucket.GetRaw(key)
if err == nil {
@@ -322,14 +320,14 @@ func TestSetBulk(t *testing.T) {
defer testBucket.Close()
bucket := testBucket.Bucket
- key := "TestSetBulk1"
- key2 := "TestSetBulk2"
- key3 := "TestSetBulk3"
+ key1 := getTestKeyNamespace(t) + "1"
+ key2 := getTestKeyNamespace(t) + "2"
+ key3 := getTestKeyNamespace(t) + "3"
var returnVal interface{}
// Cleanup
defer func() {
- keys2del := []string{key, key2, key3}
+ keys2del := []string{key1, key2, key3}
for _, key2del := range keys2del {
err := bucket.Delete(key2del)
if err != nil {
@@ -339,20 +337,20 @@ func TestSetBulk(t *testing.T) {
}
}()
- _, err := bucket.Get(key, &returnVal)
+ _, err := bucket.Get(key1, &returnVal)
if err == nil {
t.Errorf("Key should not exist yet, expected error but got nil")
}
// Write a single key, get cas val: casStale
casZero := uint64(0)
- casStale, err := bucket.WriteCas(key, 0, 0, casZero, "key-initial", sgbucket.Raw)
+ casStale, err := bucket.WriteCas(key1, 0, 0, casZero, "key-initial", sgbucket.Raw)
if err != nil {
t.Errorf("Error doing WriteCas: %v", err)
}
// Update that key so that casStale is now stale, get casFresh
- casUpdated, err := bucket.WriteCas(key, 0, 0, casStale, "key-updated", sgbucket.Raw)
+ casUpdated, err := bucket.WriteCas(key1, 0, 0, casStale, "key-updated", sgbucket.Raw)
if err != nil {
t.Errorf("Error doing WriteCas: %v", err)
}
@@ -360,7 +358,7 @@ func TestSetBulk(t *testing.T) {
// Do bulk set with a new key and the prev key with casStale
entries := []*sgbucket.BulkSetEntry{}
entries = append(entries, &sgbucket.BulkSetEntry{
- Key: key,
+ Key: key1,
Value: "key-updated2",
Cas: casStale,
})
@@ -385,7 +383,7 @@ func TestSetBulk(t *testing.T) {
// Retry with bulk set with another new key and casFresh key
entries = []*sgbucket.BulkSetEntry{}
entries = append(entries, &sgbucket.BulkSetEntry{
- Key: key,
+ Key: key1,
Value: "key-updated3",
Cas: casUpdated,
})
@@ -402,7 +400,7 @@ func TestSetBulk(t *testing.T) {
goassert.Equals(t, numNonNilErrors(entries), 0)
// Make sure the original key that previously failed now works
- _, err = bucket.Get(key, &returnVal)
+ _, err = bucket.Get(key1, &returnVal)
goassert.True(t, err == nil)
goassert.Equals(t, returnVal, "key-updated3")
@@ -424,7 +422,7 @@ func TestUpdate(t *testing.T) {
defer testBucket.Close()
bucket := testBucket.Bucket
- key := "TestUpdate"
+ key := getTestKeyNamespace(t)
valInitial := []byte("initial")
valUpdated := []byte("updated")
@@ -480,7 +478,7 @@ func TestIncrCounter(t *testing.T) {
defer testBucket.Close()
bucket := testBucket.Bucket
- key := "TestIncr"
+ key := getTestKeyNamespace(t)
defer func() {
err := bucket.Delete(key)
@@ -519,7 +517,7 @@ func TestGetAndTouchRaw(t *testing.T) {
// There's no easy way to validate the expiry time of a doc (that I know of)
// so this is just a smoke test
- key := "TestGetAndTouchRaw"
+ key := getTestKeyNamespace(t)
val := []byte("bar")
testBucket := GetTestBucket(t)
@@ -633,7 +631,7 @@ func TestXattrWriteCasSimple(t *testing.T) {
defer testBucket.Close()
bucket := testBucket.Bucket
- key := "TestWriteCasXATTRSimple"
+ key := getTestKeyNamespace(t)
xattrName := SyncXattrName
val := make(map[string]interface{})
val["body_field"] = "1234"
@@ -701,7 +699,7 @@ func TestXattrWriteCasUpsert(t *testing.T) {
}
bucket.SetTranscoder(SGTranscoder{})
- key := "TestWriteCasXATTRUpsert"
+ key := getTestKeyNamespace(t)
xattrName := SyncXattrName
val := make(map[string]interface{})
val["body_field"] = "1234"
@@ -767,7 +765,7 @@ func TestXattrWriteCasWithXattrCasCheck(t *testing.T) {
defer testBucket.Close()
bucket := testBucket.Bucket
- key := "TestWriteCasXATTRSimple"
+ key := getTestKeyNamespace(t)
xattrName := SyncXattrName
val := make(map[string]interface{})
val["sg_field"] = "sg_value"
@@ -841,7 +839,7 @@ func TestXattrWriteCasRaw(t *testing.T) {
}
bucket.SetTranscoder(SGTranscoder{})
- key := "TestWriteCasXattrRaw"
+ key := getTestKeyNamespace(t)
xattrName := SyncXattrName
val := make(map[string]interface{})
val["body_field"] = "1234"
@@ -894,7 +892,7 @@ func TestXattrWriteCasTombstoneResurrect(t *testing.T) {
}
bucket.SetTranscoder(SGTranscoder{})
- key := "TestWriteCasXattrTombstoneResurrect"
+ key := getTestKeyNamespace(t)
xattrName := SyncXattrName
val := make(map[string]interface{})
val["body_field"] = "1234"
@@ -978,7 +976,7 @@ func TestXattrWriteCasTombstoneUpdate(t *testing.T) {
}
bucket.SetTranscoder(SGTranscoder{})
- key := "TestWriteCasXattrTombstoneXattrUpdate"
+ key := getTestKeyNamespace(t)
xattrName := SyncXattrName
val := make(map[string]interface{})
val["body_field"] = "1234"
@@ -1063,7 +1061,7 @@ func TestXattrWriteUpdateXattr(t *testing.T) {
}
bucket.SetTranscoder(SGTranscoder{})
- key := "TestWriteUpdateXATTR"
+ key := getTestKeyNamespace(t)
xattrName := SyncXattrName
val := make(map[string]interface{})
val["counter"] = float64(1)
@@ -1187,7 +1185,7 @@ func TestXattrDeleteDocument(t *testing.T) {
xattrVal["seq"] = 123
xattrVal["rev"] = "1-1234"
- key := "TestDeleteDocumentHavingXATTR"
+ key := getTestKeyNamespace(t)
_, _, err := bucket.GetRaw(key)
if err == nil {
log.Printf("Key should not exist yet, expected error but got nil. Doing cleanup, assuming couchbase bucket testing")
@@ -1242,7 +1240,7 @@ func TestXattrDeleteDocumentUpdate(t *testing.T) {
xattrVal["seq"] = 1
xattrVal["rev"] = "1-1234"
- key := "TestDeleteDocumentHavingXATTR"
+ key := getTestKeyNamespace(t)
_, _, err := bucket.GetRaw(key)
if err == nil {
log.Printf("Key should not exist yet, expected error but got nil. Doing cleanup, assuming couchbase bucket testing")
@@ -1315,7 +1313,7 @@ func TestXattrDeleteDocumentAndUpdateXattr(t *testing.T) {
xattrVal["seq"] = 123
xattrVal["rev"] = "1-1234"
- key := "TestDeleteDocumentAndUpdateXATTR_2"
+ key := getTestKeyNamespace(t)
_, _, err := bucket.GetRaw(key)
if err == nil {
log.Printf("Key should not exist yet, expected error but got nil. Doing cleanup, assuming couchbase bucket testing")
@@ -1363,10 +1361,10 @@ func TestXattrTombstoneDocAndUpdateXattr(t *testing.T) {
return
}
- key1 := "DocExistsXattrExists"
- key2 := "DocExistsNoXattr"
- key3 := "XattrExistsNoDoc"
- key4 := "NoDocNoXattr"
+ key1 := getTestKeyNamespace(t) + "DocExistsXattrExists"
+ key2 := getTestKeyNamespace(t) + "DocExistsNoXattr"
+ key3 := getTestKeyNamespace(t) + "XattrExistsNoDoc"
+ key4 := getTestKeyNamespace(t) + "NoDocNoXattr"
// 1. Create document with XATTR
val := make(map[string]interface{})
@@ -1463,10 +1461,10 @@ func TestXattrDeleteDocAndXattr(t *testing.T) {
return
}
- key1 := "DocExistsXattrExists"
- key2 := "DocExistsNoXattr"
- key3 := "XattrExistsNoDoc"
- key4 := "NoDocNoXattr"
+ key1 := getTestKeyNamespace(t) + "DocExistsXattrExists"
+ key2 := getTestKeyNamespace(t) + "DocExistsNoXattr"
+ key3 := getTestKeyNamespace(t) + "XattrExistsNoDoc"
+ key4 := getTestKeyNamespace(t) + "NoDocNoXattr"
// 1. Create document with XATTR
val := make(map[string]interface{})
@@ -1513,12 +1511,6 @@ func TestXattrDeleteDocAndXattr(t *testing.T) {
}
// 4. No xattr, no document
- updatedVal := make(map[string]interface{})
- updatedVal["type"] = "updated"
-
- updatedXattrVal := make(map[string]interface{})
- updatedXattrVal["seq"] = 123
- updatedXattrVal["rev"] = "2-1234"
// Attempt to delete DocExistsXattrExists, DocExistsNoXattr, and XattrExistsNoDoc
// No errors should be returned when deleting these.
@@ -1533,9 +1525,9 @@ func TestXattrDeleteDocAndXattr(t *testing.T) {
// Now attempt to delete key4 (NoDocNoXattr), which is expected to return a Key Not Found error
log.Printf("Deleting key: %v", key4)
errDelete := bucket.DeleteWithXattr(key4, xattrName)
- assert.True(t, bucket.IsKeyNotFoundError(errDelete), "Exepcted keynotfound error")
+ assert.Error(t, errDelete, "Expected error when calling bucket.DeleteWithXattr")
+ assert.Truef(t, bucket.IsKeyNotFoundError(errDelete), "Expected keynotfound error but got %v", errDelete)
assert.True(t, verifyDocAndXattrDeleted(bucket, key4, xattrName), "Expected doc to be deleted")
-
}
// This simulates a race condition by calling deleteWithXattrInternal() and passing a custom
@@ -1553,7 +1545,7 @@ func TestDeleteWithXattrWithSimulatedRaceResurrect(t *testing.T) {
return
}
- key := "TestDeleteWithXattrWithSimulatedRace"
+ key := getTestKeyNamespace(t)
xattrName := SyncXattrName
createTombstonedDoc(bucket, key, xattrName)
@@ -1602,10 +1594,10 @@ func TestXattrRetrieveDocumentAndXattr(t *testing.T) {
return
}
- key1 := "DocExistsXattrExists"
- key2 := "DocExistsNoXattr"
- key3 := "XattrExistsNoDoc"
- key4 := "NoDocNoXattr"
+ key1 := getTestKeyNamespace(t) + "DocExistsXattrExists"
+ key2 := getTestKeyNamespace(t) + "DocExistsNoXattr"
+ key3 := getTestKeyNamespace(t) + "XattrExistsNoDoc"
+ key4 := getTestKeyNamespace(t) + "NoDocNoXattr"
// 1. Create document with XATTR
val := make(map[string]interface{})
@@ -1694,10 +1686,10 @@ func TestXattrMutateDocAndXattr(t *testing.T) {
return
}
- key1 := "DocExistsXattrExists"
- key2 := "DocExistsNoXattr"
- key3 := "XattrExistsNoDoc"
- key4 := "NoDocNoXattr"
+ key1 := getTestKeyNamespace(t) + "DocExistsXattrExists"
+ key2 := getTestKeyNamespace(t) + "DocExistsNoXattr"
+ key3 := getTestKeyNamespace(t) + "XattrExistsNoDoc"
+ key4 := getTestKeyNamespace(t) + "NoDocNoXattr"
// 1. Create document with XATTR
val := make(map[string]interface{})
@@ -1808,7 +1800,7 @@ func TestGetXattr(t *testing.T) {
}
//Doc 1
- key1 := "DocExistsXattrExists"
+ key1 := getTestKeyNamespace(t) + "DocExistsXattrExists"
val1 := make(map[string]interface{})
val1["type"] = key1
xattrName1 := "sync"
@@ -1817,7 +1809,7 @@ func TestGetXattr(t *testing.T) {
xattrVal1["rev"] = "1-foo"
//Doc 2 - Tombstone
- key2 := "TombstonedDocXattrExists"
+ key2 := getTestKeyNamespace(t) + "TombstonedDocXattrExists"
val2 := make(map[string]interface{})
val2["type"] = key2
xattrVal2 := make(map[string]interface{})
@@ -1825,7 +1817,7 @@ func TestGetXattr(t *testing.T) {
xattrVal2["rev"] = "1-foo"
//Doc 3 - To Delete
- key3 := "DeletedDocXattrExists"
+ key3 := getTestKeyNamespace(t) + "DeletedDocXattrExists"
val3 := make(map[string]interface{})
val3["type"] = key3
xattrName3 := "sync"
@@ -2101,10 +2093,20 @@ func TestCouchbaseServerIncorrectLogin(t *testing.T) {
t.Skip("This test only works against Couchbase Server")
}
- // Bad auth creds cause a fatal error with logs indicating the reason why.
- _, err := GetBucketWithInvalidUsernamePassword(DataBucket)
- goassert.Equals(t, err, ErrFatalBucketConnection)
+ testBucket := GetTestBucket(t)
+ defer testBucket.Close()
+
+ // Override test bucket spec with invalid creds
+ testBucket.BucketSpec.Auth = TestAuthenticator{
+ Username: "invalid_username",
+ Password: "invalid_password",
+ BucketName: testBucket.BucketSpec.BucketName,
+ }
+ // Attempt to open the bucket again using invalid creds. We should expect an error.
+ bucket, err := GetBucket(testBucket.BucketSpec)
+ assert.Equal(t, ErrFatalBucketConnection, err)
+ assert.Nil(t, bucket)
}
// TestCouchbaseServerIncorrectX509Login tries to open a bucket using an example X509 Cert/Key
@@ -2114,26 +2116,27 @@ func TestCouchbaseServerIncorrectX509Login(t *testing.T) {
t.Skip("This test only works against Couchbase Server")
}
- spec := GetTestBucketSpec(DataBucket)
+ testBucket := GetTestBucket(t)
+ defer testBucket.Close()
// Remove existing password-based authentication
- spec.Auth = nil
+ testBucket.BucketSpec.Auth = nil
// Force use of TLS so we are able to use X509
- if strings.HasPrefix(spec.Server, "http://") {
- spec.Server = "couchbases://" + spec.Server[7:]
- } else if strings.HasPrefix(spec.Server, "couchbase://") {
- spec.Server = "couchbases://" + spec.Server[12:]
+ if strings.HasPrefix(testBucket.BucketSpec.Server, "http://") {
+ testBucket.BucketSpec.Server = "couchbases://" + testBucket.BucketSpec.Server[7:]
+ } else if strings.HasPrefix(testBucket.BucketSpec.Server, "couchbase://") {
+ testBucket.BucketSpec.Server = "couchbases://" + testBucket.BucketSpec.Server[12:]
}
- spec.Server = strings.TrimSuffix(spec.Server, ":8091")
+ testBucket.BucketSpec.Server = strings.TrimSuffix(testBucket.BucketSpec.Server, ":8091")
// Set CertPath/KeyPath for X509 auth
certPath, keyPath, x509CleanupFn := tempX509Certs(t)
- spec.Certpath = certPath
- spec.Keypath = keyPath
+ testBucket.BucketSpec.Certpath = certPath
+ testBucket.BucketSpec.Keypath = keyPath
// Attempt to open a test bucket with invalid certs
- bucket, err := GetBucket(spec)
+ bucket, err := GetBucket(testBucket.BucketSpec)
// We no longer need the cert files, so go ahead and clean those up now before any assertions stop the test.
x509CleanupFn()
diff --git a/base/bucket_n1ql.go b/base/bucket_n1ql.go
index ba2e0f559f..865c6659c2 100644
--- a/base/bucket_n1ql.go
+++ b/base/bucket_n1ql.go
@@ -14,6 +14,7 @@ const MaxQueryRetries = 30 // Maximum query retries on indexer error
const IndexStateOnline = "online" // bucket state value, as returned by SELECT FROM system:indexes. Index has been created and built.
const IndexStateDeferred = "deferred" // bucket state value, as returned by SELECT FROM system:indexes. Index has been created but not built.
const IndexStatePending = "pending" // bucket state value, as returned by SELECT FROM system:indexes. Index has been created, build is in progress
+const PrimaryIndexName = "#primary"
var SlowQueryWarningThreshold time.Duration
diff --git a/base/bucket_n1ql_test.go b/base/bucket_n1ql_test.go
index 2cfed6a5c3..98e519ed35 100644
--- a/base/bucket_n1ql_test.go
+++ b/base/bucket_n1ql_test.go
@@ -20,8 +20,8 @@ func TestN1qlQuery(t *testing.T) {
// Disabled due to CBG-755:
t.Skip("WARNING: TEST DISABLED - the testIndex_value creation is causing issues with CB 6.5.0")
- if UnitTestUrlIsWalrus() {
- t.Skip("This test only works against Couchbase Server")
+ if TestsDisableGSI() {
+ t.Skip("This test only works with Couchbase Server and UseViews=false")
}
testBucket := GetTestBucket(t)
@@ -128,8 +128,8 @@ func TestN1qlFilterExpression(t *testing.T) {
// Disabled due to CBG-755:
t.Skip("WARNING: TEST DISABLED - the testIndex_value creation is causing issues with CB 6.5.0")
- if UnitTestUrlIsWalrus() {
- t.Skip("This test only works against Couchbase Server")
+ if TestsDisableGSI() {
+ t.Skip("This test only works with Couchbase Server and UseViews=false")
}
testBucket := GetTestBucket(t)
@@ -205,8 +205,8 @@ func TestIndexMeta(t *testing.T) {
// Disabled due to CBG-755:
t.Skip("WARNING: TEST DISABLED - the testIndex_value creation is causing issues with CB 6.5.0")
- if UnitTestUrlIsWalrus() {
- t.Skip("This test only works against Couchbase Server")
+ if TestsDisableGSI() {
+ t.Skip("This test only works with Couchbase Server and UseViews=false")
}
testBucket := GetTestBucket(t)
@@ -248,9 +248,10 @@ func TestIndexMeta(t *testing.T) {
// Ensure that n1ql query errors are handled and returned (and don't result in panic etc)
func TestMalformedN1qlQuery(t *testing.T) {
- if UnitTestUrlIsWalrus() {
- t.Skip("This test only works against Couchbase Server")
+ if TestsDisableGSI() {
+ t.Skip("This test only works with Couchbase Server and UseViews=false")
}
+
testBucket := GetTestBucket(t)
defer testBucket.Close()
bucket, ok := testBucket.Bucket.(*CouchbaseBucketGoCB)
@@ -314,9 +315,10 @@ func TestMalformedN1qlQuery(t *testing.T) {
}
func TestCreateAndDropIndex(t *testing.T) {
- if UnitTestUrlIsWalrus() {
- t.Skip("This test only works against Couchbase Server")
+ if TestsDisableGSI() {
+ t.Skip("This test only works with Couchbase Server and UseViews=false")
}
+
testBucket := GetTestBucket(t)
defer testBucket.Close()
bucket, ok := testBucket.Bucket.(*CouchbaseBucketGoCB)
@@ -341,9 +343,10 @@ func TestCreateAndDropIndex(t *testing.T) {
}
func TestCreateDuplicateIndex(t *testing.T) {
- if UnitTestUrlIsWalrus() {
- t.Skip("This test only works against Couchbase Server")
+ if TestsDisableGSI() {
+ t.Skip("This test only works with Couchbase Server and UseViews=false")
}
+
testBucket := GetTestBucket(t)
defer testBucket.Close()
bucket, ok := testBucket.Bucket.(*CouchbaseBucketGoCB)
@@ -372,9 +375,10 @@ func TestCreateDuplicateIndex(t *testing.T) {
}
func TestCreateAndDropIndexSpecialCharacters(t *testing.T) {
- if UnitTestUrlIsWalrus() {
- t.Skip("This test only works against Couchbase Server")
+ if TestsDisableGSI() {
+ t.Skip("This test only works with Couchbase Server and UseViews=false")
}
+
testBucket := GetTestBucket(t)
defer testBucket.Close()
bucket, ok := testBucket.Bucket.(*CouchbaseBucketGoCB)
@@ -399,9 +403,10 @@ func TestCreateAndDropIndexSpecialCharacters(t *testing.T) {
}
func TestDeferredCreateIndex(t *testing.T) {
- if UnitTestUrlIsWalrus() {
- t.Skip("This test only works against Couchbase Server")
+ if TestsDisableGSI() {
+ t.Skip("This test only works with Couchbase Server and UseViews=false")
}
+
testBucket := GetTestBucket(t)
defer testBucket.Close()
@@ -441,9 +446,10 @@ func TestDeferredCreateIndex(t *testing.T) {
}
func TestBuildDeferredIndexes(t *testing.T) {
- if UnitTestUrlIsWalrus() {
- t.Skip("This test only works against Couchbase Server")
+ if TestsDisableGSI() {
+ t.Skip("This test only works with Couchbase Server and UseViews=false")
}
+
testBucket := GetTestBucket(t)
defer testBucket.Close()
@@ -506,10 +512,10 @@ func TestBuildDeferredIndexes(t *testing.T) {
}
func TestCreateAndDropIndexErrors(t *testing.T) {
-
- if UnitTestUrlIsWalrus() {
- t.Skip("This test only works against Couchbase Server")
+ if TestsDisableGSI() {
+ t.Skip("This test only works with Couchbase Server and UseViews=false")
}
+
testBucket := GetTestBucket(t)
defer testBucket.Close()
bucket, ok := testBucket.Bucket.(*CouchbaseBucketGoCB)
@@ -584,8 +590,9 @@ func tearDownTestIndex(bucket *CouchbaseBucketGoCB, indexName string) (err error
}
func TestWaitForBucketExistence(t *testing.T) {
- if UnitTestUrlIsWalrus() {
- t.Skip("This test only works against Couchbase Server")
+
+ if TestsDisableGSI() {
+ t.Skip("This test only works with Couchbase Server and UseViews=false")
}
testBucket := GetTestBucket(t)
diff --git a/base/constants.go b/base/constants.go
index cb5da7d3ec..f6d9fdc6e7 100644
--- a/base/constants.go
+++ b/base/constants.go
@@ -47,6 +47,9 @@ const (
// Should the tests drop the GSI indexes?
TestEnvSyncGatewayDropIndexes = "SG_TEST_DROP_INDEXES"
+ // Should the tests disable the use of any GSI-related code?
+ TestEnvSyncGatewayDisableGSI = "SG_TEST_DISABLE_GSI"
+
// Don't use an auth handler by default, but provide a way to override
TestEnvSyncGatewayUseAuthHandler = "SG_TEST_USE_AUTH_HANDLER"
@@ -140,6 +143,7 @@ var (
DefaultWarnThresholdGrantsPerDoc = uint32(50)
)
+// UnitTestUrl returns the configured test URL.
func UnitTestUrl() string {
if TestUseCouchbaseServer() {
testCouchbaseServerUrl := os.Getenv(TestEnvCouchbaseServerUrl)
@@ -154,6 +158,7 @@ func UnitTestUrl() string {
}
}
+// UnitTestUrlIsWalrus returns true if we're running with a Walrus test URL.
func UnitTestUrlIsWalrus() bool {
unitTestUrl := UnitTestUrl()
return strings.Contains(unitTestUrl, kTestWalrusURL)
diff --git a/base/leaky_bucket.go b/base/leaky_bucket.go
index 9a5e391753..956af04b9d 100644
--- a/base/leaky_bucket.go
+++ b/base/leaky_bucket.go
@@ -55,7 +55,7 @@ type LeakyBucketConfig struct {
IncrCallback func()
}
-func NewLeakyBucket(bucket Bucket, config LeakyBucketConfig) Bucket {
+func NewLeakyBucket(bucket Bucket, config LeakyBucketConfig) *LeakyBucket {
return &LeakyBucket{
bucket: bucket,
config: config,
@@ -154,6 +154,9 @@ func (b *LeakyBucket) Incr(k string, amt, def uint64, exp uint32) (uint64, error
return val, err
}
+func (b *LeakyBucket) GetDDocs(value interface{}) error {
+ return b.bucket.GetDDocs(value)
+}
func (b *LeakyBucket) GetDDoc(docname string, value interface{}) error {
if b.config.DDocGetErrorCount > 0 {
b.config.DDocGetErrorCount--
diff --git a/base/log_keys.go b/base/log_keys.go
index 1b360ba020..2d223ae724 100644
--- a/base/log_keys.go
+++ b/base/log_keys.go
@@ -41,6 +41,7 @@ const (
KeySyncMsg
KeyWebSocket
KeyWebSocketFrame
+ KeySGTest
LogKeyCount // Count for logKeyNames init
)
@@ -70,6 +71,7 @@ var (
KeySyncMsg: "SyncMsg",
KeyWebSocket: "WS",
KeyWebSocketFrame: "WSFrame",
+ KeySGTest: "TEST",
}
logKeyNamesInverse = inverselogKeyNames(logKeyNames)
)
diff --git a/base/logging_bucket.go b/base/logging_bucket.go
index 45dfdb8d00..00187c7f22 100644
--- a/base/logging_bucket.go
+++ b/base/logging_bucket.go
@@ -1,7 +1,9 @@
package base
import (
+ "context"
"expvar"
+ "sync"
"time"
sgbucket "github.com/couchbase/sg-bucket"
@@ -9,222 +11,207 @@ import (
// A wrapper around a Bucket that transparently adds logging of all the API calls.
type LoggingBucket struct {
- bucket Bucket
+ bucket Bucket
+ logCtx context.Context
+ logCtxOnce sync.Once
+}
+
+func (b *LoggingBucket) ctx() context.Context {
+ b.logCtxOnce.Do(func() {
+ b.logCtx = bucketCtx(context.Background(), b)
+ })
+ return b.logCtx
+}
+
+func (b *LoggingBucket) log(start time.Time, args ...interface{}) {
+ caller := GetCallersName(1, false)
+ TracefCtx(b.ctx(), KeyBucket, "%s(%v) [%v]", caller, UD(args), time.Since(start))
}
func (b *LoggingBucket) GetName() string {
- //Tracef(KeyBucket, "GetName()")
+ // b.log() depends on this, so don't log here otherwise we'd stack overflow
return b.bucket.GetName()
}
func (b *LoggingBucket) Get(k string, rv interface{}) (uint64, error) {
- start := time.Now()
- defer func() { Tracef(KeyBucket, "Get(%q) [%v]", UD(k), time.Since(start)) }()
+ defer b.log(time.Now(), k)
return b.bucket.Get(k, rv)
}
func (b *LoggingBucket) GetRaw(k string) (v []byte, cas uint64, err error) {
- start := time.Now()
- defer func() { Tracef(KeyBucket, "GetRaw(%q) [%v]", UD(k), time.Since(start)) }()
+ defer b.log(time.Now(), k)
return b.bucket.GetRaw(k)
}
func (b *LoggingBucket) GetAndTouchRaw(k string, exp uint32) (v []byte, cas uint64, err error) {
- start := time.Now()
- defer func() { Tracef(KeyBucket, "GetAndTouchRaw(%q) [%v]", UD(k), time.Since(start)) }()
+ defer b.log(time.Now(), k, exp)
return b.bucket.GetAndTouchRaw(k, exp)
}
func (b *LoggingBucket) Touch(k string, exp uint32) (cas uint64, err error) {
- start := time.Now()
- defer func() { Tracef(KeyBucket, "Touch(%q) [%v]", UD(k), time.Since(start)) }()
+ defer b.log(time.Now(), k, exp)
return b.bucket.Touch(k, exp)
}
func (b *LoggingBucket) GetBulkRaw(keys []string) (map[string][]byte, error) {
- start := time.Now()
- defer func() { Tracef(KeyBucket, "GetBulkRaw(%q) [%v]", UD(keys), time.Since(start)) }()
+ defer b.log(time.Now(), keys)
return b.bucket.GetBulkRaw(keys)
}
func (b *LoggingBucket) Add(k string, exp uint32, v interface{}) (added bool, err error) {
- start := time.Now()
- defer func() { Tracef(KeyBucket, "Add(%q, %d, ...) [%v]", UD(k), exp, time.Since(start)) }()
+ defer b.log(time.Now(), k, exp)
return b.bucket.Add(k, exp, v)
}
func (b *LoggingBucket) AddRaw(k string, exp uint32, v []byte) (added bool, err error) {
- start := time.Now()
- defer func() { Tracef(KeyBucket, "AddRaw(%q, %d, ...) [%v]", UD(k), exp, time.Since(start)) }()
+ defer b.log(time.Now(), k, exp)
return b.bucket.AddRaw(k, exp, v)
}
func (b *LoggingBucket) Append(k string, data []byte) error {
- start := time.Now()
- defer func() { Tracef(KeyBucket, "Append(%q, ...) [%v]", UD(k), time.Since(start)) }()
+ defer b.log(time.Now(), k)
return b.bucket.Append(k, data)
}
func (b *LoggingBucket) Set(k string, exp uint32, v interface{}) error {
- start := time.Now()
- defer func() { Tracef(KeyBucket, "Set(%q, %d, ...) [%v]", UD(k), exp, time.Since(start)) }()
+ defer b.log(time.Now(), k, exp)
return b.bucket.Set(k, exp, v)
}
func (b *LoggingBucket) SetRaw(k string, exp uint32, v []byte) error {
- start := time.Now()
- defer func() { Tracef(KeyBucket, "SetRaw(%q, %d, ...) [%v]", UD(k), exp, time.Since(start)) }()
+ defer b.log(time.Now(), k, exp)
return b.bucket.SetRaw(k, exp, v)
}
func (b *LoggingBucket) Delete(k string) error {
- start := time.Now()
- defer func() { Tracef(KeyBucket, "Delete(%q) [%v]", UD(k), time.Since(start)) }()
+ defer b.log(time.Now(), k)
return b.bucket.Delete(k)
}
func (b *LoggingBucket) Remove(k string, cas uint64) (casOut uint64, err error) {
- start := time.Now()
- defer func() { Tracef(KeyBucket, "Remove(%q) [%v]", UD(k), time.Since(start)) }()
+ defer b.log(time.Now(), k, cas)
return b.bucket.Remove(k, cas)
}
func (b *LoggingBucket) Write(k string, flags int, exp uint32, v interface{}, opt sgbucket.WriteOptions) error {
- start := time.Now()
- defer func() {
- Tracef(KeyBucket, "Write(%q, 0x%x, %d, ..., 0x%x) [%v]", UD(k), flags, exp, opt, time.Since(start))
- }()
+ defer b.log(time.Now(), k, flags, exp, opt)
return b.bucket.Write(k, flags, exp, v, opt)
}
func (b *LoggingBucket) WriteCas(k string, flags int, exp uint32, cas uint64, v interface{}, opt sgbucket.WriteOptions) (uint64, error) {
- start := time.Now()
- defer func() {
- Tracef(KeyBucket, "WriteCas(%q, 0x%x, %d, %d, ..., 0x%x) [%v]", UD(k), flags, exp, cas, opt, time.Since(start))
- }()
+ defer b.log(time.Now(), k, flags, exp, cas, opt)
return b.bucket.WriteCas(k, flags, exp, cas, v, opt)
}
func (b *LoggingBucket) Update(k string, exp uint32, callback sgbucket.UpdateFunc) (casOut uint64, err error) {
- start := time.Now()
- defer func() { Tracef(KeyBucket, "Update(%q, %d, ...) --> %v [%v]", UD(k), exp, err, time.Since(start)) }()
+ defer b.log(time.Now(), k, exp)
return b.bucket.Update(k, exp, callback)
}
func (b *LoggingBucket) WriteUpdate(k string, exp uint32, callback sgbucket.WriteUpdateFunc) (casOut uint64, err error) {
- start := time.Now()
- defer func() { Tracef(KeyBucket, "WriteUpdate(%q, %d, ...) --> %v [%v]", UD(k), exp, err, time.Since(start)) }()
+ defer b.log(time.Now(), k, exp)
return b.bucket.WriteUpdate(k, exp, callback)
}
func (b *LoggingBucket) Incr(k string, amt, def uint64, exp uint32) (uint64, error) {
- start := time.Now()
- defer func() { Tracef(KeyBucket, "Incr(%q, %d, %d, %d) [%v]", UD(k), amt, def, exp, time.Since(start)) }()
+ defer b.log(time.Now(), k, amt, def, exp)
return b.bucket.Incr(k, amt, def, exp)
}
func (b *LoggingBucket) WriteCasWithXattr(k string, xattr string, exp uint32, cas uint64, v interface{}, xv interface{}) (casOut uint64, err error) {
- start := time.Now()
- defer func() { Tracef(KeyBucket, "WriteCasWithXattr(%q, ...) [%v]", UD(k), time.Since(start)) }()
+ defer b.log(time.Now(), k, xattr, exp, cas)
return b.bucket.WriteCasWithXattr(k, xattr, exp, cas, v, xv)
}
func (b *LoggingBucket) WriteUpdateWithXattr(k string, xattr string, exp uint32, previous *sgbucket.BucketDocument, callback sgbucket.WriteUpdateWithXattrFunc) (casOut uint64, err error) {
- start := time.Now()
- defer func() {
- Tracef(KeyBucket, "WriteUpdateWithXattr(%q, %d, ...) --> %v [%v]", UD(k), exp, err, time.Since(start))
- }()
+ defer b.log(time.Now(), k, xattr, exp)
return b.bucket.WriteUpdateWithXattr(k, xattr, exp, previous, callback)
}
func (b *LoggingBucket) GetWithXattr(k string, xattr string, rv interface{}, xv interface{}) (cas uint64, err error) {
- start := time.Now()
- defer func() { Tracef(KeyBucket, "GetWithXattr(%q, ...) [%v]", UD(k), time.Since(start)) }()
+ defer b.log(time.Now(), k, xattr)
return b.bucket.GetWithXattr(k, xattr, rv, xv)
}
func (b *LoggingBucket) DeleteWithXattr(k string, xattr string) error {
- start := time.Now()
- defer func() { Tracef(KeyBucket, "DeleteWithXattr(%q, ...) [%v]", UD(k), time.Since(start)) }()
+ defer b.log(time.Now(), k, xattr)
return b.bucket.DeleteWithXattr(k, xattr)
}
func (b *LoggingBucket) GetXattr(k string, xattr string, xv interface{}) (cas uint64, err error) {
- start := time.Now()
- defer func() { Tracef(KeyBucket, "GetXattr(%q, ...) [%v]", UD(k), time.Since(start)) }()
+ defer b.log(time.Now(), k, xattr)
return b.bucket.GetXattr(k, xattr, xv)
}
+func (b *LoggingBucket) GetDDocs(value interface{}) error {
+ defer b.log(time.Now())
+ return b.bucket.GetDDocs(value)
+}
func (b *LoggingBucket) GetDDoc(docname string, value interface{}) error {
- start := time.Now()
- defer func() { Tracef(KeyBucket, "GetDDoc(%q, ...) [%v]", UD(docname), time.Since(start)) }()
+ defer b.log(time.Now(), docname)
return b.bucket.GetDDoc(docname, value)
}
func (b *LoggingBucket) PutDDoc(docname string, value interface{}) error {
- start := time.Now()
- defer func() { Tracef(KeyBucket, "PutDDoc(%q, ...) [%v]", UD(docname), time.Since(start)) }()
+ defer b.log(time.Now(), docname)
return b.bucket.PutDDoc(docname, value)
}
func (b *LoggingBucket) DeleteDDoc(docname string) error {
- start := time.Now()
- defer func() { Tracef(KeyBucket, "DeleteDDoc(%q, ...) [%v]", UD(docname), time.Since(start)) }()
+ defer b.log(time.Now(), docname)
return b.bucket.DeleteDDoc(docname)
}
func (b *LoggingBucket) View(ddoc, name string, params map[string]interface{}) (sgbucket.ViewResult, error) {
- start := time.Now()
- defer func() { Tracef(KeyBucket, "View(%q, %q, ...) [%v]", MD(ddoc), UD(name), time.Since(start)) }()
+ defer b.log(time.Now(), ddoc, name)
return b.bucket.View(ddoc, name, params)
}
func (b *LoggingBucket) ViewCustom(ddoc, name string, params map[string]interface{}, vres interface{}) error {
- start := time.Now()
- defer func() { Tracef(KeyBucket, "ViewCustom(%q, %q, ...) [%v]", MD(ddoc), UD(name), time.Since(start)) }()
+ defer b.log(time.Now(), ddoc, name)
return b.bucket.ViewCustom(ddoc, name, params, vres)
}
func (b *LoggingBucket) ViewQuery(ddoc, name string, params map[string]interface{}) (sgbucket.QueryResultIterator, error) {
- start := time.Now()
- defer func() { Tracef(KeyBucket, "ViewQuery(%q, %q, ...) [%v]", MD(ddoc), UD(name), time.Since(start)) }()
+ defer b.log(time.Now(), ddoc, name)
return b.bucket.ViewQuery(ddoc, name, params)
}
func (b *LoggingBucket) SetBulk(entries []*sgbucket.BulkSetEntry) (err error) {
- start := time.Now()
- defer func() { Tracef(KeyBucket, "SetBulk(%q, ...) --> %v [%v]", UD(entries), err, time.Since(start)) }()
+ defer b.log(time.Now(), entries)
return b.bucket.SetBulk(entries)
}
func (b *LoggingBucket) Refresh() error {
- start := time.Now()
- defer func() { Tracef(KeyBucket, "Refresh() [%v]", time.Since(start)) }()
+ defer b.log(time.Now())
return b.bucket.Refresh()
}
func (b *LoggingBucket) StartTapFeed(args sgbucket.FeedArguments, dbStats *expvar.Map) (sgbucket.MutationFeed, error) {
- start := time.Now()
- defer func() { Tracef(KeyBucket, "StartTapFeed(...) [%v]", time.Since(start)) }()
+ defer b.log(time.Now())
return b.bucket.StartTapFeed(args, dbStats)
}
func (b *LoggingBucket) StartDCPFeed(args sgbucket.FeedArguments, callback sgbucket.FeedEventCallbackFunc, dbStats *expvar.Map) error {
- start := time.Now()
- defer func() { Tracef(KeyBucket, "StartDcpFeed(...) [%v]", time.Since(start)) }()
+ defer b.log(time.Now())
return b.bucket.StartDCPFeed(args, callback, dbStats)
}
func (b *LoggingBucket) Close() {
- start := time.Now()
- defer func() { Tracef(KeyBucket, "Close() [%v]", time.Since(start)) }()
+ defer b.log(time.Now())
b.bucket.Close()
}
func (b *LoggingBucket) Dump() {
- Tracef(KeyBucket, "Dump()")
+ defer b.log(time.Now())
b.bucket.Dump()
}
func (b *LoggingBucket) VBHash(docID string) uint32 {
- Tracef(KeyBucket, "VBHash()")
+ defer b.log(time.Now())
return b.bucket.VBHash(docID)
}
func (b *LoggingBucket) GetMaxVbno() (uint16, error) {
+ defer b.log(time.Now())
return b.bucket.GetMaxVbno()
}
func (b *LoggingBucket) CouchbaseServerVersion() (major uint64, minor uint64, micro string) {
+ defer b.log(time.Now())
return b.bucket.CouchbaseServerVersion()
}
func (b *LoggingBucket) UUID() (string, error) {
+ defer b.log(time.Now())
return b.bucket.UUID()
}
func (b *LoggingBucket) GetStatsVbSeqno(maxVbno uint16, useAbsHighSeqNo bool) (uuids map[uint16]uint64, highSeqnos map[uint16]uint64, seqErr error) {
+ defer b.log(time.Now())
return b.bucket.GetStatsVbSeqno(maxVbno, useAbsHighSeqNo)
}
// GetUnderlyingBucket returns the underlying bucket for the LoggingBucket.
func (b *LoggingBucket) GetUnderlyingBucket() Bucket {
+ defer b.log(time.Now())
return b.bucket
}
func (b *LoggingBucket) IsSupported(feature sgbucket.BucketFeature) bool {
+ defer b.log(time.Now())
return b.bucket.IsSupported(feature)
}
diff --git a/base/logging_context.go b/base/logging_context.go
index ff5bdb86cf..d03ac830f9 100644
--- a/base/logging_context.go
+++ b/base/logging_context.go
@@ -1,8 +1,10 @@
package base
import (
+ "context"
"math/rand"
"strconv"
+ "testing"
)
// LogContextKey is used to key a LogContext value
@@ -13,6 +15,12 @@ type LogContext struct {
// CorrelationID is a pre-formatted identifier used to correlate logs.
// E.g: Either blip context ID or HTTP Serial number.
CorrelationID string
+
+ // TestName can be a unit test name (from t.Name())
+ TestName string
+
+ // TestBucketName is the name of a bucket used during a test
+ TestBucketName string
}
// addContext returns a string format with additional log context if present.
@@ -25,6 +33,14 @@ func (lc *LogContext) addContext(format string) string {
format = "c:" + lc.CorrelationID + " " + format
}
+ if lc.TestBucketName != "" {
+ format = "b:" + lc.TestBucketName + " " + format
+ }
+
+ if lc.TestName != "" {
+ format = "t:" + lc.TestName + " " + format
+ }
+
return format
}
@@ -35,3 +51,23 @@ func FormatBlipContextID(contextID string) string {
func NewTaskID(contextID string, taskName string) string {
return contextID + "-" + taskName + "-" + strconv.Itoa(rand.Intn(65536))
}
+
+// testCtx creates a log context for the given test.
+func testCtx(t testing.TB) context.Context {
+ return context.WithValue(context.Background(), LogContextKey{}, LogContext{TestName: t.Name()})
+}
+
+// bucketCtx extends the parent context with a bucket name.
+func bucketCtx(parent context.Context, b Bucket) context.Context {
+ return bucketNameCtx(parent, b.GetName())
+}
+
+// bucketNameCtx extends the parent context with a bucket name.
+func bucketNameCtx(parent context.Context, bucketName string) context.Context {
+ parentLogCtx, _ := parent.Value(LogContextKey{}).(LogContext)
+ newCtx := LogContext{
+ TestName: parentLogCtx.TestName,
+ TestBucketName: bucketName,
+ }
+ return context.WithValue(parent, LogContextKey{}, newCtx)
+}
diff --git a/base/logging_test.go b/base/logging_test.go
index b0fb78a572..f9ed551502 100644
--- a/base/logging_test.go
+++ b/base/logging_test.go
@@ -338,3 +338,60 @@ func CaptureConsolefLogOutput(f func()) string {
consoleFOutput = os.Stderr
return buf.String()
}
+
+func BenchmarkGetCallersName(b *testing.B) {
+ tests := []struct {
+ depth int
+ includeLine bool
+ }{
+ {
+ depth: 1,
+ includeLine: false,
+ },
+ {
+ depth: 2,
+ includeLine: false,
+ },
+ {
+ depth: 3,
+ includeLine: false,
+ },
+ {
+ // depth of 4 exceeds the call stack size for this benchmark
+ // this should actually exit-early and be faster than the above
+ depth: 4,
+ includeLine: false,
+ },
+ {
+ depth: 100,
+ includeLine: false,
+ },
+ {
+ depth: 1,
+ includeLine: true,
+ },
+ {
+ depth: 2,
+ includeLine: true,
+ },
+ {
+ depth: 3,
+ includeLine: true,
+ },
+ {
+ depth: 4,
+ includeLine: true,
+ },
+ {
+ depth: 100,
+ includeLine: true,
+ },
+ }
+ for _, tt := range tests {
+ b.Run(fmt.Sprintf("%v-%v", tt.depth, tt.includeLine), func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ GetCallersName(tt.depth, tt.includeLine)
+ }
+ })
+ }
+}
diff --git a/base/main_test.go b/base/main_test.go
new file mode 100644
index 0000000000..fdd36df61a
--- /dev/null
+++ b/base/main_test.go
@@ -0,0 +1,16 @@
+package base
+
+import (
+ "os"
+ "testing"
+)
+
+func TestMain(m *testing.M) {
+ GTestBucketPool = NewTestBucketPool(FlushBucketEmptierFunc, NoopInitFunc)
+
+ status := m.Run()
+
+ GTestBucketPool.Close()
+
+ os.Exit(status)
+}
diff --git a/base/main_test_bucket_pool.go b/base/main_test_bucket_pool.go
new file mode 100644
index 0000000000..61945625a5
--- /dev/null
+++ b/base/main_test_bucket_pool.go
@@ -0,0 +1,691 @@
+package base
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/couchbase/gocb"
+ "github.com/couchbaselabs/walrus"
+ "github.com/pkg/errors"
+)
+
+// GTestBucketPool is a global instance of a TestBucketPool used to manage a pool of buckets for integration testing.
+var GTestBucketPool *TestBucketPool
+
+const (
+ tbpEnvClusterUsername = "SG_TEST_USERNAME"
+ tbpDefaultClusterUsername = DefaultCouchbaseAdministrator
+ tbpEnvClusterPassword = "SG_TEST_PASSWORD"
+ tbpDefaultClusterPassword = DefaultCouchbasePassword
+
+ tbpBucketNamePrefix = "sg_int_"
+
+ // Creates this many buckets in the backing store to be pooled for testing.
+ tbpDefaultBucketPoolSize = 3
+ tbpEnvPoolSize = "SG_TEST_BUCKET_POOL_SIZE"
+
+ defaultBucketQuotaMB = 150
+ tbpEnvBucketQuotaMB = "SG_TEST_BUCKET_QUOTA_MB"
+
+ // Prevents reuse and cleanup of buckets used in failed tests for later inspection.
+ // When all pooled buckets are in a preserved state, any remaining tests are skipped instead of blocking waiting for a bucket.
+ tbpEnvPreserve = "SG_TEST_BUCKET_POOL_PRESERVE"
+
+ // When set to true, all existing test buckets are removed and recreated, instead of running the bucket readier.
+ tbpEnvRecreate = "SG_TEST_BACKING_STORE_RECREATE"
+
+ // Prints detailed debug logs from the test pooling framework.
+ tbpEnvVerbose = "SG_TEST_BUCKET_POOL_DEBUG"
+)
+
+// TestBucketPool is used to manage a pool of gocb buckets on a Couchbase Server for testing purposes.
+// The zero-value/uninitialized version of this struct is safe to use as Walrus buckets are returned.
+type TestBucketPool struct {
+ // integrationMode should be true if using Couchbase Server. If this is false, Walrus buckets are returned instead of pooled buckets.
+ integrationMode bool
+
+ readyBucketPool chan *CouchbaseBucketGoCB
+ bucketReadierQueue chan tbpBucketName
+ bucketReadierWaitGroup *sync.WaitGroup
+ cluster *gocb.Cluster
+ clusterMgr *gocb.ClusterManager
+ ctxCancelFunc context.CancelFunc
+ defaultBucketSpec BucketSpec
+
+ bucketInitFunc TBPBucketInitFunc
+
+ stats bucketPoolStats
+
+ // preserveBuckets can be set to true to prevent removal of a bucket used in a failing test.
+ preserveBuckets bool
+ // preservedBucketCount keeps track of number of preserved buckets to prevent bucket exhaustion deadlock.
+ preservedBucketCount uint32
+
+ // verbose flag controls debug test pool logging.
+ verbose AtomicBool
+}
+
+// NewTestBucketPool initializes a new TestBucketPool. To be called from TestMain for packages requiring test buckets.
+func NewTestBucketPool(bucketReadierFunc TBPBucketReadierFunc, bucketInitFunc TBPBucketInitFunc) *TestBucketPool {
+ // We can safely skip setup when we want Walrus buckets to be used. They'll be created on-demand via GetTestBucketAndSpec.
+ if !TestUseCouchbaseServer() {
+ tbp := TestBucketPool{
+ bucketInitFunc: bucketInitFunc,
+ }
+ tbp.verbose.Set(tbpVerbose())
+ return &tbp
+ }
+
+ _, err := SetMaxFileDescriptors(5000)
+ if err != nil {
+ panic(err)
+ }
+
+ numBuckets := tbpNumBuckets()
+ // TODO: What about pooling servers too??
+ // That way, we can have unlimited buckets available in a single test pool... True horizontal scalability in tests!
+ cluster := tbpCluster(UnitTestUrl())
+
+ // Used to manage cancellation of worker goroutines
+ ctx, ctxCancelFunc := context.WithCancel(context.Background())
+
+ preserveBuckets, _ := strconv.ParseBool(os.Getenv(tbpEnvPreserve))
+
+ tbp := TestBucketPool{
+ integrationMode: true,
+ readyBucketPool: make(chan *CouchbaseBucketGoCB, numBuckets),
+ bucketReadierQueue: make(chan tbpBucketName, numBuckets),
+ bucketReadierWaitGroup: &sync.WaitGroup{},
+ cluster: cluster,
+ clusterMgr: cluster.Manager(tbpClusterUsername(), tbpClusterPassword()),
+ ctxCancelFunc: ctxCancelFunc,
+ defaultBucketSpec: tbpDefaultBucketSpec,
+ preserveBuckets: preserveBuckets,
+ bucketInitFunc: bucketInitFunc,
+ }
+
+ tbp.verbose.Set(tbpVerbose())
+
+ // Start up an async readier worker to process dirty buckets
+ go tbp.bucketReadierWorker(ctx, bucketReadierFunc)
+
+ // Remove old test buckets (if desired)
+ removeOldBuckets, _ := strconv.ParseBool(os.Getenv(tbpEnvRecreate))
+ if removeOldBuckets {
+ err := tbp.removeOldTestBuckets()
+ if err != nil {
+ log.Fatalf("Couldn't remove old test buckets: %v", err)
+ }
+ }
+
+ // Make sure the test buckets are created and put into the readier worker queue
+ start := time.Now()
+ if err := tbp.createTestBuckets(numBuckets, tbpBucketQuotaMB(), bucketInitFunc); err != nil {
+ log.Fatalf("Couldn't create test buckets: %v", err)
+ }
+ atomic.AddInt32(&tbp.stats.TotalBucketInitCount, int32(numBuckets))
+ atomic.AddInt64(&tbp.stats.TotalBucketInitDurationNano, time.Since(start).Nanoseconds())
+
+ return &tbp
+}
+
+// Logf formats the given test bucket logging and logs to stderr.
+func (tbp *TestBucketPool) Logf(ctx context.Context, format string, args ...interface{}) {
+ if tbp != nil && !tbp.verbose.IsTrue() {
+ return
+ }
+
+ format = addPrefixes(format, ctx, LevelNone, KeySGTest)
+ if colorEnabled() {
+ // Green
+ format = "\033[0;32m" + format + "\033[0m"
+ }
+
+ _, _ = fmt.Fprintf(consoleFOutput, format+"\n", args...)
+}
+
+// GetTestBucketAndSpec returns a bucket to be used during a test.
+// The returned teardownFn MUST be called once the test is done,
+// which closes the bucket, readies it for a new test, and releases back into the pool.
+func (tbp *TestBucketPool) GetTestBucketAndSpec(t testing.TB) (b Bucket, s BucketSpec, teardownFn func()) {
+
+ ctx := testCtx(t)
+
+ // Return a new Walrus bucket when tbp has not been initialized
+ if !tbp.integrationMode {
+ if !UnitTestUrlIsWalrus() {
+ tbp.Logf(ctx, "nil TestBucketPool, but not using a Walrus test URL")
+ os.Exit(1)
+ }
+
+ walrusBucket := walrus.NewBucket(tbpBucketNamePrefix + "walrus_" + GenerateRandomID())
+
+ // Wrap Walrus buckets with a leaky bucket to support vbucket IDs on feed.
+ b = &LeakyBucket{bucket: walrusBucket, config: LeakyBucketConfig{TapFeedVbuckets: true}}
+
+ ctx := bucketCtx(ctx, b)
+ tbp.Logf(ctx, "Creating new walrus test bucket")
+
+ initFuncStart := time.Now()
+ err := tbp.bucketInitFunc(ctx, b, tbp)
+ if err != nil {
+ panic(err)
+ }
+ atomic.AddInt32(&tbp.stats.TotalBucketInitCount, 1)
+ atomic.AddInt64(&tbp.stats.TotalBucketInitDurationNano, time.Since(initFuncStart).Nanoseconds())
+
+ atomic.AddInt32(&tbp.stats.NumBucketsOpened, 1)
+ openedStart := time.Now()
+ return b, getBucketSpec(tbpBucketName(b.GetName())), func() {
+ atomic.AddInt32(&tbp.stats.NumBucketsClosed, 1)
+ atomic.AddInt64(&tbp.stats.TotalInuseBucketNano, time.Since(openedStart).Nanoseconds())
+ tbp.Logf(ctx, "Teardown called - Closing walrus test bucket")
+ b.Close()
+ }
+ }
+
+ if atomic.LoadUint32(&tbp.preservedBucketCount) >= uint32(cap(tbp.readyBucketPool)) {
+ tbp.Logf(ctx,
+ "No more buckets available for testing. All pooled buckets have been preserved by failing tests.")
+ t.Skipf("No more buckets available for testing. All pooled buckets have been preserved for failing tests.")
+ }
+
+ tbp.Logf(ctx, "Attempting to get test bucket from pool")
+ waitingBucketStart := time.Now()
+ gocbBucket := <-tbp.readyBucketPool
+ atomic.AddInt64(&tbp.stats.TotalWaitingForReadyBucketNano, time.Since(waitingBucketStart).Nanoseconds())
+ ctx = bucketCtx(ctx, gocbBucket)
+ tbp.Logf(ctx, "Got test bucket from pool")
+
+ atomic.AddInt32(&tbp.stats.NumBucketsOpened, 1)
+ bucketOpenStart := time.Now()
+ return gocbBucket, getBucketSpec(tbpBucketName(gocbBucket.GetName())), func() {
+ atomic.AddInt32(&tbp.stats.NumBucketsClosed, 1)
+ atomic.AddInt64(&tbp.stats.TotalInuseBucketNano, time.Since(bucketOpenStart).Nanoseconds())
+ tbp.Logf(ctx, "Teardown called - closing bucket")
+ gocbBucket.Close()
+
+ if tbp.preserveBuckets && t.Failed() {
+ tbp.Logf(ctx, "Test using bucket failed. Preserving bucket for later inspection")
+ atomic.AddUint32(&tbp.preservedBucketCount, 1)
+ return
+ }
+
+ tbp.Logf(ctx, "Teardown called - Pushing into bucketReadier queue")
+ tbp.addBucketToReadierQueue(ctx, tbpBucketName(gocbBucket.GetName()))
+ }
+}
+
+func (tbp *TestBucketPool) addBucketToReadierQueue(ctx context.Context, name tbpBucketName) {
+ tbp.bucketReadierWaitGroup.Add(1)
+ tbp.Logf(ctx, "Putting bucket onto bucketReadierQueue")
+ tbp.bucketReadierQueue <- name
+}
+
+// Close waits for any buckets to be cleaned, and closes the pool.
+func (tbp *TestBucketPool) Close() {
+ if tbp == nil {
+ // noop
+ return
+ }
+
+ // Cancel async workers
+ if tbp.ctxCancelFunc != nil {
+ tbp.bucketReadierWaitGroup.Wait()
+ tbp.ctxCancelFunc()
+ }
+
+ if tbp.cluster != nil {
+ if err := tbp.cluster.Close(); err != nil {
+ tbp.Logf(context.Background(), "Couldn't close cluster connection: %v", err)
+ }
+ }
+
+ tbp.printStats()
+}
+
+// printStats outputs test bucket stats for the current package's test run.
+func (tbp *TestBucketPool) printStats() {
+
+ numBucketsOpened := time.Duration(atomic.LoadInt32(&tbp.stats.NumBucketsOpened))
+ if numBucketsOpened == 0 {
+ // we may have been running benchmarks if we've opened zero test buckets
+ // in any case; if we have no stats, don't bother printing anything.
+ return
+ }
+
+ totalBucketInitTime := time.Duration(atomic.LoadInt64(&tbp.stats.TotalBucketInitDurationNano))
+ totalBucketInitCount := time.Duration(atomic.LoadInt32(&tbp.stats.TotalBucketInitCount))
+
+ totalBucketReadierTime := time.Duration(atomic.LoadInt64(&tbp.stats.TotalBucketReadierDurationNano))
+ totalBucketReadierCount := time.Duration(atomic.LoadInt32(&tbp.stats.TotalBucketReadierCount))
+
+ totalBucketWaitTime := time.Duration(atomic.LoadInt64(&tbp.stats.TotalWaitingForReadyBucketNano))
+
+ totalBucketUseTime := time.Duration(atomic.LoadInt64(&tbp.stats.TotalInuseBucketNano))
+
+ origVerbose := tbp.verbose.IsTrue()
+ tbp.verbose.Set(true)
+ ctx := context.Background()
+
+ tbp.Logf(ctx, "==========================")
+ tbp.Logf(ctx, "= Test Bucket Pool Stats =")
+ tbp.Logf(ctx, "==========================")
+ if totalBucketInitCount > 0 {
+ tbp.Logf(ctx, "Total bucket init time: %s for %d buckets (avg: %s)", totalBucketInitTime, totalBucketInitCount, totalBucketInitTime/totalBucketInitCount)
+ } else {
+ tbp.Logf(ctx, "Total bucket init time: %s for %d buckets", totalBucketInitTime, totalBucketInitCount)
+ }
+ if totalBucketReadierCount > 0 {
+ tbp.Logf(ctx, "Total bucket readier time: %s for %d buckets (avg: %s)", totalBucketReadierTime, totalBucketReadierCount, totalBucketReadierTime/totalBucketReadierCount)
+ } else {
+ tbp.Logf(ctx, "Total bucket readier time: %s for %d buckets", totalBucketReadierTime, totalBucketReadierCount)
+ }
+ tbp.Logf(ctx, "Total buckets opened/closed: %d/%d", numBucketsOpened, atomic.LoadInt32(&tbp.stats.NumBucketsClosed))
+ if numBucketsOpened > 0 {
+ tbp.Logf(ctx, "Total time waiting for ready bucket: %s over %d buckets (avg: %s)", totalBucketWaitTime, numBucketsOpened, totalBucketWaitTime/numBucketsOpened)
+ tbp.Logf(ctx, "Total time tests using buckets: %s (avg: %s)", totalBucketUseTime, totalBucketUseTime/numBucketsOpened)
+ } else {
+ tbp.Logf(ctx, "Total time waiting for ready bucket: %s over %d buckets", totalBucketWaitTime, numBucketsOpened)
+ tbp.Logf(ctx, "Total time tests using buckets: %s", totalBucketUseTime)
+ }
+ tbp.Logf(ctx, "==========================")
+
+ tbp.verbose.Set(origVerbose)
+}
+
+// removeOldTestBuckets removes all buckets starting with testBucketNamePrefix
+func (tbp *TestBucketPool) removeOldTestBuckets() error {
+ buckets, err := getBuckets(tbp.clusterMgr)
+ if err != nil {
+ return errors.Wrap(err, "couldn't retrieve buckets from cluster manager")
+ }
+
+ wg := sync.WaitGroup{}
+
+ for _, b := range buckets {
+ if strings.HasPrefix(b.Name, tbpBucketNamePrefix) {
+ ctx := bucketNameCtx(context.Background(), b.Name)
+ tbp.Logf(ctx, "Removing old test bucket")
+ wg.Add(1)
+
+ // Run the RemoveBucket requests concurrently, as it takes a while per bucket.
+ go func(b *gocb.BucketSettings) {
+ err := tbp.clusterMgr.RemoveBucket(b.Name)
+ if err != nil {
+ tbp.Logf(ctx, "Error removing old test bucket: %v", err)
+ } else {
+ tbp.Logf(ctx, "Removed old test bucket")
+ }
+
+ wg.Done()
+ }(b)
+ }
+ }
+
+ wg.Wait()
+
+ return nil
+}
+
+// getBuckets returns a list of buckets in the cluster.
+func getBuckets(cm *gocb.ClusterManager) ([]*gocb.BucketSettings, error) {
+ buckets, err := cm.GetBuckets()
+ if err != nil {
+ // special handling for gocb's empty non-nil error if we send this request with invalid credentials
+ if err.Error() == "" {
+ err = errors.New("couldn't get buckets from cluster, check authentication credentials")
+ }
+ return nil, err
+ }
+ return buckets, nil
+}
+
+// createTestBuckets creates a new set of integration test buckets and pushes them into the readier queue.
+func (tbp *TestBucketPool) createTestBuckets(numBuckets int, bucketQuotaMB int, bucketInitFunc TBPBucketInitFunc) error {
+
+ // get a list of any existing buckets, so we can skip creation of them.
+ existingBuckets, err := getBuckets(tbp.clusterMgr)
+ if err != nil {
+ return err
+ }
+
+ // keep references to opened buckets for use later in this function
+ openBuckets := make([]*CouchbaseBucketGoCB, numBuckets)
+
+ wg := sync.WaitGroup{}
+ wg.Add(numBuckets)
+
+ // create required number of buckets (skipping any already existing ones)
+ for i := 0; i < numBuckets; i++ {
+ testBucketName := tbpBucketNamePrefix + strconv.Itoa(i)
+ ctx := bucketNameCtx(context.Background(), testBucketName)
+
+ var bucketExists bool
+ for _, b := range existingBuckets {
+ if testBucketName == b.Name {
+ tbp.Logf(ctx, "Skipping InsertBucket... Bucket already exists")
+ bucketExists = true
+ }
+ }
+
+ // Bucket creation takes a few seconds for each bucket,
+ // so create and wait for readiness concurrently.
+ go func(i int, bucketExists bool) {
+ if !bucketExists {
+ tbp.Logf(ctx, "Creating new test bucket")
+ err := tbp.clusterMgr.InsertBucket(&gocb.BucketSettings{
+ Name: testBucketName,
+ Quota: bucketQuotaMB,
+ Type: gocb.Couchbase,
+ FlushEnabled: true,
+ IndexReplicas: false,
+ Replicas: 0,
+ })
+ if err != nil {
+ tbp.Logf(ctx, "Couldn't create test bucket: %v", err)
+ os.Exit(1)
+ }
+ }
+
+ b, err := tbp.openTestBucket(tbpBucketName(testBucketName), CreateSleeperFunc(5*numBuckets, 1000))
+ if err != nil {
+ tbp.Logf(ctx, "Timed out trying to open new bucket: %v", err)
+ os.Exit(1)
+ }
+ openBuckets[i] = b
+
+ wg.Done()
+ }(i, bucketExists)
+ }
+
+ // wait for the async bucket creation and opening of buckets to finish
+ wg.Wait()
+
+ // All the buckets are created and opened, so now we can perform some synchronous setup (e.g. Creating GSI indexes)
+ for i := 0; i < numBuckets; i++ {
+ testBucketName := tbpBucketNamePrefix + strconv.Itoa(i)
+ ctx := bucketNameCtx(context.Background(), testBucketName)
+
+ tbp.Logf(ctx, "running bucketInitFunc")
+ b := openBuckets[i]
+
+ if err, _ := RetryLoop(b.GetName()+"bucketInitRetry", func() (bool, error, interface{}) {
+ tbp.Logf(ctx, "Running bucket through init function")
+ err = bucketInitFunc(ctx, b, tbp)
+ if err != nil {
+ tbp.Logf(ctx, "Couldn't init bucket, got error: %v - Retrying", err)
+ return true, err, nil
+ }
+ return false, nil, nil
+ }, CreateSleeperFunc(5, 1000)); err != nil {
+ tbp.Logf(ctx, "Couldn't init bucket, got error: %v - Aborting", err)
+ os.Exit(1)
+ }
+
+ b.Close()
+ tbp.addBucketToReadierQueue(ctx, tbpBucketName(testBucketName))
+ }
+
+ return nil
+}
+
+// bucketReadierWorker reads a channel of "dirty" buckets (bucketReadierQueue), does something to get them ready, and then puts them back into the pool.
+// The mechanism for getting the bucket ready can vary by package being tested (for instance, a package not requiring views or GSI can use FlushBucketEmptierFunc)
+// A package requiring views or GSI will need to pass in the db.ViewsAndGSIBucketReadier function.
+func (tbp *TestBucketPool) bucketReadierWorker(ctx context.Context, bucketReadierFunc TBPBucketReadierFunc) {
+ tbp.Logf(context.Background(), "Starting bucketReadier")
+
+loop:
+ for {
+ select {
+ case <-ctx.Done():
+ tbp.Logf(context.Background(), "bucketReadier got ctx cancelled")
+ break loop
+
+ case testBucketName := <-tbp.bucketReadierQueue:
+ atomic.AddInt32(&tbp.stats.TotalBucketReadierCount, 1)
+ ctx := bucketNameCtx(ctx, string(testBucketName))
+ tbp.Logf(ctx, "bucketReadier got bucket")
+
+ go func(testBucketName tbpBucketName) {
+ // We might not actually be "done" with the bucket if something fails,
+ // but we need to release the waitgroup so tbp.Close() doesn't block forever.
+ defer tbp.bucketReadierWaitGroup.Done()
+
+ start := time.Now()
+ b, err := tbp.openTestBucket(testBucketName, CreateSleeperFunc(5, 1000))
+ if err != nil {
+ tbp.Logf(ctx, "Couldn't open bucket to get ready, got error: %v", err)
+ return
+ }
+
+ err, _ = RetryLoop(b.GetName()+"bucketReadierRetry", func() (bool, error, interface{}) {
+ tbp.Logf(ctx, "Running bucket through readier function")
+ err = bucketReadierFunc(ctx, b, tbp)
+ if err != nil {
+ tbp.Logf(ctx, "Couldn't ready bucket, got error: %v - Retrying", err)
+ return true, err, nil
+ }
+ return false, nil, nil
+ }, CreateSleeperFunc(5, 1000))
+ if err != nil {
+ tbp.Logf(ctx, "Couldn't ready bucket, got error: %v - Aborting readier for bucket", err)
+ return
+ }
+
+ tbp.Logf(ctx, "Bucket ready, putting back into ready pool")
+ tbp.readyBucketPool <- b
+ atomic.AddInt64(&tbp.stats.TotalBucketReadierDurationNano, time.Since(start).Nanoseconds())
+ }(testBucketName)
+ }
+ }
+
+ tbp.Logf(context.Background(), "Stopped bucketReadier")
+}
+
+// openTestBucket opens the bucket of the given name for the gocb cluster in the given TestBucketPool.
+func (tbp *TestBucketPool) openTestBucket(testBucketName tbpBucketName, sleeper RetrySleeper) (*CouchbaseBucketGoCB, error) {
+
+ ctx := bucketNameCtx(context.Background(), string(testBucketName))
+
+ bucketSpec := tbp.defaultBucketSpec
+ bucketSpec.BucketName = string(testBucketName)
+
+ waitForNewBucketWorker := func() (shouldRetry bool, err error, value interface{}) {
+ gocbBucket, err := GetCouchbaseBucketGoCBFromAuthenticatedCluster(tbp.cluster, bucketSpec, "")
+ if err != nil {
+ tbp.Logf(ctx, "Retrying OpenBucket")
+ return true, err, nil
+ }
+ return false, nil, gocbBucket
+ }
+
+ tbp.Logf(ctx, "Opening bucket")
+ err, val := RetryLoop("waitForNewBucket", waitForNewBucketWorker, sleeper)
+
+ gocbBucket, _ := val.(*CouchbaseBucketGoCB)
+
+ return gocbBucket, err
+}
+
+// TBPBucketInitFunc is a function that is run once (synchronously) when creating/opening a bucket.
+type TBPBucketInitFunc func(ctx context.Context, b Bucket, tbp *TestBucketPool) error
+
+// NoopInitFunc does nothing to init a bucket. This can be used in conjunction with FlushBucketReadier when there's no requirement for views/GSI.
+var NoopInitFunc TBPBucketInitFunc = func(ctx context.Context, b Bucket, tbp *TestBucketPool) error {
+ return nil
+}
+
+// PrimaryIndexInitFunc creates a primary index on the given bucket. This can then be used with N1QLBucketEmptierFunc, for improved compatibility with GSI.
+// Will be used when GSI is re-enabled (CBG-813)
+var PrimaryIndexInitFunc TBPBucketInitFunc = func(ctx context.Context, b Bucket, tbp *TestBucketPool) error {
+ gocbBucket, ok := AsGoCBBucket(b)
+ if !ok {
+ tbp.Logf(ctx, "skipping primary index creation for non-gocb bucket")
+ return nil
+ }
+
+ if hasPrimary, _, err := gocbBucket.getIndexMetaWithoutRetry(PrimaryIndexName); err != nil {
+ return err
+ } else if !hasPrimary {
+ err := gocbBucket.CreatePrimaryIndex(PrimaryIndexName, nil)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// TBPBucketReadierFunc is a function that runs once a test is finished with a bucket. This runs asynchronously.
+type TBPBucketReadierFunc func(ctx context.Context, b *CouchbaseBucketGoCB, tbp *TestBucketPool) error
+
+// FlushBucketEmptierFunc ensures the bucket is empty by flushing. It is not recommended to use with GSI.
+var FlushBucketEmptierFunc TBPBucketReadierFunc = func(ctx context.Context, b *CouchbaseBucketGoCB, tbp *TestBucketPool) error {
+ return b.Flush()
+}
+
+// N1QLBucketEmptierFunc ensures the bucket is empty by using N1QL deletes. This is the preferred approach when using GSI.
+// Will be used when GSI is re-enabled (CBG-813)
+var N1QLBucketEmptierFunc TBPBucketReadierFunc = func(ctx context.Context, b *CouchbaseBucketGoCB, tbp *TestBucketPool) error {
+ if hasPrimary, _, err := b.getIndexMetaWithoutRetry(PrimaryIndexName); err != nil {
+ return err
+ } else if !hasPrimary {
+ return fmt.Errorf("bucket does not have primary index, so can't empty bucket using N1QL")
+ }
+
+ if itemCount, err := b.QueryBucketItemCount(); err != nil {
+ return err
+ } else if itemCount == 0 {
+ tbp.Logf(ctx, "Bucket already empty - skipping")
+ } else {
+ tbp.Logf(ctx, "Bucket not empty (%d items), emptying bucket via N1QL", itemCount)
+ // Use N1QL to empty bucket, with the hope that the query service is happier to deal with this than a bucket flush/rollback.
+ // Requires a primary index on the bucket.
+ res, err := b.Query(`DELETE FROM $_bucket`, nil, gocb.RequestPlus, false)
+ if err != nil {
+ return err
+ }
+ _ = res.Close()
+ }
+
+ return nil
+}
+
+// bucketPoolStats is the struct used to track runtime/counts of various test bucket operations.
+// printStats() is called once a package's tests have finished to output these stats.
+type bucketPoolStats struct {
+ TotalBucketInitDurationNano int64
+ TotalBucketInitCount int32
+ TotalBucketReadierDurationNano int64
+ TotalBucketReadierCount int32
+ NumBucketsOpened int32
+ NumBucketsClosed int32
+ TotalWaitingForReadyBucketNano int64
+ TotalInuseBucketNano int64
+}
+
+// tbpBucketName is a strongly typed bucket name.
+type tbpBucketName string
+
+// tbpCluster returns an authenticated gocb Cluster for the given server URL.
+func tbpCluster(server string) *gocb.Cluster {
+ spec := BucketSpec{
+ Server: server,
+ }
+
+ connStr, err := spec.GetGoCBConnString()
+ if err != nil {
+ log.Fatalf("error getting connection string: %v", err)
+ }
+
+ cluster, err := gocb.Connect(connStr)
+ if err != nil {
+ log.Fatalf("Couldn't connect to %q: %v", server, err)
+ }
+
+ err = cluster.Authenticate(gocb.PasswordAuthenticator{
+ Username: tbpClusterUsername(),
+ Password: tbpClusterPassword(),
+ })
+ if err != nil {
+ log.Fatalf("Couldn't authenticate with %q: %v", server, err)
+ }
+
+ return cluster
+}
+
+var tbpDefaultBucketSpec = BucketSpec{
+ Server: UnitTestUrl(),
+ CouchbaseDriver: GoCBCustomSGTranscoder,
+ Auth: TestAuthenticator{
+ Username: tbpClusterUsername(),
+ Password: tbpClusterPassword(),
+ },
+ UseXattrs: TestUseXattrs(),
+}
+
+// getBucketSpec returns a new BucketSpec for the given bucket name.
+func getBucketSpec(testBucketName tbpBucketName) BucketSpec {
+ bucketSpec := tbpDefaultBucketSpec
+ bucketSpec.BucketName = string(testBucketName)
+ return bucketSpec
+}
+
+// tbpNumBuckets returns the configured number of buckets to use in the pool.
+func tbpNumBuckets() int {
+ numBuckets := tbpDefaultBucketPoolSize
+ if envPoolSize := os.Getenv(tbpEnvPoolSize); envPoolSize != "" {
+ var err error
+ numBuckets, err = strconv.Atoi(envPoolSize)
+ if err != nil {
+ log.Fatalf("Couldn't parse %s: %v", tbpEnvPoolSize, err)
+ }
+ }
+ return numBuckets
+}
+
+// tbpBucketQuotaMB returns the configured bucket RAM quota.
+func tbpBucketQuotaMB() int {
+ bucketQuota := defaultBucketQuotaMB
+ if envBucketQuotaMB := os.Getenv(tbpEnvBucketQuotaMB); envBucketQuotaMB != "" {
+ var err error
+ bucketQuota, err = strconv.Atoi(envBucketQuotaMB)
+ if err != nil {
+ log.Fatalf("Couldn't parse %s: %v", tbpEnvBucketQuotaMB, err)
+ }
+ }
+ return bucketQuota
+}
+
+// tbpVerbose returns the configured test bucket pool verbose flag.
+func tbpVerbose() bool {
+ verbose, _ := strconv.ParseBool(os.Getenv(tbpEnvVerbose))
+ return verbose
+}
+
+// tbpClusterUsername returns the configured cluster username.
+func tbpClusterUsername() string {
+ username := tbpDefaultClusterUsername
+ if envClusterUsername := os.Getenv(tbpEnvClusterUsername); envClusterUsername != "" {
+ username = envClusterUsername
+ }
+ return username
+}
+
+// tbpClusterPassword returns the configured cluster password.
+func tbpClusterPassword() string {
+ password := tbpDefaultClusterPassword
+ if envClusterPassword := os.Getenv(tbpEnvClusterPassword); envClusterPassword != "" {
+ password = envClusterPassword
+ }
+ return password
+}
diff --git a/base/util_testing.go b/base/util_testing.go
index ecec5b00de..849682e900 100644
--- a/base/util_testing.go
+++ b/base/util_testing.go
@@ -7,12 +7,12 @@ import (
"log"
"math/rand"
"os"
+ "strconv"
"strings"
"sync"
"testing"
"time"
- "github.com/couchbase/gocb"
"github.com/stretchr/testify/assert"
)
@@ -35,133 +35,20 @@ func init() {
type TestBucket struct {
Bucket
BucketSpec BucketSpec
+ closeFn func()
}
func (tb TestBucket) Close() {
-
- tb.Bucket.Close()
-
- DecrNumOpenBuckets(tb.Bucket.GetName())
-}
-
-func GetTestBucket(tester testing.TB) TestBucket {
- return GetBucketCommon(DataBucket, tester)
-}
-
-func GetTestIndexBucket(tester testing.TB) TestBucket {
- return GetBucketCommon(IndexBucket, tester)
-}
-
-func GetTestBucketSpec(bucketType CouchbaseBucketType) BucketSpec {
-
- bucketName := DefaultTestBucketname
- username := DefaultTestUsername
- password := DefaultTestPassword
-
- // Use a different bucket name for index buckets to avoid interference
- switch bucketType {
- case IndexBucket:
- bucketName = DefaultTestIndexBucketname
- username = DefaultTestIndexUsername
- password = DefaultTestIndexPassword
- }
-
- testAuth := TestAuthenticator{
- Username: username,
- Password: password,
- BucketName: bucketName,
- }
-
- spec := BucketSpec{
- Server: UnitTestUrl(),
- BucketName: bucketName,
-
- CouchbaseDriver: ChooseCouchbaseDriver(bucketType),
- Auth: testAuth,
- UseXattrs: TestUseXattrs(),
- }
-
- if spec.IsWalrusBucket() {
- // Use a unique bucket name to reduce the chance of interference between temporary test walrus buckets
- spec.BucketName = fmt.Sprintf("%s-%s", spec.BucketName, GenerateRandomID())
- }
-
- return spec
-
+ tb.closeFn()
}
-func GetBucketCommon(bucketType CouchbaseBucketType, tester testing.TB) TestBucket {
-
- spec := GetTestBucketSpec(bucketType)
-
- if !spec.IsWalrusBucket() {
-
- // If this is not testing against a walrus bucket, then it's testing against a Coucbhase Server bucket,
- // and therefore needs to create the bucket if it doesn't already exist, or flush it if it does.
-
- tbm := NewTestBucketManager(spec)
- bucketExists, err := tbm.OpenTestBucket()
- if err != nil {
- tester.Fatalf("Error checking if bucket exists. Spec: %+v err: %v", spec, err)
- }
- switch bucketExists {
- case true:
- // Empty it
- if err := tbm.RecreateOrEmptyBucket(); err != nil {
- tester.Fatalf("Error trying to empty bucket. Spec: %+v. err: %v", spec, err)
-
- }
- case false:
- // Create a brand new bucket
- // TODO: in this case, we should still wait until it's empty, just in case there was somehow residue
- // TODO: in between deleting and recreating it, if it happened in rapid succession
- if err := tbm.CreateTestBucket(); err != nil {
- tester.Fatalf("Could not create bucket. Spec: %+v Err: %v", spec, err)
- }
- }
-
- // Close the bucket and any other temporary resources associated with the TestBucketManager
- tbm.Close()
-
- }
-
- // Now open the bucket _again_ to ensure it's open with the correct driver
- bucket, err := GetBucket(spec)
- if err != nil {
- tester.Fatalf("Could not open bucket: %v", err)
- }
-
- return TestBucket{
+func GetTestBucket(t testing.TB) *TestBucket {
+ bucket, spec, closeFn := GTestBucketPool.GetTestBucketAndSpec(t)
+ return &TestBucket{
Bucket: bucket,
BucketSpec: spec,
+ closeFn: closeFn,
}
-
-}
-
-func GetBucketWithInvalidUsernamePassword(bucketType CouchbaseBucketType) (TestBucket, error) {
-
- spec := GetTestBucketSpec(bucketType)
-
- // Override spec's auth with invalid creds
- spec.Auth = TestAuthenticator{
- Username: "invalid_username",
- Password: "invalid_password",
- BucketName: spec.BucketName,
- }
-
- // Attempt to open a test bucket with invalid creds. We should expect an error.
- bucket, err := GetBucket(spec)
- return TestBucket{Bucket: bucket}, err
-
-}
-
-// Convenience function that will cause a bucket to be created if it doesn't already exist.
-func InitializeBucket(bucketType CouchbaseBucketType, tester testing.TB) {
-
- // Create
- tempBucket := GetBucketCommon(bucketType, tester)
- tempBucket.Close()
-
}
// Should Sync Gateway use XATTRS functionality when running unit tests?
@@ -194,6 +81,22 @@ func TestsShouldDropIndexes() bool {
}
+// TestsDisableGSI returns true if tests should be forced to avoid any GSI-specific code.
+func TestsDisableGSI() bool {
+ // FIXME: CBG-813 - Re-enable GSI in integration tests after CB 6.5.1 Beta
+ if true {
+ return true
+ }
+
+ // Disable GSI when running with Walrus
+ if !TestUseCouchbaseServer() && UnitTestUrlIsWalrus() {
+ return true
+ }
+
+ disableGSI, _ := strconv.ParseBool(os.Getenv(TestEnvSyncGatewayDisableGSI))
+ return disableGSI
+}
+
// Check the whether tests are being run with SG_TEST_BACKING_STORE=Couchbase
func TestUseCouchbaseServer() bool {
backingStore := os.Getenv(TestEnvSyncGatewayBackingStore)
@@ -210,62 +113,6 @@ func (t TestAuthenticator) GetCredentials() (username, password, bucketname stri
return t.Username, t.Password, t.BucketName
}
-type TestBucketManager struct {
- AdministratorUsername string
- AdministratorPassword string
- BucketSpec BucketSpec
- Bucket *CouchbaseBucketGoCB
- AuthHandler AuthHandler
- Cluster *gocb.Cluster
- ClusterManager *gocb.ClusterManager
-}
-
-func NewTestBucketManager(spec BucketSpec) *TestBucketManager {
-
- tbm := TestBucketManager{
- AdministratorUsername: DefaultCouchbaseAdministrator,
- AdministratorPassword: DefaultCouchbasePassword,
- AuthHandler: spec.Auth,
- BucketSpec: spec,
- }
-
- return &tbm
-
-}
-
-func (tbm *TestBucketManager) OpenTestBucket() (bucketExists bool, err error) {
-
- if NumOpenBuckets(tbm.BucketSpec.BucketName) > 0 {
- return false, fmt.Errorf("There are already %d open buckets with name: %s. The tests expect all buckets to be closed.", NumOpenBuckets(tbm.BucketSpec.BucketName), tbm.BucketSpec.BucketName)
- }
-
- IncrNumOpenBuckets(tbm.BucketSpec.BucketName)
-
- tbm.Bucket, err = GetCouchbaseBucketGoCB(tbm.BucketSpec)
- if err != nil {
- return false, err
- }
-
- return true, nil
-
-}
-
-func (tbm *TestBucketManager) Close() {
- tbm.Bucket.Close()
-}
-
-// GOCB doesn't currently offer a way to do this, and so this is a workaround to go directly
-// to Couchbase Server REST API.
-// See https://forums.couchbase.com/t/is-there-a-way-to-get-the-number-of-items-in-a-bucket/12816/4
-// for GOCB discussion.
-func (tbm *TestBucketManager) BucketItemCount() (itemCount int, err error) {
- return tbm.Bucket.BucketItemCount()
-}
-
-func (tbm *TestBucketManager) DropIndexes() error {
- return DropAllBucketIndexes(tbm.Bucket)
-}
-
// Reset bucket state
func DropAllBucketIndexes(gocbBucket *CouchbaseBucketGoCB) error {
@@ -334,153 +181,6 @@ func getIndexes(gocbBucket *CouchbaseBucketGoCB) (indexes []string, err error) {
}
return indexes, nil
-
-}
-
-func (tbm *TestBucketManager) FlushBucket() error {
-
- // Try to Flush the bucket in a retry loop
- // Ignore sporadic errors like:
- // Error trying to empty bucket. err: {"_":"Flush failed with unexpected error. Check server logs for details."}
-
- Infof(KeyAll, "Flushing bucket %s", tbm.Bucket.Name())
-
- workerFlush := func() (shouldRetry bool, err error, value interface{}) {
- err = tbm.Bucket.Flush()
- if err != nil {
- Warnf("Error flushing bucket: %v Will retry.", err)
- }
- shouldRetry = (err != nil) // retry (until max attempts) if there was an error
- return shouldRetry, err, nil
- }
-
- err, _ := RetryLoop("EmptyTestBucket", workerFlush, CreateDoublingSleeperFunc(12, 10))
- if err != nil {
- return err
- }
-
- maxTries := 20
- numTries := 0
- for {
-
- itemCount, err := tbm.BucketItemCount()
- if err != nil {
- return err
- }
-
- if itemCount == 0 {
- // Bucket flushed, we're done
- break
- }
-
- if numTries > maxTries {
- return fmt.Errorf("Timed out waiting for bucket to be empty after flush. ItemCount: %v", itemCount)
- }
-
- // Still items left, wait a little bit and try again
- Warnf("TestBucketManager.EmptyBucket(): still %d items in bucket after flush, waiting for no items. Will retry.", itemCount)
- time.Sleep(time.Millisecond * 500)
-
- numTries += 1
-
- }
-
- return nil
-
-}
-
-func (tbm *TestBucketManager) RecreateOrEmptyBucket() error {
-
- if TestsShouldDropIndexes() {
- if err := tbm.DropIndexes(); err != nil {
- return err
- }
- }
-
- if err := tbm.FlushBucket(); err != nil {
- return err
- }
-
- return nil
-}
-
-func (tbm *TestBucketManager) DeleteTestBucket() error {
-
- err := tbm.ClusterManager.RemoveBucket(tbm.BucketSpec.BucketName)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-func (tbm *TestBucketManager) CreateTestBucket() error {
-
- username, password, _ := tbm.BucketSpec.Auth.GetCredentials()
-
- log.Printf("Create bucket with username: %v password: %v", username, password)
-
- ramQuotaMB := 100
-
- bucketSettings := gocb.BucketSettings{
- Name: tbm.BucketSpec.BucketName,
- Type: gocb.Couchbase,
- Password: password,
- Quota: ramQuotaMB,
- Replicas: 0,
- IndexReplicas: false,
- FlushEnabled: true,
- }
-
- err := tbm.ClusterManager.InsertBucket(&bucketSettings)
- if err != nil {
- return err
- }
-
- // Add an RBAC user
- // TODO: This isn't working, filed a question here: https://forums.couchbase.com/t/creating-rbac-user-via-go-sdk-against-couchbase-server-5-0-0-build-2958/12983
- // TODO: This is only needed if server is 5.0 or later, but not sure how to check couchbase server version
- //roles := []gocb.UserRole{
- // gocb.UserRole{
- // Role: "bucket_admin",
- // // BucketName: tbm.BucketSpec.BucketName,
- // BucketName: "test_data_bucket",
- // },
- //}
- //userSettings := &gocb.UserSettings{
- // // Name: username,
- // // Password: password,
- // Name: "test_data_bucket",
- // Password: "password",
- // Roles: roles,
- //}
- //err = tbm.ClusterManager.UpsertUser(username, userSettings)
- //if err != nil {
- // log.Printf("Error UpsertUser: %v", err)
- // return err
- //}
-
- numTries := 0
- maxTries := 20
- for {
-
- bucket, errOpen := GetBucket(tbm.BucketSpec)
-
- if errOpen == nil {
- // We were able to open the bucket, so it worked and we're done
- bucket.Close()
- return nil
- }
-
- if numTries >= maxTries {
- return fmt.Errorf("Created bucket, but unable to connect to it after several attempts. Spec: %+v", tbm.BucketSpec)
- }
-
- // Maybe it's not ready yet, wait a little bit and retry
- numTries += 1
- time.Sleep(time.Millisecond * 500)
-
- }
}
// Generates a string of size int
@@ -494,39 +194,6 @@ func CreateProperty(size int) (result string) {
return string(resultBytes)
}
-func IncrNumOpenBuckets(bucketName string) {
- MutateNumOpenBuckets(bucketName, 1)
-
-}
-
-func DecrNumOpenBuckets(bucketName string) {
- MutateNumOpenBuckets(bucketName, -1)
-}
-
-func MutateNumOpenBuckets(bucketName string, delta int32) {
- mutexNumOpenBucketsByName.Lock()
- defer mutexNumOpenBucketsByName.Unlock()
-
- numOpen, ok := numOpenBucketsByName[bucketName]
- if !ok {
- numOpen = 0
- numOpenBucketsByName[bucketName] = numOpen
- }
-
- numOpen += delta
- numOpenBucketsByName[bucketName] = numOpen
-}
-
-func NumOpenBuckets(bucketName string) int32 {
- mutexNumOpenBucketsByName.Lock()
- defer mutexNumOpenBucketsByName.Unlock()
- numOpen, ok := numOpenBucketsByName[bucketName]
- if !ok {
- return 0
- }
- return numOpen
-}
-
// SetUpTestLogging will set the given log level and log keys,
// and return a function that can be deferred for teardown.
//
@@ -654,3 +321,8 @@ func DirExists(filename string) bool {
}
return info.IsDir()
}
+
+// getTestKeyNamespace returns a unique doc key namespace that can be prepended in tests.
+func getTestKeyNamespace(t *testing.T) string {
+ return t.Name()
+}
diff --git a/db/attachment_test.go b/db/attachment_test.go
index 29b80ddcad..39b2684306 100644
--- a/db/attachment_test.go
+++ b/db/attachment_test.go
@@ -226,7 +226,7 @@ func TestAttachments(t *testing.T) {
func TestAttachmentForRejectedDocument(t *testing.T) {
- testBucket := testBucket(t)
+ testBucket := base.GetTestBucket(t)
defer testBucket.Close()
bucket := testBucket.Bucket
@@ -341,8 +341,10 @@ func TestAttachmentCASRetryAfterNewAttachment(t *testing.T) {
WriteUpdateCallback: writeUpdateCallback,
}
- db = setupTestLeakyDBWithCacheOptions(t, DefaultCacheOptions(), queryCallbackConfig)
- defer tearDownTestDB(t, db)
+ var testBucket *base.TestBucket
+ db, testBucket = setupTestLeakyDBWithCacheOptions(t, DefaultCacheOptions(), queryCallbackConfig)
+ defer testBucket.Close()
+ defer db.Close()
// Test creating & updating a document:
@@ -400,8 +402,10 @@ func TestAttachmentCASRetryDuringNewAttachment(t *testing.T) {
WriteUpdateCallback: writeUpdateCallback,
}
- db = setupTestLeakyDBWithCacheOptions(t, DefaultCacheOptions(), queryCallbackConfig)
- defer tearDownTestDB(t, db)
+ var testBucket *base.TestBucket
+ db, testBucket = setupTestLeakyDBWithCacheOptions(t, DefaultCacheOptions(), queryCallbackConfig)
+ defer testBucket.Close()
+ defer db.Close()
// Test creating & updating a document:
diff --git a/db/change_cache_test.go b/db/change_cache_test.go
index 75b51f6182..7b1fd6d31e 100644
--- a/db/change_cache_test.go
+++ b/db/change_cache_test.go
@@ -76,14 +76,6 @@ func logEntry(seq uint64, docid string, revid string, channelNames []string) *Lo
return entry
}
-func testBucketContext(tester testing.TB) *DatabaseContext {
- contextOptions := DatabaseContextOptions{}
- cacheOptions := DefaultCacheOptions()
- contextOptions.CacheOptions = &cacheOptions
- context, _ := NewDatabaseContext("db", testBucket(tester).Bucket, false, contextOptions)
- return context
-}
-
func TestSkippedSequenceList(t *testing.T) {
skipList := NewSkippedSequenceList()
@@ -125,9 +117,9 @@ func TestSkippedSequenceList(t *testing.T) {
func TestLateSequenceHandling(t *testing.T) {
- context := testBucketContext(t)
+ context, testBucket := setupTestDBWithCacheOptions(t, DefaultCacheOptions())
+ defer testBucket.Close()
defer context.Close()
- defer base.DecrNumOpenBuckets(context.Bucket.GetName())
cacheStats := &expvar.Map{}
cache := newSingleChannelCache(context, "Test1", 0, cacheStats)
@@ -192,9 +184,11 @@ func TestLateSequenceHandling(t *testing.T) {
func TestLateSequenceHandlingWithMultipleListeners(t *testing.T) {
- context := testBucketContext(t)
+ b := base.GetTestBucket(t)
+ defer b.Close()
+ context, err := NewDatabaseContext("db", b.Bucket, false, DatabaseContextOptions{})
+ require.NoError(t, err)
defer context.Close()
- defer base.DecrNumOpenBuckets(context.Bucket.GetName())
cache := newSingleChannelCache(context, "Test1", 0, &expvar.Map{})
goassert.True(t, cache != nil)
@@ -253,8 +247,8 @@ func TestLateSequenceErrorRecovery(t *testing.T) {
defer base.SetUpTestLogging(base.LevelInfo, base.KeyChanges, base.KeyCache)()
db, testBucket := setupTestDBWithCacheOptions(t, shortWaitCache())
- defer tearDownTestDB(t, db)
defer testBucket.Close()
+ defer db.Close()
db.ChannelMapper = channels.NewDefaultChannelMapper()
@@ -375,8 +369,8 @@ func TestLateSequenceHandlingDuringCompact(t *testing.T) {
cacheOptions := shortWaitCache()
cacheOptions.ChannelCacheOptions.MaxNumChannels = 100
db, testBucket := setupTestDBWithCacheOptions(t, cacheOptions)
- defer tearDownTestDB(t, db)
defer testBucket.Close()
+ defer db.Close()
db.ChannelMapper = channels.NewDefaultChannelMapper()
@@ -554,8 +548,8 @@ func TestChannelCacheBufferingWithUserDoc(t *testing.T) {
defer base.SetUpTestLogging(base.LevelDebug, base.KeyCache, base.KeyChanges, base.KeyDCP)()
db, testBucket := setupTestDB(t)
- defer tearDownTestDB(t, db)
defer testBucket.Close()
+ defer db.Close()
db.ChannelMapper = channels.NewDefaultChannelMapper()
// Simulate seq 1 (user doc) being delayed - write 2 first
@@ -593,8 +587,8 @@ func TestChannelCacheBackfill(t *testing.T) {
defer base.SetUpTestLogging(base.LevelDebug, base.KeyCache, base.KeyChanges)()
db, testBucket := setupTestDBWithCacheOptions(t, shortWaitCache())
- defer tearDownTestDB(t, db)
defer testBucket.Close()
+ defer db.Close()
db.ChannelMapper = channels.NewDefaultChannelMapper()
// Create a user with access to channel ABC
@@ -659,7 +653,7 @@ func TestContinuousChangesBackfill(t *testing.T) {
defer base.SetUpTestLogging(base.LevelInfo, base.KeyCache, base.KeyChanges, base.KeyDCP)()
db, testBucket := setupTestDBWithCacheOptions(t, shortWaitCache())
- defer tearDownTestDB(t, db)
+ defer db.Close()
defer testBucket.Close()
db.ChannelMapper = channels.NewDefaultChannelMapper()
@@ -760,7 +754,7 @@ func TestLowSequenceHandling(t *testing.T) {
defer base.SetUpTestLogging(base.LevelDebug, base.KeyCache, base.KeyChanges, base.KeyQuery)()
db, testBucket := setupTestDBWithCacheOptions(t, shortWaitCache())
- defer tearDownTestDB(t, db)
+ defer db.Close()
defer testBucket.Close()
db.ChannelMapper = channels.NewDefaultChannelMapper()
@@ -826,7 +820,7 @@ func TestLowSequenceHandlingAcrossChannels(t *testing.T) {
defer base.SetUpTestLogging(base.LevelDebug, base.KeyCache, base.KeyChanges, base.KeyQuery)()
db, testBucket := setupTestDBWithCacheOptions(t, shortWaitCache())
- defer tearDownTestDB(t, db)
+ defer db.Close()
defer testBucket.Close()
db.ChannelMapper = channels.NewDefaultChannelMapper()
@@ -884,7 +878,7 @@ func TestLowSequenceHandlingWithAccessGrant(t *testing.T) {
defer base.SetUpTestLogging(base.LevelDebug, base.KeyChanges, base.KeyQuery)()
db, testBucket := setupTestDBWithCacheOptions(t, shortWaitCache())
- defer tearDownTestDB(t, db)
+ defer db.Close()
defer testBucket.Close()
db.ChannelMapper = channels.NewDefaultChannelMapper()
@@ -991,9 +985,10 @@ func TestChannelQueryCancellation(t *testing.T) {
PostQueryCallback: postQueryCallback,
}
- db := setupTestLeakyDBWithCacheOptions(t, DefaultCacheOptions(), queryCallbackConfig)
+ db, testBucket := setupTestLeakyDBWithCacheOptions(t, DefaultCacheOptions(), queryCallbackConfig)
+ defer testBucket.Close()
db.ChannelMapper = channels.NewDefaultChannelMapper()
- defer tearDownTestDB(t, db)
+ defer db.Close()
// Write a handful of docs/sequences to the bucket
_, _, err := db.Put("key1", Body{"channels": "ABC"})
@@ -1085,7 +1080,7 @@ func TestLowSequenceHandlingNoDuplicates(t *testing.T) {
defer base.SetUpTestLogging(base.LevelDebug, base.KeyChanges, base.KeyCache)()
db, testBucket := setupTestDBWithCacheOptions(t, shortWaitCache())
- defer tearDownTestDB(t, db)
+ defer db.Close()
defer testBucket.Close()
db.ChannelMapper = channels.NewDefaultChannelMapper()
@@ -1178,7 +1173,7 @@ func TestChannelRace(t *testing.T) {
defer base.SetUpTestLogging(base.LevelInfo, base.KeyChanges)()
db, testBucket := setupTestDBWithCacheOptions(t, shortWaitCache())
- defer tearDownTestDB(t, db)
+ defer db.Close()
defer testBucket.Close()
db.ChannelMapper = channels.NewDefaultChannelMapper()
@@ -1281,8 +1276,9 @@ func TestSkippedViewRetrieval(t *testing.T) {
leakyConfig := base.LeakyBucketConfig{
TapFeedMissingDocs: []string{"doc-3", "doc-7", "doc-10", "doc-13", "doc-14"},
}
- db := setupTestLeakyDBWithCacheOptions(t, shortWaitCache(), leakyConfig)
- defer tearDownTestDB(t, db)
+ db, testBucket := setupTestLeakyDBWithCacheOptions(t, DefaultCacheOptions(), leakyConfig)
+ defer testBucket.Close()
+ defer db.Close()
db.ChannelMapper = channels.NewDefaultChannelMapper()
// Allow db to initialize and run initial CleanSkippedSequenceQueue
@@ -1355,7 +1351,8 @@ func TestStopChangeCache(t *testing.T) {
leakyConfig := base.LeakyBucketConfig{
TapFeedMissingDocs: []string{"doc-3"},
}
- db := setupTestLeakyDBWithCacheOptions(t, cacheOptions, leakyConfig)
+ db, testBucket := setupTestLeakyDBWithCacheOptions(t, cacheOptions, leakyConfig)
+ defer testBucket.Close()
// Write sequences direct
WriteDirect(db, []string{"ABC"}, 1)
@@ -1389,7 +1386,7 @@ func TestChannelCacheSize(t *testing.T) {
log.Printf("Options in test:%+v", options)
db, testBucket := setupTestDBWithCacheOptions(t, options)
- defer tearDownTestDB(t, db)
+ defer db.Close()
defer testBucket.Close()
db.ChannelMapper = channels.NewDefaultChannelMapper()
@@ -1610,7 +1607,7 @@ func TestLateArrivingSequenceTriggersOnChange(t *testing.T) {
options.ChannelCacheOptions.ChannelCacheMaxLength = 600
db, testBucket := setupTestDBWithCacheOptions(t, options)
- defer tearDownTestDB(t, db)
+ defer db.Close()
defer testBucket.Close()
// -------- Setup notifyChange callback ----------------
@@ -1696,7 +1693,7 @@ func TestInitializeEmptyCache(t *testing.T) {
db, testBucket := setupTestDBWithCacheOptions(t, cacheOptions)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
db.ChannelMapper = channels.NewDefaultChannelMapper()
@@ -1749,7 +1746,7 @@ func TestInitializeCacheUnderLoad(t *testing.T) {
db, testBucket := setupTestDBWithCacheOptions(t, cacheOptions)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
db.ChannelMapper = channels.NewDefaultChannelMapper()
@@ -1804,7 +1801,7 @@ func TestNotifyForInactiveChannel(t *testing.T) {
defer base.SetUpTestLogging(base.LevelInfo, base.KeyCache, base.KeyDCP)()
db, testBucket := setupTestDB(t)
- defer tearDownTestDB(t, db)
+ defer db.Close()
defer testBucket.Close()
// -------- Setup notifyChange callback ----------------
@@ -1871,7 +1868,7 @@ func TestChangeCache_InsertPendingEntries(t *testing.T) {
cacheOptions.CachePendingSeqMaxWait = 100 * time.Millisecond
db, testBucket := setupTestDBWithCacheOptions(t, cacheOptions)
- defer tearDownTestDB(t, db)
+ defer db.Close()
defer testBucket.Close()
db.ChannelMapper = channels.NewDefaultChannelMapper()
@@ -1998,7 +1995,9 @@ func BenchmarkProcessEntry(b *testing.B) {
for _, bm := range processEntryBenchmarks {
b.Run(bm.name, func(b *testing.B) {
b.StopTimer()
- context := testBucketContext(b)
+ testBucket := base.GetTestBucket(b)
+ context, err := NewDatabaseContext("db", testBucket.Bucket, false, DatabaseContextOptions{})
+ require.NoError(b, err)
changeCache := &changeCache{}
if err := changeCache.Init(context, nil, nil); err != nil {
log.Printf("Init failed for changeCache: %v", err)
@@ -2028,7 +2027,6 @@ func BenchmarkProcessEntry(b *testing.B) {
_ = changeCache.processEntry(entry)
}
- base.DecrNumOpenBuckets(context.Bucket.GetName())
context.Close()
})
}
@@ -2223,7 +2221,9 @@ func BenchmarkDocChanged(b *testing.B) {
for _, bm := range processEntryBenchmarks {
b.Run(bm.name, func(b *testing.B) {
b.StopTimer()
- context := testBucketContext(b)
+ testBucket := base.GetTestBucket(b)
+ context, err := NewDatabaseContext("db", testBucket.Bucket, false, DatabaseContextOptions{})
+ require.NoError(b, err)
changeCache := &changeCache{}
if err := changeCache.Init(context, nil, nil); err != nil {
log.Printf("Init failed for changeCache: %v", err)
@@ -2256,7 +2256,6 @@ func BenchmarkDocChanged(b *testing.B) {
//log.Printf("maxNumPending: %v", changeCache.context.DbStats.StatsCblReplicationPull().Get(base.StatKeyMaxPending))
//log.Printf("cachingCount: %v", changeCache.context.DbStats.StatsDatabase().Get(base.StatKeyDcpCachingCount))
- base.DecrNumOpenBuckets(context.Bucket.GetName())
context.Close()
})
}
diff --git a/db/change_listener_test.go b/db/change_listener_test.go
index 1332cb94fc..eadecf438c 100644
--- a/db/change_listener_test.go
+++ b/db/change_listener_test.go
@@ -16,7 +16,7 @@ func TestUserWaiter(t *testing.T) {
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
// Create user
username := "bob"
@@ -60,7 +60,7 @@ func TestUserWaiterForRoleChange(t *testing.T) {
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
// Create role
roleName := "good_egg"
diff --git a/db/changes_test.go b/db/changes_test.go
index ed883f1493..40f7876491 100644
--- a/db/changes_test.go
+++ b/db/changes_test.go
@@ -31,7 +31,7 @@ func TestChangesAfterChannelAdded(t *testing.T) {
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
defer base.SetUpTestLogging(base.LevelDebug, base.KeyCache, base.KeyChanges)()
@@ -132,7 +132,7 @@ func TestDocDeletionFromChannelCoalescedRemoved(t *testing.T) {
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
db.ChannelMapper = channels.NewDefaultChannelMapper()
@@ -217,7 +217,7 @@ func TestDocDeletionFromChannelCoalesced(t *testing.T) {
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
db.ChannelMapper = channels.NewDefaultChannelMapper()
@@ -292,7 +292,7 @@ func BenchmarkChangesFeedDocUnmarshalling(b *testing.B) {
db, testBucket := setupTestDB(b)
defer testBucket.Close()
- defer tearDownTestDB(b, db)
+ defer db.Close()
fieldVal := func(valSizeBytes int) string {
buffer := bytes.Buffer{}
diff --git a/db/channel_cache.go b/db/channel_cache.go
index 02f6b514e4..8f5be38c5d 100644
--- a/db/channel_cache.go
+++ b/db/channel_cache.go
@@ -323,7 +323,7 @@ func (c *channelCacheImpl) addChannelCache(channelName string) (*singleChannelCa
singleChannelCache = AsSingleChannelCache(cacheValue)
- if !c.isCompactActive() && cacheSize > c.compactHighWatermark {
+ if cacheSize > c.compactHighWatermark {
c.startCacheCompaction()
}
@@ -368,9 +368,12 @@ func (c *channelCacheImpl) isCompactActive() bool {
return c.compactRunning.IsTrue()
}
+// startCacheCompaction starts a goroutine for cache compaction if it's not already running.
func (c *channelCacheImpl) startCacheCompaction() {
- c.compactRunning.Set(true)
- go c.compactChannelCache()
+ compactNotStarted := c.compactRunning.CompareAndSwap(false, true)
+ if compactNotStarted {
+ go c.compactChannelCache()
+ }
}
// Compact runs until the number of channels in the cache is lower than compactLowWatermark
diff --git a/db/channel_cache_single_test.go b/db/channel_cache_single_test.go
index c917824724..69ea4b4893 100644
--- a/db/channel_cache_single_test.go
+++ b/db/channel_cache_single_test.go
@@ -18,9 +18,11 @@ func TestDuplicateDocID(t *testing.T) {
defer base.SetUpTestLogging(base.LevelInfo, base.KeyCache)()
- context := testBucketContext(t)
+ b := base.GetTestBucket(t)
+ defer b.Close()
+ context, err := NewDatabaseContext("db", b.Bucket, false, DatabaseContextOptions{})
+ require.NoError(t, err)
defer context.Close()
- defer base.DecrNumOpenBuckets(context.Bucket.GetName())
cache := newSingleChannelCache(context, "Test1", 0, &expvar.Map{})
@@ -65,10 +67,11 @@ func TestLateArrivingSequence(t *testing.T) {
defer base.SetUpTestLogging(base.LevelInfo, base.KeyCache)()
- context := testBucketContext(t)
+ b := base.GetTestBucket(t)
+ defer b.Close()
+ context, err := NewDatabaseContext("db", b.Bucket, false, DatabaseContextOptions{})
+ require.NoError(t, err)
defer context.Close()
- defer base.DecrNumOpenBuckets(context.Bucket.GetName())
-
cache := newSingleChannelCache(context, "Test1", 0, &expvar.Map{})
// Add some entries to cache
@@ -98,10 +101,11 @@ func TestLateSequenceAsFirst(t *testing.T) {
defer base.SetUpTestLogging(base.LevelInfo, base.KeyCache)()
- context := testBucketContext(t)
+ b := base.GetTestBucket(t)
+ defer b.Close()
+ context, err := NewDatabaseContext("db", b.Bucket, false, DatabaseContextOptions{})
+ require.NoError(t, err)
defer context.Close()
- defer base.DecrNumOpenBuckets(context.Bucket.GetName())
-
cache := newSingleChannelCache(context, "Test1", 0, &expvar.Map{})
// Add some entries to cache
@@ -131,10 +135,11 @@ func TestDuplicateLateArrivingSequence(t *testing.T) {
defer base.SetUpTestLogging(base.LevelInfo, base.KeyCache)()
- context := testBucketContext(t)
+ b := base.GetTestBucket(t)
+ defer b.Close()
+ context, err := NewDatabaseContext("db", b.Bucket, false, DatabaseContextOptions{})
+ require.NoError(t, err)
defer context.Close()
- defer base.DecrNumOpenBuckets(context.Bucket.GetName())
-
cache := newSingleChannelCache(context, "Test1", 0, &expvar.Map{})
// Add some entries to cache
@@ -205,10 +210,11 @@ func TestPrependChanges(t *testing.T) {
defer base.SetUpTestLogging(base.LevelInfo, base.KeyCache)()
- context := testBucketContext(t)
+ b := base.GetTestBucket(t)
+ defer b.Close()
+ context, err := NewDatabaseContext("db", b.Bucket, false, DatabaseContextOptions{})
+ require.NoError(t, err)
defer context.Close()
- defer base.DecrNumOpenBuckets(context.Bucket.GetName())
-
// 1. Test prepend to empty cache
cache := newSingleChannelCache(context, "PrependEmptyCache", 0, &expvar.Map{})
changesToPrepend := LogEntries{
@@ -389,10 +395,11 @@ func TestChannelCacheRemove(t *testing.T) {
defer base.SetUpTestLogging(base.LevelInfo, base.KeyCache)()
- context := testBucketContext(t)
+ b := base.GetTestBucket(t)
+ defer b.Close()
+ context, err := NewDatabaseContext("db", b.Bucket, false, DatabaseContextOptions{})
+ require.NoError(t, err)
defer context.Close()
- defer base.DecrNumOpenBuckets(context.Bucket.GetName())
-
cache := newSingleChannelCache(context, "Test1", 0, &expvar.Map{})
// Add some entries to cache
@@ -429,10 +436,11 @@ func TestChannelCacheStats(t *testing.T) {
defer base.SetUpTestLogging(base.LevelInfo, base.KeyCache)()
- context := testBucketContext(t)
+ b := base.GetTestBucket(t)
+ defer b.Close()
+ context, err := NewDatabaseContext("db", b.Bucket, false, DatabaseContextOptions{})
+ require.NoError(t, err)
defer context.Close()
- defer base.DecrNumOpenBuckets(context.Bucket.GetName())
-
testStats := &expvar.Map{}
cache := newSingleChannelCache(context, "Test1", 0, testStats)
@@ -500,10 +508,11 @@ func TestChannelCacheStatsOnPrune(t *testing.T) {
defer base.SetUpTestLogging(base.LevelInfo, base.KeyCache)()
- context := testBucketContext(t)
+ b := base.GetTestBucket(t)
+ defer b.Close()
+ context, err := NewDatabaseContext("db", b.Bucket, false, DatabaseContextOptions{})
+ require.NoError(t, err)
defer context.Close()
- defer base.DecrNumOpenBuckets(context.Bucket.GetName())
-
testStats := &expvar.Map{}
cache := newSingleChannelCache(context, "Test1", 0, testStats)
cache.options.ChannelCacheMaxLength = 5
@@ -531,10 +540,11 @@ func TestChannelCacheStatsOnPrepend(t *testing.T) {
defer base.SetUpTestLogging(base.LevelInfo, base.KeyCache)()
- context := testBucketContext(t)
+ b := base.GetTestBucket(t)
+ defer b.Close()
+ context, err := NewDatabaseContext("db", b.Bucket, false, DatabaseContextOptions{})
+ require.NoError(t, err)
defer context.Close()
- defer base.DecrNumOpenBuckets(context.Bucket.GetName())
-
testStats := &expvar.Map{}
cache := newSingleChannelCache(context, "Test1", 99, testStats)
cache.options.ChannelCacheMaxLength = 15
@@ -610,20 +620,21 @@ func TestBypassSingleChannelCache(t *testing.T) {
entries, err := bypassCache.GetChanges(ChangesOptions{Since: SequenceID{Seq: 0}})
assert.NoError(t, err)
- assert.Equal(t, 10, len(entries))
+ require.Len(t, entries, 10)
validFrom, cachedEntries := bypassCache.GetCachedChanges(ChangesOptions{Since: SequenceID{Seq: 0}})
assert.Equal(t, uint64(math.MaxUint64), validFrom)
- assert.Equal(t, 0, len(cachedEntries))
+ require.Len(t, cachedEntries, 0)
}
func BenchmarkChannelCacheUniqueDocs_Ordered(b *testing.B) {
defer base.DisableTestLogging()()
- context := testBucketContext(b)
+ testBucket := base.GetTestBucket(b)
+ defer testBucket.Close()
+ context, err := NewDatabaseContext("db", testBucket.Bucket, false, DatabaseContextOptions{})
+ require.NoError(b, err)
defer context.Close()
- defer base.DecrNumOpenBuckets(context.Bucket.GetName())
-
cache := newSingleChannelCache(context, "Benchmark", 0, &expvar.Map{})
// generate doc IDs
docIDs := make([]string, b.N)
@@ -639,10 +650,11 @@ func BenchmarkChannelCacheUniqueDocs_Ordered(b *testing.B) {
func BenchmarkChannelCacheRepeatedDocs5(b *testing.B) {
defer base.DisableTestLogging()()
- context := testBucketContext(b)
+ testBucket := base.GetTestBucket(b)
+ defer testBucket.Close()
+ context, err := NewDatabaseContext("db", testBucket.Bucket, false, DatabaseContextOptions{})
+ require.NoError(b, err)
defer context.Close()
- defer base.DecrNumOpenBuckets(context.Bucket.GetName())
-
cache := newSingleChannelCache(context, "Benchmark", 0, &expvar.Map{})
// generate doc IDs
@@ -656,10 +668,11 @@ func BenchmarkChannelCacheRepeatedDocs5(b *testing.B) {
func BenchmarkChannelCacheRepeatedDocs20(b *testing.B) {
defer base.DisableTestLogging()()
- context := testBucketContext(b)
+ testBucket := base.GetTestBucket(b)
+ defer testBucket.Close()
+ context, err := NewDatabaseContext("db", testBucket.Bucket, false, DatabaseContextOptions{})
+ require.NoError(b, err)
defer context.Close()
- defer base.DecrNumOpenBuckets(context.Bucket.GetName())
-
cache := newSingleChannelCache(context, "Benchmark", 0, &expvar.Map{})
// generate doc IDs
@@ -673,10 +686,11 @@ func BenchmarkChannelCacheRepeatedDocs20(b *testing.B) {
func BenchmarkChannelCacheRepeatedDocs50(b *testing.B) {
defer base.DisableTestLogging()()
- context := testBucketContext(b)
+ testBucket := base.GetTestBucket(b)
+ defer testBucket.Close()
+ context, err := NewDatabaseContext("db", testBucket.Bucket, false, DatabaseContextOptions{})
+ require.NoError(b, err)
defer context.Close()
- defer base.DecrNumOpenBuckets(context.Bucket.GetName())
-
cache := newSingleChannelCache(context, "Benchmark", 0, &expvar.Map{})
// generate doc IDs
@@ -690,10 +704,11 @@ func BenchmarkChannelCacheRepeatedDocs50(b *testing.B) {
func BenchmarkChannelCacheRepeatedDocs80(b *testing.B) {
defer base.DisableTestLogging()()
- context := testBucketContext(b)
+ testBucket := base.GetTestBucket(b)
+ defer testBucket.Close()
+ context, err := NewDatabaseContext("db", testBucket.Bucket, false, DatabaseContextOptions{})
+ require.NoError(b, err)
defer context.Close()
- defer base.DecrNumOpenBuckets(context.Bucket.GetName())
-
cache := newSingleChannelCache(context, "Benchmark", 0, &expvar.Map{})
// generate doc IDs
@@ -708,10 +723,11 @@ func BenchmarkChannelCacheRepeatedDocs95(b *testing.B) {
defer base.SetUpBenchmarkLogging(base.LevelInfo, base.KeyHTTP)()
- context := testBucketContext(b)
+ testBucket := base.GetTestBucket(b)
+ defer testBucket.Close()
+ context, err := NewDatabaseContext("db", testBucket.Bucket, false, DatabaseContextOptions{})
+ require.NoError(b, err)
defer context.Close()
- defer base.DecrNumOpenBuckets(context.Bucket.GetName())
-
cache := newSingleChannelCache(context, "Benchmark", 0, &expvar.Map{})
// generate doc IDs
@@ -725,10 +741,11 @@ func BenchmarkChannelCacheRepeatedDocs95(b *testing.B) {
func BenchmarkChannelCacheUniqueDocs_Unordered(b *testing.B) {
defer base.DisableTestLogging()()
- context := testBucketContext(b)
+ testBucket := base.GetTestBucket(b)
+ defer testBucket.Close()
+ context, err := NewDatabaseContext("db", testBucket.Bucket, false, DatabaseContextOptions{})
+ require.NoError(b, err)
defer context.Close()
- defer base.DecrNumOpenBuckets(context.Bucket.GetName())
-
cache := newSingleChannelCache(context, "Benchmark", 0, &expvar.Map{})
// generate docs
docs := make([]*LogEntry, b.N)
diff --git a/db/channel_cache_test.go b/db/channel_cache_test.go
index 7bfe6e43b5..59d31c1f6f 100644
--- a/db/channel_cache_test.go
+++ b/db/channel_cache_test.go
@@ -29,14 +29,15 @@ func TestChannelCacheMaxSize(t *testing.T) {
defer base.SetUpTestLogging(base.LevelInfo, base.KeyCache)()
- context := testBucketContext(t)
+ testBucket := base.GetTestBucket(t)
+ defer testBucket.Close()
+ context, err := NewDatabaseContext("db", testBucket.Bucket, false, DatabaseContextOptions{})
+ require.NoError(t, err)
defer context.Close()
- defer base.DecrNumOpenBuckets(context.Bucket.GetName())
-
cache := context.changeCache.getChannelCache()
// Make channels active
- _, err := cache.GetChanges("TestA", ChangesOptions{})
+ _, err = cache.GetChanges("TestA", ChangesOptions{})
require.NoError(t, err)
_, err = cache.GetChanges("TestB", ChangesOptions{})
require.NoError(t, err)
diff --git a/db/crud_test.go b/db/crud_test.go
index 1fc1ca1f97..171eada094 100644
--- a/db/crud_test.go
+++ b/db/crud_test.go
@@ -56,7 +56,7 @@ func TestRevisionCacheLoad(t *testing.T) {
db, testBucket := setupTestDBWithViewsEnabled(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
base.TestExternalRevStorage = true
@@ -100,7 +100,7 @@ func TestRevisionStorageConflictAndTombstones(t *testing.T) {
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
base.TestExternalRevStorage = true
@@ -284,7 +284,7 @@ func TestRevisionStoragePruneTombstone(t *testing.T) {
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
base.TestExternalRevStorage = true
@@ -442,7 +442,7 @@ func TestOldRevisionStorage(t *testing.T) {
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
prop_1000_bytes := base.CreateProperty(1000)
@@ -602,8 +602,9 @@ func TestOldRevisionStorageError(t *testing.T) {
leakyConfig := base.LeakyBucketConfig{
ForceErrorSetRawKeys: []string{forceErrorKey},
}
- db := setupTestLeakyDBWithCacheOptions(t, DefaultCacheOptions(), leakyConfig)
- defer tearDownTestDB(t, db)
+ db, testBucket := setupTestLeakyDBWithCacheOptions(t, DefaultCacheOptions(), leakyConfig)
+ defer testBucket.Close()
+ defer db.Close()
db.ChannelMapper = channels.NewChannelMapper(`function(doc, oldDoc) {channel(doc.channels);}`)
@@ -731,7 +732,7 @@ func TestOldRevisionStorageError(t *testing.T) {
func TestLargeSequence(t *testing.T) {
db, testBucket := setupTestDBWithCustomSyncSeq(t, 9223372036854775807)
- defer tearDownTestDB(t, db)
+ defer db.Close()
defer testBucket.Close()
db.ChannelMapper = channels.NewDefaultChannelMapper()
@@ -772,7 +773,7 @@ const rawDocMalformedRevisionStorage = `
func TestMalformedRevisionStorageRecovery(t *testing.T) {
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
db.ChannelMapper = channels.NewChannelMapper(`function(doc, oldDoc) {channel(doc.channels);}`)
@@ -823,7 +824,7 @@ func BenchmarkDatabaseGet1xRev(b *testing.B) {
db, testBucket := setupTestDB(b)
defer testBucket.Close()
- defer tearDownTestDB(b, db)
+ defer db.Close()
body := Body{"foo": "bar", "rev": "1-a"}
_, _, _ = db.PutExistingRevWithBody("doc1", body, []string{"1-a"}, false)
@@ -880,7 +881,7 @@ func BenchmarkDatabaseGetRev(b *testing.B) {
db, testBucket := setupTestDB(b)
defer testBucket.Close()
- defer tearDownTestDB(b, db)
+ defer db.Close()
body := Body{"foo": "bar", "rev": "1-a"}
_, _, _ = db.PutExistingRevWithBody("doc1", body, []string{"1-a"}, false)
@@ -938,7 +939,7 @@ func BenchmarkHandleRevDelta(b *testing.B) {
db, testBucket := setupTestDB(b)
defer testBucket.Close()
- defer tearDownTestDB(b, db)
+ defer db.Close()
body := Body{"foo": "bar"}
_, _, _ = db.PutExistingRevWithBody("doc1", body, []string{"1-a"}, false)
diff --git a/db/database.go b/db/database.go
index da37aa27d8..8672ac5284 100644
--- a/db/database.go
+++ b/db/database.go
@@ -342,11 +342,9 @@ func NewDatabaseContext(dbName string, bucket base.Bucket, autoImport bool, opti
// If this isn't the default provider, add the provider to the callback URL (needed to identify provider to _oidc_callback)
if !provider.IsDefault && provider.CallbackURL != nil {
- var updatedCallback string
- if strings.Contains(*provider.CallbackURL, "?") {
- updatedCallback = fmt.Sprintf("%s&provider=%s", *provider.CallbackURL, name)
- } else {
- updatedCallback = fmt.Sprintf("%s?provider=%s", *provider.CallbackURL, name)
+ updatedCallback, err := auth.AddURLQueryParam(*provider.CallbackURL, auth.OIDCAuthProvider, name)
+ if err != nil {
+ return nil, base.RedactErrorf("Failed to add provider %q to OIDC callback URL", base.UD(name))
}
provider.CallbackURL = &updatedCallback
}
@@ -513,7 +511,7 @@ func (context *DatabaseContext) RemoveObsoleteDesignDocs(previewOnly bool) (remo
return removeObsoleteDesignDocs(context.Bucket, previewOnly, context.UseViews())
}
-// Removes previous versions of Sync Gateway's design docs found on the server
+// Removes previous versions of Sync Gateway's indexes found on the server
func (context *DatabaseContext) RemoveObsoleteIndexes(previewOnly bool) (removedIndexes []string, err error) {
gocbBucket, ok := base.AsGoCBBucket(context.Bucket)
diff --git a/db/database_test.go b/db/database_test.go
index 3b2e17de5c..fd3a12df6d 100644
--- a/db/database_test.go
+++ b/db/database_test.go
@@ -37,41 +37,20 @@ func init() {
underscore.Disable() // It really slows down unit tests (by making otto.New take a lot longer)
}
-type UnitTestAuth struct {
- Username string
- Password string
- Bucketname string
-}
-
-func (u *UnitTestAuth) GetCredentials() (string, string, string) {
- return base.TransformBucketCredentials(u.Username, u.Password, u.Bucketname)
-}
-
-func testLeakyBucket(config base.LeakyBucketConfig, tester testing.TB) base.Bucket {
-
- testBucket := testBucket(tester)
- // Since this doesn't return the testbucket handle, disable the "open bucket counting system" by immediately
- // decrementing counter
- base.DecrNumOpenBuckets(testBucket.Bucket.GetName())
-
- leakyBucket := base.NewLeakyBucket(testBucket.Bucket, config)
- return leakyBucket
-}
-
// It's important to call db.Close() on the database and .Close() on the TestBucket that is returned by this helper.
// For example, if .Close() is not called on the TestBucket before the test is finished, it will be detected and
// the next test will fail.
-func setupTestDB(t testing.TB) (*Database, base.TestBucket) {
+func setupTestDB(t testing.TB) (*Database, *base.TestBucket) {
return setupTestDBWithCacheOptions(t, DefaultCacheOptions())
}
-func setupTestDBWithCacheOptions(t testing.TB, options CacheOptions) (*Database, base.TestBucket) {
+func setupTestDBWithCacheOptions(t testing.TB, options CacheOptions) (*Database, *base.TestBucket) {
dbcOptions := DatabaseContextOptions{
CacheOptions: &options,
}
AddOptionsFromEnvironmentVariables(&dbcOptions)
- tBucket := testBucket(t)
+ tBucket := base.GetTestBucket(t)
context, err := NewDatabaseContext("db", tBucket.Bucket, false, dbcOptions)
assert.NoError(t, err, "Couldn't create context for database 'db'")
db, err := CreateDatabase(context)
@@ -81,13 +60,13 @@ func setupTestDBWithCacheOptions(t testing.TB, options CacheOptions) (*Database,
// Forces UseViews:true in the database context. Useful for testing w/ views while running
// tests against Couchbase Server
-func setupTestDBWithViewsEnabled(t testing.TB) (*Database, base.TestBucket) {
+func setupTestDBWithViewsEnabled(t testing.TB) (*Database, *base.TestBucket) {
dbcOptions := DatabaseContextOptions{
UseViews: true,
}
AddOptionsFromEnvironmentVariables(&dbcOptions)
- tBucket := testBucketUseViews(t)
+ tBucket := base.GetTestBucket(t)
context, err := NewDatabaseContext("db", tBucket.Bucket, false, dbcOptions)
assert.NoError(t, err, "Couldn't create context for database 'db'")
db, err := CreateDatabase(context)
@@ -97,11 +76,11 @@ func setupTestDBWithViewsEnabled(t testing.TB) (*Database, base.TestBucket) {
// Sets up a test bucket with _sync:seq initialized to a high value prior to database creation. Used to test
// issues with custom _sync:seq values without triggering skipped sequences between 0 and customSeq
-func setupTestDBWithCustomSyncSeq(t testing.TB, customSeq uint64) (*Database, base.TestBucket) {
+func setupTestDBWithCustomSyncSeq(t testing.TB, customSeq uint64) (*Database, *base.TestBucket) {
dbcOptions := DatabaseContextOptions{}
AddOptionsFromEnvironmentVariables(&dbcOptions)
- tBucket := testBucket(t)
+ tBucket := base.GetTestBucket(t)
log.Printf("Initializing test %s to %d", base.SyncSeqPrefix, customSeq)
_, incrErr := tBucket.Incr(base.SyncSeqKey, customSeq, customSeq, 0)
@@ -114,81 +93,28 @@ func setupTestDBWithCustomSyncSeq(t testing.TB, customSeq uint64) (*Database, ba
return db, tBucket
}
-func testBucket(tester testing.TB) base.TestBucket {
- return testBucketInit(tester, true)
-}
-
-func testBucketUseViews(tester testing.TB) base.TestBucket {
- return testBucketInit(tester, false)
-}
-
-func testBucketInit(tester testing.TB, useGSI bool) base.TestBucket {
-
- // Retry loop in case the GSI indexes don't handle the flush and we need to drop them and retry
- for i := 0; i < 2; i++ {
-
- testBucket := base.GetTestBucket(tester)
- err := installViews(testBucket.Bucket)
- if err != nil {
- tester.Fatalf("Couldn't connect to bucket: %v", err)
- // ^^ effectively panics
- }
-
- if useGSI {
- err = InitializeIndexes(testBucket.Bucket, base.TestUseXattrs(), 0)
- if err != nil {
- tester.Fatalf("Unable to initialize GSI indexes for test: %v", err)
- // ^^ effectively panics
- }
-
- // Since GetTestBucket() always returns an _empty_ bucket, it's safe to wait for the indexes to be empty
- gocbBucket, isGoCbBucket := base.AsGoCBBucket(testBucket.Bucket)
- if isGoCbBucket {
- waitForIndexRollbackErr := WaitForIndexEmpty(gocbBucket, testBucket.BucketSpec.UseXattrs)
- if waitForIndexRollbackErr != nil {
- base.Infof(base.KeyAll, "Error WaitForIndexEmpty: %v. Drop indexes and retry", waitForIndexRollbackErr)
- if err := base.DropAllBucketIndexes(gocbBucket); err != nil {
- tester.Fatalf("Unable to drop GSI indexes for test: %v", err)
- // ^^ effectively panics
- }
- testBucket.Close() // Close the bucket, it will get re-opened on next loop iteration
- continue // Goes to top of outer for loop to retry
- }
-
- }
- }
-
- return testBucket
-
- }
-
- panic(fmt.Sprintf("Failed to create a testbucket after multiple attempts"))
-
-}
-
-func setupTestLeakyDBWithCacheOptions(t *testing.T, options CacheOptions, leakyOptions base.LeakyBucketConfig) *Database {
+func setupTestLeakyDBWithCacheOptions(t *testing.T, options CacheOptions, leakyOptions base.LeakyBucketConfig) (*Database, *base.TestBucket) {
dbcOptions := DatabaseContextOptions{
CacheOptions: &options,
}
AddOptionsFromEnvironmentVariables(&dbcOptions)
- leakyBucket := testLeakyBucket(leakyOptions, t)
+ testBucket := base.GetTestBucket(t)
+ leakyBucket := base.NewLeakyBucket(testBucket.Bucket, leakyOptions)
context, err := NewDatabaseContext("db", leakyBucket, false, dbcOptions)
assert.NoError(t, err, "Couldn't create context for database 'db'")
db, err := CreateDatabase(context)
assert.NoError(t, err, "Couldn't create database 'db'")
- return db
+ return db, testBucket
}
// If certain environment variables are set, for example to turn on XATTR support, then update
// the DatabaseContextOptions accordingly
func AddOptionsFromEnvironmentVariables(dbcOptions *DatabaseContextOptions) {
-
if base.TestUseXattrs() {
dbcOptions.EnableXattr = true
}
- // Force views if not testing against Couchbase Server
- if !base.TestUseCouchbaseServer() {
+ if base.TestsDisableGSI() {
dbcOptions.UseViews = true
}
}
@@ -212,7 +138,7 @@ func TestDatabase(t *testing.T) {
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
// Test creating & updating a document:
log.Printf("Create rev 1...")
@@ -307,7 +233,7 @@ func TestGetDeleted(t *testing.T) {
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
body := Body{"key1": 1234}
rev1id, _, err := db.Put("doc1", body)
@@ -348,7 +274,7 @@ func TestGetRemovedAsUser(t *testing.T) {
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
rev1body := Body{
"key1": 1234,
@@ -433,7 +359,7 @@ func TestGetRemoved(t *testing.T) {
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
rev1body := Body{
"key1": 1234,
@@ -509,7 +435,7 @@ func TestGetRemovedAndDeleted(t *testing.T) {
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
rev1body := Body{
"key1": 1234,
@@ -614,7 +540,7 @@ func TestAllDocsOnly(t *testing.T) {
db, testBucket := setupTestDBWithCacheOptions(t, cacheOptions)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
db.ChannelMapper = channels.NewDefaultChannelMapper()
@@ -720,7 +646,7 @@ func TestUpdatePrincipal(t *testing.T) {
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
db.ChannelMapper = channels.NewDefaultChannelMapper()
@@ -753,7 +679,7 @@ func TestRepeatedConflict(t *testing.T) {
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
// Create rev 1 of "doc":
body := Body{"n": 1, "channels": []string{"all", "1"}}
@@ -794,7 +720,7 @@ func TestConflicts(t *testing.T) {
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
db.ChannelMapper = channels.NewDefaultChannelMapper()
@@ -912,7 +838,7 @@ func TestConflictRevLimit(t *testing.T) {
}
AddOptionsFromEnvironmentVariables(&dbOptions)
- bucket = testBucket(t)
+ bucket = base.GetTestBucket(t)
context, _ := NewDatabaseContext("db", bucket, false, dbOptions)
db, _ = CreateDatabase(context)
assert.Equal(t, uint32(DefaultRevsLimitConflicts), db.RevsLimit)
@@ -925,7 +851,7 @@ func TestConflictRevLimit(t *testing.T) {
}
AddOptionsFromEnvironmentVariables(&dbOptions)
- bucket = testBucket(t)
+ bucket = base.GetTestBucket(t)
context, _ = NewDatabaseContext("db", bucket, false, dbOptions)
db, _ = CreateDatabase(context)
assert.Equal(t, uint32(DefaultRevsLimitNoConflicts), db.RevsLimit)
@@ -938,7 +864,7 @@ func TestNoConflictsMode(t *testing.T) {
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
// Strictly speaking, this flag should be set before opening the database, but it only affects
// Put operations and replication, so it doesn't make a difference if we do it afterwards.
db.Options.AllowConflicts = base.BoolPtr(false)
@@ -1006,7 +932,7 @@ func TestNoConflictsMode(t *testing.T) {
func TestAllowConflictsFalseTombstoneExistingConflict(t *testing.T) {
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
// Create documents with multiple non-deleted branches
log.Printf("Creating docs")
@@ -1083,7 +1009,7 @@ func TestAllowConflictsFalseTombstoneExistingConflict(t *testing.T) {
func TestAllowConflictsFalseTombstoneExistingConflictNewEditsFalse(t *testing.T) {
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
// Create documents with multiple non-deleted branches
log.Printf("Creating docs")
@@ -1152,7 +1078,7 @@ func TestSyncFnOnPush(t *testing.T) {
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
db.ChannelMapper = channels.NewChannelMapper(`function(doc, oldDoc) {
log("doc _id = "+doc._id+", _rev = "+doc._rev);
@@ -1190,7 +1116,7 @@ func TestInvalidChannel(t *testing.T) {
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
db.ChannelMapper = channels.NewDefaultChannelMapper()
@@ -1203,7 +1129,7 @@ func TestAccessFunctionValidation(t *testing.T) {
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
var err error
db.ChannelMapper = channels.NewChannelMapper(`function(doc){access(doc.users,doc.userChannels);}`)
@@ -1237,7 +1163,7 @@ func TestAccessFunctionDb(t *testing.T) {
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
authenticator := auth.NewAuthenticator(db.Bucket, db)
@@ -1283,7 +1209,7 @@ func TestUpdateDesignDoc(t *testing.T) {
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
mapFunction := `function (doc, meta) { emit(); }`
err := db.PutDesignDoc("official", sgbucket.DesignDoc{
@@ -1319,7 +1245,7 @@ func TestPostWithExistingId(t *testing.T) {
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
// Test creating a document with existing id property:
customDocId := "customIdValue"
@@ -1355,7 +1281,7 @@ func TestPutWithUserSpecialProperty(t *testing.T) {
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
// Test creating a document with existing id property:
customDocId := "customIdValue"
@@ -1372,7 +1298,7 @@ func TestWithNullPropertyKey(t *testing.T) {
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
// Test creating a document with null property key
customDocId := "customIdValue"
@@ -1388,7 +1314,7 @@ func TestPostWithUserSpecialProperty(t *testing.T) {
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
// Test creating a document with existing id property:
customDocId := "customIdValue"
@@ -1423,7 +1349,7 @@ func TestRecentSequenceHistory(t *testing.T) {
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
seqTracker := uint64(0)
@@ -1500,7 +1426,7 @@ func TestChannelView(t *testing.T) {
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
// Create doc
log.Printf("Create doc 1...")
@@ -1540,7 +1466,7 @@ func TestConcurrentImport(t *testing.T) {
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
defer base.SetUpTestLogging(base.LevelInfo, base.KeyImport)()
@@ -1575,7 +1501,7 @@ func TestViewCustom(t *testing.T) {
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
// add some docs
docId := base.GenerateRandomID()
@@ -1759,7 +1685,7 @@ func mockOIDCOptionsWithBadName() *auth.OIDCOptions {
}
func TestNewDatabaseContextWithOIDCProviderOptionErrors(t *testing.T) {
- testBucket := testBucket(t)
+ testBucket := base.GetTestBucket(t)
tests := []struct {
name string
inputOptions *auth.OIDCOptions
@@ -1808,7 +1734,7 @@ func TestNewDatabaseContextWithOIDCProviderOptionErrors(t *testing.T) {
}
func TestNewDatabaseContextWithOIDCProviderOptions(t *testing.T) {
- testBucket := testBucket(t)
+ testBucket := base.GetTestBucket(t)
tests := []struct {
name string
inputOptions *auth.OIDCOptions
@@ -1866,7 +1792,7 @@ func TestGetOIDCProvider(t *testing.T) {
mockedOIDCOptions := mockOIDCOptions()
options := DatabaseContextOptions{OIDCOptions: mockedOIDCOptions}
AddOptionsFromEnvironmentVariables(&options)
- testBucket := testBucket(t)
+ testBucket := base.GetTestBucket(t)
context, err := NewDatabaseContext("db", testBucket.Bucket, false, options)
assert.NoError(t, err, "Couldn't create context for database 'db'")
@@ -1896,7 +1822,7 @@ func TestSyncFnMutateBody(t *testing.T) {
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
db.ChannelMapper = channels.NewChannelMapper(`function(doc, oldDoc) {
doc.key1 = "mutatedValue"
@@ -1941,7 +1867,9 @@ func TestConcurrentPushSameNewRevision(t *testing.T) {
WriteUpdateCallback: writeUpdateCallback,
}
- db = setupTestLeakyDBWithCacheOptions(t, DefaultCacheOptions(), queryCallbackConfig)
+ var testBucket *base.TestBucket
+ db, testBucket = setupTestLeakyDBWithCacheOptions(t, DefaultCacheOptions(), queryCallbackConfig)
+ defer testBucket.Close()
defer tearDownTestDB(t, db)
enableCallback = true
@@ -1977,7 +1905,9 @@ func TestConcurrentPushSameNewNonWinningRevision(t *testing.T) {
WriteUpdateCallback: writeUpdateCallback,
}
- db = setupTestLeakyDBWithCacheOptions(t, DefaultCacheOptions(), queryCallbackConfig)
+ var testBucket *base.TestBucket
+ db, testBucket = setupTestLeakyDBWithCacheOptions(t, DefaultCacheOptions(), queryCallbackConfig)
+ defer testBucket.Close()
defer tearDownTestDB(t, db)
body := Body{"name": "Olivia", "age": 80}
@@ -2032,7 +1962,9 @@ func TestConcurrentPushSameTombstoneWinningRevision(t *testing.T) {
WriteUpdateCallback: writeUpdateCallback,
}
- db = setupTestLeakyDBWithCacheOptions(t, DefaultCacheOptions(), queryCallbackConfig)
+ var testBucket *base.TestBucket
+ db, testBucket = setupTestLeakyDBWithCacheOptions(t, DefaultCacheOptions(), queryCallbackConfig)
+ defer testBucket.Close()
defer tearDownTestDB(t, db)
body := Body{"name": "Olivia", "age": 80}
@@ -2087,7 +2019,9 @@ func TestConcurrentPushDifferentUpdateNonWinningRevision(t *testing.T) {
WriteUpdateCallback: writeUpdateCallback,
}
- db = setupTestLeakyDBWithCacheOptions(t, DefaultCacheOptions(), queryCallbackConfig)
+ var testBucket *base.TestBucket
+ db, testBucket = setupTestLeakyDBWithCacheOptions(t, DefaultCacheOptions(), queryCallbackConfig)
+ defer testBucket.Close()
defer tearDownTestDB(t, db)
body := Body{"name": "Olivia", "age": 80}
diff --git a/db/design_doc_test.go b/db/design_doc_test.go
index f8f8c9baca..35aed70d97 100644
--- a/db/design_doc_test.go
+++ b/db/design_doc_test.go
@@ -11,7 +11,7 @@ import (
func TestRemoveObsoleteDesignDocs(t *testing.T) {
- testBucket := testBucket(t)
+ testBucket := base.GetTestBucket(t)
defer testBucket.Close()
bucket := testBucket.Bucket
mapFunction := `function (doc, meta) { emit(); }`
@@ -75,7 +75,7 @@ func TestRemoveDesignDocsUseViewsTrueAndFalse(t *testing.T) {
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
mapFunction := `function (doc, meta){ emit(); }`
@@ -130,10 +130,10 @@ func TestRemoveObsoleteDesignDocsErrors(t *testing.T) {
DDocGetErrorCount: 1,
DDocDeleteErrorCount: 1,
}
- testBucket := testLeakyBucket(leakyBucketConfig, t)
+ testBucket := base.GetTestBucket(t)
defer testBucket.Close()
+ bucket := base.NewLeakyBucket(testBucket.Bucket, leakyBucketConfig)
- bucket := testBucket
mapFunction := `function (doc, meta){ emit(); }`
err := bucket.PutDDoc(DesignDocSyncGatewayPrefix+"_test", sgbucket.DesignDoc{
diff --git a/db/import_test.go b/db/import_test.go
index 625f92f466..d398ed7a80 100644
--- a/db/import_test.go
+++ b/db/import_test.go
@@ -33,7 +33,7 @@ func TestMigrateMetadata(t *testing.T) {
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
key := "TestMigrateMetadata"
bodyBytes := rawDocWithSyncMeta()
@@ -104,7 +104,7 @@ func TestImportWithStaleBucketDocCorrectExpiry(t *testing.T) {
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
type testcase struct {
docBody []byte
@@ -229,7 +229,7 @@ func TestImportNullDoc(t *testing.T) {
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
key := "TestImportNullDoc"
var body Body
@@ -247,7 +247,7 @@ func TestImportNullDocRaw(t *testing.T) {
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
// Feed import of null doc
exp := uint32(0)
@@ -271,7 +271,7 @@ func TestEvaluateFunction(t *testing.T) {
defer base.SetUpTestLogging(base.LevelInfo, base.KeyImport)()
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
// Simulate unexpected error invoking import filter for document
body := Body{"key": "value", "version": "1a"}
diff --git a/db/indexes_test.go b/db/indexes_test.go
index 1f99323d0a..38f87a30b9 100644
--- a/db/indexes_test.go
+++ b/db/indexes_test.go
@@ -14,16 +14,16 @@ import (
)
func TestInitializeIndexes(t *testing.T) {
- if base.UnitTestUrlIsWalrus() {
- t.Skip("Index tests require Couchbase Bucket")
+ if base.TestsDisableGSI() {
+ t.Skip("This test only works with Couchbase Server and UseViews=false")
}
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
goCbBucket, isGoCBBucket := base.AsGoCBBucket(testBucket)
- goassert.True(t, isGoCBBucket)
+ require.True(t, isGoCBBucket)
dropErr := base.DropAllBucketIndexes(goCbBucket)
assert.NoError(t, dropErr, "Error dropping all indexes")
@@ -31,6 +31,11 @@ func TestInitializeIndexes(t *testing.T) {
initErr := InitializeIndexes(testBucket, db.UseXattrs(), 0)
assert.NoError(t, initErr, "Error initializing all indexes")
+ if !base.TestsDisableGSI() {
+ err := goCbBucket.CreatePrimaryIndex(base.PrimaryIndexName, nil)
+ assert.NoError(t, err)
+ }
+
validateErr := validateAllIndexesOnline(testBucket)
assert.NoError(t, validateErr, "Error validating indexes online")
@@ -70,13 +75,13 @@ func validateAllIndexesOnline(bucket base.Bucket) error {
func TestPostUpgradeIndexesSimple(t *testing.T) {
- if base.UnitTestUrlIsWalrus() {
- t.Skip("Index tests require Couchbase Bucket")
+ if base.TestsDisableGSI() {
+ t.Skip("This test only works with Couchbase Server and UseViews=false")
}
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
gocbBucket, ok := base.AsGoCBBucket(testBucket.Bucket)
assert.True(t, ok)
@@ -111,17 +116,24 @@ func TestPostUpgradeIndexesSimple(t *testing.T) {
goassert.Equals(t, len(removedIndexes), 0)
assert.NoError(t, removeErr, "Unexpected error running removeObsoleteIndexes in post-cleanup no-op")
+ // Restore indexes after test
+ err = InitializeIndexes(testBucket, db.UseXattrs(), 0)
+ assert.NoError(t, err)
}
func TestPostUpgradeIndexesVersionChange(t *testing.T) {
- if base.UnitTestUrlIsWalrus() {
- t.Skip("Index tests require Couchbase Bucket")
+ // FIXME: CBG-815 - Overwriting sgIndexes global map is disrupting the async bucket pooling workers
+ // Is there a way of refactoring removeObsoleteIndexes to pass in the index map instead?
+ t.Skipf("FIXME: can't touch sgIndexes map - bucket pooling relies on it")
+
+ if base.TestsDisableGSI() {
+ t.Skip("This test only works with Couchbase Server and UseViews=false")
}
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
gocbBucket, ok := base.AsGoCBBucket(testBucket.Bucket)
assert.True(t, ok)
@@ -151,13 +163,18 @@ func TestPostUpgradeIndexesVersionChange(t *testing.T) {
}
func TestRemoveIndexesUseViewsTrueAndFalse(t *testing.T) {
- if base.UnitTestUrlIsWalrus() {
- t.Skip("Index tests require Couchbase Bucket")
+
+ // FIXME: CBG-815 - Overwriting sgIndexes global map is disrupting the async bucket pooling workers
+ // Is there a way of refactoring removeObsoleteIndexes to pass in the index map instead?
+ t.Skipf("FIXME: can't touch sgIndexes map - bucket pooling relies on it")
+
+ if base.TestsDisableGSI() {
+ t.Skip("This test only works with Couchbase Server and UseViews=false")
}
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
gocbBucket, ok := base.AsGoCBBucket(testBucket.Bucket)
assert.True(t, ok)
@@ -190,21 +207,27 @@ func TestRemoveIndexesUseViewsTrueAndFalse(t *testing.T) {
assert.NoError(t, err)
_, err = removeObsoleteDesignDocs(gocbBucket, !db.UseXattrs(), !db.UseViews())
assert.NoError(t, err)
+
+ // Restore ddocs after test
+ err = InitializeViews(gocbBucket)
+ assert.NoError(t, err)
}
func TestRemoveObsoleteIndexOnFail(t *testing.T) {
- if base.UnitTestUrlIsWalrus() {
- t.Skip("Index tests require Couchbase Bucket")
+ // FIXME: CBG-815 - Overwriting sgIndexes global map is disrupting the async bucket pooling workers
+ // Is there a way of refactoring removeObsoleteIndexes to pass in the index map instead?
+ t.Skipf("FIXME: can't touch sgIndexes map - bucket pooling relies on it")
+
+ if base.TestsDisableGSI() {
+ t.Skip("This test only works with Couchbase Server and UseViews=false")
}
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
leakyBucket := base.NewLeakyBucket(testBucket.Bucket, base.LeakyBucketConfig{DropIndexErrorNames: []string{"sg_access_1", "sg_access_x1"}})
- b, ok := leakyBucket.(*base.LeakyBucket)
- assert.True(t, ok)
//Copy references to existing indexes to variable for future use
oldIndexes := sgIndexes
@@ -231,7 +254,7 @@ func TestRemoveObsoleteIndexOnFail(t *testing.T) {
channelIndex.previousVersions = []int{1}
sgIndexes[IndexChannels] = channelIndex
- removedIndex, removeErr := removeObsoleteIndexes(b, false, db.UseXattrs(), db.UseViews())
+ removedIndex, removeErr := removeObsoleteIndexes(leakyBucket, false, db.UseXattrs(), db.UseViews())
assert.NoError(t, removeErr)
if base.TestUseXattrs() {
@@ -239,6 +262,11 @@ func TestRemoveObsoleteIndexOnFail(t *testing.T) {
} else {
assert.Contains(t, removedIndex, "sg_channels_1")
}
+
+ // Restore indexes after test
+ err := InitializeIndexes(testBucket, db.UseXattrs(), 0)
+ assert.NoError(t, err)
+
}
func TestIsIndexerError(t *testing.T) {
diff --git a/db/main_test.go b/db/main_test.go
new file mode 100644
index 0000000000..a77fa95c0c
--- /dev/null
+++ b/db/main_test.go
@@ -0,0 +1,18 @@
+package db
+
+import (
+ "os"
+ "testing"
+
+ "github.com/couchbase/sync_gateway/base"
+)
+
+func TestMain(m *testing.M) {
+ base.GTestBucketPool = base.NewTestBucketPool(ViewsAndGSIBucketReadier, ViewsAndGSIBucketInit)
+
+ status := m.Run()
+
+ base.GTestBucketPool.Close()
+
+ os.Exit(status)
+}
diff --git a/db/query_test.go b/db/query_test.go
index d1497da818..47e3253a61 100644
--- a/db/query_test.go
+++ b/db/query_test.go
@@ -16,13 +16,13 @@ import (
// Validate stats for view query
func TestQueryChannelsStatsView(t *testing.T) {
- if !base.UnitTestUrlIsWalrus() {
- t.Skip("This test is walrus-only (requires views)")
+ if !base.UnitTestUrlIsWalrus() || !base.TestsDisableGSI() {
+ t.Skip("This test is Walrus and UseViews=true only")
}
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
// docID -> Sequence
docSeqMap := make(map[string]uint64, 3)
@@ -68,13 +68,13 @@ func TestQueryChannelsStatsView(t *testing.T) {
// Validate stats for n1ql query
func TestQueryChannelsStatsN1ql(t *testing.T) {
- if base.UnitTestUrlIsWalrus() {
+ if base.UnitTestUrlIsWalrus() || base.TestsDisableGSI() {
t.Skip("This test is Couchbase Server only")
}
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
// docID -> Sequence
docSeqMap := make(map[string]uint64, 3)
@@ -122,7 +122,7 @@ func TestQuerySequencesStatsView(t *testing.T) {
db, testBucket := setupTestDBWithViewsEnabled(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
// docID -> Sequence
docSeqMap := make(map[string]uint64, 20)
@@ -213,13 +213,13 @@ func TestQuerySequencesStatsView(t *testing.T) {
// Validate query and stats for sequence view query
func TestQuerySequencesStatsN1ql(t *testing.T) {
- if base.UnitTestUrlIsWalrus() {
- t.Skip("This test is Couchbase Server only")
+ if base.UnitTestUrlIsWalrus() || base.TestsDisableGSI() {
+ t.Skip("This test is Couchbase Server and UseViews=false only")
}
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
// docID -> Sequence
docSeqMap := make(map[string]uint64, 20)
@@ -310,13 +310,13 @@ func TestQuerySequencesStatsN1ql(t *testing.T) {
// Validate that channels queries (channels, starChannel) are covering
func TestCoveringQueries(t *testing.T) {
- if base.UnitTestUrlIsWalrus() {
- t.Skip("This test is Couchbase Server only")
+ if base.UnitTestUrlIsWalrus() || base.TestsDisableGSI() {
+ t.Skip("This test is Couchbase Server and UseViews=false only")
}
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
gocbBucket, ok := base.AsGoCBBucket(testBucket)
if !ok {
@@ -365,13 +365,13 @@ func TestCoveringQueries(t *testing.T) {
func TestAllDocsQuery(t *testing.T) {
- if base.UnitTestUrlIsWalrus() {
- t.Skip("This test is Couchbase Server only")
+ if base.UnitTestUrlIsWalrus() || base.TestsDisableGSI() {
+ t.Skip("This test is Couchbase Server and UseViews=false only")
}
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
// Add docs with channel assignment
for i := 1; i <= 10; i++ {
@@ -428,13 +428,13 @@ func TestAllDocsQuery(t *testing.T) {
}
func TestAccessQuery(t *testing.T) {
- if base.UnitTestUrlIsWalrus() {
- t.Skip("This test is Couchbase Server only")
+ if base.UnitTestUrlIsWalrus() || base.TestsDisableGSI() {
+ t.Skip("This test is Couchbase Server and UseViews=false only")
}
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
db.ChannelMapper = channels.NewChannelMapper(`function(doc, oldDoc) {
access(doc.accessUser, doc.accessChannel)
@@ -485,7 +485,7 @@ func TestRoleAccessQuery(t *testing.T) {
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
db.ChannelMapper = channels.NewChannelMapper(`function(doc, oldDoc) {
role(doc.accessUser, "role:" + doc.accessChannel)
@@ -571,8 +571,8 @@ func countQueryResults(results sgbucket.QueryResultIterator) int {
}
func TestQueryChannelsActiveOnlyWithLimit(t *testing.T) {
- if base.UnitTestUrlIsWalrus() {
- t.Skip("This test require Couchbase Server")
+ if base.UnitTestUrlIsWalrus() || base.TestsDisableGSI() {
+ t.Skip("This test is Couchbase Server and UseViews=false only")
}
db, testBucket := setupTestDB(t)
diff --git a/db/repair_bucket_test.go b/db/repair_bucket_test.go
index e225ed1701..93d380ad20 100644
--- a/db/repair_bucket_test.go
+++ b/db/repair_bucket_test.go
@@ -16,7 +16,7 @@ const (
docIdProblematicRevTree2 = "docIdProblematicRevTree2"
)
-func testBucketWithViewsAndBrokenDoc(t testing.TB) (tBucket base.TestBucket, numDocs int) {
+func testBucketWithViewsAndBrokenDoc(t testing.TB) (tBucket *base.TestBucket, numDocs int) {
numDocsAdded := 0
tBucket = base.GetTestBucket(t)
diff --git a/db/revision_cache_test.go b/db/revision_cache_test.go
index f8b310496b..75a8351ba6 100644
--- a/db/revision_cache_test.go
+++ b/db/revision_cache_test.go
@@ -169,7 +169,7 @@ func TestRevisionCacheInternalProperties(t *testing.T) {
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
// Invalid _revisions property will be stripped. Should also not be present in the rev cache.
rev1body := Body{
@@ -218,7 +218,7 @@ func TestBypassRevisionCache(t *testing.T) {
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
docBody := Body{
"value": 1234,
@@ -280,7 +280,7 @@ func TestPutRevisionCacheAttachmentProperty(t *testing.T) {
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
rev1body := Body{
"value": 1234,
@@ -322,7 +322,7 @@ func TestPutExistingRevRevisionCacheAttachmentProperty(t *testing.T) {
db, testBucket := setupTestDB(t)
defer testBucket.Close()
- defer tearDownTestDB(t, db)
+ defer db.Close()
docKey := "doc1"
rev1body := Body{
diff --git a/db/sequence_allocator_test.go b/db/sequence_allocator_test.go
index a384e2d1cc..314e66737a 100644
--- a/db/sequence_allocator_test.go
+++ b/db/sequence_allocator_test.go
@@ -13,7 +13,7 @@ import (
func TestSequenceAllocator(t *testing.T) {
- testBucket := testBucket(t)
+ testBucket := base.GetTestBucket(t)
defer testBucket.Close()
bucket := testBucket.Bucket
testStats := new(expvar.Map).Init()
@@ -77,7 +77,7 @@ func TestSequenceAllocator(t *testing.T) {
func TestReleaseSequencesOnStop(t *testing.T) {
- testBucket := testBucket(t)
+ testBucket := base.GetTestBucket(t)
defer testBucket.Close()
bucket := testBucket.Bucket
testStats := new(expvar.Map).Init()
@@ -150,15 +150,17 @@ func TestSequenceAllocatorDeadlock(t *testing.T) {
}
}
- testBucket := testLeakyBucket(base.LeakyBucketConfig{IncrCallback: incrCallback}, t)
+ testBucket := base.GetTestBucket(t)
defer testBucket.Close()
+ bucket := base.NewLeakyBucket(testBucket.Bucket, base.LeakyBucketConfig{IncrCallback: incrCallback})
+
testStats := new(expvar.Map).Init()
oldFrequency := MaxSequenceIncrFrequency
defer func() { MaxSequenceIncrFrequency = oldFrequency }()
MaxSequenceIncrFrequency = 1000 * time.Millisecond
- a, err = newSequenceAllocator(testBucket, testStats)
+ a, err = newSequenceAllocator(bucket, testStats)
// Reduce sequence wait for Stop testing
a.releaseSequenceWait = 10 * time.Millisecond
assert.NoError(t, err, "error creating allocator")
diff --git a/db/util_testing.go b/db/util_testing.go
index 25ea0c0b87..187d7717a6 100644
--- a/db/util_testing.go
+++ b/db/util_testing.go
@@ -1,6 +1,7 @@
package db
import (
+ "context"
"errors"
"expvar"
"fmt"
@@ -18,56 +19,53 @@ import (
func WaitForIndexEmpty(bucket *base.CouchbaseBucketGoCB, useXattrs bool) error {
retryWorker := func() (shouldRetry bool, err error, value interface{}) {
-
- var results gocb.QueryResults
-
- // Create the star channel query
- statement := fmt.Sprintf("%s LIMIT 1", QueryStarChannel.statement) // append LIMIT 1 since we only care if there are any results or not
- starChannelQueryStatement := replaceActiveOnlyFilter(statement, false)
- starChannelQueryStatement = replaceSyncTokensQuery(starChannelQueryStatement, useXattrs)
- starChannelQueryStatement = replaceIndexTokensQuery(starChannelQueryStatement, sgIndexes[IndexAllDocs], useXattrs)
- params := map[string]interface{}{}
- params[QueryParamStartSeq] = 0
- params[QueryParamEndSeq] = math.MaxInt64
-
- // Execute the query
- results, err = bucket.Query(starChannelQueryStatement, params, gocb.RequestPlus, true)
-
- // If there was an error, then retry. Assume it's an "index rollback" error which happens as
- // the index processes the bucket flush operation
+ empty, err := isIndexEmpty(bucket, useXattrs)
if err != nil {
- base.Infof(base.KeyAll, "Error querying star channel: %v. Assuming it's a temp error, will retry", err)
return true, err, nil
}
-
- // If it's empty, we're done
- var queryRow AllDocsIndexQueryRow
- found := results.Next(&queryRow)
- resultsCloseErr := results.Close()
- if resultsCloseErr != nil {
- return false, resultsCloseErr, nil
- }
- if !found {
- base.Infof(base.KeyAll, "WaitForIndexEmpty found 0 results. GSI index appears to be empty.")
- return false, nil, nil
- }
-
- // Otherwise, retry
- base.Infof(base.KeyAll, "WaitForIndexEmpty found non-zero results. Retrying until the GSI index is empty.")
- return true, nil, nil
-
+ return !empty, nil, empty
}
// Kick off the retry loop
err, _ := base.RetryLoop(
"Wait for index to be empty",
retryWorker,
- base.CreateMaxDoublingSleeperFunc(30, 100, 2000),
+ base.CreateMaxDoublingSleeperFunc(60, 500, 5000),
)
return err
}
+func isIndexEmpty(bucket *base.CouchbaseBucketGoCB, useXattrs bool) (bool, error) {
+ // Create the star channel query
+ statement := fmt.Sprintf("%s LIMIT 1", QueryStarChannel.statement) // append LIMIT 1 since we only care if there are any results or not
+ starChannelQueryStatement := replaceActiveOnlyFilter(statement, false)
+ starChannelQueryStatement = replaceSyncTokensQuery(starChannelQueryStatement, useXattrs)
+ starChannelQueryStatement = replaceIndexTokensQuery(starChannelQueryStatement, sgIndexes[IndexAllDocs], useXattrs)
+ params := map[string]interface{}{}
+ params[QueryParamStartSeq] = 0
+ params[QueryParamEndSeq] = math.MaxInt64
+
+ // Execute the query
+ results, err := bucket.Query(starChannelQueryStatement, params, gocb.RequestPlus, true)
+
+ // If there was an error, then retry. Assume it's an "index rollback" error which happens as
+ // the index processes the bucket flush operation
+ if err != nil {
+ return false, err
+ }
+
+ // If it's empty, we're done
+ var queryRow AllDocsIndexQueryRow
+ found := results.Next(&queryRow)
+ resultsCloseErr := results.Close()
+ if resultsCloseErr != nil {
+		return false, resultsCloseErr
+ }
+
+ return !found, nil
+}
+
// Count how many rows are in gocb.QueryResults
func ResultsEmpty(results gocb.QueryResults) (resultsEmpty bool) {
@@ -185,3 +183,103 @@ func WaitForUserWaiterChange(userWaiter *ChangeWaiter) bool {
}
return isChanged
}
+
+// ViewsAndGSIBucketReadier empties the bucket, initializes Views, and waits until GSI indexes are empty. It is run asynchronously as soon as a test is finished with a bucket.
+var ViewsAndGSIBucketReadier base.TBPBucketReadierFunc = func(ctx context.Context, b *base.CouchbaseBucketGoCB, tbp *base.TestBucketPool) error {
+
+ if base.TestsDisableGSI() {
+ tbp.Logf(ctx, "flushing bucket and readying views only")
+ if err := base.FlushBucketEmptierFunc(ctx, b, tbp); err != nil {
+ return err
+ }
+ // Exit early if we're not using GSI.
+ return viewBucketReadier(ctx, b, tbp)
+ }
+
+ tbp.Logf(ctx, "emptying bucket via N1QL, readying views and indexes")
+ if err := base.N1QLBucketEmptierFunc(ctx, b, tbp); err != nil {
+ return err
+ }
+ if err := viewBucketReadier(ctx, b, tbp); err != nil {
+ return err
+ }
+
+ tbp.Logf(ctx, "waiting for empty bucket indexes")
+ // we can't init indexes concurrently, so we'll just wait for them to be empty after emptying instead of recreating.
+ if err := WaitForIndexEmpty(b, base.TestUseXattrs()); err != nil {
+ tbp.Logf(ctx, "WaitForIndexEmpty returned an error: %v", err)
+ return err
+ }
+ tbp.Logf(ctx, "bucket indexes empty")
+
+ return nil
+}
+
+// ViewsAndGSIBucketInit is run synchronously only once per-bucket to do any initial setup. For non-integration Walrus buckets, this is run for each new Walrus bucket.
+var ViewsAndGSIBucketInit base.TBPBucketInitFunc = func(ctx context.Context, b base.Bucket, tbp *base.TestBucketPool) error {
+ gocbBucket, ok := base.AsGoCBBucket(b)
+ if !ok {
+ // Check we're not running with an invalid combination of backing store and xattrs.
+ if base.TestUseXattrs() {
+ return fmt.Errorf("xattrs not supported when using Walrus buckets")
+ }
+
+ tbp.Logf(ctx, "bucket not a gocb bucket... skipping GSI setup")
+ return viewBucketReadier(ctx, b, tbp)
+ }
+
+ // Exit early if we're not using GSI.
+ if base.TestsDisableGSI() {
+ return nil
+ }
+
+ if empty, err := isIndexEmpty(gocbBucket, base.TestUseXattrs()); empty && err == nil {
+ tbp.Logf(ctx, "indexes already created, and already empty - skipping")
+ return nil
+ } else {
+ tbp.Logf(ctx, "indexes not empty (or doesn't exist) - %v %v", empty, err)
+ }
+
+ tbp.Logf(ctx, "dropping existing bucket indexes")
+ if err := base.DropAllBucketIndexes(gocbBucket); err != nil {
+ tbp.Logf(ctx, "Failed to drop bucket indexes: %v", err)
+ return err
+ }
+
+ tbp.Logf(ctx, "creating SG bucket indexes")
+ if err := InitializeIndexes(gocbBucket, base.TestUseXattrs(), 0); err != nil {
+ return err
+ }
+
+ err := gocbBucket.CreatePrimaryIndex(base.PrimaryIndexName, nil)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// viewBucketReadier removes any existing views and installs a new set into the given bucket.
+func viewBucketReadier(ctx context.Context, b base.Bucket, tbp *base.TestBucketPool) error {
+ var ddocs map[string]interface{}
+ err := b.GetDDocs(&ddocs)
+ if err != nil {
+ return err
+ }
+
+	for ddocName := range ddocs {
+ tbp.Logf(ctx, "removing existing view: %s", ddocName)
+ if err := b.DeleteDDoc(ddocName); err != nil {
+ return err
+ }
+ }
+
+ tbp.Logf(ctx, "initializing bucket views")
+ err = InitializeViews(b)
+ if err != nil {
+ return err
+ }
+
+ tbp.Logf(ctx, "bucket views initialized")
+ return nil
+}
diff --git a/manifest/default.xml b/manifest/default.xml
index 43b83c1b7e..cc76b5bf73 100644
--- a/manifest/default.xml
+++ b/manifest/default.xml
@@ -31,14 +31,14 @@
-
+
-
-
+
+
-
+
@@ -46,7 +46,7 @@
-
+
@@ -111,7 +111,7 @@
-
+
diff --git a/rest/api_test_no_race_test.go b/rest/api_test_no_race_test.go
index 5d044c89c5..1708947229 100644
--- a/rest/api_test_no_race_test.go
+++ b/rest/api_test_no_race_test.go
@@ -34,24 +34,24 @@ func TestChangesAccessNotifyInteger(t *testing.T) {
defer base.SetUpTestLogging(base.LevelInfo, base.KeyChanges, base.KeyHTTP)()
- it := initIndexTester(`function(doc) {channel(doc.channel); access(doc.accessUser, doc.accessChannel);}`, t)
- defer it.Close()
+ rt := NewRestTester(t, &RestTesterConfig{SyncFn: `function(doc) {channel(doc.channel); access(doc.accessUser, doc.accessChannel);}`})
+ defer rt.Close()
// Create user:
- a := it.ServerContext().Database("db").Authenticator()
+ a := rt.ServerContext().Database("db").Authenticator()
bernard, err := a.NewUser("bernard", "letmein", channels.SetOf(t, "ABC"))
assert.NoError(t, err)
assert.NoError(t, a.Save(bernard))
// Put several documents in channel PBS
- response := it.SendAdminRequest("PUT", "/db/pbs1", `{"value":1, "channel":["PBS"]}`)
+ response := rt.SendAdminRequest("PUT", "/db/pbs1", `{"value":1, "channel":["PBS"]}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/pbs2", `{"value":2, "channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/pbs2", `{"value":2, "channel":["PBS"]}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/pbs3", `{"value":3, "channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/pbs3", `{"value":3, "channel":["PBS"]}`)
assertStatus(t, response, 201)
- caughtUpWaiter := it.GetDatabase().NewPullReplicationCaughtUpWaiter(t)
+ caughtUpWaiter := rt.GetDatabase().NewPullReplicationCaughtUpWaiter(t)
// Start longpoll changes request
var wg sync.WaitGroup
wg.Add(1)
@@ -62,7 +62,7 @@ func TestChangesAccessNotifyInteger(t *testing.T) {
Last_Seq db.SequenceID
}
changesJSON := `{"style":"all_docs", "heartbeat":300000, "feed":"longpoll", "limit":50, "since":"0"}`
- changesResponse := it.Send(requestByUser("POST", "/db/_changes", changesJSON, "bernard"))
+ changesResponse := rt.Send(requestByUser("POST", "/db/_changes", changesJSON, "bernard"))
err = base.JSONUnmarshal(changesResponse.Body.Bytes(), &changes)
goassert.Equals(t, len(changes.Results), 3)
}()
@@ -71,7 +71,7 @@ func TestChangesAccessNotifyInteger(t *testing.T) {
caughtUpWaiter.AddAndWait(1)
// Put document that triggers access grant for user, PBS
- response = it.SendAdminRequest("PUT", "/db/access1", `{"accessUser":"bernard", "accessChannel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/access1", `{"accessUser":"bernard", "accessChannel":["PBS"]}`)
assertStatus(t, response, 201)
wg.Wait()
@@ -89,16 +89,16 @@ func TestChangesNotifyChannelFilter(t *testing.T) {
defer base.SetUpTestLogging(base.LevelInfo, base.KeyChanges, base.KeyHTTP)()
- it := initIndexTester(`function(doc) {channel(doc.channel);}`, t)
- defer it.Close()
+ rt := NewRestTester(t, &RestTesterConfig{SyncFn: `function(doc) {channel(doc.channel);}`})
+ defer rt.Close()
// Create user:
- userResponse := it.SendAdminRequest("PUT", "/db/_user/bernard", `{"name":"bernard", "password":"letmein", "admin_channels":["ABC"]}`)
+ userResponse := rt.SendAdminRequest("PUT", "/db/_user/bernard", `{"name":"bernard", "password":"letmein", "admin_channels":["ABC"]}`)
assertStatus(t, userResponse, 201)
// Get user, to trigger all_channels calculation and bump the user change count BEFORE we write the PBS docs - otherwise the user key count
// will still be higher than the latest change count.
- userResponse = it.SendAdminRequest("GET", "/db/_user/bernard", "")
+ userResponse = rt.SendAdminRequest("GET", "/db/_user/bernard", "")
assertStatus(t, userResponse, 200)
/*
@@ -109,11 +109,11 @@ func TestChangesNotifyChannelFilter(t *testing.T) {
*/
// Put several documents in channel PBS
- response := it.SendAdminRequest("PUT", "/db/pbs1", `{"value":1, "channel":["PBS"]}`)
+ response := rt.SendAdminRequest("PUT", "/db/pbs1", `{"value":1, "channel":["PBS"]}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/pbs2", `{"value":2, "channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/pbs2", `{"value":2, "channel":["PBS"]}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/pbs3", `{"value":3, "channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/pbs3", `{"value":3, "channel":["PBS"]}`)
assertStatus(t, response, 201)
// Run an initial changes request to get the user doc, and update since based on last_seq:
@@ -129,13 +129,13 @@ func TestChangesNotifyChannelFilter(t *testing.T) {
"filter":"sync_gateway/bychannel",
"channels":"ABC,PBS"}`
sinceZeroJSON := fmt.Sprintf(changesJSON, "0")
- changesResponse := it.Send(requestByUser("POST", "/db/_changes", sinceZeroJSON, "bernard"))
+ changesResponse := rt.Send(requestByUser("POST", "/db/_changes", sinceZeroJSON, "bernard"))
err := base.JSONUnmarshal(changesResponse.Body.Bytes(), &initialChanges)
assert.NoError(t, err, "Unexpected error unmarshalling initialChanges")
lastSeq := initialChanges.Last_Seq.String()
goassert.Equals(t, lastSeq, "1")
- caughtUpWaiter := it.GetDatabase().NewPullReplicationCaughtUpWaiter(t)
+ caughtUpWaiter := rt.GetDatabase().NewPullReplicationCaughtUpWaiter(t)
caughtUpWaiter.Add(1)
// Start longpoll changes request, requesting (unavailable) channel PBS. Should block.
var wg sync.WaitGroup
@@ -147,7 +147,7 @@ func TestChangesNotifyChannelFilter(t *testing.T) {
Last_Seq db.SequenceID
}
sinceLastJSON := fmt.Sprintf(changesJSON, lastSeq)
- changesResponse := it.Send(requestByUser("POST", "/db/_changes", sinceLastJSON, "bernard"))
+ changesResponse := rt.Send(requestByUser("POST", "/db/_changes", sinceLastJSON, "bernard"))
err = base.JSONUnmarshal(changesResponse.Body.Bytes(), &changes)
goassert.Equals(t, len(changes.Results), 1)
}()
@@ -156,7 +156,7 @@ func TestChangesNotifyChannelFilter(t *testing.T) {
caughtUpWaiter.Wait()
// Put public document that triggers termination of the longpoll
- response = it.SendAdminRequest("PUT", "/db/abc1", `{"value":3, "channel":["ABC"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/abc1", `{"value":3, "channel":["ABC"]}`)
assertStatus(t, response, 201)
wg.Wait()
}
diff --git a/rest/blip_api_test.go b/rest/blip_api_test.go
index c83098a97f..dbe161f5f0 100644
--- a/rest/blip_api_test.go
+++ b/rest/blip_api_test.go
@@ -1499,10 +1499,11 @@ func TestPutAttachmentViaBlipGetViaBlip(t *testing.T) {
goassert.True(t, sent)
// Get all docs and attachment via subChanges request
- allDocs := bt.WaitForNumDocsViaChanges(1)
+ allDocs, ok := bt.WaitForNumDocsViaChanges(1)
+ require.True(t, ok)
// make assertions on allDocs -- make sure attachment is present w/ expected body
- goassert.Equals(t, len(allDocs), 1)
+ require.Len(t, allDocs, 1)
retrievedDoc := allDocs[input.docId]
// doc assertions
@@ -1924,9 +1925,11 @@ func TestMissingNoRev(t *testing.T) {
}
defer rt.Close()
bt, err := NewBlipTesterFromSpec(t, btSpec)
- assert.NoError(t, err, "Unexpected error creating BlipTester")
+ require.NoError(t, err, "Unexpected error creating BlipTester")
defer bt.Close()
+ require.NoError(t, rt.WaitForDBOnline())
+
// Create 5 docs
for i := 0; i < 5; i++ {
docId := fmt.Sprintf("doc-%d", i)
@@ -1943,8 +1946,9 @@ func TestMissingNoRev(t *testing.T) {
assert.NoError(t, err, "failed")
// Pull docs, expect to pull 5 docs since none of them has purged yet.
- docs := bt.WaitForNumDocsViaChanges(5)
- goassert.True(t, len(docs) == 5)
+ docs, ok := bt.WaitForNumDocsViaChanges(5)
+ assert.True(t, ok)
+ assert.Len(t, docs, 5)
// Purge one doc
doc0Id := fmt.Sprintf("doc-%d", 0)
@@ -1955,8 +1958,10 @@ func TestMissingNoRev(t *testing.T) {
targetDb.FlushRevisionCacheForTest()
// Pull docs, expect to pull 4 since one was purged. (also expect to NOT get stuck)
- docs = bt.WaitForNumDocsViaChanges(4)
- goassert.True(t, len(docs) == 4)
+ docs, ok = bt.WaitForNumDocsViaChanges(4)
+ assert.True(t, ok)
+ assert.Len(t, docs, 4)
+
}
// TestBlipDeltaSyncPull tests that a simple pull replication uses deltas in EE,
diff --git a/rest/changes_api_test.go b/rest/changes_api_test.go
index 269afbef0d..8eefbf7d86 100644
--- a/rest/changes_api_test.go
+++ b/rest/changes_api_test.go
@@ -30,66 +30,6 @@ import (
"github.com/stretchr/testify/require"
)
-type indexTester struct {
- RestTester
- _indexBucket base.Bucket
-}
-
-func initRestTester(syncFn string, testing testing.TB) *indexTester {
- return initIndexTester(syncFn, testing)
-}
-
-func initIndexTester(syncFn string, tb testing.TB) *indexTester {
-
- it := &indexTester{RestTester: *NewRestTester(tb, nil)}
- it.SyncFn = syncFn
-
- it.RestTesterServerContext = NewServerContext(&ServerConfig{
- Facebook: &FacebookConfig{},
- })
-
- var syncFnPtr *string
- if len(it.SyncFn) > 0 {
- syncFnPtr = &it.SyncFn
- }
-
- // TODO: this should be able to use either a Walrus or a Couchbase bucket.
- // When supported, set dbConfig.UseViews conditionally
-
- serverName := "walrus:"
- //serverName := "http://localhost:8091"
- bucketName := "sg_bucket"
-
- feedType := "tap"
-
- dbConfig := &DbConfig{
- BucketConfig: BucketConfig{
- Server: &serverName,
- Bucket: &bucketName},
- Name: "db",
- Sync: syncFnPtr,
- FeedType: feedType,
- UseViews: true, // walrus only supports views
- }
-
- _, err := it.RestTesterServerContext.AddDatabaseFromConfig(dbConfig)
- if err != nil {
- panic(fmt.Sprintf("Error from AddDatabaseFromConfig: %v", err))
- }
-
- it.RestTesterBucket = it.RestTesterServerContext.Database("db").Bucket
-
- return it
-}
-
-func (it *indexTester) Close() {
- it.RestTesterServerContext.Close()
-}
-
-func (it *indexTester) ServerContext() *ServerContext {
- return it.RestTesterServerContext
-}
-
// Reproduces issue #2383 by forcing a partial error from the view on the first changes request.
func TestReproduce2383(t *testing.T) {
@@ -127,8 +67,9 @@ func TestReproduce2383(t *testing.T) {
Last_Seq interface{}
}
- leakyBucket, ok := rt.Bucket().(*base.LeakyBucket)
- assert.True(t, ok, "Bucket was not of type LeakyBucket")
+ leakyBucket, ok := rt.testBucket.Bucket.(*base.LeakyBucket)
+ require.True(t, ok)
+
// Force a partial error for the first ViewCustom call we make to initialize an invalid cache.
leakyBucket.SetFirstTimeViewCustomPartialError(true)
@@ -249,28 +190,28 @@ func TestPostChangesInteger(t *testing.T) {
defer base.SetUpTestLogging(base.LevelInfo, base.KeyChanges, base.KeyHTTP)()
- it := initIndexTester(`function(doc) {channel(doc.channel);}`, t)
- defer it.Close()
+ rt := NewRestTester(t, &RestTesterConfig{SyncFn: `function(doc) {channel(doc.channel);}`})
+ defer rt.Close()
- postChanges(t, it)
+ postChanges(t, rt)
}
-func postChanges(t *testing.T, it *indexTester) {
+func postChanges(t *testing.T, rt *RestTester) {
// Create user:
- a := it.ServerContext().Database("db").Authenticator()
+ a := rt.ServerContext().Database("db").Authenticator()
bernard, err := a.NewUser("bernard", "letmein", channels.SetOf(t, "PBS"))
assert.NoError(t, err)
assert.NoError(t, a.Save(bernard))
// Put several documents
- response := it.SendAdminRequest("PUT", "/db/pbs1", `{"value":1, "channel":["PBS"]}`)
+ response := rt.SendAdminRequest("PUT", "/db/pbs1", `{"value":1, "channel":["PBS"]}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/abc1", `{"value":1, "channel":["ABC"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/abc1", `{"value":1, "channel":["ABC"]}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/pbs2", `{"value":2, "channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/pbs2", `{"value":2, "channel":["PBS"]}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/pbs3", `{"value":3, "channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/pbs3", `{"value":3, "channel":["PBS"]}`)
assertStatus(t, response, 201)
var changes struct {
@@ -278,7 +219,7 @@ func postChanges(t *testing.T, it *indexTester) {
Last_Seq db.SequenceID
}
changesJSON := `{"style":"all_docs", "heartbeat":300000, "feed":"longpoll", "limit":50, "since":"0"}`
- changesResponse := it.Send(requestByUser("POST", "/db/_changes", changesJSON, "bernard"))
+ changesResponse := rt.Send(requestByUser("POST", "/db/_changes", changesJSON, "bernard"))
err = base.JSONUnmarshal(changesResponse.Body.Bytes(), &changes)
assert.NoError(t, err, "Error unmarshalling changes response")
@@ -295,11 +236,11 @@ func TestPostChangesUserTiming(t *testing.T) {
defer base.SetUpTestLogging(base.LevelInfo, base.KeyChanges, base.KeyHTTP)()
- it := initIndexTester(`function(doc) {channel(doc.channel); access(doc.accessUser, doc.accessChannel)}`, t)
- defer it.Close()
+ rt := NewRestTester(t, &RestTesterConfig{SyncFn: `function(doc) {channel(doc.channel); access(doc.accessUser, doc.accessChannel)}`})
+ defer rt.Close()
// Create user:
- a := it.ServerContext().Database("db").Authenticator()
+ a := rt.ServerContext().Database("db").Authenticator()
bernard, err := a.NewUser("bernard", "letmein", channels.SetOf(t, "bernard"))
assert.True(t, err == nil)
assert.NoError(t, a.Save(bernard))
@@ -307,14 +248,14 @@ func TestPostChangesUserTiming(t *testing.T) {
var wg sync.WaitGroup
// Put several documents to channel PBS
- response := it.SendAdminRequest("PUT", "/db/pbs1", `{"value":1, "channel":["PBS"]}`)
+ response := rt.SendAdminRequest("PUT", "/db/pbs1", `{"value":1, "channel":["PBS"]}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/pbs2", `{"value":2, "channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/pbs2", `{"value":2, "channel":["PBS"]}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/pbs3", `{"value":3, "channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/pbs3", `{"value":3, "channel":["PBS"]}`)
assertStatus(t, response, 201)
- caughtUpCount := base.ExpvarVar2Int(it.GetDatabase().DbStats.StatsCblReplicationPull().Get(base.StatKeyPullReplicationsCaughtUp))
+ caughtUpCount := base.ExpvarVar2Int(rt.GetDatabase().DbStats.StatsCblReplicationPull().Get(base.StatKeyPullReplicationsCaughtUp))
wg.Add(1)
go func() {
@@ -324,7 +265,7 @@ func TestPostChangesUserTiming(t *testing.T) {
Last_Seq string
}
changesJSON := `{"style":"all_docs", "timeout":6000, "feed":"longpoll", "limit":50, "since":"0"}`
- changesResponse := it.Send(requestByUser("POST", "/db/_changes", changesJSON, "bernard"))
+ changesResponse := rt.Send(requestByUser("POST", "/db/_changes", changesJSON, "bernard"))
// Validate that the user receives backfill plus the new doc
err = base.JSONUnmarshal(changesResponse.Body.Bytes(), &changes)
assert.NoError(t, err, "Error unmarshalling changes response")
@@ -339,10 +280,10 @@ func TestPostChangesUserTiming(t *testing.T) {
}()
// Wait for changes feed to get into wait mode where it is blocked on the longpoll changes feed response
- require.NoError(t, it.GetDatabase().WaitForCaughtUp(caughtUpCount+1))
+ require.NoError(t, rt.GetDatabase().WaitForCaughtUp(caughtUpCount+1))
// Put a doc in channel bernard, that also grants bernard access to channel PBS
- response = it.SendAdminRequest("PUT", "/db/grant1", `{"value":1, "accessUser":"bernard", "accessChannel":"PBS"}`)
+ response = rt.SendAdminRequest("PUT", "/db/grant1", `{"value":1, "accessUser":"bernard", "accessChannel":"PBS"}`)
assertStatus(t, response, 201)
wg.Wait()
@@ -356,27 +297,27 @@ func TestPostChangesSinceInteger(t *testing.T) {
defer base.SetUpTestLogging(base.LevelInfo, base.KeyAll)()
- it := initIndexTester(`function(doc) {channel(doc.channel);}`, t)
- defer it.Close()
+ rt := NewRestTester(t, &RestTesterConfig{SyncFn: `function(doc) {channel(doc.channel);}`})
+ defer rt.Close()
- postChangesSince(t, it)
+ postChangesSince(t, rt)
}
func TestPostChangesWithQueryString(t *testing.T) {
- it := initIndexTester(`function(doc) {channel(doc.channel);}`, t)
- defer it.Close()
+ rt := NewRestTester(t, &RestTesterConfig{SyncFn: `function(doc) {channel(doc.channel);}`})
+ defer rt.Close()
// Put several documents
- response := it.SendAdminRequest("PUT", "/db/pbs1", `{"value":1, "channel":["PBS"]}`)
+ response := rt.SendAdminRequest("PUT", "/db/pbs1", `{"value":1, "channel":["PBS"]}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/abc1", `{"value":1, "channel":["ABC"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/abc1", `{"value":1, "channel":["ABC"]}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/pbs2", `{"value":2, "channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/pbs2", `{"value":2, "channel":["PBS"]}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/pbs3", `{"value":3, "channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/pbs3", `{"value":3, "channel":["PBS"]}`)
assertStatus(t, response, 201)
- _ = it.WaitForPendingChanges()
+ _ = rt.WaitForPendingChanges()
var changes struct {
Results []db.ChangeEntry
@@ -385,7 +326,7 @@ func TestPostChangesWithQueryString(t *testing.T) {
// Test basic properties
changesJSON := `{"heartbeat":50, "feed":"normal", "limit":1, "since":"3"}`
- changesResponse := it.SendAdminRequest("POST", "/db/_changes?feed=longpoll&limit=10&since=0&heartbeat=50000", changesJSON)
+ changesResponse := rt.SendAdminRequest("POST", "/db/_changes?feed=longpoll&limit=10&since=0&heartbeat=50000", changesJSON)
err := base.JSONUnmarshal(changesResponse.Body.Bytes(), &changes)
assert.NoError(t, err, "Error unmarshalling changes response")
@@ -397,7 +338,7 @@ func TestPostChangesWithQueryString(t *testing.T) {
Last_Seq db.SequenceID
}
changesJSON = `{"feed":"longpoll"}`
- changesResponse = it.SendAdminRequest("POST", "/db/_changes?feed=longpoll&filter=sync_gateway/bychannel&channels=ABC", changesJSON)
+ changesResponse = rt.SendAdminRequest("POST", "/db/_changes?feed=longpoll&filter=sync_gateway/bychannel&channels=ABC", changesJSON)
err = base.JSONUnmarshal(changesResponse.Body.Bytes(), &filteredChanges)
assert.NoError(t, err, "Error unmarshalling changes response")
@@ -405,24 +346,24 @@ func TestPostChangesWithQueryString(t *testing.T) {
}
// Basic _changes test with since value
-func postChangesSince(t *testing.T, it *indexTester) {
+func postChangesSince(t *testing.T, rt *RestTester) {
// Create user
- response := it.SendAdminRequest("PUT", "/db/_user/bernard", `{"email":"bernard@bb.com", "password":"letmein", "admin_channels":["PBS"]}`)
+ response := rt.SendAdminRequest("PUT", "/db/_user/bernard", `{"email":"bernard@bb.com", "password":"letmein", "admin_channels":["PBS"]}`)
assertStatus(t, response, 201)
- cacheWaiter := it.GetDatabase().NewDCPCachingCountWaiter(t)
+ cacheWaiter := rt.GetDatabase().NewDCPCachingCountWaiter(t)
// Put several documents
- response = it.SendAdminRequest("PUT", "/db/pbs1-0000609", `{"channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/pbs1-0000609", `{"channel":["PBS"]}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/samevbdiffchannel-0000609", `{"channel":["ABC"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/samevbdiffchannel-0000609", `{"channel":["ABC"]}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/samevbdiffchannel-0000799", `{"channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/samevbdiffchannel-0000799", `{"channel":["PBS"]}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/pbs2-0000609", `{"channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/pbs2-0000609", `{"channel":["PBS"]}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/pbs3-0000609", `{"channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/pbs3-0000609", `{"channel":["PBS"]}`)
assertStatus(t, response, 201)
cacheWaiter.AddAndWait(5)
@@ -431,7 +372,7 @@ func postChangesSince(t *testing.T, it *indexTester) {
Last_Seq interface{}
}
changesJSON := `{"style":"all_docs", "heartbeat":300000, "feed":"longpoll", "limit":50, "since":"0"}`
- changesResponse := it.Send(requestByUser("POST", "/db/_changes", changesJSON, "bernard"))
+ changesResponse := rt.Send(requestByUser("POST", "/db/_changes", changesJSON, "bernard"))
err := base.JSONUnmarshal(changesResponse.Body.Bytes(), &changes)
log.Printf("Changes:%s", changesResponse.Body.Bytes())
@@ -439,18 +380,18 @@ func postChangesSince(t *testing.T, it *indexTester) {
require.Len(t, changes.Results, 5)
// Put several more documents, some to the same vbuckets
- response = it.SendAdminRequest("PUT", "/db/pbs1-0000799", `{"value":1, "channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/pbs1-0000799", `{"value":1, "channel":["PBS"]}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/abc1-0000609", `{"value":1, "channel":["ABC"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/abc1-0000609", `{"value":1, "channel":["ABC"]}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/pbs2-0000799", `{"value":2, "channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/pbs2-0000799", `{"value":2, "channel":["PBS"]}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/pbs4", `{"value":4, "channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/pbs4", `{"value":4, "channel":["PBS"]}`)
assertStatus(t, response, 201)
cacheWaiter.AddAndWait(4)
changesJSON = fmt.Sprintf(`{"style":"all_docs", "heartbeat":300000, "feed":"longpoll", "limit":50, "since":"%s"}`, changes.Last_Seq)
- changesResponse = it.Send(requestByUser("POST", "/db/_changes", changesJSON, "bernard"))
+ changesResponse = rt.Send(requestByUser("POST", "/db/_changes", changesJSON, "bernard"))
err = base.JSONUnmarshal(changesResponse.Body.Bytes(), &changes)
log.Printf("Changes:%s", changesResponse.Body.Bytes())
assert.NoError(t, err, "Error unmarshalling changes response")
@@ -462,32 +403,32 @@ func TestPostChangesChannelFilterInteger(t *testing.T) {
defer base.SetUpTestLogging(base.LevelInfo, base.KeyChanges, base.KeyHTTP)()
- it := initIndexTester(`function(doc) {channel(doc.channel);}`, t)
- defer it.Close()
+ rt := NewRestTester(t, &RestTesterConfig{SyncFn: `function(doc) {channel(doc.channel);}`})
+ defer rt.Close()
- postChangesChannelFilter(t, it)
+ postChangesChannelFilter(t, rt)
}
// Test _changes with channel filter
-func postChangesChannelFilter(t *testing.T, it *indexTester) {
+func postChangesChannelFilter(t *testing.T, rt *RestTester) {
// Create user:
- a := it.ServerContext().Database("db").Authenticator()
+ a := rt.ServerContext().Database("db").Authenticator()
bernard, err := a.NewUser("bernard", "letmein", channels.SetOf(t, "PBS"))
assert.NoError(t, err)
assert.NoError(t, a.Save(bernard))
- cacheWaiter := it.GetDatabase().NewDCPCachingCountWaiter(t)
+ cacheWaiter := rt.GetDatabase().NewDCPCachingCountWaiter(t)
// Put several documents
- response := it.SendAdminRequest("PUT", "/db/pbs1-0000609", `{"channel":["PBS"]}`)
+ response := rt.SendAdminRequest("PUT", "/db/pbs1-0000609", `{"channel":["PBS"]}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/samevbdiffchannel-0000609", `{"channel":["ABC"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/samevbdiffchannel-0000609", `{"channel":["ABC"]}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/samevbdiffchannel-0000799", `{"channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/samevbdiffchannel-0000799", `{"channel":["PBS"]}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/pbs2-0000609", `{"channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/pbs2-0000609", `{"channel":["PBS"]}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/pbs3-0000609", `{"channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/pbs3-0000609", `{"channel":["PBS"]}`)
assertStatus(t, response, 201)
cacheWaiter.AddAndWait(5)
@@ -497,23 +438,23 @@ func postChangesChannelFilter(t *testing.T, it *indexTester) {
}
changesJSON := `{"filter":"sync_gateway/bychannel", "channels":"PBS"}`
- changesResponse := it.Send(requestByUser("POST", "/db/_changes", changesJSON, "bernard"))
+ changesResponse := rt.Send(requestByUser("POST", "/db/_changes", changesJSON, "bernard"))
err = base.JSONUnmarshal(changesResponse.Body.Bytes(), &changes)
assert.NoError(t, err, "Error unmarshalling changes response")
require.Len(t, changes.Results, 4)
// Put several more documents, some to the same vbuckets
- response = it.SendAdminRequest("PUT", "/db/pbs1-0000799", `{"value":1, "channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/pbs1-0000799", `{"value":1, "channel":["PBS"]}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/abc1-0000609", `{"value":1, "channel":["ABC"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/abc1-0000609", `{"value":1, "channel":["ABC"]}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/pbs2-0000799", `{"value":2, "channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/pbs2-0000799", `{"value":2, "channel":["PBS"]}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/pbs4", `{"value":4, "channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/pbs4", `{"value":4, "channel":["PBS"]}`)
assertStatus(t, response, 201)
cacheWaiter.AddAndWait(4)
- changesResponse = it.Send(requestByUser("POST", "/db/_changes", changesJSON, "bernard"))
+ changesResponse = rt.Send(requestByUser("POST", "/db/_changes", changesJSON, "bernard"))
err = base.JSONUnmarshal(changesResponse.Body.Bytes(), &changes)
assert.NoError(t, err, "Error unmarshalling changes response")
for _, result := range changes.Results {
@@ -531,32 +472,32 @@ func TestPostChangesAdminChannelGrantInteger(t *testing.T) {
defer base.SetUpTestLogging(base.LevelInfo, base.KeyChanges, base.KeyHTTP)()
- it := initIndexTester(`function(doc) {channel(doc.channel);}`, t)
- defer it.Close()
- postChangesAdminChannelGrant(t, it)
+ rt := NewRestTester(t, &RestTesterConfig{SyncFn: `function(doc) {channel(doc.channel);}`})
+ defer rt.Close()
+ postChangesAdminChannelGrant(t, rt)
}
// _changes with admin-based channel grant
-func postChangesAdminChannelGrant(t *testing.T, it *indexTester) {
+func postChangesAdminChannelGrant(t *testing.T, rt *RestTester) {
// Create user with access to channel ABC:
- a := it.ServerContext().Database("db").Authenticator()
+ a := rt.ServerContext().Database("db").Authenticator()
bernard, err := a.NewUser("bernard", "letmein", channels.SetOf(t, "ABC"))
assert.NoError(t, err)
assert.NoError(t, a.Save(bernard))
- cacheWaiter := it.GetDatabase().NewDCPCachingCountWaiter(t)
+ cacheWaiter := rt.GetDatabase().NewDCPCachingCountWaiter(t)
// Put several documents in channel ABC and PBS
- response := it.SendAdminRequest("PUT", "/db/pbs-1", `{"channel":["PBS"]}`)
+ response := rt.SendAdminRequest("PUT", "/db/pbs-1", `{"channel":["PBS"]}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/pbs-2", `{"channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/pbs-2", `{"channel":["PBS"]}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/pbs-3", `{"channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/pbs-3", `{"channel":["PBS"]}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/pbs-4", `{"channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/pbs-4", `{"channel":["PBS"]}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/abc-1", `{"channel":["ABC"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/abc-1", `{"channel":["ABC"]}`)
assertStatus(t, response, 201)
cacheWaiter.AddAndWait(5)
@@ -566,7 +507,7 @@ func postChangesAdminChannelGrant(t *testing.T, it *indexTester) {
}
// Issue simple changes request
- changesResponse := it.Send(requestByUser("GET", "/db/_changes", "", "bernard"))
+ changesResponse := rt.Send(requestByUser("GET", "/db/_changes", "", "bernard"))
assertStatus(t, changesResponse, 200)
log.Printf("Response:%+v", changesResponse.Body)
@@ -575,13 +516,13 @@ func postChangesAdminChannelGrant(t *testing.T, it *indexTester) {
require.Len(t, changes.Results, 1)
// Update the user doc to grant access to PBS
- response = it.SendAdminRequest("PUT", "/db/_user/bernard", `{"admin_channels":["ABC", "PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/_user/bernard", `{"admin_channels":["ABC", "PBS"]}`)
assertStatus(t, response, 200)
time.Sleep(500 * time.Millisecond)
// Issue a new changes request with since=last_seq ensure that user receives all records for channel PBS
- changesResponse = it.Send(requestByUser("GET", fmt.Sprintf("/db/_changes?since=%s", changes.Last_Seq),
+ changesResponse = rt.Send(requestByUser("GET", fmt.Sprintf("/db/_changes?since=%s", changes.Last_Seq),
"", "bernard"))
assertStatus(t, changesResponse, 200)
@@ -594,15 +535,15 @@ func postChangesAdminChannelGrant(t *testing.T, it *indexTester) {
require.Len(t, changes.Results, 5) // 4 PBS docs, plus the updated user doc
// Write a few more docs
- response = it.SendAdminRequest("PUT", "/db/pbs-5", `{"channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/pbs-5", `{"channel":["PBS"]}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/abc-2", `{"channel":["ABC"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/abc-2", `{"channel":["ABC"]}`)
assertStatus(t, response, 201)
cacheWaiter.AddAndWait(2)
// Issue another changes request - ensure we don't backfill again
- changesResponse = it.Send(requestByUser("GET", fmt.Sprintf("/db/_changes?since=%s", changes.Last_Seq),
+ changesResponse = rt.Send(requestByUser("GET", fmt.Sprintf("/db/_changes?since=%s", changes.Last_Seq),
"", "bernard"))
assertStatus(t, changesResponse, 200)
log.Printf("Response:%+v", changesResponse.Body)
@@ -1269,9 +1210,9 @@ func _testConcurrentNewEditsFalseDelete(t *testing.T) {
func TestChangesActiveOnlyInteger(t *testing.T) {
defer base.SetUpTestLogging(base.LevelInfo, base.KeyChanges, base.KeyHTTP)()
- it := initIndexTester(`function(doc) {channel(doc.channel);}`, t)
- defer it.Close()
- changesActiveOnly(t, it)
+ rt := NewRestTester(t, &RestTesterConfig{SyncFn: `function(doc) {channel(doc.channel);}`})
+ defer rt.Close()
+ changesActiveOnly(t, rt)
}
func TestOneShotChangesWithExplicitDocIds(t *testing.T) {
@@ -1691,42 +1632,42 @@ func TestChangesIncludeDocs(t *testing.T) {
}
// Test _changes with channel filter
-func changesActiveOnly(t *testing.T, it *indexTester) {
+func changesActiveOnly(t *testing.T, rt *RestTester) {
// Create user:
- a := it.ServerContext().Database("db").Authenticator()
+ a := rt.ServerContext().Database("db").Authenticator()
bernard, err := a.NewUser("bernard", "letmein", channels.SetOf(t, "PBS", "ABC"))
assert.NoError(t, err)
assert.NoError(t, a.Save(bernard))
// Put several documents
var body db.Body
- response := it.SendAdminRequest("PUT", "/db/deletedDoc", `{"channel":["PBS"]}`)
+ response := rt.SendAdminRequest("PUT", "/db/deletedDoc", `{"channel":["PBS"]}`)
assert.NoError(t, base.JSONUnmarshal(response.Body.Bytes(), &body))
deletedRev := body["rev"].(string)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/removedDoc", `{"channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/removedDoc", `{"channel":["PBS"]}`)
assert.NoError(t, base.JSONUnmarshal(response.Body.Bytes(), &body))
removedRev := body["rev"].(string)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/activeDoc", `{"channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/activeDoc", `{"channel":["PBS"]}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/partialRemovalDoc", `{"channel":["PBS","ABC"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/partialRemovalDoc", `{"channel":["PBS","ABC"]}`)
assert.NoError(t, base.JSONUnmarshal(response.Body.Bytes(), &body))
partialRemovalRev := body["rev"].(string)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/conflictedDoc", `{"channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/conflictedDoc", `{"channel":["PBS"]}`)
assertStatus(t, response, 201)
// Create a conflict, then tombstone it
- response = it.SendAdminRequest("POST", "/db/_bulk_docs", `{"docs":[{"_id":"conflictedDoc","channel":["PBS"], "_rev":"1-conflictTombstone"}], "new_edits":false}`)
+ response = rt.SendAdminRequest("POST", "/db/_bulk_docs", `{"docs":[{"_id":"conflictedDoc","channel":["PBS"], "_rev":"1-conflictTombstone"}], "new_edits":false}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("DELETE", "/db/conflictedDoc?rev=1-conflictTombstone", "")
+ response = rt.SendAdminRequest("DELETE", "/db/conflictedDoc?rev=1-conflictTombstone", "")
assertStatus(t, response, 200)
// Create a conflict, and don't tombstone it
- response = it.SendAdminRequest("POST", "/db/_bulk_docs", `{"docs":[{"_id":"conflictedDoc","channel":["PBS"], "_rev":"1-conflictActive"}], "new_edits":false}`)
+ response = rt.SendAdminRequest("POST", "/db/_bulk_docs", `{"docs":[{"_id":"conflictedDoc","channel":["PBS"], "_rev":"1-conflictActive"}], "new_edits":false}`)
assertStatus(t, response, 201)
var changes struct {
@@ -1736,28 +1677,28 @@ func changesActiveOnly(t *testing.T, it *indexTester) {
// Pre-delete changes
changesJSON := `{"style":"all_docs"}`
- changesResponse := it.Send(requestByUser("POST", "/db/_changes", changesJSON, "bernard"))
+ changesResponse := rt.Send(requestByUser("POST", "/db/_changes", changesJSON, "bernard"))
err = base.JSONUnmarshal(changesResponse.Body.Bytes(), &changes)
assert.NoError(t, err, "Error unmarshalling changes response")
require.Len(t, changes.Results, 5)
// Delete
- response = it.SendAdminRequest("DELETE", fmt.Sprintf("/db/deletedDoc?rev=%s", deletedRev), "")
+ response = rt.SendAdminRequest("DELETE", fmt.Sprintf("/db/deletedDoc?rev=%s", deletedRev), "")
assertStatus(t, response, 200)
// Removed
- response = it.SendAdminRequest("PUT", "/db/removedDoc", fmt.Sprintf(`{"_rev":%q, "channel":["HBO"]}`, removedRev))
+ response = rt.SendAdminRequest("PUT", "/db/removedDoc", fmt.Sprintf(`{"_rev":%q, "channel":["HBO"]}`, removedRev))
assertStatus(t, response, 201)
// Partially removed
- response = it.SendAdminRequest("PUT", "/db/partialRemovalDoc", fmt.Sprintf(`{"_rev":%q, "channel":["PBS"]}`, partialRemovalRev))
+ response = rt.SendAdminRequest("PUT", "/db/partialRemovalDoc", fmt.Sprintf(`{"_rev":%q, "channel":["PBS"]}`, partialRemovalRev))
assertStatus(t, response, 201)
time.Sleep(100 * time.Millisecond)
// Normal changes
changesJSON = `{"style":"all_docs"}`
- changesResponse = it.Send(requestByUser("POST", "/db/_changes", changesJSON, "bernard"))
+ changesResponse = rt.Send(requestByUser("POST", "/db/_changes", changesJSON, "bernard"))
err = base.JSONUnmarshal(changesResponse.Body.Bytes(), &changes)
assert.NoError(t, err, "Error unmarshalling changes response")
require.Len(t, changes.Results, 5)
@@ -1771,7 +1712,7 @@ func changesActiveOnly(t *testing.T, it *indexTester) {
// Active only, POST
changesJSON = `{"style":"all_docs", "active_only":true}`
changes.Results = nil
- changesResponse = it.Send(requestByUser("POST", "/db/_changes", changesJSON, "bernard"))
+ changesResponse = rt.Send(requestByUser("POST", "/db/_changes", changesJSON, "bernard"))
err = base.JSONUnmarshal(changesResponse.Body.Bytes(), &changes)
assert.NoError(t, err, "Error unmarshalling changes response")
require.Len(t, changes.Results, 3)
@@ -1784,7 +1725,7 @@ func changesActiveOnly(t *testing.T, it *indexTester) {
}
// Active only, GET
changes.Results = nil
- changesResponse = it.Send(requestByUser("GET", "/db/_changes?style=all_docs&active_only=true", "", "bernard"))
+ changesResponse = rt.Send(requestByUser("GET", "/db/_changes?style=all_docs&active_only=true", "", "bernard"))
err = base.JSONUnmarshal(changesResponse.Body.Bytes(), &changes)
assert.NoError(t, err, "Error unmarshalling changes response")
require.Len(t, changes.Results, 3)
@@ -2332,8 +2273,6 @@ func TestChangesViewBackfillSlowQuery(t *testing.T) {
// Set up PostQueryCallback on bucket - will be invoked when changes triggers the cache backfill view query
- leakyBucket, ok := rt.Bucket().(*base.LeakyBucket)
- assert.True(t, ok, "Bucket was not of type LeakyBucket")
postQueryCallback := func(ddoc, viewName string, params map[string]interface{}) {
log.Printf("Got callback for %s, %s, %v", ddoc, viewName, params)
// Check which channel the callback was invoked for
@@ -2353,6 +2292,10 @@ func TestChangesViewBackfillSlowQuery(t *testing.T) {
}
}
+
+ leakyBucket, ok := rt.testBucket.Bucket.(*base.LeakyBucket)
+ require.True(t, ok)
+
leakyBucket.SetPostQueryCallback(postQueryCallback)
// Issue a since=0 changes request. Will cause the following:
@@ -2400,10 +2343,10 @@ func TestChangesActiveOnlyWithLimit(t *testing.T) {
defer base.SetUpTestLogging(base.LevelInfo, base.KeyHTTP, base.KeyChanges)()
- it := initIndexTester(`function(doc) {channel(doc.channel);}`, t)
- defer it.Close()
+ rt := NewRestTester(t, &RestTesterConfig{SyncFn: `function(doc) {channel(doc.channel);}`})
+ defer rt.Close()
- testDb := it.ServerContext().Database("db")
+ testDb := rt.ServerContext().Database("db")
// Create user:
a := testDb.Authenticator()
@@ -2415,33 +2358,33 @@ func TestChangesActiveOnlyWithLimit(t *testing.T) {
// Put several documents
var body db.Body
- response := it.SendAdminRequest("PUT", "/db/deletedDoc", `{"channel":["PBS"]}`)
+ response := rt.SendAdminRequest("PUT", "/db/deletedDoc", `{"channel":["PBS"]}`)
assert.NoError(t, base.JSONUnmarshal(response.Body.Bytes(), &body))
deletedRev := body["rev"].(string)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/removedDoc", `{"channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/removedDoc", `{"channel":["PBS"]}`)
assert.NoError(t, base.JSONUnmarshal(response.Body.Bytes(), &body))
removedRev := body["rev"].(string)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/activeDoc0", `{"channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/activeDoc0", `{"channel":["PBS"]}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/partialRemovalDoc", `{"channel":["PBS","ABC"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/partialRemovalDoc", `{"channel":["PBS","ABC"]}`)
assert.NoError(t, base.JSONUnmarshal(response.Body.Bytes(), &body))
partialRemovalRev := body["rev"].(string)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/conflictedDoc", `{"channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/conflictedDoc", `{"channel":["PBS"]}`)
assertStatus(t, response, 201)
// Create a conflict, then tombstone it
- response = it.SendAdminRequest("POST", "/db/_bulk_docs", `{"docs":[{"_id":"conflictedDoc","channel":["PBS"], "_rev":"1-conflictTombstone"}], "new_edits":false}`)
+ response = rt.SendAdminRequest("POST", "/db/_bulk_docs", `{"docs":[{"_id":"conflictedDoc","channel":["PBS"], "_rev":"1-conflictTombstone"}], "new_edits":false}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("DELETE", "/db/conflictedDoc?rev=1-conflictTombstone", "")
+ response = rt.SendAdminRequest("DELETE", "/db/conflictedDoc?rev=1-conflictTombstone", "")
assertStatus(t, response, 200)
// Create a conflict, and don't tombstone it
- response = it.SendAdminRequest("POST", "/db/_bulk_docs", `{"docs":[{"_id":"conflictedDoc","channel":["PBS"], "_rev":"1-conflictActive"}], "new_edits":false}`)
+ response = rt.SendAdminRequest("POST", "/db/_bulk_docs", `{"docs":[{"_id":"conflictedDoc","channel":["PBS"], "_rev":"1-conflictActive"}], "new_edits":false}`)
assertStatus(t, response, 201)
var changes struct {
@@ -2452,40 +2395,40 @@ func TestChangesActiveOnlyWithLimit(t *testing.T) {
// Pre-delete changes
changesJSON := `{"style":"all_docs"}`
- changesResponse := it.Send(requestByUser("POST", "/db/_changes", changesJSON, "bernard"))
+ changesResponse := rt.Send(requestByUser("POST", "/db/_changes", changesJSON, "bernard"))
err = base.JSONUnmarshal(changesResponse.Body.Bytes(), &changes)
assert.NoError(t, err, "Error unmarshalling changes response")
require.Len(t, changes.Results, 5)
// Delete
- response = it.SendAdminRequest("DELETE", fmt.Sprintf("/db/deletedDoc?rev=%s", deletedRev), "")
+ response = rt.SendAdminRequest("DELETE", fmt.Sprintf("/db/deletedDoc?rev=%s", deletedRev), "")
assertStatus(t, response, 200)
// Removed
- response = it.SendAdminRequest("PUT", "/db/removedDoc", fmt.Sprintf(`{"_rev":%q, "channel":["HBO"]}`, removedRev))
+ response = rt.SendAdminRequest("PUT", "/db/removedDoc", fmt.Sprintf(`{"_rev":%q, "channel":["HBO"]}`, removedRev))
assertStatus(t, response, 201)
// Partially removed
- response = it.SendAdminRequest("PUT", "/db/partialRemovalDoc", fmt.Sprintf(`{"_rev":%q, "channel":["PBS"]}`, partialRemovalRev))
+ response = rt.SendAdminRequest("PUT", "/db/partialRemovalDoc", fmt.Sprintf(`{"_rev":%q, "channel":["PBS"]}`, partialRemovalRev))
assertStatus(t, response, 201)
//Create additional active docs
- response = it.SendAdminRequest("PUT", "/db/activeDoc1", `{"channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/activeDoc1", `{"channel":["PBS"]}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/activeDoc2", `{"channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/activeDoc2", `{"channel":["PBS"]}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/activeDoc3", `{"channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/activeDoc3", `{"channel":["PBS"]}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/activeDoc4", `{"channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/activeDoc4", `{"channel":["PBS"]}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/activeDoc5", `{"channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/activeDoc5", `{"channel":["PBS"]}`)
assertStatus(t, response, 201)
cacheWaiter.AddAndWait(8)
// Normal changes
changesJSON = `{"style":"all_docs"}`
- changesResponse = it.Send(requestByUser("POST", "/db/_changes", changesJSON, "bernard"))
+ changesResponse = rt.Send(requestByUser("POST", "/db/_changes", changesJSON, "bernard"))
err = base.JSONUnmarshal(changesResponse.Body.Bytes(), &changes)
assert.NoError(t, err, "Error unmarshalling changes response")
require.Len(t, changes.Results, 10)
@@ -2499,7 +2442,7 @@ func TestChangesActiveOnlyWithLimit(t *testing.T) {
// Active only NO Limit, POST
changesJSON = `{"style":"all_docs", "active_only":true}`
changes.Results = nil
- changesResponse = it.Send(requestByUser("POST", "/db/_changes", changesJSON, "bernard"))
+ changesResponse = rt.Send(requestByUser("POST", "/db/_changes", changesJSON, "bernard"))
err = base.JSONUnmarshal(changesResponse.Body.Bytes(), &changes)
assert.NoError(t, err, "Error unmarshalling changes response")
require.Len(t, changes.Results, 8)
@@ -2514,7 +2457,7 @@ func TestChangesActiveOnlyWithLimit(t *testing.T) {
// Active only with Limit, POST
changesJSON = `{"style":"all_docs", "active_only":true, "limit":5}`
changes.Results = nil
- changesResponse = it.Send(requestByUser("POST", "/db/_changes", changesJSON, "bernard"))
+ changesResponse = rt.Send(requestByUser("POST", "/db/_changes", changesJSON, "bernard"))
err = base.JSONUnmarshal(changesResponse.Body.Bytes(), &changes)
assert.NoError(t, err, "Error unmarshalling changes response")
require.Len(t, changes.Results, 5)
@@ -2527,7 +2470,7 @@ func TestChangesActiveOnlyWithLimit(t *testing.T) {
}
// Active only with Limit, GET
changes.Results = nil
- changesResponse = it.Send(requestByUser("GET", "/db/_changes?style=all_docs&active_only=true&limit=5", "", "bernard"))
+ changesResponse = rt.Send(requestByUser("GET", "/db/_changes?style=all_docs&active_only=true&limit=5", "", "bernard"))
err = base.JSONUnmarshal(changesResponse.Body.Bytes(), &changes)
assert.NoError(t, err, "Error unmarshalling changes response")
require.Len(t, changes.Results, 5)
@@ -2541,7 +2484,7 @@ func TestChangesActiveOnlyWithLimit(t *testing.T) {
// Active only with Limit set higher than number of revisions, POST
changesJSON = `{"style":"all_docs", "active_only":true, "limit":15}`
changes.Results = nil
- changesResponse = it.Send(requestByUser("POST", "/db/_changes", changesJSON, "bernard"))
+ changesResponse = rt.Send(requestByUser("POST", "/db/_changes", changesJSON, "bernard"))
err = base.JSONUnmarshal(changesResponse.Body.Bytes(), &changes)
assert.NoError(t, err, "Error unmarshalling changes response")
require.Len(t, changes.Results, 8)
@@ -2561,45 +2504,45 @@ func TestChangesActiveOnlyWithLimitAndViewBackfill(t *testing.T) {
defer base.SetUpTestLogging(base.LevelInfo, base.KeyHTTP, base.KeyChanges, base.KeyCache)()
- it := initIndexTester(`function(doc) {channel(doc.channel);}`, t)
- defer it.Close()
+ rt := NewRestTester(t, &RestTesterConfig{SyncFn: `function(doc) {channel(doc.channel);}`})
+ defer rt.Close()
// Create user:
- a := it.ServerContext().Database("db").Authenticator()
+ a := rt.ServerContext().Database("db").Authenticator()
bernard, err := a.NewUser("bernard", "letmein", channels.SetOf(t, "PBS", "ABC"))
assert.NoError(t, err)
assert.NoError(t, a.Save(bernard))
- cacheWaiter := it.ServerContext().Database("db").NewDCPCachingCountWaiter(t)
+ cacheWaiter := rt.ServerContext().Database("db").NewDCPCachingCountWaiter(t)
// Put several documents
var body db.Body
- response := it.SendAdminRequest("PUT", "/db/deletedDoc", `{"channel":["PBS"]}`)
+ response := rt.SendAdminRequest("PUT", "/db/deletedDoc", `{"channel":["PBS"]}`)
assert.NoError(t, base.JSONUnmarshal(response.Body.Bytes(), &body))
deletedRev := body["rev"].(string)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/removedDoc", `{"channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/removedDoc", `{"channel":["PBS"]}`)
assert.NoError(t, base.JSONUnmarshal(response.Body.Bytes(), &body))
removedRev := body["rev"].(string)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/activeDoc0", `{"channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/activeDoc0", `{"channel":["PBS"]}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/partialRemovalDoc", `{"channel":["PBS","ABC"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/partialRemovalDoc", `{"channel":["PBS","ABC"]}`)
assert.NoError(t, base.JSONUnmarshal(response.Body.Bytes(), &body))
partialRemovalRev := body["rev"].(string)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/conflictedDoc", `{"channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/conflictedDoc", `{"channel":["PBS"]}`)
assertStatus(t, response, 201)
// Create a conflict, then tombstone it
- response = it.SendAdminRequest("POST", "/db/_bulk_docs", `{"docs":[{"_id":"conflictedDoc","channel":["PBS"], "_rev":"1-conflictTombstone"}], "new_edits":false}`)
+ response = rt.SendAdminRequest("POST", "/db/_bulk_docs", `{"docs":[{"_id":"conflictedDoc","channel":["PBS"], "_rev":"1-conflictTombstone"}], "new_edits":false}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("DELETE", "/db/conflictedDoc?rev=1-conflictTombstone", "")
+ response = rt.SendAdminRequest("DELETE", "/db/conflictedDoc?rev=1-conflictTombstone", "")
assertStatus(t, response, 200)
// Create a conflict, and don't tombstone it
- response = it.SendAdminRequest("POST", "/db/_bulk_docs", `{"docs":[{"_id":"conflictedDoc","channel":["PBS"], "_rev":"1-conflictActive"}], "new_edits":false}`)
+ response = rt.SendAdminRequest("POST", "/db/_bulk_docs", `{"docs":[{"_id":"conflictedDoc","channel":["PBS"], "_rev":"1-conflictActive"}], "new_edits":false}`)
assertStatus(t, response, 201)
var changes struct {
@@ -2611,40 +2554,40 @@ func TestChangesActiveOnlyWithLimitAndViewBackfill(t *testing.T) {
// Get pre-delete changes
changesJSON := `{"style":"all_docs"}`
- changesResponse := it.Send(requestByUser("POST", "/db/_changes", changesJSON, "bernard"))
+ changesResponse := rt.Send(requestByUser("POST", "/db/_changes", changesJSON, "bernard"))
err = base.JSONUnmarshal(changesResponse.Body.Bytes(), &changes)
assert.NoError(t, err, "Error unmarshalling changes response")
require.Len(t, changes.Results, 5)
// Delete
- response = it.SendAdminRequest("DELETE", fmt.Sprintf("/db/deletedDoc?rev=%s", deletedRev), "")
+ response = rt.SendAdminRequest("DELETE", fmt.Sprintf("/db/deletedDoc?rev=%s", deletedRev), "")
assertStatus(t, response, 200)
// Removed
- response = it.SendAdminRequest("PUT", "/db/removedDoc", fmt.Sprintf(`{"_rev":%q, "channel":["HBO"]}`, removedRev))
+ response = rt.SendAdminRequest("PUT", "/db/removedDoc", fmt.Sprintf(`{"_rev":%q, "channel":["HBO"]}`, removedRev))
assertStatus(t, response, 201)
// Partially removed
- response = it.SendAdminRequest("PUT", "/db/partialRemovalDoc", fmt.Sprintf(`{"_rev":%q, "channel":["PBS"]}`, partialRemovalRev))
+ response = rt.SendAdminRequest("PUT", "/db/partialRemovalDoc", fmt.Sprintf(`{"_rev":%q, "channel":["PBS"]}`, partialRemovalRev))
assertStatus(t, response, 201)
//Create additional active docs
- response = it.SendAdminRequest("PUT", "/db/activeDoc1", `{"channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/activeDoc1", `{"channel":["PBS"]}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/activeDoc2", `{"channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/activeDoc2", `{"channel":["PBS"]}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/activeDoc3", `{"channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/activeDoc3", `{"channel":["PBS"]}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/activeDoc4", `{"channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/activeDoc4", `{"channel":["PBS"]}`)
assertStatus(t, response, 201)
- response = it.SendAdminRequest("PUT", "/db/activeDoc5", `{"channel":["PBS"]}`)
+ response = rt.SendAdminRequest("PUT", "/db/activeDoc5", `{"channel":["PBS"]}`)
assertStatus(t, response, 201)
cacheWaiter.AddAndWait(8)
// Normal changes
changesJSON = `{"style":"all_docs"}`
- changesResponse = it.Send(requestByUser("POST", "/db/_changes", changesJSON, "bernard"))
+ changesResponse = rt.Send(requestByUser("POST", "/db/_changes", changesJSON, "bernard"))
err = base.JSONUnmarshal(changesResponse.Body.Bytes(), &changes)
assert.NoError(t, err, "Error unmarshalling changes response")
require.Len(t, changes.Results, 10)
@@ -2656,12 +2599,12 @@ func TestChangesActiveOnlyWithLimitAndViewBackfill(t *testing.T) {
}
// Active only NO Limit, POST
- testDb := it.ServerContext().Database("db")
+ testDb := rt.ServerContext().Database("db")
assert.NoError(t, testDb.FlushChannelCache())
changesJSON = `{"style":"all_docs", "active_only":true}`
changes.Results = nil
- changesResponse = it.Send(requestByUser("POST", "/db/_changes", changesJSON, "bernard"))
+ changesResponse = rt.Send(requestByUser("POST", "/db/_changes", changesJSON, "bernard"))
err = base.JSONUnmarshal(changesResponse.Body.Bytes(), &changes)
assert.NoError(t, err, "Error unmarshalling changes response")
require.Len(t, changes.Results, 8)
@@ -2677,7 +2620,7 @@ func TestChangesActiveOnlyWithLimitAndViewBackfill(t *testing.T) {
assert.NoError(t, testDb.FlushChannelCache())
changesJSON = `{"style":"all_docs", "active_only":true, "limit":5}`
changes.Results = nil
- changesResponse = it.Send(requestByUser("POST", "/db/_changes", changesJSON, "bernard"))
+ changesResponse = rt.Send(requestByUser("POST", "/db/_changes", changesJSON, "bernard"))
err = base.JSONUnmarshal(changesResponse.Body.Bytes(), &changes)
assert.NoError(t, err, "Error unmarshalling changes response")
require.Len(t, changes.Results, 5)
@@ -2692,7 +2635,7 @@ func TestChangesActiveOnlyWithLimitAndViewBackfill(t *testing.T) {
// Active only with Limit, GET
assert.NoError(t, testDb.FlushChannelCache())
changes.Results = nil
- changesResponse = it.Send(requestByUser("GET", "/db/_changes?style=all_docs&active_only=true&limit=5", "", "bernard"))
+ changesResponse = rt.Send(requestByUser("GET", "/db/_changes?style=all_docs&active_only=true&limit=5", "", "bernard"))
err = base.JSONUnmarshal(changesResponse.Body.Bytes(), &changes)
assert.NoError(t, err, "Error unmarshalling changes response")
require.Len(t, changes.Results, 5)
@@ -2707,7 +2650,7 @@ func TestChangesActiveOnlyWithLimitAndViewBackfill(t *testing.T) {
assert.NoError(t, testDb.FlushChannelCache())
changesJSON = `{"style":"all_docs", "active_only":true, "limit":15}`
changes.Results = nil
- changesResponse = it.Send(requestByUser("POST", "/db/_changes", changesJSON, "bernard"))
+ changesResponse = rt.Send(requestByUser("POST", "/db/_changes", changesJSON, "bernard"))
err = base.JSONUnmarshal(changesResponse.Body.Bytes(), &changes)
assert.NoError(t, err, "Error unmarshalling changes response")
require.Len(t, changes.Results, 8)
@@ -2722,10 +2665,10 @@ func TestChangesActiveOnlyWithLimitAndViewBackfill(t *testing.T) {
// No limit active only, GET, followed by normal (https://github.com/couchbase/sync_gateway/issues/2955)
assert.NoError(t, testDb.FlushChannelCache())
changes.Results = nil
- changesResponse = it.Send(requestByUser("GET", "/db/_changes?style=all_docs&active_only=true", "", "bernard"))
+ changesResponse = rt.Send(requestByUser("GET", "/db/_changes?style=all_docs&active_only=true", "", "bernard"))
err = base.JSONUnmarshal(changesResponse.Body.Bytes(), &changes)
assert.NoError(t, err, "Error unmarshalling changes response")
- assert.Equal(t, 8, len(changes.Results))
+ require.Len(t, changes.Results, 8)
for _, entry := range changes.Results {
log.Printf("Entry:%+v", entry)
if entry.ID == "conflictedDoc" {
@@ -2737,7 +2680,7 @@ func TestChangesActiveOnlyWithLimitAndViewBackfill(t *testing.T) {
Results []db.ChangeEntry
Last_Seq interface{}
}
- changesResponse = it.Send(requestByUser("GET", "/db/_changes", "", "bernard"))
+ changesResponse = rt.Send(requestByUser("GET", "/db/_changes", "", "bernard"))
err = base.JSONUnmarshal(changesResponse.Body.Bytes(), &updatedChanges)
assert.NoError(t, err, "Error unmarshalling changes response")
require.Len(t, updatedChanges.Results, 10)
@@ -3200,7 +3143,7 @@ func TestTombstoneCompaction(t *testing.T) {
defer rt.Close()
compactionTotal := 0
- queryTotal := 0
+ expectedBatches := 0
TestCompact := func(numDocs int) {
@@ -3225,8 +3168,15 @@ func TestTombstoneCompaction(t *testing.T) {
compactionTotal += numDocs
assert.Equal(t, compactionTotal, int(base.ExpvarVar2Int(rt.GetDatabase().DbStats.StatsDatabase().Get(base.StatKeyNumTombstonesCompacted))))
- queryTotal += numDocs/db.QueryTombstoneBatch + 1
- assert.Equal(t, queryTotal, int(base.ExpvarVar2Int(rt.GetDatabase().DbStats.StatsGsiViews().Get(fmt.Sprintf(base.StatKeyN1qlQueryCountExpvarFormat, db.QueryTypeTombstones)))))
+ var actualBatches int
+ if base.TestsDisableGSI() {
+ actualBatches = int(base.ExpvarVar2Int(rt.GetDatabase().DbStats.StatsGsiViews().Get(fmt.Sprintf(base.StatKeyViewQueryCountExpvarFormat, db.DesignDocSyncHousekeeping(), db.ViewTombstones))))
+ } else {
+ actualBatches = int(base.ExpvarVar2Int(rt.GetDatabase().DbStats.StatsGsiViews().Get(fmt.Sprintf(base.StatKeyN1qlQueryCountExpvarFormat, db.QueryTypeTombstones))))
+ }
+
+ expectedBatches += numDocs/db.QueryTombstoneBatch + 1
+ assert.Equal(t, expectedBatches, actualBatches)
}
// Multiples of Batch Size
diff --git a/rest/config_test.go b/rest/config_test.go
index 609baee605..ac4a21a5a0 100644
--- a/rest/config_test.go
+++ b/rest/config_test.go
@@ -811,41 +811,44 @@ func TestValidateServerContext(t *testing.T) {
t.Skip("Skipping this test; requires Couchbase Bucket")
}
- var (
- couchbaseURL = base.UnitTestUrl()
- testDataBucket = base.DefaultTestBucketname
- testIndexBucket = base.DefaultTestIndexBucketname
- username = base.DefaultCouchbaseAdministrator
- password = base.DefaultCouchbasePassword
- )
+ tb1 := base.GetTestBucket(t)
+ defer tb1.Close()
+ tb2 := base.GetTestBucket(t)
+ defer tb2.Close()
+
+ tb1User, tb1Password, _ := tb1.BucketSpec.Auth.GetCredentials()
+ tb2User, tb2Password, _ := tb2.BucketSpec.Auth.GetCredentials()
config = &ServerConfig{
Databases: map[string]*DbConfig{
"db1": {
BucketConfig: BucketConfig{
- Server: &couchbaseURL,
- Bucket: &testDataBucket,
- Username: username,
- Password: password,
+ Server: &tb1.BucketSpec.Server,
+ Bucket: &tb1.BucketSpec.BucketName,
+ Username: tb1User,
+ Password: tb1Password,
},
+ UseViews: base.TestsDisableGSI(),
NumIndexReplicas: base.UintPtr(0),
},
"db2": {
BucketConfig: BucketConfig{
- Server: &couchbaseURL,
- Bucket: &testDataBucket,
- Username: username,
- Password: password,
+ Server: &tb1.BucketSpec.Server,
+ Bucket: &tb1.BucketSpec.BucketName,
+ Username: tb1User,
+ Password: tb1Password,
},
+ UseViews: base.TestsDisableGSI(),
NumIndexReplicas: base.UintPtr(0),
},
"db3": {
BucketConfig: BucketConfig{
- Server: &couchbaseURL,
- Bucket: &testIndexBucket,
- Username: username,
- Password: password,
+ Server: &tb2.BucketSpec.Server,
+ Bucket: &tb2.BucketSpec.BucketName,
+ Username: tb2User,
+ Password: tb2Password,
},
+ UseViews: base.TestsDisableGSI(),
NumIndexReplicas: base.UintPtr(0),
},
},
@@ -863,7 +866,7 @@ func TestValidateServerContext(t *testing.T) {
sharedBucketErrors := validateServerContext(sc)
SharedBucketError, ok := sharedBucketErrors[0].(*SharedBucketError)
require.True(t, ok)
- assert.Equal(t, testDataBucket, SharedBucketError.GetSharedBucket().bucketName)
+ assert.Equal(t, tb1.BucketSpec.BucketName, SharedBucketError.GetSharedBucket().bucketName)
assert.Subset(t, []string{"db1", "db2"}, SharedBucketError.GetSharedBucket().dbNames)
}
diff --git a/rest/main_test.go b/rest/main_test.go
new file mode 100644
index 0000000000..bca1dbb387
--- /dev/null
+++ b/rest/main_test.go
@@ -0,0 +1,19 @@
+package rest
+
+import (
+ "os"
+ "testing"
+
+ "github.com/couchbase/sync_gateway/base"
+ "github.com/couchbase/sync_gateway/db"
+)
+
+func TestMain(m *testing.M) {
+ base.GTestBucketPool = base.NewTestBucketPool(db.ViewsAndGSIBucketReadier, db.ViewsAndGSIBucketInit)
+
+ status := m.Run()
+
+ base.GTestBucketPool.Close()
+
+ os.Exit(status)
+}
diff --git a/rest/oidc_api.go b/rest/oidc_api.go
index ac2eb249d9..cd43e495a1 100644
--- a/rest/oidc_api.go
+++ b/rest/oidc_api.go
@@ -10,6 +10,7 @@
package rest
import (
+ "errors"
"fmt"
"net/http"
"net/url"
@@ -24,13 +25,26 @@ const (
OIDC_AUTH_RESPONSE_TYPE = "response_type"
OIDC_AUTH_CLIENT_ID = "client_id"
OIDC_AUTH_SCOPE = "scope"
- OIDC_AUTH_REDIRECT_URI = "redirect_uri"
OIDC_AUTH_STATE = "state"
+ // Request parameter to specify the OpenID Connect provider to be used for authentication,
+ // from the list of providers defined in the Sync Gateway configuration.
+ oidcAuthProvider = "provider"
+
+ // Request parameter to specify the URL to which you want the end-user to be redirected
+ // after the authorization is complete.
+ oidcAuthRedirectURI = "redirect_uri"
+
OIDC_RESPONSE_TYPE_CODE = "code"
OIDC_RESPONSE_TYPE_IMPLICIT = "id_token%20token"
)
+// Error codes returned by failures to add parameters to callback URL.
+var (
+ ErrBadCallbackURL = errors.New("oidc: callback URL must not be nil")
+ ErrNoRedirectURI = errors.New("oidc: no redirect_uri parameter found in URL")
+)
+
type OIDCTokenResponse struct {
IDToken string `json:"id_token"` // ID token, from OP
RefreshToken string `json:"refresh_token,omitempty"` // Refresh token, from OP
@@ -99,9 +113,44 @@ func (h *handler) handleOIDCCommon() (redirectURLString string, err error) {
return redirectURLString, err
}
+ if !provider.IsDefault {
+ base.Debugf(base.KeyAuth, "Adding provider (%v) to callback URL", base.UD(provider.Name))
+ if err = addCallbackURLQueryParam(redirectURL, oidcAuthProvider, provider.Name); err != nil {
+ base.Errorf("Failed to add provider to callback URL, err: %v", err)
+ }
+ base.Debugf(base.KeyAuth, "Callback URL: %s", redirectURL.String())
+ }
+
return redirectURL.String(), nil
}
+func addCallbackURLQueryParam(uri *url.URL, name, value string) error {
+ if uri == nil {
+ return ErrBadCallbackURL
+ }
+ rawQuery, err := url.ParseQuery(uri.RawQuery)
+ if err != nil {
+ return err
+ }
+ redirectURL := rawQuery.Get(oidcAuthRedirectURI)
+ if redirectURL == "" {
+ return ErrNoRedirectURI
+ }
+ redirectURI, err := url.Parse(redirectURL)
+ if err != nil {
+ return err
+ }
+ rawQueryRedirectURI, err := url.ParseQuery(redirectURI.RawQuery)
+ if err != nil {
+ return err
+ }
+ rawQueryRedirectURI.Set(name, value)
+ redirectURI.RawQuery = rawQueryRedirectURI.Encode()
+ rawQuery.Set(oidcAuthRedirectURI, redirectURI.String())
+ uri.RawQuery = rawQuery.Encode()
+ return nil
+}
+
func (h *handler) handleOIDCCallback() error {
callbackError := h.getQuery("error")
if callbackError != "" {
@@ -228,7 +277,8 @@ func (h *handler) getOIDCProvider(providerName string) (*auth.OIDCProvider, erro
// Builds the OIDC callback based on the current request and database. Used during OIDC Client lazy initialization. Needs to pass
// in dbName, as it's not necessarily initialized on the request yet.
-func (h *handler) getOIDCCallbackURL() string {
+func (h *handler) getOIDCCallbackURL(providerName string, isDefault bool) string {
+ callbackPath := "_oidc_callback"
scheme := "http"
if h.rq.TLS != nil {
scheme = "https"
@@ -236,7 +286,9 @@ func (h *handler) getOIDCCallbackURL() string {
if dbName := h.PathVar("db"); dbName == "" {
base.Warnf("Can't calculate OIDC callback URL without DB in path.")
return ""
+ } else if !isDefault && providerName != "" {
+ return fmt.Sprintf("%s://%s/%s/%s?%s=%s", scheme, h.rq.Host, dbName, callbackPath, auth.OIDCAuthProvider, providerName)
} else {
- return fmt.Sprintf("%s://%s/%s/%s", scheme, h.rq.Host, dbName, "_oidc_callback")
+ return fmt.Sprintf("%s://%s/%s/%s", scheme, h.rq.Host, dbName, callbackPath)
}
}
diff --git a/rest/oidc_api_test.go b/rest/oidc_api_test.go
new file mode 100644
index 0000000000..2817b18c6a
--- /dev/null
+++ b/rest/oidc_api_test.go
@@ -0,0 +1,61 @@
+package rest
+
+import (
+ "net/url"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestAddCallbackURLQueryParam(t *testing.T) {
+ var oidcAuthProviderGoogle = "google"
+ tests := []struct {
+ name string
+ inputURL string
+ inputParamName string
+ inputParamValue string
+ wantURL string
+ wantError error
+ }{{
+ name: "Add provider parameter to callback URL",
+ inputURL: "https://accounts.google.com/o/oauth2/v2/auth?client_id=EADGBE&redirect_uri=http%3A%2F%2Flocalhost%3A4984%2Fdefault%2F_oidc_callback&response_type=code&scope=openid+email&state=GDCEm",
+ inputParamName: oidcAuthProvider,
+ inputParamValue: oidcAuthProviderGoogle,
+ wantURL: "https://accounts.google.com/o/oauth2/v2/auth?client_id=EADGBE&redirect_uri=http%3A%2F%2Flocalhost%3A4984%2Fdefault%2F_oidc_callback%3Fprovider%3Dgoogle&response_type=code&scope=openid+email&state=GDCEm",
+ }, {
+ name: "Add provider parameter with empty value to callback URL",
+ inputURL: "https://accounts.google.com/o/oauth2/v2/auth?client_id=EADGBE&redirect_uri=http%3A%2F%2Flocalhost%3A4984%2Fdefault%2F_oidc_callback&response_type=code&scope=openid+email&state=GDCEm",
+ inputParamName: oidcAuthProvider,
+ wantURL: "https://accounts.google.com/o/oauth2/v2/auth?client_id=EADGBE&redirect_uri=http%3A%2F%2Flocalhost%3A4984%2Fdefault%2F_oidc_callback%3Fprovider%3D&response_type=code&scope=openid+email&state=GDCEm",
+ }, {
+ name: "Add provider parameter to callback URL which doesn't have redirect_uri",
+ inputURL: "https://accounts.google.com/o/oauth2/v2/auth?access_type=offline&client_id=client123&prompt=consent",
+ inputParamName: oidcAuthProvider,
+ inputParamValue: oidcAuthProviderGoogle,
+ wantURL: "https://accounts.google.com/o/oauth2/v2/auth?access_type=offline&client_id=client123&prompt=consent",
+ wantError: ErrNoRedirectURI,
+ }, {
+ name: "Add provider parameter to callback URL which has invalid redirect_uri",
+ inputURL: "https://accounts.google.com/o/oauth2/v2/auth?client_id=EADGBE&redirect_uri=http%%3A%2F%2Flocalhost%3A4984%2Fdefault%2F_oidc_callback&response_type=code&scope=openid+email&state=GDCEm",
+ inputParamName: oidcAuthProvider,
+ inputParamValue: oidcAuthProviderGoogle,
+ wantURL: "https://accounts.google.com/o/oauth2/v2/auth?client_id=EADGBE&redirect_uri=http%%3A%2F%2Flocalhost%3A4984%2Fdefault%2F_oidc_callback&response_type=code&scope=openid+email&state=GDCEm",
+ wantError: url.EscapeError("%%3"),
+ }}
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ inputURL, err := url.Parse(test.inputURL)
+ require.NoError(t, err, "Couldn't parse URL")
+ err = addCallbackURLQueryParam(inputURL, test.inputParamName, test.inputParamValue)
+ assert.Equal(t, test.wantError, err)
+ assert.Equal(t, test.wantURL, inputURL.String())
+ })
+ }
+}
+
+func TestAddCallbackURLQueryParamNoURL(t *testing.T) {
+ var oidcAuthProviderGoogle = "google"
+ err := addCallbackURLQueryParam(nil, oidcAuthProvider, oidcAuthProviderGoogle)
+ assert.Equal(t, ErrBadCallbackURL, err)
+}
diff --git a/rest/server_context.go b/rest/server_context.go
index 728664c5e8..dd07e1fd14 100644
--- a/rest/server_context.go
+++ b/rest/server_context.go
@@ -230,7 +230,10 @@ func (sc *ServerContext) PostUpgrade(preview bool) (postUpgradeResults PostUpgra
removedDDocs, _ := database.RemoveObsoleteDesignDocs(preview)
// Index cleanup
- removedIndexes, _ := database.RemoveObsoleteIndexes(preview)
+ var removedIndexes []string
+ if !base.TestsDisableGSI() {
+ removedIndexes, _ = database.RemoveObsoleteIndexes(preview)
+ }
postUpgradeResults[name] = PostUpgradeDatabaseResult{
RemovedDDocs: removedDDocs,
diff --git a/rest/utilities_testing.go b/rest/utilities_testing.go
index d51cb9d09a..cbdd23b986 100644
--- a/rest/utilities_testing.go
+++ b/rest/utilities_testing.go
@@ -30,13 +30,10 @@ import (
// are available to any package that imports rest. (if they were in a _test.go
// file, they wouldn't be publicly exported to other packages)
-var gBucketCounter = 0
-
type RestTesterConfig struct {
noAdminParty bool // Unless this is true, Admin Party is in full effect
SyncFn string // put the sync() function source in here (optional)
DatabaseConfig *DbConfig // Supports additional config options. BucketConfig, Name, Sync, Unsupported will be ignored (overridden)
- NoFlush bool // Skip bucket flush step during creation. Used by tests that need to simulate start/stop of Sync Gateway with backing bucket intact.
InitSyncSeq uint64 // If specified, initializes _sync:seq on bucket creation. Not supported when running against walrus
EnableNoConflictsMode bool // Enable no-conflicts mode. By default, conflicts will be allowed, which is the default behavior
distributedIndex bool // Test with walrus-based index bucket
@@ -45,7 +42,9 @@ type RestTesterConfig struct {
type RestTester struct {
*RestTesterConfig
tb testing.TB
- RestTesterBucket base.Bucket
+ testBucket *base.TestBucket
+ bucketInitOnce sync.Once
+ bucketDone base.AtomicBool
RestTesterServerContext *ServerContext
AdminHandler http.Handler
adminHandlerOnce sync.Once
@@ -67,14 +66,8 @@ func NewRestTester(tb testing.TB, restConfig *RestTesterConfig) *RestTester {
return &rt
}
-func NewRestTesterWithBucket(tb testing.TB, restConfig *RestTesterConfig, bucket base.Bucket) *RestTester {
- rt := NewRestTester(tb, restConfig)
- if bucket == nil {
- panic("nil bucket supplied. Use NewRestTester if you aren't supplying a bucket")
- }
- rt.RestTesterBucket = bucket
-
- return rt
+func (rt *RestTester) WithTestBucket(testBucket *base.TestBucket) {
+ rt.testBucket = testBucket
}
func (rt *RestTester) Bucket() base.Bucket {
@@ -83,151 +76,80 @@ func (rt *RestTester) Bucket() base.Bucket {
panic("RestTester not properly initialized please use NewRestTester function")
}
- if rt.RestTesterBucket != nil {
- return rt.RestTesterBucket
+ if rt.testBucket != nil {
+ return rt.testBucket.Bucket
}
- // Put this in a loop in case certain operations fail, like waiting for GSI indexes to be empty.
- // Limit number of attempts to 2.
- for i := 0; i < 2; i++ {
-
- // Initialize the bucket. For couchbase-backed tests, triggers with creation/flushing of the bucket
- if !rt.NoFlush {
- tempBucket := base.GetTestBucket(rt.tb) // side effect of creating/flushing bucket
- if rt.InitSyncSeq > 0 {
- log.Printf("Initializing %s to %d", base.SyncSeqKey, rt.InitSyncSeq)
- _, incrErr := tempBucket.Incr(base.SyncSeqKey, rt.InitSyncSeq, rt.InitSyncSeq, 0)
- if incrErr != nil {
- rt.tb.Fatalf("Error initializing %s in test bucket: %v", base.SyncSeqKey, incrErr)
- return nil
- }
- }
- tempBucket.Close()
- } else {
- if rt.InitSyncSeq > 0 {
- rt.tb.Fatal("RestTester doesn't support NoFlush and InitSyncSeq in same test")
- return nil
- }
- }
-
- spec := base.GetTestBucketSpec(base.DataBucket)
-
- username, password, _ := spec.Auth.GetCredentials()
-
- server := spec.Server
- gBucketCounter++
+ testBucket := base.GetTestBucket(rt.tb)
- var syncFnPtr *string
- if len(rt.SyncFn) > 0 {
- syncFnPtr = &rt.SyncFn
- }
-
- corsConfig := &CORSConfig{
- Origin: []string{"http://example.com", "*", "http://staging.example.com"},
- LoginOrigin: []string{"http://example.com"},
- Headers: []string{},
- MaxAge: 1728000,
- }
-
- rt.RestTesterServerContext = NewServerContext(&ServerConfig{
- CORS: corsConfig,
- Facebook: &FacebookConfig{},
- AdminInterface: &DefaultAdminInterface,
- })
-
- useXattrs := base.TestUseXattrs()
-
- if rt.DatabaseConfig == nil {
- // If no db config was passed in, create one
- rt.DatabaseConfig = &DbConfig{}
- }
-
- // Force views if running against walrus
- if !base.TestUseCouchbaseServer() {
- rt.DatabaseConfig.UseViews = true
- }
-
- // numReplicas set to 0 for test buckets, since it should assume that there may only be one indexing node.
- numReplicas := uint(0)
- rt.DatabaseConfig.NumIndexReplicas = &numReplicas
-
- rt.DatabaseConfig.BucketConfig = BucketConfig{
- Server: &server,
- Bucket: &spec.BucketName,
- Username: username,
- Password: password,
- }
- rt.DatabaseConfig.Name = "db"
- rt.DatabaseConfig.Sync = syncFnPtr
- rt.DatabaseConfig.EnableXattrs = &useXattrs
- if rt.EnableNoConflictsMode {
- boolVal := false
- rt.DatabaseConfig.AllowConflicts = &boolVal
- }
-
- _, err := rt.RestTesterServerContext.AddDatabaseFromConfig(rt.DatabaseConfig)
- if err != nil {
- rt.tb.Fatalf("Error from AddDatabaseFromConfig: %v", err)
- return nil
+ if rt.InitSyncSeq > 0 {
+ log.Printf("Initializing %s to %d", base.SyncSeqKey, rt.InitSyncSeq)
+ _, incrErr := testBucket.Incr(base.SyncSeqKey, rt.InitSyncSeq, rt.InitSyncSeq, 0)
+ if incrErr != nil {
+ rt.tb.Fatalf("Error initializing %s in test bucket: %v", base.SyncSeqKey, incrErr)
}
- rt.RestTesterBucket = rt.RestTesterServerContext.Database("db").Bucket
-
- // As long as bucket flushing wasn't disabled, wait for index to be empty (if this is a gocb bucket)
- if !rt.NoFlush {
- asGoCbBucket, isGoCbBucket := base.AsGoCBBucket(rt.RestTesterBucket)
- if isGoCbBucket {
- if err := db.WaitForIndexEmpty(asGoCbBucket, spec.UseXattrs); err != nil {
- base.Infof(base.KeyAll, "WaitForIndexEmpty returned an error: %v. Dropping indexes and retrying", err)
- // if WaitForIndexEmpty returns error, drop the indexes and retry
- if err := base.DropAllBucketIndexes(asGoCbBucket); err != nil {
- rt.tb.Fatalf("Failed to drop bucket indexes: %v", err)
- return nil
- }
-
- continue // Go to the top of the for loop to retry
- }
- }
- }
-
- if !rt.noAdminParty {
- rt.SetAdminParty(true)
- }
-
- return rt.RestTesterBucket
}
- rt.tb.Fatalf("Failed to create a RestTesterBucket after multiple attempts")
- return nil
-}
-
-func (rt *RestTester) BucketAllowEmptyPassword() base.Bucket {
+ var syncFnPtr *string
+ if len(rt.SyncFn) > 0 {
+ syncFnPtr = &rt.SyncFn
+ }
- //Create test DB with "AllowEmptyPassword" true
- server := "walrus:"
- bucketName := fmt.Sprintf("sync_gateway_test_%d", gBucketCounter)
- gBucketCounter++
+ corsConfig := &CORSConfig{
+ Origin: []string{"http://example.com", "*", "http://staging.example.com"},
+ LoginOrigin: []string{"http://example.com"},
+ Headers: []string{},
+ MaxAge: 1728000,
+ }
rt.RestTesterServerContext = NewServerContext(&ServerConfig{
- CORS: &CORSConfig{},
+ CORS: corsConfig,
Facebook: &FacebookConfig{},
AdminInterface: &DefaultAdminInterface,
})
- _, err := rt.RestTesterServerContext.AddDatabaseFromConfig(&DbConfig{
- BucketConfig: BucketConfig{
- Server: &server,
- Bucket: &bucketName},
- Name: "db",
- AllowEmptyPassword: true,
- UseViews: true, // walrus only supports views
- })
+ useXattrs := base.TestUseXattrs()
+
+ if rt.DatabaseConfig == nil {
+ // If no db config was passed in, create one
+ rt.DatabaseConfig = &DbConfig{}
+ }
+ if base.TestsDisableGSI() {
+ rt.DatabaseConfig.UseViews = true
+ }
+
+ // numReplicas is set to 0 for test buckets, since tests should assume there may be only one indexing node.
+ numReplicas := uint(0)
+ rt.DatabaseConfig.NumIndexReplicas = &numReplicas
+ un, pw, _ := testBucket.BucketSpec.Auth.GetCredentials()
+ rt.DatabaseConfig.BucketConfig = BucketConfig{
+ Server: &testBucket.BucketSpec.Server,
+ Bucket: &testBucket.BucketSpec.BucketName,
+ Username: un,
+ Password: pw,
+ }
+ rt.DatabaseConfig.Name = "db"
+ rt.DatabaseConfig.Sync = syncFnPtr
+ rt.DatabaseConfig.EnableXattrs = &useXattrs
+ if rt.EnableNoConflictsMode {
+ boolVal := false
+ rt.DatabaseConfig.AllowConflicts = &boolVal
+ }
+
+ _, err := rt.RestTesterServerContext.AddDatabaseFromConfig(rt.DatabaseConfig)
if err != nil {
rt.tb.Fatalf("Error from AddDatabaseFromConfig: %v", err)
}
- rt.RestTesterBucket = rt.RestTesterServerContext.Database("db").Bucket
- return rt.RestTesterBucket
+ rt.WithTestBucket(testBucket)
+ rt.testBucket.Bucket = rt.RestTesterServerContext.Database("db").Bucket
+
+ if !rt.noAdminParty {
+ rt.SetAdminParty(true)
+ }
+
+ return rt.testBucket.Bucket
}
func (rt *RestTester) ServerContext() *ServerContext {
@@ -299,14 +221,14 @@ func (rt *RestTester) SetAdminParty(partyTime bool) {
_ = a.Save(guest)
}
-func (rt *RestTester) DisableGuestUser() {
- rt.SetAdminParty(false)
-}
-
func (rt *RestTester) Close() {
if rt.tb == nil {
panic("RestTester not properly initialized please use NewRestTester function")
}
+ if rt.testBucket != nil {
+ rt.testBucket.Close()
+ rt.testBucket = nil
+ }
if rt.RestTesterServerContext != nil {
rt.RestTesterServerContext.Close()
}
@@ -1153,9 +1075,10 @@ func (bt *BlipTester) GetChanges() (changes [][]interface{}) {
}
-func (bt *BlipTester) WaitForNumDocsViaChanges(numDocsExpected int) (docs map[string]RestDocument) {
+func (bt *BlipTester) WaitForNumDocsViaChanges(numDocsExpected int) (docs map[string]RestDocument, ok bool) {
retryWorker := func() (shouldRetry bool, err error, value interface{}) {
+ // Retry until the expected number of docs is observed via the changes feed.
allDocs := bt.PullDocs()
if len(allDocs) >= numDocsExpected {
return false, nil, allDocs
@@ -1169,11 +1092,11 @@ func (bt *BlipTester) WaitForNumDocsViaChanges(numDocsExpected int) (docs map[st
_, allDocs := base.RetryLoop(
"WaitForNumDocsViaChanges",
retryWorker,
- base.CreateDoublingSleeperFunc(10, 10),
+ base.CreateDoublingSleeperFunc(20, 10),
)
- return allDocs.(map[string]RestDocument)
-
+ docs, ok = allDocs.(map[string]RestDocument)
+ return docs, ok
}
// Get all documents and their attachments via the following steps: