diff --git a/go.mod b/go.mod index 55bf431b75..ebf511b9e0 100644 --- a/go.mod +++ b/go.mod @@ -302,7 +302,7 @@ replace ( github.com/libopenstorage/openstorage => github.com/libopenstorage/openstorage v1.0.1-0.20230324214216-7f88436db3de github.com/portworx/kdmp => github.com/portworx/kdmp v0.4.1-0.20230316085313-95fc97e8493b github.com/portworx/sched-ops => github.com/portworx/sched-ops v1.20.4-rc1.0.20230330091134-421296e5f8d0 - github.com/portworx/torpedo => github.com/portworx/torpedo v0.0.0-20230410110229-ad7b0df86670 + github.com/portworx/torpedo => github.com/portworx/torpedo v0.0.0-20230512010535-e219f683f7f9 gopkg.in/fsnotify.v1 v1.4.7 => github.com/fsnotify/fsnotify v1.4.7 helm.sh/helm/v3 => helm.sh/helm/v3 v3.10.3 diff --git a/go.sum b/go.sum index 1b5e9c7ad2..ecfe774af8 100644 --- a/go.sum +++ b/go.sum @@ -568,6 +568,12 @@ github.com/Azure/azure-sdk-for-go v43.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9mo github.com/Azure/azure-sdk-for-go v55.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v56.3.0+incompatible h1:DmhwMrUIvpeoTDiWRDtNHqelNUd3Og8JCkrLHQK795c= github.com/Azure/azure-sdk-for-go v56.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.0.0/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.0/go.mod h1:tZoQYdDZNOiIjdSn0dVWVfl0NEPGOJqVLzSrcFk4Is0= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0/go.mod h1:bhXu1AjYL+wutSL/kpSq6s7733q2Rb0yuot9Zgfqa/0= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.1/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0/go.mod h1:2e8rMJtl2+2j+HXbTBwnyGpm5Nou7KhvSfxOq8JpTag= github.com/Azure/azure-service-bus-go v0.10.1/go.mod h1:E/FOceuKAFUfpbIJDKWz/May6guE+eGibfGT6q+n1to= github.com/Azure/azure-storage-blob-go v0.0.0-20190123011202-457680cc0804/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y= github.com/Azure/azure-storage-blob-go v0.9.0 h1:kORqvzXP8ORhKbW13FflGUaSE5CMyDWun9UwMxY8gPs= @@ -632,6 +638,7 @@ github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1/go.mod h1:Vt9sXTKwMyGcOxSmLDMnGPgqsUg7m8pe215qMLrDXw4= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= @@ -1094,6 +1101,7 @@ github.com/distribution/distribution/v3 v3.0.0-20220526142353-ffbd94cbe269 h1:hb github.com/distribution/distribution/v3 v3.0.0-20220526142353-ffbd94cbe269/go.mod h1:28YO/VJk9/64+sTGNuYaBjWxrXTPrj0C0XmgTIOjxX4= github.com/djherbis/atime v1.0.0/go.mod h1:5W+KBIuTwVGcqjIfaTwt+KSYX1o6uep8dtevevQP/f8= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko= github.com/docker/cli 
v0.0.0-20190925022749-754388324470/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/cli v20.10.17+incompatible h1:eO2KS7ZFeov5UJeaDmIs1NFEDRf32PaqRpvoEkKBy5M= @@ -1228,7 +1236,6 @@ github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmn github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gorp/gorp/v3 v3.0.2 h1:ULqJXIekoqMx29FI5ekXXFoH1dT2Vc8UhnRzBg+Emz4= github.com/go-gorp/gorp/v3 v3.0.2/go.mod h1:BJ3q1ejpV8cVALtcXvXaXyTOlMmJhWDxTmncaR6rwBY= github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs= github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= @@ -1381,6 +1388,7 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A= +github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.3.0 h1:kHL1vqdqWNfATmA0FNMdmZNMyZI1U6O31X4rlIPoBog= @@ -1906,8 +1914,8 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pty v1.0.0/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= @@ -1943,7 +1951,6 @@ github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.10.6 h1:jbk+ZieJ0D7EVGJYpL9QTz7/YW6UHbmdnZWYyK5cdBs= github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw= github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= @@ -2051,8 +2058,8 @@ github.com/mattn/go-sqlite3 v0.0.0-20160514122348-38ee283dabf1/go.mod h1:FPy6Kqz github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 
v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= -github.com/mattn/go-sqlite3 v1.14.14 h1:qZgc/Rwetq+MtyE18WhzjokPD93dNqLGNT3QJuLvBGw= github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOjvxI= github.com/mattn/go-zglob v0.0.1/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo= github.com/matttproud/golang_protobuf_extensions v1.0.0/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= @@ -2115,8 +2122,6 @@ github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx github.com/moby/ipvs v1.0.1/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hxQ= github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= -github.com/moby/moby v20.10.23+incompatible h1:5+Q6jGL7oH89tx+ms0fGsTYEXrQ3P4vuL3i7DayMUuM= -github.com/moby/moby v20.10.23+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= github.com/moby/moby v20.10.24+incompatible h1:hjfxUufgeyrgolyaOWASyR9SvehpNcq/QHp/tx4VgsM= github.com/moby/moby v20.10.24+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= @@ -2143,6 +2148,7 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= @@ -2150,6 +2156,7 @@ github.com/mongodb/go-client-mongodb-atlas v0.1.2/go.mod h1:LS8O0YLkA+sbtOb3fZLF github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/montanaflynn/stats v0.6.6/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= github.com/moricho/tparallel v0.2.1/go.mod h1:fXEIZxG2vdfl0ZF8b42f5a78EhjjD5mX8qUplsoSU4k= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= @@ -2340,6 +2347,7 @@ github.com/pierrec/lz4 v2.2.6+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4/v4 v4.1.15/go.mod 
h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -2360,7 +2368,7 @@ github.com/portworx/kvdb v0.0.0-20190105022415-cccaa09abfc9/go.mod h1:Q8YyrNDvPp github.com/portworx/kvdb v0.0.0-20200723230726-2734b7f40194/go.mod h1:Q8YyrNDvPp3DVF96BDcQuaC7fAYUCuUX+l58S7OnD2M= github.com/portworx/kvdb v0.0.0-20200929023115-b312c7519467 h1:jkqzdbOnejgSN5HG/FLt4enNrozWT/K+nlmaRm3P1II= github.com/portworx/kvdb v0.0.0-20200929023115-b312c7519467/go.mod h1:Q8YyrNDvPp3DVF96BDcQuaC7fAYUCuUX+l58S7OnD2M= -github.com/portworx/pds-api-go-client v0.0.0-20220901142946-b6ecf97f5e71/go.mod h1:yn0abd9g4Q3fBpY720Eb+hwRV1OnTjROUaSb9mOlzBk= +github.com/portworx/pds-api-go-client v0.0.0-20230328163250-90d945a030b9/go.mod h1:yn0abd9g4Q3fBpY720Eb+hwRV1OnTjROUaSb9mOlzBk= github.com/portworx/px-backup-api v1.2.2-0.20230302013809-a43e52d727ba/go.mod h1:fwJb8kCDTvEwzQbId6jJL+z6GX6m6acfHgPBFncZJGE= github.com/portworx/px-object-controller v0.0.0-20220804234424-40d3b8a84987 h1:VNBTmIPjJRZ2QP64zdsrif3ELDHiMzoyNNX74VNHgZ8= github.com/portworx/px-object-controller v0.0.0-20220804234424-40d3b8a84987/go.mod h1:g3pw2lI2AjqAixUCRhaBdKTY98znsCPR7NGRrlpimVU= @@ -2369,14 +2377,14 @@ github.com/portworx/sched-ops v1.20.4-rc1.0.20230330091134-421296e5f8d0 h1:58JTP github.com/portworx/sched-ops v1.20.4-rc1.0.20230330091134-421296e5f8d0/go.mod h1:4Tm81DwlMhF4kNT+6toXeLnxGrqacMHtgz6x2FfFUnQ= github.com/portworx/talisman v0.0.0-20210302012732-8af4564777f7/go.mod h1:e8a6uFpSbOlRpZQlW9aXYogC+GWAo065G0RL9hDkD4Q= github.com/portworx/talisman v1.1.3/go.mod h1:e8a6uFpSbOlRpZQlW9aXYogC+GWAo065G0RL9hDkD4Q= -github.com/portworx/torpedo v0.0.0-20230410110229-ad7b0df86670 h1:dvMA2uZo8BKC+NC1IL6GIe58GCrmS6pim5QAMlh3a5g= -github.com/portworx/torpedo v0.0.0-20230410110229-ad7b0df86670/go.mod h1:s3fDUFWKAYiHKz/CEMRcyn3C/u2yYk5rTJpyGTlq+/0= +github.com/portworx/torpedo v0.0.0-20230512010535-e219f683f7f9 h1:+5WzCgIP1IuCFOLWUrBHUUga7WgdmDlr0CAhp2mJTig= +github.com/portworx/torpedo v0.0.0-20230512010535-e219f683f7f9/go.mod h1:AdGVLVk4KBEZxfZJJJPqgG7wBMAtA+f2sggqrovVI04= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.1/go.mod h1:6gapUrK/U1TAN7ciCoNRIdVC5sbdBTUh1DKN0g6uH7E= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= -github.com/poy/onpar v0.0.0-20190519213022-ee068f8ea4d1 h1:oL4IBbcqwhhNWh31bjOX8C/OCy0zs9906d/VUru+bqg= github.com/poy/onpar v0.0.0-20190519213022-ee068f8ea4d1/go.mod h1:nSbFQvMj97ZyhFRSJYtut+msi4sOY6zJDGCdSc+/rZU= +github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/pquerna/cachecontrol v0.1.0 h1:yJMy84ti9h/+OEWa752kBTKv4XC30OtVVHYv/8cTqKc= github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= @@ -2766,7 +2774,6 @@ github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f h1 
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= -github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs= github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= github.com/zoido/yag-config v0.4.0 h1:KahwuvyTIWjgo5CBCpGcktCaBCOTQOEtwyXk6OqUW50= github.com/zoido/yag-config v0.4.0/go.mod h1:HcK2GbfzhDVmgwP4miBIfD2qKz6Y5LAJayebVhie/nE= @@ -2911,6 +2918,7 @@ golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= @@ -3040,6 +3048,7 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= diff --git a/test/integration_test/migration_failover_failback_test.go b/test/integration_test/migration_failover_failback_test.go index 2f0beea6cb..28d2ee4d75 100644 --- a/test/integration_test/migration_failover_failback_test.go +++ b/test/integration_test/migration_failover_failback_test.go @@ -6,11 +6,14 @@ package integrationtest import ( "fmt" "testing" + "time" "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1" "github.com/portworx/sched-ops/k8s/core" + storkops "github.com/portworx/sched-ops/k8s/stork" "github.com/portworx/sched-ops/task" "github.com/portworx/torpedo/drivers/scheduler" + "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -54,10 +57,11 @@ func testMigrationFailoverFailback(t *testing.T) { t.Run("vanillaFailoverAndFailbackMigrationTest", vanillaFailoverAndFailbackMigrationTest) t.Run("rancherFailoverAndFailbackMigrationTest", rancherFailoverAndFailbackMigrationTest) + t.Run("stickyFlagFailoverAndFailbackMigrationTest", stickyFlagFailoverAndFailbackMigrationTest) } func vanillaFailoverAndFailbackMigrationTest(t *testing.T) { - 
failoverAndFailbackMigrationTest(t, "mysql-enc-pvc", "mysql-migration-failover-failback", true) } func rancherFailoverAndFailbackMigrationTest(t *testing.T) { @@ -105,20 +109,22 @@ func rancherFailoverAndFailbackMigrationTest(t *testing.T) { scaleFactor := testMigrationFailover(t, preMigrationCtx, ctxs, "", appKey, instanceID) - testMigrationFailback(t, preMigrationCtx, ctxs, scaleFactor, projectIDMappingsReverse, appKey, instanceID) + testMigrationFailback(t, preMigrationCtx, ctxs, scaleFactor, projectIDMappingsReverse, appKey, instanceID, true) } -func failoverAndFailbackMigrationTest(t *testing.T) { +func stickyFlagFailoverAndFailbackMigrationTest(t *testing.T) { + failoverAndFailbackMigrationTest(t, "mysql-sticky", "mysql-migration-sticky", false) +} + +func failoverAndFailbackMigrationTest(t *testing.T, appKey, migrationKey string, failbackSuccessExpected bool) { - appKey := "mysql-enc-pvc" - instanceID := "mysql-migration-failover-failback" // Migrate the resources ctxs, preMigrationCtx := triggerMigration( t, - instanceID, + migrationKey, appKey, nil, - []string{instanceID}, + []string{migrationKey}, true, false, false, @@ -150,9 +156,9 @@ func failoverAndFailbackMigrationTest(t *testing.T) { // validate the migration summary based on the application specs that were deployed by the test validateMigrationSummary(t, preMigrationCtx, expectedResources, expectedVolumes, migrationObj.Name, migrationObj.Namespace) - scaleFactor := testMigrationFailover(t, preMigrationCtx, ctxs, "", appKey, instanceID) + scaleFactor := testMigrationFailover(t, preMigrationCtx, ctxs, "", appKey, migrationKey) - testMigrationFailback(t, preMigrationCtx, ctxs, scaleFactor, "", appKey, instanceID) + testMigrationFailback(t, preMigrationCtx, ctxs, scaleFactor, "", appKey, migrationKey, failbackSuccessExpected) } func testMigrationFailover( @@ -242,6 +248,7 @@ func testMigrationFailback( scaleFactor map[string]int32, projectIDMappings string, appKey, instanceID string, + failbackSuccessExpected bool, ) { // Failback the application // Trigger a reverse migration @@ -251,6 +258,8 @@ func testMigrationFailback( require.NoError(t, err, "Error scheduling task") require.Equal(t, 1, len(ctxsReverse), "Only one task should have started") + appCtx := ctxsReverse[0] + err = schedulerDriver.WaitForRunning(ctxsReverse[0], defaultWaitTimeout, defaultWaitInterval) require.NoError(t, err, "Error waiting for app to get to running state") @@ -265,6 +274,50 @@ func testMigrationFailback( scheduler.ScheduleOptions{AppKeys: []string{instanceID}}) require.NoError(t, err, "Error scheduling migration specs") + if !failbackSuccessExpected { + // In case of sticky volumes, migration failure is expected, so update the volumes and trigger the migration again + err = schedulerDriver.WaitForRunning(ctxsReverse[0], defaultWaitTimeout/10, defaultWaitInterval) + require.Error(t, err, "Expected failback migration to fail") + + // Get volumes for this migration on source cluster and update sticky flag + err = setSourceKubeConfig() + require.NoError(t, err, "Error resetting source config for updating sticky volume") + + vols, err := schedulerDriver.GetVolumes(appCtx) + require.NoError(t, err, "Error getting volumes for app") + for _, v := range vols { + err = volumeDriver.UpdateStickyFlag(v.ID, "off") + require.NoError(t, err, "Error updating sticky flag for volume %s", v.Name) + } + time.Sleep(3 * time.Minute) + + // Trigger migration on destination cluster again + err = setDestinationKubeConfig() +
require.NoError(t, err, "Error setting destination config after updating sticky volume") + + var failedMigrationObj *v1alpha1.Migration + var ok bool + for _, specObj := range ctxsReverse[0].App.SpecList { + if failedMigrationObj, ok = specObj.(*v1alpha1.Migration); ok { + break + } + } + failedMigrationObj, err = storkops.Instance().GetMigration(failedMigrationObj.Name, failedMigrationObj.Namespace) + require.NoError(t, err, "Error getting the failed migration") + logrus.Infof("Failed migration object found: %s in namespace: %s. Status: %s", failedMigrationObj.Name, failedMigrationObj.Namespace, failedMigrationObj.Status.Status) + + err = deleteMigrations([]*v1alpha1.Migration{failedMigrationObj}) + require.NoError(t, err, "error in deleting failed migrations.") + + // apply migration specs again, it should pass this time + err = schedulerDriver.AddTasks(ctxsReverse[0], + scheduler.ScheduleOptions{AppKeys: []string{instanceID}}) + require.NoError(t, err, "Error scheduling migration specs") + + err = schedulerDriver.WaitForRunning(ctxsReverse[0], defaultWaitTimeout, defaultWaitInterval) + require.NoError(t, err, "Error waiting for migration to complete post sticky flag update") + } + err = schedulerDriver.WaitForRunning(ctxsReverse[0], defaultWaitTimeout, defaultWaitInterval) require.NoError(t, err, "Error waiting for migration to complete") @@ -322,8 +375,16 @@ func testMigrationFailback( require.Equal(t, projectValue, "project-A") } } - destroyAndWait(t, []*scheduler.Context{postMigrationCtx}) + destroyAndWait(t, ctxs) + + err = setDestinationKubeConfig() + require.NoError(t, err, "Error resetting remote config") + destroyAndWait(t, ctxsReverse) + + err = setSourceKubeConfig() + require.NoError(t, err, "Error resetting remote config") + } // The below two functions are currently not invoked during the tests since the namespaceSelector diff --git a/test/integration_test/specs/mysql-migration-sticky/migration.yaml b/test/integration_test/specs/mysql-migration-sticky/migration.yaml new file mode 100644 index 0000000000..983694dbe4 --- /dev/null +++ b/test/integration_test/specs/mysql-migration-sticky/migration.yaml @@ -0,0 +1,14 @@ +apiVersion: stork.libopenstorage.org/v1alpha1 +kind: Migration +metadata: + name: mysql-sticky-migration +spec: + # This should be the name of the cluster pair + clusterPair: remoteclusterpair + # If set to false this will migrate only the volumes. No PVCs, apps, etc will be migrated + includeResources: true + # If set to false, the deployments and stateful set replicas will be set to 0 on the destination. 
There will be an annotation with "stork.openstorage.org/migrationReplicas" to store the replica count from the source + startApplications: false + # List of namespaces to migrate + namespaces: + - mysql-sticky-mysql-migration-sticky diff --git a/test/integration_test/specs/mysql-sticky/aws/aws-sc.yaml b/test/integration_test/specs/mysql-sticky/aws/aws-sc.yaml new file mode 100644 index 0000000000..209a336dda --- /dev/null +++ b/test/integration_test/specs/mysql-sticky/aws/aws-sc.yaml @@ -0,0 +1,8 @@ +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: mysql-sc +provisioner: kubernetes.io/aws-ebs +parameters: + type: gp2 +reclaimPolicy: Delete diff --git a/test/integration_test/specs/mysql-sticky/azure/azure-sc.yaml b/test/integration_test/specs/mysql-sticky/azure/azure-sc.yaml new file mode 100644 index 0000000000..f6b2991b00 --- /dev/null +++ b/test/integration_test/specs/mysql-sticky/azure/azure-sc.yaml @@ -0,0 +1,7 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: mysql-sc +provisioner: kubernetes.io/azure-disk +parameters: + skuName: Standard_LRS diff --git a/test/integration_test/specs/mysql-sticky/gce/gke-sc.yaml b/test/integration_test/specs/mysql-sticky/gce/gke-sc.yaml new file mode 100644 index 0000000000..ad25c1acdf --- /dev/null +++ b/test/integration_test/specs/mysql-sticky/gce/gke-sc.yaml @@ -0,0 +1,10 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: mysql-sc +parameters: + type: pd-standard +provisioner: kubernetes.io/gce-pd +reclaimPolicy: Delete +volumeBindingMode: Immediate +allowVolumeExpansion: true diff --git a/test/integration_test/specs/mysql-sticky/linstor/linstor-sc.yaml b/test/integration_test/specs/mysql-sticky/linstor/linstor-sc.yaml new file mode 100644 index 0000000000..8372edac4e --- /dev/null +++ b/test/integration_test/specs/mysql-sticky/linstor/linstor-sc.yaml @@ -0,0 +1,10 @@ +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: mysql-sc +provisioner: linstor.csi.linbit.com +allowVolumeExpansion: true +reclaimPolicy: Delete +parameters: + autoPlace: "2" + storagePool: sda diff --git a/test/integration_test/specs/mysql-sticky/mysql.yaml b/test/integration_test/specs/mysql-sticky/mysql.yaml new file mode 100644 index 0000000000..db76c999f8 --- /dev/null +++ b/test/integration_test/specs/mysql-sticky/mysql.yaml @@ -0,0 +1,69 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: mysql + labels: + app: mysql +spec: + serviceName: mysql-service + replicas: 1 + selector: + matchLabels: + app: mysql + version: "1" + template: + metadata: + labels: + app: mysql + version: "1" + spec: + schedulerName: stork + containers: + - image: mysql:5.6 + name: mysql + env: + - name: MYSQL_ROOT_PASSWORD + value: password + ports: + - containerPort: 3306 + livenessProbe: + exec: + command: ["sh", "-c", "mysqladmin -u root -p$MYSQL_ROOT_PASSWORD ping"] + initialDelaySeconds: 70 + periodSeconds: 10 + timeoutSeconds: 5 + readinessProbe: + exec: + command: ["sh", "-c", "mysql -u root -p$MYSQL_ROOT_PASSWORD -e \"select 1\""] + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 5 + volumeMounts: + - name: mysql-data + mountPath: /var/lib/mysql + volumeClaimTemplates: + - metadata: + name: mysql-data + annotations: + volume.beta.kubernetes.io/storage-class: mysql-sticky-sc + px/secret-name: volume-secrets + px/secret-namespace: kube-system + px/secret-key: mysql-secret + spec: + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: 8Gi +--- +apiVersion: v1 +kind: Service 
+metadata: + name: mysql-service + labels: + app: mysql +spec: + selector: + app: mysql + ports: + - name: transport + port: 3306 diff --git a/test/integration_test/specs/mysql-sticky/portworx/sc-sticky.yaml b/test/integration_test/specs/mysql-sticky/portworx/sc-sticky.yaml new file mode 100644 index 0000000000..d8764c2cb8 --- /dev/null +++ b/test/integration_test/specs/mysql-sticky/portworx/sc-sticky.yaml @@ -0,0 +1,8 @@ +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: mysql-sticky-sc +provisioner: kubernetes.io/portworx-volume +parameters: + repl: "2" + sticky: "true" diff --git a/vendor/github.com/portworx/torpedo/drivers/node/node_registry.go b/vendor/github.com/portworx/torpedo/drivers/node/node_registry.go index 626d59fe92..19fd92b399 100644 --- a/vendor/github.com/portworx/torpedo/drivers/node/node_registry.go +++ b/vendor/github.com/portworx/torpedo/drivers/node/node_registry.go @@ -77,6 +77,16 @@ func GetMasterNodes() []Node { return nodeList } +// IsMasterNode returns true if the node is a master node +func IsMasterNode(n Node) bool { + for _, each := range GetMasterNodes() { + if each.uuid == n.uuid { + return true + } + } + return false +} + // GetStorageDriverNodes returns only the worker node where storage // driver is installed func GetStorageDriverNodes() []Node { @@ -217,3 +227,15 @@ func GetNodeDetailsByNodeName(nodeName string) (Node, error) { } return Node{}, fmt.Errorf("failed to get Node Details by Node Name [%s] ", nodeName) } + +// GetNodeDetailsByNodeID gets node details for a given node ID +func GetNodeDetailsByNodeID(nodeID string) (Node, error) { + storageNodes := GetStorageNodes() + + for _, each := range storageNodes { + if each.Id == nodeID { + return each, nil + } + } + return Node{}, fmt.Errorf("failed to get Node Details by Node ID [%s] ", nodeID) +} diff --git a/vendor/github.com/portworx/torpedo/drivers/scheduler/k8s/k8s.go b/vendor/github.com/portworx/torpedo/drivers/scheduler/k8s/k8s.go index 5d28ae4ed0..1275bb2be6 100644 --- a/vendor/github.com/portworx/torpedo/drivers/scheduler/k8s/k8s.go +++ b/vendor/github.com/portworx/torpedo/drivers/scheduler/k8s/k8s.go @@ -35,6 +35,7 @@ import ( "github.com/libopenstorage/openstorage/pkg/units" storkapi "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1" admissionregistration "github.com/portworx/sched-ops/k8s/admissionregistration" + "github.com/portworx/sched-ops/k8s/apiextensions" "github.com/portworx/sched-ops/k8s/apps" "github.com/portworx/sched-ops/k8s/autopilot" "github.com/portworx/sched-ops/k8s/batch" @@ -204,6 +205,7 @@ var ( k8sMonitoring = prometheus.Instance() k8sPolicy = policy.Instance() k8sAdmissionRegistration = admissionregistration.Instance() + k8sApiExtensions = apiextensions.Instance() // k8sExternalsnap is a instance of csisnapshot instance k8sExternalsnap = csisnapshot.Instance() @@ -211,6 +213,14 @@ var ( SnapshotAPIGroup = "snapshot.storage.k8s.io" ) +// CustomResourceObjectYAML is used as the spec object for all CRs +type CustomResourceObjectYAML struct { + Path string + // Namespace will only be assigned DURING creation + Namespace string + Name string +} + // K8s The kubernetes structure type K8s struct { SpecFactory *spec.Factory @@ -350,6 +360,7 @@ func (k *K8s) SetConfig(kubeconfigPath string) error { k8sMonitoring.SetConfig(config) k8sPolicy.SetConfig(config) k8sAdmissionRegistration.SetConfig(config) + k8sApiExtensions.SetConfig(config) return nil } @@ -411,7 +422,16 @@ func (k *K8s) ParseSpecs(specDir, storageProvisioner string) ([]interface{}, err if err != nil
{ return nil, err } - if !isHelmChart { + + splitPath := strings.Split(fileName, "/") + if strings.HasPrefix(splitPath[len(splitPath)-1], "cr-") { + // TODO: process with templates + specObj := &CustomResourceObjectYAML{ + Path: fileName, + } + specs = append(specs, specObj) + log.Warnf("custom res: %v", specObj) //TODO: remove + } else if !isHelmChart { file, err := ioutil.ReadFile(fileName) if err != nil { return nil, err @@ -870,6 +890,23 @@ func (k *K8s) CreateSpecObjects(app *spec.AppSpec, namespace string, options sch } } + for _, appSpec := range app.SpecList { + t := func() (interface{}, bool, error) { + obj, err := k.createCRDObjects(appSpec, ns, app) + if err != nil { + return nil, true, err + } + return obj, false, nil + } + obj, err := task.DoRetryWithTimeout(t, k8sObjectCreateTimeout, DefaultRetryInterval) + if err != nil { + return nil, err + } + if obj != nil { + specObjects = append(specObjects, obj) + } + } + for _, appSpec := range app.SpecList { t := func() (interface{}, bool, error) { obj, err := k.createVolumeSnapshotRestore(appSpec, ns, app) @@ -1055,6 +1092,25 @@ func (k *K8s) CreateSpecObjects(app *spec.AppSpec, namespace string, options sch } } + // creation of CustomResourceObjects must most likely be done *last*, + // as it may have resources that depend on other resources, which should be created *before* this + for _, appSpec := range app.SpecList { + t := func() (interface{}, bool, error) { + obj, err := k.createCustomResourceObjects(appSpec, ns, app) + if err != nil { + return nil, true, err + } + return obj, false, nil + } + obj, err := task.DoRetryWithTimeout(t, k8sObjectCreateTimeout, DefaultRetryInterval) + if err != nil { + return nil, err + } + if obj != nil { + specObjects = append(specObjects, obj) + } + } + return specObjects, nil } @@ -2151,6 +2207,36 @@ func (k *K8s) destroyAdmissionRegistrationObjects(spec interface{}, app *spec.Ap return nil } +// destroyCRDObjects is used to destroy Resources in the `apiextensions` group (like CRDs) +func (k *K8s) destroyCRDObjects(spec interface{}, app *spec.AppSpec) error { + + if obj, ok := spec.(*apiextensionsv1.CustomResourceDefinition); ok { + err := k8sApiExtensions.DeleteCRD(obj.Name) + if err != nil { + return &scheduler.ErrFailedToDestroyApp{ + App: app, + Cause: fmt.Sprintf("Failed to destroy CRD: %v. Err: %v", obj.Name, err), + } + } else { + log.Infof("[%v] Destroyed CRD: %v", app.Key, obj.Name) + return nil + } + } else if obj, ok := spec.(*apiextensionsv1beta1.CustomResourceDefinition); ok { + err := k8sApiExtensions.DeleteCRDV1beta1(obj.Name) + if err != nil { + return &scheduler.ErrFailedToDestroyApp{ + App: app, + Cause: fmt.Sprintf("Failed to destroy CRDV1beta1: %v. 
Err: %v", obj.Name, err), + } + } else { + log.Infof("[%v] Destroyed CRDV1beta1: %v", app.Key, obj.Name) + return nil + } + } + + return nil +} + func (k *K8s) substituteNamespaceInContainers(containers []corev1.Container, ns string) []corev1.Container { var updatedContainers []corev1.Container for _, container := range containers { @@ -2470,6 +2556,22 @@ func (k *K8s) WaitForRunning(ctx *scheduler.Context, timeout, retryInterval time func (k *K8s) Destroy(ctx *scheduler.Context, opts map[string]bool) error { var podList []corev1.Pod + // destruction of CustomResourceObjects must most likely be done *first*, + // as it may have resources that depend on other resources, which should be deleted *after* this + for _, appSpec := range ctx.App.SpecList { + t := func() (interface{}, bool, error) { + err := k.destroyCustomResourceObjects(appSpec, ctx.App) + if err != nil { + return nil, true, err + } else { + return nil, false, nil + } + } + if _, err := task.DoRetryWithTimeout(t, k8sDestroyTimeout, DefaultRetryInterval); err != nil { + return err + } + } + var removeSpecs []interface{} for _, appSpec := range ctx.App.SpecList { if repoInfo, ok := appSpec.(*scheduler.HelmRepo); ok { @@ -2495,6 +2597,7 @@ func (k *K8s) Destroy(ctx *scheduler.Context, opts map[string]bool) error { } } } + for _, appSpec := range ctx.App.SpecList { t := func() (interface{}, bool, error) { err := k.destroyAdmissionRegistrationObjects(appSpec, ctx.App) @@ -2509,6 +2612,20 @@ func (k *K8s) Destroy(ctx *scheduler.Context, opts map[string]bool) error { } } + for _, appSpec := range ctx.App.SpecList { + t := func() (interface{}, bool, error) { + err := k.destroyRbacObjects(appSpec, ctx.App) + if err != nil { + return nil, true, err + } else { + return nil, false, nil + } + } + if _, err := task.DoRetryWithTimeout(t, k8sDestroyTimeout, DefaultRetryInterval); err != nil { + return err + } + } + for _, appSpec := range ctx.App.SpecList { t := func() (interface{}, bool, error) { currPods, err := k.destroyCoreObject(appSpec, opts, ctx.App) @@ -2601,6 +2718,20 @@ func (k *K8s) Destroy(ctx *scheduler.Context, opts map[string]bool) error { } } + for _, appSpec := range ctx.App.SpecList { + t := func() (interface{}, bool, error) { + err := k.destroyCRDObjects(appSpec, ctx.App) + if err != nil { + return nil, true, err + } else { + return nil, false, nil + } + } + if _, err := task.DoRetryWithTimeout(t, k8sDestroyTimeout, DefaultRetryInterval); err != nil { + return err + } + } + if value, ok := opts[scheduler.OptionsWaitForResourceLeakCleanup]; ok && value { if err := k.WaitForDestroy(ctx, k8sDestroyTimeout); err != nil { return err @@ -4222,6 +4353,132 @@ func (k *K8s) createAdmissionRegistrationObjects( return nil, nil } +// createCustomResourceObjects is used to create objects whose resource `kind` is defined by a CRD. 
NOTE: this is done using the `kubectl apply -f` command instead of the conventional method of using an api library +func (k *K8s) createCustomResourceObjects( + spec interface{}, + ns *corev1.Namespace, + app *spec.AppSpec, +) (interface{}, error) { + + if obj, ok := spec.(*CustomResourceObjectYAML); ok { + log.Warn("applying custom resources") + cryaml := obj.Path + if _, err := os.Stat(cryaml); baseErrors.Is(err, os.ErrNotExist) { + return nil, fmt.Errorf("Cannot find yaml in path %s", cryaml) + } + cmdArgs := []string{"apply", "-f", cryaml, "-n", ns.Name} + err := osutils.Kubectl(cmdArgs) + if err != nil { + return nil, fmt.Errorf("Error applying spec [%s], Error: %s", cryaml, err) + } + obj.Namespace = ns.Name + obj.Name = "placeholder" //TODO1 + return obj, nil + } + + return nil, nil +} + +// destroyCustomResourceObjects is used to delete objects whose resource `kind` is defined by a CRD. NOTE: this is done using the `kubectl delete -f` command instead of the conventional method of using an api library +func (k *K8s) destroyCustomResourceObjects(spec interface{}, app *spec.AppSpec) error { + + if obj, ok := spec.(*CustomResourceObjectYAML); ok { + cryaml := obj.Path + if _, err := os.Stat(cryaml); baseErrors.Is(err, os.ErrNotExist) { + return &scheduler.ErrFailedToDestroyApp{ + App: app, + Cause: fmt.Sprintf("Failed to destroy Custom Resource Object: %v. Err: Cannot find yaml in path: %v", obj.Name, cryaml), + } + } + + cmdArgs := []string{"delete", "-f", cryaml, "-n", obj.Namespace} + err := osutils.Kubectl(cmdArgs) + if err != nil { + return &scheduler.ErrFailedToDestroyApp{ + App: app, + Cause: fmt.Sprintf("Failed to destroy Custom Resource Object: %v. Err: %v", obj.Name, err), + } + } else { + log.Infof("[%v] Destroyed CustomResourceObject: %v", app.Key, obj.Name) + return nil + } + } + + return nil +} + +// createCRDObjects is used to create Resources in the `apiextensions` group (like CRDs) +func (k *K8s) createCRDObjects( + specObj interface{}, + ns *corev1.Namespace, + app *spec.AppSpec, +) (interface{}, error) { + + if obj, ok := specObj.(*apiextensionsv1.CustomResourceDefinition); ok { + obj.Namespace = ns.Name + err := k8sApiExtensions.RegisterCRD(obj) + + if k8serrors.IsAlreadyExists(err) { + options := metav1.GetOptions{} + if crd, err := k8sApiExtensions.GetCRD(obj.Name, options); err == nil { + log.Infof("[%v] Found existing CRD: %v", app.Key, crd.Name) + return crd, nil + } + } + + if err != nil { + return nil, &scheduler.ErrFailedToScheduleApp{ + App: app, + Cause: fmt.Sprintf("Failed to Register CRD: %v. Err: %v", obj.Name, err), + } + } else { + options := metav1.GetOptions{} + if crd, err := k8sApiExtensions.GetCRD(obj.Name, options); err == nil { + log.Infof("[%v] Registered CRD: %v", app.Key, crd.Name) + return crd, nil + } else { + // if it fails, then you need to `validate` before `get` + return nil, &scheduler.ErrFailedToScheduleApp{ + App: app, + Cause: fmt.Sprintf("Failed to Get CRD after Registration: %v. 
Err: %v", obj.Name, err), + } + } + } + } else if obj, ok := specObj.(*apiextensionsv1beta1.CustomResourceDefinition); ok { + obj.Namespace = ns.Name + err := k8sApiExtensions.RegisterCRDV1beta1(obj) + + if k8serrors.IsAlreadyExists(err) { + options := metav1.GetOptions{} + if crd, err := k8sApiExtensions.GetCRDV1beta1(obj.Name, options); err == nil { + log.Infof("[%v] Found existing CRDV1beta1: %v", app.Key, crd.Name) + return crd, nil + } + } + + if err != nil { + return nil, &scheduler.ErrFailedToScheduleApp{ + App: app, + Cause: fmt.Sprintf("Failed to Register CRDV1beta1: %v. Err: %v", obj.Name, err), + } + } else { + options := metav1.GetOptions{} + if crd, err := k8sApiExtensions.GetCRDV1beta1(obj.Name, options); err == nil { + log.Infof("[%v] Registered CRDV1beta1: %v", app.Key, crd.Name) + return crd, nil + } else { + // if it fails, then you need to `validate` before `get` + return nil, &scheduler.ErrFailedToScheduleApp{ + App: app, + Cause: fmt.Sprintf("Failed to Get CRDV1beta1 after Registration: %v. Err: %v", obj.Name, err), + } + } + } + } + + return nil, nil +} + func (k *K8s) createMigrationObjects( specObj interface{}, ns *corev1.Namespace, @@ -4683,6 +4940,47 @@ func (k *K8s) createRbacObjects( return nil, nil } +// destroyRbacObjects destroys objects in the `Rbac.authorization` group (like ClusterRole, ClusterRoleBinding, ServiceAccount) +func (k *K8s) destroyRbacObjects(spec interface{}, app *spec.AppSpec) error { + + if obj, ok := spec.(*rbacv1.ClusterRole); ok { + err := k8sRbac.DeleteClusterRole(obj.Name) + if err != nil { + return &scheduler.ErrFailedToDestroyApp{ + App: app, + Cause: fmt.Sprintf("Failed to destroy ClusterRole: %v. Err: %v", obj.Name, err), + } + } else { + log.Infof("[%v] Destroyed ClusterRole: %v", app.Key, obj.Name) + return nil + } + } else if obj, ok := spec.(*rbacv1.ClusterRoleBinding); ok { + err := k8sRbac.DeleteClusterRoleBinding(obj.Name) + if err != nil { + return &scheduler.ErrFailedToDestroyApp{ + App: app, + Cause: fmt.Sprintf("Failed to destroy ClusterRoleBinding: %v. Err: %v", obj.Name, err), + } + } else { + log.Infof("[%v] Destroyed ClusterRoleBinding: %v", app.Key, obj.Name) + return nil + } + } else if obj, ok := spec.(*corev1.ServiceAccount); ok { + err := k8sCore.DeleteServiceAccount(obj.Name, obj.Namespace) + if err != nil { + return &scheduler.ErrFailedToDestroyApp{ + App: app, + Cause: fmt.Sprintf("Failed to destroy ServiceAccount: %v. 
Err: %v", obj.Name, err), + } + } else { + log.Infof("[%v] Destroyed ServiceAccount: %v", app.Key, obj.Name) + return nil + } + } + + return nil +} + func (k *K8s) createNetworkingObjects( spec interface{}, ns *corev1.Namespace, @@ -6553,3 +6851,12 @@ func init() { k := &K8s{} scheduler.Register(SchedName, k) } + +// ClusterVersion returns the cluster version of the kubernetes cluster as a string (like "1.23.0") +func ClusterVersion() (string, error) { + ver, err := k8sCore.GetVersion() + if err != nil { + return "", err + } + return strings.TrimLeft(ver.String(), "v"), nil +} diff --git a/vendor/github.com/portworx/torpedo/drivers/volume/common.go b/vendor/github.com/portworx/torpedo/drivers/volume/common.go index 73b61c0c23..8b22b8acbd 100644 --- a/vendor/github.com/portworx/torpedo/drivers/volume/common.go +++ b/vendor/github.com/portworx/torpedo/drivers/volume/common.go @@ -643,7 +643,7 @@ func (d *DefaultDriver) ValidateStoragePools() error { } // ExpandPool resizes a pool of a given ID -func (d *DefaultDriver) ExpandPool(poolUID string, operation api.SdkStoragePool_ResizeOperationType, size uint64) error { +func (d *DefaultDriver) ExpandPool(poolUID string, operation api.SdkStoragePool_ResizeOperationType, size uint64, skipWaitForCleanVolumes bool) error { return &errors.ErrNotSupported{ Type: "Function", Operation: "ExpandPool()", @@ -651,7 +651,7 @@ func (d *DefaultDriver) ExpandPool(poolUID string, operation api.SdkStoragePool_ } // ExpandPoolUsingPxctlCmd resizes pool of a given ID using CLI Command -func (d *DefaultDriver) ExpandPoolUsingPxctlCmd(n node.Node, poolUUID string, operation api.SdkStoragePool_ResizeOperationType, size uint64) error { +func (d *DefaultDriver) ExpandPoolUsingPxctlCmd(n node.Node, poolUUID string, operation api.SdkStoragePool_ResizeOperationType, size uint64, skipWaitForCleanVolumes bool) error { return &errors.ErrNotSupported{ Type: "Function", Operation: "ExpandPoolUsingPxctlCmd()", @@ -813,7 +813,15 @@ func (d *DefaultDriver) UpdateIOPriority(volumeName string, priorityType string) } } -// ValidateMountOptions for pure volumes +// UpdateStickyFlag update sticky flag on volume +func (d *DefaultDriver) UpdateStickyFlag(volumeName, stickyOption string) error { + return &errors.ErrNotSupported{ + Type: "Function", + Operation: "UpdateStickyFlag", + } +} + +// ValidatePureFaFbMountOptions Validates MountOptions for pure volumes func (d *DefaultDriver) ValidatePureFaFbMountOptions(volumeName string, mountoption []string, volumeNode *node.Node) error { return &errors.ErrNotSupported{ Type: "Function", @@ -821,6 +829,14 @@ func (d *DefaultDriver) ValidatePureFaFbMountOptions(volumeName string, mountopt } } +// ValidatePureFaCreateOptions validates createoptions for pure volumes +func (d *DefaultDriver) ValidatePureFaCreateOptions(volumeName string, FSType string, volumeNode *node.Node) error { + return &errors.ErrNotSupported{ + Type: "Function", + Operation: "ValidateCreateOptions", + } +} + // Contains return func (d *DefaultDriver) Contains(nodes []*api.StorageNode, n *api.StorageNode) bool { return false diff --git a/vendor/github.com/portworx/torpedo/drivers/volume/portworx/portworx.go b/vendor/github.com/portworx/torpedo/drivers/volume/portworx/portworx.go index e6cf07e742..78bacfa0aa 100644 --- a/vendor/github.com/portworx/torpedo/drivers/volume/portworx/portworx.go +++ b/vendor/github.com/portworx/torpedo/drivers/volume/portworx/portworx.go @@ -100,6 +100,7 @@ const ( pxReleaseManifestURLEnvVarName = "PX_RELEASE_MANIFEST_URL" pxServiceLocalEndpoint = 
"portworx-service.kube-system.svc.cluster.local" mountGrepVolume = "mount | grep %s" + mountGrepFirstColumn = "mount | grep %s | awk '{print $1}'" ) const ( @@ -108,8 +109,8 @@ const ( podUpRetryInterval = 30 * time.Second maintenanceOpTimeout = 1 * time.Minute maintenanceWaitTimeout = 2 * time.Minute - inspectVolumeTimeout = 1 * time.Minute - inspectVolumeRetryInterval = 2 * time.Second + inspectVolumeTimeout = 2 * time.Minute + inspectVolumeRetryInterval = 3 * time.Second validateDeleteVolumeTimeout = 6 * time.Minute validateReplicationUpdateTimeout = 60 * time.Minute validateClusterStartTimeout = 2 * time.Minute @@ -147,8 +148,9 @@ const ( ) const ( - driveAddSuccessStatus = "Drive add done" - driveExitsStatus = "Device already exists" + driveAddSuccessStatus = "Drive add done" + driveExitsStatus = "Device already exists" + metadataAddSuccessStatus = "Successfully added metadata device" ) // Provisioners types of supported provisioners @@ -214,7 +216,7 @@ type statusJSON struct { } // ExpandPool resizes a pool of a given ID -func (d *portworx) ExpandPool(poolUUID string, operation api.SdkStoragePool_ResizeOperationType, size uint64) error { +func (d *portworx) ExpandPool(poolUUID string, operation api.SdkStoragePool_ResizeOperationType, size uint64, skipWaitForCleanVolumes bool) error { log.Infof("Initiating pool %v resize by %v with operationtype %v", poolUUID, size, operation.String()) // start a task to check if pool resize is done @@ -224,13 +226,14 @@ func (d *portworx) ExpandPool(poolUUID string, operation api.SdkStoragePool_Resi ResizeFactor: &api.SdkStoragePoolResizeRequest_Size{ Size: size, }, - OperationType: operation, + OperationType: operation, + SkipWaitForCleanVolumes: skipWaitForCleanVolumes, }) if err != nil { return nil, true, err } if jobListResp.String() != "" { - log.Debugf("Resize respone: %v", jobListResp.String()) + log.Debugf("Resize response: %v", jobListResp.String()) } return nil, false, nil } @@ -242,7 +245,7 @@ func (d *portworx) ExpandPool(poolUUID string, operation api.SdkStoragePool_Resi } // ExpandPoolUsingPxctlCmd resizes a pool of a given UUID using CLI command -func (d *portworx) ExpandPoolUsingPxctlCmd(n node.Node, poolUUID string, operation api.SdkStoragePool_ResizeOperationType, size uint64) error { +func (d *portworx) ExpandPoolUsingPxctlCmd(n node.Node, poolUUID string, operation api.SdkStoragePool_ResizeOperationType, size uint64, skipWaitForCleanVolumes bool) error { var operationString string @@ -257,6 +260,9 @@ func (d *portworx) ExpandPoolUsingPxctlCmd(n node.Node, poolUUID string, operati log.InfoD("Initiate Pool %v resize by %v with operationtype %v using CLI", poolUUID, size, operation.String()) cmd := fmt.Sprintf("pxctl sv pool expand --uid %v --size %v --operation %v", poolUUID, size, operationString) + if skipWaitForCleanVolumes { + cmd = fmt.Sprintf("%s -f", cmd) + } out, err := d.nodeDriver.RunCommandWithNoRetry( n, cmd, @@ -1014,6 +1020,8 @@ func (d *portworx) EnterMaintenance(n node.Node) error { } return nil, false, nil } + log.Infof("waiting for 3 mins allowing node to completely transition to maintenance mode") + time.Sleep(3 * time.Minute) if _, err := task.DoRetryWithTimeout(t, maintenanceOpTimeout, defaultRetryInterval); err != nil { return err @@ -1035,6 +1043,7 @@ func (d *portworx) EnterMaintenance(n node.Node) error { Cause: err.Error(), } } + return nil } @@ -1080,6 +1089,8 @@ func (d *portworx) EnterPoolMaintenance(n node.Node) error { return fmt.Errorf("error when entering pool maintenance on node [%s], Err: %v", 
n.Name, err) } log.Infof("Enter pool maintenance %s", out) + log.Infof("waiting for 3 mins allowing pool to completely transition to maintenance mode") + time.Sleep(3 * time.Minute) return nil } @@ -1439,6 +1450,22 @@ func (d *portworx) UpdateIOPriority(volumeName string, priorityType string) erro return nil } +func (d *portworx) UpdateStickyFlag(volumeName, stickyOption string) error { + nodes := node.GetStorageDriverNodes() + cmd := fmt.Sprintf("%s %s --sticky %s", pxctlVolumeUpdate, volumeName, stickyOption) + _, err := d.nodeDriver.RunCommandWithNoRetry( + nodes[0], + cmd, + node.ConnectionOpts{ + Timeout: crashDriverTimeout, + TimeBeforeRetry: defaultRetryInterval, + }) + if err != nil { + return fmt.Errorf("failed setting sticky option to %s for volume %s, Err: %v", stickyOption, volumeName, err) + } + return nil +} + func (d *portworx) ValidatePureFaFbMountOptions(volumeName string, mountoption []string, volumeNode *node.Node) error { cmd := fmt.Sprintf(mountGrepVolume, volumeName) out, err := d.nodeDriver.RunCommandWithNoRetry( @@ -1462,6 +1489,87 @@ func (d *portworx) ValidatePureFaFbMountOptions(volumeName string, mountoption [ } +// ValidatePureFaCreateOptions validates FStype and create options with block size 2048 on those FStypes +func (d *portworx) ValidatePureFaCreateOptions(volumeName string, FStype string, volumeNode *node.Node) error { + // Checking if file systems are properly set + FScmd := fmt.Sprintf(mountGrepVolume, volumeName) + FSout, err := d.nodeDriver.RunCommandWithNoRetry( + *volumeNode, + FScmd, + node.ConnectionOpts{ + Timeout: crashDriverTimeout, + TimeBeforeRetry: defaultRetryInterval, + }) + if err != nil { + return fmt.Errorf("Failed to get mount response for volume %s, Err: %v", volumeName, err) + } + if strings.Contains(FSout, FStype) { + log.Infof("%s file system is available in the volume %s", FStype, volumeName) + } else { + return fmt.Errorf("Failed to find %s file system in mount output for volume %s", FStype, volumeName) + } + + // Getting the mapper volume name where create options are applied + mapperCmd := fmt.Sprintf(mountGrepFirstColumn, volumeName) + mapperOut, err := d.nodeDriver.RunCommandWithNoRetry( + *volumeNode, + mapperCmd, + node.ConnectionOpts{ + Timeout: crashDriverTimeout, + TimeBeforeRetry: defaultRetryInterval, + }) + if err != nil { + return fmt.Errorf("Failed to get attached volume for create option for pvc %s, Err: %v", volumeName, err) + } + + // Validating implementation of create options + if FStype == "xfs" { + xfsInfoCmd := fmt.Sprintf("xfs_info %s ", strings.ReplaceAll(mapperOut, "\n", "")) + xfsInfoOut, err := d.nodeDriver.RunCommandWithNoRetry( + *volumeNode, + xfsInfoCmd, + node.ConnectionOpts{ + Timeout: crashDriverTimeout, + TimeBeforeRetry: defaultRetryInterval, + }) + if err != nil { + return fmt.Errorf("Failed to get bsize for create option for pvc %s, Err: %v", volumeName, err) + } + if strings.Contains(xfsInfoOut, "bsize=2048") { + log.Infof("Blocksize 2048 is correctly configured by the create option of volume %s", volumeName) + } else { + log.Warnf("Block size 2048 has not been set for the %s filesystem", FStype) + return fmt.Errorf("Failed to find block size 2048 in the %s file system info: %s", FStype, xfsInfoOut) + } + } else if FStype == "ext4" { + ext4InfoCmd := fmt.Sprintf("tune2fs -l %s ", strings.ReplaceAll(mapperOut, "\n", "")) + ext4InfoOut, err := d.nodeDriver.RunCommandWithNoRetry( + *volumeNode, + ext4InfoCmd, + node.ConnectionOpts{ + Timeout: crashDriverTimeout, + TimeBeforeRetry: defaultRetryInterval, + }) + if err != nil { + return fmt.Errorf("Failed to get bsize for create option for pvc %s, Err: %v", volumeName, err) + } + blockSize := false + for _, b := range strings.Split(ext4InfoOut, "\n") { + if strings.Contains(b, "Block size") && strings.Contains(b, "2048") { + blockSize = true + break + } + } + if blockSize { + log.Infof("Blocksize 2048 is correctly configured by the create options of volume %s", volumeName) + } else { + log.Warnf("Block size 2048 has not been set for the %s filesystem", FStype) + return fmt.Errorf("Failed to find block size 2048 in the %s file system info: %s", FStype, ext4InfoOut) + } + } + return nil +} + func (d *portworx) UpdateSharedv4FailoverStrategyUsingPxctl(volumeName string, strategy api.Sharedv4FailoverStrategy_Value) error { nodes := node.GetStorageDriverNodes() var strategyStr string @@ -3474,7 +3582,7 @@ func (d *portworx) GetKvdbMembers(n node.Node) (map[string]*torpedovolume.Metada url = netutil.MakeURL("http://", endpoint, int(pxdRestPort)) } // TODO replace by sdk call whenever it is available - log.Infof("Url to call %v", url) + log.Debugf("Url to call %v", url) c, err := client.NewClient(url, "", "") if err != nil { return nil, err } @@ -4792,7 +4900,7 @@ func addDrive(n node.Node, drivePath string, poolID int32, d *portworx) error { return fmt.Errorf("failed to add drive [%s] on node [%s]", drivePath, n.Name) } - if !strings.Contains(addDriveStatus.Status, driveAddSuccessStatus) { + if !strings.Contains(addDriveStatus.Status, driveAddSuccessStatus) && !strings.Contains(addDriveStatus.Status, metadataAddSuccessStatus) { return fmt.Errorf("failed to add drive [%s] on node [%s], AddDrive Status: %+v", drivePath, n.Name, addDriveStatus) } diff --git a/vendor/github.com/portworx/torpedo/drivers/volume/volume.go b/vendor/github.com/portworx/torpedo/drivers/volume/volume.go index a49a8d0d55..7d78c645cd 100644 --- a/vendor/github.com/portworx/torpedo/drivers/volume/volume.go +++ b/vendor/github.com/portworx/torpedo/drivers/volume/volume.go @@ -336,9 +336,15 @@ type Driver interface { //UpdateIOPriority IO priority using pxctl command UpdateIOPriority(volumeName string, priorityType string) error - //validate mount options by executing mount command + //UpdateStickyFlag updates the sticky flag using pxctl command + UpdateStickyFlag(volumeName, stickyOption string) error + + //ValidatePureFaFbMountOptions validates mount options by executing mount command ValidatePureFaFbMountOptions(volumeName string, mountoption []string, volumeNode *node.Node) error + //ValidatePureFaCreateOptions validates create options using xfs_info and tune2fs commands + ValidatePureFaCreateOptions(volumeName string, FSType string, volumeNode *node.Node) error + // UpdateSharedv4FailoverStrategyUsingPxctl updates the sharedv4 failover strategy using pxctl UpdateSharedv4FailoverStrategyUsingPxctl(volumeName string, strategy api.Sharedv4FailoverStrategy_Value) error @@ -355,10 +361,10 @@ type Driver interface { ValidateDriver(endpointVersion string, autoUpdateComponents bool) error // ExpandPool resizes a pool of a given ID - ExpandPool(poolUID string, operation api.SdkStoragePool_ResizeOperationType, size uint64) error + ExpandPool(poolUID string, operation api.SdkStoragePool_ResizeOperationType, size uint64, skipWaitForCleanVolumes bool) error // ExpandPoolUsingPxctlCmd resizes pool of a given ID using CLI Command - ExpandPoolUsingPxctlCmd(n node.Node, poolUUID string, operation
api.SdkStoragePool_ResizeOperationType, size uint64) error + ExpandPoolUsingPxctlCmd(n node.Node, poolUUID string, operation api.SdkStoragePool_ResizeOperationType, size uint64, skipWaitForCleanVolumes bool) error // ListStoragePools lists all existing storage pools ListStoragePools(labelSelector metav1.LabelSelector) (map[string]*api.StoragePool, error) diff --git a/vendor/github.com/portworx/torpedo/pkg/aetosutil/dashboardutil.go b/vendor/github.com/portworx/torpedo/pkg/aetosutil/dashboardutil.go index 6d7fb080a4..4223e6b7f5 100644 --- a/vendor/github.com/portworx/torpedo/pkg/aetosutil/dashboardutil.go +++ b/vendor/github.com/portworx/torpedo/pkg/aetosutil/dashboardutil.go @@ -31,8 +31,8 @@ var ( const ( //DashBoardBaseURL for posting logs - DashBoardBaseURL = "http://aetos.pwx.purestorage.com/dashboard" //"http://aetos-dm.pwx.purestorage.com:3939/dashboard" - AetosBaseURL = "http://aetos.pwx.purestorage.com" + DashBoardBaseURL = "https://aetos.pwx.purestorage.com/dashboard" //"http://aetos-dm.pwx.purestorage.com:3939/dashboard" + AetosBaseURL = "https://aetos.pwx.purestorage.com" ) const ( @@ -163,10 +163,12 @@ func (d *Dashboard) TestSetBegin(testSet *TestSet) { if err != nil { logrus.Errorf("TestSetId creation failed. Cause : %v", err) } - dashURL = fmt.Sprintf("Dashboard URL : %s/resultSet/testSetID/%d", AetosBaseURL, d.TestSetID) - os.Setenv("DASH_UID", fmt.Sprint(d.TestSetID)) } } + if d.TestSetID != 0 { + dashURL = fmt.Sprintf("Dashboard URL : %s/resultSet/testSetID/%d", AetosBaseURL, d.TestSetID) + os.Setenv("DASH_UID", fmt.Sprint(d.TestSetID)) + } logrus.Info(dashURL) } @@ -260,7 +262,7 @@ func (d *Dashboard) TestSetUpdate(testSet *TestSet) { resp, respStatusCode, err := rest.PUT(updateTestSetURL, testSet, nil, nil) if err != nil { - logrus.Errorf("Error in updating TestSet, Caose: %v", err) + logrus.Errorf("Error in updating TestSet, Cause: %v", err) } else if respStatusCode != http.StatusOK { logrus.Errorf("Failed to update TestSet, Resp : %s", string(resp)) } diff --git a/vendor/github.com/portworx/torpedo/pkg/restutil/restutil.go b/vendor/github.com/portworx/torpedo/pkg/restutil/restutil.go index 7905dbb274..f47978acd5 100644 --- a/vendor/github.com/portworx/torpedo/pkg/restutil/restutil.go +++ b/vendor/github.com/portworx/torpedo/pkg/restutil/restutil.go @@ -2,6 +2,7 @@ package restutil import ( "bytes" + "crypto/tls" "encoding/json" "github.com/sirupsen/logrus" "io" @@ -22,8 +23,8 @@ const ( defaultRestTimeOut = 10 * time.Second ) -// Get rest get call -func Get(url string, auth *Auth, headers map[string]string) ([]byte, int, error) { +// GET rest get call +func GET(url string, auth *Auth, headers map[string]string) ([]byte, int, error) { respBody, respStatusCode, err := getResponse(http.MethodGet, url, nil, auth, headers) if err != nil { @@ -97,6 +98,11 @@ func getResponse(httpMethod, url string, payload interface{}, auth *Auth, header setBasicAuthAndHeaders(req, auth, headers) client := &http.Client{ Timeout: defaultRestTimeOut, + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, + }, + }, } var resp *http.Response resp, err = client.Do(req) diff --git a/vendor/modules.txt b/vendor/modules.txt index 8bf3d8c74e..803c8c9e16 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -985,7 +985,7 @@ github.com/portworx/sched-ops/k8s/rbac github.com/portworx/sched-ops/k8s/storage github.com/portworx/sched-ops/k8s/stork github.com/portworx/sched-ops/task -# github.com/portworx/torpedo v0.20.4-rc1.0.20210325154352-eb81b0cdd145 => 
github.com/portworx/torpedo v0.0.0-20230410110229-ad7b0df86670 +# github.com/portworx/torpedo v0.20.4-rc1.0.20210325154352-eb81b0cdd145 => github.com/portworx/torpedo v0.0.0-20230512010535-e219f683f7f9 ## explicit; go 1.19 github.com/portworx/torpedo/drivers/api github.com/portworx/torpedo/drivers/node @@ -2274,7 +2274,7 @@ sigs.k8s.io/yaml # github.com/libopenstorage/openstorage => github.com/libopenstorage/openstorage v1.0.1-0.20230324214216-7f88436db3de # github.com/portworx/kdmp => github.com/portworx/kdmp v0.4.1-0.20230316085313-95fc97e8493b # github.com/portworx/sched-ops => github.com/portworx/sched-ops v1.20.4-rc1.0.20230330091134-421296e5f8d0 -# github.com/portworx/torpedo => github.com/portworx/torpedo v0.0.0-20230410110229-ad7b0df86670 +# github.com/portworx/torpedo => github.com/portworx/torpedo v0.0.0-20230512010535-e219f683f7f9 # gopkg.in/fsnotify.v1 v1.4.7 => github.com/fsnotify/fsnotify v1.4.7 # helm.sh/helm/v3 => helm.sh/helm/v3 v3.10.3 # k8s.io/api => k8s.io/api v0.25.0
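
Note on the vendored interface change above: ExpandPool and ExpandPoolUsingPxctlCmd now take a fourth argument, skipWaitForCleanVolumes, so every Driver implementation and call site has to be updated together. A minimal sketch of an updated caller, assuming an initialized torpedo volume driver; poolUUID, targetSize, and the node value are hypothetical, the resize-type constant comes from the vendored openstorage SDK, and passing false preserves the previous wait-for-clean-volumes behavior while true maps to the `-f` flag on `pxctl sv pool expand`:

    // Hedged sketch: updating an existing pool-expand call for the new
    // skipWaitForCleanVolumes parameter (poolUUID, targetSize, n are
    // placeholders for illustration).
    operation := api.SdkStoragePool_RESIZE_TYPE_ADD_DISK
    // false = keep the old behavior and wait for volumes to be clean
    if err := volumeDriver.ExpandPool(poolUUID, operation, targetSize, false); err != nil {
        return err
    }
    // true = skip the wait; via the CLI path this appends -f to the pxctl command
    if err := volumeDriver.ExpandPoolUsingPxctlCmd(n, poolUUID, operation, targetSize, true); err != nil {
        return err
    }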
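
The CustomResourceObjectYAML flow in k8s.go also introduces a file-naming contract that is easy to miss in the hunks above: ParseSpecs treats any spec file whose base name starts with "cr-" as an opaque custom resource, and such objects are applied with kubectl after every other spec is created and deleted before any other spec is destroyed. A rough sketch of the dispatch under a hypothetical spec path; filepath.Base is equivalent to the strings.Split used in the patch:

    // Hypothetical app spec directory:
    //   specs/myapp/crd.yaml      -> decoded as a CustomResourceDefinition and
    //                                registered via createCRDObjects
    //   specs/myapp/cr-myobj.yaml -> matched below, wrapped as
    //                                CustomResourceObjectYAML and later applied
    //                                with `kubectl apply -f cr-myobj.yaml -n <ns>`
    fileName := "specs/myapp/cr-myobj.yaml"
    if strings.HasPrefix(filepath.Base(fileName), "cr-") {
        specs = append(specs, &CustomResourceObjectYAML{Path: fileName})
    }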