diff --git a/NOTICE.MD b/NOTICE.MD
index 50111e529..367a8662c 100644
--- a/NOTICE.MD
+++ b/NOTICE.MD
@@ -853,6 +853,43 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+--------------------------------------------------------------------------------
+#### Module : github.com/gorilla/securecookie
+Version : v1.1.2
+Time : 2023-10-18T11:18:40Z
+Licence : BSD-3-Clause
+
+Contents of probable licence file $GOMODCACHE/github.com/gorilla/securecookie@v1.1.2/LICENSE:
+
+Copyright (c) 2023 The Gorilla Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
--------------------------------------------------------------------------------
#### Module : github.com/gorilla/sessions
Version : v1.4.0
@@ -1660,37 +1697,6 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
---------------------------------------------------------------------------------
-#### Module : github.com/knadh/koanf/providers/env
-Version : v1.0.0
-Time : 2024-09-26T07:31:31Z
-Licence : MIT
-
-Contents of probable licence file $GOMODCACHE/github.com/knadh/koanf/providers/env@v1.0.0/LICENSE:
-
-The MIT License
-
-Copyright (c) 2019, Kailash Nadh. https://github.com/knadh
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
-
--------------------------------------------------------------------------------
#### Module : github.com/knadh/koanf/providers/file
Version : v1.1.2
@@ -1724,11 +1730,11 @@ THE SOFTWARE.
--------------------------------------------------------------------------------
#### Module : github.com/knadh/koanf/v2
-Version : v2.1.1
-Time : 2024-04-03T07:16:15Z
+Version : v2.1.2
+Time : 2024-11-06T08:48:20Z
Licence : MIT
-Contents of probable licence file $GOMODCACHE/github.com/knadh/koanf/v2@v2.1.1/LICENSE:
+Contents of probable licence file $GOMODCACHE/github.com/knadh/koanf/v2@v2.1.2/LICENSE:
The MIT License
@@ -3864,6 +3870,39 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
+--------------------------------------------------------------------------------
+#### Module : github.com/creack/pty
+Version : v1.1.9
+Time : 2019-09-25T15:36:33Z
+Licence : MIT
+
+Contents of probable licence file $GOMODCACHE/github.com/creack/pty@v1.1.9/LICENSE:
+
+Copyright (c) 2011 Keith Rarick
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute,
+sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall
+be included in all copies or substantial portions of the
+Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
+OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+
--------------------------------------------------------------------------------
#### Module : github.com/davecgh/go-spew
Version : v1.1.2-0.20180830191138-d8f796af33cc
@@ -5595,11 +5634,11 @@ SOFTWARE.
--------------------------------------------------------------------------------
#### Module : github.com/go-viper/mapstructure/v2
-Version : v2.0.0-alpha.1
-Time : 2023-12-19T21:02:02Z
+Version : v2.2.1
+Time : 2024-09-22T22:42:12Z
Licence : MIT
-Contents of probable licence file $GOMODCACHE/github.com/go-viper/mapstructure/v2@v2.0.0-alpha.1/LICENSE:
+Contents of probable licence file $GOMODCACHE/github.com/go-viper/mapstructure/v2@v2.2.1/LICENSE:
The MIT License (MIT)
@@ -6152,43 +6191,6 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
---------------------------------------------------------------------------------
-#### Module : github.com/gorilla/securecookie
-Version : v1.1.2
-Time : 2023-10-18T11:18:40Z
-Licence : BSD-3-Clause
-
-Contents of probable licence file $GOMODCACHE/github.com/gorilla/securecookie@v1.1.2/LICENSE:
-
-Copyright (c) 2023 The Gorilla Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
--------------------------------------------------------------------------------
#### Module : github.com/gorilla/websocket
Version : v1.4.2
diff --git a/README.md b/README.md
index f9ed17ba7..05a76d2ad 100644
--- a/README.md
+++ b/README.md
@@ -15,7 +15,7 @@
LinkedIn |
Twitter |
YouTube |
-Docs
+Docs
Quesma is an actively developed database gateway currently in pre-alpha Early Access. Route your queries through a translation layer that seamlessly connects your apps with the modern database platforms of your choice. No more query refactoring during migrations.
diff --git a/ci/it/configs/quesma-with-dual-writes-and-common-table.yml.template b/ci/it/configs/quesma-with-dual-writes-and-common-table.yml.template
index 6971eab85..3b467060c 100644
--- a/ci/it/configs/quesma-with-dual-writes-and-common-table.yml.template
+++ b/ci/it/configs/quesma-with-dual-writes-and-common-table.yml.template
@@ -33,11 +33,13 @@ processors:
logs-dual-query:
target: [ c, e ]
logs-4:
- useCommonTable: true
- target: [ c ]
+ target:
+ - c:
+ useCommonTable: true
logs-5:
- useCommonTable: true
- target: [ c ]
+ target:
+ - c:
+ useCommonTable: true
"*":
target: [ e ]
- name: IP
@@ -53,13 +55,13 @@ processors:
logs-dual-query:
target: [ c, e ]
logs-4:
- useCommonTable: true
- target: [ c ]
+ target:
+ - c:
+ useCommonTable: true
+ logs-5:
+ target:
"*":
target: [ e ]
- logs-5:
- useCommonTable: true
- target: [ ]
pipelines:
- name: my-elasticsearch-proxy-read
diff --git a/docs/public/README.md b/docs/public/README.md
index 1dac618f9..3bdcb0f93 100644
--- a/docs/public/README.md
+++ b/docs/public/README.md
@@ -1,6 +1,6 @@
-# Quesma EAP documentation
+# Quesma documentation
-This folder contains our EAP documentation available at https://eap.quesma.com.
+This folder contains our documentation available at https://docs.quesma.com (formerly https://eap.quesma.com).
These docs are just static files generated with [Vitepress](https://vitepress.dev) and published via CloudFlare Pages.
diff --git a/docs/public/docs/.vitepress/config.mts b/docs/public/docs/.vitepress/config.mts
index 829cec411..9d666b8b6 100644
--- a/docs/public/docs/.vitepress/config.mts
+++ b/docs/public/docs/.vitepress/config.mts
@@ -41,8 +41,6 @@ export default defineConfig({
{ text: 'Query Hydrolix tables as Elasticsearch indices', link: '/example-2-0'}
]
},
- //{ text: 'Scenario I', link: '/scenario-1' },
- //{ text: 'Reference Docker compose configurations', link: '/reference-conf' }
],
},
{ text: 'Advanced configuration',
diff --git a/docs/public/docs/config-primer.md b/docs/public/docs/config-primer.md
index 85e887738..f572a1aa4 100644
--- a/docs/public/docs/config-primer.md
+++ b/docs/public/docs/config-primer.md
@@ -70,12 +70,17 @@ frontendConnectors:
listenPort: 8080
```
+The supported configuration options for frontend connectors (under `config`):
+* `listenPort` - port number on which the frontend connector will listen for incoming requests
+* `disableAuth` - when set to `true`, disables authentication for incoming requests (optional, defaults to false). If you use Elasticsearch/Kibana without authentication, set it to `true`.
+
+
#### Backend connectors
Backend connector has to have a `name`, `type` and `config` fields.
* `name` is a unique identifier for the connector
* `type` specifies the type of the connector.\
- At this moment, only three backend connector types are allowed: `elasticsearch`, `clickhouse` (used for ClickHouse Cloud SaaS service, `clickhouse-os` and `hydrolix`.
+  At this moment, only four backend connector types are allowed: `elasticsearch`, `clickhouse` (used for ClickHouse Cloud SaaS service), `clickhouse-os` (self-hosted ClickHouse) and `hydrolix`.
* `config` is a set of configuration options for the connector.
```yaml
backendConnectors:
@@ -84,16 +89,25 @@ backendConnectors:
config:
user: "elastic"
password: "change-me"
- url: "http://elasticsearch:9200"
+ url: "http://192.168.0.7:9200"
- name: my-clickhouse-data-source
type: clickhouse-os
config:
user: "username"
password: "username-is-password"
database: "dbname"
- url: "clickhouse://clickhouse:9000"
+ url: "clickhouse://192.168.0.9:9000"
```
-**WARNING:** When connecting to ClickHouse or Hydrolix, only the native protocol connection (`clickhouse://`) is supported.
+
+The supported configuration options for backend connectors (under `config`):
+* `url` - connection string to the backend service in a URL format (`protocol://host:port`):
+ * for Elastic/OpenSearch the expected format is `http://host:port` (Elastic/OpenSearch default port is 9200)
+ * for ClickHouse/Hydrolix the expected format is `clickhouse://host:port` (ClickHouse default port is 9000, ClickHouse/Hydrolix default encrypted port is 9440). Note that Quesma supports only the ClickHouse native protocol (`clickhouse://`) and does not support the HTTP protocol.
+* `user` - username for authentication
+* `password` - password for authentication
+* `database` - name of the database to connect to. It is optional for ClickHouse, but strictly required for Hydrolix, where it is also referred to as "project".
+* `adminUrl` - URL for administrative operations to render a handy link in Quesma management UI (optional)
+* `disableTLS` - when set to true, disables TLS for the connection (optional)
### Processors
@@ -178,14 +192,33 @@ The configuration for an index consists of the following configuration options:
will dual write ingest requests to `my_index` to both ElasticSearch and ClickHouse.
Note that ElasticSearch/OpenSearch is the only supported backend for the `*` entry.
If no targets are provided (example: `target: []`) in the configuration of an index in the ingest processor, ingest for that index will be disabled and incoming data will be dropped.
-- `override` (optional): override the name of table in Hydrolix/ClickHouse (by default Quesma uses the same table name as the index name)
-- `useCommonTable` (optional): if enabled, Quesma will store data in a single Hydrolix/ClickHouse table named `quesma_common_table`. See [ingest documentation](/ingest.md) for more details.
+
+ Some backend connectors have additional attributes which may be used. For example the following configuration sets `useCommonTable` for `backend-clickhouse` target:
+ ```yaml
+ my_index:
+ target:
+ - backend-clickhouse:
+ useCommonTable: true
+ ```
+ Currently only the ClickHouse backend connector supports the following attributes:
+ - `useCommonTable` (optional): if enabled, Quesma will store data in a single Hydrolix/ClickHouse table named `quesma_common_table`. See [ingest documentation](/ingest.md) for more details.
+ - `tableName` (optional): override the name of table in Hydrolix/ClickHouse (by default Quesma uses the same table name as the index name)
+- `schemaOverrides` (optional): manual overrides of schema information for an index. Quesma infers schema for an index based on the data ingested and the schema information fetched from ClickHouse. `schemaOverrides` allows you to override this inferred schema for some fields. For example the following configuration:
+ ```yaml
+ my_index:
+ target: [ backend-clickhouse ]
+ schemaOverrides:
+ "product_name":
+ type: "text"
+ ```
+ changes the type of `product_name` field to `text`. Note: `schemaOverrides` are currently not supported in `*` configuration.
## Optional configuration options
### Quesma licensing configuration
-In order to be able to use `hydrolix` or `clickhouse` backend connectors, one needs to supply `licenseKey` in the configuration file. Contact us at suppor@quesma.com if you need one.
+In order to be able to use `hydrolix` or `clickhouse` backend connectors Quesma requires a commercial license key (supplied in the `licenseKey` field of the configuration file).
+Contact us at support@quesma.com if you need one.
```yaml
licenseKey: ZXlKcGJuTjBZV3hz...
```
diff --git a/docs/public/docs/example-1.md b/docs/public/docs/example-1.md
index 4fae8d393..e1243b2ed 100644
--- a/docs/public/docs/example-1.md
+++ b/docs/public/docs/example-1.md
@@ -32,7 +32,7 @@ flowchart LR
- name: backend-elasticsearch
type: elasticsearch
config:
- url: #PLACE_YOUR_ELASTICSEARCH_URL_HERE
+ url: #PLACE_YOUR_ELASTICSEARCH_URL_HERE, for example: http://192.168.0.7:9200
user: #PLACE_YOUR_ELASTICSEARCH_USERNAME_HERE
password: #PLACE_YOUR_ELASTICSEARCH_PASSWORD_HERE
processors:
diff --git a/docs/public/docs/example-2-0-clickhouse-specific.md b/docs/public/docs/example-2-0-clickhouse-specific.md
index a61fbdb7e..5cc2a5e52 100644
--- a/docs/public/docs/example-2-0-clickhouse-specific.md
+++ b/docs/public/docs/example-2-0-clickhouse-specific.md
@@ -55,14 +55,14 @@ flowchart LR
- name: minimal-elasticsearch
type: elasticsearch
config:
- url: #PLACE_YOUR_ELASTICSEARCH_URL_HERE
+ url: #PLACE_YOUR_ELASTICSEARCH_URL_HERE, for example: http://192.168.0.7:9200
user: #PLACE_YOUR_ELASTICSEARCH_USERNAME_HERE
password: #PLACE_YOUR_ELASTICSEARCH_PASSWORD_HERE
- name: clickhouse-instance
type: clickhouse-os
#type: clickhouse # use for ClickHouse cloud service only
config:
- url: #PLACE_YOUR_CLICKHOUSE_URL_HERE
+ url: #PLACE_YOUR_CLICKHOUSE_URL_HERE, for example: clickhouse://192.168.0.7:9000
user: #PLACE_YOUR_CLICKHOUSE_USER_HERE
password: #PLACE_YOUR_CLICKHOUSE_PASSWORD_HERE
database: #PLACE_YOUR_CLICKHOUSE_DATABASE_NAME_HERE
diff --git a/docs/public/docs/example-2-0.md b/docs/public/docs/example-2-0.md
index 6932af069..d51000d19 100644
--- a/docs/public/docs/example-2-0.md
+++ b/docs/public/docs/example-2-0.md
@@ -42,13 +42,13 @@ flowchart LR
- name: minimal-elasticsearch
type: elasticsearch
config:
- url: #PLACE_YOUR_ELASTICSEARCH_URL_HERE
+ url: #PLACE_YOUR_ELASTICSEARCH_URL_HERE, for example: http://192.168.0.7:9200
user: #PLACE_YOUR_ELASTICSEARCH_USERNAME_HERE
password: #PLACE_YOUR_ELASTICSEARCH_PASSWORD_HERE
- name: hydrolix-instance
type: hydrolix
config:
- url: #PLACE_YOUR_HYDROLIX_URL_HERE
+ url: #PLACE_YOUR_HYDROLIX_URL_HERE, for example: clickhouse://companyname.hydrolix.live:9440
user: #PLACE_YOUR_HYDROLIX_USER_HERE
password: #PLACE_YOUR_HYDROLIX_PASSWORD_HERE
database: #PLACE_YOUR_HYDROLIX_DATABASE_NAME_HERE
diff --git a/docs/public/docs/example-2-1-hydro-specific.md b/docs/public/docs/example-2-1-hydro-specific.md
index f7584f0b7..f7dc8ccc2 100644
--- a/docs/public/docs/example-2-1-hydro-specific.md
+++ b/docs/public/docs/example-2-1-hydro-specific.md
@@ -44,13 +44,13 @@ flowchart LR
- name: elasticsearch-instance
type: elasticsearch
config:
- url: #PLACE_YOUR_ELASTICSEARCH_URL_HERE
+ url: #PLACE_YOUR_ELASTICSEARCH_URL_HERE, for example: http://192.168.0.7:9200
user: #PLACE_YOUR_ELASTICSEARCH_USERNAME_HERE
password: #PLACE_YOUR_ELASTICSEARCH_PASSWORD_HERE
- name: hydrolix-instance
type: hydrolix
config:
- url: #PLACE_YOUR_HYDROLIX_URL_HERE
+ url: #PLACE_YOUR_HYDROLIX_URL_HERE, for example: clickhouse://companyname.hydrolix.live:9440
user: #PLACE_YOUR_HYDROLIX_USER_HERE
password: #PLACE_YOUR_HYDROLIX_PASSWORD_HERE
database: #PLACE_YOUR_HYDROLIX_DATABASE_NAME_HERE
diff --git a/docs/public/docs/example-2-1.md b/docs/public/docs/example-2-1.md
index 75c21be6d..ff988c329 100644
--- a/docs/public/docs/example-2-1.md
+++ b/docs/public/docs/example-2-1.md
@@ -52,14 +52,14 @@ K[Kibana/OSD] --> Q((Quesma))
- name: elasticsearch-instance
type: elasticsearch
config:
- url: #PLACE_YOUR_ELASTICSEARCH_URL_HERE
+ url: #PLACE_YOUR_ELASTICSEARCH_URL_HERE, for example: http://192.168.0.7:9200
user: #PLACE_YOUR_ELASTICSEARCH_USERNAME_HERE
password: #PLACE_YOUR_ELASTICSEARCH_PASSWORD_HERE
- name: clickhouse-instance
type: clickhouse-os
#type: clickhouse # use for ClickHouse cloud service only
config:
- url: #PLACE_YOUR_CLICKHOUSE_URL_HERE
+ url: #PLACE_YOUR_CLICKHOUSE_URL_HERE, for example: clickhouse://192.168.0.7:9000
user: #PLACE_YOUR_CLICKHOUSE_USER_HERE
password: #PLACE_YOUR_CLICKHOUSE_PASSWORD_HERE
database: #PLACE_YOUR_CLICKHOUSE_DATABASE_NAME_HERE
diff --git a/docs/public/docs/index.md b/docs/public/docs/index.md
index 8931b644e..93d7f309c 100644
--- a/docs/public/docs/index.md
+++ b/docs/public/docs/index.md
@@ -6,9 +6,9 @@ hero:
image:
light: /logo/quesma-logo-black-full-svg.svg
dark: /logo/quesma-logo-white-full-svg.svg
- name: "Quesma Early Access Preview (EAP)"
+ name: "Quesma Documentation"
#text: "Quesma EAP docs"
- tagline: Welcome to the Quesma Early Access Preview! We're excited to have you on board and can't wait to hear your feedback! This website will help you get started.
+ tagline: Welcome to the Quesma Documentation! This website will help you get started.
actions:
- theme: brand
text: Getting started
diff --git a/docs/public/docs/ingest.md b/docs/public/docs/ingest.md
index addec9666..90821fcf3 100644
--- a/docs/public/docs/ingest.md
+++ b/docs/public/docs/ingest.md
@@ -100,17 +100,19 @@ However, you can also store multiple indexes in a single table. To do this, conf
```yaml
indexes:
first_index:
- target: [ backend-clickhouse ]
- useCommonTable: true
+ target:
+ - backend-clickhouse:
+ useCommonTable: true
second_index:
- target: [ backend-clickhouse ]
- useCommonTable: true
+ target:
+ - backend-clickhouse:
+ useCommonTable: true
"*":
target: [ backend-elastic ]
...
```
-These indexes will then be stored in the quesma_common_table table.
+These indexes will then be stored in the `quesma_common_table` table.
### Schema evolution: adding new fields
diff --git a/docs/public/docs/pipeline-example-1.md b/docs/public/docs/pipeline-example-1.md
deleted file mode 100644
index c290722c1..000000000
--- a/docs/public/docs/pipeline-example-1.md
+++ /dev/null
@@ -1,24 +0,0 @@
-# Transparent Elasticsearch pipeline
-
-In this scenario, we configure a query pipeline to read from Elasticsearch and an ingest pipeline to write to Elasticsearch. The pipelines use the `quesma-v1-processor-noop`, which acts as a transparent proxy so the traffic is not changed whatsoever.
-
-```mermaid
-flowchart LR
- K[Kibana] --> Q((Quesma)) --> E[(Elasticsearch)]
-```
-
-Relevant Quesma configuration fragment:
-```yaml
-processors:
- - name: noop-query-processor
- type: quesma-v1-processor-noop
-pipelines:
- - name: my-elasticsearch-proxy-read
- frontendConnectors: [ elastic-query ]
- processors: [ noop-query-processor ]
- backendConnectors: [ my-minimal-elasticsearch ]
- - name: my-elasticsearch-proxy-write
- frontendConnectors: [ elastic-ingest ]
- processors: [ noop-query-processor ]
- backendConnectors: [ my-minimal-elasticsearch ]
-```
\ No newline at end of file
diff --git a/docs/public/docs/pipeline-example-2.md b/docs/public/docs/pipeline-example-2.md
deleted file mode 100644
index 6f06978c2..000000000
--- a/docs/public/docs/pipeline-example-2.md
+++ /dev/null
@@ -1,37 +0,0 @@
-# Reading from Elasticsearch and dual writing to Elasticsearch and ClickHouse
-
-In this scenario, we configure a query pipeline to read from Elasticsearch for index `my-index-1` and an ingest pipeline to write to both Elasticsearch and ClickHouse for index `my-index-1`.
-
-```mermaid
-flowchart LR
- K[Kibana] --> QQ(("Quesma
(query pipeline)")) --> |queries to my-index-1| E[(Elasticsearch)]
- L[Logstash] --> QI(("Quesma
(ingest pipeline)")) --> |ingest to my-index-1| CH[(ClickHouse)]
-```
-
-Note that for both pipelines we need to connect the ElasticSearch backend connector that's used for all internal Kibana queries.
-
-Relevant Quesma configuration fragment:
-```yaml
-processors:
- - name: a-query-processor
- type: quesma-v1-processor-query
- config:
- indexes:
- "my-index-1":
- target: [ my-minimal-elasticsearch ]
- - name: a-ingest-processor
- type: quesma-v1-processor-ingest
- config:
- indexes:
- "my-index-1":
- target: [ my-clickhouse-data-source, my-minimal-elasticsearch ]
-pipelines:
- - name: my-elasticsearch-read
- frontendConnectors: [ elastic-query ]
- processors: [ a-query-processor ]
- backendConnectors: [ my-minimal-elasticsearch, my-clickhouse-data-source ]
- - name: my-dual-write
- frontendConnectors: [ elastic-ingest ]
- processors: [ a-ingest-processor ]
- backendConnectors: [ my-minimal-elasticsearch, my-clickhouse-data-source ]
-```
diff --git a/docs/public/docs/pipeline-example-3.md b/docs/public/docs/pipeline-example-3.md
deleted file mode 100644
index d3325337e..000000000
--- a/docs/public/docs/pipeline-example-3.md
+++ /dev/null
@@ -1,37 +0,0 @@
-# Reading and writing from/to ClickHouse
-
-In this scenario, we configure a query pipeline to read from ClickHouse for index `my-index-1` and an ingest pipeline to write to both Elasticsearch and ClickHouse for index `my-index-1`.
-
-```mermaid
-flowchart LR
- K[Kibana] --> QQ(("Quesma
(query pipeline)")) --> |queries to my-index-1| CH[(ClickHouse)]
- L[Logstash] --> QI(("Quesma
(ingest pipeline)")) --> |ingest to my-index-1| CH[(ClickHouse)]
-```
-
-Note that for both pipelines we need to connect the ElasticSearch backend connector that's used for all internal Kibana queries.
-
-Relevant Quesma configuration fragment:
-```yaml
-processors:
- - name: a-query-processor
- type: quesma-v1-processor-query
- config:
- indexes:
- "my-index-1":
- target: [ my-clickhouse-data-source ]
- - name: a-ingest-processor
- type: quesma-v1-processor-ingest
- config:
- indexes:
- "my-index-1":
- target: [ my-clickhouse-data-source ]
-pipelines:
- - name: my-clickhouse-read
- frontendConnectors: [ elastic-query ]
- processors: [ a-query-processor ]
- backendConnectors: [ my-minimal-elasticsearch, my-clickhouse-data-source ]
- - name: my-clickhouse-write
- frontendConnectors: [ elastic-ingest ]
- processors: [ a-ingest-processor ]
- backendConnectors: [ my-minimal-elasticsearch, my-clickhouse-data-source ]
-```
diff --git a/docs/public/docs/scenario-1.md b/docs/public/docs/scenario-1.md
deleted file mode 100644
index 330222b4f..000000000
--- a/docs/public/docs/scenario-1.md
+++ /dev/null
@@ -1,113 +0,0 @@
-# Scenario I: No ClickHouse installed
-
-## Installation prerequisites
-
-Make sure you have the following before proceeding:
-1) Quesma Docker container image (later referred as `QUESMA_DOCKER_IMAGE`). To obtain one, email [support@quesma.com](mailto:support@quesma.com).
-2) [Docker engine](https://docs.docker.com/engine/install/) installed (e.g. `docker ps` should pass).
-
-If you are connecting Quesma to already installed and working backend databases, you also need credentials for them.
-
-### Software and hardware requirements
-
-We recommend setting up at least 2 CPU cores and 2GBs of RAM for the Quesma container. The requirements may vary depending on the workload and the number of concurrent connections.
-
-Quesma has been tested with the following software versions:
-
-| Software | Version |
-|----------------------------------|-----------------|
-| Docker | `24.0.7` |
-| Elasticsearch/Kibana | `8.11.1` |
-| ClickHouse | `24.1`, `23.12` |
-| OpenSearch/OpenSearch Dashboards | `2.12.0` |
-| Hydrolix | `v4.8.12` |
-
-
-## Installation steps
-
-Quesma is delivered as a Docker container image. Follow these steps.
-1) **Pull Quesma Docker image**
- ```shell
- export QUESMA_DOCKER_IMAGE=quesma/quesma:0.8.0-eap
- docker pull $QUESMA_DOCKER_IMAGE
- ```
- it should succeed.
-2) **Run reference setup (recommended example Quesma configuration and docker compose files for Quesma, ClickHouse, and Kibana/Elastic or OpenSearch/OSDashboards), unless you have them already installed** \
- [Download the reference setup](https://eap.quesma.com/2024-05-10/reference.tgz) for Kibana or OpenSearch Dashboard:
- ```shell
- curl -O https://eap.quesma.com/2024-07-05/reference.tgz
- tar -xzvf reference.tgz
- cd reference/
- ```
- - Alternative 1: Kibana/Elasticsearch mode (runs Quesma, ClickHouse, Elasticsearch and Kibana)
- ```shell
- docker compose -f kibana.yml up -d
- ```
- You should see Kibana running on [`http://localhost:5601`](http://localhost:5601) and Quesma debugging interface [`http://localhost:9999`](http://localhost:9999/dashboard).
- - Alternative 2: OpenSearch Dashboard/OpenSearch mode (runs Quesma, ClickHouse, OpenSearch and OpenSearch Dashboards)
- ```shell
- docker compose -f osd.yml up -d
- ```
- You should see OpenSearch Dashboard running on [`http://localhost:5601`](http://localhost:5601) and Quesma debugging interface [`http://localhost:9999`](http://localhost:9999/dashboard).
- \
-
- In case you're using ClickHouse from the reference setup - it does not come with any data, but you can execute following commands to populate it with sample data:
-
- ```shell
- ## For Kibana
- curl -w "HTTP %{http_code}" -k -o /dev/null --no-progress-meter -X POST "localhost:5601/api/sample_data/flights" \
- -H "kbn-xsrf: true" \
- -H 'Content-Type: application/json'
-
- ## For OpenSearch Dashboards
- curl -w "HTTP %{http_code}" -k -o /dev/null --no-progress-meter -X POST "localhost:5601/api/sample_data/flights" \
- -H "osd-xsrf: true" \
- -H 'Content-Type: application/json'
- ```
-3) **Customize configuration file** \
- To connect Quesma to another database, you need to create new or modify existing the YAML configuration file.
- Edit the `quesma.yml` file and replace the Clickhouse url and credentials with the new target Clickhouse or Hydrolix url and credentials.
- Please refer to the Configuration section below.
- The file is referenced using the `QUESMA_CONFIG_FILE` environment variable. \
- You may apply the setup by one of the below commands:
- ```shell
- docker compose -f kibana.yml restart
- docker compose -f osd.yml restart
- ```
- Once you set up the `indexes` configuration section and have them pointing to your ClickHouse/Hydrolix tables, you can start querying your data!
- * **Kibana**
- Navigate to `Management` -> `Stack Management` -> `Data Views (in Kibana section)` and click `Create data view` to create your first data view with your tables.
- * **OpenSearch Dashboards** \
- Navigate to `Dashboards Management` -> `Index Patterns` and click `Create Index Pattern` to create your first index pattern with your tables.
-4) **Port the configuration to the production environment**
- Feel free to use the provided docker compose for your desired environment.
- You may want to use Terraform/OpenTofu, Pulumi, or other tools to automate the deployment.
- Quesma engineers are more than happy to help at [support@quesma.com](mailto:support@quesma.com).
-
-
-## Configuration reference
-
-Quesma can be configured using dedicated configuration files written in YAML format, which looks like this:
-```yaml
-port: 8080
-logging:
- disableFileLogging: false
-```
-Additionally, you can use environment variables which **override** the configuration file settings. Please refer to the `Environment variables` section for more details.
-
-### Configuration file
-
-Quesma container has to have read access to the configuration file, which absolute path should be passed with `QUESMA_CONFIG_FILE` environment variable.
-Configuration is being loaded at the start of the service and is **not** being reloaded during runtime. For any changes to take effect, Quesma restart is required.
-
-We encourage you to take a look at **typical deployment scenarios** along with relevant example configurations
-
-For the full list of configuration options, please refer to the [Configuration primer](./config-primer.md).
-
-### Environment variables
-
-Environment variable names are case-sensitive and follow the pattern `QUESMA_`, except here delimiter being `_` instead of `.`.
-
-Examples:
-* `QUESMA_logging_level=debug` overrides `logging.level` in the config file
-* `QUESMA_licenseKey=xyz` overrides `licenseKey` in the config file
diff --git a/quesma/ab_testing/collector/collector.go b/quesma/ab_testing/collector/collector.go
index e75cbdf08..989e1d734 100644
--- a/quesma/ab_testing/collector/collector.go
+++ b/quesma/ab_testing/collector/collector.go
@@ -17,6 +17,7 @@ type ResponseMismatch struct {
Mismatches string `json:"mismatches"` // JSON array of differences
Message string `json:"message"` // human readable variant of the array above
+ SHA1 string `json:"sha1"` // SHA1 of the differences
Count int `json:"count"` // number of differences
TopMismatchType string `json:"top_mismatch_type"` // most common difference type
@@ -36,7 +37,8 @@ type EnrichedResults struct {
QuesmaBuildHash string `json:"quesma_hash"`
Errors []string `json:"errors,omitempty"`
- KibanaDashboardId string `json:"kibana_dashboard_id,omitempty"`
+ KibanaDashboardId string `json:"kibana_dashboard_id,omitempty"`
+ KibanaDashboardPanelId string `json:"kibana_dashboard_panel_id,omitempty"`
}
type pipelineProcessor interface {
diff --git a/quesma/ab_testing/collector/diff.go b/quesma/ab_testing/collector/diff.go
index 612d83b61..d5095cc46 100644
--- a/quesma/ab_testing/collector/diff.go
+++ b/quesma/ab_testing/collector/diff.go
@@ -3,6 +3,7 @@
package collector
import (
+ "crypto/sha1"
"encoding/json"
"fmt"
"quesma/jsondiff"
@@ -36,37 +37,73 @@ func (t *diffTransformer) mostCommonMismatchType(mismatches []jsondiff.JSONMisma
func (t *diffTransformer) process(in EnrichedResults) (out EnrichedResults, drop bool, err error) {
+ mismatches := jsondiff.Mismatches{}
+
d, err := jsondiff.NewElasticResponseJSONDiff()
if err != nil {
return in, false, err
}
- jsonA, err := types.ParseJSON(in.A.Body)
- if err != nil {
- in.Mismatch.IsOK = false
- in.Mismatch.Message = fmt.Sprintf("failed to parse A response: %v", err)
- err = fmt.Errorf("failed to parse A response: %w", err)
- in.Errors = append(in.Errors, err.Error())
- return in, false, nil
- }
+ if in.A.Error != "" || in.B.Error != "" {
- jsonB, err := types.ParseJSON(in.B.Body)
- if err != nil {
- in.Mismatch.IsOK = false
- in.Mismatch.Message = fmt.Sprintf("failed to parse B response: %v", err)
- err = fmt.Errorf("failed to parse B response: %w", err)
- in.Errors = append(in.Errors, err.Error())
- return in, false, nil
- }
+ if in.A.Error != "" {
+ mismatches = append(mismatches, jsondiff.JSONMismatch{
+ Type: "error",
+ Message: fmt.Sprintf("\nA response has an error: %s", in.A.Error),
+ Path: "n/a",
+ Expected: "n/a",
+ Actual: "n/a",
+ })
+ }
- mismatches, err := d.Diff(jsonA, jsonB)
+ if in.B.Error != "" {
+ mismatches = append(mismatches, jsondiff.JSONMismatch{
+ Type: "error",
+ Message: fmt.Sprintf("\nB response has an error: %s", in.B.Error),
+ Path: "n/a",
+ Expected: "n/a",
+ Actual: "n/a",
+ })
+ }
+
+ } else {
+
+ jsonA, err := types.ParseJSON(in.A.Body)
+ if err != nil {
+ in.Mismatch.IsOK = false
+ in.Mismatch.Message = fmt.Sprintf("failed to parse A response: %v", err)
+ err = fmt.Errorf("failed to parse A response: %w", err)
+ in.Errors = append(in.Errors, err.Error())
+ return in, false, nil
+ }
+
+ jsonB, err := types.ParseJSON(in.B.Body)
+ if err != nil {
+ in.Mismatch.IsOK = false
+ in.Mismatch.Message = fmt.Sprintf("failed to parse B response: %v", err)
+ err = fmt.Errorf("failed to parse B response: %w", err)
+ in.Errors = append(in.Errors, err.Error())
+ return in, false, nil
+ }
+
+ mismatches, err = d.Diff(jsonA, jsonB)
+ if err != nil {
+ return in, false, err
+ }
- if err != nil {
- return in, false, err
}
if len(mismatches) > 0 {
+ b, err := json.Marshal(mismatches)
+
+ if err != nil {
+ return in, false, fmt.Errorf("failed to marshal mismatches: %w", err)
+ }
+
+ in.Mismatch.Mismatches = string(b)
+ hash := sha1.Sum(b)
+ in.Mismatch.SHA1 = fmt.Sprintf("%x", hash)
in.Mismatch.IsOK = false
in.Mismatch.Count = len(mismatches)
@@ -75,20 +112,20 @@ func (t *diffTransformer) process(in EnrichedResults) (out EnrichedResults, drop
in.Mismatch.TopMismatchType = topMismatchType
}
+ size := len(mismatches)
+
// if there are too many mismatches, we only show the first 20
// this is to avoid overwhelming the user with too much information
const mismatchesSize = 20
if len(mismatches) > mismatchesSize {
mismatches = mismatches[:mismatchesSize]
+ mismatches = append(mismatches, jsondiff.JSONMismatch{
+ Type: "info",
+ Message: fmt.Sprintf("only first %d mismatches, total %d", mismatchesSize, size),
+ })
}
- b, err := json.MarshalIndent(mismatches, "", " ")
-
- if err != nil {
- return in, false, fmt.Errorf("failed to marshal mismatches: %w", err)
- }
- in.Mismatch.Mismatches = string(b)
in.Mismatch.Message = mismatches.String()
} else {
diff --git a/quesma/ab_testing/collector/processors.go b/quesma/ab_testing/collector/processors.go
index 3a84603a8..275f98a94 100644
--- a/quesma/ab_testing/collector/processors.go
+++ b/quesma/ab_testing/collector/processors.go
@@ -68,25 +68,32 @@ func (t *extractKibanaIds) name() string {
}
var opaqueIdKibanaDashboardIdRegexp = regexp.MustCompile(`dashboards:([0-9a-f-]+)`)
+var opaqueIdKibanaPanelIdRegexp = regexp.MustCompile(`dashboard:dashboards:.*;.*:.*:([0-9a-f-]+)`)
func (t *extractKibanaIds) process(in EnrichedResults) (out EnrichedResults, drop bool, err error) {
opaqueId := in.OpaqueID
- // TODO maybe we should extract panel id as well
+ in.KibanaDashboardId = "n/a"
+ in.KibanaDashboardPanelId = "n/a"
if opaqueId == "" {
- in.KibanaDashboardId = "n/a"
return in, false, nil
}
matches := opaqueIdKibanaDashboardIdRegexp.FindStringSubmatch(opaqueId)
if len(matches) < 2 {
- in.KibanaDashboardId = "n/a"
return in, false, nil
}
in.KibanaDashboardId = matches[1]
+
+ panelsMatches := opaqueIdKibanaPanelIdRegexp.FindStringSubmatch(opaqueId)
+ if len(panelsMatches) < 2 {
+ return in, false, nil
+ }
+ in.KibanaDashboardPanelId = panelsMatches[1]
+
return in, false, nil
}
diff --git a/quesma/clickhouse/clickhouse.go b/quesma/clickhouse/clickhouse.go
index b1e916407..5d32fe765 100644
--- a/quesma/clickhouse/clickhouse.go
+++ b/quesma/clickhouse/clickhouse.go
@@ -215,6 +215,10 @@ func (lm *LogManager) executeRawQuery(query string) (*sql.Rows, error) {
}
}
+func (lm *LogManager) GetDB() *sql.DB {
+ return lm.chDb
+}
+
/* The logic below contains a simple checks that are executed by connectors to ensure that they are
not connected to the data sources which are not allowed by current license. */
diff --git a/quesma/go.mod b/quesma/go.mod
index 6f1a49535..2fbe0ad4e 100644
--- a/quesma/go.mod
+++ b/quesma/go.mod
@@ -10,14 +10,14 @@ require (
github.com/coreos/go-semver v0.3.1
github.com/google/uuid v1.6.0
github.com/gorilla/mux v1.8.1
+ github.com/gorilla/securecookie v1.1.2
github.com/gorilla/sessions v1.4.0
github.com/hashicorp/go-multierror v1.1.1
github.com/k0kubun/pp v3.0.1+incompatible
github.com/knadh/koanf/parsers/json v0.1.0
github.com/knadh/koanf/parsers/yaml v0.1.0
- github.com/knadh/koanf/providers/env v1.0.0
github.com/knadh/koanf/providers/file v1.1.2
- github.com/knadh/koanf/v2 v2.1.1
+ github.com/knadh/koanf/v2 v2.1.2
github.com/markbates/goth v1.80.0
github.com/rs/zerolog v1.33.0
github.com/shirou/gopsutil/v3 v3.24.5
@@ -30,11 +30,11 @@ require (
)
require (
- github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect
- github.com/gorilla/securecookie v1.1.2 // indirect
+ github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
github.com/hashicorp/errwrap v1.0.0 // indirect
github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88 // indirect
github.com/knadh/koanf/maps v0.1.1 // indirect
+ github.com/kr/text v0.2.0 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/pkg/errors v0.9.1 // indirect
diff --git a/quesma/go.sum b/quesma/go.sum
index bc865f1cc..059600576 100644
--- a/quesma/go.sum
+++ b/quesma/go.sum
@@ -15,6 +15,7 @@ github.com/barkimedes/go-deepcopy v0.0.0-20220514131651-17c30cfc62df/go.mod h1:h
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
@@ -29,8 +30,8 @@ github.com/go-faster/errors v0.7.1 h1:MkJTnDoEdi9pDabt1dpWf7AA8/BaSYZqibYyhZ20AY
github.com/go-faster/errors v0.7.1/go.mod h1:5ySTjWFiphBs07IKuiL69nxdfd5+fzh1u7FPGZP2quo=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
-github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c=
-github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
+github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
+github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
@@ -70,12 +71,10 @@ github.com/knadh/koanf/parsers/json v0.1.0 h1:dzSZl5pf5bBcW0Acnu20Djleto19T0CfHc
github.com/knadh/koanf/parsers/json v0.1.0/go.mod h1:ll2/MlXcZ2BfXD6YJcjVFzhG9P0TdJ207aIBKQhV2hY=
github.com/knadh/koanf/parsers/yaml v0.1.0 h1:ZZ8/iGfRLvKSaMEECEBPM1HQslrZADk8fP1XFUxVI5w=
github.com/knadh/koanf/parsers/yaml v0.1.0/go.mod h1:cvbUDC7AL23pImuQP0oRw/hPuccrNBS2bps8asS0CwY=
-github.com/knadh/koanf/providers/env v1.0.0 h1:ufePaI9BnWH+ajuxGGiJ8pdTG0uLEUWC7/HDDPGLah0=
-github.com/knadh/koanf/providers/env v1.0.0/go.mod h1:mzFyRZueYhb37oPmC1HAv/oGEEuyvJDA98r3XAa8Gak=
github.com/knadh/koanf/providers/file v1.1.2 h1:aCC36YGOgV5lTtAFz2qkgtWdeQsgfxUkxDOe+2nQY3w=
github.com/knadh/koanf/providers/file v1.1.2/go.mod h1:/faSBcv2mxPVjFrXck95qeoyoZ5myJ6uxN8OOVNJJCI=
-github.com/knadh/koanf/v2 v2.1.1 h1:/R8eXqasSTsmDCsAyYj+81Wteg8AqrV9CP6gvsTsOmM=
-github.com/knadh/koanf/v2 v2.1.1/go.mod h1:4mnTRbZCK+ALuBXHZMjDfG9y714L7TykVnZkXbMU3Es=
+github.com/knadh/koanf/v2 v2.1.2 h1:I2rtLRqXRy1p01m/utEtpZSSA6dcJbgGVuE27kW2PzQ=
+github.com/knadh/koanf/v2 v2.1.2/go.mod h1:Gphfaen0q1Fc1HTgJgSTC4oRX9R2R5ErYMZJy8fLJBo=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
diff --git a/quesma/jsondiff/elastic_response_diff.go b/quesma/jsondiff/elastic_response_diff.go
index 83a27c57b..0c708b5b4 100644
--- a/quesma/jsondiff/elastic_response_diff.go
+++ b/quesma/jsondiff/elastic_response_diff.go
@@ -6,7 +6,22 @@ import "fmt"
// NewElasticResponseJSONDiff creates a JSONDiff instance that is tailored to compare Elasticsearch response JSONs.
func NewElasticResponseJSONDiff() (*JSONDiff, error) {
- d, err := NewJSONDiff("^id$", ".*Quesma_key_.*", "^took$", ".*__quesma_total_count", ".*\\._id", "^_shards.*", ".*\\._score", ".*\\._source", ".*\\.__quesma_originalKey")
+
+ var ignorePaths []string
+
+ // quesma specific fields that we want to ignore
+ ignorePaths = append(ignorePaths, ".*Quesma_key_.*", ".*__quesma_total_count", ".*\\.__quesma_originalKey")
+
+ // well known fields that we want to ignore
+ ignorePaths = append(ignorePaths, "^id$", "^took$", ".*\\._id", "^_shards.*", ".*\\._score", ".*\\._source", ".*\\._version$")
+
+ // elastic has some fields that are suffixed with ".keyword" that we want to ignore
+ ignorePaths = append(ignorePaths, ".*\\.keyword$")
+
+ // ignore some fields that are related to location, just for now (we want to compare them in the future)
+ ignorePaths = append(ignorePaths, ".*Location$", ".*\\.lat$", ".*\\.lon$")
+
+ d, err := NewJSONDiff(ignorePaths...)
if err != nil {
return nil, fmt.Errorf("could not create JSONDiff: %v", err)
diff --git a/quesma/jsondiff/jsondiff.go b/quesma/jsondiff/jsondiff.go
index 1ce9160ca..3532ffcf7 100644
--- a/quesma/jsondiff/jsondiff.go
+++ b/quesma/jsondiff/jsondiff.go
@@ -5,6 +5,7 @@ package jsondiff
import (
"fmt"
"math"
+ "time"
"quesma/quesma/types"
"reflect"
@@ -324,10 +325,10 @@ func (d *JSONDiff) compareArray(expected []any, actual []any) {
}
if lenDiff > 1 {
- d.addMismatch(invalidArrayLength, fmt.Sprintf("%d", len(actual)), fmt.Sprintf("%d", len(expected)))
+ d.addMismatch(invalidArrayLength, fmt.Sprintf("%d", len(expected)), fmt.Sprintf("%d", len(actual)))
return
} else if lenDiff == 1 {
- d.addMismatch(invalidArrayLengthOffByOne, fmt.Sprintf("%d", len(actual)), fmt.Sprintf("%d", len(expected)))
+ d.addMismatch(invalidArrayLengthOffByOne, fmt.Sprintf("%d", len(expected)), fmt.Sprintf("%d", len(actual)))
return
}
@@ -344,7 +345,7 @@ func (d *JSONDiff) compareArray(expected []any, actual []any) {
for i := range len(actual) {
d.pushPath(fmt.Sprintf("[%d]", i))
- d.compare(actual[i], expected[i])
+ d.compare(expected[i], actual[i])
d.popPath()
}
}
@@ -357,7 +358,28 @@ func (d *JSONDiff) asType(a any) string {
return fmt.Sprintf("%T", a)
}
-var dateRx = regexp.MustCompile(`\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}`)
+var dateRx = regexp.MustCompile(`\d{4}-\d{2}-\d{2}.\d{2}:\d{2}:`)
+
+func (d *JSONDiff) uniformTimeFormat(date string) string {
+ returnFormat := time.RFC3339Nano
+
+ inputFormats := []string{
+ "2006-01-02T15:04:05.000-07:00",
+ "2006-01-02T15:04:05.000Z",
+ "2006-01-02T15:04:05.000",
+ "2006-01-02 15:04:05",
+ }
+
+ var parsedDate time.Time
+ var err error
+ for _, format := range inputFormats {
+ parsedDate, err = time.Parse(format, date)
+ if err == nil {
+ return parsedDate.UTC().Format(returnFormat)
+ }
+ }
+ return date
+}
func (d *JSONDiff) compare(expected any, actual any) {
@@ -379,12 +401,12 @@ func (d *JSONDiff) compare(expected any, actual any) {
return
}
- switch aVal := expected.(type) {
+ switch expectedVal := expected.(type) {
case map[string]any:
switch bVal := actual.(type) {
case map[string]any:
- d.compareObject(aVal, bVal)
+ d.compareObject(expectedVal, bVal)
default:
d.addMismatch(invalidType, d.asType(expected), d.asType(actual))
return
@@ -428,20 +450,12 @@ func (d *JSONDiff) compare(expected any, actual any) {
switch actualString := actual.(type) {
case string:
- if dateRx.MatchString(aVal) && dateRx.MatchString(actualString) {
-
- // TODO add better date comparison here
- // parse both date and compare them with desired precision
-
- // elastics returns date in formats
- // "2024-10-24T00:00:00.000+02:00"
- // "2024-10-24T00:00:00.000Z"
+ if dateRx.MatchString(expectedVal) {
- // quesma returns
- // 2024-10-23T22:00:00.000
- compareOnly := "2000-01-"
+ aDate := d.uniformTimeFormat(expectedVal)
+ bDate := d.uniformTimeFormat(actualString)
- if aVal[:len(compareOnly)] != actualString[:len(compareOnly)] {
+ if aDate != bDate {
d.addMismatch(invalidDateValue, d.asValue(expected), d.asValue(actual))
}
diff --git a/quesma/jsondiff/jsondiff_test.go b/quesma/jsondiff/jsondiff_test.go
index ce8ddcb56..2bd0efda9 100644
--- a/quesma/jsondiff/jsondiff_test.go
+++ b/quesma/jsondiff/jsondiff_test.go
@@ -4,6 +4,7 @@ package jsondiff
import (
"fmt"
+ "strings"
"github.com/k0kubun/pp"
@@ -118,11 +119,37 @@ func TestJSONDiff(t *testing.T) {
actual: `{"bar": [5, 2, 4, 3, 1, -1], "b": 2, "c": 3}`,
problems: []JSONMismatch{mismatch("bar", arrayKeysDifferenceSlightly)},
},
+ {
+ name: "dates",
+ expected: `{"a": "2021-01-01T00:00:00.000Z"}`,
+ actual: `{"a": "2021-01-01T00:00:00.001Z"}`,
+ problems: []JSONMismatch{mismatch("a", invalidDateValue)},
+ },
+ {
+ name: "dates 2",
+ expected: `{"a": "2021-01-01T00:00:00.000Z"}`,
+ actual: `{"a": "2021-01-01T00:00:00.000"}`,
+ problems: []JSONMismatch{},
+ },
+ {
+ name: "dates 3",
+ expected: `{"a": "2024-10-24T10:00:00.000"}`,
+ actual: `{"a": "2024-10-24T12:00:00.000+02:00"}`,
+ },
+ {
+ name: "dates 4",
+ expected: `{"a": "2024-10-31T11:00:00.000"}`,
+ actual: `{"a": "2024-10-31T12:00:00.000+01:00"}`,
+ },
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
+ if strings.HasPrefix(tt.name, "SKIP") {
+ return
+ }
+
diff, err := NewJSONDiff("_ignore")
if err != nil {
t.Fatalf("Expected no error, got %v", err)
diff --git a/quesma/queryparser/query_parser.go b/quesma/queryparser/query_parser.go
index dd14bf668..80a64e959 100644
--- a/quesma/queryparser/query_parser.go
+++ b/quesma/queryparser/query_parser.go
@@ -32,6 +32,11 @@ func NewEmptyHighlighter() model.Highlighter {
}
}
+const (
+ defaultQueryResultSize = 10
+ defaultTrackTotalHits = 10000
+)
+
func (cw *ClickhouseQueryTranslator) ParseQuery(body types.JSON) (*model.ExecutionPlan, error) {
simpleQuery, hitsInfo, highlighter, err := cw.parseQueryInternal(body)
@@ -148,17 +153,8 @@ func (cw *ClickhouseQueryTranslator) parseQueryInternal(body types.JSON) (*model
if sortPart, ok := queryAsMap["sort"]; ok {
parsedQuery.OrderBy = cw.parseSortFields(sortPart)
}
- const defaultSize = 10
- size := defaultSize
- if sizeRaw, ok := queryAsMap["size"]; ok {
- if sizeFloat, ok := sizeRaw.(float64); ok {
- size = int(sizeFloat)
- } else {
- logger.WarnWithCtx(cw.Ctx).Msgf("unknown size format, size value: %v type: %T. Using default (%d)", sizeRaw, sizeRaw, defaultSize)
- }
- }
+ size := cw.parseSize(queryAsMap, defaultQueryResultSize)
- const defaultTrackTotalHits = 10000
trackTotalHits := defaultTrackTotalHits
if trackTotalHitsRaw, ok := queryAsMap["track_total_hits"]; ok {
switch trackTotalHitsTyped := trackTotalHitsRaw.(type) {
diff --git a/quesma/quesma/config/config.go b/quesma/quesma/config/config.go
index 84489221c..5b1162555 100644
--- a/quesma/quesma/config/config.go
+++ b/quesma/quesma/config/config.go
@@ -177,16 +177,11 @@ func (c *QuesmaConfiguration) optimizersConfigAsString(s string, cfg map[string]
func (c *QuesmaConfiguration) OptimizersConfigAsString() string {
var lines []string
-
- lines = append(lines, "\n")
-
for indexName, indexConfig := range c.IndexConfig {
if len(indexConfig.Optimizers) > 0 {
lines = append(lines, c.optimizersConfigAsString(indexName, indexConfig.Optimizers))
}
}
-
- lines = append(lines, "\n")
return strings.Join(lines, "\n")
}
@@ -253,8 +248,16 @@ Quesma Configuration:
Log Level: %v
Public TCP Port: %d
Ingest Statistics: %t,
- Quesma Telemetry URL: %s
- Optimizers: %s`,
+ Quesma Telemetry URL: %s,
+ Optimizers: %s,
+ DisableAuth: %t,
+ AutodiscoveryEnabled: %t,
+ EnableIngest: %t,
+ CreateCommonTable: %t,
+ UseCommonTableForWildcard: %t,
+ DefaultIngestTarget: %v,
+ DefaultQueryTarget: %v,
+`,
c.TransparentProxy,
elasticUrl,
elasticsearchExtra,
@@ -268,6 +271,13 @@ Quesma Configuration:
c.IngestStatistics,
quesmaInternalTelemetryUrl,
c.OptimizersConfigAsString(),
+ c.DisableAuth,
+ c.AutodiscoveryEnabled,
+ c.EnableIngest,
+ c.CreateCommonTable,
+ c.UseCommonTableForWildcard,
+ c.DefaultIngestTarget,
+ c.DefaultQueryTarget,
)
}
diff --git a/quesma/quesma/config/config_v2.go b/quesma/quesma/config/config_v2.go
index 2f993e0d3..19b94527c 100644
--- a/quesma/quesma/config/config_v2.go
+++ b/quesma/quesma/config/config_v2.go
@@ -112,7 +112,8 @@ const DefaultWildcardIndexName = "*"
// Configuration of QuesmaV1ProcessorQuery and QuesmaV1ProcessorIngest
type QuesmaProcessorConfig struct {
- IndexConfig map[string]IndexConfiguration `koanf:"indexes"`
+ UseCommonTable bool `koanf:"useCommonTable"`
+ IndexConfig map[string]IndexConfiguration `koanf:"indexes"`
}
func LoadV2Config() QuesmaNewConfiguration {
@@ -296,6 +297,9 @@ func (c *QuesmaNewConfiguration) validatePipelines() error {
queryProcessor.Type != QuesmaV1ProcessorNoOp {
return fmt.Errorf("query pipeline must have query or noop processor")
}
+ if queryProcessor.Config.UseCommonTable != ingestProcessor.Config.UseCommonTable {
+ return fmt.Errorf("query and ingest processors must have the same configuration of 'useCommonTable'")
+ }
if !(queryProcessor.Type == QuesmaV1ProcessorNoOp) {
if _, found := queryProcessor.Config.IndexConfig[DefaultWildcardIndexName]; !found {
return fmt.Errorf("the default index configuration (under the name '%s') must be defined in the query processor", DefaultWildcardIndexName)
@@ -393,21 +397,9 @@ func (c *QuesmaNewConfiguration) validateProcessor(p Processor) error {
if errTarget != nil {
return errTarget
}
- // fallback to old style, simplified target configuration
- if len(targets) > 0 {
- for _, target := range targets {
- if c.getBackendConnectorByName(target.target) == nil {
- return fmt.Errorf("invalid target %s in configuration of index %s", target, indexName)
- }
- }
- }
- if len(targets) == 0 {
- if _, ok := indexConfig.Target.([]interface{}); ok {
- for _, target := range indexConfig.Target.([]interface{}) {
- if c.getBackendConnectorByName(target.(string)) == nil {
- return fmt.Errorf("invalid target %s in configuration of index %s", target, indexName)
- }
- }
+ for _, target := range targets {
+ if c.getBackendConnectorByName(target.target) == nil {
+ return fmt.Errorf("invalid target %s in configuration of index %s", target, indexName)
}
}
}
@@ -568,29 +560,19 @@ func (c *QuesmaNewConfiguration) TranslateToLegacyConfig() QuesmaConfiguration {
if errTarget != nil {
errAcc = multierror.Append(errAcc, errTarget)
}
- if len(targets) > 0 {
- for _, target := range targets {
- if targetType, found := c.getTargetType(target.target); found {
- defaultConfig.QueryTarget = append(defaultConfig.QueryTarget, targetType)
- } else {
- errAcc = multierror.Append(errAcc, fmt.Errorf("invalid target %s in configuration of %s", target, DefaultWildcardIndexName))
- }
- if val, exists := target.properties["useCommonTable"]; exists {
- conf.CreateCommonTable = val == "true"
- conf.UseCommonTableForWildcard = val == "true"
- }
+ for _, target := range targets {
+ if targetType, found := c.getTargetType(target.target); found {
+ defaultConfig.QueryTarget = append(defaultConfig.QueryTarget, targetType)
+ } else {
+ errAcc = multierror.Append(errAcc, fmt.Errorf("invalid target %s in configuration of %s", target, DefaultWildcardIndexName))
}
- }
- // fallback to old style, simplified target configuration
- if len(targets) == 0 {
- if _, ok := defaultConfig.Target.([]interface{}); ok {
- for _, target := range defaultConfig.Target.([]interface{}) {
- if targetType, found := c.getTargetType(target.(string)); found {
- defaultConfig.QueryTarget = append(defaultConfig.QueryTarget, targetType)
- } else {
- errAcc = multierror.Append(errAcc, fmt.Errorf("invalid target %s in configuration of %s", target, DefaultWildcardIndexName))
- }
- }
+ if val, exists := target.properties["useCommonTable"]; exists {
+ conf.CreateCommonTable = val == "true"
+ conf.UseCommonTableForWildcard = val == "true"
+ } else {
+ // inherit setting from the processor level
+ conf.CreateCommonTable = queryProcessor.Config.UseCommonTable
+ conf.UseCommonTableForWildcard = queryProcessor.Config.UseCommonTable
}
}
@@ -600,6 +582,9 @@ func (c *QuesmaNewConfiguration) TranslateToLegacyConfig() QuesmaConfiguration {
conf.CreateCommonTable = true
conf.UseCommonTableForWildcard = true
}
+ if defaultConfig.SchemaOverrides != nil {
+ errAcc = multierror.Append(errAcc, fmt.Errorf("schema overrides of default index ('%s') are not currently supported (only supported in configuration of a specific index)", DefaultWildcardIndexName))
+ }
if len(defaultConfig.QueryTarget) > 1 {
errAcc = multierror.Append(errAcc, fmt.Errorf("the target configuration of default index ('%s') of query processor is not currently supported", DefaultWildcardIndexName))
}
@@ -615,29 +600,22 @@ func (c *QuesmaNewConfiguration) TranslateToLegacyConfig() QuesmaConfiguration {
if errTarget != nil {
errAcc = multierror.Append(errAcc, errTarget)
}
- if len(targets) > 0 {
- for _, target := range targets {
- if targetType, found := c.getTargetType(target.target); found {
- processedConfig.QueryTarget = append(processedConfig.QueryTarget, targetType)
- } else {
- errAcc = multierror.Append(errAcc, fmt.Errorf("invalid target %s in configuration of index %s", target, indexName))
- }
- if val, exists := target.properties["useCommonTable"]; exists {
- processedConfig.UseCommonTable = val == "true"
- }
+ for _, target := range targets {
+ if targetType, found := c.getTargetType(target.target); found {
+ processedConfig.QueryTarget = append(processedConfig.QueryTarget, targetType)
+ } else {
+ errAcc = multierror.Append(errAcc, fmt.Errorf("invalid target %s in configuration of index %s", target, indexName))
}
- }
- // fallback to old style, simplified target configuration
- if len(targets) == 0 {
- if _, ok := indexConfig.Target.([]interface{}); ok {
- for _, target := range indexConfig.Target.([]interface{}) {
- if targetType, found := c.getTargetType(target.(string)); found {
- processedConfig.QueryTarget = append(processedConfig.QueryTarget, targetType)
- } else {
- errAcc = multierror.Append(errAcc, fmt.Errorf("invalid target %s in configuration of index %s", target, indexName))
- }
- }
+ if val, exists := target.properties["useCommonTable"]; exists {
+ processedConfig.UseCommonTable = val == "true"
+ } else {
+ // inherit setting from the processor level
+ processedConfig.UseCommonTable = queryProcessor.Config.UseCommonTable
}
+ if val, exists := target.properties["tableName"]; exists {
+ processedConfig.Override = val.(string)
+ }
+
}
if len(processedConfig.QueryTarget) == 2 && !((processedConfig.QueryTarget[0] == ClickhouseTarget && processedConfig.QueryTarget[1] == ElasticsearchTarget) ||
(processedConfig.QueryTarget[0] == ElasticsearchTarget && processedConfig.QueryTarget[1] == ClickhouseTarget)) {
@@ -684,31 +662,24 @@ func (c *QuesmaNewConfiguration) TranslateToLegacyConfig() QuesmaConfiguration {
if errTarget != nil {
errAcc = multierror.Append(errAcc, errTarget)
}
- if len(targets) > 0 {
- for _, target := range targets {
- if targetType, found := c.getTargetType(target.target); found {
- defaultConfig.QueryTarget = append(defaultConfig.QueryTarget, targetType)
- } else {
- errAcc = multierror.Append(errAcc, fmt.Errorf("invalid target %s in configuration of %s", target, DefaultWildcardIndexName))
- }
- if val, exists := target.properties["useCommonTable"]; exists {
- conf.CreateCommonTable = val == "true"
- conf.UseCommonTableForWildcard = val == "true"
- }
+ for _, target := range targets {
+ if targetType, found := c.getTargetType(target.target); found {
+ defaultConfig.QueryTarget = append(defaultConfig.QueryTarget, targetType)
+ } else {
+ errAcc = multierror.Append(errAcc, fmt.Errorf("invalid target %s in configuration of %s", target, DefaultWildcardIndexName))
}
- }
- // fallback to old style, simplified target configuration
- if len(targets) == 0 {
- if _, ok := defaultConfig.Target.([]interface{}); ok {
- for _, target := range defaultConfig.Target.([]interface{}) {
- if targetType, found := c.getTargetType(target.(string)); found {
- defaultConfig.QueryTarget = append(defaultConfig.QueryTarget, targetType)
- } else {
- errAcc = multierror.Append(errAcc, fmt.Errorf("invalid target %s in configuration of %s", target, DefaultWildcardIndexName))
- }
- }
+ if val, exists := target.properties["useCommonTable"]; exists {
+ conf.CreateCommonTable = val == "true"
+ conf.UseCommonTableForWildcard = val == "true"
+ } else {
+ // inherit setting from the processor level
+ conf.CreateCommonTable = queryProcessor.Config.UseCommonTable
+ conf.UseCommonTableForWildcard = queryProcessor.Config.UseCommonTable
}
}
+ if defaultConfig.SchemaOverrides != nil {
+ errAcc = multierror.Append(errAcc, fmt.Errorf("schema overrides of default index ('%s') are not currently supported (only supported in configuration of a specific index)", DefaultWildcardIndexName))
+ }
if defaultConfig.UseCommonTable {
// We set both flags to true here
// as creating common table depends on the first one
@@ -721,31 +692,24 @@ func (c *QuesmaNewConfiguration) TranslateToLegacyConfig() QuesmaConfiguration {
if errTarget != nil {
errAcc = multierror.Append(errAcc, errTarget)
}
- if len(targets) > 0 {
- for _, target := range targets {
- if targetType, found := c.getTargetType(target.target); found {
- defaultConfig.IngestTarget = append(defaultConfig.IngestTarget, targetType)
- } else {
- errAcc = multierror.Append(errAcc, fmt.Errorf("invalid target %s in configuration of %s", target, DefaultWildcardIndexName))
- }
- if val, exists := target.properties["useCommonTable"]; exists {
- conf.CreateCommonTable = val == "true"
- conf.UseCommonTableForWildcard = val == "true"
- }
+ for _, target := range targets {
+ if targetType, found := c.getTargetType(target.target); found {
+ defaultConfig.IngestTarget = append(defaultConfig.IngestTarget, targetType)
+ } else {
+ errAcc = multierror.Append(errAcc, fmt.Errorf("invalid target %s in configuration of %s", target, DefaultWildcardIndexName))
}
- }
- // fallback to old style, simplified target configuration
- if len(targets) == 0 {
- if _, ok := ingestProcessor.Config.IndexConfig[DefaultWildcardIndexName].Target.([]interface{}); ok {
- for _, target := range ingestProcessor.Config.IndexConfig[DefaultWildcardIndexName].Target.([]interface{}) {
- if targetType, found := c.getTargetType(target.(string)); found {
- defaultConfig.IngestTarget = append(defaultConfig.IngestTarget, targetType)
- } else {
- errAcc = multierror.Append(errAcc, fmt.Errorf("invalid target %s in configuration of %s", target, DefaultWildcardIndexName))
- }
- }
+ if val, exists := target.properties["useCommonTable"]; exists {
+ conf.CreateCommonTable = val == "true"
+ conf.UseCommonTableForWildcard = val == "true"
+ } else {
+ // inherit setting from the processor level
+ conf.CreateCommonTable = ingestProcessor.Config.UseCommonTable
+ conf.UseCommonTableForWildcard = ingestProcessor.Config.UseCommonTable
}
}
+ if ingestProcessorDefaultIndexConfig.SchemaOverrides != nil {
+ errAcc = multierror.Append(errAcc, fmt.Errorf("schema overrides of default index ('%s') are not currently supported (only supported in configuration of a specific index)", DefaultWildcardIndexName))
+ }
if ingestProcessorDefaultIndexConfig.UseCommonTable {
// We set both flags to true here
// as creating common table depends on the first one
@@ -777,28 +741,20 @@ func (c *QuesmaNewConfiguration) TranslateToLegacyConfig() QuesmaConfiguration {
if errTarget != nil {
errAcc = multierror.Append(errAcc, errTarget)
}
- if len(targets) > 0 {
- for _, target := range targets {
- if targetType, found := c.getTargetType(target.target); found {
- processedConfig.QueryTarget = append(processedConfig.QueryTarget, targetType)
- } else {
- errAcc = multierror.Append(errAcc, fmt.Errorf("invalid target %s in configuration of index %s", target, indexName))
- }
- if val, exists := target.properties["useCommonTable"]; exists {
- processedConfig.UseCommonTable = val == true
- }
+ for _, target := range targets {
+ if targetType, found := c.getTargetType(target.target); found {
+ processedConfig.QueryTarget = append(processedConfig.QueryTarget, targetType)
+ } else {
+ errAcc = multierror.Append(errAcc, fmt.Errorf("invalid target %s in configuration of index %s", target, indexName))
}
- }
- // fallback to old style, simplified target configuration
- if len(targets) == 0 {
- if _, ok := indexConfig.Target.([]interface{}); ok {
- for _, target := range indexConfig.Target.([]interface{}) {
- if targetType, found := c.getTargetType(target.(string)); found {
- processedConfig.QueryTarget = append(processedConfig.QueryTarget, targetType)
- } else {
- errAcc = multierror.Append(errAcc, fmt.Errorf("invalid target %s in configuration of index %s", target, indexName))
- }
- }
+ if val, exists := target.properties["useCommonTable"]; exists {
+ processedConfig.UseCommonTable = val == true
+ } else {
+ // inherit setting from the processor level
+ processedConfig.UseCommonTable = queryProcessor.Config.UseCommonTable
+ }
+ if val, exists := target.properties["tableName"]; exists {
+ processedConfig.Override = val.(string)
}
}
if len(processedConfig.QueryTarget) == 2 && !((processedConfig.QueryTarget[0] == ClickhouseTarget && processedConfig.QueryTarget[1] == ElasticsearchTarget) ||
@@ -837,28 +793,20 @@ func (c *QuesmaNewConfiguration) TranslateToLegacyConfig() QuesmaConfiguration {
if errTarget != nil {
errAcc = multierror.Append(errAcc, errTarget)
}
- if len(targets) > 0 {
- for _, target := range targets {
- if targetType, found := c.getTargetType(target.target); found {
- processedConfig.IngestTarget = append(processedConfig.IngestTarget, targetType)
- } else {
- errAcc = multierror.Append(errAcc, fmt.Errorf("invalid target %s in configuration of index %s", target, indexName))
- }
- if val, exists := target.properties["useCommonTable"]; exists {
- processedConfig.UseCommonTable = val == true
- }
+ for _, target := range targets {
+ if targetType, found := c.getTargetType(target.target); found {
+ processedConfig.IngestTarget = append(processedConfig.IngestTarget, targetType)
+ } else {
+ errAcc = multierror.Append(errAcc, fmt.Errorf("invalid target %s in configuration of index %s", target, indexName))
}
- }
- // fallback to old style, simplified target configuration
- if len(targets) == 0 {
- if _, ok := indexConfig.Target.([]interface{}); ok {
- for _, target := range indexConfig.Target.([]interface{}) {
- if targetType, found := c.getTargetType(target.(string)); found {
- processedConfig.IngestTarget = append(processedConfig.IngestTarget, targetType)
- } else {
- errAcc = multierror.Append(errAcc, fmt.Errorf("invalid target %s in configuration of index %s", target, indexName))
- }
- }
+ if val, exists := target.properties["useCommonTable"]; exists {
+ processedConfig.UseCommonTable = val == true
+ } else {
+ // inherit setting from the processor level
+ processedConfig.UseCommonTable = ingestProcessor.Config.UseCommonTable
+ }
+ if val, exists := target.properties["tableName"]; exists {
+ processedConfig.Override = val.(string)
}
}
conf.IndexConfig[indexName] = processedConfig
@@ -980,6 +928,12 @@ func (c *QuesmaNewConfiguration) getTargetsExtendedConfig(target any) ([]struct
if targets, ok := target.([]interface{}); ok {
for _, target := range targets {
+ if targetName, ok := target.(string); ok {
+ result = append(result, struct {
+ target string
+ properties map[string]interface{}
+ }{target: targetName, properties: map[string]interface{}{}})
+ }
if targetMap, ok := target.(map[string]interface{}); ok {
for name, settings := range targetMap {
if settingsMap, ok := settings.(map[string]interface{}); ok {
diff --git a/quesma/quesma/config/config_v2_test.go b/quesma/quesma/config/config_v2_test.go
index b61ea7843..bf20feeef 100644
--- a/quesma/quesma/config/config_v2_test.go
+++ b/quesma/quesma/config/config_v2_test.go
@@ -238,10 +238,11 @@ func TestTargetNewVariant(t *testing.T) {
}
legacyConf := cfg.TranslateToLegacyConfig()
assert.False(t, legacyConf.TransparentProxy)
- assert.Equal(t, 3, len(legacyConf.IndexConfig))
+ assert.Equal(t, 4, len(legacyConf.IndexConfig))
ecommerce := legacyConf.IndexConfig["kibana_sample_data_ecommerce"]
flights := legacyConf.IndexConfig["kibana_sample_data_flights"]
logs := legacyConf.IndexConfig["kibana_sample_data_logs"]
+ override := legacyConf.IndexConfig["test_override"]
assert.Equal(t, []string{ClickhouseTarget}, ecommerce.QueryTarget)
assert.Equal(t, []string{ClickhouseTarget}, ecommerce.IngestTarget)
@@ -252,8 +253,39 @@ func TestTargetNewVariant(t *testing.T) {
assert.Equal(t, []string{ClickhouseTarget}, logs.QueryTarget)
assert.Equal(t, []string{ClickhouseTarget}, logs.IngestTarget)
+ assert.Equal(t, []string{ClickhouseTarget}, override.QueryTarget)
+ assert.Equal(t, []string{ClickhouseTarget}, override.IngestTarget)
+
assert.Equal(t, false, flights.UseCommonTable)
+ assert.Equal(t, "", flights.Override)
assert.Equal(t, false, ecommerce.UseCommonTable)
+ assert.Equal(t, "", ecommerce.Override)
assert.Equal(t, true, logs.UseCommonTable)
+ assert.Equal(t, "", logs.Override)
assert.Equal(t, true, legacyConf.EnableIngest)
+
+ const expectedOverride = "new_override"
+ assert.Equal(t, expectedOverride, override.Override)
+}
+
+func TestUseCommonTableGlobalProperty(t *testing.T) {
+ os.Setenv(configFileLocationEnvVar, "./test_configs/use_common_table_global_property.yaml")
+ cfg := LoadV2Config()
+ if err := cfg.Validate(); err != nil {
+ t.Fatalf("error validating config: %v", err)
+ }
+ legacyConf := cfg.TranslateToLegacyConfig()
+ assert.False(t, legacyConf.TransparentProxy)
+ assert.Equal(t, 2, len(legacyConf.IndexConfig))
+ ecommerce := legacyConf.IndexConfig["kibana_sample_data_ecommerce"]
+ flights := legacyConf.IndexConfig["kibana_sample_data_flights"]
+
+ assert.Equal(t, []string{ClickhouseTarget}, ecommerce.QueryTarget)
+ assert.Equal(t, []string{ClickhouseTarget}, ecommerce.IngestTarget)
+
+ assert.Equal(t, []string{ClickhouseTarget}, flights.QueryTarget)
+ assert.Equal(t, []string{ClickhouseTarget}, flights.IngestTarget)
+
+ assert.Equal(t, true, flights.UseCommonTable)
+ assert.Equal(t, false, ecommerce.UseCommonTable)
}
diff --git a/quesma/quesma/config/index_config.go b/quesma/quesma/config/index_config.go
index d5980cf5c..9b8b04046 100644
--- a/quesma/quesma/config/index_config.go
+++ b/quesma/quesma/config/index_config.go
@@ -16,7 +16,7 @@ const (
type IndexConfiguration struct {
SchemaOverrides *SchemaConfiguration `koanf:"schemaOverrides"`
Optimizers map[string]OptimizerConfiguration `koanf:"optimizers"`
- Override string `koanf:"override"`
+ Override string `koanf:"tableName"`
UseCommonTable bool `koanf:"useCommonTable"`
Target any `koanf:"target"`
diff --git a/quesma/quesma/config/test_configs/has_common_table.yaml b/quesma/quesma/config/test_configs/has_common_table.yaml
index 68a17960e..6bc80de57 100644
--- a/quesma/quesma/config/test_configs/has_common_table.yaml
+++ b/quesma/quesma/config/test_configs/has_common_table.yaml
@@ -32,11 +32,11 @@ processors:
logs-3:
target: [ C, E ]
logs-4:
- useCommonTable: true
- target: [ C ]
+ target:
+ - C:
+ useCommonTable: true
logs-5:
- useCommonTable: true
- target: [ C ]
+ target:
"*":
target: [ E ]
@@ -51,13 +51,13 @@ processors:
logs-3:
target: [ C, E ]
logs-4:
- useCommonTable: true
- target: [ C ]
+ target:
+ - C:
+ useCommonTable: true
"*":
target: [ E ]
logs-5:
- useCommonTable: true
- target: [ ]
+ target:
pipelines:
- name: my-elasticsearch-proxy-read
@@ -67,4 +67,4 @@ pipelines:
- name: my-elasticsearch-proxy-write
frontendConnectors: [ elastic-ingest ]
processors: [ IP ]
- backendConnectors: [ E, C ]
+ backendConnectors: [ E, C ]
\ No newline at end of file
diff --git a/quesma/quesma/config/test_configs/target_new_variant.yaml b/quesma/quesma/config/test_configs/target_new_variant.yaml
index 127deb721..8f907518a 100644
--- a/quesma/quesma/config/test_configs/target_new_variant.yaml
+++ b/quesma/quesma/config/test_configs/target_new_variant.yaml
@@ -40,6 +40,10 @@ processors:
target:
- my-clickhouse-data-source:
useCommonTable: true
+ "test_override":
+ target:
+ - my-clickhouse-data-source:
+ tableName: "new_override"
"*":
target: [ my-minimal-elasticsearch ]
@@ -58,6 +62,10 @@ processors:
target:
- my-clickhouse-data-source:
useCommonTable: true
+ "test_override":
+ target:
+ - my-clickhouse-data-source:
+ tableName: "new_override"
"*":
target: [ my-minimal-elasticsearch ]
pipelines:
diff --git a/quesma/quesma/config/test_configs/use_common_table_global_property.yaml b/quesma/quesma/config/test_configs/use_common_table_global_property.yaml
new file mode 100644
index 000000000..789697e0d
--- /dev/null
+++ b/quesma/quesma/config/test_configs/use_common_table_global_property.yaml
@@ -0,0 +1,65 @@
+# TEST CONFIGURATION
+licenseKey: "cdd749a3-e777-11ee-bcf8-0242ac150004"
+frontendConnectors:
+ - name: elastic-ingest
+ type: elasticsearch-fe-ingest
+ config:
+ listenPort: 8080
+ - name: elastic-query
+ type: elasticsearch-fe-query
+ config:
+ listenPort: 8080
+backendConnectors:
+ - name: my-minimal-elasticsearch
+ type: elasticsearch
+ config:
+ url: "http://localhost:9200"
+ - name: my-clickhouse-data-source
+ type: clickhouse-os
+ config:
+ url: "clickhouse://localhost:9000"
+ingestStatistics: true
+internalTelemetryUrl: "https://api.quesma.com/phone-home"
+logging:
+ remoteUrl: "https://api.quesma.com/phone-home"
+ path: "logs"
+ level: "info"
+processors:
+ - name: my-query-processor
+ type: quesma-v1-processor-query
+ config:
+ useCommonTable: true
+ indexes:
+ kibana_sample_data_ecommerce:
+ target:
+ - my-clickhouse-data-source:
+ useCommonTable: false
+ kibana_sample_data_flights:
+ target:
+ - my-clickhouse-data-source
+ "*":
+ target: [ my-minimal-elasticsearch ]
+
+ - name: my-ingest-processor
+ type: quesma-v1-processor-ingest
+ config:
+ useCommonTable: true
+ indexes:
+ kibana_sample_data_ecommerce:
+ target:
+ - my-clickhouse-data-source:
+ useCommonTable: false
+ kibana_sample_data_flights:
+ target:
+ - my-clickhouse-data-source
+ "*":
+ target: [ my-minimal-elasticsearch ]
+pipelines:
+ - name: my-pipeline-elasticsearch-query-clickhouse
+ frontendConnectors: [ elastic-query ]
+ processors: [ my-query-processor ]
+ backendConnectors: [ my-minimal-elasticsearch, my-clickhouse-data-source ]
+ - name: my-pipeline-elasticsearch-ingest-to-clickhouse
+ frontendConnectors: [ elastic-ingest ]
+ processors: [ my-ingest-processor ]
+ backendConnectors: [ my-minimal-elasticsearch, my-clickhouse-data-source ]
\ No newline at end of file
diff --git a/quesma/quesma/schema_transformer.go b/quesma/quesma/schema_transformer.go
index 90446a2ce..5b44e69b6 100644
--- a/quesma/quesma/schema_transformer.go
+++ b/quesma/quesma/schema_transformer.go
@@ -470,6 +470,15 @@ func (s *SchemaCheckPass) applyWildcardExpansion(indexSchema schema.Schema, quer
cols = append(cols, col.InternalPropertyName.AsString())
}
}
+
+ if query.RuntimeMappings != nil {
+ // add columns that are not in the schema but are in the runtime mappings
+ // these columns will be transformed later
+ for name := range query.RuntimeMappings {
+ cols = append(cols, name)
+ }
+ }
+
sort.Strings(cols)
for _, col := range cols {
@@ -683,10 +692,22 @@ func (s *SchemaCheckPass) applyRuntimeMappings(indexSchema schema.Schema, query
return query, nil
}
- visitor := model.NewBaseVisitor()
+ cols := query.SelectCommand.Columns
- visitor.OverrideVisitColumnRef = func(b *model.BaseExprVisitor, e model.ColumnRef) interface{} {
+ // replace column refs with runtime mappings with proper name
+ for i, col := range cols {
+ switch c := col.(type) {
+ case model.ColumnRef:
+ if mapping, ok := query.RuntimeMappings[c.ColumnName]; ok {
+ cols[i] = model.NewAliasedExpr(mapping.Expr, c.ColumnName)
+ }
+ }
+ }
+ query.SelectCommand.Columns = cols
+ // replace other places where column refs are used
+ visitor := model.NewBaseVisitor()
+ visitor.OverrideVisitColumnRef = func(b *model.BaseExprVisitor, e model.ColumnRef) interface{} {
if mapping, ok := query.RuntimeMappings[e.ColumnName]; ok {
return mapping.Expr
}
@@ -789,28 +810,3 @@ func (s *SchemaCheckPass) Transform(queries []*model.Query) ([]*model.Query, err
}
return queries, nil
}
-
-// ArrayResultTransformer is a transformer that transforms array columns into string representation
-type ArrayResultTransformer struct {
-}
-
-func (g *ArrayResultTransformer) Transform(result [][]model.QueryResultRow) ([][]model.QueryResultRow, error) {
-
- for i, rows := range result {
-
- for j, row := range rows {
- for k, col := range row.Cols {
-
- if ary, ok := col.Value.([]string); ok {
- aryStr := make([]string, 0, len(ary))
- for _, el := range ary {
- aryStr = append(aryStr, fmt.Sprintf("%v", el))
- }
- result[i][j].Cols[k].Value = fmt.Sprintf("[%s]", strings.Join(aryStr, ","))
- }
- }
- }
-
- }
- return result, nil
-}
diff --git a/quesma/quesma/search.go b/quesma/quesma/search.go
index a2b9659fc..dab7ca17d 100644
--- a/quesma/quesma/search.go
+++ b/quesma/quesma/search.go
@@ -803,7 +803,6 @@ func (q *QueryRunner) postProcessResults(plan *model.ExecutionPlan, results [][]
transformer model.ResultTransformer
}{
{"replaceColumNamesWithFieldNames", &replaceColumNamesWithFieldNames{indexSchema: indexSchema}},
- {"arrayResultTransformer", &ArrayResultTransformer{}},
}
var err error
diff --git a/quesma/quesma/search_ab_testing.go b/quesma/quesma/search_ab_testing.go
index 5bec6d5df..90ecf0583 100644
--- a/quesma/quesma/search_ab_testing.go
+++ b/quesma/quesma/search_ab_testing.go
@@ -13,8 +13,8 @@ import (
"quesma/elasticsearch"
"quesma/logger"
"quesma/model"
- "quesma/queryparser"
"quesma/quesma/async_search_storage"
+ "quesma/quesma/config"
"quesma/quesma/recovery"
"quesma/quesma/types"
"quesma/quesma/ui"
@@ -128,7 +128,7 @@ func (q *QueryRunner) executeABTesting(ctx context.Context, plan *model.Executio
case *table_resolver.ConnectorDecisionClickhouse:
planExecutor = func(ctx context.Context) ([]byte, error) {
- plan.Name = "clickhouse"
+ plan.Name = config.ClickhouseTarget
return q.executePlan(ctx, plan, queryTranslator, table, body, optAsync, optComparePlansCh, isMainPlan)
}
@@ -139,7 +139,7 @@ func (q *QueryRunner) executeABTesting(ctx context.Context, plan *model.Executio
QueryRowsTransformers: []model.QueryRowsTransformer{},
Queries: []*model.Query{},
StartTime: plan.StartTime,
- Name: "elastic",
+ Name: config.ElasticsearchTarget,
}
return q.executePlanElastic(ctx, elasticPlan, body, optAsync, optComparePlansCh, isMainPlan)
}
@@ -207,6 +207,8 @@ func (q *QueryRunner) executePlanElastic(ctx context.Context, plan *model.Execut
err = response.err
sendABResult(nil, err)
return nil, err
+ } else {
+ responseBody, err = response.response.Bytes()
}
pushSecondaryInfo(q.quesmaManagementConsole, id, "", path, bodyAsBytes, response.translatedQueryBody, responseBody, plan.StartTime)
@@ -327,7 +329,10 @@ func (q *QueryRunner) storeAsyncSearchWithRaw(qmc *ui.QuesmaManagementConsole, i
asyncResponse := WrapElasticResponseAsAsync(resultJSON, asyncId, false, &okStatus)
responseBody, err = json.MarshalIndent(asyncResponse, "", " ")
} else {
- responseBody, _ = queryparser.EmptyAsyncSearchResponse(asyncId, false, 503)
+ responseBody, err = resultJSON.Bytes()
+		if err != nil {
+			logger.Warn().Msgf("error while marshalling async search response: %v", err)
+ }
err = resultError
}
diff --git a/quesma/quesma/ui/ab_testing.go b/quesma/quesma/ui/ab_testing.go
new file mode 100644
index 000000000..893842eb1
--- /dev/null
+++ b/quesma/quesma/ui/ab_testing.go
@@ -0,0 +1,954 @@
+// Copyright Quesma, licensed under the Elastic License 2.0.
+// SPDX-License-Identifier: Elastic-2.0
+package ui
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "quesma/elasticsearch"
+ "quesma/jsondiff"
+ "quesma/logger"
+ "quesma/quesma/ui/internal/builder"
+ "strings"
+ "time"
+)
+
+const abTestingPath = "/ab-testing-dashboard"
+
+func (qmc *QuesmaManagementConsole) hasABTestingTable() bool {
+
+ db := qmc.logManager.GetDB()
+
+ sql := `SELECT count(*) FROM ab_testing_logs`
+
+ row := db.QueryRow(sql)
+ var count int
+ err := row.Scan(&count)
+ if err != nil {
+ logger.Error().Err(err).Msg("Error checking for ab_testing_logs table")
+ return false
+ }
+
+ return true
+}
+
+func (qmc *QuesmaManagementConsole) renderError(buff *builder.HtmlBuffer, err error) {
+
+ buff.Html(``)
+ buff.Html(`
Error
`)
+ buff.Html(`
`)
+ buff.Text(err.Error())
+ buff.Html(`
`)
+ buff.Html(`
`)
+
+}
+
+func (qmc *QuesmaManagementConsole) generateABTestingDashboard() []byte {
+
+ buffer := newBufferWithHead()
+ buffer.Write(qmc.generateTopNavigation("ab-testing-dashboard"))
+
+ buffer.Html(``)
+
+ explanation := `
+This table compares results and performance of Kibana dashboards and its panels as seen by Quesma.
+Every panel query returning similar results is a success,
+load times are calculated into performance gain as a percentage by comparing the average times of first and second backend connectors for successful responses.
+If the performance gain is positive, it means that the second backend connector is faster than the first one.
+ `
+
+ buffer.Html(`Kibana dashboards compatibility report ⓘ
`)
+
+ if qmc.hasABTestingTable() {
+
+ buffer.Html(``)
+ buffer.Html(``)
+
+ buffer.Html(`")
+ } else {
+ buffer.Html(`A/B Testing results are not available.
`)
+ }
+
+ buffer.Html("\n\n\n")
+ return buffer.Bytes()
+}
+
+type kibanaDashboard struct {
+ name string
+ panels map[string]string
+}
+
+type resolvedDashboards struct {
+ dashboards map[string]kibanaDashboard
+}
+
+func (d resolvedDashboards) dashboardName(dashboardId string) string {
+ if dashboard, ok := d.dashboards[dashboardId]; ok {
+ return dashboard.name
+ }
+ return dashboardId
+}
+
+func (d resolvedDashboards) panelName(dashboardId, panelId string) string {
+ if dashboard, ok := d.dashboards[dashboardId]; ok {
+ if name, ok := dashboard.panels[panelId]; ok {
+ return name
+ }
+ }
+ return panelId
+}
+
+func (qmc *QuesmaManagementConsole) readKibanaDashboards() (resolvedDashboards, error) {
+
+ result := resolvedDashboards{
+ dashboards: make(map[string]kibanaDashboard),
+ }
+
+ elasticQuery := `
+{
+ "_source": false,
+ "fields": [
+ "_id",
+ "dashboard.title",
+ "panelsJSON",
+ "dashboard.panelsJSON"
+ ],
+ "query": {
+ "bool": {
+ "filter": [
+ {
+ "term": {
+ "type": "dashboard"
+ }
+ }
+ ]
+ }
+ }
+}
+`
+ client := elasticsearch.NewSimpleClient(&qmc.cfg.Elasticsearch)
+
+ resp, err := client.Request(context.Background(), "POST", ".kibana_analytics/_search", []byte(elasticQuery))
+ if err != nil {
+ return result, err
+ }
+
+ if resp.StatusCode != 200 {
+ return result, fmt.Errorf("unexpected HTTP status: %s", resp.Status)
+ }
+
+ defer resp.Body.Close()
+
+ data, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return result, err
+ }
+
+ type responseSchema struct {
+ Hits struct {
+ Hits []struct {
+ Fields struct {
+ Id []string `json:"_id"`
+ Title []string `json:"dashboard.title"`
+ Panels []string `json:"dashboard.panelsJSON"`
+ } `json:"fields"`
+ } `json:"hits"`
+ } `json:"hits"`
+ }
+
+ type panelSchema struct {
+ Type string `json:"type"`
+ PanelID string `json:"panelIndex"`
+ Name string `json:"title"`
+ }
+
+ var response responseSchema
+ err = json.Unmarshal(data, &response)
+ if err != nil {
+ return result, err
+ }
+
+ for _, hit := range response.Hits.Hits {
+ if len(hit.Fields.Id) == 0 {
+ continue // no ID, skip
+ }
+ _id := hit.Fields.Id[0]
+
+ var title string
+ if len(hit.Fields.Title) > 0 {
+ title = hit.Fields.Title[0]
+ } else {
+ title = _id
+ }
+ _id = strings.TrimPrefix(_id, "dashboard:")
+
+ var panels string
+ if len(hit.Fields.Panels) > 0 {
+ panels = hit.Fields.Panels[0]
+ } else {
+ panels = "[]" // empty array, so we can unmarshal it
+ }
+
+ var panelsJson []panelSchema
+ err := json.Unmarshal([]byte(panels), &panelsJson)
+ if err != nil {
+ return result, err
+ }
+
+ dashboard := kibanaDashboard{
+ name: title,
+ panels: make(map[string]string),
+ }
+
+ for _, panel := range panelsJson {
+ if panel.Name == "" {
+ panel.Name = panel.PanelID
+ }
+ dashboard.panels[panel.PanelID] = panel.Name
+ }
+ result.dashboards[_id] = dashboard
+ }
+
+ return result, nil
+}
+
+func parseMismatches(mismatch string) ([]jsondiff.JSONMismatch, error) {
+ var mismatches []jsondiff.JSONMismatch
+ err := json.Unmarshal([]byte(mismatch), &mismatches)
+ return mismatches, err
+}
+
+func formatJSON(in *string) string {
+ if in == nil {
+ return "n/a"
+ }
+
+ m := make(map[string]interface{})
+
+ err := json.Unmarshal([]byte(*in), &m)
+ if err != nil {
+ return err.Error()
+ }
+
+ b, err := json.MarshalIndent(m, "", " ")
+ if err != nil {
+ return err.Error()
+ }
+ return string(b)
+}
+
+type abTestingReportRow struct {
+ dashboardId string
+ panelId string
+ dashboardUrl string
+ detailsUrl string
+ dashboardName string
+ panelName string
+ aName string
+ bName string
+ successRate *float64
+ performanceGain *float64
+ count int
+}
+
+func (qmc *QuesmaManagementConsole) abTestingReadReport(kibanaUrl, orderBy string) ([]abTestingReportRow, error) {
+
+ kibanaDashboards, err := qmc.readKibanaDashboards()
+ if err != nil {
+ logger.Warn().Msgf("Error reading dashboards %v", err)
+ }
+
+ orderByToSQL := map[string]string{
+ "default": "dashboard_id, panel_id, a_name, b_name",
+ "response_similarity": "response_similarity DESC, dashboard_id, panel_id, a_name, b_name",
+ "performance_gain": "performance_gain DESC,dashboard_id, panel_id, a_name, b_name",
+ "count": "count DESC,dashboard_id, panel_id, a_name, b_name",
+ }
+
+ orderBySQL, ok := orderByToSQL[orderBy]
+ if !ok {
+ orderBySQL = orderByToSQL["default"]
+ }
+
+ sql := `
+WITH subresults AS (
+SELECT
+ kibana_dashboard_id ,
+ kibana_dashboard_panel_id,
+ response_a_name AS a_name,
+ response_b_name AS b_name,
+ response_mismatch_is_ok AS ok ,
+ count(*) AS c,
+ avg(response_a_time) AS a_time,
+ avg(response_b_time) AS b_time
+FROM
+ ab_testing_logs GROUP BY 1,2,3,4,5
+)
+
+SELECT
+ kibana_dashboard_id AS dashboard_id,
+ kibana_dashboard_panel_id AS panel_id,
+ a_name,
+ b_name,
+ (sumIf(c,ok)/ sum(c)) * 100 as response_similarity,
+ ((avgIf(a_time,ok)- avgIf(b_time,ok))/avgIf(a_time,ok))*100.0 as performance_gain,
+ sum(c) as count
+FROM
+ subresults
+GROUP BY
+ kibana_dashboard_id,kibana_dashboard_panel_id,a_name,b_name
+`
+
+ sql = sql + " ORDER BY " + orderBySQL
+
+ var result []abTestingReportRow
+
+ db := qmc.logManager.GetDB()
+	rows, err := db.Query(sql)
+ if err != nil {
+ return nil, err
+ }
+
+ for rows.Next() {
+ row := abTestingReportRow{}
+ err := rows.Scan(&row.dashboardId, &row.panelId, &row.aName, &row.bName, &row.successRate, &row.performanceGain, &row.count)
+ if err != nil {
+ return nil, err
+ }
+
+ row.dashboardUrl = fmt.Sprintf("%s/app/kibana#/dashboard/%s", kibanaUrl, row.dashboardId)
+ row.detailsUrl = fmt.Sprintf("%s/panel?dashboard_id=%s&panel_id=%s", abTestingPath, row.dashboardId, row.panelId)
+ row.dashboardName = kibanaDashboards.dashboardName(row.dashboardId)
+ row.panelName = kibanaDashboards.panelName(row.dashboardId, row.panelId)
+
+ result = append(result, row)
+ }
+
+ if rows.Err() != nil {
+ return nil, rows.Err()
+ }
+
+ return result, nil
+}
+
+func (qmc *QuesmaManagementConsole) generateABTestingReport(kibanaUrl, orderBy string) []byte {
+ buffer := newBufferWithHead()
+
+ buffer.Html("\n")
+ buffer.Html("\n")
+ buffer.Html(`` + "\n")
+ buffer.Html(`Dashboard | ` + "\n")
+ buffer.Html(`Panel | ` + "\n")
+ buffer.Html(`Count (since start) | ` + "\n")
+ buffer.Html(`Response similarity | ` + "\n")
+ buffer.Html(`Performance gain | ` + "\n")
+ buffer.Html(` | ` + "\n")
+ buffer.Html("
\n")
+ buffer.Html("\n")
+ buffer.Html("\n")
+
+ rows, err := qmc.abTestingReadReport(kibanaUrl, orderBy)
+ if err != nil {
+ qmc.renderError(&buffer, err)
+ return buffer.Bytes()
+ }
+
+ var lastDashboardId string
+ for _, row := range rows {
+ buffer.Html(`` + "\n")
+
+ if lastDashboardId != row.dashboardId {
+ buffer.Html(``)
+ buffer.Html(``).Text(row.dashboardName).Html(``)
+ buffer.Html(" ")
+ buffer.Text(fmt.Sprintf("(%s vs %s)", row.aName, row.bName))
+ buffer.Html(` | `)
+ lastDashboardId = row.dashboardId
+ } else {
+ buffer.Html(` | `)
+ }
+
+ buffer.Html(``)
+ buffer.Text(row.panelName)
+ buffer.Html(` | `)
+
+ buffer.Html(``)
+ buffer.Text(fmt.Sprintf("%d", row.count))
+ buffer.Html(` | `)
+
+ buffer.Html(``)
+ if row.successRate != nil {
+ buffer.Text(fmt.Sprintf("%.01f%%", *row.successRate))
+ } else {
+ buffer.Text("n/a")
+ }
+ buffer.Html(` | `)
+
+ buffer.Html(``)
+ if row.performanceGain != nil {
+ buffer.Text(fmt.Sprintf("%.01f%%", *row.performanceGain))
+ } else {
+ buffer.Text("n/a")
+ }
+ buffer.Html(` | `)
+
+ buffer.Html("")
+
+ buffer.Html(``)
+ buffer.Text("Details")
+ buffer.Html(``)
+
+ buffer.Html(" | ")
+ buffer.Html("
")
+ }
+
+ buffer.Html("\n")
+ buffer.Html("
\n")
+
+ return buffer.Bytes()
+}
+
+type abTestingPanelDetailsRow struct {
+ mismatch string
+ mismatchId string
+ count int
+}
+
+func (qmc *QuesmaManagementConsole) abTestingReadPanelDetails(dashboardId, panelId string) ([]abTestingPanelDetailsRow, error) {
+
+ sql := `
+ select response_mismatch_mismatches, response_mismatch_sha1, count() as c
+ from ab_testing_logs
+ where kibana_dashboard_id = ? and
+ kibana_dashboard_panel_id = ? and
+ response_mismatch_is_ok = false
+ group by 1,2
+ order by c desc
+ limit 100
+`
+ db := qmc.logManager.GetDB()
+
+ rows, err := db.Query(sql, dashboardId, panelId)
+ if err != nil {
+ return nil, err
+ }
+
+ var result []abTestingPanelDetailsRow
+ for rows.Next() {
+
+ var mismatch string
+ var count int
+ var mismatchId string
+
+ err := rows.Scan(&mismatch, &mismatchId, &count)
+ if err != nil {
+ return nil, err
+ }
+
+ r := abTestingPanelDetailsRow{
+ mismatch: mismatch,
+ mismatchId: mismatchId,
+ count: count,
+ }
+ result = append(result, r)
+ }
+
+ if rows.Err() != nil {
+ return nil, rows.Err()
+ }
+
+ return result, nil
+}
+
+func (qmc *QuesmaManagementConsole) renderABTestingMismatch(buffer *builder.HtmlBuffer, mismatch jsondiff.JSONMismatch) {
+
+ buffer.Html(``)
+ buffer.Html(``)
+ buffer.Text(mismatch.Message)
+ buffer.Text(" ")
+
+ if mismatch.Path != "" {
+ buffer.Html(``)
+ buffer.Text(`(`)
+ buffer.Text(mismatch.Path)
+ buffer.Text(`)`)
+ buffer.Html(`
`)
+ { // poor man's HTML indent
+ buffer.Html(`
`)
+
+ buffer.Html(`- `)
+ buffer.Html(`
`)
+ buffer.Text("Expected: ")
+ buffer.Text(mismatch.Expected)
+ buffer.Html(`
`)
+ buffer.Html(` `)
+
+ buffer.Html(`- `)
+ buffer.Html(`
`)
+ buffer.Text("Actual: ")
+ buffer.Text(mismatch.Actual)
+ buffer.Html(`
`)
+ buffer.Html(` `)
+
+ buffer.Html(`
`)
+ }
+ }
+ buffer.Html(``)
+ buffer.Html(``)
+
+}
+
+func (qmc *QuesmaManagementConsole) generateABPanelDetails(dashboardId, panelId string) []byte {
+ buffer := newBufferWithHead()
+
+ dashboards, err := qmc.readKibanaDashboards()
+ dashboardName := dashboardId
+ panelName := panelId
+
+ if err == nil {
+ dashboardName = dashboards.dashboardName(dashboardId)
+ panelName = dashboards.panelName(dashboardId, panelId)
+ } else {
+ logger.Warn().Err(err).Msgf("Error reading dashboards %v", err)
+ }
+
+ buffer.Html(``)
+
+ buffer.Html(`A/B Testing - Panel Details
`)
+ buffer.Html(``)
+ buffer.Text(fmt.Sprintf("Dashboard: %s", dashboardName))
+ buffer.Html(`
`)
+ buffer.Html(``)
+ buffer.Text(fmt.Sprintf("Panel: %s", panelName))
+ buffer.Html(`
`)
+
+ rows, err := qmc.abTestingReadPanelDetails(dashboardId, panelId)
+ if err != nil {
+ qmc.renderError(&buffer, err)
+ return buffer.Bytes()
+ }
+
+ if len(rows) > 0 {
+ buffer.Html("")
+ buffer.Html("")
+ buffer.Html(``)
+ buffer.Html(`Mismatch | `)
+ buffer.Html(`Count | `)
+ buffer.Html(` | `)
+ buffer.Html("
")
+
+ buffer.Html("\n")
+ buffer.Html("\n")
+
+ for _, row := range rows {
+ buffer.Html(``)
+ buffer.Html(``)
+
+ mismatches, err := parseMismatches(row.mismatch)
+ if err == nil {
+ const limit = 10
+ size := len(mismatches)
+ if size > limit {
+ mismatches = mismatches[:limit]
+ mismatches = append(mismatches, jsondiff.JSONMismatch{
+ Message: fmt.Sprintf("... and %d more", size-limit),
+ })
+ }
+
+ buffer.Html(``)
+ for _, m := range mismatches {
+ qmc.renderABTestingMismatch(&buffer, m)
+ }
+ buffer.Html(` `)
+
+ } else {
+ buffer.Text(row.mismatch)
+ }
+ buffer.Html(` | `)
+
+ buffer.Html(``)
+ buffer.Text(fmt.Sprintf("%d", row.count))
+ buffer.Html(` | `)
+
+ buffer.Html("")
+ buffer.Html(``).Text("Requests").Html(``)
+ buffer.Html(" | ")
+
+ buffer.Html("
\n")
+ }
+
+ buffer.Html("\n")
+ buffer.Html("
\n")
+ buffer.Html("\n\n\n")
+ } else {
+ buffer.Html(`No mismatches found
`)
+
+ }
+
+ return buffer.Bytes()
+}
+
+type abTestingMismatchDetailsRow struct {
+ timestamp string
+ requestId string
+ requestPath string
+ opaqueId string
+}
+
+func (qmc *QuesmaManagementConsole) abTestingReadMismatchDetails(dashboardId, panelId, mismatchHash string) ([]abTestingMismatchDetailsRow, error) {
+
+ sql := `
+ select "@timestamp", request_id, request_path, opaque_id
+ from ab_testing_logs
+ where
+ kibana_dashboard_id = ? and
+ kibana_dashboard_panel_id = ? and
+ response_mismatch_sha1 = ?
+
+ order by 1 desc
+ limit 100
+`
+
+ db := qmc.logManager.GetDB()
+
+ rows, err := db.Query(sql, dashboardId, panelId, mismatchHash)
+ if err != nil {
+ return nil, err
+ }
+
+ var result []abTestingMismatchDetailsRow
+ for rows.Next() {
+
+ row := abTestingMismatchDetailsRow{}
+ err := rows.Scan(&row.timestamp, &row.requestId, &row.requestPath, &row.opaqueId)
+ if err != nil {
+ return nil, err
+ }
+ result = append(result, row)
+
+ }
+ if rows.Err() != nil {
+ return nil, rows.Err()
+ }
+ return result, nil
+}
+
+func (qmc *QuesmaManagementConsole) generateABMismatchDetails(dashboardId, panelId, mismatchHash string) []byte {
+ buffer := newBufferWithHead()
+
+ dashboards, err := qmc.readKibanaDashboards()
+ dashboardName := dashboardId
+ panelName := panelId
+
+ if err == nil {
+ dashboardName = dashboards.dashboardName(dashboardId)
+ panelName = dashboards.panelName(dashboardId, panelId)
+ } else {
+ logger.Warn().Err(err).Msgf("Error reading dashboards %v", err)
+ }
+
+ buffer.Html(``)
+
+ buffer.Html(`A/B Testing - Panel requests
`)
+
+ buffer.Html(``)
+ buffer.Text(fmt.Sprintf("Dashboard: %s", dashboardName))
+ buffer.Html(`
`)
+ buffer.Html(``)
+ buffer.Text(fmt.Sprintf("Panel: %s", panelName))
+ buffer.Html(`
`)
+
+ buffer.Html("")
+ buffer.Html("")
+ buffer.Html(``)
+ buffer.Html(`Timestamp | `)
+ buffer.Html(`Request ID | `)
+ buffer.Html(`Request Path | `)
+ buffer.Html(`Opaque ID | `)
+ buffer.Html("
")
+ buffer.Html("")
+
+ buffer.Html("")
+
+ rows, err := qmc.abTestingReadMismatchDetails(dashboardId, panelId, mismatchHash)
+ if err != nil {
+ qmc.renderError(&buffer, err)
+ return buffer.Bytes()
+ }
+
+ for _, row := range rows {
+
+ buffer.Html(``)
+ buffer.Html(``)
+ buffer.Text(row.timestamp)
+ buffer.Html(` | `)
+
+ buffer.Html(``)
+ buffer.Html(``).Text(row.requestId).Html(``)
+
+ buffer.Html(` | `)
+
+ buffer.Html(``)
+ buffer.Text(row.requestPath)
+ buffer.Html(` | `)
+
+ buffer.Html(``)
+ buffer.Text(row.opaqueId)
+ buffer.Html(` | `)
+
+ buffer.Html("
\n")
+ }
+
+ buffer.Html("\n")
+ buffer.Html("
\n")
+
+ buffer.Html(``)
+ return buffer.Bytes()
+}
+
+type abTestingTableRow struct {
+ requestID *string
+ requestPath *string
+ requestIndexName *string
+ requestBody *string
+ responseBTime *float64
+ responseBError *string
+ responseBName *string
+ responseBBody *string
+ quesmaHash *string
+ kibanaDashboardID *string
+ opaqueID *string
+ responseABody *string
+ responseATime *float64
+ responseAError *string
+ responseAName *string
+ timestamp time.Time
+ responseMismatchSHA1 *string
+ responseMismatchCount *int64
+ responseMismatchTopType *string
+ responseMismatchIsOK *bool
+ responseMismatchMismatches *string
+ responseMismatchMessage *string
+ quesmaVersion *string
+ kibanaDashboardPanelID *string
+}
+
+func (qmc *QuesmaManagementConsole) abTestingReadRow(requestId string) (abTestingTableRow, error) {
+ sql := `SELECT
+ request_id, request_path, request_index_name,
+ request_body, response_b_time, response_b_error, response_b_name, response_b_body,
+ quesma_hash, kibana_dashboard_id, opaque_id, response_a_body, response_a_time,
+ response_a_error, response_a_name, "@timestamp", response_mismatch_sha1,
+ response_mismatch_count, response_mismatch_top_mismatch_type, response_mismatch_is_ok,
+ response_mismatch_mismatches, response_mismatch_message, quesma_version,
+ kibana_dashboard_panel_id
+ FROM ab_testing_logs
+ WHERE request_id = ?`
+
+ db := qmc.logManager.GetDB()
+
+ row := db.QueryRow(sql, requestId)
+
+ rec := abTestingTableRow{}
+ err := row.Scan(
+ &rec.requestID, &rec.requestPath, &rec.requestIndexName,
+ &rec.requestBody, &rec.responseBTime, &rec.responseBError, &rec.responseBName, &rec.responseBBody,
+ &rec.quesmaHash, &rec.kibanaDashboardID, &rec.opaqueID, &rec.responseABody, &rec.responseATime,
+ &rec.responseAError, &rec.responseAName, &rec.timestamp, &rec.responseMismatchSHA1,
+ &rec.responseMismatchCount, &rec.responseMismatchTopType, &rec.responseMismatchIsOK,
+ &rec.responseMismatchMismatches, &rec.responseMismatchMessage, &rec.quesmaVersion,
+ &rec.kibanaDashboardPanelID)
+
+ if err != nil {
+ return rec, err
+ }
+
+ if row.Err() != nil {
+ return rec, row.Err()
+ }
+
+ return rec, nil
+}
+
+func (qmc *QuesmaManagementConsole) generateABSingleRequest(requestId string) []byte {
+ buffer := newBufferWithHead()
+ buffer.Html(``)
+
+ buffer.Html(`A/B Testing - Request Results
`)
+
+ fmtAny := func(value any) string {
+ if value == nil {
+ return "n/a"
+ }
+
+ switch v := value.(type) {
+ case *string:
+ return *v
+ case *float64:
+ return fmt.Sprintf("%f", *v)
+ case *int64:
+ return fmt.Sprintf("%d", *v)
+ case *bool:
+ return fmt.Sprintf("%t", *v)
+ default:
+ return fmt.Sprintf("%s", value)
+ }
+ }
+
+ tableRow := func(label string, value any, pre bool) {
+
+ buffer.Html(``)
+ buffer.Html(``)
+ buffer.Text(label)
+ buffer.Html(` | `)
+ buffer.Html(``)
+ if pre {
+ buffer.Html(``)
+ }
+ buffer.Text(fmtAny(value))
+ if pre {
+ buffer.Html(` `)
+ }
+ buffer.Html(` | `)
+ buffer.Html("
\n")
+
+ }
+
+ var dashboardName string
+ var panelName string
+
+ dashboards, err := qmc.readKibanaDashboards()
+ if err != nil {
+ logger.Warn().Err(err).Msgf("Error reading dashboards %v", err)
+ }
+
+ row, err := qmc.abTestingReadRow(requestId)
+
+ if err == nil {
+
+ if row.kibanaDashboardID != nil {
+
+ dashboardName = dashboards.dashboardName(*row.kibanaDashboardID)
+ if row.kibanaDashboardPanelID != nil {
+ panelName = dashboards.panelName(*row.kibanaDashboardID, *row.kibanaDashboardPanelID)
+ }
+ }
+ } else {
+ logger.Warn().Err(err).Msgf("Error reading dashboards %v", err)
+ }
+
+ buffer.Html(``)
+ tableRow("Request ID", row.requestID, true)
+ tableRow("Timestamp", row.timestamp, true)
+ tableRow("Kibana Dashboard ID", dashboardName, false)
+ tableRow("Kibana Dashboard Panel ID", panelName, false)
+ tableRow("Opaque ID", row.opaqueID, true)
+ tableRow("Quesma Hash", row.quesmaHash, true)
+ tableRow("Quesma Version", row.quesmaVersion, true)
+ tableRow("Request Path", row.requestPath, true)
+ tableRow("Request Index Name", row.requestIndexName, false)
+ tableRow("Request Body", formatJSON(row.requestBody), true)
+ buffer.Html(`
`)
+
+ rowAB := func(label string, valueA any, valueB any, pre bool) {
+ buffer.Html(``)
+ buffer.Html(``)
+ buffer.Text(label)
+ buffer.Html(` | `)
+ buffer.Html(``)
+ if pre {
+ buffer.Html(``)
+ }
+ buffer.Text(fmtAny(valueA))
+ if pre {
+ buffer.Html(` `)
+ }
+ buffer.Html(` | `)
+ buffer.Html(``)
+ if pre {
+ buffer.Html(``)
+ }
+ buffer.Text(fmtAny(valueB))
+ if pre {
+ buffer.Html(` `)
+ }
+ buffer.Html(` | `)
+ buffer.Html("
\n")
+ }
+
+ buffer.Html(`Response A vs Response B
`)
+ buffer.Html(``)
+ buffer.Html(``)
+ buffer.Html(`Label | `)
+ buffer.Html(`Response A | `)
+ buffer.Html(`Response B | `)
+ buffer.Html("
")
+
+ rowAB("Name", row.responseAName, row.responseBName, false)
+ rowAB("Time", row.responseATime, row.responseBTime, false)
+ rowAB("Error", row.responseAError, row.responseBError, true)
+ rowAB("Response Body", formatJSON(row.responseABody), formatJSON(row.responseBBody), true)
+ buffer.Html(`
`)
+
+ buffer.Html(`Difference
`)
+ if row.responseMismatchSHA1 != nil {
+ mismaches, err := parseMismatches(*row.responseMismatchMismatches)
+ if err != nil {
+ buffer.Text(fmt.Sprintf("Error: %s", err))
+ } else {
+ buffer.Html(``)
+ buffer.Html(``)
+ buffer.Html(`Message | `)
+ buffer.Html(`Path | `)
+ buffer.Html(`Expected (`)
+ buffer.Text(fmtAny(row.responseAName))
+ buffer.Html(`) | `)
+ buffer.Html(`Actual (`)
+ buffer.Text(fmtAny(row.responseBName))
+ buffer.Html(`) | `)
+ buffer.Html("
")
+
+ for _, m := range mismaches {
+ buffer.Html(``)
+ buffer.Html(``)
+ buffer.Text(m.Message)
+ buffer.Html(` | `)
+ buffer.Html(``)
+ buffer.Text(m.Path)
+ buffer.Html(` | `)
+ buffer.Html(``)
+ buffer.Text(m.Expected)
+ buffer.Html(` | `)
+ buffer.Html(``)
+ buffer.Text(m.Actual)
+ buffer.Html(` | `)
+ buffer.Html("
")
+ }
+ }
+ }
+
+ buffer.Html(``)
+ return buffer.Bytes()
+}
diff --git a/quesma/quesma/ui/asset/head.html b/quesma/quesma/ui/asset/head.html
index 070341ad7..d3ad6c3c9 100644
--- a/quesma/quesma/ui/asset/head.html
+++ b/quesma/quesma/ui/asset/head.html
@@ -563,6 +563,63 @@
font-size: small;
}
+ #ab_testing_dashboard table {
+ border-collapse: collapse;
+ table-layout: fixed;
+ //width: 98%;
+ word-wrap: break-word;
+ font-size: small;
+ }
+
+ #ab_testing_dashboard table th {
+ border: solid 1px black;
+ }
+
+ #ab_testing_dashboard table td {
+ border: solid 1px black;
+ overflow-x: auto;
+ vertical-align: top;
+ }
+
+ /* Tooltip container */
+ .tooltip {
+ position: relative;
+ cursor: pointer;
+ color: #0056b3; /* Optional: make it look like a link */
+ text-decoration: none;
+ font-size: small;
+ /* Optional: underline to indicate it's interactive */
+ }
+
+ /* Tooltip text */
+ .tooltip::after {
+ content: attr(data-tooltip); /* Get tooltip text from data attribute */
+ position: absolute;
+ top: 125%; /* Position below the span */
+ left: 50%;
+ transform: translateX(-20%);
+ background-color: #eee;
+ color: black;
+ padding: 1em;
+ border-radius: 5px;
+ width: 40em;
+ max-width: 50em; /* Set maximum width */
+ white-space: wrap; /* Allow text to wrap */
+ opacity: 0;
+ visibility: hidden;
+ transition: opacity 0.2s;
+ z-index: 10;
+ pointer-events: none;
+ text-align: left; /* Center-align text for readability */
+ }
+
+ /* Show the tooltip on hover */
+ .tooltip:hover::after {
+ opacity: 1;
+ visibility: visible;
+ }
+
+
#quesma_all_logs table {
border-collapse: collapse;
table-layout: fixed;
diff --git a/quesma/quesma/ui/console_routes.go b/quesma/quesma/ui/console_routes.go
index a88ef1410..18b30ff23 100644
--- a/quesma/quesma/ui/console_routes.go
+++ b/quesma/quesma/ui/console_routes.go
@@ -23,6 +23,7 @@ const (
uiTcpPort = "9999"
managementInternalPath = "/_quesma"
healthPath = managementInternalPath + "/health"
+ loginWithElasticSearch = "/login-with-elasticsearch"
)
//go:embed asset/*
@@ -70,7 +71,7 @@ func (qmc *QuesmaManagementConsole) createRouting() *mux.Router {
router.HandleFunc("/auth/{provider}/callback", authCallbackHandler)
// our logic for login
- router.HandleFunc("/login-with-elasticsearch", qmc.HandleElasticsearchLogin)
+ router.HandleFunc(loginWithElasticSearch, qmc.HandleElasticsearchLogin)
authenticatedRoutes := router.PathPrefix("/").Subrouter()
if qmc.cfg.Elasticsearch.User == "" && qmc.cfg.Elasticsearch.Password == "" {
@@ -107,6 +108,58 @@ func (qmc *QuesmaManagementConsole) createRouting() *mux.Router {
_, _ = writer.Write(buf)
})
+ checkIfAbAvailable := func(writer http.ResponseWriter, req *http.Request) bool {
+ if qmc.hasABTestingTable() {
+ return true
+ }
+
+ _, _ = writer.Write([]byte("AB Testing results are not available."))
+ return false
+ }
+
+ authenticatedRoutes.HandleFunc(abTestingPath, func(writer http.ResponseWriter, req *http.Request) {
+ buf := qmc.generateABTestingDashboard()
+ _, _ = writer.Write(buf)
+ })
+
+ authenticatedRoutes.HandleFunc(abTestingPath+"/report", func(writer http.ResponseWriter, req *http.Request) {
+ if checkIfAbAvailable(writer, req) {
+ kibanaUrl := req.PostFormValue("kibana_url")
+ orderBy := req.PostFormValue("order_by")
+ buf := qmc.generateABTestingReport(kibanaUrl, orderBy)
+ _, _ = writer.Write(buf)
+ }
+ })
+
+ authenticatedRoutes.HandleFunc(abTestingPath+"/panel", func(writer http.ResponseWriter, req *http.Request) {
+ if checkIfAbAvailable(writer, req) {
+ dashboardId := req.FormValue("dashboard_id")
+ panelId := req.FormValue("panel_id")
+
+ buf := qmc.generateABPanelDetails(dashboardId, panelId)
+ _, _ = writer.Write(buf)
+ }
+ })
+
+ authenticatedRoutes.HandleFunc(abTestingPath+"/mismatch", func(writer http.ResponseWriter, req *http.Request) {
+ if checkIfAbAvailable(writer, req) {
+ dashboardId := req.FormValue("dashboard_id")
+ panelId := req.FormValue("panel_id")
+ mismatchId := req.FormValue("mismatch_id")
+
+ buf := qmc.generateABMismatchDetails(dashboardId, panelId, mismatchId)
+ _, _ = writer.Write(buf)
+ }
+ })
+
+ authenticatedRoutes.HandleFunc(abTestingPath+"/request", func(writer http.ResponseWriter, req *http.Request) {
+ if checkIfAbAvailable(writer, req) {
+ requestId := req.FormValue("request_id")
+ buf := qmc.generateABSingleRequest(requestId)
+ _, _ = writer.Write(buf)
+ }
+ })
+
authenticatedRoutes.HandleFunc("/tables/reload", func(writer http.ResponseWriter, req *http.Request) {
qmc.logManager.ReloadTables()
diff --git a/quesma/quesma/ui/es_auth_provider.go b/quesma/quesma/ui/es_auth_provider.go
index 966f6375d..12b2afb59 100644
--- a/quesma/quesma/ui/es_auth_provider.go
+++ b/quesma/quesma/ui/es_auth_provider.go
@@ -25,7 +25,7 @@ func NewElasticsearchAuthProvider() *ElasticsearchAuthProvider {
type ElasticsearchSession struct{}
func (e ElasticsearchSession) GetAuthURL() (string, error) {
- return "http://localhost:9999/login-with-elasticsearch", nil
+ return loginWithElasticSearch, nil
}
func (e ElasticsearchSession) Marshal() string {
diff --git a/quesma/quesma/ui/html_utils.go b/quesma/quesma/ui/html_utils.go
index 3419d43ef..28dab3790 100644
--- a/quesma/quesma/ui/html_utils.go
+++ b/quesma/quesma/ui/html_utils.go
@@ -77,6 +77,14 @@ func (qmc *QuesmaManagementConsole) generateTopNavigation(target string) []byte
}
buffer.Html(`>Data sources`)
+ buffer.Html("A/B`)
+
if qmc.isAuthEnabled {
buffer.Html(`Logout`)
}
@@ -84,7 +92,7 @@ func (qmc *QuesmaManagementConsole) generateTopNavigation(target string) []byte
buffer.Html("\n\n")
buffer.Html("\n\n")
- if target != "tables" && target != "telemetry" && target != "table_resolver" {
+ if target != "tables" && target != "telemetry" && target != "table_resolver" && target != "ab-testing-dashboard" {
buffer.Html(`` + "\n")
buffer.Html(`
`)
buffer.Html(fmt.Sprintf(
diff --git a/quesma/quesma/ui/login.go b/quesma/quesma/ui/login.go
index af67fc71c..b173d2b51 100644
--- a/quesma/quesma/ui/login.go
+++ b/quesma/quesma/ui/login.go
@@ -20,7 +20,7 @@ func (qmc *QuesmaManagementConsole) generateLoginForm() []byte {
buffer.Html(`